From ac088de6322fc16ebe75c2e5554be73754bf1fe2 Mon Sep 17 00:00:00 2001
From: Wei-Ning Huang
Date: Wed, 12 Jun 2019 17:31:08 +0800
Subject: Rebrand as tangerine-network/go-tangerine

---
 vendor/github.com/byzantine-lab/bls/.gitignore | 7 + vendor/github.com/byzantine-lab/bls/.travis.yml | 39 + vendor/github.com/byzantine-lab/bls/CMakeLists.txt | 33 + vendor/github.com/byzantine-lab/bls/Makefile | 164 + vendor/github.com/byzantine-lab/bls/bin/.emptydir | 0 vendor/github.com/byzantine-lab/bls/bls.sln | 30 + vendor/github.com/byzantine-lab/bls/bls_smpl.py | 40 + vendor/github.com/byzantine-lab/bls/common.props | 26 + vendor/github.com/byzantine-lab/bls/debug.props | 14 + .../github.com/byzantine-lab/bls/ffi/cs/App.config | 6 + .../bls/ffi/cs/Properties/AssemblyInfo.cs | 36 + vendor/github.com/byzantine-lab/bls/ffi/cs/bls.cs | 351 + .../github.com/byzantine-lab/bls/ffi/cs/bls.csproj | 97 + vendor/github.com/byzantine-lab/bls/ffi/cs/bls.sln | 25 + .../github.com/byzantine-lab/bls/ffi/cs/bls256.cs | 298 + .../byzantine-lab/bls/ffi/cs/bls256.csproj | 62 + .../github.com/byzantine-lab/bls/ffi/cs/bls256.sln | 22 + .../byzantine-lab/bls/ffi/cs/bls256_test.cs | 126 + .../byzantine-lab/bls/ffi/cs/bls_test.cs | 176 + .../byzantine-lab/bls/ffi/cs/readme-ja.md | 188 + .../github.com/byzantine-lab/bls/ffi/cs/readme.md | 185 + .../github.com/byzantine-lab/bls/ffi/go/bls/bls.go | 539 + .../byzantine-lab/bls/ffi/go/bls/callback.go | 12 + .../byzantine-lab/bls/ffi/go/bls/dummy.cpp | 3 + .../github.com/byzantine-lab/bls/ffi/go/bls/mcl.go | 646 + .../bls/images/bls-go-alpine/Dockerfile | 12 + .../github.com/byzantine-lab/bls/include/bls/bls.h | 275 + .../byzantine-lab/bls/include/bls/bls.hpp | 534 + vendor/github.com/byzantine-lab/bls/lib/.emptydir | 0 vendor/github.com/byzantine-lab/bls/mk.bat | 20 + vendor/github.com/byzantine-lab/bls/mkdll.bat | 8 + vendor/github.com/byzantine-lab/bls/mklib.bat | 26 + vendor/github.com/byzantine-lab/bls/obj/.emptydir | 0 vendor/github.com/byzantine-lab/bls/readme.md | 187 + vendor/github.com/byzantine-lab/bls/release.props | 12 + .../byzantine-lab/bls/sample/bls_smpl.cpp | 168 + vendor/github.com/byzantine-lab/bls/setvar.bat | 6 + .../github.com/byzantine-lab/bls/src/bls_c256.cpp | 3 + .../github.com/byzantine-lab/bls/src/bls_c384.cpp | 3 + .../byzantine-lab/bls/src/bls_c384_256.cpp | 4 + .../byzantine-lab/bls/src/bls_c_impl.hpp | 614 + .../byzantine-lab/bls/src/proj/bls.vcxproj | 92 + .../byzantine-lab/bls/src/qcoeff-bn254.hpp | 564 + .../byzantine-lab/bls/test/bls256_test.cpp | 3 + .../byzantine-lab/bls/test/bls384_256_test.cpp | 4 + .../byzantine-lab/bls/test/bls384_test.cpp | 3 + .../byzantine-lab/bls/test/bls_c256_test.cpp | 2 + .../byzantine-lab/bls/test/bls_c384_256_test.cpp | 3 + .../byzantine-lab/bls/test/bls_c384_test.cpp | 2 + .../byzantine-lab/bls/test/bls_c_test.hpp | 437 + .../github.com/byzantine-lab/bls/test/bls_test.hpp | 545 + .../bls/test/proj/bls_test/bls_test.vcxproj | 88 + .../byzantine-lab/dexon-consensus/LICENSE | 165 + .../byzantine-lab/dexon-consensus/common/event.go | 101 + .../byzantine-lab/dexon-consensus/common/logger.go | 134 + .../byzantine-lab/dexon-consensus/common/types.go | 90 + .../byzantine-lab/dexon-consensus/common/utils.go | 41 + .../dexon-consensus/core/agreement-mgr.go | 676 + .../dexon-consensus/core/agreement-state.go | 213 + .../dexon-consensus/core/agreement.go | 797 + .../dexon-consensus/core/blockchain.go | 681 + .../dexon-consensus/core/blockdb/interfaces.go | 70 + .../dexon-consensus/core/blockdb/level-db.go | 127 +
.../dexon-consensus/core/blockdb/memory.go | 183 + .../dexon-consensus/core/configuration-chain.go | 795 + .../dexon-consensus/core/consensus.go | 1567 + .../byzantine-lab/dexon-consensus/core/constant.go | 41 + .../dexon-consensus/core/crypto/dkg/constant.go | 26 + .../dexon-consensus/core/crypto/dkg/dkg.go | 637 + .../dexon-consensus/core/crypto/dkg/utils.go | 92 + .../dexon-consensus/core/crypto/ecdsa/ecdsa.go | 135 + .../dexon-consensus/core/crypto/interfaces.go | 48 + .../dexon-consensus/core/crypto/utils.go | 80 + .../dexon-consensus/core/db/interfaces.go | 100 + .../dexon-consensus/core/db/level-db.go | 573 + .../dexon-consensus/core/db/memory.go | 262 + .../dexon-consensus/core/dkg-tsig-protocol.go | 709 + .../dexon-consensus/core/interfaces.go | 182 + .../dexon-consensus/core/leader-selector.go | 149 + .../dexon-consensus/core/nonblocking.go | 137 + .../dexon-consensus/core/syncer/agreement.go | 301 + .../dexon-consensus/core/syncer/consensus.go | 543 + .../dexon-consensus/core/syncer/watch-cat.go | 156 + .../byzantine-lab/dexon-consensus/core/ticker.go | 127 + .../dexon-consensus/core/types/block-randomness.go | 44 + .../dexon-consensus/core/types/block.go | 227 + .../dexon-consensus/core/types/config.go | 75 + .../dexon-consensus/core/types/dkg/dkg.go | 485 + .../dexon-consensus/core/types/message.go | 24 + .../dexon-consensus/core/types/node.go | 61 + .../dexon-consensus/core/types/nodeset.go | 162 + .../dexon-consensus/core/types/position.go | 51 + .../dexon-consensus/core/types/vote.go | 100 + .../byzantine-lab/dexon-consensus/core/utils.go | 255 + .../dexon-consensus/core/utils/crypto.go | 376 + .../dexon-consensus/core/utils/nodeset-cache.go | 245 + .../dexon-consensus/core/utils/penalty-helper.go | 131 + .../core/utils/round-based-config.go | 112 + .../dexon-consensus/core/utils/round-event.go | 358 + .../dexon-consensus/core/utils/signer.go | 154 + .../dexon-consensus/core/utils/utils.go | 207 + .../dexon-consensus/core/utils/vote-filter.go | 72 + vendor/github.com/byzantine-lab/mcl/.gitignore | 13 + vendor/github.com/byzantine-lab/mcl/.travis.yml | 17 + vendor/github.com/byzantine-lab/mcl/CMakeLists.txt | 119 + vendor/github.com/byzantine-lab/mcl/COPYRIGHT | 47 + vendor/github.com/byzantine-lab/mcl/Makefile | 373 + vendor/github.com/byzantine-lab/mcl/bench.txt | 114 + vendor/github.com/byzantine-lab/mcl/common.mk | 117 + vendor/github.com/byzantine-lab/mcl/common.props | 26 + vendor/github.com/byzantine-lab/mcl/debug.props | 14 + .../github.com/byzantine-lab/mcl/ffi/cs/App.config | 6 + .../mcl/ffi/cs/Properties/AssemblyInfo.cs | 36 + .../github.com/byzantine-lab/mcl/ffi/cs/bn256.cs | 475 + .../byzantine-lab/mcl/ffi/cs/bn256.csproj | 62 + .../github.com/byzantine-lab/mcl/ffi/cs/bn256.sln | 22 + .../byzantine-lab/mcl/ffi/cs/bn256_test.cs | 149 + .../github.com/byzantine-lab/mcl/ffi/go/mcl/mcl.go | 659 + .../byzantine-lab/mcl/ffi/go/mcl/mcl_test.go | 157 + .../byzantine-lab/mcl/ffi/java/Bn256Test.java | 104 + .../byzantine-lab/mcl/ffi/java/ElgamalTest.java | 144 + .../github.com/byzantine-lab/mcl/ffi/java/Makefile | 64 + .../github.com/byzantine-lab/mcl/ffi/java/bn256.i | 31 + .../byzantine-lab/mcl/ffi/java/bn256_impl.hpp | 249 + .../byzantine-lab/mcl/ffi/java/bn256_wrap.cxx | 1542 + .../byzantine-lab/mcl/ffi/java/elgamal.i | 28 + .../byzantine-lab/mcl/ffi/java/elgamal_impl.hpp | 147 + .../byzantine-lab/mcl/ffi/java/elgamal_wrap.cxx | 1129 + .../github.com/byzantine-lab/mcl/ffi/java/java.md | 95 + .../byzantine-lab/mcl/ffi/java/make_wrap.bat | 23 + 
.../byzantine-lab/mcl/ffi/java/run-bn256.bat | 9 + .../byzantine-lab/mcl/ffi/java/run-elgamal.bat | 9 + .../byzantine-lab/mcl/ffi/java/set-java-path.bat | 8 + .../byzantine-lab/mcl/ffi/js/export-functions.py | 73 + .../github.com/byzantine-lab/mcl/ffi/js/pre-mcl.js | 5 + .../byzantine-lab/mcl/ffi/python/pairing.py | 80 + .../github.com/byzantine-lab/mcl/ffi/python/she.py | 298 + .../byzantine-lab/mcl/include/cybozu/array.hpp | 197 + .../byzantine-lab/mcl/include/cybozu/atoi.hpp | 239 + .../byzantine-lab/mcl/include/cybozu/benchmark.hpp | 212 + .../mcl/include/cybozu/bit_operation.hpp | 139 + .../mcl/include/cybozu/critical_section.hpp | 60 + .../byzantine-lab/mcl/include/cybozu/crypto.hpp | 321 + .../byzantine-lab/mcl/include/cybozu/endian.hpp | 224 + .../byzantine-lab/mcl/include/cybozu/exception.hpp | 252 + .../byzantine-lab/mcl/include/cybozu/hash.hpp | 67 + .../byzantine-lab/mcl/include/cybozu/inttype.hpp | 163 + .../byzantine-lab/mcl/include/cybozu/itoa.hpp | 337 + .../mcl/include/cybozu/link_libeay32.hpp | 21 + .../byzantine-lab/mcl/include/cybozu/link_mpir.hpp | 18 + .../mcl/include/cybozu/link_ssleay32.hpp | 19 + .../byzantine-lab/mcl/include/cybozu/mutex.hpp | 141 + .../byzantine-lab/mcl/include/cybozu/option.hpp | 723 + .../mcl/include/cybozu/random_generator.hpp | 153 + .../mcl/include/cybozu/serializer.hpp | 363 + .../byzantine-lab/mcl/include/cybozu/sha2.hpp | 467 + .../byzantine-lab/mcl/include/cybozu/stream.hpp | 267 + .../byzantine-lab/mcl/include/cybozu/test.hpp | 373 + .../mcl/include/cybozu/unordered_map.hpp | 13 + .../byzantine-lab/mcl/include/cybozu/xorshift.hpp | 189 + .../mcl/include/mcl/aggregate_sig.hpp | 265 + .../byzantine-lab/mcl/include/mcl/ahe.hpp | 76 + .../byzantine-lab/mcl/include/mcl/array.hpp | 167 + .../byzantine-lab/mcl/include/mcl/bls12_381.hpp | 15 + .../github.com/byzantine-lab/mcl/include/mcl/bn.h | 428 + .../byzantine-lab/mcl/include/mcl/bn.hpp | 2261 + .../byzantine-lab/mcl/include/mcl/bn256.hpp | 15 + .../byzantine-lab/mcl/include/mcl/bn384.hpp | 15 + .../byzantine-lab/mcl/include/mcl/bn512.hpp | 14 + .../byzantine-lab/mcl/include/mcl/conversion.hpp | 495 + .../byzantine-lab/mcl/include/mcl/curve_type.h | 35 + .../byzantine-lab/mcl/include/mcl/ec.hpp | 1045 + .../byzantine-lab/mcl/include/mcl/ecdsa.h | 105 + .../byzantine-lab/mcl/include/mcl/ecdsa.hpp | 257 + .../byzantine-lab/mcl/include/mcl/ecparam.hpp | 191 + .../byzantine-lab/mcl/include/mcl/elgamal.hpp | 612 + .../byzantine-lab/mcl/include/mcl/fp.hpp | 661 + .../byzantine-lab/mcl/include/mcl/fp_tower.hpp | 1364 + .../byzantine-lab/mcl/include/mcl/gmp_util.hpp | 954 + .../mcl/include/mcl/impl/bn_c_impl.hpp | 643 + .../byzantine-lab/mcl/include/mcl/lagrange.hpp | 107 + .../byzantine-lab/mcl/include/mcl/op.hpp | 389 + .../byzantine-lab/mcl/include/mcl/operator.hpp | 177 + .../byzantine-lab/mcl/include/mcl/paillier.hpp | 84 + .../byzantine-lab/mcl/include/mcl/randgen.hpp | 156 + .../github.com/byzantine-lab/mcl/include/mcl/she.h | 270 + .../byzantine-lab/mcl/include/mcl/she.hpp | 1939 + .../byzantine-lab/mcl/include/mcl/util.hpp | 285 + .../byzantine-lab/mcl/include/mcl/vint.hpp | 1987 + .../mcl/include/mcl/window_method.hpp | 175 + vendor/github.com/byzantine-lab/mcl/lib/.emptydir | 0 vendor/github.com/byzantine-lab/mcl/mcl.sln | 57 + vendor/github.com/byzantine-lab/mcl/misc/bench.txt | 21 + .../byzantine-lab/mcl/misc/karatsuba.cpp | 75 + vendor/github.com/byzantine-lab/mcl/misc/mul.cpp | 58 + .../byzantine-lab/mcl/misc/precompute.cpp | 30 + .../github.com/byzantine-lab/mcl/misc/she/bench.sh | 
6 + .../byzantine-lab/mcl/misc/she/bench4.txt | 99 + .../byzantine-lab/mcl/misc/she/bench6.txt | 99 + .../byzantine-lab/mcl/misc/she/bench8.txt | 99 + .../byzantine-lab/mcl/misc/she/nizkp.pdf | Bin 0 -> 28787 bytes .../byzantine-lab/mcl/misc/she/she-api-ja.md | 314 + .../byzantine-lab/mcl/misc/she/she-api.md | 322 + .../github.com/byzantine-lab/mcl/misc/she/she.pdf | Bin 0 -> 25716 bytes vendor/github.com/byzantine-lab/mcl/mk.bat | 20 + vendor/github.com/byzantine-lab/mcl/mklib.bat | 39 + vendor/github.com/byzantine-lab/mcl/obj/.emptydir | 0 vendor/github.com/byzantine-lab/mcl/readme.md | 457 + vendor/github.com/byzantine-lab/mcl/release.props | 12 + .../github.com/byzantine-lab/mcl/sample/bench.cpp | 233 + .../byzantine-lab/mcl/sample/bls_sig.cpp | 70 + .../github.com/byzantine-lab/mcl/sample/ecdh.cpp | 64 + .../github.com/byzantine-lab/mcl/sample/large.cpp | 125 + .../byzantine-lab/mcl/sample/pairing.cpp | 56 + .../byzantine-lab/mcl/sample/pairing_c.c | 52 + .../github.com/byzantine-lab/mcl/sample/random.cpp | 29 + .../byzantine-lab/mcl/sample/rawbench.cpp | 180 + .../mcl/sample/she_make_dlp_table.cpp | 69 + .../byzantine-lab/mcl/sample/she_smpl.cpp | 125 + .../github.com/byzantine-lab/mcl/sample/tri-dh.cpp | 97 + .../github.com/byzantine-lab/mcl/sample/vote.cpp | 206 + vendor/github.com/byzantine-lab/mcl/setvar.bat | 2 + .../github.com/byzantine-lab/mcl/src/asm/aarch64.s | 13197 +++ vendor/github.com/byzantine-lab/mcl/src/asm/arm.s | 84189 +++++++++++++++++++ .../github.com/byzantine-lab/mcl/src/asm/low_arm.s | 154 + .../byzantine-lab/mcl/src/asm/low_x86-64.asm | 153 + .../byzantine-lab/mcl/src/asm/low_x86.asm | 0 .../byzantine-lab/mcl/src/asm/x86-64.bmi2.s | 14155 ++++ .../github.com/byzantine-lab/mcl/src/asm/x86-64.s | 16652 ++++ .../byzantine-lab/mcl/src/asm/x86-64mac.bmi2.s | 13830 +++ .../byzantine-lab/mcl/src/asm/x86-64mac.s | 16313 ++++ .../byzantine-lab/mcl/src/asm/x86.bmi2.s | 71547 ++++++++++++++++ vendor/github.com/byzantine-lab/mcl/src/asm/x86.s | 73785 ++++++++++++++++ .../github.com/byzantine-lab/mcl/src/bn_c256.cpp | 6 + .../github.com/byzantine-lab/mcl/src/bn_c384.cpp | 7 + .../byzantine-lab/mcl/src/bn_c384_256.cpp | 7 + .../github.com/byzantine-lab/mcl/src/bn_c512.cpp | 6 + .../github.com/byzantine-lab/mcl/src/bn_c_impl.hpp | 517 + .../github.com/byzantine-lab/mcl/src/ecdsa_c.cpp | 110 + vendor/github.com/byzantine-lab/mcl/src/fp.cpp | 646 + .../byzantine-lab/mcl/src/fp_generator.hpp | 3885 + vendor/github.com/byzantine-lab/mcl/src/gen.cpp | 999 + .../github.com/byzantine-lab/mcl/src/llvm_gen.hpp | 616 + .../github.com/byzantine-lab/mcl/src/low_func.hpp | 706 + .../byzantine-lab/mcl/src/low_func_llvm.hpp | 94 + .../byzantine-lab/mcl/src/proj/mcl.vcxproj | 92 + vendor/github.com/byzantine-lab/mcl/src/proto.hpp | 81 + .../github.com/byzantine-lab/mcl/src/she_c256.cpp | 2 + .../github.com/byzantine-lab/mcl/src/she_c384.cpp | 2 + .../byzantine-lab/mcl/src/she_c_impl.hpp | 681 + .../github.com/byzantine-lab/mcl/src/xbyak/xbyak.h | 2611 + .../byzantine-lab/mcl/src/xbyak/xbyak_mnemonic.h | 1972 + .../byzantine-lab/mcl/src/xbyak/xbyak_util.h | 653 + .../byzantine-lab/mcl/test/aggregate_sig_test.cpp | 74 + .../byzantine-lab/mcl/test/array_test.cpp | 104 + .../byzantine-lab/mcl/test/base_test.cpp | 392 + vendor/github.com/byzantine-lab/mcl/test/bench.hpp | 192 + .../byzantine-lab/mcl/test/bls12_test.cpp | 720 + .../byzantine-lab/mcl/test/bn384_test.cpp | 83 + .../byzantine-lab/mcl/test/bn512_test.cpp | 68 + .../byzantine-lab/mcl/test/bn_c256_test.cpp | 6 + 
.../byzantine-lab/mcl/test/bn_c384_256_test.cpp | 7 + .../byzantine-lab/mcl/test/bn_c384_test.cpp | 6 + .../byzantine-lab/mcl/test/bn_c512_test.cpp | 6 + .../byzantine-lab/mcl/test/bn_c_test.hpp | 699 + .../github.com/byzantine-lab/mcl/test/bn_test.cpp | 408 + .../byzantine-lab/mcl/test/conversion_test.cpp | 96 + .../github.com/byzantine-lab/mcl/test/ec_test.cpp | 573 + .../byzantine-lab/mcl/test/ecdsa_c_test.cpp | 51 + .../byzantine-lab/mcl/test/ecdsa_test.cpp | 69 + .../byzantine-lab/mcl/test/elgamal_test.cpp | 155 + .../byzantine-lab/mcl/test/fp_generator_test.cpp | 207 + .../github.com/byzantine-lab/mcl/test/fp_test.cpp | 1046 + .../byzantine-lab/mcl/test/fp_tower_test.cpp | 477 + .../byzantine-lab/mcl/test/fp_util_test.cpp | 270 + .../github.com/byzantine-lab/mcl/test/glv_test.cpp | 209 + .../github.com/byzantine-lab/mcl/test/gmp_test.cpp | 70 + .../github.com/byzantine-lab/mcl/test/low_test.cpp | 73 + vendor/github.com/byzantine-lab/mcl/test/mk32.sh | 1 + .../byzantine-lab/mcl/test/modp_test.cpp | 37 + .../byzantine-lab/mcl/test/mont_fp_test.cpp | 332 + .../byzantine-lab/mcl/test/paillier_test.cpp | 24 + .../mcl/test/proj/bn_test/bn_test.vcxproj | 88 + .../mcl/test/proj/ec_test/ec_test.vcxproj | 88 + .../mcl/test/proj/fp_test/fp_test.vcxproj | 88 + .../test/proj/fp_tower_test/fp_tower_test.vcxproj | 88 + .../byzantine-lab/mcl/test/she_c256_test.cpp | 2 + .../byzantine-lab/mcl/test/she_c384_test.cpp | 2 + .../byzantine-lab/mcl/test/she_c_test.hpp | 535 + .../github.com/byzantine-lab/mcl/test/she_test.cpp | 756 + .../github.com/byzantine-lab/mcl/test/sq_test.cpp | 21 + .../byzantine-lab/mcl/test/vint_test.cpp | 1353 + .../byzantine-lab/mcl/test/window_method_test.cpp | 70 + vendor/github.com/dexon-foundation/bls/.gitignore | 7 - vendor/github.com/dexon-foundation/bls/.travis.yml | 39 - .../github.com/dexon-foundation/bls/CMakeLists.txt | 33 - vendor/github.com/dexon-foundation/bls/Makefile | 164 - .../github.com/dexon-foundation/bls/bin/.emptydir | 0 vendor/github.com/dexon-foundation/bls/bls.sln | 30 - vendor/github.com/dexon-foundation/bls/bls_smpl.py | 40 - .../github.com/dexon-foundation/bls/common.props | 26 - vendor/github.com/dexon-foundation/bls/debug.props | 14 - .../dexon-foundation/bls/ffi/cs/App.config | 6 - .../bls/ffi/cs/Properties/AssemblyInfo.cs | 36 - .../github.com/dexon-foundation/bls/ffi/cs/bls.cs | 351 - .../dexon-foundation/bls/ffi/cs/bls.csproj | 97 - .../github.com/dexon-foundation/bls/ffi/cs/bls.sln | 25 - .../dexon-foundation/bls/ffi/cs/bls256.cs | 298 - .../dexon-foundation/bls/ffi/cs/bls256.csproj | 62 - .../dexon-foundation/bls/ffi/cs/bls256.sln | 22 - .../dexon-foundation/bls/ffi/cs/bls256_test.cs | 126 - .../dexon-foundation/bls/ffi/cs/bls_test.cs | 176 - .../dexon-foundation/bls/ffi/cs/readme-ja.md | 188 - .../dexon-foundation/bls/ffi/cs/readme.md | 185 - .../dexon-foundation/bls/ffi/go/bls/bls.go | 539 - .../dexon-foundation/bls/ffi/go/bls/bls_test.go | 690 - .../dexon-foundation/bls/ffi/go/bls/callback.go | 12 - .../dexon-foundation/bls/ffi/go/bls/config.h | 6 - .../dexon-foundation/bls/ffi/go/bls/dummy.cpp | 3 - .../dexon-foundation/bls/ffi/go/bls/mcl.go | 646 - .../bls/images/bls-go-alpine/Dockerfile | 12 - .../dexon-foundation/bls/include/bls/bls.h | 275 - .../dexon-foundation/bls/include/bls/bls.hpp | 534 - .../github.com/dexon-foundation/bls/lib/.emptydir | 0 vendor/github.com/dexon-foundation/bls/mk.bat | 20 - vendor/github.com/dexon-foundation/bls/mkdll.bat | 8 - vendor/github.com/dexon-foundation/bls/mklib.bat | 26 - 
.../github.com/dexon-foundation/bls/obj/.emptydir | 0 vendor/github.com/dexon-foundation/bls/readme.md | 187 - .../github.com/dexon-foundation/bls/release.props | 12 - .../dexon-foundation/bls/sample/bls_smpl.cpp | 168 - vendor/github.com/dexon-foundation/bls/setvar.bat | 6 - .../dexon-foundation/bls/src/bls_c256.cpp | 3 - .../dexon-foundation/bls/src/bls_c384.cpp | 3 - .../dexon-foundation/bls/src/bls_c384_256.cpp | 4 - .../dexon-foundation/bls/src/bls_c_impl.hpp | 614 - .../dexon-foundation/bls/src/proj/bls.vcxproj | 92 - .../dexon-foundation/bls/src/qcoeff-bn254.hpp | 564 - .../dexon-foundation/bls/test/bls256_test.cpp | 3 - .../dexon-foundation/bls/test/bls384_256_test.cpp | 4 - .../dexon-foundation/bls/test/bls384_test.cpp | 3 - .../dexon-foundation/bls/test/bls_c256_test.cpp | 2 - .../bls/test/bls_c384_256_test.cpp | 3 - .../dexon-foundation/bls/test/bls_c384_test.cpp | 2 - .../dexon-foundation/bls/test/bls_c_test.hpp | 437 - .../dexon-foundation/bls/test/bls_test.hpp | 545 - .../bls/test/proj/bls_test/bls_test.vcxproj | 88 - .../dexon-foundation/dexon-consensus/LICENSE | 165 - .../dexon-consensus/common/event.go | 101 - .../dexon-consensus/common/logger.go | 134 - .../dexon-consensus/common/types.go | 90 - .../dexon-consensus/common/utils.go | 41 - .../dexon-consensus/core/agreement-mgr.go | 676 - .../dexon-consensus/core/agreement-state.go | 213 - .../dexon-consensus/core/agreement.go | 797 - .../dexon-consensus/core/blockchain.go | 681 - .../dexon-consensus/core/configuration-chain.go | 795 - .../dexon-consensus/core/consensus.go | 1567 - .../dexon-consensus/core/constant.go | 41 - .../dexon-consensus/core/crypto/dkg/constant.go | 26 - .../dexon-consensus/core/crypto/dkg/dkg.go | 637 - .../dexon-consensus/core/crypto/dkg/utils.go | 92 - .../dexon-consensus/core/crypto/ecdsa/ecdsa.go | 135 - .../dexon-consensus/core/crypto/interfaces.go | 48 - .../dexon-consensus/core/crypto/utils.go | 80 - .../dexon-consensus/core/db/interfaces.go | 100 - .../dexon-consensus/core/db/level-db.go | 573 - .../dexon-consensus/core/db/memory.go | 262 - .../dexon-consensus/core/dkg-tsig-protocol.go | 709 - .../dexon-consensus/core/interfaces.go | 182 - .../dexon-consensus/core/leader-selector.go | 149 - .../dexon-consensus/core/nonblocking.go | 137 - .../dexon-consensus/core/syncer/agreement.go | 301 - .../dexon-consensus/core/syncer/consensus.go | 543 - .../dexon-consensus/core/syncer/watch-cat.go | 156 - .../dexon-consensus/core/ticker.go | 127 - .../dexon-consensus/core/types/block-randomness.go | 44 - .../dexon-consensus/core/types/block.go | 227 - .../dexon-consensus/core/types/config.go | 75 - .../dexon-consensus/core/types/dkg/dkg.go | 485 - .../dexon-consensus/core/types/message.go | 24 - .../dexon-consensus/core/types/node.go | 61 - .../dexon-consensus/core/types/nodeset.go | 162 - .../dexon-consensus/core/types/position.go | 51 - .../dexon-consensus/core/types/vote.go | 100 - .../dexon-foundation/dexon-consensus/core/utils.go | 255 - .../dexon-consensus/core/utils/crypto.go | 376 - .../dexon-consensus/core/utils/nodeset-cache.go | 245 - .../dexon-consensus/core/utils/penalty-helper.go | 131 - .../core/utils/round-based-config.go | 112 - .../dexon-consensus/core/utils/round-event.go | 358 - .../dexon-consensus/core/utils/signer.go | 154 - .../dexon-consensus/core/utils/utils.go | 207 - .../dexon-consensus/core/utils/vote-filter.go | 72 - vendor/github.com/dexon-foundation/mcl/.gitignore | 13 - vendor/github.com/dexon-foundation/mcl/.travis.yml | 17 - 
.../github.com/dexon-foundation/mcl/CMakeLists.txt | 119 - vendor/github.com/dexon-foundation/mcl/COPYRIGHT | 47 - vendor/github.com/dexon-foundation/mcl/Makefile | 373 - vendor/github.com/dexon-foundation/mcl/bench.txt | 114 - vendor/github.com/dexon-foundation/mcl/common.mk | 117 - .../github.com/dexon-foundation/mcl/common.props | 26 - vendor/github.com/dexon-foundation/mcl/debug.props | 14 - .../dexon-foundation/mcl/ffi/cs/App.config | 6 - .../mcl/ffi/cs/Properties/AssemblyInfo.cs | 36 - .../dexon-foundation/mcl/ffi/cs/bn256.cs | 475 - .../dexon-foundation/mcl/ffi/cs/bn256.csproj | 62 - .../dexon-foundation/mcl/ffi/cs/bn256.sln | 22 - .../dexon-foundation/mcl/ffi/cs/bn256_test.cs | 149 - .../dexon-foundation/mcl/ffi/go/mcl/mcl.go | 659 - .../dexon-foundation/mcl/ffi/go/mcl/mcl_test.go | 157 - .../dexon-foundation/mcl/ffi/java/Bn256Test.java | 104 - .../dexon-foundation/mcl/ffi/java/ElgamalTest.java | 144 - .../dexon-foundation/mcl/ffi/java/Makefile | 64 - .../dexon-foundation/mcl/ffi/java/bn256.i | 31 - .../dexon-foundation/mcl/ffi/java/bn256_impl.hpp | 249 - .../dexon-foundation/mcl/ffi/java/bn256_wrap.cxx | 1542 - .../dexon-foundation/mcl/ffi/java/elgamal.i | 28 - .../dexon-foundation/mcl/ffi/java/elgamal_impl.hpp | 147 - .../dexon-foundation/mcl/ffi/java/elgamal_wrap.cxx | 1129 - .../dexon-foundation/mcl/ffi/java/java.md | 95 - .../dexon-foundation/mcl/ffi/java/make_wrap.bat | 23 - .../dexon-foundation/mcl/ffi/java/run-bn256.bat | 9 - .../dexon-foundation/mcl/ffi/java/run-elgamal.bat | 9 - .../mcl/ffi/java/set-java-path.bat | 8 - .../mcl/ffi/js/export-functions.py | 73 - .../dexon-foundation/mcl/ffi/js/pre-mcl.js | 5 - .../dexon-foundation/mcl/ffi/python/pairing.py | 80 - .../dexon-foundation/mcl/ffi/python/she.py | 298 - .../dexon-foundation/mcl/include/cybozu/array.hpp | 197 - .../dexon-foundation/mcl/include/cybozu/atoi.hpp | 239 - .../mcl/include/cybozu/benchmark.hpp | 212 - .../mcl/include/cybozu/bit_operation.hpp | 139 - .../mcl/include/cybozu/critical_section.hpp | 60 - .../dexon-foundation/mcl/include/cybozu/crypto.hpp | 321 - .../dexon-foundation/mcl/include/cybozu/endian.hpp | 224 - .../mcl/include/cybozu/exception.hpp | 252 - .../dexon-foundation/mcl/include/cybozu/hash.hpp | 67 - .../mcl/include/cybozu/inttype.hpp | 163 - .../dexon-foundation/mcl/include/cybozu/itoa.hpp | 337 - .../mcl/include/cybozu/link_libeay32.hpp | 21 - .../mcl/include/cybozu/link_mpir.hpp | 18 - .../mcl/include/cybozu/link_ssleay32.hpp | 19 - .../dexon-foundation/mcl/include/cybozu/mutex.hpp | 141 - .../dexon-foundation/mcl/include/cybozu/option.hpp | 723 - .../mcl/include/cybozu/random_generator.hpp | 153 - .../mcl/include/cybozu/serializer.hpp | 363 - .../dexon-foundation/mcl/include/cybozu/sha2.hpp | 467 - .../dexon-foundation/mcl/include/cybozu/stream.hpp | 267 - .../dexon-foundation/mcl/include/cybozu/test.hpp | 373 - .../mcl/include/cybozu/unordered_map.hpp | 13 - .../mcl/include/cybozu/xorshift.hpp | 189 - .../mcl/include/mcl/aggregate_sig.hpp | 265 - .../dexon-foundation/mcl/include/mcl/ahe.hpp | 76 - .../dexon-foundation/mcl/include/mcl/array.hpp | 167 - .../dexon-foundation/mcl/include/mcl/bls12_381.hpp | 15 - .../dexon-foundation/mcl/include/mcl/bn.h | 428 - .../dexon-foundation/mcl/include/mcl/bn.hpp | 2261 - .../dexon-foundation/mcl/include/mcl/bn256.hpp | 15 - .../dexon-foundation/mcl/include/mcl/bn384.hpp | 15 - .../dexon-foundation/mcl/include/mcl/bn512.hpp | 14 - .../mcl/include/mcl/conversion.hpp | 495 - .../dexon-foundation/mcl/include/mcl/curve_type.h | 35 - 
.../dexon-foundation/mcl/include/mcl/ec.hpp | 1045 - .../dexon-foundation/mcl/include/mcl/ecdsa.h | 105 - .../dexon-foundation/mcl/include/mcl/ecdsa.hpp | 257 - .../dexon-foundation/mcl/include/mcl/ecparam.hpp | 191 - .../dexon-foundation/mcl/include/mcl/elgamal.hpp | 612 - .../dexon-foundation/mcl/include/mcl/fp.hpp | 661 - .../dexon-foundation/mcl/include/mcl/fp_tower.hpp | 1364 - .../dexon-foundation/mcl/include/mcl/gmp_util.hpp | 954 - .../mcl/include/mcl/impl/bn_c_impl.hpp | 643 - .../dexon-foundation/mcl/include/mcl/lagrange.hpp | 107 - .../dexon-foundation/mcl/include/mcl/op.hpp | 389 - .../dexon-foundation/mcl/include/mcl/operator.hpp | 177 - .../dexon-foundation/mcl/include/mcl/paillier.hpp | 84 - .../dexon-foundation/mcl/include/mcl/randgen.hpp | 156 - .../dexon-foundation/mcl/include/mcl/she.h | 270 - .../dexon-foundation/mcl/include/mcl/she.hpp | 1939 - .../dexon-foundation/mcl/include/mcl/util.hpp | 285 - .../dexon-foundation/mcl/include/mcl/vint.hpp | 1987 - .../mcl/include/mcl/window_method.hpp | 175 - .../github.com/dexon-foundation/mcl/lib/.emptydir | 0 vendor/github.com/dexon-foundation/mcl/mcl.sln | 57 - .../github.com/dexon-foundation/mcl/misc/bench.txt | 21 - .../dexon-foundation/mcl/misc/karatsuba.cpp | 75 - .../github.com/dexon-foundation/mcl/misc/mul.cpp | 58 - .../dexon-foundation/mcl/misc/precompute.cpp | 30 - .../dexon-foundation/mcl/misc/she/bench.sh | 6 - .../dexon-foundation/mcl/misc/she/bench4.txt | 99 - .../dexon-foundation/mcl/misc/she/bench6.txt | 99 - .../dexon-foundation/mcl/misc/she/bench8.txt | 99 - .../dexon-foundation/mcl/misc/she/nizkp.pdf | Bin 28787 -> 0 bytes .../dexon-foundation/mcl/misc/she/she-api-ja.md | 314 - .../dexon-foundation/mcl/misc/she/she-api.md | 322 - .../dexon-foundation/mcl/misc/she/she.pdf | Bin 25716 -> 0 bytes vendor/github.com/dexon-foundation/mcl/mk.bat | 20 - vendor/github.com/dexon-foundation/mcl/mklib.bat | 39 - .../github.com/dexon-foundation/mcl/obj/.emptydir | 0 vendor/github.com/dexon-foundation/mcl/readme.md | 457 - .../github.com/dexon-foundation/mcl/release.props | 12 - .../dexon-foundation/mcl/sample/bench.cpp | 233 - .../dexon-foundation/mcl/sample/bls_sig.cpp | 70 - .../dexon-foundation/mcl/sample/ecdh.cpp | 64 - .../dexon-foundation/mcl/sample/large.cpp | 125 - .../dexon-foundation/mcl/sample/pairing.cpp | 56 - .../dexon-foundation/mcl/sample/pairing_c.c | 52 - .../dexon-foundation/mcl/sample/random.cpp | 29 - .../dexon-foundation/mcl/sample/rawbench.cpp | 180 - .../mcl/sample/she_make_dlp_table.cpp | 69 - .../dexon-foundation/mcl/sample/she_smpl.cpp | 125 - .../dexon-foundation/mcl/sample/tri-dh.cpp | 97 - .../dexon-foundation/mcl/sample/vote.cpp | 206 - vendor/github.com/dexon-foundation/mcl/setvar.bat | 2 - .../dexon-foundation/mcl/src/asm/aarch64.s | 13197 --- .../github.com/dexon-foundation/mcl/src/asm/arm.s | 84189 ------------------- .../dexon-foundation/mcl/src/asm/low_arm.s | 154 - .../dexon-foundation/mcl/src/asm/low_x86-64.asm | 153 - .../dexon-foundation/mcl/src/asm/low_x86.asm | 0 .../dexon-foundation/mcl/src/asm/x86-64.bmi2.s | 14155 ---- .../dexon-foundation/mcl/src/asm/x86-64.s | 16652 ---- .../dexon-foundation/mcl/src/asm/x86-64mac.bmi2.s | 13830 --- .../dexon-foundation/mcl/src/asm/x86-64mac.s | 16313 ---- .../dexon-foundation/mcl/src/asm/x86.bmi2.s | 71547 ---------------- .../github.com/dexon-foundation/mcl/src/asm/x86.s | 73785 ---------------- .../dexon-foundation/mcl/src/bn_c256.cpp | 6 - .../dexon-foundation/mcl/src/bn_c384.cpp | 7 - .../dexon-foundation/mcl/src/bn_c384_256.cpp | 7 - 
.../dexon-foundation/mcl/src/bn_c512.cpp | 6 - .../dexon-foundation/mcl/src/bn_c_impl.hpp | 517 - .../dexon-foundation/mcl/src/ecdsa_c.cpp | 110 - vendor/github.com/dexon-foundation/mcl/src/fp.cpp | 646 - .../dexon-foundation/mcl/src/fp_generator.hpp | 3885 - vendor/github.com/dexon-foundation/mcl/src/gen.cpp | 999 - .../dexon-foundation/mcl/src/llvm_gen.hpp | 616 - .../dexon-foundation/mcl/src/low_func.hpp | 706 - .../dexon-foundation/mcl/src/low_func_llvm.hpp | 94 - .../dexon-foundation/mcl/src/proj/mcl.vcxproj | 92 - .../github.com/dexon-foundation/mcl/src/proto.hpp | 81 - .../dexon-foundation/mcl/src/she_c256.cpp | 2 - .../dexon-foundation/mcl/src/she_c384.cpp | 2 - .../dexon-foundation/mcl/src/she_c_impl.hpp | 681 - .../dexon-foundation/mcl/src/xbyak/xbyak.h | 2611 - .../mcl/src/xbyak/xbyak_mnemonic.h | 1972 - .../dexon-foundation/mcl/src/xbyak/xbyak_util.h | 653 - .../mcl/test/aggregate_sig_test.cpp | 74 - .../dexon-foundation/mcl/test/array_test.cpp | 104 - .../dexon-foundation/mcl/test/base_test.cpp | 392 - .../github.com/dexon-foundation/mcl/test/bench.hpp | 192 - .../dexon-foundation/mcl/test/bls12_test.cpp | 720 - .../dexon-foundation/mcl/test/bn384_test.cpp | 83 - .../dexon-foundation/mcl/test/bn512_test.cpp | 68 - .../dexon-foundation/mcl/test/bn_c256_test.cpp | 6 - .../dexon-foundation/mcl/test/bn_c384_256_test.cpp | 7 - .../dexon-foundation/mcl/test/bn_c384_test.cpp | 6 - .../dexon-foundation/mcl/test/bn_c512_test.cpp | 6 - .../dexon-foundation/mcl/test/bn_c_test.hpp | 699 - .../dexon-foundation/mcl/test/bn_test.cpp | 408 - .../dexon-foundation/mcl/test/conversion_test.cpp | 96 - .../dexon-foundation/mcl/test/ec_test.cpp | 573 - .../dexon-foundation/mcl/test/ecdsa_c_test.cpp | 51 - .../dexon-foundation/mcl/test/ecdsa_test.cpp | 69 - .../dexon-foundation/mcl/test/elgamal_test.cpp | 155 - .../mcl/test/fp_generator_test.cpp | 207 - .../dexon-foundation/mcl/test/fp_test.cpp | 1046 - .../dexon-foundation/mcl/test/fp_tower_test.cpp | 477 - .../dexon-foundation/mcl/test/fp_util_test.cpp | 270 - .../dexon-foundation/mcl/test/glv_test.cpp | 209 - .../dexon-foundation/mcl/test/gmp_test.cpp | 70 - .../dexon-foundation/mcl/test/low_test.cpp | 73 - .../github.com/dexon-foundation/mcl/test/mk32.sh | 1 - .../dexon-foundation/mcl/test/modp_test.cpp | 37 - .../dexon-foundation/mcl/test/mont_fp_test.cpp | 332 - .../dexon-foundation/mcl/test/paillier_test.cpp | 24 - .../mcl/test/proj/bn_test/bn_test.vcxproj | 88 - .../mcl/test/proj/ec_test/ec_test.vcxproj | 88 - .../mcl/test/proj/fp_test/fp_test.vcxproj | 88 - .../test/proj/fp_tower_test/fp_tower_test.vcxproj | 88 - .../dexon-foundation/mcl/test/she_c256_test.cpp | 2 - .../dexon-foundation/mcl/test/she_c384_test.cpp | 2 - .../dexon-foundation/mcl/test/she_c_test.hpp | 535 - .../dexon-foundation/mcl/test/she_test.cpp | 756 - .../dexon-foundation/mcl/test/sq_test.cpp | 21 - .../dexon-foundation/mcl/test/vint_test.cpp | 1353 - .../mcl/test/window_method_test.cpp | 70 - .../github.com/ethereum/go-ethereum/common/big.go | 30 + .../ethereum/go-ethereum/common/bytes.go | 138 + .../ethereum/go-ethereum/common/debug.go | 52 + .../ethereum/go-ethereum/common/format.go | 82 + .../ethereum/go-ethereum/common/hexutil/hexutil.go | 240 + .../ethereum/go-ethereum/common/hexutil/json.go | 376 + .../ethereum/go-ethereum/common/math/big.go | 219 + .../ethereum/go-ethereum/common/math/integer.go | 99 + .../github.com/ethereum/go-ethereum/common/path.go | 49 + .../github.com/ethereum/go-ethereum/common/size.go | 56 + .../ethereum/go-ethereum/common/test_utils.go | 
53 + .../ethereum/go-ethereum/common/types.go | 369 + .../github.com/ethereum/go-ethereum/rlp/decode.go | 1049 + vendor/github.com/ethereum/go-ethereum/rlp/doc.go | 33 + .../github.com/ethereum/go-ethereum/rlp/encode.go | 651 + vendor/github.com/ethereum/go-ethereum/rlp/raw.go | 156 + .../ethereum/go-ethereum/rlp/typecache.go | 165 + 602 files changed, 380453 insertions(+), 376952 deletions(-) create mode 100644 vendor/github.com/byzantine-lab/bls/.gitignore create mode 100644 vendor/github.com/byzantine-lab/bls/.travis.yml create mode 100644 vendor/github.com/byzantine-lab/bls/CMakeLists.txt create mode 100644 vendor/github.com/byzantine-lab/bls/Makefile create mode 100644 vendor/github.com/byzantine-lab/bls/bin/.emptydir create mode 100644 vendor/github.com/byzantine-lab/bls/bls.sln create mode 100644 vendor/github.com/byzantine-lab/bls/bls_smpl.py create mode 100644 vendor/github.com/byzantine-lab/bls/common.props create mode 100644 vendor/github.com/byzantine-lab/bls/debug.props create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/cs/App.config create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/cs/Properties/AssemblyInfo.cs create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/cs/bls.cs create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/cs/bls.csproj create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/cs/bls.sln create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/cs/bls256.cs create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/cs/bls256.csproj create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/cs/bls256.sln create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/cs/bls256_test.cs create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/cs/bls_test.cs create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/cs/readme-ja.md create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/cs/readme.md create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/go/bls/bls.go create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/go/bls/callback.go create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/go/bls/dummy.cpp create mode 100644 vendor/github.com/byzantine-lab/bls/ffi/go/bls/mcl.go create mode 100644 vendor/github.com/byzantine-lab/bls/images/bls-go-alpine/Dockerfile create mode 100644 vendor/github.com/byzantine-lab/bls/include/bls/bls.h create mode 100644 vendor/github.com/byzantine-lab/bls/include/bls/bls.hpp create mode 100644 vendor/github.com/byzantine-lab/bls/lib/.emptydir create mode 100644 vendor/github.com/byzantine-lab/bls/mk.bat create mode 100755 vendor/github.com/byzantine-lab/bls/mkdll.bat create mode 100644 vendor/github.com/byzantine-lab/bls/mklib.bat create mode 100644 vendor/github.com/byzantine-lab/bls/obj/.emptydir create mode 100644 vendor/github.com/byzantine-lab/bls/readme.md create mode 100644 vendor/github.com/byzantine-lab/bls/release.props create mode 100644 vendor/github.com/byzantine-lab/bls/sample/bls_smpl.cpp create mode 100755 vendor/github.com/byzantine-lab/bls/setvar.bat create mode 100644 vendor/github.com/byzantine-lab/bls/src/bls_c256.cpp create mode 100644 vendor/github.com/byzantine-lab/bls/src/bls_c384.cpp create mode 100644 vendor/github.com/byzantine-lab/bls/src/bls_c384_256.cpp create mode 100644 vendor/github.com/byzantine-lab/bls/src/bls_c_impl.hpp create mode 100644 vendor/github.com/byzantine-lab/bls/src/proj/bls.vcxproj create mode 100644 vendor/github.com/byzantine-lab/bls/src/qcoeff-bn254.hpp create mode 100644 vendor/github.com/byzantine-lab/bls/test/bls256_test.cpp 
create mode 100644 vendor/github.com/byzantine-lab/bls/test/bls384_256_test.cpp create mode 100644 vendor/github.com/byzantine-lab/bls/test/bls384_test.cpp create mode 100644 vendor/github.com/byzantine-lab/bls/test/bls_c256_test.cpp create mode 100644 vendor/github.com/byzantine-lab/bls/test/bls_c384_256_test.cpp create mode 100644 vendor/github.com/byzantine-lab/bls/test/bls_c384_test.cpp create mode 100644 vendor/github.com/byzantine-lab/bls/test/bls_c_test.hpp create mode 100644 vendor/github.com/byzantine-lab/bls/test/bls_test.hpp create mode 100644 vendor/github.com/byzantine-lab/bls/test/proj/bls_test/bls_test.vcxproj create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/LICENSE create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/common/event.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/common/logger.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/common/types.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/common/utils.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-mgr.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-state.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/agreement.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/blockchain.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/interfaces.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/level-db.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/memory.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/configuration-chain.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/consensus.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/constant.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/constant.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/dkg.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/utils.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/ecdsa/ecdsa.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/interfaces.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/utils.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/db/interfaces.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/db/level-db.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/db/memory.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/dkg-tsig-protocol.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/interfaces.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/leader-selector.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/nonblocking.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/agreement.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/consensus.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/watch-cat.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/ticker.go create mode 100644 
vendor/github.com/byzantine-lab/dexon-consensus/core/types/block-randomness.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/types/block.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/types/config.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/types/dkg/dkg.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/types/message.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/types/node.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/types/nodeset.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/types/position.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/types/vote.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/utils.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/utils/crypto.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/utils/nodeset-cache.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/utils/penalty-helper.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-based-config.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-event.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/utils/signer.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/utils/utils.go create mode 100644 vendor/github.com/byzantine-lab/dexon-consensus/core/utils/vote-filter.go create mode 100644 vendor/github.com/byzantine-lab/mcl/.gitignore create mode 100644 vendor/github.com/byzantine-lab/mcl/.travis.yml create mode 100644 vendor/github.com/byzantine-lab/mcl/CMakeLists.txt create mode 100644 vendor/github.com/byzantine-lab/mcl/COPYRIGHT create mode 100644 vendor/github.com/byzantine-lab/mcl/Makefile create mode 100644 vendor/github.com/byzantine-lab/mcl/bench.txt create mode 100644 vendor/github.com/byzantine-lab/mcl/common.mk create mode 100644 vendor/github.com/byzantine-lab/mcl/common.props create mode 100644 vendor/github.com/byzantine-lab/mcl/debug.props create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/cs/App.config create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/cs/Properties/AssemblyInfo.cs create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256.cs create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256.csproj create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256.sln create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256_test.cs create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/go/mcl/mcl.go create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/go/mcl/mcl_test.go create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/java/Bn256Test.java create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/java/ElgamalTest.java create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/java/Makefile create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/java/bn256.i create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/java/bn256_impl.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/java/bn256_wrap.cxx create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/java/elgamal.i create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/java/elgamal_impl.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/java/elgamal_wrap.cxx create mode 100644 
vendor/github.com/byzantine-lab/mcl/ffi/java/java.md create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/java/make_wrap.bat create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/java/run-bn256.bat create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/java/run-elgamal.bat create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/java/set-java-path.bat create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/js/export-functions.py create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/js/pre-mcl.js create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/python/pairing.py create mode 100644 vendor/github.com/byzantine-lab/mcl/ffi/python/she.py create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/array.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/atoi.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/benchmark.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/bit_operation.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/critical_section.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/crypto.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/endian.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/exception.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/hash.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/inttype.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/itoa.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/link_libeay32.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/link_mpir.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/link_ssleay32.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/mutex.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/option.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/random_generator.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/serializer.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/sha2.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/stream.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/test.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/unordered_map.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/cybozu/xorshift.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/aggregate_sig.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/ahe.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/array.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/bls12_381.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/bn.h create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/bn.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/bn256.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/bn384.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/bn512.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/conversion.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/curve_type.h create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/ec.hpp create mode 100644 
vendor/github.com/byzantine-lab/mcl/include/mcl/ecdsa.h create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/ecdsa.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/ecparam.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/elgamal.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/fp.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/fp_tower.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/gmp_util.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/impl/bn_c_impl.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/lagrange.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/op.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/operator.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/paillier.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/randgen.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/she.h create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/she.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/util.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/vint.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/include/mcl/window_method.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/lib/.emptydir create mode 100644 vendor/github.com/byzantine-lab/mcl/mcl.sln create mode 100644 vendor/github.com/byzantine-lab/mcl/misc/bench.txt create mode 100644 vendor/github.com/byzantine-lab/mcl/misc/karatsuba.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/misc/mul.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/misc/precompute.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/misc/she/bench.sh create mode 100644 vendor/github.com/byzantine-lab/mcl/misc/she/bench4.txt create mode 100644 vendor/github.com/byzantine-lab/mcl/misc/she/bench6.txt create mode 100644 vendor/github.com/byzantine-lab/mcl/misc/she/bench8.txt create mode 100644 vendor/github.com/byzantine-lab/mcl/misc/she/nizkp.pdf create mode 100644 vendor/github.com/byzantine-lab/mcl/misc/she/she-api-ja.md create mode 100644 vendor/github.com/byzantine-lab/mcl/misc/she/she-api.md create mode 100644 vendor/github.com/byzantine-lab/mcl/misc/she/she.pdf create mode 100644 vendor/github.com/byzantine-lab/mcl/mk.bat create mode 100644 vendor/github.com/byzantine-lab/mcl/mklib.bat create mode 100644 vendor/github.com/byzantine-lab/mcl/obj/.emptydir create mode 100644 vendor/github.com/byzantine-lab/mcl/readme.md create mode 100644 vendor/github.com/byzantine-lab/mcl/release.props create mode 100644 vendor/github.com/byzantine-lab/mcl/sample/bench.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/sample/bls_sig.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/sample/ecdh.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/sample/large.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/sample/pairing.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/sample/pairing_c.c create mode 100644 vendor/github.com/byzantine-lab/mcl/sample/random.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/sample/rawbench.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/sample/she_make_dlp_table.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/sample/she_smpl.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/sample/tri-dh.cpp 
create mode 100644 vendor/github.com/byzantine-lab/mcl/sample/vote.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/setvar.bat create mode 100644 vendor/github.com/byzantine-lab/mcl/src/asm/aarch64.s create mode 100644 vendor/github.com/byzantine-lab/mcl/src/asm/arm.s create mode 100644 vendor/github.com/byzantine-lab/mcl/src/asm/low_arm.s create mode 100644 vendor/github.com/byzantine-lab/mcl/src/asm/low_x86-64.asm create mode 100644 vendor/github.com/byzantine-lab/mcl/src/asm/low_x86.asm create mode 100644 vendor/github.com/byzantine-lab/mcl/src/asm/x86-64.bmi2.s create mode 100644 vendor/github.com/byzantine-lab/mcl/src/asm/x86-64.s create mode 100644 vendor/github.com/byzantine-lab/mcl/src/asm/x86-64mac.bmi2.s create mode 100644 vendor/github.com/byzantine-lab/mcl/src/asm/x86-64mac.s create mode 100644 vendor/github.com/byzantine-lab/mcl/src/asm/x86.bmi2.s create mode 100644 vendor/github.com/byzantine-lab/mcl/src/asm/x86.s create mode 100644 vendor/github.com/byzantine-lab/mcl/src/bn_c256.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/bn_c384.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/bn_c384_256.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/bn_c512.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/bn_c_impl.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/ecdsa_c.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/fp.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/fp_generator.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/gen.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/llvm_gen.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/low_func.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/low_func_llvm.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/proj/mcl.vcxproj create mode 100644 vendor/github.com/byzantine-lab/mcl/src/proto.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/she_c256.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/she_c384.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/she_c_impl.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/src/xbyak/xbyak.h create mode 100644 vendor/github.com/byzantine-lab/mcl/src/xbyak/xbyak_mnemonic.h create mode 100644 vendor/github.com/byzantine-lab/mcl/src/xbyak/xbyak_util.h create mode 100644 vendor/github.com/byzantine-lab/mcl/test/aggregate_sig_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/array_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/base_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/bench.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/bls12_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/bn384_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/bn512_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/bn_c256_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/bn_c384_256_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/bn_c384_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/bn_c512_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/bn_c_test.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/bn_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/conversion_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/ec_test.cpp create 
mode 100644 vendor/github.com/byzantine-lab/mcl/test/ecdsa_c_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/ecdsa_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/elgamal_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/fp_generator_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/fp_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/fp_tower_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/fp_util_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/glv_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/gmp_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/low_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/mk32.sh create mode 100644 vendor/github.com/byzantine-lab/mcl/test/modp_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/mont_fp_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/paillier_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/proj/bn_test/bn_test.vcxproj create mode 100644 vendor/github.com/byzantine-lab/mcl/test/proj/ec_test/ec_test.vcxproj create mode 100644 vendor/github.com/byzantine-lab/mcl/test/proj/fp_test/fp_test.vcxproj create mode 100644 vendor/github.com/byzantine-lab/mcl/test/proj/fp_tower_test/fp_tower_test.vcxproj create mode 100644 vendor/github.com/byzantine-lab/mcl/test/she_c256_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/she_c384_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/she_c_test.hpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/she_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/sq_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/vint_test.cpp create mode 100644 vendor/github.com/byzantine-lab/mcl/test/window_method_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/bls/.gitignore delete mode 100644 vendor/github.com/dexon-foundation/bls/.travis.yml delete mode 100644 vendor/github.com/dexon-foundation/bls/CMakeLists.txt delete mode 100644 vendor/github.com/dexon-foundation/bls/Makefile delete mode 100644 vendor/github.com/dexon-foundation/bls/bin/.emptydir delete mode 100644 vendor/github.com/dexon-foundation/bls/bls.sln delete mode 100644 vendor/github.com/dexon-foundation/bls/bls_smpl.py delete mode 100644 vendor/github.com/dexon-foundation/bls/common.props delete mode 100644 vendor/github.com/dexon-foundation/bls/debug.props delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/cs/App.config delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/cs/Properties/AssemblyInfo.cs delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/cs/bls.cs delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/cs/bls.csproj delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/cs/bls.sln delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/cs/bls256.cs delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/cs/bls256.csproj delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/cs/bls256.sln delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/cs/bls256_test.cs delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/cs/bls_test.cs delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/cs/readme-ja.md delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/cs/readme.md delete mode 100644 
vendor/github.com/dexon-foundation/bls/ffi/go/bls/bls.go delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/go/bls/bls_test.go delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/go/bls/callback.go delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/go/bls/config.h delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/go/bls/dummy.cpp delete mode 100644 vendor/github.com/dexon-foundation/bls/ffi/go/bls/mcl.go delete mode 100644 vendor/github.com/dexon-foundation/bls/images/bls-go-alpine/Dockerfile delete mode 100644 vendor/github.com/dexon-foundation/bls/include/bls/bls.h delete mode 100644 vendor/github.com/dexon-foundation/bls/include/bls/bls.hpp delete mode 100644 vendor/github.com/dexon-foundation/bls/lib/.emptydir delete mode 100644 vendor/github.com/dexon-foundation/bls/mk.bat delete mode 100755 vendor/github.com/dexon-foundation/bls/mkdll.bat delete mode 100644 vendor/github.com/dexon-foundation/bls/mklib.bat delete mode 100644 vendor/github.com/dexon-foundation/bls/obj/.emptydir delete mode 100644 vendor/github.com/dexon-foundation/bls/readme.md delete mode 100644 vendor/github.com/dexon-foundation/bls/release.props delete mode 100644 vendor/github.com/dexon-foundation/bls/sample/bls_smpl.cpp delete mode 100755 vendor/github.com/dexon-foundation/bls/setvar.bat delete mode 100644 vendor/github.com/dexon-foundation/bls/src/bls_c256.cpp delete mode 100644 vendor/github.com/dexon-foundation/bls/src/bls_c384.cpp delete mode 100644 vendor/github.com/dexon-foundation/bls/src/bls_c384_256.cpp delete mode 100644 vendor/github.com/dexon-foundation/bls/src/bls_c_impl.hpp delete mode 100644 vendor/github.com/dexon-foundation/bls/src/proj/bls.vcxproj delete mode 100644 vendor/github.com/dexon-foundation/bls/src/qcoeff-bn254.hpp delete mode 100644 vendor/github.com/dexon-foundation/bls/test/bls256_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/bls/test/bls384_256_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/bls/test/bls384_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/bls/test/bls_c256_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/bls/test/bls_c384_256_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/bls/test/bls_c384_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/bls/test/bls_c_test.hpp delete mode 100644 vendor/github.com/dexon-foundation/bls/test/bls_test.hpp delete mode 100644 vendor/github.com/dexon-foundation/bls/test/proj/bls_test/bls_test.vcxproj delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/LICENSE delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/common/event.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/common/logger.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/common/types.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/common/utils.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-state.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/blockchain.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/configuration-chain.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go delete mode 100644 
vendor/github.com/dexon-foundation/dexon-consensus/core/constant.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/constant.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/dkg.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/utils.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa/ecdsa.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/interfaces.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/utils.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/db/interfaces.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/db/level-db.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/db/memory.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/dkg-tsig-protocol.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/nonblocking.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/agreement.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/watch-cat.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/ticker.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/types/block-randomness.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/types/block.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/types/config.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/types/dkg/dkg.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/types/message.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/types/node.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/types/nodeset.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/types/position.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/types/vote.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/utils.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/utils/crypto.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/utils/nodeset-cache.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/utils/penalty-helper.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-based-config.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-event.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/utils/signer.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/utils/utils.go delete mode 100644 vendor/github.com/dexon-foundation/dexon-consensus/core/utils/vote-filter.go delete mode 100644 vendor/github.com/dexon-foundation/mcl/.gitignore delete mode 100644 vendor/github.com/dexon-foundation/mcl/.travis.yml delete mode 100644 vendor/github.com/dexon-foundation/mcl/CMakeLists.txt delete mode 100644 
vendor/github.com/dexon-foundation/mcl/COPYRIGHT delete mode 100644 vendor/github.com/dexon-foundation/mcl/Makefile delete mode 100644 vendor/github.com/dexon-foundation/mcl/bench.txt delete mode 100644 vendor/github.com/dexon-foundation/mcl/common.mk delete mode 100644 vendor/github.com/dexon-foundation/mcl/common.props delete mode 100644 vendor/github.com/dexon-foundation/mcl/debug.props delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/cs/App.config delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/cs/Properties/AssemblyInfo.cs delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256.cs delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256.csproj delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256.sln delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256_test.cs delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/go/mcl/mcl.go delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/go/mcl/mcl_test.go delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/java/Bn256Test.java delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/java/ElgamalTest.java delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/java/Makefile delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/java/bn256.i delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/java/bn256_impl.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/java/bn256_wrap.cxx delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/java/elgamal.i delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/java/elgamal_impl.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/java/elgamal_wrap.cxx delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/java/java.md delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/java/make_wrap.bat delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/java/run-bn256.bat delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/java/run-elgamal.bat delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/java/set-java-path.bat delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/js/export-functions.py delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/js/pre-mcl.js delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/python/pairing.py delete mode 100644 vendor/github.com/dexon-foundation/mcl/ffi/python/she.py delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/array.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/atoi.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/benchmark.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/bit_operation.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/critical_section.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/crypto.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/endian.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/exception.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/hash.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/inttype.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/itoa.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/link_libeay32.hpp delete mode 100644 
vendor/github.com/dexon-foundation/mcl/include/cybozu/link_mpir.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/link_ssleay32.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/mutex.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/option.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/random_generator.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/serializer.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/sha2.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/stream.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/test.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/unordered_map.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/cybozu/xorshift.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/aggregate_sig.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/ahe.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/array.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/bls12_381.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/bn.h delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/bn.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/bn256.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/bn384.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/bn512.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/conversion.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/curve_type.h delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/ec.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/ecdsa.h delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/ecdsa.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/ecparam.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/elgamal.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/fp.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/fp_tower.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/gmp_util.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/impl/bn_c_impl.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/lagrange.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/op.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/operator.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/paillier.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/randgen.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/she.h delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/she.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/util.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/vint.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/include/mcl/window_method.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/lib/.emptydir delete mode 100644 vendor/github.com/dexon-foundation/mcl/mcl.sln delete mode 100644 
vendor/github.com/dexon-foundation/mcl/misc/bench.txt delete mode 100644 vendor/github.com/dexon-foundation/mcl/misc/karatsuba.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/misc/mul.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/misc/precompute.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/misc/she/bench.sh delete mode 100644 vendor/github.com/dexon-foundation/mcl/misc/she/bench4.txt delete mode 100644 vendor/github.com/dexon-foundation/mcl/misc/she/bench6.txt delete mode 100644 vendor/github.com/dexon-foundation/mcl/misc/she/bench8.txt delete mode 100644 vendor/github.com/dexon-foundation/mcl/misc/she/nizkp.pdf delete mode 100644 vendor/github.com/dexon-foundation/mcl/misc/she/she-api-ja.md delete mode 100644 vendor/github.com/dexon-foundation/mcl/misc/she/she-api.md delete mode 100644 vendor/github.com/dexon-foundation/mcl/misc/she/she.pdf delete mode 100644 vendor/github.com/dexon-foundation/mcl/mk.bat delete mode 100644 vendor/github.com/dexon-foundation/mcl/mklib.bat delete mode 100644 vendor/github.com/dexon-foundation/mcl/obj/.emptydir delete mode 100644 vendor/github.com/dexon-foundation/mcl/readme.md delete mode 100644 vendor/github.com/dexon-foundation/mcl/release.props delete mode 100644 vendor/github.com/dexon-foundation/mcl/sample/bench.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/sample/bls_sig.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/sample/ecdh.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/sample/large.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/sample/pairing.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/sample/pairing_c.c delete mode 100644 vendor/github.com/dexon-foundation/mcl/sample/random.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/sample/rawbench.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/sample/she_make_dlp_table.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/sample/she_smpl.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/sample/tri-dh.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/sample/vote.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/setvar.bat delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/asm/aarch64.s delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/asm/arm.s delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/asm/low_arm.s delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/asm/low_x86-64.asm delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/asm/low_x86.asm delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/asm/x86-64.bmi2.s delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/asm/x86-64.s delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/asm/x86-64mac.bmi2.s delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/asm/x86-64mac.s delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/asm/x86.bmi2.s delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/asm/x86.s delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/bn_c256.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/bn_c384.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/bn_c384_256.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/bn_c512.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/bn_c_impl.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/ecdsa_c.cpp delete mode 
100644 vendor/github.com/dexon-foundation/mcl/src/fp.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/fp_generator.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/gen.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/llvm_gen.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/low_func.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/low_func_llvm.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/proj/mcl.vcxproj delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/proto.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/she_c256.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/she_c384.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/she_c_impl.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/xbyak/xbyak.h delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/xbyak/xbyak_mnemonic.h delete mode 100644 vendor/github.com/dexon-foundation/mcl/src/xbyak/xbyak_util.h delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/aggregate_sig_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/array_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/base_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/bench.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/bls12_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/bn384_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/bn512_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/bn_c256_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/bn_c384_256_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/bn_c384_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/bn_c512_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/bn_c_test.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/bn_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/conversion_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/ec_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/ecdsa_c_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/ecdsa_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/elgamal_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/fp_generator_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/fp_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/fp_tower_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/fp_util_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/glv_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/gmp_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/low_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/mk32.sh delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/modp_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/mont_fp_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/paillier_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/proj/bn_test/bn_test.vcxproj delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/proj/ec_test/ec_test.vcxproj delete mode 100644 
vendor/github.com/dexon-foundation/mcl/test/proj/fp_test/fp_test.vcxproj delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/proj/fp_tower_test/fp_tower_test.vcxproj delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/she_c256_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/she_c384_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/she_c_test.hpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/she_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/sq_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/vint_test.cpp delete mode 100644 vendor/github.com/dexon-foundation/mcl/test/window_method_test.cpp create mode 100644 vendor/github.com/ethereum/go-ethereum/common/big.go create mode 100644 vendor/github.com/ethereum/go-ethereum/common/bytes.go create mode 100644 vendor/github.com/ethereum/go-ethereum/common/debug.go create mode 100644 vendor/github.com/ethereum/go-ethereum/common/format.go create mode 100644 vendor/github.com/ethereum/go-ethereum/common/hexutil/hexutil.go create mode 100644 vendor/github.com/ethereum/go-ethereum/common/hexutil/json.go create mode 100644 vendor/github.com/ethereum/go-ethereum/common/math/big.go create mode 100644 vendor/github.com/ethereum/go-ethereum/common/math/integer.go create mode 100644 vendor/github.com/ethereum/go-ethereum/common/path.go create mode 100644 vendor/github.com/ethereum/go-ethereum/common/size.go create mode 100644 vendor/github.com/ethereum/go-ethereum/common/test_utils.go create mode 100644 vendor/github.com/ethereum/go-ethereum/common/types.go create mode 100644 vendor/github.com/ethereum/go-ethereum/rlp/decode.go create mode 100644 vendor/github.com/ethereum/go-ethereum/rlp/doc.go create mode 100644 vendor/github.com/ethereum/go-ethereum/rlp/encode.go create mode 100644 vendor/github.com/ethereum/go-ethereum/rlp/raw.go create mode 100644 vendor/github.com/ethereum/go-ethereum/rlp/typecache.go diff --git a/vendor/github.com/byzantine-lab/bls/.gitignore b/vendor/github.com/byzantine-lab/bls/.gitignore new file mode 100644 index 000000000..dacdfc906 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/.gitignore @@ -0,0 +1,7 @@ +CVS +bin/*.exe +lib/*.a +lib/*.so +lib/*.dylib +obj/*.d +obj/*.o diff --git a/vendor/github.com/byzantine-lab/bls/.travis.yml b/vendor/github.com/byzantine-lab/bls/.travis.yml new file mode 100644 index 000000000..71a667a2e --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/.travis.yml @@ -0,0 +1,39 @@ +sudo: true +dist: xenial +services: +- docker +env: + global: + - IMAGE_TAG=dexonfoundation/bls-go-alpine + - DOCKER_USER=spiderpowadeploy + - secure: mqNCngWukyjE3UARxaPjqS0xgC1dsnWfmPhpH2mq7nR6S2cGfJ3xBfyiTS//Clz//7sAL+Tp62r3fxyMjDogrSHZUUssCAwf17RM6vnALqaVbc3wXcTNudiDB5cVKe9C9gZqn1Ivd+qbmtuCezSrOG5Xih1gh4bPTyiFvU1sp9C2icMHkJZkjsP0QqCbHlQrMeECSIPlEGIOXUUSp+WmrZAdi2rHezKeZxuaT73RX1+N/+1RfWXo2MR4ydQU3eALl5s5UA9JweQO+MYIVr8EEpGNqJRYUyURx/5G/Sy2v6Z3imUvXZv1J5aplW/UDls92Olla1JHuvFW6ptRO+PHITNwvEkhxPFj+HcOpqEuSISsdk9rkHUrM0wEYPv6A4vQPUjMHrLQs2tQShVCelM1HtNvDDjttKMmVyRLusFP9eS7uvmmXu2l6efJjsMSFkY5WKbu2U0MQ1j708KH9k2WunU6sjJ+b74PkkZVtkQMIqgTokC0IOqbbrnwh4I9PpVpHAQrewRimMH+lDHk+HlMUCWk7/IcIFUl+mh6RzW2vkZTTr2ctSBI6QzK5smdPmqQpp2lqkGv/hQCBp5ICzFSkU6Djqe3hG8ta3+/Zhi10fPU2HcHDi+gR79CG8dvy+iOeTS2csXZx+YoN2BVkfu9AnrjZ9Kjkf9BMay4CehBUWE= +language: cpp +compiler: +- gcc +- clang +addons: + apt: + packages: + - libgmp-dev +install: +- git clone --depth 1
https://github.com/dexon-foundation/mcl.git $TRAVIS_BUILD_DIR/../mcl +script: +- make -j3 +- make test_ci DISABLE_THREAD_TEST=1 +- make test_go +- env LD_LIBRARY_PATH=../mcl/lib bin/bls_c384_test.exe +- make clean && make -C ../mcl clean +- make -j3 MCL_USE_OPENSSL=0 +- make test_ci DISABLE_THREAD_TEST=1 MCL_USE_OPENSSL=0 +- docker build --tag "$IMAGE_TAG" . -f images/bls-go-alpine/Dockerfile --no-cache +before_deploy: +- echo "$DOCKER_PASS" | docker login -u "$DOCKER_USER" --password-stdin +- git_commit="$(git rev-parse --short HEAD)" +- docker tag "$IMAGE_TAG" "${IMAGE_TAG}:${git_commit}" +- docker tag "$IMAGE_TAG" "${IMAGE_TAG}:latest" +deploy: + provider: script + script: docker push "${IMAGE_TAG}:latest" && docker push "${IMAGE_TAG}:${git_commit}" + on: + branch: dev + condition: "$CC = gcc" diff --git a/vendor/github.com/byzantine-lab/bls/CMakeLists.txt b/vendor/github.com/byzantine-lab/bls/CMakeLists.txt new file mode 100644 index 000000000..30fb90fd5 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/CMakeLists.txt @@ -0,0 +1,33 @@ +cmake_minimum_required (VERSION 2.6) +project(bls CXX ASM) + +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) + +set(LIBS mcl gmp) + +include_directories(include/) + +add_library(bls_c256 SHARED src/bls_c256.cpp) +add_library(bls_c384 SHARED src/bls_c384.cpp) +add_library(bls_c384_256 SHARED src/bls_c384_256.cpp) +target_link_libraries(bls_c256 ${LIBS}) +target_link_libraries(bls_c384 ${LIBS}) +target_link_libraries(bls_c384_256 ${LIBS}) + +file(GLOB BLS_HEADERS include/bls/bls.h include/bls/bls.hpp) + +install(TARGETS bls_c256 DESTINATION lib) +install(TARGETS bls_c384 DESTINATION lib) +install(TARGETS bls_c384_256 DESTINATION lib) +install(FILES ${BLS_HEADERS} DESTINATION include/bls) + +set(TEST_LIBS pthread gmpxx) + +add_executable(bls_c256_test test/bls_c256_test.cpp) +target_link_libraries(bls_c256_test bls_c256 ${TEST_LIBS}) +add_executable(bls_c384_test test/bls_c384_test.cpp) +target_link_libraries(bls_c384_test bls_c384 ${TEST_LIBS}) +add_executable(bls_c384_256_test test/bls_c384_256_test.cpp) +target_link_libraries(bls_c384_256_test bls_c384_256 ${TEST_LIBS}) diff --git a/vendor/github.com/byzantine-lab/bls/Makefile b/vendor/github.com/byzantine-lab/bls/Makefile new file mode 100644 index 000000000..efea22274 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/Makefile @@ -0,0 +1,164 @@ +ifeq ($(findstring MINGW64,$(shell uname -s)),MINGW64) + # cgo accepts not '/c/path' but 'c:/path' + PWD=$(shell pwd|sed s'@^/\([a-z]\)@\1:@') +else + PWD=$(shell pwd) +endif +MCL_DIR?=$(PWD)/../mcl +include $(MCL_DIR)/common.mk +LIB_DIR=lib +OBJ_DIR=obj +EXE_DIR=bin +CFLAGS += -std=c++11 +LDFLAGS += -lpthread + +SRC_SRC=bls_c256.cpp bls_c384.cpp bls_c384_256.cpp +TEST_SRC=bls256_test.cpp bls384_test.cpp bls384_256_test.cpp bls_c256_test.cpp bls_c384_test.cpp bls_c384_256_test.cpp +SAMPLE_SRC=bls256_smpl.cpp bls384_smpl.cpp + +CFLAGS+=-I$(MCL_DIR)/include +ifneq ($(MCL_MAX_BIT_SIZE),) + CFLAGS+=-DMCL_MAX_BIT_SIZE=$(MCL_MAX_BIT_SIZE) +endif +ifeq ($(DISABLE_THREAD_TEST),1) + CFLAGS+=-DDISABLE_THREAD_TEST +endif +ifeq ($(BLS_SWAP_G),1) + CFLAGS+=-DBLS_SWAP_G +endif + +BLS256_LIB=$(LIB_DIR)/libbls256.a +BLS384_LIB=$(LIB_DIR)/libbls384.a +BLS384_256_LIB=$(LIB_DIR)/libbls384_256.a +BLS256_SNAME=bls256 +BLS384_SNAME=bls384 +BLS384_256_SNAME=bls384_256 +BLS256_SLIB=$(LIB_DIR)/lib$(BLS256_SNAME).$(LIB_SUF) 
+BLS384_SLIB=$(LIB_DIR)/lib$(BLS384_SNAME).$(LIB_SUF) +BLS384_256_SLIB=$(LIB_DIR)/lib$(BLS384_256_SNAME).$(LIB_SUF) +all: $(BLS256_LIB) $(BLS256_SLIB) $(BLS384_LIB) $(BLS384_SLIB) $(BLS384_256_LIB) $(BLS384_256_SLIB) + +MCL_LIB=$(MCL_DIR)/lib/libmcl.a + +$(MCL_LIB): + $(MAKE) -C $(MCL_DIR) + +$(BLS256_LIB): $(OBJ_DIR)/bls_c256.o $(MCL_LIB) + $(AR) $@ $< +$(BLS384_LIB): $(OBJ_DIR)/bls_c384.o $(MCL_LIB) + $(AR) $@ $< +$(BLS384_256_LIB): $(OBJ_DIR)/bls_c384_256.o $(MCL_LIB) + $(AR) $@ $< + +ifneq ($(findstring $(OS),mac/mingw64),) + COMMON_LIB=$(GMP_LIB) $(OPENSSL_LIB) -lstdc++ + BLS256_SLIB_LDFLAGS+=$(COMMON_LIB) + BLS384_SLIB_LDFLAGS+=$(COMMON_LIB) + BLS384_256_SLIB_LDFLAGS+=$(COMMON_LIB) +endif +ifeq ($(OS),mingw64) + CFLAGS+=-I$(MCL_DIR) + BLS256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BLS256_SNAME).a + BLS384_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BLS384_SNAME).a + BLS384_256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BLS384_256_SNAME).a +endif +$(BLS256_SLIB): $(OBJ_DIR)/bls_c256.o $(MCL_LIB) + $(PRE)$(CXX) -shared -o $@ $< -L$(MCL_DIR)/lib -lmcl $(BLS256_SLIB_LDFLAGS) $(LDFLAGS) +$(BLS384_SLIB): $(OBJ_DIR)/bls_c384.o $(MCL_LIB) + $(PRE)$(CXX) -shared -o $@ $< -L$(MCL_DIR)/lib -lmcl $(BLS384_SLIB_LDFLAGS) $(LDFLAGS) +$(BLS384_256_SLIB): $(OBJ_DIR)/bls_c384_256.o $(MCL_LIB) + $(PRE)$(CXX) -shared -o $@ $< -L$(MCL_DIR)/lib -lmcl $(BLS384_256_SLIB_LDFLAGS) $(LDFLAGS) + +VPATH=test sample src + +.SUFFIXES: .cpp .d .exe + +$(OBJ_DIR)/%.o: %.cpp + $(PRE)$(CXX) $(CFLAGS) -c $< -o $@ -MMD -MP -MF $(@:.o=.d) + +$(EXE_DIR)/%384_256_test.exe: $(OBJ_DIR)/%384_256_test.o $(BLS384_256_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(BLS384_256_LIB) -L$(MCL_DIR)/lib -lmcl $(LDFLAGS) + +$(EXE_DIR)/%384_test.exe: $(OBJ_DIR)/%384_test.o $(BLS384_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(BLS384_LIB) -L$(MCL_DIR)/lib -lmcl $(LDFLAGS) + +$(EXE_DIR)/%256_test.exe: $(OBJ_DIR)/%256_test.o $(BLS256_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(BLS256_LIB) -L$(MCL_DIR)/lib -lmcl $(LDFLAGS) + +# sample exe links libbls256.a +$(EXE_DIR)/%.exe: $(OBJ_DIR)/%.o $(BLS256_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(BLS256_LIB) -L$(MCL_DIR)/lib -lmcl $(LDFLAGS) +ifeq ($(OS),mac) + install_name_tool bin/bls_smpl.exe -change lib/libmcl.dylib $(MCL_DIR)/lib/libmcl.dylib +endif + +SAMPLE_EXE=$(addprefix $(EXE_DIR)/,$(SAMPLE_SRC:.cpp=.exe)) +sample: $(SAMPLE_EXE) + +TEST_EXE=$(addprefix $(EXE_DIR)/,$(TEST_SRC:.cpp=.exe)) +ifeq ($(OS),mac) + LIBPATH_KEY=DYLD_LIBRARY_PATH +else + LIBPATH_KEY=LD_LIBRARY_PATH +endif +test_ci: $(TEST_EXE) + @sh -ec 'for i in $(TEST_EXE); do echo $$i; env PATH=$$PATH:../mcl/lib $(LIBPATH_KEY)=../mcl/lib LSAN_OPTIONS=verbosity=1 log_threads=1 $$i; done' + $(MAKE) sample_test + +test: $(TEST_EXE) + @echo test $(TEST_EXE) + @sh -ec 'for i in $(TEST_EXE); do env PATH=$$PATH:../mcl/lib $(LIBPATH_KEY)=../mcl/lib $$i|grep "ctest:name"; done' > result.txt + @grep -v "ng=0, exception=0" result.txt; if [ $$? -eq 1 ]; then echo "all unit tests succeed"; else exit 1; fi + $(MAKE) sample_test + +sample_test: $(EXE_DIR)/bls_smpl.exe + env PATH=$$PATH:../mcl/lib $(LIBPATH_KEY)=../mcl/lib python bls_smpl.py + +# PATH is for mingw, LD_LIBRARY_PATH is for linux, DYLD_LIBRARY_PATH is for mac +COMMON_LIB_PATH="../../../lib:../../../../mcl/lib" +PATH_VAL=$$PATH:$(COMMON_LIB_PATH) LD_LIBRARY_PATH=$(COMMON_LIB_PATH) DYLD_LIBRARY_PATH=$(COMMON_LIB_PATH) +test_go256: ffi/go/bls/bls.go ffi/go/bls/bls_test.go $(BLS256_LIB) + cd ffi/go/bls && env PATH=$(PATH_VAL) go test -tags bn256 . 
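+# The test_go384 and test_go384_256 targets below mirror test_go256; the
+# bn256/bn384/bn384_256 build tags pick which curve-specific library the
+# Go binding links against. To run one curve's tests by hand (a sketch,
+# assuming lib/ and ../mcl/lib have already been built):
+#   cd ffi/go/bls && env LD_LIBRARY_PATH=../../../lib:../../../../mcl/lib go test -tags bn256 .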
+test_go384: ffi/go/bls/bls.go ffi/go/bls/bls_test.go $(BLS384_LIB) + cd ffi/go/bls && env PATH=$(PATH_VAL) go test -tags bn384 . +test_go384_256: ffi/go/bls/bls.go ffi/go/bls/bls_test.go $(BLS384_256_LIB) + cd ffi/go/bls && env PATH=$(PATH_VAL) go test -tags bn384_256 . + +test_go: + $(MAKE) test_go256 + $(MAKE) test_go384 + $(MAKE) test_go384_256 + +EMCC_OPT=-I./include -I./src -I../mcl/include -I./ -Wall -Wextra +EMCC_OPT+=-O3 -DNDEBUG +EMCC_OPT+=-s WASM=1 -s NO_EXIT_RUNTIME=1 -s MODULARIZE=1 #-s ASSERTIONS=1 +EMCC_OPT+=-DCYBOZU_MINIMUM_EXCEPTION +EMCC_OPT+=-s ABORTING_MALLOC=0 +EMCC_OPT+=-DMCLBN_FP_UNIT_SIZE=6 +JS_DEP=src/bls_c384.cpp ../mcl/src/fp.cpp Makefile + +../bls-wasm/bls_c.js: $(JS_DEP) + emcc -o $@ src/bls_c384.cpp ../mcl/src/fp.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=384 -DMCL_USE_WEB_CRYPTO_API -s DISABLE_EXCEPTION_CATCHING=1 -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -DMCL_DONT_USE_CSPRNG -fno-exceptions -MD -MP -MF obj/bls_c384.d + +bls-wasm: + $(MAKE) ../bls-wasm/bls_c.js + +clean: + $(RM) $(OBJ_DIR)/*.d $(OBJ_DIR)/*.o $(EXE_DIR)/*.exe $(GEN_EXE) $(ASM_SRC) $(ASM_OBJ) $(LLVM_SRC) $(BLS256_LIB) $(BLS256_SLIB) $(BLS384_LIB) $(BLS384_SLIB) $(BLS384_256_LIB) $(BLS384_256_SLIB) + +ALL_SRC=$(SRC_SRC) $(TEST_SRC) $(SAMPLE_SRC) +DEPEND_FILE=$(addprefix $(OBJ_DIR)/, $(ALL_SRC:.cpp=.d)) +-include $(DEPEND_FILE) + +PREFIX?=/usr/local +install: lib/libbls256.a lib/libbls256.$(LIB_SUF) lib/libbls384.a lib/libbls384.$(LIB_SUF) lib/libbls384_256.a lib/libbls384_256.$(LIB_SUF) + $(MKDIR) $(PREFIX)/include/bls + cp -a include/bls/ $(PREFIX)/include/ + $(MKDIR) $(PREFIX)/lib + cp -a lib/libbls256.a lib/libbls256.$(LIB_SUF) lib/libbls384.a lib/libbls384.$(LIB_SUF) lib/libbls384_256.a lib/libbls384_256.$(LIB_SUF) $(PREFIX)/lib/ + +.PHONY: test bls-wasm + +# don't remove these files automatically +.SECONDARY: $(addprefix $(OBJ_DIR)/, $(ALL_SRC:.cpp=.o)) + diff --git a/vendor/github.com/byzantine-lab/bls/bin/.emptydir b/vendor/github.com/byzantine-lab/bls/bin/.emptydir new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/byzantine-lab/bls/bls.sln b/vendor/github.com/byzantine-lab/bls/bls.sln new file mode 100644 index 000000000..4889ec601 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/bls.sln @@ -0,0 +1,30 @@ +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 2013 +VisualStudioVersion = 12.0.40629.0 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bls_test", "test\proj\bls_test\bls_test.vcxproj", "{51266DE6-B57B-4AE3-B85C-282F170E1728}" + ProjectSection(ProjectDependencies) = postProject + {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bls", "src\proj\bls.vcxproj", "{1DBB979A-C212-45CD-9563-446A96F87F71}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Release|x64 = Release|x64 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {51266DE6-B57B-4AE3-B85C-282F170E1728}.Debug|x64.ActiveCfg = Debug|x64 + {51266DE6-B57B-4AE3-B85C-282F170E1728}.Debug|x64.Build.0 = Debug|x64 + {51266DE6-B57B-4AE3-B85C-282F170E1728}.Release|x64.ActiveCfg = Release|x64 + {51266DE6-B57B-4AE3-B85C-282F170E1728}.Release|x64.Build.0 = Release|x64 + {1DBB979A-C212-45CD-9563-446A96F87F71}.Debug|x64.ActiveCfg = Debug|x64 + {1DBB979A-C212-45CD-9563-446A96F87F71}.Debug|x64.Build.0 = Debug|x64 + 
{1DBB979A-C212-45CD-9563-446A96F87F71}.Release|x64.ActiveCfg = Release|x64 + {1DBB979A-C212-45CD-9563-446A96F87F71}.Release|x64.Build.0 = Release|x64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/vendor/github.com/byzantine-lab/bls/bls_smpl.py b/vendor/github.com/byzantine-lab/bls/bls_smpl.py new file mode 100644 index 000000000..f834d80aa --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/bls_smpl.py @@ -0,0 +1,40 @@ +import os, sys, subprocess + +EXE='bin/bls_smpl.exe' + +def init(): + subprocess.check_call([EXE, "init"]) + +def sign(m, i=0): + subprocess.check_call([EXE, "sign", "-m", m, "-id", str(i)]) + +def verify(m, i=0): + subprocess.check_call([EXE, "verify", "-m", m, "-id", str(i)]) + +def share(n, k): + subprocess.check_call([EXE, "share", "-n", str(n), "-k", str(k)]) + +def recover(ids): + cmd = [EXE, "recover", "-ids"] + for i in ids: + cmd.append(str(i)) + subprocess.check_call(cmd) + +def main(): + m = "hello bls threshold signature" + n = 10 + ids = [1, 5, 3, 7] + k = len(ids) + init() + sign(m) + verify(m) + share(n, k) + for i in ids: + sign(m, i) + verify(m, i) + subprocess.check_call(["rm", "sample/sign.txt"]) + recover(ids) + verify(m) + +if __name__ == '__main__': + main() diff --git a/vendor/github.com/byzantine-lab/bls/common.props b/vendor/github.com/byzantine-lab/bls/common.props new file mode 100644 index 000000000..d6fdbb902 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/common.props @@ -0,0 +1,26 @@ + + + + + + $(SolutionDir)bin\ + + + + $(SolutionDir)../cybozulib/include;$(SolutionDir)../cybozulib_ext/include;$(SolutionDir)include;$(SolutionDir)../mcl/include + + + + + Level4 + MultiThreaded + + + _MBCS;%(PreprocessorDefinitions);NOMINMAX;BLS_MAX_OP_UNIT_SIZE=6 + + + $(SolutionDir)../cybozulib_ext/lib;$(SolutionDir)../mcl/lib;$(SolutionDir)lib + + + + diff --git a/vendor/github.com/byzantine-lab/bls/debug.props b/vendor/github.com/byzantine-lab/bls/debug.props new file mode 100644 index 000000000..1553ae0dc --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/debug.props @@ -0,0 +1,14 @@ + + + + + + $(ProjectName)d + + + + MultiThreadedDebug + + + + \ No newline at end of file diff --git a/vendor/github.com/byzantine-lab/bls/ffi/cs/App.config b/vendor/github.com/byzantine-lab/bls/ffi/cs/App.config new file mode 100644 index 000000000..8d234373a --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/ffi/cs/App.config @@ -0,0 +1,6 @@ + + + + + + diff --git a/vendor/github.com/byzantine-lab/bls/ffi/cs/Properties/AssemblyInfo.cs b/vendor/github.com/byzantine-lab/bls/ffi/cs/Properties/AssemblyInfo.cs new file mode 100644 index 000000000..201222c55 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/ffi/cs/Properties/AssemblyInfo.cs @@ -0,0 +1,36 @@ +using System.Reflection; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +// General information about an assembly is controlled through the following +// set of attributes. Change these attribute values to modify the information +// associated with an assembly. +[assembly: AssemblyTitle("bls256")] +[assembly: AssemblyDescription("")] +[assembly: AssemblyConfiguration("")] +[assembly: AssemblyCompany("")] +[assembly: AssemblyProduct("bls256")] +[assembly: AssemblyCopyright("Copyright © 2017")] +[assembly: AssemblyTrademark("")] +[assembly: AssemblyCulture("")] + +// Setting ComVisible to false makes the types in this assembly not visible +// to COM components. If you need to access a type in this assembly from COM,
+// set the ComVisible attribute to true on that type. +[assembly: ComVisible(false)] + +// The following GUID is for the ID of the typelib if this project is exposed to COM +[assembly: Guid("e9d06b1b-ea22-4ef4-ba4b-422f7625966c")] + +// Version information for an assembly consists of the following four values: +// +//      Major Version +//      Minor Version +//      Build Number +//      Revision +// +// You can specify all the values or you can default the Build and Revision +// numbers by using the '*' as shown below: +// [assembly: AssemblyVersion("1.0.*")] +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/vendor/github.com/byzantine-lab/bls/ffi/cs/bls.cs b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls.cs new file mode 100644 index 000000000..6bcaf07fb --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls.cs @@ -0,0 +1,351 @@ +using System; +using System.Text; +using System.Runtime.InteropServices; + +namespace mcl +{ + class BLS + { + public const int BN254 = 0; + public const int BLS12_381 = 5; + + const int IoEcComp = 512; // fixed byte representation + public const int FR_UNIT_SIZE = 4; + public const int FP_UNIT_SIZE = 6; // 4 if bls256.dll is used + public const int COMPILED_TIME_VAR = FR_UNIT_SIZE * 10 + FP_UNIT_SIZE; + + public const int ID_UNIT_SIZE = FR_UNIT_SIZE; + public const int SECRETKEY_UNIT_SIZE = FR_UNIT_SIZE; + public const int PUBLICKEY_UNIT_SIZE = FP_UNIT_SIZE * 3 * 2; + public const int SIGNATURE_UNIT_SIZE = FP_UNIT_SIZE * 3; + + public const int ID_SERIALIZE_SIZE = FR_UNIT_SIZE * 8; + public const int SECRETKEY_SERIALIZE_SIZE = FR_UNIT_SIZE * 8; + public const int PUBLICKEY_SERIALIZE_SIZE = FP_UNIT_SIZE * 8 * 2; + public const int SIGNATURE_SERIALIZE_SIZE = FP_UNIT_SIZE * 8; + + public const string dllName = FP_UNIT_SIZE == 4 ?
"bls256.dll" : "bls384_256.dll"; + [DllImport(dllName)] + public static extern int blsInit(int curveType, int compiledTimeVar); + + [DllImport(dllName)] public static extern void blsIdSetInt(ref Id id, int x); + [DllImport(dllName)] public static extern int blsIdSetDecStr(ref Id id, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + [DllImport(dllName)] public static extern int blsIdSetHexStr(ref Id id, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + [DllImport(dllName)] public static extern ulong blsIdGetDecStr([Out]StringBuilder buf, ulong maxBufSize, in Id id); + [DllImport(dllName)] public static extern ulong blsIdGetHexStr([Out]StringBuilder buf, ulong maxBufSize, in Id id); + + [DllImport(dllName)] public static extern ulong blsIdSerialize([Out]byte[] buf, ulong maxBufSize, in Id id); + [DllImport(dllName)] public static extern ulong blsSecretKeySerialize([Out]byte[] buf, ulong maxBufSize, in SecretKey sec); + [DllImport(dllName)] public static extern ulong blsPublicKeySerialize([Out]byte[] buf, ulong maxBufSize, in PublicKey pub); + [DllImport(dllName)] public static extern ulong blsSignatureSerialize([Out]byte[] buf, ulong maxBufSize, in Signature sig); + [DllImport(dllName)] public static extern ulong blsIdDeserialize(ref Id id, [In]byte[] buf, ulong bufSize); + [DllImport(dllName)] public static extern ulong blsSecretKeyDeserialize(ref SecretKey sec, [In]byte[] buf, ulong bufSize); + [DllImport(dllName)] public static extern ulong blsPublicKeyDeserialize(ref PublicKey pub, [In]byte[] buf, ulong bufSize); + [DllImport(dllName)] public static extern ulong blsSignatureDeserialize(ref Signature sig, [In]byte[] buf, ulong bufSize); + + [DllImport(dllName)] public static extern int blsIdIsEqual(in Id lhs, in Id rhs); + [DllImport(dllName)] public static extern int blsSecretKeyIsEqual(in SecretKey lhs, in SecretKey rhs); + [DllImport(dllName)] public static extern int blsPublicKeyIsEqual(in PublicKey lhs, in PublicKey rhs); + [DllImport(dllName)] public static extern int blsSignatureIsEqual(in Signature lhs, in Signature rhs); + // add + [DllImport(dllName)] public static extern void blsSecretKeyAdd(ref SecretKey sec, in SecretKey rhs); + [DllImport(dllName)] public static extern void blsPublicKeyAdd(ref PublicKey pub, in PublicKey rhs); + [DllImport(dllName)] public static extern void blsSignatureAdd(ref Signature sig, in Signature rhs); + // hash buf and set + [DllImport(dllName)] public static extern int blsHashToSecretKey(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + /* + set secretKey if system has /dev/urandom or CryptGenRandom + return 0 if success else -1 + */ + [DllImport(dllName)] public static extern int blsSecretKeySetByCSPRNG(ref SecretKey sec); + + [DllImport(dllName)] public static extern void blsGetPublicKey(ref PublicKey pub, in SecretKey sec); + [DllImport(dllName)] public static extern void blsGetPop(ref Signature sig, in SecretKey sec); + + // return 0 if success + [DllImport(dllName)] public static extern int blsSecretKeyShare(ref SecretKey sec, in SecretKey msk, ulong k, in Id id); + [DllImport(dllName)] public static extern int blsPublicKeyShare(ref PublicKey pub, in PublicKey mpk, ulong k, in Id id); + + + [DllImport(dllName)] public static extern int blsSecretKeyRecover(ref SecretKey sec, in SecretKey secVec, in Id idVec, ulong n); + [DllImport(dllName)] public static extern int blsPublicKeyRecover(ref PublicKey pub, in PublicKey pubVec, in Id idVec, ulong n); + [DllImport(dllName)] 
public static extern int blsSignatureRecover(ref Signature sig, in Signature sigVec, in Id idVec, ulong n); + + [DllImport(dllName)] public static extern void blsSign(ref Signature sig, in SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string m, ulong size); + + // return 1 if valid + [DllImport(dllName)] public static extern int blsVerify(in Signature sig, in PublicKey pub, [In][MarshalAs(UnmanagedType.LPStr)] string m, ulong size); + [DllImport(dllName)] public static extern int blsVerifyPop(in Signature sig, in PublicKey pub); + + ////////////////////////////////////////////////////////////////////////// + // the following apis will be removed + + // mask buf with (1 << (bitLen(r) - 1)) - 1 if buf >= r + [DllImport(dllName)] public static extern int blsIdSetLittleEndian(ref Id id, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + /* + return written byte size if success else 0 + */ + [DllImport(dllName)] public static extern ulong blsIdGetLittleEndian([Out]StringBuilder buf, ulong maxBufSize, in Id id); + + // return 0 if success + // mask buf with (1 << (bitLen(r) - 1)) - 1 if buf >= r + [DllImport(dllName)] public static extern int blsSecretKeySetLittleEndian(ref SecretKey sec, [In]byte[] buf, ulong bufSize); + [DllImport(dllName)] public static extern int blsSecretKeySetDecStr(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + [DllImport(dllName)] public static extern int blsSecretKeySetHexStr(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + /* + return written byte size if success else 0 + */ + [DllImport(dllName)] public static extern ulong blsSecretKeyGetLittleEndian([Out]byte[] buf, ulong maxBufSize, in SecretKey sec); + /* + return strlen(buf) if success else 0 + buf is '\0' terminated + */ + [DllImport(dllName)] public static extern ulong blsSecretKeyGetDecStr([Out]StringBuilder buf, ulong maxBufSize, in SecretKey sec); + [DllImport(dllName)] public static extern ulong blsSecretKeyGetHexStr([Out]StringBuilder buf, ulong maxBufSize, in SecretKey sec); + [DllImport(dllName)] public static extern int blsPublicKeySetHexStr(ref PublicKey pub, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + [DllImport(dllName)] public static extern ulong blsPublicKeyGetHexStr([Out]StringBuilder buf, ulong maxBufSize, in PublicKey pub); + [DllImport(dllName)] public static extern int blsSignatureSetHexStr(ref Signature sig, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + [DllImport(dllName)] public static extern ulong blsSignatureGetHexStr([Out]StringBuilder buf, ulong maxBufSize, in Signature sig); + + public static void Init(int curveType = BN254) { + if (!System.Environment.Is64BitProcess) { + throw new PlatformNotSupportedException("not 64-bit system"); + } + int err = blsInit(curveType, COMPILED_TIME_VAR); + if (err != 0) { + throw new ArgumentException("blsInit"); + } + } + [StructLayout(LayoutKind.Sequential)] + public unsafe struct Id + { + private fixed ulong v[ID_UNIT_SIZE]; + public byte[] Serialize() { + byte[] buf = new byte[ID_SERIALIZE_SIZE]; + ulong n = blsIdSerialize(buf, (ulong)buf.Length, this); + if (n == 0) { + throw new ArithmeticException("blsIdSerialize"); + } + return buf; + } + public void Deserialize(byte[] buf) { + ulong n = blsIdDeserialize(ref this, buf, (ulong)buf.Length); + if (n == 0) { + throw new ArithmeticException("blsIdDeserialize"); + } + } + public bool IsEqual(in Id rhs) { + return blsIdIsEqual(this, rhs) != 0; + } + public void 
SetDecStr(string s) { + if (blsIdSetDecStr(ref this, s, (ulong)s.Length) != 0) { + throw new ArgumentException("blsIdSetDecSt:" + s); + } + } + public void SetHexStr(string s) { + if (blsIdSetHexStr(ref this, s, (ulong)s.Length) != 0) { + throw new ArgumentException("blsIdSetHexStr:" + s); + } + } + public void SetInt(int x) { + blsIdSetInt(ref this, x); + } + public string GetDecStr() { + StringBuilder sb = new StringBuilder(1024); + ulong size = blsIdGetDecStr(sb, (ulong)sb.Capacity, this); + if (size == 0) { + throw new ArgumentException("blsIdGetDecStr"); + } + return sb.ToString(0, (int)size); + } + public string GetHexStr() { + StringBuilder sb = new StringBuilder(1024); + ulong size = blsIdGetHexStr(sb, (ulong)sb.Capacity, this); + if (size == 0) { + throw new ArgumentException("blsIdGetHexStr"); + } + return sb.ToString(0, (int)size); + } + } + [StructLayout(LayoutKind.Sequential)] + public unsafe struct SecretKey + { + private fixed ulong v[SECRETKEY_UNIT_SIZE]; + public byte[] Serialize() { + byte[] buf = new byte[SECRETKEY_SERIALIZE_SIZE]; + ulong n = blsSecretKeySerialize(buf, (ulong)buf.Length, this); + if (n == 0) { + throw new ArithmeticException("blsSecretKeySerialize"); + } + return buf; + } + public void Deserialize(byte[] buf) { + ulong n = blsSecretKeyDeserialize(ref this, buf, (ulong)buf.Length); + if (n == 0) { + throw new ArithmeticException("blsSecretKeyDeserialize"); + } + } + public bool IsEqual(in SecretKey rhs) { + return blsSecretKeyIsEqual(this, rhs) != 0; + } + public void SetHexStr(string s) { + if (blsSecretKeySetHexStr(ref this, s, (ulong)s.Length) != 0) { + throw new ArgumentException("blsSecretKeySetHexStr:" + s); + } + } + public string GetHexStr() { + StringBuilder sb = new StringBuilder(1024); + ulong size = blsSecretKeyGetHexStr(sb, (ulong)sb.Capacity, this); + if (size == 0) { + throw new ArgumentException("mclBnFr_getStr"); + } + return sb.ToString(0, (int)size); + } + public void Add(in SecretKey rhs) { + blsSecretKeyAdd(ref this, rhs); + } + public void SetByCSPRNG() { + blsSecretKeySetByCSPRNG(ref this); + } + public void SetHashOf(string s) { + if (blsHashToSecretKey(ref this, s, (ulong)s.Length) != 0) { + throw new ArgumentException("blsHashToSecretKey"); + } + } + public PublicKey GetPublicKey() { + PublicKey pub; + blsGetPublicKey(ref pub, this); + return pub; + } + public Signature Sign(string m) { + Signature sig; + blsSign(ref sig, this, m, (ulong)m.Length); + return sig; + } + public Signature GetPop() { + Signature sig; + blsGetPop(ref sig, this); + return sig; + } + } + // secretKey = sum_{i=0}^{msk.Length - 1} msk[i] * id^i + public static SecretKey ShareSecretKey(in SecretKey[] msk, in Id id) { + SecretKey sec; + if (blsSecretKeyShare(ref sec, msk[0], (ulong)msk.Length, id) != 0) { + throw new ArgumentException("GetSecretKeyForId:" + id.ToString()); + } + return sec; + } + public static SecretKey RecoverSecretKey(in SecretKey[] secVec, in Id[] idVec) { + SecretKey sec; + if (blsSecretKeyRecover(ref sec, secVec[0], idVec[0], (ulong)secVec.Length) != 0) { + throw new ArgumentException("Recover"); + } + return sec; + } + [StructLayout(LayoutKind.Sequential)] + public unsafe struct PublicKey + { + private fixed ulong v[PUBLICKEY_UNIT_SIZE]; + public byte[] Serialize() { + byte[] buf = new byte[PUBLICKEY_SERIALIZE_SIZE]; + ulong n = blsPublicKeySerialize(buf, (ulong)buf.Length, this); + if (n == 0) { + throw new ArithmeticException("blsPublicKeySerialize"); + } + return buf; + } + public void Deserialize(byte[] buf) { + ulong n = 
blsPublicKeyDeserialize(ref this, buf, (ulong)buf.Length); + if (n == 0) { + throw new ArithmeticException("blsPublicKeyDeserialize"); + } + } + public bool IsEqual(in PublicKey rhs) { + return blsPublicKeyIsEqual(this, rhs) != 0; + } + public void SetStr(string s) { + if (blsPublicKeySetHexStr(ref this, s, (ulong)s.Length) != 0) { + throw new ArgumentException("blsPublicKeySetStr:" + s); + } + } + public string GetHexStr() { + StringBuilder sb = new StringBuilder(1024); + ulong size = blsPublicKeyGetHexStr(sb, (ulong)sb.Capacity, this); + if (size == 0) { + throw new ArgumentException("blsPublicKeyGetStr"); + } + return sb.ToString(0, (int)size); + } + public void Add(in PublicKey rhs) { + blsPublicKeyAdd(ref this, rhs); + } + public bool Verify(in Signature sig, string m) { + return blsVerify(sig, this, m, (ulong)m.Length) == 1; + } + public bool VerifyPop(in Signature pop) { + return blsVerifyPop(pop, this) == 1; + } + } + // publicKey = sum_{i=0}^{mpk.Length - 1} mpk[i] * id^i + public static PublicKey SharePublicKey(in PublicKey[] mpk, in Id id) { + PublicKey pub; + if (blsPublicKeyShare(ref pub, mpk[0], (ulong)mpk.Length, id) != 0) { + throw new ArgumentException("GetPublicKeyForId:" + id.ToString()); + } + return pub; + } + public static PublicKey RecoverPublicKey(in PublicKey[] pubVec, in Id[] idVec) { + PublicKey pub; + if (blsPublicKeyRecover(ref pub, pubVec[0], idVec[0], (ulong)pubVec.Length) != 0) { + throw new ArgumentException("Recover"); + } + return pub; + } + [StructLayout(LayoutKind.Sequential)] + public unsafe struct Signature + { + private fixed ulong v[SIGNATURE_UNIT_SIZE]; + public byte[] Serialize() { + byte[] buf = new byte[SIGNATURE_SERIALIZE_SIZE]; + ulong n = blsSignatureSerialize(buf, (ulong)buf.Length, this); + if (n == 0) { + throw new ArithmeticException("blsSignatureSerialize"); + } + return buf; + } + public void Deserialize(byte[] buf) { + ulong n = blsSignatureDeserialize(ref this, buf, (ulong)buf.Length); + if (n == 0) { + throw new ArithmeticException("blsSignatureDeserialize"); + } + } + public bool IsEqual(in Signature rhs) { + return blsSignatureIsEqual(this, rhs) != 0; + } + public void SetStr(string s) { + if (blsSignatureSetHexStr(ref this, s, (ulong)s.Length) != 0) { + throw new ArgumentException("blsSignatureSetStr:" + s); + } + } + public string GetHexStr() { + StringBuilder sb = new StringBuilder(1024); + ulong size = blsSignatureGetHexStr(sb, (ulong)sb.Capacity, this); + if (size == 0) { + throw new ArgumentException("blsSignatureGetStr"); + } + return sb.ToString(0, (int)size); + } + public void Add(in Signature rhs) { + blsSignatureAdd(ref this, rhs); + } + } + public static Signature RecoverSign(in Signature[] sigVec, in Id[] idVec) { + Signature sig; + if (blsSignatureRecover(ref sig, sigVec[0], idVec[0], (ulong)sigVec.Length) != 0) { + throw new ArgumentException("Recover"); + } + return sig; + } + } +} diff --git a/vendor/github.com/byzantine-lab/bls/ffi/cs/bls.csproj b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls.csproj new file mode 100644 index 000000000..c03afa436 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls.csproj @@ -0,0 +1,97 @@ + + + + + Debug + AnyCPU + {E9D06B1B-EA22-4EF4-BA4B-422F7625966D} + Exe + Properties + bls + bls + v4.6.2 + 512 + true + publish\ + true + Disk + false + Foreground + 7 + Days + false + false + true + 0 + 1.0.0.%2a + false + false + true + + + + true + ..\..\bin\ + DEBUG;TRACE + true + full + x64 + prompt + MinimumRecommendedRules.ruleset + 7.2 + false + + + ..\..\bin\ + TRACE + 
true + pdbonly + x64 + prompt + MinimumRecommendedRules.ruleset + false + true + 7.2 + + + true + + + + + + + + + + + + + + + + + + + + + + False + Microsoft .NET Framework 4.5.2 %28x86 and x64%29 + true + + + False + .NET Framework 3.5 SP1 + false + + + + + \ No newline at end of file diff --git a/vendor/github.com/byzantine-lab/bls/ffi/cs/bls.sln b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls.sln new file mode 100644 index 000000000..7c3dfba7b --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls.sln @@ -0,0 +1,25 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 15 +VisualStudioVersion = 15.0.28307.539 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "bls", "bls.csproj", "{E9D06B1B-EA22-4EF4-BA4B-422F7625966D}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Release|x64 = Release|x64 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {E9D06B1B-EA22-4EF4-BA4B-422F7625966D}.Debug|x64.ActiveCfg = Debug|x64 + {E9D06B1B-EA22-4EF4-BA4B-422F7625966D}.Debug|x64.Build.0 = Debug|x64 + {E9D06B1B-EA22-4EF4-BA4B-422F7625966D}.Release|x64.ActiveCfg = Release|x64 + {E9D06B1B-EA22-4EF4-BA4B-422F7625966D}.Release|x64.Build.0 = Release|x64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {1935C301-6478-4F82-9587-6A66B531E327} + EndGlobalSection +EndGlobal diff --git a/vendor/github.com/byzantine-lab/bls/ffi/cs/bls256.cs b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls256.cs new file mode 100644 index 000000000..3ef5fab9a --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls256.cs @@ -0,0 +1,298 @@ +using System; +using System.Text; +using System.Runtime.InteropServices; + +namespace mcl { + class BLS256 { + const int IoEcComp = 512; // fixed byte representation + public const int MCLBN_FR_UNIT_SIZE = 4; + public const int MCLBN_FP_UNIT_SIZE = 4; + public const int MCLBN_COMPILED_TIME_VAR = MCLBN_FR_UNIT_SIZE * 10 + MCLBN_FP_UNIT_SIZE; + [DllImport("bls256.dll")] + public static extern int blsInit(int curve, int compiledTimeVar); + + [DllImport("bls256.dll")] public static extern void blsIdSetInt(ref Id id, int x); + [DllImport("bls256.dll")] public static extern int blsIdSetDecStr(ref Id id, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + [DllImport("bls256.dll")] public static extern int blsIdSetHexStr(ref Id id, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + [DllImport("bls256.dll")] public static extern ulong blsIdGetDecStr([Out]StringBuilder buf, ulong maxBufSize, ref Id id); + [DllImport("bls256.dll")] public static extern ulong blsIdGetHexStr([Out]StringBuilder buf, ulong maxBufSize, ref Id id); + + + [DllImport("bls256.dll")] public static extern ulong blsIdSerialize([Out]StringBuilder buf, ulong maxBufSize, ref Id id); + [DllImport("bls256.dll")] public static extern ulong blsSecretKeySerialize([Out]StringBuilder buf, ulong maxBufSize, ref SecretKey sec); + [DllImport("bls256.dll")] public static extern ulong blsPublicKeySerialize([Out]StringBuilder buf, ulong maxBufSize, ref PublicKey pub); + [DllImport("bls256.dll")] public static extern ulong blsSignatureSerialize([Out]StringBuilder buf, ulong maxBufSize, ref Signature sig); + + [DllImport("bls256.dll")] public static extern int blsIdDeserialize(ref Id id,
[In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + [DllImport("bls256.dll")] public static extern int blsSecretKeyDeserialize(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + [DllImport("bls256.dll")] public static extern int blsPublicKeyDeserialize(ref PublicKey pub, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + [DllImport("bls256.dll")] public static extern int blsSignatureDeserialize(ref Signature sig, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + + [DllImport("bls256.dll")] public static extern int blsIdIsEqual(ref Id lhs, ref Id rhs); + [DllImport("bls256.dll")] public static extern int blsSecretKeyIsEqual(ref SecretKey lhs, ref SecretKey rhs); + [DllImport("bls256.dll")] public static extern int blsPublicKeyIsEqual(ref PublicKey lhs, ref PublicKey rhs); + [DllImport("bls256.dll")] public static extern int blsSignatureIsEqual(ref Signature lhs, ref Signature rhs); + + // add + [DllImport("bls256.dll")] public static extern void blsSecretKeyAdd(ref SecretKey sec, ref SecretKey rhs); + [DllImport("bls256.dll")] public static extern void blsPublicKeyAdd(ref PublicKey pub, ref PublicKey rhs); + [DllImport("bls256.dll")] public static extern void blsSignatureAdd(ref Signature sig, ref Signature rhs); + + // hash buf and set + [DllImport("bls256.dll")] public static extern int blsHashToSecretKey(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + /* + set secretKey if system has /dev/urandom or CryptGenRandom + return 0 if success else -1 + */ + [DllImport("bls256.dll")] public static extern int blsSecretKeySetByCSPRNG(ref SecretKey sec); + + [DllImport("bls256.dll")] public static extern void blsGetPublicKey(ref PublicKey pub, ref SecretKey sec); + [DllImport("bls256.dll")] public static extern void blsGetPop(ref Signature sig, ref SecretKey sec); + + // return 0 if success + [DllImport("bls256.dll")] public static extern int blsSecretKeyShare(ref SecretKey sec, ref SecretKey msk, ulong k, ref Id id); + [DllImport("bls256.dll")] public static extern int blsPublicKeyShare(ref PublicKey pub, ref PublicKey mpk, ulong k, ref Id id); + + + [DllImport("bls256.dll")] public static extern int blsSecretKeyRecover(ref SecretKey sec, ref SecretKey secVec, ref Id idVec, ulong n); + [DllImport("bls256.dll")] public static extern int blsPublicKeyRecover(ref PublicKey pub, ref PublicKey pubVec, ref Id idVec, ulong n); + [DllImport("bls256.dll")] public static extern int blsSignatureRecover(ref Signature sig, ref Signature sigVec, ref Id idVec, ulong n); + + [DllImport("bls256.dll")] public static extern void blsSign(ref Signature sig, ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string m, ulong size); + + // return 1 if valid + [DllImport("bls256.dll")] public static extern int blsVerify(ref Signature sig, ref PublicKey pub, [In][MarshalAs(UnmanagedType.LPStr)] string m, ulong size); + [DllImport("bls256.dll")] public static extern int blsVerifyPop(ref Signature sig, ref PublicKey pub); + + ////////////////////////////////////////////////////////////////////////// + // the following apis will be removed + + // mask buf with (1 << (bitLen(r) - 1)) - 1 if buf >= r + [DllImport("bls256.dll")] public static extern int blsIdSetLittleEndian(ref Id id, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + /* + return written byte size if success else 0 + */ + [DllImport("bls256.dll")] public static extern ulong blsIdGetLittleEndian([Out]StringBuilder buf, ulong 
maxBufSize, ref Id id); + + // return 0 if success + // mask buf with (1 << (bitLen(r) - 1)) - 1 if buf >= r + [DllImport("bls256.dll")] public static extern int blsSecretKeySetLittleEndian(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + [DllImport("bls256.dll")] public static extern int blsSecretKeySetDecStr(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + [DllImport("bls256.dll")] public static extern int blsSecretKeySetHexStr(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + /* + return written byte size if success else 0 + */ + [DllImport("bls256.dll")] public static extern ulong blsSecretKeyGetLittleEndian([Out]StringBuilder buf, ulong maxBufSize, ref SecretKey sec); + /* + return strlen(buf) if success else 0 + buf is '\0' terminated + */ + [DllImport("bls256.dll")] public static extern ulong blsSecretKeyGetDecStr([Out]StringBuilder buf, ulong maxBufSize, ref SecretKey sec); + [DllImport("bls256.dll")] public static extern ulong blsSecretKeyGetHexStr([Out]StringBuilder buf, ulong maxBufSize, ref SecretKey sec); + [DllImport("bls256.dll")] public static extern int blsPublicKeySetHexStr(ref PublicKey pub, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + [DllImport("bls256.dll")] public static extern ulong blsPublicKeyGetHexStr([Out]StringBuilder buf, ulong maxBufSize, ref PublicKey pub); + [DllImport("bls256.dll")] public static extern int blsSignatureSetHexStr(ref Signature sig, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); + [DllImport("bls256.dll")] public static extern ulong blsSignatureGetHexStr([Out]StringBuilder buf, ulong maxBufSize, ref Signature sig); + + public static void Init() + { + const int CurveFp254BNb = 0; + if (!System.Environment.Is64BitProcess) { + throw new PlatformNotSupportedException("not 64-bit system"); + } + int err = blsInit(CurveFp254BNb, MCLBN_COMPILED_TIME_VAR); + if (err != 0) { + throw new ArgumentException("blsInit"); + } + } + + public struct Id { + private ulong v0, v1, v2, v3; + public bool IsEqual(Id rhs) + { + return blsIdIsEqual(ref this, ref rhs) != 0; + } + public void SetDecStr(String s) + { + if (blsIdSetDecStr(ref this, s, (ulong)s.Length) != 0) { + throw new ArgumentException("blsIdSetDecSt:" + s); + } + } + public void SetHexStr(String s) + { + if (blsIdSetHexStr(ref this, s, (ulong)s.Length) != 0) { + throw new ArgumentException("blsIdSetHexStr:" + s); + } + } + public void SetInt(int x) + { + blsIdSetInt(ref this, x); + } + public string GetDecStr() + { + StringBuilder sb = new StringBuilder(1024); + ulong size = blsIdGetDecStr(sb, (ulong)sb.Capacity, ref this); + if (size == 0) { + throw new ArgumentException("blsIdGetDecStr"); + } + return sb.ToString(0, (int)size); + } + public string GetHexStr() + { + StringBuilder sb = new StringBuilder(1024); + ulong size = blsIdGetHexStr(sb, (ulong)sb.Capacity, ref this); + if (size == 0) { + throw new ArgumentException("blsIdGetHexStr"); + } + return sb.ToString(0, (int)size); + } + } + public struct SecretKey { + private ulong v0, v1, v2, v3; + public bool IsEqual(SecretKey rhs) + { + return blsSecretKeyIsEqual(ref this, ref rhs) != 0; + } + public void SetHexStr(String s) + { + if (blsSecretKeySetHexStr(ref this, s, (ulong)s.Length) != 0) { + throw new ArgumentException("blsSecretKeySetHexStr:" + s); + } + } + public string GetHexStr() + { + StringBuilder sb = new StringBuilder(1024); + ulong size = blsSecretKeyGetHexStr(sb, (ulong)sb.Capacity, 
ref this); + if (size == 0) { + throw new ArgumentException("mclBnFr_getStr"); + } + return sb.ToString(0, (int)size); + } + public void Add(SecretKey rhs) + { + blsSecretKeyAdd(ref this, ref rhs); + } + public void SetByCSPRNG() + { + blsSecretKeySetByCSPRNG(ref this); + } + public void SetHashOf(string s) + { + if (blsHashToSecretKey(ref this, s, (ulong)s.Length) != 0) { + throw new ArgumentException("blsHashToSecretKey"); + } + } + public PublicKey GetPublicKey() + { + PublicKey pub = new PublicKey(); + blsGetPublicKey(ref pub, ref this); + return pub; + } + public Signature Signature(String m) + { + Signature Signature = new Signature(); + blsSign(ref Signature, ref this, m, (ulong)m.Length); + return Signature; + } + } + // secretKey = sum_{i=0}^{msk.Length - 1} msk[i] * id^i + public static SecretKey ShareSecretKey(SecretKey[] msk, Id id) + { + SecretKey sec = new SecretKey(); + if (blsSecretKeyShare(ref sec, ref msk[0], (ulong)msk.Length, ref id) != 0) { + throw new ArgumentException("GetSecretKeyForId:" + id.ToString()); + } + return sec; + } + public static SecretKey RecoverSecretKey(SecretKey[] secs, Id[] ids) + { + SecretKey sec = new SecretKey(); + if (blsSecretKeyRecover(ref sec, ref secs[0], ref ids[0], (ulong)secs.Length) != 0) { + throw new ArgumentException("Recover"); + } + return sec; + } + public struct PublicKey { + private ulong v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11; + private ulong v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23; + public bool IsEqual(PublicKey rhs) + { + return blsPublicKeyIsEqual(ref this, ref rhs) != 0; + } + public void SetStr(String s) + { + if (blsPublicKeySetHexStr(ref this, s, (ulong)s.Length) != 0) { + throw new ArgumentException("blsPublicKeySetStr:" + s); + } + } + public string GetHexStr() + { + StringBuilder sb = new StringBuilder(1024); + ulong size = blsPublicKeyGetHexStr(sb, (ulong)sb.Capacity, ref this); + if (size == 0) { + throw new ArgumentException("blsPublicKeyGetStr"); + } + return sb.ToString(0, (int)size); + } + public void Add(PublicKey rhs) + { + blsPublicKeyAdd(ref this, ref rhs); + } + public bool Verify(Signature Signature, string m) + { + return blsVerify(ref Signature, ref this, m, (ulong)m.Length) == 1; + } + } + // publicKey = sum_{i=0}^{mpk.Length - 1} mpk[i] * id^i + public static PublicKey SharePublicKey(PublicKey[] mpk, Id id) + { + PublicKey pub = new PublicKey(); + if (blsPublicKeyShare(ref pub, ref mpk[0], (ulong)mpk.Length, ref id) != 0) { + throw new ArgumentException("GetPublicKeyForId:" + id.ToString()); + } + return pub; + } + public static PublicKey RecoverPublicKey(PublicKey[] pubs, Id[] ids) + { + PublicKey pub = new PublicKey(); + if (blsPublicKeyRecover(ref pub, ref pubs[0], ref ids[0], (ulong)pubs.Length) != 0) { + throw new ArgumentException("Recover"); + } + return pub; + } + public struct Signature { + private ulong v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11; + public bool IsEqual(Signature rhs) + { + return blsSignatureIsEqual(ref this, ref rhs) != 0; + } + public void SetStr(String s) + { + if (blsSignatureSetHexStr(ref this, s, (ulong)s.Length) != 0) { + throw new ArgumentException("blsSignatureSetStr:" + s); + } + } + public string GetHexStr() + { + StringBuilder sb = new StringBuilder(1024); + ulong size = blsSignatureGetHexStr(sb, (ulong)sb.Capacity, ref this); + if (size == 0) { + throw new ArgumentException("blsSignatureGetStr"); + } + return sb.ToString(0, (int)size); + } + public void Add(Signature rhs) + { + blsSignatureAdd(ref this, ref 
rhs); + } + } + public static Signature RecoverSign(Signature[] signs, Id[] ids) + { + Signature Signature = new Signature(); + if (blsSignatureRecover(ref Signature, ref signs[0], ref ids[0], (ulong)signs.Length) != 0) { + throw new ArgumentException("Recover"); + } + return Signature; + } + } +} diff --git a/vendor/github.com/byzantine-lab/bls/ffi/cs/bls256.csproj b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls256.csproj new file mode 100644 index 000000000..032a1d347 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls256.csproj @@ -0,0 +1,62 @@ + + + + + Debug + AnyCPU + {E9D06B1B-EA22-4EF4-BA4B-422F7625966C} + Exe + Properties + bls256 + bls256 + v4.5.2 + 512 + true + + + true + ..\..\bin\ + DEBUG;TRACE + false + full + x64 + prompt + MinimumRecommendedRules.ruleset + + + ..\..\bin\ + TRACE + true + pdbonly + x64 + prompt + MinimumRecommendedRules.ruleset + true + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/byzantine-lab/bls/ffi/cs/bls256.sln b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls256.sln new file mode 100644 index 000000000..eb29af97b --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls256.sln @@ -0,0 +1,22 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.25420.1 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBD}") = "bls256", "bls256.csproj", "{E9D06B1B-EA22-4EF4-BA4B-422F7625966C}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Release|x64 = Release|x64 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {E9D06B1B-EA22-4EF4-BA4B-422F7625966C}.Debug|x64.ActiveCfg = Debug|x64 + {E9D06B1B-EA22-4EF4-BA4B-422F7625966C}.Debug|x64.Build.0 = Debug|x64 + {E9D06B1B-EA22-4EF4-BA4B-422F7625966C}.Release|x64.ActiveCfg = Release|x64 + {E9D06B1B-EA22-4EF4-BA4B-422F7625966C}.Release|x64.Build.0 = Release|x64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/vendor/github.com/byzantine-lab/bls/ffi/cs/bls256_test.cs b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls256_test.cs new file mode 100644 index 000000000..989993e0f --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls256_test.cs @@ -0,0 +1,126 @@ +using System; + +namespace mcl { + using static BLS256; + class BLS256Test { + static int err = 0; + static void assert(string msg, bool b) + { + if (b) return; + Console.WriteLine("ERR {0}", msg); + err++; + } + static void TestId() + { + Console.WriteLine("TestId"); + Id id = new Id(); + id.SetDecStr("255"); + assert("GetStr(10)", id.GetDecStr() == "255"); + assert("GetStr(16)", id.GetHexStr() == "ff"); + } + static void TestSecretKey() + { + Console.WriteLine("TestSecretKey"); + SecretKey sec = new SecretKey(); + sec.SetHexStr("ff"); + assert("GetHexStr()", sec.GetHexStr() == "ff"); + { + SecretKey sec2 = new SecretKey(); + sec.SetHexStr("321"); + sec2.SetHexStr("4000"); + sec.Add(sec2); + assert("sec.Add", sec.GetHexStr() == "4321"); + sec.SetByCSPRNG(); + Console.WriteLine("sec.Init={0}", sec.GetHexStr()); + } + } + static void TestPublicKey() + { + Console.WriteLine("TestPublicKey"); + SecretKey sec = new SecretKey(); + sec.SetByCSPRNG(); + PublicKey pub = sec.GetPublicKey(); + String s = pub.GetHexStr(); + Console.WriteLine("pub={0}", s); + PublicKey pub2 = new PublicKey(); + pub2.SetStr(s); + 
assert("pub.SetStr", pub.IsEqual(pub2)); + } + static void TestSign() + { + Console.WriteLine("TestSign"); + SecretKey sec = new SecretKey(); + sec.SetByCSPRNG(); + PublicKey pub = sec.GetPublicKey(); + String m = "abc"; + Signature sig = sec.Signature(m); + assert("verify", pub.Verify(sig, m)); + assert("not verify", !pub.Verify(sig, m + "a")); + } + static void TestSharing() + { + Console.WriteLine("TestSharing"); + int k = 5; + SecretKey[] msk = new SecretKey[k]; + PublicKey[] mpk = new PublicKey[k]; + // make master secretkey + for (int i = 0; i < k; i++) { + msk[i].SetByCSPRNG(); + mpk[i] = msk[i].GetPublicKey(); + } + int n = 30; + Id[] ids = new Id[n]; + SecretKey[] secs = new SecretKey[n]; + PublicKey[] pubs = new PublicKey[n]; + for (int i = 0; i < n; i++) { + ids[i].SetInt(i * i + 123); + secs[i] = ShareSecretKey(msk, ids[i]); + pubs[i] = SharePublicKey(mpk, ids[i]); + assert("share publicKey", secs[i].GetPublicKey().IsEqual(pubs[i])); + } + string m = "doremi"; + for (int i = 0; i < n; i++) { + Signature Signature = secs[i].Signature(m); + assert("Signature.Verify", pubs[i].Verify(Signature, m)); + } + { + int[] idxTbl = { 0, 2, 5, 8, 10 }; + assert("idxTbl.Length=k", idxTbl.Length == k); + Id[] subIds = new Id[k]; + SecretKey[] subSecs = new SecretKey[k]; + PublicKey[] subPubs = new PublicKey[k]; + Signature[] subSigns = new Signature[k]; + for (int i = 0; i < k; i++) { + int idx = idxTbl[i]; + subIds[i] = ids[idx]; + subSecs[i] = secs[idx]; + subPubs[i] = pubs[idx]; + subSigns[i] = secs[idx].Signature(m); + } + SecretKey sec = RecoverSecretKey(subSecs, subIds); + PublicKey pub = RecoverPublicKey(subPubs, subIds); + assert("check pub", pub.IsEqual(sec.GetPublicKey())); + Signature Signature = RecoverSign(subSigns, subIds); + assert("Signature.verify", pub.Verify(Signature, m)); + } + } + static void Main(string[] args) + { + try { + Init(); + TestId(); + TestSecretKey(); + TestPublicKey(); + TestSign(); + TestSharing(); + if (err == 0) { + Console.WriteLine("all tests succeed"); + } else { + Console.WriteLine("err={0}", err); + } + } catch (Exception e) { + Console.WriteLine("ERR={0}", e); + } + } + } +} diff --git a/vendor/github.com/byzantine-lab/bls/ffi/cs/bls_test.cs b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls_test.cs new file mode 100644 index 000000000..2eb451ba9 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/ffi/cs/bls_test.cs @@ -0,0 +1,176 @@ +using System; + +namespace mcl +{ + using static BLS; + class BLSTest + { + static int err = 0; + static void assert(string msg, bool b) { + if (b) return; + Console.WriteLine("ERR {0}", msg); + err++; + } + static void TestId() { + Console.WriteLine("TestId"); + Id id1; + id1.SetDecStr("255"); + assert("GetStr(10)", id1.GetDecStr() == "255"); + assert("GetStr(16)", id1.GetHexStr() == "ff"); + Id id2; + id2.SetInt(255); + assert("IsEqual", id1.IsEqual(id2)); + } + static void TestSecretKey() { + Console.WriteLine("TestSecretKey"); + SecretKey sec; + sec.SetHexStr("ff"); + assert("GetHexStr()", sec.GetHexStr() == "ff"); + { + SecretKey sec2; + sec.SetHexStr("321"); + sec2.SetHexStr("4000"); + sec.Add(sec2); + assert("sec.Add", sec.GetHexStr() == "4321"); + sec.SetByCSPRNG(); + Console.WriteLine("sec.Init={0}", sec.GetHexStr()); + } + { + SecretKey sec2; + byte[] buf = sec.Serialize(); + sec2.Deserialize(buf); + assert("serialize", sec2.IsEqual(sec)); + } + } + static void TestPublicKey() { + Console.WriteLine("TestPublicKey"); + SecretKey sec; + sec.SetByCSPRNG(); + PublicKey pub = sec.GetPublicKey(); + string s = 
pub.GetHexStr(); + Console.WriteLine("pub={0}", s); + { + PublicKey pub2; + pub2.SetStr(s); + assert("pub.SetStr", pub.IsEqual(pub2)); + } + { + PublicKey pub2; + byte[] buf = pub.Serialize(); + pub2.Deserialize(buf); + assert("serialize", pub2.IsEqual(pub)); + } + } + static void TestSign() { + Console.WriteLine("TestSign"); + SecretKey sec; + sec.SetByCSPRNG(); + PublicKey pub = sec.GetPublicKey(); + string m = "abc"; + Signature sig = sec.Sign(m); + Console.WriteLine("sig={0}", sig.GetHexStr()); + assert("verify", pub.Verify(sig, m)); + assert("not verify", !pub.Verify(sig, m + "a")); + { + Signature sig2; + byte[] buf = sig.Serialize(); + sig2.Deserialize(buf); + assert("serialize", sig2.IsEqual(sig)); + } + } + static void TestSharing() { + Console.WriteLine("TestSharing"); + int k = 5; + SecretKey[] msk = new SecretKey[k]; + PublicKey[] mpk = new PublicKey[k]; + // make master secretkey + for (int i = 0; i < k; i++) { + msk[i].SetByCSPRNG(); + mpk[i] = msk[i].GetPublicKey(); + } + int n = 30; + Id[] ids = new Id[n]; + SecretKey[] secs = new SecretKey[n]; + PublicKey[] pubs = new PublicKey[n]; + for (int i = 0; i < n; i++) { + ids[i].SetInt(i * i + 123); + secs[i] = ShareSecretKey(msk, ids[i]); + pubs[i] = SharePublicKey(mpk, ids[i]); + assert("share publicKey", secs[i].GetPublicKey().IsEqual(pubs[i])); + } + string m = "doremi"; + for (int i = 0; i < n; i++) { + Signature Signature = secs[i].Sign(m); + assert("Signature.Verify", pubs[i].Verify(Signature, m)); + } + { + int[] idxTbl = { 0, 2, 5, 8, 10 }; + assert("idxTbl.Length=k", idxTbl.Length == k); + Id[] subIds = new Id[k]; + SecretKey[] subSecs = new SecretKey[k]; + PublicKey[] subPubs = new PublicKey[k]; + Signature[] subSigns = new Signature[k]; + for (int i = 0; i < k; i++) { + int idx = idxTbl[i]; + subIds[i] = ids[idx]; + subSecs[i] = secs[idx]; + subPubs[i] = pubs[idx]; + subSigns[i] = secs[idx].Sign(m); + } + SecretKey sec = RecoverSecretKey(subSecs, subIds); + PublicKey pub = RecoverPublicKey(subPubs, subIds); + assert("check pub", pub.IsEqual(sec.GetPublicKey())); + Signature Signature = RecoverSign(subSigns, subIds); + assert("Signature.verify", pub.Verify(Signature, m)); + } + } + static void TestAggregate() { + Console.WriteLine("TestAggregate"); + const int n = 10; + const string m = "abc"; + SecretKey[] secVec = new SecretKey[n]; + PublicKey[] pubVec = new PublicKey[n]; + Signature[] popVec = new Signature[n]; + Signature[] sigVec = new Signature[n]; + for (int i = 0; i < n; i++) { + secVec[i].SetByCSPRNG(); + pubVec[i] = secVec[i].GetPublicKey(); + popVec[i] = secVec[i].GetPop(); + sigVec[i] = secVec[i].Sign(m); + } + SecretKey secAgg; + PublicKey pubAgg; + Signature sigAgg; + for (int i = 0; i < n; i++) { + secAgg.Add(secVec[i]); + assert("verify pop", pubVec[i].VerifyPop(popVec[i])); + pubAgg.Add(pubVec[i]); + sigAgg.Add(sigVec[i]); + } + assert("aggregate sec", secAgg.Sign(m).IsEqual(sigAgg)); + assert("aggregate", pubAgg.Verify(sigAgg, m)); + } + static void Main(string[] args) { + try { + int[] curveTypeTbl = { BN254, BLS12_381 }; + foreach (int curveType in curveTypeTbl) { + Console.WriteLine("curveType={0}", curveType); + Init(curveType); + TestId(); + TestSecretKey(); + TestPublicKey(); + TestSign(); + TestSharing(); + TestAggregate(); + if (err == 0) { + Console.WriteLine("all tests succeed"); + } else { + Console.WriteLine("err={0}", err); + } + } + } catch (Exception e) { + Console.WriteLine("ERR={0}", e); + } + } + } +} diff --git a/vendor/github.com/byzantine-lab/bls/ffi/cs/readme-ja.md 
b/vendor/github.com/byzantine-lab/bls/ffi/cs/readme-ja.md
new file mode 100644
index 000000000..199135725
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/ffi/cs/readme-ja.md
@@ -0,0 +1,188 @@
+# C# binding of the BLS signature library
+
+# Requirements
+
+* Visual Studio 2017 (x64) or later
+* C# 7.2 or later
+* .NET Framework 4.5.2 or later
+
+# How to build the DLL
+
+Open a Visual Studio 2017 64-bit command prompt and run:
+```
+md work
+cd work
+git clone https://github.com/herumi/cybozulib_ext
+git clone https://github.com/herumi/mcl
+git clone https://github.com/herumi/bls
+cd bls
+mklib dll
+```
+`bls/bin/*.dll` are created.
+
+# How to build the samples
+
+Open bls/ffi/cs/bls.sln and run it.
+
+* Note: bls256.sln is obsolete; do not use it.
+
+# Classes and APIs
+
+## API
+
+* `Init(int curveType = BN254);`
+  * initialize the library with the curve `curveType`.
+  * curveType = BN254 or BLS12_381
+* `SecretKey ShareSecretKey(in SecretKey[] msk, in Id id);`
+  * generate (share) the secret key for `id` from the master secret key sequence msk.
+* `SecretKey RecoverSecretKey(in SecretKey[] secVec, in Id[] idVec);`
+  * recover a secret key from the pairs of secret keys secVec and IDs idVec.
+* `PublicKey SharePublicKey(in PublicKey[] mpk, in Id id);`
+  * generate (share) the public key for `id` from the master public key sequence mpk.
+* `PublicKey RecoverPublicKey(in PublicKey[] pubVec, in Id[] idVec);`
+  * recover a public key from the pairs of public keys pubVec and IDs idVec.
+* `Signature RecoverSign(in Signature[] sigVec, in Id[] idVec);`
+  * recover a signature from the pairs of signatures sigVec and IDs idVec.
+
+## Id
+
+Identifier class
+
+* `byte[] Serialize();`
+  * serialize the Id.
+* `void Deserialize(byte[] buf);`
+  * deserialize an Id from the byte sequence buf.
+* `bool IsEqual(in Id rhs);`
+  * equality test.
+* `void SetDecStr(string s);`
+  * set by a decimal string.
+* `void SetHexStr(string s);`
+  * set by a hexadecimal string.
+* `void SetInt(int x);`
+  * set the integer x.
+* `string GetDecStr();`
+  * get the decimal representation.
+* `string GetHexStr();`
+  * get the hexadecimal representation.
+
+## SecretKey
+
+* `byte[] Serialize();`
+  * serialize the SecretKey.
+* `void Deserialize(byte[] buf);`
+  * deserialize a SecretKey from the byte sequence buf.
+* `bool IsEqual(in SecretKey rhs);`
+  * equality test.
+* `void SetHexStr(string s);`
+  * set by a hexadecimal string.
+* `string GetHexStr();`
+  * get the hexadecimal representation.
+* `void Add(in SecretKey rhs);`
+  * add the secret key rhs.
+* `void SetByCSPRNG();`
+  * set by a cryptographically secure random number generator.
+* `void SetHashOf(string s);`
+  * set to the hash value of the string s.
+* `PublicKey GetPublicKey();`
+  * get the corresponding public key.
+* `Signature Sign(string m);`
+  * generate a signature of the string m.
+* `Signature GetPop();`
+  * generate a proof of possession (PoP), a self-signature by this secret key.
+
+## PublicKey
+
+* `byte[] Serialize();`
+  * serialize the PublicKey.
+* `void Deserialize(byte[] buf);`
+  * deserialize a PublicKey from the byte sequence buf.
+* `bool IsEqual(in PublicKey rhs);`
+  * equality test.
+* `void Add(in PublicKey rhs);`
+  * add the public key rhs.
+* `void SetHexStr(string s);`
+  * set by a hexadecimal string.
+* `string GetHexStr();`
+  * get the hexadecimal representation.
+* `bool Verify(in Signature sig, string m);`
+  * verify the validity of the signature sig on the string m.
+* `bool VerifyPop(in Signature pop);`
+  * verify the validity of the PoP.
+
+## Signature
+
+* `byte[] Serialize();`
+  * serialize the Signature.
+* `void Deserialize(byte[] buf);`
+  * deserialize a Signature from the byte sequence buf.
+* `bool IsEqual(in Signature rhs);`
+  * equality test.
+* `void Add(in Signature rhs);`
+  * add the signature rhs.
+* `void SetHexStr(string s);`
+  * set by a hexadecimal string.
+* `string GetHexStr();`
+  * get the hexadecimal representation.
+
+## How to use
+
+### A minimal sample
+
+```
+using static BLS;
+
+Init(BN254); // initialize the library
+SecretKey sec;
+sec.SetByCSPRNG(); // initialize the secret key
+PublicKey pub = sec.GetPublicKey(); // get the public key
+string m = "abc";
+Signature sig = sec.Sign(m); // create a signature
+if (pub.Verify(sig, m)) {
+  // the signature is valid
+}
+```
+
+### Aggregate signature
+```
+Init(BN254); // initialize the library
+const int n = 10;
+const string m = "abc";
+SecretKey[] secVec = new SecretKey[n];
+PublicKey[] pubVec = new PublicKey[n];
+Signature[] popVec = new Signature[n];
+Signature[] sigVec = new Signature[n];
+
+for (int i = 0; i < n; i++) {
+  secVec[i].SetByCSPRNG(); // initialize the secret key
+  pubVec[i] = secVec[i].GetPublicKey(); // get the public key
+  popVec[i] = secVec[i].GetPop(); // proof of possession (PoP)
+  sigVec[i] = secVec[i].Sign(m); // sign
+}
+
+SecretKey secAgg;
+PublicKey pubAgg;
+Signature sigAgg;
+for (int i = 0; i < n; i++) {
+  // check the PoP
+  if (!pubVec[i].VerifyPop(popVec[i])) {
+    // error
+    return;
+  }
+  pubAgg.Add(pubVec[i]); // aggregate the public keys
+  sigAgg.Add(sigVec[i]); // aggregate the signatures
+}
+if (pubAgg.Verify(sigAgg, m)) {
+  // the aggregated signature is valid
+}
+```
+
+# License
+
+modified new BSD License
+http://opensource.org/licenses/BSD-3-Clause
+
+# Author
+
+(C)2019 光成滋生 MITSUNARI Shigeo (herumi@nifty.com) All rights reserved.
+The copyright of this content, and any trademarks, organization names, logos, products,
+and services appearing in it, belong to their respective rights holders.
diff --git a/vendor/github.com/byzantine-lab/bls/ffi/cs/readme.md b/vendor/github.com/byzantine-lab/bls/ffi/cs/readme.md
new file mode 100644
index 000000000..2b7191871
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/ffi/cs/readme.md
@@ -0,0 +1,185 @@
+# C# binding of BLS threshold signature library
+
+# Installation Requirements
+
+* Visual Studio 2017 or later
+* C# 7.2 or later
+* .NET Framework 4.5.2 or later
+
+# How to build
+
+```
+md work
+cd work
+git clone https://github.com/herumi/cybozulib_ext
+git clone https://github.com/herumi/mcl
+git clone https://github.com/herumi/bls
+cd bls
+mklib dll
+```
+bls/bin/*.dll are created.
+
+# How to build a sample
+
+Open bls/ffi/cs/bls.sln and run it.
+
+* Remark: bls256 is obsolete. Please use bls.sln.
+
+# class and API
+
+## API
+
+* `Init(int curveType = BN254);`
+  * initialize this library with a curve `curveType`.
+  * curveType = BN254 or BLS12_381
+* `SecretKey ShareSecretKey(in SecretKey[] msk, in Id id);`
+  * generate the shared secret key from a sequence of master secret keys msk and Id.
+* `SecretKey RecoverSecretKey(in SecretKey[] secVec, in Id[] idVec);`
+  * recover the secret key from a sequence of secret keys secVec and idVec.
+* `PublicKey SharePublicKey(in PublicKey[] mpk, in Id id);`
+  * generate the shared public key from a sequence of master public keys mpk and Id.
+* `PublicKey RecoverPublicKey(in PublicKey[] pubVec, in Id[] idVec);`
+  * recover the public key from a sequence of public keys pubVec and idVec.
+* `Signature RecoverSign(in Signature[] sigVec, in Id[] idVec);`
+  * recover the signature from a sequence of signatures sigVec and idVec.
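+
+As a rough sketch of how these share/recover APIs compose into a k-of-n
+threshold signature (this follows the pattern of TestSharing in bls_test.cs;
+the values of k, n, and the ids below are arbitrary illustrative choices, not
+API requirements):
+
+```
+using static BLS;
+
+Init(BN254);
+int k = 3, n = 5;
+// master key: k random coefficients; msk[0]/mpk[0] act as the group key pair
+SecretKey[] msk = new SecretKey[k];
+PublicKey[] mpk = new PublicKey[k];
+for (int i = 0; i < k; i++) {
+  msk[i].SetByCSPRNG();
+  mpk[i] = msk[i].GetPublicKey();
+}
+// give each of the n participants a distinct nonzero id and a key share
+Id[] ids = new Id[n];
+SecretKey[] secs = new SecretKey[n];
+for (int i = 0; i < n; i++) {
+  ids[i].SetInt(i + 1);
+  secs[i] = ShareSecretKey(msk, ids[i]);
+}
+// any k participants can sign; recover the group signature from their shares
+string m = "abc";
+Id[] subIds = new Id[k];
+Signature[] subSigs = new Signature[k];
+for (int i = 0; i < k; i++) {
+  subIds[i] = ids[i];
+  subSigs[i] = secs[i].Sign(m);
+}
+Signature sig = RecoverSign(subSigs, subIds);
+bool ok = mpk[0].Verify(sig, m); // verifies under the group public key mpk[0]
+```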
+
+## Id
+
+Identifier class
+
+* `byte[] Serialize();`
+  * serialize Id
+* `void Deserialize(byte[] buf);`
+  * deserialize from byte[] buf
+* `bool IsEqual(in Id rhs);`
+  * equality
+* `void SetDecStr(string s);`
+  * set by a decimal string s
+* `void SetHexStr(string s);`
+  * set by a hexadecimal string s
+* `void SetInt(int x);`
+  * set an integer x
+* `string GetDecStr();`
+  * get a decimal string
+* `string GetHexStr();`
+  * get a hexadecimal string
+
+## SecretKey
+
+* `byte[] Serialize();`
+  * serialize SecretKey
+* `void Deserialize(byte[] buf);`
+  * deserialize from byte[] buf
+* `bool IsEqual(in SecretKey rhs);`
+  * equality
+* `string GetDecStr();`
+  * get a decimal string
+* `string GetHexStr();`
+  * get a hexadecimal string
+* `void Add(in SecretKey rhs);`
+  * add a secret key rhs
+* `void SetByCSPRNG();`
+  * set a secret key by a cryptographically secure pseudo-random number generator
+* `void SetHashOf(string s);`
+  * set a secret key by a hash of the string s
+* `PublicKey GetPublicKey();`
+  * get the public key corresponding to a secret key
+* `Signature Sign(string m);`
+  * sign a string m
+* `Signature GetPop();`
+  * get a PoP (Proof of Possession) for a secret key
+
+## PublicKey
+
+* `byte[] Serialize();`
+  * serialize PublicKey
+* `void Deserialize(byte[] buf);`
+  * deserialize from byte[] buf
+* `bool IsEqual(in PublicKey rhs);`
+  * equality
+* `void Add(in PublicKey rhs);`
+  * add a public key rhs
+* `string GetDecStr();`
+  * get a decimal string
+* `string GetHexStr();`
+  * get a hexadecimal string
+* `bool Verify(in Signature sig, string m);`
+  * verify the validity of sig with m
+* `bool VerifyPop(in Signature pop);`
+  * verify the validity of the PoP
+
+## Signature
+
+* `byte[] Serialize();`
+  * serialize Signature
+* `void Deserialize(byte[] buf);`
+  * deserialize from byte[] buf
+* `bool IsEqual(in Signature rhs);`
+  * equality
+* `void Add(in Signature rhs);`
+  * add a signature rhs
+* `string GetDecStr();`
+  * get a decimal string
+* `string GetHexStr();`
+  * get a hexadecimal string
+
+## How to use
+
+### A minimal sample
+
+```
+using static BLS;
+
+Init(BN254); // init library
+SecretKey sec;
+sec.SetByCSPRNG(); // init secret key
+PublicKey pub = sec.GetPublicKey(); // get public key
+string m = "abc";
+Signature sig = sec.Sign(m); // create signature
+if (pub.Verify(sig, m)) {
+  // signature is verified
+}
+```
+
+### Aggregate signature
+```
+Init(BN254); // init library
+const int n = 10;
+const string m = "abc";
+SecretKey[] secVec = new SecretKey[n];
+PublicKey[] pubVec = new PublicKey[n];
+Signature[] popVec = new Signature[n];
+Signature[] sigVec = new Signature[n];
+
+for (int i = 0; i < n; i++) {
+  secVec[i].SetByCSPRNG(); // init secret key
+  pubVec[i] = secVec[i].GetPublicKey(); // get public key
+  popVec[i] = secVec[i].GetPop(); // get a Proof of Possession (PoP)
+  sigVec[i] = secVec[i].Sign(m); // create signature
+}
+
+SecretKey secAgg;
+PublicKey pubAgg;
+Signature sigAgg;
+for (int i = 0; i < n; i++) {
+  // verify PoP
+  if (!pubVec[i].VerifyPop(popVec[i])) {
+    // error
+    return;
+  }
+  pubAgg.Add(pubVec[i]); // aggregate public key
+  sigAgg.Add(sigVec[i]); // aggregate signature
+}
+if (pubAgg.Verify(sigAgg, m)) {
+  // aggregated signature is verified
+}
+```
+
+# License
+
+modified new BSD License
+http://opensource.org/licenses/BSD-3-Clause
+
+# Author
+
+(C)2019 MITSUNARI Shigeo (herumi@nifty.com) All rights reserved.
diff --git a/vendor/github.com/byzantine-lab/bls/ffi/go/bls/bls.go b/vendor/github.com/byzantine-lab/bls/ffi/go/bls/bls.go
new file mode 100644
index 000000000..56bf08039
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/ffi/go/bls/bls.go
@@ -0,0 +1,539 @@
+package bls
+
+/*
+#cgo bn256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=4
+#cgo bn256 LDFLAGS:${SRCDIR}/../../../lib/libbls256.a
+#cgo bn384 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6
+#cgo bn384 LDFLAGS:${SRCDIR}/../../../lib/libbls384.a
+#cgo bn384_256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6 -DMCLBN_FR_UNIT_SIZE=4
+#cgo bn384_256 LDFLAGS:${SRCDIR}/../../../lib/libbls384_256.a
+#cgo !bn256,!bn384,!bn384_256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6
+#cgo !bn256,!bn384,!bn384_256 LDFLAGS:${SRCDIR}/../../../lib/libbls384.a
+#cgo CFLAGS:-I${SRCDIR}/../../../include -I${SRCDIR}/../../../../mcl/include
+#cgo LDFLAGS:${SRCDIR}/../../../../mcl/lib/libmcl.a -lgmpxx -lgmp
+#cgo static LDFLAGS:-static
+typedef unsigned int (*ReadRandFunc)(void *, void *, unsigned int);
+int wrapReadRandCgo(void *self, void *buf, unsigned int n);
+#include <bls/bls.h>
+*/
+import "C"
+import "fmt"
+import "unsafe"
+import "io"
+import "encoding/json"
+
+// Init --
+// call this function before calling all the other operations
+// this function is not thread safe
+func Init(curve int) error {
+    err := C.blsInit(C.int(curve), C.MCLBN_COMPILED_TIME_VAR)
+    if err != 0 {
+        return fmt.Errorf("ERR Init curve=%d", curve)
+    }
+    return nil
+}
+
+// ID --
+type ID struct {
+    v Fr
+}
+
+// getPointer --
+func (id *ID) getPointer() (p *C.blsId) {
+    // #nosec
+    return (*C.blsId)(unsafe.Pointer(id))
+}
+
+// GetLittleEndian --
+func (id *ID) GetLittleEndian() []byte {
+    return id.v.Serialize()
+}
+
+// SetLittleEndian --
+func (id *ID) SetLittleEndian(buf []byte) error {
+    return id.v.SetLittleEndian(buf)
+}
+
+// GetHexString --
+func (id *ID) GetHexString() string {
+    return id.v.GetString(16)
+}
+
+// GetDecString --
+func (id *ID) GetDecString() string {
+    return id.v.GetString(10)
+}
+
+// SetHexString --
+func (id *ID) SetHexString(s string) error {
+    return id.v.SetString(s, 16)
+}
+
+// SetDecString --
+func (id *ID) SetDecString(s string) error {
+    return id.v.SetString(s, 10)
+}
+
+// IsEqual --
+func (id *ID) IsEqual(rhs *ID) bool {
+    if id == nil || rhs == nil {
+        return false
+    }
+    return id.v.IsEqual(&rhs.v)
+}
+
+// MarshalJSON implements json.Marshaller.
+func (id *ID) MarshalJSON() ([]byte, error) {
+    return json.Marshal(&struct {
+        ID []byte `json:"id"`
+    }{
+        id.GetLittleEndian(),
+    })
+}
+
+// UnmarshalJSON implements json.Unmarshaller.
+func (id *ID) UnmarshalJSON(data []byte) error { + aux := &struct { + ID []byte `json:"id"` + }{} + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if err := id.SetLittleEndian(aux.ID); err != nil { + return err + } + return nil +} + +// SecretKey -- +type SecretKey struct { + v Fr +} + +// getPointer -- +func (sec *SecretKey) getPointer() (p *C.blsSecretKey) { + // #nosec + return (*C.blsSecretKey)(unsafe.Pointer(sec)) +} + +// GetLittleEndian -- +func (sec *SecretKey) GetLittleEndian() []byte { + return sec.v.Serialize() +} + +// SetLittleEndian -- +func (sec *SecretKey) SetLittleEndian(buf []byte) error { + return sec.v.SetLittleEndian(buf) +} + +// SerializeToHexStr -- +func (sec *SecretKey) SerializeToHexStr() string { + return sec.v.GetString(IoSerializeHexStr) +} + +// DeserializeHexStr -- +func (sec *SecretKey) DeserializeHexStr(s string) error { + return sec.v.SetString(s, IoSerializeHexStr) +} + +// GetHexString -- +func (sec *SecretKey) GetHexString() string { + return sec.v.GetString(16) +} + +// GetDecString -- +func (sec *SecretKey) GetDecString() string { + return sec.v.GetString(10) +} + +// SetHexString -- +func (sec *SecretKey) SetHexString(s string) error { + return sec.v.SetString(s, 16) +} + +// SetDecString -- +func (sec *SecretKey) SetDecString(s string) error { + return sec.v.SetString(s, 10) +} + +// IsEqual -- +func (sec *SecretKey) IsEqual(rhs *SecretKey) bool { + if sec == nil || rhs == nil { + return false + } + return sec.v.IsEqual(&rhs.v) +} + +// SetByCSPRNG -- +func (sec *SecretKey) SetByCSPRNG() { + sec.v.SetByCSPRNG() +} + +// Add -- +func (sec *SecretKey) Add(rhs *SecretKey) { + FrAdd(&sec.v, &sec.v, &rhs.v) +} + +// GetMasterSecretKey -- +func (sec *SecretKey) GetMasterSecretKey(k int) (msk []SecretKey) { + msk = make([]SecretKey, k) + msk[0] = *sec + for i := 1; i < k; i++ { + msk[i].SetByCSPRNG() + } + return msk +} + +// MarshalJSON implements json.Marshaller. +func (sec *SecretKey) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + SecretKey []byte `json:"secret_key"` + }{ + sec.GetLittleEndian(), + }) +} + +// UnmarshalJSON implements json.Unmarshaller. 
+func (sec *SecretKey) UnmarshalJSON(data []byte) error { + aux := &struct { + SecretKey []byte `json:"secret_key"` + }{} + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if err := sec.SetLittleEndian(aux.SecretKey); err != nil { + return err + } + return nil +} + +// GetMasterPublicKey -- +func GetMasterPublicKey(msk []SecretKey) (mpk []PublicKey) { + n := len(msk) + mpk = make([]PublicKey, n) + for i := 0; i < n; i++ { + mpk[i] = *msk[i].GetPublicKey() + } + return mpk +} + +// Set -- +func (sec *SecretKey) Set(msk []SecretKey, id *ID) error { + // #nosec + return FrEvaluatePolynomial(&sec.v, *(*[]Fr)(unsafe.Pointer(&msk)), &id.v) +} + +// Recover -- +func (sec *SecretKey) Recover(secVec []SecretKey, idVec []ID) error { + // #nosec + return FrLagrangeInterpolation(&sec.v, *(*[]Fr)(unsafe.Pointer(&idVec)), *(*[]Fr)(unsafe.Pointer(&secVec))) +} + +// GetPop -- +func (sec *SecretKey) GetPop() (sign *Sign) { + sign = new(Sign) + C.blsGetPop(sign.getPointer(), sec.getPointer()) + return sign +} + +// PublicKey -- +type PublicKey struct { + v G2 +} + +// getPointer -- +func (pub *PublicKey) getPointer() (p *C.blsPublicKey) { + // #nosec + return (*C.blsPublicKey)(unsafe.Pointer(pub)) +} + +// Serialize -- +func (pub *PublicKey) Serialize() []byte { + return pub.v.Serialize() +} + +// Deserialize -- +func (pub *PublicKey) Deserialize(buf []byte) error { + return pub.v.Deserialize(buf) +} + +// SerializeToHexStr -- +func (pub *PublicKey) SerializeToHexStr() string { + return pub.v.GetString(IoSerializeHexStr) +} + +// DeserializeHexStr -- +func (pub *PublicKey) DeserializeHexStr(s string) error { + return pub.v.SetString(s, IoSerializeHexStr) +} + +// GetHexString -- +func (pub *PublicKey) GetHexString() string { + return pub.v.GetString(16) +} + +// SetHexString -- +func (pub *PublicKey) SetHexString(s string) error { + return pub.v.SetString(s, 16) +} + +// IsEqual -- +func (pub *PublicKey) IsEqual(rhs *PublicKey) bool { + if pub == nil || rhs == nil { + return false + } + return pub.v.IsEqual(&rhs.v) +} + +// Add -- +func (pub *PublicKey) Add(rhs *PublicKey) { + G2Add(&pub.v, &pub.v, &rhs.v) +} + +// Set -- +func (pub *PublicKey) Set(mpk []PublicKey, id *ID) error { + // #nosec + return G2EvaluatePolynomial(&pub.v, *(*[]G2)(unsafe.Pointer(&mpk)), &id.v) +} + +// Recover -- +func (pub *PublicKey) Recover(pubVec []PublicKey, idVec []ID) error { + // #nosec + return G2LagrangeInterpolation(&pub.v, *(*[]Fr)(unsafe.Pointer(&idVec)), *(*[]G2)(unsafe.Pointer(&pubVec))) +} + +// MarshalJSON implements json.Marshaller. +func (pub *PublicKey) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + PublicKey []byte `json:"public_key"` + }{ + pub.Serialize(), + }) +} + +// UnmarshalJSON implements json.Unmarshaller. 
+func (pub *PublicKey) UnmarshalJSON(data []byte) error { + aux := &struct { + PublicKey []byte `json:"public_key"` + }{} + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if err := pub.Deserialize(aux.PublicKey); err != nil { + return err + } + return nil +} + +// Sign -- +type Sign struct { + v G1 +} + +// getPointer -- +func (sign *Sign) getPointer() (p *C.blsSignature) { + // #nosec + return (*C.blsSignature)(unsafe.Pointer(sign)) +} + +// Serialize -- +func (sign *Sign) Serialize() []byte { + return sign.v.Serialize() +} + +// Deserialize -- +func (sign *Sign) Deserialize(buf []byte) error { + return sign.v.Deserialize(buf) +} + +// SerializeToHexStr -- +func (sign *Sign) SerializeToHexStr() string { + return sign.v.GetString(IoSerializeHexStr) +} + +// DeserializeHexStr -- +func (sign *Sign) DeserializeHexStr(s string) error { + return sign.v.SetString(s, IoSerializeHexStr) +} + +// GetHexString -- +func (sign *Sign) GetHexString() string { + return sign.v.GetString(16) +} + +// SetHexString -- +func (sign *Sign) SetHexString(s string) error { + return sign.v.SetString(s, 16) +} + +// IsEqual -- +func (sign *Sign) IsEqual(rhs *Sign) bool { + if sign == nil || rhs == nil { + return false + } + return sign.v.IsEqual(&rhs.v) +} + +// GetPublicKey -- +func (sec *SecretKey) GetPublicKey() (pub *PublicKey) { + pub = new(PublicKey) + C.blsGetPublicKey(pub.getPointer(), sec.getPointer()) + return pub +} + +// Sign -- Constant Time version +func (sec *SecretKey) Sign(m string) (sign *Sign) { + sign = new(Sign) + buf := []byte(m) + // #nosec + C.blsSign(sign.getPointer(), sec.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) + return sign +} + +// Add -- +func (sign *Sign) Add(rhs *Sign) { + C.blsSignatureAdd(sign.getPointer(), rhs.getPointer()) +} + +// Recover -- +func (sign *Sign) Recover(signVec []Sign, idVec []ID) error { + // #nosec + return G1LagrangeInterpolation(&sign.v, *(*[]Fr)(unsafe.Pointer(&idVec)), *(*[]G1)(unsafe.Pointer(&signVec))) +} + +// Verify -- +func (sign *Sign) Verify(pub *PublicKey, m string) bool { + buf := []byte(m) + // #nosec + return C.blsVerify(sign.getPointer(), pub.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) == 1 +} + +// VerifyPop -- +func (sign *Sign) VerifyPop(pub *PublicKey) bool { + if pub.getPointer() == nil { + return false + } + return C.blsVerifyPop(sign.getPointer(), pub.getPointer()) == 1 +} + +// MarshalJSON implements json.Marshaller. +func (sign *Sign) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + Sign []byte `json:"sign"` + }{ + sign.Serialize(), + }) +} + +// UnmarshalJSON implements json.Unmarshaller. 
+func (sign *Sign) UnmarshalJSON(data []byte) error { + aux := &struct { + Sign []byte `json:"sign"` + }{} + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if err := sign.Deserialize(aux.Sign); err != nil { + return err + } + return nil +} + +// DHKeyExchange -- +func DHKeyExchange(sec *SecretKey, pub *PublicKey) (out PublicKey) { + C.blsDHKeyExchange(out.getPointer(), sec.getPointer(), pub.getPointer()) + return out +} + +// HashAndMapToSignature -- +func HashAndMapToSignature(buf []byte) *Sign { + sig := new(Sign) + err := sig.v.HashAndMapTo(buf) + if err == nil { + return sig + } else { + return nil + } +} + +// VerifyPairing -- +func VerifyPairing(X *Sign, Y *Sign, pub *PublicKey) bool { + if X.getPointer() == nil || Y.getPointer() == nil || pub.getPointer() == nil { + return false + } + return C.blsVerifyPairing(X.getPointer(), Y.getPointer(), pub.getPointer()) == 1 +} + +// SignHash -- +func (sec *SecretKey) SignHash(hash []byte) (sign *Sign) { + sign = new(Sign) + // #nosec + err := C.blsSignHash(sign.getPointer(), sec.getPointer(), unsafe.Pointer(&hash[0]), C.size_t(len(hash))) + if err == 0 { + return sign + } else { + return nil + } +} + +// VerifyHash -- +func (sign *Sign) VerifyHash(pub *PublicKey, hash []byte) bool { + if pub.getPointer() == nil { + return false + } + // #nosec + return C.blsVerifyHash(sign.getPointer(), pub.getPointer(), unsafe.Pointer(&hash[0]), C.size_t(len(hash))) == 1 +} + +func Min(x, y int) int { + if x < y { + return x + } + return y +} + +// VerifyAggregateHashes -- +func (sign *Sign) VerifyAggregateHashes(pubVec []PublicKey, hash [][]byte) bool { + hashByte := GetOpUnitSize() * 8 + n := len(hash) + h := make([]byte, n*hashByte) + for i := 0; i < n; i++ { + hn := len(hash[i]) + copy(h[i*hashByte:(i+1)*hashByte], hash[i][0:Min(hn, hashByte)]) + } + if pubVec[0].getPointer() == nil { + return false + } + return C.blsVerifyAggregatedHashes(sign.getPointer(), pubVec[0].getPointer(), unsafe.Pointer(&h[0]), C.size_t(hashByte), C.size_t(n)) == 1 +} + +/// + +var s_randReader io.Reader + +func createSlice(buf *C.char, n C.uint) []byte { + size := int(n) + return (*[1 << 30]byte)(unsafe.Pointer(buf))[:size:size] +} + +// this function can't be put in callback.go +//export wrapReadRandGo +func wrapReadRandGo(buf *C.char, n C.uint) C.uint { + slice := createSlice(buf, n) + ret, err := s_randReader.Read(slice) + if ret == int(n) && err == nil { + return n + } + return 0 +} + +// SetRandFunc -- +func SetRandFunc(randReader io.Reader) { + s_randReader = randReader + if randReader != nil { + C.blsSetRandFunc(nil, C.ReadRandFunc(unsafe.Pointer(C.wrapReadRandCgo))) + } else { + // use default random generator + C.blsSetRandFunc(nil, C.ReadRandFunc(unsafe.Pointer(nil))) + } +} diff --git a/vendor/github.com/byzantine-lab/bls/ffi/go/bls/callback.go b/vendor/github.com/byzantine-lab/bls/ffi/go/bls/callback.go new file mode 100644 index 000000000..ba73a5e15 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/ffi/go/bls/callback.go @@ -0,0 +1,12 @@ +package bls + +/* +// exported from bls.go +unsigned int wrapReadRandGo(void *buf, unsigned int n); +int wrapReadRandCgo(void *self, void *buf, unsigned int n) +{ + (void)self; + return wrapReadRandGo(buf, n); +} +*/ +import "C" diff --git a/vendor/github.com/byzantine-lab/bls/ffi/go/bls/dummy.cpp b/vendor/github.com/byzantine-lab/bls/ffi/go/bls/dummy.cpp new file mode 100644 index 000000000..a5103a1c5 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/ffi/go/bls/dummy.cpp @@ -0,0 +1,3 @@ +// 
This is a dummy source file which forces cgo to use the C++ linker instead
+// of the default C linker. We can therefore eliminate non-portable linker
+// flags such as -lstdc++, which is likely to break on FreeBSD and OpenBSD.
diff --git a/vendor/github.com/byzantine-lab/bls/ffi/go/bls/mcl.go b/vendor/github.com/byzantine-lab/bls/ffi/go/bls/mcl.go
new file mode 100644
index 000000000..ca8d7f02b
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/ffi/go/bls/mcl.go
@@ -0,0 +1,646 @@
+package bls
+
+/*
+#cgo bn256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=4
+#cgo bn384 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6
+#cgo bn384_256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6 -DMCLBN_FR_UNIT_SIZE=4
+#cgo !bn256,!bn384,!bn384_256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6
+#include <mcl/bn.h>
+*/
+import "C"
+import "fmt"
+import "unsafe"
+
+// CurveFp254BNb -- 254 bit curve
+const CurveFp254BNb = C.mclBn_CurveFp254BNb
+
+// CurveFp382_1 -- 382 bit curve 1
+const CurveFp382_1 = C.mclBn_CurveFp382_1
+
+// CurveFp382_2 -- 382 bit curve 2
+const CurveFp382_2 = C.mclBn_CurveFp382_2
+
+// BLS12_381
+const BLS12_381 = C.MCL_BLS12_381
+
+// IoSerializeHexStr
+const IoSerializeHexStr = C.MCLBN_IO_SERIALIZE_HEX_STR
+
+// GetFrUnitSize() --
+func GetFrUnitSize() int {
+    return int(C.MCLBN_FR_UNIT_SIZE)
+}
+
+// GetFpUnitSize() --
+// same as GetMaxOpUnitSize()
+func GetFpUnitSize() int {
+    return int(C.MCLBN_FP_UNIT_SIZE)
+}
+
+// GetMaxOpUnitSize --
+func GetMaxOpUnitSize() int {
+    return int(C.MCLBN_FP_UNIT_SIZE)
+}
+
+// GetOpUnitSize --
+// the length of Fr is GetOpUnitSize() * 8 bytes
+func GetOpUnitSize() int {
+    return int(C.mclBn_getOpUnitSize())
+}
+
+// GetCurveOrder --
+// return the order of G1
+func GetCurveOrder() string {
+    buf := make([]byte, 1024)
+    // #nosec
+    n := C.mclBn_getCurveOrder((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)))
+    if n == 0 {
+        panic("implementation err. size of buf is small")
+    }
+    return string(buf[:n])
+}
+
+// GetFieldOrder --
+// return the characteristic of the field where a curve is defined
+func GetFieldOrder() string {
+    buf := make([]byte, 1024)
+    // #nosec
+    n := C.mclBn_getFieldOrder((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)))
+    if n == 0 {
+        panic("implementation err.
size of buf is small") + } + return string(buf[:n]) +} + +// Fr -- +type Fr struct { + v C.mclBnFr +} + +// getPointer -- +func (x *Fr) getPointer() (p *C.mclBnFr) { + // #nosec + return (*C.mclBnFr)(unsafe.Pointer(x)) +} + +// Clear -- +func (x *Fr) Clear() { + // #nosec + C.mclBnFr_clear(x.getPointer()) +} + +// SetInt64 -- +func (x *Fr) SetInt64(v int64) { + // #nosec + C.mclBnFr_setInt(x.getPointer(), C.int64_t(v)) +} + +// SetString -- +func (x *Fr) SetString(s string, base int) error { + buf := []byte(s) + // #nosec + err := C.mclBnFr_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) + if err != 0 { + return fmt.Errorf("err mclBnFr_setStr %x", err) + } + return nil +} + +// Deserialize -- +func (x *Fr) Deserialize(buf []byte) error { + // #nosec + err := C.mclBnFr_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) + if err == 0 { + return fmt.Errorf("err mclBnFr_deserialize %x", buf) + } + return nil +} + +// SetLittleEndian -- +func (x *Fr) SetLittleEndian(buf []byte) error { + // #nosec + err := C.mclBnFr_setLittleEndian(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) + if err != 0 { + return fmt.Errorf("err mclBnFr_setLittleEndian %x", err) + } + return nil +} + +// IsEqual -- +func (x *Fr) IsEqual(rhs *Fr) bool { + return C.mclBnFr_isEqual(x.getPointer(), rhs.getPointer()) == 1 +} + +// IsZero -- +func (x *Fr) IsZero() bool { + return C.mclBnFr_isZero(x.getPointer()) == 1 +} + +// IsOne -- +func (x *Fr) IsOne() bool { + return C.mclBnFr_isOne(x.getPointer()) == 1 +} + +// SetByCSPRNG -- +func (x *Fr) SetByCSPRNG() { + err := C.mclBnFr_setByCSPRNG(x.getPointer()) + if err != 0 { + panic("err mclBnFr_setByCSPRNG") + } +} + +// SetHashOf -- +func (x *Fr) SetHashOf(buf []byte) bool { + // #nosec + return C.mclBnFr_setHashOf(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) == 0 +} + +// GetString -- +func (x *Fr) GetString(base int) string { + buf := make([]byte, 2048) + // #nosec + n := C.mclBnFr_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) + if n == 0 { + panic("err mclBnFr_getStr") + } + return string(buf[:n]) +} + +// Serialize -- +func (x *Fr) Serialize() []byte { + buf := make([]byte, 2048) + // #nosec + n := C.mclBnFr_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) + if n == 0 { + panic("err mclBnFr_serialize") + } + return buf[:n] +} + +// FrNeg -- +func FrNeg(out *Fr, x *Fr) { + C.mclBnFr_neg(out.getPointer(), x.getPointer()) +} + +// FrInv -- +func FrInv(out *Fr, x *Fr) { + C.mclBnFr_inv(out.getPointer(), x.getPointer()) +} + +// FrAdd -- +func FrAdd(out *Fr, x *Fr, y *Fr) { + C.mclBnFr_add(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// FrSub -- +func FrSub(out *Fr, x *Fr, y *Fr) { + C.mclBnFr_sub(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// FrMul -- +func FrMul(out *Fr, x *Fr, y *Fr) { + C.mclBnFr_mul(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// FrDiv -- +func FrDiv(out *Fr, x *Fr, y *Fr) { + C.mclBnFr_div(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// G1 -- +type G1 struct { + v C.mclBnG1 +} + +// getPointer -- +func (x *G1) getPointer() (p *C.mclBnG1) { + // #nosec + return (*C.mclBnG1)(unsafe.Pointer(x)) +} + +// Clear -- +func (x *G1) Clear() { + // #nosec + C.mclBnG1_clear(x.getPointer()) +} + +// SetString -- +func (x *G1) SetString(s string, base int) error { + buf := []byte(s) + // #nosec + err := C.mclBnG1_setStr(x.getPointer(), 
(*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) + if err != 0 { + return fmt.Errorf("err mclBnG1_setStr %x", err) + } + return nil +} + +// Deserialize -- +func (x *G1) Deserialize(buf []byte) error { + // #nosec + err := C.mclBnG1_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) + if err == 0 { + return fmt.Errorf("err mclBnG1_deserialize %x", buf) + } + return nil +} + +// IsEqual -- +func (x *G1) IsEqual(rhs *G1) bool { + return C.mclBnG1_isEqual(x.getPointer(), rhs.getPointer()) == 1 +} + +// IsZero -- +func (x *G1) IsZero() bool { + return C.mclBnG1_isZero(x.getPointer()) == 1 +} + +// HashAndMapTo -- +func (x *G1) HashAndMapTo(buf []byte) error { + // #nosec + err := C.mclBnG1_hashAndMapTo(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) + if err != 0 { + return fmt.Errorf("err mclBnG1_hashAndMapTo %x", err) + } + return nil +} + +// GetString -- +func (x *G1) GetString(base int) string { + buf := make([]byte, 2048) + // #nosec + n := C.mclBnG1_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) + if n == 0 { + panic("err mclBnG1_getStr") + } + return string(buf[:n]) +} + +// Serialize -- +func (x *G1) Serialize() []byte { + buf := make([]byte, 2048) + // #nosec + n := C.mclBnG1_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) + if n == 0 { + panic("err mclBnG1_serialize") + } + return buf[:n] +} + +// G1Neg -- +func G1Neg(out *G1, x *G1) { + C.mclBnG1_neg(out.getPointer(), x.getPointer()) +} + +// G1Dbl -- +func G1Dbl(out *G1, x *G1) { + C.mclBnG1_dbl(out.getPointer(), x.getPointer()) +} + +// G1Add -- +func G1Add(out *G1, x *G1, y *G1) { + C.mclBnG1_add(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// G1Sub -- +func G1Sub(out *G1, x *G1, y *G1) { + C.mclBnG1_sub(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// G1Mul -- +func G1Mul(out *G1, x *G1, y *Fr) { + C.mclBnG1_mul(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// G1MulCT -- constant time (depending on bit lengh of y) +func G1MulCT(out *G1, x *G1, y *Fr) { + C.mclBnG1_mulCT(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// G2 -- +type G2 struct { + v C.mclBnG2 +} + +// getPointer -- +func (x *G2) getPointer() (p *C.mclBnG2) { + // #nosec + return (*C.mclBnG2)(unsafe.Pointer(x)) +} + +// Clear -- +func (x *G2) Clear() { + // #nosec + C.mclBnG2_clear(x.getPointer()) +} + +// SetString -- +func (x *G2) SetString(s string, base int) error { + buf := []byte(s) + // #nosec + err := C.mclBnG2_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) + if err != 0 { + return fmt.Errorf("err mclBnG2_setStr %x", err) + } + return nil +} + +// Deserialize -- +func (x *G2) Deserialize(buf []byte) error { + // #nosec + err := C.mclBnG2_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) + if err == 0 { + return fmt.Errorf("err mclBnG2_deserialize %x", buf) + } + return nil +} + +// IsEqual -- +func (x *G2) IsEqual(rhs *G2) bool { + return C.mclBnG2_isEqual(x.getPointer(), rhs.getPointer()) == 1 +} + +// IsZero -- +func (x *G2) IsZero() bool { + return C.mclBnG2_isZero(x.getPointer()) == 1 +} + +// HashAndMapTo -- +func (x *G2) HashAndMapTo(buf []byte) error { + // #nosec + err := C.mclBnG2_hashAndMapTo(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) + if err != 0 { + return fmt.Errorf("err mclBnG2_hashAndMapTo %x", err) + } + return nil +} + +// GetString -- +func (x *G2) GetString(base int) string { + buf := 
make([]byte, 2048) + // #nosec + n := C.mclBnG2_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) + if n == 0 { + panic("err mclBnG2_getStr") + } + return string(buf[:n]) +} + +// Serialize -- +func (x *G2) Serialize() []byte { + buf := make([]byte, 2048) + // #nosec + n := C.mclBnG2_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) + if n == 0 { + panic("err mclBnG2_serialize") + } + return buf[:n] +} + +// G2Neg -- +func G2Neg(out *G2, x *G2) { + C.mclBnG2_neg(out.getPointer(), x.getPointer()) +} + +// G2Dbl -- +func G2Dbl(out *G2, x *G2) { + C.mclBnG2_dbl(out.getPointer(), x.getPointer()) +} + +// G2Add -- +func G2Add(out *G2, x *G2, y *G2) { + C.mclBnG2_add(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// G2Sub -- +func G2Sub(out *G2, x *G2, y *G2) { + C.mclBnG2_sub(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// G2Mul -- +func G2Mul(out *G2, x *G2, y *Fr) { + C.mclBnG2_mul(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// GT -- +type GT struct { + v C.mclBnGT +} + +// getPointer -- +func (x *GT) getPointer() (p *C.mclBnGT) { + // #nosec + return (*C.mclBnGT)(unsafe.Pointer(x)) +} + +// Clear -- +func (x *GT) Clear() { + // #nosec + C.mclBnGT_clear(x.getPointer()) +} + +// SetInt64 -- +func (x *GT) SetInt64(v int64) { + // #nosec + C.mclBnGT_setInt(x.getPointer(), C.int64_t(v)) +} + +// SetString -- +func (x *GT) SetString(s string, base int) error { + buf := []byte(s) + // #nosec + err := C.mclBnGT_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) + if err != 0 { + return fmt.Errorf("err mclBnGT_setStr %x", err) + } + return nil +} + +// Deserialize -- +func (x *GT) Deserialize(buf []byte) error { + // #nosec + err := C.mclBnGT_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) + if err == 0 { + return fmt.Errorf("err mclBnGT_deserialize %x", buf) + } + return nil +} + +// IsEqual -- +func (x *GT) IsEqual(rhs *GT) bool { + return C.mclBnGT_isEqual(x.getPointer(), rhs.getPointer()) == 1 +} + +// IsZero -- +func (x *GT) IsZero() bool { + return C.mclBnGT_isZero(x.getPointer()) == 1 +} + +// IsOne -- +func (x *GT) IsOne() bool { + return C.mclBnGT_isOne(x.getPointer()) == 1 +} + +// GetString -- +func (x *GT) GetString(base int) string { + buf := make([]byte, 2048) + // #nosec + n := C.mclBnGT_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) + if n == 0 { + panic("err mclBnGT_getStr") + } + return string(buf[:n]) +} + +// Serialize -- +func (x *GT) Serialize() []byte { + buf := make([]byte, 2048) + // #nosec + n := C.mclBnGT_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) + if n == 0 { + panic("err mclBnGT_serialize") + } + return buf[:n] +} + +// GTNeg -- +func GTNeg(out *GT, x *GT) { + C.mclBnGT_neg(out.getPointer(), x.getPointer()) +} + +// GTInv -- +func GTInv(out *GT, x *GT) { + C.mclBnGT_inv(out.getPointer(), x.getPointer()) +} + +// GTAdd -- +func GTAdd(out *GT, x *GT, y *GT) { + C.mclBnGT_add(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// GTSub -- +func GTSub(out *GT, x *GT, y *GT) { + C.mclBnGT_sub(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// GTMul -- +func GTMul(out *GT, x *GT, y *GT) { + C.mclBnGT_mul(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// GTDiv -- +func GTDiv(out *GT, x *GT, y *GT) { + C.mclBnGT_div(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// GTPow -- +func GTPow(out *GT, x *GT, y *Fr) { + 
C.mclBnGT_pow(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// Pairing --
+func Pairing(out *GT, x *G1, y *G2) {
+    C.mclBn_pairing(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// FinalExp --
+func FinalExp(out *GT, x *GT) {
+    C.mclBn_finalExp(out.getPointer(), x.getPointer())
+}
+
+// MillerLoop --
+func MillerLoop(out *GT, x *G1, y *G2) {
+    C.mclBn_millerLoop(out.getPointer(), x.getPointer(), y.getPointer())
+}
+
+// GetUint64NumToPrecompute --
+func GetUint64NumToPrecompute() int {
+    return int(C.mclBn_getUint64NumToPrecompute())
+}
+
+// PrecomputeG2 --
+func PrecomputeG2(Qbuf []uint64, Q *G2) {
+    // #nosec
+    C.mclBn_precomputeG2((*C.uint64_t)(unsafe.Pointer(&Qbuf[0])), Q.getPointer())
+}
+
+// PrecomputedMillerLoop --
+func PrecomputedMillerLoop(out *GT, P *G1, Qbuf []uint64) {
+    // #nosec
+    C.mclBn_precomputedMillerLoop(out.getPointer(), P.getPointer(), (*C.uint64_t)(unsafe.Pointer(&Qbuf[0])))
+}
+
+// PrecomputedMillerLoop2 -- computes the product of the two Miller loops (P1, Q1buf) and (P2, Q2buf)
+func PrecomputedMillerLoop2(out *GT, P1 *G1, Q1buf []uint64, P2 *G1, Q2buf []uint64) {
+    // #nosec
+    C.mclBn_precomputedMillerLoop2(out.getPointer(), P1.getPointer(), (*C.uint64_t)(unsafe.Pointer(&Q1buf[0])), P2.getPointer(), (*C.uint64_t)(unsafe.Pointer(&Q2buf[0])))
+}
+
+// FrEvaluatePolynomial -- y = c[0] + c[1] * x + c[2] * x^2 + ...
+func FrEvaluatePolynomial(y *Fr, c []Fr, x *Fr) error {
+    // #nosec
+    err := C.mclBn_FrEvaluatePolynomial(y.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&c[0])), (C.size_t)(len(c)), x.getPointer())
+    if err != 0 {
+        return fmt.Errorf("err mclBn_FrEvaluatePolynomial")
+    }
+    return nil
+}
+
+// G1EvaluatePolynomial -- y = c[0] + c[1] * x + c[2] * x^2 + ...
+func G1EvaluatePolynomial(y *G1, c []G1, x *Fr) error {
+    // #nosec
+    err := C.mclBn_G1EvaluatePolynomial(y.getPointer(), (*C.mclBnG1)(unsafe.Pointer(&c[0])), (C.size_t)(len(c)), x.getPointer())
+    if err != 0 {
+        return fmt.Errorf("err mclBn_G1EvaluatePolynomial")
+    }
+    return nil
+}
+
+// G2EvaluatePolynomial -- y = c[0] + c[1] * x + c[2] * x^2 + ...
+func G2EvaluatePolynomial(y *G2, c []G2, x *Fr) error { + // #nosec + err := C.mclBn_G2EvaluatePolynomial(y.getPointer(), (*C.mclBnG2)(unsafe.Pointer(&c[0])), (C.size_t)(len(c)), x.getPointer()) + if err != 0 { + return fmt.Errorf("err mclBn_G2EvaluatePolynomial") + } + return nil +} + +// FrLagrangeInterpolation -- +func FrLagrangeInterpolation(out *Fr, xVec []Fr, yVec []Fr) error { + if len(xVec) != len(yVec) { + return fmt.Errorf("err FrLagrangeInterpolation:bad size") + } + // #nosec + err := C.mclBn_FrLagrangeInterpolation(out.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&xVec[0])), (*C.mclBnFr)(unsafe.Pointer(&yVec[0])), (C.size_t)(len(xVec))) + if err != 0 { + return fmt.Errorf("err FrLagrangeInterpolation") + } + return nil +} + +// G1LagrangeInterpolation -- +func G1LagrangeInterpolation(out *G1, xVec []Fr, yVec []G1) error { + if len(xVec) != len(yVec) { + return fmt.Errorf("err G1LagrangeInterpolation:bad size") + } + // #nosec + err := C.mclBn_G1LagrangeInterpolation(out.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&xVec[0])), (*C.mclBnG1)(unsafe.Pointer(&yVec[0])), (C.size_t)(len(xVec))) + if err != 0 { + return fmt.Errorf("err G1LagrangeInterpolation") + } + return nil +} + +// G2LagrangeInterpolation -- +func G2LagrangeInterpolation(out *G2, xVec []Fr, yVec []G2) error { + if len(xVec) != len(yVec) { + return fmt.Errorf("err G2LagrangeInterpolation:bad size") + } + // #nosec + err := C.mclBn_G2LagrangeInterpolation(out.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&xVec[0])), (*C.mclBnG2)(unsafe.Pointer(&yVec[0])), (C.size_t)(len(xVec))) + if err != 0 { + return fmt.Errorf("err G2LagrangeInterpolation") + } + return nil +} diff --git a/vendor/github.com/byzantine-lab/bls/images/bls-go-alpine/Dockerfile b/vendor/github.com/byzantine-lab/bls/images/bls-go-alpine/Dockerfile new file mode 100644 index 000000000..edd49eb4b --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/images/bls-go-alpine/Dockerfile @@ -0,0 +1,12 @@ +FROM golang:alpine +MAINTAINER Jimmy Hu + +# Install dependencies +RUN apk add --update-cache build-base gmp-dev openssl-dev git + +# Build bls library +RUN mkdir work ; cd work +RUN git clone --depth 1 git://github.com/dexon-foundation/mcl.git +RUN mkdir bls +COPY . 
bls/
+RUN cd bls ; make clean && make test_go DOCKER=alpine -j && cp lib/* /usr/lib/
diff --git a/vendor/github.com/byzantine-lab/bls/include/bls/bls.h b/vendor/github.com/byzantine-lab/bls/include/bls/bls.h
new file mode 100644
index 000000000..cb300bc49
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/include/bls/bls.h
@@ -0,0 +1,275 @@
+#pragma once
+/**
+    @file
+    @brief C interface of bls.hpp
+    @author MITSUNARI Shigeo(@herumi)
+    @license modified new BSD license
+    http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <mcl/bn.h>
+
+#ifdef BLS_SWAP_G
+    /*
+        error if BLS_SWAP_G is inconsistently used between library and exe
+    */
+    #undef MCLBN_COMPILED_TIME_VAR
+    #define MCLBN_COMPILED_TIME_VAR ((MCLBN_FR_UNIT_SIZE) * 10 + (MCLBN_FP_UNIT_SIZE) + 100)
+#endif
+
+#ifdef _MSC_VER
+    #ifdef BLS_DONT_EXPORT
+        #define BLS_DLL_API
+    #else
+        #ifdef BLS_DLL_EXPORT
+            #define BLS_DLL_API __declspec(dllexport)
+        #else
+            #define BLS_DLL_API __declspec(dllimport)
+        #endif
+    #endif
+    #ifndef BLS_NO_AUTOLINK
+        #if MCLBN_FP_UNIT_SIZE == 4
+            #pragma comment(lib, "bls256.lib")
+        #elif (MCLBN_FP_UNIT_SIZE == 6) && (MCLBN_FR_UNIT_SIZE == 4)
+            #pragma comment(lib, "bls384_256.lib")
+        #elif (MCLBN_FP_UNIT_SIZE == 6) && (MCLBN_FR_UNIT_SIZE == 6)
+            #pragma comment(lib, "bls384.lib")
+        #endif
+    #endif
+#elif defined(__EMSCRIPTEN__) && !defined(BLS_DONT_EXPORT)
+    #define BLS_DLL_API __attribute__((used))
+#elif defined(__wasm__) && !defined(BLS_DONT_EXPORT)
+    #define BLS_DLL_API __attribute__((visibility("default")))
+#else
+    #define BLS_DLL_API
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+    mclBnFr v;
+} blsId;
+
+typedef struct {
+    mclBnFr v;
+} blsSecretKey;
+
+typedef struct {
+#ifdef BLS_SWAP_G
+    mclBnG1 v;
+#else
+    mclBnG2 v;
+#endif
+} blsPublicKey;
+
+typedef struct {
+#ifdef BLS_SWAP_G
+    mclBnG2 v;
+#else
+    mclBnG1 v;
+#endif
+} blsSignature;
+
+/*
+    initialize this library
+    call this once before using the other functions
+    @param curve [in] enum value defined in mcl/bn.h
+    @param compiledTimeVar [in] specify MCLBN_COMPILED_TIME_VAR;
+    this macro is used to make sure that the values
+    are the same when the library is built and used
+    @return 0 if success
+    @note blsInit() is thread safe and serialized if it is called simultaneously
+    but don't call it while using other functions.
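+
+    a minimal calling sketch (illustrative only, not part of this header):
+        int rc = blsInit(MCL_BN254, MCLBN_COMPILED_TIME_VAR);
+        if (rc != 0) exit(1); ; built and used library sizes disagree
+    every other bls* function assumes one successful blsInit() call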
+*/
+BLS_DLL_API int blsInit(int curve, int compiledTimeVar);
+
+BLS_DLL_API void blsIdSetInt(blsId *id, int x);
+
+// sec = buf & (1 << bitLen(r)) - 1
+// if (sec >= r) sec &= (1 << (bitLen(r) - 1)) - 1
+// always return 0
+BLS_DLL_API int blsSecretKeySetLittleEndian(blsSecretKey *sec, const void *buf, mclSize bufSize);
+// return 0 if success (bufSize <= 64) else -1
+// set (buf mod r) to sec
+BLS_DLL_API int blsSecretKeySetLittleEndianMod(blsSecretKey *sec, const void *buf, mclSize bufSize);
+
+BLS_DLL_API void blsGetPublicKey(blsPublicKey *pub, const blsSecretKey *sec);
+
+// calculate the hash of m and sign the hash
+BLS_DLL_API void blsSign(blsSignature *sig, const blsSecretKey *sec, const void *m, mclSize size);
+
+// return 1 if valid
+BLS_DLL_API int blsVerify(const blsSignature *sig, const blsPublicKey *pub, const void *m, mclSize size);
+
+// return written byte size if success else 0
+BLS_DLL_API mclSize blsIdSerialize(void *buf, mclSize maxBufSize, const blsId *id);
+BLS_DLL_API mclSize blsSecretKeySerialize(void *buf, mclSize maxBufSize, const blsSecretKey *sec);
+BLS_DLL_API mclSize blsPublicKeySerialize(void *buf, mclSize maxBufSize, const blsPublicKey *pub);
+BLS_DLL_API mclSize blsSignatureSerialize(void *buf, mclSize maxBufSize, const blsSignature *sig);
+
+// return read byte size if success else 0
+BLS_DLL_API mclSize blsIdDeserialize(blsId *id, const void *buf, mclSize bufSize);
+BLS_DLL_API mclSize blsSecretKeyDeserialize(blsSecretKey *sec, const void *buf, mclSize bufSize);
+BLS_DLL_API mclSize blsPublicKeyDeserialize(blsPublicKey *pub, const void *buf, mclSize bufSize);
+BLS_DLL_API mclSize blsSignatureDeserialize(blsSignature *sig, const void *buf, mclSize bufSize);
+
+// return 1 if same else 0
+BLS_DLL_API int blsIdIsEqual(const blsId *lhs, const blsId *rhs);
+BLS_DLL_API int blsSecretKeyIsEqual(const blsSecretKey *lhs, const blsSecretKey *rhs);
+BLS_DLL_API int blsPublicKeyIsEqual(const blsPublicKey *lhs, const blsPublicKey *rhs);
+BLS_DLL_API int blsSignatureIsEqual(const blsSignature *lhs, const blsSignature *rhs);
+
+// return 0 if success
+BLS_DLL_API int blsSecretKeyShare(blsSecretKey *sec, const blsSecretKey* msk, mclSize k, const blsId *id);
+BLS_DLL_API int blsPublicKeyShare(blsPublicKey *pub, const blsPublicKey *mpk, mclSize k, const blsId *id);
+
+BLS_DLL_API int blsSecretKeyRecover(blsSecretKey *sec, const blsSecretKey *secVec, const blsId *idVec, mclSize n);
+BLS_DLL_API int blsPublicKeyRecover(blsPublicKey *pub, const blsPublicKey *pubVec, const blsId *idVec, mclSize n);
+BLS_DLL_API int blsSignatureRecover(blsSignature *sig, const blsSignature *sigVec, const blsId *idVec, mclSize n);
+
+// add
+BLS_DLL_API void blsSecretKeyAdd(blsSecretKey *sec, const blsSecretKey *rhs);
+BLS_DLL_API void blsPublicKeyAdd(blsPublicKey *pub, const blsPublicKey *rhs);
+BLS_DLL_API void blsSignatureAdd(blsSignature *sig, const blsSignature *rhs);
+
+/*
+    verify whether a point of an elliptic curve has order r
+    This api affects setStr(), deserialize() for G2 on BN or G1/G2 on BLS12
+    @param doVerify [in] does not verify if zero (default 1)
+    Signature = G1, PublicKey = G2
+*/
+BLS_DLL_API void blsSignatureVerifyOrder(int doVerify);
+BLS_DLL_API void blsPublicKeyVerifyOrder(int doVerify);
+// deserialize under VerifyOrder(true) = deserialize under VerifyOrder(false) + IsValidOrder
+BLS_DLL_API int blsSignatureIsValidOrder(const blsSignature *sig);
+BLS_DLL_API int blsPublicKeyIsValidOrder(const blsPublicKey *pub);
+
+#ifndef BLS_MINIMUM_API
+
+/*
+    verify X == sY by
checking e(X, sQ) = e(Y, Q) + @param X [in] + @param Y [in] + @param pub [in] pub = sQ + @return 1 if e(X, pub) = e(Y, Q) else 0 +*/ +BLS_DLL_API int blsVerifyPairing(const blsSignature *X, const blsSignature *Y, const blsPublicKey *pub); + +/* + sign the hash + use the low (bitSize of r) - 1 bit of h + return 0 if success else -1 + NOTE : return false if h is zero or c1 or -c1 value for BN254. see hashTest() in test/bls_test.hpp +*/ +BLS_DLL_API int blsSignHash(blsSignature *sig, const blsSecretKey *sec, const void *h, mclSize size); +// return 1 if valid +BLS_DLL_API int blsVerifyHash(const blsSignature *sig, const blsPublicKey *pub, const void *h, mclSize size); + +/* + verify aggSig with pubVec[0, n) and hVec[0, n) + e(aggSig, Q) = prod_i e(hVec[i], pubVec[i]) + return 1 if valid + @note do not check duplication of hVec +*/ +BLS_DLL_API int blsVerifyAggregatedHashes(const blsSignature *aggSig, const blsPublicKey *pubVec, const void *hVec, size_t sizeofHash, mclSize n); + +// sub +BLS_DLL_API void blsSecretKeySub(blsSecretKey *sec, const blsSecretKey *rhs); +BLS_DLL_API void blsPublicKeySub(blsPublicKey *pub, const blsPublicKey *rhs); +BLS_DLL_API void blsSignatureSub(blsSignature *sig, const blsSignature *rhs); + +// not thread safe version (old blsInit) +BLS_DLL_API int blsInitNotThreadSafe(int curve, int compiledTimeVar); + +BLS_DLL_API mclSize blsGetOpUnitSize(void); +// return strlen(buf) if success else 0 +BLS_DLL_API int blsGetCurveOrder(char *buf, mclSize maxBufSize); +BLS_DLL_API int blsGetFieldOrder(char *buf, mclSize maxBufSize); + +// return bytes for serialized G1(=Fp) +BLS_DLL_API int blsGetG1ByteSize(void); + +// return bytes for serialized Fr +BLS_DLL_API int blsGetFrByteSize(void); + +#ifdef BLS_SWAP_G +// get a generator of G1 +BLS_DLL_API void blsGetGeneratorOfG1(blsPublicKey *pub); +#else +// get a generator of G2 +BLS_DLL_API void blsGetGeneratorOfG2(blsPublicKey *pub); +#endif + +// return 0 if success +BLS_DLL_API int blsIdSetDecStr(blsId *id, const char *buf, mclSize bufSize); +BLS_DLL_API int blsIdSetHexStr(blsId *id, const char *buf, mclSize bufSize); + +/* + return strlen(buf) if success else 0 + buf is '\0' terminated +*/ +BLS_DLL_API mclSize blsIdGetDecStr(char *buf, mclSize maxBufSize, const blsId *id); +BLS_DLL_API mclSize blsIdGetHexStr(char *buf, mclSize maxBufSize, const blsId *id); + +// hash buf and set +BLS_DLL_API int blsHashToSecretKey(blsSecretKey *sec, const void *buf, mclSize bufSize); +#ifndef MCL_DONT_USE_CSPRNG +/* + set secretKey if system has /dev/urandom or CryptGenRandom + return 0 if success else -1 +*/ +BLS_DLL_API int blsSecretKeySetByCSPRNG(blsSecretKey *sec); +/* + set user-defined random function for setByCSPRNG + @param self [in] user-defined pointer + @param readFunc [in] user-defined function, + which writes random bufSize bytes to buf and returns bufSize if success else returns 0 + @note if self == 0 and readFunc == 0 then set default random function + @note not threadsafe +*/ +BLS_DLL_API void blsSetRandFunc(void *self, unsigned int (*readFunc)(void *self, void *buf, unsigned int bufSize)); +#endif + +BLS_DLL_API void blsGetPop(blsSignature *sig, const blsSecretKey *sec); + +BLS_DLL_API int blsVerifyPop(const blsSignature *sig, const blsPublicKey *pub); +////////////////////////////////////////////////////////////////////////// +// the following apis will be removed + +// mask buf with (1 << (bitLen(r) - 1)) - 1 if buf >= r +BLS_DLL_API int blsIdSetLittleEndian(blsId *id, const void *buf, mclSize bufSize); +/* + return written 
byte size if success else 0
+*/
+BLS_DLL_API mclSize blsIdGetLittleEndian(void *buf, mclSize maxBufSize, const blsId *id);
+
+// return 0 if success
+BLS_DLL_API int blsSecretKeySetDecStr(blsSecretKey *sec, const char *buf, mclSize bufSize);
+BLS_DLL_API int blsSecretKeySetHexStr(blsSecretKey *sec, const char *buf, mclSize bufSize);
+/*
+    return written byte size if success else 0
+*/
+BLS_DLL_API mclSize blsSecretKeyGetLittleEndian(void *buf, mclSize maxBufSize, const blsSecretKey *sec);
+/*
+    return strlen(buf) if success else 0
+    buf is '\0' terminated
+*/
+BLS_DLL_API mclSize blsSecretKeyGetDecStr(char *buf, mclSize maxBufSize, const blsSecretKey *sec);
+BLS_DLL_API mclSize blsSecretKeyGetHexStr(char *buf, mclSize maxBufSize, const blsSecretKey *sec);
+BLS_DLL_API int blsPublicKeySetHexStr(blsPublicKey *pub, const char *buf, mclSize bufSize);
+BLS_DLL_API mclSize blsPublicKeyGetHexStr(char *buf, mclSize maxBufSize, const blsPublicKey *pub);
+BLS_DLL_API int blsSignatureSetHexStr(blsSignature *sig, const char *buf, mclSize bufSize);
+BLS_DLL_API mclSize blsSignatureGetHexStr(char *buf, mclSize maxBufSize, const blsSignature *sig);
+
+/*
+    Diffie Hellman key exchange
+    out = sec * pub
+*/
+BLS_DLL_API void blsDHKeyExchange(blsPublicKey *out, const blsSecretKey *sec, const blsPublicKey *pub);
+
+#endif // BLS_MINIMUM_API
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/vendor/github.com/byzantine-lab/bls/include/bls/bls.hpp b/vendor/github.com/byzantine-lab/bls/include/bls/bls.hpp
new file mode 100644
index 000000000..741334555
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/include/bls/bls.hpp
@@ -0,0 +1,534 @@
+#pragma once
+/**
+    @file
+    @brief BLS threshold signature on BN curve
+    @author MITSUNARI Shigeo(@herumi)
+    @license modified new BSD license
+    http://opensource.org/licenses/BSD-3-Clause
+*/
+#include <bls/bls.h>
+#include <stdexcept>
+#include <vector>
+#include <string>
+#include <iosfwd>
+#include <stdint.h>
+
+namespace bls {
+
+// same values as IoMode of mcl/op.hpp
+enum {
+    IoBin = 2, // binary number
+    IoDec = 10, // decimal number
+    IoHex = 16, // hexadecimal number
+    IoPrefix = 128, // append '0b'(bin) or '0x'(hex)
+    IoSerialize = 512,
+    IoFixedByteSeq = IoSerialize // fixed byte representation
+};
+
+/*
+    BLS signature
+    e : G2 x G1 -> Fp12
+    Q in G2 ; fixed global parameter
+    H : {str} -> G1
+    s : secret key
+    sQ ; public key
+    s H(m) ; signature of m
+    verify ; e(sQ, H(m)) = e(Q, s H(m))
+*/
+
+/*
+    initialize this library
+    call this once before using the other methods
+    @param curve [in] type of curve
+    @param compiledTimeVar [in] use the default value
+    @note init() is not thread safe
+*/
+inline void init(int curve = mclBn_CurveFp254BNb, int compiledTimeVar = MCLBN_COMPILED_TIME_VAR)
+{
+    if (blsInit(curve, compiledTimeVar) != 0) throw std::invalid_argument("blsInit");
+}
+inline size_t getOpUnitSize() { return blsGetOpUnitSize(); }
+
+inline void getCurveOrder(std::string& str)
+{
+    str.resize(1024);
+    mclSize n = blsGetCurveOrder(&str[0], str.size());
+    if (n == 0) throw std::runtime_error("blsGetCurveOrder");
+    str.resize(n);
+}
+inline void getFieldOrder(std::string& str)
+{
+    str.resize(1024);
+    mclSize n = blsGetFieldOrder(&str[0], str.size());
+    if (n == 0) throw std::runtime_error("blsGetFieldOrder");
+    str.resize(n);
+}
+inline int getG1ByteSize() { return blsGetG1ByteSize(); }
+inline int getFrByteSize() { return blsGetFrByteSize(); }
+
+namespace local {
+/*
+    the value of secretKey and Id must be less than
+    r = 0x2523648240000001ba344d8000000007ff9f800000000010a10000000000000d
+    sizeof(uint64_t) * keySize byte
+*/
+const size_t keySize = MCLBN_FP_UNIT_SIZE;
+}
+
+class SecretKey;
+class PublicKey;
+class Signature;
+class Id;
+
+typedef std::vector<SecretKey> SecretKeyVec;
+typedef std::vector<PublicKey> PublicKeyVec;
+typedef std::vector<Signature> SignatureVec;
+typedef std::vector<Id> IdVec;
+
+class Id {
+    blsId self_;
+    friend class PublicKey;
+    friend class SecretKey;
+    friend class Signature;
+public:
+    Id(unsigned int id = 0)
+    {
+        blsIdSetInt(&self_, id);
+    }
+    bool operator==(const Id& rhs) const
+    {
+        return blsIdIsEqual(&self_, &rhs.self_) == 1;
+    }
+    bool operator!=(const Id& rhs) const { return !(*this == rhs); }
+    friend std::ostream& operator<<(std::ostream& os, const Id& id)
+    {
+        std::string str;
+        id.getStr(str, 16|IoPrefix);
+        return os << str;
+    }
+    friend std::istream& operator>>(std::istream& is, Id& id)
+    {
+        std::string str;
+        is >> str;
+        id.setStr(str, 16);
+        return is;
+    }
+    void getStr(std::string& str, int ioMode = 0) const
+    {
+        str.resize(1024);
+        size_t n = mclBnFr_getStr(&str[0], str.size(), &self_.v, ioMode);
+        if (n == 0) throw std::runtime_error("mclBnFr_getStr");
+        str.resize(n);
+    }
+    void setStr(const std::string& str, int ioMode = 0)
+    {
+        int ret = mclBnFr_setStr(&self_.v, str.c_str(), str.size(), ioMode);
+        if (ret != 0) throw std::runtime_error("mclBnFr_setStr");
+    }
+    bool isZero() const
+    {
+        return mclBnFr_isZero(&self_.v) == 1;
+    }
+    /*
+        set p[0, .., keySize)
+        @note the value must be less than r
+    */
+    void set(const uint64_t *p)
+    {
+        setLittleEndian(p, local::keySize * sizeof(uint64_t));
+    }
+    // bufSize is truncated/zero extended to keySize
+    void setLittleEndian(const void *buf, size_t bufSize)
+    {
+        mclBnFr_setLittleEndian(&self_.v, buf, bufSize);
+    }
+};
+
+/*
+    s ; secret key
+*/
+class SecretKey {
+    blsSecretKey self_;
+public:
+    bool operator==(const SecretKey& rhs) const
+    {
+        return blsSecretKeyIsEqual(&self_, &rhs.self_) == 1;
+    }
+    bool operator!=(const SecretKey& rhs) const { return !(*this == rhs); }
+    friend std::ostream& operator<<(std::ostream& os, const SecretKey& sec)
+    {
+        std::string str;
+        sec.getStr(str, 16|IoPrefix);
+        return os << str;
+    }
+    friend std::istream& operator>>(std::istream& is, SecretKey& sec)
+    {
+        std::string str;
+        is >> str;
+        sec.setStr(str);
+        return is;
+    }
+    void getStr(std::string& str, int ioMode = 0) const
+    {
+        str.resize(1024);
+        size_t n = mclBnFr_getStr(&str[0], str.size(), &self_.v, ioMode);
+        if (n == 0) throw std::runtime_error("mclBnFr_getStr");
+        str.resize(n);
+    }
+    void setStr(const std::string& str, int ioMode = 0)
+    {
+        int ret = mclBnFr_setStr(&self_.v, str.c_str(), str.size(), ioMode);
+        if (ret != 0) throw std::runtime_error("mclBnFr_setStr");
+    }
+    /*
+        initialize secretKey with random number
+    */
+    void init()
+    {
+        int ret = blsSecretKeySetByCSPRNG(&self_);
+        if (ret != 0) throw std::runtime_error("blsSecretKeySetByCSPRNG");
+    }
+    /*
+        set secretKey with p[0, .., keySize) and set id = 0
+        @note the value must be less than r
+    */
+    void set(const uint64_t *p)
+    {
+        setLittleEndian(p, local::keySize * sizeof(uint64_t));
+    }
+    // bufSize is truncated/zero extended to keySize
+    void setLittleEndian(const void *buf, size_t bufSize)
+    {
+        mclBnFr_setLittleEndian(&self_.v, buf, bufSize);
+    }
+    // set hash of buf
+    void setHashOf(const void *buf, size_t bufSize)
+    {
+        int ret = mclBnFr_setHashOf(&self_.v, buf, bufSize);
+        if (ret != 0) throw std::runtime_error("mclBnFr_setHashOf");
+    }
+    void getPublicKey(PublicKey& pub) const;
+    // constant time sign
+    // sign hash(m)
+    void sign(Signature& sig, const void *m, size_t size) const;
+    void sign(Signature& sig, const std::string& m) const
+    {
+        sign(sig, m.c_str(), m.size());
+    }
+    // sign hashed value
+    void signHash(Signature& sig, const void *h, size_t size) const;
+    void signHash(Signature& sig, const std::string& h) const
+    {
+        signHash(sig, h.c_str(), h.size());
+    }
+    /*
+        make Pop (Proof of Possession)
+        pop = prv.sign(pub)
+    */
+    void getPop(Signature& pop) const;
+    /*
+        make [s_0, ..., s_{k-1}] to prepare k-out-of-n secret sharing
+    */
+    void getMasterSecretKey(SecretKeyVec& msk, size_t k) const
+    {
+        if (k <= 1) throw std::invalid_argument("getMasterSecretKey");
+        msk.resize(k);
+        msk[0] = *this;
+        for (size_t i = 1; i < k; i++) {
+            msk[i].init();
+        }
+    }
+    /*
+        set a secret key for id > 0 from msk
+    */
+    void set(const SecretKeyVec& msk, const Id& id)
+    {
+        set(msk.data(), msk.size(), id);
+    }
+    /*
+        recover secretKey from k secVec
+    */
+    void recover(const SecretKeyVec& secVec, const IdVec& idVec)
+    {
+        if (secVec.size() != idVec.size()) throw std::invalid_argument("SecretKey:recover");
+        recover(secVec.data(), idVec.data(), idVec.size());
+    }
+    /*
+        add secret key
+    */
+    void add(const SecretKey& rhs);
+
+    // the following methods are for C api
+    /*
+        the size of msk must be k
+    */
+    void set(const SecretKey *msk, size_t k, const Id& id)
+    {
+        int ret = blsSecretKeyShare(&self_, &msk->self_, k, &id.self_);
+        if (ret != 0) throw std::runtime_error("blsSecretKeyShare");
+    }
+    void recover(const SecretKey *secVec, const Id *idVec, size_t n)
+    {
+        int ret = blsSecretKeyRecover(&self_, &secVec->self_, &idVec->self_, n);
+        if (ret != 0) throw std::runtime_error("blsSecretKeyRecover:same id");
+    }
+};
+
+/*
+    sQ ; public key
+*/
+class PublicKey {
+    blsPublicKey self_;
+    friend class SecretKey;
+    friend class Signature;
+public:
+    bool operator==(const PublicKey& rhs) const
+    {
+        return blsPublicKeyIsEqual(&self_, &rhs.self_) == 1;
+    }
+    bool operator!=(const PublicKey& rhs) const { return !(*this == rhs); }
+    friend std::ostream& operator<<(std::ostream& os, const PublicKey& pub)
+    {
+        std::string str;
+        pub.getStr(str, 16|IoPrefix);
+        return os << str;
+    }
+    friend std::istream& operator>>(std::istream& is, PublicKey& pub)
+    {
+        std::string str;
+        is >> str;
+        if (str != "0") {
+            // 1
+            std::string t;
+#ifdef BLS_SWAP_G
+            const int elemNum = 2;
+#else
+            const int elemNum = 4;
+#endif
+            for (int i = 0; i < elemNum; i++) {
+                is >> t;
+                str += ' ';
+                str += t;
+            }
+        }
+        pub.setStr(str, 16);
+        return is;
+    }
+    void getStr(std::string& str, int ioMode = 0) const
+    {
+        str.resize(1024);
+#ifdef BLS_SWAP_G
+        size_t n = mclBnG1_getStr(&str[0], str.size(), &self_.v, ioMode);
+#else
+        size_t n = mclBnG2_getStr(&str[0], str.size(), &self_.v, ioMode);
+#endif
+        if (n == 0) throw std::runtime_error("PublicKey:getStr");
+        str.resize(n);
+    }
+    void setStr(const std::string& str, int ioMode = 0)
+    {
+#ifdef BLS_SWAP_G
+        int ret = mclBnG1_setStr(&self_.v, str.c_str(), str.size(), ioMode);
+#else
+        int ret = mclBnG2_setStr(&self_.v, str.c_str(), str.size(), ioMode);
+#endif
+        if (ret != 0) throw std::runtime_error("PublicKey:setStr");
+    }
+    /*
+        set public key for id from mpk
+    */
+    void set(const PublicKeyVec& mpk, const Id& id)
+    {
+        set(mpk.data(), mpk.size(), id);
+    }
+    /*
+        recover publicKey from k pubVec
+    */
+    void recover(const PublicKeyVec& pubVec, const IdVec& idVec)
+    {
+        if (pubVec.size() != idVec.size()) throw std::invalid_argument("PublicKey:recover");
+        recover(pubVec.data(), idVec.data(), idVec.size());
+    }
+    /*
+        add public key
+    */
+    void add(const PublicKey& rhs)
+    {
+        blsPublicKeyAdd(&self_, &rhs.self_);
+    }
+
+    // the following methods are for C api
+    void set(const PublicKey *mpk, size_t k, const Id& id)
+    {
+        int ret = blsPublicKeyShare(&self_, &mpk->self_, k, &id.self_);
+        if (ret != 0) throw std::runtime_error("blsPublicKeyShare");
+    }
+    void recover(const PublicKey *pubVec, const Id *idVec, size_t n)
+    {
+        int ret = blsPublicKeyRecover(&self_, &pubVec->self_, &idVec->self_, n);
+        if (ret != 0) throw std::runtime_error("blsPublicKeyRecover");
+    }
+};
+
+/*
+    s H(m) ; signature
+*/
+class Signature {
+    blsSignature self_;
+    friend class SecretKey;
+public:
+    bool operator==(const Signature& rhs) const
+    {
+        return blsSignatureIsEqual(&self_, &rhs.self_) == 1;
+    }
+    bool operator!=(const Signature& rhs) const { return !(*this == rhs); }
+    friend std::ostream& operator<<(std::ostream& os, const Signature& sig)
+    {
+        std::string str;
+        sig.getStr(str, 16|IoPrefix);
+        return os << str;
+    }
+    friend std::istream& operator>>(std::istream& is, Signature& sig)
+    {
+        std::string str;
+        is >> str;
+        if (str != "0") {
+            // 1
+            std::string t;
+#ifdef BLS_SWAP_G
+            const int elemNum = 4;
+#else
+            const int elemNum = 2;
+#endif
+            for (int i = 0; i < elemNum; i++) {
+                is >> t;
+                str += ' ';
+                str += t;
+            }
+        }
+        sig.setStr(str, 16);
+        return is;
+    }
+    void getStr(std::string& str, int ioMode = 0) const
+    {
+        str.resize(1024);
+#ifdef BLS_SWAP_G
+        size_t n = mclBnG2_getStr(&str[0], str.size(), &self_.v, ioMode);
+#else
+        size_t n = mclBnG1_getStr(&str[0], str.size(), &self_.v, ioMode);
+#endif
+        if (n == 0) throw std::runtime_error("Signature:getStr");
+        str.resize(n);
+    }
+    void setStr(const std::string& str, int ioMode = 0)
+    {
+#ifdef BLS_SWAP_G
+        int ret = mclBnG2_setStr(&self_.v, str.c_str(), str.size(), ioMode);
+#else
+        int ret = mclBnG1_setStr(&self_.v, str.c_str(), str.size(), ioMode);
+#endif
+        if (ret != 0) throw std::runtime_error("Signature:setStr");
+    }
+    bool verify(const PublicKey& pub, const void *m, size_t size) const
+    {
+        return blsVerify(&self_, &pub.self_, m, size) == 1;
+    }
+    bool verify(const PublicKey& pub, const std::string& m) const
+    {
+        return verify(pub, m.c_str(), m.size());
+    }
+    bool verifyHash(const PublicKey& pub, const void *h, size_t size) const
+    {
+        return blsVerifyHash(&self_, &pub.self_, h, size) == 1;
+    }
+    bool verifyHash(const PublicKey& pub, const std::string& h) const
+    {
+        return verifyHash(pub, h.c_str(), h.size());
+    }
+    bool verifyAggregatedHashes(const PublicKey *pubVec, const void *hVec, size_t sizeofHash, size_t n) const
+    {
+        return blsVerifyAggregatedHashes(&self_, &pubVec[0].self_, hVec, sizeofHash, n) == 1;
+    }
+    /*
+        verify self(pop) with pub
+    */
+    bool verify(const PublicKey& pub) const
+    {
+        std::string str;
+        pub.getStr(str);
+        return verify(pub, str);
+    }
+    /*
+        recover sig from k sigVec
+    */
+    void recover(const SignatureVec& sigVec, const IdVec& idVec)
+    {
+        if (sigVec.size() != idVec.size()) throw std::invalid_argument("Signature:recover");
+        recover(sigVec.data(), idVec.data(), idVec.size());
+    }
+    /*
+        add signature
+    */
+    void add(const Signature& rhs)
+    {
+        blsSignatureAdd(&self_, &rhs.self_);
+    }
+
+    // the following methods are for C api
+    void recover(const Signature* sigVec, const Id *idVec, size_t n)
+    {
+        int ret = blsSignatureRecover(&self_, &sigVec->self_, &idVec->self_, n);
+        if (ret != 0) throw std::runtime_error("blsSignatureRecover:same id");
+    }
+};
+
+/*
+    make master public key [s_0 Q, ..., s_{k-1} Q] from msk
+*/
+inline void
getMasterPublicKey(PublicKeyVec& mpk, const SecretKeyVec& msk) +{ + const size_t n = msk.size(); + mpk.resize(n); + for (size_t i = 0; i < n; i++) { + msk[i].getPublicKey(mpk[i]); + } +} + +inline void SecretKey::getPublicKey(PublicKey& pub) const +{ + blsGetPublicKey(&pub.self_, &self_); +} +inline void SecretKey::sign(Signature& sig, const void *m, size_t size) const +{ + blsSign(&sig.self_, &self_, m, size); +} +inline void SecretKey::signHash(Signature& sig, const void *h, size_t size) const +{ + if (blsSignHash(&sig.self_, &self_, h, size) != 0) throw std::runtime_error("bad h"); +} +inline void SecretKey::getPop(Signature& pop) const +{ + PublicKey pub; + getPublicKey(pub); + std::string m; + pub.getStr(m); + sign(pop, m); +} + +/* + make pop from msk and mpk +*/ +inline void getPopVec(SignatureVec& popVec, const SecretKeyVec& msk) +{ + const size_t n = msk.size(); + popVec.resize(n); + for (size_t i = 0; i < n; i++) { + msk[i].getPop(popVec[i]); + } +} + +inline Signature operator+(const Signature& a, const Signature& b) { Signature r(a); r.add(b); return r; } +inline PublicKey operator+(const PublicKey& a, const PublicKey& b) { PublicKey r(a); r.add(b); return r; } +inline SecretKey operator+(const SecretKey& a, const SecretKey& b) { SecretKey r(a); r.add(b); return r; } + +} //bls diff --git a/vendor/github.com/byzantine-lab/bls/lib/.emptydir b/vendor/github.com/byzantine-lab/bls/lib/.emptydir new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/byzantine-lab/bls/mk.bat b/vendor/github.com/byzantine-lab/bls/mk.bat new file mode 100644 index 000000000..9bf8dd9e6 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/mk.bat @@ -0,0 +1,20 @@ +@echo off +if "%1"=="-s" ( + echo use static lib + set CFLAGS=%CFLAGS% /DMCLBN_NO_AUTOLINK /DBLS_DONT_EXPORT +) else if "%1"=="-d" ( + echo use dynamic lib +) else ( + echo "mk (-s|-d) " + goto exit +) +set CFLAGS=%CFLAGS% -I../mcl/include +set SRC=%2 +set EXE=%SRC:.cpp=.exe% +set EXE=%EXE:.c=.exe% +set EXE=%EXE:test\=bin\% +set EXE=%EXE:sample\=bin\% +echo cl %CFLAGS% %2 /Fe:%EXE% /link %LDFLAGS% +cl %CFLAGS% %2 /Fe:%EXE% /link %LDFLAGS% + +:exit diff --git a/vendor/github.com/byzantine-lab/bls/mkdll.bat b/vendor/github.com/byzantine-lab/bls/mkdll.bat new file mode 100755 index 000000000..17e934f92 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/mkdll.bat @@ -0,0 +1,8 @@ +rem @echo off + +call setvar.bat dll +echo make bls384.dll +cl /c %CFLAGS% /DBLS_NO_AUTOLINK /Foobj/bls_c.obj src/bls_c.cpp +cl /c %CFLAGS% /DBLS_NO_AUTOLINK /Foobj/fp.obj ../mcl/src/fp.cpp +lib /OUT:lib/bls384.lib /nodefaultlib obj/bls_c.obj obj/fp.obj %LDFLAGS% +cl /LD /MT obj/bls_c.obj obj/fp.obj %CFLAGS% /link /out:bin/bls384.dll %LDFLAGS% diff --git a/vendor/github.com/byzantine-lab/bls/mklib.bat b/vendor/github.com/byzantine-lab/bls/mklib.bat new file mode 100644 index 000000000..4a60d7196 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/mklib.bat @@ -0,0 +1,26 @@ +@echo off +call ..\mcl\setvar.bat +if "%1"=="dll" ( + echo make dynamic library DLL +) else ( + echo make static library LIB +) +call setvar.bat + +if "%1"=="dll" ( + cl /c %CFLAGS% /Foobj/bls_c256.obj src/bls_c256.cpp /DBLS_NO_AUTOLINK + cl /c %CFLAGS% /Foobj/bls_c384.obj src/bls_c384.cpp /DBLS_NO_AUTOLINK + cl /c %CFLAGS% /Foobj/bls_c384_256.obj src/bls_c384_256.cpp /DBLS_NO_AUTOLINK + cl /c %CFLAGS% /Foobj/fp.obj ../mcl/src/fp.cpp + link /nologo /DLL /OUT:bin\bls256.dll obj\bls_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\bls256.lib + link /nologo /DLL 
/OUT:bin\bls384.dll obj\bls_c384.obj obj\fp.obj %LDFLAGS% /implib:lib\bls384.lib
+    link /nologo /DLL /OUT:bin\bls384_256.dll obj\bls_c384_256.obj obj\fp.obj %LDFLAGS% /implib:lib\bls384_256.lib
+) else (
+    cl /c %CFLAGS% /Foobj/bls_c256.obj src/bls_c256.cpp
+    cl /c %CFLAGS% /Foobj/bls_c384.obj src/bls_c384.cpp
+    cl /c %CFLAGS% /Foobj/bls_c384_256.obj src/bls_c384_256.cpp
+    cl /c %CFLAGS% /Foobj/fp.obj ../mcl/src/fp.cpp /DMCLBN_DONT_EXPORT
+    lib /OUT:lib/bls256.lib /nodefaultlib obj/bls_c256.obj obj/fp.obj %LDFLAGS%
+    lib /OUT:lib/bls384.lib /nodefaultlib obj/bls_c384.obj obj/fp.obj %LDFLAGS%
+    lib /OUT:lib/bls384_256.lib /nodefaultlib obj/bls_c384_256.obj obj/fp.obj %LDFLAGS%
+)
diff --git a/vendor/github.com/byzantine-lab/bls/obj/.emptydir b/vendor/github.com/byzantine-lab/bls/obj/.emptydir
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/github.com/byzantine-lab/bls/readme.md b/vendor/github.com/byzantine-lab/bls/readme.md
new file mode 100644
index 000000000..b1efb3f36
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/readme.md
@@ -0,0 +1,187 @@
+[![Build Status](https://travis-ci.com/dexon-foundation/bls.png?branch=dev)](https://travis-ci.com/dexon-foundation/bls)
+
+# BLS threshold signature
+
+An implementation of BLS threshold signature
+
+# Installation Requirements
+
+Create a working directory (e.g., work) and clone the following repositories.
+```
+mkdir work
+cd work
+git clone git://github.com/dexon-foundation/mcl.git
+git clone git://github.com/dexon-foundation/bls.git
+git clone git://github.com/herumi/cybozulib_ext ; Windows only
+```
+
+# News
+* (Breaks backward compatibility) The suffix `_dy` of the library name is removed; bls\*.a now requires libmcl.so, so set LD_LIBRARY_PATH to the directory containing it.
+* -tags option for Go bindings
+    * -tags bn256
+    * -tags bn384\_256
+    * -tags bn384 ; default mode
+* Support swap of G1 and G2
+    * `make BLS_SWAP_G=1` then G1 is assigned to PublicKey and G2 is assigned to Signature.
+    * golang binding does not support this feature yet.
+* Build option without GMP
+    * `make MCL_USE_GMP=0`
+* Build option without OpenSSL
+    * `make MCL_USE_OPENSSL=0`
+* Build option to specify `mcl` directory
+    * `make MCL_DIR=`
+
+* (old) libbls.a for C++ interface (bls/bls.hpp) is removed.
+Link `lib/libbls256.a` or `lib/libbls384.a` to use `bls/bls.hpp` according to MCLBN_FP_UNIT_SIZE = 4 or 6.
+
+# Build and test for Linux
+To make and test, run
+```
+cd bls
+make test
+```
+To make sample programs, run
+```
+make sample_test
+```
+
+# Build and test for Windows
+1) make static library and use it
+```
+mklib
+mk -s test\bls_c384_test.cpp
+bin\bls_c384_test.exe
+```
+
+2) make dynamic library and use it
+```
+mklib dll
+mk -d test\bls_c384_test.cpp
+bin\bls_c384_test.exe
+```
+
+# Library
+* libbls256.a/libbls256.so ; for BN254 compiled with MCLBN_FP_UNIT_SIZE=4
+* libbls384.a/libbls384.so ; for BN254/BN381_1/BLS12_381 compiled with MCLBN_FP_UNIT_SIZE=6
+* libbls384_256.a/libbls384_256.so ; for BN254/BLS12_381 compiled with MCLBN_FP_UNIT_SIZE=6 and MCLBN_FR_UNIT_SIZE=4
+
+See `mcl/include/curve_type.h` for curve parameters
+
+# API
+
+## Basic API
+
+BLS signature
+```
+e : G2 x G1 -> Fp12 ; optimal ate pairing over BN curve
+Q in G2 ; fixed global parameter
+H : {str} -> G1
+s in Fr: secret key
+sQ in G2; public key
+s H(m) in G1; signature of m
+verify ; e(sQ, H(m)) = e(Q, s H(m))
+```
+
+```
+void bls::init();
+```
+
+Initialize this library. Call this once before using the other APIs.
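+
+As a minimal end-to-end sketch (illustrative only; assumes BN254 with MCLBN_FP_UNIT_SIZE=4 and linking against libbls256 as listed above, error handling elided):
+
+```
+#define MCLBN_FP_UNIT_SIZE 4
+#include <bls/bls.hpp>
+
+int main()
+    try
+{
+    bls::init(); // BN254 by default
+    bls::SecretKey sec;
+    sec.init(); // s = random secret key
+    bls::PublicKey pub;
+    sec.getPublicKey(pub); // pub = sQ
+    bls::Signature sig;
+    sec.sign(sig, "hello"); // sig = s H("hello")
+    return sig.verify(pub, "hello") ? 0 : 1;
+} catch (std::exception&) {
+    return 1;
+}
+```
+
+The k-out-of-n flow (getMasterSecretKey / set / recover) described below is exercised end to end by sample/bls_smpl.cpp further down in this patch.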
+
+```
+void SecretKey::init();
+```
+
+Initialize the instance of SecretKey. `s` is a random number.
+
+```
+void SecretKey::getPublicKey(PublicKey& pub) const;
+```
+
+Get public key `sQ` for the secret key `s`.
+
+```
+void SecretKey::sign(Signature& sig, const std::string& m) const;
+```
+
+Make the signature `s H(m)` of message m.
+
+```
+bool Signature::verify(const PublicKey& pub, const std::string& m) const;
+```
+
+Verify the signature with pub and m and return true if it is valid.
+
+```
+e(sQ, H(m)) == e(Q, s H(m))
+```
+
+### Secret Sharing API
+
+```
+void SecretKey::getMasterSecretKey(SecretKeyVec& msk, size_t k) const;
+```
+
+Prepare k-out-of-n secret sharing for the secret key.
+`msk[0]` is the original secret key `s` and `msk[i]` for i > 0 are random secret keys.
+
+```
+void SecretKey::set(const SecretKeyVec& msk, const Id& id);
+```
+
+Make secret key f(id) from msk and id where f(x) = msk[0] + msk[1] x + ... + msk[k-1] x^{k-1}.
+
+You can make a public key `f(id)Q` from each secret key f(id) for id != 0 and sign a message.
+
+```
+void Signature::recover(const SignatureVec& sigVec, const IdVec& idVec);
+```
+
+Collect k pairs of signature `f(id) H(m)` and `id` for a message m and recover the original signature `s H(m)` for the secret key `s`.
+
+### PoP (Proof of Possession)
+
+```
+void SecretKey::getPop(Signature& pop) const;
+```
+
+Sign pub and make a pop `s H(sQ)`
+
+```
+bool Signature::verify(const PublicKey& pub) const;
+```
+
+Verify a public key by pop.
+
+# Check the order of a point
+
+Deserializer functions check whether a point has the correct order; this check is costly, especially for G2.
+If you do not want to check it, then call
+```
+blsSignatureVerifyOrder(false);
+blsPublicKeyVerifyOrder(false);
+```
+
+cf. subgroup attack
+
+# Go
+```
+make test_go
+```
+
+# WASM (WebAssembly)
+```
+mkdir ../bls-wasm
+make bls-wasm
+```
+See [BLS signature demo on browser](https://herumi.github.io/bls-wasm/bls-demo.html)
+
+# License
+
+modified new BSD License
+http://opensource.org/licenses/BSD-3-Clause
+
+# Author
+
+MITSUNARI Shigeo(herumi@nifty.com)
diff --git a/vendor/github.com/byzantine-lab/bls/release.props b/vendor/github.com/byzantine-lab/bls/release.props
new file mode 100644
index 000000000..886ce6890
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/release.props
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+    MultiThreaded
+
+
+
+
\ No newline at end of file
diff --git a/vendor/github.com/byzantine-lab/bls/sample/bls_smpl.cpp b/vendor/github.com/byzantine-lab/bls/sample/bls_smpl.cpp
new file mode 100644
index 000000000..e812cd500
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/sample/bls_smpl.cpp
@@ -0,0 +1,168 @@
+#define MCLBN_FP_UNIT_SIZE 4
+#include <bls/bls.hpp>
+#include <cybozu/option.hpp>
+#include <fstream>
+#include <sstream>
+
+const std::string pubFile = "sample/publickey";
+const std::string secFile = "sample/secretkey";
+const std::string signFile = "sample/sign";
+
+std::string makeName(const std::string& name, const bls::Id& id)
+{
+    const std::string suf = ".txt";
+    if (id.isZero()) return name + suf;
+    std::ostringstream os;
+    os << name << '.'
<< id << suf; + return os.str(); +} + +template +void save(const std::string& file, const T& t, const bls::Id& id = 0) +{ + const std::string name = makeName(file, id); + std::ofstream ofs(name.c_str(), std::ios::binary); + if (!(ofs << t)) { + throw cybozu::Exception("can't save") << name; + } +} + +template +void load(T& t, const std::string& file, const bls::Id& id = 0) +{ + const std::string name = makeName(file, id); + std::ifstream ifs(name.c_str(), std::ios::binary); + if (!(ifs >> t)) { + throw cybozu::Exception("can't load") << name; + } +} + +int init() +{ + printf("make %s and %s files\n", secFile.c_str(), pubFile.c_str()); + bls::SecretKey sec; + sec.init(); + save(secFile, sec); + bls::PublicKey pub; + sec.getPublicKey(pub); + save(pubFile, pub); + return 0; +} + +int sign(const std::string& m, int id) +{ + printf("sign message `%s` by id=%d\n", m.c_str(), id); + bls::SecretKey sec; + load(sec, secFile, id); + bls::Signature s; + sec.sign(s, m); + save(signFile, s, id); + return 0; +} + +int verify(const std::string& m, int id) +{ + printf("verify message `%s` by id=%d\n", m.c_str(), id); + bls::PublicKey pub; + load(pub, pubFile, id); + bls::Signature s; + load(s, signFile, id); + if (s.verify(pub, m)) { + puts("verify ok"); + return 0; + } else { + puts("verify err"); + return 1; + } +} + +int share(size_t n, size_t k) +{ + printf("%d-out-of-%d threshold sharing\n", (int)k, (int)n); + bls::SecretKey sec; + load(sec, secFile); + bls::SecretKeyVec msk; + sec.getMasterSecretKey(msk, k); + bls::SecretKeyVec secVec(n); + bls::IdVec ids(n); + for (size_t i = 0; i < n; i++) { + int id = i + 1; + ids[i] = id; + secVec[i].set(msk, id); + } + for (size_t i = 0; i < n; i++) { + save(secFile, secVec[i], ids[i]); + bls::PublicKey pub; + secVec[i].getPublicKey(pub); + save(pubFile, pub, ids[i]); + } + return 0; +} + +int recover(const bls::IdVec& ids) +{ + printf("recover from"); + for (size_t i = 0; i < ids.size(); i++) { + std::cout << ' ' << ids[i]; + } + printf("\n"); + bls::SignatureVec sigVec(ids.size()); + for (size_t i = 0; i < sigVec.size(); i++) { + load(sigVec[i], signFile, ids[i]); + } + bls::Signature s; + s.recover(sigVec, ids); + save(signFile, s); + return 0; +} + +int main(int argc, char *argv[]) + try +{ + bls::init(); // use BN254 + + std::string mode; + std::string m; + size_t n; + size_t k; + int id; + bls::IdVec ids; + + cybozu::Option opt; + opt.appendParam(&mode, "init|sign|verify|share|recover"); + opt.appendOpt(&n, 10, "n", ": k-out-of-n threshold"); + opt.appendOpt(&k, 3, "k", ": k-out-of-n threshold"); + opt.appendOpt(&m, "", "m", ": message to be signed"); + opt.appendOpt(&id, 0, "id", ": id of secretKey"); + opt.appendVec(&ids, "ids", ": select k id in [0, n). this option should be last"); + opt.appendHelp("h"); + if (!opt.parse(argc, argv)) { + goto ERR_EXIT; + } + + if (mode == "init") { + return init(); + } else if (mode == "sign") { + if (m.empty()) goto ERR_EXIT; + return sign(m, id); + } else if (mode == "verify") { + if (m.empty()) goto ERR_EXIT; + return verify(m, id); + } else if (mode == "share") { + return share(n, k); + } else if (mode == "recover") { + if (ids.empty()) { + fprintf(stderr, "use -ids option. ex. 
share -ids 1 3 5\n");
+            goto ERR_EXIT;
+        }
+        return recover(ids);
+    } else {
+        fprintf(stderr, "bad mode %s\n", mode.c_str());
+    }
+ERR_EXIT:
+    opt.usage();
+    return 1;
+} catch (std::exception& e) {
+    fprintf(stderr, "ERR %s\n", e.what());
+    return 1;
+}
diff --git a/vendor/github.com/byzantine-lab/bls/setvar.bat b/vendor/github.com/byzantine-lab/bls/setvar.bat
new file mode 100755
index 000000000..0ff286ab8
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/setvar.bat
@@ -0,0 +1,6 @@
+@echo off
+call ..\mcl\setvar.bat
+set CFLAGS=%CFLAGS% /I ..\mcl\include /I ./
+set LDFLAGS=%LDFLAGS% /LIBPATH:..\mcl\lib
+echo CFLAGS=%CFLAGS%
+echo LDFLAGS=%LDFLAGS%
diff --git a/vendor/github.com/byzantine-lab/bls/src/bls_c256.cpp b/vendor/github.com/byzantine-lab/bls/src/bls_c256.cpp
new file mode 100644
index 000000000..a9f3412ea
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/src/bls_c256.cpp
@@ -0,0 +1,3 @@
+#define MCLBN_FP_UNIT_SIZE 4
+#include "bls_c_impl.hpp"
+
diff --git a/vendor/github.com/byzantine-lab/bls/src/bls_c384.cpp b/vendor/github.com/byzantine-lab/bls/src/bls_c384.cpp
new file mode 100644
index 000000000..d28f8547b
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/src/bls_c384.cpp
@@ -0,0 +1,3 @@
+#define MCLBN_FP_UNIT_SIZE 6
+#include "bls_c_impl.hpp"
+
diff --git a/vendor/github.com/byzantine-lab/bls/src/bls_c384_256.cpp b/vendor/github.com/byzantine-lab/bls/src/bls_c384_256.cpp
new file mode 100644
index 000000000..3dcb3e7d7
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/src/bls_c384_256.cpp
@@ -0,0 +1,4 @@
+#define MCLBN_FP_UNIT_SIZE 6
+#define MCLBN_FR_UNIT_SIZE 4
+#include "bls_c_impl.hpp"
+
diff --git a/vendor/github.com/byzantine-lab/bls/src/bls_c_impl.hpp b/vendor/github.com/byzantine-lab/bls/src/bls_c_impl.hpp
new file mode 100644
index 000000000..b38c1ad06
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/src/bls_c_impl.hpp
@@ -0,0 +1,614 @@
+#define MCLBN_DONT_EXPORT
+#define BLS_DLL_EXPORT
+
+#include <bls/bls.h>
+
+#if 1
+#include "mcl/impl/bn_c_impl.hpp"
+#else
+#if MCLBN_FP_UNIT_SIZE == 4 && MCLBN_FR_UNIT_SIZE == 4
+#include <mcl/bn256.hpp>
+#elif MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 6
+#include <mcl/bn384.hpp>
+#elif MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 4
+#include <mcl/bn384_256.hpp>
+#elif MCLBN_FP_UNIT_SIZE == 8 && MCLBN_FR_UNIT_SIZE == 8
+#include <mcl/bn512.hpp>
+#else
+    #error "not supported size"
+#endif
+#include <mcl/lagrange.hpp>
+using namespace mcl::bn;
+inline Fr *cast(mclBnFr *p) { return reinterpret_cast<Fr*>(p); }
+inline const Fr *cast(const mclBnFr *p) { return reinterpret_cast<const Fr*>(p); }
+
+inline G1 *cast(mclBnG1 *p) { return reinterpret_cast<G1*>(p); }
+inline const G1 *cast(const mclBnG1 *p) { return reinterpret_cast<const G1*>(p); }
+
+inline G2 *cast(mclBnG2 *p) { return reinterpret_cast<G2*>(p); }
+inline const G2 *cast(const mclBnG2 *p) { return reinterpret_cast<const G2*>(p); }
+
+inline Fp12 *cast(mclBnGT *p) { return reinterpret_cast<Fp12*>(p); }
+inline const Fp12 *cast(const mclBnGT *p) { return reinterpret_cast<const Fp12*>(p); }
+
+inline Fp6 *cast(uint64_t *p) { return reinterpret_cast<Fp6*>(p); }
+inline const Fp6 *cast(const uint64_t *p) { return reinterpret_cast<const Fp6*>(p); }
+#endif
+
+void Gmul(G1& z, const G1& x, const Fr& y) { G1::mul(z, x, y); }
+void Gmul(G2& z, const G2& x, const Fr& y) { G2::mul(z, x, y); }
+void GmulCT(G1& z, const G1& x, const Fr& y) { G1::mulCT(z, x, y); }
+void GmulCT(G2& z, const G2& x, const Fr& y) { G2::mulCT(z, x, y); }
+
+/*
+    BLS signature
+    e : G1 x G2 -> GT
+    Q in G2 ; fixed global parameter
+    H : {str} -> G1
+    s : secret key
+    sQ ; public key
+    s H(m) ; signature of m
+    verify ; e(sQ, H(m)) = e(Q, s H(m))
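+
+    why verification works (sketch): e is bilinear, so
+        e(sQ, H(m)) = e(Q, H(m))^s = e(Q, s H(m))
+    for a signature s H(m) made with the s behind pub = sQ;
+    producing s H(m) without s is assumed hard (co-CDH).
+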
+    swap G1 and G2 if BLS_SWAP_G is defined
+    @note the current implementation does not support precomputed miller loop
+*/
+
+#ifdef BLS_SWAP_G
+static G1 g_P;
+inline const G1& getBasePoint() { return g_P; }
+#else
+static G2 g_Q;
+const size_t maxQcoeffN = 128;
+static mcl::FixedArray<Fp6, maxQcoeffN> g_Qcoeff; // precomputed Q
+inline const G2& getBasePoint() { return g_Q; }
+inline const mcl::FixedArray<Fp6, maxQcoeffN>& getQcoeff() { return g_Qcoeff; }
+#endif
+
+int blsInitNotThreadSafe(int curve, int compiledTimeVar)
+{
+    if (compiledTimeVar != MCLBN_COMPILED_TIME_VAR) {
+        return -(compiledTimeVar | (MCLBN_COMPILED_TIME_VAR * 100));
+    }
+    const mcl::CurveParam& cp = mcl::getCurveParam(curve);
+    bool b;
+    initPairing(&b, cp);
+    if (!b) return -1;
+
+#ifdef BLS_SWAP_G
+    mapToG1(&b, g_P, 1);
+#else
+
+    if (curve == MCL_BN254) {
+        const char *Qx_BN254 = "11ccb44e77ac2c5dc32a6009594dbe331ec85a61290d6bbac8cc7ebb2dceb128 f204a14bbdac4a05be9a25176de827f2e60085668becdd4fc5fa914c9ee0d9a";
+        const char *Qy_BN254 = "7c13d8487903ee3c1c5ea327a3a52b6cc74796b1760d5ba20ed802624ed19c8 8f9642bbaacb73d8c89492528f58932f2de9ac3e80c7b0e41f1a84f1c40182";
+        g_Q.x.setStr(&b, Qx_BN254, 16);
+        g_Q.y.setStr(&b, Qy_BN254, 16);
+        g_Q.z = 1;
+    } else {
+        mapToG2(&b, g_Q, 1);
+    }
+    if (!b) return -100;
+    if (curve == MCL_BN254) {
+        #include "./qcoeff-bn254.hpp"
+        g_Qcoeff.resize(BN::param.precomputedQcoeffSize);
+        assert(g_Qcoeff.size() == CYBOZU_NUM_OF_ARRAY(QcoeffTblBN254));
+        for (size_t i = 0; i < g_Qcoeff.size(); i++) {
+            Fp6& x6 = g_Qcoeff[i];
+            for (size_t j = 0; j < 6; j++) {
+                Fp& x = x6.getFp0()[j];
+                mcl::fp::Unit *p = const_cast<mcl::fp::Unit*>(x.getUnit());
+                for (size_t k = 0; k < 4; k++) {
+                    p[k] = QcoeffTblBN254[i][j][k];
+                }
+            }
+        }
+    } else {
+        precomputeG2(&b, g_Qcoeff, getBasePoint());
+    }
+#endif
+    if (!b) return -101;
+    return 0;
+}
+
+#ifdef __EMSCRIPTEN__
+extern "C" BLS_DLL_API void *blsMalloc(size_t n)
+{
+    return malloc(n);
+}
+extern "C" BLS_DLL_API void blsFree(void *p)
+{
+    free(p);
+}
+#endif
+
+#if !defined(__EMSCRIPTEN__) && !defined(__wasm__)
+    #if defined(CYBOZU_CPP_VERSION) && CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11
+        #include <mutex>
+        #define USE_STD_MUTEX
+    #else
+        #include <cybozu/mutex.hpp>
+        #define USE_CYBOZU_MUTEX
+    #endif
+#endif
+
+int blsInit(int curve, int compiledTimeVar)
+{
+    int ret = 0;
+#ifdef USE_STD_MUTEX
+    static std::mutex m;
+    std::lock_guard<std::mutex> lock(m);
+#elif defined(USE_CYBOZU_MUTEX)
+    static cybozu::Mutex m;
+    cybozu::AutoLock lock(m);
+#endif
+    static int g_curve = -1;
+    if (g_curve != curve) {
+        ret = blsInitNotThreadSafe(curve, compiledTimeVar);
+        g_curve = curve;
+    }
+    return ret;
+}
+
+static inline const mclBnG1 *cast(const G1* x) { return (const mclBnG1*)x; }
+static inline const mclBnG2 *cast(const G2* x) { return (const mclBnG2*)x; }
+
+void blsIdSetInt(blsId *id, int x)
+{
+    *cast(&id->v) = x;
+}
+
+int blsSecretKeySetLittleEndian(blsSecretKey *sec, const void *buf, mclSize bufSize)
+{
+    cast(&sec->v)->setArrayMask((const char *)buf, bufSize);
+    return 0;
+}
+int blsSecretKeySetLittleEndianMod(blsSecretKey *sec, const void *buf, mclSize bufSize)
+{
+    bool b;
+    cast(&sec->v)->setArray(&b, (const char *)buf, bufSize, mcl::fp::Mod);
+    return b ?
0 : -1; +} + +void blsGetPublicKey(blsPublicKey *pub, const blsSecretKey *sec) +{ + Gmul(*cast(&pub->v), getBasePoint(), *cast(&sec->v)); +} + +void blsSign(blsSignature *sig, const blsSecretKey *sec, const void *m, mclSize size) +{ +#ifdef BLS_SWAP_G + G2 Hm; + hashAndMapToG2(Hm, m, size); +#else + G1 Hm; + hashAndMapToG1(Hm, m, size); +#endif + GmulCT(*cast(&sig->v), Hm, *cast(&sec->v)); +} + +#ifdef BLS_SWAP_G +/* + e(P, sHm) == e(sP, Hm) + <=> finalExp(ML(P, sHm) * e(-sP, Hm)) == 1 +*/ +bool isEqualTwoPairings(const G2& sHm, const G1& sP, const G2& Hm) +{ + GT e1, e2; + millerLoop(e1, getBasePoint(), sHm); + G1 neg_sP; + G1::neg(neg_sP, sP); + millerLoop(e2, neg_sP, Hm); + e1 *= e2; + finalExp(e1, e1); + return e1.isOne(); +} +#else +/* + e(P1, Q1) == e(P2, Q2) + <=> finalExp(ML(P1, Q1)) == finalExp(ML(P2, Q2)) + <=> finalExp(ML(P1, Q1) / ML(P2, Q2)) == 1 + <=> finalExp(ML(P1, Q1) * ML(-P2, Q2)) == 1 + Q1 is precomputed +*/ +bool isEqualTwoPairings(const G1& P1, const Fp6* Q1coeff, const G1& P2, const G2& Q2) +{ + GT e; + precomputedMillerLoop2mixed(e, P2, Q2, -P1, Q1coeff); + finalExp(e, e); + return e.isOne(); +} +#endif + +int blsVerify(const blsSignature *sig, const blsPublicKey *pub, const void *m, mclSize size) +{ +#ifdef BLS_SWAP_G + G2 Hm; + hashAndMapToG2(Hm, m, size); + return isEqualTwoPairings(*cast(&sig->v), *cast(&pub->v), Hm); +#else + G1 Hm; + hashAndMapToG1(Hm, m, size); + /* + e(sHm, Q) = e(Hm, sQ) + e(sig, Q) = e(Hm, pub) + */ + return isEqualTwoPairings(*cast(&sig->v), getQcoeff().data(), Hm, *cast(&pub->v)); +#endif +} + +mclSize blsIdSerialize(void *buf, mclSize maxBufSize, const blsId *id) +{ + return cast(&id->v)->serialize(buf, maxBufSize); +} + +mclSize blsSecretKeySerialize(void *buf, mclSize maxBufSize, const blsSecretKey *sec) +{ + return cast(&sec->v)->serialize(buf, maxBufSize); +} + +mclSize blsPublicKeySerialize(void *buf, mclSize maxBufSize, const blsPublicKey *pub) +{ + return cast(&pub->v)->serialize(buf, maxBufSize); +} + +mclSize blsSignatureSerialize(void *buf, mclSize maxBufSize, const blsSignature *sig) +{ + return cast(&sig->v)->serialize(buf, maxBufSize); +} + +mclSize blsIdDeserialize(blsId *id, const void *buf, mclSize bufSize) +{ + return cast(&id->v)->deserialize(buf, bufSize); +} + +mclSize blsSecretKeyDeserialize(blsSecretKey *sec, const void *buf, mclSize bufSize) +{ + return cast(&sec->v)->deserialize(buf, bufSize); +} + +mclSize blsPublicKeyDeserialize(blsPublicKey *pub, const void *buf, mclSize bufSize) +{ + return cast(&pub->v)->deserialize(buf, bufSize); +} + +mclSize blsSignatureDeserialize(blsSignature *sig, const void *buf, mclSize bufSize) +{ + return cast(&sig->v)->deserialize(buf, bufSize); +} + +int blsIdIsEqual(const blsId *lhs, const blsId *rhs) +{ + return *cast(&lhs->v) == *cast(&rhs->v); +} + +int blsSecretKeyIsEqual(const blsSecretKey *lhs, const blsSecretKey *rhs) +{ + return *cast(&lhs->v) == *cast(&rhs->v); +} + +int blsPublicKeyIsEqual(const blsPublicKey *lhs, const blsPublicKey *rhs) +{ + return *cast(&lhs->v) == *cast(&rhs->v); +} + +int blsSignatureIsEqual(const blsSignature *lhs, const blsSignature *rhs) +{ + return *cast(&lhs->v) == *cast(&rhs->v); +} + +int blsSecretKeyShare(blsSecretKey *sec, const blsSecretKey* msk, mclSize k, const blsId *id) +{ + bool b; + mcl::evaluatePolynomial(&b, *cast(&sec->v), cast(&msk->v), k, *cast(&id->v)); + return b ? 
0 : -1;
+}
+
+int blsPublicKeyShare(blsPublicKey *pub, const blsPublicKey *mpk, mclSize k, const blsId *id)
+{
+    bool b;
+    mcl::evaluatePolynomial(&b, *cast(&pub->v), cast(&mpk->v), k, *cast(&id->v));
+    return b ? 0 : -1;
+}
+
+int blsSecretKeyRecover(blsSecretKey *sec, const blsSecretKey *secVec, const blsId *idVec, mclSize n)
+{
+    bool b;
+    mcl::LagrangeInterpolation(&b, *cast(&sec->v), cast(&idVec->v), cast(&secVec->v), n);
+    return b ? 0 : -1;
+}
+
+int blsPublicKeyRecover(blsPublicKey *pub, const blsPublicKey *pubVec, const blsId *idVec, mclSize n)
+{
+    bool b;
+    mcl::LagrangeInterpolation(&b, *cast(&pub->v), cast(&idVec->v), cast(&pubVec->v), n);
+    return b ? 0 : -1;
+}
+
+int blsSignatureRecover(blsSignature *sig, const blsSignature *sigVec, const blsId *idVec, mclSize n)
+{
+    bool b;
+    mcl::LagrangeInterpolation(&b, *cast(&sig->v), cast(&idVec->v), cast(&sigVec->v), n);
+    return b ? 0 : -1;
+}
+
+void blsSecretKeyAdd(blsSecretKey *sec, const blsSecretKey *rhs)
+{
+    *cast(&sec->v) += *cast(&rhs->v);
+}
+
+void blsPublicKeyAdd(blsPublicKey *pub, const blsPublicKey *rhs)
+{
+    *cast(&pub->v) += *cast(&rhs->v);
+}
+
+void blsSignatureAdd(blsSignature *sig, const blsSignature *rhs)
+{
+    *cast(&sig->v) += *cast(&rhs->v);
+}
+
+void blsSignatureVerifyOrder(int doVerify)
+{
+#ifdef BLS_SWAP_G
+    verifyOrderG2(doVerify != 0);
+#else
+    verifyOrderG1(doVerify != 0);
+#endif
+}
+void blsPublicKeyVerifyOrder(int doVerify)
+{
+#ifdef BLS_SWAP_G
+    verifyOrderG1(doVerify != 0);
+#else
+    verifyOrderG2(doVerify != 0);
+#endif
+}
+int blsSignatureIsValidOrder(const blsSignature *sig)
+{
+    return cast(&sig->v)->isValidOrder();
+}
+int blsPublicKeyIsValidOrder(const blsPublicKey *pub)
+{
+    return cast(&pub->v)->isValidOrder();
+}
+
+#ifndef BLS_MINIMUM_API
+template<class G>
+inline bool toG(G& Hm, const void *h, mclSize size)
+{
+    Fp t;
+    t.setArrayMask((const char *)h, size);
+    bool b;
+#ifdef BLS_SWAP_G
+    BN::mapToG2(&b, Hm, Fp2(t, 0));
+#else
+    BN::mapToG1(&b, Hm, t);
+#endif
+    return b;
+}
+
+int blsVerifyAggregatedHashes(const blsSignature *aggSig, const blsPublicKey *pubVec, const void *hVec, size_t sizeofHash, mclSize n)
+{
+    if (n == 0) return 0;
+    GT e1, e2;
+    const char *ph = (const char*)hVec;
+#ifdef BLS_SWAP_G
+    millerLoop(e1, getBasePoint(), -*cast(&aggSig->v));
+    G2 h;
+    if (!toG(h, &ph[0], sizeofHash)) return 0;
+    BN::millerLoop(e2, *cast(&pubVec[0].v), h);
+    e1 *= e2;
+    for (size_t i = 1; i < n; i++) {
+        if (!toG(h, &ph[i * sizeofHash], sizeofHash)) return 0;
+        millerLoop(e2, *cast(&pubVec[i].v), h);
+        e1 *= e2;
+    }
+#else
+    /*
+        e(aggSig, Q) = prod_i e(hVec[i], pubVec[i])
+        <=> finalExp(ML(-aggSig, Q) * prod_i ML(hVec[i], pubVec[i])) == 1
+    */
+    BN::precomputedMillerLoop(e1, -*cast(&aggSig->v), g_Qcoeff.data());
+    G1 h;
+    if (!toG(h, &ph[0], sizeofHash)) return 0;
+    BN::millerLoop(e2, h, *cast(&pubVec[0].v));
+    e1 *= e2;
+    for (size_t i = 1; i < n; i++) {
+        if (!toG(h, &ph[i * sizeofHash], sizeofHash)) return 0;
+        BN::millerLoop(e2, h, *cast(&pubVec[i].v));
+        e1 *= e2;
+    }
+#endif
+    BN::finalExp(e1, e1);
+    return e1.isOne();
+}
+
+int blsSignHash(blsSignature *sig, const blsSecretKey *sec, const void *h, mclSize size)
+{
+#ifdef BLS_SWAP_G
+    G2 Hm;
+#else
+    G1 Hm;
+#endif
+    if (!toG(Hm, h, size)) return -1;
+    GmulCT(*cast(&sig->v), Hm, *cast(&sec->v));
+    return 0;
+}
+
+int blsVerifyPairing(const blsSignature *X, const blsSignature *Y, const blsPublicKey *pub)
+{
+#ifdef BLS_SWAP_G
+    return isEqualTwoPairings(*cast(&X->v), *cast(&pub->v), *cast(&Y->v));
+#else
+    return
isEqualTwoPairings(*cast(&X->v), getQcoeff().data(), *cast(&Y->v), *cast(&pub->v)); +#endif +} + +int blsVerifyHash(const blsSignature *sig, const blsPublicKey *pub, const void *h, mclSize size) +{ + blsSignature Hm; + if (!toG(*cast(&Hm.v), h, size)) return 0; + return blsVerifyPairing(sig, &Hm, pub); +} + +void blsSecretKeySub(blsSecretKey *sec, const blsSecretKey *rhs) +{ + *cast(&sec->v) -= *cast(&rhs->v); +} + +void blsPublicKeySub(blsPublicKey *pub, const blsPublicKey *rhs) +{ + *cast(&pub->v) -= *cast(&rhs->v); +} + +void blsSignatureSub(blsSignature *sig, const blsSignature *rhs) +{ + *cast(&sig->v) -= *cast(&rhs->v); +} + +mclSize blsGetOpUnitSize() // FpUint64Size +{ + return Fp::getUnitSize() * sizeof(mcl::fp::Unit) / sizeof(uint64_t); +} + +int blsGetCurveOrder(char *buf, mclSize maxBufSize) +{ + return (int)Fr::getModulo(buf, maxBufSize); +} + +int blsGetFieldOrder(char *buf, mclSize maxBufSize) +{ + return (int)Fp::getModulo(buf, maxBufSize); +} + +int blsGetG1ByteSize() +{ + return (int)Fp::getByteSize(); +} + +int blsGetFrByteSize() +{ + return (int)Fr::getByteSize(); +} + +#ifdef BLS_SWAP_G +void blsGetGeneratorOfG1(blsPublicKey *pub) +{ + *cast(&pub->v) = getBasePoint(); +} +#else +void blsGetGeneratorOfG2(blsPublicKey *pub) +{ + *cast(&pub->v) = getBasePoint(); +} +#endif + +int blsIdSetDecStr(blsId *id, const char *buf, mclSize bufSize) +{ + return cast(&id->v)->deserialize(buf, bufSize, 10) > 0 ? 0 : -1; +} +int blsIdSetHexStr(blsId *id, const char *buf, mclSize bufSize) +{ + return cast(&id->v)->deserialize(buf, bufSize, 16) > 0 ? 0 : -1; +} + +int blsIdSetLittleEndian(blsId *id, const void *buf, mclSize bufSize) +{ + cast(&id->v)->setArrayMask((const char *)buf, bufSize); + return 0; +} + +mclSize blsIdGetDecStr(char *buf, mclSize maxBufSize, const blsId *id) +{ + return cast(&id->v)->getStr(buf, maxBufSize, 10); +} + +mclSize blsIdGetHexStr(char *buf, mclSize maxBufSize, const blsId *id) +{ + return cast(&id->v)->getStr(buf, maxBufSize, 16); +} + +int blsHashToSecretKey(blsSecretKey *sec, const void *buf, mclSize bufSize) +{ + cast(&sec->v)->setHashOf(buf, bufSize); + return 0; +} + +#ifndef MCL_DONT_USE_CSPRNG +int blsSecretKeySetByCSPRNG(blsSecretKey *sec) +{ + bool b; + cast(&sec->v)->setByCSPRNG(&b); + return b ? 0 : -1; +} +void blsSetRandFunc(void *self, unsigned int (*readFunc)(void *self, void *buf, unsigned int bufSize)) +{ + mcl::fp::RandGen::setRandFunc(self, readFunc); +} +#endif + +void blsGetPop(blsSignature *sig, const blsSecretKey *sec) +{ + blsPublicKey pub; + blsGetPublicKey(&pub, sec); + char buf[1024]; + mclSize n = cast(&pub.v)->serialize(buf, sizeof(buf)); + assert(n); + blsSign(sig, sec, buf, n); +} + +int blsVerifyPop(const blsSignature *sig, const blsPublicKey *pub) +{ + char buf[1024]; + mclSize n = cast(&pub->v)->serialize(buf, sizeof(buf)); + if (n == 0) return 0; + return blsVerify(sig, pub, buf, n); +} + +mclSize blsIdGetLittleEndian(void *buf, mclSize maxBufSize, const blsId *id) +{ + return cast(&id->v)->serialize(buf, maxBufSize); +} +int blsSecretKeySetDecStr(blsSecretKey *sec, const char *buf, mclSize bufSize) +{ + return cast(&sec->v)->deserialize(buf, bufSize, 10) > 0 ? 0 : -1; +} +int blsSecretKeySetHexStr(blsSecretKey *sec, const char *buf, mclSize bufSize) +{ + return cast(&sec->v)->deserialize(buf, bufSize, 16) > 0 ? 
0 : -1; +} +mclSize blsSecretKeyGetLittleEndian(void *buf, mclSize maxBufSize, const blsSecretKey *sec) +{ + return cast(&sec->v)->serialize(buf, maxBufSize); +} +mclSize blsSecretKeyGetDecStr(char *buf, mclSize maxBufSize, const blsSecretKey *sec) +{ + return cast(&sec->v)->getStr(buf, maxBufSize, 10); +} +mclSize blsSecretKeyGetHexStr(char *buf, mclSize maxBufSize, const blsSecretKey *sec) +{ + return cast(&sec->v)->getStr(buf, maxBufSize, 16); +} +int blsPublicKeySetHexStr(blsPublicKey *pub, const char *buf, mclSize bufSize) +{ + return cast(&pub->v)->deserialize(buf, bufSize, 16) > 0 ? 0 : -1; +} +mclSize blsPublicKeyGetHexStr(char *buf, mclSize maxBufSize, const blsPublicKey *pub) +{ + return cast(&pub->v)->getStr(buf, maxBufSize, 16); +} +int blsSignatureSetHexStr(blsSignature *sig, const char *buf, mclSize bufSize) +{ + return cast(&sig->v)->deserialize(buf, bufSize, 16) > 0 ? 0 : -1; +} +mclSize blsSignatureGetHexStr(char *buf, mclSize maxBufSize, const blsSignature *sig) +{ + return cast(&sig->v)->getStr(buf, maxBufSize, 16); +} +void blsDHKeyExchange(blsPublicKey *out, const blsSecretKey *sec, const blsPublicKey *pub) +{ + GmulCT(*cast(&out->v), *cast(&pub->v), *cast(&sec->v)); +} + +#endif + diff --git a/vendor/github.com/byzantine-lab/bls/src/proj/bls.vcxproj b/vendor/github.com/byzantine-lab/bls/src/proj/bls.vcxproj new file mode 100644 index 000000000..b78c97919 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/src/proj/bls.vcxproj @@ -0,0 +1,92 @@ + + + + + Debug + x64 + + + Release + x64 + + + + {1DBB979A-C212-45CD-9563-446A96F87F71} + Win32Proj + ec_test + + + + StaticLibrary + true + v140 + MultiByte + + + StaticLibrary + false + v140 + true + MultiByte + + + + + + + + + + + + + + + + + true + .lib + $(SolutionDir)lib\ + + + false + .lib + $(SolutionDir)lib\ + + + + + + Level3 + Disabled + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + + + Console + true + + + + + Level3 + + + MaxSpeed + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + + + Console + true + true + true + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/byzantine-lab/bls/src/qcoeff-bn254.hpp b/vendor/github.com/byzantine-lab/bls/src/qcoeff-bn254.hpp new file mode 100644 index 000000000..18d169568 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/src/qcoeff-bn254.hpp @@ -0,0 +1,564 @@ +#if MCL_SIZEOF_UNIT == 8 +static const uint64_t QcoeffTblBN254[][6][4] = { + { + {0x8c5c1b842e501310ull,0x6a418cdaced77710ull,0xf5ad725dd0d9a5ffull,0x012d501f32362f48ull,}, + {0xb8a8a8c11e51dc62ull,0xeaeb87e0f25a8611ull,0x9ba8738e4483d511ull,0x0664a4e36d64379eull,}, + {0x4a5af38c0aa5930aull,0x189fef61a74c388dull,0x83cc3225c7748527ull,0x2107491582310dc6ull,}, + {0x43aef621120a524aull,0x359d06a56e339486ull,0xdf5ab35e2222d9b1ull,0x20968bac939743acull,}, + {0xe8e4c0bb65cd67b8ull,0x255a0859bc55ff2cull,0xf1c57d1da3c060c6ull,0x138d83468f42cc0eull,}, + {0xdf985e5f768c149cull,0xa059c65c4b5da3ffull,0xed3d38d9f77bb968ull,0x02281f01255a850cull,}, + }, + { + {0xe3f53d8cfb4866a0ull,0xa0f9a16a21c159aaull,0x647fc210c7edf3a9ull,0x0db92f588c73caf9ull,}, + {0x6e9349b777bc2cf1ull,0x4fd987eb22e2469cull,0x666644a8e61b0a0eull,0x02f5bf9aae96c0deull,}, + {0xd5fd6288342479ebull,0x74022b671c6c8d8eull,0xda32d1b497cac7b2ull,0x0abecf35a19b5c7eull,}, + {0x1500891565b5f9aaull,0x4b7ce141cd7f4361ull,0xadf3447c534846c1ull,0x078b36a30d45de5full,}, + {0x37f172cff76e4b77ull,0x696d093b3ee37e4aull,0x2193797b7da56c6eull,0x1f5fc9efcbbb93e7ull,}, + 
{0x4c7d799b765b8f44ull,0x7adfd285e906edd8ull,0x79d68eaaf88a0885ull,0x20707d672be892cbull,}, + }, + { + {0x84bbf3849c691e74ull,0xeeb90e1efc3e3436ull,0xd9d9bb6257bf19e4ull,0x1b37ef04ea7d6f85ull,}, + {0xa6bdbbe0895ba12aull,0x58cade2ad0f1aa84ull,0xe0bb325678a2c748ull,0x23d1992e977c788cull,}, + {0x44559f0b0f4bb2ccull,0xe61b479bc88980eeull,0x2a70aa9df3e28c92ull,0x18039bee97722b74ull,}, + {0x9e5667da3db8e9e6ull,0x826ba07eb28c31f8ull,0x3f8b4eeb463d6923ull,0x1af85c2b10d3a2f0ull,}, + {0x8783f372684ea930ull,0x1aa0d9e436f41ea7ull,0xc84a3fc56af9f624ull,0x0d02698756cd5a2cull,}, + {0xe47407ede7b7c2afull,0x7d665c59e37ee7a7ull,0x542b91f12e0fa2a7ull,0x2084e73dc21f415eull,}, + }, + { + {0x2aebe318f3d167c6ull,0x5a2b2364b3abc614ull,0x31b2cdfd847e0053ull,0x04f0f63eed2a2f8cull,}, + {0x0573d320ee14ecf4ull,0x4e0dc9d92e543ddeull,0x58a280570cac8d36ull,0x16226935e8e9f9bdull,}, + {0x2d51a89174717a26ull,0x7341be7f883d0806ull,0xc9b4ee66364066ceull,0x018c79b95f127b49ull,}, + {0xe5420d2f4210dbd7ull,0x179c22d607a5c801ull,0xe3aae016e739bcc8ull,0x20c554233ddd50caull,}, + {0x6c5c4b29c77bb152ull,0xc30df398c85f0f2cull,0x5d5096a07ed6a257ull,0x0790d485c22a3639ull,}, + {0x8aadc7bbf2cb348eull,0xc339d87c2118c2cfull,0x8f49e0eb46961ca9ull,0x24740f0ee2134c2cull,}, + }, + { + {0x3b80354a061dbf06ull,0x961e0dfd74b84147ull,0xeb4b27dbde455fc1ull,0x100da22e6baf58b5ull,}, + {0xb156ffc78a60a8acull,0xc873bf776b8daaeeull,0x5a702f5446bf83fdull,0x1fce59e50222949bull,}, + {0x32d7640c0f717812ull,0xc58d05abdc19ceedull,0x1e63c2a492849373ull,0x23443ce8fb2d6feaull,}, + {0x870f2d1a2e39f52eull,0x7aa53cb06541429aull,0xee7b80b7031f23beull,0x0a8a095b3fdf2cf6ull,}, + {0x4e489bd278487a58ull,0xa914d93e5ed31065ull,0x6720c32ae140db7aull,0x0c22020e6a97031full,}, + {0x7535115a15420cceull,0x2cd019bac6256080ull,0x8234c3b61757e461ull,0x24d65e78c88298b2ull,}, + }, + { + {0x1f0bdc2cae53aa21ull,0x263506a6526641afull,0xacd41097fab7f62full,0x0b2c92453d474a86ull,}, + {0x2d23a58a46d63e3aull,0xa65ff6f1f716fe37ull,0xb86dc831f970fb2dull,0x0bc3cf38a191e63aull,}, + {0xeb0ca4fdeba41bbaull,0x969cf610e1a3a009ull,0x93c5d1bad6c7240bull,0x20ad29c9a9f1d6d6ull,}, + {0x006a12a187464b7aull,0xe354d9be0ec65202ull,0x9dff5c227947f5b7ull,0x24e3dc2833ba4d2full,}, + {0x9350693ebfdfb4c6ull,0x07d8abf27abb8fc3ull,0x58f5ab0b518e5113ull,0x125f2d7d40ed8650ull,}, + {0xc9fd435af6e86f34ull,0x04dc07374f373455ull,0xd040d286d71db00dull,0x141a1253f3bc2a50ull,}, + }, + { + {0xbcfee5dad6ad33b7ull,0x8cd72df36c5b56daull,0xc2949399ad52da69ull,0x0f6ffe6d68a398d6ull,}, + {0x777dc689b038aaf4ull,0xf7a8f41c7c04e9f1ull,0xcdab24ebcea39892ull,0x0178d69b1b935d81ull,}, + {0x65a001a22be563c6ull,0xfc1b2634dc76eafeull,0xed4f6ea19949392full,0x0e4e9127957d60e7ull,}, + {0x919a1c91a123e003ull,0x23f8ec239ef8a15dull,0x0470cb40e520d6f5ull,0x0be9b58098cd0f01ull,}, + {0x735e236220cf1152ull,0x82e68710903f65b1ull,0x6c932338d29169ccull,0x0e204d6a8c7d5034ull,}, + {0xac47692ec8245f1full,0x125db7c68d7e7a9bull,0x6ead9899d3150beaull,0x1957068d4a3da4b8ull,}, + }, + { + {0x45c168b2bce7b4daull,0x63afa3b276f9f910ull,0x60af02b6be9889a6ull,0x1adad7fa35385ae7ull,}, + {0x8d35cd7e7df59aa6ull,0x13cf29589f4b84b1ull,0xec6ecff2e1540013ull,0x0ecbf75abda6eb1dull,}, + {0xf6ce05fc3becfc23ull,0xe4ac8d257a7bf44eull,0x4c12510765eeaa43ull,0x06c870a377df50e4ull,}, + {0x2f6871bdc1d62dd7ull,0x80591505c1279cb7ull,0x1322088b2719ecd2ull,0x222e71f8f5995a2bull,}, + {0x2d1a1ab198363dfbull,0x35635c96cfa670ceull,0x7d5034dd7a26c656ull,0x003bf0608625abedull,}, + {0x98ca35cf5ed8716cull,0x2265e1237bc6df23ull,0x403b67933e14f23bull,0x17bd2dadc39729fdull,}, + }, + { + 
{0x73eaf26576b3ee71ull,0x1e385de29d896044ull,0x25a0f40f08a59317ull,0x19849935bfbebeeaull,}, + {0xc124cb642102cadcull,0x15bc408ad6ca7826ull,0x2d7fb7c9392b5314ull,0x191fe8471669f053ull,}, + {0x4519ddbccb6a7c26ull,0xf93bd195baec8228ull,0xacd754a721948defull,0x12f17b60c7e426bdull,}, + {0xcf447b92b04c15dbull,0xfcb7da793167f250ull,0xcbabb4ee570c4306ull,0x190ab94c6e5c81ceull,}, + {0x66edbe6740930cfcull,0x00c8c644983a181full,0xfe9e80b984c44209ull,0x1dd6f530584a7ffaull,}, + {0x14c61214aa1a9038ull,0xc34e5e23426cf8b6ull,0x89fca910ec46ae5full,0x04f1b9161a0f7c1dull,}, + }, + { + {0x60c3a79ac91ab4deull,0x541e051ca71a1a2bull,0x490abafd41a5d25cull,0x126275c8a46cf343ull,}, + {0xe5da0fcfffccd2b6ull,0xe3820301b166bb43ull,0xc6599e01bed6085dull,0x226548dff57c5cfbull,}, + {0x36428b1296882728ull,0xe08312e604299b9aull,0x5a15c889ce55478dull,0x172710198cd7c270ull,}, + {0x2450f291477cc1ddull,0xcb0f85c9d92d1bc3ull,0x86325c11cfe0e296ull,0x13ff03a4bd5be082ull,}, + {0x74298091e426bf55ull,0xbed700b48330ccdfull,0xb1ec45894f74fb11ull,0x1716d956bea958b0ull,}, + {0x91b29e513e9a4114ull,0xcdb3b373910c02fdull,0x268e148f9431fa75ull,0x1288ec8fde3009bdull,}, + }, + { + {0x02ae4c95e0afb8caull,0x8e7aec631cf8f5dfull,0xdfd9373259eca3c3ull,0x1fed34fb88af7224ull,}, + {0xc47e420205b5c88full,0x7207ef7451d1c567ull,0x53262358433f5294ull,0x193248ecf07ad085ull,}, + {0x49de15f9bb694200ull,0xc35f531086b5c242ull,0x95a1903858cd5140ull,0x032a4992511b1f97ull,}, + {0x42ee2c4def1faaa7ull,0xf6ca28bc9d99cd60ull,0x83c60d620a1e004cull,0x024ccf0ba1568759ull,}, + {0x6122291bf42e7d82ull,0x0866090d368a8205ull,0x11f04812ad6ec708ull,0x14cdebecb4ec13edull,}, + {0x535e8fd1ac15390dull,0xb37b579abb1773daull,0xbace0a295cd4b579ull,0x215e20d42270bcb0ull,}, + }, + { + {0x400bdbc644ac1d92ull,0x6d856667d971f595ull,0x03343816a1bd40f7ull,0x0361ad7534821a43ull,}, + {0x824222acf8437091ull,0x79141c3205b1032full,0x6b4d331fc9974530ull,0x1bf965a7ba2bade5ull,}, + {0x0bf66d1afdad6063ull,0xfe6571464fe71527ull,0x3ec25815cc90ab9bull,0x132ca2d9d51c3b56ull,}, + {0x37e3ae17fb5ac815ull,0x2dfedb4efe3f37c0ull,0x4b086ea5032745a4ull,0x0f966cabdd479e9full,}, + {0xb5266c229b7ebe0dull,0xc6717a5442929826ull,0xad22a19d8892adf1ull,0x172da87fcc14d4f9ull,}, + {0xae0d9866d891bb59ull,0xc500c36e3fe7d354ull,0xc2b582f2929b23abull,0x11428eb730dd4e8full,}, + }, + { + {0x81538fef8e07dae0ull,0x3c05274665489b39ull,0x36e4401350ceb55bull,0x23822f2029f31339ull,}, + {0x9a946e7c30090ad9ull,0x5bbc4c8c656ea3fdull,0x3cc2cecb7ec7227full,0x075a6fe87014899full,}, + {0x504b2ff7fbb0366bull,0xdbf315791bc3d5e8ull,0x34b16de185c8c4faull,0x0c722a3dffe0761cull,}, + {0xe1b2c1fc3b33c383ull,0xce84d3e5182665f5ull,0xbcedf2f72de4d795ull,0x1a84c62c0c4a6f6full,}, + {0x85ebabd309ae9553ull,0x1330ec03b0ac91f7ull,0x8f42ba9c8c1ae123ull,0x24c230fae89db4b9ull,}, + {0x63ba534e151566b6ull,0x7e44c5bd39e6334full,0x06921595325d11dfull,0x217f3a4e9d6413deull,}, + }, + { + {0x25ac71f16a82e592ull,0x47846dfdcc378ef2ull,0x75c53c75b38260a2ull,0x039b9da33bf9b997ull,}, + {0x53d30cb619b09dfeull,0x566d6a55a184cd91ull,0xa589c53ae28a8e0full,0x13c05b500d5f285eull,}, + {0xd22faf3af0a087b6ull,0xd5e538653ca52380ull,0x42c893b42092e080ull,0x18f7a1bdd3badfbbull,}, + {0xdba4e6c94bb0a0b2ull,0x323d4769578ee4deull,0xbaedb0f8e01fdb15ull,0x21ca037715dcfe00ull,}, + {0xe6ccc0bc06afac14ull,0xfb943c10916b581cull,0x2d5694a4c968aff2ull,0x054a1b209a812e31ull,}, + {0x1983e59a45dcb02cull,0x71dcb184a30af740ull,0xb75b69bd5ae155acull,0x13c7fc9ace199224ull,}, + }, + { + {0xddbd6b95067516b5ull,0x29ca0360372d54e8ull,0x3e2955c1d6748678ull,0x1f8b276aafcd2c7dull,}, + 
{0x893187796c272ab6ull,0xc843325fc53fa37eull,0xbe658fac833007a3ull,0x04bdf08356fbd83full,}, + {0xa0863d3fd012aa1cull,0xb1b2c2c3c2fa879eull,0x4cd718b80433407dull,0x1e1ff82d0a23f609ull,}, + {0x0c72fdbda5da70b5ull,0xfa2ad5a7dafb202bull,0xa63ce1e889feffefull,0x030b328f5fa93e0full,}, + {0xc4a01585dc609f7eull,0xade61ef3353eda34ull,0xfa884e9a73d65e8eull,0x24750424a4543a02ull,}, + {0x54f07e883bbe27b6ull,0xfb41ed1660623383ull,0xe112647feeae3cabull,0x055cf71a930304b0ull,}, + }, + { + {0xcc5f813b041ba372ull,0x1b892909c069bfd9ull,0xdfac1a47d46ba3dcull,0x1bc553fdedaa97e3ull,}, + {0x623da812c8d71640ull,0x59b3b84486ab96c5ull,0xd77a7d970676d563ull,0x09473f20b0087846ull,}, + {0x9214acc8a6ad6f76ull,0x53e9b1713dffaa0aull,0xe66631ab33f6477cull,0x16792dc3fd2138d9ull,}, + {0x612c9ffc45facb86ull,0xd43cb433555b3da3ull,0xb0ca697731e8202dull,0x141ac2b6bfa546e5ull,}, + {0x51b480946640c6a2ull,0xc71f677b6d96bb2bull,0x7e0377527663c0beull,0x036b2f551e8c7db8ull,}, + {0x09610b7524482b53ull,0x65196312af7438ccull,0x7050f94a8a70305eull,0x06fde0d46e6c059eull,}, + }, + { + {0x707927b8fc061859ull,0xd9e38cc9ebbd94ddull,0x96eba99c855f975full,0x0c12d088d263d28aull,}, + {0xfa236e22ee58216aull,0x470b1efa73ec6699ull,0x4c5457a04dbf7553ull,0x1a1dc4cbd3ccec1aull,}, + {0x9a327665f6db6d31ull,0x6443a4f683536914ull,0x58eff845741ae1d6ull,0x0b784f2a8c259646ull,}, + {0x08cfd913a263ce94ull,0xe58aab8c6b488744ull,0x335fa717116557daull,0x137bf0016a4e4c17ull,}, + {0x0c14566b7ca1106full,0xb5fac75743cf44ddull,0xe87d1d95b95cba63ull,0x1d2823802dac3d01ull,}, + {0x445099d6807bd76cull,0x41b66837529eb51bull,0x84267670e2264913ull,0x0ed84664bb37032eull,}, + }, + { + {0x938964e622d307e8ull,0x2edeb24656039ea6ull,0x642dd6f7e2144be3ull,0x1d31590cb07cb098ull,}, + {0xe57bf1b8729263c1ull,0x48f9b371fd250d79ull,0x670ce0ee36513b90ull,0x1b908986cbfec7f1ull,}, + {0x9fc8ffb876636effull,0xd57385d67c117698ull,0x4813753691eeba7full,0x0e36785e030209eaull,}, + {0xeef1935cb4c5e8f1ull,0x1b8726a75ab06681ull,0xee973c5cd718bf31ull,0x026910b1fafe0208ull,}, + {0x8c1826b08792fd9bull,0x00325e83cb419665ull,0x9cf44c5b81265badull,0x2462a8c0fc4f85f9ull,}, + {0xa4068de0bcf85b4cull,0x5292433f89646bedull,0x05b4bdd364d3bc53ull,0x1e25be7fab47bf9aull,}, + }, + { + {0x51c27ca3424bdf72ull,0x167926750fe4d445ull,0x41985a737513c6e8ull,0x070056ab60d56287ull,}, + {0x0a23d1344dfd91a0ull,0x6c518fef27a24e64ull,0x059a8c49360f8730ull,0x0f1d38b2c12772f2ull,}, + {0xaa2a1e60b126566eull,0x1ed2add1bb218007ull,0x71385f0a8fabe78eull,0x024c0880d7c0fd5aull,}, + {0xeef5704923a38ff1ull,0x34506a9872581fa9ull,0x78152bc691cbac5dull,0x0c41086d97a7fccaull,}, + {0xb0c0d854ad72b6b6ull,0xb38455c3e3e5f457ull,0xfe665f1f4ddafb6dull,0x10373cbf9ca2add9ull,}, + {0x8a306e7799aa2605ull,0x5dbca515ad2f9733ull,0x9b8b80da928edeb0ull,0x0052a2d2f8f7b1e2ull,}, + }, + { + {0x13e3e3df198f8864ull,0xc80f05cd02b931f3ull,0x8826debe7162b2f6ull,0x1d319ece62ae45e7ull,}, + {0x313e17d4fa80fd67ull,0x82c5f606bfe97eabull,0x66f092bfa6b46100ull,0x16fde5bd28d86353ull,}, + {0xcd4e7dfcd19cfb45ull,0x026d1e42ed44630bull,0x8d6b54119bc07918ull,0x1eff361145a4818bull,}, + {0xc80d511a9a448566ull,0x9df3e33a28a32065ull,0x5a5860db779cc4aaull,0x1c226a0a4bf8c193ull,}, + {0xfe0fa440138c1ebcull,0xc32c16bd93c71daaull,0x5e053ef1a9d73a8eull,0x2105d2d85afe7c65ull,}, + {0x553c6840e4d14fdfull,0x600506d781612ff5ull,0x3ab288079ba2da8full,0x19b8f14b3e9cefeaull,}, + }, + { + {0x101f9567b577f4ccull,0x9d7dfbbb95010b1eull,0x1801c3f5ef323a26ull,0x08066f8c302be6e0ull,}, + {0x301f867187aa8cc4ull,0xdcb504ccd5deb64bull,0x7a19b8838cf066e1ull,0x1ce06a9c35aa0809ull,}, + 
{0x010a732bda3f076eull,0xf36ad54eeb0df727ull,0xe7e3ba3699eb12eeull,0x1d65654037809723ull,}, + {0xb8ff82aa0c8f9e89ull,0x39fd76e872772dd1ull,0xd0a9a0cf7b300237ull,0x21cdd8098a877d70ull,}, + {0xfff1cbe2921532d7ull,0xe919f4cbb2b62082ull,0x43858e6488e4d9f3ull,0x227d32cd853e2a11ull,}, + {0xdd7807401672de18ull,0x7e3167a195002069ull,0xef20051461812a1full,0x1ee6ee09899caca3ull,}, + }, + { + {0x18dcb2c8d68bcf3dull,0x55c30335c441d056ull,0xddcda87759df1c4cull,0x0bd72b9d00117407ull,}, + {0x53759bf204dc6ee2ull,0x5a491198ccc07fb6ull,0x21023e765d4b082bull,0x052467582f570a64ull,}, + {0xc71f8479e69bc9efull,0x1a8b07f3a7f9f4e4ull,0x4898f9336938503bull,0x210b416bb55f686dull,}, + {0x2ea76a804a514216ull,0xaed1c043402cba72ull,0x8e96b191c8508968ull,0x0a6845487a544d0cull,}, + {0x20f8a88abe36a0fbull,0xf7be80390c4df565ull,0xb4d6ae73ab0ac7b4ull,0x03dee2bd150d75caull,}, + {0x31f41f54a9d5ba23ull,0x32d8a838645e8303ull,0x1ce68866725d4d63ull,0x16eff9d7d55f24a6ull,}, + }, + { + {0xc9ef98de3048fe85ull,0x91d247303ba2cc5dull,0xfeebf32febfe0c50ull,0x12193bd2dfc7cbaaull,}, + {0x05545cc46d6e2f10ull,0x0c1885bd6a173fe0ull,0x19192206ce77ae4dull,0x21bc567dedda3bcaull,}, + {0x0289985f4f8a3e0eull,0x46a6f360ff57d0beull,0x8ecf6d8914a57a28ull,0x16fad252e99a0f5dull,}, + {0xa1ce7650862f87aaull,0x624601ad20a0a754ull,0x181fa95e1dceca7aull,0x04c7063bf6031512ull,}, + {0x47221f77cb9dead6ull,0x0b0a1f41bf04b7daull,0x1285ec2ea904f669ull,0x05d815fd67d084b4ull,}, + {0x2f4621c7c48ac6bfull,0x6c94a7fc7433ddc8ull,0xbfbc34ad00dc77bdull,0x0d420c22daa0e425ull,}, + }, + { + {0xa125bb06b8f5ae5cull,0xf130e54b42e247baull,0xa7d5d0e59b914ac6ull,0x071f28cba94510baull,}, + {0x23781cfd40419519ull,0x2ea1f31e32e9865dull,0xb81d3422cdc1a049ull,0x09b4ecf31bed5dadull,}, + {0x7cad0528d1f2ffbdull,0x4aac3a0629f7f4f7ull,0xffa90428bf6d62ffull,0x1e313094fa295c2eull,}, + {0xac9d8af47d98869cull,0x8ecebc8bdf6c41e8ull,0x859d29cb97f9f264ull,0x0c9223c674634d76ull,}, + {0x5adcabb24bf08460ull,0xbc91aaa43338b671ull,0x7abcd2f2031ec66dull,0x19b3dbaaf6fb5a1bull,}, + {0x00b0c3d6c69380bbull,0x044a0a413e3aaea9ull,0x48d820b0f17d1ac2ull,0x1745bb82ed277652ull,}, + }, + { + {0xd921b459e78504aeull,0x79ef5733fecdb405ull,0x04020f6200148defull,0x1163b626e015e688ull,}, + {0x0781fcc9b627e44bull,0x5d8c6c8944d557a6ull,0x5493d9920c1d32fcull,0x0ecdc7510a2f454aull,}, + {0x7086854c556b47fdull,0x4ec3f6dd8ad274dbull,0x274e92925edf85deull,0x09e6aa987250022full,}, + {0xa63453a7eb3a8fb5ull,0xbd83f1e026f71f82ull,0x1386ec55e6450e3full,0x00873f000047164eull,}, + {0x179dbc93073fcb3aull,0x592c5c9b8baf6411ull,0x4b81a7b27b4d9070ull,0x1d26ead51df9a20eull,}, + {0x6a244f14dc36671cull,0xd1e9d82e3c5bea31ull,0xbd883c1283d17771ull,0x1e09e59618c6163full,}, + }, + { + {0xc1b0578027cdeed9ull,0x7ad19ad5cb04d6e5ull,0xee6f7f36d5ed1465ull,0x01d616ac45e80f5full,}, + {0x2c0c7df57e945feeull,0x9709cf12715b87afull,0xa6e99327a9e2f868ull,0x1dc75e316e45b2aeull,}, + {0xa7bc3e0832276f4bull,0x36ed99677fa22ffaull,0x89da95557e5dd91eull,0x0c1f4bf5d672d3b9ull,}, + {0x25624941c1047a5full,0x463ccb3bd3fce3b1ull,0xd115fc8570096682ull,0x17145e34ff1d0e9aull,}, + {0x4a3a34676a6a378full,0xac89a12198b0ca1cull,0xb97a2d982319e20eull,0x0caf54593dcf42e9ull,}, + {0x7a07a3d321faf4daull,0x6a062e2ec939fd56ull,0xfd7ac47f692009a9ull,0x1121561f1c332cd7ull,}, + }, + { + {0xcfb495c8f564f52cull,0x39665331e96c838bull,0x42c49998a1446b14ull,0x03cc4e294cff3ff7ull,}, + {0xd41d69b42b557d10ull,0x98dab8bd722a39a0ull,0xd4e24c4add54c81aull,0x1344527908d19fa6ull,}, + {0xe9648caa7c8da128ull,0x8497aa165fdee967ull,0xf437d75fab691b76ull,0x052cbd6eb6436a4bull,}, + 
{0x389f7092e280920bull,0x9b8625c09555310bull,0xe91f49f9d9031898ull,0x1c95a9d881b18be8ull,}, + {0xe8605b4d2212b1fbull,0xb1c4f57736dbf0c3ull,0x8a90c4bcc09cad9eull,0x12f03ba47d2620d4ull,}, + {0xcbd4494a5830ba3cull,0xb5a5d7b6b635fb6dull,0x154076781060c57aull,0x14e27241d5bdbe5dull,}, + }, + { + {0x5545df3af64ec9c4ull,0xff2adbc37d224acdull,0xcf02fc8672ce69ffull,0x0a7fcfe0b85478f0ull,}, + {0x402246e5d134054cull,0x0bd5980440304ad7ull,0x3df09979193914b6ull,0x22610927d3977e51ull,}, + {0x08235659dbd58c8full,0xd159c4e705d2f6d9ull,0x3c5ae22b53836251ull,0x137039c4b43f1c9dull,}, + {0x4ee6c2b196d188bbull,0x54ecda987459243eull,0xb3a9cfbf1aea2748ull,0x234243a4a87cf61eull,}, + {0x248eec552d9a5ef7ull,0xc8a98bee264e9e26ull,0xf3bcd8c268d0c073ull,0x16e365499a23e913ull,}, + {0xbb406c86a8f7f2d7ull,0x03426cc36d053972ull,0x047915ec9f472c4dull,0x2318c0030bfcee73ull,}, + }, + { + {0x3c783caa5308c82dull,0x81bcacdec8f45662ull,0xe169822ce2c0837cull,0x09c179836e05b980ull,}, + {0xf5d882cd842d337full,0x861761db32052e52ull,0xd6721854e7e686f2ull,0x0d22ec35de13a291ull,}, + {0xd9dd477876f2c6d0ull,0x5ef6dd9d6e4eb6b3ull,0xa22e8bf49d19a102ull,0x1fb12cb296762e6aull,}, + {0x8372df5211227b55ull,0xc3994286779c5c02ull,0xa302f7b3be87ac5bull,0x22b842b9b918d821ull,}, + {0x2cb75b8cb17911a1ull,0x5cd8f56c7f4dacf8ull,0x09874f95dd87d8d6ull,0x15b92554f1bdb068ull,}, + {0x4786ec1f88a80264ull,0x91dc53364f6aec54ull,0xbd9bd414e46eb290ull,0x1b27b7fd99d5e212ull,}, + }, + { + {0xbb40271789b4bb9cull,0xddf3b8f645386314ull,0xce090cc4ffeabe23ull,0x0c3920ea76b361f4ull,}, + {0x14c64e1eed2b5edeull,0x99c5289af2511b43ull,0x5de1d7b1dccb2575ull,0x0b5e4419ad2e1c52ull,}, + {0x0c04995f7bb764c0ull,0xbd9eb56e1c742072ull,0x9009271bd281dfd1ull,0x2464821365b75205ull,}, + {0x49724e13fe376d0cull,0x189fb55cbe1abfc2ull,0x80162bfa5b8980d5ull,0x1a96550a3916c5caull,}, + {0xcd79e4d9633065d2ull,0x2b51887668a49a0aull,0x8785b375ac581035ull,0x10a5547822c082bfull,}, + {0xb98da2585b65ccd3ull,0xa8015a03bee86a26ull,0x2eb6a1e1bd1cdf1bull,0x07bf364897d1c8b8ull,}, + }, + { + {0xb791c26545931abcull,0x9a1ad86e4fda79aeull,0x06855828328d0314ull,0x116650fafca899dcull,}, + {0x28a52543d8cb599cull,0xbdd390c86fa4fb40ull,0x903fff92c56629c6ull,0x0b496e3e73b93100ull,}, + {0x0f5622574884b369ull,0x48dc4ad8ee6e6c07ull,0x9bf8705b75932345ull,0x12fdae5ddc53fccbull,}, + {0xffbab25f3f4dbcc5ull,0x2e29054e3b0c795bull,0x4e42d9554507c4a9ull,0x0100c6ddccafa66full,}, + {0xd070c555e094dddeull,0xc33dd5eda3c03e59ull,0xaf83e343a270dd9aull,0x098aee3da1fa8162ull,}, + {0xad02918dc6d1048aull,0xf04903a09f8c1e95ull,0x51622aaf4848d918ull,0x1ded54a06c3901a3ull,}, + }, + { + {0x407e49d022ba5897ull,0xdb8d26843eab7b0full,0xf976a1b95413e184ull,0x0aec3abccfa3f870ull,}, + {0x5a796987e2623f7bull,0xf9ab67105d5e1b46ull,0x9d9d00cfaddf51aeull,0x1be8e30f8202ab70ull,}, + {0x793be4982c00e681ull,0x903759a9286f8a57ull,0x16a3daf170f851afull,0x13cf0c29956077fdull,}, + {0xfb5787f1092904dcull,0x9a7422c14149238aull,0xe8e61be7e9ad1fc9ull,0x10029d3e967eff2full,}, + {0x4a4887f39a050b1bull,0x2b7f2e2d718b7fa5ull,0xdcf39f9d5e4ccc87ull,0x0e9ae22b93f3c46cull,}, + {0xe2085144d647649full,0xbb22757ff04f1a8dull,0x39c323e34631d9f7ull,0x04865b0a1462c9b9ull,}, + }, + { + {0x684266fdd1482bdbull,0x49a7895fd6b87933ull,0x28476e848c17b925ull,0x19e95e89691c4ea5ull,}, + {0xe9a6a6bccaf53a2dull,0x479cccded58ddaccull,0x16049a3fd6291256ull,0x07364abc39086c40ull,}, + {0xf24da0fc6d7e4b82ull,0x29591202c08178e9ull,0xf9b5dff7dc07aae1ull,0x0ed06afda0a02f78ull,}, + {0xcac1c41fcc1f702cull,0x52b029719b5224f2ull,0xc838b665539d0364ull,0x246b61674cf835aaull,}, + 
{0x44068b26b9dce8e0ull,0x6b3a0b0e83a7c8b9ull,0x03feca47fb021110ull,0x10d9d6e7fbc944eaull,}, + {0x3a39ad7da63fd6fcull,0xaf3e9dde8885823full,0x31511af0a15648cfull,0x19de25d493f0200aull,}, + }, + { + {0xd4fff38e62012c13ull,0xae59ef30122850ffull,0x9d23a0381a012cf6ull,0x120ae1d814828c1full,}, + {0x42eb1c5dfbf07103ull,0xd254f031490046f0ull,0xb47882ae239b8ae8ull,0x11158120470a13baull,}, + {0xd5144f9267a09051ull,0x66da90aae84bab57ull,0x586fcfe6e1dfc445ull,0x221e49ed2a16e941ull,}, + {0xf467fe034d6cbdccull,0x7ac29c1d1e5e20feull,0xa110e6e05eb1585aull,0x23d954fcdf786a64ull,}, + {0xc1ae9be330026938ull,0x874b19ab11339205ull,0x0964cbafa59f62aeull,0x1e6167f38349f253ull,}, + {0x23efb445bd9ef627ull,0x897335bf70b7bcaeull,0xa00f86ae69e47650ull,0x2509e8fa87d5670bull,}, + }, + { + {0x22a00ec33abc6b8eull,0x09620addb21d394full,0xb965fdcb7ee143dfull,0x1febe6994e628a7bull,}, + {0x1c710a901e98b013ull,0x2801fd688f4dddf6ull,0x0abcab0ebadf8343ull,0x10f0cfd199338d92ull,}, + {0xd599e818b6e83ff6ull,0xb88539365c679f3eull,0x0313ce19b529a51dull,0x21f5f0b9f1cf3415ull,}, + {0xb59034f3ef13e954ull,0x6883ab623a40da9dull,0x94faebf81576de70ull,0x14d2247af37a0cceull,}, + {0x99757d5184162b77ull,0xf79b9dc74871c5dbull,0x608ad4501b03300bull,0x074149d915458798ull,}, + {0xa3252b36c3eda717ull,0xc1ded9f245002540ull,0x14b5755b56dac7b3ull,0x19308239f6756bf4ull,}, + }, + { + {0x07f4f5a6f26b067eull,0x32d2eb865477dbdfull,0x6945cbc86ac200a0ull,0x1e6311fd6ef61d2bull,}, + {0xa0d0920425c68e5cull,0x683d1987c8fe9e5aull,0xd7228b5e41a381faull,0x114a05f6a9f409b5ull,}, + {0xf677d47e68eeea17ull,0x87f50243b30d3112ull,0x084cf054770d8dc4ull,0x0bc9fe9990a74fb5ull,}, + {0xf22bdc5dc2eec0d2ull,0x3bae3de98c595ff4ull,0xc95e53073fd0b23bull,0x11a7e2b2d55a6ea2ull,}, + {0x8ddcbdbb83b870baull,0x728950ad96866c71ull,0xd145c1d31fae9c5cull,0x0547d0e831e70104ull,}, + {0xead79bef2b2433d9ull,0x0647d5966623bf56ull,0x4fb0056ba69d7958ull,0x1a0983813c5d2e9eull,}, + }, + { + {0x215a5a20e15d19d2ull,0xae9ceafe33084b69ull,0x80f85025ca380f77ull,0x1c19066c196d1a00ull,}, + {0x359cfc6bc545de2full,0x7339f8704a758d60ull,0x64eca98cd5f2d7edull,0x248ba44255247839ull,}, + {0xc2c6e70b389e8492ull,0xc9b97f7a19d874c9ull,0x87d7b9a332957727ull,0x0119950fe431afe3ull,}, + {0x51eeee98aaf4581cull,0x081de6981f8512e1ull,0x4bb18cf097ac6997ull,0x21e465b23c21951bull,}, + {0xe5bc584a9a1f5a1aull,0x1ccc4b14286b7ad9ull,0x435b382aeb470e64ull,0x1f9ae9143c5b987bull,}, + {0x990eccb3248cd3d9ull,0xe6cfbcdbd8c8fd0bull,0xb48de18c5009802full,0x198d98c5412a6213ull,}, + }, + { + {0x43cd5d8c9073ea61ull,0x5174db54059acdffull,0x45e871c04aa7a2ddull,0x05e16d3199d840a0ull,}, + {0x9ad1091f764df938ull,0x67637f20a74490b7ull,0xdbd73b8487d04861ull,0x15a139abaa8b478eull,}, + {0x1b10547972b4d507ull,0xf641d3763db1a739ull,0x15597787c5b84ec3ull,0x0134b78ebf335c12ull,}, + {0xf6b7a9d4052963daull,0x2d806855d9466783ull,0x623658a8a2d743dcull,0x00de0208fc0298b1ull,}, + {0x1b67ee84e8c40714ull,0x620107f4c2393038ull,0x96441ca3a07baeeeull,0x0b27368271b0f683ull,}, + {0xa65922c66ed876ebull,0xdc21179aa8971bdbull,0x9309a00b5206e041ull,0x088fc38497bf88ebull,}, + }, + { + {0xee8bf43d2fc34584ull,0x4ff6772e8da82b6bull,0xa7ae3c97dc955a78ull,0x09651f34f9ad7ab5ull,}, + {0x103de2e1906f8fd3ull,0x046ca4e6b276642full,0x220398cd397af5fdull,0x07b984811b0df962ull,}, + {0xd0519e42b872b7aaull,0x164acb4f7d9df94dull,0x54cd157448c94337ull,0x04c636efd3f59641ull,}, + {0x7cf41f52f0acc90eull,0x54dff80755d46315ull,0x83a7e3f528daec19ull,0x0039b02577bb91e6ull,}, + {0x828eb12b537a9732ull,0xd81ce0f79c6211ccull,0xcd2fd2f2e35379adull,0x1e84fa2068841dd3ull,}, + 
{0x931aef70f9a3a06dull,0x71abc5af88fa12caull,0xa70ddb3102a75247ull,0x14a049c881169cceull,}, + }, + { + {0xa9975bec6d3f0412ull,0x72feab9fdc81092full,0x49f533cdb7ae9d66ull,0x18632a2c4c5b4d2dull,}, + {0xaa9f81eeb706ca09ull,0xb1065065a3fe5198ull,0x3381765974ac94a8ull,0x0ec5d52c65b1f5e0ull,}, + {0xfe465050a5cd7ab1ull,0x5059fae63d47120aull,0x49ad1fd731ef0aebull,0x1e018673e33f45e5ull,}, + {0x6eebdeb52c24d248ull,0xa43988a55ccc8d10ull,0xe997fafe55d0ff64ull,0x233675abd5ad14e6ull,}, + {0x8b5530b175fbeaadull,0x27ba08984164ed08ull,0x94a9507d0189809dull,0x12fb832d1d13901cull,}, + {0x912ff6e6cf0c29f4ull,0x54d7a43121bcd1afull,0xcdf9fb448a1e2185ull,0x02aac1a8e253b8f9ull,}, + }, + { + {0x26a581d7ca270a84ull,0x989bddaaecea533cull,0xda7993327a4b8cddull,0x0c1637ca7d045160ull,}, + {0x6213cd9db7a6d076ull,0xc03037d124aded7bull,0x32d9e1bd41523d2bull,0x008ea641abbe75edull,}, + {0x7d3c23b227774f03ull,0x4a5e7805e6f9a14dull,0x1c24f1a43d487e79ull,0x18eafaffc703509bull,}, + {0xe146113f559bd9efull,0xe56825b1a7fcf7f5ull,0xa93689399f819fceull,0x14fa96013c5a6638ull,}, + {0x81c625bff8857fe7ull,0xc98edd68e7203a68ull,0xc88c3a681a3f1ac1ull,0x0bd4fa57e9b6d9f4ull,}, + {0x2dd6eb21127b1fefull,0x91b039a57e0f6233ull,0xd02548bc3dc3c783ull,0x0e8a4d19a777a688ull,}, + }, + { + {0x025c54533652a519ull,0xb3bcbf01559e8920ull,0x5c53eb97c55f25fbull,0x22322b9402949dccull,}, + {0x260ef92c70dd5c11ull,0x9e27626b6cd441acull,0xc6661507ed6f5d61ull,0x0fac1fb2f6bb53edull,}, + {0x5511ab3bd7ea4c51ull,0x6562a46409240916ull,0x83a5e441731b870dull,0x205c0c853ef83501ull,}, + {0x7c8ae57f4deec828ull,0x349dd08555bea497ull,0xcb5d3234c7b839bdull,0x153259da7d31993eull,}, + {0x964b508f6fa5bb3full,0x82b5262f18242750ull,0x970156d1896d43c2ull,0x028fc28439e44783ull,}, + {0xda5afd0f1a7d7fcaull,0xddb473f9a75a7a4cull,0x180c169ed34f6781ull,0x0cde138f3279be8bull,}, + }, + { + {0x63de6da225c321ddull,0x4832886b582d3833ull,0xb0dee708e55cb53bull,0x06c9e933c223ec30ull,}, + {0xdab1fab5dd78e263ull,0x3e658d3d9ec3bb7full,0x3d0a56ca4a1b088cull,0x008ce74540e8386dull,}, + {0x0b0ee452fc9bca4bull,0xfd0b0e032d16b266ull,0xfaeea7076b32cc91ull,0x1823f6048f88ea5cull,}, + {0x3966dc6553a5ff08ull,0x85192338024e75e5ull,0xff2cc296f92beee4ull,0x229caca8d4f809ffull,}, + {0x7702729e0d1f5157ull,0x1a3ac2432384d0bcull,0xd006954b39b11e9cull,0x118a5126dec2a2faull,}, + {0x2e9bfe6eaf026413ull,0xc720a61aef11d653ull,0x6ea67c87c36691a3ull,0x18f925014f9c61d4ull,}, + }, + { + {0xd3b27621ad1dd1abull,0xf97b0f55f22f18c9ull,0xb6113e8be6db1114ull,0x1a8a1ae8f65ead1aull,}, + {0x0007a32980115669ull,0x605196cb02f760a8ull,0xfbd2085c8671df43ull,0x0c381e59ea5960d2ull,}, + {0x94116d83a9603b67ull,0x92b23f61ccedfbbcull,0x50e0fc7e78727f5eull,0x23fc01a1d8cc7e65ull,}, + {0xd1b8a0d5024aff36ull,0x2b25d1cf4ab60e92ull,0x8dbbaf91e20c91fbull,0x185a985f30c061fcull,}, + {0x06fe112b333faa7aull,0x9323dbd6f08549bfull,0xcf5e43f668844df0ull,0x10df0c27f29e1637ull,}, + {0xf2afbd9928527e7dull,0xd856c6d7448b34ddull,0xc5e025621b375c86ull,0x01b0fe70c9b177dcull,}, + }, + { + {0xf09e65fdda5bf41cull,0x59ef2a8eb45985f0ull,0xfec4facae20ae75full,0x019f623d519953a8ull,}, + {0xd5dc50c38c7e165eull,0x62fc39995a53fcf4ull,0x557a7e55f3ae1284ull,0x0fde40ac729d9ca2ull,}, + {0x4b49ba1f5fcea25aull,0x631dbbd1d4e3cea5ull,0x7069fcd00919239full,0x09c559fb76aa0dbcull,}, + {0xbb6348d2d3a8d733ull,0x460c7255ba85e5c1ull,0x42e7d9808787c01cull,0x22c0fd2eef2261e2ull,}, + {0x19833887b93cc3abull,0x2cee6551569164daull,0x1c44fdcd7b0c79dbull,0x1807ed58575a7b33ull,}, + {0x30713388923e3b7eull,0x6d541ffc75d914c7ull,0xbbb50245851f0f6eull,0x1df0abdb9048edc2ull,}, + }, + { + 
{0x62788c325d2b0f0bull,0x33744819eb512733ull,0x83ff060d6ff7309cull,0x18829912bda99968ull,}, + {0xe09edb24cdbdfc6bull,0x099200c5850fc442ull,0x967790a56049a66bull,0x011cd382712b1d77ull,}, + {0x8df4e975f64427d7ull,0x2e3901a3a7b0f55dull,0x641ec6f45805e402ull,0x06e1d0db4022cd43ull,}, + {0x440dbd8590564164ull,0x6aa7d9c34c053da4ull,0xe0da2752be2f5aaaull,0x2264f00ad93d3d4aull,}, + {0x716e5f9a7e68031full,0x1bcb15653094bebaull,0xf84ac39bc138e963ull,0x1d7a1fc06adf5b63ull,}, + {0x8835962eb2e3079dull,0xc3d7c9d41261e319ull,0x30c0c53b9353bf58ull,0x03bf957dd1541c99ull,}, + }, + { + {0xe77e8891944694ccull,0x04efd57869ed85ceull,0xe9de08ffa6a88729ull,0x1d062265f1d299d3ull,}, + {0x387dab533dc83cc8ull,0xf7fa09c0bbdf31b7ull,0x59b84e1a3762d3b9ull,0x01b32660eab7f6beull,}, + {0xf7daf1d596d17df2ull,0xcd931e51341e0ebbull,0x51710bb172705525ull,0x244d6b81dbc7d273ull,}, + {0xe7a144e6eefd2dc8ull,0xf5c76e992d995cabull,0x477afe1374a66f3cull,0x1aebe5717b54fe53ull,}, + {0x541a0d7dc825c3b1ull,0x93a0cab475598133ull,0x096efa1eb12a99feull,0x17a85ece29f273fbull,}, + {0xa36f4f86b5bc5c1bull,0x1b4a0fc57947e76bull,0xaf302e3f7838388eull,0x06aadb4991feff1full,}, + }, + { + {0xd6afd4710167605eull,0x1897263cb81c98e1ull,0x90e133c23eb0207eull,0x0718643da3a96ba2ull,}, + {0x8344e521afad71f8ull,0x66af04f81ad9f156ull,0x5ecd25d48f404733ull,0x0234ffcdbb42d141ull,}, + {0x8a50c65ef686166dull,0x34cdda95694e0cacull,0xa8add01d08d2dbaaull,0x1ce98a7c6ceb5696ull,}, + {0xb1702710fa0af484ull,0xe30a4eb2f39aa3f1ull,0x7409d5afcd96441eull,0x1e0168166b2894d7ull,}, + {0x8cfa29792abed76aull,0x75d7bfbcee2073efull,0x7c0372e7080fdaedull,0x1ee8cc19eb967336ull,}, + {0x2a265f9eb8f2265eull,0x48f9b13b07b728f5ull,0x7b915e1225774e84ull,0x0d4eff23e23d5ae3ull,}, + }, + { + {0x13cc952b1ef56e58ull,0xeb3870335e75a7c9ull,0x2fe15087e3c0845bull,0x1011a2007bc71f04ull,}, + {0x472e18f407707bbbull,0x053d1dd70cceea98ull,0xe200cdc8798603d2ull,0x0bddb233bffdfc1aull,}, + {0xec920181b8484410ull,0xc6b9a9b74e18f513ull,0x84c1695c77cf9fc1ull,0x01005eda69cae7ceull,}, + {0x7c668bd94e95d9f5ull,0xbaf12b0a06fcd749ull,0x674b2e2824d6029aull,0x23c9d63fdca6307aull,}, + {0x92bd96dd3a545dceull,0xccb9355edd49cadcull,0xf49ca3d068b74eb3ull,0x1d9461936f823b86ull,}, + {0x6a2fa39fa7e93bb3ull,0x468fac8c8f151f41ull,0xd12e0aec4bb21bbeull,0x2326bbeb4405b3ebull,}, + }, + { + {0x1e029295309f1347ull,0x6589babde3a80cdbull,0x74de96ccf73da639ull,0x125810442f8c9fbaull,}, + {0x47d63700da3a6cefull,0x59c3fd0f2b9b6f35ull,0x66f1979c84873b7eull,0x02770c35ac617c99ull,}, + {0xa757e064e4f9edb2ull,0x46eb13ddfbda28f5ull,0x519177520a694aabull,0x04f6097d775debf9ull,}, + {0x072be9865dd6841dull,0x4d9d5c0fa6d6a7b1ull,0x1749ea911a952c21ull,0x15e98445e982607eull,}, + {0x6fb1b6845ce93f6dull,0x52d5387b1a0f8405ull,0xd6a11cff22d72a42ull,0x2283db33f8496ec9ull,}, + {0x77bae4ccdf2e5bf6ull,0x21812c170f736a30ull,0x5a8477a3203036fbull,0x1e667d8ca4a419f4ull,}, + }, + { + {0xfc925115198c93d4ull,0x0aebd45cf3b16db7ull,0x2f7c3d2ab0f16732ull,0x1c4b48273365c9bcull,}, + {0x2a26617f1f00e47full,0x828f68381a20ae68ull,0x0221e65b7f01b6e8ull,0x19e45e14ca4e5650ull,}, + {0x231de599fda4c7e2ull,0x55e6d0d3df2457abull,0x34f961f715fddd4aull,0x0e97e5f5fbfe6aecull,}, + {0x8f1f1a8b1b687949ull,0xbcbdae7ed35524edull,0xd7c78090035aa0b8ull,0x19f2a0d7fb844166ull,}, + {0xc397557bba8fe6a4ull,0x366daf415604f8f6ull,0xa9b99d86ac93e705ull,0x21fb72d548929de6ull,}, + {0x6a2ff9d0392aedf0ull,0xb0a90a0d10fb8fb2ull,0x5ef8e1768350ba26ull,0x24aca64027557318ull,}, + }, + { + {0x18e3eeb6b8937690ull,0x7c87ee4ffda9eb41ull,0x59d0d9e9eb070efdull,0x10b64beb52f348f5ull,}, + 
{0x60cb09b15da28d99ull,0xde4b5aaff3981423ull,0x7429b4169dfddfb9ull,0x199eb1a7a6de0f9full,}, + {0x450661858d54325eull,0x338439f5a896f88cull,0x9d41086dd111bec0ull,0x146d0b19b0b567ddull,}, + {0x93a470115d0544ceull,0xdbec88b263d6ba96ull,0x4162857e9d97ef77ull,0x07a4e45e194880aaull,}, + {0x7279bdde87e7ecb8ull,0xbfcc34d54c72df15ull,0x57d3ff1a2476f6c9ull,0x0f0da2351d32d405ull,}, + {0xffee1be1efc73104ull,0xb873a987a8076cb4ull,0xce026a94aa6b71f0ull,0x15d4bd558bf59554ull,}, + }, + { + {0xae631a8d76bd7f86ull,0x7e7d9176acbc845eull,0xea421fd87eb8808aull,0x20aaae552a029015ull,}, + {0x5c1c015cfce07393ull,0xc678b97a85aea9b0ull,0x1eea5259304f0a23ull,0x1464e4d058ceb8caull,}, + {0xc65d3f2d4e51915cull,0xeedd92d9fe368d68ull,0xc8df47e3a123fc9eull,0x0a40dfad54ccd6aaull,}, + {0x09a262e9428a05f8ull,0xa0510048ec69ab80ull,0x335a295aecb01ddbull,0x05d9e955d5b1a89full,}, + {0x5eb68ea11c52c37aull,0xe444556824dd8a88ull,0x8e380018a6aeef10ull,0x0442ce4eda39623dull,}, + {0xa77e431b883ec5b0ull,0xac34fb82921e9c20ull,0xa8cfc2d08ef8cfc0ull,0x24ae732a4db3bb4full,}, + }, + { + {0xd5563857f984777bull,0x538e5c618a4be3c1ull,0x5f8eff3fbeab5a7eull,0x017bdafb790e0102ull,}, + {0x6a62e076dc44c251ull,0xd4743cd8eb4cb3dfull,0x98f0d5617f07650full,0x0ef52eb4c0151010ull,}, + {0x516284d618713c13ull,0xe651d8c5769b47dfull,0x27fb0f16b90bfbdaull,0x10e729bd4403fe24ull,}, + {0x7770b670be42c842ull,0x6a9d9db10a3626b9ull,0x17676416c44a62ebull,0x2155a03fd59945caull,}, + {0xcd58941a2ba1e208ull,0x2d5e3caf14827df1ull,0x6e8dbafadc4e1635ull,0x03bbd3e6d397465aull,}, + {0x451703d643a411bbull,0xcca0c1d97355c175ull,0xc5074f56618aa2f1ull,0x04c8acdd37ef602full,}, + }, + { + {0x3f7e0caeff75a1d9ull,0x1b753ba68a2b8451ull,0xf46aeda408dbf4f5ull,0x11652b99c4365b3full,}, + {0x3f8bf5f03132d146ull,0x0b527b11a12d2424ull,0xd587034aa3632352ull,0x13ffef8175d1a563ull,}, + {0x2a30747e4ac8eeaaull,0x0aea36171552eed3ull,0x04e341313ec7b422ull,0x1fb62ea6d5e86357ull,}, + {0x13c69094d2dcc5aaull,0x54573685ddc44032ull,0xd95abdd392375f10ull,0x13a501913c2f1d0full,}, + {0x343cc1b0318577b8ull,0x98776ba96045eb10ull,0x5492dba5b5936d5dull,0x1d1bb567d6a602e6ull,}, + {0xccf58e05f8b305bdull,0x3fee26e8419548ceull,0x62c64af67fc27dc8ull,0x08456a814b2fe18bull,}, + }, + { + {0x47f8ccf69457895aull,0x66d08f143ca062fdull,0x8f0df2e2a97b4518ull,0x0cac6d2b34b243d6ull,}, + {0x758f56a94a45e6beull,0x63ed30c20cf6721cull,0x20e942550629c9ccull,0x167acfffb8203274ull,}, + {0x8e727dabacc57eb3ull,0xa2f85144ebbe15f3ull,0x7fc17e7a0a6a4291ull,0x1793c43f349e48b8ull,}, + {0xed2f91d056a5c2d3ull,0x30433d773122e8ddull,0x2c3fef6399c4f9deull,0x099b39a0e3e524f2ull,}, + {0x4cddac568a4b563cull,0xdcd1c44d3983138dull,0x2f421d9f8d71a88aull,0x01a02cb6459cdb12ull,}, + {0x68c09ced7ae8977dull,0x76cb2bf3a933cdaeull,0x6390cd95c4f85d40ull,0x1cad79870e6b2c2cull,}, + }, + { + {0xfd754584dcb80db2ull,0xb73ea36e2df2b8c0ull,0x3ca5645bffb60c04ull,0x1280d1e1f4dd4da6ull,}, + {0x75a069b69ae4403aull,0xbbf6c5ded1f82c60ull,0x34919f2295d7b5b4ull,0x1f7bc94e3a96507bull,}, + {0x9255ca27cb288f9dull,0x760719cfb400f56full,0x291bfbf807781368ull,0x15fa25b272fee67eull,}, + {0x6054f038190f5f6cull,0xe0978a57792a09bdull,0x1ed22ba69556fe50ull,0x20ba270b20baf856ull,}, + {0x55de530a1af249d0ull,0x249e57b2414ceb2cull,0xd98bdcde7f16edfcull,0x0ee1bfb7da744ae4ull,}, + {0x01b24c4d0bb96ddfull,0x32239e98244d75f0ull,0x20dc68759c157d45ull,0x0120769b781bc14eull,}, + }, + { + {0x4f93886e58c4695full,0x85d6a1914aba1d04ull,0x65bb00f8cf495806ull,0x22a2413c698ae97aull,}, + {0x5e7928222bb02f69ull,0x93a92c850ce1dfb0ull,0xab3eda670f968b1aull,0x1d80886e0fba63ffull,}, + 
{0x672372572dbdeb59ull,0xba4cd6dd6cb11489ull,0xc74f1c6e3b714d1bull,0x1680ad98da380987ull,}, + {0xbad24d644fd9ab88ull,0x5c817abf11d3ce46ull,0x50587e12664ad6ebull,0x13505c240ec7b092ull,}, + {0x69ade81d2b6d1284ull,0xdd1d9aacd53d3f77ull,0x0888b2de31545a07ull,0x110788f6944c78e4ull,}, + {0x81032f6ea72116caull,0xfcb0253b20bea779ull,0x3d0a38d424eba36eull,0x07bdfcb51526c1e5ull,}, + }, + { + {0xebb80cf2cf44bfbeull,0xb8d559e318097038ull,0x212ed4c3d148be8eull,0x07028dcc862fbbb7ull,}, + {0x91e0a395d89f04d4ull,0xf777ae0142ff07c1ull,0x546b9b47f738fa6eull,0x01c284ef516920c6ull,}, + {0x2042edb5a4eb2cdcull,0xc69cefe0a36a7068ull,0x54471d65b3238311ull,0x077562b3344b4304ull,}, + {0xdb85089b11ece88dull,0x5c27780550f90569ull,0xb9607c12434a6b3dull,0x0d02a6324718f932ull,}, + {0x22ef9b5c8b453c5dull,0x6fdc3875e9247830ull,0x20e375065f9e593aull,0x2351c044ce0d933aull,}, + {0xfa0fcb482093eacbull,0xf8d695e8413f5acdull,0xc7020d8c84a2d773ull,0x11bf7584e5283fa1ull,}, + }, + { + {0xc6b304aa2adf2dfcull,0x19aac2d5544ee834ull,0xb7966f8cd629c330ull,0x1bc72a08a8bf8f9bull,}, + {0x18a5f463799112c7ull,0x4f14db51e967ebc3ull,0xa5ddb48f64db5e8eull,0x15b4fdd8610f3a32ull,}, + {0xe7b86b479d7e2293ull,0x931034487abf490dull,0x8c40ab7dfd28a196ull,0x1d981d3918fdc3b5ull,}, + {0x00797000c2afd324ull,0xf2954f0f86622806ull,0x8464fe0995cd3a7dull,0x0f0a74df4ca00cc3ull,}, + {0x639707b1839c8330ull,0x9c8d491ad7d779a9ull,0x576b7e0f24ce5f46ull,0x21fbdcc42ccd04c2ull,}, + {0x4578db4bdfd55434ull,0x1126933c97e9f4dcull,0xe64529a8921d7415ull,0x12e48bab87ea1fe3ull,}, + }, + { + {0x3f6d2fd04bd5ed75ull,0x65e464cdac7d235bull,0x45903a63a3608961ull,0x1f60c825bccd55c9ull,}, + {0x36b33d0fb8528047ull,0xc8d1f1ad82683baeull,0x78f4b80065c2e4c6ull,0x2066f32874bd1228ull,}, + {0x8b6d6a4b986e8d4cull,0x58f6f275f1d020f4ull,0xe4f3c16209e87ad5ull,0x1cdc33d41ad30173ull,}, + {0x9ec18a6cba3fb3ecull,0x31fc74b68ac834c6ull,0x256788ece76e37b0ull,0x13de6919841928e1ull,}, + {0xae46aa08773971f6ull,0xacd04d9698d47643ull,0x3667178a594f2153ull,0x19a0cadfa3cb7fa0ull,}, + {0x228420456325e079ull,0x3e4ec53c418fdae9ull,0xb9fee919e867c6f1ull,0x2272413f3e989842ull,}, + }, + { + {0x6420ee94e7c764dcull,0x87b3c986d488deecull,0x11dc3e6b59de7ffbull,0x14bb613bce5792e2ull,}, + {0xcc0b60cd4e352976ull,0x794b585f70a5b463ull,0x415cb954036ba631ull,0x1e521f8201ca4258ull,}, + {0xd707ac91ecd5dbdaull,0x08ffd44e5fd83cc6ull,0xa5f39e0f8dff5afcull,0x02315f6a55599212ull,}, + {0x2cdbd9f11596e797ull,0x7c560adedcf2cb25ull,0xdc474409e5650d9dull,0x158bc955e7e492e2ull,}, + {0xd6023b14352a1766ull,0xd5c271d942b6541dull,0x5dc4d1c72d25258full,0x0753f065a4cb028eull,}, + {0x11b4229a4c62010aull,0x2949cb6b089b3aa9ull,0x01b8bdc50766366dull,0x1094dfda1e2e5e57ull,}, + }, + { + {0x773cc6e1ac12f73eull,0x77686f8d75a83e9eull,0x7ce94b7ef1bd53a0ull,0x005a7d3e75c16332ull,}, + {0xafdc64df2ceca388ull,0x15be551bbca0e367ull,0x62d9b7608cf3b8a2ull,0x11ddfe7a0a96af25ull,}, + {0x5d23851a77554f67ull,0xa0f51815094e8050ull,0x930af7569c7850d7ull,0x108eb034eeda1460ull,}, + {0x28a80b277688cae3ull,0xd09ef5d30ec9b193ull,0xb6c554e32540d421ull,0x1da12923355fd2faull,}, + {0x9db6509d0130494dull,0xe28936417c250459ull,0xde8b4491aa8d1dc1ull,0x194b8e7bfc005322ull,}, + {0x7aaeb4f2f941741bull,0xf9d7b55b452158f8ull,0x17e172a187f68105ull,0x02f620bde277950aull,}, + }, + { + {0xf555a7766ac21481ull,0x82b12050c9449770ull,0x7bd16da27eff49fcull,0x06d1ad9a6cd69b71ull,}, + {0xa059542aa0f64e9full,0x93671f16b269a351ull,0x795262fddcb7cc3eull,0x199f355d6263cf86ull,}, + {0x0cbf707f1f8f73aeull,0xf483501e15982b44ull,0x2456aaa4d84d80c0ull,0x0d0ffb5393f7dd0aull,}, + 
{0x62999996c09097e2ull,0x1b87e828f9fc66e4ull,0x6b17eb3166967f57ull,0x1603601303478f52ull,}, + {0xfb776d4fd407d485ull,0xac03efdb746bf127ull,0x57bde58a5671a601ull,0x0cfbfa20d141f05cull,}, + {0x625ac1161752cbe2ull,0xe3348570b6ad71bcull,0x155b3911f5335f75ull,0x1679ec68122edc64ull,}, + }, + { + {0x9334b4c82aee3ef8ull,0x7ea393af9d865ce4ull,0x0f4ee0906b864850ull,0x1d9e34461e27cc61ull,}, + {0x921b1a6aa179a081ull,0xcca25db2d609388dull,0x816b69ad9a56a314ull,0x00eb3f6388c4d375ull,}, + {0x04e25f4225e50e72ull,0x59a20b6edf897f2aull,0x0842d5f5823535b4ull,0x0dceaf5ae8e50885ull,}, + {0xac6598257175aa0aull,0x1d5d21e8129f2efaull,0xe81dcc9497cb17fdull,0x11327c40c92dff80ull,}, + {0x149e4b2c0a3bfd81ull,0xb8efe68c475436ebull,0x3a8bf06e9ca15cd8ull,0x152d72639c6e5308ull,}, + {0x217e0e34f3f76b8bull,0x5c722d926b596985ull,0x45417905be08807bull,0x1e6132b54ad5595eull,}, + }, + { + {0xe5b541097726667dull,0x5583dfb4ade471adull,0x1840bff44a2faef2ull,0x093c23f8028fe3b9ull,}, + {0xe1e3347370f6e6c7ull,0x8dd7352c4dcc2a17ull,0x3cade218210f9e29ull,0x190ff57eac6e8b87ull,}, + {0x34905e72c173fdc3ull,0x59f8c6f4373c834eull,0x1bd9feabed806c99ull,0x1f209a7935a8ba38ull,}, + {0xe44f080023c83b49ull,0xfd2006276058693cull,0x44b43b6e462a32cbull,0x0942a0ed8e4657ebull,}, + {0xf7e53796340fd772ull,0xf8219ede4152370full,0x548b9b002c19940cull,0x1d0aaff93f50f52full,}, + {0xb5987eb545462ddaull,0xe0f29867116336edull,0xcc75a11c3ff8374aull,0x144d0b8fda0a44a9ull,}, + }, + { + {0x676408d2ff1a7593ull,0xc96a8077d911776full,0x9efff30500904c63ull,0x100a6093df2ae343ull,}, + {0xf1f92502b846cf30ull,0x57888806036aec6cull,0x310ceb0b04caaa7cull,0x1192819a3058307bull,}, + {0xbbf882b39fec7883ull,0x4079d241f7e6e0efull,0xb3090a69b3c7261full,0x16440a02d7fb5d2dull,}, + {0x70e9c8a88422df45ull,0x48fa15635ca49bd9ull,0x0430c461bfb96d16ull,0x0a29a4007c99f6d1ull,}, + {0x643a2bdb308a297cull,0xe4a5bca158e65ff6ull,0xc8dd1579abdeb9e5ull,0x1ee4a94b3d6c775cull,}, + {0xc085b2622b5c4480ull,0x8c69048c5fcded96ull,0x418ba7bd3260d85dull,0x0b22158bb6c29f9eull,}, + }, + { + {0xf661abe667e83f01ull,0x41068a7e95fd10c0ull,0xc9c4cc186cb3eb72ull,0x1a95a93a30592461ull,}, + {0x78dfc65c7280895eull,0xb9f1514b98add459ull,0xc7d713fd92025a11ull,0x0dbe6c1ceabcf73full,}, + {0xe35368a946428244ull,0x990da5e2783a2762ull,0x686b61b7775fb02cull,0x1a79e39b78922172ull,}, + {0xbf8ca28c8d95600full,0x0f56487a909e51cbull,0xfa1da11e3018a2faull,0x07a32571b231773cull,}, + {0x46c84d812bce56f5ull,0x84aa8d8bfe2b498cull,0x699ad1f34e22d74cull,0x0ad743bd99c458dbull,}, + {0xa8d16c7e09aa59b0ull,0x59ba8cbe75f31d51ull,0x5c68705d7838ff4eull,0x1c863feb5090e87eull,}, + }, + { + {0x86af66313ed193baull,0xa0902147163778b5ull,0xa101fcdc6b2d6191ull,0x12fbff4713e6eb10ull,}, + {0x9e1abdaf6e329c66ull,0xd8de2fb4db8e7554ull,0xb4374e1e93a0171bull,0x0ba2ecd00749208full,}, + {0x0cad8f57c02ce090ull,0xcac04eddadd338ecull,0x7ee5c235934f9918ull,0x24db5a9b0ad7ed64ull,}, + {0x46288ad8e01c5063ull,0x4b4c58654226c44aull,0xc4974aaf56ae42dfull,0x173e64cdd5661536ull,}, + {0x58b3450781e7e080ull,0x14ab3a25a5e64bbcull,0x3f9f91743276d2f5ull,0x0e101d0b89b81cdcull,}, + {0xa6bca5fbe99b2b7full,0x5fb8817e670ef40eull,0xb44cbcb05de76cb3ull,0x17110ed4912babb5ull,}, + }, + { + {0x6745e77f4e05d8edull,0xed278e7875ebb5fdull,0x3662f60864a8ccd2ull,0x028104ffc0a31868ull,}, + {0x740b76d64f25c9f0ull,0xb519a415132160e7ull,0x550a38ed829c5f68ull,0x04ea27d6deefcfabull,}, + {0x32d82ea897185651ull,0x04a8f5b63a90573aull,0x2c88fdfba241b62full,0x0285780fe0b77687ull,}, + {0xfb6ebce4f4b20f13ull,0x8ce24ff3dad1a3c7ull,0x716f93b316af50c2ull,0x0a09e678713447efull,}, + 
{0x6868a19728642ca6ull,0x4be5579c08e0a30cull,0xbd630b8f9c3d1552ull,0x0f277cf26c8e60f2ull,},
+ {0x1a105d54bc290b18ull,0xa7e1a7c716529370ull,0x6e5a6c5b44350fd0ull,0x1fd2ae638488fccbull,},
+ },
+};
+#endif
diff --git a/vendor/github.com/byzantine-lab/bls/test/bls256_test.cpp b/vendor/github.com/byzantine-lab/bls/test/bls256_test.cpp
new file mode 100644
index 000000000..e53a87057
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/test/bls256_test.cpp
@@ -0,0 +1,3 @@
+#define MCLBN_FP_UNIT_SIZE 4
+#include "bls_test.hpp"
+
diff --git a/vendor/github.com/byzantine-lab/bls/test/bls384_256_test.cpp b/vendor/github.com/byzantine-lab/bls/test/bls384_256_test.cpp
new file mode 100644
index 000000000..ea8126567
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/test/bls384_256_test.cpp
@@ -0,0 +1,4 @@
+#define MCLBN_FP_UNIT_SIZE 6
+#define MCLBN_FR_UNIT_SIZE 4
+#include "bls_test.hpp"
+
diff --git a/vendor/github.com/byzantine-lab/bls/test/bls384_test.cpp b/vendor/github.com/byzantine-lab/bls/test/bls384_test.cpp
new file mode 100644
index 000000000..2212f8e6b
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/test/bls384_test.cpp
@@ -0,0 +1,3 @@
+#define MCLBN_FP_UNIT_SIZE 6
+#include "bls_test.hpp"
+
diff --git a/vendor/github.com/byzantine-lab/bls/test/bls_c256_test.cpp b/vendor/github.com/byzantine-lab/bls/test/bls_c256_test.cpp
new file mode 100644
index 000000000..8613720b4
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/test/bls_c256_test.cpp
@@ -0,0 +1,2 @@
+#define MCLBN_FP_UNIT_SIZE 4
+#include "bls_c_test.hpp"
diff --git a/vendor/github.com/byzantine-lab/bls/test/bls_c384_256_test.cpp b/vendor/github.com/byzantine-lab/bls/test/bls_c384_256_test.cpp
new file mode 100644
index 000000000..6f153f9d8
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/test/bls_c384_256_test.cpp
@@ -0,0 +1,3 @@
+#define MCLBN_FP_UNIT_SIZE 6
+#define MCLBN_FR_UNIT_SIZE 4
+#include "bls_c_test.hpp"
diff --git a/vendor/github.com/byzantine-lab/bls/test/bls_c384_test.cpp b/vendor/github.com/byzantine-lab/bls/test/bls_c384_test.cpp
new file mode 100644
index 000000000..b6886dd04
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/test/bls_c384_test.cpp
@@ -0,0 +1,2 @@
+#define MCLBN_FP_UNIT_SIZE 6
+#include "bls_c_test.hpp"
diff --git a/vendor/github.com/byzantine-lab/bls/test/bls_c_test.hpp b/vendor/github.com/byzantine-lab/bls/test/bls_c_test.hpp
new file mode 100644
index 000000000..e9b6e6302
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/bls/test/bls_c_test.hpp
@@ -0,0 +1,437 @@
+#include <cybozu/test.hpp> // header names in this block reconstructed from usage; bracket contents were lost in extraction
+#include <cybozu/inttype.hpp>
+#include <cybozu/benchmark.hpp>
+#include <bls/bls.h>
+#include <string.h>
+#include <mcl/gmp_util.hpp>
+
+size_t pubSize(size_t FrSize)
+{
+#ifdef BLS_SWAP_G
+	return FrSize;
+#else
+	return FrSize * 2;
+#endif
+}
+size_t sigSize(size_t FrSize)
+{
+#ifdef BLS_SWAP_G
+	return FrSize * 2;
+#else
+	return FrSize;
+#endif
+}
+
+void bls_use_stackTest()
+{
+	blsSecretKey sec;
+	blsPublicKey pub;
+	blsSignature sig;
+	const char *msg = "this is a pen";
+	const size_t msgSize = strlen(msg);
+
+	blsSecretKeySetByCSPRNG(&sec);
+
+	blsGetPublicKey(&pub, &sec);
+
+	blsSign(&sig, &sec, msg, msgSize);
+
+	CYBOZU_TEST_ASSERT(blsVerify(&sig, &pub, msg, msgSize));
+}
+
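+/*
+	editorial note, not in the upstream sources: pubSize()/sigSize() above
+	encode the BLS_SWAP_G convention. Without BLS_SWAP_G, public keys live
+	in G2 (serialized as FpSize * 2 bytes) and signatures in G1 (FpSize
+	bytes); defining BLS_SWAP_G swaps the two groups. blsDataTest() and
+	blsSerializeTest() below check exactly these sizes via pubSize(FpSize)
+	and sigSize(FpSize).
+*/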
+void blsDataTest()
+{
+	const char *msg = "test test";
+	const size_t msgSize = strlen(msg);
+	const size_t FrSize = blsGetFrByteSize();
+	const size_t FpSize = blsGetG1ByteSize();
+	blsSecretKey sec1, sec2;
+	blsSecretKeySetByCSPRNG(&sec1);
+	char buf[1024];
+	size_t n;
+	size_t ret;
+	n = blsSecretKeyGetHexStr(buf, sizeof(buf), &sec1);
+	CYBOZU_TEST_ASSERT(0 < n && n <= FrSize * 2);
+	ret = blsSecretKeySetHexStr(&sec2, buf, n);
+	CYBOZU_TEST_EQUAL(ret, 0);
+	CYBOZU_TEST_ASSERT(blsSecretKeyIsEqual(&sec1, &sec2));
+
+	memset(&sec2, 0, sizeof(sec2));
+	n = blsSecretKeySerialize(buf, sizeof(buf), &sec1);
+	CYBOZU_TEST_EQUAL(n, FrSize);
+	ret = blsSecretKeyDeserialize(&sec2, buf, n);
+	CYBOZU_TEST_EQUAL(ret, n);
+	CYBOZU_TEST_ASSERT(blsSecretKeyIsEqual(&sec1, &sec2));
+
+	blsPublicKey pub1, pub2;
+	blsGetPublicKey(&pub1, &sec1);
+	n = blsPublicKeySerialize(buf, sizeof(buf), &pub1);
+	CYBOZU_TEST_EQUAL(n, pubSize(FpSize));
+	ret = blsPublicKeyDeserialize(&pub2, buf, n);
+	CYBOZU_TEST_EQUAL(ret, n);
+	CYBOZU_TEST_ASSERT(blsPublicKeyIsEqual(&pub1, &pub2));
+	blsSignature sig1, sig2;
+	blsSign(&sig1, &sec1, msg, msgSize);
+	n = blsSignatureSerialize(buf, sizeof(buf), &sig1);
+	CYBOZU_TEST_EQUAL(n, sigSize(FpSize));
+	ret = blsSignatureDeserialize(&sig2, buf, n);
+	CYBOZU_TEST_EQUAL(ret, n);
+	CYBOZU_TEST_ASSERT(blsSignatureIsEqual(&sig1, &sig2));
+}
+
+void blsOrderTest(const char *curveOrder/*Fr*/, const char *fieldOrder/*Fp*/)
+{
+	char buf[1024];
+	size_t len;
+	len = blsGetCurveOrder(buf, sizeof(buf));
+	CYBOZU_TEST_ASSERT(len > 0);
+	CYBOZU_TEST_EQUAL(buf, curveOrder);
+	len = blsGetFieldOrder(buf, sizeof(buf));
+	CYBOZU_TEST_ASSERT(len > 0);
+	CYBOZU_TEST_EQUAL(buf, fieldOrder);
+}
+
+#if !defined(DISABLE_THREAD_TEST) || defined(__clang__)
+#if defined(CYBOZU_CPP_VERSION) && CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11
+#include <thread> // <thread>/<memory> reconstructed: std::thread and std::unique_ptr are used below
+#include <memory>
+struct Thread {
+	std::unique_ptr<std::thread> t;
+	Thread() : t() {}
+	~Thread()
+	{
+		if (t) {
+			t->join();
+		}
+	}
+	template<class F>
+	void run(F func, int p1, int p2)
+	{
+		t.reset(new std::thread(func, p1, p2));
+	}
+};
+
+CYBOZU_TEST_AUTO(multipleInit)
+{
+	const size_t n = 100;
+	{
+		std::vector<Thread> vt(n);
+		for (size_t i = 0; i < n; i++) {
+			vt[i].run(blsInit, MCL_BN254, MCLBN_COMPILED_TIME_VAR);
+		}
+	}
+	CYBOZU_TEST_EQUAL(blsGetOpUnitSize(), 4u);
+#if MCLBN_FP_UNIT_SIZE == 6
+	{
+		std::vector<Thread> vt(n);
+		for (size_t i = 0; i < n; i++) {
+			vt[i].run(blsInit, MCL_BLS12_381, MCLBN_COMPILED_TIME_VAR);
+		}
+	}
+	CYBOZU_TEST_EQUAL(blsGetOpUnitSize(), 6u);
+#endif
+}
+#endif
+#endif
+
+void blsSerializeTest()
+{
+	const size_t FrSize = blsGetFrByteSize();
+	const size_t FpSize = blsGetG1ByteSize();
+	printf("FrSize=%d, FpSize=%d\n", (int)FrSize, (int)FpSize);
+	blsId id1, id2;
+	blsSecretKey sec1, sec2;
+	blsPublicKey pub1, pub2;
+	blsSignature sig1, sig2;
+	char buf[1024];
+	size_t n;
+	size_t expectSize;
+	size_t ret;
+	const char dummyChar = '1';
+
+	// Id
+	expectSize = FrSize;
+	blsIdSetInt(&id1, -1);
+	n = blsIdSerialize(buf, sizeof(buf), &id1);
+	CYBOZU_TEST_EQUAL(n, expectSize);
+
+	ret = blsIdDeserialize(&id2, buf, n);
+	CYBOZU_TEST_EQUAL(ret, n);
+	CYBOZU_TEST_ASSERT(blsIdIsEqual(&id1, &id2));
+
+	ret = blsIdDeserialize(&id2, buf, n - 1);
+	CYBOZU_TEST_EQUAL(ret, 0);
+
+	memset(&id2, 0, sizeof(id2));
+	buf[n] = dummyChar;
+	ret = blsIdDeserialize(&id2, buf, n + 1);
+	CYBOZU_TEST_EQUAL(ret, n);
+	CYBOZU_TEST_ASSERT(blsIdIsEqual(&id1, &id2));
+
+	n = blsIdSerialize(buf, expectSize, &id1);
+	CYBOZU_TEST_EQUAL(n, expectSize);
+
+	// SecretKey
+	expectSize = FrSize;
+	blsSecretKeySetDecStr(&sec1, "-1", 2);
+	n = blsSecretKeySerialize(buf, sizeof(buf), &sec1);
+	CYBOZU_TEST_EQUAL(n, expectSize);
+
+	ret = blsSecretKeyDeserialize(&sec2, buf, n);
+	CYBOZU_TEST_EQUAL(ret, n);
+	CYBOZU_TEST_ASSERT(blsSecretKeyIsEqual(&sec1, &sec2));
+
+	ret = blsSecretKeyDeserialize(&sec2, buf, n - 1);
+	CYBOZU_TEST_EQUAL(ret, 0);
+
+	memset(&sec2, 0, sizeof(sec2));
+	buf[n] = 
dummyChar; + ret = blsSecretKeyDeserialize(&sec2, buf, n + 1); + CYBOZU_TEST_EQUAL(ret, n); + CYBOZU_TEST_ASSERT(blsSecretKeyIsEqual(&sec1, &sec2)); + + n = blsSecretKeySerialize(buf, expectSize, &sec1); + CYBOZU_TEST_EQUAL(n, expectSize); + + // PublicKey + expectSize = pubSize(FpSize); + blsGetPublicKey(&pub1, &sec1); + n = blsPublicKeySerialize(buf, sizeof(buf), &pub1); + CYBOZU_TEST_EQUAL(n, expectSize); + CYBOZU_TEST_ASSERT(blsPublicKeyIsValidOrder(&pub1)); + + ret = blsPublicKeyDeserialize(&pub2, buf, n); + CYBOZU_TEST_EQUAL(ret, n); + CYBOZU_TEST_ASSERT(blsPublicKeyIsEqual(&pub1, &pub2)); + + ret = blsPublicKeyDeserialize(&pub2, buf, n - 1); + CYBOZU_TEST_EQUAL(ret, 0); + + memset(&pub2, 0, sizeof(pub2)); + buf[n] = dummyChar; + ret = blsPublicKeyDeserialize(&pub2, buf, n + 1); + CYBOZU_TEST_EQUAL(ret, n); + CYBOZU_TEST_ASSERT(blsPublicKeyIsEqual(&pub1, &pub2)); + + n = blsPublicKeySerialize(buf, expectSize, &pub1); + CYBOZU_TEST_EQUAL(n, expectSize); + + // Signature +#ifdef BLS_SWAP_G + expectSize = FpSize * 2; +#else + expectSize = FpSize; +#endif + blsSign(&sig1, &sec1, "abc", 3); + n = blsSignatureSerialize(buf, sizeof(buf), &sig1); + CYBOZU_TEST_EQUAL(n, expectSize); + CYBOZU_TEST_ASSERT(blsSignatureIsValidOrder(&sig1)); + + ret = blsSignatureDeserialize(&sig2, buf, n); + CYBOZU_TEST_EQUAL(ret, n); + CYBOZU_TEST_ASSERT(blsSignatureIsEqual(&sig1, &sig2)); + + ret = blsSignatureDeserialize(&sig2, buf, n - 1); + CYBOZU_TEST_EQUAL(ret, 0); + + memset(&sig2, 0, sizeof(sig2)); + buf[n] = dummyChar; + ret = blsSignatureDeserialize(&sig2, buf, n + 1); + CYBOZU_TEST_EQUAL(ret, n); + CYBOZU_TEST_ASSERT(blsSignatureIsEqual(&sig1, &sig2)); + + n = blsSignatureSerialize(buf, expectSize, &sig1); + CYBOZU_TEST_EQUAL(n, expectSize); +} + +void blsVerifyOrderTest() +{ + puts("blsVerifyOrderTest"); +#ifdef BLS_SWAP_G + const uint8_t Qs[] = +#else + const uint8_t Ps[] = +#endif + { +0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, + }; +#ifdef BLS_SWAP_G + const uint8_t Ps[] = +#else + const uint8_t Qs[] = +#endif + { +0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, + }; + size_t n; + blsPublicKey pub; + n = blsPublicKeyDeserialize(&pub, Ps, sizeof(Ps)); + CYBOZU_TEST_EQUAL(n, 0); + blsPublicKeyVerifyOrder(0); + n = blsPublicKeyDeserialize(&pub, Ps, sizeof(Ps)); + CYBOZU_TEST_ASSERT(n > 0); + CYBOZU_TEST_ASSERT(!blsPublicKeyIsValidOrder(&pub)); + blsPublicKeyVerifyOrder(1); + + blsSignature sig; + n = blsSignatureDeserialize(&sig, Qs, sizeof(Qs)); + CYBOZU_TEST_EQUAL(n, 0); + blsSignatureVerifyOrder(0); + n = blsSignatureDeserialize(&sig, Qs, sizeof(Qs)); + CYBOZU_TEST_ASSERT(n > 0); + CYBOZU_TEST_ASSERT(!blsSignatureIsValidOrder(&sig)); + blsSignatureVerifyOrder(1); +} + +void blsAddSubTest() +{ + blsSecretKey sec[3]; + blsPublicKey pub[3]; + 
blsSignature sig[3]; + const char *msg = "this is a pen"; + const size_t msgSize = strlen(msg); + + const char *secHexStr[8] = { "12", "34" }; + for (int i = 0; i < 2; i++) { + blsSecretKeySetHexStr(&sec[i], secHexStr[i], strlen(secHexStr[i])); + blsGetPublicKey(&pub[i], &sec[i]); + blsSign(&sig[i], &sec[i], msg, msgSize); + } + sec[2] = sec[0]; + blsSecretKeyAdd(&sec[2], &sec[1]); + char buf[1024]; + size_t n = blsSecretKeyGetHexStr(buf, sizeof(buf), &sec[2]); + CYBOZU_TEST_EQUAL(n, 2); + CYBOZU_TEST_EQUAL(buf, "46"); // "12" + "34" + + pub[2] = pub[0]; + blsPublicKeyAdd(&pub[2], &pub[1]); + sig[2] = sig[0]; + blsSignatureAdd(&sig[2], &sig[1]); // sig[2] = sig[0] + sig[1] + blsSignature sig2; + blsSign(&sig2, &sec[2], msg, msgSize); // sig2 = signature by sec[2] + CYBOZU_TEST_ASSERT(blsSignatureIsEqual(&sig2, &sig[2])); + CYBOZU_TEST_ASSERT(blsVerify(&sig[2], &pub[2], msg, msgSize)); // verify by pub[2] + + blsSecretKeySub(&sec[2], &sec[1]); + CYBOZU_TEST_ASSERT(blsSecretKeyIsEqual(&sec[2], &sec[0])); + blsPublicKeySub(&pub[2], &pub[1]); + CYBOZU_TEST_ASSERT(blsPublicKeyIsEqual(&pub[2], &pub[0])); + blsSignatureSub(&sig[2], &sig[1]); + CYBOZU_TEST_ASSERT(blsSignatureIsEqual(&sig[2], &sig[0])); +} + +void blsTrivialShareTest() +{ + blsSecretKey sec1, sec2; + blsPublicKey pub1, pub2; + blsId id; + blsIdSetInt(&id, 123); + + blsSecretKeySetByCSPRNG(&sec1); + blsGetPublicKey(&pub1, &sec1); + int ret; + + memset(&sec2, 0, sizeof(sec2)); + ret = blsSecretKeyShare(&sec2, &sec1, 1, &id); + CYBOZU_TEST_EQUAL(ret, 0); + CYBOZU_TEST_ASSERT(blsSecretKeyIsEqual(&sec1, &sec2)); + memset(&sec2, 0, sizeof(sec2)); + ret = blsSecretKeyRecover(&sec2, &sec1, &id, 1); + CYBOZU_TEST_EQUAL(ret, 0); + CYBOZU_TEST_ASSERT(blsSecretKeyIsEqual(&sec1, &sec2)); + + memset(&pub2, 0, sizeof(pub2)); + ret = blsPublicKeyShare(&pub2, &pub1, 1, &id); + CYBOZU_TEST_EQUAL(ret, 0); + CYBOZU_TEST_ASSERT(blsPublicKeyIsEqual(&pub1, &pub2)); + memset(&pub2, 0, sizeof(pub2)); + ret = blsPublicKeyRecover(&pub2, &pub1, &id, 1); + CYBOZU_TEST_EQUAL(ret, 0); + CYBOZU_TEST_ASSERT(blsPublicKeyIsEqual(&pub1, &pub2)); +} + +void modTest(const char *rStr) +{ + unsigned char buf[1024] = {}; + int ret; + blsSecretKey sec; + const size_t maxByte = 64; // 512-bit + memset(buf, 0xff, maxByte); + ret = blsSecretKeySetLittleEndianMod(&sec, buf, maxByte); + CYBOZU_TEST_EQUAL(ret, 0); + const mpz_class x = (mpz_class(1) << (maxByte * 8)) - 1; // 512-bit 0xff....ff + const mpz_class r(rStr); + size_t n = blsSecretKeySerialize(buf, sizeof(buf), &sec); + CYBOZU_TEST_ASSERT(n > 0); + // serialized data to mpz_class + mpz_class y = 0; + for (size_t i = 0; i < n; i++) { + y <<= 8; + y += buf[n - 1 - i]; + } + CYBOZU_TEST_EQUAL(y, x % r); +} + +void blsBench() +{ + blsSecretKey sec; + blsPublicKey pub; + blsSignature sig; + const char *msg = "this is a pen"; + const size_t msgSize = strlen(msg); + + blsSecretKeySetByCSPRNG(&sec); + + blsGetPublicKey(&pub, &sec); + + CYBOZU_BENCH_C("sign", 10000, blsSign, &sig, &sec, msg, msgSize); + CYBOZU_BENCH_C("verify", 1000, blsVerify, &sig, &pub, msg, msgSize); +} + +CYBOZU_TEST_AUTO(all) +{ + const struct { + int curveType; + const char *r; + const char *p; + } tbl[] = { + { + MCL_BN254, + "16798108731015832284940804142231733909759579603404752749028378864165570215949", + "16798108731015832284940804142231733909889187121439069848933715426072753864723", + }, +#if MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 6 + { + MCL_BN381_1, + 
"5540996953667913971058039301942914304734176495422447785042938606876043190415948413757785063597439175372845535461389", + "5540996953667913971058039301942914304734176495422447785045292539108217242186829586959562222833658991069414454984723", + }, +#endif +#if MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE >= 4 + { + MCL_BLS12_381, + "52435875175126190479447740508185965837690552500527637822603658699938581184513", + "4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787", + }, +#endif + }; + for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) { + printf("i=%d\n", (int)i); + int ret = blsInit(tbl[i].curveType, MCLBN_COMPILED_TIME_VAR); + CYBOZU_TEST_EQUAL(ret, 0); + if (ret) { + printf("ERR %d\n", ret); + exit(1); + } + bls_use_stackTest(); + blsDataTest(); + blsOrderTest(tbl[i].r, tbl[i].p); + blsSerializeTest(); + if (tbl[i].curveType == MCL_BLS12_381) blsVerifyOrderTest(); + blsAddSubTest(); + blsTrivialShareTest(); + modTest(tbl[i].r); + blsBench(); + } +} diff --git a/vendor/github.com/byzantine-lab/bls/test/bls_test.hpp b/vendor/github.com/byzantine-lab/bls/test/bls_test.hpp new file mode 100644 index 000000000..346fafe15 --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/test/bls_test.hpp @@ -0,0 +1,545 @@ +#include +#include +#include +#include +#include +#include +#include + +template +void streamTest(const T& t) +{ + std::ostringstream oss; + oss << t; + std::istringstream iss(oss.str()); + T t2; + iss >> t2; + CYBOZU_TEST_EQUAL(t, t2); +} + +template +void testSetForBN254() +{ + /* + mask value to be less than r if the value >= (1 << (192 + 62)) + */ + const uint64_t fff = uint64_t(-1); + const uint64_t one = uint64_t(1); + const struct { + uint64_t in; + uint64_t expected; + } tbl[] = { + { fff, (one << 61) - 1 }, // masked with (1 << 61) - 1 + { one << 62, 0 }, // masked + { (one << 62) | (one << 61), (one << 61) }, // masked + { (one << 61) - 1, (one << 61) - 1 }, // same + }; + T t1, t2; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + uint64_t v1[] = { fff, fff, fff, tbl[i].in }; + uint64_t v2[] = { fff, fff, fff, tbl[i].expected }; + t1.set(v1); + t2.set(v2); + CYBOZU_TEST_EQUAL(t1, t2); + } +} + +void testForBN254() +{ + CYBOZU_TEST_EQUAL(bls::getOpUnitSize(), 4); + bls::Id id; + CYBOZU_TEST_ASSERT(id.isZero()); + id = 5; + CYBOZU_TEST_EQUAL(id, 5); + { + const uint64_t id1[] = { 1, 2, 3, 4 }; + id.set(id1); + std::ostringstream os; + os << id; + CYBOZU_TEST_EQUAL(os.str(), "0x4000000000000000300000000000000020000000000000001"); + } + testSetForBN254(); + testSetForBN254(); +} + +void hashTest(int type) +{ + bls::SecretKey sec; + sec.init(); + bls::PublicKey pub; + sec.getPublicKey(pub); + const std::string h = "\x01\x02\x03"; + bls::Signature sig; + sec.signHash(sig, h); + CYBOZU_TEST_ASSERT(sig.verifyHash(pub, h)); + CYBOZU_TEST_ASSERT(!sig.verifyHash(pub, "\x01\x02\04")); + if (type == MCL_BN254) { + CYBOZU_TEST_EXCEPTION(sec.signHash(sig, "", 0), std::exception); + CYBOZU_TEST_EXCEPTION(sec.signHash(sig, "\x00", 1), std::exception); + CYBOZU_TEST_EXCEPTION(sec.signHash(sig, "\x00\x00", 2), std::exception); +#ifndef BLS_SWAP_G + const uint64_t c1[] = { 0x0c00000000000004ull, 0xcf0f000000000006ull, 0x26cd890000000003ull, 0x2523648240000001ull }; + const uint64_t mc1[] = { 0x9b0000000000000full, 0x921200000000000dull, 0x9366c48000000004ull }; + CYBOZU_TEST_EXCEPTION(sec.signHash(sig, c1, 32), std::exception); + CYBOZU_TEST_EXCEPTION(sec.signHash(sig, mc1, 24), std::exception); +#endif + } +} + 
+void blsTest() +{ + bls::SecretKey sec; + sec.init(); + streamTest(sec); + bls::PublicKey pub; + sec.getPublicKey(pub); + streamTest(pub); + for (int i = 0; i < 5; i++) { + std::string m = "hello"; + m += char('0' + i); + bls::Signature sig; + sec.sign(sig, m); + CYBOZU_TEST_ASSERT(sig.verify(pub, m)); + CYBOZU_TEST_ASSERT(!sig.verify(pub, m + "a")); + streamTest(sig); + CYBOZU_BENCH_C("sign", 10000, sec.sign, sig, m); + CYBOZU_BENCH_C("verify", 1000, sig.verify, pub, m); + } +} + +void k_of_nTest() +{ + const std::string m = "abc"; + const int n = 5; + const int k = 3; + bls::SecretKey sec0; + sec0.init(); + bls::Signature sig0; + sec0.sign(sig0, m); + bls::PublicKey pub0; + sec0.getPublicKey(pub0); + CYBOZU_TEST_ASSERT(sig0.verify(pub0, m)); + + bls::SecretKeyVec msk; + sec0.getMasterSecretKey(msk, k); + + bls::SecretKeyVec allPrvVec(n); + bls::IdVec allIdVec(n); + for (int i = 0; i < n; i++) { + int id = i + 1; + allPrvVec[i].set(msk, id); + allIdVec[i] = id; + + bls::SecretKey p; + p.set(msk.data(), k, id); + CYBOZU_TEST_EQUAL(allPrvVec[i], p); + } + + bls::SignatureVec allSigVec(n); + for (int i = 0; i < n; i++) { + CYBOZU_TEST_ASSERT(allPrvVec[i] != sec0); + allPrvVec[i].sign(allSigVec[i], m); + bls::PublicKey pub; + allPrvVec[i].getPublicKey(pub); + CYBOZU_TEST_ASSERT(pub != pub0); + CYBOZU_TEST_ASSERT(allSigVec[i].verify(pub, m)); + } + + /* + 3-out-of-n + can recover + */ + bls::SecretKeyVec secVec(3); + bls::IdVec idVec(3); + for (int a = 0; a < n; a++) { + secVec[0] = allPrvVec[a]; + idVec[0] = allIdVec[a]; + for (int b = a + 1; b < n; b++) { + secVec[1] = allPrvVec[b]; + idVec[1] = allIdVec[b]; + for (int c = b + 1; c < n; c++) { + secVec[2] = allPrvVec[c]; + idVec[2] = allIdVec[c]; + bls::SecretKey sec; + sec.recover(secVec, idVec); + CYBOZU_TEST_EQUAL(sec, sec0); + bls::SecretKey sec2; + sec2.recover(secVec.data(), idVec.data(), secVec.size()); + CYBOZU_TEST_EQUAL(sec, sec2); + } + } + } + { + secVec[0] = allPrvVec[0]; + secVec[1] = allPrvVec[1]; + secVec[2] = allPrvVec[0]; // same of secVec[0] + idVec[0] = allIdVec[0]; + idVec[1] = allIdVec[1]; + idVec[2] = allIdVec[0]; + bls::SecretKey sec; + CYBOZU_TEST_EXCEPTION_MESSAGE(sec.recover(secVec, idVec), std::exception, "same id"); + } + { + /* + n-out-of-n + can recover + */ + bls::SecretKey sec; + sec.recover(allPrvVec, allIdVec); + CYBOZU_TEST_EQUAL(sec, sec0); + } + /* + 2-out-of-n + can't recover + */ + secVec.resize(2); + idVec.resize(2); + for (int a = 0; a < n; a++) { + secVec[0] = allPrvVec[a]; + idVec[0] = allIdVec[a]; + for (int b = a + 1; b < n; b++) { + secVec[1] = allPrvVec[b]; + idVec[1] = allIdVec[b]; + bls::SecretKey sec; + sec.recover(secVec, idVec); + CYBOZU_TEST_ASSERT(sec != sec0); + } + } + /* + 3-out-of-n + can recover + */ + bls::SignatureVec sigVec(3); + idVec.resize(3); + for (int a = 0; a < n; a++) { + sigVec[0] = allSigVec[a]; + idVec[0] = allIdVec[a]; + for (int b = a + 1; b < n; b++) { + sigVec[1] = allSigVec[b]; + idVec[1] = allIdVec[b]; + for (int c = b + 1; c < n; c++) { + sigVec[2] = allSigVec[c]; + idVec[2] = allIdVec[c]; + bls::Signature sig; + sig.recover(sigVec, idVec); + CYBOZU_TEST_EQUAL(sig, sig0); + } + } + } + { + sigVec[0] = allSigVec[1]; idVec[0] = allIdVec[1]; + sigVec[1] = allSigVec[4]; idVec[1] = allIdVec[4]; + sigVec[2] = allSigVec[3]; idVec[2] = allIdVec[3]; + bls::Signature sig; + CYBOZU_BENCH_C("sig.recover", 100, sig.recover, sigVec, idVec); + } + { + /* + n-out-of-n + can recover + */ + bls::Signature sig; + sig.recover(allSigVec, allIdVec); + CYBOZU_TEST_EQUAL(sig, sig0); + 
} + /* + 2-out-of-n + can't recover + */ + sigVec.resize(2); + idVec.resize(2); + for (int a = 0; a < n; a++) { + sigVec[0] = allSigVec[a]; + idVec[0] = allIdVec[a]; + for (int b = a + 1; b < n; b++) { + sigVec[1] = allSigVec[b]; + idVec[1] = allIdVec[b]; + bls::Signature sig; + sig.recover(sigVec, idVec); + CYBOZU_TEST_ASSERT(sig != sig0); + } + } + // return same value if n = 1 + sigVec.resize(1); + idVec.resize(1); + sigVec[0] = allSigVec[0]; + idVec[0] = allIdVec[0]; + { + bls::Signature sig; + sig.recover(sigVec, idVec); + CYBOZU_TEST_EQUAL(sig, sigVec[0]); + } + // share and recover publicKey + { + bls::PublicKeyVec pubVec(k); + idVec.resize(k); + // select [0, k) publicKey + for (int i = 0; i < k; i++) { + allPrvVec[i].getPublicKey(pubVec[i]); + idVec[i] = allIdVec[i]; + } + bls::PublicKey pub; + pub.recover(pubVec, idVec); + CYBOZU_TEST_EQUAL(pub, pub0); + bls::PublicKey pub2; + pub2.recover(pubVec.data(), idVec.data(), pubVec.size()); + CYBOZU_TEST_EQUAL(pub, pub2); + } +} + +void popTest() +{ + const size_t k = 3; + const size_t n = 6; + const std::string m = "pop test"; + bls::SecretKey sec0; + sec0.init(); + bls::PublicKey pub0; + sec0.getPublicKey(pub0); + bls::Signature sig0; + sec0.sign(sig0, m); + CYBOZU_TEST_ASSERT(sig0.verify(pub0, m)); + + bls::SecretKeyVec msk; + sec0.getMasterSecretKey(msk, k); + + bls::PublicKeyVec mpk; + bls::getMasterPublicKey(mpk, msk); + bls::SignatureVec popVec; + bls::getPopVec(popVec, msk); + + for (size_t i = 0; i < popVec.size(); i++) { + CYBOZU_TEST_ASSERT(popVec[i].verify(mpk[i])); + } + + const int idTbl[n] = { + 3, 5, 193, 22, 15 + }; + bls::SecretKeyVec secVec(n); + bls::PublicKeyVec pubVec(n); + bls::SignatureVec sVec(n); + for (size_t i = 0; i < n; i++) { + int id = idTbl[i]; + secVec[i].set(msk, id); + secVec[i].getPublicKey(pubVec[i]); + bls::PublicKey pub; + pub.set(mpk, id); + CYBOZU_TEST_EQUAL(pubVec[i], pub); + + bls::Signature pop; + secVec[i].getPop(pop); + CYBOZU_TEST_ASSERT(pop.verify(pubVec[i])); + + secVec[i].sign(sVec[i], m); + CYBOZU_TEST_ASSERT(sVec[i].verify(pubVec[i], m)); + } + secVec.resize(k); + sVec.resize(k); + bls::IdVec idVec(k); + for (size_t i = 0; i < k; i++) { + idVec[i] = idTbl[i]; + } + bls::SecretKey sec; + sec.recover(secVec, idVec); + CYBOZU_TEST_EQUAL(sec, sec0); + bls::Signature sig; + sig.recover(sVec, idVec); + CYBOZU_TEST_EQUAL(sig, sig0); + bls::Signature sig2; + sig2.recover(sVec.data(), idVec.data(), sVec.size()); + CYBOZU_TEST_EQUAL(sig, sig2); +} + +void addTest() +{ + bls::SecretKey sec1, sec2; + sec1.init(); + sec2.init(); + CYBOZU_TEST_ASSERT(sec1 != sec2); + + bls::PublicKey pub1, pub2; + sec1.getPublicKey(pub1); + sec2.getPublicKey(pub2); + + const std::string m = "doremi"; + bls::Signature sig1, sig2; + sec1.sign(sig1, m); + sec2.sign(sig2, m); + CYBOZU_TEST_ASSERT((sig1 + sig2).verify(pub1 + pub2, m)); +} + +void aggregateTest() +{ + const size_t n = 10; + bls::SecretKey secs[n]; + bls::PublicKey pubs[n], pub; + bls::Signature sigs[n], sig; + const std::string m = "abc"; + for (size_t i = 0; i < n; i++) { + secs[i].init(); + secs[i].getPublicKey(pubs[i]); + secs[i].sign(sigs[i], m); + } + pub = pubs[0]; + sig = sigs[0]; + for (size_t i = 1; i < n; i++) { + pub.add(pubs[i]); + sig.add(sigs[i]); + } + CYBOZU_TEST_ASSERT(sig.verify(pub, m)); +} + +void dataTest() +{ + const size_t FrSize = bls::getFrByteSize(); + const size_t FpSize = bls::getG1ByteSize(); + bls::SecretKey sec; + sec.init(); + std::string str; + sec.getStr(str, bls::IoFixedByteSeq); + { + CYBOZU_TEST_EQUAL(str.size(), 
FrSize); + bls::SecretKey sec2; + sec2.setStr(str, bls::IoFixedByteSeq); + CYBOZU_TEST_EQUAL(sec, sec2); + } + bls::PublicKey pub; + sec.getPublicKey(pub); + pub.getStr(str, bls::IoFixedByteSeq); + { +#ifdef BLS_SWAP_G + CYBOZU_TEST_EQUAL(str.size(), FpSize); +#else + CYBOZU_TEST_EQUAL(str.size(), FpSize * 2); +#endif + bls::PublicKey pub2; + pub2.setStr(str, bls::IoFixedByteSeq); + CYBOZU_TEST_EQUAL(pub, pub2); + } + std::string m = "abc"; + bls::Signature sign; + sec.sign(sign, m); + sign.getStr(str, bls::IoFixedByteSeq); + { +#ifdef BLS_SWAP_G + CYBOZU_TEST_EQUAL(str.size(), FpSize * 2); +#else + CYBOZU_TEST_EQUAL(str.size(), FpSize); +#endif + bls::Signature sign2; + sign2.setStr(str, bls::IoFixedByteSeq); + CYBOZU_TEST_EQUAL(sign, sign2); + } + bls::Id id; + const uint64_t v[] = { 1, 2, 3, 4, 5, 6, }; + id.set(v); + id.getStr(str, bls::IoFixedByteSeq); + { + CYBOZU_TEST_EQUAL(str.size(), FrSize); + bls::Id id2; + id2.setStr(str, bls::IoFixedByteSeq); + CYBOZU_TEST_EQUAL(id, id2); + } +} + +void verifyAggregateTest() +{ + const size_t n = 10; + bls::SecretKey secs[n]; + bls::PublicKey pubs[n]; + bls::Signature sigs[n], sig; + const size_t sizeofHash = 32; + struct Hash { char data[sizeofHash]; }; + std::vector<Hash> h(n); + for (size_t i = 0; i < n; i++) { + char msg[128]; + CYBOZU_SNPRINTF(msg, sizeof(msg), "abc-%d", (int)i); + const size_t msgSize = strlen(msg); + cybozu::Sha256().digest(h[i].data, sizeofHash, msg, msgSize); + secs[i].init(); + secs[i].getPublicKey(pubs[i]); + secs[i].signHash(sigs[i], h[i].data, sizeofHash); + } + sig = sigs[0]; + for (size_t i = 1; i < n; i++) { + sig.add(sigs[i]); + } + CYBOZU_TEST_ASSERT(sig.verifyAggregatedHashes(pubs, h.data(), sizeofHash, n)); + bls::Signature invalidSig = sigs[0] + sigs[1]; + CYBOZU_TEST_ASSERT(!invalidSig.verifyAggregatedHashes(pubs, h.data(), sizeofHash, n)); + h[0].data[0]++; + CYBOZU_TEST_ASSERT(!sig.verifyAggregatedHashes(pubs, h.data(), sizeofHash, n)); +} + +unsigned int writeSeq(void *self, void *buf, unsigned int bufSize) +{ + int& seq = *(int*)self; + char *p = (char *)buf; + for (unsigned int i = 0; i < bufSize; i++) { + p[i] = char(seq++); + } + return bufSize; +} + +void setRandFuncTest() +{ + blsSecretKey sec; + const int seqInit1 = 5; + int seq = seqInit1; + blsSetRandFunc(&seq, writeSeq); + blsSecretKeySetByCSPRNG(&sec); + unsigned char buf[128]; + size_t n = blsSecretKeySerialize(buf, sizeof(buf), &sec); + CYBOZU_TEST_ASSERT(n > 0); + for (size_t i = 0; i < n - 1; i++) { + // omit buf[n - 1] because it may be masked + CYBOZU_TEST_EQUAL(buf[i], seqInit1 + i); + } + // use default CSPRNG + blsSetRandFunc(0, 0); + blsSecretKeySetByCSPRNG(&sec); + n = blsSecretKeySerialize(buf, sizeof(buf), &sec); + CYBOZU_TEST_ASSERT(n > 0); + printf("sec="); + for (size_t i = 0; i < n; i++) { + printf("%02x", buf[i]); + } + printf("\n"); +} + +void testAll() +{ + blsTest(); + k_of_nTest(); + popTest(); + addTest(); + dataTest(); + aggregateTest(); + verifyAggregateTest(); + setRandFuncTest(); +} +CYBOZU_TEST_AUTO(all) +{ + const struct { + int type; + const char *name; + } tbl[] = { + { MCL_BN254, "BN254" }, +#if MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 6 + { MCL_BN381_1, "BN381_1" }, +#endif +#if MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 4 + { MCL_BLS12_381, "BLS12_381" }, +#endif + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + printf("curve=%s\n", tbl[i].name); + int type = tbl[i].type; + bls::init(type); + if (type == MCL_BN254) { + testForBN254(); + } + testAll(); + hashTest(type); + } +} diff
--git a/vendor/github.com/byzantine-lab/bls/test/proj/bls_test/bls_test.vcxproj b/vendor/github.com/byzantine-lab/bls/test/proj/bls_test/bls_test.vcxproj new file mode 100644 index 000000000..1755135fb --- /dev/null +++ b/vendor/github.com/byzantine-lab/bls/test/proj/bls_test/bls_test.vcxproj @@ -0,0 +1,88 @@ + + + + + Debug + x64 + + + Release + x64 + + + + {51266DE6-B57B-4AE3-B85C-282F170E1728} + Win32Proj + fp_test + + + + Application + true + v140 + MultiByte + + + Application + false + v140 + true + MultiByte + + + + + + + + + + + + + + + + + true + + + false + + + + + + Level3 + Disabled + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + + + Console + true + + + + + Level3 + + + MaxSpeed + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + + + Console + true + true + true + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/LICENSE b/vendor/github.com/byzantine-lab/dexon-consensus/LICENSE new file mode 100644 index 000000000..0a041280b --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/LICENSE @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. 
+ + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. 
If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/common/event.go b/vendor/github.com/byzantine-lab/dexon-consensus/common/event.go new file mode 100644 index 000000000..4e4e23bf3 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/common/event.go @@ -0,0 +1,101 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package common + +import ( + "container/heap" + "sync" +) + +type heightEventFn func(uint64) + +type heightEvent struct { + h uint64 + fn heightEventFn +} + +// heightEvents implements a Min-Heap structure. 
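+// It satisfies container/heap.Interface via the methods below, so +// heap.Push and heap.Pop keep the event with the smallest height at +// index 0, e.g. (hypothetical use): +// heap.Push(&events, heightEvent{h: 42, fn: cb}) +// next := heap.Pop(&events).(heightEvent) // smallest height first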
+type heightEvents []heightEvent + +func (h heightEvents) Len() int { return len(h) } +func (h heightEvents) Less(i, j int) bool { return h[i].h < h[j].h } +func (h heightEvents) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h *heightEvents) Push(x interface{}) { + *h = append(*h, x.(heightEvent)) +} +func (h *heightEvents) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +// Event implements the Observer pattern. +type Event struct { + heightEvents heightEvents + heightEventsLock sync.Mutex +} + +// NewEvent creates a new event instance. +func NewEvent() *Event { + he := heightEvents{} + heap.Init(&he) + return &Event{ + heightEvents: he, + } +} + +// RegisterHeight registers a callback to be triggered when the given height is reached. +func (e *Event) RegisterHeight(h uint64, fn heightEventFn) { + e.heightEventsLock.Lock() + defer e.heightEventsLock.Unlock() + heap.Push(&e.heightEvents, heightEvent{ + h: h, + fn: fn, + }) +} + +// NotifyHeight triggers the callbacks of all events registered at or below the given height. +func (e *Event) NotifyHeight(h uint64) { + fns := func() (fns []heightEventFn) { + e.heightEventsLock.Lock() + defer e.heightEventsLock.Unlock() + if len(e.heightEvents) == 0 { + return + } + for h >= e.heightEvents[0].h { + he := heap.Pop(&e.heightEvents).(heightEvent) + fns = append(fns, he.fn) + if len(e.heightEvents) == 0 { + return + } + } + return + }() + for _, fn := range fns { + fn(h) + } +} + +// Reset clears all pending events. +func (e *Event) Reset() { + e.heightEventsLock.Lock() + defer e.heightEventsLock.Unlock() + e.heightEvents = heightEvents{} +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/common/logger.go b/vendor/github.com/byzantine-lab/dexon-consensus/common/logger.go new file mode 100644 index 000000000..3328e939a --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/common/logger.go @@ -0,0 +1,134 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package common + +import "log" + +// Logger defines the way to receive logs from a Consensus instance. +// NOTE: parameters in 'ctx' should be paired as key-value mappings. For example, +// to log an error with message: +// logger.Error("some message", "error", err) +// which is similar to loggers with context: +// logger.Error("some message", map[string]interface{}{ +// "error": err, +// }) +type Logger interface { + // Trace, Debug, Info, Warn and Error log a message at the corresponding level. + Trace(msg string, ctx ...interface{}) + Debug(msg string, ctx ...interface{}) + Info(msg string, ctx ...interface{}) + Warn(msg string, ctx ...interface{}) + Error(msg string, ctx ...interface{}) +} + +// NullLogger logs nothing. +type NullLogger struct{} + +// Trace implements Logger interface. +func (logger *NullLogger) Trace(msg string, ctx ...interface{}) { +} + +// Debug implements Logger interface.
+func (logger *NullLogger) Debug(msg string, ctx ...interface{}) { +} + +// Info implements Logger interface. +func (logger *NullLogger) Info(msg string, ctx ...interface{}) { +} + +// Warn implements Logger interface. +func (logger *NullLogger) Warn(msg string, ctx ...interface{}) { +} + +// Error implements Logger interface. +func (logger *NullLogger) Error(msg string, ctx ...interface{}) { +} + +// SimpleLogger logs everything. +type SimpleLogger struct{} + +// composeVargs builds an argument list from (msg, ctx...) that can be passed to log.Println. +func composeVargs(msg string, ctxs []interface{}) []interface{} { + args := []interface{}{msg} + for _, c := range ctxs { + args = append(args, c) + } + return args +} + +// Trace implements Logger interface. +func (logger *SimpleLogger) Trace(msg string, ctx ...interface{}) { + log.Println(composeVargs(msg, ctx)...) +} + +// Debug implements Logger interface. +func (logger *SimpleLogger) Debug(msg string, ctx ...interface{}) { + log.Println(composeVargs(msg, ctx)...) +} + +// Info implements Logger interface. +func (logger *SimpleLogger) Info(msg string, ctx ...interface{}) { + log.Println(composeVargs(msg, ctx)...) +} + +// Warn implements Logger interface. +func (logger *SimpleLogger) Warn(msg string, ctx ...interface{}) { + log.Println(composeVargs(msg, ctx)...) +} + +// Error implements Logger interface. +func (logger *SimpleLogger) Error(msg string, ctx ...interface{}) { + log.Println(composeVargs(msg, ctx)...) +} + +// CustomLogger logs everything to the given log.Logger. +type CustomLogger struct { + logger *log.Logger +} + +// NewCustomLogger creates a new custom logger. +func NewCustomLogger(logger *log.Logger) *CustomLogger { + return &CustomLogger{ + logger: logger, + } +} + +// Trace implements Logger interface. +func (logger *CustomLogger) Trace(msg string, ctx ...interface{}) { + logger.logger.Println(composeVargs(msg, ctx)...) +} + +// Debug implements Logger interface. +func (logger *CustomLogger) Debug(msg string, ctx ...interface{}) { + logger.logger.Println(composeVargs(msg, ctx)...) +} + +// Info implements Logger interface. +func (logger *CustomLogger) Info(msg string, ctx ...interface{}) { + logger.logger.Println(composeVargs(msg, ctx)...) +} + +// Warn implements Logger interface. +func (logger *CustomLogger) Warn(msg string, ctx ...interface{}) { + logger.logger.Println(composeVargs(msg, ctx)...) +} + +// Error implements Logger interface. +func (logger *CustomLogger) Error(msg string, ctx ...interface{}) { + logger.logger.Println(composeVargs(msg, ctx)...) +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/common/types.go b/vendor/github.com/byzantine-lab/dexon-consensus/common/types.go new file mode 100644 index 000000000..883492bf3 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/common/types.go @@ -0,0 +1,90 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details.
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package common + +import ( + "bytes" + "encoding/hex" + "sort" + "time" +) + +const ( + // HashLength is the length of a hash in DEXON. + HashLength = 32 +) + +// Hash is the basic hash type in DEXON. +type Hash [HashLength]byte + +func (h Hash) String() string { + return hex.EncodeToString([]byte(h[:])) +} + +// Bytes returns the hash as a slice of bytes. +func (h Hash) Bytes() []byte { + return h[:] +} + +// Equal reports whether two hashes are the same. +func (h Hash) Equal(hp Hash) bool { + return h == hp +} + +// Less reports whether the current hash is less than the given one. +func (h Hash) Less(hp Hash) bool { + return bytes.Compare(h[:], hp[:]) < 0 +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (h Hash) MarshalText() ([]byte, error) { + result := make([]byte, hex.EncodedLen(HashLength)) + hex.Encode(result, h[:]) + return result, nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (h *Hash) UnmarshalText(text []byte) error { + _, err := hex.Decode(h[:], text) + return err +} + +// Hashes is for sorting hashes. +type Hashes []Hash + +func (hs Hashes) Len() int { return len(hs) } +func (hs Hashes) Less(i, j int) bool { return hs[i].Less(hs[j]) } +func (hs Hashes) Swap(i, j int) { hs[i], hs[j] = hs[j], hs[i] } + +// SortedHashes is a slice of hashes sorted in ascending order. +type SortedHashes Hashes + +// NewSortedHashes converts a slice of hashes to a sorted one. It's a +// firewall to prevent us from assigning unsorted hashes to a variable +// declared as SortedHashes directly. +func NewSortedHashes(hs Hashes) SortedHashes { + sort.Sort(hs) + return SortedHashes(hs) +} + +// ByTime implements sort.Interface for []time.Time. +type ByTime []time.Time + +func (t ByTime) Len() int { return len(t) } +func (t ByTime) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t ByTime) Less(i, j int) bool { return t[i].Before(t[j]) } diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/common/utils.go b/vendor/github.com/byzantine-lab/dexon-consensus/common/utils.go new file mode 100644 index 000000000..0e847900f --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/common/utils.go @@ -0,0 +1,41 @@ +package common + +import ( + "math/rand" + "time" +) + +var random *rand.Rand + +func init() { + random = rand.New(rand.NewSource(time.Now().Unix())) +} + +// NewRandomHash returns a random Hash-like value. +func NewRandomHash() Hash { + x := Hash{} + for i := 0; i < HashLength; i++ { + x[i] = byte(random.Int() % 256) + } + return x +} + +// GenerateRandomBytes generates 32 random bytes. +func GenerateRandomBytes() []byte { + randomness := make([]byte, 32) + _, err := rand.Read(randomness) + if err != nil { + panic(err) + } + return randomness +} + +// CopyBytes copies a byte slice. +func CopyBytes(src []byte) (dst []byte) { + if len(src) == 0 { + return + } + dst = make([]byte, len(src)) + copy(dst, src) + return +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-mgr.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-mgr.go new file mode 100644 index 000000000..cdbfadf13 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-mgr.go @@ -0,0 +1,676 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library.
+// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "context" + "errors" + "math" + "sync" + "time" + + lru "github.com/hashicorp/golang-lru" + + "github.com/byzantine-lab/dexon-consensus/common" + "github.com/byzantine-lab/dexon-consensus/core/types" + typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg" + "github.com/byzantine-lab/dexon-consensus/core/utils" +) + +// Errors returned from BA modules +var ( + ErrPreviousRoundIsNotFinished = errors.New("previous round is not finished") + ErrRoundOutOfRange = errors.New("round out of range") + ErrInvalidBlock = errors.New("invalid block") + ErrNoValidLeader = errors.New("no valid leader") + ErrIncorrectCRSSignature = errors.New("incorrect CRS signature") + ErrBlockTooOld = errors.New("block too old") +) + +const maxResultCache = 100 +const settingLimit = 3 + +// genValidLeader generates a validLeader function for agreement modules. +func genValidLeader( + mgr *agreementMgr) validLeaderFn { + return func(block *types.Block, crs common.Hash) (bool, error) { + if block.Timestamp.After(time.Now()) { + return false, nil + } + if block.Position.Round >= DKGDelayRound { + if mgr.recv.npks == nil { + return false, nil + } + if block.Position.Round > mgr.recv.npks.Round { + return false, nil + } + if block.Position.Round < mgr.recv.npks.Round { + return false, ErrBlockTooOld + } + } + if !utils.VerifyCRSSignature(block, crs, mgr.recv.npks) { + return false, ErrIncorrectCRSSignature + } + if err := mgr.bcModule.sanityCheck(block); err != nil { + if err == ErrRetrySanityCheckLater { + return false, nil + } + return false, err + } + mgr.logger.Debug("Calling Application.VerifyBlock", "block", block) + switch mgr.app.VerifyBlock(block) { + case types.VerifyInvalidBlock: + return false, ErrInvalidBlock + case types.VerifyRetryLater: + return false, nil + default: + } + return true, nil + } +} + +type agreementMgrConfig struct { + utils.RoundBasedConfig + + notarySetSize uint32 + lambdaBA time.Duration + crs common.Hash +} + +func (c *agreementMgrConfig) from( + round uint64, config *types.Config, crs common.Hash) { + c.notarySetSize = config.NotarySetSize + c.lambdaBA = config.LambdaBA + c.crs = crs + c.SetupRoundBasedFields(round, config) +} + +func newAgreementMgrConfig(prev agreementMgrConfig, config *types.Config, + crs common.Hash) (c agreementMgrConfig) { + c = agreementMgrConfig{} + c.from(prev.RoundID()+1, config, crs) + c.AppendTo(prev.RoundBasedConfig) + return +} + +type baRoundSetting struct { + round uint64 + dkgSet map[types.NodeID]struct{} + threshold int + ticker Ticker + crs common.Hash +} + +type agreementMgr struct { + // TODO(mission): unbind Consensus instance from this module.
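+ // Collaborators up to ctx are borrowed from the owning Consensus + // instance (see newAgreementMgr); the remaining fields hold per-round + // BA state, with mutable parts such as configs guarded by lock.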
+ con *Consensus + ID types.NodeID + app Application + gov Governance + network Network + logger common.Logger + cache *utils.NodeSetCache + signer *utils.Signer + bcModule *blockChain + ctx context.Context + configs []agreementMgrConfig + baModule *agreement + recv *consensusBAReceiver + processedBAResult map[types.Position]struct{} + voteFilter *utils.VoteFilter + settingCache *lru.Cache + curRoundSetting *baRoundSetting + waitGroup sync.WaitGroup + isRunning bool + lock sync.RWMutex +} + +func newAgreementMgr(con *Consensus) (mgr *agreementMgr, err error) { + settingCache, _ := lru.New(settingLimit) + mgr = &agreementMgr{ + con: con, + ID: con.ID, + app: con.app, + gov: con.gov, + network: con.network, + logger: con.logger, + cache: con.nodeSetCache, + signer: con.signer, + bcModule: con.bcModule, + ctx: con.ctx, + processedBAResult: make(map[types.Position]struct{}, maxResultCache), + voteFilter: utils.NewVoteFilter(), + settingCache: settingCache, + } + mgr.recv = &consensusBAReceiver{ + consensus: con, + restartNotary: make(chan types.Position, 1), + } + return mgr, nil +} + +func (mgr *agreementMgr) prepare() { + round := mgr.bcModule.tipRound() + agr := newAgreement( + mgr.ID, + mgr.recv, + newLeaderSelector(genValidLeader(mgr), mgr.logger), + mgr.signer, + mgr.logger) + setting := mgr.generateSetting(round) + if setting == nil { + mgr.logger.Warn("Unable to prepare init setting", "round", round) + return + } + mgr.curRoundSetting = setting + agr.notarySet = mgr.curRoundSetting.dkgSet + // Hacky way to make agreement module self contained. + mgr.recv.agreementModule = agr + mgr.baModule = agr + if round >= DKGDelayRound { + if _, exist := setting.dkgSet[mgr.ID]; exist { + mgr.logger.Debug("Preparing signer and npks.", "round", round) + npk, signer, err := mgr.con.cfgModule.getDKGInfo(round, false) + if err != nil { + mgr.logger.Error("Failed to prepare signer and npks.", + "round", round, + "error", err) + } + mgr.logger.Debug("Prepared signer and npks.", + "round", round, "signer", signer != nil, "npks", npk != nil) + } + } + return +} + +func (mgr *agreementMgr) run() { + mgr.lock.Lock() + defer mgr.lock.Unlock() + if mgr.isRunning { + return + } + mgr.isRunning = true + mgr.waitGroup.Add(1) + go func() { + defer mgr.waitGroup.Done() + mgr.runBA(mgr.bcModule.tipRound()) + }() +} + +func (mgr *agreementMgr) calcLeader( + dkgSet map[types.NodeID]struct{}, + crs common.Hash, pos types.Position) ( + types.NodeID, error) { + nodeSet := types.NewNodeSetFromMap(dkgSet) + leader := nodeSet.GetSubSet(1, types.NewNodeLeaderTarget( + crs, pos.Height)) + for nID := range leader { + return nID, nil + } + return types.NodeID{}, ErrNoValidLeader +} + +func (mgr *agreementMgr) config(round uint64) *agreementMgrConfig { + mgr.lock.RLock() + defer mgr.lock.RUnlock() + if round < mgr.configs[0].RoundID() { + panic(ErrRoundOutOfRange) + } + roundIndex := round - mgr.configs[0].RoundID() + if roundIndex >= uint64(len(mgr.configs)) { + return nil + } + return &mgr.configs[roundIndex] +} + +func (mgr *agreementMgr) notifyRoundEvents(evts []utils.RoundEventParam) error { + mgr.lock.Lock() + defer mgr.lock.Unlock() + apply := func(e utils.RoundEventParam) error { + if len(mgr.configs) > 0 { + lastCfg := mgr.configs[len(mgr.configs)-1] + if e.BeginHeight != lastCfg.RoundEndHeight() { + return ErrInvalidBlockHeight + } + if lastCfg.RoundID() == e.Round { + mgr.configs[len(mgr.configs)-1].ExtendLength() + } else if lastCfg.RoundID()+1 == e.Round { + mgr.configs = append(mgr.configs, newAgreementMgrConfig( + 
lastCfg, e.Config, e.CRS)) + } else { + return ErrInvalidRoundID + } + } else { + c := agreementMgrConfig{} + c.from(e.Round, e.Config, e.CRS) + c.SetRoundBeginHeight(e.BeginHeight) + mgr.configs = append(mgr.configs, c) + } + return nil + } + for _, e := range evts { + if err := apply(e); err != nil { + return err + } + } + return nil +} + +func (mgr *agreementMgr) checkProposer( + round uint64, proposerID types.NodeID) error { + if round == mgr.curRoundSetting.round { + if _, exist := mgr.curRoundSetting.dkgSet[proposerID]; !exist { + return ErrNotInNotarySet + } + } else if round == mgr.curRoundSetting.round+1 { + setting := mgr.generateSetting(round) + if setting == nil { + return ErrConfigurationNotReady + } + if _, exist := setting.dkgSet[proposerID]; !exist { + return ErrNotInNotarySet + } + } + return nil +} + +func (mgr *agreementMgr) processVote(v *types.Vote) (err error) { + if !mgr.recv.isNotary { + return nil + } + if mgr.voteFilter.Filter(v) { + return nil + } + if err := mgr.checkProposer(v.Position.Round, v.ProposerID); err != nil { + return err + } + if err = mgr.baModule.processVote(v); err == nil { + mgr.baModule.updateFilter(mgr.voteFilter) + mgr.voteFilter.AddVote(v) + } + if err == ErrSkipButNoError { + err = nil + } + return +} + +func (mgr *agreementMgr) processBlock(b *types.Block) error { + if err := mgr.checkProposer(b.Position.Round, b.ProposerID); err != nil { + return err + } + return mgr.baModule.processBlock(b) +} + +func (mgr *agreementMgr) touchAgreementResult( + result *types.AgreementResult) (first bool) { + // DO NOT LOCK THIS FUNCTION!!!!!!!! YOU WILL REGRET IT!!!!! + if _, exist := mgr.processedBAResult[result.Position]; !exist { + first = true + if len(mgr.processedBAResult) > maxResultCache { + for k := range mgr.processedBAResult { + // Randomly drop one element. + delete(mgr.processedBAResult, k) + break + } + } + mgr.processedBAResult[result.Position] = struct{}{} + } + return +} + +func (mgr *agreementMgr) untouchAgreementResult( + result *types.AgreementResult) { + // DO NOT LOCK THIS FUNCTION!!!!!!!! YOU WILL REGRET IT!!!!! 
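+ // processedBAResult is a bounded dedup set: touchAgreementResult marks + // a result as seen, evicting an arbitrary key via randomized map + // iteration once maxResultCache is exceeded, while the delete below + // clears the mark so the result can be handled again.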
+ delete(mgr.processedBAResult, result.Position) +} + +func (mgr *agreementMgr) processAgreementResult( + result *types.AgreementResult) error { + aID := mgr.baModule.agreementID() + if isStop(aID) { + return nil + } + if result.Position == aID && !mgr.baModule.confirmed() { + mgr.logger.Info("Syncing BA", "position", result.Position) + if result.Position.Round >= DKGDelayRound { + return mgr.baModule.processAgreementResult(result) + } + for key := range result.Votes { + if err := mgr.baModule.processVote(&result.Votes[key]); err != nil { + return err + } + } + } else if result.Position.Newer(aID) { + mgr.logger.Info("Fast syncing BA", "position", result.Position) + if result.Position.Round < DKGDelayRound { + mgr.logger.Debug("Calling Network.PullBlocks for fast syncing BA", + "hash", result.BlockHash) + mgr.network.PullBlocks(common.Hashes{result.BlockHash}) + for key := range result.Votes { + if err := mgr.baModule.processVote(&result.Votes[key]); err != nil { + return err + } + } + } + setting := mgr.generateSetting(result.Position.Round) + if setting == nil { + mgr.logger.Warn("unable to get setting", "round", + result.Position.Round) + return ErrConfigurationNotReady + } + mgr.curRoundSetting = setting + leader, err := mgr.calcLeader(setting.dkgSet, setting.crs, result.Position) + if err != nil { + return err + } + mgr.baModule.restart( + setting.dkgSet, setting.threshold, + result.Position, leader, setting.crs) + if result.Position.Round >= DKGDelayRound { + return mgr.baModule.processAgreementResult(result) + } + } + return nil +} + +func (mgr *agreementMgr) processFinalizedBlock(block *types.Block) error { + aID := mgr.baModule.agreementID() + if block.Position.Older(aID) { + return nil + } + mgr.baModule.processFinalizedBlock(block) + return nil +} + +func (mgr *agreementMgr) stop() { + // Stop all running agreement modules. + func() { + mgr.lock.Lock() + defer mgr.lock.Unlock() + mgr.baModule.stop() + }() + // Block until all routines are done. + mgr.waitGroup.Wait() +} + +func (mgr *agreementMgr) generateSetting(round uint64) *baRoundSetting { + if setting, exist := mgr.settingCache.Get(round); exist { + return setting.(*baRoundSetting) + } + curConfig := mgr.config(round) + if curConfig == nil { + return nil + } + var dkgSet map[types.NodeID]struct{} + if round >= DKGDelayRound { + _, qualified, err := typesDKG.CalcQualifyNodes( + mgr.gov.DKGMasterPublicKeys(round), + mgr.gov.DKGComplaints(round), + utils.GetDKGThreshold(mgr.gov.Configuration(round)), + ) + if err != nil { + mgr.logger.Error("Failed to get gpk", "round", round, "error", err) + return nil + } + dkgSet = qualified + } + if len(dkgSet) == 0 { + var err error + dkgSet, err = mgr.cache.GetNotarySet(round) + if err != nil { + mgr.logger.Error("Failed to get notarySet", "round", round, "error", err) + return nil + } + } + setting := &baRoundSetting{ + crs: curConfig.crs, + dkgSet: dkgSet, + round: round, + threshold: utils.GetBAThreshold(&types.Config{ + NotarySetSize: curConfig.notarySetSize}), + } + mgr.settingCache.Add(round, setting) + return setting +} + +func (mgr *agreementMgr) runBA(initRound uint64) { + // These are round-based variables. + var ( + currentRound uint64 + nextRound = initRound + curConfig = mgr.config(initRound) + setting = &baRoundSetting{} + tickDuration time.Duration + ticker Ticker + ) + + // Check if this routine needs to be awake in this round and prepare + // essential variables if so.
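+ // checkRound blocks until the setting for nextRound can be built and + // advances currentRound/nextRound through its deferred update.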
+ checkRound := func() (isDKG bool) { + defer func() { + currentRound = nextRound + nextRound++ + }() + // Wait until the configuration for the next round is ready. + for { + if setting = mgr.generateSetting(nextRound); setting != nil { + break + } else { + mgr.logger.Debug("Round is not ready", "round", nextRound) + time.Sleep(1 * time.Second) + } + } + _, isDKG = setting.dkgSet[mgr.ID] + if isDKG { + mgr.logger.Info("Selected as dkg set", + "ID", mgr.ID, + "round", nextRound) + } else { + mgr.logger.Info("Not selected as dkg set", + "ID", mgr.ID, + "round", nextRound) + } + // Set up the ticker. + if tickDuration != curConfig.lambdaBA { + if ticker != nil { + ticker.Stop() + } + ticker = newTicker(mgr.gov, nextRound, TickerBA) + tickDuration = curConfig.lambdaBA + } + setting.ticker = ticker + return + } +Loop: + for { + select { + case <-mgr.ctx.Done(): + break Loop + default: + } + mgr.recv.isNotary = checkRound() + mgr.voteFilter = utils.NewVoteFilter() + mgr.voteFilter.Position.Round = currentRound + mgr.recv.emptyBlockHashMap = &sync.Map{} + if currentRound >= DKGDelayRound && mgr.recv.isNotary { + var err error + mgr.recv.npks, mgr.recv.psigSigner, err = + mgr.con.cfgModule.getDKGInfo(currentRound, false) + if err != nil { + mgr.logger.Warn("cannot get dkg info", + "round", currentRound, "error", err) + } + } else { + mgr.recv.npks = nil + mgr.recv.psigSigner = nil + } + // Run BA for this round. + mgr.recv.restartNotary <- types.Position{ + Round: currentRound, + Height: math.MaxUint64, + } + if err := mgr.baRoutineForOneRound(setting); err != nil { + mgr.logger.Error("BA routine failed", + "error", err, + "nodeID", mgr.ID) + break Loop + } + } +} + +func (mgr *agreementMgr) baRoutineForOneRound( + setting *baRoundSetting) (err error) { + agr := mgr.baModule + recv := mgr.recv + oldPos := agr.agreementID() + restart := func(restartPos types.Position) (breakLoop bool, err error) { + if !isStop(restartPos) { + if restartPos.Height+1 >= mgr.config(setting.round).RoundEndHeight() { + for { + select { + case <-mgr.ctx.Done(): + // Abort the wait when the manager shuts down. + breakLoop = true + return + default: + } + tipRound := mgr.bcModule.tipRound() + if tipRound > setting.round { + break + } else { + mgr.logger.Debug("Waiting blockChain to change round...", + "curRound", setting.round, + "tipRound", tipRound) + } + time.Sleep(100 * time.Millisecond) + } + // This round is finished. + breakLoop = true + return + } + if restartPos.Older(oldPos) { + // The restartNotary event is triggered by 'BlockConfirmed' + // of some older block. + return + } + } + var nextHeight uint64 + var nextTime time.Time + for { + // Make sure we are stoppable.
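+ // Poll bcModule.nextBlock every 100ms until the next height to + // propose passes restartPos, aborting if the manager shuts down.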
+ select { + case <-mgr.ctx.Done(): + breakLoop = true + return + default: + } + nextHeight, nextTime = mgr.bcModule.nextBlock() + if nextHeight != notReadyHeight { + if isStop(restartPos) { + break + } + if nextHeight > restartPos.Height { + break + } + } + mgr.logger.Debug("BlockChain not ready!!!", + "old", oldPos, "restart", restartPos, "next", nextHeight) + time.Sleep(100 * time.Millisecond) + } + nextPos := types.Position{ + Round: setting.round, + Height: nextHeight, + } + oldPos = nextPos + var leader types.NodeID + leader, err = mgr.calcLeader(setting.dkgSet, setting.crs, nextPos) + if err != nil { + return + } + time.Sleep(time.Until(nextTime)) + setting.ticker.Restart() + agr.restart(setting.dkgSet, setting.threshold, nextPos, leader, setting.crs) + return + } +Loop: + for { + select { + case <-mgr.ctx.Done(): + break Loop + default: + } + if agr.confirmed() { + // Block until restartPos is received. + select { + case restartPos := <-recv.restartNotary: + breakLoop, err := restart(restartPos) + if err != nil { + return err + } + if breakLoop { + break Loop + } + case <-mgr.ctx.Done(): + break Loop + } + } + select { + case restartPos := <-recv.restartNotary: + breakLoop, err := restart(restartPos) + if err != nil { + return err + } + if breakLoop { + break Loop + } + default: + } + if !mgr.recv.isNotary { + select { + case <-setting.ticker.Tick(): + continue Loop + case <-mgr.ctx.Done(): + break Loop + } + } + if err = agr.nextState(); err != nil { + mgr.logger.Error("Failed to proceed to next state", + "nodeID", mgr.ID.String(), + "error", err) + break Loop + } + if agr.pullVotes() { + pos := agr.agreementID() + mgr.logger.Debug("Calling Network.PullVotes for syncing votes", + "position", pos) + mgr.network.PullVotes(pos) + } + for i := 0; i < agr.clocks(); i++ { + // Priority select for agreement.done(). + select { + case <-agr.done(): + continue Loop + default: + } + select { + case <-agr.done(): + continue Loop + case <-setting.ticker.Tick(): + } + } + } + return nil +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-state.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-state.go new file mode 100644 index 000000000..fc2b6f3d5 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-state.go @@ -0,0 +1,213 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "fmt" + + "github.com/byzantine-lab/dexon-consensus/core/types" +) + +// Errors for agreement state module. +var ( + ErrNoEnoughVoteInPrepareState = fmt.Errorf("not enough votes in prepare state") + ErrNoEnoughVoteInAckState = fmt.Errorf("not enough votes in ack state") +) + +// agreementStateType is the state of the agreement. +type agreementStateType int + +// agreementStateType enum.
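+// A round normally advances fast -> fastVote -> initial -> preCommit -> +// commit -> forward -> pullVote; pullVote and sleep repeat themselves, +// and sleep is entered once the agreement has produced its output.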
+const ( + stateFast agreementStateType = iota + stateFastVote + stateInitial + statePreCommit + stateCommit + stateForward + statePullVote + stateSleep +) + +type agreementState interface { + state() agreementStateType + nextState() (agreementState, error) + clocks() int +} + +//----- FastState ----- +type fastState struct { + a *agreementData +} + +func newFastState(a *agreementData) *fastState { + return &fastState{a: a} +} + +func (s *fastState) state() agreementStateType { return stateFast } +func (s *fastState) clocks() int { return 0 } +func (s *fastState) nextState() (agreementState, error) { + if func() bool { + s.a.lock.Lock() + defer s.a.lock.Unlock() + return s.a.isLeader + }() { + hash := s.a.recv.ProposeBlock() + if hash != types.NullBlockHash { + s.a.lock.Lock() + defer s.a.lock.Unlock() + s.a.recv.ProposeVote(types.NewVote(types.VoteFast, hash, s.a.period)) + } + } + return newFastVoteState(s.a), nil +} + +//----- FastVoteState ----- +type fastVoteState struct { + a *agreementData +} + +func newFastVoteState(a *agreementData) *fastVoteState { + return &fastVoteState{a: a} +} + +func (s *fastVoteState) state() agreementStateType { return stateFastVote } +func (s *fastVoteState) clocks() int { return 3 } +func (s *fastVoteState) nextState() (agreementState, error) { + return newInitialState(s.a), nil +} + +//----- InitialState ----- +type initialState struct { + a *agreementData +} + +func newInitialState(a *agreementData) *initialState { + return &initialState{a: a} +} + +func (s *initialState) state() agreementStateType { return stateInitial } +func (s *initialState) clocks() int { return 0 } +func (s *initialState) nextState() (agreementState, error) { + if func() bool { + s.a.lock.Lock() + defer s.a.lock.Unlock() + return !s.a.isLeader + }() { + // Leader already proposed block in fastState. 
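+ // A non-leader proposes its own candidate block here and broadcasts + // an init vote for it; the leader skips this step since it already + // proposed during fastState.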
+ hash := s.a.recv.ProposeBlock() + s.a.lock.Lock() + defer s.a.lock.Unlock() + s.a.recv.ProposeVote(types.NewVote(types.VoteInit, hash, s.a.period)) + } + return newPreCommitState(s.a), nil +} + +//----- PreCommitState ----- +type preCommitState struct { + a *agreementData +} + +func newPreCommitState(a *agreementData) *preCommitState { + return &preCommitState{a: a} +} + +func (s *preCommitState) state() agreementStateType { return statePreCommit } +func (s *preCommitState) clocks() int { return 2 } +func (s *preCommitState) nextState() (agreementState, error) { + s.a.lock.RLock() + defer s.a.lock.RUnlock() + if s.a.lockValue == types.SkipBlockHash || + s.a.lockValue == types.NullBlockHash { + hash := s.a.leader.leaderBlockHash() + s.a.recv.ProposeVote(types.NewVote(types.VotePreCom, hash, s.a.period)) + } else { + s.a.recv.ProposeVote(types.NewVote( + types.VotePreCom, s.a.lockValue, s.a.period)) + } + return newCommitState(s.a), nil +} + +//----- CommitState ----- +type commitState struct { + a *agreementData +} + +func newCommitState(a *agreementData) *commitState { + return &commitState{a: a} +} + +func (s *commitState) state() agreementStateType { return stateCommit } +func (s *commitState) clocks() int { return 2 } +func (s *commitState) nextState() (agreementState, error) { + s.a.lock.Lock() + defer s.a.lock.Unlock() + s.a.recv.ProposeVote(types.NewVote(types.VoteCom, s.a.lockValue, s.a.period)) + return newForwardState(s.a), nil +} + +// ----- ForwardState ----- +type forwardState struct { + a *agreementData +} + +func newForwardState(a *agreementData) *forwardState { + return &forwardState{a: a} +} + +func (s *forwardState) state() agreementStateType { return stateForward } +func (s *forwardState) clocks() int { return 4 } + +func (s *forwardState) nextState() (agreementState, error) { + return newPullVoteState(s.a), nil +} + +// ----- PullVoteState ----- +// pullVoteState is a special state to ensure the assumption in the consensus +// algorithm that every vote will eventually arrive for all nodes. +type pullVoteState struct { + a *agreementData +} + +func newPullVoteState(a *agreementData) *pullVoteState { + return &pullVoteState{a: a} +} + +func (s *pullVoteState) state() agreementStateType { return statePullVote } +func (s *pullVoteState) clocks() int { return 4 } + +func (s *pullVoteState) nextState() (agreementState, error) { + return s, nil +} + +// ----- SleepState ----- +// sleepState is a special state after BA has output and waits for restart. +type sleepState struct { + a *agreementData +} + +func newSleepState(a *agreementData) *sleepState { + return &sleepState{a: a} +} + +func (s *sleepState) state() agreementStateType { return stateSleep } +func (s *sleepState) clocks() int { return 65536 } + +func (s *sleepState) nextState() (agreementState, error) { + return s, nil +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement.go new file mode 100644 index 000000000..bad6afa2b --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement.go @@ -0,0 +1,797 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. 
+// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "github.com/byzantine-lab/dexon-consensus/common" + "github.com/byzantine-lab/dexon-consensus/core/types" + "github.com/byzantine-lab/dexon-consensus/core/utils" +) + +// closedchan is a reusable closed channel. +var closedchan = make(chan struct{}) + +func init() { + close(closedchan) +} + +// Errors for agreement module. +var ( + ErrInvalidVote = fmt.Errorf("invalid vote") + ErrNotInNotarySet = fmt.Errorf("not in notary set") + ErrIncorrectVoteSignature = fmt.Errorf("incorrect vote signature") + ErrIncorrectVotePartialSignature = fmt.Errorf("incorrect vote psig") + ErrMismatchBlockPosition = fmt.Errorf("mismatch block position") +) + +// ErrFork for fork error in agreement. +type ErrFork struct { + nID types.NodeID + old, new common.Hash +} + +func (e *ErrFork) Error() string { + return fmt.Sprintf("fork is found for %s, old %s, new %s", + e.nID.String(), e.old, e.new) +} + +// ErrForkVote for fork vote error in agreement. +type ErrForkVote struct { + nID types.NodeID + old, new *types.Vote +} + +func (e *ErrForkVote) Error() string { + return fmt.Sprintf("fork vote is found for %s, old %s, new %s", + e.nID.String(), e.old, e.new) +} + +func newVoteListMap() []map[types.NodeID]*types.Vote { + listMap := make([]map[types.NodeID]*types.Vote, types.MaxVoteType) + for idx := range listMap { + listMap[idx] = make(map[types.NodeID]*types.Vote) + } + return listMap +} + +// agreementReceiver is the interface receiving agreement events. +type agreementReceiver interface { + ProposeVote(vote *types.Vote) + ProposeBlock() common.Hash + // ConfirmBlock is called with the lock held. User can safely use all data within + // agreement module. + ConfirmBlock(common.Hash, map[types.NodeID]*types.Vote) + PullBlocks(common.Hashes) + ReportForkVote(v1, v2 *types.Vote) + ReportForkBlock(b1, b2 *types.Block) + VerifyPartialSignature(vote *types.Vote) (bool, bool) +} + +type pendingBlock struct { + block *types.Block + receivedTime time.Time +} + +type pendingVote struct { + vote *types.Vote + receivedTime time.Time +} + +// agreementData is the data for agreementState. +type agreementData struct { + recv agreementReceiver + + ID types.NodeID + isLeader bool + leader *leaderSelector + lockValue common.Hash + lockIter uint64 + period uint64 + requiredVote int + votes map[uint64][]map[types.NodeID]*types.Vote + lock sync.RWMutex + blocks map[types.NodeID]*types.Block + blocksLock sync.Mutex +} + +// agreement is the agreement protocol described in the Crypto Shuffle Algorithm. +type agreement struct { + state agreementState + data *agreementData + aID *atomic.Value + doneChan chan struct{} + notarySet map[types.NodeID]struct{} + hasVoteFast bool + hasOutput bool + lock sync.RWMutex + pendingBlock []pendingBlock + pendingVote []pendingVote + pendingAgreementResult map[types.Position]*types.AgreementResult + candidateBlock map[common.Hash]*types.Block + fastForward chan uint64 + signer *utils.Signer + logger common.Logger +} + +// newAgreement creates an agreement instance.
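+// The returned instance starts in the stopped state; callers must invoke +// restart with a real notary set and position before it can process votes.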
+func newAgreement( + ID types.NodeID, + recv agreementReceiver, + leader *leaderSelector, + signer *utils.Signer, + logger common.Logger) *agreement { + agreement := &agreement{ + data: &agreementData{ + recv: recv, + ID: ID, + leader: leader, + }, + aID: &atomic.Value{}, + pendingAgreementResult: make(map[types.Position]*types.AgreementResult), + candidateBlock: make(map[common.Hash]*types.Block), + fastForward: make(chan uint64, 1), + signer: signer, + logger: logger, + } + agreement.stop() + return agreement +} + +// restart the agreement +func (a *agreement) restart( + notarySet map[types.NodeID]struct{}, + threshold int, aID types.Position, leader types.NodeID, + crs common.Hash) { + if !func() bool { + a.lock.Lock() + defer a.lock.Unlock() + if !isStop(aID) { + oldAID := a.agreementID() + if !isStop(oldAID) && !aID.Newer(oldAID) { + return false + } + } + a.logger.Debug("Restarting BA", + "notarySet", notarySet, "position", aID, "leader", leader) + a.data.lock.Lock() + defer a.data.lock.Unlock() + a.data.blocksLock.Lock() + defer a.data.blocksLock.Unlock() + a.data.votes = make(map[uint64][]map[types.NodeID]*types.Vote) + a.data.votes[1] = newVoteListMap() + a.data.period = 2 + a.data.blocks = make(map[types.NodeID]*types.Block) + a.data.requiredVote = threshold + a.data.leader.restart(crs) + a.data.lockValue = types.SkipBlockHash + a.data.lockIter = 0 + a.data.isLeader = a.data.ID == leader + if a.doneChan != nil { + close(a.doneChan) + } + a.doneChan = make(chan struct{}) + a.fastForward = make(chan uint64, 1) + a.hasVoteFast = false + a.hasOutput = false + a.state = newFastState(a.data) + a.notarySet = notarySet + a.candidateBlock = make(map[common.Hash]*types.Block) + a.aID.Store(struct { + pos types.Position + leader types.NodeID + }{aID, leader}) + return true + }() { + return + } + + if isStop(aID) { + return + } + + var result *types.AgreementResult + func() { + a.lock.Lock() + defer a.lock.Unlock() + newPendingAgreementResult := make( + map[types.Position]*types.AgreementResult) + for pos, agr := range a.pendingAgreementResult { + if pos.Newer(aID) { + newPendingAgreementResult[pos] = agr + } else if pos == aID { + result = agr + } + } + a.pendingAgreementResult = newPendingAgreementResult + }() + + expireTime := time.Now().Add(-10 * time.Second) + replayBlock := make([]*types.Block, 0) + func() { + a.lock.Lock() + defer a.lock.Unlock() + newPendingBlock := make([]pendingBlock, 0) + for _, pending := range a.pendingBlock { + if aID.Newer(pending.block.Position) { + continue + } else if pending.block.Position == aID { + if result == nil || + result.Position.Round < DKGDelayRound || + result.BlockHash == pending.block.Hash { + replayBlock = append(replayBlock, pending.block) + } + } else if pending.receivedTime.After(expireTime) { + newPendingBlock = append(newPendingBlock, pending) + } + } + a.pendingBlock = newPendingBlock + }() + + replayVote := make([]*types.Vote, 0) + func() { + a.lock.Lock() + defer a.lock.Unlock() + newPendingVote := make([]pendingVote, 0) + for _, pending := range a.pendingVote { + if aID.Newer(pending.vote.Position) { + continue + } else if pending.vote.Position == aID { + if result == nil || result.Position.Round < DKGDelayRound { + replayVote = append(replayVote, pending.vote) + } + } else if pending.receivedTime.After(expireTime) { + newPendingVote = append(newPendingVote, pending) + } + } + a.pendingVote = newPendingVote + }() + + for _, block := range replayBlock { + if err := a.processBlock(block); err != nil { + a.logger.Error("Failed to 
process block when restarting agreement",
+            "block", block, "error", err)
+    }
+  }
+
+  if result != nil {
+    if err := a.processAgreementResult(result); err != nil {
+      a.logger.Error("Failed to process agreement result when restarting",
+        "result", result, "error", err)
+    }
+  }
+
+  for _, vote := range replayVote {
+    if err := a.processVote(vote); err != nil {
+      a.logger.Error("Failed to process vote when restarting agreement",
+        "vote", vote, "error", err)
+    }
+  }
+}
+
+func (a *agreement) stop() {
+  a.restart(make(map[types.NodeID]struct{}), int(math.MaxInt32),
+    types.Position{
+      Height: math.MaxUint64,
+    },
+    types.NodeID{}, common.Hash{})
+}
+
+func isStop(aID types.Position) bool {
+  return aID.Height == math.MaxUint64
+}
+
+// clocks returns how many times this state is required.
+func (a *agreement) clocks() int {
+  a.data.lock.RLock()
+  defer a.data.lock.RUnlock()
+  scale := int(a.data.period) - 1
+  if a.state.state() == stateForward {
+    scale = 1
+  }
+  if scale < 1 {
+    // just in case.
+    scale = 1
+  }
+  // 10 is a magic number derived from many years of experience.
+  if scale > 10 {
+    scale = 10
+  }
+  return a.state.clocks() * scale
+}
+
+// pullVotes returns whether the current agreement requires more votes to
+// continue.
+func (a *agreement) pullVotes() bool {
+  a.data.lock.RLock()
+  defer a.data.lock.RUnlock()
+  return a.state.state() == statePullVote ||
+    a.state.state() == stateInitial ||
+    (a.state.state() == statePreCommit && (a.data.period%3) == 0)
+}
+
+// agreementID returns the current agreementID.
+func (a *agreement) agreementID() types.Position {
+  return a.aID.Load().(struct {
+    pos    types.Position
+    leader types.NodeID
+  }).pos
+}
+
+// leader returns the current leader.
+func (a *agreement) leader() types.NodeID {
+  return a.aID.Load().(struct {
+    pos    types.Position
+    leader types.NodeID
+  }).leader
+}
+
+// nextState is called at the specific clock time.
+func (a *agreement) nextState() (err error) {
+  a.lock.Lock()
+  defer a.lock.Unlock()
+  if a.hasOutput {
+    a.state = newSleepState(a.data)
+    return
+  }
+  a.state, err = a.state.nextState()
+  return
+}
+
+func (a *agreement) sanityCheck(vote *types.Vote) error {
+  if vote.Type >= types.MaxVoteType {
+    return ErrInvalidVote
+  }
+  ok, err := utils.VerifyVoteSignature(vote)
+  if err != nil {
+    return err
+  }
+  if !ok {
+    return ErrIncorrectVoteSignature
+  }
+  if vote.Position.Round != a.agreementID().Round {
+    // TODO(jimmy): maybe we can verify partial signature at agreement-mgr.
+    return nil
+  }
+  if ok, report := a.data.recv.VerifyPartialSignature(vote); !ok {
+    if report {
+      return ErrIncorrectVotePartialSignature
+    }
+    return ErrSkipButNoError
+  }
+  return nil
+}
+
+func (a *agreement) checkForkVote(vote *types.Vote) (
+  alreadyExist bool, err error) {
+  a.data.lock.RLock()
+  defer a.data.lock.RUnlock()
+  if votes, exist := a.data.votes[vote.Period]; exist {
+    if oldVote, exist := votes[vote.Type][vote.ProposerID]; exist {
+      alreadyExist = true
+      if vote.BlockHash != oldVote.BlockHash {
+        a.data.recv.ReportForkVote(oldVote, vote)
+        err = &ErrForkVote{vote.ProposerID, oldVote, vote}
+        return
+      }
+    }
+  }
+  return
+}
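agreementID and leader above load one atomic.Value that stores both fields in a single anonymous struct, so the position and the leader are always read as one consistent, lock-free snapshot. A self-contained sketch of the pattern, with hypothetical names:

package main

import (
	"fmt"
	"sync/atomic"
)

// snapshot bundles the fields that must be observed together.
type snapshot struct {
	height uint64
	leader string
}

func main() {
	var v atomic.Value
	v.Store(snapshot{height: 1, leader: "node-a"})

	// Writers replace the whole struct in one Store; readers never see a
	// half-updated pair.
	go v.Store(snapshot{height: 2, leader: "node-b"})

	s := v.Load().(snapshot) // one consistent pair, no lock taken
	fmt.Println(s.height, s.leader)
}

+// prepareVote prepares a vote.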
+func (a *agreement) prepareVote(vote *types.Vote) (err error) { + vote.Position = a.agreementID() + err = a.signer.SignVote(vote) + return +} + +func (a *agreement) updateFilter(filter *utils.VoteFilter) { + if isStop(a.agreementID()) { + return + } + a.lock.RLock() + defer a.lock.RUnlock() + a.data.lock.RLock() + defer a.data.lock.RUnlock() + filter.Confirm = a.hasOutput + filter.LockIter = a.data.lockIter + filter.Period = a.data.period + filter.Position.Height = a.agreementID().Height +} + +// processVote is the entry point for processing Vote. +func (a *agreement) processVote(vote *types.Vote) error { + a.lock.Lock() + defer a.lock.Unlock() + if err := a.sanityCheck(vote); err != nil { + return err + } + aID := a.agreementID() + + // Agreement module has stopped. + if isStop(aID) { + // Hacky way to not drop first votes when round just begins. + if vote.Position.Round == aID.Round { + a.pendingVote = append(a.pendingVote, pendingVote{ + vote: vote, + receivedTime: time.Now().UTC(), + }) + return nil + } + return ErrSkipButNoError + } + if vote.Position != aID { + if aID.Newer(vote.Position) { + return nil + } + a.pendingVote = append(a.pendingVote, pendingVote{ + vote: vote, + receivedTime: time.Now().UTC(), + }) + return nil + } + exist, err := a.checkForkVote(vote) + if err != nil { + return err + } + if exist { + return nil + } + + a.data.lock.Lock() + defer a.data.lock.Unlock() + if _, exist := a.data.votes[vote.Period]; !exist { + a.data.votes[vote.Period] = newVoteListMap() + } + if _, exist := a.data.votes[vote.Period][vote.Type][vote.ProposerID]; exist { + return nil + } + a.data.votes[vote.Period][vote.Type][vote.ProposerID] = vote + if !a.hasOutput && + (vote.Type == types.VoteCom || + vote.Type == types.VoteFast || + vote.Type == types.VoteFastCom) { + if hash, ok := a.data.countVoteNoLock(vote.Period, vote.Type); ok && + hash != types.SkipBlockHash { + if vote.Type == types.VoteFast { + if !a.hasVoteFast { + if a.state.state() == stateFast || + a.state.state() == stateFastVote { + a.data.recv.ProposeVote( + types.NewVote(types.VoteFastCom, hash, vote.Period)) + a.hasVoteFast = true + + } + if a.data.lockIter == 0 { + a.data.lockValue = hash + a.data.lockIter = 1 + } + } + } else { + a.hasOutput = true + a.data.recv.ConfirmBlock(hash, + a.data.votes[vote.Period][vote.Type]) + if a.doneChan != nil { + close(a.doneChan) + a.doneChan = nil + } + } + return nil + } + } else if a.hasOutput { + return nil + } + + // Check if the agreement requires fast-forwarding. + if len(a.fastForward) > 0 { + return nil + } + if vote.Type == types.VotePreCom { + if vote.Period < a.data.lockIter { + // This PreCom is useless for us. + return nil + } + if hash, ok := a.data.countVoteNoLock(vote.Period, vote.Type); ok && + hash != types.SkipBlockHash { + // Condition 1. + if vote.Period > a.data.lockIter { + a.data.lockValue = hash + a.data.lockIter = vote.Period + } + // Condition 2. + if vote.Period > a.data.period { + a.fastForward <- vote.Period + if a.doneChan != nil { + close(a.doneChan) + a.doneChan = nil + } + return nil + } + } + } + // Condition 3. 
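+	// (Condition 3, per the checks below: once a full threshold of commit
+	// votes arrives in a period not older than the current one, regardless of
+	// the hashes they carry, pull any referenced blocks we have not seen yet
+	// and fast-forward the agreement to that period plus one.)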
+ if vote.Type == types.VoteCom && vote.Period >= a.data.period && + len(a.data.votes[vote.Period][types.VoteCom]) >= a.data.requiredVote { + hashes := common.Hashes{} + addPullBlocks := func(voteType types.VoteType) { + for _, vote := range a.data.votes[vote.Period][voteType] { + if vote.BlockHash == types.NullBlockHash || + vote.BlockHash == types.SkipBlockHash { + continue + } + if _, found := a.findCandidateBlockNoLock(vote.BlockHash); !found { + hashes = append(hashes, vote.BlockHash) + } + } + } + addPullBlocks(types.VotePreCom) + addPullBlocks(types.VoteCom) + if len(hashes) > 0 { + a.data.recv.PullBlocks(hashes) + } + a.fastForward <- vote.Period + 1 + if a.doneChan != nil { + close(a.doneChan) + a.doneChan = nil + } + return nil + } + return nil +} + +func (a *agreement) processFinalizedBlock(block *types.Block) { + a.lock.Lock() + defer a.lock.Unlock() + if a.hasOutput { + return + } + aID := a.agreementID() + if aID.Older(block.Position) { + return + } + a.addCandidateBlockNoLock(block) + a.hasOutput = true + a.data.lock.Lock() + defer a.data.lock.Unlock() + a.data.recv.ConfirmBlock(block.Hash, nil) + if a.doneChan != nil { + close(a.doneChan) + a.doneChan = nil + } +} + +func (a *agreement) processAgreementResult(result *types.AgreementResult) error { + a.lock.Lock() + defer a.lock.Unlock() + aID := a.agreementID() + if result.Position.Older(aID) { + return nil + } else if result.Position.Newer(aID) { + a.pendingAgreementResult[result.Position] = result + return nil + } + if a.hasOutput { + return nil + } + a.data.lock.Lock() + defer a.data.lock.Unlock() + if _, exist := a.findCandidateBlockNoLock(result.BlockHash); !exist { + a.data.recv.PullBlocks(common.Hashes{result.BlockHash}) + } + a.hasOutput = true + a.data.recv.ConfirmBlock(result.BlockHash, nil) + if a.doneChan != nil { + close(a.doneChan) + a.doneChan = nil + } + return nil +} + +func (a *agreement) done() <-chan struct{} { + a.lock.Lock() + defer a.lock.Unlock() + select { + case period := <-a.fastForward: + a.data.lock.Lock() + defer a.data.lock.Unlock() + if period <= a.data.period { + break + } + a.data.setPeriod(period) + a.state = newPreCommitState(a.data) + a.doneChan = make(chan struct{}) + return closedchan + default: + } + if a.doneChan == nil { + return closedchan + } + return a.doneChan +} + +func (a *agreement) confirmed() bool { + a.lock.RLock() + defer a.lock.RUnlock() + return a.confirmedNoLock() +} + +func (a *agreement) confirmedNoLock() bool { + return a.hasOutput +} + +// processBlock is the entry point for processing Block. +func (a *agreement) processBlock(block *types.Block) error { + checkSkip := func() bool { + aID := a.agreementID() + if block.Position != aID { + // Agreement module has stopped. + if !isStop(aID) { + if aID.Newer(block.Position) { + return true + } + } + } + return false + } + if checkSkip() { + return nil + } + if err := utils.VerifyBlockSignature(block); err != nil { + return err + } + + a.lock.Lock() + defer a.lock.Unlock() + a.data.blocksLock.Lock() + defer a.data.blocksLock.Unlock() + aID := a.agreementID() + // a.agreementID might change during lock, so we need to checkSkip again. 
+  if checkSkip() {
+    return nil
+  } else if aID != block.Position {
+    a.pendingBlock = append(a.pendingBlock, pendingBlock{
+      block:        block,
+      receivedTime: time.Now().UTC(),
+    })
+    return nil
+  } else if a.confirmedNoLock() {
+    return nil
+  }
+  if b, exist := a.data.blocks[block.ProposerID]; exist {
+    if b.Hash != block.Hash {
+      a.data.recv.ReportForkBlock(b, block)
+      return &ErrFork{block.ProposerID, b.Hash, block.Hash}
+    }
+    return nil
+  }
+  if err := a.data.leader.processBlock(block); err != nil {
+    return err
+  }
+  a.data.blocks[block.ProposerID] = block
+  a.addCandidateBlockNoLock(block)
+  if block.ProposerID != a.data.ID &&
+    (a.state.state() == stateFast || a.state.state() == stateFastVote) &&
+    block.ProposerID == a.leader() {
+    go func() {
+      for func() bool {
+        if aID != a.agreementID() {
+          return false
+        }
+        a.lock.RLock()
+        defer a.lock.RUnlock()
+        if a.state.state() != stateFast && a.state.state() != stateFastVote {
+          return false
+        }
+        a.data.lock.RLock()
+        defer a.data.lock.RUnlock()
+        a.data.blocksLock.Lock()
+        defer a.data.blocksLock.Unlock()
+        block, exist := a.data.blocks[a.leader()]
+        if !exist {
+          return true
+        }
+        ok, err := a.data.leader.validLeader(block, a.data.leader.hashCRS)
+        if err != nil {
+          a.logger.Error("Error checking validLeader for Fast BA",
+            "error", err, "block", block)
+          return false
+        }
+        if ok {
+          a.data.recv.ProposeVote(
+            types.NewVote(types.VoteFast, block.Hash, a.data.period))
+          return false
+        }
+        return true
+      }() {
+        // TODO(jimmy): retry interval should be related to configurations.
+        time.Sleep(250 * time.Millisecond)
+      }
+    }()
+  }
+  return nil
+}
+
+func (a *agreement) addCandidateBlock(block *types.Block) {
+  a.lock.Lock()
+  defer a.lock.Unlock()
+  a.addCandidateBlockNoLock(block)
+}
+
+func (a *agreement) addCandidateBlockNoLock(block *types.Block) {
+  a.candidateBlock[block.Hash] = block
+}
+
+func (a *agreement) findCandidateBlockNoLock(
+  hash common.Hash) (*types.Block, bool) {
+  b, e := a.candidateBlock[hash]
+  return b, e
+}
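The goroutine above drives its polling with a compact idiom: the for-loop condition is an immediately invoked closure that returns true to retry and false to stop, so the defer-ed unlocks run at the end of each attempt rather than at the end of the whole loop. A self-contained sketch, with hypothetical names:

package main

import (
	"fmt"
	"sync"
	"time"
)

var (
	mu    sync.Mutex
	ready bool
)

func main() {
	go func() {
		time.Sleep(300 * time.Millisecond)
		mu.Lock()
		ready = true
		mu.Unlock()
	}()

	// Each iteration runs the closure once; defers inside it (like Unlock)
	// fire at the end of that attempt, not at the end of the loop.
	for func() bool {
		mu.Lock()
		defer mu.Unlock()
		return !ready // true: keep polling; false: condition met, stop
	}() {
		time.Sleep(100 * time.Millisecond)
	}
	fmt.Println("condition met")
}

+// findBlockNoLock finds a block in both candidate blocks and pending blocks
+// in leader-selector. A block might be confirmed by others while we can't
+// verify its validity.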
+func (a *agreement) findBlockNoLock(hash common.Hash) (*types.Block, bool) {
+  b, e := a.findCandidateBlockNoLock(hash)
+  if !e {
+    b, e = a.data.leader.findPendingBlock(hash)
+  }
+  return b, e
+}
+
+func (a *agreementData) countVote(period uint64, voteType types.VoteType) (
+  blockHash common.Hash, ok bool) {
+  a.lock.RLock()
+  defer a.lock.RUnlock()
+  return a.countVoteNoLock(period, voteType)
+}
+
+func (a *agreementData) countVoteNoLock(
+  period uint64, voteType types.VoteType) (blockHash common.Hash, ok bool) {
+  votes, exist := a.votes[period]
+  if !exist {
+    return
+  }
+  candidate := make(map[common.Hash]int)
+  for _, vote := range votes[voteType] {
+    if _, exist := candidate[vote.BlockHash]; !exist {
+      candidate[vote.BlockHash] = 0
+    }
+    candidate[vote.BlockHash]++
+  }
+  for candidateHash, votes := range candidate {
+    if votes >= a.requiredVote {
+      blockHash = candidateHash
+      ok = true
+      return
+    }
+  }
+  return
+}
+
+func (a *agreementData) setPeriod(period uint64) {
+  for i := a.period + 1; i <= period; i++ {
+    if _, exist := a.votes[i]; !exist {
+      a.votes[i] = newVoteListMap()
+    }
+  }
+  a.period = period
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/blockchain.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockchain.go
new file mode 100644
index 000000000..579ccd44c
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockchain.go
@@ -0,0 +1,681 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// .
+
+package core
+
+import (
+  "bytes"
+  "errors"
+  "fmt"
+  "math"
+  "sort"
+  "sync"
+  "time"
+
+  "github.com/byzantine-lab/dexon-consensus/common"
+  "github.com/byzantine-lab/dexon-consensus/core/crypto"
+  "github.com/byzantine-lab/dexon-consensus/core/types"
+  "github.com/byzantine-lab/dexon-consensus/core/utils"
+)
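Before blockchain.go begins: countVoteNoLock above tallies votes per block hash and reports the first hash reaching the required threshold. A trimmed, self-contained version of that counting core, with hashes simplified to strings:

package main

import "fmt"

// countVote reports the first value whose tally reaches the threshold,
// mirroring countVoteNoLock above.
func countVote(votes map[string]string, threshold int) (string, bool) {
	tally := make(map[string]int)
	for _, blockHash := range votes {
		tally[blockHash]++
	}
	for hash, n := range tally {
		if n >= threshold {
			return hash, true
		}
	}
	return "", false
}

func main() {
	votes := map[string]string{ // voter -> block hash
		"n1": "0xabc", "n2": "0xabc", "n3": "0xdef", "n4": "0xabc",
	}
	if hash, ok := countVote(votes, 3); ok {
		fmt.Println("confirmed:", hash) // 0xabc reaches the threshold
	}
}

+// Errors for sanity checks.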
+var ( + ErrBlockFromOlderPosition = errors.New("block from older position") + ErrNotGenesisBlock = errors.New("not a genesis block") + ErrIsGenesisBlock = errors.New("is a genesis block") + ErrIncorrectParentHash = errors.New("incorrect parent hash") + ErrInvalidBlockHeight = errors.New("invalid block height") + ErrInvalidRoundID = errors.New("invalid round id") + ErrInvalidTimestamp = errors.New("invalid timestamp") + ErrNotFollowTipPosition = errors.New("not follow tip position") + ErrDuplicatedPendingBlock = errors.New("duplicated pending block") + ErrRetrySanityCheckLater = errors.New("retry sanity check later") + ErrRoundNotSwitch = errors.New("round not switch") + ErrIncorrectAgreementResult = errors.New( + "incorrect block randomness result") + ErrMissingRandomness = errors.New("missing block randomness") +) + +const notReadyHeight uint64 = math.MaxUint64 + +type pendingBlockRecord struct { + position types.Position + block *types.Block +} + +type pendingBlockRecords []pendingBlockRecord + +func (pb *pendingBlockRecords) insert(p pendingBlockRecord) error { + idx := sort.Search(len(*pb), func(i int) bool { + return !(*pb)[i].position.Older(p.position) + }) + switch idx { + case len(*pb): + *pb = append(*pb, p) + default: + if (*pb)[idx].position.Equal(p.position) { + // Allow to overwrite pending block record for empty blocks, we may + // need to pull that block from others when its parent is not found + // locally. + if (*pb)[idx].block == nil && p.block != nil { + (*pb)[idx].block = p.block + return nil + } + return ErrDuplicatedPendingBlock + } + // Insert the value to that index. + *pb = append((*pb), pendingBlockRecord{}) + copy((*pb)[idx+1:], (*pb)[idx:]) + (*pb)[idx] = p + } + return nil +} + +func (pb pendingBlockRecords) searchByHeight(h uint64) ( + pendingBlockRecord, bool) { + idx := sort.Search(len(pb), func(i int) bool { + return pb[i].position.Height >= h + }) + if idx == len(pb) || pb[idx].position.Height != h { + return pendingBlockRecord{}, false + } + return pb[idx], true +} + +func (pb pendingBlockRecords) searchByPosition(p types.Position) ( + pendingBlockRecord, bool) { + idx := sort.Search(len(pb), func(i int) bool { + return !pb[i].block.Position.Older(p) + }) + if idx == len(pb) || !pb[idx].position.Equal(p) { + return pendingBlockRecord{}, false + } + return pb[idx], true +} + +type blockChainConfig struct { + utils.RoundBasedConfig + + minBlockInterval time.Duration +} + +func (c *blockChainConfig) fromConfig(round uint64, config *types.Config) { + c.minBlockInterval = config.MinBlockInterval + c.SetupRoundBasedFields(round, config) +} + +func newBlockChainConfig(prev blockChainConfig, config *types.Config) ( + c blockChainConfig) { + c = blockChainConfig{} + c.fromConfig(prev.RoundID()+1, config) + c.AppendTo(prev.RoundBasedConfig) + return +} + +type tsigVerifierGetter interface { + UpdateAndGet(uint64) (TSigVerifier, bool, error) + Purge(uint64) +} + +type blockChain struct { + lock sync.RWMutex + ID types.NodeID + lastConfirmed *types.Block + lastDelivered *types.Block + signer *utils.Signer + vGetter tsigVerifierGetter + app Application + logger common.Logger + pendingRandomnesses map[types.Position][]byte + configs []blockChainConfig + pendingBlocks pendingBlockRecords + confirmedBlocks types.BlocksByPosition + dMoment time.Time + + // Do not access this variable besides processAgreementResult. 
+ lastPosition types.Position +} + +func newBlockChain(nID types.NodeID, dMoment time.Time, initBlock *types.Block, + app Application, vGetter tsigVerifierGetter, signer *utils.Signer, + logger common.Logger) *blockChain { + return &blockChain{ + ID: nID, + lastConfirmed: initBlock, + lastDelivered: initBlock, + signer: signer, + vGetter: vGetter, + app: app, + logger: logger, + dMoment: dMoment, + pendingRandomnesses: make( + map[types.Position][]byte), + } +} + +func (bc *blockChain) notifyRoundEvents(evts []utils.RoundEventParam) error { + bc.lock.Lock() + defer bc.lock.Unlock() + apply := func(e utils.RoundEventParam) error { + if len(bc.configs) > 0 { + lastCfg := bc.configs[len(bc.configs)-1] + if e.BeginHeight != lastCfg.RoundEndHeight() { + return ErrInvalidBlockHeight + } + if lastCfg.RoundID() == e.Round { + bc.configs[len(bc.configs)-1].ExtendLength() + } else if lastCfg.RoundID()+1 == e.Round { + bc.configs = append(bc.configs, newBlockChainConfig( + lastCfg, e.Config)) + } else { + return ErrInvalidRoundID + } + } else { + c := blockChainConfig{} + c.fromConfig(e.Round, e.Config) + c.SetRoundBeginHeight(e.BeginHeight) + if bc.lastConfirmed == nil { + if c.RoundID() != 0 { + panic(fmt.Errorf( + "genesis config should from round 0, but %d", + c.RoundID())) + } + } else { + if c.RoundID() != bc.lastConfirmed.Position.Round { + panic(fmt.Errorf("incompatible config/block round %s %d", + bc.lastConfirmed, c.RoundID())) + } + if !c.Contains(bc.lastConfirmed.Position.Height) { + panic(fmt.Errorf( + "unmatched round-event with block %s %d %d %d", + bc.lastConfirmed, e.Round, e.Reset, e.BeginHeight)) + } + } + bc.configs = append(bc.configs, c) + } + return nil + } + for _, e := range evts { + if err := apply(e); err != nil { + return err + } + } + return nil +} + +func (bc *blockChain) proposeBlock(position types.Position, + proposeTime time.Time, isEmpty bool) (b *types.Block, err error) { + bc.lock.RLock() + defer bc.lock.RUnlock() + return bc.prepareBlock(position, proposeTime, isEmpty) +} + +func (bc *blockChain) extractBlocks() (ret []*types.Block) { + bc.lock.Lock() + defer bc.lock.Unlock() + for len(bc.confirmedBlocks) > 0 { + c := bc.confirmedBlocks[0] + if c.Position.Round >= DKGDelayRound && + len(c.Randomness) == 0 && + !bc.setRandomnessFromPending(c) { + break + } + c, bc.confirmedBlocks = bc.confirmedBlocks[0], bc.confirmedBlocks[1:] + ret = append(ret, c) + bc.lastDelivered = c + } + return +} + +func (bc *blockChain) sanityCheck(b *types.Block) error { + bc.lock.RLock() + defer bc.lock.RUnlock() + if bc.lastConfirmed == nil { + // It should be a genesis block. 
+    if !b.IsGenesis() {
+      return ErrNotGenesisBlock
+    }
+    if b.Timestamp.Before(bc.dMoment.Add(bc.configs[0].minBlockInterval)) {
+      return ErrInvalidTimestamp
+    }
+    return nil
+  }
+  if b.IsGenesis() {
+    return ErrIsGenesisBlock
+  }
+  if b.Position.Height != bc.lastConfirmed.Position.Height+1 {
+    if b.Position.Height > bc.lastConfirmed.Position.Height {
+      return ErrRetrySanityCheckLater
+    }
+    return ErrInvalidBlockHeight
+  }
+  tipConfig := bc.tipConfig()
+  if tipConfig.IsLastBlock(bc.lastConfirmed) {
+    if b.Position.Round != bc.lastConfirmed.Position.Round+1 {
+      return ErrRoundNotSwitch
+    }
+  } else {
+    if b.Position.Round != bc.lastConfirmed.Position.Round {
+      return ErrInvalidRoundID
+    }
+  }
+  if !b.ParentHash.Equal(bc.lastConfirmed.Hash) {
+    return ErrIncorrectParentHash
+  }
+  if b.Timestamp.Before(bc.lastConfirmed.Timestamp.Add(
+    tipConfig.minBlockInterval)) {
+    return ErrInvalidTimestamp
+  }
+  if err := utils.VerifyBlockSignature(b); err != nil {
+    return err
+  }
+  return nil
+}
+
+// addEmptyBlock is called when an empty block is confirmed by BA.
+func (bc *blockChain) addEmptyBlock(position types.Position) (
+  *types.Block, error) {
+  bc.lock.Lock()
+  defer bc.lock.Unlock()
+  add := func() *types.Block {
+    emptyB, err := bc.prepareBlock(position, time.Time{}, true)
+    if err != nil || emptyB == nil {
+      // This helper is expected to be called when an empty block is ready
+      // to be confirmed.
+      panic(err)
+    }
+    bc.confirmBlock(emptyB)
+    bc.checkIfBlocksConfirmed()
+    return emptyB
+  }
+  if bc.lastConfirmed != nil {
+    if !position.Newer(bc.lastConfirmed.Position) {
+      bc.logger.Warn("Dropping empty block: older than tip",
+        "position", &position,
+        "last-confirmed", bc.lastConfirmed)
+      return nil, ErrBlockFromOlderPosition
+    }
+    if bc.lastConfirmed.Position.Height+1 == position.Height {
+      return add(), nil
+    }
+  } else if position.Height == types.GenesisHeight && position.Round == 0 {
+    return add(), nil
+  } else {
+    return nil, ErrInvalidBlockHeight
+  }
+  return nil, bc.addPendingBlockRecord(pendingBlockRecord{position, nil})
+}
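pendingBlockRecords.insert, shown earlier in this file, keeps records ordered by position: sort.Search locates the insertion index and append+copy shifts the tail. A self-contained sketch of the same ordered-insert pattern over plain ints:

package main

import (
	"fmt"
	"sort"
)

// insertSorted keeps xs ascending: sort.Search finds the first index whose
// element is >= v, then append+copy shifts the tail right by one slot.
func insertSorted(xs []int, v int) []int {
	idx := sort.Search(len(xs), func(i int) bool { return xs[i] >= v })
	xs = append(xs, 0)         // grow by one
	copy(xs[idx+1:], xs[idx:]) // shift the tail
	xs[idx] = v
	return xs
}

func main() {
	xs := []int{10, 20, 40}
	xs = insertSorted(xs, 30)
	fmt.Println(xs) // [10 20 30 40]
}

+// addBlock should be called when the block is confirmed by BA. We don't
+// perform a sanity check against this block, and it's OK to add blocks with
+// skipped heights.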
+func (bc *blockChain) addBlock(b *types.Block) error { + if b.Position.Round >= DKGDelayRound && + len(b.Randomness) == 0 && + !bc.setRandomnessFromPending(b) { + return ErrMissingRandomness + } + bc.lock.Lock() + defer bc.lock.Unlock() + confirmed := false + if bc.lastConfirmed != nil { + if !b.Position.Newer(bc.lastConfirmed.Position) { + bc.logger.Warn("Dropping block: older than tip", + "block", b, "last-confirmed", bc.lastConfirmed) + return nil + } + if bc.lastConfirmed.Position.Height+1 == b.Position.Height { + confirmed = true + } + } else if b.IsGenesis() { + confirmed = true + } + delete(bc.pendingRandomnesses, b.Position) + if !confirmed { + return bc.addPendingBlockRecord(pendingBlockRecord{b.Position, b}) + } + bc.confirmBlock(b) + bc.checkIfBlocksConfirmed() + return nil +} + +func (bc *blockChain) tipRound() uint64 { + bc.lock.RLock() + defer bc.lock.RUnlock() + if bc.lastConfirmed == nil { + return 0 + } + offset, tipConfig := uint64(0), bc.tipConfig() + if tipConfig.IsLastBlock(bc.lastConfirmed) { + offset++ + } + return bc.lastConfirmed.Position.Round + offset +} + +func (bc *blockChain) confirmed(h uint64) bool { + bc.lock.RLock() + defer bc.lock.RUnlock() + if bc.lastConfirmed != nil && bc.lastConfirmed.Position.Height >= h { + return true + } + r, found := bc.pendingBlocks.searchByHeight(h) + if !found { + return false + } + return r.block != nil +} + +func (bc *blockChain) nextBlock() (uint64, time.Time) { + bc.lock.RLock() + defer bc.lock.RUnlock() + // It's ok to access tip config directly without checking the existence of + // lastConfirmed block in the scenario of "nextBlock" method. + tip, config := bc.lastConfirmed, bc.configs[0] + if tip == nil { + return types.GenesisHeight, bc.dMoment + } + if tip != bc.lastDelivered { + // If tip is not delivered, we should not proceed to next block. + return notReadyHeight, time.Time{} + } + return tip.Position.Height + 1, tip.Timestamp.Add(config.minBlockInterval) +} + +func (bc *blockChain) pendingBlocksWithoutRandomness() []*types.Block { + bc.lock.RLock() + defer bc.lock.RUnlock() + blocks := make([]*types.Block, 0) + for _, b := range bc.confirmedBlocks { + if b.Position.Round < DKGDelayRound || + len(b.Randomness) > 0 || + bc.setRandomnessFromPending(b) { + continue + } + blocks = append(blocks, b) + } + for _, r := range bc.pendingBlocks { + if r.position.Round < DKGDelayRound { + continue + } + if r.block != nil && + len(r.block.Randomness) == 0 && + !bc.setRandomnessFromPending(r.block) { + blocks = append(blocks, r.block) + } + } + return blocks +} + +func (bc *blockChain) lastDeliveredBlock() *types.Block { + bc.lock.RLock() + defer bc.lock.RUnlock() + return bc.lastDelivered +} + +func (bc *blockChain) lastPendingBlock() *types.Block { + bc.lock.RLock() + defer bc.lock.RUnlock() + if len(bc.confirmedBlocks) == 0 { + return nil + } + return bc.confirmedBlocks[0] +} + +///////////////////////////////////////////// +// +// internal helpers +// +///////////////////////////////////////////// + +// findPendingBlock is a helper to find a block in either pending or confirmed +// state by position. 
+func (bc *blockChain) findPendingBlock(p types.Position) *types.Block { + if idx := sort.Search(len(bc.confirmedBlocks), func(i int) bool { + return !bc.confirmedBlocks[i].Position.Older(p) + }); idx != len(bc.confirmedBlocks) && + bc.confirmedBlocks[idx].Position.Equal(p) { + return bc.confirmedBlocks[idx] + } + pendingRec, _ := bc.pendingBlocks.searchByPosition(p) + return pendingRec.block +} + +func (bc *blockChain) addPendingBlockRecord(p pendingBlockRecord) error { + if err := bc.pendingBlocks.insert(p); err != nil { + if err == ErrDuplicatedPendingBlock { + // We need to ignore this error because BA might confirm duplicated + // blocks in position. + err = nil + } + return err + } + return nil +} + +func (bc *blockChain) checkIfBlocksConfirmed() { + var err error + for len(bc.pendingBlocks) > 0 { + if bc.pendingBlocks[0].position.Height < + bc.lastConfirmed.Position.Height+1 { + panic(fmt.Errorf("unexpected case %s %s", bc.lastConfirmed, + bc.pendingBlocks[0].position)) + } + if bc.pendingBlocks[0].position.Height > + bc.lastConfirmed.Position.Height+1 { + break + } + var pending pendingBlockRecord + pending, bc.pendingBlocks = bc.pendingBlocks[0], bc.pendingBlocks[1:] + nextTip := pending.block + if nextTip == nil { + if nextTip, err = bc.prepareBlock( + pending.position, time.Time{}, true); err != nil { + // It should not be error when prepare empty block for correct + // position. + panic(err) + } + } + bc.confirmBlock(nextTip) + } +} + +func (bc *blockChain) purgeConfig() { + for bc.configs[0].RoundID() < bc.lastConfirmed.Position.Round { + bc.configs = bc.configs[1:] + } + if bc.configs[0].RoundID() != bc.lastConfirmed.Position.Round { + panic(fmt.Errorf("mismatched tip config: %d %d", + bc.configs[0].RoundID(), bc.lastConfirmed.Position.Round)) + } +} + +func (bc *blockChain) verifyRandomness( + blockHash common.Hash, round uint64, randomness []byte) (bool, error) { + if round < DKGDelayRound { + return bytes.Compare(randomness, NoRand) == 0, nil + } + v, ok, err := bc.vGetter.UpdateAndGet(round) + if err != nil { + return false, err + } + if !ok { + return false, ErrTSigNotReady + } + return v.VerifySignature(blockHash, crypto.Signature{ + Type: "bls", + Signature: randomness}), nil +} + +func (bc *blockChain) prepareBlock(position types.Position, + proposeTime time.Time, empty bool) (b *types.Block, err error) { + b = &types.Block{Position: position, Timestamp: proposeTime} + tip := bc.lastConfirmed + // Make sure we can propose a block at expected position for callers. + if tip == nil { + if bc.configs[0].RoundID() != uint64(0) { + panic(fmt.Errorf( + "Genesis config should be ready when preparing genesis: %d", + bc.configs[0].RoundID())) + } + // It should be the case for genesis block. 
+ if !position.Equal(types.Position{Height: types.GenesisHeight}) { + b, err = nil, ErrNotGenesisBlock + return + } + minExpectedTime := bc.dMoment.Add(bc.configs[0].minBlockInterval) + if empty { + b.Timestamp = minExpectedTime + } else { + bc.logger.Debug("Calling genesis Application.PreparePayload") + if b.Payload, err = bc.app.PreparePayload(b.Position); err != nil { + b = nil + return + } + bc.logger.Debug("Calling genesis Application.PrepareWitness") + if b.Witness, err = bc.app.PrepareWitness(0); err != nil { + b = nil + return + } + if proposeTime.Before(minExpectedTime) { + b.Timestamp = minExpectedTime + } + } + } else { + tipConfig := bc.tipConfig() + if tip.Position.Height+1 != position.Height { + b, err = nil, ErrNotFollowTipPosition + return + } + if tipConfig.IsLastBlock(tip) { + if tip.Position.Round+1 != position.Round { + b, err = nil, ErrRoundNotSwitch + return + } + } else { + if tip.Position.Round != position.Round { + b, err = nil, ErrInvalidRoundID + return + } + } + minExpectedTime := tip.Timestamp.Add(bc.configs[0].minBlockInterval) + b.ParentHash = tip.Hash + if !empty { + bc.logger.Debug("Calling Application.PreparePayload", + "position", b.Position) + if b.Payload, err = bc.app.PreparePayload(b.Position); err != nil { + b = nil + return + } + bc.logger.Debug("Calling Application.PrepareWitness", + "height", tip.Witness.Height) + if b.Witness, err = bc.app.PrepareWitness( + tip.Witness.Height); err != nil { + b = nil + return + } + if b.Timestamp.Before(minExpectedTime) { + b.Timestamp = minExpectedTime + } + } else { + b.Witness.Height = tip.Witness.Height + b.Witness.Data = make([]byte, len(tip.Witness.Data)) + copy(b.Witness.Data, tip.Witness.Data) + b.Timestamp = minExpectedTime + } + } + if empty { + if b.Hash, err = utils.HashBlock(b); err != nil { + b = nil + return + } + } else { + if err = bc.signer.SignBlock(b); err != nil { + b = nil + return + } + } + return +} + +func (bc *blockChain) tipConfig() blockChainConfig { + if bc.lastConfirmed == nil { + panic(fmt.Errorf("attempting to access config without tip")) + } + if bc.lastConfirmed.Position.Round != bc.configs[0].RoundID() { + panic(fmt.Errorf("inconsist config and tip: %d %d", + bc.lastConfirmed.Position.Round, bc.configs[0].RoundID())) + } + return bc.configs[0] +} + +func (bc *blockChain) confirmBlock(b *types.Block) { + if bc.lastConfirmed != nil && + bc.lastConfirmed.Position.Height+1 != b.Position.Height { + panic(fmt.Errorf("confirmed blocks not continuous in height: %s %s", + bc.lastConfirmed, b)) + } + bc.logger.Debug("Calling Application.BlockConfirmed", "block", b) + bc.app.BlockConfirmed(*b) + bc.lastConfirmed = b + bc.confirmedBlocks = append(bc.confirmedBlocks, b) + bc.purgeConfig() +} + +func (bc *blockChain) setRandomnessFromPending(b *types.Block) bool { + if r, exist := bc.pendingRandomnesses[b.Position]; exist { + b.Randomness = r + delete(bc.pendingRandomnesses, b.Position) + return true + } + return false +} + +func (bc *blockChain) processAgreementResult(result *types.AgreementResult) error { + if result.Position.Round < DKGDelayRound { + return nil + } + if !result.Position.Newer(bc.lastPosition) { + return ErrSkipButNoError + } + ok, err := bc.verifyRandomness( + result.BlockHash, result.Position.Round, result.Randomness) + if err != nil { + return err + } + if !ok { + return ErrIncorrectAgreementResult + } + bc.lock.Lock() + defer bc.lock.Unlock() + if !result.Position.Newer(bc.lastDelivered.Position) { + return nil + } + bc.pendingRandomnesses[result.Position] = 
result.Randomness
+  bc.lastPosition = bc.lastDelivered.Position
+  return nil
+}
+
+func (bc *blockChain) addBlockRandomness(pos types.Position, rand []byte) {
+  if pos.Round < DKGDelayRound {
+    return
+  }
+  bc.lock.Lock()
+  defer bc.lock.Unlock()
+  if !pos.Newer(bc.lastDelivered.Position) {
+    return
+  }
+  bc.pendingRandomnesses[pos] = rand
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/interfaces.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/interfaces.go
new file mode 100644
index 000000000..c85630775
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/interfaces.go
@@ -0,0 +1,70 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// .
+
+package blockdb
+
+import (
+  "errors"
+  "fmt"
+
+  "github.com/dexon-foundation/dexon-consensus/common"
+  "github.com/dexon-foundation/dexon-consensus/core/types"
+)
+
+var (
+  // ErrBlockExists is the error when block exists.
+  ErrBlockExists = errors.New("block exists")
+  // ErrBlockDoesNotExist is the error when block does not exist.
+  ErrBlockDoesNotExist = errors.New("block does not exist")
+  // ErrIterationFinished is the error to check if the iteration is finished.
+  ErrIterationFinished = errors.New("iteration finished")
+  // ErrEmptyPath is the error when the required path is empty.
+  ErrEmptyPath = fmt.Errorf("empty path")
+  // ErrClosed is the error when using DB after it's closed.
+  ErrClosed = fmt.Errorf("db closed")
+  // ErrNotImplemented is the error that some interface is not implemented.
+  ErrNotImplemented = fmt.Errorf("not implemented")
+)
+
+// BlockDatabase is the interface for a BlockDatabase.
+type BlockDatabase interface {
+  Reader
+  Writer
+
+  // Close allows the database implementation to release resources when
+  // finished.
+  Close() error
+}
+
+// Reader defines the interface for reading blocks from the DB.
+type Reader interface {
+  Has(hash common.Hash) bool
+  Get(hash common.Hash) (types.Block, error)
+  GetAll() (BlockIterator, error)
+}
+
+// Writer defines the interface for writing blocks into the DB.
+type Writer interface {
+  Update(block types.Block) error
+  Put(block types.Block) error
+}
+
+// BlockIterator defines an iterator over blocks held
+// in a DB.
+type BlockIterator interface {
+  Next() (types.Block, error)
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/level-db.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/level-db.go
new file mode 100644
index 000000000..76730fc9c
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/level-db.go
@@ -0,0 +1,127 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package blockdb + +import ( + "encoding/json" + + "github.com/syndtr/goleveldb/leveldb" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/types" +) + +// LevelDBBackedBlockDB is a leveldb backed BlockDB implementation. +type LevelDBBackedBlockDB struct { + db *leveldb.DB +} + +// NewLevelDBBackedBlockDB initialize a leveldb-backed block database. +func NewLevelDBBackedBlockDB( + path string) (lvl *LevelDBBackedBlockDB, err error) { + + db, err := leveldb.OpenFile(path, nil) + if err != nil { + return + } + lvl = &LevelDBBackedBlockDB{db: db} + return +} + +// Close implement Closer interface, which would release allocated resource. +func (lvl *LevelDBBackedBlockDB) Close() error { + return lvl.db.Close() +} + +// Has implements the Reader.Has method. +func (lvl *LevelDBBackedBlockDB) Has(hash common.Hash) bool { + exists, err := lvl.db.Has([]byte(hash[:]), nil) + if err != nil { + // TODO(missionliao): Modify the interface to return error. + panic(err) + } + return exists +} + +// Get implements the Reader.Get method. +func (lvl *LevelDBBackedBlockDB) Get( + hash common.Hash) (block types.Block, err error) { + + queried, err := lvl.db.Get([]byte(hash[:]), nil) + if err != nil { + if err == leveldb.ErrNotFound { + err = ErrBlockDoesNotExist + } + return + } + err = json.Unmarshal(queried, &block) + if err != nil { + return + } + return +} + +// Update implements the Writer.Update method. +func (lvl *LevelDBBackedBlockDB) Update(block types.Block) (err error) { + // NOTE: we didn't handle changes of block hash (and it + // should not happen). + marshaled, err := json.Marshal(&block) + if err != nil { + return + } + + if !lvl.Has(block.Hash) { + err = ErrBlockDoesNotExist + return + } + err = lvl.db.Put( + []byte(block.Hash[:]), + marshaled, + nil) + if err != nil { + return + } + return +} + +// Put implements the Writer.Put method. +func (lvl *LevelDBBackedBlockDB) Put(block types.Block) (err error) { + marshaled, err := json.Marshal(&block) + if err != nil { + return + } + if lvl.Has(block.Hash) { + err = ErrBlockExists + return + } + err = lvl.db.Put( + []byte(block.Hash[:]), + marshaled, + nil) + if err != nil { + return + } + return +} + +// GetAll implements Reader.GetAll method, which allows callers +// to retrieve all blocks in DB. +func (lvl *LevelDBBackedBlockDB) GetAll() (BlockIterator, error) { + // TODO (mission): Implement this part via goleveldb's iterator. 
+  return nil, ErrNotImplemented
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/memory.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/memory.go
new file mode 100644
index 000000000..b45af229b
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/memory.go
@@ -0,0 +1,183 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// .
+
+package blockdb
+
+import (
+  "encoding/json"
+  "io/ioutil"
+  "os"
+  "sync"
+
+  "github.com/dexon-foundation/dexon-consensus/common"
+  "github.com/dexon-foundation/dexon-consensus/core/types"
+)
+
+type seqIterator struct {
+  idx int
+  db  *MemBackedBlockDB
+}
+
+func (seq *seqIterator) Next() (types.Block, error) {
+  curIdx := seq.idx
+  seq.idx++
+  return seq.db.getByIndex(curIdx)
+}
+
+// MemBackedBlockDB is a memory backed BlockDB implementation.
+type MemBackedBlockDB struct {
+  blocksMutex        sync.RWMutex
+  blockHashSequence  common.Hashes
+  blocksByHash       map[common.Hash]*types.Block
+  persistantFilePath string
+}
+
+// NewMemBackedBlockDB initializes a memory-backed block database.
+func NewMemBackedBlockDB(persistantFilePath ...string) (db *MemBackedBlockDB, err error) {
+  db = &MemBackedBlockDB{
+    blockHashSequence: common.Hashes{},
+    blocksByHash:      make(map[common.Hash]*types.Block),
+  }
+  if len(persistantFilePath) == 0 || len(persistantFilePath[0]) == 0 {
+    return
+  }
+  db.persistantFilePath = persistantFilePath[0]
+  buf, err := ioutil.ReadFile(db.persistantFilePath)
+  if err != nil {
+    if !os.IsNotExist(err) {
+      // Something unexpected happened.
+      return
+    }
+    // It's expected that the file may not exist; we should not report
+    // an error in that case.
+    err = nil
+    return
+  }
+
+  // Initialize this instance from the file content; this struct is a
+  // temporary way to expose the private fields for JSON decoding.
+  toLoad := struct {
+    Sequence common.Hashes
+    ByHash   map[common.Hash]*types.Block
+  }{}
+  err = json.Unmarshal(buf, &toLoad)
+  if err != nil {
+    return
+  }
+  db.blockHashSequence = toLoad.Sequence
+  db.blocksByHash = toLoad.ByHash
+  return
+}
+
+// Has returns whether or not the DB has a block identified with the hash.
+func (m *MemBackedBlockDB) Has(hash common.Hash) bool {
+  m.blocksMutex.RLock()
+  defer m.blocksMutex.RUnlock()
+
+  _, ok := m.blocksByHash[hash]
+  return ok
+}
+
+// Get returns a block given a hash.
+func (m *MemBackedBlockDB) Get(hash common.Hash) (types.Block, error) {
+  m.blocksMutex.RLock()
+  defer m.blocksMutex.RUnlock()
+
+  return m.internalGet(hash)
+}
+
+func (m *MemBackedBlockDB) internalGet(hash common.Hash) (types.Block, error) {
+  b, ok := m.blocksByHash[hash]
+  if !ok {
+    return types.Block{}, ErrBlockDoesNotExist
+  }
+  return *b, nil
+}
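A usage sketch for this in-memory DB, assuming the vendored import paths resolve in your module (the main function and its names are illustrative, not part of this patch): Put stores a block, and GetAll iterates blocks in insertion order until ErrIterationFinished.

package main

import (
	"fmt"

	"github.com/dexon-foundation/dexon-consensus/core/blockdb"
	"github.com/dexon-foundation/dexon-consensus/core/types"
)

func main() {
	// No path argument: purely in-memory, nothing is persisted on Close.
	db, err := blockdb.NewMemBackedBlockDB()
	if err != nil {
		panic(err)
	}
	defer db.Close()

	if err := db.Put(types.Block{}); err != nil {
		panic(err) // a second Put of the same hash returns ErrBlockExists
	}

	// GetAll iterates in insertion order until ErrIterationFinished.
	iter, _ := db.GetAll()
	for {
		b, err := iter.Next()
		if err == blockdb.ErrIterationFinished {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println("block:", b.Hash)
	}
}

+// Put inserts a new block into the database.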
+func (m *MemBackedBlockDB) Put(block types.Block) error {
+  if m.Has(block.Hash) {
+    return ErrBlockExists
+  }
+
+  m.blocksMutex.Lock()
+  defer m.blocksMutex.Unlock()
+
+  m.blockHashSequence = append(m.blockHashSequence, block.Hash)
+  m.blocksByHash[block.Hash] = &block
+  return nil
+}
+
+// Update updates a block in the database.
+func (m *MemBackedBlockDB) Update(block types.Block) error {
+  if !m.Has(block.Hash) {
+    return ErrBlockDoesNotExist
+  }
+
+  m.blocksMutex.Lock()
+  defer m.blocksMutex.Unlock()
+
+  m.blocksByHash[block.Hash] = &block
+  return nil
+}
+
+// Close implements the Closer interface, releasing allocated resources.
+func (m *MemBackedBlockDB) Close() (err error) {
+  // Save the internal state to a JSON file. This struct is a temporary way
+  // to dump the private fields via JSON encoding.
+  if len(m.persistantFilePath) == 0 {
+    return
+  }
+
+  m.blocksMutex.RLock()
+  defer m.blocksMutex.RUnlock()
+
+  toDump := struct {
+    Sequence common.Hashes
+    ByHash   map[common.Hash]*types.Block
+  }{
+    Sequence: m.blockHashSequence,
+    ByHash:   m.blocksByHash,
+  }
+
+  // Dump to JSON.
+  buf, err := json.Marshal(&toDump)
+  if err != nil {
+    return
+  }
+
+  err = ioutil.WriteFile(m.persistantFilePath, buf, 0644)
+  return
+}
+
+func (m *MemBackedBlockDB) getByIndex(idx int) (types.Block, error) {
+  m.blocksMutex.RLock()
+  defer m.blocksMutex.RUnlock()
+
+  if idx >= len(m.blockHashSequence) {
+    return types.Block{}, ErrIterationFinished
+  }
+
+  hash := m.blockHashSequence[idx]
+  return m.internalGet(hash)
+}
+
+// GetAll implements the Reader.GetAll method, which allows callers
+// to retrieve all blocks in the DB.
+func (m *MemBackedBlockDB) GetAll() (BlockIterator, error) {
+  return &seqIterator{db: m}, nil
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/configuration-chain.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/configuration-chain.go
new file mode 100644
index 000000000..0f1400cb5
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/configuration-chain.go
@@ -0,0 +1,795 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// .
+
+package core
+
+import (
+  "context"
+  "fmt"
+  "sync"
+  "time"
+
+  "github.com/byzantine-lab/dexon-consensus/common"
+  "github.com/byzantine-lab/dexon-consensus/core/crypto"
+  "github.com/byzantine-lab/dexon-consensus/core/db"
+  "github.com/byzantine-lab/dexon-consensus/core/types"
+  typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+  "github.com/byzantine-lab/dexon-consensus/core/utils"
+)
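Several of the errors declared below, ErrSkipButNoError especially, act as sentinel values: callers compare against them to treat a condition as a benign skip rather than a failure. A self-contained sketch of that convention, with hypothetical names:

package main

import (
	"errors"
	"fmt"
)

// errSkipButNoError signals "nothing to do here" rather than a failure,
// mirroring how ErrSkipButNoError is used in this package.
var errSkipButNoError = errors.New("skip but no error")

func step(alreadyDone bool) error {
	if alreadyDone {
		return errSkipButNoError // already handled; not a real error
	}
	return nil
}

func main() {
	if err := step(true); err != nil && err != errSkipButNoError {
		panic(err) // only real failures reach here
	}
	fmt.Println("skipped benignly")
}

+// Errors for the configuration chain.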
+var ( + ErrDKGNotRegistered = fmt.Errorf( + "not yet registered in DKG protocol") + ErrTSigAlreadyRunning = fmt.Errorf( + "tsig is already running") + ErrDKGNotReady = fmt.Errorf( + "DKG is not ready") + ErrSkipButNoError = fmt.Errorf( + "skip but no error") + ErrDKGAborted = fmt.Errorf( + "DKG is aborted") +) + +// ErrMismatchDKG represent an attempt to run DKG protocol is failed because +// the register DKG protocol is mismatched, interms of round and resetCount. +type ErrMismatchDKG struct { + expectRound, expectReset uint64 + actualRound, actualReset uint64 +} + +func (e ErrMismatchDKG) Error() string { + return fmt.Sprintf( + "mismatch DKG, abort running: expect(%d %d) actual(%d %d)", + e.expectRound, e.expectReset, e.actualRound, e.actualReset) +} + +type dkgStepFn func(round uint64, reset uint64) error + +type configurationChain struct { + ID types.NodeID + recv dkgReceiver + gov Governance + dkg *dkgProtocol + dkgRunPhases []dkgStepFn + logger common.Logger + dkgLock sync.RWMutex + dkgSigner map[uint64]*dkgShareSecret + npks map[uint64]*typesDKG.NodePublicKeys + complaints []*typesDKG.Complaint + dkgResult sync.RWMutex + tsig map[common.Hash]*tsigProtocol + tsigTouched map[common.Hash]struct{} + tsigReady *sync.Cond + cache *utils.NodeSetCache + db db.Database + notarySet map[types.NodeID]struct{} + mpkReady bool + pendingPrvShare map[types.NodeID]*typesDKG.PrivateShare + // TODO(jimmy-dexon): add timeout to pending psig. + pendingPsig map[common.Hash][]*typesDKG.PartialSignature + prevHash common.Hash + dkgCtx context.Context + dkgCtxCancel context.CancelFunc + dkgRunning bool +} + +func newConfigurationChain( + ID types.NodeID, + recv dkgReceiver, + gov Governance, + cache *utils.NodeSetCache, + dbInst db.Database, + logger common.Logger) *configurationChain { + configurationChain := &configurationChain{ + ID: ID, + recv: recv, + gov: gov, + logger: logger, + dkgSigner: make(map[uint64]*dkgShareSecret), + npks: make(map[uint64]*typesDKG.NodePublicKeys), + tsig: make(map[common.Hash]*tsigProtocol), + tsigTouched: make(map[common.Hash]struct{}), + tsigReady: sync.NewCond(&sync.Mutex{}), + cache: cache, + db: dbInst, + pendingPsig: make(map[common.Hash][]*typesDKG.PartialSignature), + } + configurationChain.initDKGPhasesFunc() + return configurationChain +} + +func (cc *configurationChain) abortDKG( + parentCtx context.Context, + round, reset uint64) bool { + cc.dkgLock.Lock() + defer cc.dkgLock.Unlock() + if cc.dkg != nil { + return cc.abortDKGNoLock(parentCtx, round, reset) + } + return false +} + +func (cc *configurationChain) abortDKGNoLock( + ctx context.Context, + round, reset uint64) bool { + if cc.dkg.round > round || + (cc.dkg.round == round && cc.dkg.reset > reset) { + cc.logger.Error("Newer DKG already is registered", + "round", round, + "reset", reset) + return false + } + cc.logger.Error("Previous DKG is not finished", + "round", round, + "reset", reset, + "previous-round", cc.dkg.round, + "previous-reset", cc.dkg.reset) + // Abort DKG routine in previous round. + cc.logger.Error("Aborting DKG in previous round", + "round", round, + "previous-round", cc.dkg.round) + // Notify current running DKG protocol to abort. + if cc.dkgCtxCancel != nil { + cc.dkgCtxCancel() + } + cc.dkgLock.Unlock() + // Wait for current running DKG protocol aborting. 
+ for { + cc.dkgLock.Lock() + if cc.dkgRunning == false { + cc.dkg = nil + break + } + select { + case <-ctx.Done(): + return false + case <-time.After(100 * time.Millisecond): + } + cc.dkgLock.Unlock() + } + cc.logger.Error("Previous DKG aborted", + "round", round, + "reset", reset) + return cc.dkg == nil +} + +func (cc *configurationChain) registerDKG( + parentCtx context.Context, + round, reset uint64, + threshold int) { + cc.dkgLock.Lock() + defer cc.dkgLock.Unlock() + if cc.dkg != nil { + // Make sure we only proceed when cc.dkg is nil. + if !cc.abortDKGNoLock(parentCtx, round, reset) { + return + } + select { + case <-parentCtx.Done(): + return + default: + } + if cc.dkg != nil { + // This panic would only raise when multiple attampts to register + // a DKG protocol at the same time. + panic(ErrMismatchDKG{ + expectRound: round, + expectReset: reset, + actualRound: cc.dkg.round, + actualReset: cc.dkg.reset, + }) + } + } + notarySet, err := cc.cache.GetNotarySet(round) + if err != nil { + cc.logger.Error("Error getting notary set from cache", "error", err) + return + } + cc.notarySet = notarySet + cc.pendingPrvShare = make(map[types.NodeID]*typesDKG.PrivateShare) + cc.mpkReady = false + cc.dkg, err = recoverDKGProtocol(cc.ID, cc.recv, round, reset, cc.db) + cc.dkgCtx, cc.dkgCtxCancel = context.WithCancel(parentCtx) + if err != nil { + panic(err) + } + if cc.dkg == nil { + cc.dkg = newDKGProtocol( + cc.ID, + cc.recv, + round, + reset, + threshold) + + err = cc.db.PutOrUpdateDKGProtocol(cc.dkg.toDKGProtocolInfo()) + if err != nil { + cc.logger.Error("Error put or update DKG protocol", "error", + err) + return + } + } + + go func() { + ticker := newTicker(cc.gov, round, TickerDKG) + defer ticker.Stop() + <-ticker.Tick() + cc.dkgLock.Lock() + defer cc.dkgLock.Unlock() + if cc.dkg != nil && cc.dkg.round == round && cc.dkg.reset == reset { + cc.dkg.proposeMPKReady() + } + }() +} + +func (cc *configurationChain) runDKGPhaseOne(round uint64, reset uint64) error { + if cc.dkg.round < round || + (cc.dkg.round == round && cc.dkg.reset < reset) { + return ErrDKGNotRegistered + } + if cc.dkg.round != round || cc.dkg.reset != reset { + cc.logger.Warn("DKG canceled", "round", round, "reset", reset) + return ErrSkipButNoError + } + cc.logger.Debug("Calling Governance.IsDKGFinal", "round", round) + if cc.gov.IsDKGFinal(round) { + cc.logger.Warn("DKG already final", "round", round) + return ErrSkipButNoError + } + cc.logger.Debug("Calling Governance.IsDKGMPKReady", "round", round) + var err error + for err == nil && !cc.gov.IsDKGMPKReady(round) { + cc.dkgLock.Unlock() + cc.logger.Debug("DKG MPKs are not ready yet. Try again later...", + "nodeID", cc.ID, + "round", round) + select { + case <-cc.dkgCtx.Done(): + err = ErrDKGAborted + case <-time.After(500 * time.Millisecond): + } + cc.dkgLock.Lock() + } + return err +} + +func (cc *configurationChain) runDKGPhaseTwoAndThree( + round uint64, reset uint64) error { + // Check if this node successfully join the protocol. + cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round) + mpks := cc.gov.DKGMasterPublicKeys(round) + inProtocol := false + for _, mpk := range mpks { + if mpk.ProposerID == cc.ID { + inProtocol = true + break + } + } + if !inProtocol { + cc.logger.Warn("Failed to join DKG protocol", + "round", round, + "reset", reset) + return ErrSkipButNoError + } + // Phase 2(T = 0): Exchange DKG secret key share. 
+ if err := cc.dkg.processMasterPublicKeys(mpks); err != nil { + cc.logger.Error("Failed to process master public key", + "round", round, + "reset", reset, + "error", err) + } + cc.mpkReady = true + // The time to process private share might be long, check aborting before + // get into that loop. + select { + case <-cc.dkgCtx.Done(): + return ErrDKGAborted + default: + } + for _, prvShare := range cc.pendingPrvShare { + if err := cc.dkg.processPrivateShare(prvShare); err != nil { + cc.logger.Error("Failed to process private share", + "round", round, + "reset", reset, + "error", err) + } + } + + // Phase 3(T = 0~λ): Propose complaint. + // Propose complaint is done in `processMasterPublicKeys`. + return nil +} + +func (cc *configurationChain) runDKGPhaseFour() { + // Phase 4(T = λ): Propose nack complaints. + cc.dkg.proposeNackComplaints() +} + +func (cc *configurationChain) runDKGPhaseFiveAndSix(round uint64, reset uint64) { + // Phase 5(T = 2λ): Propose Anti nack complaint. + cc.logger.Debug("Calling Governance.DKGComplaints", "round", round) + cc.complaints = cc.gov.DKGComplaints(round) + if err := cc.dkg.processNackComplaints(cc.complaints); err != nil { + cc.logger.Error("Failed to process NackComplaint", + "round", round, + "reset", reset, + "error", err) + } + + // Phase 6(T = 3λ): Rebroadcast anti nack complaint. + // Rebroadcast is done in `processPrivateShare`. +} + +func (cc *configurationChain) runDKGPhaseSeven() { + // Phase 7(T = 4λ): Enforce complaints and nack complaints. + cc.dkg.enforceNackComplaints(cc.complaints) + // Enforce complaint is done in `processPrivateShare`. +} + +func (cc *configurationChain) runDKGPhaseEight() { + // Phase 8(T = 5λ): DKG finalize. + cc.dkg.proposeFinalize() +} + +func (cc *configurationChain) runDKGPhaseNine(round uint64, reset uint64) error { + // Phase 9(T = 6λ): DKG is ready. + // Normally, IsDKGFinal would return true here. Use this for in case of + // unexpected network fluctuation and ensure the robustness of DKG protocol. + cc.logger.Debug("Calling Governance.IsDKGFinal", "round", round) + var err error + for err == nil && !cc.gov.IsDKGFinal(round) { + cc.dkgLock.Unlock() + cc.logger.Debug("DKG is not ready yet. Try again later...", + "nodeID", cc.ID.String()[:6], + "round", round, + "reset", reset) + select { + case <-cc.dkgCtx.Done(): + err = ErrDKGAborted + case <-time.After(500 * time.Millisecond): + } + cc.dkgLock.Lock() + } + if err != nil { + return err + } + cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round) + cc.logger.Debug("Calling Governance.DKGComplaints", "round", round) + npks, err := typesDKG.NewNodePublicKeys(round, + cc.gov.DKGMasterPublicKeys(round), + cc.gov.DKGComplaints(round), + cc.dkg.threshold) + if err != nil { + return err + } + qualifies := "" + for nID := range npks.QualifyNodeIDs { + qualifies += fmt.Sprintf("%s ", nID.String()[:6]) + } + cc.logger.Info("Qualify Nodes", + "nodeID", cc.ID, + "round", round, + "reset", reset, + "count", len(npks.QualifyIDs), + "qualifies", qualifies) + if _, exist := npks.QualifyNodeIDs[cc.ID]; !exist { + cc.logger.Warn("Self is not in Qualify Nodes", + "round", round, + "reset", reset) + return nil + } + signer, err := cc.dkg.recoverShareSecret(npks.QualifyIDs) + if err != nil { + return err + } + // Save private shares to DB. 
+ if err = + cc.db.PutDKGPrivateKey(round, reset, *signer.privateKey); err != nil { + return err + } + cc.dkg.proposeSuccess() + cc.dkgResult.Lock() + defer cc.dkgResult.Unlock() + cc.dkgSigner[round] = signer + cc.npks[round] = npks + return nil +} + +func (cc *configurationChain) initDKGPhasesFunc() { + cc.dkgRunPhases = []dkgStepFn{ + func(round uint64, reset uint64) error { + return cc.runDKGPhaseOne(round, reset) + }, + func(round uint64, reset uint64) error { + return cc.runDKGPhaseTwoAndThree(round, reset) + }, + func(round uint64, reset uint64) error { + cc.runDKGPhaseFour() + return nil + }, + func(round uint64, reset uint64) error { + cc.runDKGPhaseFiveAndSix(round, reset) + return nil + }, + func(round uint64, reset uint64) error { + cc.runDKGPhaseSeven() + return nil + }, + func(round uint64, reset uint64) error { + cc.runDKGPhaseEight() + return nil + }, + func(round uint64, reset uint64) error { + return cc.runDKGPhaseNine(round, reset) + }, + } +} + +func (cc *configurationChain) runDKG( + round uint64, reset uint64, event *common.Event, + dkgBeginHeight, dkgHeight uint64) (err error) { + // Check if corresponding DKG signer is ready. + if _, _, err = cc.getDKGInfo(round, false); err == nil { + return ErrSkipButNoError + } + cfg := utils.GetConfigWithPanic(cc.gov, round, cc.logger) + phaseHeight := uint64( + cfg.LambdaDKG.Nanoseconds() / cfg.MinBlockInterval.Nanoseconds()) + skipPhase := int(dkgHeight / phaseHeight) + cc.logger.Info("Skipping DKG phase", "phase", skipPhase) + cc.dkgLock.Lock() + defer cc.dkgLock.Unlock() + if cc.dkg == nil { + return ErrDKGNotRegistered + } + // Make sure the existed dkgProtocol is expected one. + if cc.dkg.round != round || cc.dkg.reset != reset { + return ErrMismatchDKG{ + expectRound: round, + expectReset: reset, + actualRound: cc.dkg.round, + actualReset: cc.dkg.reset, + } + } + if cc.dkgRunning { + panic(fmt.Errorf("duplicated call to runDKG: %d %d", round, reset)) + } + cc.dkgRunning = true + defer func() { + // Here we should hold the cc.dkgLock, reset cc.dkg to nil when done. + if cc.dkg != nil { + cc.dkg = nil + } + cc.dkgRunning = false + }() + wg := sync.WaitGroup{} + var dkgError error + // Make a copy of cc.dkgCtx so each phase function can refer to the correct + // context. 
+ ctx := cc.dkgCtx + cc.dkg.step = skipPhase + for i := skipPhase; i < len(cc.dkgRunPhases); i++ { + wg.Add(1) + event.RegisterHeight(dkgBeginHeight+phaseHeight*uint64(i), func(uint64) { + go func() { + defer wg.Done() + cc.dkgLock.Lock() + defer cc.dkgLock.Unlock() + if dkgError != nil { + return + } + select { + case <-ctx.Done(): + dkgError = ErrDKGAborted + return + default: + } + + err := cc.dkgRunPhases[cc.dkg.step](round, reset) + if err == nil || err == ErrSkipButNoError { + err = nil + cc.dkg.step++ + err = cc.db.PutOrUpdateDKGProtocol(cc.dkg.toDKGProtocolInfo()) + if err != nil { + cc.logger.Error("Failed to save DKG Protocol", + "step", cc.dkg.step, + "error", err) + } + } + if err != nil && dkgError == nil { + dkgError = err + } + }() + }) + } + cc.dkgLock.Unlock() + wgChan := make(chan struct{}, 1) + go func() { + wg.Wait() + wgChan <- struct{}{} + }() + select { + case <-cc.dkgCtx.Done(): + case <-wgChan: + } + cc.dkgLock.Lock() + select { + case <-cc.dkgCtx.Done(): + return ErrDKGAborted + default: + } + return dkgError +} + +func (cc *configurationChain) isDKGFinal(round uint64) bool { + if !cc.gov.IsDKGFinal(round) { + return false + } + _, _, err := cc.getDKGInfo(round, false) + return err == nil +} + +func (cc *configurationChain) getDKGInfo( + round uint64, ignoreSigner bool) ( + *typesDKG.NodePublicKeys, *dkgShareSecret, error) { + getFromCache := func() (*typesDKG.NodePublicKeys, *dkgShareSecret) { + cc.dkgResult.RLock() + defer cc.dkgResult.RUnlock() + npks := cc.npks[round] + signer := cc.dkgSigner[round] + return npks, signer + } + npks, signer := getFromCache() + if npks == nil || (!ignoreSigner && signer == nil) { + if err := cc.recoverDKGInfo(round, ignoreSigner); err != nil { + return nil, nil, err + } + npks, signer = getFromCache() + } + if npks == nil || (!ignoreSigner && signer == nil) { + return nil, nil, ErrDKGNotReady + } + return npks, signer, nil +} + +func (cc *configurationChain) recoverDKGInfo( + round uint64, ignoreSigner bool) error { + var npksExists, signerExists bool + func() { + cc.dkgResult.Lock() + defer cc.dkgResult.Unlock() + _, signerExists = cc.dkgSigner[round] + _, npksExists = cc.npks[round] + }() + if signerExists && npksExists { + return nil + } + if !cc.gov.IsDKGFinal(round) { + return ErrDKGNotReady + } + + threshold := utils.GetDKGThreshold( + utils.GetConfigWithPanic(cc.gov, round, cc.logger)) + cc.logger.Debug("Calling Governance.DKGMasterPublicKeys for recoverDKGInfo", + "round", round) + mpk := cc.gov.DKGMasterPublicKeys(round) + cc.logger.Debug("Calling Governance.DKGComplaints for recoverDKGInfo", + "round", round) + comps := cc.gov.DKGComplaints(round) + qualifies, _, err := typesDKG.CalcQualifyNodes(mpk, comps, threshold) + if err != nil { + return err + } + if len(qualifies) < + utils.GetDKGValidThreshold(utils.GetConfigWithPanic( + cc.gov, round, cc.logger)) { + return typesDKG.ErrNotReachThreshold + } + + if !npksExists { + npks, err := typesDKG.NewNodePublicKeys(round, + cc.gov.DKGMasterPublicKeys(round), + cc.gov.DKGComplaints(round), + threshold) + if err != nil { + cc.logger.Warn("Failed to create DKGNodePublicKeys", + "round", round, "error", err) + return err + } + func() { + cc.dkgResult.Lock() + defer cc.dkgResult.Unlock() + cc.npks[round] = npks + }() + } + if !signerExists && !ignoreSigner { + reset := cc.gov.DKGResetCount(round) + // Check if we have private shares in DB. 
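+		// If the key is missing, fall back to the persisted DKG protocol
+		// state and recover it from the collected private shares.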
+ prvKey, err := cc.db.GetDKGPrivateKey(round, reset) + if err != nil { + cc.logger.Warn("Failed to create DKGPrivateKey", + "round", round, "error", err) + dkgProtocolInfo, err := cc.db.GetDKGProtocol() + if err != nil { + cc.logger.Warn("Unable to recover DKGProtocolInfo", + "round", round, "error", err) + return err + } + if dkgProtocolInfo.Round != round { + cc.logger.Warn("DKGProtocolInfo round mismatch", + "round", round, "infoRound", dkgProtocolInfo.Round) + return err + } + prvKeyRecover, err := + dkgProtocolInfo.PrvShares.RecoverPrivateKey(qualifies) + if err != nil { + cc.logger.Warn("Failed to recover DKGPrivateKey", + "round", round, "error", err) + return err + } + if err = cc.db.PutDKGPrivateKey( + round, reset, *prvKeyRecover); err != nil { + cc.logger.Warn("Failed to save DKGPrivateKey", + "round", round, "error", err) + } + prvKey = *prvKeyRecover + } + func() { + cc.dkgResult.Lock() + defer cc.dkgResult.Unlock() + cc.dkgSigner[round] = &dkgShareSecret{ + privateKey: &prvKey, + } + }() + } + return nil +} + +func (cc *configurationChain) preparePartialSignature( + round uint64, hash common.Hash) (*typesDKG.PartialSignature, error) { + _, signer, _ := cc.getDKGInfo(round, false) + if signer == nil { + return nil, ErrDKGNotReady + } + return &typesDKG.PartialSignature{ + ProposerID: cc.ID, + Round: round, + Hash: hash, + PartialSignature: signer.sign(hash), + }, nil +} + +func (cc *configurationChain) touchTSigHash(hash common.Hash) (first bool) { + cc.tsigReady.L.Lock() + defer cc.tsigReady.L.Unlock() + _, exist := cc.tsigTouched[hash] + cc.tsigTouched[hash] = struct{}{} + return !exist +} + +func (cc *configurationChain) untouchTSigHash(hash common.Hash) { + cc.tsigReady.L.Lock() + defer cc.tsigReady.L.Unlock() + delete(cc.tsigTouched, hash) +} + +func (cc *configurationChain) runTSig( + round uint64, hash common.Hash, wait time.Duration) ( + crypto.Signature, error) { + npks, _, _ := cc.getDKGInfo(round, false) + if npks == nil { + return crypto.Signature{}, ErrDKGNotReady + } + cc.tsigReady.L.Lock() + defer cc.tsigReady.L.Unlock() + if _, exist := cc.tsig[hash]; exist { + return crypto.Signature{}, ErrTSigAlreadyRunning + } + cc.tsig[hash] = newTSigProtocol(npks, hash) + pendingPsig := cc.pendingPsig[hash] + delete(cc.pendingPsig, hash) + go func() { + for _, psig := range pendingPsig { + if err := cc.processPartialSignature(psig); err != nil { + cc.logger.Error("Failed to process partial signature", + "nodeID", cc.ID, + "error", err) + } + } + }() + timeout := make(chan struct{}, 1) + go func() { + time.Sleep(wait) + timeout <- struct{}{} + cc.tsigReady.Broadcast() + }() + var signature crypto.Signature + var err error + for func() bool { + signature, err = cc.tsig[hash].signature() + select { + case <-timeout: + return false + default: + } + return err == ErrNotEnoughtPartialSignatures + }() { + cc.tsigReady.Wait() + } + delete(cc.tsig, hash) + if err != nil { + return crypto.Signature{}, err + } + return signature, nil +} + +func (cc *configurationChain) runCRSTSig( + round uint64, crs common.Hash) ([]byte, error) { + sig, err := cc.runTSig(round, crs, cc.gov.Configuration(round).LambdaDKG*5) + cc.logger.Info("CRS", + "nodeID", cc.ID, + "round", round+1, + "signature", sig) + return sig.Signature[:], err +} + +func (cc *configurationChain) processPrivateShare( + prvShare *typesDKG.PrivateShare) error { + cc.dkgLock.Lock() + defer cc.dkgLock.Unlock() + if cc.dkg == nil { + return nil + } + if _, exist := cc.notarySet[prvShare.ProposerID]; !exist { + return 
ErrNotDKGParticipant + } + if !cc.mpkReady { + // TODO(jimmy-dexon): remove duplicated signature check in dkg module. + ok, err := utils.VerifyDKGPrivateShareSignature(prvShare) + if err != nil { + return err + } + if !ok { + return ErrIncorrectPrivateShareSignature + } + cc.pendingPrvShare[prvShare.ProposerID] = prvShare + return nil + } + return cc.dkg.processPrivateShare(prvShare) +} + +func (cc *configurationChain) processPartialSignature( + psig *typesDKG.PartialSignature) error { + cc.tsigReady.L.Lock() + defer cc.tsigReady.L.Unlock() + if _, exist := cc.tsig[psig.Hash]; !exist { + ok, err := utils.VerifyDKGPartialSignatureSignature(psig) + if err != nil { + return err + } + if !ok { + return ErrIncorrectPartialSignatureSignature + } + cc.pendingPsig[psig.Hash] = append(cc.pendingPsig[psig.Hash], psig) + return nil + } + if err := cc.tsig[psig.Hash].processPartialSignature(psig); err != nil { + return err + } + cc.tsigReady.Broadcast() + return nil +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/consensus.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/consensus.go new file mode 100644 index 000000000..8b2b9a048 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/consensus.go @@ -0,0 +1,1567 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package core + +import ( + "context" + "encoding/hex" + "fmt" + "sync" + "time" + + "github.com/byzantine-lab/dexon-consensus/common" + "github.com/byzantine-lab/dexon-consensus/core/crypto" + cryptoDKG "github.com/byzantine-lab/dexon-consensus/core/crypto/dkg" + "github.com/byzantine-lab/dexon-consensus/core/db" + "github.com/byzantine-lab/dexon-consensus/core/types" + typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg" + "github.com/byzantine-lab/dexon-consensus/core/utils" +) + +// Errors for consensus core. +var ( + ErrProposerNotInNodeSet = fmt.Errorf( + "proposer is not in node set") + ErrIncorrectHash = fmt.Errorf( + "hash of block is incorrect") + ErrIncorrectSignature = fmt.Errorf( + "signature of block is incorrect") + ErrUnknownBlockProposed = fmt.Errorf( + "unknown block is proposed") + ErrIncorrectAgreementResultPosition = fmt.Errorf( + "incorrect agreement result position") + ErrNotEnoughVotes = fmt.Errorf( + "not enought votes") + ErrCRSNotReady = fmt.Errorf( + "CRS not ready") + ErrConfigurationNotReady = fmt.Errorf( + "Configuration not ready") + ErrIncorrectBlockRandomness = fmt.Errorf( + "randomness of block is incorrect") + ErrCannotVerifyBlockRandomness = fmt.Errorf( + "cannot verify block randomness") +) + +type selfAgreementResult types.AgreementResult + +// consensusBAReceiver implements agreementReceiver. 
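+// It connects the agreement (BA) module back to the consensus core: it
+// proposes votes and blocks, confirms agreed blocks, and recovers block
+// randomness from the collected partial signatures.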
+type consensusBAReceiver struct { + consensus *Consensus + agreementModule *agreement + emptyBlockHashMap *sync.Map + isNotary bool + restartNotary chan types.Position + npks *typesDKG.NodePublicKeys + psigSigner *dkgShareSecret +} + +func (recv *consensusBAReceiver) emptyBlockHash(pos types.Position) ( + common.Hash, error) { + hashVal, ok := recv.emptyBlockHashMap.Load(pos) + if ok { + return hashVal.(common.Hash), nil + } + emptyBlock, err := recv.consensus.bcModule.prepareBlock( + pos, time.Time{}, true) + if err != nil { + return common.Hash{}, err + } + hash, err := utils.HashBlock(emptyBlock) + if err != nil { + return common.Hash{}, err + } + recv.emptyBlockHashMap.Store(pos, hash) + return hash, nil +} + +func (recv *consensusBAReceiver) VerifyPartialSignature(vote *types.Vote) ( + bool, bool) { + if vote.Position.Round >= DKGDelayRound && vote.BlockHash != types.SkipBlockHash { + if vote.Type == types.VoteCom || vote.Type == types.VoteFastCom { + if recv.npks == nil { + recv.consensus.logger.Debug( + "Unable to verify psig, npks is nil", + "vote", vote) + return false, false + } + if vote.Position.Round != recv.npks.Round { + recv.consensus.logger.Debug( + "Unable to verify psig, round of npks mismatch", + "vote", vote, + "npksRound", recv.npks.Round) + return false, false + } + pubKey, exist := recv.npks.PublicKeys[vote.ProposerID] + if !exist { + recv.consensus.logger.Debug( + "Unable to verify psig, proposer is not qualified", + "vote", vote) + return false, true + } + blockHash := vote.BlockHash + if blockHash == types.NullBlockHash { + var err error + blockHash, err = recv.emptyBlockHash(vote.Position) + if err != nil { + recv.consensus.logger.Error( + "Failed to verify vote for empty block", + "position", vote.Position, + "error", err) + return false, true + } + } + return pubKey.VerifySignature( + blockHash, crypto.Signature(vote.PartialSignature)), true + } + } + return len(vote.PartialSignature.Signature) == 0, true +} + +func (recv *consensusBAReceiver) ProposeVote(vote *types.Vote) { + if !recv.isNotary { + return + } + if recv.psigSigner != nil && + vote.BlockHash != types.SkipBlockHash { + if vote.Type == types.VoteCom || vote.Type == types.VoteFastCom { + if vote.BlockHash == types.NullBlockHash { + hash, err := recv.emptyBlockHash(vote.Position) + if err != nil { + recv.consensus.logger.Error( + "Failed to propose vote for empty block", + "position", vote.Position, + "error", err) + return + } + vote.PartialSignature = recv.psigSigner.sign(hash) + } else { + vote.PartialSignature = recv.psigSigner.sign(vote.BlockHash) + } + } + } + if err := recv.agreementModule.prepareVote(vote); err != nil { + recv.consensus.logger.Error("Failed to prepare vote", "error", err) + return + } + go func() { + if err := recv.agreementModule.processVote(vote); err != nil { + recv.consensus.logger.Error("Failed to process self vote", + "error", err, + "vote", vote) + return + } + recv.consensus.logger.Debug("Calling Network.BroadcastVote", + "vote", vote) + recv.consensus.network.BroadcastVote(vote) + }() +} + +func (recv *consensusBAReceiver) ProposeBlock() common.Hash { + if !recv.isNotary { + return common.Hash{} + } + block, err := recv.consensus.proposeBlock(recv.agreementModule.agreementID()) + if err != nil || block == nil { + recv.consensus.logger.Error("Unable to propose block", "error", err) + return types.NullBlockHash + } + go func() { + if err := recv.consensus.preProcessBlock(block); err != nil { + recv.consensus.logger.Error("Failed to pre-process block", "error", err) + 
return + } + recv.consensus.logger.Debug("Calling Network.BroadcastBlock", + "block", block) + recv.consensus.network.BroadcastBlock(block) + }() + return block.Hash +} + +func (recv *consensusBAReceiver) ConfirmBlock( + hash common.Hash, votes map[types.NodeID]*types.Vote) { + var ( + block *types.Block + aID = recv.agreementModule.agreementID() + ) + + isEmptyBlockConfirmed := hash == common.Hash{} + if isEmptyBlockConfirmed { + recv.consensus.logger.Info("Empty block is confirmed", "position", aID) + var err error + block, err = recv.consensus.bcModule.addEmptyBlock(aID) + if err != nil { + recv.consensus.logger.Error("Add position for empty failed", + "error", err) + return + } + if block == nil { + // The empty block's parent is not found locally, thus we can't + // propose it at this moment. + // + // We can only rely on block pulling upon receiving + // types.AgreementResult from the next position. + recv.consensus.logger.Warn( + "An empty block is confirmed without its parent", + "position", aID) + return + } + } else { + var exist bool + block, exist = recv.agreementModule.findBlockNoLock(hash) + if !exist { + recv.consensus.logger.Debug("Unknown block confirmed", + "hash", hash.String()[:6]) + ch := make(chan *types.Block) + func() { + recv.consensus.lock.Lock() + defer recv.consensus.lock.Unlock() + recv.consensus.baConfirmedBlock[hash] = ch + }() + go func() { + hashes := common.Hashes{hash} + PullBlockLoop: + for { + recv.consensus.logger.Debug("Calling Network.PullBlock for BA block", + "hash", hash) + recv.consensus.network.PullBlocks(hashes) + select { + case block = <-ch: + break PullBlockLoop + case <-time.After(1 * time.Second): + } + } + recv.consensus.logger.Debug("Receive unknown block", + "hash", hash.String()[:6], + "position", block.Position) + recv.agreementModule.addCandidateBlock(block) + recv.agreementModule.lock.Lock() + defer recv.agreementModule.lock.Unlock() + recv.ConfirmBlock(block.Hash, votes) + }() + return + } + } + + if len(votes) == 0 && len(block.Randomness) == 0 { + recv.consensus.logger.Error("No votes to recover randomness", + "block", block) + } else if votes != nil { + voteList := make([]types.Vote, 0, len(votes)) + IDs := make(cryptoDKG.IDs, 0, len(votes)) + psigs := make([]cryptoDKG.PartialSignature, 0, len(votes)) + for _, vote := range votes { + if vote.BlockHash != hash { + continue + } + if block.Position.Round >= DKGDelayRound { + ID, exist := recv.npks.IDMap[vote.ProposerID] + if !exist { + continue + } + IDs = append(IDs, ID) + psigs = append(psigs, vote.PartialSignature) + } else { + voteList = append(voteList, *vote) + } + } + if block.Position.Round >= DKGDelayRound { + rand, err := cryptoDKG.RecoverSignature(psigs, IDs) + if err != nil { + recv.consensus.logger.Warn("Unable to recover randomness", + "block", block, + "error", err) + } else { + block.Randomness = rand.Signature[:] + } + } else { + block.Randomness = NoRand + } + + if recv.isNotary { + result := &types.AgreementResult{ + BlockHash: block.Hash, + Position: block.Position, + Votes: voteList, + IsEmptyBlock: isEmptyBlockConfirmed, + Randomness: block.Randomness, + } + // touchAgreementResult does not support concurrent access. 
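+			// The result is therefore funneled through priorityMsgChan and
+			// touched by the single processMsg goroutine.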
+			go func() {
+				recv.consensus.priorityMsgChan <- (*selfAgreementResult)(result)
+			}()
+			recv.consensus.logger.Debug("Broadcast AgreementResult",
+				"result", result)
+			recv.consensus.network.BroadcastAgreementResult(result)
+			if block.IsEmpty() {
+				recv.consensus.bcModule.addBlockRandomness(
+					block.Position, block.Randomness)
+			}
+			if block.Position.Round >= DKGDelayRound {
+				recv.consensus.logger.Debug(
+					"Broadcast finalized block",
+					"block", block)
+				recv.consensus.network.BroadcastBlock(block)
+			}
+		}
+	}
+
+	if !block.IsGenesis() &&
+		!recv.consensus.bcModule.confirmed(block.Position.Height-1) {
+		go func(hash common.Hash) {
+			parentHash := hash
+			for {
+				recv.consensus.logger.Warn("Parent block not confirmed",
+					"parent-hash", parentHash.String()[:6],
+					"cur-position", block.Position)
+				ch := make(chan *types.Block)
+				if !func() bool {
+					recv.consensus.lock.Lock()
+					defer recv.consensus.lock.Unlock()
+					if _, exist := recv.consensus.baConfirmedBlock[parentHash]; exist {
+						return false
+					}
+					recv.consensus.baConfirmedBlock[parentHash] = ch
+					return true
+				}() {
+					return
+				}
+				var block *types.Block
+			PullBlockLoop:
+				for {
+					recv.consensus.logger.Debug("Calling Network.PullBlock for parent",
+						"hash", parentHash)
+					recv.consensus.network.PullBlocks(common.Hashes{parentHash})
+					select {
+					case block = <-ch:
+						break PullBlockLoop
+					case <-time.After(1 * time.Second):
+					}
+				}
+				recv.consensus.logger.Info("Receive parent block",
+					"parent-hash", block.ParentHash.String()[:6],
+					"cur-position", block.Position)
+				if !block.IsFinalized() {
+					// TODO(jimmy): use a separate message to pull finalized
+					// block. Here, we pull it again as a workaround.
+					continue
+				}
+				recv.consensus.processBlockChan <- block
+				parentHash = block.ParentHash
+				if block.IsGenesis() || recv.consensus.bcModule.confirmed(
+					block.Position.Height-1) {
+					return
+				}
+			}
+		}(block.ParentHash)
+	}
+	if !block.IsEmpty() {
+		recv.consensus.processBlockChan <- block
+	}
+	// Drain the restartNotary channel so BA does not get stuck in a deadlock.
+CleanChannelLoop:
+	for {
+		select {
+		case <-recv.restartNotary:
+		default:
+			break CleanChannelLoop
+		}
+	}
+	recv.restartNotary <- block.Position
+}
+
+func (recv *consensusBAReceiver) PullBlocks(hashes common.Hashes) {
+	if !recv.isNotary {
+		return
+	}
+	recv.consensus.logger.Debug("Calling Network.PullBlocks", "hashes", hashes)
+	recv.consensus.network.PullBlocks(hashes)
+}
+
+func (recv *consensusBAReceiver) ReportForkVote(v1, v2 *types.Vote) {
+	recv.consensus.gov.ReportForkVote(v1, v2)
+}
+
+func (recv *consensusBAReceiver) ReportForkBlock(b1, b2 *types.Block) {
+	b1Clone := b1.Clone()
+	b2Clone := b2.Clone()
+	b1Clone.Payload = []byte{}
+	b2Clone.Payload = []byte{}
+	recv.consensus.gov.ReportForkBlock(b1Clone, b2Clone)
+}
+
+// consensusDKGReceiver implements dkgReceiver.
+type consensusDKGReceiver struct {
+	ID           types.NodeID
+	gov          Governance
+	signer       *utils.Signer
+	nodeSetCache *utils.NodeSetCache
+	cfgModule    *configurationChain
+	network      Network
+	logger       common.Logger
+}
+
+// ProposeDKGComplaint proposes a DKGComplaint.
+func (recv *consensusDKGReceiver) ProposeDKGComplaint(
+	complaint *typesDKG.Complaint) {
+	if err := recv.signer.SignDKGComplaint(complaint); err != nil {
+		recv.logger.Error("Failed to sign DKG complaint", "error", err)
+		return
+	}
+	recv.logger.Debug("Calling Governance.AddDKGComplaint",
+		"complaint", complaint)
+	recv.gov.AddDKGComplaint(complaint)
+}
+
+// ProposeDKGMasterPublicKey proposes a DKGMasterPublicKey.
+func (recv *consensusDKGReceiver) ProposeDKGMasterPublicKey(
+	mpk *typesDKG.MasterPublicKey) {
+	if err := recv.signer.SignDKGMasterPublicKey(mpk); err != nil {
+		recv.logger.Error("Failed to sign DKG master public key", "error", err)
+		return
+	}
+	recv.logger.Debug("Calling Governance.AddDKGMasterPublicKey", "key", mpk)
+	recv.gov.AddDKGMasterPublicKey(mpk)
+}
+
+// ProposeDKGPrivateShare proposes a DKGPrivateShare.
+func (recv *consensusDKGReceiver) ProposeDKGPrivateShare(
+	prv *typesDKG.PrivateShare) {
+	if err := recv.signer.SignDKGPrivateShare(prv); err != nil {
+		recv.logger.Error("Failed to sign DKG private share", "error", err)
+		return
+	}
+	receiverPubKey, exists := recv.nodeSetCache.GetPublicKey(prv.ReceiverID)
+	if !exists {
+		recv.logger.Error("Public key for receiver not found",
+			"receiver", prv.ReceiverID.String()[:6])
+		return
+	}
+	if prv.ReceiverID == recv.ID {
+		go func() {
+			if err := recv.cfgModule.processPrivateShare(prv); err != nil {
+				recv.logger.Error("Failed to process self private share", "prvShare", prv)
+			}
+		}()
+	} else {
+		recv.logger.Debug("Calling Network.SendDKGPrivateShare",
+			"receiver", hex.EncodeToString(receiverPubKey.Bytes()))
+		recv.network.SendDKGPrivateShare(receiverPubKey, prv)
+	}
+}
+
+// ProposeDKGAntiNackComplaint proposes a DKGPrivateShare as an anti-nack
+// complaint.
+func (recv *consensusDKGReceiver) ProposeDKGAntiNackComplaint(
+	prv *typesDKG.PrivateShare) {
+	if prv.ProposerID == recv.ID {
+		if err := recv.signer.SignDKGPrivateShare(prv); err != nil {
+			recv.logger.Error("Failed to sign DKG private share", "error", err)
+			return
+		}
+	}
+	recv.logger.Debug("Calling Network.BroadcastDKGPrivateShare", "share", prv)
+	recv.network.BroadcastDKGPrivateShare(prv)
+}
+
+// ProposeDKGMPKReady proposes a DKGMPKReady message.
+func (recv *consensusDKGReceiver) ProposeDKGMPKReady(ready *typesDKG.MPKReady) {
+	if err := recv.signer.SignDKGMPKReady(ready); err != nil {
+		recv.logger.Error("Failed to sign DKG ready", "error", err)
+		return
+	}
+	recv.logger.Debug("Calling Governance.AddDKGMPKReady", "ready", ready)
+	recv.gov.AddDKGMPKReady(ready)
+}
+
+// ProposeDKGFinalize proposes a DKGFinalize message.
+func (recv *consensusDKGReceiver) ProposeDKGFinalize(final *typesDKG.Finalize) {
+	if err := recv.signer.SignDKGFinalize(final); err != nil {
+		recv.logger.Error("Failed to sign DKG finalize", "error", err)
+		return
+	}
+	recv.logger.Debug("Calling Governance.AddDKGFinalize", "final", final)
+	recv.gov.AddDKGFinalize(final)
+}
+
+// ProposeDKGSuccess proposes a DKGSuccess message.
+func (recv *consensusDKGReceiver) ProposeDKGSuccess(success *typesDKG.Success) {
+	if err := recv.signer.SignDKGSuccess(success); err != nil {
+		recv.logger.Error("Failed to sign DKG success", "error", err)
+		return
+	}
+	recv.logger.Debug("Calling Governance.AddDKGSuccess", "success", success)
+	recv.gov.AddDKGSuccess(success)
+}
+
+// Consensus implements the DEXON Consensus algorithm.
+type Consensus struct {
+	// Node Info.
+	ID     types.NodeID
+	signer *utils.Signer
+
+	// BA.
+	baMgr            *agreementMgr
+	baConfirmedBlock map[common.Hash]chan<- *types.Block
+
+	// DKG.
+	dkgRunning int32
+	dkgReady   *sync.Cond
+	cfgModule  *configurationChain
+
+	// Interfaces.
+	db       db.Database
+	app      Application
+	debugApp Debug
+	gov      Governance
+	network  Network
+
+	// Misc.
+	bcModule                 *blockChain
+	dMoment                  time.Time
+	nodeSetCache             *utils.NodeSetCache
+	tsigVerifierCache        *TSigVerifierCache
+	lock                     sync.RWMutex
+	ctx                      context.Context
+	ctxCancel                context.CancelFunc
+	event                    *common.Event
+	roundEvent               *utils.RoundEvent
+	logger                   common.Logger
+	resetDeliveryGuardTicker chan struct{}
+	msgChan                  chan types.Msg
+	priorityMsgChan          chan interface{}
+	waitGroup                sync.WaitGroup
+	processBlockChan         chan *types.Block
+
+	// Context of the dummy receiver during switching from the syncer.
+	dummyCancel    context.CancelFunc
+	dummyFinished  <-chan struct{}
+	dummyMsgBuffer []types.Msg
+}
+
+// NewConsensus constructs a Consensus instance.
+func NewConsensus(
+	dMoment time.Time,
+	app Application,
+	gov Governance,
+	db db.Database,
+	network Network,
+	prv crypto.PrivateKey,
+	logger common.Logger) *Consensus {
+	return newConsensusForRound(
+		nil, dMoment, app, gov, db, network, prv, logger, true)
+}
+
+// NewConsensusForSimulation creates an instance of Consensus for simulation.
+// The only difference from NewConsensus is that the application is
+// non-blocking.
+func NewConsensusForSimulation(
+	dMoment time.Time,
+	app Application,
+	gov Governance,
+	db db.Database,
+	network Network,
+	prv crypto.PrivateKey,
+	logger common.Logger) *Consensus {
+	return newConsensusForRound(
+		nil, dMoment, app, gov, db, network, prv, logger, false)
+}
+
+// NewConsensusFromSyncer constructs a Consensus instance from information
+// provided by the syncer.
+//
+// You need to provide the initial block for this newly created Consensus
+// instance to bootstrap with. A proper choice is the last finalized block you
+// delivered to the syncer.
+//
+// NOTE: those confirmed blocks should be organized by chainID and sorted by
+// their positions, in ascending order.
+func NewConsensusFromSyncer(
+	initBlock *types.Block,
+	startWithEmpty bool,
+	dMoment time.Time,
+	app Application,
+	gov Governance,
+	db db.Database,
+	networkModule Network,
+	prv crypto.PrivateKey,
+	confirmedBlocks []*types.Block,
+	cachedMessages []types.Msg,
+	logger common.Logger) (*Consensus, error) {
+	// Setup Consensus instance.
+	con := newConsensusForRound(initBlock, dMoment, app, gov, db,
+		networkModule, prv, logger, true)
+	// Launch a dummy receiver before we start receiving from network module.
+	con.dummyMsgBuffer = cachedMessages
+	con.dummyCancel, con.dummyFinished = utils.LaunchDummyReceiver(
+		con.ctx, networkModule.ReceiveChan(), func(msg types.Msg) {
+			con.dummyMsgBuffer = append(con.dummyMsgBuffer, msg)
+		})
+	// Dump all BA-confirmed blocks to the consensus instance, making sure the
+	// added blocks form a DAG.
+	refBlock := initBlock
+	for _, b := range confirmedBlocks {
+		// A block can be added only when its parent block has already been
+		// added to the lattice. Otherwise, the pulling mechanism would stop
+		// at the added block and lose its parent block forever.
+		if b.Position.Height != refBlock.Position.Height+1 {
+			break
+		}
+		if err := con.processBlock(b); err != nil {
+			return nil, err
+		}
+		refBlock = b
+	}
+	if startWithEmpty {
+		emptyPos := types.Position{
+			Round:  con.bcModule.tipRound(),
+			Height: initBlock.Position.Height + 1,
+		}
+		_, err := con.bcModule.addEmptyBlock(emptyPos)
+		if err != nil {
+			panic(err)
+		}
+	}
+	return con, nil
+}
+
+// newConsensusForRound creates a Consensus instance.
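+//
+// All exported constructors (NewConsensus, NewConsensusForSimulation and
+// NewConsensusFromSyncer) funnel into this function; they differ only in the
+// initial block, the blocking behavior of the application, and the messages
+// replayed from the syncer.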
+func newConsensusForRound( + initBlock *types.Block, + dMoment time.Time, + app Application, + gov Governance, + db db.Database, + network Network, + prv crypto.PrivateKey, + logger common.Logger, + usingNonBlocking bool) *Consensus { + // TODO(w): load latest blockHeight from DB, and use config at that height. + nodeSetCache := utils.NewNodeSetCache(gov) + // Setup signer module. + signer := utils.NewSigner(prv) + // Check if the application implement Debug interface. + var debugApp Debug + if a, ok := app.(Debug); ok { + debugApp = a + } + // Get configuration for bootstrap round. + initPos := types.Position{ + Round: 0, + Height: types.GenesisHeight, + } + if initBlock != nil { + initPos = initBlock.Position + } + // Init configuration chain. + ID := types.NewNodeID(prv.PublicKey()) + recv := &consensusDKGReceiver{ + ID: ID, + gov: gov, + signer: signer, + nodeSetCache: nodeSetCache, + network: network, + logger: logger, + } + cfgModule := newConfigurationChain(ID, recv, gov, nodeSetCache, db, logger) + recv.cfgModule = cfgModule + signer.SetBLSSigner( + func(round uint64, hash common.Hash) (crypto.Signature, error) { + _, signer, err := cfgModule.getDKGInfo(round, false) + if err != nil { + return crypto.Signature{}, err + } + return crypto.Signature(signer.sign(hash)), nil + }) + appModule := app + if usingNonBlocking { + appModule = newNonBlocking(app, debugApp) + } + tsigVerifierCache := NewTSigVerifierCache(gov, 7) + bcModule := newBlockChain(ID, dMoment, initBlock, appModule, + tsigVerifierCache, signer, logger) + // Construct Consensus instance. + con := &Consensus{ + ID: ID, + app: appModule, + debugApp: debugApp, + gov: gov, + db: db, + network: network, + baConfirmedBlock: make(map[common.Hash]chan<- *types.Block), + dkgReady: sync.NewCond(&sync.Mutex{}), + cfgModule: cfgModule, + bcModule: bcModule, + dMoment: dMoment, + nodeSetCache: nodeSetCache, + tsigVerifierCache: tsigVerifierCache, + signer: signer, + event: common.NewEvent(), + logger: logger, + resetDeliveryGuardTicker: make(chan struct{}), + msgChan: make(chan types.Msg, 1024), + priorityMsgChan: make(chan interface{}, 1024), + processBlockChan: make(chan *types.Block, 1024), + } + con.ctx, con.ctxCancel = context.WithCancel(context.Background()) + var err error + con.roundEvent, err = utils.NewRoundEvent(con.ctx, gov, logger, initPos, + ConfigRoundShift) + if err != nil { + panic(err) + } + if con.baMgr, err = newAgreementMgr(con); err != nil { + panic(err) + } + if err = con.prepare(initBlock); err != nil { + panic(err) + } + return con +} + +// prepare the Consensus instance to be ready for blocks after 'initBlock'. +// 'initBlock' could be either: +// - nil +// - the last finalized block +func (con *Consensus) prepare(initBlock *types.Block) (err error) { + // Trigger the round validation method for the next round of the first + // round. + // The block past from full node should be delivered already or known by + // full node. We don't have to notify it. + initRound := uint64(0) + if initBlock != nil { + initRound = initBlock.Position.Round + } + if initRound == 0 { + if DKGDelayRound == 0 { + panic("not implemented yet") + } + } + // Measure time elapse for each handler of round events. 
+ elapse := func(what string, lastE utils.RoundEventParam) func() { + start := time.Now() + con.logger.Info("Handle round event", + "what", what, + "event", lastE) + return func() { + con.logger.Info("Finish round event", + "what", what, + "event", lastE, + "elapse", time.Since(start)) + } + } + // Register round event handler to purge cached node set. To make sure each + // modules see the up-to-date node set, we need to make sure this action + // should be taken as the first one. + con.roundEvent.Register(func(evts []utils.RoundEventParam) { + defer elapse("purge-cache", evts[len(evts)-1])() + for _, e := range evts { + if e.Reset == 0 { + continue + } + con.nodeSetCache.Purge(e.Round + 1) + con.tsigVerifierCache.Purge(e.Round + 1) + } + }) + // Register round event handler to abort previous running DKG if any. + con.roundEvent.Register(func(evts []utils.RoundEventParam) { + e := evts[len(evts)-1] + go func() { + defer elapse("abort-DKG", e)() + if e.Reset > 0 { + aborted := con.cfgModule.abortDKG(con.ctx, e.Round+1, e.Reset-1) + con.logger.Info("DKG aborting result", + "round", e.Round+1, + "reset", e.Reset-1, + "aborted", aborted) + } + }() + }) + // Register round event handler to update BA and BC modules. + con.roundEvent.Register(func(evts []utils.RoundEventParam) { + defer elapse("append-config", evts[len(evts)-1])() + // Always updates newer configs to the later modules first in the data + // flow. + if err := con.bcModule.notifyRoundEvents(evts); err != nil { + panic(err) + } + if err := con.baMgr.notifyRoundEvents(evts); err != nil { + panic(err) + } + }) + // Register round event handler to reset DKG if the DKG set for next round + // failed to setup. + con.roundEvent.Register(func(evts []utils.RoundEventParam) { + e := evts[len(evts)-1] + defer elapse("reset-DKG", e)() + nextRound := e.Round + 1 + if nextRound < DKGDelayRound { + return + } + curNotarySet, err := con.nodeSetCache.GetNotarySet(e.Round) + if err != nil { + con.logger.Error("Error getting notary set when proposing CRS", + "round", e.Round, + "error", err) + return + } + if _, exist := curNotarySet[con.ID]; !exist { + return + } + con.event.RegisterHeight(e.NextDKGResetHeight(), func(uint64) { + if ok, _ := utils.IsDKGValid( + con.gov, con.logger, nextRound, e.Reset); ok { + return + } + // Aborting all previous running DKG protocol instance if any. + go con.runCRS(e.Round, utils.Rehash(e.CRS, uint(e.Reset+1)), true) + }) + }) + // Register round event handler to propose new CRS. + con.roundEvent.Register(func(evts []utils.RoundEventParam) { + // We don't have to propose new CRS during DKG reset, the reset of DKG + // would be done by the notary set in previous round. + e := evts[len(evts)-1] + defer elapse("propose-CRS", e)() + if e.Reset != 0 || e.Round < DKGDelayRound { + return + } + if curNotarySet, err := con.nodeSetCache.GetNotarySet(e.Round); err != nil { + con.logger.Error("Error getting notary set when proposing CRS", + "round", e.Round, + "error", err) + } else { + if _, exist := curNotarySet[con.ID]; !exist { + return + } + con.event.RegisterHeight(e.NextCRSProposingHeight(), func(uint64) { + con.logger.Debug( + "Calling Governance.CRS to check if already proposed", + "round", e.Round+1) + if (con.gov.CRS(e.Round+1) != common.Hash{}) { + con.logger.Debug("CRS already proposed", "round", e.Round+1) + return + } + go con.runCRS(e.Round, e.CRS, false) + }) + } + }) + // Touch nodeSetCache for next round. 
+ con.roundEvent.Register(func(evts []utils.RoundEventParam) { + e := evts[len(evts)-1] + defer elapse("touch-NodeSetCache", e)() + con.event.RegisterHeight(e.NextTouchNodeSetCacheHeight(), func(uint64) { + if e.Reset == 0 { + return + } + go func() { + nextRound := e.Round + 1 + if err := con.nodeSetCache.Touch(nextRound); err != nil { + con.logger.Warn("Failed to update nodeSetCache", + "round", nextRound, + "error", err) + } + }() + }) + }) + con.roundEvent.Register(func(evts []utils.RoundEventParam) { + e := evts[len(evts)-1] + if e.Reset != 0 { + return + } + defer elapse("touch-DKGCache", e)() + go func() { + if _, err := + con.tsigVerifierCache.Update(e.Round); err != nil { + con.logger.Warn("Failed to update tsig cache", + "round", e.Round, + "error", err) + } + }() + go func() { + threshold := utils.GetDKGThreshold( + utils.GetConfigWithPanic(con.gov, e.Round, con.logger)) + // Restore group public key. + con.logger.Debug( + "Calling Governance.DKGMasterPublicKeys for recoverDKGInfo", + "round", e.Round) + con.logger.Debug( + "Calling Governance.DKGComplaints for recoverDKGInfo", + "round", e.Round) + _, qualifies, err := typesDKG.CalcQualifyNodes( + con.gov.DKGMasterPublicKeys(e.Round), + con.gov.DKGComplaints(e.Round), + threshold) + if err != nil { + con.logger.Warn("Failed to calculate dkg set", + "round", e.Round, + "error", err) + return + } + if _, exist := qualifies[con.ID]; !exist { + return + } + if _, _, err := + con.cfgModule.getDKGInfo(e.Round, true); err != nil { + con.logger.Warn("Failed to recover DKG info", + "round", e.Round, + "error", err) + } + }() + }) + // checkCRS is a generator of checker to check if CRS for that round is + // ready or not. + checkCRS := func(round uint64) func() bool { + return func() bool { + nextCRS := con.gov.CRS(round) + if (nextCRS != common.Hash{}) { + return true + } + con.logger.Debug("CRS is not ready yet. Try again later...", + "nodeID", con.ID, + "round", round) + return false + } + } + // Trigger round validation method for next period. + con.roundEvent.Register(func(evts []utils.RoundEventParam) { + e := evts[len(evts)-1] + defer elapse("next-round", e)() + // Register a routine to trigger round events. + con.event.RegisterHeight(e.NextRoundValidationHeight(), + utils.RoundEventRetryHandlerGenerator(con.roundEvent, con.event)) + // Register a routine to register next DKG. + con.event.RegisterHeight(e.NextDKGRegisterHeight(), func(uint64) { + nextRound := e.Round + 1 + if nextRound < DKGDelayRound { + con.logger.Info("Skip runDKG for round", + "round", nextRound, + "reset", e.Reset) + return + } + go func() { + // Normally, gov.CRS would return non-nil. Use this for in case + // of unexpected network fluctuation and ensure the robustness. 
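+				// checkWithCancel polls checkCRS every 500ms, giving up
+				// once con.ctx is canceled.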
+				if !checkWithCancel(
+					con.ctx, 500*time.Millisecond, checkCRS(nextRound)) {
+					con.logger.Debug("unable to prepare CRS for notary set",
+						"round", nextRound,
+						"reset", e.Reset)
+					return
+				}
+				nextNotarySet, err := con.nodeSetCache.GetNotarySet(nextRound)
+				if err != nil {
+					con.logger.Error("Error getting notary set for next round",
+						"round", nextRound,
+						"reset", e.Reset,
+						"error", err)
+					return
+				}
+				if _, exist := nextNotarySet[con.ID]; !exist {
+					con.logger.Info("Not selected as notary set",
+						"round", nextRound,
+						"reset", e.Reset)
+					return
+				}
+				con.logger.Info("Selected as notary set",
+					"round", nextRound,
+					"reset", e.Reset)
+				nextConfig := utils.GetConfigWithPanic(con.gov, nextRound,
+					con.logger)
+				con.cfgModule.registerDKG(con.ctx, nextRound, e.Reset,
+					utils.GetDKGThreshold(nextConfig))
+				con.event.RegisterHeight(e.NextDKGPreparationHeight(),
+					func(h uint64) {
+						func() {
+							con.dkgReady.L.Lock()
+							defer con.dkgReady.L.Unlock()
+							con.dkgRunning = 0
+						}()
+						// We want to skip some of the DKG phases when started.
+						dkgCurrentHeight := h - e.NextDKGPreparationHeight()
+						con.runDKG(
+							nextRound, e.Reset,
+							e.NextDKGPreparationHeight(), dkgCurrentHeight)
+					})
+			}()
+		})
+	})
+	con.roundEvent.TriggerInitEvent()
+	if initBlock != nil {
+		con.event.NotifyHeight(initBlock.Position.Height)
+	}
+	con.baMgr.prepare()
+	return
+}
+
+// Run starts running DEXON Consensus.
+func (con *Consensus) Run() {
+	// There may be empty blocks in the blockchain added by force sync.
+	blocksWithoutRandomness := con.bcModule.pendingBlocksWithoutRandomness()
+	// Launch BA routines.
+	con.baMgr.run()
+	// Launch network handler.
+	con.logger.Debug("Calling Network.ReceiveChan")
+	con.waitGroup.Add(1)
+	go con.deliverNetworkMsg()
+	con.waitGroup.Add(1)
+	go con.processMsg()
+	go con.processBlockLoop()
+	// Stop dummy receiver if launched.
+	if con.dummyCancel != nil {
+		con.logger.Trace("Stop dummy receiver")
+		con.dummyCancel()
+		<-con.dummyFinished
+		// Replay those cached messages.
+		con.logger.Trace("Dummy receiver stopped, start dumping cached messages",
+			"count", len(con.dummyMsgBuffer))
+		for _, msg := range con.dummyMsgBuffer {
+		loop:
+			for {
+				select {
+				case con.msgChan <- msg:
+					break loop
+				case <-time.After(50 * time.Millisecond):
+					con.logger.Debug(
+						"internal message channel is full when syncing")
+				}
+			}
+		}
+		con.logger.Trace("Finish dumping cached messages")
+	}
+	con.generateBlockRandomness(blocksWithoutRandomness)
+	// Sleep until dMoment comes.
+	time.Sleep(con.dMoment.Sub(time.Now().UTC()))
+	// Take some time to bootstrap.
+	time.Sleep(3 * time.Second)
+	con.waitGroup.Add(1)
+	go con.deliveryGuard()
+	// Block until done.
+ select { + case <-con.ctx.Done(): + } +} + +func (con *Consensus) generateBlockRandomness(blocks []*types.Block) { + con.logger.Debug("Start generating block randomness", "blocks", blocks) + isNotarySet := make(map[uint64]bool) + for _, block := range blocks { + if block.Position.Round < DKGDelayRound { + continue + } + doRun, exist := isNotarySet[block.Position.Round] + if !exist { + curNotarySet, err := con.nodeSetCache.GetNotarySet(block.Position.Round) + if err != nil { + con.logger.Error("Error getting notary set when generate block tsig", + "round", block.Position.Round, + "error", err) + continue + } + _, exist := curNotarySet[con.ID] + isNotarySet[block.Position.Round] = exist + doRun = exist + } + if !doRun { + continue + } + go func(block *types.Block) { + psig, err := con.cfgModule.preparePartialSignature( + block.Position.Round, block.Hash) + if err != nil { + con.logger.Error("Failed to prepare partial signature", + "block", block, + "error", err) + } else if err = con.signer.SignDKGPartialSignature(psig); err != nil { + con.logger.Error("Failed to sign DKG partial signature", + "block", block, + "error", err) + } else if err = con.cfgModule.processPartialSignature(psig); err != nil { + con.logger.Error("Failed to process partial signature", + "block", block, + "error", err) + } else { + con.logger.Debug("Calling Network.BroadcastDKGPartialSignature", + "proposer", psig.ProposerID, + "block", block) + con.network.BroadcastDKGPartialSignature(psig) + sig, err := con.cfgModule.runTSig( + block.Position.Round, + block.Hash, + 60*time.Minute, + ) + if err != nil { + con.logger.Error("Failed to run Block Tsig", + "block", block, + "error", err) + return + } + result := &types.AgreementResult{ + BlockHash: block.Hash, + Position: block.Position, + Randomness: sig.Signature[:], + } + con.bcModule.addBlockRandomness(block.Position, sig.Signature[:]) + con.logger.Debug("Broadcast BlockRandomness", + "block", block, + "result", result) + con.network.BroadcastAgreementResult(result) + if err := con.deliverFinalizedBlocks(); err != nil { + con.logger.Error("Failed to deliver finalized block", + "error", err) + } + } + }(block) + } +} + +// runDKG starts running DKG protocol. +func (con *Consensus) runDKG( + round, reset, dkgBeginHeight, dkgHeight uint64) { + con.dkgReady.L.Lock() + defer con.dkgReady.L.Unlock() + if con.dkgRunning != 0 { + return + } + con.dkgRunning = 1 + go func() { + defer func() { + con.dkgReady.L.Lock() + defer con.dkgReady.L.Unlock() + con.dkgReady.Broadcast() + con.dkgRunning = 2 + }() + if err := + con.cfgModule.runDKG( + round, reset, + con.event, dkgBeginHeight, dkgHeight); err != nil { + con.logger.Error("Failed to runDKG", "error", err) + } + }() +} + +func (con *Consensus) runCRS(round uint64, hash common.Hash, reset bool) { + // Start running next round CRS. 
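+	// Flow: sign a partial signature over the hash, broadcast it, run the
+	// threshold signature, then propose the result as the next CRS (or use
+	// it to reset the DKG).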
+	psig, err := con.cfgModule.preparePartialSignature(round, hash)
+	if err != nil {
+		con.logger.Error("Failed to prepare partial signature", "error", err)
+	} else if err = con.signer.SignDKGPartialSignature(psig); err != nil {
+		con.logger.Error("Failed to sign DKG partial signature", "error", err)
+	} else if err = con.cfgModule.processPartialSignature(psig); err != nil {
+		con.logger.Error("Failed to process partial signature", "error", err)
+	} else {
+		con.logger.Debug("Calling Network.BroadcastDKGPartialSignature",
+			"proposer", psig.ProposerID,
+			"round", psig.Round,
+			"hash", psig.Hash)
+		con.network.BroadcastDKGPartialSignature(psig)
+		con.logger.Debug("Calling Governance.CRS", "round", round)
+		crs, err := con.cfgModule.runCRSTSig(round, hash)
+		if err != nil {
+			con.logger.Error("Failed to run CRS Tsig", "error", err)
+		} else {
+			if reset {
+				con.logger.Debug("Calling Governance.ResetDKG",
+					"round", round+1,
+					"crs", hex.EncodeToString(crs))
+				con.gov.ResetDKG(crs)
+			} else {
+				con.logger.Debug("Calling Governance.ProposeCRS",
+					"round", round+1,
+					"crs", hex.EncodeToString(crs))
+				con.gov.ProposeCRS(round+1, crs)
+			}
+		}
+	}
+}
+
+// Stop stops the Consensus core.
+func (con *Consensus) Stop() {
+	con.ctxCancel()
+	con.baMgr.stop()
+	con.event.Reset()
+	con.waitGroup.Wait()
+	if nbApp, ok := con.app.(*nonBlocking); ok {
+		nbApp.wait()
+	}
+}
+
+func (con *Consensus) deliverNetworkMsg() {
+	defer con.waitGroup.Done()
+	recv := con.network.ReceiveChan()
+	for {
+		select {
+		case <-con.ctx.Done():
+			return
+		default:
+		}
+		select {
+		case msg := <-recv:
+		innerLoop:
+			for {
+				select {
+				case con.msgChan <- msg:
+					break innerLoop
+				case <-time.After(500 * time.Millisecond):
+					con.logger.Debug("internal message channel is full",
+						"pending", msg)
+				}
+			}
+		case <-con.ctx.Done():
+			return
+		}
+	}
+}
+
+func (con *Consensus) processMsg() {
+	defer con.waitGroup.Done()
+MessageLoop:
+	for {
+		select {
+		case <-con.ctx.Done():
+			return
+		default:
+		}
+		var msg, peer interface{}
+		select {
+		case msg = <-con.priorityMsgChan:
+		default:
+		}
+		if msg == nil {
+			select {
+			case message := <-con.msgChan:
+				msg, peer = message.Payload, message.PeerID
+			case msg = <-con.priorityMsgChan:
+			case <-con.ctx.Done():
+				return
+			}
+		}
+		switch val := msg.(type) {
+		case *selfAgreementResult:
+			con.baMgr.touchAgreementResult((*types.AgreementResult)(val))
+		case *types.Block:
+			if ch, exist := func() (chan<- *types.Block, bool) {
+				con.lock.RLock()
+				defer con.lock.RUnlock()
+				ch, e := con.baConfirmedBlock[val.Hash]
+				return ch, e
+			}(); exist {
+				if val.IsEmpty() {
+					hash, err := utils.HashBlock(val)
+					if err != nil {
+						con.logger.Error("Error verifying empty block hash",
+							"block", val,
+							"error", err)
+						con.network.ReportBadPeerChan() <- peer
+						continue MessageLoop
+					}
+					if hash != val.Hash {
+						con.logger.Error("Incorrect confirmed empty block hash",
+							"block", val,
+							"hash", hash)
+						con.network.ReportBadPeerChan() <- peer
+						continue MessageLoop
+					}
+					if _, err := con.bcModule.proposeBlock(
+						val.Position, time.Time{}, true); err != nil {
+						con.logger.Error("Error adding empty block",
+							"block", val,
+							"error", err)
+						con.network.ReportBadPeerChan() <- peer
+						continue MessageLoop
+					}
+				} else {
+					if !val.IsFinalized() {
+						con.logger.Warn("Ignore not finalized block",
+							"block", val)
+						continue MessageLoop
+					}
+					ok, err := con.bcModule.verifyRandomness(
+						val.Hash, val.Position.Round, val.Randomness)
+					if err != nil {
+						con.logger.Error("Error verifying confirmed block randomness",
+							"block", val,
+							"error", err)
+						con.network.ReportBadPeerChan() <- peer
+						continue MessageLoop
+					}
+					if !ok {
+						con.logger.Error("Incorrect confirmed block randomness",
+							"block", val)
+						con.network.ReportBadPeerChan() <- peer
+						continue MessageLoop
+					}
+					if err := utils.VerifyBlockSignature(val); err != nil {
+						con.logger.Error("VerifyBlockSignature failed",
+							"block", val,
+							"error", err)
+						con.network.ReportBadPeerChan() <- peer
+						continue MessageLoop
+					}
+				}
+				func() {
+					con.lock.Lock()
+					defer con.lock.Unlock()
+					// In case the block is delivered multiple times.
+					if _, exist := con.baConfirmedBlock[val.Hash]; !exist {
+						return
+					}
+					delete(con.baConfirmedBlock, val.Hash)
+					ch <- val
+				}()
+			} else if val.IsFinalized() {
+				if err := con.processFinalizedBlock(val); err != nil {
+					con.logger.Error("Failed to process finalized block",
+						"block", val,
+						"error", err)
+					con.network.ReportBadPeerChan() <- peer
+				}
+			} else {
+				if err := con.preProcessBlock(val); err != nil {
+					con.logger.Error("Failed to pre-process block",
+						"block", val,
+						"error", err)
+					con.network.ReportBadPeerChan() <- peer
+				}
+			}
+		case *types.Vote:
+			if err := con.ProcessVote(val); err != nil {
+				con.logger.Error("Failed to process vote",
+					"vote", val,
+					"error", err)
+				con.network.ReportBadPeerChan() <- peer
+			}
+		case *types.AgreementResult:
+			if err := con.ProcessAgreementResult(val); err != nil {
+				con.logger.Error("Failed to process agreement result",
+					"result", val,
+					"error", err)
+				con.network.ReportBadPeerChan() <- peer
+			}
+		case *typesDKG.PrivateShare:
+			if err := con.cfgModule.processPrivateShare(val); err != nil {
+				con.logger.Error("Failed to process private share",
+					"error", err)
+				con.network.ReportBadPeerChan() <- peer
+			}
+
+		case *typesDKG.PartialSignature:
+			if err := con.cfgModule.processPartialSignature(val); err != nil {
+				con.logger.Error("Failed to process partial signature",
+					"error", err)
+				con.network.ReportBadPeerChan() <- peer
+			}
+		}
+	}
+}
+
+// ProcessVote is the entry point to submit one vote to a Consensus instance.
+func (con *Consensus) ProcessVote(vote *types.Vote) (err error) {
+	err = con.baMgr.processVote(vote)
+	return
+}
+
+// ProcessAgreementResult processes the randomness request.
+func (con *Consensus) ProcessAgreementResult(
+	rand *types.AgreementResult) error {
+	if !con.baMgr.touchAgreementResult(rand) {
+		return nil
+	}
+	// Sanity check.
+	if err := VerifyAgreementResult(rand, con.nodeSetCache); err != nil {
+		con.baMgr.untouchAgreementResult(rand)
+		return err
+	}
+	if err := con.bcModule.processAgreementResult(rand); err != nil {
+		con.baMgr.untouchAgreementResult(rand)
+		if err == ErrSkipButNoError {
+			return nil
+		}
+		return err
+	}
+	// Syncing BA Module.
+	if err := con.baMgr.processAgreementResult(rand); err != nil {
+		con.baMgr.untouchAgreementResult(rand)
+		return err
+	}
+
+	con.logger.Debug("Rebroadcast AgreementResult",
+		"result", rand)
+	con.network.BroadcastAgreementResult(rand)
+
+	return con.deliverFinalizedBlocks()
+}
+
+// preProcessBlock performs Byzantine Agreement on the block.
+func (con *Consensus) preProcessBlock(b *types.Block) (err error) { + err = con.baMgr.processBlock(b) + if err == nil && con.debugApp != nil { + con.debugApp.BlockReceived(b.Hash) + } + return +} + +func (con *Consensus) processFinalizedBlock(b *types.Block) (err error) { + if b.Position.Round < DKGDelayRound { + return + } + if err = utils.VerifyBlockSignature(b); err != nil { + return + } + verifier, ok, err := con.tsigVerifierCache.UpdateAndGet(b.Position.Round) + if err != nil { + return + } + if !ok { + err = ErrCannotVerifyBlockRandomness + return + } + if !verifier.VerifySignature(b.Hash, crypto.Signature{ + Type: "bls", + Signature: b.Randomness, + }) { + err = ErrIncorrectBlockRandomness + return + } + err = con.baMgr.processFinalizedBlock(b) + if err == nil && con.debugApp != nil { + con.debugApp.BlockReceived(b.Hash) + } + return +} + +func (con *Consensus) deliveryGuard() { + defer con.waitGroup.Done() + select { + case <-con.ctx.Done(): + case <-time.After(con.dMoment.Sub(time.Now())): + } + // Node takes time to start. + select { + case <-con.ctx.Done(): + case <-time.After(60 * time.Second): + } + for { + select { + case <-con.ctx.Done(): + return + default: + } + select { + case <-con.ctx.Done(): + return + case <-con.resetDeliveryGuardTicker: + case <-time.After(60 * time.Second): + con.logger.Error("No blocks delivered for too long", "ID", con.ID) + panic(fmt.Errorf("No blocks delivered for too long")) + } + } +} + +// deliverBlock deliver a block to application layer. +func (con *Consensus) deliverBlock(b *types.Block) { + select { + case con.resetDeliveryGuardTicker <- struct{}{}: + default: + } + if err := con.db.PutBlock(*b); err != nil { + panic(err) + } + if err := con.db.PutCompactionChainTipInfo(b.Hash, + b.Position.Height); err != nil { + panic(err) + } + con.logger.Debug("Calling Application.BlockDelivered", "block", b) + con.app.BlockDelivered(b.Hash, b.Position, common.CopyBytes(b.Randomness)) + if con.debugApp != nil { + con.debugApp.BlockReady(b.Hash) + } +} + +// deliverFinalizedBlocks extracts and delivers finalized blocks to application +// layer. +func (con *Consensus) deliverFinalizedBlocks() error { + con.lock.Lock() + defer con.lock.Unlock() + return con.deliverFinalizedBlocksWithoutLock() +} + +func (con *Consensus) deliverFinalizedBlocksWithoutLock() (err error) { + deliveredBlocks := con.bcModule.extractBlocks() + con.logger.Debug("Last blocks in compaction chain", + "delivered", con.bcModule.lastDeliveredBlock(), + "pending", con.bcModule.lastPendingBlock()) + for _, b := range deliveredBlocks { + con.deliverBlock(b) + con.event.NotifyHeight(b.Position.Height) + } + return +} + +func (con *Consensus) processBlockLoop() { + for { + select { + case <-con.ctx.Done(): + return + default: + } + select { + case <-con.ctx.Done(): + return + case block := <-con.processBlockChan: + if err := con.processBlock(block); err != nil { + con.logger.Error("Error processing block", + "block", block, + "error", err) + } + } + } +} + +// processBlock is the entry point to submit one block to a Consensus instance. +func (con *Consensus) processBlock(block *types.Block) (err error) { + // Block processed by blockChain can be out-of-order. But the output from + // blockChain (deliveredBlocks) cannot, thus we need to protect the part + // below with writer lock. 
+ con.lock.Lock() + defer con.lock.Unlock() + if err = con.bcModule.addBlock(block); err != nil { + return + } + if err = con.deliverFinalizedBlocksWithoutLock(); err != nil { + return + } + return +} + +// PrepareBlock would setup header fields of block based on its ProposerID. +func (con *Consensus) proposeBlock(position types.Position) ( + *types.Block, error) { + b, err := con.bcModule.proposeBlock(position, time.Now().UTC(), false) + if err != nil { + return nil, err + } + con.logger.Debug("Calling Governance.CRS", "round", b.Position.Round) + crs := con.gov.CRS(b.Position.Round) + if crs.Equal(common.Hash{}) { + con.logger.Error("CRS for round is not ready, unable to prepare block", + "position", &b.Position) + return nil, ErrCRSNotReady + } + if err = con.signer.SignCRS(b, crs); err != nil { + return nil, err + } + return b, nil +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/constant.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/constant.go new file mode 100644 index 000000000..51b95a3c0 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/constant.go @@ -0,0 +1,41 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package core + +import "github.com/byzantine-lab/dexon-consensus/core/utils" + +// ConfigRoundShift refers to the difference between block's round and config +// round derived from its state. +// +// For example, when round shift is 2, a block in round 0 should derive config +// for round 2. +const ConfigRoundShift uint64 = 2 + +// DKGDelayRound refers to the round that first DKG is run. +// +// For example, when delay round is 1, new DKG will run at round 1. Round 0 will +// have neither DKG nor CRS. +const DKGDelayRound uint64 = 1 + +// NoRand is the magic placeholder for randomness field in blocks for blocks +// proposed before DKGDelayRound. +var NoRand = []byte("norand") + +func init() { + utils.SetDKGDelayRound(DKGDelayRound) +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/constant.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/constant.go new file mode 100644 index 000000000..3f6627b92 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/constant.go @@ -0,0 +1,26 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. 
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package dkg
+
+import (
+	"github.com/byzantine-lab/bls/ffi/go/bls"
+)
+
+const (
+	curve = bls.BLS12_381
+)
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/dkg.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/dkg.go
new file mode 100644
index 000000000..b9dd038ce
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/dkg.go
@@ -0,0 +1,637 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package dkg
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"sync"
+	"sync/atomic"
+
+	"github.com/byzantine-lab/bls/ffi/go/bls"
+	"github.com/byzantine-lab/go-tangerine/rlp"
+
+	"github.com/byzantine-lab/dexon-consensus/common"
+	"github.com/byzantine-lab/dexon-consensus/core/crypto"
+)
+
+var (
+	// ErrDuplicatedShare is reported when adding a private key share of the
+	// same id.
+	ErrDuplicatedShare = fmt.Errorf("invalid share")
+	// ErrNoIDToRecover is reported when no id is provided for recovering private
+	// key.
+	ErrNoIDToRecover = fmt.Errorf("no id to recover private key")
+	// ErrShareNotFound is reported when the private key share of id is not found
+	// when recovering private key.
+	ErrShareNotFound = fmt.Errorf("share not found")
+)
+
+const cryptoType = "bls"
+
+var publicKeyLength int
+
+func init() {
+	if err := bls.Init(curve); err != nil {
+		panic(err)
+	}
+
+	pubKey := &bls.PublicKey{}
+	publicKeyLength = len(pubKey.Serialize())
+}
+
+// PrivateKey represents a private key structure that implements the
+// Crypto.PrivateKey interface.
+type PrivateKey struct {
+	privateKey bls.SecretKey
+	publicKey  PublicKey
+}
+
+// EncodeRLP implements rlp.Encoder
+func (prv *PrivateKey) EncodeRLP(w io.Writer) error {
+	return rlp.Encode(w, prv.Bytes())
+}
+
+// DecodeRLP implements rlp.Decoder
+func (prv *PrivateKey) DecodeRLP(s *rlp.Stream) error {
+	var b []byte
+	if err := s.Decode(&b); err != nil {
+		return err
+	}
+	return prv.SetBytes(b)
+}
+
+// MarshalJSON implements json.Marshaller.
+func (prv *PrivateKey) MarshalJSON() ([]byte, error) {
+	return json.Marshal(&prv.privateKey)
+}
+
+// UnmarshalJSON implements json.Unmarshaller.
+func (prv *PrivateKey) UnmarshalJSON(data []byte) error {
+	return json.Unmarshal(data, &prv.privateKey)
+}
+
+// ID is the id for DKG protocol.
+type ID = bls.ID
+
+// IDs is an array of ID.
+type IDs []ID + +// PublicKey represents a public key structure implements +// Crypto.PublicKey interface. +type PublicKey struct { + publicKey bls.PublicKey +} + +// PrivateKeyShares represents a private key shares for DKG protocol. +type PrivateKeyShares struct { + shares []PrivateKey + shareIndex map[ID]int + masterPrivateKey []bls.SecretKey +} + +// Equal check equality between two PrivateKeyShares instances. +func (prvs *PrivateKeyShares) Equal(other *PrivateKeyShares) bool { + // Check shares. + if len(prvs.shareIndex) != len(other.shareIndex) { + return false + } + for dID, idx := range prvs.shareIndex { + otherIdx, exists := other.shareIndex[dID] + if !exists { + return false + } + if !prvs.shares[idx].privateKey.IsEqual( + &other.shares[otherIdx].privateKey) { + return false + } + } + // Check master private keys. + if len(prvs.masterPrivateKey) != len(other.masterPrivateKey) { + return false + } + for idx, m := range prvs.masterPrivateKey { + if m.GetHexString() != other.masterPrivateKey[idx].GetHexString() { + return false + } + } + return true +} + +// EncodeRLP implements rlp.Encoder +func (prvs *PrivateKeyShares) EncodeRLP(w io.Writer) error { + data := make([][][]byte, 3) + shares := make([][]byte, len(prvs.shares)) + for i, s := range prvs.shares { + shares[i] = s.Bytes() + } + data[0] = shares + + shareIndex := make([][]byte, 0) + for k, v := range prvs.shareIndex { + shareIndex = append(shareIndex, k.GetLittleEndian()) + + vBytes, err := rlp.EncodeToBytes(uint64(v)) + if err != nil { + return err + } + shareIndex = append(shareIndex, vBytes) + } + data[1] = shareIndex + + mpks := make([][]byte, len(prvs.masterPrivateKey)) + for i, m := range prvs.masterPrivateKey { + mpks[i] = m.GetLittleEndian() + } + data[2] = mpks + return rlp.Encode(w, data) +} + +// DecodeRLP implements rlp.Decoder +func (prvs *PrivateKeyShares) DecodeRLP(s *rlp.Stream) error { + *prvs = PrivateKeyShares{} + var dec [][][]byte + if err := s.Decode(&dec); err != nil { + return err + } + + var shares []PrivateKey + for _, bs := range dec[0] { + var key PrivateKey + err := key.SetBytes(bs) + if err != nil { + return err + } + shares = append(shares, key) + } + (*prvs).shares = shares + + sharesIndex := map[ID]int{} + for i := 0; i < len(dec[1]); i += 2 { + var key ID + err := key.SetLittleEndian(dec[1][i]) + if err != nil { + return err + } + + var value uint64 + err = rlp.DecodeBytes(dec[1][i+1], &value) + if err != nil { + return err + } + + sharesIndex[key] = int(value) + } + (*prvs).shareIndex = sharesIndex + + var mpks []bls.SecretKey + for _, bs := range dec[2] { + var key bls.SecretKey + if err := key.SetLittleEndian(bs); err != nil { + return err + } + mpks = append(mpks, key) + } + (*prvs).masterPrivateKey = mpks + + return nil +} + +type publicKeySharesCache struct { + share []PublicKey + index map[ID]int +} + +// PublicKeyShares represents a public key shares for DKG protocol. +type PublicKeyShares struct { + cache atomic.Value + lock sync.Mutex + masterPublicKey []bls.PublicKey +} + +// Equal checks equality of two PublicKeyShares instance. +func (pubs *PublicKeyShares) Equal(other *PublicKeyShares) bool { + cache := pubs.cache.Load().(*publicKeySharesCache) + cacheOther := other.cache.Load().(*publicKeySharesCache) + // Check shares. + for dID, idx := range cache.index { + otherIdx, exists := cacheOther.index[dID] + if !exists { + continue + } + if !cache.share[idx].publicKey.IsEqual( + &cacheOther.share[otherIdx].publicKey) { + return false + } + } + // Check master public keys. 
+    if len(pubs.masterPublicKey) != len(other.masterPublicKey) {
+        return false
+    }
+    for idx, m := range pubs.masterPublicKey {
+        if m.GetHexString() != other.masterPublicKey[idx].GetHexString() {
+            return false
+        }
+    }
+    return true
+}
+
+// EncodeRLP implements rlp.Encoder
+func (pubs *PublicKeyShares) EncodeRLP(w io.Writer) error {
+    mpks := make([][]byte, len(pubs.masterPublicKey))
+    for i, m := range pubs.masterPublicKey {
+        mpks[i] = m.Serialize()
+    }
+    return rlp.Encode(w, mpks)
+}
+
+// DecodeRLP implements rlp.Decoder
+func (pubs *PublicKeyShares) DecodeRLP(s *rlp.Stream) error {
+    var dec [][]byte
+    if err := s.Decode(&dec); err != nil {
+        return err
+    }
+
+    ps := NewEmptyPublicKeyShares()
+    for _, k := range dec {
+        var key bls.PublicKey
+        if err := key.Deserialize(k); err != nil {
+            return err
+        }
+        ps.masterPublicKey = append(ps.masterPublicKey, key)
+    }
+
+    *pubs = *ps.Move()
+    return nil
+}
+
+// MarshalJSON implements json.Marshaller.
+func (pubs *PublicKeyShares) MarshalJSON() ([]byte, error) {
+    type Alias PublicKeyShares
+    data := &struct {
+        MasterPublicKeys []*bls.PublicKey `json:"master_public_keys"`
+    }{
+        make([]*bls.PublicKey, len(pubs.masterPublicKey)),
+    }
+    for i := range pubs.masterPublicKey {
+        data.MasterPublicKeys[i] = &pubs.masterPublicKey[i]
+    }
+    return json.Marshal(data)
+}
+
+// UnmarshalJSON implements json.Unmarshaller.
+func (pubs *PublicKeyShares) UnmarshalJSON(data []byte) error {
+    type Alias PublicKeyShares
+    aux := &struct {
+        MasterPublicKeys []*bls.PublicKey `json:"master_public_keys"`
+    }{}
+    if err := json.Unmarshal(data, &aux); err != nil {
+        return err
+    }
+    mpk := make([]bls.PublicKey, len(aux.MasterPublicKeys))
+    for i, pk := range aux.MasterPublicKeys {
+        mpk[i] = *pk
+    }
+    pubs.masterPublicKey = mpk
+    return nil
+}
+
+// Clone clones every field of PublicKeyShares. This method is mainly for
+// testing purposes and thus panics on error.
+func (pubs *PublicKeyShares) Clone() *PublicKeyShares {
+    b, err := rlp.EncodeToBytes(pubs)
+    if err != nil {
+        panic(err)
+    }
+    pubsCopy := NewEmptyPublicKeyShares()
+    if err := rlp.DecodeBytes(b, pubsCopy); err != nil {
+        panic(err)
+    }
+    return pubsCopy
+}
+
+// NewID creates a new ID structure.
+func NewID(id []byte) ID {
+    var blsID bls.ID
+    // #nosec G104
+    blsID.SetLittleEndian(id)
+    return blsID
+}
+
+// BytesID creates a new ID structure. It returns an error if the byte slice
+// is not valid.
+func BytesID(id []byte) (ID, error) {
+    var blsID bls.ID
+    // #nosec G104
+    err := blsID.SetLittleEndian(id)
+    return blsID, err
+}
+
+// NewPrivateKey creates a new PrivateKey structure.
+func NewPrivateKey() *PrivateKey {
+    var key bls.SecretKey
+    key.SetByCSPRNG()
+    return &PrivateKey{
+        privateKey: key,
+        publicKey:  *newPublicKey(&key),
+    }
+}
+
+// NewPrivateKeyShares creates DKG private key shares of threshold t.
+func NewPrivateKeyShares(t int) (*PrivateKeyShares, *PublicKeyShares) {
+    var prv bls.SecretKey
+    prv.SetByCSPRNG()
+    msk := prv.GetMasterSecretKey(t)
+    mpk := bls.GetMasterPublicKey(msk)
+    pubShare := NewEmptyPublicKeyShares()
+    pubShare.masterPublicKey = mpk
+    return &PrivateKeyShares{
+        masterPrivateKey: msk,
+        shareIndex:       make(map[ID]int),
+    }, pubShare
+}
+
+// NewEmptyPrivateKeyShares creates an empty private key shares.
+func NewEmptyPrivateKeyShares() *PrivateKeyShares {
+    return &PrivateKeyShares{
+        shareIndex: make(map[ID]int),
+    }
+}
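Because PrivateKey implements both rlp.Encoder and rlp.Decoder, keys survive an RLP round trip; a minimal sketch written as if inside this package (roundTripPrivateKey is a hypothetical helper):

// roundTripPrivateKey encodes a fresh key and decodes it back; SetBytes
// re-derives the public key, so the decoded copy is fully usable.
func roundTripPrivateKey() (*PrivateKey, error) {
    prv := NewPrivateKey()
    b, err := rlp.EncodeToBytes(prv)
    if err != nil {
        return nil, err
    }
    decoded := &PrivateKey{}
    if err := rlp.DecodeBytes(b, decoded); err != nil {
        return nil, err
    }
    return decoded, nil
}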
+// SetParticipants sets the DKG participants.
+func (prvs *PrivateKeyShares) SetParticipants(IDs IDs) {
+    prvs.shares = make([]PrivateKey, len(IDs))
+    prvs.shareIndex = make(map[ID]int, len(IDs))
+    for idx, ID := range IDs {
+        // #nosec G104
+        prvs.shares[idx].privateKey.Set(prvs.masterPrivateKey, &ID)
+        prvs.shareIndex[ID] = idx
+    }
+}
+
+// AddShare adds a share.
+func (prvs *PrivateKeyShares) AddShare(ID ID, share *PrivateKey) error {
+    if idx, exist := prvs.shareIndex[ID]; exist {
+        if !share.privateKey.IsEqual(&prvs.shares[idx].privateKey) {
+            return ErrDuplicatedShare
+        }
+        return nil
+    }
+    prvs.shareIndex[ID] = len(prvs.shares)
+    prvs.shares = append(prvs.shares, *share)
+    return nil
+}
+
+// RecoverPrivateKey recovers the private key from the shares.
+func (prvs *PrivateKeyShares) RecoverPrivateKey(qualifyIDs IDs) (
+    *PrivateKey, error) {
+    var prv PrivateKey
+    if len(qualifyIDs) == 0 {
+        return nil, ErrNoIDToRecover
+    }
+    for i, ID := range qualifyIDs {
+        idx, exist := prvs.shareIndex[ID]
+        if !exist {
+            return nil, ErrShareNotFound
+        }
+        if i == 0 {
+            prv.privateKey = prvs.shares[idx].privateKey
+            continue
+        }
+        prv.privateKey.Add(&prvs.shares[idx].privateKey)
+    }
+    return &prv, nil
+}
+
+// RecoverPublicKey recovers the public key from the shares.
+func (prvs *PrivateKeyShares) RecoverPublicKey(qualifyIDs IDs) (
+    *PublicKey, error) {
+    var pub PublicKey
+    if len(qualifyIDs) == 0 {
+        return nil, ErrNoIDToRecover
+    }
+    for i, ID := range qualifyIDs {
+        idx, exist := prvs.shareIndex[ID]
+        if !exist {
+            return nil, ErrShareNotFound
+        }
+        if i == 0 {
+            pub.publicKey = *prvs.shares[idx].privateKey.GetPublicKey()
+            continue
+        }
+        pub.publicKey.Add(prvs.shares[idx].privateKey.GetPublicKey())
+    }
+    return &pub, nil
+}
+
+// Share returns the share for the ID.
+func (prvs *PrivateKeyShares) Share(ID ID) (*PrivateKey, bool) {
+    idx, exist := prvs.shareIndex[ID]
+    if !exist {
+        return nil, false
+    }
+    return &prvs.shares[idx], true
+}
+
+// NewEmptyPublicKeyShares creates an empty public key shares.
+func NewEmptyPublicKeyShares() *PublicKeyShares {
+    pubShares := &PublicKeyShares{}
+    pubShares.cache.Store(&publicKeySharesCache{
+        index: make(map[ID]int),
+    })
+    return pubShares
+}
+
+// Move will invalidate itself. Do not access the original reference.
+func (pubs *PublicKeyShares) Move() *PublicKeyShares {
+    return pubs
+}
+
+// Share returns the share for the ID.
+func (pubs *PublicKeyShares) Share(ID ID) (*PublicKey, error) {
+    cache := pubs.cache.Load().(*publicKeySharesCache)
+    idx, exist := cache.index[ID]
+    if exist {
+        return &cache.share[idx], nil
+    }
+    var pk PublicKey
+    if err := pk.publicKey.Set(pubs.masterPublicKey, &ID); err != nil {
+        return nil, err
+    }
+    if err := pubs.AddShare(ID, &pk); err != nil {
+        return nil, err
+    }
+    return &pk, nil
+}
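Tying these pieces together, a hypothetical dealing round written as if inside this package: every dealer splits its own master secret with NewPrivateKeyShares, each participant collects its share from every dealer with AddShare, and RecoverPrivateKey sums the collected shares into the participant's signing key (dealAndCombine is illustrative only):

func dealAndCombine(
    threshold int, participants IDs, self ID) (*PrivateKey, error) {
    received := NewEmptyPrivateKeyShares()
    dealers := make(IDs, 0, len(participants))
    for _, dealer := range participants {
        // Each dealer generates and splits its own master secret.
        prvShares, _ := NewPrivateKeyShares(threshold)
        prvShares.SetParticipants(participants)
        share, ok := prvShares.Share(self)
        if !ok {
            return nil, ErrShareNotFound
        }
        if err := received.AddShare(dealer, share); err != nil {
            return nil, err
        }
        dealers = append(dealers, dealer)
    }
    // The signing key is the sum of the shares from all qualified dealers.
    return received.RecoverPrivateKey(dealers)
}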
+// AddShare adds a share.
+func (pubs *PublicKeyShares) AddShare(shareID ID, share *PublicKey) error {
+    cache := pubs.cache.Load().(*publicKeySharesCache)
+    if idx, exist := cache.index[shareID]; exist {
+        if !share.publicKey.IsEqual(&cache.share[idx].publicKey) {
+            return ErrDuplicatedShare
+        }
+        return nil
+    }
+    pubs.lock.Lock()
+    defer pubs.lock.Unlock()
+    cache = pubs.cache.Load().(*publicKeySharesCache)
+    newCache := &publicKeySharesCache{
+        index: make(map[ID]int, len(cache.index)+1),
+        share: make([]PublicKey, len(cache.share), len(cache.share)+1),
+    }
+    for k, v := range cache.index {
+        newCache.index[k] = v
+    }
+    copy(newCache.share, cache.share)
+    newCache.index[shareID] = len(newCache.share)
+    newCache.share = append(newCache.share, *share)
+    pubs.cache.Store(newCache)
+    return nil
+}
+
+// VerifyPrvShare verifies if the private key share is valid.
+func (pubs *PublicKeyShares) VerifyPrvShare(ID ID, share *PrivateKey) (
+    bool, error) {
+    var pk bls.PublicKey
+    if err := pk.Set(pubs.masterPublicKey, &ID); err != nil {
+        return false, err
+    }
+    return pk.IsEqual(share.privateKey.GetPublicKey()), nil
+}
+
+// VerifyPubShare verifies if the public key share is valid.
+func (pubs *PublicKeyShares) VerifyPubShare(ID ID, share *PublicKey) (
+    bool, error) {
+    var pk bls.PublicKey
+    if err := pk.Set(pubs.masterPublicKey, &ID); err != nil {
+        return false, err
+    }
+    return pk.IsEqual(&share.publicKey), nil
+}
+
+// RecoverPublicKey recovers the public key from the shares.
+func (pubs *PublicKeyShares) RecoverPublicKey(qualifyIDs IDs) (
+    *PublicKey, error) {
+    var pub PublicKey
+    if len(qualifyIDs) == 0 {
+        return nil, ErrNoIDToRecover
+    }
+    for i, ID := range qualifyIDs {
+        pk, err := pubs.Share(ID)
+        if err != nil {
+            return nil, err
+        }
+        if i == 0 {
+            pub.publicKey = pk.publicKey
+            continue
+        }
+        pub.publicKey.Add(&pk.publicKey)
+    }
+    return &pub, nil
+}
+
+// MasterKeyBytes returns the []byte representation of the master public key.
+func (pubs *PublicKeyShares) MasterKeyBytes() []byte {
+    bytes := make([]byte, 0, len(pubs.masterPublicKey)*publicKeyLength)
+    for _, pk := range pubs.masterPublicKey {
+        bytes = append(bytes, pk.Serialize()...)
+    }
+    return bytes
+}
+
+// newPublicKey creates a new PublicKey structure.
+func newPublicKey(prvKey *bls.SecretKey) *PublicKey {
+    return &PublicKey{
+        publicKey: *prvKey.GetPublicKey(),
+    }
+}
+
+// newPublicKeyFromBytes creates a new PublicKey structure from the bytes
+// representation of bls.PublicKey.
+func newPublicKeyFromBytes(b []byte) (*PublicKey, error) {
+    var pub PublicKey
+    err := pub.publicKey.Deserialize(b)
+    return &pub, err
+}
+
+// PublicKey returns the public key associated with this private key.
+func (prv *PrivateKey) PublicKey() crypto.PublicKey {
+    return prv.publicKey
+}
+
+// Sign calculates a signature.
+func (prv *PrivateKey) Sign(hash common.Hash) (crypto.Signature, error) {
+    msg := string(hash[:])
+    sign := prv.privateKey.Sign(msg)
+    return crypto.Signature{
+        Type:      cryptoType,
+        Signature: sign.Serialize(),
+    }, nil
+}
+
+// Bytes returns the []byte representation of the private key.
+func (prv *PrivateKey) Bytes() []byte {
+    return prv.privateKey.GetLittleEndian()
+}
+
+// SetBytes sets the private key data to []byte.
+func (prv *PrivateKey) SetBytes(bytes []byte) error {
+    var key bls.SecretKey
+    if err := key.SetLittleEndian(bytes); err != nil {
+        return err
+    }
+    prv.privateKey = key
+    prv.publicKey = *newPublicKey(&prv.privateKey)
+    return nil
+}
+
+// String returns the string representation of the private key.
+func (prv *PrivateKey) String() string {
+    return prv.privateKey.GetHexString()
+}
+
+// VerifySignature checks that the given public key created signature over hash.
+func (pub PublicKey) VerifySignature(
+    hash common.Hash, signature crypto.Signature) bool {
+    if len(signature.Signature) == 0 {
+        return false
+    }
+    var sig bls.Sign
+    if err := sig.Deserialize(signature.Signature[:]); err != nil {
+        fmt.Println(err)
+        return false
+    }
+    msg := string(hash[:])
+    return sig.Verify(&pub.publicKey, msg)
+}
+
+// Bytes returns the []byte representation of the public key.
+func (pub PublicKey) Bytes() []byte {
+    return pub.publicKey.Serialize()
+}
+
+// Serialize returns the bytes representation of the public key.
+func (pub *PublicKey) Serialize() []byte {
+    return pub.publicKey.Serialize()
+}
+
+// Deserialize parses the bytes representation of the public key.
+func (pub *PublicKey) Deserialize(b []byte) error {
+    return pub.publicKey.Deserialize(b)
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/utils.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/utils.go
new file mode 100644
index 000000000..589480a3b
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/utils.go
@@ -0,0 +1,92 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package dkg
+
+import (
+    "encoding/binary"
+    "fmt"
+    "math/rand"
+
+    "github.com/byzantine-lab/bls/ffi/go/bls"
+
+    "github.com/byzantine-lab/dexon-consensus/core/crypto"
+)
+
+// PartialSignature is a partial signature in the DKG+TSIG protocol.
+type PartialSignature crypto.Signature
+
+var (
+    // ErrEmptySignature is reported if the signature is empty.
+    ErrEmptySignature = fmt.Errorf("invalid empty signature")
+)
+
+// RecoverSignature recovers the TSIG signature.
+func RecoverSignature(sigs []PartialSignature, signerIDs IDs) (
+    crypto.Signature, error) {
+    blsSigs := make([]bls.Sign, len(sigs))
+    for i, sig := range sigs {
+        if len(sig.Signature) == 0 {
+            return crypto.Signature{}, ErrEmptySignature
+        }
+        if err := blsSigs[i].Deserialize([]byte(sig.Signature)); err != nil {
+            return crypto.Signature{}, err
+        }
+    }
+    var recoverSig bls.Sign
+    if err := recoverSig.Recover(blsSigs, []bls.ID(signerIDs)); err != nil {
+        return crypto.Signature{}, err
+    }
+    return crypto.Signature{
+        Type:      cryptoType,
+        Signature: recoverSig.Serialize()}, nil
+}
+
+// RecoverGroupPublicKey recovers the group public key.
+func RecoverGroupPublicKey(pubShares []*PublicKeyShares) *PublicKey {
+    var pub *PublicKey
+    for _, pubShare := range pubShares {
+        pk0 := pubShare.masterPublicKey[0]
+        if pub == nil {
+            pub = &PublicKey{
+                publicKey: pk0,
+            }
+        } else {
+            pub.publicKey.Add(&pk0)
+        }
+    }
+    return pub
+}
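A usage sketch for the two helpers above, written as if inside this package (combineAndVerify is hypothetical; it assumes the common and crypto imports used elsewhere in the package): recover the group signature from enough partial signatures, then verify it against the group public key recovered from the qualified master public key shares.

func combineAndVerify(
    hash common.Hash, sigs []PartialSignature, signers IDs,
    pubShares []*PublicKeyShares) (crypto.Signature, error) {
    sig, err := RecoverSignature(sigs, signers)
    if err != nil {
        return crypto.Signature{}, err
    }
    // The group public key is the sum of every dealer's first master public
    // key coefficient.
    groupPK := RecoverGroupPublicKey(pubShares)
    if !groupPK.VerifySignature(hash, sig) {
        return crypto.Signature{}, fmt.Errorf("recovered tsig does not verify")
    }
    return sig, nil
}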
+// NewRandomPrivateKeyShares constructs private key shares randomly.
+func NewRandomPrivateKeyShares() *PrivateKeyShares {
+    // Generate IDs.
+    rndIDs := make(IDs, 10)
+    for i := range rndIDs {
+        id := make([]byte, 8)
+        binary.LittleEndian.PutUint64(id, rand.Uint64())
+        rndIDs[i] = NewID(id)
+    }
+    prvShares := NewEmptyPrivateKeyShares()
+    prvShares.SetParticipants(rndIDs)
+    for _, id := range rndIDs {
+        if err := prvShares.AddShare(id, NewPrivateKey()); err != nil {
+            panic(err)
+        }
+    }
+    return prvShares
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/ecdsa/ecdsa.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/ecdsa/ecdsa.go
new file mode 100644
index 000000000..5c3bf96bb
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/ecdsa/ecdsa.go
@@ -0,0 +1,135 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package ecdsa
+
+import (
+    "crypto/ecdsa"
+
+    dexCrypto "github.com/byzantine-lab/go-tangerine/crypto"
+
+    "github.com/byzantine-lab/dexon-consensus/common"
+    "github.com/byzantine-lab/dexon-consensus/core/crypto"
+)
+
+const cryptoType = "ecdsa"
+
+func init() {
+    if err := crypto.RegisterSigToPub(cryptoType, SigToPub); err != nil {
+        panic(err)
+    }
+}
+
+// PrivateKey represents a private key structure used in geth and implements
+// the Crypto.PrivateKey interface.
+type PrivateKey struct {
+    privateKey *ecdsa.PrivateKey
+}
+
+// PublicKey represents a public key structure used in geth and implements
+// the Crypto.PublicKey interface.
+type PublicKey struct {
+    publicKey *ecdsa.PublicKey
+}
+
+// NewPrivateKey creates a new PrivateKey structure.
+func NewPrivateKey() (*PrivateKey, error) {
+    key, err := dexCrypto.GenerateKey()
+    if err != nil {
+        return nil, err
+    }
+    return &PrivateKey{privateKey: key}, nil
+}
+
+// NewPrivateKeyFromECDSA creates a new PrivateKey structure from
+// ecdsa.PrivateKey.
+func NewPrivateKeyFromECDSA(key *ecdsa.PrivateKey) *PrivateKey {
+    return &PrivateKey{privateKey: key}
+}
+
+// NewPublicKeyFromECDSA creates a new PublicKey structure from
+// ecdsa.PublicKey.
+func NewPublicKeyFromECDSA(key *ecdsa.PublicKey) *PublicKey {
+    return &PublicKey{publicKey: key}
+}
+
+// NewPublicKeyFromByteSlice constructs a PublicKey instance from
+// a byte slice.
+func NewPublicKeyFromByteSlice(b []byte) (crypto.PublicKey, error) {
+    pub, err := dexCrypto.UnmarshalPubkey(b)
+    if err != nil {
+        return &PublicKey{}, err
+    }
+    return &PublicKey{publicKey: pub}, nil
+}
+
+// PublicKey returns the public key associated with this private key.
+func (prv *PrivateKey) PublicKey() crypto.PublicKey {
+    return NewPublicKeyFromECDSA(&(prv.privateKey.PublicKey))
+}
+
+// Sign calculates an ECDSA signature.
+//
+// This function is susceptible to chosen plaintext attacks that can leak
+// information about the private key that is used for signing. Callers must
+// be aware that the given hash cannot be chosen by an adversary. A common
+// solution is to hash any input before calculating the signature.
+//
+// The produced signature is in the [R || S || V] format where V is 0 or 1.
+func (prv *PrivateKey) Sign(hash common.Hash) (
+    sig crypto.Signature, err error) {
+    s, err := dexCrypto.Sign(hash[:], prv.privateKey)
+    sig = crypto.Signature{
+        Type:      cryptoType,
+        Signature: s,
+    }
+    return
+}
+
+// VerifySignature checks that the given public key created signature over hash.
+// The public key should be in compressed (33 bytes) or uncompressed (65 bytes)
+// format.
+// The signature should have the 64-byte [R || S] format.
+func (pub *PublicKey) VerifySignature(
+    hash common.Hash, signature crypto.Signature) bool {
+    sig := signature.Signature
+    if len(sig) == 65 {
+        // The last byte is for ecrecover.
+        sig = sig[:64]
+    }
+    return dexCrypto.VerifySignature(pub.Bytes(), hash[:], sig)
+}
+
+// Compress encodes a public key to the 33-byte compressed format.
+func (pub *PublicKey) Compress() []byte {
+    return dexCrypto.CompressPubkey(pub.publicKey)
+}
+
+// Bytes returns the []byte representation of the uncompressed public key.
+// (65 bytes)
+func (pub *PublicKey) Bytes() []byte {
+    return dexCrypto.FromECDSAPub(pub.publicKey)
+}
+
+// SigToPub returns the PublicKey that created the given signature.
+func SigToPub(
+    hash common.Hash, signature crypto.Signature) (crypto.PublicKey, error) {
+    key, err := dexCrypto.SigToPub(hash[:], signature.Signature[:])
+    if err != nil {
+        return &PublicKey{}, err
+    }
+    return &PublicKey{publicKey: key}, nil
+}
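A round-trip sketch for this package (signAndRecover is a hypothetical helper; crypto.Keccak256Hash comes from the crypto package shown in the next file): sign a hash, then recover and compare the signer's public key.

func signAndRecover(msg []byte) (bool, error) {
    prv, err := NewPrivateKey()
    if err != nil {
        return false, err
    }
    hash := crypto.Keccak256Hash(msg)
    sig, err := prv.Sign(hash)
    if err != nil {
        return false, err
    }
    recovered, err := SigToPub(hash, sig)
    if err != nil {
        return false, err
    }
    // ecrecover must yield the exact uncompressed key of the signer.
    return string(recovered.Bytes()) == string(prv.PublicKey().Bytes()), nil
}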
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/interfaces.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/interfaces.go
new file mode 100644
index 000000000..9fe47f7dc
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/interfaces.go
@@ -0,0 +1,48 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package crypto
+
+import (
+    "github.com/byzantine-lab/dexon-consensus/common"
+)
+
+// Signature is the basic signature type in DEXON.
+type Signature struct {
+    Type      string
+    Signature []byte
+}
+
+// PrivateKey describes the asymmetric cryptography interface that interacts
+// with the private key.
+type PrivateKey interface {
+    // PublicKey returns the public key associated with this private key.
+    PublicKey() PublicKey
+
+    // Sign calculates a signature.
+    Sign(hash common.Hash) (Signature, error)
+}
+
+// PublicKey describes the asymmetric cryptography interface that interacts
+// with the public key.
+type PublicKey interface {
+    // VerifySignature checks that the given public key created signature over hash.
+    VerifySignature(hash common.Hash, signature Signature) bool
+
+    // Bytes returns the []byte representation of public key.
+    Bytes() []byte
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/utils.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/utils.go
new file mode 100644
index 000000000..744be3e5f
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/utils.go
@@ -0,0 +1,80 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package crypto
+
+import (
+    "encoding/hex"
+    "fmt"
+
+    "github.com/byzantine-lab/go-tangerine/crypto"
+
+    "github.com/byzantine-lab/dexon-consensus/common"
+)
+
+var (
+    // ErrSigToPubTypeNotFound is reported if the type is not registered.
+    ErrSigToPubTypeNotFound = fmt.Errorf("type of sigToPub is not found")
+
+    // ErrSigToPubTypeAlreadyExist is reported if the type is already used.
+    ErrSigToPubTypeAlreadyExist = fmt.Errorf("type of sigToPub already exists")
+)
+
+// SigToPubFn is a function to recover a public key from a signature.
+type SigToPubFn func(hash common.Hash, signature Signature) (PublicKey, error)
+
+var sigToPubCB map[string]SigToPubFn
+
+func init() {
+    sigToPubCB = make(map[string]SigToPubFn)
+}
+
+// Keccak256Hash calculates and returns the Keccak256 hash of the input data,
+// converting it to an internal Hash data structure.
+func Keccak256Hash(data ...[]byte) (h common.Hash) {
+    return common.Hash(crypto.Keccak256Hash(data...))
+}
+
+// Clone returns a deep copy of a signature.
+func (sig Signature) Clone() Signature {
+    return Signature{
+        Type:      sig.Type,
+        Signature: sig.Signature[:],
+    }
+}
+
+func (sig Signature) String() string {
+    return hex.EncodeToString([]byte(sig.Signature[:]))
+}
+
+// RegisterSigToPub registers a sigToPub function of the given type.
+func RegisterSigToPub(sigType string, sigToPub SigToPubFn) error {
+    if _, exist := sigToPubCB[sigType]; exist {
+        return ErrSigToPubTypeAlreadyExist
+    }
+    sigToPubCB[sigType] = sigToPub
+    return nil
+}
+
+// SigToPub recovers a public key from a signature.
+func SigToPub(hash common.Hash, signature Signature) (PublicKey, error) {
+    sigToPub, exist := sigToPubCB[signature.Type]
+    if !exist {
+        return nil, ErrSigToPubTypeNotFound
+    }
+    return sigToPub(hash, signature)
+}
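The registry above lets callers recover signers without knowing the scheme: SigToPub dispatches on Signature.Type, which each implementation (for example the "ecdsa" package earlier) fills in when signing and registers in its init. A dispatch sketch written as if inside this package (recoverAndCheck is hypothetical):

func recoverAndCheck(hash common.Hash, sig Signature) (PublicKey, error) {
    // Returns ErrSigToPubTypeNotFound when sig.Type was never registered.
    pub, err := SigToPub(hash, sig)
    if err != nil {
        return nil, err
    }
    if !pub.VerifySignature(hash, sig) {
        return nil, fmt.Errorf("signature does not verify")
    }
    return pub, nil
}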
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/db/interfaces.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/db/interfaces.go
new file mode 100644
index 000000000..1d15c68a0
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/db/interfaces.go
@@ -0,0 +1,100 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package db
+
+import (
+    "errors"
+    "fmt"
+
+    "github.com/byzantine-lab/dexon-consensus/common"
+    "github.com/byzantine-lab/dexon-consensus/core/crypto/dkg"
+    "github.com/byzantine-lab/dexon-consensus/core/types"
+)
+
+var (
+    // ErrBlockExists is the error when the block exists.
+    ErrBlockExists = errors.New("block exists")
+    // ErrBlockDoesNotExist is the error when the block does not exist.
+    ErrBlockDoesNotExist = errors.New("block does not exist")
+    // ErrIterationFinished is the error to check if the iteration is finished.
+    ErrIterationFinished = errors.New("iteration finished")
+    // ErrEmptyPath is the error when the required path is empty.
+    ErrEmptyPath = fmt.Errorf("empty path")
+    // ErrClosed is the error when using the DB after it's closed.
+    ErrClosed = fmt.Errorf("db closed")
+    // ErrNotImplemented is the error that some interface is not implemented.
+    ErrNotImplemented = fmt.Errorf("not implemented")
+    // ErrInvalidCompactionChainTipHeight means the newly updated height of
+    // the tip of the compaction chain is invalid, which usually means it is
+    // smaller than the currently cached one.
+    ErrInvalidCompactionChainTipHeight = fmt.Errorf(
+        "invalid compaction chain tip height")
+    // ErrDKGPrivateKeyExists is raised when attempting to save a DKG private
+    // key that is already saved.
+    ErrDKGPrivateKeyExists = errors.New("dkg private key exists")
+    // ErrDKGPrivateKeyDoesNotExist is raised when the DKG private key of the
+    // requested round does not exist.
+    ErrDKGPrivateKeyDoesNotExist = errors.New("dkg private key does not exist")
+    // ErrDKGProtocolExists is raised when attempting to save a DKG protocol
+    // that is already saved.
+    ErrDKGProtocolExists = errors.New("dkg protocol exists")
+    // ErrDKGProtocolDoesNotExist is raised when the DKG protocol of the
+    // requested round does not exist.
+    ErrDKGProtocolDoesNotExist = errors.New("dkg protocol does not exist")
+)
+
+// Database is the interface for a Database.
+type Database interface {
+    Reader
+    Writer
+
+    // Close allows the database implementation to release its resources when
+    // finishing.
+    Close() error
+}
+
+// Reader defines the interface for reading blocks from the DB.
+type Reader interface {
+    HasBlock(hash common.Hash) bool
+    GetBlock(hash common.Hash) (types.Block, error)
+    GetAllBlocks() (BlockIterator, error)
+
+    // GetCompactionChainTipInfo returns the block hash and finalization height
+    // of the tip block of the compaction chain. An empty hash and zero height
+    // mean the compaction chain is empty.
+    GetCompactionChainTipInfo() (common.Hash, uint64)
+
+    // DKG Private Key related methods.
+    GetDKGPrivateKey(round, reset uint64) (dkg.PrivateKey, error)
+    GetDKGProtocol() (dkgProtocol DKGProtocolInfo, err error)
+}
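A drain sketch for the Reader side, written as if inside this package (collectBlocks is hypothetical): GetAllBlocks hands back a BlockIterator, defined just below, which signals exhaustion with ErrIterationFinished.

func collectBlocks(r Reader) ([]types.Block, error) {
    iter, err := r.GetAllBlocks()
    if err != nil {
        return nil, err
    }
    var blocks []types.Block
    for {
        b, err := iter.NextBlock()
        if err == ErrIterationFinished {
            return blocks, nil
        }
        if err != nil {
            return nil, err
        }
        blocks = append(blocks, b)
    }
}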
+// Writer defines the interface for writing blocks into the DB.
+type Writer interface {
+    UpdateBlock(block types.Block) error
+    PutBlock(block types.Block) error
+    PutCompactionChainTipInfo(common.Hash, uint64) error
+    PutDKGPrivateKey(round, reset uint64, pk dkg.PrivateKey) error
+    PutOrUpdateDKGProtocol(dkgProtocol DKGProtocolInfo) error
+}
+
+// BlockIterator defines an iterator over the blocks held in a DB.
+type BlockIterator interface {
+    NextBlock() (types.Block, error)
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/db/level-db.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/db/level-db.go
new file mode 100644
index 000000000..9e3564b50
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/db/level-db.go
@@ -0,0 +1,573 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package db
+
+import (
+    "encoding/binary"
+    "io"
+
+    "github.com/syndtr/goleveldb/leveldb"
+
+    "github.com/byzantine-lab/dexon-consensus/common"
+    "github.com/byzantine-lab/dexon-consensus/core/crypto/dkg"
+    "github.com/byzantine-lab/dexon-consensus/core/types"
+    "github.com/byzantine-lab/go-tangerine/rlp"
+)
+
+var (
+    blockKeyPrefix            = []byte("b-")
+    compactionChainTipInfoKey = []byte("cc-tip")
+    dkgPrivateKeyKeyPrefix    = []byte("dkg-prvs")
+    dkgProtocolInfoKeyPrefix  = []byte("dkg-protocol-info")
+)
+
+type compactionChainTipInfo struct {
+    Height uint64      `json:"height"`
+    Hash   common.Hash `json:"hash"`
+}
+
+// DKGProtocolInfo is the DKG protocol info.
+type DKGProtocolInfo struct {
+    ID                        types.NodeID
+    Round                     uint64
+    Threshold                 uint64
+    IDMap                     NodeIDToDKGID
+    MpkMap                    NodeIDToPubShares
+    MasterPrivateShare        dkg.PrivateKeyShares
+    IsMasterPrivateShareEmpty bool
+    PrvShares                 dkg.PrivateKeyShares
+    IsPrvSharesEmpty          bool
+    PrvSharesReceived         NodeID
+    NodeComplained            NodeID
+    AntiComplaintReceived     NodeIDToNodeIDs
+    Step                      uint64
+    Reset                     uint64
+}
+
+type dkgPrivateKey struct {
+    PK    dkg.PrivateKey
+    Reset uint64
+}
+
+// Equal compares with the target DKGProtocolInfo.
+func (info *DKGProtocolInfo) Equal(target *DKGProtocolInfo) bool { + if !info.ID.Equal(target.ID) || + info.Round != target.Round || + info.Threshold != target.Threshold || + info.IsMasterPrivateShareEmpty != target.IsMasterPrivateShareEmpty || + info.IsPrvSharesEmpty != target.IsPrvSharesEmpty || + info.Step != target.Step || + info.Reset != target.Reset || + !info.MasterPrivateShare.Equal(&target.MasterPrivateShare) || + !info.PrvShares.Equal(&target.PrvShares) { + return false + } + + if len(info.IDMap) != len(target.IDMap) { + return false + } + for k, v := range info.IDMap { + tV, exist := target.IDMap[k] + if !exist { + return false + } + + if !v.IsEqual(&tV) { + return false + } + } + + if len(info.MpkMap) != len(target.MpkMap) { + return false + } + for k, v := range info.MpkMap { + tV, exist := target.MpkMap[k] + if !exist { + return false + } + + if !v.Equal(tV) { + return false + } + } + + if len(info.PrvSharesReceived) != len(target.PrvSharesReceived) { + return false + } + for k := range info.PrvSharesReceived { + _, exist := target.PrvSharesReceived[k] + if !exist { + return false + } + } + + if len(info.NodeComplained) != len(target.NodeComplained) { + return false + } + for k := range info.NodeComplained { + _, exist := target.NodeComplained[k] + if !exist { + return false + } + } + + if len(info.AntiComplaintReceived) != len(target.AntiComplaintReceived) { + return false + } + for k, v := range info.AntiComplaintReceived { + tV, exist := target.AntiComplaintReceived[k] + if !exist { + return false + } + + if len(v) != len(tV) { + return false + } + for kk := range v { + _, exist := tV[kk] + if !exist { + return false + } + } + } + + return true +} + +// NodeIDToNodeIDs the map with NodeID to NodeIDs. +type NodeIDToNodeIDs map[types.NodeID]map[types.NodeID]struct{} + +// EncodeRLP implements rlp.Encoder +func (m NodeIDToNodeIDs) EncodeRLP(w io.Writer) error { + var allBytes [][][]byte + for k, v := range m { + kBytes, err := k.MarshalText() + if err != nil { + return err + } + allBytes = append(allBytes, [][]byte{kBytes}) + + var vBytes [][]byte + for subK := range v { + bytes, err := subK.MarshalText() + if err != nil { + return err + } + vBytes = append(vBytes, bytes) + } + allBytes = append(allBytes, vBytes) + } + + return rlp.Encode(w, allBytes) +} + +// DecodeRLP implements rlp.Encoder +func (m *NodeIDToNodeIDs) DecodeRLP(s *rlp.Stream) error { + *m = make(NodeIDToNodeIDs) + var dec [][][]byte + if err := s.Decode(&dec); err != nil { + return err + } + + for i := 0; i < len(dec); i += 2 { + key := types.NodeID{} + err := key.UnmarshalText(dec[i][0]) + if err != nil { + return err + } + + valueMap := map[types.NodeID]struct{}{} + for _, v := range dec[i+1] { + value := types.NodeID{} + err := value.UnmarshalText(v) + if err != nil { + return err + } + + valueMap[value] = struct{}{} + } + + (*m)[key] = valueMap + } + + return nil +} + +// NodeID the map with NodeID. 
+type NodeID map[types.NodeID]struct{}
+
+// EncodeRLP implements rlp.Encoder
+func (m NodeID) EncodeRLP(w io.Writer) error {
+    var allBytes [][]byte
+    for k := range m {
+        kBytes, err := k.MarshalText()
+        if err != nil {
+            return err
+        }
+        allBytes = append(allBytes, kBytes)
+    }
+
+    return rlp.Encode(w, allBytes)
+}
+
+// DecodeRLP implements rlp.Decoder
+func (m *NodeID) DecodeRLP(s *rlp.Stream) error {
+    *m = make(NodeID)
+    var dec [][]byte
+    if err := s.Decode(&dec); err != nil {
+        return err
+    }
+
+    for i := 0; i < len(dec); i++ {
+        key := types.NodeID{}
+        err := key.UnmarshalText(dec[i])
+        if err != nil {
+            return err
+        }
+
+        (*m)[key] = struct{}{}
+    }
+
+    return nil
+}
+
+// NodeIDToPubShares is the map from NodeID to PublicKeyShares.
+type NodeIDToPubShares map[types.NodeID]*dkg.PublicKeyShares
+
+// EncodeRLP implements rlp.Encoder
+func (m NodeIDToPubShares) EncodeRLP(w io.Writer) error {
+    var allBytes [][]byte
+    for k, v := range m {
+        kBytes, err := k.MarshalText()
+        if err != nil {
+            return err
+        }
+        allBytes = append(allBytes, kBytes)
+
+        bytes, err := rlp.EncodeToBytes(v)
+        if err != nil {
+            return err
+        }
+        allBytes = append(allBytes, bytes)
+    }
+
+    return rlp.Encode(w, allBytes)
+}
+
+// DecodeRLP implements rlp.Decoder
+func (m *NodeIDToPubShares) DecodeRLP(s *rlp.Stream) error {
+    *m = make(NodeIDToPubShares)
+    var dec [][]byte
+    if err := s.Decode(&dec); err != nil {
+        return err
+    }
+
+    for i := 0; i < len(dec); i += 2 {
+        key := types.NodeID{}
+        err := key.UnmarshalText(dec[i])
+        if err != nil {
+            return err
+        }
+
+        value := dkg.PublicKeyShares{}
+        err = rlp.DecodeBytes(dec[i+1], &value)
+        if err != nil {
+            return err
+        }
+
+        (*m)[key] = &value
+    }
+
+    return nil
+}
+
+// NodeIDToDKGID is the map from NodeID to DKGID.
+type NodeIDToDKGID map[types.NodeID]dkg.ID
+
+// EncodeRLP implements rlp.Encoder
+func (m NodeIDToDKGID) EncodeRLP(w io.Writer) error {
+    var allBytes [][]byte
+    for k, v := range m {
+        kBytes, err := k.MarshalText()
+        if err != nil {
+            return err
+        }
+        allBytes = append(allBytes, kBytes)
+        allBytes = append(allBytes, v.GetLittleEndian())
+    }
+
+    return rlp.Encode(w, allBytes)
+}
+
+// DecodeRLP implements rlp.Decoder
+func (m *NodeIDToDKGID) DecodeRLP(s *rlp.Stream) error {
+    *m = make(NodeIDToDKGID)
+    var dec [][]byte
+    if err := s.Decode(&dec); err != nil {
+        return err
+    }
+
+    for i := 0; i < len(dec); i += 2 {
+        key := types.NodeID{}
+        err := key.UnmarshalText(dec[i])
+        if err != nil {
+            return err
+        }
+
+        value := dkg.ID{}
+        err = value.SetLittleEndian(dec[i+1])
+        if err != nil {
+            return err
+        }
+
+        (*m)[key] = value
+    }
+
+    return nil
+}
+
+// LevelDBBackedDB is a leveldb-backed DB implementation.
+type LevelDBBackedDB struct {
+    db *leveldb.DB
+}
+
+// NewLevelDBBackedDB initializes a leveldb-backed database.
+func NewLevelDBBackedDB(
+    path string) (lvl *LevelDBBackedDB, err error) {
+
+    dbInst, err := leveldb.OpenFile(path, nil)
+    if err != nil {
+        return
+    }
+    lvl = &LevelDBBackedDB{db: dbInst}
+    return
+}
+
+// Close implements the Closer interface and releases allocated resources.
+func (lvl *LevelDBBackedDB) Close() error {
+    return lvl.db.Close()
+}
+
+// HasBlock implements the Reader.HasBlock method.
+func (lvl *LevelDBBackedDB) HasBlock(hash common.Hash) bool {
+    exists, err := lvl.internalHasBlock(lvl.getBlockKey(hash))
+    if err != nil {
+        panic(err)
+    }
+    return exists
+}
+
+func (lvl *LevelDBBackedDB) internalHasBlock(key []byte) (bool, error) {
+    return lvl.db.Has(key, nil)
+}
+
+// GetBlock implements the Reader.GetBlock method.
+func (lvl *LevelDBBackedDB) GetBlock( + hash common.Hash) (block types.Block, err error) { + queried, err := lvl.db.Get(lvl.getBlockKey(hash), nil) + if err != nil { + if err == leveldb.ErrNotFound { + err = ErrBlockDoesNotExist + } + return + } + err = rlp.DecodeBytes(queried, &block) + return +} + +// UpdateBlock implements the Writer.UpdateBlock method. +func (lvl *LevelDBBackedDB) UpdateBlock(block types.Block) (err error) { + // NOTE: we didn't handle changes of block hash (and it + // should not happen). + marshaled, err := rlp.EncodeToBytes(&block) + if err != nil { + return + } + blockKey := lvl.getBlockKey(block.Hash) + exists, err := lvl.internalHasBlock(blockKey) + if err != nil { + return + } + if !exists { + err = ErrBlockDoesNotExist + return + } + err = lvl.db.Put(blockKey, marshaled, nil) + return +} + +// PutBlock implements the Writer.PutBlock method. +func (lvl *LevelDBBackedDB) PutBlock(block types.Block) (err error) { + marshaled, err := rlp.EncodeToBytes(&block) + if err != nil { + return + } + blockKey := lvl.getBlockKey(block.Hash) + exists, err := lvl.internalHasBlock(blockKey) + if err != nil { + return + } + if exists { + err = ErrBlockExists + return + } + err = lvl.db.Put(blockKey, marshaled, nil) + return +} + +// GetAllBlocks implements Reader.GetAllBlocks method, which allows callers +// to retrieve all blocks in DB. +func (lvl *LevelDBBackedDB) GetAllBlocks() (BlockIterator, error) { + return nil, ErrNotImplemented +} + +// PutCompactionChainTipInfo saves tip of compaction chain into the database. +func (lvl *LevelDBBackedDB) PutCompactionChainTipInfo( + blockHash common.Hash, height uint64) error { + marshaled, err := rlp.EncodeToBytes(&compactionChainTipInfo{ + Hash: blockHash, + Height: height, + }) + if err != nil { + return err + } + // Check current cached tip info to make sure the one to be updated is + // valid. + info, err := lvl.internalGetCompactionChainTipInfo() + if err != nil { + return err + } + if info.Height+1 != height { + return ErrInvalidCompactionChainTipHeight + } + return lvl.db.Put(compactionChainTipInfoKey, marshaled, nil) +} + +func (lvl *LevelDBBackedDB) internalGetCompactionChainTipInfo() ( + info compactionChainTipInfo, err error) { + queried, err := lvl.db.Get(compactionChainTipInfoKey, nil) + if err != nil { + if err == leveldb.ErrNotFound { + err = nil + } + return + } + err = rlp.DecodeBytes(queried, &info) + return +} + +// GetCompactionChainTipInfo get the tip info of compaction chain into the +// database. +func (lvl *LevelDBBackedDB) GetCompactionChainTipInfo() ( + hash common.Hash, height uint64) { + info, err := lvl.internalGetCompactionChainTipInfo() + if err != nil { + panic(err) + } + hash, height = info.Hash, info.Height + return +} + +// GetDKGPrivateKey get DKG private key of one round. +func (lvl *LevelDBBackedDB) GetDKGPrivateKey(round, reset uint64) ( + prv dkg.PrivateKey, err error) { + queried, err := lvl.db.Get(lvl.getDKGPrivateKeyKey(round), nil) + if err != nil { + if err == leveldb.ErrNotFound { + err = ErrDKGPrivateKeyDoesNotExist + } + return + } + pk := dkgPrivateKey{} + err = rlp.DecodeBytes(queried, &pk) + if pk.Reset != reset { + err = ErrDKGPrivateKeyDoesNotExist + return + } + prv = pk.PK + return +} + +// PutDKGPrivateKey save DKG private key of one round. +func (lvl *LevelDBBackedDB) PutDKGPrivateKey( + round, reset uint64, prv dkg.PrivateKey) error { + // Check existence. 
+ _, err := lvl.GetDKGPrivateKey(round, reset) + if err == nil { + return ErrDKGPrivateKeyExists + } + if err != ErrDKGPrivateKeyDoesNotExist { + return err + } + pk := &dkgPrivateKey{ + PK: prv, + Reset: reset, + } + marshaled, err := rlp.EncodeToBytes(&pk) + if err != nil { + return err + } + return lvl.db.Put( + lvl.getDKGPrivateKeyKey(round), marshaled, nil) +} + +// GetDKGProtocol get DKG protocol. +func (lvl *LevelDBBackedDB) GetDKGProtocol() ( + info DKGProtocolInfo, err error) { + queried, err := lvl.db.Get(lvl.getDKGProtocolInfoKey(), nil) + if err != nil { + if err == leveldb.ErrNotFound { + err = ErrDKGProtocolDoesNotExist + } + return + } + + err = rlp.DecodeBytes(queried, &info) + return +} + +// PutOrUpdateDKGProtocol save DKG protocol. +func (lvl *LevelDBBackedDB) PutOrUpdateDKGProtocol(info DKGProtocolInfo) error { + marshaled, err := rlp.EncodeToBytes(&info) + if err != nil { + return err + } + return lvl.db.Put(lvl.getDKGProtocolInfoKey(), marshaled, nil) +} + +func (lvl *LevelDBBackedDB) getBlockKey(hash common.Hash) (ret []byte) { + ret = make([]byte, len(blockKeyPrefix)+len(hash[:])) + copy(ret, blockKeyPrefix) + copy(ret[len(blockKeyPrefix):], hash[:]) + return +} + +func (lvl *LevelDBBackedDB) getDKGPrivateKeyKey( + round uint64) (ret []byte) { + ret = make([]byte, len(dkgPrivateKeyKeyPrefix)+8) + copy(ret, dkgPrivateKeyKeyPrefix) + binary.LittleEndian.PutUint64( + ret[len(dkgPrivateKeyKeyPrefix):], round) + return +} + +func (lvl *LevelDBBackedDB) getDKGProtocolInfoKey() (ret []byte) { + ret = make([]byte, len(dkgProtocolInfoKeyPrefix)+8) + copy(ret, dkgProtocolInfoKeyPrefix) + return +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/db/memory.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/db/memory.go new file mode 100644 index 000000000..2ad5cda9e --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/db/memory.go @@ -0,0 +1,262 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package db + +import ( + "encoding/json" + "io/ioutil" + "os" + "sync" + + "github.com/byzantine-lab/dexon-consensus/common" + "github.com/byzantine-lab/dexon-consensus/core/crypto/dkg" + "github.com/byzantine-lab/dexon-consensus/core/types" +) + +type blockSeqIterator struct { + idx int + db *MemBackedDB +} + +// NextBlock implemenets BlockIterator.NextBlock method. +func (seq *blockSeqIterator) NextBlock() (types.Block, error) { + curIdx := seq.idx + seq.idx++ + return seq.db.getBlockByIndex(curIdx) +} + +// MemBackedDB is a memory backed DB implementation. 
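A usage sketch for the leveldb-backed store above (storeAndReload and the path are hypothetical), written as if inside this package:

func storeAndReload(b types.Block) (types.Block, error) {
    dbInst, err := NewLevelDBBackedDB("/tmp/consensus-db") // hypothetical path
    if err != nil {
        return types.Block{}, err
    }
    defer dbInst.Close()
    // PutBlock refuses duplicates; tolerate re-inserting the same block.
    if err := dbInst.PutBlock(b); err != nil && err != ErrBlockExists {
        return types.Block{}, err
    }
    return dbInst.GetBlock(b.Hash)
}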
+type MemBackedDB struct { + blocksLock sync.RWMutex + blockHashSequence common.Hashes + blocksByHash map[common.Hash]*types.Block + compactionChainTipLock sync.RWMutex + compactionChainTipHash common.Hash + compactionChainTipHeight uint64 + dkgPrivateKeysLock sync.RWMutex + dkgPrivateKeys map[uint64]*dkgPrivateKey + dkgProtocolLock sync.RWMutex + dkgProtocolInfo *DKGProtocolInfo + persistantFilePath string +} + +// NewMemBackedDB initialize a memory-backed database. +func NewMemBackedDB(persistantFilePath ...string) ( + dbInst *MemBackedDB, err error) { + dbInst = &MemBackedDB{ + blockHashSequence: common.Hashes{}, + blocksByHash: make(map[common.Hash]*types.Block), + dkgPrivateKeys: make(map[uint64]*dkgPrivateKey), + } + if len(persistantFilePath) == 0 || len(persistantFilePath[0]) == 0 { + return + } + dbInst.persistantFilePath = persistantFilePath[0] + buf, err := ioutil.ReadFile(dbInst.persistantFilePath) + if err != nil { + if !os.IsNotExist(err) { + // Something unexpected happened. + return + } + // It's expected behavior that file doesn't exists, we should not + // report error on it. + err = nil + return + } + + // Init this instance by file content, it's a temporary way + // to export those private field for JSON encoding. + toLoad := struct { + Sequence common.Hashes + ByHash map[common.Hash]*types.Block + }{} + err = json.Unmarshal(buf, &toLoad) + if err != nil { + return + } + dbInst.blockHashSequence = toLoad.Sequence + dbInst.blocksByHash = toLoad.ByHash + return +} + +// HasBlock returns wheter or not the DB has a block identified with the hash. +func (m *MemBackedDB) HasBlock(hash common.Hash) bool { + m.blocksLock.RLock() + defer m.blocksLock.RUnlock() + + _, ok := m.blocksByHash[hash] + return ok +} + +// GetBlock returns a block given a hash. +func (m *MemBackedDB) GetBlock(hash common.Hash) (types.Block, error) { + m.blocksLock.RLock() + defer m.blocksLock.RUnlock() + + return m.internalGetBlock(hash) +} + +func (m *MemBackedDB) internalGetBlock(hash common.Hash) (types.Block, error) { + b, ok := m.blocksByHash[hash] + if !ok { + return types.Block{}, ErrBlockDoesNotExist + } + return *b, nil +} + +// PutBlock inserts a new block into the database. +func (m *MemBackedDB) PutBlock(block types.Block) error { + if m.HasBlock(block.Hash) { + return ErrBlockExists + } + + m.blocksLock.Lock() + defer m.blocksLock.Unlock() + + m.blockHashSequence = append(m.blockHashSequence, block.Hash) + m.blocksByHash[block.Hash] = &block + return nil +} + +// UpdateBlock updates a block in the database. +func (m *MemBackedDB) UpdateBlock(block types.Block) error { + if !m.HasBlock(block.Hash) { + return ErrBlockDoesNotExist + } + + m.blocksLock.Lock() + defer m.blocksLock.Unlock() + + m.blocksByHash[block.Hash] = &block + return nil +} + +// PutCompactionChainTipInfo saves tip of compaction chain into the database. +func (m *MemBackedDB) PutCompactionChainTipInfo( + blockHash common.Hash, height uint64) error { + m.compactionChainTipLock.Lock() + defer m.compactionChainTipLock.Unlock() + if m.compactionChainTipHeight+1 != height { + return ErrInvalidCompactionChainTipHeight + } + m.compactionChainTipHeight = height + m.compactionChainTipHash = blockHash + return nil +} + +// GetCompactionChainTipInfo get the tip info of compaction chain into the +// database. 
+func (m *MemBackedDB) GetCompactionChainTipInfo() ( + hash common.Hash, height uint64) { + m.compactionChainTipLock.RLock() + defer m.compactionChainTipLock.RUnlock() + return m.compactionChainTipHash, m.compactionChainTipHeight +} + +// GetDKGPrivateKey get DKG private key of one round. +func (m *MemBackedDB) GetDKGPrivateKey(round, reset uint64) ( + dkg.PrivateKey, error) { + m.dkgPrivateKeysLock.RLock() + defer m.dkgPrivateKeysLock.RUnlock() + if prv, exists := m.dkgPrivateKeys[round]; exists && prv.Reset == reset { + return prv.PK, nil + } + return dkg.PrivateKey{}, ErrDKGPrivateKeyDoesNotExist +} + +// PutDKGPrivateKey save DKG private key of one round. +func (m *MemBackedDB) PutDKGPrivateKey( + round, reset uint64, prv dkg.PrivateKey) error { + m.dkgPrivateKeysLock.Lock() + defer m.dkgPrivateKeysLock.Unlock() + if prv, exists := m.dkgPrivateKeys[round]; exists && prv.Reset == reset { + return ErrDKGPrivateKeyExists + } + m.dkgPrivateKeys[round] = &dkgPrivateKey{ + PK: prv, + Reset: reset, + } + return nil +} + +// GetDKGProtocol get DKG protocol. +func (m *MemBackedDB) GetDKGProtocol() ( + DKGProtocolInfo, error) { + m.dkgProtocolLock.RLock() + defer m.dkgProtocolLock.RUnlock() + if m.dkgProtocolInfo == nil { + return DKGProtocolInfo{}, ErrDKGProtocolDoesNotExist + } + + return *m.dkgProtocolInfo, nil +} + +// PutOrUpdateDKGProtocol save DKG protocol. +func (m *MemBackedDB) PutOrUpdateDKGProtocol(dkgProtocol DKGProtocolInfo) error { + m.dkgProtocolLock.Lock() + defer m.dkgProtocolLock.Unlock() + m.dkgProtocolInfo = &dkgProtocol + return nil +} + +// Close implement Closer interface, which would release allocated resource. +func (m *MemBackedDB) Close() (err error) { + // Save internal state to a pretty-print json file. It's a temporary way + // to dump private file via JSON encoding. + if len(m.persistantFilePath) == 0 { + return + } + + m.blocksLock.RLock() + defer m.blocksLock.RUnlock() + + toDump := struct { + Sequence common.Hashes + ByHash map[common.Hash]*types.Block + }{ + Sequence: m.blockHashSequence, + ByHash: m.blocksByHash, + } + + // Dump to JSON with 2-space indent. + buf, err := json.Marshal(&toDump) + if err != nil { + return + } + + err = ioutil.WriteFile(m.persistantFilePath, buf, 0644) + return +} + +func (m *MemBackedDB) getBlockByIndex(idx int) (types.Block, error) { + m.blocksLock.RLock() + defer m.blocksLock.RUnlock() + + if idx >= len(m.blockHashSequence) { + return types.Block{}, ErrIterationFinished + } + + hash := m.blockHashSequence[idx] + return m.internalGetBlock(hash) +} + +// GetAllBlocks implement Reader.GetAllBlocks method, which allows caller +// to retrieve all blocks in DB. +func (m *MemBackedDB) GetAllBlocks() (BlockIterator, error) { + return &blockSeqIterator{db: m}, nil +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/dkg-tsig-protocol.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/dkg-tsig-protocol.go new file mode 100644 index 000000000..38739da4e --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/dkg-tsig-protocol.go @@ -0,0 +1,709 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. 
+// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package core + +import ( + "fmt" + "sync" + + "github.com/byzantine-lab/dexon-consensus/common" + "github.com/byzantine-lab/dexon-consensus/core/crypto" + "github.com/byzantine-lab/dexon-consensus/core/crypto/dkg" + "github.com/byzantine-lab/dexon-consensus/core/db" + "github.com/byzantine-lab/dexon-consensus/core/types" + typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg" + "github.com/byzantine-lab/dexon-consensus/core/utils" +) + +// Errors for dkg module. +var ( + ErrNotDKGParticipant = fmt.Errorf( + "not a DKG participant") + ErrNotQualifyDKGParticipant = fmt.Errorf( + "not a qualified DKG participant") + ErrIDShareNotFound = fmt.Errorf( + "private share not found for specific ID") + ErrIncorrectPrivateShareSignature = fmt.Errorf( + "incorrect private share signature") + ErrMismatchPartialSignatureHash = fmt.Errorf( + "mismatch partialSignature hash") + ErrIncorrectPartialSignatureSignature = fmt.Errorf( + "incorrect partialSignature signature") + ErrIncorrectPartialSignature = fmt.Errorf( + "incorrect partialSignature") + ErrNotEnoughtPartialSignatures = fmt.Errorf( + "not enough of partial signatures") + ErrRoundAlreadyPurged = fmt.Errorf( + "cache of round already been purged") + ErrTSigNotReady = fmt.Errorf( + "tsig not ready") + ErrSelfMPKNotRegister = fmt.Errorf( + "self mpk not registered") + ErrUnableGetSelfPrvShare = fmt.Errorf( + "unable to get self DKG PrivateShare") + ErrSelfPrvShareMismatch = fmt.Errorf( + "self privateShare does not match mpk registered") +) + +// ErrUnexpectedDKGResetCount represents receiving a DKG message with unexpected +// DKG reset count. +type ErrUnexpectedDKGResetCount struct { + expect, actual uint64 + proposerID types.NodeID +} + +func (e ErrUnexpectedDKGResetCount) Error() string { + return fmt.Sprintf( + "unexpected DKG reset count, from:%s expect:%d actual:%d", + e.proposerID.String()[:6], e.expect, e.actual) +} + +// ErrUnexpectedRound represents receiving a DKG message with unexpected round. +type ErrUnexpectedRound struct { + expect, actual uint64 + proposerID types.NodeID +} + +func (e ErrUnexpectedRound) Error() string { + return fmt.Sprintf("unexpected round, from:%s expect:%d actual:%d", + e.proposerID.String()[:6], e.expect, e.actual) +} + +type dkgReceiver interface { + // ProposeDKGComplaint proposes a DKGComplaint. + ProposeDKGComplaint(complaint *typesDKG.Complaint) + + // ProposeDKGMasterPublicKey propose a DKGMasterPublicKey. + ProposeDKGMasterPublicKey(mpk *typesDKG.MasterPublicKey) + + // ProposeDKGPrivateShare propose a DKGPrivateShare. + ProposeDKGPrivateShare(prv *typesDKG.PrivateShare) + + // ProposeDKGAntiNackComplaint propose a DKGPrivateShare as an anti complaint. + ProposeDKGAntiNackComplaint(prv *typesDKG.PrivateShare) + + // ProposeDKGMPKReady propose a DKGMPKReady message. + ProposeDKGMPKReady(ready *typesDKG.MPKReady) + + // ProposeDKGFinalize propose a DKGFinalize message. + ProposeDKGFinalize(final *typesDKG.Finalize) + + // ProposeDKGSuccess propose a DKGSuccess message. 
+	ProposeDKGSuccess(final *typesDKG.Success)
+}
+
+type dkgProtocol struct {
+	ID                 types.NodeID
+	recv               dkgReceiver
+	round              uint64
+	reset              uint64
+	threshold          int
+	idMap              map[types.NodeID]dkg.ID
+	mpkMap             map[types.NodeID]*dkg.PublicKeyShares
+	masterPrivateShare *dkg.PrivateKeyShares
+	prvShares          *dkg.PrivateKeyShares
+	prvSharesReceived  map[types.NodeID]struct{}
+	nodeComplained     map[types.NodeID]struct{}
+	// Complaint[from][to]'s anti is saved to antiComplaint[from][to].
+	antiComplaintReceived map[types.NodeID]map[types.NodeID]struct{}
+	// The completed step in `runDKG`.
+	step int
+}
+
+func (d *dkgProtocol) convertFromInfo(info db.DKGProtocolInfo) {
+	d.ID = info.ID
+	d.idMap = info.IDMap
+	d.round = info.Round
+	d.threshold = int(info.Threshold)
+	d.mpkMap = info.MpkMap
+	d.prvSharesReceived = info.PrvSharesReceived
+	d.nodeComplained = info.NodeComplained
+	d.antiComplaintReceived = info.AntiComplaintReceived
+	d.step = int(info.Step)
+	d.reset = info.Reset
+	if info.IsMasterPrivateShareEmpty {
+		d.masterPrivateShare = nil
+	} else {
+		d.masterPrivateShare = &info.MasterPrivateShare
+	}
+
+	if info.IsPrvSharesEmpty {
+		d.prvShares = nil
+	} else {
+		d.prvShares = &info.PrvShares
+	}
+}
+
+func (d *dkgProtocol) toDKGProtocolInfo() db.DKGProtocolInfo {
+	info := db.DKGProtocolInfo{
+		ID:                    d.ID,
+		Round:                 d.round,
+		Threshold:             uint64(d.threshold),
+		IDMap:                 d.idMap,
+		MpkMap:                d.mpkMap,
+		PrvSharesReceived:     d.prvSharesReceived,
+		NodeComplained:        d.nodeComplained,
+		AntiComplaintReceived: d.antiComplaintReceived,
+		Step:                  uint64(d.step),
+		Reset:                 d.reset,
+	}
+
+	if d.masterPrivateShare != nil {
+		info.MasterPrivateShare = *d.masterPrivateShare
+	} else {
+		info.IsMasterPrivateShareEmpty = true
+	}
+
+	if d.prvShares != nil {
+		info.PrvShares = *d.prvShares
+	} else {
+		info.IsPrvSharesEmpty = true
+	}
+
+	return info
+}
+
+type dkgShareSecret struct {
+	privateKey *dkg.PrivateKey
+}
+
+// TSigVerifier is the interface for verifying threshold signatures.
+type TSigVerifier interface {
+	VerifySignature(hash common.Hash, sig crypto.Signature) bool
+}
+
+// TSigVerifierCacheInterface specifies the interface used by TSigVerifierCache.
+type TSigVerifierCacheInterface interface {
+	// Configuration returns the configuration at a given round.
+	// Return the genesis configuration if round == 0.
+	Configuration(round uint64) *types.Config
+
+	// DKGComplaints gets all the DKGComplaints of round.
+	DKGComplaints(round uint64) []*typesDKG.Complaint
+
+	// DKGMasterPublicKeys gets all the DKGMasterPublicKey of round.
+	DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey
+
+	// IsDKGFinal checks if DKG is final.
+	IsDKGFinal(round uint64) bool
+}
+
+// TSigVerifierCache is the cache for TSigVerifier.
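+//
+// A minimal usage sketch (illustrative only, not part of the original
+// source; gov stands for any value satisfying TSigVerifierCacheInterface,
+// and round, blockHash and sig are assumed to be in scope):
+//
+//	cache := NewTSigVerifierCache(gov, 7)
+//	if v, ok, err := cache.UpdateAndGet(round); err == nil && ok {
+//		valid := v.VerifySignature(blockHash, sig)
+//		_ = valid
+//	}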
+type TSigVerifierCache struct { + intf TSigVerifierCacheInterface + verifier map[uint64]TSigVerifier + minRound uint64 + cacheSize int + lock sync.RWMutex +} + +type tsigProtocol struct { + nodePublicKeys *typesDKG.NodePublicKeys + hash common.Hash + sigs map[dkg.ID]dkg.PartialSignature + threshold int +} + +func newDKGProtocol( + ID types.NodeID, + recv dkgReceiver, + round uint64, + reset uint64, + threshold int) *dkgProtocol { + + prvShare, pubShare := dkg.NewPrivateKeyShares(threshold) + + recv.ProposeDKGMasterPublicKey(&typesDKG.MasterPublicKey{ + Round: round, + Reset: reset, + DKGID: typesDKG.NewID(ID), + PublicKeyShares: *pubShare.Move(), + }) + + return &dkgProtocol{ + ID: ID, + recv: recv, + round: round, + reset: reset, + threshold: threshold, + idMap: make(map[types.NodeID]dkg.ID), + mpkMap: make(map[types.NodeID]*dkg.PublicKeyShares), + masterPrivateShare: prvShare, + prvShares: dkg.NewEmptyPrivateKeyShares(), + prvSharesReceived: make(map[types.NodeID]struct{}), + nodeComplained: make(map[types.NodeID]struct{}), + antiComplaintReceived: make(map[types.NodeID]map[types.NodeID]struct{}), + } +} + +func recoverDKGProtocol( + ID types.NodeID, + recv dkgReceiver, + round uint64, + reset uint64, + coreDB db.Database) (*dkgProtocol, error) { + dkgProtocolInfo, err := coreDB.GetDKGProtocol() + if err != nil { + if err == db.ErrDKGProtocolDoesNotExist { + return nil, nil + } + return nil, err + } + + dkgProtocol := dkgProtocol{ + recv: recv, + } + dkgProtocol.convertFromInfo(dkgProtocolInfo) + + if dkgProtocol.ID != ID || dkgProtocol.round != round || dkgProtocol.reset != reset { + return nil, nil + } + + return &dkgProtocol, nil +} + +func (d *dkgProtocol) processMasterPublicKeys( + mpks []*typesDKG.MasterPublicKey) (err error) { + d.idMap = make(map[types.NodeID]dkg.ID, len(mpks)) + d.mpkMap = make(map[types.NodeID]*dkg.PublicKeyShares, len(mpks)) + d.prvSharesReceived = make(map[types.NodeID]struct{}, len(mpks)) + ids := make(dkg.IDs, len(mpks)) + for i := range mpks { + if mpks[i].Reset != d.reset { + return ErrUnexpectedDKGResetCount{ + expect: d.reset, + actual: mpks[i].Reset, + proposerID: mpks[i].ProposerID, + } + } + nID := mpks[i].ProposerID + d.idMap[nID] = mpks[i].DKGID + d.mpkMap[nID] = &mpks[i].PublicKeyShares + ids[i] = mpks[i].DKGID + } + d.masterPrivateShare.SetParticipants(ids) + if err = d.verifySelfPrvShare(); err != nil { + return + } + for _, mpk := range mpks { + share, ok := d.masterPrivateShare.Share(mpk.DKGID) + if !ok { + err = ErrIDShareNotFound + continue + } + d.recv.ProposeDKGPrivateShare(&typesDKG.PrivateShare{ + ReceiverID: mpk.ProposerID, + Round: d.round, + Reset: d.reset, + PrivateShare: *share, + }) + } + return +} + +func (d *dkgProtocol) verifySelfPrvShare() error { + selfMPK, exist := d.mpkMap[d.ID] + if !exist { + return ErrSelfMPKNotRegister + } + share, ok := d.masterPrivateShare.Share(d.idMap[d.ID]) + if !ok { + return ErrUnableGetSelfPrvShare + } + ok, err := selfMPK.VerifyPrvShare( + d.idMap[d.ID], share) + if err != nil { + return err + } + if !ok { + return ErrSelfPrvShareMismatch + } + return nil +} + +func (d *dkgProtocol) proposeNackComplaints() { + for nID := range d.mpkMap { + if _, exist := d.prvSharesReceived[nID]; exist { + continue + } + d.recv.ProposeDKGComplaint(&typesDKG.Complaint{ + Round: d.round, + Reset: d.reset, + PrivateShare: typesDKG.PrivateShare{ + ProposerID: nID, + Round: d.round, + Reset: d.reset, + }, + }) + } +} + +func (d *dkgProtocol) processNackComplaints(complaints []*typesDKG.Complaint) ( + err error) { + 
+	if err = d.verifySelfPrvShare(); err != nil {
+		return
+	}
+	for _, complaint := range complaints {
+		if !complaint.IsNack() {
+			continue
+		}
+		if complaint.Reset != d.reset {
+			continue
+		}
+		if complaint.PrivateShare.ProposerID != d.ID {
+			continue
+		}
+		id, exist := d.idMap[complaint.ProposerID]
+		if !exist {
+			err = ErrNotDKGParticipant
+			continue
+		}
+		share, ok := d.masterPrivateShare.Share(id)
+		if !ok {
+			err = ErrIDShareNotFound
+			continue
+		}
+		d.recv.ProposeDKGAntiNackComplaint(&typesDKG.PrivateShare{
+			ProposerID:   d.ID,
+			ReceiverID:   complaint.ProposerID,
+			Round:        d.round,
+			Reset:        d.reset,
+			PrivateShare: *share,
+		})
+	}
+	return
+}
+
+func (d *dkgProtocol) enforceNackComplaints(complaints []*typesDKG.Complaint) {
+	complained := make(map[types.NodeID]struct{})
+	// Do not propose a nack complaint against ourselves.
+	complained[d.ID] = struct{}{}
+	for _, complaint := range complaints {
+		if d.round != complaint.Round || d.reset != complaint.Reset {
+			continue
+		}
+		if !complaint.IsNack() {
+			continue
+		}
+		to := complaint.PrivateShare.ProposerID
+		if _, exist := complained[to]; exist {
+			continue
+		}
+		from := complaint.ProposerID
+		// The nack complaint is already proposed.
+		if from == d.ID {
+			continue
+		}
+		if _, exist :=
+			d.antiComplaintReceived[from][to]; !exist {
+			complained[to] = struct{}{}
+			d.recv.ProposeDKGComplaint(&typesDKG.Complaint{
+				Round: d.round,
+				Reset: d.reset,
+				PrivateShare: typesDKG.PrivateShare{
+					ProposerID: to,
+					Round:      d.round,
+					Reset:      d.reset,
+				},
+			})
+		}
+	}
+}
+
+func (d *dkgProtocol) sanityCheck(prvShare *typesDKG.PrivateShare) error {
+	if d.round != prvShare.Round {
+		return ErrUnexpectedRound{
+			expect:     d.round,
+			actual:     prvShare.Round,
+			proposerID: prvShare.ProposerID,
+		}
+	}
+	if d.reset != prvShare.Reset {
+		return ErrUnexpectedDKGResetCount{
+			expect:     d.reset,
+			actual:     prvShare.Reset,
+			proposerID: prvShare.ProposerID,
+		}
+	}
+	if _, exist := d.idMap[prvShare.ProposerID]; !exist {
+		return ErrNotDKGParticipant
+	}
+	ok, err := utils.VerifyDKGPrivateShareSignature(prvShare)
+	if err != nil {
+		return err
+	}
+	if !ok {
+		return ErrIncorrectPrivateShareSignature
+	}
+	return nil
+}
+
+func (d *dkgProtocol) processPrivateShare(
+	prvShare *typesDKG.PrivateShare) error {
+	receiverID, exist := d.idMap[prvShare.ReceiverID]
+	// This node is not a DKG participant, so ignore the private share.
+ if !exist { + return nil + } + if prvShare.ReceiverID == d.ID { + if _, exist := d.prvSharesReceived[prvShare.ProposerID]; exist { + return nil + } + } else { + if _, exist := d.antiComplaintReceived[prvShare.ReceiverID]; exist { + if _, exist := + d.antiComplaintReceived[prvShare.ReceiverID][prvShare.ProposerID]; exist { + return nil + } + } + } + if err := d.sanityCheck(prvShare); err != nil { + return err + } + mpk := d.mpkMap[prvShare.ProposerID] + ok, err := mpk.VerifyPrvShare(receiverID, &prvShare.PrivateShare) + if err != nil { + return err + } + if prvShare.ReceiverID == d.ID { + d.prvSharesReceived[prvShare.ProposerID] = struct{}{} + } + if !ok { + if _, exist := d.nodeComplained[prvShare.ProposerID]; exist { + return nil + } + complaint := &typesDKG.Complaint{ + Round: d.round, + Reset: d.reset, + PrivateShare: *prvShare, + } + d.nodeComplained[prvShare.ProposerID] = struct{}{} + d.recv.ProposeDKGComplaint(complaint) + } else if prvShare.ReceiverID == d.ID { + sender := d.idMap[prvShare.ProposerID] + if err := d.prvShares.AddShare(sender, &prvShare.PrivateShare); err != nil { + return err + } + } else { + // The prvShare is an anti complaint. + if _, exist := d.antiComplaintReceived[prvShare.ReceiverID]; !exist { + d.antiComplaintReceived[prvShare.ReceiverID] = + make(map[types.NodeID]struct{}) + } + if _, exist := + d.antiComplaintReceived[prvShare.ReceiverID][prvShare.ProposerID]; !exist { + d.recv.ProposeDKGAntiNackComplaint(prvShare) + d.antiComplaintReceived[prvShare.ReceiverID][prvShare.ProposerID] = + struct{}{} + } + } + return nil +} + +func (d *dkgProtocol) proposeMPKReady() { + d.recv.ProposeDKGMPKReady(&typesDKG.MPKReady{ + ProposerID: d.ID, + Round: d.round, + Reset: d.reset, + }) +} + +func (d *dkgProtocol) proposeFinalize() { + d.recv.ProposeDKGFinalize(&typesDKG.Finalize{ + ProposerID: d.ID, + Round: d.round, + Reset: d.reset, + }) +} + +func (d *dkgProtocol) proposeSuccess() { + d.recv.ProposeDKGSuccess(&typesDKG.Success{ + ProposerID: d.ID, + Round: d.round, + Reset: d.reset, + }) +} + +func (d *dkgProtocol) recoverShareSecret(qualifyIDs dkg.IDs) ( + *dkgShareSecret, error) { + if len(qualifyIDs) < d.threshold { + return nil, typesDKG.ErrNotReachThreshold + } + prvKey, err := d.prvShares.RecoverPrivateKey(qualifyIDs) + if err != nil { + return nil, err + } + return &dkgShareSecret{ + privateKey: prvKey, + }, nil +} + +func (ss *dkgShareSecret) sign(hash common.Hash) dkg.PartialSignature { + // DKG sign will always success. + sig, _ := ss.privateKey.Sign(hash) + return dkg.PartialSignature(sig) +} + +// NewTSigVerifierCache creats a TSigVerifierCache instance. +func NewTSigVerifierCache( + intf TSigVerifierCacheInterface, cacheSize int) *TSigVerifierCache { + return &TSigVerifierCache{ + intf: intf, + verifier: make(map[uint64]TSigVerifier), + cacheSize: cacheSize, + } +} + +// UpdateAndGet calls Update and then Get. +func (tc *TSigVerifierCache) UpdateAndGet(round uint64) ( + TSigVerifier, bool, error) { + ok, err := tc.Update(round) + if err != nil { + return nil, false, err + } + if !ok { + return nil, false, nil + } + v, ok := tc.Get(round) + return v, ok, nil +} + +// Purge the cache. +func (tc *TSigVerifierCache) Purge(round uint64) { + tc.lock.Lock() + defer tc.lock.Unlock() + delete(tc.verifier, round) +} + +// Update the cache and returns if success. 
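+//
+// Update builds and caches the group public key of the given round once DKG
+// of that round is final (it returns false otherwise), and evicts the oldest
+// round when the cache grows beyond its size. A usage sketch (illustrative
+// only, not part of the original source):
+//
+//	if ok, err := tc.Update(round); err == nil && ok {
+//		verifier, _ := tc.Get(round)
+//		_ = verifier
+//	}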
+func (tc *TSigVerifierCache) Update(round uint64) (bool, error) { + tc.lock.Lock() + defer tc.lock.Unlock() + if round < tc.minRound { + return false, ErrRoundAlreadyPurged + } + if _, exist := tc.verifier[round]; exist { + return true, nil + } + if !tc.intf.IsDKGFinal(round) { + return false, nil + } + gpk, err := typesDKG.NewGroupPublicKey(round, + tc.intf.DKGMasterPublicKeys(round), + tc.intf.DKGComplaints(round), + utils.GetDKGThreshold(utils.GetConfigWithPanic(tc.intf, round, nil))) + if err != nil { + return false, err + } + if len(tc.verifier) == 0 { + tc.minRound = round + } + tc.verifier[round] = gpk + if len(tc.verifier) > tc.cacheSize { + delete(tc.verifier, tc.minRound) + } + for { + if _, exist := tc.verifier[tc.minRound]; !exist { + tc.minRound++ + } else { + break + } + } + return true, nil +} + +// Delete the cache of given round. +func (tc *TSigVerifierCache) Delete(round uint64) { + tc.lock.Lock() + defer tc.lock.Unlock() + delete(tc.verifier, round) +} + +// Get the TSigVerifier of round and returns if it exists. +func (tc *TSigVerifierCache) Get(round uint64) (TSigVerifier, bool) { + tc.lock.RLock() + defer tc.lock.RUnlock() + verifier, exist := tc.verifier[round] + return verifier, exist +} + +func newTSigProtocol( + npks *typesDKG.NodePublicKeys, + hash common.Hash) *tsigProtocol { + return &tsigProtocol{ + nodePublicKeys: npks, + hash: hash, + sigs: make(map[dkg.ID]dkg.PartialSignature, npks.Threshold+1), + } +} + +func (tsig *tsigProtocol) sanityCheck(psig *typesDKG.PartialSignature) error { + _, exist := tsig.nodePublicKeys.PublicKeys[psig.ProposerID] + if !exist { + return ErrNotQualifyDKGParticipant + } + ok, err := utils.VerifyDKGPartialSignatureSignature(psig) + if err != nil { + return err + } + if !ok { + return ErrIncorrectPartialSignatureSignature + } + if psig.Hash != tsig.hash { + return ErrMismatchPartialSignatureHash + } + return nil +} + +func (tsig *tsigProtocol) processPartialSignature( + psig *typesDKG.PartialSignature) error { + if psig.Round != tsig.nodePublicKeys.Round { + return nil + } + id, exist := tsig.nodePublicKeys.IDMap[psig.ProposerID] + if !exist { + return ErrNotQualifyDKGParticipant + } + if err := tsig.sanityCheck(psig); err != nil { + return err + } + pubKey := tsig.nodePublicKeys.PublicKeys[psig.ProposerID] + if !pubKey.VerifySignature( + tsig.hash, crypto.Signature(psig.PartialSignature)) { + return ErrIncorrectPartialSignature + } + tsig.sigs[id] = psig.PartialSignature + return nil +} + +func (tsig *tsigProtocol) signature() (crypto.Signature, error) { + if len(tsig.sigs) < tsig.nodePublicKeys.Threshold { + return crypto.Signature{}, ErrNotEnoughtPartialSignatures + } + ids := make(dkg.IDs, 0, len(tsig.sigs)) + psigs := make([]dkg.PartialSignature, 0, len(tsig.sigs)) + for id, psig := range tsig.sigs { + ids = append(ids, id) + psigs = append(psigs, psig) + } + return dkg.RecoverSignature(psigs, ids) +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/interfaces.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/interfaces.go new file mode 100644 index 000000000..3adcf78c9 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/interfaces.go @@ -0,0 +1,182 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. 
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+	"time"
+
+	"github.com/byzantine-lab/dexon-consensus/common"
+	"github.com/byzantine-lab/dexon-consensus/core/crypto"
+	"github.com/byzantine-lab/dexon-consensus/core/types"
+	typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+)
+
+// Application describes the application interface that interacts with the
+// DEXON consensus core.
+type Application interface {
+	// PreparePayload is called when consensus core is preparing a block.
+	PreparePayload(position types.Position) ([]byte, error)
+
+	// PrepareWitness will return the witness data no lower than consensusHeight.
+	PrepareWitness(consensusHeight uint64) (types.Witness, error)
+
+	// VerifyBlock verifies if the block is valid.
+	VerifyBlock(block *types.Block) types.BlockVerifyStatus
+
+	// BlockConfirmed is called when a block is confirmed and added to the
+	// lattice.
+	BlockConfirmed(block types.Block)
+
+	// BlockDelivered is called when a block is added to the compaction chain.
+	BlockDelivered(hash common.Hash, position types.Position, rand []byte)
+}
+
+// Debug describes the application interface that requires
+// more detailed consensus execution.
+type Debug interface {
+	// BlockReceived is called when the block is received in agreement.
+	BlockReceived(common.Hash)
+	// BlockReady is called when the block's randomness is ready.
+	BlockReady(common.Hash)
+}
+
+// Network describes the network interface that interacts with the DEXON
+// consensus core.
+type Network interface {
+	// PullBlocks tries to pull blocks from the DEXON network.
+	PullBlocks(hashes common.Hashes)
+
+	// PullVotes tries to pull votes from the DEXON network.
+	PullVotes(position types.Position)
+
+	// BroadcastVote broadcasts vote to all nodes in DEXON network.
+	BroadcastVote(vote *types.Vote)
+
+	// BroadcastBlock broadcasts block to all nodes in DEXON network.
+	BroadcastBlock(block *types.Block)
+
+	// BroadcastAgreementResult broadcasts agreement result to DKG set.
+	BroadcastAgreementResult(randRequest *types.AgreementResult)
+
+	// SendDKGPrivateShare sends PrivateShare to a DKG participant.
+	SendDKGPrivateShare(pub crypto.PublicKey, prvShare *typesDKG.PrivateShare)
+
+	// BroadcastDKGPrivateShare broadcasts PrivateShare to all DKG participants.
+	BroadcastDKGPrivateShare(prvShare *typesDKG.PrivateShare)
+
+	// BroadcastDKGPartialSignature broadcasts partialSignature to all
+	// DKG participants.
+	BroadcastDKGPartialSignature(psig *typesDKG.PartialSignature)
+
+	// ReceiveChan returns a channel to receive messages from the DEXON
+	// network.
+	ReceiveChan() <-chan types.Msg
+
+	// ReportBadPeerChan returns a channel to report a bad peer.
+	ReportBadPeerChan() chan<- interface{}
+}
+
+// Governance specifies the interface to control the governance contract.
+// Note that there are many more methods in the governance contract; this
+// interface only defines those required to run the consensus algorithm.
+type Governance interface {
+	// Configuration returns the configuration at a given round.
+	// Return the genesis configuration if round == 0.
+	Configuration(round uint64) *types.Config
+
+	// CRS returns the CRS for a given round. Return the genesis CRS if
+	// round == 0.
+	//
+	// The CRS returned is the proposed or most recently reset one; it may
+	// change later if the corresponding DKG set fails to generate a group
+	// public key.
+	CRS(round uint64) common.Hash
+
+	// ProposeCRS proposes a CRS of round.
+	ProposeCRS(round uint64, signedCRS []byte)
+
+	// NodeSet returns the node set at a given round.
+	// Return the genesis node set if round == 0.
+	NodeSet(round uint64) []crypto.PublicKey
+
+	// GetRoundHeight returns the beginning height of a round.
+	GetRoundHeight(round uint64) uint64
+
+	//// DKG-related methods.
+
+	// AddDKGComplaint adds a DKGComplaint.
+	AddDKGComplaint(complaint *typesDKG.Complaint)
+
+	// DKGComplaints gets all the DKGComplaints of round.
+	DKGComplaints(round uint64) []*typesDKG.Complaint
+
+	// AddDKGMasterPublicKey adds a DKGMasterPublicKey.
+	AddDKGMasterPublicKey(masterPublicKey *typesDKG.MasterPublicKey)
+
+	// DKGMasterPublicKeys gets all the DKGMasterPublicKey of round.
+	DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey
+
+	// AddDKGMPKReady adds a DKG ready message.
+	AddDKGMPKReady(ready *typesDKG.MPKReady)
+
+	// IsDKGMPKReady checks if DKG's master public key preparation is ready.
+	IsDKGMPKReady(round uint64) bool
+
+	// AddDKGFinalize adds a DKG finalize message.
+	AddDKGFinalize(final *typesDKG.Finalize)
+
+	// IsDKGFinal checks if DKG is final.
+	IsDKGFinal(round uint64) bool
+
+	// AddDKGSuccess adds a DKG success message.
+	AddDKGSuccess(success *typesDKG.Success)
+
+	// IsDKGSuccess checks if DKG succeeded.
+	IsDKGSuccess(round uint64) bool
+
+	// ReportForkVote reports a node for forking votes.
+	ReportForkVote(vote1, vote2 *types.Vote)
+
+	// ReportForkBlock reports a node for forking blocks.
+	ReportForkBlock(block1, block2 *types.Block)
+
+	// ResetDKG resets the latest DKG data and proposes a new CRS.
+	ResetDKG(newSignedCRS []byte)
+
+	// DKGResetCount returns the reset count for DKG of the given round.
+	DKGResetCount(round uint64) uint64
+}
+
+// Ticker defines the capability to tick by interval.
+type Ticker interface {
+	// Tick returns a channel that is triggered at the next tick.
+	Tick() <-chan time.Time
+
+	// Stop the ticker.
+	Stop()
+
+	// Restart the ticker and clear all internal data.
+	Restart()
+}
+
+// Recovery is the interface for interacting with recovery information.
+type Recovery interface {
+	// ProposeSkipBlock proposes a skip block.
+	ProposeSkipBlock(height uint64) error
+
+	// Votes gets the number of votes of the given height.
+	Votes(height uint64) (uint64, error)
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/leader-selector.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/leader-selector.go
new file mode 100644
index 000000000..9e3d406a7
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/leader-selector.go
@@ -0,0 +1,149 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package core + +import ( + "math/big" + "sync" + + "github.com/byzantine-lab/dexon-consensus/common" + "github.com/byzantine-lab/dexon-consensus/core/crypto" + "github.com/byzantine-lab/dexon-consensus/core/types" +) + +type validLeaderFn func(block *types.Block, crs common.Hash) (bool, error) + +// Some constant value. +var ( + maxHash *big.Int + one *big.Rat +) + +func init() { + hash := make([]byte, common.HashLength) + for i := range hash { + hash[i] = 0xff + } + maxHash = big.NewInt(0).SetBytes(hash) + one = big.NewRat(1, 1) +} + +type leaderSelector struct { + hashCRS common.Hash + numCRS *big.Int + minCRSBlock *big.Int + minBlockHash common.Hash + pendingBlocks map[common.Hash]*types.Block + validLeader validLeaderFn + lock sync.Mutex + logger common.Logger +} + +func newLeaderSelector( + validLeader validLeaderFn, logger common.Logger) *leaderSelector { + return &leaderSelector{ + minCRSBlock: maxHash, + validLeader: validLeader, + logger: logger, + } +} + +func (l *leaderSelector) distance(sig crypto.Signature) *big.Int { + hash := crypto.Keccak256Hash(sig.Signature[:]) + num := big.NewInt(0) + num.SetBytes(hash[:]) + num.Abs(num.Sub(l.numCRS, num)) + return num +} + +func (l *leaderSelector) probability(sig crypto.Signature) float64 { + dis := l.distance(sig) + prob := big.NewRat(1, 1).SetFrac(dis, maxHash) + p, _ := prob.Sub(one, prob).Float64() + return p +} + +func (l *leaderSelector) restart(crs common.Hash) { + numCRS := big.NewInt(0) + numCRS.SetBytes(crs[:]) + l.lock.Lock() + defer l.lock.Unlock() + l.numCRS = numCRS + l.hashCRS = crs + l.minCRSBlock = maxHash + l.minBlockHash = types.NullBlockHash + l.pendingBlocks = make(map[common.Hash]*types.Block) +} + +func (l *leaderSelector) leaderBlockHash() common.Hash { + l.lock.Lock() + defer l.lock.Unlock() + for _, b := range l.pendingBlocks { + ok, dist := l.potentialLeader(b) + if !ok { + continue + } + ok, err := l.validLeader(b, l.hashCRS) + if err != nil { + l.logger.Error("Error checking validLeader", "error", err, "block", b) + delete(l.pendingBlocks, b.Hash) + continue + } + if ok { + l.updateLeader(b, dist) + delete(l.pendingBlocks, b.Hash) + } + } + return l.minBlockHash +} + +func (l *leaderSelector) processBlock(block *types.Block) error { + l.lock.Lock() + defer l.lock.Unlock() + ok, dist := l.potentialLeader(block) + if !ok { + return nil + } + ok, err := l.validLeader(block, l.hashCRS) + if err != nil { + return err + } + if !ok { + l.pendingBlocks[block.Hash] = block + return nil + } + l.updateLeader(block, dist) + return nil +} + +func (l *leaderSelector) potentialLeader(block *types.Block) (bool, *big.Int) { + dist := l.distance(block.CRSSignature) + cmp := l.minCRSBlock.Cmp(dist) + return (cmp > 0 || (cmp == 0 && block.Hash.Less(l.minBlockHash))), dist +} + +func (l *leaderSelector) updateLeader(block *types.Block, 
+	dist *big.Int) {
+	l.minCRSBlock = dist
+	l.minBlockHash = block.Hash
+}
+
+func (l *leaderSelector) findPendingBlock(
+	hash common.Hash) (*types.Block, bool) {
+	b, e := l.pendingBlocks[hash]
+	return b, e
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/nonblocking.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/nonblocking.go
new file mode 100644
index 000000000..516138a63
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/nonblocking.go
@@ -0,0 +1,137 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/byzantine-lab/dexon-consensus/common"
+	"github.com/byzantine-lab/dexon-consensus/core/types"
+)
+
+type blockConfirmedEvent struct {
+	block *types.Block
+}
+
+type blockDeliveredEvent struct {
+	blockHash     common.Hash
+	blockPosition types.Position
+	rand          []byte
+}
+
+// nonBlocking is a decorator for the following interfaces that makes their
+// methods non-blocking:
+//  - Application
+//  - Debug
+// It also provides non-blocking db updates.
+type nonBlocking struct {
+	app          Application
+	debug        Debug
+	eventChan    chan interface{}
+	events       []interface{}
+	eventsChange *sync.Cond
+	running      sync.WaitGroup
+}
+
+func newNonBlocking(app Application, debug Debug) *nonBlocking {
+	nonBlockingModule := &nonBlocking{
+		app:          app,
+		debug:        debug,
+		eventChan:    make(chan interface{}, 6),
+		events:       make([]interface{}, 0, 100),
+		eventsChange: sync.NewCond(&sync.Mutex{}),
+	}
+	go nonBlockingModule.run()
+	return nonBlockingModule
+}
+
+func (nb *nonBlocking) addEvent(event interface{}) {
+	nb.eventsChange.L.Lock()
+	defer nb.eventsChange.L.Unlock()
+	nb.events = append(nb.events, event)
+	nb.eventsChange.Broadcast()
+}
+
+func (nb *nonBlocking) run() {
+	// This goroutine consumes the first event in events and calls the
+	// corresponding methods of Application/Debug/db.
+	for {
+		var event interface{}
+		func() {
+			nb.eventsChange.L.Lock()
+			defer nb.eventsChange.L.Unlock()
+			for len(nb.events) == 0 {
+				nb.eventsChange.Wait()
+			}
+			event = nb.events[0]
+			nb.events = nb.events[1:]
+			nb.running.Add(1)
+		}()
+		switch e := event.(type) {
+		case blockConfirmedEvent:
+			nb.app.BlockConfirmed(*e.block)
+		case blockDeliveredEvent:
+			nb.app.BlockDelivered(e.blockHash, e.blockPosition, e.rand)
+		default:
+			fmt.Printf("Unknown event %v.", e)
+		}
+		nb.running.Done()
+		nb.eventsChange.Broadcast()
+	}
+}
+
+// wait blocks until all queued events are processed.
+func (nb *nonBlocking) wait() {
+	nb.eventsChange.L.Lock()
+	defer nb.eventsChange.L.Unlock()
+	for len(nb.events) > 0 {
+		nb.eventsChange.Wait()
+	}
+	nb.running.Wait()
+}
+
+// PreparePayload cannot be non-blocking.
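+// Synchronous methods like this one delegate directly to the wrapped
+// Application, while the event-style callbacks below are queued. A usage
+// sketch (illustrative only; app, debug and block are assumed values, with
+// app and debug satisfying the Application and Debug interfaces):
+//
+//	nb := newNonBlocking(app, debug)
+//	nb.BlockConfirmed(block) // queued; returns immediately
+//	nb.wait()                // block until queued events are consumed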
+func (nb *nonBlocking) PreparePayload(position types.Position) ([]byte, error) { + return nb.app.PreparePayload(position) +} + +// PrepareWitness cannot be non-blocking. +func (nb *nonBlocking) PrepareWitness(height uint64) (types.Witness, error) { + return nb.app.PrepareWitness(height) +} + +// VerifyBlock cannot be non-blocking. +func (nb *nonBlocking) VerifyBlock(block *types.Block) types.BlockVerifyStatus { + return nb.app.VerifyBlock(block) +} + +// BlockConfirmed is called when a block is confirmed and added to lattice. +func (nb *nonBlocking) BlockConfirmed(block types.Block) { + nb.addEvent(blockConfirmedEvent{&block}) +} + +// BlockDelivered is called when a block is add to the compaction chain. +func (nb *nonBlocking) BlockDelivered(blockHash common.Hash, + blockPosition types.Position, rand []byte) { + nb.addEvent(blockDeliveredEvent{ + blockHash: blockHash, + blockPosition: blockPosition, + rand: rand, + }) +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/agreement.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/agreement.go new file mode 100644 index 000000000..274cbfc79 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/agreement.go @@ -0,0 +1,301 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// . + +package syncer + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/byzantine-lab/dexon-consensus/common" + "github.com/byzantine-lab/dexon-consensus/core" + "github.com/byzantine-lab/dexon-consensus/core/crypto" + "github.com/byzantine-lab/dexon-consensus/core/types" + "github.com/byzantine-lab/dexon-consensus/core/utils" +) + +// Struct agreement implements struct of BA (Byzantine Agreement) protocol +// needed in syncer, which only receives agreement results. +type agreement struct { + chainTip uint64 + cache *utils.NodeSetCache + tsigVerifierCache *core.TSigVerifierCache + inputChan chan interface{} + outputChan chan<- *types.Block + pullChan chan<- common.Hash + blocks map[types.Position]map[common.Hash]*types.Block + agreementResults map[common.Hash][]byte + latestCRSRound uint64 + pendingAgrs map[uint64]map[common.Hash]*types.AgreementResult + pendingBlocks map[uint64]map[common.Hash]*types.Block + logger common.Logger + confirmedBlocks map[common.Hash]struct{} + ctx context.Context + ctxCancel context.CancelFunc +} + +// newAgreement creates a new agreement instance. 
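+// The agreement instance is wired to the caller's channels and its run
+// method must be started in a separate goroutine. A construction sketch
+// (illustrative only, mirroring how syncer.Consensus wires it up; tipHeight,
+// cache, verifier and logger are assumed to be in scope):
+//
+//	recvCh := make(chan *types.Block, 1000)
+//	pullCh := make(chan common.Hash, 1000)
+//	agr := newAgreement(tipHeight, recvCh, pullCh, cache, verifier, logger)
+//	go agr.run()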
+func newAgreement(chainTip uint64, + ch chan<- *types.Block, pullChan chan<- common.Hash, + cache *utils.NodeSetCache, verifier *core.TSigVerifierCache, + logger common.Logger) *agreement { + a := &agreement{ + chainTip: chainTip, + cache: cache, + tsigVerifierCache: verifier, + inputChan: make(chan interface{}, 1000), + outputChan: ch, + pullChan: pullChan, + blocks: make(map[types.Position]map[common.Hash]*types.Block), + agreementResults: make(map[common.Hash][]byte), + logger: logger, + pendingAgrs: make( + map[uint64]map[common.Hash]*types.AgreementResult), + pendingBlocks: make( + map[uint64]map[common.Hash]*types.Block), + confirmedBlocks: make(map[common.Hash]struct{}), + } + a.ctx, a.ctxCancel = context.WithCancel(context.Background()) + return a +} + +// run starts the agreement, this does not start a new routine, go a new +// routine explicitly in the caller. +func (a *agreement) run() { + defer a.ctxCancel() + for { + select { + case val, ok := <-a.inputChan: + if !ok { + // InputChan is closed by network when network ends. + return + } + switch v := val.(type) { + case *types.Block: + if v.Position.Round >= core.DKGDelayRound && v.IsFinalized() { + a.processFinalizedBlock(v) + } else { + a.processBlock(v) + } + case *types.AgreementResult: + a.processAgreementResult(v) + case uint64: + a.processNewCRS(v) + } + } + } +} + +func (a *agreement) processBlock(b *types.Block) { + if _, exist := a.confirmedBlocks[b.Hash]; exist { + return + } + if rand, exist := a.agreementResults[b.Hash]; exist { + if len(b.Randomness) == 0 { + b.Randomness = rand + } + a.confirm(b) + } else { + if _, exist := a.blocks[b.Position]; !exist { + a.blocks[b.Position] = make(map[common.Hash]*types.Block) + } + a.blocks[b.Position][b.Hash] = b + } +} + +func (a *agreement) processFinalizedBlock(block *types.Block) { + // Cache those results that CRS is not ready yet. + if _, exists := a.confirmedBlocks[block.Hash]; exists { + a.logger.Trace("finalized block already confirmed", "block", block) + return + } + if block.Position.Round > a.latestCRSRound { + pendingsForRound, exists := a.pendingBlocks[block.Position.Round] + if !exists { + pendingsForRound = make(map[common.Hash]*types.Block) + a.pendingBlocks[block.Position.Round] = pendingsForRound + } + pendingsForRound[block.Hash] = block + a.logger.Trace("finalized block cached", "block", block) + return + } + if err := utils.VerifyBlockSignature(block); err != nil { + return + } + verifier, ok, err := a.tsigVerifierCache.UpdateAndGet( + block.Position.Round) + if err != nil { + a.logger.Error("error verifying block randomness", + "block", block, + "error", err) + return + } + if !ok { + a.logger.Error("cannot verify block randomness", "block", block) + return + } + if !verifier.VerifySignature(block.Hash, crypto.Signature{ + Type: "bls", + Signature: block.Randomness, + }) { + a.logger.Error("incorrect block randomness", "block", block) + return + } + a.confirm(block) +} + +func (a *agreement) processAgreementResult(r *types.AgreementResult) { + // Cache those results that CRS is not ready yet. 
+ if _, exists := a.confirmedBlocks[r.BlockHash]; exists { + a.logger.Trace("Agreement result already confirmed", "result", r) + return + } + if r.Position.Round > a.latestCRSRound { + pendingsForRound, exists := a.pendingAgrs[r.Position.Round] + if !exists { + pendingsForRound = make(map[common.Hash]*types.AgreementResult) + a.pendingAgrs[r.Position.Round] = pendingsForRound + } + pendingsForRound[r.BlockHash] = r + a.logger.Trace("Agreement result cached", "result", r) + return + } + if err := core.VerifyAgreementResult(r, a.cache); err != nil { + a.logger.Error("Agreement result verification failed", + "result", r, + "error", err) + return + } + if r.Position.Round >= core.DKGDelayRound { + verifier, ok, err := a.tsigVerifierCache.UpdateAndGet(r.Position.Round) + if err != nil { + a.logger.Error("error verifying agreement result randomness", + "result", r, + "error", err) + return + } + if !ok { + a.logger.Error("cannot verify agreement result randomness", "result", r) + return + } + if !verifier.VerifySignature(r.BlockHash, crypto.Signature{ + Type: "bls", + Signature: r.Randomness, + }) { + a.logger.Error("incorrect agreement result randomness", "result", r) + return + } + } else { + // Special case for rounds before DKGDelayRound. + if bytes.Compare(r.Randomness, core.NoRand) != 0 { + a.logger.Error("incorrect agreement result randomness", "result", r) + return + } + } + if r.IsEmptyBlock { + b := &types.Block{ + Position: r.Position, + Randomness: r.Randomness, + } + // Empty blocks should be confirmed directly, they won't be sent over + // the wire. + a.confirm(b) + return + } + if bs, exist := a.blocks[r.Position]; exist { + if b, exist := bs[r.BlockHash]; exist { + b.Randomness = r.Randomness + a.confirm(b) + return + } + } + a.agreementResults[r.BlockHash] = r.Randomness +loop: + for { + select { + case a.pullChan <- r.BlockHash: + break loop + case <-a.ctx.Done(): + a.logger.Error("Pull request is not sent", + "position", &r.Position, + "hash", r.BlockHash.String()[:6]) + return + case <-time.After(500 * time.Millisecond): + a.logger.Debug("Pull request is unable to send", + "position", &r.Position, + "hash", r.BlockHash.String()[:6]) + } + } +} + +func (a *agreement) processNewCRS(round uint64) { + if round <= a.latestCRSRound { + return + } + prevRound := a.latestCRSRound + 1 + a.latestCRSRound = round + // Verify all pending results. + for r := prevRound; r <= a.latestCRSRound; r++ { + pendingsForRound := a.pendingAgrs[r] + if pendingsForRound == nil { + continue + } + delete(a.pendingAgrs, r) + for _, res := range pendingsForRound { + if err := core.VerifyAgreementResult(res, a.cache); err != nil { + a.logger.Error("Invalid agreement result", + "result", res, + "error", err) + continue + } + a.logger.Error("Flush agreement result", "result", res) + a.processAgreementResult(res) + break + } + } +} + +// confirm notifies consensus the confirmation of a block in BA. 
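+// The block must already carry its randomness (IsFinalized reports true),
+// otherwise confirm panics; callers attach the randomness first, e.g.
+// (illustrative only; result stands for a verified *types.AgreementResult):
+//
+//	b.Randomness = result.Randomness
+//	a.confirm(b)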
+func (a *agreement) confirm(b *types.Block) { + if !b.IsFinalized() { + panic(fmt.Errorf("confirm a block %s without randomness", b)) + } + if _, exist := a.confirmedBlocks[b.Hash]; !exist { + delete(a.blocks, b.Position) + delete(a.agreementResults, b.Hash) + loop: + for { + select { + case a.outputChan <- b: + break loop + case <-a.ctx.Done(): + a.logger.Error("Confirmed block is not sent", "block", b) + return + case <-time.After(500 * time.Millisecond): + a.logger.Debug("Agreement output channel is full", "block", b) + } + } + a.confirmedBlocks[b.Hash] = struct{}{} + } + if b.Position.Height > a.chainTip+1 { + if _, exist := a.confirmedBlocks[b.ParentHash]; !exist { + a.pullChan <- b.ParentHash + } + } +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/consensus.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/consensus.go new file mode 100644 index 000000000..d12dc4863 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/consensus.go @@ -0,0 +1,543 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package syncer + +import ( + "context" + "fmt" + "sort" + "sync" + "time" + + "github.com/byzantine-lab/dexon-consensus/common" + "github.com/byzantine-lab/dexon-consensus/core" + "github.com/byzantine-lab/dexon-consensus/core/crypto" + "github.com/byzantine-lab/dexon-consensus/core/db" + "github.com/byzantine-lab/dexon-consensus/core/types" + "github.com/byzantine-lab/dexon-consensus/core/utils" +) + +var ( + // ErrAlreadySynced is reported when syncer is synced. + ErrAlreadySynced = fmt.Errorf("already synced") + // ErrNotSynced is reported when syncer is not synced yet. + ErrNotSynced = fmt.Errorf("not synced yet") + // ErrGenesisBlockReached is reported when genesis block reached. + ErrGenesisBlockReached = fmt.Errorf("genesis block reached") + // ErrInvalidBlockOrder is reported when SyncBlocks receives unordered + // blocks. + ErrInvalidBlockOrder = fmt.Errorf("invalid block order") + // ErrInvalidSyncingHeight raised when the blocks to sync is not following + // the compaction chain tip in database. + ErrInvalidSyncingHeight = fmt.Errorf("invalid syncing height") +) + +// Consensus is for syncing consensus module. +type Consensus struct { + db db.Database + gov core.Governance + dMoment time.Time + logger common.Logger + app core.Application + prv crypto.PrivateKey + network core.Network + nodeSetCache *utils.NodeSetCache + tsigVerifier *core.TSigVerifierCache + + blocks types.BlocksByPosition + agreementModule *agreement + agreementRoundCut uint64 + heightEvt *common.Event + roundEvt *utils.RoundEvent + + // lock for accessing all fields. 
+ lock sync.RWMutex + duringBuffering bool + latestCRSRound uint64 + waitGroup sync.WaitGroup + agreementWaitGroup sync.WaitGroup + pullChan chan common.Hash + receiveChan chan *types.Block + ctx context.Context + ctxCancel context.CancelFunc + syncedLastBlock *types.Block + syncedConsensus *core.Consensus + syncedSkipNext bool + dummyCancel context.CancelFunc + dummyFinished <-chan struct{} + dummyMsgBuffer []types.Msg + initChainTipHeight uint64 +} + +// NewConsensus creates an instance for Consensus (syncer consensus). +func NewConsensus( + initHeight uint64, + dMoment time.Time, + app core.Application, + gov core.Governance, + db db.Database, + network core.Network, + prv crypto.PrivateKey, + logger common.Logger) *Consensus { + + con := &Consensus{ + dMoment: dMoment, + app: app, + gov: gov, + db: db, + network: network, + nodeSetCache: utils.NewNodeSetCache(gov), + tsigVerifier: core.NewTSigVerifierCache(gov, 7), + prv: prv, + logger: logger, + receiveChan: make(chan *types.Block, 1000), + pullChan: make(chan common.Hash, 1000), + heightEvt: common.NewEvent(), + } + con.ctx, con.ctxCancel = context.WithCancel(context.Background()) + _, con.initChainTipHeight = db.GetCompactionChainTipInfo() + con.agreementModule = newAgreement( + con.initChainTipHeight, + con.receiveChan, + con.pullChan, + con.nodeSetCache, + con.tsigVerifier, + con.logger) + con.agreementWaitGroup.Add(1) + go func() { + defer con.agreementWaitGroup.Done() + con.agreementModule.run() + }() + if err := con.deliverPendingBlocks(initHeight); err != nil { + panic(err) + } + return con +} + +func (con *Consensus) deliverPendingBlocks(height uint64) error { + if height >= con.initChainTipHeight { + return nil + } + blocks := make([]*types.Block, 0, con.initChainTipHeight-height) + hash, _ := con.db.GetCompactionChainTipInfo() + for { + block, err := con.db.GetBlock(hash) + if err != nil { + return err + } + if block.Position.Height == height { + break + } + blocks = append(blocks, &block) + hash = block.ParentHash + } + sort.Sort(types.BlocksByPosition(blocks)) + for _, b := range blocks { + con.logger.Debug("Syncer BlockConfirmed", "block", b) + con.app.BlockConfirmed(*b) + con.logger.Debug("Syncer BlockDelivered", "block", b) + con.app.BlockDelivered(b.Hash, b.Position, b.Randomness) + } + return nil +} + +func (con *Consensus) assureBuffering() { + if func() bool { + con.lock.RLock() + defer con.lock.RUnlock() + return con.duringBuffering + }() { + return + } + con.lock.Lock() + defer con.lock.Unlock() + if con.duringBuffering { + return + } + con.duringBuffering = true + // Get latest block to prepare utils.RoundEvent. + var ( + err error + blockHash, height = con.db.GetCompactionChainTipInfo() + ) + if height == 0 { + con.roundEvt, err = utils.NewRoundEvent(con.ctx, con.gov, con.logger, + types.Position{}, core.ConfigRoundShift) + } else { + var b types.Block + if b, err = con.db.GetBlock(blockHash); err == nil { + con.roundEvt, err = utils.NewRoundEvent(con.ctx, con.gov, + con.logger, b.Position, core.ConfigRoundShift) + } + } + if err != nil { + panic(err) + } + // Make sure con.roundEvt stopped before stopping con.agreementModule. + con.waitGroup.Add(1) + // Register a round event handler to reset node set cache, this handler + // should be the highest priority. 
+ con.roundEvt.Register(func(evts []utils.RoundEventParam) { + for _, e := range evts { + if e.Reset == 0 { + continue + } + con.nodeSetCache.Purge(e.Round + 1) + con.tsigVerifier.Purge(e.Round + 1) + } + }) + // Register a round event handler to notify CRS to agreementModule. + con.roundEvt.Register(func(evts []utils.RoundEventParam) { + con.waitGroup.Add(1) + go func() { + defer con.waitGroup.Done() + for _, e := range evts { + select { + case <-con.ctx.Done(): + return + default: + } + for func() bool { + select { + case <-con.ctx.Done(): + return false + case con.agreementModule.inputChan <- e.Round: + return false + case <-time.After(500 * time.Millisecond): + con.logger.Warn( + "Agreement input channel is full when notifying new round", + "round", e.Round, + ) + return true + } + }() { + } + } + }() + }) + // Register a round event handler to validate next round. + con.roundEvt.Register(func(evts []utils.RoundEventParam) { + con.heightEvt.RegisterHeight( + evts[len(evts)-1].NextRoundValidationHeight(), + utils.RoundEventRetryHandlerGenerator(con.roundEvt, con.heightEvt), + ) + }) + con.roundEvt.TriggerInitEvent() + con.startAgreement() + con.startNetwork() +} + +func (con *Consensus) checkIfSynced(blocks []*types.Block) (synced bool) { + con.lock.RLock() + defer con.lock.RUnlock() + defer func() { + con.logger.Debug("Syncer synced status", + "last-block", blocks[len(blocks)-1], + "synced", synced, + ) + }() + if len(con.blocks) == 0 || len(blocks) == 0 { + return + } + synced = !blocks[len(blocks)-1].Position.Older(con.blocks[0].Position) + return +} + +func (con *Consensus) buildAllEmptyBlocks() { + con.lock.Lock() + defer con.lock.Unlock() + // Clean empty blocks on tips of chains. + for len(con.blocks) > 0 && con.isEmptyBlock(con.blocks[0]) { + con.blocks = con.blocks[1:] + } + // Build empty blocks. + for i, b := range con.blocks { + if con.isEmptyBlock(b) { + if con.blocks[i-1].Position.Height+1 == b.Position.Height { + con.buildEmptyBlock(b, con.blocks[i-1]) + } + } + } +} + +// ForceSync forces syncer to become synced. +func (con *Consensus) ForceSync(lastPos types.Position, skip bool) { + if con.syncedLastBlock != nil { + return + } + hash, height := con.db.GetCompactionChainTipInfo() + if height < lastPos.Height { + panic(fmt.Errorf("compaction chain not synced height %d, tip %d", + lastPos.Height, height)) + } else if height > lastPos.Height { + skip = false + } + block, err := con.db.GetBlock(hash) + if err != nil { + panic(err) + } + con.syncedLastBlock = &block + con.stopBuffering() + // We might call stopBuffering without calling assureBuffering. + if con.dummyCancel == nil { + con.dummyCancel, con.dummyFinished = utils.LaunchDummyReceiver( + context.Background(), con.network.ReceiveChan(), + func(msg types.Msg) { + con.dummyMsgBuffer = append(con.dummyMsgBuffer, msg) + }) + } + con.syncedSkipNext = skip + con.logger.Info("Force Sync", "block", &block, "skip", skip) +} + +// SyncBlocks syncs blocks from compaction chain, latest is true if the caller +// regards the blocks are the latest ones. Notice that latest can be true for +// many times. +// NOTICE: parameter "blocks" should be consecutive in compaction height. +// NOTICE: this method is not expected to be called concurrently. 
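+//
+// A sketch of the expected call pattern (illustrative only; blocks is
+// assumed to be a consecutive, verified slice following the current
+// compaction-chain tip):
+//
+//	synced, err := con.SyncBlocks(blocks, true)
+//	if err == nil && synced {
+//		c, err := con.GetSyncedConsensus()
+//		_, _ = c, err
+//	}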
+func (con *Consensus) SyncBlocks(
+	blocks []*types.Block, latest bool) (synced bool, err error) {
+	defer func() {
+		con.logger.Debug("SyncBlocks returned",
+			"synced", synced,
+			"error", err,
+			"last-block", con.syncedLastBlock,
+		)
+	}()
+	if con.syncedLastBlock != nil {
+		synced, err = true, ErrAlreadySynced
+		return
+	}
+	if len(blocks) == 0 {
+		return
+	}
+	// Check if blocks are consecutive.
+	for i := 1; i < len(blocks); i++ {
+		if blocks[i].Position.Height != blocks[i-1].Position.Height+1 {
+			err = ErrInvalidBlockOrder
+			return
+		}
+	}
+	// Make sure the first block is the next block of the current compaction
+	// chain tip in DB.
+	_, tipHeight := con.db.GetCompactionChainTipInfo()
+	if blocks[0].Position.Height != tipHeight+1 {
+		con.logger.Error("Mismatched block height",
+			"now", blocks[0].Position.Height,
+			"expected", tipHeight+1,
+		)
+		err = ErrInvalidSyncingHeight
+		return
+	}
+	con.logger.Trace("SyncBlocks",
+		"position", &blocks[0].Position,
+		"len", len(blocks),
+		"latest", latest,
+	)
+	for _, b := range blocks {
+		if err = con.db.PutBlock(*b); err != nil {
+			// A block might be put into db when confirmed by BA, but not
+			// finalized yet.
+			if err == db.ErrBlockExists {
+				err = con.db.UpdateBlock(*b)
+			}
+			if err != nil {
+				return
+			}
+		}
+		if err = con.db.PutCompactionChainTipInfo(
+			b.Hash, b.Position.Height); err != nil {
+			return
+		}
+		con.heightEvt.NotifyHeight(b.Position.Height)
+	}
+	if latest {
+		con.assureBuffering()
+		con.buildAllEmptyBlocks()
+		// Check if compaction and agreements' blocks are overlapped. The
+		// overlapping of the compaction chain and BA's oldest blocks means
+		// the syncing is done.
+		if con.checkIfSynced(blocks) {
+			con.stopBuffering()
+			con.syncedLastBlock = blocks[len(blocks)-1]
+			synced = true
+		}
+	}
+	return
+}
+
+// GetSyncedConsensus returns the core.Consensus instance once synced.
+func (con *Consensus) GetSyncedConsensus() (*core.Consensus, error) {
+	con.lock.Lock()
+	defer con.lock.Unlock()
+	if con.syncedConsensus != nil {
+		return con.syncedConsensus, nil
+	}
+	if con.syncedLastBlock == nil {
+		return nil, ErrNotSynced
+	}
+	// Flush all blocks in con.blocks into core.Consensus, and build
+	// core.Consensus from the syncer.
+	con.dummyCancel()
+	<-con.dummyFinished
+	var err error
+	con.syncedConsensus, err = core.NewConsensusFromSyncer(
+		con.syncedLastBlock,
+		con.syncedSkipNext,
+		con.dMoment,
+		con.app,
+		con.gov,
+		con.db,
+		con.network,
+		con.prv,
+		con.blocks,
+		con.dummyMsgBuffer,
+		con.logger)
+	return con.syncedConsensus, err
+}
+
+// stopBuffering stops the syncer buffering routines.
+//
+// This method is mainly for the caller to stop the syncer before it is
+// synced; the syncer calls this method automatically after becoming synced.
+func (con *Consensus) stopBuffering() {
+	if func() (notBuffering bool) {
+		con.lock.RLock()
+		defer con.lock.RUnlock()
+		notBuffering = !con.duringBuffering
+		return
+	}() {
+		return
+	}
+	if func() (alreadyCanceled bool) {
+		con.lock.Lock()
+		defer con.lock.Unlock()
+		if !con.duringBuffering {
+			alreadyCanceled = true
+			return
+		}
+		con.duringBuffering = false
+		con.logger.Trace("Syncer is about to stop")
+		// Stop network and CRS routines, wait until they are all stopped.
+		con.ctxCancel()
+		return
+	}() {
+		return
+	}
+	con.logger.Trace("Stop syncer modules")
+	con.roundEvt.Stop()
+	con.waitGroup.Done()
+	// Wait until all routines that depend on con.agreementModule have stopped.
+ con.waitGroup.Wait() + // Since there is no one waiting for the receive channel of fullnode, we + // need to launch a dummy receiver right away. + con.dummyCancel, con.dummyFinished = utils.LaunchDummyReceiver( + context.Background(), con.network.ReceiveChan(), + func(msg types.Msg) { + con.dummyMsgBuffer = append(con.dummyMsgBuffer, msg) + }) + // Stop agreements. + con.logger.Trace("Stop syncer agreement modules") + con.stopAgreement() + con.logger.Trace("Syncer stopped") + return +} + +// isEmptyBlock checks if a block is an empty block by both its hash and parent +// hash are empty. +func (con *Consensus) isEmptyBlock(b *types.Block) bool { + return b.Hash == common.Hash{} && b.ParentHash == common.Hash{} +} + +// buildEmptyBlock builds an empty block in agreement. +func (con *Consensus) buildEmptyBlock(b *types.Block, parent *types.Block) { + cfg := utils.GetConfigWithPanic(con.gov, b.Position.Round, con.logger) + b.Timestamp = parent.Timestamp.Add(cfg.MinBlockInterval) + b.Witness.Height = parent.Witness.Height + b.Witness.Data = make([]byte, len(parent.Witness.Data)) + copy(b.Witness.Data, parent.Witness.Data) +} + +// startAgreement starts agreements for receiving votes and agreements. +func (con *Consensus) startAgreement() { + // Start a routine for listening receive channel and pull block channel. + go func() { + for { + select { + case b, ok := <-con.receiveChan: + if !ok { + return + } + func() { + con.lock.Lock() + defer con.lock.Unlock() + if len(con.blocks) > 0 && + !b.Position.Newer(con.blocks[0].Position) { + return + } + con.blocks = append(con.blocks, b) + sort.Sort(con.blocks) + }() + case h, ok := <-con.pullChan: + if !ok { + return + } + con.network.PullBlocks(common.Hashes{h}) + } + } + }() +} + +// startNetwork starts network for receiving blocks and agreement results. +func (con *Consensus) startNetwork() { + con.waitGroup.Add(1) + go func() { + defer con.waitGroup.Done() + loop: + for { + select { + case val := <-con.network.ReceiveChan(): + switch v := val.Payload.(type) { + case *types.Block: + case *types.AgreementResult: + // Avoid byzantine nodes attack by broadcasting older + // agreement results. Normal nodes might report 'synced' + // while still fall behind other nodes. + if v.Position.Height <= con.initChainTipHeight { + continue loop + } + default: + continue loop + } + con.agreementModule.inputChan <- val.Payload + case <-con.ctx.Done(): + break loop + } + } + }() +} + +func (con *Consensus) stopAgreement() { + if con.agreementModule.inputChan != nil { + close(con.agreementModule.inputChan) + } + con.agreementWaitGroup.Wait() + con.agreementModule.inputChan = nil + close(con.receiveChan) + close(con.pullChan) +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/watch-cat.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/watch-cat.go new file mode 100644 index 000000000..e5ba911a7 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/watch-cat.go @@ -0,0 +1,156 @@ +// Copyright 2019 The dexon-consensus Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. 
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package syncer
+
+import (
+	"context"
+	"time"
+
+	"github.com/byzantine-lab/dexon-consensus/common"
+	"github.com/byzantine-lab/dexon-consensus/core"
+	"github.com/byzantine-lab/dexon-consensus/core/types"
+	"github.com/byzantine-lab/dexon-consensus/core/utils"
+)
+
+type configReader interface {
+	Configuration(round uint64) *types.Config
+}
+
+// WatchCat is responsible for signaling whether the syncer object should be
+// terminated.
+type WatchCat struct {
+	recovery     core.Recovery
+	timeout      time.Duration
+	configReader configReader
+	feed         chan types.Position
+	lastPosition types.Position
+	polling      time.Duration
+	ctx          context.Context
+	cancel       context.CancelFunc
+	logger       common.Logger
}

+// NewWatchCat creates a new WatchCat 🐱 object.
+func NewWatchCat(
+	recovery core.Recovery,
+	configReader configReader,
+	polling time.Duration,
+	timeout time.Duration,
+	logger common.Logger) *WatchCat {
+	wc := &WatchCat{
+		recovery:     recovery,
+		timeout:      timeout,
+		configReader: configReader,
+		feed:         make(chan types.Position),
+		polling:      polling,
+		logger:       logger,
+	}
+	return wc
+}
+
+// Feed the WatchCat so it won't produce the termination signal.
+func (wc *WatchCat) Feed(position types.Position) {
+	wc.feed <- position
+}
+
+// Start the WatchCat.
+func (wc *WatchCat) Start() {
+	wc.Stop()
+	wc.lastPosition = types.Position{}
+	wc.ctx, wc.cancel = context.WithCancel(context.Background())
+	go func() {
+		var lastPos types.Position
+	MonitorLoop:
+		for {
+			select {
+			case <-wc.ctx.Done():
+				return
+			default:
+			}
+			select {
+			case <-wc.ctx.Done():
+				return
+			case pos := <-wc.feed:
+				if !pos.Newer(lastPos) {
+					wc.logger.Warn("Feed with older height",
+						"pos", pos, "lastPos", lastPos)
+					continue
+				}
+				lastPos = pos
+			case <-time.After(wc.timeout):
+				break MonitorLoop
+			}
+		}
+		go func() {
+			for {
+				select {
+				case <-wc.ctx.Done():
+					return
+				case <-wc.feed:
+				}
+			}
+		}()
+		defer wc.cancel()
+		proposed := false
+		threshold := uint64(
+			utils.GetConfigWithPanic(wc.configReader, lastPos.Round, wc.logger).
+				NotarySetSize / 2)
+		wc.logger.Info("Threshold for recovery", "votes", threshold)
+	ResetLoop:
+		for {
+			if !proposed {
+				wc.logger.Info("Calling Recovery.ProposeSkipBlock",
+					"height", lastPos.Height)
+				if err := wc.recovery.ProposeSkipBlock(lastPos.Height); err != nil {
+					wc.logger.Warn("Failed to proposeSkipBlock", "height", lastPos.Height, "error", err)
+				} else {
+					proposed = true
+				}
+			}
+			votes, err := wc.recovery.Votes(lastPos.Height)
+			if err != nil {
+				wc.logger.Error("Failed to get recovery votes", "height", lastPos.Height, "error", err)
+			} else if votes > threshold {
+				wc.logger.Info("Threshold for recovery reached!")
+				wc.lastPosition = lastPos
+				break ResetLoop
+			}
+			select {
+			case <-wc.ctx.Done():
+				return
+			case <-time.After(wc.polling):
+			}
+		}
+	}()
+}
+
+// Stop the WatchCat.
+func (wc *WatchCat) Stop() {
+	if wc.cancel != nil {
+		wc.cancel()
+	}
+}
+
+// Meow returns a channel that is closed when the syncer should be terminated.
+func (wc *WatchCat) Meow() <-chan struct{} {
+	return wc.ctx.Done()
+}
+
+// LastPosition returns the last position for recovery.
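+//
+// A typical WatchCat lifecycle (illustrative only; recovery, gov,
+// pollInterval, timeout and logger are assumptions of this sketch, with gov
+// satisfying the configReader interface):
+//
+//	wc := NewWatchCat(recovery, gov, pollInterval, timeout, logger)
+//	wc.Start()
+//	wc.Feed(block.Position) // on every synced block
+//	<-wc.Meow()             // closed when syncing appears stuck
+//	last := wc.LastPosition()
+//	_ = last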
+func (wc *WatchCat) LastPosition() types.Position {
+	return wc.lastPosition
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/ticker.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/ticker.go
new file mode 100644
index 000000000..aba56ef9f
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/ticker.go
@@ -0,0 +1,127 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/byzantine-lab/dexon-consensus/core/utils"
+)
+
+// TickerType is the type of ticker.
+type TickerType int
+
+// TickerType enum.
+const (
+	TickerBA TickerType = iota
+	TickerDKG
+	TickerCRS
+)
+
+// defaultTicker is a wrapper to implement ticker interface based on
+// time.Ticker.
+type defaultTicker struct {
+	ticker     *time.Ticker
+	tickerChan chan time.Time
+	duration   time.Duration
+	ctx        context.Context
+	ctxCancel  context.CancelFunc
+	waitGroup  sync.WaitGroup
+}
+
+// newDefaultTicker constructs a defaultTicker instance from a given interval.
+func newDefaultTicker(lambda time.Duration) *defaultTicker {
+	ticker := &defaultTicker{duration: lambda}
+	ticker.init()
+	return ticker
+}
+
+// Tick implements Tick method of ticker interface.
+func (t *defaultTicker) Tick() <-chan time.Time {
+	return t.tickerChan
+}
+
+// Stop implements Stop method of ticker interface.
+func (t *defaultTicker) Stop() {
+	t.ticker.Stop()
+	t.ctxCancel()
+	t.waitGroup.Wait()
+	t.ctx = nil
+	t.ctxCancel = nil
+	close(t.tickerChan)
+	t.tickerChan = nil
+}
+
+// Restart implements Restart method of ticker interface.
+func (t *defaultTicker) Restart() {
+	t.Stop()
+	t.init()
+}
+
+func (t *defaultTicker) init() {
+	t.ticker = time.NewTicker(t.duration)
+	t.tickerChan = make(chan time.Time)
+	t.ctx, t.ctxCancel = context.WithCancel(context.Background())
+	t.waitGroup.Add(1)
+	go t.monitor()
+}
+
+func (t *defaultTicker) monitor() {
+	defer t.waitGroup.Done()
+loop:
+	for {
+		select {
+		case <-t.ctx.Done():
+			break loop
+		case v := <-t.ticker.C:
+			select {
+			case t.tickerChan <- v:
+			default:
+			}
+		}
+	}
+}
+
+// newTicker is a helper to set up a ticker from a given Governance. If the
+// governance object implements a ticker generator, a ticker from that
+// generator is returned; otherwise a default one is constructed.
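+//
+// Illustrative sketch (not part of the original source): a Governance
+// implementation may supply custom tickers by exposing the generator method
+// probed below, e.g.
+//
+//	type tickerGov struct{ Governance } // hypothetical wrapper
+//
+//	func (g tickerGov) NewTicker(TickerType) Ticker {
+//		return newDefaultTicker(time.Millisecond)
+//	}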
+func newTicker(gov Governance, round uint64, tickerType TickerType) (t Ticker) { + type tickerGenerator interface { + NewTicker(TickerType) Ticker + } + + if gen, ok := gov.(tickerGenerator); ok { + t = gen.NewTicker(tickerType) + } + if t == nil { + var duration time.Duration + switch tickerType { + case TickerBA: + duration = utils.GetConfigWithPanic(gov, round, nil).LambdaBA + case TickerDKG: + duration = utils.GetConfigWithPanic(gov, round, nil).LambdaDKG + default: + panic(fmt.Errorf("unknown ticker type: %d", tickerType)) + } + t = newDefaultTicker(duration) + } + return +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/block-randomness.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/block-randomness.go new file mode 100644 index 000000000..b97188705 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/block-randomness.go @@ -0,0 +1,44 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package types + +import ( + "encoding/hex" + "fmt" + + "github.com/byzantine-lab/dexon-consensus/common" +) + +// AgreementResult describes an agremeent result. +type AgreementResult struct { + BlockHash common.Hash `json:"block_hash"` + Position Position `json:"position"` + Votes []Vote `json:"votes"` + IsEmptyBlock bool `json:"is_empty_block"` + Randomness []byte `json:"randomness"` +} + +func (r *AgreementResult) String() string { + if len(r.Randomness) == 0 { + return fmt.Sprintf("agreementResult{Block:%s Pos:%s}", + r.BlockHash.String()[:6], r.Position) + } + return fmt.Sprintf("agreementResult{Block:%s Pos:%s Rand:%s}", + r.BlockHash.String()[:6], r.Position, + hex.EncodeToString(r.Randomness)[:6]) +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/block.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/block.go new file mode 100644 index 000000000..bc92211b9 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/block.go @@ -0,0 +1,227 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . 
+ +// TODO(jimmy-dexon): remove comments of WitnessAck before open source. + +package types + +import ( + "bytes" + "fmt" + "io" + "time" + + "github.com/byzantine-lab/go-tangerine/rlp" + + "github.com/byzantine-lab/dexon-consensus/common" + "github.com/byzantine-lab/dexon-consensus/core/crypto" +) + +// GenesisHeight refers to the initial height the genesis block should be. +const GenesisHeight uint64 = 1 + +// BlockVerifyStatus is the return code for core.Application.VerifyBlock +type BlockVerifyStatus int + +// Enums for return value of core.Application.VerifyBlock. +const ( + // VerifyOK: Block is verified. + VerifyOK BlockVerifyStatus = iota + // VerifyRetryLater: Block is unable to be verified at this moment. + // Try again later. + VerifyRetryLater + // VerifyInvalidBlock: Block is an invalid one. + VerifyInvalidBlock +) + +type rlpTimestamp struct { + time.Time +} + +func (t *rlpTimestamp) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, uint64(t.UTC().UnixNano())) +} + +func (t *rlpTimestamp) DecodeRLP(s *rlp.Stream) error { + var nano uint64 + err := s.Decode(&nano) + if err == nil { + sec := int64(nano) / 1000000000 + nsec := int64(nano) % 1000000000 + t.Time = time.Unix(sec, nsec).UTC() + } + return err +} + +// Witness represents the consensus information on the compaction chain. +type Witness struct { + Height uint64 `json:"height"` + Data []byte `json:"data"` +} + +// Block represents a single event broadcasted on the network. +type Block struct { + ProposerID NodeID `json:"proposer_id"` + ParentHash common.Hash `json:"parent_hash"` + Hash common.Hash `json:"hash"` + Position Position `json:"position"` + Timestamp time.Time `json:"timestamp"` + Payload []byte `json:"payload"` + PayloadHash common.Hash `json:"payload_hash"` + Witness Witness `json:"witness"` + Randomness []byte `json:"randomness"` + Signature crypto.Signature `json:"signature"` + + CRSSignature crypto.Signature `json:"crs_signature"` +} + +type rlpBlock struct { + ProposerID NodeID + ParentHash common.Hash + Hash common.Hash + Position Position + Timestamp *rlpTimestamp + Payload []byte + PayloadHash common.Hash + Witness *Witness + Randomness []byte + Signature crypto.Signature + + CRSSignature crypto.Signature +} + +// EncodeRLP implements rlp.Encoder +func (b *Block) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, rlpBlock{ + ProposerID: b.ProposerID, + ParentHash: b.ParentHash, + Hash: b.Hash, + Position: b.Position, + Timestamp: &rlpTimestamp{b.Timestamp}, + Payload: b.Payload, + PayloadHash: b.PayloadHash, + Witness: &b.Witness, + Randomness: b.Randomness, + Signature: b.Signature, + CRSSignature: b.CRSSignature, + }) +} + +// DecodeRLP implements rlp.Decoder +func (b *Block) DecodeRLP(s *rlp.Stream) error { + var dec rlpBlock + err := s.Decode(&dec) + if err == nil { + *b = Block{ + ProposerID: dec.ProposerID, + ParentHash: dec.ParentHash, + Hash: dec.Hash, + Position: dec.Position, + Timestamp: dec.Timestamp.Time, + Payload: dec.Payload, + PayloadHash: dec.PayloadHash, + Witness: *dec.Witness, + Randomness: dec.Randomness, + Signature: dec.Signature, + CRSSignature: dec.CRSSignature, + } + } + return err +} + +func (b *Block) String() string { + return fmt.Sprintf("Block{Hash:%v %s}", b.Hash.String()[:6], b.Position) +} + +// Clone returns a deep copy of a block. 
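+// Slice-typed fields (Payload, Randomness, Witness.Data) are duplicated via
+// common.CopyBytes, so the copy shares no mutable state with the receiver.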
+func (b *Block) Clone() (bcopy *Block) { + bcopy = &Block{} + bcopy.ProposerID = b.ProposerID + bcopy.ParentHash = b.ParentHash + bcopy.Hash = b.Hash + bcopy.Position.Round = b.Position.Round + bcopy.Position.Height = b.Position.Height + bcopy.Signature = b.Signature.Clone() + bcopy.CRSSignature = b.CRSSignature.Clone() + bcopy.Witness.Height = b.Witness.Height + bcopy.Witness.Data = common.CopyBytes(b.Witness.Data) + bcopy.Timestamp = b.Timestamp + bcopy.Payload = common.CopyBytes(b.Payload) + bcopy.PayloadHash = b.PayloadHash + bcopy.Randomness = common.CopyBytes(b.Randomness) + return +} + +// IsGenesis checks if the block is a genesisBlock +func (b *Block) IsGenesis() bool { + return b.Position.Height == GenesisHeight && b.ParentHash == common.Hash{} +} + +// IsFinalized checks if the block is finalized. +func (b *Block) IsFinalized() bool { + return len(b.Randomness) > 0 +} + +// IsEmpty checks if the block is an 'empty block'. +func (b *Block) IsEmpty() bool { + return b.ProposerID.Hash == common.Hash{} +} + +// ByHash is the helper type for sorting slice of blocks by hash. +type ByHash []*Block + +func (b ByHash) Len() int { + return len(b) +} + +func (b ByHash) Less(i int, j int) bool { + return bytes.Compare([]byte(b[i].Hash[:]), []byte(b[j].Hash[:])) == -1 +} + +func (b ByHash) Swap(i int, j int) { + b[i], b[j] = b[j], b[i] +} + +// BlocksByPosition is the helper type for sorting slice of blocks by position. +type BlocksByPosition []*Block + +// Len implements Len method in sort.Sort interface. +func (bs BlocksByPosition) Len() int { + return len(bs) +} + +// Less implements Less method in sort.Sort interface. +func (bs BlocksByPosition) Less(i int, j int) bool { + return bs[j].Position.Newer(bs[i].Position) +} + +// Swap implements Swap method in sort.Sort interface. +func (bs BlocksByPosition) Swap(i int, j int) { + bs[i], bs[j] = bs[j], bs[i] +} + +// Push implements Push method in heap interface. +func (bs *BlocksByPosition) Push(x interface{}) { + *bs = append(*bs, x.(*Block)) +} + +// Pop implements Pop method in heap interface. +func (bs *BlocksByPosition) Pop() (ret interface{}) { + n := len(*bs) + *bs, ret = (*bs)[0:n-1], (*bs)[n-1] + return +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/config.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/config.go new file mode 100644 index 000000000..dce38369e --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/config.go @@ -0,0 +1,75 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package types + +import ( + "encoding/binary" + "time" +) + +// Config stands for Current Configuration Parameters. +type Config struct { + // Lambda related. + LambdaBA time.Duration + LambdaDKG time.Duration + + // Set related. 
+ NotarySetSize uint32 + + // Time related. + RoundLength uint64 + MinBlockInterval time.Duration +} + +// Clone return a copied configuration. +func (c *Config) Clone() *Config { + return &Config{ + LambdaBA: c.LambdaBA, + LambdaDKG: c.LambdaDKG, + NotarySetSize: c.NotarySetSize, + RoundLength: c.RoundLength, + MinBlockInterval: c.MinBlockInterval, + } +} + +// Bytes returns []byte representation of Config. +func (c *Config) Bytes() []byte { + binaryLambdaBA := make([]byte, 8) + binary.LittleEndian.PutUint64( + binaryLambdaBA, uint64(c.LambdaBA.Nanoseconds())) + binaryLambdaDKG := make([]byte, 8) + binary.LittleEndian.PutUint64( + binaryLambdaDKG, uint64(c.LambdaDKG.Nanoseconds())) + + binaryNotarySetSize := make([]byte, 4) + binary.LittleEndian.PutUint32(binaryNotarySetSize, c.NotarySetSize) + + binaryRoundLength := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryRoundLength, c.RoundLength) + binaryMinBlockInterval := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryMinBlockInterval, + uint64(c.MinBlockInterval.Nanoseconds())) + + enc := make([]byte, 0, 40) + enc = append(enc, binaryLambdaBA...) + enc = append(enc, binaryLambdaDKG...) + enc = append(enc, binaryNotarySetSize...) + enc = append(enc, binaryRoundLength...) + enc = append(enc, binaryMinBlockInterval...) + return enc +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/dkg/dkg.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/dkg/dkg.go new file mode 100644 index 000000000..6c2b777cd --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/dkg/dkg.go @@ -0,0 +1,485 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package dkg + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + + "github.com/byzantine-lab/go-tangerine/rlp" + + "github.com/byzantine-lab/dexon-consensus/common" + "github.com/byzantine-lab/dexon-consensus/core/crypto" + cryptoDKG "github.com/byzantine-lab/dexon-consensus/core/crypto/dkg" + "github.com/byzantine-lab/dexon-consensus/core/types" +) + +// Errors for typesDKG package. +var ( + ErrNotReachThreshold = fmt.Errorf("threshold not reach") + ErrInvalidThreshold = fmt.Errorf("invalid threshold") +) + +// NewID creates a DKGID from NodeID. +func NewID(ID types.NodeID) cryptoDKG.ID { + return cryptoDKG.NewID(ID.Hash[:]) +} + +// PrivateShare describe a secret share in DKG protocol. +type PrivateShare struct { + ProposerID types.NodeID `json:"proposer_id"` + ReceiverID types.NodeID `json:"receiver_id"` + Round uint64 `json:"round"` + Reset uint64 `json:"reset"` + PrivateShare cryptoDKG.PrivateKey `json:"private_share"` + Signature crypto.Signature `json:"signature"` +} + +// Equal checks equality between two PrivateShare instances. 
+func (p *PrivateShare) Equal(other *PrivateShare) bool {
+	return p.ProposerID.Equal(other.ProposerID) &&
+		p.ReceiverID.Equal(other.ReceiverID) &&
+		p.Round == other.Round &&
+		p.Reset == other.Reset &&
+		p.Signature.Type == other.Signature.Type &&
+		bytes.Compare(p.Signature.Signature, other.Signature.Signature) == 0 &&
+		bytes.Compare(
+			p.PrivateShare.Bytes(), other.PrivateShare.Bytes()) == 0
+}
+
+// MasterPublicKey describes a master public key in DKG protocol.
+type MasterPublicKey struct {
+	ProposerID      types.NodeID              `json:"proposer_id"`
+	Round           uint64                    `json:"round"`
+	Reset           uint64                    `json:"reset"`
+	DKGID           cryptoDKG.ID              `json:"dkg_id"`
+	PublicKeyShares cryptoDKG.PublicKeyShares `json:"public_key_shares"`
+	Signature       crypto.Signature          `json:"signature"`
+}
+
+func (d *MasterPublicKey) String() string {
+	return fmt.Sprintf("MasterPublicKey{KP:%s Round:%d Reset:%d}",
+		d.ProposerID.String()[:6],
+		d.Round,
+		d.Reset)
+}
+
+// Equal checks equality of two DKG master public keys.
+func (d *MasterPublicKey) Equal(other *MasterPublicKey) bool {
+	return d.ProposerID.Equal(other.ProposerID) &&
+		d.Round == other.Round &&
+		d.Reset == other.Reset &&
+		d.DKGID.GetHexString() == other.DKGID.GetHexString() &&
+		d.PublicKeyShares.Equal(&other.PublicKeyShares) &&
+		d.Signature.Type == other.Signature.Type &&
+		bytes.Compare(d.Signature.Signature, other.Signature.Signature) == 0
+}
+
+type rlpMasterPublicKey struct {
+	ProposerID      types.NodeID
+	Round           uint64
+	Reset           uint64
+	DKGID           []byte
+	PublicKeyShares *cryptoDKG.PublicKeyShares
+	Signature       crypto.Signature
+}
+
+// EncodeRLP implements rlp.Encoder
+func (d *MasterPublicKey) EncodeRLP(w io.Writer) error {
+	return rlp.Encode(w, rlpMasterPublicKey{
+		ProposerID:      d.ProposerID,
+		Round:           d.Round,
+		Reset:           d.Reset,
+		DKGID:           d.DKGID.GetLittleEndian(),
+		PublicKeyShares: &d.PublicKeyShares,
+		Signature:       d.Signature,
+	})
+}
+
+// DecodeRLP implements rlp.Decoder
+func (d *MasterPublicKey) DecodeRLP(s *rlp.Stream) error {
+	var dec rlpMasterPublicKey
+	if err := s.Decode(&dec); err != nil {
+		return err
+	}
+
+	id, err := cryptoDKG.BytesID(dec.DKGID)
+	if err != nil {
+		return err
+	}
+
+	*d = MasterPublicKey{
+		ProposerID:      dec.ProposerID,
+		Round:           dec.Round,
+		Reset:           dec.Reset,
+		DKGID:           id,
+		PublicKeyShares: *dec.PublicKeyShares.Move(),
+		Signature:       dec.Signature,
+	}
+	return err
+}
+
+// NewMasterPublicKey returns a new MasterPublicKey instance.
+func NewMasterPublicKey() *MasterPublicKey {
+	return &MasterPublicKey{
+		PublicKeyShares: *cryptoDKG.NewEmptyPublicKeyShares(),
+	}
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (d *MasterPublicKey) UnmarshalJSON(data []byte) error {
+	type innerMasterPublicKey MasterPublicKey
+	d.PublicKeyShares = *cryptoDKG.NewEmptyPublicKeyShares()
+	return json.Unmarshal(data, (*innerMasterPublicKey)(d))
+}
+
+// Complaint describes a complaint in DKG protocol.
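+// A complaint whose embedded PrivateShare carries no signature is a "nack"
+// complaint (see IsNack), accusing the named proposer of not delivering a
+// valid share.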
+type Complaint struct { + ProposerID types.NodeID `json:"proposer_id"` + Round uint64 `json:"round"` + Reset uint64 `json:"reset"` + PrivateShare PrivateShare `json:"private_share"` + Signature crypto.Signature `json:"signature"` +} + +func (c *Complaint) String() string { + if c.IsNack() { + return fmt.Sprintf("DKGNackComplaint{CP:%s Round:%d Reset %d PSP:%s}", + c.ProposerID.String()[:6], c.Round, c.Reset, + c.PrivateShare.ProposerID.String()[:6]) + } + return fmt.Sprintf("DKGComplaint{CP:%s Round:%d Reset %d PrivateShare:%v}", + c.ProposerID.String()[:6], c.Round, c.Reset, c.PrivateShare) +} + +// Equal checks equality between two Complaint instances. +func (c *Complaint) Equal(other *Complaint) bool { + return c.ProposerID.Equal(other.ProposerID) && + c.Round == other.Round && + c.Reset == other.Reset && + c.PrivateShare.Equal(&other.PrivateShare) && + c.Signature.Type == other.Signature.Type && + bytes.Compare(c.Signature.Signature, other.Signature.Signature) == 0 +} + +type rlpComplaint struct { + ProposerID types.NodeID + Round uint64 + Reset uint64 + IsNack bool + PrivateShare []byte + Signature crypto.Signature +} + +// EncodeRLP implements rlp.Encoder +func (c *Complaint) EncodeRLP(w io.Writer) error { + if c.IsNack() { + return rlp.Encode(w, rlpComplaint{ + ProposerID: c.ProposerID, + Round: c.Round, + Reset: c.Reset, + IsNack: true, + PrivateShare: c.PrivateShare.ProposerID.Hash[:], + Signature: c.Signature, + }) + } + prvShare, err := rlp.EncodeToBytes(&c.PrivateShare) + if err != nil { + return err + } + return rlp.Encode(w, rlpComplaint{ + ProposerID: c.ProposerID, + Round: c.Round, + Reset: c.Reset, + IsNack: false, + PrivateShare: prvShare, + Signature: c.Signature, + }) +} + +// DecodeRLP implements rlp.Decoder +func (c *Complaint) DecodeRLP(s *rlp.Stream) error { + var dec rlpComplaint + if err := s.Decode(&dec); err != nil { + return err + } + + var prvShare PrivateShare + if dec.IsNack { + copy(prvShare.ProposerID.Hash[:], dec.PrivateShare) + prvShare.Round = dec.Round + prvShare.Reset = dec.Reset + } else { + if err := rlp.DecodeBytes(dec.PrivateShare, &prvShare); err != nil { + return err + } + } + + *c = Complaint{ + ProposerID: dec.ProposerID, + Round: dec.Round, + Reset: dec.Reset, + PrivateShare: prvShare, + Signature: dec.Signature, + } + return nil +} + +// IsNack returns true if it's a nack complaint in DKG protocol. +func (c *Complaint) IsNack() bool { + return len(c.PrivateShare.Signature.Signature) == 0 +} + +// PartialSignature describe a partial signature in DKG protocol. +type PartialSignature struct { + ProposerID types.NodeID `json:"proposer_id"` + Round uint64 `json:"round"` + Hash common.Hash `json:"hash"` + PartialSignature cryptoDKG.PartialSignature `json:"partial_signature"` + Signature crypto.Signature `json:"signature"` +} + +// MPKReady describe a dkg ready message in DKG protocol. +type MPKReady struct { + ProposerID types.NodeID `json:"proposer_id"` + Round uint64 `json:"round"` + Reset uint64 `json:"reset"` + Signature crypto.Signature `json:"signature"` +} + +func (ready *MPKReady) String() string { + return fmt.Sprintf("DKGMPKReady{RP:%s Round:%d Reset:%d}", + ready.ProposerID.String()[:6], + ready.Round, + ready.Reset) +} + +// Equal check equality of two MPKReady instances. 
+func (ready *MPKReady) Equal(other *MPKReady) bool { + return ready.ProposerID.Equal(other.ProposerID) && + ready.Round == other.Round && + ready.Reset == other.Reset && + ready.Signature.Type == other.Signature.Type && + bytes.Compare(ready.Signature.Signature, other.Signature.Signature) == 0 +} + +// Finalize describe a dkg finalize message in DKG protocol. +type Finalize struct { + ProposerID types.NodeID `json:"proposer_id"` + Round uint64 `json:"round"` + Reset uint64 `json:"reset"` + Signature crypto.Signature `json:"signature"` +} + +func (final *Finalize) String() string { + return fmt.Sprintf("DKGFinal{FP:%s Round:%d Reset:%d}", + final.ProposerID.String()[:6], + final.Round, + final.Reset) +} + +// Equal check equality of two Finalize instances. +func (final *Finalize) Equal(other *Finalize) bool { + return final.ProposerID.Equal(other.ProposerID) && + final.Round == other.Round && + final.Reset == other.Reset && + final.Signature.Type == other.Signature.Type && + bytes.Compare(final.Signature.Signature, other.Signature.Signature) == 0 +} + +// Success describe a dkg success message in DKG protocol. +type Success struct { + ProposerID types.NodeID `json:"proposer_id"` + Round uint64 `json:"round"` + Reset uint64 `json:"reset"` + Signature crypto.Signature `json:"signature"` +} + +func (s *Success) String() string { + return fmt.Sprintf("DKGSuccess{SP:%s Round:%d Reset:%d}", + s.ProposerID.String()[:6], + s.Round, + s.Reset) +} + +// Equal check equality of two Success instances. +func (s *Success) Equal(other *Success) bool { + return s.ProposerID.Equal(other.ProposerID) && + s.Round == other.Round && + s.Reset == other.Reset && + s.Signature.Type == other.Signature.Type && + bytes.Compare(s.Signature.Signature, other.Signature.Signature) == 0 +} + +// GroupPublicKey is the result of DKG protocol. +type GroupPublicKey struct { + Round uint64 + QualifyIDs cryptoDKG.IDs + QualifyNodeIDs map[types.NodeID]struct{} + IDMap map[types.NodeID]cryptoDKG.ID + GroupPublicKey *cryptoDKG.PublicKey + Threshold int +} + +// VerifySignature verifies if the signature is correct. +func (gpk *GroupPublicKey) VerifySignature( + hash common.Hash, sig crypto.Signature) bool { + return gpk.GroupPublicKey.VerifySignature(hash, sig) +} + +// CalcQualifyNodes returns the qualified nodes. +func CalcQualifyNodes( + mpks []*MasterPublicKey, complaints []*Complaint, threshold int) ( + qualifyIDs cryptoDKG.IDs, qualifyNodeIDs map[types.NodeID]struct{}, err error) { + if len(mpks) < threshold { + err = ErrInvalidThreshold + return + } + + // Calculate qualify members. 
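+	// Disqualification, as implemented below: a signed (non-nack) complaint
+	// immediately disqualifies the accused share's proposer, while nack
+	// complaints only disqualify a proposer once at least `threshold`
+	// distinct complainers have nacked it.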
+ disqualifyIDs := map[types.NodeID]struct{}{} + complaintsByID := map[types.NodeID]map[types.NodeID]struct{}{} + for _, complaint := range complaints { + if complaint.IsNack() { + if _, exist := complaintsByID[complaint.PrivateShare.ProposerID]; !exist { + complaintsByID[complaint.PrivateShare.ProposerID] = + make(map[types.NodeID]struct{}) + } + complaintsByID[complaint.PrivateShare.ProposerID][complaint.ProposerID] = + struct{}{} + } else { + disqualifyIDs[complaint.PrivateShare.ProposerID] = struct{}{} + } + } + for nID, complaints := range complaintsByID { + if len(complaints) >= threshold { + disqualifyIDs[nID] = struct{}{} + } + } + qualifyIDs = make(cryptoDKG.IDs, 0, len(mpks)-len(disqualifyIDs)) + if cap(qualifyIDs) < threshold { + err = ErrNotReachThreshold + return + } + qualifyNodeIDs = make(map[types.NodeID]struct{}) + for _, mpk := range mpks { + if _, exist := disqualifyIDs[mpk.ProposerID]; exist { + continue + } + qualifyIDs = append(qualifyIDs, mpk.DKGID) + qualifyNodeIDs[mpk.ProposerID] = struct{}{} + } + return +} + +// NewGroupPublicKey creats a GroupPublicKey instance. +func NewGroupPublicKey( + round uint64, + mpks []*MasterPublicKey, complaints []*Complaint, + threshold int) ( + *GroupPublicKey, error) { + qualifyIDs, qualifyNodeIDs, err := + CalcQualifyNodes(mpks, complaints, threshold) + if err != nil { + return nil, err + } + mpkMap := make(map[cryptoDKG.ID]*MasterPublicKey, cap(qualifyIDs)) + idMap := make(map[types.NodeID]cryptoDKG.ID) + for _, mpk := range mpks { + if _, exist := qualifyNodeIDs[mpk.ProposerID]; !exist { + continue + } + mpkMap[mpk.DKGID] = mpk + idMap[mpk.ProposerID] = mpk.DKGID + } + // Recover Group Public Key. + pubShares := make([]*cryptoDKG.PublicKeyShares, 0, len(qualifyIDs)) + for _, id := range qualifyIDs { + pubShares = append(pubShares, &mpkMap[id].PublicKeyShares) + } + groupPK := cryptoDKG.RecoverGroupPublicKey(pubShares) + return &GroupPublicKey{ + Round: round, + QualifyIDs: qualifyIDs, + QualifyNodeIDs: qualifyNodeIDs, + IDMap: idMap, + Threshold: threshold, + GroupPublicKey: groupPK, + }, nil +} + +// NodePublicKeys is the result of DKG protocol. +type NodePublicKeys struct { + Round uint64 + QualifyIDs cryptoDKG.IDs + QualifyNodeIDs map[types.NodeID]struct{} + IDMap map[types.NodeID]cryptoDKG.ID + PublicKeys map[types.NodeID]*cryptoDKG.PublicKey + Threshold int +} + +// NewNodePublicKeys creats a NodePublicKeys instance. +func NewNodePublicKeys( + round uint64, + mpks []*MasterPublicKey, complaints []*Complaint, + threshold int) ( + *NodePublicKeys, error) { + qualifyIDs, qualifyNodeIDs, err := + CalcQualifyNodes(mpks, complaints, threshold) + if err != nil { + return nil, err + } + mpkMap := make(map[cryptoDKG.ID]*MasterPublicKey, cap(qualifyIDs)) + idMap := make(map[types.NodeID]cryptoDKG.ID) + for _, mpk := range mpks { + if _, exist := qualifyNodeIDs[mpk.ProposerID]; !exist { + continue + } + mpkMap[mpk.DKGID] = mpk + idMap[mpk.ProposerID] = mpk.DKGID + } + // Recover qualify members' public key. 
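+	// For each qualified receiver, collect that receiver's public-key share
+	// from every qualified proposer's PublicKeyShares, then recover the
+	// receiver's individual public key from the collected shares.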
+ pubKeys := make(map[types.NodeID]*cryptoDKG.PublicKey, len(qualifyIDs)) + for _, recvID := range qualifyIDs { + pubShares := cryptoDKG.NewEmptyPublicKeyShares() + for _, id := range qualifyIDs { + pubShare, err := mpkMap[id].PublicKeyShares.Share(recvID) + if err != nil { + return nil, err + } + if err := pubShares.AddShare(id, pubShare); err != nil { + return nil, err + } + } + pubKey, err := pubShares.RecoverPublicKey(qualifyIDs) + if err != nil { + return nil, err + } + pubKeys[mpkMap[recvID].ProposerID] = pubKey + } + return &NodePublicKeys{ + Round: round, + QualifyIDs: qualifyIDs, + QualifyNodeIDs: qualifyNodeIDs, + IDMap: idMap, + PublicKeys: pubKeys, + Threshold: threshold, + }, nil +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/message.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/message.go new file mode 100644 index 000000000..0335cfaae --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/message.go @@ -0,0 +1,24 @@ +// Copyright 2019 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package types + +// Msg for the network ReceiveChan. +type Msg struct { + PeerID interface{} + Payload interface{} +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/node.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/node.go new file mode 100644 index 000000000..84b38a3b1 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/node.go @@ -0,0 +1,61 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package types + +import ( + "bytes" + "encoding/hex" + + "github.com/byzantine-lab/dexon-consensus/common" + "github.com/byzantine-lab/dexon-consensus/core/crypto" +) + +// NodeID is the ID type for nodes. +type NodeID struct { + common.Hash +} + +// NewNodeID returns a NodeID with Hash set to the hash value of +// public key. +func NewNodeID(pubKey crypto.PublicKey) NodeID { + return NodeID{Hash: crypto.Keccak256Hash(pubKey.Bytes()[1:])} +} + +// Equal checks if the hash representation is the same NodeID. 
+func (v NodeID) Equal(v2 NodeID) bool { + return v.Hash == v2.Hash +} + +func (v NodeID) String() string { + return hex.EncodeToString(v.Hash[:])[:6] +} + +// NodeIDs implements sort.Interface for NodeID. +type NodeIDs []NodeID + +func (v NodeIDs) Len() int { + return len(v) +} + +func (v NodeIDs) Less(i int, j int) bool { + return bytes.Compare([]byte(v[i].Hash[:]), []byte(v[j].Hash[:])) == -1 +} + +func (v NodeIDs) Swap(i int, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/nodeset.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/nodeset.go new file mode 100644 index 000000000..522bcb224 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/nodeset.go @@ -0,0 +1,162 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package types + +import ( + "container/heap" + "encoding/binary" + "math/big" + + "github.com/byzantine-lab/dexon-consensus/common" + "github.com/byzantine-lab/dexon-consensus/core/crypto" +) + +// NodeSet is the node set structure as defined in DEXON consensus core. +type NodeSet struct { + IDs map[NodeID]struct{} +} + +// SubSetTarget is the sub set target for GetSubSet(). +type SubSetTarget struct { + data [][]byte +} + +type subSetTargetType byte + +const ( + targetNotarySet subSetTargetType = iota + targetNodeLeader +) + +type nodeRank struct { + ID NodeID + rank *big.Int +} + +// rankHeap is a MaxHeap structure. +type rankHeap []*nodeRank + +func (h rankHeap) Len() int { return len(h) } +func (h rankHeap) Less(i, j int) bool { return h[i].rank.Cmp(h[j].rank) > 0 } +func (h rankHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h *rankHeap) Push(x interface{}) { + *h = append(*h, x.(*nodeRank)) +} +func (h *rankHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +// NewNodeSet creates a new NodeSet instance. +func NewNodeSet() *NodeSet { + return &NodeSet{ + IDs: make(map[NodeID]struct{}), + } +} + +// NewNodeSetFromMap creates a new NodeSet from NodeID map. +func NewNodeSetFromMap(nodes map[NodeID]struct{}) *NodeSet { + nIDs := make(map[NodeID]struct{}, len(nodes)) + for nID := range nodes { + nIDs[nID] = struct{}{} + } + return &NodeSet{ + IDs: nIDs, + } +} + +// NewNotarySetTarget is the target for getting Notary Set. +func NewNotarySetTarget(crs common.Hash) *SubSetTarget { + return newTarget(targetNotarySet, crs[:]) +} + +// NewNodeLeaderTarget is the target for getting leader of fast BA. +func NewNodeLeaderTarget(crs common.Hash, height uint64) *SubSetTarget { + binaryHeight := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryHeight, height) + return newTarget(targetNodeLeader, crs[:], binaryHeight) +} + +// Add a NodeID to the set. 
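+//
+// Illustrative sketch (not part of the original source; "keys", "crs" and
+// "cfg" are assumed):
+//
+//	ns := NewNodeSet()
+//	for _, k := range keys { // keys []crypto.PublicKey
+//		ns.Add(NewNodeID(k))
+//	}
+//	notaries := ns.GetSubSet(int(cfg.NotarySetSize), NewNotarySetTarget(crs))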
+func (ns *NodeSet) Add(ID NodeID) { + ns.IDs[ID] = struct{}{} +} + +// Clone the NodeSet. +func (ns *NodeSet) Clone() *NodeSet { + nsCopy := NewNodeSet() + for ID := range ns.IDs { + nsCopy.Add(ID) + } + return nsCopy +} + +// GetSubSet returns the subset of given target. +func (ns *NodeSet) GetSubSet( + size int, target *SubSetTarget) map[NodeID]struct{} { + if size == 0 { + return make(map[NodeID]struct{}) + } + h := rankHeap{} + idx := 0 + for nID := range ns.IDs { + if idx < size { + h = append(h, newNodeRank(nID, target)) + } else if idx == size { + heap.Init(&h) + } + if idx >= size { + rank := newNodeRank(nID, target) + if rank.rank.Cmp(h[0].rank) < 0 { + h[0] = rank + heap.Fix(&h, 0) + } + } + idx++ + } + + nIDs := make(map[NodeID]struct{}, size) + for _, rank := range h { + nIDs[rank.ID] = struct{}{} + } + + return nIDs +} + +func newTarget(targetType subSetTargetType, data ...[]byte) *SubSetTarget { + data = append(data, []byte{byte(targetType)}) + return &SubSetTarget{ + data: data, + } +} + +func newNodeRank(ID NodeID, target *SubSetTarget) *nodeRank { + data := make([][]byte, 1, len(target.data)+1) + data[0] = make([]byte, len(ID.Hash)) + copy(data[0], ID.Hash[:]) + data = append(data, target.data...) + h := crypto.Keccak256Hash(data...) + num := new(big.Int).SetBytes(h[:]) + return &nodeRank{ + ID: ID, + rank: num, + } +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/position.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/position.go new file mode 100644 index 000000000..81d23c266 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/position.go @@ -0,0 +1,51 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package types + +import ( + "fmt" +) + +// Position describes the position in the block lattice of an entity. +type Position struct { + Round uint64 `json:"round"` + Height uint64 `json:"height"` +} + +func (pos Position) String() string { + return fmt.Sprintf("Position{Round:%d Height:%d}", pos.Round, pos.Height) +} + +// Equal checks if two positions are equal. +func (pos Position) Equal(other Position) bool { + return pos.Round == other.Round && pos.Height == other.Height +} + +// Newer checks if one block is newer than another one on the same chain. +// If two blocks on different chain compared by this function, it would panic. +func (pos Position) Newer(other Position) bool { + return pos.Round > other.Round || + (pos.Round == other.Round && pos.Height > other.Height) +} + +// Older checks if one block is older than another one on the same chain. +// If two blocks on different chain compared by this function, it would panic. 
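+//
+// For example, Position{Round: 1, Height: 9}.Older(Position{Round: 2,
+// Height: 0}) is true: positions are ordered by Round first, and by Height
+// only within the same Round.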
+func (pos Position) Older(other Position) bool { + return pos.Round < other.Round || + (pos.Round == other.Round && pos.Height < other.Height) +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/vote.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/vote.go new file mode 100644 index 000000000..def09293a --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/vote.go @@ -0,0 +1,100 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package types + +import ( + "fmt" + + "github.com/byzantine-lab/dexon-consensus/common" + "github.com/byzantine-lab/dexon-consensus/core/crypto" + cryptoDKG "github.com/byzantine-lab/dexon-consensus/core/crypto/dkg" +) + +// VoteType is the type of vote. +type VoteType byte + +// VoteType enum. +const ( + VoteInit VoteType = iota + VotePreCom + VoteCom + VoteFast + VoteFastCom + // Do not add any type below MaxVoteType. + MaxVoteType +) + +// NullBlockHash is the blockHash for ⊥ value. +var NullBlockHash common.Hash + +// SkipBlockHash is the blockHash for SKIP value. +var SkipBlockHash common.Hash + +func init() { + for idx := range SkipBlockHash { + SkipBlockHash[idx] = 0xff + } +} + +// VoteHeader is the header for vote, which can be used as map keys. +type VoteHeader struct { + ProposerID NodeID `json:"proposer_id"` + Type VoteType `json:"type"` + BlockHash common.Hash `json:"block_hash"` + Period uint64 `json:"period"` + Position Position `json:"position"` +} + +// Vote is the vote structure defined in Crypto Shuffle Algorithm. +type Vote struct { + VoteHeader `json:"header"` + PartialSignature cryptoDKG.PartialSignature `json:"partial_signature"` + Signature crypto.Signature `json:"signature"` +} + +func (v *Vote) String() string { + return fmt.Sprintf("Vote{VP:%s %s Period:%d Type:%d Hash:%s}", + v.ProposerID.String()[:6], + v.Position, v.Period, v.Type, v.BlockHash.String()[:6]) +} + +// NewVote constructs a Vote instance with header fields. +func NewVote(t VoteType, hash common.Hash, period uint64) *Vote { + return &Vote{ + VoteHeader: VoteHeader{ + Type: t, + BlockHash: hash, + Period: period, + }} +} + +// Clone returns a deep copy of a vote. 
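+// The embedded VoteHeader is copied by value; PartialSignature and Signature
+// are duplicated via Signature.Clone.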
+func (v *Vote) Clone() *Vote { + return &Vote{ + VoteHeader: VoteHeader{ + ProposerID: v.ProposerID, + Type: v.Type, + BlockHash: v.BlockHash, + Period: v.Period, + Position: v.Position, + }, + PartialSignature: cryptoDKG.PartialSignature( + crypto.Signature(v.PartialSignature).Clone()), + Signature: v.Signature.Clone(), + } +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils.go new file mode 100644 index 000000000..4cb3bf18a --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils.go @@ -0,0 +1,255 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package core + +import ( + "context" + "errors" + "fmt" + "os" + "sort" + "time" + + "github.com/byzantine-lab/dexon-consensus/common" + "github.com/byzantine-lab/dexon-consensus/core/crypto" + "github.com/byzantine-lab/dexon-consensus/core/types" + "github.com/byzantine-lab/dexon-consensus/core/utils" +) + +// Errors for utils. +var ( + ErrIncorrectVoteBlockHash = fmt.Errorf( + "incorrect vote block hash") + ErrIncorrectVoteType = fmt.Errorf( + "incorrect vote type") + ErrIncorrectVotePosition = fmt.Errorf( + "incorrect vote position") + ErrIncorrectVoteProposer = fmt.Errorf( + "incorrect vote proposer") + ErrIncorrectVotePeriod = fmt.Errorf( + "incorrect vote period") +) + +// NodeSetCache is type alias to avoid fullnode compile error when moving +// it to core/utils package. +type NodeSetCache = utils.NodeSetCache + +// NewNodeSetCache is function alias to avoid fullnode compile error when moving +// it to core/utils package. +var NewNodeSetCache = utils.NewNodeSetCache + +var ( + debug = false + // ErrEmptyTimestamps would be reported if Block.timestamps is empty. + ErrEmptyTimestamps = errors.New("timestamp vector should not be empty") +) + +func init() { + if os.Getenv("DEBUG") != "" { + debug = true + } +} + +// Debugf is like fmt.Printf, but only output when we are in debug mode. +func Debugf(format string, args ...interface{}) { + if debug { + fmt.Printf(format, args...) + } +} + +// Debugln is like fmt.Println, but only output when we are in debug mode. +func Debugln(args ...interface{}) { + if debug { + fmt.Println(args...) 
+ } +} + +func interpoTime(t1 time.Time, t2 time.Time, sep int) []time.Time { + if sep == 0 { + return []time.Time{} + } + if t1.After(t2) { + return interpoTime(t2, t1, sep) + } + timestamps := make([]time.Time, sep) + duration := t2.Sub(t1) + period := time.Duration( + (duration.Nanoseconds() / int64(sep+1))) * time.Nanosecond + prevTime := t1 + for idx := range timestamps { + prevTime = prevTime.Add(period) + timestamps[idx] = prevTime + } + return timestamps +} + +func getMedianTime(timestamps []time.Time) (t time.Time, err error) { + if len(timestamps) == 0 { + err = ErrEmptyTimestamps + return + } + tscopy := make([]time.Time, 0, len(timestamps)) + for _, ts := range timestamps { + tscopy = append(tscopy, ts) + } + sort.Sort(common.ByTime(tscopy)) + if len(tscopy)%2 == 0 { + t1 := tscopy[len(tscopy)/2-1] + t2 := tscopy[len(tscopy)/2] + t = interpoTime(t1, t2, 1)[0] + } else { + t = tscopy[len(tscopy)/2] + } + return +} + +func removeFromSortedUint32Slice(xs []uint32, x uint32) []uint32 { + indexToRemove := sort.Search(len(xs), func(idx int) bool { + return xs[idx] >= x + }) + if indexToRemove == len(xs) || xs[indexToRemove] != x { + // This value is not found. + return xs + } + return append(xs[:indexToRemove], xs[indexToRemove+1:]...) +} + +// HashConfigurationBlock returns the hash value of configuration block. +func HashConfigurationBlock( + notarySet map[types.NodeID]struct{}, + config *types.Config, + snapshotHash common.Hash, + prevHash common.Hash, +) common.Hash { + notaryIDs := make(types.NodeIDs, 0, len(notarySet)) + for nID := range notarySet { + notaryIDs = append(notaryIDs, nID) + } + sort.Sort(notaryIDs) + notarySetBytes := make([]byte, 0, len(notarySet)*len(common.Hash{})) + for _, nID := range notaryIDs { + notarySetBytes = append(notarySetBytes, nID.Hash[:]...) + } + configBytes := config.Bytes() + + return crypto.Keccak256Hash( + notarySetBytes[:], + configBytes[:], + snapshotHash[:], + prevHash[:], + ) +} + +// VerifyAgreementResult perform sanity check against a types.AgreementResult +// instance. +func VerifyAgreementResult( + res *types.AgreementResult, cache *NodeSetCache) error { + if res.Position.Round >= DKGDelayRound { + if len(res.Randomness) == 0 { + return ErrMissingRandomness + } + return nil + } + notarySet, err := cache.GetNotarySet(res.Position.Round) + if err != nil { + return err + } + if len(res.Votes) < len(notarySet)*2/3+1 { + return ErrNotEnoughVotes + } + voted := make(map[types.NodeID]struct{}, len(notarySet)) + voteType := res.Votes[0].Type + votePeriod := res.Votes[0].Period + if voteType != types.VoteFastCom && voteType != types.VoteCom { + return ErrIncorrectVoteType + } + for _, vote := range res.Votes { + if vote.Period != votePeriod { + return ErrIncorrectVotePeriod + } + if res.IsEmptyBlock { + if (vote.BlockHash != common.Hash{}) { + return ErrIncorrectVoteBlockHash + } + } else { + if vote.BlockHash != res.BlockHash { + return ErrIncorrectVoteBlockHash + } + } + if vote.Type != voteType { + return ErrIncorrectVoteType + } + if vote.Position != res.Position { + return ErrIncorrectVotePosition + } + if _, exist := notarySet[vote.ProposerID]; !exist { + return ErrIncorrectVoteProposer + } + ok, err := utils.VerifyVoteSignature(&vote) + if err != nil { + return err + } + if !ok { + return ErrIncorrectVoteSignature + } + voted[vote.ProposerID] = struct{}{} + } + if len(voted) < len(notarySet)*2/3+1 { + return ErrNotEnoughVotes + } + return nil +} + +// DiffUint64 calculates difference between two uint64. 
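+// That is, it returns |a - b| without unsigned-integer underflow.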
+func DiffUint64(a, b uint64) uint64 { + if a > b { + return a - b + } + return b - a +} + +func isCI() bool { + return os.Getenv("CI") != "" +} + +func isCircleCI() bool { + return isCI() && os.Getenv("CIRCLECI") == "true" +} + +func isTravisCI() bool { + return isCI() && os.Getenv("TRAVIS") == "true" +} + +// checkWithCancel is a helper to perform periodic checking with cancel. +func checkWithCancel(parentCtx context.Context, interval time.Duration, + checker func() bool) (ret bool) { + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() +Loop: + for { + if ret = checker(); ret { + return + } + select { + case <-ctx.Done(): + break Loop + case <-time.After(interval): + } + } + return +} diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/crypto.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/crypto.go new file mode 100644 index 000000000..161c1d495 --- /dev/null +++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/crypto.go @@ -0,0 +1,376 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// . + +package utils + +import ( + "bytes" + "encoding/binary" + + "github.com/byzantine-lab/dexon-consensus/common" + "github.com/byzantine-lab/dexon-consensus/core/crypto" + "github.com/byzantine-lab/dexon-consensus/core/types" + typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg" +) + +func hashWitness(witness *types.Witness) (common.Hash, error) { + binaryHeight := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryHeight, witness.Height) + return crypto.Keccak256Hash( + binaryHeight, + witness.Data), nil +} + +// HashBlock generates hash of a types.Block. +func HashBlock(block *types.Block) (common.Hash, error) { + hashPosition := HashPosition(block.Position) + binaryTimestamp, err := block.Timestamp.UTC().MarshalBinary() + if err != nil { + return common.Hash{}, err + } + binaryWitness, err := hashWitness(&block.Witness) + if err != nil { + return common.Hash{}, err + } + + hash := crypto.Keccak256Hash( + block.ProposerID.Hash[:], + block.ParentHash[:], + hashPosition[:], + binaryTimestamp[:], + block.PayloadHash[:], + binaryWitness[:]) + return hash, nil +} + +// VerifyBlockSignature verifies the signature of types.Block. +func VerifyBlockSignature(b *types.Block) (err error) { + payloadHash := crypto.Keccak256Hash(b.Payload) + if payloadHash != b.PayloadHash { + err = ErrIncorrectHash + return + } + return VerifyBlockSignatureWithoutPayload(b) +} + +// VerifyBlockSignatureWithoutPayload verifies the signature of types.Block but +// does not check if PayloadHash is correct. 
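+// It still recomputes the block hash, compares it against b.Hash, and
+// recovers the signer from b.Signature to check it matches b.ProposerID.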
+func VerifyBlockSignatureWithoutPayload(b *types.Block) (err error) { + hash, err := HashBlock(b) + if err != nil { + return + } + if hash != b.Hash { + err = ErrIncorrectHash + return + } + pubKey, err := crypto.SigToPub(b.Hash, b.Signature) + if err != nil { + return + } + if !b.ProposerID.Equal(types.NewNodeID(pubKey)) { + err = ErrIncorrectSignature + return + } + return + +} + +// HashVote generates hash of a types.Vote. +func HashVote(vote *types.Vote) common.Hash { + binaryPeriod := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryPeriod, vote.Period) + + hashPosition := HashPosition(vote.Position) + + hash := crypto.Keccak256Hash( + vote.ProposerID.Hash[:], + vote.BlockHash[:], + binaryPeriod, + hashPosition[:], + vote.PartialSignature.Signature[:], + []byte{byte(vote.Type)}, + ) + return hash +} + +// VerifyVoteSignature verifies the signature of types.Vote. +func VerifyVoteSignature(vote *types.Vote) (bool, error) { + hash := HashVote(vote) + pubKey, err := crypto.SigToPub(hash, vote.Signature) + if err != nil { + return false, err + } + if vote.ProposerID != types.NewNodeID(pubKey) { + return false, nil + } + return true, nil +} + +func hashCRS(block *types.Block, crs common.Hash) common.Hash { + hashPos := HashPosition(block.Position) + if block.Position.Round < dkgDelayRound { + return crypto.Keccak256Hash(crs[:], hashPos[:], block.ProposerID.Hash[:]) + } + return crypto.Keccak256Hash(crs[:], hashPos[:]) +} + +// VerifyCRSSignature verifies the CRS signature of types.Block. +func VerifyCRSSignature( + block *types.Block, crs common.Hash, npks *typesDKG.NodePublicKeys) bool { + hash := hashCRS(block, crs) + if block.Position.Round < dkgDelayRound { + return bytes.Compare(block.CRSSignature.Signature[:], hash[:]) == 0 + } + if npks == nil { + return false + } + pubKey, exist := npks.PublicKeys[block.ProposerID] + if !exist { + return false + } + return pubKey.VerifySignature(hash, block.CRSSignature) +} + +// HashPosition generates hash of a types.Position. +func HashPosition(position types.Position) common.Hash { + binaryRound := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryRound, position.Round) + + binaryHeight := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryHeight, position.Height) + + return crypto.Keccak256Hash( + binaryRound, + binaryHeight, + ) +} + +func hashDKGPrivateShare(prvShare *typesDKG.PrivateShare) common.Hash { + binaryRound := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryRound, prvShare.Round) + binaryReset := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryReset, prvShare.Reset) + + return crypto.Keccak256Hash( + prvShare.ProposerID.Hash[:], + prvShare.ReceiverID.Hash[:], + binaryRound, + binaryReset, + prvShare.PrivateShare.Bytes(), + ) +} + +// VerifyDKGPrivateShareSignature verifies the signature of +// typesDKG.PrivateShare. 
+func VerifyDKGPrivateShareSignature( + prvShare *typesDKG.PrivateShare) (bool, error) { + hash := hashDKGPrivateShare(prvShare) + pubKey, err := crypto.SigToPub(hash, prvShare.Signature) + if err != nil { + return false, err + } + if prvShare.ProposerID != types.NewNodeID(pubKey) { + return false, nil + } + return true, nil +} + +func hashDKGMasterPublicKey(mpk *typesDKG.MasterPublicKey) common.Hash { + binaryRound := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryRound, mpk.Round) + binaryReset := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryReset, mpk.Reset) + + return crypto.Keccak256Hash( + mpk.ProposerID.Hash[:], + mpk.DKGID.GetLittleEndian(), + mpk.PublicKeyShares.MasterKeyBytes(), + binaryRound, + binaryReset, + ) +} + +// VerifyDKGMasterPublicKeySignature verifies DKGMasterPublicKey signature. +func VerifyDKGMasterPublicKeySignature( + mpk *typesDKG.MasterPublicKey) (bool, error) { + hash := hashDKGMasterPublicKey(mpk) + pubKey, err := crypto.SigToPub(hash, mpk.Signature) + if err != nil { + return false, err + } + if mpk.ProposerID != types.NewNodeID(pubKey) { + return false, nil + } + return true, nil +} + +func hashDKGComplaint(complaint *typesDKG.Complaint) common.Hash { + binaryRound := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryRound, complaint.Round) + binaryReset := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryReset, complaint.Reset) + + hashPrvShare := hashDKGPrivateShare(&complaint.PrivateShare) + + return crypto.Keccak256Hash( + complaint.ProposerID.Hash[:], + binaryRound, + binaryReset, + hashPrvShare[:], + ) +} + +// VerifyDKGComplaintSignature verifies DKGCompliant signature. +func VerifyDKGComplaintSignature( + complaint *typesDKG.Complaint) (bool, error) { + if complaint.Round != complaint.PrivateShare.Round { + return false, nil + } + if complaint.Reset != complaint.PrivateShare.Reset { + return false, nil + } + hash := hashDKGComplaint(complaint) + pubKey, err := crypto.SigToPub(hash, complaint.Signature) + if err != nil { + return false, err + } + if complaint.ProposerID != types.NewNodeID(pubKey) { + return false, nil + } + if !complaint.IsNack() { + return VerifyDKGPrivateShareSignature(&complaint.PrivateShare) + } + return true, nil +} + +func hashDKGPartialSignature(psig *typesDKG.PartialSignature) common.Hash { + binaryRound := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryRound, psig.Round) + + return crypto.Keccak256Hash( + psig.ProposerID.Hash[:], + binaryRound, + psig.Hash[:], + psig.PartialSignature.Signature[:], + ) +} + +// VerifyDKGPartialSignatureSignature verifies the signature of +// typesDKG.PartialSignature. +func VerifyDKGPartialSignatureSignature( + psig *typesDKG.PartialSignature) (bool, error) { + hash := hashDKGPartialSignature(psig) + pubKey, err := crypto.SigToPub(hash, psig.Signature) + if err != nil { + return false, err + } + if psig.ProposerID != types.NewNodeID(pubKey) { + return false, nil + } + return true, nil +} + +func hashDKGMPKReady(ready *typesDKG.MPKReady) common.Hash { + binaryRound := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryRound, ready.Round) + binaryReset := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryReset, ready.Reset) + + return crypto.Keccak256Hash( + ready.ProposerID.Hash[:], + binaryRound, + binaryReset, + ) +} + +// VerifyDKGMPKReadySignature verifies DKGMPKReady signature. 
+// VerifyDKGMPKReadySignature verifies DKGMPKReady signature.
+func VerifyDKGMPKReadySignature(
+	ready *typesDKG.MPKReady) (bool, error) {
+	hash := hashDKGMPKReady(ready)
+	pubKey, err := crypto.SigToPub(hash, ready.Signature)
+	if err != nil {
+		return false, err
+	}
+	if ready.ProposerID != types.NewNodeID(pubKey) {
+		return false, nil
+	}
+	return true, nil
+}
+
+func hashDKGFinalize(final *typesDKG.Finalize) common.Hash {
+	binaryRound := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryRound, final.Round)
+	binaryReset := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryReset, final.Reset)
+
+	return crypto.Keccak256Hash(
+		final.ProposerID.Hash[:],
+		binaryRound,
+		binaryReset,
+	)
+}
+
+func hashDKGSuccess(success *typesDKG.Success) common.Hash {
+	binaryRound := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryRound, success.Round)
+	binaryReset := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryReset, success.Reset)
+
+	return crypto.Keccak256Hash(
+		success.ProposerID.Hash[:],
+		binaryRound,
+		binaryReset,
+	)
+}
+
+// VerifyDKGFinalizeSignature verifies DKGFinalize signature.
+func VerifyDKGFinalizeSignature(
+	final *typesDKG.Finalize) (bool, error) {
+	hash := hashDKGFinalize(final)
+	pubKey, err := crypto.SigToPub(hash, final.Signature)
+	if err != nil {
+		return false, err
+	}
+	if final.ProposerID != types.NewNodeID(pubKey) {
+		return false, nil
+	}
+	return true, nil
+}
+
+// VerifyDKGSuccessSignature verifies DKGSuccess signature.
+func VerifyDKGSuccessSignature(
+	success *typesDKG.Success) (bool, error) {
+	hash := hashDKGSuccess(success)
+	pubKey, err := crypto.SigToPub(hash, success.Signature)
+	if err != nil {
+		return false, err
+	}
+	if success.ProposerID != types.NewNodeID(pubKey) {
+		return false, nil
+	}
+	return true, nil
+}
+
+// Rehash applies Keccak256 to the hash repeatedly, count times.
+func Rehash(hash common.Hash, count uint) common.Hash {
+	result := hash
+	for i := uint(0); i < count; i++ {
+		result = crypto.Keccak256Hash(result[:])
+	}
+	return result
+}
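Rehash's contract is easy to state concretely: each iteration is one Keccak256 application over the previous digest, so Rehash(h, 0) is h itself and Rehash(h, 3) unrolls to three nested calls. A small illustrative check, not part of the vendored code:

    func exampleRehash() {
        h := crypto.Keccak256Hash([]byte("seed"))
        step1 := crypto.Keccak256Hash(h[:])
        step2 := crypto.Keccak256Hash(step1[:])
        step3 := crypto.Keccak256Hash(step2[:])
        if Rehash(h, 3) != step3 || Rehash(h, 0) != h {
            panic("Rehash must unroll to nested Keccak256 calls")
        }
    }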
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/nodeset-cache.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/nodeset-cache.go
new file mode 100644
index 000000000..028690e18
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/nodeset-cache.go
@@ -0,0 +1,245 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+	"errors"
+	"sync"
+
+	"github.com/byzantine-lab/dexon-consensus/common"
+	"github.com/byzantine-lab/dexon-consensus/core/crypto"
+	"github.com/byzantine-lab/dexon-consensus/core/types"
+)
+
+var (
+	// ErrNodeSetNotReady means we got an empty node set.
+	ErrNodeSetNotReady = errors.New("node set is not ready")
+	// ErrCRSNotReady means we got an empty CRS.
+	ErrCRSNotReady = errors.New("crs is not ready")
+	// ErrConfigurationNotReady means we got a nil configuration.
+	ErrConfigurationNotReady = errors.New("configuration is not ready")
+)
+
+type sets struct {
+	crs       common.Hash
+	nodeSet   *types.NodeSet
+	notarySet map[types.NodeID]struct{}
+}
+
+// NodeSetCacheInterface specifies the interface used by NodeSetCache.
+type NodeSetCacheInterface interface {
+	// Configuration returns the configuration at a given round.
+	// Return the genesis configuration if round == 0.
+	Configuration(round uint64) *types.Config
+
+	// CRS returns the CRS for a given round.
+	// Return the genesis CRS if round == 0.
+	CRS(round uint64) common.Hash
+
+	// NodeSet returns the node set at a given round.
+	// Return the genesis node set if round == 0.
+	NodeSet(round uint64) []crypto.PublicKey
+}
+
+// NodeSetCache caches node set information.
+//
+// NOTE: this module doesn't handle DKG resetting and can only be used along
+// with utils.RoundEvent.
+type NodeSetCache struct {
+	lock    sync.RWMutex
+	nsIntf  NodeSetCacheInterface
+	rounds  map[uint64]*sets
+	keyPool map[types.NodeID]*struct {
+		pubKey crypto.PublicKey
+		refCnt int
+	}
+}
+
+// NewNodeSetCache constructs a NodeSetCache instance.
+func NewNodeSetCache(nsIntf NodeSetCacheInterface) *NodeSetCache {
+	return &NodeSetCache{
+		nsIntf: nsIntf,
+		rounds: make(map[uint64]*sets),
+		keyPool: make(map[types.NodeID]*struct {
+			pubKey crypto.PublicKey
+			refCnt int
+		}),
+	}
+}
+
+// Exists checks if a node is in the node set of that round.
+func (cache *NodeSetCache) Exists(
+	round uint64, nodeID types.NodeID) (exists bool, err error) {
+
+	nIDs, exists := cache.get(round)
+	if !exists {
+		if nIDs, err = cache.update(round); err != nil {
+			return
+		}
+	}
+	_, exists = nIDs.nodeSet.IDs[nodeID]
+	return
+}
+
+// GetPublicKey returns the public key of that node, if it is cached.
+func (cache *NodeSetCache) GetPublicKey(
+	nodeID types.NodeID) (key crypto.PublicKey, exists bool) {
+
+	cache.lock.RLock()
+	defer cache.lock.RUnlock()
+
+	rec, exists := cache.keyPool[nodeID]
+	if exists {
+		key = rec.pubKey
+	}
+	return
+}
+
+// GetNodeSet returns a clone of the node set of this round.
+func (cache *NodeSetCache) GetNodeSet(round uint64) (*types.NodeSet, error) {
+	IDs, exists := cache.get(round)
+	if !exists {
+		var err error
+		if IDs, err = cache.update(round); err != nil {
+			return nil, err
+		}
+	}
+	return IDs.nodeSet.Clone(), nil
+}
+
+// GetNotarySet returns the notary set of this round.
+func (cache *NodeSetCache) GetNotarySet(
+	round uint64) (map[types.NodeID]struct{}, error) {
+	IDs, err := cache.getOrUpdate(round)
+	if err != nil {
+		return nil, err
+	}
+	return cache.cloneMap(IDs.notarySet), nil
+}
+
+// Purge evicts the cached information of a specific round.
+func (cache *NodeSetCache) Purge(rID uint64) {
+	cache.lock.Lock()
+	defer cache.lock.Unlock()
+	nIDs, exist := cache.rounds[rID]
+	if !exist {
+		return
+	}
+	for nID := range nIDs.nodeSet.IDs {
+		rec := cache.keyPool[nID]
+		if rec.refCnt--; rec.refCnt == 0 {
+			delete(cache.keyPool, nID)
+		}
+	}
+	delete(cache.rounds, rID)
+}
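A sketch of the intended read path, assuming a gov value that implements NodeSetCacheInterface (the round number is made up): look-ups go through the cache, public keys are served from the reference-counted key pool, and Purge releases a round explicitly.

    func exampleNotarySet(gov NodeSetCacheInterface) error {
        cache := NewNodeSetCache(gov)
        notaries, err := cache.GetNotarySet(7) // fetches and caches round 7
        if err != nil {
            // ErrNodeSetNotReady, ErrCRSNotReady or ErrConfigurationNotReady
            // surface here while governance has no data for that round yet.
            return err
        }
        for nID := range notaries {
            if pubKey, ok := cache.GetPublicKey(nID); ok {
                _ = pubKey // served from the reference-counted key pool
            }
        }
        cache.Purge(7) // drop round 7 and release its key references
        return nil
    }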
+// Touch updates the internal cache of a round.
+func (cache *NodeSetCache) Touch(round uint64) (err error) {
+	_, err = cache.update(round)
+	return
+}
+
+func (cache *NodeSetCache) cloneMap(
+	nIDs map[types.NodeID]struct{}) map[types.NodeID]struct{} {
+	nIDsCopy := make(map[types.NodeID]struct{}, len(nIDs))
+	for k := range nIDs {
+		nIDsCopy[k] = struct{}{}
+	}
+	return nIDsCopy
+}
+
+func (cache *NodeSetCache) getOrUpdate(round uint64) (nIDs *sets, err error) {
+	s, exists := cache.get(round)
+	if !exists {
+		if s, err = cache.update(round); err != nil {
+			return
+		}
+	}
+	nIDs = s
+	return
+}
+
+// update node set for that round.
+//
+// This cache maintains the rounds within five rounds before the updated
+// round, and purges rounds out of this range.
+func (cache *NodeSetCache) update(round uint64) (nIDs *sets, err error) {
+	cache.lock.Lock()
+	defer cache.lock.Unlock()
+	// Get information for the requested round.
+	keySet := cache.nsIntf.NodeSet(round)
+	if keySet == nil {
+		err = ErrNodeSetNotReady
+		return
+	}
+	crs := cache.nsIntf.CRS(round)
+	if (crs == common.Hash{}) {
+		err = ErrCRSNotReady
+		return
+	}
+	// Cache new round.
+	nodeSet := types.NewNodeSet()
+	for _, key := range keySet {
+		nID := types.NewNodeID(key)
+		nodeSet.Add(nID)
+		if rec, exists := cache.keyPool[nID]; exists {
+			rec.refCnt++
+		} else {
+			cache.keyPool[nID] = &struct {
+				pubKey crypto.PublicKey
+				refCnt int
+			}{key, 1}
+		}
+	}
+	cfg := cache.nsIntf.Configuration(round)
+	if cfg == nil {
+		err = ErrConfigurationNotReady
+		return
+	}
+	nIDs = &sets{
+		crs:       crs,
+		nodeSet:   nodeSet,
+		notarySet: make(map[types.NodeID]struct{}),
+	}
+	nIDs.notarySet = nodeSet.GetSubSet(
+		int(cfg.NotarySetSize), types.NewNotarySetTarget(crs))
+	cache.rounds[round] = nIDs
+	// Purge older rounds.
+	for rID, nIDs := range cache.rounds {
+		nodeSet := nIDs.nodeSet
+		if round-rID <= 5 {
+			continue
+		}
+		for nID := range nodeSet.IDs {
+			rec := cache.keyPool[nID]
+			if rec.refCnt--; rec.refCnt == 0 {
+				delete(cache.keyPool, nID)
+			}
+		}
+		delete(cache.rounds, rID)
+	}
+	return
+}
+
+func (cache *NodeSetCache) get(round uint64) (nIDs *sets, exists bool) {
+	cache.lock.RLock()
+	defer cache.lock.RUnlock()
+	nIDs, exists = cache.rounds[round]
+	return
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/penalty-helper.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/penalty-helper.go
new file mode 100644
index 000000000..658fe79a9
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/penalty-helper.go
@@ -0,0 +1,131 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+	"errors"
+
+	"github.com/byzantine-lab/dexon-consensus/core/types"
+	typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+)
+
+var (
+	// ErrInvalidDKGMasterPublicKey means the DKG MasterPublicKey is invalid.
+	ErrInvalidDKGMasterPublicKey = errors.New("invalid DKG master public key")
+	// ErrPayloadNotEmpty means the payload of the block is not empty.
+	ErrPayloadNotEmpty = errors.New("payload not empty")
+)
+
+// NeedPenaltyDKGPrivateShare checks if the proposer of a DKG private share
+// should be penalized.
+func NeedPenaltyDKGPrivateShare(
+	complaint *typesDKG.Complaint, mpk *typesDKG.MasterPublicKey) (bool, error) {
+	if complaint.IsNack() {
+		return false, nil
+	}
+	if mpk.ProposerID != complaint.PrivateShare.ProposerID {
+		return false, nil
+	}
+	ok, err := VerifyDKGMasterPublicKeySignature(mpk)
+	if err != nil {
+		return false, err
+	}
+	if !ok {
+		return false, ErrInvalidDKGMasterPublicKey
+	}
+	ok, err = VerifyDKGComplaintSignature(complaint)
+	if err != nil {
+		return false, err
+	}
+	if !ok {
+		return false, nil
+	}
+	ok, err = mpk.PublicKeyShares.VerifyPrvShare(
+		typesDKG.NewID(complaint.PrivateShare.ReceiverID),
+		&complaint.PrivateShare.PrivateShare)
+	if err != nil {
+		return false, err
+	}
+	return !ok, nil
+}
+
+// NeedPenaltyForkVote checks if two votes constitute a forked vote.
+func NeedPenaltyForkVote(vote1, vote2 *types.Vote) (bool, error) {
+	if vote1.ProposerID != vote2.ProposerID ||
+		vote1.Type != vote2.Type ||
+		vote1.Period != vote2.Period ||
+		vote1.Position != vote2.Position ||
+		vote1.BlockHash == vote2.BlockHash {
+		return false, nil
+	}
+	ok, err := VerifyVoteSignature(vote1)
+	if err != nil {
+		return false, err
+	}
+	if !ok {
+		return false, nil
+	}
+	ok, err = VerifyVoteSignature(vote2)
+	if err != nil {
+		return false, err
+	}
+	if !ok {
+		return false, nil
+	}
+	return true, nil
+}
+
+// NeedPenaltyForkBlock checks if two blocks constitute a forked block.
+func NeedPenaltyForkBlock(block1, block2 *types.Block) (bool, error) {
+	if block1.ProposerID != block2.ProposerID ||
+		block1.Position != block2.Position ||
+		block1.Hash == block2.Hash {
+		return false, nil
+	}
+	if len(block1.Payload) != 0 || len(block2.Payload) != 0 {
+		return false, ErrPayloadNotEmpty
+	}
+	verifyBlock := func(block *types.Block) (bool, error) {
+		err := VerifyBlockSignatureWithoutPayload(block)
+		switch err {
+		case nil:
+			return true, nil
+		case ErrIncorrectSignature:
+			return false, nil
+		case ErrIncorrectHash:
+			return false, nil
+		default:
+			return false, err
+		}
+	}
+	ok, err := verifyBlock(block1)
+	if err != nil {
+		return false, err
+	}
+	if !ok {
+		return false, nil
+	}
+	ok, err = verifyBlock(block2)
+	if err != nil {
+		return false, err
+	}
+	if !ok {
+		return false, nil
+	}
+	return true, nil
+}
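What NeedPenaltyForkVote treats as a fork: two correctly signed votes that agree on proposer, type, period and position but point at different block hashes. A hedged sketch using the Signer type added later in this patch (prv is a hypothetical crypto.PrivateKey; the vote fields are made up):

    func exampleForkVote(prv crypto.PrivateKey) (bool, error) {
        signer := NewSigner(prv)
        mkVote := func(h common.Hash) *types.Vote {
            v := &types.Vote{}
            v.Type = types.VoteCom
            v.Period = 3
            v.Position = types.Position{Round: 1, Height: 10}
            v.BlockHash = h
            if err := signer.SignVote(v); err != nil {
                panic(err)
            }
            return v
        }
        // Identical except for BlockHash: expect (true, nil).
        return NeedPenaltyForkVote(mkVote(common.Hash{1}), mkVote(common.Hash{2}))
    }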
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-based-config.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-based-config.go
new file mode 100644
index 000000000..88842cacf
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-based-config.go
@@ -0,0 +1,112 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+	"fmt"
+
+	"github.com/byzantine-lab/dexon-consensus/core/types"
+)
+
+// RoundBasedConfig is the base config for rounds and provides boundary
+// checking for rounds.
+type RoundBasedConfig struct {
+	roundID          uint64
+	roundBeginHeight uint64
+	roundEndHeight   uint64
+	roundLength      uint64
+}
+
+// SetupRoundBasedFields sets up round-based fields, including the round ID
+// and the length of rounds.
+func (c *RoundBasedConfig) SetupRoundBasedFields(
+	roundID uint64, cfg *types.Config) {
+	if c.roundLength > 0 {
+		panic(fmt.Errorf("duplicated set round based fields: %d",
+			c.roundLength))
+	}
+	c.roundID = roundID
+	c.roundLength = cfg.RoundLength
+}
+
+// SetRoundBeginHeight gives the beginning height for the initial round
+// provided when constructed.
+func (c *RoundBasedConfig) SetRoundBeginHeight(begin uint64) {
+	if c.roundBeginHeight != 0 {
+		panic(fmt.Errorf("duplicated set round begin height: %d",
+			c.roundBeginHeight))
+	}
+	c.roundBeginHeight = begin
+	c.roundEndHeight = begin + c.roundLength
+}
+
+// IsLastBlock checks if a block is the last block of this round.
+func (c *RoundBasedConfig) IsLastBlock(b *types.Block) bool {
+	if b.Position.Round != c.roundID {
+		panic(fmt.Errorf("attempt to compare by different round: %s, %d",
+			b, c.roundID))
+	}
+	return b.Position.Height+1 == c.roundEndHeight
+}
+
+// ExtendLength extends the round ending height by the length of the current
+// round.
+func (c *RoundBasedConfig) ExtendLength() {
+	c.roundEndHeight += c.roundLength
+}
+
+// Contains checks if a block height is in this round.
+func (c *RoundBasedConfig) Contains(h uint64) bool {
+	return c.roundBeginHeight <= h && c.roundEndHeight > h
+}
+
+// RoundID returns the round ID of this config.
+func (c *RoundBasedConfig) RoundID() uint64 {
+	if c.roundLength == 0 {
+		panic(fmt.Errorf("config is not initialized: %d", c.roundID))
+	}
+	return c.roundID
+}
+
+// RoundEndHeight returns the next checkpoint to verify whether this round has
+// ended.
+func (c *RoundBasedConfig) RoundEndHeight() uint64 {
+	if c.roundLength == 0 {
+		panic(fmt.Errorf("config is not initialized: %d", c.roundID))
+	}
+	return c.roundEndHeight
+}
+
+// AppendTo chains this config to the one of the previous round.
+func (c *RoundBasedConfig) AppendTo(other RoundBasedConfig) {
+	if c.roundID != other.roundID+1 {
+		panic(fmt.Errorf("round IDs of configs not continuous: %d %d",
+			c.roundID, other.roundID))
+	}
+	c.SetRoundBeginHeight(other.roundEndHeight)
+}
+// LastPeriodBeginHeight returns the begin height of the last period. For
+// example, if a round is extended twice, its ending height becomes
+// begin + 3*roundLength, so this method returns
+// begin + 3*roundLength - roundLength.
+func (c *RoundBasedConfig) LastPeriodBeginHeight() uint64 {
+	if c.roundLength == 0 {
+		panic(fmt.Errorf("config is not initialized: %d", c.roundID))
+	}
+	return c.roundEndHeight - c.roundLength
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-event.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-event.go
new file mode 100644
index 000000000..4f4b04542
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-event.go
@@ -0,0 +1,358 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	"github.com/byzantine-lab/dexon-consensus/common"
+	"github.com/byzantine-lab/dexon-consensus/core/types"
+	typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+)
+
+// ErrUnmatchedBlockHeightWithConfig is returned for invalid parameters passed
+// to NewRoundEvent.
+type ErrUnmatchedBlockHeightWithConfig struct {
+	round       uint64
+	reset       uint64
+	blockHeight uint64
+}
+
+func (e ErrUnmatchedBlockHeightWithConfig) Error() string {
+	return fmt.Sprintf("unsynced block height and cfg: round:%d reset:%d h:%d",
+		e.round, e.reset, e.blockHeight)
+}
+
+// RoundEventParam defines the parameters passed to event handlers of
+// RoundEvent.
+type RoundEventParam struct {
+	// 'Round' of the next checkpoint; it might be identical to the previous
+	// checkpoint.
+	Round uint64
+	// The count of DKG resets for 'Round+1'.
+	Reset uint64
+	// The begin block height of this event; the end block height of this
+	// event would be BeginHeight + Config.RoundLength.
+	BeginHeight uint64
+	// The configuration for 'Round'.
+	Config *types.Config
+	// The CRS for 'Round'.
+	CRS common.Hash
+}
+
+// NextRoundValidationHeight returns the height to check if the next round is
+// ready.
+func (e RoundEventParam) NextRoundValidationHeight() uint64 {
+	return e.BeginHeight + e.Config.RoundLength*9/10
+}
+
+// NextCRSProposingHeight returns the height to propose the CRS for the next
+// round.
+func (e RoundEventParam) NextCRSProposingHeight() uint64 {
+	return e.BeginHeight + e.Config.RoundLength/2
+}
+
+// NextDKGPreparationHeight returns the height to prepare the DKG set for the
+// next round.
+func (e RoundEventParam) NextDKGPreparationHeight() uint64 {
+	return e.BeginHeight + e.Config.RoundLength*2/3
+}
+
+// NextRoundHeight returns the height of the beginning of the next round.
+func (e RoundEventParam) NextRoundHeight() uint64 {
+	return e.BeginHeight + e.Config.RoundLength
+}
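The checkpoint helpers above are plain fractions of the round length. A worked example with made-up numbers, BeginHeight = 5000 and RoundLength = 1000 (integer division applies):

    func exampleCheckpoints() {
        e := RoundEventParam{
            BeginHeight: 5000,
            Config:      &types.Config{RoundLength: 1000},
        }
        _ = e.NextCRSProposingHeight()    // 5000 + 1000/2    == 5500
        _ = e.NextDKGPreparationHeight()  // 5000 + 1000*2/3  == 5666
        _ = e.NextRoundValidationHeight() // 5000 + 1000*9/10 == 5900
        _ = e.NextRoundHeight()           // 5000 + 1000      == 6000
    }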
+// NextTouchNodeSetCacheHeight returns the height to touch the node set cache.
+func (e RoundEventParam) NextTouchNodeSetCacheHeight() uint64 {
+	return e.BeginHeight + e.Config.RoundLength/2
+}
+
+// NextDKGResetHeight returns the height to reset DKG for the next period.
+func (e RoundEventParam) NextDKGResetHeight() uint64 {
+	return e.BeginHeight + e.Config.RoundLength*85/100
+}
+
+// NextDKGRegisterHeight returns the height to register DKG.
+func (e RoundEventParam) NextDKGRegisterHeight() uint64 {
+	return e.BeginHeight + e.Config.RoundLength/2
+}
+
+// RoundEndHeight returns the round ending height of this round event.
+func (e RoundEventParam) RoundEndHeight() uint64 {
+	return e.BeginHeight + e.Config.RoundLength
+}
+
+func (e RoundEventParam) String() string {
+	return fmt.Sprintf("roundEvtParam{Round:%d Reset:%d Height:%d}",
+		e.Round,
+		e.Reset,
+		e.BeginHeight)
+}
+
+// roundEventFn defines the signature of round event handlers.
+type roundEventFn func([]RoundEventParam)
+
+// governanceAccessor is a subset of core.Governance to break the dependency
+// between the core and utils packages.
+type governanceAccessor interface {
+	// Configuration returns the configuration at a given round.
+	// Return the genesis configuration if round == 0.
+	Configuration(round uint64) *types.Config
+
+	// CRS returns the CRS for a given round.
+	// Return the genesis CRS if round == 0.
+	CRS(round uint64) common.Hash
+
+	// DKGComplaints gets all the DKGComplaints of round.
+	DKGComplaints(round uint64) []*typesDKG.Complaint
+
+	// DKGMasterPublicKeys gets all the DKGMasterPublicKey of round.
+	DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey
+
+	// IsDKGFinal checks if DKG is final.
+	IsDKGFinal(round uint64) bool
+
+	// IsDKGSuccess checks if DKG is success.
+	IsDKGSuccess(round uint64) bool
+
+	// DKGResetCount returns the reset count for DKG of the given round.
+	DKGResetCount(round uint64) uint64
+
+	// GetRoundHeight returns the begin height of a round.
+	GetRoundHeight(round uint64) uint64
+}
+
+// RoundEventRetryHandlerGenerator generates a handler for common.Event, which
+// re-registers itself to retry the next round validation if no round event is
+// triggered.
+func RoundEventRetryHandlerGenerator(
+	rEvt *RoundEvent, hEvt *common.Event) func(uint64) {
+	var hEvtHandler func(uint64)
+	hEvtHandler = func(h uint64) {
+		if rEvt.ValidateNextRound(h) == 0 {
+			// Retry until at least one round event is triggered.
+			hEvt.RegisterHeight(h+1, hEvtHandler)
+		}
+	}
+	return hEvtHandler
+}
+
+// RoundEvent would be triggered when either:
+// - the next DKG set setup is ready.
+// - the next DKG set setup has failed, and the previous DKG set already reset
+//   the CRS.
+type RoundEvent struct {
+	gov                     governanceAccessor
+	logger                  common.Logger
+	lock                    sync.Mutex
+	handlers                []roundEventFn
+	config                  RoundBasedConfig
+	lastTriggeredRound      uint64
+	lastTriggeredResetCount uint64
+	roundShift              uint64
+	gpkInvalid              bool
+	ctx                     context.Context
+	ctxCancel               context.CancelFunc
+}
+
+// NewRoundEvent creates a RoundEvent instance.
+func NewRoundEvent(parentCtx context.Context, gov governanceAccessor,
+	logger common.Logger, initPos types.Position, roundShift uint64) (
+	*RoundEvent, error) {
+	// We need to generate the valid ending block height of this round (taking
+	// the DKG reset count into consideration).
+ logger.Info("new RoundEvent", "position", initPos, "shift", roundShift) + initConfig := GetConfigWithPanic(gov, initPos.Round, logger) + e := &RoundEvent{ + gov: gov, + logger: logger, + lastTriggeredRound: initPos.Round, + roundShift: roundShift, + } + e.ctx, e.ctxCancel = context.WithCancel(parentCtx) + e.config = RoundBasedConfig{} + e.config.SetupRoundBasedFields(initPos.Round, initConfig) + e.config.SetRoundBeginHeight(GetRoundHeight(gov, initPos.Round)) + // Make sure the DKG reset count in current governance can cover the initial + // block height. + if initPos.Height >= types.GenesisHeight { + resetCount := gov.DKGResetCount(initPos.Round + 1) + remains := resetCount + for ; remains > 0 && !e.config.Contains(initPos.Height); remains-- { + e.config.ExtendLength() + } + if !e.config.Contains(initPos.Height) { + return nil, ErrUnmatchedBlockHeightWithConfig{ + round: initPos.Round, + reset: resetCount, + blockHeight: initPos.Height, + } + } + e.lastTriggeredResetCount = resetCount - remains + } + return e, nil +} + +// Register a handler to be called when new round is confirmed or new DKG reset +// is detected. +// +// The earlier registered handler has higher priority. +func (e *RoundEvent) Register(h roundEventFn) { + e.lock.Lock() + defer e.lock.Unlock() + e.handlers = append(e.handlers, h) +} + +// TriggerInitEvent triggers event from the initial setting. +func (e *RoundEvent) TriggerInitEvent() { + e.lock.Lock() + defer e.lock.Unlock() + events := []RoundEventParam{RoundEventParam{ + Round: e.lastTriggeredRound, + Reset: e.lastTriggeredResetCount, + BeginHeight: e.config.LastPeriodBeginHeight(), + CRS: GetCRSWithPanic(e.gov, e.lastTriggeredRound, e.logger), + Config: GetConfigWithPanic(e.gov, e.lastTriggeredRound, e.logger), + }} + for _, h := range e.handlers { + h(events) + } +} + +// ValidateNextRound validate if the DKG set for next round is ready to go or +// failed to setup, all registered handlers would be called once some decision +// is made on chain. +// +// The count of triggered events would be returned. +func (e *RoundEvent) ValidateNextRound(blockHeight uint64) (count uint) { + // To make triggers continuous and sequential, the next validation should + // wait for previous one finishing. That's why I use mutex here directly. + var events []RoundEventParam + e.lock.Lock() + defer e.lock.Unlock() + e.logger.Trace("ValidateNextRound", + "height", blockHeight, + "round", e.lastTriggeredRound, + "count", e.lastTriggeredResetCount) + defer func() { + count = uint(len(events)) + if count == 0 { + return + } + for _, h := range e.handlers { + // To make sure all handlers receive triggers sequentially, we can't + // raise go routines here. + h(events) + } + }() + var ( + triggered bool + param RoundEventParam + beginHeight = blockHeight + startRound = e.lastTriggeredRound + ) + for { + param, triggered = e.check(beginHeight, startRound) + if !triggered { + break + } + events = append(events, param) + beginHeight = param.BeginHeight + } + return +} + +func (e *RoundEvent) check(blockHeight, startRound uint64) ( + param RoundEventParam, triggered bool) { + defer func() { + if !triggered { + return + } + // A simple assertion to make sure we didn't pick the wrong round. 
+func (e *RoundEvent) check(blockHeight, startRound uint64) (
+	param RoundEventParam, triggered bool) {
+	defer func() {
+		if !triggered {
+			return
+		}
+		// A simple assertion to make sure we didn't pick the wrong round.
+		if e.config.RoundID() != e.lastTriggeredRound {
+			panic(fmt.Errorf("Triggered round not matched: %d, %d",
+				e.config.RoundID(), e.lastTriggeredRound))
+		}
+		param.Round = e.lastTriggeredRound
+		param.Reset = e.lastTriggeredResetCount
+		param.BeginHeight = e.config.LastPeriodBeginHeight()
+		param.CRS = GetCRSWithPanic(e.gov, e.lastTriggeredRound, e.logger)
+		param.Config = GetConfigWithPanic(e.gov, e.lastTriggeredRound, e.logger)
+		e.logger.Info("New RoundEvent triggered",
+			"round", e.lastTriggeredRound,
+			"reset", e.lastTriggeredResetCount,
+			"begin-height", e.config.LastPeriodBeginHeight(),
+			"crs", param.CRS.String()[:6],
+		)
+	}()
+	nextRound := e.lastTriggeredRound + 1
+	if nextRound >= startRound+e.roundShift {
+		// Avoid accessing a configuration newer than the last confirmed one
+		// by more than 'roundShift' rounds. The fullnode might crash if we
+		// access it before it knows about it.
+		return
+	}
+	nextCfg := GetConfigWithPanic(e.gov, nextRound, e.logger)
+	resetCount := e.gov.DKGResetCount(nextRound)
+	if resetCount > e.lastTriggeredResetCount {
+		e.lastTriggeredResetCount++
+		e.config.ExtendLength()
+		e.gpkInvalid = false
+		triggered = true
+		return
+	}
+	if e.gpkInvalid {
+		// We know that DKG has already failed; wait for the DKG set from the
+		// previous round to reset DKG, and don't reconstruct the group public
+		// key again.
+		return
+	}
+	if nextRound >= dkgDelayRound {
+		var ok bool
+		ok, e.gpkInvalid = IsDKGValid(
+			e.gov, e.logger, nextRound, e.lastTriggeredResetCount)
+		if !ok {
+			return
+		}
+	}
+	// The DKG set for the next round is well prepared.
+	e.lastTriggeredRound = nextRound
+	e.lastTriggeredResetCount = 0
+	e.gpkInvalid = false
+	rCfg := RoundBasedConfig{}
+	rCfg.SetupRoundBasedFields(nextRound, nextCfg)
+	rCfg.AppendTo(e.config)
+	e.config = rCfg
+	triggered = true
+	return
+}
+
+// Stop the event source and block until the last trigger returns.
+func (e *RoundEvent) Stop() {
+	e.ctxCancel()
+}
+
+// LastPeriod returns block-height-related info of the last period, including
+// the begin height and the round length.
+func (e *RoundEvent) LastPeriod() (begin uint64, length uint64) {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	begin = e.config.LastPeriodBeginHeight()
+	length = e.config.RoundEndHeight() - e.config.LastPeriodBeginHeight()
+	return
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/signer.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/signer.go
new file mode 100644
index 000000000..9128e264c
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/signer.go
@@ -0,0 +1,154 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+	"errors"
+
+	"github.com/byzantine-lab/dexon-consensus/common"
+	"github.com/byzantine-lab/dexon-consensus/core/crypto"
+	"github.com/byzantine-lab/dexon-consensus/core/types"
+	typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+)
+
+// Errors for signer.
+var (
+	ErrInvalidProposerID  = errors.New("invalid proposer id")
+	ErrIncorrectHash      = errors.New("hash of block is incorrect")
+	ErrIncorrectSignature = errors.New("signature of block is incorrect")
+	ErrNoBLSSigner        = errors.New("bls signer not set")
+)
+
+type blsSigner func(round uint64, hash common.Hash) (crypto.Signature, error)
+
+// Signer signs a segment of data.
+type Signer struct {
+	prvKey     crypto.PrivateKey
+	pubKey     crypto.PublicKey
+	proposerID types.NodeID
+	blsSign    blsSigner
+}
+
+// NewSigner constructs a Signer instance.
+func NewSigner(prvKey crypto.PrivateKey) (s *Signer) {
+	s = &Signer{
+		prvKey: prvKey,
+		pubKey: prvKey.PublicKey(),
+	}
+	s.proposerID = types.NewNodeID(s.pubKey)
+	return
+}
+
+// SetBLSSigner sets the BLS signer used for signing the CRSSignature.
+func (s *Signer) SetBLSSigner(signer blsSigner) {
+	s.blsSign = signer
+}
+
+// SignBlock signs a types.Block.
+func (s *Signer) SignBlock(b *types.Block) (err error) {
+	b.ProposerID = s.proposerID
+	b.PayloadHash = crypto.Keccak256Hash(b.Payload)
+	if b.Hash, err = HashBlock(b); err != nil {
+		return
+	}
+	if b.Signature, err = s.prvKey.Sign(b.Hash); err != nil {
+		return
+	}
+	return
+}
+
+// SignVote signs a types.Vote.
+func (s *Signer) SignVote(v *types.Vote) (err error) {
+	v.ProposerID = s.proposerID
+	v.Signature, err = s.prvKey.Sign(HashVote(v))
+	return
+}
+
+// SignCRS signs the CRS signature of a types.Block.
+func (s *Signer) SignCRS(b *types.Block, crs common.Hash) (err error) {
+	if b.ProposerID != s.proposerID {
+		err = ErrInvalidProposerID
+		return
+	}
+	if b.Position.Round < dkgDelayRound {
+		hash := hashCRS(b, crs)
+		b.CRSSignature = crypto.Signature{
+			Type:      "bls",
+			Signature: hash[:],
+		}
+		return
+	}
+	if s.blsSign == nil {
+		err = ErrNoBLSSigner
+		return
+	}
+	b.CRSSignature, err = s.blsSign(b.Position.Round, hashCRS(b, crs))
+	return
+}
+
+// SignDKGComplaint signs a DKG complaint.
+func (s *Signer) SignDKGComplaint(complaint *typesDKG.Complaint) (err error) {
+	complaint.ProposerID = s.proposerID
+	complaint.Signature, err = s.prvKey.Sign(hashDKGComplaint(complaint))
+	return
+}
+
+// SignDKGMasterPublicKey signs a DKG master public key.
+func (s *Signer) SignDKGMasterPublicKey(
+	mpk *typesDKG.MasterPublicKey) (err error) {
+	mpk.ProposerID = s.proposerID
+	mpk.Signature, err = s.prvKey.Sign(hashDKGMasterPublicKey(mpk))
+	return
+}
+
+// SignDKGPrivateShare signs a DKG private share.
+func (s *Signer) SignDKGPrivateShare(
+	prvShare *typesDKG.PrivateShare) (err error) {
+	prvShare.ProposerID = s.proposerID
+	prvShare.Signature, err = s.prvKey.Sign(hashDKGPrivateShare(prvShare))
+	return
+}
+
+// SignDKGPartialSignature signs a DKG partial signature.
+func (s *Signer) SignDKGPartialSignature(
+	pSig *typesDKG.PartialSignature) (err error) {
+	pSig.ProposerID = s.proposerID
+	pSig.Signature, err = s.prvKey.Sign(hashDKGPartialSignature(pSig))
+	return
+}
+
+// SignDKGMPKReady signs a DKG ready message.
+func (s *Signer) SignDKGMPKReady(ready *typesDKG.MPKReady) (err error) {
+	ready.ProposerID = s.proposerID
+	ready.Signature, err = s.prvKey.Sign(hashDKGMPKReady(ready))
+	return
+}
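A hedged round-trip sketch tying Signer back to the helpers in crypto.go: SignBlock fills in ProposerID, PayloadHash, Hash and Signature, after which VerifyBlockSignatureWithoutPayload should pass. We assume NewPrivateKey from the vendored core/crypto/ecdsa package.

    func exampleSignAndVerify() error {
        // NewPrivateKey is assumed from the vendored core/crypto/ecdsa package.
        prv, err := ecdsa.NewPrivateKey()
        if err != nil {
            return err
        }
        signer := NewSigner(prv)
        b := &types.Block{}
        if err := signer.SignBlock(b); err != nil {
            return err
        }
        // Hash, PayloadHash, ProposerID and Signature are now populated;
        // any tampering yields ErrIncorrectHash or ErrIncorrectSignature.
        return VerifyBlockSignatureWithoutPayload(b)
    }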
+// SignDKGFinalize signs a DKG finalize message.
+func (s *Signer) SignDKGFinalize(final *typesDKG.Finalize) (err error) {
+	final.ProposerID = s.proposerID
+	final.Signature, err = s.prvKey.Sign(hashDKGFinalize(final))
+	return
+}
+
+// SignDKGSuccess signs a DKG success message.
+func (s *Signer) SignDKGSuccess(success *typesDKG.Success) (err error) {
+	success.ProposerID = s.proposerID
+	success.Signature, err = s.prvKey.Sign(hashDKGSuccess(success))
+	return
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/utils.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/utils.go
new file mode 100644
index 000000000..6ff5bb62f
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/utils.go
@@ -0,0 +1,207 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/byzantine-lab/dexon-consensus/common"
+	"github.com/byzantine-lab/dexon-consensus/core/types"
+	typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+)
+
+var dkgDelayRound uint64
+
+// SetDKGDelayRound sets the DKG delay round.
+func SetDKGDelayRound(delay uint64) {
+	dkgDelayRound = delay
+}
+
+type configAccessor interface {
+	Configuration(round uint64) *types.Config
+}
+
+// GetConfigWithPanic is a helper to access configs; it panics when the config
+// for that round is not ready yet.
+func GetConfigWithPanic(accessor configAccessor, round uint64,
+	logger common.Logger) *types.Config {
+	if logger != nil {
+		logger.Debug("Calling Governance.Configuration", "round", round)
+	}
+	c := accessor.Configuration(round)
+	if c == nil {
+		panic(fmt.Errorf("configuration is not ready %v", round))
+	}
+	return c
+}
+
+type crsAccessor interface {
+	CRS(round uint64) common.Hash
+}
+
+// GetCRSWithPanic is a helper to access the CRS; it panics when the CRS for
+// that round is not ready yet.
+func GetCRSWithPanic(accessor crsAccessor, round uint64,
+	logger common.Logger) common.Hash {
+	if logger != nil {
+		logger.Debug("Calling Governance.CRS", "round", round)
+	}
+	crs := accessor.CRS(round)
+	if (crs == common.Hash{}) {
+		panic(fmt.Errorf("CRS is not ready %v", round))
+	}
+	return crs
+}
+
+// VerifyDKGComplaint verifies if it's a valid DKGComplaint.
+func VerifyDKGComplaint(
+	complaint *typesDKG.Complaint, mpk *typesDKG.MasterPublicKey) (bool, error) {
+	ok, err := VerifyDKGComplaintSignature(complaint)
+	if err != nil {
+		return false, err
+	}
+	if !ok {
+		return false, nil
+	}
+	if complaint.IsNack() {
+		return true, nil
+	}
+	if complaint.Round != mpk.Round {
+		return false, nil
+	}
+	ok, err = VerifyDKGMasterPublicKeySignature(mpk)
+	if err != nil {
+		return false, err
+	}
+	if !ok {
+		return false, nil
+	}
+	ok, err = mpk.PublicKeyShares.VerifyPrvShare(
+		typesDKG.NewID(complaint.PrivateShare.ReceiverID),
+		&complaint.PrivateShare.PrivateShare)
+	if err != nil {
+		return false, err
+	}
+	return !ok, nil
+}
+
+// LaunchDummyReceiver launches a goroutine to receive from the receive
+// channel of a network module. A context is required to stop the goroutine
+// automatically. An optional message handler could be provided.
+func LaunchDummyReceiver(
+	ctx context.Context, recv <-chan types.Msg, handler func(types.Msg)) (
+	context.CancelFunc, <-chan struct{}) {
+	var (
+		dummyCtx, dummyCancel = context.WithCancel(ctx)
+		finishedChan          = make(chan struct{}, 1)
+	)
+	go func() {
+		defer func() {
+			finishedChan <- struct{}{}
+		}()
+	loop:
+		for {
+			select {
+			case <-dummyCtx.Done():
+				break loop
+			case v, ok := <-recv:
+				if !ok {
+					panic(fmt.Errorf(
+						"receive channel is closed before dummy receiver"))
+				}
+				if handler != nil {
+					handler(v)
+				}
+			}
+		}
+	}()
+	return dummyCancel, finishedChan
+}
+
+// GetDKGThreshold returns the expected threshold for a given DKG set size.
+func GetDKGThreshold(config *types.Config) int {
+	return int(config.NotarySetSize*2/3) + 1
+}
+
+// GetDKGValidThreshold returns the threshold for a DKG set to be considered
+// valid.
+func GetDKGValidThreshold(config *types.Config) int {
+	return int(config.NotarySetSize * 5 / 6)
+}
+
+// GetBAThreshold returns the threshold for BA votes.
+func GetBAThreshold(config *types.Config) int {
+	return int(config.NotarySetSize*2/3 + 1)
+}
+
+// GetNextRoundValidationHeight returns the block height to check if the next
+// round is ready.
+func GetNextRoundValidationHeight(begin, length uint64) uint64 {
+	return begin + length*9/10
+}
+
+// GetRoundHeight wraps the workaround for the round height logic in fullnode.
+func GetRoundHeight(accessor interface{}, round uint64) uint64 {
+	type roundHeightAccessor interface {
+		GetRoundHeight(round uint64) uint64
+	}
+	accessorInst := accessor.(roundHeightAccessor)
+	height := accessorInst.GetRoundHeight(round)
+	if round == 0 && height < types.GenesisHeight {
+		return types.GenesisHeight
+	}
+	return height
+}
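LaunchDummyReceiver in use, sketched: drain a message channel until cancelled, then wait for the goroutine to exit (recvCh is a hypothetical receive channel of a network module):

    func exampleDummyReceiver(recvCh <-chan types.Msg) {
        cancel, done := LaunchDummyReceiver(
            context.Background(), recvCh, func(msg types.Msg) {
                // Optional inspection hook; pass nil to drop messages silently.
            })
        // ... exercise the node under test ...
        cancel() // stop the dummy receiver
        <-done   // block until the goroutine has fully exited
    }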
+// IsDKGValid checks if DKG is correctly prepared.
+func IsDKGValid(
+	gov governanceAccessor, logger common.Logger, round, reset uint64) (
+	valid bool, gpkInvalid bool) {
+	if !gov.IsDKGFinal(round) {
+		logger.Debug("DKG is not final", "round", round, "reset", reset)
+		return
+	}
+	if !gov.IsDKGSuccess(round) {
+		logger.Debug("DKG is not successful", "round", round, "reset", reset)
+		return
+	}
+	cfg := GetConfigWithPanic(gov, round, logger)
+	gpk, err := typesDKG.NewGroupPublicKey(
+		round,
+		gov.DKGMasterPublicKeys(round),
+		gov.DKGComplaints(round),
+		GetDKGThreshold(cfg))
+	if err != nil {
+		logger.Debug("Group public key setup failed",
+			"round", round,
+			"reset", reset,
+			"error", err)
+		gpkInvalid = true
+		return
+	}
+	if len(gpk.QualifyNodeIDs) < GetDKGValidThreshold(cfg) {
+		logger.Debug("Group public key threshold not reached",
+			"round", round,
+			"reset", reset,
+			"qualified", len(gpk.QualifyNodeIDs))
+		gpkInvalid = true
+		return
+	}
+	valid = true
+	return
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/vote-filter.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/vote-filter.go
new file mode 100644
index 000000000..556c2489a
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/vote-filter.go
@@ -0,0 +1,72 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+	"github.com/byzantine-lab/dexon-consensus/core/types"
+)
+
+// VoteFilter filters votes that are useless for now.
+// To maximize performance, this structure is not thread-safe and will never
+// be.
+type VoteFilter struct {
+	Voted    map[types.VoteHeader]struct{}
+	Position types.Position
+	LockIter uint64
+	Period   uint64
+	Confirm  bool
+}
+
+// NewVoteFilter creates a new vote filter instance.
+func NewVoteFilter() *VoteFilter {
+	return &VoteFilter{
+		Voted: make(map[types.VoteHeader]struct{}),
+	}
+}
+
+// Filter checks if the vote should be filtered out.
+func (vf *VoteFilter) Filter(vote *types.Vote) bool {
+	if vote.Type == types.VoteInit {
+		return true
+	}
+	if vote.Position.Older(vf.Position) {
+		return true
+	} else if vote.Position.Newer(vf.Position) {
+		// It's impossible to check votes at another height.
+		return false
+	}
+	if vf.Confirm {
+		return true
+	}
+	if vote.Type == types.VotePreCom && vote.Period < vf.LockIter {
+		return true
+	}
+	if vote.Type == types.VoteCom &&
+		vote.Period < vf.Period &&
+		vote.BlockHash == types.SkipBlockHash {
+		return true
+	}
+	if _, exist := vf.Voted[vote.VoteHeader]; exist {
+		return true
+	}
+	return false
+}
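Typical use of the filter, sketched (the votes slice and process consumer are hypothetical): consult Filter before doing any work, and record the vote afterwards so an identical one is skipped next time.

    func exampleVoteFilter(votes []*types.Vote, process func(*types.Vote)) {
        vf := NewVoteFilter()
        for _, vote := range votes {
            if vf.Filter(vote) {
                continue // stale, confirmed, or already-seen vote
            }
            process(vote)
            vf.AddVote(vote) // identical votes are filtered from now on
        }
    }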
+// AddVote adds a vote to the filter so the same vote will be filtered.
+func (vf *VoteFilter) AddVote(vote *types.Vote) {
+	vf.Voted[vote.VoteHeader] = struct{}{}
+}
diff --git a/vendor/github.com/byzantine-lab/mcl/.gitignore b/vendor/github.com/byzantine-lab/mcl/.gitignore
new file mode 100644
index 000000000..f5edb3706
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/.gitignore
@@ -0,0 +1,13 @@
+CVS
+java/*_wrap.cxx
+lib/*.so
+lib/*.a
+*.class
+GPATH
+GRTAGS
+GTAGS
+*.o
+*.d
+*.exe
+*.swp
+.cvsignore
diff --git a/vendor/github.com/byzantine-lab/mcl/.travis.yml b/vendor/github.com/byzantine-lab/mcl/.travis.yml
new file mode 100644
index 000000000..73a97e6aa
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/.travis.yml
@@ -0,0 +1,17 @@
+sudo: true
+dist: trusty
+language: cpp
+compiler:
+  - gcc
+  - clang
+addons:
+  apt:
+    packages:
+      - libgmp-dev
+script:
+  - make test_ci DEBUG=1 -j3
+  - make clean
+  - make test_ci CFLAGS_USER=-DMCL_DONT_USE_XBYAK -j3
+  - make clean
+  - make test_go
+
diff --git a/vendor/github.com/byzantine-lab/mcl/CMakeLists.txt b/vendor/github.com/byzantine-lab/mcl/CMakeLists.txt
new file mode 100644
index 000000000..aaa0a8cf2
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/CMakeLists.txt
@@ -0,0 +1,119 @@
+cmake_minimum_required (VERSION 2.6)
+project(mcl CXX ASM)
+set(SRCS src/fp.cpp)
+
+option(
+	MCL_MAX_BIT_SIZE
+	"max bit size for Fp"
+	0
+)
+option(
+	DOWNLOAD_SOURCE
+	"download cybozulib_ext"
+	OFF
+)
+
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+
+if(MSVC)
+	set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS} /MT /W4 /Oy /Ox /EHsc /GS- /Zi /DNDEBUG /DNOMINMAX")
+	set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS} /MTd /W4 /DNOMINMAX")
+	link_directories(${CMAKE_SOURCE_DIR}/../cybozulib_ext/lib)
+	link_directories(${CMAKE_SOURCE_DIR}/lib)
+else()
+	if("${CFLAGS_OPT_USER}" STREQUAL "")
+		set(CFLAGS_OPT_USER "-O3 -DNDEBUG -march=native")
+	endif()
+	set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wformat=2 -Wcast-qual -Wcast-align -Wwrite-strings -Wfloat-equal -Wpointer-arith ${CFLAGS_OPT_USER}")
+
+	if(${MCL_MAX_BIT_SIZE} GREATER 0)
+		add_definitions(-DMCL_MAX_BIT_SIZE=${MCL_MAX_BIT_SIZE})
+	endif()
+
+	if(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64")
+		add_definitions(-DMCL_USE_LLVM=1)
+		set(SRCS ${SRCS} src/asm/aarch64.s)
+		set(CPU arch64)
+	elseif(${CMAKE_SYSTEM_PROCESSOR} MATCHES "^arm")
+		add_definitions(-DMCL_USE_LLVM=1)
+		set(SRCS ${SRCS} src/asm/arm.s)
+		set(CPU arm)
+	elseif(APPLE)
+		add_definitions(-DMCL_USE_LLVM=1)
+		set(SRCS ${SRCS} src/asm/x86-64mac.s src/asm/x86-64mac.bmi2.s)
+		set(CPU x86-64)
+	elseif(UNIX)
+		add_definitions(-DMCL_USE_LLVM=1)
+		set(SRCS ${SRCS} src/asm/x86-64.s src/asm/x86-64.bmi2.s)
+		set(CPU x86-64)
+	endif()
+	set(LIBS mcl gmp gmpxx crypto)
+endif()
+
+if(DOWNLOAD_SOURCE)
+	if(MSVC)
+		set(CYBOZULIB_EXT_TAG release20170521)
+		set(FILES config.h gmp-impl.h gmp-mparam.h gmp.h gmpxx.h longlong.h mpir.h mpirxx.h)
+		foreach(file IN ITEMS ${FILES})
+			file(DOWNLOAD https://raw.githubusercontent.com/herumi/cybozulib_ext/${CYBOZULIB_EXT_TAG}/include/${file} ${mcl_SOURCE_DIR}/include/cybozulib_ext/${file})
+			message("download cybozulib_ext/" ${file})
+		endforeach()
+		set(FILES aes.h applink.c asn1.h asn1_mac.h asn1t.h bio.h blowfish.h bn.h buffer.h camellia.h cast.h cmac.h cms.h comp.h conf.h conf_api.h crypto.h des.h des_old.h dh.h dsa.h dso.h dtls1.h e_os2.h ebcdic.h ec.h ecdh.h ecdsa.h engine.h err.h evp.h hmac.h idea.h krb5_asn.h kssl.h
lhash.h md4.h md5.h mdc2.h modes.h obj_mac.h objects.h ocsp.h opensslconf.h opensslv.h ossl_typ.h pem.h pem2.h pkcs12.h pkcs7.h pqueue.h rand.h rc2.h rc4.h ripemd.h rsa.h safestack.h seed.h sha.h srp.h srtp.h ssl.h ssl2.h ssl23.h ssl3.h stack.h symhacks.h tls1.h ts.h txt_db.h ui.h ui_compat.h whrlpool.h x509.h x509_vfy.h x509v3.h) + foreach(file IN ITEMS ${FILES}) + file(DOWNLOAD https://raw.githubusercontent.com/herumi/cybozulib_ext/${CYBOZULIB_EXT_TAG}/include/openssl/${file} ${mcl_SOURCE_DIR}/include/cybozulib_ext/openssl/${file}) + message("download cybozulib_ext/openssl/" ${file}) + endforeach() + set(FILES mpir.lib mpirxx.lib mpirxx.pdb ssleay32.lib libeay32.lib mpir.pdb) + foreach(file IN ITEMS ${FILES}) + file(DOWNLOAD https://raw.githubusercontent.com/herumi/cybozulib_ext/${CYBOZULIB_EXT_TAG}/lib/mt/14/${file} ${mcl_SOURCE_DIR}/lib/mt/14/${file}) + message("download lib/mt/14/" ${file}) + endforeach() + if(MSVC) + include_directories( + ${mcl_SOURCE_DIR}/include/cybozulib_ext + ) + endif() + endif() +else() + if(MSVC) + include_directories( + ${mcl_SOURCE_DIR}/../cybozulib_ext/include + ) + endif() +endif() + +include_directories( + ${mcl_SOURCE_DIR}/include +) + +add_library(mcl STATIC ${SRCS}) +add_library(mcl_dy SHARED ${SRCS}) +target_link_libraries(mcl_dy ${LIBS}) +set_target_properties(mcl_dy PROPERTIES OUTPUT_NAME mcl) +#set_target_properties(mcl_dy PROPERTIES OUTPUT_NAME mcl VERSION 1.0.0 SOVERSION 1) +# For semantics of ABI compatibility including when you must bump SOVERSION, see: +# https://community.kde.org/Policies/Binary_Compatibility_Issues_With_C%2B%2B#The_Do.27s_and_Don.27ts + +file(GLOB MCL_HEADERS include/mcl/*.hpp include/mcl/bn.h include/mcl/curve_type.h) +file(GLOB CYBOZULIB_HEADERS include/cybozu/*.hpp) + +install(TARGETS mcl DESTINATION lib) +install(TARGETS mcl_dy DESTINATION lib) +install(FILES ${MCL_HEADERS} DESTINATION include/mcl) +install(FILES include/mcl/impl/bn_c_impl.hpp DESTINATION include/mcl/impl) +install(FILES ${CYBOZULIB_HEADERS} DESTINATION include/cybozu) + +set(TEST_BASE fp_test ec_test fp_util_test window_method_test elgamal_test fp_tower_test gmp_test bn_test glv_test) +#set(TEST_BASE bn_test) +foreach(base IN ITEMS ${TEST_BASE}) + add_executable( + ${base} + test/${base}.cpp + ) + target_link_libraries( + ${base} + ${LIBS} + ) +endforeach() diff --git a/vendor/github.com/byzantine-lab/mcl/COPYRIGHT b/vendor/github.com/byzantine-lab/mcl/COPYRIGHT new file mode 100644 index 000000000..90e49b4bc --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/COPYRIGHT @@ -0,0 +1,47 @@ + +Copyright (c) 2015 MITSUNARI Shigeo +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. +Neither the name of the copyright owner nor the names of its contributors may +be used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+[Japanese translation of the above BSD-style license terms.]
diff --git a/vendor/github.com/byzantine-lab/mcl/Makefile b/vendor/github.com/byzantine-lab/mcl/Makefile
new file mode 100644
index 000000000..7df1dd300
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/Makefile
@@ -0,0 +1,373 @@
+include common.mk
+LIB_DIR=lib
+OBJ_DIR=obj
+EXE_DIR=bin
+SRC_SRC=fp.cpp bn_c256.cpp bn_c384.cpp bn_c512.cpp she_c256.cpp
+TEST_SRC=fp_test.cpp ec_test.cpp fp_util_test.cpp window_method_test.cpp elgamal_test.cpp fp_tower_test.cpp gmp_test.cpp bn_test.cpp bn384_test.cpp glv_test.cpp paillier_test.cpp she_test.cpp vint_test.cpp bn512_test.cpp ecdsa_test.cpp conversion_test.cpp
+TEST_SRC+=bn_c256_test.cpp bn_c384_test.cpp bn_c384_256_test.cpp bn_c512_test.cpp she_c256_test.cpp she_c384_test.cpp
+TEST_SRC+=aggregate_sig_test.cpp array_test.cpp
+TEST_SRC+=bls12_test.cpp
+TEST_SRC+=ecdsa_c_test.cpp
+TEST_SRC+=modp_test.cpp
+ifeq ($(CPU),x86-64)
+  MCL_USE_XBYAK?=1
+  TEST_SRC+=mont_fp_test.cpp sq_test.cpp
+  ifeq ($(USE_LOW_ASM),1)
+    TEST_SRC+=low_test.cpp
+  endif
+  ifeq ($(MCL_USE_XBYAK),1)
+    TEST_SRC+=fp_generator_test.cpp
+  endif
+endif
+SAMPLE_SRC=bench.cpp ecdh.cpp random.cpp rawbench.cpp vote.cpp pairing.cpp large.cpp tri-dh.cpp bls_sig.cpp pairing_c.c she_smpl.cpp
+
+ifneq ($(MCL_MAX_BIT_SIZE),)
+  CFLAGS+=-DMCL_MAX_BIT_SIZE=$(MCL_MAX_BIT_SIZE)
+endif
+ifeq ($(MCL_USE_XBYAK),0)
+  CFLAGS+=-DMCL_DONT_USE_XBYAK
+endif
+##################################################################
+MCL_LIB=$(LIB_DIR)/libmcl.a
+MCL_SNAME=mcl
+BN256_SNAME=mclbn256
+BN384_SNAME=mclbn384
+BN384_256_SNAME=mclbn384_256
+BN512_SNAME=mclbn512
+SHE256_SNAME=mclshe256 +MCL_SLIB=$(LIB_DIR)/lib$(MCL_SNAME).$(LIB_SUF) +BN256_LIB=$(LIB_DIR)/libmclbn256.a +BN256_SLIB=$(LIB_DIR)/lib$(BN256_SNAME).$(LIB_SUF) +BN384_LIB=$(LIB_DIR)/libmclbn384.a +BN384_SLIB=$(LIB_DIR)/lib$(BN384_SNAME).$(LIB_SUF) +BN384_256_LIB=$(LIB_DIR)/libmclbn384_256.a +BN384_256_SLIB=$(LIB_DIR)/lib$(BN384_256_SNAME).$(LIB_SUF) +BN512_LIB=$(LIB_DIR)/libmclbn512.a +BN512_SLIB=$(LIB_DIR)/lib$(BN512_SNAME).$(LIB_SUF) +SHE256_LIB=$(LIB_DIR)/libmclshe256.a +SHE256_SLIB=$(LIB_DIR)/lib$(SHE256_SNAME).$(LIB_SUF) +SHE384_LIB=$(LIB_DIR)/libmclshe384.a +ECDSA_LIB=$(LIB_DIR)/libmclecdsa.a +all: $(MCL_LIB) $(MCL_SLIB) $(BN256_LIB) $(BN256_SLIB) $(BN384_LIB) $(BN384_SLIB) $(BN384_256_LIB) $(BN384_256_SLIB) $(BN512_LIB) $(BN512_SLIB) $(SHE256_LIB) $(SHE256_SLIB) $(SHE384_lib) $(ECDSA_LIB) + +#LLVM_VER=-3.8 +LLVM_LLC=llc$(LLVM_VER) +LLVM_OPT=opt$(LLVM_VER) +LLVM_OPT_VERSION=$(shell $(LLVM_OPT) --version 2>/dev/null | awk '/version/ {print $$3}') +GEN_EXE=src/gen +# incompatibility between llvm 3.4 and the later version +ifneq ($(LLVM_OPT_VERSION),) +ifeq ($(shell expr $(LLVM_OPT_VERSION) \< 3.5.0),1) + GEN_EXE_OPT=-old +endif +endif +ifeq ($(OS),mac) + ASM_SRC_PATH_NAME=src/asm/$(CPU)mac +else + ASM_SRC_PATH_NAME=src/asm/$(CPU) +endif +ifneq ($(CPU),) + ASM_SRC=$(ASM_SRC_PATH_NAME).s +endif +ASM_OBJ=$(OBJ_DIR)/$(CPU).o +LIB_OBJ=$(OBJ_DIR)/fp.o +BN256_OBJ=$(OBJ_DIR)/bn_c256.o +BN384_OBJ=$(OBJ_DIR)/bn_c384.o +BN384_256_OBJ=$(OBJ_DIR)/bn_c384_256.o +BN512_OBJ=$(OBJ_DIR)/bn_c512.o +SHE256_OBJ=$(OBJ_DIR)/she_c256.o +SHE384_OBJ=$(OBJ_DIR)/she_c384.o +ECDSA_OBJ=$(OBJ_DIR)/ecdsa_c.o +FUNC_LIST=src/func.list +ifeq ($(findstring $(OS),mingw64/cygwin),) + MCL_USE_LLVM?=1 +else + MCL_USE_LLVM=0 +endif +ifeq ($(MCL_USE_LLVM),1) + CFLAGS+=-DMCL_USE_LLVM=1 + LIB_OBJ+=$(ASM_OBJ) + # special case for intel with bmi2 + ifeq ($(INTEL),1) + LIB_OBJ+=$(OBJ_DIR)/$(CPU).bmi2.o + endif +endif +LLVM_SRC=src/base$(BIT).ll + +# CPU is used for llvm +# see $(LLVM_LLC) --version +LLVM_FLAGS=-march=$(CPU) -relocation-model=pic #-misched=ilpmax +LLVM_FLAGS+=-pre-RA-sched=list-ilp -max-sched-reorder=128 -mattr=-sse + +#HAS_BMI2=$(shell cat "/proc/cpuinfo" | grep bmi2 >/dev/null && echo "1") +#ifeq ($(HAS_BMI2),1) +# LLVM_FLAGS+=-mattr=bmi2 +#endif + +ifeq ($(USE_LOW_ASM),1) + LOW_ASM_OBJ=$(LOW_ASM_SRC:.asm=.o) + LIB_OBJ+=$(LOW_ASM_OBJ) +endif + +ifeq ($(UPDATE_ASM),1) + ASM_SRC_DEP=$(LLVM_SRC) + ASM_BMI2_SRC_DEP=src/base$(BIT).bmi2.ll +else + ASM_SRC_DEP= + ASM_BMI2_SRC_DEP= +endif + +ifneq ($(findstring $(OS),mac/mingw64),) + BN256_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib + BN384_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib + BN384_256_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib + BN512_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib + SHE256_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib +endif +ifeq ($(OS),mingw64) + MCL_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(MCL_SNAME).a + BN256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN256_SNAME).a + BN384_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN384_SNAME).a + BN384_256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN384_256_SNAME).a + BN512_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN512_SNAME).a + SHE256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(SHE256_SNAME).a +endif + +$(MCL_LIB): $(LIB_OBJ) + $(AR) $@ $(LIB_OBJ) + +$(MCL_SLIB): $(LIB_OBJ) + $(PRE)$(CXX) -o $@ $(LIB_OBJ) -shared $(LDFLAGS) $(MCL_SLIB_LDFLAGS) + +$(BN256_LIB): $(BN256_OBJ) + $(AR) $@ $(BN256_OBJ) + +$(SHE256_LIB): $(SHE256_OBJ) + $(AR) $@ $(SHE256_OBJ) + +$(SHE256_SLIB): $(SHE256_OBJ) $(MCL_LIB) + $(PRE)$(CXX) 
-o $@ $(SHE256_OBJ) $(MCL_LIB) -shared $(LDFLAGS) $(SHE256_SLIB_LDFLAGS) + +$(SHE384_LIB): $(SHE384_OBJ) + $(AR) $@ $(SHE384_OBJ) + +$(ECDSA_LIB): $(ECDSA_OBJ) + $(AR) $@ $(ECDSA_OBJ) + +$(BN256_SLIB): $(BN256_OBJ) $(MCL_SLIB) + $(PRE)$(CXX) -o $@ $(BN256_OBJ) -shared $(LDFLAGS) $(BN256_SLIB_LDFLAGS) + +$(BN384_LIB): $(BN384_OBJ) + $(AR) $@ $(BN384_OBJ) + +$(BN384_256_LIB): $(BN384_256_OBJ) + $(AR) $@ $(BN384_256_OBJ) + +$(BN512_LIB): $(BN512_OBJ) + $(AR) $@ $(BN512_OBJ) + +$(BN384_SLIB): $(BN384_OBJ) $(MCL_SLIB) + $(PRE)$(CXX) -o $@ $(BN384_OBJ) -shared $(LDFLAGS) $(BN384_SLIB_LDFLAGS) + +$(BN384_256_SLIB): $(BN384_256_OBJ) $(MCL_SLIB) + $(PRE)$(CXX) -o $@ $(BN384_256_OBJ) -shared $(LDFLAGS) $(BN384_256_SLIB_LDFLAGS) + +$(BN512_SLIB): $(BN512_OBJ) $(MCL_SLIB) + $(PRE)$(CXX) -o $@ $(BN512_OBJ) -shared $(LDFLAGS) $(BN512_SLIB_LDFLAGS) + +$(ASM_OBJ): $(ASM_SRC) + $(PRE)$(CXX) -c $< -o $@ $(CFLAGS) + +$(ASM_SRC): $(ASM_SRC_DEP) + $(LLVM_OPT) -O3 -o - $< -march=$(CPU) | $(LLVM_LLC) -O3 -o $@ $(LLVM_FLAGS) + +$(LLVM_SRC): $(GEN_EXE) $(FUNC_LIST) + $(GEN_EXE) $(GEN_EXE_OPT) -f $(FUNC_LIST) > $@ + +$(ASM_SRC_PATH_NAME).bmi2.s: $(ASM_BMI2_SRC_DEP) + $(LLVM_OPT) -O3 -o - $< -march=$(CPU) | $(LLVM_LLC) -O3 -o $@ $(LLVM_FLAGS) -mattr=bmi2 + +$(OBJ_DIR)/$(CPU).bmi2.o: $(ASM_SRC_PATH_NAME).bmi2.s + $(PRE)$(CXX) -c $< -o $@ $(CFLAGS) + +src/base$(BIT).bmi2.ll: $(GEN_EXE) + $(GEN_EXE) $(GEN_EXE_OPT) -f $(FUNC_LIST) -s bmi2 > $@ + +src/base64m.ll: $(GEN_EXE) + $(GEN_EXE) $(GEN_EXE_OPT) -wasm > $@ + +$(FUNC_LIST): $(LOW_ASM_SRC) +ifeq ($(USE_LOW_ASM),1) + $(shell awk '/global/ { print $$2}' $(LOW_ASM_SRC) > $(FUNC_LIST)) + $(shell awk '/proc/ { print $$2}' $(LOW_ASM_SRC) >> $(FUNC_LIST)) +else + $(shell touch $(FUNC_LIST)) +endif + +$(GEN_EXE): src/gen.cpp src/llvm_gen.hpp + $(CXX) -o $@ $< $(CFLAGS) + +asm: $(LLVM_SRC) + $(LLVM_OPT) -O3 -o - $(LLVM_SRC) | $(LLVM_LLC) -O3 $(LLVM_FLAGS) -x86-asm-syntax=intel + +$(LOW_ASM_OBJ): $(LOW_ASM_SRC) + $(ASM) $< + +# set PATH for mingw, set LD_LIBRARY_PATH is for other env +COMMON_LIB_PATH="../../../lib" +PATH_VAL=$$PATH:$(COMMON_LIB_PATH) LD_LIBRARY_PATH=$(COMMON_LIB_PATH) DYLD_LIBRARY_PATH=$(COMMON_LIB_PATH) CGO_CFLAGS="-I$(shell pwd)/include" CGO_LDFLAGS="-L../../../lib" +test_go256: $(MCL_SLIB) $(BN256_SLIB) + cd ffi/go/mcl && env PATH=$(PATH_VAL) go test -tags bn256 . + +test_go384: $(MCL_SLIB) $(BN384_SLIB) + cd ffi/go/mcl && env PATH=$(PATH_VAL) go test -tags bn384 . + +test_go384_256: $(MCL_SLIB) $(BN384_256_SLIB) + cd ffi/go/mcl && env PATH=$(PATH_VAL) go test -tags bn384_256 . 
+ +test_go: + $(MAKE) test_go256 + $(MAKE) test_go384 + $(MAKE) test_go384_256 + +test_python_she: $(SHE256_SLIB) + cd ffi/python && env LD_LIBRARY_PATH="../../lib" DYLD_LIBRARY_PATH="../../lib" PATH=$$PATH:"../../lib" python3 she.py +test_python: + $(MAKE) test_python_she + +test_java: + $(MAKE) -C ffi/java test + +################################################################## + +VPATH=test sample src + +.SUFFIXES: .cpp .d .exe .c .o + +$(OBJ_DIR)/%.o: %.cpp + $(PRE)$(CXX) $(CFLAGS) -c $< -o $@ -MMD -MP -MF $(@:.o=.d) + +$(OBJ_DIR)/%.o: %.c + $(PRE)$(CC) $(CFLAGS) -c $< -o $@ -MMD -MP -MF $(@:.o=.d) + +$(EXE_DIR)/%.exe: $(OBJ_DIR)/%.o $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(MCL_LIB) $(LDFLAGS) + +$(EXE_DIR)/bn_c256_test.exe: $(OBJ_DIR)/bn_c256_test.o $(BN256_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(BN256_LIB) $(MCL_LIB) $(LDFLAGS) + +$(EXE_DIR)/bn_c384_test.exe: $(OBJ_DIR)/bn_c384_test.o $(BN384_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(BN384_LIB) $(MCL_LIB) $(LDFLAGS) + +$(EXE_DIR)/bn_c384_256_test.exe: $(OBJ_DIR)/bn_c384_256_test.o $(BN384_256_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(BN384_256_LIB) $(MCL_LIB) $(LDFLAGS) + +$(EXE_DIR)/bn_c512_test.exe: $(OBJ_DIR)/bn_c512_test.o $(BN512_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(BN512_LIB) $(MCL_LIB) $(LDFLAGS) + +$(EXE_DIR)/pairing_c.exe: $(OBJ_DIR)/pairing_c.o $(BN256_LIB) $(MCL_LIB) + $(PRE)$(CC) $< -o $@ $(BN256_LIB) $(MCL_LIB) $(LDFLAGS) -lstdc++ + +$(EXE_DIR)/she_c256_test.exe: $(OBJ_DIR)/she_c256_test.o $(SHE256_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(SHE256_LIB) $(MCL_LIB) $(LDFLAGS) + +$(EXE_DIR)/she_c384_test.exe: $(OBJ_DIR)/she_c384_test.o $(SHE384_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(SHE384_LIB) $(MCL_LIB) $(LDFLAGS) + +$(EXE_DIR)/ecdsa_c_test.exe: $(OBJ_DIR)/ecdsa_c_test.o $(ECDSA_LIB) $(MCL_LIB) src/ecdsa_c.cpp include/mcl/ecdsa.hpp include/mcl/ecdsa.h + $(PRE)$(CXX) $< -o $@ $(ECDSA_LIB) $(MCL_LIB) $(LDFLAGS) + +$(OBJ_DIR)/modp_test.o: test/modp_test.cpp + $(PRE)$(CXX) -c $< -o $@ -MMD -MP -MF $(@:.o=.d) -DMCL_USE_VINT -DMCL_MAX_BIT_SIZE=384 -DMCL_VINT_64BIT_PORTABLE -DMCL_SIZEOF_UNIT=8 -DMCL_VINT_FIXED_BUFFER -I./include -O2 $(CFLAGS_WARN) + +$(EXE_DIR)/modp_test.exe: $(OBJ_DIR)/modp_test.o + $(PRE)$(CXX) $< -o $@ + +SAMPLE_EXE=$(addprefix $(EXE_DIR)/,$(addsuffix .exe,$(basename $(SAMPLE_SRC)))) +sample: $(SAMPLE_EXE) $(MCL_LIB) + +TEST_EXE=$(addprefix $(EXE_DIR)/,$(TEST_SRC:.cpp=.exe)) +test_ci: $(TEST_EXE) + @sh -ec 'for i in $(TEST_EXE); do echo $$i; env LSAN_OPTIONS=verbosity=1:log_threads=1 $$i; done' +test: $(TEST_EXE) + @echo test $(TEST_EXE) + @sh -ec 'for i in $(TEST_EXE); do $$i|grep "ctest:name"; done' > result.txt + @grep -v "ng=0, exception=0" result.txt; if [ $$? 
-eq 1 ]; then echo "all unit tests succeed"; else exit 1; fi + +EMCC_OPT=-I./include -I./src -Wall -Wextra +EMCC_OPT+=-O3 -DNDEBUG -DMCLSHE_WIN_SIZE=8 +EMCC_OPT+=-s WASM=1 -s NO_EXIT_RUNTIME=1 -s MODULARIZE=1 #-s ASSERTIONS=1 +EMCC_OPT+=-DCYBOZU_MINIMUM_EXCEPTION +EMCC_OPT+=-s ABORTING_MALLOC=0 +SHE_C_DEP=src/fp.cpp src/she_c_impl.hpp include/mcl/she.hpp include/mcl/fp.hpp include/mcl/op.hpp include/mcl/she.h Makefile +MCL_C_DEP=src/fp.cpp include/mcl/impl/bn_c_impl.hpp include/mcl/bn.hpp include/mcl/fp.hpp include/mcl/op.hpp include/mcl/bn.h Makefile +ifeq ($(MCL_USE_LLVM),2) + EMCC_OPT+=src/base64m.ll -DMCL_USE_LLVM + SHE_C_DEP+=src/base64m.ll +endif +../she-wasm/she_c.js: src/she_c256.cpp $(SHE_C_DEP) + emcc -o $@ src/fp.cpp src/she_c256.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=256 -s TOTAL_MEMORY=67108864 -s DISABLE_EXCEPTION_CATCHING=0 + +../she-wasm/she_c384.js: src/she_c384.cpp $(SHE_C_DEP) + emcc -o $@ src/fp.cpp src/she_c384.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=384 -s TOTAL_MEMORY=67108864 -s DISABLE_EXCEPTION_CATCHING=0 + +../mcl-wasm/mcl_c.js: src/bn_c256.cpp $(MCL_C_DEP) + emcc -o $@ src/fp.cpp src/bn_c256.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=256 -DMCL_USE_WEB_CRYPTO_API -s DISABLE_EXCEPTION_CATCHING=1 -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -fno-exceptions -MD -MP -MF obj/mcl_c.d + +../mcl-wasm/mcl_c512.js: src/bn_c512.cpp $(MCL_C_DEP) + emcc -o $@ src/fp.cpp src/bn_c512.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=512 -DMCL_USE_WEB_CRYPTO_API -s DISABLE_EXCEPTION_CATCHING=1 -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -fno-exceptions + +../ecdsa-wasm/ecdsa_c.js: src/ecdsa_c.cpp src/fp.cpp include/mcl/ecdsa.hpp include/mcl/ecdsa.h Makefile + emcc -o $@ src/fp.cpp src/ecdsa_c.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=256 -DMCL_USE_WEB_CRYPTO_API -s DISABLE_EXCEPTION_CATCHING=1 -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -fno-exceptions + +mcl-wasm: + $(MAKE) ../mcl-wasm/mcl_c.js + $(MAKE) ../mcl-wasm/mcl_c512.js + +she-wasm: + $(MAKE) ../she-wasm/she_c.js + $(MAKE) ../she-wasm/she_c384.js + +ecdsa-wasm: + $(MAKE) ../ecdsa-wasm/ecdsa_c.js + +# test +bin/emu: + $(CXX) -g -o $@ src/fp.cpp src/bn_c256.cpp test/bn_c256_test.cpp -DMCL_DONT_USE_XBYAK -DMCL_DONT_USE_OPENSSL -DMCL_USE_VINT -DMCL_SIZEOF_UNIT=8 -DMCL_VINT_64BIT_PORTABLE -DMCL_VINT_FIXED_BUFFER -DMCL_MAX_BIT_SIZE=256 -I./include +bin/pairing_c_min.exe: sample/pairing_c.c include/mcl/vint.hpp src/fp.cpp include/mcl/bn.hpp +# $(CXX) -o $@ sample/pairing_c.c src/fp.cpp src/bn_c256.cpp -O2 -g -I./include -fno-exceptions -fno-rtti -fno-threadsafe-statics -DMCL_DONT_USE_XBYAK -DMCL_DONT_USE_OPENSSL -DMCL_USE_VINT -DMCL_SIZEOF_UNIT=8 -DMCL_VINT_FIXED_BUFFER -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -DMCL_DONT_USE_CSPRNG -DMCL_MAX_BIT_SIZE=256 -DMCL_VINT_64BIT_PORTABLE -DNDEBUG -pg + $(CXX) -o $@ sample/pairing_c.c src/fp.cpp src/bn_c256.cpp -O2 -g -I./include -fno-threadsafe-statics -DMCL_DONT_USE_XBYAK -DMCL_DONT_USE_OPENSSL -DMCL_USE_VINT -DMCL_SIZEOF_UNIT=8 -DMCL_VINT_FIXED_BUFFER -DMCL_DONT_USE_CSPRNG -DMCL_MAX_BIT_SIZE=256 -DMCL_VINT_64BIT_PORTABLE -DNDEBUG + +make_tbl: + $(MAKE) ../bls/src/qcoeff-bn254.hpp + +../bls/src/qcoeff-bn254.hpp: $(MCL_LIB) misc/precompute.cpp + $(CXX) -o misc/precompute misc/precompute.cpp $(CFLAGS) $(MCL_LIB) $(LDFLAGS) + ./misc/precompute > ../bls/src/qcoeff-bn254.hpp + +update_xbyak: + cp -a ../xbyak/xbyak/xbyak.h ../xbyak/xbyak/xbyak_util.h ../xbyak/xbyak/xbyak_mnemonic.h src/xbyak/ + +update_cybozulib: + cp -a $(addprefix ../cybozulib/,$(wildcard include/cybozu/*.hpp)) 
include/cybozu/ + +clean: + $(RM) $(LIB_DIR)/*.a $(LIB_DIR)/*.$(LIB_SUF) $(OBJ_DIR)/*.o $(OBJ_DIR)/*.obj $(OBJ_DIR)/*.d $(EXE_DIR)/*.exe $(GEN_EXE) $(ASM_OBJ) $(LIB_OBJ) $(BN256_OBJ) $(BN384_OBJ) $(BN512_OBJ) $(LLVM_SRC) $(FUNC_LIST) src/*.ll lib/*.a + +ALL_SRC=$(SRC_SRC) $(TEST_SRC) $(SAMPLE_SRC) +DEPEND_FILE=$(addprefix $(OBJ_DIR)/, $(addsuffix .d,$(basename $(ALL_SRC)))) +-include $(DEPEND_FILE) + +PREFIX?=/usr/local +install: lib/libmcl.a lib/libmcl.$(LIB_SUF) + $(MKDIR) $(PREFIX)/include/mcl + cp -a include/mcl/ $(PREFIX)/include/ + cp -a include/cybozu/ $(PREFIX)/include/ + $(MKDIR) $(PREFIX)/lib + cp -a lib/libmcl.a lib/libmcl.$(LIB_SUF) $(PREFIX)/lib/ + +.PHONY: test mcl-wasm she-wasm bin/emu + +# don't remove these files automatically +.SECONDARY: $(addprefix $(OBJ_DIR)/, $(ALL_SRC:.cpp=.o)) + diff --git a/vendor/github.com/byzantine-lab/mcl/bench.txt b/vendor/github.com/byzantine-lab/mcl/bench.txt new file mode 100644 index 000000000..35e47dca5 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/bench.txt @@ -0,0 +1,114 @@ +----------------------------------------------------------------------------- +Core i7-7700@3.6GHz Firefox 58.0.1(64-bit) + BN254 Fp381_1 Fp462 +op msec +Fr::setByCSPRNG 0.022 0.021 0.019 +pairing 2.446 7.353 14.596 +millerLoop 1.467 4.219 8.598 +finalExp 0.97 3.127 6.005 +precomputedMillerLoop 1.087 3.171 6.305 +G1::add 0.007 0.01 0.014 +G1::dbl 0.005 0.007 0.011 +G1::mul 0.479 1.529 3.346 +G2::add 0.013 0.022 0.033 +G2::dbl 0.01 0.016 0.025 +G2::mul 0.989 2.955 5.921 +hashAndMapToG1 0.135 0.309 0.76 +hashAndMapToG2 2.14 6.44 14.249 +Fr::add 0.004 0.003 0.003 +Fr::mul 0.004 0.004 0.005 +Fr::sqr 0.003 0.003 0.004 +Fr::inv 0.025 0.038 0.05 +GT::add 0.005 0.004 0.005 +GT::mul 0.016 0.027 0.041 +GT::sqr 0.012 0.018 0.028 +GT::inv 0.051 0.081 0.122 + +----------------------------------------------------------------------------- +iPhone7 iOS 11.2.1 Safari/604.1 + BN254 Fp381_1 Fp462 +op msec +Fr::setByCSPRNG 0.041 0.038 0.154 +pairing 3.9 11.752 22.578 +millerLoop 2.29 6.55 13.067 +finalExp 1.501 4.741 9.016 +precomputedMillerLoop 1.675 4.818 9.492 +G1::add 0.006 0.015 0.018 +G1::dbl 0.005 0.01 0.019 +G1::mul 0.843 2.615 5.339 +G2::add 0.015 0.03 0.048 +G2::dbl 0.011 0.022 0.034 +G2::mul 1.596 4.581 9.077 +hashAndMapToG1 0.212 0.507 1.201 +hashAndMapToG2 3.486 9.93 21.817 +Fr::add 0.002 0.002 0.002 +Fr::mul 0.002 0.003 0.003 +Fr::sqr 0.002 0.003 0.004 +Fr::inv 0.037 0.062 0.078 +GT::add 0.003 0.003 0.003 +GT::mul 0.021 0.037 0.058 +GT::sqr 0.014 0.026 0.04 +GT::inv 0.074 0.131 0.19 +----------------------------------------------------------------------------- +Core i7-7700@3.6GHz Linux gcc.5.4.0 + + BN254 Fp381_1 Fp462 +G1::mulCT 202.807Kclk 597.410Kclk 1.658Mclk +G1::mulCTsmall 200.968Kclk 596.074Kclk 1.650Mclk +G1::mul 185.935Kclk 555.147Kclk 1.495Mclk +G1::mulsmall 1.856Kclk 3.740Kclk 8.054Kclk +G1::add 866.89 clk 1.710Kclk 3.663Kclk +G1::dbl 798.60 clk 1.770Kclk 3.755Kclk +G2::mulCT 391.655Kclk 1.351Mclk 3.102Mclk +G2::mulCTsmall 369.134Kclk 1.358Mclk 3.105Mclk +G2::mul 400.098Kclk 1.277Mclk 3.009Mclk +G2::mulsmall 5.774Kclk 12.806Kclk 25.374Kclk +G2::add 2.696Kclk 7.547Kclk 14.683Kclk +G2::dbl 2.600Kclk 5.366Kclk 10.436Kclk +GT::pow 727.157Kclk 1.991Mclk 4.364Mclk +hashAndMapToG1 27.953Kclk 87.291Kclk 200.972Kclk +hashAndMapToG2 775.186Kclk 2.629Mclk 6.937Mclk +Fp::add 11.48 clk 69.54 clk 21.36 clk +Fp::mul 63.11 clk 134.90 clk 303.75 clk +Fp::sqr 64.39 clk 134.29 clk 305.38 clk +Fp::inv 2.302Kclk 4.185Kclk 5.485Kclk +GT::add 180.93 clk 247.70 clk 256.55 
clk +GT::mul 5.278Kclk 10.887Kclk 19.844Kclk +GT::sqr 3.666Kclk 7.444Kclk 13.694Kclk +GT::inv 11.322Kclk 22.480Kclk 41.796Kclk +pairing 1.044Mclk 3.445Mclk 7.789Mclk +millerLoop 634.214Kclk 1.913Mclk 4.466Mclk +finalExp 423.413Kclk 1.535Mclk 3.328Mclk +precomputedML 479.849Kclk 1.461Mclk 3.299Mclk +----------------------------------------------------------------------------- + +1.2GHz ARM Cortex-A53 [HiKey] Linux gcc 4.9.2 + + BN254 Fp381_1 Fp462 +G1::mulCT 858.149usec 2.780msec 8.507msec +G1::mulCTsmall 854.535usec 2.773msec 8.499msec +G1::mul 743.100usec 2.484msec 7.536msec +G1::mulsmall 7.680usec 16.528usec 41.818usec +G1::add 3.347usec 7.363usec 18.544usec +G1::dbl 3.294usec 7.351usec 18.472usec +G2::mulCT 1.627msec 5.083msec 12.142msec +G2::mulCTsmall 1.534msec 5.124msec 12.125msec +G2::mul 1.677msec 4.806msec 11.757msec +G2::mulsmall 23.581usec 48.504usec 96.780usec +G2::add 10.751usec 27.759usec 54.392usec +G2::dbl 10.076usec 20.625usec 42.032usec +GT::pow 2.662msec 7.091msec 14.042msec +hashAndMapToG1 111.256usec 372.665usec 1.031msec +hashAndMapToG2 3.199msec 10.168msec 27.391msec +Fp::add 27.19nsec 38.02nsec 45.68nsec +Fp::mul 279.17nsec 628.44nsec 1.662usec +Fp::sqr 276.56nsec 651.67nsec 1.675usec +Fp::inv 9.743usec 14.364usec 18.116usec +GT::add 373.18nsec 530.62nsec 625.26nsec +GT::mul 19.557usec 38.623usec 63.111usec +GT::sqr 13.345usec 26.218usec 43.008usec +GT::inv 44.119usec 84.581usec 153.046usec +pairing 3.913msec 12.606msec 26.818msec +millerLoop 2.402msec 7.202msec 15.711msec +finalExp 1.506msec 5.395msec 11.098msec +precomputedML 1.815msec 5.447msec 11.094msec diff --git a/vendor/github.com/byzantine-lab/mcl/common.mk b/vendor/github.com/byzantine-lab/mcl/common.mk new file mode 100644 index 000000000..5c749e1a6 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/common.mk @@ -0,0 +1,117 @@ +GCC_VER=$(shell $(PRE)$(CC) -dumpversion) +UNAME_S=$(shell uname -s) +ifeq ($(UNAME_S),Linux) + OS=Linux +endif +ifeq ($(findstring MINGW64,$(UNAME_S)),MINGW64) + OS=mingw64 + CFLAGS+=-D__USE_MINGW_ANSI_STDIO=1 +endif +ifeq ($(findstring CYGWIN,$(UNAME_S)),CYGWIN) + OS=cygwin +endif +ifeq ($(UNAME_S),Darwin) + OS=mac + ARCH=x86_64 + LIB_SUF=dylib + OPENSSL_DIR?=/usr/local/opt/openssl + CFLAGS+=-I$(OPENSSL_DIR)/include + LDFLAGS+=-L$(OPENSSL_DIR)/lib + GMP_DIR?=/usr/local/opt/gmp + CFLAGS+=-I$(GMP_DIR)/include + LDFLAGS+=-L$(GMP_DIR)/lib +else + LIB_SUF=so +endif +ARCH?=$(shell uname -m) +ifneq ($(findstring $(ARCH),x86_64/amd64),) + CPU=x86-64 + INTEL=1 + ifeq ($(findstring $(OS),mingw64/cygwin),) + GCC_EXT=1 + endif + BIT=64 + BIT_OPT=-m64 + #LOW_ASM_SRC=src/asm/low_x86-64.asm + #ASM=nasm -felf64 +endif +ifeq ($(ARCH),x86) + CPU=x86 + INTEL=1 + BIT=32 + BIT_OPT=-m32 + #LOW_ASM_SRC=src/asm/low_x86.asm +endif +ifeq ($(ARCH),armv7l) + CPU=arm + BIT=32 + #LOW_ASM_SRC=src/asm/low_arm.s +endif +ifeq ($(ARCH),aarch64) + CPU=aarch64 + BIT=64 +endif +ifeq ($(findstring $(OS),mac/mingw64),) + LDFLAGS+=-lrt +endif + +CP=cp -f +AR=ar r +MKDIR=mkdir -p +RM=rm -rf + +ifeq ($(DEBUG),1) + ifeq ($(GCC_EXT),1) + CFLAGS+=-fsanitize=address + LDFLAGS+=-fsanitize=address + endif +else + CFLAGS_OPT+=-fomit-frame-pointer -DNDEBUG + ifeq ($(CXX),clang++) + CFLAGS_OPT+=-O3 + else + ifeq ($(shell expr $(GCC_VER) \> 4.6.0),1) + CFLAGS_OPT+=-Ofast + else + CFLAGS_OPT+=-O3 + endif + endif + ifeq ($(MARCH),) + ifeq ($(INTEL),1) +# CFLAGS_OPT+=-march=native + endif + else + CFLAGS_OPT+=$(MARCH) + endif +endif +CFLAGS_WARN=-Wall -Wextra -Wformat=2 -Wcast-qual -Wcast-align -Wwrite-strings -Wfloat-equal 
-Wpointer-arith +CFLAGS+=-g3 +INC_OPT=-I include -I test +CFLAGS+=$(CFLAGS_WARN) $(BIT_OPT) $(INC_OPT) +DEBUG=0 +CFLAGS_OPT_USER?=$(CFLAGS_OPT) +ifeq ($(DEBUG),0) +CFLAGS+=$(CFLAGS_OPT_USER) +endif +CFLAGS+=$(CFLAGS_USER) +MCL_USE_GMP?=1 +MCL_USE_OPENSSL?=1 +ifeq ($(MCL_USE_GMP),0) + CFLAGS+=-DMCL_USE_VINT +endif +ifneq ($(MCL_SIZEOF_UNIT),) + CFLAGS+=-DMCL_SIZEOF_UNIT=$(MCL_SIZEOF_UNIT) +endif +ifeq ($(MCL_USE_OPENSSL),0) + CFLAGS+=-DMCL_DONT_USE_OPENSSL +endif +ifeq ($(MCL_USE_GMP),1) + GMP_LIB=-lgmp -lgmpxx +endif +ifeq ($(MCL_USE_OPENSSL),1) + OPENSSL_LIB=-lcrypto +endif +LDFLAGS+=$(GMP_LIB) $(OPENSSL_LIB) $(BIT_OPT) $(LDFLAGS_USER) + +CFLAGS+=-fPIC + diff --git a/vendor/github.com/byzantine-lab/mcl/common.props b/vendor/github.com/byzantine-lab/mcl/common.props new file mode 100644 index 000000000..912f39e30 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/common.props @@ -0,0 +1,26 @@ + + + + + + $(SolutionDir)bin\ + + + + $(SolutionDir)../cybozulib/include;$(SolutionDir)../cybozulib_ext/include;$(SolutionDir)include;$(SolutionDir)../xbyak + + + + + Level4 + MultiThreaded + + + _MBCS;%(PreprocessorDefinitions);NOMINMAX + + + $(SolutionDir)../cybozulib_ext/lib;$(SolutionDir)lib + + + + diff --git a/vendor/github.com/byzantine-lab/mcl/debug.props b/vendor/github.com/byzantine-lab/mcl/debug.props new file mode 100644 index 000000000..1553ae0dc --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/debug.props @@ -0,0 +1,14 @@ + + + + + + $(ProjectName)d + + + + MultiThreadedDebug + + + + \ No newline at end of file diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/cs/App.config b/vendor/github.com/byzantine-lab/mcl/ffi/cs/App.config new file mode 100644 index 000000000..88fa4027b --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/ffi/cs/App.config @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/cs/Properties/AssemblyInfo.cs b/vendor/github.com/byzantine-lab/mcl/ffi/cs/Properties/AssemblyInfo.cs new file mode 100644 index 000000000..c87e1d44b --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/ffi/cs/Properties/AssemblyInfo.cs @@ -0,0 +1,36 @@ +using System.Reflection; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +// General Information about an assembly is controlled through the following +// set of attributes. Change these attribute values to modify the information +// associated with an assembly. +[assembly: AssemblyTitle("bn256")] +[assembly: AssemblyDescription("")] +[assembly: AssemblyConfiguration("")] +[assembly: AssemblyCompany("")] +[assembly: AssemblyProduct("bn256")] +[assembly: AssemblyCopyright("Copyright © 2017")] +[assembly: AssemblyTrademark("")] +[assembly: AssemblyCulture("")] + +// Setting ComVisible to false makes the types in this assembly not visible +// to COM components. If you need to access a type in this assembly from +// COM, set the ComVisible attribute to true on that type. +[assembly: ComVisible(false)] + +// The following GUID is for the ID of the typelib if this project is exposed to COM +[assembly: Guid("e9d06b1b-ea22-4ef4-ba4b-422f7625966b")] + +// Version information for an assembly consists of the following four values: +// +// Major Version +// Minor Version +// Build Number +// Revision +// +// You can specify all the values or you can default the Build and Revision Numbers +// by using the '*' as shown below: +// [assembly: AssemblyVersion("1.0.*")] +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] diff
--git a/vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256.cs b/vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256.cs new file mode 100644 index 000000000..0e1ed032c --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256.cs @@ -0,0 +1,475 @@ +using System; +using System.Text; +using System.Runtime.InteropServices; + +namespace mcl { + public class BN256 { + [DllImport("mclBn256.dll")] + public static extern int mclBn_init(int curve, int maxUnitSize); + [DllImport("mclBn256.dll")] + public static extern void mclBnFr_clear(ref Fr x); + [DllImport("mclBn256.dll")] + public static extern void mclBnFr_setInt(ref Fr y, int x); + [DllImport("mclBn256.dll")] + public static extern int mclBnFr_setStr(ref Fr x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize, int ioMode); + [DllImport("mclBn256.dll")] + public static extern int mclBnFr_isValid(ref Fr x); + [DllImport("mclBn256.dll")] + public static extern int mclBnFr_isEqual(ref Fr x, ref Fr y); + [DllImport("mclBn256.dll")] + public static extern int mclBnFr_isZero(ref Fr x); + [DllImport("mclBn256.dll")] + public static extern int mclBnFr_isOne(ref Fr x); + [DllImport("mclBn256.dll")] + public static extern void mclBnFr_setByCSPRNG(ref Fr x); + + [DllImport("mclBn256.dll")] + public static extern int mclBnFr_setHashOf(ref Fr x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize); + [DllImport("mclBn256.dll")] + public static extern int mclBnFr_getStr([Out]StringBuilder buf, long maxBufSize, ref Fr x, int ioMode); + + [DllImport("mclBn256.dll")] + public static extern void mclBnFr_neg(ref Fr y, ref Fr x); + [DllImport("mclBn256.dll")] + public static extern void mclBnFr_inv(ref Fr y, ref Fr x); + [DllImport("mclBn256.dll")] + public static extern void mclBnFr_add(ref Fr z, ref Fr x, ref Fr y); + [DllImport("mclBn256.dll")] + public static extern void mclBnFr_sub(ref Fr z, ref Fr x, ref Fr y); + [DllImport("mclBn256.dll")] + public static extern void mclBnFr_mul(ref Fr z, ref Fr x, ref Fr y); + [DllImport("mclBn256.dll")] + public static extern void mclBnFr_div(ref Fr z, ref Fr x, ref Fr y); + + [DllImport("mclBn256.dll")] + public static extern void mclBnG1_clear(ref G1 x); + [DllImport("mclBn256.dll")] + public static extern int mclBnG1_setStr(ref G1 x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize, int ioMode); + [DllImport("mclBn256.dll")] + public static extern int mclBnG1_isValid(ref G1 x); + [DllImport("mclBn256.dll")] + public static extern int mclBnG1_isEqual(ref G1 x, ref G1 y); + [DllImport("mclBn256.dll")] + public static extern int mclBnG1_isZero(ref G1 x); + [DllImport("mclBn256.dll")] + public static extern int mclBnG1_hashAndMapTo(ref G1 x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize); + [DllImport("mclBn256.dll")] + public static extern long mclBnG1_getStr([Out]StringBuilder buf, long maxBufSize, ref G1 x, int ioMode); + [DllImport("mclBn256.dll")] + public static extern void mclBnG1_neg(ref G1 y, ref G1 x); + [DllImport("mclBn256.dll")] + public static extern void mclBnG1_dbl(ref G1 y, ref G1 x); + [DllImport("mclBn256.dll")] + public static extern void mclBnG1_add(ref G1 z, ref G1 x, ref G1 y); + [DllImport("mclBn256.dll")] + public static extern void mclBnG1_sub(ref G1 z, ref G1 x, ref G1 y); + [DllImport("mclBn256.dll")] + public static extern void mclBnG1_mul(ref G1 z, ref G1 x, ref Fr y); + + [DllImport("mclBn256.dll")] + public static extern void mclBnG2_clear(ref G2 x); + [DllImport("mclBn256.dll")] + public static extern int mclBnG2_setStr(ref G2 x, 
[In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize, int ioMode); + [DllImport("mclBn256.dll")] + public static extern int mclBnG2_isValid(ref G2 x); + [DllImport("mclBn256.dll")] + public static extern int mclBnG2_isEqual(ref G2 x, ref G2 y); + [DllImport("mclBn256.dll")] + public static extern int mclBnG2_isZero(ref G2 x); + [DllImport("mclBn256.dll")] + public static extern int mclBnG2_hashAndMapTo(ref G2 x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize); + [DllImport("mclBn256.dll")] + public static extern long mclBnG2_getStr([Out]StringBuilder buf, long maxBufSize, ref G2 x, int ioMode); + [DllImport("mclBn256.dll")] + public static extern void mclBnG2_neg(ref G2 y, ref G2 x); + [DllImport("mclBn256.dll")] + public static extern void mclBnG2_dbl(ref G2 y, ref G2 x); + [DllImport("mclBn256.dll")] + public static extern void mclBnG2_add(ref G2 z, ref G2 x, ref G2 y); + [DllImport("mclBn256.dll")] + public static extern void mclBnG2_sub(ref G2 z, ref G2 x, ref G2 y); + [DllImport("mclBn256.dll")] + public static extern void mclBnG2_mul(ref G2 z, ref G2 x, ref Fr y); + + [DllImport("mclBn256.dll")] + public static extern void mclBnGT_clear(ref GT x); + [DllImport("mclBn256.dll")] + public static extern int mclBnGT_setStr(ref GT x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize, int ioMode); + [DllImport("mclBn256.dll")] + public static extern int mclBnGT_isEqual(ref GT x, ref GT y); + [DllImport("mclBn256.dll")] + public static extern int mclBnGT_isZero(ref GT x); + [DllImport("mclBn256.dll")] + public static extern int mclBnGT_isOne(ref GT x); + [DllImport("mclBn256.dll")] + public static extern long mclBnGT_getStr([Out]StringBuilder buf, long maxBufSize, ref GT x, int ioMode); + [DllImport("mclBn256.dll")] + public static extern void mclBnGT_neg(ref GT y, ref GT x); + [DllImport("mclBn256.dll")] + public static extern void mclBnGT_inv(ref GT y, ref GT x); + [DllImport("mclBn256.dll")] + public static extern void mclBnGT_add(ref GT z, ref GT x, ref GT y); + [DllImport("mclBn256.dll")] + public static extern void mclBnGT_sub(ref GT z, ref GT x, ref GT y); + [DllImport("mclBn256.dll")] + public static extern void mclBnGT_mul(ref GT z, ref GT x, ref GT y); + [DllImport("mclBn256.dll")] + public static extern void mclBnGT_div(ref GT z, ref GT x, ref GT y); + + [DllImport("mclBn256.dll")] + public static extern void mclBnGT_pow(ref GT z, ref GT x, ref Fr y); + [DllImport("mclBn256.dll")] + public static extern void mclBn_pairing(ref GT z, ref G1 x, ref G2 y); + [DllImport("mclBn256.dll")] + public static extern void mclBn_finalExp(ref GT y, ref GT x); + [DllImport("mclBn256.dll")] + public static extern void mclBn_millerLoop(ref GT z, ref G1 x, ref G2 y); + + public static void init() + { + const int curveFp254BNb = 0; + const int maxUnitSize = 4; + if (mclBn_init(curveFp254BNb, maxUnitSize) != 0) { + throw new InvalidOperationException("mclBn_init"); + } + } + [StructLayout(LayoutKind.Sequential)] + public struct Fr { + private ulong v0, v1, v2, v3; + public void Clear() + { + mclBnFr_clear(ref this); + } + public void SetInt(int x) + { + mclBnFr_setInt(ref this, x); + } + public void SetStr(string s, int ioMode) + { + if (mclBnFr_setStr(ref this, s, s.Length, ioMode) != 0) { + throw new ArgumentException("mclBnFr_setStr" + s); + } + } + public bool IsValid() + { + return mclBnFr_isValid(ref this) == 1; + } + public bool Equals(Fr rhs) + { + return mclBnFr_isEqual(ref this, ref rhs) == 1; + } + public bool IsZero() + { + return mclBnFr_isZero(ref 
this) == 1; + } + public bool IsOne() + { + return mclBnFr_isOne(ref this) == 1; + } + public void SetByCSPRNG() + { + mclBnFr_setByCSPRNG(ref this); + } + public void SetHashOf(String s) + { + if (mclBnFr_setHashOf(ref this, s, s.Length) != 0) { + throw new InvalidOperationException("mclBnFr_setHashOf:" + s); + } + } + public string GetStr(int ioMode) + { + StringBuilder sb = new StringBuilder(1024); + long size = mclBnFr_getStr(sb, sb.Capacity, ref this, ioMode); + if (size == 0) { + throw new InvalidOperationException("mclBnFr_getStr:"); + } + return sb.ToString(); + } + public void Neg(Fr x) + { + mclBnFr_neg(ref this, ref x); + } + public void Inv(Fr x) + { + mclBnFr_inv(ref this, ref x); + } + public void Add(Fr x, Fr y) + { + mclBnFr_add(ref this, ref x, ref y); + } + public void Sub(Fr x, Fr y) + { + mclBnFr_sub(ref this, ref x, ref y); + } + public void Mul(Fr x, Fr y) + { + mclBnFr_mul(ref this, ref x, ref y); + } + public void Div(Fr x, Fr y) + { + mclBnFr_div(ref this, ref x, ref y); + } + public static Fr operator -(Fr x) + { + Fr y = new Fr(); + y.Neg(x); + return y; + } + public static Fr operator +(Fr x, Fr y) + { + Fr z = new Fr(); + z.Add(x, y); + return z; + } + public static Fr operator -(Fr x, Fr y) + { + Fr z = new Fr(); + z.Sub(x, y); + return z; + } + public static Fr operator *(Fr x, Fr y) + { + Fr z = new Fr(); + z.Mul(x, y); + return z; + } + public static Fr operator /(Fr x, Fr y) + { + Fr z = new Fr(); + z.Div(x, y); + return z; + } + } + [StructLayout(LayoutKind.Sequential)] + public struct G1 { + private ulong v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11; + public void Clear() + { + mclBnG1_clear(ref this); + } + public void setStr(String s, int ioMode) + { + if (mclBnG1_setStr(ref this, s, s.Length, ioMode) != 0) { + throw new ArgumentException("mclBnG1_setStr:" + s); + } + } + public bool IsValid() + { + return mclBnG1_isValid(ref this) == 1; + } + public bool Equals(G1 rhs) + { + return mclBnG1_isEqual(ref this, ref rhs) == 1; + } + public bool IsZero() + { + return mclBnG1_isZero(ref this) == 1; + } + public void HashAndMapTo(String s) + { + if (mclBnG1_hashAndMapTo(ref this, s, s.Length) != 0) { + throw new ArgumentException("mclBnG1_hashAndMapTo:" + s); + } + } + public string GetStr(int ioMode) + { + StringBuilder sb = new StringBuilder(1024); + long size = mclBnG1_getStr(sb, sb.Capacity, ref this, ioMode); + if (size == 0) { + throw new InvalidOperationException("mclBnG1_getStr:"); + } + return sb.ToString(); + } + public void Neg(G1 x) + { + mclBnG1_neg(ref this, ref x); + } + public void Dbl(G1 x) + { + mclBnG1_dbl(ref this, ref x); + } + public void Add(G1 x, G1 y) + { + mclBnG1_add(ref this, ref x, ref y); + } + public void Sub(G1 x, G1 y) + { + mclBnG1_sub(ref this, ref x, ref y); + } + public void Mul(G1 x, Fr y) + { + mclBnG1_mul(ref this, ref x, ref y); + } + } + [StructLayout(LayoutKind.Sequential)] + public struct G2 { + private ulong v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11; + private ulong v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23; + public void Clear() + { + mclBnG2_clear(ref this); + } + public void setStr(String s, int ioMode) + { + if (mclBnG2_setStr(ref this, s, s.Length, ioMode) != 0) { + throw new ArgumentException("mclBnG2_setStr:" + s); + } + } + public bool IsValid() + { + return mclBnG2_isValid(ref this) == 1; + } + public bool Equals(G2 rhs) + { + return mclBnG2_isEqual(ref this, ref rhs) == 1; + } + public bool IsZero() + { + return mclBnG2_isZero(ref this) == 1; + } + public 
void HashAndMapTo(String s) + { + if (mclBnG2_hashAndMapTo(ref this, s, s.Length) != 0) { + throw new ArgumentException("mclBnG2_hashAndMapTo:" + s); + } + } + public string GetStr(int ioMode) + { + StringBuilder sb = new StringBuilder(1024); + long size = mclBnG2_getStr(sb, sb.Capacity, ref this, ioMode); + if (size == 0) { + throw new InvalidOperationException("mclBnG2_getStr:"); + } + return sb.ToString(); + } + public void Neg(G2 x) + { + mclBnG2_neg(ref this, ref x); + } + public void Dbl(G2 x) + { + mclBnG2_dbl(ref this, ref x); + } + public void Add(G2 x, G2 y) + { + mclBnG2_add(ref this, ref x, ref y); + } + public void Sub(G2 x, G2 y) + { + mclBnG2_sub(ref this, ref x, ref y); + } + public void Mul(G2 x, Fr y) + { + mclBnG2_mul(ref this, ref x, ref y); + } + } + [StructLayout(LayoutKind.Sequential)] + public struct GT { + private ulong v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11; + private ulong v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23; + private ulong v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35; + private ulong v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47; + public void Clear() + { + mclBnGT_clear(ref this); + } + public void setStr(String s, int ioMode) + { + if (mclBnGT_setStr(ref this, s, s.Length, ioMode) != 0) { + throw new ArgumentException("mclBnGT_setStr:" + s); + } + } + public bool Equals(GT rhs) + { + return mclBnGT_isEqual(ref this, ref rhs) == 1; + } + public bool IsZero() + { + return mclBnGT_isZero(ref this) == 1; + } + public bool IsOne() + { + return mclBnGT_isOne(ref this) == 1; + } + public string GetStr(int ioMode) + { + StringBuilder sb = new StringBuilder(1024); + long size = mclBnGT_getStr(sb, sb.Capacity, ref this, ioMode); + if (size == 0) { + throw new InvalidOperationException("mclBnGT_getStr:"); + } + return sb.ToString(); + } + public void Neg(GT x) + { + mclBnGT_neg(ref this, ref x); + } + public void Inv(GT x) + { + mclBnGT_inv(ref this, ref x); + } + public void Add(GT x, GT y) + { + mclBnGT_add(ref this, ref x, ref y); + } + public void Sub(GT x, GT y) + { + mclBnGT_sub(ref this, ref x, ref y); + } + public void Mul(GT x, GT y) + { + mclBnGT_mul(ref this, ref x, ref y); + } + public void Div(GT x, GT y) + { + mclBnGT_div(ref this, ref x, ref y); + } + public static GT operator -(GT x) + { + GT y = new GT(); + y.Neg(x); + return y; + } + public static GT operator +(GT x, GT y) + { + GT z = new GT(); + z.Add(x, y); + return z; + } + public static GT operator -(GT x, GT y) + { + GT z = new GT(); + z.Sub(x, y); + return z; + } + public static GT operator *(GT x, GT y) + { + GT z = new GT(); + z.Mul(x, y); + return z; + } + public static GT operator /(GT x, GT y) + { + GT z = new GT(); + z.Div(x, y); + return z; + } + public void Pow(GT x, Fr y) + { + mclBnGT_pow(ref this, ref x, ref y); + } + public void Pairing(G1 x, G2 y) + { + mclBn_pairing(ref this, ref x, ref y); + } + public void FinalExp(GT x) + { + mclBn_finalExp(ref this, ref x); + } + public void MillerLoop(G1 x, G2 y) + { + mclBn_millerLoop(ref this, ref x, ref y); + } + } + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256.csproj b/vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256.csproj new file mode 100644 index 000000000..21a049f01 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256.csproj @@ -0,0 +1,62 @@ + + + + + Debug + AnyCPU + {E9D06B1B-EA22-4EF4-BA4B-422F7625966B} + Exe + Properties + bn256 + bn256 + v4.5.2 + 512 + true + + + true + ..\..\bin\ + DEBUG;TRACE + false + full + x64 + prompt 
+ MinimumRecommendedRules.ruleset + + + ..\..\bin\ + TRACE + true + pdbonly + x64 + prompt + MinimumRecommendedRules.ruleset + true + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256.sln b/vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256.sln new file mode 100644 index 000000000..6e6aa67ee --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256.sln @@ -0,0 +1,22 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.25420.1 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "bn256", "bn256.csproj", "{E9D06B1B-EA22-4EF4-BA4B-422F7625966B}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Release|x64 = Release|x64 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {E9D06B1B-EA22-4EF4-BA4B-422F7625966B}.Debug|x64.ActiveCfg = Debug|x64 + {E9D06B1B-EA22-4EF4-BA4B-422F7625966B}.Debug|x64.Build.0 = Debug|x64 + {E9D06B1B-EA22-4EF4-BA4B-422F7625966B}.Release|x64.ActiveCfg = Release|x64 + {E9D06B1B-EA22-4EF4-BA4B-422F7625966B}.Release|x64.Build.0 = Release|x64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256_test.cs b/vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256_test.cs new file mode 100644 index 000000000..cad8c03d3 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/ffi/cs/bn256_test.cs @@ -0,0 +1,149 @@ +using System; + +namespace mcl { + using static BN256; + class BN256Test { + static int err = 0; + static void assert(string msg, bool b) + { + if (b) return; + Console.WriteLine("ERR {0}", msg); + err++; + } + static void Main(string[] args) + { + try { + assert("64bit system", System.Environment.Is64BitProcess); + init(); + TestFr(); + TestG1(); + TestG2(); + TestPairing(); + if (err == 0) { + Console.WriteLine("all tests succeed"); + } else { + Console.WriteLine("err={0}", err); + } + } catch (Exception e) { + Console.WriteLine("ERR={0}", e); + } + } + static void TestFr() + { + Console.WriteLine("TestFr"); + Fr x = new Fr(); + x.Clear(); + assert("0", x.GetStr(10) == "0"); + assert("0.IzZero", x.IsZero()); + assert("!0.IzOne", !x.IsOne()); + x.SetInt(1); + assert("1", x.GetStr(10) == "1"); + assert("!1.IzZero", !x.IsZero()); + assert("1.IzOne", x.IsOne()); + x.SetInt(3); + assert("3", x.GetStr(10) == "3"); + assert("!3.IzZero", !x.IsZero()); + assert("!3.IzOne", !x.IsOne()); + x.SetInt(-5); + x = -x; + assert("5", x.GetStr(10) == "5"); + x.SetInt(4); + x = x * x; + assert("16", x.GetStr(10) == "16"); + assert("10", x.GetStr(16) == "10"); + Fr y; + y = x; + assert("x == y", x.Equals(y)); + x.SetInt(123); + assert("123", x.GetStr(10) == "123"); + assert("7b", x.GetStr(16) == "7b"); + assert("y != x", !x.Equals(y)); + try { + x.SetStr("1234567891234x", 10); + Console.WriteLine("x = {0}", x); + } catch (Exception e) { + Console.WriteLine("exception test OK\n'{0}'", e); + } + x.SetStr("1234567891234", 10); + assert("1234567891234", x.GetStr(10) == "1234567891234"); + } + static void TestG1() + { + Console.WriteLine("TestG1"); + G1 P = new G1(); + P.Clear(); + assert("P.IsValid", P.IsValid()); + assert("P.IsZero", P.IsZero()); + P.HashAndMapTo("abc"); + assert("P.IsValid", P.IsValid()); + assert("!P.IsZero", !P.IsZero()); + G1 Q = new G1(); + Q = P; + assert("P == Q", 
Q.Equals(P)); + Q.Neg(P); + Q.Add(Q, P); + assert("Q is zero", Q.IsZero()); + Q.Dbl(P); + G1 R = new G1(); + R.Add(P, P); + assert("Q == R", Q.Equals(R)); + Fr x = new Fr(); + x.SetInt(3); + R.Add(R, P); + Q.Mul(P, x); + assert("Q == R", Q.Equals(R)); + } + static void TestG2() + { + Console.WriteLine("TestG2"); + G2 P = new G2(); + P.Clear(); + assert("P is valid", P.IsValid()); + assert("P is zero", P.IsZero()); + P.HashAndMapTo("abc"); + assert("P is valid", P.IsValid()); + assert("P is not zero", !P.IsZero()); + G2 Q = new G2(); + Q = P; + assert("P == Q", Q.Equals(P)); + Q.Neg(P); + Q.Add(Q, P); + assert("Q is zero", Q.IsZero()); + Q.Dbl(P); + G2 R = new G2(); + R.Add(P, P); + assert("Q == R", Q.Equals(R)); + Fr x = new Fr(); + x.SetInt(3); + R.Add(R, P); + Q.Mul(P, x); + assert("Q == R", Q.Equals(R)); + } + static void TestPairing() + { + Console.WriteLine("TestPairing"); + G1 P = new G1(); + P.HashAndMapTo("123"); + G2 Q = new G2(); + Q.HashAndMapTo("1"); + Fr a = new Fr(); + Fr b = new Fr(); + a.SetStr("12345678912345673453", 10); + b.SetStr("230498230982394243424", 10); + G1 aP = new G1(); + G2 bQ = new G2(); + aP.Mul(P, a); + bQ.Mul(Q, b); + GT e1 = new GT(); + GT e2 = new GT(); + GT e3 = new GT(); + e1.Pairing(P, Q); + e2.Pairing(aP, Q); + e3.Pow(e1, a); + assert("e2.Equals(e3)", e2.Equals(e3)); + e2.Pairing(P, bQ); + e3.Pow(e1, b); + assert("e2.Equals(e3)", e2.Equals(e3)); + } + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/go/mcl/mcl.go b/vendor/github.com/byzantine-lab/mcl/ffi/go/mcl/mcl.go new file mode 100644 index 000000000..a0c8bb4d3 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/ffi/go/mcl/mcl.go @@ -0,0 +1,659 @@ +package mcl + +/* +#cgo bn256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=4 +#cgo bn384 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6 +#cgo bn384_256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6 -DMCLBN_FR_UNIT_SIZE=4 +#cgo bn256 LDFLAGS:-lmclbn256 -lmcl +#cgo bn384 LDFLAGS:-lmclbn384 -lmcl +#cgo bn384_256 LDFLAGS:-lmclbn384_256 -lmcl +#include <mcl/bn.h> +*/ +import "C" +import "fmt" +import "unsafe" + +// CurveFp254BNb -- 254 bit curve +const CurveFp254BNb = C.mclBn_CurveFp254BNb + +// CurveFp382_1 -- 382 bit curve 1 +const CurveFp382_1 = C.mclBn_CurveFp382_1 + +// CurveFp382_2 -- 382 bit curve 2 +const CurveFp382_2 = C.mclBn_CurveFp382_2 + +// BLS12_381 +const BLS12_381 = C.MCL_BLS12_381 + +// IoSerializeHexStr +const IoSerializeHexStr = C.MCLBN_IO_SERIALIZE_HEX_STR + +// Init -- +// call this function before calling all the other operations +// this function is not thread safe +func Init(curve int) error { + err := C.mclBn_init(C.int(curve), C.MCLBN_COMPILED_TIME_VAR) + if err != 0 { + return fmt.Errorf("ERR mclBn_init curve=%d", curve) + } + return nil +} + +// GetFrUnitSize() -- +func GetFrUnitSize() int { + return int(C.MCLBN_FR_UNIT_SIZE) +} + +// GetFpUnitSize() -- +// same as GetMaxOpUnitSize() +func GetFpUnitSize() int { + return int(C.MCLBN_FP_UNIT_SIZE) +} + +// GetMaxOpUnitSize -- +func GetMaxOpUnitSize() int { + return int(C.MCLBN_FP_UNIT_SIZE) +} + +// GetOpUnitSize -- +// the length of Fr is GetOpUnitSize() * 8 bytes +func GetOpUnitSize() int { + return int(C.mclBn_getOpUnitSize()) +} + +// GetCurveOrder -- +// return the order of G1 +func GetCurveOrder() string { + buf := make([]byte, 1024) + // #nosec + n := C.mclBn_getCurveOrder((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf))) + if n == 0 { + panic("implementation err.
size of buf is small") + } + return string(buf[:n]) +} + +// GetFieldOrder -- +// return the characteristic of the field where a curve is defined +func GetFieldOrder() string { + buf := make([]byte, 1024) + // #nosec + n := C.mclBn_getFieldOrder((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf))) + if n == 0 { + panic("implementation err. size of buf is small") + } + return string(buf[:n]) +} + +// Fr -- +type Fr struct { + v C.mclBnFr +} + +// getPointer -- +func (x *Fr) getPointer() (p *C.mclBnFr) { + // #nosec + return (*C.mclBnFr)(unsafe.Pointer(x)) +} + +// Clear -- +func (x *Fr) Clear() { + // #nosec + C.mclBnFr_clear(x.getPointer()) +} + +// SetInt64 -- +func (x *Fr) SetInt64(v int64) { + // #nosec + C.mclBnFr_setInt(x.getPointer(), C.int64_t(v)) +} + +// SetString -- +func (x *Fr) SetString(s string, base int) error { + buf := []byte(s) + // #nosec + err := C.mclBnFr_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) + if err != 0 { + return fmt.Errorf("err mclBnFr_setStr %x", err) + } + return nil +} + +// Deserialize -- +func (x *Fr) Deserialize(buf []byte) error { + // #nosec + err := C.mclBnFr_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) + if err == 0 { + return fmt.Errorf("err mclBnFr_deserialize %x", buf) + } + return nil +} + +// SetLittleEndian -- +func (x *Fr) SetLittleEndian(buf []byte) error { + // #nosec + err := C.mclBnFr_setLittleEndian(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) + if err != 0 { + return fmt.Errorf("err mclBnFr_setLittleEndian %x", err) + } + return nil +} + +// IsEqual -- +func (x *Fr) IsEqual(rhs *Fr) bool { + return C.mclBnFr_isEqual(x.getPointer(), rhs.getPointer()) == 1 +} + +// IsZero -- +func (x *Fr) IsZero() bool { + return C.mclBnFr_isZero(x.getPointer()) == 1 +} + +// IsOne -- +func (x *Fr) IsOne() bool { + return C.mclBnFr_isOne(x.getPointer()) == 1 +} + +// SetByCSPRNG -- +func (x *Fr) SetByCSPRNG() { + err := C.mclBnFr_setByCSPRNG(x.getPointer()) + if err != 0 { + panic("err mclBnFr_setByCSPRNG") + } +} + +// SetHashOf -- +func (x *Fr) SetHashOf(buf []byte) bool { + // #nosec + return C.mclBnFr_setHashOf(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) == 0 +} + +// GetString -- +func (x *Fr) GetString(base int) string { + buf := make([]byte, 2048) + // #nosec + n := C.mclBnFr_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) + if n == 0 { + panic("err mclBnFr_getStr") + } + return string(buf[:n]) +} + +// Serialize -- +func (x *Fr) Serialize() []byte { + buf := make([]byte, 2048) + // #nosec + n := C.mclBnFr_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) + if n == 0 { + panic("err mclBnFr_serialize") + } + return buf[:n] +} + +// FrNeg -- +func FrNeg(out *Fr, x *Fr) { + C.mclBnFr_neg(out.getPointer(), x.getPointer()) +} + +// FrInv -- +func FrInv(out *Fr, x *Fr) { + C.mclBnFr_inv(out.getPointer(), x.getPointer()) +} + +// FrAdd -- +func FrAdd(out *Fr, x *Fr, y *Fr) { + C.mclBnFr_add(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// FrSub -- +func FrSub(out *Fr, x *Fr, y *Fr) { + C.mclBnFr_sub(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// FrMul -- +func FrMul(out *Fr, x *Fr, y *Fr) { + C.mclBnFr_mul(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// FrDiv -- +func FrDiv(out *Fr, x *Fr, y *Fr) { + C.mclBnFr_div(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// G1 -- +type G1 struct { + v C.mclBnG1 +} + +// getPointer -- +func (x *G1) 
getPointer() (p *C.mclBnG1) { + // #nosec + return (*C.mclBnG1)(unsafe.Pointer(x)) +} + +// Clear -- +func (x *G1) Clear() { + // #nosec + C.mclBnG1_clear(x.getPointer()) +} + +// SetString -- +func (x *G1) SetString(s string, base int) error { + buf := []byte(s) + // #nosec + err := C.mclBnG1_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) + if err != 0 { + return fmt.Errorf("err mclBnG1_setStr %x", err) + } + return nil +} + +// Deserialize -- +func (x *G1) Deserialize(buf []byte) error { + // #nosec + err := C.mclBnG1_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) + if err == 0 { + return fmt.Errorf("err mclBnG1_deserialize %x", buf) + } + return nil +} + +// IsEqual -- +func (x *G1) IsEqual(rhs *G1) bool { + return C.mclBnG1_isEqual(x.getPointer(), rhs.getPointer()) == 1 +} + +// IsZero -- +func (x *G1) IsZero() bool { + return C.mclBnG1_isZero(x.getPointer()) == 1 +} + +// HashAndMapTo -- +func (x *G1) HashAndMapTo(buf []byte) error { + // #nosec + err := C.mclBnG1_hashAndMapTo(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) + if err != 0 { + return fmt.Errorf("err mclBnG1_hashAndMapTo %x", err) + } + return nil +} + +// GetString -- +func (x *G1) GetString(base int) string { + buf := make([]byte, 2048) + // #nosec + n := C.mclBnG1_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) + if n == 0 { + panic("err mclBnG1_getStr") + } + return string(buf[:n]) +} + +// Serialize -- +func (x *G1) Serialize() []byte { + buf := make([]byte, 2048) + // #nosec + n := C.mclBnG1_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) + if n == 0 { + panic("err mclBnG1_serialize") + } + return buf[:n] +} + +// G1Neg -- +func G1Neg(out *G1, x *G1) { + C.mclBnG1_neg(out.getPointer(), x.getPointer()) +} + +// G1Dbl -- +func G1Dbl(out *G1, x *G1) { + C.mclBnG1_dbl(out.getPointer(), x.getPointer()) +} + +// G1Add -- +func G1Add(out *G1, x *G1, y *G1) { + C.mclBnG1_add(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// G1Sub -- +func G1Sub(out *G1, x *G1, y *G1) { + C.mclBnG1_sub(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// G1Mul -- +func G1Mul(out *G1, x *G1, y *Fr) { + C.mclBnG1_mul(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// G1MulCT -- constant time (depending on bit lengh of y) +func G1MulCT(out *G1, x *G1, y *Fr) { + C.mclBnG1_mulCT(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// G2 -- +type G2 struct { + v C.mclBnG2 +} + +// getPointer -- +func (x *G2) getPointer() (p *C.mclBnG2) { + // #nosec + return (*C.mclBnG2)(unsafe.Pointer(x)) +} + +// Clear -- +func (x *G2) Clear() { + // #nosec + C.mclBnG2_clear(x.getPointer()) +} + +// SetString -- +func (x *G2) SetString(s string, base int) error { + buf := []byte(s) + // #nosec + err := C.mclBnG2_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) + if err != 0 { + return fmt.Errorf("err mclBnG2_setStr %x", err) + } + return nil +} + +// Deserialize -- +func (x *G2) Deserialize(buf []byte) error { + // #nosec + err := C.mclBnG2_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) + if err == 0 { + return fmt.Errorf("err mclBnG2_deserialize %x", buf) + } + return nil +} + +// IsEqual -- +func (x *G2) IsEqual(rhs *G2) bool { + return C.mclBnG2_isEqual(x.getPointer(), rhs.getPointer()) == 1 +} + +// IsZero -- +func (x *G2) IsZero() bool { + return C.mclBnG2_isZero(x.getPointer()) == 1 +} + +// HashAndMapTo -- +func 
(x *G2) HashAndMapTo(buf []byte) error { + // #nosec + err := C.mclBnG2_hashAndMapTo(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) + if err != 0 { + return fmt.Errorf("err mclBnG2_hashAndMapTo %x", err) + } + return nil +} + +// GetString -- +func (x *G2) GetString(base int) string { + buf := make([]byte, 2048) + // #nosec + n := C.mclBnG2_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) + if n == 0 { + panic("err mclBnG2_getStr") + } + return string(buf[:n]) +} + +// Serialize -- +func (x *G2) Serialize() []byte { + buf := make([]byte, 2048) + // #nosec + n := C.mclBnG2_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) + if n == 0 { + panic("err mclBnG2_serialize") + } + return buf[:n] +} + +// G2Neg -- +func G2Neg(out *G2, x *G2) { + C.mclBnG2_neg(out.getPointer(), x.getPointer()) +} + +// G2Dbl -- +func G2Dbl(out *G2, x *G2) { + C.mclBnG2_dbl(out.getPointer(), x.getPointer()) +} + +// G2Add -- +func G2Add(out *G2, x *G2, y *G2) { + C.mclBnG2_add(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// G2Sub -- +func G2Sub(out *G2, x *G2, y *G2) { + C.mclBnG2_sub(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// G2Mul -- +func G2Mul(out *G2, x *G2, y *Fr) { + C.mclBnG2_mul(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// GT -- +type GT struct { + v C.mclBnGT +} + +// getPointer -- +func (x *GT) getPointer() (p *C.mclBnGT) { + // #nosec + return (*C.mclBnGT)(unsafe.Pointer(x)) +} + +// Clear -- +func (x *GT) Clear() { + // #nosec + C.mclBnGT_clear(x.getPointer()) +} + +// SetInt64 -- +func (x *GT) SetInt64(v int64) { + // #nosec + C.mclBnGT_setInt(x.getPointer(), C.int64_t(v)) +} + +// SetString -- +func (x *GT) SetString(s string, base int) error { + buf := []byte(s) + // #nosec + err := C.mclBnGT_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) + if err != 0 { + return fmt.Errorf("err mclBnGT_setStr %x", err) + } + return nil +} + +// Deserialize -- +func (x *GT) Deserialize(buf []byte) error { + // #nosec + err := C.mclBnGT_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) + if err == 0 { + return fmt.Errorf("err mclBnGT_deserialize %x", buf) + } + return nil +} + +// IsEqual -- +func (x *GT) IsEqual(rhs *GT) bool { + return C.mclBnGT_isEqual(x.getPointer(), rhs.getPointer()) == 1 +} + +// IsZero -- +func (x *GT) IsZero() bool { + return C.mclBnGT_isZero(x.getPointer()) == 1 +} + +// IsOne -- +func (x *GT) IsOne() bool { + return C.mclBnGT_isOne(x.getPointer()) == 1 +} + +// GetString -- +func (x *GT) GetString(base int) string { + buf := make([]byte, 2048) + // #nosec + n := C.mclBnGT_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) + if n == 0 { + panic("err mclBnGT_getStr") + } + return string(buf[:n]) +} + +// Serialize -- +func (x *GT) Serialize() []byte { + buf := make([]byte, 2048) + // #nosec + n := C.mclBnGT_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) + if n == 0 { + panic("err mclBnGT_serialize") + } + return buf[:n] +} + +// GTNeg -- +func GTNeg(out *GT, x *GT) { + C.mclBnGT_neg(out.getPointer(), x.getPointer()) +} + +// GTInv -- +func GTInv(out *GT, x *GT) { + C.mclBnGT_inv(out.getPointer(), x.getPointer()) +} + +// GTAdd -- +func GTAdd(out *GT, x *GT, y *GT) { + C.mclBnGT_add(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// GTSub -- +func GTSub(out *GT, x *GT, y *GT) { + C.mclBnGT_sub(out.getPointer(), x.getPointer(), 
y.getPointer()) +} + +// GTMul -- +func GTMul(out *GT, x *GT, y *GT) { + C.mclBnGT_mul(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// GTDiv -- +func GTDiv(out *GT, x *GT, y *GT) { + C.mclBnGT_div(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// GTPow -- +func GTPow(out *GT, x *GT, y *Fr) { + C.mclBnGT_pow(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// Pairing -- +func Pairing(out *GT, x *G1, y *G2) { + C.mclBn_pairing(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// FinalExp -- +func FinalExp(out *GT, x *GT) { + C.mclBn_finalExp(out.getPointer(), x.getPointer()) +} + +// MillerLoop -- +func MillerLoop(out *GT, x *G1, y *G2) { + C.mclBn_millerLoop(out.getPointer(), x.getPointer(), y.getPointer()) +} + +// GetUint64NumToPrecompute -- +func GetUint64NumToPrecompute() int { + return int(C.mclBn_getUint64NumToPrecompute()) +} + +// PrecomputeG2 -- +func PrecomputeG2(Qbuf []uint64, Q *G2) { + // #nosec + C.mclBn_precomputeG2((*C.uint64_t)(unsafe.Pointer(&Qbuf[0])), Q.getPointer()) +} + +// PrecomputedMillerLoop -- +func PrecomputedMillerLoop(out *GT, P *G1, Qbuf []uint64) { + // #nosec + C.mclBn_precomputedMillerLoop(out.getPointer(), P.getPointer(), (*C.uint64_t)(unsafe.Pointer(&Qbuf[0]))) +} + +// PrecomputedMillerLoop2 -- +func PrecomputedMillerLoop2(out *GT, P1 *G1, Q1buf []uint64, P2 *G1, Q2buf []uint64) { + // #nosec + C.mclBn_precomputedMillerLoop2(out.getPointer(), P1.getPointer(), (*C.uint64_t)(unsafe.Pointer(&Q1buf[0])), P2.getPointer(), (*C.uint64_t)(unsafe.Pointer(&Q2buf[0]))) +} + +// FrEvaluatePolynomial -- y = c[0] + c[1] * x + c[2] * x^2 + ... +func FrEvaluatePolynomial(y *Fr, c []Fr, x *Fr) error { + // #nosec + err := C.mclBn_FrEvaluatePolynomial(y.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&c[0])), (C.size_t)(len(c)), x.getPointer()) + if err != 0 { + return fmt.Errorf("err mclBn_FrEvaluatePolynomial") + } + return nil +} + +// G1EvaluatePolynomial -- y = c[0] + c[1] * x + c[2] * x^2 + ... +func G1EvaluatePolynomial(y *G1, c []G1, x *Fr) error { + // #nosec + err := C.mclBn_G1EvaluatePolynomial(y.getPointer(), (*C.mclBnG1)(unsafe.Pointer(&c[0])), (C.size_t)(len(c)), x.getPointer()) + if err != 0 { + return fmt.Errorf("err mclBn_G1EvaluatePolynomial") + } + return nil +} + +// G2EvaluatePolynomial -- y = c[0] + c[1] * x + c[2] * x^2 + ...
+func G2EvaluatePolynomial(y *G2, c []G2, x *Fr) error { + // #nosec + err := C.mclBn_G2EvaluatePolynomial(y.getPointer(), (*C.mclBnG2)(unsafe.Pointer(&c[0])), (C.size_t)(len(c)), x.getPointer()) + if err != 0 { + return fmt.Errorf("err mclBn_G2EvaluatePolynomial") + } + return nil +} + +// FrLagrangeInterpolation -- +func FrLagrangeInterpolation(out *Fr, xVec []Fr, yVec []Fr) error { + if len(xVec) != len(yVec) { + return fmt.Errorf("err FrLagrangeInterpolation:bad size") + } + // #nosec + err := C.mclBn_FrLagrangeInterpolation(out.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&xVec[0])), (*C.mclBnFr)(unsafe.Pointer(&yVec[0])), (C.size_t)(len(xVec))) + if err != 0 { + return fmt.Errorf("err FrLagrangeInterpolation") + } + return nil +} + +// G1LagrangeInterpolation -- +func G1LagrangeInterpolation(out *G1, xVec []Fr, yVec []G1) error { + if len(xVec) != len(yVec) { + return fmt.Errorf("err G1LagrangeInterpolation:bad size") + } + // #nosec + err := C.mclBn_G1LagrangeInterpolation(out.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&xVec[0])), (*C.mclBnG1)(unsafe.Pointer(&yVec[0])), (C.size_t)(len(xVec))) + if err != 0 { + return fmt.Errorf("err G1LagrangeInterpolation") + } + return nil +} + +// G2LagrangeInterpolation -- +func G2LagrangeInterpolation(out *G2, xVec []Fr, yVec []G2) error { + if len(xVec) != len(yVec) { + return fmt.Errorf("err G2LagrangeInterpolation:bad size") + } + // #nosec + err := C.mclBn_G2LagrangeInterpolation(out.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&xVec[0])), (*C.mclBnG2)(unsafe.Pointer(&yVec[0])), (C.size_t)(len(xVec))) + if err != 0 { + return fmt.Errorf("err G2LagrangeInterpolation") + } + return nil +} diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/go/mcl/mcl_test.go b/vendor/github.com/byzantine-lab/mcl/ffi/go/mcl/mcl_test.go new file mode 100644 index 000000000..16bb6910f --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/ffi/go/mcl/mcl_test.go @@ -0,0 +1,157 @@ +package mcl + +import "testing" +import "fmt" + +func testBadPointOfG2(t *testing.T) { + var Q G2 + // this value is not in G2 so should return an error + err := Q.SetString("1 18d3d8c085a5a5e7553c3a4eb628e88b8465bf4de2612e35a0a4eb018fb0c82e9698896031e62fd7633ffd824a859474 1dc6edfcf33e29575d4791faed8e7203832217423bf7f7fbf1f6b36625b12e7132c15fbc15562ce93362a322fb83dd0d 65836963b1f7b6959030ddfa15ab38ce056097e91dedffd996c1808624fa7e2644a77be606290aa555cda8481cfb3cb 1b77b708d3d4f65aeedf54b58393463a42f0dc5856baadb5ce608036baeca398c5d9e6b169473a8838098fd72fd28b50", 16) + if err == nil { + t.Error(err) + } +} + +func testGT(t *testing.T) { + var x GT + x.Clear() + if !x.IsZero() { + t.Errorf("not zero") + } + x.SetInt64(1) + if !x.IsOne() { + t.Errorf("not one") + } +} + +func testHash(t *testing.T) { + var x Fr + if !x.SetHashOf([]byte("abc")) { + t.Error("SetHashOf") + } + fmt.Printf("x=%s\n", x.GetString(16)) +} + +func testNegAdd(t *testing.T) { + var x Fr + var P1, P2, P3 G1 + var Q1, Q2, Q3 G2 + err := P1.HashAndMapTo([]byte("this")) + if err != nil { + t.Error(err) + } + err = Q1.HashAndMapTo([]byte("this")) + if err != nil { + t.Error(err) + } + fmt.Printf("P1=%s\n", P1.GetString(16)) + fmt.Printf("Q1=%s\n", Q1.GetString(16)) + G1Neg(&P2, &P1) + G2Neg(&Q2, &Q1) + fmt.Printf("P2=%s\n", P2.GetString(16)) + fmt.Printf("Q2=%s\n", Q2.GetString(16)) + + x.SetInt64(-1) + G1Mul(&P3, &P1, &x) + G2Mul(&Q3, &Q1, &x) + if !P2.IsEqual(&P3) { + t.Errorf("P2 != P3 %s\n", P3.GetString(16)) + } + if !Q2.IsEqual(&Q3) { + t.Errorf("Q2 != Q3 %s\n", Q3.GetString(16)) + } + + G1Add(&P2, &P2, &P1) + G2Add(&Q2, 
&Q2, &Q1) + if !P2.IsZero() { + t.Errorf("P2 is not zero %s\n", P2.GetString(16)) + } + if !Q2.IsZero() { + t.Errorf("Q2 is not zero %s\n", Q2.GetString(16)) + } +} + +func testPairing(t *testing.T) { + var a, b, ab Fr + err := a.SetString("123", 10) + if err != nil { + t.Error(err) + return + } + err = b.SetString("456", 10) + if err != nil { + t.Error(err) + return + } + FrMul(&ab, &a, &b) + var P, aP G1 + var Q, bQ G2 + err = P.HashAndMapTo([]byte("this")) + if err != nil { + t.Error(err) + return + } + fmt.Printf("P=%s\n", P.GetString(16)) + G1Mul(&aP, &P, &a) + fmt.Printf("aP=%s\n", aP.GetString(16)) + err = Q.HashAndMapTo([]byte("that")) + if err != nil { + t.Error(err) + return + } + fmt.Printf("Q=%s\n", Q.GetString(16)) + G2Mul(&bQ, &Q, &b) + fmt.Printf("bQ=%s\n", bQ.GetString(16)) + var e1, e2 GT + Pairing(&e1, &P, &Q) + fmt.Printf("e1=%s\n", e1.GetString(16)) + Pairing(&e2, &aP, &bQ) + fmt.Printf("e2=%s\n", e2.GetString(16)) + GTPow(&e1, &e1, &ab) + fmt.Printf("e1=%s\n", e1.GetString(16)) + if !e1.IsEqual(&e2) { + t.Errorf("not equal pairing\n%s\n%s", e1.GetString(16), e2.GetString(16)) + } + { + s := P.GetString(IoSerializeHexStr) + var P1 G1 + P1.SetString(s, IoSerializeHexStr) + if !P1.IsEqual(&P) { + t.Error("not equal to P") + return + } + s = Q.GetString(IoSerializeHexStr) + var Q1 G2 + Q1.SetString(s, IoSerializeHexStr) + if !Q1.IsEqual(&Q) { + t.Error("not equal to Q") + return + } + } +} + +func testMcl(t *testing.T, c int) { + err := Init(c) + if err != nil { + t.Fatal(err) + } + testHash(t) + testNegAdd(t) + testPairing(t) + testGT(t) + testBadPointOfG2(t) +} + +func TestMclMain(t *testing.T) { + t.Logf("GetMaxOpUnitSize() = %d\n", GetMaxOpUnitSize()) + t.Log("CurveFp254BNb") + testMcl(t, CurveFp254BNb) + if GetMaxOpUnitSize() == 6 { + if GetFrUnitSize() == 6 { + t.Log("CurveFp382_1") + testMcl(t, CurveFp382_1) + } + t.Log("BLS12_381") + testMcl(t, BLS12_381) + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/java/Bn256Test.java b/vendor/github.com/byzantine-lab/mcl/ffi/java/Bn256Test.java new file mode 100644 index 000000000..b1f9f6f34 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/ffi/java/Bn256Test.java @@ -0,0 +1,104 @@ +import java.io.*; +import com.herumi.mcl.*; + +/* + Bn256Test +*/ +public class Bn256Test { + static { + String lib = "mcl_bn256"; + String libName = System.mapLibraryName(lib); + System.out.println("libName : " + libName); + System.loadLibrary(lib); + } + public static void assertEquals(String msg, String x, String y) { + if (x.equals(y)) { + System.out.println("OK : " + msg); + } else { + System.out.println("NG : " + msg + ", x = " + x + ", y = " + y); + } + } + public static void assertBool(String msg, boolean b) { + if (b) { + System.out.println("OK : " + msg); + } else { + System.out.println("NG : " + msg); + } + } + public static void main(String argv[]) { + try { + Bn256.SystemInit(); + Fr x = new Fr(5); + Fr y = new Fr(-2); + Fr z = new Fr(5); + assertBool("x != y", !x.equals(y)); + assertBool("x == z", x.equals(z)); + assertEquals("x == 5", x.toString(), "5"); + Bn256.add(x, x, y); + assertEquals("x == 3", x.toString(), "3"); + Bn256.mul(x, x, x); + assertEquals("x == 9", x.toString(), "9"); + G1 P = new G1(); + System.out.println("P=" + P); + P.set("-1", "1"); + System.out.println("P=" + P); + Bn256.neg(P, P); + System.out.println("P=" + P); + + String xa = "12723517038133731887338407189719511622662176727675373276651903807414909099441"; + String xb =
"4168783608814932154536427934509895782246573715297911553964171371032945126671"; + String ya = "13891744915211034074451795021214165905772212241412891944830863846330766296736"; + String yb = "7937318970632701341203597196594272556916396164729705624521405069090520231616"; + + G2 Q = new G2(xa, xb, ya, yb); + + P.hashAndMapToG1("This is a pen"); + { + String s = P.toString(); + G1 P1 = new G1(); + P1.setStr(s); + assertBool("P == P1", P1.equals(P)); + } + + GT e = new GT(); + Bn256.pairing(e, P, Q); + GT e1 = new GT(); + GT e2 = new GT(); + Fr c = new Fr("1234567890123234928348230428394234"); + G2 cQ = new G2(Q); + Bn256.mul(cQ, Q, c); // cQ = Q * c + Bn256.pairing(e1, P, cQ); + Bn256.pow(e2, e, c); // e2 = e^c + assertBool("e1 == e2", e1.equals(e2)); + + G1 cP = new G1(P); + Bn256.mul(cP, P, c); // cP = P * c + Bn256.pairing(e1, cP, Q); + assertBool("e1 == e2", e1.equals(e2)); + + BLSsignature(Q); + } catch (RuntimeException e) { + System.out.println("unknown exception :" + e); + } + } + public static void BLSsignature(G2 Q) + { + Fr s = new Fr(); + s.setRand(); // secret key + System.out.println("secret key " + s); + G2 pub = new G2(); + Bn256.mul(pub, Q, s); // public key = sQ + + String m = "signature test"; + G1 H = new G1(); + H.hashAndMapToG1(m); // H = Hash(m) + G1 sign = new G1(); + Bn256.mul(sign, H, s); // signature of m = s H + + GT e1 = new GT(); + GT e2 = new GT(); + Bn256.pairing(e1, H, pub); // e1 = e(H, s Q) + Bn256.pairing(e2, sign, Q); // e2 = e(s H, Q); + assertBool("verify signature", e1.equals(e2)); + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/java/ElgamalTest.java b/vendor/github.com/byzantine-lab/mcl/ffi/java/ElgamalTest.java new file mode 100644 index 000000000..0cf49e144 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/ffi/java/ElgamalTest.java @@ -0,0 +1,144 @@ +import java.io.*; +import com.herumi.mcl.*; + +/* + ElgamalTest [ecParam] + ecParam = secp192k1, NIST_P224, ... + hashParam = hash224, hash384, ... 
diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/java/ElgamalTest.java b/vendor/github.com/byzantine-lab/mcl/ffi/java/ElgamalTest.java
new file mode 100644
index 000000000..0cf49e144
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/ffi/java/ElgamalTest.java
@@ -0,0 +1,144 @@
+import java.io.*;
+import com.herumi.mcl.*;
+
+/*
+	ElgamalTest [-e ecParam] [-h hashParam]
+	ecParam = secp192k1, NIST_P224, ...
+	hashParam = sha224, sha384, ...
+*/
+public class ElgamalTest {
+	static {
+		String lib = "mcl_elgamal";
+		String libName = System.mapLibraryName(lib);
+		System.out.println("libName : " + libName);
+		System.loadLibrary(lib);
+	}
+	public static void assertEquals(String msg, int x, int y) {
+		if (x == y) {
+			System.out.println("OK : " + msg);
+		} else {
+			System.out.println("NG : " + msg + ", x = " + x + ", y = " + y);
+		}
+	}
+	public static void assertBool(String msg, boolean b) {
+		if (b) {
+			System.out.println("OK : " + msg);
+		} else {
+			System.out.println("NG : " + msg);
+		}
+	}
+	public static void main(String argv[]) {
+		try {
+			String ecStr = "secp192k1";
+			String hashStr = "sha224";
+			for (int i = 0; i < argv.length; i++) {
+				if (argv[i].equals("-e") && i < argv.length - 1) {
+					ecStr = argv[i + 1];
+					i++;
+				} else
+				if (argv[i].equals("-h") && i < argv.length - 1) {
+					hashStr = argv[i + 1];
+					i++;
+				}
+			}
+			String param = ecStr + " " + hashStr;
+			System.out.println("param=" + param);
+			Elgamal.SystemInit(param);
+
+			String prvStr = "";
+			String pubStr = "";
+			{
+				PrivateKey prv = new PrivateKey();
+				prv.init();
+				prvStr = prv.toStr();
+				PublicKey pub = prv.getPublicKey();
+				pubStr = pub.toStr();
+			}
+			int m = 1234;
+			CipherText c = new CipherText();
+			PublicKey pub = new PublicKey();
+
+			pub.fromStr(pubStr);
+
+			pub.enc(c, m);
+
+			PrivateKey prv = new PrivateKey();
+			prv.fromStr(prvStr);
+			prv.setCache(0, 60000);
+
+			int dec = prv.dec(c);
+			// verify dec(enc(m)) == m
+			assertEquals("dec(enc(m)) == m", m, dec);
+
+			// verify toStr, fromStr
+			{
+				String cStr = c.toStr();
+				CipherText c2 = new CipherText();
+				c2.fromStr(cStr);
+				int dec2 = prv.dec(c2);
+				assertEquals("fromStr(toStr(CipherText)) == CipherText", dec, dec2);
+			}
+
+			// verify dec(enc(str)) == str
+			pub.enc(c, "1234");
+			dec = prv.dec(c);
+			assertEquals("dec(enc(str)) == str", m, dec);
+
+			// verify dec(mul(enc(m), 3)) == m * 3
+			c.mul(3);
+			m *= 3;
+			dec = prv.dec(c);
+			assertEquals("mul(int)", m, dec);
+
+			// verify dec(mul(enc(m), "10")) == m * 10
+			c.mul("10");
+			m *= 10;
+			dec = prv.dec(c);
+			assertEquals("mul(str)", m, dec);
+
+			// convert str
+			{
+				String s = c.toStr();
+				CipherText c2 = new CipherText();
+				c2.fromStr(s);
+				dec = prv.dec(c2); // decrypt the round-tripped ciphertext
+				assertEquals("fromStr", m, dec);
+			}
+			// rerandomize
+			pub.rerandomize(c);
+			dec = prv.dec(c);
+			assertEquals("rerandomize", m, dec);
+			int m2 = 12345;
+			// verify dec(add(enc(m), m2)) == m + m2
+			pub.add(c, m2);
+			m += m2;
+			dec = prv.dec(c);
+			assertEquals("pub.add(int)", m, dec);
+
+			pub.add(c, "993");
+			m += 993;
+			dec = prv.dec(c);
+			assertEquals("pub.add(str)", m, dec);
+
+			// string test
+			String m3 = "-2000000";
+			String m4 = "2001234";
+			CipherText c2 = new CipherText();
+			SWIGTYPE_p_bool b = Elgamal.new_p_bool();
+			pub.enc(c, m3);
+			dec = prv.dec(c, b);
+			assertBool("expect dec fail", !Elgamal.p_bool_value(b));
+			pub.enc(c2, m4);
+			dec = prv.dec(c2, b);
+			assertBool("expect dec fail", !Elgamal.p_bool_value(b));
+			c.add(c2); // m3 + m4
+
+			dec = prv.dec(c, b);
+			assertEquals("int add", 1234, dec);
+			assertBool("expect dec success", Elgamal.p_bool_value(b));
+			Elgamal.delete_p_bool(b);
+		} catch (RuntimeException e) {
+			System.out.println("unknown exception :" + e);
+		}
+	}
+}
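The assertions above exercise the additive homomorphism of lifted ElGamal over an elliptic-curve group. In sketch notation (ours, not from the vendored sources), with generator P, secret key x and public key pk = xP:

	$$\mathrm{Enc}(m;r) = (rP,\ mP + r\,\mathrm{pk}), \qquad
	\mathrm{Enc}(m_1;r_1) + \mathrm{Enc}(m_2;r_2) = \mathrm{Enc}(m_1+m_2;\ r_1+r_2), \qquad
	k\cdot\mathrm{Enc}(m;r) = \mathrm{Enc}(km;\ kr)$$

Component-wise addition of ciphertexts implements c.add and pub.add, and scalar multiplication implements c.mul. Decryption computes $mP = c_2 - x\,c_1$ and then recovers the small integer m from mP by a bounded discrete-log search, which is why the test limits the recoverable range with prv.setCache(0, 60000) and expects dec to fail for -2000000 and 2001234 until the two ciphertexts are added and the sum 1234 falls back into range.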
+include $(TOP_DIR)/common.mk
+ifeq ($(UNAME_S),Darwin)
+  JAVA_INC=-I/System/Library/Frameworks/JavaVM.framework/Versions/Current/Headers/
+else
+  JAVA_INC=-I/usr/lib/jvm/default-java/include
+#JAVA_INC=-I/usr/lib/jvm/java-7-openjdk-amd64/include
+  CFLAGS+=-z noexecstack
+  LDFLAGS+=-lrt
+endif
+CFLAGS+=$(JAVA_INC) $(JAVA_INC)/linux -I $(TOP_DIR)/include -I $(TOP_DIR)/../xbyak -I $(TOP_DIR)/../cybozulib/include -Wno-strict-aliasing
+MCL_LIB=$(TOP_DIR)/lib/libmcl.a
+
+PACKAGE_NAME=com.herumi.mcl
+PACKAGE_DIR=$(subst .,/,$(PACKAGE_NAME))
+
+ELGAMAL_LIB=$(TOP_DIR)/bin/libmcl_elgamal.$(LIB_SUF)
+BN256_LIB=$(TOP_DIR)/bin/libmcl_bn256.$(LIB_SUF)
+JAVA_EXE=cd $(TOP_DIR)/bin && LD_LIBRARY_PATH=./:$(LD_LIBRARY_PATH) java -classpath ../ffi/java
+all: $(ELGAMAL_LIB)
+
+elgamal_wrap.cxx: elgamal.i elgamal_impl.hpp
+	$(MKDIR) $(PACKAGE_DIR)
+	swig -java -package $(PACKAGE_NAME) -outdir $(PACKAGE_DIR) -c++ -Wall elgamal.i
+
+bn256_wrap.cxx: bn256.i bn256_impl.hpp
+	$(MKDIR) $(PACKAGE_DIR)
+	swig -java -package $(PACKAGE_NAME) -outdir $(PACKAGE_DIR) -c++ -Wall bn256.i
+
+$(MCL_LIB):
+	make -C $(TOP_DIR)
+
+$(ELGAMAL_LIB): elgamal_wrap.cxx $(MCL_LIB)
+	$(PRE)$(CXX) $< -o $@ $(CFLAGS) $(LDFLAGS) $(MCL_LIB) -shared
+
+$(BN256_LIB): bn256_wrap.cxx $(MCL_LIB)
+	$(PRE)$(CXX) $< -o $@ $(CFLAGS) $(LDFLAGS) $(MCL_LIB) -shared
+
+%.class: %.java
+	javac $<
+
+ElgamalTest.class: ElgamalTest.java $(ELGAMAL_LIB)
+Bn256Test.class: Bn256Test.java $(BN256_LIB)
+
+jar:
+	jar cvf mcl.jar com
+
+test_elgamal: ElgamalTest.class $(ELGAMAL_LIB)
+	$(JAVA_EXE) ElgamalTest
+	$(JAVA_EXE) ElgamalTest -e NIST_P192
+	$(JAVA_EXE) ElgamalTest -e NIST_P256 -h sha256
+	$(JAVA_EXE) ElgamalTest -e NIST_P384 -h sha384
+	$(JAVA_EXE) ElgamalTest -e NIST_P521 -h sha512
+
+test_bn256: Bn256Test.class $(BN256_LIB)
+	$(JAVA_EXE) Bn256Test
+
+test:
+	$(MAKE) test_elgamal
+	$(MAKE) test_bn256
+
+clean:
+	rm -rf *.class $(ELGAMAL_LIB) $(PACKAGE_DIR)/*.class *_wrap.cxx
+
diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/java/bn256.i b/vendor/github.com/byzantine-lab/mcl/ffi/java/bn256.i
new file mode 100644
index 000000000..94a8edb7a
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/ffi/java/bn256.i
@@ -0,0 +1,31 @@
+%module Bn256
+
+%include "std_string.i"
+%include "std_except.i"
+
+
+%{
+#include <cybozu/random_generator.hpp>
+#include <cybozu/crypto.hpp>
+#include <mcl/bn256.hpp>
+struct Param {
+	cybozu::RandomGenerator rg;
+	static inline Param& getParam()
+	{
+		static Param p;
+		return p;
+	}
+};
+
+static void HashAndMapToG1(mcl::bn256::G1& P, const std::string& m)
+{
+	std::string digest = cybozu::crypto::Hash::digest(cybozu::crypto::Hash::N_SHA256, m);
+	mcl::bn256::Fp t;
+	t.setArrayMask(digest.c_str(), digest.size());
+	mcl::bn256::BN::param.mapTo.calcG1(P, t);
+}
+
+#include "bn256_impl.hpp"
+%}
+
+%include "bn256_impl.hpp"
diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/java/bn256_impl.hpp b/vendor/github.com/byzantine-lab/mcl/ffi/java/bn256_impl.hpp
new file mode 100644
index 000000000..c4caaf3ca
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/ffi/java/bn256_impl.hpp
@@ -0,0 +1,249 @@
+#include <mcl/bn256.hpp>
+#include <stdexcept>
+#include <sstream>
+
+void SystemInit() throw(std::exception)
+{
+	mcl::bn256::initPairing();
+}
+
+class G1;
+class G2;
+class GT;
+/*
+	Fr = Z / rZ
+*/
+class Fr {
+	mcl::bn256::Fr self_;
+	friend class G1;
+	friend class G2;
+	friend class GT;
+	friend void neg(Fr& y, const Fr& x);
+	friend void add(Fr& z, const Fr& x, const Fr& y);
+	friend void sub(Fr& z, const Fr& x, const Fr& y);
+	friend void mul(Fr& z, const Fr& x, const Fr& y);
+	friend void mul(G1& z, const G1& x, const Fr&
y); + friend void mul(G2& z, const G2& x, const Fr& y); + friend void div(Fr& z, const Fr& x, const Fr& y); + friend void pow(GT& z, const GT& x, const Fr& y); +public: + Fr() {} + Fr(const Fr& rhs) : self_(rhs.self_) {} + Fr(int x) : self_(x) {} + Fr(const std::string& str) throw(std::exception) + : self_(str) {} + bool equals(const Fr& rhs) const { return self_ == rhs.self_; } + void setStr(const std::string& str) throw(std::exception) + { + self_.setStr(str); + } + void setInt(int x) + { + self_ = x; + } + void clear() + { + self_.clear(); + } + void setRand() + { + self_.setRand(Param::getParam().rg); + } + std::string toString() const throw(std::exception) + { + return self_.getStr(); + } +}; + +void neg(Fr& y, const Fr& x) +{ + mcl::bn256::Fr::neg(y.self_, x.self_); +} + +void add(Fr& z, const Fr& x, const Fr& y) +{ + mcl::bn256::Fr::add(z.self_, x.self_, y.self_); +} + +void sub(Fr& z, const Fr& x, const Fr& y) +{ + mcl::bn256::Fr::sub(z.self_, x.self_, y.self_); +} + +void mul(Fr& z, const Fr& x, const Fr& y) +{ + mcl::bn256::Fr::mul(z.self_, x.self_, y.self_); +} + +void div(Fr& z, const Fr& x, const Fr& y) +{ + mcl::bn256::Fr::div(z.self_, x.self_, y.self_); +} + +/* + #G1 = r +*/ +class G1 { + mcl::bn256::G1 self_; + friend void neg(G1& y, const G1& x); + friend void dbl(G1& y, const G1& x); + friend void add(G1& z, const G1& x, const G1& y); + friend void sub(G1& z, const G1& x, const G1& y); + friend void mul(G1& z, const G1& x, const Fr& y); + friend void pairing(GT& e, const G1& P, const G2& Q); +public: + G1() {} + G1(const G1& rhs) : self_(rhs.self_) {} + G1(const std::string& x, const std::string& y) throw(std::exception) + : self_(mcl::bn256::Fp(x), mcl::bn256::Fp(y)) + { + } + bool equals(const G1& rhs) const { return self_ == rhs.self_; } + void set(const std::string& x, const std::string& y) + { + self_.set(mcl::bn256::Fp(x), mcl::bn256::Fp(y)); + } + void hashAndMapToG1(const std::string& m) throw(std::exception) + { + HashAndMapToG1(self_, m); + } + void clear() + { + self_.clear(); + } + /* + compressed format + */ + void setStr(const std::string& str) throw(std::exception) + { + self_.setStr(str); + } + std::string toString() const throw(std::exception) + { + return self_.getStr(); + } +}; + +void neg(G1& y, const G1& x) +{ + mcl::bn256::G1::neg(y.self_, x.self_); +} +void dbl(G1& y, const G1& x) +{ + mcl::bn256::G1::dbl(y.self_, x.self_); +} +void add(G1& z, const G1& x, const G1& y) +{ + mcl::bn256::G1::add(z.self_, x.self_, y.self_); +} +void sub(G1& z, const G1& x, const G1& y) +{ + mcl::bn256::G1::sub(z.self_, x.self_, y.self_); +} +void mul(G1& z, const G1& x, const Fr& y) +{ + mcl::bn256::G1::mul(z.self_, x.self_, y.self_); +} + +/* + #G2 = r +*/ +class G2 { + mcl::bn256::G2 self_; + friend void neg(G2& y, const G2& x); + friend void dbl(G2& y, const G2& x); + friend void add(G2& z, const G2& x, const G2& y); + friend void sub(G2& z, const G2& x, const G2& y); + friend void mul(G2& z, const G2& x, const Fr& y); + friend void pairing(GT& e, const G1& P, const G2& Q); +public: + G2() {} + G2(const G2& rhs) : self_(rhs.self_) {} + G2(const std::string& xa, const std::string& xb, const std::string& ya, const std::string& yb) throw(std::exception) + : self_(mcl::bn256::Fp2(xa, xb), mcl::bn256::Fp2(ya, yb)) + { + } + bool equals(const G2& rhs) const { return self_ == rhs.self_; } + void set(const std::string& xa, const std::string& xb, const std::string& ya, const std::string& yb) + { + self_.set(mcl::bn256::Fp2(xa, xb), mcl::bn256::Fp2(ya, yb)); + } + void 
clear()
+	{
+		self_.clear();
+	}
+	/*
+		compressed format
+	*/
+	void setStr(const std::string& str) throw(std::exception)
+	{
+		self_.setStr(str);
+	}
+	std::string toString() const throw(std::exception)
+	{
+		return self_.getStr();
+	}
+};
+
+void neg(G2& y, const G2& x)
+{
+	mcl::bn256::G2::neg(y.self_, x.self_);
+}
+void dbl(G2& y, const G2& x)
+{
+	mcl::bn256::G2::dbl(y.self_, x.self_);
+}
+void add(G2& z, const G2& x, const G2& y)
+{
+	mcl::bn256::G2::add(z.self_, x.self_, y.self_);
+}
+void sub(G2& z, const G2& x, const G2& y)
+{
+	mcl::bn256::G2::sub(z.self_, x.self_, y.self_);
+}
+void mul(G2& z, const G2& x, const Fr& y)
+{
+	mcl::bn256::G2::mul(z.self_, x.self_, y.self_);
+}
+
+/*
+	#GT = r
+*/
+class GT {
+	mcl::bn256::Fp12 self_;
+	friend void mul(GT& z, const GT& x, const GT& y);
+	friend void pow(GT& z, const GT& x, const Fr& y);
+	friend void pairing(GT& e, const G1& P, const G2& Q);
+public:
+	GT() {}
+	GT(const GT& rhs) : self_(rhs.self_) {}
+	bool equals(const GT& rhs) const { return self_ == rhs.self_; }
+	void clear()
+	{
+		self_.clear();
+	}
+	void setStr(const std::string& str) throw(std::exception)
+	{
+		std::istringstream iss(str);
+		iss >> self_;
+	}
+	std::string toString() const throw(std::exception)
+	{
+		std::ostringstream oss;
+		oss << self_;
+		return oss.str();
+	}
+};
+
+void mul(GT& z, const GT& x, const GT& y)
+{
+	mcl::bn256::Fp12::mul(z.self_, x.self_, y.self_);
+}
+void pow(GT& z, const GT& x, const Fr& y)
+{
+	mcl::bn256::Fp12::pow(z.self_, x.self_, y.self_);
+}
+void pairing(GT& e, const G1& P, const G2& Q)
+{
+	mcl::bn256::pairing(e.self_, P.self_, Q.self_);
+}
diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/java/bn256_wrap.cxx b/vendor/github.com/byzantine-lab/mcl/ffi/java/bn256_wrap.cxx
new file mode 100644
index 000000000..0c8257af5
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/ffi/java/bn256_wrap.cxx
@@ -0,0 +1,1542 @@
+/* ----------------------------------------------------------------------------
+ * This file was automatically generated by SWIG (http://www.swig.org).
+ * Version 3.0.12
+ *
+ * This file is not intended to be easily readable and contains a number of
+ * coding conventions designed to improve portability and efficiency. Do not make
+ * changes to this file unless you know what you are doing--modify the SWIG
+ * interface file instead.
+ * ----------------------------------------------------------------------------- */
+
+
+#ifndef SWIGJAVA
+#define SWIGJAVA
+#endif
+
+
+
+#ifdef __cplusplus
+/* SwigValueWrapper is described in swig.swg */
+template<typename T> class SwigValueWrapper {
+  struct SwigMovePointer {
+    T *ptr;
+    SwigMovePointer(T *p) : ptr(p) { }
+    ~SwigMovePointer() { delete ptr; }
+    SwigMovePointer& operator=(SwigMovePointer& rhs) { T* oldptr = ptr; ptr = 0; delete oldptr; ptr = rhs.ptr; rhs.ptr = 0; return *this; }
+  } pointer;
+  SwigValueWrapper& operator=(const SwigValueWrapper<T>& rhs);
+  SwigValueWrapper(const SwigValueWrapper<T>& rhs);
+public:
+  SwigValueWrapper() : pointer(0) { }
+  SwigValueWrapper& operator=(const T& t) { SwigMovePointer tmp(new T(t)); pointer = tmp; return *this; }
+  operator T&() const { return *pointer.ptr; }
+  T *operator&() { return pointer.ptr; }
+};
+
+template <typename T> T SwigValueInit() {
+  return T();
+}
+#endif
+
+/* -----------------------------------------------------------------------------
+ * This section contains generic SWIG labels for method/variable
+ * declarations/attributes, and other compiler dependent labels.
+ * ----------------------------------------------------------------------------- */ + +/* template workaround for compilers that cannot correctly implement the C++ standard */ +#ifndef SWIGTEMPLATEDISAMBIGUATOR +# if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) +# define SWIGTEMPLATEDISAMBIGUATOR template +# elif defined(__HP_aCC) +/* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ +/* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ +# define SWIGTEMPLATEDISAMBIGUATOR template +# else +# define SWIGTEMPLATEDISAMBIGUATOR +# endif +#endif + +/* inline attribute */ +#ifndef SWIGINLINE +# if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) +# define SWIGINLINE inline +# else +# define SWIGINLINE +# endif +#endif + +/* attribute recognised by some compilers to avoid 'unused' warnings */ +#ifndef SWIGUNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define SWIGUNUSED __attribute__ ((__unused__)) +# else +# define SWIGUNUSED +# endif +# elif defined(__ICC) +# define SWIGUNUSED __attribute__ ((__unused__)) +# else +# define SWIGUNUSED +# endif +#endif + +#ifndef SWIG_MSC_UNSUPPRESS_4505 +# if defined(_MSC_VER) +# pragma warning(disable : 4505) /* unreferenced local function has been removed */ +# endif +#endif + +#ifndef SWIGUNUSEDPARM +# ifdef __cplusplus +# define SWIGUNUSEDPARM(p) +# else +# define SWIGUNUSEDPARM(p) p SWIGUNUSED +# endif +#endif + +/* internal SWIG method */ +#ifndef SWIGINTERN +# define SWIGINTERN static SWIGUNUSED +#endif + +/* internal inline SWIG method */ +#ifndef SWIGINTERNINLINE +# define SWIGINTERNINLINE SWIGINTERN SWIGINLINE +#endif + +/* exporting methods */ +#if defined(__GNUC__) +# if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) +# ifndef GCC_HASCLASSVISIBILITY +# define GCC_HASCLASSVISIBILITY +# endif +# endif +#endif + +#ifndef SWIGEXPORT +# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) +# if defined(STATIC_LINKED) +# define SWIGEXPORT +# else +# define SWIGEXPORT __declspec(dllexport) +# endif +# else +# if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) +# define SWIGEXPORT __attribute__ ((visibility("default"))) +# else +# define SWIGEXPORT +# endif +# endif +#endif + +/* calling conventions for Windows */ +#ifndef SWIGSTDCALL +# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) +# define SWIGSTDCALL __stdcall +# else +# define SWIGSTDCALL +# endif +#endif + +/* Deal with Microsoft's attempt at deprecating C standard runtime functions */ +#if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) +# define _CRT_SECURE_NO_DEPRECATE +#endif + +/* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ +#if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) +# define _SCL_SECURE_NO_DEPRECATE +#endif + +/* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ +#if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) +# define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 +#endif + +/* Intel's compiler complains if a variable which was never initialised is + * cast to void, which is a common idiom which we use to indicate that we + * are aware a variable isn't used. So we just silence that warning. 
+ * See: https://github.com/swig/swig/issues/192 for more discussion.
+ */
+#ifdef __INTEL_COMPILER
+# pragma warning disable 592
+#endif
+
+
+/* Fix for jlong on some versions of gcc on Windows */
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+  typedef long long __int64;
+#endif
+
+/* Fix for jlong on 64-bit x86 Solaris */
+#if defined(__x86_64)
+# ifdef _LP64
+# undef _LP64
+# endif
+#endif
+
+#include <jni.h>
+#include <stdlib.h>
+#include <string.h>
+
+
+/* Support for throwing Java exceptions */
+typedef enum {
+  SWIG_JavaOutOfMemoryError = 1,
+  SWIG_JavaIOException,
+  SWIG_JavaRuntimeException,
+  SWIG_JavaIndexOutOfBoundsException,
+  SWIG_JavaArithmeticException,
+  SWIG_JavaIllegalArgumentException,
+  SWIG_JavaNullPointerException,
+  SWIG_JavaDirectorPureVirtual,
+  SWIG_JavaUnknownError
+} SWIG_JavaExceptionCodes;
+
+typedef struct {
+  SWIG_JavaExceptionCodes code;
+  const char *java_exception;
+} SWIG_JavaExceptions_t;
+
+
+static void SWIGUNUSED SWIG_JavaThrowException(JNIEnv *jenv, SWIG_JavaExceptionCodes code, const char *msg) {
+  jclass excep;
+  static const SWIG_JavaExceptions_t java_exceptions[] = {
+    { SWIG_JavaOutOfMemoryError, "java/lang/OutOfMemoryError" },
+    { SWIG_JavaIOException, "java/io/IOException" },
+    { SWIG_JavaRuntimeException, "java/lang/RuntimeException" },
+    { SWIG_JavaIndexOutOfBoundsException, "java/lang/IndexOutOfBoundsException" },
+    { SWIG_JavaArithmeticException, "java/lang/ArithmeticException" },
+    { SWIG_JavaIllegalArgumentException, "java/lang/IllegalArgumentException" },
+    { SWIG_JavaNullPointerException, "java/lang/NullPointerException" },
+    { SWIG_JavaDirectorPureVirtual, "java/lang/RuntimeException" },
+    { SWIG_JavaUnknownError, "java/lang/UnknownError" },
+    { (SWIG_JavaExceptionCodes)0, "java/lang/UnknownError" }
+  };
+  const SWIG_JavaExceptions_t *except_ptr = java_exceptions;
+
+  while (except_ptr->code != code && except_ptr->code)
+    except_ptr++;
+
+  jenv->ExceptionClear();
+  excep = jenv->FindClass(except_ptr->java_exception);
+  if (excep)
+    jenv->ThrowNew(excep, msg);
+}
+
+
+/* Contract support */
+
+#define SWIG_contract_assert(nullreturn, expr, msg) if (!(expr)) {SWIG_JavaThrowException(jenv, SWIG_JavaIllegalArgumentException, msg); return nullreturn; } else
+
+
+#include <string>
+
+
+#include <typeinfo>
+#include <stdexcept>
+
+
+#include <cybozu/random_generator.hpp>
+#include <cybozu/crypto.hpp>
+#include <mcl/bn256.hpp>
+struct Param {
+	cybozu::RandomGenerator rg;
+	static inline Param& getParam()
+	{
+		static Param p;
+		return p;
+	}
+};
+
+static void HashAndMapToG1(mcl::bn256::G1& P, const std::string& m)
+{
+	std::string digest = cybozu::crypto::Hash::digest(cybozu::crypto::Hash::N_SHA256, m);
+	mcl::bn256::Fp t;
+	t.setArrayMask(digest.c_str(), digest.size());
+	mcl::bn256::BN::param.mapTo.calcG1(P, t);
+}
+
+#include "bn256_impl.hpp"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_SystemInit(JNIEnv *jenv, jclass jcls) {
+  (void)jenv;
+  (void)jcls;
+  try {
+    SystemInit();
+  }
+  catch(std::exception &_e) {
+    SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+    return ;
+  }
+
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_neg_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
+  Fr *arg1 = 0 ;
+  Fr *arg2 = 0 ;
+
+  (void)jenv;
+  (void)jcls;
+  (void)jarg1_;
+  (void)jarg2_;
+  arg1 = *(Fr **)&jarg1;
+  if (!arg1) {
+    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr & reference is null");
+    return ;
+  }
+  arg2 = *(Fr **)&jarg2;
+  if (!arg2) {
+    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException,
"Fr const & reference is null"); + return ; + } + neg(*arg1,(Fr const &)*arg2); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_add_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { + Fr *arg1 = 0 ; + Fr *arg2 = 0 ; + Fr *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + (void)jarg3_; + arg1 = *(Fr **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr & reference is null"); + return ; + } + arg2 = *(Fr **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); + return ; + } + arg3 = *(Fr **)&jarg3; + if (!arg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); + return ; + } + add(*arg1,(Fr const &)*arg2,(Fr const &)*arg3); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_sub_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { + Fr *arg1 = 0 ; + Fr *arg2 = 0 ; + Fr *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + (void)jarg3_; + arg1 = *(Fr **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr & reference is null"); + return ; + } + arg2 = *(Fr **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); + return ; + } + arg3 = *(Fr **)&jarg3; + if (!arg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); + return ; + } + sub(*arg1,(Fr const &)*arg2,(Fr const &)*arg3); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_mul_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { + Fr *arg1 = 0 ; + Fr *arg2 = 0 ; + Fr *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + (void)jarg3_; + arg1 = *(Fr **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr & reference is null"); + return ; + } + arg2 = *(Fr **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); + return ; + } + arg3 = *(Fr **)&jarg3; + if (!arg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); + return ; + } + mul(*arg1,(Fr const &)*arg2,(Fr const &)*arg3); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_mul_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { + G1 *arg1 = 0 ; + G1 *arg2 = 0 ; + Fr *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + (void)jarg3_; + arg1 = *(G1 **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 & reference is null"); + return ; + } + arg2 = *(G1 **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); + return ; + } + arg3 = *(Fr **)&jarg3; + if (!arg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); + return ; + } + mul(*arg1,(G1 const &)*arg2,(Fr const &)*arg3); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_mul_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { + G2 *arg1 
= 0 ; + G2 *arg2 = 0 ; + Fr *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + (void)jarg3_; + arg1 = *(G2 **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 & reference is null"); + return ; + } + arg2 = *(G2 **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); + return ; + } + arg3 = *(Fr **)&jarg3; + if (!arg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); + return ; + } + mul(*arg1,(G2 const &)*arg2,(Fr const &)*arg3); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_div(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { + Fr *arg1 = 0 ; + Fr *arg2 = 0 ; + Fr *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + (void)jarg3_; + arg1 = *(Fr **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr & reference is null"); + return ; + } + arg2 = *(Fr **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); + return ; + } + arg3 = *(Fr **)&jarg3; + if (!arg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); + return ; + } + div(*arg1,(Fr const &)*arg2,(Fr const &)*arg3); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_pow(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { + GT *arg1 = 0 ; + GT *arg2 = 0 ; + Fr *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + (void)jarg3_; + arg1 = *(GT **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT & reference is null"); + return ; + } + arg2 = *(GT **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT const & reference is null"); + return ; + } + arg3 = *(Fr **)&jarg3; + if (!arg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); + return ; + } + pow(*arg1,(GT const &)*arg2,(Fr const &)*arg3); +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1Fr_1_1SWIG_10(JNIEnv *jenv, jclass jcls) { + jlong jresult = 0 ; + Fr *result = 0 ; + + (void)jenv; + (void)jcls; + result = (Fr *)new Fr(); + *(Fr **)&jresult = result; + return jresult; +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1Fr_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + jlong jresult = 0 ; + Fr *arg1 = 0 ; + Fr *result = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(Fr **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); + return 0; + } + result = (Fr *)new Fr((Fr const &)*arg1); + *(Fr **)&jresult = result; + return jresult; +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1Fr_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jint jarg1) { + jlong jresult = 0 ; + int arg1 ; + Fr *result = 0 ; + + (void)jenv; + (void)jcls; + arg1 = (int)jarg1; + result = (Fr *)new Fr(arg1); + *(Fr **)&jresult = result; + return jresult; +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1Fr_1_1SWIG_13(JNIEnv *jenv, jclass jcls, jstring jarg1) { + jlong jresult = 0 ; + std::string *arg1 = 0 ; + Fr *result = 0 ; + + (void)jenv; + (void)jcls; + if(!jarg1) { + SWIG_JavaThrowException(jenv, 
SWIG_JavaNullPointerException, "null string"); + return 0; + } + const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); + if (!arg1_pstr) return 0; + std::string arg1_str(arg1_pstr); + arg1 = &arg1_str; + jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); + try { + result = (Fr *)new Fr((std::string const &)*arg1); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return 0; + } + + *(Fr **)&jresult = result; + return jresult; +} + + +SWIGEXPORT jboolean JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1equals(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { + jboolean jresult = 0 ; + Fr *arg1 = (Fr *) 0 ; + Fr *arg2 = 0 ; + bool result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(Fr **)&jarg1; + arg2 = *(Fr **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); + return 0; + } + result = (bool)((Fr const *)arg1)->equals((Fr const &)*arg2); + jresult = (jboolean)result; + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1setStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { + Fr *arg1 = (Fr *) 0 ; + std::string *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(Fr **)&jarg1; + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return ; + std::string arg2_str(arg2_pstr); + arg2 = &arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + try { + (arg1)->setStr((std::string const &)*arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1setInt(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2) { + Fr *arg1 = (Fr *) 0 ; + int arg2 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(Fr **)&jarg1; + arg2 = (int)jarg2; + (arg1)->setInt(arg2); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1clear(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + Fr *arg1 = (Fr *) 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(Fr **)&jarg1; + (arg1)->clear(); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1setRand(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + Fr *arg1 = (Fr *) 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(Fr **)&jarg1; + (arg1)->setRand(); +} + + +SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + jstring jresult = 0 ; + Fr *arg1 = (Fr *) 0 ; + std::string result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(Fr **)&jarg1; + try { + result = ((Fr const *)arg1)->toString(); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return 0; + } + + jresult = jenv->NewStringUTF((&result)->c_str()); + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_delete_1Fr(JNIEnv *jenv, jclass jcls, jlong jarg1) { + Fr *arg1 = (Fr *) 0 ; + + (void)jenv; + (void)jcls; + arg1 = *(Fr **)&jarg1; + delete arg1; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_neg_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { + G1 *arg1 = 
0 ; + G1 *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(G1 **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 & reference is null"); + return ; + } + arg2 = *(G1 **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); + return ; + } + neg(*arg1,(G1 const &)*arg2); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_dbl_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { + G1 *arg1 = 0 ; + G1 *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(G1 **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 & reference is null"); + return ; + } + arg2 = *(G1 **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); + return ; + } + dbl(*arg1,(G1 const &)*arg2); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_add_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { + G1 *arg1 = 0 ; + G1 *arg2 = 0 ; + G1 *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + (void)jarg3_; + arg1 = *(G1 **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 & reference is null"); + return ; + } + arg2 = *(G1 **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); + return ; + } + arg3 = *(G1 **)&jarg3; + if (!arg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); + return ; + } + add(*arg1,(G1 const &)*arg2,(G1 const &)*arg3); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_sub_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { + G1 *arg1 = 0 ; + G1 *arg2 = 0 ; + G1 *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + (void)jarg3_; + arg1 = *(G1 **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 & reference is null"); + return ; + } + arg2 = *(G1 **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); + return ; + } + arg3 = *(G1 **)&jarg3; + if (!arg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); + return ; + } + sub(*arg1,(G1 const &)*arg2,(G1 const &)*arg3); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_pairing(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { + GT *arg1 = 0 ; + G1 *arg2 = 0 ; + G2 *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + (void)jarg3_; + arg1 = *(GT **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT & reference is null"); + return ; + } + arg2 = *(G1 **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); + return ; + } + arg3 = *(G2 **)&jarg3; + if (!arg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); + return ; + } + pairing(*arg1,(G1 const &)*arg2,(G2 const &)*arg3); +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G1_1_1SWIG_10(JNIEnv *jenv, jclass 
jcls) { + jlong jresult = 0 ; + G1 *result = 0 ; + + (void)jenv; + (void)jcls; + result = (G1 *)new G1(); + *(G1 **)&jresult = result; + return jresult; +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G1_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + jlong jresult = 0 ; + G1 *arg1 = 0 ; + G1 *result = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(G1 **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); + return 0; + } + result = (G1 *)new G1((G1 const &)*arg1); + *(G1 **)&jresult = result; + return jresult; +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G1_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jstring jarg1, jstring jarg2) { + jlong jresult = 0 ; + std::string *arg1 = 0 ; + std::string *arg2 = 0 ; + G1 *result = 0 ; + + (void)jenv; + (void)jcls; + if(!jarg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return 0; + } + const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); + if (!arg1_pstr) return 0; + std::string arg1_str(arg1_pstr); + arg1 = &arg1_str; + jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return 0; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return 0; + std::string arg2_str(arg2_pstr); + arg2 = &arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + try { + result = (G1 *)new G1((std::string const &)*arg1,(std::string const &)*arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return 0; + } + + *(G1 **)&jresult = result; + return jresult; +} + + +SWIGEXPORT jboolean JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1equals(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { + jboolean jresult = 0 ; + G1 *arg1 = (G1 *) 0 ; + G1 *arg2 = 0 ; + bool result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(G1 **)&jarg1; + arg2 = *(G1 **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); + return 0; + } + result = (bool)((G1 const *)arg1)->equals((G1 const &)*arg2); + jresult = (jboolean)result; + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1set(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2, jstring jarg3) { + G1 *arg1 = (G1 *) 0 ; + std::string *arg2 = 0 ; + std::string *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(G1 **)&jarg1; + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return ; + std::string arg2_str(arg2_pstr); + arg2 = &arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + if(!jarg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0); + if (!arg3_pstr) return ; + std::string arg3_str(arg3_pstr); + arg3 = &arg3_str; + jenv->ReleaseStringUTFChars(jarg3, arg3_pstr); + (arg1)->set((std::string const &)*arg2,(std::string const &)*arg3); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1hashAndMapToG1(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { + G1 
*arg1 = (G1 *) 0 ; + std::string *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(G1 **)&jarg1; + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return ; + std::string arg2_str(arg2_pstr); + arg2 = &arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + try { + (arg1)->hashAndMapToG1((std::string const &)*arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1clear(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + G1 *arg1 = (G1 *) 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(G1 **)&jarg1; + (arg1)->clear(); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1setStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { + G1 *arg1 = (G1 *) 0 ; + std::string *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(G1 **)&jarg1; + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return ; + std::string arg2_str(arg2_pstr); + arg2 = &arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + try { + (arg1)->setStr((std::string const &)*arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + jstring jresult = 0 ; + G1 *arg1 = (G1 *) 0 ; + std::string result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(G1 **)&jarg1; + try { + result = ((G1 const *)arg1)->toString(); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return 0; + } + + jresult = jenv->NewStringUTF((&result)->c_str()); + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_delete_1G1(JNIEnv *jenv, jclass jcls, jlong jarg1) { + G1 *arg1 = (G1 *) 0 ; + + (void)jenv; + (void)jcls; + arg1 = *(G1 **)&jarg1; + delete arg1; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_neg_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { + G2 *arg1 = 0 ; + G2 *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(G2 **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 & reference is null"); + return ; + } + arg2 = *(G2 **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); + return ; + } + neg(*arg1,(G2 const &)*arg2); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_dbl_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { + G2 *arg1 = 0 ; + G2 *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(G2 **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 & reference is null"); + return ; + } + arg2 = *(G2 **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); + return ; + } + dbl(*arg1,(G2 const &)*arg2); +} + + +SWIGEXPORT void 
JNICALL Java_com_herumi_mcl_Bn256JNI_add_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { + G2 *arg1 = 0 ; + G2 *arg2 = 0 ; + G2 *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + (void)jarg3_; + arg1 = *(G2 **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 & reference is null"); + return ; + } + arg2 = *(G2 **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); + return ; + } + arg3 = *(G2 **)&jarg3; + if (!arg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); + return ; + } + add(*arg1,(G2 const &)*arg2,(G2 const &)*arg3); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_sub_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { + G2 *arg1 = 0 ; + G2 *arg2 = 0 ; + G2 *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + (void)jarg3_; + arg1 = *(G2 **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 & reference is null"); + return ; + } + arg2 = *(G2 **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); + return ; + } + arg3 = *(G2 **)&jarg3; + if (!arg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); + return ; + } + sub(*arg1,(G2 const &)*arg2,(G2 const &)*arg3); +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G2_1_1SWIG_10(JNIEnv *jenv, jclass jcls) { + jlong jresult = 0 ; + G2 *result = 0 ; + + (void)jenv; + (void)jcls; + result = (G2 *)new G2(); + *(G2 **)&jresult = result; + return jresult; +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G2_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + jlong jresult = 0 ; + G2 *arg1 = 0 ; + G2 *result = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(G2 **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); + return 0; + } + result = (G2 *)new G2((G2 const &)*arg1); + *(G2 **)&jresult = result; + return jresult; +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G2_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jstring jarg1, jstring jarg2, jstring jarg3, jstring jarg4) { + jlong jresult = 0 ; + std::string *arg1 = 0 ; + std::string *arg2 = 0 ; + std::string *arg3 = 0 ; + std::string *arg4 = 0 ; + G2 *result = 0 ; + + (void)jenv; + (void)jcls; + if(!jarg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return 0; + } + const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); + if (!arg1_pstr) return 0; + std::string arg1_str(arg1_pstr); + arg1 = &arg1_str; + jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return 0; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return 0; + std::string arg2_str(arg2_pstr); + arg2 = &arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + if(!jarg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return 0; + } + const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0); + if (!arg3_pstr) return 0; + 
std::string arg3_str(arg3_pstr); + arg3 = &arg3_str; + jenv->ReleaseStringUTFChars(jarg3, arg3_pstr); + if(!jarg4) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return 0; + } + const char *arg4_pstr = (const char *)jenv->GetStringUTFChars(jarg4, 0); + if (!arg4_pstr) return 0; + std::string arg4_str(arg4_pstr); + arg4 = &arg4_str; + jenv->ReleaseStringUTFChars(jarg4, arg4_pstr); + try { + result = (G2 *)new G2((std::string const &)*arg1,(std::string const &)*arg2,(std::string const &)*arg3,(std::string const &)*arg4); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return 0; + } + + *(G2 **)&jresult = result; + return jresult; +} + + +SWIGEXPORT jboolean JNICALL Java_com_herumi_mcl_Bn256JNI_G2_1equals(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { + jboolean jresult = 0 ; + G2 *arg1 = (G2 *) 0 ; + G2 *arg2 = 0 ; + bool result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(G2 **)&jarg1; + arg2 = *(G2 **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); + return 0; + } + result = (bool)((G2 const *)arg1)->equals((G2 const &)*arg2); + jresult = (jboolean)result; + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G2_1set(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2, jstring jarg3, jstring jarg4, jstring jarg5) { + G2 *arg1 = (G2 *) 0 ; + std::string *arg2 = 0 ; + std::string *arg3 = 0 ; + std::string *arg4 = 0 ; + std::string *arg5 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(G2 **)&jarg1; + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return ; + std::string arg2_str(arg2_pstr); + arg2 = &arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + if(!jarg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0); + if (!arg3_pstr) return ; + std::string arg3_str(arg3_pstr); + arg3 = &arg3_str; + jenv->ReleaseStringUTFChars(jarg3, arg3_pstr); + if(!jarg4) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg4_pstr = (const char *)jenv->GetStringUTFChars(jarg4, 0); + if (!arg4_pstr) return ; + std::string arg4_str(arg4_pstr); + arg4 = &arg4_str; + jenv->ReleaseStringUTFChars(jarg4, arg4_pstr); + if(!jarg5) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg5_pstr = (const char *)jenv->GetStringUTFChars(jarg5, 0); + if (!arg5_pstr) return ; + std::string arg5_str(arg5_pstr); + arg5 = &arg5_str; + jenv->ReleaseStringUTFChars(jarg5, arg5_pstr); + (arg1)->set((std::string const &)*arg2,(std::string const &)*arg3,(std::string const &)*arg4,(std::string const &)*arg5); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G2_1clear(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + G2 *arg1 = (G2 *) 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(G2 **)&jarg1; + (arg1)->clear(); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G2_1setStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { + G2 *arg1 = (G2 *) 0 ; + std::string *arg2 = 0 ; + + (void)jenv; 
+ (void)jcls; + (void)jarg1_; + arg1 = *(G2 **)&jarg1; + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return ; + std::string arg2_str(arg2_pstr); + arg2 = &arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + try { + (arg1)->setStr((std::string const &)*arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_Bn256JNI_G2_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + jstring jresult = 0 ; + G2 *arg1 = (G2 *) 0 ; + std::string result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(G2 **)&jarg1; + try { + result = ((G2 const *)arg1)->toString(); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return 0; + } + + jresult = jenv->NewStringUTF((&result)->c_str()); + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_delete_1G2(JNIEnv *jenv, jclass jcls, jlong jarg1) { + G2 *arg1 = (G2 *) 0 ; + + (void)jenv; + (void)jcls; + arg1 = *(G2 **)&jarg1; + delete arg1; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_mul_1_1SWIG_13(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { + GT *arg1 = 0 ; + GT *arg2 = 0 ; + GT *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + (void)jarg3_; + arg1 = *(GT **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT & reference is null"); + return ; + } + arg2 = *(GT **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT const & reference is null"); + return ; + } + arg3 = *(GT **)&jarg3; + if (!arg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT const & reference is null"); + return ; + } + mul(*arg1,(GT const &)*arg2,(GT const &)*arg3); +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1GT_1_1SWIG_10(JNIEnv *jenv, jclass jcls) { + jlong jresult = 0 ; + GT *result = 0 ; + + (void)jenv; + (void)jcls; + result = (GT *)new GT(); + *(GT **)&jresult = result; + return jresult; +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1GT_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + jlong jresult = 0 ; + GT *arg1 = 0 ; + GT *result = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(GT **)&jarg1; + if (!arg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT const & reference is null"); + return 0; + } + result = (GT *)new GT((GT const &)*arg1); + *(GT **)&jresult = result; + return jresult; +} + + +SWIGEXPORT jboolean JNICALL Java_com_herumi_mcl_Bn256JNI_GT_1equals(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { + jboolean jresult = 0 ; + GT *arg1 = (GT *) 0 ; + GT *arg2 = 0 ; + bool result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(GT **)&jarg1; + arg2 = *(GT **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT const & reference is null"); + return 0; + } + result = (bool)((GT const *)arg1)->equals((GT const &)*arg2); + jresult = (jboolean)result; + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_GT_1clear(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject 
jarg1_) {
+  GT *arg1 = (GT *) 0 ;
+
+  (void)jenv;
+  (void)jcls;
+  (void)jarg1_;
+  arg1 = *(GT **)&jarg1;
+  (arg1)->clear();
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_GT_1setStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
+  GT *arg1 = (GT *) 0 ;
+  std::string *arg2 = 0 ;
+
+  (void)jenv;
+  (void)jcls;
+  (void)jarg1_;
+  arg1 = *(GT **)&jarg1;
+  if(!jarg2) {
+    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
+    return ;
+  }
+  const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
+  if (!arg2_pstr) return ;
+  std::string arg2_str(arg2_pstr);
+  arg2 = &arg2_str;
+  jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
+  try {
+    (arg1)->setStr((std::string const &)*arg2);
+  }
+  catch(std::exception &_e) {
+    SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+    return ;
+  }
+
+}
+
+
+SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_Bn256JNI_GT_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
+  jstring jresult = 0 ;
+  GT *arg1 = (GT *) 0 ;
+  std::string result;
+
+  (void)jenv;
+  (void)jcls;
+  (void)jarg1_;
+  arg1 = *(GT **)&jarg1;
+  try {
+    result = ((GT const *)arg1)->toString();
+  }
+  catch(std::exception &_e) {
+    SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what());
+    return 0;
+  }
+
+  jresult = jenv->NewStringUTF((&result)->c_str());
+  return jresult;
+}
+
+
+SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_delete_1GT(JNIEnv *jenv, jclass jcls, jlong jarg1) {
+  GT *arg1 = (GT *) 0 ;
+
+  (void)jenv;
+  (void)jcls;
+  arg1 = *(GT **)&jarg1;
+  delete arg1;
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/java/elgamal.i b/vendor/github.com/byzantine-lab/mcl/ffi/java/elgamal.i
new file mode 100644
index 000000000..410723174
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/ffi/java/elgamal.i
@@ -0,0 +1,28 @@
+%module Elgamal
+
+%include "std_string.i"
+%include "std_except.i"
+
+
+%{
+#include <cybozu/random_generator.hpp>
+#include <cybozu/crypto.hpp>
+#include <mcl/fp.hpp>
+#include <mcl/ecparam.hpp>
+struct Param {
+const mcl::EcParam *ecParam;
+cybozu::RandomGenerator rg;
+cybozu::crypto::Hash::Name hashName;
+static inline Param& getParam()
+{
+	static Param p;
+	return p;
+}
+};
+
+#include "elgamal_impl.hpp"
+%}
+%include cpointer.i
+%pointer_functions(bool, p_bool);
+
+%include "elgamal_impl.hpp"
diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/java/elgamal_impl.hpp b/vendor/github.com/byzantine-lab/mcl/ffi/java/elgamal_impl.hpp
new file mode 100644
index 000000000..dbf2ba64e
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/ffi/java/elgamal_impl.hpp
@@ -0,0 +1,147 @@
+#pragma once
+//#define MCL_MAX_BIT_SIZE 521
+#include <sstream>
+#include <fstream>
+#include <cybozu/random_generator.hpp>
+#include <cybozu/crypto.hpp>
+#include <mcl/fp.hpp>
+#include <mcl/ecparam.hpp>
+#include <mcl/elgamal.hpp>
+
+typedef mcl::FpT<mcl::FpTag, 521> Fp;
+typedef mcl::FpT<mcl::ZnTag, 521> Zn;
+typedef mcl::EcT<Fp> Ec;
+typedef mcl::ElgamalT<Ec, Zn> Elgamal;
+
+/*
+	init system
+	@param param [in] string such as "ecParamName hashName"
+	@note NOT thread safe because setting global parameters of elliptic curve
+	ex1) "secp192k1 sha256" // 192bit security + sha256
+	ex2) "secp160k1 sha1" // 160bit security + sha1
+	hashName : sha1 sha224 sha256 sha384 sha512
+*/
+void SystemInit(const std::string& param) throw(std::exception)
+{
+	std::istringstream iss(param);
+	std::string ecParamStr;
+	std::string hashNameStr;
+	if (iss >> ecParamStr >> hashNameStr) {
+		Param& p = Param::getParam();
+		p.ecParam = mcl::getEcParam(ecParamStr);
+		Zn::init(p.ecParam->n);
+		Fp::init(p.ecParam->p);
+		Ec::init(p.ecParam->a, p.ecParam->b);
+		p.hashName =
cybozu::crypto::Hash::getName(hashNameStr); + return; + } + throw cybozu::Exception("SystemInit:bad param") << param; +} + +class CipherText { + Elgamal::CipherText self_; + friend class PublicKey; + friend class PrivateKey; +public: + std::string toStr() const throw(std::exception) { return self_.toStr(); } + std::string toString() const throw(std::exception) { return toStr(); } + void fromStr(const std::string& str) throw(std::exception) { self_.fromStr(str); } + + void add(const CipherText& c) throw(std::exception) { self_.add(c.self_); } + void mul(int m) throw(std::exception) + { + self_.mul(m); + } + void mul(const std::string& str) throw(std::exception) + { + Zn zn(str); + self_.mul(zn); + } +}; + +class PublicKey { + Elgamal::PublicKey self_; + friend class PrivateKey; +public: + std::string toStr() const throw(std::exception) { return self_.toStr(); } + std::string toString() const throw(std::exception) { return toStr(); } + void fromStr(const std::string& str) throw(std::exception) { self_.fromStr(str); } + + void save(const std::string& fileName) const throw(std::exception) + { + std::ofstream ofs(fileName.c_str(), std::ios::binary); + if (!(ofs << self_)) throw cybozu::Exception("PublicKey:save") << fileName; + } + void load(const std::string& fileName) throw(std::exception) + { + std::ifstream ifs(fileName.c_str(), std::ios::binary); + if (!(ifs >> self_)) throw cybozu::Exception("PublicKey:load") << fileName; + } + void enc(CipherText& c, int m) const throw(std::exception) + { + self_.enc(c.self_, m, Param::getParam().rg); + } + void enc(CipherText& c, const std::string& str) const throw(std::exception) + { + Zn zn(str); + self_.enc(c.self_, zn, Param::getParam().rg); + } + void rerandomize(CipherText& c) const throw(std::exception) + { + self_.rerandomize(c.self_, Param::getParam().rg); + } + void add(CipherText& c, int m) const throw(std::exception) + { + self_.add(c.self_, m); + } + void add(CipherText& c, const std::string& str) const throw(std::exception) + { + Zn zn(str); + self_.add(c.self_, zn); + } +}; + +class PrivateKey { + Elgamal::PrivateKey self_; +public: + std::string toStr() const throw(std::exception) { return self_.toStr(); } + std::string toString() const throw(std::exception) { return toStr(); } + void fromStr(const std::string& str) throw(std::exception) { self_.fromStr(str); } + + void save(const std::string& fileName) const throw(std::exception) + { + std::ofstream ofs(fileName.c_str(), std::ios::binary); + if (!(ofs << self_)) throw cybozu::Exception("PrivateKey:save") << fileName; + } + void load(const std::string& fileName) throw(std::exception) + { + std::ifstream ifs(fileName.c_str(), std::ios::binary); + if (!(ifs >> self_)) throw cybozu::Exception("PrivateKey:load") << fileName; + } + void init() throw(std::exception) + { + Param& p = Param::getParam(); + const Fp x0(p.ecParam->gx); + const Fp y0(p.ecParam->gy); + Ec P(x0, y0); + self_.init(P, Zn::getBitSize(), p.rg); + } + PublicKey getPublicKey() const throw(std::exception) + { + PublicKey ret; + ret.self_ = self_.getPublicKey(); + return ret; + } + int dec(const CipherText& c, bool *b = 0) const throw(std::exception) + { + return self_.dec(c.self_, b); + } + void setCache(int rangeMin, int rangeMax) throw(std::exception) + { + self_.setCache(rangeMin, rangeMax); + } + void clearCache() throw(std::exception) + { + self_.clearCache(); + } +}; diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/java/elgamal_wrap.cxx b/vendor/github.com/byzantine-lab/mcl/ffi/java/elgamal_wrap.cxx new file mode 
100644 index 000000000..38d05f489 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/ffi/java/elgamal_wrap.cxx @@ -0,0 +1,1129 @@ +/* ---------------------------------------------------------------------------- + * This file was automatically generated by SWIG (http://www.swig.org). + * Version 3.0.12 + * + * This file is not intended to be easily readable and contains a number of + * coding conventions designed to improve portability and efficiency. Do not make + * changes to this file unless you know what you are doing--modify the SWIG + * interface file instead. + * ----------------------------------------------------------------------------- */ + + +#ifndef SWIGJAVA +#define SWIGJAVA +#endif + + + +#ifdef __cplusplus +/* SwigValueWrapper is described in swig.swg */ +template class SwigValueWrapper { + struct SwigMovePointer { + T *ptr; + SwigMovePointer(T *p) : ptr(p) { } + ~SwigMovePointer() { delete ptr; } + SwigMovePointer& operator=(SwigMovePointer& rhs) { T* oldptr = ptr; ptr = 0; delete oldptr; ptr = rhs.ptr; rhs.ptr = 0; return *this; } + } pointer; + SwigValueWrapper& operator=(const SwigValueWrapper& rhs); + SwigValueWrapper(const SwigValueWrapper& rhs); +public: + SwigValueWrapper() : pointer(0) { } + SwigValueWrapper& operator=(const T& t) { SwigMovePointer tmp(new T(t)); pointer = tmp; return *this; } + operator T&() const { return *pointer.ptr; } + T *operator&() { return pointer.ptr; } +}; + +template T SwigValueInit() { + return T(); +} +#endif + +/* ----------------------------------------------------------------------------- + * This section contains generic SWIG labels for method/variable + * declarations/attributes, and other compiler dependent labels. + * ----------------------------------------------------------------------------- */ + +/* template workaround for compilers that cannot correctly implement the C++ standard */ +#ifndef SWIGTEMPLATEDISAMBIGUATOR +# if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) +# define SWIGTEMPLATEDISAMBIGUATOR template +# elif defined(__HP_aCC) +/* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ +/* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ +# define SWIGTEMPLATEDISAMBIGUATOR template +# else +# define SWIGTEMPLATEDISAMBIGUATOR +# endif +#endif + +/* inline attribute */ +#ifndef SWIGINLINE +# if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) +# define SWIGINLINE inline +# else +# define SWIGINLINE +# endif +#endif + +/* attribute recognised by some compilers to avoid 'unused' warnings */ +#ifndef SWIGUNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define SWIGUNUSED __attribute__ ((__unused__)) +# else +# define SWIGUNUSED +# endif +# elif defined(__ICC) +# define SWIGUNUSED __attribute__ ((__unused__)) +# else +# define SWIGUNUSED +# endif +#endif + +#ifndef SWIG_MSC_UNSUPPRESS_4505 +# if defined(_MSC_VER) +# pragma warning(disable : 4505) /* unreferenced local function has been removed */ +# endif +#endif + +#ifndef SWIGUNUSEDPARM +# ifdef __cplusplus +# define SWIGUNUSEDPARM(p) +# else +# define SWIGUNUSEDPARM(p) p SWIGUNUSED +# endif +#endif + +/* internal SWIG method */ +#ifndef SWIGINTERN +# define SWIGINTERN static SWIGUNUSED +#endif + +/* internal inline SWIG method */ +#ifndef SWIGINTERNINLINE +# define SWIGINTERNINLINE SWIGINTERN SWIGINLINE +#endif + +/* exporting methods */ +#if defined(__GNUC__) +# if (__GNUC__ >= 4) || 
(__GNUC__ == 3 && __GNUC_MINOR__ >= 4) +# ifndef GCC_HASCLASSVISIBILITY +# define GCC_HASCLASSVISIBILITY +# endif +# endif +#endif + +#ifndef SWIGEXPORT +# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) +# if defined(STATIC_LINKED) +# define SWIGEXPORT +# else +# define SWIGEXPORT __declspec(dllexport) +# endif +# else +# if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) +# define SWIGEXPORT __attribute__ ((visibility("default"))) +# else +# define SWIGEXPORT +# endif +# endif +#endif + +/* calling conventions for Windows */ +#ifndef SWIGSTDCALL +# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) +# define SWIGSTDCALL __stdcall +# else +# define SWIGSTDCALL +# endif +#endif + +/* Deal with Microsoft's attempt at deprecating C standard runtime functions */ +#if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) +# define _CRT_SECURE_NO_DEPRECATE +#endif + +/* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ +#if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) +# define _SCL_SECURE_NO_DEPRECATE +#endif + +/* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ +#if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) +# define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 +#endif + +/* Intel's compiler complains if a variable which was never initialised is + * cast to void, which is a common idiom which we use to indicate that we + * are aware a variable isn't used. So we just silence that warning. + * See: https://github.com/swig/swig/issues/192 for more discussion. + */ +#ifdef __INTEL_COMPILER +# pragma warning disable 592 +#endif + + +/* Fix for jlong on some versions of gcc on Windows */ +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) + typedef long long __int64; +#endif + +/* Fix for jlong on 64-bit x86 Solaris */ +#if defined(__x86_64) +# ifdef _LP64 +# undef _LP64 +# endif +#endif + +#include +#include +#include + + +/* Support for throwing Java exceptions */ +typedef enum { + SWIG_JavaOutOfMemoryError = 1, + SWIG_JavaIOException, + SWIG_JavaRuntimeException, + SWIG_JavaIndexOutOfBoundsException, + SWIG_JavaArithmeticException, + SWIG_JavaIllegalArgumentException, + SWIG_JavaNullPointerException, + SWIG_JavaDirectorPureVirtual, + SWIG_JavaUnknownError +} SWIG_JavaExceptionCodes; + +typedef struct { + SWIG_JavaExceptionCodes code; + const char *java_exception; +} SWIG_JavaExceptions_t; + + +static void SWIGUNUSED SWIG_JavaThrowException(JNIEnv *jenv, SWIG_JavaExceptionCodes code, const char *msg) { + jclass excep; + static const SWIG_JavaExceptions_t java_exceptions[] = { + { SWIG_JavaOutOfMemoryError, "java/lang/OutOfMemoryError" }, + { SWIG_JavaIOException, "java/io/IOException" }, + { SWIG_JavaRuntimeException, "java/lang/RuntimeException" }, + { SWIG_JavaIndexOutOfBoundsException, "java/lang/IndexOutOfBoundsException" }, + { SWIG_JavaArithmeticException, "java/lang/ArithmeticException" }, + { SWIG_JavaIllegalArgumentException, "java/lang/IllegalArgumentException" }, + { SWIG_JavaNullPointerException, "java/lang/NullPointerException" }, + { SWIG_JavaDirectorPureVirtual, "java/lang/RuntimeException" }, + { SWIG_JavaUnknownError, "java/lang/UnknownError" }, + { (SWIG_JavaExceptionCodes)0, "java/lang/UnknownError" } + }; + const SWIG_JavaExceptions_t *except_ptr = java_exceptions; + + while (except_ptr->code != code && except_ptr->code) + 
except_ptr++; + + jenv->ExceptionClear(); + excep = jenv->FindClass(except_ptr->java_exception); + if (excep) + jenv->ThrowNew(excep, msg); +} + + +/* Contract support */ + +#define SWIG_contract_assert(nullreturn, expr, msg) if (!(expr)) {SWIG_JavaThrowException(jenv, SWIG_JavaIllegalArgumentException, msg); return nullreturn; } else + + +#include + + +#include +#include + + +#include +#include +#include +#include +struct Param { +const mcl::EcParam *ecParam; +cybozu::RandomGenerator rg; +cybozu::crypto::Hash::Name hashName; +static inline Param& getParam() +{ + static Param p; + return p; +} +}; + +#include "elgamal_impl.hpp" + + +static bool *new_p_bool() { + return new bool(); +} + +static bool *copy_p_bool(bool value) { + return new bool(value); +} + +static void delete_p_bool(bool *obj) { + if (obj) delete obj; +} + +static void p_bool_assign(bool *obj, bool value) { + *obj = value; +} + +static bool p_bool_value(bool *obj) { + return *obj; +} + + +#ifdef __cplusplus +extern "C" { +#endif + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_new_1p_1bool(JNIEnv *jenv, jclass jcls) { + jlong jresult = 0 ; + bool *result = 0 ; + + (void)jenv; + (void)jcls; + result = (bool *)new_p_bool(); + *(bool **)&jresult = result; + return jresult; +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_copy_1p_1bool(JNIEnv *jenv, jclass jcls, jboolean jarg1) { + jlong jresult = 0 ; + bool arg1 ; + bool *result = 0 ; + + (void)jenv; + (void)jcls; + arg1 = jarg1 ? true : false; + result = (bool *)copy_p_bool(arg1); + *(bool **)&jresult = result; + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_delete_1p_1bool(JNIEnv *jenv, jclass jcls, jlong jarg1) { + bool *arg1 = (bool *) 0 ; + + (void)jenv; + (void)jcls; + arg1 = *(bool **)&jarg1; + delete_p_bool(arg1); +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_p_1bool_1assign(JNIEnv *jenv, jclass jcls, jlong jarg1, jboolean jarg2) { + bool *arg1 = (bool *) 0 ; + bool arg2 ; + + (void)jenv; + (void)jcls; + arg1 = *(bool **)&jarg1; + arg2 = jarg2 ? 
true : false; + p_bool_assign(arg1,arg2); +} + + +SWIGEXPORT jboolean JNICALL Java_com_herumi_mcl_ElgamalJNI_p_1bool_1value(JNIEnv *jenv, jclass jcls, jlong jarg1) { + jboolean jresult = 0 ; + bool *arg1 = (bool *) 0 ; + bool result; + + (void)jenv; + (void)jcls; + arg1 = *(bool **)&jarg1; + result = (bool)p_bool_value(arg1); + jresult = (jboolean)result; + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_SystemInit(JNIEnv *jenv, jclass jcls, jstring jarg1) { + std::string *arg1 = 0 ; + + (void)jenv; + (void)jcls; + if(!jarg1) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); + if (!arg1_pstr) return ; + std::string arg1_str(arg1_pstr); + arg1 = &arg1_str; + jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); + try { + SystemInit((std::string const &)*arg1); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1toStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + jstring jresult = 0 ; + CipherText *arg1 = (CipherText *) 0 ; + std::string result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(CipherText **)&jarg1; + try { + result = ((CipherText const *)arg1)->toStr(); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return 0; + } + + jresult = jenv->NewStringUTF((&result)->c_str()); + return jresult; +} + + +SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + jstring jresult = 0 ; + CipherText *arg1 = (CipherText *) 0 ; + std::string result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(CipherText **)&jarg1; + try { + result = ((CipherText const *)arg1)->toString(); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return 0; + } + + jresult = jenv->NewStringUTF((&result)->c_str()); + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1fromStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { + CipherText *arg1 = (CipherText *) 0 ; + std::string *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(CipherText **)&jarg1; + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return ; + std::string arg2_str(arg2_pstr); + arg2 = &arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + try { + (arg1)->fromStr((std::string const &)*arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1add(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { + CipherText *arg1 = (CipherText *) 0 ; + CipherText *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(CipherText **)&jarg1; + arg2 = *(CipherText **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText const & reference is null"); + return ; + } + try { + (arg1)->add((CipherText const &)*arg2); + } + catch(std::exception &_e) { + 
SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1mul_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2) { + CipherText *arg1 = (CipherText *) 0 ; + int arg2 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(CipherText **)&jarg1; + arg2 = (int)jarg2; + try { + (arg1)->mul(arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1mul_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { + CipherText *arg1 = (CipherText *) 0 ; + std::string *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(CipherText **)&jarg1; + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return ; + std::string arg2_str(arg2_pstr); + arg2 = &arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + try { + (arg1)->mul((std::string const &)*arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_new_1CipherText(JNIEnv *jenv, jclass jcls) { + jlong jresult = 0 ; + CipherText *result = 0 ; + + (void)jenv; + (void)jcls; + result = (CipherText *)new CipherText(); + *(CipherText **)&jresult = result; + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_delete_1CipherText(JNIEnv *jenv, jclass jcls, jlong jarg1) { + CipherText *arg1 = (CipherText *) 0 ; + + (void)jenv; + (void)jcls; + arg1 = *(CipherText **)&jarg1; + delete arg1; +} + + +SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1toStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + jstring jresult = 0 ; + PublicKey *arg1 = (PublicKey *) 0 ; + std::string result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(PublicKey **)&jarg1; + try { + result = ((PublicKey const *)arg1)->toStr(); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return 0; + } + + jresult = jenv->NewStringUTF((&result)->c_str()); + return jresult; +} + + +SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + jstring jresult = 0 ; + PublicKey *arg1 = (PublicKey *) 0 ; + std::string result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(PublicKey **)&jarg1; + try { + result = ((PublicKey const *)arg1)->toString(); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return 0; + } + + jresult = jenv->NewStringUTF((&result)->c_str()); + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1fromStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { + PublicKey *arg1 = (PublicKey *) 0 ; + std::string *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(PublicKey **)&jarg1; + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return ; + std::string arg2_str(arg2_pstr); + arg2 = 
&arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + try { + (arg1)->fromStr((std::string const &)*arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1save(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { + PublicKey *arg1 = (PublicKey *) 0 ; + std::string *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(PublicKey **)&jarg1; + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return ; + std::string arg2_str(arg2_pstr); + arg2 = &arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + try { + ((PublicKey const *)arg1)->save((std::string const &)*arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1load(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { + PublicKey *arg1 = (PublicKey *) 0 ; + std::string *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(PublicKey **)&jarg1; + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return ; + std::string arg2_str(arg2_pstr); + arg2 = &arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + try { + (arg1)->load((std::string const &)*arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1enc_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jint jarg3) { + PublicKey *arg1 = (PublicKey *) 0 ; + CipherText *arg2 = 0 ; + int arg3 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(PublicKey **)&jarg1; + arg2 = *(CipherText **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText & reference is null"); + return ; + } + arg3 = (int)jarg3; + try { + ((PublicKey const *)arg1)->enc(*arg2,arg3); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1enc_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jstring jarg3) { + PublicKey *arg1 = (PublicKey *) 0 ; + CipherText *arg2 = 0 ; + std::string *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(PublicKey **)&jarg1; + arg2 = *(CipherText **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText & reference is null"); + return ; + } + if(!jarg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0); + if (!arg3_pstr) return ; + std::string arg3_str(arg3_pstr); + arg3 = &arg3_str; + jenv->ReleaseStringUTFChars(jarg3, arg3_pstr); + try { + ((PublicKey const *)arg1)->enc(*arg2,(std::string const &)*arg3); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, 
SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1rerandomize(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { + PublicKey *arg1 = (PublicKey *) 0 ; + CipherText *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(PublicKey **)&jarg1; + arg2 = *(CipherText **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText & reference is null"); + return ; + } + try { + ((PublicKey const *)arg1)->rerandomize(*arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1add_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jint jarg3) { + PublicKey *arg1 = (PublicKey *) 0 ; + CipherText *arg2 = 0 ; + int arg3 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(PublicKey **)&jarg1; + arg2 = *(CipherText **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText & reference is null"); + return ; + } + arg3 = (int)jarg3; + try { + ((PublicKey const *)arg1)->add(*arg2,arg3); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1add_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jstring jarg3) { + PublicKey *arg1 = (PublicKey *) 0 ; + CipherText *arg2 = 0 ; + std::string *arg3 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(PublicKey **)&jarg1; + arg2 = *(CipherText **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText & reference is null"); + return ; + } + if(!jarg3) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0); + if (!arg3_pstr) return ; + std::string arg3_str(arg3_pstr); + arg3 = &arg3_str; + jenv->ReleaseStringUTFChars(jarg3, arg3_pstr); + try { + ((PublicKey const *)arg1)->add(*arg2,(std::string const &)*arg3); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_new_1PublicKey(JNIEnv *jenv, jclass jcls) { + jlong jresult = 0 ; + PublicKey *result = 0 ; + + (void)jenv; + (void)jcls; + result = (PublicKey *)new PublicKey(); + *(PublicKey **)&jresult = result; + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_delete_1PublicKey(JNIEnv *jenv, jclass jcls, jlong jarg1) { + PublicKey *arg1 = (PublicKey *) 0 ; + + (void)jenv; + (void)jcls; + arg1 = *(PublicKey **)&jarg1; + delete arg1; +} + + +SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1toStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + jstring jresult = 0 ; + PrivateKey *arg1 = (PrivateKey *) 0 ; + std::string result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(PrivateKey **)&jarg1; + try { + result = ((PrivateKey const *)arg1)->toStr(); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return 0; + } + + jresult = 
jenv->NewStringUTF((&result)->c_str()); + return jresult; +} + + +SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + jstring jresult = 0 ; + PrivateKey *arg1 = (PrivateKey *) 0 ; + std::string result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(PrivateKey **)&jarg1; + try { + result = ((PrivateKey const *)arg1)->toString(); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return 0; + } + + jresult = jenv->NewStringUTF((&result)->c_str()); + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1fromStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { + PrivateKey *arg1 = (PrivateKey *) 0 ; + std::string *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(PrivateKey **)&jarg1; + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return ; + std::string arg2_str(arg2_pstr); + arg2 = &arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + try { + (arg1)->fromStr((std::string const &)*arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1save(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { + PrivateKey *arg1 = (PrivateKey *) 0 ; + std::string *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(PrivateKey **)&jarg1; + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return ; + std::string arg2_str(arg2_pstr); + arg2 = &arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + try { + ((PrivateKey const *)arg1)->save((std::string const &)*arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1load(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { + PrivateKey *arg1 = (PrivateKey *) 0 ; + std::string *arg2 = 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(PrivateKey **)&jarg1; + if(!jarg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); + return ; + } + const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); + if (!arg2_pstr) return ; + std::string arg2_str(arg2_pstr); + arg2 = &arg2_str; + jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); + try { + (arg1)->load((std::string const &)*arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1init(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + PrivateKey *arg1 = (PrivateKey *) 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(PrivateKey **)&jarg1; + try { + (arg1)->init(); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1getPublicKey(JNIEnv *jenv, jclass jcls, jlong 
jarg1, jobject jarg1_) { + jlong jresult = 0 ; + PrivateKey *arg1 = (PrivateKey *) 0 ; + PublicKey result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(PrivateKey **)&jarg1; + try { + result = ((PrivateKey const *)arg1)->getPublicKey(); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return 0; + } + + *(PublicKey **)&jresult = new PublicKey((const PublicKey &)result); + return jresult; +} + + +SWIGEXPORT jint JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1dec_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3) { + jint jresult = 0 ; + PrivateKey *arg1 = (PrivateKey *) 0 ; + CipherText *arg2 = 0 ; + bool *arg3 = (bool *) 0 ; + int result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(PrivateKey **)&jarg1; + arg2 = *(CipherText **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText const & reference is null"); + return 0; + } + arg3 = *(bool **)&jarg3; + try { + result = (int)((PrivateKey const *)arg1)->dec((CipherText const &)*arg2,arg3); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return 0; + } + + jresult = (jint)result; + return jresult; +} + + +SWIGEXPORT jint JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1dec_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { + jint jresult = 0 ; + PrivateKey *arg1 = (PrivateKey *) 0 ; + CipherText *arg2 = 0 ; + int result; + + (void)jenv; + (void)jcls; + (void)jarg1_; + (void)jarg2_; + arg1 = *(PrivateKey **)&jarg1; + arg2 = *(CipherText **)&jarg2; + if (!arg2) { + SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText const & reference is null"); + return 0; + } + try { + result = (int)((PrivateKey const *)arg1)->dec((CipherText const &)*arg2); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return 0; + } + + jresult = (jint)result; + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1setCache(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2, jint jarg3) { + PrivateKey *arg1 = (PrivateKey *) 0 ; + int arg2 ; + int arg3 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(PrivateKey **)&jarg1; + arg2 = (int)jarg2; + arg3 = (int)jarg3; + try { + (arg1)->setCache(arg2,arg3); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1clearCache(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { + PrivateKey *arg1 = (PrivateKey *) 0 ; + + (void)jenv; + (void)jcls; + (void)jarg1_; + arg1 = *(PrivateKey **)&jarg1; + try { + (arg1)->clearCache(); + } + catch(std::exception &_e) { + SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); + return ; + } + +} + + +SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_new_1PrivateKey(JNIEnv *jenv, jclass jcls) { + jlong jresult = 0 ; + PrivateKey *result = 0 ; + + (void)jenv; + (void)jcls; + result = (PrivateKey *)new PrivateKey(); + *(PrivateKey **)&jresult = result; + return jresult; +} + + +SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_delete_1PrivateKey(JNIEnv *jenv, jclass jcls, jlong jarg1) { + PrivateKey *arg1 = (PrivateKey *) 0 ; + + (void)jenv; + (void)jcls; + arg1 = 
*(PrivateKey **)&jarg1;
+  delete arg1;
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/java/java.md b/vendor/github.com/byzantine-lab/mcl/ffi/java/java.md
new file mode 100644
index 000000000..3fe861351
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/ffi/java/java.md
@@ -0,0 +1,95 @@
+# JNI for mcl (experimental)
+This library provides functionality to compute the optimal ate pairing
+over Barreto-Naehrig (BN) curves.
+
+# Initialization
+Load the library `mcl_bn256`.
+```
+import com.herumi.mcl.*;
+
+System.loadLibrary("mcl_bn256");
+```
+
+# Classes
+* `G1` ; The cyclic group instantiated as E(Fp)[r] where r = p + 1 - t.
+* `G2` ; The cyclic group instantiated as the inverse image of E'(Fp^2)[r].
+* `GT` ; The cyclic group in the image of the optimal ate pairing.
+  * `e : G1 x G2 -> GT`
+* `Fr` ; The finite field with characteristic r.
+
+# Methods and Functions
+## Fr
+* `Fr::setInt(int x)` ; set by x
+* `Fr::setStr(String str)` ; set by str such as "123", "0xfff", etc.
+* `Fr::setRand()` ; randomly set
+* `Bn256.neg(Fr y, Fr x)` ; `y = -x`
+* `Bn256.add(Fr z, Fr x, Fr y)` ; `z = x + y`
+* `Bn256.sub(Fr z, Fr x, Fr y)` ; `z = x - y`
+* `Bn256.mul(Fr z, Fr x, Fr y)` ; `z = x * y`
+* `Bn256.div(Fr z, Fr x, Fr y)` ; `z = x / y`
+
+## G1
+
+* `G1::set(String x, String y)` ; set by (x, y)
+* `G1::hashAndMapToG1(String m)` ; take SHA-256 of m and map it to an element of G1
+* `G1::setStr(String str)` ; set by the result of `toString()` method
+* `Bn256.neg(G1 y, G1 x)` ; `y = -x`
+* `Bn256.dbl(G1 y, G1 x)` ; `y = 2x`
+* `Bn256.add(G1 z, G1 x, G1 y)` ; `z = x + y`
+* `Bn256.sub(G1 z, G1 x, G1 y)` ; `z = x - y`
+* `Bn256.mul(G1 z, G1 x, Fr y)` ; `z = x * y`
+
+## G2
+
+* `G2::set(String xa, String xb, String ya, String yb)` ; set by ((xa, xb), (ya, yb))
+* `G2::setStr(String str)` ; set by the result of `toString()` method
+* `Bn256.neg(G2 y, G2 x)` ; `y = -x`
+* `Bn256.dbl(G2 y, G2 x)` ; `y = 2x`
+* `Bn256.add(G2 z, G2 x, G2 y)` ; `z = x + y`
+* `Bn256.sub(G2 z, G2 x, G2 y)` ; `z = x - y`
+* `Bn256.mul(G2 z, G2 x, Fr y)` ; `z = x * y`
+
+## GT
+
+* `GT::setStr(String str)` ; set by the result of `toString()` method
+* `Bn256.mul(GT z, GT x, GT y)` ; `z = x * y`
+* `Bn256.pow(GT z, GT x, Fr y)` ; `z = x ^ y`
+
+## pairing
+* `Bn256.pairing(GT e, G1 P, G2 Q)` ; e = e(P, Q)
+
+# BLS signature sample
+```
+String xa = "12723517038133731887338407189719511622662176727675373276651903807414909099441";
+String xb = "4168783608814932154536427934509895782246573715297911553964171371032945126671";
+String ya = "13891744915211034074451795021214165905772212241412891944830863846330766296736";
+String yb = "7937318970632701341203597196594272556916396164729705624521405069090520231616";
+
+G2 Q = new G2(xa, xb, ya, yb); // fixed point of G2
+
+Fr s = new Fr();
+s.setRand(); // secret key
+G2 pub = new G2();
+Bn256.mul(pub, Q, s); // public key = sQ
+
+String m = "signature test";
+G1 H = new G1();
+H.hashAndMapToG1(m); // H = Hash(m)
+G1 sign = new G1();
+Bn256.mul(sign, H, s); // signature of m = s H
+
+GT e1 = new GT();
+GT e2 = new GT();
+Bn256.pairing(e1, H, pub); // e1 = e(H, s Q)
+Bn256.pairing(e2, sign, Q); // e2 = e(s H, Q);
+assertBool("verify signature", e1.equals(e2));
+```
+
+# Make test
+```
+cd java
+make test_bn256
+```
+
+# Sample code
+[Bn256Test.java](https://github.com/herumi/mcl/blob/master/java/Bn256Test.java)
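The ElGamal wrapper that `make_wrap.bat` builds next (its default `NAME` is `elgamal`) ships a Java test, `ElgamalTest.java`, but no walkthrough like the Bn256 one above. The following is a minimal C++ sketch against the classes declared in `elgamal_impl.hpp` earlier in this patch; the curve/hash string, the message values, and the `setCache` range are illustrative choices, not requirements of the API.
```
// Minimal sketch (not part of the vendored sources): drives the ElGamal
// classes from elgamal_impl.hpp directly; assumes the same include and link
// setup that make_wrap.bat uses for the JNI build.
#include <cstdio>
#include <exception>
#include "elgamal_impl.hpp"

int main()
try {
	SystemInit("secp192k1 sha256"); // "ecParamName hashName"; NOT thread safe
	PrivateKey prv;
	prv.init();                                // random secret key for the curve
	const PublicKey pub = prv.getPublicKey();

	CipherText c1, c2;
	pub.enc(c1, 3);
	pub.enc(c2, 4);
	c1.add(c2);                                // additive homomorphism: Enc(3 + 4)

	prv.setCache(0, 100);                      // decryption searches a small message range
	bool ok = false;
	const int m = prv.dec(c1, &ok);            // expect ok == true and m == 7
	std::printf("ok=%d m=%d\n", ok, m);
	return 0;
} catch (const std::exception& e) {
	std::printf("ERR %s\n", e.what());
	return 1;
}
```
From Java the same flow goes through the SWIG proxies (`Elgamal.SystemInit(...)`, `PrivateKey`, `PublicKey`, `CipherText`), which is what the generated `Java_com_herumi_mcl_ElgamalJNI_*` entry points above exist to serve.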
diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/java/make_wrap.bat b/vendor/github.com/byzantine-lab/mcl/ffi/java/make_wrap.bat
new file mode 100644
index 000000000..b7008bc02
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/ffi/java/make_wrap.bat
@@ -0,0 +1,23 @@
+@echo off
+call set-java-path.bat
+set JAVA_INCLUDE=%JAVA_DIR%\include
+set SWIG=..\..\..\..\p\swig\swig.exe
+set PACKAGE_NAME=com.herumi.mcl
+set PACKAGE_DIR=%PACKAGE_NAME:.=\%
+if /i "%1"=="" (
+  set NAME=elgamal
+) else (
+  set NAME=%1
+)
+
+echo [[run swig]]
+mkdir %PACKAGE_DIR%
+set TOP_DIR=../..
+%SWIG% -java -package %PACKAGE_NAME% -outdir %PACKAGE_DIR% -c++ -Wall %NAME%.i
+echo [[make dll]]
+cl /MT /DNOMINMAX /LD /Ox /DNDEBUG /EHsc %NAME%_wrap.cxx %TOP_DIR%/src/fp.cpp -DMCL_NO_AUTOLINK -I%JAVA_INCLUDE% -I%JAVA_INCLUDE%\win32 -I%TOP_DIR%/include -I%TOP_DIR%/../cybozulib/include -I%TOP_DIR%/../cybozulib_ext/include -I%TOP_DIR%/../xbyak /link /LIBPATH:%TOP_DIR%/../cybozulib_ext/lib /OUT:%TOP_DIR%/bin/mcl_%NAME%.dll
+
+call run-%NAME%.bat
+
+echo [[make jar]]
+%JAVA_DIR%\bin\jar cvf mcl.jar com
diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/java/run-bn256.bat b/vendor/github.com/byzantine-lab/mcl/ffi/java/run-bn256.bat
new file mode 100644
index 000000000..903876ec6
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/ffi/java/run-bn256.bat
@@ -0,0 +1,9 @@
+@echo off
+echo [[compile Bn256Test.java]]
+%JAVA_DIR%\bin\javac Bn256Test.java
+
+echo [[run Bn256Test]]
+set TOP_DIR=..\..
+pushd %TOP_DIR%\bin
+%JAVA_DIR%\bin\java -classpath ../ffi/java Bn256Test %1 %2 %3 %4 %5 %6
+popd
diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/java/run-elgamal.bat b/vendor/github.com/byzantine-lab/mcl/ffi/java/run-elgamal.bat
new file mode 100644
index 000000000..8b889a64c
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/ffi/java/run-elgamal.bat
@@ -0,0 +1,9 @@
+@echo off
+echo [[compile ElgamalTest.java]]
+%JAVA_DIR%\bin\javac ElgamalTest.java
+
+echo [[run ElgamalTest]]
+set TOP_DIR=..\..
+pushd %TOP_DIR%\bin +%JAVA_DIR%\bin\java -classpath ../ffi/java ElgamalTest %1 %2 %3 %4 %5 %6 +popd diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/java/set-java-path.bat b/vendor/github.com/byzantine-lab/mcl/ffi/java/set-java-path.bat new file mode 100644 index 000000000..c66f81830 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/ffi/java/set-java-path.bat @@ -0,0 +1,8 @@ +@echo off +if "%JAVA_HOME%"=="" ( + set JAVA_DIR=c:/p/Java/jdk +) else ( + set JAVA_DIR=%JAVA_HOME% +) +echo JAVA_DIR=%JAVA_DIR% +rem set PATH=%PATH%;%JAVA_DIR%\bin diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/js/export-functions.py b/vendor/github.com/byzantine-lab/mcl/ffi/js/export-functions.py new file mode 100644 index 000000000..2a929564b --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/ffi/js/export-functions.py @@ -0,0 +1,73 @@ +import sys, re, argparse + +#RE_PROTOTYPE = re.compile(r'MCLBN_DLL_API\s\w\s\w\([^)]*\);') +RE_PROTOTYPE = re.compile(r'\w*\s(\w*)\s(\w*)\(([^)]*)\);') +def export_functions(args, fileNames, reToAddUnderscore): + modName = args.js + json = args.json + if not reToAddUnderscore: + reToAddUnderscore = r'(mclBn_init|setStr|getStr|[sS]erialize|setLittleEndian|setHashOf|hashAndMapTo|DecStr|HexStr|HashTo|blsSign|blsVerify|GetCurveOrder|GetFieldOrder|KeyShare|KeyRecover|blsSignatureRecover|blsInit)' + reSpecialFunctionName = re.compile(reToAddUnderscore) + if json: + print '[' + elif modName: + print 'function define_exported_' + modName + '(mod) {' + comma = '' + for fileName in fileNames: + with open(fileName, 'rb') as f: + for line in f.readlines(): + p = RE_PROTOTYPE.search(line) + if p: + ret = p.group(1) + name = p.group(2) + arg = p.group(3) + if json or modName: + retType = 'null' if ret == 'void' else 'number' + if arg == '' or arg == 'void': + paramNum = 0 + else: + paramNum = len(arg.split(',')) + if reSpecialFunctionName.search(name): + exportName = '_' + name # to wrap function + else: + exportName = name + if json: + print comma + '{' + if comma == '': + comma = ',' + print ' "name":"{0}",'.format(name) + print ' "exportName":"{0}",'.format(exportName) + print ' "ret":"{0}",'.format(retType) + print ' "args":[', + if paramNum > 0: + print '"number"' + (', "number"' * (paramNum - 1)), + print ']' + print '}' + else: + paramType = '[' + ("'number', " * paramNum) + ']' + print "{0} = mod.cwrap('{1}', '{2}', {3})".format(exportName, name, retType, paramType) + else: + print comma + "'_" + name + "'", + if comma == '': + comma = ',' + if json: + print ']' + elif modName: + print '}' + +def main(): + p = argparse.ArgumentParser('export_functions') + p.add_argument('header', type=str, nargs='+', help='headers') + p.add_argument('-js', type=str, nargs='?', help='module name') + p.add_argument('-re', type=str, nargs='?', help='regular expression file to add underscore to function name') + p.add_argument('-json', action='store_true', help='output json') + args = p.parse_args() + + reToAddUnderscore = '' + if args.re: + reToAddUnderscore = open(args.re).read().strip() + export_functions(args, args.header, reToAddUnderscore) + +if __name__ == '__main__': + main() + diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/js/pre-mcl.js b/vendor/github.com/byzantine-lab/mcl/ffi/js/pre-mcl.js new file mode 100644 index 000000000..ebc93e581 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/ffi/js/pre-mcl.js @@ -0,0 +1,5 @@ +if (typeof __dirname === 'string') { + var Module = {} + Module.wasmBinaryFile = __dirname + '/mcl_c.wasm' +} + diff --git 
a/vendor/github.com/byzantine-lab/mcl/ffi/python/pairing.py b/vendor/github.com/byzantine-lab/mcl/ffi/python/pairing.py
new file mode 100644
index 000000000..88b729176
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/ffi/python/pairing.py
@@ -0,0 +1,80 @@
+from ctypes import *
+from ctypes.wintypes import LPWSTR, LPCSTR, LPVOID
+
+g_lib = None
+
+def BN256_init():
+	global g_lib
+	g_lib = cdll.LoadLibrary("../../bin/bn256.dll")
+	ret = g_lib.BN256_init()
+	if ret:
+		print "ERR BN256_init"
+
+class Fr(Structure):
+	_fields_ = [("v", c_ulonglong * 4)]
+	def setInt(self, v):
+		g_lib.BN256_Fr_setInt(self.v, v)
+	def setStr(self, s):
+		ret = g_lib.BN256_Fr_setStr(self.v, c_char_p(s))
+		if ret:
+			print("ERR Fr:setStr")
+	def __str__(self):
+		svLen = 1024
+		sv = create_string_buffer('\0' * svLen)
+		ret = g_lib.BN256_Fr_getStr(sv, svLen, self.v)
+		if ret:
+			print("ERR Fr:getStr")
+		return sv.value
+	def isZero(self):
+		return g_lib.BN256_Fr_isZero(self.v) != 0
+	def isOne(self):
+		return g_lib.BN256_Fr_isOne(self.v) != 0
+	def __eq__(self, rhs):
+		return g_lib.BN256_Fr_isEqual(self.v, rhs.v) != 0
+	def __ne__(self, rhs):
+		return not (self == rhs)
+	def __add__(self, rhs):
+		ret = Fr()
+		g_lib.BN256_Fr_add(ret.v, self.v, rhs.v)
+		return ret
+	def __sub__(self, rhs):
+		ret = Fr()
+		g_lib.BN256_Fr_sub(ret.v, self.v, rhs.v)
+		return ret
+	def __mul__(self, rhs):
+		ret = Fr()
+		g_lib.BN256_Fr_mul(ret.v, self.v, rhs.v)
+		return ret
+	def __div__(self, rhs):
+		ret = Fr()
+		g_lib.BN256_Fr_div(ret.v, self.v, rhs.v)
+		return ret
+	def __neg__(self):
+		ret = Fr()
+		g_lib.BN256_Fr_neg(ret.v, self.v)
+		return ret
+
+def Fr_add(z, x, y):
+	g_lib.BN256_Fr_add(z.v, x.v, y.v)
+
+def Fr_sub(z, x, y):
+	g_lib.BN256_Fr_sub(z.v, x.v, y.v)
+
+def Fr_mul(z, x, y):
+	g_lib.BN256_Fr_mul(z.v, x.v, y.v)
+
+def Fr_div(z, x, y):
+	g_lib.BN256_Fr_div(z.v, x.v, y.v)
+
+BN256_init()
+
+P = Fr()
+Q = Fr()
+print P == Q
+print P != Q
+P.setInt(5)
+Q.setStr("34982034824")
+print Q
+R = Fr()
+Fr_add(R, P, Q)
+print R
diff --git a/vendor/github.com/byzantine-lab/mcl/ffi/python/she.py b/vendor/github.com/byzantine-lab/mcl/ffi/python/she.py
new file mode 100644
index 000000000..ab8975274
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/ffi/python/she.py
@@ -0,0 +1,298 @@
+import os
+import platform
+from ctypes import *
+
+MCL_BN254 = 0
+MCLBN_FR_UNIT_SIZE = 4
+MCLBN_FP_UNIT_SIZE = 4
+
+FR_SIZE = MCLBN_FR_UNIT_SIZE
+G1_SIZE = MCLBN_FP_UNIT_SIZE * 3
+G2_SIZE = MCLBN_FP_UNIT_SIZE * 6
+GT_SIZE = MCLBN_FP_UNIT_SIZE * 12
+
+SEC_SIZE = FR_SIZE * 2
+PUB_SIZE = G1_SIZE + G2_SIZE
+G1_CIPHER_SIZE = G1_SIZE * 2
+G2_CIPHER_SIZE = G2_SIZE * 2
+GT_CIPHER_SIZE = GT_SIZE * 4
+
+MCLBN_COMPILED_TIME_VAR = (MCLBN_FR_UNIT_SIZE * 10) + MCLBN_FP_UNIT_SIZE
+
+Buffer = c_ubyte * 1536
+lib = None
+
+def init(curveType=MCL_BN254):
+	global lib
+	name = platform.system()
+	if name == 'Linux':
+		libName = 'libmclshe256.so'
+	elif name == 'Darwin':
+		libName = 'libmclshe256.dylib'
+	elif name == 'Windows':
+		libName = 'mclshe256.dll'
+	else:
+		raise RuntimeError("not supported yet", name)
+	lib = cdll.LoadLibrary(libName)
+	ret = lib.sheInit(curveType, MCLBN_COMPILED_TIME_VAR)
+	if ret != 0:
+		raise RuntimeError("sheInit", ret)
+	# custom setup for a function which returns pointer
+	lib.shePrecomputedPublicKeyCreate.restype = c_void_p
+
+def setRangeForDLP(hashSize):
+	ret = lib.sheSetRangeForDLP(hashSize)
+	if ret != 0:
+		raise RuntimeError("setRangeForDLP", ret)
+
+def setTryNum(tryNum):
+	ret = lib.sheSetTryNum(tryNum)
+	if ret != 0:
RuntimeError("setTryNum", ret) + +def hexStr(v): + s = "" + for x in v: + s += format(x, '02x') + return s + +class CipherTextG1(Structure): + _fields_ = [("v", c_ulonglong * G1_CIPHER_SIZE)] + def serialize(self): + buf = Buffer() + ret = lib.sheCipherTextG1Serialize(byref(buf), len(buf), byref(self.v)) + if ret == 0: + raise RuntimeError("serialize") + return buf[0:ret] + def serializeToHexStr(self): + return hexStr(self.serialize()) + +class CipherTextG2(Structure): + _fields_ = [("v", c_ulonglong * G2_CIPHER_SIZE)] + def serialize(self): + buf = Buffer() + ret = lib.sheCipherTextG2Serialize(byref(buf), len(buf), byref(self.v)) + if ret == 0: + raise RuntimeError("serialize") + return buf[0:ret] + def serializeToHexStr(self): + return hexStr(self.serialize()) + +class CipherTextGT(Structure): + _fields_ = [("v", c_ulonglong * GT_CIPHER_SIZE)] + def serialize(self): + buf = Buffer() + ret = lib.sheCipherTextGTSerialize(byref(buf), len(buf), byref(self.v)) + if ret == 0: + raise RuntimeError("serialize") + return buf[0:ret] + def serializeToHexStr(self): + return hexStr(self.serialize()) + +class PrecomputedPublicKey(Structure): + def __init__(self): + self.p = 0 + def create(self): + if not self.p: + self.p = c_void_p(lib.shePrecomputedPublicKeyCreate()) + if self.p == 0: + raise RuntimeError("PrecomputedPublicKey::create") + def destroy(self): + lib.shePrecomputedPublicKeyDestroy(self.p) + def encG1(self, m): + c = CipherTextG1() + ret = lib.shePrecomputedPublicKeyEncG1(byref(c.v), self.p, m) + if ret != 0: + raise RuntimeError("encG1", m) + return c + def encG2(self, m): + c = CipherTextG2() + ret = lib.shePrecomputedPublicKeyEncG2(byref(c.v), self.p, m) + if ret != 0: + raise RuntimeError("encG2", m) + return c + def encGT(self, m): + c = CipherTextGT() + ret = lib.shePrecomputedPublicKeyEncGT(byref(c.v), self.p, m) + if ret != 0: + raise RuntimeError("encGT", m) + return c + +class PublicKey(Structure): + _fields_ = [("v", c_ulonglong * PUB_SIZE)] + def serialize(self): + buf = Buffer() + ret = lib.shePublicKeySerialize(byref(buf), len(buf), byref(self.v)) + if ret == 0: + raise RuntimeError("serialize") + return buf[0:ret] + def serializeToHexStr(self): + return hexStr(self.serialize()) + def encG1(self, m): + c = CipherTextG1() + ret = lib.sheEncG1(byref(c.v), byref(self.v), m) + if ret != 0: + raise RuntimeError("encG1", m) + return c + def encG2(self, m): + c = CipherTextG2() + ret = lib.sheEncG2(byref(c.v), byref(self.v), m) + if ret != 0: + raise RuntimeError("encG2", m) + return c + def encGT(self, m): + c = CipherTextGT() + ret = lib.sheEncGT(byref(c.v), byref(self.v), m) + if ret != 0: + raise RuntimeError("encGT", m) + return c + def createPrecomputedPublicKey(self): + ppub = PrecomputedPublicKey() + ppub.create() + ret = lib.shePrecomputedPublicKeyInit(ppub.p, byref(self.v)) + if ret != 0: + raise RuntimeError("createPrecomputedPublicKey") + return ppub + +class SecretKey(Structure): + _fields_ = [("v", c_ulonglong * SEC_SIZE)] + def setByCSPRNG(self): + ret = lib.sheSecretKeySetByCSPRNG(byref(self.v)) + if ret != 0: + raise RuntimeError("setByCSPRNG", ret) + def serialize(self): + buf = Buffer() + ret = lib.sheSecretKeySerialize(byref(buf), len(buf), byref(self.v)) + if ret == 0: + raise RuntimeError("serialize") + return buf[0:ret] + def serializeToHexStr(self): + return hexStr(self.serialize()) + def getPulicKey(self): + pub = PublicKey() + lib.sheGetPublicKey(byref(pub.v), byref(self.v)) + return pub + def dec(self, c): + m = c_longlong() + if isinstance(c, 
+		if isinstance(c, CipherTextG1):
+			ret = lib.sheDecG1(byref(m), byref(self.v), byref(c.v))
+		elif isinstance(c, CipherTextG2):
+			ret = lib.sheDecG2(byref(m), byref(self.v), byref(c.v))
+		elif isinstance(c, CipherTextGT):
+			ret = lib.sheDecGT(byref(m), byref(self.v), byref(c.v))
+		if ret != 0:
+			raise RuntimeError("dec")
+		return m.value
+
+def neg(c):
+	ret = -1
+	if isinstance(c, CipherTextG1):
+		out = CipherTextG1()
+		ret = lib.sheNegG1(byref(out.v), byref(c.v))
+	elif isinstance(c, CipherTextG2):
+		out = CipherTextG2()
+		ret = lib.sheNegG2(byref(out.v), byref(c.v))
+	elif isinstance(c, CipherTextGT):
+		out = CipherTextGT()
+		ret = lib.sheNegGT(byref(out.v), byref(c.v))
+	if ret != 0:
+		raise RuntimeError("neg")
+	return out
+
+def add(cx, cy):
+	ret = -1
+	if isinstance(cx, CipherTextG1) and isinstance(cy, CipherTextG1):
+		out = CipherTextG1()
+		ret = lib.sheAddG1(byref(out.v), byref(cx.v), byref(cy.v))
+	elif isinstance(cx, CipherTextG2) and isinstance(cy, CipherTextG2):
+		out = CipherTextG2()
+		ret = lib.sheAddG2(byref(out.v), byref(cx.v), byref(cy.v))
+	elif isinstance(cx, CipherTextGT) and isinstance(cy, CipherTextGT):
+		out = CipherTextGT()
+		ret = lib.sheAddGT(byref(out.v), byref(cx.v), byref(cy.v))
+	if ret != 0:
+		raise RuntimeError("add")
+	return out
+
+def sub(cx, cy):
+	ret = -1
+	if isinstance(cx, CipherTextG1) and isinstance(cy, CipherTextG1):
+		out = CipherTextG1()
+		ret = lib.sheSubG1(byref(out.v), byref(cx.v), byref(cy.v))
+	elif isinstance(cx, CipherTextG2) and isinstance(cy, CipherTextG2):
+		out = CipherTextG2()
+		ret = lib.sheSubG2(byref(out.v), byref(cx.v), byref(cy.v))
+	elif isinstance(cx, CipherTextGT) and isinstance(cy, CipherTextGT):
+		out = CipherTextGT()
+		ret = lib.sheSubGT(byref(out.v), byref(cx.v), byref(cy.v))
+	if ret != 0:
+		raise RuntimeError("sub")
+	return out
+
+def mul(cx, cy):
+	ret = -1
+	if isinstance(cx, CipherTextG1) and isinstance(cy, CipherTextG2):
+		out = CipherTextGT()
+		ret = lib.sheMul(byref(out.v), byref(cx.v), byref(cy.v))
+	elif isinstance(cx, CipherTextG1) and isinstance(cy, int):
+		out = CipherTextG1()
+		ret = lib.sheMulG1(byref(out.v), byref(cx.v), cy)
+	elif isinstance(cx, CipherTextG2) and isinstance(cy, int):
+		out = CipherTextG2()
+		ret = lib.sheMulG2(byref(out.v), byref(cx.v), cy)
+	elif isinstance(cx, CipherTextGT) and isinstance(cy, int):
+		out = CipherTextGT()
+		ret = lib.sheMulGT(byref(out.v), byref(cx.v), cy)
+	if ret != 0:
+		raise RuntimeError("mul")
+	return out
+
+if __name__ == '__main__':
+	init()
+	sec = SecretKey()
+	sec.setByCSPRNG()
+	print("sec=", sec.serializeToHexStr())
+	pub = sec.getPublicKey()
+	print("pub=", pub.serializeToHexStr())
+
+	m11 = 1
+	m12 = 5
+	m21 = 3
+	m22 = -4
+	c11 = pub.encG1(m11)
+	c12 = pub.encG1(m12)
+	# dec(enc) for G1
+	if sec.dec(c11) != m11: print("err1")
+
+	# add/sub for G1
+	if sec.dec(add(c11, c12)) != m11 + m12: print("err2")
+	if sec.dec(sub(c11, c12)) != m11 - m12: print("err3")
+
+	# add/sub for G2
+	c21 = pub.encG2(m21)
+	c22 = pub.encG2(m22)
+	if sec.dec(c21) != m21: print("err4")
+	if sec.dec(add(c21, c22)) != m21 + m22: print("err5")
+	if sec.dec(sub(c21, c22)) != m21 - m22: print("err6")
+
+	mt = -56
+	ct = pub.encGT(mt)
+	if sec.dec(ct) != mt: print("err7")
+
+	# mul G1 and G2
+	if sec.dec(mul(c11, c21)) != m11 * m21: print("err8")
+
+	# use precomputedPublicKey for performance
+	ppub = pub.createPrecomputedPublicKey()
+	c1 = ppub.encG1(m11)
+	if sec.dec(c1) != m11: print("err9")
+
+	import sys
+	if sys.version_info.major >= 3:
+		import timeit
+		N = 100000
+		print(str(timeit.timeit("pub.encG1(12)", number=N, globals=globals()) / float(N) * 1e3) + "msec")
+		print(str(timeit.timeit("ppub.encG1(12)", number=N, globals=globals()) / float(N) * 1e3) + "msec")
+
+	ppub.destroy() # necessary to avoid memory leak
+
diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/array.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/array.hpp
new file mode 100644
index 000000000..30df3667d
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/array.hpp
@@ -0,0 +1,197 @@
+#pragma once
+
+/**
+	@file
+	@brief scoped array and aligned array
+
+	@author MITSUNARI Shigeo(@herumi)
+*/
+#include
+#include
+#ifdef _WIN32
+	#include
+#else
+	#include
+#endif
+#include
+
+namespace cybozu {
+
+inline void *AlignedMalloc(size_t size, size_t alignment)
+{
+#ifdef _WIN32
+	return _aligned_malloc(size, alignment);
+#else
+	void *p;
+	int ret = posix_memalign(&p, alignment, size);
+	return (ret == 0) ? p : 0;
+#endif
+}
+
+inline void AlignedFree(void *p)
+{
+#ifdef _WIN32
+	if (p == 0) return;
+	_aligned_free(p);
+#else
+	free(p);
+#endif
+}
+
+template
+class ScopedArray {
+	T *p_;
+	size_t size_;
+	ScopedArray(const ScopedArray&);
+	void operator=(const ScopedArray&);
+public:
+	explicit ScopedArray(size_t size)
+		: p_(new T[size])
+		, size_(size)
+	{
+	}
+	~ScopedArray()
+	{
+		delete[] p_;
+	}
+	T& operator[](size_t idx) CYBOZU_NOEXCEPT { return p_[idx]; }
+	const T& operator[](size_t idx) const CYBOZU_NOEXCEPT { return p_[idx]; }
+	size_t size() const CYBOZU_NOEXCEPT { return size_; }
+	bool empty() const CYBOZU_NOEXCEPT { return size_ == 0; }
+	T* begin() CYBOZU_NOEXCEPT { return p_; }
+	T* end() CYBOZU_NOEXCEPT { return p_ + size_; }
+	const T* begin() const CYBOZU_NOEXCEPT { return p_; }
+	const T* end() const CYBOZU_NOEXCEPT { return p_ + size_; }
+	T* data() CYBOZU_NOEXCEPT { return p_; }
+	const T* data() const CYBOZU_NOEXCEPT { return p_; }
+};
+
+/**
+	T must be POD type
+	16-byte alignment array
+*/
+template
+class AlignedArray {
+	T *p_;
+	size_t size_;
+	size_t allocSize_;
+	T *alloc(size_t size) const
+	{
+		T *p = static_cast(AlignedMalloc(size * sizeof(T), N));
+		if (p == 0) throw std::bad_alloc();
+		return p;
+	}
+	void copy(T *dst, const T *src, size_t n) const
+	{
+		for (size_t i = 0; i < n; i++) dst[i] = src[i];
+	}
+	void setZero(T *p, size_t n) const
+	{
+		for (size_t i = 0; i < n; i++) p[i] = 0;
+	}
+	/*
+		alloc allocN and copy [p, p + copyN) to new p_
+		don't modify size_
+	*/
+	void allocCopy(size_t allocN, const T *p, size_t copyN)
+	{
+		T *q = alloc(allocN);
+		copy(q, p, copyN);
+		AlignedFree(p_);
+		p_ = q;
+		allocSize_ = allocN;
+	}
+public:
+	/*
+		don't clear buffer with zero if doClear is false
+	*/
+	explicit AlignedArray(size_t size = 0, bool doClear = defaultDoClear)
+		: p_(0)
+		, size_(0)
+		, allocSize_(0)
+	{
+		resize(size, doClear);
+	}
+	AlignedArray(const AlignedArray& rhs)
+		: p_(0)
+		, size_(0)
+		, allocSize_(0)
+	{
+		*this = rhs;
+	}
+	AlignedArray& operator=(const AlignedArray& rhs)
+	{
+		if (allocSize_ < rhs.size_) {
+			allocCopy(rhs.size_, rhs.p_, rhs.size_);
+		} else {
+			copy(p_, rhs.p_, rhs.size_);
+		}
+		size_ = rhs.size_;
+		return *this;
+	}
+#if (CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11)
+	AlignedArray(AlignedArray&& rhs) CYBOZU_NOEXCEPT
+		: p_(rhs.p_)
+		, size_(rhs.size_)
+		, allocSize_(rhs.allocSize_)
+	{
+		rhs.p_ = 0;
+		rhs.size_ = 0;
+		rhs.allocSize_ = 0;
+	}
+	AlignedArray& operator=(AlignedArray&& rhs) CYBOZU_NOEXCEPT
+	{
+		swap(rhs);
+		rhs.clear();
+		return *this;
+	}
+#endif
don't clear buffer with zero if doClear is false + @note don't free if shrinked + */ + void resize(size_t size, bool doClear = defaultDoClear) + { + // shrink + if (size <= size_) { + size_ = size; + return; + } + // realloc if necessary + if (size > allocSize_) { + allocCopy(size, p_, size_); + } + if (doClear) setZero(p_ + size_, size - size_); + size_ = size; + } + void clear() // not free + { + size_ = 0; + } + ~AlignedArray() + { + AlignedFree(p_); + } + void swap(AlignedArray& rhs) CYBOZU_NOEXCEPT + { + std::swap(p_, rhs.p_); + std::swap(size_, rhs.size_); + std::swap(allocSize_, rhs.allocSize_); + } + T& operator[](size_t idx) CYBOZU_NOEXCEPT { return p_[idx]; } + const T& operator[](size_t idx) const CYBOZU_NOEXCEPT { return p_[idx]; } + size_t size() const CYBOZU_NOEXCEPT { return size_; } + bool empty() const CYBOZU_NOEXCEPT { return size_ == 0; } + T* begin() CYBOZU_NOEXCEPT { return p_; } + T* end() CYBOZU_NOEXCEPT { return p_ + size_; } + const T* begin() const CYBOZU_NOEXCEPT { return p_; } + const T* end() const CYBOZU_NOEXCEPT { return p_ + size_; } + T* data() CYBOZU_NOEXCEPT { return p_; } + const T* data() const CYBOZU_NOEXCEPT { return p_; } +#if (CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11) + const T* cbegin() const CYBOZU_NOEXCEPT { return p_; } + const T* cend() const CYBOZU_NOEXCEPT { return p_ + size_; } +#endif +}; + +} // cybozu diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/atoi.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/atoi.hpp new file mode 100644 index 000000000..a22853a17 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/atoi.hpp @@ -0,0 +1,239 @@ +#pragma once +/** + @file + @brief converter between integer and string + + @author MITSUNARI Shigeo(@herumi) +*/ + +#include +#include +#include +#include + +namespace cybozu { + +namespace atoi_local { + +template +T convertToInt(bool *b, const char *p, size_t size, const char (&max)[n], T min, T overflow1, char overflow2) +{ + if (size > 0 && *p) { + bool isMinus = false; + size_t i = 0; + if (*p == '-') { + isMinus = true; + i++; + } + if (i < size && p[i]) { + // skip leading zero + while (i < size && p[i] == '0') i++; + // check minimum + if (isMinus && size - i >= n - 1 && memcmp(max, &p[i], n - 1) == 0) { + if (b) *b = true; + return min; + } + T x = 0; + for (;;) { + unsigned char c; + if (i == size || (c = static_cast(p[i])) == '\0') { + if (b) *b = true; + return isMinus ? 
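+					/* (added note) the digit loop below rejects overflow via
+					   overflow1/overflow2: for T = int they are 214748364 and '8',
+					   so "2147483647" is accepted while "2147483648" breaks out;
+					   the INT_MIN case was already handled by memcmp(max, ...) above. */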
-x : x; + } + unsigned int y = c - '0'; + if (y > 9 || x > overflow1 || (x == overflow1 && c >= overflow2)) { + break; + } + x = x * 10 + T(y); + i++; + } + } + } + if (b) { + *b = false; + return 0; + } else { + throw cybozu::Exception("atoi::convertToInt") << cybozu::exception::makeString(p, size); + } +} + +template +T convertToUint(bool *b, const char *p, size_t size, T overflow1, char overflow2) +{ + if (size > 0 && *p) { + size_t i = 0; + // skip leading zero + while (i < size && p[i] == '0') i++; + T x = 0; + for (;;) { + unsigned char c; + if (i == size || (c = static_cast(p[i])) == '\0') { + if (b) *b = true; + return x; + } + unsigned int y = c - '0'; + if (y > 9 || x > overflow1 || (x == overflow1 && c >= overflow2)) { + break; + } + x = x * 10 + T(y); + i++; + } + } + if (b) { + *b = false; + return 0; + } else { + throw cybozu::Exception("atoi::convertToUint") << cybozu::exception::makeString(p, size); + } +} + +template +T convertHexToInt(bool *b, const char *p, size_t size) +{ + if (size > 0 && *p) { + size_t i = 0; + T x = 0; + for (;;) { + unsigned int c; + if (i == size || (c = static_cast(p[i])) == '\0') { + if (b) *b = true; + return x; + } + if (c - 'A' <= 'F' - 'A') { + c = (c - 'A') + 10; + } else if (c - 'a' <= 'f' - 'a') { + c = (c - 'a') + 10; + } else if (c - '0' <= '9' - '0') { + c = c - '0'; + } else { + break; + } + // avoid overflow + if (x > (std::numeric_limits::max)() / 16) break; + x = x * 16 + T(c); + i++; + } + } + if (b) { + *b = false; + return 0; + } else { + throw cybozu::Exception("atoi::convertHexToInt") << cybozu::exception::makeString(p, size); + } +} + +} // atoi_local + +/** + auto detect return value class + @note if you set bool pointer p then throw nothing and set *p = false if bad string +*/ +class atoi { + const char *p_; + size_t size_; + bool *b_; + void set(bool *b, const char *p, size_t size) + { + b_ = b; + p_ = p; + size_ = size; + } +public: + atoi(const char *p, size_t size = -1) + { + set(0, p, size); + } + atoi(bool *b, const char *p, size_t size = -1) + { + set(b, p, size); + } + atoi(const std::string& str) + { + set(0, str.c_str(), str.size()); + } + atoi(bool *b, const std::string& str) + { + set(b, str.c_str(), str.size()); + } + inline operator signed char() const + { + return atoi_local::convertToInt(b_, p_, size_, "128", -128, 12, '8'); + } + inline operator unsigned char() const + { + return atoi_local::convertToUint(b_, p_, size_, 25, '6'); + } + inline operator short() const + { + return atoi_local::convertToInt(b_, p_, size_, "32768", -32768, 3276, '8'); + } + inline operator unsigned short() const + { + return atoi_local::convertToUint(b_, p_, size_, 6553, '6'); + } + inline operator int() const + { + return atoi_local::convertToInt(b_, p_, size_, "2147483648", INT_MIN, 214748364, '8'); + } + inline operator unsigned int() const + { + return atoi_local::convertToUint(b_, p_, size_, 429496729, '6'); + } + inline operator long long() const + { + return atoi_local::convertToInt(b_, p_, size_, "9223372036854775808", LLONG_MIN, 922337203685477580LL, '8'); + } + inline operator unsigned long long() const + { + return atoi_local::convertToUint(b_, p_, size_, 1844674407370955161ULL, '6'); + } +#if defined(__SIZEOF_LONG__) && (__SIZEOF_LONG__ == 8) + inline operator long() const { return static_cast(static_cast(*this)); } + inline operator unsigned long() const { return static_cast(static_cast(*this)); } +#else + inline operator long() const { return static_cast(static_cast(*this)); } + inline operator unsigned long() const 
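+	/* (added usage sketch; illustrative only)
+	     bool b;
+	     int x = cybozu::atoi(&b, "123"); // b == true,  x == 123
+	     int y = cybozu::atoi(&b, "12x"); // b == false, y == 0
+	   without the bool* the same bad input throws cybozu::Exception */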
{ return static_cast(static_cast(*this)); } +#endif +}; + +class hextoi { + const char *p_; + size_t size_; + bool *b_; + void set(bool *b, const char *p, size_t size) + { + b_ = b; + p_ = p; + size_ = size; + } +public: + hextoi(const char *p, size_t size = -1) + { + set(0, p, size); + } + hextoi(bool *b, const char *p, size_t size = -1) + { + set(b, p, size); + } + hextoi(const std::string& str) + { + set(0, str.c_str(), str.size()); + } + hextoi(bool *b, const std::string& str) + { + set(b, str.c_str(), str.size()); + } + operator unsigned char() const { return atoi_local::convertHexToInt(b_, p_, size_); } + operator unsigned short() const { return atoi_local::convertHexToInt(b_, p_, size_); } + operator unsigned int() const { return atoi_local::convertHexToInt(b_, p_, size_); } + operator unsigned long() const { return atoi_local::convertHexToInt(b_, p_, size_); } + operator unsigned long long() const { return atoi_local::convertHexToInt(b_, p_, size_); } + operator char() const { return atoi_local::convertHexToInt(b_, p_, size_); } + operator signed char() const { return atoi_local::convertHexToInt(b_, p_, size_); } + operator short() const { return atoi_local::convertHexToInt(b_, p_, size_); } + operator int() const { return atoi_local::convertHexToInt(b_, p_, size_); } + operator long() const { return atoi_local::convertHexToInt(b_, p_, size_); } + operator long long() const { return atoi_local::convertHexToInt(b_, p_, size_); } +}; + +} // cybozu diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/benchmark.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/benchmark.hpp new file mode 100644 index 000000000..4c02f1869 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/benchmark.hpp @@ -0,0 +1,212 @@ +#pragma once +/** + @file + @brief measure exec time of function + @author MITSUNARI Shigeo +*/ +#if defined(_MSC_VER) && (MSC_VER <= 1500) + #include +#else + #include +#endif +#include + +#ifdef __EMSCRIPTEN__ + #define CYBOZU_BENCH_USE_GETTIMEOFDAY +#endif + +#ifdef CYBOZU_BENCH_USE_GETTIMEOFDAY + #include +#elif !defined(CYBOZU_BENCH_DONT_USE_RDTSC) + #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__x86_64__) + #define CYBOZU_BENCH_USE_RDTSC + #define CYBOZU_BENCH_USE_CPU_TIMER + #endif + #if defined(__GNUC__) && defined(__ARM_ARCH_7A__) +// #define CYBOZU_BENCH_USE_MRC +// #define CYBOZU_BENCH_USE_CPU_TIMER + #endif +#endif + + +#include +#include +#ifdef _MSC_VER + #include + #include +#else +#endif + +#ifndef CYBOZU_UNUSED + #ifdef __GNUC__ + #define CYBOZU_UNUSED __attribute__((unused)) + #else + #define CYBOZU_UNUSED + #endif +#endif + +namespace cybozu { + +namespace bench { + +static void (*g_putCallback)(double); + +static inline void setPutCallback(void (*f)(double)) +{ + g_putCallback = f; +} + +} // cybozu::bench + +class CpuClock { +public: + static inline uint64_t getCpuClk() + { +#ifdef CYBOZU_BENCH_USE_RDTSC +#ifdef _MSC_VER + return __rdtsc(); +#else + unsigned int eax, edx; + __asm__ volatile("rdtsc" : "=a"(eax), "=d"(edx)); + return ((uint64_t)edx << 32) | eax; +#endif +#elif defined(CYBOZU_BENCH_USE_MRC) + uint32_t clk; + __asm__ volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(clk)); + return clk; +#else +#ifdef _MSC_VER + struct _timeb timeb; + _ftime_s(&timeb); + return uint64_t(timeb.time) * 1000000000 + timeb.millitm * 1000000; +#elif defined(CYBOZU_BENCH_USE_GETTIMEOFDAY) + struct timeval tv; + int ret CYBOZU_UNUSED = gettimeofday(&tv, 0); + assert(ret == 0); + return uint64_t(tv.tv_sec) * 
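+		/* (added note) gettimeofday has microsecond resolution; this branch
+		   scales to nanoseconds (sec * 1e9 + usec * 1e3) to match the
+		   clock_gettime branch below */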
1000000000 + tv.tv_usec * 1000; +#else + struct timespec tp; + int ret CYBOZU_UNUSED = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tp); + assert(ret == 0); + return uint64_t(tp.tv_sec) * 1000000000 + tp.tv_nsec; +#endif +#endif + } + CpuClock() + : clock_(0) + , count_(0) + { + } + void begin() + { + clock_ -= getCpuClk(); + } + void end() + { + clock_ += getCpuClk(); + count_++; + } + int getCount() const { return count_; } + uint64_t getClock() const { return clock_; } + void clear() { count_ = 0; clock_ = 0; } + void put(const char *msg = 0, int N = 1) const + { + double t = getClock() / double(getCount()) / N; + if (msg && *msg) printf("%s ", msg); + if (bench::g_putCallback) { + bench::g_putCallback(t); + return; + } +#ifdef CYBOZU_BENCH_USE_CPU_TIMER + if (t > 1e6) { + printf("%7.3fMclk", t * 1e-6); + } else if (t > 1e3) { + printf("%7.3fKclk", t * 1e-3); + } else { + printf("%6.2f clk", t); + } +#else + if (t > 1e6) { + printf("%7.3fmsec", t * 1e-6); + } else if (t > 1e3) { + printf("%7.3fusec", t * 1e-3); + } else { + printf("%6.2fnsec", t); + } +#endif + if (msg && *msg) printf("\n"); + } + // adhoc constatns for CYBOZU_BENCH +#ifdef CYBOZU_BENCH_USE_CPU_TIMER + static const int loopN1 = 1000; + static const int loopN2 = 100; + static const uint64_t maxClk = (uint64_t)1e8; +#else + static const int loopN1 = 100; + static const int loopN2 = 100; + static const uint64_t maxClk = (uint64_t)1e8; +#endif +private: + uint64_t clock_; + int count_; +}; + +namespace bench { + +static CpuClock g_clk; +static int CYBOZU_UNUSED g_loopNum; + +} // cybozu::bench +/* + loop counter is automatically determined + CYBOZU_BENCH(, , , , ...); + if msg == "" then only set g_clk, g_loopNum +*/ +#define CYBOZU_BENCH(msg, func, ...) \ +{ \ + const uint64_t _cybozu_maxClk = cybozu::CpuClock::maxClk; \ + cybozu::CpuClock _cybozu_clk; \ + for (int _cybozu_i = 0; _cybozu_i < cybozu::CpuClock::loopN2; _cybozu_i++) { \ + _cybozu_clk.begin(); \ + for (int _cybozu_j = 0; _cybozu_j < cybozu::CpuClock::loopN1; _cybozu_j++) { func(__VA_ARGS__); } \ + _cybozu_clk.end(); \ + if (_cybozu_clk.getClock() > _cybozu_maxClk) break; \ + } \ + if (msg && *msg) _cybozu_clk.put(msg, cybozu::CpuClock::loopN1); \ + cybozu::bench::g_clk = _cybozu_clk; cybozu::bench::g_loopNum = cybozu::CpuClock::loopN1; \ +} + +/* + double clk; + CYBOZU_BENCH_T(clk, , , , ...); + clk is set by CYBOZU_BENCH_T +*/ +#define CYBOZU_BENCH_T(clk, func, ...) \ +{ \ + const uint64_t _cybozu_maxClk = cybozu::CpuClock::maxClk; \ + cybozu::CpuClock _cybozu_clk; \ + for (int _cybozu_i = 0; _cybozu_i < cybozu::CpuClock::loopN2; _cybozu_i++) { \ + _cybozu_clk.begin(); \ + for (int _cybozu_j = 0; _cybozu_j < cybozu::CpuClock::loopN1; _cybozu_j++) { func(__VA_ARGS__); } \ + _cybozu_clk.end(); \ + if (_cybozu_clk.getClock() > _cybozu_maxClk) break; \ + } \ + clk = _cybozu_clk.getClock() / (double)_cybozu_clk.getCount() / cybozu::CpuClock::loopN1; \ +} + +/* + loop counter N is given + CYBOZU_BENCH_C(, , , , , ...); + if msg == "" then only set g_clk, g_loopNum +*/ +#define CYBOZU_BENCH_C(msg, _N, func, ...) 
\ +{ \ + cybozu::CpuClock _cybozu_clk; \ + _cybozu_clk.begin(); \ + for (int _cybozu_j = 0; _cybozu_j < _N; _cybozu_j++) { func(__VA_ARGS__); } \ + _cybozu_clk.end(); \ + if (msg && *msg) _cybozu_clk.put(msg, _N); \ + cybozu::bench::g_clk = _cybozu_clk; cybozu::bench::g_loopNum = _N; \ +} + +} // cybozu diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/bit_operation.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/bit_operation.hpp new file mode 100644 index 000000000..865c1e47d --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/bit_operation.hpp @@ -0,0 +1,139 @@ +#pragma once +/** + @file + @brief bit operation +*/ +#include +#include + +#if (CYBOZU_HOST == CYBOZU_HOST_INTEL) + #if defined(_WIN32) + #include + #elif defined(__linux__) || defined(__CYGWIN__) || defined(__clang__) + #include + #elif defined(__GNUC__) + #include + #endif +#endif + +namespace cybozu { + +namespace bit_op_local { + +template +struct Tag {}; + +// sizeof(T) < 8 +template<> +struct Tag { + template + static inline int bsf(T x) + { +#if defined(_MSC_VER) + unsigned long out; + _BitScanForward(&out, x); +#pragma warning(suppress: 6102) + return out; +#else + return __builtin_ctz(x); +#endif + } + template + static inline int bsr(T x) + { +#if defined(_MSC_VER) + unsigned long out; + _BitScanReverse(&out, x); +#pragma warning(suppress: 6102) + return out; +#else + return __builtin_clz(x) ^ 0x1f; +#endif + } +}; + +// sizeof(T) == 8 +template<> +struct Tag { + template + static inline int bsf(T x) + { +#if defined(_MSC_VER) && defined(_WIN64) + unsigned long out; + _BitScanForward64(&out, x); +#pragma warning(suppress: 6102) + return out; +#elif defined(__x86_64__) + return __builtin_ctzll(x); +#else + const uint32_t L = uint32_t(x); + if (L) return Tag::bsf(L); + const uint32_t H = uint32_t(x >> 32); + return Tag::bsf(H) + 32; +#endif + } + template + static inline int bsr(T x) + { +#if defined(_MSC_VER) && defined(_WIN64) + unsigned long out; + _BitScanReverse64(&out, x); +#pragma warning(suppress: 6102) + return out; +#elif defined(__x86_64__) + return __builtin_clzll(x) ^ 0x3f; +#else + const uint32_t H = uint32_t(x >> 32); + if (H) return Tag::bsr(H) + 32; + const uint32_t L = uint32_t(x); + return Tag::bsr(L); +#endif + } +}; + +} // bit_op_local + +template +int bsf(T x) +{ + return bit_op_local::Tag::bsf(x); +} +template +int bsr(T x) +{ + return bit_op_local::Tag::bsr(x); +} + +template +uint64_t makeBitMask64(T x) +{ + assert(x < 64); + return (uint64_t(1) << x) - 1; +} + +template +uint32_t popcnt(T x); + +template<> +inline uint32_t popcnt(uint32_t x) +{ +#if defined(_MSC_VER) + return static_cast(_mm_popcnt_u32(x)); +#else + return static_cast(__builtin_popcount(x)); +#endif +} + +template<> +inline uint32_t popcnt(uint64_t x) +{ +#if defined(__x86_64__) + return static_cast(__builtin_popcountll(x)); +#elif defined(_WIN64) + return static_cast(_mm_popcnt_u64(x)); +#else + return popcnt(static_cast(x)) + popcnt(static_cast(x >> 32)); +#endif +} + +} // cybozu diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/critical_section.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/critical_section.hpp new file mode 100644 index 000000000..13d7f3a0e --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/critical_section.hpp @@ -0,0 +1,60 @@ +#pragma once +/** + @file + @brief critical section + + @author MITSUNARI Shigeo(@herumi) + @author MITSUNARI Shigeo +*/ +#include + +namespace cybozu { + +class ConditionVariableCs; + +namespace 
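+/* (added usage sketch; illustrative only)
+     static cybozu::CriticalSection cs;
+     {
+         cybozu::AutoLockCs al(cs); // locks in the constructor
+         // ... critical region ...
+     } // unlocks in the destructor
+   AutoLockCs is the typedef at the bottom of this header */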
thread { + +#ifdef _WIN32 +typedef CRITICAL_SECTION CsHandle; +inline void CsInit(CsHandle& cs) { InitializeCriticalSection(&cs); } +inline void CsLock(CsHandle& cs) { EnterCriticalSection(&cs); } +inline void CsUnlock(CsHandle& cs) { LeaveCriticalSection(&cs); } +inline void CsTerm(CsHandle& cs) { DeleteCriticalSection(&cs); } +#else +typedef pthread_mutex_t CsHandle; +inline void CsInit(CsHandle& cs) { pthread_mutex_init(&cs, NULL); } +inline void CsLock(CsHandle& cs) { pthread_mutex_lock(&cs); } +inline void CsUnlock(CsHandle& cs) { pthread_mutex_unlock(&cs); } +inline void CsTerm(CsHandle& cs) { pthread_mutex_destroy(&cs); } +#endif + +} // cybozu::thread + +class CriticalSection { + friend class cybozu::ConditionVariableCs; +public: + CriticalSection() + { + thread::CsInit(hdl_); + } + ~CriticalSection() + { + thread::CsTerm(hdl_); + } + inline void lock() + { + thread::CsLock(hdl_); + } + inline void unlock() + { + thread::CsUnlock(hdl_); + } +private: + CriticalSection(const CriticalSection&); + CriticalSection& operator=(const CriticalSection&); + thread::CsHandle hdl_; +}; + +typedef cybozu::thread::AutoLockT AutoLockCs; //!< auto lock critical section + +} // cybozu diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/crypto.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/crypto.hpp new file mode 100644 index 000000000..d427179d9 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/crypto.hpp @@ -0,0 +1,321 @@ +#pragma once +/** + @file + @brief wrap openssl + @author MITSUNARI Shigeo(@herumi) +*/ + +#include +#ifdef __APPLE__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#endif +#if 0 //#ifdef __APPLE__ + #define COMMON_DIGEST_FOR_OPENSSL + #include + #include + #define SHA1 CC_SHA1 + #define SHA224 CC_SHA224 + #define SHA256 CC_SHA256 + #define SHA384 CC_SHA384 + #define SHA512 CC_SHA512 +#else +#include +#include +#include +#endif +#ifdef _MSC_VER + #include +#endif + +namespace cybozu { + +namespace crypto { + +class Hash { +public: + enum Name { + N_SHA1, + N_SHA224, + N_SHA256, + N_SHA384, + N_SHA512 + }; +private: + Name name_; + size_t hashSize_; + union { + SHA_CTX sha1; + SHA256_CTX sha256; + SHA512_CTX sha512; + } ctx_; +public: + static inline size_t getSize(Name name) + { + switch (name) { + case N_SHA1: return SHA_DIGEST_LENGTH; + case N_SHA224: return SHA224_DIGEST_LENGTH; + case N_SHA256: return SHA256_DIGEST_LENGTH; + case N_SHA384: return SHA384_DIGEST_LENGTH; + case N_SHA512: return SHA512_DIGEST_LENGTH; + default: + throw cybozu::Exception("crypto:Hash:getSize") << name; + } + } + static inline const char *getName(Name name) + { + switch (name) { + case N_SHA1: return "sha1"; + case N_SHA224: return "sha224"; + case N_SHA256: return "sha256"; + case N_SHA384: return "sha384"; + case N_SHA512: return "sha512"; + default: + throw cybozu::Exception("crypto:Hash:getName") << name; + } + } + static inline Name getName(const std::string& nameStr) + { + static const struct { + const char *nameStr; + Name name; + } tbl[] = { + { "sha1", N_SHA1 }, + { "sha224", N_SHA224 }, + { "sha256", N_SHA256 }, + { "sha384", N_SHA384 }, + { "sha512", N_SHA512 }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + if (nameStr == tbl[i].nameStr) return tbl[i].name; + } + throw cybozu::Exception("crypto:Hash:getName") << nameStr; + } + explicit Hash(Name name = N_SHA1) + : name_(name) + , hashSize_(getSize(name)) + { + reset(); + } + void update(const void *buf, size_t bufSize) + { + 
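+		/* (added usage sketch; illustrative only)
+		     cybozu::crypto::Hash h(cybozu::crypto::Hash::N_SHA256);
+		     h.update("hello ");
+		     std::string md = h.digest("world"); // 32-byte binary digest, then reset()
+		*/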
switch (name_) { + case N_SHA1: SHA1_Update(&ctx_.sha1, buf, bufSize); break; + case N_SHA224: SHA224_Update(&ctx_.sha256, buf, bufSize); break; + case N_SHA256: SHA256_Update(&ctx_.sha256, buf, bufSize); break; + case N_SHA384: SHA384_Update(&ctx_.sha512, buf, bufSize); break; + case N_SHA512: SHA512_Update(&ctx_.sha512, buf, bufSize); break; + } + } + void update(const std::string& buf) + { + update(buf.c_str(), buf.size()); + } + void reset() + { + switch (name_) { + case N_SHA1: SHA1_Init(&ctx_.sha1); break; + case N_SHA224: SHA224_Init(&ctx_.sha256); break; + case N_SHA256: SHA256_Init(&ctx_.sha256); break; + case N_SHA384: SHA384_Init(&ctx_.sha512); break; + case N_SHA512: SHA512_Init(&ctx_.sha512); break; + default: + throw cybozu::Exception("crypto:Hash:rset") << name_; + } + } + /* + md must have hashSize byte + @note clear inner buffer after calling digest + */ + void digest(void *out, const void *buf, size_t bufSize) + { + update(buf, bufSize); + unsigned char *md = reinterpret_cast(out); + switch (name_) { + case N_SHA1: SHA1_Final(md, &ctx_.sha1); break; + case N_SHA224: SHA224_Final(md, &ctx_.sha256); break; + case N_SHA256: SHA256_Final(md, &ctx_.sha256); break; + case N_SHA384: SHA384_Final(md, &ctx_.sha512); break; + case N_SHA512: SHA512_Final(md, &ctx_.sha512); break; + default: + throw cybozu::Exception("crypto:Hash:digest") << name_; + } + reset(); + } + std::string digest(const void *buf, size_t bufSize) + { + std::string ret; + ret.resize(hashSize_); + digest(&ret[0], buf, bufSize); + return ret; + } + std::string digest(const std::string& buf = "") + { + return digest(buf.c_str(), buf.size()); + } + /* + out must have necessary size + @note return written size + */ + static inline size_t digest(void *out, Name name, const void *buf, size_t bufSize) + { + unsigned char *md = (unsigned char*)out; + const unsigned char *src = cybozu::cast(buf); + switch (name) { + case N_SHA1: SHA1(src, bufSize, md); return 160 / 8; + case N_SHA224: SHA224(src, bufSize, md); return 224 / 8; + case N_SHA256: SHA256(src, bufSize, md); return 256 / 8; + case N_SHA384: SHA384(src, bufSize, md); return 384 / 8; + case N_SHA512: SHA512(src, bufSize, md); return 512 / 8; + default: + return 0; + } + } + static inline std::string digest(Name name, const void *buf, size_t bufSize) + { + char md[128]; + size_t size = digest(md, name, buf, bufSize); + if (size == 0) throw cybozu::Exception("crypt:Hash:digest") << name; + return std::string(md, size); + } + static inline std::string digest(Name name, const std::string& buf) + { + return digest(name, buf.c_str(), buf.size()); + } +}; + +class Hmac { + const EVP_MD *evp_; +public: + explicit Hmac(Hash::Name name = Hash::N_SHA1) + { + switch (name) { + case Hash::N_SHA1: evp_ = EVP_sha1(); break; + case Hash::N_SHA224: evp_ = EVP_sha224(); break; + case Hash::N_SHA256: evp_ = EVP_sha256(); break; + case Hash::N_SHA384: evp_ = EVP_sha384(); break; + case Hash::N_SHA512: evp_ = EVP_sha512(); break; + default: + throw cybozu::Exception("crypto:Hmac:") << name; + } + } + std::string eval(const std::string& key, const std::string& data) + { + std::string out(EVP_MD_size(evp_) + 1, 0); + unsigned int outLen = 0; + if (HMAC(evp_, key.c_str(), static_cast(key.size()), + cybozu::cast(data.c_str()), data.size(), cybozu::cast(&out[0]), &outLen)) { + out.resize(outLen); + return out; + } + throw cybozu::Exception("crypto::Hamc::eval"); + } +}; + +class Cipher { + const EVP_CIPHER *cipher_; + EVP_CIPHER_CTX *ctx_; +public: + enum Name { + N_AES128_CBC, + 
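+		// (added note) the ECB variants below encrypt each block independently
+		// and leak plaintext structure; prefer the CBC modes unless ECB is
+		// strictly required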
N_AES192_CBC,
+		N_AES256_CBC,
+		N_AES128_ECB, // be careful to use
+		N_AES192_ECB, // be careful to use
+		N_AES256_ECB, // be careful to use
+	};
+	static inline size_t getSize(Name name)
+	{
+		switch (name) {
+		case N_AES128_CBC: return 128;
+		case N_AES192_CBC: return 192;
+		case N_AES256_CBC: return 256;
+		case N_AES128_ECB: return 128;
+		case N_AES192_ECB: return 192;
+		case N_AES256_ECB: return 256;
+		default:
+			throw cybozu::Exception("crypto:Cipher:getSize") << name;
+		}
+	}
+	enum Mode {
+		Decoding,
+		Encoding
+	};
+	explicit Cipher(Name name = N_AES128_CBC)
+		: cipher_(0)
+		, ctx_(0)
+	{
+		ctx_ = EVP_CIPHER_CTX_new();
+		if (ctx_ == 0) throw cybozu::Exception("crypto:Cipher:EVP_CIPHER_CTX_new");
+		switch (name) {
+		case N_AES128_CBC: cipher_ = EVP_aes_128_cbc(); break;
+		case N_AES192_CBC: cipher_ = EVP_aes_192_cbc(); break;
+		case N_AES256_CBC: cipher_ = EVP_aes_256_cbc(); break;
+		case N_AES128_ECB: cipher_ = EVP_aes_128_ecb(); break;
+		case N_AES192_ECB: cipher_ = EVP_aes_192_ecb(); break;
+		case N_AES256_ECB: cipher_ = EVP_aes_256_ecb(); break;
+		default:
+			throw cybozu::Exception("crypto:Cipher:Cipher:name") << (int)name;
+		}
+	}
+	~Cipher()
+	{
+		if (ctx_) EVP_CIPHER_CTX_free(ctx_);
+	}
+	/*
+		@note don't use padding = true
+	*/
+	void setup(Mode mode, const std::string& key, const std::string& iv, bool padding = false)
+	{
+		const int keyLen = static_cast<int>(key.size());
+		const int expectedKeyLen = EVP_CIPHER_key_length(cipher_);
+		if (keyLen != expectedKeyLen) {
+			throw cybozu::Exception("crypto:Cipher:setup:keyLen") << keyLen << expectedKeyLen;
+		}
+
+		int ret = EVP_CipherInit_ex(ctx_, cipher_, NULL, cybozu::cast<const uint8_t*>(key.c_str()), cybozu::cast<const uint8_t*>(iv.c_str()), mode == Encoding ? 1 : 0);
+		if (ret != 1) {
+			throw cybozu::Exception("crypto:Cipher:setup:EVP_CipherInit_ex") << ret;
+		}
+		ret = EVP_CIPHER_CTX_set_padding(ctx_, padding ? 
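+		/* (added usage sketch; illustrative only — key/iv sizes must match the
+		   cipher: a 16-byte key and 16-byte IV for N_AES128_CBC)
+		     cybozu::crypto::Cipher c(cybozu::crypto::Cipher::N_AES128_CBC);
+		     c.setup(cybozu::crypto::Cipher::Encoding, key16, iv16);
+		     int n = c.update(out, in, inSize); // out needs inSize + block size
+		*/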
1 : 0); + if (ret != 1) { + throw cybozu::Exception("crypto:Cipher:setup:EVP_CIPHER_CTX_set_padding") << ret; + } +/* + const int ivLen = static_cast(iv.size()); + const int expectedIvLen = EVP_CIPHER_CTX_iv_length(&ctx_); + if (ivLen != expectedIvLen) { + throw cybozu::Exception("crypto:Cipher:setup:ivLen") << ivLen << expectedIvLen; + } +*/ + } + /* + the size of outBuf must be larger than inBufSize + blockSize + @retval positive or 0 : writeSize(+blockSize) + @retval -1 : error + */ + int update(char *outBuf, const char *inBuf, int inBufSize) + { + int outLen = 0; + int ret = EVP_CipherUpdate(ctx_, cybozu::cast(outBuf), &outLen, cybozu::cast(inBuf), inBufSize); + if (ret != 1) return -1; + return outLen; + } + /* + return -1 if padding + @note don't use + */ + int finalize(char *outBuf) + { + int outLen = 0; + int ret = EVP_CipherFinal_ex(ctx_, cybozu::cast(outBuf), &outLen); + if (ret != 1) return -1; + return outLen; + } +}; + +} } // cybozu::crypto + +#ifdef __APPLE__ + #pragma GCC diagnostic pop +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/endian.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/endian.hpp new file mode 100644 index 000000000..3f1575c46 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/endian.hpp @@ -0,0 +1,224 @@ +#pragma once + +/** + @file + @brief deal with big and little endian + + @author MITSUNARI Shigeo(@herumi) +*/ +#include +#include +#include +#include + +namespace cybozu { + +#ifdef _MSC_VER +inline uint16_t byteSwap(uint16_t x) { return _byteswap_ushort(x); } +inline uint32_t byteSwap(uint32_t x) { return _byteswap_ulong(x); } +inline uint64_t byteSwap(uint64_t x) { return _byteswap_uint64(x); } +#else +#if (((__GNUC__) << 16) + (__GNUC_MINOR__)) >= ((4 << 16) + 8) +inline uint16_t byteSwap(uint16_t x) { return __builtin_bswap16(x); } +#else +inline uint16_t byteSwap(uint16_t x) { return (x >> 8) | (x << 8); } +#endif +inline uint32_t byteSwap(uint32_t x) { return __builtin_bswap32(x); } +inline uint64_t byteSwap(uint64_t x) { return __builtin_bswap64(x); } +#endif + +/** + get 16bit integer as little endian + @param src [in] pointer +*/ +inline uint16_t Get16bitAsLE(const void *src) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + uint16_t x; + memcpy(&x, src, sizeof(x)); + return x; +#else + const uint8_t *p = static_cast(src); + return p[0] | (p[1] << 8); +#endif +} + +/** + get 32bit integer as little endian + @param src [in] pointer +*/ +inline uint32_t Get32bitAsLE(const void *src) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + uint32_t x; + memcpy(&x, src, sizeof(x)); + return x; +#else + const uint8_t *p = static_cast(src); + return Get16bitAsLE(p) | (static_cast(Get16bitAsLE(p + 2)) << 16); +#endif +} + +/** + get 64bit integer as little endian + @param src [in] pointer +*/ +inline uint64_t Get64bitAsLE(const void *src) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + uint64_t x; + memcpy(&x, src, sizeof(x)); + return x; +#else + const uint8_t *p = static_cast(src); + return Get32bitAsLE(p) | (static_cast(Get32bitAsLE(p + 4)) << 32); +#endif +} + +/** + get 16bit integer as bit endian + @param src [in] pointer +*/ +inline uint16_t Get16bitAsBE(const void *src) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + uint16_t x; + memcpy(&x, src, sizeof(x)); + return byteSwap(x); +#else + const uint8_t *p = static_cast(src); + return p[1] | (p[0] << 8); +#endif +} + +/** + get 32bit integer as bit endian + @param src [in] pointer +*/ +inline uint32_t Get32bitAsBE(const void *src) +{ +#if CYBOZU_ENDIAN 
== CYBOZU_ENDIAN_LITTLE + uint32_t x; + memcpy(&x, src, sizeof(x)); + return byteSwap(x); +#else + const uint8_t *p = static_cast(src); + return Get16bitAsBE(p + 2) | (static_cast(Get16bitAsBE(p)) << 16); +#endif +} + +/** + get 64bit integer as big endian + @param src [in] pointer +*/ +inline uint64_t Get64bitAsBE(const void *src) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + uint64_t x; + memcpy(&x, src, sizeof(x)); + return byteSwap(x); +#else + const uint8_t *p = static_cast(src); + return Get32bitAsBE(p + 4) | (static_cast(Get32bitAsBE(p)) << 32); +#endif +} + +/** + set 16bit integer as little endian + @param src [out] pointer + @param x [in] integer +*/ +inline void Set16bitAsLE(void *src, uint16_t x) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + memcpy(src, &x, sizeof(x)); +#else + uint8_t *p = static_cast(src); + p[0] = static_cast(x); + p[1] = static_cast(x >> 8); +#endif +} +/** + set 32bit integer as little endian + @param src [out] pointer + @param x [in] integer +*/ +inline void Set32bitAsLE(void *src, uint32_t x) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + memcpy(src, &x, sizeof(x)); +#else + uint8_t *p = static_cast(src); + p[0] = static_cast(x); + p[1] = static_cast(x >> 8); + p[2] = static_cast(x >> 16); + p[3] = static_cast(x >> 24); +#endif +} +/** + set 64bit integer as little endian + @param src [out] pointer + @param x [in] integer +*/ +inline void Set64bitAsLE(void *src, uint64_t x) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + memcpy(src, &x, sizeof(x)); +#else + uint8_t *p = static_cast(src); + Set32bitAsLE(p, static_cast(x)); + Set32bitAsLE(p + 4, static_cast(x >> 32)); +#endif +} +/** + set 16bit integer as big endian + @param src [out] pointer + @param x [in] integer +*/ +inline void Set16bitAsBE(void *src, uint16_t x) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + x = byteSwap(x); + memcpy(src, &x, sizeof(x)); +#else + uint8_t *p = static_cast(src); + p[0] = static_cast(x >> 8); + p[1] = static_cast(x); +#endif +} +/** + set 32bit integer as big endian + @param src [out] pointer + @param x [in] integer +*/ +inline void Set32bitAsBE(void *src, uint32_t x) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + x = byteSwap(x); + memcpy(src, &x, sizeof(x)); +#else + uint8_t *p = static_cast(src); + p[0] = static_cast(x >> 24); + p[1] = static_cast(x >> 16); + p[2] = static_cast(x >> 8); + p[3] = static_cast(x); +#endif +} +/** + set 64bit integer as big endian + @param src [out] pointer + @param x [in] integer +*/ +inline void Set64bitAsBE(void *src, uint64_t x) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + x = byteSwap(x); + memcpy(src, &x, sizeof(x)); +#else + uint8_t *p = static_cast(src); + Set32bitAsBE(p, static_cast(x >> 32)); + Set32bitAsBE(p + 4, static_cast(x)); +#endif +} + +} // cybozu diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/exception.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/exception.hpp new file mode 100644 index 000000000..247ba4de0 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/exception.hpp @@ -0,0 +1,252 @@ +#pragma once +/** + @file + @brief definition of abstruct exception class + @author MITSUNARI Shigeo(@herumi) +*/ +#ifdef CYBOZU_MINIMUM_EXCEPTION + +#include + +namespace cybozu { + +namespace exception { +inline const char *makeString(const char *, size_t) +{ + return ""; +} + +} // cybozu::exception + +class Exception { +public: + explicit Exception(const char* = 0, bool = true) + { + } + ~Exception() CYBOZU_NOEXCEPT {} + const char *what() const CYBOZU_NOEXCEPT { return 
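+	/* (added usage sketch for the full class in the #else branch below;
+	   illustrative only)
+	     throw cybozu::Exception("mcl:Fp:setStr") << str;
+	   operator<< appends each argument, ':'-separated, to what() */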
"cybozu:Exception"; } + template + Exception& operator<<(const T&) + { + return *this; + } +}; + +} // cybozu + +#else + +#include +#include +#include +#include +#include +#ifdef _WIN32 + #include + #include +#else + #include // for strerror_r +#endif +#include +#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE + #include +#endif + +namespace cybozu { + +const bool DontThrow = true; + +namespace exception { + +/* get max 16 characters to avoid buffer overrun */ +inline std::string makeString(const char *str, size_t size) +{ + return std::string(str, std::min(size, 16)); +} + +#ifdef _WIN32 +inline std::string wstr2str(const std::wstring& wstr) +{ + std::string str; + for (size_t i = 0; i < wstr.size(); i++) { + uint16_t c = wstr[i]; + if (c < 0x80) { + str += char(c); + } else { + char buf[16]; + CYBOZU_SNPRINTF(buf, sizeof(buf), "\\u%04x", c); + str += buf; + } + } + return str; +} +#endif + +} // cybozu::exception + +/** + convert errno to string + @param err [in] errno + @note for both windows and linux +*/ +inline std::string ConvertErrorNoToString(int err) +{ + char errBuf[256]; + std::string ret; +#ifdef _WIN32 + if (strerror_s(errBuf, sizeof(errBuf), err) == 0) { + ret = errBuf; + } else { + ret = "err"; + } +#elif defined(_GNU_SOURCE) + ret = ::strerror_r(err, errBuf, sizeof(errBuf)); +#else + if (strerror_r(err, errBuf, sizeof(errBuf)) == 0) { + ret = errBuf; + } else { + ret = "err"; + } +#endif + char buf2[64]; + CYBOZU_SNPRINTF(buf2, sizeof(buf2), "(%d)", err); + ret += buf2; + return ret; +} + +class Exception : public std::exception { + mutable std::string str_; +#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE + mutable std::string stackTrace_; +#endif +public: + explicit Exception(const std::string& name = "", bool enableStackTrace = true) + : str_(name) + { +#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE + if (enableStackTrace) stackTrace_ = cybozu::StackTrace().toString(); +#else + cybozu::disable_warning_unused_variable(enableStackTrace); +#endif + } + ~Exception() CYBOZU_NOEXCEPT {} + const char *what() const CYBOZU_NOEXCEPT { return toString().c_str(); } + const std::string& toString() const CYBOZU_NOEXCEPT + { +#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE + try { + if (!stackTrace_.empty()) { +#ifdef CYBOZU_STACKTRACE_ONELINE + str_ += "\n<<>> "; + str_ += stackTrace_; +#else + str_ += "\n<< + Exception& operator<<(const T& x) + { + std::ostringstream os; + os << x; + return operator<<(os.str()); + } +}; + +class ErrorNo { +public: +#ifdef _WIN32 + typedef unsigned int NativeErrorNo; +#else + typedef int NativeErrorNo; +#endif + explicit ErrorNo(NativeErrorNo err) + : err_(err) + { + } + ErrorNo() + : err_(getLatestNativeErrorNo()) + { + } + NativeErrorNo getLatestNativeErrorNo() const + { +#ifdef _WIN32 + return ::GetLastError(); +#else + return errno; +#endif + } + /** + convert NativeErrNo to string(maybe UTF8) + @param err [in] errno + @note Linux : same as ConvertErrorNoToString + Windows : for Win32 API(use en-us) + */ + std::string toString() const + { +#ifdef _WIN32 + const int msgSize = 256; + wchar_t msg[msgSize]; + int size = FormatMessageW( + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + 0, + err_, + MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US), + msg, + msgSize, + NULL + ); + if (size <= 0) return ""; + // remove last "\r\n" + if (size > 2 && msg[size - 2] == '\r') { + msg[size - 2] = 0; + size -= 2; + } + std::string ret; + ret.resize(size); + // assume ascii only + for (int i = 0; i < size; i++) { + ret[i] = (char)msg[i]; + } + char buf2[64]; + CYBOZU_SNPRINTF(buf2, 
sizeof(buf2), "(%u)", err_); + ret += buf2; + return ret; +#else + return ConvertErrorNoToString(err_); +#endif + } +private: + NativeErrorNo err_; +}; + +inline std::ostream& operator<<(std::ostream& os, const cybozu::ErrorNo& self) +{ + return os << self.toString(); +} + +} // cybozu +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/hash.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/hash.hpp new file mode 100644 index 000000000..3fd246fa1 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/hash.hpp @@ -0,0 +1,67 @@ +#pragma once +#include + +namespace cybozu { + +template +uint32_t hash32(Iter begin, Iter end, uint32_t v = 0) +{ + if (v == 0) v = 2166136261U; + while (begin != end) { + v ^= *begin++; + v *= 16777619; + } + return v; +} +template +uint64_t hash64(Iter begin, Iter end, uint64_t v = 0) +{ + if (v == 0) v = 14695981039346656037ULL; + while (begin != end) { + v ^= *begin++; + v *= 1099511628211ULL; + } + v ^= v >> 32; + return v; +} +template +uint32_t hash32(const T *x, size_t n, uint32_t v = 0) +{ + return hash32(x, x + n, v); +} +template +uint64_t hash64(const T *x, size_t n, uint64_t v = 0) +{ + return hash64(x, x + n, v); +} + +} // cybozu + +namespace boost { + +template +struct hash; + +} // boost + +#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 +#include +#else + +namespace std { CYBOZU_NAMESPACE_TR1_BEGIN + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4099) // missmatch class and struct +#endif +#ifndef __APPLE__ +template +struct hash; +#endif +#ifdef _MSC_VER + #pragma warning(pop) +#endif + +CYBOZU_NAMESPACE_TR1_END } // std + +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/inttype.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/inttype.hpp new file mode 100644 index 000000000..62856bdb3 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/inttype.hpp @@ -0,0 +1,163 @@ +#pragma once +/** + @file + @brief int type definition and macros + @author MITSUNARI Shigeo(@herumi) +*/ + +#if defined(_MSC_VER) && (MSC_VER <= 1500) && !defined(CYBOZU_DEFINED_INTXX) + #define CYBOZU_DEFINED_INTXX + typedef __int64 int64_t; + typedef unsigned __int64 uint64_t; + typedef unsigned int uint32_t; + typedef int int32_t; + typedef unsigned short uint16_t; + typedef short int16_t; + typedef unsigned char uint8_t; + typedef signed char int8_t; +#else + #include +#endif + +#ifdef _MSC_VER + #ifndef CYBOZU_DEFINED_SSIZE_T + #define CYBOZU_DEFINED_SSIZE_T + #ifdef _WIN64 + typedef int64_t ssize_t; + #else + typedef int32_t ssize_t; + #endif + #endif +#else + #include // for ssize_t +#endif + +#ifndef CYBOZU_ALIGN + #ifdef _MSC_VER + #define CYBOZU_ALIGN(x) __declspec(align(x)) + #else + #define CYBOZU_ALIGN(x) __attribute__((aligned(x))) + #endif +#endif +#ifndef CYBOZU_FORCE_INLINE + #ifdef _MSC_VER + #define CYBOZU_FORCE_INLINE __forceinline + #else + #define CYBOZU_FORCE_INLINE __attribute__((always_inline)) + #endif +#endif +#ifndef CYBOZU_UNUSED + #ifdef __GNUC__ + #define CYBOZU_UNUSED __attribute__((unused)) + #else + #define CYBOZU_UNUSED + #endif +#endif +#ifndef CYBOZU_ALLOCA + #ifdef _MSC_VER + #include + #define CYBOZU_ALLOCA(x) _malloca(x) + #else + #define CYBOZU_ALLOCA(x) __builtin_alloca(x) + #endif +#endif +#ifndef CYBOZU_NUM_OF_ARRAY + #define CYBOZU_NUM_OF_ARRAY(x) (sizeof(x) / sizeof(*x)) +#endif +#ifndef CYBOZU_SNPRINTF + #if defined(_MSC_VER) && (_MSC_VER < 1900) + #define CYBOZU_SNPRINTF(x, len, ...) 
(void)_snprintf_s(x, len, len - 1, __VA_ARGS__) + #else + #define CYBOZU_SNPRINTF(x, len, ...) (void)snprintf(x, len, __VA_ARGS__) + #endif +#endif + +#define CYBOZU_CPP_VERSION_CPP03 0 +#define CYBOZU_CPP_VERSION_TR1 1 +#define CYBOZU_CPP_VERSION_CPP11 2 +#define CYBOZU_CPP_VERSION_CPP14 3 +#define CYBOZU_CPP_VERSION_CPP17 4 + +#ifdef __GNUC__ + #define CYBOZU_GNUC_PREREQ(major, minor) ((__GNUC__) * 100 + (__GNUC_MINOR__) >= (major) * 100 + (minor)) +#else + #define CYBOZU_GNUC_PREREQ(major, minor) 0 +#endif + +#if (__cplusplus >= 201703) + #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP17 +#elif (__cplusplus >= 201402) + #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP14 +#elif (__cplusplus >= 201103) || (_MSC_VER >= 1500) || defined(__GXX_EXPERIMENTAL_CXX0X__) + #if defined(_MSC_VER) && (_MSC_VER <= 1600) + #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_TR1 + #else + #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP11 + #endif +#elif CYBOZU_GNUC_PREREQ(4, 5) || (CYBOZU_GNUC_PREREQ(4, 2) && __GLIBCXX__ >= 20070719) || defined(__INTEL_COMPILER) || (__clang_major__ >= 3) + #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_TR1 +#else + #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP03 +#endif + +#ifdef CYBOZU_USE_BOOST + #define CYBOZU_NAMESPACE_STD boost + #define CYBOZU_NAMESPACE_TR1_BEGIN + #define CYBOZU_NAMESPACE_TR1_END +#elif (CYBOZU_CPP_VERSION == CYBOZU_CPP_VERSION_TR1) && !defined(__APPLE__) + #define CYBOZU_NAMESPACE_STD std::tr1 + #define CYBOZU_NAMESPACE_TR1_BEGIN namespace tr1 { + #define CYBOZU_NAMESPACE_TR1_END } +#else + #define CYBOZU_NAMESPACE_STD std + #define CYBOZU_NAMESPACE_TR1_BEGIN + #define CYBOZU_NAMESPACE_TR1_END +#endif + +#ifndef CYBOZU_OS_BIT + #if defined(_WIN64) || defined(__x86_64__) || defined(__AARCH64EL__) || defined(__EMSCRIPTEN__) + #define CYBOZU_OS_BIT 64 + #else + #define CYBOZU_OS_BIT 32 + #endif +#endif + +#ifndef CYBOZU_HOST + #define CYBOZU_HOST_UNKNOWN 0 + #define CYBOZU_HOST_INTEL 1 + #define CYBOZU_HOST_ARM 2 + #if defined(_M_IX86) || defined(_M_AMD64) || defined(__x86_64__) || defined(__i386__) + #define CYBOZU_HOST CYBOZU_HOST_INTEL + #elif defined(__arm__) || defined(__AARCH64EL__) + #define CYBOZU_HOST CYBOZU_HOST_ARM + #else + #define CYBOZU_HOST CYBOZU_HOST_UNKNOWN + #endif +#endif + +#ifndef CYBOZU_ENDIAN + #define CYBOZU_ENDIAN_UNKNOWN 0 + #define CYBOZU_ENDIAN_LITTLE 1 + #define CYBOZU_ENDIAN_BIG 2 + #if (CYBOZU_HOST == CYBOZU_HOST_INTEL) + #define CYBOZU_ENDIAN CYBOZU_ENDIAN_LITTLE + #elif (CYBOZU_HOST == CYBOZU_HOST_ARM) && (defined(__ARM_EABI__) || defined(__AARCH64EL__)) + #define CYBOZU_ENDIAN CYBOZU_ENDIAN_LITTLE + #else + #define CYBOZU_ENDIAN CYBOZU_ENDIAN_UNKNOWN + #endif +#endif + +#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 + #define CYBOZU_NOEXCEPT noexcept +#else + #define CYBOZU_NOEXCEPT throw() +#endif +namespace cybozu { +template +void disable_warning_unused_variable(const T&) { } +template +T cast(const S* ptr) { return static_cast(static_cast(ptr)); } +template +T cast(S* ptr) { return static_cast(static_cast(ptr)); } +} // cybozu diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/itoa.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/itoa.hpp new file mode 100644 index 000000000..072e5b8b4 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/itoa.hpp @@ -0,0 +1,337 @@ +#pragma once +/** + @file + @brief convert integer to string(ascii) + + @author MITSUNARI Shigeo(@herumi) +*/ +#include +#ifndef CYBOZU_DONT_USE_STRING +#include +#endif +#include +#include +#include + 
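+/* (added usage sketch; illustrative only)
+     std::string d = cybozu::itoa(-123);                    // "-123"
+     std::string h = cybozu::itohex(0xabcu);                // "00000ABC" (withZero pads to sizeof(T) * 2)
+     std::string h2 = cybozu::itohex(0xabcu, false, false); // "abc"
+     std::string b = cybozu::itobin(uint8_t(5), false);     // "101"
+*/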
+namespace cybozu { + +template +size_t getHexLength(T x) +{ + return x == 0 ? 1 : cybozu::bsr(x) / 4 + 1; +} + +template +size_t getBinLength(T x) +{ + return x == 0 ? 1 : cybozu::bsr(x) + 1; +} +/* + convert x to hex string with len + @note out should have getHexLength(x) size + out is not NUL terminated +*/ +template +void itohex(char *out, size_t len, T x, bool upCase = true) +{ + static const char *hexTbl[] = { + "0123456789abcdef", + "0123456789ABCDEF" + }; + const char *tbl = hexTbl[upCase]; + for (size_t i = 0; i < len; i++) { + out[len - i - 1] = tbl[x % 16]; + x /= 16; + } +} +/* + convert x to bin string with len + @note out should have getBinLength(x) size + out is not NUL terminated +*/ +template +void itobin(char *out, size_t len, T x) +{ + for (size_t i = 0; i < len; i++) { + out[len - i - 1] = '0' + (x & 1); + x >>= 1; + } +} + +namespace itoa_local { + +/* + convert x to dec + use buf[0, bufSize) + return 0 if false + return writtenSize which is not terminated + @REMARK the top of string is buf + bufSize - writtenSize +*/ +template +size_t uintToDec(char *buf, size_t bufSize, UT x) +{ + for (size_t i = 0; i < bufSize; i++) { + buf[bufSize - 1 - i] = '0' + static_cast(x % 10); + x /= 10; + if (x == 0) return i + 1; + } + return 0; +} + +/* + convert x to hex + use buf[0, bufSize) + return 0 if false + return writtenSize which is not terminated + @REMARK the top of string is buf + bufSize - writtenSize +*/ +template +size_t uintToHex(char *buf, size_t bufSize, UT x, bool upCase = true) +{ + static const char *hexTbl[] = { + "0123456789abcdef", + "0123456789ABCDEF" + }; + const char *tbl = hexTbl[upCase]; + for (size_t i = 0; i < bufSize; i++) { + buf[bufSize - 1 - i] = tbl[x % 16]; + x /= 16; + if (x == 0) return i + 1; + } + return 0; +} + +/* + convert x to bin + use buf[0, bufSize) + return 0 if false + return writtenSize which is not terminated + @REMARK the top of string is buf + bufSize - writtenSize +*/ +template +size_t uintToBin(char *buf, size_t bufSize, UT x) +{ + for (size_t i = 0; i < bufSize; i++) { + buf[bufSize - 1 - i] = '0' + (x & 1); + x >>= 1; + if (x == 0) return i + 1; + } + return 0; +} + +template +size_t intToDec(char *buf, size_t bufSize, T x) +{ + if (x == LLONG_MIN) { + const char minStr[] = "-9223372036854775808"; + const size_t minStrLen = sizeof(minStr) - 1; + if (bufSize < minStrLen) { + return 0; + } else { + memcpy(buf + bufSize - minStrLen, minStr, minStrLen); + return minStrLen; + } + } + bool negative = x < 0; + uint64_t absX = negative ? -x : x; + size_t n = uintToDec(buf, bufSize, absX); + if (n == 0) return 0; + if (negative) { + if (bufSize == n) return 0; + n++; + buf[bufSize - n] = '-'; + } + return n; +} + +#ifndef CYBOZU_DONT_USE_STRING +template +void convertFromUint(std::string& out, T x) +{ + char buf[40]; + size_t n = uintToDec(buf, sizeof(buf), x); + assert(n > 0); + out.assign(buf + sizeof(buf) - n, n); +} + +inline void convertFromInt(std::string& out, long long x) +{ + char buf[40]; + size_t n = intToDec(buf, sizeof(buf), x); + assert(n > 0); + out.assign(buf + sizeof(buf) - n, n); +} + +template +void itohexLocal(std::string& out, T x, bool upCase, bool withZero) +{ + const size_t size = withZero ? sizeof(T) * 2 : getHexLength(x); + out.resize(size); + itohex(&out[0], size, x, upCase); +} + +template +void itobinLocal(std::string& out, T x, bool withZero) +{ + const size_t size = withZero ? 
sizeof(T) * 8 : getBinLength(x); + out.resize(size); + itobin(&out[0], size, x); +} +#endif + +} // itoa_local + +#ifndef CYBOZU_DONT_USE_STRING +/** + convert int to string + @param out [out] string + @param x [in] int +*/ +inline void itoa(std::string& out, int x) +{ + itoa_local::convertFromInt(out, x); +} + +/** + convert long long to string + @param out [out] string + @param x [in] long long +*/ +inline void itoa(std::string& out, long long x) +{ + itoa_local::convertFromInt(out, x); +} + +/** + convert unsigned int to string + @param out [out] string + @param x [in] unsigned int +*/ +inline void itoa(std::string& out, unsigned int x) +{ + itoa_local::convertFromUint(out, x); +} + +/** + convert unsigned long long to string + @param out [out] string + @param x [in] unsigned long long +*/ +inline void itoa(std::string& out, unsigned long long x) +{ + itoa_local::convertFromUint(out, x); +} + +#if defined(__SIZEOF_LONG__) && (__SIZEOF_LONG__ == 8) +inline void itoa(std::string& out, long x) { itoa(out, static_cast(x)); } +inline void itoa(std::string& out, unsigned long x) { itoa(out, static_cast(x)); } +#else +inline void itoa(std::string& out, long x) { itoa(out, static_cast(x)); } +inline void itoa(std::string& out, unsigned long x) { itoa(out, static_cast(x)); } +#endif +/** + convert integer to string + @param x [in] int +*/ +template +inline std::string itoa(T x) +{ + std::string ret; + itoa(ret, x); + return ret; +} + +inline void itohex(std::string& out, unsigned char x, bool upCase = true, bool withZero = true) +{ + itoa_local::itohexLocal(out, x, upCase, withZero); +} + +inline void itohex(std::string& out, unsigned short x, bool upCase = true, bool withZero = true) +{ + itoa_local::itohexLocal(out, x, upCase, withZero); +} + +inline void itohex(std::string& out, unsigned int x, bool upCase = true, bool withZero = true) +{ + itoa_local::itohexLocal(out, x, upCase, withZero); +} + +inline void itohex(std::string& out, unsigned long x, bool upCase = true, bool withZero = true) +{ + itoa_local::itohexLocal(out, x, upCase, withZero); +} + +inline void itohex(std::string& out, unsigned long long x, bool upCase = true, bool withZero = true) +{ + itoa_local::itohexLocal(out, x, upCase, withZero); +} + +template +inline std::string itobin(T x, bool withZero = true) +{ + std::string out; + itoa_local::itobinLocal(out, x, withZero); + return out; +} + +inline void itobin(std::string& out, unsigned char x, bool withZero = true) +{ + itoa_local::itobinLocal(out, x, withZero); +} + +inline void itobin(std::string& out, unsigned short x, bool withZero = true) +{ + itoa_local::itobinLocal(out, x, withZero); +} + +inline void itobin(std::string& out, unsigned int x, bool withZero = true) +{ + itoa_local::itobinLocal(out, x, withZero); +} + +inline void itobin(std::string& out, unsigned long x, bool withZero = true) +{ + itoa_local::itobinLocal(out, x, withZero); +} + +inline void itobin(std::string& out, unsigned long long x, bool withZero = true) +{ + itoa_local::itobinLocal(out, x, withZero); +} + +template +inline std::string itohex(T x, bool upCase = true, bool withZero = true) +{ + std::string out; + itohex(out, x, upCase, withZero); + return out; +} +/** + convert integer to string with zero padding + @param x [in] int + @param len [in] minimum lengh of string + @param c [in] padding character + @note + itoa(12, 4) == "0012" + itoa(1234, 4) == "1234" + itoa(12345, 4) == "12345" + itoa(-12, 4) == "-012" +*/ +template +inline std::string itoaWithZero(T x, size_t len, char c = '0') +{ + 
std::string ret; + itoa(ret, x); + if (ret.size() < len) { + std::string zero(len - ret.size(), c); + if (x >= 0) { + ret = zero + ret; + } else { + ret = "-" + zero + ret.substr(1); + } + } + return ret; +} +#endif + +} // cybozu diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/link_libeay32.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/link_libeay32.hpp new file mode 100644 index 000000000..d83f1b6ea --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/link_libeay32.hpp @@ -0,0 +1,21 @@ +#pragma once +/** + @file + @brief link libeay32.lib of openssl + @author MITSUNARI Shigeo(@herumi) +*/ +#if defined(_WIN32) && defined(_MT) + #if _MSC_VER >= 1900 // VC2015 + #ifdef _WIN64 + #pragma comment(lib, "mt/14/libeay32.lib") + #else + #pragma comment(lib, "mt/14/32/libeay32.lib") + #endif +// #elif _MSC_VER == 1800 // VC2013 + #else + #pragma comment(lib, "mt/12/libeay32.lib") + #endif + #pragma comment(lib, "advapi32.lib") + #pragma comment(lib, "gdi32.lib") + #pragma comment(lib, "user32.lib") +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/link_mpir.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/link_mpir.hpp new file mode 100644 index 000000000..d20d7b1a9 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/link_mpir.hpp @@ -0,0 +1,18 @@ +#pragma once +/** + @file + @brief link mpir/mpirxx of mpir + @author MITSUNARI Shigeo(@herumi) +*/ +#if defined(_WIN32) && defined(_MT) + #if _MSC_VER >= 1900 // VC2015, VC2017(1910) + #pragma comment(lib, "mt/14/mpir.lib") + #pragma comment(lib, "mt/14/mpirxx.lib") + #elif _MSC_VER == 1800 // VC2013 + #pragma comment(lib, "mt/12/mpir.lib") + #pragma comment(lib, "mt/12/mpirxx.lib") + #elif _MSC_VER == 1700 // VC2012 + #pragma comment(lib, "mt/11/mpir.lib") + #pragma comment(lib, "mt/11/mpirxx.lib") + #endif +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/link_ssleay32.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/link_ssleay32.hpp new file mode 100644 index 000000000..60c2361ae --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/link_ssleay32.hpp @@ -0,0 +1,19 @@ +#pragma once +/** + @file + @brief link ssleay32.lib of openssl + @author MITSUNARI Shigeo(@herumi) +*/ +#if defined(_WIN32) && defined(_MT) + #if _MSC_VER >= 1900 // VC2015 + #ifdef _WIN64 + #pragma comment(lib, "mt/14/ssleay32.lib") + #else + #pragma comment(lib, "mt/14/32/ssleay32.lib") + #endif +// #elif _MSC_VER == 1800 // VC2013 + #else + #pragma comment(lib, "mt/12/ssleay32.lib") + #endif + #pragma comment(lib, "user32.lib") +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/mutex.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/mutex.hpp new file mode 100644 index 000000000..acde6bcbf --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/mutex.hpp @@ -0,0 +1,141 @@ +#pragma once +/** + @file + @brief mutex + + @author MITSUNARI Shigeo(@herumi) + @author MITSUNARI Shigeo +*/ + +#ifdef _WIN32 + #include +#else + #include + #include +#endif +#include +#include + +namespace cybozu { + +class ConditionVariable; + +namespace thread { + +#ifdef _WIN32 + typedef HANDLE MutexHandle; + inline void MutexInit(MutexHandle& mutex) + { +// mutex = CreateSemaphore(NULL /* no security */, 1 /* init */, 0x7FFFFFFF /* max */, NULL /* no name */); + mutex = CreateMutex(NULL /* no security */, FALSE /* no owner */, NULL /* no name */); + } + inline void MutexLock(MutexHandle& mutex) { WaitForSingleObject(mutex, 
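+	/* (added usage sketch; illustrative only — see Mutex and AutoLock below)
+	     cybozu::Mutex m;
+	     {
+	         cybozu::AutoLock lk(m); // lock()
+	         // ... critical region ...
+	     } // unlock() in ~AutoLockT
+	*/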
INFINITE); } + /* + return false if timeout + @param msec [in] msec + */ + inline bool MutexLockTimeout(MutexHandle& mutex, int msec) + { + DWORD ret = WaitForSingleObject(mutex, msec); + if (ret == WAIT_OBJECT_0) { + return true; + } + if (ret == WAIT_TIMEOUT) { + return false; + } + /* ret == WAIT_ABANDONED */ + assert(0); + return false; + } + inline void MutexUnlock(MutexHandle& mutex) + { +// ReleaseSemaphore(mutex, 1, NULL); + ReleaseMutex(mutex); + } + inline void MutexTerm(MutexHandle& mutex) { CloseHandle(mutex); } +#else + typedef pthread_mutex_t MutexHandle; + inline void MutexInit(MutexHandle& mutex) + { +#if 1 + pthread_mutex_init(&mutex, NULL); +#else + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_TIMED_NP)) { + perror("pthread_mutexattr_settype"); + exit(1); + } + pthread_mutex_init(&mutex, &attr); + pthread_mutexattr_destroy(&attr); +#endif + } + inline void MutexLock(MutexHandle& mutex) { pthread_mutex_lock(&mutex); } +#if 0 + inline bool MutexLockTimeout(MutexHandle& mutex, int msec) + { + timespec absTime; + clock_gettime(CLOCK_REALTIME, &absTime); + absTime.tv_sec += msec / 1000; + absTime.tv_nsec += msec % 1000; + bool ret = pthread_mutex_timedlock(&mutex, &absTime) == 0; + return ret; + } +#endif + inline void MutexUnlock(MutexHandle& mutex) { pthread_mutex_unlock(&mutex); } + inline void MutexTerm(MutexHandle& mutex) { pthread_mutex_destroy(&mutex); } +#endif + +template +class AutoLockT { +public: + explicit AutoLockT(T &t) + : t_(t) + { + t_.lock(); + } + ~AutoLockT() + { + t_.unlock(); + } +private: + T& t_; + AutoLockT& operator=(const AutoLockT&); +}; + +} // cybozu::thread + +class Mutex { + friend class cybozu::ConditionVariable; +public: + Mutex() + { + thread::MutexInit(hdl_); + } + ~Mutex() + { + thread::MutexTerm(hdl_); + } + void lock() + { + thread::MutexLock(hdl_); + } +#if 0 + bool lockTimeout(int msec) + { + return thread::MutexLockTimeout(hdl_, msec); + } +#endif + void unlock() + { + thread::MutexUnlock(hdl_); + } +private: + Mutex(const Mutex&); + Mutex& operator=(const Mutex&); + thread::MutexHandle hdl_; +}; + +typedef cybozu::thread::AutoLockT AutoLock; + +} // cybozu diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/option.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/option.hpp new file mode 100644 index 000000000..a5dfd137d --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/option.hpp @@ -0,0 +1,723 @@ +#pragma once +/** + @file + @brief command line parser + + @author MITSUNARI Shigeo(@herumi) +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + Option parser + + progName (opt1-name|opt2-name|...) param1 param2 ... + param1:param1-help + param2:param2-help + -op1-name:opt1-help + ... + + How to setup + int num; + -n num ; (optional) option => appendOpt(&x, , "num", "num-help"); + -n num ; must option => appendMust(&x, "num", "num-help"); + + std::vector v; + -v s1 s2 s3 ... 
=> appendVec(&v, "v"); + + Remark1: terminate parsing of v if argv begins with '-[^0-9]' + Remark2: the begining character of opt-name is not a number ('0'...'9') + because avoid conflict with minus number + + std::string file1; + file1 is param => appendParam(&file1, "input-file"); + file2 is optional param => appendParamOpt(&file2, "output-file"); + + How to use + opt.parse(argc, argv); + + see sample/option_smpl.cpp +*/ + +namespace cybozu { + +struct OptionError : public cybozu::Exception { + enum Type { + NoError = 0, + BAD_OPT = 1, + BAD_VALUE, + NO_VALUE, + OPT_IS_NECESSARY, + PARAM_IS_NECESSARY, + REDUNDANT_VAL, + BAD_ARGC + }; + Type type; + int argPos; + OptionError() + : cybozu::Exception("OptionError", false) + , type(NoError) + , argPos(0) + { + } + cybozu::Exception& set(Type _type, int _argPos = 0) + { + this->type = _type; + this->argPos = _argPos; + switch (_type) { + case BAD_OPT: + (*this) << "bad opt"; + break; + case BAD_VALUE: + (*this) << "bad value"; + break; + case NO_VALUE: + (*this) << "no value"; + break; + case OPT_IS_NECESSARY: + (*this) << "opt is necessary"; + break; + case PARAM_IS_NECESSARY: + (*this) << "param is necessary"; + break; + case REDUNDANT_VAL: + (*this) << "redundant argVal"; + break; + case BAD_ARGC: + (*this) << "bad argc"; + default: + break; + } + return *this; + } +}; + +namespace option_local { + +template +bool convert(T* x, const char *str) +{ + std::istringstream is(str); + is >> *x; + return !!is; +} + +template<> +inline bool convert(std::string* x, const char *str) +{ + *x = str; + return true; +} + +template +bool convertInt(T* x, const char *str) +{ + if (str[0] == '0' && str[1] == 'x') { + bool b; + *x = cybozu::hextoi(&b, str + 2); + return b; + } + size_t len = strlen(str); + int factor = 1; + if (len > 1) { + switch (str[len - 1]) { + case 'k': factor = 1000; len--; break; + case 'm': factor = 1000 * 1000; len--; break; + case 'g': factor = 1000 * 1000 * 1000; len--; break; + case 'K': factor = 1024; len--; break; + case 'M': factor = 1024 * 1024; len--; break; + case 'G': factor = 1024 * 1024 * 1024; len--; break; + default: break; + } + } + bool b; + T y = cybozu::atoi(&b, str, len); + if (!b) return false; + if (factor > 1) { + if ((std::numeric_limits::min)() / factor <= y + && y <= (std::numeric_limits::max)() / factor) { + *x = y * factor; + } else { + return false; + } + } else { + *x = y; + } + return true; +} + +#define CYBOZU_OPTION_DEFINE_CONVERT_INT(type) \ +template<>inline bool convert(type* x, const char *str) { return convertInt(x, str); } + +CYBOZU_OPTION_DEFINE_CONVERT_INT(int) +CYBOZU_OPTION_DEFINE_CONVERT_INT(long) +CYBOZU_OPTION_DEFINE_CONVERT_INT(long long) + +CYBOZU_OPTION_DEFINE_CONVERT_INT(unsigned int) +CYBOZU_OPTION_DEFINE_CONVERT_INT(unsigned long) +CYBOZU_OPTION_DEFINE_CONVERT_INT(unsigned long long) + +#undef CYBOZU_OPTION_DEFINE_CONVERT_INT + +struct HolderBase { + virtual ~HolderBase(){} + virtual bool set(const char*) = 0; + virtual HolderBase *clone() const = 0; + virtual std::string toStr() const = 0; + virtual const void *get() const = 0; +}; + +template +struct Holder : public HolderBase { + T *p_; + Holder(T *p) : p_(p) {} + HolderBase *clone() const { return new Holder(p_); } + bool set(const char *str) { return option_local::convert(p_, str); } + std::string toStr() const + { + std::ostringstream os; + os << *p_; + return os.str(); + } + const void *get() const { return (void*)p_; } +}; + +/* + for gcc 7 with -fnew-ttp-matching + this specialization is not necessary under 
-fno-new-ttp-matching +*/ +template struct Holder; + +templateclass Container> +struct Holder > : public HolderBase { + typedef Container Vec; + Vec *p_; + Holder(Vec *p) : p_(p) {} + HolderBase *clone() const { return new Holder(p_); } + bool set(const char *str) + { + T t; + bool b = option_local::convert(&t, str); + if (b) p_->push_back(t); + return b; + } + std::string toStr() const + { + std::ostringstream os; + bool isFirst = true; + for (typename Vec::const_iterator i = p_->begin(), ie = p_->end(); i != ie; ++i) { + if (isFirst) { + isFirst = false; + } else { + os << ' '; + } + os << *i; + } + return os.str(); + } + const void *get() const { return (void*)p_; } +}; + +class Var { + HolderBase *p_; + bool isSet_; +public: + Var() : p_(0), isSet_(false) { } + Var(const Var& rhs) : p_(rhs.p_->clone()), isSet_(false) { } + template + explicit Var(T *x) : p_(new Holder(x)), isSet_(false) { } + + ~Var() { delete p_; } + + void swap(Var& rhs) CYBOZU_NOEXCEPT + { + std::swap(p_, rhs.p_); + std::swap(isSet_, rhs.isSet_); + } + void operator=(const Var& rhs) + { + Var v(rhs); + swap(v); + } + bool set(const char *str) + { + isSet_ = true; + return p_->set(str); + } + std::string toStr() const { return p_ ? p_->toStr() : ""; } + bool isSet() const { return isSet_; } + const void *get() const { return p_ ? p_->get() : 0; } +}; + +} // option_local + +class Option { + enum Mode { // for opt + N_is0 = 0, // for bool by appendBoolOpt() + N_is1 = 1, + N_any = 2 + }; + enum ParamMode { + P_exact = 0, // one + P_optional = 1, // zero or one + P_variable = 2 // zero or greater + }; + struct Info { + option_local::Var var; + Mode mode; // 0 or 1 or any ; for opt, not used for Param + bool isMust; // this option is must + std::string opt; // option param name without '-' + std::string help; // description of option + + Info() : mode(N_is0), isMust(false) {} + template + Info(T* pvar, Mode mode, bool isMust, const char *opt, const std::string& help) + : var(pvar) + , mode(mode) + , isMust(isMust) + , opt(opt) + , help(help) + { + } + friend inline std::ostream& operator<<(std::ostream& os, const Info& self) + { + os << self.opt << '=' << self.var.toStr(); + if (self.var.isSet()) { + os << " (set)"; + } else { + os << " (default)"; + } + return os; + } + void put() const + { + std::cout << *this; + } + void usage() const + { + printf(" -%s %s%s\n", opt.c_str(), help.c_str(), isMust ? " (must)" : ""); + } + void shortUsage() const + { + printf(" -%s %s", opt.c_str(), mode == N_is0 ? "" : mode == N_is1 ? 
"para" : "para..."); + } + bool isSet() const { return var.isSet(); } + const void *get() const { return var.get(); } + }; + typedef std::vector InfoVec; + typedef std::vector StrVec; + typedef std::map OptMap; + InfoVec infoVec_; + InfoVec paramVec_; + Info remains_; + OptMap optMap_; + bool showOptUsage_; + ParamMode paramMode_; + std::string progName_; + std::string desc_; + std::string helpOpt_; + std::string help_; + std::string usage_; + StrVec delimiters_; + StrVec *remainsAfterDelimiter_; + int nextDelimiter_; + template + void appendSub(T *pvar, Mode mode, bool isMust, const char *opt, const std::string& help) + { + const char c = opt[0]; + if ('0' <= c && c <= '9') throw cybozu::Exception("Option::appendSub:opt must begin with not number") << opt; + if (optMap_.find(opt) != optMap_.end()) { + throw cybozu::Exception("Option::append:duplicate option") << opt; + } + optMap_[opt] = infoVec_.size(); + infoVec_.push_back(Info(pvar, mode, isMust, opt, help)); + } + + template + void append(T *pvar, const U& defaultVal, bool isMust, const char *opt, const std::string& help = "") + { + *pvar = defaultVal; + appendSub(pvar, N_is1, isMust, opt, help); + } + /* + don't deal with negative number as option + */ + bool isOpt(const char *str) const + { + if (str[0] != '-') return false; + const char c = str[1]; + if ('0' <= c && c <= '9') return false; + return true; + } + void verifyParamMode() + { + if (paramMode_ != P_exact) throw cybozu::Exception("Option:appendParamVec:appendParam is forbidden after appendParamOpt/appendParamVec"); + } + std::string getBaseName(const std::string& name) const + { + size_t pos = name.find_last_of("/\\"); + if (pos == std::string::npos) return name; + return name.substr(pos + 1); + } + bool inDelimiters(const std::string& str) const + { + return std::find(delimiters_.begin(), delimiters_.end(), str) != delimiters_.end(); + } +public: + Option() + : showOptUsage_(true) + , paramMode_(P_exact) + , remainsAfterDelimiter_(0) + , nextDelimiter_(-1) + { + } + virtual ~Option() {} + /* + append optional option with default value + @param pvar [in] pointer to option variable + @param defaultVal [in] default value + @param opt [in] option name + @param help [in] option help + @note you can use 123k, 56M if T is int/long/long long + k : *1000 + m : *1000000 + g : *1000000000 + K : *1024 + M : *1024*1024 + G : *1024*1024*1024 + */ + template + void appendOpt(T *pvar, const U& defaultVal, const char *opt, const std::string& help = "") + { + append(pvar, defaultVal, false, opt, help); + } + /* + default value of *pvar is false + */ + void appendBoolOpt(bool *pvar, const char *opt, const std::string& help = "") + { + *pvar = false; + appendSub(pvar, N_is0, false, opt, help); + } + /* + append necessary option + @param pvar [in] pointer to option variable + @param opt [in] option name + @param help [in] option help + */ + template + void appendMust(T *pvar, const char *opt, const std::string& help = "") + { + append(pvar, T(), true, opt, help); + } + /* + append vector option + @param pvar [in] pointer to option variable + @param opt [in] option name + @param help [in] option help + */ + templateclass Container> + void appendVec(Container *pvar, const char *opt, const std::string& help = "") + { + appendSub(pvar, N_any, false, opt, help); + } + /* + append parameter + @param pvar [in] pointer to parameter + @param opt [in] option name + @param help [in] option help + */ + template + void appendParam(T *pvar, const char *opt, const std::string& help = "") + { + 
verifyParamMode(); + paramVec_.push_back(Info(pvar, N_is1, true, opt, help)); + } + /* + append optional parameter + @param pvar [in] pointer to parameter + @param defaultVal [in] default value + @param opt [in] option name + @param help [in] option help + @note you can call appendParamOpt once after appendParam + */ + template + void appendParamOpt(T *pvar, const U& defaultVal, const char *opt, const std::string& help = "") + { + verifyParamMode(); + *pvar = defaultVal; + paramMode_ = P_optional; + paramVec_.push_back(Info(pvar, N_is1, false, opt, help)); + } + /* + append remain parameter + @param pvar [in] pointer to vector of parameter + @param opt [in] option name + @param help [in] option help + @note you can call appendParamVec once after appendParam + */ + templateclass Container> + void appendParamVec(Container *pvar, const char *name, const std::string& help = "") + { + verifyParamMode(); + paramMode_ = P_variable; + remains_.var = option_local::Var(pvar); + remains_.mode = N_any; + remains_.isMust = false; + remains_.opt = name; + remains_.help = help; + } + void appendHelp(const char *opt, const std::string& help = ": show this message") + { + helpOpt_ = opt; + help_ = help; + } + /* + stop parsing after delimiter is found + @param delimiter [in] string to stop + @param remain [out] set remaining strings if remain + */ + void setDelimiter(const std::string& delimiter, std::vector *remain = 0) + { + delimiters_.push_back(delimiter); + remainsAfterDelimiter_ = remain; + } + /* + stop parsing after delimiter is found + @param delimiter [in] string to stop to append list of delimiters + */ + void appendDelimiter(const std::string& delimiter) + { + delimiters_.push_back(delimiter); + } + /* + clear list of delimiters + */ + void clearDelimiterList() { delimiters_.clear(); } + /* + return the next position of delimiter between [0, argc] + @note return argc if delimiter is not set nor found + */ + int getNextPositionOfDelimiter() const { return nextDelimiter_; } + /* + parse (argc, argv) + @param argc [in] argc of main + @param argv [in] argv of main + @param startPos [in] start position of argc + @param progName [in] used instead of argv[0] + */ + bool parse(int argc, const char *const argv[], int startPos = 1, const char *progName = 0) + { + if (argc < 1 || startPos > argc) return false; + progName_ = getBaseName(progName ? 
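Editor's sketch of the delimiter feature declared above, with hypothetical names: parsing stops at the delimiter and, when a vector is registered, the remaining arguments are collected verbatim.

    cybozu::Option opt;
    std::vector<std::string> rest;
    opt.setDelimiter("--", &rest);
    // ./prog -n 5 -- raw1 raw2
    //   -> parsing stops at "--", rest = { "raw1", "raw2" },
    //      and getNextPositionOfDelimiter() points just past "--".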
progName : argv[startPos - 1]); + nextDelimiter_ = argc; + OptionError err; + for (int pos = startPos; pos < argc; pos++) { + if (inDelimiters(argv[pos])) { + nextDelimiter_ = pos + 1; + if (remainsAfterDelimiter_) { + for (int i = nextDelimiter_; i < argc; i++) { + remainsAfterDelimiter_->push_back(argv[i]); + } + } + break; + } + if (isOpt(argv[pos])) { + const std::string str = argv[pos] + 1; + if (helpOpt_ == str) { + usage(); + exit(0); + } + OptMap::const_iterator i = optMap_.find(str); + if (i == optMap_.end()) { + err.set(OptionError::BAD_OPT, pos); + goto ERR; + } + + Info& info = infoVec_[i->second]; + switch (info.mode) { + case N_is0: + if (!info.var.set("1")) { + err.set(OptionError::BAD_VALUE, pos); + goto ERR; + } + break; + case N_is1: + pos++; + if (pos == argc) { + err.set(OptionError::BAD_VALUE, pos) << (std::string("no value for -") + info.opt); + goto ERR; + } + if (!info.var.set(argv[pos])) { + err.set(OptionError::BAD_VALUE, pos) << (std::string(argv[pos]) + " for -" + info.opt); + goto ERR; + } + break; + case N_any: + default: + { + pos++; + int j = 0; + while (pos < argc && !isOpt(argv[pos])) { + if (!info.var.set(argv[pos])) { + err.set(OptionError::BAD_VALUE, pos) << (std::string(argv[pos]) + " for -" + info.opt) << j; + goto ERR; + } + pos++; + j++; + } + if (j > 0) { + pos--; + } else { + err.set(OptionError::NO_VALUE, pos) << (std::string("for -") + info.opt); + goto ERR; + } + } + break; + } + } else { + bool used = false; + for (size_t i = 0; i < paramVec_.size(); i++) { + Info& param = paramVec_[i]; + if (!param.var.isSet()) { + if (!param.var.set(argv[pos])) { + err.set(OptionError::BAD_VALUE, pos) << (std::string(argv[pos]) + " for " + param.opt); + goto ERR; + } + used = true; + break; + } + } + if (!used) { + if (paramMode_ == P_variable) { + remains_.var.set(argv[pos]); + } else { + err.set(OptionError::REDUNDANT_VAL, pos) << argv[pos]; + goto ERR; + } + } + } + } + // check whether must-opt is set + for (size_t i = 0; i < infoVec_.size(); i++) { + const Info& info = infoVec_[i]; + if (info.isMust && !info.var.isSet()) { + err.set(OptionError::OPT_IS_NECESSARY) << info.opt; + goto ERR; + } + } + // check whether param is set + for (size_t i = 0; i < paramVec_.size(); i++) { + const Info& param = paramVec_[i]; + if (param.isMust && !param.var.isSet()) { + err.set(OptionError::PARAM_IS_NECESSARY) << param.opt; + goto ERR; + } + } + // check whether remains is set + if (paramMode_ == P_variable && remains_.isMust && !remains_.var.isSet()) { + err.set(OptionError::PARAM_IS_NECESSARY) << remains_.opt; + goto ERR; + } + return true; + ERR: + assert(err.type); + printf("%s\n", err.what()); + return false; + } + /* + show desc at first in usage() + */ + void setDescription(const std::string& desc) + { + desc_ = desc; + } + /* + show command line after desc + don't put option message if not showOptUsage + */ + void setUsage(const std::string& usage, bool showOptUsage = false) + { + usage_ = usage; + showOptUsage_ = showOptUsage; + } + void usage() const + { + if (!desc_.empty()) printf("%s\n", desc_.c_str()); + if (usage_.empty()) { + printf("usage:%s", progName_.c_str()); + if (!infoVec_.empty()) printf(" [opt]"); + for (size_t i = 0; i < infoVec_.size(); i++) { + if (infoVec_[i].isMust) infoVec_[i].shortUsage(); + } + for (size_t i = 0; i < paramVec_.size(); i++) { + printf(" %s", paramVec_[i].opt.c_str()); + } + if (paramMode_ == P_variable) { + printf(" %s", remains_.opt.c_str()); + } + printf("\n"); + } else { + printf("%s\n", usage_.c_str()); + if 
(!showOptUsage_) return; + } + for (size_t i = 0; i < paramVec_.size(); i++) { + const Info& param = paramVec_[i]; + if (!param.help.empty()) printf(" %s %s\n", paramVec_[i].opt.c_str(), paramVec_[i].help.c_str()); + } + if (!remains_.help.empty()) printf(" %s %s\n", remains_.opt.c_str(), remains_.help.c_str()); + if (!helpOpt_.empty()) { + printf(" -%s %s\n", helpOpt_.c_str(), help_.c_str()); + } + for (size_t i = 0; i < infoVec_.size(); i++) { + infoVec_[i].usage(); + } + } + friend inline std::ostream& operator<<(std::ostream& os, const Option& self) + { + for (size_t i = 0; i < self.paramVec_.size(); i++) { + const Info& param = self.paramVec_[i]; + os << param.opt << '=' << param.var.toStr() << std::endl; + } + if (self.paramMode_ == P_variable) { + os << "remains=" << self.remains_.var.toStr() << std::endl; + } + for (size_t i = 0; i < self.infoVec_.size(); i++) { + os << self.infoVec_[i] << std::endl; + } + return os; + } + void put() const + { + std::cout << *this; + } + /* + whether pvar is set or not + */ + template + bool isSet(const T* pvar) const + { + const void *p = static_cast(pvar); + for (size_t i = 0; i < paramVec_.size(); i++) { + const Info& v = paramVec_[i]; + if (v.get() == p) return v.isSet(); + } + if (remains_.get() == p) return remains_.isSet(); + for (size_t i = 0; i < infoVec_.size(); i++) { + const Info& v = infoVec_[i]; + if (v.get() == p) return v.isSet(); + } + throw cybozu::Exception("Option:isSet:no assigned var") << pvar; + } +}; + +} // cybozu diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/random_generator.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/random_generator.hpp new file mode 100644 index 000000000..ff4a78da5 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/random_generator.hpp @@ -0,0 +1,153 @@ +#pragma once +/** + @file + @brief pseudrandom generator + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ + +#include +#ifdef _WIN32 +#include +#include +#include +#ifdef _MSC_VER +#pragma comment (lib, "advapi32.lib") +#endif +#include +#else +#include +#include +#endif + +namespace cybozu { + +class RandomGenerator { + RandomGenerator(const RandomGenerator&); + void operator=(const RandomGenerator&); +public: + uint32_t operator()() + { + return get32(); + } + uint32_t get32() + { + uint32_t ret; + read(&ret, 1); + return ret; + } + uint64_t get64() + { + uint64_t ret; + read(&ret, 1); + return ret; + } +#ifdef _WIN32 + RandomGenerator() + : prov_(0) + , pos_(bufSize) + { + DWORD flagTbl[] = { 0, CRYPT_NEWKEYSET }; + for (int i = 0; i < 2; i++) { + if (CryptAcquireContext(&prov_, NULL, NULL, PROV_RSA_FULL, flagTbl[i]) != 0) return; + } + throw cybozu::Exception("randomgenerator"); + } + bool read_inner(void *buf, size_t byteSize) + { + return CryptGenRandom(prov_, static_cast(byteSize), static_cast(buf)) != 0; + } + ~RandomGenerator() + { + if (prov_) { + CryptReleaseContext(prov_, 0); + } + } + /* + fill buf[0..bufNum-1] with random data + @note bufNum is not byte size + */ + template + void read(bool *pb, T *buf, size_t bufNum) + { + cybozu::AutoLockCs al(cs_); + const size_t byteSize = sizeof(T) * bufNum; + if (byteSize > bufSize) { + if (!read_inner(buf, byteSize)) { + *pb = false; + return; + } + } else { + if (pos_ + byteSize > bufSize) { + read_inner(buf_, bufSize); + pos_ = 0; + } + memcpy(buf, buf_ + pos_, byteSize); + pos_ += byteSize; + } + *pb = true; + } + template + void read(T *buf, size_t bufNum) + { + bool b; + 
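Editor's end-to-end sketch of the Option class defined above, following the usage outline in the header comment (cf. sample/option_smpl.cpp); all names and defaults here are hypothetical:

    #include <cybozu/option.hpp>

    int main(int argc, char *argv[])
    {
        cybozu::Option opt;
        int num;
        bool verbose;
        std::string inName;
        opt.appendOpt(&num, 10, "n", ": iteration count (accepts 100k etc.)");
        opt.appendBoolOpt(&verbose, "v", ": verbose output");
        opt.appendParam(&inName, "input-file");
        opt.appendHelp("h");
        if (!opt.parse(argc, argv)) {
            opt.usage();
            return 1;
        }
        // e.g. ./prog -n 100k -v data.txt
        return 0;
    }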
read(&b, buf, bufNum); + if (!b) throw cybozu::Exception("RandomGenerator:read") << bufNum; + } +private: + HCRYPTPROV prov_; + static const size_t bufSize = 1024; + char buf_[bufSize]; + size_t pos_; + cybozu::CriticalSection cs_; +#else + RandomGenerator() + : fp_(::fopen("/dev/urandom", "rb")) + { + if (!fp_) throw cybozu::Exception("randomgenerator"); + } + ~RandomGenerator() + { + if (fp_) ::fclose(fp_); + } + /* + fill buf[0..bufNum-1] with random data + @note bufNum is not byte size + */ + template + void read(bool *pb, T *buf, size_t bufNum) + { + const size_t byteSize = sizeof(T) * bufNum; + *pb = ::fread(buf, 1, (int)byteSize, fp_) == byteSize; + } + template + void read(T *buf, size_t bufNum) + { + bool b; + read(&b, buf, bufNum); + if (!b) throw cybozu::Exception("RandomGenerator:read") << bufNum; + } +#endif +private: + FILE *fp_; +}; + +template +void shuffle(T* v, size_t n, RG& rg) +{ + if (n <= 1) return; + for (size_t i = 0; i < n - 1; i++) { + size_t r = i + size_t(rg.get64() % (n - i)); + using namespace std; + swap(v[i], v[r]); + } +} + +template +void shuffle(V& v, RG& rg) +{ + shuffle(v.data(), v.size(), rg); +} + +} // cybozu diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/serializer.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/serializer.hpp new file mode 100644 index 000000000..1e23c8f42 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/serializer.hpp @@ -0,0 +1,363 @@ +#pragma once +/** + @file + @brief serializer for vector, list, map and so on + + @author MITSUNARI Shigeo(@herumi) +*/ +#include +#include + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4127) +#endif + +//#define CYBOZU_SERIALIZER_FIXED_SIZE_INTEGER + +namespace cybozu { + +namespace serializer_local { + +template +union ci { + T i; + uint8_t c[sizeof(T)]; +}; + +template +struct HasMemFunc { }; + +template +void dispatch_reserve(T& t, size_t size, int, HasMemFunc* = 0) +{ + t.reserve(size); +} + +template +void dispatch_reserve(T&, size_t, int*) +{ +} + +template +void reserve_if_exists(T& t, size_t size) +{ + dispatch_reserve(t, size, 0); +} + +} // serializer_local + +template +void loadRange(T *p, size_t num, InputStream& is) +{ + cybozu::read(p, num * sizeof(T), is); +} + +template +void saveRange(OutputStream& os, const T *p, size_t num) +{ + cybozu::write(os, p, num * sizeof(T)); +} + +template +void loadPod(T& x, InputStream& is) +{ + serializer_local::ci ci; + loadRange(ci.c, sizeof(ci.c), is); + x = ci.i; +} + +template +void savePod(OutputStream& os, const T& x) +{ + serializer_local::ci ci; + ci.i = x; + saveRange(os, ci.c, sizeof(ci.c)); +} + +template +void load(T& x, InputStream& is) +{ + x.load(is); +} + +template +void save(OutputStream& os, const T& x) +{ + x.save(os); +} + +#define CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(type) \ +templatevoid load(type& x, InputStream& is) { loadPod(x, is); } \ +templatevoid save(OutputStream& os, type x) { savePod(os, x); } + +CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(bool) +CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(char) +CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(short) +CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(unsigned char) +CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(unsigned short) +CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(wchar_t) + +CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(float) +CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(double) + +#ifdef CYBOZU_SERIALIZER_FIXED_SIZE_INTEGER + +#define CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(type) CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(type) + +#else + +namespace serializer_local { + 
+template +bool isRecoverable(T x) +{ + return T(S(x)) == x; +} +/* + data structure H:D of integer x + H:header(1byte) + 0x80 ; D = 1 byte zero ext + 0x81 ; D = 2 byte zero ext + 0x82 ; D = 4 byte zero ext + 0x83 ; D = 8 byte zero ext + 0x84 ; D = 1 byte signed ext + 0x85 ; D = 2 byte signed ext + 0x86 ; D = 4 byte signed ext + 0x87 ; D = 8 byte signed ext + other; x = signed H, D = none +*/ +template +void saveVariableInt(OutputStream& os, const T& x) +{ + if (isRecoverable(x)) { + uint8_t u8 = uint8_t(x); + if (unsigned(u8 - 0x80) <= 7) { + savePod(os, uint8_t(0x84)); + } + savePod(os, u8); + } else if (isRecoverable(x)) { + savePod(os, uint8_t(0x80)); + savePod(os, uint8_t(x)); + } else if (isRecoverable(x) || isRecoverable(x)) { + savePod(os, uint8_t(isRecoverable(x) ? 0x81 : 0x85)); + savePod(os, uint16_t(x)); + } else if (isRecoverable(x) || isRecoverable(x)) { + savePod(os, uint8_t(isRecoverable(x) ? 0x82 : 0x86)); + savePod(os, uint32_t(x)); + } else { + assert(sizeof(T) == 8); + savePod(os, uint8_t(0x83)); + savePod(os, uint64_t(x)); + } +} + +template +void loadVariableInt(T& x, InputStream& is) +{ + uint8_t h; + loadPod(h, is); + if (h == 0x80) { + uint8_t v; + loadPod(v, is); + x = v; + } else if (h == 0x81) { + uint16_t v; + loadPod(v, is); + x = v; + } else if (h == 0x82) { + uint32_t v; + loadPod(v, is); + x = v; + } else if (h == 0x83) { + if (sizeof(T) == 4) throw cybozu::Exception("loadVariableInt:bad header") << h; + uint64_t v; + loadPod(v, is); + x = static_cast(v); + } else if (h == 0x84) { + int8_t v; + loadPod(v, is); + x = v; + } else if (h == 0x85) { + int16_t v; + loadPod(v, is); + x = v; + } else if (h == 0x86) { + int32_t v; + loadPod(v, is); + x = v; + } else if (h == 0x87) { + if (sizeof(T) == 4) throw cybozu::Exception("loadVariableInt:bad header") << h; + int64_t v; + loadPod(v, is); + x = static_cast(v); + } else { + x = static_cast(h); + } +} + +} // serializer_local + +#define CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(type) \ +templatevoid load(type& x, InputStream& is) { serializer_local::loadVariableInt(x, is); } \ +templatevoid save(OutputStream& os, type x) { serializer_local::saveVariableInt(os, x); } + +#endif + +CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(int) +CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(long) +CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(long long) +CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(unsigned int) +CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(unsigned long) +CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(unsigned long long) + +#undef CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER +#undef CYBOZU_SERIALIZER_MAKE_UNT_SERIALIZER +#undef CYBOZU_SERIALIZER_MAKE_SERIALIZER_F +#undef CYBOZU_SERIALIZER_MAKE_SERIALIZER_V + +// only for std::vector +template +void loadPodVec(V& v, InputStream& is) +{ + size_t size; + load(size, is); + v.resize(size); + if (size > 0) loadRange(&v[0], size, is); +} + +// only for std::vector +template +void savePodVec(OutputStream& os, const V& v) +{ + save(os, v.size()); + if (!v.empty()) saveRange(os, &v[0], v.size()); +} + +template +void load(std::string& str, InputStream& is) +{ + loadPodVec(str, is); +} + +template +void save(OutputStream& os, const std::string& str) +{ + savePodVec(os, str); +} + +template +void save(OutputStream& os, const char *x) +{ + const size_t len = strlen(x); + save(os, len); + if (len > 0) saveRange(os, x, len); +} + + +// for vector, list +templateclass Container> +void load(Container& x, InputStream& is) +{ + size_t size; + load(size, is); + serializer_local::reserve_if_exists(x, size); + for (size_t i = 0; i < size; 
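A worked sketch (editor's addition) of the variable-length encoding implemented by saveVariableInt above; the byte counts follow the header table, pods are written in host byte order, and StringOutputStream/StringInputStream are taken from cybozu/stream.hpp later in this patch:

    std::string out;
    cybozu::StringOutputStream os(out);
    cybozu::save(os, 5);     // 1 byte:  0x05 (the value doubles as header)
    cybozu::save(os, 200);   // 2 bytes: 0x80 0xc8 (1-byte zero extension)
    cybozu::save(os, 70000); // 5 bytes: 0x82 + 4-byte uint32
    cybozu::save(os, -2);    // 1 byte:  0xfe, sign-extended on load
    cybozu::StringInputStream is(out);
    int a; cybozu::load(a, is); // a == 5, and so on in order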
i++) { + x.push_back(T()); + T& t = x.back(); + load(t, is); + } +} + +templateclass Container> +void save(OutputStream& os, const Container& x) +{ + typedef Container V; + save(os, x.size()); + for (typename V::const_iterator i = x.begin(), end = x.end(); i != end; ++i) { + save(os, *i); + } +} + +// for set +templateclass Container> +void load(Container& x, InputStream& is) +{ + size_t size; + load(size, is); + for (size_t i = 0; i < size; i++) { + K t; + load(t, is); + x.insert(t); + } +} + +templateclass Container> +void save(OutputStream& os, const Container& x) +{ + typedef Container Set; + save(os, x.size()); + for (typename Set::const_iterator i = x.begin(), end = x.end(); i != end; ++i) { + save(os, *i); + } +} + +// for map +templateclass Container> +void load(Container& x, InputStream& is) +{ + typedef Container Map; + size_t size; + load(size, is); + for (size_t i = 0; i < size; i++) { + std::pair vt; + load(vt.first, is); + load(vt.second, is); + x.insert(vt); + } +} + +templateclass Container> +void save(OutputStream& os, const Container& x) +{ + typedef Container Map; + save(os, x.size()); + for (typename Map::const_iterator i = x.begin(), end = x.end(); i != end; ++i) { + save(os, i->first); + save(os, i->second); + } +} + +// unordered_map +templateclass Container> +void load(Container& x, InputStream& is) +{ + typedef Container Map; + size_t size; + load(size, is); +// x.reserve(size); // tr1::unordered_map may not have reserve + cybozu::serializer_local::reserve_if_exists(x, size); + for (size_t i = 0; i < size; i++) { + std::pair vt; + load(vt.first, is); + load(vt.second, is); + x.insert(vt); + } +} + +templateclass Container> +void save(OutputStream& os, const Container& x) +{ + typedef Container Map; + save(os, x.size()); + for (typename Map::const_iterator i = x.begin(), end = x.end(); i != end; ++i) { + save(os, i->first); + save(os, i->second); + } +} + +} // cybozu + +#ifdef _MSC_VER + #pragma warning(pop) +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/sha2.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/sha2.hpp new file mode 100644 index 000000000..1830936f0 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/sha2.hpp @@ -0,0 +1,467 @@ +#pragma once +/** + @file + @brief SHA-256, SHA-512 class + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#if !defined(CYBOZU_DONT_USE_OPENSSL) && !defined(MCL_DONT_USE_OPENSSL) + #define CYBOZU_USE_OPENSSL_SHA +#endif + +#ifndef CYBOZU_DONT_USE_STRING +#include +#endif + +#ifdef CYBOZU_USE_OPENSSL_SHA +#ifdef __APPLE__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#endif +#include +#ifdef _MSC_VER + #include +#endif + +#ifdef __APPLE__ + #pragma GCC diagnostic pop +#endif + +namespace cybozu { + +class Sha256 { + SHA256_CTX ctx_; +public: + Sha256() + { + clear(); + } + void clear() + { + SHA256_Init(&ctx_); + } + void update(const void *buf, size_t bufSize) + { + SHA256_Update(&ctx_, buf, bufSize); + } + size_t digest(void *md, size_t mdSize, const void *buf, size_t bufSize) + { + if (mdSize < SHA256_DIGEST_LENGTH) return 0; + update(buf, bufSize); + SHA256_Final(reinterpret_cast(md), &ctx_); + return SHA256_DIGEST_LENGTH; + } +#ifndef CYBOZU_DONT_USE_STRING + void update(const std::string& buf) + { + update(buf.c_str(), buf.size()); + } + std::string digest(const std::string& buf) + { + return digest(buf.c_str(), buf.size()); + } + std::string 
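Editor's sketch of the container support that closes serializer.hpp: standard containers round-trip through any stream that provides readSome/write. A hypothetical round trip:

    #include <map>
    #include <string>
    #include <vector>
    #include <cybozu/serializer.hpp>
    #include <cybozu/stream.hpp>

    void demo()
    {
        std::map<std::string, std::vector<int> > m;
        m["a"].push_back(1);
        m["a"].push_back(2);
        std::string bytes;
        cybozu::StringOutputStream os(bytes);
        cybozu::save(os, m); // size, then (key, value) pairs

        std::map<std::string, std::vector<int> > m2;
        cybozu::StringInputStream is(bytes);
        cybozu::load(m2, is); // m2 == m
    }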
digest(const void *buf, size_t bufSize) + { + std::string md(SHA256_DIGEST_LENGTH, 0); + digest(&md[0], md.size(), buf, bufSize); + return md; + } +#endif +}; + +class Sha512 { + SHA512_CTX ctx_; +public: + Sha512() + { + clear(); + } + void clear() + { + SHA512_Init(&ctx_); + } + void update(const void *buf, size_t bufSize) + { + SHA512_Update(&ctx_, buf, bufSize); + } + size_t digest(void *md, size_t mdSize, const void *buf, size_t bufSize) + { + if (mdSize < SHA512_DIGEST_LENGTH) return 0; + update(buf, bufSize); + SHA512_Final(reinterpret_cast(md), &ctx_); + return SHA512_DIGEST_LENGTH; + } +#ifndef CYBOZU_DONT_USE_STRING + void update(const std::string& buf) + { + update(buf.c_str(), buf.size()); + } + std::string digest(const std::string& buf) + { + return digest(buf.c_str(), buf.size()); + } + std::string digest(const void *buf, size_t bufSize) + { + std::string md(SHA512_DIGEST_LENGTH, 0); + digest(&md[0], md.size(), buf, bufSize); + return md; + } +#endif +}; + +} // cybozu + +#else + +#include +#include +#include + +namespace cybozu { + +namespace sha2_local { + +template +T min_(T x, T y) { return x < y ? x : y;; } + +inline uint32_t rot32(uint32_t x, int s) +{ +#ifdef _MSC_VER + return _rotr(x, s); +#else + return (x >> s) | (x << (32 - s)); +#endif +} + +inline uint64_t rot64(uint64_t x, int s) +{ +#ifdef _MSC_VER + return _rotr64(x, s); +#else + return (x >> s) | (x << (64 - s)); +#endif +} + +template +struct Common { + void term(const char *buf, size_t bufSize) + { + assert(bufSize < T::blockSize_); + T& self = static_cast(*this); + const uint64_t totalSize = self.totalSize_ + bufSize; + + uint8_t last[T::blockSize_]; + memcpy(last, buf, bufSize); + last[bufSize] = uint8_t(0x80); /* top bit = 1 */ + memset(&last[bufSize + 1], 0, T::blockSize_ - bufSize - 1); + if (bufSize >= T::blockSize_ - T::msgLenByte_) { + self.round(reinterpret_cast(last)); + memset(last, 0, sizeof(last)); // clear stack + } + cybozu::Set64bitAsBE(&last[T::blockSize_ - 8], totalSize * 8); + self.round(reinterpret_cast(last)); + } + void inner_update(const char *buf, size_t bufSize) + { + T& self = static_cast(*this); + if (bufSize == 0) return; + if (self.roundBufSize_ > 0) { + size_t size = sha2_local::min_(T::blockSize_ - self.roundBufSize_, bufSize); + memcpy(self.roundBuf_ + self.roundBufSize_, buf, size); + self.roundBufSize_ += size; + buf += size; + bufSize -= size; + } + if (self.roundBufSize_ == T::blockSize_) { + self.round(self.roundBuf_); + self.roundBufSize_ = 0; + } + while (bufSize >= T::blockSize_) { + assert(self.roundBufSize_ == 0); + self.round(buf); + buf += T::blockSize_; + bufSize -= T::blockSize_; + } + if (bufSize > 0) { + assert(bufSize < T::blockSize_); + assert(self.roundBufSize_ == 0); + memcpy(self.roundBuf_, buf, bufSize); + self.roundBufSize_ = bufSize; + } + assert(self.roundBufSize_ < T::blockSize_); + } +}; + +} // cybozu::sha2_local + +class Sha256 : public sha2_local::Common { + friend struct sha2_local::Common; +private: + static const size_t blockSize_ = 64; + static const size_t hSize_ = 8; + static const size_t msgLenByte_ = 8; + uint64_t totalSize_; + size_t roundBufSize_; + char roundBuf_[blockSize_]; + uint32_t h_[hSize_]; + static const size_t outByteSize_ = hSize_ * sizeof(uint32_t); + const uint32_t *k_; + + /** + @param buf [in] buffer(64byte) + */ + void round(const char *buf) + { + using namespace sha2_local; + uint32_t w[64]; + for (int i = 0; i < 16; i++) { + w[i] = cybozu::Get32bitAsBE(&buf[i * 4]); + } + for (int i = 16 ; i < 64; i++) { + uint32_t t 
= w[i - 15]; + uint32_t s0 = rot32(t, 7) ^ rot32(t, 18) ^ (t >> 3); + t = w[i - 2]; + uint32_t s1 = rot32(t, 17) ^ rot32(t, 19) ^ (t >> 10); + w[i] = w[i - 16] + s0 + w[i - 7] + s1; + } + uint32_t a = h_[0]; + uint32_t b = h_[1]; + uint32_t c = h_[2]; + uint32_t d = h_[3]; + uint32_t e = h_[4]; + uint32_t f = h_[5]; + uint32_t g = h_[6]; + uint32_t h = h_[7]; + for (int i = 0; i < 64; i++) { + uint32_t s1 = rot32(e, 6) ^ rot32(e, 11) ^ rot32(e, 25); + uint32_t ch = g ^ (e & (f ^ g)); + uint32_t t1 = h + s1 + ch + k_[i] + w[i]; + uint32_t s0 = rot32(a, 2) ^ rot32(a, 13) ^ rot32(a, 22); + uint32_t maj = ((a | b) & c) | (a & b); + uint32_t t2 = s0 + maj; + h = g; + g = f; + f = e; + e = d + t1; + d = c; + c = b; + b = a; + a = t1 + t2; + } + h_[0] += a; + h_[1] += b; + h_[2] += c; + h_[3] += d; + h_[4] += e; + h_[5] += f; + h_[6] += g; + h_[7] += h; + totalSize_ += blockSize_; + } +public: + Sha256() + { + clear(); + } + void clear() + { + static const uint32_t kTbl[] = { + 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 + }; + k_ = kTbl; + totalSize_ = 0; + roundBufSize_ = 0; + h_[0] = 0x6a09e667; + h_[1] = 0xbb67ae85; + h_[2] = 0x3c6ef372; + h_[3] = 0xa54ff53a; + h_[4] = 0x510e527f; + h_[5] = 0x9b05688c; + h_[6] = 0x1f83d9ab; + h_[7] = 0x5be0cd19; + } + void update(const void *buf, size_t bufSize) + { + inner_update(reinterpret_cast(buf), bufSize); + } + size_t digest(void *md, size_t mdSize, const void *buf, size_t bufSize) + { + if (mdSize < outByteSize_) return 0; + update(buf, bufSize); + term(roundBuf_, roundBufSize_); + char *p = reinterpret_cast(md); + for (size_t i = 0; i < hSize_; i++) { + cybozu::Set32bitAsBE(&p[i * sizeof(h_[0])], h_[i]); + } + return outByteSize_; + } +#ifndef CYBOZU_DONT_USE_STRING + void update(const std::string& buf) + { + update(buf.c_str(), buf.size()); + } + std::string digest(const std::string& buf) + { + return digest(buf.c_str(), buf.size()); + } + std::string digest(const void *buf, size_t bufSize) + { + std::string md(outByteSize_, 0); + digest(&md[0], md.size(), buf, bufSize); + return md; + } +#endif +}; + +class Sha512 : public sha2_local::Common { + friend struct sha2_local::Common; +private: + static const size_t blockSize_ = 128; + static const size_t hSize_ = 8; + static const size_t msgLenByte_ = 16; + uint64_t totalSize_; + size_t roundBufSize_; + char roundBuf_[blockSize_]; + uint64_t h_[hSize_]; + static const size_t outByteSize_ = hSize_ * sizeof(uint64_t); + const uint64_t *k_; + + template + void round1(uint64_t *S, const uint64_t *w, size_t i) + { + using namespace sha2_local; + uint64_t& a = S[i0]; + uint64_t& b = S[i1]; + uint64_t& c = S[i2]; + uint64_t& d = S[i3]; + uint64_t& e = S[i4]; + uint64_t& f = S[i5]; + uint64_t& g = S[i6]; + uint64_t& h = S[i7]; + + uint64_t s1 = rot64(e, 14) ^ rot64(e, 18) ^ rot64(e, 41); 
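Editor's sketch: the portable Sha256 above (Sha512 below exposes the same interface) used one-shot and incrementally. The "abc" digest is the standard FIPS 180 test vector:

    #include <cybozu/sha2.hpp>

    void demo()
    {
        cybozu::Sha256 h;
        std::string md = h.digest("abc");
        // md holds 32 raw bytes; in hex:
        // ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad

        cybozu::Sha256 h2; // the streaming form gives the same digest
        h2.update("a");
        h2.update("b");
        std::string md2 = h2.digest("c", 1); // md2 == md
    }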
+ uint64_t ch = g ^ (e & (f ^ g)); + uint64_t t0 = h + s1 + ch + k_[i] + w[i]; + uint64_t s0 = rot64(a, 28) ^ rot64(a, 34) ^ rot64(a, 39); + uint64_t maj = ((a | b) & c) | (a & b); + uint64_t t1 = s0 + maj; + d += t0; + h = t0 + t1; + } + /** + @param buf [in] buffer(64byte) + */ + void round(const char *buf) + { + using namespace sha2_local; + uint64_t w[80]; + for (int i = 0; i < 16; i++) { + w[i] = cybozu::Get64bitAsBE(&buf[i * 8]); + } + for (int i = 16 ; i < 80; i++) { + uint64_t t = w[i - 15]; + uint64_t s0 = rot64(t, 1) ^ rot64(t, 8) ^ (t >> 7); + t = w[i - 2]; + uint64_t s1 = rot64(t, 19) ^ rot64(t, 61) ^ (t >> 6); + w[i] = w[i - 16] + s0 + w[i - 7] + s1; + } + uint64_t s[8]; + for (int i = 0; i < 8; i++) { + s[i] = h_[i]; + } + for (int i = 0; i < 80; i += 8) { + round1<0, 1, 2, 3, 4, 5, 6, 7>(s, w, i + 0); + round1<7, 0, 1, 2, 3, 4, 5, 6>(s, w, i + 1); + round1<6, 7, 0, 1, 2, 3, 4, 5>(s, w, i + 2); + round1<5, 6, 7, 0, 1, 2, 3, 4>(s, w, i + 3); + round1<4, 5, 6, 7, 0, 1, 2, 3>(s, w, i + 4); + round1<3, 4, 5, 6, 7, 0, 1, 2>(s, w, i + 5); + round1<2, 3, 4, 5, 6, 7, 0, 1>(s, w, i + 6); + round1<1, 2, 3, 4, 5, 6, 7, 0>(s, w, i + 7); + } + for (int i = 0; i < 8; i++) { + h_[i] += s[i]; + } + totalSize_ += blockSize_; + } +public: + Sha512() + { + clear(); + } + void clear() + { + static const uint64_t kTbl[] = { + 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, + 0x59f111f1b605d019ULL, 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, + 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL, + 0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, + 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, 0x983e5152ee66dfabULL, + 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL, 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, + 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, + 0x53380d139d95b3dfULL, 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL, + 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL, + 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, + 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, 0x5b9cca4f7763e373ULL, + 0x682e6ff3d6b2b8a3ULL, 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL, + 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL, 0xca273eceea26619cULL, + 0xd186b8c721c0c207ULL, 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, + 0x113f9804bef90daeULL, 0x1b710b35131c471bULL, 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, + 0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL + }; + k_ = kTbl; + totalSize_ = 0; + roundBufSize_ = 0; + h_[0] = 0x6a09e667f3bcc908ull; + h_[1] = 0xbb67ae8584caa73bull; + h_[2] = 0x3c6ef372fe94f82bull; + h_[3] = 0xa54ff53a5f1d36f1ull; + h_[4] = 0x510e527fade682d1ull; + h_[5] = 0x9b05688c2b3e6c1full; + h_[6] = 0x1f83d9abfb41bd6bull; + h_[7] = 0x5be0cd19137e2179ull; + } + void update(const 
void *buf, size_t bufSize) + { + inner_update(reinterpret_cast(buf), bufSize); + } + size_t digest(void *md, size_t mdSize, const void *buf, size_t bufSize) + { + if (mdSize < outByteSize_) return 0; + update(buf, bufSize); + term(roundBuf_, roundBufSize_); + char *p = reinterpret_cast(md); + for (size_t i = 0; i < hSize_; i++) { + cybozu::Set64bitAsBE(&p[i * sizeof(h_[0])], h_[i]); + } + return outByteSize_; + } +#ifndef CYBOZU_DONT_USE_STRING + void update(const std::string& buf) + { + update(buf.c_str(), buf.size()); + } + std::string digest(const std::string& buf) + { + return digest(buf.c_str(), buf.size()); + } + std::string digest(const void *buf, size_t bufSize) + { + std::string md(outByteSize_, 0); + digest(&md[0], md.size(), buf, bufSize); + return md; + } +#endif +}; + +} // cybozu + +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/stream.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/stream.hpp new file mode 100644 index 000000000..bc110bdb0 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/stream.hpp @@ -0,0 +1,267 @@ +#pragma once +/** + @file + @brief stream and line stream class + + @author MITSUNARI Shigeo(@herumi) +*/ +#ifndef CYBOZU_DONT_USE_STRING +#include +#include +#endif +#include +#include + +namespace cybozu { + +namespace stream_local { + +template +struct is_convertible { + typedef char yes; + typedef int no; + + static no test(...); + static yes test(const To*); + static const bool value = sizeof(test(static_cast(0))) == sizeof(yes); +}; + +template +struct enable_if { typedef T type; }; + +template +struct enable_if {}; + +#ifndef CYBOZU_DONT_USE_STRING +/* specialization for istream */ +template +size_t readSome_inner(void *buf, size_t size, InputStream& is, typename enable_if::value>::type* = 0) +{ + if (size > 0x7fffffff) size = 0x7fffffff; + is.read(static_cast(buf), size); + const int64_t readSize = is.gcount(); + if (readSize < 0) return 0; + if (size == 1 && readSize == 0) is.clear(); + return static_cast(readSize); +} + +/* generic version for size_t readSome(void *, size_t) */ +template +size_t readSome_inner(void *buf, size_t size, InputStream& is, typename enable_if::value>::type* = 0) +{ + return is.readSome(buf, size); +} +#else +template +size_t readSome_inner(void *buf, size_t size, InputStream& is) +{ + return is.readSome(buf, size); +} +#endif + +#ifndef CYBOZU_DONT_USE_EXCEPTION +/* specialization for ostream */ +template +void writeSub(OutputStream& os, const void *buf, size_t size, typename enable_if::value>::type* = 0) +{ + if (!os.write(static_cast(buf), size)) throw cybozu::Exception("stream:writeSub") << size; +} +#endif + +#ifndef CYBOZU_DONT_USE_STRING +/* generic version for void write(const void*, size_t), which writes all data */ +template +void writeSub(OutputStream& os, const void *buf, size_t size, typename enable_if::value>::type* = 0) +{ + os.write(buf, size); +} + +template +void writeSub(bool *pb, OutputStream& os, const void *buf, size_t size, typename enable_if::value>::type* = 0) +{ + *pb = !!os.write(static_cast(buf), size); +} + +/* generic version for void write(const void*, size_t), which writes all data */ +template +void writeSub(bool *pb, OutputStream& os, const void *buf, size_t size, typename enable_if::value>::type* = 0) +{ + os.write(pb, buf, size); +} +#else +template +void writeSub(bool *pb, OutputStream& os, const void *buf, size_t size) +{ + os.write(pb, buf, size); +} +#endif + +} // stream_local + +/* + make a specializaiton of class to use new 
InputStream, OutputStream +*/ +template +struct InputStreamTag { + static size_t readSome(void *buf, size_t size, InputStream& is) + { + return stream_local::readSome_inner(buf, size, is); + } + static bool readChar(char *c, InputStream& is) + { + return readSome(c, 1, is) == 1; + } +}; + +template +struct OutputStreamTag { + static void write(OutputStream& os, const void *buf, size_t size) + { + stream_local::writeSub(os, buf, size); + } +}; + +class MemoryInputStream { + const char *p_; + size_t size_; + size_t pos; +public: + MemoryInputStream(const void *p, size_t size) : p_(static_cast(p)), size_(size), pos(0) {} + size_t readSome(void *buf, size_t size) + { + if (size > size_ - pos) size = size_ - pos; + memcpy(buf, p_ + pos, size); + pos += size; + return size; + } + size_t getPos() const { return pos; } +}; + +class MemoryOutputStream { + char *p_; + size_t size_; + size_t pos; +public: + MemoryOutputStream(void *p, size_t size) : p_(static_cast(p)), size_(size), pos(0) {} + void write(bool *pb, const void *buf, size_t size) + { + if (size > size_ - pos) { + *pb = false; + return; + } + memcpy(p_ + pos, buf, size); + pos += size; + *pb = true; + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + void write(const void *buf, size_t size) + { + bool b; + write(&b, buf, size); + if (!b) throw cybozu::Exception("MemoryOutputStream:write") << size << size_ << pos; + } +#endif + size_t getPos() const { return pos; } +}; + +#ifndef CYBOZU_DONT_USE_STRING +class StringInputStream { + const std::string& str_; + size_t pos; + StringInputStream(const StringInputStream&); + void operator=(const StringInputStream&); +public: + explicit StringInputStream(const std::string& str) : str_(str), pos(0) {} + size_t readSome(void *buf, size_t size) + { + const size_t remainSize = str_.size() - pos; + if (size > remainSize) size = remainSize; + memcpy(buf, &str_[pos], size); + pos += size; + return size; + } + size_t getPos() const { return pos; } +}; + +class StringOutputStream { + std::string& str_; + StringOutputStream(const StringOutputStream&); + void operator=(const StringOutputStream&); +public: + explicit StringOutputStream(std::string& str) : str_(str) {} + void write(bool *pb, const void *buf, size_t size) + { + str_.append(static_cast(buf), size); + *pb = true; + } + void write(const void *buf, size_t size) + { + str_.append(static_cast(buf), size); + } + size_t getPos() const { return str_.size(); } +}; +#endif + +template +size_t readSome(void *buf, size_t size, InputStream& is) +{ + return stream_local::readSome_inner(buf, size, is); +} + +template +void write(OutputStream& os, const void *buf, size_t size) +{ + stream_local::writeSub(os, buf, size); +} + +template +void write(bool *pb, OutputStream& os, const void *buf, size_t size) +{ + stream_local::writeSub(pb, os, buf, size); +} + +template +void read(bool *pb, void *buf, size_t size, InputStream& is) +{ + char *p = static_cast(buf); + while (size > 0) { + size_t readSize = cybozu::readSome(p, size, is); + if (readSize == 0) { + *pb = false; + return; + } + p += readSize; + size -= readSize; + } + *pb = true; +} + +#ifndef CYBOZU_DONT_USE_EXCEPTION +template +void read(void *buf, size_t size, InputStream& is) +{ + bool b; + read(&b, buf, size, is); + if (!b) throw cybozu::Exception("stream:read"); +} +#endif + +template +bool readChar(char *c, InputStream& is) +{ + return readSome(c, 1, is) == 1; +} + +template +void writeChar(OutputStream& os, char c) +{ + cybozu::write(os, &c, 1); +} + +template +void writeChar(bool *pb, OutputStream& os, char 
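Editor's sketch of the stream adapters above: Memory*Stream over fixed buffers, String*Stream over std::string, plus the free functions cybozu::read/write, which loop until complete and throw on failure (the non-throwing overloads take a bool*):

    #include <cybozu/stream.hpp>

    void demo()
    {
        char buf[16];
        cybozu::MemoryOutputStream os(buf, sizeof(buf));
        cybozu::write(os, "hello", 5); // throws if the buffer would overflow

        cybozu::MemoryInputStream is(buf, os.getPos());
        char tmp[5];
        cybozu::read(tmp, 5, is); // reads all 5 bytes or throws
    }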
c) +{ + cybozu::write(pb, os, &c, 1); +} + +} // cybozu diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/test.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/test.hpp new file mode 100644 index 000000000..7dfffab96 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/test.hpp @@ -0,0 +1,373 @@ +#pragma once +/** + @file + @brief unit test class + + @author MITSUNARI Shigeo(@herumi) +*/ + +#include +#include +#include +#include +#include +#include +#if defined(_MSC_VER) && (MSC_VER <= 1500) + #include +#else + #include +#endif + +namespace cybozu { namespace test { + +class AutoRun { + typedef void (*Func)(); + typedef std::list > UnitTestList; +public: + AutoRun() + : init_(0) + , term_(0) + , okCount_(0) + , ngCount_(0) + , exceptionCount_(0) + { + } + void setup(Func init, Func term) + { + init_ = init; + term_ = term; + } + void append(const char *name, Func func) + { + list_.push_back(std::make_pair(name, func)); + } + void set(bool isOK) + { + if (isOK) { + okCount_++; + } else { + ngCount_++; + } + } + std::string getBaseName(const std::string& name) const + { +#ifdef _WIN32 + const char sep = '\\'; +#else + const char sep = '/'; +#endif + size_t pos = name.find_last_of(sep); + std::string ret = name.substr(pos + 1); + pos = ret.find('.'); + return ret.substr(0, pos); + } + int run(int, char *argv[]) + { + std::string msg; + try { + if (init_) init_(); + for (UnitTestList::const_iterator i = list_.begin(), ie = list_.end(); i != ie; ++i) { + std::cout << "ctest:module=" << i->first << std::endl; + try { + (i->second)(); + } catch (std::exception& e) { + exceptionCount_++; + std::cout << "ctest: " << i->first << " is stopped by exception " << e.what() << std::endl; + } catch (...) { + exceptionCount_++; + std::cout << "ctest: " << i->first << " is stopped by unknown exception" << std::endl; + } + } + if (term_) term_(); + } catch (std::exception& e) { + msg = std::string("ctest:err:") + e.what(); + } catch (...) { + msg = "ctest:err: catch unknown exception"; + } + fflush(stdout); + if (msg.empty()) { + int err = ngCount_ + exceptionCount_; + int total = okCount_ + err; + std::cout << "ctest:name=" << getBaseName(*argv) + << ", module=" << list_.size() + << ", total=" << total + << ", ok=" << okCount_ + << ", ng=" << ngCount_ + << ", exception=" << exceptionCount_ << std::endl; + return err > 0 ? 
1 : 0; + } else { + std::cout << msg << std::endl; + return 1; + } + } + static inline AutoRun& getInstance() + { + static AutoRun instance; + return instance; + } +private: + Func init_; + Func term_; + int okCount_; + int ngCount_; + int exceptionCount_; + UnitTestList list_; +}; + +static AutoRun& autoRun = AutoRun::getInstance(); + +inline void test(bool ret, const std::string& msg, const std::string& param, const char *file, int line) +{ + autoRun.set(ret); + if (!ret) { + printf("%s(%d):ctest:%s(%s);\n", file, line, msg.c_str(), param.c_str()); + } +} + +template +bool isEqual(const T& lhs, const U& rhs) +{ + return lhs == rhs; +} + +// avoid warning of comparision of integers of different signs +inline bool isEqual(size_t lhs, int rhs) +{ + return lhs == size_t(rhs); +} +inline bool isEqual(int lhs, size_t rhs) +{ + return size_t(lhs) == rhs; +} +inline bool isEqual(const char *lhs, const char *rhs) +{ + return strcmp(lhs, rhs) == 0; +} +inline bool isEqual(char *lhs, const char *rhs) +{ + return strcmp(lhs, rhs) == 0; +} +inline bool isEqual(const char *lhs, char *rhs) +{ + return strcmp(lhs, rhs) == 0; +} +inline bool isEqual(char *lhs, char *rhs) +{ + return strcmp(lhs, rhs) == 0; +} +// avoid to compare float directly +inline bool isEqual(float lhs, float rhs) +{ + union fi { + float f; + uint32_t i; + } lfi, rfi; + lfi.f = lhs; + rfi.f = rhs; + return lfi.i == rfi.i; +} +// avoid to compare double directly +inline bool isEqual(double lhs, double rhs) +{ + union di { + double d; + uint64_t i; + } ldi, rdi; + ldi.d = lhs; + rdi.d = rhs; + return ldi.i == rdi.i; +} + +} } // cybozu::test + +#ifndef CYBOZU_TEST_DISABLE_AUTO_RUN +int main(int argc, char *argv[]) +{ + return cybozu::test::autoRun.run(argc, argv); +} +#endif + +/** + alert if !x + @param x [in] +*/ +#define CYBOZU_TEST_ASSERT(x) cybozu::test::test(!!(x), "CYBOZU_TEST_ASSERT", #x, __FILE__, __LINE__) + +/** + alert if x != y + @param x [in] + @param y [in] +*/ +#define CYBOZU_TEST_EQUAL(x, y) { \ + bool _cybozu_eq = cybozu::test::isEqual(x, y); \ + cybozu::test::test(_cybozu_eq, "CYBOZU_TEST_EQUAL", #x ", " #y, __FILE__, __LINE__); \ + if (!_cybozu_eq) { \ + std::cout << "ctest: lhs=" << (x) << std::endl; \ + std::cout << "ctest: rhs=" << (y) << std::endl; \ + } \ +} +/** + alert if fabs(x, y) >= eps + @param x [in] + @param y [in] +*/ +#define CYBOZU_TEST_NEAR(x, y, eps) { \ + bool _cybozu_isNear = fabs((x) - (y)) < eps; \ + cybozu::test::test(_cybozu_isNear, "CYBOZU_TEST_NEAR", #x ", " #y, __FILE__, __LINE__); \ + if (!_cybozu_isNear) { \ + std::cout << "ctest: lhs=" << (x) << std::endl; \ + std::cout << "ctest: rhs=" << (y) << std::endl; \ + } \ +} + +#define CYBOZU_TEST_EQUAL_POINTER(x, y) { \ + bool _cybozu_eq = x == y; \ + cybozu::test::test(_cybozu_eq, "CYBOZU_TEST_EQUAL_POINTER", #x ", " #y, __FILE__, __LINE__); \ + if (!_cybozu_eq) { \ + std::cout << "ctest: lhs=" << static_cast(x) << std::endl; \ + std::cout << "ctest: rhs=" << static_cast(y) << std::endl; \ + } \ +} +/** + alert if x[] != y[] + @param x [in] + @param y [in] + @param n [in] +*/ +#define CYBOZU_TEST_EQUAL_ARRAY(x, y, n) { \ + for (size_t _cybozu_test_i = 0, _cybozu_ie = (size_t)(n); _cybozu_test_i < _cybozu_ie; _cybozu_test_i++) { \ + bool _cybozu_eq = cybozu::test::isEqual((x)[_cybozu_test_i], (y)[_cybozu_test_i]); \ + cybozu::test::test(_cybozu_eq, "CYBOZU_TEST_EQUAL_ARRAY", #x ", " #y ", " #n, __FILE__, __LINE__); \ + if (!_cybozu_eq) { \ + std::cout << "ctest: i=" << _cybozu_test_i << std::endl; \ + std::cout << "ctest: lhs=" << 
(x)[_cybozu_test_i] << std::endl; \ + std::cout << "ctest: rhs=" << (y)[_cybozu_test_i] << std::endl; \ + } \ + } \ +} + +/** + always alert + @param msg [in] +*/ +#define CYBOZU_TEST_FAIL(msg) cybozu::test::test(false, "CYBOZU_TEST_FAIL", msg, __FILE__, __LINE__) + +/** + verify message in exception +*/ +#define CYBOZU_TEST_EXCEPTION_MESSAGE(statement, Exception, msg) \ +{ \ + int _cybozu_ret = 0; \ + std::string _cybozu_errMsg; \ + try { \ + statement; \ + _cybozu_ret = 1; \ + } catch (const Exception& _cybozu_e) { \ + _cybozu_errMsg = _cybozu_e.what(); \ + if (_cybozu_errMsg.find(msg) == std::string::npos) { \ + _cybozu_ret = 2; \ + } \ + } catch (...) { \ + _cybozu_ret = 3; \ + } \ + if (_cybozu_ret) { \ + cybozu::test::test(false, "CYBOZU_TEST_EXCEPTION_MESSAGE", #statement ", " #Exception ", " #msg, __FILE__, __LINE__); \ + if (_cybozu_ret == 1) { \ + std::cout << "ctest: no exception" << std::endl; \ + } else if (_cybozu_ret == 2) { \ + std::cout << "ctest: bad exception msg:" << _cybozu_errMsg << std::endl; \ + } else { \ + std::cout << "ctest: unexpected exception" << std::endl; \ + } \ + } else { \ + cybozu::test::autoRun.set(true); \ + } \ +} + +#define CYBOZU_TEST_EXCEPTION(statement, Exception) \ +{ \ + int _cybozu_ret = 0; \ + try { \ + statement; \ + _cybozu_ret = 1; \ + } catch (const Exception&) { \ + } catch (...) { \ + _cybozu_ret = 2; \ + } \ + if (_cybozu_ret) { \ + cybozu::test::test(false, "CYBOZU_TEST_EXCEPTION", #statement ", " #Exception, __FILE__, __LINE__); \ + if (_cybozu_ret == 1) { \ + std::cout << "ctest: no exception" << std::endl; \ + } else { \ + std::cout << "ctest: unexpected exception" << std::endl; \ + } \ + } else { \ + cybozu::test::autoRun.set(true); \ + } \ +} + +/** + verify statement does not throw +*/ +#define CYBOZU_TEST_NO_EXCEPTION(statement) \ +try { \ + statement; \ + cybozu::test::autoRun.set(true); \ +} catch (...) 
{ \ + cybozu::test::test(false, "CYBOZU_TEST_NO_EXCEPTION", #statement, __FILE__, __LINE__); \ +} + +/** + append auto unit test + @param name [in] module name +*/ +#define CYBOZU_TEST_AUTO(name) \ +void cybozu_test_ ## name(); \ +struct cybozu_test_local_ ## name { \ + cybozu_test_local_ ## name() \ + { \ + cybozu::test::autoRun.append(#name, cybozu_test_ ## name); \ + } \ +} cybozu_test_local_instance_ ## name; \ +void cybozu_test_ ## name() + +/** + append auto unit test with fixture + @param name [in] module name +*/ +#define CYBOZU_TEST_AUTO_WITH_FIXTURE(name, Fixture) \ +void cybozu_test_ ## name(); \ +void cybozu_test_real_ ## name() \ +{ \ + Fixture f; \ + cybozu_test_ ## name(); \ +} \ +struct cybozu_test_local_ ## name { \ + cybozu_test_local_ ## name() \ + { \ + cybozu::test::autoRun.append(#name, cybozu_test_real_ ## name); \ + } \ +} cybozu_test_local_instance_ ## name; \ +void cybozu_test_ ## name() + +/** + setup fixture + @param Fixture [in] class name of fixture + @note cstr of Fixture is called before test and dstr of Fixture is called after test +*/ +#define CYBOZU_TEST_SETUP_FIXTURE(Fixture) \ +Fixture *cybozu_test_local_fixture; \ +void cybozu_test_local_init() \ +{ \ + cybozu_test_local_fixture = new Fixture(); \ +} \ +void cybozu_test_local_term() \ +{ \ + delete cybozu_test_local_fixture; \ +} \ +struct cybozu_test_local_fixture_setup_ { \ + cybozu_test_local_fixture_setup_() \ + { \ + cybozu::test::autoRun.setup(cybozu_test_local_init, cybozu_test_local_term); \ + } \ +} cybozu_test_local_fixture_setup_instance_; diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/unordered_map.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/unordered_map.hpp new file mode 100644 index 000000000..89f8f8774 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/unordered_map.hpp @@ -0,0 +1,13 @@ +#pragma once + +#include + +#ifdef CYBOZU_USE_BOOST + #include +#elif (CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11) || (defined __APPLE__) + #include +#elif (CYBOZU_CPP_VERSION == CYBOZU_CPP_VERSION_TR1) + #include + #include +#endif + diff --git a/vendor/github.com/byzantine-lab/mcl/include/cybozu/xorshift.hpp b/vendor/github.com/byzantine-lab/mcl/include/cybozu/xorshift.hpp new file mode 100644 index 000000000..08c6a04f9 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/cybozu/xorshift.hpp @@ -0,0 +1,189 @@ +#pragma once +/** + @file + @brief XorShift + + @author MITSUNARI Shigeo(@herumi) + @author MITSUNARI Shigeo +*/ +#include +#include + +namespace cybozu { + +namespace xorshift_local { + +/* + U is uint32_t or uint64_t +*/ +template +void read_local(void *p, size_t n, Gen& gen, U (Gen::*f)()) +{ + uint8_t *dst = static_cast(p); + const size_t uSize = sizeof(U); + assert(uSize == 4 || uSize == 8); + union ua { + U u; + uint8_t a[uSize]; + }; + + while (n >= uSize) { + ua ua; + ua.u = (gen.*f)(); + for (size_t i = 0; i < uSize; i++) { + dst[i] = ua.a[i]; + } + dst += uSize; + n -= uSize; + } + assert(n < uSize); + if (n > 0) { + ua ua; + ua.u = (gen.*f)(); + for (size_t i = 0; i < n; i++) { + dst[i] = ua.a[i]; + } + } +} + +} // xorshift_local + +class XorShift { + uint32_t x_, y_, z_, w_; +public: + explicit XorShift(uint32_t x = 0, uint32_t y = 0, uint32_t z = 0, uint32_t w = 0) + { + init(x, y, z, w); + } + void init(uint32_t x = 0, uint32_t y = 0, uint32_t z = 0, uint32_t w = 0) + { + x_ = x ? x : 123456789; + y_ = y ? y : 362436069; + z_ = z ? z : 521288629; + w_ = w ? 
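Editor's sketch of the unit-test macros that close test.hpp; including the header in one translation unit supplies main() through autoRun unless CYBOZU_TEST_DISABLE_AUTO_RUN is defined:

    #include <stdexcept>
    #include <cybozu/test.hpp>

    CYBOZU_TEST_AUTO(arith)
    {
        CYBOZU_TEST_EQUAL(1 + 1, 2);
        CYBOZU_TEST_ASSERT(2 < 3);
        CYBOZU_TEST_EXCEPTION(throw std::runtime_error("x"), std::exception);
    }
    // Running the binary prints a ctest summary line with ok/ng counts
    // and returns nonzero when any check failed.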
w : 88675123; + } + uint32_t get32() + { + unsigned int t = x_ ^ (x_ << 11); + x_ = y_; y_ = z_; z_ = w_; + return w_ = (w_ ^ (w_ >> 19)) ^ (t ^ (t >> 8)); + } + uint32_t operator()() { return get32(); } + uint64_t get64() + { + uint32_t a = get32(); + uint32_t b = get32(); + return (uint64_t(a) << 32) | b; + } + template + void read(bool *pb, T *p, size_t n) + { + xorshift_local::read_local(p, n * sizeof(T), *this, &XorShift::get32); + *pb = true; + } + template + size_t read(T *p, size_t n) + { + bool b; + read(&b, p, n); + (void)b; + return n; + } +}; + +// see http://xorshift.di.unimi.it/xorshift128plus.c +class XorShift128Plus { + uint64_t s_[2]; + static const uint64_t seed0 = 123456789; + static const uint64_t seed1 = 987654321; +public: + explicit XorShift128Plus(uint64_t s0 = seed0, uint64_t s1 = seed1) + { + init(s0, s1); + } + void init(uint64_t s0 = seed0, uint64_t s1 = seed1) + { + s_[0] = s0; + s_[1] = s1; + } + uint32_t get32() + { + return static_cast(get64()); + } + uint64_t operator()() { return get64(); } + uint64_t get64() + { + uint64_t s1 = s_[0]; + const uint64_t s0 = s_[1]; + s_[0] = s0; + s1 ^= s1 << 23; + s_[1] = s1 ^ s0 ^ (s1 >> 18) ^ (s0 >> 5); + return s_[1] + s0; + } + template + void read(bool *pb, T *p, size_t n) + { + xorshift_local::read_local(p, n * sizeof(T), *this, &XorShift128Plus::get64); + *pb = true; + } + template + size_t read(T *p, size_t n) + { + bool b; + read(&b, p, n); + (void)b; + return n; + } +}; + +// see http://xoroshiro.di.unimi.it/xoroshiro128plus.c +class Xoroshiro128Plus { + uint64_t s_[2]; + static const uint64_t seed0 = 123456789; + static const uint64_t seed1 = 987654321; + uint64_t rotl(uint64_t x, unsigned int k) const + { + return (x << k) | (x >> (64 - k)); + } +public: + explicit Xoroshiro128Plus(uint64_t s0 = seed0, uint64_t s1 = seed1) + { + init(s0, s1); + } + void init(uint64_t s0 = seed0, uint64_t s1 = seed1) + { + s_[0] = s0; + s_[1] = s1; + } + uint32_t get32() + { + return static_cast(get64()); + } + uint64_t operator()() { return get64(); } + uint64_t get64() + { + uint64_t s0 = s_[0]; + uint64_t s1 = s_[1]; + uint64_t result = s0 + s1; + s1 ^= s0; + s_[0] = rotl(s0, 55) ^ s1 ^ (s1 << 14); + s_[1] = rotl(s1, 36); + return result; + } + template + void read(bool *pb, T *p, size_t n) + { + xorshift_local::read_local(p, n * sizeof(T), *this, &Xoroshiro128Plus::get64); + *pb = true; + } + template + size_t read(T *p, size_t n) + { + bool b; + read(&b, p, n); + (void)b; + return n; + } +}; + +} // cybozu diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/aggregate_sig.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/aggregate_sig.hpp new file mode 100644 index 000000000..f31405705 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/aggregate_sig.hpp @@ -0,0 +1,265 @@ +#pragma once +/** + @file + @brief aggregate signature + @author MITSUNARI Shigeo(@herumi) + see http://crypto.stanford.edu/~dabo/papers/aggreg.pdf + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include +#include +#include +#include +#ifndef MCLBN_FP_UNIT_SIZE + #define MCLBN_FP_UNIT_SIZE 4 +#endif +#if MCLBN_FP_UNIT_SIZE == 4 +#include +namespace mcl { +using namespace mcl::bn256; +} +#elif MCLBN_FP_UNIT_SIZE == 6 +#include +namespace mcl { +using namespace mcl::bn384; +} +#elif MCLBN_FP_UNIT_SIZE == 8 +#include +namespace mcl { +using namespace mcl::bn512; +} +#else + #error "MCLBN_FP_UNIT_SIZE must be 4, 6, or 8" +#endif + +namespace mcl { namespace aggs { + +/* + AGGregate Signature 
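Editor's sketch: XorShift, XorShift128Plus, and Xoroshiro128Plus above share one surface (get32/get64/operator()/read), so any of them can stand in for RandomGenerator where a reproducible, seedable stream is wanted (tests, benchmarks); none of them is cryptographically secure.

    #include <cybozu/xorshift.hpp>

    void demo()
    {
        cybozu::XorShift rg(12345); // fixed seed -> reproducible sequence
        uint32_t x = rg.get32();
        cybozu::Xoroshiro128Plus rg2(1, 2);
        uint64_t y = rg2(); // operator() forwards to get64()
        uint64_t buf[8];
        rg2.read(buf, 8); // always fills all 8 values
        (void)x; (void)y;
    }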
Template class +*/ +template +struct AGGST { + typedef typename G1::BaseFp Fp; + + class SecretKey; + class PublicKey; + class Signature; + + static G1 P_; + static G2 Q_; + static std::vector Qcoeff_; +public: + static void init(const mcl::CurveParam& cp = mcl::BN254) + { + initPairing(cp); + hashAndMapToG1(P_, "0"); + hashAndMapToG2(Q_, "0"); + precomputeG2(Qcoeff_, Q_); + } + class Signature : public fp::Serializable { + G1 S_; + friend class SecretKey; + friend class PublicKey; + public: + template + void load(InputStream& is, int ioMode = IoSerialize) + { + S_.load(is, ioMode); + } + template + void save(OutputStream& os, int ioMode = IoSerialize) const + { + S_.save(os, ioMode); + } + friend std::istream& operator>>(std::istream& is, Signature& self) + { + self.load(is, fp::detectIoMode(G1::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const Signature& self) + { + self.save(os, fp::detectIoMode(G1::getIoMode(), os)); + return os; + } + bool operator==(const Signature& rhs) const + { + return S_ == rhs.S_; + } + bool operator!=(const Signature& rhs) const { return !operator==(rhs); } + /* + aggregate sig[0..n) and set *this + */ + void aggregate(const Signature *sig, size_t n) + { + G1 S; + S.clear(); + for (size_t i = 0; i < n; i++) { + S += sig[i].S_; + } + S_ = S; + } + void aggregate(const std::vector& sig) + { + aggregate(sig.data(), sig.size()); + } + /* + aggregate verification + */ + bool verify(const void *const *msgVec, const size_t *sizeVec, const PublicKey *pubVec, size_t n) const + { + if (n == 0) return false; + typedef std::set FpSet; + FpSet msgSet; + typedef std::vector G1Vec; + G1Vec hv(n); + for (size_t i = 0; i < n; i++) { + Fp h; + h.setHashOf(msgVec[i], sizeVec[i]); + std::pair ret = msgSet.insert(h); + if (!ret.second) throw cybozu::Exception("aggs::verify:same msg"); + mapToG1(hv[i], h); + } + /* + e(aggSig, xQ) = prod_i e(hv[i], pub[i].Q) + <=> finalExp(e(-aggSig, xQ) * prod_i millerLoop(hv[i], pub[i].xQ)) == 1 + */ + GT e1, e2; + precomputedMillerLoop(e1, -S_, Qcoeff_); + millerLoop(e2, hv[0], pubVec[0].xQ_); + for (size_t i = 1; i < n; i++) { + GT e; + millerLoop(e, hv[i], pubVec[i].xQ_); + e2 *= e; + } + e1 *= e2; + finalExp(e1, e1); + return e1.isOne(); + } + bool verify(const std::vector& msgVec, const std::vector& pubVec) const + { + const size_t n = msgVec.size(); + if (n != pubVec.size()) throw cybozu::Exception("aggs:Signature:verify:bad size") << msgVec.size() << pubVec.size(); + if (n == 0) return false; + std::vector mv(n); + std::vector sv(n); + for (size_t i = 0; i < n; i++) { + mv[i] = msgVec[i].c_str(); + sv[i] = msgVec[i].size(); + } + return verify(&mv[0], &sv[0], &pubVec[0], n); + } + }; + class PublicKey : public fp::Serializable { + G2 xQ_; + friend class SecretKey; + friend class Signature; + public: + template + void load(InputStream& is, int ioMode = IoSerialize) + { + xQ_.load(is, ioMode); + } + template + void save(OutputStream& os, int ioMode = IoSerialize) const + { + xQ_.save(os, ioMode); + } + friend std::istream& operator>>(std::istream& is, PublicKey& self) + { + self.load(is, fp::detectIoMode(G2::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const PublicKey& self) + { + self.save(os, fp::detectIoMode(G2::getIoMode(), os)); + return os; + } + bool operator==(const PublicKey& rhs) const + { + return xQ_ == rhs.xQ_; + } + bool operator!=(const PublicKey& rhs) const { return !operator==(rhs); } + bool verify(const Signature& sig, const void *m, 
size_t mSize) const + { + /* + H = hash(m) + e(S, Q) = e(H, xQ) where S = xH + <=> e(S, Q)e(-H, xQ) = 1 + <=> finalExp(millerLoop(S, Q)e(-H, x)) = 1 + */ + G1 H; + hashAndMapToG1(H, m, mSize); + G1::neg(H, H); + GT e1, e2; + precomputedMillerLoop(e1, sig.S_, Qcoeff_); + millerLoop(e2, H, xQ_); + e1 *= e2; + finalExp(e1, e1); + return e1.isOne(); + } + bool verify(const Signature& sig, const std::string& m) const + { + return verify(sig, m.c_str(), m.size()); + } + }; + class SecretKey : public fp::Serializable { + Fr x_; + friend class PublicKey; + friend class Signature; + public: + template + void load(InputStream& is, int ioMode = IoSerialize) + { + x_.load(is, ioMode); + } + template + void save(OutputStream& os, int ioMode = IoSerialize) const + { + x_.save(os, ioMode); + } + friend std::istream& operator>>(std::istream& is, SecretKey& self) + { + self.load(is, fp::detectIoMode(Fr::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const SecretKey& self) + { + self.save(os, fp::detectIoMode(Fr::getIoMode(), os)); + return os; + } + bool operator==(const SecretKey& rhs) const + { + return x_ == rhs.x_; + } + bool operator!=(const SecretKey& rhs) const { return !operator==(rhs); } + void init() + { + x_.setByCSPRNG(); + } + void getPublicKey(PublicKey& pub) const + { + G2::mul(pub.xQ_, Q_, x_); + } + void sign(Signature& sig, const void *m, size_t mSize) const + { + hashAndMapToG1(sig.S_, m, mSize); + G1::mul(sig.S_, sig.S_, x_); + } + void sign(Signature& sig, const std::string& m) const + { + sign(sig, m.c_str(), m.size()); + } + }; +}; + +template G1 AGGST::P_; +template G2 AGGST::Q_; +template std::vector AGGST::Qcoeff_; + +typedef AGGST<> AGGS; +typedef AGGS::SecretKey SecretKey; +typedef AGGS::PublicKey PublicKey; +typedef AGGS::Signature Signature; + +} } // mcl::aggs diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/ahe.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/ahe.hpp new file mode 100644 index 000000000..239319d0d --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/ahe.hpp @@ -0,0 +1,76 @@ +#pragma once +/** + @file + @brief 192/256-bit additive homomorphic encryption by lifted-ElGamal + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include +#include + +namespace mcl { + +#ifdef MCL_USE_AHE192 +namespace ahe192 { + +const mcl::EcParam& para = mcl::ecparam::NIST_P192; + +typedef mcl::FpT Fp; +typedef mcl::FpT Zn; +typedef mcl::EcT Ec; +typedef mcl::ElgamalT ElgamalEc; +typedef ElgamalEc::PrivateKey SecretKey; +typedef ElgamalEc::PublicKey PublicKey; +typedef ElgamalEc::CipherText CipherText; + +static inline void initAhe() +{ + Fp::init(para.p); + Zn::init(para.n); + Ec::init(para.a, para.b); + Ec::setIoMode(16); + Zn::setIoMode(16); +} + +static inline void initSecretKey(SecretKey& sec) +{ + const Ec P(Fp(para.gx), Fp(para.gy)); + sec.init(P, Zn::getBitSize()); +} + +} //mcl::ahe192 +#endif + +#ifdef MCL_USE_AHE256 +namespace ahe256 { + +const mcl::EcParam& para = mcl::ecparam::NIST_P256; + +typedef mcl::FpT Fp; +typedef mcl::FpT Zn; +typedef mcl::EcT Ec; +typedef mcl::ElgamalT ElgamalEc; +typedef ElgamalEc::PrivateKey SecretKey; +typedef ElgamalEc::PublicKey PublicKey; +typedef ElgamalEc::CipherText CipherText; + +static inline void initAhe() +{ + Fp::init(para.p); + Zn::init(para.n); + Ec::init(para.a, para.b); + Ec::setIoMode(16); + Zn::setIoMode(16); +} + +static inline void initSecretKey(SecretKey& sec) +{ + const Ec 
P(Fp(para.gx), Fp(para.gy)); + sec.init(P, Zn::getBitSize()); +} + +} //mcl::ahe256 +#endif + +} // mcl diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/array.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/array.hpp new file mode 100644 index 000000000..a6d2a8fa3 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/array.hpp @@ -0,0 +1,167 @@ +#pragma once +/** + @file + @brief tiny vector class + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include +#include +#ifndef CYBOZU_DONT_USE_EXCEPTION +#include +#endif + +namespace mcl { + +template +class Array { + T *p_; + size_t n_; + template + void swap_(U& x, U& y) const + { + U t; + t = x; + x = y; + y = t; + } +public: + Array() : p_(0), n_(0) {} + ~Array() + { + free(p_); + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + Array(const Array& rhs) + : p_(0) + , n_(0) + { + if (rhs.n_ == 0) return; + p_ = (T*)malloc(sizeof(T) * rhs.n_); + if (p_ == 0) throw std::bad_alloc(); + n_ = rhs.n_; + for (size_t i = 0; i < n_; i++) { + p_[i] = rhs.p_[i]; + } + } + Array& operator=(const Array& rhs) + { + Array tmp(rhs); + tmp.swap(*this); + return *this; + } +#endif + bool resize(size_t n) + { + if (n <= n_) { + n_ = n; + if (n == 0) { + free(p_); + p_ = 0; + } + return true; + } + T *q = (T*)malloc(sizeof(T) * n); + if (q == 0) return false; + for (size_t i = 0; i < n_; i++) { + q[i] = p_[i]; + } + free(p_); + p_ = q; + n_ = n; + return true; + } + bool copy(const Array& rhs) + { + if (this == &rhs) return true; + if (n_ < rhs.n_) { + clear(); + if (!resize(rhs.n_)) return false; + } + for (size_t i = 0; i < rhs.n_; i++) { + p_[i] = rhs.p_[i]; + } + n_ = rhs.n_; + return true; + } + void clear() + { + free(p_); + p_ = 0; + n_ = 0; + } + size_t size() const { return n_; } + void swap(Array& rhs) + { + swap_(p_, rhs.p_); + swap_(n_, rhs.n_); + } + T& operator[](size_t n) { return p_[n]; } + const T& operator[](size_t n) const { return p_[n]; } + T* data() { return p_; } + const T* data() const { return p_; } +}; + +template +class FixedArray { + T p_[maxSize]; + size_t n_; + FixedArray(const FixedArray&); + void operator=(const FixedArray&); + template + void swap_(U& x, U& y) const + { + U t; + t = x; + x = y; + y = t; + } +public: + FixedArray() : n_(0) {} + bool resize(size_t n) + { + if (n > maxSize) return false; + n_ = n; + return true; + } + bool copy(const FixedArray& rhs) + { + if (this == &rhs) return true; + for (size_t i = 0; i < rhs.n_; i++) { + p_[i] = rhs.p_[i]; + } + n_ = rhs.n_; + return true; + } + void clear() + { + n_ = 0; + } + size_t size() const { return n_; } + void swap(FixedArray& rhs) + { + T *minP = p_; + size_t minN = n_; + T *maxP = rhs.p_; + size_t maxN = rhs.n_; + if (minP > maxP) { + swap_(minP, maxP); + swap_(minN, maxN); + } + for (size_t i = 0; i < minN; i++) { + swap_(minP[i], maxP[i]); + } + for (size_t i = minN; i < maxN; i++) { + minP[i] = maxP[i]; + } + swap_(n_, rhs.n_); + } + T& operator[](size_t n) { return p_[n]; } + const T& operator[](size_t n) const { return p_[n]; } + T* data() { return p_; } + const T* data() const { return p_; } +}; + +} // mcl + diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/bls12_381.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/bls12_381.hpp new file mode 100644 index 000000000..316e142af --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/bls12_381.hpp @@ -0,0 +1,15 @@ +#pragma once +/** + @file + @brief preset class for BLS12-381 pairing + 
@author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#define MCL_MAX_FP_BIT_SIZE 384 +#define MCL_MAX_FR_BIT_SIZE 256 +#include + +namespace mcl { namespace bls12 { +using namespace mcl::bn; +} } diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/bn.h b/vendor/github.com/byzantine-lab/mcl/include/mcl/bn.h new file mode 100644 index 000000000..0a31d5501 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/bn.h @@ -0,0 +1,428 @@ +#pragma once +/** + @file + @brief C interface of 256/384-bit optimal ate pairing over BN curves + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +/* + the order of an elliptic curve over Fp is Fr +*/ +#ifndef MCLBN_FP_UNIT_SIZE + #error "define MCLBN_FP_UNIT_SIZE 4(, 6 or 8)" +#endif +#ifndef MCLBN_FR_UNIT_SIZE + #define MCLBN_FR_UNIT_SIZE MCLBN_FP_UNIT_SIZE +#endif +#define MCLBN_COMPILED_TIME_VAR ((MCLBN_FR_UNIT_SIZE) * 10 + (MCLBN_FP_UNIT_SIZE)) + +#include // for uint64_t, uint8_t +#include // for size_t + + +#if defined(_MSC_VER) + #ifdef MCLBN_DONT_EXPORT + #define MCLBN_DLL_API + #else + #ifdef MCLBN_DLL_EXPORT + #define MCLBN_DLL_API __declspec(dllexport) + #else + #define MCLBN_DLL_API __declspec(dllimport) + #endif + #endif + #ifndef MCLBN_NO_AUTOLINK + #if MCLBN_FP_UNIT_SIZE == 4 + #pragma comment(lib, "mclbn256.lib") + #elif MCLBN_FP_UNIT_SIZE == 6 + #pragma comment(lib, "mclbn384.lib") + #else + #pragma comment(lib, "mclbn512.lib") + #endif + #endif +#elif defined(__EMSCRIPTEN__) && !defined(MCLBN_DONT_EXPORT) + #define MCLBN_DLL_API __attribute__((used)) +#elif defined(__wasm__) && !defined(MCLBN_DONT_EXPORT) + #define MCLBN_DLL_API __attribute__((visibility("default"))) +#else + #define MCLBN_DLL_API +#endif + +#ifdef __EMSCRIPTEN__ + // avoid 64-bit integer + #define mclSize unsigned int + #define mclInt int +#else + // use #define for cgo + #define mclSize size_t + #define mclInt int64_t +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef MCLBN_NOT_DEFINE_STRUCT + +typedef struct mclBnFr mclBnFr; +typedef struct mclBnG1 mclBnG1; +typedef struct mclBnG2 mclBnG2; +typedef struct mclBnGT mclBnGT; +typedef struct mclBnFp mclBnFp; +typedef struct mclBnFp2 mclBnFp2; + +#else + +typedef struct { + uint64_t d[MCLBN_FR_UNIT_SIZE]; +} mclBnFr; + +typedef struct { + uint64_t d[MCLBN_FP_UNIT_SIZE * 3]; +} mclBnG1; + +typedef struct { + uint64_t d[MCLBN_FP_UNIT_SIZE * 2 * 3]; +} mclBnG2; + +typedef struct { + uint64_t d[MCLBN_FP_UNIT_SIZE * 12]; +} mclBnGT; + +typedef struct { + uint64_t d[MCLBN_FP_UNIT_SIZE]; +} mclBnFp; + +typedef struct { + mclBnFp d[2]; +} mclBnFp2; + +#endif + +#include + +#define MCLBN_IO_SERIALIZE_HEX_STR 2048 +// for backword compatibility +enum { + mclBn_CurveFp254BNb = 0, + mclBn_CurveFp382_1 = 1, + mclBn_CurveFp382_2 = 2, + mclBn_CurveFp462 = 3, + mclBn_CurveSNARK1 = 4, + mclBls12_CurveFp381 = 5 +}; + +// return 0xABC which means A.BC +MCLBN_DLL_API int mclBn_getVersion(); +/* + init library + @param curve [in] type of bn curve + @param compiledTimeVar [in] specify MCLBN_COMPILED_TIME_VAR, + which macro is used to make sure that the values + are the same when the library is built and used + @return 0 if success + curve = BN254/BN_SNARK1 is allowed if maxUnitSize = 4 + curve = BN381_1/BN381_2/BLS12_381 are allowed if maxUnitSize = 6 + This parameter is used to detect a library compiled with different MCLBN_FP_UNIT_SIZE for safety. 
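+	A minimal call sketch (illustrative only, not part of the API above;
+	assumes a build with MCLBN_FP_UNIT_SIZE = 4):
+		if (mclBn_init(mclBn_CurveFp254BNb, MCLBN_COMPILED_TIME_VAR) != 0) {
+			// header/library mismatch or unsupported curve
+		}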
+ @note not threadsafe + @note BN_init is used in libeay32 +*/ +MCLBN_DLL_API int mclBn_init(int curve, int compiledTimeVar); + + +/* + pairing : G1 x G2 -> GT + #G1 = #G2 = r + G1 is a curve defined on Fp + + serialized size of elements + |Fr| |Fp| + BN254 32 32 + BN381 48 48 + BLS12_381 32 48 + BN462 58 58 + |G1| = |Fp| + |G2| = |G1| * 2 + |GT| = |G1| * 12 +*/ +/* + return the num of Unit(=uint64_t) to store Fr +*/ +MCLBN_DLL_API int mclBn_getOpUnitSize(void); + +/* + return bytes for serialized G1(=Fp) +*/ +MCLBN_DLL_API int mclBn_getG1ByteSize(void); +/* + return bytes for serialized Fr +*/ +MCLBN_DLL_API int mclBn_getFrByteSize(void); +/* + return bytes for serialized Fp +*/ +MCLBN_DLL_API int mclBn_getFpByteSize(void); + +/* + return decimal string of the order of the curve(=the characteristic of Fr) + return str(buf) if success +*/ +MCLBN_DLL_API mclSize mclBn_getCurveOrder(char *buf, mclSize maxBufSize); + +/* + return decimal string of the characteristic of Fp + return str(buf) if success +*/ +MCLBN_DLL_API mclSize mclBn_getFieldOrder(char *buf, mclSize maxBufSize); + +//////////////////////////////////////////////// +/* + deserialize + return read size if success else 0 +*/ +MCLBN_DLL_API mclSize mclBnFr_deserialize(mclBnFr *x, const void *buf, mclSize bufSize); +MCLBN_DLL_API mclSize mclBnG1_deserialize(mclBnG1 *x, const void *buf, mclSize bufSize); +MCLBN_DLL_API mclSize mclBnG2_deserialize(mclBnG2 *x, const void *buf, mclSize bufSize); +MCLBN_DLL_API mclSize mclBnGT_deserialize(mclBnGT *x, const void *buf, mclSize bufSize); +MCLBN_DLL_API mclSize mclBnFp_deserialize(mclBnFp *x, const void *buf, mclSize bufSize); +MCLBN_DLL_API mclSize mclBnFp2_deserialize(mclBnFp2 *x, const void *buf, mclSize bufSize); + +/* + serialize + return written byte if sucess else 0 +*/ +MCLBN_DLL_API mclSize mclBnFr_serialize(void *buf, mclSize maxBufSize, const mclBnFr *x); +MCLBN_DLL_API mclSize mclBnG1_serialize(void *buf, mclSize maxBufSize, const mclBnG1 *x); +MCLBN_DLL_API mclSize mclBnG2_serialize(void *buf, mclSize maxBufSize, const mclBnG2 *x); +MCLBN_DLL_API mclSize mclBnGT_serialize(void *buf, mclSize maxBufSize, const mclBnGT *x); +MCLBN_DLL_API mclSize mclBnFp_serialize(void *buf, mclSize maxBufSize, const mclBnFp *x); +MCLBN_DLL_API mclSize mclBnFp2_serialize(void *buf, mclSize maxBufSize, const mclBnFp2 *x); + +/* + set string + ioMode + 10 : decimal number + 16 : hexadecimal number + MCLBN_IO_SERIALIZE_HEX_STR : hex string of serialized data + return 0 if success else -1 +*/ +MCLBN_DLL_API int mclBnFr_setStr(mclBnFr *x, const char *buf, mclSize bufSize, int ioMode); +MCLBN_DLL_API int mclBnG1_setStr(mclBnG1 *x, const char *buf, mclSize bufSize, int ioMode); +MCLBN_DLL_API int mclBnG2_setStr(mclBnG2 *x, const char *buf, mclSize bufSize, int ioMode); +MCLBN_DLL_API int mclBnGT_setStr(mclBnGT *x, const char *buf, mclSize bufSize, int ioMode); +MCLBN_DLL_API int mclBnFp_setStr(mclBnFp *x, const char *buf, mclSize bufSize, int ioMode); + +/* + buf is terminated by '\0' + return strlen(buf) if sucess else 0 +*/ +MCLBN_DLL_API mclSize mclBnFr_getStr(char *buf, mclSize maxBufSize, const mclBnFr *x, int ioMode); +MCLBN_DLL_API mclSize mclBnG1_getStr(char *buf, mclSize maxBufSize, const mclBnG1 *x, int ioMode); +MCLBN_DLL_API mclSize mclBnG2_getStr(char *buf, mclSize maxBufSize, const mclBnG2 *x, int ioMode); +MCLBN_DLL_API mclSize mclBnGT_getStr(char *buf, mclSize maxBufSize, const mclBnGT *x, int ioMode); +MCLBN_DLL_API mclSize mclBnFp_getStr(char *buf, mclSize maxBufSize, const mclBnFp *x, 
int ioMode); + +// set zero +MCLBN_DLL_API void mclBnFr_clear(mclBnFr *x); +MCLBN_DLL_API void mclBnFp_clear(mclBnFp *x); +MCLBN_DLL_API void mclBnFp2_clear(mclBnFp2 *x); + +// set x to y +MCLBN_DLL_API void mclBnFr_setInt(mclBnFr *y, mclInt x); +MCLBN_DLL_API void mclBnFr_setInt32(mclBnFr *y, int x); + +// x = buf & (1 << bitLen(r)) - 1 +// if (x >= r) x &= (1 << (bitLen(r) - 1)) - 1 +// always return 0 +MCLBN_DLL_API int mclBnFr_setLittleEndian(mclBnFr *x, const void *buf, mclSize bufSize); +MCLBN_DLL_API int mclBnFp_setLittleEndian(mclBnFp *x, const void *buf, mclSize bufSize); + +// set (buf mod r) to x +// return 0 if bufSize <= (byte size of Fr * 2) else -1 +MCLBN_DLL_API int mclBnFr_setLittleEndianMod(mclBnFr *x, const void *buf, mclSize bufSize); +// set (buf mod p) to x +// return 0 if bufSize <= (byte size of Fp * 2) else -1 +MCLBN_DLL_API int mclBnFp_setLittleEndianMod(mclBnFp *x, const void *buf, mclSize bufSize); + +// return 1 if true and 0 otherwise +MCLBN_DLL_API int mclBnFr_isValid(const mclBnFr *x); +MCLBN_DLL_API int mclBnFr_isEqual(const mclBnFr *x, const mclBnFr *y); +MCLBN_DLL_API int mclBnFr_isZero(const mclBnFr *x); +MCLBN_DLL_API int mclBnFr_isOne(const mclBnFr *x); + +MCLBN_DLL_API int mclBnFp_isEqual(const mclBnFp *x, const mclBnFp *y); +MCLBN_DLL_API int mclBnFp2_isEqual(const mclBnFp2 *x, const mclBnFp2 *y); + +#ifndef MCL_DONT_USE_CSRPNG +// return 0 if success +MCLBN_DLL_API int mclBnFr_setByCSPRNG(mclBnFr *x); + +/* + set user-defined random function for setByCSPRNG + @param self [in] user-defined pointer + @param readFunc [in] user-defined function, + which writes random bufSize bytes to buf and returns bufSize if success else returns 0 + @note if self == 0 and readFunc == 0 then set default random function + @note not threadsafe +*/ +MCLBN_DLL_API void mclBn_setRandFunc(void *self, unsigned int (*readFunc)(void *self, void *buf, unsigned int bufSize)); +#endif + +// hash(s) and set x +// return 0 if success +MCLBN_DLL_API int mclBnFr_setHashOf(mclBnFr *x, const void *buf, mclSize bufSize); +MCLBN_DLL_API int mclBnFp_setHashOf(mclBnFp *x, const void *buf, mclSize bufSize); + +// map x to y +// return 0 if success else -1 +MCLBN_DLL_API int mclBnFp_mapToG1(mclBnG1 *y, const mclBnFp *x); +MCLBN_DLL_API int mclBnFp2_mapToG2(mclBnG2 *y, const mclBnFp2 *x); + +MCLBN_DLL_API void mclBnFr_neg(mclBnFr *y, const mclBnFr *x); +MCLBN_DLL_API void mclBnFr_inv(mclBnFr *y, const mclBnFr *x); +MCLBN_DLL_API void mclBnFr_sqr(mclBnFr *y, const mclBnFr *x); +MCLBN_DLL_API void mclBnFr_add(mclBnFr *z, const mclBnFr *x, const mclBnFr *y); +MCLBN_DLL_API void mclBnFr_sub(mclBnFr *z, const mclBnFr *x, const mclBnFr *y); +MCLBN_DLL_API void mclBnFr_mul(mclBnFr *z, const mclBnFr *x, const mclBnFr *y); +MCLBN_DLL_API void mclBnFr_div(mclBnFr *z, const mclBnFr *x, const mclBnFr *y); + +//////////////////////////////////////////////// +// set zero +MCLBN_DLL_API void mclBnG1_clear(mclBnG1 *x); + + +// return 1 if true and 0 otherwise +MCLBN_DLL_API int mclBnG1_isValid(const mclBnG1 *x); +MCLBN_DLL_API int mclBnG1_isEqual(const mclBnG1 *x, const mclBnG1 *y); +MCLBN_DLL_API int mclBnG1_isZero(const mclBnG1 *x); +/* + return 1 if x has a correct order + x is valid point of G1 if and only if + mclBnG1_isValid() is true, which contains mclBnG1_isValidOrder() if mclBn_verifyOrderG1(true) + mclBnG1_isValid() && mclBnG1_isValidOrder() is true if mclBn_verifyOrderG1(false) +*/ +MCLBN_DLL_API int mclBnG1_isValidOrder(const mclBnG1 *x); + +MCLBN_DLL_API int mclBnG1_hashAndMapTo(mclBnG1 *x, 
const void *buf, mclSize bufSize); + + +MCLBN_DLL_API void mclBnG1_neg(mclBnG1 *y, const mclBnG1 *x); +MCLBN_DLL_API void mclBnG1_dbl(mclBnG1 *y, const mclBnG1 *x); +MCLBN_DLL_API void mclBnG1_normalize(mclBnG1 *y, const mclBnG1 *x); +MCLBN_DLL_API void mclBnG1_add(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y); +MCLBN_DLL_API void mclBnG1_sub(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y); +MCLBN_DLL_API void mclBnG1_mul(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y); + +/* + constant time mul +*/ +MCLBN_DLL_API void mclBnG1_mulCT(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y); + +//////////////////////////////////////////////// +// set zero +MCLBN_DLL_API void mclBnG2_clear(mclBnG2 *x); + +// return 1 if true and 0 otherwise +MCLBN_DLL_API int mclBnG2_isValid(const mclBnG2 *x); +MCLBN_DLL_API int mclBnG2_isEqual(const mclBnG2 *x, const mclBnG2 *y); +MCLBN_DLL_API int mclBnG2_isZero(const mclBnG2 *x); +// return 1 if x has a correct order +MCLBN_DLL_API int mclBnG2_isValidOrder(const mclBnG2 *x); + +MCLBN_DLL_API int mclBnG2_hashAndMapTo(mclBnG2 *x, const void *buf, mclSize bufSize); + +// return written size if sucess else 0 + +MCLBN_DLL_API void mclBnG2_neg(mclBnG2 *y, const mclBnG2 *x); +MCLBN_DLL_API void mclBnG2_dbl(mclBnG2 *y, const mclBnG2 *x); +MCLBN_DLL_API void mclBnG2_normalize(mclBnG2 *y, const mclBnG2 *x); +MCLBN_DLL_API void mclBnG2_add(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y); +MCLBN_DLL_API void mclBnG2_sub(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y); +MCLBN_DLL_API void mclBnG2_mul(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y); +/* + constant time mul +*/ +MCLBN_DLL_API void mclBnG2_mulCT(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y); + +//////////////////////////////////////////////// +// set zero +MCLBN_DLL_API void mclBnGT_clear(mclBnGT *x); +// set x to y +MCLBN_DLL_API void mclBnGT_setInt(mclBnGT *y, mclInt x); +MCLBN_DLL_API void mclBnGT_setInt32(mclBnGT *y, int x); + +// return 1 if true and 0 otherwise +MCLBN_DLL_API int mclBnGT_isEqual(const mclBnGT *x, const mclBnGT *y); +MCLBN_DLL_API int mclBnGT_isZero(const mclBnGT *x); +MCLBN_DLL_API int mclBnGT_isOne(const mclBnGT *x); + +MCLBN_DLL_API void mclBnGT_neg(mclBnGT *y, const mclBnGT *x); +MCLBN_DLL_API void mclBnGT_inv(mclBnGT *y, const mclBnGT *x); +MCLBN_DLL_API void mclBnGT_sqr(mclBnGT *y, const mclBnGT *x); +MCLBN_DLL_API void mclBnGT_add(mclBnGT *z, const mclBnGT *x, const mclBnGT *y); +MCLBN_DLL_API void mclBnGT_sub(mclBnGT *z, const mclBnGT *x, const mclBnGT *y); +MCLBN_DLL_API void mclBnGT_mul(mclBnGT *z, const mclBnGT *x, const mclBnGT *y); +MCLBN_DLL_API void mclBnGT_div(mclBnGT *z, const mclBnGT *x, const mclBnGT *y); + +/* + pow for all elements of Fp12 +*/ +MCLBN_DLL_API void mclBnGT_powGeneric(mclBnGT *z, const mclBnGT *x, const mclBnFr *y); +/* + pow for only {x|x^r = 1} in Fp12 by GLV method + the value generated by pairing satisfies the condition +*/ +MCLBN_DLL_API void mclBnGT_pow(mclBnGT *z, const mclBnGT *x, const mclBnFr *y); + +MCLBN_DLL_API void mclBn_pairing(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y); +MCLBN_DLL_API void mclBn_finalExp(mclBnGT *y, const mclBnGT *x); +MCLBN_DLL_API void mclBn_millerLoop(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y); + +// return precomputedQcoeffSize * sizeof(Fp6) / sizeof(uint64_t) +MCLBN_DLL_API int mclBn_getUint64NumToPrecompute(void); + +// allocate Qbuf[MCLBN_getUint64NumToPrecompute()] before calling this +MCLBN_DLL_API void mclBn_precomputeG2(uint64_t *Qbuf, const mclBnG2 *Q); + +MCLBN_DLL_API void 
mclBn_precomputedMillerLoop(mclBnGT *f, const mclBnG1 *P, const uint64_t *Qbuf); +MCLBN_DLL_API void mclBn_precomputedMillerLoop2(mclBnGT *f, const mclBnG1 *P1, const uint64_t *Q1buf, const mclBnG1 *P2, const uint64_t *Q2buf); +MCLBN_DLL_API void mclBn_precomputedMillerLoop2mixed(mclBnGT *f, const mclBnG1 *P1, const mclBnG2 *Q1, const mclBnG1 *P2, const uint64_t *Q2buf); + +/* + Lagrange interpolation + recover out = y(0) by { (xVec[i], yVec[i]) } + return 0 if success else -1 + @note *out = yVec[0] if k = 1 + @note k >= 2, xVec[i] != 0, xVec[i] != xVec[j] for i != j +*/ +MCLBN_DLL_API int mclBn_FrLagrangeInterpolation(mclBnFr *out, const mclBnFr *xVec, const mclBnFr *yVec, mclSize k); +MCLBN_DLL_API int mclBn_G1LagrangeInterpolation(mclBnG1 *out, const mclBnFr *xVec, const mclBnG1 *yVec, mclSize k); +MCLBN_DLL_API int mclBn_G2LagrangeInterpolation(mclBnG2 *out, const mclBnFr *xVec, const mclBnG2 *yVec, mclSize k); + +/* + evaluate polynomial + out = f(x) = c[0] + c[1] * x + c[2] * x^2 + ... + c[cSize - 1] * x^(cSize - 1) + @note cSize >= 2 +*/ +MCLBN_DLL_API int mclBn_FrEvaluatePolynomial(mclBnFr *out, const mclBnFr *cVec, mclSize cSize, const mclBnFr *x); +MCLBN_DLL_API int mclBn_G1EvaluatePolynomial(mclBnG1 *out, const mclBnG1 *cVec, mclSize cSize, const mclBnFr *x); +MCLBN_DLL_API int mclBn_G2EvaluatePolynomial(mclBnG2 *out, const mclBnG2 *cVec, mclSize cSize, const mclBnFr *x); + +/* + verify whether a point of an elliptic curve has order r + This api affetcs setStr(), deserialize() for G2 on BN or G1/G2 on BLS12 + @param doVerify [in] does not verify if zero(default 1) +*/ +MCLBN_DLL_API void mclBn_verifyOrderG1(int doVerify); +MCLBN_DLL_API void mclBn_verifyOrderG2(int doVerify); + +/* + EXPERIMENTAL + only for curve = MCL_SECP* or MCL_NIST* + return standard base point of the current elliptic curve +*/ +MCLBN_DLL_API int mclBnG1_getBasePoint(mclBnG1 *x); + +#ifdef __cplusplus +} +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/bn.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/bn.hpp new file mode 100644 index 000000000..5ebe5d956 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/bn.hpp @@ -0,0 +1,2261 @@ +#pragma once +/** + @file + @brief optimal ate pairing over BN-curve / BLS12-curve + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include +#include +#include +#include +#ifndef CYBOZU_DONT_USE_EXCEPTION +#include +#endif + +/* + set bit size of Fp and Fr +*/ +#ifndef MCL_MAX_FP_BIT_SIZE + #define MCL_MAX_FP_BIT_SIZE 256 +#endif + +#ifndef MCL_MAX_FR_BIT_SIZE + #define MCL_MAX_FR_BIT_SIZE MCL_MAX_FP_BIT_SIZE +#endif +namespace mcl { + +struct CurveParam { + /* + y^2 = x^3 + b + i^2 = -1 + xi = xi_a + i + v^3 = xi + w^2 = v + */ + const char *z; + int b; // y^2 = x^3 + b + int xi_a; // xi = xi_a + i + /* + BN254, BN381 : Dtype + BLS12-381 : Mtype + */ + bool isMtype; + int curveType; // same in curve_type.h + bool operator==(const CurveParam& rhs) const + { + return strcmp(z, rhs.z) == 0 && b == rhs.b && xi_a == rhs.xi_a && isMtype == rhs.isMtype; + } + bool operator!=(const CurveParam& rhs) const { return !operator==(rhs); } +}; + +const CurveParam BN254 = { "-0x4080000000000001", 2, 1, false, MCL_BN254 }; // -(2^62 + 2^55 + 1) +// provisional(experimental) param with maxBitSize = 384 +const CurveParam BN381_1 = { "-0x400011000000000000000001", 2, 1, false, MCL_BN381_1 }; // -(2^94 + 2^76 + 2^72 + 1) // A Family of Implementation-Friendly BN Elliptic Curves 
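+// (illustrative check, not upstream code) each z string above is the hex form
+// of the sparse sum in its trailing comment; e.g. for BN254:
+//   2^62 + 2^55 + 1 = 0x4000000000000000 + 0x0080000000000000 + 0x1
+//                   = 0x4080000000000001, matching "-0x4080000000000001".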
+const CurveParam BN381_2 = { "-0x400040090001000000000001", 2, 1, false, MCL_BN381_2 }; // -(2^94 + 2^78 + 2^67 + 2^64 + 2^48 + 1) // used in relic-toolkit +const CurveParam BN462 = { "0x4001fffffffffffffffffffffbfff", 5, 2, false, MCL_BN462 }; // 2^114 + 2^101 - 2^14 - 1 // https://eprint.iacr.org/2017/334 +const CurveParam BN_SNARK1 = { "4965661367192848881", 3, 9, false, MCL_BN_SNARK1 }; +const CurveParam BLS12_381 = { "-0xd201000000010000", 4, 1, true, MCL_BLS12_381 }; +const CurveParam BN160 = { "0x4000000031", 3, 4, false, MCL_BN160 }; + +inline const CurveParam& getCurveParam(int type) +{ + switch (type) { + case MCL_BN254: return mcl::BN254; + case MCL_BN381_1: return mcl::BN381_1; + case MCL_BN381_2: return mcl::BN381_2; + case MCL_BN462: return mcl::BN462; + case MCL_BN_SNARK1: return mcl::BN_SNARK1; + case MCL_BLS12_381: return mcl::BLS12_381; + case MCL_BN160: return mcl::BN160; + default: + assert(0); + return mcl::BN254; + } +} + +namespace bn { + +namespace local { +struct FpTag; +struct FrTag; +} + +typedef mcl::FpT Fp; +typedef mcl::FpT Fr; +typedef mcl::Fp2T Fp2; +typedef mcl::Fp6T Fp6; +typedef mcl::Fp12T Fp12; +typedef mcl::EcT G1; +typedef mcl::EcT G2; +typedef Fp12 GT; + +typedef mcl::FpDblT FpDbl; +typedef mcl::Fp2DblT Fp2Dbl; + +inline void Frobenius(Fp2& y, const Fp2& x) +{ + Fp2::Frobenius(y, x); +} +inline void Frobenius(Fp12& y, const Fp12& x) +{ + Fp12::Frobenius(y, x); +} +/* + twisted Frobenius for G2 +*/ +void Frobenius(G2& D, const G2& S); +void Frobenius2(G2& D, const G2& S); +void Frobenius3(G2& D, const G2& S); + +namespace local { + +typedef mcl::FixedArray SignVec; + +inline size_t getPrecomputeQcoeffSize(const SignVec& sv) +{ + size_t idx = 2 + 2; + for (size_t i = 2; i < sv.size(); i++) { + idx++; + if (sv[i]) idx++; + } + return idx; +} + +template +X evalPoly(const X& x, const C (&c)[N]) +{ + X ret = c[N - 1]; + for (size_t i = 1; i < N; i++) { + ret *= x; + ret += c[N - 1 - i]; + } + return ret; +} + +enum TwistBtype { + tb_generic, + tb_1m1i, // 1 - 1i + tb_1m2i // 1 - 2i +}; + +/* + l = (a, b, c) => (a, b * P.y, c * P.x) +*/ +inline void updateLine(Fp6& l, const G1& P) +{ + l.b.a *= P.y; + l.b.b *= P.y; + l.c.a *= P.x; + l.c.b *= P.x; +} + +struct Compress { + Fp12& z_; + Fp2& g1_; + Fp2& g2_; + Fp2& g3_; + Fp2& g4_; + Fp2& g5_; + // z is output area + Compress(Fp12& z, const Fp12& x) + : z_(z) + , g1_(z.getFp2()[4]) + , g2_(z.getFp2()[3]) + , g3_(z.getFp2()[2]) + , g4_(z.getFp2()[1]) + , g5_(z.getFp2()[5]) + { + g2_ = x.getFp2()[3]; + g3_ = x.getFp2()[2]; + g4_ = x.getFp2()[1]; + g5_ = x.getFp2()[5]; + } + Compress(Fp12& z, const Compress& c) + : z_(z) + , g1_(z.getFp2()[4]) + , g2_(z.getFp2()[3]) + , g3_(z.getFp2()[2]) + , g4_(z.getFp2()[1]) + , g5_(z.getFp2()[5]) + { + g2_ = c.g2_; + g3_ = c.g3_; + g4_ = c.g4_; + g5_ = c.g5_; + } + void decompressBeforeInv(Fp2& nume, Fp2& denomi) const + { + assert(&nume != &denomi); + + if (g2_.isZero()) { + Fp2::add(nume, g4_, g4_); + nume *= g5_; + denomi = g3_; + } else { + Fp2 t; + Fp2::sqr(nume, g5_); + Fp2::mul_xi(denomi, nume); + Fp2::sqr(nume, g4_); + Fp2::sub(t, nume, g3_); + t += t; + t += nume; + Fp2::add(nume, denomi, t); + Fp2::divBy4(nume, nume); + denomi = g2_; + } + } + + // output to z + void decompressAfterInv() + { + Fp2& g0 = z_.getFp2()[0]; + Fp2 t0, t1; + // Compute g0. 
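+		// i.e. g0 = (2*g1^2 - 3*g3*g4 + g2*g5) * xi + 1,
+		// accumulated in t0/t1 before the final mul_xi and +1 below.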
+ Fp2::sqr(t0, g1_); + Fp2::mul(t1, g3_, g4_); + t0 -= t1; + t0 += t0; + t0 -= t1; + Fp2::mul(t1, g2_, g5_); + t0 += t1; + Fp2::mul_xi(g0, t0); + g0.a += Fp::one(); + } + +public: + void decompress() // for test + { + Fp2 nume, denomi; + decompressBeforeInv(nume, denomi); + Fp2::inv(denomi, denomi); + g1_ = nume * denomi; // g1 is recoverd. + decompressAfterInv(); + } + /* + 2275clk * 186 = 423Kclk QQQ + */ + static void squareC(Compress& z) + { + Fp2 t0, t1, t2; + Fp2Dbl T0, T1, T2, T3; + Fp2Dbl::sqrPre(T0, z.g4_); + Fp2Dbl::sqrPre(T1, z.g5_); + Fp2Dbl::mul_xi(T2, T1); + T2 += T0; + Fp2Dbl::mod(t2, T2); + Fp2::add(t0, z.g4_, z.g5_); + Fp2Dbl::sqrPre(T2, t0); + T0 += T1; + T2 -= T0; + Fp2Dbl::mod(t0, T2); + Fp2::add(t1, z.g2_, z.g3_); + Fp2Dbl::sqrPre(T3, t1); + Fp2Dbl::sqrPre(T2, z.g2_); + Fp2::mul_xi(t1, t0); + z.g2_ += t1; + z.g2_ += z.g2_; + z.g2_ += t1; + Fp2::sub(t1, t2, z.g3_); + t1 += t1; + Fp2Dbl::sqrPre(T1, z.g3_); + Fp2::add(z.g3_, t1, t2); + Fp2Dbl::mul_xi(T0, T1); + T0 += T2; + Fp2Dbl::mod(t0, T0); + Fp2::sub(z.g4_, t0, z.g4_); + z.g4_ += z.g4_; + z.g4_ += t0; + Fp2Dbl::addPre(T2, T2, T1); + T3 -= T2; + Fp2Dbl::mod(t0, T3); + z.g5_ += t0; + z.g5_ += z.g5_; + z.g5_ += t0; + } + static void square_n(Compress& z, int n) + { + for (int i = 0; i < n; i++) { + squareC(z); + } + } + /* + Exponentiation over compression for: + z = x^Param::z.abs() + */ + static void fixed_power(Fp12& z, const Fp12& x) + { + if (x.isOne()) { + z = 1; + return; + } + Fp12 x_org = x; + Fp12 d62; + Fp2 c55nume, c55denomi, c62nume, c62denomi; + Compress c55(z, x); + square_n(c55, 55); + c55.decompressBeforeInv(c55nume, c55denomi); + Compress c62(d62, c55); + square_n(c62, 62 - 55); + c62.decompressBeforeInv(c62nume, c62denomi); + Fp2 acc; + Fp2::mul(acc, c55denomi, c62denomi); + Fp2::inv(acc, acc); + Fp2 t; + Fp2::mul(t, acc, c62denomi); + Fp2::mul(c55.g1_, c55nume, t); + c55.decompressAfterInv(); + Fp2::mul(t, acc, c55denomi); + Fp2::mul(c62.g1_, c62nume, t); + c62.decompressAfterInv(); + z *= x_org; + z *= d62; + } +}; + +struct MapTo { + enum { + BNtype, + BLS12type, + STD_ECtype + }; + Fp c1_; // sqrt(-3) + Fp c2_; // (-1 + sqrt(-3)) / 2 + mpz_class z_; + mpz_class cofactor_; + int type_; + bool useNaiveMapTo_; + + int legendre(bool *pb, const Fp& x) const + { + mpz_class xx; + x.getMpz(pb, xx); + if (!*pb) return 0; + return gmp::legendre(xx, Fp::getOp().mp); + } + int legendre(bool *pb, const Fp2& x) const + { + Fp y; + Fp2::norm(y, x); + return legendre(pb, y); + } + void mulFp(Fp& x, const Fp& y) const + { + x *= y; + } + void mulFp(Fp2& x, const Fp& y) const + { + x.a *= y; + x.b *= y; + } + /* + P.-A. Fouque and M. Tibouchi, + "Indifferentiable hashing to Barreto Naehrig curves," + in Proc. Int. Conf. Cryptol. Inform. Security Latin Amer., 2012, vol. 7533, pp.1-17. 
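+	With w as defined below, the loop tries the three candidate x-coordinates
+		x1 = c2 - t*w,  x2 = -1 - x1,  x3 = 1 + 1/w^2
+	and keeps the first whose Weierstrass RHS is a square
+	(y is negated when legendre(t) < 0).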
+ + w = sqrt(-3) t / (1 + b + t^2) + Remark: throw exception if t = 0, c1, -c1 and b = 2 + */ + template + bool calcBN(G& P, const F& t) const + { + F x, y, w; + bool b; + bool negative = legendre(&b, t) < 0; + if (!b) return false; + if (t.isZero()) return false; + F::sqr(w, t); + w += G::b_; + *w.getFp0() += Fp::one(); + if (w.isZero()) return false; + F::inv(w, w); + mulFp(w, c1_); + w *= t; + for (int i = 0; i < 3; i++) { + switch (i) { + case 0: F::mul(x, t, w); F::neg(x, x); *x.getFp0() += c2_; break; + case 1: F::neg(x, x); *x.getFp0() -= Fp::one(); break; + case 2: F::sqr(x, w); F::inv(x, x); *x.getFp0() += Fp::one(); break; + } + G::getWeierstrass(y, x); + if (F::squareRoot(y, y)) { + if (negative) F::neg(y, y); + P.set(&b, x, y, false); + assert(b); + return true; + } + } + return false; + } + /* + Faster Hashing to G2 + Laura Fuentes-Castaneda, Edward Knapp, Francisco Rodriguez-Henriquez + section 6.1 + for BN + Q = zP + Frob(3zP) + Frob^2(zP) + Frob^3(P) + = -(18x^3 + 12x^2 + 3x + 1)cofactor_ P + */ + void mulByCofactorBN(G2& Q, const G2& P) const + { +#if 0 + G2::mulGeneric(Q, P, cofactor_); +#else +#if 0 + mpz_class t = -(1 + z_ * (3 + z_ * (12 + z_ * 18))); + G2::mulGeneric(Q, P, t * cofactor_); +#else + G2 T0, T1, T2; + /* + G2::mul (GLV method) can't be used because P is not on G2 + */ + G2::mulGeneric(T0, P, z_); + G2::dbl(T1, T0); + T1 += T0; // 3zP + Frobenius(T1, T1); + Frobenius2(T2, T0); + T0 += T1; + T0 += T2; + Frobenius3(T2, P); + G2::add(Q, T0, T2); +#endif +#endif + } + /* + 1.2~1.4 times faster than calBN + */ + template + void naiveMapTo(G& P, const F& t) const + { + F x = t; + for (;;) { + F y; + G::getWeierstrass(y, x); + if (F::squareRoot(y, y)) { + bool b; + P.set(&b, x, y, false); + assert(b); + return; + } + *x.getFp0() += Fp::one(); + } + } + /* + #(Fp) / r = (z + 1 - t) / r = (z - 1)^2 / 3 + */ + void mulByCofactorBLS12(G1& Q, const G1& P) const + { + G1::mulGeneric(Q, P, cofactor_); + } + /* + Efficient hash maps to G2 on BLS curves + Alessandro Budroni, Federico Pintore + Q = (z(z-1)-1)P + Frob((z-1)P) + Frob^2(2P) + */ + void mulByCofactorBLS12(G2& Q, const G2& P) const + { + G2 T0, T1; + G2::mulGeneric(T0, P, z_ - 1); + G2::mulGeneric(T1, T0, z_); + T1 -= P; + Frobenius(T0, T0); + T0 += T1; + G2::dbl(T1, P); + Frobenius2(T1, T1); + G2::add(Q, T0, T1); + } + /* + cofactor_ is for G2(not used now) + */ + void initBN(const mpz_class& cofactor, const mpz_class &z, int curveType) + { + z_ = z; + cofactor_ = cofactor; + if (curveType == MCL_BN254) { + const char *c1 = "252364824000000126cd890000000003cf0f0000000000060c00000000000004"; + const char *c2 = "25236482400000017080eb4000000006181800000000000cd98000000000000b"; + bool b; + c1_.setStr(&b, c1, 16); + c2_.setStr(&b, c2, 16); + (void)b; + return; + } + bool b = Fp::squareRoot(c1_, -3); + assert(b); + (void)b; + c2_ = (c1_ - 1) / 2; + } + void initBLS12(const mpz_class& z) + { + z_ = z; + // cofactor for G1 + cofactor_ = (z - 1) * (z - 1) / 3; + bool b = Fp::squareRoot(c1_, -3); + assert(b); + (void)b; + c2_ = (c1_ - 1) / 2; + } + /* + if type == STD_ECtype, then cofactor, z are not used. + */ + void init(const mpz_class& cofactor, const mpz_class &z, int curveType) + { + if (0 <= curveType && curveType < MCL_EC_BEGIN) { + type_ = curveType == MCL_BLS12_381 ? 
BLS12type : BNtype; + } else { + type_ = STD_ECtype; + } + if (type_ == STD_ECtype) { + useNaiveMapTo_ = true; + } else { + useNaiveMapTo_ = false; + } +#ifdef MCL_USE_OLD_MAPTO_FOR_BLS12 + if (type == BLS12type) useNaiveMapTo_ = true; +#endif + if (type_ == BNtype) { + initBN(cofactor, z, curveType); + } else if (type_ == BLS12type) { + initBLS12(z); + } + } + bool calcG1(G1& P, const Fp& t) const + { + if (useNaiveMapTo_) { + naiveMapTo(P, t); + } else { + if (!calcBN(P, t)) return false; + } + switch (type_) { + case BNtype: + // no subgroup + break; + case BLS12type: + mulByCofactorBLS12(P, P); + break; + } + assert(P.isValid()); + return true; + } + /* + get the element in G2 by multiplying the cofactor + */ + bool calcG2(G2& P, const Fp2& t) const + { + if (useNaiveMapTo_) { + naiveMapTo(P, t); + } else { + if (!calcBN(P, t)) return false; + } + switch(type_) { + case BNtype: + mulByCofactorBN(P, P); + break; + case BLS12type: + mulByCofactorBLS12(P, P); + break; + } + assert(P.isValid()); + return true; + } +}; + +/* + Software implementation of Attribute-Based Encryption: Appendixes + GLV for G1 on BN/BLS12 +*/ +struct GLV1 { + Fp rw; // rw = 1 / w = (-1 - sqrt(-3)) / 2 + size_t rBitSize; + mpz_class v0, v1; + mpz_class B[2][2]; + mpz_class r; +private: + bool usePrecomputedTable(int curveType) + { + if (curveType < 0) return false; + const struct Tbl { + int curveType; + const char *rw; + size_t rBitSize; + const char *v0, *v1; + const char *B[2][2]; + const char *r; + } tbl[] = { + { + MCL_BN254, + "49b36240000000024909000000000006cd80000000000007", + 256, + "2a01fab7e04a017b9c0eb31ff36bf3357", + "37937ca688a6b4904", + { + { + "61818000000000028500000000000004", + "8100000000000001", + }, + { + "8100000000000001", + "-61818000000000020400000000000003", + }, + }, + "2523648240000001ba344d8000000007ff9f800000000010a10000000000000d", + }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + if (tbl[i].curveType != curveType) continue; + bool b; + rw.setStr(&b, tbl[i].rw, 16); if (!b) continue; + rBitSize = tbl[i].rBitSize; + mcl::gmp::setStr(&b, v0, tbl[i].v0, 16); if (!b) continue; + mcl::gmp::setStr(&b, v1, tbl[i].v1, 16); if (!b) continue; + mcl::gmp::setStr(&b, B[0][0], tbl[i].B[0][0], 16); if (!b) continue; + mcl::gmp::setStr(&b, B[0][1], tbl[i].B[0][1], 16); if (!b) continue; + mcl::gmp::setStr(&b, B[1][0], tbl[i].B[1][0], 16); if (!b) continue; + mcl::gmp::setStr(&b, B[1][1], tbl[i].B[1][1], 16); if (!b) continue; + mcl::gmp::setStr(&b, r, tbl[i].r, 16); if (!b) continue; + return true; + } + return false; + } +public: + bool operator==(const GLV1& rhs) const + { + return rw == rhs.rw && rBitSize == rhs.rBitSize && v0 == rhs.v0 && v1 == rhs.v1 + && B[0][0] == rhs.B[0][0] && B[0][1] == rhs.B[0][1] && B[1][0] == rhs.B[1][0] + && B[1][1] == rhs.B[1][1] && r == rhs.r; + } + bool operator!=(const GLV1& rhs) const { return !operator==(rhs); } +#ifndef CYBOZU_DONT_USE_STRING + void dump(const mpz_class& x) const + { + printf("\"%s\",\n", mcl::gmp::getStr(x, 16).c_str()); + } + void dump() const + { + printf("\"%s\",\n", rw.getStr(16).c_str()); + printf("%d,\n", (int)rBitSize); + dump(v0); + dump(v1); + dump(B[0][0]); dump(B[0][1]); dump(B[1][0]); dump(B[1][1]); + dump(r); + } +#endif + void init(const mpz_class& r, const mpz_class& z, bool isBLS12 = false, int curveType = -1) + { + if (usePrecomputedTable(curveType)) return; + bool b = Fp::squareRoot(rw, -3); + assert(b); + (void)b; + rw = -(rw + 1) / 2; + this->r = r; + rBitSize = gmp::getBitSize(r); + rBitSize = 
(rBitSize + fp::UnitBitSize - 1) & ~(fp::UnitBitSize - 1);// a little better size + if (isBLS12) { + /* + BLS12 + L = z^4 + (-z^2+1) + L = 0 + 1 + z^2 L = 0 + */ + B[0][0] = -z * z + 1; + B[0][1] = 1; + B[1][0] = 1; + B[1][1] = z * z; + } else { + /* + BN + L = 36z^4 - 1 + (6z^2+2z) - (2z+1) L = 0 + (-2z-1) - (6z^2+4z+1)L = 0 + */ + B[0][0] = 6 * z * z + 2 * z; + B[0][1] = -2 * z - 1; + B[1][0] = -2 * z - 1; + B[1][1] = -6 * z * z - 4 * z - 1; + } + // [v0 v1] = [r 0] * B^(-1) + v0 = ((-B[1][1]) << rBitSize) / r; + v1 = ((B[1][0]) << rBitSize) / r; + } + /* + L = lambda = p^4 + L (x, y) = (rw x, y) + */ + void mulLambda(G1& Q, const G1& P) const + { + Fp::mul(Q.x, P.x, rw); + Q.y = P.y; + Q.z = P.z; + } + /* + x = a + b * lambda mod r + */ + void split(mpz_class& a, mpz_class& b, const mpz_class& x) const + { + mpz_class t; + t = (x * v0) >> rBitSize; + b = (x * v1) >> rBitSize; + a = x - (t * B[0][0] + b * B[1][0]); + b = - (t * B[0][1] + b * B[1][1]); + } + void mul(G1& Q, const G1& P, mpz_class x, bool constTime = false) const + { + typedef mcl::fp::Unit Unit; + const size_t maxUnit = 512 / 2 / mcl::fp::UnitBitSize; + const int splitN = 2; + mpz_class u[splitN]; + G1 in[splitN]; + G1 tbl[4]; + int bitTbl[splitN]; // bit size of u[i] + Unit w[splitN][maxUnit]; // unit array of u[i] + int maxBit = 0; // max bit of u[i] + int maxN = 0; + int remainBit = 0; + + x %= r; + if (x == 0) { + Q.clear(); + if (constTime) goto DummyLoop; + return; + } + if (x < 0) { + x += r; + } + split(u[0], u[1], x); + in[0] = P; + mulLambda(in[1], in[0]); + for (int i = 0; i < splitN; i++) { + if (u[i] < 0) { + u[i] = -u[i]; + G1::neg(in[i], in[i]); + } + in[i].normalize(); + } +#if 0 + G1::mulGeneric(in[0], in[0], u[0]); + G1::mulGeneric(in[1], in[1], u[1]); + G1::add(Q, in[0], in[1]); + return; +#else + tbl[0] = in[0]; // dummy + tbl[1] = in[0]; + tbl[2] = in[1]; + G1::add(tbl[3], in[0], in[1]); + tbl[3].normalize(); + for (int i = 0; i < splitN; i++) { + bool b; + mcl::gmp::getArray(&b, w[i], maxUnit, u[i]); + assert(b); + bitTbl[i] = (int)mcl::gmp::getBitSize(u[i]); + maxBit = fp::max_(maxBit, bitTbl[i]); + } + assert(maxBit > 0); + maxBit--; + /* + maxBit = maxN * UnitBitSize + remainBit + 0 < remainBit <= UnitBitSize + */ + maxN = maxBit / mcl::fp::UnitBitSize; + remainBit = maxBit % mcl::fp::UnitBitSize; + remainBit++; + Q.clear(); + for (int i = maxN; i >= 0; i--) { + for (int j = remainBit - 1; j >= 0; j--) { + G1::dbl(Q, Q); + uint32_t b0 = (w[0][i] >> j) & 1; + uint32_t b1 = (w[1][i] >> j) & 1; + uint32_t c = b1 * 2 + b0; + if (c == 0) { + if (constTime) tbl[0] += tbl[1]; + } else { + Q += tbl[c]; + } + } + remainBit = (int)mcl::fp::UnitBitSize; + } +#endif + DummyLoop: + if (!constTime) return; + const int limitBit = (int)rBitSize / splitN; + G1 D = tbl[0]; + for (int i = maxBit + 1; i < limitBit; i++) { + G1::dbl(D, D); + D += tbl[0]; + } + } +}; + +/* + GLV method for G2 and GT on BN/BLS12 +*/ +struct GLV2 { + size_t rBitSize; + mpz_class B[4][4]; + mpz_class r; + mpz_class v[4]; + mpz_class z; + mpz_class abs_z; + bool isBLS12; + GLV2() : rBitSize(0), isBLS12(false) {} + void init(const mpz_class& r, const mpz_class& z, bool isBLS12 = false) + { + this->r = r; + this->z = z; + this->abs_z = z < 0 ? 
-z : z; + this->isBLS12 = isBLS12; + rBitSize = mcl::gmp::getBitSize(r); + rBitSize = (rBitSize + mcl::fp::UnitBitSize - 1) & ~(mcl::fp::UnitBitSize - 1);// a little better size + mpz_class z2p1 = z * 2 + 1; + B[0][0] = z + 1; + B[0][1] = z; + B[0][2] = z; + B[0][3] = -2 * z; + B[1][0] = z2p1; + B[1][1] = -z; + B[1][2] = -(z + 1); + B[1][3] = -z; + B[2][0] = 2 * z; + B[2][1] = z2p1; + B[2][2] = z2p1; + B[2][3] = z2p1; + B[3][0] = z - 1; + B[3][1] = 2 * z2p1; + B[3][2] = -2 * z + 1; + B[3][3] = z - 1; + /* + v[] = [r 0 0 0] * B^(-1) = [2z^2+3z+1, 12z^3+8z^2+z, 6z^3+4z^2+z, -(2z+1)] + */ + const char *zBN254 = "-4080000000000001"; + mpz_class t; + bool b; + mcl::gmp::setStr(&b, t, zBN254, 16); + assert(b); + (void)b; + if (z == t) { + static const char *vTblBN254[] = { + "e00a8e7f56e007e5b09fe7fdf43ba998", + "-152aff56a8054abf9da75db2da3d6885101e5fd3997d41cb1", + "-a957fab5402a55fced3aed96d1eb44295f40f136ee84e09b", + "-e00a8e7f56e007e929d7b2667ea6f29c", + }; + for (int i = 0; i < 4; i++) { + mcl::gmp::setStr(&b, v[i], vTblBN254[i], 16); + assert(b); + (void)b; + } + } else { + v[0] = ((1 + z * (3 + z * 2)) << rBitSize) / r; + v[1] = ((z * (1 + z * (8 + z * 12))) << rBitSize) / r; + v[2] = ((z * (1 + z * (4 + z * 6))) << rBitSize) / r; + v[3] = -((z * (1 + z * 2)) << rBitSize) / r; + } + } + /* + u[] = [x, 0, 0, 0] - v[] * x * B + */ + void split(mpz_class u[4], const mpz_class& x) const + { + if (isBLS12) { + /* + Frob(P) = zP + x = u[0] + u[1] z + u[2] z^2 + u[3] z^3 + */ + bool isNeg = false; + mpz_class t = x; + if (t < 0) { + t = -t; + isNeg = true; + } + for (int i = 0; i < 4; i++) { + // t = t / abs_z, u[i] = t % abs_z + mcl::gmp::divmod(t, u[i], t, abs_z); + if (((z < 0) && (i & 1)) ^ isNeg) { + u[i] = -u[i]; + } + } + return; + } + // BN + mpz_class t[4]; + for (int i = 0; i < 4; i++) { + t[i] = (x * v[i]) >> rBitSize; + } + for (int i = 0; i < 4; i++) { + u[i] = (i == 0) ? 
x : 0; + for (int j = 0; j < 4; j++) { + u[i] -= t[j] * B[j][i]; + } + } + } + template + void mul(T& Q, const T& P, mpz_class x, bool constTime = false) const + { +#if 0 // #ifndef NDEBUG + { + T R; + T::mulGeneric(R, P, r); + assert(R.isZero()); + } +#endif + typedef mcl::fp::Unit Unit; + const size_t maxUnit = 512 / 2 / mcl::fp::UnitBitSize; + const int splitN = 4; + mpz_class u[splitN]; + T in[splitN]; + T tbl[16]; + int bitTbl[splitN]; // bit size of u[i] + Unit w[splitN][maxUnit]; // unit array of u[i] + int maxBit = 0; // max bit of u[i] + int maxN = 0; + int remainBit = 0; + + x %= r; + if (x == 0) { + Q.clear(); + if (constTime) goto DummyLoop; + return; + } + if (x < 0) { + x += r; + } + split(u, x); + in[0] = P; + Frobenius(in[1], in[0]); + Frobenius(in[2], in[1]); + Frobenius(in[3], in[2]); + for (int i = 0; i < splitN; i++) { + if (u[i] < 0) { + u[i] = -u[i]; + T::neg(in[i], in[i]); + } +// in[i].normalize(); // slow + } +#if 0 + for (int i = 0; i < splitN; i++) { + T::mulGeneric(in[i], in[i], u[i]); + } + T::add(Q, in[0], in[1]); + Q += in[2]; + Q += in[3]; + return; +#else + tbl[0] = in[0]; + for (size_t i = 1; i < 16; i++) { + tbl[i].clear(); + if (i & 1) { + tbl[i] += in[0]; + } + if (i & 2) { + tbl[i] += in[1]; + } + if (i & 4) { + tbl[i] += in[2]; + } + if (i & 8) { + tbl[i] += in[3]; + } +// tbl[i].normalize(); + } + for (int i = 0; i < splitN; i++) { + bool b; + mcl::gmp::getArray(&b, w[i], maxUnit, u[i]); + assert(b); + bitTbl[i] = (int)mcl::gmp::getBitSize(u[i]); + maxBit = fp::max_(maxBit, bitTbl[i]); + } + maxBit--; + /* + maxBit = maxN * UnitBitSize + remainBit + 0 < remainBit <= UnitBitSize + */ + maxN = maxBit / mcl::fp::UnitBitSize; + remainBit = maxBit % mcl::fp::UnitBitSize; + remainBit++; + Q.clear(); + for (int i = maxN; i >= 0; i--) { + for (int j = remainBit - 1; j >= 0; j--) { + T::dbl(Q, Q); + uint32_t b0 = (w[0][i] >> j) & 1; + uint32_t b1 = (w[1][i] >> j) & 1; + uint32_t b2 = (w[2][i] >> j) & 1; + uint32_t b3 = (w[3][i] >> j) & 1; + uint32_t c = b3 * 8 + b2 * 4 + b1 * 2 + b0; + if (c == 0) { + if (constTime) tbl[0] += tbl[1]; + } else { + Q += tbl[c]; + } + } + remainBit = (int)mcl::fp::UnitBitSize; + } +#endif + DummyLoop: + if (!constTime) return; + const int limitBit = (int)rBitSize / splitN; + T D = tbl[0]; + for (int i = maxBit + 1; i < limitBit; i++) { + T::dbl(D, D); + D += tbl[0]; + } + } + void pow(Fp12& z, const Fp12& x, mpz_class y, bool constTime = false) const + { + typedef GroupMtoA AG; // as additive group + AG& _z = static_cast(z); + const AG& _x = static_cast(x); + mul(_z, _x, y, constTime); + } +}; + +struct Param { + CurveParam cp; + mpz_class z; + mpz_class abs_z; + bool isNegative; + bool isBLS12; + mpz_class p; + mpz_class r; + local::MapTo mapTo; + local::GLV1 glv1; + local::GLV2 glv2; + // for G2 Frobenius + Fp2 g2; + Fp2 g3; + /* + Dtype twist + (x', y') = phi(x, y) = (x/w^2, y/w^3) + y^2 = x^3 + b + => (y'w^3)^2 = (x'w^2)^3 + b + => y'^2 = x'^3 + b / w^6 ; w^6 = xi + => y'^2 = x'^3 + twist_b; + */ + Fp2 twist_b; + local::TwistBtype twist_b_type; +/* + mpz_class exp_c0; + mpz_class exp_c1; + mpz_class exp_c2; + mpz_class exp_c3; +*/ + + // Loop parameter for the Miller loop part of opt. ate pairing. 
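+	// siTbl: signed-digit (NAF) expansion of |6z+2| for BN curves and of |z|
+	// for BLS12 curves; filled via gmp::getNAF() in init() below.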
+ local::SignVec siTbl; + size_t precomputedQcoeffSize; + bool useNAF; + local::SignVec zReplTbl; + + // for initG1only + G1 basePoint; + + void init(bool *pb, const mcl::CurveParam& cp, fp::Mode mode) + { + this->cp = cp; + isBLS12 = cp.curveType == MCL_BLS12_381; + gmp::setStr(pb, z, cp.z); + if (!*pb) return; + isNegative = z < 0; + if (isNegative) { + abs_z = -z; + } else { + abs_z = z; + } + if (isBLS12) { + mpz_class z2 = z * z; + mpz_class z4 = z2 * z2; + r = z4 - z2 + 1; + p = z - 1; + p = p * p * r / 3 + z; + } else { + const int pCoff[] = { 1, 6, 24, 36, 36 }; + const int rCoff[] = { 1, 6, 18, 36, 36 }; + p = local::evalPoly(z, pCoff); + assert((p % 6) == 1); + r = local::evalPoly(z, rCoff); + } + Fr::init(pb, r, mode); + if (!*pb) return; + Fp::init(pb, cp.xi_a, p, mode); + if (!*pb) return; + Fp2::init(); + const Fp2 xi(cp.xi_a, 1); + g2 = Fp2::get_gTbl()[0]; + g3 = Fp2::get_gTbl()[3]; + if (cp.isMtype) { + Fp2::inv(g2, g2); + Fp2::inv(g3, g3); + } + if (cp.isMtype) { + twist_b = Fp2(cp.b) * xi; + } else { + if (cp.b == 2 && cp.xi_a == 1) { + twist_b = Fp2(1, -1); // shortcut + } else { + twist_b = Fp2(cp.b) / xi; + } + } + if (twist_b == Fp2(1, -1)) { + twist_b_type = tb_1m1i; + } else if (twist_b == Fp2(1, -2)) { + twist_b_type = tb_1m2i; + } else { + twist_b_type = tb_generic; + } + G1::init(0, cp.b, mcl::ec::Proj); + if (isBLS12) { + G1::setOrder(r); + } + G2::init(0, twist_b, mcl::ec::Proj); + G2::setOrder(r); + + const mpz_class largest_c = isBLS12 ? abs_z : gmp::abs(z * 6 + 2); + useNAF = gmp::getNAF(siTbl, largest_c); + precomputedQcoeffSize = local::getPrecomputeQcoeffSize(siTbl); + gmp::getNAF(zReplTbl, gmp::abs(z)); +/* + if (isBLS12) { + mpz_class z2 = z * z; + mpz_class z3 = z2 * z; + mpz_class z4 = z3 * z; + mpz_class z5 = z4 * z; + exp_c0 = z5 - 2 * z4 + 2 * z2 - z + 3; + exp_c1 = z4 - 2 * z3 + 2 * z - 1; + exp_c2 = z3 - 2 * z2 + z; + exp_c3 = z2 - 2 * z + 1; + } else { + exp_c0 = -2 + z * (-18 + z * (-30 - 36 * z)); + exp_c1 = 1 + z * (-12 + z * (-18 - 36 * z)); + exp_c2 = 6 * z * z + 1; + } +*/ + if (isBLS12) { + mapTo.init(0, z, cp.curveType); + } else { + mapTo.init(2 * p - r, z, cp.curveType); + } + glv1.init(r, z, isBLS12, cp.curveType); + glv2.init(r, z, isBLS12); + basePoint.clear(); + *pb = true; + } + void initG1only(bool *pb, const mcl::EcParam& para) + { + Fp::init(pb, para.p); + if (!*pb) return; + Fr::init(pb, para.n); + if (!*pb) return; + G1::init(pb, para.a, para.b); + if (!*pb) return; + G1::setOrder(Fr::getOp().mp); + mapTo.init(0, 0, para.curveType); + Fp x0, y0; + x0.setStr(pb, para.gx); + if (!*pb) return; + y0.setStr(pb, para.gy); + basePoint.set(pb, x0, y0); + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + void init(const mcl::CurveParam& cp, fp::Mode mode) + { + bool b; + init(&b, cp, mode); + if (!b) throw cybozu::Exception("Param:init"); + } +#endif +}; + +template +struct StaticVar { + static local::Param param; +}; + +template +local::Param StaticVar::param; + +} // mcl::bn::local + +namespace BN { + +static const local::Param& param = local::StaticVar<>::param; + +} // mcl::bn::BN + +namespace local { + +inline void mulArrayGLV1(G1& z, const G1& x, const mcl::fp::Unit *y, size_t yn, bool isNegative, bool constTime) +{ + mpz_class s; + bool b; + mcl::gmp::setArray(&b, s, y, yn); + assert(b); + if (isNegative) s = -s; + BN::param.glv1.mul(z, x, s, constTime); +} +inline void mulArrayGLV2(G2& z, const G2& x, const mcl::fp::Unit *y, size_t yn, bool isNegative, bool constTime) +{ + mpz_class s; + bool b; + mcl::gmp::setArray(&b, s, y, yn); + 
assert(b); + if (isNegative) s = -s; + BN::param.glv2.mul(z, x, s, constTime); +} +inline void powArrayGLV2(Fp12& z, const Fp12& x, const mcl::fp::Unit *y, size_t yn, bool isNegative, bool constTime) +{ + mpz_class s; + bool b; + mcl::gmp::setArray(&b, s, y, yn); + assert(b); + if (isNegative) s = -s; + BN::param.glv2.pow(z, x, s, constTime); +} + +/* + Faster Squaring in the Cyclotomic Subgroup of Sixth Degree Extensions + Robert Granger, Michael Scott +*/ +inline void sqrFp4(Fp2& z0, Fp2& z1, const Fp2& x0, const Fp2& x1) +{ +#if 1 + Fp2Dbl T0, T1, T2; + Fp2Dbl::sqrPre(T0, x0); + Fp2Dbl::sqrPre(T1, x1); + Fp2Dbl::mul_xi(T2, T1); + Fp2Dbl::add(T2, T2, T0); + Fp2::add(z1, x0, x1); + Fp2Dbl::mod(z0, T2); + Fp2Dbl::sqrPre(T2, z1); + Fp2Dbl::sub(T2, T2, T0); + Fp2Dbl::sub(T2, T2, T1); + Fp2Dbl::mod(z1, T2); +#else + Fp2 t0, t1, t2; + Fp2::sqr(t0, x0); + Fp2::sqr(t1, x1); + Fp2::mul_xi(z0, t1); + z0 += t0; + Fp2::add(z1, x0, x1); + Fp2::sqr(z1, z1); + z1 -= t0; + z1 -= t1; +#endif +} + +inline void fasterSqr(Fp12& y, const Fp12& x) +{ +#if 0 + Fp12::sqr(y, x); +#else + const Fp2& x0(x.a.a); + const Fp2& x4(x.a.b); + const Fp2& x3(x.a.c); + const Fp2& x2(x.b.a); + const Fp2& x1(x.b.b); + const Fp2& x5(x.b.c); + Fp2& y0(y.a.a); + Fp2& y4(y.a.b); + Fp2& y3(y.a.c); + Fp2& y2(y.b.a); + Fp2& y1(y.b.b); + Fp2& y5(y.b.c); + Fp2 t0, t1; + sqrFp4(t0, t1, x0, x1); + Fp2::sub(y0, t0, x0); + y0 += y0; + y0 += t0; + Fp2::add(y1, t1, x1); + y1 += y1; + y1 += t1; + Fp2 t2, t3; + sqrFp4(t0, t1, x2, x3); + sqrFp4(t2, t3, x4, x5); + Fp2::sub(y4, t0, x4); + y4 += y4; + y4 += t0; + Fp2::add(y5, t1, x5); + y5 += y5; + y5 += t1; + Fp2::mul_xi(t0, t3); + Fp2::add(y2, t0, x2); + y2 += y2; + y2 += t0; + Fp2::sub(y3, t2, x3); + y3 += y3; + y3 += t2; +#endif +} + +/* + y = x^z if z > 0 + = unitaryInv(x^(-z)) if z < 0 +*/ +inline void pow_z(Fp12& y, const Fp12& x) +{ +#if 1 + if (BN::param.cp.curveType == MCL_BN254) { + Compress::fixed_power(y, x); + } else { + Fp12 orgX = x; + y = x; + Fp12 conj; + conj.a = x.a; + Fp6::neg(conj.b, x.b); + for (size_t i = 1; i < BN::param.zReplTbl.size(); i++) { + fasterSqr(y, y); + if (BN::param.zReplTbl[i] > 0) { + y *= orgX; + } else if (BN::param.zReplTbl[i] < 0) { + y *= conj; + } + } + } +#else + Fp12::pow(y, x, param.abs_z); +#endif + if (BN::param.isNegative) { + Fp12::unitaryInv(y, y); + } +} +inline void mul_twist_b(Fp2& y, const Fp2& x) +{ + switch (BN::param.twist_b_type) { + case local::tb_1m1i: + /* + b / xi = 1 - 1i + (a + bi)(1 - 1i) = (a + b) + (b - a)i + */ + { + Fp t; + Fp::add(t, x.a, x.b); + Fp::sub(y.b, x.b, x.a); + y.a = t; + } + return; + case local::tb_1m2i: + /* + b / xi = 1 - 2i + (a + bi)(1 - 2i) = (a + 2b) + (b - 2a)i + */ + { + Fp t; + Fp::sub(t, x.b, x.a); + t -= x.a; + Fp::add(y.a, x.a, x.b); + y.a += x.b; + y.b = t; + } + return; + case local::tb_generic: + Fp2::mul(y, x, BN::param.twist_b); + return; + } +} + +inline void dblLineWithoutP(Fp6& l, G2& Q) +{ + Fp2 t0, t1, t2, t3, t4, t5; + Fp2Dbl T0, T1; + Fp2::sqr(t0, Q.z); + Fp2::mul(t4, Q.x, Q.y); + Fp2::sqr(t1, Q.y); + Fp2::add(t3, t0, t0); + Fp2::divBy2(t4, t4); + Fp2::add(t5, t0, t1); + t0 += t3; + mul_twist_b(t2, t0); + Fp2::sqr(t0, Q.x); + Fp2::add(t3, t2, t2); + t3 += t2; + Fp2::sub(Q.x, t1, t3); + t3 += t1; + Q.x *= t4; + Fp2::divBy2(t3, t3); + Fp2Dbl::sqrPre(T0, t3); + Fp2Dbl::sqrPre(T1, t2); + Fp2Dbl::sub(T0, T0, T1); + Fp2Dbl::add(T1, T1, T1); + Fp2Dbl::sub(T0, T0, T1); + Fp2::add(t3, Q.y, Q.z); + Fp2Dbl::mod(Q.y, T0); + Fp2::sqr(t3, t3); + t3 -= t5; + Fp2::mul(Q.z, t1, t3); + Fp2::sub(l.a, t2, 
t1); + l.c = t0; + l.b = t3; +} +inline void addLineWithoutP(Fp6& l, G2& R, const G2& Q) +{ + Fp2 t1, t2, t3, t4; + Fp2Dbl T1, T2; + Fp2::mul(t1, R.z, Q.x); + Fp2::mul(t2, R.z, Q.y); + Fp2::sub(t1, R.x, t1); + Fp2::sub(t2, R.y, t2); + Fp2::sqr(t3, t1); + Fp2::mul(R.x, t3, R.x); + Fp2::sqr(t4, t2); + t3 *= t1; + t4 *= R.z; + t4 += t3; + t4 -= R.x; + t4 -= R.x; + R.x -= t4; + Fp2Dbl::mulPre(T1, t2, R.x); + Fp2Dbl::mulPre(T2, t3, R.y); + Fp2Dbl::sub(T2, T1, T2); + Fp2Dbl::mod(R.y, T2); + Fp2::mul(R.x, t1, t4); + Fp2::mul(R.z, t3, R.z); + Fp2::neg(l.c, t2); + Fp2Dbl::mulPre(T1, t2, Q.x); + Fp2Dbl::mulPre(T2, t1, Q.y); + Fp2Dbl::sub(T1, T1, T2); + l.b = t1; + Fp2Dbl::mod(l.a, T1); +} +inline void dblLine(Fp6& l, G2& Q, const G1& P) +{ + dblLineWithoutP(l, Q); + local::updateLine(l, P); +} +inline void addLine(Fp6& l, G2& R, const G2& Q, const G1& P) +{ + addLineWithoutP(l, R, Q); + local::updateLine(l, P); +} +inline void mulFp6cb_by_G1xy(Fp6& y, const Fp6& x, const G1& P) +{ + assert(P.isNormalized()); + if (&y != &x) y.a = x.a; + Fp2::mulFp(y.c, x.c, P.x); + Fp2::mulFp(y.b, x.b, P.y); +} + +/* + x = a + bv + cv^2 + y = (y0, y4, y2) -> (y0, 0, y2, 0, y4, 0) + z = xy = (a + bv + cv^2)(d + ev) + = (ad + ce xi) + ((a + b)(d + e) - ad - be)v + (be + cd)v^2 +*/ +inline void Fp6mul_01(Fp6& z, const Fp6& x, const Fp2& d, const Fp2& e) +{ + const Fp2& a = x.a; + const Fp2& b = x.b; + const Fp2& c = x.c; + Fp2 t0, t1; + Fp2Dbl AD, CE, BE, CD, T; + Fp2Dbl::mulPre(AD, a, d); + Fp2Dbl::mulPre(CE, c, e); + Fp2Dbl::mulPre(BE, b, e); + Fp2Dbl::mulPre(CD, c, d); + Fp2::add(t0, a, b); + Fp2::add(t1, d, e); + Fp2Dbl::mulPre(T, t0, t1); + T -= AD; + T -= BE; + Fp2Dbl::mod(z.b, T); + Fp2Dbl::mul_xi(CE, CE); + AD += CE; + Fp2Dbl::mod(z.a, AD); + BE += CD; + Fp2Dbl::mod(z.c, BE); +} +/* + input + z = (z0 + z1v + z2v^2) + (z3 + z4v + z5v^2)w = Z0 + Z1w + 0 3 4 + x = (a, b, c) -> (b, 0, 0, c, a, 0) = X0 + X1w + X0 = b = (b, 0, 0) + X1 = c + av = (c, a, 0) + w^2 = v, v^3 = xi + output + z <- zx = (Z0X0 + Z1X1v) + ((Z0 + Z1)(X0 + X1) - Z0X0 - Z1X1)w + Z0X0 = Z0 b + Z1X1 = Z1 (c, a, 0) + (Z0 + Z1)(X0 + X1) = (Z0 + Z1) (b + c, a, 0) +*/ +inline void mul_403(Fp12& z, const Fp6& x) +{ + const Fp2& a = x.a; + const Fp2& b = x.b; + const Fp2& c = x.c; +#if 1 + Fp6& z0 = z.a; + Fp6& z1 = z.b; + Fp6 z0x0, z1x1, t0; + Fp2 t1; + Fp2::add(t1, x.b, c); + Fp6::add(t0, z0, z1); + Fp2::mul(z0x0.a, z0.a, b); + Fp2::mul(z0x0.b, z0.b, b); + Fp2::mul(z0x0.c, z0.c, b); + Fp6mul_01(z1x1, z1, c, a); + Fp6mul_01(t0, t0, t1, a); + Fp6::sub(z.b, t0, z0x0); + z.b -= z1x1; + // a + bv + cv^2 = cxi + av + bv^2 + Fp2::mul_xi(z1x1.c, z1x1.c); + Fp2::add(z.a.a, z0x0.a, z1x1.c); + Fp2::add(z.a.b, z0x0.b, z1x1.a); + Fp2::add(z.a.c, z0x0.c, z1x1.b); +#else + Fp2& z0 = z.a.a; + Fp2& z1 = z.a.b; + Fp2& z2 = z.a.c; + Fp2& z3 = z.b.a; + Fp2& z4 = z.b.b; + Fp2& z5 = z.b.c; + Fp2Dbl Z0B, Z1B, Z2B, Z3C, Z4C, Z5C; + Fp2Dbl T0, T1, T2, T3, T4, T5; + Fp2 bc, t; + Fp2::addPre(bc, b, c); + Fp2::addPre(t, z5, z2); + Fp2Dbl::mulPre(T5, t, bc); + Fp2Dbl::mulPre(Z5C, z5, c); + Fp2Dbl::mulPre(Z2B, z2, b); + Fp2Dbl::sub(T5, T5, Z5C); + Fp2Dbl::sub(T5, T5, Z2B); + Fp2Dbl::mulPre(T0, z1, a); + T5 += T0; + + Fp2::addPre(t, z4, z1); + Fp2Dbl::mulPre(T4, t, bc); + Fp2Dbl::mulPre(Z4C, z4, c); + Fp2Dbl::mulPre(Z1B, z1, b); + Fp2Dbl::sub(T4, T4, Z4C); + Fp2Dbl::sub(T4, T4, Z1B); + Fp2Dbl::mulPre(T0, z0, a); + T4 += T0; + + Fp2::addPre(t, z3, z0); + Fp2Dbl::mulPre(T3, t, bc); + Fp2Dbl::mulPre(Z3C, z3, c); + Fp2Dbl::mulPre(Z0B, z0, b); + Fp2Dbl::sub(T3, T3, Z3C); + 
Fp2Dbl::sub(T3, T3, Z0B); + Fp2::mul_xi(t, z2); + Fp2Dbl::mulPre(T0, t, a); + T3 += T0; + + Fp2Dbl::mulPre(T2, z3, a); + T2 += Z2B; + T2 += Z4C; + + Fp2::mul_xi(t, z5); + Fp2Dbl::mulPre(T1, t, a); + T1 += Z1B; + T1 += Z3C; + + Fp2Dbl::mulPre(T0, z4, a); + T0 += Z5C; + Fp2Dbl::mul_xi(T0, T0); + T0 += Z0B; + + Fp2Dbl::mod(z0, T0); + Fp2Dbl::mod(z1, T1); + Fp2Dbl::mod(z2, T2); + Fp2Dbl::mod(z3, T3); + Fp2Dbl::mod(z4, T4); + Fp2Dbl::mod(z5, T5); +#endif +} +/* + input + z = (z0 + z1v + z2v^2) + (z3 + z4v + z5v^2)w = Z0 + Z1w + 0 1 4 + x = (a, b, c) -> (a, c, 0, 0, b, 0) = X0 + X1w + X0 = (a, c, 0) + X1 = (0, b, 0) + w^2 = v, v^3 = xi + output + z <- zx = (Z0X0 + Z1X1v) + ((Z0 + Z1)(X0 + X1) - Z0X0 - Z1X1)w + Z0X0 = Z0 (a, c, 0) + Z1X1 = Z1 (0, b, 0) = Z1 bv + (Z0 + Z1)(X0 + X1) = (Z0 + Z1) (a, b + c, 0) + + (a + bv + cv^2)v = c xi + av + bv^2 +*/ +inline void mul_041(Fp12& z, const Fp6& x) +{ + const Fp2& a = x.a; + const Fp2& b = x.b; + const Fp2& c = x.c; + Fp6& z0 = z.a; + Fp6& z1 = z.b; + Fp6 z0x0, z1x1, t0; + Fp2 t1; + Fp2::mul(z1x1.a, z1.c, b); + Fp2::mul_xi(z1x1.a, z1x1.a); + Fp2::mul(z1x1.b, z1.a, b); + Fp2::mul(z1x1.c, z1.b, b); + Fp2::add(t1, x.b, c); + Fp6::add(t0, z0, z1); + Fp6mul_01(z0x0, z0, a, c); + Fp6mul_01(t0, t0, a, t1); + Fp6::sub(z.b, t0, z0x0); + z.b -= z1x1; + // a + bv + cv^2 = cxi + av + bv^2 + Fp2::mul_xi(z1x1.c, z1x1.c); + Fp2::add(z.a.a, z0x0.a, z1x1.c); + Fp2::add(z.a.b, z0x0.b, z1x1.a); + Fp2::add(z.a.c, z0x0.c, z1x1.b); +} +inline void mulSparse(Fp12& z, const Fp6& x) +{ + if (BN::param.cp.isMtype) { + mul_041(z, x); + } else { + mul_403(z, x); + } +} +inline void convertFp6toFp12(Fp12& y, const Fp6& x) +{ + if (BN::param.cp.isMtype) { + // (a, b, c) -> (a, c, 0, 0, b, 0) + y.a.a = x.a; + y.b.b = x.b; + y.a.b = x.c; + y.a.c.clear(); + y.b.a.clear(); + y.b.c.clear(); + } else { + // (a, b, c) -> (b, 0, 0, c, a, 0) + y.b.b = x.a; + y.a.a = x.b; + y.b.a = x.c; + y.a.b.clear(); + y.a.c.clear(); + y.b.c.clear(); + } +} +inline void mulSparse2(Fp12& z, const Fp6& x, const Fp6& y) +{ + convertFp6toFp12(z, x); + mulSparse(z, y); +} +inline void mapToCyclotomic(Fp12& y, const Fp12& x) +{ + Fp12 z; + Fp12::Frobenius2(z, x); // z = x^(p^2) + z *= x; // x^(p^2 + 1) + Fp12::inv(y, z); + Fp6::neg(z.b, z.b); // z^(p^6) = conjugate of z + y *= z; +} +/* + Implementing Pairings at the 192-bit Security Level + D.F.Aranha, L.F.Castaneda, E.Knapp, A.Menezes, F.R.Henriquez + Section 4 +*/ +inline void expHardPartBLS12(Fp12& y, const Fp12& x) +{ +#if 0 + const mpz_class& p = param.p; + mpz_class p2 = p * p; + mpz_class p4 = p2 * p2; + Fp12::pow(y, x, (p4 - p2 + 1) / param.r * 3); + return; +#endif +#if 1 + Fp12 a0, a1, a2, a3, a4, a5, a6, a7; + Fp12::unitaryInv(a0, x); // a0 = x^-1 + fasterSqr(a1, a0); // x^-2 + pow_z(a2, x); // x^z + fasterSqr(a3, a2); // x^2z + a1 *= a2; // a1 = x^(z-2) + pow_z(a7, a1); // a7 = x^(z^2-2z) + pow_z(a4, a7); // a4 = x^(z^3-2z^2) + pow_z(a5, a4); // a5 = x^(z^4-2z^3) + a3 *= a5; // a3 = x^(z^4-2z^3+2z) + pow_z(a6, a3); // a6 = x^(z^5-2z^4+2z^2) + + Fp12::unitaryInv(a1, a1); // x^(2-z) + a1 *= a6; // x^(z^5-2z^4+2z^2-z+2) + a1 *= x; // x^(z^5-2z^4+2z^2-z+3) = x^c0 + a3 *= a0; // x^(z^4-2z^3-1) = x^c1 + Fp12::Frobenius(a3, a3); // x^(c1 p) + a1 *= a3; // x^(c0 + c1 p) + a4 *= a2; // x^(z^3-2z^2+z) = x^c2 + Fp12::Frobenius2(a4, a4); // x^(c2 p^2) + a1 *= a4; // x^(c0 + c1 p + c2 p^2) + a7 *= x; // x^(z^2-2z+1) = x^c3 + Fp12::Frobenius3(y, a7); + y *= a1; +#else + Fp12 t1, t2, t3; + Fp12::Frobenius(t1, x); + Fp12::Frobenius(t2, t1); + Fp12::Frobenius(t3, 
t2); + Fp12::pow(t1, t1, param.exp_c1); + Fp12::pow(t2, t2, param.exp_c2); + Fp12::pow(t3, t3, param.exp_c3); + Fp12::pow(y, x, param.exp_c0); + y *= t1; + y *= t2; + y *= t3; +#endif +} +/* + Faster Hashing to G2 + Laura Fuentes-Castaneda, Edward Knapp, Francisco Rodriguez-Henriquez + section 4.1 + y = x^(d 2z(6z^2 + 3z + 1)) where + p = p(z) = 36z^4 + 36z^3 + 24z^2 + 6z + 1 + r = r(z) = 36z^4 + 36z^3 + 18z^2 + 6z + 1 + d = (p^4 - p^2 + 1) / r + d1 = d 2z(6z^2 + 3z + 1) + = c0 + c1 p + c2 p^2 + c3 p^3 + + c0 = 1 + 6z + 12z^2 + 12z^3 + c1 = 4z + 6z^2 + 12z^3 + c2 = 6z + 6z^2 + 12z^3 + c3 = -1 + 4z + 6z^2 + 12z^3 + x -> x^z -> x^2z -> x^4z -> x^6z -> x^(6z^2) -> x^(12z^2) -> x^(12z^3) + a = x^(6z) x^(6z^2) x^(12z^3) + b = a / (x^2z) + x^d1 = (a x^(6z^2) x) b^p a^(p^2) (b / x)^(p^3) +*/ +inline void expHardPartBN(Fp12& y, const Fp12& x) +{ +#if 0 + const mpz_class& p = param.p; + mpz_class p2 = p * p; + mpz_class p4 = p2 * p2; + Fp12::pow(y, x, (p4 - p2 + 1) / param.r); + return; +#endif +#if 1 + Fp12 a, b; + Fp12 a2, a3; + pow_z(b, x); // x^z + fasterSqr(b, b); // x^2z + fasterSqr(a, b); // x^4z + a *= b; // x^6z + pow_z(a2, a); // x^(6z^2) + a *= a2; + fasterSqr(a3, a2); // x^(12z^2) + pow_z(a3, a3); // x^(12z^3) + a *= a3; + Fp12::unitaryInv(b, b); + b *= a; + a2 *= a; + Fp12::Frobenius2(a, a); + a *= a2; + a *= x; + Fp12::unitaryInv(y, x); + y *= b; + Fp12::Frobenius(b, b); + a *= b; + Fp12::Frobenius3(y, y); + y *= a; +#else + Fp12 t1, t2, t3; + Fp12::Frobenius(t1, x); + Fp12::Frobenius(t2, t1); + Fp12::Frobenius(t3, t2); + Fp12::pow(t1, t1, param.exp_c1); + Fp12::pow(t2, t2, param.exp_c2); + Fp12::pow(y, x, param.exp_c0); + y *= t1; + y *= t2; + y *= t3; +#endif +} +/* + remark : returned value is NOT on a curve +*/ +inline G1 makeAdjP(const G1& P) +{ + G1 adjP; + Fp::add(adjP.x, P.x, P.x); + adjP.x += P.x; + Fp::neg(adjP.y, P.y); + adjP.z = 1; + return adjP; +} + +} // mcl::bn::local + +/* + y = x^((p^12 - 1) / r) + (p^12 - 1) / r = (p^2 + 1) (p^6 - 1) (p^4 - p^2 + 1)/r + (a + bw)^(p^6) = a - bw in Fp12 + (p^4 - p^2 + 1)/r = c0 + c1 p + c2 p^2 + p^3 +*/ +inline void finalExp(Fp12& y, const Fp12& x) +{ +#if 1 + mapToCyclotomic(y, x); +#else + const mpz_class& p = param.p; + mpz_class p2 = p * p; + mpz_class p4 = p2 * p2; + Fp12::pow(y, x, p2 + 1); + Fp12::pow(y, y, p4 * p2 - 1); +#endif + if (BN::param.isBLS12) { + expHardPartBLS12(y, y); + } else { + expHardPartBN(y, y); + } +} +inline void millerLoop(Fp12& f, const G1& P_, const G2& Q_) +{ + G1 P(P_); + G2 Q(Q_); + P.normalize(); + Q.normalize(); + if (Q.isZero()) { + f = 1; + return; + } + assert(BN::param.siTbl[1] == 1); + G2 T = Q; + G2 negQ; + if (BN::param.useNAF) { + G2::neg(negQ, Q); + } + Fp6 d, e, l; + d = e = l = 1; + G1 adjP = makeAdjP(P); + dblLine(d, T, adjP); + addLine(l, T, Q, P); + mulSparse2(f, d, l); + for (size_t i = 2; i < BN::param.siTbl.size(); i++) { + dblLine(l, T, adjP); + Fp12::sqr(f, f); + mulSparse(f, l); + if (BN::param.siTbl[i]) { + if (BN::param.siTbl[i] > 0) { + addLine(l, T, Q, P); + } else { + addLine(l, T, negQ, P); + } + mulSparse(f, l); + } + } + if (BN::param.z < 0) { + G2::neg(T, T); + Fp6::neg(f.b, f.b); + } + if (BN::param.isBLS12) return; + G2 Q1, Q2; + Frobenius(Q1, Q); + Frobenius(Q2, Q1); + G2::neg(Q2, Q2); + addLine(d, T, Q1, P); + addLine(e, T, Q2, P); + Fp12 ft; + mulSparse2(ft, d, e); + f *= ft; +} +inline void pairing(Fp12& f, const G1& P, const G2& Q) +{ + millerLoop(f, P, Q); + finalExp(f, f); +} +/* + allocate param.precomputedQcoeffSize elements of Fp6 for Qcoeff +*/ +inline void 
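/*
	usage sketch for the pairing API defined above (illustrative only; the
	variable names and the message "abc" are arbitrary):
		bool b;
		initPairing(&b, mcl::BN254); // defined below via BN::init
		if (!b) return;
		G1 P;
		G2 Q;
		hashAndMapToG1(P, "abc", 3);
		hashAndMapToG2(Q, "abc", 3);
		Fp12 e;
		pairing(e, P, Q); // e = finalExp(millerLoop(P, Q))
*/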
precomputeG2(Fp6 *Qcoeff, const G2& Q_) +{ + size_t idx = 0; + G2 Q(Q_); + Q.normalize(); + if (Q.isZero()) { + for (size_t i = 0; i < BN::param.precomputedQcoeffSize; i++) { + Qcoeff[i] = 1; + } + return; + } + G2 T = Q; + G2 negQ; + if (BN::param.useNAF) { + G2::neg(negQ, Q); + } + assert(BN::param.siTbl[1] == 1); + dblLineWithoutP(Qcoeff[idx++], T); + addLineWithoutP(Qcoeff[idx++], T, Q); + for (size_t i = 2; i < BN::param.siTbl.size(); i++) { + dblLineWithoutP(Qcoeff[idx++], T); + if (BN::param.siTbl[i]) { + if (BN::param.siTbl[i] > 0) { + addLineWithoutP(Qcoeff[idx++], T, Q); + } else { + addLineWithoutP(Qcoeff[idx++], T, negQ); + } + } + } + if (BN::param.z < 0) { + G2::neg(T, T); + } + if (BN::param.isBLS12) return; + G2 Q1, Q2; + Frobenius(Q1, Q); + Frobenius(Q2, Q1); + G2::neg(Q2, Q2); + addLineWithoutP(Qcoeff[idx++], T, Q1); + addLineWithoutP(Qcoeff[idx++], T, Q2); + assert(idx == BN::param.precomputedQcoeffSize); +} +/* + millerLoop(e, P, Q) is same as the following + std::vector Qcoeff; + precomputeG2(Qcoeff, Q); + precomputedMillerLoop(e, P, Qcoeff); +*/ +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void precomputeG2(std::vector& Qcoeff, const G2& Q) +{ + Qcoeff.resize(BN::param.precomputedQcoeffSize); + precomputeG2(Qcoeff.data(), Q); +} +#endif +template +void precomputeG2(bool *pb, Array& Qcoeff, const G2& Q) +{ + *pb = Qcoeff.resize(BN::param.precomputedQcoeffSize); + if (!*pb) return; + precomputeG2(Qcoeff.data(), Q); +} + +inline void precomputedMillerLoop(Fp12& f, const G1& P_, const Fp6* Qcoeff) +{ + G1 P(P_); + P.normalize(); + G1 adjP = makeAdjP(P); + size_t idx = 0; + Fp6 d, e, l; + mulFp6cb_by_G1xy(d, Qcoeff[idx], adjP); + idx++; + + mulFp6cb_by_G1xy(e, Qcoeff[idx], P); + idx++; + mulSparse2(f, d, e); + for (size_t i = 2; i < BN::param.siTbl.size(); i++) { + mulFp6cb_by_G1xy(l, Qcoeff[idx], adjP); + idx++; + Fp12::sqr(f, f); + mulSparse(f, l); + if (BN::param.siTbl[i]) { + mulFp6cb_by_G1xy(l, Qcoeff[idx], P); + idx++; + mulSparse(f, l); + } + } + if (BN::param.z < 0) { + Fp6::neg(f.b, f.b); + } + if (BN::param.isBLS12) return; + mulFp6cb_by_G1xy(d, Qcoeff[idx], P); + idx++; + mulFp6cb_by_G1xy(e, Qcoeff[idx], P); + idx++; + Fp12 ft; + mulSparse2(ft, d, e); + f *= ft; +} +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void precomputedMillerLoop(Fp12& f, const G1& P, const std::vector& Qcoeff) +{ + precomputedMillerLoop(f, P, Qcoeff.data()); +} +#endif +/* + f = MillerLoop(P1, Q1) x MillerLoop(P2, Q2) + Q2coeff : precomputed Q2 +*/ +inline void precomputedMillerLoop2mixed(Fp12& f, const G1& P1_, const G2& Q1_, const G1& P2_, const Fp6* Q2coeff) +{ + G1 P1(P1_), P2(P2_); + G2 Q1(Q1_); + P1.normalize(); + P2.normalize(); + Q1.normalize(); + if (Q1.isZero()) { + precomputedMillerLoop(f, P2_, Q2coeff); + return; + } + G2 T = Q1; + G2 negQ1; + if (BN::param.useNAF) { + G2::neg(negQ1, Q1); + } + G1 adjP1 = makeAdjP(P1); + G1 adjP2 = makeAdjP(P2); + size_t idx = 0; + Fp6 d1, d2, e1, e2, l1, l2; + dblLine(d1, T, adjP1); + mulFp6cb_by_G1xy(d2, Q2coeff[idx], adjP2); + idx++; + + Fp12 f1, f2; + e1 = 1; + addLine(e1, T, Q1, P1); + mulSparse2(f1, d1, e1); + + mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2); + mulSparse2(f2, d2, e2); + Fp12::mul(f, f1, f2); + idx++; + for (size_t i = 2; i < BN::param.siTbl.size(); i++) { + dblLine(l1, T, adjP1); + mulFp6cb_by_G1xy(l2, Q2coeff[idx], adjP2); + idx++; + Fp12::sqr(f, f); + mulSparse2(f1, l1, l2); + f *= f1; + if (BN::param.siTbl[i]) { + if (BN::param.siTbl[i] > 0) { + addLine(l1, T, Q1, P1); + } else { + addLine(l1, T, negQ1, P1); + } + 
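/* (note: this function and precomputedMillerLoop2 below evaluate both Miller loops under a single Fp12::sqr per iteration; a typical application, sketched here and not part of this file, is checking e(P1, Q1) == e(P2, Q2) by accumulating millerLoop(P1, Q1) * millerLoop(-P2, Q2) into f and testing whether finalExp(f) == 1) */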
mulFp6cb_by_G1xy(l2, Q2coeff[idx], P2); + idx++; + mulSparse2(f1, l1, l2); + f *= f1; + } + } + if (BN::param.z < 0) { + G2::neg(T, T); + Fp6::neg(f.b, f.b); + } + if (BN::param.isBLS12) return; + G2 Q11, Q12; + Frobenius(Q11, Q1); + Frobenius(Q12, Q11); + G2::neg(Q12, Q12); + addLine(d1, T, Q11, P1); + mulFp6cb_by_G1xy(d2, Q2coeff[idx], P2); + idx++; + addLine(e1, T, Q12, P1); + mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2); + idx++; + mulSparse2(f1, d1, e1); + mulSparse2(f2, d2, e2); + f *= f1; + f *= f2; +} +/* + f = MillerLoop(P1, Q1) x MillerLoop(P2, Q2) + Q1coeff, Q2coeff : precomputed Q1, Q2 +*/ +inline void precomputedMillerLoop2(Fp12& f, const G1& P1_, const Fp6* Q1coeff, const G1& P2_, const Fp6* Q2coeff) +{ + G1 P1(P1_), P2(P2_); + P1.normalize(); + P2.normalize(); + G1 adjP1 = makeAdjP(P1); + G1 adjP2 = makeAdjP(P2); + size_t idx = 0; + Fp6 d1, d2, e1, e2, l1, l2; + mulFp6cb_by_G1xy(d1, Q1coeff[idx], adjP1); + mulFp6cb_by_G1xy(d2, Q2coeff[idx], adjP2); + idx++; + + Fp12 f1, f2; + mulFp6cb_by_G1xy(e1, Q1coeff[idx], P1); + mulSparse2(f1, d1, e1); + + mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2); + mulSparse2(f2, d2, e2); + Fp12::mul(f, f1, f2); + idx++; + for (size_t i = 2; i < BN::param.siTbl.size(); i++) { + mulFp6cb_by_G1xy(l1, Q1coeff[idx], adjP1); + mulFp6cb_by_G1xy(l2, Q2coeff[idx], adjP2); + idx++; + Fp12::sqr(f, f); + mulSparse2(f1, l1, l2); + f *= f1; + if (BN::param.siTbl[i]) { + mulFp6cb_by_G1xy(l1, Q1coeff[idx], P1); + mulFp6cb_by_G1xy(l2, Q2coeff[idx], P2); + idx++; + mulSparse2(f1, l1, l2); + f *= f1; + } + } + if (BN::param.z < 0) { + Fp6::neg(f.b, f.b); + } + if (BN::param.isBLS12) return; + mulFp6cb_by_G1xy(d1, Q1coeff[idx], P1); + mulFp6cb_by_G1xy(d2, Q2coeff[idx], P2); + idx++; + mulFp6cb_by_G1xy(e1, Q1coeff[idx], P1); + mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2); + idx++; + mulSparse2(f1, d1, e1); + mulSparse2(f2, d2, e2); + f *= f1; + f *= f2; +} +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void precomputedMillerLoop2(Fp12& f, const G1& P1, const std::vector& Q1coeff, const G1& P2, const std::vector& Q2coeff) +{ + precomputedMillerLoop2(f, P1, Q1coeff.data(), P2, Q2coeff.data()); +} +inline void precomputedMillerLoop2mixed(Fp12& f, const G1& P1, const G2& Q1, const G1& P2, const std::vector& Q2coeff) +{ + precomputedMillerLoop2mixed(f, P1, Q1, P2, Q2coeff.data()); +} +#endif +inline void mapToG1(bool *pb, G1& P, const Fp& x) { *pb = BN::param.mapTo.calcG1(P, x); } +inline void mapToG2(bool *pb, G2& P, const Fp2& x) { *pb = BN::param.mapTo.calcG2(P, x); } +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void mapToG1(G1& P, const Fp& x) +{ + bool b; + mapToG1(&b, P, x); + if (!b) throw cybozu::Exception("mapToG1:bad value") << x; +} +inline void mapToG2(G2& P, const Fp2& x) +{ + bool b; + mapToG2(&b, P, x); + if (!b) throw cybozu::Exception("mapToG2:bad value") << x; +} +#endif +inline void hashAndMapToG1(G1& P, const void *buf, size_t bufSize) +{ + Fp t; + t.setHashOf(buf, bufSize); + bool b; + mapToG1(&b, P, t); + // It will not happen that the hashed value is equal to special value + assert(b); + (void)b; +} +inline void hashAndMapToG2(G2& P, const void *buf, size_t bufSize) +{ + Fp2 t; + t.a.setHashOf(buf, bufSize); + t.b.clear(); + bool b; + mapToG2(&b, P, t); + // It will not happen that the hashed value is equal to special value + assert(b); + (void)b; +} +#ifndef CYBOZU_DONT_USE_STRING +inline void hashAndMapToG1(G1& P, const std::string& str) +{ + hashAndMapToG1(P, str.c_str(), str.size()); +} +inline void hashAndMapToG2(G2& P, const std::string& str) +{ + hashAndMapToG2(P, 
str.c_str(), str.size()); +} +#endif +inline void verifyOrderG1(bool doVerify) +{ + if (BN::param.isBLS12) { + G1::setOrder(doVerify ? BN::param.r : 0); + } +} +inline void verifyOrderG2(bool doVerify) +{ + G2::setOrder(doVerify ? BN::param.r : 0); +} + +// backward compatibility +using mcl::CurveParam; +static const CurveParam& CurveFp254BNb = BN254; +static const CurveParam& CurveFp382_1 = BN381_1; +static const CurveParam& CurveFp382_2 = BN381_2; +static const CurveParam& CurveFp462 = BN462; +static const CurveParam& CurveSNARK1 = BN_SNARK1; + +/* + FrobeniusOnTwist for Dtype + p mod 6 = 1, w^6 = xi + Frob(x', y') = phi Frob phi^-1(x', y') + = phi Frob (x' w^2, y' w^3) + = phi (x'^p w^2p, y'^p w^3p) + = (F(x') w^2(p - 1), F(y') w^3(p - 1)) + = (F(x') g^2, F(y') g^3) + + FrobeniusOnTwist for Mtype + use (1/g) instead of g +*/ +inline void Frobenius(G2& D, const G2& S) +{ + Fp2::Frobenius(D.x, S.x); + Fp2::Frobenius(D.y, S.y); + Fp2::Frobenius(D.z, S.z); + D.x *= BN::param.g2; + D.y *= BN::param.g3; +} +inline void Frobenius2(G2& D, const G2& S) +{ + Frobenius(D, S); + Frobenius(D, D); +} +inline void Frobenius3(G2& D, const G2& S) +{ + Frobenius(D, S); + Frobenius(D, D); + Frobenius(D, D); +} + +namespace BN { + +using namespace mcl::bn; // backward compatibility + +inline void init(bool *pb, const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO) +{ + local::StaticVar<>::param.init(pb, cp, mode); + if (!*pb) return; + G1::setMulArrayGLV(local::mulArrayGLV1); + G2::setMulArrayGLV(local::mulArrayGLV2); + Fp12::setPowArrayGLV(local::powArrayGLV2); + G1::setCompressedExpression(); + G2::setCompressedExpression(); + *pb = true; +} + +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void init(const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO) +{ + bool b; + init(&b, cp, mode); + if (!b) throw cybozu::Exception("BN:init"); +} +#endif + +} // mcl::bn::BN + +inline void initPairing(bool *pb, const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO) +{ + BN::init(pb, cp, mode); +} + +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void initPairing(const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO) +{ + bool b; + BN::init(&b, cp, mode); + if (!b) throw cybozu::Exception("bn:initPairing"); +} +#endif + +inline void initG1only(bool *pb, const mcl::EcParam& para) +{ + local::StaticVar<>::param.initG1only(pb, para); + if (!*pb) return; + G1::setMulArrayGLV(0); + G2::setMulArrayGLV(0); + Fp12::setPowArrayGLV(0); + G1::setCompressedExpression(); + G2::setCompressedExpression(); +} + +inline const G1& getG1basePoint() +{ + return local::StaticVar<>::param.basePoint; +} + +} } // mcl::bn + diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/bn256.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/bn256.hpp new file mode 100644 index 000000000..7a5da7a05 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/bn256.hpp @@ -0,0 +1,15 @@ +#pragma once +/** + @file + @brief preset class for 256-bit optimal ate pairing over BN curves + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#define MCL_MAX_FP_BIT_SIZE 256 +#include <mcl/bn.hpp> + +namespace mcl { namespace bn256 { +using namespace mcl::bn; +} } + diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/bn384.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/bn384.hpp new file mode 100644 index 000000000..8aa14fe5c --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/bn384.hpp @@ -0,0 +1,15 @@
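/*
	usage sketch for these thin preset headers (illustrative only): each one just
	pins MCL_MAX_FP_BIT_SIZE before including mcl/bn.hpp, e.g.
		#include <mcl/bn256.hpp>
		using namespace mcl::bn256;
		bool b;
		initPairing(&b, mcl::BN254);
*/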
+#pragma once +/** + @file + @brief preset class for 384-bit optimal ate pairing over BN curves + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#define MCL_MAX_FP_BIT_SIZE 384 +#include <mcl/bn.hpp> +// #define MCL_MAX_FR_BIT_SIZE 256 // can set if BLS12_381 + +namespace mcl { namespace bn384 { +using namespace mcl::bn; +} } diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/bn512.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/bn512.hpp new file mode 100644 index 000000000..c87ad9035 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/bn512.hpp @@ -0,0 +1,14 @@ +#pragma once +/** + @file + @brief preset class for 512-bit optimal ate pairing over BN curves + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#define MCL_MAX_FP_BIT_SIZE 512 +#include <mcl/bn.hpp> + +namespace mcl { namespace bn512 { +using namespace mcl::bn; +} } diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/conversion.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/conversion.hpp new file mode 100644 index 000000000..7a04b7fa2 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/conversion.hpp @@ -0,0 +1,495 @@ +#pragma once +#include +#include +/** + @file + @brief conversion bin/dec/hex <=> array + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4127) +#endif + +namespace mcl { namespace fp { + +namespace local { + +inline bool isSpace(char c) +{ + return c == ' ' || c == '\t' || c == '\r' || c == '\n'; +} +template<class InputStream> +bool skipSpace(char *c, InputStream& is) +{ + for (;;) { + if (!cybozu::readChar(c, is)) return false; + if (!isSpace(*c)) return true; + } +} + +#ifndef CYBOZU_DONT_USE_STRING +template<class InputStream> +void loadWord(std::string& s, InputStream& is) +{ + s.clear(); + char c; + if (!skipSpace(&c, is)) return; + s = c; + for (;;) { + if (!cybozu::readChar(&c, is)) return; + if (isSpace(c)) break; + s += c; + } +} +#endif + +template<class InputStream> +size_t loadWord(char *buf, size_t bufSize, InputStream& is) +{ + if (bufSize == 0) return 0; + char c; + if (!skipSpace(&c, is)) return 0; + size_t pos = 0; + buf[pos++] = c; + for (;;) { + if (!cybozu::readChar(&c, is)) break; + if (isSpace(c)) break; + if (pos == bufSize) return 0; + buf[pos++] = c; + } + return pos; +} + + +/* + q = x[] / y + @retval r = x[] % y + @note accept q == x +*/ +inline uint32_t divU32(uint32_t *q, const uint32_t *x, size_t xn, uint32_t y) +{ + if (xn == 0) return 0; + uint32_t r = 0; + for (int i = (int)xn - 1; i >= 0; i--) { + uint64_t t = (uint64_t(r) << 32) | x[i]; + q[i] = uint32_t(t / y); + r = uint32_t(t % y); + } + return r; +} + +/* + z[0, xn) = x[0, xn) * y + return z[xn] + @note accept z == x +*/ +inline uint32_t mulU32(uint32_t *z, const uint32_t *x, size_t xn, uint32_t y) +{ + uint32_t H = 0; + for (size_t i = 0; i < xn; i++) { + uint32_t t = H; + uint64_t v = uint64_t(x[i]) * y; + uint32_t L = uint32_t(v); + H = uint32_t(v >> 32); + z[i] = t + L; + if (z[i] < t) { + H++; + } + } + return H; +} + +/* + x[0, xn) += y + return 1 if overflow else 0 +*/ +inline uint32_t addU32(uint32_t *x, size_t xn, uint32_t y) +{ + uint32_t t = x[0] + y; + x[0] = t; + if (t >= y) return 0; + for (size_t i = 1; i < xn; i++) { + t = x[i] + 1; + x[i] = t; + if (t != 0) return 0; + } + return 1; +} + +inline uint32_t decToU32(const char *p, size_t size,
bool *pb) +{ + assert(0 < size && size <= 9); + uint32_t x = 0; + for (size_t i = 0; i < size; i++) { + char c = p[i]; + if (c < '0' || c > '9') { + *pb = false; + return 0; + } + x = x * 10 + uint32_t(c - '0'); + } + *pb = true; + return x; +} + +inline bool hexCharToUint8(uint8_t *v, char _c) +{ + uint32_t c = uint8_t(_c); // cast is necessary + if (c - '0' <= '9' - '0') { + c = c - '0'; + } else if (c - 'a' <= 'f' - 'a') { + c = (c - 'a') + 10; + } else if (c - 'A' <= 'F' - 'A') { + c = (c - 'A') + 10; + } else { + return false; + } + *v = uint8_t(c); + return true; +} + +template +bool hexToUint(UT *px, const char *p, size_t size) +{ + assert(0 < size && size <= sizeof(UT) * 2); + UT x = 0; + for (size_t i = 0; i < size; i++) { + uint8_t v; + if (!hexCharToUint8(&v, p[i])) return false; + x = x * 16 + v; + } + *px = x; + return true; +} + +template +bool binToUint(UT *px, const char *p, size_t size) +{ + assert(0 < size && size <= sizeof(UT) * 8); + UT x = 0; + for (size_t i = 0; i < size; i++) { + UT c = static_cast(p[i]); + if (c == '0') { + x = x * 2; + } else if (c == '1') { + x = x * 2 + 1; + } else { + return false; + } + } + *px = x; + return true; +} + +inline bool parsePrefix(size_t *readSize, bool *isMinus, int *base, const char *buf, size_t bufSize) +{ + if (bufSize == 0) return false; + size_t pos = 0; + if (*buf == '-') { + if (bufSize == 1) return false; + *isMinus = true; + buf++; + pos++; + } else { + *isMinus = false; + } + if (buf[0] == '0') { + if (bufSize > 1 && buf[1] == 'x') { + if (*base == 0 || *base == 16) { + *base = 16; + pos += 2; + } else { + return false; + } + } else if (bufSize > 1 && buf[1] == 'b') { + if (*base == 0 || *base == 2) { + *base = 2; + pos += 2; + } else { + return false; + } + } + } + if (*base == 0) *base = 10; + if (pos == bufSize) return false; + *readSize = pos; + return true; +} + +} // mcl::fp::local + +/* + convert little endian x[0, xn) to buf + return written size if success else 0 + data is buf[bufSize - retval, bufSize) + start "0x" if withPrefix +*/ +template +size_t arrayToHex(char *buf, size_t bufSize, const T *x, size_t n, bool withPrefix = false) +{ + size_t fullN = 0; + if (n > 1) { + size_t pos = n - 1; + while (pos > 0) { + if (x[pos]) break; + pos--; + } + if (pos > 0) fullN = pos; + } + const T v = n == 0 ? 0 : x[fullN]; + const size_t topLen = cybozu::getHexLength(v); + const size_t startPos = withPrefix ? 2 : 0; + const size_t lenT = sizeof(T) * 2; + const size_t totalSize = startPos + fullN * lenT + topLen; + if (totalSize > bufSize) return 0; + char *const top = buf + bufSize - totalSize; + if (withPrefix) { + top[0] = '0'; + top[1] = 'x'; + } + cybozu::itohex(&top[startPos], topLen, v, false); + for (size_t i = 0; i < fullN; i++) { + cybozu::itohex(&top[startPos + topLen + i * lenT], lenT, x[fullN - 1 - i], false); + } + return totalSize; +} + +/* + convert little endian x[0, xn) to buf + return written size if success else 0 + data is buf[bufSize - retval, bufSize) + start "0b" if withPrefix +*/ +template +size_t arrayToBin(char *buf, size_t bufSize, const T *x, size_t n, bool withPrefix) +{ + size_t fullN = 0; + if (n > 1) { + size_t pos = n - 1; + while (pos > 0) { + if (x[pos]) break; + pos--; + } + if (pos > 0) fullN = pos; + } + const T v = n == 0 ? 0 : x[fullN]; + const size_t topLen = cybozu::getBinLength(v); + const size_t startPos = withPrefix ? 
2 : 0; + const size_t lenT = sizeof(T) * 8; + const size_t totalSize = startPos + fullN * lenT + topLen; + if (totalSize > bufSize) return 0; + char *const top = buf + bufSize - totalSize; + if (withPrefix) { + top[0] = '0'; + top[1] = 'b'; + } + cybozu::itobin(&top[startPos], topLen, v); + for (size_t i = 0; i < fullN; i++) { + cybozu::itobin(&top[startPos + topLen + i * lenT], lenT, x[fullN - 1 - i]); + } + return totalSize; +} + +/* + convert hex string to x[0..xn) + hex string = [0-9a-fA-F]+ +*/ +template<class UT> +inline size_t hexToArray(UT *x, size_t maxN, const char *buf, size_t bufSize) +{ + if (bufSize == 0) return 0; + const size_t unitLen = sizeof(UT) * 2; + const size_t q = bufSize / unitLen; + const size_t r = bufSize % unitLen; + const size_t requireSize = q + (r ? 1 : 0); + if (maxN < requireSize) return 0; + for (size_t i = 0; i < q; i++) { + if (!local::hexToUint(&x[i], &buf[r + (q - 1 - i) * unitLen], unitLen)) return 0; + } + if (r) { + if (!local::hexToUint(&x[q], buf, r)) return 0; + } + return requireSize; +} +/* + convert bin string to x[0..xn) + bin string = [01]+ +*/ +template<class UT> +inline size_t binToArray(UT *x, size_t maxN, const char *buf, size_t bufSize) +{ + if (bufSize == 0) return 0; + const size_t unitLen = sizeof(UT) * 8; + const size_t q = bufSize / unitLen; + const size_t r = bufSize % unitLen; + const size_t requireSize = q + (r ? 1 : 0); + if (maxN < requireSize) return 0; + for (size_t i = 0; i < q; i++) { + if (!local::binToUint(&x[i], &buf[r + (q - 1 - i) * unitLen], unitLen)) return 0; + } + if (r) { + if (!local::binToUint(&x[q], buf, r)) return 0; + } + return requireSize; +} + +/* + little endian x[0, xn) to buf + return written size if success else 0 + data is buf[bufSize - retval, bufSize) +*/ +template<class UT> +inline size_t arrayToDec(char *buf, size_t bufSize, const UT *x, size_t xn) +{ + const size_t maxN = 64; + uint32_t t[maxN]; + if (sizeof(UT) == 8) { + xn *= 2; + } + if (xn > maxN) return 0; + memcpy(t, x, xn * sizeof(t[0])); + + const size_t width = 9; + const uint32_t i1e9 = 1000000000U; + size_t pos = 0; + for (;;) { + uint32_t r = local::divU32(t, t, xn, i1e9); + while (xn > 0 && t[xn - 1] == 0) xn--; + size_t len = cybozu::itoa_local::uintToDec(buf, bufSize - pos, r); + if (len == 0) return 0; + assert(0 < len && len <= width); + if (xn == 0) return pos + len; + // fill (width - len) '0' + for (size_t j = 0; j < width - len; j++) { + buf[bufSize - pos - width + j] = '0'; + } + pos += width; + } +} + +/* + convert buf[0, bufSize) to x[0, num) + return written num if success else 0 +*/ +template<class UT> +inline size_t decToArray(UT *_x, size_t maxN, const char *buf, size_t bufSize) +{ + assert(sizeof(UT) == 4 || sizeof(UT) == 8); + const size_t width = 9; + const uint32_t i1e9 = 1000000000U; + if (maxN == 0) return 0; + if (sizeof(UT) == 8) { + maxN *= 2; + } + uint32_t *x = reinterpret_cast<uint32_t*>(_x); + size_t xn = 1; + x[0] = 0; + while (bufSize > 0) { + size_t n = bufSize % width; + if (n == 0) n = width; + bool b; + uint32_t v = local::decToU32(buf, n, &b); + if (!b) return 0; + uint32_t H = local::mulU32(x, x, xn, i1e9); + if (H > 0) { + if (xn == maxN) return 0; + x[xn++] = H; + } + H = local::addU32(x, xn, v); + if (H > 0) { + if (xn == maxN) return 0; + x[xn++] = H; + } + buf += n; + bufSize -= n; + } + if (sizeof(UT) == 8 && (xn & 1)) { + x[xn++] = 0; + } + return xn / (sizeof(UT) / 4); +} + +/* + return written size if success else 0 + REMARK : the top of string is buf + bufSize - retval +*/ +template<class UT> +size_t arrayToStr(char *buf, size_t
bufSize, const UT *x, size_t n, int base, bool withPrefix) +{ + switch (base) { + case 0: + case 10: + return arrayToDec(buf, bufSize, x, n); + case 16: + return arrayToHex(buf, bufSize, x, n, withPrefix); + case 2: + return arrayToBin(buf, bufSize, x, n, withPrefix); + default: + return 0; + } +} + +template<class UT> +size_t strToArray(bool *pIsMinus, UT *x, size_t xN, const char *buf, size_t bufSize, int ioMode) +{ + ioMode &= 31; + size_t readSize; + if (!local::parsePrefix(&readSize, pIsMinus, &ioMode, buf, bufSize)) return 0; + switch (ioMode) { + case 10: + return decToArray(x, xN, buf + readSize, bufSize - readSize); + case 16: + return hexToArray(x, xN, buf + readSize, bufSize - readSize); + case 2: + return binToArray(x, xN, buf + readSize, bufSize - readSize); + default: + return 0; + } +} + +/* + convert src[0, n) to (n * 2) byte hex string and write it to os + return true if success else false +*/ +template<class OutputStream> +void writeHexStr(bool *pb, OutputStream& os, const void *src, size_t n) +{ + const uint8_t *p = (const uint8_t *)src; + for (size_t i = 0; i < n; i++) { + char hex[2]; + cybozu::itohex(hex, sizeof(hex), p[i], false); + cybozu::write(pb, os, hex, sizeof(hex)); + if (!*pb) return; + } + *pb = true; +} +/* + read hex string from is and convert it to byte array + return written buffer size +*/ +template<class InputStream> +inline size_t readHexStr(void *buf, size_t n, InputStream& is) +{ + bool b; + uint8_t *dst = (uint8_t *)buf; + for (size_t i = 0; i < n; i++) { + uint8_t L, H; + char c[2]; + if (cybozu::readSome(c, sizeof(c), is) != sizeof(c)) return i; + b = local::hexCharToUint8(&H, c[0]); + if (!b) return i; + b = local::hexCharToUint8(&L, c[1]); + if (!b) return i; + dst[i] = (H << 4) | L; + } + return n; +} + +} } // mcl::fp + +#ifdef _MSC_VER + #pragma warning(pop) +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/curve_type.h b/vendor/github.com/byzantine-lab/mcl/include/mcl/curve_type.h new file mode 100644 index 000000000..9e4a941a0 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/curve_type.h @@ -0,0 +1,35 @@ +#pragma once +/** + @file + @brief curve type + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ + +enum { + MCL_BN254 = 0, + MCL_BN381_1 = 1, + MCL_BN381_2 = 2, + MCL_BN462 = 3, + MCL_BN_SNARK1 = 4, + MCL_BLS12_381 = 5, + MCL_BN160 = 6, + + /* + for only G1 + the size of curve must be less than or equal to MCLBN_FP_UNIT_SIZE + */ + MCL_EC_BEGIN = 100, + MCL_SECP192K1 = MCL_EC_BEGIN, + MCL_SECP224K1 = 101, + MCL_SECP256K1 = 102, + MCL_SECP384R1 = 103, + MCL_SECP521R1 = 104, + MCL_NIST_P192 = 105, + MCL_NIST_P224 = 106, + MCL_NIST_P256 = 107, + MCL_EC_END = MCL_NIST_P256 + 1, + MCL_NIST_P384 = MCL_SECP384R1, + MCL_NIST_P521 = MCL_SECP521R1 +}; diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/ec.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/ec.hpp new file mode 100644 index 000000000..b8eb10be3 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/ec.hpp @@ -0,0 +1,1045 @@ +#pragma once +/** + @file + @brief elliptic curve + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include +#include +#include +#include + +//#define MCL_EC_USE_AFFINE + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4458) +#endif + +namespace mcl { + +namespace ec { + +enum Mode { + Jacobi = 0, + Proj = 1 +}; + +} // mcl::ec + +/* + elliptic curve + y^2 = x^3 + ax + b (affine) + y^2 =
x^3 + axz^4 + bz^6 (Jacobi) x = X/Z^2, y = Y/Z^3 +*/ +template<class _Fp> +class EcT : public fp::Serializable<EcT<_Fp> > { + enum { + zero, + minus3, + generic + }; +public: + typedef _Fp Fp; + typedef _Fp BaseFp; +#ifdef MCL_EC_USE_AFFINE + Fp x, y; + bool inf_; +#else + Fp x, y, z; + static int mode_; +#endif + static Fp a_; + static Fp b_; + static int specialA_; + static int ioMode_; + /* + order_ is the order of G2 which is the subgroup of EcT. + check the order of the elements if verifyOrder_ is true + */ + static bool verifyOrder_; + static mpz_class order_; + static void (*mulArrayGLV)(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime); + /* default constructor is undefined value */ + EcT() {} + EcT(const Fp& _x, const Fp& _y) + { + set(_x, _y); + } + bool isNormalized() const + { +#ifdef MCL_EC_USE_AFFINE + return true; +#else + return isZero() || z.isOne(); +#endif + } +#ifndef MCL_EC_USE_AFFINE +private: + void normalizeJacobi() + { + assert(!z.isZero()); + Fp rz2; + Fp::inv(z, z); + Fp::sqr(rz2, z); + x *= rz2; + y *= rz2; + y *= z; + z = 1; + } + void normalizeProj() + { + assert(!z.isZero()); + Fp::inv(z, z); + x *= z; + y *= z; + z = 1; + } + // Y^2 == X(X^2 + aZ^4) + bZ^6 + bool isValidJacobi() const + { + Fp y2, x2, z2, z4, t; + Fp::sqr(x2, x); + Fp::sqr(y2, y); + Fp::sqr(z2, z); + Fp::sqr(z4, z2); + Fp::mul(t, z4, a_); + t += x2; + t *= x; + z4 *= z2; + z4 *= b_; + t += z4; + return y2 == t; + } + // (Y^2 - bZ^2)Z = X(X^2 + aZ^2) + bool isValidProj() const + { + Fp y2, x2, z2, t; + Fp::sqr(x2, x); + Fp::sqr(y2, y); + Fp::sqr(z2, z); + Fp::mul(t, a_, z2); + t += x2; + t *= x; + z2 *= b_; + y2 -= z2; + y2 *= z; + return y2 == t; + } +#endif + // y^2 == (x^2 + a)x + b + static inline bool isValid(const Fp& _x, const Fp& _y) + { + Fp y2, t; + Fp::sqr(y2, _y); + Fp::sqr(t, _x); + t += a_; + t *= _x; + t += b_; + return y2 == t; + } +public: + void normalize() + { +#ifndef MCL_EC_USE_AFFINE + if (isNormalized()) return; + switch (mode_) { + case ec::Jacobi: + normalizeJacobi(); + break; + case ec::Proj: + normalizeProj(); + break; + } +#endif + } + static void normalize(EcT& y, const EcT& x) + { + y = x; + y.normalize(); + } + static inline void init(const Fp& a, const Fp& b, int mode = ec::Jacobi) + { + a_ = a; + b_ = b; + if (a_.isZero()) { + specialA_ = zero; + } else if (a_ == -3) { + specialA_ = minus3; + } else { + specialA_ = generic; + } + ioMode_ = 0; + verifyOrder_ = false; + order_ = 0; + mulArrayGLV = 0; +#ifdef MCL_EC_USE_AFFINE + cybozu::disable_warning_unused_variable(mode); +#else + assert(mode == ec::Jacobi || mode == ec::Proj); + mode_ = mode; +#endif + } + /* + verify the order of *this is equal to order if order != 0 + in constructor, set, setStr, operator<<().
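	(each such check costs one scalar multiplication by order; see isValidOrder() below)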
+ */ + static void setOrder(const mpz_class& order) + { + if (order != 0) { + verifyOrder_ = true; + order_ = order; + } else { + verifyOrder_ = false; + // don't clear order_ because it is used for isValidOrder() + } + } + static void setMulArrayGLV(void f(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime)) + { + mulArrayGLV = f; + } + static inline void init(bool *pb, const char *astr, const char *bstr, int mode = ec::Jacobi) + { + Fp a, b; + a.setStr(pb, astr); + if (!*pb) return; + b.setStr(pb, bstr); + if (!*pb) return; + init(a, b, mode); + } + // verify the order + bool isValidOrder() const + { + EcT Q; + EcT::mulGeneric(Q, *this, order_); + return Q.isZero(); + } + bool isValid() const + { + if (isZero()) return true; + bool isOK = false; +#ifndef MCL_EC_USE_AFFINE + if (!z.isOne()) { + switch (mode_) { + case ec::Jacobi: + isOK = isValidJacobi(); + break; + case ec::Proj: + isOK = isValidProj(); + break; + } + } else +#endif + { + isOK = isValid(x, y); + } + if (!isOK) return false; + if (verifyOrder_) return isValidOrder(); + return true; + } + void set(bool *pb, const Fp& _x, const Fp& _y, bool verify = true) + { + if (verify && !isValid(_x, _y)) { + *pb = false; + return; + } + x = _x; y = _y; +#ifdef MCL_EC_USE_AFFINE + inf_ = false; +#else + z = 1; +#endif + if (verify && verifyOrder_ && !isValidOrder()) { + *pb = false; + } else { + *pb = true; + } + } + void clear() + { +#ifdef MCL_EC_USE_AFFINE + inf_ = true; +#else + z.clear(); +#endif + x.clear(); + y.clear(); + } +#ifndef MCL_EC_USE_AFFINE + static inline void dblNoVerifyInfJacobi(EcT& R, const EcT& P) + { + Fp S, M, t, y2; + Fp::sqr(y2, P.y); + Fp::mul(S, P.x, y2); + const bool isPzOne = P.z.isOne(); + S += S; + S += S; + Fp::sqr(M, P.x); + switch (specialA_) { + case zero: + Fp::add(t, M, M); + M += t; + break; + case minus3: + if (isPzOne) { + M -= P.z; + } else { + Fp::sqr(t, P.z); + Fp::sqr(t, t); + M -= t; + } + Fp::add(t, M, M); + M += t; + break; + case generic: + default: + if (isPzOne) { + t = a_; + } else { + Fp::sqr(t, P.z); + Fp::sqr(t, t); + t *= a_; + } + t += M; + M += M; + M += t; + break; + } + Fp::sqr(R.x, M); + R.x -= S; + R.x -= S; + if (isPzOne) { + R.z = P.y; + } else { + Fp::mul(R.z, P.y, P.z); + } + R.z += R.z; + Fp::sqr(y2, y2); + y2 += y2; + y2 += y2; + y2 += y2; + Fp::sub(R.y, S, R.x); + R.y *= M; + R.y -= y2; + } + static inline void dblNoVerifyInfProj(EcT& R, const EcT& P) + { + const bool isPzOne = P.z.isOne(); + Fp w, t, h; + switch (specialA_) { + case zero: + Fp::sqr(w, P.x); + Fp::add(t, w, w); + w += t; + break; + case minus3: + Fp::sqr(w, P.x); + if (isPzOne) { + w -= P.z; + } else { + Fp::sqr(t, P.z); + w -= t; + } + Fp::add(t, w, w); + w += t; + break; + case generic: + default: + if (isPzOne) { + w = a_; + } else { + Fp::sqr(w, P.z); + w *= a_; + } + Fp::sqr(t, P.x); + w += t; + w += t; + w += t; // w = a z^2 + 3x^2 + break; + } + if (isPzOne) { + R.z = P.y; + } else { + Fp::mul(R.z, P.y, P.z); // s = yz + } + Fp::mul(t, R.z, P.x); + t *= P.y; // xys + t += t; + t += t; // 4(xys) ; 4B + Fp::sqr(h, w); + h -= t; + h -= t; // w^2 - 8B + Fp::mul(R.x, h, R.z); + t -= h; // h is free + t *= w; + Fp::sqr(w, P.y); + R.x += R.x; + R.z += R.z; + Fp::sqr(h, R.z); + w *= h; + R.z *= h; + Fp::sub(R.y, t, w); + R.y -= w; + } +#endif + static inline void dblNoVerifyInf(EcT& R, const EcT& P) + { +#ifdef MCL_EC_USE_AFFINE + Fp t, s; + Fp::sqr(t, P.x); + Fp::add(s, t, t); + t += s; + t += a_; + Fp::add(s, P.y, P.y); + t /= s; + Fp::sqr(s, t); + s -= P.x; + Fp x3; 
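/* (affine doubling computed by this branch, for reference: lambda = (3x^2 + a) / (2y), x3 = lambda^2 - 2x, y3 = lambda (x - x3) - y) */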
+ Fp::sub(x3, s, P.x); + Fp::sub(s, P.x, x3); + s *= t; + Fp::sub(R.y, s, P.y); + R.x = x3; + R.inf_ = false; +#else + switch (mode_) { + case ec::Jacobi: + dblNoVerifyInfJacobi(R, P); + break; + case ec::Proj: + dblNoVerifyInfProj(R, P); + break; + } +#endif + } + static inline void dbl(EcT& R, const EcT& P) + { + if (P.isZero()) { + R.clear(); + return; + } + dblNoVerifyInf(R, P); + } +#ifndef MCL_EC_USE_AFFINE + static inline void addJacobi(EcT& R, const EcT& P, const EcT& Q, bool isPzOne, bool isQzOne) + { + Fp r, U1, S1, H, H3; + if (isPzOne) { + // r = 1; + } else { + Fp::sqr(r, P.z); + } + if (isQzOne) { + U1 = P.x; + if (isPzOne) { + H = Q.x; + } else { + Fp::mul(H, Q.x, r); + } + H -= U1; + S1 = P.y; + } else { + Fp::sqr(S1, Q.z); + Fp::mul(U1, P.x, S1); + if (isPzOne) { + H = Q.x; + } else { + Fp::mul(H, Q.x, r); + } + H -= U1; + S1 *= Q.z; + S1 *= P.y; + } + if (isPzOne) { + r = Q.y; + } else { + r *= P.z; + r *= Q.y; + } + r -= S1; + if (H.isZero()) { + if (r.isZero()) { + dblNoVerifyInf(R, P); + } else { + R.clear(); + } + return; + } + if (isPzOne) { + R.z = H; + } else { + Fp::mul(R.z, P.z, H); + } + if (!isQzOne) { + R.z *= Q.z; + } + Fp::sqr(H3, H); // H^2 + Fp::sqr(R.y, r); // r^2 + U1 *= H3; // U1 H^2 + H3 *= H; // H^3 + R.y -= U1; + R.y -= U1; + Fp::sub(R.x, R.y, H3); + U1 -= R.x; + U1 *= r; + H3 *= S1; + Fp::sub(R.y, U1, H3); + } + static inline void addProj(EcT& R, const EcT& P, const EcT& Q, bool isPzOne, bool isQzOne) + { + Fp r, PyQz, v, A, vv; + if (isQzOne) { + r = P.x; + PyQz = P.y; + } else { + Fp::mul(r, P.x, Q.z); + Fp::mul(PyQz, P.y, Q.z); + } + if (isPzOne) { + A = Q.y; + v = Q.x; + } else { + Fp::mul(A, Q.y, P.z); + Fp::mul(v, Q.x, P.z); + } + v -= r; + if (v.isZero()) { + if (A == PyQz) { + dblNoVerifyInf(R, P); + } else { + R.clear(); + } + return; + } + Fp::sub(R.y, A, PyQz); + Fp::sqr(A, R.y); + Fp::sqr(vv, v); + r *= vv; + vv *= v; + if (isQzOne) { + R.z = P.z; + } else { + if (isPzOne) { + R.z = Q.z; + } else { + Fp::mul(R.z, P.z, Q.z); + } + } + // R.z = 1 if isPzOne && isQzOne + if (isPzOne && isQzOne) { + R.z = vv; + } else { + A *= R.z; + R.z *= vv; + } + A -= vv; + vv *= PyQz; + A -= r; + A -= r; + Fp::mul(R.x, v, A); + r -= A; + R.y *= r; + R.y -= vv; + } +#endif + static inline void add(EcT& R, const EcT& P, const EcT& Q) { + if (P.isZero()) { R = Q; return; } + if (Q.isZero()) { R = P; return; } + if (&P == &Q) { + dblNoVerifyInf(R, P); + return; + } +#ifdef MCL_EC_USE_AFFINE + Fp t; + Fp::neg(t, Q.y); + if (P.y == t) { R.clear(); return; } + Fp::sub(t, Q.x, P.x); + if (t.isZero()) { + dblNoVerifyInf(R, P); + return; + } + Fp s; + Fp::sub(s, Q.y, P.y); + Fp::div(t, s, t); + R.inf_ = false; + Fp x3; + Fp::sqr(x3, t); + x3 -= P.x; + x3 -= Q.x; + Fp::sub(s, P.x, x3); + s *= t; + Fp::sub(R.y, s, P.y); + R.x = x3; +#else + bool isPzOne = P.z.isOne(); + bool isQzOne = Q.z.isOne(); + switch (mode_) { + case ec::Jacobi: + addJacobi(R, P, Q, isPzOne, isQzOne); + break; + case ec::Proj: + addProj(R, P, Q, isPzOne, isQzOne); + break; + } +#endif + } + static inline void sub(EcT& R, const EcT& P, const EcT& Q) + { + EcT nQ; + neg(nQ, Q); + add(R, P, nQ); + } + static inline void neg(EcT& R, const EcT& P) + { + if (P.isZero()) { + R.clear(); + return; + } + R.x = P.x; + Fp::neg(R.y, P.y); +#ifdef MCL_EC_USE_AFFINE + R.inf_ = false; +#else + R.z = P.z; +#endif + } + templateclass FpT> + static inline void mul(EcT& z, const EcT& x, const FpT& y) + { + fp::Block b; + y.getBlock(b); + mulArray(z, x, b.p, b.n, false); + } + static inline void mul(EcT& z, const 
EcT& x, int64_t y) + { + const uint64_t u = fp::abs_(y); +#if MCL_SIZEOF_UNIT == 8 + mulArray(z, x, &u, 1, y < 0); +#else + uint32_t ua[2] = { uint32_t(u), uint32_t(u >> 32) }; + size_t un = ua[1] ? 2 : 1; + mulArray(z, x, ua, un, y < 0); +#endif + } + static inline void mul(EcT& z, const EcT& x, const mpz_class& y) + { + mulArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0); + } + templateclass FpT> + static inline void mulCT(EcT& z, const EcT& x, const FpT& y) + { + fp::Block b; + y.getBlock(b); + mulArray(z, x, b.p, b.n, false, true); + } + static inline void mulCT(EcT& z, const EcT& x, const mpz_class& y) + { + mulArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, true); + } + /* + 0 <= P for any P + (Px, Py) <= (P'x, P'y) iff Px < P'x or Px == P'x and Py <= P'y + @note compare function calls normalize() + */ + template + static inline int compareFunc(const EcT& P_, const EcT& Q_, F comp) + { + const bool QisZero = Q_.isZero(); + if (P_.isZero()) { + if (QisZero) return 0; + return -1; + } + if (QisZero) return 1; + EcT P(P_), Q(Q_); + P.normalize(); + Q.normalize(); + int c = comp(P.x, Q.x); + if (c > 0) return 1; + if (c < 0) return -1; + return comp(P.y, Q.y); + } + static inline int compare(const EcT& P, const EcT& Q) + { + return compareFunc(P, Q, Fp::compare); + } + static inline int compareRaw(const EcT& P, const EcT& Q) + { + return compareFunc(P, Q, Fp::compareRaw); + } + bool isZero() const + { +#ifdef MCL_EC_USE_AFFINE + return inf_; +#else + return z.isZero(); +#endif + } + static inline bool isMSBserialize() + { + return !b_.isZero() && (Fp::BaseFp::getBitSize() & 7) != 0; + } + template + void save(bool *pb, OutputStream& os, int ioMode) const + { + const char sep = *fp::getIoSeparator(ioMode); + if (ioMode & IoEcProj) { + cybozu::writeChar(pb, os, '4'); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + x.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + y.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } +#ifndef MCL_EC_USE_AFFINE + z.save(pb, os, ioMode); +#endif + return; + } + EcT P(*this); + P.normalize(); + if (ioMode & (IoSerialize | IoSerializeHexStr)) { + /* + if (isMSBserialize()) { + // n bytes + x | (y.isOdd ? 0x80 : 0) + } else { + // n + 1 bytes + (y.isOdd ? 3 : 2), x + } + */ + const size_t n = Fp::getByteSize(); + const size_t adj = isMSBserialize() ? 0 : 1; + char buf[sizeof(Fp) + 1]; + if (isZero()) { + memset(buf, 0, n + adj); + } else { + cybozu::MemoryOutputStream mos(buf + adj, n); + P.x.save(pb, mos, IoSerialize); if (!*pb) return; + if (adj) { + buf[0] = P.y.isOdd() ? 3 : 2; + } else { + if (P.y.isOdd()) { + buf[n - 1] |= 0x80; + } + } + } + if (ioMode & IoSerializeHexStr) { + mcl::fp::writeHexStr(pb, os, buf, n + adj); + } else { + cybozu::write(pb, os, buf, n + adj); + } + return; + } + if (isZero()) { + cybozu::writeChar(pb, os, '0'); + return; + } + if (ioMode & IoEcCompY) { + cybozu::writeChar(pb, os, P.y.isOdd() ? 
'3' : '2'); + if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + P.x.save(pb, os, ioMode); + } else { + cybozu::writeChar(pb, os, '1'); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + P.x.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + P.y.save(pb, os, ioMode); + } + } + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode) + { +#ifdef MCL_EC_USE_AFFINE + inf_ = false; +#else + z = 1; +#endif + if (ioMode & (IoSerialize | IoSerializeHexStr)) { + const size_t n = Fp::getByteSize(); + const size_t adj = isMSBserialize() ? 0 : 1; + const size_t n1 = n + adj; + char buf[sizeof(Fp) + 1]; + size_t readSize; + if (ioMode & IoSerializeHexStr) { + readSize = mcl::fp::readHexStr(buf, n1, is); + } else { + readSize = cybozu::readSome(buf, n1, is); + } + if (readSize != n1) { + *pb = false; + return; + } + if (fp::isZeroArray(buf, n1)) { + clear(); + *pb = true; + return; + } + bool isYodd; + if (adj) { + char c = buf[0]; + if (c != 2 && c != 3) { + *pb = false; + return; + } + isYodd = c == 3; + } else { + isYodd = (buf[n - 1] >> 7) != 0; + buf[n - 1] &= 0x7f; + } + x.setArray(pb, buf + adj, n); + if (!*pb) return; + *pb = getYfromX(y, x, isYodd); + if (!*pb) return; + } else { + char c = 0; + if (!fp::local::skipSpace(&c, is)) { + *pb = false; + return; + } + if (c == '0') { + clear(); + *pb = true; + return; + } + x.load(pb, is, ioMode); if (!*pb) return; + if (c == '1') { + y.load(pb, is, ioMode); if (!*pb) return; + if (!isValid(x, y)) { + *pb = false; + return; + } + } else if (c == '2' || c == '3') { + bool isYodd = c == '3'; + *pb = getYfromX(y, x, isYodd); + if (!*pb) return; + } else if (c == '4') { + y.load(pb, is, ioMode); if (!*pb) return; +#ifndef MCL_EC_USE_AFFINE + z.load(pb, is, ioMode); if (!*pb) return; +#endif + } else { + *pb = false; + return; + } + } + if (verifyOrder_ && !isValidOrder()) { + *pb = false; + } else { + *pb = true; + } + } + // deprecated + static void setCompressedExpression(bool compressedExpression = true) + { + if (compressedExpression) { + ioMode_ |= IoEcCompY; + } else { + ioMode_ &= ~IoEcCompY; + } + } + /* + set IoMode for operator<<(), or operator>>() + */ + static void setIoMode(int ioMode) + { + assert(!(ioMode & 0xff)); + ioMode_ = ioMode; + } + static inline int getIoMode() { return Fp::BaseFp::getIoMode() | ioMode_; } + static inline void getWeierstrass(Fp& yy, const Fp& x) + { + Fp t; + Fp::sqr(t, x); + t += a_; + t *= x; + Fp::add(yy, t, b_); + } + static inline bool getYfromX(Fp& y, const Fp& x, bool isYodd) + { + getWeierstrass(y, x); + if (!Fp::squareRoot(y, y)) { + return false; + } + if (y.isOdd() ^ isYodd) { + Fp::neg(y, y); + } + return true; + } + inline friend EcT operator+(const EcT& x, const EcT& y) { EcT z; add(z, x, y); return z; } + inline friend EcT operator-(const EcT& x, const EcT& y) { EcT z; sub(z, x, y); return z; } + template<class INT> + inline friend EcT operator*(const EcT& x, const INT& y) { EcT z; mul(z, x, y); return z; } + EcT& operator+=(const EcT& x) { add(*this, *this, x); return *this; } + EcT& operator-=(const EcT& x) { sub(*this, *this, x); return *this; } + template<class INT> + EcT& operator*=(const INT& x) { mul(*this, *this, x); return *this; } + EcT operator-() const { EcT x; neg(x, *this); return x; } + bool operator==(const EcT& rhs) const + { + EcT R; + sub(R, *this, rhs); // QQQ : optimized later + return R.isZero(); + } + bool operator!=(const EcT& rhs) const { return
!operator==(rhs); } + bool operator<(const EcT& rhs) const + { + return compare(*this, rhs) < 0; + } + bool operator>=(const EcT& rhs) const { return !operator<(rhs); } + bool operator>(const EcT& rhs) const { return rhs < *this; } + bool operator<=(const EcT& rhs) const { return !operator>(rhs); } + static inline void mulArray(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime = false) + { + if (!constTime && x.isZero()) { + z.clear(); + return; + } + if (mulArrayGLV && (constTime || yn > 1)) { + mulArrayGLV(z, x, y, yn, isNegative, constTime); + return; + } + mulArrayBase(z, x, y, yn, isNegative, constTime); + } + static inline void mulArrayBase(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime) + { + EcT tmp; + const EcT *px = &x; + if (&z == &x) { + tmp = x; + px = &tmp; + } + z.clear(); + fp::powGeneric(z, *px, y, yn, EcT::add, EcT::dbl, EcT::normalize, constTime ? Fp::BaseFp::getBitSize() : 0); + if (isNegative) { + neg(z, z); + } + } + /* + generic mul + */ + static inline void mulGeneric(EcT& z, const EcT& x, const mpz_class& y, bool constTime = false) + { + mulArrayBase(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, constTime); + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + static inline void init(const std::string& astr, const std::string& bstr, int mode = ec::Jacobi) + { + bool b; + init(&b, astr.c_str(), bstr.c_str(), mode); + if (!b) throw cybozu::Exception("mcl:EcT:init"); + } + void set(const Fp& _x, const Fp& _y, bool verify = true) + { + bool b; + set(&b, _x, _y, verify); + if (!b) throw cybozu::Exception("ec:EcT:set") << _x << _y; + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("EcT:save"); + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("EcT:load"); + } +#endif +#ifndef CYBOZU_DONT_USE_STRING + // backward compatibility + static inline void setParam(const std::string& astr, const std::string& bstr, int mode = ec::Jacobi) + { + init(astr, bstr, mode); + } + friend inline std::istream& operator>>(std::istream& is, EcT& self) + { + self.load(is, fp::detectIoMode(getIoMode(), is)); + return is; + } + friend inline std::ostream& operator<<(std::ostream& os, const EcT& self) + { + self.save(os, fp::detectIoMode(getIoMode(), os)); + return os; + } +#endif +}; + +template<class Fp> Fp EcT<Fp>::a_; +template<class Fp> Fp EcT<Fp>::b_; +template<class Fp> int EcT<Fp>::specialA_; +template<class Fp> int EcT<Fp>::ioMode_; +template<class Fp> bool EcT<Fp>::verifyOrder_; +template<class Fp> mpz_class EcT<Fp>::order_; +template<class Fp> void (*EcT<Fp>::mulArrayGLV)(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime); +#ifndef MCL_EC_USE_AFFINE +template<class Fp> int EcT<Fp>::mode_; +#endif + +struct EcParam { + const char *name; + const char *p; + const char *a; + const char *b; + const char *gx; + const char *gy; + const char *n; + size_t bitSize; // bit length of p + int curveType; +}; + +} // mcl + +#ifdef CYBOZU_USE_BOOST +namespace mcl { +template<class Fp> +size_t hash_value(const mcl::EcT<Fp>& P_) +{ + if (P_.isZero()) return 0; + mcl::EcT<Fp> P(P_); P.normalize(); + return mcl::hash_value(P.y, mcl::hash_value(P.x)); +} + +} +#else +namespace std { CYBOZU_NAMESPACE_TR1_BEGIN + +template<class Fp> +struct hash<mcl::EcT<Fp> > { + size_t operator()(const mcl::EcT<Fp>& P_) const + { + if (P_.isZero()) return 0; + mcl::EcT<Fp> P(P_); P.normalize(); + return hash<Fp>()(P.y, hash<Fp>()(P.x)); + } +}; + +CYBOZU_NAMESPACE_TR1_END } // std +#endif + +#ifdef _MSC_VER
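/*
	minimal EcT usage sketch (illustrative only; assumes Fp has been initialized
	and (x, y) are valid affine coordinates):
		typedef mcl::EcT<Fp> Ec;
		Ec::init(a, b);       // curve y^2 = x^3 + ax + b
		Ec P(x, y);           // set() verifies the point (and its order if enabled)
		Ec Q;
		Ec::mul(Q, P, 12345); // Q = 12345 P
*/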
+ #pragma warning(pop) +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/ecdsa.h b/vendor/github.com/byzantine-lab/mcl/include/mcl/ecdsa.h new file mode 100644 index 000000000..daeb6be53 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/ecdsa.h @@ -0,0 +1,105 @@ +#pragma once +/** + @file + @brief C interface of ECDSA + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <stdint.h> // for uint64_t, uint8_t +#include <stddef.h> // for size_t + +#if defined(_MSC_VER) + #ifdef ECDSA_DLL_EXPORT + #define ECDSA_DLL_API __declspec(dllexport) + #else + #define ECDSA_DLL_API __declspec(dllimport) + #ifndef ECDSA_NO_AUTOLINK + #pragma comment(lib, "mclecdsa.lib") + #endif + #endif +#elif defined(__EMSCRIPTEN__) + #define ECDSA_DLL_API __attribute__((used)) +#else + #define ECDSA_DLL_API +#endif + +#ifndef mclSize + #ifdef __EMSCRIPTEN__ + // avoid 64-bit integer + #define mclSize unsigned int + #define mclInt int + #else + // use #define for cgo + #define mclSize size_t + #define mclInt int64_t + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef ECDSA_NOT_DEFINE_STRUCT + +typedef struct ecdsaSecretKey ecdsaSecretKey; +typedef struct ecdsaPublicKey ecdsaPublicKey; +typedef struct ecdsaSignature ecdsaSignature; + +#else + +typedef struct { + uint64_t d[4]; +} ecdsaSecretKey; + +typedef struct { + uint64_t d[4 * 3]; +} ecdsaPublicKey; + +typedef struct { + uint64_t d[4 * 2]; +} ecdsaSignature; + +#endif + +struct ecdsaPrecomputedPublicKey; + +/* + init library + return 0 if success + @note not threadsafe +*/ +ECDSA_DLL_API int ecdsaInit(void); + +// return written byte size if success else 0 +ECDSA_DLL_API mclSize ecdsaSecretKeySerialize(void *buf, mclSize maxBufSize, const ecdsaSecretKey *sec); +ECDSA_DLL_API mclSize ecdsaPublicKeySerialize(void *buf, mclSize maxBufSize, const ecdsaPublicKey *pub); +ECDSA_DLL_API mclSize ecdsaSignatureSerialize(void *buf, mclSize maxBufSize, const ecdsaSignature *sig); + +// return read byte size if success else 0 +ECDSA_DLL_API mclSize ecdsaSecretKeyDeserialize(ecdsaSecretKey* sec, const void *buf, mclSize bufSize); +ECDSA_DLL_API mclSize ecdsaPublicKeyDeserialize(ecdsaPublicKey* pub, const void *buf, mclSize bufSize); +ECDSA_DLL_API mclSize ecdsaSignatureDeserialize(ecdsaSignature* sig, const void *buf, mclSize bufSize); + +// return 0 if success +ECDSA_DLL_API int ecdsaSecretKeySetByCSPRNG(ecdsaSecretKey *sec); + +ECDSA_DLL_API void ecdsaGetPublicKey(ecdsaPublicKey *pub, const ecdsaSecretKey *sec); + +ECDSA_DLL_API void ecdsaSign(ecdsaSignature *sig, const ecdsaSecretKey *sec, const void *m, mclSize size); + +// return 1 if valid +ECDSA_DLL_API int ecdsaVerify(const ecdsaSignature *sig, const ecdsaPublicKey *pub, const void *m, mclSize size); +ECDSA_DLL_API int ecdsaVerifyPrecomputed(const ecdsaSignature *sig, const ecdsaPrecomputedPublicKey *pub, const void *m, mclSize size); + +// return nonzero if success +ECDSA_DLL_API ecdsaPrecomputedPublicKey *ecdsaPrecomputedPublicKeyCreate(); +// call this function to avoid memory leak +ECDSA_DLL_API void ecdsaPrecomputedPublicKeyDestroy(ecdsaPrecomputedPublicKey *ppub); +// return 0 if success +ECDSA_DLL_API int ecdsaPrecomputedPublicKeyInit(ecdsaPrecomputedPublicKey *ppub, const ecdsaPublicKey *pub); + +#ifdef __cplusplus +} +#endif + diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/ecdsa.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/ecdsa.hpp new file mode 100644 index 000000000..cf3ed3f65
--- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/ecdsa.hpp @@ -0,0 +1,257 @@ +#pragma once +/** + @file + @brief ECDSA + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include +#include +#include +#include + +namespace mcl { namespace ecdsa { + +namespace local { + +#ifndef MCLSHE_WIN_SIZE + #define MCLSHE_WIN_SIZE 10 +#endif +static const size_t winSize = MCLSHE_WIN_SIZE; + +struct FpTag; +struct ZnTag; + +} // mcl::ecdsa::local + +typedef mcl::FpT Fp; +typedef mcl::FpT Zn; +typedef mcl::EcT Ec; + +namespace local { + +struct Param { + mcl::EcParam ecParam; + Ec P; + mcl::fp::WindowMethod Pbase; +}; + +inline Param& getParam() +{ + static Param p; + return p; +} + +inline void be32toZn(Zn& x, const mcl::fp::Unit *buf) +{ + const size_t n = 32; + const unsigned char *p = (const unsigned char*)buf; + unsigned char be[n]; + for (size_t i = 0; i < n; i++) { + be[i] = p[n - 1 - i]; + } + x.setArrayMaskMod(be, n); +} + +/* + y = x mod n +*/ +inline void FpToZn(Zn& y, const Fp& x) +{ + fp::Block b; + x.getBlock(b); + y.setArrayMaskMod(b.p, b.n); +} + +inline void setHashOf(Zn& x, const void *msg, size_t msgSize) +{ + mcl::fp::Unit xBuf[256 / 8 / sizeof(mcl::fp::Unit)]; + uint32_t hashSize = mcl::fp::sha256(xBuf, sizeof(xBuf), msg, (uint32_t)msgSize); + assert(hashSize == sizeof(xBuf)); + (void)hashSize; + be32toZn(x, xBuf); +} + +} // mcl::ecdsa::local + +const local::Param& param = local::getParam(); + +inline void init(bool *pb) +{ + const mcl::EcParam& ecParam = mcl::ecparam::secp256k1; + Zn::init(pb, ecParam.n); + if (!*pb) return; + Fp::init(pb, ecParam.p); + if (!*pb) return; + Ec::init(pb, ecParam.a, ecParam.b); + if (!*pb) return; + Zn::setIoMode(16); + Fp::setIoMode(16); + Ec::setIoMode(mcl::IoEcAffine); + local::Param& p = local::getParam(); + p.ecParam = ecParam; + Fp x, y; + x.setStr(pb, ecParam.gx); + if (!*pb) return; + y.setStr(pb, ecParam.gy); + if (!*pb) return; + p.P.set(pb, x, y); + if (!*pb) return; + p.Pbase.init(pb, p.P, ecParam.bitSize, local::winSize); +} + +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void init() +{ + bool b; + init(&b); + if (!b) throw cybozu::Exception("ecdsa:init"); +} +#endif + +typedef Zn SecretKey; +typedef Ec PublicKey; + +struct PrecomputedPublicKey { + mcl::fp::WindowMethod pubBase_; + void init(bool *pb, const PublicKey& pub) + { + pubBase_.init(pb, pub, param.ecParam.bitSize, local::winSize); + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + void init(const PublicKey& pub) + { + bool b; + init(&b, pub); + if (!b) throw cybozu::Exception("ecdsa:PrecomputedPublicKey:init"); + } +#endif +}; + +inline void getPublicKey(PublicKey& pub, const SecretKey& sec) +{ + Ec::mul(pub, param.P, sec); + pub.normalize(); +} + +struct Signature : public mcl::fp::Serializable { + Zn r, s; + template + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + r.load(pb, is, ioMode); if (!*pb) return; + s.load(pb, is, ioMode); + } + template + void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + r.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + s.save(pb, os, ioMode); + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + template + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("ecdsa:Signature:load"); + } + template + void save(OutputStream& os, int ioMode = IoSerialize) 
const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("ecdsa:Signature:save"); + } +#endif +#ifndef CYBOZU_DONT_USE_STRING + friend std::istream& operator>>(std::istream& is, Signature& self) + { + self.load(is, fp::detectIoMode(Ec::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const Signature& self) + { + self.save(os, fp::detectIoMode(Ec::getIoMode(), os)); + return os; + } +#endif +}; + +inline void sign(Signature& sig, const SecretKey& sec, const void *msg, size_t msgSize) +{ + Zn& r = sig.r; + Zn& s = sig.s; + Zn z, k; + local::setHashOf(z, msg, msgSize); + Ec Q; + for (;;) { + k.setByCSPRNG(); + param.Pbase.mul(Q, k); + if (Q.isZero()) continue; + Q.normalize(); + local::FpToZn(r, Q.x); + if (r.isZero()) continue; + Zn::mul(s, r, sec); + s += z; + if (s.isZero()) continue; + s /= k; + return; + } +} + +namespace local { + +inline void mulDispatch(Ec& Q, const PublicKey& pub, const Zn& y) +{ + Ec::mul(Q, pub, y); +} + +inline void mulDispatch(Ec& Q, const PrecomputedPublicKey& ppub, const Zn& y) +{ + ppub.pubBase_.mul(Q, y); +} + +template +inline bool verify(const Signature& sig, const Pub& pub, const void *msg, size_t msgSize) +{ + const Zn& r = sig.r; + const Zn& s = sig.s; + if (r.isZero() || s.isZero()) return false; + Zn z, w, u1, u2; + local::setHashOf(z, msg, msgSize); + Zn::inv(w, s); + Zn::mul(u1, z, w); + Zn::mul(u2, r, w); + Ec Q1, Q2; + param.Pbase.mul(Q1, u1); +// Ec::mul(Q2, pub, u2); + local::mulDispatch(Q2, pub, u2); + Q1 += Q2; + if (Q1.isZero()) return false; + Q1.normalize(); + Zn x; + local::FpToZn(x, Q1.x); + return r == x; +} + +} // mcl::ecdsa::local + +inline bool verify(const Signature& sig, const PublicKey& pub, const void *msg, size_t msgSize) +{ + return local::verify(sig, pub, msg, msgSize); +} + +inline bool verify(const Signature& sig, const PrecomputedPublicKey& ppub, const void *msg, size_t msgSize) +{ + return local::verify(sig, ppub, msg, msgSize); +} + +} } // mcl::ecdsa + diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/ecparam.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/ecparam.hpp new file mode 100644 index 000000000..087bf8b6c --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/ecparam.hpp @@ -0,0 +1,191 @@ +#pragma once +/** + @file + @brief Elliptic curve parameter + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include +#include + +namespace mcl { namespace ecparam { + +const struct mcl::EcParam secp160k1 = { + "secp160k1", + "0xfffffffffffffffffffffffffffffffeffffac73", + "0", + "7", + "0x3b4c382ce37aa192a4019e763036f4f5dd4d7ebb", + "0x938cf935318fdced6bc28286531733c3f03c4fee", + "0x100000000000000000001b8fa16dfab9aca16b6b3", + 160, + -1 +}; +// p=2^160 + 7 +const struct mcl::EcParam p160_1 = { + "p160_1", + "0x10000000000000000000000000000000000000007", + "10", + "1343632762150092499701637438970764818528075565078", + "1", + "1236612389951462151661156731535316138439983579284", + "1461501637330902918203683518218126812711137002561", + 161, + -1 +}; +const struct mcl::EcParam secp192k1 = { + "secp192k1", + "0xfffffffffffffffffffffffffffffffffffffffeffffee37", + "0", + "3", + "0xdb4ff10ec057e9ae26b07d0280b7f4341da5d1b1eae06c7d", + "0x9b2f2f6d9c5628a7844163d015be86344082aa88d95e2f9d", + "0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d", + 192, + MCL_SECP192K1 +}; +const struct mcl::EcParam secp224k1 = { + "secp224k1", + 
"0xfffffffffffffffffffffffffffffffffffffffffffffffeffffe56d", + "0", + "5", + "0xa1455b334df099df30fc28a169a467e9e47075a90f7e650eb6b7a45c", + "0x7e089fed7fba344282cafbd6f7e319f7c0b0bd59e2ca4bdb556d61a5", + "0x10000000000000000000000000001dce8d2ec6184caf0a971769fb1f7", + 224, + MCL_SECP224K1 +}; +const struct mcl::EcParam secp256k1 = { + "secp256k1", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "0", + "7", + "0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", + "0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8", + "0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + 256, + MCL_SECP256K1 +}; +const struct mcl::EcParam secp384r1 = { + "secp384r1", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", + "-3", + "0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef", + "0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7", + "0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f", + "0xffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973", + 384, + MCL_SECP384R1 +}; +const struct mcl::EcParam secp521r1 = { + "secp521r1", + "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "-3", + "0x51953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00", + "0xc6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66", + "0x11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650", + "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e91386409", + 521, + MCL_SECP521R1 +}; +const struct mcl::EcParam NIST_P192 = { + "NIST_P192", + "0xfffffffffffffffffffffffffffffffeffffffffffffffff", + "-3", + "0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1", + "0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012", + "0x07192b95ffc8da78631011ed6b24cdd573f977a11e794811", + "0xffffffffffffffffffffffff99def836146bc9b1b4d22831", + 192, + MCL_NIST_P192 +}; +const struct mcl::EcParam NIST_P224 = { + "NIST_P224", + "0xffffffffffffffffffffffffffffffff000000000000000000000001", + "-3", + "0xb4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4", + "0xb70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21", + "0xbd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34", + "0xffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d", + 224, + MCL_NIST_P224 +}; +const struct mcl::EcParam NIST_P256 = { + "NIST_P256", + "0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff", + "-3", + "0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b", + "0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296", + "0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5", + "0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551", + 256, + MCL_NIST_P256 +}; +// same secp384r1 +const struct mcl::EcParam NIST_P384 = { + "NIST_P384", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", + "-3", + 
"0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef", + "0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7", + "0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f", + "0xffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973", + 384, + MCL_NIST_P384 +}; +// same secp521r1 +const struct mcl::EcParam NIST_P521 = { + "NIST_P521", + "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "-3", + "0x051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00", + "0xc6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66", + "0x11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650", + "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e91386409", + 521, + MCL_NIST_P521 +}; + +} // mcl::ecparam + +#ifndef CYBOZU_DONT_USE_STRING +static inline const mcl::EcParam* getEcParam(const std::string& name) +{ + static const mcl::EcParam *tbl[] = { + &ecparam::p160_1, + &ecparam::secp160k1, + &ecparam::secp192k1, + &ecparam::secp224k1, + &ecparam::secp256k1, + &ecparam::secp384r1, + &ecparam::secp521r1, + + &ecparam::NIST_P192, + &ecparam::NIST_P224, + &ecparam::NIST_P256, + &ecparam::NIST_P384, + &ecparam::NIST_P521, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + if (name == tbl[i]->name) return tbl[i]; + } + throw cybozu::Exception("mcl::getEcParam:not support name") << name; +} +#endif + +inline const mcl::EcParam* getEcParam(int curve) +{ + switch (curve) { + case MCL_SECP192K1: return &ecparam::secp192k1; + case MCL_SECP224K1: return &ecparam::secp224k1; + case MCL_SECP256K1: return &ecparam::secp256k1; + case MCL_SECP384R1: return &ecparam::secp384r1; + case MCL_NIST_P192: return &ecparam::NIST_P192; + case MCL_NIST_P224: return &ecparam::NIST_P224; + case MCL_NIST_P256: return &ecparam::NIST_P256; + default: return 0; + } +} + +} // mcl diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/elgamal.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/elgamal.hpp new file mode 100644 index 000000000..431148508 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/elgamal.hpp @@ -0,0 +1,612 @@ +#pragma once +/** + @file + @brief lifted-ElGamal encryption + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause + + original: + Copyright (c) 2014, National Institute of Advanced Industrial + Science and Technology All rights reserved. + This source file is subject to BSD 3-Clause license. 
+*/ +#include +#include +#include +#ifndef CYBOZU_UNORDERED_MAP_STD +#include +#endif +#include +#include +#include +#include + +namespace mcl { + +template +struct ElgamalT { + typedef _Ec Ec; + struct CipherText { + Ec c1; + Ec c2; + CipherText() + { + clear(); + } + /* + (c1, c2) = (0, 0) is trivial valid ciphertext for m = 0 + */ + void clear() + { + c1.clear(); + c2.clear(); + } + /* + add encoded message with encoded message + input : this = Enc(m1), c = Enc(m2) + output : this = Enc(m1 + m2) + */ + void add(const CipherText& c) + { + Ec::add(c1, c1, c.c1); + Ec::add(c2, c2, c.c2); + } + /* + mul by x + input : this = Enc(m), x + output : this = Enc(m x) + */ + template + void mul(const N& x) + { + Ec::mul(c1, c1, x); + Ec::mul(c2, c2, x); + } + /* + negative encoded message + input : this = Enc(m) + output : this = Enc(-m) + */ + void neg() + { + Ec::neg(c1, c1); + Ec::neg(c2, c2); + } + template + void load(InputStream& is, int ioMode = IoSerialize) + { + c1.load(is, ioMode); + c2.load(is, ioMode); + } + template + void save(OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + c1.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + c2.save(os, ioMode); + } + void getStr(std::string& str, int ioMode = 0) const + { + str.clear(); + cybozu::StringOutputStream os(str); + save(os, ioMode); + } + std::string getStr(int ioMode = 0) const + { + std::string str; + getStr(str, ioMode); + return str; + } + void setStr(const std::string& str, int ioMode = 0) + { + cybozu::StringInputStream is(str); + load(is, ioMode); + } + friend inline std::ostream& operator<<(std::ostream& os, const CipherText& self) + { + self.save(os, fp::detectIoMode(Ec::getIoMode(), os)); + return os; + } + friend inline std::istream& operator>>(std::istream& is, CipherText& self) + { + self.load(is, fp::detectIoMode(Ec::getIoMode(), is)); + return is; + } + // obsolete + std::string toStr() const { return getStr(); } + void fromStr(const std::string& str) { setStr(str); } + }; + /* + Zero Knowledge Proof + cipher text with ZKP to ensure m = 0 or 1 + http://dx.doi.org/10.1587/transfun.E96.A.1156 + */ + struct Zkp { + Zn c0, c1, s0, s1; + template + void load(InputStream& is, int ioMode = IoSerialize) + { + c0.load(is, ioMode); + c1.load(is, ioMode); + s0.load(is, ioMode); + s1.load(is, ioMode); + } + template + void save(OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + c0.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + c1.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + s0.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + s1.save(os, ioMode); + } + void getStr(std::string& str, int ioMode = 0) const + { + str.clear(); + cybozu::StringOutputStream os(str); + save(os, ioMode); + } + std::string getStr(int ioMode = 0) const + { + std::string str; + getStr(str, ioMode); + return str; + } + void setStr(const std::string& str, int ioMode = 0) + { + cybozu::StringInputStream is(str); + load(is, ioMode); + } + friend inline std::ostream& operator<<(std::ostream& os, const Zkp& self) + { + self.save(os, fp::detectIoMode(Ec::getIoMode(), os)); + return os; + } + friend inline std::istream& operator>>(std::istream& is, Zkp& self) + { + self.load(is, fp::detectIoMode(Ec::getIoMode(), is)); + return is; + } + // obsolete + std::string toStr() const { return getStr(); } + void fromStr(const std::string& str) { setStr(str); } + }; + + class PublicKey { + size_t bitSize; + Ec f; + Ec g; + Ec h; + 
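The CipherText operations above (add, mul, neg) are what make lifted ElGamal additively homomorphic: adding Enc(m1) and Enc(m2) componentwise yields a valid encryption of m1 + m2. A minimal round trip, sketched under the assumption that Ec and Zn are already initialized for some curve, that P is a suitable base point, and using the PublicKey/PrivateKey classes defined later in this header:

typedef mcl::ElgamalT<Ec, Zn> Elgamal; // template parameters as declared above
Elgamal::PrivateKey prv;
prv.init(P, bitSize); // P plays the role of f
const Elgamal::PublicKey& pub = prv.getPublicKey();
Elgamal::CipherText c1, c2;
pub.enc(c1, 3);
pub.enc(c2, 4);
c1.add(c2); // Enc(3) + Enc(4) = Enc(7)
Zn m;
prv.dec(m, c1); // m == 7, recovered by the bounded search in PrivateKey::dec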
bool enableWindowMethod_; + fp::WindowMethod wm_f; + fp::WindowMethod wm_g; + fp::WindowMethod wm_h; + template + void mulDispatch(Ec& z, const Ec& x, const N& n, const fp::WindowMethod& pw) const + { + if (enableWindowMethod_) { + pw.mul(z, n); + } else { + Ec::mul(z, x, n); + } + } + template + void mulF(Ec& z, const N& n) const { mulDispatch(z, f, n, wm_f); } + template + void mulG(Ec& z, const N& n) const { mulDispatch(z, g, n, wm_g); } + template + void mulH(Ec& z, const N& n) const { mulDispatch(z, h, n, wm_h); } + public: + PublicKey() + : bitSize(0) + , enableWindowMethod_(false) + { + } + void enableWindowMethod(size_t winSize = 10) + { + wm_f.init(f, bitSize, winSize); + wm_g.init(g, bitSize, winSize); + wm_h.init(h, bitSize, winSize); + enableWindowMethod_ = true; + } + const Ec& getF() const { return f; } + void init(size_t bitSize, const Ec& f, const Ec& g, const Ec& h) + { + this->bitSize = bitSize; + this->f = f; + this->g = g; + this->h = h; + enableWindowMethod_ = false; + enableWindowMethod(); + } + /* + encode message + input : m + output : c = (c1, c2) = (g^u, h^u f^m) + */ + void enc(CipherText& c, const Zn& m, fp::RandGen rg = fp::RandGen()) const + { + Zn u; + u.setRand(rg); + mulG(c.c1, u); + mulH(c.c2, u); + Ec t; + mulF(t, m); + Ec::add(c.c2, c.c2, t); + } + /* + encode message + input : m = 0 or 1 + output : c (c1, c2), zkp + */ + void encWithZkp(CipherText& c, Zkp& zkp, int m, fp::RandGen rg = fp::RandGen()) const + { + if (m != 0 && m != 1) { + throw cybozu::Exception("elgamal:PublicKey:encWithZkp") << m; + } + Zn u; + u.setRand(rg); + mulG(c.c1, u); + mulH(c.c2, u); + if (m) { + Ec::add(c.c2, c.c2, f); + Zn r1; + r1.setRand(rg); + zkp.c0.setRand(rg); + zkp.s0.setRand(rg); + Ec R01, R02, R11, R12; + Ec t1, t2; + mulG(t1, zkp.s0); + Ec::mul(t2, c.c1, zkp.c0); + Ec::sub(R01, t1, t2); + mulH(t1, zkp.s0); + Ec::mul(t2, c.c2, zkp.c0); + Ec::sub(R02, t1, t2); + mulG(R11, r1); + mulH(R12, r1); + std::ostringstream os; + os << R01 << R02 << R11 << R12 << c.c1 << c.c2 << f << g << h; + Zn cc; + cc.setHashOf(os.str()); + zkp.c1 = cc - zkp.c0; + zkp.s1 = r1 + zkp.c1 * u; + } else { + Zn r0; + r0.setRand(rg); + zkp.c1.setRand(rg); + zkp.s1.setRand(rg); + Ec R01, R02, R11, R12; + mulG(R01, r0); + mulH(R02, r0); + Ec t1, t2; + mulG(t1, zkp.s1); + Ec::mul(t2, c.c1, zkp.c1); + Ec::sub(R11, t1, t2); + mulH(t1, zkp.s1); + Ec::sub(t2, c.c2, f); + Ec::mul(t2, t2, zkp.c1); + Ec::sub(R12, t1, t2); + std::ostringstream os; + os << R01 << R02 << R11 << R12 << c.c1 << c.c2 << f << g << h; + Zn cc; + cc.setHashOf(os.str()); + zkp.c0 = cc - zkp.c1; + zkp.s0 = r0 + zkp.c0 * u; + } + } + /* + verify cipher text with ZKP + */ + bool verify(const CipherText& c, const Zkp& zkp) const + { + Ec R01, R02, R11, R12; + Ec t1, t2; + mulG(t1, zkp.s0); + Ec::mul(t2, c.c1, zkp.c0); + Ec::sub(R01, t1, t2); + mulH(t1, zkp.s0); + Ec::mul(t2, c.c2, zkp.c0); + Ec::sub(R02, t1, t2); + mulG(t1, zkp.s1); + Ec::mul(t2, c.c1, zkp.c1); + Ec::sub(R11, t1, t2); + mulH(t1, zkp.s1); + Ec::sub(t2, c.c2, f); + Ec::mul(t2, t2, zkp.c1); + Ec::sub(R12, t1, t2); + std::ostringstream os; + os << R01 << R02 << R11 << R12 << c.c1 << c.c2 << f << g << h; + Zn cc; + cc.setHashOf(os.str()); + return cc == zkp.c0 + zkp.c1; + } + /* + rerandomize encoded message + input : c = (c1, c2) + output : c = (c1 g^v, c2 h^v) + */ + void rerandomize(CipherText& c, fp::RandGen rg = fp::RandGen()) const + { + Zn v; + v.setRand(rg); + Ec t; + mulG(t, v); + Ec::add(c.c1, c.c1, t); + mulH(t, v); + Ec::add(c.c2, c.c2, t); + } + /* + add encoded 
message with plain message + input : c = Enc(m1) = (c1, c2), m2 + ouput : c = Enc(m1 + m2) = (c1, c2 f^m2) + */ + template + void add(CipherText& c, const N& m) const + { + Ec fm; + mulF(fm, m); + Ec::add(c.c2, c.c2, fm); + } + template + void load(InputStream& is, int ioMode = IoSerialize) + { + std::string s; + mcl::fp::local::loadWord(s, is); + bitSize = cybozu::atoi(s); + f.load(is, ioMode); + g.load(is, ioMode); + h.load(is, ioMode); + init(bitSize, f, g, h); + } + template + void save(OutputStream& os, int ioMode = IoSerialize) const + { + std::string s = cybozu::itoa(bitSize); + cybozu::write(os, s.c_str(), s.size()); + cybozu::writeChar(os, ' '); + + const char sep = *fp::getIoSeparator(ioMode); + f.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + g.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + h.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + } + void getStr(std::string& str, int ioMode = 0) const + { + str.clear(); + cybozu::StringOutputStream os(str); + save(os, ioMode); + } + std::string getStr(int ioMode = 0) const + { + std::string str; + getStr(str, ioMode); + return str; + } + void setStr(const std::string& str, int ioMode = 0) + { + cybozu::StringInputStream is(str); + load(is, ioMode); + } + friend inline std::ostream& operator<<(std::ostream& os, const PublicKey& self) + { + self.save(os, fp::detectIoMode(Ec::getIoMode(), os)); + return os; + } + friend inline std::istream& operator>>(std::istream& is, PublicKey& self) + { + self.load(is, fp::detectIoMode(Ec::getIoMode(), is)); + return is; + } + // obsolete + std::string toStr() const { return getStr(); } + void fromStr(const std::string& str) { setStr(str); } + }; + /* + create table f^i for i in [rangeMin, rangeMax] + */ + struct PowerCache { +#if (CYBOZU_CPP_VERSION > CYBOZU_CPP_VERSION_CP03) + typedef CYBOZU_NAMESPACE_STD::unordered_map Cache; +#else + typedef std::map Cache; +#endif + Cache cache; + void init(const Ec& f, int rangeMin, int rangeMax) + { + if (rangeMin > rangeMax) throw cybozu::Exception("mcl:ElgamalT:PowerCache:bad range") << rangeMin << rangeMax; + Ec x; + x.clear(); + cache[x] = 0; + for (int i = 1; i <= rangeMax; i++) { + Ec::add(x, x, f); + cache[x] = i; + } + Ec nf; + Ec::neg(nf, f); + x.clear(); + for (int i = -1; i >= rangeMin; i--) { + Ec::add(x, x, nf); + cache[x] = i; + } + } + /* + return m such that f^m = g + */ + int getExponent(const Ec& g, bool *b = 0) const + { + typename Cache::const_iterator i = cache.find(g); + if (i == cache.end()) { + if (b) { + *b = false; + return 0; + } + throw cybozu::Exception("Elgamal:PowerCache:getExponent:not found") << g; + } + if (b) *b = true; + return i->second; + } + void clear() + { + cache.clear(); + } + bool isEmpty() const + { + return cache.empty(); + } + }; + class PrivateKey { + PublicKey pub; + Zn z; + PowerCache cache; + public: + /* + init + input : f + output : (g, h, z) + Ec = + g in Ec + h = g^z + */ + void init(const Ec& f, size_t bitSize, fp::RandGen rg = fp::RandGen()) + { + Ec g, h; + z.setRand(rg); + Ec::mul(g, f, z); + z.setRand(rg); + Ec::mul(h, g, z); + pub.init(bitSize, f, g, h); + } + const PublicKey& getPublicKey() const { return pub; } + /* + decode message by brute-force attack + input : c = (c1, c2) + output : m + M = c2 / c1^z + find m such that M = f^m and |m| < limit + @memo 7sec@core i3 for m = 1e6 + */ + void dec(Zn& m, const CipherText& c, int limit = 100000) const + { + const Ec& f = pub.getF(); + Ec c1z; + Ec::mul(c1z, c.c1, z); + if (c1z == c.c2) { + m = 0; + return; + } + Ec 
t1(c1z); + Ec t2(c.c2); + for (int i = 1; i < limit; i++) { + Ec::add(t1, t1, f); + if (t1 == c.c2) { + m = i; + return; + } + Ec::add(t2, t2, f); + if (t2 == c1z) { + m = -i; + return; + } + } + throw cybozu::Exception("elgamal:PrivateKey:dec:overflow"); + } + /* + powfm = c2 / c1^z = f^m + */ + void getPowerf(Ec& powfm, const CipherText& c) const + { + Ec c1z; + Ec::mul(c1z, c.c1, z); + Ec::sub(powfm, c.c2, c1z); + } + /* + set range of message to decode quickly + */ + void setCache(int rangeMin, int rangeMax) + { + cache.init(pub.getF(), rangeMin, rangeMax); + } + /* + clear cache + */ + void clearCache() + { + cache.clear(); + } + /* + decode message by lookup table if !cache.isEmpty() + brute-force attack otherwise + input : c = (c1, c2) + b : set false if not found + return m + */ + int dec(const CipherText& c, bool *b = 0) const + { + Ec powfm; + getPowerf(powfm, c); + return cache.getExponent(powfm, b); + } + /* + check whether c is encrypted zero message + */ + bool isZeroMessage(const CipherText& c) const + { + Ec c1z; + Ec::mul(c1z, c.c1, z); + return c.c2 == c1z; + } + template + void load(InputStream& is, int ioMode = IoSerialize) + { + pub.load(is, ioMode); + z.load(is, ioMode); + } + template + void save(OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + pub.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + z.save(os, ioMode); + } + void getStr(std::string& str, int ioMode = 0) const + { + str.clear(); + cybozu::StringOutputStream os(str); + save(os, ioMode); + } + std::string getStr(int ioMode = 0) const + { + std::string str; + getStr(str, ioMode); + return str; + } + void setStr(const std::string& str, int ioMode = 0) + { + cybozu::StringInputStream is(str); + load(is, ioMode); + } + friend inline std::ostream& operator<<(std::ostream& os, const PrivateKey& self) + { + self.save(os, fp::detectIoMode(Ec::getIoMode(), os)); + return os; + } + friend inline std::istream& operator>>(std::istream& is, PrivateKey& self) + { + self.load(is, fp::detectIoMode(Ec::getIoMode(), is)); + return is; + } + std::string toStr() const { return getStr(); } + void fromStr(const std::string& str) { setStr(str); } + }; +}; + +} // mcl diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/fp.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/fp.hpp new file mode 100644 index 000000000..2e69729dd --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/fp.hpp @@ -0,0 +1,661 @@ +#pragma once +/** + @file + @brief finite field class + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#ifndef CYBOZU_DONT_USE_STRING +#include +#endif +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4127) + #pragma warning(disable : 4458) + #ifndef NOMINMAX + #define NOMINMAX + #endif + #ifndef MCL_NO_AUTOLINK + #ifdef NDEBUG + #pragma comment(lib, "mcl.lib") + #else + #pragma comment(lib, "mcl.lib") + #endif + #endif +#endif +#include +#include +#include +#include +#include +#include + +namespace mcl { + +struct FpTag; +struct ZnTag; + +namespace fp { + +// copy src to dst as little endian +void copyUnitToByteAsLE(uint8_t *dst, const Unit *src, size_t byteSize); +// copy src to dst as little endian +void copyByteToUnitAsLE(Unit *dst, const uint8_t *src, size_t byteSize); + +bool copyAndMask(Unit *y, const void *x, size_t xByteSize, const Op& op, MaskMode maskMode); + +uint64_t getUint64(bool *pb, const fp::Block& b); +int64_t getInt64(bool *pb, 
fp::Block& b, const fp::Op& op); + +const char *ModeToStr(Mode mode); + +Mode StrToMode(const char *s); + +#ifndef CYBOZU_DONT_USE_STRING +inline Mode StrToMode(const std::string& s) +{ + return StrToMode(s.c_str()); +} +#endif + +inline void dumpUnit(Unit x) +{ +#if MCL_SIZEOF_UNIT == 4 + printf("%08x", (uint32_t)x); +#else + printf("%016llx", (unsigned long long)x); +#endif +} + +bool isEnableJIT(); // 1st call is not threadsafe + +uint32_t sha256(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize); +uint32_t sha512(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize); + +} // mcl::fp + +template +class FpT : public fp::Serializable, + fp::Operator > > { + typedef fp::Unit Unit; + typedef fp::Operator > Operator; + typedef fp::Serializable, Operator> Serializer; +public: + static const size_t maxSize = (maxBitSize + fp::UnitBitSize - 1) / fp::UnitBitSize; +private: + template friend class FpT; + Unit v_[maxSize]; + static fp::Op op_; + static FpT inv2_; + static int ioMode_; + template friend class FpDblT; + template friend class Fp2T; + template friend struct Fp6T; +public: + typedef FpT BaseFp; + // return pointer to array v_[] + const Unit *getUnit() const { return v_; } + FpT* getFp0() { return this; } + const FpT* getFp0() const { return this; } + static inline size_t getUnitSize() { return op_.N; } + static inline size_t getBitSize() { return op_.bitSize; } + static inline size_t getByteSize() { return (op_.bitSize + 7) / 8; } + static inline const fp::Op& getOp() { return op_; } + void dump() const + { + const size_t N = op_.N; + for (size_t i = 0; i < N; i++) { + fp::dumpUnit(v_[N - 1 - i]); + } + printf("\n"); + } + /* + xi_a is used for Fp2::mul_xi(), where xi = xi_a + i and i^2 = -1 + if xi_a = 0 then asm functions for Fp2 are not generated. 
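Before the init functions below, a minimal end-to-end sketch of the class being defined here; it assumes FpT's default tag parameter and a 256-bit maxBitSize, and the secp256k1 prime is used purely as an example modulus:

typedef mcl::FpT<mcl::FpTag, 256> Fp;

bool ok;
Fp::init(&ok, "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f");
if (!ok) { /* handle init failure */ }
Fp x = 3, y, z;
Fp::inv(y, x);    // y = 1/3 mod p
Fp::mul(z, x, y); // z = x * (1/x), so z.isOne()
// values are held in Montgomery form internally whenever isMont() is true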
+ */ + static inline void init(bool *pb, int xi_a, const mpz_class& p, fp::Mode mode = fp::FP_AUTO) + { + assert(maxBitSize <= MCL_MAX_BIT_SIZE); + *pb = op_.init(p, maxBitSize, xi_a, mode); + if (!*pb) return; + { // set oneRep + FpT& one = *reinterpret_cast(op_.oneRep); + one.clear(); + one.v_[0] = 1; + one.toMont(); + } + { // set half + mpz_class half = (op_.mp + 1) / 2; + gmp::getArray(pb, op_.half, op_.N, half); + if (!*pb) return; + } + inv(inv2_, 2); +#ifdef MCL_XBYAK_DIRECT_CALL + add = fp::func_ptr_cast(op_.fp_addA_); + if (add == 0) add = addC; + sub = fp::func_ptr_cast(op_.fp_subA_); + if (sub == 0) sub = subC; + neg = fp::func_ptr_cast(op_.fp_negA_); + if (neg == 0) neg = negC; + mul = fp::func_ptr_cast(op_.fp_mulA_); + if (mul == 0) mul = mulC; + sqr = fp::func_ptr_cast(op_.fp_sqrA_); + if (sqr == 0) sqr = sqrC; +#endif + *pb = true; + } + static inline void init(bool *pb, const mpz_class& p, fp::Mode mode = fp::FP_AUTO) + { + init(pb, 0, p, mode); + } + static inline void init(bool *pb, const char *mstr, fp::Mode mode = fp::FP_AUTO) + { + mpz_class p; + gmp::setStr(pb, p, mstr); + if (!*pb) return; + init(pb, p, mode); + } + static inline size_t getModulo(char *buf, size_t bufSize) + { + return gmp::getStr(buf, bufSize, op_.mp); + } + static inline bool isFullBit() { return op_.isFullBit; } + /* + binary patter of p + @note the value of p is zero + */ + static inline const FpT& getP() + { + return *reinterpret_cast(op_.p); + } + bool isOdd() const + { + fp::Block b; + getBlock(b); + return (b.p[0] & 1) == 1; + } + static inline bool squareRoot(FpT& y, const FpT& x) + { + if (isMont()) return op_.sq.get(y, x); + mpz_class mx, my; + bool b = false; + x.getMpz(&b, mx); + if (!b) return false; + b = op_.sq.get(my, mx); + if (!b) return false; + y.setMpz(&b, my); + return b; + } + FpT() {} + FpT(const FpT& x) + { + op_.fp_copy(v_, x.v_); + } + FpT& operator=(const FpT& x) + { + op_.fp_copy(v_, x.v_); + return *this; + } + void clear() + { + op_.fp_clear(v_); + } + FpT(int64_t x) { operator=(x); } + FpT& operator=(int64_t x) + { + if (x == 1) { + op_.fp_copy(v_, op_.oneRep); + } else { + clear(); + if (x) { + int64_t y = x < 0 ? 
-x : x; + if (sizeof(Unit) == 8) { + v_[0] = y; + } else { + v_[0] = (uint32_t)y; + v_[1] = (uint32_t)(y >> 32); + } + if (x < 0) neg(*this, *this); + toMont(); + } + } + return *this; + } + static inline bool isMont() { return op_.isMont; } + /* + convert normal value to Montgomery value + do nothing is !isMont() + */ + void toMont() + { + if (isMont()) op_.toMont(v_, v_); + } + /* + convert Montgomery value to normal value + do nothing is !isMont() + */ + void fromMont() + { + if (isMont()) op_.fromMont(v_, v_); + } + template + void load(bool *pb, InputStream& is, int ioMode) + { + bool isMinus = false; + *pb = false; + if (ioMode & (IoArray | IoArrayRaw | IoSerialize | IoSerializeHexStr)) { + const size_t n = getByteSize(); + v_[op_.N - 1] = 0; + size_t readSize; + if (ioMode & IoSerializeHexStr) { + readSize = mcl::fp::readHexStr(v_, n, is); + } else { + readSize = cybozu::readSome(v_, n, is); + } + if (readSize != n) return; + } else { + char buf[1024]; + size_t n = fp::local::loadWord(buf, sizeof(buf), is); + if (n == 0) return; + n = fp::strToArray(&isMinus, v_, op_.N, buf, n, ioMode); + if (n == 0) return; + for (size_t i = n; i < op_.N; i++) v_[i] = 0; + } + if (fp::isGreaterOrEqualArray(v_, op_.p, op_.N)) { + return; + } + if (isMinus) { + neg(*this, *this); + } + if (!(ioMode & IoArrayRaw)) { + toMont(); + } + *pb = true; + } + template + void save(bool *pb, OutputStream& os, int ioMode) const + { + const size_t n = getByteSize(); + if (ioMode & (IoArray | IoArrayRaw | IoSerialize | IoSerializeHexStr)) { + if (ioMode & IoArrayRaw) { + cybozu::write(pb, os, v_, n); + } else { + fp::Block b; + getBlock(b); + if (ioMode & IoSerializeHexStr) { + mcl::fp::writeHexStr(pb, os, b.p, n); + } else { + cybozu::write(pb, os, b.p, n); + } + } + return; + } + fp::Block b; + getBlock(b); + // use low 8-bit ioMode for (base, withPrefix) + char buf[2048]; + size_t len = mcl::fp::arrayToStr(buf, sizeof(buf), b.p, b.n, ioMode & 31, (ioMode & IoPrefix) != 0); + if (len == 0) { + *pb = false; + return; + } + cybozu::write(pb, os, buf + sizeof(buf) - len, len); + } + /* + mode = Mod : set x mod p if sizeof(S) * n <= 64 else error + */ + template + void setArray(bool *pb, const S *x, size_t n, mcl::fp::MaskMode mode = fp::NoMask) + { + *pb = fp::copyAndMask(v_, x, sizeof(S) * n, op_, mode); + toMont(); + } + /* + mask x with (1 << bitLen) and subtract p if x >= p + */ + template + void setArrayMaskMod(const S *x, size_t n) + { + fp::copyAndMask(v_, x, sizeof(S) * n, op_, fp::MaskAndMod); + toMont(); + } + + /* + mask x with (1 << (bitLen - 1)) - 1 if x >= p + */ + template + void setArrayMask(const S *x, size_t n) + { + fp::copyAndMask(v_, x, sizeof(S) * n, op_, fp::SmallMask); + toMont(); + } + void getBlock(fp::Block& b) const + { + b.n = op_.N; + if (isMont()) { + op_.fromMont(b.v_, v_); + b.p = &b.v_[0]; + } else { + b.p = &v_[0]; + } + } + void setByCSPRNG(bool *pb, fp::RandGen rg = fp::RandGen()) + { + if (rg.isZero()) rg = fp::RandGen::get(); + rg.read(pb, v_, op_.N * sizeof(Unit)); // byte size + if (!pb) return; + setArrayMask(v_, op_.N); + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + void setByCSPRNG(fp::RandGen rg = fp::RandGen()) + { + bool b; + setByCSPRNG(&b, rg); + if (!b) throw cybozu::Exception("setByCSPRNG"); + } +#endif + void setRand(fp::RandGen rg = fp::RandGen()) // old api + { + setByCSPRNG(rg); + } + /* + hash msg and mask with (1 << (bitLen - 1)) - 1 + */ + void setHashOf(const void *msg, size_t msgSize) + { + char buf[MCL_MAX_HASH_BIT_SIZE / 8]; + uint32_t size = op_.hash(buf, 
static_cast(sizeof(buf)), msg, static_cast(msgSize)); + setArrayMask(buf, size); + } + void getMpz(bool *pb, mpz_class& x) const + { + fp::Block b; + getBlock(b); + gmp::setArray(pb, x, b.p, b.n); + } + void setMpz(bool *pb, const mpz_class& x) + { + if (x < 0) { + *pb = false; + return; + } + setArray(pb, gmp::getUnit(x), gmp::getUnitSize(x)); + } +#ifdef MCL_XBYAK_DIRECT_CALL + static void (*add)(FpT& z, const FpT& x, const FpT& y); + static inline void addC(FpT& z, const FpT& x, const FpT& y) { op_.fp_add(z.v_, x.v_, y.v_, op_.p); } + static void (*sub)(FpT& z, const FpT& x, const FpT& y); + static inline void subC(FpT& z, const FpT& x, const FpT& y) { op_.fp_sub(z.v_, x.v_, y.v_, op_.p); } + static void (*neg)(FpT& y, const FpT& x); + static inline void negC(FpT& y, const FpT& x) { op_.fp_neg(y.v_, x.v_, op_.p); } + static void (*mul)(FpT& z, const FpT& x, const FpT& y); + static inline void mulC(FpT& z, const FpT& x, const FpT& y) { op_.fp_mul(z.v_, x.v_, y.v_, op_.p); } + static void (*sqr)(FpT& y, const FpT& x); + static inline void sqrC(FpT& y, const FpT& x) { op_.fp_sqr(y.v_, x.v_, op_.p); } +#else + static inline void add(FpT& z, const FpT& x, const FpT& y) { op_.fp_add(z.v_, x.v_, y.v_, op_.p); } + static inline void sub(FpT& z, const FpT& x, const FpT& y) { op_.fp_sub(z.v_, x.v_, y.v_, op_.p); } + static inline void neg(FpT& y, const FpT& x) { op_.fp_neg(y.v_, x.v_, op_.p); } + static inline void mul(FpT& z, const FpT& x, const FpT& y) { op_.fp_mul(z.v_, x.v_, y.v_, op_.p); } + static inline void sqr(FpT& y, const FpT& x) { op_.fp_sqr(y.v_, x.v_, op_.p); } +#endif + static inline void addPre(FpT& z, const FpT& x, const FpT& y) { op_.fp_addPre(z.v_, x.v_, y.v_); } + static inline void subPre(FpT& z, const FpT& x, const FpT& y) { op_.fp_subPre(z.v_, x.v_, y.v_); } + static inline void mulUnit(FpT& z, const FpT& x, const Unit y) + { + if (mulSmallUnit(z, x, y)) return; + op_.fp_mulUnit(z.v_, x.v_, y, op_.p); + } + static inline void inv(FpT& y, const FpT& x) { op_.fp_invOp(y.v_, x.v_, op_); } + static inline void divBy2(FpT& y, const FpT& x) + { +#if 0 + mul(y, x, inv2_); +#else + bool odd = (x.v_[0] & 1) != 0; + op_.fp_shr1(y.v_, x.v_); + if (odd) { + op_.fp_addPre(y.v_, y.v_, op_.half); + } +#endif + } + static inline void divBy4(FpT& y, const FpT& x) + { + divBy2(y, x); // QQQ : optimize later + divBy2(y, y); + } + bool isZero() const { return op_.fp_isZero(v_); } + bool isOne() const { return fp::isEqualArray(v_, op_.oneRep, op_.N); } + static const inline FpT& one() { return *reinterpret_cast(op_.oneRep); } + /* + half = (p + 1) / 2 + return true if half <= x < p + return false if 0 <= x < half + */ + bool isNegative() const + { + fp::Block b; + getBlock(b); + return fp::isGreaterOrEqualArray(b.p, op_.half, op_.N); + } + bool isValid() const + { + return fp::isLessArray(v_, op_.p, op_.N); + } + uint64_t getUint64(bool *pb) const + { + fp::Block b; + getBlock(b); + return fp::getUint64(pb, b); + } + int64_t getInt64(bool *pb) const + { + fp::Block b; + getBlock(b); + return fp::getInt64(pb, b, op_); + } + bool operator==(const FpT& rhs) const { return fp::isEqualArray(v_, rhs.v_, op_.N); } + bool operator!=(const FpT& rhs) const { return !operator==(rhs); } + /* + @note + this compare functions is slow because of calling mul if isMont is true. 
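That note is worth making concrete: the semantic comparisons convert both operands out of Montgomery form (one multiplication each), while compareRaw/isLessRaw below order the internal words, which is cheaper but meaningless as a value order when isMont() is true. A small sketch, reusing the Fp typedef from the earlier example:

Fp a = 2, b = 3;
bool lt = a < b;                // true: compares the normal values
int raw = Fp::compareRaw(a, b); // orders the raw (possibly Montgomery) representation;
                                // its sign need not match the semantic order
(void)lt; (void)raw;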
+ */ + static inline int compare(const FpT& x, const FpT& y) + { + fp::Block xb, yb; + x.getBlock(xb); + y.getBlock(yb); + return fp::compareArray(xb.p, yb.p, op_.N); + } + bool isLess(const FpT& rhs) const + { + fp::Block xb, yb; + getBlock(xb); + rhs.getBlock(yb); + return fp::isLessArray(xb.p, yb.p, op_.N); + } + bool operator<(const FpT& rhs) const { return isLess(rhs); } + bool operator>=(const FpT& rhs) const { return !operator<(rhs); } + bool operator>(const FpT& rhs) const { return rhs < *this; } + bool operator<=(const FpT& rhs) const { return !operator>(rhs); } + /* + @note + return unexpected order if isMont is set. + */ + static inline int compareRaw(const FpT& x, const FpT& y) + { + return fp::compareArray(x.v_, y.v_, op_.N); + } + bool isLessRaw(const FpT& rhs) const + { + return fp::isLessArray(v_, rhs.v_, op_.N); + } + /* + set IoMode for operator<<(), or operator>>() + */ + static inline void setIoMode(int ioMode) + { + ioMode_ = ioMode; + } + static inline int getIoMode() { return ioMode_; } + static inline size_t getModBitLen() { return getBitSize(); } + static inline void setHashFunc(uint32_t hash(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize)) + { + op_.hash = hash; + } +#ifndef CYBOZU_DONT_USE_STRING + explicit FpT(const std::string& str, int base = 0) + { + Serializer::setStr(str, base); + } + static inline void getModulo(std::string& pstr) + { + gmp::getStr(pstr, op_.mp); + } + static std::string getModulo() + { + std::string s; + getModulo(s); + return s; + } + void setHashOf(const std::string& msg) + { + setHashOf(msg.data(), msg.size()); + } + // backward compatibility + static inline void setModulo(const std::string& mstr, fp::Mode mode = fp::FP_AUTO) + { + init(mstr, mode); + } + friend inline std::ostream& operator<<(std::ostream& os, const FpT& self) + { + self.save(os, fp::detectIoMode(getIoMode(), os)); + return os; + } + friend inline std::istream& operator>>(std::istream& is, FpT& self) + { + self.load(is, fp::detectIoMode(getIoMode(), is)); + return is; + } +#endif +#ifndef CYBOZU_DONT_USE_EXCEPTION + static inline void init(int xi_a, const mpz_class& p, fp::Mode mode = fp::FP_AUTO) + { + bool b; + init(&b, xi_a, p, mode); + if (!b) throw cybozu::Exception("Fp:init"); + } + static inline void init(int xi_a, const std::string& mstr, fp::Mode mode = fp::FP_AUTO) + { + mpz_class p; + gmp::setStr(p, mstr); + init(xi_a, p, mode); + } + static inline void init(const mpz_class& p, fp::Mode mode = fp::FP_AUTO) + { + init(0, p, mode); + } + static inline void init(const std::string& mstr, fp::Mode mode = fp::FP_AUTO) + { + init(0, mstr, mode); + } + template + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("fp:save") << ioMode; + } + template + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("fp:load") << ioMode; + } + /* + throw exception if x >= p + */ + template + void setArray(const S *x, size_t n) + { + bool b; + setArray(&b, x, n); + if (!b) throw cybozu::Exception("Fp:setArray"); + } + void setMpz(const mpz_class& x) + { + bool b; + setMpz(&b, x); + if (!b) throw cybozu::Exception("Fp:setMpz"); + } + uint64_t getUint64() const + { + bool b; + uint64_t v = getUint64(&b); + if (!b) throw cybozu::Exception("Fp:getUint64:large value"); + return v; + } + int64_t getInt64() const + { + bool b; + int64_t v = getInt64(&b); + if (!b) throw cybozu::Exception("Fp:getInt64:large value"); + 
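Every throwing accessor in this block wraps a bool-out primitive, so builds defining CYBOZU_DONT_USE_EXCEPTION keep the same functionality. A sketch of the non-throwing style, reusing the Fp typedef from the earlier example:

bool ok;
Fp v;
v.setByCSPRNG(&ok);            // bool-out variant of the throwing setByCSPRNG()
if (!ok) { /* RNG failure */ }
uint64_t u = v.getUint64(&ok); // ok becomes false if the value does not fit in 64 bits
(void)u;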
return v; + } + void getMpz(mpz_class& x) const + { + bool b; + getMpz(&b, x); + if (!b) throw cybozu::Exception("Fp:getMpz"); + } + mpz_class getMpz() const + { + mpz_class x; + getMpz(x); + return x; + } +#endif +}; + +template fp::Op FpT::op_; +template FpT FpT::inv2_; +template int FpT::ioMode_ = IoAuto; +#ifdef MCL_XBYAK_DIRECT_CALL +template void (*FpT::add)(FpT& z, const FpT& x, const FpT& y); +template void (*FpT::sub)(FpT& z, const FpT& x, const FpT& y); +template void (*FpT::neg)(FpT& y, const FpT& x); +template void (*FpT::mul)(FpT& z, const FpT& x, const FpT& y); +template void (*FpT::sqr)(FpT& y, const FpT& x); +#endif + +} // mcl + +#ifdef CYBOZU_USE_BOOST +namespace mcl { + +template +size_t hash_value(const mcl::FpT& x, size_t v = 0) +{ + return static_cast(cybozu::hash64(x.getUnit(), x.getUnitSize(), v)); +} + +} +#else +namespace std { CYBOZU_NAMESPACE_TR1_BEGIN + +template +struct hash > { + size_t operator()(const mcl::FpT& x, uint64_t v = 0) const + { + return static_cast(cybozu::hash64(x.getUnit(), x.getUnitSize(), v)); + } +}; + +CYBOZU_NAMESPACE_TR1_END } // std::tr1 +#endif + +#ifdef _MSC_VER + #pragma warning(pop) +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/fp_tower.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/fp_tower.hpp new file mode 100644 index 000000000..95722e2d5 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/fp_tower.hpp @@ -0,0 +1,1364 @@ +#pragma once +/** + @file + @brief finite field extension class + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include + +namespace mcl { + +template +class FpDblT : public fp::Serializable > { + typedef fp::Unit Unit; + Unit v_[Fp::maxSize * 2]; +public: + static size_t getUnitSize() { return Fp::op_.N * 2; } + FpDblT() : v_() + { + } + FpDblT(const FpDblT& rhs) + { + const size_t n = getUnitSize(); + for (size_t i = 0; i < n; i++) { + v_[i] = rhs.v_[i]; + } + } + void dump() const + { + const size_t n = getUnitSize(); + for (size_t i = 0; i < n; i++) { + mcl::fp::dumpUnit(v_[n - 1 - i]); + } + printf("\n"); + } + template + void save(bool *pb, OutputStream& os, int) const + { + char buf[1024]; + size_t n = mcl::fp::arrayToHex(buf, sizeof(buf), v_, getUnitSize()); + if (n == 0) { + *pb = false; + return; + } + cybozu::write(pb, os, buf + sizeof(buf) - n, sizeof(buf)); + } + template + void load(bool *pb, InputStream& is, int) + { + char buf[1024]; + *pb = false; + size_t n = fp::local::loadWord(buf, sizeof(buf), is); + if (n == 0) return; + n = fp::hexToArray(v_, getUnitSize(), buf, n); + if (n == 0) return; + for (size_t i = n; i < getUnitSize(); i++) v_[i] = 0; + *pb = true; + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + template + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("FpDblT:save") << ioMode; + } + template + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("FpDblT:load") << ioMode; + } + void getMpz(mpz_class& x) const + { + bool b; + getMpz(&b, x); + if (!b) throw cybozu::Exception("FpDblT:getMpz"); + } + mpz_class getMpz() const + { + mpz_class x; + getMpz(x); + return x; + } +#endif + void clear() + { + const size_t n = getUnitSize(); + for (size_t i = 0; i < n; i++) { + v_[i] = 0; + } + } + FpDblT& operator=(const FpDblT& rhs) + { + const size_t n = getUnitSize(); + for (size_t i = 0; i < n; i++) { + v_[i] = rhs.v_[i]; 
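FpDblT, the class being defined here, is the double-width accumulator behind the tower arithmetic below: products are formed unreduced with mulPre and reduced once with mod, which is exactly what Fp::mul fuses into one call. An illustrative sketch, reusing the Fp typedef from the earlier example:

typedef mcl::FpDblT<Fp> FpDbl;
FpDbl::init();          // wires up the mulPre/sqrPre (and xbyak) entry points
Fp x = 5, y = 7, z1, z2;
FpDbl d;
FpDbl::mulPre(d, x, y); // double-width product, no reduction yet
FpDbl::mod(z1, d);      // a single reduction back into Fp
Fp::mul(z2, x, y);      // same result in one call, so z1 == z2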
+ } + return *this; + } + // QQQ : does not check range of x strictly(use for debug) + void setMpz(const mpz_class& x) + { + assert(x >= 0); + const size_t xn = gmp::getUnitSize(x); + const size_t N2 = getUnitSize(); + if (xn > N2) { + assert(0); + return; + } + memcpy(v_, gmp::getUnit(x), xn * sizeof(Unit)); + memset(v_ + xn, 0, (N2 - xn) * sizeof(Unit)); + } + void getMpz(bool *pb, mpz_class& x) const + { + gmp::setArray(pb, x, v_, Fp::op_.N * 2); + } +#ifdef MCL_XBYAK_DIRECT_CALL + static void (*add)(FpDblT& z, const FpDblT& x, const FpDblT& y); + static void (*sub)(FpDblT& z, const FpDblT& x, const FpDblT& y); + static void (*mod)(Fp& z, const FpDblT& xy); + static void (*addPre)(FpDblT& z, const FpDblT& x, const FpDblT& y); + static void (*subPre)(FpDblT& z, const FpDblT& x, const FpDblT& y); + static void addC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_add(z.v_, x.v_, y.v_, Fp::op_.p); } + static void subC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_sub(z.v_, x.v_, y.v_, Fp::op_.p); } + static void modC(Fp& z, const FpDblT& xy) { Fp::op_.fpDbl_mod(z.v_, xy.v_, Fp::op_.p); } + static void addPreC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_addPre(z.v_, x.v_, y.v_); } + static void subPreC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_subPre(z.v_, x.v_, y.v_); } +#else + static void add(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_add(z.v_, x.v_, y.v_, Fp::op_.p); } + static void sub(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_sub(z.v_, x.v_, y.v_, Fp::op_.p); } + static void mod(Fp& z, const FpDblT& xy) { Fp::op_.fpDbl_mod(z.v_, xy.v_, Fp::op_.p); } + static void addPre(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_addPre(z.v_, x.v_, y.v_); } + static void subPre(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_subPre(z.v_, x.v_, y.v_); } +#endif + static void mulPreC(FpDblT& xy, const Fp& x, const Fp& y) { Fp::op_.fpDbl_mulPre(xy.v_, x.v_, y.v_); } + static void sqrPreC(FpDblT& xx, const Fp& x) { Fp::op_.fpDbl_sqrPre(xx.v_, x.v_); } + /* + mul(z, x, y) = mulPre(xy, x, y) + mod(z, xy) + */ + static void (*mulPre)(FpDblT& xy, const Fp& x, const Fp& y); + static void (*sqrPre)(FpDblT& xx, const Fp& x); + static void mulUnit(FpDblT& z, const FpDblT& x, Unit y) + { + if (mulSmallUnit(z, x, y)) return; + assert(0); // not supported y + } + static void init() + { + const mcl::fp::Op& op = Fp::getOp(); +#ifdef MCL_XBYAK_DIRECT_CALL + add = fp::func_ptr_cast(op.fpDbl_addA_); + if (add == 0) add = addC; + sub = fp::func_ptr_cast(op.fpDbl_subA_); + if (sub == 0) sub = subC; + mod = fp::func_ptr_cast(op.fpDbl_modA_); + if (mod == 0) mod = modC; + addPre = fp::func_ptr_cast(op.fpDbl_addPre); + if (addPre == 0) addPre = addPreC; + subPre = fp::func_ptr_cast(op.fpDbl_subPre); + if (subPre == 0) subPre = subPreC; +#endif + if (op.fpDbl_mulPreA_) { + mulPre = fp::func_ptr_cast(op.fpDbl_mulPreA_); + } else { + mulPre = mulPreC; + } + if (op.fpDbl_sqrPreA_) { + sqrPre = fp::func_ptr_cast(op.fpDbl_sqrPreA_); + } else { + sqrPre = sqrPreC; + } + } + void operator+=(const FpDblT& x) { add(*this, *this, x); } + void operator-=(const FpDblT& x) { sub(*this, *this, x); } +}; + +#ifdef MCL_XBYAK_DIRECT_CALL +template void (*FpDblT::add)(FpDblT&, const FpDblT&, const FpDblT&); +template void (*FpDblT::sub)(FpDblT&, const FpDblT&, const FpDblT&); +template void (*FpDblT::mod)(Fp&, const FpDblT&); +template void (*FpDblT::addPre)(FpDblT&, const FpDblT&, const FpDblT&); +template void 
(*FpDblT::subPre)(FpDblT&, const FpDblT&, const FpDblT&); +#endif +template void (*FpDblT::mulPre)(FpDblT&, const Fp&, const Fp&); +template void (*FpDblT::sqrPre)(FpDblT&, const Fp&); + +template struct Fp12T; +template class BNT; +template struct Fp2DblT; +/* + beta = -1 + Fp2 = F[i] / (i^2 + 1) + x = a + bi +*/ +template +class Fp2T : public fp::Serializable, + fp::Operator > > { + typedef _Fp Fp; + typedef fp::Unit Unit; + typedef FpDblT FpDbl; + typedef Fp2DblT Fp2Dbl; + static const size_t gN = 5; + /* + g = xi^((p - 1) / 6) + g[] = { g^2, g^4, g^1, g^3, g^5 } + */ + static Fp2T g[gN]; + static Fp2T g2[gN]; + static Fp2T g3[gN]; +public: + static const Fp2T *get_gTbl() { return &g[0]; } + static const Fp2T *get_g2Tbl() { return &g2[0]; } + static const Fp2T *get_g3Tbl() { return &g3[0]; } + typedef typename Fp::BaseFp BaseFp; + static const size_t maxSize = Fp::maxSize * 2; + static inline size_t getByteSize() { return Fp::getByteSize() * 2; } + void dump() const + { + a.dump(); + b.dump(); + } + Fp a, b; + Fp2T() { } + Fp2T(int64_t a) : a(a), b(0) { } + Fp2T(const Fp& a, const Fp& b) : a(a), b(b) { } + Fp2T(int64_t a, int64_t b) : a(a), b(b) { } + Fp* getFp0() { return &a; } + const Fp* getFp0() const { return &a; } + const Unit* getUnit() const { return a.getUnit(); } + void clear() + { + a.clear(); + b.clear(); + } + void set(const Fp &a_, const Fp &b_) + { + a = a_; + b = b_; + } +#ifdef MCL_XBYAK_DIRECT_CALL + static void (*add)(Fp2T& z, const Fp2T& x, const Fp2T& y); + static void (*sub)(Fp2T& z, const Fp2T& x, const Fp2T& y); + static void (*neg)(Fp2T& y, const Fp2T& x); + static void (*mul)(Fp2T& z, const Fp2T& x, const Fp2T& y); + static void (*sqr)(Fp2T& y, const Fp2T& x); +#else + static void add(Fp2T& z, const Fp2T& x, const Fp2T& y) { addC(z, x, y); } + static void sub(Fp2T& z, const Fp2T& x, const Fp2T& y) { subC(z, x, y); } + static void neg(Fp2T& y, const Fp2T& x) { negC(y, x); } + static void mul(Fp2T& z, const Fp2T& x, const Fp2T& y) { mulC(z, x, y); } + static void sqr(Fp2T& y, const Fp2T& x) { sqrC(y, x); } +#endif + static void (*mul_xi)(Fp2T& y, const Fp2T& x); + static void addPre(Fp2T& z, const Fp2T& x, const Fp2T& y) { Fp::addPre(z.a, x.a, y.a); Fp::addPre(z.b, x.b, y.b); } + static void inv(Fp2T& y, const Fp2T& x) { Fp::op_.fp2_inv(y.a.v_, x.a.v_); } + static void divBy2(Fp2T& y, const Fp2T& x) + { + Fp::divBy2(y.a, x.a); + Fp::divBy2(y.b, x.b); + } + static void divBy4(Fp2T& y, const Fp2T& x) + { + Fp::divBy4(y.a, x.a); + Fp::divBy4(y.b, x.b); + } + static void mulFp(Fp2T& z, const Fp2T& x, const Fp& y) + { + Fp::mul(z.a, x.a, y); + Fp::mul(z.b, x.b, y); + } + template + void setArray(bool *pb, const S *buf, size_t n) + { + assert((n & 1) == 0); + n /= 2; + a.setArray(pb, buf, n); + if (!*pb) return; + b.setArray(pb, buf + n, n); + } + template + void load(bool *pb, InputStream& is, int ioMode) + { + a.load(pb, is, ioMode); + if (!*pb) return; + b.load(pb, is, ioMode); + } + /* + Fp2T = + ' ' + + */ + template + void save(bool *pb, OutputStream& os, int ioMode) const + { + const char sep = *fp::getIoSeparator(ioMode); + a.save(pb, os, ioMode); + if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + b.save(pb, os, ioMode); + } + bool isZero() const { return a.isZero() && b.isZero(); } + bool isOne() const { return a.isOne() && b.isZero(); } + bool operator==(const Fp2T& rhs) const { return a == rhs.a && b == rhs.b; } + bool operator!=(const Fp2T& rhs) const { return !operator==(rhs); } + /* + return true is a is odd (do 
not consider b) + this function is for only compressed reprezentation of EC + isOdd() is not good naming. QQQ + */ + bool isOdd() const { return a.isOdd(); } + /* + (a + bi)^2 = (a^2 - b^2) + 2ab i = c + di + A = a^2 + B = b^2 + A = (c +/- sqrt(c^2 + d^2))/2 + b = d / 2a + */ + static inline bool squareRoot(Fp2T& y, const Fp2T& x) + { + Fp t1, t2; + if (x.b.isZero()) { + if (Fp::squareRoot(t1, x.a)) { + y.a = t1; + y.b.clear(); + } else { + bool b = Fp::squareRoot(t1, -x.a); + assert(b); (void)b; + y.a.clear(); + y.b = t1; + } + return true; + } + Fp::sqr(t1, x.a); + Fp::sqr(t2, x.b); + t1 += t2; // c^2 + d^2 + if (!Fp::squareRoot(t1, t1)) return false; + Fp::add(t2, x.a, t1); + Fp::divBy2(t2, t2); + if (!Fp::squareRoot(t2, t2)) { + Fp::sub(t2, x.a, t1); + Fp::divBy2(t2, t2); + bool b = Fp::squareRoot(t2, t2); + assert(b); (void)b; + } + y.a = t2; + t2 += t2; + Fp::inv(t2, t2); + Fp::mul(y.b, x.b, t2); + return true; + } + static void inline norm(Fp& y, const Fp2T& x) + { + Fp aa, bb; + Fp::sqr(aa, x.a); + Fp::sqr(bb, x.b); + Fp::add(y, aa, bb); + } + /* + Frobenius + i^2 = -1 + (a + bi)^p = a + bi^p in Fp + = a + bi if p = 1 mod 4 + = a - bi if p = 3 mod 4 + */ + static void Frobenius(Fp2T& y, const Fp2T& x) + { + if (Fp::getOp().pmod4 == 1) { + if (&y != &x) { + y = x; + } + } else { + if (&y != &x) { + y.a = x.a; + } + Fp::neg(y.b, x.b); + } + } + + static uint32_t get_xi_a() { return Fp::getOp().xi_a; } + static void init() + { +// assert(Fp::maxSize <= 256); + mcl::fp::Op& op = Fp::op_; + assert(op.xi_a); + mul_xi = 0; +#ifdef MCL_XBYAK_DIRECT_CALL + add = fp::func_ptr_cast(op.fp2_addA_); + if (add == 0) add = addC; + sub = fp::func_ptr_cast(op.fp2_subA_); + if (sub == 0) sub = subC; + neg = fp::func_ptr_cast(op.fp2_negA_); + if (neg == 0) neg = negC; + mul = fp::func_ptr_cast(op.fp2_mulA_); + if (mul == 0) mul = mulC; + sqr = fp::func_ptr_cast(op.fp2_sqrA_); + if (sqr == 0) sqr = sqrC; + mul_xi = fp::func_ptr_cast(op.fp2_mul_xiA_); +#endif + op.fp2_inv = fp2_invW; + if (mul_xi == 0) { + if (op.xi_a == 1) { + mul_xi = fp2_mul_xi_1_1iC; + } else { + mul_xi = fp2_mul_xiC; + } + } + FpDblT::init(); + Fp2DblT::init(); + // call init before Fp2::pow because FpDbl is used in Fp2T + const Fp2T xi(op.xi_a, 1); + const mpz_class& p = Fp::getOp().mp; + Fp2T::pow(g[0], xi, (p - 1) / 6); // g = xi^((p-1)/6) + for (size_t i = 1; i < gN; i++) { + g[i] = g[i - 1] * g[0]; + } + /* + permutate [0, 1, 2, 3, 4] => [1, 3, 0, 2, 4] + g[0] = g^2 + g[1] = g^4 + g[2] = g^1 + g[3] = g^3 + g[4] = g^5 + */ + { + Fp2T t = g[0]; + g[0] = g[1]; + g[1] = g[3]; + g[3] = g[2]; + g[2] = t; + } + for (size_t i = 0; i < gN; i++) { + Fp2T t(g[i].a, g[i].b); + if (Fp::getOp().pmod4 == 3) Fp::neg(t.b, t.b); + Fp2T::mul(g2[i], t, g[i]); + g3[i] = g[i] * g2[i]; + } + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + template + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("Fp2T:load"); + } + template + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("Fp2T:save"); + } + template + void setArray(const S *buf, size_t n) + { + bool b; + setArray(&b, buf, n); + if (!b) throw cybozu::Exception("Fp2T:setArray"); + } +#endif +#ifndef CYBOZU_DONT_USE_STRING + Fp2T(const std::string& a, const std::string& b, int base = 0) : a(a, base), b(b, base) {} + friend std::istream& operator>>(std::istream& is, Fp2T& self) + { + self.load(is, fp::detectIoMode(Fp::BaseFp::getIoMode(), is)); + 
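A small sanity sketch of the Fp2 = Fp[i]/(i^2 + 1) arithmetic defined here; it assumes Fp::init was called with a nonzero xi_a (for example Fp::init(&ok, 1, p)) so that the assertion in Fp2T::init() passes:

typedef mcl::Fp2T<Fp> Fp2;
Fp2::init();    // installs the mul/sqr/mul_xi entry points and Frobenius tables
Fp2 x(1, 1), y; // x = 1 + i
Fp2::sqr(y, x); // (1 + i)^2 = 2i, since i^2 = -1
// y.a == 0 and y.b == 2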
return is; + } + friend std::ostream& operator<<(std::ostream& os, const Fp2T& self) + { + self.save(os, fp::detectIoMode(Fp::BaseFp::getIoMode(), os)); + return os; + } +#endif +private: + /* + default Fp2T operator + Fp2T = Fp[i]/(i^2 + 1) + */ + static void addC(Fp2T& z, const Fp2T& x, const Fp2T& y) + { + Fp::add(z.a, x.a, y.a); + Fp::add(z.b, x.b, y.b); + } + static void subC(Fp2T& z, const Fp2T& x, const Fp2T& y) + { + Fp::sub(z.a, x.a, y.a); + Fp::sub(z.b, x.b, y.b); + } + static void negC(Fp2T& y, const Fp2T& x) + { + Fp::neg(y.a, x.a); + Fp::neg(y.b, x.b); + } +#if 0 + /* + x = a + bi, y = c + di, i^2 = -1 + z = xy = (a + bi)(c + di) = (ac - bd) + (ad + bc)i + ad+bc = (a + b)(c + d) - ac - bd + # of mod = 3 + */ + static void fp2_mulW(Unit *z, const Unit *x, const Unit *y) + { + const Fp *px = reinterpret_cast(x); + const Fp *py = reinterpret_cast(y); + const Fp& a = px[0]; + const Fp& b = px[1]; + const Fp& c = py[0]; + const Fp& d = py[1]; + Fp *pz = reinterpret_cast(z); + Fp t1, t2, ac, bd; + Fp::add(t1, a, b); + Fp::add(t2, c, d); + t1 *= t2; // (a + b)(c + d) + Fp::mul(ac, a, c); + Fp::mul(bd, b, d); + Fp::sub(pz[0], ac, bd); // ac - bd + Fp::sub(pz[1], t1, ac); + pz[1] -= bd; + } + static void fp2_mulNFW(Fp2T& z, const Fp2T& x, const Fp2T& y) + { + const fp::Op& op = Fp::op_; + op.fp2_mulNF((Unit*)&z, (const Unit*)&x, (const Unit*)&y, op.p); + } +#endif + static void mulC(Fp2T& z, const Fp2T& x, const Fp2T& y) + { + Fp2Dbl d; + Fp2Dbl::mulPre(d, x, y); + FpDbl::mod(z.a, d.a); + FpDbl::mod(z.b, d.b); + } + /* + x = a + bi, i^2 = -1 + y = x^2 = (a + bi)^2 = (a + b)(a - b) + 2abi + */ + static void sqrC(Fp2T& y, const Fp2T& x) + { + const Fp& a = x.a; + const Fp& b = x.b; +#if 1 // faster than using FpDbl + Fp t1, t2, t3; + Fp::add(t1, b, b); // 2b + t1 *= a; // 2ab + Fp::add(t2, a, b); // a + b + Fp::sub(t3, a, b); // a - b + Fp::mul(y.a, t2, t3); // (a + b)(a - b) + y.b = t1; +#else + Fp t1, t2; + FpDbl d1, d2; + Fp::addPre(t1, b, b); // 2b + FpDbl::mulPre(d2, t1, a); // 2ab + Fp::addPre(t1, a, b); // a + b + Fp::sub(t2, a, b); // a - b + FpDbl::mulPre(d1, t1, t2); // (a + b)(a - b) + FpDbl::mod(py[0], d1); + FpDbl::mod(py[1], d2); +#endif + } + /* + xi = xi_a + i + x = a + bi + y = (a + bi)xi = (a + bi)(xi_a + i) + =(a * x_ia - b) + (a + b xi_a)i + */ + static void fp2_mul_xiC(Fp2T& y, const Fp2T& x) + { + const Fp& a = x.a; + const Fp& b = x.b; + Fp t; + Fp::mulUnit(t, a, Fp::getOp().xi_a); + t -= b; + Fp::mulUnit(y.b, b, Fp::getOp().xi_a); + y.b += a; + y.a = t; + } + /* + xi = 1 + i ; xi_a = 1 + y = (a + bi)xi = (a - b) + (a + b)i + */ + static void fp2_mul_xi_1_1iC(Fp2T& y, const Fp2T& x) + { + const Fp& a = x.a; + const Fp& b = x.b; + Fp t; + Fp::add(t, a, b); + Fp::sub(y.a, a, b); + y.b = t; + } + /* + x = a + bi + 1 / x = (a - bi) / (a^2 + b^2) + */ + static void fp2_invW(Unit *y, const Unit *x) + { + const Fp *px = reinterpret_cast(x); + Fp *py = reinterpret_cast(y); + const Fp& a = px[0]; + const Fp& b = px[1]; + Fp aa, bb; + Fp::sqr(aa, a); + Fp::sqr(bb, b); + aa += bb; + Fp::inv(aa, aa); // aa = 1 / (a^2 + b^2) + Fp::mul(py[0], a, aa); + Fp::mul(py[1], b, aa); + Fp::neg(py[1], py[1]); + } +}; + +#ifdef MCL_XBYAK_DIRECT_CALL +template void (*Fp2T::add)(Fp2T& z, const Fp2T& x, const Fp2T& y); +template void (*Fp2T::sub)(Fp2T& z, const Fp2T& x, const Fp2T& y); +template void (*Fp2T::neg)(Fp2T& y, const Fp2T& x); +template void (*Fp2T::mul)(Fp2T& z, const Fp2T& x, const Fp2T& y); +template void (*Fp2T::sqr)(Fp2T& y, const Fp2T& x); +#endif +template void 
(*Fp2T::mul_xi)(Fp2T& y, const Fp2T& x); + +template +struct Fp2DblT { + typedef FpDblT FpDbl; + typedef Fp2T Fp2; + typedef fp::Unit Unit; + FpDbl a, b; + static void add(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y) + { + FpDbl::add(z.a, x.a, y.a); + FpDbl::add(z.b, x.b, y.b); + } + static void addPre(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y) + { + FpDbl::addPre(z.a, x.a, y.a); + FpDbl::addPre(z.b, x.b, y.b); + } + static void sub(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y) + { + FpDbl::sub(z.a, x.a, y.a); + FpDbl::sub(z.b, x.b, y.b); + } + static void subPre(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y) + { + FpDbl::subPre(z.a, x.a, y.a); + FpDbl::subPre(z.b, x.b, y.b); + } + static void neg(Fp2DblT& y, const Fp2DblT& x) + { + FpDbl::neg(y.a, x.a); + FpDbl::neg(y.b, x.b); + } + static void mul_xi(Fp2DblT& y, const Fp2DblT& x) + { + const uint32_t xi_a = Fp2::get_xi_a(); + if (xi_a == 1) { + FpDbl t; + FpDbl::add(t, x.a, x.b); + FpDbl::sub(y.a, x.a, x.b); + y.b = t; + } else { + FpDbl t; + FpDbl::mulUnit(t, x.a, xi_a); + FpDbl::sub(t, t, x.b); + FpDbl::mulUnit(y.b, x.b, xi_a); + FpDbl::add(y.b, y.b, x.a); + y.a = t; + } + } + static void (*mulPre)(Fp2DblT&, const Fp2&, const Fp2&); + static void (*sqrPre)(Fp2DblT&, const Fp2&); + static void mod(Fp2& y, const Fp2DblT& x) + { + FpDbl::mod(y.a, x.a); + FpDbl::mod(y.b, x.b); + } +#ifndef CYBOZU_DONT_USE_STRING + friend std::ostream& operator<<(std::ostream& os, const Fp2DblT& x) + { + return os << x.a << ' ' << x.b; + } +#endif + void operator+=(const Fp2DblT& x) { add(*this, *this, x); } + void operator-=(const Fp2DblT& x) { sub(*this, *this, x); } + static void init() + { + const mcl::fp::Op& op = Fp::getOp(); + if (op.fp2Dbl_mulPreA_) { + mulPre = fp::func_ptr_cast(op.fp2Dbl_mulPreA_); + } else { + if (op.isFullBit) { + mulPre = fp2Dbl_mulPreW; + } else { + mulPre = fp2Dbl_mulPreW; + } + } + if (op.fp2Dbl_sqrPreA_) { + sqrPre = fp::func_ptr_cast(op.fp2Dbl_sqrPreA_); + } else { + if (op.isFullBit) { + sqrPre = fp2Dbl_sqrPreW; + } else { + sqrPre = fp2Dbl_sqrPreW; + } + } + } + /* + Fp2Dbl::mulPre by FpDblT + @note mod of NIST_P192 is fast + */ + template + static void fp2Dbl_mulPreW(Fp2DblT& z, const Fp2& x, const Fp2& y) + { + const Fp& a = x.a; + const Fp& b = x.b; + const Fp& c = y.a; + const Fp& d = y.b; + FpDbl& d0 = z.a; + FpDbl& d1 = z.b; + FpDbl d2; + Fp s, t; + if (isFullBit) { + Fp::add(s, a, b); + Fp::add(t, c, d); + } else { + Fp::addPre(s, a, b); + Fp::addPre(t, c, d); + } + FpDbl::mulPre(d1, s, t); // (a + b)(c + d) + FpDbl::mulPre(d0, a, c); + FpDbl::mulPre(d2, b, d); + if (isFullBit) { + FpDbl::sub(d1, d1, d0); // (a + b)(c + d) - ac + FpDbl::sub(d1, d1, d2); // (a + b)(c + d) - ac - bd + } else { + FpDbl::subPre(d1, d1, d0); + FpDbl::subPre(d1, d1, d2); + } + FpDbl::sub(d0, d0, d2); // ac - bd + } + template + static void fp2Dbl_sqrPreW(Fp2DblT& y, const Fp2& x) + { + Fp t1, t2; + if (isFullBit) { + Fp::add(t1, x.b, x.b); // 2b + Fp::add(t2, x.a, x.b); // a + b + } else { + Fp::addPre(t1, x.b, x.b); // 2b + Fp::addPre(t2, x.a, x.b); // a + b + } + FpDbl::mulPre(y.b, t1, x.a); // 2ab + Fp::sub(t1, x.a, x.b); // a - b + FpDbl::mulPre(y.a, t1, t2); // (a + b)(a - b) + } +}; + +template void (*Fp2DblT::mulPre)(Fp2DblT&, const Fp2T&, const Fp2T&); +template void (*Fp2DblT::sqrPre)(Fp2DblT&, const Fp2T&); + +template Fp2T Fp2T::g[Fp2T::gN]; +template Fp2T Fp2T::g2[Fp2T::gN]; +template Fp2T Fp2T::g3[Fp2T::gN]; + +template +struct Fp6DblT; +/* + Fp6T = Fp2[v] / (v^3 - xi) + x = a + b v + c v^2 +*/ +template +struct 
Fp6T : public fp::Serializable, + fp::Operator > > { + typedef _Fp Fp; + typedef Fp2T Fp2; + typedef Fp2DblT Fp2Dbl; + typedef Fp6DblT Fp6Dbl; + typedef Fp BaseFp; + Fp2 a, b, c; + Fp6T() { } + Fp6T(int64_t a) : a(a) , b(0) , c(0) { } + Fp6T(const Fp2& a, const Fp2& b, const Fp2& c) : a(a) , b(b) , c(c) { } + void clear() + { + a.clear(); + b.clear(); + c.clear(); + } + Fp* getFp0() { return a.getFp0(); } + const Fp* getFp0() const { return a.getFp0(); } + Fp2* getFp2() { return &a; } + const Fp2* getFp2() const { return &a; } + void set(const Fp2 &a_, const Fp2 &b_, const Fp2 &c_) + { + a = a_; + b = b_; + c = c_; + } + bool isZero() const + { + return a.isZero() && b.isZero() && c.isZero(); + } + bool isOne() const + { + return a.isOne() && b.isZero() && c.isZero(); + } + bool operator==(const Fp6T& rhs) const + { + return a == rhs.a && b == rhs.b && c == rhs.c; + } + bool operator!=(const Fp6T& rhs) const { return !operator==(rhs); } + template + void load(bool *pb, InputStream& is, int ioMode) + { + a.load(pb, is, ioMode); if (!*pb) return; + b.load(pb, is, ioMode); if (!*pb) return; + c.load(pb, is, ioMode); if (!*pb) return; + } + template + void save(bool *pb, OutputStream& os, int ioMode) const + { + const char sep = *fp::getIoSeparator(ioMode); + a.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + b.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + c.save(pb, os, ioMode); + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + template + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("Fp6T:load"); + } + template + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("Fp6T:save"); + } +#endif +#ifndef CYBOZU_DONT_USE_STRING + friend std::istream& operator>>(std::istream& is, Fp6T& self) + { + self.load(is, fp::detectIoMode(Fp::BaseFp::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const Fp6T& self) + { + self.save(os, fp::detectIoMode(Fp::BaseFp::getIoMode(), os)); + return os; + } +#endif + static void add(Fp6T& z, const Fp6T& x, const Fp6T& y) + { + Fp2::add(z.a, x.a, y.a); + Fp2::add(z.b, x.b, y.b); + Fp2::add(z.c, x.c, y.c); + } + static void sub(Fp6T& z, const Fp6T& x, const Fp6T& y) + { + Fp2::sub(z.a, x.a, y.a); + Fp2::sub(z.b, x.b, y.b); + Fp2::sub(z.c, x.c, y.c); + } + static void neg(Fp6T& y, const Fp6T& x) + { + Fp2::neg(y.a, x.a); + Fp2::neg(y.b, x.b); + Fp2::neg(y.c, x.c); + } + /* + x = a + bv + cv^2, v^3 = xi + x^2 = (a^2 + 2bc xi) + (c^2 xi + 2ab)v + (b^2 + 2ac)v^2 + + b^2 + 2ac = (a + b + c)^2 - a^2 - 2bc - c^2 - 2ab + */ + static void sqr(Fp6T& y, const Fp6T& x) + { + Fp2 t1, t2, t3; + Fp2::mul(t1, x.a, x.b); + t1 += t1; // 2ab + Fp2::mul(t2, x.b, x.c); + t2 += t2; // 2bc + Fp2::sqr(t3, x.c); // c^2 + Fp2::add(y.c, x.a, x.c); // a + c, destroy y.c + y.c += x.b; // a + b + c + Fp2::sqr(y.b, y.c); // (a + b + c)^2, destroy y.b + y.b -= t2; // (a + b + c)^2 - 2bc + Fp2::mul_xi(t2, t2); // 2bc xi + Fp2::sqr(y.a, x.a); // a^2, destroy y.a + y.b -= y.a; // (a + b + c)^2 - 2bc - a^2 + y.a += t2; // a^2 + 2bc xi + Fp2::sub(y.c, y.b, t3); // (a + b + c)^2 - 2bc - a^2 - c^2 + Fp2::mul_xi(y.b, t3); // c^2 xi + y.b += t1; // c^2 xi + 2ab + y.c -= t1; // b^2 + 2ac + } + static inline void mul(Fp6T& z, const Fp6T& x, const Fp6T& y); + /* + x = a + bv + cv^2, v^3 = xi + y = 
1/x = p/q where + p = (a^2 - bc xi) + (c^2 xi - ab)v + (b^2 - ac)v^2 + q = c^3 xi^2 + b(b^2 - 3ac)xi + a^3 + = (a^2 - bc xi)a + ((c^2 xi - ab)c + (b^2 - ac)b) xi + */ + static void inv(Fp6T& y, const Fp6T& x) + { + const Fp2& a = x.a; + const Fp2& b = x.b; + const Fp2& c = x.c; + Fp2 aa, bb, cc, ab, bc, ac; + Fp2::sqr(aa, a); + Fp2::sqr(bb, b); + Fp2::sqr(cc, c); + Fp2::mul(ab, a, b); + Fp2::mul(bc, b, c); + Fp2::mul(ac, c, a); + + Fp6T p; + Fp2::mul_xi(p.a, bc); + Fp2::sub(p.a, aa, p.a); // a^2 - bc xi + Fp2::mul_xi(p.b, cc); + p.b -= ab; // c^2 xi - ab + Fp2::sub(p.c, bb, ac); // b^2 - ac + Fp2 q, t; + Fp2::mul(q, p.b, c); + Fp2::mul(t, p.c, b); + q += t; + Fp2::mul_xi(q, q); + Fp2::mul(t, p.a, a); + q += t; + Fp2::inv(q, q); + + Fp2::mul(y.a, p.a, q); + Fp2::mul(y.b, p.b, q); + Fp2::mul(y.c, p.c, q); + } +}; + +template +struct Fp6DblT { + typedef Fp2T Fp2; + typedef Fp6T Fp6; + typedef Fp2DblT Fp2Dbl; + typedef Fp6DblT Fp6Dbl; + typedef fp::Unit Unit; + Fp2Dbl a, b, c; + static void add(Fp6Dbl& z, const Fp6Dbl& x, const Fp6Dbl& y) + { + Fp2Dbl::add(z.a, x.a, y.a); + Fp2Dbl::add(z.b, x.b, y.b); + Fp2Dbl::add(z.c, x.c, y.c); + } + static void sub(Fp6Dbl& z, const Fp6Dbl& x, const Fp6Dbl& y) + { + Fp2Dbl::sub(z.a, x.a, y.a); + Fp2Dbl::sub(z.b, x.b, y.b); + Fp2Dbl::sub(z.c, x.c, y.c); + } + /* + x = a + bv + cv^2, y = d + ev + fv^2, v^3 = xi + xy = (ad + (bf + ce)xi) + ((ae + bd) + cf xi)v + ((af + cd) + be)v^2 + bf + ce = (b + c)(e + f) - be - cf + ae + bd = (a + b)(e + d) - ad - be + af + cd = (a + c)(d + f) - ad - cf + */ + static void mulPre(Fp6DblT& z, const Fp6& x, const Fp6& y) + { +//clk.begin(); + const Fp2& a = x.a; + const Fp2& b = x.b; + const Fp2& c = x.c; + const Fp2& d = y.a; + const Fp2& e = y.b; + const Fp2& f = y.c; + Fp2Dbl& za = z.a; + Fp2Dbl& zb = z.b; + Fp2Dbl& zc = z.c; + Fp2Dbl BE; + Fp2Dbl::mulPre(za, a, d); + Fp2Dbl::mulPre(BE, b, e); + Fp2Dbl::mulPre(zb, c, f); + + Fp2 t1, t2, t3, t4; + Fp2::add(t1, b, c); + Fp2::add(t2, e, f); + Fp2Dbl T1; + Fp2Dbl::mulPre(T1, t1, t2); + Fp2Dbl::sub(T1, T1, BE); + Fp2Dbl::sub(T1, T1, zb); + Fp2Dbl::mul_xi(T1, T1); + + Fp2::add(t2, a, b); + Fp2::add(t3, e, d); + Fp2Dbl T2; + Fp2Dbl::mulPre(T2, t2, t3); + Fp2Dbl::sub(T2, T2, za); + Fp2Dbl::sub(T2, T2, BE); + + Fp2::add(t3, a, c); + Fp2::add(t4, d, f); + Fp2Dbl::mulPre(zc, t3, t4); + Fp2Dbl::sub(zc, zc, za); + Fp2Dbl::sub(zc, zc, zb); + + Fp2Dbl::add(za, za, T1); + Fp2Dbl::mul_xi(zb, zb); + Fp2Dbl::add(zb, zb, T2); + Fp2Dbl::add(zc, zc, BE); +//clk.end(); + } + static void mod(Fp6& y, const Fp6Dbl& x) + { + Fp2Dbl::mod(y.a, x.a); + Fp2Dbl::mod(y.b, x.b); + Fp2Dbl::mod(y.c, x.c); + } +}; + +template +inline void Fp6T::mul(Fp6T& z, const Fp6T& x, const Fp6T& y) +{ + Fp6DblT Z; + Fp6DblT::mulPre(Z, x, y); + Fp6DblT::mod(z, Z); +} + +/* + Fp12T = Fp6[w] / (w^2 - v) + x = a + b w +*/ +template +struct Fp12T : public fp::Serializable, + fp::Operator > > { + typedef Fp2T Fp2; + typedef Fp6T Fp6; + typedef Fp2DblT Fp2Dbl; + typedef Fp6DblT Fp6Dbl; + typedef Fp BaseFp; + Fp6 a, b; + Fp12T() {} + Fp12T(int64_t a) : a(a), b(0) {} + Fp12T(const Fp6& a, const Fp6& b) : a(a), b(b) {} + void clear() + { + a.clear(); + b.clear(); + } + void setOne() + { + clear(); + a.a.a = 1; + } + + Fp* getFp0() { return a.getFp0(); } + const Fp* getFp0() const { return a.getFp0(); } + Fp2* getFp2() { return a.getFp2(); } + const Fp2* getFp2() const { return a.getFp2(); } + void set(const Fp2& v0, const Fp2& v1, const Fp2& v2, const Fp2& v3, const Fp2& v4, const Fp2& v5) + { + a.set(v0, v1, v2); + b.set(v3, v4, 
v5); + } + + bool isZero() const + { + return a.isZero() && b.isZero(); + } + bool isOne() const + { + return a.isOne() && b.isZero(); + } + bool operator==(const Fp12T& rhs) const + { + return a == rhs.a && b == rhs.b; + } + bool operator!=(const Fp12T& rhs) const { return !operator==(rhs); } + static void add(Fp12T& z, const Fp12T& x, const Fp12T& y) + { + Fp6::add(z.a, x.a, y.a); + Fp6::add(z.b, x.b, y.b); + } + static void sub(Fp12T& z, const Fp12T& x, const Fp12T& y) + { + Fp6::sub(z.a, x.a, y.a); + Fp6::sub(z.b, x.b, y.b); + } + static void neg(Fp12T& z, const Fp12T& x) + { + Fp6::neg(z.a, x.a); + Fp6::neg(z.b, x.b); + } + /* + z = x v + y + in Fp6 : (a + bv + cv^2)v = cv^3 + av + bv^2 = cxi + av + bv^2 + */ + static void mulVadd(Fp6& z, const Fp6& x, const Fp6& y) + { + Fp2 t; + Fp2::mul_xi(t, x.c); + Fp2::add(z.c, x.b, y.c); + Fp2::add(z.b, x.a, y.b); + Fp2::add(z.a, t, y.a); + } + static void mulVadd(Fp6Dbl& z, const Fp6Dbl& x, const Fp6Dbl& y) + { + Fp2Dbl t; + Fp2Dbl::mul_xi(t, x.c); + Fp2Dbl::add(z.c, x.b, y.c); + Fp2Dbl::add(z.b, x.a, y.b); + Fp2Dbl::add(z.a, t, y.a); + } + /* + x = a + bw, y = c + dw, w^2 = v + z = xy = (a + bw)(c + dw) = (ac + bdv) + (ad + bc)w + ad+bc = (a + b)(c + d) - ac - bd + + in Fp6 : (a + bv + cv^2)v = cv^3 + av + bv^2 = cxi + av + bv^2 + */ + static void mul(Fp12T& z, const Fp12T& x, const Fp12T& y) + { + // 4.7Kclk -> 4.55Kclk + const Fp6& a = x.a; + const Fp6& b = x.b; + const Fp6& c = y.a; + const Fp6& d = y.b; + Fp6 t1, t2; + Fp6::add(t1, a, b); + Fp6::add(t2, c, d); +#if 1 + Fp6Dbl T, AC, BD; + Fp6Dbl::mulPre(AC, a, c); + Fp6Dbl::mulPre(BD, b, d); + mulVadd(T, BD, AC); + Fp6Dbl::mod(z.a, T); + Fp6Dbl::mulPre(T, t1, t2); // (a + b)(c + d) + Fp6Dbl::sub(T, T, AC); + Fp6Dbl::sub(T, T, BD); + Fp6Dbl::mod(z.b, T); +#else + Fp6 ac, bd; + t1 *= t2; // (a + b)(c + d) + Fp6::mul(ac, a, c); + Fp6::mul(bd, b, d); + mulVadd(z.a, bd, ac); + t1 -= ac; + Fp6::sub(z.b, t1, bd); +#endif + } + /* + x = a + bw, w^2 = v + y = x^2 = (a + bw)^2 = (a^2 + b^2v) + 2abw + a^2 + b^2v = (a + b)(bv + a) - (abv + ab) + */ + static void sqr(Fp12T& y, const Fp12T& x) + { + const Fp6& a = x.a; + const Fp6& b = x.b; + Fp6 t0, t1; + Fp6::add(t0, a, b); // a + b + mulVadd(t1, b, a); // bv + a + t0 *= t1; // (a + b)(bv + a) + Fp6::mul(t1, a, b); // ab + Fp6::add(y.b, t1, t1); // 2ab + mulVadd(y.a, t1, t1); // abv + ab + Fp6::sub(y.a, t0, y.a); + } + /* + x = a + bw, w^2 = v + y = 1/x = (a - bw) / (a^2 - b^2v) + */ + static void inv(Fp12T& y, const Fp12T& x) + { + const Fp6& a = x.a; + const Fp6& b = x.b; + Fp6 t0, t1; + Fp6::sqr(t0, a); + Fp6::sqr(t1, b); + Fp2::mul_xi(t1.c, t1.c); + t0.a -= t1.c; + t0.b -= t1.a; + t0.c -= t1.b; // t0 = a^2 - b^2v + Fp6::inv(t0, t0); + Fp6::mul(y.a, x.a, t0); + Fp6::mul(y.b, x.b, t0); + Fp6::neg(y.b, y.b); + } + /* + y = 1 / x = conjugate of x if |x| = 1 + */ + static void unitaryInv(Fp12T& y, const Fp12T& x) + { + if (&y != &x) y.a = x.a; + Fp6::neg(y.b, x.b); + } + /* + Frobenius + i^2 = -1 + (a + bi)^p = a + bi^p in Fp + = a + bi if p = 1 mod 4 + = a - bi if p = 3 mod 4 + + g = xi^(p - 1) / 6 + v^3 = xi in Fp2 + v^p = ((v^6) ^ (p-1)/6) v = g^2 v + v^2p = g^4 v^2 + (a + bv + cv^2)^p in Fp6 + = F(a) + F(b)g^2 v + F(c) g^4 v^2 + + w^p = ((w^6) ^ (p-1)/6) w = g w + ((a + bv + cv^2)w)^p in Fp12T + = (F(a) g + F(b) g^3 v + F(c) g^5 v^2)w + */ + static void Frobenius(Fp12T& y, const Fp12T& x) + { + for (int i = 0; i < 6; i++) { + Fp2::Frobenius(y.getFp2()[i], x.getFp2()[i]); + } + for (int i = 1; i < 6; i++) { + y.getFp2()[i] *= Fp2::get_gTbl()[i - 
1]; + } + } + static void Frobenius2(Fp12T& y, const Fp12T& x) + { +#if 0 + Frobenius(y, x); + Frobenius(y, y); +#else + y.getFp2()[0] = x.getFp2()[0]; + if (Fp::getOp().pmod4 == 1) { + for (int i = 1; i < 6; i++) { + Fp2::mul(y.getFp2()[i], x.getFp2()[i], Fp2::get_g2Tbl()[i]); + } + } else { + for (int i = 1; i < 6; i++) { + Fp2::mulFp(y.getFp2()[i], x.getFp2()[i], Fp2::get_g2Tbl()[i - 1].a); + } + } +#endif + } + static void Frobenius3(Fp12T& y, const Fp12T& x) + { +#if 0 + Frobenius(y, x); + Frobenius(y, y); + Frobenius(y, y); +#else + Fp2::Frobenius(y.getFp2()[0], x.getFp2()[0]); + for (int i = 1; i < 6; i++) { + Fp2::Frobenius(y.getFp2()[i], x.getFp2()[i]); + y.getFp2()[i] *= Fp2::get_g3Tbl()[i - 1]; + } +#endif + } + template + void load(bool *pb, InputStream& is, int ioMode) + { + a.load(pb, is, ioMode); if (!*pb) return; + b.load(pb, is, ioMode); + } + template + void save(bool *pb, OutputStream& os, int ioMode) const + { + const char sep = *fp::getIoSeparator(ioMode); + a.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + b.save(pb, os, ioMode); + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + template + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("Fp12T:load"); + } + template + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("Fp12T:save"); + } +#endif +#ifndef CYBOZU_DONT_USE_STRING + friend std::istream& operator>>(std::istream& is, Fp12T& self) + { + self.load(is, fp::detectIoMode(Fp::BaseFp::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const Fp12T& self) + { + self.save(os, fp::detectIoMode(Fp::BaseFp::getIoMode(), os)); + return os; + } +#endif +}; + +/* + convert multiplicative group to additive group +*/ +template +struct GroupMtoA : public T { + static T& castT(GroupMtoA& x) { return static_cast(x); } + static const T& castT(const GroupMtoA& x) { return static_cast(x); } + void clear() + { + castT(*this) = 1; + } + bool isZero() const { return castT(*this).isOne(); } + static void add(GroupMtoA& z, const GroupMtoA& x, const GroupMtoA& y) + { + T::mul(castT(z), castT(x), castT(y)); + } + static void dbl(GroupMtoA& y, const GroupMtoA& x) + { + T::sqr(castT(y), castT(x)); + } + static void neg(GroupMtoA& y, const GroupMtoA& x) + { + // assume Fp12 + T::unitaryInv(castT(y), castT(x)); + } + static void Frobenus(GroupMtoA& y, const GroupMtoA& x) + { + T::Frobenius(castT(y), castT(x)); + } + template + static void mul(GroupMtoA& z, const GroupMtoA& x, const INT& y) + { + T::pow(castT(z), castT(x), y); + } + template + static void mulGeneric(GroupMtoA& z, const GroupMtoA& x, const INT& y) + { + T::powGeneric(castT(z), castT(x), y); + } + void operator+=(const GroupMtoA& rhs) + { + add(*this, *this, rhs); + } + void normalize() {} +private: + bool isOne() const; +}; + +} // mcl + diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/gmp_util.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/gmp_util.hpp new file mode 100644 index 000000000..bcbd91a1e --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/gmp_util.hpp @@ -0,0 +1,954 @@ +#pragma once +/** + @file + @brief util function for gmp + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include +#include +#include +#include +#include +#include +#ifdef _MSC_VER + #pragma 
warning(push) + #pragma warning(disable : 4616) + #pragma warning(disable : 4800) + #pragma warning(disable : 4244) + #pragma warning(disable : 4127) + #pragma warning(disable : 4512) + #pragma warning(disable : 4146) +#endif +#if defined(__EMSCRIPTEN__) || defined(__wasm__) + #define MCL_USE_VINT +#endif +#ifdef MCL_USE_VINT +#include +typedef mcl::Vint mpz_class; +#else +#include +#ifdef _MSC_VER + #pragma warning(pop) + #include +#endif +#endif + +#ifndef MCL_SIZEOF_UNIT + #if defined(CYBOZU_OS_BIT) && (CYBOZU_OS_BIT == 32) + #define MCL_SIZEOF_UNIT 4 + #else + #define MCL_SIZEOF_UNIT 8 + #endif +#endif + +namespace mcl { + +namespace fp { + +#if MCL_SIZEOF_UNIT == 8 +typedef uint64_t Unit; +#else +typedef uint32_t Unit; +#endif +#define MCL_UNIT_BIT_SIZE (MCL_SIZEOF_UNIT * 8) + +} // mcl::fp + +namespace gmp { + +typedef mpz_class ImplType; + +// z = [buf[n-1]:..:buf[1]:buf[0]] +// eg. buf[] = {0x12345678, 0xaabbccdd}; => z = 0xaabbccdd12345678; +template +void setArray(bool *pb, mpz_class& z, const T *buf, size_t n) +{ +#ifdef MCL_USE_VINT + z.setArray(pb, buf, n); +#else + mpz_import(z.get_mpz_t(), n, -1, sizeof(*buf), 0, 0, buf); + *pb = true; +#endif +} +/* + buf[0, size) = x + buf[size, maxSize) with zero +*/ +template +bool getArray_(T *buf, size_t maxSize, const U *x, int xn)//const mpz_srcptr x) +{ + const size_t bufByteSize = sizeof(T) * maxSize; + if (xn < 0) return false; + size_t xByteSize = sizeof(*x) * xn; + if (xByteSize > bufByteSize) return false; + memcpy(buf, x, xByteSize); + memset((char*)buf + xByteSize, 0, bufByteSize - xByteSize); + return true; +} +template +void getArray(bool *pb, T *buf, size_t maxSize, const mpz_class& x) +{ +#ifdef MCL_USE_VINT + *pb = getArray_(buf, maxSize, x.getUnit(), x.getUnitSize()); +#else + *pb = getArray_(buf, maxSize, x.get_mpz_t()->_mp_d, x.get_mpz_t()->_mp_size); +#endif +} +inline void set(mpz_class& z, uint64_t x) +{ + bool b; + setArray(&b, z, &x, 1); + assert(b); + (void)b; +} +inline void setStr(bool *pb, mpz_class& z, const char *str, int base = 0) +{ +#ifdef MCL_USE_VINT + z.setStr(pb, str, base); +#else + *pb = z.set_str(str, base) == 0; +#endif +} + +/* + set buf with string terminated by '\0' + return strlen(buf) if success else 0 +*/ +inline size_t getStr(char *buf, size_t bufSize, const mpz_class& z, int base = 10) +{ +#ifdef MCL_USE_VINT + return z.getStr(buf, bufSize, base); +#else + __gmp_alloc_cstring tmp(mpz_get_str(0, base, z.get_mpz_t())); + size_t n = strlen(tmp.str); + if (n + 1 > bufSize) return 0; + memcpy(buf, tmp.str, n + 1); + return n; +#endif +} + +#ifndef CYBOZU_DONT_USE_STRING +inline void getStr(std::string& str, const mpz_class& z, int base = 10) +{ +#ifdef MCL_USE_VINT + z.getStr(str, base); +#else + str = z.get_str(base); +#endif +} +inline std::string getStr(const mpz_class& z, int base = 10) +{ + std::string s; + gmp::getStr(s, z, base); + return s; +} +#endif + +inline void add(mpz_class& z, const mpz_class& x, const mpz_class& y) +{ +#ifdef MCL_USE_VINT + Vint::add(z, x, y); +#else + mpz_add(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); +#endif +} +#ifndef MCL_USE_VINT +inline void add(mpz_class& z, const mpz_class& x, unsigned int y) +{ + mpz_add_ui(z.get_mpz_t(), x.get_mpz_t(), y); +} +inline void sub(mpz_class& z, const mpz_class& x, unsigned int y) +{ + mpz_sub_ui(z.get_mpz_t(), x.get_mpz_t(), y); +} +inline void mul(mpz_class& z, const mpz_class& x, unsigned int y) +{ + mpz_mul_ui(z.get_mpz_t(), x.get_mpz_t(), y); +} +inline void div(mpz_class& q, const mpz_class& x, unsigned int y) +{ + 
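+	// These unsigned-int overloads live only in this #ifndef MCL_USE_VINT
+	// block, i.e. in the GMP build, where they forward to GMP's *_ui
+	// primitives. Illustrative usage sketch (values are arbitrary):
+	//   mpz_class q, x(100);
+	//   mcl::gmp::div(q, x, 7u); // q == 14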
mpz_div_ui(q.get_mpz_t(), x.get_mpz_t(), y); +} +inline void mod(mpz_class& r, const mpz_class& x, unsigned int m) +{ + mpz_mod_ui(r.get_mpz_t(), x.get_mpz_t(), m); +} +inline int compare(const mpz_class& x, int y) +{ + return mpz_cmp_si(x.get_mpz_t(), y); +} +#endif +inline void sub(mpz_class& z, const mpz_class& x, const mpz_class& y) +{ +#ifdef MCL_USE_VINT + Vint::sub(z, x, y); +#else + mpz_sub(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); +#endif +} +inline void mul(mpz_class& z, const mpz_class& x, const mpz_class& y) +{ +#ifdef MCL_USE_VINT + Vint::mul(z, x, y); +#else + mpz_mul(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); +#endif +} +inline void sqr(mpz_class& z, const mpz_class& x) +{ +#ifdef MCL_USE_VINT + Vint::mul(z, x, x); +#else + mpz_mul(z.get_mpz_t(), x.get_mpz_t(), x.get_mpz_t()); +#endif +} +inline void divmod(mpz_class& q, mpz_class& r, const mpz_class& x, const mpz_class& y) +{ +#ifdef MCL_USE_VINT + Vint::divMod(&q, r, x, y); +#else + mpz_divmod(q.get_mpz_t(), r.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); +#endif +} +inline void div(mpz_class& q, const mpz_class& x, const mpz_class& y) +{ +#ifdef MCL_USE_VINT + Vint::div(q, x, y); +#else + mpz_div(q.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); +#endif +} +inline void mod(mpz_class& r, const mpz_class& x, const mpz_class& m) +{ +#ifdef MCL_USE_VINT + Vint::mod(r, x, m); +#else + mpz_mod(r.get_mpz_t(), x.get_mpz_t(), m.get_mpz_t()); +#endif +} +inline void clear(mpz_class& z) +{ +#ifdef MCL_USE_VINT + z.clear(); +#else + mpz_set_ui(z.get_mpz_t(), 0); +#endif +} +inline bool isZero(const mpz_class& z) +{ +#ifdef MCL_USE_VINT + return z.isZero(); +#else + return mpz_sgn(z.get_mpz_t()) == 0; +#endif +} +inline bool isNegative(const mpz_class& z) +{ +#ifdef MCL_USE_VINT + return z.isNegative(); +#else + return mpz_sgn(z.get_mpz_t()) < 0; +#endif +} +inline void neg(mpz_class& z, const mpz_class& x) +{ +#ifdef MCL_USE_VINT + Vint::neg(z, x); +#else + mpz_neg(z.get_mpz_t(), x.get_mpz_t()); +#endif +} +inline int compare(const mpz_class& x, const mpz_class & y) +{ +#ifdef MCL_USE_VINT + return Vint::compare(x, y); +#else + return mpz_cmp(x.get_mpz_t(), y.get_mpz_t()); +#endif +} +template +void addMod(mpz_class& z, const mpz_class& x, const T& y, const mpz_class& m) +{ + add(z, x, y); + if (compare(z, m) >= 0) { + sub(z, z, m); + } +} +template +void subMod(mpz_class& z, const mpz_class& x, const T& y, const mpz_class& m) +{ + sub(z, x, y); + if (!isNegative(z)) return; + add(z, z, m); +} +template +void mulMod(mpz_class& z, const mpz_class& x, const T& y, const mpz_class& m) +{ + mul(z, x, y); + mod(z, z, m); +} +inline void sqrMod(mpz_class& z, const mpz_class& x, const mpz_class& m) +{ + sqr(z, x); + mod(z, z, m); +} +// z = x^y (y >= 0) +inline void pow(mpz_class& z, const mpz_class& x, unsigned int y) +{ +#ifdef MCL_USE_VINT + Vint::pow(z, x, y); +#else + mpz_pow_ui(z.get_mpz_t(), x.get_mpz_t(), y); +#endif +} +// z = x^y mod m (y >=0) +inline void powMod(mpz_class& z, const mpz_class& x, const mpz_class& y, const mpz_class& m) +{ +#ifdef MCL_USE_VINT + Vint::powMod(z, x, y, m); +#else + mpz_powm(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t(), m.get_mpz_t()); +#endif +} +// z = 1/x mod m +inline void invMod(mpz_class& z, const mpz_class& x, const mpz_class& m) +{ +#ifdef MCL_USE_VINT + Vint::invMod(z, x, m); +#else + mpz_invert(z.get_mpz_t(), x.get_mpz_t(), m.get_mpz_t()); +#endif +} +// z = lcm(x, y) +inline void lcm(mpz_class& z, const mpz_class& x, const mpz_class& y) +{ +#ifdef MCL_USE_VINT + Vint::lcm(z, x, y); +#else + 
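+	// Both branches compute the same value; recall lcm(x, y) = |x * y| / gcd(x, y),
+	// e.g. lcm(4, 6) == 12 since gcd(4, 6) == 2.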
mpz_lcm(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); +#endif +} +inline mpz_class lcm(const mpz_class& x, const mpz_class& y) +{ + mpz_class z; + lcm(z, x, y); + return z; +} +// z = gcd(x, y) +inline void gcd(mpz_class& z, const mpz_class& x, const mpz_class& y) +{ +#ifdef MCL_USE_VINT + Vint::gcd(z, x, y); +#else + mpz_gcd(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); +#endif +} +inline mpz_class gcd(const mpz_class& x, const mpz_class& y) +{ + mpz_class z; + gcd(z, x, y); + return z; +} +/* + assume p : odd prime + return 1 if x^2 = a mod p for some x + return -1 if x^2 != a mod p for any x +*/ +inline int legendre(const mpz_class& a, const mpz_class& p) +{ +#ifdef MCL_USE_VINT + return Vint::jacobi(a, p); +#else + return mpz_legendre(a.get_mpz_t(), p.get_mpz_t()); +#endif +} +inline bool isPrime(bool *pb, const mpz_class& x) +{ +#ifdef MCL_USE_VINT + return x.isPrime(pb, 32); +#else + *pb = true; + return mpz_probab_prime_p(x.get_mpz_t(), 32) != 0; +#endif +} +inline size_t getBitSize(const mpz_class& x) +{ +#ifdef MCL_USE_VINT + return x.getBitSize(); +#else + return mpz_sizeinbase(x.get_mpz_t(), 2); +#endif +} +inline bool testBit(const mpz_class& x, size_t pos) +{ +#ifdef MCL_USE_VINT + return x.testBit(pos); +#else + return mpz_tstbit(x.get_mpz_t(), pos) != 0; +#endif +} +inline void resetBit(mpz_class& x, size_t pos) +{ +#ifdef MCL_USE_VINT + x.setBit(pos, false); +#else + mpz_clrbit(x.get_mpz_t(), pos); +#endif +} +inline void setBit(mpz_class& x, size_t pos, bool v = true) +{ +#ifdef MCL_USE_VINT + x.setBit(pos, v); +#else + if (v) { + mpz_setbit(x.get_mpz_t(), pos); + } else { + resetBit(x, pos); + } +#endif +} +inline const fp::Unit *getUnit(const mpz_class& x) +{ +#ifdef MCL_USE_VINT + return x.getUnit(); +#else + return reinterpret_cast(x.get_mpz_t()->_mp_d); +#endif +} +inline fp::Unit getUnit(const mpz_class& x, size_t i) +{ + return getUnit(x)[i]; +} +inline size_t getUnitSize(const mpz_class& x) +{ +#ifdef MCL_USE_VINT + return x.getUnitSize(); +#else + return std::abs(x.get_mpz_t()->_mp_size); +#endif +} +inline mpz_class abs(const mpz_class& x) +{ +#ifdef MCL_USE_VINT + return Vint::abs(x); +#else + return ::abs(x); +#endif +} + +inline void getRand(bool *pb, mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen()) +{ + if (rg.isZero()) rg = fp::RandGen::get(); + assert(bitSize > 1); + const size_t rem = bitSize & 31; + const size_t n = (bitSize + 31) / 32; + uint32_t buf[128]; + assert(n <= CYBOZU_NUM_OF_ARRAY(buf)); + if (n > CYBOZU_NUM_OF_ARRAY(buf)) { + *pb = false; + return; + } + rg.read(pb, buf, n * sizeof(buf[0])); + if (!*pb) return; + uint32_t v = buf[n - 1]; + if (rem == 0) { + v |= 1U << 31; + } else { + v &= (1U << rem) - 1; + v |= 1U << (rem - 1); + } + buf[n - 1] = v; + setArray(pb, z, buf, n); +} + +inline void getRandPrime(bool *pb, mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen(), bool setSecondBit = false, bool mustBe3mod4 = false) +{ + if (rg.isZero()) rg = fp::RandGen::get(); + assert(bitSize > 2); + for (;;) { + getRand(pb, z, bitSize, rg); + if (!*pb) return; + if (setSecondBit) { + z |= mpz_class(1) << (bitSize - 2); + } + if (mustBe3mod4) { + z |= 3; + } + bool ret = isPrime(pb, z); + if (!*pb) return; + if (ret) return; + } +} +inline mpz_class getQuadraticNonResidue(const mpz_class& p) +{ + mpz_class g = 2; + while (legendre(g, p) > 0) { + ++g; + } + return g; +} + +namespace impl { + +template +void convertToBinary(Vec& v, const mpz_class& x) +{ + const size_t len = gmp::getBitSize(x); + v.resize(len); + for (size_t i = 0; 
i < len; i++) { + v[i] = gmp::testBit(x, len - 1 - i) ? 1 : 0; + } +} + +template +size_t getContinuousVal(const Vec& v, size_t pos, int val) +{ + while (pos >= 2) { + if (v[pos] != val) break; + pos--; + } + return pos; +} + +template +void convertToNAF(Vec& v, const Vec& in) +{ + v.copy(in); + size_t pos = v.size() - 1; + for (;;) { + size_t p = getContinuousVal(v, pos, 0); + if (p == 1) return; + assert(v[p] == 1); + size_t q = getContinuousVal(v, p, 1); + if (q == 1) return; + assert(v[q] == 0); + if (p - q <= 1) { + pos = p - 1; + continue; + } + v[q] = 1; + for (size_t i = q + 1; i < p; i++) { + v[i] = 0; + } + v[p] = -1; + pos = q; + } +} + +template +size_t getNumOfNonZeroElement(const Vec& v) +{ + size_t w = 0; + for (size_t i = 0; i < v.size(); i++) { + if (v[i]) w++; + } + return w; +} + +} // impl + +/* + compute a repl of x which has smaller Hamming weights. + return true if naf is selected +*/ +template +bool getNAF(Vec& v, const mpz_class& x) +{ + Vec bin; + impl::convertToBinary(bin, x); + Vec naf; + impl::convertToNAF(naf, bin); + const size_t binW = impl::getNumOfNonZeroElement(bin); + const size_t nafW = impl::getNumOfNonZeroElement(naf); + if (nafW < binW) { + v.swap(naf); + return true; + } else { + v.swap(bin); + return false; + } +} + +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void setStr(mpz_class& z, const std::string& str, int base = 0) +{ + bool b; + setStr(&b, z, str.c_str(), base); + if (!b) throw cybozu::Exception("gmp:setStr"); +} +template +void setArray(mpz_class& z, const T *buf, size_t n) +{ + bool b; + setArray(&b, z, buf, n); + if (!b) throw cybozu::Exception("gmp:setArray"); +} +template +void getArray(T *buf, size_t maxSize, const mpz_class& x) +{ + bool b; + getArray(&b, buf, maxSize, x); + if (!b) throw cybozu::Exception("gmp:getArray"); +} +inline bool isPrime(const mpz_class& x) +{ + bool b; + bool ret = isPrime(&b, x); + if (!b) throw cybozu::Exception("gmp:isPrime"); + return ret; +} +inline void getRand(mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen()) +{ + bool b; + getRand(&b, z, bitSize, rg); + if (!b) throw cybozu::Exception("gmp:getRand"); +} +inline void getRandPrime(mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen(), bool setSecondBit = false, bool mustBe3mod4 = false) +{ + bool b; + getRandPrime(&b, z, bitSize, rg, setSecondBit, mustBe3mod4); + if (!b) throw cybozu::Exception("gmp:getRandPrime"); +} +#endif + + +} // mcl::gmp + +/* + Tonelli-Shanks +*/ +class SquareRoot { + bool isPrecomputed_; + bool isPrime; + mpz_class p; + mpz_class g; + int r; + mpz_class q; // p - 1 = 2^r q + mpz_class s; // s = g^q + mpz_class q_add_1_div_2; + struct Tbl { + const char *p; + const char *g; + int r; + const char *q; + const char *s; + const char *q_add_1_div_2; + }; + bool setIfPrecomputed(const mpz_class& p_) + { + static const Tbl tbl[] = { + { // BN254.p + "2523648240000001ba344d80000000086121000000000013a700000000000013", + "2", + 1, + "1291b24120000000dd1a26c0000000043090800000000009d380000000000009", + "2523648240000001ba344d80000000086121000000000013a700000000000012", + "948d920900000006e8d1360000000021848400000000004e9c0000000000005", + }, + { // BN254.r + "2523648240000001ba344d8000000007ff9f800000000010a10000000000000d", + "2", + 2, + "948d920900000006e8d136000000001ffe7e000000000042840000000000003", + "9366c4800000000555150000000000122400000000000015", + "4a46c9048000000374689b000000000fff3f000000000021420000000000002", + }, + { // BLS12_381,p + 
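+		// Row layout (identical for every entry): p, then g (a quadratic
+		// non-residue mod p), r and q with p - 1 = 2^r * q for odd q,
+		// s = g^q mod p, and (q + 1) / 2 -- the exponent used directly
+		// when r == 1, because then sqrt(a) = a^((q+1)/2) = a^((p+1)/4).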
"1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab", + "2", + 1, + "d0088f51cbff34d258dd3db21a5d66bb23ba5c279c2895fb39869507b587b120f55ffff58a9ffffdcff7fffffffd555", + "1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaaa", + "680447a8e5ff9a692c6e9ed90d2eb35d91dd2e13ce144afd9cc34a83dac3d8907aaffffac54ffffee7fbfffffffeaab", + }, + { // BLS12_381.r + "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001", + "5", + 32, + "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff", + "212d79e5b416b6f0fd56dc8d168d6c0c4024ff270b3e0941b788f500b912f1f", + "39f6d3a994cebea4199cec0404d0ec02a9ded2017fff2dff80000000", + }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + mpz_class targetPrime; + bool b; + mcl::gmp::setStr(&b, targetPrime, tbl[i].p, 16); + if (!b) continue; + if (targetPrime != p_) continue; + isPrime = true; + p = p_; + mcl::gmp::setStr(&b, g, tbl[i].g, 16); + if (!b) continue; + r = tbl[i].r; + mcl::gmp::setStr(&b, q, tbl[i].q, 16); + if (!b) continue; + mcl::gmp::setStr(&b, s, tbl[i].s, 16); + if (!b) continue; + mcl::gmp::setStr(&b, q_add_1_div_2, tbl[i].q_add_1_div_2, 16); + if (!b) continue; + isPrecomputed_ = true; + return true; + } + return false; + } +public: + SquareRoot() { clear(); } + bool isPrecomputed() const { return isPrecomputed_; } + void clear() + { + isPrecomputed_ = false; + isPrime = false; + p = 0; + g = 0; + r = 0; + q = 0; + s = 0; + q_add_1_div_2 = 0; + } +#if !defined(CYBOZU_DONT_USE_USE_STRING) && !defined(CYBOZU_DONT_USE_EXCEPTION) + void dump() const + { + printf("\"%s\",\n", mcl::gmp::getStr(p, 16).c_str()); + printf("\"%s\",\n", mcl::gmp::getStr(g, 16).c_str()); + printf("%d,\n", r); + printf("\"%s\",\n", mcl::gmp::getStr(q, 16).c_str()); + printf("\"%s\",\n", mcl::gmp::getStr(s, 16).c_str()); + printf("\"%s\",\n", mcl::gmp::getStr(q_add_1_div_2, 16).c_str()); + } +#endif + void set(bool *pb, const mpz_class& _p, bool usePrecomputedTable = true) + { + if (usePrecomputedTable && setIfPrecomputed(_p)) { + *pb = true; + return; + } + p = _p; + if (p <= 2) { + *pb = false; + return; + } + isPrime = gmp::isPrime(pb, p); + if (!*pb) return; + if (!isPrime) { + *pb = false; + return; + } + g = gmp::getQuadraticNonResidue(p); + // p - 1 = 2^r q, q is odd + r = 0; + q = p - 1; + while ((q & 1) == 0) { + r++; + q /= 2; + } + gmp::powMod(s, g, q, p); + q_add_1_div_2 = (q + 1) / 2; + *pb = true; + } + /* + solve x^2 = a mod p + */ + bool get(mpz_class& x, const mpz_class& a) const + { + if (!isPrime) { + return false; + } + if (a == 0) { + x = 0; + return true; + } + if (gmp::legendre(a, p) < 0) return false; + if (r == 1) { + // (p + 1) / 4 = (q + 1) / 2 + gmp::powMod(x, a, q_add_1_div_2, p); + return true; + } + mpz_class c = s, d; + int e = r; + gmp::powMod(d, a, q, p); + gmp::powMod(x, a, q_add_1_div_2, p); // destroy a if &x == &a + mpz_class dd; + mpz_class b; + while (d != 1) { + int i = 1; + dd = d * d; dd %= p; + while (dd != 1) { + dd *= dd; dd %= p; + i++; + } + b = 1; + b <<= e - i - 1; + gmp::powMod(b, c, b, p); + x *= b; x %= p; + c = b * b; c %= p; + d *= c; d %= p; + e = i; + } + return true; + } + /* + solve x^2 = a in Fp + */ + template + bool get(Fp& x, const Fp& a) const + { + assert(Fp::getOp().mp == p); + if (a == 0) { + x = 0; + return true; + } + { + bool b; + mpz_class aa; + a.getMpz(&b, aa); + assert(b); + if (gmp::legendre(aa, p) < 0) return false; + } + if (r == 1) { + // (p + 1) / 4 = (q + 1) / 2 + Fp::pow(x, a, 
q_add_1_div_2); + return true; + } + Fp c, d; + { + bool b; + c.setMpz(&b, s); + assert(b); + } + int e = r; + Fp::pow(d, a, q); + Fp::pow(x, a, q_add_1_div_2); // destroy a if &x == &a + Fp dd; + Fp b; + while (!d.isOne()) { + int i = 1; + Fp::sqr(dd, d); + while (!dd.isOne()) { + dd *= dd; + i++; + } + b = 1; +// b <<= e - i - 1; + for (int j = 0; j < e - i - 1; j++) { + b += b; + } + Fp::pow(b, c, b); + x *= b; + Fp::sqr(c, b); + d *= c; + e = i; + } + return true; + } + bool operator==(const SquareRoot& rhs) const + { + return isPrime == rhs.isPrime && p == rhs.p && g == rhs.g && r == rhs.r + && q == rhs.q && s == rhs.s && q_add_1_div_2 == rhs.q_add_1_div_2; + } + bool operator!=(const SquareRoot& rhs) const { return !operator==(rhs); } +#ifndef CYBOZU_DONT_USE_EXCEPTION + void set(const mpz_class& _p) + { + bool b; + set(&b, _p); + if (!b) throw cybozu::Exception("gmp:SquareRoot:set"); + } +#endif +}; + +/* + Barrett Reduction + for non GMP version + mod of GMP is faster than Modp +*/ +struct Modp { + static const size_t unitBitSize = sizeof(mcl::fp::Unit) * 8; + mpz_class p_; + mpz_class u_; + mpz_class a_; + size_t pBitSize_; + size_t N_; + bool initU_; // Is u_ initialized? + Modp() + : pBitSize_(0) + , N_(0) + , initU_(false) + { + } + // x &= 1 << (unitBitSize * unitSize) + void shrinkSize(mpz_class &x, size_t unitSize) const + { + size_t u = gmp::getUnitSize(x); + if (u < unitSize) return; + bool b; + gmp::setArray(&b, x, gmp::getUnit(x), unitSize); + (void)b; + assert(b); + } + // p_ is set by p and compute (u_, a_) if possible + void init(const mpz_class& p) + { + p_ = p; + pBitSize_ = gmp::getBitSize(p); + N_ = (pBitSize_ + unitBitSize - 1) / unitBitSize; + initU_ = false; +#if 0 + u_ = (mpz_class(1) << (unitBitSize * 2 * N_)) / p_; +#else + /* + 1 << (unitBitSize * 2 * N_) may be overflow, + so use (1 << (unitBitSize * 2 * N_)) - 1 because u_ is same. 
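+		Barrett sketch of modp() below (write k = unitBitSize):
+		with u_ = floor(2^(2 k N_) / p_) and a_ = 2^(k (N_ + 1)),
+		compute q = ((t >> k(N_ - 1)) * u_) >> k(N_ + 1), truncate both
+		t and q * p_ to N_ + 1 units, add a_ if the difference went
+		negative, then finish with a conditional subtraction of p_.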
+ */ + uint8_t buf[48 * 2]; + const size_t byteSize = unitBitSize / 8 * 2 * N_; + if (byteSize > sizeof(buf)) return; + memset(buf, 0xff, byteSize); + bool b; + gmp::setArray(&b, u_, buf, byteSize); + if (!b) return; +#endif + u_ /= p_; + a_ = mpz_class(1) << (unitBitSize * (N_ + 1)); + initU_ = true; + } + void modp(mpz_class& r, const mpz_class& t) const + { + assert(p_ > 0); + const size_t tBitSize = gmp::getBitSize(t); + // use gmp::mod if init() fails or t is too large + if (tBitSize > unitBitSize * 2 * N_ || !initU_) { + gmp::mod(r, t, p_); + return; + } + if (tBitSize < pBitSize_) { + r = t; + return; + } + // mod is faster than modp if t is small + if (tBitSize <= unitBitSize * N_) { + gmp::mod(r, t, p_); + return; + } + mpz_class q; + q = t; + q >>= unitBitSize * (N_ - 1); + q *= u_; + q >>= unitBitSize * (N_ + 1); + q *= p_; + shrinkSize(q, N_ + 1); + r = t; + shrinkSize(r, N_ + 1); + r -= q; + if (r < 0) { + r += a_; + } + if (r >= p_) { + r -= p_; + } + } +}; + +} // mcl diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/impl/bn_c_impl.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/impl/bn_c_impl.hpp new file mode 100644 index 000000000..bec2466dd --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/impl/bn_c_impl.hpp @@ -0,0 +1,643 @@ +/* + This is an internal header + Do not include this +*/ +#define MCLBN_DLL_EXPORT +#include + +#if MCLBN_FP_UNIT_SIZE == 4 && MCLBN_FR_UNIT_SIZE == 4 +#include +#elif MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 6 +#include +#elif MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 4 +#include +#elif MCLBN_FP_UNIT_SIZE == 8 && MCLBN_FR_UNIT_SIZE == 8 +#include +#else + #error "not supported size" +#endif +#include +#include +using namespace mcl::bn; + +static Fr *cast(mclBnFr *p) { return reinterpret_cast(p); } +static const Fr *cast(const mclBnFr *p) { return reinterpret_cast(p); } + +static G1 *cast(mclBnG1 *p) { return reinterpret_cast(p); } +static const G1 *cast(const mclBnG1 *p) { return reinterpret_cast(p); } + +static G2 *cast(mclBnG2 *p) { return reinterpret_cast(p); } +static const G2 *cast(const mclBnG2 *p) { return reinterpret_cast(p); } + +static Fp12 *cast(mclBnGT *p) { return reinterpret_cast(p); } +static const Fp12 *cast(const mclBnGT *p) { return reinterpret_cast(p); } + +static Fp6 *cast(uint64_t *p) { return reinterpret_cast(p); } +static const Fp6 *cast(const uint64_t *p) { return reinterpret_cast(p); } + +static Fp2 *cast(mclBnFp2 *p) { return reinterpret_cast(p); } +static const Fp2 *cast(const mclBnFp2 *p) { return reinterpret_cast(p); } + +static Fp *cast(mclBnFp *p) { return reinterpret_cast(p); } +static const Fp *cast(const mclBnFp *p) { return reinterpret_cast(p); } + +template +int setStr(T *x, const char *buf, mclSize bufSize, int ioMode) +{ + size_t n = cast(x)->deserialize(buf, bufSize, ioMode); + return n > 0 ? 0 : -1; +} + +#ifdef __EMSCRIPTEN__ +// use these functions forcibly +extern "C" MCLBN_DLL_API void *mclBnMalloc(size_t n) +{ + return malloc(n); +} +extern "C" MCLBN_DLL_API void mclBnFree(void *p) +{ + free(p); +} +#endif + +int mclBn_getVersion() +{ + return mcl::version; +} + +int mclBn_init(int curve, int compiledTimeVar) +{ + if (compiledTimeVar != MCLBN_COMPILED_TIME_VAR) { + return -(compiledTimeVar | (MCLBN_COMPILED_TIME_VAR * 100)); + } + if (MCL_EC_BEGIN <= curve && curve < MCL_EC_END) { + const mcl::EcParam *para = mcl::getEcParam(curve); + if (para == 0) return -2; + bool b; + initG1only(&b, *para); + return b ? 
0 : -1; + } + const mcl::CurveParam& cp = mcl::getCurveParam(curve); + bool b; + initPairing(&b, cp); + return b ? 0 : -1; +} + +int mclBn_getOpUnitSize() +{ + return (int)Fp::getUnitSize() * sizeof(mcl::fp::Unit) / sizeof(uint64_t); +} + +int mclBn_getG1ByteSize() +{ + return mclBn_getFpByteSize(); +} + +int mclBn_getFrByteSize() +{ + return (int)Fr::getByteSize(); +} + +int mclBn_getFpByteSize() +{ + return (int)Fp::getByteSize(); +} + +mclSize mclBn_getCurveOrder(char *buf, mclSize maxBufSize) +{ + return Fr::getModulo(buf, maxBufSize); +} + +mclSize mclBn_getFieldOrder(char *buf, mclSize maxBufSize) +{ + return Fp::getModulo(buf, maxBufSize); +} + +//////////////////////////////////////////////// +// set zero +void mclBnFr_clear(mclBnFr *x) +{ + cast(x)->clear(); +} + +// set x to y +void mclBnFr_setInt(mclBnFr *y, mclInt x) +{ + *cast(y) = x; +} +void mclBnFr_setInt32(mclBnFr *y, int x) +{ + *cast(y) = x; +} + +int mclBnFr_setStr(mclBnFr *x, const char *buf, mclSize bufSize, int ioMode) +{ + return setStr(x, buf, bufSize, ioMode); +} +int mclBnFr_setLittleEndian(mclBnFr *x, const void *buf, mclSize bufSize) +{ + cast(x)->setArrayMask((const char *)buf, bufSize); + return 0; +} +int mclBnFr_setLittleEndianMod(mclBnFr *x, const void *buf, mclSize bufSize) +{ + bool b; + cast(x)->setArray(&b, (const char *)buf, bufSize, mcl::fp::Mod); + return b ? 0 : -1; +} +mclSize mclBnFr_deserialize(mclBnFr *x, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(x)->deserialize(buf, bufSize); +} +// return 1 if true +int mclBnFr_isValid(const mclBnFr *x) +{ + return cast(x)->isValid(); +} +int mclBnFr_isEqual(const mclBnFr *x, const mclBnFr *y) +{ + return *cast(x) == *cast(y); +} +int mclBnFr_isZero(const mclBnFr *x) +{ + return cast(x)->isZero(); +} +int mclBnFr_isOne(const mclBnFr *x) +{ + return cast(x)->isOne(); +} + +#ifndef MCL_DONT_USE_CSRPNG +int mclBnFr_setByCSPRNG(mclBnFr *x) +{ + bool b; + cast(x)->setByCSPRNG(&b); + return b ? 
0 : -1; +} +void mclBn_setRandFunc(void *self, unsigned int (*readFunc)(void *self, void *buf, unsigned int bufSize)) +{ + mcl::fp::RandGen::setRandFunc(self, readFunc); +} +#endif + +// hash(buf) and set x +int mclBnFr_setHashOf(mclBnFr *x, const void *buf, mclSize bufSize) +{ + cast(x)->setHashOf(buf, bufSize); + return 0; +} + +mclSize mclBnFr_getStr(char *buf, mclSize maxBufSize, const mclBnFr *x, int ioMode) +{ + return cast(x)->getStr(buf, maxBufSize, ioMode); +} +mclSize mclBnFr_serialize(void *buf, mclSize maxBufSize, const mclBnFr *x) +{ + return (mclSize)cast(x)->serialize(buf, maxBufSize); +} + +void mclBnFr_neg(mclBnFr *y, const mclBnFr *x) +{ + Fr::neg(*cast(y), *cast(x)); +} +void mclBnFr_inv(mclBnFr *y, const mclBnFr *x) +{ + Fr::inv(*cast(y), *cast(x)); +} +void mclBnFr_sqr(mclBnFr *y, const mclBnFr *x) +{ + Fr::sqr(*cast(y), *cast(x)); +} +void mclBnFr_add(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) +{ + Fr::add(*cast(z),*cast(x), *cast(y)); +} +void mclBnFr_sub(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) +{ + Fr::sub(*cast(z),*cast(x), *cast(y)); +} +void mclBnFr_mul(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) +{ + Fr::mul(*cast(z),*cast(x), *cast(y)); +} +void mclBnFr_div(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) +{ + Fr::div(*cast(z),*cast(x), *cast(y)); +} + +//////////////////////////////////////////////// +// set zero +void mclBnG1_clear(mclBnG1 *x) +{ + cast(x)->clear(); +} + +int mclBnG1_setStr(mclBnG1 *x, const char *buf, mclSize bufSize, int ioMode) +{ + return setStr(x, buf, bufSize, ioMode); +} +mclSize mclBnG1_deserialize(mclBnG1 *x, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(x)->deserialize(buf, bufSize); +} + +// return 1 if true +int mclBnG1_isValid(const mclBnG1 *x) +{ + return cast(x)->isValid(); +} +int mclBnG1_isEqual(const mclBnG1 *x, const mclBnG1 *y) +{ + return *cast(x) == *cast(y); +} +int mclBnG1_isZero(const mclBnG1 *x) +{ + return cast(x)->isZero(); +} +int mclBnG1_isValidOrder(const mclBnG1 *x) +{ + return cast(x)->isValidOrder(); +} + +int mclBnG1_hashAndMapTo(mclBnG1 *x, const void *buf, mclSize bufSize) +{ + hashAndMapToG1(*cast(x), buf, bufSize); + return 0; +} + +mclSize mclBnG1_getStr(char *buf, mclSize maxBufSize, const mclBnG1 *x, int ioMode) +{ + return cast(x)->getStr(buf, maxBufSize, ioMode); +} + +mclSize mclBnG1_serialize(void *buf, mclSize maxBufSize, const mclBnG1 *x) +{ + return (mclSize)cast(x)->serialize(buf, maxBufSize); +} + +void mclBnG1_neg(mclBnG1 *y, const mclBnG1 *x) +{ + G1::neg(*cast(y), *cast(x)); +} +void mclBnG1_dbl(mclBnG1 *y, const mclBnG1 *x) +{ + G1::dbl(*cast(y), *cast(x)); +} +void mclBnG1_normalize(mclBnG1 *y, const mclBnG1 *x) +{ + G1::normalize(*cast(y), *cast(x)); +} +void mclBnG1_add(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y) +{ + G1::add(*cast(z),*cast(x), *cast(y)); +} +void mclBnG1_sub(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y) +{ + G1::sub(*cast(z),*cast(x), *cast(y)); +} +void mclBnG1_mul(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y) +{ + G1::mul(*cast(z),*cast(x), *cast(y)); +} +void mclBnG1_mulCT(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y) +{ + G1::mulCT(*cast(z),*cast(x), *cast(y)); +} + +//////////////////////////////////////////////// +// set zero +void mclBnG2_clear(mclBnG2 *x) +{ + cast(x)->clear(); +} + +int mclBnG2_setStr(mclBnG2 *x, const char *buf, mclSize bufSize, int ioMode) +{ + return setStr(x, buf, bufSize, ioMode); +} +mclSize mclBnG2_deserialize(mclBnG2 *x, const void *buf, mclSize bufSize) +{ + return 
(mclSize)cast(x)->deserialize(buf, bufSize); +} + +// return 1 if true +int mclBnG2_isValid(const mclBnG2 *x) +{ + return cast(x)->isValid(); +} +int mclBnG2_isEqual(const mclBnG2 *x, const mclBnG2 *y) +{ + return *cast(x) == *cast(y); +} +int mclBnG2_isZero(const mclBnG2 *x) +{ + return cast(x)->isZero(); +} +int mclBnG2_isValidOrder(const mclBnG2 *x) +{ + return cast(x)->isValidOrder(); +} + +int mclBnG2_hashAndMapTo(mclBnG2 *x, const void *buf, mclSize bufSize) +{ + hashAndMapToG2(*cast(x), buf, bufSize); + return 0; +} + +mclSize mclBnG2_getStr(char *buf, mclSize maxBufSize, const mclBnG2 *x, int ioMode) +{ + return cast(x)->getStr(buf, maxBufSize, ioMode); +} + +mclSize mclBnG2_serialize(void *buf, mclSize maxBufSize, const mclBnG2 *x) +{ + return (mclSize)cast(x)->serialize(buf, maxBufSize); +} + +void mclBnG2_neg(mclBnG2 *y, const mclBnG2 *x) +{ + G2::neg(*cast(y), *cast(x)); +} +void mclBnG2_dbl(mclBnG2 *y, const mclBnG2 *x) +{ + G2::dbl(*cast(y), *cast(x)); +} +void mclBnG2_normalize(mclBnG2 *y, const mclBnG2 *x) +{ + G2::normalize(*cast(y), *cast(x)); +} +void mclBnG2_add(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y) +{ + G2::add(*cast(z),*cast(x), *cast(y)); +} +void mclBnG2_sub(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y) +{ + G2::sub(*cast(z),*cast(x), *cast(y)); +} +void mclBnG2_mul(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y) +{ + G2::mul(*cast(z),*cast(x), *cast(y)); +} +void mclBnG2_mulCT(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y) +{ + G2::mulCT(*cast(z),*cast(x), *cast(y)); +} + +//////////////////////////////////////////////// +// set zero +void mclBnGT_clear(mclBnGT *x) +{ + cast(x)->clear(); +} +void mclBnGT_setInt(mclBnGT *y, mclInt x) +{ + cast(y)->clear(); + *(cast(y)->getFp0()) = x; +} +void mclBnGT_setInt32(mclBnGT *y, int x) +{ + cast(y)->clear(); + *(cast(y)->getFp0()) = x; +} + +int mclBnGT_setStr(mclBnGT *x, const char *buf, mclSize bufSize, int ioMode) +{ + return setStr(x, buf, bufSize, ioMode); +} +mclSize mclBnGT_deserialize(mclBnGT *x, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(x)->deserialize(buf, bufSize); +} + +// return 1 if true +int mclBnGT_isEqual(const mclBnGT *x, const mclBnGT *y) +{ + return *cast(x) == *cast(y); +} +int mclBnGT_isZero(const mclBnGT *x) +{ + return cast(x)->isZero(); +} +int mclBnGT_isOne(const mclBnGT *x) +{ + return cast(x)->isOne(); +} + +mclSize mclBnGT_getStr(char *buf, mclSize maxBufSize, const mclBnGT *x, int ioMode) +{ + return cast(x)->getStr(buf, maxBufSize, ioMode); +} + +mclSize mclBnGT_serialize(void *buf, mclSize maxBufSize, const mclBnGT *x) +{ + return (mclSize)cast(x)->serialize(buf, maxBufSize); +} + +void mclBnGT_neg(mclBnGT *y, const mclBnGT *x) +{ + Fp12::neg(*cast(y), *cast(x)); +} +void mclBnGT_inv(mclBnGT *y, const mclBnGT *x) +{ + Fp12::inv(*cast(y), *cast(x)); +} +void mclBnGT_sqr(mclBnGT *y, const mclBnGT *x) +{ + Fp12::sqr(*cast(y), *cast(x)); +} +void mclBnGT_add(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) +{ + Fp12::add(*cast(z),*cast(x), *cast(y)); +} +void mclBnGT_sub(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) +{ + Fp12::sub(*cast(z),*cast(x), *cast(y)); +} +void mclBnGT_mul(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) +{ + Fp12::mul(*cast(z),*cast(x), *cast(y)); +} +void mclBnGT_div(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) +{ + Fp12::div(*cast(z),*cast(x), *cast(y)); +} + +void mclBnGT_pow(mclBnGT *z, const mclBnGT *x, const mclBnFr *y) +{ + Fp12::pow(*cast(z), *cast(x), *cast(y)); +} +void mclBnGT_powGeneric(mclBnGT *z, const mclBnGT *x, const 
mclBnFr *y) +{ + Fp12::powGeneric(*cast(z), *cast(x), *cast(y)); +} + +void mclBn_pairing(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y) +{ + pairing(*cast(z), *cast(x), *cast(y)); +} +void mclBn_finalExp(mclBnGT *y, const mclBnGT *x) +{ + finalExp(*cast(y), *cast(x)); +} +void mclBn_millerLoop(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y) +{ + millerLoop(*cast(z), *cast(x), *cast(y)); +} +int mclBn_getUint64NumToPrecompute(void) +{ + return int(BN::param.precomputedQcoeffSize * sizeof(Fp6) / sizeof(uint64_t)); +} + +void mclBn_precomputeG2(uint64_t *Qbuf, const mclBnG2 *Q) +{ + precomputeG2(cast(Qbuf), *cast(Q)); +} + +void mclBn_precomputedMillerLoop(mclBnGT *f, const mclBnG1 *P, const uint64_t *Qbuf) +{ + precomputedMillerLoop(*cast(f), *cast(P), cast(Qbuf)); +} + +void mclBn_precomputedMillerLoop2(mclBnGT *f, const mclBnG1 *P1, const uint64_t *Q1buf, const mclBnG1 *P2, const uint64_t *Q2buf) +{ + precomputedMillerLoop2(*cast(f), *cast(P1), cast(Q1buf), *cast(P2), cast(Q2buf)); +} + +void mclBn_precomputedMillerLoop2mixed(mclBnGT *f, const mclBnG1 *P1, const mclBnG2 *Q1, const mclBnG1 *P2, const uint64_t *Q2buf) +{ + precomputedMillerLoop2mixed(*cast(f), *cast(P1), *cast(Q1), *cast(P2), cast(Q2buf)); +} + +int mclBn_FrLagrangeInterpolation(mclBnFr *out, const mclBnFr *xVec, const mclBnFr *yVec, mclSize k) +{ + bool b; + mcl::LagrangeInterpolation(&b, *cast(out), cast(xVec), cast(yVec), k); + return b ? 0 : -1; +} +int mclBn_G1LagrangeInterpolation(mclBnG1 *out, const mclBnFr *xVec, const mclBnG1 *yVec, mclSize k) +{ + bool b; + mcl::LagrangeInterpolation(&b, *cast(out), cast(xVec), cast(yVec), k); + return b ? 0 : -1; +} +int mclBn_G2LagrangeInterpolation(mclBnG2 *out, const mclBnFr *xVec, const mclBnG2 *yVec, mclSize k) +{ + bool b; + mcl::LagrangeInterpolation(&b, *cast(out), cast(xVec), cast(yVec), k); + return b ? 0 : -1; +} +int mclBn_FrEvaluatePolynomial(mclBnFr *out, const mclBnFr *cVec, mclSize cSize, const mclBnFr *x) +{ + bool b; + mcl::evaluatePolynomial(&b, *cast(out), cast(cVec), cSize, *cast(x)); + return b ? 0 : -1; +} +int mclBn_G1EvaluatePolynomial(mclBnG1 *out, const mclBnG1 *cVec, mclSize cSize, const mclBnFr *x) +{ + bool b; + mcl::evaluatePolynomial(&b, *cast(out), cast(cVec), cSize, *cast(x)); + return b ? 0 : -1; +} +int mclBn_G2EvaluatePolynomial(mclBnG2 *out, const mclBnG2 *cVec, mclSize cSize, const mclBnFr *x) +{ + bool b; + mcl::evaluatePolynomial(&b, *cast(out), cast(cVec), cSize, *cast(x)); + return b ? 
0 : -1; +} + +void mclBn_verifyOrderG1(int doVerify) +{ + verifyOrderG1(doVerify != 0); +} + +void mclBn_verifyOrderG2(int doVerify) +{ + verifyOrderG2(doVerify != 0); +} + +mclSize mclBnFp_getStr(char *buf, mclSize maxBufSize, const mclBnFp *x, int ioMode) +{ + return cast(x)->getStr(buf, maxBufSize, ioMode); +} +int mclBnFp_setStr(mclBnFp *x, const char *buf, mclSize bufSize, int ioMode) +{ + return setStr(x, buf, bufSize, ioMode); +} +mclSize mclBnFp_deserialize(mclBnFp *x, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(x)->deserialize(buf, bufSize); +} + +mclSize mclBnFp_serialize(void *buf, mclSize maxBufSize, const mclBnFp *x) +{ + return (mclSize)cast(x)->serialize(buf, maxBufSize); +} + +void mclBnFp_clear(mclBnFp *x) +{ + cast(x)->clear(); +} + +int mclBnFp_setLittleEndian(mclBnFp *x, const void *buf, mclSize bufSize) +{ + cast(x)->setArrayMask((const char *)buf, bufSize); + return 0; +} + +int mclBnFp_setLittleEndianMod(mclBnFp *x, const void *buf, mclSize bufSize) +{ + bool b; + cast(x)->setArray(&b, (const char *)buf, bufSize, mcl::fp::Mod); + return b ? 0 : -1; +} +int mclBnFp_isEqual(const mclBnFp *x, const mclBnFp *y) +{ + return *cast(x) == *cast(y); +} + +int mclBnFp_setHashOf(mclBnFp *x, const void *buf, mclSize bufSize) +{ + cast(x)->setHashOf(buf, bufSize); + return 0; +} + +int mclBnFp_mapToG1(mclBnG1 *y, const mclBnFp *x) +{ + bool b; + mapToG1(&b, *cast(y), *cast(x)); + return b ? 0 : -1; +} + +mclSize mclBnFp2_deserialize(mclBnFp2 *x, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(x)->deserialize(buf, bufSize); +} + +mclSize mclBnFp2_serialize(void *buf, mclSize maxBufSize, const mclBnFp2 *x) +{ + return (mclSize)cast(x)->serialize(buf, maxBufSize); +} + +void mclBnFp2_clear(mclBnFp2 *x) +{ + cast(x)->clear(); +} + +int mclBnFp2_isEqual(const mclBnFp2 *x, const mclBnFp2 *y) +{ + return *cast(x) == *cast(y); +} + +int mclBnFp2_mapToG2(mclBnG2 *y, const mclBnFp2 *x) +{ + bool b; + mapToG2(&b, *cast(y), *cast(x)); + return b ? 0 : -1; +} + +int mclBnG1_getBasePoint(mclBnG1 *x) +{ + *cast(x) = mcl::bn::getG1basePoint(); + return 0; +} + diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/lagrange.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/lagrange.hpp new file mode 100644 index 000000000..18e0597ec --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/lagrange.hpp @@ -0,0 +1,107 @@ +#pragma once +/** + @file + @brief Lagrange Interpolation + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +namespace mcl { + +/* + recover out = f(0) by { (x, y) | x = S[i], y = f(x) = vec[i] } + @retval 0 if succeed else -1 +*/ +template +void LagrangeInterpolation(bool *pb, G& out, const F *S, const G *vec, size_t k) +{ + if (k == 0) { + *pb = false; + return; + } + if (k == 1) { + out = vec[0]; + *pb = true; + return; + } + /* + delta_{i,S}(0) = prod_{j != i} S[j] / (S[j] - S[i]) = a / b + where a = prod S[j], b = S[i] * prod_{j != i} (S[j] - S[i]) + */ + F a = S[0]; + for (size_t i = 1; i < k; i++) { + a *= S[i]; + } + if (a.isZero()) { + *pb = false; + return; + } + /* + f(0) = sum_i f(S[i]) delta_{i,S}(0) + */ + G r; + r.clear(); + for (size_t i = 0; i < k; i++) { + F b = S[i]; + for (size_t j = 0; j < k; j++) { + if (j != i) { + F v = S[j] - S[i]; + if (v.isZero()) { + *pb = false; + return; + } + b *= v; + } + } + G t; + G::mul(t, vec[i], a / b); + r += t; + } + out = r; + *pb = true; +} + +/* + out = f(x) = c[0] + c[1] * x + c[2] * x^2 + ... 
+ c[cSize - 1] * x^(cSize - 1) + @retval 0 if succeed else -1 (if cSize == 0) +*/ +template +void evaluatePolynomial(bool *pb, G& out, const G *c, size_t cSize, const T& x) +{ + if (cSize == 0) { + *pb = false; + return; + } + if (cSize == 1) { + out = c[0]; + *pb = true; + return; + } + G y = c[cSize - 1]; + for (int i = (int)cSize - 2; i >= 0; i--) { + G::mul(y, y, x); + G::add(y, y, c[i]); + } + out = y; + *pb = true; +} + +#ifndef CYBOZU_DONT_USE_EXCEPTION +template +void LagrangeInterpolation(G& out, const F *S, const G *vec, size_t k) +{ + bool b; + LagrangeInterpolation(&b, out, S, vec, k); + if (!b) throw cybozu::Exception("LagrangeInterpolation"); +} + +template +void evaluatePolynomial(G& out, const G *c, size_t cSize, const T& x) +{ + bool b; + evaluatePolynomial(&b, out, c, cSize, x); + if (!b) throw cybozu::Exception("evaluatePolynomial"); +} +#endif + +} // mcl diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/op.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/op.hpp new file mode 100644 index 000000000..36d37035e --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/op.hpp @@ -0,0 +1,389 @@ +#pragma once +/** + @file + @brief definition of Op + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include +#include +#include + +#ifndef MCL_MAX_BIT_SIZE + #define MCL_MAX_BIT_SIZE 521 +#endif +#if defined(__EMSCRIPTEN__) || defined(__wasm__) + #define MCL_DONT_USE_XBYAK + #define MCL_DONT_USE_OPENSSL +#endif +#if !defined(MCL_DONT_USE_XBYAK) && (defined(_WIN64) || defined(__x86_64__)) && (MCL_SIZEOF_UNIT == 8) + #define MCL_USE_XBYAK + #define MCL_XBYAK_DIRECT_CALL +#endif + +#define MCL_MAX_HASH_BIT_SIZE 512 + +namespace mcl { + +static const int version = 0x092; /* 0xABC = A.BC */ + +/* + specifies available string format mode for X::setIoMode() + // for Fp, Fp2, Fp6, Fp12 + default(0) : IoDec + printable string(zero terminated, variable size) + IoBin(2) | IoDec(10) | IoHex(16) | IoBinPrefix | IoHexPrefix + + byte string(not zero terminated, fixed size) + IoArray | IoArrayRaw + IoArray = IoSerialize + + // for Ec + affine(0) | IoEcCompY | IoComp + default : affine + + affine and IoEcCompY are available with ioMode for Fp + IoSerialize ignores ioMode for Fp + + IoAuto + dec or hex according to ios_base::fmtflags + IoBin + binary number([01]+) + IoDec + decimal number + IoHex + hexadecimal number([0-9a-fA-F]+) + IoBinPrefix + 0b + + IoHexPrefix + 0x + + IoArray + array of Unit(fixed size = Fp::getByteSize()) + IoArrayRaw + array of Unit(fixed size = Fp::getByteSize()) without Montgomery conversion + + // for Ec::setIoMode() + IoEcAffine(default) + "0" ; infinity + "1 " ; affine coordinate + + IoEcProj + "4" ; projective or jacobi coordinate + + IoEcCompY + 1-bit y prepresentation of elliptic curve + "2 " ; compressed for even y + "3 " ; compressed for odd y + + IoSerialize + if isMSBserialize(): // p is not full bit + size = Fp::getByteSize() + use MSB of array of x for 1-bit y for prime p where (p % 8 != 0) + [0] ; infinity + ; for even y + |1 ; for odd y ; |1 means set MSB of x + else: + size = Fp::getByteSize() + 1 + [0] ; infinity + 2 ; for even y + 3 ; for odd y +*/ +enum IoMode { + IoAuto = 0, // dec or hex according to ios_base::fmtflags + IoBin = 2, // binary number without prefix + IoDec = 10, // decimal number without prefix + IoHex = 16, // hexadecimal number without prefix + IoArray = 32, // array of Unit(fixed size) + IoArrayRaw = 64, // raw array of Unit without 
Montgomery conversion + IoPrefix = 128, // append '0b'(bin) or '0x'(hex) + IoBinPrefix = IoBin | IoPrefix, + IoHexPrefix = IoHex | IoPrefix, + IoEcAffine = 0, // affine coordinate + IoEcCompY = 256, // 1-bit y representation of elliptic curve + IoSerialize = 512, // use MBS for 1-bit y + IoFixedSizeByteSeq = IoSerialize, // obsolete + IoEcProj = 1024, // projective or jacobi coordinate + IoSerializeHexStr = 2048 // printable hex string +}; + +namespace fp { + +const size_t UnitBitSize = sizeof(Unit) * 8; + +const size_t maxUnitSize = (MCL_MAX_BIT_SIZE + UnitBitSize - 1) / UnitBitSize; +#define MCL_MAX_UNIT_SIZE ((MCL_MAX_BIT_SIZE + MCL_UNIT_BIT_SIZE - 1) / MCL_UNIT_BIT_SIZE) + +struct FpGenerator; +struct Op; + +typedef void (*void1u)(Unit*); +typedef void (*void2u)(Unit*, const Unit*); +typedef void (*void2uI)(Unit*, const Unit*, Unit); +typedef void (*void2uIu)(Unit*, const Unit*, Unit, const Unit*); +typedef void (*void2uOp)(Unit*, const Unit*, const Op&); +typedef void (*void3u)(Unit*, const Unit*, const Unit*); +typedef void (*void4u)(Unit*, const Unit*, const Unit*, const Unit*); +typedef int (*int2u)(Unit*, const Unit*); + +typedef Unit (*u1uII)(Unit*, Unit, Unit); +typedef Unit (*u3u)(Unit*, const Unit*, const Unit*); + +/* + disable -Wcast-function-type + the number of arguments of some JIT functions is smaller than that of T +*/ +template +T func_ptr_cast(S func) +{ + return reinterpret_cast(reinterpret_cast(func)); +} +struct Block { + const Unit *p; // pointer to original FpT.v_ + size_t n; + Unit v_[maxUnitSize]; +}; + +enum Mode { + FP_AUTO, + FP_GMP, + FP_GMP_MONT, + FP_LLVM, + FP_LLVM_MONT, + FP_XBYAK +}; + +enum PrimeMode { + PM_GENERIC = 0, + PM_NIST_P192, + PM_SECP256K1, + PM_NIST_P521 +}; + +enum MaskMode { + NoMask = 0, // throw if greater or equal + SmallMask = 1, // 1-bit smaller mask if greater or equal + MaskAndMod = 2, // mask and substract if greater or equal + Mod = 3 // mod p +}; + +struct Op { + /* + don't change the layout of rp and p + asm code assumes &rp + 1 == p + */ + Unit rp; + Unit p[maxUnitSize]; + mpz_class mp; + uint32_t pmod4; + mcl::SquareRoot sq; + mcl::Modp modp; + Unit half[maxUnitSize]; // (p + 1) / 2 + Unit oneRep[maxUnitSize]; // 1(=inv R if Montgomery) + /* + for Montgomery + one = 1 + R = (1 << (N * sizeof(Unit) * 8)) % p + R2 = (R * R) % p + R3 = RR^3 + */ + Unit one[maxUnitSize]; + Unit R2[maxUnitSize]; + Unit R3[maxUnitSize]; +#ifdef MCL_USE_XBYAK + FpGenerator *fg; + mcl::Array invTbl; +#endif + void3u fp_addA_; + void3u fp_subA_; + void2u fp_negA_; + void3u fp_mulA_; + void2u fp_sqrA_; + void3u fp2_addA_; + void3u fp2_subA_; + void2u fp2_negA_; + void3u fp2_mulA_; + void2u fp2_sqrA_; + void3u fpDbl_addA_; + void3u fpDbl_subA_; + void3u fpDbl_mulPreA_; + void2u fpDbl_sqrPreA_; + void2u fpDbl_modA_; + void3u fp2Dbl_mulPreA_; + void2u fp2Dbl_sqrPreA_; + size_t maxN; + size_t N; + size_t bitSize; + bool (*fp_isZero)(const Unit*); + void1u fp_clear; + void2u fp_copy; + void2u fp_shr1; + void3u fp_neg; + void4u fp_add; + void4u fp_sub; + void4u fp_mul; + void3u fp_sqr; + void2uOp fp_invOp; + void2uIu fp_mulUnit; // fpN1_mod + fp_mulUnitPre + + void3u fpDbl_mulPre; + void2u fpDbl_sqrPre; + int2u fp_preInv; + void2uI fp_mulUnitPre; // z[N + 1] = x[N] * y + void3u fpN1_mod; // y[N] = x[N + 1] % p[N] + + void4u fpDbl_add; + void4u fpDbl_sub; + void3u fpDbl_mod; + + u3u fp_addPre; // without modulo p + u3u fp_subPre; // without modulo p + u3u fpDbl_addPre; + u3u fpDbl_subPre; + /* + for Fp2 = F[u] / (u^2 + 1) + x = a + bu + */ + int xi_a; // xi 
= xi_a + u + void4u fp2_mulNF; + void2u fp2_inv; + void2u fp2_mul_xiA_; + uint32_t (*hash)(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize); + + PrimeMode primeMode; + bool isFullBit; // true if bitSize % uniSize == 0 + bool isMont; // true if use Montgomery + bool isFastMod; // true if modulo is fast + + Op() + { + clear(); + } + ~Op() + { +#ifdef MCL_USE_XBYAK + destroyFpGenerator(fg); +#endif + } + void clear() + { + rp = 0; + memset(p, 0, sizeof(p)); + mp = 0; + pmod4 = 0; + sq.clear(); + // fg is not set + memset(half, 0, sizeof(half)); + memset(oneRep, 0, sizeof(oneRep)); + memset(one, 0, sizeof(one)); + memset(R2, 0, sizeof(R2)); + memset(R3, 0, sizeof(R3)); +#ifdef MCL_USE_XBYAK + invTbl.clear(); +#endif + fp_addA_ = 0; + fp_subA_ = 0; + fp_negA_ = 0; + fp_mulA_ = 0; + fp_sqrA_ = 0; + fp2_addA_ = 0; + fp2_subA_ = 0; + fp2_negA_ = 0; + fp2_mulA_ = 0; + fp2_sqrA_ = 0; + fpDbl_addA_ = 0; + fpDbl_subA_ = 0; + fpDbl_mulPreA_ = 0; + fpDbl_sqrPreA_ = 0; + fpDbl_modA_ = 0; + fp2Dbl_mulPreA_ = 0; + fp2Dbl_sqrPreA_ = 0; + maxN = 0; + N = 0; + bitSize = 0; + fp_isZero = 0; + fp_clear = 0; + fp_copy = 0; + fp_shr1 = 0; + fp_neg = 0; + fp_add = 0; + fp_sub = 0; + fp_mul = 0; + fp_sqr = 0; + fp_invOp = 0; + fp_mulUnit = 0; + + fpDbl_mulPre = 0; + fpDbl_sqrPre = 0; + fp_preInv = 0; + fp_mulUnitPre = 0; + fpN1_mod = 0; + + fpDbl_add = 0; + fpDbl_sub = 0; + fpDbl_mod = 0; + + fp_addPre = 0; + fp_subPre = 0; + fpDbl_addPre = 0; + fpDbl_subPre = 0; + + xi_a = 0; + fp2_mulNF = 0; + fp2_inv = 0; + fp2_mul_xiA_ = 0; + + primeMode = PM_GENERIC; + isFullBit = false; + isMont = false; + isFastMod = false; + hash = 0; + } + void fromMont(Unit* y, const Unit *x) const + { + /* + M(x, y) = xyR^-1 + y = M(x, 1) = xR^-1 + */ + fp_mul(y, x, one, p); + } + void toMont(Unit* y, const Unit *x) const + { + /* + y = M(x, R2) = xR^2 R^-1 = xR + */ + fp_mul(y, x, R2, p); + } + bool init(const mpz_class& p, size_t maxBitSize, int xi_a, Mode mode, size_t mclMaxBitSize = MCL_MAX_BIT_SIZE); +#ifdef MCL_USE_XBYAK + static FpGenerator* createFpGenerator(); + static void destroyFpGenerator(FpGenerator *fg); +#endif +private: + Op(const Op&); + void operator=(const Op&); +}; + +inline const char* getIoSeparator(int ioMode) +{ + return (ioMode & (IoArray | IoArrayRaw | IoSerialize | IoSerializeHexStr)) ? 
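+/*
+	A round trip through Montgomery form is the identity map: toMont()
+	computes M(x, R2) = xR and fromMont() computes M(xR, 1) = x, with
+	M(a, b) = abR^-1 as in the comments above. A minimal sketch, assuming
+	an Op instance `op` already initialized via Op::init() for a 256-bit
+	prime on a 64-bit build (4 Units):
+
+		mcl::fp::Unit x[4] = { 7, 0, 0, 0 };
+		mcl::fp::Unit xR[4], back[4];
+		op.toMont(xR, x);      // xR = 7 * R mod p
+		op.fromMont(back, xR); // back = 7 again
+*/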
"" : " "; +} + +inline void dump(const char *s, size_t n) +{ + for (size_t i = 0; i < n; i++) { + printf("%02x ", (uint8_t)s[i]); + } + printf("\n"); +} + +#ifndef CYBOZU_DONT_USE_STRING +int detectIoMode(int ioMode, const std::ios_base& ios); + +inline void dump(const std::string& s) +{ + dump(s.c_str(), s.size()); +} +#endif + +} } // mcl::fp diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/operator.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/operator.hpp new file mode 100644 index 000000000..e9bc506df --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/operator.hpp @@ -0,0 +1,177 @@ +#pragma once +/** + @file + @brief operator class + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include +#include +#ifdef _MSC_VER + #ifndef MCL_FORCE_INLINE + #define MCL_FORCE_INLINE __forceinline + #endif + #pragma warning(push) + #pragma warning(disable : 4714) +#else + #ifndef MCL_FORCE_INLINE + #define MCL_FORCE_INLINE __attribute__((always_inline)) + #endif +#endif + +namespace mcl { namespace fp { + +template +struct Empty {}; + +/* + T must have add, sub, mul, inv, neg +*/ +template > +struct Operator : public E { + template MCL_FORCE_INLINE T& operator+=(const S& rhs) { T::add(static_cast(*this), static_cast(*this), rhs); return static_cast(*this); } + template MCL_FORCE_INLINE T& operator-=(const S& rhs) { T::sub(static_cast(*this), static_cast(*this), rhs); return static_cast(*this); } + template friend MCL_FORCE_INLINE T operator+(const T& a, const S& b) { T c; T::add(c, a, b); return c; } + template friend MCL_FORCE_INLINE T operator-(const T& a, const S& b) { T c; T::sub(c, a, b); return c; } + template MCL_FORCE_INLINE T& operator*=(const S& rhs) { T::mul(static_cast(*this), static_cast(*this), rhs); return static_cast(*this); } + template friend MCL_FORCE_INLINE T operator*(const T& a, const S& b) { T c; T::mul(c, a, b); return c; } + MCL_FORCE_INLINE T& operator/=(const T& rhs) { T c; T::inv(c, rhs); T::mul(static_cast(*this), static_cast(*this), c); return static_cast(*this); } + static MCL_FORCE_INLINE void div(T& c, const T& a, const T& b) { T t; T::inv(t, b); T::mul(c, a, t); } + friend MCL_FORCE_INLINE T operator/(const T& a, const T& b) { T c; T::inv(c, b); c *= a; return c; } + MCL_FORCE_INLINE T operator-() const { T c; T::neg(c, static_cast(*this)); return c; } + template class FpT> + static void pow(T& z, const T& x, const FpT& y) + { + fp::Block b; + y.getBlock(b); + powArray(z, x, b.p, b.n, false, false); + } + template class FpT> + static void powGeneric(T& z, const T& x, const FpT& y) + { + fp::Block b; + y.getBlock(b); + powArrayBase(z, x, b.p, b.n, false, false); + } + template class FpT> + static void powCT(T& z, const T& x, const FpT& y) + { + fp::Block b; + y.getBlock(b); + powArray(z, x, b.p, b.n, false, true); + } + static void pow(T& z, const T& x, int64_t y) + { + const uint64_t u = fp::abs_(y); +#if MCL_SIZEOF_UNIT == 8 + powArray(z, x, &u, 1, y < 0, false); +#else + uint32_t ua[2] = { uint32_t(u), uint32_t(u >> 32) }; + size_t un = ua[1] ? 
2 : 1; + powArray(z, x, ua, un, y < 0, false); +#endif + } + static void pow(T& z, const T& x, const mpz_class& y) + { + powArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, false); + } + static void powGeneric(T& z, const T& x, const mpz_class& y) + { + powArrayBase(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, false); + } + static void powCT(T& z, const T& x, const mpz_class& y) + { + powArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, true); + } + static void setPowArrayGLV(void f(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime)) + { + powArrayGLV = f; + } +private: + static void (*powArrayGLV)(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime); + static void powArray(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime) + { + if (powArrayGLV && (constTime || yn > 1)) { + powArrayGLV(z, x, y, yn, isNegative, constTime); + return; + } + powArrayBase(z, x, y, yn, isNegative, constTime); + } + static void powArrayBase(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime) + { + T tmp; + const T *px = &x; + if (&z == &x) { + tmp = x; + px = &tmp; + } + z = 1; + fp::powGeneric(z, *px, y, yn, T::mul, T::sqr, (void (*)(T&, const T&))0, constTime ? T::BaseFp::getBitSize() : 0); + if (isNegative) { + T::inv(z, z); + } + } +}; + +template +void (*Operator::powArrayGLV)(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime); + +/* + T must have save and load +*/ +template > +struct Serializable : public E { + void setStr(bool *pb, const char *str, int ioMode = 0) + { + size_t len = strlen(str); + size_t n = deserialize(str, len, ioMode); + *pb = n > 0 && n == len; + } + // return strlen(buf) if success else 0 + size_t getStr(char *buf, size_t maxBufSize, int ioMode = 0) const + { + size_t n = serialize(buf, maxBufSize, ioMode); + if (n == 0 || n == maxBufSize - 1) return 0; + buf[n] = '\0'; + return n; + } +#ifndef CYBOZU_DONT_USE_STRING + void setStr(const std::string& str, int ioMode = 0) + { + cybozu::StringInputStream is(str); + static_cast(*this).load(is, ioMode); + } + void getStr(std::string& str, int ioMode = 0) const + { + str.clear(); + cybozu::StringOutputStream os(str); + static_cast(*this).save(os, ioMode); + } + std::string getStr(int ioMode = 0) const + { + std::string str; + getStr(str, ioMode); + return str; + } +#endif + // return written bytes + size_t serialize(void *buf, size_t maxBufSize, int ioMode = IoSerialize) const + { + cybozu::MemoryOutputStream os(buf, maxBufSize); + bool b; + static_cast(*this).save(&b, os, ioMode); + return b ? os.getPos() : 0; + } + // return read bytes + size_t deserialize(const void *buf, size_t bufSize, int ioMode = IoSerialize) + { + cybozu::MemoryInputStream is(buf, bufSize); + bool b; + static_cast(*this).load(&b, is, ioMode); + return b ? 
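+/*
+	Serializable<T> reduces every text/binary representation to T::save
+	and T::load: serialize() writes through a cybozu::MemoryOutputStream
+	and returns 0 on failure, deserialize() mirrors it, and getStr()
+	layers a printable encoding on top. A minimal round-trip sketch,
+	assuming x and y are values of some serializable type from these
+	headers:
+
+		char buf[256];
+		size_t n = x.serialize(buf, sizeof(buf)); // binary form, 0 on error
+		size_t m = y.deserialize(buf, n);         // m == n on success
+		std::string s = x.getStr(mcl::IoSerializeHexStr); // hex string form
+*/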
is.getPos() : 0; + } +}; + +} } // mcl::fp + diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/paillier.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/paillier.hpp new file mode 100644 index 000000000..03e44cb16 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/paillier.hpp @@ -0,0 +1,84 @@ +#pragma once +/** + @file + @brief paillier encryption + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include + +namespace mcl { namespace paillier { + +class PublicKey { + size_t primeBitSize; + mpz_class g; + mpz_class n; + mpz_class n2; +public: + PublicKey() : primeBitSize(0) {} + void init(size_t _primeBitSize, const mpz_class& _n) + { + primeBitSize = _primeBitSize; + n = _n; + g = 1 + _n; + n2 = _n * _n; + } + void enc(mpz_class& c, const mpz_class& m, mcl::fp::RandGen rg = mcl::fp::RandGen()) const + { + if (rg.isZero()) rg = mcl::fp::RandGen::get(); + if (primeBitSize == 0) throw cybozu::Exception("paillier:PublicKey:not init"); + mpz_class r; + mcl::gmp::getRand(r, primeBitSize, rg); + mpz_class a, b; + mcl::gmp::powMod(a, g, m, n2); + mcl::gmp::powMod(b, r, n, n2); + c = (a * b) % n2; + } + /* + additive homomorphic encryption + cz = cx + cy + */ + void add(mpz_class& cz, mpz_class& cx, mpz_class& cy) const + { + cz = (cx * cy) % n2; + } +}; + +class SecretKey { + size_t primeBitSize; + mpz_class n; + mpz_class n2; + mpz_class lambda; + mpz_class invLambda; +public: + SecretKey() : primeBitSize(0) {} + /* + the size of prime is half of bitSize + */ + void init(size_t bitSize, mcl::fp::RandGen rg = mcl::fp::RandGen()) + { + if (rg.isZero()) rg = mcl::fp::RandGen::get(); + primeBitSize = bitSize / 2; + mpz_class p, q; + mcl::gmp::getRandPrime(p, primeBitSize, rg); + mcl::gmp::getRandPrime(q, primeBitSize, rg); + lambda = (p - 1) * (q - 1); + n = p * q; + n2 = n * n; + mcl::gmp::invMod(invLambda, lambda, n); + } + void getPublicKey(PublicKey& pub) const + { + pub.init(primeBitSize, n); + } + void dec(mpz_class& m, const mpz_class& c) const + { + mpz_class L; + mcl::gmp::powMod(L, c, lambda, n2); + L = ((L - 1) / n) % n; + m = (L * invLambda) % n; + } +}; + +} } // mcl::paillier diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/randgen.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/randgen.hpp new file mode 100644 index 000000000..30502fc10 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/randgen.hpp @@ -0,0 +1,156 @@ +#pragma once +/** + @file + @brief definition of Op + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#ifdef MCL_DONT_USE_CSPRNG + +// nothing + +#elif defined(MCL_USE_WEB_CRYPTO_API) +#include + +namespace mcl { +struct RandomGeneratorJS { + void read(bool *pb, void *buf, uint32_t byteSize) + { + // cf. 
https://developer.mozilla.org/en-US/docs/Web/API/Crypto/getRandomValues + if (byteSize > 65536) { + *pb = false; + return; + } + // use crypto.getRandomValues + EM_ASM({Module.cryptoGetRandomValues($0, $1)}, buf, byteSize); + *pb = true; + } +}; +} // mcl + +#else +#include +#if 0 // #if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 +#include +#endif +#endif +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4521) +#endif +namespace mcl { namespace fp { + +namespace local { + +template +uint32_t readWrapper(void *self, void *buf, uint32_t byteSize) +{ + bool b; + reinterpret_cast(self)->read(&b, (uint8_t*)buf, byteSize); + if (b) return byteSize; + return 0; +} + +#if 0 // #if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 +template<> +inline uint32_t readWrapper(void *self, void *buf, uint32_t byteSize) +{ + const uint32_t keep = byteSize; + std::random_device& rg = *reinterpret_cast(self); + uint8_t *p = reinterpret_cast(buf); + uint32_t v; + while (byteSize >= 4) { + v = rg(); + memcpy(p, &v, 4); + p += 4; + byteSize -= 4; + } + if (byteSize > 0) { + v = rg(); + memcpy(p, &v, byteSize); + } + return keep; +} +#endif +} // local +/* + wrapper of cryptographically secure pseudo random number generator +*/ +class RandGen { + typedef uint32_t (*readFuncType)(void *self, void *buf, uint32_t byteSize); + void *self_; + readFuncType readFunc_; +public: + RandGen() : self_(0), readFunc_(0) {} + RandGen(void *self, readFuncType readFunc) : self_(self) , readFunc_(readFunc) {} + RandGen(const RandGen& rhs) : self_(rhs.self_), readFunc_(rhs.readFunc_) {} + RandGen(RandGen& rhs) : self_(rhs.self_), readFunc_(rhs.readFunc_) {} + RandGen& operator=(const RandGen& rhs) + { + self_ = rhs.self_; + readFunc_ = rhs.readFunc_; + return *this; + } + template + RandGen(RG& rg) + : self_(reinterpret_cast(&rg)) + , readFunc_(local::readWrapper) + { + } + void read(bool *pb, void *out, size_t byteSize) + { + uint32_t size = readFunc_(self_, out, static_cast(byteSize)); + *pb = size == byteSize; + } +#ifdef MCL_DONT_USE_CSPRNG + bool isZero() const { return false; } /* return false to avoid copying default rg */ +#else + bool isZero() const { return self_ == 0 && readFunc_ == 0; } +#endif + static RandGen& getDefaultRandGen() + { +#ifdef MCL_DONT_USE_CSPRNG + static RandGen wrg; +#elif defined(MCL_USE_WEB_CRYPTO_API) + static mcl::RandomGeneratorJS rg; + static RandGen wrg(rg); +#else + static cybozu::RandomGenerator rg; + static RandGen wrg(rg); +#endif + return wrg; + } + static RandGen& get() + { + static RandGen wrg(getDefaultRandGen()); + return wrg; + } + /* + rg must be thread safe + rg.read(void *buf, size_t byteSize); + */ + static void setRandGen(const RandGen& rg) + { + get() = rg; + } + /* + set rand function + if self and readFunc are NULL then set default rand function + */ + static void setRandFunc(void *self, readFuncType readFunc) + { + if (self == 0 && readFunc == 0) { + setRandGen(getDefaultRandGen()); + } else { + RandGen rg(self, readFunc); + setRandGen(rg); + } + } +}; + +} } // mcl::fp + +#ifdef _MSC_VER + #pragma warning(pop) +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/she.h b/vendor/github.com/byzantine-lab/mcl/include/mcl/she.h new file mode 100644 index 000000000..60b399c65 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/she.h @@ -0,0 +1,270 @@ +#pragma once +/** + @file + @brief C api of somewhat homomorphic encryption with one-time multiplication, based on prime-order pairings + @author MITSUNARI Shigeo(@herumi) + 
@license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include + +#ifdef _MSC_VER +#ifdef MCLSHE_DLL_EXPORT +#define MCLSHE_DLL_API __declspec(dllexport) +#else +#define MCLSHE_DLL_API __declspec(dllimport) +#ifndef MCLSHE_NO_AUTOLINK + #if MCLBN_FP_UNIT_SIZE == 4 + #pragma comment(lib, "mclshe256.lib") + #elif MCLBN_FP_UNIT_SIZE == 6 + #pragma comment(lib, "mclshe384.lib") + #else + #pragma comment(lib, "mclshe512.lib") + #endif +#endif +#endif +#else +#ifdef __EMSCRIPTEN__ + #define MCLSHE_DLL_API __attribute__((used)) +#elif defined(__wasm__) + #define MCLSHE_DLL_API __attribute__((visibility("default"))) +#else + #define MCLSHE_DLL_API +#endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + mclBnFr x; + mclBnFr y; +} sheSecretKey; + +typedef struct { + mclBnG1 xP; + mclBnG2 yQ; +} shePublicKey; + +struct shePrecomputedPublicKey; + +typedef struct { + mclBnG1 S; + mclBnG1 T; +} sheCipherTextG1; + +typedef struct { + mclBnG2 S; + mclBnG2 T; +} sheCipherTextG2; + +typedef struct { + mclBnGT g[4]; +} sheCipherTextGT; + +typedef struct { + mclBnFr d[4]; +} sheZkpBin; + +typedef struct { + mclBnFr d[4]; +} sheZkpEq; + +typedef struct { + mclBnFr d[7]; +} sheZkpBinEq; +/* + initialize this library + call this once before using the other functions + @param curve [in] enum value defined in mcl/bn.h + @param compiledTimeVar [in] specify MCLBN_COMPILED_TIME_VAR, + which macro is used to make sure that the values + are the same when the library is built and used + @return 0 if success + @note sheInit() is thread safe and serialized if it is called simultaneously + but don't call it while using other functions. +*/ +MCLSHE_DLL_API int sheInit(int curve, int compiledTimeVar); + +// return written byte size if success else 0 +MCLSHE_DLL_API mclSize sheSecretKeySerialize(void *buf, mclSize maxBufSize, const sheSecretKey *sec); +MCLSHE_DLL_API mclSize shePublicKeySerialize(void *buf, mclSize maxBufSize, const shePublicKey *pub); +MCLSHE_DLL_API mclSize sheCipherTextG1Serialize(void *buf, mclSize maxBufSize, const sheCipherTextG1 *c); +MCLSHE_DLL_API mclSize sheCipherTextG2Serialize(void *buf, mclSize maxBufSize, const sheCipherTextG2 *c); +MCLSHE_DLL_API mclSize sheCipherTextGTSerialize(void *buf, mclSize maxBufSize, const sheCipherTextGT *c); +MCLSHE_DLL_API mclSize sheZkpBinSerialize(void *buf, mclSize maxBufSize, const sheZkpBin *zkp); +MCLSHE_DLL_API mclSize sheZkpEqSerialize(void *buf, mclSize maxBufSize, const sheZkpEq *zkp); +MCLSHE_DLL_API mclSize sheZkpBinEqSerialize(void *buf, mclSize maxBufSize, const sheZkpBinEq *zkp); + +// return read byte size if sucess else 0 +MCLSHE_DLL_API mclSize sheSecretKeyDeserialize(sheSecretKey* sec, const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize shePublicKeyDeserialize(shePublicKey* pub, const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheCipherTextG1Deserialize(sheCipherTextG1* c, const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheCipherTextG2Deserialize(sheCipherTextG2* c, const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheCipherTextGTDeserialize(sheCipherTextGT* c, const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheZkpBinDeserialize(sheZkpBin* zkp, const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheZkpEqDeserialize(sheZkpEq* zkp, const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheZkpBinEqDeserialize(sheZkpBinEq* zkp, const void *buf, mclSize bufSize); + +/* + set secretKey if system has /dev/urandom or CryptGenRandom + return 
0 if success +*/ +MCLSHE_DLL_API int sheSecretKeySetByCSPRNG(sheSecretKey *sec); + +MCLSHE_DLL_API void sheGetPublicKey(shePublicKey *pub, const sheSecretKey *sec); + +/* + make table to decode DLP + return 0 if success +*/ +MCLSHE_DLL_API int sheSetRangeForDLP(mclSize hashSize); +MCLSHE_DLL_API int sheSetRangeForG1DLP(mclSize hashSize); +MCLSHE_DLL_API int sheSetRangeForG2DLP(mclSize hashSize); +MCLSHE_DLL_API int sheSetRangeForGTDLP(mclSize hashSize); + +/* + set tryNum to decode DLP +*/ +MCLSHE_DLL_API void sheSetTryNum(mclSize tryNum); + +/* + decode G1 via GT if use != 0 + @note faster if tryNum >= 300 +*/ +MCLSHE_DLL_API void sheUseDecG1ViaGT(int use); +/* + decode G2 via GT if use != 0 + @note faster if tryNum >= 100 +*/ +MCLSHE_DLL_API void sheUseDecG2ViaGT(int use); +/* + load table for DLP + return read size if success else 0 +*/ +MCLSHE_DLL_API mclSize sheLoadTableForG1DLP(const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheLoadTableForG2DLP(const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheLoadTableForGTDLP(const void *buf, mclSize bufSize); + +/* + save table for DLP + return written size if success else 0 +*/ +MCLSHE_DLL_API mclSize sheSaveTableForG1DLP(void *buf, mclSize maxBufSize); +MCLSHE_DLL_API mclSize sheSaveTableForG2DLP(void *buf, mclSize maxBufSize); +MCLSHE_DLL_API mclSize sheSaveTableForGTDLP(void *buf, mclSize maxBufSize); + +// return 0 if success +MCLSHE_DLL_API int sheEncG1(sheCipherTextG1 *c, const shePublicKey *pub, mclInt m); +MCLSHE_DLL_API int sheEncG2(sheCipherTextG2 *c, const shePublicKey *pub, mclInt m); +MCLSHE_DLL_API int sheEncGT(sheCipherTextGT *c, const shePublicKey *pub, mclInt m); +MCLSHE_DLL_API int shePrecomputedPublicKeyEncG1(sheCipherTextG1 *c, const shePrecomputedPublicKey *ppub, mclInt m); +MCLSHE_DLL_API int shePrecomputedPublicKeyEncG2(sheCipherTextG2 *c, const shePrecomputedPublicKey *ppub, mclInt m); +MCLSHE_DLL_API int shePrecomputedPublicKeyEncGT(sheCipherTextGT *c, const shePrecomputedPublicKey *ppub, mclInt m); + +/* + m must be 0 or 1 +*/ +MCLSHE_DLL_API int sheEncWithZkpBinG1(sheCipherTextG1 *c, sheZkpBin *zkp, const shePublicKey *pub, int m); +MCLSHE_DLL_API int sheEncWithZkpBinG2(sheCipherTextG2 *c, sheZkpBin *zkp, const shePublicKey *pub, int m); +MCLSHE_DLL_API int sheEncWithZkpBinEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpBinEq *zkp, const shePublicKey *pub, int m); +MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpBinG1(sheCipherTextG1 *c, sheZkpBin *zkp, const shePrecomputedPublicKey *ppub, int m); +MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpBinG2(sheCipherTextG2 *c, sheZkpBin *zkp, const shePrecomputedPublicKey *ppub, int m); +MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpBinEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpBinEq *zkp, const shePrecomputedPublicKey *ppub, int m); + +/* + arbitary m +*/ +MCLSHE_DLL_API int sheEncWithZkpEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpEq *zkp, const shePublicKey *pub, mclInt m); +MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpEq *zkp, const shePrecomputedPublicKey *ppub, mclInt m); + +/* + decode c and set m + return 0 if success +*/ +MCLSHE_DLL_API int sheDecG1(mclInt *m, const sheSecretKey *sec, const sheCipherTextG1 *c); +MCLSHE_DLL_API int sheDecG2(mclInt *m, const sheSecretKey *sec, const sheCipherTextG2 *c); +MCLSHE_DLL_API int sheDecGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextGT *c); +/* + verify zkp + return 1 if valid +*/ +MCLSHE_DLL_API int 
sheVerifyZkpBinG1(const shePublicKey *pub, const sheCipherTextG1 *c, const sheZkpBin *zkp); +MCLSHE_DLL_API int sheVerifyZkpBinG2(const shePublicKey *pub, const sheCipherTextG2 *c, const sheZkpBin *zkp); +MCLSHE_DLL_API int sheVerifyZkpEq(const shePublicKey *pub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpEq *zkp); +MCLSHE_DLL_API int sheVerifyZkpBinEq(const shePublicKey *pub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpBinEq *zkp); +MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpBinG1(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c, const sheZkpBin *zkp); +MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpBinG2(const shePrecomputedPublicKey *ppub, const sheCipherTextG2 *c, const sheZkpBin *zkp); +MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpEq(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpEq *zkp); +MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpBinEq(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpBinEq *zkp); +/* + decode c via GT and set m + return 0 if success +*/ +MCLSHE_DLL_API int sheDecG1ViaGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextG1 *c); +MCLSHE_DLL_API int sheDecG2ViaGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextG2 *c); + +/* + return 1 if dec(c) == 0 +*/ +MCLSHE_DLL_API int sheIsZeroG1(const sheSecretKey *sec, const sheCipherTextG1 *c); +MCLSHE_DLL_API int sheIsZeroG2(const sheSecretKey *sec, const sheCipherTextG2 *c); +MCLSHE_DLL_API int sheIsZeroGT(const sheSecretKey *sec, const sheCipherTextGT *c); + +// return 0 if success +// y = -x +MCLSHE_DLL_API int sheNegG1(sheCipherTextG1 *y, const sheCipherTextG1 *x); +MCLSHE_DLL_API int sheNegG2(sheCipherTextG2 *y, const sheCipherTextG2 *x); +MCLSHE_DLL_API int sheNegGT(sheCipherTextGT *y, const sheCipherTextGT *x); + +// return 0 if success +// z = x + y +MCLSHE_DLL_API int sheAddG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, const sheCipherTextG1 *y); +MCLSHE_DLL_API int sheAddG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, const sheCipherTextG2 *y); +MCLSHE_DLL_API int sheAddGT(sheCipherTextGT *z, const sheCipherTextGT *x, const sheCipherTextGT *y); + +// return 0 if success +// z = x - y +MCLSHE_DLL_API int sheSubG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, const sheCipherTextG1 *y); +MCLSHE_DLL_API int sheSubG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, const sheCipherTextG2 *y); +MCLSHE_DLL_API int sheSubGT(sheCipherTextGT *z, const sheCipherTextGT *x, const sheCipherTextGT *y); + +// return 0 if success +// z = x * y +MCLSHE_DLL_API int sheMulG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, mclInt y); +MCLSHE_DLL_API int sheMulG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, mclInt y); +MCLSHE_DLL_API int sheMulGT(sheCipherTextGT *z, const sheCipherTextGT *x, mclInt y); + +// return 0 if success +// z = x * y +MCLSHE_DLL_API int sheMul(sheCipherTextGT *z, const sheCipherTextG1 *x, const sheCipherTextG2 *y); +/* + sheMul(z, x, y) = sheMulML(z, x, y) + sheFinalExpGT(z) + @note + Mul(x1, y1) + ... + Mul(xn, yn) = finalExp(MulML(x1, y1) + ... 
+ MulML(xn, yn)) +*/ +MCLSHE_DLL_API int sheMulML(sheCipherTextGT *z, const sheCipherTextG1 *x, const sheCipherTextG2 *y); +MCLSHE_DLL_API int sheFinalExpGT(sheCipherTextGT *y, const sheCipherTextGT *x); + +// return 0 if success +// rerandomize(c) +MCLSHE_DLL_API int sheReRandG1(sheCipherTextG1 *c, const shePublicKey *pub); +MCLSHE_DLL_API int sheReRandG2(sheCipherTextG2 *c, const shePublicKey *pub); +MCLSHE_DLL_API int sheReRandGT(sheCipherTextGT *c, const shePublicKey *pub); + +// return 0 if success +// y = convert(x) +MCLSHE_DLL_API int sheConvertG1(sheCipherTextGT *y, const shePublicKey *pub, const sheCipherTextG1 *x); +MCLSHE_DLL_API int sheConvertG2(sheCipherTextGT *y, const shePublicKey *pub, const sheCipherTextG2 *x); + +// return nonzero if success +MCLSHE_DLL_API shePrecomputedPublicKey *shePrecomputedPublicKeyCreate(); +// call this function to avoid memory leak +MCLSHE_DLL_API void shePrecomputedPublicKeyDestroy(shePrecomputedPublicKey *ppub); +// return 0 if success +MCLSHE_DLL_API int shePrecomputedPublicKeyInit(shePrecomputedPublicKey *ppub, const shePublicKey *pub); + +#ifdef __cplusplus +} +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/she.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/she.hpp new file mode 100644 index 000000000..3ce361454 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/she.hpp @@ -0,0 +1,1939 @@ +#pragma once +/** + @file + @brief somewhat homomorphic encryption with one-time multiplication, based on prime-order pairings + @author MITSUNARI Shigeo(@herumi) + see https://github.com/herumi/mcl/blob/master/misc/she/she.pdf + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include +#include +#include +#ifndef MCLBN_FP_UNIT_SIZE + #define MCLBN_FP_UNIT_SIZE 4 +#endif +#if MCLBN_FP_UNIT_SIZE == 4 +#include +#elif MCLBN_FP_UNIT_SIZE == 6 +#include +#elif MCLBN_FP_UNIT_SIZE == 8 +#include +#else + #error "MCLBN_FP_UNIT_SIZE must be 4, 6, or 8" +#endif + +#include +#include +#include + +namespace mcl { namespace she { + +using namespace mcl::bn; + +namespace local { + +#ifndef MCLSHE_WIN_SIZE + #define MCLSHE_WIN_SIZE 10 +#endif +static const size_t winSize = MCLSHE_WIN_SIZE; +static const size_t defaultTryNum = 2048; + +struct KeyCount { + uint32_t key; + int32_t count; // power + bool operator<(const KeyCount& rhs) const + { + return key < rhs.key; + } + bool isSame(const KeyCount& rhs) const + { + return key == rhs.key && count == rhs.count; + } +}; + +template +struct InterfaceForHashTable : G { + static G& castG(InterfaceForHashTable& x) { return static_cast(x); } + static const G& castG(const InterfaceForHashTable& x) { return static_cast(x); } + void clear() { clear(castG(*this)); } + void normalize() { normalize(castG(*this)); } + static bool isOdd(const G& P) { return P.y.isOdd(); } + static bool isZero(const G& P) { return P.isZero(); } + static bool isSameX(const G& P, const G& Q) { return P.x == Q.x; } + static uint32_t getHash(const G& P) { return uint32_t(*P.x.getUnit()); } + static void clear(G& P) { P.clear(); } + static void normalize(G& P) { P.normalize(); } + static void dbl(G& Q, const G& P) { G::dbl(Q, P); } + static void neg(G& Q, const G& P) { G::neg(Q, P); } + static void add(G& R, const G& P, const G& Q) { G::add(R, P, Q); } + template + static void mul(G& Q, const G& P, const INT& x) { G::mul(Q, P, x); } +}; + +/* + treat Fp12 as EC + unitary inverse of (a, b) = (a, -b) + then b.a.a or -b.a.a is odd +*/ +template +struct InterfaceForHashTable : G { + 
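+	/*
+		The C API declared in she.h above composes as: initialize once,
+		encrypt under a public key, add ciphertexts freely, multiply at
+		most once (G1 x G2 -> GT), then decrypt. A minimal sketch,
+		assuming the program links against the mclshe library:
+
+			sheInit(MCL_BN254, MCLBN_COMPILED_TIME_VAR);
+			sheSecretKey sec; sheSecretKeySetByCSPRNG(&sec);
+			shePublicKey pub; sheGetPublicKey(&pub, &sec);
+			sheCipherTextG1 a; sheCipherTextG2 b; sheCipherTextGT ab;
+			sheEncG1(&a, &pub, 3);
+			sheEncG2(&b, &pub, 5);
+			sheMul(&ab, &a, &b);                // the one-time multiplication
+			mclInt m; sheDecGT(&m, &sec, &ab);  // m == 15
+	*/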
static G& castG(InterfaceForHashTable& x) { return static_cast(x); } + static const G& castG(const InterfaceForHashTable& x) { return static_cast(x); } + void clear() { clear(castG(*this)); } + void normalize() { normalize(castG(*this)); } + static bool isOdd(const G& x) { return x.b.a.a.isOdd(); } + static bool isZero(const G& x) { return x.isOne(); } + static bool isSameX(const G& x, const G& Q) { return x.a == Q.a; } + static uint32_t getHash(const G& x) { return uint32_t(*x.getFp0()->getUnit()); } + static void clear(G& x) { x = 1; } + static void normalize(G&) { } + static void dbl(G& y, const G& x) { G::sqr(y, x); } + static void neg(G& Q, const G& P) { G::unitaryInv(Q, P); } + static void add(G& z, const G& x, const G& y) { G::mul(z, x, y); } + template + static void mul(G& z, const G& x, const INT& y) { G::pow(z, x, y); } +}; + +template +char GtoChar(); +template<>char GtoChar() { return '1'; } +template<>char GtoChar() { return '2'; } +template<>char GtoChar() { return 'T'; } + +/* + HashTable or HashTable +*/ +template +class HashTable { + typedef InterfaceForHashTable I; + typedef std::vector KeyCountVec; + KeyCountVec kcv_; + G P_; + mcl::fp::WindowMethod wm_; + G nextP_; + G nextNegP_; + size_t tryNum_; + void setWindowMethod() + { + const size_t bitSize = G::BaseFp::BaseFp::getBitSize(); + wm_.init(static_cast(P_), bitSize, local::winSize); + } +public: + HashTable() : tryNum_(local::defaultTryNum) {} + bool operator==(const HashTable& rhs) const + { + if (kcv_.size() != rhs.kcv_.size()) return false; + for (size_t i = 0; i < kcv_.size(); i++) { + if (!kcv_[i].isSame(rhs.kcv_[i])) return false; + } + return P_ == rhs.P_ && nextP_ == rhs.nextP_; + } + bool operator!=(const HashTable& rhs) const { return !operator==(rhs); } + /* + compute log_P(xP) for |x| <= hashSize * tryNum + */ + void init(const G& P, size_t hashSize, size_t tryNum = local::defaultTryNum) + { + if (hashSize == 0) { + kcv_.clear(); + return; + } + if (hashSize >= 0x80000000u) throw cybozu::Exception("HashTable:init:hashSize is too large"); + P_ = P; + tryNum_ = tryNum; + kcv_.resize(hashSize); + G xP; + I::clear(xP); + for (int i = 1; i <= (int)kcv_.size(); i++) { + I::add(xP, xP, P_); + I::normalize(xP); + kcv_[i - 1].key = I::getHash(xP); + kcv_[i - 1].count = I::isOdd(xP) ? 
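+	/*
+		The table maps the hash of iP's x-coordinate to a signed index
+		whose sign records the parity of iP's y-coordinate, so one entry
+		serves both iP and -iP. basicLog() therefore resolves
+		|m| <= hashSize directly, and log() widens the range by
+		repeatedly stepping the target by +-(2 * hashSize + 1)P, giving
+		|m| <= hashSize * tryNum overall. With the defaults
+		hashSize = 1024 and tryNum = 2048 that is roughly |m| <= 2^21.
+	*/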
i : -i; + } + nextP_ = xP; + I::dbl(nextP_, nextP_); + I::add(nextP_, nextP_, P_); // nextP = (hasSize * 2 + 1)P + I::neg(nextNegP_, nextP_); // nextNegP = -nextP + /* + ascending order of abs(count) for same key + */ + std::stable_sort(kcv_.begin(), kcv_.end()); + setWindowMethod(); + } + void setTryNum(size_t tryNum) + { + this->tryNum_ = tryNum; + } + /* + log_P(xP) + find range which has same hash of xP in kcv_, + and detect it + */ + int basicLog(G xP, bool *ok = 0) const + { + if (ok) *ok = true; + if (I::isZero(xP)) return 0; + typedef KeyCountVec::const_iterator Iter; + KeyCount kc; + I::normalize(xP); + kc.key = I::getHash(xP); + kc.count = 0; + std::pair p = std::equal_range(kcv_.begin(), kcv_.end(), kc); + G Q; + I::clear(Q); + int prev = 0; + /* + check range which has same hash + */ + while (p.first != p.second) { + int count = p.first->count; + int abs_c = std::abs(count); + assert(abs_c >= prev); // assume ascending order + bool neg = count < 0; + G T; +// I::mul(T, P, abs_c - prev); + mulByWindowMethod(T, abs_c - prev); + I::add(Q, Q, T); + I::normalize(Q); + if (I::isSameX(Q, xP)) { + bool QisOdd = I::isOdd(Q); + bool xPisOdd = I::isOdd(xP); + if (QisOdd ^ xPisOdd ^ neg) return -count; + return count; + } + prev = abs_c; + ++p.first; + } + if (ok) { + *ok = false; + return 0; + } + throw cybozu::Exception("HashTable:basicLog:not found"); + } + /* + compute log_P(xP) + call basicLog at most 2 * tryNum + */ + int64_t log(const G& xP) const + { + bool ok; + int c = basicLog(xP, &ok); + if (ok) { + return c; + } + G posP = xP, negP = xP; + int64_t posCenter = 0; + int64_t negCenter = 0; + int64_t next = (int64_t)kcv_.size() * 2 + 1; + for (size_t i = 1; i < tryNum_; i++) { + I::add(posP, posP, nextNegP_); + posCenter += next; + c = basicLog(posP, &ok); + if (ok) { + return posCenter + c; + } + I::add(negP, negP, nextP_); + negCenter -= next; + c = basicLog(negP, &ok); + if (ok) { + return negCenter + c; + } + } + throw cybozu::Exception("HashTable:log:not found"); + } + /* + remark + tryNum is not saved. 
+ */ + template + void save(OutputStream& os) const + { + cybozu::save(os, BN::param.cp.curveType); + cybozu::writeChar(os, GtoChar()); + cybozu::save(os, kcv_.size()); + cybozu::write(os, &kcv_[0], sizeof(kcv_[0]) * kcv_.size()); + P_.save(os); + } + size_t save(void *buf, size_t maxBufSize) const + { + cybozu::MemoryOutputStream os(buf, maxBufSize); + save(os); + return os.getPos(); + } + /* + remark + tryNum is not set + */ + template + void load(InputStream& is) + { + int curveType; + cybozu::load(curveType, is); + if (curveType != BN::param.cp.curveType) throw cybozu::Exception("HashTable:bad curveType") << curveType; + char c = 0; + if (!cybozu::readChar(&c, is) || c != GtoChar()) throw cybozu::Exception("HashTable:bad c") << (int)c; + size_t kcvSize; + cybozu::load(kcvSize, is); + kcv_.resize(kcvSize); + cybozu::read(&kcv_[0], sizeof(kcv_[0]) * kcvSize, is); + P_.load(is); + I::mul(nextP_, P_, (kcvSize * 2) + 1); + I::neg(nextNegP_, nextP_); + setWindowMethod(); + } + size_t load(const void *buf, size_t bufSize) + { + cybozu::MemoryInputStream is(buf, bufSize); + load(is); + return is.getPos(); + } + const mcl::fp::WindowMethod& getWM() const { return wm_; } + /* + mul(x, P, y); + */ + template + void mulByWindowMethod(G& x, const T& y) const + { + wm_.mul(static_cast(x), y); + } +}; + +template +int log(const G& P, const G& xP) +{ + if (xP.isZero()) return 0; + if (xP == P) return 1; + G negT; + G::neg(negT, P); + if (xP == negT) return -1; + G T = P; + for (int i = 2; i < 100; i++) { + T += P; + if (xP == T) return i; + G::neg(negT, T); + if (xP == negT) return -i; + } + throw cybozu::Exception("she:log:not found"); +} + +} // mcl::she::local + +template +struct SHET { + class SecretKey; + class PublicKey; + class PrecomputedPublicKey; + // additive HE + class CipherTextA; // = CipherTextG1 + CipherTextG2 + class CipherTextGT; // multiplicative HE + class CipherText; // CipherTextA + CipherTextGT + + static G1 P_; + static G2 Q_; + static GT ePQ_; // e(P, Q) + static std::vector Qcoeff_; + static local::HashTable PhashTbl_; + static local::HashTable QhashTbl_; + static mcl::fp::WindowMethod Qwm_; + typedef local::InterfaceForHashTable GTasEC; + static local::HashTable ePQhashTbl_; + static bool useDecG1ViaGT_; + static bool useDecG2ViaGT_; + static bool isG1only_; +private: + template + class CipherTextAT : public fp::Serializable > { + G S_, T_; + friend class SecretKey; + friend class PublicKey; + friend class PrecomputedPublicKey; + friend class CipherTextA; + friend class CipherTextGT; + bool isZero(const Fr& x) const + { + G xT; + G::mul(xT, T_, x); + return S_ == xT; + } + public: + const G& getS() const { return S_; } + const G& getT() const { return T_; } + void clear() + { + S_.clear(); + T_.clear(); + } + static void add(CipherTextAT& z, const CipherTextAT& x, const CipherTextAT& y) + { + /* + (S, T) + (S', T') = (S + S', T + T') + */ + G::add(z.S_, x.S_, y.S_); + G::add(z.T_, x.T_, y.T_); + } + static void sub(CipherTextAT& z, const CipherTextAT& x, const CipherTextAT& y) + { + /* + (S, T) - (S', T') = (S - S', T - T') + */ + G::sub(z.S_, x.S_, y.S_); + G::sub(z.T_, x.T_, y.T_); + } + // INT = int64_t or Fr + template + static void mul(CipherTextAT& z, const CipherTextAT& x, const INT& y) + { + G::mul(z.S_, x.S_, y); + G::mul(z.T_, x.T_, y); + } + static void neg(CipherTextAT& y, const CipherTextAT& x) + { + G::neg(y.S_, x.S_); + G::neg(y.T_, x.T_); + } + void add(const CipherTextAT& c) { add(*this, *this, c); } + void sub(const CipherTextAT& c) { sub(*this, *this, c); 
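+	/*
+		Because (S, T) = (mP + r xP, rP), componentwise addition encrypts
+		the sum of the messages under randomness r1 + r2:
+		(m1 P + r1 xP, r1 P) + (m2 P + r2 xP, r2 P)
+			= ((m1 + m2) P + (r1 + r2) xP, (r1 + r2) P).
+		A minimal sketch, assuming SHE is a concrete instantiation of the
+		SHET template and SHE::init() has been called:
+
+			SHE::SecretKey sec; sec.setByCSPRNG();
+			SHE::PublicKey pub; sec.getPublicKey(pub);
+			SHE::CipherTextG1 c1, c2;
+			pub.enc(c1, 3);
+			pub.enc(c2, 4);
+			c1.add(c2);              // ciphertext of 3 + 4
+			int64_t m = sec.dec(c1); // m == 7
+	*/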
} + template + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + S_.load(pb, is, ioMode); if (!*pb) return; + T_.load(pb, is, ioMode); + } + template + void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + S_.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + T_.save(pb, os, ioMode); + } + template + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("she:CipherTextA:load"); + } + template + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("she:CipherTextA:save"); + } + friend std::istream& operator>>(std::istream& is, CipherTextAT& self) + { + self.load(is, fp::detectIoMode(G::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const CipherTextAT& self) + { + self.save(os, fp::detectIoMode(G::getIoMode(), os)); + return os; + } + bool operator==(const CipherTextAT& rhs) const + { + return S_ == rhs.S_ && T_ == rhs.T_; + } + bool operator!=(const CipherTextAT& rhs) const { return !operator==(rhs); } + }; + /* + g1 = millerLoop(P1, Q) + g2 = millerLoop(P2, Q) + */ + static void doubleMillerLoop(GT& g1, GT& g2, const G1& P1, const G1& P2, const G2& Q) + { +#if 1 + std::vector Qcoeff; + precomputeG2(Qcoeff, Q); + precomputedMillerLoop(g1, P1, Qcoeff); + precomputedMillerLoop(g2, P2, Qcoeff); +#else + millerLoop(g1, P1, Q); + millerLoop(g2, P2, Q); +#endif + } + static void finalExp4(GT out[4], const GT in[4]) + { + for (int i = 0; i < 4; i++) { + finalExp(out[i], in[i]); + } + } + static void tensorProductML(GT g[4], const G1& S1, const G1& T1, const G2& S2, const G2& T2) + { + /* + (S1, T1) x (S2, T2) = (ML(S1, S2), ML(S1, T2), ML(T1, S2), ML(T1, T2)) + */ + doubleMillerLoop(g[0], g[2], S1, T1, S2); + doubleMillerLoop(g[1], g[3], S1, T1, T2); + } + static void tensorProduct(GT g[4], const G1& S1, const G1& T1, const G2& S2, const G2& T2) + { + /* + (S1, T1) x (S2, T2) = (e(S1, S2), e(S1, T2), e(T1, S2), e(T1, T2)) + */ + tensorProductML(g,S1, T1, S2,T2); + finalExp4(g, g); + } + template + struct ZkpT : public fp::Serializable > { + Fr d_[n]; + template + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + for (size_t i = 0; i < n; i++) { + d_[i].load(pb, is, ioMode); if (!*pb) return; + } + } + template + void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + d_[0].save(pb, os, ioMode); if (!*pb) return; + for (size_t i = 1; i < n; i++) { + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + d_[i].save(pb, os, ioMode); + } + } + template + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("she:ZkpT:load"); + } + template + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("she:ZkpT:save"); + } + friend std::istream& operator>>(std::istream& is, ZkpT& self) + { + self.load(is, fp::detectIoMode(Fr::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const ZkpT& self) + { + self.save(os, fp::detectIoMode(Fr::getIoMode(), os)); + return os; + } + }; + struct ZkpBinTag; + struct ZkpEqTag; // d_[] = { c, sp, ss, sm } + struct ZkpBinEqTag; // d_[] = { d0, d1, sp0, 
sp1, ss, sp, sm } +public: + /* + Zkp for m = 0 or 1 + */ + typedef ZkpT ZkpBin; + /* + Zkp for decG1(c1) == decG2(c2) + */ + typedef ZkpT ZkpEq; + /* + Zkp for (m = 0 or 1) and decG1(c1) == decG2(c2) + */ + typedef ZkpT ZkpBinEq; + + typedef CipherTextAT CipherTextG1; + typedef CipherTextAT CipherTextG2; + + static void init(const mcl::CurveParam& cp = mcl::BN254, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum) + { + initPairing(cp); + hashAndMapToG1(P_, "0"); + hashAndMapToG2(Q_, "0"); + pairing(ePQ_, P_, Q_); + precomputeG2(Qcoeff_, Q_); + setRangeForDLP(hashSize); + useDecG1ViaGT_ = false; + useDecG2ViaGT_ = false; + isG1only_ = false; + setTryNum(tryNum); + } + static void init(size_t hashSize, size_t tryNum = local::defaultTryNum) + { + init(mcl::BN254, hashSize, tryNum); + } + /* + standard lifted ElGamal encryption + */ + static void initG1only(const mcl::EcParam& para, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum) + { + Fp::init(para.p); + Fr::init(para.n); + G1::init(para.a, para.b); + const Fp x0(para.gx); + const Fp y0(para.gy); + P_.set(x0, y0); + + setRangeForG1DLP(hashSize); + useDecG1ViaGT_ = false; + useDecG2ViaGT_ = false; + isG1only_ = true; + setTryNum(tryNum); + } + /* + set range for G1-DLP + */ + static void setRangeForG1DLP(size_t hashSize) + { + PhashTbl_.init(P_, hashSize); + } + /* + set range for G2-DLP + */ + static void setRangeForG2DLP(size_t hashSize) + { + QhashTbl_.init(Q_, hashSize); + } + /* + set range for GT-DLP + */ + static void setRangeForGTDLP(size_t hashSize) + { + ePQhashTbl_.init(ePQ_, hashSize); + } + /* + set range for G1/G2/GT DLP + decode message m for |m| <= hasSize * tryNum + decode time = O(log(hasSize) * tryNum) + */ + static void setRangeForDLP(size_t hashSize) + { + setRangeForG1DLP(hashSize); + setRangeForG2DLP(hashSize); + setRangeForGTDLP(hashSize); + } + static void setTryNum(size_t tryNum) + { + PhashTbl_.setTryNum(tryNum); + QhashTbl_.setTryNum(tryNum); + ePQhashTbl_.setTryNum(tryNum); + } + static void useDecG1ViaGT(bool use = true) + { + useDecG1ViaGT_ = use; + } + static void useDecG2ViaGT(bool use = true) + { + useDecG2ViaGT_ = use; + } + /* + only one element is necessary for each G1 and G2. 
+ this is better than David Mandell Freeman's algorithm + */ + class SecretKey : public fp::Serializable { + Fr x_, y_; + void getPowOfePQ(GT& v, const CipherTextGT& c) const + { + /* + (s, t, u, v) := (e(S, S'), e(S, T'), e(T, S'), e(T, T')) + s v^(xy) / (t^y u^x) = s (v^x / t) ^ y / u^x + = e(P, Q)^(mm') + */ + GT t, u; + GT::unitaryInv(t, c.g_[1]); + GT::unitaryInv(u, c.g_[2]); + GT::pow(v, c.g_[3], x_); + v *= t; + GT::pow(v, v, y_); + GT::pow(u, u, x_); + v *= u; + v *= c.g_[0]; + } + public: + void setByCSPRNG() + { + x_.setRand(); + if (!isG1only_) y_.setRand(); + } + /* + set xP and yQ + */ + void getPublicKey(PublicKey& pub) const + { + pub.set(x_, y_); + } +#if 0 + // log_x(y) + int log(const GT& x, const GT& y) const + { + if (y == 1) return 0; + if (y == x) return 1; + GT inv; + GT::unitaryInv(inv, x); + if (y == inv) return -1; + GT t = x; + for (int i = 2; i < 100; i++) { + t *= x; + if (y == t) return i; + GT::unitaryInv(inv, t); + if (y == inv) return -i; + } + throw cybozu::Exception("she:dec:log:not found"); + } +#endif + int64_t dec(const CipherTextG1& c) const + { + if (useDecG1ViaGT_) return decViaGT(c); + /* + S = mP + rxP + T = rP + R = S - xT = mP + */ + G1 R; + G1::mul(R, c.T_, x_); + G1::sub(R, c.S_, R); + return PhashTbl_.log(R); + } + int64_t dec(const CipherTextG2& c) const + { + if (useDecG2ViaGT_) return decViaGT(c); + G2 R; + G2::mul(R, c.T_, y_); + G2::sub(R, c.S_, R); + return QhashTbl_.log(R); + } + int64_t dec(const CipherTextA& c) const + { + return dec(c.c1_); + } + int64_t dec(const CipherTextGT& c) const + { + GT v; + getPowOfePQ(v, c); + return ePQhashTbl_.log(v); +// return log(g, v); + } + int64_t decViaGT(const CipherTextG1& c) const + { + G1 R; + G1::mul(R, c.T_, x_); + G1::sub(R, c.S_, R); + GT v; + pairing(v, R, Q_); + return ePQhashTbl_.log(v); + } + int64_t decViaGT(const CipherTextG2& c) const + { + G2 R; + G2::mul(R, c.T_, y_); + G2::sub(R, c.S_, R); + GT v; + pairing(v, P_, R); + return ePQhashTbl_.log(v); + } + int64_t dec(const CipherText& c) const + { + if (c.isMultiplied()) { + return dec(c.m_); + } else { + return dec(c.a_); + } + } + bool isZero(const CipherTextG1& c) const + { + return c.isZero(x_); + } + bool isZero(const CipherTextG2& c) const + { + return c.isZero(y_); + } + bool isZero(const CipherTextA& c) const + { + return c.c1_.isZero(x_); + } + bool isZero(const CipherTextGT& c) const + { + GT v; + getPowOfePQ(v, c); + return v.isOne(); + } + bool isZero(const CipherText& c) const + { + if (c.isMultiplied()) { + return isZero(c.m_); + } else { + return isZero(c.a_); + } + } + template + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + x_.load(pb, is, ioMode); if (!*pb) return; + if (!isG1only_) y_.load(pb, is, ioMode); + } + template + void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + x_.save(pb, os, ioMode); if (!*pb) return; + if (isG1only_) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + y_.save(os, ioMode); + } + template + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("she:SecretKey:load"); + } + template + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("she:SecretKey:save"); + } + friend std::istream& operator>>(std::istream& is, SecretKey& self) + { + self.load(is, fp::detectIoMode(Fr::getIoMode(), is)); + return is; + } + friend 
std::ostream& operator<<(std::ostream& os, const SecretKey& self) + { + self.save(os, fp::detectIoMode(Fr::getIoMode(), os)); + return os; + } + bool operator==(const SecretKey& rhs) const + { + return x_ == rhs.x_ && (isG1only_ || y_ == rhs.y_); + } + bool operator!=(const SecretKey& rhs) const { return !operator==(rhs); } + }; +private: + /* + simple ElGamal encryptionfor G1 and G2 + (S, T) = (m P + r xP, rP) + Pmul.mul(X, a) // X = a P + xPmul.mul(X, a) // X = a xP + use *encRand if encRand is not null + */ + template + static void ElGamalEnc(G& S, G& T, const INT& m, const mcl::fp::WindowMethod& Pmul, const MulG& xPmul, const Fr *encRand = 0) + { + Fr r; + if (encRand) { + r = *encRand; + } else { + r.setRand(); + } + Pmul.mul(static_cast(T), r); + xPmul.mul(S, r); // S = r xP + if (m == 0) return; + G C; + Pmul.mul(static_cast(C), m); + S += C; + } + /* + https://github.com/herumi/mcl/blob/master/misc/she/nizkp.pdf + + encRand is a random value used for ElGamalEnc() + d[1-m] ; rand + s[1-m] ; rand + R[0][1-m] = s[1-m] P - d[1-m] T + R[1][1-m] = s[1-m] xP - d[1-m] (S - (1-m) P) + r ; rand + R[0][m] = r P + R[1][m] = r xP + c = H(S, T, R[0][0], R[0][1], R[1][0], R[1][1]) + d[m] = c - d[1-m] + s[m] = r + d[m] encRand + */ + template + static void makeZkpBin(ZkpBin& zkp, const G& S, const G& T, const Fr& encRand, const G& P, int m, const mcl::fp::WindowMethod& Pmul, const MulG& xPmul) + { + if (m != 0 && m != 1) throw cybozu::Exception("makeZkpBin:bad m") << m; + Fr *s = &zkp.d_[0]; + Fr *d = &zkp.d_[2]; + G R[2][2]; + d[1-m].setRand(); + s[1-m].setRand(); + G T1, T2; + Pmul.mul(static_cast(T1), s[1-m]); // T1 = s[1-m] P + G::mul(T2, T, d[1-m]); + G::sub(R[0][1-m], T1, T2); // s[1-m] P - d[1-m]T + xPmul.mul(T1, s[1-m]); // T1 = s[1-m] xP + if (m == 0) { + G::sub(T2, S, P); + G::mul(T2, T2, d[1-m]); + } else { + G::mul(T2, S, d[1-m]); + } + G::sub(R[1][1-m], T1, T2); // s[1-m] xP - d[1-m](S - (1-m) P) + Fr r; + r.setRand(); + Pmul.mul(static_cast(R[0][m]), r); // R[0][m] = r P + xPmul.mul(R[1][m], r); // R[1][m] = r xP + char buf[sizeof(G) * 2]; + cybozu::MemoryOutputStream os(buf, sizeof(buf)); + S.save(os); + T.save(os); + R[0][0].save(os); + R[0][1].save(os); + R[1][0].save(os); + R[1][1].save(os); + Fr c; + c.setHashOf(buf, os.getPos()); + d[m] = c - d[1-m]; + s[m] = r + d[m] * encRand; + } + /* + R[0][i] = s[i] P - d[i] T ; i = 0,1 + R[1][0] = s[0] xP - d[0] S + R[1][1] = s[1] xP - d[1](S - P) + c = H(S, T, R[0][0], R[0][1], R[1][0], R[1][1]) + c == d[0] + d[1] + */ + template + static bool verifyZkpBin(const G& S, const G& T, const G& P, const ZkpBin& zkp, const mcl::fp::WindowMethod& Pmul, const MulG& xPmul) + { + const Fr *s = &zkp.d_[0]; + const Fr *d = &zkp.d_[2]; + G R[2][2]; + G T1, T2; + for (int i = 0; i < 2; i++) { + Pmul.mul(static_cast(T1), s[i]); // T1 = s[i] P + G::mul(T2, T, d[i]); + G::sub(R[0][i], T1, T2); + } + xPmul.mul(T1, s[0]); // T1 = s[0] xP + G::mul(T2, S, d[0]); + G::sub(R[1][0], T1, T2); + xPmul.mul(T1, s[1]); // T1 = x[1] xP + G::sub(T2, S, P); + G::mul(T2, T2, d[1]); + G::sub(R[1][1], T1, T2); + char buf[sizeof(G) * 2]; + cybozu::MemoryOutputStream os(buf, sizeof(buf)); + S.save(os); + T.save(os); + R[0][0].save(os); + R[0][1].save(os); + R[1][0].save(os); + R[1][1].save(os); + Fr c; + c.setHashOf(buf, os.getPos()); + return c == d[0] + d[1]; + } + /* + encRand1, encRand2 are random values use for ElGamalEnc() + */ + template + static void makeZkpEq(ZkpEq& zkp, G1& S1, G1& T1, G2& S2, G2& T2, const INT& m, const mcl::fp::WindowMethod& Pmul, const MulG1& 
xPmul, const mcl::fp::WindowMethod& Qmul, const MulG2& yQmul) + { + Fr p, s; + p.setRand(); + s.setRand(); + ElGamalEnc(S1, T1, m, Pmul, xPmul, &p); + ElGamalEnc(S2, T2, m, Qmul, yQmul, &s); + Fr rp, rs, rm; + rp.setRand(); + rs.setRand(); + rm.setRand(); + G1 R1, R2; + G2 R3, R4; + ElGamalEnc(R1, R2, rm, Pmul, xPmul, &rp); + ElGamalEnc(R3, R4, rm, Qmul, yQmul, &rs); + char buf[sizeof(G1) * 4 + sizeof(G2) * 4]; + cybozu::MemoryOutputStream os(buf, sizeof(buf)); + S1.save(os); + T1.save(os); + S2.save(os); + T2.save(os); + R1.save(os); + R2.save(os); + R3.save(os); + R4.save(os); + Fr& c = zkp.d_[0]; + Fr& sp = zkp.d_[1]; + Fr& ss = zkp.d_[2]; + Fr& sm = zkp.d_[3]; + c.setHashOf(buf, os.getPos()); + Fr::mul(sp, c, p); + sp += rp; + Fr::mul(ss, c, s); + ss += rs; + Fr::mul(sm, c, m); + sm += rm; + } + template + static bool verifyZkpEq(const ZkpEq& zkp, const G1& S1, const G1& T1, const G2& S2, const G2& T2, const mcl::fp::WindowMethod& Pmul, const MulG1& xPmul, const mcl::fp::WindowMethod& Qmul, const MulG2& yQmul) + { + const Fr& c = zkp.d_[0]; + const Fr& sp = zkp.d_[1]; + const Fr& ss = zkp.d_[2]; + const Fr& sm = zkp.d_[3]; + G1 R1, R2, X1; + G2 R3, R4, X2; + ElGamalEnc(R1, R2, sm, Pmul, xPmul, &sp); + G1::mul(X1, S1, c); + R1 -= X1; + G1::mul(X1, T1, c); + R2 -= X1; + ElGamalEnc(R3, R4, sm, Qmul, yQmul, &ss); + G2::mul(X2, S2, c); + R3 -= X2; + G2::mul(X2, T2, c); + R4 -= X2; + char buf[sizeof(G1) * 4 + sizeof(G2) * 4]; + cybozu::MemoryOutputStream os(buf, sizeof(buf)); + S1.save(os); + T1.save(os); + S2.save(os); + T2.save(os); + R1.save(os); + R2.save(os); + R3.save(os); + R4.save(os); + Fr c2; + c2.setHashOf(buf, os.getPos()); + return c == c2; + } + /* + encRand1, encRand2 are random values use for ElGamalEnc() + */ + template + static void makeZkpBinEq(ZkpBinEq& zkp, G1& S1, G1& T1, G2& S2, G2& T2, int m, const mcl::fp::WindowMethod& Pmul, const MulG1& xPmul, const mcl::fp::WindowMethod& Qmul, const MulG2& yQmul) + { + if (m != 0 && m != 1) throw cybozu::Exception("makeZkpBinEq:bad m") << m; + Fr *d = &zkp.d_[0]; + Fr *spm = &zkp.d_[2]; + Fr& ss = zkp.d_[4]; + Fr& sp = zkp.d_[5]; + Fr& sm = zkp.d_[6]; + Fr p, s; + p.setRand(); + s.setRand(); + ElGamalEnc(S1, T1, m, Pmul, xPmul, &p); + ElGamalEnc(S2, T2, m, Qmul, yQmul, &s); + d[1-m].setRand(); + spm[1-m].setRand(); + G1 R1[2], R2[2], X1; + Pmul.mul(static_cast(R1[1-m]), spm[1-m]); + G1::mul(X1, T1, d[1-m]); + R1[1-m] -= X1; + if (m == 0) { + G1::sub(X1, S1, P_); + G1::mul(X1, X1, d[1-m]); + } else { + G1::mul(X1, S1, d[1-m]); + } + xPmul.mul(R2[1-m], spm[1-m]); + R2[1-m] -= X1; + Fr rpm, rp, rs, rm; + rpm.setRand(); + rp.setRand(); + rs.setRand(); + rm.setRand(); + ElGamalEnc(R2[m], R1[m], 0, Pmul, xPmul, &rpm); + G1 R3, R4; + G2 R5, R6; + ElGamalEnc(R4, R3, rm, Pmul, xPmul, &rp); + ElGamalEnc(R6, R5, rm, Qmul, yQmul, &rs); + char buf[sizeof(Fr) * 12]; + cybozu::MemoryOutputStream os(buf, sizeof(buf)); + S1.save(os); + T1.save(os); + R1[0].save(os); + R1[1].save(os); + R2[0].save(os); + R2[1].save(os); + R3.save(os); + R4.save(os); + R5.save(os); + R6.save(os); + Fr c; + c.setHashOf(buf, os.getPos()); + Fr::sub(d[m], c, d[1-m]); + Fr::mul(spm[m], d[m], p); + spm[m] += rpm; + Fr::mul(sp, c, p); + sp += rp; + Fr::mul(ss, c, s); + ss += rs; + Fr::mul(sm, c, m); + sm += rm; + } + template + static bool verifyZkpBinEq(const ZkpBinEq& zkp, const G1& S1, const G1& T1, const G2& S2, const G2& T2, const mcl::fp::WindowMethod& Pmul, const MulG1& xPmul, const mcl::fp::WindowMethod& Qmul, const MulG2& yQmul) + { + const Fr *d = &zkp.d_[0]; + 
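+	/*
+		makeZkpEq/verifyZkpEq and the Bin variants all follow the
+		standard Fiat-Shamir shape: the prover commits with fresh
+		randomness (R1..R4 = Enc(rm; rp, rs)), derives the challenge c by
+		hashing the whole transcript, and responds with
+		s* = r* + c * secret; the verifier recomputes each commitment as
+		Enc(sm; sp, ss) minus c times the corresponding ciphertext
+		component and checks that the transcript hash matches. For the G1
+		half of ZkpEq:
+			R1 = Enc_S(sm; sp) - c * S1,  R2 = Enc_T(sm; sp) - c * T1.
+	*/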
const Fr *spm = &zkp.d_[2]; + const Fr& ss = zkp.d_[4]; + const Fr& sp = zkp.d_[5]; + const Fr& sm = zkp.d_[6]; + G1 R1[2], R2[2], X1; + for (int i = 0; i < 2; i++) { + Pmul.mul(static_cast(R1[i]), spm[i]); + G1::mul(X1, T1, d[i]); + R1[i] -= X1; + } + xPmul.mul(R2[0], spm[0]); + G1::mul(X1, S1, d[0]); + R2[0] -= X1; + xPmul.mul(R2[1], spm[1]); + G1::sub(X1, S1, P_); + G1::mul(X1, X1, d[1]); + R2[1] -= X1; + Fr c; + Fr::add(c, d[0], d[1]); + G1 R3, R4; + G2 R5, R6; + ElGamalEnc(R4, R3, sm, Pmul, xPmul, &sp); + G1::mul(X1, T1, c); + R3 -= X1; + G1::mul(X1, S1, c); + R4 -= X1; + ElGamalEnc(R6, R5, sm, Qmul, yQmul, &ss); + G2 X2; + G2::mul(X2, T2, c); + R5 -= X2; + G2::mul(X2, S2, c); + R6 -= X2; + char buf[sizeof(Fr) * 12]; + cybozu::MemoryOutputStream os(buf, sizeof(buf)); + S1.save(os); + T1.save(os); + R1[0].save(os); + R1[1].save(os); + R2[0].save(os); + R2[1].save(os); + R3.save(os); + R4.save(os); + R5.save(os); + R6.save(os); + Fr c2; + c2.setHashOf(buf, os.getPos()); + return c == c2; + } + /* + common method for PublicKey and PrecomputedPublicKey + */ + template + struct PublicKeyMethod { + /* + you can use INT as int64_t and Fr, + but the return type of dec() is int64_t. + */ + template + void enc(CipherTextG1& c, const INT& m) const + { + static_cast(*this).encG1(c, m); + } + template + void enc(CipherTextG2& c, const INT& m) const + { + static_cast(*this).encG2(c, m); + } + template + void enc(CipherTextA& c, const INT& m) const + { + enc(c.c1_, m); + enc(c.c2_, m); + } + template + void enc(CipherTextGT& c, const INT& m) const + { + static_cast(*this).encGT(c, m); + } + template + void enc(CipherText& c, const INT& m, bool multiplied = false) const + { + c.isMultiplied_ = multiplied; + if (multiplied) { + enc(c.m_, m); + } else { + enc(c.a_, m); + } + } + /* + reRand method is for circuit privacy + */ + template + void reRandT(CT& c) const + { + CT c0; + static_cast(*this).enc(c0, 0); + CT::add(c, c, c0); + } + void reRand(CipherTextG1& c) const { reRandT(c); } + void reRand(CipherTextG2& c) const { reRandT(c); } + void reRand(CipherTextGT& c) const { reRandT(c); } + void reRand(CipherText& c) const + { + if (c.isMultiplied()) { + reRandT(c.m_); + } else { + reRandT(c.a_); + } + } + /* + convert from CipherTextG1 to CipherTextGT + */ + void convert(CipherTextGT& cm, const CipherTextG1& c1) const + { + /* + Enc(1) = (S, T) = (Q + r yQ, rQ) = (Q, 0) if r = 0 + cm = c1 * (Q, 0) = (S, T) * (Q, 0) = (e(S, Q), 1, e(T, Q), 1) + */ + precomputedMillerLoop(cm.g_[0], c1.getS(), Qcoeff_); + finalExp(cm.g_[0], cm.g_[0]); + precomputedMillerLoop(cm.g_[2], c1.getT(), Qcoeff_); + finalExp(cm.g_[2], cm.g_[2]); + + cm.g_[1] = cm.g_[3] = 1; + } + /* + convert from CipherTextG2 to CipherTextGT + */ + void convert(CipherTextGT& cm, const CipherTextG2& c2) const + { + /* + Enc(1) = (S, T) = (P + r xP, rP) = (P, 0) if r = 0 + cm = (P, 0) * c2 = (e(P, S), e(P, T), 1, 1) + */ + pairing(cm.g_[0], P_, c2.getS()); + pairing(cm.g_[1], P_, c2.getT()); + cm.g_[2] = cm.g_[3] = 1; + } + void convert(CipherTextGT& cm, const CipherTextA& ca) const + { + convert(cm, ca.c1_); + } + void convert(CipherText& cm, const CipherText& ca) const + { + if (ca.isMultiplied()) throw cybozu::Exception("she:PublicKey:convertCipherText:already isMultiplied"); + cm.isMultiplied_ = true; + convert(cm.m_, ca.a_); + } + }; +public: + class PublicKey : public fp::Serializable > { + G1 xP_; + G2 yQ_; + friend class SecretKey; + friend class PrecomputedPublicKey; + template + friend struct PublicKeyMethod; + template + struct MulG { + 
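+		/*
+			Decryption above is lifted-ElGamal cancellation followed by a
+			small discrete log: S - xT = (mP + r xP) - x(rP) = mP, and
+			PhashTbl_.log(mP) recovers m. A worked example with
+			hypothetical values m = 3, r = 5, x = 2:
+				S = 3P + 10P = 13P, T = 5P, S - 2T = 13P - 10P = 3P.
+		*/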
const G& base; + MulG(const G& base) : base(base) {} + template + void mul(G& out, const INT& m) const + { + G::mul(out, base, m); + } + }; + void set(const Fr& x, const Fr& y) + { + G1::mul(xP_, P_, x); + if (!isG1only_) G2::mul(yQ_, Q_, y); + } + template + void encG1(CipherTextG1& c, const INT& m) const + { + const MulG xPmul(xP_); + ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPmul); + } + template + void encG2(CipherTextG2& c, const INT& m) const + { + const MulG yQmul(yQ_); + ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQmul); + } +public: + void encWithZkpBin(CipherTextG1& c, ZkpBin& zkp, int m) const + { + Fr encRand; + encRand.setRand(); + const MulG xPmul(xP_); + ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPmul, &encRand); + makeZkpBin(zkp, c.S_, c.T_, encRand, P_, m, PhashTbl_.getWM(), xPmul); + } + void encWithZkpBin(CipherTextG2& c, ZkpBin& zkp, int m) const + { + Fr encRand; + encRand.setRand(); + const MulG yQmul(yQ_); + ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQmul, &encRand); + makeZkpBin(zkp, c.S_, c.T_, encRand, Q_, m, QhashTbl_.getWM(), yQmul); + } + bool verify(const CipherTextG1& c, const ZkpBin& zkp) const + { + const MulG xPmul(xP_); + return verifyZkpBin(c.S_, c.T_, P_, zkp, PhashTbl_.getWM(), xPmul); + } + bool verify(const CipherTextG2& c, const ZkpBin& zkp) const + { + const MulG yQmul(yQ_); + return verifyZkpBin(c.S_, c.T_, Q_, zkp, QhashTbl_.getWM(), yQmul); + } + template + void encWithZkpEq(CipherTextG1& c1, CipherTextG2& c2, ZkpEq& zkp, const INT& m) const + { + const MulG xPmul(xP_); + const MulG yQmul(yQ_); + makeZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul); + } + bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpEq& zkp) const + { + const MulG xPmul(xP_); + const MulG yQmul(yQ_); + return verifyZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul); + } + void encWithZkpBinEq(CipherTextG1& c1, CipherTextG2& c2, ZkpBinEq& zkp, int m) const + { + const MulG xPmul(xP_); + const MulG yQmul(yQ_); + makeZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul); + } + bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpBinEq& zkp) const + { + const MulG xPmul(xP_); + const MulG yQmul(yQ_); + return verifyZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul); + } + template + void encGT(CipherTextGT& c, const INT& m) const + { + /* + (s, t, u, v) = ((e^x)^a (e^y)^b (e^-xy)^c e^m, e^b, e^a, e^c) + s = e(a xP + m P, Q)e(b P - c xP, yQ) + */ + Fr ra, rb, rc; + ra.setRand(); + rb.setRand(); + rc.setRand(); + GT e; + + G1 P1, P2; + G1::mul(P1, xP_, ra); + if (m) { +// G1::mul(P2, P, m); + PhashTbl_.mulByWindowMethod(P2, m); + P1 += P2; + } +// millerLoop(c.g[0], P1, Q); + precomputedMillerLoop(c.g_[0], P1, Qcoeff_); +// G1::mul(P1, P, rb); + PhashTbl_.mulByWindowMethod(P1, rb); + G1::mul(P2, xP_, rc); + P1 -= P2; + millerLoop(e, P1, yQ_); + c.g_[0] *= e; + finalExp(c.g_[0], c.g_[0]); +#if 1 + ePQhashTbl_.mulByWindowMethod(c.g_[1], rb); + ePQhashTbl_.mulByWindowMethod(c.g_[2], ra); + ePQhashTbl_.mulByWindowMethod(c.g_[3], rc); +#else + GT::pow(c.g_[1], ePQ_, rb); + GT::pow(c.g_[2], ePQ_, ra); + GT::pow(c.g_[3], ePQ_, rc); +#endif + } + public: + template + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + xP_.load(pb, is, ioMode); if (!*pb) return; + if (!isG1only_) yQ_.load(pb, is, ioMode); + } + template + void save(bool *pb, OutputStream& 
os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + xP_.save(pb, os, ioMode); if (!*pb) return; + if (isG1only_) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + yQ_.save(pb, os, ioMode); + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("she:PublicKey:load"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("she:PublicKey:save"); + } + friend std::istream& operator>>(std::istream& is, PublicKey& self) + { + self.load(is, fp::detectIoMode(G1::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const PublicKey& self) + { + self.save(os, fp::detectIoMode(G1::getIoMode(), os)); + return os; + } + bool operator==(const PublicKey& rhs) const + { + return xP_ == rhs.xP_ && (isG1only_ || yQ_ == rhs.yQ_); + } + bool operator!=(const PublicKey& rhs) const { return !operator==(rhs); } + }; + + class PrecomputedPublicKey : public fp::Serializable<PrecomputedPublicKey, PublicKeyMethod<PrecomputedPublicKey> > { + typedef local::InterfaceForHashTable<GT, false> GTasEC; + typedef mcl::fp::WindowMethod<GTasEC> GTwin; + template<class T> + friend struct PublicKeyMethod; + GT exPQ_; + GT eyPQ_; + GT exyPQ_; + GTwin exPQwm_; + GTwin eyPQwm_; + GTwin exyPQwm_; + mcl::fp::WindowMethod<G1> xPwm_; + mcl::fp::WindowMethod<G2> yQwm_; + template<class T> + void mulByWindowMethod(GT& x, const GTwin& wm, const T& y) const + { + wm.mul(static_cast<GTasEC&>(x), y); + } + template<class INT> + void encG1(CipherTextG1& c, const INT& m) const + { + ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPwm_); + } + template<class INT> + void encG2(CipherTextG2& c, const INT& m) const + { + ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQwm_); + } + template<class INT> + void encGT(CipherTextGT& c, const INT& m) const + { + /* + (s, t, u, v) = (e^m e^(xya), (e^x)^b, (e^y)^c, e^(b + c - a)) + */ + Fr ra, rb, rc; + ra.setRand(); + rb.setRand(); + rc.setRand(); + GT t; + ePQhashTbl_.mulByWindowMethod(c.g_[0], m); // e^m + mulByWindowMethod(t, exyPQwm_, ra); // (e^xy)^a + c.g_[0] *= t; + mulByWindowMethod(c.g_[1], exPQwm_, rb); // (e^x)^b + mulByWindowMethod(c.g_[2], eyPQwm_, rc); // (e^y)^c + rb += rc; + rb -= ra; + ePQhashTbl_.mulByWindowMethod(c.g_[3], rb); + } + public: + void init(const PublicKey& pub) + { + const size_t bitSize = Fr::getBitSize(); + xPwm_.init(pub.xP_, bitSize, local::winSize); + if (isG1only_) return; + yQwm_.init(pub.yQ_, bitSize, local::winSize); + pairing(exPQ_, pub.xP_, Q_); + pairing(eyPQ_, P_, pub.yQ_); + pairing(exyPQ_, pub.xP_, pub.yQ_); + exPQwm_.init(static_cast<const GTasEC&>(exPQ_), bitSize, local::winSize); + eyPQwm_.init(static_cast<const GTasEC&>(eyPQ_), bitSize, local::winSize); + exyPQwm_.init(static_cast<const GTasEC&>(exyPQ_), bitSize, local::winSize); + } + void encWithZkpBin(CipherTextG1& c, ZkpBin& zkp, int m) const + { + Fr encRand; + encRand.setRand(); + ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPwm_, &encRand); + makeZkpBin(zkp, c.S_, c.T_, encRand, P_, m, PhashTbl_.getWM(), xPwm_); + } + void encWithZkpBin(CipherTextG2& c, ZkpBin& zkp, int m) const + { + Fr encRand; + encRand.setRand(); + ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQwm_, &encRand); + makeZkpBin(zkp, c.S_, c.T_, encRand, Q_, m, QhashTbl_.getWM(), yQwm_); + } + bool verify(const CipherTextG1& c, const ZkpBin& zkp) const + { + return verifyZkpBin(c.S_, c.T_, P_, zkp, PhashTbl_.getWM(), xPwm_); + } + bool verify(const CipherTextG2& c, const ZkpBin& zkp) const + { + return verifyZkpBin(c.S_, c.T_, Q_, zkp, 
QhashTbl_.getWM(), yQwm_); + } + template<class INT> + void encWithZkpEq(CipherTextG1& c1, CipherTextG2& c2, ZkpEq& zkp, const INT& m) const + { + makeZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_); + } + bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpEq& zkp) const + { + return verifyZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_); + } + void encWithZkpBinEq(CipherTextG1& c1, CipherTextG2& c2, ZkpBinEq& zkp, int m) const + { + makeZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_); + } + bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpBinEq& zkp) const + { + return verifyZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_); + } + }; + class CipherTextA { + CipherTextG1 c1_; + CipherTextG2 c2_; + friend class SecretKey; + friend class PublicKey; + friend class CipherTextGT; + template<class T> + friend struct PublicKeyMethod; + public: + void clear() + { + c1_.clear(); + c2_.clear(); + } + static void add(CipherTextA& z, const CipherTextA& x, const CipherTextA& y) + { + CipherTextG1::add(z.c1_, x.c1_, y.c1_); + CipherTextG2::add(z.c2_, x.c2_, y.c2_); + } + static void sub(CipherTextA& z, const CipherTextA& x, const CipherTextA& y) + { + CipherTextG1::sub(z.c1_, x.c1_, y.c1_); + CipherTextG2::sub(z.c2_, x.c2_, y.c2_); + } + static void mul(CipherTextA& z, const CipherTextA& x, int64_t y) + { + CipherTextG1::mul(z.c1_, x.c1_, y); + CipherTextG2::mul(z.c2_, x.c2_, y); + } + static void neg(CipherTextA& y, const CipherTextA& x) + { + CipherTextG1::neg(y.c1_, x.c1_); + CipherTextG2::neg(y.c2_, x.c2_); + } + void add(const CipherTextA& c) { add(*this, *this, c); } + void sub(const CipherTextA& c) { sub(*this, *this, c); } + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + c1_.load(pb, is, ioMode); if (!*pb) return; + c2_.load(pb, is, ioMode); + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + c1_.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + c2_.save(pb, os, ioMode); + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("she:CipherTextA:load"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("she:CipherTextA:save"); + } + friend std::istream& operator>>(std::istream& is, CipherTextA& self) + { + self.load(is, fp::detectIoMode(G1::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const CipherTextA& self) + { + self.save(os, fp::detectIoMode(G1::getIoMode(), os)); + return os; + } + bool operator==(const CipherTextA& rhs) const + { + return c1_ == rhs.c1_ && c2_ == rhs.c2_; + } + bool operator!=(const CipherTextA& rhs) const { return !operator==(rhs); } + }; + + class CipherTextGT : public fp::Serializable<CipherTextGT> { + GT g_[4]; + friend class SecretKey; + friend class PublicKey; + friend class PrecomputedPublicKey; + friend class CipherTextA; + template<class T> + friend struct PublicKeyMethod; + public: + void clear() + { + for (int i = 0; i < 4; i++) { + g_[i].setOne(); + } + } + static void neg(CipherTextGT& y, const CipherTextGT& x) + { + for (int i = 0; i < 4; i++) { + GT::unitaryInv(y.g_[i], x.g_[i]); + } 
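// note: the components of a decryptable CipherTextGT are unitary (they lie in the
// cyclotomic subgroup of GT after finalExp), so GT::unitaryInv above is the true
// inverse of each g_[i] at the cost of a conjugation; since the message sits in the
// exponent, inverting componentwise turns Enc(m) into Enc(-m).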
+ } + static void add(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y) + { + /* + (g[i]) + (g'[i]) = (g[i] * g'[i]) + */ + for (int i = 0; i < 4; i++) { + GT::mul(z.g_[i], x.g_[i], y.g_[i]); + } + } + static void sub(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y) + { + /* + (g[i]) - (g'[i]) = (g[i] / g'[i]) + */ + GT t; + for (size_t i = 0; i < 4; i++) { + GT::unitaryInv(t, y.g_[i]); + GT::mul(z.g_[i], x.g_[i], t); + } + } + static void mulML(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y) + { + /* + (S1, T1) * (S2, T2) = (ML(S1, S2), ML(S1, T2), ML(T1, S2), ML(T1, T2)) + */ + tensorProductML(z.g_, x.S_, x.T_, y.S_, y.T_); + } + static void finalExp(CipherTextGT& y, const CipherTextGT& x) + { + finalExp4(y.g_, x.g_); + } + /* + mul(x, y) = mulML(x, y) + finalExp + mul(c11, c12) + mul(c21, c22) + = finalExp(mulML(c11, c12) + mulML(c21, c22)), + then one finalExp can be reduced + */ + static void mul(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y) + { + /* + (S1, T1) * (S2, T2) = (e(S1, S2), e(S1, T2), e(T1, S2), e(T1, T2)) + */ + mulML(z, x, y); + finalExp(z, z); + } + static void mul(CipherTextGT& z, const CipherTextA& x, const CipherTextA& y) + { + mul(z, x.c1_, y.c2_); + } + static void mul(CipherTextGT& z, const CipherTextGT& x, int64_t y) + { + for (int i = 0; i < 4; i++) { + GT::pow(z.g_[i], x.g_[i], y); + } + } + void add(const CipherTextGT& c) { add(*this, *this, c); } + void sub(const CipherTextGT& c) { sub(*this, *this, c); } + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + for (int i = 0; i < 4; i++) { + g_[i].load(pb, is, ioMode); if (!*pb) return; + } + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + g_[0].save(pb, os, ioMode); if (!*pb) return; + for (int i = 1; i < 4; i++) { + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + g_[i].save(pb, os, ioMode); if (!*pb) return; + } + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("she:CipherTextGT:load"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("she:CipherTextGT:save"); + } + friend std::istream& operator>>(std::istream& is, CipherTextGT& self) + { + self.load(is, fp::detectIoMode(G1::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const CipherTextGT& self) + { + self.save(os, fp::detectIoMode(G1::getIoMode(), os)); + return os; + } + bool operator==(const CipherTextGT& rhs) const + { + for (int i = 0; i < 4; i++) { + if (g_[i] != rhs.g_[i]) return false; + } + return true; + } + bool operator!=(const CipherTextGT& rhs) const { return !operator==(rhs); } + }; + + class CipherText : public fp::Serializable<CipherText> { + bool isMultiplied_; + CipherTextA a_; + CipherTextGT m_; + friend class SecretKey; + friend class PublicKey; + template<class T> + friend struct PublicKeyMethod; + public: + CipherText() : isMultiplied_(false) {} + void clearAsAdded() + { + isMultiplied_ = false; + a_.clear(); + } + void clearAsMultiplied() + { + isMultiplied_ = true; + m_.clear(); + } + bool isMultiplied() const { return isMultiplied_; } + static void add(CipherText& z, const CipherText& x, const CipherText& y) + { + if (x.isMultiplied() && y.isMultiplied()) { + z.isMultiplied_ = true; + CipherTextGT::add(z.m_, x.m_, y.m_); + 
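/*
	Illustrative sketch, not from the mcl sources: the mulML/finalExp split documented
	above lets several level-2 products share one final exponentiation, e.g. a two-term
	encrypted inner product x1*y1 + x2*y2 (x1, x2 : CipherTextG1, y1, y2 : CipherTextG2
	under the same key):

		CipherTextGT t, u;
		CipherTextGT::mulML(t, x1, y1); // Miller loops only
		CipherTextGT::mulML(u, x2, y2);
		CipherTextGT::add(t, t, u);     // componentwise GT::mul, valid before finalExp
		CipherTextGT::finalExp(t, t);   // one finalExp instead of two
*/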
return; + } + if (!x.isMultiplied() && !y.isMultiplied()) { + z.isMultiplied_ = false; + CipherTextA::add(z.a_, x.a_, y.a_); + return; + } + throw cybozu::Exception("she:CipherText:add:mixed CipherText"); + } + static void sub(CipherText& z, const CipherText& x, const CipherText& y) + { + if (x.isMultiplied() && y.isMultiplied()) { + z.isMultiplied_ = true; + CipherTextGT::sub(z.m_, x.m_, y.m_); + return; + } + if (!x.isMultiplied() && !y.isMultiplied()) { + z.isMultiplied_ = false; + CipherTextA::sub(z.a_, x.a_, y.a_); + return; + } + throw cybozu::Exception("she:CipherText:sub:mixed CipherText"); + } + static void neg(CipherText& y, const CipherText& x) + { + if (x.isMultiplied()) { + y.isMultiplied_ = true; + CipherTextGT::neg(y.m_, x.m_); + return; + } else { + y.isMultiplied_ = false; + CipherTextA::neg(y.a_, x.a_); + return; + } + } + static void mul(CipherText& z, const CipherText& x, const CipherText& y) + { + if (x.isMultiplied() || y.isMultiplied()) { + throw cybozu::Exception("she:CipherText:mul:mixed CipherText"); + } + z.isMultiplied_ = true; + CipherTextGT::mul(z.m_, x.a_, y.a_); + } + static void mul(CipherText& z, const CipherText& x, int64_t y) + { + z.isMultiplied_ = x.isMultiplied_; + if (x.isMultiplied()) { + CipherTextGT::mul(z.m_, x.m_, y); + } else { + CipherTextA::mul(z.a_, x.a_, y); + } + } + void add(const CipherText& c) { add(*this, *this, c); } + void sub(const CipherText& c) { sub(*this, *this, c); } + void mul(const CipherText& c) { mul(*this, *this, c); } + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + char c; + if (!cybozu::readChar(&c, is)) { + *pb = false; + return; + } + if (c == '0' || c == '1') { + isMultiplied_ = c == '0'; + } else { + *pb = false; + return; + } + if (isMultiplied()) { + m_.load(pb, is, ioMode); + } else { + a_.load(pb, is, ioMode); + } + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const + { + cybozu::writeChar(pb, os, isMultiplied_ ? '0' : '1'); if (!*pb) return; + if (isMultiplied()) { + m_.save(pb, os, ioMode); + } else { + a_.save(pb, os, ioMode); + } + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("she:CipherText:load"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("she:CipherText:save"); + } + friend std::istream& operator>>(std::istream& is, CipherText& self) + { + self.load(is, fp::detectIoMode(G1::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const CipherText& self) + { + self.save(os, fp::detectIoMode(G1::getIoMode(), os)); + return os; + } + bool operator==(const CipherText& rhs) const + { + if (isMultiplied() != rhs.isMultiplied()) return false; + if (isMultiplied()) { + return m_ == rhs.m_; + } + return a_ == rhs.a_; + } + bool operator!=(const CipherText& rhs) const { return !operator==(rhs); } + }; +}; +typedef local::HashTable<G1> HashTableG1; +typedef local::HashTable<G2> HashTableG2; +typedef local::HashTable<Fp12, false> HashTableGT; + +template<size_t dummyInput> G1 SHET<dummyInput>::P_; +template<size_t dummyInput> G2 SHET<dummyInput>::Q_; +template<size_t dummyInput> Fp12 SHET<dummyInput>::ePQ_; +template<size_t dummyInput> std::vector<Fp6> SHET<dummyInput>::Qcoeff_; +template<size_t dummyInput> HashTableG1 SHET<dummyInput>::PhashTbl_; +template<size_t dummyInput> HashTableG2 SHET<dummyInput>::QhashTbl_; +template<size_t dummyInput> HashTableGT SHET<dummyInput>::ePQhashTbl_; +template<size_t dummyInput> bool SHET<dummyInput>::useDecG1ViaGT_; +template<size_t dummyInput> bool SHET<dummyInput>::useDecG2ViaGT_; +template<size_t dummyInput> bool SHET<dummyInput>::isG1only_; +typedef mcl::she::SHET<> SHE; +typedef SHE::SecretKey SecretKey; +typedef SHE::PublicKey PublicKey; +typedef 
SHE::PrecomputedPublicKey PrecomputedPublicKey; +typedef SHE::CipherTextG1 CipherTextG1; +typedef SHE::CipherTextG2 CipherTextG2; +typedef SHE::CipherTextGT CipherTextGT; +typedef SHE::CipherTextA CipherTextA; +typedef CipherTextGT CipherTextGM; // old class +typedef SHE::CipherText CipherText; +typedef SHE::ZkpBin ZkpBin; +typedef SHE::ZkpEq ZkpEq; +typedef SHE::ZkpBinEq ZkpBinEq; + +inline void init(const mcl::CurveParam& cp = mcl::BN254, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum) +{ + SHE::init(cp, hashSize, tryNum); +} +inline void initG1only(const mcl::EcParam& para, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum) +{ + SHE::initG1only(para, hashSize, tryNum); +} +inline void init(size_t hashSize, size_t tryNum = local::defaultTryNum) { SHE::init(hashSize, tryNum); } +inline void setRangeForG1DLP(size_t hashSize) { SHE::setRangeForG1DLP(hashSize); } +inline void setRangeForG2DLP(size_t hashSize) { SHE::setRangeForG2DLP(hashSize); } +inline void setRangeForGTDLP(size_t hashSize) { SHE::setRangeForGTDLP(hashSize); } +inline void setRangeForDLP(size_t hashSize) { SHE::setRangeForDLP(hashSize); } +inline void setTryNum(size_t tryNum) { SHE::setTryNum(tryNum); } +inline void useDecG1ViaGT(bool use = true) { SHE::useDecG1ViaGT(use); } +inline void useDecG2ViaGT(bool use = true) { SHE::useDecG2ViaGT(use); } +inline HashTableG1& getHashTableG1() { return SHE::PhashTbl_; } +inline HashTableG2& getHashTableG2() { return SHE::QhashTbl_; } +inline HashTableGT& getHashTableGT() { return SHE::ePQhashTbl_; } + +inline void add(CipherTextG1& z, const CipherTextG1& x, const CipherTextG1& y) { CipherTextG1::add(z, x, y); } +inline void add(CipherTextG2& z, const CipherTextG2& x, const CipherTextG2& y) { CipherTextG2::add(z, x, y); } +inline void add(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y) { CipherTextGT::add(z, x, y); } +inline void add(CipherText& z, const CipherText& x, const CipherText& y) { CipherText::add(z, x, y); } + +inline void sub(CipherTextG1& z, const CipherTextG1& x, const CipherTextG1& y) { CipherTextG1::sub(z, x, y); } +inline void sub(CipherTextG2& z, const CipherTextG2& x, const CipherTextG2& y) { CipherTextG2::sub(z, x, y); } +inline void sub(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y) { CipherTextGT::sub(z, x, y); } +inline void sub(CipherText& z, const CipherText& x, const CipherText& y) { CipherText::sub(z, x, y); } + +inline void neg(CipherTextG1& y, const CipherTextG1& x) { CipherTextG1::neg(y, x); } +inline void neg(CipherTextG2& y, const CipherTextG2& x) { CipherTextG2::neg(y, x); } +inline void neg(CipherTextGT& y, const CipherTextGT& x) { CipherTextGT::neg(y, x); } +inline void neg(CipherText& y, const CipherText& x) { CipherText::neg(y, x); } + +template<class INT> +inline void mul(CipherTextG1& z, const CipherTextG1& x, const INT& y) { CipherTextG1::mul(z, x, y); } +template<class INT> +inline void mul(CipherTextG2& z, const CipherTextG2& x, const INT& y) { CipherTextG2::mul(z, x, y); } +template<class INT> +inline void mul(CipherTextGT& z, const CipherTextGT& x, const INT& y) { CipherTextGT::mul(z, x, y); } +template<class INT> +inline void mul(CipherText& z, const CipherText& x, const INT& y) { CipherText::mul(z, x, y); } + +inline void mul(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y) { CipherTextGT::mul(z, x, y); } +inline void mul(CipherText& z, const CipherText& x, const CipherText& y) { CipherText::mul(z, x, y); } + +} } // mcl::she + diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/util.hpp 
b/vendor/github.com/byzantine-lab/mcl/include/mcl/util.hpp new file mode 100644 index 000000000..edef971cb --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/util.hpp @@ -0,0 +1,285 @@ +#pragma once +/** + @file + @brief functions for T[] + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4456) + #pragma warning(disable : 4459) +#endif + +namespace mcl { namespace fp { + +template +T abs_(T x) { return x < 0 ? -x : x; } + +template +T min_(T x, T y) { return x < y ? x : y; } + +template +T max_(T x, T y) { return x < y ? y : x; } + +template +void swap_(T& x, T& y) +{ + T t; + t = x; + x = y; + y = t; +} + + +/* + get pp such that p * pp = -1 mod M, + where p is prime and M = 1 << 64(or 32). + @param pLow [in] p mod M +*/ +template +T getMontgomeryCoeff(T pLow) +{ + T ret = 0; + T t = 0; + T x = 1; + for (size_t i = 0; i < sizeof(T) * 8; i++) { + if ((t & 1) == 0) { + t += pLow; + ret += x; + } + t >>= 1; + x <<= 1; + } + return ret; +} + +template +int compareArray(const T* x, const T* y, size_t n) +{ + for (size_t i = n - 1; i != size_t(-1); i--) { + T a = x[i]; + T b = y[i]; + if (a != b) return a < b ? -1 : 1; + } + return 0; +} + +template +bool isLessArray(const T *x, const T* y, size_t n) +{ + for (size_t i = n - 1; i != size_t(-1); i--) { + T a = x[i]; + T b = y[i]; + if (a != b) return a < b; + } + return false; +} + +template +bool isGreaterOrEqualArray(const T *x, const T* y, size_t n) +{ + return !isLessArray(x, y, n); +} + +template +bool isLessOrEqualArray(const T *x, const T* y, size_t n) +{ + for (size_t i = n - 1; i != size_t(-1); i--) { + T a = x[i]; + T b = y[i]; + if (a != b) return a < b; + } + return true; +} + +template +bool isGreaterArray(const T *x, const T* y, size_t n) +{ + return !isLessOrEqualArray(x, y, n); +} + +template +bool isEqualArray(const T* x, const T* y, size_t n) +{ + for (size_t i = 0; i < n; i++) { + if (x[i] != y[i]) return false; + } + return true; +} + +template +bool isZeroArray(const T *x, size_t n) +{ + for (size_t i = 0; i < n; i++) { + if (x[i]) return false; + } + return true; +} + +template +void clearArray(T *x, size_t begin, size_t end) +{ + for (size_t i = begin; i < end; i++) x[i] = 0; +} + +template +void copyArray(T *y, const T *x, size_t n) +{ + for (size_t i = 0; i < n; i++) y[i] = x[i]; +} + +/* + x &= (1 << bitSize) - 1 +*/ +template +void maskArray(T *x, size_t n, size_t bitSize) +{ + const size_t TbitSize = sizeof(T) * 8; + assert(bitSize <= TbitSize * n); + const size_t q = bitSize / TbitSize; + const size_t r = bitSize % TbitSize; + if (r) { + x[q] &= (T(1) << r) - 1; + clearArray(x, q + 1, n); + } else { + clearArray(x, q, n); + } +} + +/* + return non zero size of x[] + return 1 if x[] == 0 +*/ +template +size_t getNonZeroArraySize(const T *x, size_t n) +{ + assert(n > 0); + while (n > 0) { + if (x[n - 1]) return n; + n--; + } + return 1; +} + +/* + @param out [inout] : set element of G ; out = x^y[] + @param x [in] + @param y [in] + @param n [in] size of y[] + @param limitBit [in] const time version if the value is positive + @note &out != x and out = the unit element of G +*/ +template +void powGeneric(G& out, const G& x, const T *y, size_t n, const Mul& mul, const Sqr& sqr, void normalize(G&, const G&), size_t limitBit = 0) +{ + assert(&out != &x); + G tbl[4]; // tbl = { discard, x, x^2, x^3 } + T v; + bool constTime = limitBit > 0; + int maxBit = 0; + 
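// The code below implements a fixed 2-bit window: tbl[1..3] cache x, x^2 and x^3,
// and the exponent y[] is consumed two bits per step (two squarings plus at most one
// table multiplication). When constTime is set, a zero window performs a dummy
// multiply into tbl[0] so the multiplication count is independent of the bit pattern
// of y, and the DummyLoop tail pads the squaring/multiply count up to limitBit.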
int m = 0; + while (n > 0) { + if (y[n - 1]) break; + n--; + } + if (n == 0) { + if (constTime) goto DummyLoop; + return; + } + if (!constTime && n == 1) { + switch (y[0]) { + case 1: + out = x; + return; + case 2: + sqr(out, x); + return; + case 3: + sqr(out, x); + mul(out, out, x); + return; + case 4: + sqr(out, x); + sqr(out, out); + return; + } + } + if (normalize != 0) { + normalize(tbl[0], x); + } else { + tbl[0] = x; + } + tbl[1] = tbl[0]; + sqr(tbl[2], tbl[1]); + if (normalize != 0) { normalize(tbl[2], tbl[2]); } + mul(tbl[3], tbl[2], x); + if (normalize != 0) { normalize(tbl[3], tbl[3]); } + v = y[n - 1]; + assert(v); + m = cybozu::bsr(v); + maxBit = int(m + (n - 1) * sizeof(T) * 8); + if (m & 1) { + m--; + T idx = (v >> m) & 3; + assert(idx > 0); + out = tbl[idx]; + } else { + out = x; + } + for (int i = (int)n - 1; i >= 0; i--) { + T v = y[i]; + for (int j = m - 2; j >= 0; j -= 2) { + sqr(out, out); + sqr(out, out); + T idx = (v >> j) & 3; + if (idx == 0) { + if (constTime) mul(tbl[0], tbl[0], tbl[1]); + } else { + mul(out, out, tbl[idx]); + } + } + m = (int)sizeof(T) * 8; + } +DummyLoop: + if (!constTime) return; + G D = out; + for (size_t i = maxBit + 1; i < limitBit; i += 2) { + sqr(D, D); + sqr(D, D); + mul(D, D, tbl[1]); + } +} + +/* + shortcut of multiplication by Unit +*/ +template +bool mulSmallUnit(T& z, const T& x, U y) +{ + switch (y) { + case 0: z.clear(); break; + case 1: z = x; break; + case 2: T::add(z, x, x); break; + case 3: { T t; T::add(t, x, x); T::add(z, t, x); break; } + case 4: T::add(z, x, x); T::add(z, z, z); break; + case 5: { T t; T::add(t, x, x); T::add(t, t, t); T::add(z, t, x); break; } + case 6: { T t; T::add(t, x, x); T::add(t, t, x); T::add(z, t, t); break; } + case 7: { T t; T::add(t, x, x); T::add(t, t, t); T::add(t, t, t); T::sub(z, t, x); break; } + case 8: T::add(z, x, x); T::add(z, z, z); T::add(z, z, z); break; + case 9: { T t; T::add(t, x, x); T::add(t, t, t); T::add(t, t, t); T::add(z, t, x); break; } + case 10: { T t; T::add(t, x, x); T::add(t, t, t); T::add(t, t, x); T::add(z, t, t); break; } + default: + return false; + } + return true; +} + +} } // mcl::fp + +#ifdef _MSC_VER + #pragma warning(pop) +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/vint.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/vint.hpp new file mode 100644 index 000000000..b087688c3 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/vint.hpp @@ -0,0 +1,1987 @@ +#pragma once +/** + emulate mpz_class +*/ +#include +#include +#include +#include +#ifndef CYBOZU_DONT_USE_STRING +#include +#endif +#include +#include +#include +#include + +#if defined(__EMSCRIPTEN__) || defined(__wasm__) + #define MCL_VINT_64BIT_PORTABLE + #define MCL_VINT_FIXED_BUFFER +#endif +#ifndef MCL_MAX_BIT_SIZE + #define MCL_MAX_BIT_SIZE 384 +#endif + +#ifndef MCL_SIZEOF_UNIT + #if defined(CYBOZU_OS_BIT) && (CYBOZU_OS_BIT == 32) + #define MCL_SIZEOF_UNIT 4 + #else + #define MCL_SIZEOF_UNIT 8 + #endif +#endif + +namespace mcl { + +namespace vint { + +#if MCL_SIZEOF_UNIT == 8 +typedef uint64_t Unit; +#else +typedef uint32_t Unit; +#endif + +template +void dump(const T *x, size_t n, const char *msg = "") +{ + const size_t is4byteUnit = sizeof(*x) == 4; + if (msg) printf("%s ", msg); + for (size_t i = 0; i < n; i++) { + if (is4byteUnit) { + printf("%08x", (uint32_t)x[n - 1 - i]); + } else { + printf("%016llx", (unsigned long long)x[n - 1 - i]); + } + } + printf("\n"); +} + +inline uint64_t make64(uint32_t H, uint32_t L) +{ + return ((uint64_t)H << 32) | L; 
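// make64/split64 pack and unpack the double-word [H:L] convention used below:
// mulUnit returns the low word of x * y and stores the high word through pH, and
// divUnit divides a double-word [H:L] by y. On 64-bit builds without a native
// 128-bit type (MCL_VINT_64BIT_PORTABLE), mulUnit rebuilds the product from four
// 32-bit partial products.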
+} + +inline void split64(uint32_t *H, uint32_t *L, uint64_t x) +{ + *H = uint32_t(x >> 32); + *L = uint32_t(x); +} + +/* + [H:L] <= x * y + @return L +*/ +inline uint32_t mulUnit(uint32_t *pH, uint32_t x, uint32_t y) +{ + uint64_t t = uint64_t(x) * y; + uint32_t L; + split64(pH, &L, t); + return L; +} +#if MCL_SIZEOF_UNIT == 8 +inline uint64_t mulUnit(uint64_t *pH, uint64_t x, uint64_t y) +{ +#ifdef MCL_VINT_64BIT_PORTABLE + uint32_t a = uint32_t(x >> 32); + uint32_t b = uint32_t(x); + uint32_t c = uint32_t(y >> 32); + uint32_t d = uint32_t(y); + + uint64_t ad = uint64_t(d) * a; + uint64_t bd = uint64_t(d) * b; + uint64_t L = uint32_t(bd); + ad += bd >> 32; // [ad:L] + + uint64_t ac = uint64_t(c) * a; + uint64_t bc = uint64_t(c) * b; + uint64_t H = uint32_t(bc); + ac += bc >> 32; // [ac:H] + /* + adL + acH + */ + uint64_t t = (ac << 32) | H; + ac >>= 32; + H = t + ad; + if (H < t) { + ac++; + } + /* + ac:H:L + */ + L |= H << 32; + H = (ac << 32) | uint32_t(H >> 32); + *pH = H; + return L; +#elif defined(_WIN64) && !defined(__INTEL_COMPILER) + return _umul128(x, y, pH); +#else + typedef __attribute__((mode(TI))) unsigned int uint128; + uint128 t = uint128(x) * y; + *pH = uint64_t(t >> 64); + return uint64_t(t); +#endif +} +#endif + +template +void divNM(T *q, size_t qn, T *r, const T *x, size_t xn, const T *y, size_t yn); + +/* + q = [H:L] / y + r = [H:L] % y + return q +*/ +inline uint32_t divUnit(uint32_t *pr, uint32_t H, uint32_t L, uint32_t y) +{ + uint64_t t = make64(H, L); + uint32_t q = uint32_t(t / y); + *pr = uint32_t(t % y); + return q; +} +#if MCL_SIZEOF_UNIT == 8 +inline uint64_t divUnit(uint64_t *pr, uint64_t H, uint64_t L, uint64_t y) +{ +#if defined(MCL_VINT_64BIT_PORTABLE) + uint32_t px[4] = { uint32_t(L), uint32_t(L >> 32), uint32_t(H), uint32_t(H >> 32) }; + uint32_t py[2] = { uint32_t(y), uint32_t(y >> 32) }; + size_t xn = 4; + size_t yn = 2; + uint32_t q[4]; + uint32_t r[2]; + size_t qn = xn - yn + 1; + divNM(q, qn, r, px, xn, py, yn); + *pr = make64(r[1], r[0]); + return make64(q[1], q[0]); +#elif defined(_MSC_VER) + #error "divUnit for uint64_t is not supported" +#else + typedef __attribute__((mode(TI))) unsigned int uint128; + uint128 t = (uint128(H) << 64) | L; + uint64_t q = uint64_t(t / y); + *pr = uint64_t(t % y); + return q; +#endif +} +#endif + +/* + compare x[] and y[] + @retval positive if x > y + @retval 0 if x == y + @retval negative if x < y +*/ +template +int compareNM(const T *x, size_t xn, const T *y, size_t yn) +{ + assert(xn > 0 && yn > 0); + if (xn != yn) return xn > yn ? 1 : -1; + for (int i = (int)xn - 1; i >= 0; i--) { + if (x[i] != y[i]) return x[i] > y[i] ? 1 : -1; + } + return 0; +} + +template +void clearN(T *x, size_t n) +{ + for (size_t i = 0; i < n; i++) x[i] = 0; +} + +template +void copyN(T *y, const T *x, size_t n) +{ + for (size_t i = 0; i < n; i++) y[i] = x[i]; +} + +/* + z[] = x[n] + y[n] + @note return 1 if having carry + z may be equal to x or y +*/ +template +T addN(T *z, const T *x, const T *y, size_t n) +{ + T c = 0; + for (size_t i = 0; i < n; i++) { + T xc = x[i] + c; + if (xc < c) { + // x[i] = Unit(-1) and c = 1 + z[i] = y[i]; + } else { + xc += y[i]; + c = y[i] > xc ? 
1 : 0; + z[i] = xc; + } + } + return c; +} + +/* + z[] = x[] + y +*/ +template +T addu1(T *z, const T *x, size_t n, T y) +{ + assert(n > 0); + T t = x[0] + y; + z[0] = t; + size_t i = 0; + if (t >= y) goto EXIT_0; + i = 1; + for (; i < n; i++) { + t = x[i] + 1; + z[i] = t; + if (t != 0) goto EXIT_0; + } + return 1; +EXIT_0: + i++; + for (; i < n; i++) { + z[i] = x[i]; + } + return 0; +} + +/* + x[] += y +*/ +template +T addu1(T *x, size_t n, T y) +{ + assert(n > 0); + T t = x[0] + y; + x[0] = t; + size_t i = 0; + if (t >= y) return 0; + i = 1; + for (; i < n; i++) { + t = x[i] + 1; + x[i] = t; + if (t != 0) return 0; + } + return 1; +} +/* + z[zn] = x[xn] + y[yn] + @note zn = max(xn, yn) +*/ +template +T addNM(T *z, const T *x, size_t xn, const T *y, size_t yn) +{ + if (yn > xn) { + fp::swap_(xn, yn); + fp::swap_(x, y); + } + assert(xn >= yn); + size_t max = xn; + size_t min = yn; + T c = vint::addN(z, x, y, min); + if (max > min) { + c = vint::addu1(z + min, x + min, max - min, c); + } + return c; +} + +/* + z[] = x[n] - y[n] + z may be equal to x or y +*/ +template +T subN(T *z, const T *x, const T *y, size_t n) +{ + assert(n > 0); + T c = 0; + for (size_t i = 0; i < n; i++) { + T yc = y[i] + c; + if (yc < c) { + // y[i] = T(-1) and c = 1 + z[i] = x[i]; + } else { + c = x[i] < yc ? 1 : 0; + z[i] = x[i] - yc; + } + } + return c; +} + +/* + out[] = x[n] - y +*/ +template +T subu1(T *z, const T *x, size_t n, T y) +{ + assert(n > 0); +#if 0 + T t = x[0]; + z[0] = t - y; + size_t i = 0; + if (t >= y) goto EXIT_0; + i = 1; + for (; i < n; i++ ){ + t = x[i]; + z[i] = t - 1; + if (t != 0) goto EXIT_0; + } + return 1; +EXIT_0: + i++; + for (; i < n; i++) { + z[i] = x[i]; + } + return 0; +#else + T c = x[0] < y ? 1 : 0; + z[0] = x[0] - y; + for (size_t i = 1; i < n; i++) { + if (x[i] < c) { + z[i] = T(-1); + } else { + z[i] = x[i] - c; + c = 0; + } + } + return c; +#endif +} + +/* + z[xn] = x[xn] - y[yn] + @note xn >= yn +*/ +template +T subNM(T *z, const T *x, size_t xn, const T *y, size_t yn) +{ + assert(xn >= yn); + T c = vint::subN(z, x, y, yn); + if (xn > yn) { + c = vint::subu1(z + yn, x + yn, xn - yn, c); + } + return c; +} + +/* + z[0..n) = x[0..n) * y + return z[n] + @note accept z == x +*/ +template +T mulu1(T *z, const T *x, size_t n, T y) +{ + assert(n > 0); + T H = 0; + for (size_t i = 0; i < n; i++) { + T t = H; + T L = mulUnit(&H, x[i], y); + z[i] = t + L; + if (z[i] < t) { + H++; + } + } + return H; // z[n] +} + +/* + z[xn * yn] = x[xn] * y[ym] +*/ +template +static inline void mulNM(T *z, const T *x, size_t xn, const T *y, size_t yn) +{ + assert(xn > 0 && yn > 0); + if (yn > xn) { + fp::swap_(yn, xn); + fp::swap_(x, y); + } + assert(xn >= yn); + if (z == x) { + T *p = (T*)CYBOZU_ALLOCA(sizeof(T) * xn); + copyN(p, x, xn); + x = p; + } + if (z == y) { + T *p = (T*)CYBOZU_ALLOCA(sizeof(T) * yn); + copyN(p, y, yn); + y = p; + } + z[xn] = vint::mulu1(&z[0], x, xn, y[0]); + clearN(z + xn + 1, yn - 1); + + T *t2 = (T*)CYBOZU_ALLOCA(sizeof(T) * (xn + 1)); + for (size_t i = 1; i < yn; i++) { + t2[xn] = vint::mulu1(&t2[0], x, xn, y[i]); + vint::addN(&z[i], &z[i], &t2[0], xn + 1); + } +} +/* + out[xn * 2] = x[xn] * x[xn] + QQQ : optimize this +*/ +template +static inline void sqrN(T *y, const T *x, size_t xn) +{ + mulNM(y, x, xn, x, xn); +} + +/* + q[] = x[] / y + @retval r = x[] % y + accept q == x +*/ +template +T divu1(T *q, const T *x, size_t n, T y) +{ + T r = 0; + for (int i = (int)n - 1; i >= 0; i--) { + q[i] = divUnit(&r, r, x[i], y); + } + return r; +} +/* + q[] = x[] / y + @retval 
r = x[] % y +*/ +template +T modu1(const T *x, size_t n, T y) +{ + T r = 0; + for (int i = (int)n - 1; i >= 0; i--) { + divUnit(&r, r, x[i], y); + } + return r; +} + +/* + y[] = x[] << bit + 0 < bit < sizeof(T) * 8 + accept y == x +*/ +template +T shlBit(T *y, const T *x, size_t xn, size_t bit) +{ + assert(0 < bit && bit < sizeof(T) * 8); + assert(xn > 0); + size_t rBit = sizeof(T) * 8 - bit; + T keep = x[xn - 1]; + T prev = keep; + for (size_t i = xn - 1; i > 0; i--) { + T t = x[i - 1]; + y[i] = (prev << bit) | (t >> rBit); + prev = t; + } + y[0] = prev << bit; + return keep >> rBit; +} + +/* + y[yn] = x[xn] << bit + yn = xn + (bit + unitBitBit - 1) / unitBitSize + accept y == x +*/ +template +void shlN(T *y, const T *x, size_t xn, size_t bit) +{ + assert(xn > 0); + const size_t unitBitSize = sizeof(T) * 8; + size_t q = bit / unitBitSize; + size_t r = bit % unitBitSize; + if (r == 0) { + // don't use copyN(y + q, x, xn); if overlaped + for (size_t i = 0; i < xn; i++) { + y[q + xn - 1 - i] = x[xn - 1 - i]; + } + } else { + y[q + xn] = shlBit(y + q, x, xn, r); + } + clearN(y, q); +} + +/* + y[] = x[] >> bit + 0 < bit < sizeof(T) * 8 +*/ +template +void shrBit(T *y, const T *x, size_t xn, size_t bit) +{ + assert(0 < bit && bit < sizeof(T) * 8); + assert(xn > 0); + size_t rBit = sizeof(T) * 8 - bit; + T prev = x[0]; + for (size_t i = 1; i < xn; i++) { + T t = x[i]; + y[i - 1] = (prev >> bit) | (t << rBit); + prev = t; + } + y[xn - 1] = prev >> bit; +} +/* + y[yn] = x[xn] >> bit + yn = xn - bit / unitBit +*/ +template +void shrN(T *y, const T *x, size_t xn, size_t bit) +{ + assert(xn > 0); + const size_t unitBitSize = sizeof(T) * 8; + size_t q = bit / unitBitSize; + size_t r = bit % unitBitSize; + assert(xn >= q); + if (r == 0) { + copyN(y, x + q, xn - q); + } else { + shrBit(y, x + q, xn - q, r); + } +} + +template +size_t getRealSize(const T *x, size_t xn) +{ + int i = (int)xn - 1; + for (; i > 0; i--) { + if (x[i]) { + return i + 1; + } + } + return 1; +} + +template +size_t getBitSize(const T *x, size_t n) +{ + if (n == 1 && x[0] == 0) return 1; + T v = x[n - 1]; + assert(v); + return (n - 1) * sizeof(T) * 8 + 1 + cybozu::bsr(v); +} + +/* + q[qn] = x[xn] / y[yn] ; qn == xn - yn + 1 if xn >= yn if q + r[rn] = x[xn] % y[yn] ; rn = yn before getRealSize + allow q == 0 +*/ +template +void divNM(T *q, size_t qn, T *r, const T *x, size_t xn, const T *y, size_t yn) +{ + assert(xn > 0 && yn > 0); + assert(xn < yn || (q == 0 || qn == xn - yn + 1)); + assert(q != r); + const size_t rn = yn; + xn = getRealSize(x, xn); + yn = getRealSize(y, yn); + if (x == y) { + assert(xn == yn); + x_is_y: + clearN(r, rn); + if (q) { + q[0] = 1; + clearN(q + 1, qn - 1); + } + return; + } + if (yn > xn) { + /* + if y > x then q = 0 and r = x + */ + q_is_zero: + copyN(r, x, xn); + clearN(r + xn, rn - xn); + if (q) clearN(q, qn); + return; + } + if (yn == 1) { + T t; + if (q) { + if (qn > xn) { + clearN(q + xn, qn - xn); + } + t = divu1(q, x, xn, y[0]); + } else { + t = modu1(x, xn, y[0]); + } + r[0] = t; + clearN(r + 1, rn - 1); + return; + } + const size_t yTopBit = cybozu::bsr(y[yn - 1]); + assert(yn >= 2); + if (xn == yn) { + const size_t xTopBit = cybozu::bsr(x[xn - 1]); + if (xTopBit < yTopBit) goto q_is_zero; + if (yTopBit == xTopBit) { + int ret = compareNM(x, xn, y, yn); + if (ret == 0) goto x_is_y; + if (ret < 0) goto q_is_zero; + if (r) { + subN(r, x, y, yn); + } + if (q) { + q[0] = 1; + clearN(q + 1, qn - 1); + } + return; + } + assert(xTopBit > yTopBit); + // fast reduction for larger than fullbit-3 size p 
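// ("fullbit-3" means the divisor's top bit sits within a few bits of the word
// boundary, as for full-width primes p: the one-word quotient is estimated as
// x[xn - 1] >> (yTopBit + 1), multiplied and subtracted once, then corrected by
// repeatedly subtracting y until the subtraction borrows)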
+ if (yTopBit >= sizeof(T) * 8 - 4) { + T *xx = (T*)CYBOZU_ALLOCA(sizeof(T) * xn); + T qv = 0; + if (yTopBit == sizeof(T) * 8 - 2) { + copyN(xx, x, xn); + } else { + qv = x[xn - 1] >> (yTopBit + 1); + mulu1(xx, y, yn, qv); + subN(xx, x, xx, xn); + xn = getRealSize(xx, xn); + } + for (;;) { + T ret = subN(xx, xx, y, yn); + if (ret) { + addN(xx, xx, y, yn); + break; + } + qv++; + xn = getRealSize(xx, xn); + } + if (r) { + copyN(r, xx, xn); + clearN(r + xn, rn - xn); + } + if (q) { + q[0] = qv; + clearN(q + 1, qn - 1); + } + return; + } + } + /* + bitwise left shift x and y to adjust MSB of y[yn - 1] = 1 + */ + const size_t shift = sizeof(T) * 8 - 1 - yTopBit; + T *xx = (T*)CYBOZU_ALLOCA(sizeof(T) * (xn + 1)); + const T *yy; + if (shift) { + T v = shlBit(xx, x, xn, shift); + if (v) { + xx[xn] = v; + xn++; + } + T *yBuf = (T*)CYBOZU_ALLOCA(sizeof(T) * yn); + shlBit(yBuf, y, yn ,shift); + yy = yBuf; + } else { + copyN(xx, x, xn); + yy = y; + } + if (q) { + clearN(q, qn); + } + assert((yy[yn - 1] >> (sizeof(T) * 8 - 1)) != 0); + T *tt = (T*)CYBOZU_ALLOCA(sizeof(T) * (yn + 1)); + while (xn > yn) { + size_t d = xn - yn; + T xTop = xx[xn - 1]; + T yTop = yy[yn - 1]; + if (xTop > yTop || (compareNM(xx + d, xn - d, yy, yn) >= 0)) { + vint::subN(xx + d, xx + d, yy, yn); + xn = getRealSize(xx, xn); + if (q) vint::addu1(q + d, qn - d, 1); + continue; + } + if (xTop == 1) { + vint::subNM(xx + d - 1, xx + d - 1, xn - d + 1, yy, yn); + xn = getRealSize(xx, xn); + if (q) vint::addu1(q + d - 1, qn - d + 1, 1); + continue; + } + tt[yn] = vint::mulu1(tt, yy, yn, xTop); + vint::subN(xx + d - 1, xx + d - 1, tt, yn + 1); + xn = getRealSize(xx, xn); + if (q) vint::addu1(q + d - 1, qn - d + 1, xTop); + } + if (xn == yn && compareNM(xx, xn, yy, yn) >= 0) { + subN(xx, xx, yy, yn); + xn = getRealSize(xx, xn); + if (q) vint::addu1(q, qn, 1); + } + if (shift) { + shrBit(r, xx, xn, shift); + } else { + copyN(r, xx, xn); + } + clearN(r + xn, rn - xn); +} + +#ifndef MCL_VINT_FIXED_BUFFER +template +class Buffer { + size_t allocSize_; + T *ptr_; +public: + typedef T Unit; + Buffer() : allocSize_(0), ptr_(0) {} + ~Buffer() + { + clear(); + } + Buffer(const Buffer& rhs) + : allocSize_(rhs.allocSize_) + , ptr_(0) + { + ptr_ = (T*)malloc(allocSize_ * sizeof(T)); + if (ptr_ == 0) throw cybozu::Exception("Buffer:malloc") << rhs.allocSize_; + memcpy(ptr_, rhs.ptr_, allocSize_ * sizeof(T)); + } + Buffer& operator=(const Buffer& rhs) + { + Buffer t(rhs); + swap(t); + return *this; + } + void swap(Buffer& rhs) +#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 + noexcept +#endif + { + fp::swap_(allocSize_, rhs.allocSize_); + fp::swap_(ptr_, rhs.ptr_); + } + void clear() + { + allocSize_ = 0; + free(ptr_); + ptr_ = 0; + } + + /* + @note extended buffer may be not cleared + */ + void alloc(bool *pb, size_t n) + { + if (n > allocSize_) { + T *p = (T*)malloc(n * sizeof(T)); + if (p == 0) { + *pb = false; + return; + } + copyN(p, ptr_, allocSize_); + free(ptr_); + ptr_ = p; + allocSize_ = n; + } + *pb = true; + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + void alloc(size_t n) + { + bool b; + alloc(&b, n); + if (!b) throw cybozu::Exception("Buffer:alloc"); + } +#endif + /* + *this = rhs + rhs may be destroyed + */ + const T& operator[](size_t n) const { return ptr_[n]; } + T& operator[](size_t n) { return ptr_[n]; } +}; +#endif + +template +class FixedBuffer { + enum { + N = (BitLen + sizeof(T) * 8 - 1) / (sizeof(T) * 8) + }; + size_t size_; + T v_[N]; +public: + typedef T Unit; + FixedBuffer() + : size_(0) + { + } + FixedBuffer(const 
FixedBuffer& rhs) + { + operator=(rhs); + } + FixedBuffer& operator=(const FixedBuffer& rhs) + { + size_ = rhs.size_; + for (size_t i = 0; i < size_; i++) { + v_[i] = rhs.v_[i]; + } + return *this; + } + void clear() { size_ = 0; } + void alloc(bool *pb, size_t n) + { + if (n > N) { + *pb = false; + return; + } + size_ = n; + *pb = true; + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + void alloc(size_t n) + { + bool b; + alloc(&b, n); + if (!b) throw cybozu::Exception("FixedBuffer:alloc"); + } +#endif + void swap(FixedBuffer& rhs) + { + FixedBuffer *p1 = this; + FixedBuffer *p2 = &rhs; + if (p1->size_ < p2->size_) { + fp::swap_(p1, p2); + } + assert(p1->size_ >= p2->size_); + for (size_t i = 0; i < p2->size_; i++) { + fp::swap_(p1->v_[i], p2->v_[i]); + } + for (size_t i = p2->size_; i < p1->size_; i++) { + p2->v_[i] = p1->v_[i]; + } + fp::swap_(p1->size_, p2->size_); + } + // to avoid warning of gcc + void verify(size_t n) const + { + assert(n <= N); + (void)n; + } + const T& operator[](size_t n) const { verify(n); return v_[n]; } + T& operator[](size_t n) { verify(n); return v_[n]; } +}; + +#if MCL_SIZEOF_UNIT == 8 +/* + M = 1 << 256 + a = M mod p = (1 << 32) + 0x3d1 + [H:L] mod p = H * a + L + + if H = L = M - 1, t = H * a + L = aM + (M - a - 1) + H' = a, L' = M - a - 1 + t' = H' * a + L' = M + (a^2 - a - 1) + H'' = 1, L'' = a^2 - a - 1 + t'' = H'' * a + L'' = a^2 - 1 +*/ +inline void mcl_fpDbl_mod_SECP256K1(Unit *z, const Unit *x, const Unit *p) +{ + const Unit a = (uint64_t(1) << 32) + 0x3d1; + Unit buf[5]; + buf[4] = mulu1(buf, x + 4, 4, a); // H * a + buf[4] += addN(buf, buf, x, 4); // t = H * a + L + Unit x2[2]; + x2[0] = mulUnit(&x2[1], buf[4], a); + Unit x3 = addN(buf, buf, x2, 2); + if (x3) { + x3 = addu1(buf + 2, buf + 2, 2, Unit(1)); // t' = H' * a + L' + if (x3) { + x3 = addu1(buf, buf, 4, a); + assert(x3 == 0); + } + } + if (fp::isGreaterOrEqualArray(buf, p, 4)) { + subN(z, buf, p, 4); + } else { + fp::copyArray(z, buf, 4); + } +} + +inline void mcl_fp_mul_SECP256K1(Unit *z, const Unit *x, const Unit *y, const Unit *p) +{ + Unit xy[8]; + mulNM(xy, x, 4, y, 4); + mcl_fpDbl_mod_SECP256K1(z, xy, p); +} +inline void mcl_fp_sqr_SECP256K1(Unit *y, const Unit *x, const Unit *p) +{ + Unit xx[8]; + sqrN(xx, x, 4); + mcl_fpDbl_mod_SECP256K1(y, xx, p); +} +#endif + +} // vint + +/** + signed integer with variable length +*/ +template +class VintT { +public: + typedef _Buffer Buffer; + typedef typename Buffer::Unit Unit; + static const size_t unitBitSize = sizeof(Unit) * 8; + static const int invalidVar = -2147483647 - 1; // abs(invalidVar) is not defined +private: + Buffer buf_; + size_t size_; + bool isNeg_; + void trim(size_t n) + { + assert(n > 0); + int i = (int)n - 1; + for (; i > 0; i--) { + if (buf_[i]) { + size_ = i + 1; + return; + } + } + size_ = 1; + // zero + if (buf_[0] == 0) { + isNeg_ = false; + } + } + static int ucompare(const Buffer& x, size_t xn, const Buffer& y, size_t yn) + { + return vint::compareNM(&x[0], xn, &y[0], yn); + } + static void uadd(VintT& z, const Buffer& x, size_t xn, const Buffer& y, size_t yn) + { + size_t zn = fp::max_(xn, yn) + 1; + bool b; + z.buf_.alloc(&b, zn); + assert(b); (void)b; + z.buf_[zn - 1] = vint::addNM(&z.buf_[0], &x[0], xn, &y[0], yn); + z.trim(zn); + } + static void uadd1(VintT& z, const Buffer& x, size_t xn, Unit y) + { + size_t zn = xn + 1; + bool b; + z.buf_.alloc(&b, zn); + assert(b); (void)b; + z.buf_[zn - 1] = vint::addu1(&z.buf_[0], &x[0], xn, y); + z.trim(zn); + } + static void usub1(VintT& z, const Buffer& x, size_t xn, Unit y) + 
{ + size_t zn = xn; + bool b; + z.buf_.alloc(&b, zn); + assert(b); (void)b; + Unit c = vint::subu1(&z.buf_[0], &x[0], xn, y); + (void)c; + assert(!c); + z.trim(zn); + } + static void usub(VintT& z, const Buffer& x, size_t xn, const Buffer& y, size_t yn) + { + assert(xn >= yn); + bool b; + z.buf_.alloc(&b, xn); + assert(b); (void)b; + Unit c = vint::subN(&z.buf_[0], &x[0], &y[0], yn); + if (xn > yn) { + c = vint::subu1(&z.buf_[yn], &x[yn], xn - yn, c); + } + assert(!c); + z.trim(xn); + } + static void _add(VintT& z, const VintT& x, bool xNeg, const VintT& y, bool yNeg) + { + if ((xNeg ^ yNeg) == 0) { + // same sign + uadd(z, x.buf_, x.size(), y.buf_, y.size()); + z.isNeg_ = xNeg; + return; + } + int r = ucompare(x.buf_, x.size(), y.buf_, y.size()); + if (r >= 0) { + usub(z, x.buf_, x.size(), y.buf_, y.size()); + z.isNeg_ = xNeg; + } else { + usub(z, y.buf_, y.size(), x.buf_, x.size()); + z.isNeg_ = yNeg; + } + } + static void _adds1(VintT& z, const VintT& x, int y, bool yNeg) + { + assert(y >= 0); + if ((x.isNeg_ ^ yNeg) == 0) { + // same sign + uadd1(z, x.buf_, x.size(), y); + z.isNeg_ = yNeg; + return; + } + if (x.size() > 1 || x.buf_[0] >= (Unit)y) { + usub1(z, x.buf_, x.size(), y); + z.isNeg_ = x.isNeg_; + } else { + z = y - x.buf_[0]; + z.isNeg_ = yNeg; + } + } + static void _addu1(VintT& z, const VintT& x, Unit y, bool yNeg) + { + if ((x.isNeg_ ^ yNeg) == 0) { + // same sign + uadd1(z, x.buf_, x.size(), y); + z.isNeg_ = yNeg; + return; + } + if (x.size() > 1 || x.buf_[0] >= y) { + usub1(z, x.buf_, x.size(), y); + z.isNeg_ = x.isNeg_; + } else { + z = y - x.buf_[0]; + z.isNeg_ = yNeg; + } + } + /** + @param q [out] x / y if q != 0 + @param r [out] x % y + */ + static void udiv(VintT* q, VintT& r, const Buffer& x, size_t xn, const Buffer& y, size_t yn) + { + assert(q != &r); + if (xn < yn) { + r.buf_ = x; + r.trim(xn); + if (q) q->clear(); + return; + } + size_t qn = xn - yn + 1; + bool b; + if (q) { + q->buf_.alloc(&b, qn); + assert(b); (void)b; + } + r.buf_.alloc(&b, yn); + assert(b); (void)b; + vint::divNM(q ? 
&q->buf_[0] : 0, qn, &r.buf_[0], &x[0], xn, &y[0], yn); + if (q) { + q->trim(qn); + } + r.trim(yn); + } + /* + @param x [inout] x <- d + @retval s for x = 2^s d where d is odd + */ + static uint32_t countTrailingZero(VintT& x) + { + uint32_t s = 0; + while (x.isEven()) { + x >>= 1; + s++; + } + return s; + } + struct MulMod { + const VintT *pm; + void operator()(VintT& z, const VintT& x, const VintT& y) const + { + VintT::mul(z, x, y); + z %= *pm; + } + }; + struct SqrMod { + const VintT *pm; + void operator()(VintT& y, const VintT& x) const + { + VintT::sqr(y, x); + y %= *pm; + } + }; +public: + VintT(int x = 0) + : size_(0) + { + *this = x; + } + VintT(Unit x) + : size_(0) + { + *this = x; + } + VintT(const VintT& rhs) + : buf_(rhs.buf_) + , size_(rhs.size_) + , isNeg_(rhs.isNeg_) + { + } + VintT& operator=(int x) + { + assert(x != invalidVar); + isNeg_ = x < 0; + bool b; + buf_.alloc(&b, 1); + assert(b); (void)b; + buf_[0] = fp::abs_(x); + size_ = 1; + return *this; + } + VintT& operator=(Unit x) + { + isNeg_ = false; + bool b; + buf_.alloc(&b, 1); + assert(b); (void)b; + buf_[0] = x; + size_ = 1; + return *this; + } + VintT& operator=(const VintT& rhs) + { + buf_ = rhs.buf_; + size_ = rhs.size_; + isNeg_ = rhs.isNeg_; + return *this; + } +#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 + VintT(VintT&& rhs) + : buf_(rhs.buf_) + , size_(rhs.size_) + , isNeg_(rhs.isNeg_) + { + } + VintT& operator=(VintT&& rhs) + { + buf_ = std::move(rhs.buf_); + size_ = rhs.size_; + isNeg_ = rhs.isNeg_; + return *this; + } +#endif + void swap(VintT& rhs) +#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 + noexcept +#endif + { + fp::swap_(buf_, rhs.buf_); + fp::swap_(size_, rhs.size_); + fp::swap_(isNeg_, rhs.isNeg_); + } + void dump(const char *msg = "") const + { + vint::dump(&buf_[0], size_, msg); + } + /* + set positive value + @note assume little endian system + */ + template + void setArray(bool *pb, const S *x, size_t size) + { + isNeg_ = false; + if (size == 0) { + clear(); + *pb = true; + return; + } + size_t unitSize = (sizeof(S) * size + sizeof(Unit) - 1) / sizeof(Unit); + buf_.alloc(pb, unitSize); + if (!*pb) return; + char *dst = (char *)&buf_[0]; + const char *src = (const char *)x; + size_t i = 0; + for (; i < sizeof(S) * size; i++) { + dst[i] = src[i]; + } + for (; i < sizeof(Unit) * unitSize; i++) { + dst[i] = 0; + } + trim(unitSize); + } + /* + set [0, max) randomly + */ + void setRand(bool *pb, const VintT& max, fp::RandGen rg = fp::RandGen()) + { + assert(max > 0); + if (rg.isZero()) rg = fp::RandGen::get(); + size_t n = max.size(); + buf_.alloc(pb, n); + if (!*pb) return; + rg.read(pb, &buf_[0], n * sizeof(buf_[0])); + if (!*pb) return; + trim(n); + *this %= max; + } + /* + get abs value + buf_[0, size) = x + buf_[size, maxSize) with zero + @note assume little endian system + */ + void getArray(bool *pb, Unit *x, size_t maxSize) const + { + size_t n = size(); + if (n > maxSize) { + *pb = false; + return; + } + vint::copyN(x, &buf_[0], n); + vint::clearN(x + n, maxSize - n); + *pb = true; + } + void clear() { *this = 0; } + template + void save(bool *pb, OutputStream& os, int base = 10) const + { + if (isNeg_) cybozu::writeChar(pb, os, '-'); + char buf[1024]; + size_t n = mcl::fp::arrayToStr(buf, sizeof(buf), &buf_[0], size_, base, false); + if (n == 0) { + *pb = false; + return; + } + cybozu::write(pb, os, buf + sizeof(buf) - n, n); + } + /* + set buf with string terminated by '\0' + return strlen(buf) if success else 0 + */ + size_t getStr(char *buf, size_t bufSize, int base = 
10) const + { + cybozu::MemoryOutputStream os(buf, bufSize); + bool b; + save(&b, os, base); + const size_t n = os.getPos(); + if (!b || n == bufSize) return 0; + buf[n] = '\0'; + return n; + } + /* + return bitSize(abs(*this)) + @note return 1 if zero + */ + size_t getBitSize() const + { + if (isZero()) return 1; + size_t n = size(); + Unit v = buf_[n - 1]; + assert(v); + return (n - 1) * sizeof(Unit) * 8 + 1 + cybozu::bsr(v); + } + // ignore sign + bool testBit(size_t i) const + { + size_t q = i / unitBitSize; + size_t r = i % unitBitSize; + assert(q <= size()); + Unit mask = Unit(1) << r; + return (buf_[q] & mask) != 0; + } + void setBit(size_t i, bool v = true) + { + size_t q = i / unitBitSize; + size_t r = i % unitBitSize; + assert(q <= size()); + bool b; + buf_.alloc(&b, q + 1); + assert(b); (void)b; + Unit mask = Unit(1) << r; + if (v) { + buf_[q] |= mask; + } else { + buf_[q] &= ~mask; + trim(q + 1); + } + } + /* + @param str [in] number string + @note "0x..." => base = 16 + "0b..." => base = 2 + otherwise => base = 10 + */ + void setStr(bool *pb, const char *str, int base = 0) + { + // allow twice size of MCL_MAX_BIT_SIZE because of multiplication + const size_t maxN = (MCL_MAX_BIT_SIZE * 2 + unitBitSize - 1) / unitBitSize; + buf_.alloc(pb, maxN); + if (!*pb) return; + *pb = false; + isNeg_ = false; + size_t len = strlen(str); + size_t n = fp::strToArray(&isNeg_, &buf_[0], maxN, str, len, base); + if (n == 0) return; + trim(n); + *pb = true; + } + static int compare(const VintT& x, const VintT& y) + { + if (x.isNeg_ ^ y.isNeg_) { + if (x.isZero() && y.isZero()) return 0; + return x.isNeg_ ? -1 : 1; + } else { + // same sign + int c = ucompare(x.buf_, x.size(), y.buf_, y.size()); + if (x.isNeg_) { + return -c; + } + return c; + } + } + static int compares1(const VintT& x, int y) + { + assert(y != invalidVar); + if (x.isNeg_ ^ (y < 0)) { + if (x.isZero() && y == 0) return 0; + return x.isNeg_ ? -1 : 1; + } else { + // same sign + Unit y0 = fp::abs_(y); + int c = vint::compareNM(&x.buf_[0], x.size(), &y0, 1); + if (x.isNeg_) { + return -c; + } + return c; + } + } + static int compareu1(const VintT& x, uint32_t y) + { + if (x.isNeg_) return -1; + if (x.size() > 1) return 1; + Unit x0 = x.buf_[0]; + return x0 > y ? 1 : x0 == y ? 
0 : -1; + } + size_t size() const { return size_; } + bool isZero() const { return size() == 1 && buf_[0] == 0; } + bool isNegative() const { return !isZero() && isNeg_; } + uint32_t getLow32bit() const { return (uint32_t)buf_[0]; } + bool isOdd() const { return (buf_[0] & 1) == 1; } + bool isEven() const { return !isOdd(); } + const Unit *getUnit() const { return &buf_[0]; } + size_t getUnitSize() const { return size_; } + static void add(VintT& z, const VintT& x, const VintT& y) + { + _add(z, x, x.isNeg_, y, y.isNeg_); + } + static void sub(VintT& z, const VintT& x, const VintT& y) + { + _add(z, x, x.isNeg_, y, !y.isNeg_); + } + static void mul(VintT& z, const VintT& x, const VintT& y) + { + const size_t xn = x.size(); + const size_t yn = y.size(); + size_t zn = xn + yn; + bool b; + z.buf_.alloc(&b, zn); + assert(b); (void)b; + vint::mulNM(&z.buf_[0], &x.buf_[0], xn, &y.buf_[0], yn); + z.isNeg_ = x.isNeg_ ^ y.isNeg_; + z.trim(zn); + } + static void sqr(VintT& y, const VintT& x) + { + mul(y, x, x); + } + static void addu1(VintT& z, const VintT& x, Unit y) + { + _addu1(z, x, y, false); + } + static void subu1(VintT& z, const VintT& x, Unit y) + { + _addu1(z, x, y, true); + } + static void mulu1(VintT& z, const VintT& x, Unit y) + { + size_t xn = x.size(); + size_t zn = xn + 1; + bool b; + z.buf_.alloc(&b, zn); + assert(b); (void)b; + z.buf_[zn - 1] = vint::mulu1(&z.buf_[0], &x.buf_[0], xn, y); + z.isNeg_ = x.isNeg_; + z.trim(zn); + } + static void divu1(VintT& q, const VintT& x, Unit y) + { + udivModu1(&q, x, y); + } + static void modu1(VintT& r, const VintT& x, Unit y) + { + bool xNeg = x.isNeg_; + r = divModu1(0, x, y); + r.isNeg_ = xNeg; + } + static void adds1(VintT& z, const VintT& x, int y) + { + assert(y != invalidVar); + _adds1(z, x, fp::abs_(y), y < 0); + } + static void subs1(VintT& z, const VintT& x, int y) + { + assert(y != invalidVar); + _adds1(z, x, fp::abs_(y), !(y < 0)); + } + static void muls1(VintT& z, const VintT& x, int y) + { + assert(y != invalidVar); + mulu1(z, x, fp::abs_(y)); + z.isNeg_ ^= (y < 0); + } + /* + @param q [out] q = x / y if q is not zero + @param x [in] + @param y [in] must be not zero + return x % y + */ + static int divMods1(VintT *q, const VintT& x, int y) + { + assert(y != invalidVar); + bool xNeg = x.isNeg_; + bool yNeg = y < 0; + Unit absY = fp::abs_(y); + size_t xn = x.size(); + int r; + if (q) { + q->isNeg_ = xNeg ^ yNeg; + bool b; + q->buf_.alloc(&b, xn); + assert(b); (void)b; + r = (int)vint::divu1(&q->buf_[0], &x.buf_[0], xn, absY); + q->trim(xn); + } else { + r = (int)vint::modu1(&x.buf_[0], xn, absY); + } + return xNeg ? -r : r; + } + /* + like C + 13 / 5 = 2 ... 3 + 13 / -5 = -2 ... 3 + -13 / 5 = -2 ... -3 + -13 / -5 = 2 ... 
-3 + */ + static void divMod(VintT *q, VintT& r, const VintT& x, const VintT& y) + { + bool qsign = x.isNeg_ ^ y.isNeg_; + udiv(q, r, x.buf_, x.size(), y.buf_, y.size()); + r.isNeg_ = x.isNeg_; + if (q) q->isNeg_ = qsign; + } + static void div(VintT& q, const VintT& x, const VintT& y) + { + VintT r; + divMod(&q, r, x, y); + } + static void mod(VintT& r, const VintT& x, const VintT& y) + { + divMod(0, r, x, y); + } + static void divs1(VintT& q, const VintT& x, int y) + { + divMods1(&q, x, y); + } + static void mods1(VintT& r, const VintT& x, int y) + { + bool xNeg = x.isNeg_; + r = divMods1(0, x, y); + r.isNeg_ = xNeg; + } + static Unit udivModu1(VintT *q, const VintT& x, Unit y) + { + assert(!x.isNeg_); + size_t xn = x.size(); + if (q) { + bool b; + q->buf_.alloc(&b, xn); + assert(b); (void)b; + } + Unit r = vint::divu1(q ? &q->buf_[0] : 0, &x.buf_[0], xn, y); + if (q) { + q->trim(xn); + q->isNeg_ = false; + } + return r; + } + /* + like Python + 13 / 5 = 2 ... 3 + 13 / -5 = -3 ... -2 + -13 / 5 = -3 ... 2 + -13 / -5 = 2 ... -3 + */ + static void quotRem(VintT *q, VintT& r, const VintT& x, const VintT& y) + { + VintT yy = y; + bool qsign = x.isNeg_ ^ y.isNeg_; + udiv(q, r, x.buf_, x.size(), y.buf_, y.size()); + r.isNeg_ = y.isNeg_; + if (q) q->isNeg_ = qsign; + if (!r.isZero() && qsign) { + if (q) { + uadd1(*q, q->buf_, q->size(), 1); + } + usub(r, yy.buf_, yy.size(), r.buf_, r.size()); + } + } + template + void load(bool *pb, InputStream& is, int ioMode) + { + *pb = false; + char buf[1024]; + size_t n = fp::local::loadWord(buf, sizeof(buf), is); + if (n == 0) return; + const size_t maxN = 384 / (sizeof(MCL_SIZEOF_UNIT) * 8); + buf_.alloc(pb, maxN); + if (!*pb) return; + isNeg_ = false; + n = fp::strToArray(&isNeg_, &buf_[0], maxN, buf, n, ioMode); + if (n == 0) return; + trim(n); + *pb = true; + } + // logical left shift (copy sign) + static void shl(VintT& y, const VintT& x, size_t shiftBit) + { + size_t xn = x.size(); + size_t yn = xn + (shiftBit + unitBitSize - 1) / unitBitSize; + bool b; + y.buf_.alloc(&b, yn); + assert(b); (void)b; + vint::shlN(&y.buf_[0], &x.buf_[0], xn, shiftBit); + y.isNeg_ = x.isNeg_; + y.trim(yn); + } + // logical right shift (copy sign) + static void shr(VintT& y, const VintT& x, size_t shiftBit) + { + size_t xn = x.size(); + if (xn * unitBitSize <= shiftBit) { + y.clear(); + return; + } + size_t yn = xn - shiftBit / unitBitSize; + bool b; + y.buf_.alloc(&b, yn); + assert(b); (void)b; + vint::shrN(&y.buf_[0], &x.buf_[0], xn, shiftBit); + y.isNeg_ = x.isNeg_; + y.trim(yn); + } + static void neg(VintT& y, const VintT& x) + { + if (&y != &x) { y = x; } + y.isNeg_ = !x.isNeg_; + } + static void abs(VintT& y, const VintT& x) + { + if (&y != &x) { y = x; } + y.isNeg_ = false; + } + static VintT abs(const VintT& x) + { + VintT y = x; + abs(y, x); + return y; + } + // accept only non-negative value + static void orBit(VintT& z, const VintT& x, const VintT& y) + { + assert(!x.isNeg_ && !y.isNeg_); + const VintT *px = &x, *py = &y; + if (x.size() < y.size()) { + fp::swap_(px, py); + } + size_t xn = px->size(); + size_t yn = py->size(); + assert(xn >= yn); + bool b; + z.buf_.alloc(&b, xn); + assert(b); (void)b; + for (size_t i = 0; i < yn; i++) { + z.buf_[i] = x.buf_[i] | y.buf_[i]; + } + vint::copyN(&z.buf_[0] + yn, &px->buf_[0] + yn, xn - yn); + z.trim(xn); + } + static void andBit(VintT& z, const VintT& x, const VintT& y) + { + assert(!x.isNeg_ && !y.isNeg_); + const VintT *px = &x, *py = &y; + if (x.size() < y.size()) { + fp::swap_(px, py); + } + size_t yn = 
py->size(); + assert(px->size() >= yn); + bool b; + z.buf_.alloc(&b, yn); + assert(b); (void)b; + for (size_t i = 0; i < yn; i++) { + z.buf_[i] = x.buf_[i] & y.buf_[i]; + } + z.trim(yn); + } + static void orBitu1(VintT& z, const VintT& x, Unit y) + { + assert(!x.isNeg_); + z = x; + z.buf_[0] |= y; + } + static void andBitu1(VintT& z, const VintT& x, Unit y) + { + assert(!x.isNeg_); + bool b; + z.buf_.alloc(&b, 1); + assert(b); (void)b; + z.buf_[0] = x.buf_[0] & y; + z.size_ = 1; + z.isNeg_ = false; + } + /* + REMARK y >= 0; + */ + static void pow(VintT& z, const VintT& x, const VintT& y) + { + assert(!y.isNeg_); + const VintT xx = x; + z = 1; + mcl::fp::powGeneric(z, xx, &y.buf_[0], y.size(), mul, sqr, (void (*)(VintT&, const VintT&))0); + } + /* + REMARK y >= 0; + */ + static void pow(VintT& z, const VintT& x, int64_t y) + { + assert(y >= 0); + const VintT xx = x; + z = 1; +#if MCL_SIZEOF_UNIT == 8 + Unit ua = fp::abs_(y); + mcl::fp::powGeneric(z, xx, &ua, 1, mul, sqr, (void (*)(VintT&, const VintT&))0); +#else + uint64_t ua = fp::abs_(y); + Unit u[2] = { uint32_t(ua), uint32_t(ua >> 32) }; + size_t un = u[1] ? 2 : 1; + mcl::fp::powGeneric(z, xx, u, un, mul, sqr, (void (*)(VintT&, const VintT&))0); +#endif + } + /* + z = x ^ y mod m + REMARK y >= 0; + */ + static void powMod(VintT& z, const VintT& x, const VintT& y, const VintT& m) + { + assert(!y.isNeg_); + VintT zz; + MulMod mulMod; + SqrMod sqrMod; + mulMod.pm = &m; + sqrMod.pm = &m; + zz = 1; + mcl::fp::powGeneric(zz, x, &y.buf_[0], y.size(), mulMod, sqrMod, (void (*)(VintT&, const VintT&))0); + z.swap(zz); + } + /* + inverse mod + y = 1/x mod m + REMARK x != 0 and m != 0; + */ + static void invMod(VintT& y, const VintT& x, const VintT& m) + { + assert(!x.isZero() && !m.isZero()); + if (x == 1) { + y = 1; + return; + } + VintT a = 1; + VintT t; + VintT q; + divMod(&q, t, m, x); + VintT s = x; + VintT b = -q; + + for (;;) { + divMod(&q, s, s, t); + if (s.isZero()) { + if (b.isNeg_) { + b += m; + } + y = b; + return; + } + a -= b * q; + + divMod(&q, t, t, s); + if (t.isZero()) { + if (a.isNeg_) { + a += m; + } + y = a; + return; + } + b -= a * q; + } + } + /* + Miller-Rabin + */ + static bool isPrime(bool *pb, const VintT& n, int tryNum = 32) + { + *pb = true; + if (n <= 1) return false; + if (n == 2 || n == 3) return true; + if (n.isEven()) return false; + cybozu::XorShift rg; + const VintT nm1 = n - 1; + VintT d = nm1; + uint32_t r = countTrailingZero(d); + // n - 1 = 2^r d + VintT a, x; + for (int i = 0; i < tryNum; i++) { + a.setRand(pb, n - 3, rg); + if (!*pb) return false; + a += 2; // a in [2, n - 2] + powMod(x, a, d, n); + if (x == 1 || x == nm1) { + continue; + } + for (uint32_t j = 1; j < r; j++) { + sqr(x, x); + x %= n; + if (x == 1) return false; + if (x == nm1) goto NEXT_LOOP; + } + return false; + NEXT_LOOP:; + } + return true; + } + bool isPrime(bool *pb, int tryNum = 32) const + { + return isPrime(pb, *this, tryNum); + } + static void gcd(VintT& z, VintT x, VintT y) + { + VintT t; + for (;;) { + if (y.isZero()) { + z = x; + return; + } + t = x; + x = y; + mod(y, t, y); + } + } + static VintT gcd(const VintT& x, const VintT& y) + { + VintT z; + gcd(z, x, y); + return z; + } + static void lcm(VintT& z, const VintT& x, const VintT& y) + { + VintT c; + gcd(c, x, y); + div(c, x, c); + mul(z, c, y); + } + static VintT lcm(const VintT& x, const VintT& y) + { + VintT z; + lcm(z, x, y); + return z; + } + /* + 1 if m is quadratic residue modulo n (i.e., there exists an x s.t. 
x^2 = m mod n) + 0 if m = 0 mod n + -1 otherwise + @note return legendre_symbol(m, p) for m and odd prime p + */ + static int jacobi(VintT m, VintT n) + { + assert(n.isOdd()); + if (n == 1) return 1; + if (m < 0 || m > n) { + quotRem(0, m, m, n); // m = m mod n + } + if (m.isZero()) return 0; + if (m == 1) return 1; + if (gcd(m, n) != 1) return 0; + + int j = 1; + VintT t; + goto START; + while (m != 1) { + if ((m.getLow32bit() % 4) == 3 && (n.getLow32bit() % 4) == 3) { + j = -j; + } + mod(t, n, m); + n = m; + m = t; + START: + int s = countTrailingZero(m); + uint32_t nmod8 = n.getLow32bit() % 8; + if ((s % 2) && (nmod8 == 3 || nmod8 == 5)) { + j = -j; + } + } + return j; + } +#ifndef CYBOZU_DONT_USE_STRING + explicit VintT(const std::string& str) + : size_(0) + { + setStr(str); + } + void getStr(std::string& s, int base = 10) const + { + s.clear(); + cybozu::StringOutputStream os(s); + save(os, base); + } + std::string getStr(int base = 10) const + { + std::string s; + getStr(s, base); + return s; + } + inline friend std::ostream& operator<<(std::ostream& os, const VintT& x) + { + return os << x.getStr(os.flags() & std::ios_base::hex ? 16 : 10); + } + inline friend std::istream& operator>>(std::istream& is, VintT& x) + { + x.load(is); + return is; + } +#endif +#ifndef CYBOZU_DONT_USE_EXCEPTION + void setStr(const std::string& str, int base = 0) + { + bool b; + setStr(&b, str.c_str(), base); + if (!b) throw cybozu::Exception("Vint:setStr") << str; + } + void setRand(const VintT& max, fp::RandGen rg = fp::RandGen()) + { + bool b; + setRand(&b, max, rg); + if (!b) throw cybozu::Exception("Vint:setRand"); + } + void getArray(Unit *x, size_t maxSize) const + { + bool b; + getArray(&b, x, maxSize); + if (!b) throw cybozu::Exception("Vint:getArray"); + } + template<class InputStream> + void load(InputStream& is, int ioMode = 0) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("Vint:load"); + } + template<class OutputStream> + void save(OutputStream& os, int base = 10) const + { + bool b; + save(&b, os, base); + if (!b) throw cybozu::Exception("Vint:save"); + } + static bool isPrime(const VintT& n, int tryNum = 32) + { + bool b; + bool ret = isPrime(&b, n, tryNum); + if (!b) throw cybozu::Exception("Vint:isPrime"); + return ret; + } + bool isPrime(int tryNum = 32) const + { + bool b; + bool ret = isPrime(&b, *this, tryNum); + if (!b) throw cybozu::Exception("Vint:isPrime"); + return ret; + } + template<class S> + void setArray(const S *x, size_t size) + { + bool b; + setArray(&b, x, size); + if (!b) throw cybozu::Exception("Vint:setArray"); + } +#endif + VintT& operator++() { adds1(*this, *this, 1); return *this; } + VintT& operator--() { subs1(*this, *this, 1); return *this; } + VintT operator++(int) { VintT c = *this; adds1(*this, *this, 1); return c; } + VintT operator--(int) { VintT c = *this; subs1(*this, *this, 1); return c; } + friend bool operator<(const VintT& x, const VintT& y) { return compare(x, y) < 0; } + friend bool operator>=(const VintT& x, const VintT& y) { return !operator<(x, y); } + friend bool operator>(const VintT& x, const VintT& y) { return compare(x, y) > 0; } + friend bool operator<=(const VintT& x, const VintT& y) { return !operator>(x, y); } + friend bool operator==(const VintT& x, const VintT& y) { return compare(x, y) == 0; } + friend bool operator!=(const VintT& x, const VintT& y) { return !operator==(x, y); } + + friend bool operator<(const VintT& x, int y) { return compares1(x, y) < 0; } + friend bool operator>=(const VintT& x, int y) { return !operator<(x, y); } + friend bool 
operator>(const VintT& x, int y) { return compares1(x, y) > 0; } + friend bool operator<=(const VintT& x, int y) { return !operator>(x, y); } + friend bool operator==(const VintT& x, int y) { return compares1(x, y) == 0; } + friend bool operator!=(const VintT& x, int y) { return !operator==(x, y); } + + friend bool operator<(const VintT& x, uint32_t y) { return compareu1(x, y) < 0; } + friend bool operator>=(const VintT& x, uint32_t y) { return !operator<(x, y); } + friend bool operator>(const VintT& x, uint32_t y) { return compareu1(x, y) > 0; } + friend bool operator<=(const VintT& x, uint32_t y) { return !operator>(x, y); } + friend bool operator==(const VintT& x, uint32_t y) { return compareu1(x, y) == 0; } + friend bool operator!=(const VintT& x, uint32_t y) { return !operator==(x, y); } + + VintT& operator+=(const VintT& rhs) { add(*this, *this, rhs); return *this; } + VintT& operator-=(const VintT& rhs) { sub(*this, *this, rhs); return *this; } + VintT& operator*=(const VintT& rhs) { mul(*this, *this, rhs); return *this; } + VintT& operator/=(const VintT& rhs) { div(*this, *this, rhs); return *this; } + VintT& operator%=(const VintT& rhs) { mod(*this, *this, rhs); return *this; } + VintT& operator&=(const VintT& rhs) { andBit(*this, *this, rhs); return *this; } + VintT& operator|=(const VintT& rhs) { orBit(*this, *this, rhs); return *this; } + + VintT& operator+=(int rhs) { adds1(*this, *this, rhs); return *this; } + VintT& operator-=(int rhs) { subs1(*this, *this, rhs); return *this; } + VintT& operator*=(int rhs) { muls1(*this, *this, rhs); return *this; } + VintT& operator/=(int rhs) { divs1(*this, *this, rhs); return *this; } + VintT& operator%=(int rhs) { mods1(*this, *this, rhs); return *this; } + VintT& operator+=(Unit rhs) { addu1(*this, *this, rhs); return *this; } + VintT& operator-=(Unit rhs) { subu1(*this, *this, rhs); return *this; } + VintT& operator*=(Unit rhs) { mulu1(*this, *this, rhs); return *this; } + VintT& operator/=(Unit rhs) { divu1(*this, *this, rhs); return *this; } + VintT& operator%=(Unit rhs) { modu1(*this, *this, rhs); return *this; } + + VintT& operator&=(Unit rhs) { andBitu1(*this, *this, rhs); return *this; } + VintT& operator|=(Unit rhs) { orBitu1(*this, *this, rhs); return *this; } + + friend VintT operator+(const VintT& a, const VintT& b) { VintT c; add(c, a, b); return c; } + friend VintT operator-(const VintT& a, const VintT& b) { VintT c; sub(c, a, b); return c; } + friend VintT operator*(const VintT& a, const VintT& b) { VintT c; mul(c, a, b); return c; } + friend VintT operator/(const VintT& a, const VintT& b) { VintT c; div(c, a, b); return c; } + friend VintT operator%(const VintT& a, const VintT& b) { VintT c; mod(c, a, b); return c; } + friend VintT operator&(const VintT& a, const VintT& b) { VintT c; andBit(c, a, b); return c; } + friend VintT operator|(const VintT& a, const VintT& b) { VintT c; orBit(c, a, b); return c; } + + friend VintT operator+(const VintT& a, int b) { VintT c; adds1(c, a, b); return c; } + friend VintT operator-(const VintT& a, int b) { VintT c; subs1(c, a, b); return c; } + friend VintT operator*(const VintT& a, int b) { VintT c; muls1(c, a, b); return c; } + friend VintT operator/(const VintT& a, int b) { VintT c; divs1(c, a, b); return c; } + friend VintT operator%(const VintT& a, int b) { VintT c; mods1(c, a, b); return c; } + friend VintT operator+(const VintT& a, Unit b) { VintT c; addu1(c, a, b); return c; } + friend VintT operator-(const VintT& a, Unit b) { VintT c; subu1(c, a, b); return c; } + friend 
VintT operator*(const VintT& a, Unit b) { VintT c; mulu1(c, a, b); return c; } + friend VintT operator/(const VintT& a, Unit b) { VintT c; divu1(c, a, b); return c; } + friend VintT operator%(const VintT& a, Unit b) { VintT c; modu1(c, a, b); return c; } + + friend VintT operator&(const VintT& a, Unit b) { VintT c; andBitu1(c, a, b); return c; } + friend VintT operator|(const VintT& a, Unit b) { VintT c; orBitu1(c, a, b); return c; } + + VintT operator-() const { VintT c; neg(c, *this); return c; } + VintT& operator<<=(size_t n) { shl(*this, *this, n); return *this; } + VintT& operator>>=(size_t n) { shr(*this, *this, n); return *this; } + VintT operator<<(size_t n) const { VintT c = *this; c <<= n; return c; } + VintT operator>>(size_t n) const { VintT c = *this; c >>= n; return c; } +}; + +#ifdef MCL_VINT_FIXED_BUFFER +typedef VintT<vint::FixedBuffer<mcl::vint::Unit, MCL_MAX_BIT_SIZE * 2> > Vint; +#else +typedef VintT<vint::Buffer<mcl::vint::Unit> > Vint; +#endif + +} // mcl + +//typedef mcl::Vint mpz_class; diff --git a/vendor/github.com/byzantine-lab/mcl/include/mcl/window_method.hpp b/vendor/github.com/byzantine-lab/mcl/include/mcl/window_method.hpp new file mode 100644 index 000000000..cb4fad37e --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/include/mcl/window_method.hpp @@ -0,0 +1,175 @@ +#pragma once +/** + @file + @brief window method + @author MITSUNARI Shigeo(@herumi) +*/ +#include <mcl/array.hpp> +#include <mcl/fp.hpp> + +namespace mcl { namespace fp { + +/* + get w-bit size from x[0, bitSize) + @param x [in] data + @param bitSize [in] data size + @param w [in] split size < UnitBitSize +*/ +template<class T> +struct ArrayIterator { + static const size_t TbitSize = sizeof(T) * 8; + ArrayIterator(const T *x, size_t bitSize, size_t w) + : x(x) + , bitSize(bitSize) + , w(w) + , pos(0) + , mask((w == TbitSize ? 0 : (T(1) << w)) - 1) + { + assert(w <= TbitSize); + } + bool hasNext() const { return bitSize > 0; } + T getNext() + { + if (w == TbitSize) { + bitSize -= w; + return *x++; + } + if (pos + w < TbitSize) { + T v = (*x >> pos) & mask; + pos += w; + if (bitSize < w) { + bitSize = 0; + } else { + bitSize -= w; + } + return v; + } + if (pos + bitSize <= TbitSize) { + assert(bitSize <= w); + T v = *x >> pos; + assert((v >> bitSize) == 0); + bitSize = 0; + return v & mask; + } + assert(pos > 0); + T v = (x[0] >> pos) | (x[1] << (TbitSize - pos)); + v &= mask; + pos = (pos + w) - TbitSize; + bitSize -= w; + x++; + return v; + } + const T *x; + size_t bitSize; + size_t w; + size_t pos; + T mask; +}; + +template<class Ec> +class WindowMethod { +public: + size_t bitSize_; + size_t winSize_; + mcl::Array<Ec> tbl_; + WindowMethod(const Ec& x, size_t bitSize, size_t winSize) + { + init(x, bitSize, winSize); + } + WindowMethod() + : bitSize_(0) + , winSize_(0) + { + } + /* + @param x [in] base index + @param bitSize [in] exponent bit length + @param winSize [in] window size + */ + void init(bool *pb, const Ec& x, size_t bitSize, size_t winSize) + { + bitSize_ = bitSize; + winSize_ = winSize; + const size_t tblNum = (bitSize + winSize - 1) / winSize; + const size_t r = size_t(1) << winSize; + *pb = tbl_.resize(tblNum * r); + if (!*pb) return; + Ec t(x); + for (size_t i = 0; i < tblNum; i++) { + Ec* w = &tbl_[i * r]; + w[0].clear(); + for (size_t d = 1; d < r; d *= 2) { + for (size_t j = 0; j < d; j++) { + Ec::add(w[j + d], w[j], t); + } + Ec::dbl(t, t); + } + for (size_t j = 0; j < r; j++) { + w[j].normalize(); + } + } + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + void init(const Ec& x, size_t bitSize, size_t winSize) + { + bool b; + init(&b, x, bitSize, winSize); + if (!b) throw cybozu::Exception("mcl:WindowMethod:init") 
<< bitSize << winSize; + } +#endif + /* + @param z [out] x multiplied by y + @param y [in] exponent + */ + template<class FpT> + void mul(Ec& z, const FpT& y) const + { + fp::Block b; + y.getBlock(b); + powArray(z, b.p, b.n, false); + } + void mul(Ec& z, int64_t y) const + { +#if MCL_SIZEOF_UNIT == 8 + Unit u = fp::abs_(y); + powArray(z, &u, 1, y < 0); +#else + uint64_t ua = fp::abs_(y); + Unit u[2] = { uint32_t(ua), uint32_t(ua >> 32) }; + size_t un = u[1] ? 2 : 1; + powArray(z, u, un, y < 0); +#endif + } + void mul(Ec& z, const mpz_class& y) const + { + powArray(z, gmp::getUnit(y), gmp::getUnitSize(y), y < 0); + } + void powArray(Ec& z, const Unit* y, size_t n, bool isNegative) const + { + z.clear(); + while (n > 0) { + if (y[n - 1]) break; + n--; + } + if (n == 0) return; + assert((n << winSize_) <= tbl_.size()); + if ((n << winSize_) > tbl_.size()) return; + assert(y[n - 1]); + const size_t bitSize = (n - 1) * UnitBitSize + cybozu::bsr(y[n - 1]) + 1; + size_t i = 0; + ArrayIterator<Unit> ai(y, bitSize, winSize_); + do { + Unit v = ai.getNext(); + if (v) { + Ec::add(z, z, tbl_[(i << winSize_) + v]); + } + i++; + } while (ai.hasNext()); + if (isNegative) { + Ec::neg(z, z); + } + } +}; + +} } // mcl::fp + diff --git a/vendor/github.com/byzantine-lab/mcl/lib/.emptydir b/vendor/github.com/byzantine-lab/mcl/lib/.emptydir new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/byzantine-lab/mcl/mcl.sln b/vendor/github.com/byzantine-lab/mcl/mcl.sln new file mode 100644 index 000000000..7c4fe8f0c --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/mcl.sln @@ -0,0 +1,57 @@ +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 2013 +VisualStudioVersion = 12.0.40629.0 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fp_test", "test\proj\fp_test\fp_test.vcxproj", "{51266DE6-B57B-4AE3-B85C-282F170E1728}" + ProjectSection(ProjectDependencies) = postProject + {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "ec_test", "test\proj\ec_test\ec_test.vcxproj", "{46B6E88E-739A-406B-9F68-BC46C5950FA3}" + ProjectSection(ProjectDependencies) = postProject + {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mcl", "src\proj\mcl.vcxproj", "{1DBB979A-C212-45CD-9563-446A96F87F71}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fp_tower_test", "test\proj\fp_tower_test\fp_tower_test.vcxproj", "{733B6250-D249-4A99-B2A6-C8FAF6A90E97}" + ProjectSection(ProjectDependencies) = postProject + {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bn_test", "test\proj\bn_test\bn_test.vcxproj", "{9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}" + ProjectSection(ProjectDependencies) = postProject + {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71} + EndProjectSection +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Release|x64 = Release|x64 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {51266DE6-B57B-4AE3-B85C-282F170E1728}.Debug|x64.ActiveCfg = Debug|x64 + {51266DE6-B57B-4AE3-B85C-282F170E1728}.Debug|x64.Build.0 = Debug|x64 + 
{51266DE6-B57B-4AE3-B85C-282F170E1728}.Release|x64.ActiveCfg = Release|x64 + {51266DE6-B57B-4AE3-B85C-282F170E1728}.Release|x64.Build.0 = Release|x64 + {46B6E88E-739A-406B-9F68-BC46C5950FA3}.Debug|x64.ActiveCfg = Debug|x64 + {46B6E88E-739A-406B-9F68-BC46C5950FA3}.Debug|x64.Build.0 = Debug|x64 + {46B6E88E-739A-406B-9F68-BC46C5950FA3}.Release|x64.ActiveCfg = Release|x64 + {46B6E88E-739A-406B-9F68-BC46C5950FA3}.Release|x64.Build.0 = Release|x64 + {1DBB979A-C212-45CD-9563-446A96F87F71}.Debug|x64.ActiveCfg = Debug|x64 + {1DBB979A-C212-45CD-9563-446A96F87F71}.Debug|x64.Build.0 = Debug|x64 + {1DBB979A-C212-45CD-9563-446A96F87F71}.Release|x64.ActiveCfg = Release|x64 + {1DBB979A-C212-45CD-9563-446A96F87F71}.Release|x64.Build.0 = Release|x64 + {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Debug|x64.ActiveCfg = Debug|x64 + {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Debug|x64.Build.0 = Debug|x64 + {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Release|x64.ActiveCfg = Release|x64 + {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Release|x64.Build.0 = Release|x64 + {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Debug|x64.ActiveCfg = Debug|x64 + {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Debug|x64.Build.0 = Debug|x64 + {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Release|x64.ActiveCfg = Release|x64 + {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Release|x64.Build.0 = Release|x64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/vendor/github.com/byzantine-lab/mcl/misc/bench.txt b/vendor/github.com/byzantine-lab/mcl/misc/bench.txt new file mode 100644 index 000000000..3e18e6b44 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/misc/bench.txt @@ -0,0 +1,21 @@ +Core i7-7700 @ 3.6GHz + BN254 BLS12_381 +G1::mul 185.863Kclk 360.723Kclk +G1::add 812.01 clk 1.540Kclk +G1::dbl 837.24 clk 1.977Kclk +G2::mul 340.125Kclk 642.457Kclk +G2::add 2.233Kclk 4.368Kclk +G2::dbl 2.134Kclk 4.088Kclk +GT::pow 615.052Kclk 1.055Mclk +G1::setStr chk 1.546Kclk 534.376Kclk +G1::setStr 1.592Kclk 4.000Kclk +G2::setStr chk 609.195Kclk 1.402Mclk +G2::setStr 5.444Kclk 8.282Kclk +hashAndMapToG1 26.997Kclk 336.207Kclk +hashAndMapToG2 212.800Kclk 775.072Kclk +pairing 909.076Kclk 2.367Mclk +millerLoop 549.957Kclk 983.935Kclk +finalExp 375.203Kclk 1.404Mclk +precomputeG2 126.000Kclk 236.912Kclk +precomputedML 427.272Kclk 729.234Kclk + diff --git a/vendor/github.com/byzantine-lab/mcl/misc/karatsuba.cpp b/vendor/github.com/byzantine-lab/mcl/misc/karatsuba.cpp new file mode 100644 index 000000000..7c150c6e3 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/misc/karatsuba.cpp @@ -0,0 +1,75 @@ +/* + sudo cpufreq-set -c 0 -g performance + mycl karatsuba.cpp -DMCL_USE_LLVM=1 ../lib/libmcl.a && ./a.out +*/ +#include <stdio.h> +#include <cybozu/benchmark.hpp> +#include <mcl/fp.hpp> +#include "../src/proto.hpp" +#include "../src/low_func.hpp" +#ifdef MCL_USE_LLVM +#include "../src/low_func_llvm.hpp" +#endif +#include <cybozu/test.hpp> +#include <cybozu/xorshift.hpp> + +typedef mcl::FpT<> Fp; + +using namespace mcl::fp; + +void dump(const Unit *x, size_t N) +{ + for (size_t i = 0; i < N; i++) { + printf("%016llx ", (long long)x[N - 1 - i]); + } + printf("\n"); +} + +void gggKara(uint64_t *z, const uint64_t *x, const uint64_t *) +{ + SqrPre<8, Gtag>::f(z, x); +} +void gggLLVM(uint64_t *z, const uint64_t *x, const uint64_t *y) +{ + MulPre<8, Ltag>::f(z, x, y); +} + +template<size_t N> +void benchKaratsuba() +{ + cybozu::XorShift rg; + printf("N=%d\n", (int)N); + Unit z[N * 2]; + rg.read(z, N); + CYBOZU_BENCH("g:mulPre ", (MulPreCore<N, Gtag>::f), z, z, z); +// CYBOZU_BENCH("g:mulKara", (MulPre<N, Gtag>::karatsuba), z, 
z, z); + CYBOZU_BENCH("g:sqrPre ", (SqrPreCore<N, Gtag>::f), z, z); +// CYBOZU_BENCH("g:sqrKara", (SqrPre<N, Gtag>::karatsuba), z, z); + +#ifdef MCL_USE_LLVM + CYBOZU_BENCH("l:mulPre ", (MulPreCore<N, Ltag>::f), z, z, z); + CYBOZU_BENCH("l:sqrPre ", (SqrPreCore<N, Ltag>::f), z, z); + CYBOZU_BENCH("l:mulKara", (MulPre<N, Ltag>::karatsuba), z, z, z); + CYBOZU_BENCH("l:sqrKara", (SqrPre<N, Ltag>::karatsuba), z, z); +#endif +} + +CYBOZU_TEST_AUTO(karatsuba) +{ + benchKaratsuba<4>(); + benchKaratsuba<6>(); + benchKaratsuba<8>(); +#if MCL_MAX_BIT_SIZE >= 640 + benchKaratsuba<10>(); +#endif +#if MCL_MAX_BIT_SIZE >= 768 + benchKaratsuba<12>(); +#endif +#if MCL_MAX_BIT_SIZE >= 896 + benchKaratsuba<14>(); +#endif +#if MCL_MAX_BIT_SIZE >= 1024 + benchKaratsuba<16>(); +#endif +} + diff --git a/vendor/github.com/byzantine-lab/mcl/misc/mul.cpp b/vendor/github.com/byzantine-lab/mcl/misc/mul.cpp new file mode 100644 index 000000000..146ac33a9 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/misc/mul.cpp @@ -0,0 +1,58 @@ +/* + sudo cpufreq-set -c 0 -g performance + mycl mul.cpp -DMCL_USE_LLVM=1 ../lib/libmcl.a && ./a.out +*/ +#include <stdio.h> +#include <mcl/fp.hpp> +#include <cybozu/benchmark.hpp> +#include <cybozu/xorshift.hpp> +#include <cybozu/test.hpp> + +typedef mcl::FpT<> Fp; + +using namespace mcl::fp; + +void dump(const Unit *x, size_t N) +{ + for (size_t i = 0; i < N; i++) { + printf("%016llx ", (long long)x[N - 1 - i]); + } + printf("\n"); +} + +CYBOZU_TEST_AUTO(mulPre) +{ + cybozu::XorShift rg; + const char *pTbl[] = { + "0x2523648240000001ba344d80000000086121000000000013a700000000000013", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", + "6701817056313037086248947066310538444882082605308124576230408038843357549886356779857393369967010764802541005796711440355753503701056323603", // 462 bit + "4562440617622195218641171605700291324893228507248559930579192517899275167208677386505912811317371399778642309573594407310688704721375437998252661319722214188251994674360264950082874192246603471", // 640 bit + "1552518092300708935148979488462502555256886017116696611139052038026050952686376886330878408828646477950487730697131073206171580044114814391444287275041181139204454976020849905550265285631598444825262999193716468750892846853816057031", // 768 bit + }; + const size_t N = 16; + const Mode modeTbl[] = { + FP_GMP_MONT, +#ifdef MCL_USE_LLVM + FP_LLVM_MONT, +#endif + }; + for (size_t j = 0; j < CYBOZU_NUM_OF_ARRAY(modeTbl); j++) { + Mode mode = modeTbl[j]; + printf("%s\n", ModeToStr(mode)); + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(pTbl); i++) { + const char *p = pTbl[i]; + Fp::init(p, mode); + printf("bitSize=%d\n", (int)Fp::getBitSize()); + const Op& op = Fp::getOp(); + Unit x[N], y[N * 2]; + rg.read(x, N); + rg.read(y, N * 2); + CYBOZU_BENCH("mul ", op.fp_mul, y, y, x, op.p); + CYBOZU_BENCH("sqr ", op.fp_sqr, y, y, op.p); + CYBOZU_BENCH("mulPre", op.fpDbl_mulPre, y, y, y); + CYBOZU_BENCH("sqrPre", op.fpDbl_sqrPre, y, y); + CYBOZU_BENCH("mod ", op.fpDbl_mod, y, y, op.p); + } + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/misc/precompute.cpp b/vendor/github.com/byzantine-lab/mcl/misc/precompute.cpp new file mode 100644 index 000000000..63cdd663b --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/misc/precompute.cpp @@ -0,0 +1,30 @@ +#include <mcl/bn256.hpp> +#include <stdio.h> + +using namespace mcl::bn; + +int main() +{ + initPairing(mcl::BN254); + G2 Q; + mapToG2(Q, 1); + std::vector<Fp6> Qcoeff; + precomputeG2(Qcoeff, Q); + puts("#if MCL_SIZEOF_UNIT == 8"); + puts("static const uint64_t QcoeffTblBN254[][6][4] = {"); + for (size_t i = 0; i < Qcoeff.size(); i++) { + const Fp6& x6 = Qcoeff[i];
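+ // each Qcoeff[i] is one precomputed Fp6 Miller-loop coefficient; the loops below print its 6 Fp components as 4 x 64-bit units each (the 254-bit p of BN254 fits in 4 units per Fp)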
+ puts("\t{"); + for (size_t j = 0; j < 6; j++) { + printf("\t\t{"); + const Fp& x = x6.getFp0()[j]; + for (size_t k = 0; k < 4; k++) { + printf("0x%016llxull,", (unsigned long long)x.getUnit()[k]); + } + puts("},"); + } + puts("\t},"); + } + puts("};"); + puts("#endif"); +} diff --git a/vendor/github.com/byzantine-lab/mcl/misc/she/bench.sh b/vendor/github.com/byzantine-lab/mcl/misc/she/bench.sh new file mode 100644 index 000000000..ced87b4db --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/misc/she/bench.sh @@ -0,0 +1,6 @@ +for i in 4 6 8 +do echo $i +touch test/she_test.cpp +make bin/she_test.exe CFLAGS_USER=-DMCLBN_FP_UNIT_SIZE=$i +bin/she_test.exe > misc/she/bench$i.txt +done diff --git a/vendor/github.com/byzantine-lab/mcl/misc/she/bench4.txt b/vendor/github.com/byzantine-lab/mcl/misc/she/bench4.txt new file mode 100644 index 000000000..99b2593c4 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/misc/she/bench4.txt @@ -0,0 +1,99 @@ +ctest:module=log +CurveFp254BNb +ctest:module=HashTable +ctest:module=GTHashTable +ctest:module=enc_dec +ctest:module=add_sub_mul +ctest:module=add_mul_add_sub +ctest:module=innerProduct +ctest:module=io +ctest:module=bench +enc 673.772Kclk +add 8.021Kclk +mul 4.042Mclk +dec 2.194Mclk +add after mul 20.693Kclk +ctest:module=saveHash +ctest:module=hashBench +Kclk +m=000fffff decG1 1.83e+02 +m=001fffff decG1 1.83e+02 +m=003fffff decG1 1.83e+02 +m=007fffff decG1 1.90e+02 +m=00ffffff decG1 2.04e+02 +m=01ffffff decG1 2.66e+02 +m=03ffffff decG1 4.17e+02 +m=07ffffff decG1 7.15e+02 +m=0fffffff decG1 1.29e+03 +m=1fffffff decG1 2.43e+03 +m=3fffffff decG1 4.70e+03 +m=7fffffff decG1 9.28e+03 + +m=000fffff decG2 4.09e+02 +m=001fffff decG2 4.11e+02 +m=003fffff decG2 4.09e+02 +m=007fffff decG2 4.23e+02 +m=00ffffff decG2 4.48e+02 +m=01ffffff decG2 5.21e+02 +m=03ffffff decG2 7.25e+02 +m=07ffffff decG2 1.11e+03 +m=0fffffff decG2 1.87e+03 +m=1fffffff decG2 3.36e+03 +m=3fffffff decG2 6.38e+03 +m=7fffffff decG2 1.24e+04 + +m=000fffff decGT 2.20e+03 +m=001fffff decGT 2.21e+03 +m=003fffff decGT 2.20e+03 +m=007fffff decGT 2.21e+03 +m=00ffffff decGT 2.23e+03 +m=01ffffff decGT 2.28e+03 +m=03ffffff decGT 2.37e+03 +m=07ffffff decGT 2.56e+03 +m=0fffffff decGT 2.94e+03 +m=1fffffff decGT 3.78e+03 +m=3fffffff decGT 5.41e+03 +m=7fffffff decGT 8.69e+03 +large m +G1::add 7.36e-01 +G1::mul 1.92e+02 +G2::add 3.51e+00 +G2::mul 4.03e+02 +GT::mul 5.47e+00 +GT::pow 7.27e+02 +G1window 1.92e+01 +G2window 6.15e+01 +GTwindow 1.35e+02 +miller 6.69e+02 +finalExp 4.23e+02 +precomML 5.16e+02 +small m = 2097151 +G1::mul 4.52e+01 +G2::mul 1.01e+02 +GT::pow 1.33e+02 +G1window 1.55e+00 +G2window 5.02e+00 +GTwindow 1.55e+01 +encG1 2.10e+02 +encG2 4.82e+02 +encGT 2.47e+03 +encG1pre 5.31e+01 +encG2pre 1.47e+02 +encGTpre 6.01e+02 +decG1 1.84e+02 +decG2 3.96e+02 +degGT 2.20e+03 +mul 4.07e+03 +addG1 1.56e+00 +addG2 4.72e+00 +addGT 2.12e+01 +reRandG1 2.10e+02 +reRandG2 4.71e+02 +reRandGT 2.49e+03 +reRandG1pre 5.16e+01 +reRandG2pre 1.44e+02 +reRandGTpre 6.10e+02 +mulG1 9.03e+01 +mulG2 2.03e+02 +mulGT 5.34e+02 +ctest:name=she_test, module=11, total=2879, ok=2879, ng=0, exception=0 diff --git a/vendor/github.com/byzantine-lab/mcl/misc/she/bench6.txt b/vendor/github.com/byzantine-lab/mcl/misc/she/bench6.txt new file mode 100644 index 000000000..863f7129a --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/misc/she/bench6.txt @@ -0,0 +1,99 @@ +ctest:module=log +CurveFp382_1 +ctest:module=HashTable +ctest:module=GTHashTable +ctest:module=enc_dec +ctest:module=add_sub_mul +ctest:module=add_mul_add_sub 
+ctest:module=innerProduct +ctest:module=io +ctest:module=bench +enc 2.077Mclk +add 17.694Kclk +mul 13.408Mclk +dec 5.854Mclk +add after mul 41.570Kclk +ctest:module=saveHash +ctest:module=hashBench +Kclk +m=000fffff decG1 5.34e+02 +m=001fffff decG1 5.36e+02 +m=003fffff decG1 5.34e+02 +m=007fffff decG1 5.48e+02 +m=00ffffff decG1 5.87e+02 +m=01ffffff decG1 7.11e+02 +m=03ffffff decG1 9.53e+02 +m=07ffffff decG1 1.41e+03 +m=0fffffff decG1 2.30e+03 +m=1fffffff decG1 4.11e+03 +m=3fffffff decG1 7.71e+03 +m=7fffffff decG1 1.50e+04 + +m=000fffff decG2 1.27e+03 +m=001fffff decG2 1.27e+03 +m=003fffff decG2 1.27e+03 +m=007fffff decG2 1.30e+03 +m=00ffffff decG2 1.35e+03 +m=01ffffff decG2 1.53e+03 +m=03ffffff decG2 1.88e+03 +m=07ffffff decG2 2.55e+03 +m=0fffffff decG2 3.87e+03 +m=1fffffff decG2 6.53e+03 +m=3fffffff decG2 1.18e+04 +m=7fffffff decG2 2.25e+04 + +m=000fffff decGT 6.01e+03 +m=001fffff decGT 6.03e+03 +m=003fffff decGT 6.01e+03 +m=007fffff decGT 6.04e+03 +m=00ffffff decGT 6.08e+03 +m=01ffffff decGT 6.17e+03 +m=03ffffff decGT 6.39e+03 +m=07ffffff decGT 6.71e+03 +m=0fffffff decGT 7.44e+03 +m=1fffffff decGT 8.95e+03 +m=3fffffff decGT 1.20e+04 +m=7fffffff decGT 1.80e+04 +large m +G1::add 1.48e+00 +G1::mul 5.44e+02 +G2::add 6.91e+00 +G2::mul 1.28e+03 +GT::mul 1.04e+01 +GT::pow 2.04e+03 +G1window 5.57e+01 +G2window 2.04e+02 +GTwindow 4.03e+02 +miller 2.09e+03 +finalExp 1.50e+03 +precomML 1.63e+03 +small m = 2097151 +G1::mul 8.29e+01 +G2::mul 2.05e+02 +GT::pow 2.66e+02 +G1window 3.18e+00 +G2window 1.14e+01 +GTwindow 3.19e+01 +encG1 6.01e+02 +encG2 1.49e+03 +encGT 7.66e+03 +encG1pre 1.41e+02 +encG2pre 4.71e+02 +encGTpre 1.76e+03 +decG1 5.37e+02 +decG2 1.27e+03 +degGT 6.02e+03 +mul 1.34e+04 +addG1 3.07e+00 +addG2 1.02e+01 +addGT 4.18e+01 +reRandG1 5.99e+02 +reRandG2 1.49e+03 +reRandGT 7.69e+03 +reRandG1pre 1.40e+02 +reRandG2pre 4.68e+02 +reRandGTpre 1.75e+03 +mulG1 1.65e+02 +mulG2 4.14e+02 +mulGT 1.06e+03 +ctest:name=she_test, module=11, total=2879, ok=2879, ng=0, exception=0 diff --git a/vendor/github.com/byzantine-lab/mcl/misc/she/bench8.txt b/vendor/github.com/byzantine-lab/mcl/misc/she/bench8.txt new file mode 100644 index 000000000..f8fe8fd75 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/misc/she/bench8.txt @@ -0,0 +1,99 @@ +ctest:module=log +CurveFp462 +ctest:module=HashTable +ctest:module=GTHashTable +ctest:module=enc_dec +ctest:module=add_sub_mul +ctest:module=add_mul_add_sub +ctest:module=innerProduct +ctest:module=io +ctest:module=bench +enc 5.095Mclk +add 36.280Kclk +mul 30.163Mclk +dec 12.974Mclk +add after mul 76.646Kclk +ctest:module=saveHash +ctest:module=hashBench +Kclk +m=000fffff decG1 1.44e+03 +m=001fffff decG1 1.45e+03 +m=003fffff decG1 1.45e+03 +m=007fffff decG1 1.47e+03 +m=00ffffff decG1 1.54e+03 +m=01ffffff decG1 1.70e+03 +m=03ffffff decG1 2.03e+03 +m=07ffffff decG1 2.64e+03 +m=0fffffff decG1 3.88e+03 +m=1fffffff decG1 6.32e+03 +m=3fffffff decG1 1.12e+04 +m=7fffffff decG1 2.11e+04 + +m=000fffff decG2 2.99e+03 +m=001fffff decG2 3.01e+03 +m=003fffff decG2 2.99e+03 +m=007fffff decG2 3.05e+03 +m=00ffffff decG2 3.15e+03 +m=01ffffff decG2 3.41e+03 +m=03ffffff decG2 3.93e+03 +m=07ffffff decG2 4.95e+03 +m=0fffffff decG2 6.97e+03 +m=1fffffff decG2 1.10e+04 +m=3fffffff decG2 1.91e+04 +m=7fffffff decG2 3.54e+04 + +m=000fffff decGT 1.31e+04 +m=001fffff decGT 1.31e+04 +m=003fffff decGT 1.31e+04 +m=007fffff decGT 1.31e+04 +m=00ffffff decGT 1.32e+04 +m=01ffffff decGT 1.33e+04 +m=03ffffff decGT 1.36e+04 +m=07ffffff decGT 1.43e+04 +m=0fffffff decGT 1.56e+04 +m=1fffffff decGT 1.82e+04 
+m=3fffffff decGT 2.34e+04 +m=7fffffff decGT 3.39e+04 +large m +G1::add 3.40e+00 +G1::mul 1.41e+03 +G2::add 1.38e+01 +G2::mul 2.93e+03 +GT::mul 1.94e+01 +GT::pow 4.30e+03 +G1window 1.59e+02 +G2window 4.89e+02 +GTwindow 8.96e+02 +miller 4.99e+03 +finalExp 3.26e+03 +precomML 3.71e+03 +small m = 2097151 +G1::mul 1.53e+02 +G2::mul 3.85e+02 +GT::pow 4.88e+02 +G1window 6.96e+00 +G2window 2.17e+01 +GTwindow 5.83e+01 +encG1 1.62e+03 +encG2 3.48e+03 +encGT 1.79e+04 +encG1pre 3.67e+02 +encG2pre 1.09e+03 +encGTpre 3.88e+03 +decG1 1.45e+03 +decG2 3.02e+03 +degGT 1.31e+04 +mul 3.02e+04 +addG1 7.08e+00 +addG2 2.03e+01 +addGT 7.68e+01 +reRandG1 1.63e+03 +reRandG2 3.48e+03 +reRandGT 1.79e+04 +reRandG1pre 3.65e+02 +reRandG2pre 1.08e+03 +reRandGTpre 3.79e+03 +mulG1 3.08e+02 +mulG2 7.65e+02 +mulGT 1.95e+03 +ctest:name=she_test, module=11, total=2879, ok=2879, ng=0, exception=0 diff --git a/vendor/github.com/byzantine-lab/mcl/misc/she/nizkp.pdf b/vendor/github.com/byzantine-lab/mcl/misc/she/nizkp.pdf new file mode 100644 index 000000000..7e61b5a64 Binary files /dev/null and b/vendor/github.com/byzantine-lab/mcl/misc/she/nizkp.pdf differ diff --git a/vendor/github.com/byzantine-lab/mcl/misc/she/she-api-ja.md b/vendor/github.com/byzantine-lab/mcl/misc/she/she-api-ja.md new file mode 100644 index 000000000..850f11ff3 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/misc/she/she-api-ja.md @@ -0,0 +1,314 @@ +# she : L2 (two-level) homomorphic encryption library + +# Overview +she (somewhat homomorphic encryption) is a public-key encryption library implementing pairing-based L2 (two-level) homomorphic encryption. +L2 homomorphic encryption allows ciphertexts to be added any number of times but multiplied only once. + +In particular, given element-wise encryptions of two integer vectors x = (x_i) and y = (y_i), the inner product of the two vectors can be computed while the data stays encrypted. + +ΣEnc(x_i) Enc(y_i) = Enc(Σx_i y_i).
+ +# 特長 +* ペアリングベースã®æœ€æ–°ã‚¢ãƒ«ã‚´ãƒªã‚ºãƒ ã‚’実装 + * [Efficient Two-level Homomorphic Encryption in Prime-order Bilinear Groups and A Fast Implementation in WebAssembly : ASIA CCS2018](http://asiaccs2018.org/?page_id=632) +* C++版ã¯Windows(x64), Linux(x64, ARM64), OSX(x64)ã«å¯¾å¿œ +* JavaScript(WebAssembly 以é™JSã¨è¨˜ã™)版ã¯Chrome, Firefox, Edge, Safari(Android, iPhoneå«ã‚€), Node.jsã«å¯¾å¿œ + +# クラスã¨ä¸»ãªæ©Ÿèƒ½ + +## 主ãªã‚¯ãƒ©ã‚¹ +* 秘密éµã‚¯ãƒ©ã‚¹ SecretKey +* 公開éµã‚¯ãƒ©ã‚¹ PublicKey +* æš—å·æ–‡ã‚¯ãƒ©ã‚¹ CipherTextG1, CipherTextG2, CipherTextGT +* ゼロ知識証明クラス ZkpBin, ZkpEq, ZkpBinEq + +## æš—å·åŒ–ã¨å¾©å·æ–¹æ³• +* 秘密éµã‹ã‚‰å…¬é–‹éµã‚’作æˆã™ã‚‹ +* 公開éµã‚’用ã„ã¦æ•´æ•°ã‹ã‚‰æš—å·æ–‡ã‚’作る +* 秘密éµã‚’用ã„ã¦æš—å·æ–‡ã‚’復å·ã™ã‚‹ + +## æš—å·æ–‡åŒå£«ã®è¨ˆç®— +* åŒã˜æš—å·æ–‡ã‚¯ãƒ©ã‚¹åŒå£«ã¯åŠ ç®—・減算ã§ãã‚‹ +* CipherTextG1ã¨CipherTextG2ã‚’ä¹—ç®—ã™ã‚‹ã¨CipherTextGTã«ãªã‚‹ + +## 復å·ã®é‡è¦ãªæ³¨æ„点 +* ã“ã®sheã¯å¾©å·æ™‚ã«å°ã•ãªé›¢æ•£å¯¾æ•°å•é¡Œ(DLP)を解ãå¿…è¦ãŒã‚ã‚‹ +* DLPã®ãƒ†ãƒ¼ãƒ–ルサイズをsã€æš—å·æ–‡ã‚’Enc(m)ã¨ã™ã‚‹ã¨å¾©å·æ™‚é–“ã¯m/sã«æ¯”例ã™ã‚‹ +* テーブルサイズã®è¨­å®šã¯`setRangeForDLP(s)`を使ㆠ+ * `m/s`ã®æœ€å¤§å€¤ã¯`setTryNum(tryNum)`ã§è¡Œã† + +## ゼロ知識証明クラス +* mã‚’æš—å·ã™ã‚‹ã¨ãã«åŒæ™‚ã«ã‚¼ãƒ­çŸ¥è­˜è¨¼æ˜Žã‚’生æˆã™ã‚‹ +* æš—å·æ–‡ã¨ç”Ÿæˆã•ã‚ŒãŸã‚¼ãƒ­çŸ¥è­˜è¨¼æ˜Žã¨å…¬é–‹éµã§mã«é–¢ã™ã‚‹åˆ¶ç´„æ¡ä»¶ã‚’検証ã§ãã‚‹ + +# JS版 + +## Node.jsã§ã®èª­ã¿è¾¼ã¿ + +``` +>npm install she-wasm +>node +>const she = require('she-wasm') +``` + +## ブラウザã§ã®èª­ã¿è¾¼ã¿ +[she-wasm](https://github.com/herumi/she-wasm/)ã®she.js, she\_c.js, she\_c.wasmファイルをåŒã˜ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã«ç½®ã„ã¦she.jsを読ã¿è¾¼ã‚€ +``` +// HTML + +``` + +## JS版サンプル + +``` +// システムã®åˆæœŸåŒ– +she.init().then(() => { + const sec = new she.SecretKey() + // 秘密éµã®åˆæœŸåŒ– + sec.setByCSPRNG() + + // 秘密éµsecã‹ã‚‰å…¬é–‹éµpubã‚’ä½œæˆ + const pub = sec.getPublicKey() + + const m1 = 1 + const m2 = 2 + const m3 = 3 + const m4 = -1 + + // 平文m1ã¨m2ã‚’CipherTextG1ã¨ã—ã¦æš—å·åŒ– + const c11 = pub.encG1(m1) + const c12 = pub.encG1(m2) + + // 平文m3ã¨m4ã‚’CipherTextG2ã¨ã—ã¦æš—å·åŒ– + const c21 = pub.encG2(m3) + const c22 = pub.encG2(m4) + + // c11ã¨c12, c21ã¨c22ã‚’ãã‚Œãžã‚ŒåŠ ç®— + const c1 = she.add(c11, c12) + const c2 = she.add(c21, c22) + + // c1ã¨c2ã‚’ä¹—ç®—ã™ã‚‹ã¨CipherTextGTåž‹ã«ãªã‚‹ + const ct = she.mul(c1, c2) + + // æš—å·æ–‡ctを復å·ã™ã‚‹ + console.log(`(${m1} + ${m2}) * (${m3} + ${m4}) = ${sec.dec(ct)}`) +}) +``` + +# C++版サンプル +ライブラリã®ãƒ“ルドã¯[mcl](https://github.com/herumi/mcl/#installation-requirements)ã‚’å‚ç…§ +``` +#include +int main() + try +{ + using namespace mcl::she; + // システã®ãƒ åˆæœŸåŒ– + init(); + + SecretKey sec; + + // 秘密éµã®åˆæœŸåŒ– + sec.setByCSPRNG(); + + // 秘密éµsecã‹ã‚‰å…¬é–‹éµpubã‚’ä½œæˆ + PublicKey pub; + sec.getPublicKey(pub); + + int m1 = 1; + int m2 = 2; + int m3 = 3; + int m4 = -1; + + // 平文m1ã¨m2ã‚’CipherTextG1ã¨ã—ã¦æš—å·åŒ– + CipherTextG1 c11, c12; + pub.enc(c11, m1); + pub.enc(c12, m2); + + // 平文m3ã¨m4ã‚’CipherTextG2ã¨ã—ã¦æš—å·åŒ– + CipherTextG2 c21, c22; + pub.enc(c21, m3); + pub.enc(c22, m4); + + // c11ã¨c12, c21ã¨c22ã‚’ãã‚Œãžã‚ŒåŠ ç®— + CipherTextG1 c1; + CipherTextG2 c2; + CipherTextG1::add(c1, c11, c12); + CipherTextG2::add(c2, c21, c22); + + // c1ã¨c2ã‚’ä¹—ç®—ã™ã‚‹ã¨CipherTextGTåž‹ã«ãªã‚‹ + CipherTextGT ct; + CipherTextGT::mul(ct, c1, c2); + + // æš—å·æ–‡ctを復å·ã™ã‚‹ + printf("(%d + %d) * (%d + %d) = %d\n", m1, m2, m3, m4, (int)sec.dec(ct)); +} catch (std::exception& e) { + printf("ERR %s\n", e.what()); + return 1; +} + +``` + +# クラス共通メソッド + +## シリアライズ(C++) + +* `setStr(const std::string& str, int ioMode = 0)` + * ioModeã«å¾“ã£ã¦strã§è¨­å®šã™ã‚‹ + +* 
`getStr(std::string& str, int ioMode = 0) const` +* `std::string getStr(int ioMode = 0) const` + * get the string according to ioMode +* `size_t serialize(void *buf, size_t maxBufSize) const` + * serialize into buf, which has maxBufSize bytes allocated + * returns the number of bytes written to buf + * returns 0 on error +* `size_t deserialize(const void *buf, size_t bufSize)` + * deserialize by reading at most bufSize bytes from buf + * returns the number of bytes read + * returns 0 on error + +## Serialization (JS) + +* `deserialize(s)` + * deserialize from s of type Uint8Array +* `serialize()` + * serialize and return a Uint8Array value +* `deserializeHexStr(s)` + * deserialize from a hexadecimal string s +* `serializeToHexStr()` + * serialize to a hexadecimal string + +## ioMode + +* 2 ; binary +* 10 ; decimal +* 16 ; hexadecimal +* IoPrefix ; OR with 2 or 16 to prepend a 0b or 0x prefix +* IoEcAffine ; (G1, G2 only) affine coordinates +* IoEcProj ; (G1, G2 only) projective coordinates +* IoSerialize ; same as serialize()/deserialize() + +## Notes +* the C++ namespace is `mcl::she` +* below, CT denotes one of CipherTextG1, CipherTextG2 and CipherTextGT +* plaintexts in the JS version are restricted to the range of 32-bit integers + +## SecretKey class + +* `void setByCSPRNG()`(C++) +* `void setByCSPRNG()`(JS) + * initialize the secret key with a pseudo random number generator + +* `int64_t dec(const CT& c) const`(C++) +* `int dec(CT c)`(JS) + * decrypt the ciphertext c +* `int64_t decViaGT(const CipherTextG1& c) const`(C++) +* `int64_t decViaGT(const CipherTextG2& c) const`(C++) +* `int decViaGT(CT c)`(JS) + * decrypt the ciphertext via GT +* `bool isZero(const CT& c) const`(C++) +* `bool isZero(CT c)`(JS) + * true if c decrypts to 0 + * faster than calling dec and comparing the result with 0 + +## PublicKey, PrecomputedPublicKey classes +PrecomputedPublicKey is a faster version of PublicKey + +* `void PrecomputedPublicKey::init(const PublicKey& pub)`(C++) +* `void PrecomputedPublicKey::init(pub)`(JS) + * initialize the PrecomputedPublicKey with the public key pub + + +* `PrecomputedPublicKey::destroy()`(JS) + * in JavaScript this method must be called once a PrecomputedPublicKey is no longer needed + * otherwise memory will leak + +Below, PK = PublicKey or PrecomputedPublicKey + +* `void PK::enc(CT& c, int64_t m) const`(C++) +* `CipherTextG1 PK::encG1(m)`(JS) +* `CipherTextG2 PK::encG2(m)`(JS) +* `CipherTextGT PK::encGT(m)`(JS) + * encrypt m and set c (or return the value) + +* `void PK::reRand(CT& c) const`(C++) +* `CT PK::reRand(CT c)`(JS) + * rerandomize c + * it cannot be determined whether a rerandomized ciphertext and the original ciphertext encrypt the same plaintext + +* `void convert(CipherTextGT& cm, const CT& ca) const` +* `CipherTextGT convert(CT ca)` + * convert the ciphertext ca (CipherTextG1 or CipherTextG2) to a CipherTextGT + +## CipherText classes + +* `void CT::add(CT& z, const CT& x, const CT& y)`(C++) +* `CT she.add(CT x, CT y)`(JS) + * add the ciphertexts x and y and set z (or return the value) +* `void CT::sub(CT& z, const CT& x, const CT& y)`(C++) +* `CT she.sub(CT x, CT y)`(JS) + * subtract the ciphertext y from the ciphertext x and set z (or return the value) +* `void CT::neg(CT& y, const CT& x)`(C++) +* `void she.neg(CT x)`(JS) + * negate the ciphertext x and set y (or return the value) +* `void CT::mul(CT& z, const CT& x, int y)`(C++) +* `CT she.mulInt(CT x, int y)`(JS) + * multiply the ciphertext x by the integer y and set z (or return the value) + +* `void CipherTextGT::mul(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y)`(C++) +* `CipherTextGT she.mul(CipherTextG1 x, CipherTextG2 y)`(JS) + * multiply the ciphertext x by the ciphertext y and set z (or return the value) + +* `void 
CipherTextGT::mulML(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y)`(C++) + * multiply the ciphertext x by the ciphertext y (Miller loop only) and set z (or return the value) +* `CipherTextGT::finalExp(CipherText& , const CipherTextG1& x, const CipherTextG2& y)`(C++) + * mul(a, b) = finalExp(mulML(a, b)) + * add(mul(a, b), mul(c, d)) = finalExp(add(mulML(a, b), mulML(c, d))) + * that is, for a sum of products it is best to add the mulML results and apply finalExp once at the end + +## Zero-knowledge proof classes + +### Overview +* ZkpBin ; for a ciphertext encGi(m)(i = 1, 2, T), verify that m = 0 or 1 without decrypting it +* ZkpEq ; for ciphertexts encG1(m1) and encG2(m2), verify that m1 = m2 +* ZkpBinEq ; for ciphertexts encG1(m1) and encG2(m2), verify that m1 = m2 = 0 or 1 + +### API +PK = PublicKey or PrecomputedPublicKey + +* `void PK::encWithZkpBin(CipherTextG1& c, Zkp& zkp, int m) const`(C++) +* `void PK::encWithZkpBin(CipherTextG2& c, Zkp& zkp, int m) const`(C++) +* `[CipherTextG1, ZkpBin] PK::encWithZkpBinG1(m)`(JS) +* `[CipherTextG2, ZkpBin] PK::encWithZkpBinG2(m)`(JS) + * encrypt m (= 0 or 1) and set the ciphertext c and the zero-knowledge proof zkp (or return [c, zkp]) + * throws an exception if m is neither 0 nor 1 +* `void PK::encWithZkpEq(CipherTextG1& c1, CipherTextG2& c2, ZkpEq& zkp, const INT& m) const`(C++) +* `[CipherTextG1, CipherTextG2, ZkpEq] PK::encWithZkpEq(m)`(JS) + * encrypt m and set the ciphertexts c1, c2 and the zero-knowledge proof zkp (or return [c1, c2, zkp]) +* `void PK::encWithZkpBinEq(CipherTextG1& c1, CipherTextG2& c2, ZkpBinEq& zkp, int m) const`(C++) +* `[CipherTextG1, CipherTextG2, ZkpEqBin] PK::encWithZkpBinEq(m)`(JS) + * encrypt m (= 0 or 1) and set the ciphertexts c1, c2 and the zero-knowledge proof zkp (or return [c1, c2, zkp]) + * throws an exception if m is neither 0 nor 1 + +## Global functions + +* `void init(const CurveParam& cp, size_t hashSize = 1024, size_t tryNum = 2048)`(C++) +* `void init(curveType = she.BN254, hashSize = 1024, tryNum = 2048)`(JS) + * initialize with a decryption table of size hashSize and the try count tryNum + * the range of decryptable plaintexts m is |m| <= hashSize * tryNum +* `she.loadTableForGTDLP(Uint8Array a)`(JS) + * load a decryption table + * currently only `https://herumi.github.io/she-dlp-table/she-dlp-0-20-gt.bin` is available +* `void useDecG1ViaGT(bool use)`(C++/JS) +* `void useDecG2ViaGT(bool use)`(C++/JS) + * decrypt CipherTextG1 and CipherTextG2 via CipherTextGT + * when decrypting large values it is more efficient to consolidate into a single GT table than to keep a huge DLP table for each group + +# License + +This library is provided under the [modified BSD license](https://github.com/herumi/mcl/blob/master/COPYRIGHT) + +# Author + +光成滋生 MITSUNARI Shigeo(herumi@nifty.com) diff --git a/vendor/github.com/byzantine-lab/mcl/misc/she/she-api.md b/vendor/github.com/byzantine-lab/mcl/misc/she/she-api.md new file mode 100644 index 000000000..af54311e9 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/misc/she/she-api.md @@ -0,0 +1,322 @@ +# she ; Two-level homomorphic encryption library for browser/Node.js by WebAssembly + +# Abstract +she is a somewhat (two-level) homomorphic encryption library, +which is based on pairings. +This library supports polynomially many homomorphic additions and +one multiplication over encrypted data. + +In particular, the inner product of two encrypted integer vectors such as Enc(x) = (Enc(x_i)), Enc(y) = (Enc(y_i)) +can be computed. + +Sum_i Enc(x_i) Enc(y_i) = Enc(Sum_i x_i y_i).
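+ +A minimal C++ sketch of this inner-product property, built only from the API documented below (the header `mcl/she.hpp` is the one used in the C++ sample; this is illustrative, not an official sample): +``` +#include <mcl/she.hpp> +#include <stdio.h> +int main() +{ + using namespace mcl::she; + init(); // initialize the library + SecretKey sec; + sec.setByCSPRNG(); + PublicKey pub; + sec.getPublicKey(pub); + const int x[3] = { 1, 2, 3 }; + const int y[3] = { 4, 5, 6 }; + CipherTextGT sum; + pub.enc(sum, 0); // Enc(0) as the accumulator + for (int i = 0; i < 3; i++) { + CipherTextG1 cx; + CipherTextG2 cy; + pub.enc(cx, x[i]); + pub.enc(cy, y[i]); + CipherTextGT t; + CipherTextGT::mul(t, cx, cy); // the single allowed multiplication per component + CipherTextGT::add(sum, sum, t); // homomorphic addition of the products + } + printf("inner product = %d\n", (int)sec.dec(sum)); // 1*4 + 2*5 + 3*6 = 32 +} +```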
+ +# Features +* supports the latest pairing-based algorithm + * [Efficient Two-level Homomorphic Encryption in Prime-order Bilinear Groups and A Fast Implementation in WebAssembly : ASIA CCS2018](http://asiaccs2018.org/?page_id=632) +* supports Windows(x64), Linux(x64, ARM64), OSX(x64) +* supports JavaScript(WebAssembly) on Chrome, Firefox, Safari (including Android and iPhone), and Node.js + +# Classes + +## Main classes +* secret key class ; SecretKey +* public key class ; PublicKey +* ciphertext class ; CipherTextG1, CipherTextG2, CipherTextGT +* zero-knowledge proof class ; ZkpBin, ZkpEq, ZkpBinEq + +## Encryption and decryption +* create the corresponding public key from a secret key +* encrypt an integer(plaintext) with a public key +* decrypt a ciphertext with a secret key + +## Homomorphic operations +* homomorphic addition/subtraction over ciphertexts of the same ciphertext class +* homomorphic multiplication of a CipherTextG1 and a CipherTextG2 + * The class of the result is CipherTextGT. + +## Important notes on decryption +* This library requires solving a small DLP to decrypt a ciphertext. +* The decryption time is O(m/s), where s is the size of the table used to solve the DLP and m is the size of a plaintext. +* call `setRangeForDLP(s)` to set the table size. + * The maximum `m/s` is set by `setTryNum(tryNum)`. + +## Zero-knowledge proof class +* A zero-knowledge proof is simultaneously created when encrypting a plaintext `m`. +* The constraints on `m` can be verified with the created zero-knowledge proof and a public key. + +# Setup for JavaScript(JS) + +## for Node.js + +``` +>npm install she-wasm +>node +>const she = require('she-wasm') +``` + +## for a browser + +Copy `she.js`, `she\_c.js`, `she\_c.wasm` to your directory from [she-wasm](https://github.com/herumi/she-wasm/), +and load `she.js`. +``` +// HTML +<script src="she.js"></script> +``` + +## A sample for JS + +``` +// initialize a library +she.init().then(() => { + const sec = new she.SecretKey() + // initialize a secret key by CSPRNG(cryptographically secure pseudo random number generator) + sec.setByCSPRNG() + + // create a public key from a secret key + const pub = sec.getPublicKey() + + const m1 = 1 + const m2 = 2 + const m3 = 3 + const m4 = -1 + + // encrypt m1 and m2 as CipherTextG1 class + const c11 = pub.encG1(m1) + const c12 = pub.encG1(m2) + + // encrypt m3 and m4 as CipherTextG2 class + const c21 = pub.encG2(m3) + const c22 = pub.encG2(m4) + + // add c11 and c12, c21 and c22 respectively + const c1 = she.add(c11, c12) + const c2 = she.add(c21, c22) + + // get ct as a CipherTextGT class by multiplying c1 with c2 + const ct = she.mul(c1, c2) + + // decrypt ct + console.log(`(${m1} + ${m2}) * (${m3} + ${m4}) = ${sec.dec(ct)}`) +}) +``` + +# A sample for C++ +For how to build the library, see [mcl](https://github.com/herumi/mcl/#installation-requirements).
+``` +#include <mcl/she.hpp> +int main() + try +{ + using namespace mcl::she; + // initialize a library + init(); + + SecretKey sec; + + // initialize a secret key by CSPRNG + sec.setByCSPRNG(); + + // create a public key from a secret key + PublicKey pub; + sec.getPublicKey(pub); + + int m1 = 1; + int m2 = 2; + int m3 = 3; + int m4 = -1; + + // encrypt m1 and m2 as CipherTextG1 class + CipherTextG1 c11, c12; + pub.enc(c11, m1); + pub.enc(c12, m2); + + // encrypt m3 and m4 as CipherTextG2 class + CipherTextG2 c21, c22; + pub.enc(c21, m3); + pub.enc(c22, m4); + + // add c11 and c12, c21 and c22 respectively + CipherTextG1 c1; + CipherTextG2 c2; + CipherTextG1::add(c1, c11, c12); + CipherTextG2::add(c2, c21, c22); + + // get ct as a CipherTextGT class by multiplying c1 with c2 + CipherTextGT ct; + CipherTextGT::mul(ct, c1, c2); + + // decrypt ct + printf("(%d + %d) * (%d + %d) = %d\n", m1, m2, m3, m4, (int)sec.dec(ct)); +} catch (std::exception& e) { + printf("ERR %s\n", e.what()); + return 1; +} + +``` +# Class methods + +## Serialization(C++) + +* `setStr(const std::string& str, int ioMode = 0)` + * set a value by `str` according to `ioMode` + +* `getStr(std::string& str, int ioMode = 0) const` +* `std::string getStr(int ioMode = 0) const` + * get a string `str` according to `ioMode` +* `size_t serialize(void *buf, size_t maxBufSize) const` + * serialize a value to `buf`, which has maxBufSize bytes allocated + * return the byte size written to `buf` + * return zero on error +* `size_t deserialize(const void *buf, size_t bufSize)` + * deserialize a value from `buf`, reading at most bufSize bytes + * return the byte size read from `buf` + * return zero on error + +## Serialization(JS) + +* `deserialize(s)` + * deserialize from `s` as Uint8Array type +* `serialize()` + * serialize a value and return Uint8Array value +* `deserializeHexStr(s)` + * deserialize as a hexadecimal string +* `serializeToHexStr()` + * serialize as a hexadecimal string + +## ioMode + +* 2 ; binary number +* 10 ; decimal number +* 16 ; hexadecimal number +* IoPrefix ; OR with 2 (resp. 16) to append a prefix 0b (resp. 0x) +* IoEcAffine ; affine coordinate (for only G1, G2) +* IoEcProj ; projective coordinate (for only G1, G2) +* IoSerialize ; same as serialize()/deserialize() + +## Notation +* the namespace of C++ is `mcl::she` +* CT means one of CipherTextG1, CipherTextG2, CipherTextGT +* The range of a plaintext is restricted to 32-bit integers for JS + +## SecretKey class + +* `void setByCSPRNG()`(C++) +* `void setByCSPRNG()`(JS) + * set a secret key by CSPRNG(cryptographically secure pseudo random number generator) + +* `int64_t dec(const CT& c) const`(C++) +* `int dec(CT c)`(JS) + * decrypt `c` +* `int64_t decViaGT(const CipherTextG1& c) const`(C++) +* `int64_t decViaGT(const CipherTextG2& c) const`(C++) +* `int decViaGT(CT c)`(JS) + * decrypt `c` through CipherTextGT +* `bool isZero(const CT& c) const`(C++) +* `bool isZero(CT c)`(JS) + * return true if decryption of `c` is zero + * it is faster than decrypting `c` and comparing the result with zero + +## PublicKey, PrecomputedPublicKey class +`PrecomputedPublicKey` is a faster version of `PublicKey` + +* `void PrecomputedPublicKey::init(const PublicKey& pub)`(C++) +* `void PrecomputedPublicKey::init(pub)`(JS) + * initialize `PrecomputedPublicKey` by a public key `pub` + +* `PrecomputedPublicKey::destroy()`(JS) + * it is necessary to call this method once the instance is no longer needed + * otherwise a memory leak will be caused + +PK means PublicKey or PrecomputedPublicKey + +* `void PK::enc(CT& c, int64_t m) const`(C++) +* `CipherTextG1 PK::encG1(m)`(JS) +* `CipherTextG2 PK::encG2(m)`(JS) +* `CipherTextGT PK::encGT(m)`(JS) + * encrypt `m` and set `c`(or return the value) + +* `void PK::reRand(CT& c) const`(C++) +* `CT PK::reRand(CT c)`(JS) + * rerandomize `c` + * for `c = Enc(m)`, it is hard to tell whether a ciphertext was produced by rerandomizing `c` + or by freshly encrypting `m` again.
+ +* `void convert(CipherTextGT& cm, const CT& ca) const` +* `CipherTextGT convert(CT ca)` + * convert `ca`(CipherTextG1 or CipherTextG2) to a `CipherTextGT` + +## CipherText class + +* `void CT::add(CT& z, const CT& x, const CT& y)`(C++) +* `CT she.add(CT x, CT y)`(JS) + * add `x` and `y` and set the value to `z`(or return the value) +* `void CT::sub(CT& z, const CT& x, const CT& y)`(C++) +* `CT she.sub(CT x, CT y)`(JS) + * subtract `y` from `x` and set the value to `z`(or return the value) +* `void CT::neg(CT& y, const CT& x)`(C++) +* `void she.neg(CT x)`(JS) + * negate `x` and set the value to `y`(or return the value) +* `void CT::mul(CT& z, const CT& x, int y)`(C++) +* `CT she.mulInt(CT x, int y)`(JS) + * multiply `x` by the integer `y` and set the value to `z`(or return the value) + +* `void CipherTextGT::mul(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y)`(C++) +* `CipherTextGT she.mul(CipherTextG1 x, CipherTextG2 y)`(JS) + * multiply `x` by `y` and set the value to `z`(or return the value) + +* `void CipherTextGT::mulML(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y)`(C++) + * multiply `x` by `y` (Miller loop only) and set the value to `z`(or return the value) + +* `CipherTextGT::finalExp(CipherText& , const CipherTextG1& x, const CipherTextG2& y)`(C++) + * mul(a, b) = finalExp(mulML(a, b)) + * add(mul(a, b), mul(c, d)) = finalExp(add(mulML(a, b), mulML(c, d))) + * i.e., an inner product can be computed by calling `mulML` on each pair of elements, adding the results, and calling `finalExp` once at the end + +## Zero knowledge proof class + +### Abstract +* ZkpBin ; verify whether `m = 0` or `1` for ciphertexts `encGi(m)(i = 1, 2, T)` +* ZkpEq ; verify whether `m1 = m2` for ciphertexts `encG1(m1)` and `encG2(m2)` +* ZkpBinEq ; verify whether `m1 = m2 = 0` or `1` for ciphertexts `encG1(m1)` and `encG2(m2)` + +### API +PK = PublicKey or PrecomputedPublicKey + +* `void PK::encWithZkpBin(CipherTextG1& c, Zkp& zkp, int m) const`(C++) +* `void PK::encWithZkpBin(CipherTextG2& c, Zkp& zkp, int m) const`(C++) +* `[CipherTextG1, ZkpBin] PK::encWithZkpBinG1(m)`(JS) +* `[CipherTextG2, ZkpBin] PK::encWithZkpBinG2(m)`(JS) + * encrypt `m`(=0 or 1) and set the ciphertext `c` and zero-knowledge proof `zkp`(or returns [c, zkp]) + * throw exception if m != 0 and m != 1 +* `void PK::encWithZkpEq(CipherTextG1& c1, CipherTextG2& c2, ZkpEq& zkp, const INT& m) const`(C++) +* `[CipherTextG1, CipherTextG2, ZkpEq] PK::encWithZkpEq(m)`(JS) + * encrypt `m` and set the ciphertexts `c1`, `c2` and zero-knowledge proof `zkp`(or returns [c1, c2, zkp]) +* `void PK::encWithZkpBinEq(CipherTextG1& c1, CipherTextG2& c2, ZkpBinEq& zkp, int m) const`(C++) +* `[CipherTextG1, CipherTextG2, ZkpEqBin] PK::encWithZkpBinEq(m)`(JS) + * encrypt `m`(=0 or 1) and set ciphertexts `c1`, `c2` and zero-knowledge proof `zkp`(or returns [c1, c2, zkp]) + * throw exception if m != 0 and m != 1 + +## Global functions + +* `void init(const CurveParam& cp, size_t hashSize = 1024, size_t tryNum = 2048)`(C++) +* `void init(curveType = she.BN254, hashSize = 1024, tryNum = 2048)`(JS) + * initialize a table of size `hashSize` for solving the DLP and set the maximum try count `tryNum`.
+ * the range of solvable `m` is |m| <= hashSize * tryNum +* `getHashTableGT().load(InputStream& is)`(C++) +* `she.loadTableForGTDLP(Uint8Array a)`(JS) + * load a DLP table for CipherTextGT + * this resets the value of `hashSize` used in `init()` + * `https://herumi.github.io/she-dlp-table/she-dlp-0-20-gt.bin` is a precomputed table +* `void useDecG1ViaGT(bool use)`(C++/JS) +* `void useDecG2ViaGT(bool use)`(C++/JS) + * decrypt a ciphertext of CipherTextG1 or CipherTextG2 through CipherTextGT + * it is faster when decrypting a large value + +# License + +[modified new BSD License](https://github.com/herumi/mcl/blob/master/COPYRIGHT) + +# Author + +光成滋生 MITSUNARI Shigeo(herumi@nifty.com) diff --git a/vendor/github.com/byzantine-lab/mcl/misc/she/she.pdf b/vendor/github.com/byzantine-lab/mcl/misc/she/she.pdf new file mode 100644 index 000000000..355a308b3 Binary files /dev/null and b/vendor/github.com/byzantine-lab/mcl/misc/she/she.pdf differ diff --git a/vendor/github.com/byzantine-lab/mcl/mk.bat b/vendor/github.com/byzantine-lab/mcl/mk.bat new file mode 100644 index 000000000..19eb84197 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/mk.bat @@ -0,0 +1,20 @@ +@echo off +call setvar.bat +if "%1"=="-s" ( + echo use static lib + set CFLAGS=%CFLAGS% /DMCLBN_DONT_EXPORT +) else if "%1"=="-d" ( + echo use dynamic lib +) else ( + echo "mk (-s|-d) <source file>" + goto exit +) +set SRC=%2 +set EXE=%SRC:.cpp=.exe% +set EXE=%EXE:.c=.exe% +set EXE=%EXE:test\=bin\% +set EXE=%EXE:sample\=bin\% +echo cl %CFLAGS% %2 /Fe:%EXE% /link %LDFLAGS% +cl %CFLAGS% %2 /Fe:%EXE% /link %LDFLAGS% + +:exit diff --git a/vendor/github.com/byzantine-lab/mcl/mklib.bat b/vendor/github.com/byzantine-lab/mcl/mklib.bat new file mode 100644 index 000000000..389b69009 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/mklib.bat @@ -0,0 +1,39 @@ +@echo off +call setvar.bat +if "%1"=="dll" ( + echo make dynamic library DLL +) else ( + echo make static library LIB +) +rem nasm -f win64 -D_WIN64 src\asm\low_x86-64.asm +rem lib /OUT:lib\mcl.lib /nodefaultlib fp.obj src\asm\low_x86-64.obj + +echo cl /c %CFLAGS% src\fp.cpp /Foobj\fp.obj + cl /c %CFLAGS% src\fp.cpp /Foobj\fp.obj +echo lib /nologo /OUT:lib\mcl.lib /nodefaultlib obj\fp.obj + lib /nologo /OUT:lib\mcl.lib /nodefaultlib obj\fp.obj + +if "%1"=="dll" ( + echo cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj + cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj /DMCLBN_NO_AUTOLINK + echo link /nologo /DLL /OUT:bin\mclbn256.dll obj\bn_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn256.lib + link /nologo /DLL /OUT:bin\mclbn256.dll obj\bn_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn256.lib + + echo cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj + cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj /DMCLBN_NO_AUTOLINK + echo link /nologo /DLL /OUT:bin\mclbn384.dll obj\bn_c384.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn384.lib + link /nologo /DLL /OUT:bin\mclbn384.dll obj\bn_c384.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn384.lib + + echo cl /c %CFLAGS% src\she_c256.cpp /Foobj\she_c256.obj /DMCLBN_NO_AUTOLINK + cl /c %CFLAGS% src\she_c256.cpp /Foobj\she_c256.obj /DMCLBN_NO_AUTOLINK + echo link /nologo /DLL /OUT:bin\mclshe256.dll obj\she_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\mclshe_c256.lib + link /nologo /DLL /OUT:bin\mclshe256.dll obj\she_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\mclshe_c256.lib +) else ( + echo cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj + cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj + lib /nologo /OUT:lib\mclbn256.lib /nodefaultlib obj\bn_c256.obj 
lib\mcl.lib + +echo cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj + cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj + lib /nologo /OUT:lib\mclbn384.lib /nodefaultlib obj\bn_c384.obj lib\mcl.lib +) diff --git a/vendor/github.com/byzantine-lab/mcl/obj/.emptydir b/vendor/github.com/byzantine-lab/mcl/obj/.emptydir new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/byzantine-lab/mcl/readme.md b/vendor/github.com/byzantine-lab/mcl/readme.md new file mode 100644 index 000000000..39b3d4d42 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/readme.md @@ -0,0 +1,457 @@ +[![Build Status](https://travis-ci.org/herumi/mcl.png)](https://travis-ci.org/herumi/mcl) + +# mcl + +A portable and fast pairing-based cryptography library. + +# Abstract + +mcl is a library for pairing-based cryptography. +The current version supports the optimal Ate pairing over BN curves and BLS12-381 curves. + +# News +* (Breaks backward compatibility) libmcl_dy.a is renamed to libmcl.a + * The option SHARE_BASENAME_SUF is removed +* The 2nd argument of `mclBn_init` is changed from `maxUnitSize` to `compiledTimeVar`, which must be `MCLBN_COMPILED_TIME_VAR`. +* (Breaks backward compatibility) mapToGi for BLS12 is changed; the map-to function for BN is used. +If `MCL_USE_OLD_MAPTO_FOR_BLS12` is defined, the old function is used, but this will be removed in the future. + +# Support architecture + +* x86-64 Windows + Visual Studio +* x86, x86-64 Linux + gcc/clang +* ARM Linux +* ARM64 Linux +* (maybe any platform to be supported by LLVM) +* WebAssembly + +# Support curves + +p(z) = 36z^4 + 36z^3 + 24z^2 + 6z + 1. + +* BN254 ; a BN curve over the 254-bit prime p(z) where z = -(2^62 + 2^55 + 1). +* BN\_SNARK1 ; a BN curve over a 254-bit prime p such that n := p + 1 - t has high 2-adicity. +* BN381\_1 ; a BN curve over the 381-bit prime p(z) where z = -(2^94 + 2^76 + 2^72 + 1). +* BN462 ; a BN curve over the 462-bit prime p(z) where z = 2^114 + 2^101 - 2^14 - 1. +* BLS12\_381 ; [a BLS12-381 curve](https://blog.z.cash/new-snark-curve/) + +# Benchmark + +## The latest benchmark(2018/11/7) + +### Intel Core i7-6700 3.4GHz(Skylake), Ubuntu 18.04.1 LTS + +curveType | binary|clang-6.0.0|gcc-7.3.0| +----------|--------------------|-----------|---------| +BN254 | bin/bn\_test.exe| 882Kclk| 933Kclk| +BLS12-381 | bin/bls12\_test.exe| 2290Kclk| 2630Kclk| + +### Intel Core i7-7700 3.6GHz(Kaby Lake), Ubuntu 18.04.1 LTS on Windows 10 Vmware + +curveType | binary|clang-6.0.0|gcc-7.3.0| +----------|--------------------|-----------|---------| +BN254 | bin/bn\_test.exe| 900Kclk| 954Kclk| +BLS12-381 | bin/bls12\_test.exe| 2340Kclk| 2680Kclk| + +* we are investigating why gcc is slower than clang. + +## Higher-bit BN curve benchmark + +For JavaScript(WebAssembly), see [ID based encryption demo](https://herumi.github.io/mcl-wasm/ibe-demo.html). + +parameter | x64| Firefox on x64|Safari on iPhone7| +-----------|-----|---------------|-----------------| +BN254 | 0.25| 2.48| 4.78| +BN381\_1 | 0.95| 7.91| 11.74| +BN462 | 2.16| 14.73| 22.77| + +* x64 : 'Kaby Lake Core i7-7700(3.6GHz)'. +* Firefox : 64-bit version 58. +* iPhone7 : iOS 11.2.1. +* BN254 is by `test/bn_test.cpp`. +* BN381\_1 and BN462 are by `test/bn512_test.cpp`. +* All the timings are given in ms (milliseconds). + +The other benchmark results are [bench.txt](bench.txt). + +## An old benchmark of a BN curve BN254(2016/12/25). + +* x64, x86 ; Intel Core i7-6700 3.4GHz(Skylake) up to 4GHz on Ubuntu 16.04.
 * `sudo cpufreq-set -g performance` +* arm ; 900MHz quad-core ARM Cortex-A7 on Raspberry Pi2, Linux 4.4.11-v7+ +* arm64 ; 1.2GHz ARM Cortex-A53 [HiKey](http://www.96boards.org/product/hikey/) + +software | x64| x86| arm|arm64(msec) +---------------------------------------------------------|------|-----|----|----- +[ate-pairing](https://github.com/herumi/ate-pairing) | 0.21 | - | - | - +mcl | 0.31 | 1.6 |22.6| 3.9 +[TEPLA](http://www.cipher.risk.tsukuba.ac.jp/tepla/) | 1.76 | 3.7 | 37 | 17.9 +[RELIC](https://github.com/relic-toolkit/relic) PRIME=254| 0.30 | 3.5 | 36 | - +[MIRACL](https://github.com/miracl/MIRACL) ake12bnx | 4.2 | - | 78 | - +[NEONabe](http://sandia.cs.cinvestav.mx/Site/NEONabe) | - | - | 16 | - + +* compile option for RELIC +``` +cmake -DARITH=x64-asm-254 -DFP_PRIME=254 -DFPX_METHD="INTEG;INTEG;LAZYR" -DPP_METHD="LAZYR;OATEP" +``` + +# Installation Requirements + +* [GMP](https://gmplib.org/) and OpenSSL +``` +apt install libgmp-dev libssl-dev +``` + +Create a working directory (e.g., work) and clone the following repositories. +``` +mkdir work +cd work +git clone git://github.com/herumi/mcl +git clone git://github.com/herumi/cybozulib_ext ; Windows only +``` +* Cybozulib\_ext is a prerequisite for running OpenSSL and GMP on VC (Visual C++). + +# (Option) Without GMP +``` +make MCL_USE_GMP=0 +``` +Define `MCL_USE_VINT` before including `bn.hpp` + +# (Option) Without OpenSSL +``` +make MCL_USE_OPENSSL=0 +``` +Define `MCL_DONT_USE_OPENSSL` before including `bn.hpp` + +# Build and test on x86-64 Linux, macOS, ARM and ARM64 Linux +To make lib/libmcl.a and test it: +``` +cd work/mcl +make test +``` +To benchmark a pairing: +``` +bin/bn_test.exe +``` +To make sample programs: +``` +make sample +``` + +If you want to change compiler options for optimization, set `CFLAGS_OPT_USER`. +``` +make CFLAGS_OPT_USER="-O2" +``` + +## Build for 32-bit Linux +Build openssl and gmp for 32-bit mode and install `` +``` +make ARCH=x86 CFLAGS_USER="-I /include" LDFLAGS_USER="-L /lib -Wl,-rpath,/lib" +``` + +## Build for 64-bit Windows +1) make a static library and use it + +``` +mklib +mk -s test\bn_c256_test.cpp +bin\bn_c256_test.exe +``` +2) make a dynamic library and use it + +``` +mklib dll +mk -d test\bn_c256_test.cpp +bin\bn_c256_test.exe +``` + +Open mcl.sln and build it, or, if you have msbuild.exe: +``` +msbuild /p:Configuration=Release +``` + +## Build with cmake +For Linux, +``` +mkdir build +cd build +cmake .. +make +``` +For Visual Studio, +``` +mkdir build +cd build +cmake .. -A x64 +msbuild mcl.sln /p:Configuration=Release /m +``` +## Build for wasm(WebAssembly) +mcl supports emcc (Emscripten), and `test/bn_test.cpp` runs on browsers such as Firefox, Chrome and Edge. + +* [IBE on browser](https://herumi.github.io/mcl-wasm/ibe-demo.html) +* [SHE on browser](https://herumi.github.io/she-wasm/she-demo.html) +* [BLS signature on browser](https://herumi.github.io/bls-wasm/bls-demo.html) + +The timing of a pairing on `BN254` is 2.8msec on 64-bit Firefox with Skylake 3.4GHz. + +### Node.js + +* [mcl-wasm](https://www.npmjs.com/package/mcl-wasm) pairing library +* [bls-wasm](https://www.npmjs.com/package/bls-wasm) BLS signature library +* [she-wasm](https://www.npmjs.com/package/she-wasm) two-level homomorphic encryption library + +### SELinux +mcl uses the Xbyak JIT engine if it is available on the x64 architecture; +otherwise mcl uses slightly slower functions generated by LLVM. +CentOS enables the SELinux security policy by default, which disables JIT.
+``` +% sudo setenforce 1 +% getenforce +Enforcing +% bin/bn_test.exe +JIT 0 +pairing 1.496Mclk +finalExp 581.081Kclk + +% sudo setenforce 0 +% getenforce +Permissive +% bin/bn_test.exe +JIT 1 +pairing 1.394Mclk +finalExp 546.259Kclk +``` + +# Libraries + +* G1 and G2 are defined over Fp +* The order of G1 and G2 is r. +* Use `bn256.hpp` if only BN254 is used. + +## C++ library + +* libmcl.a ; static C++ library of mcl +* libmcl.so ; shared C++ library of mcl +* the default parameter of curveType is BN254 + +header |supported curveType |sizeof Fr|sizeof Fp| +--------------|-------------------------|---------|---------| +bn256.hpp |BN254 | 32 | 32 | +bls12_381.hpp |BLS12_381, BN254 | 32 | 48 | +bn384.hpp |BN381_1, BLS12_381, BN254| 48 | 48 | + +## C library + +* Define `MCLBN_FR_UNIT_SIZE` and `MCLBN_FP_UNIT_SIZE` and include bn.h +* Set `MCLBN_FR_UNIT_SIZE = MCLBN_FP_UNIT_SIZE` unless `MCLBN_FR_UNIT_SIZE` is defined + + +library |MCLBN_FR_UNIT_SIZE|MCLBN_FP_UNIT_SIZE| +------------------|------------------|------------------| +sizeof | Fr | Fp | +libmclbn256.a | 4 | 4 | +libmclbn384_256.a | 4 | 6 | +libmclbn384.a | 6 | 6 | + + +* libmclbn*.a ; static C library +* libmclbn*.so ; shared C library + +### 2nd argument of `mclBn_init` +Pass `MCLBN_COMPILED_TIME_VAR` as the 2nd argument of `mclBn_init`; it +is defined as `MCLBN_FR_UNIT_SIZE * 10 + MCLBN_FP_UNIT_SIZE`. +This parameter is used to make sure that the library is built and used with the same values. + +# How to initialize the pairing library +Call `mcl::bn256::initPairing` before calling any operations. +``` +#include <mcl/bn256.hpp> +mcl::bn::CurveParam cp = mcl::BN254; // or mcl::BN_SNARK1 +mcl::bn256::initPairing(cp); +mcl::bn256::G1 P(...); +mcl::bn256::G2 Q(...); +mcl::bn256::Fp12 e; +mcl::bn256::pairing(e, P, Q); +``` +1. (BN254) a BN curve over the 254-bit prime p = p(z) where z = -(2^62 + 2^55 + 1). +2. (BN_SNARK1) a BN curve over a 254-bit prime p such that n := p + 1 - t has high 2-adicity. +3. BN381_1 with `mcl/bn384.hpp`. +4. BN462 with `mcl/bn512.hpp`. + +See [test/bn_test.cpp](https://github.com/herumi/mcl/blob/master/test/bn_test.cpp). + +## Default constructor of Fp, Ec, etc. +A default constructor does not initialize the instance. +Set a valid value before referring to it. + +## Definition of groups + +The curve equation for a BN curve is: + + E/Fp: y^2 = x^3 + b . + +* the cyclic group G1 is instantiated as E(Fp)[n] where n := p + 1 - t; +* the cyclic group G2 is instantiated as the inverse image of E'(Fp^2)[n] under a twisting isomorphism phi from E' to E; and +* the pairing e: G1 x G2 -> Fp12 is the optimal ate pairing. + +The field Fp12 is constructed via the following tower: + +* Fp2 = Fp[u] / (u^2 + 1) +* Fp6 = Fp2[v] / (v^3 - Xi) where Xi = u + 1 +* Fp12 = Fp6[w] / (w^2 - v) +* GT = { x in Fp12 | x^r = 1 } + + +## Arithmetic operations + +G1 and G2 are additive groups and have the following operations: + +* T::add(T& z, const T& x, const T& y); // z = x + y +* T::sub(T& z, const T& x, const T& y); // z = x - y +* T::neg(T& y, const T& x); // y = -x +* T::mul(T& z, const T& x, const INT& y); // z = scalar multiplication of x by y + +Remark: &z == &x or &y are allowed. INT means an integer type such as Fr, int and mpz_class. + +`T::mul` uses the GLV method, so `G2::mul` returns a wrong value if x is not in G2. +Use `T::mulGeneric(T& z, const T& x, const INT& y)` for x in phi^-1(E'(Fp^2)) - G2.
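+ +For example, here is a minimal sketch of these group operations (illustrative only; it assumes the default BN254 setup of `bn256.hpp` described above and the `hashAndMapToG1` function from the "Map to points" section below): +``` +#include <mcl/bn256.hpp> +#include <stdio.h> +using namespace mcl::bn256; + +int main() +{ + initPairing(); // BN254 by default + G1 P, Q, R; + hashAndMapToG1(P, "abc", 3); // P = a point derived from the hash of "abc" + G1::add(Q, P, P); // Q = P + P + G1::dbl(R, P); // R = 2P + printf("add == dbl: %s\n", Q == R ? "ok" : "ng"); // ok + Fr x = 123; + G1::mul(Q, P, x); // Q = xP; the GLV method is used internally + return 0; +} +```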
+ +Fp, Fp2, Fp6 and Fp12 have the following operations: + +* T::add(T& z, const T& x, const T& y); // z = x + y +* T::sub(T& z, const T& x, const T& y); // z = x - y +* T::mul(T& z, const T& x, const T& y); // z = x * y +* T::div(T& z, const T& x, const T& y); // z = x / y +* T::neg(T& y, const T& x); // y = -x +* T::inv(T& y, const T& x); // y = 1/x +* T::pow(T& z, const T& x, const INT& y); // z = x^y +* Fp12::unitaryInv(T& y, const T& x); // y = conjugate of x + +Remark: `Fp12::mul` uses the GLV method, so it returns a wrong value if x is not in GT. +Use `Fp12::mulGeneric` for x in Fp12 - GT. + +## Map to points + +Use these functions to make a point on G1 or G2. + +* mapToG1(G1& P, const Fp& x); // assume x != 0 +* mapToG2(G2& P, const Fp2& x); +* hashAndMapToG1(G1& P, const void *buf, size_t bufSize); // set P by the hash value of [buf, bufSize) +* hashAndMapToG2(G2& P, const void *buf, size_t bufSize); + +These functions map x into Gi according to [\[_Faster hashing to G2_\]]. + +## String format of G1 and G2 +G1 and G2 consist of three Fp elements (x, y, z) in Jacobian coordinates. +The normalize() method normalizes a point to the affine coordinates (x, y, 1) or (0, 0, 0). + +The getStr() method returns one of the following formats: + +* `0` ; infinity +* `1 <x> <y>` ; uncompressed format +* `2 <x>` ; compressed format for even y +* `3 <x>` ; compressed format for odd y + +## Generator of G1 and G2 + +If you want to use the same generators of BLS12-381 as [zkcrypto](https://github.com/zkcrypto/pairing/tree/master/src/bls12_381#g2), then: + +``` +// G1 P +P.setStr('1 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569') + +// G2 Q +Q.setStr('1 352701069587466618187139116011060144890029952792775240219908644239793785735715026873347600343865175952761926303160 3059144344244213709971259814753781636986470325476647558659373206291635324768958432433509563104347017837885763365758 1985150602287291935568054521177171638300868978215655730859378665066344726373823718423869104263333984641494340347905 927553665492332455747201965776037880757740193453592970025027978793976877002675564980949289727957565575433344219582') +``` + +## Serialization format of G1 and G2 + +Pseudo-code to serialize a point p: +``` +if bit-length(p) % 8 != 0: + size = Fp::getByteSize() + if p is zero: + return [0] * size + else: + s = x.serialize() + # x in Fp2 is odd <=> x.a is odd + if y is odd: + s[byte-length(s) - 1] |= 0x80 + return s +else: + size = Fp::getByteSize() + 1 + if p is zero: + return [0] * size + else: + s = x.serialize() + if y is odd: + return 2:s + else: + return 3:s +``` + +## Verify an element in G2 +`G2::isValid()` checks that the element is on the curve of G2 and that its order is r, to guard against subgroup attacks. +`G2::set()`, `G2::setStr` and `operator<<` also check the order. +If you verify elements outside of the library, then you can skip this verification by calling `G2::verifyOrderG2(false)`. + +# How to make asm files (optional) +The asm files generated this way are already in `src/asm`, so this step is not necessary. + +Install [LLVM](http://llvm.org/). +``` +make MCL_USE_LLVM=1 LLVM_VER=<ver> UPDATE_ASM=1 +``` +For example, specify `-3.8` for `<ver>` if `opt-3.8` and `llc-3.8` are installed.
+ +If you want to use Fp with a 1024-bit prime on x86-64, then: +``` +make MCL_USE_LLVM=1 LLVM_VER=<ver> UPDATE_ASM=1 MCL_MAX_BIT_SIZE=1024 +``` + +# API for two-level homomorphic encryption +* [_Efficient Two-level Homomorphic Encryption in Prime-order Bilinear Groups and A Fast Implementation in WebAssembly_](https://dl.acm.org/citation.cfm?doid=3196494.3196552), N. Attrapadung, G. Hanaoka, S. Mitsunari, Y. Sakai, +K. Shimizu, and T. Teruya. ASIACCS 2018 +* [she-api](https://github.com/herumi/mcl/blob/master/misc/she/she-api.md) +* [she-api(Japanese)](https://github.com/herumi/mcl/blob/master/misc/she/she-api-ja.md) + +A minimal usage sketch is given after the references below. + +# Java API +See [java.md](https://github.com/herumi/mcl/blob/master/java/java.md) + +# License + +modified new BSD License +http://opensource.org/licenses/BSD-3-Clause + +This library contains parts of the following software, licensed under BSD-3-Clause. +* [xbyak](https://github.com/herumi/xbyak) +* [cybozulib](https://github.com/herumi/cybozulib) +* [Lifted-ElGamal](https://github.com/aistcrypt/Lifted-ElGamal) + +# References +* [ate-pairing](https://github.com/herumi/ate-pairing/) +* [_Faster Explicit Formulas for Computing Pairings over Ordinary Curves_](http://dx.doi.org/10.1007/978-3-642-20465-4_5), + D.F. Aranha, K. Karabina, P. Longa, C.H. Gebotys, J. Lopez, + EUROCRYPT 2011, ([preprint](http://eprint.iacr.org/2010/526)) +* [_High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves_](http://dx.doi.org/10.1007/978-3-642-17455-1_2), + Jean-Luc Beuchat, Jorge Enrique González Díaz, Shigeo Mitsunari, Eiji Okamoto, Francisco Rodríguez-Henríquez, Tadanori Teruya, + Pairing 2010, ([preprint](http://eprint.iacr.org/2010/354)) +* [_Faster hashing to G2_](http://dx.doi.org/10.1007/978-3-642-28496-0_25), Laura Fuentes-Castañeda, Edward Knapp, Francisco Rodríguez-Henríquez, + SAC 2011, ([preprint](https://eprint.iacr.org/2008/530)) +* [_Skew Frobenius Map and Efficient Scalar Multiplication for Pairing–Based Cryptography_](https://www.researchgate.net/publication/221282560_Skew_Frobenius_Map_and_Efficient_Scalar_Multiplication_for_Pairing-Based_Cryptography), +Y. Sakemi, Y. Nogami, K. Okeya, Y. Morikawa, CANS 2008.
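+ +As mentioned in the two-level homomorphic encryption section above, here is a minimal sketch of the SHE flow (illustrative only; it follows `sample/she_smpl.cpp` from this patch and assumes the C++ header path `mcl/she.hpp`): +``` +#include <mcl/she.hpp> +#include <stdio.h> +using namespace mcl::she; + +int main() +{ + SHE::init(); // initialize the library (BN254 by default) + SecretKey sec; + sec.setByCSPRNG(); // generate a random secret key + SHE::setRangeForDLP(1024); // table size used to decode the DLP on decryption + PublicKey pub; + sec.getPublicKey(pub); + CipherTextG1 c1; + CipherTextG2 c2; + pub.enc(c1, 3); // Enc(3) on G1 + pub.enc(c2, 4); // Enc(4) on G2 + CipherTextGT ct; + CipherTextGT::mul(ct, c1, c2); // one homomorphic multiplication: Enc(12) + ct.add(ct); // additive homomorphism: Enc(24) + printf("dec=%d\n", (int)sec.dec(ct)); // 24 + return 0; +} +```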
+ +# History + +* 2019/Mar/22 v0.92 shortcut for Ec::mul(Px, P, x) if P = 0 +* 2019/Mar/21 python binding of she256 for Linux/Mac/Windows +* 2019/Mar/14 v0.91 modp supports mcl-wasm +* 2019/Mar/12 v0.90 fix Vint::setArray(x) for x == this +* 2019/Mar/07 add mclBnFr_setLittleEndianMod, mclBnFp_setLittleEndianMod +* 2019/Feb/20 LagrangeInterpolation sets out = yVec[0] if k = 1 +* 2019/Jan/31 add mclBnFp_mapToG1, mclBnFp2_mapToG2 +* 2019/Jan/31 fix crash on x64-CPU without AVX (thanks to mortdeus) + +# Author + +光成滋生 MITSUNARI Shigeo(herumi@nifty.com) diff --git a/vendor/github.com/byzantine-lab/mcl/release.props b/vendor/github.com/byzantine-lab/mcl/release.props new file mode 100644 index 000000000..886ce6890 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/release.props @@ -0,0 +1,12 @@ + + + + + + + + MultiThreaded + + + + \ No newline at end of file diff --git a/vendor/github.com/byzantine-lab/mcl/sample/bench.cpp b/vendor/github.com/byzantine-lab/mcl/sample/bench.cpp new file mode 100644 index 000000000..0f865b189 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/sample/bench.cpp @@ -0,0 +1,233 @@ +#include +#include +#include +#include +#include +#include + +typedef mcl::FpT<> Fp; +typedef mcl::FpT<mcl::ZnTag> Zn; +typedef mcl::EcT<Fp> Ec; + +void benchFpSub(const char *pStr, const char *xStr, const char *yStr, mcl::fp::Mode mode) +{ + const char *s = mcl::fp::ModeToStr(mode); + Fp::init(pStr, mode); + Fp x(xStr); + Fp y(yStr); + + double addT, subT, mulT, sqrT, invT; + CYBOZU_BENCH_T(addT, Fp::add, x, x, x); + CYBOZU_BENCH_T(subT, Fp::sub, x, x, y); + CYBOZU_BENCH_T(mulT, Fp::mul, x, x, x); + CYBOZU_BENCH_T(sqrT, Fp::sqr, x, x); + CYBOZU_BENCH_T(invT, x += y;Fp::inv, x, x); // avoid same jmp + printf("%10s bit % 3d add %8.2f sub %8.2f mul %8.2f sqr %8.2f inv %8.2f\n", s, (int)Fp::getBitSize(), addT, subT, mulT, sqrT, invT); +} + +void benchFp(size_t bitSize, int mode) +{ + const struct { + size_t bitSize; + const char *p; + const char *x; + const char *y; + } tbl[] = { + { + 192, + "0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d", + "0x148094810948190412345678901234567900342423332197", + "0x7fffffffffffffffffffffe26f2fc170f69466a74defd8d", + }, + { + 256, + "0x2523648240000001ba344d80000000086121000000000013a700000000000013", + "0x1480948109481904123456789234234242423424201234567900342423332197", + "0x151342342342341517fffffffffffffffffffffe26f2fc170f69466a74defd8d", + }, + { + 384, + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", + "0x19481084109481094820948209482094820984290482212345678901234567900342308472047204720422423332197", + "0x209348209481094820984209842094820948204204243123456789012345679003423084720472047204224233321972", + + }, + { + 521, + "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0x2908209582095820941098410948109482094820984209840294829049240294242498540975555312345678901234567900342308472047204720422423332197", + "0x3948384209834029834092384204920349820948205872380573205782385729385729385723985837ffffffffffffffffffffffe26f2fc170f69466a74defd8d", + + }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + if (bitSize != 0 && tbl[i].bitSize != bitSize) continue; + if (mode & 1) benchFpSub(tbl[i].p, tbl[i].x, tbl[i].y, mcl::fp::FP_GMP); + if (mode & 2) benchFpSub(tbl[i].p, tbl[i].x, tbl[i].y, mcl::fp::FP_GMP_MONT); +#ifdef MCL_USE_LLVM + if (mode & 4) benchFpSub(tbl[i].p, tbl[i].x, tbl[i].y,
mcl::fp::FP_LLVM); + if (mode & 8) benchFpSub(tbl[i].p, tbl[i].x, tbl[i].y, mcl::fp::FP_LLVM_MONT); +#endif +#ifdef MCL_USE_XBYAK + if (mode & 16) benchFpSub(tbl[i].p, tbl[i].x, tbl[i].y, mcl::fp::FP_XBYAK); +#endif + } +} + +void benchEcSub(const mcl::EcParam& para, mcl::fp::Mode mode, mcl::ec::Mode ecMode) +{ + Fp::init(para.p, mode); + Zn::init(para.n); + Ec::init(para.a, para.b, ecMode); + Fp x(para.gx); + Fp y(para.gy); + Ec P(x, y); + Ec P2; Ec::add(P2, P, P); + Ec Q = P + P + P; + double addT, add2T, subT, dblT, mulT, mulCTT, mulRandT, mulCTRandT, normT; + CYBOZU_BENCH_T(addT, P = P2; Ec::add, Q, P, Q); + P.normalize(); + CYBOZU_BENCH_T(add2T, Ec::add, Q, P, Q); + CYBOZU_BENCH_T(subT, Ec::sub, Q, P, Q); + CYBOZU_BENCH_T(dblT, Ec::dbl, P, P); + Zn z("3"); + CYBOZU_BENCH_T(mulT, Ec::mul, Q, P, z); + CYBOZU_BENCH_T(mulCTT, Ec::mulCT, Q, P, z); + cybozu::XorShift rg; + z.setRand(rg); + CYBOZU_BENCH_T(mulRandT, Ec::mul, Q, P, z); + CYBOZU_BENCH_T(mulCTRandT, Ec::mulCT, Q, P, z); + CYBOZU_BENCH_T(normT, Q = P; Q.normalize); + printf("%10s %10s add %8.2f add2 %8.2f sub %8.2f dbl %8.2f mul(3) %8.2f mulCT(3) %8.2f mul(rand) %8.2f mulCT(rand) %8.2f norm %8.2f\n", para.name, mcl::fp::ModeToStr(mode), addT, add2T, subT, dblT, mulT, mulCTT, mulRandT, mulCTRandT, normT); + +} +void benchEc(size_t bitSize, int mode, mcl::ec::Mode ecMode) +{ + const struct mcl::EcParam tbl[] = { + mcl::ecparam::p160_1, + mcl::ecparam::secp160k1, + mcl::ecparam::secp192k1, + mcl::ecparam::NIST_P192, + mcl::ecparam::secp224k1, + mcl::ecparam::secp256k1, + mcl::ecparam::NIST_P224, + mcl::ecparam::NIST_P256, +// mcl::ecparam::secp384r1, + mcl::ecparam::NIST_P384, +// mcl::ecparam::secp521r1, + mcl::ecparam::NIST_P521, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + if (bitSize != 0 && tbl[i].bitSize != bitSize) continue; + benchEcSub(tbl[i], mcl::fp::FP_AUTO, ecMode); + if (mode & 1) benchEcSub(tbl[i], mcl::fp::FP_GMP, ecMode); + if (mode & 2) benchEcSub(tbl[i], mcl::fp::FP_GMP_MONT, ecMode); +#ifdef MCL_USE_LLVM + if (mode & 4) benchEcSub(tbl[i], mcl::fp::FP_LLVM, ecMode); + if (mode & 8) benchEcSub(tbl[i], mcl::fp::FP_LLVM_MONT, ecMode); +#endif +#ifdef MCL_USE_XBYAK + if (mode & 16) benchEcSub(tbl[i], mcl::fp::FP_XBYAK, ecMode); +#endif + } +} + +void benchToStr16() +{ + puts("benchToStr16"); + const char *tbl[] = { + "0x0", + "0x5", + "0x123", + "0x123456789012345679adbc", + "0xffffffff26f2fc170f69466a74defd8d", + "0x100000000000000000000000000000033", + "0x11ee12312312940000000000000000000000000002342343" + }; + Fp::init("0xffffffffffffffffffffffffffffffffffffffffffffff13"); + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + char buf[128]; + std::string str; + Fp x(tbl[i]); + CYBOZU_BENCH("fp::arrayToHex", mcl::fp::arrayToHex, buf, sizeof(buf), x.getUnit(), x.getUnitSize(), true); + mpz_class y(tbl[i]); + CYBOZU_BENCH("gmp:getStr ", mcl::gmp::getStr, str, y, 16); + } +} + +void benchFromStr16() +{ + puts("benchFromStr16"); + const char *tbl[] = { + "0", + "5", + "123", + "123456789012345679adbc", + "ffffffff26f2fc170f69466a74defd8d", + "100000000000000000000000000000033", + "11ee12312312940000000000000000000000000002342343" + }; + Fp::init("0xffffffffffffffffffffffffffffffffffffffffffffff13"); + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + std::string str = tbl[i]; + Fp x; + const size_t N = 64; + mcl::fp::Unit buf[N]; + CYBOZU_BENCH("fp:hexToArray", mcl::fp::hexToArray, buf, N, str.c_str(), str.size()); + + mpz_class y; + CYBOZU_BENCH("gmp:setStr ", mcl::gmp::setStr, y, str, 
16); + } +} + +int main(int argc, char *argv[]) + try +{ + size_t bitSize; + int mode; + bool ecOnly; + bool fpOnly; + bool misc; + mcl::ec::Mode ecMode; + std::string ecModeStr; + cybozu::Option opt; + opt.appendOpt(&bitSize, 0, "s", ": bitSize"); + opt.appendOpt(&mode, 0, "m", ": mode(0:all, sum of 1:gmp, 2:gmp+mont, 4:llvm, 8:llvm+mont, 16:xbyak)"); + opt.appendBoolOpt(&ecOnly, "ec", ": ec only"); + opt.appendBoolOpt(&fpOnly, "fp", ": fp only"); + opt.appendBoolOpt(&misc, "misc", ": other benchmark"); + opt.appendOpt(&ecModeStr, "jacobi", "ecmode", ": jacobi or proj"); + opt.appendHelp("h", ": show this message"); + if (!opt.parse(argc, argv)) { + opt.usage(); + return 1; + } + if (ecModeStr == "jacobi") { + ecMode = mcl::ec::Jacobi; + } else if (ecModeStr == "proj") { + ecMode = mcl::ec::Proj; + } else { + printf("bad ecstr %s\n", ecModeStr.c_str()); + opt.usage(); + return 1; + } + if (mode < 0 || mode > 31) { + printf("bad mode %d\n", mode); + opt.usage(); + return 1; + } + if (mode == 0) mode = 31; + if (misc) { + benchToStr16(); + benchFromStr16(); + } else { + if (!ecOnly) benchFp(bitSize, mode); + if (!fpOnly) { + printf("ecMode=%s\n", ecModeStr.c_str()); + benchEc(bitSize, mode, ecMode); + } + } +} catch (std::exception& e) { + printf("ERR %s\n", e.what()); +} + diff --git a/vendor/github.com/byzantine-lab/mcl/sample/bls_sig.cpp b/vendor/github.com/byzantine-lab/mcl/sample/bls_sig.cpp new file mode 100644 index 000000000..d75f7d427 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/sample/bls_sig.cpp @@ -0,0 +1,70 @@ +/** + @file + @brief a sample of BLS signature + see https://github.com/herumi/bls + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause + +*/ +#include +#include + +using namespace mcl::bn256; + +void Hash(G1& P, const std::string& m) +{ + Fp t; + t.setHashOf(m); + mapToG1(P, t); +} + +void KeyGen(Fr& s, G2& pub, const G2& Q) +{ + s.setRand(); + G2::mul(pub, Q, s); // pub = sQ +} + +void Sign(G1& sign, const Fr& s, const std::string& m) +{ + G1 Hm; + Hash(Hm, m); + G1::mul(sign, Hm, s); // sign = s H(m) +} + +bool Verify(const G1& sign, const G2& Q, const G2& pub, const std::string& m) +{ + Fp12 e1, e2; + G1 Hm; + Hash(Hm, m); + pairing(e1, sign, Q); // e1 = e(sign, Q) + pairing(e2, Hm, pub); // e2 = e(Hm, sQ) + return e1 == e2; +} + +int main(int argc, char *argv[]) +{ + std::string m = argc == 1 ? "hello mcl" : argv[1]; + + // setup parameter + initPairing(); + G2 Q; + mapToG2(Q, 1); + + // generate secret key and public key + Fr s; + G2 pub; + KeyGen(s, pub, Q); + std::cout << "secret key " << s << std::endl; + std::cout << "public key " << pub << std::endl; + + // sign + G1 sign; + Sign(sign, s, m); + std::cout << "msg " << m << std::endl; + std::cout << "sign " << sign << std::endl; + + // verify + bool ok = Verify(sign, Q, pub, m); + std::cout << "verify " << (ok ? "ok" : "ng") << std::endl; +} diff --git a/vendor/github.com/byzantine-lab/mcl/sample/ecdh.cpp b/vendor/github.com/byzantine-lab/mcl/sample/ecdh.cpp new file mode 100644 index 000000000..d5c4a31b2 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/sample/ecdh.cpp @@ -0,0 +1,64 @@ +/* + sample of Elliptic Curve Diffie-Hellman key sharing +*/ +#include +#include +#include +#include +#include + +typedef mcl::FpT<> Fp; +typedef mcl::FpT<mcl::ZnTag> Zn; +typedef mcl::EcT<Fp> Ec; + +int main() +{ + cybozu::RandomGenerator rg; + /* + system setup with a parameter secp192k1 recommended by SECG + Ec is an elliptic curve over Fp + the cyclic group of
<P>
is isomorphic to Zn + */ + const mcl::EcParam& para = mcl::ecparam::secp192k1; + Zn::init(para.n); + Fp::init(para.p); + Ec::init(para.a, para.b); + const Ec P(Fp(para.gx), Fp(para.gy)); + + /* + Alice sets up a private key a and public key aP + */ + Zn a; + Ec aP; + + a.setRand(rg); + Ec::mul(aP, P, a); // aP = a * P; + + std::cout << "aP=" << aP << std::endl; + + /* + Bob sets up a private key b and public key bP + */ + Zn b; + Ec bP; + + b.setRand(rg); + Ec::mul(bP, P, b); // bP = b * P; + + std::cout << "bP=" << bP << std::endl; + + Ec abP, baP; + + // Alice uses bP(B's public key) and a(A's private key) + Ec::mul(abP, bP, a); // abP = a * (bP) + + // Bob uses aP(A's public key) and b(B's private key) + Ec::mul(baP, aP, b); // baP = b * (aP) + + if (abP == baP) { + std::cout << "key sharing succeeded:" << abP << std::endl; + } else { + std::cout << "ERR(not here)" << std::endl; + } +} + diff --git a/vendor/github.com/byzantine-lab/mcl/sample/large.cpp b/vendor/github.com/byzantine-lab/mcl/sample/large.cpp new file mode 100644 index 000000000..60b2ac900 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/sample/large.cpp @@ -0,0 +1,125 @@ +/* + large prime sample for 64-bit arch + make MCL_USE_LLVM=1 MCL_MAX_BIT_SIZE=768 +*/ +#include +#include +#include +#include "../src/low_func.hpp" + +typedef mcl::FpT<> Fp; + +using namespace mcl::fp; +const size_t N = 12; + +void testMul() +{ + Unit ux[N], uy[N], a[N * 2], b[N * 2]; + for (size_t i = 0; i < N; i++) { + ux[i] = -i * i + 5; + uy[i] = -i * i + 9; + } + MulPreCore::f(a, ux, uy); + MulPreCore::f(b, ux, uy); + for (size_t i = 0; i < N * 2; i++) { + if (a[i] != b[i]) { + printf("ERR %016llx %016llx\n", (long long)a[i], (long long)b[i]); + } + } + puts("end testMul"); + CYBOZU_BENCH("gmp ", (MulPreCore::f), ux, ux, uy); + CYBOZU_BENCH("kara", (MulPre::karatsuba), ux, ux, uy); +} + +void mulGmp(mpz_class& z, const mpz_class& x, const mpz_class& y, const mpz_class& p) +{ + z = (x * y) % p; +} +void compareGmp(const std::string& pStr) +{ + Fp::init(pStr); + std::string xStr = "2104871209348712947120947102843728"; + std::string s1, s2; + { + Fp x(xStr); + CYBOZU_BENCH_C("mul by mcl", 1000, Fp::mul, x, x, x); + std::ostringstream os; + os << x; + s1 = os.str(); + } + { + const mpz_class p(pStr); + mpz_class x(xStr); + CYBOZU_BENCH_C("mul by GMP", 1000, mulGmp, x, x, x, p); + std::ostringstream os; + os << x; + s2 = os.str(); + } + if (s1 != s2) { + puts("ERR"); + } +} + +void test(const std::string& pStr, mcl::fp::Mode mode) +{ + printf("test %s\n", mcl::fp::ModeToStr(mode)); + Fp::init(pStr, mode); + const mcl::fp::Op& op = Fp::getOp(); + printf("bitSize=%d\n", (int)Fp::getBitSize()); + mpz_class p(pStr); + Fp x = 123456; + Fp y; + Fp::pow(y, x, p); + std::cout << y << std::endl; + if (x != y) { + std::cout << "err:pow:" << y << std::endl; + return; + } + const size_t N = 24; + mcl::fp::Unit ux[N], uy[N]; + for (size_t i = 0; i < N; i++) { + ux[i] = -i * i + 5; + uy[i] = -i * i + 9; + } + CYBOZU_BENCH("mulPre", op.fpDbl_mulPre, ux, ux, uy); + CYBOZU_BENCH("sqrPre", op.fpDbl_sqrPre, ux, ux); + CYBOZU_BENCH("add", op.fpDbl_add, ux, ux, ux, op.p); + CYBOZU_BENCH("sub", op.fpDbl_sub, ux, ux, ux, op.p); + if (op.fpDbl_addPre) { + CYBOZU_BENCH("addPre", op.fpDbl_addPre, ux, ux, ux); + CYBOZU_BENCH("subPre", op.fpDbl_subPre, ux, ux, ux); + } + CYBOZU_BENCH("mont", op.fpDbl_mod, ux, ux, op.p); + CYBOZU_BENCH("mul", Fp::mul, x, x, x); + compareGmp(pStr); +} + +void testAll(const std::string& pStr) +{ + test(pStr, mcl::fp::FP_GMP); + test(pStr,
mcl::fp::FP_GMP_MONT); +#ifdef MCL_USE_LLVM + test(pStr, mcl::fp::FP_LLVM); + test(pStr, mcl::fp::FP_LLVM_MONT); +#endif + compareGmp(pStr); +} +int main() + try +{ + const char *pTbl[] = { + "40347654345107946713373737062547060536401653012956617387979052445947619094013143666088208645002153616185987062074179207", + "13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006083527", + "776259046150354467574489744231251277628443008558348305569526019013025476343188443165439204414323238975243865348565536603085790022057407195722143637520590569602227488010424952775132642815799222412631499596858234375446423426908029627", + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(pTbl); i++) { + testAll(pTbl[i]); + } + testMul(); +} catch (std::exception& e) { + printf("err %s\n", e.what()); + puts("make clean"); + puts("make -DMCL_MAX_BIT_SIZE=768"); + return 1; +} + diff --git a/vendor/github.com/byzantine-lab/mcl/sample/pairing.cpp b/vendor/github.com/byzantine-lab/mcl/sample/pairing.cpp new file mode 100644 index 000000000..230583b6e --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/sample/pairing.cpp @@ -0,0 +1,56 @@ +#include + +using namespace mcl::bn256; + +void minimum_sample(const G1& P, const G2& Q) +{ + const mpz_class a = 123; + const mpz_class b = 456; + Fp12 e1, e2; + pairing(e1, P, Q); + G2 aQ; + G1 bP; + G2::mul(aQ, Q, a); + G1::mul(bP, P, b); + pairing(e2, bP, aQ); + Fp12::pow(e1, e1, a * b); + printf("%s\n", e1 == e2 ? "ok" : "ng"); +} + +void miller_and_finel_exp(const G1& P, const G2& Q) +{ + Fp12 e1, e2; + pairing(e1, P, Q); + + millerLoop(e2, P, Q); + finalExp(e2, e2); + printf("%s\n", e1 == e2 ? "ok" : "ng"); +} + +void precomputed(const G1& P, const G2& Q) +{ + Fp12 e1, e2; + pairing(e1, P, Q); + std::vector Qcoeff; + precomputeG2(Qcoeff, Q); + precomputedMillerLoop(e2, P, Qcoeff); + finalExp(e2, e2); + printf("%s\n", e1 == e2 ? 
"ok" : "ng"); +} + +int main() +{ + const char *aa = "12723517038133731887338407189719511622662176727675373276651903807414909099441"; + const char *ab = "4168783608814932154536427934509895782246573715297911553964171371032945126671"; + const char *ba = "13891744915211034074451795021214165905772212241412891944830863846330766296736"; + const char *bb = "7937318970632701341203597196594272556916396164729705624521405069090520231616"; + + initPairing(); + G2 Q(Fp2(aa, ab), Fp2(ba, bb)); + G1 P(-1, 1); + + minimum_sample(P, Q); + miller_and_finel_exp(P, Q); + precomputed(P, Q); +} + diff --git a/vendor/github.com/byzantine-lab/mcl/sample/pairing_c.c b/vendor/github.com/byzantine-lab/mcl/sample/pairing_c.c new file mode 100644 index 000000000..5c2cd222a --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/sample/pairing_c.c @@ -0,0 +1,52 @@ +#include +#include +#define MCLBN_FP_UNIT_SIZE 4 +#include + +int g_err = 0; +#define ASSERT(x) { if (!(x)) { printf("err %s:%d\n", __FILE__, __LINE__); g_err++; } } + +int main() +{ + char buf[1024]; + const char *aStr = "123"; + const char *bStr = "456"; + mclBn_init(MCL_BN254, MCLBN_FP_UNIT_SIZE); + mclBnFr a, b, ab; + mclBnG1 P, aP; + mclBnG2 Q, bQ; + mclBnGT e, e1, e2; + mclBnFr_setStr(&a, aStr, strlen(aStr), 10); + mclBnFr_setStr(&b, bStr, strlen(bStr), 10); + mclBnFr_mul(&ab, &a, &b); + mclBnFr_getStr(buf, sizeof(buf), &ab, 10); + printf("%s x %s = %s\n", aStr, bStr, buf); + + ASSERT(!mclBnG1_hashAndMapTo(&P, "this", 4)); + ASSERT(!mclBnG2_hashAndMapTo(&Q, "that", 4)); + mclBnG1_getStr(buf, sizeof(buf), &P, 16); + printf("P = %s\n", buf); + mclBnG2_getStr(buf, sizeof(buf), &Q, 16); + printf("Q = %s\n", buf); + + mclBnG1_mul(&aP, &P, &a); + mclBnG2_mul(&bQ, &Q, &b); + + mclBn_pairing(&e, &P, &Q); + mclBnGT_getStr(buf, sizeof(buf), &e, 16); + printf("e = %s\n", buf); + mclBnGT_pow(&e1, &e, &a); + mclBn_pairing(&e2, &aP, &Q); + ASSERT(mclBnGT_isEqual(&e1, &e2)); + + mclBnGT_pow(&e1, &e, &b); + mclBn_pairing(&e2, &P, &bQ); + ASSERT(mclBnGT_isEqual(&e1, &e2)); + if (g_err) { + printf("err %d\n", g_err); + return 1; + } else { + printf("no err\n"); + return 0; + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/sample/random.cpp b/vendor/github.com/byzantine-lab/mcl/sample/random.cpp new file mode 100644 index 000000000..a2a3619ad --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/sample/random.cpp @@ -0,0 +1,29 @@ +#include +#include +#include +#include +#include +#include +typedef mcl::FpT<> Fp; + +typedef std::map Map; + +int main(int argc, char *argv[]) +{ + cybozu::RandomGenerator rg; + const char *p = mcl::ecparam::secp192k1.p; + if (argc == 2) { + p = argv[1]; + } + Fp::init(p); + Fp x; + printf("p=%s\n", p); + Map m; + for (int i = 0; i < 10000; i++) { + x.setRand(rg); + m[x.getStr(16)]++; + } + for (Map::const_iterator i = m.begin(), ie = m.end(); i != ie; ++i) { + printf("%s %d\n", i->first.c_str(), i->second); + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/sample/rawbench.cpp b/vendor/github.com/byzantine-lab/mcl/sample/rawbench.cpp new file mode 100644 index 000000000..4d7506ef5 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/sample/rawbench.cpp @@ -0,0 +1,180 @@ +#define PUT(x) std::cout << #x "=" << (x) << std::endl +#include +#include +#include +#include +#include + +typedef mcl::FpT Fp; +typedef mcl::Fp2T Fp2; +typedef mcl::FpDblT FpDbl; +typedef mcl::Fp6T Fp6; +typedef mcl::Fp12T Fp12; + +typedef mcl::fp::Unit Unit; + +void mul9(const mcl::fp::Op& op, Unit *y, const Unit *x, const Unit *p) +{ + const size_t maxN = 
sizeof(Fp) / sizeof(Unit); + Unit tmp[maxN]; + op.fp_add(tmp, x, x, p); // 2x + op.fp_add(tmp, tmp, tmp, p); // 4x + op.fp_add(tmp, tmp, tmp, p); // 8x + op.fp_add(y, tmp, x, p); // 9x +} + +void benchRaw(const char *p, mcl::fp::Mode mode) +{ + Fp::init(1, p, mode); + Fp2::init(); + const size_t maxN = sizeof(Fp) / sizeof(Unit); + const mcl::fp::Op& op = Fp::getOp(); + cybozu::XorShift rg; + Fp fx, fy; + fx.setRand(rg); + fy.setRand(rg); + Unit ux[maxN * 2] = {}; + Unit uy[maxN * 2] = {}; + Unit uz[maxN * 2] = {}; + memcpy(ux, fx.getUnit(), sizeof(Unit) * op.N); + memcpy(ux + op.N, fx.getUnit(), sizeof(Unit) * op.N); + memcpy(uy, fy.getUnit(), sizeof(Unit) * op.N); + memcpy(uy + op.N, fy.getUnit(), sizeof(Unit) * op.N); + double fp_addT, fp_subT; + double fp_addPreT, fp_subPreT; + double fp_sqrT, fp_mulT; + double fp_mulUnitT; + double mul9T; + double fp_mulUnitPreT; + double fpN1_modT; + double fpDbl_addT, fpDbl_subT; + double fpDbl_sqrPreT, fpDbl_mulPreT, fpDbl_modT; + double fp2_sqrT, fp2_mulT; + CYBOZU_BENCH_T(fp_addT, op.fp_add, uz, ux, uy, op.p); + CYBOZU_BENCH_T(fp_subT, op.fp_sub, uz, uy, ux, op.p); + CYBOZU_BENCH_T(fp_addPreT, op.fp_addPre, uz, ux, uy); + CYBOZU_BENCH_T(fp_subPreT, op.fp_subPre, uz, uy, ux); + CYBOZU_BENCH_T(fp_sqrT, op.fp_sqr, uz, ux, op.p); + CYBOZU_BENCH_T(fp_mulT, op.fp_mul, uz, ux, uy, op.p); + CYBOZU_BENCH_T(fp_mulUnitT, op.fp_mulUnit, uz, ux, 9, op.p); + CYBOZU_BENCH_T(mul9T, mul9, op, uz, ux, op.p); + CYBOZU_BENCH_T(fp_mulUnitPreT, op.fp_mulUnitPre, ux, ux, 9); + CYBOZU_BENCH_T(fpN1_modT, op.fpN1_mod, ux, uy, op.p); + CYBOZU_BENCH_T(fpDbl_addT, op.fpDbl_add, uz, ux, uy, op.p); + CYBOZU_BENCH_T(fpDbl_subT, op.fpDbl_sub, uz, uy, ux, op.p); + CYBOZU_BENCH_T(fpDbl_sqrPreT, op.fpDbl_sqrPre, uz, ux); + CYBOZU_BENCH_T(fpDbl_mulPreT, op.fpDbl_mulPre, uz, ux, uy); + CYBOZU_BENCH_T(fpDbl_modT, op.fpDbl_mod, uz, ux, op.p); + Fp2 f2x, f2y; + f2x.a = fx; + f2x.b = fy; + f2y = f2x; + CYBOZU_BENCH_T(fp2_sqrT, Fp2::sqr, f2x, f2x); + CYBOZU_BENCH_T(fp2_mulT, Fp2::mul, f2x, f2x, f2y); + printf("%s\n", mcl::fp::ModeToStr(mode)); + const char *tStrTbl[] = { + "fp_add", "fp_sub", + "addPre", "subPre", + "fp_sqr", "fp_mul", + "mulUnit", + "mul9", + "mulUnitP", + "fpN1_mod", + "D_add", "D_sub", + "D_sqrPre", "D_mulPre", "D_mod", + "fp2_sqr", "fp2_mul", + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tStrTbl); i++) { + printf(" %8s", tStrTbl[i]); + } + printf("\n"); + const double tTbl[] = { + fp_addT, fp_subT, + fp_addPreT, fp_subPreT, + fp_sqrT, fp_mulT, + fp_mulUnitT, + mul9T, + fp_mulUnitPreT, + fpN1_modT, + fpDbl_addT, fpDbl_subT, + fpDbl_sqrPreT, fpDbl_mulPreT, fpDbl_modT, + fp2_sqrT, fp2_mulT, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tTbl); i++) { + printf(" %8.2f", tTbl[i]); + } + printf("\n"); +} + +int main(int argc, char *argv[]) + try +{ + cybozu::Option opt; + size_t bitSize; + opt.appendOpt(&bitSize, 0, "s", ": bitSize"); + opt.appendHelp("h", ": show this message"); + if (!opt.parse(argc, argv)) { + opt.usage(); + return 1; + } + const char *tbl[] = { + // N = 2 + "0x0000000000000001000000000000000d", + "0x7fffffffffffffffffffffffffffffff", + "0x8000000000000000000000000000001d", + "0xffffffffffffffffffffffffffffff61", + + // N = 3 + "0x000000000000000100000000000000000000000000000033", // min prime + "0x70000000000000000000000000000000000000000000001f", + "0x800000000000000000000000000000000000000000000005", + "0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d", + "0xfffffffffffffffffffffffffffffffeffffffffffffffff",
"0xffffffffffffffffffffffffffffffffffffffffffffff13", // max prime + + // N = 4 + "0x0000000000000001000000000000000000000000000000000000000000000085", // min prime + "0x2523648240000001ba344d80000000086121000000000013a700000000000013", // BN254 + "0x30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", // Snark + "0x7523648240000001ba344d80000000086121000000000013a700000000000017", + "0x800000000000000000000000000000000000000000000000000000000000005f", + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff43", // max prime + // N = 5 + "0x80000000000000000000000000000000000000000000000000000000000000000000000000000009", + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3b", + // N = 6 + "0x800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000171", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffec3", + // N = 7 + "0x8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000063", + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff35", + // N = 8 + "0x8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006f", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdc7", +#if MCL_MAX_BIT_SIZE == 1024 + "0xc70b1ddda9b96e3965e5855942aa5852d8f8e052c760ac32cdfec16a2ed3d56981e1a475e20a70144ed2f5061ba64900f69451492803f815d446ee133d0668f7a7f3276d6301c95ce231f0e4b0d0f3882f10014fca04454cff55d2e2d4cfc1aad33b8d38397e2fc8b623177e63d0b783269c40a85b8f105654783b8ed2e737df", + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff97", +#endif + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const char *p = tbl[i]; + if (bitSize > 0 && (strlen(p) - 2) * 4 != bitSize) { + continue; + } + printf("prime=%s\n", p); + benchRaw(tbl[i], mcl::fp::FP_GMP); + benchRaw(tbl[i], mcl::fp::FP_GMP_MONT); +#ifdef MCL_USE_LLVM + benchRaw(tbl[i], mcl::fp::FP_LLVM); + benchRaw(tbl[i], mcl::fp::FP_LLVM_MONT); +#endif +#ifdef MCL_USE_XBYAK + if (bitSize <= 384) { + benchRaw(tbl[i], mcl::fp::FP_XBYAK); + } +#endif + } +} catch (std::exception& e) { + printf("ERR %s\n", e.what()); + return 1; +} diff --git a/vendor/github.com/byzantine-lab/mcl/sample/she_make_dlp_table.cpp b/vendor/github.com/byzantine-lab/mcl/sample/she_make_dlp_table.cpp new file mode 100644 index 000000000..41f18e225 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/sample/she_make_dlp_table.cpp @@ -0,0 +1,69 @@ +/* + make she DLP table +*/ +#include +#include +#include + +using namespace mcl::she; + +struct Param { + int curveType; + int hashBitSize; + int group; + std::string path; +}; + +template +void makeTable(const Param& param, const char *groupStr, HashTable& hashTbl, const G& P) +{ + char baseName[32]; + CYBOZU_SNPRINTF(baseName, sizeof(baseName), "she-dlp-%d-%d-%s.bin", param.curveType, param.hashBitSize, groupStr); + const std::string fileName = param.path + baseName; + printf("file=%s\n", fileName.c_str()); + std::ofstream ofs(fileName.c_str(), std::ios::binary); + + const size_t hashSize = 1u << param.hashBitSize; + 
hashTbl.init(P, hashSize); + hashTbl.save(ofs); +} + +void run(const Param& param) +{ + SHE::init(mcl::getCurveParam(param.curveType)); + + switch (param.group) { + case 1: + makeTable(param, "g1", getHashTableG1(), SHE::P_); + break; + case 2: + makeTable(param, "g2", getHashTableG2(), SHE::Q_); + break; + case 3: + makeTable(param, "gt", getHashTableGT(), SHE::ePQ_); + break; + default: + throw cybozu::Exception("bad group") << param.group; + } +} + +int main(int argc, char *argv[]) + try +{ + cybozu::Option opt; + Param param; + opt.appendOpt(&param.curveType, 0, "ct", ": curveType(0:BN254, 1:BN381_1, 5:BLS12_381)"); + opt.appendOpt(&param.hashBitSize, 20, "hb", ": hash bit size"); + opt.appendOpt(&param.group, 3, "g", ": group(1:G1, 2:G2, 3:GT)"); + opt.appendOpt(&param.path, "./", "path", ": path to table"); + opt.appendHelp("h"); + if (opt.parse(argc, argv)) { + run(param); + } else { + opt.usage(); + return 1; + } +} catch (std::exception& e) { + printf("err %s\n", e.what()); + return 1; +} diff --git a/vendor/github.com/byzantine-lab/mcl/sample/she_smpl.cpp b/vendor/github.com/byzantine-lab/mcl/sample/she_smpl.cpp new file mode 100644 index 000000000..e01b9c130 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/sample/she_smpl.cpp @@ -0,0 +1,125 @@ +/* + sample of somewhat homomorphic encryption(SHE) +*/ +#define PUT(x) std::cout << #x << "=" << (x) << std::endl; +#include +#include + +using namespace mcl::she; + +void miniSample() +{ + // init library + SHE::init(); + + SecretKey sec; + + // init secret key by random_device + sec.setByCSPRNG(); + + // set range to decode GT DLP + SHE::setRangeForDLP(1000); + + PublicKey pub; + // get public key + sec.getPublicKey(pub); + + const int N = 5; + int a[] = { 1, 5, -3, 4, 6 }; + int b[] = { 4, 2, 1, 9, -2 }; + // compute correct value + int sum = 0; + for (size_t i = 0; i < N; i++) { + sum += a[i] * b[i]; + } + + std::vector<CipherText> ca(N), cb(N); + + // encrypt each a[] and b[] + for (size_t i = 0; i < N; i++) { + pub.enc(ca[i], a[i]); + pub.enc(cb[i], b[i]); + } + CipherText c; + c.clearAsMultiplied(); // clear as multiplied before using c.add() + // inner product of encrypted vector + for (size_t i = 0; i < N; i++) { + CipherText t; + CipherText::mul(t, ca[i], cb[i]); // t = ca[i] * cb[i] + c.add(t); // c += t + } + // decrypt it + int m = (int)sec.dec(c); + // verify the value + if (m == sum) { + puts("ok"); + } else { + printf("err correct %d err %d\n", sum, m); + } +} + +void usePrimitiveCipherText() +{ + // init library + SHE::init(); + + SecretKey sec; + + // init secret key by random_device + sec.setByCSPRNG(); + + // set range to decode GT DLP + SHE::setRangeForGTDLP(100); + + PublicKey pub; + // get public key + sec.getPublicKey(pub); + + int a1 = 1, a2 = 2; + int b1 = 5, b2 = -4; + CipherTextG1 c1, c2; // size of CipherTextG1 = N * 2 ; N = 256-bit for CurveFp254BNb + CipherTextG2 d1, d2; // size of CipherTextG2 = N * 4 + pub.enc(c1, a1); + pub.enc(c2, a2); + pub.enc(d1, b1); + pub.enc(d2, b2); + c1.add(c2); // CipherTextG1 is additive HE + d1.add(d2); // CipherTextG2 is additive HE + CipherTextGT cm; // size of CipherTextGT = N * 12 * 4 + CipherTextGT::mul(cm, c1, d1); // cm = c1 * d1 + cm.add(cm); // 2cm + int m = (int)sec.dec(cm); + int ok = (a1 + a2) * (b1 + b2) * 2; + if (m == ok) { + puts("ok"); + } else { + printf("err m=%d ok=%d\n", m, ok); + } + std::string s; + s = c1.getStr(mcl::IoSerialize); // serialize + printf("c1 data size %d byte\n", (int)s.size()); + + c2.setStr(s, mcl::IoSerialize); + printf("deserialize %s\n", c1 == c2 ? "ok" : "ng"); + + s = d1.getStr(mcl::IoSerialize); // serialize + printf("d1 data size %d byte\n", (int)s.size()); + d2.setStr(s, mcl::IoSerialize); + printf("deserialize %s\n", d1 == d2 ? "ok" : "ng"); + + s = cm.getStr(mcl::IoSerialize); // serialize + printf("cm data size %d byte\n", (int)s.size()); + CipherTextGT cm2; + cm2.setStr(s, mcl::IoSerialize); + printf("deserialize %s\n", cm == cm2 ? "ok" : "ng"); +} + +int main() + try +{ + miniSample(); + usePrimitiveCipherText(); +} catch (std::exception& e) { + printf("err %s\n", e.what()); + return 1; +} diff --git a/vendor/github.com/byzantine-lab/mcl/sample/tri-dh.cpp b/vendor/github.com/byzantine-lab/mcl/sample/tri-dh.cpp new file mode 100644 index 000000000..8b720edbf --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/sample/tri-dh.cpp @@ -0,0 +1,97 @@ +/* + tripartite Diffie-Hellman +*/ +#include +#include +#include +#include +#include + +static cybozu::RandomGenerator rg; + +const std::string skSuf = ".sk.txt"; +const std::string pkSuf = ".pk.txt"; + +using namespace mcl::bn256; + +void keygen(const std::string& user) +{ + if (user.empty()) { + throw cybozu::Exception("keygen:user is empty"); + } + const char *aa = "12723517038133731887338407189719511622662176727675373276651903807414909099441"; + const char *ab = "4168783608814932154536427934509895782246573715297911553964171371032945126671"; + const char *ba = "13891744915211034074451795021214165905772212241412891944830863846330766296736"; + const char *bb = "7937318970632701341203597196594272556916396164729705624521405069090520231616"; + + + initPairing(); + G2 Q(Fp2(aa, ab), Fp2(ba, bb)); + G1 P(-1, 1); + + Fr s; + s.setRand(rg); + G1::mul(P, P, s); + G2::mul(Q, Q, s); + { + std::string name = user + skSuf; + std::ofstream ofs(name.c_str(), std::ios::binary); + ofs << s << std::endl; + } + { + std::string name = user + pkSuf; + std::ofstream ofs(name.c_str(), std::ios::binary); + ofs << P << std::endl; + ofs << Q << std::endl; + } +} + +void load(G1& P, G2& Q, const std::string& fileName) +{ + std::ifstream ifs(fileName.c_str(), std::ios::binary); + ifs >> P >> Q; +} + +void share(const std::string& skFile, const std::string& pk1File, const std::string& pk2File) +{ + initPairing(); + Fr s; + G1 P1, P2; + G2 Q1, Q2; + { + std::ifstream ifs(skFile.c_str(), std::ios::binary); + ifs >> s; + } + load(P1, Q1, pk1File); + load(P2, Q2, pk2File); + Fp12 e; + pairing(e, P1, Q2); + { + // verify(not necessary) + Fp12 e2; + pairing(e2, P2, Q1); + if (e != e2) { + throw cybozu::Exception("share:bad public key file") << e << e2; + } + } + Fp12::pow(e, e, s); + std::cout << "share key:\n" << e << std::endl; +} + +int main(int argc, char *argv[]) + try +{ + if (argc == 3 && strcmp(argv[1], "keygen") == 0) { + keygen(argv[2]); + } else if (argc == 5 && strcmp(argv[1], "share") == 0) { + share(argv[2], argv[3], argv[4]); + } else { + fprintf(stderr, "tri-dh.exe keygen <user>\n"); + fprintf(stderr, "tri-dh.exe share <skFile> <pk1File> <pk2File>\n"); + return 1; + } +} catch (std::exception& e) { + printf("ERR %s\n", e.what()); + return 1; +} + diff --git a/vendor/github.com/byzantine-lab/mcl/sample/vote.cpp b/vendor/github.com/byzantine-lab/mcl/sample/vote.cpp new file mode 100644 index 000000000..88137187c --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/sample/vote.cpp @@ -0,0 +1,206 @@ +/* + vote sample tool + Copyright (c) 2014, National Institute of Advanced Industrial + Science and Technology All rights reserved. + This source file is subject to BSD 3-Clause license.
+ + modified for mcl by herumi +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef mcl::FpT<> Fp; +typedef mcl::FpT<mcl::ZnTag> Zn; // use ZnTag because Zn is a different class from Fp +typedef mcl::EcT<Fp> Ec; +typedef mcl::ElgamalT<Ec, Zn> Elgamal; + +cybozu::RandomGenerator rg; + +const std::string pubFile = "vote_pub.txt"; +const std::string prvFile = "vote_prv.txt"; +const std::string resultFile = "vote_ret.txt"; + +std::string GetSheetName(size_t n) +{ + return std::string("vote_") + cybozu::itoa(n) + ".txt"; +} + +struct Param { + std::string mode; + std::string voteList; + Param(int argc, const char *const argv[]) + { + cybozu::Option opt; + opt.appendOpt(&voteList, "11001100", "l", ": list of voters for vote mode(e.g. 11001100)"); + opt.appendHelp("h", ": show this message"); + opt.appendParam(&mode, "mode", ": init/vote/count/open"); + if (!opt.parse(argc, argv)) { + opt.usage(); + exit(1); + } + printf("mode=%s\n", mode.c_str()); + if (mode == "vote") { + printf("voters=%s\n", voteList.c_str()); + size_t pos = voteList.find_first_not_of("01"); + if (pos != std::string::npos) { + printf("bad char %c\n", voteList[pos]); + exit(1); + } + } + } +}; + +void SysInit() +{ + const mcl::EcParam& para = mcl::ecparam::secp192k1; + Zn::init(para.n); + Fp::init(para.p); + Ec::init(para.a, para.b); +} + +template<class T> +bool Load(T& t, const std::string& name, bool doThrow = true) +{ + std::ifstream ifs(name.c_str(), std::ios::binary); + if (!ifs) { + if (doThrow) throw cybozu::Exception("Load:can't read") << name; + return false; + } + if (ifs >> t) return true; + if (doThrow) throw cybozu::Exception("Load:bad data") << name; + return false; +} + +template<class T> +void Save(const std::string& name, const T& t) +{ + std::ofstream ofs(name.c_str(), std::ios::binary); + ofs << t; +} + +void Init() +{ + const mcl::EcParam& para = mcl::ecparam::secp192k1; + const Fp x0(para.gx); + const Fp y0(para.gy); + const Ec P(x0, y0); + const size_t bitSize = para.bitSize; + + Elgamal::PrivateKey prv; + prv.init(P, bitSize, rg); + const Elgamal::PublicKey& pub = prv.getPublicKey(); + printf("make privateKey=%s, publicKey=%s\n", prvFile.c_str(), pubFile.c_str()); + Save(prvFile, prv); + Save(pubFile, pub); +} + +struct CipherWithZkp { + Elgamal::CipherText c; + Elgamal::Zkp zkp; + bool verify(const Elgamal::PublicKey& pub) const + { + return pub.verify(c, zkp); + } +}; + +inline std::ostream& operator<<(std::ostream& os, const CipherWithZkp& self) +{ + return os << self.c << std::endl << self.zkp; +} +inline std::istream& operator>>(std::istream& is, CipherWithZkp& self) +{ + return is >> self.c >> self.zkp; +} + +void Vote(const std::string& voteList) +{ + Elgamal::PublicKey pub; + Load(pub, pubFile); + puts("shuffle"); + std::vector<size_t> idxTbl(voteList.size()); + for (size_t i = 0; i < idxTbl.size(); i++) { + idxTbl[i] = i; + } + cybozu::shuffle(idxTbl, rg); + puts("each voter votes"); + for (size_t i = 0; i < voteList.size(); i++) { + CipherWithZkp c; + pub.encWithZkp(c.c, c.zkp, voteList[i] - '0', rg); + const std::string sheetName = GetSheetName(idxTbl[i]); + printf("make %s\n", sheetName.c_str()); + Save(sheetName, c); + } +} + +void Count() +{ + Elgamal::PublicKey pub; + Load(pub, pubFile); + Elgamal::CipherText result; + puts("aggregate votes"); + for (size_t i = 0; ; i++) { + const std::string sheetName = GetSheetName(i); + CipherWithZkp c; + if (!Load(c, sheetName, false)) break; + if (!c.verify(pub)) throw cybozu::Exception("bad cipher text") << i; + printf("add %s\n", sheetName.c_str());
+ result.add(c.c); + } + printf("create result file : %s\n", resultFile.c_str()); + Save(resultFile, result); +} + +void Open() +{ + Elgamal::PrivateKey prv; + Load(prv, prvFile); + Elgamal::CipherText c; + Load(c, resultFile); + Zn n; + prv.dec(n, c); + std::cout << "result of vote count " << n << std::endl; +#if 0 + puts("open real value"); + for (size_t i = 0; ; i++) { + Elgamal::CipherText c; + const std::string sheetName = GetSheetName(i); + if (!Load(c, sheetName, false)) break; + Zn n; + prv.dec(n, c); + std::cout << sheetName << " " << n << std::endl; + } +#endif +} + +int main(int argc, char *argv[]) + try +{ + const Param p(argc, argv); + SysInit(); + if (p.mode == "init") { + Init(); + } else + if (p.mode == "vote") { + Vote(p.voteList); + } else + if (p.mode == "count") { + Count(); + } else + if (p.mode == "open") { + Open(); + } else + { + printf("bad mode=%s\n", p.mode.c_str()); + return 1; + } +} catch (std::exception& e) { + printf("ERR %s\n", e.what()); +} + diff --git a/vendor/github.com/byzantine-lab/mcl/setvar.bat b/vendor/github.com/byzantine-lab/mcl/setvar.bat new file mode 100644 index 000000000..1d57fa69e --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/setvar.bat @@ -0,0 +1,2 @@ +set CFLAGS=/MT /DNOMINMAX /Ox /DNDEBUG /W4 /Zi /EHsc /nologo -I./include -I../cybozulib_ext/include +set LDFLAGS=/LIBPATH:..\cybozulib_ext\lib /LIBPATH:.\lib diff --git a/vendor/github.com/byzantine-lab/mcl/src/asm/aarch64.s b/vendor/github.com/byzantine-lab/mcl/src/asm/aarch64.s new file mode 100644 index 000000000..a49a36e3a --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/asm/aarch64.s @@ -0,0 +1,13197 @@ + .text + .file "" + .globl makeNIST_P192L + .align 2 + .type makeNIST_P192L,@function +makeNIST_P192L: // @makeNIST_P192L +// BB#0: + movn x0, #0 + orr x1, xzr, #0xfffffffffffffffe + movn x2, #0 + ret +.Lfunc_end0: + .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L + + .globl mcl_fpDbl_mod_NIST_P192L + .align 2 + .type mcl_fpDbl_mod_NIST_P192L,@function +mcl_fpDbl_mod_NIST_P192L: // @mcl_fpDbl_mod_NIST_P192L +// BB#0: + ldp x8, x9, [x1, #16] + ldp x10, x11, [x1, #32] + ldp x12, x13, [x1] + orr w14, wzr, #0x1 + adds x13, x11, x13 + adcs x8, x8, xzr + adcs x15, xzr, xzr + adds x12, x12, x9 + adcs x13, x13, x10 + adcs x8, x8, x11 + adcs x15, x15, xzr + adds x11, x12, x11 + movn x12, #0 + adcs x9, x13, x9 + adcs x8, x8, x10 + adcs x10, x15, xzr + adds x11, x10, x11 + adcs x9, x10, x9 + adcs x8, x8, xzr + adcs x10, xzr, xzr + adds x13, x11, #1 // =1 + adcs x14, x9, x14 + adcs x15, x8, xzr + adcs x10, x10, x12 + tst x10, #0x1 + csel x10, x11, x13, ne + csel x9, x9, x14, ne + csel x8, x8, x15, ne + stp x10, x9, [x0] + str x8, [x0, #16] + ret +.Lfunc_end1: + .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L + + .globl mcl_fp_sqr_NIST_P192L + .align 2 + .type mcl_fp_sqr_NIST_P192L,@function +mcl_fp_sqr_NIST_P192L: // @mcl_fp_sqr_NIST_P192L +// BB#0: + ldp x8, x9, [x1] + ldr x10, [x1, #16] + orr w11, wzr, #0x1 + umulh x12, x8, x8 + mul x13, x9, x8 + mul x14, x10, x8 + umulh x15, x9, x8 + adds x12, x12, x13 + umulh x16, x10, x8 + adcs x17, x15, x14 + adcs x18, x16, xzr + mul x1, x9, x9 + mul x2, x10, x9 + adds x15, x15, x1 + umulh x1, x9, x9 + umulh x9, x10, x9 + adcs x1, x1, x2 + adcs x3, x9, xzr + adds x12, x13, x12 + adcs x13, x15, x17 + adcs x15, x1, x18 + movn x17, #0 + umulh x18, x10, x10 + mul x10, x10, x10 + mul x8, x8, x8 + adcs x1, x3, xzr + adds x16, x16, x2 + adcs x9, x9, x10 + adcs x10, x18, xzr + adds x13, x14, x13 + adcs x14, x16, x15 + adcs 
x9, x9, x1 + adcs x10, x10, xzr + adds x12, x12, x10 + adcs x13, x13, xzr + adcs x15, xzr, xzr + adds x8, x8, x14 + adcs x12, x12, x9 + adcs x13, x13, x10 + adcs x15, x15, xzr + adds x8, x8, x10 + adcs x10, x12, x14 + adcs x9, x13, x9 + adcs x12, x15, xzr + adds x8, x12, x8 + adcs x10, x12, x10 + adcs x9, x9, xzr + adcs x12, xzr, xzr + adds x13, x8, #1 // =1 + adcs x11, x10, x11 + adcs x14, x9, xzr + adcs x12, x12, x17 + tst x12, #0x1 + csel x8, x8, x13, ne + csel x10, x10, x11, ne + csel x9, x9, x14, ne + stp x8, x10, [x0] + str x9, [x0, #16] + ret +.Lfunc_end2: + .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L + + .globl mcl_fp_mulNIST_P192L + .align 2 + .type mcl_fp_mulNIST_P192L,@function +mcl_fp_mulNIST_P192L: // @mcl_fp_mulNIST_P192L +// BB#0: + stp x20, x19, [sp, #-32]! + stp x29, x30, [sp, #16] + add x29, sp, #16 // =16 + sub sp, sp, #48 // =48 + mov x19, x0 + mov x0, sp + bl mcl_fpDbl_mulPre3L + ldp x9, x8, [sp, #8] + ldp x11, x10, [sp, #32] + ldr x12, [sp, #24] + ldr x13, [sp] + orr w14, wzr, #0x1 + adds x9, x10, x9 + adcs x8, x8, xzr + adcs x15, xzr, xzr + adds x13, x13, x12 + adcs x9, x9, x11 + adcs x8, x8, x10 + adcs x15, x15, xzr + adds x10, x13, x10 + movn x13, #0 + adcs x9, x9, x12 + adcs x8, x8, x11 + adcs x11, x15, xzr + adds x10, x11, x10 + adcs x9, x11, x9 + adcs x8, x8, xzr + adcs x11, xzr, xzr + adds x12, x10, #1 // =1 + adcs x14, x9, x14 + adcs x15, x8, xzr + adcs x11, x11, x13 + tst x11, #0x1 + csel x10, x10, x12, ne + csel x9, x9, x14, ne + csel x8, x8, x15, ne + stp x10, x9, [x19] + str x8, [x19, #16] + sub sp, x29, #16 // =16 + ldp x29, x30, [sp, #16] + ldp x20, x19, [sp], #32 + ret +.Lfunc_end3: + .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L + + .globl mcl_fpDbl_mod_NIST_P521L + .align 2 + .type mcl_fpDbl_mod_NIST_P521L,@function +mcl_fpDbl_mod_NIST_P521L: // @mcl_fpDbl_mod_NIST_P521L +// BB#0: + stp x29, x30, [sp, #-16]! 
+ mov x29, sp + ldp x8, x9, [x1, #112] + ldr x10, [x1, #128] + ldp x11, x12, [x1, #96] + ldp x13, x14, [x1, #80] + ldp x15, x16, [x1, #64] + ldp x17, x18, [x1, #48] + ldp x2, x3, [x1, #32] + ldp x4, x5, [x1, #16] + ldp x6, x1, [x1] + extr x7, x10, x9, #9 + extr x9, x9, x8, #9 + extr x8, x8, x12, #9 + extr x12, x12, x11, #9 + extr x11, x11, x14, #9 + extr x14, x14, x13, #9 + extr x13, x13, x16, #9 + extr x16, x16, x15, #9 + and x15, x15, #0x1ff + lsr x10, x10, #9 + adds x16, x16, x6 + adcs x13, x13, x1 + adcs x14, x14, x4 + adcs x11, x11, x5 + adcs x12, x12, x2 + adcs x1, x8, x3 + adcs x17, x9, x17 + adcs x18, x7, x18 + adcs x2, x10, x15 + ubfx x8, x2, #9, #1 + adds x8, x8, x16 + adcs x9, x13, xzr + and x13, x9, x8 + adcs x10, x14, xzr + and x13, x13, x10 + adcs x11, x11, xzr + and x13, x13, x11 + adcs x12, x12, xzr + and x14, x13, x12 + adcs x13, x1, xzr + and x15, x14, x13 + adcs x14, x17, xzr + and x16, x15, x14 + adcs x15, x18, xzr + and x17, x16, x15 + adcs x16, x2, xzr + orr x18, x16, #0xfffffffffffffe00 + and x17, x17, x18 + cmn x17, #1 // =1 + b.eq .LBB4_2 +// BB#1: // %nonzero + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + stp x12, x13, [x0, #32] + stp x14, x15, [x0, #48] + and x8, x16, #0x1ff + str x8, [x0, #64] + ldp x29, x30, [sp], #16 + ret +.LBB4_2: // %zero + mov w1, wzr + movz w2, #0x48 + bl memset + ldp x29, x30, [sp], #16 + ret +.Lfunc_end4: + .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L + + .globl mcl_fp_mulUnitPre1L + .align 2 + .type mcl_fp_mulUnitPre1L,@function +mcl_fp_mulUnitPre1L: // @mcl_fp_mulUnitPre1L +// BB#0: + ldr x8, [x1] + mul x9, x8, x2 + umulh x8, x8, x2 + stp x9, x8, [x0] + ret +.Lfunc_end5: + .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L + + .globl mcl_fpDbl_mulPre1L + .align 2 + .type mcl_fpDbl_mulPre1L,@function +mcl_fpDbl_mulPre1L: // @mcl_fpDbl_mulPre1L +// BB#0: + ldr x8, [x1] + ldr x9, [x2] + mul x10, x9, x8 + umulh x8, x9, x8 + stp x10, x8, [x0] + ret +.Lfunc_end6: + .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L + + .globl mcl_fpDbl_sqrPre1L + .align 2 + .type mcl_fpDbl_sqrPre1L,@function +mcl_fpDbl_sqrPre1L: // @mcl_fpDbl_sqrPre1L +// BB#0: + ldr x8, [x1] + mul x9, x8, x8 + umulh x8, x8, x8 + stp x9, x8, [x0] + ret +.Lfunc_end7: + .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L + + .globl mcl_fp_mont1L + .align 2 + .type mcl_fp_mont1L,@function +mcl_fp_mont1L: // @mcl_fp_mont1L +// BB#0: + ldr x8, [x2] + ldr x9, [x1] + ldur x10, [x3, #-8] + ldr x11, [x3] + umulh x12, x9, x8 + mul x8, x9, x8 + mul x9, x8, x10 + umulh x10, x9, x11 + mul x9, x9, x11 + cmn x9, x8 + adcs x8, x10, x12 + adcs x9, xzr, xzr + subs x10, x8, x11 + sbcs x9, x9, xzr + tst x9, #0x1 + csel x8, x8, x10, ne + str x8, [x0] + ret +.Lfunc_end8: + .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L + + .globl mcl_fp_montNF1L + .align 2 + .type mcl_fp_montNF1L,@function +mcl_fp_montNF1L: // @mcl_fp_montNF1L +// BB#0: + ldr x8, [x2] + ldr x9, [x1] + ldur x10, [x3, #-8] + ldr x11, [x3] + umulh x12, x9, x8 + mul x8, x9, x8 + mul x9, x8, x10 + umulh x10, x9, x11 + mul x9, x9, x11 + cmn x9, x8 + adcs x8, x10, x12 + sub x9, x8, x11 + cmp x9, #0 // =0 + csel x8, x8, x9, lt + str x8, [x0] + ret +.Lfunc_end9: + .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L + + .globl mcl_fp_montRed1L + .align 2 + .type mcl_fp_montRed1L,@function +mcl_fp_montRed1L: // @mcl_fp_montRed1L +// BB#0: + ldur x8, [x2, #-8] + ldp x9, x11, [x1] + ldr x10, [x2] + mul x8, x9, x8 + umulh x12, x8, x10 + mul x8, x8, x10 + cmn x9, x8 + adcs x8, x11, x12 + adcs x9, xzr, 
+ .globl mcl_fp_montRed1L
+ .align 2
+ .type mcl_fp_montRed1L,@function
+mcl_fp_montRed1L: // @mcl_fp_montRed1L
+// BB#0:
+ ldur x8, [x2, #-8]
+ ldp x9, x11, [x1]
+ ldr x10, [x2]
+ mul x8, x9, x8
+ umulh x12, x8, x10
+ mul x8, x8, x10
+ cmn x9, x8
+ adcs x8, x11, x12
+ adcs x9, xzr, xzr
+ subs x10, x8, x10
+ sbcs x9, x9, xzr
+ tst x9, #0x1
+ csel x8, x8, x10, ne
+ str x8, [x0]
+ ret
+.Lfunc_end10:
+ .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L
+
+ .globl mcl_fp_addPre1L
+ .align 2
+ .type mcl_fp_addPre1L,@function
+mcl_fp_addPre1L: // @mcl_fp_addPre1L
+// BB#0:
+ ldr x8, [x1]
+ ldr x9, [x2]
+ adds x9, x9, x8
+ adcs x8, xzr, xzr
+ str x9, [x0]
+ mov x0, x8
+ ret
+.Lfunc_end11:
+ .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L
+
+ .globl mcl_fp_subPre1L
+ .align 2
+ .type mcl_fp_subPre1L,@function
+mcl_fp_subPre1L: // @mcl_fp_subPre1L
+// BB#0:
+ ldr x8, [x2]
+ ldr x9, [x1]
+ subs x9, x9, x8
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0]
+ mov x0, x8
+ ret
+.Lfunc_end12:
+ .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L
+
+ .globl mcl_fp_shr1_1L
+ .align 2
+ .type mcl_fp_shr1_1L,@function
+mcl_fp_shr1_1L: // @mcl_fp_shr1_1L
+// BB#0:
+ ldr x8, [x1]
+ lsr x8, x8, #1
+ str x8, [x0]
+ ret
+.Lfunc_end13:
+ .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L
+
+ .globl mcl_fp_add1L
+ .align 2
+ .type mcl_fp_add1L,@function
+mcl_fp_add1L: // @mcl_fp_add1L
+// BB#0:
+ ldr x8, [x1]
+ ldr x9, [x2]
+ ldr x10, [x3]
+ adds x8, x9, x8
+ str x8, [x0]
+ adcs x9, xzr, xzr
+ subs x8, x8, x10
+ sbcs x9, x9, xzr
+ and w9, w9, #0x1
+ tbnz w9, #0, .LBB14_2
+// BB#1: // %nocarry
+ str x8, [x0]
+.LBB14_2: // %carry
+ ret
+.Lfunc_end14:
+ .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L
+
+ .globl mcl_fp_addNF1L
+ .align 2
+ .type mcl_fp_addNF1L,@function
+mcl_fp_addNF1L: // @mcl_fp_addNF1L
+// BB#0:
+ ldr x8, [x1]
+ ldr x9, [x2]
+ ldr x10, [x3]
+ add x8, x9, x8
+ sub x9, x8, x10
+ cmp x9, #0 // =0
+ csel x8, x8, x9, lt
+ str x8, [x0]
+ ret
+.Lfunc_end15:
+ .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L
+
+ .globl mcl_fp_sub1L
+ .align 2
+ .type mcl_fp_sub1L,@function
+mcl_fp_sub1L: // @mcl_fp_sub1L
+// BB#0:
+ ldr x8, [x2]
+ ldr x9, [x1]
+ subs x8, x9, x8
+ str x8, [x0]
+ ngcs x9, xzr
+ and w9, w9, #0x1
+ tbnz w9, #0, .LBB16_2
+// BB#1: // %nocarry
+ ret
+.LBB16_2: // %carry
+ ldr x9, [x3]
+ add x8, x9, x8
+ str x8, [x0]
+ ret
+.Lfunc_end16:
+ .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L
+
+ .globl mcl_fp_subNF1L
+ .align 2
+ .type mcl_fp_subNF1L,@function
+mcl_fp_subNF1L: // @mcl_fp_subNF1L
+// BB#0:
+ ldr x8, [x2]
+ ldr x9, [x1]
+ ldr x10, [x3]
+ sub x8, x9, x8
+ and x9, x10, x8, asr #63
+ add x8, x9, x8
+ str x8, [x0]
+ ret
+.Lfunc_end17:
+ .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L
+
+ .globl mcl_fpDbl_add1L
+ .align 2
+ .type mcl_fpDbl_add1L,@function
+mcl_fpDbl_add1L: // @mcl_fpDbl_add1L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ ldr x12, [x3]
+ adds x8, x9, x8
+ str x8, [x0]
+ adcs x8, x10, x11
+ adcs x9, xzr, xzr
+ subs x10, x8, x12
+ sbcs x9, x9, xzr
+ tst x9, #0x1
+ csel x8, x8, x10, ne
+ str x8, [x0, #8]
+ ret
+.Lfunc_end18:
+ .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L
+
+ .globl mcl_fpDbl_sub1L
+ .align 2
+ .type mcl_fpDbl_sub1L,@function
+mcl_fpDbl_sub1L: // @mcl_fpDbl_sub1L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ ldr x12, [x3]
+ subs x8, x8, x9
+ str x8, [x0]
+ sbcs x8, x11, x10
+ ngcs x9, xzr
+ tst x9, #0x1
+ csel x9, x12, xzr, ne
+ add x8, x9, x8
+ str x8, [x0, #8]
+ ret
+.Lfunc_end19:
+ .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L
+
+ .globl mcl_fp_mulUnitPre2L
+ .align 2
+ .type mcl_fp_mulUnitPre2L,@function
+mcl_fp_mulUnitPre2L: // @mcl_fp_mulUnitPre2L
+// BB#0:
+ ldp x8, x9, [x1]
+ mul x10, x8, x2
+ mul x11, x9, x2
+ umulh x8, x8, x2
+ umulh x9, x9, x2
+ adds x8, x8, x11
+ stp x10, x8, [x0]
+ adcs x8, x9, xzr
+ str x8, [x0, #16]
+ ret
+.Lfunc_end20:
+ .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L
+
+ .globl mcl_fpDbl_mulPre2L
+ .align 2
+ .type mcl_fpDbl_mulPre2L,@function
+mcl_fpDbl_mulPre2L: // @mcl_fpDbl_mulPre2L
+// BB#0:
+ ldp x8, x11, [x2]
+ ldp x9, x10, [x1]
+ mul x12, x9, x8
+ umulh x13, x10, x8
+ mul x14, x10, x8
+ umulh x8, x9, x8
+ mul x15, x9, x11
+ mul x16, x10, x11
+ umulh x9, x9, x11
+ umulh x10, x10, x11
+ adds x8, x8, x14
+ adcs x11, x13, xzr
+ adds x8, x8, x15
+ stp x12, x8, [x0]
+ adcs x8, x11, x16
+ adcs x11, xzr, xzr
+ adds x8, x8, x9
+ str x8, [x0, #16]
+ adcs x8, x11, x10
+ str x8, [x0, #24]
+ ret
+.Lfunc_end21:
+ .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L
+
+ .globl mcl_fpDbl_sqrPre2L
+ .align 2
+ .type mcl_fpDbl_sqrPre2L,@function
+mcl_fpDbl_sqrPre2L: // @mcl_fpDbl_sqrPre2L
+// BB#0:
+ ldp x8, x9, [x1]
+ mul x10, x8, x8
+ umulh x11, x9, x8
+ mul x12, x9, x8
+ umulh x8, x8, x8
+ umulh x13, x9, x9
+ mul x9, x9, x9
+ str x10, [x0]
+ adds x8, x8, x12
+ adcs x10, x11, xzr
+ adds x9, x11, x9
+ adcs x11, x13, xzr
+ adds x8, x12, x8
+ str x8, [x0, #8]
+ adcs x8, x9, x10
+ str x8, [x0, #16]
+ adcs x8, x11, xzr
+ str x8, [x0, #24]
+ ret
+.Lfunc_end22:
+ .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L
+
+ .globl mcl_fp_mont2L
+ .align 2
+ .type mcl_fp_mont2L,@function
+mcl_fp_mont2L: // @mcl_fp_mont2L
+// BB#0:
+ ldp x8, x14, [x2]
+ ldp x9, x10, [x1]
+ ldur x11, [x3, #-8]
+ ldp x12, x13, [x3]
+ umulh x15, x10, x8
+ mul x16, x10, x8
+ umulh x17, x9, x8
+ mul x8, x9, x8
+ umulh x18, x14, x10
+ mul x10, x14, x10
+ umulh x1, x14, x9
+ mul x9, x14, x9
+ adds x14, x17, x16
+ mul x16, x8, x11
+ adcs x15, x15, xzr
+ mul x17, x16, x13
+ umulh x2, x16, x12
+ adds x17, x2, x17
+ umulh x2, x16, x13
+ mul x16, x16, x12
+ adcs x2, x2, xzr
+ cmn x16, x8
+ adcs x8, x17, x14
+ adcs x14, x2, x15
+ adcs x15, xzr, xzr
+ adds x10, x1, x10
+ adcs x16, x18, xzr
+ adds x8, x8, x9
+ adcs x9, x14, x10
+ mul x10, x8, x11
+ adcs x11, x15, x16
+ umulh x14, x10, x13
+ mul x15, x10, x13
+ umulh x16, x10, x12
+ mul x10, x10, x12
+ adcs x17, xzr, xzr
+ adds x15, x16, x15
+ adcs x14, x14, xzr
+ cmn x10, x8
+ adcs x8, x15, x9
+ adcs x9, x14, x11
+ adcs x10, x17, xzr
+ subs x11, x8, x12
+ sbcs x12, x9, x13
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x8, x8, x11, ne
+ csel x9, x9, x12, ne
+ stp x8, x9, [x0]
+ ret
+.Lfunc_end23:
+ .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L
+
+ .globl mcl_fp_montNF2L
+ .align 2
+ .type mcl_fp_montNF2L,@function
+mcl_fp_montNF2L: // @mcl_fp_montNF2L
+// BB#0:
+ ldp x8, x14, [x2]
+ ldp x9, x10, [x1]
+ ldur x11, [x3, #-8]
+ ldp x12, x13, [x3]
+ umulh x15, x10, x8
+ mul x16, x10, x8
+ umulh x17, x9, x8
+ mul x8, x9, x8
+ umulh x18, x14, x10
+ mul x10, x14, x10
+ umulh x1, x14, x9
+ mul x9, x14, x9
+ adds x14, x17, x16
+ mul x16, x8, x11
+ adcs x15, x15, xzr
+ mul x17, x16, x12
+ cmn x17, x8
+ mul x8, x16, x13
+ umulh x17, x16, x13
+ umulh x16, x16, x12
+ adcs x8, x8, x14
+ adcs x14, x15, xzr
+ adds x8, x8, x16
+ adcs x14, x14, x17
+ adds x10, x1, x10
+ adcs x15, x18, xzr
+ adds x8, x9, x8
+ adcs x9, x10, x14
+ mul x10, x8, x11
+ adcs x11, x15, xzr
+ mul x14, x10, x13
+ mul x15, x10, x12
+ umulh x16, x10, x13
+ umulh x10, x10, x12
+ cmn x15, x8
+ adcs x8, x14, x9
+ adcs x9, x11, xzr
+ adds x8, x8, x10
+ adcs x9, x9, x16
+ subs x10, x8, x12
+ sbcs x11, x9, x13
+ cmp x11, #0 // =0
+ csel x8, x8, x10, lt
+ csel x9, x9, x11, lt
+ stp x8, x9, [x0]
+ ret
+.Lfunc_end24:
+ .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L
+
+ .globl mcl_fp_montRed2L
+ .align 2
+ .type mcl_fp_montRed2L,@function
+mcl_fp_montRed2L: // @mcl_fp_montRed2L
+// BB#0:
+ ldur x8, [x2, #-8]
+ ldp x9, x14, [x1]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x1, #16]
+ mul x15, x9, x8
+ mul x16, x15, x11
+ umulh x17, x15, x10
+ adds x16, x17, x16
+ umulh x17, x15, x11
+ mul x15, x15, x10
+ adcs x17, x17, xzr
+ cmn x9, x15
+ adcs x9, x14, x16
+ adcs x12, x12, x17
+ mul x8, x9, x8
+ adcs x13, x13, xzr
+ umulh x14, x8, x11
+ mul x15, x8, x11
+ umulh x16, x8, x10
+ mul x8, x8, x10
+ adcs x17, xzr, xzr
+ adds x15, x16, x15
+ adcs x14, x14, xzr
+ cmn x8, x9
+ adcs x8, x15, x12
+ adcs x9, x14, x13
+ adcs x12, x17, xzr
+ subs x10, x8, x10
+ sbcs x11, x9, x11
+ sbcs x12, x12, xzr
+ tst x12, #0x1
+ csel x8, x8, x10, ne
+ csel x9, x9, x11, ne
+ stp x8, x9, [x0]
+ ret
+.Lfunc_end25:
+ .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L
+
+ .globl mcl_fp_addPre2L
+ .align 2
+ .type mcl_fp_addPre2L,@function
+mcl_fp_addPre2L: // @mcl_fp_addPre2L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ adds x8, x9, x8
+ str x8, [x0]
+ adcs x9, x10, x11
+ adcs x8, xzr, xzr
+ str x9, [x0, #8]
+ mov x0, x8
+ ret
+.Lfunc_end26:
+ .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L
+
+ .globl mcl_fp_subPre2L
+ .align 2
+ .type mcl_fp_subPre2L,@function
+mcl_fp_subPre2L: // @mcl_fp_subPre2L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ subs x8, x8, x9
+ str x8, [x0]
+ sbcs x9, x11, x10
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0, #8]
+ mov x0, x8
+ ret
+.Lfunc_end27:
+ .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L
+
+ .globl mcl_fp_shr1_2L
+ .align 2
+ .type mcl_fp_shr1_2L,@function
+mcl_fp_shr1_2L: // @mcl_fp_shr1_2L
+// BB#0:
+ ldp x8, x9, [x1]
+ extr x8, x9, x8, #1
+ lsr x9, x9, #1
+ stp x8, x9, [x0]
+ ret
+.Lfunc_end28:
+ .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L
+
+ .globl mcl_fp_add2L
+ .align 2
+ .type mcl_fp_add2L,@function
+mcl_fp_add2L: // @mcl_fp_add2L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ adds x8, x9, x8
+ ldp x9, x12, [x3]
+ adcs x10, x10, x11
+ stp x8, x10, [x0]
+ adcs x11, xzr, xzr
+ subs x9, x8, x9
+ sbcs x8, x10, x12
+ sbcs x10, x11, xzr
+ and w10, w10, #0x1
+ tbnz w10, #0, .LBB29_2
+// BB#1: // %nocarry
+ stp x9, x8, [x0]
+.LBB29_2: // %carry
+ ret
+.Lfunc_end29:
+ .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L
+
+ .globl mcl_fp_addNF2L
+ .align 2
+ .type mcl_fp_addNF2L,@function
+mcl_fp_addNF2L: // @mcl_fp_addNF2L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x3]
+ adds x8, x10, x8
+ adcs x9, x11, x9
+ subs x10, x8, x12
+ sbcs x11, x9, x13
+ cmp x11, #0 // =0
+ csel x8, x8, x10, lt
+ csel x9, x9, x11, lt
+ stp x8, x9, [x0]
+ ret
+.Lfunc_end30:
+ .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L
+
+ .globl mcl_fp_sub2L
+ .align 2
+ .type mcl_fp_sub2L,@function
+mcl_fp_sub2L: // @mcl_fp_sub2L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ subs x9, x8, x9
+ sbcs x8, x11, x10
+ stp x9, x8, [x0]
+ ngcs x10, xzr
+ and w10, w10, #0x1
+ tbnz w10, #0, .LBB31_2
+// BB#1: // %nocarry
+ ret
+.LBB31_2: // %carry
+ ldp x10, x11, [x3]
+ adds x9, x10, x9
+ adcs x8, x11, x8
+ stp x9, x8, [x0]
+ ret
+.Lfunc_end31:
+ .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L
+
+ .globl mcl_fp_subNF2L
+ .align 2
+ .type mcl_fp_subNF2L,@function
+mcl_fp_subNF2L: // @mcl_fp_subNF2L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ subs x8, x8, x9
+ ldp x9, x12, [x3]
+ sbcs x10, x11, x10
+ asr x11, x10, #63
+ and x9, x11, x9
+ and x11, x11, x12
+ adds x8, x9, x8
+ str x8, [x0]
+ adcs x8, x11, x10
+ str x8, [x0, #8]
+ ret
+.Lfunc_end32:
+ .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L
+
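+// ---- Editorial annotation (not part of the generated source) ----
+// The subNF routines avoid a branch: the borrow of a-b is spread into an
+// all-ones/all-zero mask (asr #63 on the top limb), which selects p or 0 to
+// add back. A C sketch of the 2-limb case, hypothetical names:
+//
+//   #include <stdint.h>
+//   void fp_sub_nf_2(uint64_t r[2], const uint64_t a[2],
+//                    const uint64_t b[2], const uint64_t p[2]) {
+//       uint64_t d0 = a[0] - b[0];                  // subs
+//       uint64_t bw = a[0] < b[0];
+//       uint64_t t1 = a[1] - b[1];                  // sbcs
+//       uint64_t d1 = t1 - bw;
+//       uint64_t bw1 = (a[1] < b[1]) | (t1 < bw);
+//       uint64_t mask = 0 - bw1;                    // the asr #63 mask
+//       uint64_t s0 = d0 + (p[0] & mask);           // adds
+//       r[0] = s0;
+//       r[1] = d1 + (p[1] & mask) + (s0 < d0);      // adcs
+//   }
+//
+// (The asm derives the mask from the sign bit of the top limb, which equals
+// the borrow when the modulus leaves the top bit free.)
+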
+ .globl mcl_fpDbl_add2L
+ .align 2
+ .type mcl_fpDbl_add2L,@function
+mcl_fpDbl_add2L: // @mcl_fpDbl_add2L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x15, [x1]
+ ldp x11, x14, [x2]
+ ldp x12, x13, [x1, #16]
+ adds x10, x11, x10
+ ldp x11, x16, [x3]
+ str x10, [x0]
+ adcs x10, x14, x15
+ str x10, [x0, #8]
+ adcs x8, x8, x12
+ adcs x9, x9, x13
+ adcs x10, xzr, xzr
+ subs x11, x8, x11
+ sbcs x12, x9, x16
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x8, x8, x11, ne
+ csel x9, x9, x12, ne
+ stp x8, x9, [x0, #16]
+ ret
+.Lfunc_end33:
+ .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L
+
+ .globl mcl_fpDbl_sub2L
+ .align 2
+ .type mcl_fpDbl_sub2L,@function
+mcl_fpDbl_sub2L: // @mcl_fpDbl_sub2L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x14, [x2]
+ ldp x11, x15, [x1]
+ ldp x12, x13, [x1, #16]
+ subs x10, x11, x10
+ ldp x11, x16, [x3]
+ str x10, [x0]
+ sbcs x10, x15, x14
+ str x10, [x0, #8]
+ sbcs x8, x12, x8
+ sbcs x9, x13, x9
+ ngcs x10, xzr
+ tst x10, #0x1
+ csel x10, x16, xzr, ne
+ csel x11, x11, xzr, ne
+ adds x8, x11, x8
+ str x8, [x0, #16]
+ adcs x8, x10, x9
+ str x8, [x0, #24]
+ ret
+.Lfunc_end34:
+ .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L
+
+ .globl mcl_fp_mulUnitPre3L
+ .align 2
+ .type mcl_fp_mulUnitPre3L,@function
+mcl_fp_mulUnitPre3L: // @mcl_fp_mulUnitPre3L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldr x10, [x1, #16]
+ mul x11, x8, x2
+ mul x12, x9, x2
+ umulh x8, x8, x2
+ mul x13, x10, x2
+ umulh x9, x9, x2
+ umulh x10, x10, x2
+ adds x8, x8, x12
+ stp x11, x8, [x0]
+ adcs x8, x9, x13
+ str x8, [x0, #16]
+ adcs x8, x10, xzr
+ str x8, [x0, #24]
+ ret
+.Lfunc_end35:
+ .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L
+
+ .globl mcl_fpDbl_mulPre3L
+ .align 2
+ .type mcl_fpDbl_mulPre3L,@function
+mcl_fpDbl_mulPre3L: // @mcl_fpDbl_mulPre3L
+// BB#0:
+ stp x20, x19, [sp, #-16]!
+ ldp x8, x9, [x1]
+ ldp x10, x12, [x2]
+ ldr x11, [x1, #16]
+ ldr x13, [x2, #16]
+ mul x14, x8, x10
+ umulh x15, x11, x10
+ mul x16, x11, x10
+ umulh x17, x9, x10
+ mul x18, x9, x10
+ umulh x10, x8, x10
+ mul x1, x8, x12
+ mul x2, x11, x12
+ mul x3, x9, x12
+ umulh x4, x11, x12
+ umulh x5, x9, x12
+ umulh x12, x8, x12
+ mul x6, x8, x13
+ mul x7, x11, x13
+ mul x19, x9, x13
+ umulh x8, x8, x13
+ umulh x9, x9, x13
+ umulh x11, x11, x13
+ str x14, [x0]
+ adds x10, x10, x18
+ adcs x13, x17, x16
+ adcs x14, x15, xzr
+ adds x10, x10, x1
+ str x10, [x0, #8]
+ adcs x10, x13, x3
+ adcs x13, x14, x2
+ adcs x14, xzr, xzr
+ adds x10, x10, x12
+ adcs x12, x13, x5
+ adcs x13, x14, x4
+ adds x10, x10, x6
+ str x10, [x0, #16]
+ adcs x10, x12, x19
+ adcs x12, x13, x7
+ adcs x13, xzr, xzr
+ adds x8, x10, x8
+ str x8, [x0, #24]
+ adcs x8, x12, x9
+ str x8, [x0, #32]
+ adcs x8, x13, x11
+ str x8, [x0, #40]
+ ldp x20, x19, [sp], #16
+ ret
+.Lfunc_end36:
+ .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L
+
+ .globl mcl_fpDbl_sqrPre3L
+ .align 2
+ .type mcl_fpDbl_sqrPre3L,@function
+mcl_fpDbl_sqrPre3L: // @mcl_fpDbl_sqrPre3L
+// BB#0:
+ ldp x8, x10, [x1]
+ ldr x9, [x1, #16]
+ mul x11, x8, x8
+ umulh x12, x9, x8
+ mul x13, x9, x8
+ umulh x14, x10, x8
+ mul x15, x10, x8
+ umulh x8, x8, x8
+ mul x16, x9, x10
+ str x11, [x0]
+ adds x8, x8, x15
+ adcs x11, x14, x13
+ adcs x17, x12, xzr
+ adds x8, x8, x15
+ mul x15, x10, x10
+ str x8, [x0, #8]
+ umulh x8, x9, x10
+ umulh x10, x10, x10
+ adcs x11, x11, x15
+ adcs x15, x17, x16
+ adcs x17, xzr, xzr
+ adds x11, x11, x14
+ umulh x14, x9, x9
+ mul x9, x9, x9
+ adcs x10, x15, x10
+ adcs x15, x17, x8
+ adds x12, x12, x16
+ adcs x8, x8, x9
+ adcs x9, x14, xzr
+ adds x11, x13, x11
+ adcs x10, x12, x10
+ stp x11, x10, [x0, #16]
+ adcs x8, x8, x15
+ str x8, [x0, #32]
+ adcs x8, x9, xzr
+ str x8, [x0, #40]
+ ret
+.Lfunc_end37:
+ .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L
+
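+// ---- Editorial annotation (not part of the generated source) ----
+// fpDbl_mulPre / fpDbl_sqrPre are plain schoolbook products: n limbs times
+// n limbs into a 2n-limb result, with no modular reduction; every mul/umulh
+// pair above is one 64x64->128 partial product. Generic C sketch
+// (hypothetical helper, not mcl's API):
+//
+//   #include <stdint.h>
+//   void mul_pre(uint64_t *z, const uint64_t *x, const uint64_t *y, int n) {
+//       for (int i = 0; i < 2 * n; i++) z[i] = 0;
+//       for (int i = 0; i < n; i++) {
+//           uint64_t carry = 0;
+//           for (int j = 0; j < n; j++) {
+//               unsigned __int128 t = (unsigned __int128)x[j] * y[i]
+//                                   + z[i + j] + carry;
+//               z[i + j] = (uint64_t)t;             // mul: low half
+//               carry = (uint64_t)(t >> 64);        // umulh: high half
+//           }
+//           z[i + n] = carry;
+//       }
+//   }
+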
+ .globl mcl_fp_mont3L
+ .align 2
+ .type mcl_fp_mont3L,@function
+mcl_fp_mont3L: // @mcl_fp_mont3L
+// BB#0:
+ stp x24, x23, [sp, #-48]!
+ stp x22, x21, [sp, #16]
+ stp x20, x19, [sp, #32]
+ ldp x15, x16, [x2]
+ ldp x13, x14, [x1, #8]
+ ldr x12, [x1]
+ ldur x11, [x3, #-8]
+ ldp x9, x8, [x3, #8]
+ ldr x10, [x3]
+ ldr x17, [x2, #16]
+ umulh x18, x14, x15
+ mul x1, x14, x15
+ umulh x2, x13, x15
+ mul x3, x13, x15
+ umulh x4, x12, x15
+ mul x15, x12, x15
+ umulh x5, x16, x14
+ mul x6, x16, x14
+ umulh x7, x16, x13
+ mul x19, x16, x13
+ umulh x20, x16, x12
+ mul x16, x16, x12
+ umulh x21, x17, x14
+ mul x14, x17, x14
+ adds x3, x4, x3
+ mul x4, x15, x11
+ adcs x1, x2, x1
+ mul x2, x4, x8
+ mul x22, x4, x9
+ umulh x23, x4, x10
+ adcs x18, x18, xzr
+ adds x22, x23, x22
+ umulh x23, x4, x9
+ adcs x2, x23, x2
+ umulh x23, x4, x8
+ mul x4, x4, x10
+ adcs x23, x23, xzr
+ cmn x4, x15
+ umulh x15, x17, x13
+ mul x13, x17, x13
+ umulh x4, x17, x12
+ mul x12, x17, x12
+ adcs x17, x22, x3
+ adcs x1, x2, x1
+ adcs x18, x23, x18
+ adcs x2, xzr, xzr
+ adds x3, x20, x19
+ adcs x6, x7, x6
+ adcs x5, x5, xzr
+ adds x16, x17, x16
+ adcs x17, x1, x3
+ mul x1, x16, x11
+ adcs x18, x18, x6
+ mul x3, x1, x8
+ mul x6, x1, x9
+ umulh x7, x1, x10
+ adcs x2, x2, x5
+ adcs x5, xzr, xzr
+ adds x6, x7, x6
+ umulh x7, x1, x9
+ adcs x3, x7, x3
+ umulh x7, x1, x8
+ mul x1, x1, x10
+ adcs x7, x7, xzr
+ cmn x1, x16
+ adcs x16, x6, x17
+ adcs x17, x3, x18
+ adcs x18, x7, x2
+ adcs x1, x5, xzr
+ adds x13, x4, x13
+ adcs x14, x15, x14
+ adcs x15, x21, xzr
+ adds x12, x16, x12
+ adcs x13, x17, x13
+ mul x11, x12, x11
+ adcs x14, x18, x14
+ umulh x16, x11, x8
+ mul x17, x11, x8
+ umulh x18, x11, x9
+ mul x2, x11, x9
+ umulh x3, x11, x10
+ mul x11, x11, x10
+ adcs x15, x1, x15
+ adcs x1, xzr, xzr
+ adds x2, x3, x2
+ adcs x17, x18, x17
+ adcs x16, x16, xzr
+ cmn x11, x12
+ adcs x11, x2, x13
+ adcs x12, x17, x14
+ adcs x13, x16, x15
+ adcs x14, x1, xzr
+ subs x10, x11, x10
+ sbcs x9, x12, x9
+ sbcs x8, x13, x8
+ sbcs x14, x14, xzr
+ tst x14, #0x1
+ csel x10, x11, x10, ne
+ csel x9, x12, x9, ne
+ csel x8, x13, x8, ne
+ stp x10, x9, [x0]
+ str x8, [x0, #16]
+ ldp x20, x19, [sp, #32]
+ ldp x22, x21, [sp, #16]
+ ldp x24, x23, [sp], #48
+ ret
+.Lfunc_end38:
+ .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L
+
+ .globl mcl_fp_montNF3L
+ .align 2
+ .type mcl_fp_montNF3L,@function
+mcl_fp_montNF3L: // @mcl_fp_montNF3L
+// BB#0:
+ stp x22, x21, [sp, #-32]!
+ stp x20, x19, [sp, #16]
+ ldp x14, x16, [x2]
+ ldp x15, x13, [x1, #8]
+ ldr x12, [x1]
+ ldur x11, [x3, #-8]
+ ldp x9, x8, [x3, #8]
+ ldr x10, [x3]
+ ldr x17, [x2, #16]
+ umulh x18, x13, x14
+ mul x1, x13, x14
+ umulh x2, x15, x14
+ mul x3, x15, x14
+ umulh x4, x12, x14
+ mul x14, x12, x14
+ umulh x5, x16, x13
+ mul x6, x16, x13
+ umulh x7, x16, x15
+ mul x19, x16, x15
+ umulh x20, x16, x12
+ mul x16, x16, x12
+ umulh x21, x17, x13
+ mul x13, x17, x13
+ adds x3, x4, x3
+ mul x4, x14, x11
+ adcs x1, x2, x1
+ mul x2, x4, x10
+ adcs x18, x18, xzr
+ cmn x2, x14
+ umulh x14, x17, x15
+ mul x15, x17, x15
+ umulh x2, x17, x12
+ mul x12, x17, x12
+ mul x17, x4, x9
+ adcs x17, x17, x3
+ mul x3, x4, x8
+ adcs x1, x3, x1
+ umulh x3, x4, x10
+ adcs x18, x18, xzr
+ adds x17, x17, x3
+ umulh x3, x4, x9
+ adcs x1, x1, x3
+ umulh x3, x4, x8
+ adcs x18, x18, x3
+ adds x3, x20, x19
+ adcs x4, x7, x6
+ adcs x5, x5, xzr
+ adds x16, x16, x17
+ adcs x17, x3, x1
+ mul x1, x16, x11
+ adcs x18, x4, x18
+ mul x3, x1, x8
+ mul x4, x1, x10
+ adcs x5, x5, xzr
+ cmn x4, x16
+ mul x16, x1, x9
+ umulh x4, x1, x8
+ adcs x16, x16, x17
+ umulh x17, x1, x9
+ umulh x1, x1, x10
+ adcs x18, x3, x18
+ adcs x3, x5, xzr
+ adds x16, x16, x1
+ adcs x17, x18, x17
+ adcs x18, x3, x4
+ adds x15, x2, x15
+ adcs x13, x14, x13
+ adcs x14, x21, xzr
+ adds x12, x12, x16
+ adcs x15, x15, x17
+ mul x11, x12, x11
+ adcs x13, x13, x18
+ mul x16, x11, x8
+ mul x17, x11, x9
+ mul x18, x11, x10
+ umulh x1, x11, x8
+ umulh x2, x11, x9
+ umulh x11, x11, x10
+ adcs x14, x14, xzr
+ cmn x18, x12
+ adcs x12, x17, x15
+ adcs x13, x16, x13
+ adcs x14, x14, xzr
+ adds x11, x12, x11
+ adcs x12, x13, x2
+ adcs x13, x14, x1
+ subs x10, x11, x10
+ sbcs x9, x12, x9
+ sbcs x8, x13, x8
+ asr x14, x8, #63
+ cmp x14, #0 // =0
+ csel x10, x11, x10, lt
+ csel x9, x12, x9, lt
+ csel x8, x13, x8, lt
+ stp x10, x9, [x0]
+ str x8, [x0, #16]
+ ldp x20, x19, [sp, #16]
+ ldp x22, x21, [sp], #32
+ ret
+.Lfunc_end39:
+ .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L
+
+ .globl mcl_fp_montRed3L
+ .align 2
+ .type mcl_fp_montRed3L,@function
+mcl_fp_montRed3L: // @mcl_fp_montRed3L
+// BB#0:
+ ldur x8, [x2, #-8]
+ ldp x9, x17, [x1]
+ ldp x12, x10, [x2, #8]
+ ldr x11, [x2]
+ ldp x13, x14, [x1, #32]
+ ldp x15, x16, [x1, #16]
+ mul x18, x9, x8
+ umulh x1, x18, x10
+ mul x2, x18, x10
+ umulh x3, x18, x12
+ mul x4, x18, x12
+ umulh x5, x18, x11
+ mul x18, x18, x11
+ adds x4, x5, x4
+ adcs x2, x3, x2
+ adcs x1, x1, xzr
+ cmn x9, x18
+ adcs x9, x17, x4
+ adcs x15, x15, x2
+ mul x17, x9, x8
+ adcs x16, x16, x1
+ umulh x18, x17, x10
+ mul x1, x17, x10
+ umulh x2, x17, x12
+ mul x3, x17, x12
+ umulh x4, x17, x11
+ mul x17, x17, x11
+ adcs x13, x13, xzr
+ adcs x14, x14, xzr
+ adcs x5, xzr, xzr
+ adds x3, x4, x3
+ adcs x1, x2, x1
+ adcs x18, x18, xzr
+ cmn x17, x9
+ adcs x9, x3, x15
+ adcs x15, x1, x16
+ mul x8, x9, x8
+ adcs x13, x18, x13
+ umulh x16, x8, x10
+ mul x17, x8, x10
+ umulh x18, x8, x12
+ mul x1, x8, x12
+ umulh x2, x8, x11
+ mul x8, x8, x11
+ adcs x14, x14, xzr
+ adcs x3, x5, xzr
+ adds x1, x2, x1
+ adcs x17, x18, x17
+ adcs x16, x16, xzr
+ cmn x8, x9
+ adcs x8, x1, x15
+ adcs x9, x17, x13
+ adcs x13, x16, x14
+ adcs x14, x3, xzr
+ subs x11, x8, x11
+ sbcs x12, x9, x12
+ sbcs x10, x13, x10
+ sbcs x14, x14, xzr
+ tst x14, #0x1
+ csel x8, x8, x11, ne
+ csel x9, x9, x12, ne
+ csel x10, x13, x10, ne
+ stp x8, x9, [x0]
+ str x10, [x0, #16]
+ ret
+.Lfunc_end40:
+ .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L
+
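+// ---- Editorial annotation (not part of the generated source) ----
+// montRedNL reduces a 2n-limb value T to T * 2^(-64n) mod p: each round picks
+// m = T[i]*inv mod 2^64 and adds m*p at limb i, zeroing that limb, then one
+// conditional subtraction at the end. C sketch, hypothetical names; inv is
+// -p^-1 mod 2^64 as in the ldur above:
+//
+//   #include <stdint.h>
+//   void mont_red(uint64_t *r, uint64_t *T /* 2n limbs */,
+//                 const uint64_t *p /* n limbs */, uint64_t inv, int n) {
+//       uint64_t extra = 0;                          // carry limb above T[2n-1]
+//       for (int i = 0; i < n; i++) {
+//           uint64_t m = T[i] * inv, carry = 0;
+//           for (int j = 0; j < n; j++) {
+//               unsigned __int128 t = (unsigned __int128)m * p[j] + T[i + j] + carry;
+//               T[i + j] = (uint64_t)t;              // T[i] becomes 0 at j == 0
+//               carry = (uint64_t)(t >> 64);
+//           }
+//           for (int k = i + n; carry && k < 2 * n; k++) {   // ripple upwards
+//               unsigned __int128 t = (unsigned __int128)T[k] + carry;
+//               T[k] = (uint64_t)t;
+//               carry = (uint64_t)(t >> 64);
+//           }
+//           extra += carry;
+//       }
+//       uint64_t borrow = 0;                         // trial subtraction of p
+//       for (int i = 0; i < n; i++) {
+//           uint64_t t = T[n + i] - p[i];
+//           uint64_t b2 = (T[n + i] < p[i]) | (t < borrow);
+//           r[i] = t - borrow;
+//           borrow = b2;
+//       }
+//       if (!extra && borrow)                        // keep unsubtracted value
+//           for (int i = 0; i < n; i++) r[i] = T[n + i];
+//   }
+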
+ .globl mcl_fp_addPre3L
+ .align 2
+ .type mcl_fp_addPre3L,@function
+mcl_fp_addPre3L: // @mcl_fp_addPre3L
+// BB#0:
+ ldp x11, x8, [x2, #8]
+ ldp x9, x12, [x1]
+ ldr x10, [x2]
+ ldr x13, [x1, #16]
+ adds x9, x10, x9
+ str x9, [x0]
+ adcs x9, x11, x12
+ str x9, [x0, #8]
+ adcs x9, x8, x13
+ adcs x8, xzr, xzr
+ str x9, [x0, #16]
+ mov x0, x8
+ ret
+.Lfunc_end41:
+ .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L
+
+ .globl mcl_fp_subPre3L
+ .align 2
+ .type mcl_fp_subPre3L,@function
+mcl_fp_subPre3L: // @mcl_fp_subPre3L
+// BB#0:
+ ldp x11, x8, [x2, #8]
+ ldp x9, x12, [x1]
+ ldr x10, [x2]
+ ldr x13, [x1, #16]
+ subs x9, x9, x10
+ str x9, [x0]
+ sbcs x9, x12, x11
+ str x9, [x0, #8]
+ sbcs x9, x13, x8
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0, #16]
+ mov x0, x8
+ ret
+.Lfunc_end42:
+ .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L
+
+ .globl mcl_fp_shr1_3L
+ .align 2
+ .type mcl_fp_shr1_3L,@function
+mcl_fp_shr1_3L: // @mcl_fp_shr1_3L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldr x10, [x1, #16]
+ extr x8, x9, x8, #1
+ extr x9, x10, x9, #1
+ lsr x10, x10, #1
+ stp x8, x9, [x0]
+ str x10, [x0, #16]
+ ret
+.Lfunc_end43:
+ .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L
+
+ .globl mcl_fp_add3L
+ .align 2
+ .type mcl_fp_add3L,@function
+mcl_fp_add3L: // @mcl_fp_add3L
+// BB#0:
+ ldp x11, x8, [x2, #8]
+ ldp x9, x12, [x1]
+ ldr x10, [x2]
+ ldr x13, [x1, #16]
+ adds x9, x10, x9
+ adcs x11, x11, x12
+ ldr x10, [x3]
+ ldp x12, x14, [x3, #8]
+ stp x9, x11, [x0]
+ adcs x8, x8, x13
+ str x8, [x0, #16]
+ adcs x13, xzr, xzr
+ subs x10, x9, x10
+ sbcs x9, x11, x12
+ sbcs x8, x8, x14
+ sbcs x11, x13, xzr
+ and w11, w11, #0x1
+ tbnz w11, #0, .LBB44_2
+// BB#1: // %nocarry
+ stp x10, x9, [x0]
+ str x8, [x0, #16]
+.LBB44_2: // %carry
+ ret
+.Lfunc_end44:
+ .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L
+
+ .globl mcl_fp_addNF3L
+ .align 2
+ .type mcl_fp_addNF3L,@function
+mcl_fp_addNF3L: // @mcl_fp_addNF3L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x2]
+ ldr x12, [x1, #16]
+ ldr x13, [x2, #16]
+ adds x8, x10, x8
+ adcs x9, x11, x9
+ ldp x10, x11, [x3]
+ ldr x14, [x3, #16]
+ adcs x12, x13, x12
+ subs x10, x8, x10
+ sbcs x11, x9, x11
+ sbcs x13, x12, x14
+ asr x14, x13, #63
+ cmp x14, #0 // =0
+ csel x8, x8, x10, lt
+ csel x9, x9, x11, lt
+ csel x10, x12, x13, lt
+ stp x8, x9, [x0]
+ str x10, [x0, #16]
+ ret
+.Lfunc_end45:
+ .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L
+
+ .globl mcl_fp_sub3L
+ .align 2
+ .type mcl_fp_sub3L,@function
+mcl_fp_sub3L: // @mcl_fp_sub3L
+// BB#0:
+ ldp x11, x10, [x2, #8]
+ ldp x8, x12, [x1]
+ ldr x9, [x2]
+ ldr x13, [x1, #16]
+ subs x8, x8, x9
+ sbcs x9, x12, x11
+ stp x8, x9, [x0]
+ sbcs x10, x13, x10
+ str x10, [x0, #16]
+ ngcs x11, xzr
+ and w11, w11, #0x1
+ tbnz w11, #0, .LBB46_2
+// BB#1: // %nocarry
+ ret
+.LBB46_2: // %carry
+ ldp x13, x11, [x3, #8]
+ ldr x12, [x3]
+ adds x8, x12, x8
+ adcs x9, x13, x9
+ adcs x10, x11, x10
+ stp x8, x9, [x0]
+ str x10, [x0, #16]
+ ret
+.Lfunc_end46:
+ .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L
+
+ .globl mcl_fp_subNF3L
+ .align 2
+ .type mcl_fp_subNF3L,@function
+mcl_fp_subNF3L: // @mcl_fp_subNF3L
+// BB#0:
+ ldp x8, x9, [x2]
+ ldp x10, x11, [x1]
+ ldr x12, [x2, #16]
+ ldr x13, [x1, #16]
+ subs x8, x10, x8
+ sbcs x9, x11, x9
+ ldp x10, x11, [x3]
+ ldr x14, [x3, #16]
+ sbcs x12, x13, x12
+ asr x13, x12, #63
+ and x11, x13, x11
+ and x14, x13, x14
+ extr x13, x13, x12, #63
+ and x10, x13, x10
+ adds x8, x10, x8
+ str x8, [x0]
+ adcs x8, x11, x9
+ str x8, [x0, #8]
+ adcs x8, x14, x12
+ str x8, [x0, #16]
+ ret
+.Lfunc_end47:
+ .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L
+
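+// ---- Editorial annotation (not part of the generated source) ----
+// The mcl_fp_addNL routines share one shape: full add, trial-subtract p, and
+// a tbnz on the leftover borrow bit decides which result to keep. Sketch of
+// the 3-limb case, hypothetical names:
+//
+//   #include <stdint.h>
+//   void fp_add_3(uint64_t r[3], const uint64_t a[3],
+//                 const uint64_t b[3], const uint64_t p[3]) {
+//       uint64_t s[3], t[3], carry = 0, borrow = 0;
+//       for (int i = 0; i < 3; i++) {                // adds/adcs chain
+//           unsigned __int128 u = (unsigned __int128)a[i] + b[i] + carry;
+//           s[i] = (uint64_t)u;
+//           carry = (uint64_t)(u >> 64);
+//       }
+//       for (int i = 0; i < 3; i++) {                // subs/sbcs trial
+//           uint64_t u = s[i] - p[i];
+//           uint64_t b2 = (s[i] < p[i]) | (u < borrow);
+//           t[i] = u - borrow;
+//           borrow = b2;
+//       }
+//       int keep_sum = (carry == 0) && borrow;       // the tbnz w, #0 test
+//       for (int i = 0; i < 3; i++) r[i] = keep_sum ? s[i] : t[i];
+//   }
+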
+ .globl mcl_fpDbl_add3L
+ .align 2
+ .type mcl_fpDbl_add3L,@function
+mcl_fpDbl_add3L: // @mcl_fpDbl_add3L
+// BB#0:
+ ldp x8, x9, [x2, #32]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x2, #16]
+ ldp x15, x18, [x2]
+ ldp x16, x17, [x1, #16]
+ ldp x14, x1, [x1]
+ adds x14, x15, x14
+ ldr x15, [x3, #16]
+ str x14, [x0]
+ ldp x14, x2, [x3]
+ adcs x18, x18, x1
+ adcs x12, x12, x16
+ stp x18, x12, [x0, #8]
+ adcs x12, x13, x17
+ adcs x8, x8, x10
+ adcs x9, x9, x11
+ adcs x10, xzr, xzr
+ subs x11, x12, x14
+ sbcs x13, x8, x2
+ sbcs x14, x9, x15
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x10, x12, x11, ne
+ csel x8, x8, x13, ne
+ csel x9, x9, x14, ne
+ stp x10, x8, [x0, #24]
+ str x9, [x0, #40]
+ ret
+.Lfunc_end48:
+ .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L
+
+ .globl mcl_fpDbl_sub3L
+ .align 2
+ .type mcl_fpDbl_sub3L,@function
+mcl_fpDbl_sub3L: // @mcl_fpDbl_sub3L
+// BB#0:
+ ldp x8, x9, [x2, #32]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x2, #16]
+ ldp x14, x18, [x2]
+ ldp x16, x17, [x1, #16]
+ ldp x15, x1, [x1]
+ subs x14, x15, x14
+ ldr x15, [x3, #16]
+ str x14, [x0]
+ ldp x14, x2, [x3]
+ sbcs x18, x1, x18
+ sbcs x12, x16, x12
+ stp x18, x12, [x0, #8]
+ sbcs x12, x17, x13
+ sbcs x8, x10, x8
+ sbcs x9, x11, x9
+ ngcs x10, xzr
+ tst x10, #0x1
+ csel x10, x15, xzr, ne
+ csel x11, x2, xzr, ne
+ csel x13, x14, xzr, ne
+ adds x12, x13, x12
+ adcs x8, x11, x8
+ stp x12, x8, [x0, #24]
+ adcs x8, x10, x9
+ str x8, [x0, #40]
+ ret
+.Lfunc_end49:
+ .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L
+
+ .globl mcl_fp_mulUnitPre4L
+ .align 2
+ .type mcl_fp_mulUnitPre4L,@function
+mcl_fp_mulUnitPre4L: // @mcl_fp_mulUnitPre4L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #16]
+ mul x12, x8, x2
+ mul x13, x9, x2
+ umulh x8, x8, x2
+ mul x14, x10, x2
+ umulh x9, x9, x2
+ mul x15, x11, x2
+ umulh x10, x10, x2
+ umulh x11, x11, x2
+ adds x8, x8, x13
+ stp x12, x8, [x0]
+ adcs x8, x9, x14
+ str x8, [x0, #16]
+ adcs x8, x10, x15
+ str x8, [x0, #24]
+ adcs x8, x11, xzr
+ str x8, [x0, #32]
+ ret
+.Lfunc_end50:
+ .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L
+
+ .globl mcl_fpDbl_mulPre4L
+ .align 2
+ .type mcl_fpDbl_mulPre4L,@function
+mcl_fpDbl_mulPre4L: // @mcl_fpDbl_mulPre4L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #32 // =32
+ ldp x8, x10, [x1]
+ ldp x9, x11, [x1]
+ ldp x12, x14, [x1, #16]
+ ldp x13, x1, [x1, #16]
+ ldp x15, x16, [x2]
+ ldp x17, x18, [x2, #16]
+ mul x2, x8, x15
+ umulh x3, x14, x15
+ mul x4, x14, x15
+ umulh x5, x12, x15
+ mul x6, x12, x15
+ umulh x7, x10, x15
+ mul x19, x10, x15
+ umulh x15, x8, x15
+ mul x20, x8, x16
+ mul x21, x14, x16
+ mul x22, x12, x16
+ mul x23, x10, x16
+ umulh x24, x14, x16
+ umulh x25, x12, x16
+ umulh x26, x10, x16
+ umulh x16, x8, x16
+ mul x27, x8, x17
+ mul x28, x14, x17
+ mul x29, x12, x17
+ mul x30, x10, x17
+ umulh x14, x14, x17
+ stp x3, x14, [sp, #16]
+ umulh x12, x12, x17
+ str x12, [sp, #8] // 8-byte Folded Spill
+ umulh x3, x10, x17
+ umulh x14, x8, x17
+ mul x17, x9, x18
+ umulh x12, x9, x18
+ mul x10, x11, x18
+ umulh x11, x11, x18
+ mul x9, x13, x18
+ umulh x13, x13, x18
+ mul x8, x1, x18
+ umulh x18, x1, x18
+ str x2, [x0]
+ adds x15, x15, x19
+ adcs x1, x7, x6
+ adcs x2, x5, x4
+ ldr x4, [sp, #16] // 8-byte Folded Reload
+ adcs x4, x4, xzr
+ adds x15, x20, x15
+ str x15, [x0, #8]
+ adcs x15, x23, x1
+ adcs x1, x22, x2
+ adcs x2, x21, x4
+ adcs x4, xzr, xzr
+ adds x15, x15, x16
+ adcs x16, x1, x26
+ adcs x1, x2, x25
+ adcs x2, x4, x24
+ adds x15, x15, x27
+ str x15, [x0, #16]
+ adcs x15, x16, x30
+ adcs x16, x1, x29
+ adcs x1, x2, x28
+ adcs x2, xzr, xzr
+ adds x14, x15, x14
+ adcs x15, x16, x3
+ ldr x16, [sp, #8] // 8-byte Folded Reload
+ adcs x16, x1, x16
+ ldr x1, [sp, #24] // 8-byte Folded Reload
+ adcs x1, x2, x1
+ adds x14, x14, x17
+ str x14, [x0, #24]
+ adcs x10, x15, x10
+ adcs x9, x16, x9
+ adcs x8, x1, x8
+ adcs x14, xzr, xzr
+ adds x10, x10, x12
+ adcs x9, x9, x11
+ stp x10, x9, [x0, #32]
+ adcs x8, x8, x13
+ str x8, [x0, #48]
+ adcs x8, x14, x18
+ str x8, [x0, #56]
+ add sp, sp, #32 // =32
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end51:
+ .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L
+
+ .globl mcl_fpDbl_sqrPre4L
+ .align 2
+ .type mcl_fpDbl_sqrPre4L,@function
+mcl_fpDbl_sqrPre4L: // @mcl_fpDbl_sqrPre4L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x13, [x1]
+ ldp x11, x12, [x1, #16]
+ ldr x14, [x1, #16]
+ mul x15, x10, x10
+ umulh x16, x12, x10
+ mul x17, x12, x10
+ umulh x18, x14, x10
+ mul x2, x14, x10
+ umulh x3, x9, x10
+ mul x4, x9, x10
+ umulh x10, x10, x10
+ str x15, [x0]
+ adds x10, x10, x4
+ adcs x15, x3, x2
+ adcs x17, x18, x17
+ adcs x16, x16, xzr
+ adds x10, x10, x4
+ mul x4, x12, x9
+ str x10, [x0, #8]
+ mul x10, x9, x9
+ adcs x10, x15, x10
+ mul x15, x14, x9
+ adcs x17, x17, x15
+ adcs x16, x16, x4
+ adcs x4, xzr, xzr
+ adds x10, x10, x3
+ umulh x3, x9, x9
+ adcs x17, x17, x3
+ umulh x3, x12, x9
+ umulh x9, x14, x9
+ adcs x16, x16, x9
+ adcs x3, x4, x3
+ ldr x1, [x1, #24]
+ adds x10, x10, x2
+ mul x2, x12, x14
+ str x10, [x0, #16]
+ mul x10, x14, x14
+ umulh x12, x12, x14
+ umulh x14, x14, x14
+ adcs x15, x17, x15
+ mul x17, x8, x1
+ adcs x10, x16, x10
+ mul x16, x11, x1
+ adcs x2, x3, x2
+ adcs x3, xzr, xzr
+ adds x15, x15, x18
+ mul x18, x13, x1
+ adcs x9, x10, x9
+ mul x10, x1, x1
+ umulh x8, x8, x1
+ umulh x13, x13, x1
+ umulh x11, x11, x1
+ umulh x1, x1, x1
+ adcs x14, x2, x14
+ adcs x12, x3, x12
+ adds x15, x15, x17
+ adcs x9, x9, x18
+ adcs x14, x14, x16
+ adcs x10, x12, x10
+ adcs x12, xzr, xzr
+ adds x8, x9, x8
+ stp x15, x8, [x0, #24]
+ adcs x8, x14, x13
+ str x8, [x0, #40]
+ adcs x8, x10, x11
+ str x8, [x0, #48]
+ adcs x8, x12, x1
+ str x8, [x0, #56]
+ ret
+.Lfunc_end52:
+ .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L
+
+ .globl mcl_fp_mont4L
+ .align 2
+ .type mcl_fp_mont4L,@function
+mcl_fp_mont4L: // @mcl_fp_mont4L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #16 // =16
+ str x0, [sp, #8] // 8-byte Folded Spill
+ ldp x13, x16, [x1, #16]
+ ldp x14, x15, [x1]
+ ldur x0, [x3, #-8]
+ ldp x9, x8, [x3, #16]
+ ldp x11, x10, [x3]
+ ldp x17, x18, [x2]
+ ldp x1, x2, [x2, #16]
+ umulh x3, x16, x17
+ mul x4, x16, x17
+ umulh x5, x13, x17
+ mul x6, x13, x17
+ umulh x7, x15, x17
+ mul x19, x15, x17
+ umulh x20, x14, x17
+ mul x17, x14, x17
+ umulh x21, x18, x16
+ mul x22, x18, x16
+ umulh x23, x18, x13
+ mul x24, x18, x13
+ umulh x25, x18, x15
+ mul x26, x18, x15
+ umulh x27, x18, x14
+ mul x18, x18, x14
+ umulh x28, x1, x16
+ adds x19, x20, x19
+ mul x20, x17, x0
+ adcs x6, x7, x6
+ mul x7, x20, x8
+ mul x29, x20, x9
+ mul x30, x20, x10
+ adcs x4, x5, x4
+ umulh x5, x20, x11
+ adcs x3, x3, xzr
+ adds x5, x5, x30
+ umulh x30, x20, x10
+ adcs x29, x30, x29
+ umulh x30, x20, x9
+ adcs x7, x30, x7
+ umulh x30, x20, x8
+ mul x20, x20, x11
+ adcs x30, x30, xzr
+ cmn x20, x17
+ mul x17, x1, x16
+ umulh x20, x1, x13
+ adcs x5, x5, x19
+ mul x19, x1, x13
+ adcs x6, x29, x6
+ umulh x29, x1, x15
+ adcs x4, x7, x4
+ mul x7, x1, x15
+ adcs x3, x30, x3
+ adcs x30, xzr, xzr
+ adds x26, x27, x26
+ umulh x27, x1, x14
+ mul x1, x1, x14
+ adcs x24, x25, x24
+ umulh x25, x2, x16
+ mul x16, x2, x16
+ adcs x22, x23, x22
+ adcs x21, x21, xzr
+ adds x18, x5, x18
+ adcs x5, x6, x26
+ mul x6, x18, x0
+ adcs x4, x4, x24
+ mul x23, x6, x8
+ mul x24, x6, x9
+ mul x26, x6, x10
+ adcs x3, x3, x22
+ umulh x22, x6, x11
+ adcs x21, x30, x21
+ adcs x30, xzr, xzr
+ adds x22, x22, x26
+ umulh x26, x6, x10
+ adcs x24, x26, x24
+ umulh x26, x6, x9
+ adcs x23, x26, x23
+ umulh x26, x6, x8
+ mul x6, x6, x11
+ adcs x26, x26, xzr
+ cmn x6, x18
+ umulh x18, x2, x13
+ mul x13, x2, x13
+ umulh x6, x2, x15
+ mul x15, x2, x15
+ umulh x12, x2, x14
+ mul x14, x2, x14
+ adcs x2, x22, x5
+ adcs x4, x24, x4
+ adcs x3, x23, x3
+ adcs x5, x26, x21
+ adcs x21, x30, xzr
+ adds x7, x27, x7
+ adcs x19, x29, x19
+ adcs x17, x20, x17
+ adcs x20, x28, xzr
+ adds x1, x2, x1
+ adcs x2, x4, x7
+ mul x4, x1, x0
+ adcs x3, x3, x19
+ mul x7, x4, x8
+ mul x19, x4, x9
+ mul x22, x4, x10
+ adcs x17, x5, x17
+ umulh x5, x4, x11
+ adcs x20, x21, x20
+ adcs x21, xzr, xzr
+ adds x5, x5, x22
+ umulh x22, x4, x10
+ adcs x19, x22, x19
+ umulh x22, x4, x9
+ adcs x7, x22, x7
+ umulh x22, x4, x8
+ mul x4, x4, x11
+ adcs x22, x22, xzr
+ cmn x4, x1
+ adcs x1, x5, x2
+ adcs x2, x19, x3
+ adcs x17, x7, x17
+ adcs x3, x22, x20
+ adcs x4, x21, xzr
+ adds x12, x12, x15
+ adcs x13, x6, x13
+ adcs x15, x18, x16
+ adcs x16, x25, xzr
+ adds x14, x1, x14
+ adcs x12, x2, x12
+ mul x18, x14, x0
+ adcs x13, x17, x13
+ umulh x17, x18, x8
+ mul x0, x18, x8
+ umulh x1, x18, x9
+ mul x2, x18, x9
+ umulh x5, x18, x10
+ mul x6, x18, x10
+ umulh x7, x18, x11
+ mul x18, x18, x11
+ adcs x15, x3, x15
+ adcs x16, x4, x16
+ adcs x3, xzr, xzr
+ adds x4, x7, x6
+ adcs x2, x5, x2
+ adcs x0, x1, x0
+ adcs x17, x17, xzr
+ cmn x18, x14
+ adcs x12, x4, x12
+ adcs x13, x2, x13
+ adcs x14, x0, x15
+ adcs x15, x17, x16
+ adcs x16, x3, xzr
+ subs x11, x12, x11
+ sbcs x10, x13, x10
+ sbcs x9, x14, x9
+ sbcs x8, x15, x8
+ sbcs x16, x16, xzr
+ tst x16, #0x1
+ csel x11, x12, x11, ne
+ csel x10, x13, x10, ne
+ csel x9, x14, x9, ne
+ csel x8, x15, x8, ne
+ ldr x12, [sp, #8] // 8-byte Folded Reload
+ stp x11, x10, [x12]
+ stp x9, x8, [x12, #16]
+ add sp, sp, #16 // =16
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end53:
+ .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L
+
+ .globl mcl_fp_montNF4L
+ .align 2
+ .type mcl_fp_montNF4L,@function
+mcl_fp_montNF4L: // @mcl_fp_montNF4L
+// BB#0:
+ stp x28, x27, [sp, #-80]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ ldp x14, x15, [x1, #16]
+ ldp x13, x16, [x1]
+ ldur x12, [x3, #-8]
+ ldp x9, x8, [x3, #16]
+ ldp x11, x10, [x3]
+ ldp x17, x18, [x2]
+ ldp x1, x2, [x2, #16]
+ umulh x3, x15, x17
+ mul x4, x15, x17
+ umulh x5, x14, x17
+ mul x6, x14, x17
+ umulh x7, x16, x17
+ mul x19, x16, x17
+ umulh x20, x13, x17
+ mul x17, x13, x17
+ umulh x21, x18, x15
+ mul x22, x18, x15
+ umulh x23, x18, x14
+ mul x24, x18, x14
+ umulh x25, x18, x16
+ mul x26, x18, x16
+ umulh x27, x18, x13
+ mul x18, x18, x13
+ adds x19, x20, x19
+ umulh x20, x1, x15
+ adcs x6, x7, x6
+ mul x7, x17, x12
+ adcs x4, x5, x4
+ mul x5, x7, x11
+ adcs x3, x3, xzr
+ cmn x5, x17
+ mul x17, x1, x15
+ mul x5, x7, x10
+ adcs x5, x5, x19
+ mul x19, x7, x9
+ adcs x6, x19, x6
+ mul x19, x7, x8
+ adcs x4, x19, x4
+ umulh x19, x7, x11
+ adcs x3, x3, xzr
+ adds x5, x5, x19
+ umulh x19, x7, x10
+ adcs x6, x6, x19
+ umulh x19, x7, x9
+ adcs x4, x4, x19
+ umulh x19, x1, x14
+ umulh x7, x7, x8
+ adcs x3, x3, x7
+ mul x7, x1, x14
+ adds x26, x27, x26
+ umulh x27, x1, x16
+ adcs x24, x25, x24
+ mul x25, x1, x16
+ adcs x22, x23, x22
+ umulh x23, x1, x13
+ mul x1, x1, x13
+ adcs x21, x21, xzr
+ adds x18, x18, x5
+ umulh x5, x2, x15
+ mul x15, x2, x15
+ adcs x6, x26, x6
+ umulh x26, x2, x14
+ mul x14, x2, x14
+ adcs x4, x24, x4
+ mul x24, x18, x12
+ adcs x3, x22, x3
+ mul x22, x24, x11
+ adcs x21, x21, xzr
+ cmn x22, x18
+ umulh x18, x2, x16
+ mul x16, x2, x16
+ umulh x22, x2, x13
+ mul x13, x2, x13
+ mul x2, x24, x10
+ adcs x2, x2, x6
+ mul x6, x24, x9
+ adcs x4, x6, x4
+ mul x6, x24, x8
+ adcs x3, x6, x3
+ umulh x6, x24, x11
+ adcs x21, x21, xzr
+ adds x2, x2, x6
+ umulh x6, x24, x10
+ adcs x4, x4, x6
+ umulh x6, x24, x9
+ adcs x3, x3, x6
+ umulh x6, x24, x8
+ adcs x6, x21, x6
+ adds x21, x23, x25
+ adcs x7, x27, x7
+ adcs x17, x19, x17
+ adcs x19, x20, xzr
+ adds x1, x1, x2
+ adcs x2, x21, x4
+ mul x4, x1, x12
+ adcs x3, x7, x3
+ mul x7, x4, x8
+ mul x20, x4, x9
+ adcs x17, x17, x6
+ mul x6, x4, x11
+ adcs x19, x19, xzr
+ cmn x6, x1
+ mul x1, x4, x10
+ umulh x6, x4, x8
+ adcs x1, x1, x2
+ umulh x2, x4, x9
+ adcs x3, x20, x3
+ umulh x20, x4, x10
+ umulh x4, x4, x11
+ adcs x17, x7, x17
+ adcs x7, x19, xzr
+ adds x1, x1, x4
+ adcs x3, x3, x20
+ adcs x17, x17, x2
+ adcs x2, x7, x6
+ adds x16, x22, x16
+ adcs x14, x18, x14
+ adcs x15, x26, x15
+ adcs x18, x5, xzr
+ adds x13, x13, x1
+ adcs x16, x16, x3
+ mul x12, x13, x12
+ adcs x14, x14, x17
+ mul x17, x12, x8
+ mul x1, x12, x9
+ mul x3, x12, x10
+ mul x4, x12, x11
+ umulh x5, x12, x8
+ umulh x6, x12, x9
+ umulh x7, x12, x10
+ umulh x12, x12, x11
+ adcs x15, x15, x2
+ adcs x18, x18, xzr
+ cmn x4, x13
+ adcs x13, x3, x16
+ adcs x14, x1, x14
+ adcs x15, x17, x15
+ adcs x16, x18, xzr
+ adds x12, x13, x12
+ adcs x13, x14, x7
+ adcs x14, x15, x6
+ adcs x15, x16, x5
+ subs x11, x12, x11
+ sbcs x10, x13, x10
+ sbcs x9, x14, x9
+ sbcs x8, x15, x8
+ cmp x8, #0 // =0
+ csel x11, x12, x11, lt
+ csel x10, x13, x10, lt
+ csel x9, x14, x9, lt
+ csel x8, x15, x8, lt
+ stp x11, x10, [x0]
+ stp x9, x8, [x0, #16]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #80
+ ret
+.Lfunc_end54:
+ .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L
+
+ .globl mcl_fp_montRed4L
+ .align 2
+ .type mcl_fp_montRed4L,@function
+mcl_fp_montRed4L: // @mcl_fp_montRed4L
+// BB#0:
+ stp x22, x21, [sp, #-32]!
+ stp x20, x19, [sp, #16]
+ ldur x12, [x2, #-8]
+ ldp x9, x8, [x2, #16]
+ ldp x11, x10, [x2]
+ ldp x14, x15, [x1, #48]
+ ldp x16, x17, [x1, #32]
+ ldp x18, x2, [x1, #16]
+ ldp x13, x1, [x1]
+ mul x3, x13, x12
+ umulh x4, x3, x8
+ mul x5, x3, x8
+ umulh x6, x3, x9
+ mul x7, x3, x9
+ umulh x19, x3, x10
+ mul x20, x3, x10
+ umulh x21, x3, x11
+ mul x3, x3, x11
+ adds x20, x21, x20
+ adcs x7, x19, x7
+ adcs x5, x6, x5
+ adcs x4, x4, xzr
+ cmn x13, x3
+ adcs x13, x1, x20
+ adcs x18, x18, x7
+ mul x1, x13, x12
+ adcs x2, x2, x5
+ umulh x3, x1, x8
+ mul x5, x1, x8
+ umulh x6, x1, x9
+ mul x7, x1, x9
+ umulh x19, x1, x10
+ mul x20, x1, x10
+ umulh x21, x1, x11
+ mul x1, x1, x11
+ adcs x16, x16, x4
+ adcs x17, x17, xzr
+ adcs x14, x14, xzr
+ adcs x15, x15, xzr
+ adcs x4, xzr, xzr
+ adds x20, x21, x20
+ adcs x7, x19, x7
+ adcs x5, x6, x5
+ adcs x3, x3, xzr
+ cmn x1, x13
+ adcs x13, x20, x18
+ adcs x18, x7, x2
+ mul x1, x13, x12
+ adcs x16, x5, x16
+ umulh x2, x1, x8
+ mul x5, x1, x8
+ umulh x6, x1, x9
+ mul x7, x1, x9
+ umulh x19, x1, x10
+ mul x20, x1, x10
+ umulh x21, x1, x11
+ mul x1, x1, x11
+ adcs x17, x3, x17
+ adcs x14, x14, xzr
+ adcs x15, x15, xzr
+ adcs x3, x4, xzr
+ adds x4, x21, x20
+ adcs x7, x19, x7
+ adcs x5, x6, x5
+ adcs x2, x2, xzr
+ cmn x1, x13
+ adcs x13, x4, x18
+ adcs x16, x7, x16
+ mul x12, x13, x12
+ adcs x17, x5, x17
+ umulh x18, x12, x8
+ mul x1, x12, x8
+ umulh x4, x12, x9
+ mul x5, x12, x9
+ umulh x6, x12, x10
+ mul x7, x12, x10
+ umulh x19, x12, x11
+ mul x12, x12, x11
+ adcs x14, x2, x14
+ adcs x15, x15, xzr
+ adcs x2, x3, xzr
+ adds x3, x19, x7
+ adcs x5, x6, x5
+ adcs x1, x4, x1
+ adcs x18, x18, xzr
+ cmn x12, x13
+ adcs x12, x3, x16
+ adcs x13, x5, x17
+ adcs x14, x1, x14
+ adcs x15, x18, x15
+ adcs x16, x2, xzr
+ subs x11, x12, x11
+ sbcs x10, x13, x10
+ sbcs x9, x14, x9
+ sbcs x8, x15, x8
+ sbcs x16, x16, xzr
+ tst x16, #0x1
+ csel x11, x12, x11, ne
+ csel x10, x13, x10, ne
+ csel x9, x14, x9, ne
+ csel x8, x15, x8, ne
+ stp x11, x10, [x0]
+ stp x9, x8, [x0, #16]
+ ldp x20, x19, [sp, #16]
+ ldp x22, x21, [sp], #32
+ ret
+.Lfunc_end55:
+ .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L
+
+ .globl mcl_fp_addPre4L
+ .align 2
+ .type mcl_fp_addPre4L,@function
+mcl_fp_addPre4L: // @mcl_fp_addPre4L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x1]
+ ldp x14, x15, [x1, #16]
+ adds x10, x10, x12
+ str x10, [x0]
+ adcs x10, x11, x13
+ adcs x8, x8, x14
+ stp x10, x8, [x0, #8]
+ adcs x9, x9, x15
+ adcs x8, xzr, xzr
+ str x9, [x0, #24]
+ mov x0, x8
+ ret
+.Lfunc_end56:
+ .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L
+
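+// ---- Editorial annotation (not part of the generated source) ----
+// addPre/subPre are raw multi-limb add/sub that return the final carry or
+// borrow in x0 rather than reducing mod p. Sketch, hypothetical name:
+//
+//   #include <stdint.h>
+//   uint64_t add_pre_4(uint64_t r[4], const uint64_t a[4], const uint64_t b[4]) {
+//       uint64_t carry = 0;
+//       for (int i = 0; i < 4; i++) {
+//           unsigned __int128 t = (unsigned __int128)a[i] + b[i] + carry;
+//           r[i] = (uint64_t)t;
+//           carry = (uint64_t)(t >> 64);    // the trailing adcs x, xzr, xzr
+//       }
+//       return carry;                       // returned via mov x0, x8
+//   }
+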
+ .globl mcl_fp_subPre4L
+ .align 2
+ .type mcl_fp_subPre4L,@function
+mcl_fp_subPre4L: // @mcl_fp_subPre4L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x1]
+ ldp x14, x15, [x1, #16]
+ subs x10, x12, x10
+ str x10, [x0]
+ sbcs x10, x13, x11
+ sbcs x8, x14, x8
+ stp x10, x8, [x0, #8]
+ sbcs x9, x15, x9
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0, #24]
+ mov x0, x8
+ ret
+.Lfunc_end57:
+ .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L
+
+ .globl mcl_fp_shr1_4L
+ .align 2
+ .type mcl_fp_shr1_4L,@function
+mcl_fp_shr1_4L: // @mcl_fp_shr1_4L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #16]
+ extr x8, x9, x8, #1
+ extr x9, x10, x9, #1
+ extr x10, x11, x10, #1
+ lsr x11, x11, #1
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ ret
+.Lfunc_end58:
+ .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L
+
+ .globl mcl_fp_add4L
+ .align 2
+ .type mcl_fp_add4L,@function
+mcl_fp_add4L: // @mcl_fp_add4L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x1]
+ ldp x14, x15, [x1, #16]
+ adds x10, x10, x12
+ adcs x12, x11, x13
+ ldp x11, x13, [x3]
+ stp x10, x12, [x0]
+ adcs x8, x8, x14
+ adcs x14, x9, x15
+ stp x8, x14, [x0, #16]
+ adcs x15, xzr, xzr
+ ldp x9, x16, [x3, #16]
+ subs x11, x10, x11
+ sbcs x10, x12, x13
+ sbcs x9, x8, x9
+ sbcs x8, x14, x16
+ sbcs x12, x15, xzr
+ and w12, w12, #0x1
+ tbnz w12, #0, .LBB59_2
+// BB#1: // %nocarry
+ stp x11, x10, [x0]
+ stp x9, x8, [x0, #16]
+.LBB59_2: // %carry
+ ret
+.Lfunc_end59:
+ .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L
+
+ .globl mcl_fp_addNF4L
+ .align 2
+ .type mcl_fp_addNF4L,@function
+mcl_fp_addNF4L: // @mcl_fp_addNF4L
+// BB#0:
+ ldp x8, x9, [x1, #16]
+ ldp x10, x11, [x1]
+ ldp x12, x13, [x2]
+ ldp x14, x15, [x2, #16]
+ adds x10, x12, x10
+ adcs x11, x13, x11
+ ldp x12, x13, [x3]
+ adcs x8, x14, x8
+ ldp x14, x16, [x3, #16]
+ adcs x9, x15, x9
+ subs x12, x10, x12
+ sbcs x13, x11, x13
+ sbcs x14, x8, x14
+ sbcs x15, x9, x16
+ cmp x15, #0 // =0
+ csel x10, x10, x12, lt
+ csel x11, x11, x13, lt
+ csel x8, x8, x14, lt
+ csel x9, x9, x15, lt
+ stp x10, x11, [x0]
+ stp x8, x9, [x0, #16]
+ ret
+.Lfunc_end60:
+ .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L
+
+ .globl mcl_fp_sub4L
+ .align 2
+ .type mcl_fp_sub4L,@function
+mcl_fp_sub4L: // @mcl_fp_sub4L
+// BB#0:
+ ldp x10, x11, [x2, #16]
+ ldp x8, x9, [x2]
+ ldp x12, x13, [x1]
+ ldp x14, x15, [x1, #16]
+ subs x8, x12, x8
+ sbcs x9, x13, x9
+ stp x8, x9, [x0]
+ sbcs x10, x14, x10
+ sbcs x11, x15, x11
+ stp x10, x11, [x0, #16]
+ ngcs x12, xzr
+ and w12, w12, #0x1
+ tbnz w12, #0, .LBB61_2
+// BB#1: // %nocarry
+ ret
+.LBB61_2: // %carry
+ ldp x12, x13, [x3, #16]
+ ldp x14, x15, [x3]
+ adds x8, x14, x8
+ adcs x9, x15, x9
+ adcs x10, x12, x10
+ adcs x11, x13, x11
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ ret
+.Lfunc_end61:
+ .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L
+
+ .globl mcl_fp_subNF4L
+ .align 2
+ .type mcl_fp_subNF4L,@function
+mcl_fp_subNF4L: // @mcl_fp_subNF4L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x1]
+ ldp x14, x15, [x1, #16]
+ subs x10, x12, x10
+ sbcs x11, x13, x11
+ ldp x12, x13, [x3, #16]
+ sbcs x8, x14, x8
+ ldp x14, x16, [x3]
+ sbcs x9, x15, x9
+ asr x15, x9, #63
+ and x14, x15, x14
+ and x16, x15, x16
+ and x12, x15, x12
+ and x13, x15, x13
+ adds x10, x14, x10
+ str x10, [x0]
+ adcs x10, x16, x11
+ adcs x8, x12, x8
+ stp x10, x8, [x0, #8]
+ adcs x8, x13, x9
+ str x8, [x0, #24]
+ ret
+.Lfunc_end62:
+ .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L
+
+ .globl mcl_fpDbl_add4L
+ .align 2
+ .type mcl_fpDbl_add4L,@function
+mcl_fpDbl_add4L: // @mcl_fpDbl_add4L
+// BB#0:
+ ldp x8, x9, [x2, #48]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x16, x17, [x2, #16]
+ ldp x4, x2, [x2]
+ ldp x5, x6, [x1, #16]
+ ldp x18, x1, [x1]
+ adds x18, x4, x18
+ str x18, [x0]
+ ldp x18, x4, [x3, #16]
+ adcs x1, x2, x1
+ ldp x2, x3, [x3]
+ adcs x16, x16, x5
+ stp x1, x16, [x0, #8]
+ adcs x16, x17, x6
+ str x16, [x0, #24]
+ adcs x12, x12, x14
+ adcs x13, x13, x15
+ adcs x8, x8, x10
+ adcs x9, x9, x11
+ adcs x10, xzr, xzr
+ subs x11, x12, x2
+ sbcs x14, x13, x3
+ sbcs x15, x8, x18
+ sbcs x16, x9, x4
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x10, x12, x11, ne
+ csel x11, x13, x14, ne
+ csel x8, x8, x15, ne
+ csel x9, x9, x16, ne
+ stp x10, x11, [x0, #32]
+ stp x8, x9, [x0, #48]
+ ret
+.Lfunc_end63:
+ .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L
+
+ .globl mcl_fpDbl_sub4L
+ .align 2
+ .type mcl_fpDbl_sub4L,@function
+mcl_fpDbl_sub4L: // @mcl_fpDbl_sub4L
+// BB#0:
+ ldp x8, x9, [x2, #48]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x16, x17, [x2, #16]
+ ldp x18, x2, [x2]
+ ldp x5, x6, [x1, #16]
+ ldp x4, x1, [x1]
+ subs x18, x4, x18
+ str x18, [x0]
+ ldp x18, x4, [x3, #16]
+ sbcs x1, x1, x2
+ ldp x2, x3, [x3]
+ sbcs x16, x5, x16
+ stp x1, x16, [x0, #8]
+ sbcs x16, x6, x17
+ sbcs x12, x14, x12
+ sbcs x13, x15, x13
+ sbcs x8, x10, x8
+ sbcs x9, x11, x9
+ ngcs x10, xzr
+ tst x10, #0x1
+ csel x10, x4, xzr, ne
+ csel x11, x18, xzr, ne
+ csel x14, x3, xzr, ne
+ csel x15, x2, xzr, ne
+ adds x12, x15, x12
+ stp x16, x12, [x0, #24]
+ adcs x12, x14, x13
+ adcs x8, x11, x8
+ stp x12, x8, [x0, #40]
+ adcs x8, x10, x9
+ str x8, [x0, #56]
+ ret
+.Lfunc_end64:
+ .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L
+
+ .globl mcl_fp_mulUnitPre5L
+ .align 2
+ .type mcl_fp_mulUnitPre5L,@function
+mcl_fp_mulUnitPre5L: // @mcl_fp_mulUnitPre5L
+// BB#0:
+ ldp x12, x8, [x1, #24]
+ ldp x9, x10, [x1]
+ ldr x11, [x1, #16]
+ mul x13, x9, x2
+ mul x14, x10, x2
+ umulh x9, x9, x2
+ mul x15, x11, x2
+ umulh x10, x10, x2
+ mul x16, x12, x2
+ umulh x11, x11, x2
+ mul x17, x8, x2
+ umulh x12, x12, x2
+ umulh x8, x8, x2
+ adds x9, x9, x14
+ stp x13, x9, [x0]
+ adcs x9, x10, x15
+ str x9, [x0, #16]
+ adcs x9, x11, x16
+ str x9, [x0, #24]
+ adcs x9, x12, x17
+ adcs x8, x8, xzr
+ stp x9, x8, [x0, #32]
+ ret
+.Lfunc_end65:
+ .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L
+
+ .globl mcl_fpDbl_mulPre5L
+ .align 2
+ .type mcl_fpDbl_mulPre5L,@function
+mcl_fpDbl_mulPre5L: // @mcl_fpDbl_mulPre5L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #176 // =176
+ ldp x8, x10, [x1]
+ ldp x9, x15, [x1]
+ ldp x11, x12, [x1, #24]
+ ldp x13, x14, [x2]
+ ldp x16, x18, [x1, #16]
+ ldr x17, [x1, #16]
+ ldr x3, [x1, #32]
+ ldp x4, x5, [x2, #16]
+ mul x6, x8, x13
+ str x6, [sp, #72] // 8-byte Folded Spill
+ umulh x6, x12, x13
+ str x6, [sp, #168] // 8-byte Folded Spill
+ mul x6, x12, x13
+ str x6, [sp, #152] // 8-byte Folded Spill
+ umulh x6, x11, x13
+ str x6, [sp, #112] // 8-byte Folded Spill
+ mul x6, x11, x13
+ str x6, [sp, #64] // 8-byte Folded Spill
+ umulh x6, x17, x13
+ mul x23, x17, x13
+ umulh x24, x10, x13
+ mul x25, x10, x13
+ umulh x7, x8, x13
+ mul x26, x8, x14
+ mul x13, x12, x14
+ str x13, [sp, #104] // 8-byte Folded Spill
+ mul x13, x11, x14
+ stp x13, x6, [sp, #40]
+ mul x29, x17, x14
+ mul x30, x10, x14
+ umulh x12, x12, x14
+ umulh x11, x11, x14
+ str x11, [sp, #96] // 8-byte Folded Spill
+ umulh x11, x17, x14
+ umulh x27, x10, x14
+ umulh x20, x8, x14
+ mul x8, x9, x4
+ stp x8, x11, [sp, #24]
+ mul x8, x3, x4
+ stp x8, x12, [sp, #136]
+ mul x8, x18, x4
+ str x8, [sp, #88] // 8-byte Folded Spill
+ mul x8, x16, x4
+ str x8, [sp, #16] // 8-byte Folded Spill
+ mul x28, x15, x4
+ umulh x8, x3, x4
+ str x8, [sp, #160] // 8-byte Folded Spill
+ umulh x8, x18, x4
+ str x8, [sp, #128] // 8-byte Folded Spill
+ umulh x8, x16, x4
+ str x8, [sp, #80] // 8-byte Folded Spill
+ umulh x8, x15, x4
+ str x8, [sp, #8] // 8-byte Folded Spill
+ umulh x22, x9, x4
+ mul x8, x3, x5
+ str x8, [sp, #120] // 8-byte Folded Spill
+ umulh x8, x3, x5
+ str x8, [sp, #56] // 8-byte Folded Spill
+ mul x6, x18, x5
+ umulh x21, x18, x5
+ mul x3, x16, x5
+ umulh x19, x16, x5
+ mul x17, x15, x5
+ umulh x4, x15, x5
+ mul x16, x9, x5
+ umulh x18, x9, x5
+ ldr x2, [x2, #32]
+ ldp x10, x5, [x1, #16]
+ ldp x8, x9, [x1]
+ ldr x1, [x1, #32]
+ mul x15, x8, x2
+ umulh x14, x8, x2
+ mul x12, x9, x2
+ umulh x13, x9, x2
+ mul x11, x10, x2
+ umulh x10, x10, x2
+ mul x9, x5, x2
+ umulh x5, x5, x2
+ mul x8, x1, x2
+ umulh x1, x1, x2
+ ldr x2, [sp, #72] // 8-byte Folded Reload
+ str x2, [x0]
+ adds x2, x7, x25
+ adcs x7, x24, x23
+ ldr x23, [sp, #64] // 8-byte Folded Reload
+ ldr x24, [sp, #48] // 8-byte Folded Reload
+ adcs x23, x24, x23
+ ldr x24, [sp, #152] // 8-byte Folded Reload
+ ldr x25, [sp, #112] // 8-byte Folded Reload
+ adcs x24, x25, x24
+ ldr x25, [sp, #168] // 8-byte Folded Reload
+ adcs x25, x25, xzr
+ adds x2, x26, x2
+ str x2, [x0, #8]
+ adcs x2, x30, x7
+ adcs x7, x29, x23
+ ldr x23, [sp, #40] // 8-byte Folded Reload
+ adcs x23, x23, x24
+ ldr x24, [sp, #104] // 8-byte Folded Reload
+ adcs x24, x24, x25
+ adcs x25, xzr, xzr
+ adds x2, x2, x20
+ adcs x7, x7, x27
+ ldr x20, [sp, #32] // 8-byte Folded Reload
+ adcs x20, x23, x20
+ ldr x23, [sp, #96] // 8-byte Folded Reload
+ adcs x23, x24, x23
+ ldr x24, [sp, #144] // 8-byte Folded Reload
+ adcs x24, x25, x24
+ ldr x25, [sp, #24] // 8-byte Folded Reload
+ adds x2, x25, x2
+ str x2, [x0, #16]
+ adcs x2, x28, x7
+ ldr x7, [sp, #16] // 8-byte Folded Reload
+ adcs x7, x7, x20
+ ldr x20, [sp, #88] // 8-byte Folded Reload
+ adcs x20, x20, x23
+ ldr x23, [sp, #136] // 8-byte Folded Reload
+ adcs x23, x23, x24
+ adcs x24, xzr, xzr
+ adds x2, x2, x22
+ ldr x22, [sp, #8] // 8-byte Folded Reload
+ adcs x7, x7, x22
+ ldr x22, [sp, #80] // 8-byte Folded Reload
+ adcs x20, x20, x22
+ ldr x22, [sp, #128] // 8-byte Folded Reload
+ adcs x22, x23, x22
+ ldr x23, [sp, #160] // 8-byte Folded Reload
+ adcs x23, x24, x23
+ adds x16, x16, x2
+ str x16, [x0, #24]
+ adcs x16, x17, x7
+ adcs x17, x3, x20
+ adcs x2, x6, x22
+ ldr x3, [sp, #120] // 8-byte Folded Reload
+ adcs x3, x3, x23
+ adcs x6, xzr, xzr
+ adds x16, x16, x18
+ adcs x17, x17, x4
+ adcs x18, x2, x19
+ adcs x2, x3, x21
+ ldr x3, [sp, #56] // 8-byte Folded Reload
+ adcs x3, x6, x3
+ adds x15, x15, x16
+ str x15, [x0, #32]
+ adcs x12, x12, x17
+ adcs x11, x11, x18
+ adcs x9, x9, x2
+ adcs x8, x8, x3
+ adcs x15, xzr, xzr
+ adds x12, x12, x14
+ adcs x11, x11, x13
+ stp x12, x11, [x0, #40]
+ adcs x9, x9, x10
+ adcs x8, x8, x5
+ stp x9, x8, [x0, #56]
+ adcs x8, x15, x1
+ str x8, [x0, #72]
+ add sp, sp, #176 // =176
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end66:
+ .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L
+
+ .globl mcl_fpDbl_sqrPre5L
+ .align 2
+ .type mcl_fpDbl_sqrPre5L,@function
+mcl_fpDbl_sqrPre5L: // @mcl_fpDbl_sqrPre5L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #16]
+ ldp x12, x15, [x1]
+ ldp x13, x14, [x1, #24]
+ ldr x16, [x1, #16]
+ mul x17, x12, x12
+ mul x18, x14, x12
+ mul x2, x11, x12
+ umulh x3, x16, x12
+ mul x4, x16, x12
+ umulh x5, x9, x12
+ mul x6, x9, x12
+ str x17, [x0]
+ umulh x17, x12, x12
+ adds x17, x17, x6
+ adcs x4, x5, x4
+ adcs x2, x3, x2
+ umulh x3, x11, x12
+ adcs x18, x3, x18
+ umulh x12, x14, x12
+ adcs x12, x12, xzr
+ adds x17, x6, x17
+ ldr x3, [x1]
+ str x17, [x0, #8]
+ mul x17, x9, x9
+ adcs x17, x17, x4
+ mul x4, x16, x9
+ adcs x2, x4, x2
+ mul x4, x11, x9
+ adcs x18, x4, x18
+ mul x4, x14, x9
+ adcs x12, x4, x12
+ adcs x4, xzr, xzr
+ adds x17, x17, x5
+ umulh x5, x9, x9
+ adcs x2, x2, x5
+ umulh x5, x16, x9
+ adcs x18, x18, x5
+ ldr x5, [x1, #8]
+ umulh x11, x11, x9
+ adcs x11, x12, x11
+ ldr x12, [x1, #24]
+ umulh x9, x14, x9
+ adcs x9, x4, x9
+ mul x4, x3, x16
+ adds x17, x4, x17
+ mul x4, x14, x16
+ str x17, [x0, #16]
+ mul x17, x5, x16
+ adcs x17, x17, x2
+ mul x2, x16, x16
+ adcs x18, x2, x18
+ mul x2, x12, x16
+ adcs x11, x2, x11
+ umulh x2, x3, x16
+ adcs x9, x4, x9
+ adcs x4, xzr, xzr
+ adds x17, x17, x2
+ umulh x2, x5, x16
+ adcs x18, x18, x2
+ umulh x2, x16, x16
+ adcs x11, x11, x2
+ umulh x14, x14, x16
+ umulh x16, x12, x16
+ adcs x9, x9, x16
+ ldr x16, [x1, #32]
+ adcs x14, x4, x14
+ mul x1, x3, x12
+ adds x17, x1, x17
+ mul x1, x16, x12
+ str x17, [x0, #24]
+ mul x17, x5, x12
+ adcs x17, x17, x18
+ mul x18, x10, x12
+ adcs x11, x18, x11
+ mul x18, x12, x12
+ adcs x9, x18, x9
+ umulh x18, x16, x12
+ umulh x2, x3, x12
+ adcs x14, x1, x14
+ adcs x1, xzr, xzr
+ adds x17, x17, x2
+ umulh x2, x10, x12
+ umulh x3, x5, x12
+ umulh x12, x12, x12
+ adcs x11, x11, x3
+ mul x3, x8, x16
+ adcs x9, x9, x2
+ mul x2, x13, x16
+ adcs x12, x14, x12
+ mul x14, x10, x16
+ adcs x18, x1, x18
+ mul x1, x15, x16
+ adds x17, x17, x3
+ mul x3, x16, x16
+ umulh x8, x8, x16
+ umulh x15, x15, x16
+ umulh x10, x10, x16
+ umulh x13, x13, x16
+ umulh x16, x16, x16
+ str x17, [x0, #32]
+ adcs x11, x11, x1
+ adcs x9, x9, x14
+ adcs x12, x12, x2
+ adcs x14, x18, x3
+ adcs x17, xzr, xzr
+ adds x8, x11, x8
+ str x8, [x0, #40]
+ adcs x8, x9, x15
+ str x8, [x0, #48]
+ adcs x8, x12, x10
+ str x8, [x0, #56]
+ adcs x8, x14, x13
+ str x8, [x0, #64]
+ adcs x8, x17, x16
+ str x8, [x0, #72]
+ ret
+.Lfunc_end67:
+ .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L
+
+ .globl mcl_fp_mont5L
+ .align 2
+ .type mcl_fp_mont5L,@function
+mcl_fp_mont5L: // @mcl_fp_mont5L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #80 // =80
+ str x0, [sp, #72] // 8-byte Folded Spill
+ ldp x16, x10, [x1, #24]
+ ldp x18, x0, [x1, #8]
+ ldr x17, [x1]
+ ldur x9, [x3, #-8]
+ str x9, [sp, #16] // 8-byte Folded Spill
+ ldp x11, x8, [x3, #24]
+ ldp x14, x12, [x3, #8]
+ ldr x13, [x3]
+ ldp x3, x1, [x2]
+ ldp x4, x5, [x2, #16]
+ ldr x2, [x2, #32]
+ umulh x6, x10, x3
+ mul x7, x10, x3
+ umulh x19, x16, x3
+ mul x20, x16, x3
+ umulh x21, x0, x3
+ mul x22, x0, x3
+ umulh x23, x18, x3
+ mul x24, x18, x3
+ umulh x25, x17, x3
+ mul x3, x17, x3
+ umulh x26, x1, x10
+ mul x27, x1, x10
+ umulh x28, x1, x16
+ adds x24, x25, x24
+ mul x25, x3, x9
+ adcs x22, x23, x22
+ mul x23, x25, x8
+ mul x29, x25, x11
+ mul x30, x25, x12
+ adcs x20, x21, x20
+ mul x21, x25, x14
+ adcs x7, x19, x7
+ umulh x19, x25, x13
+ adcs x6, x6, xzr
+ adds x19, x19, x21
+ umulh x21, x25, x14
+ adcs x21, x21, x30
+ umulh x30, x25, x12
+ adcs x29, x30, x29
+ umulh x30, x25, x11
+ adcs x23, x30, x23
+ umulh x30, x25, x8
+ mul x25, x25, x13
+ adcs x30, x30, xzr
+ cmn x25, x3
+ mul x3, x1, x16
+ umulh x25, x1, x0
+ adcs x19, x19, x24
+ mul x24, x1, x0
+ adcs x21, x21, x22
+ umulh x22, x1, x18
+ adcs x20, x29, x20
+ mul x29, x1, x18
+ adcs x7, x23, x7
+ umulh x23, x1, x17
+ mul x1, x1, x17
+ adcs x6, x30, x6
+ adcs x30, xzr, xzr
+ adds x23, x23, x29
+ umulh x29, x4, x10
+ adcs x22, x22, x24
+ mul x24, x4, x10
+ adcs x3, x25, x3
+ umulh x25, x4, x16
+ adcs x27, x28, x27
+ adcs x26, x26, xzr
+ adds x1, x19, x1
+ adcs x19, x21, x23
+ mul x21, x1, x9
+ adcs x20, x20, x22
+ mul x22, x21, x8
+ mul x23, x21, x11
+ mul x28, x21, x12
+ adcs x3, x7, x3
+ mul x7, x21, x14
+ adcs x6, x6, x27
+ umulh x27, x21, x13
+ adcs x26, x30, x26
+ adcs x30, xzr, xzr
+ adds x7, x27, x7
+ umulh x27, x21, x14
+ adcs x27, x27, x28
+ umulh x28, x21, x12
+ adcs x23, x28, x23
+ umulh x28, x21, x11
+ adcs x22, x28, x22
+ umulh x28, x21, x8
+ mul x21, x21, x13
+ adcs x28, x28, xzr
+ cmn x21, x1
+ mul x1, x4, x16
+ umulh x21, x4, x0
+ adcs x7, x7, x19
+ mul x19, x4, x0
+ adcs x20, x27, x20
+ umulh x27, x4, x18
+ adcs x3, x23, x3
+ mul x23, x4, x18
+ adcs x6, x22, x6
+ umulh x22, x4, x17
+ mul x4, x4, x17
+ adcs x26, x28, x26
+ umulh x15, x5, x10
+ str x15, [sp, #64] // 8-byte Folded Spill
+ adcs x30, x30, xzr
+ adds x22, x22, x23
+ mul x15, x5, x10
+ str x15, [sp, #56] // 8-byte Folded Spill
+ adcs x19, x27, x19
+ umulh x15, x5, x16
+ str x15, [sp, #40] // 8-byte Folded Spill
+ adcs x1, x21, x1
+ mul x15, x5, x16
+ str x15, [sp, #32] // 8-byte Folded Spill
+ adcs x24, x25, x24
+ adcs x25, x29, xzr
+ adds x4, x7, x4
+ adcs x7, x20, x22
+ mul x20, x4, x9
+ adcs x3, x3, x19
+ mul x19, x20, x8
+ mul x22, x20, x11
+ mov x15, x12
+ mul x29, x20, x15
+ adcs x1, x6, x1
+ mov x21, x14
+ mul x6, x20, x21
+ adcs x24, x26, x24
+ mov x9, x13
+ umulh x26, x20, x9
+ adcs x25, x30, x25
+ adcs x30, xzr, xzr
+ adds x6, x26, x6
+ umulh x26, x20, x21
+ adcs x26, x26, x29
+ umulh x29, x20, x15
+ adcs x22, x29, x22
+ umulh x29, x20, x11
+ mov x13, x11
+ adcs x19, x29, x19
+ umulh x29, x20, x8
+ mov x12, x8
+ mul x20, x20, x9
+ mov x14, x9
+ adcs x29, x29, xzr
+ cmn x20, x4
+ umulh x4, x5, x0
+ mul x20, x5, x0
+ umulh x11, x5, x18
+ mul x9, x5, x18
+ umulh x8, x5, x17
+ mul x5, x5, x17
+ umulh x23, x2, x10
+ str x23, [sp, #48] // 8-byte Folded Spill
+ mul x10, x2, x10
+ str x10, [sp, #24] // 8-byte Folded Spill
+ umulh x10, x2, x16
+ str x10, [sp, #8] // 8-byte Folded Spill
[sp, #8] // 8-byte Folded Spill + mul x28, x2, x16 + umulh x27, x2, x0 + mul x23, x2, x0 + umulh x16, x2, x18 + mul x18, x2, x18 + umulh x0, x2, x17 + mul x17, x2, x17 + adcs x2, x6, x7 + adcs x3, x26, x3 + adcs x1, x22, x1 + adcs x6, x19, x24 + adcs x7, x29, x25 + adcs x19, x30, xzr + adds x8, x8, x9 + adcs x9, x11, x20 + ldr x10, [sp, #32] // 8-byte Folded Reload + adcs x10, x4, x10 + ldr x11, [sp, #56] // 8-byte Folded Reload + ldr x4, [sp, #40] // 8-byte Folded Reload + adcs x4, x4, x11 + ldr x11, [sp, #64] // 8-byte Folded Reload + adcs x20, x11, xzr + adds x2, x2, x5 + adcs x8, x3, x8 + ldr x24, [sp, #16] // 8-byte Folded Reload + mul x3, x2, x24 + adcs x9, x1, x9 + mul x1, x3, x12 + mul x5, x3, x13 + mul x22, x3, x15 + adcs x10, x6, x10 + mul x6, x3, x21 + adcs x4, x7, x4 + umulh x7, x3, x14 + adcs x19, x19, x20 + adcs x20, xzr, xzr + adds x6, x7, x6 + umulh x7, x3, x21 + adcs x7, x7, x22 + umulh x22, x3, x15 + mov x25, x15 + adcs x5, x22, x5 + umulh x22, x3, x13 + adcs x1, x22, x1 + umulh x22, x3, x12 + mul x3, x3, x14 + adcs x22, x22, xzr + cmn x3, x2 + adcs x8, x6, x8 + adcs x9, x7, x9 + adcs x10, x5, x10 + adcs x1, x1, x4 + adcs x2, x22, x19 + adcs x3, x20, xzr + adds x11, x0, x18 + adcs x15, x16, x23 + adcs x16, x27, x28 + ldr x18, [sp, #24] // 8-byte Folded Reload + ldr x0, [sp, #8] // 8-byte Folded Reload + adcs x18, x0, x18 + ldr x0, [sp, #48] // 8-byte Folded Reload + adcs x4, x0, xzr + adds x8, x8, x17 + adcs x9, x9, x11 + mul x11, x8, x24 + adcs x10, x10, x15 + umulh x15, x11, x12 + mul x17, x11, x12 + umulh x5, x11, x13 + mul x6, x11, x13 + mov x0, x13 + mov x20, x25 + umulh x7, x11, x20 + mul x19, x11, x20 + mov x23, x20 + mov x13, x21 + umulh x20, x11, x13 + mul x21, x11, x13 + umulh x22, x11, x14 + mul x11, x11, x14 + adcs x16, x1, x16 + adcs x18, x2, x18 + adcs x1, x3, x4 + adcs x2, xzr, xzr + adds x3, x22, x21 + adcs x4, x20, x19 + adcs x6, x7, x6 + adcs x17, x5, x17 + adcs x15, x15, xzr + cmn x11, x8 + adcs x8, x3, x9 + adcs x9, x4, x10 + adcs x10, x6, x16 + adcs x11, x17, x18 + adcs x15, x15, x1 + adcs x16, x2, xzr + subs x1, x8, x14 + sbcs x13, x9, x13 + sbcs x14, x10, x23 + sbcs x17, x11, x0 + sbcs x18, x15, x12 + sbcs x16, x16, xzr + tst x16, #0x1 + csel x8, x8, x1, ne + csel x9, x9, x13, ne + csel x10, x10, x14, ne + csel x11, x11, x17, ne + csel x12, x15, x18, ne + ldr x13, [sp, #72] // 8-byte Folded Reload + stp x8, x9, [x13] + stp x10, x11, [x13, #16] + str x12, [x13, #32] + add sp, sp, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end68: + .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L + + .globl mcl_fp_montNF5L + .align 2 + .type mcl_fp_montNF5L,@function +mcl_fp_montNF5L: // @mcl_fp_montNF5L +// BB#0: + stp x28, x27, [sp, #-96]! 
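+// mcl_fp_montNF5L: same CIOS core as mcl_fp_mont5L, but the "NF"
+// variant never tracks a spare carry limb; the final correction selects
+// on the *sign* of t - p (the asr/cmp/csel ..., lt tail) instead of on
+// a borrow bit. Roughly, with d[] = t - p computed limb-wise as in the
+// sketch above:
+//   int64_t sign = (int64_t)d[4];        // top difference word
+//   for (int j = 0; j < 5; j++)
+//       r[j] = sign < 0 ? t[j] : d[j];   // csel ..., lt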
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #32 // =32 + str x0, [sp, #24] // 8-byte Folded Spill + ldp x16, x14, [x1, #24] + ldp x18, x15, [x1, #8] + ldr x17, [x1] + ldur x13, [x3, #-8] + ldp x9, x8, [x3, #24] + ldp x11, x10, [x3, #8] + ldr x12, [x3] + ldp x1, x3, [x2] + ldp x4, x5, [x2, #16] + ldr x2, [x2, #32] + umulh x6, x14, x1 + mul x7, x14, x1 + umulh x19, x16, x1 + mul x20, x16, x1 + umulh x21, x15, x1 + mul x22, x15, x1 + umulh x23, x18, x1 + mul x24, x18, x1 + umulh x25, x17, x1 + mul x1, x17, x1 + umulh x26, x3, x14 + mul x27, x3, x14 + umulh x28, x3, x16 + mul x29, x3, x16 + umulh x30, x3, x15 + adds x24, x25, x24 + mul x25, x3, x15 + adcs x22, x23, x22 + umulh x23, x3, x18 + adcs x20, x21, x20 + mul x21, x1, x13 + adcs x7, x19, x7 + mul x19, x21, x12 + adcs x6, x6, xzr + cmn x19, x1 + mul x1, x3, x18 + mul x19, x21, x11 + adcs x19, x19, x24 + mul x24, x21, x10 + adcs x22, x24, x22 + mul x24, x21, x9 + adcs x20, x24, x20 + mul x24, x21, x8 + adcs x7, x24, x7 + umulh x24, x21, x12 + adcs x6, x6, xzr + adds x19, x19, x24 + umulh x24, x21, x11 + adcs x22, x22, x24 + umulh x24, x21, x10 + adcs x20, x20, x24 + umulh x24, x21, x9 + adcs x7, x7, x24 + umulh x24, x3, x17 + mul x3, x3, x17 + umulh x21, x21, x8 + adcs x6, x6, x21 + umulh x21, x4, x14 + adds x1, x24, x1 + mul x24, x4, x14 + adcs x23, x23, x25 + umulh x25, x4, x16 + adcs x29, x30, x29 + mul x30, x4, x16 + adcs x27, x28, x27 + umulh x28, x4, x15 + adcs x26, x26, xzr + adds x3, x3, x19 + mul x19, x4, x15 + adcs x1, x1, x22 + umulh x22, x4, x18 + adcs x20, x23, x20 + mul x23, x4, x18 + adcs x7, x29, x7 + mul x29, x3, x13 + adcs x6, x27, x6 + mul x27, x29, x12 + adcs x26, x26, xzr + cmn x27, x3 + umulh x3, x4, x17 + mul x4, x4, x17 + mul x27, x29, x11 + adcs x1, x27, x1 + mul x27, x29, x10 + adcs x20, x27, x20 + mul x27, x29, x9 + adcs x7, x27, x7 + mul x27, x29, x8 + adcs x6, x27, x6 + umulh x27, x29, x12 + adcs x26, x26, xzr + adds x1, x1, x27 + umulh x27, x29, x11 + adcs x20, x20, x27 + umulh x27, x29, x10 + adcs x7, x7, x27 + umulh x27, x29, x9 + adcs x6, x6, x27 + umulh x27, x5, x14 + umulh x29, x29, x8 + adcs x26, x26, x29 + mul x29, x5, x14 + adds x3, x3, x23 + umulh x23, x5, x16 + adcs x19, x22, x19 + mul x22, x5, x16 + adcs x28, x28, x30 + umulh x30, x5, x15 + adcs x24, x25, x24 + mul x25, x5, x15 + adcs x21, x21, xzr + adds x1, x4, x1 + umulh x4, x5, x18 + adcs x3, x3, x20 + mul x20, x5, x18 + adcs x7, x19, x7 + umulh x19, x5, x17 + mul x5, x5, x17 + adcs x6, x28, x6 + mul x28, x1, x13 + adcs x24, x24, x26 + mul x26, x28, x12 + adcs x21, x21, xzr + cmn x26, x1 + umulh x0, x2, x14 + mul x14, x2, x14 + stp x14, x0, [sp, #8] + umulh x26, x2, x16 + mul x1, x2, x16 + umulh x0, x2, x15 + mul x16, x2, x15 + umulh x15, x2, x18 + mul x18, x2, x18 + umulh x14, x2, x17 + mul x17, x2, x17 + mul x2, x28, x11 + adcs x2, x2, x3 + mul x3, x28, x10 + adcs x3, x3, x7 + mul x7, x28, x9 + adcs x6, x7, x6 + mul x7, x28, x8 + adcs x7, x7, x24 + adcs x21, x21, xzr + umulh x24, x28, x12 + adds x2, x2, x24 + umulh x24, x28, x11 + adcs x3, x3, x24 + umulh x24, x28, x10 + adcs x6, x6, x24 + umulh x24, x28, x9 + adcs x7, x7, x24 + umulh x24, x28, x8 + adcs x21, x21, x24 + adds x19, x19, x20 + adcs x4, x4, x25 + adcs x20, x30, x22 + adcs x22, x23, x29 + adcs x23, x27, xzr + adds x2, x5, x2 + adcs x3, x19, x3 + mov x24, x13 + mul x5, x2, x24 + adcs x4, x4, x6 + mul x6, x5, x8 + mul x19, x5, x9 + adcs x7, x20, x7 + mul x20, x5, x10 + adcs x21, x22, 
x21 + mul x22, x5, x12 + adcs x23, x23, xzr + cmn x22, x2 + mul x2, x5, x11 + umulh x22, x5, x8 + adcs x2, x2, x3 + umulh x3, x5, x9 + adcs x4, x20, x4 + umulh x20, x5, x10 + adcs x7, x19, x7 + umulh x19, x5, x11 + umulh x5, x5, x12 + adcs x6, x6, x21 + adcs x21, x23, xzr + adds x2, x2, x5 + adcs x4, x4, x19 + adcs x5, x7, x20 + adcs x3, x6, x3 + adcs x6, x21, x22 + adds x13, x14, x18 + adcs x14, x15, x16 + adcs x15, x0, x1 + ldp x16, x18, [sp, #8] + adcs x16, x26, x16 + adcs x18, x18, xzr + adds x17, x17, x2 + adcs x13, x13, x4 + mul x0, x17, x24 + adcs x14, x14, x5 + mul x1, x0, x8 + mul x2, x0, x9 + mul x4, x0, x10 + mul x5, x0, x11 + mul x7, x0, x12 + umulh x19, x0, x8 + umulh x20, x0, x9 + umulh x21, x0, x10 + umulh x22, x0, x11 + umulh x0, x0, x12 + adcs x15, x15, x3 + adcs x16, x16, x6 + adcs x18, x18, xzr + cmn x7, x17 + adcs x13, x5, x13 + adcs x14, x4, x14 + adcs x15, x2, x15 + adcs x16, x1, x16 + adcs x17, x18, xzr + adds x13, x13, x0 + adcs x14, x14, x22 + adcs x15, x15, x21 + adcs x16, x16, x20 + adcs x17, x17, x19 + subs x12, x13, x12 + sbcs x11, x14, x11 + sbcs x10, x15, x10 + sbcs x9, x16, x9 + sbcs x8, x17, x8 + asr x18, x8, #63 + cmp x18, #0 // =0 + csel x12, x13, x12, lt + csel x11, x14, x11, lt + csel x10, x15, x10, lt + csel x9, x16, x9, lt + csel x8, x17, x8, lt + ldr x13, [sp, #24] // 8-byte Folded Reload + stp x12, x11, [x13] + stp x10, x9, [x13, #16] + str x8, [x13, #32] + add sp, sp, #32 // =32 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end69: + .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L + + .globl mcl_fp_montRed5L + .align 2 + .type mcl_fp_montRed5L,@function +mcl_fp_montRed5L: // @mcl_fp_montRed5L +// BB#0: + stp x26, x25, [sp, #-64]! 
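+// mcl_fp_montRed5L: Montgomery reduction of a 10-limb (double-width)
+// input, r = x * 2^-320 mod p, with p' again loaded from [x2, #-8].
+// A hedged C sketch of the REDC this unrolls (illustrative names;
+// u128 = unsigned __int128 as in the mont5L sketch):
+//
+//   static void mont_red5(uint64_t r[5], uint64_t t[10],
+//                         const uint64_t p[5], uint64_t pi) {
+//       uint64_t extra = 0;                  // carries past t[9]
+//       for (int i = 0; i < 5; i++) {
+//           uint64_t m = t[i] * pi, C = 0;
+//           for (int j = 0; j < 5; j++) {    // t += m*p << (64*i)
+//               u128 s = (u128)m * p[j] + t[i + j] + C;
+//               t[i + j] = (uint64_t)s;  C = (uint64_t)(s >> 64);
+//           }
+//           for (int k = i + 5; k < 10; k++) {   // ripple the carry up
+//               u128 s = (u128)t[k] + C;
+//               t[k] = (uint64_t)s;  C = (uint64_t)(s >> 64);
+//           }
+//           extra += C;
+//       }
+//       uint64_t d[5], bw = 0;               // t[5..9] (+extra) < 2p
+//       for (int j = 0; j < 5; j++) {
+//           u128 s = (u128)t[5 + j] - p[j] - bw;
+//           d[j] = (uint64_t)s;  bw = (uint64_t)(s >> 64) & 1;
+//       }
+//       uint64_t keep = (extra - bw) & 1;    // tst #0x1 / csel ne
+//       for (int j = 0; j < 5; j++) r[j] = keep ? t[5 + j] : d[j];
+//   }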
+ stp x24, x23, [sp, #16] + stp x22, x21, [sp, #32] + stp x20, x19, [sp, #48] + ldur x13, [x2, #-8] + ldp x9, x8, [x2, #24] + ldp x11, x10, [x2, #8] + ldr x12, [x2] + ldp x15, x16, [x1, #64] + ldp x17, x18, [x1, #48] + ldp x2, x3, [x1, #32] + ldp x4, x5, [x1, #16] + ldp x14, x1, [x1] + mul x6, x14, x13 + umulh x7, x6, x8 + mul x19, x6, x8 + umulh x20, x6, x9 + mul x21, x6, x9 + umulh x22, x6, x10 + mul x23, x6, x10 + umulh x24, x6, x11 + mul x25, x6, x11 + umulh x26, x6, x12 + mul x6, x6, x12 + adds x25, x26, x25 + adcs x23, x24, x23 + adcs x21, x22, x21 + adcs x19, x20, x19 + adcs x7, x7, xzr + cmn x14, x6 + adcs x14, x1, x25 + adcs x1, x4, x23 + mul x4, x14, x13 + adcs x5, x5, x21 + umulh x6, x4, x8 + mul x20, x4, x8 + umulh x21, x4, x9 + mul x22, x4, x9 + umulh x23, x4, x10 + mul x24, x4, x10 + umulh x25, x4, x11 + mul x26, x4, x11 + adcs x2, x2, x19 + umulh x19, x4, x12 + mul x4, x4, x12 + adcs x3, x3, x7 + adcs x17, x17, xzr + adcs x18, x18, xzr + adcs x15, x15, xzr + adcs x16, x16, xzr + adcs x7, xzr, xzr + adds x19, x19, x26 + adcs x24, x25, x24 + adcs x22, x23, x22 + adcs x20, x21, x20 + adcs x6, x6, xzr + cmn x4, x14 + adcs x14, x19, x1 + adcs x1, x24, x5 + mul x4, x14, x13 + adcs x2, x22, x2 + umulh x5, x4, x8 + mul x19, x4, x8 + umulh x21, x4, x9 + mul x22, x4, x9 + umulh x23, x4, x10 + mul x24, x4, x10 + umulh x25, x4, x11 + mul x26, x4, x11 + adcs x3, x20, x3 + umulh x20, x4, x12 + mul x4, x4, x12 + adcs x17, x6, x17 + adcs x18, x18, xzr + adcs x15, x15, xzr + adcs x16, x16, xzr + adcs x6, x7, xzr + adds x7, x20, x26 + adcs x20, x25, x24 + adcs x22, x23, x22 + adcs x19, x21, x19 + adcs x5, x5, xzr + cmn x4, x14 + adcs x14, x7, x1 + adcs x1, x20, x2 + mul x2, x14, x13 + adcs x3, x22, x3 + umulh x4, x2, x8 + mul x7, x2, x8 + umulh x20, x2, x9 + mul x21, x2, x9 + umulh x22, x2, x10 + mul x23, x2, x10 + umulh x24, x2, x11 + mul x25, x2, x11 + umulh x26, x2, x12 + mul x2, x2, x12 + adcs x17, x19, x17 + adcs x18, x5, x18 + adcs x15, x15, xzr + adcs x16, x16, xzr + adcs x5, x6, xzr + adds x6, x26, x25 + adcs x19, x24, x23 + adcs x21, x22, x21 + adcs x7, x20, x7 + adcs x4, x4, xzr + cmn x2, x14 + adcs x14, x6, x1 + adcs x1, x19, x3 + mul x13, x14, x13 + adcs x17, x21, x17 + umulh x2, x13, x8 + mul x3, x13, x8 + umulh x6, x13, x9 + mul x19, x13, x9 + umulh x20, x13, x10 + mul x21, x13, x10 + umulh x22, x13, x11 + mul x23, x13, x11 + umulh x24, x13, x12 + mul x13, x13, x12 + adcs x18, x7, x18 + adcs x15, x4, x15 + adcs x16, x16, xzr + adcs x4, x5, xzr + adds x5, x24, x23 + adcs x7, x22, x21 + adcs x19, x20, x19 + adcs x3, x6, x3 + adcs x2, x2, xzr + cmn x13, x14 + adcs x13, x5, x1 + adcs x14, x7, x17 + adcs x17, x19, x18 + adcs x15, x3, x15 + adcs x16, x2, x16 + adcs x18, x4, xzr + subs x12, x13, x12 + sbcs x11, x14, x11 + sbcs x10, x17, x10 + sbcs x9, x15, x9 + sbcs x8, x16, x8 + sbcs x18, x18, xzr + tst x18, #0x1 + csel x12, x13, x12, ne + csel x11, x14, x11, ne + csel x10, x17, x10, ne + csel x9, x15, x9, ne + csel x8, x16, x8, ne + stp x12, x11, [x0] + stp x10, x9, [x0, #16] + str x8, [x0, #32] + ldp x20, x19, [sp, #48] + ldp x22, x21, [sp, #32] + ldp x24, x23, [sp, #16] + ldp x26, x25, [sp], #64 + ret +.Lfunc_end70: + .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L + + .globl mcl_fp_addPre5L + .align 2 + .type mcl_fp_addPre5L,@function +mcl_fp_addPre5L: // @mcl_fp_addPre5L +// BB#0: + ldp x11, x8, [x2, #24] + ldp x17, x9, [x1, #24] + ldp x13, x10, [x2, #8] + ldr x12, [x2] + ldp x14, x15, [x1] + ldr x16, [x1, #16] + adds x12, x12, x14 + str x12, [x0] + adcs x12, x13, x15 + 
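+// (mcl_fp_addPre5L, in progress here, is a plain 5-limb add whose
+// carry-out is materialized by the trailing adcs x8, xzr, xzr and
+// returned in x0. Sketch, u128 = unsigned __int128:)
+//   uint64_t add_pre5(uint64_t r[5], const uint64_t a[5],
+//                     const uint64_t b[5]) {
+//       uint64_t c = 0;
+//       for (int i = 0; i < 5; i++) {
+//           u128 s = (u128)a[i] + b[i] + c;
+//           r[i] = (uint64_t)s;  c = (uint64_t)(s >> 64);
+//       }
+//       return c;                            // mov x0, x8
+//   }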
adcs x10, x10, x16 + stp x12, x10, [x0, #8] + adcs x10, x11, x17 + adcs x9, x8, x9 + adcs x8, xzr, xzr + stp x10, x9, [x0, #24] + mov x0, x8 + ret +.Lfunc_end71: + .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L + + .globl mcl_fp_subPre5L + .align 2 + .type mcl_fp_subPre5L,@function +mcl_fp_subPre5L: // @mcl_fp_subPre5L +// BB#0: + ldp x11, x8, [x2, #24] + ldp x17, x9, [x1, #24] + ldp x13, x10, [x2, #8] + ldr x12, [x2] + ldp x14, x15, [x1] + ldr x16, [x1, #16] + subs x12, x14, x12 + str x12, [x0] + sbcs x12, x15, x13 + sbcs x10, x16, x10 + stp x12, x10, [x0, #8] + sbcs x10, x17, x11 + sbcs x9, x9, x8 + ngcs x8, xzr + and x8, x8, #0x1 + stp x10, x9, [x0, #24] + mov x0, x8 + ret +.Lfunc_end72: + .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L + + .globl mcl_fp_shr1_5L + .align 2 + .type mcl_fp_shr1_5L,@function +mcl_fp_shr1_5L: // @mcl_fp_shr1_5L +// BB#0: + ldp x8, x9, [x1] + ldp x10, x11, [x1, #16] + ldr x12, [x1, #32] + extr x8, x9, x8, #1 + extr x9, x10, x9, #1 + extr x10, x11, x10, #1 + extr x11, x12, x11, #1 + lsr x12, x12, #1 + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + str x12, [x0, #32] + ret +.Lfunc_end73: + .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L + + .globl mcl_fp_add5L + .align 2 + .type mcl_fp_add5L,@function +mcl_fp_add5L: // @mcl_fp_add5L +// BB#0: + ldp x11, x8, [x2, #24] + ldp x17, x9, [x1, #24] + ldp x13, x10, [x2, #8] + ldr x12, [x2] + ldp x14, x15, [x1] + ldr x16, [x1, #16] + adds x12, x12, x14 + ldr x14, [x3, #32] + adcs x13, x13, x15 + adcs x10, x10, x16 + ldp x15, x16, [x3] + stp x12, x13, [x0] + adcs x17, x11, x17 + stp x10, x17, [x0, #16] + adcs x8, x8, x9 + str x8, [x0, #32] + adcs x18, xzr, xzr + ldp x9, x1, [x3, #16] + subs x12, x12, x15 + sbcs x11, x13, x16 + sbcs x10, x10, x9 + sbcs x9, x17, x1 + sbcs x8, x8, x14 + sbcs x13, x18, xzr + and w13, w13, #0x1 + tbnz w13, #0, .LBB74_2 +// BB#1: // %nocarry + stp x12, x11, [x0] + stp x10, x9, [x0, #16] + str x8, [x0, #32] +.LBB74_2: // %carry + ret +.Lfunc_end74: + .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L + + .globl mcl_fp_addNF5L + .align 2 + .type mcl_fp_addNF5L,@function +mcl_fp_addNF5L: // @mcl_fp_addNF5L +// BB#0: + ldp x11, x8, [x1, #24] + ldp x17, x9, [x2, #24] + ldp x13, x10, [x1, #8] + ldr x12, [x1] + ldp x14, x15, [x2] + ldr x16, [x2, #16] + adds x12, x14, x12 + ldp x18, x14, [x3, #24] + adcs x13, x15, x13 + adcs x10, x16, x10 + ldp x15, x16, [x3] + adcs x11, x17, x11 + ldr x17, [x3, #16] + adcs x8, x9, x8 + subs x9, x12, x15 + sbcs x15, x13, x16 + sbcs x16, x10, x17 + sbcs x17, x11, x18 + sbcs x14, x8, x14 + asr x18, x14, #63 + cmp x18, #0 // =0 + csel x9, x12, x9, lt + csel x12, x13, x15, lt + csel x10, x10, x16, lt + csel x11, x11, x17, lt + csel x8, x8, x14, lt + stp x9, x12, [x0] + stp x10, x11, [x0, #16] + str x8, [x0, #32] + ret +.Lfunc_end75: + .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L + + .globl mcl_fp_sub5L + .align 2 + .type mcl_fp_sub5L,@function +mcl_fp_sub5L: // @mcl_fp_sub5L +// BB#0: + ldp x11, x12, [x2, #24] + ldp x17, x13, [x1, #24] + ldp x9, x10, [x2, #8] + ldr x8, [x2] + ldp x14, x15, [x1] + ldr x16, [x1, #16] + subs x8, x14, x8 + sbcs x9, x15, x9 + stp x8, x9, [x0] + sbcs x10, x16, x10 + sbcs x11, x17, x11 + stp x10, x11, [x0, #16] + sbcs x12, x13, x12 + str x12, [x0, #32] + ngcs x13, xzr + and w13, w13, #0x1 + tbnz w13, #0, .LBB76_2 +// BB#1: // %nocarry + ret +.LBB76_2: // %carry + ldp x17, x13, [x3, #24] + ldp x14, x15, [x3] + ldr x16, [x3, #16] + adds x8, x14, x8 + adcs x9, x15, x9 + adcs x10, x16, x10 + adcs x11, x17, x11 + adcs x12, x13, x12 + stp 
x8, x9, [x0] + stp x10, x11, [x0, #16] + str x12, [x0, #32] + ret +.Lfunc_end76: + .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L + + .globl mcl_fp_subNF5L + .align 2 + .type mcl_fp_subNF5L,@function +mcl_fp_subNF5L: // @mcl_fp_subNF5L +// BB#0: + ldp x11, x8, [x2, #24] + ldp x17, x9, [x1, #24] + ldp x13, x10, [x2, #8] + ldr x12, [x2] + ldp x14, x15, [x1] + ldr x16, [x1, #16] + subs x12, x14, x12 + sbcs x13, x15, x13 + ldp x1, x14, [x3, #8] + ldp x15, x18, [x3, #24] + sbcs x10, x16, x10 + ldr x16, [x3] + sbcs x11, x17, x11 + sbcs x8, x9, x8 + asr x9, x8, #63 + extr x17, x9, x8, #63 + and x16, x17, x16 + and x14, x14, x9, ror #63 + and x15, x9, x15 + and x17, x9, x18 + ror x9, x9, #63 + and x9, x9, x1 + adds x12, x16, x12 + adcs x9, x9, x13 + stp x12, x9, [x0] + adcs x9, x14, x10 + str x9, [x0, #16] + adcs x9, x15, x11 + adcs x8, x17, x8 + stp x9, x8, [x0, #24] + ret +.Lfunc_end77: + .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L + + .globl mcl_fpDbl_add5L + .align 2 + .type mcl_fpDbl_add5L,@function +mcl_fpDbl_add5L: // @mcl_fpDbl_add5L +// BB#0: + stp x22, x21, [sp, #-32]! + stp x20, x19, [sp, #16] + ldp x8, x9, [x2, #64] + ldp x10, x11, [x1, #64] + ldp x12, x13, [x2, #48] + ldp x14, x15, [x1, #48] + ldp x16, x17, [x2, #32] + ldp x18, x4, [x1, #32] + ldp x5, x6, [x2, #16] + ldp x19, x2, [x2] + ldp x20, x21, [x1, #16] + ldp x7, x1, [x1] + adds x7, x19, x7 + ldr x19, [x3, #32] + str x7, [x0] + adcs x1, x2, x1 + ldp x2, x7, [x3, #16] + str x1, [x0, #8] + ldp x1, x3, [x3] + adcs x5, x5, x20 + str x5, [x0, #16] + adcs x5, x6, x21 + adcs x16, x16, x18 + stp x5, x16, [x0, #24] + adcs x16, x17, x4 + adcs x12, x12, x14 + adcs x13, x13, x15 + adcs x8, x8, x10 + adcs x9, x9, x11 + adcs x10, xzr, xzr + subs x11, x16, x1 + sbcs x14, x12, x3 + sbcs x15, x13, x2 + sbcs x17, x8, x7 + sbcs x18, x9, x19 + sbcs x10, x10, xzr + tst x10, #0x1 + csel x10, x16, x11, ne + csel x11, x12, x14, ne + csel x12, x13, x15, ne + csel x8, x8, x17, ne + csel x9, x9, x18, ne + stp x10, x11, [x0, #40] + stp x12, x8, [x0, #56] + str x9, [x0, #72] + ldp x20, x19, [sp, #16] + ldp x22, x21, [sp], #32 + ret +.Lfunc_end78: + .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L + + .globl mcl_fpDbl_sub5L + .align 2 + .type mcl_fpDbl_sub5L,@function +mcl_fpDbl_sub5L: // @mcl_fpDbl_sub5L +// BB#0: + stp x22, x21, [sp, #-32]! 
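+// mcl_fpDbl_sub5L: 10-limb subtract; the low five limbs are stored
+// as-is and the high half is repaired by adding p back exactly when the
+// full-width subtraction borrowed (the csel ..., xzr, ne mask below).
+// Sketch (illustrative names; u128 = unsigned __int128):
+//
+//   static void fp_dbl_sub5(uint64_t r[10], const uint64_t a[10],
+//                           const uint64_t b[10], const uint64_t p[5]) {
+//       uint64_t bw = 0;
+//       for (int i = 0; i < 10; i++) {       // r = a - b, track borrow
+//           u128 s = (u128)a[i] - b[i] - bw;
+//           r[i] = (uint64_t)s;  bw = (uint64_t)(s >> 64) & 1;
+//       }
+//       uint64_t c = 0;                      // high half += p if borrowed
+//       for (int i = 0; i < 5; i++) {
+//           u128 s = (u128)r[5 + i] + (bw ? p[i] : 0) + c;
+//           r[5 + i] = (uint64_t)s;  c = (uint64_t)(s >> 64);
+//       }
+//   }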
+ stp x20, x19, [sp, #16] + ldp x8, x9, [x2, #64] + ldp x10, x11, [x1, #64] + ldp x12, x13, [x2, #48] + ldp x14, x15, [x1, #48] + ldp x16, x17, [x2, #32] + ldp x18, x4, [x1, #32] + ldp x5, x6, [x2, #16] + ldp x7, x2, [x2] + ldp x20, x21, [x1, #16] + ldp x19, x1, [x1] + subs x7, x19, x7 + ldr x19, [x3, #32] + str x7, [x0] + sbcs x1, x1, x2 + ldp x2, x7, [x3, #16] + str x1, [x0, #8] + ldp x1, x3, [x3] + sbcs x5, x20, x5 + str x5, [x0, #16] + sbcs x5, x21, x6 + sbcs x16, x18, x16 + stp x5, x16, [x0, #24] + sbcs x16, x4, x17 + sbcs x12, x14, x12 + sbcs x13, x15, x13 + sbcs x8, x10, x8 + sbcs x9, x11, x9 + ngcs x10, xzr + tst x10, #0x1 + csel x10, x19, xzr, ne + csel x11, x7, xzr, ne + csel x14, x2, xzr, ne + csel x15, x3, xzr, ne + csel x17, x1, xzr, ne + adds x16, x17, x16 + adcs x12, x15, x12 + stp x16, x12, [x0, #40] + adcs x12, x14, x13 + adcs x8, x11, x8 + stp x12, x8, [x0, #56] + adcs x8, x10, x9 + str x8, [x0, #72] + ldp x20, x19, [sp, #16] + ldp x22, x21, [sp], #32 + ret +.Lfunc_end79: + .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L + + .globl mcl_fp_mulUnitPre6L + .align 2 + .type mcl_fp_mulUnitPre6L,@function +mcl_fp_mulUnitPre6L: // @mcl_fp_mulUnitPre6L +// BB#0: + ldp x8, x9, [x1, #32] + ldp x10, x11, [x1] + ldp x12, x13, [x1, #16] + mul x14, x10, x2 + mul x15, x11, x2 + umulh x10, x10, x2 + mul x16, x12, x2 + umulh x11, x11, x2 + mul x17, x13, x2 + umulh x12, x12, x2 + mul x18, x8, x2 + umulh x13, x13, x2 + mul x1, x9, x2 + umulh x8, x8, x2 + umulh x9, x9, x2 + adds x10, x10, x15 + stp x14, x10, [x0] + adcs x10, x11, x16 + str x10, [x0, #16] + adcs x10, x12, x17 + str x10, [x0, #24] + adcs x10, x13, x18 + adcs x8, x8, x1 + stp x10, x8, [x0, #32] + adcs x8, x9, xzr + str x8, [x0, #48] + ret +.Lfunc_end80: + .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L + + .globl mcl_fpDbl_mulPre6L + .align 2 + .type mcl_fpDbl_mulPre6L,@function +mcl_fpDbl_mulPre6L: // @mcl_fpDbl_mulPre6L +// BB#0: + stp x28, x27, [sp, #-96]! 
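+// mcl_fpDbl_mulPre6L: plain schoolbook 6x6 -> 12-limb product, no
+// reduction; the sub sp, #400 scratch area and the heavy str/ldr
+// "Folded Spill/Reload" traffic are register pressure from keeping a
+// whole row of mul/umulh partial products live. Sketch (illustrative
+// names; u128 = unsigned __int128):
+//
+//   static void mul_pre6(uint64_t r[12], const uint64_t a[6],
+//                        const uint64_t b[6]) {
+//       for (int k = 0; k < 12; k++) r[k] = 0;
+//       for (int i = 0; i < 6; i++) {
+//           uint64_t C = 0;
+//           for (int j = 0; j < 6; j++) {    // r += a * b[i] at limb i
+//               u128 s = (u128)a[j] * b[i] + r[i + j] + C;
+//               r[i + j] = (uint64_t)s;  C = (uint64_t)(s >> 64);
+//           }
+//           r[i + 6] = C;
+//       }
+//   }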
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #400 // =400 + ldp x8, x9, [x1] + ldp x11, x13, [x1] + ldp x10, x17, [x1, #16] + ldp x12, x14, [x1, #32] + ldp x15, x16, [x2] + ldr x3, [x1, #32] + mul x30, x8, x15 + umulh x18, x14, x15 + str x18, [sp, #392] // 8-byte Folded Spill + mul x18, x14, x15 + str x18, [sp, #384] // 8-byte Folded Spill + umulh x18, x12, x15 + str x18, [sp, #376] // 8-byte Folded Spill + mul x18, x12, x15 + str x18, [sp, #360] // 8-byte Folded Spill + umulh x18, x17, x15 + str x18, [sp, #336] // 8-byte Folded Spill + mul x18, x17, x15 + str x18, [sp, #312] // 8-byte Folded Spill + umulh x18, x10, x15 + str x18, [sp, #304] // 8-byte Folded Spill + mul x18, x10, x15 + str x18, [sp, #272] // 8-byte Folded Spill + umulh x18, x9, x15 + str x18, [sp, #248] // 8-byte Folded Spill + mul x18, x9, x15 + umulh x15, x8, x15 + stp x15, x18, [sp, #216] + mul x15, x8, x16 + str x15, [sp, #280] // 8-byte Folded Spill + mul x15, x14, x16 + str x15, [sp, #352] // 8-byte Folded Spill + mul x15, x12, x16 + str x15, [sp, #328] // 8-byte Folded Spill + mul x15, x17, x16 + str x15, [sp, #296] // 8-byte Folded Spill + mul x15, x10, x16 + str x15, [sp, #264] // 8-byte Folded Spill + mul x15, x9, x16 + umulh x14, x14, x16 + str x14, [sp, #368] // 8-byte Folded Spill + umulh x12, x12, x16 + str x12, [sp, #344] // 8-byte Folded Spill + umulh x12, x17, x16 + str x12, [sp, #320] // 8-byte Folded Spill + umulh x10, x10, x16 + str x10, [sp, #288] // 8-byte Folded Spill + umulh x9, x9, x16 + str x9, [sp, #256] // 8-byte Folded Spill + umulh x8, x8, x16 + stp x8, x15, [sp, #232] + ldp x12, x8, [x2, #16] + ldr x9, [x1, #40] + ldp x15, x10, [x1, #16] + mul x14, x11, x12 + str x14, [sp, #144] // 8-byte Folded Spill + mul x14, x9, x12 + str x14, [sp, #200] // 8-byte Folded Spill + mul x14, x3, x12 + str x14, [sp, #176] // 8-byte Folded Spill + mul x14, x10, x12 + str x14, [sp, #160] // 8-byte Folded Spill + mul x14, x15, x12 + str x14, [sp, #128] // 8-byte Folded Spill + mul x14, x13, x12 + str x14, [sp, #112] // 8-byte Folded Spill + umulh x14, x9, x12 + str x14, [sp, #208] // 8-byte Folded Spill + umulh x14, x3, x12 + str x14, [sp, #192] // 8-byte Folded Spill + umulh x14, x10, x12 + str x14, [sp, #168] // 8-byte Folded Spill + umulh x14, x15, x12 + str x14, [sp, #152] // 8-byte Folded Spill + umulh x14, x13, x12 + str x14, [sp, #120] // 8-byte Folded Spill + umulh x12, x11, x12 + str x12, [sp, #104] // 8-byte Folded Spill + mul x12, x9, x8 + str x12, [sp, #184] // 8-byte Folded Spill + umulh x9, x9, x8 + str x9, [sp, #136] // 8-byte Folded Spill + mul x9, x3, x8 + str x9, [sp, #80] // 8-byte Folded Spill + umulh x9, x3, x8 + str x9, [sp, #96] // 8-byte Folded Spill + mul x9, x10, x8 + str x9, [sp, #64] // 8-byte Folded Spill + umulh x9, x10, x8 + str x9, [sp, #88] // 8-byte Folded Spill + mul x9, x15, x8 + str x9, [sp, #48] // 8-byte Folded Spill + umulh x9, x15, x8 + str x9, [sp, #72] // 8-byte Folded Spill + mul x9, x13, x8 + str x9, [sp, #32] // 8-byte Folded Spill + umulh x9, x13, x8 + str x9, [sp, #56] // 8-byte Folded Spill + mul x9, x11, x8 + str x9, [sp, #24] // 8-byte Folded Spill + umulh x8, x11, x8 + str x8, [sp, #40] // 8-byte Folded Spill + ldp x12, x13, [x1, #32] + ldp x9, x10, [x1] + ldp x11, x1, [x1, #16] + ldp x8, x2, [x2, #32] + mul x22, x9, x8 + mul x28, x13, x8 + mul x27, x12, x8 + mul x24, x1, x8 + mul x20, x11, x8 + mul x19, x10, x8 + umulh x14, x13, x8 + str x14, [sp, #16] // 8-byte 
Folded Spill + umulh x29, x12, x8 + umulh x26, x1, x8 + umulh x23, x11, x8 + umulh x21, x10, x8 + umulh x7, x9, x8 + mul x25, x9, x2 + umulh x6, x9, x2 + mul x4, x10, x2 + umulh x5, x10, x2 + mul x18, x11, x2 + umulh x3, x11, x2 + mul x16, x1, x2 + umulh x1, x1, x2 + mul x15, x12, x2 + umulh x17, x12, x2 + mul x14, x13, x2 + umulh x13, x13, x2 + str x30, [x0] + ldp x9, x8, [sp, #216] + adds x2, x9, x8 + ldp x8, x30, [sp, #272] + ldr x9, [sp, #248] // 8-byte Folded Reload + adcs x8, x9, x8 + ldp x10, x9, [sp, #304] + adcs x9, x10, x9 + ldr x10, [sp, #360] // 8-byte Folded Reload + ldr x11, [sp, #336] // 8-byte Folded Reload + adcs x10, x11, x10 + ldp x12, x11, [sp, #376] + adcs x11, x12, x11 + ldr x12, [sp, #392] // 8-byte Folded Reload + adcs x12, x12, xzr + adds x2, x30, x2 + str x2, [x0, #8] + ldp x30, x2, [sp, #232] + adcs x8, x2, x8 + ldr x2, [sp, #264] // 8-byte Folded Reload + adcs x9, x2, x9 + ldr x2, [sp, #296] // 8-byte Folded Reload + adcs x10, x2, x10 + ldr x2, [sp, #328] // 8-byte Folded Reload + adcs x11, x2, x11 + ldr x2, [sp, #352] // 8-byte Folded Reload + adcs x12, x2, x12 + adcs x2, xzr, xzr + adds x8, x8, x30 + ldr x30, [sp, #256] // 8-byte Folded Reload + adcs x9, x9, x30 + ldr x30, [sp, #288] // 8-byte Folded Reload + adcs x10, x10, x30 + ldr x30, [sp, #320] // 8-byte Folded Reload + adcs x11, x11, x30 + ldr x30, [sp, #344] // 8-byte Folded Reload + adcs x12, x12, x30 + ldr x30, [sp, #368] // 8-byte Folded Reload + adcs x2, x2, x30 + ldr x30, [sp, #144] // 8-byte Folded Reload + adds x8, x30, x8 + str x8, [x0, #16] + ldp x30, x8, [sp, #104] + adcs x8, x8, x9 + ldr x9, [sp, #128] // 8-byte Folded Reload + adcs x9, x9, x10 + ldr x10, [sp, #160] // 8-byte Folded Reload + adcs x10, x10, x11 + ldr x11, [sp, #176] // 8-byte Folded Reload + adcs x11, x11, x12 + ldr x12, [sp, #200] // 8-byte Folded Reload + adcs x12, x12, x2 + adcs x2, xzr, xzr + adds x8, x8, x30 + ldr x30, [sp, #120] // 8-byte Folded Reload + adcs x9, x9, x30 + ldr x30, [sp, #152] // 8-byte Folded Reload + adcs x10, x10, x30 + ldr x30, [sp, #168] // 8-byte Folded Reload + adcs x11, x11, x30 + ldr x30, [sp, #192] // 8-byte Folded Reload + adcs x12, x12, x30 + ldr x30, [sp, #208] // 8-byte Folded Reload + adcs x2, x2, x30 + ldr x30, [sp, #24] // 8-byte Folded Reload + adds x8, x30, x8 + str x8, [x0, #24] + ldp x8, x30, [sp, #32] + adcs x8, x8, x9 + ldr x9, [sp, #48] // 8-byte Folded Reload + adcs x9, x9, x10 + ldr x10, [sp, #64] // 8-byte Folded Reload + adcs x10, x10, x11 + ldr x11, [sp, #80] // 8-byte Folded Reload + adcs x11, x11, x12 + ldr x12, [sp, #184] // 8-byte Folded Reload + adcs x12, x12, x2 + adcs x2, xzr, xzr + adds x8, x8, x30 + ldr x30, [sp, #56] // 8-byte Folded Reload + adcs x9, x9, x30 + ldr x30, [sp, #72] // 8-byte Folded Reload + adcs x10, x10, x30 + ldr x30, [sp, #88] // 8-byte Folded Reload + adcs x11, x11, x30 + ldr x30, [sp, #96] // 8-byte Folded Reload + adcs x12, x12, x30 + ldr x30, [sp, #136] // 8-byte Folded Reload + adcs x2, x2, x30 + adds x8, x22, x8 + str x8, [x0, #32] + adcs x8, x19, x9 + adcs x9, x20, x10 + adcs x10, x24, x11 + adcs x11, x27, x12 + adcs x12, x28, x2 + adcs x2, xzr, xzr + adds x8, x8, x7 + adcs x9, x9, x21 + adcs x10, x10, x23 + adcs x11, x11, x26 + adcs x12, x12, x29 + ldr x7, [sp, #16] // 8-byte Folded Reload + adcs x2, x2, x7 + adds x8, x25, x8 + str x8, [x0, #40] + adcs x8, x4, x9 + adcs x9, x18, x10 + adcs x10, x16, x11 + adcs x11, x15, x12 + adcs x12, x14, x2 + adcs x14, xzr, xzr + adds x8, x8, x6 + str x8, [x0, #48] + adcs x8, x9, x5 + str x8, [x0, #56] + 
adcs x8, x10, x3 + str x8, [x0, #64] + adcs x8, x11, x1 + str x8, [x0, #72] + adcs x8, x12, x17 + str x8, [x0, #80] + adcs x8, x14, x13 + str x8, [x0, #88] + add sp, sp, #400 // =400 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end81: + .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L + + .globl mcl_fpDbl_sqrPre6L + .align 2 + .type mcl_fpDbl_sqrPre6L,@function +mcl_fpDbl_sqrPre6L: // @mcl_fpDbl_sqrPre6L +// BB#0: + stp x20, x19, [sp, #-16]! + ldp x8, x9, [x1, #8] + ldp x15, x10, [x1, #32] + ldp x11, x13, [x1] + ldr x12, [x1] + ldp x17, x14, [x1, #32] + ldr x16, [x1, #24] + mul x18, x11, x11 + umulh x2, x10, x11 + mul x3, x15, x11 + mul x4, x16, x11 + umulh x5, x9, x11 + mul x6, x9, x11 + umulh x7, x8, x11 + mul x19, x8, x11 + str x18, [x0] + umulh x18, x11, x11 + adds x18, x18, x19 + adcs x6, x7, x6 + adcs x4, x5, x4 + umulh x5, x16, x11 + adcs x3, x5, x3 + mul x5, x10, x11 + umulh x11, x15, x11 + adcs x11, x11, x5 + adcs x2, x2, xzr + adds x18, x19, x18 + ldp x5, x19, [x1, #16] + str x18, [x0, #8] + mul x18, x8, x8 + adcs x18, x18, x6 + mul x6, x9, x8 + adcs x4, x6, x4 + mul x6, x16, x8 + adcs x3, x6, x3 + mul x6, x15, x8 + adcs x11, x6, x11 + mul x6, x10, x8 + adcs x2, x6, x2 + adcs x6, xzr, xzr + adds x18, x18, x7 + ldr x7, [x1, #32] + umulh x10, x10, x8 + umulh x15, x15, x8 + umulh x16, x16, x8 + umulh x9, x9, x8 + umulh x8, x8, x8 + adcs x8, x4, x8 + adcs x9, x3, x9 + ldp x3, x4, [x1] + adcs x11, x11, x16 + mul x16, x12, x5 + adcs x15, x2, x15 + mul x2, x14, x5 + adcs x10, x6, x10 + mul x6, x7, x5 + adds x16, x16, x18 + mul x18, x19, x5 + str x16, [x0, #16] + mul x16, x13, x5 + adcs x8, x16, x8 + mul x16, x5, x5 + adcs x9, x16, x9 + umulh x16, x7, x5 + adcs x11, x18, x11 + adcs x15, x6, x15 + umulh x6, x12, x5 + adcs x10, x2, x10 + adcs x2, xzr, xzr + adds x8, x8, x6 + umulh x6, x13, x5 + adcs x9, x9, x6 + umulh x6, x5, x5 + adcs x11, x11, x6 + umulh x6, x19, x5 + adcs x15, x15, x6 + adcs x10, x10, x16 + umulh x5, x14, x5 + adcs x2, x2, x5 + mul x5, x12, x19 + adds x8, x5, x8 + ldp x16, x5, [x1, #16] + ldr x1, [x1, #40] + str x8, [x0, #24] + mul x8, x13, x19 + adcs x8, x8, x9 + mul x9, x14, x19 + adcs x11, x18, x11 + mul x18, x19, x19 + adcs x15, x18, x15 + mul x18, x7, x19 + umulh x14, x14, x19 + umulh x7, x7, x19 + umulh x13, x13, x19 + umulh x12, x12, x19 + umulh x19, x19, x19 + adcs x10, x18, x10 + mul x18, x3, x17 + adcs x9, x9, x2 + adcs x2, xzr, xzr + adds x8, x8, x12 + mul x12, x1, x17 + adcs x11, x11, x13 + mul x13, x5, x17 + adcs x15, x15, x6 + mul x6, x16, x17 + adcs x10, x10, x19 + mul x19, x4, x17 + adcs x9, x9, x7 + mul x7, x17, x17 + adcs x14, x2, x14 + umulh x2, x1, x17 + adds x8, x18, x8 + umulh x18, x5, x17 + str x8, [x0, #32] + umulh x8, x16, x17 + adcs x11, x19, x11 + umulh x19, x4, x17 + adcs x15, x6, x15 + umulh x6, x3, x17 + umulh x17, x17, x17 + adcs x10, x13, x10 + mul x13, x3, x1 + adcs x9, x7, x9 + adcs x14, x12, x14 + adcs x7, xzr, xzr + adds x11, x11, x6 + mul x6, x5, x1 + adcs x15, x15, x19 + mul x19, x16, x1 + adcs x8, x10, x8 + mul x10, x4, x1 + adcs x9, x9, x18 + mul x18, x1, x1 + umulh x3, x3, x1 + umulh x4, x4, x1 + umulh x16, x16, x1 + umulh x5, x5, x1 + umulh x1, x1, x1 + adcs x14, x14, x17 + adcs x17, x7, x2 + adds x11, x13, x11 + str x11, [x0, #40] + adcs x10, x10, x15 + adcs x8, x19, x8 + adcs x9, x6, x9 + adcs x11, x12, x14 + adcs x12, x18, x17 + adcs x13, xzr, xzr + adds x10, x10, x3 + adcs x8, x8, x4 + stp 
x10, x8, [x0, #48] + adcs x8, x9, x16 + str x8, [x0, #64] + adcs x8, x11, x5 + str x8, [x0, #72] + adcs x8, x12, x2 + str x8, [x0, #80] + adcs x8, x13, x1 + str x8, [x0, #88] + ldp x20, x19, [sp], #16 + ret +.Lfunc_end82: + .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L + + .globl mcl_fp_mont6L + .align 2 + .type mcl_fp_mont6L,@function +mcl_fp_mont6L: // @mcl_fp_mont6L +// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #48 // =48 + str x0, [sp, #24] // 8-byte Folded Spill + ldr x5, [x2] + ldp x0, x4, [x1, #32] + ldp x16, x18, [x1, #16] + ldp x10, x1, [x1] + ldur x12, [x3, #-8] + str x12, [sp, #40] // 8-byte Folded Spill + ldp x11, x8, [x3, #32] + str x8, [sp, #32] // 8-byte Folded Spill + ldp x13, x17, [x3, #16] + ldp x14, x15, [x3] + ldr x3, [x2, #8] + umulh x6, x4, x5 + mul x7, x4, x5 + umulh x19, x0, x5 + mul x20, x0, x5 + umulh x21, x18, x5 + mul x22, x18, x5 + umulh x23, x16, x5 + mul x24, x16, x5 + umulh x25, x1, x5 + mul x26, x1, x5 + umulh x27, x10, x5 + mul x5, x10, x5 + umulh x28, x3, x4 + adds x26, x27, x26 + mul x27, x5, x12 + adcs x24, x25, x24 + mul x25, x27, x8 + mul x29, x27, x11 + mul x30, x27, x17 + adcs x22, x23, x22 + mul x23, x27, x13 + adcs x20, x21, x20 + mul x21, x27, x15 + adcs x7, x19, x7 + umulh x19, x27, x14 + adcs x6, x6, xzr + adds x19, x19, x21 + umulh x21, x27, x15 + adcs x21, x21, x23 + umulh x23, x27, x13 + adcs x23, x23, x30 + umulh x30, x27, x17 + adcs x29, x30, x29 + umulh x30, x27, x11 + adcs x25, x30, x25 + umulh x30, x27, x8 + mul x27, x27, x14 + adcs x30, x30, xzr + cmn x27, x5 + mul x5, x3, x4 + umulh x27, x3, x0 + adcs x19, x19, x26 + mul x26, x3, x0 + adcs x21, x21, x24 + mul x24, x3, x18 + adcs x22, x23, x22 + mul x23, x3, x16 + adcs x20, x29, x20 + mul x29, x3, x1 + adcs x7, x25, x7 + umulh x25, x3, x10 + adcs x30, x30, x6 + adcs x6, xzr, xzr + adds x25, x25, x29 + umulh x29, x3, x1 + adcs x23, x29, x23 + umulh x29, x3, x16 + adcs x24, x29, x24 + umulh x29, x3, x18 + mul x3, x3, x10 + adcs x26, x29, x26 + adcs x27, x27, x5 + adcs x29, x28, xzr + adds x3, x19, x3 + adcs x5, x21, x25 + mul x21, x3, x12 + adcs x28, x22, x23 + umulh x22, x21, x8 + mul x23, x21, x8 + mul x25, x21, x11 + mul x9, x21, x17 + adcs x19, x20, x24 + mul x8, x21, x13 + adcs x20, x7, x26 + mul x24, x21, x15 + adcs x30, x30, x27 + umulh x26, x21, x14 + adcs x6, x6, x29 + adcs x7, xzr, xzr + adds x24, x26, x24 + umulh x26, x21, x15 + adcs x29, x26, x8 + umulh x8, x21, x13 + adcs x26, x8, x9 + umulh x8, x21, x17 + adcs x27, x8, x25 + umulh x8, x21, x11 + mul x9, x21, x14 + adcs x8, x8, x23 + adcs x21, x22, xzr + cmn x9, x3 + ldp x23, x3, [x2, #16] + umulh x9, x23, x4 + adcs x5, x24, x5 + mul x22, x23, x4 + adcs x24, x29, x28 + mul x25, x23, x0 + adcs x19, x26, x19 + mul x26, x23, x18 + adcs x20, x27, x20 + mul x27, x23, x16 + adcs x8, x8, x30 + mul x28, x23, x1 + adcs x21, x21, x6 + umulh x6, x23, x10 + adcs x7, x7, xzr + adds x6, x6, x28 + umulh x28, x23, x1 + adcs x27, x28, x27 + umulh x28, x23, x16 + adcs x26, x28, x26 + umulh x28, x23, x18 + adcs x25, x28, x25 + umulh x28, x23, x0 + mul x23, x23, x10 + adcs x22, x28, x22 + adcs x9, x9, xzr + adds x23, x5, x23 + adcs x5, x24, x6 + mul x29, x23, x12 + adcs x6, x19, x27 + ldr x12, [sp, #32] // 8-byte Folded Reload + mul x28, x29, x12 + mul x27, x29, x11 + mul x30, x29, x17 + adcs x19, x20, x26 + mul x26, x29, x13 + adcs x20, x8, x25 + mul x8, x29, x15 + adcs x21, x21, x22 + umulh x24, 
x29, x14 + adcs x22, x7, x9 + adcs x7, xzr, xzr + adds x24, x24, x8 + umulh x8, x29, x15 + adcs x25, x8, x26 + umulh x8, x29, x13 + adcs x26, x8, x30 + umulh x8, x29, x17 + adcs x27, x8, x27 + umulh x8, x29, x11 + adcs x28, x8, x28 + umulh x8, x29, x12 + mul x9, x29, x14 + adcs x29, x8, xzr + cmn x9, x23 + ldp x23, x8, [x2, #32] + umulh x30, x3, x4 + adcs x2, x24, x5 + mul x5, x3, x4 + adcs x6, x25, x6 + mul x24, x3, x0 + adcs x19, x26, x19 + mul x25, x3, x18 + adcs x20, x27, x20 + mul x26, x3, x16 + adcs x21, x28, x21 + mul x27, x3, x1 + adcs x22, x29, x22 + mov x9, x10 + umulh x28, x3, x9 + adcs x7, x7, xzr + adds x27, x28, x27 + umulh x28, x3, x1 + adcs x26, x28, x26 + umulh x28, x3, x16 + adcs x25, x28, x25 + umulh x28, x3, x18 + adcs x24, x28, x24 + umulh x28, x3, x0 + mul x3, x3, x9 + adcs x5, x28, x5 + adcs x29, x30, xzr + adds x2, x2, x3 + adcs x3, x6, x27 + ldr x10, [sp, #40] // 8-byte Folded Reload + mul x6, x2, x10 + adcs x19, x19, x26 + mul x26, x6, x12 + mul x27, x6, x11 + mov x30, x17 + mul x28, x6, x30 + adcs x20, x20, x25 + mul x25, x6, x13 + adcs x21, x21, x24 + mov x17, x15 + mul x24, x6, x17 + adcs x5, x22, x5 + umulh x22, x6, x14 + adcs x29, x7, x29 + adcs x7, xzr, xzr + adds x22, x22, x24 + umulh x24, x6, x17 + adcs x24, x24, x25 + umulh x25, x6, x13 + mov x15, x13 + adcs x25, x25, x28 + umulh x28, x6, x30 + mov x13, x30 + adcs x27, x28, x27 + umulh x28, x6, x11 + adcs x26, x28, x26 + umulh x28, x6, x12 + mul x6, x6, x14 + adcs x28, x28, xzr + cmn x6, x2 + umulh x2, x23, x4 + mul x6, x23, x4 + adcs x3, x22, x3 + umulh x22, x23, x0 + adcs x19, x24, x19 + mul x24, x23, x0 + adcs x20, x25, x20 + mul x25, x23, x18 + adcs x21, x27, x21 + mul x27, x23, x16 + adcs x5, x26, x5 + mul x26, x23, x1 + adcs x29, x28, x29 + umulh x28, x23, x9 + adcs x7, x7, xzr + adds x26, x28, x26 + umulh x28, x23, x1 + adcs x27, x28, x27 + umulh x28, x23, x16 + adcs x25, x28, x25 + umulh x28, x23, x18 + mul x23, x23, x9 + adcs x24, x28, x24 + umulh x28, x8, x4 + str x28, [sp, #16] // 8-byte Folded Spill + mul x28, x8, x4 + adcs x6, x22, x6 + adcs x2, x2, xzr + adds x3, x3, x23 + adcs x19, x19, x26 + mul x22, x3, x10 + adcs x20, x20, x27 + mul x23, x22, x12 + mul x26, x22, x11 + mul x27, x22, x13 + adcs x21, x21, x25 + mul x25, x22, x15 + adcs x5, x5, x24 + mul x24, x22, x17 + adcs x4, x29, x6 + umulh x6, x22, x14 + adcs x2, x7, x2 + adcs x7, xzr, xzr + adds x6, x6, x24 + umulh x24, x22, x17 + adcs x24, x24, x25 + umulh x25, x22, x15 + adcs x25, x25, x27 + umulh x27, x22, x13 + adcs x26, x27, x26 + umulh x27, x22, x11 + adcs x23, x27, x23 + umulh x27, x22, x12 + mul x22, x22, x14 + adcs x27, x27, xzr + cmn x22, x3 + umulh x3, x8, x0 + mul x0, x8, x0 + umulh x22, x8, x18 + mul x18, x8, x18 + umulh x29, x8, x16 + mul x16, x8, x16 + umulh x30, x8, x1 + mul x1, x8, x1 + umulh x10, x8, x9 + mul x8, x8, x9 + adcs x6, x6, x19 + adcs x19, x24, x20 + adcs x20, x25, x21 + adcs x5, x26, x5 + adcs x9, x23, x4 + str x9, [sp, #8] // 8-byte Folded Spill + adcs x2, x27, x2 + adcs x7, x7, xzr + adds x9, x10, x1 + adcs x16, x30, x16 + adcs x18, x29, x18 + adcs x0, x22, x0 + adcs x1, x3, x28 + ldr x10, [sp, #16] // 8-byte Folded Reload + adcs x3, x10, xzr + adds x8, x6, x8 + adcs x9, x19, x9 + ldr x10, [sp, #40] // 8-byte Folded Reload + mul x4, x8, x10 + adcs x16, x20, x16 + umulh x6, x4, x12 + mul x19, x4, x12 + mov x30, x11 + umulh x20, x4, x30 + mul x21, x4, x30 + umulh x22, x4, x13 + mul x23, x4, x13 + mov x29, x13 + umulh x24, x4, x15 + mul x25, x4, x15 + umulh x26, x4, x17 + mul x27, x4, x17 + umulh x28, x4, 
x14 + mul x4, x4, x14 + adcs x18, x5, x18 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x10, x10, x0 + adcs x0, x2, x1 + adcs x1, x7, x3 + adcs x2, xzr, xzr + adds x3, x28, x27 + adcs x5, x26, x25 + adcs x7, x24, x23 + adcs x21, x22, x21 + adcs x19, x20, x19 + adcs x6, x6, xzr + cmn x4, x8 + adcs x8, x3, x9 + adcs x9, x5, x16 + adcs x16, x7, x18 + adcs x10, x21, x10 + adcs x18, x19, x0 + adcs x0, x6, x1 + adcs x1, x2, xzr + subs x13, x8, x14 + sbcs x12, x9, x17 + sbcs x11, x16, x15 + sbcs x14, x10, x29 + sbcs x15, x18, x30 + ldr x17, [sp, #32] // 8-byte Folded Reload + sbcs x17, x0, x17 + sbcs x1, x1, xzr + tst x1, #0x1 + csel x8, x8, x13, ne + csel x9, x9, x12, ne + csel x11, x16, x11, ne + csel x10, x10, x14, ne + csel x12, x18, x15, ne + csel x13, x0, x17, ne + ldr x14, [sp, #24] // 8-byte Folded Reload + stp x8, x9, [x14] + stp x11, x10, [x14, #16] + stp x12, x13, [x14, #32] + add sp, sp, #48 // =48 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end83: + .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L + + .globl mcl_fp_montNF6L + .align 2 + .type mcl_fp_montNF6L,@function +mcl_fp_montNF6L: // @mcl_fp_montNF6L +// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #112 // =112 + str x0, [sp, #96] // 8-byte Folded Spill + ldp x16, x12, [x1, #32] + ldp x13, x11, [x1, #16] + ldp x17, x0, [x1] + ldur x18, [x3, #-8] + ldr x9, [x3, #32] + str x9, [sp, #104] // 8-byte Folded Spill + ldr x14, [x3, #40] + ldp x4, x10, [x3, #16] + ldr x15, [x3] + str x15, [sp, #8] // 8-byte Folded Spill + ldr x9, [x3, #8] + ldp x5, x3, [x2] + ldp x6, x7, [x2, #16] + ldp x19, x2, [x2, #32] + umulh x20, x12, x5 + mul x21, x12, x5 + umulh x22, x16, x5 + mul x23, x16, x5 + umulh x24, x11, x5 + mul x25, x11, x5 + mov x1, x13 + umulh x26, x1, x5 + mul x27, x1, x5 + mov x13, x0 + umulh x28, x13, x5 + mul x29, x13, x5 + mov x8, x17 + umulh x30, x8, x5 + mul x5, x8, x5 + adds x29, x30, x29 + mul x30, x3, x12 + adcs x27, x28, x27 + mul x28, x3, x16 + adcs x25, x26, x25 + mul x26, x3, x11 + adcs x23, x24, x23 + mul x24, x5, x18 + adcs x21, x22, x21 + mul x22, x24, x15 + adcs x20, x20, xzr + cmn x22, x5 + mul x5, x3, x1 + mov x0, x9 + mul x22, x24, x0 + adcs x22, x22, x29 + mul x29, x24, x4 + adcs x17, x29, x27 + mul x29, x24, x10 + adcs x25, x29, x25 + ldr x9, [sp, #104] // 8-byte Folded Reload + mul x29, x24, x9 + adcs x23, x29, x23 + mul x29, x24, x14 + adcs x21, x29, x21 + umulh x29, x24, x15 + adcs x20, x20, xzr + adds x22, x22, x29 + umulh x29, x24, x0 + adcs x15, x17, x29 + umulh x29, x24, x4 + mov x17, x4 + adcs x25, x25, x29 + umulh x29, x24, x10 + adcs x23, x23, x29 + umulh x29, x24, x9 + adcs x21, x21, x29 + mul x29, x3, x13 + umulh x24, x24, x14 + adcs x20, x20, x24 + umulh x24, x3, x8 + adds x24, x24, x29 + umulh x29, x3, x13 + adcs x5, x29, x5 + umulh x29, x3, x1 + adcs x26, x29, x26 + umulh x29, x3, x11 + adcs x28, x29, x28 + umulh x29, x3, x16 + adcs x29, x29, x30 + umulh x30, x3, x12 + mul x3, x3, x8 + adcs x30, x30, xzr + adds x3, x3, x22 + umulh x22, x6, x12 + adcs x24, x24, x15 + mul x27, x6, x12 + adcs x5, x5, x25 + mul x25, x6, x16 + adcs x23, x26, x23 + mul x26, x6, x11 + adcs x21, x28, x21 + mul x28, x3, x18 + mov x4, x18 + adcs x20, x29, x20 + ldr x18, [sp, #8] // 8-byte Folded Reload + mul x29, x28, x18 + adcs x30, x30, xzr + cmn x29, x3 + mul x3, 
x6, x1 + mul x29, x28, x0 + adcs x24, x29, x24 + mul x29, x28, x17 + adcs x5, x29, x5 + mul x29, x28, x10 + adcs x23, x29, x23 + mul x29, x28, x9 + adcs x21, x29, x21 + mul x29, x28, x14 + adcs x20, x29, x20 + umulh x29, x28, x18 + adcs x30, x30, xzr + adds x24, x24, x29 + umulh x29, x28, x0 + adcs x5, x5, x29 + umulh x29, x28, x17 + adcs x23, x23, x29 + umulh x29, x28, x10 + adcs x21, x21, x29 + umulh x29, x28, x9 + adcs x20, x20, x29 + mul x29, x6, x13 + umulh x28, x28, x14 + adcs x28, x30, x28 + umulh x30, x6, x8 + adds x29, x30, x29 + umulh x30, x6, x13 + adcs x3, x30, x3 + umulh x30, x6, x1 + adcs x26, x30, x26 + umulh x30, x6, x11 + adcs x25, x30, x25 + umulh x30, x6, x16 + mul x6, x6, x8 + adcs x27, x30, x27 + umulh x30, x7, x12 + adcs x22, x22, xzr + adds x6, x6, x24 + mul x24, x7, x12 + adcs x5, x29, x5 + umulh x29, x7, x16 + adcs x3, x3, x23 + mul x23, x7, x16 + adcs x21, x26, x21 + mul x26, x7, x11 + adcs x20, x25, x20 + mul x25, x6, x4 + adcs x27, x27, x28 + mul x28, x25, x18 + adcs x22, x22, xzr + cmn x28, x6 + mul x6, x7, x1 + mul x28, x25, x0 + adcs x5, x28, x5 + mul x28, x25, x17 + adcs x3, x28, x3 + mul x28, x25, x10 + adcs x21, x28, x21 + mul x28, x25, x9 + adcs x20, x28, x20 + mul x28, x25, x14 + adcs x27, x28, x27 + umulh x28, x25, x18 + adcs x22, x22, xzr + adds x5, x5, x28 + umulh x28, x25, x0 + adcs x3, x3, x28 + umulh x28, x25, x17 + adcs x21, x21, x28 + umulh x28, x25, x10 + adcs x20, x20, x28 + umulh x28, x25, x9 + adcs x27, x27, x28 + mul x28, x7, x13 + umulh x25, x25, x14 + adcs x22, x22, x25 + umulh x25, x7, x8 + adds x25, x25, x28 + umulh x28, x7, x13 + adcs x6, x28, x6 + umulh x28, x7, x1 + adcs x26, x28, x26 + umulh x28, x7, x11 + mul x7, x7, x8 + adcs x23, x28, x23 + umulh x9, x19, x12 + str x9, [sp, #16] // 8-byte Folded Spill + adcs x24, x29, x24 + mul x9, x19, x12 + str x9, [sp, #32] // 8-byte Folded Spill + adcs x30, x30, xzr + adds x5, x7, x5 + umulh x7, x19, x16 + adcs x3, x25, x3 + mul x25, x19, x16 + adcs x6, x6, x21 + umulh x21, x19, x11 + adcs x20, x26, x20 + mul x26, x19, x11 + adcs x23, x23, x27 + mul x27, x5, x4 + adcs x22, x24, x22 + mul x24, x27, x18 + adcs x30, x30, xzr + cmn x24, x5 + mov x28, x1 + mul x5, x19, x28 + mul x24, x19, x13 + umulh x1, x19, x8 + umulh x9, x19, x13 + umulh x15, x19, x28 + mul x19, x19, x8 + umulh x29, x2, x12 + str x29, [sp, #88] // 8-byte Folded Spill + mul x29, x2, x12 + umulh x12, x2, x16 + str x12, [sp, #80] // 8-byte Folded Spill + mul x12, x2, x16 + str x12, [sp, #72] // 8-byte Folded Spill + umulh x12, x2, x11 + mul x11, x2, x11 + stp x11, x12, [sp, #56] + umulh x11, x2, x28 + str x11, [sp, #48] // 8-byte Folded Spill + mul x11, x2, x28 + str x11, [sp, #40] // 8-byte Folded Spill + umulh x11, x2, x13 + str x11, [sp, #24] // 8-byte Folded Spill + mul x13, x2, x13 + umulh x16, x2, x8 + mul x28, x2, x8 + mul x2, x27, x0 + adcs x2, x2, x3 + mul x3, x27, x17 + adcs x3, x3, x6 + mul x6, x27, x10 + adcs x6, x6, x20 + ldr x8, [sp, #104] // 8-byte Folded Reload + mul x20, x27, x8 + adcs x20, x20, x23 + mul x23, x27, x14 + adcs x22, x23, x22 + adcs x23, x30, xzr + umulh x30, x27, x18 + adds x2, x2, x30 + umulh x30, x27, x0 + adcs x3, x3, x30 + umulh x30, x27, x17 + mov x12, x17 + adcs x6, x6, x30 + umulh x30, x27, x10 + adcs x20, x20, x30 + umulh x30, x27, x8 + mov x11, x8 + adcs x22, x22, x30 + mov x30, x14 + umulh x27, x27, x30 + adcs x23, x23, x27 + adds x8, x1, x24 + adcs x9, x9, x5 + adcs x14, x15, x26 + adcs x5, x21, x25 + ldr x15, [sp, #32] // 8-byte Folded Reload + adcs x7, x7, x15 + ldr x15, [sp, #16] // 
8-byte Folded Reload + adcs x21, x15, xzr + adds x2, x19, x2 + adcs x8, x8, x3 + adcs x9, x9, x6 + mov x24, x4 + mul x3, x2, x24 + adcs x14, x14, x20 + mul x6, x3, x30 + adcs x5, x5, x22 + mul x19, x3, x11 + adcs x7, x7, x23 + mul x20, x3, x18 + adcs x21, x21, xzr + cmn x20, x2 + mul x2, x3, x10 + mul x20, x3, x0 + adcs x8, x20, x8 + mul x20, x3, x12 + adcs x9, x20, x9 + umulh x20, x3, x30 + adcs x14, x2, x14 + umulh x2, x3, x11 + mov x27, x11 + adcs x5, x19, x5 + mov x11, x10 + umulh x19, x3, x11 + adcs x6, x6, x7 + umulh x7, x3, x18 + adcs x21, x21, xzr + adds x8, x8, x7 + umulh x7, x3, x12 + umulh x3, x3, x0 + adcs x9, x9, x3 + adcs x10, x14, x7 + adcs x3, x5, x19 + adcs x2, x6, x2 + adcs x5, x21, x20 + adds x15, x16, x13 + ldr x13, [sp, #40] // 8-byte Folded Reload + ldr x14, [sp, #24] // 8-byte Folded Reload + adcs x16, x14, x13 + ldp x14, x13, [sp, #48] + adcs x17, x14, x13 + ldp x14, x13, [sp, #64] + adcs x1, x14, x13 + ldr x13, [sp, #80] // 8-byte Folded Reload + adcs x4, x13, x29 + ldr x13, [sp, #88] // 8-byte Folded Reload + adcs x6, x13, xzr + adds x8, x28, x8 + adcs x9, x15, x9 + mul x15, x8, x24 + adcs x10, x16, x10 + mul x16, x15, x30 + mul x14, x15, x27 + mul x7, x15, x11 + mul x19, x15, x12 + mul x20, x15, x0 + mul x21, x15, x18 + umulh x22, x15, x30 + umulh x23, x15, x27 + umulh x24, x15, x11 + mov x28, x11 + umulh x25, x15, x12 + umulh x26, x15, x0 + umulh x15, x15, x18 + adcs x17, x17, x3 + adcs x1, x1, x2 + adcs x2, x4, x5 + adcs x3, x6, xzr + cmn x21, x8 + adcs x8, x20, x9 + adcs x9, x19, x10 + adcs x10, x7, x17 + adcs x17, x14, x1 + adcs x16, x16, x2 + adcs x11, x3, xzr + adds x8, x8, x15 + adcs x9, x9, x26 + adcs x10, x10, x25 + adcs x15, x17, x24 + adcs x16, x16, x23 + adcs x17, x11, x22 + subs x3, x8, x18 + sbcs x2, x9, x0 + sbcs x11, x10, x12 + sbcs x14, x15, x28 + sbcs x18, x16, x27 + sbcs x0, x17, x30 + asr x1, x0, #63 + cmp x1, #0 // =0 + csel x8, x8, x3, lt + csel x9, x9, x2, lt + csel x10, x10, x11, lt + csel x11, x15, x14, lt + csel x12, x16, x18, lt + csel x13, x17, x0, lt + ldr x14, [sp, #96] // 8-byte Folded Reload + stp x8, x9, [x14] + stp x10, x11, [x14, #16] + stp x12, x13, [x14, #32] + add sp, sp, #112 // =112 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end84: + .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L + + .globl mcl_fp_montRed6L + .align 2 + .type mcl_fp_montRed6L,@function +mcl_fp_montRed6L: // @mcl_fp_montRed6L +// BB#0: + stp x26, x25, [sp, #-64]! 
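+// mcl_fp_montRed6L: the same REDC as the 5-limb version above, unrolled
+// for six limbs. The shared tail pattern (subs/sbcs ... tst #0x1 ...
+// csel ..., ne) is a branch-free "subtract p and keep the difference
+// unless it borrowed past the spare carry word":
+//   uint64_t d[6], bw = 0;
+//   for (int i = 0; i < 6; i++) {
+//       u128 s = (u128)t[i] - p[i] - bw;     // u128 = unsigned __int128
+//       d[i] = (uint64_t)s;  bw = (uint64_t)(s >> 64) & 1;
+//   }
+//   uint64_t keep = (extra - bw) & 1;        // sbcs x2, x2, xzr; tst
+//   for (int i = 0; i < 6; i++) r[i] = keep ? t[i] : d[i];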
+ stp x24, x23, [sp, #16] + stp x22, x21, [sp, #32] + stp x20, x19, [sp, #48] + ldur x14, [x2, #-8] + ldp x9, x8, [x2, #32] + ldp x11, x10, [x2, #16] + ldp x13, x12, [x2] + ldp x16, x17, [x1, #80] + ldp x18, x2, [x1, #64] + ldp x3, x4, [x1, #48] + ldp x5, x6, [x1, #32] + ldp x7, x19, [x1, #16] + ldp x15, x1, [x1] + mul x20, x15, x14 + mul x21, x20, x8 + mul x22, x20, x9 + mul x23, x20, x10 + mul x24, x20, x11 + mul x25, x20, x12 + umulh x26, x20, x13 + adds x25, x26, x25 + umulh x26, x20, x12 + adcs x24, x26, x24 + umulh x26, x20, x11 + adcs x23, x26, x23 + umulh x26, x20, x10 + adcs x22, x26, x22 + umulh x26, x20, x9 + adcs x21, x26, x21 + umulh x26, x20, x8 + mul x20, x20, x13 + adcs x26, x26, xzr + cmn x15, x20 + adcs x15, x1, x25 + adcs x1, x7, x24 + mul x7, x15, x14 + adcs x19, x19, x23 + mul x20, x7, x8 + mul x23, x7, x9 + mul x24, x7, x10 + mul x25, x7, x11 + adcs x5, x5, x22 + mul x22, x7, x12 + adcs x6, x6, x21 + umulh x21, x7, x13 + adcs x3, x3, x26 + adcs x4, x4, xzr + adcs x18, x18, xzr + adcs x2, x2, xzr + adcs x16, x16, xzr + adcs x17, x17, xzr + adcs x26, xzr, xzr + adds x21, x21, x22 + umulh x22, x7, x12 + adcs x22, x22, x25 + umulh x25, x7, x11 + adcs x24, x25, x24 + umulh x25, x7, x10 + adcs x23, x25, x23 + umulh x25, x7, x9 + adcs x20, x25, x20 + umulh x25, x7, x8 + mul x7, x7, x13 + adcs x25, x25, xzr + cmn x7, x15 + adcs x15, x21, x1 + adcs x1, x22, x19 + mul x7, x15, x14 + adcs x5, x24, x5 + mul x19, x7, x8 + mul x21, x7, x9 + mul x22, x7, x10 + adcs x6, x23, x6 + mul x23, x7, x11 + adcs x3, x20, x3 + mul x20, x7, x12 + adcs x4, x25, x4 + umulh x24, x7, x13 + adcs x18, x18, xzr + adcs x2, x2, xzr + adcs x16, x16, xzr + adcs x17, x17, xzr + adcs x25, x26, xzr + adds x20, x24, x20 + umulh x24, x7, x12 + adcs x23, x24, x23 + umulh x24, x7, x11 + adcs x22, x24, x22 + umulh x24, x7, x10 + adcs x21, x24, x21 + umulh x24, x7, x9 + adcs x19, x24, x19 + umulh x24, x7, x8 + mul x7, x7, x13 + adcs x24, x24, xzr + cmn x7, x15 + adcs x15, x20, x1 + adcs x1, x23, x5 + mul x5, x15, x14 + adcs x6, x22, x6 + mul x7, x5, x8 + mul x20, x5, x9 + mul x22, x5, x10 + adcs x3, x21, x3 + mul x21, x5, x11 + adcs x4, x19, x4 + mul x19, x5, x12 + adcs x18, x24, x18 + umulh x23, x5, x13 + adcs x2, x2, xzr + adcs x16, x16, xzr + adcs x17, x17, xzr + adcs x24, x25, xzr + adds x19, x23, x19 + umulh x23, x5, x12 + adcs x21, x23, x21 + umulh x23, x5, x11 + adcs x22, x23, x22 + umulh x23, x5, x10 + adcs x20, x23, x20 + umulh x23, x5, x9 + adcs x7, x23, x7 + umulh x23, x5, x8 + mul x5, x5, x13 + adcs x23, x23, xzr + cmn x5, x15 + adcs x15, x19, x1 + adcs x1, x21, x6 + mul x5, x15, x14 + adcs x3, x22, x3 + mul x6, x5, x8 + mul x19, x5, x9 + mul x21, x5, x10 + adcs x4, x20, x4 + mul x20, x5, x11 + adcs x18, x7, x18 + mul x7, x5, x12 + adcs x2, x23, x2 + umulh x22, x5, x13 + adcs x16, x16, xzr + adcs x17, x17, xzr + adcs x23, x24, xzr + adds x7, x22, x7 + umulh x22, x5, x12 + adcs x20, x22, x20 + umulh x22, x5, x11 + adcs x21, x22, x21 + umulh x22, x5, x10 + adcs x19, x22, x19 + umulh x22, x5, x9 + adcs x6, x22, x6 + umulh x22, x5, x8 + mul x5, x5, x13 + adcs x22, x22, xzr + cmn x5, x15 + adcs x15, x7, x1 + adcs x1, x20, x3 + mul x14, x15, x14 + adcs x3, x21, x4 + mul x4, x14, x8 + mul x5, x14, x9 + mul x7, x14, x10 + adcs x18, x19, x18 + mul x19, x14, x11 + adcs x2, x6, x2 + mul x6, x14, x12 + adcs x16, x22, x16 + umulh x20, x14, x13 + adcs x17, x17, xzr + adcs x21, x23, xzr + adds x6, x20, x6 + umulh x20, x14, x12 + adcs x19, x20, x19 + umulh x20, x14, x11 + adcs x7, x20, x7 + umulh x20, x14, x10 + adcs 
x5, x20, x5 + umulh x20, x14, x9 + adcs x4, x20, x4 + umulh x20, x14, x8 + mul x14, x14, x13 + adcs x20, x20, xzr + cmn x14, x15 + adcs x14, x6, x1 + adcs x15, x19, x3 + adcs x18, x7, x18 + adcs x1, x5, x2 + adcs x16, x4, x16 + adcs x17, x20, x17 + adcs x2, x21, xzr + subs x13, x14, x13 + sbcs x12, x15, x12 + sbcs x11, x18, x11 + sbcs x10, x1, x10 + sbcs x9, x16, x9 + sbcs x8, x17, x8 + sbcs x2, x2, xzr + tst x2, #0x1 + csel x13, x14, x13, ne + csel x12, x15, x12, ne + csel x11, x18, x11, ne + csel x10, x1, x10, ne + csel x9, x16, x9, ne + csel x8, x17, x8, ne + stp x13, x12, [x0] + stp x11, x10, [x0, #16] + stp x9, x8, [x0, #32] + ldp x20, x19, [sp, #48] + ldp x22, x21, [sp, #32] + ldp x24, x23, [sp, #16] + ldp x26, x25, [sp], #64 + ret +.Lfunc_end85: + .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L + + .globl mcl_fp_addPre6L + .align 2 + .type mcl_fp_addPre6L,@function +mcl_fp_addPre6L: // @mcl_fp_addPre6L +// BB#0: + ldp x8, x9, [x2, #32] + ldp x10, x11, [x1, #32] + ldp x12, x13, [x2, #16] + ldp x14, x15, [x2] + ldp x16, x17, [x1] + ldp x18, x1, [x1, #16] + adds x14, x14, x16 + str x14, [x0] + adcs x14, x15, x17 + adcs x12, x12, x18 + stp x14, x12, [x0, #8] + adcs x12, x13, x1 + adcs x8, x8, x10 + stp x12, x8, [x0, #24] + adcs x9, x9, x11 + adcs x8, xzr, xzr + str x9, [x0, #40] + mov x0, x8 + ret +.Lfunc_end86: + .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L + + .globl mcl_fp_subPre6L + .align 2 + .type mcl_fp_subPre6L,@function +mcl_fp_subPre6L: // @mcl_fp_subPre6L +// BB#0: + ldp x8, x9, [x2, #32] + ldp x10, x11, [x1, #32] + ldp x12, x13, [x2, #16] + ldp x14, x15, [x2] + ldp x16, x17, [x1] + ldp x18, x1, [x1, #16] + subs x14, x16, x14 + str x14, [x0] + sbcs x14, x17, x15 + sbcs x12, x18, x12 + stp x14, x12, [x0, #8] + sbcs x12, x1, x13 + sbcs x8, x10, x8 + stp x12, x8, [x0, #24] + sbcs x9, x11, x9 + ngcs x8, xzr + and x8, x8, #0x1 + str x9, [x0, #40] + mov x0, x8 + ret +.Lfunc_end87: + .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L + + .globl mcl_fp_shr1_6L + .align 2 + .type mcl_fp_shr1_6L,@function +mcl_fp_shr1_6L: // @mcl_fp_shr1_6L +// BB#0: + ldp x8, x9, [x1] + ldp x10, x11, [x1, #16] + ldp x12, x13, [x1, #32] + extr x8, x9, x8, #1 + extr x9, x10, x9, #1 + extr x10, x11, x10, #1 + extr x11, x12, x11, #1 + extr x12, x13, x12, #1 + lsr x13, x13, #1 + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + stp x12, x13, [x0, #32] + ret +.Lfunc_end88: + .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L + + .globl mcl_fp_add6L + .align 2 + .type mcl_fp_add6L,@function +mcl_fp_add6L: // @mcl_fp_add6L +// BB#0: + ldp x8, x9, [x2, #32] + ldp x10, x11, [x1, #32] + ldp x12, x13, [x2, #16] + ldp x14, x15, [x2] + ldp x16, x17, [x1] + ldp x18, x1, [x1, #16] + adds x14, x14, x16 + adcs x15, x15, x17 + ldp x16, x17, [x3, #32] + adcs x18, x12, x18 + adcs x1, x13, x1 + ldp x12, x2, [x3] + stp x14, x15, [x0] + stp x18, x1, [x0, #16] + adcs x8, x8, x10 + adcs x4, x9, x11 + stp x8, x4, [x0, #32] + adcs x5, xzr, xzr + ldp x9, x10, [x3, #16] + subs x13, x14, x12 + sbcs x12, x15, x2 + sbcs x11, x18, x9 + sbcs x10, x1, x10 + sbcs x9, x8, x16 + sbcs x8, x4, x17 + sbcs x14, x5, xzr + and w14, w14, #0x1 + tbnz w14, #0, .LBB89_2 +// BB#1: // %nocarry + stp x13, x12, [x0] + stp x11, x10, [x0, #16] + stp x9, x8, [x0, #32] +.LBB89_2: // %carry + ret +.Lfunc_end89: + .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L + + .globl mcl_fp_addNF6L + .align 2 + .type mcl_fp_addNF6L,@function +mcl_fp_addNF6L: // @mcl_fp_addNF6L +// BB#0: + ldp x8, x9, [x1, #32] + ldp x10, x11, [x2, #32] + ldp x12, x13, [x1, #16] + 
ldp x14, x15, [x1] + ldp x16, x17, [x2] + ldp x18, x1, [x2, #16] + adds x14, x16, x14 + adcs x15, x17, x15 + ldp x16, x17, [x3, #32] + adcs x12, x18, x12 + adcs x13, x1, x13 + ldp x18, x1, [x3] + adcs x8, x10, x8 + ldp x10, x2, [x3, #16] + adcs x9, x11, x9 + subs x11, x14, x18 + sbcs x18, x15, x1 + sbcs x10, x12, x10 + sbcs x1, x13, x2 + sbcs x16, x8, x16 + sbcs x17, x9, x17 + asr x2, x17, #63 + cmp x2, #0 // =0 + csel x11, x14, x11, lt + csel x14, x15, x18, lt + csel x10, x12, x10, lt + csel x12, x13, x1, lt + csel x8, x8, x16, lt + csel x9, x9, x17, lt + stp x11, x14, [x0] + stp x10, x12, [x0, #16] + stp x8, x9, [x0, #32] + ret +.Lfunc_end90: + .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L + + .globl mcl_fp_sub6L + .align 2 + .type mcl_fp_sub6L,@function +mcl_fp_sub6L: // @mcl_fp_sub6L +// BB#0: + ldp x12, x13, [x2, #32] + ldp x14, x15, [x1, #32] + ldp x10, x11, [x2, #16] + ldp x8, x9, [x2] + ldp x16, x17, [x1] + ldp x18, x1, [x1, #16] + subs x8, x16, x8 + sbcs x9, x17, x9 + stp x8, x9, [x0] + sbcs x10, x18, x10 + sbcs x11, x1, x11 + stp x10, x11, [x0, #16] + sbcs x12, x14, x12 + sbcs x13, x15, x13 + stp x12, x13, [x0, #32] + ngcs x14, xzr + and w14, w14, #0x1 + tbnz w14, #0, .LBB91_2 +// BB#1: // %nocarry + ret +.LBB91_2: // %carry + ldp x14, x15, [x3, #32] + ldp x16, x17, [x3] + ldp x18, x1, [x3, #16] + adds x8, x16, x8 + adcs x9, x17, x9 + adcs x10, x18, x10 + adcs x11, x1, x11 + adcs x12, x14, x12 + adcs x13, x15, x13 + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + stp x12, x13, [x0, #32] + ret +.Lfunc_end91: + .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L + + .globl mcl_fp_subNF6L + .align 2 + .type mcl_fp_subNF6L,@function +mcl_fp_subNF6L: // @mcl_fp_subNF6L +// BB#0: + ldp x8, x9, [x2, #32] + ldp x10, x11, [x1, #32] + ldp x12, x13, [x2, #16] + ldp x14, x18, [x2] + ldp x16, x17, [x1, #16] + ldp x15, x1, [x1] + subs x14, x15, x14 + ldp x15, x2, [x3, #32] + sbcs x18, x1, x18 + sbcs x12, x16, x12 + ldp x16, x1, [x3, #16] + sbcs x13, x17, x13 + ldp x17, x3, [x3] + sbcs x8, x10, x8 + sbcs x9, x11, x9 + asr x10, x9, #63 + adds x11, x10, x10 + and x16, x10, x16 + and x1, x10, x1 + and x15, x10, x15 + and x2, x10, x2 + adcs x10, x10, x10 + orr x11, x11, x9, lsr #63 + and x11, x11, x17 + and x10, x10, x3 + adds x11, x11, x14 + adcs x10, x10, x18 + stp x11, x10, [x0] + adcs x10, x16, x12 + str x10, [x0, #16] + adcs x10, x1, x13 + adcs x8, x15, x8 + stp x10, x8, [x0, #24] + adcs x8, x2, x9 + str x8, [x0, #40] + ret +.Lfunc_end92: + .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L + + .globl mcl_fpDbl_add6L + .align 2 + .type mcl_fpDbl_add6L,@function +mcl_fpDbl_add6L: // @mcl_fpDbl_add6L +// BB#0: + stp x26, x25, [sp, #-64]! 
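+// mcl_fpDbl_add6L: 12-limb add; the low six limbs go straight to the
+// result and the high half, still < 2p, gets one conditional
+// subtraction of p selected on the final borrow, as in the Montgomery
+// tails above. Sketch (illustrative names; u128 = unsigned __int128):
+//
+//   static void fp_dbl_add6(uint64_t r[12], const uint64_t a[12],
+//                           const uint64_t b[12], const uint64_t p[6]) {
+//       uint64_t c = 0, t[6];
+//       for (int i = 0; i < 12; i++) {
+//           u128 s = (u128)a[i] + b[i] + c;
+//           if (i < 6) r[i] = (uint64_t)s; else t[i - 6] = (uint64_t)s;
+//           c = (uint64_t)(s >> 64);
+//       }
+//       uint64_t d[6], bw = 0;               // d = t - p
+//       for (int i = 0; i < 6; i++) {
+//           u128 s = (u128)t[i] - p[i] - bw;
+//           d[i] = (uint64_t)s;  bw = (uint64_t)(s >> 64) & 1;
+//       }
+//       uint64_t keep = (c - bw) & 1;        // tst #0x1 / csel ne
+//       for (int i = 0; i < 6; i++) r[6 + i] = keep ? t[i] : d[i];
+//   }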
+ stp x24, x23, [sp, #16] + stp x22, x21, [sp, #32] + stp x20, x19, [sp, #48] + ldp x8, x9, [x2, #80] + ldp x10, x11, [x1, #80] + ldp x12, x13, [x2, #64] + ldp x14, x15, [x1, #64] + ldp x16, x17, [x2, #48] + ldp x18, x4, [x1, #48] + ldp x5, x6, [x2, #32] + ldp x7, x19, [x1, #32] + ldp x20, x21, [x2, #16] + ldp x23, x2, [x2] + ldp x24, x25, [x1, #16] + ldp x22, x1, [x1] + adds x22, x23, x22 + str x22, [x0] + ldp x22, x23, [x3, #32] + adcs x1, x2, x1 + str x1, [x0, #8] + ldp x1, x2, [x3, #16] + adcs x20, x20, x24 + ldp x24, x3, [x3] + str x20, [x0, #16] + adcs x20, x21, x25 + adcs x5, x5, x7 + stp x20, x5, [x0, #24] + adcs x5, x6, x19 + str x5, [x0, #40] + adcs x16, x16, x18 + adcs x17, x17, x4 + adcs x12, x12, x14 + adcs x13, x13, x15 + adcs x8, x8, x10 + adcs x9, x9, x11 + adcs x10, xzr, xzr + subs x11, x16, x24 + sbcs x14, x17, x3 + sbcs x15, x12, x1 + sbcs x18, x13, x2 + sbcs x1, x8, x22 + sbcs x2, x9, x23 + sbcs x10, x10, xzr + tst x10, #0x1 + csel x10, x16, x11, ne + csel x11, x17, x14, ne + csel x12, x12, x15, ne + csel x13, x13, x18, ne + csel x8, x8, x1, ne + csel x9, x9, x2, ne + stp x10, x11, [x0, #48] + stp x12, x13, [x0, #64] + stp x8, x9, [x0, #80] + ldp x20, x19, [sp, #48] + ldp x22, x21, [sp, #32] + ldp x24, x23, [sp, #16] + ldp x26, x25, [sp], #64 + ret +.Lfunc_end93: + .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L + + .globl mcl_fpDbl_sub6L + .align 2 + .type mcl_fpDbl_sub6L,@function +mcl_fpDbl_sub6L: // @mcl_fpDbl_sub6L +// BB#0: + stp x26, x25, [sp, #-64]! + stp x24, x23, [sp, #16] + stp x22, x21, [sp, #32] + stp x20, x19, [sp, #48] + ldp x8, x9, [x2, #80] + ldp x10, x11, [x1, #80] + ldp x12, x13, [x2, #64] + ldp x14, x15, [x1, #64] + ldp x16, x17, [x2, #48] + ldp x18, x4, [x1, #48] + ldp x5, x6, [x2, #32] + ldp x7, x19, [x1, #32] + ldp x20, x21, [x2, #16] + ldp x22, x2, [x2] + ldp x24, x25, [x1, #16] + ldp x23, x1, [x1] + subs x22, x23, x22 + str x22, [x0] + ldp x22, x23, [x3, #32] + sbcs x1, x1, x2 + str x1, [x0, #8] + ldp x1, x2, [x3, #16] + sbcs x20, x24, x20 + ldp x24, x3, [x3] + str x20, [x0, #16] + sbcs x20, x25, x21 + sbcs x5, x7, x5 + stp x20, x5, [x0, #24] + sbcs x5, x19, x6 + sbcs x16, x18, x16 + sbcs x17, x4, x17 + sbcs x12, x14, x12 + sbcs x13, x15, x13 + sbcs x8, x10, x8 + sbcs x9, x11, x9 + ngcs x10, xzr + tst x10, #0x1 + csel x10, x23, xzr, ne + csel x11, x22, xzr, ne + csel x14, x2, xzr, ne + csel x15, x1, xzr, ne + csel x18, x3, xzr, ne + csel x1, x24, xzr, ne + adds x16, x1, x16 + stp x5, x16, [x0, #40] + adcs x16, x18, x17 + adcs x12, x15, x12 + stp x16, x12, [x0, #56] + adcs x12, x14, x13 + adcs x8, x11, x8 + stp x12, x8, [x0, #72] + adcs x8, x10, x9 + str x8, [x0, #88] + ldp x20, x19, [sp, #48] + ldp x22, x21, [sp, #32] + ldp x24, x23, [sp, #16] + ldp x26, x25, [sp], #64 + ret +.Lfunc_end94: + .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L + + .globl mcl_fp_mulUnitPre7L + .align 2 + .type mcl_fp_mulUnitPre7L,@function +mcl_fp_mulUnitPre7L: // @mcl_fp_mulUnitPre7L +// BB#0: + ldp x10, x8, [x1, #40] + ldp x14, x9, [x1, #24] + ldp x11, x12, [x1] + ldr x13, [x1, #16] + mul x15, x11, x2 + mul x16, x12, x2 + umulh x11, x11, x2 + mul x17, x13, x2 + umulh x12, x12, x2 + mul x18, x14, x2 + umulh x13, x13, x2 + mul x1, x9, x2 + umulh x14, x14, x2 + mul x3, x10, x2 + umulh x9, x9, x2 + mul x4, x8, x2 + umulh x10, x10, x2 + umulh x8, x8, x2 + adds x11, x11, x16 + stp x15, x11, [x0] + adcs x11, x12, x17 + str x11, [x0, #16] + adcs x11, x13, x18 + str x11, [x0, #24] + adcs x11, x14, x1 + adcs x9, x9, x3 + stp x11, x9, [x0, #32] + adcs x9, x10, x4 + 
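+// (mcl_fp_mulUnitPre7L, in progress here: multiply seven limbs by the
+// single word in x2, producing eight limbs; each mul/umulh pair feeds
+// one adcs chain. Sketch, u128 = unsigned __int128:)
+//   uint64_t c = 0;
+//   for (int i = 0; i < 7; i++) {
+//       u128 s = (u128)a[i] * w + c;
+//       r[i] = (uint64_t)s;  c = (uint64_t)(s >> 64);
+//   }
+//   r[7] = c;                                // the final adcs x8, x8, xzr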
adcs x8, x8, xzr + stp x9, x8, [x0, #48] + ret +.Lfunc_end95: + .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L + + .globl mcl_fpDbl_mulPre7L + .align 2 + .type mcl_fpDbl_mulPre7L,@function +mcl_fpDbl_mulPre7L: // @mcl_fpDbl_mulPre7L +// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #624 // =624 + ldp x8, x9, [x1] + ldp x10, x11, [x1, #24] + ldp x12, x13, [x1, #40] + ldp x14, x15, [x2] + ldp x16, x18, [x1, #16] + mul x17, x8, x14 + str x17, [sp, #528] // 8-byte Folded Spill + umulh x17, x13, x14 + str x17, [sp, #616] // 8-byte Folded Spill + mul x17, x13, x14 + str x17, [sp, #608] // 8-byte Folded Spill + umulh x17, x12, x14 + str x17, [sp, #592] // 8-byte Folded Spill + mul x17, x12, x14 + str x17, [sp, #568] // 8-byte Folded Spill + umulh x17, x11, x14 + str x17, [sp, #552] // 8-byte Folded Spill + mul x17, x11, x14 + str x17, [sp, #512] // 8-byte Folded Spill + umulh x17, x10, x14 + str x17, [sp, #496] // 8-byte Folded Spill + mul x17, x10, x14 + str x17, [sp, #456] // 8-byte Folded Spill + umulh x17, x16, x14 + str x17, [sp, #424] // 8-byte Folded Spill + mul x17, x16, x14 + str x17, [sp, #368] // 8-byte Folded Spill + umulh x17, x9, x14 + str x17, [sp, #352] // 8-byte Folded Spill + mul x17, x9, x14 + str x17, [sp, #304] // 8-byte Folded Spill + umulh x14, x8, x14 + str x14, [sp, #272] // 8-byte Folded Spill + mul x14, x13, x15 + str x14, [sp, #560] // 8-byte Folded Spill + mul x14, x12, x15 + str x14, [sp, #520] // 8-byte Folded Spill + mul x14, x11, x15 + str x14, [sp, #488] // 8-byte Folded Spill + mul x14, x10, x15 + str x14, [sp, #448] // 8-byte Folded Spill + mul x14, x16, x15 + umulh x13, x13, x15 + str x13, [sp, #600] // 8-byte Folded Spill + umulh x12, x12, x15 + str x12, [sp, #576] // 8-byte Folded Spill + umulh x11, x11, x15 + str x11, [sp, #544] // 8-byte Folded Spill + umulh x10, x10, x15 + str x10, [sp, #504] // 8-byte Folded Spill + umulh x10, x16, x15 + str x10, [sp, #472] // 8-byte Folded Spill + mul x10, x9, x15 + str x10, [sp, #208] // 8-byte Folded Spill + umulh x9, x9, x15 + stp x9, x14, [sp, #400] + mul x9, x8, x15 + str x9, [sp, #96] // 8-byte Folded Spill + umulh x8, x8, x15 + str x8, [sp, #320] // 8-byte Folded Spill + ldp x9, x11, [x1] + ldp x10, x17, [x2, #16] + ldp x12, x13, [x1, #16] + ldp x14, x16, [x1, #32] + ldr x15, [x1, #48] + mul x8, x9, x10 + str x8, [sp, #248] // 8-byte Folded Spill + mul x8, x15, x10 + str x8, [sp, #392] // 8-byte Folded Spill + mul x8, x16, x10 + str x8, [sp, #344] // 8-byte Folded Spill + mul x8, x14, x10 + str x8, [sp, #296] // 8-byte Folded Spill + mul x8, x13, x10 + str x8, [sp, #240] // 8-byte Folded Spill + mul x8, x12, x10 + str x8, [sp, #192] // 8-byte Folded Spill + mul x8, x11, x10 + str x8, [sp, #136] // 8-byte Folded Spill + umulh x8, x15, x10 + str x8, [sp, #440] // 8-byte Folded Spill + umulh x8, x16, x10 + str x8, [sp, #384] // 8-byte Folded Spill + umulh x8, x14, x10 + str x8, [sp, #336] // 8-byte Folded Spill + umulh x8, x13, x10 + str x8, [sp, #288] // 8-byte Folded Spill + umulh x8, x12, x10 + str x8, [sp, #232] // 8-byte Folded Spill + umulh x8, x11, x10 + str x8, [sp, #184] // 8-byte Folded Spill + umulh x8, x9, x10 + str x8, [sp, #128] // 8-byte Folded Spill + mul x8, x15, x17 + str x8, [sp, #464] // 8-byte Folded Spill + umulh x8, x15, x17 + str x8, [sp, #584] // 8-byte Folded Spill + mul x8, x16, x17 + str x8, [sp, #376] // 8-byte Folded Spill + umulh x8, x16, x17 
+ str x8, [sp, #536] // 8-byte Folded Spill + mul x8, x14, x17 + str x8, [sp, #312] // 8-byte Folded Spill + umulh x8, x14, x17 + str x8, [sp, #480] // 8-byte Folded Spill + mul x8, x13, x17 + str x8, [sp, #224] // 8-byte Folded Spill + umulh x8, x13, x17 + str x8, [sp, #416] // 8-byte Folded Spill + mul x8, x12, x17 + str x8, [sp, #144] // 8-byte Folded Spill + umulh x8, x12, x17 + str x8, [sp, #328] // 8-byte Folded Spill + mul x8, x11, x17 + str x8, [sp, #80] // 8-byte Folded Spill + umulh x8, x11, x17 + str x8, [sp, #264] // 8-byte Folded Spill + mul x28, x9, x17 + umulh x8, x9, x17 + str x8, [sp, #176] // 8-byte Folded Spill + ldp x14, x12, [x1, #24] + ldp x10, x9, [x1] + ldr x7, [x1, #16] + ldp x30, x5, [x1, #40] + ldp x27, x8, [x2, #32] + ldr x13, [x1, #48] + mul x11, x10, x27 + str x11, [sp, #48] // 8-byte Folded Spill + mul x11, x5, x27 + str x11, [sp, #168] // 8-byte Folded Spill + mul x11, x30, x27 + str x11, [sp, #120] // 8-byte Folded Spill + mul x11, x12, x27 + str x11, [sp, #72] // 8-byte Folded Spill + mul x11, x14, x27 + str x11, [sp, #40] // 8-byte Folded Spill + mul x11, x7, x27 + str x11, [sp, #16] // 8-byte Folded Spill + mul x24, x9, x27 + umulh x11, x5, x27 + str x11, [sp, #216] // 8-byte Folded Spill + umulh x11, x30, x27 + str x11, [sp, #160] // 8-byte Folded Spill + umulh x11, x12, x27 + str x11, [sp, #112] // 8-byte Folded Spill + umulh x11, x14, x27 + str x11, [sp, #64] // 8-byte Folded Spill + umulh x11, x7, x27 + str x11, [sp, #32] // 8-byte Folded Spill + umulh x29, x9, x27 + umulh x23, x10, x27 + mul x11, x5, x8 + str x11, [sp, #256] // 8-byte Folded Spill + umulh x11, x5, x8 + str x11, [sp, #432] // 8-byte Folded Spill + mul x11, x30, x8 + str x11, [sp, #152] // 8-byte Folded Spill + umulh x11, x30, x8 + str x11, [sp, #360] // 8-byte Folded Spill + mul x11, x12, x8 + str x11, [sp, #88] // 8-byte Folded Spill + umulh x11, x12, x8 + str x11, [sp, #280] // 8-byte Folded Spill + mul x11, x14, x8 + str x11, [sp, #24] // 8-byte Folded Spill + umulh x11, x14, x8 + str x11, [sp, #200] // 8-byte Folded Spill + mul x25, x7, x8 + umulh x11, x7, x8 + str x11, [sp, #104] // 8-byte Folded Spill + mul x22, x9, x8 + umulh x9, x9, x8 + str x9, [sp, #56] // 8-byte Folded Spill + mul x20, x10, x8 + umulh x26, x10, x8 + ldr x10, [x2, #48] + ldp x2, x8, [x1] + ldr x9, [x1, #16] + ldp x11, x1, [x1, #32] + mul x27, x2, x10 + umulh x21, x2, x10 + mul x5, x8, x10 + umulh x19, x8, x10 + mul x3, x9, x10 + umulh x7, x9, x10 + mul x2, x18, x10 + umulh x6, x18, x10 + mul x17, x11, x10 + umulh x4, x11, x10 + mul x16, x1, x10 + umulh x1, x1, x10 + mul x15, x13, x10 + umulh x18, x13, x10 + ldr x8, [sp, #528] // 8-byte Folded Reload + str x8, [x0] + ldr x8, [sp, #304] // 8-byte Folded Reload + ldr x9, [sp, #272] // 8-byte Folded Reload + adds x13, x9, x8 + ldr x8, [sp, #368] // 8-byte Folded Reload + ldr x9, [sp, #352] // 8-byte Folded Reload + adcs x8, x9, x8 + ldr x9, [sp, #456] // 8-byte Folded Reload + ldr x10, [sp, #424] // 8-byte Folded Reload + adcs x9, x10, x9 + ldr x10, [sp, #512] // 8-byte Folded Reload + ldr x11, [sp, #496] // 8-byte Folded Reload + adcs x10, x11, x10 + ldr x11, [sp, #568] // 8-byte Folded Reload + ldr x12, [sp, #552] // 8-byte Folded Reload + adcs x11, x12, x11 + ldr x12, [sp, #608] // 8-byte Folded Reload + ldr x14, [sp, #592] // 8-byte Folded Reload + adcs x12, x14, x12 + ldr x14, [sp, #616] // 8-byte Folded Reload + adcs x14, x14, xzr + ldr x30, [sp, #96] // 8-byte Folded Reload + adds x13, x30, x13 + str x13, [x0, #8] + ldr x13, [sp, #208] // 8-byte Folded 
Reload + adcs x8, x13, x8 + ldr x13, [sp, #408] // 8-byte Folded Reload + adcs x9, x13, x9 + ldr x13, [sp, #448] // 8-byte Folded Reload + adcs x10, x13, x10 + ldr x13, [sp, #488] // 8-byte Folded Reload + adcs x11, x13, x11 + ldr x13, [sp, #520] // 8-byte Folded Reload + adcs x12, x13, x12 + ldr x13, [sp, #560] // 8-byte Folded Reload + adcs x13, x13, x14 + adcs x14, xzr, xzr + ldr x30, [sp, #320] // 8-byte Folded Reload + adds x8, x8, x30 + ldr x30, [sp, #400] // 8-byte Folded Reload + adcs x9, x9, x30 + ldr x30, [sp, #472] // 8-byte Folded Reload + adcs x10, x10, x30 + ldr x30, [sp, #504] // 8-byte Folded Reload + adcs x11, x11, x30 + ldr x30, [sp, #544] // 8-byte Folded Reload + adcs x12, x12, x30 + ldr x30, [sp, #576] // 8-byte Folded Reload + adcs x13, x13, x30 + ldr x30, [sp, #600] // 8-byte Folded Reload + adcs x14, x14, x30 + ldr x30, [sp, #248] // 8-byte Folded Reload + adds x8, x30, x8 + str x8, [x0, #16] + ldp x30, x8, [sp, #128] + adcs x8, x8, x9 + ldr x9, [sp, #192] // 8-byte Folded Reload + adcs x9, x9, x10 + ldr x10, [sp, #240] // 8-byte Folded Reload + adcs x10, x10, x11 + ldr x11, [sp, #296] // 8-byte Folded Reload + adcs x11, x11, x12 + ldr x12, [sp, #344] // 8-byte Folded Reload + adcs x12, x12, x13 + ldr x13, [sp, #392] // 8-byte Folded Reload + adcs x13, x13, x14 + adcs x14, xzr, xzr + adds x8, x8, x30 + ldr x30, [sp, #184] // 8-byte Folded Reload + adcs x9, x9, x30 + ldr x30, [sp, #232] // 8-byte Folded Reload + adcs x10, x10, x30 + ldr x30, [sp, #288] // 8-byte Folded Reload + adcs x11, x11, x30 + ldr x30, [sp, #336] // 8-byte Folded Reload + adcs x12, x12, x30 + ldr x30, [sp, #384] // 8-byte Folded Reload + adcs x13, x13, x30 + ldr x30, [sp, #440] // 8-byte Folded Reload + adcs x14, x14, x30 + adds x8, x28, x8 + str x8, [x0, #24] + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, x9 + ldr x9, [sp, #144] // 8-byte Folded Reload + adcs x9, x9, x10 + ldr x10, [sp, #224] // 8-byte Folded Reload + adcs x10, x10, x11 + ldr x11, [sp, #312] // 8-byte Folded Reload + adcs x11, x11, x12 + ldr x12, [sp, #376] // 8-byte Folded Reload + adcs x12, x12, x13 + ldr x13, [sp, #464] // 8-byte Folded Reload + adcs x13, x13, x14 + adcs x14, xzr, xzr + ldr x28, [sp, #176] // 8-byte Folded Reload + adds x8, x8, x28 + ldr x28, [sp, #264] // 8-byte Folded Reload + adcs x9, x9, x28 + ldr x28, [sp, #328] // 8-byte Folded Reload + adcs x10, x10, x28 + ldr x28, [sp, #416] // 8-byte Folded Reload + adcs x11, x11, x28 + ldr x28, [sp, #480] // 8-byte Folded Reload + adcs x12, x12, x28 + ldr x28, [sp, #536] // 8-byte Folded Reload + adcs x13, x13, x28 + ldr x28, [sp, #584] // 8-byte Folded Reload + adcs x14, x14, x28 + ldr x28, [sp, #48] // 8-byte Folded Reload + adds x8, x28, x8 + str x8, [x0, #32] + adcs x8, x24, x9 + ldr x9, [sp, #16] // 8-byte Folded Reload + adcs x9, x9, x10 + ldr x10, [sp, #40] // 8-byte Folded Reload + adcs x10, x10, x11 + ldr x11, [sp, #72] // 8-byte Folded Reload + adcs x11, x11, x12 + ldr x12, [sp, #120] // 8-byte Folded Reload + adcs x12, x12, x13 + ldr x13, [sp, #168] // 8-byte Folded Reload + adcs x13, x13, x14 + adcs x14, xzr, xzr + adds x8, x8, x23 + adcs x9, x9, x29 + ldr x23, [sp, #32] // 8-byte Folded Reload + adcs x10, x10, x23 + ldr x23, [sp, #64] // 8-byte Folded Reload + adcs x11, x11, x23 + ldr x23, [sp, #112] // 8-byte Folded Reload + adcs x12, x12, x23 + ldr x23, [sp, #160] // 8-byte Folded Reload + adcs x13, x13, x23 + ldr x23, [sp, #216] // 8-byte Folded Reload + adcs x14, x14, x23 + adds x8, x20, x8 + str x8, [x0, #40] + adcs x8, x22, x9 + adcs 
x9, x25, x10 + ldr x10, [sp, #24] // 8-byte Folded Reload + adcs x10, x10, x11 + ldr x11, [sp, #88] // 8-byte Folded Reload + adcs x11, x11, x12 + ldr x12, [sp, #152] // 8-byte Folded Reload + adcs x12, x12, x13 + ldr x13, [sp, #256] // 8-byte Folded Reload + adcs x13, x13, x14 + adcs x14, xzr, xzr + adds x8, x8, x26 + ldr x20, [sp, #56] // 8-byte Folded Reload + adcs x9, x9, x20 + ldr x20, [sp, #104] // 8-byte Folded Reload + adcs x10, x10, x20 + ldr x20, [sp, #200] // 8-byte Folded Reload + adcs x11, x11, x20 + ldr x20, [sp, #280] // 8-byte Folded Reload + adcs x12, x12, x20 + ldr x20, [sp, #360] // 8-byte Folded Reload + adcs x13, x13, x20 + ldr x20, [sp, #432] // 8-byte Folded Reload + adcs x14, x14, x20 + adds x8, x27, x8 + str x8, [x0, #48] + adcs x8, x5, x9 + adcs x9, x3, x10 + adcs x10, x2, x11 + adcs x11, x17, x12 + adcs x12, x16, x13 + adcs x13, x15, x14 + adcs x14, xzr, xzr + adds x8, x8, x21 + str x8, [x0, #56] + adcs x8, x9, x19 + str x8, [x0, #64] + adcs x8, x10, x7 + str x8, [x0, #72] + adcs x8, x11, x6 + str x8, [x0, #80] + adcs x8, x12, x4 + str x8, [x0, #88] + adcs x8, x13, x1 + str x8, [x0, #96] + adcs x8, x14, x18 + str x8, [x0, #104] + add sp, sp, #624 // =624 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end96: + .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L + + .globl mcl_fpDbl_sqrPre7L + .align 2 + .type mcl_fpDbl_sqrPre7L,@function +mcl_fpDbl_sqrPre7L: // @mcl_fpDbl_sqrPre7L +// BB#0: + stp x24, x23, [sp, #-48]! + stp x22, x21, [sp, #16] + stp x20, x19, [sp, #32] + ldp x11, x8, [x1] + ldp x9, x10, [x1, #40] + ldp x15, x12, [x1, #16] + ldp x16, x3, [x1, #16] + ldp x13, x14, [x1, #32] + ldp x18, x17, [x1, #32] + ldr x2, [x1, #32] + mul x4, x11, x11 + umulh x5, x10, x11 + mul x6, x9, x11 + mul x7, x18, x11 + mul x19, x3, x11 + umulh x20, x16, x11 + mul x21, x16, x11 + umulh x22, x8, x11 + mul x23, x8, x11 + str x4, [x0] + umulh x4, x11, x11 + adds x4, x4, x23 + adcs x21, x22, x21 + adcs x19, x20, x19 + umulh x20, x3, x11 + adcs x7, x20, x7 + umulh x20, x18, x11 + adcs x6, x20, x6 + mul x20, x10, x11 + umulh x11, x9, x11 + adcs x20, x11, x20 + adcs x5, x5, xzr + adds x4, x23, x4 + ldp x11, x23, [x1, #40] + str x4, [x0, #8] + mul x4, x8, x8 + adcs x4, x4, x21 + mul x21, x16, x8 + adcs x19, x21, x19 + mul x21, x3, x8 + adcs x7, x21, x7 + mul x21, x18, x8 + adcs x6, x21, x6 + mul x21, x9, x8 + adcs x20, x21, x20 + mul x21, x10, x8 + umulh x10, x10, x8 + umulh x9, x9, x8 + umulh x18, x18, x8 + umulh x3, x3, x8 + umulh x16, x16, x8 + umulh x8, x8, x8 + adcs x5, x21, x5 + adcs x21, xzr, xzr + adds x4, x4, x22 + adcs x8, x19, x8 + ldp x19, x22, [x1] + adcs x16, x7, x16 + adcs x3, x6, x3 + ldp x6, x7, [x1, #8] + adcs x18, x20, x18 + mul x20, x19, x15 + adcs x9, x5, x9 + mul x5, x23, x15 + adcs x10, x21, x10 + mul x21, x14, x15 + adds x4, x20, x4 + mul x20, x13, x15 + str x4, [x0, #16] + mul x4, x6, x15 + adcs x8, x4, x8 + mul x4, x15, x15 + adcs x16, x4, x16 + mul x4, x12, x15 + adcs x3, x4, x3 + adcs x18, x20, x18 + umulh x20, x13, x15 + adcs x9, x21, x9 + umulh x21, x19, x15 + adcs x10, x5, x10 + adcs x5, xzr, xzr + adds x8, x8, x21 + umulh x21, x6, x15 + adcs x16, x16, x21 + umulh x21, x15, x15 + adcs x3, x3, x21 + umulh x21, x12, x15 + adcs x18, x18, x21 + adcs x9, x9, x20 + umulh x20, x14, x15 + adcs x10, x10, x20 + umulh x15, x23, x15 + adcs x15, x5, x15 + mul x5, x19, x12 + adds x8, x5, x8 + ldr x5, [x1, #32] + str x8, [x0, #24] + mul 
x8, x6, x12 + adcs x8, x8, x16 + ldr x16, [x1] + adcs x3, x4, x3 + mul x4, x12, x12 + adcs x18, x4, x18 + mul x4, x13, x12 + adcs x9, x4, x9 + mul x4, x14, x12 + adcs x10, x4, x10 + mul x4, x23, x12 + umulh x19, x19, x12 + adcs x15, x4, x15 + adcs x4, xzr, xzr + adds x8, x8, x19 + ldr x19, [x1, #24] + umulh x6, x6, x12 + adcs x3, x3, x6 + ldr x6, [x1, #48] + adcs x18, x18, x21 + ldr x20, [x1, #48] + umulh x21, x23, x12 + umulh x14, x14, x12 + umulh x13, x13, x12 + umulh x12, x12, x12 + adcs x9, x9, x12 + adcs x10, x10, x13 + ldp x12, x13, [x1] + adcs x14, x15, x14 + mul x15, x16, x5 + adcs x4, x4, x21 + mul x21, x6, x5 + adds x8, x15, x8 + mul x15, x17, x5 + str x8, [x0, #32] + mul x8, x22, x5 + adcs x8, x8, x3 + mul x3, x7, x5 + adcs x18, x3, x18 + mul x3, x19, x5 + adcs x9, x3, x9 + mul x3, x5, x5 + adcs x10, x3, x10 + umulh x3, x16, x5 + adcs x14, x15, x14 + adcs x4, x21, x4 + adcs x21, xzr, xzr + adds x8, x8, x3 + umulh x3, x22, x5 + adcs x18, x18, x3 + umulh x3, x7, x5 + adcs x9, x9, x3 + umulh x3, x19, x5 + adcs x10, x10, x3 + umulh x3, x5, x5 + adcs x14, x14, x3 + umulh x3, x6, x5 + umulh x5, x17, x5 + adcs x4, x4, x5 + adcs x3, x21, x3 + mul x21, x16, x17 + adds x8, x21, x8 + ldp x21, x1, [x1, #16] + str x8, [x0, #40] + mul x8, x22, x17 + adcs x8, x8, x18 + mul x18, x7, x17 + adcs x9, x18, x9 + mul x18, x19, x17 + adcs x10, x18, x10 + mul x18, x6, x17 + adcs x14, x15, x14 + mul x15, x17, x17 + umulh x6, x6, x17 + umulh x19, x19, x17 + umulh x7, x7, x17 + umulh x22, x22, x17 + umulh x16, x16, x17 + umulh x17, x17, x17 + adcs x15, x15, x4 + mul x4, x12, x20 + adcs x18, x18, x3 + adcs x3, xzr, xzr + adds x8, x8, x16 + mul x16, x11, x20 + adcs x9, x9, x22 + mul x22, x2, x20 + adcs x10, x10, x7 + mul x7, x1, x20 + adcs x14, x14, x19 + mul x19, x21, x20 + adcs x15, x15, x5 + mul x5, x13, x20 + adcs x17, x18, x17 + mul x18, x20, x20 + umulh x12, x12, x20 + umulh x13, x13, x20 + umulh x21, x21, x20 + umulh x1, x1, x20 + umulh x2, x2, x20 + umulh x11, x11, x20 + umulh x20, x20, x20 + adcs x3, x3, x6 + adds x8, x4, x8 + str x8, [x0, #48] + adcs x8, x5, x9 + adcs x9, x19, x10 + adcs x10, x7, x14 + adcs x14, x22, x15 + adcs x15, x16, x17 + adcs x16, x18, x3 + adcs x17, xzr, xzr + adds x8, x8, x12 + str x8, [x0, #56] + adcs x8, x9, x13 + str x8, [x0, #64] + adcs x8, x10, x21 + str x8, [x0, #72] + adcs x8, x14, x1 + str x8, [x0, #80] + adcs x8, x15, x2 + str x8, [x0, #88] + adcs x8, x16, x11 + str x8, [x0, #96] + adcs x8, x17, x20 + str x8, [x0, #104] + ldp x20, x19, [sp, #32] + ldp x22, x21, [sp, #16] + ldp x24, x23, [sp], #48 + ret +.Lfunc_end97: + .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L + + .globl mcl_fp_mont7L + .align 2 + .type mcl_fp_mont7L,@function +mcl_fp_mont7L: // @mcl_fp_mont7L +// BB#0: + stp x28, x27, [sp, #-96]! 
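+// mcl_fp_mont7L(z, x, y, p): 7-limb Montgomery multiplication,
+// z = x*y*R^-1 mod p with R = 2^448. The word-serial multiply/reduce loop is
+// fully unrolled; ldur [x3, #-8] fetches -p^-1 mod 2^64 stored below p.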
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #144 // =144 + str x2, [sp, #112] // 8-byte Folded Spill + str x0, [sp, #64] // 8-byte Folded Spill + ldr x6, [x2] + ldr x15, [x1, #48] + str x15, [sp, #96] // 8-byte Folded Spill + ldr x0, [x1, #32] + str x0, [sp, #56] // 8-byte Folded Spill + ldr x18, [x1, #40] + ldp x11, x13, [x1, #16] + ldp x17, x5, [x1] + str x5, [sp, #88] // 8-byte Folded Spill + ldur x12, [x3, #-8] + str x12, [sp, #128] // 8-byte Folded Spill + ldr x1, [x3, #32] + str x1, [sp, #104] // 8-byte Folded Spill + ldr x9, [x3, #40] + str x9, [sp, #80] // 8-byte Folded Spill + ldr x8, [x3, #16] + str x8, [sp, #136] // 8-byte Folded Spill + ldr x10, [x3, #24] + str x10, [sp, #120] // 8-byte Folded Spill + ldr x14, [x3] + str x14, [sp, #24] // 8-byte Folded Spill + ldr x4, [x3, #8] + str x4, [sp, #72] // 8-byte Folded Spill + ldr x7, [x2, #8] + umulh x19, x15, x6 + mul x20, x15, x6 + umulh x21, x18, x6 + mul x22, x18, x6 + mov x15, x0 + umulh x23, x15, x6 + mul x24, x15, x6 + mov x16, x13 + umulh x25, x16, x6 + mul x26, x16, x6 + mov x13, x11 + umulh x27, x13, x6 + mul x28, x13, x6 + mul x29, x5, x6 + mov x11, x17 + umulh x30, x11, x6 + adds x29, x30, x29 + umulh x30, x5, x6 + mul x6, x11, x6 + adcs x28, x30, x28 + mul x30, x6, x12 + adcs x26, x27, x26 + mul x27, x30, x10 + adcs x24, x25, x24 + mul x25, x30, x8 + adcs x22, x23, x22 + mul x23, x30, x4 + adcs x20, x21, x20 + umulh x21, x30, x14 + adcs x19, x19, xzr + adds x21, x21, x23 + umulh x23, x30, x4 + adcs x23, x23, x25 + umulh x25, x30, x8 + adcs x25, x25, x27 + mul x27, x30, x1 + umulh x17, x30, x10 + adcs x17, x17, x27 + ldr x3, [x3, #48] + str x3, [sp, #48] // 8-byte Folded Spill + mul x27, x30, x9 + umulh x0, x30, x1 + adcs x0, x0, x27 + mul x27, x30, x3 + umulh x2, x30, x9 + adcs x2, x2, x27 + umulh x27, x30, x3 + mul x30, x30, x14 + adcs x27, x27, xzr + cmn x30, x6 + adcs x6, x21, x29 + adcs x21, x23, x28 + mul x23, x7, x15 + adcs x25, x25, x26 + mul x26, x7, x16 + adcs x17, x17, x24 + mul x24, x7, x13 + adcs x0, x0, x22 + mul x22, x7, x5 + adcs x2, x2, x20 + umulh x20, x7, x11 + adcs x19, x27, x19 + adcs x27, xzr, xzr + adds x20, x20, x22 + umulh x22, x7, x5 + adcs x22, x22, x24 + umulh x24, x7, x13 + mov x5, x13 + adcs x24, x24, x26 + umulh x26, x7, x16 + adcs x23, x26, x23 + mul x26, x7, x18 + umulh x28, x7, x15 + adcs x26, x28, x26 + ldr x15, [sp, #96] // 8-byte Folded Reload + mul x28, x7, x15 + umulh x29, x7, x18 + adcs x28, x29, x28 + umulh x29, x7, x15 + mul x7, x7, x11 + adcs x29, x29, xzr + adds x30, x6, x7 + adcs x6, x21, x20 + adcs x25, x25, x22 + mul x22, x30, x12 + adcs x24, x17, x24 + mul x17, x22, x10 + adcs x0, x0, x23 + mul x23, x22, x8 + adcs x7, x2, x26 + mul x2, x22, x4 + adcs x20, x19, x28 + umulh x26, x22, x14 + adcs x21, x27, x29 + adcs x19, xzr, xzr + adds x2, x26, x2 + umulh x26, x22, x4 + adcs x23, x26, x23 + umulh x26, x22, x8 + adcs x17, x26, x17 + mul x26, x22, x1 + umulh x27, x22, x10 + adcs x26, x27, x26 + mul x27, x22, x9 + umulh x28, x22, x1 + adcs x27, x28, x27 + mul x28, x22, x3 + umulh x29, x22, x9 + adcs x28, x29, x28 + umulh x29, x22, x3 + mul x22, x22, x14 + mov x10, x14 + adcs x29, x29, xzr + cmn x22, x30 + adcs x22, x2, x6 + adcs x23, x23, x25 + ldr x8, [sp, #112] // 8-byte Folded Reload + adcs x24, x17, x24 + ldp x25, x17, [x8, #16] + adcs x0, x26, x0 + mul x2, x25, x16 + adcs x6, x27, x7 + mul x7, x25, x5 + adcs x20, x28, x20 + ldp x15, x8, [sp, #88] + mul x26, x25, x15 + adcs 
x21, x29, x21 + mov x12, x11 + umulh x27, x25, x12 + adcs x19, x19, xzr + adds x26, x27, x26 + umulh x27, x25, x15 + adcs x7, x27, x7 + umulh x27, x25, x5 + mov x9, x5 + adcs x2, x27, x2 + ldr x11, [sp, #56] // 8-byte Folded Reload + mul x27, x25, x11 + umulh x28, x25, x16 + mov x13, x16 + adcs x27, x28, x27 + mul x28, x25, x18 + umulh x29, x25, x11 + adcs x28, x29, x28 + mul x29, x25, x8 + umulh x30, x25, x18 + adcs x29, x30, x29 + umulh x30, x25, x8 + mov x14, x8 + mul x25, x25, x12 + mov x5, x12 + adcs x30, x30, xzr + adds x22, x22, x25 + adcs x23, x23, x26 + adcs x7, x24, x7 + adcs x0, x0, x2 + ldp x8, x12, [sp, #128] + mul x2, x22, x8 + adcs x6, x6, x27 + mul x24, x2, x12 + adcs x20, x20, x28 + mul x25, x2, x4 + adcs x21, x21, x29 + mov x1, x10 + umulh x26, x2, x1 + adcs x19, x19, x30 + adcs x27, xzr, xzr + adds x25, x26, x25 + umulh x26, x2, x4 + adcs x24, x26, x24 + ldr x10, [sp, #120] // 8-byte Folded Reload + mul x26, x2, x10 + umulh x28, x2, x12 + adcs x26, x28, x26 + ldr x12, [sp, #104] // 8-byte Folded Reload + mul x28, x2, x12 + umulh x29, x2, x10 + adcs x28, x29, x28 + ldr x10, [sp, #80] // 8-byte Folded Reload + mul x29, x2, x10 + umulh x30, x2, x12 + adcs x29, x30, x29 + mul x30, x2, x3 + umulh x12, x2, x10 + adcs x12, x12, x30 + umulh x30, x2, x3 + mul x2, x2, x1 + adcs x30, x30, xzr + cmn x2, x22 + adcs x2, x25, x23 + adcs x7, x24, x7 + adcs x0, x26, x0 + mul x22, x17, x11 + adcs x6, x28, x6 + mul x23, x17, x13 + adcs x20, x29, x20 + mul x24, x17, x9 + adcs x12, x12, x21 + mul x21, x17, x15 + adcs x19, x30, x19 + umulh x25, x17, x5 + adcs x26, x27, xzr + adds x21, x25, x21 + umulh x25, x17, x15 + adcs x24, x25, x24 + umulh x25, x17, x9 + mov x16, x9 + adcs x23, x25, x23 + umulh x25, x17, x13 + adcs x22, x25, x22 + mul x25, x17, x18 + umulh x27, x17, x11 + adcs x25, x27, x25 + mov x9, x14 + mul x27, x17, x9 + umulh x28, x17, x18 + adcs x27, x28, x27 + umulh x28, x17, x9 + mul x17, x17, x5 + mov x15, x5 + adcs x28, x28, xzr + adds x17, x2, x17 + adcs x2, x7, x21 + adcs x0, x0, x24 + mul x24, x17, x8 + adcs x29, x6, x23 + ldr x9, [sp, #120] // 8-byte Folded Reload + mul x23, x24, x9 + adcs x6, x20, x22 + ldr x8, [sp, #136] // 8-byte Folded Reload + mul x22, x24, x8 + adcs x7, x12, x25 + mul x12, x24, x4 + adcs x20, x19, x27 + umulh x25, x24, x1 + adcs x21, x26, x28 + adcs x19, xzr, xzr + adds x12, x25, x12 + umulh x25, x24, x4 + adcs x25, x25, x22 + umulh x22, x24, x8 + adcs x26, x22, x23 + ldr x5, [sp, #104] // 8-byte Folded Reload + mul x22, x24, x5 + umulh x23, x24, x9 + adcs x27, x23, x22 + mov x9, x10 + mul x22, x24, x9 + umulh x23, x24, x5 + adcs x28, x23, x22 + mul x22, x24, x3 + umulh x23, x24, x9 + adcs x30, x23, x22 + umulh x22, x24, x3 + mul x23, x24, x1 + mov x3, x1 + adcs x24, x22, xzr + cmn x23, x17 + adcs x22, x12, x2 + adcs x23, x25, x0 + ldr x10, [sp, #112] // 8-byte Folded Reload + ldp x12, x0, [x10, #32] + adcs x17, x26, x29 + adcs x2, x27, x6 + mul x6, x12, x13 + adcs x7, x28, x7 + mov x10, x16 + mul x25, x12, x10 + adcs x20, x30, x20 + ldr x16, [sp, #88] // 8-byte Folded Reload + mul x26, x12, x16 + adcs x21, x24, x21 + umulh x24, x12, x15 + adcs x1, x19, xzr + adds x24, x24, x26 + umulh x26, x12, x16 + adcs x25, x26, x25 + umulh x26, x12, x10 + adcs x6, x26, x6 + mul x26, x12, x11 + umulh x27, x12, x13 + adcs x26, x27, x26 + mul x27, x12, x18 + umulh x28, x12, x11 + adcs x27, x28, x27 + mul x28, x12, x14 + umulh x29, x12, x18 + adcs x28, x29, x28 + umulh x29, x12, x14 + mul x12, x12, x15 + adcs x29, x29, xzr + adds x12, x22, x12 + adcs x22, x23, x24 + 
adcs x17, x17, x25 + adcs x2, x2, x6 + ldr x19, [sp, #128] // 8-byte Folded Reload + mul x6, x12, x19 + adcs x7, x7, x26 + mov x30, x8 + mul x23, x6, x30 + adcs x20, x20, x27 + mul x24, x6, x4 + adcs x21, x21, x28 + mov x8, x3 + umulh x25, x6, x8 + adcs x1, x1, x29 + adcs x26, xzr, xzr + adds x24, x25, x24 + umulh x25, x6, x4 + adcs x23, x25, x23 + ldr x4, [sp, #120] // 8-byte Folded Reload + mul x25, x6, x4 + umulh x27, x6, x30 + adcs x25, x27, x25 + mul x27, x6, x5 + umulh x28, x6, x4 + adcs x27, x28, x27 + mov x3, x9 + mul x28, x6, x3 + umulh x29, x6, x5 + adcs x28, x29, x28 + ldr x9, [sp, #48] // 8-byte Folded Reload + mul x29, x6, x9 + umulh x30, x6, x3 + adcs x29, x30, x29 + umulh x30, x6, x9 + mov x3, x9 + mul x6, x6, x8 + mov x5, x8 + adcs x30, x30, xzr + cmn x6, x12 + adcs x12, x24, x22 + adcs x17, x23, x17 + adcs x2, x25, x2 + mul x6, x0, x11 + adcs x7, x27, x7 + mul x22, x0, x13 + adcs x20, x28, x20 + mul x23, x0, x10 + adcs x21, x29, x21 + mul x24, x0, x16 + adcs x29, x30, x1 + mov x1, x15 + umulh x25, x0, x1 + adcs x26, x26, xzr + adds x24, x25, x24 + umulh x25, x0, x16 + adcs x23, x25, x23 + umulh x25, x0, x10 + adcs x22, x25, x22 + umulh x25, x0, x13 + adcs x6, x25, x6 + mul x25, x0, x18 + umulh x27, x0, x11 + adcs x25, x27, x25 + mov x9, x14 + mul x27, x0, x9 + umulh x28, x0, x18 + adcs x27, x28, x27 + umulh x28, x0, x9 + mul x0, x0, x1 + adcs x28, x28, xzr + adds x12, x12, x0 + adcs x8, x17, x24 + str x8, [sp, #40] // 8-byte Folded Spill + adcs x8, x2, x23 + str x8, [sp, #32] // 8-byte Folded Spill + mul x2, x12, x19 + adcs x7, x7, x22 + mul x22, x2, x4 + adcs x8, x20, x6 + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #136] // 8-byte Folded Reload + mul x20, x2, x8 + adcs x21, x21, x25 + ldr x9, [sp, #72] // 8-byte Folded Reload + mul x23, x2, x9 + adcs x19, x29, x27 + mov x15, x5 + umulh x24, x2, x15 + adcs x17, x26, x28 + str x17, [sp, #8] // 8-byte Folded Spill + adcs x26, xzr, xzr + adds x23, x24, x23 + umulh x24, x2, x9 + adcs x20, x24, x20 + umulh x24, x2, x8 + adcs x22, x24, x22 + ldp x25, x8, [sp, #104] + mul x24, x2, x25 + umulh x27, x2, x4 + adcs x6, x27, x24 + ldr x5, [sp, #80] // 8-byte Folded Reload + mul x27, x2, x5 + umulh x28, x2, x25 + adcs x27, x28, x27 + mul x28, x2, x3 + umulh x29, x2, x5 + adcs x28, x29, x28 + ldr x29, [x8, #48] + mul x30, x2, x15 + umulh x2, x2, x3 + adcs x2, x2, xzr + cmn x30, x12 + umulh x24, x29, x14 + mul x30, x29, x14 + umulh x0, x29, x18 + mul x18, x29, x18 + umulh x17, x29, x11 + mul x15, x29, x11 + umulh x14, x29, x13 + mul x13, x29, x13 + umulh x12, x29, x10 + mul x11, x29, x10 + mul x10, x29, x16 + umulh x9, x29, x16 + umulh x8, x29, x1 + mul x29, x29, x1 + ldr x16, [sp, #40] // 8-byte Folded Reload + adcs x23, x23, x16 + ldr x16, [sp, #32] // 8-byte Folded Reload + adcs x20, x20, x16 + adcs x7, x22, x7 + ldr x16, [sp, #16] // 8-byte Folded Reload + adcs x6, x6, x16 + adcs x21, x27, x21 + adcs x19, x28, x19 + ldr x16, [sp, #8] // 8-byte Folded Reload + adcs x2, x2, x16 + adcs x22, x26, xzr + adds x8, x8, x10 + adcs x9, x9, x11 + adcs x10, x12, x13 + adcs x11, x14, x15 + adcs x12, x17, x18 + adcs x13, x0, x30 + adcs x14, x24, xzr + adds x15, x23, x29 + adcs x8, x20, x8 + ldr x16, [sp, #128] // 8-byte Folded Reload + mul x16, x15, x16 + adcs x9, x7, x9 + mul x17, x16, x3 + mul x18, x16, x5 + mul x0, x16, x25 + adcs x10, x6, x10 + mul x6, x16, x4 + adcs x11, x21, x11 + ldr x21, [sp, #136] // 8-byte Folded Reload + mul x7, x16, x21 + adcs x12, x19, x12 + ldr x23, [sp, #72] // 8-byte Folded Reload + mul x19, x16, x23 + 
adcs x13, x2, x13 + ldr x24, [sp, #24] // 8-byte Folded Reload + umulh x2, x16, x24 + adcs x14, x22, x14 + adcs x20, xzr, xzr + adds x2, x2, x19 + umulh x19, x16, x23 + adcs x7, x19, x7 + umulh x19, x16, x21 + adcs x6, x19, x6 + umulh x19, x16, x4 + adcs x0, x19, x0 + umulh x19, x16, x25 + adcs x18, x19, x18 + umulh x19, x16, x5 + adcs x17, x19, x17 + umulh x19, x16, x3 + mul x16, x16, x24 + adcs x19, x19, xzr + cmn x16, x15 + adcs x8, x2, x8 + adcs x9, x7, x9 + adcs x10, x6, x10 + adcs x11, x0, x11 + adcs x12, x18, x12 + adcs x13, x17, x13 + adcs x14, x19, x14 + adcs x15, x20, xzr + subs x16, x8, x24 + sbcs x17, x9, x23 + sbcs x18, x10, x21 + sbcs x0, x11, x4 + sbcs x1, x12, x25 + sbcs x2, x13, x5 + sbcs x3, x14, x3 + sbcs x15, x15, xzr + tst x15, #0x1 + csel x8, x8, x16, ne + csel x9, x9, x17, ne + csel x10, x10, x18, ne + csel x11, x11, x0, ne + csel x12, x12, x1, ne + csel x13, x13, x2, ne + csel x14, x14, x3, ne + ldr x15, [sp, #64] // 8-byte Folded Reload + stp x8, x9, [x15] + stp x10, x11, [x15, #16] + stp x12, x13, [x15, #32] + str x14, [x15, #48] + add sp, sp, #144 // =144 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end98: + .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L + + .globl mcl_fp_montNF7L + .align 2 + .type mcl_fp_montNF7L,@function +mcl_fp_montNF7L: // @mcl_fp_montNF7L +// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #32 // =32 + stp x0, x2, [sp, #8] + ldr x7, [x2] + ldp x5, x16, [x1, #40] + ldp x6, x17, [x1, #24] + ldr x4, [x1] + ldp x1, x18, [x1, #8] + ldur x8, [x3, #-8] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x15, x0, [x3, #40] + ldp x11, x10, [x3, #24] + ldp x13, x12, [x3, #8] + ldr x14, [x3] + ldr x25, [x2, #8] + umulh x3, x16, x7 + mul x19, x16, x7 + umulh x20, x5, x7 + mul x21, x5, x7 + umulh x22, x17, x7 + mul x23, x17, x7 + umulh x24, x6, x7 + mul x26, x6, x7 + umulh x27, x18, x7 + mul x28, x18, x7 + mul x29, x1, x7 + umulh x30, x4, x7 + adds x29, x30, x29 + umulh x30, x1, x7 + mul x7, x4, x7 + adcs x28, x30, x28 + mul x30, x25, x5 + adcs x26, x27, x26 + mul x27, x25, x17 + adcs x23, x24, x23 + mul x24, x25, x6 + adcs x21, x22, x21 + mul x22, x7, x8 + adcs x19, x20, x19 + mul x20, x22, x14 + adcs x3, x3, xzr + cmn x20, x7 + mul x9, x25, x18 + mul x7, x22, x13 + adcs x7, x7, x29 + mul x20, x22, x12 + adcs x20, x20, x28 + mul x28, x22, x11 + adcs x26, x28, x26 + mul x28, x22, x10 + adcs x23, x28, x23 + mul x28, x22, x15 + adcs x21, x28, x21 + mul x28, x22, x0 + adcs x19, x28, x19 + umulh x28, x22, x14 + adcs x29, x3, xzr + adds x28, x7, x28 + umulh x3, x22, x13 + adcs x8, x20, x3 + umulh x3, x22, x12 + adcs x26, x26, x3 + umulh x3, x22, x11 + adcs x3, x23, x3 + umulh x7, x22, x10 + adcs x7, x21, x7 + umulh x20, x22, x15 + adcs x19, x19, x20 + mul x21, x25, x1 + umulh x20, x22, x0 + adcs x20, x29, x20 + umulh x22, x25, x4 + adds x29, x22, x21 + umulh x21, x25, x1 + adcs x23, x21, x9 + umulh x9, x25, x18 + adcs x21, x9, x24 + umulh x9, x25, x6 + adcs x22, x9, x27 + umulh x9, x25, x17 + adcs x30, x9, x30 + mul x9, x25, x16 + umulh x24, x25, x5 + adcs x24, x24, x9 + umulh x9, x25, x16 + mul x25, x25, x4 + adcs x9, x9, xzr + adds x27, x25, x28 + adcs x25, x29, x8 + ldp x28, x8, [x2, #16] + adcs x29, x23, x26 + adcs x3, x21, x3 + mul x21, x28, x17 + adcs x7, x22, x7 + mul x22, x28, x6 + adcs x19, x30, x19 
+ ldr x2, [sp, #24] // 8-byte Folded Reload + mul x23, x27, x2 + adcs x20, x24, x20 + mul x24, x23, x14 + adcs x9, x9, xzr + cmn x24, x27 + mul x24, x28, x18 + mul x26, x23, x13 + adcs x25, x26, x25 + mul x26, x23, x12 + adcs x26, x26, x29 + mul x27, x23, x11 + adcs x3, x27, x3 + mul x27, x23, x10 + adcs x7, x27, x7 + mul x27, x23, x15 + adcs x19, x27, x19 + mul x27, x23, x0 + adcs x20, x27, x20 + umulh x27, x23, x14 + adcs x9, x9, xzr + adds x25, x25, x27 + umulh x27, x23, x13 + adcs x26, x26, x27 + umulh x27, x23, x12 + adcs x3, x3, x27 + umulh x27, x23, x11 + adcs x7, x7, x27 + umulh x27, x23, x10 + adcs x19, x19, x27 + umulh x27, x23, x15 + adcs x20, x20, x27 + mul x27, x28, x1 + umulh x23, x23, x0 + adcs x9, x9, x23 + umulh x23, x28, x4 + adds x23, x23, x27 + umulh x27, x28, x1 + adcs x24, x27, x24 + umulh x27, x28, x18 + adcs x22, x27, x22 + umulh x27, x28, x6 + adcs x21, x27, x21 + mul x27, x28, x5 + umulh x29, x28, x17 + adcs x27, x29, x27 + mul x29, x28, x16 + umulh x30, x28, x5 + adcs x29, x30, x29 + umulh x30, x28, x16 + mul x28, x28, x4 + adcs x30, x30, xzr + adds x25, x28, x25 + adcs x23, x23, x26 + adcs x3, x24, x3 + mul x26, x8, x5 + adcs x7, x22, x7 + mul x22, x8, x17 + adcs x19, x21, x19 + mul x24, x8, x6 + adcs x20, x27, x20 + mul x21, x25, x2 + adcs x9, x29, x9 + mul x27, x21, x14 + adcs x28, x30, xzr + cmn x27, x25 + mul x25, x8, x18 + mul x27, x21, x13 + adcs x23, x27, x23 + mul x27, x21, x12 + adcs x3, x27, x3 + mul x27, x21, x11 + adcs x7, x27, x7 + mul x27, x21, x10 + adcs x19, x27, x19 + mul x27, x21, x15 + adcs x20, x27, x20 + mul x27, x21, x0 + adcs x9, x27, x9 + umulh x27, x21, x14 + adcs x28, x28, xzr + adds x27, x23, x27 + umulh x23, x21, x13 + adcs x3, x3, x23 + umulh x23, x21, x12 + adcs x30, x7, x23 + umulh x7, x21, x11 + adcs x7, x19, x7 + umulh x19, x21, x10 + adcs x19, x20, x19 + umulh x20, x21, x15 + adcs x20, x9, x20 + mul x9, x8, x1 + umulh x21, x21, x0 + adcs x21, x28, x21 + umulh x23, x8, x4 + adds x9, x23, x9 + umulh x23, x8, x1 + adcs x28, x23, x25 + umulh x23, x8, x18 + adcs x23, x23, x24 + umulh x24, x8, x6 + adcs x24, x24, x22 + umulh x22, x8, x17 + adcs x25, x22, x26 + mul x22, x8, x16 + umulh x26, x8, x5 + adcs x26, x26, x22 + umulh x22, x8, x16 + mul x29, x8, x4 + adcs x2, x22, xzr + adds x29, x29, x27 + adcs x27, x9, x3 + ldr x8, [sp, #16] // 8-byte Folded Reload + ldp x22, x3, [x8, #32] + adcs x9, x28, x30 + adcs x7, x23, x7 + mul x23, x22, x17 + adcs x19, x24, x19 + mul x24, x22, x6 + adcs x20, x25, x20 + ldr x8, [sp, #24] // 8-byte Folded Reload + mul x25, x29, x8 + adcs x21, x26, x21 + mul x26, x25, x14 + adcs x2, x2, xzr + cmn x26, x29 + mul x26, x22, x18 + mul x28, x25, x13 + adcs x27, x28, x27 + mul x28, x25, x12 + adcs x9, x28, x9 + mul x28, x25, x11 + adcs x7, x28, x7 + mul x28, x25, x10 + adcs x19, x28, x19 + mul x28, x25, x15 + adcs x20, x28, x20 + mul x28, x25, x0 + adcs x21, x28, x21 + umulh x28, x25, x14 + adcs x2, x2, xzr + adds x27, x27, x28 + umulh x28, x25, x13 + adcs x9, x9, x28 + umulh x28, x25, x12 + adcs x7, x7, x28 + umulh x28, x25, x11 + adcs x19, x19, x28 + umulh x28, x25, x10 + adcs x20, x20, x28 + umulh x28, x25, x15 + adcs x21, x21, x28 + mul x28, x22, x1 + umulh x25, x25, x0 + adcs x2, x2, x25 + umulh x25, x22, x4 + adds x25, x25, x28 + umulh x28, x22, x1 + adcs x26, x28, x26 + umulh x28, x22, x18 + adcs x24, x28, x24 + umulh x28, x22, x6 + adcs x23, x28, x23 + mul x28, x22, x5 + umulh x29, x22, x17 + adcs x28, x29, x28 + mul x29, x22, x16 + umulh x30, x22, x5 + adcs x29, x30, x29 + umulh x30, x22, x16 + mul 
x22, x22, x4 + adcs x30, x30, xzr + adds x22, x22, x27 + adcs x9, x25, x9 + adcs x7, x26, x7 + mul x25, x3, x5 + adcs x19, x24, x19 + mul x24, x3, x17 + adcs x20, x23, x20 + mul x23, x3, x6 + adcs x21, x28, x21 + mul x26, x22, x8 + adcs x8, x29, x2 + mul x27, x26, x14 + adcs x28, x30, xzr + cmn x27, x22 + mul x22, x3, x18 + mul x27, x26, x13 + adcs x9, x27, x9 + mul x27, x26, x12 + adcs x7, x27, x7 + mul x27, x26, x11 + adcs x19, x27, x19 + mul x27, x26, x10 + adcs x20, x27, x20 + mul x27, x26, x15 + adcs x21, x27, x21 + mul x27, x26, x0 + adcs x8, x27, x8 + umulh x27, x26, x14 + adcs x28, x28, xzr + adds x9, x9, x27 + umulh x27, x26, x13 + adcs x7, x7, x27 + umulh x27, x26, x12 + adcs x19, x19, x27 + umulh x27, x26, x11 + adcs x20, x20, x27 + umulh x27, x26, x10 + adcs x21, x21, x27 + umulh x27, x26, x15 + adcs x8, x8, x27 + mul x27, x3, x1 + umulh x26, x26, x0 + adcs x26, x28, x26 + umulh x28, x3, x4 + adds x27, x28, x27 + umulh x28, x3, x1 + adcs x22, x28, x22 + umulh x28, x3, x18 + adcs x23, x28, x23 + umulh x28, x3, x6 + adcs x24, x28, x24 + umulh x28, x3, x17 + adcs x25, x28, x25 + mul x28, x3, x16 + umulh x29, x3, x5 + adcs x28, x29, x28 + ldp x2, x30, [sp, #16] + ldr x2, [x2, #48] + umulh x29, x3, x16 + mul x3, x3, x4 + adcs x29, x29, xzr + adds x9, x3, x9 + adcs x3, x27, x7 + umulh x7, x2, x16 + mul x16, x2, x16 + adcs x19, x22, x19 + umulh x22, x2, x5 + mul x5, x2, x5 + adcs x20, x23, x20 + umulh x23, x2, x17 + mul x17, x2, x17 + adcs x21, x24, x21 + umulh x24, x2, x6 + mul x6, x2, x6 + adcs x8, x25, x8 + mul x25, x9, x30 + adcs x26, x28, x26 + mul x27, x25, x14 + adcs x28, x29, xzr + cmn x27, x9 + umulh x9, x2, x18 + mul x18, x2, x18 + umulh x27, x2, x1 + mul x1, x2, x1 + umulh x29, x2, x4 + mul x2, x2, x4 + mul x4, x25, x13 + adcs x3, x4, x3 + mul x4, x25, x12 + adcs x4, x4, x19 + mul x19, x25, x11 + adcs x19, x19, x20 + mul x20, x25, x10 + adcs x20, x20, x21 + mul x21, x25, x15 + adcs x8, x21, x8 + mul x21, x25, x0 + adcs x21, x21, x26 + adcs x26, x28, xzr + umulh x28, x25, x14 + adds x3, x3, x28 + umulh x28, x25, x13 + adcs x4, x4, x28 + umulh x28, x25, x12 + adcs x19, x19, x28 + umulh x28, x25, x11 + adcs x20, x20, x28 + umulh x28, x25, x10 + adcs x8, x8, x28 + umulh x28, x25, x15 + adcs x21, x21, x28 + umulh x25, x25, x0 + adcs x25, x26, x25 + adds x1, x29, x1 + adcs x18, x27, x18 + adcs x9, x9, x6 + adcs x17, x24, x17 + adcs x5, x23, x5 + adcs x16, x22, x16 + adcs x6, x7, xzr + adds x2, x2, x3 + adcs x1, x1, x4 + adcs x18, x18, x19 + adcs x9, x9, x20 + adcs x8, x17, x8 + adcs x17, x5, x21 + mul x3, x2, x30 + adcs x16, x16, x25 + mul x4, x3, x14 + adcs x5, x6, xzr + cmn x4, x2 + mul x2, x3, x13 + adcs x1, x2, x1 + mul x2, x3, x12 + adcs x18, x2, x18 + mul x2, x3, x11 + adcs x9, x2, x9 + mul x2, x3, x10 + adcs x8, x2, x8 + mul x2, x3, x15 + adcs x17, x2, x17 + mul x2, x3, x0 + adcs x16, x2, x16 + umulh x2, x3, x14 + adcs x4, x5, xzr + adds x1, x1, x2 + umulh x2, x3, x13 + adcs x18, x18, x2 + umulh x2, x3, x12 + adcs x9, x9, x2 + umulh x2, x3, x11 + adcs x8, x8, x2 + umulh x2, x3, x10 + adcs x17, x17, x2 + umulh x2, x3, x15 + adcs x16, x16, x2 + umulh x2, x3, x0 + adcs x2, x4, x2 + subs x14, x1, x14 + sbcs x13, x18, x13 + sbcs x12, x9, x12 + sbcs x11, x8, x11 + sbcs x10, x17, x10 + sbcs x15, x16, x15 + sbcs x0, x2, x0 + asr x3, x0, #63 + cmp x3, #0 // =0 + csel x14, x1, x14, lt + csel x13, x18, x13, lt + csel x9, x9, x12, lt + csel x8, x8, x11, lt + csel x10, x17, x10, lt + csel x11, x16, x15, lt + csel x12, x2, x0, lt + ldr x15, [sp, #8] // 8-byte Folded Reload + stp x14, 
x13, [x15] + stp x9, x8, [x15, #16] + stp x10, x11, [x15, #32] + str x12, [x15, #48] + add sp, sp, #32 // =32 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end99: + .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L + + .globl mcl_fp_montRed7L + .align 2 + .type mcl_fp_montRed7L,@function +mcl_fp_montRed7L: // @mcl_fp_montRed7L +// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + ldur x15, [x2, #-8] + ldp x9, x8, [x2, #40] + ldp x11, x10, [x2, #24] + ldp x13, x12, [x2, #8] + ldr x14, [x2] + ldp x17, x18, [x1, #96] + ldp x2, x3, [x1, #80] + ldp x4, x5, [x1, #64] + ldp x6, x7, [x1, #48] + ldp x19, x20, [x1, #32] + ldp x21, x22, [x1, #16] + ldp x16, x1, [x1] + mul x23, x16, x15 + mul x24, x23, x8 + mul x25, x23, x9 + mul x26, x23, x10 + mul x27, x23, x11 + mul x28, x23, x12 + mul x29, x23, x13 + umulh x30, x23, x14 + adds x29, x30, x29 + umulh x30, x23, x13 + adcs x28, x30, x28 + umulh x30, x23, x12 + adcs x27, x30, x27 + umulh x30, x23, x11 + adcs x26, x30, x26 + umulh x30, x23, x10 + adcs x25, x30, x25 + umulh x30, x23, x9 + adcs x24, x30, x24 + umulh x30, x23, x8 + mul x23, x23, x14 + adcs x30, x30, xzr + cmn x16, x23 + adcs x16, x1, x29 + adcs x1, x21, x28 + mul x21, x16, x15 + adcs x22, x22, x27 + mul x23, x21, x8 + mul x27, x21, x9 + mul x28, x21, x10 + mul x29, x21, x11 + adcs x19, x19, x26 + mul x26, x21, x12 + adcs x20, x20, x25 + mul x25, x21, x13 + adcs x6, x6, x24 + umulh x24, x21, x14 + adcs x7, x7, x30 + adcs x4, x4, xzr + adcs x5, x5, xzr + adcs x2, x2, xzr + adcs x3, x3, xzr + adcs x17, x17, xzr + adcs x18, x18, xzr + adcs x30, xzr, xzr + adds x24, x24, x25 + umulh x25, x21, x13 + adcs x25, x25, x26 + umulh x26, x21, x12 + adcs x26, x26, x29 + umulh x29, x21, x11 + adcs x28, x29, x28 + umulh x29, x21, x10 + adcs x27, x29, x27 + umulh x29, x21, x9 + adcs x23, x29, x23 + umulh x29, x21, x8 + mul x21, x21, x14 + adcs x29, x29, xzr + cmn x21, x16 + adcs x16, x24, x1 + adcs x1, x25, x22 + mul x21, x16, x15 + adcs x19, x26, x19 + mul x22, x21, x8 + mul x24, x21, x9 + mul x25, x21, x10 + adcs x20, x28, x20 + mul x26, x21, x11 + adcs x6, x27, x6 + mul x27, x21, x12 + adcs x7, x23, x7 + mul x23, x21, x13 + adcs x4, x29, x4 + umulh x28, x21, x14 + adcs x5, x5, xzr + adcs x2, x2, xzr + adcs x3, x3, xzr + adcs x17, x17, xzr + adcs x18, x18, xzr + adcs x29, x30, xzr + adds x23, x28, x23 + umulh x28, x21, x13 + adcs x27, x28, x27 + umulh x28, x21, x12 + adcs x26, x28, x26 + umulh x28, x21, x11 + adcs x25, x28, x25 + umulh x28, x21, x10 + adcs x24, x28, x24 + umulh x28, x21, x9 + adcs x22, x28, x22 + umulh x28, x21, x8 + mul x21, x21, x14 + adcs x28, x28, xzr + cmn x21, x16 + adcs x16, x23, x1 + adcs x1, x27, x19 + mul x19, x16, x15 + adcs x20, x26, x20 + mul x21, x19, x8 + mul x23, x19, x9 + mul x26, x19, x10 + adcs x6, x25, x6 + mul x25, x19, x11 + adcs x7, x24, x7 + mul x24, x19, x12 + adcs x4, x22, x4 + mul x22, x19, x13 + adcs x5, x28, x5 + umulh x27, x19, x14 + adcs x2, x2, xzr + adcs x3, x3, xzr + adcs x17, x17, xzr + adcs x18, x18, xzr + adcs x28, x29, xzr + adds x22, x27, x22 + umulh x27, x19, x13 + adcs x24, x27, x24 + umulh x27, x19, x12 + adcs x25, x27, x25 + umulh x27, x19, x11 + adcs x26, x27, x26 + umulh x27, x19, x10 + adcs x23, x27, x23 + umulh x27, x19, x9 + adcs x21, x27, x21 + umulh x27, x19, x8 + mul x19, x19, x14 + adcs x27, x27, 
xzr + cmn x19, x16 + adcs x16, x22, x1 + adcs x1, x24, x20 + mul x19, x16, x15 + adcs x6, x25, x6 + mul x20, x19, x8 + mul x22, x19, x9 + mul x24, x19, x10 + adcs x7, x26, x7 + mul x25, x19, x11 + adcs x4, x23, x4 + mul x23, x19, x12 + adcs x5, x21, x5 + mul x21, x19, x13 + adcs x2, x27, x2 + umulh x26, x19, x14 + adcs x3, x3, xzr + adcs x17, x17, xzr + adcs x18, x18, xzr + adcs x27, x28, xzr + adds x21, x26, x21 + umulh x26, x19, x13 + adcs x23, x26, x23 + umulh x26, x19, x12 + adcs x25, x26, x25 + umulh x26, x19, x11 + adcs x24, x26, x24 + umulh x26, x19, x10 + adcs x22, x26, x22 + umulh x26, x19, x9 + adcs x20, x26, x20 + umulh x26, x19, x8 + mul x19, x19, x14 + adcs x26, x26, xzr + cmn x19, x16 + adcs x16, x21, x1 + adcs x1, x23, x6 + mul x6, x16, x15 + adcs x7, x25, x7 + mul x19, x6, x8 + mul x21, x6, x9 + mul x23, x6, x10 + adcs x4, x24, x4 + mul x24, x6, x11 + adcs x5, x22, x5 + mul x22, x6, x12 + adcs x2, x20, x2 + mul x20, x6, x13 + adcs x3, x26, x3 + umulh x25, x6, x14 + adcs x17, x17, xzr + adcs x18, x18, xzr + adcs x26, x27, xzr + adds x20, x25, x20 + umulh x25, x6, x13 + adcs x22, x25, x22 + umulh x25, x6, x12 + adcs x24, x25, x24 + umulh x25, x6, x11 + adcs x23, x25, x23 + umulh x25, x6, x10 + adcs x21, x25, x21 + umulh x25, x6, x9 + adcs x19, x25, x19 + umulh x25, x6, x8 + mul x6, x6, x14 + adcs x25, x25, xzr + cmn x6, x16 + adcs x16, x20, x1 + adcs x1, x22, x7 + mul x15, x16, x15 + adcs x4, x24, x4 + mul x6, x15, x8 + mul x7, x15, x9 + mul x20, x15, x10 + adcs x5, x23, x5 + mul x22, x15, x11 + adcs x2, x21, x2 + mul x21, x15, x12 + adcs x3, x19, x3 + mul x19, x15, x13 + adcs x17, x25, x17 + umulh x23, x15, x14 + adcs x18, x18, xzr + adcs x24, x26, xzr + adds x19, x23, x19 + umulh x23, x15, x13 + adcs x21, x23, x21 + umulh x23, x15, x12 + adcs x22, x23, x22 + umulh x23, x15, x11 + adcs x20, x23, x20 + umulh x23, x15, x10 + adcs x7, x23, x7 + umulh x23, x15, x9 + adcs x6, x23, x6 + umulh x23, x15, x8 + mul x15, x15, x14 + adcs x23, x23, xzr + cmn x15, x16 + adcs x15, x19, x1 + adcs x16, x21, x4 + adcs x1, x22, x5 + adcs x2, x20, x2 + adcs x3, x7, x3 + adcs x17, x6, x17 + adcs x18, x23, x18 + adcs x4, x24, xzr + subs x14, x15, x14 + sbcs x13, x16, x13 + sbcs x12, x1, x12 + sbcs x11, x2, x11 + sbcs x10, x3, x10 + sbcs x9, x17, x9 + sbcs x8, x18, x8 + sbcs x4, x4, xzr + tst x4, #0x1 + csel x14, x15, x14, ne + csel x13, x16, x13, ne + csel x12, x1, x12, ne + csel x11, x2, x11, ne + csel x10, x3, x10, ne + csel x9, x17, x9, ne + csel x8, x18, x8, ne + stp x14, x13, [x0] + stp x12, x11, [x0, #16] + stp x10, x9, [x0, #32] + str x8, [x0, #48] + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end100: + .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L + + .globl mcl_fp_addPre7L + .align 2 + .type mcl_fp_addPre7L,@function +mcl_fp_addPre7L: // @mcl_fp_addPre7L +// BB#0: + ldp x11, x8, [x2, #40] + ldp x13, x9, [x1, #40] + ldp x15, x10, [x2, #24] + ldp x17, x14, [x2, #8] + ldr x16, [x2] + ldp x18, x2, [x1] + ldr x3, [x1, #16] + ldp x1, x12, [x1, #24] + adds x16, x16, x18 + str x16, [x0] + adcs x16, x17, x2 + adcs x14, x14, x3 + stp x16, x14, [x0, #8] + adcs x14, x15, x1 + adcs x10, x10, x12 + stp x14, x10, [x0, #24] + adcs x10, x11, x13 + adcs x9, x8, x9 + adcs x8, xzr, xzr + stp x10, x9, [x0, #40] + mov x0, x8 + ret +.Lfunc_end101: + .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L + + .globl mcl_fp_subPre7L + .align 2 + .type mcl_fp_subPre7L,@function 
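+// mcl_fp_subPre7L(z, x, y): plain 7-limb subtract z = x - y with no modular
+// reduction; the borrow (0 or 1, via ngcs/and) is returned in x0.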
+mcl_fp_subPre7L: // @mcl_fp_subPre7L +// BB#0: + ldp x11, x8, [x2, #40] + ldp x13, x9, [x1, #40] + ldp x15, x10, [x2, #24] + ldp x17, x14, [x2, #8] + ldr x16, [x2] + ldp x18, x2, [x1] + ldr x3, [x1, #16] + ldp x1, x12, [x1, #24] + subs x16, x18, x16 + str x16, [x0] + sbcs x16, x2, x17 + sbcs x14, x3, x14 + stp x16, x14, [x0, #8] + sbcs x14, x1, x15 + sbcs x10, x12, x10 + stp x14, x10, [x0, #24] + sbcs x10, x13, x11 + sbcs x9, x9, x8 + ngcs x8, xzr + and x8, x8, #0x1 + stp x10, x9, [x0, #40] + mov x0, x8 + ret +.Lfunc_end102: + .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L + + .globl mcl_fp_shr1_7L + .align 2 + .type mcl_fp_shr1_7L,@function +mcl_fp_shr1_7L: // @mcl_fp_shr1_7L +// BB#0: + ldp x8, x9, [x1] + ldp x14, x10, [x1, #40] + ldp x11, x12, [x1, #16] + ldr x13, [x1, #32] + extr x8, x9, x8, #1 + extr x9, x11, x9, #1 + extr x11, x12, x11, #1 + extr x12, x13, x12, #1 + extr x13, x14, x13, #1 + extr x14, x10, x14, #1 + lsr x10, x10, #1 + stp x8, x9, [x0] + stp x11, x12, [x0, #16] + stp x13, x14, [x0, #32] + str x10, [x0, #48] + ret +.Lfunc_end103: + .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L + + .globl mcl_fp_add7L + .align 2 + .type mcl_fp_add7L,@function +mcl_fp_add7L: // @mcl_fp_add7L +// BB#0: + ldp x11, x8, [x2, #40] + ldp x13, x9, [x1, #40] + ldp x15, x10, [x2, #24] + ldp x17, x14, [x2, #8] + ldr x16, [x2] + ldp x18, x2, [x1] + ldr x4, [x1, #16] + ldp x1, x12, [x1, #24] + adds x16, x16, x18 + ldp x5, x18, [x3, #40] + adcs x17, x17, x2 + adcs x2, x14, x4 + ldr x4, [x3, #32] + adcs x15, x15, x1 + adcs x10, x10, x12 + ldp x12, x1, [x3] + stp x16, x17, [x0] + stp x2, x15, [x0, #16] + adcs x6, x11, x13 + stp x10, x6, [x0, #32] + adcs x8, x8, x9 + str x8, [x0, #48] + adcs x7, xzr, xzr + ldp x9, x11, [x3, #16] + subs x14, x16, x12 + sbcs x13, x17, x1 + sbcs x12, x2, x9 + sbcs x11, x15, x11 + sbcs x10, x10, x4 + sbcs x9, x6, x5 + sbcs x8, x8, x18 + sbcs x15, x7, xzr + and w15, w15, #0x1 + tbnz w15, #0, .LBB104_2 +// BB#1: // %nocarry + stp x14, x13, [x0] + stp x12, x11, [x0, #16] + stp x10, x9, [x0, #32] + str x8, [x0, #48] +.LBB104_2: // %carry + ret +.Lfunc_end104: + .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L + + .globl mcl_fp_addNF7L + .align 2 + .type mcl_fp_addNF7L,@function +mcl_fp_addNF7L: // @mcl_fp_addNF7L +// BB#0: + ldp x11, x8, [x1, #40] + ldp x13, x9, [x2, #40] + ldp x15, x10, [x1, #24] + ldp x17, x14, [x1, #8] + ldr x16, [x1] + ldp x18, x1, [x2] + ldr x4, [x2, #16] + ldp x2, x12, [x2, #24] + adds x16, x18, x16 + adcs x17, x1, x17 + adcs x14, x4, x14 + ldp x4, x18, [x3, #40] + adcs x15, x2, x15 + adcs x10, x12, x10 + ldp x12, x2, [x3] + adcs x11, x13, x11 + ldr x13, [x3, #16] + ldp x3, x1, [x3, #24] + adcs x8, x9, x8 + subs x9, x16, x12 + sbcs x12, x17, x2 + sbcs x13, x14, x13 + sbcs x2, x15, x3 + sbcs x1, x10, x1 + sbcs x3, x11, x4 + sbcs x18, x8, x18 + asr x4, x18, #63 + cmp x4, #0 // =0 + csel x9, x16, x9, lt + csel x12, x17, x12, lt + csel x13, x14, x13, lt + csel x14, x15, x2, lt + csel x10, x10, x1, lt + csel x11, x11, x3, lt + csel x8, x8, x18, lt + stp x9, x12, [x0] + stp x13, x14, [x0, #16] + stp x10, x11, [x0, #32] + str x8, [x0, #48] + ret +.Lfunc_end105: + .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L + + .globl mcl_fp_sub7L + .align 2 + .type mcl_fp_sub7L,@function +mcl_fp_sub7L: // @mcl_fp_sub7L +// BB#0: + ldp x13, x14, [x2, #40] + ldp x17, x15, [x1, #40] + ldp x11, x12, [x2, #24] + ldp x9, x10, [x2, #8] + ldr x8, [x2] + ldp x18, x2, [x1] + ldr x4, [x1, #16] + ldp x1, x16, [x1, #24] + subs x8, x18, x8 + sbcs x9, x2, x9 + stp x8, x9, [x0] 
+ sbcs x10, x4, x10 + sbcs x11, x1, x11 + stp x10, x11, [x0, #16] + sbcs x12, x16, x12 + sbcs x13, x17, x13 + stp x12, x13, [x0, #32] + sbcs x14, x15, x14 + str x14, [x0, #48] + ngcs x15, xzr + and w15, w15, #0x1 + tbnz w15, #0, .LBB106_2 +// BB#1: // %nocarry + ret +.LBB106_2: // %carry + ldp x16, x17, [x3] + ldp x18, x1, [x3, #16] + ldr x2, [x3, #32] + ldp x3, x15, [x3, #40] + adds x8, x16, x8 + adcs x9, x17, x9 + adcs x10, x18, x10 + adcs x11, x1, x11 + adcs x12, x2, x12 + adcs x13, x3, x13 + adcs x14, x15, x14 + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + stp x12, x13, [x0, #32] + str x14, [x0, #48] + ret +.Lfunc_end106: + .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L + + .globl mcl_fp_subNF7L + .align 2 + .type mcl_fp_subNF7L,@function +mcl_fp_subNF7L: // @mcl_fp_subNF7L +// BB#0: + ldp x11, x8, [x2, #40] + ldp x13, x9, [x1, #40] + ldp x15, x10, [x2, #24] + ldp x17, x14, [x2, #8] + ldr x16, [x2] + ldp x18, x2, [x1] + ldr x4, [x1, #16] + ldp x1, x12, [x1, #24] + subs x16, x18, x16 + sbcs x17, x2, x17 + sbcs x14, x4, x14 + ldp x4, x18, [x3, #40] + sbcs x15, x1, x15 + sbcs x10, x12, x10 + ldp x12, x1, [x3] + sbcs x11, x13, x11 + ldr x13, [x3, #16] + ldp x3, x2, [x3, #24] + sbcs x8, x9, x8 + asr x9, x8, #63 + and x1, x9, x1 + and x13, x9, x13 + and x3, x9, x3 + and x2, x9, x2 + and x4, x9, x4 + and x18, x9, x18 + extr x9, x9, x8, #63 + and x9, x9, x12 + adds x9, x9, x16 + str x9, [x0] + adcs x9, x1, x17 + str x9, [x0, #8] + adcs x9, x13, x14 + str x9, [x0, #16] + adcs x9, x3, x15 + str x9, [x0, #24] + adcs x9, x2, x10 + str x9, [x0, #32] + adcs x9, x4, x11 + adcs x8, x18, x8 + stp x9, x8, [x0, #40] + ret +.Lfunc_end107: + .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L + + .globl mcl_fpDbl_add7L + .align 2 + .type mcl_fpDbl_add7L,@function +mcl_fpDbl_add7L: // @mcl_fpDbl_add7L +// BB#0: + stp x28, x27, [sp, #-96]! 
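+// mcl_fpDbl_add7L(z, x, y, p): 14-limb double-width add; as in the 6-limb
+// variant, only the high 7 limbs are conditionally reduced mod p.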
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + ldp x8, x9, [x2, #96] + ldp x10, x11, [x1, #96] + ldp x12, x13, [x2, #80] + ldp x14, x15, [x1, #80] + ldp x16, x17, [x2, #64] + ldp x18, x4, [x1, #64] + ldp x5, x6, [x2, #48] + ldp x7, x19, [x1, #48] + ldp x20, x21, [x2, #32] + ldp x22, x23, [x1, #32] + ldp x24, x25, [x2, #16] + ldp x27, x2, [x2] + ldp x28, x29, [x1, #16] + ldp x26, x1, [x1] + adds x26, x27, x26 + ldr x27, [x3, #48] + str x26, [x0] + adcs x1, x2, x1 + ldp x2, x26, [x3, #32] + str x1, [x0, #8] + adcs x1, x24, x28 + ldp x24, x28, [x3, #16] + str x1, [x0, #16] + ldp x1, x3, [x3] + adcs x25, x25, x29 + adcs x20, x20, x22 + stp x25, x20, [x0, #24] + adcs x20, x21, x23 + adcs x5, x5, x7 + stp x20, x5, [x0, #40] + adcs x5, x6, x19 + adcs x16, x16, x18 + adcs x17, x17, x4 + adcs x12, x12, x14 + adcs x13, x13, x15 + adcs x8, x8, x10 + adcs x9, x9, x11 + adcs x10, xzr, xzr + subs x11, x5, x1 + sbcs x14, x16, x3 + sbcs x15, x17, x24 + sbcs x18, x12, x28 + sbcs x1, x13, x2 + sbcs x2, x8, x26 + sbcs x3, x9, x27 + sbcs x10, x10, xzr + tst x10, #0x1 + csel x10, x5, x11, ne + csel x11, x16, x14, ne + csel x14, x17, x15, ne + csel x12, x12, x18, ne + csel x13, x13, x1, ne + csel x8, x8, x2, ne + csel x9, x9, x3, ne + stp x10, x11, [x0, #56] + stp x14, x12, [x0, #72] + stp x13, x8, [x0, #88] + str x9, [x0, #104] + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end108: + .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L + + .globl mcl_fpDbl_sub7L + .align 2 + .type mcl_fpDbl_sub7L,@function +mcl_fpDbl_sub7L: // @mcl_fpDbl_sub7L +// BB#0: + stp x28, x27, [sp, #-96]! 
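+// mcl_fpDbl_sub7L(z, x, y, p): 14-limb double-width subtract; when the high
+// half borrows, p is added back (csel picks p's limbs or xzr before the
+// closing add chain).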
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + ldp x9, x8, [x2, #96] + ldp x11, x10, [x1, #96] + ldp x12, x13, [x2, #80] + ldp x14, x15, [x1, #80] + ldp x16, x17, [x2, #64] + ldp x18, x4, [x1, #64] + ldp x5, x6, [x2, #48] + ldp x7, x19, [x1, #48] + ldp x20, x21, [x2, #32] + ldp x22, x23, [x1, #32] + ldp x24, x25, [x2, #16] + ldp x26, x2, [x2] + ldp x28, x29, [x1, #16] + ldp x27, x1, [x1] + subs x26, x27, x26 + ldr x27, [x3, #48] + str x26, [x0] + sbcs x1, x1, x2 + ldp x2, x26, [x3, #32] + str x1, [x0, #8] + sbcs x1, x28, x24 + ldp x24, x28, [x3, #16] + str x1, [x0, #16] + ldp x1, x3, [x3] + sbcs x25, x29, x25 + sbcs x20, x22, x20 + stp x25, x20, [x0, #24] + sbcs x20, x23, x21 + sbcs x5, x7, x5 + stp x20, x5, [x0, #40] + sbcs x5, x19, x6 + sbcs x16, x18, x16 + sbcs x17, x4, x17 + sbcs x12, x14, x12 + sbcs x13, x15, x13 + sbcs x9, x11, x9 + sbcs x8, x10, x8 + ngcs x10, xzr + tst x10, #0x1 + csel x10, x27, xzr, ne + csel x11, x26, xzr, ne + csel x14, x2, xzr, ne + csel x15, x28, xzr, ne + csel x18, x24, xzr, ne + csel x2, x3, xzr, ne + csel x1, x1, xzr, ne + adds x1, x1, x5 + adcs x16, x2, x16 + stp x1, x16, [x0, #56] + adcs x16, x18, x17 + adcs x12, x15, x12 + stp x16, x12, [x0, #72] + adcs x12, x14, x13 + adcs x9, x11, x9 + stp x12, x9, [x0, #88] + adcs x8, x10, x8 + str x8, [x0, #104] + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end109: + .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L + + .align 2 + .type .LmulPv512x64,@function +.LmulPv512x64: // @mulPv512x64 +// BB#0: + ldr x9, [x0] + mul x10, x9, x1 + str x10, [x8] + ldr x10, [x0, #8] + umulh x9, x9, x1 + mul x11, x10, x1 + adds x9, x9, x11 + str x9, [x8, #8] + ldr x9, [x0, #16] + umulh x10, x10, x1 + mul x11, x9, x1 + adcs x10, x10, x11 + str x10, [x8, #16] + ldr x10, [x0, #24] + umulh x9, x9, x1 + mul x11, x10, x1 + adcs x9, x9, x11 + str x9, [x8, #24] + ldr x9, [x0, #32] + umulh x10, x10, x1 + mul x11, x9, x1 + adcs x10, x10, x11 + str x10, [x8, #32] + ldr x10, [x0, #40] + umulh x9, x9, x1 + mul x11, x10, x1 + adcs x9, x9, x11 + str x9, [x8, #40] + ldr x9, [x0, #48] + umulh x10, x10, x1 + mul x11, x9, x1 + adcs x10, x10, x11 + str x10, [x8, #48] + ldr x10, [x0, #56] + umulh x9, x9, x1 + mul x11, x10, x1 + umulh x10, x10, x1 + adcs x9, x9, x11 + str x9, [x8, #56] + adcs x9, x10, xzr + str x9, [x8, #64] + ret +.Lfunc_end110: + .size .LmulPv512x64, .Lfunc_end110-.LmulPv512x64 + + .globl mcl_fp_mulUnitPre8L + .align 2 + .type mcl_fp_mulUnitPre8L,@function +mcl_fp_mulUnitPre8L: // @mcl_fp_mulUnitPre8L +// BB#0: + stp x20, x19, [sp, #-32]! + stp x29, x30, [sp, #16] + add x29, sp, #16 // =16 + sub sp, sp, #80 // =80 + mov x19, x0 + mov x8, sp + mov x0, x1 + mov x1, x2 + bl .LmulPv512x64 + ldp x9, x8, [sp, #56] + ldp x11, x10, [sp, #40] + ldp x16, x12, [sp, #24] + ldp x13, x14, [sp] + ldr x15, [sp, #16] + stp x13, x14, [x19] + stp x15, x16, [x19, #16] + stp x12, x11, [x19, #32] + stp x10, x9, [x19, #48] + str x8, [x19, #64] + sub sp, x29, #16 // =16 + ldp x29, x30, [sp, #16] + ldp x20, x19, [sp], #32 + ret +.Lfunc_end111: + .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L + + .globl mcl_fpDbl_mulPre8L + .align 2 + .type mcl_fpDbl_mulPre8L,@function +mcl_fpDbl_mulPre8L: // @mcl_fpDbl_mulPre8L +// BB#0: + stp x28, x27, [sp, #-96]! 
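+// mcl_fpDbl_mulPre8L(z, x, y): full 8x8-limb product via one Karatsuba
+// level: three mcl_fpDbl_mulPre4L calls on the 4-limb halves, then
+// sign-corrected middle terms are folded into z[32..95].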
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #144 // =144 + mov x20, x2 + mov x21, x1 + mov x19, x0 + bl mcl_fpDbl_mulPre4L + add x0, x19, #64 // =64 + add x1, x21, #32 // =32 + add x2, x20, #32 // =32 + bl mcl_fpDbl_mulPre4L + ldp x8, x9, [x20, #48] + ldp x10, x11, [x20, #32] + ldp x12, x13, [x20] + ldp x14, x15, [x20, #16] + adds x18, x12, x10 + str x18, [sp, #8] // 8-byte Folded Spill + ldp x10, x12, [x21, #16] + ldp x16, x17, [x21, #48] + adcs x22, x13, x11 + ldp x11, x13, [x21] + adcs x23, x14, x8 + ldp x8, x14, [x21, #32] + stp x18, x22, [sp, #16] + adcs x21, x15, x9 + stp x23, x21, [sp, #32] + adcs x24, xzr, xzr + adds x25, x11, x8 + adcs x26, x13, x14 + stp x25, x26, [sp, #48] + adcs x27, x10, x16 + adcs x28, x12, x17 + stp x27, x28, [sp, #64] + adcs x20, xzr, xzr + add x0, sp, #80 // =80 + add x1, sp, #48 // =48 + add x2, sp, #16 // =16 + bl mcl_fpDbl_mulPre4L + cmp x24, #0 // =0 + csel x8, x28, xzr, ne + and x9, x24, x20 + ldp x11, x10, [sp, #128] + ldp x13, x12, [sp, #112] + ldp x14, x15, [x19, #48] + ldp x16, x17, [x19, #32] + ldp x18, x0, [x19, #16] + csel x1, x27, xzr, ne + csel x2, x26, xzr, ne + csel x3, x25, xzr, ne + cmp x20, #0 // =0 + ldp x4, x5, [x19] + csel x6, x21, xzr, ne + csel x7, x23, xzr, ne + csel x20, x22, xzr, ne + ldr x21, [sp, #8] // 8-byte Folded Reload + csel x21, x21, xzr, ne + adds x3, x21, x3 + adcs x2, x20, x2 + ldp x20, x21, [sp, #96] + adcs x1, x7, x1 + adcs x8, x6, x8 + adcs x6, xzr, xzr + adds x13, x3, x13 + ldp x3, x7, [sp, #80] + adcs x12, x2, x12 + adcs x11, x1, x11 + ldp x1, x2, [x19, #112] + adcs x8, x8, x10 + adcs x9, x6, x9 + ldp x10, x6, [x19, #96] + subs x3, x3, x4 + sbcs x4, x7, x5 + ldp x5, x7, [x19, #80] + sbcs x18, x20, x18 + sbcs x0, x21, x0 + ldp x20, x21, [x19, #64] + sbcs x13, x13, x16 + sbcs x12, x12, x17 + sbcs x11, x11, x14 + sbcs x8, x8, x15 + sbcs x9, x9, xzr + subs x3, x3, x20 + sbcs x4, x4, x21 + sbcs x18, x18, x5 + sbcs x0, x0, x7 + sbcs x13, x13, x10 + sbcs x12, x12, x6 + sbcs x11, x11, x1 + sbcs x8, x8, x2 + sbcs x9, x9, xzr + adds x16, x16, x3 + str x16, [x19, #32] + adcs x16, x17, x4 + adcs x14, x14, x18 + stp x16, x14, [x19, #40] + adcs x14, x15, x0 + adcs x13, x20, x13 + stp x14, x13, [x19, #56] + adcs x12, x21, x12 + adcs x11, x5, x11 + stp x12, x11, [x19, #72] + adcs x8, x7, x8 + str x8, [x19, #88] + adcs x8, x10, x9 + str x8, [x19, #96] + adcs x8, x6, xzr + str x8, [x19, #104] + adcs x8, x1, xzr + str x8, [x19, #112] + adcs x8, x2, xzr + str x8, [x19, #120] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end112: + .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L + + .globl mcl_fpDbl_sqrPre8L + .align 2 + .type mcl_fpDbl_sqrPre8L,@function +mcl_fpDbl_sqrPre8L: // @mcl_fpDbl_sqrPre8L +// BB#0: + stp x28, x27, [sp, #-96]! 
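+// mcl_fpDbl_sqrPre8L(z, x): squaring analogue of mulPre8L; the same
+// Karatsuba split, with x reused as both multiplicands of each half
+// product (x2 := x1 before each mcl_fpDbl_mulPre4L call).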
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #128 // =128 + mov x20, x1 + mov x19, x0 + mov x2, x20 + bl mcl_fpDbl_mulPre4L + add x0, x19, #64 // =64 + add x1, x20, #32 // =32 + mov x2, x1 + bl mcl_fpDbl_mulPre4L + ldp x8, x9, [x20, #16] + ldp x10, x11, [x20, #32] + ldp x12, x13, [x20] + ldp x14, x15, [x20, #48] + adds x22, x12, x10 + adcs x23, x13, x11 + adcs x20, x8, x14 + adcs x21, x9, x15 + stp x22, x23, [sp, #32] + stp x22, x23, [sp] + stp x20, x21, [sp, #48] + stp x20, x21, [sp, #16] + adcs x24, xzr, xzr + add x0, sp, #64 // =64 + add x1, sp, #32 // =32 + mov x2, sp + bl mcl_fpDbl_mulPre4L + ldp x8, x9, [x19, #48] + ldp x10, x11, [x19] + ldp x12, x13, [sp, #64] + ldp x14, x15, [x19, #16] + ldp x16, x17, [sp, #80] + ldp x18, x0, [x19, #32] + subs x10, x12, x10 + ldp x1, x12, [sp, #96] + sbcs x11, x13, x11 + sbcs x14, x16, x14 + ldp x13, x16, [sp, #112] + sbcs x15, x17, x15 + sbcs x17, x1, x18 + ldp x1, x2, [x19, #64] + ldp x3, x4, [x19, #80] + ldp x5, x6, [x19, #96] + ldp x7, x25, [x19, #112] + lsr x26, x21, #63 + sbcs x12, x12, x0 + sbcs x13, x13, x8 + sbcs x16, x16, x9 + sbcs x27, x24, xzr + subs x10, x10, x1 + sbcs x11, x11, x2 + sbcs x14, x14, x3 + sbcs x15, x15, x4 + sbcs x17, x17, x5 + sbcs x12, x12, x6 + sbcs x13, x13, x7 + sbcs x16, x16, x25 + sbcs x27, x27, xzr + adds x22, x22, x22 + adcs x23, x23, x23 + adcs x20, x20, x20 + adcs x21, x21, x21 + cmp x24, #0 // =0 + csel x24, x26, xzr, ne + csel x21, x21, xzr, ne + csel x20, x20, xzr, ne + csel x23, x23, xzr, ne + csel x22, x22, xzr, ne + adds x17, x17, x22 + adcs x12, x12, x23 + adcs x13, x13, x20 + adcs x16, x16, x21 + adcs x20, x27, x24 + adds x10, x10, x18 + str x10, [x19, #32] + adcs x10, x11, x0 + adcs x8, x14, x8 + stp x10, x8, [x19, #40] + adcs x8, x15, x9 + str x8, [x19, #56] + adcs x8, x17, x1 + str x8, [x19, #64] + adcs x8, x12, x2 + str x8, [x19, #72] + adcs x8, x13, x3 + str x8, [x19, #80] + adcs x8, x16, x4 + str x8, [x19, #88] + adcs x8, x20, x5 + str x8, [x19, #96] + adcs x8, x6, xzr + str x8, [x19, #104] + adcs x8, x7, xzr + str x8, [x19, #112] + adcs x8, x25, xzr + str x8, [x19, #120] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end113: + .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L + + .globl mcl_fp_mont8L + .align 2 + .type mcl_fp_mont8L,@function +mcl_fp_mont8L: // @mcl_fp_mont8L +// BB#0: + stp x28, x27, [sp, #-96]! 
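+// mcl_fp_mont8L: 8-limb Montgomery multiplication, r = x*y*R^-1 mod p
+// with R = 2^512. rp = -p^-1 mod 2^64 is loaded from p[-1]
+// (ldur x19, [x20, #-8]). Each of the eight rows below follows the
+// usual word-serial pattern (schematic):
+//   t += x * y[i]            // bl .LmulPv512x64 on the operand
+//   m  = t[0] * rp mod 2^64  // mul x1, x19, <rp>
+//   t  = (t + m * p) >> 64   // bl .LmulPv512x64 on p, cmn/adcs fold
+// and the tail performs one conditional subtraction of p
+// (subs..sbcs, then tst/csel) to bring the result into [0, p).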
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #1424 // =1424 + mov x20, x3 + mov x26, x2 + str x26, [sp, #120] // 8-byte Folded Spill + ldur x19, [x20, #-8] + str x19, [sp, #136] // 8-byte Folded Spill + ldr x9, [x26] + mov x27, x1 + str x27, [sp, #128] // 8-byte Folded Spill + str x0, [sp, #112] // 8-byte Folded Spill + sub x8, x29, #160 // =160 + mov x0, x27 + mov x1, x9 + bl .LmulPv512x64 + ldur x24, [x29, #-160] + ldur x8, [x29, #-96] + str x8, [sp, #104] // 8-byte Folded Spill + ldur x8, [x29, #-104] + str x8, [sp, #96] // 8-byte Folded Spill + ldur x8, [x29, #-112] + str x8, [sp, #88] // 8-byte Folded Spill + ldur x8, [x29, #-120] + str x8, [sp, #80] // 8-byte Folded Spill + ldur x8, [x29, #-128] + str x8, [sp, #72] // 8-byte Folded Spill + ldur x8, [x29, #-136] + str x8, [sp, #64] // 8-byte Folded Spill + ldur x8, [x29, #-144] + str x8, [sp, #56] // 8-byte Folded Spill + ldur x8, [x29, #-152] + str x8, [sp, #48] // 8-byte Folded Spill + mul x1, x24, x19 + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv512x64 + ldur x8, [x29, #-176] + str x8, [sp, #40] // 8-byte Folded Spill + ldur x8, [x29, #-184] + str x8, [sp, #32] // 8-byte Folded Spill + ldur x8, [x29, #-192] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x19, x28, [x29, #-208] + ldp x21, x23, [x29, #-224] + ldp x25, x22, [x29, #-240] + ldr x1, [x26, #8] + add x8, sp, #1184 // =1184 + mov x0, x27 + bl .LmulPv512x64 + cmn x25, x24 + ldr x8, [sp, #1248] + ldr x9, [sp, #1240] + ldp x10, x12, [sp, #48] + adcs x10, x22, x10 + ldr x11, [sp, #1232] + adcs x12, x21, x12 + ldr x13, [sp, #1224] + ldp x14, x16, [sp, #64] + adcs x14, x23, x14 + ldr x15, [sp, #1216] + adcs x16, x19, x16 + ldr x17, [sp, #1208] + ldp x18, x1, [sp, #80] + adcs x18, x28, x18 + ldr x0, [sp, #1200] + ldp x2, x4, [sp, #24] + adcs x1, x2, x1 + ldr x2, [sp, #1184] + ldp x3, x5, [sp, #96] + adcs x3, x4, x3 + ldr x4, [sp, #1192] + ldr x6, [sp, #40] // 8-byte Folded Reload + adcs x5, x6, x5 + adcs x6, xzr, xzr + adds x19, x10, x2 + adcs x10, x12, x4 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x0 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + adcs x8, x6, x8 + stp x8, x9, [sp, #96] + adcs x8, xzr, xzr + stp x8, x10, [sp, #48] + ldr x22, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x22 + add x8, sp, #1104 // =1104 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #1168] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1160] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1152] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #1144] + str x8, [sp, #8] // 8-byte Folded Spill + ldr x25, [sp, #1136] + ldr x26, [sp, #1128] + ldr x27, [sp, #1120] + ldr x21, [sp, #1112] + ldr x28, [sp, #1104] + ldp x24, x23, [sp, #120] + ldr x1, [x24, #16] + add x8, sp, #1024 // =1024 + mov x0, x23 + bl .LmulPv512x64 + cmn x19, x28 + ldr x8, [sp, #1088] + ldr x9, [sp, #1080] + ldr x10, [sp, #40] // 8-byte Folded Reload + adcs x10, x10, x21 + ldr x11, [sp, #1072] + ldp x14, x12, [sp, #80] + adcs x12, x12, x27 + ldr x13, [sp, #1064] + adcs x14, x14, x26 + ldr x15, [sp, #1056] + ldp x18, x16, [sp, #64] + adcs x16, x16, x25 + ldr x17, [sp, #1048] + ldp x0, x2, [sp, #8] + adcs x18, x18, x0 + ldr 
x0, [sp, #1040] + ldr x1, [sp, #56] // 8-byte Folded Reload + adcs x1, x1, x2 + ldr x2, [sp, #1024] + ldp x5, x3, [sp, #96] + ldp x4, x6, [sp, #24] + adcs x3, x3, x4 + ldr x4, [sp, #1032] + adcs x5, x5, x6 + ldr x6, [sp, #48] // 8-byte Folded Reload + adcs x6, x6, xzr + adds x19, x10, x2 + adcs x10, x12, x4 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x0 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + adcs x8, x6, x8 + stp x8, x9, [sp, #96] + adcs x8, xzr, xzr + stp x8, x10, [sp, #48] + mul x1, x19, x22 + add x8, sp, #944 // =944 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #1008] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1000] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #992] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #984] + str x8, [sp, #8] // 8-byte Folded Spill + ldr x25, [sp, #976] + ldr x26, [sp, #968] + ldr x27, [sp, #960] + ldr x21, [sp, #952] + ldr x28, [sp, #944] + mov x22, x24 + ldr x1, [x22, #24] + add x8, sp, #864 // =864 + mov x0, x23 + bl .LmulPv512x64 + cmn x19, x28 + ldr x8, [sp, #928] + ldr x9, [sp, #920] + ldr x10, [sp, #40] // 8-byte Folded Reload + adcs x10, x10, x21 + ldr x11, [sp, #912] + ldp x14, x12, [sp, #80] + adcs x12, x12, x27 + ldr x13, [sp, #904] + adcs x14, x14, x26 + ldr x15, [sp, #896] + ldp x18, x16, [sp, #64] + adcs x16, x16, x25 + ldr x17, [sp, #888] + ldp x0, x2, [sp, #8] + adcs x18, x18, x0 + ldr x0, [sp, #880] + ldr x1, [sp, #56] // 8-byte Folded Reload + adcs x1, x1, x2 + ldr x2, [sp, #864] + ldp x5, x3, [sp, #96] + ldp x4, x6, [sp, #24] + adcs x3, x3, x4 + ldr x4, [sp, #872] + adcs x5, x5, x6 + ldr x6, [sp, #48] // 8-byte Folded Reload + adcs x6, x6, xzr + adds x19, x10, x2 + adcs x10, x12, x4 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x0 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + adcs x8, x6, x8 + stp x8, x9, [sp, #96] + adcs x8, xzr, xzr + stp x8, x10, [sp, #48] + ldr x23, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x23 + add x8, sp, #784 // =784 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #848] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #840] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #832] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x24, [sp, #824] + ldr x25, [sp, #816] + ldr x26, [sp, #808] + ldr x27, [sp, #800] + ldr x21, [sp, #792] + ldr x28, [sp, #784] + ldr x1, [x22, #32] + add x8, sp, #704 // =704 + ldr x22, [sp, #128] // 8-byte Folded Reload + mov x0, x22 + bl .LmulPv512x64 + cmn x19, x28 + ldr x8, [sp, #768] + ldr x9, [sp, #760] + ldr x10, [sp, #40] // 8-byte Folded Reload + adcs x10, x10, x21 + ldr x11, [sp, #752] + ldp x14, x12, [sp, #80] + adcs x12, x12, x27 + ldr x13, [sp, #744] + adcs x14, x14, x26 + ldr x15, [sp, #736] + ldp x18, x16, [sp, #64] + adcs x16, x16, x25 + ldr x17, [sp, #728] + adcs x18, x18, x24 + ldr x0, [sp, #720] + ldr x1, [sp, #56] // 8-byte Folded Reload + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #704] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldr x4, [sp, #712] + ldr x6, [sp, #32] // 8-byte Folded Reload + adcs x5, 
x5, x6 + ldr x6, [sp, #48] // 8-byte Folded Reload + adcs x6, x6, xzr + adds x19, x10, x2 + adcs x10, x12, x4 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x0 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + adcs x8, x6, x8 + stp x8, x9, [sp, #96] + adcs x8, xzr, xzr + stp x8, x10, [sp, #48] + mul x1, x19, x23 + add x8, sp, #624 // =624 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #688] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #680] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #672] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x24, [sp, #664] + ldr x25, [sp, #656] + ldr x26, [sp, #648] + ldr x27, [sp, #640] + ldr x21, [sp, #632] + ldr x28, [sp, #624] + ldr x23, [sp, #120] // 8-byte Folded Reload + ldr x1, [x23, #40] + add x8, sp, #544 // =544 + mov x0, x22 + bl .LmulPv512x64 + cmn x19, x28 + ldr x8, [sp, #608] + ldr x9, [sp, #600] + ldr x10, [sp, #40] // 8-byte Folded Reload + adcs x10, x10, x21 + ldr x11, [sp, #592] + ldp x14, x12, [sp, #80] + adcs x12, x12, x27 + ldr x13, [sp, #584] + adcs x14, x14, x26 + ldr x15, [sp, #576] + ldp x18, x16, [sp, #64] + adcs x16, x16, x25 + ldr x17, [sp, #568] + adcs x18, x18, x24 + ldr x0, [sp, #560] + ldr x1, [sp, #56] // 8-byte Folded Reload + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #544] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldr x4, [sp, #552] + ldr x6, [sp, #32] // 8-byte Folded Reload + adcs x5, x5, x6 + ldr x6, [sp, #48] // 8-byte Folded Reload + adcs x6, x6, xzr + adds x19, x10, x2 + adcs x10, x12, x4 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x0 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + adcs x8, x6, x8 + stp x8, x9, [sp, #96] + adcs x8, xzr, xzr + stp x8, x10, [sp, #48] + ldr x22, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x22 + add x8, sp, #464 // =464 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #528] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #520] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #512] + str x8, [sp, #16] // 8-byte Folded Spill + ldp x25, x24, [sp, #496] + ldp x27, x26, [sp, #480] + ldp x28, x21, [sp, #464] + ldr x1, [x23, #48] + add x8, sp, #384 // =384 + ldr x23, [sp, #128] // 8-byte Folded Reload + mov x0, x23 + bl .LmulPv512x64 + cmn x19, x28 + ldp x9, x8, [sp, #440] + ldr x10, [sp, #40] // 8-byte Folded Reload + adcs x10, x10, x21 + ldp x13, x11, [sp, #424] + ldp x14, x12, [sp, #80] + adcs x12, x12, x27 + adcs x14, x14, x26 + ldp x17, x15, [sp, #408] + ldp x18, x16, [sp, #64] + adcs x16, x16, x25 + adcs x18, x18, x24 + ldr x1, [sp, #56] // 8-byte Folded Reload + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #384] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldp x4, x0, [sp, #392] + ldr x6, [sp, #32] // 8-byte Folded Reload + adcs x5, x5, x6 + ldr x6, [sp, #48] // 8-byte Folded Reload + adcs x6, x6, xzr + adds x19, x10, x2 + adcs x10, x12, x4 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x0 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x18, x15 + 
str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + adcs x8, x6, x8 + stp x8, x9, [sp, #96] + adcs x8, xzr, xzr + stp x8, x10, [sp, #48] + mul x1, x19, x22 + add x8, sp, #304 // =304 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #368] + str x8, [sp, #32] // 8-byte Folded Spill + ldp x22, x8, [sp, #352] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x25, x24, [sp, #336] + ldp x27, x26, [sp, #320] + ldp x28, x21, [sp, #304] + ldr x8, [sp, #120] // 8-byte Folded Reload + ldr x1, [x8, #56] + add x8, sp, #224 // =224 + mov x0, x23 + bl .LmulPv512x64 + cmn x19, x28 + ldp x9, x8, [sp, #280] + ldr x10, [sp, #40] // 8-byte Folded Reload + adcs x10, x10, x21 + ldp x13, x11, [sp, #264] + ldp x14, x12, [sp, #80] + adcs x12, x12, x27 + adcs x14, x14, x26 + ldp x17, x15, [sp, #248] + ldp x18, x16, [sp, #64] + adcs x16, x16, x25 + adcs x18, x18, x24 + ldr x1, [sp, #56] // 8-byte Folded Reload + adcs x1, x1, x22 + ldr x2, [sp, #224] + ldp x5, x3, [sp, #96] + ldp x4, x6, [sp, #24] + adcs x3, x3, x4 + ldp x4, x0, [sp, #232] + adcs x5, x5, x6 + ldr x6, [sp, #48] // 8-byte Folded Reload + adcs x6, x6, xzr + adds x19, x10, x2 + adcs x21, x12, x4 + adcs x22, x14, x0 + adcs x23, x16, x17 + adcs x24, x18, x15 + adcs x25, x1, x13 + adcs x10, x3, x11 + str x10, [sp, #128] // 8-byte Folded Spill + adcs x27, x5, x9 + adcs x28, x6, x8 + adcs x26, xzr, xzr + ldr x8, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x8 + add x8, sp, #144 // =144 + mov x0, x20 + bl .LmulPv512x64 + ldp x15, x8, [sp, #200] + ldp x9, x10, [sp, #144] + ldp x11, x12, [sp, #160] + cmn x19, x9 + ldp x13, x9, [sp, #176] + adcs x10, x21, x10 + ldr x14, [sp, #192] + adcs x11, x22, x11 + adcs x12, x23, x12 + adcs x13, x24, x13 + adcs x9, x25, x9 + ldp x16, x17, [x20, #48] + ldp x18, x0, [x20, #32] + ldp x1, x2, [x20, #16] + ldp x3, x4, [x20] + ldr x5, [sp, #128] // 8-byte Folded Reload + adcs x14, x5, x14 + adcs x15, x27, x15 + adcs x8, x28, x8 + adcs x5, x26, xzr + subs x3, x10, x3 + sbcs x4, x11, x4 + sbcs x1, x12, x1 + sbcs x2, x13, x2 + sbcs x18, x9, x18 + sbcs x0, x14, x0 + sbcs x16, x15, x16 + sbcs x17, x8, x17 + sbcs x5, x5, xzr + tst x5, #0x1 + csel x10, x10, x3, ne + csel x11, x11, x4, ne + csel x12, x12, x1, ne + csel x13, x13, x2, ne + csel x9, x9, x18, ne + csel x14, x14, x0, ne + csel x15, x15, x16, ne + csel x8, x8, x17, ne + ldr x16, [sp, #112] // 8-byte Folded Reload + stp x10, x11, [x16] + stp x12, x13, [x16, #16] + stp x9, x14, [x16, #32] + stp x15, x8, [x16, #48] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end114: + .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L + + .globl mcl_fp_montNF8L + .align 2 + .type mcl_fp_montNF8L,@function +mcl_fp_montNF8L: // @mcl_fp_montNF8L +// BB#0: + stp x28, x27, [sp, #-96]! 
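+// mcl_fp_montNF8L: same Montgomery row structure as mcl_fp_mont8L,
+// apparently the "NF" (non-fullbit) form: assuming the modulus leaves
+// the top bit of its high limb clear, no ninth overflow word is
+// tracked per row (adcs x8, x8, xzr folds the carry into the top
+// limb), and the final correction picks between t and t-p by the sign
+// of the trial subtraction (cmp/csel ... lt) rather than a carry test.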
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #1424 // =1424 + mov x20, x3 + mov x26, x2 + str x26, [sp, #128] // 8-byte Folded Spill + ldur x19, [x20, #-8] + str x19, [sp, #136] // 8-byte Folded Spill + ldr x9, [x26] + mov x27, x1 + stp x0, x27, [sp, #112] + sub x8, x29, #160 // =160 + mov x0, x27 + mov x1, x9 + bl .LmulPv512x64 + ldur x24, [x29, #-160] + ldur x8, [x29, #-96] + str x8, [sp, #104] // 8-byte Folded Spill + ldur x8, [x29, #-104] + str x8, [sp, #96] // 8-byte Folded Spill + ldur x8, [x29, #-112] + str x8, [sp, #88] // 8-byte Folded Spill + ldur x8, [x29, #-120] + str x8, [sp, #80] // 8-byte Folded Spill + ldur x8, [x29, #-128] + str x8, [sp, #72] // 8-byte Folded Spill + ldur x8, [x29, #-136] + str x8, [sp, #64] // 8-byte Folded Spill + ldur x8, [x29, #-144] + str x8, [sp, #56] // 8-byte Folded Spill + ldur x8, [x29, #-152] + str x8, [sp, #48] // 8-byte Folded Spill + mul x1, x24, x19 + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv512x64 + ldur x8, [x29, #-176] + str x8, [sp, #40] // 8-byte Folded Spill + ldur x8, [x29, #-184] + str x8, [sp, #32] // 8-byte Folded Spill + ldur x8, [x29, #-192] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x19, x28, [x29, #-208] + ldp x21, x23, [x29, #-224] + ldp x25, x22, [x29, #-240] + ldr x1, [x26, #8] + add x8, sp, #1184 // =1184 + mov x0, x27 + bl .LmulPv512x64 + cmn x25, x24 + ldr x8, [sp, #1248] + ldr x9, [sp, #1240] + ldp x10, x12, [sp, #48] + adcs x10, x22, x10 + ldr x11, [sp, #1232] + adcs x12, x21, x12 + ldr x13, [sp, #1224] + ldp x14, x16, [sp, #64] + adcs x14, x23, x14 + ldr x15, [sp, #1216] + adcs x16, x19, x16 + ldr x17, [sp, #1208] + ldp x18, x1, [sp, #80] + adcs x18, x28, x18 + ldr x0, [sp, #1192] + ldp x2, x4, [sp, #24] + adcs x1, x2, x1 + ldr x2, [sp, #1184] + ldp x3, x5, [sp, #96] + adcs x3, x4, x3 + ldr x4, [sp, #1200] + ldr x6, [sp, #40] // 8-byte Folded Reload + adcs x5, x6, x5 + adds x19, x10, x2 + adcs x10, x12, x0 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x4 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x27, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x27 + add x8, sp, #1104 // =1104 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #1168] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #1160] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1152] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1144] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x23, [sp, #1136] + ldr x24, [sp, #1128] + ldr x25, [sp, #1120] + ldr x21, [sp, #1112] + ldr x26, [sp, #1104] + ldp x22, x28, [sp, #120] + ldr x1, [x28, #16] + add x8, sp, #1024 // =1024 + mov x0, x22 + bl .LmulPv512x64 + cmn x19, x26 + ldr x8, [sp, #1088] + ldr x9, [sp, #1080] + ldp x10, x18, [sp, #48] + adcs x10, x10, x21 + ldr x11, [sp, #1072] + ldp x14, x12, [sp, #72] + adcs x12, x12, x25 + ldr x13, [sp, #1064] + adcs x14, x14, x24 + ldr x15, [sp, #1056] + ldr x16, [sp, #64] // 8-byte Folded Reload + adcs x16, x16, x23 + ldr x17, [sp, #1048] + ldp x0, x2, [sp, #16] + adcs x18, x18, x0 + ldr x0, [sp, #1032] + ldp x3, x1, [sp, #96] + adcs x1, x1, x2 + ldr x2, [sp, #1024] + 
ldp x4, x6, [sp, #32] + adcs x3, x3, x4 + ldr x4, [sp, #1040] + ldr x5, [sp, #88] // 8-byte Folded Reload + adcs x5, x5, x6 + adds x19, x10, x2 + adcs x10, x12, x0 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x4 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + mul x1, x19, x27 + add x8, sp, #944 // =944 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #1008] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #1000] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #992] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #984] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x23, [sp, #976] + ldr x24, [sp, #968] + ldr x25, [sp, #960] + ldr x21, [sp, #952] + ldr x26, [sp, #944] + ldr x1, [x28, #24] + add x8, sp, #864 // =864 + mov x27, x22 + mov x0, x27 + bl .LmulPv512x64 + cmn x19, x26 + ldr x8, [sp, #928] + ldr x9, [sp, #920] + ldp x10, x18, [sp, #48] + adcs x10, x10, x21 + ldr x11, [sp, #912] + ldp x14, x12, [sp, #72] + adcs x12, x12, x25 + ldr x13, [sp, #904] + adcs x14, x14, x24 + ldr x15, [sp, #896] + ldr x16, [sp, #64] // 8-byte Folded Reload + adcs x16, x16, x23 + ldr x17, [sp, #888] + ldp x0, x2, [sp, #16] + adcs x18, x18, x0 + ldr x0, [sp, #872] + ldp x3, x1, [sp, #96] + adcs x1, x1, x2 + ldr x2, [sp, #864] + ldp x4, x6, [sp, #32] + adcs x3, x3, x4 + ldr x4, [sp, #880] + ldr x5, [sp, #88] // 8-byte Folded Reload + adcs x5, x5, x6 + adds x19, x10, x2 + adcs x10, x12, x0 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x4 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x28, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x28 + add x8, sp, #784 // =784 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #848] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #840] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #832] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #824] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x23, [sp, #816] + ldr x24, [sp, #808] + ldr x25, [sp, #800] + ldr x21, [sp, #792] + ldr x26, [sp, #784] + ldr x22, [sp, #128] // 8-byte Folded Reload + ldr x1, [x22, #32] + add x8, sp, #704 // =704 + mov x0, x27 + bl .LmulPv512x64 + cmn x19, x26 + ldr x8, [sp, #768] + ldr x9, [sp, #760] + ldp x10, x18, [sp, #48] + adcs x10, x10, x21 + ldr x11, [sp, #752] + ldp x14, x12, [sp, #72] + adcs x12, x12, x25 + ldr x13, [sp, #744] + adcs x14, x14, x24 + ldr x15, [sp, #736] + ldr x16, [sp, #64] // 8-byte Folded Reload + adcs x16, x16, x23 + ldr x17, [sp, #728] + ldp x0, x2, [sp, #16] + adcs x18, x18, x0 + ldr x0, [sp, #712] + ldp x3, x1, [sp, #96] + adcs x1, x1, x2 + ldr x2, [sp, #704] + ldp x4, x6, [sp, #32] + adcs x3, x3, x4 + ldr x4, [sp, #720] + ldr x5, [sp, #88] // 8-byte Folded Reload + adcs x5, x5, x6 + adds x19, x10, x2 + adcs x10, x12, x0 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x4 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x17 + str 
x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + mul x1, x19, x28 + add x8, sp, #624 // =624 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #688] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #680] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #672] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #664] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x23, [sp, #656] + ldr x24, [sp, #648] + ldr x25, [sp, #640] + ldr x21, [sp, #632] + ldr x26, [sp, #624] + mov x27, x22 + ldr x1, [x27, #40] + add x8, sp, #544 // =544 + ldr x28, [sp, #120] // 8-byte Folded Reload + mov x0, x28 + bl .LmulPv512x64 + cmn x19, x26 + ldr x8, [sp, #608] + ldr x9, [sp, #600] + ldp x10, x18, [sp, #48] + adcs x10, x10, x21 + ldr x11, [sp, #592] + ldp x14, x12, [sp, #72] + adcs x12, x12, x25 + ldr x13, [sp, #584] + adcs x14, x14, x24 + ldr x15, [sp, #576] + ldr x16, [sp, #64] // 8-byte Folded Reload + adcs x16, x16, x23 + ldr x17, [sp, #568] + ldp x0, x2, [sp, #16] + adcs x18, x18, x0 + ldr x0, [sp, #552] + ldp x3, x1, [sp, #96] + adcs x1, x1, x2 + ldr x2, [sp, #544] + ldp x4, x6, [sp, #32] + adcs x3, x3, x4 + ldr x4, [sp, #560] + ldr x5, [sp, #88] // 8-byte Folded Reload + adcs x5, x5, x6 + adds x19, x10, x2 + adcs x10, x12, x0 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x4 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x22, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x22 + add x8, sp, #464 // =464 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #528] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #520] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #512] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x23, x8, [sp, #496] + str x8, [sp, #16] // 8-byte Folded Spill + ldp x25, x24, [sp, #480] + ldp x26, x21, [sp, #464] + ldr x1, [x27, #48] + add x8, sp, #384 // =384 + mov x0, x28 + bl .LmulPv512x64 + cmn x19, x26 + ldp x9, x8, [sp, #440] + ldp x10, x18, [sp, #48] + adcs x10, x10, x21 + ldp x13, x11, [sp, #424] + ldp x14, x12, [sp, #72] + adcs x12, x12, x25 + adcs x14, x14, x24 + ldp x17, x15, [sp, #408] + ldr x16, [sp, #64] // 8-byte Folded Reload + adcs x16, x16, x23 + ldp x0, x2, [sp, #16] + adcs x18, x18, x0 + ldp x3, x1, [sp, #96] + adcs x1, x1, x2 + ldp x2, x0, [sp, #384] + ldp x4, x6, [sp, #32] + adcs x3, x3, x4 + ldr x4, [sp, #400] + ldr x5, [sp, #88] // 8-byte Folded Reload + adcs x5, x5, x6 + adds x19, x10, x2 + adcs x10, x12, x0 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x4 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + mul x1, x19, x22 + add x8, sp, #304 // =304 + mov x0, x20 + bl .LmulPv512x64 + ldp x27, x8, [sp, #360] + str x8, [sp, #40] // 
8-byte Folded Spill + ldp x22, x28, [sp, #344] + ldp x24, x23, [sp, #328] + ldp x21, x25, [sp, #312] + ldr x26, [sp, #304] + ldp x0, x8, [sp, #120] + ldr x1, [x8, #56] + add x8, sp, #224 // =224 + bl .LmulPv512x64 + cmn x19, x26 + ldp x9, x8, [sp, #280] + ldp x10, x18, [sp, #48] + adcs x10, x10, x21 + ldp x13, x11, [sp, #264] + ldp x14, x12, [sp, #72] + adcs x12, x12, x25 + adcs x14, x14, x24 + ldp x17, x15, [sp, #248] + ldr x16, [sp, #64] // 8-byte Folded Reload + adcs x16, x16, x23 + adcs x18, x18, x22 + ldp x2, x0, [sp, #224] + ldp x3, x1, [sp, #96] + adcs x1, x1, x28 + adcs x3, x3, x27 + ldr x4, [sp, #240] + ldr x5, [sp, #88] // 8-byte Folded Reload + ldr x6, [sp, #40] // 8-byte Folded Reload + adcs x5, x5, x6 + adds x19, x10, x2 + adcs x21, x12, x0 + adcs x22, x14, x4 + adcs x23, x16, x17 + adcs x24, x18, x15 + adcs x25, x1, x13 + adcs x26, x3, x11 + adcs x27, x5, x9 + adcs x28, x8, xzr + ldr x8, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x8 + add x8, sp, #144 // =144 + mov x0, x20 + bl .LmulPv512x64 + ldp x15, x8, [sp, #200] + ldp x9, x10, [sp, #144] + ldp x11, x12, [sp, #160] + cmn x19, x9 + ldp x13, x9, [sp, #176] + adcs x10, x21, x10 + ldr x14, [sp, #192] + adcs x11, x22, x11 + adcs x12, x23, x12 + adcs x13, x24, x13 + adcs x9, x25, x9 + ldp x16, x17, [x20, #48] + ldp x18, x0, [x20, #32] + ldp x1, x2, [x20, #16] + ldp x3, x4, [x20] + adcs x14, x26, x14 + adcs x15, x27, x15 + adcs x8, x28, x8 + subs x3, x10, x3 + sbcs x4, x11, x4 + sbcs x1, x12, x1 + sbcs x2, x13, x2 + sbcs x18, x9, x18 + sbcs x0, x14, x0 + sbcs x16, x15, x16 + sbcs x17, x8, x17 + cmp x17, #0 // =0 + csel x10, x10, x3, lt + csel x11, x11, x4, lt + csel x12, x12, x1, lt + csel x13, x13, x2, lt + csel x9, x9, x18, lt + csel x14, x14, x0, lt + csel x15, x15, x16, lt + csel x8, x8, x17, lt + ldr x16, [sp, #112] // 8-byte Folded Reload + stp x10, x11, [x16] + stp x12, x13, [x16, #16] + stp x9, x14, [x16, #32] + stp x15, x8, [x16, #48] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end115: + .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L + + .globl mcl_fp_montRed8L + .align 2 + .type mcl_fp_montRed8L,@function +mcl_fp_montRed8L: // @mcl_fp_montRed8L +// BB#0: + stp x28, x27, [sp, #-96]! 
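+// mcl_fp_montRed8L: Montgomery reduction of the 16-limb value xy at x1
+// by the 8-limb modulus at x2, r = xy * R^-1 mod p. Eight rounds of
+//   m = t[0] * rp mod 2^64;  t = (t + m*p) >> 64   // bl .LmulPv512x64
+// each absorb one further input limb (the adcs ..., xzr chains), and
+// the subs/sbcs + tst/csel tail does the single conditional
+// subtraction of p.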
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #800 // =800 + mov x20, x2 + ldur x9, [x20, #-8] + str x9, [sp, #32] // 8-byte Folded Spill + ldr x8, [x20, #48] + str x8, [sp, #144] // 8-byte Folded Spill + ldr x8, [x20, #56] + str x8, [sp, #152] // 8-byte Folded Spill + ldr x8, [x20, #32] + str x8, [sp, #120] // 8-byte Folded Spill + ldr x8, [x20, #40] + str x8, [sp, #128] // 8-byte Folded Spill + ldr x8, [x20, #16] + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [x20, #24] + str x8, [sp, #112] // 8-byte Folded Spill + ldr x8, [x20] + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [x20, #8] + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [x1, #112] + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [x1, #120] + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [x1, #96] + str x8, [sp, #56] // 8-byte Folded Spill + ldr x8, [x1, #104] + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [x1, #80] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [x1, #88] + str x8, [sp, #48] // 8-byte Folded Spill + ldp x28, x8, [x1, #64] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x22, x25, [x1, #48] + ldp x24, x19, [x1, #32] + ldp x27, x26, [x1, #16] + ldp x21, x23, [x1] + str x0, [sp, #136] // 8-byte Folded Spill + mul x1, x21, x9 + sub x8, x29, #160 // =160 + mov x0, x20 + bl .LmulPv512x64 + ldp x9, x8, [x29, #-104] + ldp x11, x10, [x29, #-120] + ldp x16, x12, [x29, #-136] + ldp x13, x14, [x29, #-160] + ldur x15, [x29, #-144] + cmn x21, x13 + adcs x21, x23, x14 + adcs x13, x27, x15 + adcs x26, x26, x16 + adcs x24, x24, x12 + adcs x11, x19, x11 + stp x11, x13, [sp, #8] + adcs x22, x22, x10 + adcs x25, x25, x9 + adcs x27, x28, x8 + ldr x8, [sp, #24] // 8-byte Folded Reload + adcs x28, x8, xzr + ldp x19, x8, [sp, #32] + adcs x23, x8, xzr + ldr x8, [sp, #48] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #48] // 8-byte Folded Spill + ldr x8, [sp, #56] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #56] // 8-byte Folded Spill + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + adcs x8, xzr, xzr + str x8, [sp, #40] // 8-byte Folded Spill + mul x1, x21, x19 + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv512x64 + ldp x9, x8, [x29, #-184] + ldp x11, x10, [x29, #-200] + ldp x16, x12, [x29, #-216] + ldp x13, x14, [x29, #-240] + ldur x15, [x29, #-224] + cmn x21, x13 + ldr x13, [sp, #16] // 8-byte Folded Reload + adcs x21, x13, x14 + adcs x13, x26, x15 + str x13, [sp, #24] // 8-byte Folded Spill + adcs x24, x24, x16 + ldr x13, [sp, #8] // 8-byte Folded Reload + adcs x12, x13, x12 + str x12, [sp, #16] // 8-byte Folded Spill + adcs x22, x22, x11 + adcs x25, x25, x10 + adcs x27, x27, x9 + adcs x28, x28, x8 + adcs x23, x23, xzr + ldr x8, [sp, #48] // 8-byte Folded Reload + adcs x26, x8, xzr + ldr x8, [sp, #56] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #56] // 8-byte Folded Spill + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] 
// 8-byte Folded Spill + ldr x8, [sp, #40] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #48] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #560 // =560 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #624] + ldr x9, [sp, #616] + ldr x10, [sp, #608] + ldr x11, [sp, #600] + ldr x12, [sp, #592] + ldr x13, [sp, #560] + ldr x14, [sp, #568] + ldr x15, [sp, #576] + ldr x16, [sp, #584] + cmn x21, x13 + ldr x13, [sp, #24] // 8-byte Folded Reload + adcs x21, x13, x14 + adcs x13, x24, x15 + str x13, [sp, #40] // 8-byte Folded Spill + ldr x13, [sp, #16] // 8-byte Folded Reload + adcs x13, x13, x16 + str x13, [sp, #24] // 8-byte Folded Spill + adcs x22, x22, x12 + adcs x25, x25, x11 + adcs x27, x27, x10 + adcs x28, x28, x9 + adcs x23, x23, x8 + adcs x26, x26, xzr + ldr x8, [sp, #56] // 8-byte Folded Reload + adcs x24, x8, xzr + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [sp, #48] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #56] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #480 // =480 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #544] + ldr x9, [sp, #536] + ldr x10, [sp, #528] + ldr x11, [sp, #520] + ldr x12, [sp, #512] + ldp x13, x14, [sp, #480] + ldp x15, x16, [sp, #496] + cmn x21, x13 + ldr x13, [sp, #40] // 8-byte Folded Reload + adcs x21, x13, x14 + ldr x13, [sp, #24] // 8-byte Folded Reload + adcs x13, x13, x15 + adcs x22, x22, x16 + adcs x25, x25, x12 + adcs x27, x27, x11 + adcs x28, x28, x10 + adcs x23, x23, x9 + adcs x26, x26, x8 + adcs x24, x24, xzr + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [sp, #56] // 8-byte Folded Reload + adcs x8, x8, xzr + stp x13, x8, [sp, #48] + mul x1, x21, x19 + add x8, sp, #400 // =400 + mov x0, x20 + bl .LmulPv512x64 + ldp x9, x8, [sp, #456] + ldp x11, x10, [sp, #440] + ldp x16, x12, [sp, #424] + ldp x13, x14, [sp, #400] + ldr x15, [sp, #416] + cmn x21, x13 + ldr x13, [sp, #48] // 8-byte Folded Reload + adcs x21, x13, x14 + adcs x13, x22, x15 + str x13, [sp, #48] // 8-byte Folded Spill + adcs x25, x25, x16 + adcs x27, x27, x12 + adcs x28, x28, x11 + adcs x23, x23, x10 + adcs x26, x26, x9 + adcs x24, x24, x8 + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x22, x8, xzr + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [sp, #56] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #320 // =320 + mov x0, x20 + bl .LmulPv512x64 + ldp x9, x8, [sp, #376] + ldp x11, x10, [sp, #360] + ldp x16, x12, [sp, #344] + ldp x13, x14, [sp, #320] + ldr x15, [sp, #336] + cmn x21, x13 + ldr x13, [sp, #48] // 8-byte Folded Reload + adcs x21, x13, x14 + adcs x13, x25, x15 + adcs x27, x27, x16 + adcs x28, x28, x12 + adcs x23, x23, x11 + adcs x26, x26, x10 + adcs x24, x24, x9 + ldr x9, [sp, #64] // 8-byte Folded Reload + adcs x8, x9, x8 + stp 
x13, x8, [sp, #56] + adcs x22, x22, xzr + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x25, x8, xzr + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #240 // =240 + mov x0, x20 + bl .LmulPv512x64 + ldp x9, x8, [sp, #296] + ldp x11, x10, [sp, #280] + ldp x16, x12, [sp, #264] + ldp x13, x14, [sp, #240] + ldr x15, [sp, #256] + cmn x21, x13 + ldr x13, [sp, #56] // 8-byte Folded Reload + adcs x21, x13, x14 + adcs x13, x27, x15 + adcs x28, x28, x16 + adcs x23, x23, x12 + adcs x26, x26, x11 + adcs x24, x24, x10 + ldr x10, [sp, #64] // 8-byte Folded Reload + adcs x9, x10, x9 + stp x9, x13, [sp, #64] + adcs x22, x22, x8 + adcs x25, x25, xzr + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x27, x8, xzr + mul x1, x21, x19 + add x8, sp, #160 // =160 + mov x0, x20 + bl .LmulPv512x64 + ldp x9, x8, [sp, #216] + ldp x11, x10, [sp, #200] + ldp x16, x12, [sp, #184] + ldp x13, x14, [sp, #160] + ldr x15, [sp, #176] + cmn x21, x13 + ldr x13, [sp, #72] // 8-byte Folded Reload + adcs x13, x13, x14 + adcs x14, x28, x15 + adcs x15, x23, x16 + adcs x12, x26, x12 + adcs x11, x24, x11 + ldr x16, [sp, #64] // 8-byte Folded Reload + adcs x10, x16, x10 + adcs x9, x22, x9 + adcs x8, x25, x8 + adcs x16, x27, xzr + ldp x17, x18, [sp, #88] + subs x17, x13, x17 + sbcs x18, x14, x18 + ldp x0, x1, [sp, #104] + sbcs x0, x15, x0 + sbcs x1, x12, x1 + ldp x2, x3, [sp, #120] + sbcs x2, x11, x2 + sbcs x3, x10, x3 + ldp x4, x5, [sp, #144] + sbcs x4, x9, x4 + sbcs x5, x8, x5 + sbcs x16, x16, xzr + tst x16, #0x1 + csel x13, x13, x17, ne + csel x14, x14, x18, ne + csel x15, x15, x0, ne + csel x12, x12, x1, ne + csel x11, x11, x2, ne + csel x10, x10, x3, ne + csel x9, x9, x4, ne + csel x8, x8, x5, ne + ldr x16, [sp, #136] // 8-byte Folded Reload + stp x13, x14, [x16] + stp x15, x12, [x16, #16] + stp x11, x10, [x16, #32] + stp x9, x8, [x16, #48] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end116: + .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L + + .globl mcl_fp_addPre8L + .align 2 + .type mcl_fp_addPre8L,@function +mcl_fp_addPre8L: // @mcl_fp_addPre8L +// BB#0: + ldp x8, x9, [x2, #48] + ldp x10, x11, [x1, #48] + ldp x12, x13, [x2, #32] + ldp x14, x15, [x1, #32] + ldp x16, x17, [x2, #16] + ldp x18, x2, [x2] + ldp x3, x4, [x1] + ldp x5, x1, [x1, #16] + adds x18, x18, x3 + str x18, [x0] + adcs x18, x2, x4 + adcs x16, x16, x5 + stp x18, x16, [x0, #8] + adcs x16, x17, x1 + adcs x12, x12, x14 + stp x16, x12, [x0, #24] + adcs x12, x13, x15 + adcs x8, x8, x10 + stp x12, x8, [x0, #40] + adcs x9, x9, x11 + adcs x8, xzr, xzr + str x9, [x0, #56] + mov x0, x8 + ret +.Lfunc_end117: + .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L + + .globl mcl_fp_subPre8L + .align 2 + .type mcl_fp_subPre8L,@function +mcl_fp_subPre8L: // @mcl_fp_subPre8L +// BB#0: + ldp x8, x9, [x2, #48] + ldp x10, x11, [x1, #48] + ldp x12, x13, [x2, #32] + ldp x14, x15, [x1, #32] + ldp x16, x17, [x2, #16] + ldp x18, x2, [x2] + ldp x3, x4, [x1] + ldp x5, x1, [x1, #16] + subs x18, x3, x18 + str x18, [x0] + sbcs x18, x4, x2 + sbcs x16, x5, x16 + stp x18, x16, [x0, #8] + sbcs x16, x1, x17 + sbcs x12, x14, x12 + stp x16, x12, [x0, #24] + sbcs x12, x15, x13 + sbcs x8, x10, x8 + stp x12, x8, [x0, #40] + sbcs x9, x11, x9 + ngcs x8, xzr + and x8, x8, #0x1 + str x9, [x0, #56] + mov x0, x8 + ret +.Lfunc_end118: + .size mcl_fp_subPre8L, 
.Lfunc_end118-mcl_fp_subPre8L + + .globl mcl_fp_shr1_8L + .align 2 + .type mcl_fp_shr1_8L,@function +mcl_fp_shr1_8L: // @mcl_fp_shr1_8L +// BB#0: + ldp x8, x9, [x1] + ldp x10, x11, [x1, #48] + ldp x12, x13, [x1, #16] + ldp x14, x15, [x1, #32] + extr x8, x9, x8, #1 + extr x9, x12, x9, #1 + extr x12, x13, x12, #1 + extr x13, x14, x13, #1 + extr x14, x15, x14, #1 + extr x15, x10, x15, #1 + extr x10, x11, x10, #1 + lsr x11, x11, #1 + stp x8, x9, [x0] + stp x12, x13, [x0, #16] + stp x14, x15, [x0, #32] + stp x10, x11, [x0, #48] + ret +.Lfunc_end119: + .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L + + .globl mcl_fp_add8L + .align 2 + .type mcl_fp_add8L,@function +mcl_fp_add8L: // @mcl_fp_add8L +// BB#0: + stp x22, x21, [sp, #-32]! + stp x20, x19, [sp, #16] + ldp x8, x9, [x2, #48] + ldp x10, x11, [x1, #48] + ldp x12, x13, [x2, #32] + ldp x14, x15, [x1, #32] + ldp x16, x17, [x2, #16] + ldp x18, x2, [x2] + ldp x4, x5, [x1] + ldp x6, x1, [x1, #16] + adds x18, x18, x4 + adcs x2, x2, x5 + ldp x4, x5, [x3, #48] + adcs x16, x16, x6 + adcs x17, x17, x1 + ldp x1, x6, [x3, #32] + adcs x7, x12, x14 + adcs x19, x13, x15 + ldp x12, x13, [x3] + stp x18, x2, [x0] + stp x16, x17, [x0, #16] + stp x7, x19, [x0, #32] + adcs x8, x8, x10 + adcs x20, x9, x11 + stp x8, x20, [x0, #48] + adcs x21, xzr, xzr + ldp x9, x10, [x3, #16] + subs x15, x18, x12 + sbcs x14, x2, x13 + sbcs x13, x16, x9 + sbcs x12, x17, x10 + sbcs x11, x7, x1 + sbcs x10, x19, x6 + sbcs x9, x8, x4 + sbcs x8, x20, x5 + sbcs x16, x21, xzr + and w16, w16, #0x1 + tbnz w16, #0, .LBB120_2 +// BB#1: // %nocarry + stp x15, x14, [x0] + stp x13, x12, [x0, #16] + stp x11, x10, [x0, #32] + stp x9, x8, [x0, #48] +.LBB120_2: // %carry + ldp x20, x19, [sp, #16] + ldp x22, x21, [sp], #32 + ret +.Lfunc_end120: + .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L + + .globl mcl_fp_addNF8L + .align 2 + .type mcl_fp_addNF8L,@function +mcl_fp_addNF8L: // @mcl_fp_addNF8L +// BB#0: + ldp x8, x9, [x1, #48] + ldp x10, x11, [x2, #48] + ldp x12, x13, [x1, #32] + ldp x14, x15, [x2, #32] + ldp x16, x17, [x1, #16] + ldp x18, x1, [x1] + ldp x4, x5, [x2] + ldp x6, x2, [x2, #16] + adds x18, x4, x18 + adcs x1, x5, x1 + ldp x4, x5, [x3, #48] + adcs x16, x6, x16 + adcs x17, x2, x17 + ldp x2, x6, [x3, #32] + adcs x12, x14, x12 + adcs x13, x15, x13 + ldp x14, x15, [x3] + adcs x8, x10, x8 + ldp x10, x3, [x3, #16] + adcs x9, x11, x9 + subs x11, x18, x14 + sbcs x14, x1, x15 + sbcs x10, x16, x10 + sbcs x15, x17, x3 + sbcs x2, x12, x2 + sbcs x3, x13, x6 + sbcs x4, x8, x4 + sbcs x5, x9, x5 + cmp x5, #0 // =0 + csel x11, x18, x11, lt + csel x14, x1, x14, lt + csel x10, x16, x10, lt + csel x15, x17, x15, lt + csel x12, x12, x2, lt + csel x13, x13, x3, lt + csel x8, x8, x4, lt + csel x9, x9, x5, lt + stp x11, x14, [x0] + stp x10, x15, [x0, #16] + stp x12, x13, [x0, #32] + stp x8, x9, [x0, #48] + ret +.Lfunc_end121: + .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L + + .globl mcl_fp_sub8L + .align 2 + .type mcl_fp_sub8L,@function +mcl_fp_sub8L: // @mcl_fp_sub8L +// BB#0: + ldp x14, x15, [x2, #48] + ldp x16, x17, [x1, #48] + ldp x12, x13, [x2, #32] + ldp x18, x4, [x1, #32] + ldp x10, x11, [x2, #16] + ldp x8, x9, [x2] + ldp x2, x5, [x1] + ldp x6, x1, [x1, #16] + subs x8, x2, x8 + sbcs x9, x5, x9 + stp x8, x9, [x0] + sbcs x10, x6, x10 + sbcs x11, x1, x11 + stp x10, x11, [x0, #16] + sbcs x12, x18, x12 + sbcs x13, x4, x13 + stp x12, x13, [x0, #32] + sbcs x14, x16, x14 + sbcs x15, x17, x15 + stp x14, x15, [x0, #48] + ngcs x16, xzr + and w16, w16, #0x1 + tbnz w16, #0, .LBB122_2 +// BB#1: // %nocarry + 
ret +.LBB122_2: // %carry + ldp x16, x17, [x3, #48] + ldp x18, x1, [x3] + ldp x2, x4, [x3, #16] + ldp x5, x3, [x3, #32] + adds x8, x18, x8 + adcs x9, x1, x9 + adcs x10, x2, x10 + adcs x11, x4, x11 + adcs x12, x5, x12 + adcs x13, x3, x13 + adcs x14, x16, x14 + adcs x15, x17, x15 + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + stp x12, x13, [x0, #32] + stp x14, x15, [x0, #48] + ret +.Lfunc_end122: + .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L + + .globl mcl_fp_subNF8L + .align 2 + .type mcl_fp_subNF8L,@function +mcl_fp_subNF8L: // @mcl_fp_subNF8L +// BB#0: + ldp x8, x9, [x2, #48] + ldp x10, x11, [x1, #48] + ldp x12, x13, [x2, #32] + ldp x14, x15, [x1, #32] + ldp x16, x17, [x2, #16] + ldp x18, x2, [x2] + ldp x4, x5, [x1] + ldp x6, x1, [x1, #16] + subs x18, x4, x18 + sbcs x2, x5, x2 + ldp x4, x5, [x3, #48] + sbcs x16, x6, x16 + sbcs x17, x1, x17 + ldp x1, x6, [x3, #32] + sbcs x12, x14, x12 + sbcs x13, x15, x13 + ldp x14, x15, [x3, #16] + sbcs x8, x10, x8 + ldp x10, x3, [x3] + sbcs x9, x11, x9 + asr x11, x9, #63 + and x10, x11, x10 + and x3, x11, x3 + and x14, x11, x14 + and x15, x11, x15 + and x1, x11, x1 + and x6, x11, x6 + and x4, x11, x4 + and x11, x11, x5 + adds x10, x10, x18 + str x10, [x0] + adcs x10, x3, x2 + str x10, [x0, #8] + adcs x10, x14, x16 + str x10, [x0, #16] + adcs x10, x15, x17 + str x10, [x0, #24] + adcs x10, x1, x12 + str x10, [x0, #32] + adcs x10, x6, x13 + adcs x8, x4, x8 + stp x10, x8, [x0, #40] + adcs x8, x11, x9 + str x8, [x0, #56] + ret +.Lfunc_end123: + .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L + + .globl mcl_fpDbl_add8L + .align 2 + .type mcl_fpDbl_add8L,@function +mcl_fpDbl_add8L: // @mcl_fpDbl_add8L +// BB#0: + ldp x8, x9, [x2, #112] + ldp x10, x11, [x1, #112] + ldp x12, x13, [x2, #96] + ldp x14, x15, [x1, #96] + ldp x16, x5, [x2] + ldp x17, x6, [x1] + ldp x18, x4, [x2, #80] + adds x16, x16, x17 + ldr x17, [x1, #16] + str x16, [x0] + adcs x16, x5, x6 + ldp x5, x6, [x2, #16] + str x16, [x0, #8] + adcs x17, x5, x17 + ldp x16, x5, [x1, #24] + str x17, [x0, #16] + adcs x16, x6, x16 + ldp x17, x6, [x2, #32] + str x16, [x0, #24] + adcs x17, x17, x5 + ldp x16, x5, [x1, #40] + str x17, [x0, #32] + adcs x16, x6, x16 + ldp x17, x6, [x2, #48] + str x16, [x0, #40] + ldr x16, [x1, #56] + adcs x17, x17, x5 + ldp x5, x2, [x2, #64] + str x17, [x0, #48] + adcs x16, x6, x16 + ldp x17, x6, [x1, #64] + str x16, [x0, #56] + ldp x16, x1, [x1, #80] + adcs x17, x5, x17 + adcs x2, x2, x6 + ldp x5, x6, [x3, #48] + adcs x16, x18, x16 + adcs x18, x4, x1 + ldp x1, x4, [x3, #32] + adcs x12, x12, x14 + adcs x13, x13, x15 + ldp x14, x15, [x3, #16] + adcs x8, x8, x10 + ldp x10, x3, [x3] + adcs x9, x9, x11 + adcs x11, xzr, xzr + subs x10, x17, x10 + sbcs x3, x2, x3 + sbcs x14, x16, x14 + sbcs x15, x18, x15 + sbcs x1, x12, x1 + sbcs x4, x13, x4 + sbcs x5, x8, x5 + sbcs x6, x9, x6 + sbcs x11, x11, xzr + tst x11, #0x1 + csel x10, x17, x10, ne + csel x11, x2, x3, ne + csel x14, x16, x14, ne + csel x15, x18, x15, ne + csel x12, x12, x1, ne + csel x13, x13, x4, ne + csel x8, x8, x5, ne + csel x9, x9, x6, ne + stp x10, x11, [x0, #64] + stp x14, x15, [x0, #80] + stp x12, x13, [x0, #96] + stp x8, x9, [x0, #112] + ret +.Lfunc_end124: + .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L + + .globl mcl_fpDbl_sub8L + .align 2 + .type mcl_fpDbl_sub8L,@function +mcl_fpDbl_sub8L: // @mcl_fpDbl_sub8L +// BB#0: + ldp x10, x8, [x2, #112] + ldp x11, x9, [x1, #112] + ldp x12, x13, [x2, #96] + ldp x14, x15, [x1, #96] + ldp x16, x5, [x1] + ldp x17, x4, [x2] + ldr x18, [x1, #80] + subs x16, x16, x17 + ldr 
x17, [x1, #16] + str x16, [x0] + sbcs x16, x5, x4 + ldp x4, x5, [x2, #16] + str x16, [x0, #8] + sbcs x17, x17, x4 + ldp x16, x4, [x1, #24] + str x17, [x0, #16] + sbcs x16, x16, x5 + ldp x17, x5, [x2, #32] + str x16, [x0, #24] + sbcs x17, x4, x17 + ldp x16, x4, [x1, #40] + str x17, [x0, #32] + sbcs x16, x16, x5 + ldp x17, x5, [x2, #48] + str x16, [x0, #40] + sbcs x17, x4, x17 + ldp x16, x4, [x1, #56] + str x17, [x0, #48] + sbcs x16, x16, x5 + ldp x17, x5, [x2, #64] + str x16, [x0, #56] + ldr x16, [x1, #72] + sbcs x17, x4, x17 + ldp x4, x2, [x2, #80] + ldr x1, [x1, #88] + sbcs x16, x16, x5 + sbcs x18, x18, x4 + ldp x4, x5, [x3, #48] + sbcs x1, x1, x2 + sbcs x12, x14, x12 + ldp x14, x2, [x3, #32] + sbcs x13, x15, x13 + sbcs x10, x11, x10 + ldp x11, x15, [x3, #16] + sbcs x8, x9, x8 + ngcs x9, xzr + tst x9, #0x1 + ldp x9, x3, [x3] + csel x5, x5, xzr, ne + csel x4, x4, xzr, ne + csel x2, x2, xzr, ne + csel x14, x14, xzr, ne + csel x15, x15, xzr, ne + csel x11, x11, xzr, ne + csel x3, x3, xzr, ne + csel x9, x9, xzr, ne + adds x9, x9, x17 + str x9, [x0, #64] + adcs x9, x3, x16 + str x9, [x0, #72] + adcs x9, x11, x18 + str x9, [x0, #80] + adcs x9, x15, x1 + str x9, [x0, #88] + adcs x9, x14, x12 + str x9, [x0, #96] + adcs x9, x2, x13 + str x9, [x0, #104] + adcs x9, x4, x10 + adcs x8, x5, x8 + stp x9, x8, [x0, #112] + ret +.Lfunc_end125: + .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L + + .align 2 + .type .LmulPv576x64,@function +.LmulPv576x64: // @mulPv576x64 +// BB#0: + ldr x9, [x0] + mul x10, x9, x1 + str x10, [x8] + ldr x10, [x0, #8] + umulh x9, x9, x1 + mul x11, x10, x1 + adds x9, x9, x11 + str x9, [x8, #8] + ldr x9, [x0, #16] + umulh x10, x10, x1 + mul x11, x9, x1 + adcs x10, x10, x11 + str x10, [x8, #16] + ldr x10, [x0, #24] + umulh x9, x9, x1 + mul x11, x10, x1 + adcs x9, x9, x11 + str x9, [x8, #24] + ldr x9, [x0, #32] + umulh x10, x10, x1 + mul x11, x9, x1 + adcs x10, x10, x11 + str x10, [x8, #32] + ldr x10, [x0, #40] + umulh x9, x9, x1 + mul x11, x10, x1 + adcs x9, x9, x11 + str x9, [x8, #40] + ldr x9, [x0, #48] + umulh x10, x10, x1 + mul x11, x9, x1 + adcs x10, x10, x11 + str x10, [x8, #48] + ldr x10, [x0, #56] + umulh x9, x9, x1 + mul x11, x10, x1 + adcs x9, x9, x11 + str x9, [x8, #56] + ldr x9, [x0, #64] + umulh x10, x10, x1 + mul x11, x9, x1 + umulh x9, x9, x1 + adcs x10, x10, x11 + adcs x9, x9, xzr + stp x10, x9, [x8, #64] + ret +.Lfunc_end126: + .size .LmulPv576x64, .Lfunc_end126-.LmulPv576x64 + + .globl mcl_fp_mulUnitPre9L + .align 2 + .type mcl_fp_mulUnitPre9L,@function +mcl_fp_mulUnitPre9L: // @mcl_fp_mulUnitPre9L +// BB#0: + stp x20, x19, [sp, #-32]! + stp x29, x30, [sp, #16] + add x29, sp, #16 // =16 + sub sp, sp, #80 // =80 + mov x19, x0 + mov x8, sp + mov x0, x1 + mov x1, x2 + bl .LmulPv576x64 + ldp x9, x8, [sp, #64] + ldp x11, x10, [sp, #48] + ldp x13, x12, [sp, #32] + ldp x14, x15, [sp] + ldp x16, x17, [sp, #16] + stp x14, x15, [x19] + stp x16, x17, [x19, #16] + stp x13, x12, [x19, #32] + stp x11, x10, [x19, #48] + stp x9, x8, [x19, #64] + sub sp, x29, #16 // =16 + ldp x29, x30, [sp, #16] + ldp x20, x19, [sp], #32 + ret +.Lfunc_end127: + .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L + + .globl mcl_fpDbl_mulPre9L + .align 2 + .type mcl_fpDbl_mulPre9L,@function +mcl_fpDbl_mulPre9L: // @mcl_fpDbl_mulPre9L +// BB#0: + stp x28, x27, [sp, #-96]! 
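+// mcl_fpDbl_mulPre9L: full 1152-bit product of two 9-limb operands by
+// plain schoolbook rows: one bl .LmulPv576x64 per limb of the second
+// operand (x21), each 10-limb row added into the running accumulator
+// with an adds/adcs chain and its lowest word stored straight to the
+// result (x19).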
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #752 // =752 + mov x21, x2 + ldr x9, [x21] + mov x20, x1 + mov x19, x0 + sub x8, x29, #160 // =160 + mov x0, x20 + mov x1, x9 + bl .LmulPv576x64 + ldur x8, [x29, #-88] + str x8, [sp, #24] // 8-byte Folded Spill + ldur x8, [x29, #-96] + str x8, [sp, #16] // 8-byte Folded Spill + ldp x25, x24, [x29, #-112] + ldp x27, x26, [x29, #-128] + ldp x22, x28, [x29, #-144] + ldp x8, x23, [x29, #-160] + ldr x1, [x21, #8] + str x8, [x19] + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [x29, #-176] + ldp x11, x10, [x29, #-192] + ldp x13, x12, [x29, #-208] + ldp x14, x16, [x29, #-240] + ldp x17, x15, [x29, #-224] + adds x14, x14, x23 + str x14, [x19, #8] + adcs x22, x16, x22 + adcs x23, x17, x28 + adcs x27, x15, x27 + adcs x26, x13, x26 + adcs x25, x12, x25 + adcs x24, x11, x24 + ldr x1, [x21, #16] + ldr x11, [sp, #16] // 8-byte Folded Reload + adcs x28, x10, x11 + ldr x10, [sp, #24] // 8-byte Folded Reload + adcs x9, x9, x10 + adcs x8, x8, xzr + stp x8, x9, [sp, #16] + add x8, sp, #512 // =512 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #584] + ldr x9, [sp, #576] + ldr x10, [sp, #568] + ldr x11, [sp, #560] + ldr x12, [sp, #552] + ldr x13, [sp, #544] + ldr x14, [sp, #512] + ldr x15, [sp, #536] + ldr x16, [sp, #520] + ldr x17, [sp, #528] + adds x14, x22, x14 + str x14, [x19, #16] + adcs x22, x23, x16 + adcs x23, x27, x17 + adcs x26, x26, x15 + adcs x25, x25, x13 + adcs x24, x24, x12 + adcs x27, x28, x11 + ldr x1, [x21, #24] + ldr x11, [sp, #24] // 8-byte Folded Reload + adcs x28, x11, x10 + ldr x10, [sp, #16] // 8-byte Folded Reload + adcs x9, x10, x9 + adcs x8, x8, xzr + stp x8, x9, [sp, #16] + add x8, sp, #432 // =432 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #496] + ldp x11, x10, [sp, #480] + ldp x13, x12, [sp, #464] + ldp x14, x16, [sp, #432] + ldp x17, x15, [sp, #448] + adds x14, x22, x14 + str x14, [x19, #24] + adcs x22, x23, x16 + adcs x23, x26, x17 + adcs x25, x25, x15 + adcs x24, x24, x13 + adcs x26, x27, x12 + adcs x27, x28, x11 + ldr x1, [x21, #32] + ldr x11, [sp, #24] // 8-byte Folded Reload + adcs x28, x11, x10 + ldr x10, [sp, #16] // 8-byte Folded Reload + adcs x9, x10, x9 + adcs x8, x8, xzr + stp x8, x9, [sp, #16] + add x8, sp, #352 // =352 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #416] + ldp x11, x10, [sp, #400] + ldp x13, x12, [sp, #384] + ldp x14, x16, [sp, #352] + ldp x17, x15, [sp, #368] + adds x14, x22, x14 + str x14, [x19, #32] + adcs x22, x23, x16 + adcs x23, x25, x17 + adcs x24, x24, x15 + adcs x25, x26, x13 + adcs x26, x27, x12 + adcs x27, x28, x11 + ldr x1, [x21, #40] + ldr x11, [sp, #24] // 8-byte Folded Reload + adcs x28, x11, x10 + ldr x10, [sp, #16] // 8-byte Folded Reload + adcs x9, x10, x9 + adcs x8, x8, xzr + stp x8, x9, [sp, #16] + add x8, sp, #272 // =272 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #336] + ldp x11, x10, [sp, #320] + ldp x13, x12, [sp, #304] + ldp x14, x16, [sp, #272] + ldp x17, x15, [sp, #288] + adds x14, x22, x14 + str x14, [x19, #40] + adcs x22, x23, x16 + adcs x23, x24, x17 + adcs x24, x25, x15 + adcs x25, x26, x13 + adcs x26, x27, x12 + adcs x27, x28, x11 + ldr x1, [x21, #48] + ldr x11, [sp, #24] // 8-byte Folded Reload + adcs x28, x11, x10 + ldr x10, [sp, #16] // 8-byte Folded Reload + adcs x9, x10, x9 + adcs x8, x8, xzr + stp x8, x9, [sp, #16] + add x8, sp, #192 // =192 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, 
#256] + ldp x11, x10, [sp, #240] + ldp x13, x12, [sp, #224] + ldp x14, x16, [sp, #192] + ldp x17, x15, [sp, #208] + adds x14, x22, x14 + str x14, [x19, #48] + adcs x22, x23, x16 + adcs x23, x24, x17 + adcs x24, x25, x15 + adcs x25, x26, x13 + adcs x26, x27, x12 + adcs x27, x28, x11 + ldr x1, [x21, #56] + ldr x11, [sp, #24] // 8-byte Folded Reload + adcs x28, x11, x10 + ldr x10, [sp, #16] // 8-byte Folded Reload + adcs x9, x10, x9 + adcs x8, x8, xzr + stp x8, x9, [sp, #16] + add x8, sp, #112 // =112 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #176] + ldp x11, x10, [sp, #160] + ldp x13, x12, [sp, #144] + ldp x14, x16, [sp, #112] + ldp x17, x15, [sp, #128] + adds x14, x22, x14 + str x14, [x19, #56] + adcs x22, x23, x16 + adcs x23, x24, x17 + adcs x24, x25, x15 + adcs x25, x26, x13 + adcs x26, x27, x12 + adcs x27, x28, x11 + ldr x1, [x21, #64] + ldr x11, [sp, #24] // 8-byte Folded Reload + adcs x21, x11, x10 + ldr x10, [sp, #16] // 8-byte Folded Reload + adcs x28, x10, x9 + adcs x8, x8, xzr + str x8, [sp, #24] // 8-byte Folded Spill + add x8, sp, #32 // =32 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #96] + ldp x11, x10, [sp, #80] + ldp x13, x12, [sp, #64] + ldp x14, x16, [sp, #32] + ldp x17, x15, [sp, #48] + adds x14, x22, x14 + str x14, [x19, #64] + adcs x14, x23, x16 + str x14, [x19, #72] + adcs x14, x24, x17 + str x14, [x19, #80] + adcs x14, x25, x15 + adcs x13, x26, x13 + stp x14, x13, [x19, #88] + adcs x12, x27, x12 + adcs x11, x21, x11 + stp x12, x11, [x19, #104] + adcs x10, x28, x10 + str x10, [x19, #120] + ldr x10, [sp, #24] // 8-byte Folded Reload + adcs x9, x10, x9 + adcs x8, x8, xzr + stp x9, x8, [x19, #128] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end128: + .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L + + .globl mcl_fpDbl_sqrPre9L + .align 2 + .type mcl_fpDbl_sqrPre9L,@function +mcl_fpDbl_sqrPre9L: // @mcl_fpDbl_sqrPre9L +// BB#0: + stp x28, x27, [sp, #-96]! 
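+// mcl_fpDbl_sqrPre9L: 9-limb squaring with the same schoolbook row
+// structure as mcl_fpDbl_mulPre9L, but both multiplicands come from
+// the same pointer (x20), so each row computes a * a[i]; no dedicated
+// squaring shortcut is used in this generated code.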
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #736 // =736 + mov x20, x1 + ldr x1, [x20] + mov x19, x0 + sub x8, x29, #160 // =160 + mov x0, x20 + bl .LmulPv576x64 + ldur x8, [x29, #-88] + str x8, [sp, #8] // 8-byte Folded Spill + ldp x23, x22, [x29, #-104] + ldp x25, x24, [x29, #-120] + ldp x27, x26, [x29, #-136] + ldp x21, x28, [x29, #-152] + ldur x8, [x29, #-160] + ldr x1, [x20, #8] + str x8, [x19] + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [x29, #-176] + ldp x11, x10, [x29, #-192] + ldp x13, x12, [x29, #-208] + ldp x14, x16, [x29, #-240] + ldp x17, x15, [x29, #-224] + adds x14, x14, x21 + str x14, [x19, #8] + adcs x21, x16, x28 + adcs x27, x17, x27 + adcs x26, x15, x26 + adcs x25, x13, x25 + adcs x24, x12, x24 + adcs x23, x11, x23 + ldr x1, [x20, #16] + adcs x22, x10, x22 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x28, x9, x10 + adcs x8, x8, xzr + str x8, [sp, #8] // 8-byte Folded Spill + add x8, sp, #496 // =496 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #568] + ldr x9, [sp, #560] + ldr x10, [sp, #552] + ldr x11, [sp, #544] + ldr x12, [sp, #536] + ldr x13, [sp, #528] + ldp x14, x16, [sp, #496] + ldr x15, [sp, #520] + ldr x17, [sp, #512] + adds x14, x21, x14 + str x14, [x19, #16] + adcs x21, x27, x16 + adcs x26, x26, x17 + adcs x25, x25, x15 + adcs x24, x24, x13 + adcs x23, x23, x12 + adcs x22, x22, x11 + ldr x1, [x20, #24] + adcs x27, x28, x10 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x28, x10, x9 + adcs x8, x8, xzr + str x8, [sp, #8] // 8-byte Folded Spill + add x8, sp, #416 // =416 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #480] + ldp x11, x10, [sp, #464] + ldp x13, x12, [sp, #448] + ldp x14, x16, [sp, #416] + ldp x17, x15, [sp, #432] + adds x14, x21, x14 + str x14, [x19, #24] + adcs x21, x26, x16 + adcs x25, x25, x17 + adcs x24, x24, x15 + adcs x23, x23, x13 + adcs x22, x22, x12 + adcs x26, x27, x11 + ldr x1, [x20, #32] + adcs x27, x28, x10 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x28, x10, x9 + adcs x8, x8, xzr + str x8, [sp, #8] // 8-byte Folded Spill + add x8, sp, #336 // =336 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #400] + ldp x11, x10, [sp, #384] + ldp x13, x12, [sp, #368] + ldp x14, x16, [sp, #336] + ldp x17, x15, [sp, #352] + adds x14, x21, x14 + str x14, [x19, #32] + adcs x21, x25, x16 + adcs x24, x24, x17 + adcs x23, x23, x15 + adcs x22, x22, x13 + adcs x25, x26, x12 + adcs x26, x27, x11 + ldr x1, [x20, #40] + adcs x27, x28, x10 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x28, x10, x9 + adcs x8, x8, xzr + str x8, [sp, #8] // 8-byte Folded Spill + add x8, sp, #256 // =256 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #320] + ldp x11, x10, [sp, #304] + ldp x13, x12, [sp, #288] + ldp x14, x16, [sp, #256] + ldp x17, x15, [sp, #272] + adds x14, x21, x14 + str x14, [x19, #40] + adcs x21, x24, x16 + adcs x23, x23, x17 + adcs x22, x22, x15 + adcs x24, x25, x13 + adcs x25, x26, x12 + adcs x26, x27, x11 + ldr x1, [x20, #48] + adcs x27, x28, x10 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x28, x10, x9 + adcs x8, x8, xzr + str x8, [sp, #8] // 8-byte Folded Spill + add x8, sp, #176 // =176 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #240] + ldp x11, x10, [sp, #224] + ldp x13, x12, [sp, #208] + ldp x14, x16, [sp, #176] + ldp x17, x15, [sp, #192] + adds x14, x21, x14 + str x14, [x19, #48] + adcs x21, x23, x16 + adcs x22, x22, x17 + adcs x23, x24, x15 + 
adcs x24, x25, x13 + adcs x25, x26, x12 + adcs x26, x27, x11 + ldr x1, [x20, #56] + adcs x27, x28, x10 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x28, x10, x9 + adcs x8, x8, xzr + str x8, [sp, #8] // 8-byte Folded Spill + add x8, sp, #96 // =96 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #160] + ldp x11, x10, [sp, #144] + ldp x13, x12, [sp, #128] + ldp x14, x16, [sp, #96] + ldp x17, x15, [sp, #112] + adds x14, x21, x14 + str x14, [x19, #56] + adcs x21, x22, x16 + adcs x22, x23, x17 + adcs x23, x24, x15 + adcs x24, x25, x13 + adcs x25, x26, x12 + adcs x26, x27, x11 + ldr x1, [x20, #64] + adcs x27, x28, x10 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x28, x10, x9 + adcs x8, x8, xzr + str x8, [sp, #8] // 8-byte Folded Spill + add x8, sp, #16 // =16 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #80] + ldp x11, x10, [sp, #64] + ldp x13, x12, [sp, #48] + ldp x14, x16, [sp, #16] + ldp x17, x15, [sp, #32] + adds x14, x21, x14 + str x14, [x19, #64] + adcs x14, x22, x16 + str x14, [x19, #72] + adcs x14, x23, x17 + str x14, [x19, #80] + adcs x14, x24, x15 + adcs x13, x25, x13 + stp x14, x13, [x19, #88] + adcs x12, x26, x12 + adcs x11, x27, x11 + stp x12, x11, [x19, #104] + adcs x10, x28, x10 + str x10, [x19, #120] + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x9, x10, x9 + adcs x8, x8, xzr + stp x9, x8, [x19, #128] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end129: + .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L + + .globl mcl_fp_mont9L + .align 2 + .type mcl_fp_mont9L,@function +mcl_fp_mont9L: // @mcl_fp_mont9L +// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #1600 // =1600 + mov x20, x3 + mov x28, x2 + str x28, [sp, #136] // 8-byte Folded Spill + ldur x19, [x20, #-8] + str x19, [sp, #144] // 8-byte Folded Spill + ldr x9, [x28] + mov x23, x1 + str x23, [sp, #152] // 8-byte Folded Spill + str x0, [sp, #128] // 8-byte Folded Spill + sub x8, x29, #160 // =160 + mov x0, x23 + mov x1, x9 + bl .LmulPv576x64 + ldur x24, [x29, #-160] + ldur x8, [x29, #-88] + str x8, [sp, #120] // 8-byte Folded Spill + ldur x8, [x29, #-96] + str x8, [sp, #112] // 8-byte Folded Spill + ldur x8, [x29, #-104] + str x8, [sp, #104] // 8-byte Folded Spill + ldur x8, [x29, #-112] + str x8, [sp, #96] // 8-byte Folded Spill + ldur x8, [x29, #-120] + str x8, [sp, #88] // 8-byte Folded Spill + ldur x8, [x29, #-128] + str x8, [sp, #80] // 8-byte Folded Spill + ldur x8, [x29, #-136] + str x8, [sp, #72] // 8-byte Folded Spill + ldur x8, [x29, #-144] + str x8, [sp, #64] // 8-byte Folded Spill + ldur x8, [x29, #-152] + str x8, [sp, #48] // 8-byte Folded Spill + mul x1, x24, x19 + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv576x64 + ldur x8, [x29, #-168] + str x8, [sp, #56] // 8-byte Folded Spill + ldur x8, [x29, #-176] + str x8, [sp, #40] // 8-byte Folded Spill + ldur x8, [x29, #-184] + str x8, [sp, #32] // 8-byte Folded Spill + ldur x8, [x29, #-192] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x21, x19, [x29, #-208] + ldp x26, x22, [x29, #-224] + ldp x27, x25, [x29, #-240] + ldr x1, [x28, #8] + add x8, sp, #1360 // =1360 + mov x0, x23 + bl .LmulPv576x64 + cmn x27, x24 + ldr x8, [sp, #1432] + ldr x9, [sp, #1424] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x25, x10 
+ ldr x11, [sp, #1416] + ldp x12, x14, [sp, #64] + adcs x12, x26, x12 + ldr x13, [sp, #1408] + adcs x14, x22, x14 + ldr x15, [sp, #1400] + ldp x16, x18, [sp, #80] + adcs x16, x21, x16 + ldr x17, [sp, #1392] + adcs x18, x19, x18 + ldr x0, [sp, #1384] + ldp x1, x3, [sp, #96] + ldp x2, x4, [sp, #24] + adcs x1, x2, x1 + ldr x2, [sp, #1376] + adcs x3, x4, x3 + ldr x4, [sp, #1360] + ldp x5, x7, [sp, #112] + ldr x6, [sp, #40] // 8-byte Folded Reload + adcs x5, x6, x5 + ldr x6, [sp, #1368] + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x7, x19, x7 + adcs x19, xzr, xzr + adds x21, x10, x4 + adcs x10, x12, x6 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x2 + str x10, [sp, #104] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #96] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + adcs x8, x19, x8 + stp x8, x9, [sp, #112] + adcs x8, xzr, xzr + stp x8, x10, [sp, #56] + ldr x24, [sp, #144] // 8-byte Folded Reload + mul x1, x21, x24 + add x8, sp, #1280 // =1280 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #1352] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #1344] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1336] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1328] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x26, [sp, #1320] + ldr x27, [sp, #1312] + ldr x28, [sp, #1304] + ldr x22, [sp, #1296] + ldr x19, [sp, #1288] + ldr x23, [sp, #1280] + ldr x25, [sp, #136] // 8-byte Folded Reload + ldr x1, [x25, #16] + add x8, sp, #1200 // =1200 + ldr x0, [sp, #152] // 8-byte Folded Reload + bl .LmulPv576x64 + cmn x21, x23 + ldr x8, [sp, #1272] + ldr x9, [sp, #1264] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x10, x19 + ldr x11, [sp, #1256] + ldp x14, x12, [sp, #96] + adcs x12, x12, x22 + ldr x13, [sp, #1248] + adcs x14, x14, x28 + ldr x15, [sp, #1240] + ldp x18, x16, [sp, #80] + adcs x16, x16, x27 + ldr x17, [sp, #1232] + adcs x18, x18, x26 + ldr x0, [sp, #1224] + ldp x3, x1, [sp, #64] + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #1216] + adcs x3, x3, x4 + ldr x4, [sp, #1200] + ldp x7, x5, [sp, #112] + ldp x6, x19, [sp, #32] + adcs x5, x5, x6 + ldr x6, [sp, #1208] + adcs x7, x7, x19 + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x19, x19, xzr + adds x21, x10, x4 + adcs x10, x12, x6 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x2 + str x10, [sp, #104] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #96] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + adcs x8, x19, x8 + stp x8, x9, [sp, #112] + adcs x8, xzr, xzr + stp x8, x10, [sp, #56] + mul x1, x21, x24 + add x8, sp, #1120 // =1120 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #1192] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #1184] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1176] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1168] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x26, [sp, #1160] + ldr x27, [sp, #1152] + ldr x28, [sp, #1144] + ldr x22, [sp, #1136] + ldr x19, [sp, #1128] + ldr x23, [sp, #1120] + ldr x1, [x25, #24] + add x8, sp, #1040 // =1040 + ldr x24, [sp, #152] // 
8-byte Folded Reload + mov x0, x24 + bl .LmulPv576x64 + cmn x21, x23 + ldr x8, [sp, #1112] + ldr x9, [sp, #1104] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x10, x19 + ldr x11, [sp, #1096] + ldp x14, x12, [sp, #96] + adcs x12, x12, x22 + ldr x13, [sp, #1088] + adcs x14, x14, x28 + ldr x15, [sp, #1080] + ldp x18, x16, [sp, #80] + adcs x16, x16, x27 + ldr x17, [sp, #1072] + adcs x18, x18, x26 + ldr x0, [sp, #1064] + ldp x3, x1, [sp, #64] + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #1056] + adcs x3, x3, x4 + ldr x4, [sp, #1040] + ldp x7, x5, [sp, #112] + ldp x6, x19, [sp, #32] + adcs x5, x5, x6 + ldr x6, [sp, #1048] + adcs x7, x7, x19 + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x19, x19, xzr + adds x21, x10, x4 + adcs x10, x12, x6 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x2 + str x10, [sp, #104] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #96] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + adcs x8, x19, x8 + stp x8, x9, [sp, #112] + adcs x8, xzr, xzr + stp x8, x10, [sp, #56] + ldr x8, [sp, #144] // 8-byte Folded Reload + mul x1, x21, x8 + add x8, sp, #960 // =960 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #1032] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #1024] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1016] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1008] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x26, [sp, #1000] + ldr x27, [sp, #992] + ldr x28, [sp, #984] + ldr x22, [sp, #976] + ldr x19, [sp, #968] + ldr x23, [sp, #960] + ldr x1, [x25, #32] + add x8, sp, #880 // =880 + mov x0, x24 + bl .LmulPv576x64 + cmn x21, x23 + ldr x8, [sp, #952] + ldr x9, [sp, #944] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x10, x19 + ldr x11, [sp, #936] + ldp x14, x12, [sp, #96] + adcs x12, x12, x22 + ldr x13, [sp, #928] + adcs x14, x14, x28 + ldr x15, [sp, #920] + ldp x18, x16, [sp, #80] + adcs x16, x16, x27 + ldr x17, [sp, #912] + adcs x18, x18, x26 + ldr x0, [sp, #904] + ldp x3, x1, [sp, #64] + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #896] + adcs x3, x3, x4 + ldr x4, [sp, #880] + ldp x7, x5, [sp, #112] + ldp x6, x19, [sp, #32] + adcs x5, x5, x6 + ldr x6, [sp, #888] + adcs x7, x7, x19 + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x19, x19, xzr + adds x21, x10, x4 + adcs x10, x12, x6 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x2 + str x10, [sp, #104] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #96] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + adcs x8, x19, x8 + stp x8, x9, [sp, #112] + adcs x8, xzr, xzr + stp x8, x10, [sp, #56] + ldr x25, [sp, #144] // 8-byte Folded Reload + mul x1, x21, x25 + add x8, sp, #800 // =800 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #872] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #864] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #856] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #848] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x26, [sp, #840] + ldr x27, [sp, #832] + ldr x28, [sp, #824] + ldr x22, [sp, #816] + ldr x19, [sp, #808] 
+ ldr x23, [sp, #800] + ldr x24, [sp, #136] // 8-byte Folded Reload + ldr x1, [x24, #40] + add x8, sp, #720 // =720 + ldr x0, [sp, #152] // 8-byte Folded Reload + bl .LmulPv576x64 + cmn x21, x23 + ldr x8, [sp, #792] + ldr x9, [sp, #784] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x10, x19 + ldr x11, [sp, #776] + ldp x14, x12, [sp, #96] + adcs x12, x12, x22 + ldr x13, [sp, #768] + adcs x14, x14, x28 + ldr x15, [sp, #760] + ldp x18, x16, [sp, #80] + adcs x16, x16, x27 + ldr x17, [sp, #752] + adcs x18, x18, x26 + ldr x0, [sp, #744] + ldp x3, x1, [sp, #64] + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #736] + adcs x3, x3, x4 + ldr x4, [sp, #720] + ldp x7, x5, [sp, #112] + ldp x6, x19, [sp, #32] + adcs x5, x5, x6 + ldr x6, [sp, #728] + adcs x7, x7, x19 + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x19, x19, xzr + adds x21, x10, x4 + adcs x10, x12, x6 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x2 + str x10, [sp, #104] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #96] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + adcs x8, x19, x8 + stp x8, x9, [sp, #112] + adcs x8, xzr, xzr + stp x8, x10, [sp, #56] + mul x1, x21, x25 + add x8, sp, #640 // =640 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #712] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #704] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #696] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #688] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x26, [sp, #680] + ldr x27, [sp, #672] + ldr x28, [sp, #664] + ldr x22, [sp, #656] + ldr x19, [sp, #648] + ldr x23, [sp, #640] + ldr x1, [x24, #48] + add x8, sp, #560 // =560 + ldr x25, [sp, #152] // 8-byte Folded Reload + mov x0, x25 + bl .LmulPv576x64 + cmn x21, x23 + ldr x8, [sp, #632] + ldr x9, [sp, #624] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x10, x19 + ldr x11, [sp, #616] + ldp x14, x12, [sp, #96] + adcs x12, x12, x22 + ldr x13, [sp, #608] + adcs x14, x14, x28 + ldr x15, [sp, #600] + ldp x18, x16, [sp, #80] + adcs x16, x16, x27 + ldr x17, [sp, #592] + adcs x18, x18, x26 + ldr x0, [sp, #584] + ldp x3, x1, [sp, #64] + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #576] + adcs x3, x3, x4 + ldr x4, [sp, #560] + ldp x7, x5, [sp, #112] + ldp x6, x19, [sp, #32] + adcs x5, x5, x6 + ldr x6, [sp, #568] + adcs x7, x7, x19 + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x19, x19, xzr + adds x21, x10, x4 + adcs x10, x12, x6 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x2 + str x10, [sp, #104] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #96] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + adcs x8, x19, x8 + stp x8, x9, [sp, #112] + adcs x8, xzr, xzr + stp x8, x10, [sp, #56] + ldr x24, [sp, #144] // 8-byte Folded Reload + mul x1, x21, x24 + add x8, sp, #480 // =480 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #552] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #544] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #536] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #528] + str x8, [sp, #16] // 8-byte Folded 
Spill + ldr x26, [sp, #520] + ldr x27, [sp, #512] + ldp x22, x28, [sp, #496] + ldp x23, x19, [sp, #480] + ldr x8, [sp, #136] // 8-byte Folded Reload + ldr x1, [x8, #56] + add x8, sp, #400 // =400 + mov x0, x25 + bl .LmulPv576x64 + cmn x21, x23 + ldp x9, x8, [sp, #464] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x10, x19 + ldp x13, x11, [sp, #448] + ldp x14, x12, [sp, #96] + adcs x12, x12, x22 + adcs x14, x14, x28 + ldp x17, x15, [sp, #432] + ldp x18, x16, [sp, #80] + adcs x16, x16, x27 + adcs x18, x18, x26 + ldp x3, x1, [sp, #64] + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldp x2, x0, [sp, #416] + adcs x3, x3, x4 + ldp x7, x5, [sp, #112] + ldp x6, x19, [sp, #32] + adcs x5, x5, x6 + ldp x4, x6, [sp, #400] + adcs x7, x7, x19 + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x19, x19, xzr + adds x21, x10, x4 + adcs x10, x12, x6 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x2 + str x10, [sp, #104] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #96] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + adcs x8, x19, x8 + stp x8, x9, [sp, #112] + adcs x8, xzr, xzr + stp x8, x10, [sp, #56] + mul x1, x21, x24 + add x8, sp, #320 // =320 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #392] + str x8, [sp, #40] // 8-byte Folded Spill + ldp x24, x8, [sp, #376] + str x8, [sp, #32] // 8-byte Folded Spill + ldp x26, x25, [sp, #360] + ldp x28, x27, [sp, #344] + ldp x19, x22, [sp, #328] + ldr x23, [sp, #320] + ldr x8, [sp, #136] // 8-byte Folded Reload + ldr x1, [x8, #64] + add x8, sp, #240 // =240 + ldr x0, [sp, #152] // 8-byte Folded Reload + bl .LmulPv576x64 + cmn x21, x23 + ldp x9, x8, [sp, #304] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x10, x19 + ldp x13, x11, [sp, #288] + ldp x14, x12, [sp, #96] + adcs x12, x12, x22 + adcs x14, x14, x28 + ldp x17, x15, [sp, #272] + ldp x18, x16, [sp, #80] + adcs x16, x16, x27 + adcs x18, x18, x26 + ldp x2, x0, [sp, #256] + ldp x3, x1, [sp, #64] + adcs x1, x1, x25 + adcs x3, x3, x24 + ldp x7, x5, [sp, #112] + ldp x6, x19, [sp, #32] + adcs x5, x5, x6 + ldp x4, x6, [sp, #240] + adcs x7, x7, x19 + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x19, x19, xzr + adds x21, x10, x4 + adcs x22, x12, x6 + adcs x23, x14, x2 + adcs x24, x16, x0 + adcs x25, x18, x17 + adcs x26, x1, x15 + adcs x27, x3, x13 + adcs x10, x5, x11 + str x10, [sp, #152] // 8-byte Folded Spill + adcs x9, x7, x9 + str x9, [sp, #136] // 8-byte Folded Spill + adcs x19, x19, x8 + adcs x28, xzr, xzr + ldr x8, [sp, #144] // 8-byte Folded Reload + mul x1, x21, x8 + add x8, sp, #160 // =160 + mov x0, x20 + bl .LmulPv576x64 + ldp x16, x8, [sp, #224] + ldp x9, x10, [sp, #160] + ldp x11, x12, [sp, #176] + cmn x21, x9 + ldp x13, x9, [sp, #192] + adcs x10, x22, x10 + ldp x14, x15, [sp, #208] + adcs x11, x23, x11 + adcs x12, x24, x12 + adcs x13, x25, x13 + adcs x9, x26, x9 + adcs x14, x27, x14 + ldp x0, x17, [x20, #56] + ldp x2, x18, [x20, #40] + ldp x4, x1, [x20, #24] + ldp x6, x3, [x20, #8] + ldr x5, [x20] + ldr x7, [sp, #152] // 8-byte Folded Reload + adcs x15, x7, x15 + ldr x7, [sp, #136] // 8-byte Folded Reload + adcs x16, x7, x16 + adcs x8, x19, x8 + adcs x7, x28, xzr + subs x5, x10, x5 + sbcs x6, x11, x6 + sbcs x3, x12, x3 + sbcs x4, x13, x4 + sbcs x1, x9, x1 + sbcs x2, x14, x2 + sbcs x18, x15, x18 + sbcs x0, x16, x0 + sbcs x17, x8, x17 + sbcs x7, x7, 
xzr + tst x7, #0x1 + csel x10, x10, x5, ne + csel x11, x11, x6, ne + csel x12, x12, x3, ne + csel x13, x13, x4, ne + csel x9, x9, x1, ne + csel x14, x14, x2, ne + csel x15, x15, x18, ne + csel x16, x16, x0, ne + csel x8, x8, x17, ne + ldr x17, [sp, #128] // 8-byte Folded Reload + stp x10, x11, [x17] + stp x12, x13, [x17, #16] + stp x9, x14, [x17, #32] + stp x15, x16, [x17, #48] + str x8, [x17, #64] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end130: + .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L + + .globl mcl_fp_montNF9L + .align 2 + .type mcl_fp_montNF9L,@function +mcl_fp_montNF9L: // @mcl_fp_montNF9L +// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #1584 // =1584 + mov x20, x3 + mov x28, x2 + str x28, [sp, #120] // 8-byte Folded Spill + ldur x19, [x20, #-8] + str x19, [sp, #128] // 8-byte Folded Spill + ldr x9, [x28] + mov x23, x1 + str x23, [sp, #136] // 8-byte Folded Spill + str x0, [sp, #112] // 8-byte Folded Spill + sub x8, x29, #160 // =160 + mov x0, x23 + mov x1, x9 + bl .LmulPv576x64 + ldur x24, [x29, #-160] + ldur x8, [x29, #-88] + str x8, [sp, #104] // 8-byte Folded Spill + ldur x8, [x29, #-96] + str x8, [sp, #96] // 8-byte Folded Spill + ldur x8, [x29, #-104] + str x8, [sp, #88] // 8-byte Folded Spill + ldur x8, [x29, #-112] + str x8, [sp, #80] // 8-byte Folded Spill + ldur x8, [x29, #-120] + str x8, [sp, #72] // 8-byte Folded Spill + ldur x8, [x29, #-128] + str x8, [sp, #64] // 8-byte Folded Spill + ldur x8, [x29, #-136] + str x8, [sp, #56] // 8-byte Folded Spill + ldur x8, [x29, #-144] + str x8, [sp, #48] // 8-byte Folded Spill + ldur x8, [x29, #-152] + str x8, [sp, #32] // 8-byte Folded Spill + mul x1, x24, x19 + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv576x64 + ldur x8, [x29, #-168] + str x8, [sp, #40] // 8-byte Folded Spill + ldur x8, [x29, #-176] + str x8, [sp, #24] // 8-byte Folded Spill + ldur x8, [x29, #-184] + str x8, [sp, #16] // 8-byte Folded Spill + ldur x8, [x29, #-192] + str x8, [sp, #8] // 8-byte Folded Spill + ldp x21, x19, [x29, #-208] + ldp x26, x22, [x29, #-224] + ldp x27, x25, [x29, #-240] + ldr x1, [x28, #8] + add x8, sp, #1344 // =1344 + mov x0, x23 + bl .LmulPv576x64 + cmn x27, x24 + ldr x8, [sp, #1416] + ldr x9, [sp, #1408] + ldr x10, [sp, #32] // 8-byte Folded Reload + adcs x10, x25, x10 + ldr x11, [sp, #1400] + ldp x12, x14, [sp, #48] + adcs x12, x26, x12 + ldr x13, [sp, #1392] + adcs x14, x22, x14 + ldr x15, [sp, #1384] + ldp x16, x18, [sp, #64] + adcs x16, x21, x16 + ldr x17, [sp, #1376] + adcs x18, x19, x18 + ldr x0, [sp, #1368] + ldp x1, x3, [sp, #80] + ldp x2, x4, [sp, #8] + adcs x1, x2, x1 + ldr x2, [sp, #1352] + adcs x3, x4, x3 + ldr x4, [sp, #1344] + ldp x5, x7, [sp, #96] + ldr x6, [sp, #24] // 8-byte Folded Reload + adcs x5, x6, x5 + ldr x6, [sp, #1360] + ldr x19, [sp, #40] // 8-byte Folded Reload + adcs x7, x19, x7 + adds x19, x10, x4 + adcs x10, x12, x2 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x6 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #48] // 8-byte Folded Spill + adcs 
x10, x5, x11 + adcs x9, x7, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x22, [sp, #128] // 8-byte Folded Reload + mul x1, x19, x22 + add x8, sp, #1264 // =1264 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #1336] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1328] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1320] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #1312] + str x8, [sp, #8] // 8-byte Folded Spill + ldr x24, [sp, #1304] + ldr x25, [sp, #1296] + ldr x26, [sp, #1288] + ldr x21, [sp, #1280] + ldr x27, [sp, #1272] + ldr x28, [sp, #1264] + ldr x23, [sp, #120] // 8-byte Folded Reload + ldr x1, [x23, #16] + add x8, sp, #1184 // =1184 + ldr x0, [sp, #136] // 8-byte Folded Reload + bl .LmulPv576x64 + cmn x19, x28 + ldr x8, [sp, #1256] + ldr x9, [sp, #1248] + ldp x10, x1, [sp, #40] + adcs x10, x10, x27 + ldr x11, [sp, #1240] + ldp x14, x12, [sp, #72] + adcs x12, x12, x21 + ldr x13, [sp, #1232] + adcs x14, x14, x26 + ldr x15, [sp, #1224] + ldp x18, x16, [sp, #56] + adcs x16, x16, x25 + ldr x17, [sp, #1216] + adcs x18, x18, x24 + ldr x0, [sp, #1208] + ldp x2, x4, [sp, #8] + adcs x1, x1, x2 + ldr x2, [sp, #1192] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldr x4, [sp, #1184] + ldp x6, x19, [sp, #24] + adcs x5, x5, x6 + ldr x6, [sp, #1200] + ldr x7, [sp, #88] // 8-byte Folded Reload + adcs x7, x7, x19 + adds x19, x10, x4 + adcs x10, x12, x2 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x6 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + mul x1, x19, x22 + add x8, sp, #1104 // =1104 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #1176] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1168] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1160] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #1152] + str x8, [sp, #8] // 8-byte Folded Spill + ldr x24, [sp, #1144] + ldr x25, [sp, #1136] + ldr x26, [sp, #1128] + ldr x21, [sp, #1120] + ldr x27, [sp, #1112] + ldr x28, [sp, #1104] + ldr x1, [x23, #24] + add x8, sp, #1024 // =1024 + ldr x22, [sp, #136] // 8-byte Folded Reload + mov x0, x22 + bl .LmulPv576x64 + cmn x19, x28 + ldr x8, [sp, #1096] + ldr x9, [sp, #1088] + ldp x10, x1, [sp, #40] + adcs x10, x10, x27 + ldr x11, [sp, #1080] + ldp x14, x12, [sp, #72] + adcs x12, x12, x21 + ldr x13, [sp, #1072] + adcs x14, x14, x26 + ldr x15, [sp, #1064] + ldp x18, x16, [sp, #56] + adcs x16, x16, x25 + ldr x17, [sp, #1056] + adcs x18, x18, x24 + ldr x0, [sp, #1048] + ldp x2, x4, [sp, #8] + adcs x1, x1, x2 + ldr x2, [sp, #1032] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldr x4, [sp, #1024] + ldp x6, x19, [sp, #24] + adcs x5, x5, x6 + ldr x6, [sp, #1040] + ldr x7, [sp, #88] // 8-byte Folded Reload + adcs x7, x7, x19 + adds x19, x10, x4 + adcs x10, x12, x2 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x6 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #48] // 8-byte 
Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #128] // 8-byte Folded Reload + mul x1, x19, x8 + add x8, sp, #944 // =944 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #1016] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1008] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1000] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #992] + str x8, [sp, #8] // 8-byte Folded Spill + ldr x24, [sp, #984] + ldr x25, [sp, #976] + ldr x26, [sp, #968] + ldr x21, [sp, #960] + ldr x27, [sp, #952] + ldr x28, [sp, #944] + ldr x1, [x23, #32] + add x8, sp, #864 // =864 + mov x0, x22 + bl .LmulPv576x64 + cmn x19, x28 + ldr x8, [sp, #936] + ldr x9, [sp, #928] + ldp x10, x1, [sp, #40] + adcs x10, x10, x27 + ldr x11, [sp, #920] + ldp x14, x12, [sp, #72] + adcs x12, x12, x21 + ldr x13, [sp, #912] + adcs x14, x14, x26 + ldr x15, [sp, #904] + ldp x18, x16, [sp, #56] + adcs x16, x16, x25 + ldr x17, [sp, #896] + adcs x18, x18, x24 + ldr x0, [sp, #888] + ldp x2, x4, [sp, #8] + adcs x1, x1, x2 + ldr x2, [sp, #872] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldr x4, [sp, #864] + ldp x6, x19, [sp, #24] + adcs x5, x5, x6 + ldr x6, [sp, #880] + ldr x7, [sp, #88] // 8-byte Folded Reload + adcs x7, x7, x19 + adds x19, x10, x4 + adcs x10, x12, x2 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x6 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x23, [sp, #128] // 8-byte Folded Reload + mul x1, x19, x23 + add x8, sp, #784 // =784 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #856] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #848] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #840] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #832] + str x8, [sp, #8] // 8-byte Folded Spill + ldr x24, [sp, #824] + ldr x25, [sp, #816] + ldr x26, [sp, #808] + ldr x21, [sp, #800] + ldr x27, [sp, #792] + ldr x28, [sp, #784] + ldr x22, [sp, #120] // 8-byte Folded Reload + ldr x1, [x22, #40] + add x8, sp, #704 // =704 + ldr x0, [sp, #136] // 8-byte Folded Reload + bl .LmulPv576x64 + cmn x19, x28 + ldr x8, [sp, #776] + ldr x9, [sp, #768] + ldp x10, x1, [sp, #40] + adcs x10, x10, x27 + ldr x11, [sp, #760] + ldp x14, x12, [sp, #72] + adcs x12, x12, x21 + ldr x13, [sp, #752] + adcs x14, x14, x26 + ldr x15, [sp, #744] + ldp x18, x16, [sp, #56] + adcs x16, x16, x25 + ldr x17, [sp, #736] + adcs x18, x18, x24 + ldr x0, [sp, #728] + ldp x2, x4, [sp, #8] + adcs x1, x1, x2 + ldr x2, [sp, #712] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldr x4, [sp, #704] + ldp x6, x19, [sp, #24] + adcs x5, x5, x6 + ldr x6, [sp, #720] + ldr x7, [sp, #88] // 8-byte Folded Reload + adcs x7, x7, x19 + adds x19, x10, x4 + adcs x10, x12, x2 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x6 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x5, 
x11 + adcs x9, x7, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + mul x1, x19, x23 + add x8, sp, #624 // =624 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #696] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #688] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #680] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #672] + str x8, [sp, #8] // 8-byte Folded Spill + ldr x24, [sp, #664] + ldr x25, [sp, #656] + ldr x26, [sp, #648] + ldr x21, [sp, #640] + ldr x27, [sp, #632] + ldr x28, [sp, #624] + ldr x1, [x22, #48] + add x8, sp, #544 // =544 + ldr x23, [sp, #136] // 8-byte Folded Reload + mov x0, x23 + bl .LmulPv576x64 + cmn x19, x28 + ldr x8, [sp, #616] + ldr x9, [sp, #608] + ldp x10, x1, [sp, #40] + adcs x10, x10, x27 + ldr x11, [sp, #600] + ldp x14, x12, [sp, #72] + adcs x12, x12, x21 + ldr x13, [sp, #592] + adcs x14, x14, x26 + ldr x15, [sp, #584] + ldp x18, x16, [sp, #56] + adcs x16, x16, x25 + ldr x17, [sp, #576] + adcs x18, x18, x24 + ldr x0, [sp, #568] + ldp x2, x4, [sp, #8] + adcs x1, x1, x2 + ldr x2, [sp, #552] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldr x4, [sp, #544] + ldp x6, x19, [sp, #24] + adcs x5, x5, x6 + ldr x6, [sp, #560] + ldr x7, [sp, #88] // 8-byte Folded Reload + adcs x7, x7, x19 + adds x19, x10, x4 + adcs x10, x12, x2 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x6 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x22, [sp, #128] // 8-byte Folded Reload + mul x1, x19, x22 + add x8, sp, #464 // =464 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #536] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #528] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #520] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #512] + str x8, [sp, #8] // 8-byte Folded Spill + ldp x25, x24, [sp, #496] + ldp x21, x26, [sp, #480] + ldp x28, x27, [sp, #464] + ldr x8, [sp, #120] // 8-byte Folded Reload + ldr x1, [x8, #56] + add x8, sp, #384 // =384 + mov x0, x23 + bl .LmulPv576x64 + cmn x19, x28 + ldp x9, x8, [sp, #448] + ldp x10, x1, [sp, #40] + adcs x10, x10, x27 + ldp x13, x11, [sp, #432] + ldp x14, x12, [sp, #72] + adcs x12, x12, x21 + adcs x14, x14, x26 + ldp x17, x15, [sp, #416] + ldp x18, x16, [sp, #56] + adcs x16, x16, x25 + adcs x18, x18, x24 + ldp x2, x4, [sp, #8] + adcs x1, x1, x2 + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldp x4, x2, [sp, #384] + ldp x6, x19, [sp, #24] + adcs x5, x5, x6 + ldp x6, x0, [sp, #400] + ldr x7, [sp, #88] // 8-byte Folded Reload + adcs x7, x7, x19 + adds x19, x10, x4 + adcs x10, x12, x2 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x6 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + mul x1, x19, x22 + add x8, sp, #304 // =304 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, 
[sp, #376] + str x8, [sp, #32] // 8-byte Folded Spill + ldp x22, x8, [sp, #360] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x24, x23, [sp, #344] + ldp x26, x25, [sp, #328] + ldp x27, x21, [sp, #312] + ldr x28, [sp, #304] + ldr x8, [sp, #120] // 8-byte Folded Reload + ldr x1, [x8, #64] + add x8, sp, #224 // =224 + ldr x0, [sp, #136] // 8-byte Folded Reload + bl .LmulPv576x64 + cmn x19, x28 + ldp x9, x8, [sp, #288] + ldp x10, x1, [sp, #40] + adcs x10, x10, x27 + ldp x13, x11, [sp, #272] + ldp x14, x12, [sp, #72] + adcs x12, x12, x21 + adcs x14, x14, x26 + ldp x17, x15, [sp, #256] + ldp x18, x16, [sp, #56] + adcs x16, x16, x25 + adcs x18, x18, x24 + adcs x1, x1, x23 + ldp x4, x2, [sp, #224] + ldp x5, x3, [sp, #96] + adcs x3, x3, x22 + ldp x6, x19, [sp, #24] + adcs x5, x5, x6 + ldp x6, x0, [sp, #240] + ldr x7, [sp, #88] // 8-byte Folded Reload + adcs x7, x7, x19 + adds x19, x10, x4 + adcs x21, x12, x2 + adcs x22, x14, x6 + adcs x23, x16, x0 + adcs x24, x18, x17 + adcs x25, x1, x15 + adcs x26, x3, x13 + adcs x10, x5, x11 + str x10, [sp, #136] // 8-byte Folded Spill + adcs x28, x7, x9 + adcs x27, x8, xzr + ldr x8, [sp, #128] // 8-byte Folded Reload + mul x1, x19, x8 + add x8, sp, #144 // =144 + mov x0, x20 + bl .LmulPv576x64 + ldp x16, x8, [sp, #208] + ldp x9, x10, [sp, #144] + ldp x11, x12, [sp, #160] + cmn x19, x9 + ldp x13, x9, [sp, #176] + adcs x10, x21, x10 + ldp x14, x15, [sp, #192] + adcs x11, x22, x11 + adcs x12, x23, x12 + adcs x13, x24, x13 + adcs x9, x25, x9 + adcs x14, x26, x14 + ldp x0, x17, [x20, #56] + ldp x2, x18, [x20, #40] + ldp x4, x1, [x20, #24] + ldp x6, x3, [x20, #8] + ldr x5, [x20] + ldr x7, [sp, #136] // 8-byte Folded Reload + adcs x15, x7, x15 + adcs x16, x28, x16 + adcs x8, x27, x8 + subs x5, x10, x5 + sbcs x6, x11, x6 + sbcs x3, x12, x3 + sbcs x4, x13, x4 + sbcs x1, x9, x1 + sbcs x2, x14, x2 + sbcs x18, x15, x18 + sbcs x0, x16, x0 + sbcs x17, x8, x17 + asr x7, x17, #63 + cmp x7, #0 // =0 + csel x10, x10, x5, lt + csel x11, x11, x6, lt + csel x12, x12, x3, lt + csel x13, x13, x4, lt + csel x9, x9, x1, lt + csel x14, x14, x2, lt + csel x15, x15, x18, lt + csel x16, x16, x0, lt + csel x8, x8, x17, lt + ldr x17, [sp, #112] // 8-byte Folded Reload + stp x10, x11, [x17] + stp x12, x13, [x17, #16] + stp x9, x14, [x17, #32] + stp x15, x16, [x17, #48] + str x8, [x17, #64] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end131: + .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L + + .globl mcl_fp_montRed9L + .align 2 + .type mcl_fp_montRed9L,@function +mcl_fp_montRed9L: // @mcl_fp_montRed9L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #912 // =912 + mov x20, x2 + ldur x9, [x20, #-8] + str x9, [sp, #40] // 8-byte Folded Spill + ldr x8, [x20, #64] + str x8, [sp, #184] // 8-byte Folded Spill + ldr x8, [x20, #48] + str x8, [sp, #168] // 8-byte Folded Spill + ldr x8, [x20, #56] + str x8, [sp, #176] // 8-byte Folded Spill + ldr x8, [x20, #32] + str x8, [sp, #144] // 8-byte Folded Spill + ldr x8, [x20, #40] + str x8, [sp, #152] // 8-byte Folded Spill + ldr x8, [x20, #16] + str x8, [sp, #128] // 8-byte Folded Spill + ldr x8, [x20, #24] + str x8, [sp, #136] // 8-byte Folded Spill + ldr x8, [x20] + str x8, [sp, #112] // 8-byte Folded Spill + ldr x8, [x20, #8] + str x8, [sp, #120] // 8-byte Folded Spill + ldr x8, [x1, #128] + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [x1, #136] + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [x1, #112] + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [x1, #120] + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [x1, #96] + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [x1, #104] + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [x1, #80] + str x8, [sp, #48] // 8-byte Folded Spill + ldr x8, [x1, #88] + str x8, [sp, #56] // 8-byte Folded Spill + ldp x23, x8, [x1, #64] + str x8, [sp, #16] // 8-byte Folded Spill + ldp x25, x19, [x1, #48] + ldp x28, x27, [x1, #32] + ldp x22, x24, [x1, #16] + ldp x21, x26, [x1] + str x0, [sp, #160] // 8-byte Folded Spill + mul x1, x21, x9 + sub x8, x29, #160 // =160 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [x29, #-96] + ldp x11, x10, [x29, #-112] + ldp x13, x12, [x29, #-128] + ldp x14, x15, [x29, #-160] + ldp x16, x17, [x29, #-144] + cmn x21, x14 + adcs x21, x26, x15 + adcs x14, x22, x16 + adcs x24, x24, x17 + adcs x26, x28, x13 + adcs x27, x27, x12 + adcs x25, x25, x11 + adcs x10, x19, x10 + stp x10, x14, [sp, #24] + adcs x23, x23, x9 + ldr x9, [sp, #16] // 8-byte Folded Reload + adcs x28, x9, x8 + ldr x8, [sp, #48] // 8-byte Folded Reload + adcs x22, x8, xzr + ldr x8, [sp, #56] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #56] // 8-byte Folded Spill + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [sp, #88] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + adcs x8, xzr, xzr + str x8, [sp, #48] // 8-byte Folded Spill + ldr x19, [sp, #40] // 8-byte Folded Reload + mul x1, x21, x19 + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [x29, #-176] + ldp x11, x10, [x29, #-192] + ldp x13, x12, [x29, #-208] + ldp x14, x15, [x29, #-240] + ldp x16, x17, [x29, #-224] + cmn x21, x14 + ldr x14, [sp, #32] // 8-byte Folded Reload + adcs x21, x14, x15 + adcs x14, x24, x16 + adcs x26, x26, x17 + adcs x27, x27, x13 + adcs x25, x25, x12 + ldr x12, [sp, #24] // 8-byte Folded Reload + adcs x11, x12, x11 + stp x11, x14, [sp, #24] + adcs x23, x23, x10 + adcs x28, x28, x9 + adcs x22, x22, x8 + ldr x8, [sp, #56] // 8-byte Folded Reload 
+ adcs x24, x8, xzr + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [sp, #88] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [sp, #48] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #56] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #672 // =672 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #744] + ldr x9, [sp, #736] + ldr x10, [sp, #728] + ldr x11, [sp, #720] + ldr x12, [sp, #712] + ldr x13, [sp, #704] + ldr x14, [sp, #672] + ldr x15, [sp, #680] + ldr x16, [sp, #688] + ldr x17, [sp, #696] + cmn x21, x14 + ldr x14, [sp, #32] // 8-byte Folded Reload + adcs x21, x14, x15 + adcs x14, x26, x16 + str x14, [sp, #48] // 8-byte Folded Spill + adcs x27, x27, x17 + adcs x25, x25, x13 + ldr x13, [sp, #24] // 8-byte Folded Reload + adcs x12, x13, x12 + str x12, [sp, #32] // 8-byte Folded Spill + adcs x23, x23, x11 + adcs x28, x28, x10 + adcs x22, x22, x9 + adcs x24, x24, x8 + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x26, x8, xzr + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [sp, #88] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [sp, #56] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #592 // =592 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #664] + ldr x9, [sp, #656] + ldr x10, [sp, #648] + ldr x11, [sp, #640] + ldr x12, [sp, #632] + ldr x13, [sp, #624] + ldr x14, [sp, #592] + ldr x15, [sp, #600] + ldr x16, [sp, #608] + ldr x17, [sp, #616] + cmn x21, x14 + ldr x14, [sp, #48] // 8-byte Folded Reload + adcs x21, x14, x15 + adcs x14, x27, x16 + str x14, [sp, #56] // 8-byte Folded Spill + adcs x25, x25, x17 + ldr x14, [sp, #32] // 8-byte Folded Reload + adcs x13, x14, x13 + str x13, [sp, #48] // 8-byte Folded Spill + adcs x23, x23, x12 + adcs x28, x28, x11 + adcs x22, x22, x10 + adcs x24, x24, x9 + adcs x26, x26, x8 + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x27, x8, xzr + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [sp, #88] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #512 // =512 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #584] + ldr x9, [sp, #576] + ldr x10, [sp, 
#568] + ldr x11, [sp, #560] + ldr x12, [sp, #552] + ldr x13, [sp, #544] + ldr x14, [sp, #512] + ldr x15, [sp, #520] + ldr x16, [sp, #528] + ldr x17, [sp, #536] + cmn x21, x14 + ldr x14, [sp, #56] // 8-byte Folded Reload + adcs x21, x14, x15 + adcs x14, x25, x16 + str x14, [sp, #64] // 8-byte Folded Spill + ldr x14, [sp, #48] // 8-byte Folded Reload + adcs x14, x14, x17 + str x14, [sp, #56] // 8-byte Folded Spill + adcs x23, x23, x13 + adcs x28, x28, x12 + adcs x22, x22, x11 + adcs x24, x24, x10 + adcs x26, x26, x9 + adcs x27, x27, x8 + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x25, x8, xzr + ldr x8, [sp, #88] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #432 // =432 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #496] + ldp x11, x10, [sp, #480] + ldp x13, x12, [sp, #464] + ldp x14, x15, [sp, #432] + ldp x16, x17, [sp, #448] + cmn x21, x14 + ldr x14, [sp, #64] // 8-byte Folded Reload + adcs x21, x14, x15 + ldr x14, [sp, #56] // 8-byte Folded Reload + adcs x14, x14, x16 + adcs x23, x23, x17 + adcs x28, x28, x13 + adcs x22, x22, x12 + adcs x24, x24, x11 + adcs x26, x26, x10 + adcs x27, x27, x9 + adcs x25, x25, x8 + ldr x8, [sp, #88] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + stp x14, x8, [sp, #72] + mul x1, x21, x19 + add x8, sp, #352 // =352 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #416] + ldp x11, x10, [sp, #400] + ldp x13, x12, [sp, #384] + ldp x14, x15, [sp, #352] + ldp x16, x17, [sp, #368] + cmn x21, x14 + ldr x14, [sp, #72] // 8-byte Folded Reload + adcs x21, x14, x15 + adcs x14, x23, x16 + str x14, [sp, #72] // 8-byte Folded Spill + adcs x28, x28, x17 + adcs x22, x22, x13 + adcs x24, x24, x12 + adcs x26, x26, x11 + adcs x27, x27, x10 + adcs x25, x25, x9 + ldr x9, [sp, #88] // 8-byte Folded Reload + adcs x8, x9, x8 + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x23, x8, xzr + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #96] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #272 // =272 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #336] + ldp x11, x10, [sp, #320] + ldp x13, x12, [sp, #304] + ldp x14, x15, [sp, #272] + ldp x16, x17, [sp, #288] + cmn x21, x14 + ldr x14, [sp, #72] // 8-byte Folded Reload + adcs x21, x14, x15 + adcs x14, x28, x16 + adcs x22, x22, x17 + adcs x24, x24, x13 + adcs x26, x26, x12 + adcs x27, x27, x11 + adcs x25, x25, x10 + ldr x10, [sp, #88] // 8-byte Folded Reload + adcs x9, x10, x9 + stp x14, x9, [sp, #80] + adcs x23, x23, x8 + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x28, x8, xzr + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #192 // =192 
+ mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #256] + ldp x11, x10, [sp, #240] + ldp x13, x12, [sp, #224] + ldp x14, x15, [sp, #192] + ldp x16, x17, [sp, #208] + cmn x21, x14 + ldr x14, [sp, #80] // 8-byte Folded Reload + adcs x14, x14, x15 + adcs x15, x22, x16 + adcs x16, x24, x17 + adcs x13, x26, x13 + adcs x12, x27, x12 + adcs x11, x25, x11 + ldr x17, [sp, #88] // 8-byte Folded Reload + adcs x10, x17, x10 + adcs x9, x23, x9 + adcs x8, x28, x8 + ldp x17, x18, [sp, #104] + adcs x17, x17, xzr + subs x18, x14, x18 + ldp x0, x1, [sp, #120] + sbcs x0, x15, x0 + sbcs x1, x16, x1 + ldp x2, x3, [sp, #136] + sbcs x2, x13, x2 + sbcs x3, x12, x3 + ldr x4, [sp, #152] // 8-byte Folded Reload + sbcs x4, x11, x4 + ldp x5, x6, [sp, #168] + sbcs x5, x10, x5 + sbcs x6, x9, x6 + ldr x7, [sp, #184] // 8-byte Folded Reload + sbcs x7, x8, x7 + sbcs x17, x17, xzr + tst x17, #0x1 + csel x14, x14, x18, ne + csel x15, x15, x0, ne + csel x16, x16, x1, ne + csel x13, x13, x2, ne + csel x12, x12, x3, ne + csel x11, x11, x4, ne + csel x10, x10, x5, ne + csel x9, x9, x6, ne + csel x8, x8, x7, ne + ldr x17, [sp, #160] // 8-byte Folded Reload + stp x14, x15, [x17] + stp x16, x13, [x17, #16] + stp x12, x11, [x17, #32] + stp x10, x9, [x17, #48] + str x8, [x17, #64] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end132: + .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L + + .globl mcl_fp_addPre9L + .align 2 + .type mcl_fp_addPre9L,@function +mcl_fp_addPre9L: // @mcl_fp_addPre9L +// BB#0: + ldp x11, x8, [x2, #56] + ldp x13, x9, [x1, #56] + ldp x15, x10, [x2, #40] + ldp x17, x12, [x1, #40] + ldp x3, x14, [x2, #24] + ldr x4, [x2] + ldp x2, x18, [x2, #8] + ldp x5, x6, [x1] + ldr x7, [x1, #16] + ldp x1, x16, [x1, #24] + adds x4, x4, x5 + adcs x2, x2, x6 + stp x4, x2, [x0] + adcs x18, x18, x7 + str x18, [x0, #16] + adcs x18, x3, x1 + adcs x14, x14, x16 + stp x18, x14, [x0, #24] + adcs x14, x15, x17 + adcs x10, x10, x12 + stp x14, x10, [x0, #40] + adcs x10, x11, x13 + adcs x9, x8, x9 + adcs x8, xzr, xzr + stp x10, x9, [x0, #56] + mov x0, x8 + ret +.Lfunc_end133: + .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L + + .globl mcl_fp_subPre9L + .align 2 + .type mcl_fp_subPre9L,@function +mcl_fp_subPre9L: // @mcl_fp_subPre9L +// BB#0: + ldp x11, x8, [x2, #56] + ldp x13, x9, [x1, #56] + ldp x15, x10, [x2, #40] + ldp x17, x12, [x1, #40] + ldp x3, x14, [x2, #24] + ldr x4, [x2] + ldp x2, x18, [x2, #8] + ldp x5, x6, [x1] + ldr x7, [x1, #16] + ldp x1, x16, [x1, #24] + subs x4, x5, x4 + sbcs x2, x6, x2 + stp x4, x2, [x0] + sbcs x18, x7, x18 + str x18, [x0, #16] + sbcs x18, x1, x3 + sbcs x14, x16, x14 + stp x18, x14, [x0, #24] + sbcs x14, x17, x15 + sbcs x10, x12, x10 + stp x14, x10, [x0, #40] + sbcs x10, x13, x11 + sbcs x9, x9, x8 + ngcs x8, xzr + and x8, x8, #0x1 + stp x10, x9, [x0, #56] + mov x0, x8 + ret +.Lfunc_end134: + .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L + + .globl mcl_fp_shr1_9L + .align 2 + .type mcl_fp_shr1_9L,@function +mcl_fp_shr1_9L: // @mcl_fp_shr1_9L +// BB#0: + ldp x8, x9, [x1] + ldp x12, x10, [x1, #56] + ldp x16, x11, [x1, #40] + ldp x13, x14, [x1, #16] + ldr x15, [x1, #32] + extr x8, x9, x8, #1 + extr x9, x13, x9, #1 + extr x13, x14, x13, #1 + extr x14, x15, x14, #1 + extr x15, x16, x15, #1 + extr x16, x11, x16, #1 + extr x11, x12, x11, #1 + extr x12, x10, x12, #1 + lsr x10, x10, #1 + stp x8, x9, [x0] + stp x13, x14, [x0, #16] + stp x15, x16, 
[x0, #32] + stp x11, x12, [x0, #48] + str x10, [x0, #64] + ret +.Lfunc_end135: + .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L + + .globl mcl_fp_add9L + .align 2 + .type mcl_fp_add9L,@function +mcl_fp_add9L: // @mcl_fp_add9L +// BB#0: + stp x24, x23, [sp, #-48]! + stp x22, x21, [sp, #16] + stp x20, x19, [sp, #32] + ldp x11, x8, [x2, #56] + ldp x13, x9, [x1, #56] + ldp x15, x10, [x2, #40] + ldp x17, x12, [x1, #40] + ldp x4, x14, [x2, #24] + ldr x5, [x2] + ldp x2, x18, [x2, #8] + ldp x6, x7, [x1] + ldr x19, [x1, #16] + ldp x1, x16, [x1, #24] + adds x5, x5, x6 + adcs x2, x2, x7 + adcs x18, x18, x19 + ldp x21, x7, [x3, #40] + ldp x19, x6, [x3, #56] + adcs x1, x4, x1 + adcs x4, x14, x16 + ldr x20, [x3, #32] + adcs x17, x15, x17 + adcs x10, x10, x12 + ldp x12, x14, [x3] + stp x5, x2, [x0] + stp x18, x1, [x0, #16] + stp x4, x17, [x0, #32] + adcs x22, x11, x13 + stp x10, x22, [x0, #48] + adcs x8, x8, x9 + str x8, [x0, #64] + adcs x23, xzr, xzr + ldp x9, x11, [x3, #16] + subs x16, x5, x12 + sbcs x15, x2, x14 + sbcs x14, x18, x9 + sbcs x13, x1, x11 + sbcs x12, x4, x20 + sbcs x11, x17, x21 + sbcs x10, x10, x7 + sbcs x9, x22, x19 + sbcs x8, x8, x6 + sbcs x17, x23, xzr + and w17, w17, #0x1 + tbnz w17, #0, .LBB136_2 +// BB#1: // %nocarry + stp x16, x15, [x0] + stp x14, x13, [x0, #16] + stp x12, x11, [x0, #32] + stp x10, x9, [x0, #48] + str x8, [x0, #64] +.LBB136_2: // %carry + ldp x20, x19, [sp, #32] + ldp x22, x21, [sp, #16] + ldp x24, x23, [sp], #48 + ret +.Lfunc_end136: + .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L + + .globl mcl_fp_addNF9L + .align 2 + .type mcl_fp_addNF9L,@function +mcl_fp_addNF9L: // @mcl_fp_addNF9L +// BB#0: + stp x20, x19, [sp, #-16]! + ldp x11, x8, [x1, #56] + ldp x13, x9, [x2, #56] + ldp x15, x10, [x1, #40] + ldp x17, x12, [x2, #40] + ldp x4, x14, [x1, #24] + ldr x5, [x1] + ldp x1, x18, [x1, #8] + ldp x6, x7, [x2] + ldr x19, [x2, #16] + ldp x2, x16, [x2, #24] + adds x5, x6, x5 + adcs x1, x7, x1 + adcs x18, x19, x18 + ldp x19, x6, [x3, #56] + adcs x2, x2, x4 + adcs x14, x16, x14 + ldp x4, x7, [x3, #40] + adcs x15, x17, x15 + adcs x10, x12, x10 + ldp x12, x17, [x3] + adcs x11, x13, x11 + ldr x13, [x3, #16] + ldp x3, x16, [x3, #24] + adcs x8, x9, x8 + subs x9, x5, x12 + sbcs x12, x1, x17 + sbcs x13, x18, x13 + sbcs x17, x2, x3 + sbcs x16, x14, x16 + sbcs x3, x15, x4 + sbcs x4, x10, x7 + sbcs x7, x11, x19 + sbcs x6, x8, x6 + asr x19, x6, #63 + cmp x19, #0 // =0 + csel x9, x5, x9, lt + csel x12, x1, x12, lt + csel x13, x18, x13, lt + csel x17, x2, x17, lt + csel x14, x14, x16, lt + csel x15, x15, x3, lt + csel x10, x10, x4, lt + csel x11, x11, x7, lt + csel x8, x8, x6, lt + stp x9, x12, [x0] + stp x13, x17, [x0, #16] + stp x14, x15, [x0, #32] + stp x10, x11, [x0, #48] + str x8, [x0, #64] + ldp x20, x19, [sp], #16 + ret +.Lfunc_end137: + .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L + + .globl mcl_fp_sub9L + .align 2 + .type mcl_fp_sub9L,@function +mcl_fp_sub9L: // @mcl_fp_sub9L +// BB#0: + stp x20, x19, [sp, #-16]! 
+ ldp x15, x16, [x2, #56] + ldp x4, x17, [x1, #56] + ldp x13, x14, [x2, #40] + ldp x6, x18, [x1, #40] + ldp x11, x12, [x2, #24] + ldp x9, x10, [x2, #8] + ldr x8, [x2] + ldp x2, x7, [x1] + ldr x19, [x1, #16] + ldp x1, x5, [x1, #24] + subs x8, x2, x8 + sbcs x9, x7, x9 + stp x8, x9, [x0] + sbcs x10, x19, x10 + sbcs x11, x1, x11 + stp x10, x11, [x0, #16] + sbcs x12, x5, x12 + sbcs x13, x6, x13 + stp x12, x13, [x0, #32] + sbcs x14, x18, x14 + sbcs x15, x4, x15 + stp x14, x15, [x0, #48] + sbcs x16, x17, x16 + str x16, [x0, #64] + ngcs x17, xzr + and w17, w17, #0x1 + tbnz w17, #0, .LBB138_2 +// BB#1: // %nocarry + ldp x20, x19, [sp], #16 + ret +.LBB138_2: // %carry + ldp x18, x1, [x3] + ldp x2, x4, [x3, #16] + ldp x5, x6, [x3, #32] + adds x8, x18, x8 + adcs x9, x1, x9 + ldr x18, [x3, #48] + ldp x1, x17, [x3, #56] + adcs x10, x2, x10 + adcs x11, x4, x11 + adcs x12, x5, x12 + adcs x13, x6, x13 + adcs x14, x18, x14 + adcs x15, x1, x15 + adcs x16, x17, x16 + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + stp x12, x13, [x0, #32] + stp x14, x15, [x0, #48] + str x16, [x0, #64] + ldp x20, x19, [sp], #16 + ret +.Lfunc_end138: + .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L + + .globl mcl_fp_subNF9L + .align 2 + .type mcl_fp_subNF9L,@function +mcl_fp_subNF9L: // @mcl_fp_subNF9L +// BB#0: + stp x20, x19, [sp, #-16]! + ldp x11, x8, [x2, #56] + ldp x13, x9, [x1, #56] + ldp x15, x10, [x2, #40] + ldp x17, x12, [x1, #40] + ldp x4, x14, [x2, #24] + ldr x5, [x2] + ldp x2, x18, [x2, #8] + ldp x6, x7, [x1] + ldr x19, [x1, #16] + ldp x1, x16, [x1, #24] + subs x5, x6, x5 + sbcs x2, x7, x2 + sbcs x18, x19, x18 + ldp x19, x6, [x3, #56] + sbcs x1, x1, x4 + sbcs x14, x16, x14 + ldp x4, x7, [x3, #40] + sbcs x15, x17, x15 + sbcs x10, x12, x10 + ldp x12, x17, [x3] + sbcs x11, x13, x11 + sbcs x8, x9, x8 + asr x9, x8, #63 + extr x13, x9, x8, #63 + and x12, x13, x12 + ldr x13, [x3, #16] + ldp x3, x16, [x3, #24] + and x19, x9, x19 + and x6, x9, x6 + ror x9, x9, #63 + and x17, x9, x17 + and x13, x9, x13 + and x3, x9, x3 + and x16, x9, x16 + and x4, x9, x4 + and x9, x9, x7 + adds x12, x12, x5 + str x12, [x0] + adcs x12, x17, x2 + str x12, [x0, #8] + adcs x12, x13, x18 + str x12, [x0, #16] + adcs x12, x3, x1 + str x12, [x0, #24] + adcs x12, x16, x14 + str x12, [x0, #32] + adcs x12, x4, x15 + adcs x9, x9, x10 + stp x12, x9, [x0, #40] + adcs x9, x19, x11 + adcs x8, x6, x8 + stp x9, x8, [x0, #56] + ldp x20, x19, [sp], #16 + ret +.Lfunc_end139: + .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L + + .globl mcl_fpDbl_add9L + .align 2 + .type mcl_fpDbl_add9L,@function +mcl_fpDbl_add9L: // @mcl_fpDbl_add9L +// BB#0: + stp x20, x19, [sp, #-16]! 
+ ldp x10, x8, [x2, #128] + ldp x11, x9, [x1, #128] + ldp x12, x13, [x2, #112] + ldp x14, x15, [x1, #112] + ldp x16, x17, [x2, #96] + ldp x18, x4, [x2] + ldp x5, x6, [x1] + ldp x7, x19, [x2, #16] + adds x18, x18, x5 + adcs x4, x4, x6 + ldp x5, x6, [x1, #16] + str x18, [x0] + adcs x18, x7, x5 + ldp x5, x7, [x1, #96] + str x4, [x0, #8] + ldr x4, [x1, #32] + str x18, [x0, #16] + adcs x18, x19, x6 + ldp x6, x19, [x2, #32] + str x18, [x0, #24] + adcs x4, x6, x4 + ldp x18, x6, [x1, #40] + str x4, [x0, #32] + adcs x18, x19, x18 + ldp x4, x19, [x2, #48] + str x18, [x0, #40] + adcs x4, x4, x6 + ldp x18, x6, [x1, #56] + str x4, [x0, #48] + adcs x18, x19, x18 + ldp x4, x19, [x2, #64] + str x18, [x0, #56] + ldr x18, [x1, #72] + adcs x4, x4, x6 + ldp x6, x2, [x2, #80] + str x4, [x0, #64] + ldp x4, x1, [x1, #80] + adcs x18, x19, x18 + adcs x4, x6, x4 + adcs x1, x2, x1 + ldp x6, x19, [x3, #56] + adcs x16, x16, x5 + adcs x17, x17, x7 + ldp x7, x2, [x3, #40] + adcs x12, x12, x14 + adcs x13, x13, x15 + ldp x15, x5, [x3, #24] + adcs x10, x10, x11 + ldr x11, [x3] + ldp x3, x14, [x3, #8] + adcs x8, x8, x9 + adcs x9, xzr, xzr + subs x11, x18, x11 + sbcs x3, x4, x3 + sbcs x14, x1, x14 + sbcs x15, x16, x15 + sbcs x5, x17, x5 + sbcs x7, x12, x7 + sbcs x2, x13, x2 + sbcs x6, x10, x6 + sbcs x19, x8, x19 + sbcs x9, x9, xzr + tst x9, #0x1 + csel x9, x18, x11, ne + csel x11, x4, x3, ne + csel x14, x1, x14, ne + csel x15, x16, x15, ne + csel x16, x17, x5, ne + csel x12, x12, x7, ne + csel x13, x13, x2, ne + csel x10, x10, x6, ne + csel x8, x8, x19, ne + stp x9, x11, [x0, #72] + stp x14, x15, [x0, #88] + stp x16, x12, [x0, #104] + stp x13, x10, [x0, #120] + str x8, [x0, #136] + ldp x20, x19, [sp], #16 + ret +.Lfunc_end140: + .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L + + .globl mcl_fpDbl_sub9L + .align 2 + .type mcl_fpDbl_sub9L,@function +mcl_fpDbl_sub9L: // @mcl_fpDbl_sub9L +// BB#0: + ldp x10, x8, [x2, #128] + ldp x11, x9, [x1, #128] + ldp x14, x12, [x2, #112] + ldp x15, x13, [x1, #112] + ldp x16, x17, [x2] + ldp x18, x4, [x1] + ldp x5, x6, [x2, #96] + ldr x7, [x1, #16] + subs x16, x18, x16 + sbcs x17, x4, x17 + ldp x18, x4, [x2, #16] + str x16, [x0] + ldr x16, [x1, #24] + sbcs x18, x7, x18 + str x17, [x0, #8] + ldp x17, x7, [x2, #32] + str x18, [x0, #16] + sbcs x16, x16, x4 + ldp x18, x4, [x1, #32] + str x16, [x0, #24] + sbcs x16, x18, x17 + ldp x17, x18, [x2, #48] + str x16, [x0, #32] + sbcs x4, x4, x7 + ldp x16, x7, [x1, #48] + str x4, [x0, #40] + sbcs x16, x16, x17 + ldp x17, x4, [x2, #80] + str x16, [x0, #48] + ldr x16, [x1, #64] + sbcs x18, x7, x18 + ldp x7, x2, [x2, #64] + str x18, [x0, #56] + ldr x18, [x1, #72] + sbcs x16, x16, x7 + str x16, [x0, #64] + ldp x16, x7, [x1, #80] + sbcs x18, x18, x2 + ldp x2, x1, [x1, #96] + sbcs x16, x16, x17 + sbcs x4, x7, x4 + sbcs x2, x2, x5 + ldp x7, x17, [x3, #56] + sbcs x1, x1, x6 + sbcs x14, x15, x14 + ldp x6, x5, [x3, #40] + sbcs x12, x13, x12 + sbcs x10, x11, x10 + ldp x13, x15, [x3, #24] + sbcs x8, x9, x8 + ngcs x9, xzr + tst x9, #0x1 + ldr x9, [x3] + ldp x3, x11, [x3, #8] + csel x17, x17, xzr, ne + csel x7, x7, xzr, ne + csel x5, x5, xzr, ne + csel x6, x6, xzr, ne + csel x15, x15, xzr, ne + csel x13, x13, xzr, ne + csel x11, x11, xzr, ne + csel x3, x3, xzr, ne + csel x9, x9, xzr, ne + adds x9, x9, x18 + str x9, [x0, #72] + adcs x9, x3, x16 + str x9, [x0, #80] + adcs x9, x11, x4 + str x9, [x0, #88] + adcs x9, x13, x2 + str x9, [x0, #96] + adcs x9, x15, x1 + str x9, [x0, #104] + adcs x9, x6, x14 + str x9, [x0, #112] + adcs x9, x5, x12 + str x9, [x0, #120] + adcs 
x9, x7, x10 + adcs x8, x17, x8 + stp x9, x8, [x0, #128] + ret +.Lfunc_end141: + .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L + + + .section ".note.GNU-stack","",@progbits diff --git a/vendor/github.com/byzantine-lab/mcl/src/asm/arm.s b/vendor/github.com/byzantine-lab/mcl/src/asm/arm.s new file mode 100644 index 000000000..2df9bfb92 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/asm/arm.s @@ -0,0 +1,84189 @@ + .text + .syntax unified + .eabi_attribute 67, "2.09" @ Tag_conformance + .eabi_attribute 6, 1 @ Tag_CPU_arch + .eabi_attribute 8, 1 @ Tag_ARM_ISA_use + .eabi_attribute 15, 1 @ Tag_ABI_PCS_RW_data + .eabi_attribute 16, 1 @ Tag_ABI_PCS_RO_data + .eabi_attribute 17, 2 @ Tag_ABI_PCS_GOT_use + .eabi_attribute 20, 1 @ Tag_ABI_FP_denormal + .eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions + .eabi_attribute 23, 3 @ Tag_ABI_FP_number_model + .eabi_attribute 34, 1 @ Tag_CPU_unaligned_access + .eabi_attribute 24, 1 @ Tag_ABI_align_needed + .eabi_attribute 25, 1 @ Tag_ABI_align_preserved + .eabi_attribute 28, 1 @ Tag_ABI_VFP_args + .eabi_attribute 38, 1 @ Tag_ABI_FP_16bit_format + .eabi_attribute 14, 0 @ Tag_ABI_PCS_R9_use + .file "" + .globl makeNIST_P192L + .align 2 + .type makeNIST_P192L,%function +makeNIST_P192L: @ @makeNIST_P192L + .fnstart +@ BB#0: + mvn r1, #0 + mvn r2, #1 + str r1, [r0] + stmib r0, {r1, r2} + str r1, [r0, #12] + str r1, [r0, #16] + str r1, [r0, #20] + mov pc, lr +.Lfunc_end0: + .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L + .cantunwind + .fnend + + .globl mcl_fpDbl_mod_NIST_P192L + .align 2 + .type mcl_fpDbl_mod_NIST_P192L,%function +mcl_fpDbl_mod_NIST_P192L: @ @mcl_fpDbl_mod_NIST_P192L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #8 + sub sp, sp, #8 + add lr, r1, #24 + ldr r2, [r1, #40] + ldr r3, [r1, #44] + ldr r7, [r1, #16] + ldr r8, [r1, #20] + ldm lr, {r4, r5, r6, lr} + ldm r1, {r1, r9, r10, r12} + adds r11, r4, r1 + adcs r9, r5, r9 + adcs r10, r6, r10 + adcs r1, lr, r12 + str r1, [sp, #4] @ 4-byte Spill + adcs r1, r2, r7 + mov r7, #0 + str r1, [sp] @ 4-byte Spill + adcs r8, r3, r8 + mov r1, #0 + adcs r1, r1, #0 + adc r12, r7, #0 + ldr r7, [sp, #4] @ 4-byte Reload + adds r11, r11, r2 + adcs r9, r9, r3 + adcs r4, r10, r4 + adcs r5, r7, r5 + ldr r7, [sp] @ 4-byte Reload + adcs r6, r7, r6 + adcs r7, r8, lr + adcs r1, r1, #0 + adc r12, r12, #0 + adds lr, r4, r2 + adcs r3, r5, r3 + adcs r6, r6, #0 + adcs r7, r7, #0 + adcs r1, r1, #0 + adc r5, r12, #0 + adds r12, r1, r11 + adcs r11, r5, r9 + adcs r10, r1, lr + mov r1, #0 + adcs r8, r5, r3 + adcs lr, r6, #0 + adcs r2, r7, #0 + adc r9, r1, #0 + adds r7, r12, #1 + str r2, [sp, #4] @ 4-byte Spill + adcs r6, r11, #0 + adcs r3, r10, #1 + adcs r5, r8, #0 + adcs r1, lr, #0 + adcs r2, r2, #0 + sbc r4, r9, #0 + ands r4, r4, #1 + movne r7, r12 + movne r6, r11 + movne r3, r10 + cmp r4, #0 + movne r5, r8 + movne r1, lr + str r7, [r0] + str r6, [r0, #4] + str r3, [r0, #8] + str r5, [r0, #12] + str r1, [r0, #16] + ldr r1, [sp, #4] @ 4-byte Reload + movne r2, r1 + str r2, [r0, #20] + add sp, sp, #8 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end1: + .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L + .cantunwind + .fnend + + .globl mcl_fp_sqr_NIST_P192L + .align 2 + .type mcl_fp_sqr_NIST_P192L,%function +mcl_fp_sqr_NIST_P192L: @ @mcl_fp_sqr_NIST_P192L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #60 + sub sp, sp, #60 + mov r8, r0 
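+@ NIST P-192 reduction: since p = 2^192 - 2^64 - 1, we have
+@ 2^192 == 2^64 + 1 (mod p). The code below squares into a 12-word
+@ stack buffer via mcl_fpDbl_sqrPre6L, folds the high six words back
+@ into the low half using that identity, and finishes with a single
+@ conditional subtraction of p (implemented as the adds #1 / adcs #1 /
+@ movne tail, because -p mod 2^192 = 2^64 + 1).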
+ add r0, sp, #12 + bl mcl_fpDbl_sqrPre6L(PLT) + add r12, sp, #12 + ldr lr, [sp, #48] + ldr r2, [sp, #44] + ldr r3, [sp, #40] + mov r4, #0 + ldm r12, {r0, r1, r5, r6, r12} + ldr r7, [sp, #36] + adds r0, r7, r0 + str r0, [sp, #8] @ 4-byte Spill + adcs r0, r3, r1 + mov r1, #0 + adcs r10, r2, r5 + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #52] + ldr r5, [sp, #32] + adcs r11, lr, r6 + ldr r6, [sp, #56] + adcs r9, r0, r12 + adcs r5, r6, r5 + adcs r1, r1, #0 + adc r12, r4, #0 + ldr r4, [sp, #8] @ 4-byte Reload + adds r4, r4, r0 + str r4, [sp, #8] @ 4-byte Spill + ldr r4, [sp, #4] @ 4-byte Reload + adcs r4, r4, r6 + adcs r7, r10, r7 + adcs r3, r11, r3 + adcs r2, r9, r2 + adcs r5, r5, lr + adcs r1, r1, #0 + adc r12, r12, #0 + adds lr, r7, r0 + ldr r0, [sp, #8] @ 4-byte Reload + adcs r3, r3, r6 + adcs r2, r2, #0 + adcs r7, r5, #0 + adcs r1, r1, #0 + adc r6, r12, #0 + adds r5, r1, r0 + mov r0, #0 + adcs r11, r6, r4 + adcs r10, r1, lr + adcs r12, r6, r3 + adcs lr, r2, #0 + adcs r4, r7, #0 + adc r9, r0, #0 + adds r7, r5, #1 + str r4, [sp, #8] @ 4-byte Spill + adcs r2, r11, #0 + adcs r3, r10, #1 + adcs r6, r12, #0 + adcs r1, lr, #0 + adcs r0, r4, #0 + sbc r4, r9, #0 + ands r4, r4, #1 + movne r7, r5 + movne r2, r11 + movne r3, r10 + cmp r4, #0 + movne r6, r12 + movne r1, lr + str r7, [r8] + str r2, [r8, #4] + str r3, [r8, #8] + str r6, [r8, #12] + str r1, [r8, #16] + ldr r1, [sp, #8] @ 4-byte Reload + movne r0, r1 + str r0, [r8, #20] + add sp, sp, #60 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end2: + .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L + .cantunwind + .fnend + + .globl mcl_fp_mulNIST_P192L + .align 2 + .type mcl_fp_mulNIST_P192L,%function +mcl_fp_mulNIST_P192L: @ @mcl_fp_mulNIST_P192L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #60 + sub sp, sp, #60 + mov r8, r0 + add r0, sp, #12 + bl mcl_fpDbl_mulPre6L(PLT) + add r12, sp, #12 + ldr lr, [sp, #48] + ldr r2, [sp, #44] + ldr r3, [sp, #40] + mov r4, #0 + ldm r12, {r0, r1, r5, r6, r12} + ldr r7, [sp, #36] + adds r0, r7, r0 + str r0, [sp, #8] @ 4-byte Spill + adcs r0, r3, r1 + mov r1, #0 + adcs r10, r2, r5 + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #52] + ldr r5, [sp, #32] + adcs r11, lr, r6 + ldr r6, [sp, #56] + adcs r9, r0, r12 + adcs r5, r6, r5 + adcs r1, r1, #0 + adc r12, r4, #0 + ldr r4, [sp, #8] @ 4-byte Reload + adds r4, r4, r0 + str r4, [sp, #8] @ 4-byte Spill + ldr r4, [sp, #4] @ 4-byte Reload + adcs r4, r4, r6 + adcs r7, r10, r7 + adcs r3, r11, r3 + adcs r2, r9, r2 + adcs r5, r5, lr + adcs r1, r1, #0 + adc r12, r12, #0 + adds lr, r7, r0 + ldr r0, [sp, #8] @ 4-byte Reload + adcs r3, r3, r6 + adcs r2, r2, #0 + adcs r7, r5, #0 + adcs r1, r1, #0 + adc r6, r12, #0 + adds r5, r1, r0 + mov r0, #0 + adcs r11, r6, r4 + adcs r10, r1, lr + adcs r12, r6, r3 + adcs lr, r2, #0 + adcs r4, r7, #0 + adc r9, r0, #0 + adds r7, r5, #1 + str r4, [sp, #8] @ 4-byte Spill + adcs r2, r11, #0 + adcs r3, r10, #1 + adcs r6, r12, #0 + adcs r1, lr, #0 + adcs r0, r4, #0 + sbc r4, r9, #0 + ands r4, r4, #1 + movne r7, r5 + movne r2, r11 + movne r3, r10 + cmp r4, #0 + movne r6, r12 + movne r1, lr + str r7, [r8] + str r2, [r8, #4] + str r3, [r8, #8] + str r6, [r8, #12] + str r1, [r8, #16] + ldr r1, [sp, #8] @ 4-byte Reload + movne r0, r1 + str r0, [r8, #20] + add sp, sp, #60 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end3: + .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L + .cantunwind + .fnend + + .globl 
mcl_fpDbl_mod_NIST_P521L + .align 2 + .type mcl_fpDbl_mod_NIST_P521L,%function +mcl_fpDbl_mod_NIST_P521L: @ @mcl_fpDbl_mod_NIST_P521L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #44 + sub sp, sp, #44 + ldr r6, [r1, #64] + mov r5, #255 + ldr r3, [r1, #72] + ldr r2, [r1, #76] + mov r9, r0 + orr r5, r5, #256 + and r5, r6, r5 + lsr r6, r6, #9 + lsr r7, r3, #9 + str r5, [sp, #40] @ 4-byte Spill + ldr r5, [r1, #68] + orr r12, r7, r2, lsl #23 + lsr r2, r2, #9 + lsr r4, r5, #9 + orr r6, r6, r5, lsl #23 + ldr r5, [r1] + orr r3, r4, r3, lsl #23 + ldmib r1, {r4, r7, lr} + adds r5, r6, r5 + ldr r6, [r1, #36] + str r5, [sp, #36] @ 4-byte Spill + ldr r5, [r1, #80] + adcs r3, r3, r4 + str r3, [sp, #32] @ 4-byte Spill + adcs r7, r12, r7 + ldr r3, [r1, #84] + str r7, [sp, #28] @ 4-byte Spill + ldr r7, [r1, #88] + orr r2, r2, r5, lsl #23 + lsr r5, r5, #9 + adcs r12, r2, lr + ldr r2, [r1, #16] + orr r4, r5, r3, lsl #23 + lsr r3, r3, #9 + orr r3, r3, r7, lsl #23 + lsr r5, r7, #9 + ldr r7, [r1, #40] + adcs r2, r4, r2 + ldr r4, [r1, #24] + str r2, [sp, #24] @ 4-byte Spill + ldr r2, [r1, #20] + adcs r2, r3, r2 + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [r1, #92] + orr r3, r5, r2, lsl #23 + ldr r5, [r1, #28] + lsr r2, r2, #9 + adcs lr, r3, r4 + ldr r3, [r1, #96] + ldr r4, [r1, #44] + orr r2, r2, r3, lsl #23 + adcs r2, r2, r5 + ldr r5, [r1, #32] + str r2, [sp, #16] @ 4-byte Spill + lsr r2, r3, #9 + ldr r3, [r1, #100] + orr r2, r2, r3, lsl #23 + adcs r2, r2, r5 + ldr r5, [r1, #48] + str r2, [sp, #12] @ 4-byte Spill + lsr r2, r3, #9 + ldr r3, [r1, #104] + orr r2, r2, r3, lsl #23 + adcs r0, r2, r6 + lsr r2, r3, #9 + ldr r3, [r1, #108] + ldr r6, [r1, #52] + str r0, [sp, #8] @ 4-byte Spill + orr r2, r2, r3, lsl #23 + adcs r7, r2, r7 + lsr r2, r3, #9 + ldr r3, [r1, #112] + orr r2, r2, r3, lsl #23 + lsr r3, r3, #9 + adcs r2, r2, r4 + ldr r4, [r1, #116] + orr r3, r3, r4, lsl #23 + lsr r4, r4, #9 + adcs r3, r3, r5 + ldr r5, [r1, #120] + orr r4, r4, r5, lsl #23 + adcs r11, r4, r6 + lsr r4, r5, #9 + ldr r5, [r1, #124] + ldr r6, [r1, #56] + orr r4, r4, r5, lsl #23 + adcs r10, r4, r6 + lsr r4, r5, #9 + ldr r5, [r1, #128] + ldr r1, [r1, #60] + orr r4, r4, r5, lsl #23 + adcs r8, r4, r1 + ldr r4, [sp, #40] @ 4-byte Reload + lsr r1, r5, #9 + ldr r5, [sp, #36] @ 4-byte Reload + adc r1, r1, r4 + mov r4, #1 + and r4, r4, r1, lsr #9 + adds r5, r4, r5 + ldr r4, [sp, #32] @ 4-byte Reload + str r5, [sp, #40] @ 4-byte Spill + adcs r6, r4, #0 + ldr r4, [sp, #28] @ 4-byte Reload + str r6, [sp, #36] @ 4-byte Spill + adcs r0, r4, #0 + and r4, r6, r5 + ldr r5, [sp, #24] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + and r4, r4, r0 + adcs r0, r12, #0 + str r0, [sp, #28] @ 4-byte Spill + and r6, r4, r0 + adcs r0, r5, #0 + and r4, r6, r0 + ldr r6, [sp, #20] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + adcs r0, r6, #0 + ldr r6, [sp, #16] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + and r5, r4, r0 + adcs r0, lr, #0 + and r5, r5, r0 + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adcs lr, r6, #0 + and r6, r5, lr + ldr r5, [sp, #12] @ 4-byte Reload + adcs r5, r5, #0 + and r12, r6, r5 + adcs r6, r0, #0 + adcs r7, r7, #0 + and r4, r12, r6 + adcs r2, r2, #0 + and r4, r4, r7 + adcs r3, r3, #0 + and r4, r4, r2 + adcs r0, r11, #0 + and r4, r4, r3 + adcs r10, r10, #0 + and r4, r4, r0 + adcs r11, r8, #0 + and r4, r4, r10 + adc r8, r1, #0 + ldr r1, .LCPI4_0 + and r4, r4, r11 + orr r1, r8, r1 + and r1, r4, r1 + cmn r1, #1 + beq .LBB4_2 +@ 
BB#1: @ %nonzero + ldr r1, [sp, #40] @ 4-byte Reload + str r1, [r9] + ldr r1, [sp, #36] @ 4-byte Reload + str r1, [r9, #4] + ldr r1, [sp, #32] @ 4-byte Reload + str r1, [r9, #8] + ldr r1, [sp, #28] @ 4-byte Reload + str r1, [r9, #12] + ldr r1, [sp, #24] @ 4-byte Reload + str r1, [r9, #16] + ldr r1, [sp, #20] @ 4-byte Reload + str r1, [r9, #20] + ldr r1, [sp, #4] @ 4-byte Reload + str r1, [r9, #24] + add r1, r9, #32 + str lr, [r9, #28] + stm r1, {r5, r6, r7} + add r1, r9, #52 + str r2, [r9, #44] + str r3, [r9, #48] + stm r1, {r0, r10, r11} + mov r1, #255 + orr r1, r1, #256 + and r1, r8, r1 + str r1, [r9, #64] + b .LBB4_3 +.LBB4_2: @ %zero + mov r0, r9 + mov r1, #0 + mov r2, #68 + bl memset(PLT) +.LBB4_3: @ %zero + add sp, sp, #44 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr + .align 2 +@ BB#4: +.LCPI4_0: + .long 4294966784 @ 0xfffffe00 +.Lfunc_end4: + .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre1L + .align 2 + .type mcl_fp_mulUnitPre1L,%function +mcl_fp_mulUnitPre1L: @ @mcl_fp_mulUnitPre1L + .fnstart +@ BB#0: + ldr r1, [r1] + umull r3, r12, r1, r2 + stm r0, {r3, r12} + mov pc, lr +.Lfunc_end5: + .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre1L + .align 2 + .type mcl_fpDbl_mulPre1L,%function +mcl_fpDbl_mulPre1L: @ @mcl_fpDbl_mulPre1L + .fnstart +@ BB#0: + ldr r1, [r1] + ldr r2, [r2] + umull r3, r12, r2, r1 + stm r0, {r3, r12} + mov pc, lr +.Lfunc_end6: + .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre1L + .align 2 + .type mcl_fpDbl_sqrPre1L,%function +mcl_fpDbl_sqrPre1L: @ @mcl_fpDbl_sqrPre1L + .fnstart +@ BB#0: + ldr r1, [r1] + umull r2, r3, r1, r1 + stm r0, {r2, r3} + mov pc, lr +.Lfunc_end7: + .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L + .cantunwind + .fnend + + .globl mcl_fp_mont1L + .align 2 + .type mcl_fp_mont1L,%function +mcl_fp_mont1L: @ @mcl_fp_mont1L + .fnstart +@ BB#0: + .save {r4, r5, r6, lr} + push {r4, r5, r6, lr} + ldr r12, [r2] + ldr r1, [r1] + mov r6, #0 + umull lr, r2, r1, r12 + ldr r12, [r3, #-4] + ldr r3, [r3] + mul r1, lr, r12 + umull r12, r4, r1, r3 + adds r5, r12, lr + adcs r5, r4, r2 + umlal lr, r2, r1, r3 + adc r6, r6, #0 + subs r1, r2, r3 + sbc r3, r6, #0 + tst r3, #1 + movne r1, r2 + str r1, [r0] + pop {r4, r5, r6, lr} + mov pc, lr +.Lfunc_end8: + .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L + .cantunwind + .fnend + + .globl mcl_fp_montNF1L + .align 2 + .type mcl_fp_montNF1L,%function +mcl_fp_montNF1L: @ @mcl_fp_montNF1L + .fnstart +@ BB#0: + .save {r11, lr} + push {r11, lr} + ldr r12, [r2] + ldr r1, [r1] + umull lr, r2, r1, r12 + ldr r12, [r3, #-4] + ldr r3, [r3] + mul r1, lr, r12 + umlal lr, r2, r1, r3 + sub r1, r2, r3 + cmp r1, #0 + movge r2, r1 + str r2, [r0] + pop {r11, lr} + mov pc, lr +.Lfunc_end9: + .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L + .cantunwind + .fnend + + .globl mcl_fp_montRed1L + .align 2 + .type mcl_fp_montRed1L,%function +mcl_fp_montRed1L: @ @mcl_fp_montRed1L + .fnstart +@ BB#0: + .save {r4, r5, r6, lr} + push {r4, r5, r6, lr} + ldr r12, [r2, #-4] + ldr r3, [r1] + ldr r2, [r2] + ldr r1, [r1, #4] + mov r6, #0 + mul lr, r3, r12 + umull r12, r4, lr, r2 + adds r5, r3, r12 + adcs r5, r1, r4 + umlal r3, r1, lr, r2 + adc r6, r6, #0 + subs r2, r1, r2 + sbc r3, r6, #0 + tst r3, #1 + movne r2, r1 + str r2, [r0] + pop {r4, r5, r6, lr} + mov pc, lr +.Lfunc_end10: + .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L + 
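+@ The routine above is a one-limb Montgomery reduction: with R = 2^32
+@ and N' = -N^-1 mod R (passed just below the modulus pointer, hence
+@ the ldr r12, [r2, #-4]), it forms m = x[0]*N' mod R, adds m*N so the
+@ low word cancels, takes the high word as (x + m*N)/R, and
+@ conditionally subtracts N once.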
.cantunwind + .fnend + + .globl mcl_fp_addPre1L + .align 2 + .type mcl_fp_addPre1L,%function +mcl_fp_addPre1L: @ @mcl_fp_addPre1L + .fnstart +@ BB#0: + ldr r1, [r1] + ldr r2, [r2] + adds r1, r2, r1 + str r1, [r0] + mov r0, #0 + adc r0, r0, #0 + mov pc, lr +.Lfunc_end11: + .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L + .cantunwind + .fnend + + .globl mcl_fp_subPre1L + .align 2 + .type mcl_fp_subPre1L,%function +mcl_fp_subPre1L: @ @mcl_fp_subPre1L + .fnstart +@ BB#0: + ldr r2, [r2] + ldr r1, [r1] + subs r1, r1, r2 + str r1, [r0] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + mov pc, lr +.Lfunc_end12: + .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L + .cantunwind + .fnend + + .globl mcl_fp_shr1_1L + .align 2 + .type mcl_fp_shr1_1L,%function +mcl_fp_shr1_1L: @ @mcl_fp_shr1_1L + .fnstart +@ BB#0: + ldr r1, [r1] + lsr r1, r1, #1 + str r1, [r0] + mov pc, lr +.Lfunc_end13: + .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L + .cantunwind + .fnend + + .globl mcl_fp_add1L + .align 2 + .type mcl_fp_add1L,%function +mcl_fp_add1L: @ @mcl_fp_add1L + .fnstart +@ BB#0: + ldr r1, [r1] + ldr r2, [r2] + ldr r3, [r3] + adds r1, r2, r1 + mov r2, #0 + str r1, [r0] + adc r2, r2, #0 + subs r1, r1, r3 + sbc r2, r2, #0 + tst r2, #1 + streq r1, [r0] + mov pc, lr +.Lfunc_end14: + .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L + .cantunwind + .fnend + + .globl mcl_fp_addNF1L + .align 2 + .type mcl_fp_addNF1L,%function +mcl_fp_addNF1L: @ @mcl_fp_addNF1L + .fnstart +@ BB#0: + ldr r1, [r1] + ldr r2, [r2] + add r1, r2, r1 + ldr r2, [r3] + sub r2, r1, r2 + cmp r2, #0 + movlt r2, r1 + str r2, [r0] + mov pc, lr +.Lfunc_end15: + .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L + .cantunwind + .fnend + + .globl mcl_fp_sub1L + .align 2 + .type mcl_fp_sub1L,%function +mcl_fp_sub1L: @ @mcl_fp_sub1L + .fnstart +@ BB#0: + ldr r2, [r2] + ldr r1, [r1] + subs r1, r1, r2 + mov r2, #0 + sbc r2, r2, #0 + str r1, [r0] + tst r2, #1 + ldrne r2, [r3] + addne r1, r2, r1 + strne r1, [r0] + movne pc, lr + mov pc, lr +.Lfunc_end16: + .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L + .cantunwind + .fnend + + .globl mcl_fp_subNF1L + .align 2 + .type mcl_fp_subNF1L,%function +mcl_fp_subNF1L: @ @mcl_fp_subNF1L + .fnstart +@ BB#0: + ldr r2, [r2] + ldr r1, [r1] + sub r1, r1, r2 + ldr r2, [r3] + cmp r1, #0 + addlt r1, r1, r2 + str r1, [r0] + mov pc, lr +.Lfunc_end17: + .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L + .cantunwind + .fnend + + .globl mcl_fpDbl_add1L + .align 2 + .type mcl_fpDbl_add1L,%function +mcl_fpDbl_add1L: @ @mcl_fpDbl_add1L + .fnstart +@ BB#0: + .save {r11, lr} + push {r11, lr} + ldm r1, {r12, lr} + ldm r2, {r1, r2} + ldr r3, [r3] + adds r1, r1, r12 + str r1, [r0] + mov r1, #0 + adcs r2, r2, lr + adc r1, r1, #0 + subs r3, r2, r3 + sbc r1, r1, #0 + tst r1, #1 + movne r3, r2 + str r3, [r0, #4] + pop {r11, lr} + mov pc, lr +.Lfunc_end18: + .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub1L + .align 2 + .type mcl_fpDbl_sub1L,%function +mcl_fpDbl_sub1L: @ @mcl_fpDbl_sub1L + .fnstart +@ BB#0: + .save {r11, lr} + push {r11, lr} + ldm r2, {r12, lr} + ldr r2, [r1] + ldr r1, [r1, #4] + ldr r3, [r3] + subs r2, r2, r12 + str r2, [r0] + mov r2, #0 + sbcs r1, r1, lr + sbc r2, r2, #0 + tst r2, #1 + addne r1, r1, r3 + str r1, [r0, #4] + pop {r11, lr} + mov pc, lr +.Lfunc_end19: + .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre2L + .align 2 + .type mcl_fp_mulUnitPre2L,%function +mcl_fp_mulUnitPre2L: @ @mcl_fp_mulUnitPre2L + 
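+@ mulUnitPre2L: multiply the two-limb operand at r1 by the 32-bit word
+@ in r2 and store the three-limb product at r0; one umull produces the
+@ low partial product and an umlal accumulates the second limb plus
+@ the carry.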
.fnstart +@ BB#0: + .save {r11, lr} + push {r11, lr} + ldm r1, {r3, lr} + umull r12, r1, r3, r2 + mov r3, #0 + umlal r1, r3, lr, r2 + str r12, [r0] + stmib r0, {r1, r3} + pop {r11, lr} + mov pc, lr +.Lfunc_end20: + .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre2L + .align 2 + .type mcl_fpDbl_mulPre2L,%function +mcl_fpDbl_mulPre2L: @ @mcl_fpDbl_mulPre2L + .fnstart +@ BB#0: + .save {r4, r5, r6, lr} + push {r4, r5, r6, lr} + ldr r3, [r2] + ldm r1, {r12, lr} + ldr r2, [r2, #4] + mov r5, #0 + umull r1, r4, r12, r3 + umlal r4, r5, lr, r3 + umull r3, r6, r12, r2 + str r1, [r0] + mov r1, #0 + adds r3, r3, r4 + str r3, [r0, #4] + umull r3, r4, lr, r2 + adcs r2, r3, r5 + adc r1, r1, #0 + adds r2, r2, r6 + adc r1, r1, r4 + str r2, [r0, #8] + str r1, [r0, #12] + pop {r4, r5, r6, lr} + mov pc, lr +.Lfunc_end21: + .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre2L + .align 2 + .type mcl_fpDbl_sqrPre2L,%function +mcl_fpDbl_sqrPre2L: @ @mcl_fpDbl_sqrPre2L + .fnstart +@ BB#0: + .save {r4, r5, r6, lr} + push {r4, r5, r6, lr} + ldr r2, [r1] + ldr r1, [r1, #4] + mov r4, #0 + mov lr, #0 + umull r12, r3, r2, r2 + umull r5, r6, r1, r2 + umlal r3, r4, r1, r2 + str r12, [r0] + adds r2, r3, r5 + umull r3, r5, r1, r1 + adcs r1, r4, r3 + str r2, [r0, #4] + adc r3, lr, #0 + adds r1, r1, r6 + adc r3, r3, r5 + str r1, [r0, #8] + str r3, [r0, #12] + pop {r4, r5, r6, lr} + mov pc, lr +.Lfunc_end22: + .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L + .cantunwind + .fnend + + .globl mcl_fp_mont2L + .align 2 + .type mcl_fp_mont2L,%function +mcl_fp_mont2L: @ @mcl_fp_mont2L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + ldm r1, {r12, lr} + ldm r2, {r1, r2} + mov r7, #0 + mov r5, #0 + mov r6, #0 + umull r8, r9, r2, r12 + umull r11, r4, r12, r1 + umlal r9, r7, r2, lr + umlal r4, r5, lr, r1 + ldmda r3, {r12, lr} + ldr r10, [r3, #4] + mul r1, r11, r12 + umull r3, r2, r1, lr + adds r3, r3, r11 + mov r3, #0 + umlal r2, r3, r1, r10 + adcs r1, r2, r4 + adcs r2, r3, r5 + adc r3, r6, #0 + adds r1, r1, r8 + adcs r8, r2, r9 + mul r5, r1, r12 + adcs r3, r3, r7 + umull r7, r2, r5, lr + adc r4, r6, #0 + umlal r2, r6, r5, r10 + adds r1, r7, r1 + adcs r1, r2, r8 + adcs r2, r6, r3 + adc r3, r4, #0 + subs r7, r1, lr + sbcs r6, r2, r10 + sbc r3, r3, #0 + ands r3, r3, #1 + movne r7, r1 + movne r6, r2 + str r7, [r0] + str r6, [r0, #4] + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end23: + .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L + .cantunwind + .fnend + + .globl mcl_fp_montNF2L + .align 2 + .type mcl_fp_montNF2L,%function +mcl_fp_montNF2L: @ @mcl_fp_montNF2L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + ldm r2, {r12, lr} + ldr r11, [r1] + ldr r8, [r3, #-4] + ldr r7, [r3] + ldr r9, [r1, #4] + ldr r3, [r3, #4] + umull r4, r5, r11, r12 + mul r6, r4, r8 + umull r1, r10, r6, r7 + adds r1, r1, r4 + mov r4, #0 + umlal r5, r4, r9, r12 + umull r2, r12, r6, r3 + mov r1, #0 + adcs r2, r2, r5 + adc r4, r4, #0 + adds r2, r2, r10 + adc r6, r4, r12 + umull r5, r4, lr, r11 + adds r2, r5, r2 + umlal r4, r1, lr, r9 + adcs r9, r4, r6 + mul r5, r2, r8 + adc lr, r1, #0 + umull r1, r6, r5, r7 + umull r4, r12, r5, r3 + adds r1, r1, r2 + adcs r1, r4, r9 + adc r2, lr, #0 + adds r1, r1, r6 + adc r2, r2, r12 + subs r7, r1, r7 + sbc r3, r2, r3 + cmp r3, #0 + movlt r7, r1 + movlt r3, 
r2 + str r7, [r0] + str r3, [r0, #4] + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end24: + .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L + .cantunwind + .fnend + + .globl mcl_fp_montRed2L + .align 2 + .type mcl_fp_montRed2L,%function +mcl_fp_montRed2L: @ @mcl_fp_montRed2L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, lr} + push {r4, r5, r6, r7, r8, r9, r10, lr} + ldr r12, [r2, #-4] + ldm r2, {r3, lr} + ldm r1, {r2, r9, r10} + ldr r8, [r1, #12] + mov r5, #0 + mov r7, #0 + mul r6, r2, r12 + umull r1, r4, r6, r3 + umlal r4, r5, r6, lr + adds r1, r2, r1 + adcs r1, r9, r4 + adcs r9, r10, r5 + mul r6, r1, r12 + adcs r8, r8, #0 + umull r2, r4, r6, r3 + adc r5, r7, #0 + umlal r4, r7, r6, lr + adds r1, r2, r1 + adcs r1, r4, r9 + adcs r2, r7, r8 + adc r7, r5, #0 + subs r3, r1, r3 + sbcs r6, r2, lr + sbc r7, r7, #0 + ands r7, r7, #1 + movne r3, r1 + movne r6, r2 + stm r0, {r3, r6} + pop {r4, r5, r6, r7, r8, r9, r10, lr} + mov pc, lr +.Lfunc_end25: + .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L + .cantunwind + .fnend + + .globl mcl_fp_addPre2L + .align 2 + .type mcl_fp_addPre2L,%function +mcl_fp_addPre2L: @ @mcl_fp_addPre2L + .fnstart +@ BB#0: + ldm r1, {r3, r12} + ldm r2, {r1, r2} + adds r1, r1, r3 + adcs r2, r2, r12 + stm r0, {r1, r2} + mov r0, #0 + adc r0, r0, #0 + mov pc, lr +.Lfunc_end26: + .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L + .cantunwind + .fnend + + .globl mcl_fp_subPre2L + .align 2 + .type mcl_fp_subPre2L,%function +mcl_fp_subPre2L: @ @mcl_fp_subPre2L + .fnstart +@ BB#0: + ldm r2, {r3, r12} + ldr r2, [r1] + ldr r1, [r1, #4] + subs r2, r2, r3 + sbcs r1, r1, r12 + str r2, [r0] + str r1, [r0, #4] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + mov pc, lr +.Lfunc_end27: + .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L + .cantunwind + .fnend + + .globl mcl_fp_shr1_2L + .align 2 + .type mcl_fp_shr1_2L,%function +mcl_fp_shr1_2L: @ @mcl_fp_shr1_2L + .fnstart +@ BB#0: + ldr r2, [r1] + ldr r1, [r1, #4] + lsrs r3, r1, #1 + lsr r1, r1, #1 + rrx r2, r2 + str r2, [r0] + str r1, [r0, #4] + mov pc, lr +.Lfunc_end28: + .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L + .cantunwind + .fnend + + .globl mcl_fp_add2L + .align 2 + .type mcl_fp_add2L,%function +mcl_fp_add2L: @ @mcl_fp_add2L + .fnstart +@ BB#0: + .save {r4, lr} + push {r4, lr} + ldm r1, {r12, lr} + ldm r2, {r1, r2} + adds r12, r1, r12 + mov r1, #0 + adcs r2, r2, lr + str r12, [r0] + str r2, [r0, #4] + adc lr, r1, #0 + ldm r3, {r1, r4} + subs r3, r12, r1 + sbcs r2, r2, r4 + sbc r1, lr, #0 + tst r1, #1 + streq r3, [r0] + streq r2, [r0, #4] + pop {r4, lr} + mov pc, lr +.Lfunc_end29: + .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L + .cantunwind + .fnend + + .globl mcl_fp_addNF2L + .align 2 + .type mcl_fp_addNF2L,%function +mcl_fp_addNF2L: @ @mcl_fp_addNF2L + .fnstart +@ BB#0: + .save {r4, lr} + push {r4, lr} + ldm r1, {r12, lr} + ldm r2, {r1, r2} + adds r1, r1, r12 + adc r4, r2, lr + ldm r3, {r12, lr} + subs r3, r1, r12 + sbc r2, r4, lr + cmp r2, #0 + movlt r3, r1 + movlt r2, r4 + str r3, [r0] + str r2, [r0, #4] + pop {r4, lr} + mov pc, lr +.Lfunc_end30: + .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L + .cantunwind + .fnend + + .globl mcl_fp_sub2L + .align 2 + .type mcl_fp_sub2L,%function +mcl_fp_sub2L: @ @mcl_fp_sub2L + .fnstart +@ BB#0: + .save {r4, lr} + push {r4, lr} + ldm r2, {r12, lr} + ldm r1, {r2, r4} + subs r1, r2, r12 + sbcs r2, r4, lr + mov r4, #0 + sbc r4, r4, #0 + stm r0, {r1, r2} + tst r4, #1 + popeq {r4, lr} + moveq pc, lr + ldr r4, [r3] + ldr r3, [r3, #4] + adds 
r1, r4, r1 + adc r2, r3, r2 + stm r0, {r1, r2} + pop {r4, lr} + mov pc, lr +.Lfunc_end31: + .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L + .cantunwind + .fnend + + .globl mcl_fp_subNF2L + .align 2 + .type mcl_fp_subNF2L,%function +mcl_fp_subNF2L: @ @mcl_fp_subNF2L + .fnstart +@ BB#0: + .save {r4, lr} + push {r4, lr} + ldm r2, {r12, lr} + ldr r2, [r1] + ldr r1, [r1, #4] + subs r4, r2, r12 + sbc r1, r1, lr + ldm r3, {r12, lr} + adds r3, r4, r12 + adc r2, r1, lr + cmp r1, #0 + movge r3, r4 + movge r2, r1 + str r3, [r0] + str r2, [r0, #4] + pop {r4, lr} + mov pc, lr +.Lfunc_end32: + .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L + .cantunwind + .fnend + + .globl mcl_fpDbl_add2L + .align 2 + .type mcl_fpDbl_add2L,%function +mcl_fpDbl_add2L: @ @mcl_fpDbl_add2L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r11, lr} + push {r4, r5, r6, r7, r11, lr} + ldm r1, {r12, lr} + ldr r4, [r1, #8] + ldr r1, [r1, #12] + ldm r2, {r5, r6, r7} + ldr r2, [r2, #12] + adds r5, r5, r12 + adcs r6, r6, lr + str r5, [r0] + adcs r7, r7, r4 + str r6, [r0, #4] + mov r6, #0 + adcs r1, r2, r1 + adc r2, r6, #0 + ldr r6, [r3] + ldr r3, [r3, #4] + subs r6, r7, r6 + sbcs r3, r1, r3 + sbc r2, r2, #0 + ands r2, r2, #1 + movne r6, r7 + movne r3, r1 + str r6, [r0, #8] + str r3, [r0, #12] + pop {r4, r5, r6, r7, r11, lr} + mov pc, lr +.Lfunc_end33: + .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub2L + .align 2 + .type mcl_fpDbl_sub2L,%function +mcl_fpDbl_sub2L: @ @mcl_fpDbl_sub2L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r11, lr} + push {r4, r5, r6, r7, r11, lr} + ldm r2, {r12, lr} + ldr r4, [r2, #8] + ldr r2, [r2, #12] + ldm r1, {r5, r6, r7} + ldr r1, [r1, #12] + subs r5, r5, r12 + sbcs r6, r6, lr + str r5, [r0] + sbcs r7, r7, r4 + str r6, [r0, #4] + mov r6, #0 + sbcs r1, r1, r2 + sbc r2, r6, #0 + ldr r6, [r3] + ldr r3, [r3, #4] + adds r6, r7, r6 + adc r3, r1, r3 + ands r2, r2, #1 + moveq r6, r7 + moveq r3, r1 + str r6, [r0, #8] + str r3, [r0, #12] + pop {r4, r5, r6, r7, r11, lr} + mov pc, lr +.Lfunc_end34: + .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre3L + .align 2 + .type mcl_fp_mulUnitPre3L,%function +mcl_fp_mulUnitPre3L: @ @mcl_fp_mulUnitPre3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, lr} + push {r4, r5, r6, r7, r8, lr} + ldr r12, [r1] + ldmib r1, {r3, r5} + umull lr, r4, r12, r2 + umull r1, r12, r5, r2 + umull r7, r8, r3, r2 + mov r5, r1 + mov r6, r4 + str lr, [r0] + umlal r6, r5, r3, r2 + adds r2, r4, r7 + adcs r1, r8, r1 + str r6, [r0, #4] + str r5, [r0, #8] + adc r1, r12, #0 + str r1, [r0, #12] + pop {r4, r5, r6, r7, r8, lr} + mov pc, lr +.Lfunc_end35: + .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre3L + .align 2 + .type mcl_fpDbl_mulPre3L,%function +mcl_fpDbl_mulPre3L: @ @mcl_fpDbl_mulPre3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + ldr r3, [r2] + ldm r1, {r12, lr} + ldr r1, [r1, #8] + umull r4, r5, r12, r3 + str r4, [r0] + umull r4, r6, lr, r3 + adds r4, r5, r4 + umull r7, r4, r1, r3 + adcs r6, r6, r7 + umlal r5, r7, lr, r3 + ldr r3, [r2, #4] + ldr r2, [r2, #8] + adc r8, r4, #0 + umull r6, r10, r12, r3 + adds r9, r6, r5 + umull r6, r5, lr, r3 + adcs r6, r6, r7 + umull r7, r4, r1, r3 + str r9, [r0, #4] + adcs r3, r7, r8 + mov r8, #0 + adc r7, r8, #0 + adds r6, r6, r10 + adcs r11, r3, r5 + umull r5, r9, r1, r2 + umull r1, r10, lr, r2 + adc r4, r7, r4 + umull r7, r3, 
r12, r2 + adds r2, r6, r7 + adcs r1, r11, r1 + str r2, [r0, #8] + adcs r2, r4, r5 + adc r7, r8, #0 + adds r1, r1, r3 + str r1, [r0, #12] + adcs r1, r2, r10 + str r1, [r0, #16] + adc r1, r7, r9 + str r1, [r0, #20] + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end36: + .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre3L + .align 2 + .type mcl_fpDbl_sqrPre3L,%function +mcl_fpDbl_sqrPre3L: @ @mcl_fpDbl_sqrPre3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, lr} + push {r4, r5, r6, r7, r8, r9, r10, lr} + ldm r1, {r2, r3, r12} + mov r10, #0 + umull r1, lr, r2, r2 + umull r7, r4, r3, r2 + str r1, [r0] + umull r1, r8, r12, r2 + mov r5, lr + mov r6, r1 + umlal r5, r6, r3, r2 + adds r2, lr, r7 + adcs r2, r4, r1 + adc r2, r8, #0 + adds lr, r5, r7 + umull r5, r9, r3, r3 + adcs r5, r6, r5 + umull r6, r7, r12, r3 + str lr, [r0, #4] + adcs r2, r2, r6 + adc r3, r10, #0 + adds r4, r5, r4 + adcs r2, r2, r9 + adc r3, r3, r7 + adds r1, r4, r1 + umull r5, r4, r12, r12 + str r1, [r0, #8] + adcs r1, r2, r6 + adcs r2, r3, r5 + adc r3, r10, #0 + adds r1, r1, r8 + str r1, [r0, #12] + adcs r1, r2, r7 + str r1, [r0, #16] + adc r1, r3, r4 + str r1, [r0, #20] + pop {r4, r5, r6, r7, r8, r9, r10, lr} + mov pc, lr +.Lfunc_end37: + .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L + .cantunwind + .fnend + + .globl mcl_fp_mont3L + .align 2 + .type mcl_fp_mont3L,%function +mcl_fp_mont3L: @ @mcl_fp_mont3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #44 + sub sp, sp, #44 + str r0, [sp, #24] @ 4-byte Spill + ldm r2, {r8, lr} + ldr r0, [r2, #8] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r1] + str r0, [sp, #36] @ 4-byte Spill + ldmib r1, {r4, r9} + ldr r2, [r3, #-4] + umull r7, r6, r0, r8 + ldr r0, [r3] + ldr r1, [r3, #8] + ldr r10, [r3, #4] + str r7, [sp, #12] @ 4-byte Spill + mul r5, r7, r2 + str r2, [sp, #16] @ 4-byte Spill + str r9, [sp, #32] @ 4-byte Spill + str r0, [sp, #40] @ 4-byte Spill + str r1, [sp, #28] @ 4-byte Spill + umull r12, r2, r5, r1 + umull r1, r3, r5, r0 + umull r0, r7, r9, r8 + umull r11, r9, r4, r8 + str r7, [sp] @ 4-byte Spill + adds r7, r6, r11 + str r1, [sp, #8] @ 4-byte Spill + mov r1, r3 + str r2, [sp, #4] @ 4-byte Spill + mov r2, r12 + adcs r7, r9, r0 + umlal r1, r2, r5, r10 + umlal r6, r0, r4, r8 + mov r8, #0 + ldr r7, [sp] @ 4-byte Reload + adc r9, r7, #0 + umull r7, r11, r5, r10 + ldr r5, [sp, #8] @ 4-byte Reload + adds r3, r3, r7 + ldr r7, [sp, #12] @ 4-byte Reload + adcs r3, r11, r12 + ldr r3, [sp, #4] @ 4-byte Reload + adc r3, r3, #0 + adds r7, r5, r7 + adcs r11, r1, r6 + adcs r12, r2, r0 + ldr r0, [sp, #32] @ 4-byte Reload + adcs r9, r3, r9 + ldr r3, [sp, #36] @ 4-byte Reload + adc r8, r8, #0 + umull r6, r7, lr, r0 + umull r5, r0, lr, r4 + umull r1, r2, lr, r3 + adds r5, r2, r5 + adcs r0, r0, r6 + umlal r2, r6, lr, r4 + adc r0, r7, #0 + adds r1, r11, r1 + ldr r11, [sp, #16] @ 4-byte Reload + adcs r2, r12, r2 + ldr r12, [sp, #28] @ 4-byte Reload + str r2, [sp, #12] @ 4-byte Spill + adcs r2, r9, r6 + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [sp, #40] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #4] @ 4-byte Spill + mov r0, #0 + mul r6, r1, r11 + adc r0, r0, #0 + umull r7, r9, r6, r12 + str r0, [sp] @ 4-byte Spill + mov r5, r7 + umull r8, r0, r6, r2 + umull lr, r2, r6, r10 + mov r3, r0 + adds r0, r0, lr + ldr lr, [sp, #36] @ 4-byte Reload + adcs r0, r2, r7 + umlal r3, r5, r6, r10 + adc r0, r9, #0 + adds r1, r8, r1 + 
ldr r1, [sp, #12] @ 4-byte Reload + adcs r1, r3, r1 + ldr r3, [sp, #20] @ 4-byte Reload + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #8] @ 4-byte Reload + adcs r8, r5, r1 + ldr r1, [sp, #4] @ 4-byte Reload + adcs r9, r0, r1 + ldr r0, [sp] @ 4-byte Reload + umull r1, r2, r3, lr + adc r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + umull r6, r7, r3, r0 + umull r5, r0, r3, r4 + adds r5, r2, r5 + adcs r0, r0, r6 + umlal r2, r6, r3, r4 + ldr r3, [sp, #12] @ 4-byte Reload + adc r0, r7, #0 + adds r1, r3, r1 + adcs r2, r8, r2 + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [sp, #8] @ 4-byte Reload + adcs r9, r9, r6 + mul r6, r1, r11 + umull r7, r4, r6, r12 + ldr r12, [sp, #40] @ 4-byte Reload + mov r5, r7 + adcs r0, r2, r0 + str r0, [sp, #32] @ 4-byte Spill + mov r0, #0 + umull r11, r3, r6, r12 + adc r8, r0, #0 + umull r0, lr, r6, r10 + mov r2, r3 + adds r0, r3, r0 + ldr r3, [sp, #32] @ 4-byte Reload + umlal r2, r5, r6, r10 + adcs r0, lr, r7 + adc r0, r4, #0 + adds r1, r11, r1 + ldr r1, [sp, #36] @ 4-byte Reload + adcs r1, r2, r1 + adcs r2, r5, r9 + ldr r5, [sp, #28] @ 4-byte Reload + adcs r0, r0, r3 + adc r3, r8, #0 + subs r7, r1, r12 + sbcs r6, r2, r10 + sbcs r5, r0, r5 + sbc r3, r3, #0 + ands r3, r3, #1 + movne r5, r0 + ldr r0, [sp, #24] @ 4-byte Reload + movne r7, r1 + movne r6, r2 + str r7, [r0] + str r6, [r0, #4] + str r5, [r0, #8] + add sp, sp, #44 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end38: + .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L + .cantunwind + .fnend + + .globl mcl_fp_montNF3L + .align 2 + .type mcl_fp_montNF3L,%function +mcl_fp_montNF3L: @ @mcl_fp_montNF3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #68 + sub sp, sp, #68 + str r0, [sp, #64] @ 4-byte Spill + ldr r8, [r1] + ldmib r1, {r6, r9} + ldm r2, {r4, r7} + ldr r0, [r2, #8] + mov r10, r3 + umull r3, r1, r0, r9 + str r1, [sp, #52] @ 4-byte Spill + umull r1, r2, r0, r8 + str r3, [sp, #44] @ 4-byte Spill + str r1, [sp, #48] @ 4-byte Spill + str r2, [sp, #40] @ 4-byte Spill + mov r1, r2 + mov r2, r3 + umull r3, r5, r0, r6 + umlal r1, r2, r0, r6 + str r3, [sp, #32] @ 4-byte Spill + umull r3, r0, r7, r6 + str r5, [sp, #36] @ 4-byte Spill + str r1, [sp, #56] @ 4-byte Spill + str r2, [sp, #60] @ 4-byte Spill + umull r2, r1, r7, r9 + str r0, [sp, #8] @ 4-byte Spill + str r3, [sp, #4] @ 4-byte Spill + str r1, [sp, #28] @ 4-byte Spill + umull r1, r11, r7, r8 + str r2, [sp, #16] @ 4-byte Spill + str r1, [sp, #24] @ 4-byte Spill + mov r1, r2 + str r11, [sp, #12] @ 4-byte Spill + umlal r11, r1, r7, r6 + umull r0, r7, r6, r4 + str r1, [sp, #20] @ 4-byte Spill + umull lr, r1, r9, r4 + umull r9, r2, r8, r4 + ldr r8, [r10, #-4] + adds r0, r2, r0 + str r1, [sp] @ 4-byte Spill + mov r1, r2 + mov r12, lr + adcs r0, r7, lr + umlal r1, r12, r6, r4 + ldr r0, [sp] @ 4-byte Reload + ldm r10, {r6, r7} + mul r2, r9, r8 + adc r3, r0, #0 + ldr r0, [r10, #8] + umull r4, lr, r2, r6 + adds r4, r4, r9 + umull r4, r9, r2, r7 + adcs r1, r4, r1 + umull r4, r5, r2, r0 + adcs r2, r4, r12 + ldr r4, [sp, #4] @ 4-byte Reload + adc r3, r3, #0 + adds r1, r1, lr + adcs r2, r2, r9 + adc r3, r3, r5 + ldr r5, [sp, #12] @ 4-byte Reload + adds r5, r5, r4 + ldr r4, [sp, #8] @ 4-byte Reload + ldr r5, [sp, #16] @ 4-byte Reload + adcs r5, r4, r5 + ldr r4, [sp, #24] @ 4-byte Reload + ldr r5, [sp, #28] @ 4-byte Reload + adc r5, r5, #0 + adds r1, r4, r1 + ldr r4, [sp, #20] @ 4-byte Reload + adcs r2, r11, r2 + adcs r12, r4, r3 + mul r4, r1, 
r8 + umull r3, r9, r4, r6 + adc lr, r5, #0 + adds r1, r3, r1 + umull r1, r3, r4, r7 + adcs r1, r1, r2 + umull r2, r5, r4, r0 + adcs r2, r2, r12 + adc r4, lr, #0 + adds r1, r1, r9 + adcs r12, r2, r3 + ldr r2, [sp, #40] @ 4-byte Reload + ldr r3, [sp, #32] @ 4-byte Reload + adc r9, r4, r5 + adds r5, r2, r3 + ldr r2, [sp, #44] @ 4-byte Reload + ldr r3, [sp, #36] @ 4-byte Reload + adcs r5, r3, r2 + ldr r2, [sp, #52] @ 4-byte Reload + ldr r5, [sp, #60] @ 4-byte Reload + adc lr, r2, #0 + ldr r2, [sp, #48] @ 4-byte Reload + adds r1, r2, r1 + mul r4, r1, r8 + umull r10, r2, r4, r0 + umull r3, r8, r4, r7 + str r2, [sp, #52] @ 4-byte Spill + umull r2, r11, r4, r6 + ldr r4, [sp, #56] @ 4-byte Reload + adcs r4, r4, r12 + adcs r12, r5, r9 + adc r5, lr, #0 + adds r1, r2, r1 + adcs r1, r3, r4 + adcs r2, r10, r12 + adc r3, r5, #0 + ldr r5, [sp, #52] @ 4-byte Reload + adds r1, r1, r11 + adcs r2, r2, r8 + adc r3, r3, r5 + subs r6, r1, r6 + sbcs r7, r2, r7 + sbc r0, r3, r0 + asr r5, r0, #31 + cmp r5, #0 + movlt r6, r1 + ldr r1, [sp, #64] @ 4-byte Reload + movlt r7, r2 + movlt r0, r3 + stm r1, {r6, r7} + str r0, [r1, #8] + add sp, sp, #68 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end39: + .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L + .cantunwind + .fnend + + .globl mcl_fp_montRed3L + .align 2 + .type mcl_fp_montRed3L,%function +mcl_fp_montRed3L: @ @mcl_fp_montRed3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #28 + sub sp, sp, #28 + ldr r5, [r2] + ldr lr, [r2, #-4] + ldr r3, [r2, #4] + ldr r2, [r2, #8] + str r0, [sp, #24] @ 4-byte Spill + str r5, [sp, #20] @ 4-byte Spill + str r2, [sp] @ 4-byte Spill + ldm r1, {r4, r7} + str r7, [sp, #8] @ 4-byte Spill + ldr r7, [r1, #8] + mul r6, r4, lr + umull r10, r8, r6, r3 + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r1, #12] + str r7, [sp, #12] @ 4-byte Spill + umull r7, r9, r6, r2 + umull r11, r2, r6, r5 + mov r0, r2 + adds r2, r2, r10 + mov r12, r7 + adcs r2, r8, r7 + umlal r0, r12, r6, r3 + ldr r8, [r1, #20] + ldr r1, [r1, #16] + ldr r2, [sp, #8] @ 4-byte Reload + adc r10, r9, #0 + adds r7, r4, r11 + mov r11, lr + adcs r9, r2, r0 + ldr r2, [sp] @ 4-byte Reload + mul r7, r9, lr + umull lr, r0, r7, r2 + str r0, [sp, #8] @ 4-byte Spill + umull r4, r0, r7, r5 + ldr r5, [sp, #16] @ 4-byte Reload + mov r6, lr + str r4, [sp, #4] @ 4-byte Spill + mov r4, r0 + umlal r4, r6, r7, r3 + adcs r12, r5, r12 + ldr r5, [sp, #12] @ 4-byte Reload + adcs r10, r5, r10 + adcs r1, r1, #0 + str r1, [sp, #16] @ 4-byte Spill + adcs r1, r8, #0 + str r1, [sp, #12] @ 4-byte Spill + mov r1, #0 + adc r8, r1, #0 + umull r1, r5, r7, r3 + ldr r7, [sp, #16] @ 4-byte Reload + adds r1, r0, r1 + adcs r0, r5, lr + ldr r1, [sp, #4] @ 4-byte Reload + ldr r0, [sp, #8] @ 4-byte Reload + adc r0, r0, #0 + adds r1, r1, r9 + adcs r1, r4, r12 + adcs lr, r6, r10 + ldr r6, [sp, #20] @ 4-byte Reload + mul r5, r1, r11 + mov r11, r2 + adcs r0, r0, r7 + umull r4, r12, r5, r2 + umull r2, r7, r5, r3 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r10, r0, #0 + umull r9, r0, r5, r6 + adc r8, r8, #0 + adds r2, r0, r2 + mov r2, r4 + adcs r4, r7, r4 + adc r7, r12, #0 + adds r1, r9, r1 + umlal r0, r2, r5, r3 + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, lr + adcs r1, r2, r1 + adcs r2, r7, r10 + adc r7, r8, #0 + subs r6, r0, r6 + sbcs r3, r1, r3 + sbcs r5, r2, r11 + sbc r7, r7, #0 + ands r7, r7, #1 + movne r6, r0 + ldr r0, [sp, #24] @ 4-byte Reload + movne r3, r1 + movne r5, r2 + str r6, 
[r0] + stmib r0, {r3, r5} + add sp, sp, #28 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end40: + .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L + .cantunwind + .fnend + + .globl mcl_fp_addPre3L + .align 2 + .type mcl_fp_addPre3L,%function +mcl_fp_addPre3L: @ @mcl_fp_addPre3L + .fnstart +@ BB#0: + .save {r4, lr} + push {r4, lr} + ldm r1, {r3, r12, lr} + ldm r2, {r1, r4} + ldr r2, [r2, #8] + adds r1, r1, r3 + adcs r3, r4, r12 + adcs r2, r2, lr + stm r0, {r1, r3} + str r2, [r0, #8] + mov r0, #0 + adc r0, r0, #0 + pop {r4, lr} + mov pc, lr +.Lfunc_end41: + .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L + .cantunwind + .fnend + + .globl mcl_fp_subPre3L + .align 2 + .type mcl_fp_subPre3L,%function +mcl_fp_subPre3L: @ @mcl_fp_subPre3L + .fnstart +@ BB#0: + .save {r4, lr} + push {r4, lr} + ldm r2, {r3, r12, lr} + ldm r1, {r2, r4} + ldr r1, [r1, #8] + subs r2, r2, r3 + sbcs r3, r4, r12 + sbcs r1, r1, lr + stm r0, {r2, r3} + str r1, [r0, #8] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + pop {r4, lr} + mov pc, lr +.Lfunc_end42: + .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L + .cantunwind + .fnend + + .globl mcl_fp_shr1_3L + .align 2 + .type mcl_fp_shr1_3L,%function +mcl_fp_shr1_3L: @ @mcl_fp_shr1_3L + .fnstart +@ BB#0: + ldr r3, [r1, #4] + ldr r12, [r1] + ldr r1, [r1, #8] + lsrs r2, r3, #1 + lsr r3, r3, #1 + orr r3, r3, r1, lsl #31 + rrx r2, r12 + lsr r1, r1, #1 + stm r0, {r2, r3} + str r1, [r0, #8] + mov pc, lr +.Lfunc_end43: + .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L + .cantunwind + .fnend + + .globl mcl_fp_add3L + .align 2 + .type mcl_fp_add3L,%function +mcl_fp_add3L: @ @mcl_fp_add3L + .fnstart +@ BB#0: + .save {r4, r5, r11, lr} + push {r4, r5, r11, lr} + ldm r1, {r12, lr} + ldr r1, [r1, #8] + ldm r2, {r4, r5} + ldr r2, [r2, #8] + adds r4, r4, r12 + adcs r5, r5, lr + adcs r1, r2, r1 + stm r0, {r4, r5} + mov r2, #0 + str r1, [r0, #8] + adc r12, r2, #0 + ldm r3, {r2, lr} + ldr r3, [r3, #8] + subs r4, r4, r2 + sbcs r5, r5, lr + sbcs r3, r1, r3 + sbc r1, r12, #0 + tst r1, #1 + stmeq r0, {r4, r5} + streq r3, [r0, #8] + pop {r4, r5, r11, lr} + mov pc, lr +.Lfunc_end44: + .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L + .cantunwind + .fnend + + .globl mcl_fp_addNF3L + .align 2 + .type mcl_fp_addNF3L,%function +mcl_fp_addNF3L: @ @mcl_fp_addNF3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r11, lr} + push {r4, r5, r6, r7, r11, lr} + ldm r1, {r12, lr} + ldr r1, [r1, #8] + ldm r2, {r4, r5} + ldr r2, [r2, #8] + adds r4, r4, r12 + adcs r5, r5, lr + adc r7, r2, r1 + ldm r3, {r2, r12, lr} + subs r2, r4, r2 + sbcs r3, r5, r12 + sbc r1, r7, lr + asr r6, r1, #31 + cmp r6, #0 + movlt r2, r4 + movlt r3, r5 + movlt r1, r7 + stm r0, {r2, r3} + str r1, [r0, #8] + pop {r4, r5, r6, r7, r11, lr} + mov pc, lr +.Lfunc_end45: + .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L + .cantunwind + .fnend + + .globl mcl_fp_sub3L + .align 2 + .type mcl_fp_sub3L,%function +mcl_fp_sub3L: @ @mcl_fp_sub3L + .fnstart +@ BB#0: + .save {r4, r5, r6, lr} + push {r4, r5, r6, lr} + ldm r2, {r12, lr} + ldr r4, [r2, #8] + ldm r1, {r2, r5, r6} + subs r1, r2, r12 + sbcs r2, r5, lr + sbcs r12, r6, r4 + mov r6, #0 + sbc r6, r6, #0 + stm r0, {r1, r2, r12} + tst r6, #1 + popeq {r4, r5, r6, lr} + moveq pc, lr + ldr r6, [r3] + ldr r5, [r3, #4] + ldr r3, [r3, #8] + adds r1, r6, r1 + adcs r2, r5, r2 + adc r3, r3, r12 + stm r0, {r1, r2, r3} + pop {r4, r5, r6, lr} + mov pc, lr +.Lfunc_end46: + .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L + .cantunwind + .fnend + + .globl mcl_fp_subNF3L + .align 2 + .type 
mcl_fp_subNF3L,%function +mcl_fp_subNF3L: @ @mcl_fp_subNF3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r11, lr} + push {r4, r5, r6, r7, r11, lr} + ldm r2, {r12, lr} + ldr r2, [r2, #8] + ldm r1, {r4, r5} + ldr r1, [r1, #8] + subs r4, r4, r12 + sbcs r7, r5, lr + sbc r1, r1, r2 + ldm r3, {r2, r12, lr} + asr r6, r1, #31 + adds r2, r4, r2 + adcs r3, r7, r12 + adc r5, r1, lr + cmp r6, #0 + movge r2, r4 + movge r3, r7 + movge r5, r1 + stm r0, {r2, r3, r5} + pop {r4, r5, r6, r7, r11, lr} + mov pc, lr +.Lfunc_end47: + .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L + .cantunwind + .fnend + + .globl mcl_fpDbl_add3L + .align 2 + .type mcl_fpDbl_add3L,%function +mcl_fpDbl_add3L: @ @mcl_fpDbl_add3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + ldm r1, {r12, lr} + ldr r7, [r2] + ldr r11, [r1, #8] + ldr r9, [r1, #12] + ldr r10, [r1, #16] + ldr r8, [r1, #20] + ldmib r2, {r1, r5, r6} + ldr r4, [r2, #16] + ldr r2, [r2, #20] + adds r7, r7, r12 + adcs r1, r1, lr + str r7, [r0] + str r1, [r0, #4] + adcs r1, r5, r11 + ldr r5, [r3] + adcs r7, r6, r9 + str r1, [r0, #8] + mov r1, #0 + adcs r6, r4, r10 + ldr r4, [r3, #4] + ldr r3, [r3, #8] + adcs r2, r2, r8 + adc r1, r1, #0 + subs r5, r7, r5 + sbcs r4, r6, r4 + sbcs r3, r2, r3 + sbc r1, r1, #0 + ands r1, r1, #1 + movne r5, r7 + movne r4, r6 + movne r3, r2 + str r5, [r0, #12] + str r4, [r0, #16] + str r3, [r0, #20] + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end48: + .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub3L + .align 2 + .type mcl_fpDbl_sub3L,%function +mcl_fpDbl_sub3L: @ @mcl_fpDbl_sub3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + ldm r2, {r12, lr} + ldr r7, [r1] + ldr r11, [r2, #8] + ldr r9, [r2, #12] + ldr r10, [r2, #16] + ldr r8, [r2, #20] + ldmib r1, {r2, r5, r6} + ldr r4, [r1, #16] + ldr r1, [r1, #20] + subs r7, r7, r12 + sbcs r2, r2, lr + str r7, [r0] + str r2, [r0, #4] + sbcs r2, r5, r11 + ldr r5, [r3] + sbcs r7, r6, r9 + str r2, [r0, #8] + mov r2, #0 + sbcs r6, r4, r10 + ldr r4, [r3, #4] + ldr r3, [r3, #8] + sbcs r1, r1, r8 + sbc r2, r2, #0 + adds r5, r7, r5 + adcs r4, r6, r4 + adc r3, r1, r3 + ands r2, r2, #1 + moveq r5, r7 + moveq r4, r6 + moveq r3, r1 + str r5, [r0, #12] + str r4, [r0, #16] + str r3, [r0, #20] + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end49: + .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre4L + .align 2 + .type mcl_fp_mulUnitPre4L,%function +mcl_fp_mulUnitPre4L: @ @mcl_fp_mulUnitPre4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r11, lr} + push {r4, r5, r6, r7, r11, lr} + ldr r12, [r1] + ldmib r1, {r3, lr} + ldr r1, [r1, #12] + umull r4, r6, r12, r2 + umull r7, r12, lr, r2 + str r4, [r0] + mov r5, r6 + mov r4, r7 + umlal r5, r4, r3, r2 + str r5, [r0, #4] + str r4, [r0, #8] + umull r5, lr, r1, r2 + umull r1, r4, r3, r2 + adds r1, r6, r1 + adcs r1, r4, r7 + adcs r1, r12, r5 + str r1, [r0, #12] + adc r1, lr, #0 + str r1, [r0, #16] + pop {r4, r5, r6, r7, r11, lr} + mov pc, lr +.Lfunc_end50: + .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre4L + .align 2 + .type mcl_fpDbl_mulPre4L,%function +mcl_fpDbl_mulPre4L: @ @mcl_fpDbl_mulPre4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #40 + sub sp, sp, #40 + mov lr, 
r2 + ldr r11, [r1] + ldr r4, [lr] + ldmib r1, {r8, r12} + ldr r3, [r1, #12] + umull r2, r7, r11, r4 + umull r6, r9, r8, r4 + str r12, [sp] @ 4-byte Spill + adds r6, r7, r6 + str r2, [sp, #36] @ 4-byte Spill + mov r2, r3 + umull r6, r10, r12, r4 + adcs r5, r9, r6 + umlal r7, r6, r8, r4 + umull r5, r9, r3, r4 + ldr r3, [sp, #36] @ 4-byte Reload + ldr r4, [lr, #4] + adcs r10, r10, r5 + str r3, [r0] + adc r3, r9, #0 + str r3, [sp, #24] @ 4-byte Spill + umull r5, r3, r11, r4 + adds r7, r5, r7 + str r3, [sp, #32] @ 4-byte Spill + str r7, [sp, #36] @ 4-byte Spill + umull r7, r3, r8, r4 + str r3, [sp, #28] @ 4-byte Spill + adcs r3, r7, r6 + umull r7, r9, r12, r4 + mov r12, r2 + ldr r6, [sp, #32] @ 4-byte Reload + adcs r7, r7, r10 + umull r5, r10, r2, r4 + ldr r2, [sp, #24] @ 4-byte Reload + mov r4, #0 + adcs r5, r5, r2 + ldr r2, [sp, #28] @ 4-byte Reload + adc r4, r4, #0 + adds r6, r3, r6 + adcs r7, r7, r2 + ldr r2, [lr, #12] + str r7, [sp, #24] @ 4-byte Spill + adcs r7, r5, r9 + str r7, [sp, #20] @ 4-byte Spill + adc r7, r4, r10 + ldr r4, [lr, #8] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [sp, #36] @ 4-byte Reload + str r7, [r0, #4] + umull r5, r7, r11, r4 + adds r5, r5, r6 + str r7, [sp, #12] @ 4-byte Spill + str r5, [r0, #8] + ldm r1, {r11, lr} + ldr r5, [r1, #8] + ldr r1, [r1, #12] + ldr r3, [sp, #24] @ 4-byte Reload + umull r6, r7, r1, r2 + umull r10, r1, r5, r2 + str r1, [sp, #32] @ 4-byte Spill + umull r5, r1, lr, r2 + str r6, [sp, #8] @ 4-byte Spill + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [sp, #16] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + umull r6, r1, r11, r2 + umull r2, r11, r12, r4 + str r1, [sp, #4] @ 4-byte Spill + ldr r1, [sp] @ 4-byte Reload + umull lr, r12, r1, r4 + umull r9, r1, r8, r4 + ldr r4, [sp, #20] @ 4-byte Reload + mov r8, #0 + adcs r3, r9, r3 + adcs r4, lr, r4 + adcs r2, r2, r7 + ldr r7, [sp, #12] @ 4-byte Reload + adc lr, r8, #0 + adds r3, r3, r7 + adcs r1, r4, r1 + adcs r2, r2, r12 + adc r4, lr, r11 + adds r3, r6, r3 + ldr r6, [sp, #4] @ 4-byte Reload + str r3, [r0, #12] + ldr r3, [sp, #8] @ 4-byte Reload + adcs r1, r5, r1 + adcs r2, r10, r2 + adcs r3, r3, r4 + adc r7, r8, #0 + adds r1, r1, r6 + str r1, [r0, #16] + ldr r1, [sp, #28] @ 4-byte Reload + adcs r1, r2, r1 + str r1, [r0, #20] + ldr r1, [sp, #32] @ 4-byte Reload + adcs r1, r3, r1 + str r1, [r0, #24] + ldr r1, [sp, #36] @ 4-byte Reload + adc r1, r7, r1 + str r1, [r0, #28] + add sp, sp, #40 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end51: + .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre4L + .align 2 + .type mcl_fpDbl_sqrPre4L,%function +mcl_fpDbl_sqrPre4L: @ @mcl_fpDbl_sqrPre4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #16 + sub sp, sp, #16 + ldm r1, {r2, r3, r12} + ldr r8, [r1, #12] + umull r4, r6, r2, r2 + umull r11, lr, r12, r2 + str r4, [r0] + umull r10, r4, r8, r2 + mov r7, r11 + mov r5, r6 + str lr, [sp, #12] @ 4-byte Spill + str r4, [sp, #8] @ 4-byte Spill + umull r4, r9, r3, r2 + umlal r5, r7, r3, r2 + adds r2, r6, r4 + adcs r2, r9, r11 + ldr r2, [sp, #8] @ 4-byte Reload + adcs r10, lr, r10 + adc r2, r2, #0 + adds r4, r4, r5 + str r2, [sp] @ 4-byte Spill + umull r6, r2, r3, r3 + str r4, [sp, #8] @ 4-byte Spill + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [sp] @ 4-byte Reload + adcs r5, r6, r7 + umull r6, r7, r12, r3 + adcs lr, r6, r10 + umull r4, r10, r8, r3 + adcs r3, r4, r2 + ldr r2, [sp, #4] @ 4-byte Reload + mov r4, #0 + 
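+@ sqrPre4L computes the full 8-limb square of the 4-limb input by
+@ plain schoolbook multiplication: umull/umlal form the 32x32->64
+@ partial products and adcs threads the carries, with a few cross
+@ terms parked in the 16-byte stack frame (the "Spill"/"Reload" slots).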
adc r4, r4, #0 + adds r5, r5, r9 + adcs r9, lr, r2 + adcs r2, r3, r7 + ldr r3, [sp, #8] @ 4-byte Reload + adc r4, r4, r10 + adds r5, r11, r5 + str r2, [sp, #4] @ 4-byte Spill + umull r2, r10, r8, r12 + umull lr, r8, r12, r12 + adcs r6, r6, r9 + stmib r0, {r3, r5} + mov r5, #0 + ldr r3, [sp, #4] @ 4-byte Reload + adcs r3, lr, r3 + adcs r2, r2, r4 + ldr r4, [sp, #12] @ 4-byte Reload + adc r5, r5, #0 + adds r6, r6, r4 + adcs r11, r3, r7 + adcs lr, r2, r8 + adc r8, r5, r10 + ldr r5, [r1] + ldmib r1, {r4, r7} + ldr r1, [r1, #12] + umull r12, r2, r1, r1 + umull r3, r9, r7, r1 + umull r7, r10, r4, r1 + str r2, [sp, #12] @ 4-byte Spill + umull r4, r2, r5, r1 + adds r1, r4, r6 + adcs r4, r7, r11 + str r1, [r0, #12] + mov r7, #0 + adcs r3, r3, lr + adcs r1, r12, r8 + adc r7, r7, #0 + adds r2, r4, r2 + str r2, [r0, #16] + adcs r2, r3, r10 + adcs r1, r1, r9 + str r2, [r0, #20] + str r1, [r0, #24] + ldr r1, [sp, #12] @ 4-byte Reload + adc r1, r7, r1 + str r1, [r0, #28] + add sp, sp, #16 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end52: + .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L + .cantunwind + .fnend + + .globl mcl_fp_mont4L + .align 2 + .type mcl_fp_mont4L,%function +mcl_fp_mont4L: @ @mcl_fp_mont4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #76 + sub sp, sp, #76 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r2, #8] + ldr r9, [r2] + ldr r8, [r2, #4] + ldr r6, [r3, #-4] + ldr r11, [r1, #8] + ldr r10, [r1, #12] + ldr r7, [r3, #8] + ldr r5, [r3, #4] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r2, #12] + ldr r2, [r1, #4] + str r6, [sp, #44] @ 4-byte Spill + str r7, [sp, #40] @ 4-byte Spill + str r5, [sp, #52] @ 4-byte Spill + str r11, [sp, #60] @ 4-byte Spill + str r10, [sp, #56] @ 4-byte Spill + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r1] + ldr r1, [r3] + str r2, [sp, #72] @ 4-byte Spill + ldr r3, [r3, #12] + umull r4, r2, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + str r1, [sp, #48] @ 4-byte Spill + mul r0, r4, r6 + str r4, [sp, #24] @ 4-byte Spill + mov r4, r5 + umull lr, r6, r0, r7 + umull r7, r12, r0, r1 + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [sp, #72] @ 4-byte Reload + str r6, [sp, #16] @ 4-byte Spill + mov r6, r12 + str lr, [sp, #8] @ 4-byte Spill + umlal r6, lr, r0, r5 + umull r5, r1, r10, r9 + str r1, [sp, #68] @ 4-byte Spill + str r5, [sp, #12] @ 4-byte Spill + umull r1, r10, r11, r9 + umull r11, r5, r7, r9 + adds r7, r2, r11 + adcs r5, r5, r1 + ldr r5, [sp, #12] @ 4-byte Reload + adcs r11, r10, r5 + ldr r5, [sp, #68] @ 4-byte Reload + str r3, [sp, #68] @ 4-byte Spill + adc r5, r5, #0 + str r5, [sp, #12] @ 4-byte Spill + umull r5, r7, r0, r3 + umull r10, r3, r0, r4 + ldr r4, [sp, #24] @ 4-byte Reload + adds r0, r12, r10 + mov r12, #0 + ldr r0, [sp, #8] @ 4-byte Reload + adcs r0, r3, r0 + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #20] @ 4-byte Reload + adc r3, r7, #0 + ldr r7, [sp, #72] @ 4-byte Reload + adds r4, r5, r4 + umlal r2, r1, r7, r9 + adcs r2, r6, r2 + adcs r1, lr, r1 + str r2, [sp, #24] @ 4-byte Spill + adcs r9, r0, r11 + ldr r0, [sp, #12] @ 4-byte Reload + str r1, [sp, #20] @ 4-byte Spill + adcs r6, r3, r0 + ldr r0, [sp, #56] @ 4-byte Reload + mov r3, r7 + adc r10, r12, #0 + umull r2, r12, r8, r7 + ldr r7, [sp, #64] @ 4-byte Reload + umull r5, r4, r8, r0 + ldr r0, [sp, #60] @ 4-byte Reload + umull r1, lr, r8, r0 + umull r11, r0, r8, r7 + adds r2, r0, r2 + adcs r2, r12, r1 + umlal r0, r1, r8, r3 + ldr r3, [sp, #24] @ 4-byte Reload + 
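+@ mont4L appears to follow the word-serial (CIOS-style) Montgomery
+@ pattern: for each limb b[i], accumulate a*b[i] into the running
+@ total t, add (t[0]*N' mod 2^32)*N so the bottom limb cancels, and
+@ shift t down one limb; after four rounds the subs/sbcs/movne tail
+@ performs the final conditional subtraction of N.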
ldr r8, [sp, #48] @ 4-byte Reload + adcs r2, lr, r5 + adc r5, r4, #0 + adds r7, r3, r11 + ldr r3, [sp, #20] @ 4-byte Reload + ldr r11, [sp, #40] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #24] @ 4-byte Spill + adcs r0, r9, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + adcs r0, r6, r2 + str r0, [sp, #16] @ 4-byte Spill + adcs r0, r10, r5 + ldr r10, [sp, #44] @ 4-byte Reload + str r0, [sp, #12] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + mul r5, r7, r10 + umull r6, r0, r5, r11 + str r0, [sp] @ 4-byte Spill + umull r0, r3, r5, r8 + mov r4, r6 + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + mov r2, r3 + umlal r2, r4, r5, r1 + umull r9, r12, r5, r0 + umull lr, r0, r5, r1 + adds r3, r3, lr + adcs r0, r0, r6 + ldr r3, [sp, #4] @ 4-byte Reload + ldr r0, [sp] @ 4-byte Reload + adcs r0, r0, r9 + adc r1, r12, #0 + adds r3, r3, r7 + ldr r12, [sp, #64] @ 4-byte Reload + ldr r3, [sp, #24] @ 4-byte Reload + adcs r2, r2, r3 + ldr r3, [sp, #28] @ 4-byte Reload + str r2, [sp, #24] @ 4-byte Spill + ldr r2, [sp, #20] @ 4-byte Reload + umull r9, r7, r3, r12 + adcs r2, r4, r2 + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [sp, #16] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #72] @ 4-byte Reload + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #60] @ 4-byte Reload + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + umull r6, r5, r3, r0 + umull r0, r4, r3, r1 + umull r1, lr, r3, r2 + adds r1, r7, r1 + adcs r1, lr, r0 + umlal r7, r0, r3, r2 + ldr r2, [sp, #24] @ 4-byte Reload + adcs r1, r4, r6 + adc r6, r5, #0 + adds r3, r2, r9 + ldr r2, [sp, #20] @ 4-byte Reload + adcs r2, r2, r7 + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [sp, #16] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adcs r0, r0, r6 + mul r6, r3, r10 + str r0, [sp, #16] @ 4-byte Spill + mov r0, #0 + umull r7, r9, r6, r11 + umull r10, r4, r6, r8 + adc r0, r0, #0 + mov r2, r4 + mov r5, r7 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + umlal r2, r5, r6, r1 + umull r8, r12, r6, r0 + umull lr, r0, r6, r1 + adds r6, r4, lr + adcs r0, r0, r7 + adcs r0, r9, r8 + adc r1, r12, #0 + adds r3, r10, r3 + ldr r3, [sp, #28] @ 4-byte Reload + adcs r2, r2, r3 + ldr r3, [sp, #32] @ 4-byte Reload + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [sp, #24] @ 4-byte Reload + adcs r8, r5, r2 + ldr r2, [sp, #20] @ 4-byte Reload + ldr r5, [sp, #64] @ 4-byte Reload + adcs r9, r0, r2 + ldr r0, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #72] @ 4-byte Reload + umull lr, r7, r3, r5 + ldr r5, [sp, #52] @ 4-byte Reload + adcs r11, r1, r0 + ldr r0, [sp, #12] @ 4-byte Reload + ldr r1, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + umull r6, r10, r3, r0 + umull r0, r4, r3, r1 + umull r1, r12, r3, r2 + adds r1, r7, r1 + adcs r1, r12, r0 + umlal r7, r0, r3, r2 + ldr r2, [sp, #28] @ 4-byte Reload + ldr r12, [sp, #68] @ 4-byte Reload + adcs r1, r4, r6 + ldr r4, [sp, #40] @ 4-byte Reload + adc r6, r10, #0 + adds lr, r2, lr + ldr r2, [sp, #48] @ 4-byte Reload + adcs r10, r8, r7 + adcs r0, r9, r0 + str r0, [sp, #72] @ 4-byte Spill + adcs r0, r11, r1 + str r0, [sp, 
#64] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r8, r0, r6 + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + mul r6, lr, r0 + umull r1, r3, r6, r5 + umull r11, r7, r6, r2 + umull r0, r9, r6, r4 + adds r1, r7, r1 + adcs r1, r3, r0 + umlal r7, r0, r6, r5 + umull r1, r3, r6, r12 + adcs r1, r9, r1 + mov r9, r5 + adc r5, r3, #0 + adds r3, r11, lr + adcs r3, r7, r10 + ldr r7, [sp, #72] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #64] @ 4-byte Reload + adcs r1, r1, r7 + adcs lr, r5, r8 + ldr r5, [sp, #60] @ 4-byte Reload + adc r8, r5, #0 + subs r6, r3, r2 + sbcs r5, r0, r9 + sbcs r4, r1, r4 + sbcs r7, lr, r12 + sbc r2, r8, #0 + ands r2, r2, #1 + movne r5, r0 + ldr r0, [sp, #36] @ 4-byte Reload + movne r6, r3 + movne r4, r1 + cmp r2, #0 + movne r7, lr + str r6, [r0] + str r5, [r0, #4] + str r4, [r0, #8] + str r7, [r0, #12] + add sp, sp, #76 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end53: + .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L + .cantunwind + .fnend + + .globl mcl_fp_montNF4L + .align 2 + .type mcl_fp_montNF4L,%function +mcl_fp_montNF4L: @ @mcl_fp_montNF4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #140 + sub sp, sp, #140 + mov r10, r3 + str r0, [sp, #132] @ 4-byte Spill + ldr lr, [r1] + ldmib r1, {r4, r8, r12} + ldr r3, [r2] + ldr r1, [r2, #4] + ldr r0, [r2, #8] + ldr r2, [r2, #12] + umull r6, r5, r2, r8 + str r5, [sp, #124] @ 4-byte Spill + umull r5, r7, r2, lr + str r6, [sp, #112] @ 4-byte Spill + str r5, [sp, #128] @ 4-byte Spill + mov r5, r6 + mov r6, r7 + str r7, [sp, #108] @ 4-byte Spill + umlal r6, r5, r2, r4 + str r5, [sp, #120] @ 4-byte Spill + umull r7, r5, r0, r8 + str r6, [sp, #116] @ 4-byte Spill + str r5, [sp, #84] @ 4-byte Spill + umull r5, r6, r0, lr + str r7, [sp, #72] @ 4-byte Spill + str r5, [sp, #88] @ 4-byte Spill + str r6, [sp, #68] @ 4-byte Spill + mov r5, r6 + mov r6, r7 + umlal r5, r6, r0, r4 + str r5, [sp, #76] @ 4-byte Spill + str r6, [sp, #80] @ 4-byte Spill + umull r6, r5, r1, r8 + str r5, [sp, #44] @ 4-byte Spill + umull r5, r7, r1, lr + str r6, [sp, #32] @ 4-byte Spill + str r5, [sp, #48] @ 4-byte Spill + mov r5, r6 + mov r6, r7 + str r7, [sp, #28] @ 4-byte Spill + umlal r6, r5, r1, r4 + str r5, [sp, #40] @ 4-byte Spill + umull r9, r5, r8, r3 + str r6, [sp, #36] @ 4-byte Spill + str r5, [sp, #136] @ 4-byte Spill + umull r6, r5, lr, r3 + mov r8, r9 + str r6, [sp, #4] @ 4-byte Spill + umull r11, r6, r2, r12 + mov lr, r5 + str r6, [sp, #104] @ 4-byte Spill + umull r7, r6, r2, r4 + umlal lr, r8, r4, r3 + str r11, [sp, #100] @ 4-byte Spill + str r6, [sp, #96] @ 4-byte Spill + umull r6, r2, r0, r12 + str r7, [sp, #92] @ 4-byte Spill + str r6, [sp, #60] @ 4-byte Spill + str r2, [sp, #64] @ 4-byte Spill + umull r6, r2, r0, r4 + str r2, [sp, #56] @ 4-byte Spill + umull r2, r0, r1, r12 + str r6, [sp, #52] @ 4-byte Spill + str r2, [sp, #20] @ 4-byte Spill + str r0, [sp, #24] @ 4-byte Spill + umull r2, r0, r1, r4 + str r2, [sp, #12] @ 4-byte Spill + umull r2, r6, r4, r3 + str r0, [sp, #16] @ 4-byte Spill + umull r0, r1, r12, r3 + ldr r4, [r10, #4] + adds r2, r5, r2 + ldr r5, [sp, #4] @ 4-byte Reload + adcs r2, r6, r9 + ldr r9, [r10, #8] + ldr r2, [sp, #136] @ 4-byte Reload + str r4, [sp, #136] @ 4-byte Spill + adcs r12, r2, r0 + ldr r2, [r10, #-4] + adc r0, r1, #0 + str r0, [sp] @ 4-byte Spill + ldr r0, [r10] + mul r1, r5, r2 + mov r7, r2 + umull r3, r11, r1, r0 + str r0, [sp, #8] @ 4-byte Spill 
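+@ montNF4L is the variant without a carry-tested final subtraction:
+@ the candidate result is reduced by a plain subs/sbcs chain and then
+@ selected on the sign of the top word (cmp/movlt), which presumably
+@ relies on the modulus not being full bit-length.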
+ mov r6, r0 + umull r2, r0, r1, r9 + adds r3, r3, r5 + umull r3, r5, r1, r4 + adcs r3, r3, lr + ldr lr, [r10, #12] + adcs r2, r2, r8 + umull r4, r8, r1, lr + adcs r1, r4, r12 + ldr r4, [sp] @ 4-byte Reload + adc r4, r4, #0 + adds r3, r3, r11 + adcs r2, r2, r5 + adcs r12, r1, r0 + ldr r0, [sp, #28] @ 4-byte Reload + adc r1, r4, r8 + ldr r4, [sp, #12] @ 4-byte Reload + adds r4, r0, r4 + ldr r0, [sp, #32] @ 4-byte Reload + ldr r4, [sp, #16] @ 4-byte Reload + adcs r4, r4, r0 + ldr r0, [sp, #44] @ 4-byte Reload + ldr r4, [sp, #20] @ 4-byte Reload + adcs r4, r0, r4 + ldr r0, [sp, #24] @ 4-byte Reload + adc r5, r0, #0 + ldr r0, [sp, #48] @ 4-byte Reload + adds r3, r0, r3 + ldr r0, [sp, #36] @ 4-byte Reload + adcs r2, r0, r2 + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r12 + mov r12, r7 + adcs r8, r4, r1 + ldr r1, [sp, #136] @ 4-byte Reload + adc r10, r5, #0 + mul r5, r3, r7 + umull r7, r11, r5, r6 + adds r3, r7, r3 + umull r3, r7, r5, r1 + adcs r2, r3, r2 + umull r3, r4, r5, r9 + adcs r0, r3, r0 + umull r3, r6, r5, lr + adcs r3, r3, r8 + ldr r8, [sp, #8] @ 4-byte Reload + adc r5, r10, #0 + adds r2, r2, r11 + adcs r0, r0, r7 + adcs r3, r3, r4 + ldr r4, [sp, #68] @ 4-byte Reload + adc r7, r5, r6 + ldr r5, [sp, #52] @ 4-byte Reload + ldr r6, [sp, #88] @ 4-byte Reload + adds r4, r4, r5 + ldr r5, [sp, #56] @ 4-byte Reload + ldr r4, [sp, #72] @ 4-byte Reload + adcs r4, r5, r4 + ldr r5, [sp, #60] @ 4-byte Reload + ldr r4, [sp, #84] @ 4-byte Reload + adcs r4, r4, r5 + ldr r5, [sp, #64] @ 4-byte Reload + adc r5, r5, #0 + adds r2, r6, r2 + ldr r6, [sp, #76] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #80] @ 4-byte Reload + adcs r3, r6, r3 + adcs r6, r4, r7 + adc r10, r5, #0 + mul r5, r2, r12 + umull r7, r11, r5, r8 + adds r2, r7, r2 + umull r2, r7, r5, r1 + adcs r0, r2, r0 + umull r2, r4, r5, r9 + adcs r2, r2, r3 + umull r3, r1, r5, lr + adcs r3, r3, r6 + ldr r6, [sp, #128] @ 4-byte Reload + adc r5, r10, #0 + adds r0, r0, r11 + adcs r2, r2, r7 + adcs r3, r3, r4 + ldr r4, [sp, #108] @ 4-byte Reload + adc r1, r5, r1 + ldr r5, [sp, #92] @ 4-byte Reload + adds r4, r4, r5 + ldr r5, [sp, #96] @ 4-byte Reload + ldr r4, [sp, #112] @ 4-byte Reload + adcs r4, r5, r4 + ldr r5, [sp, #100] @ 4-byte Reload + ldr r4, [sp, #124] @ 4-byte Reload + adcs r4, r4, r5 + ldr r5, [sp, #104] @ 4-byte Reload + adc r5, r5, #0 + adds r0, r6, r0 + ldr r6, [sp, #116] @ 4-byte Reload + adcs r2, r6, r2 + ldr r6, [sp, #120] @ 4-byte Reload + adcs r3, r6, r3 + adcs r11, r4, r1 + adc r10, r5, #0 + mul r5, r0, r12 + umull r7, r1, r5, r8 + adds r0, r7, r0 + ldr r7, [sp, #136] @ 4-byte Reload + umull r0, r12, r5, r9 + umull r6, r4, r5, r7 + adcs r2, r6, r2 + adcs r0, r0, r3 + umull r3, r6, r5, lr + adcs r3, r3, r11 + adc r5, r10, #0 + adds r1, r2, r1 + adcs r0, r0, r4 + adcs r2, r3, r12 + adc r3, r5, r6 + subs r4, r1, r8 + sbcs r7, r0, r7 + sbcs r6, r2, r9 + sbc r5, r3, lr + cmp r5, #0 + movlt r7, r0 + ldr r0, [sp, #132] @ 4-byte Reload + movlt r4, r1 + movlt r6, r2 + cmp r5, #0 + movlt r5, r3 + stm r0, {r4, r7} + str r6, [r0, #8] + str r5, [r0, #12] + add sp, sp, #140 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end54: + .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L + .cantunwind + .fnend + + .globl mcl_fp_montRed4L + .align 2 + .type mcl_fp_montRed4L,%function +mcl_fp_montRed4L: @ @mcl_fp_montRed4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #60 + sub sp, sp, #60 + ldr r7, [r1, #4] + ldr r6, [r2, #-4] + ldr r10, [r1] + ldr r3, 
[r2, #8] + ldr r8, [r2] + ldr r12, [r2, #4] + ldr r2, [r2, #12] + str r0, [sp, #52] @ 4-byte Spill + str r7, [sp, #28] @ 4-byte Spill + ldr r7, [r1, #8] + str r6, [sp, #56] @ 4-byte Spill + str r3, [sp, #40] @ 4-byte Spill + str r2, [sp, #36] @ 4-byte Spill + str r8, [sp, #32] @ 4-byte Spill + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r1, #12] + str r7, [sp, #44] @ 4-byte Spill + mul r7, r10, r6 + umull r6, r5, r7, r3 + str r5, [sp, #20] @ 4-byte Spill + mov r5, r3 + umull r4, r3, r7, r8 + mov lr, r6 + str r4, [sp, #24] @ 4-byte Spill + umull r9, r4, r7, r2 + umull r11, r2, r7, r12 + mov r0, r3 + adds r3, r3, r11 + umlal r0, lr, r7, r12 + adcs r2, r2, r6 + ldr r6, [sp, #56] @ 4-byte Reload + ldr r2, [sp, #20] @ 4-byte Reload + adcs r2, r2, r9 + str r2, [sp, #20] @ 4-byte Spill + adc r2, r4, #0 + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [sp, #24] @ 4-byte Reload + adds r4, r10, r2 + ldr r2, [sp, #28] @ 4-byte Reload + add r10, r1, #16 + adcs r11, r2, r0 + mul r4, r11, r6 + umull r9, r0, r4, r5 + str r0, [sp, #24] @ 4-byte Spill + umull r0, r2, r4, r8 + mov r5, r9 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r1, #28] + mov r7, r2 + umlal r7, r5, r4, r12 + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r1, r8, r10} + ldr r3, [sp, #48] @ 4-byte Reload + adcs r0, r3, lr + ldr r3, [sp, #44] @ 4-byte Reload + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r3, r3, r0 + ldr r0, [sp, #16] @ 4-byte Reload + str r3, [sp, #48] @ 4-byte Spill + adcs r1, r1, r0 + adcs r0, r8, #0 + str r1, [sp, #44] @ 4-byte Spill + ldr r8, [sp, #32] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + adcs r0, r10, #0 + ldr r10, [sp, #36] @ 4-byte Reload + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + umull r1, lr, r4, r10 + adcs r0, r0, #0 + str r0, [sp, #12] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + umull r3, r0, r4, r12 + adds r3, r2, r3 + ldr r2, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #40] @ 4-byte Reload + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp] @ 4-byte Spill + ldr r0, [sp, #4] @ 4-byte Reload + adc r1, lr, #0 + adds r2, r2, r11 + adcs r11, r7, r0 + mul r3, r11, r6 + umull r2, r0, r3, r9 + str r0, [sp, #24] @ 4-byte Spill + umull r0, r6, r3, r8 + mov r7, r2 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + mov r4, r6 + umlal r4, r7, r3, r12 + adcs r0, r5, r0 + ldr r5, [sp] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r1, r0 + umull r1, r5, r3, r10 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + umull lr, r0, r3, r12 + adds r3, r6, lr + mov lr, r8 + adcs r0, r0, r2 + ldr r2, [sp, #28] @ 4-byte Reload + ldr r3, [sp, #44] @ 4-byte Reload + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + adc r1, r5, #0 + adds r2, r2, r11 + ldr r2, [sp, #48] @ 4-byte Reload + adcs r2, r4, r2 + adcs r3, r7, r3 + str r3, [sp, #48] @ 4-byte Spill + ldr r3, [sp, #20] @ 4-byte Reload + adcs r0, r0, r3 + mov r3, r9 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r11, r1, r0 + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #28] @ 4-byte 
Spill + ldr r0, [sp, #8] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + mul r5, r2, r0 + umull r4, r0, r5, r12 + umull r8, r6, r5, lr + adds r4, r6, r4 + umull r1, r4, r5, r3 + adcs r0, r0, r1 + umlal r6, r1, r5, r12 + umull r0, r7, r5, r10 + adcs r0, r4, r0 + ldr r4, [sp, #44] @ 4-byte Reload + adc r5, r7, #0 + adds r2, r8, r2 + ldr r2, [sp, #48] @ 4-byte Reload + adcs r2, r6, r2 + adcs r1, r1, r4 + ldr r4, [sp, #28] @ 4-byte Reload + adcs r0, r0, r11 + adcs r9, r5, r4 + ldr r4, [sp, #24] @ 4-byte Reload + adc r8, r4, #0 + subs r6, r2, lr + sbcs r5, r1, r12 + sbcs r4, r0, r3 + sbcs r7, r9, r10 + sbc r3, r8, #0 + ands r3, r3, #1 + movne r4, r0 + ldr r0, [sp, #52] @ 4-byte Reload + movne r6, r2 + movne r5, r1 + cmp r3, #0 + movne r7, r9 + str r6, [r0] + str r5, [r0, #4] + str r4, [r0, #8] + str r7, [r0, #12] + add sp, sp, #60 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end55: + .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L + .cantunwind + .fnend + + .globl mcl_fp_addPre4L + .align 2 + .type mcl_fp_addPre4L,%function +mcl_fp_addPre4L: @ @mcl_fp_addPre4L + .fnstart +@ BB#0: + .save {r4, r5, r6, lr} + push {r4, r5, r6, lr} + ldm r1, {r3, r12, lr} + ldr r1, [r1, #12] + ldm r2, {r4, r5, r6} + ldr r2, [r2, #12] + adds r3, r4, r3 + adcs r5, r5, r12 + adcs r6, r6, lr + adcs r1, r2, r1 + stm r0, {r3, r5, r6} + str r1, [r0, #12] + mov r0, #0 + adc r0, r0, #0 + pop {r4, r5, r6, lr} + mov pc, lr +.Lfunc_end56: + .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L + .cantunwind + .fnend + + .globl mcl_fp_subPre4L + .align 2 + .type mcl_fp_subPre4L,%function +mcl_fp_subPre4L: @ @mcl_fp_subPre4L + .fnstart +@ BB#0: + .save {r4, r5, r6, lr} + push {r4, r5, r6, lr} + ldm r2, {r3, r12, lr} + ldr r2, [r2, #12] + ldm r1, {r4, r5, r6} + ldr r1, [r1, #12] + subs r3, r4, r3 + sbcs r5, r5, r12 + sbcs r6, r6, lr + sbcs r1, r1, r2 + stm r0, {r3, r5, r6} + str r1, [r0, #12] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + pop {r4, r5, r6, lr} + mov pc, lr +.Lfunc_end57: + .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L + .cantunwind + .fnend + + .globl mcl_fp_shr1_4L + .align 2 + .type mcl_fp_shr1_4L,%function +mcl_fp_shr1_4L: @ @mcl_fp_shr1_4L + .fnstart +@ BB#0: + .save {r11, lr} + push {r11, lr} + ldr r3, [r1, #4] + ldr r12, [r1] + ldr lr, [r1, #12] + ldr r2, [r1, #8] + lsrs r1, r3, #1 + lsr r3, r3, #1 + rrx r12, r12 + lsrs r1, lr, #1 + orr r3, r3, r2, lsl #31 + rrx r1, r2 + lsr r2, lr, #1 + str r12, [r0] + str r3, [r0, #4] + str r1, [r0, #8] + str r2, [r0, #12] + pop {r11, lr} + mov pc, lr +.Lfunc_end58: + .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L + .cantunwind + .fnend + + .globl mcl_fp_add4L + .align 2 + .type mcl_fp_add4L,%function +mcl_fp_add4L: @ @mcl_fp_add4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r11, lr} + push {r4, r5, r6, r7, r11, lr} + ldm r1, {r12, lr} + ldr r4, [r1, #8] + ldr r1, [r1, #12] + ldm r2, {r5, r6, r7} + ldr r2, [r2, #12] + adds r5, r5, r12 + adcs r6, r6, lr + adcs r7, r7, r4 + stm r0, {r5, r6, r7} + adcs r4, r2, r1 + mov r1, #0 + ldr r2, [r3] + adc lr, r1, #0 + str r4, [r0, #12] + ldmib r3, {r1, r12} + ldr r3, [r3, #12] + subs r5, r5, r2 + sbcs r2, r6, r1 + sbcs r1, r7, r12 + sbcs r12, r4, r3 + sbc r3, lr, #0 + tst r3, #1 + streq r5, [r0] + streq r2, [r0, #4] + streq r1, [r0, #8] + streq r12, [r0, #12] + pop {r4, r5, r6, r7, r11, lr} + mov pc, lr +.Lfunc_end59: + .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L + .cantunwind + .fnend + + .globl mcl_fp_addNF4L + .align 2 + .type 
mcl_fp_addNF4L,%function +mcl_fp_addNF4L: @ @mcl_fp_addNF4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, lr} + push {r4, r5, r6, r7, r8, lr} + ldm r1, {r12, lr} + ldr r4, [r1, #8] + ldr r1, [r1, #12] + ldm r2, {r5, r6, r7} + ldr r2, [r2, #12] + adds r5, r5, r12 + adcs r6, r6, lr + adcs r7, r7, r4 + adc r8, r2, r1 + ldm r3, {r2, r4, r12, lr} + subs r2, r5, r2 + sbcs r4, r6, r4 + sbcs r3, r7, r12 + sbc r1, r8, lr + cmp r1, #0 + movlt r2, r5 + movlt r4, r6 + movlt r3, r7 + cmp r1, #0 + movlt r1, r8 + stm r0, {r2, r4} + str r3, [r0, #8] + str r1, [r0, #12] + pop {r4, r5, r6, r7, r8, lr} + mov pc, lr +.Lfunc_end60: + .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L + .cantunwind + .fnend + + .globl mcl_fp_sub4L + .align 2 + .type mcl_fp_sub4L,%function +mcl_fp_sub4L: @ @mcl_fp_sub4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, lr} + push {r4, r5, r6, r7, r8, lr} + ldm r2, {r12, lr} + ldr r4, [r2, #8] + ldr r5, [r2, #12] + ldm r1, {r2, r6, r7} + ldr r1, [r1, #12] + subs r8, r2, r12 + sbcs r2, r6, lr + str r8, [r0] + sbcs r12, r7, r4 + sbcs lr, r1, r5 + mov r1, #0 + sbc r1, r1, #0 + stmib r0, {r2, r12, lr} + tst r1, #1 + popeq {r4, r5, r6, r7, r8, lr} + moveq pc, lr + ldm r3, {r1, r4, r5} + ldr r3, [r3, #12] + adds r1, r1, r8 + adcs r2, r4, r2 + adcs r7, r5, r12 + adc r3, r3, lr + stm r0, {r1, r2, r7} + str r3, [r0, #12] + pop {r4, r5, r6, r7, r8, lr} + mov pc, lr +.Lfunc_end61: + .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L + .cantunwind + .fnend + + .globl mcl_fp_subNF4L + .align 2 + .type mcl_fp_subNF4L,%function +mcl_fp_subNF4L: @ @mcl_fp_subNF4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, lr} + push {r4, r5, r6, r7, r8, lr} + ldm r2, {r12, lr} + ldr r4, [r2, #8] + ldr r2, [r2, #12] + ldm r1, {r5, r6, r7} + ldr r1, [r1, #12] + subs r5, r5, r12 + sbcs r6, r6, lr + sbcs r8, r7, r4 + sbc r1, r1, r2 + ldm r3, {r2, r4, r12, lr} + adds r2, r5, r2 + adcs r4, r6, r4 + adcs r3, r8, r12 + adc r7, r1, lr + cmp r1, #0 + movge r2, r5 + movge r4, r6 + movge r3, r8 + cmp r1, #0 + movge r7, r1 + stm r0, {r2, r4} + str r3, [r0, #8] + str r7, [r0, #12] + pop {r4, r5, r6, r7, r8, lr} + mov pc, lr +.Lfunc_end62: + .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L + .cantunwind + .fnend + + .globl mcl_fpDbl_add4L + .align 2 + .type mcl_fpDbl_add4L,%function +mcl_fpDbl_add4L: @ @mcl_fpDbl_add4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #16 + sub sp, sp, #16 + ldm r1, {r8, r9, r10, r11} + ldr r7, [r1, #16] + str r7, [sp] @ 4-byte Spill + ldr r7, [r1, #20] + str r7, [sp, #4] @ 4-byte Spill + ldr r7, [r1, #24] + ldr r1, [r1, #28] + str r7, [sp, #8] @ 4-byte Spill + str r1, [sp, #12] @ 4-byte Spill + ldm r2, {r1, r6, r7, r12, lr} + ldr r4, [r2, #20] + ldr r5, [r2, #24] + ldr r2, [r2, #28] + adds r1, r1, r8 + adcs r6, r6, r9 + adcs r7, r7, r10 + adcs r12, r12, r11 + stm r0, {r1, r6, r7, r12} + mov r1, #0 + ldr r7, [sp] @ 4-byte Reload + ldr r6, [sp, #4] @ 4-byte Reload + adcs r7, lr, r7 + adcs r6, r4, r6 + ldr r4, [sp, #8] @ 4-byte Reload + adcs r8, r5, r4 + ldr r5, [sp, #12] @ 4-byte Reload + ldr r4, [r3] + adcs lr, r2, r5 + adc r12, r1, #0 + ldmib r3, {r1, r2, r3} + subs r4, r7, r4 + sbcs r1, r6, r1 + sbcs r2, r8, r2 + sbcs r3, lr, r3 + sbc r5, r12, #0 + ands r5, r5, #1 + movne r4, r7 + movne r1, r6 + movne r2, r8 + cmp r5, #0 + movne r3, lr + str r4, [r0, #16] + str r1, [r0, #20] + str r2, [r0, #24] + str r3, [r0, #28] + add sp, sp, #16 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end63: + .size 
mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub4L + .align 2 + .type mcl_fpDbl_sub4L,%function +mcl_fpDbl_sub4L: @ @mcl_fpDbl_sub4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #16 + sub sp, sp, #16 + ldm r2, {r8, r9, r10, r11} + ldr r7, [r2, #16] + str r7, [sp] @ 4-byte Spill + ldr r7, [r2, #20] + str r7, [sp, #4] @ 4-byte Spill + ldr r7, [r2, #24] + ldr r2, [r2, #28] + str r7, [sp, #8] @ 4-byte Spill + str r2, [sp, #12] @ 4-byte Spill + ldm r1, {r2, r6, r7, r12, lr} + ldr r4, [r1, #20] + ldr r5, [r1, #24] + ldr r1, [r1, #28] + subs r2, r2, r8 + str r2, [r0] + sbcs r2, r6, r9 + ldr r6, [sp, #4] @ 4-byte Reload + str r2, [r0, #4] + sbcs r2, r7, r10 + ldr r7, [sp] @ 4-byte Reload + str r2, [r0, #8] + sbcs r2, r12, r11 + str r2, [r0, #12] + mov r2, #0 + sbcs r7, lr, r7 + sbcs r6, r4, r6 + ldr r4, [sp, #8] @ 4-byte Reload + sbcs r5, r5, r4 + ldr r4, [sp, #12] @ 4-byte Reload + sbcs lr, r1, r4 + ldr r4, [r3] + ldr r1, [r3, #8] + sbc r12, r2, #0 + ldr r2, [r3, #4] + ldr r3, [r3, #12] + adds r4, r7, r4 + adcs r2, r6, r2 + adcs r1, r5, r1 + adc r3, lr, r3 + ands r12, r12, #1 + moveq r4, r7 + moveq r2, r6 + moveq r1, r5 + cmp r12, #0 + moveq r3, lr + str r4, [r0, #16] + str r2, [r0, #20] + str r1, [r0, #24] + str r3, [r0, #28] + add sp, sp, #16 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end64: + .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre5L + .align 2 + .type mcl_fp_mulUnitPre5L,%function +mcl_fp_mulUnitPre5L: @ @mcl_fp_mulUnitPre5L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, lr} + push {r4, r5, r6, r7, r8, r9, r10, lr} + ldr r12, [r1] + ldmib r1, {r3, lr} + ldr r10, [r1, #12] + ldr r8, [r1, #16] + umull r4, r9, lr, r2 + umull r1, r6, r12, r2 + mov r7, r6 + mov r5, r4 + umlal r7, r5, r3, r2 + stm r0, {r1, r7} + str r5, [r0, #8] + umull r5, r7, r3, r2 + umull r1, r12, r10, r2 + adds r3, r6, r5 + adcs r3, r7, r4 + adcs r1, r9, r1 + str r1, [r0, #12] + umull r1, r3, r8, r2 + adcs r1, r12, r1 + str r1, [r0, #16] + adc r1, r3, #0 + str r1, [r0, #20] + pop {r4, r5, r6, r7, r8, r9, r10, lr} + mov pc, lr +.Lfunc_end65: + .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre5L + .align 2 + .type mcl_fpDbl_mulPre5L,%function +mcl_fpDbl_mulPre5L: @ @mcl_fpDbl_mulPre5L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #36 + sub sp, sp, #36 + str r2, [sp, #32] @ 4-byte Spill + ldr r3, [r2] + ldm r1, {r12, lr} + ldr r9, [r1, #8] + ldr r10, [r1, #12] + umull r5, r4, r12, r3 + umull r6, r7, lr, r3 + adds r6, r4, r6 + str r5, [sp, #24] @ 4-byte Spill + umull r5, r6, r9, r3 + adcs r7, r7, r5 + umlal r4, r5, lr, r3 + umull r7, r11, r10, r3 + adcs r6, r6, r7 + ldr r7, [r1, #16] + str r6, [sp, #28] @ 4-byte Spill + umull r6, r8, r7, r3 + ldr r3, [sp, #24] @ 4-byte Reload + adcs r11, r11, r6 + ldr r6, [r2, #4] + str r3, [r0] + umull r3, r2, r12, r6 + adc r12, r8, #0 + adds r8, r3, r4 + str r2, [sp, #24] @ 4-byte Spill + umull r3, r2, lr, r6 + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [sp, #28] @ 4-byte Reload + adcs r5, r3, r5 + umull r3, lr, r10, r6 + umull r4, r10, r9, r6 + str r8, [r0, #4] + adcs r4, r4, r2 + umull r2, r9, r7, r6 + adcs r3, r3, r11 + adcs r7, r2, r12 + mov r2, #0 + adc r6, r2, #0 + ldr r2, [sp, #24] @ 4-byte Reload + adds r5, r5, r2 + ldr r2, [sp, #20] @ 
4-byte Reload + adcs r11, r4, r2 + adcs r2, r3, r10 + ldr r3, [sp, #32] @ 4-byte Reload + str r2, [sp, #16] @ 4-byte Spill + adcs r2, r7, lr + ldr r7, [r1] + str r2, [sp, #8] @ 4-byte Spill + adc r2, r6, r9 + str r2, [sp, #24] @ 4-byte Spill + ldr r2, [r3, #8] + str r7, [sp, #28] @ 4-byte Spill + ldmib r1, {r8, lr} + ldr r6, [r1, #12] + umull r12, r4, r7, r2 + adds r7, r12, r5 + str r4, [sp, #12] @ 4-byte Spill + ldr r12, [r1, #16] + str r7, [sp, #20] @ 4-byte Spill + umull r5, r7, r8, r2 + str r7, [sp, #4] @ 4-byte Spill + adcs r10, r5, r11 + umull r5, r7, lr, r2 + str r7, [sp] @ 4-byte Spill + ldr r7, [sp, #16] @ 4-byte Reload + adcs r9, r5, r7 + umull r4, r7, r6, r2 + mov r5, #0 + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [sp, #8] @ 4-byte Reload + adcs r4, r4, r7 + umull r11, r7, r12, r2 + ldr r2, [sp, #24] @ 4-byte Reload + adcs r2, r11, r2 + adc r11, r5, #0 + ldr r5, [sp, #12] @ 4-byte Reload + adds r5, r10, r5 + str r5, [sp, #12] @ 4-byte Spill + ldr r5, [sp, #4] @ 4-byte Reload + adcs r5, r9, r5 + str r5, [sp, #8] @ 4-byte Spill + ldr r5, [sp] @ 4-byte Reload + adcs r4, r4, r5 + ldr r5, [sp, #16] @ 4-byte Reload + adcs r10, r2, r5 + adc r2, r11, r7 + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [sp, #20] @ 4-byte Reload + str r2, [r0, #8] + ldr r2, [r3, #12] + umull r11, r3, r6, r2 + str r3, [sp, #20] @ 4-byte Spill + umull r6, r3, lr, r2 + umull lr, r9, r8, r2 + str r3, [sp, #24] @ 4-byte Spill + ldr r3, [sp, #28] @ 4-byte Reload + umull r7, r8, r3, r2 + ldr r3, [sp, #12] @ 4-byte Reload + adds r3, r7, r3 + str r3, [sp, #28] @ 4-byte Spill + ldr r3, [sp, #8] @ 4-byte Reload + adcs r5, lr, r3 + mov r3, #0 + adcs r6, r6, r4 + umull r4, lr, r12, r2 + ldr r2, [sp, #16] @ 4-byte Reload + adcs r7, r11, r10 + adcs r2, r4, r2 + adc r3, r3, #0 + adds r10, r5, r8 + adcs r11, r6, r9 + ldr r6, [sp, #24] @ 4-byte Reload + adcs r7, r7, r6 + str r7, [sp, #24] @ 4-byte Spill + ldr r7, [sp, #20] @ 4-byte Reload + adcs r2, r2, r7 + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [sp, #28] @ 4-byte Reload + str r2, [r0, #12] + adc r2, r3, lr + ldr r3, [r1] + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [sp, #32] @ 4-byte Reload + ldr r4, [r2, #16] + ldmib r1, {r2, r5, r6} + ldr r1, [r1, #16] + umull lr, r9, r6, r4 + umull r6, r8, r5, r4 + umull r5, r7, r2, r4 + umull r2, r12, r3, r4 + adds r10, r2, r10 + ldr r2, [sp, #24] @ 4-byte Reload + adcs r3, r5, r11 + str r10, [r0, #16] + adcs r5, r6, r2 + ldr r2, [sp, #20] @ 4-byte Reload + adcs r6, lr, r2 + umull r2, lr, r1, r4 + ldr r1, [sp, #28] @ 4-byte Reload + adcs r1, r2, r1 + mov r2, #0 + adc r2, r2, #0 + adds r3, r3, r12 + adcs r7, r5, r7 + str r3, [r0, #20] + adcs r6, r6, r8 + str r7, [r0, #24] + adcs r1, r1, r9 + str r6, [r0, #28] + adc r2, r2, lr + str r1, [r0, #32] + str r2, [r0, #36] + add sp, sp, #36 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end66: + .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre5L + .align 2 + .type mcl_fpDbl_sqrPre5L,%function +mcl_fpDbl_sqrPre5L: @ @mcl_fpDbl_sqrPre5L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #32 + sub sp, sp, #32 + ldm r1, {r2, r3, r12} + ldr lr, [r1, #16] + ldr r9, [r1, #12] + umull r5, r6, r2, r2 + umull r7, r11, r3, r2 + str r5, [r0] + umull r5, r4, lr, r2 + adds r8, r6, r7 + str r5, [sp, #24] @ 4-byte Spill + umull r5, r10, r12, r2 + str r4, [sp, #28] @ 4-byte Spill + adcs r4, r11, r5 + umlal r6, r5, r3, r2 + umull r4, r8, r9, r2 + adcs r10, r10, r4 
+ ldr r4, [sp, #24] @ 4-byte Reload + adcs r8, r8, r4 + ldr r4, [sp, #28] @ 4-byte Reload + adc r4, r4, #0 + str r4, [sp, #24] @ 4-byte Spill + umull r2, r4, r3, r3 + str r4, [sp, #28] @ 4-byte Spill + adds r4, r7, r6 + str r4, [sp, #16] @ 4-byte Spill + adcs r5, r2, r5 + umull r2, r4, r12, r3 + str r4, [sp, #12] @ 4-byte Spill + adcs r4, r2, r10 + umull r2, r6, r9, r3 + adcs r2, r2, r8 + umull r7, r8, lr, r3 + ldr r3, [sp, #24] @ 4-byte Reload + adcs r7, r7, r3 + mov r3, #0 + adc r3, r3, #0 + adds r5, r5, r11 + str r5, [sp, #24] @ 4-byte Spill + ldr r5, [sp, #28] @ 4-byte Reload + adcs r4, r4, r5 + str r4, [sp, #20] @ 4-byte Spill + ldr r4, [sp, #16] @ 4-byte Reload + str r4, [r0, #4] + ldr r4, [sp, #12] @ 4-byte Reload + adcs r2, r2, r4 + str r2, [sp, #12] @ 4-byte Spill + adcs r2, r7, r6 + str r2, [sp, #8] @ 4-byte Spill + adc r2, r3, r8 + str r2, [sp, #4] @ 4-byte Spill + umull r11, r2, lr, r12 + umull lr, r10, r12, r12 + str r2, [sp, #28] @ 4-byte Spill + ldm r1, {r4, r6} + ldr r2, [r1, #12] + ldr r7, [sp, #24] @ 4-byte Reload + umull r8, r3, r2, r12 + str r3, [sp, #16] @ 4-byte Spill + umull r5, r3, r6, r12 + str r3, [sp] @ 4-byte Spill + umull r3, r9, r4, r12 + adds r3, r3, r7 + str r3, [sp, #24] @ 4-byte Spill + ldr r3, [sp, #20] @ 4-byte Reload + adcs r5, r5, r3 + ldr r3, [sp, #12] @ 4-byte Reload + adcs r12, lr, r3 + ldr r3, [sp, #8] @ 4-byte Reload + adcs r7, r8, r3 + ldr r3, [sp, #4] @ 4-byte Reload + adcs lr, r11, r3 + mov r3, #0 + adc r11, r3, #0 + ldr r3, [sp] @ 4-byte Reload + adds r5, r5, r9 + adcs r12, r12, r3 + ldr r3, [sp, #16] @ 4-byte Reload + adcs r9, r7, r10 + ldr r7, [sp, #28] @ 4-byte Reload + adcs r8, lr, r3 + adc r11, r11, r7 + umull r7, r3, r4, r2 + adds r7, r7, r5 + str r3, [sp, #20] @ 4-byte Spill + umull r5, r3, r6, r2 + ldr r6, [r1, #8] + str r3, [sp, #16] @ 4-byte Spill + adcs r10, r5, r12 + ldr r3, [sp, #24] @ 4-byte Reload + ldr r5, [r1, #16] + str r7, [sp, #28] @ 4-byte Spill + umull r4, lr, r6, r2 + adcs r12, r4, r9 + ldr r4, [sp, #20] @ 4-byte Reload + umull r7, r9, r2, r2 + str r3, [r0, #8] + adcs r7, r7, r8 + umull r3, r8, r5, r2 + adcs r2, r3, r11 + mov r3, #0 + adc r3, r3, #0 + adds r11, r10, r4 + ldr r4, [sp, #16] @ 4-byte Reload + adcs r4, r12, r4 + adcs r10, r7, lr + adcs r12, r2, r9 + ldr r2, [sp, #28] @ 4-byte Reload + adc r8, r3, r8 + ldr r3, [r1] + str r2, [r0, #12] + ldr r2, [r1, #4] + ldr r1, [r1, #12] + umull r7, r9, r3, r5 + adds lr, r7, r11 + str lr, [r0, #16] + umull r7, r11, r2, r5 + adcs r2, r7, r4 + umull r4, r7, r6, r5 + adcs r4, r4, r10 + umull r6, r10, r1, r5 + adcs r1, r6, r12 + umull r6, r3, r5, r5 + mov r5, #0 + adcs r6, r6, r8 + adc r5, r5, #0 + adds r2, r2, r9 + adcs r4, r4, r11 + str r2, [r0, #20] + adcs r1, r1, r7 + str r4, [r0, #24] + adcs r7, r6, r10 + str r1, [r0, #28] + adc r3, r5, r3 + str r7, [r0, #32] + str r3, [r0, #36] + add sp, sp, #32 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end67: + .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L + .cantunwind + .fnend + + .globl mcl_fp_mont5L + .align 2 + .type mcl_fp_mont5L,%function +mcl_fp_mont5L: @ @mcl_fp_mont5L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #100 + sub sp, sp, #100 + str r0, [sp, #52] @ 4-byte Spill + mov r0, r2 + str r2, [sp, #48] @ 4-byte Spill + ldm r0, {r2, r8} + ldr r7, [r0, #8] + ldr r0, [r0, #12] + ldr r6, [r3, #-4] + ldr r5, [r3, #8] + ldr r9, [r3] + ldr r11, [r1, #8] + ldr r12, [r1, #12] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r1] + 
str r7, [sp, #40] @ 4-byte Spill + ldr r7, [r1, #4] + ldr r1, [r1, #16] + str r6, [sp, #84] @ 4-byte Spill + str r5, [sp, #88] @ 4-byte Spill + str r9, [sp, #80] @ 4-byte Spill + str r11, [sp, #60] @ 4-byte Spill + str r12, [sp, #56] @ 4-byte Spill + umull r4, lr, r0, r2 + str r0, [sp, #72] @ 4-byte Spill + str r7, [sp, #96] @ 4-byte Spill + ldr r7, [r3, #4] + str r1, [sp, #64] @ 4-byte Spill + mul r0, r4, r6 + str r4, [sp, #36] @ 4-byte Spill + umull r6, r4, r0, r5 + str r4, [sp, #28] @ 4-byte Spill + umull r4, r5, r0, r9 + mov r10, r6 + mov r9, r5 + str r4, [sp, #32] @ 4-byte Spill + str r7, [sp, #76] @ 4-byte Spill + str r5, [sp, #12] @ 4-byte Spill + mov r4, r7 + umlal r9, r10, r0, r7 + umull r7, r5, r1, r2 + str r7, [sp, #68] @ 4-byte Spill + ldr r7, [sp, #96] @ 4-byte Reload + str r5, [sp, #92] @ 4-byte Spill + umull r5, r1, r12, r2 + str r1, [sp, #20] @ 4-byte Spill + str r5, [sp, #24] @ 4-byte Spill + umull r12, r1, r11, r2 + umull r11, r5, r7, r2 + adds r7, lr, r11 + adcs r5, r5, r12 + ldr r5, [sp, #24] @ 4-byte Reload + adcs r1, r1, r5 + ldr r5, [sp, #20] @ 4-byte Reload + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r5, r1 + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #92] @ 4-byte Reload + adc r1, r1, #0 + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [r3, #16] + str r1, [sp, #68] @ 4-byte Spill + umull r7, r11, r0, r1 + ldr r1, [r3, #12] + umull r3, r5, r0, r4 + ldr r4, [sp, #12] @ 4-byte Reload + adds r3, r4, r3 + str r1, [sp, #92] @ 4-byte Spill + umull r3, r4, r0, r1 + adcs r0, r5, r6 + mov r1, #0 + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r3 + adcs r3, r4, r7 + ldr r7, [sp, #96] @ 4-byte Reload + ldr r4, [sp, #32] @ 4-byte Reload + adc r5, r11, #0 + umlal lr, r12, r7, r2 + ldr r2, [sp, #36] @ 4-byte Reload + adds r2, r4, r2 + adcs r2, r9, lr + ldr r9, [sp, #64] @ 4-byte Reload + str r2, [sp, #36] @ 4-byte Spill + adcs r2, r10, r12 + ldr r10, [sp, #72] @ 4-byte Reload + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [sp, #24] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r5, r0 + umull r5, lr, r8, r9 + str r0, [sp, #20] @ 4-byte Spill + adc r0, r1, #0 + umull r6, r1, r8, r7 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + umull r12, r4, r8, r0 + ldr r0, [sp, #60] @ 4-byte Reload + umull r3, r2, r8, r0 + umull r11, r0, r8, r10 + ldr r10, [sp, #68] @ 4-byte Reload + adds r6, r0, r6 + adcs r1, r1, r3 + umlal r0, r3, r8, r7 + ldr r7, [sp, #36] @ 4-byte Reload + adcs r1, r2, r12 + adcs r2, r4, r5 + adc r6, lr, #0 + adds r8, r7, r11 + ldr r7, [sp, #32] @ 4-byte Reload + adcs r11, r7, r0 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r7, [sp, #92] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #24] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + mul r4, r8, r0 + ldr r0, [sp, #88] @ 4-byte Reload + umull r6, r1, r4, r0 + ldr r0, [sp, #80] @ 4-byte Reload + str r1, [sp, #12] @ 4-byte Spill + umull r1, r5, r4, r0 + mov r0, r6 + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + mov r3, r5 + umull r12, lr, r4, r1 + 
umlal r3, r0, r4, r1 + umull r1, r2, r4, r7 + ldr r7, [sp, #12] @ 4-byte Reload + adds r5, r5, r12 + adcs r6, lr, r6 + umull r5, r12, r4, r10 + adcs r1, r7, r1 + ldr r7, [sp, #16] @ 4-byte Reload + adcs r2, r2, r5 + adc r6, r12, #0 + adds r7, r7, r8 + ldr r8, [sp, #60] @ 4-byte Reload + adcs r3, r3, r11 + ldr r11, [sp, #72] @ 4-byte Reload + str r3, [sp, #16] @ 4-byte Spill + ldr r3, [sp, #36] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + umull r2, r1, r0, r9 + ldr r9, [sp, #56] @ 4-byte Reload + umull r3, r12, r0, r8 + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + str r2, [sp, #4] @ 4-byte Spill + mov r2, r0 + umull r4, r5, r0, r9 + umull r6, r7, r0, r1 + umull lr, r0, r2, r11 + adds r6, r0, r6 + str lr, [sp, #8] @ 4-byte Spill + adcs r6, r7, r3 + ldr r7, [sp, #4] @ 4-byte Reload + umlal r0, r3, r2, r1 + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + adcs r12, r12, r4 + adcs r4, r5, r7 + ldr r7, [sp, #12] @ 4-byte Reload + adc r7, r7, #0 + adds r2, r1, r2 + ldr r1, [sp, #36] @ 4-byte Reload + str r2, [sp] @ 4-byte Spill + adcs r0, r1, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #24] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + mul r4, r2, r0 + ldr r0, [sp, #88] @ 4-byte Reload + umull r5, r1, r4, r0 + ldr r0, [sp, #80] @ 4-byte Reload + str r1, [sp, #12] @ 4-byte Spill + mov r2, r5 + umull r1, r7, r4, r0 + ldr r0, [sp, #76] @ 4-byte Reload + str r1, [sp, #16] @ 4-byte Spill + umull r6, r1, r4, r10 + mov r3, r7 + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [sp, #92] @ 4-byte Reload + str r6, [sp, #4] @ 4-byte Spill + umlal r3, r2, r4, r0 + umull r12, lr, r4, r1 + umull r10, r1, r4, r0 + ldr r0, [sp, #12] @ 4-byte Reload + adds r4, r7, r10 + adcs r1, r1, r5 + ldr r4, [sp, #64] @ 4-byte Reload + ldr r1, [sp] @ 4-byte Reload + adcs r10, r0, r12 + ldr r0, [sp, #4] @ 4-byte Reload + adcs r12, lr, r0 + ldr r0, [sp, #8] @ 4-byte Reload + adc lr, r0, #0 + ldr r0, [sp, #16] @ 4-byte Reload + adds r6, r0, r1 + ldr r0, [sp, #44] @ 4-byte Reload + umull r5, r1, r0, r4 + mov r6, r0 + str r1, [sp, #16] @ 4-byte Spill + umull r4, r1, r0, r9 + str r5, [sp, #8] @ 4-byte Spill + umull r5, r9, r0, r8 + str r1, [sp, #4] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + str r4, [sp] @ 4-byte Spill + umull r4, r8, r0, r1 + umull r7, r0, r6, r11 + str r7, [sp, #12] @ 4-byte Spill + ldr r7, [sp, #40] @ 4-byte Reload + adcs r11, r3, r7 + ldr r3, [sp, #36] @ 4-byte Reload + adcs r2, r2, r3 + ldr r3, [sp, #32] @ 4-byte Reload + str r2, [sp, #40] @ 4-byte Spill + adcs r10, r10, r3 + ldr r3, [sp, #28] @ 4-byte Reload + adcs r12, r12, r3 + ldr r3, [sp, #24] @ 4-byte Reload + adcs r7, lr, r3 + ldr r3, [sp, #20] @ 4-byte Reload + adc r2, r3, #0 + adds r4, r0, r4 + ldr 
r3, [sp, #4] @ 4-byte Reload + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [sp] @ 4-byte Reload + adcs r4, r8, r5 + umlal r0, r5, r6, r1 + ldr r1, [sp, #12] @ 4-byte Reload + adcs r4, r9, r2 + ldr r2, [sp, #8] @ 4-byte Reload + adcs r3, r3, r2 + ldr r2, [sp, #16] @ 4-byte Reload + adc r8, r2, #0 + adds lr, r11, r1 + ldr r1, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #76] @ 4-byte Reload + adcs r9, r10, r5 + str r0, [sp, #44] @ 4-byte Spill + ldr r10, [sp, #92] @ 4-byte Reload + adcs r0, r12, r4 + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r3 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #32] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + mul r4, lr, r0 + ldr r0, [sp, #88] @ 4-byte Reload + umull r12, r3, r4, r1 + umull r7, r11, r4, r0 + ldr r0, [sp, #80] @ 4-byte Reload + umull r8, r6, r4, r0 + mov r0, r7 + mov r5, r6 + adds r6, r6, r12 + umlal r5, r0, r4, r1 + ldr r1, [sp, #68] @ 4-byte Reload + adcs r3, r3, r7 + umull r6, r12, r4, r1 + umull r1, r2, r4, r10 + adcs r1, r11, r1 + adcs r2, r2, r6 + adc r3, r12, #0 + adds r7, r8, lr + ldr r7, [sp, #44] @ 4-byte Reload + adcs r7, r5, r7 + adcs r0, r0, r9 + ldr r9, [sp, #72] @ 4-byte Reload + str r7, [sp, #44] @ 4-byte Spill + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #96] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + ldr r5, [r0, #16] + ldr r0, [sp, #64] @ 4-byte Reload + umull r4, r8, r5, r0 + ldr r0, [sp, #56] @ 4-byte Reload + umull r7, r1, r5, r2 + umull r12, lr, r5, r0 + ldr r0, [sp, #60] @ 4-byte Reload + umull r6, r3, r5, r0 + umull r11, r0, r5, r9 + ldr r9, [sp, #76] @ 4-byte Reload + adds r7, r0, r7 + adcs r1, r1, r6 + umlal r0, r6, r5, r2 + ldr r2, [sp, #44] @ 4-byte Reload + adcs r1, r3, r12 + ldr r12, [sp, #80] @ 4-byte Reload + adcs r4, lr, r4 + ldr lr, [sp, #88] @ 4-byte Reload + adc r3, r8, #0 + adds r7, r2, r11 + ldr r2, [sp, #24] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #68] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r11, r0, r6 + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + mul r4, r7, r0 + umull r0, r1, r4, r9 + umull r8, r3, r4, r12 + adds r0, r3, r0 + umull r5, r0, r4, lr + adcs r1, r1, r5 + umlal r3, r5, r4, r9 + umull r1, r6, r4, r10 + adcs r10, r0, r1 + umull r1, r0, r4, r2 + mov r4, r9 + adcs r1, r6, r1 + ldr r6, [sp, #96] @ 4-byte Reload + adc r0, r0, #0 + adds r7, r8, r7 + adcs r3, r3, r6 + adcs r7, r5, r11 + ldr r5, [sp, #72] @ 4-byte Reload + adcs r11, r10, r5 + ldr r5, [sp, #64] @ 4-byte Reload + adcs r8, r1, r5 + ldr r1, [sp, #60] @ 4-byte Reload + adcs r10, r0, r1 + ldr r0, [sp, #56] @ 4-byte Reload + ldr r1, [sp, #92] @ 4-byte Reload + adc r9, r0, #0 + subs r5, r3, r12 + sbcs r4, r7, r4 + sbcs r0, r11, lr + sbcs r6, r8, r1 + 
sbcs r1, r10, r2 + sbc r2, r9, #0 + ands r2, r2, #1 + movne r5, r3 + ldr r3, [sp, #52] @ 4-byte Reload + movne r4, r7 + movne r0, r11 + cmp r2, #0 + movne r6, r8 + movne r1, r10 + str r5, [r3] + str r4, [r3, #4] + str r0, [r3, #8] + str r6, [r3, #12] + str r1, [r3, #16] + add sp, sp, #100 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end68: + .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L + .cantunwind + .fnend + + .globl mcl_fp_montNF5L + .align 2 + .type mcl_fp_montNF5L,%function +mcl_fp_montNF5L: @ @mcl_fp_montNF5L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #76 + sub sp, sp, #76 + str r2, [sp, #24] @ 4-byte Spill + str r0, [sp, #28] @ 4-byte Spill + ldm r2, {r4, r9, r10} + ldr r6, [r1, #4] + ldr r0, [r2, #12] + ldr r7, [r1] + ldr r5, [r1, #8] + ldr lr, [r3, #8] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r1, #12] + str r6, [sp, #32] @ 4-byte Spill + umull r2, r8, r6, r4 + mov r11, r6 + umull r6, r12, r7, r4 + str r7, [sp, #56] @ 4-byte Spill + str r5, [sp, #48] @ 4-byte Spill + str lr, [sp, #36] @ 4-byte Spill + adds r7, r12, r2 + umull r2, r7, r5, r4 + adcs r5, r8, r2 + umlal r12, r2, r11, r4 + umull r5, r8, r0, r4 + str r0, [sp, #68] @ 4-byte Spill + adcs r0, r7, r5 + ldr r5, [r3, #4] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r1, #16] + str r5, [sp, #60] @ 4-byte Spill + umull r1, r7, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + adcs r0, r8, r1 + ldr r1, [r3] + str r0, [sp, #16] @ 4-byte Spill + adc r0, r7, #0 + ldr r7, [r3, #-4] + str r0, [sp, #12] @ 4-byte Spill + str r1, [sp, #40] @ 4-byte Spill + mul r0, r6, r7 + str r7, [sp, #72] @ 4-byte Spill + umull r8, r7, r0, r1 + ldr r1, [r3, #12] + ldr r3, [r3, #16] + adds r6, r8, r6 + umull r4, r8, r0, r5 + str r7, [sp, #8] @ 4-byte Spill + umull r5, r7, r0, lr + ldr lr, [sp, #64] @ 4-byte Reload + adcs r6, r4, r12 + adcs r5, r5, r2 + str r1, [sp, #52] @ 4-byte Spill + umull r2, r4, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r3, [sp, #44] @ 4-byte Spill + adcs r2, r2, r1 + umull r12, r1, r0, r3 + ldr r0, [sp, #16] @ 4-byte Reload + ldr r3, [sp, #12] @ 4-byte Reload + adcs r0, r12, r0 + adc r12, r3, #0 + ldr r3, [sp, #8] @ 4-byte Reload + adds r6, r6, r3 + adcs r3, r5, r8 + ldr r8, [sp, #56] @ 4-byte Reload + adcs r2, r2, r7 + str r3, [sp, #16] @ 4-byte Spill + adcs r0, r0, r4 + umull r7, r4, r9, r11 + str r2, [sp, #12] @ 4-byte Spill + str r0, [sp, #8] @ 4-byte Spill + adc r0, r12, r1 + ldr r12, [sp, #68] @ 4-byte Reload + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + umull r5, r1, r9, r8 + adds r7, r1, r7 + umull r2, r7, r9, r0 + adcs r4, r4, r2 + umlal r1, r2, r9, r11 + ldr r11, [sp, #44] @ 4-byte Reload + umull r4, r0, r9, r12 + adcs r4, r7, r4 + umull r7, r3, r9, lr + ldr r9, [sp, #36] @ 4-byte Reload + adcs r0, r0, r7 + adc r3, r3, #0 + adds r7, r5, r6 + ldr r5, [sp, #16] @ 4-byte Reload + adcs r1, r1, r5 + ldr r5, [sp, #12] @ 4-byte Reload + adcs r2, r2, r5 + ldr r5, [sp, #8] @ 4-byte Reload + adcs r6, r4, r5 + ldr r4, [sp, #4] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #16] @ 4-byte Spill + adc r0, r3, #0 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + mul r5, r7, r0 + ldr r0, [sp, #40] @ 4-byte Reload + umull r4, r3, r5, r0 + ldr r0, [sp, #60] @ 4-byte Reload + adds r7, r4, r7 + ldr r4, [sp, #52] @ 4-byte Reload + str r3, [sp, #8] @ 4-byte Spill + umull r7, r3, r5, r0 + adcs r1, r7, r1 + umull r7, r0, r5, r9 + str r3, [sp, #4] @ 4-byte Spill + ldr r3, [sp, #8] @ 4-byte 
Reload + str r0, [sp] @ 4-byte Spill + adcs r2, r7, r2 + umull r7, r0, r5, r4 + adcs r6, r7, r6 + umull r7, r4, r5, r11 + ldr r5, [sp, #16] @ 4-byte Reload + adcs r7, r7, r5 + ldr r5, [sp, #12] @ 4-byte Reload + adc r5, r5, #0 + adds r1, r1, r3 + ldr r3, [sp, #48] @ 4-byte Reload + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #4] @ 4-byte Reload + adcs r1, r2, r1 + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp] @ 4-byte Reload + adcs r1, r6, r1 + adcs r0, r7, r0 + ldr r7, [sp, #32] @ 4-byte Reload + str r1, [sp, #8] @ 4-byte Spill + adc r11, r5, r4 + str r0, [sp, #4] @ 4-byte Spill + umull r4, r0, r10, r8 + ldr r8, [sp, #60] @ 4-byte Reload + umull r6, r5, r10, r7 + adds r6, r0, r6 + umull r1, r6, r10, r3 + adcs r5, r5, r1 + umlal r0, r1, r10, r7 + umull r5, r2, r10, r12 + adcs r12, r6, r5 + umull r6, r5, r10, lr + mov lr, r7 + adcs r2, r2, r6 + ldr r6, [sp, #16] @ 4-byte Reload + adc r5, r5, #0 + adds r6, r4, r6 + ldr r4, [sp, #12] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #8] @ 4-byte Reload + adcs r1, r1, r4 + ldr r4, [sp, #4] @ 4-byte Reload + adcs r10, r12, r4 + adcs r2, r2, r11 + ldr r11, [sp, #40] @ 4-byte Reload + str r2, [sp, #8] @ 4-byte Spill + adc r2, r5, #0 + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [sp, #72] @ 4-byte Reload + mul r7, r6, r2 + umull r4, r2, r7, r11 + adds r6, r4, r6 + str r2, [sp, #12] @ 4-byte Spill + umull r6, r2, r7, r8 + str r2, [sp, #4] @ 4-byte Spill + adcs r0, r6, r0 + umull r6, r2, r7, r9 + ldr r9, [sp, #52] @ 4-byte Reload + adcs r1, r6, r1 + str r2, [sp] @ 4-byte Spill + ldr r2, [sp, #8] @ 4-byte Reload + umull r6, r12, r7, r9 + adcs r5, r6, r10 + ldr r10, [sp, #44] @ 4-byte Reload + umull r6, r4, r7, r10 + adcs r7, r6, r2 + ldr r6, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + adc r6, r6, #0 + adds r0, r0, r2 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #4] @ 4-byte Reload + adcs r0, r1, r0 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #8] @ 4-byte Spill + adcs r0, r7, r12 + ldr r7, [sp, #20] @ 4-byte Reload + str r0, [sp, #4] @ 4-byte Spill + adc r0, r6, r4 + str r0, [sp] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + umull r1, r5, r7, r3 + mov r6, r1 + umull r4, r2, r7, r0 + mov r0, lr + mov r12, r2 + umull r3, lr, r7, r0 + umlal r12, r6, r7, r0 + ldr r0, [sp, #68] @ 4-byte Reload + adds r2, r2, r3 + adcs r1, lr, r1 + umull r1, r2, r7, r0 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r1, r5, r1 + umull r3, r5, r7, r0 + ldr r0, [sp, #16] @ 4-byte Reload + adcs r2, r2, r3 + adc r3, r5, #0 + ldr r5, [sp, #8] @ 4-byte Reload + adds r7, r4, r0 + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r12, r0 + adcs r6, r6, r5 + ldr r5, [sp, #4] @ 4-byte Reload + adcs r1, r1, r5 + ldr r5, [sp] @ 4-byte Reload + adcs r2, r2, r5 + str r2, [sp, #20] @ 4-byte Spill + adc r2, r3, #0 + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [sp, #72] @ 4-byte Reload + mul r5, r7, r2 + ldr r2, [sp, #36] @ 4-byte Reload + umull r4, lr, r5, r11 + adds r7, r4, r7 + umull r7, r12, r5, r8 + adcs r0, r7, r0 + umull r7, r3, r5, r2 + adcs r6, r7, r6 + umull r7, r2, r5, r9 + adcs r1, r7, r1 + umull r7, r4, r5, r10 + ldr r5, [sp, #20] @ 4-byte Reload + adcs r7, r7, r5 + ldr r5, [sp, #16] @ 4-byte Reload + adc r5, r5, #0 + adds r0, r0, lr + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r10, r6, r12 + adcs lr, r1, r3 + adcs r8, r7, r2 + adc r9, r5, r4 + ldr r4, [sp, #32] @ 4-byte Reload + ldr r7, [r0, #16] + ldr r0, [sp, #48] @ 4-byte Reload + umull r3, r11, r7, r0 + ldr r0, 
[sp, #56] @ 4-byte Reload + mov r5, r3 + umull r12, r2, r7, r0 + umull r6, r0, r7, r4 + mov r1, r2 + adds r2, r2, r6 + ldr r6, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + ldr r3, [sp, #68] @ 4-byte Reload + umlal r1, r5, r7, r4 + umull r0, r2, r7, r3 + umull r3, r4, r7, r6 + adcs r0, r11, r0 + adcs r2, r2, r3 + adc r3, r4, #0 + ldr r4, [sp, #20] @ 4-byte Reload + adds r7, r12, r4 + ldr r12, [sp, #60] @ 4-byte Reload + adcs r1, r1, r10 + adcs r6, r5, lr + adcs r11, r0, r8 + ldr r8, [sp, #40] @ 4-byte Reload + adcs r0, r2, r9 + ldr r2, [sp, #52] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + adc r0, r3, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + mul r5, r7, r0 + umull r4, r0, r5, r8 + umull r3, lr, r5, r12 + adds r7, r4, r7 + ldr r4, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + adcs r1, r3, r1 + ldr r9, [sp, #72] @ 4-byte Reload + umull r7, r0, r5, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r3, r7, r6 + umull r6, r10, r5, r2 + adcs r7, r6, r11 + umull r6, r11, r5, r0 + ldr r5, [sp, #68] @ 4-byte Reload + adcs r6, r6, r5 + ldr r5, [sp, #64] @ 4-byte Reload + adc r5, r5, #0 + adds r1, r1, r9 + adcs lr, r3, lr + ldr r3, [sp, #56] @ 4-byte Reload + adcs r9, r7, r3 + adcs r10, r6, r10 + adc r11, r5, r11 + subs r6, r1, r8 + sbcs r5, lr, r12 + sbcs r4, r9, r4 + sbcs r7, r10, r2 + sbc r3, r11, r0 + asr r0, r3, #31 + cmp r0, #0 + movlt r6, r1 + ldr r1, [sp, #28] @ 4-byte Reload + movlt r5, lr + movlt r4, r9 + cmp r0, #0 + movlt r7, r10 + movlt r3, r11 + str r6, [r1] + str r5, [r1, #4] + str r4, [r1, #8] + str r7, [r1, #12] + str r3, [r1, #16] + add sp, sp, #76 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end69: + .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L + .cantunwind + .fnend + + .globl mcl_fp_montRed5L + .align 2 + .type mcl_fp_montRed5L,%function +mcl_fp_montRed5L: @ @mcl_fp_montRed5L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #84 + sub sp, sp, #84 + ldr r6, [r1, #4] + ldr r9, [r2, #-4] + ldr r4, [r1] + ldr r8, [r2, #8] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [r2] + ldr r10, [r2, #4] + str r6, [sp, #48] @ 4-byte Spill + ldr r6, [r1, #8] + mul r5, r4, r9 + str r4, [sp, #24] @ 4-byte Spill + str r0, [sp, #72] @ 4-byte Spill + str r9, [sp, #64] @ 4-byte Spill + str r8, [sp, #68] @ 4-byte Spill + umull lr, r4, r5, r8 + str r4, [sp, #40] @ 4-byte Spill + umull r4, r3, r5, r0 + mov r12, lr + str r4, [sp, #44] @ 4-byte Spill + ldr r4, [r2, #16] + ldr r2, [r2, #12] + mov r0, r3 + str r6, [sp, #56] @ 4-byte Spill + ldr r6, [r1, #12] + umlal r0, r12, r5, r10 + str r4, [sp, #76] @ 4-byte Spill + str r2, [sp, #80] @ 4-byte Spill + str r6, [sp, #52] @ 4-byte Spill + umull r7, r6, r5, r4 + str r6, [sp, #28] @ 4-byte Spill + umull r4, r6, r5, r2 + umull r11, r2, r5, r10 + str r7, [sp, #32] @ 4-byte Spill + adds r3, r3, r11 + ldr r11, [r1, #36] + adcs r2, r2, lr + ldr r3, [sp, #24] @ 4-byte Reload + add lr, r1, #16 + ldr r2, [sp, #40] @ 4-byte Reload + adcs r2, r2, r4 + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [sp, #32] @ 4-byte Reload + adcs r2, r6, r2 + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [sp, #28] @ 4-byte Reload + adc r2, r2, #0 + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [sp, #44] @ 4-byte Reload + adds r5, r3, r2 + ldr r2, [sp, #48] @ 4-byte Reload + ldr r3, [sp, #72] @ 4-byte Reload + adcs r2, r2, r0 + mul r0, r2, r9 + str r2, [sp, #48] @ 4-byte Spill + ldr r9, [r1, #28] + umull r6, r2, r0, 
r8 + str r2, [sp, #40] @ 4-byte Spill + umull r2, r4, r0, r3 + mov r5, r6 + mov r8, r6 + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r1, #32] + mov r7, r4 + umlal r7, r5, r0, r10 + str r2, [sp, #24] @ 4-byte Spill + ldm lr, {r1, r2, lr} + ldr r6, [sp, #56] @ 4-byte Reload + adcs r3, r6, r12 + ldr r6, [sp, #52] @ 4-byte Reload + str r3, [sp, #12] @ 4-byte Spill + ldr r3, [sp, #36] @ 4-byte Reload + adcs r6, r6, r3 + ldr r3, [sp, #32] @ 4-byte Reload + str r6, [sp, #56] @ 4-byte Spill + adcs r1, r1, r3 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #28] @ 4-byte Reload + adcs r1, r2, r1 + str r1, [sp, #36] @ 4-byte Spill + adcs r1, lr, #0 + ldr lr, [sp, #76] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + adcs r1, r9, #0 + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #24] @ 4-byte Reload + adcs r1, r1, #0 + str r1, [sp, #24] @ 4-byte Spill + adcs r1, r11, #0 + umull r6, r11, r0, lr + str r1, [sp, #20] @ 4-byte Spill + mov r1, #0 + adc r1, r1, #0 + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + umull r2, r3, r0, r1 + umull r9, r1, r0, r10 + adds r0, r4, r9 + adcs r0, r1, r8 + ldr r1, [sp, #44] @ 4-byte Reload + ldr r0, [sp, #40] @ 4-byte Reload + adcs r9, r0, r2 + ldr r2, [sp, #64] @ 4-byte Reload + adcs r0, r3, r6 + ldr r6, [sp, #72] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r11, r11, #0 + adds r3, r1, r0 + ldr r0, [sp, #12] @ 4-byte Reload + ldr r1, [sp, #68] @ 4-byte Reload + adcs r0, r7, r0 + mul r7, r0, r2 + str r0, [sp, #12] @ 4-byte Spill + umull r8, r0, r7, r1 + str r0, [sp, #4] @ 4-byte Spill + umull r3, r0, r7, r6 + mov r12, r8 + str r3, [sp, #8] @ 4-byte Spill + ldr r3, [sp, #56] @ 4-byte Reload + mov r4, r0 + umlal r4, r12, r7, r10 + adcs r3, r5, r3 + ldr r5, [sp, #40] @ 4-byte Reload + str r3, [sp] @ 4-byte Spill + ldr r3, [sp, #52] @ 4-byte Reload + adcs r3, r9, r3 + str r3, [sp, #56] @ 4-byte Spill + ldr r3, [sp, #36] @ 4-byte Reload + adcs r3, r5, r3 + str r3, [sp, #52] @ 4-byte Spill + ldr r3, [sp, #32] @ 4-byte Reload + adcs r3, r11, r3 + str r3, [sp, #48] @ 4-byte Spill + ldr r3, [sp, #28] @ 4-byte Reload + adcs r3, r3, #0 + str r3, [sp, #44] @ 4-byte Spill + ldr r3, [sp, #24] @ 4-byte Reload + adcs r3, r3, #0 + str r3, [sp, #40] @ 4-byte Spill + ldr r3, [sp, #20] @ 4-byte Reload + adcs r3, r3, #0 + str r3, [sp, #36] @ 4-byte Spill + ldr r3, [sp, #16] @ 4-byte Reload + adc r3, r3, #0 + str r3, [sp, #32] @ 4-byte Spill + umull r5, r3, r7, lr + ldr lr, [sp, #80] @ 4-byte Reload + str r3, [sp, #28] @ 4-byte Spill + umull r9, r3, r7, r10 + str r5, [sp, #24] @ 4-byte Spill + adds r0, r0, r9 + adcs r0, r3, r8 + ldr r3, [sp, #8] @ 4-byte Reload + ldr r0, [sp, #4] @ 4-byte Reload + umull r5, r11, r7, lr + adcs r9, r0, r5 + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r8, r0, #0 + ldr r0, [sp, #12] @ 4-byte Reload + adds r3, r3, r0 + ldr r0, [sp] @ 4-byte Reload + adcs r11, r4, r0 + mul r7, r11, r2 + ldr r2, [sp, #20] @ 4-byte Reload + umull r3, r0, r7, r1 + str r0, [sp, #24] @ 4-byte Spill + umull r1, r0, r7, r6 + mov r5, r3 + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + mov r4, r0 + umlal r4, r5, r7, r10 + adcs r1, r12, r1 + umull r12, r6, r7, lr + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r9, r1 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #76] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte 
Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r8, r1 + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r1, #0 + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adcs r1, r1, #0 + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + adc r1, r1, #0 + str r1, [sp, #32] @ 4-byte Spill + umull r9, r1, r7, r2 + str r1, [sp, #20] @ 4-byte Spill + umull r8, r1, r7, r10 + adds r0, r0, r8 + ldr r8, [sp, #72] @ 4-byte Reload + adcs r0, r1, r3 + ldr r3, [sp, #20] @ 4-byte Reload + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r12 + adcs r1, r6, r9 + adc r7, r3, #0 + ldr r3, [sp, #28] @ 4-byte Reload + adds r3, r3, r11 + ldr r3, [sp, #56] @ 4-byte Reload + adcs r12, r4, r3 + ldr r3, [sp, #52] @ 4-byte Reload + adcs r3, r5, r3 + str r3, [sp, #56] @ 4-byte Spill + ldr r3, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r7, r0 + ldr r7, [sp, #68] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + mul r4, r12, r0 + umull r0, r1, r4, r10 + umull r11, r5, r4, r8 + adds r0, r5, r0 + umull r6, r0, r4, r7 + adcs r1, r1, r6 + umlal r5, r6, r4, r10 + umull r1, r3, r4, lr + adcs r9, r0, r1 + umull r1, r0, r4, r2 + adcs r1, r3, r1 + ldr r3, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + adds r2, r11, r12 + ldr r2, [sp, #56] @ 4-byte Reload + adcs r2, r5, r2 + adcs r3, r6, r3 + ldr r6, [sp, #48] @ 4-byte Reload + adcs lr, r9, r6 + ldr r6, [sp, #44] @ 4-byte Reload + adcs r9, r1, r6 + ldr r1, [sp, #40] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #80] @ 4-byte Reload + adc r12, r0, #0 + subs r5, r2, r8 + sbcs r4, r3, r10 + sbcs r0, lr, r7 + sbcs r6, r9, r1 + ldr r1, [sp, #76] @ 4-byte Reload + sbcs r1, r11, r1 + sbc r7, r12, #0 + ands r7, r7, #1 + movne r5, r2 + ldr r2, [sp, #60] @ 4-byte Reload + movne r4, r3 + movne r0, lr + cmp r7, #0 + movne r6, r9 + movne r1, r11 + str r5, [r2] + str r4, [r2, #4] + str r0, [r2, #8] + str r6, [r2, #12] + str r1, [r2, #16] + add sp, sp, #84 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end70: + .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L + .cantunwind + .fnend + + .globl mcl_fp_addPre5L + .align 2 + .type mcl_fp_addPre5L,%function +mcl_fp_addPre5L: @ @mcl_fp_addPre5L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, lr} + push {r4, r5, r6, r7, r8, lr} + ldm r2, {r3, r12, lr} + ldr r4, [r2, #12] + ldr r8, [r2, #16] + ldm r1, {r5, r6, r7} + ldr r2, [r1, #12] + ldr r1, [r1, #16] + adds r3, r3, r5 + adcs r6, r12, r6 + adcs r7, lr, r7 + adcs r2, r4, r2 + stm r0, {r3, r6, r7} + adcs r1, r8, r1 + str r2, [r0, #12] + str r1, [r0, #16] + mov r0, #0 + adc r0, r0, #0 + pop {r4, r5, r6, r7, r8, lr} + mov pc, lr +.Lfunc_end71: + .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L + .cantunwind + .fnend + + .globl mcl_fp_subPre5L + .align 2 + .type mcl_fp_subPre5L,%function +mcl_fp_subPre5L: @ @mcl_fp_subPre5L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, lr} + push {r4, r5, r6, r7, r8, lr} + ldm r2, {r3, r12, lr} + ldr r4, [r2, #12] + ldr r8, [r2, #16] + ldm r1, {r5, r6, r7} + ldr r2, [r1, #12] + ldr r1, [r1, #16] + subs r3, r5, r3 + sbcs r6, r6, r12 + sbcs r7, r7, lr 
+ sbcs r2, r2, r4
+ stm r0, {r3, r6, r7}
+ sbcs r1, r1, r8
+ str r2, [r0, #12]
+ str r1, [r0, #16]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end72:
+ .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_5L
+ .align 2
+ .type mcl_fp_shr1_5L,%function
+mcl_fp_shr1_5L: @ @mcl_fp_shr1_5L
+ .fnstart
+@ BB#0:
+ .save {r4, lr}
+ push {r4, lr}
+ ldr r3, [r1, #4]
+ ldr r12, [r1]
+ ldr lr, [r1, #12]
+ ldr r2, [r1, #8]
+ ldr r1, [r1, #16]
+ lsrs r4, r3, #1
+ lsr r3, r3, #1
+ rrx r12, r12
+ lsrs r4, lr, #1
+ orr r3, r3, r2, lsl #31
+ lsr r4, lr, #1
+ rrx r2, r2
+ str r12, [r0]
+ str r3, [r0, #4]
+ orr r4, r4, r1, lsl #31
+ lsr r1, r1, #1
+ str r2, [r0, #8]
+ str r4, [r0, #12]
+ str r1, [r0, #16]
+ pop {r4, lr}
+ mov pc, lr
+.Lfunc_end73:
+ .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add5L
+ .align 2
+ .type mcl_fp_add5L,%function
+mcl_fp_add5L: @ @mcl_fp_add5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldm r2, {r12, lr}
+ ldr r9, [r2, #8]
+ ldr r5, [r2, #12]
+ ldr r8, [r2, #16]
+ ldm r1, {r6, r7}
+ ldr r2, [r1, #8]
+ ldr r4, [r1, #12]
+ ldr r1, [r1, #16]
+ adds r6, r12, r6
+ adcs r7, lr, r7
+ adcs r2, r9, r2
+ stm r0, {r6, r7}
+ adcs r5, r5, r4
+ mov r4, #0
+ str r2, [r0, #8]
+ adcs r1, r8, r1
+ str r5, [r0, #12]
+ str r1, [r0, #16]
+ adc r8, r4, #0
+ ldm r3, {r4, r12, lr}
+ ldr r9, [r3, #12]
+ ldr r3, [r3, #16]
+ subs r6, r6, r4
+ sbcs r7, r7, r12
+ sbcs r2, r2, lr
+ sbcs r12, r5, r9
+ sbcs lr, r1, r3
+ sbc r1, r8, #0
+ tst r1, #1
+ stmeq r0!, {r6, r7}
+ stmeq r0, {r2, r12, lr}
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end74:
+ .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF5L
+ .align 2
+ .type mcl_fp_addNF5L,%function
+mcl_fp_addNF5L: @ @mcl_fp_addNF5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ ldm r1, {r12, lr}
+ ldr r9, [r1, #8]
+ ldr r5, [r1, #12]
+ ldr r8, [r1, #16]
+ ldm r2, {r6, r7}
+ ldr r1, [r2, #8]
+ ldr r4, [r2, #12]
+ ldr r2, [r2, #16]
+ adds r6, r6, r12
+ adcs r10, r7, lr
+ adcs r9, r1, r9
+ adcs lr, r4, r5
+ ldr r4, [r3]
+ adc r12, r2, r8
+ ldmib r3, {r2, r5}
+ ldr r1, [r3, #12]
+ ldr r3, [r3, #16]
+ subs r4, r6, r4
+ sbcs r2, r10, r2
+ sbcs r5, r9, r5
+ sbcs r1, lr, r1
+ sbc r3, r12, r3
+ asr r7, r3, #31
+ cmp r7, #0
+ movlt r4, r6
+ movlt r2, r10
+ movlt r5, r9
+ cmp r7, #0
+ movlt r1, lr
+ movlt r3, r12
+ str r4, [r0]
+ str r2, [r0, #4]
+ str r5, [r0, #8]
+ str r1, [r0, #12]
+ str r3, [r0, #16]
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end75:
+ .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub5L
+ .align 2
+ .type mcl_fp_sub5L,%function
+mcl_fp_sub5L: @ @mcl_fp_sub5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldm r2, {r8, r12, lr}
+ ldr r9, [r2, #12]
+ ldr r6, [r2, #16]
+ ldm r1, {r2, r7}
+ ldr r4, [r1, #8]
+ ldr r5, [r1, #12]
+ ldr r1, [r1, #16]
+ subs r8, r2, r8
+ sbcs r2, r7, r12
+ str r8, [r0]
+ sbcs r12, r4, lr
+ sbcs lr, r5, r9
+ sbcs r4, r1, r6
+ mov r1, #0
+ stmib r0, {r2, r12, lr}
+ sbc r1, r1, #0
+ str r4, [r0, #16]
+ tst r1, #1
+ popeq {r4, r5, r6, r7, r8, r9, r11, lr}
+ moveq pc, lr
+ ldm r3, {r1, r5, r6, r7}
+ ldr r3, [r3, #16]
+ adds r1, r1, r8
+ adcs r2, r5, r2
+ adcs r6, r6, r12
+ adcs r7, r7, lr
+ adc r3, r3, r4
+ stm r0, {r1, r2, r6, r7}
+ str r3, [r0, #16]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end76:
+ .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF5L
+ .align 2
+ .type mcl_fp_subNF5L,%function
+mcl_fp_subNF5L: @ @mcl_fp_subNF5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldm r2, {r12, lr}
+ ldr r9, [r2, #8]
+ ldr r5, [r2, #12]
+ ldr r8, [r2, #16]
+ ldm r1, {r6, r7}
+ ldr r2, [r1, #8]
+ ldr r4, [r1, #12]
+ ldr r1, [r1, #16]
+ subs r11, r6, r12
+ sbcs r10, r7, lr
+ sbcs lr, r2, r9
+ add r9, r3, #8
+ sbcs r12, r4, r5
+ ldm r3, {r4, r5}
+ sbc r1, r1, r8
+ ldm r9, {r2, r8, r9}
+ asr r6, r1, #31
+ adds r4, r11, r4
+ adcs r5, r10, r5
+ adcs r2, lr, r2
+ adcs r3, r12, r8
+ adc r7, r1, r9
+ cmp r6, #0
+ movge r4, r11
+ movge r5, r10
+ movge r2, lr
+ cmp r6, #0
+ movge r3, r12
+ movge r7, r1
+ str r4, [r0]
+ str r5, [r0, #4]
+ str r2, [r0, #8]
+ str r3, [r0, #12]
+ str r7, [r0, #16]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end77:
+ .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add5L
+ .align 2
+ .type mcl_fpDbl_add5L,%function
+mcl_fpDbl_add5L: @ @mcl_fpDbl_add5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #20
+ sub sp, sp, #20
+ ldr r12, [r1]
+ ldr r9, [r1, #4]
+ ldr r8, [r1, #8]
+ ldr r10, [r1, #12]
+ ldmib r2, {r6, r7}
+ ldr r5, [r2, #16]
+ ldr r11, [r2]
+ ldr r4, [r2, #12]
+ str r5, [sp] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ adds lr, r11, r12
+ ldr r11, [r2, #32]
+ add r12, r1, #16
+ adcs r6, r6, r9
+ add r9, r1, #28
+ adcs r7, r7, r8
+ str r5, [sp, #4] @ 4-byte Spill
+ ldr r5, [r2, #24]
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [r2, #28]
+ ldr r2, [r2, #36]
+ str r5, [sp, #16] @ 4-byte Spill
+ str r2, [sp, #8] @ 4-byte Spill
+ adcs r5, r4, r10
+ ldm r9, {r4, r8, r9}
+ ldm r12, {r1, r2, r12}
+ str lr, [r0]
+ stmib r0, {r6, r7}
+ ldr r7, [sp] @ 4-byte Reload
+ str r5, [r0, #12]
+ adcs r1, r7, r1
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r2, r7, r2
+ mov r7, #0
+ adcs r12, r1, r12
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r10, r1, r4
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r8, r11, r8
+ adcs lr, r1, r9
+ adc r1, r7, #0
+ ldr r7, [r3]
+ ldmib r3, {r4, r5, r6}
+ ldr r3, [r3, #16]
+ subs r7, r2, r7
+ sbcs r4, r12, r4
+ sbcs r5, r10, r5
+ sbcs r6, r8, r6
+ sbcs r3, lr, r3
+ sbc r1, r1, #0
+ ands r1, r1, #1
+ movne r7, r2
+ movne r4, r12
+ movne r5, r10
+ cmp r1, #0
+ movne r6, r8
+ movne r3, lr
+ str r7, [r0, #20]
+ str r4, [r0, #24]
+ str r5, [r0, #28]
+ str r6, [r0, #32]
+ str r3, [r0, #36]
+ add sp, sp, #20
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end78:
+ .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub5L
+ .align 2
+ .type mcl_fpDbl_sub5L,%function
+mcl_fpDbl_sub5L: @ @mcl_fpDbl_sub5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #32
+ sub sp, sp, #32
+ ldr r7, [r2, #32]
+ add r8, r1, #12
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r1, #36]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldmib r2, {r9, r10, r11}
+ ldr r7, [r2, #16]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ ldr r2, [r2]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldm r8, {r4, r5, r6, r7, r8}
+ ldm r1, {r1, r12, lr}
+ subs r1, r1, r2
+ sbcs r2, r12, r9
+ stm r0, {r1, r2}
+ sbcs r1, lr, r10
+ str r1, [r0, #8]
+ sbcs r1, r4, r11
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #12]
+ ldr r1, [sp] @ 4-byte Reload
+ sbcs r1, r5, r1
+ ldr r5, [sp, #16] @ 4-byte Reload
+ sbcs r2, r6, r2
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r1, [r0, #16]
+ mov r1, #0
+ sbcs r7, r7, r6
+ ldr r6, [sp, #12] @ 4-byte Reload
+ sbcs r9, r8, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ sbcs r8, r5, r6
+ ldr r6, [sp, #28] @ 4-byte Reload
+ ldr r5, [sp, #20] @ 4-byte Reload
+ sbcs lr, r5, r6
+ sbc r12, r1, #0
+ ldm r3, {r1, r4, r5, r6}
+ ldr r3, [r3, #16]
+ adds r1, r2, r1
+ adcs r4, r7, r4
+ adcs r5, r9, r5
+ adcs r6, r8, r6
+ adc r3, lr, r3
+ ands r12, r12, #1
+ moveq r1, r2
+ moveq r4, r7
+ moveq r5, r9
+ cmp r12, #0
+ moveq r6, r8
+ moveq r3, lr
+ str r1, [r0, #20]
+ str r4, [r0, #24]
+ str r5, [r0, #28]
+ str r6, [r0, #32]
+ str r3, [r0, #36]
+ add sp, sp, #32
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end79:
+ .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre6L
+ .align 2
+ .type mcl_fp_mulUnitPre6L,%function
+mcl_fp_mulUnitPre6L: @ @mcl_fp_mulUnitPre6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r11, [r1, #12]
+ ldr r9, [r1, #16]
+ ldr r8, [r1, #20]
+ umull r4, r10, lr, r2
+ umull r1, r7, r12, r2
+ mov r5, r7
+ mov r6, r4
+ umlal r5, r6, r3, r2
+ stm r0, {r1, r5, r6}
+ umull r5, r6, r3, r2
+ umull r1, r12, r11, r2
+ adds r3, r7, r5
+ adcs r3, r6, r4
+ adcs r1, r10, r1
+ str r1, [r0, #12]
+ umull r1, r3, r9, r2
+ adcs r1, r12, r1
+ str r1, [r0, #16]
+ umull r1, r7, r8, r2
+ adcs r1, r3, r1
+ str r1, [r0, #20]
+ adc r1, r7, #0
+ str r1, [r0, #24]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end80:
+ .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre6L
+ .align 2
+ .type mcl_fpDbl_mulPre6L,%function
+mcl_fpDbl_mulPre6L: @ @mcl_fpDbl_mulPre6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #48
+ sub sp, sp, #48
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r3, [r2]
+ ldm r1, {r12, lr}
+ ldr r2, [r1, #8]
+ mov r8, r0
+ ldr r10, [r1, #12]
+ umull r0, r4, r12, r3
+ umull r6, r7, lr, r3
+ str r2, [sp, #24] @ 4-byte Spill
+ adds r6, r4, r6
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r5, r6, r2, r3
+ adcs r7, r7, r5
+ umlal r4, r5, lr, r3
+ umull r7, r11, r10, r3
+ adcs r0, r6, r7
+ ldr r7, [r1, #16]
+ str r0, [sp, #40] @ 4-byte Spill
+ umull r6, r0, r7, r3
+ adcs r2, r11, r6
+ ldr r6, [r1, #20]
+ str r2, [sp, #36] @ 4-byte Spill
+ umull r11, r2, r6, r3
+ adcs r0, r0, r11
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r2, r2, #0
+ str r2, [sp, #12] @ 4-byte Spill
+ str r0, [r8]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r3, [r0, #4]
+ umull r11, r9, r12, r3
+ adds r2, r11, r4
+ umull r4, r11, lr, r3
+ str r9, [sp, #28] @ 4-byte Spill
+ adcs lr, r4, r5
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r2, [sp, #32] @ 4-byte Spill
+ umull r4, r2, r10, r3
+ str r2, [sp, #20] @ 4-byte Spill
+ umull r2, r10, r5, r3
+ ldr r5, [sp, #40] @ 4-byte Reload
+ adcs r2, r2, r5
+ ldr r5, [sp, #36] @ 4-byte Reload
+ adcs r4, r4, r5
+ umull r5, r9, r7, r3
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adcs r5, r5, r7
+ umull r7, r12, r6, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adcs r7, r7, r3
+ mov r3, #0
+ adc r6, r3, #0
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adds r3, lr, r3
+ adcs r2, r2, r11
+ adcs lr, r4, r10
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r10, r5, r4
+ ldr r4, [r1, #8]
+ adcs r11, r7, r9
+ ldr r9, [r1, #4]
+ adc r7, r6, r12
+ ldr r6, [r0, #8]
+ ldr r0, [r1]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r9, [sp, #8] @ 4-byte Spill
+ umull r12, r5, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ adds r0, r12, r3
+ str r7, [r8, #4]
+ ldr r7, [r1, #12]
+ ldr r12, [r1, #20]
+ str r5, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r3, r0, r9, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r3, r2
+ str r0, [sp, #12] @ 4-byte Spill
+ umull r3, r0, r4, r6
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r0, r3, lr
+ ldr lr, [r1, #16]
+ ldr r9, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ umull r2, r0, r7, r6
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, r10
+ umull r10, r5, lr, r6
+ adcs r10, r10, r11
+ umull r11, r3, r12, r6
+ adcs r6, r11, r0
+ mov r0, #0
+ adc r11, r0, #0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adds r0, r9, r0
+ ldr r9, [sp, #4] @ 4-byte Reload
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r9, r2, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r10, r10, r0
+ adcs r0, r6, r5
+ ldr r5, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r11, r3
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ str r0, [r8, #8]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r6, [r0, #12]
+ umull r11, r3, r7, r6
+ str r3, [sp, #36] @ 4-byte Spill
+ umull r7, r3, r4, r6
+ str r3, [sp, #32] @ 4-byte Spill
+ umull r4, r3, r5, r6
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp, #40] @ 4-byte Reload
+ umull r5, r2, r3, r6
+ ldr r3, [sp] @ 4-byte Reload
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adds r3, r5, r3
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adcs r4, r4, r3
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r7, r7, r9
+ adcs r9, r11, r10
+ umull r5, r11, lr, r6
+ adcs r3, r5, r3
+ umull r5, r10, r12, r6
+ mov r6, #0
+ adcs r2, r5, r2
+ adc r5, r6, #0
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adds r12, r4, r6
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs lr, r7, r4
+ ldr r4, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r9, r9, r4
+ adcs r3, r3, r7
+ adcs r2, r2, r11
+ str r3, [sp, #20] @ 4-byte Spill
+ str r2, [sp, #28] @ 4-byte Spill
+ adc r2, r5, r10
+ ldr r5, [r0, #16]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r2, [r8, #12]
+ ldr r2, [r1]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldmib r1, {r0, r6}
+ umull r7, r4, r2, r5
+ ldr r3, [r1, #12]
+ adds r2, r7, r12
+ str r4, [sp, #24] @ 4-byte Spill
+ str r2, [sp, #32] @ 4-byte Spill
+ umull r7, r2, r0, r5
+ str r2, [sp, #16] @ 4-byte Spill
+ adcs r2, r7, lr
+ str r2, [sp, #4] @ 4-byte Spill
+ umull r4, r2, r6, r5
+ str r2, [sp, #12] @ 4-byte Spill
+ adcs r2, r4, r9
+ ldr r4, [sp, #28] @ 4-byte Reload
+ ldr r9, [sp, #4] @ 4-byte Reload
+ str r2, [sp] @ 4-byte Spill
+ umull r7, r2, r3, r5
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r7, r7, r2
+ ldr r2, [r1, #16]
+ ldr r1, [r1, #20]
+ umull r10, lr, r2, r5
+ umull r11, r12, r1, r5
+ adcs r10, r10, r4
+ ldr r4, [sp, #36] @ 4-byte Reload
+ adcs r5, r11, r4
+ mov r4, #0
+ adc r11, r4, #0
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adds r4, r9, r4
+ ldr r9, [sp] @ 4-byte Reload
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adcs r4, r9, r4
+ str r4, [sp, #24] @ 4-byte Spill
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adcs r4, r7, r4
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adcs r10, r10, r4
+ adcs lr, r5, lr
+ ldr r5, [sp, #44] @ 4-byte Reload
+ adc r7, r11, r12
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [sp, #32] @ 4-byte Reload
+ ldr r5, [r5, #20]
+ str r7, [r8, #16]
+ umull r11, r7, r3, r5
+ str r7, [sp, #44] @ 4-byte Spill
+ umull r3, r7, r6, r5
+ umull r6, r12, r0, r5
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #40] @ 4-byte Reload
+ umull r4, r0, r7, r5
+ ldr r7, [sp, #4] @ 4-byte Reload
+ adds r9, r4, r7
+ ldr r4, [sp, #24] @ 4-byte Reload
+ str r9, [r8, #20]
+ adcs r6, r6, r4
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, r4
+ adcs r7, r11, r10
+ umull r4, r10, r2, r5
+ adcs r2, r4, lr
+ umull r4, lr, r1, r5
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r4, r1
+ mov r4, #0
+ adc r4, r4, #0
+ adds r5, r6, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r3, r3, r12
+ str r5, [r8, #24]
+ str r3, [r8, #28]
+ adcs r3, r7, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ str r3, [r8, #32]
+ adcs r2, r2, r0
+ adcs r1, r1, r10
+ str r2, [r8, #36]
+ str r1, [r8, #40]
+ adc r1, r4, lr
+ str r1, [r8, #44]
+ add sp, sp, #48
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end81:
+ .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre6L
+ .align 2
+ .type mcl_fpDbl_sqrPre6L,%function
+mcl_fpDbl_sqrPre6L: @ @mcl_fpDbl_sqrPre6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #56
+ sub sp, sp, #56
+ ldm r1, {r2, r3}
+ ldr r7, [r1, #12]
+ mov lr, r0
+ ldr r0, [r1, #8]
+ ldr r9, [r1, #16]
+ ldr r12, [r1, #20]
+ umull r10, r6, r7, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ umull r4, r8, r0, r2
+ umull r5, r0, r2, r2
+ str r7, [sp, #44] @ 4-byte Spill
+ str r6, [sp, #36] @ 4-byte Spill
+ umull r6, r7, r3, r2
+ str r5, [sp, #24] @ 4-byte Spill
+ adds r11, r0, r6
+ ldr r5, [sp, #36] @ 4-byte Reload
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r7, r7, r4
+ umlal r0, r4, r3, r2
+ adcs r7, r8, r10
+ str r7, [sp, #40] @ 4-byte Spill
+ umull r7, r10, r9, r2
+ adcs r7, r5, r7
+ str r7, [sp, #32] @ 4-byte Spill
+ umull r7, r8, r12, r2
+ adcs r11, r10, r7
+ adc r2, r8, #0
+ adds r0, r6, r0
+ umull r6, r10, r3, r3
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r4, r6, r4
+ str r0, [lr]
+ umull r6, r0, r12, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ umull r5, r0, r9, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ umull r9, r12, r0, r3
+ ldr r0, [sp, #48] @ 4-byte Reload
+ umull r7, r8, r0, r3
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r3, r7, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r9, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r5, r5, r11
+ adcs r6, r6, r2
+ mov r2, #0
+ adc r2, r2, #0
+ adds r4, r4, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r11, r3, r10
+ adcs r8, r7, r8
+ ldr r7, [r1, #4]
+ adcs r10, r5, r12
+ ldr r5, [r1, #12]
+ str r0, [lr, #4]
+ ldr r0, [sp, #24] @ 4-byte Reload
+ str r7, [sp, #16] @ 4-byte Spill
+ adcs r0, r6, r0
+ ldr r6, [r1, #8]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r2, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1]
+ umull r3, r2, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ adds r0, r3, r4
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #16]
+ str r0, [sp, #52] @ 4-byte Spill
+ umull r3, r0, r7, r6
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r3, r11
+ ldr r3, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r4, r0, r6, r6
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r4, r8
+ umull r12, r4, r5, r6
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r0, r12, r10
+ ldr r10, [sp, #24] @ 4-byte Reload
+ str r4, [sp, #40] @ 4-byte Spill
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r9, r0, r2, r6
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r9, r9, r0
+ ldr r0, [r1, #20]
+ umull r11, r8, r0, r6
+ adcs r6, r11, r3
+ mov r3, #0
+ adc r11, r3, #0
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adds r3, r10, r3
+ str r3, [sp, #24] @ 4-byte Spill
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r3, r7, r3
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r3, r7, r3
+ str r3, [sp, #28] @ 4-byte Spill
+ adcs r3, r9, r4
+ ldr r4, [sp, #16] @ 4-byte Reload
+ ldr r9, [sp, #48] @ 4-byte Reload
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp] @ 4-byte Reload
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r3, r6, r3
+ str r3, [sp, #12] @ 4-byte Spill
+ umull r6, r3, r0, r5
+ adc r11, r11, r8
+ str r3, [sp, #44] @ 4-byte Spill
+ umull r3, r0, r2, r5
+ str r0, [sp, #36] @ 4-byte Spill
+ umull r2, r0, r5, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r0, r10, r4, r5
+ umull r4, r8, r9, r5
+ ldr r5, [sp, #24] @ 4-byte Reload
+ adds r4, r4, r5
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #28] @ 4-byte Reload
+ adcs r5, r12, r5
+ adcs r2, r2, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r3, r3, r7
+ mov r7, #0
+ adcs r6, r6, r11
+ adc r7, r7, #0
+ adds r9, r0, r8
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r11, r5, r10
+ adcs r0, r2, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ add r3, r1, #8
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r12, r6, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r0, [lr, #8]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ str r4, [lr, #12]
+ adc r0, r7, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r1, {r4, r6}
+ ldm r3, {r0, r2, r3}
+ ldr r1, [r1, #20]
+ umull r5, r7, r2, r1
+ str r5, [sp, #32] @ 4-byte Spill
+ str r7, [sp, #52] @ 4-byte Spill
+ umull r5, r7, r0, r1
+ str r5, [sp, #28] @ 4-byte Spill
+ str r7, [sp, #48] @ 4-byte Spill
+ umull r5, r7, r6, r1
+ str r5, [sp, #24] @ 4-byte Spill
+ str r7, [sp, #44] @ 4-byte Spill
+ umull r5, r7, r4, r1
+ str r5, [sp, #8] @ 4-byte Spill
+ str r7, [sp, #36] @ 4-byte Spill
+ umull r7, r5, r2, r3
+ str r5, [sp, #4] @ 4-byte Spill
+ umull r2, r5, r0, r3
+ umull r0, r10, r6, r3
+ umull r6, r8, r4, r3
+ adds r4, r6, r9
+ str r5, [sp] @ 4-byte Spill
+ adcs r11, r0, r11
+ ldr r0, [sp, #20] @ 4-byte Reload
+ str r4, [sp, #40] @ 4-byte Spill
+ umull r4, r9, r3, r3
+ adcs r5, r2, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r6, r7, r0
+ umull r0, r2, r1, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ mov r7, #0
+ adcs r12, r4, r12
+ ldr r4, [sp] @ 4-byte Reload
+ adcs r3, r0, r3
+ adc r7, r7, #0
+ adds r8, r11, r8
+ adcs r5, r5, r10
+ adcs r6, r6, r4
+ ldr r4, [sp, #4] @ 4-byte Reload
+ adcs r4, r12, r4
+ adcs r3, r3, r9
+ adc r10, r7, r2
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adds r12, r7, r8
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs r9, r7, r5
+ ldr r5, [sp, #28] @ 4-byte Reload
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r6, r5, r6
+ ldr r5, [sp, #32] @ 4-byte Reload
+ adcs r4, r5, r4
+ adcs r0, r0, r3
+ umull r3, r8, r1, r1
+ adcs r1, r3, r10
+ mov r3, #0
+ adc r3, r3, #0
+ adds r5, r9, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs r6, r6, r7
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs r4, r4, r7
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ adcs r1, r1, r2
+ adc r2, r3, r8
+ ldr r3, [sp, #40] @ 4-byte Reload
+ str r3, [lr, #16]
+ add r3, lr, #36
+ str r12, [lr, #20]
+ str r5, [lr, #24]
+ str r6, [lr, #28]
+ str r4, [lr, #32]
+ stm r3, {r0, r1, r2}
+ add sp, sp, #56
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end82:
+ .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont6L
+ .align 2
+ .type mcl_fp_mont6L,%function
+mcl_fp_mont6L: @ @mcl_fp_mont6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #116
+ sub sp, sp, #116
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, r2
+ str r2, [sp, #60] @ 4-byte Spill
+ ldm r0, {r2, r6, r7}
+ ldr r0, [r0, #12]
+ ldr r5, [r3, #8]
+ ldr r9, [r3]
+ ldr r11, [r1, #8]
+ ldr lr, [r1, #12]
+ ldr r12, [r3, #4]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r1, #4]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1]
+ str r5, [sp, #92] @ 4-byte Spill
+ str r9, [sp, #84] @ 4-byte Spill
+ str r11, [sp, #100] @ 4-byte Spill
+ str lr, [sp, #64] @ 4-byte Spill
+ str r12, [sp, #112] @ 4-byte Spill
+ str r7, [sp, #108] @ 4-byte Spill
+ ldr r7, [r3, #-4]
+ umull r4, r8, r0, r2
+ str r0, [sp, #88] @ 4-byte Spill
+ str r4, [sp, #44] @ 4-byte Spill
+ mul r0, r4, r7
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ ldr r1, [r1, #16]
+ umull r10, r4, r0, r5
+ str r4, [sp, #36] @ 4-byte Spill
+ umull r4, r5, r0, r9
+ str r10, [sp, #16] @ 4-byte Spill
+ mov r9, r5
+ str r5, [sp, #12] @ 4-byte Spill
+ str r4, [sp, #40] @ 4-byte Spill
+ umull r5, r4, r7, r2
+ str r7, [sp, #104] @ 4-byte Spill
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #96] @ 4-byte Spill
+ umlal r9, r10, r0, r12
+ str r5, [sp, #72] @ 4-byte Spill
+ str r4, [sp, #76] @ 4-byte Spill
+ umull r5, r4, r1, r2
+ str r4, [sp, #68] @ 4-byte Spill
+ umull r1, r4, lr, r2
+ str r5, [sp, #28] @ 4-byte Spill
+ umull lr, r5, r11, r2
+ str r4, [sp, #24] @ 4-byte Spill
+ umull r11, r4, r7, r2
+ adds r7, r8, r11
+ adcs r4, r4, lr
+ ldr r7, [r3, #12]
+ adcs r1, r5, r1
+ ldr r4, [sp, #24] @ 4-byte Reload
+ ldr r5, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r4, r1
+ ldr r4, [sp, #68] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r7, [sp, #72] @ 4-byte Spill
+ adcs r1, r4, r1
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ umull r11, r4, r0, r1
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ str r4, [sp, #8] @ 4-byte Spill
+ umull r3, r4, r0, r12
+ adds r3, r5, r3
+ str r1, [sp, #68] @ 4-byte Spill
+ umull r5, r12, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r4, r4, r1
+ umull r4, r3, r0, r7
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #40] @ 4-byte Reload
+ adcs r1, r0, r4
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r3, r3, r5
+ adcs r4, r12, r11
+ mov r12, #0
+ adc r5, r0, #0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ umlal r8, lr, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adds r2, r7, r2
+ adcs r2, r9, r8
+ str r2, [sp, #44] @ 4-byte Spill
+ adcs r2, r10, lr
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r3, r1
+ mov r3, r0
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r1, r4, r1
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r1, r5, r1
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adc r11, r12, #0
+ umull lr, r10, r6, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ umull r7, r4, r6, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ umull r5, r12, r6, r1
+ umull r1, r8, r6, r0
+ umull r9, r0, r6, r2
+ adds r1, r0, r1
+ adcs r1, r8, r5
+ ldr r8, [sp, #64] @ 4-byte Reload
+ umlal r0, r5, r6, r3
+ ldr r3, [sp, #44] @ 4-byte Reload
+ umull r1, r2, r6, r8
+ adcs r1, r12, r1
+ adcs r2, r2, r7
+ adcs r12, r4, lr
+ adc r4, r10, #0
+ adds r7, r3, r9
+ ldr r3, [sp, #40] @ 4-byte Reload
+ ldr r10, [sp, #68] @ 4-byte Reload
+ adcs r9, r3, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r11, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ mul r0, r7, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ umull lr, r3, r0, r5
+ umull r6, r12, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ umull r11, r2, r0, r1
+ mov r1, r6
+ mov r4, r2
+ adds r2, r2, lr
+ umlal r4, r1, r0, r5
+ ldr r5, [sp, #76] @ 4-byte Reload
+ adcs r3, r3, r6
+ umull r2, lr, r0, r5
+ ldr r5, [sp, #72] @ 4-byte Reload
+ umull r3, r6, r0, r5
+ adcs r12, r12, r3
+ umull r5, r3, r0, r10
+ adcs r0, r6, r5
+ adcs r2, r3, r2
+ adc r3, lr, #0
+ adds r7, r11, r7
+ adcs r7, r4, r9
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r12, r1
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ umull r4, r5, r2, r8
+ ldr r8, [sp, #88] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ umull r3, r1, r2, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ mov r3, r2
+ str r1, [sp, #16] @ 4-byte Spill
+ umull r6, r9, r2, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ umull r1, lr, r2, r0
+ umull r11, r0, r3, r8
+ umull r2, r12, r3, r7
+ adds r2, r0, r2
+ str r11, [sp, #12] @ 4-byte Spill
+ adcs r2, r12, r1
+ umlal r0, r1, r3, r7
+ ldr r3, [sp, #20] @ 4-byte Reload
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r2, lr, r4
+ adcs r4, r5, r6
+ ldr r6, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adcs r6, r9, r6
+ adc r5, r5, #0
+ adds r8, r3, r7
+ ldr r3, [sp, #44] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ mul r0, r8, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ umull r2, r3, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r3, [sp, #16] @ 4-byte Spill
+ umull r3, r5, r0, r1
+ mov r1, r2
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp, #76] @ 4-byte Reload
+ mov r4, r5
+ umlal r4, r1, r0, r7
+ umull r9, r6, r0, r3
+ ldr r3, [sp, #72] @ 4-byte Reload
+ str r6, [sp, #12] @ 4-byte Spill
+ umull r6, lr, r0, r10
+ umull r12, r10, r0, r3
+ umull r11, r3, r0, r7
+ adds r0, r5, r11
+ adcs r0, r3, r2
+ ldr r3, [sp, #52] @ 4-byte Reload
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r11, r0, r12
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r10, r10, r6
+ adcs lr, lr, r9
+ adc r9, r0, #0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adds r6, r0, r8
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r8, [sp, #88] @ 4-byte Reload
+ umull r7, r2, r3, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #20] @ 4-byte Spill
+ umull r7, r2, r3, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #8] @ 4-byte Spill
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [sp, #108] @ 4-byte Reload
+ umull r5, r2, r3, r0
+ str r2, [sp] @ 4-byte Spill
+ umull r2, r0, r3, r8
+ umull r6, r12, r3, r7
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r4, r4, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r11, r11, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r10, r10, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, lr, r1
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r9, r1
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adc lr, r1, #0
+ adds r6, r0, r6
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r2, r12, r5
+ umlal r0, r5, r3, r7
+ ldr r2, [sp] @ 4-byte Reload
+ adcs r9, r2, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ umull r6, r2, r3, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r2, r2, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adc r8, r1, #0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adds r4, r4, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ adcs r0, r11, r5
+ ldr r5, [sp, #112] @ 4-byte Reload
+ ldr r11, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r10, r9
+ ldr r10, [sp, #80] @ 4-byte Reload
+ ldr r9, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, lr, r8
+ ldr r8, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ mul r0, r4, r10
+ umull r2, r12, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ umull r3, r7, r0, r1
+ mov r1, r2
+ str r3, [sp, #24] @ 4-byte Spill
+ umull lr, r3, r0, r5
+ mov r6, r7
+ adds r7, r7, lr
+ umlal r6, r1, r0, r5
+ adcs r2, r3, r2
+ umull r7, lr, r0, r11
+ umull r2, r3, r0, r9
+ adcs r12, r12, r2
+ umull r5, r2, r0, r8
+ adcs r0, r3, r5
+ adcs r2, r2, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adc r3, lr, #0
+ adds r7, r7, r4
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r7, r6, r7
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r12, r1
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r4, [r0, #16]
+ ldr r0, [sp, #104] @ 4-byte Reload
+ umull r12, lr, r4, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ umull r5, r6, r4, r3
+ umull r2, r8, r4, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ umull r7, r1, r4, r0
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adds r5, r1, r5
+ umull r0, r5, r4, r7
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r6, r6, r0
+ umlal r1, r0, r4, r3
+ ldr r3, [sp, #52] @ 4-byte Reload
+ adcs r2, r5, r2
+ umull r5, r6, r4, r7
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs r7, r8, r5
+ adcs r6, r6, r12
+ adc r5, lr, #0
+ adds r8, r3, r4
+ ldr r3, [sp, #48] @ 4-byte Reload
+ adcs r1, r3, r1
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ mul r0, r8, r10
+ umull r5, r12, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ umull lr, r3, r0, r6
+ umull r10, r2, r0, r1
+ mov r1, r5
+ mov r4, r2
+ adds r2, r2, lr
+ adcs r3, r3, r5
+ umlal r4, r1, r0, r6
+ umull r2, lr, r0, r11
+ ldr r11, [sp, #88] @ 4-byte Reload
+ umull r3, r5, r0, r9
+ adcs r12, r12, r3
+ umull r6, r3, r0, r7
+ adcs r0, r5, r6
+ adcs r2, r3, r2
+ adc r3, lr, #0
+ adds r7, r10, r8
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r7, r4, r7
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r12, r1
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r4, [r0, #20]
+ ldr r0, [sp, #104] @ 4-byte Reload
+ umull r9, r1, r4, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ umull r2, r12, r4, r3
+ str r1, [sp, #60] @ 4-byte Spill
+ umull r7, r8, r4, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ umull r5, r6, r4, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ umull r1, lr, r4, r0
+ umull r10, r0, r4, r11
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r2, r0, r2
+ adcs r2, r12, r1
+ umlal r0, r1, r4, r3
+ ldr r3, [sp, #52] @ 4-byte Reload
+ ldr r12, [sp, #112] @ 4-byte Reload
+ adcs r2, lr, r5
+ adcs r5, r6, r7
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r7, r8, r9
+ ldr r9, [sp, #68] @ 4-byte Reload
+ adc r6, r6, #0
+ adds r8, r3, r10
+ ldr r3, [sp, #48] @ 4-byte Reload
+ ldr r10, [sp, #84] @ 4-byte Reload
+ adcs lr, r3, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #88] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ mul r0, r8, r1
+ umull r3, r4, r0, r10
+ umull r1, r2, r0, r12
+ adds r1, r4, r1
+ str r3, [sp, #80] @ 4-byte Spill
+ umull r6, r1, r0, r11
+ adcs r2, r2, r6
+ umlal r4, r6, r0, r12
+ umull r2, r3, r0, r5
+ adcs r1, r1, r2
+ str r1, [sp, #60] @ 4-byte Spill
+ umull r2, r1, r0, r9
+ adcs r2, r3, r2
+ str r2, [sp, #52] @ 4-byte Spill
+ umull r3, r2, r0, r7
+ adcs r1, r1, r3
+ ldr r3, [sp, #60] @ 4-byte Reload
+ adc r0, r2, #0
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adds r2, r2, r8
+ ldr r2, [sp, #108] @ 4-byte Reload
+ adcs r12, r4, lr
+ adcs lr, r6, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ adcs r8, r3, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ ldr r3, [sp, #52] @ 4-byte Reload
+ adcs r6, r3, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ adcs r3, r1, r2
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r2, r0, r1
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, #0
+ subs r4, r12, r10
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbcs r0, lr, r0
+ sbcs r1, r8, r11
+ mov r11, r6
+ sbcs r5, r6, r5
+ sbcs r6, r3, r9
+ mov r9, r2
+ sbcs r10, r2, r7
+ ldr r2, [sp, #108] @ 4-byte Reload
+ sbc r7, r2, #0
+ ldr r2, [sp, #56] @ 4-byte Reload
+ ands r7, r7, #1
+ movne r4, r12
+ movne r0, lr
+ movne r1, r8
+ cmp r7, #0
+ movne r5, r11
+ movne r6, r3
+ movne r10, r9
+ str r4, [r2]
+ str r0, [r2, #4]
+ str r1, [r2, #8]
+ str r5, [r2, #12]
+ str r6, [r2, #16]
+ str r10, [r2, #20]
+ add sp, sp, #116
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end83:
+ .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF6L
+ .align 2
+ .type mcl_fp_montNF6L,%function
+mcl_fp_montNF6L: @ @mcl_fp_montNF6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #88
+ sub sp, sp, #88
+ str r2, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r2, {r4, r12}
+ ldr r5, [r1, #4]
+ ldr r0, [r2, #12]
+ ldr r9, [r2, #8]
+ ldr r2, [r1]
+ ldr r7, [r1, #8]
+ ldr lr, [r3, #8]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #12]
+ str r5, [sp, #44] @ 4-byte Spill
+ umull r6, r8, r5, r4
+ mov r10, r5
+ umull r11, r5, r2, r4
+ str r2, [sp, #52] @ 4-byte Spill
+ str r7, [sp, #48] @ 4-byte Spill
+ str lr, [sp, #40] @ 4-byte Spill
+ adds r6, r5, r6
+ umull r2, r6, r7, r4
+ adcs r7, r8, r2
+ umlal r5, r2, r10, r4
+ umull r7, r8, r0, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r6, r7
+ ldr r6, [r1, #16]
+ str r0, [sp, #64] @ 4-byte Spill
+ umull r7, r0, r6, r4
+ str r6, [sp, #72] @ 4-byte Spill
+ ldr r6, [r3]
+ adcs r7, r8, r7
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ str r6, [sp, #80] @ 4-byte Spill
+ umull r1, r8, r7, r4
+ str r7, [sp, #76] @ 4-byte Spill
+ adcs r0, r0, r1
+ ldr r1, [r3, #-4]
+ str r0, [sp, #20] @ 4-byte Spill
+ adc r0, r8, #0
+ ldr r8, [r3, #4]
+ str r0, [sp, #16] @ 4-byte Spill
+ mul r0, r11, r1
+ str r1, [sp, #56] @ 4-byte Spill
+ umull r1, r7, r0, r6
+ str r8, [sp, #68] @ 4-byte Spill
+ adds r1, r1, r11
+ str r7, [sp, #12] @ 4-byte Spill
+ umull r1, r4, r0, r8
+ adcs r8, r1, r5
+ ldr r1, [r3, #12]
+ umull r5, r11, r0, lr
+ str r4, [sp, #8] @ 4-byte Spill
+ adcs r6, r5, r2
+ str r1, [sp, #84] @ 4-byte Spill
+ umull r5, r7, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs lr, r5, r1
+ ldr r1, [r3, #16]
+ str r1, [sp, #64] @ 4-byte Spill
+ umull r5, r4, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r5, r5, r1
+ ldr r1, [r3, #20]
+ umull r3, r2, r0, r1
+ ldr r0, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r3, r0
+ adc r3, r1, #0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r1, r8, r1
+ ldr r8, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r1, r6, r1
+ adcs r11, lr, r11
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr lr, [sp, #76] @ 4-byte Reload
+ adcs r1, r5, r7
+ ldr r5, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r1, [sp, #12] @ 4-byte Spill
+ str r0, [sp, #8] @ 4-byte Spill
+ adc r0, r3, r2
+ umull r3, r6, r12, r10
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ umull r7, r1, r12, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adds r3, r1, r3
+ umull r2, r3, r12, r0
+ adcs r6, r6, r2
+ umlal r1, r2, r12, r10
+ ldr r10, [sp, #68] @ 4-byte Reload
+ umull r6, r0, r12, r8
+ adcs r4, r3, r6
+ umull r6, r3, r12, r5
+ adcs r5, r0, r6
+ umull r6, r0, r12, lr
+ ldr r12, [sp, #60] @ 4-byte Reload
+ adcs r3, r3, r6
+ ldr r6, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r7, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #12] @ 4-byte Reload
+ adcs r2, r2, r11
+ adcs r6, r4, r6
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adcs r11, r5, r4
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r3, r3, r5
+ adc r0, r0, #0
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mul r4, r7, r0
+ umull r0, r5, r4, r3
+ adds r0, r0, r7
+ str r5, [sp, #12] @ 4-byte Spill
+ umull r0, r3, r4, r10
+ ldr r5, [sp, #12] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ adcs r3, r0, r1
+ ldr r0, [sp, #40] @ 4-byte Reload
+ umull r1, r7, r4, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r7, [sp, #4] @ 4-byte Spill
+ adcs r1, r1, r2
+ umull r2, r7, r4, r0
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r2, r2, r6
+ umull r6, r0, r4, r7
+ adcs r6, r6, r11
+ umull r7, r11, r4, r12
+ ldr r4, [sp, #20] @ 4-byte Reload
+ ldr r12, [sp, #48] @ 4-byte Reload
+ adcs r4, r7, r4
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adc r7, r7, #0
+ adds r3, r3, r5
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adcs r1, r1, r3
+ ldr r3, [sp, #72] @ 4-byte Reload
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp] @ 4-byte Reload
+ adcs r1, r6, r1
+ adcs r0, r4, r0
+ str r1, [sp, #8] @ 4-byte Spill
+ str r0, [sp, #4] @ 4-byte Spill
+ adc r0, r7, r11
+ ldr r11, [sp, #52] @ 4-byte Reload
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ umull r6, r1, r9, r11
+ umull r5, r4, r9, r0
+ adds r5, r1, r5
+ umull r2, r5, r9, r12
+ adcs r4, r4, r2
+ umlal r1, r2, r9, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ umull r4, r7, r9, r8
+ adcs r8, r5, r4
+ umull r5, r4, r9, r3
+ adcs r5, r7, r5
+ umull r7, r3, r9, lr
+ ldr lr, [sp, #60] @ 4-byte Reload
+ adcs r4, r4, r7
+ adc r3, r3, #0
+ adds r7, r6, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r1, r1, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r2, r2, r0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r6, r8, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ ldr r8, [sp, #56] @ 4-byte Reload
+ adcs r9, r5, r0
+ ldr r0, [sp] @ 4-byte Reload
+ adcs r0, r4, r0
+ mul r4, r7, r8
+ str r0, [sp, #20] @ 4-byte Spill
+ adc r0, r3, #0
+ ldr r3, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ umull r0, r5, r4, r3
+ adds r0, r0, r7
+ str r5, [sp, #12] @ 4-byte Spill
+ umull r0, r3, r4, r10
+ ldr r10, [sp, #40] @ 4-byte Reload
+ ldr r5, [sp, #12] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ adcs r0, r0, r1
+ umull r1, r3, r4, r10
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ umull r2, r7, r4, r3
+ ldr r3, [sp, #64] @ 4-byte Reload
+ str r7, [sp] @ 4-byte Spill
+ adcs r2, r2, r6
+ umull r6, r7, r4, r3
+ adcs r6, r6, r9
+ umull r3, r9, r4, lr
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, r4
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r0, r0, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ adcs r0, r3, r7
+ str r0, [sp, #4] @ 4-byte Spill
+ adc r0, r4, r9
+ ldr r4, [sp, #44] @ 4-byte Reload
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ umull r3, lr, r0, r12
+ ldr r12, [sp, #36] @ 4-byte Reload
+ umull r9, r2, r0, r11
+ umull r6, r7, r0, r4
+ mov r1, r2
+ adds r2, r2, r6
+ mov r5, r3
+ adcs r2, r7, r3
+ umlal r1, r5, r0, r4
+ umull r2, r3, r0, r12
+ adcs r11, lr, r2
+ ldr lr, [sp, #72] @ 4-byte Reload
+ ldr r2, [sp, #76] @ 4-byte Reload
+ umull r4, r6, r0, lr
+ adcs r3, r3, r4
+ umull r4, r7, r0, r2
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r4, r6, r4
+ adc r6, r7, #0
+ adds r0, r9, r0
+ ldr r9, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ adcs r7, r5, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r2, r11, r2
+ adcs r11, r3, r5
+ ldr r3, [sp] @ 4-byte Reload
+ adcs r3, r4, r3
+ mul r4, r0, r8
+ ldr r8, [sp, #80] @ 4-byte Reload
+ str r3, [sp, #24] @ 4-byte Spill
+ adc r3, r6, #0
+ str r3, [sp, #20] @ 4-byte Spill
+ umull r5, r3, r4, r8
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [sp, #68] @ 4-byte Reload
+ adds r0, r5, r0
+ umull r0, r5, r4, r3
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ umull r1, r3, r4, r10
+ ldr r10, [sp, #60] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ adcs r1, r1, r7
+ umull r7, r3, r4, r5
+ adcs r2, r7, r2
+ umull r7, r5, r4, r9
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r7, r7, r11
+ umull r6, r11, r4, r10
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs r4, r6, r4
+ ldr r6, [sp, #20] @ 4-byte Reload
+ adc r6, r6, #0
+ adds r0, r0, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ adcs r0, r4, r5
+ str r0, [sp, #8] @ 4-byte Spill
+ adc r0, r6, r11
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r5, [r0, #16]
+ umull r11, r2, r5, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ umull r4, r0, r5, r7
+ adds r4, r2, r4
+ umull r3, r4, r5, r1
+ adcs r0, r0, r3
+ umlal r2, r3, r5, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ umull r0, r6, r5, r12
+ adcs r12, r4, r0
+ umull r4, r1, r5, lr
+ adcs r4, r6, r4
+ umull r6, r0, r5, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r6
+ adc r0, r0, #0
+ adds r6, r11, r7
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r2, r2, r7
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adcs r3, r3, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r5, r12, r7
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r7, r4, r7
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [sp, #4] @ 4-byte Reload
+ adcs r1, r1, r7
+ adc r0, r0, #0
+ str r1, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mul r4, r6, r0
+ umull r0, r1, r4, r8
+ ldr r8, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adds r0, r0, r6
+ ldr r7, [sp, #16] @ 4-byte Reload
+ umull r0, r11, r4, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ umull r2, lr, r4, r8
+ adcs r2, r2, r3
+ umull r3, r12, r4, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r3, r3, r5
+ umull r5, r6, r4, r9
+ adcs r5, r5, r1
+ umull r1, r9, r4, r10
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r4
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r0, r0, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r2, r11
+ adcs r11, r3, lr
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r10, r5, r12
+ adcs r0, r1, r6
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r4, r9
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r5, [r0, #20]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ umull r6, r1, r5, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r4, r6
+ umull lr, r3, r5, r0
+ umull r12, r0, r5, r7
+ mov r2, r3
+ adds r3, r3, r12
+ umlal r2, r4, r5, r7
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ umull r0, r3, r5, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r12, r1, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ umull r1, r6, r5, r0
+ adcs r1, r3, r1
+ umull r3, r0, r5, r7
+ ldr r5, [sp, #24] @ 4-byte Reload
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r3, r6, r3
+ adc r0, r0, #0
+ adds r6, lr, r5
+ ldr r5, [sp, #16] @ 4-byte Reload
+ ldr lr, [sp, #68] @ 4-byte Reload
+ adcs r2, r2, r7
+ adcs r7, r4, r11
+ adcs r9, r12, r10
+ adcs r1, r1, r5
+ ldr r5, [sp, #80] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r1, r3, r1
+ adc r0, r0, #0
+ str r1, [sp, #76] @ 4-byte Spill
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mul r4, r6, r0
+ umull r0, r1, r4, r5
+ umull r3, r11, r4, lr
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adds r0, r0, r6
+ umull r6, r0, r4, r8
+ adcs r12, r3, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ adcs r10, r6, r7
+ umull r3, r0, r4, r1
+ adcs r9, r3, r9
+ ldr r3, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ umull r7, r0, r4, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r7, r7, r0
+ umull r6, r0, r4, r2
+ ldr r4, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r6, r6, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r12, r12, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r11, r10, r11
+ adcs r9, r9, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r10, r7, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r7, r6, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r4, r0
+ subs r5, r12, r5
+ sbcs r4, r11, lr
+ mov lr, r0
+ sbcs r6, r9, r8
+ sbcs r1, r10, r1
+ sbcs r8, r7, r3
+ sbc r3, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ asr r0, r3, #31
+ cmp r0, #0
+ movlt r5, r12
+ movlt r4, r11
+ movlt r6, r9
+ cmp r0, #0
+ movlt r1, r10
+ movlt r8, r7
+ movlt r3, lr
+ str r5, [r2]
+ str r4, [r2, #4]
+ str r6, [r2, #8]
+ str r1, [r2, #12]
+ str r8, [r2, #16]
+ str r3, [r2, #20]
+ add sp, sp, #88
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end84:
+ .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed6L
+ .align 2
+ .type mcl_fp_montRed6L,%function
+mcl_fp_montRed6L: @ @mcl_fp_montRed6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #100
+ sub sp, sp, #100
+ ldr r6, [r1, #4]
+ ldr r10, [r2, #-4]
+ ldr r9, [r1]
+ ldr r3, [r2, #8]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r2]
+ ldr r8, [r2, #4]
+ str r6, [sp, #68] @ 4-byte Spill
+ ldr r6, [r1, #8]
+ mul r4, r9, r10
+ str r3, [sp, #80] @ 4-byte Spill
+ str r0, [sp, #76] @ 4-byte Spill
+ str r10, [sp, #92] @ 4-byte Spill
+ umull r12, r7, r4, r3
+ str r7, [sp, #52] @ 4-byte Spill
+ umull r7, r3, r4, r0
+ mov lr, r12
+ str r7, [sp, #56] @ 4-byte Spill
+ mov r0, r3
+ str r6, [sp, #64] @ 4-byte Spill
+ ldr r6, [r1, #12]
+ umlal r0, lr, r4, r8
+ str r6, [sp, #60] @ 4-byte Spill
+ ldr r6, [r2, #20]
+ umull r5, r7, r4, r6
+ str r6, [sp, #84] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ ldr r2, [r2, #12]
+ str r5, [sp, #44] @ 4-byte Spill
+ str r7, [sp, #48] @ 4-byte Spill
+ umull r5, r7, r4, r6
+ str r6, [sp, #96] @ 4-byte Spill
+ str r2, [sp, #88] @ 4-byte Spill
+ str r7, [sp, #40] @ 4-byte Spill
+ umull r6, r7, r4, r2
+ umull r11, r2, r4, r8
+ adds r3, r3, r11
+ adcs r2, r2, r12
+ ldr r3, [sp, #40] @ 4-byte Reload
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r12, r2, r6
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r11, r7, r5
+ adcs r2, r3, r2
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adc r2, r2, #0
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [sp, #56] @ 4-byte Reload
+ adds r6, r9, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ add r9, r1, #16
+ adcs r0, r2, r0
+ mul r6, r0, r10
+ ldr r10, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ umull r3, r0, r6, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r9, {r2, r4, r7, r9}
+ ldr r5, [sp, #76] @ 4-byte Reload
+ umull r0, r1, r6, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ ldr lr, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ mov r12, r3
+ adcs r2, r2, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, r1
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [sp, #52] @ 4-byte Reload
+ umlal r0, r12, r6, r8
+ adcs r2, r4, r2
+ ldr r4, [sp, #96] @ 4-byte Reload
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r2, r7, r2
+ str r2, [sp, #48] @ 4-byte Spill
+ adcs r2, r9, #0
+ umull r9, r11, r6, lr
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #28] @ 4-byte Spill
+ mov r2, #0
+ adc r2, r2, #0
+ str r2, [sp, #24] @ 4-byte Spill
+ umull r7, r2, r6, r8
+ adds r1, r1, r7
+ adcs r2, r2, r3
+ ldr r3, [sp, #88] @ 4-byte Reload
+ umull r1, r7, r6, r4
+ umull r2, r4, r6, r3
+ ldr r6, [sp, #56] @ 4-byte Reload
+ adcs r2, r6, r2
+ adcs r1, r4, r1
+ ldr r4, [sp, #20] @ 4-byte Reload
+ str r2, [sp, #56] @ 4-byte Spill
+ str r1, [sp, #4] @ 4-byte Spill
+ adcs r1, r7, r9
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adc r7, r11, #0
+ adds r6, r4, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ mul r6, r1, r0
+ umull r9, r0, r6, r10
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r0, r1, r6, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ mov r4, r9
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r12, r0
+ adcs r5, r2, r5
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ mov r0, r1
+ str r5, [sp, #68] @ 4-byte Spill
+ ldr r5, [sp, #52] @ 4-byte Reload
+ umlal r0, r4, r6, r8
+ adcs r2, r2, r5
+ ldr r5, [sp] @ 4-byte Reload
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r2, r5, r2
+ umull r5, r10, r6, lr
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r2, r7, r2
+ umull r7, r12, r6, r8
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adc r2, r2, #0
+ adds r1, r1, r7
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r2, [sp, #36] @ 4-byte Spill
+ umull r7, r2, r6, r3
+ ldr r3, [sp, #8] @ 4-byte Reload
+ umull r11, lr, r6, r1
+ adcs r6, r12, r9
+ adcs r3, r3, r7
+ adcs r12, r2, r11
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r2, lr, r5
+ ldr r5, [sp, #80] @ 4-byte Reload
+ ldr lr, [sp, #76] @ 4-byte Reload
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adc r9, r10, #0
+ adds r6, r3, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ ldr r3, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r6, r0, r3
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r11, r0, r6, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r0, r7, r6, lr
+ mov r10, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ mov r2, r7
+ umlal r2, r10, r6, r8
+ adcs r0, r4, r0
+ ldr r4, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r12, r0
+ ldr r12, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ umull r4, r0, r6, r12
+ str r4, [sp, #12] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ umull r4, r0, r6, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r9, r0, r6, r8
+ adds r7, r7, r9
+ adcs r0, r0, r11
+ ldr r0, [sp, #24] @ 4-byte Reload
+ umull r7, r9, r6, r1
+ ldr r6, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r9, r4
+ ldr r4, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r7, r4, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r4, [sp, #32] @ 4-byte Reload
+ adc r11, r0, #0
+ adds r4, r6, r4
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r2, r2, r4
+ mul r4, r2, r3
+ str r2, [sp, #36] @ 4-byte Spill
+ umull r9, r2, r4, r5
+ ldr r5, [sp, #68] @ 4-byte Reload
+ str r2, [sp, #28] @ 4-byte Spill
+ umull r3, r2, r4, lr
+ mov r6, r2
+ str r3, [sp, #32] @ 4-byte Spill
+ mov r3, r9
+ umlal r6, r3, r4, r8
+ adcs r5, r10, r5
+ str r5, [sp, #68] @ 4-byte Spill
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r5, r0, r5
+ ldr r0, [sp, #16] @ 4-byte Reload
+ str r5, [sp, #64] @ 4-byte Spill
+ ldr r5, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ umull r7, r0, r4, r12
+ mov r12, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r11, r0, r4, r8
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [sp, #96] @ 4-byte Reload
+ umull r1, r5, r4, r12
+ adds r2, r2, r11
+ adcs r0, r0, r9
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r0, [sp, #28] @ 4-byte Reload
+ umull lr, r10, r4, r7
+ ldr r4, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ adcs r1, r5, lr
+ ldr r5, [sp, #24] @ 4-byte Reload
+ adcs r2, r10, r2
+ adc lr, r5, #0
+ ldr r5, [sp, #32] @ 4-byte Reload
+ adds r4, r5, r4
+ ldr r5, [sp, #76] @ 4-byte Reload
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r9, r6, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adcs r3, r3, r4
+ str r3, [sp, #68] @ 4-byte Spill
+ ldr r3, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ mul r0, r9, r1
+ umull r2, r4, r0, r5
+ umull r1, r3, r0, r8
+ adds r1, r4, r1
+ str r2, [sp, #92] @ 4-byte Spill
+ umull r1, r2, r0, r6
+ adcs r3, r3, r1
+ umlal r4, r1, r0, r8
+ umull r3, lr, r0, r12
+ adcs r10, r2, r3
+ umull r3, r2, r0, r7
+ adcs r11, lr, r3
+ ldr lr, [sp, #84] @ 4-byte Reload
+ umull r7, r3, r0, lr
+ adcs r2, r2, r7
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adc r0, r3, #0
+ ldr r3, [sp, #92] @ 4-byte Reload
+ adds r3, r3, r9
+ ldr r3, [sp, #68] @ 4-byte Reload
+ adcs r3, r4, r3
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r12, r1, r7
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r10, r10, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r9, r11, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r7, r2, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r11, r0, #0
+ subs r0, r3, r5
+ sbcs r5, r12, r8
+ mov r8, r7
+ sbcs r2, r10, r6
+ ldr r6, [sp, #96] @ 4-byte Reload
+ sbcs r4, r9, r4
+ sbcs r6, r7, r6
+ sbcs r7, r1, lr
+ mov lr, r1
+ sbc r1, r11, #0
+ ands r1, r1, #1
+ movne r0, r3
+ ldr r3, [sp, #72] @ 4-byte Reload
+ movne r5, r12
+ movne r2, r10
+ cmp r1, #0
+ movne r4, r9
+ movne r6, r8
+ movne r7, lr
+ str r0, [r3]
+ str r5, [r3, #4]
+ str r2, [r3, #8]
+ str r4, [r3, #12]
+ str r6, [r3, #16]
+ str r7, [r3, #20]
+ add sp, sp, #100
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end85:
+ .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre6L
+ .align 2
+ .type mcl_fp_addPre6L,%function
+mcl_fp_addPre6L: @ @mcl_fp_addPre6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ ldm r1, {r9, r12, lr}
+ ldr r10, [r1, #12]
+ ldr r5, [r1, #16]
+ ldr r8, [r1, #20]
+ ldm r2, {r6, r7}
+ add r4, r2, #8
+ ldm r4, {r1, r3, r4}
+ ldr r2, [r2, #20]
+ adds r6, r6, r9
+ adcs r7, r7, r12
+ add r12, r0, #8
+ adcs r1, r1, lr
+ stm r0, {r6, r7}
+ adcs r3, r3, r10
+ adcs r5, r4, r5
+ adcs r2, r2, r8
+ stm r12, {r1, r3, r5}
+ str r2, [r0, #20]
+ mov r0, #0
+ adc r0, r0, #0
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end86:
+ .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre6L
+ .align 2
+ .type mcl_fp_subPre6L,%function
+mcl_fp_subPre6L: @ @mcl_fp_subPre6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ ldm r2, {r9, r12, lr}
+ ldr r10, [r2, #12]
+ ldr r5, [r2, #16]
+ ldr r8, [r2, #20]
+ ldm r1, {r6, r7}
+ add r4, r1, #8
+ ldm r4, {r2, r3, r4}
+ ldr r1, [r1, #20]
+ subs r6, r6, r9
+ sbcs r7, r7, r12
+ add r12, r0, #8
+ sbcs r2, r2, lr
+ stm r0, {r6, r7}
+ sbcs r3, r3, r10
+ sbcs r5, r4, r5
+ sbcs r1, r1, r8
+ stm r12, {r2, r3, r5}
+ str r1, [r0, #20]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end87:
+ .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_6L
+ .align 2
+ .type mcl_fp_shr1_6L,%function
+mcl_fp_shr1_6L: @ @mcl_fp_shr1_6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldr r3, [r1, #4]
+ ldr r12, [r1]
+ ldr lr, [r1, #12]
+ ldr r2, [r1, #8]
+ ldr r4, [r1, #16]
+ ldr r1, [r1, #20]
+ lsrs r5, r3, #1
+ lsr r3, r3, #1
+ rrx r12, r12
+ lsrs r5, lr, #1
+ orr r6, r3, r2, lsl #31
+ lsr r5, lr, #1
+ rrx r2, r2
+ lsrs r3, r1, #1
+ lsr r1, r1, #1
+ str r12, [r0]
+ str r6, [r0, #4]
+ orr r5, r5, r4, lsl #31
+ rrx r3, r4
+ str r2, [r0, #8]
+ str r5, [r0, #12]
+ str r3, [r0, #16]
+ str r1, [r0, #20]
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end88:
+ .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add6L
+ .align 2
+ .type mcl_fp_add6L,%function
+mcl_fp_add6L: @ @mcl_fp_add6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldm r1, {r9, r12, lr}
+ ldr r7, [r2]
+ ldr r10, [r1, #12]
+ ldr r11, [r1, #16]
+ ldr r8, [r1, #20]
+ ldmib r2, {r1, r4, r5, r6}
+ ldr r2, [r2, #20]
+ adds r7, r7, r9
+ adcs r12, r1, r12
+ add r1, r0, #8
+ adcs r4, r4, lr
+ stm r0, {r7, r12}
+ adcs r5, r5, r10
+ adcs r6, r6, r11
+ stm r1, {r4, r5, r6}
+ adcs r2, r2, r8
+ mov r1, #0
+ str r2, [r0, #20]
+ adc r9, r1, #0
+ ldm r3, {r1, lr}
+ ldr r10, [r3, #8]
+ ldr r11, [r3, #12]
+ ldr r8, [r3, #16]
+ ldr r3, [r3, #20]
+ subs r7, r7, r1
+ sbcs r1, r12, lr
+ sbcs r10, r4, r10
+ sbcs r12, r5, r11
+ sbcs lr, r6, r8
+ sbcs r4, r2, r3
+ sbc r2, r9, #0
+ tst r2, #1
+ streq r7, [r0]
+ stmibeq r0, {r1, r10, r12, lr}
+ streq r4, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end89:
+ .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF6L
+ .align 2
+ .type mcl_fp_addNF6L,%function
+mcl_fp_addNF6L: @ @mcl_fp_addNF6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ add r11, r1, #8
+ ldm r1, {r12, lr}
+ ldm r11, {r9, r10, r11}
+ ldr r7, [r2]
+ ldr r8, [r1, #20]
+ ldmib r2, {r1, r4, r5, r6}
+ ldr r2, [r2, #20]
+ adds r7, r7, r12
+ adcs r1, r1, lr
+ adcs r4, r4, r9
+ adcs r9, r5, r10
+ adcs lr, r6, r11
+ add r11, r3, #8
+ adc r12, r2, r8
+ ldm r3, {r2, r6}
+ ldm r11, {r5, r8, r10, r11}
+ subs r2, r7, r2
+ sbcs r6, r1, r6
+ sbcs r5, r4, r5
+ sbcs r3, r9, r8
+ sbcs r8, lr, r10
+ sbc r10, r12, r11
+ asr r11, r10, #31
+ cmp r11, #0
+ movlt r2, r7
+ movlt r6, r1
+ movlt r5, r4
+ cmp r11, #0
+ movlt r3, r9
+ movlt r8, lr
+ movlt r10, r12
+ str r2, [r0]
+ str r6, [r0, #4]
+ str r5, [r0, #8]
+ str r3, [r0, #12]
+ str r8, [r0, #16]
+ str r10, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end90:
+ .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub6L
+ .align 2
+ .type mcl_fp_sub6L,%function
+mcl_fp_sub6L: @ @mcl_fp_sub6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldr r9, [r2]
+ ldmib r2, {r8, r12, lr}
+ ldr r10, [r2, #16]
+ ldr r11, [r2, #20]
+ ldm r1, {r2, r4, r5, r6, r7}
+ ldr r1, [r1, #20]
+ subs r9, r2, r9
+ sbcs r2, r4, r8
+ str r9, [r0]
+ sbcs r12, r5, r12
+ sbcs lr, r6, lr
+ sbcs r4, r7, r10
+ stmib r0, {r2, r12, lr}
+ sbcs r5, r1, r11
+ mov r1, #0
+ str r4, [r0, #16]
+ sbc r1, r1, #0
+ str r5, [r0, #20]
+ tst r1, #1
+ popeq {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ moveq pc, lr
+ ldm r3, {r1, r6, r7, r8, r10}
+ ldr r3, [r3, #20]
+ adds r1, r1, r9
+ adcs r2, r6, r2
+ adcs r7, r7, r12
+ adcs r6, r8, lr
+ stm r0, {r1, r2, r7}
+ adcs r4, r10, r4
+ str r6, [r0, #12]
+ adc r3, r3, r5
+ str r4, [r0, #16]
+ str r3, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end91:
+ .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF6L
+ .align 2
+ .type mcl_fp_subNF6L,%function
+mcl_fp_subNF6L: @ @mcl_fp_subNF6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ add r11, r2, #8
+ ldm r2, {r12, lr}
+ ldm r11, {r9, r10, r11}
+ ldr r7, [r1]
+ ldr r8, [r2, #20]
+ ldmib r1, {r2, r4, r5, r6}
+ ldr r1, [r1, #20]
+ subs r7, r7, r12
+ sbcs r2, r2, lr
+ sbcs r9, r4, r9
+ sbcs lr, r5, r10
+ ldr r5, [r3, #4]
+ sbcs r12, r6, r11
+ ldr r6, [r3]
+ add r11, r3, #8
+ sbc r1, r1, r8
+ ldm r11, {r4, r8, r10, r11}
+ adds r6, r7, r6
+ adcs r5, r2, r5
+ adcs r4, r9, r4
+ adcs r3, lr, r8
+ adcs r8, r12, r10
+ adc r10, r1, r11
+ asr r11, r1, #31
+ cmp r11, #0
+ movge r6, r7
+ movge r5, r2
+ movge r4, r9
+ cmp r11, #0
+ movge r3, lr
+ movge r8, r12
+ movge r10, r1
+ str r6, [r0]
+ str r5, [r0, #4]
+ str r4, [r0, #8]
+ str r3, [r0, #12]
+ str r8, [r0, #16]
+ str r10, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end92:
+ .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add6L
+ .align 2
+ .type mcl_fpDbl_add6L,%function
+mcl_fpDbl_add6L: @ @mcl_fpDbl_add6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #32
+ sub sp, sp, #32
+ ldm r1, {r12, lr}
+ ldr r8, [r1, #8]
+ ldr r10, [r1, #12]
+ ldmib r2, {r6, r7}
+ ldr r5, [r2, #16]
+ ldr r11, [r2]
+ ldr r4, [r2, #12]
+ str r5, [sp] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ adds r9, r11, r12
+ add r11, r1, #32
+ adcs r6, r6, lr
+ add lr, r1, #16
+ adcs r7, r7, r8
+ str r5, [sp, #4] @ 4-byte Spill
+ ldr r5, [r2, #24]
+ str r5, [sp, #16] @ 4-byte Spill
+ ldr r5, [r2, #28]
+ str r5, [sp, #28] @ 4-byte Spill
+ ldr r5, [r2, #32]
+ str r5, [sp, #8] @ 4-byte Spill
+ ldr r5, [r2, #36]
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [r2, #40]
+ ldr r2, [r2, #44]
+ str r5, [sp, #20] @ 4-byte Spill
+ str r2, [sp, #24] @ 4-byte Spill
+ adcs r5, r4, r10
+ ldm r11, {r4, r8, r11}
+ ldr r10, [r1, #44]
+ ldm lr, {r1, r2, r12, lr}
+ str r9, [r0]
+ stmib r0, {r6, r7}
+ ldr r6, [sp] @ 4-byte Reload
+ str r5, [r0, #12]
+ ldr r5, [sp, #4] @ 4-byte Reload
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r1, r6, r1
+ adcs r2, r5, r2
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r5, [r3]
+ str r2, [r0, #20]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r1, r1, r12
+ adcs r2, r2, lr
+ adcs r12, r7, r4
+ ldr r7, [sp, #12] @ 4-byte Reload
+ mov r4, #0
+ adcs r9, r7, r8
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r8, r7, r11
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs lr, r7, r10
+ adc r7, r4, #0
+ ldmib r3, {r4, r6, r10, r11}
+ subs r5, r1, r5
+ ldr r3, [r3, #20]
+ sbcs r4, r2, r4
+ sbcs r6, r12, r6
+ sbcs r10, r9, r10
+ sbcs r11, r8, r11
+ sbcs r3, lr, r3
+ sbc r7, r7, #0
+ ands r7, r7, #1
+ movne r5, r1
+ movne r4, r2
+ movne r6, r12
+ cmp r7, #0
+ add r1, r0, #32
+ movne r10, r9
+ movne r11, r8
+ movne r3, lr
+ str r5, [r0, #24]
+ str r4, [r0, #28]
+ stm r1, {r6, r10, r11}
+ str r3, [r0, #44]
+ add sp, sp, #32
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end93:
+ .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub6L
+ .align 2
+ .type mcl_fpDbl_sub6L,%function
+mcl_fpDbl_sub6L: @ @mcl_fpDbl_sub6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ ldr r6, [r2, #8]
+ ldr r7, [r2, #32]
+ add r10, r1, #12
+ str r6, [sp] @ 4-byte Spill
+ ldr r6, [r2, #12]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [r2, #20]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r6, [sp, #12] @ 4-byte Spill
+ ldr r6, [r2, #24]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r1, #44]
+ str r6, [sp, #16] @ 4-byte Spill
+ ldr r6, [r2, #28]
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #4]
+ ldr r2, [r2]
+ str r6, [sp, #20] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldm r1, {r11, r12, lr}
+ ldr r6, [sp] @ 4-byte Reload
+ subs r2, r11, r2
+ ldr r11, [r1, #40]
+ sbcs r7, r12, r7
+ ldr r12, [r1, #36]
+ ldr r1, [r1, #32]
+ sbcs lr, lr, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ stm r0, {r2, r7, lr}
+ mov lr, #0
+ ldr r2, [sp, #8] @ 4-byte Reload
+ sbcs r4, r4, r6
+ str r4, [r0, #12]
+ sbcs r2, r5, r2
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #16]
+ ldr r2, [sp, #12] @ 4-byte Reload
+ sbcs r2, r8, r2
+ str r2, [r0, #20]
+ ldr r2, [sp, #16] @ 4-byte Reload
+ sbcs r7, r9, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ sbcs r6, r10, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ sbcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ sbcs r10, r12, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs r9, r11, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ sbcs r8, r5, r2
+ sbc r12, lr, #0
+ ldm r3, {r2, r4, r5, lr}
+ ldr r11, [r3, #16]
+ ldr r3, [r3, #20]
+ adds r2, r7, r2
+ adcs r4, r6, r4
+ adcs r5, r1, r5
+ adcs lr, r10, lr
+ adcs r11, r9, r11
+ adc r3, r8, r3
+ ands r12, r12, #1
+ moveq r2, r7
+ moveq r4, r6
+ moveq r5, r1
+ cmp r12, #0
+ moveq lr, r10
+ moveq r11, r9
+ moveq r3, r8
+ str r2, [r0, #24]
+ str r4, [r0, #28]
+ str r5, [r0, #32]
+ str lr, [r0, #36]
+ str r11, [r0, #40]
+ str r3, [r0, #44]
+ add sp, sp, #44
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end94:
+ .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre7L
+ .align 2
+ .type mcl_fp_mulUnitPre7L,%function
+mcl_fp_mulUnitPre7L: @ @mcl_fp_mulUnitPre7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r11, [r1, #12]
+ ldr r10, [r1, #16]
+ ldr r9, [r1, #20]
+ ldr r8, [r1, #24]
+ umull r7, r1, lr, r2
+ umull lr, r4, r12, r2
+ mov r5, r4
+ mov r6, r7
+ str lr, [r0]
+ umlal r5, r6, r3, r2
+ stmib r0, {r5, r6}
+ umull r6, r5, r3, r2
+ adds r3, r4, r6
+ umull r3, r6, r11, r2
+ adcs r7, r5, r7
+ adcs r1, r1, r3
+ str r1, [r0, #12]
+ umull r1, r3, r10, r2
+ adcs r1, r6, r1
+ str r1, [r0, #16]
+ umull r1, r7, r9, r2
+ adcs r1, r3, r1
+ str r1, [r0, #20]
+ umull r1, r3, r8, r2
+ adcs r1, r7, r1
+ str r1, [r0, #24]
+ adc r1, r3, #0
+ str r1, [r0, #28]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end95:
+ .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre7L
+ .align 2
+ .type mcl_fpDbl_mulPre7L,%function
+mcl_fpDbl_mulPre7L: @ @mcl_fpDbl_mulPre7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #68
+ sub sp, sp, #68
+ mov r3, r2
+ ldr r7, [r1]
+ ldr lr, [r1, #4]
+ mov r9, r0
+ ldr r0, [r1, #8]
+ ldr r2, [r1, #12]
+ ldr r10, [r1,
#16] + ldr r8, [r1, #20] + str r3, [sp, #64] @ 4-byte Spill + ldr r3, [r3] + str r9, [sp, #60] @ 4-byte Spill + str r7, [sp, #28] @ 4-byte Spill + str r0, [sp, #56] @ 4-byte Spill + str r2, [sp, #44] @ 4-byte Spill + umull r5, r4, r7, r3 + umull r6, r12, lr, r3 + adds r6, r4, r6 + str r5, [sp, #48] @ 4-byte Spill + umull r5, r6, r0, r3 + adcs r7, r12, r5 + umlal r4, r5, lr, r3 + umull r7, r11, r2, r3 + adcs r0, r6, r7 + str r0, [sp, #52] @ 4-byte Spill + umull r6, r0, r10, r3 + adcs r2, r11, r6 + umull r11, r7, r8, r3 + ldr r6, [sp, #28] @ 4-byte Reload + adcs r0, r0, r11 + str r2, [sp, #40] @ 4-byte Spill + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r1, #24] + umull r11, r12, r0, r3 + adcs r2, r7, r11 + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [sp, #48] @ 4-byte Reload + str r2, [r9] + ldr r2, [sp, #64] @ 4-byte Reload + ldr r3, [r2, #4] + umull r11, r7, r6, r3 + str r7, [sp, #32] @ 4-byte Spill + adc r7, r12, #0 + str r7, [sp, #16] @ 4-byte Spill + adds r7, r11, r4 + str r7, [sp, #48] @ 4-byte Spill + umull r4, r7, lr, r3 + str r7, [sp, #28] @ 4-byte Spill + adcs r7, r4, r5 + str r7, [sp, #12] @ 4-byte Spill + ldr r7, [sp, #44] @ 4-byte Reload + umull r4, r5, r7, r3 + ldr r7, [sp, #56] @ 4-byte Reload + str r5, [sp, #24] @ 4-byte Spill + umull r5, r6, r7, r3 + ldr r7, [sp, #52] @ 4-byte Reload + str r6, [sp, #44] @ 4-byte Spill + ldr r6, [sp, #20] @ 4-byte Reload + adcs r11, r5, r7 + ldr r7, [sp, #40] @ 4-byte Reload + ldr r5, [sp, #12] @ 4-byte Reload + adcs lr, r4, r7 + umull r9, r7, r10, r3 + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [sp, #36] @ 4-byte Reload + adcs r7, r9, r7 + umull r4, r9, r8, r3 + adcs r4, r4, r6 + umull r6, r12, r0, r3 + ldr r0, [sp, #16] @ 4-byte Reload + adcs r3, r6, r0 + mov r0, #0 + adc r6, r0, #0 + ldr r0, [sp, #32] @ 4-byte Reload + adds r8, r5, r0 + ldr r0, [sp, #28] @ 4-byte Reload + adcs r5, r11, r0 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, lr, r0 + ldr lr, [r1, #12] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r7, r7, r0 + ldr r0, [sp, #56] @ 4-byte Reload + str r7, [sp, #24] @ 4-byte Spill + adcs r7, r4, r0 + ldr r0, [sp, #60] @ 4-byte Reload + ldr r4, [r1, #4] + adcs r3, r3, r9 + ldr r9, [r1, #8] + str r7, [sp, #36] @ 4-byte Spill + str r3, [sp, #40] @ 4-byte Spill + adc r3, r6, r12 + ldr r6, [r2, #8] + str r3, [sp, #44] @ 4-byte Spill + ldr r3, [sp, #48] @ 4-byte Reload + str r4, [sp, #52] @ 4-byte Spill + str r3, [r0, #4] + ldr r3, [r1] + umull r12, r7, r3, r6 + str r3, [sp, #56] @ 4-byte Spill + str r7, [sp, #32] @ 4-byte Spill + adds r3, r12, r8 + umull r7, r0, r4, r6 + ldr r12, [r1, #24] + str r0, [sp, #28] @ 4-byte Spill + adcs r2, r7, r5 + umull r7, r0, r9, r6 + str r3, [sp, #48] @ 4-byte Spill + ldr r10, [sp, #32] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #12] @ 4-byte Spill + umull r5, r0, lr, r6 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r5, r0 + ldr r5, [sp, #12] @ 4-byte Reload + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [r1, #16] + umull r11, r3, r0, r6 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + str r3, [sp, #8] @ 4-byte Spill + ldr r3, [r1, #20] + adcs r11, r11, r0 + ldr r0, [sp, #40] @ 4-byte Reload + umull r8, r4, r3, r6 + adcs r8, r8, r0 + umull r7, r0, r12, r6 + ldr r6, [sp, #44] @ 4-byte Reload + adcs r6, r7, r6 + mov r7, #0 + adc r7, r7, #0 + adds r2, r2, r10 + str r2, [sp] @ 4-byte Spill + ldr r2, [sp, #28] @ 4-byte Reload + adcs r2, r5, r2 + 
ldr r5, [sp, #4] @ 4-byte Reload + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [sp, #20] @ 4-byte Reload + adcs r10, r5, r2 + ldr r2, [sp, #16] @ 4-byte Reload + adcs r11, r11, r2 + ldr r2, [sp, #8] @ 4-byte Reload + adcs r2, r8, r2 + ldr r8, [sp, #56] @ 4-byte Reload + str r2, [sp, #28] @ 4-byte Spill + adcs r2, r6, r4 + adc r0, r7, r0 + ldr r7, [sp, #60] @ 4-byte Reload + str r2, [sp, #36] @ 4-byte Spill + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + str r0, [r7, #8] + ldr r0, [sp, #64] @ 4-byte Reload + ldr r6, [r0, #12] + umull r2, r4, lr, r6 + str r4, [sp, #48] @ 4-byte Spill + umull lr, r4, r9, r6 + str r4, [sp, #44] @ 4-byte Spill + ldr r4, [sp, #52] @ 4-byte Reload + umull r9, r5, r4, r6 + str r5, [sp, #32] @ 4-byte Spill + umull r4, r5, r8, r6 + str r5, [sp, #52] @ 4-byte Spill + ldr r5, [sp] @ 4-byte Reload + adds r4, r4, r5 + umull r5, r8, r3, r6 + str r4, [sp, #56] @ 4-byte Spill + ldr r4, [sp, #12] @ 4-byte Reload + adcs r9, r9, r4 + adcs lr, lr, r10 + adcs r11, r2, r11 + ldr r2, [sp, #24] @ 4-byte Reload + umull r4, r10, r2, r6 + ldr r2, [sp, #28] @ 4-byte Reload + adcs r4, r4, r2 + ldr r2, [sp, #36] @ 4-byte Reload + adcs r3, r5, r2 + umull r5, r2, r12, r6 + ldr r6, [sp, #40] @ 4-byte Reload + adcs r12, r5, r6 + ldr r6, [sp, #52] @ 4-byte Reload + mov r5, #0 + adc r5, r5, #0 + adds r9, r9, r6 + ldr r6, [sp, #32] @ 4-byte Reload + adcs lr, lr, r6 + ldr r6, [sp, #44] @ 4-byte Reload + adcs r6, r11, r6 + ldr r11, [r1, #8] + str r6, [sp, #20] @ 4-byte Spill + ldr r6, [sp, #48] @ 4-byte Reload + adcs r4, r4, r6 + adcs r3, r3, r10 + str r4, [sp, #28] @ 4-byte Spill + ldr r4, [r1, #12] + adcs r12, r12, r8 + str r3, [sp, #40] @ 4-byte Spill + adc r2, r5, r2 + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [sp, #56] @ 4-byte Reload + str r2, [r7, #12] + ldr r7, [r0, #16] + ldr r0, [r1] + ldr r2, [r1, #4] + umull r8, r3, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + str r2, [sp, #52] @ 4-byte Spill + adds r0, r8, r9 + str r3, [sp, #36] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + umull r6, r0, r2, r7 + ldr r2, [r1, #24] + str r0, [sp, #32] @ 4-byte Spill + adcs r0, r6, lr + ldr lr, [r1, #16] + str r0, [sp, #16] @ 4-byte Spill + umull r6, r0, r11, r7 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r6, r0 + mov r6, #0 + str r0, [sp, #12] @ 4-byte Spill + umull r3, r0, r4, r7 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r3, r0 + ldr r3, [r1, #20] + str r0, [sp, #8] @ 4-byte Spill + umull r10, r0, lr, r7 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + umull r9, r5, r3, r7 + adcs r10, r10, r0 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r9, r9, r12 + umull r8, r12, r2, r7 + adcs r7, r8, r0 + ldr r0, [sp, #36] @ 4-byte Reload + adc r8, r6, #0 + ldr r6, [sp, #16] @ 4-byte Reload + adds r0, r6, r0 + ldr r6, [sp, #12] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #8] @ 4-byte Reload + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r10, r10, r0 + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #24] @ 4-byte Spill + adcs r0, r7, r5 + ldr r7, [sp, #48] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + adc r0, r8, r12 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + str r7, [r0, #16] + ldr r7, [sp, #64] @ 4-byte Reload + ldr r7, [r7, #20] + umull 
r8, r6, r4, r7 + str r6, [sp, #48] @ 4-byte Spill + umull r4, r6, r11, r7 + str r6, [sp, #40] @ 4-byte Spill + ldr r6, [sp, #52] @ 4-byte Reload + umull r11, r5, r6, r7 + ldr r6, [sp, #56] @ 4-byte Reload + str r5, [sp, #28] @ 4-byte Spill + umull r5, r9, r6, r7 + ldr r6, [sp, #44] @ 4-byte Reload + adds r6, r5, r6 + str r6, [sp, #44] @ 4-byte Spill + ldr r6, [sp, #16] @ 4-byte Reload + adcs r11, r11, r6 + ldr r6, [sp, #12] @ 4-byte Reload + adcs r12, r4, r6 + ldr r6, [sp, #24] @ 4-byte Reload + adcs r10, r8, r10 + umull r5, r8, lr, r7 + umull r4, lr, r3, r7 + ldr r3, [sp, #32] @ 4-byte Reload + adcs r5, r5, r6 + adcs r3, r4, r3 + umull r4, r6, r2, r7 + ldr r2, [sp, #36] @ 4-byte Reload + adcs r2, r4, r2 + mov r4, #0 + adc r4, r4, #0 + adds r7, r11, r9 + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [sp, #28] @ 4-byte Reload + adcs r7, r12, r7 + str r7, [sp, #32] @ 4-byte Spill + ldr r7, [sp, #40] @ 4-byte Reload + adcs r9, r10, r7 + ldr r7, [sp, #48] @ 4-byte Reload + adcs r11, r5, r7 + adcs r3, r3, r8 + adcs r2, r2, lr + str r3, [sp, #40] @ 4-byte Spill + str r2, [sp, #52] @ 4-byte Spill + adc r2, r4, r6 + ldr r6, [r1] + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [sp, #44] @ 4-byte Reload + str r2, [r0, #20] + ldr r0, [sp, #64] @ 4-byte Reload + ldr r4, [r0, #24] + ldmib r1, {r0, r3, r5} + umull r12, r2, r5, r4 + str r2, [sp, #64] @ 4-byte Spill + umull r5, r2, r3, r4 + umull r3, r10, r0, r4 + umull r0, r8, r6, r4 + ldr r6, [r1, #16] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [sp, #36] @ 4-byte Reload + adds r0, r0, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs lr, r3, r0 + adcs r9, r5, r9 + adcs r11, r12, r11 + umull r0, r12, r6, r4 + ldr r6, [r1, #20] + ldr r1, [r1, #24] + adcs r0, r0, r2 + ldr r2, [sp, #52] @ 4-byte Reload + umull r3, r5, r6, r4 + umull r6, r7, r1, r4 + ldr r1, [sp, #56] @ 4-byte Reload + mov r4, #0 + adcs r3, r3, r2 + ldr r2, [sp, #44] @ 4-byte Reload + adcs r1, r6, r1 + adc r4, r4, #0 + adds r6, lr, r8 + adcs lr, r9, r10 + adcs r8, r11, r2 + ldr r2, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #60] @ 4-byte Reload + adcs r3, r3, r12 + adcs r1, r1, r5 + ldr r5, [sp, #48] @ 4-byte Reload + adc r7, r4, r7 + add r12, r2, #24 + stm r12, {r5, r6, lr} + str r8, [r2, #36] + str r0, [r2, #40] + str r3, [r2, #44] + str r1, [r2, #48] + str r7, [r2, #52] + add sp, sp, #68 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end96: + .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre7L + .align 2 + .type mcl_fpDbl_sqrPre7L,%function +mcl_fpDbl_sqrPre7L: @ @mcl_fpDbl_sqrPre7L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #60 + sub sp, sp, #60 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r1, #16] + ldr r9, [r1, #20] + str r0, [sp, #8] @ 4-byte Spill + ldm r1, {r2, r3} + ldr r0, [r1, #8] + ldr r11, [r1, #12] + umull r6, r7, r2, r2 + str r0, [sp, #48] @ 4-byte Spill + umull r5, r4, r0, r2 + umull r12, r0, r3, r2 + umull r8, r10, r11, r2 + adds lr, r7, r12 + str r6, [sp, #32] @ 4-byte Spill + str r0, [sp, #52] @ 4-byte Spill + adcs r6, r0, r5 + umlal r7, r5, r3, r2 + adcs r0, r4, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + umull r4, r6, r0, r2 + adcs r4, r10, r4 + mov r10, r9 + str r4, [sp, #40] @ 4-byte Spill + umull r4, r8, r10, r2 + adcs r6, r6, r4 + str r6, [sp, #28] @ 4-byte Spill + ldr r6, [r1, #24] + umull lr, r9, r6, r2 
+ adcs r4, r8, lr + str r4, [sp, #20] @ 4-byte Spill + adc r4, r9, #0 + adds r2, r12, r7 + ldr r12, [sp, #56] @ 4-byte Reload + str r2, [sp, #36] @ 4-byte Spill + umull r2, r7, r3, r3 + adcs r2, r2, r5 + str r7, [sp, #16] @ 4-byte Spill + umull r5, r8, r11, r3 + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [sp, #32] @ 4-byte Reload + str r2, [r12] + umull lr, r2, r6, r3 + str r2, [sp, #32] @ 4-byte Spill + umull r6, r2, r10, r3 + str r2, [sp, #24] @ 4-byte Spill + umull r2, r10, r0, r3 + ldr r0, [sp, #48] @ 4-byte Reload + umull r7, r9, r0, r3 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r3, r7, r0 + ldr r0, [sp, #40] @ 4-byte Reload + adcs r7, r5, r0 + ldr r0, [sp, #28] @ 4-byte Reload + mov r5, #0 + adcs r2, r2, r0 + ldr r0, [sp, #20] @ 4-byte Reload + adcs r6, r6, r0 + ldr r0, [sp, #52] @ 4-byte Reload + adcs lr, lr, r4 + ldr r4, [sp, #12] @ 4-byte Reload + adc r5, r5, #0 + adds r11, r4, r0 + ldr r0, [sp, #16] @ 4-byte Reload + ldr r4, [r1, #4] + adcs r3, r3, r0 + ldr r0, [sp, #36] @ 4-byte Reload + str r4, [sp, #44] @ 4-byte Spill + adcs r7, r7, r9 + adcs r9, r2, r8 + ldr r2, [r1, #12] + str r0, [r12, #4] + ldr r0, [sp, #24] @ 4-byte Reload + adcs r12, r6, r10 + adcs r10, lr, r0 + ldr r0, [sp, #32] @ 4-byte Reload + ldr lr, [r1, #8] + adc r0, r5, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r1] + umull r8, r5, r0, lr + str r0, [sp, #48] @ 4-byte Spill + adds r0, r8, r11 + str r5, [sp, #32] @ 4-byte Spill + str r0, [sp, #52] @ 4-byte Spill + umull r5, r0, r4, lr + ldr r4, [r1, #16] + str r0, [sp, #28] @ 4-byte Spill + adcs r0, r5, r3 + str r0, [sp, #20] @ 4-byte Spill + umull r3, r0, lr, lr + str r0, [sp, #24] @ 4-byte Spill + adcs r0, r3, r7 + ldr r3, [r1, #20] + ldr r7, [sp, #40] @ 4-byte Reload + str r0, [sp, #16] @ 4-byte Spill + umull r0, r5, r2, lr + str r0, [sp, #12] @ 4-byte Spill + adcs r0, r0, r9 + ldr r9, [sp, #20] @ 4-byte Reload + str r5, [sp, #36] @ 4-byte Spill + str r0, [sp, #4] @ 4-byte Spill + umull r11, r0, r4, lr + str r0, [sp, #8] @ 4-byte Spill + umull r8, r0, r3, lr + adcs r11, r11, r12 + str r0, [sp] @ 4-byte Spill + ldr r0, [r1, #24] + adcs r8, r8, r10 + umull r10, r12, r0, lr + adcs lr, r10, r7 + mov r7, #0 + adc r10, r7, #0 + ldr r7, [sp, #32] @ 4-byte Reload + adds r6, r9, r7 + ldr r7, [sp, #28] @ 4-byte Reload + str r6, [sp, #20] @ 4-byte Spill + ldr r6, [sp, #16] @ 4-byte Reload + adcs r6, r6, r7 + ldr r7, [sp, #24] @ 4-byte Reload + str r6, [sp, #16] @ 4-byte Spill + ldr r6, [sp, #4] @ 4-byte Reload + adcs r6, r6, r7 + adcs r11, r11, r5 + ldr r5, [sp, #8] @ 4-byte Reload + adcs r5, r8, r5 + str r5, [sp, #8] @ 4-byte Spill + ldr r5, [sp] @ 4-byte Reload + adcs r7, lr, r5 + str r7, [sp, #4] @ 4-byte Spill + adc r7, r10, r12 + ldr r10, [sp, #48] @ 4-byte Reload + str r7, [sp] @ 4-byte Spill + umull r9, r7, r0, r2 + umull r5, r0, r3, r2 + str r7, [sp, #40] @ 4-byte Spill + ldr r7, [sp, #44] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + umull r3, r0, r4, r2 + str r0, [sp, #28] @ 4-byte Spill + umull r4, r0, r2, r2 + str r0, [sp, #24] @ 4-byte Spill + umull r8, lr, r10, r2 + umull r0, r12, r7, r2 + ldr r2, [sp, #20] @ 4-byte Reload + mov r7, #0 + adds r8, r8, r2 + ldr r2, [sp, #16] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #12] @ 4-byte Reload + adcs r6, r2, r6 + ldr r2, [sp, #8] @ 4-byte Reload + adcs r4, r4, r11 + adcs r3, r3, r2 + ldr r2, [sp, #4] @ 4-byte Reload + adcs r5, r5, r2 + ldr r2, [sp] @ 4-byte Reload + adcs r2, r9, r2 + adc r9, r7, #0 + adds r0, r0, lr + adcs r7, r6, r12 + ldr r6, [sp, #36] @ 4-byte Reload + adcs r4, r4, r6 + ldr r6, 
[sp, #24] @ 4-byte Reload + adcs r11, r3, r6 + ldr r3, [sp, #28] @ 4-byte Reload + adcs r12, r5, r3 + ldr r3, [sp, #32] @ 4-byte Reload + ldr r5, [r1, #12] + adcs r10, r2, r3 + ldr r2, [sp, #40] @ 4-byte Reload + ldr r3, [sp, #56] @ 4-byte Reload + adc r2, r9, r2 + ldr r9, [r1, #4] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [sp, #52] @ 4-byte Reload + str r9, [sp, #16] @ 4-byte Spill + str r2, [r3, #8] + str r8, [r3, #12] + ldr r2, [r1] + ldr r3, [r1, #16] + ldr r8, [r1, #8] + umull lr, r6, r2, r3 + str r2, [sp, #48] @ 4-byte Spill + str r8, [sp, #4] @ 4-byte Spill + adds r0, lr, r0 + ldr lr, [r1, #24] + str r6, [sp, #36] @ 4-byte Spill + str r0, [sp, #52] @ 4-byte Spill + umull r0, r2, r9, r3 + adcs r0, r0, r7 + str r2, [sp, #32] @ 4-byte Spill + str r0, [sp, #20] @ 4-byte Spill + umull r7, r0, r8, r3 + str r0, [sp, #28] @ 4-byte Spill + adcs r0, r7, r4 + ldr r9, [sp, #20] @ 4-byte Reload + str r0, [sp, #8] @ 4-byte Spill + umull r7, r0, r5, r3 + str r0, [sp, #24] @ 4-byte Spill + adcs r0, r7, r11 + mov r7, #0 + str r0, [sp] @ 4-byte Spill + umull r11, r0, r3, r3 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [r1, #20] + adcs r11, r11, r12 + umull r12, r2, r0, r3 + adcs r4, r12, r10 + umull r10, r8, lr, r3 + ldr r3, [sp, #44] @ 4-byte Reload + str r2, [sp, #40] @ 4-byte Spill + adcs r3, r10, r3 + adc r10, r7, #0 + ldr r7, [sp, #36] @ 4-byte Reload + adds r6, r9, r7 + ldr r7, [sp, #32] @ 4-byte Reload + str r6, [sp, #36] @ 4-byte Spill + ldr r6, [sp, #8] @ 4-byte Reload + adcs r6, r6, r7 + ldr r7, [sp, #28] @ 4-byte Reload + str r6, [sp, #20] @ 4-byte Spill + ldr r6, [sp] @ 4-byte Reload + adcs r6, r6, r7 + ldr r7, [sp, #24] @ 4-byte Reload + str r6, [sp, #8] @ 4-byte Spill + adcs r11, r11, r7 + ldr r7, [sp, #12] @ 4-byte Reload + adcs r4, r4, r7 + adcs r2, r3, r2 + ldr r3, [sp, #4] @ 4-byte Reload + str r2, [sp, #24] @ 4-byte Spill + umull r6, r2, r5, r0 + adc r10, r10, r8 + str r2, [sp, #44] @ 4-byte Spill + umull r5, r2, r3, r0 + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [sp, #16] @ 4-byte Reload + umull r8, r3, r2, r0 + ldr r2, [sp, #48] @ 4-byte Reload + str r3, [sp, #28] @ 4-byte Spill + umull r3, r9, r2, r0 + ldr r2, [sp, #36] @ 4-byte Reload + adds r2, r3, r2 + ldr r3, [sp, #24] @ 4-byte Reload + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [sp, #20] @ 4-byte Reload + adcs r7, r8, r2 + ldr r2, [sp, #8] @ 4-byte Reload + adcs r5, r5, r2 + adcs r6, r6, r11 + adcs r2, r12, r4 + umull r4, r8, r0, r0 + adcs r4, r4, r3 + umull r3, r11, lr, r0 + adcs r0, r3, r10 + mov r3, #0 + adc r3, r3, #0 + adds r7, r7, r9 + str r7, [sp, #24] @ 4-byte Spill + ldr r7, [sp, #28] @ 4-byte Reload + adcs r9, r5, r7 + ldr r5, [sp, #32] @ 4-byte Reload + adcs r6, r6, r5 + str r6, [sp, #32] @ 4-byte Spill + ldr r6, [sp, #44] @ 4-byte Reload + adcs r10, r2, r6 + ldr r2, [sp, #40] @ 4-byte Reload + adcs r12, r4, r2 + ldr r2, [sp, #56] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + adc r0, r3, r11 + ldr r3, [r1, #24] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + str r0, [r2, #16] + ldr r0, [sp, #36] @ 4-byte Reload + str r0, [r2, #20] + ldm r1, {r0, r4} + ldr r5, [r1, #12] + ldr r2, [r1, #8] + umull lr, r6, r5, r3 + umull r5, r11, r2, r3 + umull r2, r8, r4, r3 + str r6, [sp, #52] @ 4-byte Spill + umull r4, r6, r0, r3 + ldr r0, [sp, #24] @ 4-byte Reload + adds r0, r4, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r9, r2, r9 + ldr r2, [sp, #52] @ 4-byte Reload + adcs r4, r5, r0 + ldr r0, [r1, #16] + ldr r1, [r1, #20] + adcs r10, 
lr, r10 + umull r7, lr, r0, r3 + adcs r0, r7, r12 + umull r7, r12, r1, r3 + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r7, r1 + umull r7, r5, r3, r3 + ldr r3, [sp, #48] @ 4-byte Reload + adcs r3, r7, r3 + mov r7, #0 + adc r7, r7, #0 + adds r6, r9, r6 + adcs r4, r4, r8 + adcs r8, r10, r11 + adcs r0, r0, r2 + adcs r1, r1, lr + adcs r2, r3, r12 + adc r3, r7, r5 + ldr r7, [sp, #56] @ 4-byte Reload + ldr r5, [sp, #40] @ 4-byte Reload + add r12, r7, #40 + str r5, [r7, #24] + str r6, [r7, #28] + str r4, [r7, #32] + str r8, [r7, #36] + stm r12, {r0, r1, r2, r3} + add sp, sp, #60 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end97: + .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L + .cantunwind + .fnend + + .globl mcl_fp_mont7L + .align 2 + .type mcl_fp_mont7L,%function +mcl_fp_mont7L: @ @mcl_fp_mont7L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #124 + sub sp, sp, #124 + str r0, [sp, #56] @ 4-byte Spill + mov r0, r2 + str r2, [sp, #60] @ 4-byte Spill + ldm r0, {r2, lr} + ldr r7, [r0, #8] + ldr r0, [r0, #12] + ldr r5, [r3, #-4] + ldr r6, [r3, #8] + ldr r9, [r3, #4] + ldr r11, [r1, #8] + ldr r12, [r1, #12] + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r1, #4] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [r1] + str r5, [sp, #80] @ 4-byte Spill + str r6, [sp, #116] @ 4-byte Spill + str r9, [sp, #108] @ 4-byte Spill + str r11, [sp, #104] @ 4-byte Spill + str r12, [sp, #72] @ 4-byte Spill + str r7, [sp, #68] @ 4-byte Spill + ldr r7, [r3] + umull r4, r8, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + mul r0, r4, r5 + str r4, [sp, #44] @ 4-byte Spill + umull r10, r4, r0, r6 + str r4, [sp, #32] @ 4-byte Spill + str r10, [sp, #8] @ 4-byte Spill + umull r4, r5, r0, r7 + str r7, [sp, #112] @ 4-byte Spill + ldr r7, [sp, #68] @ 4-byte Reload + str r4, [sp, #40] @ 4-byte Spill + mov r4, r5 + str r5, [sp, #4] @ 4-byte Spill + umlal r4, r10, r0, r9 + str r4, [sp, #36] @ 4-byte Spill + ldr r4, [r1, #24] + umull r6, r5, r4, r2 + str r4, [sp, #88] @ 4-byte Spill + ldr r4, [r1, #20] + ldr r1, [r1, #16] + str r6, [sp, #96] @ 4-byte Spill + str r5, [sp, #120] @ 4-byte Spill + umull r6, r5, r4, r2 + str r4, [sp, #64] @ 4-byte Spill + umull r9, r4, r1, r2 + str r1, [sp, #100] @ 4-byte Spill + str r6, [sp, #76] @ 4-byte Spill + str r5, [sp, #92] @ 4-byte Spill + str r4, [sp, #20] @ 4-byte Spill + umull r6, r5, r12, r2 + umull r12, r4, r11, r2 + umull r11, r1, r7, r2 + adds r7, r8, r11 + adcs r7, r1, r12 + adcs r1, r4, r6 + ldr r4, [sp, #20] @ 4-byte Reload + ldr r6, [sp, #108] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + adcs r1, r5, r9 + ldr r5, [r3, #12] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + str r5, [sp, #76] @ 4-byte Spill + adcs r1, r4, r1 + ldr r4, [sp, #92] @ 4-byte Reload + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + adcs r1, r4, r1 + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #120] @ 4-byte Reload + adc r1, r1, #0 + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [r3, #24] + umull r9, r4, r0, r1 + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [r3, #16] + str r4, [sp] @ 4-byte Spill + ldr r4, [r3, #20] + umull r3, r7, r0, r6 + ldr r6, [sp, #4] @ 4-byte Reload + str r1, [sp, #120] @ 4-byte Spill + adds r3, r6, r3 + str r4, [sp, #92] @ 4-byte Spill + umull r3, r6, r0, r5 + ldr r5, [sp, #8] @ 4-byte Reload + adcs r7, r7, r5 + ldr r5, [sp, #32] @ 4-byte Reload + adcs r11, r5, r3 + umull r7, r5, r0, r1 + adcs r1, r6, r7 + umull r7, r3, r0, r4 + ldr r4, 
[sp] @ 4-byte Reload + ldr r6, [sp, #40] @ 4-byte Reload + adcs r0, r5, r7 + ldr r5, [sp, #68] @ 4-byte Reload + adcs r3, r3, r9 + adc r7, r4, #0 + mov r4, #0 + umlal r8, r12, r5, r2 + ldr r2, [sp, #44] @ 4-byte Reload + adds r2, r6, r2 + mov r6, r5 + ldr r2, [sp, #36] @ 4-byte Reload + adcs r2, r2, r8 + str r2, [sp, #44] @ 4-byte Spill + adcs r2, r10, r12 + ldr r10, [sp, #84] @ 4-byte Reload + str r2, [sp, #40] @ 4-byte Spill + ldr r2, [sp, #28] @ 4-byte Reload + adcs r2, r11, r2 + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [sp, #24] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #20] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r3, r0 + umull r2, r3, lr, r5 + ldr r5, [sp, #72] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #20] @ 4-byte Spill + adc r0, r4, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + umull r12, r9, lr, r0 + ldr r0, [sp, #100] @ 4-byte Reload + umull r8, r4, lr, r0 + ldr r0, [sp, #104] @ 4-byte Reload + umull r1, r7, lr, r0 + umull r11, r0, lr, r10 + adds r2, r0, r2 + adcs r2, r3, r1 + umlal r0, r1, lr, r6 + ldr r6, [sp, #40] @ 4-byte Reload + umull r2, r3, lr, r5 + adcs r2, r7, r2 + adcs r10, r3, r8 + ldr r8, [sp, #64] @ 4-byte Reload + umull r7, r3, lr, r8 + adcs r4, r4, r7 + ldr r7, [sp, #44] @ 4-byte Reload + adcs r3, r3, r12 + adc r5, r9, #0 + adds r7, r7, r11 + adcs r0, r6, r0 + ldr r6, [sp, #108] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #80] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #96] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #20] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + mul r0, r7, r1 + ldr r1, [sp, #116] @ 4-byte Reload + umull lr, r12, r0, r6 + umull r3, r4, r0, r1 + ldr r1, [sp, #112] @ 4-byte Reload + mov r2, r3 + umull r9, r5, r0, r1 + mov r1, r5 + adds r5, r5, lr + umlal r1, r2, r0, r6 + ldr r6, [sp, #120] @ 4-byte Reload + adcs r3, r12, r3 + umull r5, lr, r0, r6 + ldr r6, [sp, #76] @ 4-byte Reload + umull r3, r12, r0, r6 + ldr r6, [sp, #92] @ 4-byte Reload + adcs r3, r4, r3 + adcs r12, r12, r5 + umull r4, r5, r0, r6 + adcs lr, lr, r4 + umull r6, r4, r0, r10 + adcs r0, r5, r6 + adc r4, r4, #0 + adds r5, r9, r7 + ldr r9, [sp, #84] @ 4-byte Reload + ldr r5, [sp, #44] @ 4-byte Reload + adcs r1, r1, r5 + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r2, r1 + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adcs r1, r3, r1 + ldr r3, [sp, #68] @ 4-byte Reload + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + adcs r1, r12, r1 + ldr r12, [sp, #48] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #28] @ 4-byte Reload + adcs r1, lr, r1 + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, 
#20] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + umull r2, r1, r12, r0 + umull r10, r0, r12, r8 + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + str r2, [sp, #8] @ 4-byte Spill + str r1, [sp, #12] @ 4-byte Spill + umull r2, lr, r12, r3 + umull r7, r8, r12, r0 + ldr r0, [sp, #72] @ 4-byte Reload + umull r5, r6, r12, r0 + ldr r0, [sp, #104] @ 4-byte Reload + umull r1, r4, r12, r0 + umull r11, r0, r12, r9 + adds r2, r0, r2 + str r11, [sp] @ 4-byte Spill + adcs r2, lr, r1 + umlal r0, r1, r12, r3 + adcs lr, r4, r5 + ldmib sp, {r4, r5} + ldr r3, [sp, #44] @ 4-byte Reload + ldr r2, [sp] @ 4-byte Reload + adcs r7, r6, r7 + adcs r6, r8, r10 + adcs r4, r4, r5 + ldr r5, [sp, #12] @ 4-byte Reload + adc r5, r5, #0 + adds r9, r3, r2 + ldr r3, [sp, #40] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #80] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #108] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #24] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + mul r0, r9, r1 + ldr r1, [sp, #116] @ 4-byte Reload + umull r3, r2, r0, r1 + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [sp, #112] @ 4-byte Reload + umull r7, r1, r0, r2 + mov r2, r3 + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [sp, #96] @ 4-byte Reload + mov r5, r1 + umlal r5, r2, r0, r6 + umull r10, r4, r0, r7 + ldr r7, [sp, #92] @ 4-byte Reload + str r4, [sp, #8] @ 4-byte Spill + umull r12, r8, r0, r7 + ldr r7, [sp, #120] @ 4-byte Reload + umull lr, r4, r0, r7 + umull r11, r7, r0, r6 + ldr r6, [sp, #8] @ 4-byte Reload + adds r1, r1, r11 + ldr r11, [sp, #76] @ 4-byte Reload + adcs r1, r7, r3 + umull r1, r3, r0, r11 + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, r1 + adcs r1, r3, lr + adcs r3, r4, r12 + ldr r4, [sp, #16] @ 4-byte Reload + adcs r7, r8, r10 + ldr r10, [sp, #52] @ 4-byte Reload + ldr r8, [sp, #64] @ 4-byte Reload + adc r6, r6, #0 + adds r4, r4, r9 + ldr r9, [sp, #72] @ 4-byte Reload + ldr r4, [sp, #48] @ 4-byte Reload + adcs r5, r5, r4 + str r5, [sp, #48] @ 4-byte Spill + ldr r5, [sp, #44] @ 4-byte Reload + adcs r2, r2, r5 + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [sp, #40] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r7, r0 + ldr r7, [sp, #84] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + umull r4, r5, r10, r7 + adcs r0, r6, r0 + str r4, [sp, #16] @ 4-byte Spill + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + umull r1, r6, r10, r0 + ldr r0, [sp, #68] @ 4-byte Reload + umull r2, r3, r10, r0 + adds r2, r5, r2 + adcs r2, r3, r1 + umull r2, r3, r10, r9 + adcs r7, r6, r2 + ldr r6, [sp, #100] 
@ 4-byte Reload + umull r2, r12, r10, r6 + adcs r6, r3, r2 + umull r3, lr, r10, r8 + mov r2, r10 + ldr r10, [sp, #88] @ 4-byte Reload + adcs r4, r12, r3 + umlal r5, r1, r2, r0 + umull r3, r12, r2, r10 + mov r10, r0 + ldr r0, [sp, #48] @ 4-byte Reload + ldr r2, [sp, #16] @ 4-byte Reload + adcs r3, lr, r3 + adc r12, r12, #0 + adds lr, r0, r2 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #80] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #108] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + mul r0, lr, r1 + ldr r1, [sp, #116] @ 4-byte Reload + umull r5, r12, r0, r7 + umull r3, r6, r0, r1 + ldr r1, [sp, #112] @ 4-byte Reload + umull r2, r4, r0, r1 + str r2, [sp, #20] @ 4-byte Spill + mov r1, r4 + mov r2, r3 + adds r4, r4, r5 + umlal r1, r2, r0, r7 + ldr r7, [sp, #120] @ 4-byte Reload + adcs r3, r12, r3 + umull r3, r12, r0, r11 + adcs r11, r6, r3 + ldr r3, [sp, #92] @ 4-byte Reload + umull r4, r5, r0, r7 + ldr r7, [sp, #96] @ 4-byte Reload + adcs r12, r12, r4 + umull r4, r6, r0, r3 + adcs r4, r5, r4 + umull r5, r3, r0, r7 + ldr r7, [sp, #52] @ 4-byte Reload + adcs r0, r6, r5 + ldr r5, [sp, #20] @ 4-byte Reload + adc r3, r3, #0 + adds r6, r5, lr + adcs r1, r1, r7 + ldr r7, [sp, #104] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r2, r1 + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r11, r1 + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r12, r1 + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adcs r11, r4, r1 + ldr r1, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + ldr r0, [r0, #16] + umull lr, r6, r0, r8 + umull r5, r3, r0, r10 + umull r8, r2, r0, r1 + umull r12, r4, r0, r9 + adds r5, r2, r5 + umull r1, r5, r0, r7 + ldr r7, [sp, #100] @ 4-byte Reload + adcs r3, r3, r1 + umlal r2, r1, r0, r10 + adcs r9, r5, r12 + umull r5, r3, r0, r7 + ldr r7, [sp, #108] @ 4-byte Reload + adcs r12, r4, r5 + ldr r4, [sp, #88] @ 4-byte Reload + adcs lr, r3, lr + umull r5, r3, r0, r4 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r5, r6, r5 + adc r3, r3, #0 + adds r4, r0, r8 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #44] @ 4-byte Spill + adcs r0, r11, r12 + ldr r11, [sp, #80] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + mul r1, r4, r11 + adcs r0, r0, lr + umull lr, r12, r1, r7 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs 
r0, r0, r5 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + umull r2, r6, r1, r0 + ldr r0, [sp, #112] @ 4-byte Reload + mov r3, r2 + umull r8, r5, r1, r0 + mov r0, r5 + adds r5, r5, lr + umlal r0, r3, r1, r7 + ldr r7, [sp, #120] @ 4-byte Reload + adcs r2, r12, r2 + umull r5, lr, r1, r7 + ldr r7, [sp, #76] @ 4-byte Reload + umull r2, r12, r1, r7 + ldr r7, [sp, #92] @ 4-byte Reload + adcs r9, r6, r2 + ldr r2, [sp, #96] @ 4-byte Reload + adcs r12, r12, r5 + umull r5, r6, r1, r7 + adcs lr, lr, r5 + umull r7, r5, r1, r2 + ldr r2, [sp, #52] @ 4-byte Reload + adcs r1, r6, r7 + ldr r7, [sp, #104] @ 4-byte Reload + adc r5, r5, #0 + adds r4, r8, r4 + adcs r0, r0, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r12, r0 + mov r12, r10 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #64] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + ldr r0, [r0, #20] + umull lr, r8, r0, r1 + ldr r1, [sp, #72] @ 4-byte Reload + umull r6, r3, r0, r12 + umull r4, r5, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + umull r10, r2, r0, r1 + adds r6, r2, r6 + umull r1, r6, r0, r7 + ldr r7, [sp, #88] @ 4-byte Reload + adcs r3, r3, r1 + umlal r2, r1, r0, r12 + ldr r3, [sp, #100] @ 4-byte Reload + adcs r9, r6, r4 + umull r4, r6, r0, r3 + adcs r4, r5, r4 + adcs r3, r6, lr + umull r5, r6, r0, r7 + ldr r0, [sp, #52] @ 4-byte Reload + ldr r7, [sp, #108] @ 4-byte Reload + adcs r5, r8, r5 + adc r6, r6, #0 + adds lr, r0, r10 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r10, r0, r2 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + mul r1, lr, r11 + ldr r11, [sp, #84] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r6 + umull r6, r12, r1, r7 + str r0, [sp, #32] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + umull r3, r4, r1, r0 + ldr r0, [sp, #112] @ 4-byte Reload + mov r2, r3 + umull r8, r5, r1, r0 + mov r0, r5 + adds r5, r5, r6 + umlal r0, r2, r1, r7 + ldr r7, [sp, #120] @ 4-byte Reload + adcs r3, r12, r3 + umull r5, r6, r1, r7 + ldr r7, [sp, #76] @ 4-byte Reload + umull r3, r12, r1, r7 + ldr r7, [sp, #96] @ 4-byte Reload + adcs r9, r4, r3 + ldr r3, [sp, #92] @ 4-byte Reload + adcs r12, r12, r5 + umull r4, r5, r1, r3 + adcs r4, r6, r4 + umull r6, r3, r1, r7 + adcs r1, r5, r6 + adc r3, r3, #0 + adds r6, r8, lr + adcs r0, r0, r10 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r2, r0 + 
str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r12, r0 + ldr r12, [sp, #68] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #88] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + ldr r0, [r0, #24] + umull r3, r2, r0, r1 + ldr r1, [sp, #64] @ 4-byte Reload + str r2, [sp, #60] @ 4-byte Spill + str r3, [sp, #20] @ 4-byte Spill + umull r3, lr, r0, r12 + umull r9, r2, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r2, [sp, #88] @ 4-byte Spill + umull r7, r8, r0, r1 + ldr r1, [sp, #72] @ 4-byte Reload + umull r5, r6, r0, r1 + ldr r1, [sp, #104] @ 4-byte Reload + umull r2, r4, r0, r1 + umull r10, r1, r0, r11 + ldr r11, [sp, #92] @ 4-byte Reload + adds r3, r1, r3 + str r10, [sp, #104] @ 4-byte Spill + ldr r10, [sp, #96] @ 4-byte Reload + adcs r3, lr, r2 + umlal r1, r2, r0, r12 + ldr r0, [sp, #24] @ 4-byte Reload + adcs lr, r4, r5 + ldr r5, [sp, #20] @ 4-byte Reload + ldr r3, [sp, #88] @ 4-byte Reload + ldr r4, [sp, #60] @ 4-byte Reload + adcs r6, r6, r7 + adcs r7, r8, r9 + ldr r8, [sp, #108] @ 4-byte Reload + adcs r5, r3, r5 + ldr r3, [sp, #104] @ 4-byte Reload + adc r4, r4, #0 + adds r9, r0, r3 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + ldr lr, [sp, #76] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #116] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r1, r9, r0 + ldr r0, [sp, #112] @ 4-byte Reload + umull r2, r3, r1, r8 + umull r4, r5, r1, r0 + adds r2, r5, r2 + umull r0, r2, r1, r7 + ldr r7, [sp, #120] @ 4-byte Reload + adcs r3, r3, r0 + umull r3, r12, r1, lr + adcs r6, r2, r3 + umull r3, r2, r1, r7 + adcs r12, r12, r3 + umull r7, r3, r1, r11 + adcs r2, r2, r7 + str r2, [sp, #80] @ 4-byte Spill + umull r7, r2, r1, r10 + adcs r3, r3, r7 + mov r7, r8 + umlal r5, r0, r1, r7 + adc r1, r2, #0 + adds r2, r4, r9 + ldr r2, [sp, #104] @ 4-byte Reload + adcs r8, r5, r2 + ldr r2, [sp, #100] @ 4-byte Reload + ldr r5, [sp, #116] @ 4-byte Reload + adcs r9, r0, r2 + ldr r0, [sp, #88] @ 4-byte Reload + ldr r2, [sp, #80] @ 4-byte Reload + adcs r4, r6, r0 + ldr r0, [sp, #84] @ 4-byte Reload + str r4, [sp, #88] @ 4-byte Spill + adcs r6, r12, r0 + ldr r0, [sp, #72] @ 4-byte Reload + str r6, [sp, #100] @ 4-byte Spill + adcs r12, r2, r0 + ldr r0, [sp, #68] @ 4-byte Reload + adcs r2, r3, r0 + ldr r0, [sp, #64] @ 4-byte Reload + str r2, [sp, #104] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #60] @ 4-byte Reload + adc r1, r1, #0 + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #112] @ 
4-byte Reload + subs r1, r8, r1 + sbcs r3, r9, r7 + ldr r7, [sp, #120] @ 4-byte Reload + sbcs r5, r4, r5 + sbcs r6, r6, lr + sbcs r4, r12, r7 + sbcs r11, r2, r11 + ldr r2, [sp, #84] @ 4-byte Reload + sbcs lr, r0, r10 + sbc r7, r2, #0 + ldr r2, [sp, #56] @ 4-byte Reload + ands r7, r7, #1 + movne r1, r8 + movne r3, r9 + str r1, [r2] + ldr r1, [sp, #88] @ 4-byte Reload + str r3, [r2, #4] + movne r5, r1 + ldr r1, [sp, #100] @ 4-byte Reload + cmp r7, #0 + movne r4, r12 + str r5, [r2, #8] + movne r6, r1 + ldr r1, [sp, #104] @ 4-byte Reload + str r6, [r2, #12] + str r4, [r2, #16] + movne r11, r1 + cmp r7, #0 + movne lr, r0 + str r11, [r2, #20] + str lr, [r2, #24] + add sp, sp, #124 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end98: + .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L + .cantunwind + .fnend + + .globl mcl_fp_montNF7L + .align 2 + .type mcl_fp_montNF7L,%function +mcl_fp_montNF7L: @ @mcl_fp_montNF7L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #104 + sub sp, sp, #104 + str r0, [sp, #36] @ 4-byte Spill + mov r0, r2 + str r2, [sp, #40] @ 4-byte Spill + ldm r0, {r4, r12} + ldr r6, [r1, #4] + ldr r2, [r0, #8] + ldr r7, [r1] + ldr r0, [r0, #12] + ldr r5, [r1, #8] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r1, #12] + umull r9, r8, r6, r4 + umull lr, r10, r7, r4 + str r6, [sp, #52] @ 4-byte Spill + mov r11, r6 + str r7, [sp, #96] @ 4-byte Spill + str r5, [sp, #80] @ 4-byte Spill + str r2, [sp] @ 4-byte Spill + adds r6, r10, r9 + umull r6, r9, r5, r4 + ldr r5, [r1, #20] + adcs r7, r8, r6 + umlal r10, r6, r11, r4 + umull r7, r8, r0, r4 + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r9, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [r1, #16] + str r5, [sp, #44] @ 4-byte Spill + umull r7, r9, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + adcs r0, r8, r7 + str r0, [sp, #84] @ 4-byte Spill + umull r7, r0, r5, r4 + adcs r5, r9, r7 + ldr r7, [r3, #4] + str r5, [sp, #76] @ 4-byte Spill + ldr r5, [r1, #24] + str r7, [sp, #72] @ 4-byte Spill + umull r1, r9, r5, r4 + str r5, [sp, #68] @ 4-byte Spill + ldr r5, [r3] + adcs r0, r0, r1 + ldr r1, [r3, #-4] + str r0, [sp, #28] @ 4-byte Spill + adc r0, r9, #0 + ldr r9, [r3, #8] + str r0, [sp, #24] @ 4-byte Spill + str r5, [sp, #56] @ 4-byte Spill + mul r0, lr, r1 + str r1, [sp, #60] @ 4-byte Spill + umull r1, r2, r0, r5 + str r9, [sp, #100] @ 4-byte Spill + adds r1, r1, lr + str r2, [sp, #20] @ 4-byte Spill + umull r1, lr, r0, r7 + adcs r11, r1, r10 + umull r5, r1, r0, r9 + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [r3, #12] + adcs r9, r5, r6 + str r1, [sp, #92] @ 4-byte Spill + umull r5, r10, r0, r1 + ldr r1, [sp, #88] @ 4-byte Reload + adcs r7, r5, r1 + ldr r1, [r3, #16] + str r1, [sp, #88] @ 4-byte Spill + umull r5, r8, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + adcs r4, r5, r1 + ldr r1, [r3, #20] + str r1, [sp, #84] @ 4-byte Spill + umull r5, r6, r0, r1 + ldr r1, [sp, #76] @ 4-byte Reload + adcs r5, r5, r1 + ldr r1, [r3, #24] + umull r3, r2, r0, r1 + ldr r0, [sp, #28] @ 4-byte Reload + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #24] @ 4-byte Reload + adcs r0, r3, r0 + adc r3, r1, #0 + ldr r1, [sp, #20] @ 4-byte Reload + adds r11, r11, r1 + adcs r1, r9, lr + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #16] @ 4-byte Reload + adcs r1, r7, r1 + ldr r7, [sp, #80] @ 4-byte Reload + str r1, [sp, #24] @ 4-byte Spill + adcs r1, r4, r10 + str r1, [sp, #20] @ 4-byte Spill + adcs r1, r5, r8 + ldr r5, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r1, [sp, 
#16] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + str r0, [sp, #12] @ 4-byte Spill + adc r0, r3, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r0, [sp, #8] @ 4-byte Spill + umull r9, r0, r12, r1 + umull r3, r4, r12, r2 + adds r3, r0, r3 + umull r1, r3, r12, r7 + ldr r7, [sp, #44] @ 4-byte Reload + adcs r4, r4, r1 + umlal r0, r1, r12, r2 + umull r4, r6, r12, r5 + ldr r5, [sp, #64] @ 4-byte Reload + adcs r10, r3, r4 + umull r4, r3, r12, r5 + adcs r8, r6, r4 + umull r6, r4, r12, r7 + ldr r7, [sp, #68] @ 4-byte Reload + adcs r5, r3, r6 + umull r6, r3, r12, r7 + ldr r7, [sp, #28] @ 4-byte Reload + adcs r4, r4, r6 + adc r2, r3, #0 + adds r3, r9, r11 + adcs r0, r0, r7 + ldr r7, [sp, #24] @ 4-byte Reload + adcs r1, r1, r7 + ldr r7, [sp, #20] @ 4-byte Reload + adcs r6, r10, r7 + ldr r7, [sp, #16] @ 4-byte Reload + adcs r11, r8, r7 + ldr r7, [sp, #12] @ 4-byte Reload + ldr r8, [sp, #72] @ 4-byte Reload + adcs r7, r5, r7 + ldr r5, [sp, #8] @ 4-byte Reload + str r7, [sp, #16] @ 4-byte Spill + adcs r7, r4, r5 + ldr r5, [sp, #60] @ 4-byte Reload + adc r2, r2, #0 + str r7, [sp, #20] @ 4-byte Spill + str r2, [sp, #28] @ 4-byte Spill + mul r2, r3, r5 + ldr r5, [sp, #56] @ 4-byte Reload + umull r4, r7, r2, r5 + adds r3, r4, r3 + str r7, [sp, #24] @ 4-byte Spill + umull r3, r7, r2, r8 + ldr r4, [sp, #24] @ 4-byte Reload + adcs lr, r3, r0 + ldr r0, [sp, #100] @ 4-byte Reload + str r7, [sp, #12] @ 4-byte Spill + umull r3, r7, r2, r0 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r12, r3, r1 + str r7, [sp, #8] @ 4-byte Spill + umull r3, r10, r2, r0 + ldr r0, [sp, #88] @ 4-byte Reload + adcs r3, r3, r6 + umull r6, r9, r2, r0 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r5, r6, r11 + ldr r11, [sp, #76] @ 4-byte Reload + umull r6, r1, r2, r0 + ldr r0, [sp, #16] @ 4-byte Reload + adcs r6, r6, r0 + umull r7, r0, r2, r11 + ldr r2, [sp, #20] @ 4-byte Reload + adcs r2, r7, r2 + ldr r7, [sp, #28] @ 4-byte Reload + adc r7, r7, #0 + adds r4, lr, r4 + str r4, [sp, #28] @ 4-byte Spill + ldr r4, [sp, #12] @ 4-byte Reload + adcs r4, r12, r4 + ldr r12, [sp, #52] @ 4-byte Reload + str r4, [sp, #24] @ 4-byte Spill + ldr r4, [sp, #8] @ 4-byte Reload + adcs r3, r3, r4 + ldr r4, [sp, #64] @ 4-byte Reload + str r3, [sp, #20] @ 4-byte Spill + adcs r3, r5, r10 + ldr r5, [sp, #48] @ 4-byte Reload + str r3, [sp, #16] @ 4-byte Spill + adcs r3, r6, r9 + ldr r9, [sp, #68] @ 4-byte Reload + adcs r1, r2, r1 + str r3, [sp, #12] @ 4-byte Spill + ldr r3, [sp, #80] @ 4-byte Reload + adc r0, r7, r0 + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + ldr r7, [sp, #44] @ 4-byte Reload + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp] @ 4-byte Reload + umull r2, r6, r0, r12 + umull r11, lr, r0, r1 + adds r2, lr, r2 + umull r1, r2, r0, r3 + adcs r6, r6, r1 + umlal lr, r1, r0, r12 + umull r6, r3, r0, r5 + adcs r5, r2, r6 + umull r6, r2, r0, r4 + adcs r10, r3, r6 + umull r6, r3, r0, r7 + ldr r7, [sp, #28] @ 4-byte Reload + adcs r4, r2, r6 + umull r6, r2, r0, r9 + ldr r9, [sp, #56] @ 4-byte Reload + adcs r3, r3, r6 + ldr r6, [sp, #24] @ 4-byte Reload + adc r2, r2, #0 + adds r7, r11, r7 + adcs r0, lr, r6 + ldr r6, [sp, #20] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #16] @ 4-byte Reload + adcs r6, r5, r6 + ldr r5, [sp, #12] @ 4-byte Reload + adcs r11, r10, r5 + ldr r5, [sp, #8] @ 4-byte Reload + adcs r10, r4, r5 + ldr r5, [sp, #4] @ 4-byte Reload + ldr r4, [sp, #92] @ 4-byte Reload + adcs r3, r3, r5 + str r3, [sp, #28] @ 4-byte Spill + ldr r3, [sp, #60] @ 4-byte Reload + adc r2, r2, #0 + str r2, [sp, #24] @ 4-byte Spill + mul 
r2, r7, r3 + umull r3, r5, r2, r9 + adds r3, r3, r7 + str r5, [sp, #20] @ 4-byte Spill + umull r3, r7, r2, r8 + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [sp, #100] @ 4-byte Reload + adcs r8, r3, r0 + ldr r0, [sp, #76] @ 4-byte Reload + umull r3, lr, r2, r7 + ldr r7, [sp, #84] @ 4-byte Reload + adcs r1, r3, r1 + umull r3, r12, r2, r4 + ldr r4, [sp, #88] @ 4-byte Reload + adcs r3, r3, r6 + umull r6, r5, r2, r4 + adcs r6, r6, r11 + umull r4, r11, r2, r7 + adcs r4, r4, r10 + umull r7, r10, r2, r0 + ldr r0, [sp, #28] @ 4-byte Reload + adcs r2, r7, r0 + ldr r0, [sp, #24] @ 4-byte Reload + adc r7, r0, #0 + ldr r0, [sp, #20] @ 4-byte Reload + adds r0, r8, r0 + ldr r8, [sp, #48] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + adcs r0, r3, lr + ldr r3, [sp, #96] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + adcs r0, r6, r12 + ldr r6, [sp, #32] @ 4-byte Reload + str r0, [sp, #16] @ 4-byte Spill + adcs r0, r4, r5 + str r0, [sp, #12] @ 4-byte Spill + adcs r0, r2, r11 + str r0, [sp, #8] @ 4-byte Spill + adc r0, r7, r10 + ldr r7, [sp, #80] @ 4-byte Reload + ldr r10, [sp, #44] @ 4-byte Reload + str r0, [sp, #4] @ 4-byte Spill + umull r4, r0, r6, r1 + umull r11, r2, r6, r3 + adds r4, r2, r4 + umull r3, r4, r6, r7 + adcs r0, r0, r3 + umlal r2, r3, r6, r1 + umull r0, r7, r6, r8 + adcs r5, r4, r0 + ldr r0, [sp, #64] @ 4-byte Reload + umull r4, r1, r6, r0 + mov r0, r6 + adcs r4, r7, r4 + umull r7, r12, r6, r10 + ldr r6, [sp, #68] @ 4-byte Reload + adcs lr, r1, r7 + umull r7, r1, r0, r6 + ldr r0, [sp, #28] @ 4-byte Reload + adcs r7, r12, r7 + adc r12, r1, #0 + ldr r1, [sp, #24] @ 4-byte Reload + adds r0, r11, r0 + adcs r2, r2, r1 + ldr r1, [sp, #20] @ 4-byte Reload + adcs r3, r3, r1 + ldr r1, [sp, #16] @ 4-byte Reload + adcs r6, r5, r1 + ldr r1, [sp, #12] @ 4-byte Reload + adcs r11, r4, r1 + ldr r1, [sp, #8] @ 4-byte Reload + adcs r1, lr, r1 + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #4] @ 4-byte Reload + adcs r1, r7, r1 + str r1, [sp, #24] @ 4-byte Spill + adc r1, r12, #0 + ldr r12, [sp, #76] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + mul r4, r0, r1 + umull r7, r1, r4, r9 + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adds r0, r7, r0 + umull r0, r7, r4, r1 + ldr r1, [sp, #100] @ 4-byte Reload + adcs lr, r0, r2 + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [sp, #84] @ 4-byte Reload + umull r2, r0, r4, r1 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #12] @ 4-byte Spill + adcs r2, r2, r3 + umull r3, r0, r4, r1 + ldr r1, [sp, #88] @ 4-byte Reload + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r3, r3, r6 + umull r6, r5, r4, r1 + adcs r6, r6, r11 + umull r1, r11, r4, r7 + umull r7, r9, r4, r12 + ldr r12, [sp, #52] @ 4-byte Reload + adcs r1, r1, r0 + ldr r0, [sp, #24] @ 4-byte Reload + adcs r4, r7, r0 + ldr r7, [sp, #32] @ 4-byte Reload + ldr r0, [sp, #28] @ 4-byte Reload + adc r7, r7, #0 + adds r0, lr, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #96] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #68] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + adcs r0, r1, r5 + str r0, [sp, #16] @ 4-byte Spill + adcs r0, r4, r11 + str r0, [sp, #12] @ 
4-byte Spill + adc r0, r7, r9 + ldr r9, [sp, #40] @ 4-byte Reload + str r0, [sp, #8] @ 4-byte Spill + ldr r4, [r9, #16] + umull r11, r3, r4, r2 + ldr r2, [sp, #80] @ 4-byte Reload + umull r0, r1, r4, r12 + adds r0, r3, r0 + umull r5, r0, r4, r2 + ldr r2, [sp, #64] @ 4-byte Reload + adcs r1, r1, r5 + umlal r3, r5, r4, r12 + umull r1, r7, r4, r8 + adcs r8, r0, r1 + umull r1, r0, r4, r2 + adcs lr, r7, r1 + umull r7, r1, r4, r10 + adcs r2, r0, r7 + umull r7, r0, r4, r6 + ldr r6, [sp, #16] @ 4-byte Reload + adcs r1, r1, r7 + ldr r7, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + adds r4, r11, r7 + ldr r7, [sp, #28] @ 4-byte Reload + adcs r3, r3, r7 + ldr r7, [sp, #24] @ 4-byte Reload + adcs r5, r5, r7 + ldr r7, [sp, #20] @ 4-byte Reload + adcs r7, r8, r7 + adcs r11, lr, r6 + ldr r6, [sp, #12] @ 4-byte Reload + adcs r10, r2, r6 + ldr r2, [sp, #8] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #56] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + mul r0, r4, r1 + umull r1, r6, r0, r2 + ldr r2, [sp, #72] @ 4-byte Reload + adds r1, r1, r4 + str r6, [sp, #24] @ 4-byte Spill + ldr r4, [sp, #84] @ 4-byte Reload + umull r1, r6, r0, r2 + adcs lr, r1, r3 + ldr r1, [sp, #100] @ 4-byte Reload + str r6, [sp, #20] @ 4-byte Spill + umull r3, r2, r0, r1 + ldr r1, [sp, #92] @ 4-byte Reload + adcs r3, r3, r5 + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [sp, #24] @ 4-byte Reload + umull r5, r8, r0, r1 + ldr r1, [sp, #88] @ 4-byte Reload + adcs r5, r5, r7 + umull r7, r12, r0, r1 + adcs r6, r7, r11 + ldr r11, [sp, #76] @ 4-byte Reload + umull r7, r1, r0, r4 + adcs r7, r7, r10 + umull r4, r10, r0, r11 + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #28] @ 4-byte Reload + adc r4, r4, #0 + adds r2, lr, r2 + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [sp, #20] @ 4-byte Reload + adcs r2, r3, r2 + ldr r3, [sp, #52] @ 4-byte Reload + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [sp, #16] @ 4-byte Reload + adcs r11, r5, r2 + adcs r2, r6, r8 + ldr r6, [sp, #48] @ 4-byte Reload + ldr r8, [sp, #76] @ 4-byte Reload + str r2, [sp, #24] @ 4-byte Spill + adcs r2, r7, r12 + ldr r7, [r9, #20] + adcs r0, r0, r1 + ldr r1, [sp, #96] @ 4-byte Reload + str r2, [sp, #20] @ 4-byte Spill + str r0, [sp, #16] @ 4-byte Spill + adc r0, r4, r10 + str r0, [sp, #12] @ 4-byte Spill + umull r4, r0, r7, r3 + umull r10, r2, r7, r1 + ldr r1, [sp, #80] @ 4-byte Reload + adds r4, r2, r4 + umull r5, r4, r7, r1 + adcs r0, r0, r5 + umlal r2, r5, r7, r3 + ldr r3, [sp, #68] @ 4-byte Reload + umull r0, r1, r7, r6 + ldr r6, [sp, #64] @ 4-byte Reload + adcs lr, r4, r0 + umull r4, r0, r7, r6 + ldr r6, [sp, #44] @ 4-byte Reload + adcs r12, r1, r4 + umull r4, r1, r7, r6 + adcs r9, r0, r4 + umull r4, r0, r7, r3 + ldr r3, [sp, #32] @ 4-byte Reload + adcs r1, r1, r4 + adc r0, r0, #0 + adds r4, r10, r3 + ldr r3, [sp, #28] @ 4-byte Reload + adcs r2, r2, r3 + ldr r3, [sp, #24] @ 4-byte Reload + adcs r5, r5, r11 + adcs r7, lr, r3 + ldr r3, [sp, #20] @ 4-byte Reload + adcs r11, r12, r3 + ldr r3, [sp, #16] @ 4-byte Reload + adcs r9, r9, r3 + ldr r3, [sp, #12] @ 4-byte Reload + adcs r1, r1, r3 + ldr r3, [sp, #56] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + mul r0, r4, r1 + umull r1, r6, r0, r3 + ldr r3, [sp, #72] @ 4-byte Reload + adds r1, r1, r4 + str r6, [sp, #24] @ 4-byte Spill + ldr r4, [sp, #84] @ 4-byte Reload + umull r1, r6, r0, r3 + ldr r3, [sp, #100] @ 4-byte 
Reload + adcs r12, r1, r2 + str r6, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #24] @ 4-byte Reload + umull r2, r10, r0, r3 + ldr r3, [sp, #92] @ 4-byte Reload + adcs r2, r2, r5 + umull r5, lr, r0, r3 + ldr r3, [sp, #88] @ 4-byte Reload + adcs r5, r5, r7 + umull r7, r6, r0, r3 + adcs r7, r7, r11 + umull r3, r11, r0, r4 + adcs r3, r3, r9 + umull r4, r9, r0, r8 + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #28] @ 4-byte Reload + adc r4, r4, #0 + adds r8, r12, r1 + ldr r1, [sp, #20] @ 4-byte Reload + adcs r1, r2, r1 + str r1, [sp, #16] @ 4-byte Spill + adcs r1, r5, r10 + ldr r5, [sp, #52] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + adcs r1, r7, lr + ldr r7, [sp, #64] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + adcs r1, r3, r6 + adcs r0, r0, r11 + str r1, [sp, #24] @ 4-byte Spill + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r9, r4, r9 + ldr r4, [r0, #24] + ldr r0, [sp, #80] @ 4-byte Reload + umull r6, lr, r4, r0 + ldr r0, [sp, #96] @ 4-byte Reload + umull r12, r1, r4, r5 + umull r11, r2, r4, r0 + mov r0, r6 + mov r3, r2 + adds r2, r2, r12 + adcs r1, r1, r6 + ldr r6, [sp, #48] @ 4-byte Reload + umlal r3, r0, r4, r5 + umull r1, r2, r4, r6 + adcs r5, lr, r1 + umull r6, r1, r4, r7 + ldr r7, [sp, #44] @ 4-byte Reload + adcs lr, r2, r6 + umull r6, r2, r4, r7 + ldr r7, [sp, #68] @ 4-byte Reload + adcs r12, r1, r6 + umull r6, r1, r4, r7 + ldr r7, [sp, #20] @ 4-byte Reload + adcs r2, r2, r6 + ldr r6, [sp, #16] @ 4-byte Reload + adc r1, r1, #0 + adds r4, r11, r8 + ldr r11, [sp, #88] @ 4-byte Reload + adcs r3, r3, r6 + ldr r6, [sp, #32] @ 4-byte Reload + adcs r6, r0, r6 + ldr r0, [sp, #28] @ 4-byte Reload + adcs r5, r5, r0 + ldr r0, [sp, #24] @ 4-byte Reload + adcs r10, lr, r0 + adcs r7, r12, r7 + adcs r12, r2, r9 + ldr r2, [sp, #60] @ 4-byte Reload + str r7, [sp, #96] @ 4-byte Spill + ldr r7, [sp, #56] @ 4-byte Reload + adc lr, r1, #0 + mul r1, r4, r2 + umull r2, r8, r1, r7 + ldr r7, [sp, #100] @ 4-byte Reload + adds r2, r2, r4 + umull r2, r9, r1, r7 + ldr r7, [sp, #72] @ 4-byte Reload + umull r4, r0, r1, r7 + ldr r7, [sp, #92] @ 4-byte Reload + adcs r3, r4, r3 + str r0, [sp, #80] @ 4-byte Spill + adcs r0, r2, r6 + str r0, [sp, #60] @ 4-byte Spill + umull r2, r0, r1, r7 + str r0, [sp, #68] @ 4-byte Spill + adcs r0, r2, r5 + str r0, [sp, #48] @ 4-byte Spill + umull r5, r0, r1, r11 + adcs r2, r5, r10 + ldr r10, [sp, #84] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r5, [sp, #76] @ 4-byte Reload + umull r6, r0, r1, r10 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r6, r6, r0 + umull r4, r0, r1, r5 + adcs r1, r4, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adc r4, lr, #0 + adds r8, r3, r8 + ldr r3, [sp, #60] @ 4-byte Reload + adcs r0, r3, r0 + ldr r3, [sp, #48] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + adcs lr, r3, r9 + ldr r3, [sp, #68] @ 4-byte Reload + adcs r12, r2, r3 + ldr r2, [sp, #64] @ 4-byte Reload + adcs r3, r6, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r3, [sp, #96] @ 4-byte Spill + adcs r2, r1, r2 + ldr r1, [sp, #44] @ 4-byte Reload + adc r9, r4, r1 + ldr r1, [sp, #56] @ 4-byte Reload + subs r4, r8, r1 + ldr r1, [sp, #72] @ 4-byte Reload + sbcs r6, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + sbcs r1, lr, r1 + sbcs r7, r12, r7 + sbcs r11, r3, r11 + ldr r3, [sp, #36] @ 4-byte Reload + sbcs r10, r2, r10 + sbc r5, r9, r5 + asr r0, r5, #31 + cmp r0, #0 + movlt r4, r8 + movlt r1, lr + str r4, [r3] + ldr r4, [sp, #80] @ 4-byte Reload + movlt 
r6, r4 + cmp r0, #0 + str r6, [r3, #4] + str r1, [r3, #8] + ldr r1, [sp, #96] @ 4-byte Reload + movlt r7, r12 + movlt r10, r2 + str r7, [r3, #12] + movlt r11, r1 + cmp r0, #0 + movlt r5, r9 + str r11, [r3, #16] + str r10, [r3, #20] + str r5, [r3, #24] + add sp, sp, #104 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end99: + .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L + .cantunwind + .fnend + + .globl mcl_fp_montRed7L + .align 2 + .type mcl_fp_montRed7L,%function +mcl_fp_montRed7L: @ @mcl_fp_montRed7L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #120 + sub sp, sp, #120 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [r1, #4] + ldr r10, [r2, #-4] + ldr r4, [r1] + ldr r3, [r2] + ldr r7, [r2, #8] + ldr r5, [r2, #4] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r1, #8] + str r4, [sp, #60] @ 4-byte Spill + str r7, [sp, #108] @ 4-byte Spill + str r3, [sp, #116] @ 4-byte Spill + str r5, [sp, #24] @ 4-byte Spill + str r10, [sp, #92] @ 4-byte Spill + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r1, #12] + str r0, [sp, #76] @ 4-byte Spill + mul r0, r4, r10 + umull r4, r12, r0, r3 + umull lr, r6, r0, r7 + str r4, [sp, #52] @ 4-byte Spill + ldr r4, [r2, #24] + str r6, [sp, #72] @ 4-byte Spill + mov r9, lr + mov r3, r12 + umlal r3, r9, r0, r5 + umull r7, r6, r0, r4 + str r4, [sp, #104] @ 4-byte Spill + ldr r4, [r2, #20] + str r7, [sp, #68] @ 4-byte Spill + str r6, [sp, #64] @ 4-byte Spill + umull r7, r6, r0, r4 + str r4, [sp, #112] @ 4-byte Spill + ldr r4, [r2, #16] + ldr r2, [r2, #12] + str r7, [sp, #44] @ 4-byte Spill + str r6, [sp, #48] @ 4-byte Spill + str r4, [sp, #96] @ 4-byte Spill + umull r8, r7, r0, r4 + str r2, [sp, #100] @ 4-byte Spill + umull r4, r6, r0, r2 + umull r11, r2, r0, r5 + adds r0, r12, r11 + ldr r11, [r1, #36] + adcs r0, r2, lr + ldr r2, [sp, #48] @ 4-byte Reload + ldr lr, [r1, #28] + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r5, r6, r8 + ldr r8, [sp, #108] @ 4-byte Reload + ldr r6, [sp, #56] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #52] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adds r0, r0, r2 + ldr r2, [r1, #24] + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + ldr r3, [r1, #20] + mul r4, r0, r10 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r1, #32] + ldr r10, [r1, #40] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [r1, #44] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r1, #48] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r1, #52] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r1, #16] + umull r12, r1, r4, r8 + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r9 + ldr r9, [sp, #96] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #116] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #24] @ 4-byte Reload + str r1, [sp, #80] @ 4-byte Spill + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + umull r7, r1, r4, r6 + str r7, [sp, #28] @ 4-byte Spill + mov r7, r12 + adcs r0, r3, r0 + ldr r3, [sp, #68] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + mov r0, r1 + umlal r0, r7, r4, r5 + adcs r2, r2, r3 + str r2, [sp, #68] @ 4-byte Spill + 
ldr r2, [sp, #64] @ 4-byte Reload + adcs r2, lr, r2 + ldr lr, [sp, #100] @ 4-byte Reload + str r2, [sp, #64] @ 4-byte Spill + ldr r2, [sp, #60] @ 4-byte Reload + adcs r2, r2, #0 + str r2, [sp, #60] @ 4-byte Spill + adcs r2, r11, #0 + mov r11, r5 + str r2, [sp, #56] @ 4-byte Spill + adcs r2, r10, #0 + str r2, [sp, #52] @ 4-byte Spill + ldr r2, [sp, #48] @ 4-byte Reload + adcs r2, r2, #0 + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [sp, #44] @ 4-byte Reload + adcs r2, r2, #0 + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [sp, #40] @ 4-byte Reload + adcs r2, r2, #0 + str r2, [sp, #40] @ 4-byte Spill + mov r2, #0 + adc r2, r2, #0 + str r2, [sp, #36] @ 4-byte Spill + umull r3, r2, r4, r5 + ldr r5, [sp, #20] @ 4-byte Reload + adds r1, r1, r3 + adcs r2, r2, r12 + umull r1, r3, r4, r9 + umull r2, r12, r4, lr + adcs r2, r5, r2 + adcs r10, r12, r1 + ldr r1, [sp, #112] @ 4-byte Reload + str r2, [sp] @ 4-byte Spill + ldr r12, [sp, #92] @ 4-byte Reload + umull r5, r2, r4, r1 + adcs r1, r3, r5 + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #104] @ 4-byte Reload + umull r5, r3, r4, r1 + adcs r2, r2, r5 + ldr r5, [sp] @ 4-byte Reload + str r2, [sp, #8] @ 4-byte Spill + adc r2, r3, #0 + ldr r3, [sp, #28] @ 4-byte Reload + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [sp, #84] @ 4-byte Reload + adds r4, r3, r2 + ldr r2, [sp, #32] @ 4-byte Reload + adcs r0, r0, r2 + mul r4, r0, r12 + str r0, [sp, #32] @ 4-byte Spill + umull r3, r0, r4, r8 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r7, r0 + ldr r7, [sp, #12] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + umull r0, r2, r4, r6 + ldr r6, [sp, #68] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r5, r0 + mov r5, r3 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r10, r0 + adcs r6, r7, r6 + ldr r7, [sp, #8] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + mov r0, r2 + str r6, [sp, #76] @ 4-byte Spill + ldr r6, [sp, #64] @ 4-byte Reload + umlal r0, r5, r4, r11 + adcs r6, r7, r6 + ldr r7, [sp, #4] @ 4-byte Reload + str r6, [sp, #72] @ 4-byte Spill + ldr r6, [sp, #60] @ 4-byte Reload + adcs r6, r7, r6 + umull r7, r8, r4, r1 + str r6, [sp, #68] @ 4-byte Spill + ldr r6, [sp, #56] @ 4-byte Reload + adcs r6, r6, #0 + str r6, [sp, #64] @ 4-byte Spill + ldr r6, [sp, #52] @ 4-byte Reload + adcs r6, r6, #0 + str r6, [sp, #60] @ 4-byte Spill + ldr r6, [sp, #48] @ 4-byte Reload + adcs r6, r6, #0 + str r6, [sp, #56] @ 4-byte Spill + ldr r6, [sp, #44] @ 4-byte Reload + adcs r6, r6, #0 + str r6, [sp, #52] @ 4-byte Spill + ldr r6, [sp, #40] @ 4-byte Reload + adcs r6, r6, #0 + str r6, [sp, #48] @ 4-byte Spill + ldr r6, [sp, #36] @ 4-byte Reload + adc r6, r6, #0 + str r6, [sp, #44] @ 4-byte Spill + umull r6, r10, r4, r11 + adds r1, r2, r6 + adcs r2, r10, r3 + umull r1, r6, r4, lr + ldr lr, [sp, #108] @ 4-byte Reload + ldr r2, [sp, #16] @ 4-byte Reload + adcs r10, r2, r1 + umull r2, r3, r4, r9 + adcs r9, r6, r2 + ldr r2, [sp, #112] @ 4-byte Reload + umull r6, r1, r4, r2 + adcs r3, r3, r6 + adcs r1, r1, r7 + str r3, [sp, #16] @ 4-byte Spill + ldr r3, [sp, #20] @ 4-byte Reload + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + adc r8, r8, #0 + ldr r6, [sp, #16] @ 4-byte Reload + adds r7, r3, r1 + ldr r1, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + mul r7, r0, r12 + str r0, [sp, #40] @ 4-byte Spill + umull r3, r0, r7, lr + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + umull r4, r1, r7, r0 + ldr r0, [sp, #84] 
@ 4-byte Reload + str r4, [sp, #36] @ 4-byte Spill + mov r4, r3 + adcs r0, r5, r0 + ldr r5, [sp, #76] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r10, r0 + ldr r10, [sp, #104] @ 4-byte Reload + adcs r5, r9, r5 + str r0, [sp, #84] @ 4-byte Spill + mov r0, r1 + str r5, [sp, #80] @ 4-byte Spill + ldr r5, [sp, #72] @ 4-byte Reload + umlal r0, r4, r7, r11 + adcs r5, r6, r5 + ldr r6, [sp, #12] @ 4-byte Reload + str r5, [sp, #76] @ 4-byte Spill + ldr r5, [sp, #68] @ 4-byte Reload + adcs r5, r6, r5 + str r5, [sp, #72] @ 4-byte Spill + ldr r5, [sp, #64] @ 4-byte Reload + adcs r6, r8, r5 + ldr r8, [sp, #100] @ 4-byte Reload + str r6, [sp, #68] @ 4-byte Spill + ldr r6, [sp, #60] @ 4-byte Reload + adcs r6, r6, #0 + str r6, [sp, #64] @ 4-byte Spill + ldr r6, [sp, #56] @ 4-byte Reload + adcs r6, r6, #0 + str r6, [sp, #60] @ 4-byte Spill + ldr r6, [sp, #52] @ 4-byte Reload + adcs r6, r6, #0 + str r6, [sp, #56] @ 4-byte Spill + ldr r6, [sp, #48] @ 4-byte Reload + adcs r6, r6, #0 + str r6, [sp, #52] @ 4-byte Spill + ldr r6, [sp, #44] @ 4-byte Reload + adc r6, r6, #0 + str r6, [sp, #48] @ 4-byte Spill + umull r9, r6, r7, r10 + str r6, [sp, #44] @ 4-byte Spill + umull r6, r5, r7, r11 + adds r1, r1, r6 + umull r6, r12, r7, r2 + ldr r1, [sp, #28] @ 4-byte Reload + adcs r2, r5, r3 + umull r2, r3, r7, r8 + adcs r1, r1, r2 + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + umull r5, r2, r7, r1 + ldr r7, [sp, #36] @ 4-byte Reload + adcs r3, r3, r5 + ldr r5, [sp, #116] @ 4-byte Reload + adcs r2, r2, r6 + str r3, [sp, #20] @ 4-byte Spill + ldr r3, [sp, #28] @ 4-byte Reload + str r2, [sp, #16] @ 4-byte Spill + adcs r2, r12, r9 + ldr r9, [sp, #92] @ 4-byte Reload + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [sp, #44] @ 4-byte Reload + adc r2, r2, #0 + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [sp, #40] @ 4-byte Reload + adds r6, r7, r2 + ldr r2, [sp, #32] @ 4-byte Reload + adcs r0, r0, r2 + mul r6, r0, r9 + str r0, [sp, #44] @ 4-byte Spill + umull r7, r0, r6, lr + str r0, [sp, #32] @ 4-byte Spill + umull r0, r2, r6, r5 + mov r12, r7 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r4, r0 + mov r4, r2 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + umlal r4, r12, r6, r11 + adcs r0, r3, r0 + ldr r3, [sp, #20] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r3, r0 + ldr r3, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r3, r0 + ldr r3, [sp, #12] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r3, r0 + ldr r3, [sp, #8] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + umull r3, r0, r6, r10 + str r3, [sp, #28] @ 4-byte Spill + ldr r3, [sp, #112] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + umull lr, r0, r6, r3 + str r0, [sp, #20] @ 4-byte Spill + umull r10, r0, r6, r11 + adds r2, r2, r10 + adcs r0, r0, r7 + umull r2, r10, r6, r1 + umull r0, r1, r6, r8 + ldr r6, [sp, #32] @ 4-byte Reload 
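+@ --- editorial annotation, not compiler output: mcl_fp_montRed7L is a fully
+@ unrolled word-serial Montgomery reduction. Each round multiplies the current
+@ low limb of the accumulator t by inv = -p^{-1} mod 2^32 (loaded from
+@ [r2, #-4] in the prologue), then adds q*p so the low limb cancels and t can
+@ shift down one word. A C-level sketch of one round, with t, p, and inv as
+@ assumed names for the accumulator, the 7-limb modulus, and the constant:
+@     uint32_t q = t[i] * inv;                 /* mod 2^32 */
+@     uint64_t c = 0;
+@     for (int j = 0; j < 7; j++) {
+@         uint64_t s = (uint64_t)q * p[j] + t[i + j] + c;
+@         t[i + j] = (uint32_t)s;              /* zero when j == 0 */
+@         c = s >> 32;
+@     }
+@     /* c then propagates into t[i+7..]; after seven rounds the top half of
+@        t holds the result, up to one conditional subtraction of p. */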
+ adcs r8, r6, r0 + adcs r0, r1, r2 + ldr r1, [sp, #20] @ 4-byte Reload + ldr r2, [sp, #40] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r10, r10, lr + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc lr, r0, #0 + ldr r0, [sp, #44] @ 4-byte Reload + adds r7, r2, r0 + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, #48] @ 4-byte Spill + mul r4, r0, r9 + ldr r0, [sp, #108] @ 4-byte Reload + umull r7, r2, r4, r0 + str r2, [sp, #40] @ 4-byte Spill + umull r2, r0, r4, r5 + ldr r5, [sp, #84] @ 4-byte Reload + str r2, [sp, #44] @ 4-byte Spill + mov r6, r0 + mov r2, r7 + umlal r6, r2, r4, r11 + adcs r5, r12, r5 + ldr r12, [sp, #100] @ 4-byte Reload + str r5, [sp, #84] @ 4-byte Spill + ldr r5, [sp, #80] @ 4-byte Reload + adcs r5, r8, r5 + ldr r8, [sp, #104] @ 4-byte Reload + str r5, [sp, #80] @ 4-byte Spill + ldr r5, [sp, #76] @ 4-byte Reload + adcs r5, r1, r5 + ldr r1, [sp, #28] @ 4-byte Reload + str r5, [sp, #76] @ 4-byte Spill + ldr r5, [sp, #72] @ 4-byte Reload + adcs r5, r10, r5 + str r5, [sp, #72] @ 4-byte Spill + ldr r5, [sp, #68] @ 4-byte Reload + adcs r1, r1, r5 + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, lr, r1 + ldr lr, [sp, #96] @ 4-byte Reload + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, #0 + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, #0 + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adc r1, r1, #0 + str r1, [sp, #52] @ 4-byte Spill + umull r5, r1, r4, r8 + str r5, [sp, #32] @ 4-byte Spill + str r1, [sp, #36] @ 4-byte Spill + umull r5, r1, r4, r3 + str r5, [sp, #20] @ 4-byte Spill + umull r9, r5, r4, r11 + str r1, [sp, #28] @ 4-byte Spill + adds r0, r0, r9 + umull r3, r9, r4, lr + umull r0, r1, r4, r12 + adcs r4, r5, r7 + ldr r4, [sp, #40] @ 4-byte Reload + adcs r10, r4, r0 + ldr r0, [sp, #20] @ 4-byte Reload + ldr r4, [sp, #28] @ 4-byte Reload + adcs r1, r1, r3 + adcs r3, r9, r0 + ldr r0, [sp, #32] @ 4-byte Reload + ldr r9, [sp, #112] @ 4-byte Reload + adcs r7, r4, r0 + ldr r0, [sp, #36] @ 4-byte Reload + ldr r4, [sp, #48] @ 4-byte Reload + adc r5, r0, #0 + ldr r0, [sp, #44] @ 4-byte Reload + adds r4, r0, r4 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r4, r6, r0 + ldr r0, [sp, #80] @ 4-byte Reload + ldr r6, [sp, #108] @ 4-byte Reload + adcs r2, r2, r0 + ldr r0, [sp, #76] @ 4-byte Reload + str r2, [sp, #84] @ 4-byte Spill + adcs r0, r10, r0 + mov r10, r4 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + mul r0, r4, r1 + ldr r1, [sp, #116] @ 4-byte Reload + umull r2, r7, r0, r11 + umull r4, r3, r0, r1 + adds r2, r3, r2 + str r4, [sp, #92] @ 4-byte Spill + umull r1, r2, r0, r6 + adcs r4, r7, r1 + umlal r3, r1, r0, r11 + umull r4, r5, r0, r12 + adcs r2, r2, r4 + str r2, [sp, #52] @ 4-byte Spill + umull r4, r2, r0, lr + adcs r7, r5, r4 + str r7, [sp, #48] @ 
4-byte Spill + umull r7, r4, r0, r9 + adcs r5, r2, r7 + umull r7, r2, r0, r8 + adcs r7, r4, r7 + adc r0, r2, #0 + ldr r2, [sp, #92] @ 4-byte Reload + adds r2, r2, r10 + ldr r2, [sp, #84] @ 4-byte Reload + adcs r12, r3, r2 + ldr r2, [sp, #80] @ 4-byte Reload + adcs lr, r1, r2 + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [sp, #52] @ 4-byte Reload + adcs r10, r2, r1 + ldr r1, [sp, #72] @ 4-byte Reload + ldr r2, [sp, #48] @ 4-byte Reload + adcs r4, r2, r1 + ldr r1, [sp, #68] @ 4-byte Reload + adcs r8, r5, r1 + ldr r1, [sp, #64] @ 4-byte Reload + str r8, [sp, #84] @ 4-byte Spill + adcs r2, r7, r1 + ldr r1, [sp, #60] @ 4-byte Reload + ldr r7, [sp, #100] @ 4-byte Reload + str r2, [sp, #92] @ 4-byte Spill + adcs r1, r0, r1 + ldr r0, [sp, #56] @ 4-byte Reload + adc r3, r0, #0 + ldr r0, [sp, #116] @ 4-byte Reload + subs r0, r12, r0 + sbcs r5, lr, r11 + mov r11, r4 + sbcs r6, r10, r6 + sbcs r7, r4, r7 + ldr r4, [sp, #96] @ 4-byte Reload + sbcs r4, r8, r4 + sbcs r8, r2, r9 + ldr r2, [sp, #104] @ 4-byte Reload + sbcs r9, r1, r2 + ldr r2, [sp, #88] @ 4-byte Reload + sbc r3, r3, #0 + ands r3, r3, #1 + movne r0, r12 + movne r5, lr + movne r6, r10 + cmp r3, #0 + str r0, [r2] + ldr r0, [sp, #84] @ 4-byte Reload + movne r7, r11 + str r5, [r2, #4] + str r6, [r2, #8] + str r7, [r2, #12] + movne r4, r0 + ldr r0, [sp, #92] @ 4-byte Reload + str r4, [r2, #16] + movne r8, r0 + cmp r3, #0 + movne r9, r1 + str r8, [r2, #20] + str r9, [r2, #24] + add sp, sp, #120 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end100: + .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L + .cantunwind + .fnend + + .globl mcl_fp_addPre7L + .align 2 + .type mcl_fp_addPre7L,%function +mcl_fp_addPre7L: @ @mcl_fp_addPre7L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #8 + sub sp, sp, #8 + ldr r3, [r1, #4] + ldr r9, [r1] + ldr r7, [r2] + ldr lr, [r1, #8] + ldr r10, [r1, #12] + ldr r11, [r1, #16] + ldr r8, [r1, #24] + str r3, [sp, #4] @ 4-byte Spill + ldr r3, [r1, #20] + adds r7, r7, r9 + str r3, [sp] @ 4-byte Spill + ldmib r2, {r1, r3, r4, r5, r12} + ldr r6, [sp, #4] @ 4-byte Reload + ldr r2, [r2, #24] + str r7, [r0] + adcs r1, r1, r6 + ldr r6, [sp] @ 4-byte Reload + adcs r3, r3, lr + adcs r4, r4, r10 + adcs r5, r5, r11 + adcs r6, r12, r6 + adcs r2, r2, r8 + stmib r0, {r1, r3, r4, r5, r6} + str r2, [r0, #24] + mov r0, #0 + adc r0, r0, #0 + add sp, sp, #8 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end101: + .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L + .cantunwind + .fnend + + .globl mcl_fp_subPre7L + .align 2 + .type mcl_fp_subPre7L,%function +mcl_fp_subPre7L: @ @mcl_fp_subPre7L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #8 + sub sp, sp, #8 + ldr r3, [r2, #4] + ldr r9, [r2] + ldr r7, [r1] + ldr lr, [r2, #8] + ldr r10, [r2, #12] + ldr r11, [r2, #16] + ldr r8, [r2, #24] + str r3, [sp, #4] @ 4-byte Spill + ldr r3, [r2, #20] + subs r7, r7, r9 + str r3, [sp] @ 4-byte Spill + ldmib r1, {r2, r3, r4, r5, r12} + ldr r6, [sp, #4] @ 4-byte Reload + ldr r1, [r1, #24] + str r7, [r0] + sbcs r2, r2, r6 + ldr r6, [sp] @ 4-byte Reload + sbcs r3, r3, lr + sbcs r4, r4, r10 + sbcs r5, r5, r11 + sbcs r6, r12, r6 + sbcs r1, r1, r8 + stmib r0, {r2, r3, r4, r5, r6} + str r1, [r0, #24] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + add sp, sp, #8 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end102: + .size mcl_fp_subPre7L, 
.Lfunc_end102-mcl_fp_subPre7L + .cantunwind + .fnend + + .globl mcl_fp_shr1_7L + .align 2 + .type mcl_fp_shr1_7L,%function +mcl_fp_shr1_7L: @ @mcl_fp_shr1_7L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r11, lr} + push {r4, r5, r6, r7, r11, lr} + ldr r3, [r1, #4] + ldr r12, [r1] + ldr lr, [r1, #12] + ldr r2, [r1, #8] + ldr r5, [r1, #20] + ldr r4, [r1, #16] + ldr r1, [r1, #24] + lsrs r6, r3, #1 + lsr r3, r3, #1 + rrx r12, r12 + lsrs r6, lr, #1 + orr r7, r3, r2, lsl #31 + lsr r6, lr, #1 + rrx r2, r2 + lsrs r3, r5, #1 + lsr r5, r5, #1 + str r12, [r0] + str r7, [r0, #4] + orr r5, r5, r1, lsl #31 + orr r6, r6, r4, lsl #31 + rrx r3, r4 + lsr r1, r1, #1 + str r2, [r0, #8] + str r6, [r0, #12] + str r3, [r0, #16] + str r5, [r0, #20] + str r1, [r0, #24] + pop {r4, r5, r6, r7, r11, lr} + mov pc, lr +.Lfunc_end103: + .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L + .cantunwind + .fnend + + .globl mcl_fp_add7L + .align 2 + .type mcl_fp_add7L,%function +mcl_fp_add7L: @ @mcl_fp_add7L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #12 + sub sp, sp, #12 + ldr r7, [r1, #8] + ldr r10, [r1] + ldr r9, [r1, #4] + ldr r11, [r1, #16] + ldr r8, [r1, #24] + str r7, [sp, #8] @ 4-byte Spill + ldr r7, [r1, #12] + str r7, [sp, #4] @ 4-byte Spill + ldr r7, [r1, #20] + ldm r2, {r1, r4, r5, r6, r12, lr} + ldr r2, [r2, #24] + adds r10, r1, r10 + ldr r1, [sp, #8] @ 4-byte Reload + adcs r4, r4, r9 + str r10, [r0] + adcs r5, r5, r1 + ldr r1, [sp, #4] @ 4-byte Reload + adcs r6, r6, r1 + mov r1, #0 + adcs r9, r12, r11 + adcs r7, lr, r7 + stmib r0, {r4, r5, r6, r9} + adcs r2, r2, r8 + str r7, [r0, #20] + adc r1, r1, #0 + str r2, [r0, #24] + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [r3] + str r1, [sp] @ 4-byte Spill + ldmib r3, {r12, lr} + ldr r1, [r3, #20] + ldr r8, [r3, #12] + ldr r11, [r3, #16] + ldr r3, [r3, #24] + str r1, [sp, #4] @ 4-byte Spill + ldr r1, [sp] @ 4-byte Reload + subs r10, r10, r1 + sbcs r1, r4, r12 + ldr r4, [sp, #4] @ 4-byte Reload + sbcs r5, r5, lr + sbcs r12, r6, r8 + str r5, [sp] @ 4-byte Spill + sbcs lr, r9, r11 + sbcs r4, r7, r4 + sbcs r5, r2, r3 + ldr r2, [sp, #8] @ 4-byte Reload + sbc r2, r2, #0 + tst r2, #1 + bne .LBB104_2 +@ BB#1: @ %nocarry + str r10, [r0] + str r1, [r0, #4] + ldr r1, [sp] @ 4-byte Reload + add r2, r0, #8 + stm r2, {r1, r12, lr} + str r4, [r0, #20] + str r5, [r0, #24] +.LBB104_2: @ %carry + add sp, sp, #12 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end104: + .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L + .cantunwind + .fnend + + .globl mcl_fp_addNF7L + .align 2 + .type mcl_fp_addNF7L,%function +mcl_fp_addNF7L: @ @mcl_fp_addNF7L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #20 + sub sp, sp, #20 + ldm r1, {r6, r7} + ldr r11, [r1, #16] + ldr r9, [r1, #20] + ldr r8, [r1, #24] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r1, #8] + str r7, [sp, #12] @ 4-byte Spill + ldr r7, [r1, #12] + str r7, [sp, #8] @ 4-byte Spill + ldm r2, {r1, r4, r5, r10, r12, lr} + ldr r2, [r2, #24] + adds r7, r1, r6 + ldr r1, [sp, #16] @ 4-byte Reload + str r7, [sp, #4] @ 4-byte Spill + adcs r6, r4, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r6, [sp, #16] @ 4-byte Spill + adcs r5, r5, r1 + ldr r1, [sp, #8] @ 4-byte Reload + adcs r4, r10, r1 + ldr r10, [r3, #8] + adcs r12, r12, r11 + ldr r11, [r3, #16] + adcs lr, lr, r9 + ldr r9, [r3, #20] + adc r1, r2, r8 + ldr r2, [r3] + ldr r8, [r3, #12] + str r1, [sp, #12] @ 4-byte Spill + str r2, 
[sp] @ 4-byte Spill + ldr r2, [r3, #4] + ldr r3, [r3, #24] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [sp] @ 4-byte Reload + subs r2, r7, r2 + ldr r7, [sp, #8] @ 4-byte Reload + sbcs r7, r6, r7 + sbcs r6, r5, r10 + mov r10, r12 + sbcs r8, r4, r8 + sbcs r11, r12, r11 + sbcs r12, lr, r9 + ldr r9, [sp, #4] @ 4-byte Reload + sbc r3, r1, r3 + asr r1, r3, #31 + cmp r1, #0 + movlt r2, r9 + movlt r6, r5 + str r2, [r0] + ldr r2, [sp, #16] @ 4-byte Reload + movlt r7, r2 + cmp r1, #0 + movlt r8, r4 + movlt r11, r10 + movlt r12, lr + cmp r1, #0 + ldr r1, [sp, #12] @ 4-byte Reload + str r7, [r0, #4] + str r6, [r0, #8] + str r8, [r0, #12] + str r11, [r0, #16] + str r12, [r0, #20] + movlt r3, r1 + str r3, [r0, #24] + add sp, sp, #20 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end105: + .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L + .cantunwind + .fnend + + .globl mcl_fp_sub7L + .align 2 + .type mcl_fp_sub7L,%function +mcl_fp_sub7L: @ @mcl_fp_sub7L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #12 + sub sp, sp, #12 + ldr r7, [r2, #8] + ldr r11, [r2] + ldr r9, [r2, #4] + ldr r8, [r2, #20] + ldr r10, [r2, #24] + str r7, [sp, #8] @ 4-byte Spill + ldr r7, [r2, #12] + str r7, [sp, #4] @ 4-byte Spill + ldr r7, [r2, #16] + str r7, [sp] @ 4-byte Spill + ldm r1, {r2, r4, r5, r6, r7, lr} + ldr r1, [r1, #24] + subs r12, r2, r11 + ldr r2, [sp, #8] @ 4-byte Reload + sbcs r9, r4, r9 + ldr r4, [sp, #4] @ 4-byte Reload + str r12, [r0] + str r9, [r0, #4] + sbcs r2, r5, r2 + sbcs r11, r6, r4 + ldr r4, [sp] @ 4-byte Reload + str r2, [r0, #8] + str r11, [r0, #12] + sbcs r4, r7, r4 + sbcs r5, lr, r8 + sbcs r6, r1, r10 + add r1, r0, #16 + stm r1, {r4, r5, r6} + mov r1, #0 + sbc r1, r1, #0 + tst r1, #1 + beq .LBB106_2 +@ BB#1: @ %carry + ldr r1, [r3] + ldr r7, [r3, #4] + ldr lr, [r3, #12] + ldr r8, [r3, #16] + ldr r10, [r3, #20] + str r1, [sp, #4] @ 4-byte Spill + ldr r1, [r3, #8] + ldr r3, [r3, #24] + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [sp, #4] @ 4-byte Reload + adds r1, r1, r12 + str r1, [sp, #4] @ 4-byte Spill + ldr r1, [sp, #8] @ 4-byte Reload + adcs r7, r7, r9 + adcs r2, r1, r2 + ldr r1, [sp, #4] @ 4-byte Reload + adcs r12, lr, r11 + adcs r4, r8, r4 + adcs r5, r10, r5 + adc r3, r3, r6 + stm r0, {r1, r7} + str r2, [r0, #8] + str r12, [r0, #12] + str r4, [r0, #16] + str r5, [r0, #20] + str r3, [r0, #24] +.LBB106_2: @ %nocarry + add sp, sp, #12 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end106: + .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L + .cantunwind + .fnend + + .globl mcl_fp_subNF7L + .align 2 + .type mcl_fp_subNF7L,%function +mcl_fp_subNF7L: @ @mcl_fp_subNF7L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #16 + sub sp, sp, #16 + ldm r2, {r5, lr} + ldr r7, [r2, #8] + ldr r11, [r2, #16] + ldr r10, [r2, #24] + add r9, r1, #12 + str r7, [sp, #12] @ 4-byte Spill + ldr r7, [r2, #12] + str r7, [sp, #4] @ 4-byte Spill + ldr r7, [r2, #20] + str r7, [sp, #8] @ 4-byte Spill + ldm r1, {r2, r4, r12} + ldm r9, {r6, r8, r9} + ldr r7, [r1, #24] + ldr r1, [sp, #12] @ 4-byte Reload + subs r5, r2, r5 + sbcs lr, r4, lr + sbcs r4, r12, r1 + ldr r1, [sp, #4] @ 4-byte Reload + str lr, [sp] @ 4-byte Spill + sbcs r12, r6, r1 + ldr r6, [r3, #4] + ldr r1, [sp, #8] @ 4-byte Reload + sbcs r2, r8, r11 + ldr r8, [r3, #12] + ldr r11, [r3, #16] + str r2, [sp, #12] @ 4-byte Spill + str r6, [sp, #4] @ 4-byte Spill + ldr r6, [r3, #20] + sbcs r1, r9, 
r1 + sbc r9, r7, r10 + ldr r7, [r3] + ldr r10, [r3, #8] + ldr r3, [r3, #24] + str r6, [sp, #8] @ 4-byte Spill + ldr r6, [sp, #4] @ 4-byte Reload + adds r7, r5, r7 + adcs r6, lr, r6 + adcs lr, r4, r10 + mov r10, r1 + adcs r8, r12, r8 + adcs r11, r2, r11 + ldr r2, [sp, #8] @ 4-byte Reload + adcs r2, r1, r2 + asr r1, r9, #31 + adc r3, r9, r3 + cmp r1, #0 + movge r7, r5 + ldr r5, [sp] @ 4-byte Reload + movge lr, r4 + str r7, [r0] + ldr r7, [sp, #12] @ 4-byte Reload + movge r6, r5 + cmp r1, #0 + movge r8, r12 + movge r11, r7 + movge r2, r10 + cmp r1, #0 + str r6, [r0, #4] + str lr, [r0, #8] + movge r3, r9 + str r8, [r0, #12] + str r11, [r0, #16] + str r2, [r0, #20] + str r3, [r0, #24] + add sp, sp, #16 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end107: + .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L + .cantunwind + .fnend + + .globl mcl_fpDbl_add7L + .align 2 + .type mcl_fpDbl_add7L,%function +mcl_fpDbl_add7L: @ @mcl_fpDbl_add7L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #48 + sub sp, sp, #48 + ldm r1, {r12, lr} + ldr r8, [r1, #8] + ldr r10, [r1, #12] + ldmib r2, {r6, r7} + ldr r4, [r2, #16] + ldr r11, [r2] + ldr r5, [r2, #12] + str r4, [sp, #8] @ 4-byte Spill + ldr r4, [r2, #20] + adds r9, r11, r12 + ldr r11, [r1, #44] + str r4, [sp, #12] @ 4-byte Spill + ldr r4, [r2, #24] + str r4, [sp, #20] @ 4-byte Spill + ldr r4, [r2, #28] + str r4, [sp, #40] @ 4-byte Spill + ldr r4, [r2, #32] + str r4, [sp, #16] @ 4-byte Spill + ldr r4, [r2, #36] + str r4, [sp, #24] @ 4-byte Spill + ldr r4, [r2, #40] + str r4, [sp, #28] @ 4-byte Spill + ldr r4, [r2, #44] + str r4, [sp, #32] @ 4-byte Spill + ldr r4, [r2, #48] + ldr r2, [r2, #52] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r1, #36] + str r4, [sp, #36] @ 4-byte Spill + adcs r4, r6, lr + add lr, r1, #16 + adcs r7, r7, r8 + ldr r8, [r1, #52] + adcs r6, r5, r10 + ldr r5, [r1, #32] + ldr r10, [r1, #48] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #40] + str r2, [sp, #4] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + str r9, [r0] + stmib r0, {r4, r7} + str r6, [r0, #12] + ldr r4, [sp, #8] @ 4-byte Reload + ldr r7, [sp] @ 4-byte Reload + adcs r1, r4, r1 + ldr r4, [sp, #12] @ 4-byte Reload + str r1, [r0, #16] + ldr r1, [sp, #20] @ 4-byte Reload + adcs r2, r4, r2 + str r2, [r0, #20] + adcs r1, r1, r12 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #16] @ 4-byte Reload + adcs r2, r2, lr + str r2, [sp, #20] @ 4-byte Spill + adcs r2, r1, r5 + ldr r1, [sp, #24] @ 4-byte Reload + str r2, [sp, #16] @ 4-byte Spill + adcs r5, r1, r7 + ldr r1, [sp, #28] @ 4-byte Reload + ldr r7, [sp, #4] @ 4-byte Reload + adcs r12, r1, r7 + ldr r1, [sp, #32] @ 4-byte Reload + mov r7, #0 + str r12, [sp, #40] @ 4-byte Spill + adcs lr, r1, r11 + ldr r1, [sp, #36] @ 4-byte Reload + adcs r4, r1, r10 + ldr r1, [sp, #44] @ 4-byte Reload + adcs r9, r1, r8 + adc r1, r7, #0 + str r1, [sp, #44] @ 4-byte Spill + ldm r3, {r1, r7, r11} + ldr r10, [r3, #12] + ldr r8, [r3, #16] + ldr r6, [r3, #20] + ldr r3, [r3, #24] + str r3, [sp, #36] @ 4-byte Spill + ldr r3, [sp, #20] @ 4-byte Reload + subs r1, r3, r1 + sbcs r7, r2, r7 + sbcs r2, r5, r11 + mov r11, lr + sbcs r10, r12, r10 + sbcs r12, lr, r8 + sbcs lr, r4, r6 + ldr r6, [sp, #36] @ 4-byte Reload + sbcs r8, r9, r6 + ldr r6, [sp, #44] @ 4-byte Reload + sbc r6, r6, #0 + ands r6, r6, #1 + movne r1, r3 + movne r2, r5 + str r1, [r0, #28] + ldr r1, [sp, #16] @ 4-byte Reload + movne r7, r1 + ldr r1, [sp, #40] @ 4-byte Reload + cmp r6, #0 
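+@ --- editorial annotation: the ands/movne run here is the branchless final
+@ reduction used throughout the mcl_fp_add* / mcl_fpDbl_add* family: after the
+@ limb-wise addition, the code subtracts the modulus p with borrow and keeps
+@ the unreduced limbs only when that subtraction underflows (the carry out of
+@ the top limb is folded into the same borrow chain by the preceding sbc). A
+@ C-level sketch, with sub_borrow7 a hypothetical helper returning the final
+@ borrow bit:
+@     uint32_t s[7];
+@     uint32_t borrow = sub_borrow7(s, h, p);  /* s = h - p */
+@     for (int i = 0; i < 7; i++)
+@         out[i] = borrow ? h[i] : s[i];       /* movne re-selects h */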
+ movne r12, r11 + movne lr, r4 + str r7, [r0, #32] + str r2, [r0, #36] + movne r10, r1 + cmp r6, #0 + movne r8, r9 + str r10, [r0, #40] + str r12, [r0, #44] + str lr, [r0, #48] + str r8, [r0, #52] + add sp, sp, #48 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end108: + .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub7L + .align 2 + .type mcl_fpDbl_sub7L,%function +mcl_fpDbl_sub7L: @ @mcl_fpDbl_sub7L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #68 + sub sp, sp, #68 + ldr r7, [r2, #32] + add r8, r1, #16 + str r7, [sp, #44] @ 4-byte Spill + ldr r7, [r2, #36] + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r2, #40] + str r7, [sp, #52] @ 4-byte Spill + ldr r7, [r2, #44] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #60] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #64] @ 4-byte Spill + ldm r2, {r4, r7} + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #8] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r2, #12] + str r7, [sp, #12] @ 4-byte Spill + ldr r7, [r2, #16] + str r7, [sp, #32] @ 4-byte Spill + ldr r7, [r2, #24] + str r7, [sp, #40] @ 4-byte Spill + ldr r7, [r2, #28] + ldr r2, [r2, #20] + str r7, [sp, #36] @ 4-byte Spill + str r2, [sp, #24] @ 4-byte Spill + ldmib r1, {r2, r12, lr} + ldm r8, {r5, r6, r8} + ldr r7, [r1, #28] + ldr r11, [r1] + ldr r9, [r1, #32] + ldr r10, [r1, #44] + str r7, [sp, #28] @ 4-byte Spill + ldr r7, [r1, #36] + subs r4, r11, r4 + str r4, [r0] + str r7, [sp, #4] @ 4-byte Spill + ldr r7, [r1, #40] + str r7, [sp, #8] @ 4-byte Spill + ldr r7, [r1, #48] + ldr r1, [r1, #52] + str r7, [sp] @ 4-byte Spill + ldr r7, [sp, #20] @ 4-byte Reload + sbcs r2, r2, r7 + ldr r7, [sp, #16] @ 4-byte Reload + sbcs r12, r12, r7 + ldr r7, [sp, #12] @ 4-byte Reload + stmib r0, {r2, r12} + ldr r2, [sp, #32] @ 4-byte Reload + sbcs lr, lr, r7 + ldr r7, [sp, #28] @ 4-byte Reload + str lr, [r0, #12] + sbcs r2, r5, r2 + str r2, [r0, #16] + ldr r2, [sp, #24] @ 4-byte Reload + sbcs r2, r6, r2 + ldr r6, [sp, #8] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #40] @ 4-byte Reload + sbcs r2, r8, r2 + mov r8, #0 + str r2, [r0, #24] + ldr r2, [sp, #36] @ 4-byte Reload + sbcs lr, r7, r2 + ldr r2, [sp, #44] @ 4-byte Reload + ldr r7, [sp, #4] @ 4-byte Reload + sbcs r4, r9, r2 + ldr r2, [sp, #48] @ 4-byte Reload + ldr r9, [r3, #20] + str r4, [sp, #44] @ 4-byte Spill + sbcs r7, r7, r2 + ldr r2, [sp, #52] @ 4-byte Reload + sbcs r12, r6, r2 + ldr r2, [sp, #56] @ 4-byte Reload + ldr r6, [sp] @ 4-byte Reload + str r12, [sp, #52] @ 4-byte Spill + sbcs r11, r10, r2 + ldr r2, [sp, #60] @ 4-byte Reload + ldr r10, [r3, #12] + sbcs r6, r6, r2 + ldr r2, [sp, #64] @ 4-byte Reload + sbcs r5, r1, r2 + ldr r2, [r3, #8] + sbc r1, r8, #0 + ldr r8, [r3, #4] + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [r3] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [r3, #16] + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [r3, #24] + ldr r3, [sp, #60] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adds r1, lr, r1 + adcs r4, r4, r8 + adcs r2, r7, r2 + adcs r10, r12, r10 + adcs r12, r11, r3 + ldr r3, [sp, #56] @ 4-byte Reload + adcs r8, r6, r9 + adc r9, r5, r3 + ldr r3, [sp, #64] @ 4-byte Reload + ands r3, r3, #1 + moveq r1, lr + moveq r2, r7 + str r1, [r0, #28] + ldr r1, [sp, #44] @ 4-byte Reload + moveq r4, r1 + ldr r1, [sp, #52] @ 4-byte Reload + cmp r3, #0 + moveq r12, r11 + moveq r8, r6 + str r4, [r0, #32] + str r2, [r0, 
#36] + moveq r10, r1 + cmp r3, #0 + moveq r9, r5 + str r10, [r0, #40] + str r12, [r0, #44] + str r8, [r0, #48] + str r9, [r0, #52] + add sp, sp, #68 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end109: + .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L + .cantunwind + .fnend + + .align 2 + .type .LmulPv256x32,%function +.LmulPv256x32: @ @mulPv256x32 + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r11, lr} + push {r4, r5, r6, r7, r8, r9, r11, lr} + ldr r12, [r1] + ldmib r1, {r3, lr} + ldr r9, [r1, #12] + umull r4, r8, lr, r2 + umull lr, r6, r12, r2 + mov r5, r4 + mov r7, r6 + str lr, [r0] + umull lr, r12, r9, r2 + umlal r7, r5, r3, r2 + str r5, [r0, #8] + str r7, [r0, #4] + umull r5, r7, r3, r2 + adds r3, r6, r5 + adcs r3, r7, r4 + adcs r3, r8, lr + str r3, [r0, #12] + ldr r3, [r1, #16] + umull r7, r6, r3, r2 + adcs r3, r12, r7 + str r3, [r0, #16] + ldr r3, [r1, #20] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #20] + ldr r3, [r1, #24] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #24] + ldr r1, [r1, #28] + umull r3, r7, r1, r2 + adcs r1, r6, r3 + str r1, [r0, #28] + adc r1, r7, #0 + str r1, [r0, #32] + pop {r4, r5, r6, r7, r8, r9, r11, lr} + mov pc, lr +.Lfunc_end110: + .size .LmulPv256x32, .Lfunc_end110-.LmulPv256x32 + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre8L + .align 2 + .type mcl_fp_mulUnitPre8L,%function +mcl_fp_mulUnitPre8L: @ @mcl_fp_mulUnitPre8L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r11, lr} + push {r4, r5, r6, r7, r11, lr} + .pad #40 + sub sp, sp, #40 + mov r4, r0 + mov r0, sp + bl .LmulPv256x32(PLT) + ldr r0, [sp, #32] + add lr, sp, #16 + ldr r12, [sp, #28] + ldm lr, {r1, r3, lr} + ldm sp, {r2, r5, r6, r7} + str r0, [r4, #32] + add r0, r4, #16 + stm r4, {r2, r5, r6, r7} + stm r0, {r1, r3, lr} + str r12, [r4, #28] + add sp, sp, #40 + pop {r4, r5, r6, r7, r11, lr} + mov pc, lr +.Lfunc_end111: + .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre8L + .align 2 + .type mcl_fpDbl_mulPre8L,%function +mcl_fpDbl_mulPre8L: @ @mcl_fpDbl_mulPre8L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #124 + sub sp, sp, #124 + mov r6, r2 + mov r5, r1 + mov r4, r0 + bl mcl_fpDbl_mulPre4L(PLT) + add r0, r4, #32 + add r1, r5, #16 + add r2, r6, #16 + bl mcl_fpDbl_mulPre4L(PLT) + ldm r6, {r12, lr} + ldr r7, [r6, #16] + ldr r9, [r6, #8] + ldr r3, [r6, #12] + add r6, r6, #20 + mov r8, #0 + ldm r6, {r0, r1, r6} + adds r2, r12, r7 + adcs r0, lr, r0 + str r2, [sp, #56] @ 4-byte Spill + adcs r1, r9, r1 + str r0, [sp, #52] @ 4-byte Spill + ldr r9, [r5] + str r1, [sp, #44] @ 4-byte Spill + adcs r1, r3, r6 + str r1, [sp, #48] @ 4-byte Spill + adc r6, r8, #0 + ldmib r5, {r8, r10, r12} + ldr r7, [r5, #16] + ldr r3, [r5, #20] + ldr lr, [r5, #24] + ldr r11, [r5, #28] + str r2, [sp, #60] + str r0, [sp, #64] + mov r0, #0 + add r2, sp, #60 + adds r5, r9, r7 + ldr r7, [sp, #44] @ 4-byte Reload + adcs r8, r8, r3 + str r5, [sp, #76] + adcs r10, r10, lr + str r8, [sp, #80] + adcs r9, r12, r11 + str r10, [sp, #84] + str r7, [sp, #68] + str r1, [sp, #72] + adc r11, r0, #0 + add r0, sp, #92 + add r1, sp, #76 + str r9, [sp, #88] + bl mcl_fpDbl_mulPre4L(PLT) + ldr r0, [sp, #56] @ 4-byte Reload + ldr r1, [sp, #52] @ 4-byte Reload + cmp r6, #0 + ldr r3, [sp, #48] @ 4-byte Reload + and r12, r6, r11 + ldr lr, [sp, #120] + moveq r5, r6 + moveq r9, r6 + moveq r10, r6 + moveq r8, r6 + ldr r6, [sp, #116] + adds r0, r5, r0 + adcs r1, r8, r1 
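+@ --- editorial annotation: mcl_fpDbl_mulPre8L is one Karatsuba level on top
+@ of the 4-limb multiplier. Writing x = x1*2^128 + x0 and y = y1*2^128 + y0,
+@ it issues three mcl_fpDbl_mulPre4L calls, for x0*y0, x1*y1, and
+@ (x0+x1)*(y0+y1), then recovers the middle term as
+@     (x0+x1)*(y0+y1) - x0*y0 - x1*y1
+@ placed at offset 2^128. The adds/adcs chains before the third call form the
+@ two half-sums, and the moveq/carry bookkeeping around this point folds the
+@ half-sum carry bits (each 128-bit sum may overflow) back into the middle
+@ product before the subs/sbcs sequences subtract the two half products.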
+ adcs r2, r10, r7 + mov r7, #0 + adcs r3, r9, r3 + adc r7, r7, #0 + cmp r11, #0 + moveq r0, r5 + ldr r5, [sp, #108] + moveq r2, r10 + moveq r3, r9 + moveq r7, r11 + moveq r1, r8 + adds r8, r0, r5 + ldr r5, [sp, #112] + adcs r10, r1, r5 + adcs r9, r2, r6 + ldr r6, [r4] + ldmib r4, {r5, r11} + ldr r2, [sp, #92] + adcs lr, r3, lr + add r3, sp, #96 + adc r12, r7, r12 + ldr r7, [r4, #12] + ldm r3, {r0, r1, r3} + subs r2, r2, r6 + str r2, [sp, #52] @ 4-byte Spill + sbcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + sbcs r0, r1, r11 + str r0, [sp, #44] @ 4-byte Spill + sbcs r0, r3, r7 + ldr r7, [r4, #20] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r4, #16] + str r0, [sp, #56] @ 4-byte Spill + sbcs r0, r8, r0 + ldr r8, [r4, #28] + str r0, [sp, #28] @ 4-byte Spill + sbcs r0, r10, r7 + ldr r10, [r4, #24] + str r0, [sp, #24] @ 4-byte Spill + sbcs r0, r9, r10 + str r0, [sp, #20] @ 4-byte Spill + sbcs r0, lr, r8 + add lr, r4, #32 + str r0, [sp, #16] @ 4-byte Spill + sbc r0, r12, #0 + str r0, [sp, #12] @ 4-byte Spill + ldm lr, {r5, r9, lr} + ldr r6, [sp, #52] @ 4-byte Reload + ldr r12, [r4, #44] + ldr r2, [r4, #48] + ldr r0, [r4, #52] + ldr r1, [r4, #56] + ldr r3, [r4, #60] + subs r6, r6, r5 + str r1, [sp, #36] @ 4-byte Spill + str r3, [sp, #32] @ 4-byte Spill + str r6, [sp] @ 4-byte Spill + ldr r6, [sp, #48] @ 4-byte Reload + sbcs r11, r6, r9 + ldr r6, [sp, #44] @ 4-byte Reload + sbcs r6, r6, lr + str r6, [sp, #4] @ 4-byte Spill + ldr r6, [sp, #40] @ 4-byte Reload + sbcs r6, r6, r12 + str r6, [sp, #8] @ 4-byte Spill + ldr r6, [sp, #28] @ 4-byte Reload + sbcs r6, r6, r2 + str r6, [sp, #28] @ 4-byte Spill + ldr r6, [sp, #24] @ 4-byte Reload + sbcs r6, r6, r0 + str r6, [sp, #40] @ 4-byte Spill + mov r6, r0 + ldr r0, [sp, #20] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + sbcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + sbc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adds r3, r0, r1 + ldr r0, [sp, #4] @ 4-byte Reload + adcs r7, r7, r11 + str r3, [r4, #16] + str r7, [r4, #20] + adcs r3, r10, r0 + ldr r0, [sp, #8] @ 4-byte Reload + str r3, [r4, #24] + ldr r3, [sp, #32] @ 4-byte Reload + adcs r1, r8, r0 + ldr r0, [sp, #28] @ 4-byte Reload + str r1, [r4, #28] + ldr r1, [sp, #40] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [r4, #32] + ldr r0, [sp, #44] @ 4-byte Reload + adcs r1, r9, r1 + str r1, [r4, #36] + adcs r0, lr, r0 + str r0, [r4, #40] + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r12, r0 + add r12, r4, #48 + str r0, [r4, #44] + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #36] @ 4-byte Reload + adcs r1, r6, #0 + adcs r2, r2, #0 + adc r3, r3, #0 + stm r12, {r0, r1, r2, r3} + add sp, sp, #124 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end112: + .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre8L + .align 2 + .type mcl_fpDbl_sqrPre8L,%function +mcl_fpDbl_sqrPre8L: @ @mcl_fpDbl_sqrPre8L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #124 + sub sp, sp, #124 + mov r5, r1 + mov r4, r0 + mov r2, r5 + bl mcl_fpDbl_mulPre4L(PLT) + add r1, r5, #16 + add r0, r4, #32 + mov r2, r1 + bl mcl_fpDbl_mulPre4L(PLT) + ldm r5, {r0, r8, lr} + ldr r3, [r5, #16] + ldr r2, [r5, #20] + ldr r6, [r5, #24] + ldr r12, [r5, #12] + ldr r1, [r5, #28] + adds r9, r0, r3 + add r0, sp, #64 + 
adcs r5, r8, r2 + str r9, [sp, #76] + str r9, [sp, #60] + add r2, sp, #60 + adcs r6, lr, r6 + str r5, [sp, #80] + adcs r7, r12, r1 + str r6, [sp, #84] + add r1, sp, #76 + str r7, [sp, #88] + stm r0, {r5, r6, r7} + mov r0, #0 + adc r8, r0, #0 + add r0, sp, #92 + bl mcl_fpDbl_mulPre4L(PLT) + adds r12, r9, r9 + adcs lr, r5, r5 + adcs r9, r6, r6 + add r6, sp, #112 + ldm r6, {r0, r5, r6} + ldr r1, [sp, #108] + adc r10, r7, r7 + adds r2, r1, r12 + adcs r3, r0, lr + adcs r12, r5, r9 + adcs lr, r6, r10 + adc r7, r8, r7, lsr #31 + cmp r8, #0 + moveq lr, r6 + add r6, sp, #92 + moveq r7, r8 + moveq r12, r5 + moveq r3, r0 + moveq r2, r1 + ldm r4, {r8, r9, r10, r11} + ldm r6, {r0, r1, r5, r6} + subs r0, r0, r8 + ldr r8, [r4, #20] + str r0, [sp, #52] @ 4-byte Spill + sbcs r0, r1, r9 + ldr r9, [r4, #24] + str r0, [sp, #48] @ 4-byte Spill + sbcs r0, r5, r10 + ldr r10, [r4, #28] + str r0, [sp, #44] @ 4-byte Spill + sbcs r0, r6, r11 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r4, #16] + str r0, [sp, #56] @ 4-byte Spill + sbcs r0, r2, r0 + str r0, [sp, #28] @ 4-byte Spill + sbcs r0, r3, r8 + str r0, [sp, #24] @ 4-byte Spill + sbcs r0, r12, r9 + str r0, [sp, #20] @ 4-byte Spill + sbcs r0, lr, r10 + add lr, r4, #32 + str r0, [sp, #16] @ 4-byte Spill + sbc r0, r7, #0 + str r0, [sp, #12] @ 4-byte Spill + ldm lr, {r5, r7, lr} + ldr r6, [sp, #52] @ 4-byte Reload + ldr r12, [r4, #44] + ldr r2, [r4, #48] + ldr r0, [r4, #52] + ldr r1, [r4, #56] + ldr r3, [r4, #60] + subs r6, r6, r5 + str r1, [sp, #36] @ 4-byte Spill + str r3, [sp, #32] @ 4-byte Spill + str r6, [sp] @ 4-byte Spill + ldr r6, [sp, #48] @ 4-byte Reload + sbcs r11, r6, r7 + ldr r6, [sp, #44] @ 4-byte Reload + sbcs r6, r6, lr + str r6, [sp, #4] @ 4-byte Spill + ldr r6, [sp, #40] @ 4-byte Reload + sbcs r6, r6, r12 + str r6, [sp, #8] @ 4-byte Spill + ldr r6, [sp, #28] @ 4-byte Reload + sbcs r6, r6, r2 + str r6, [sp, #28] @ 4-byte Spill + ldr r6, [sp, #24] @ 4-byte Reload + sbcs r6, r6, r0 + str r6, [sp, #40] @ 4-byte Spill + mov r6, r0 + ldr r0, [sp, #20] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + sbcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + sbc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adds r3, r1, r0 + ldr r0, [sp, #4] @ 4-byte Reload + adcs r1, r11, r8 + str r3, [r4, #16] + str r1, [r4, #20] + adcs r3, r0, r9 + ldr r0, [sp, #8] @ 4-byte Reload + str r3, [r4, #24] + ldr r3, [sp, #32] @ 4-byte Reload + adcs r1, r0, r10 + ldr r0, [sp, #28] @ 4-byte Reload + str r1, [r4, #28] + ldr r1, [sp, #40] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [r4, #32] + ldr r0, [sp, #44] @ 4-byte Reload + adcs r1, r1, r7 + str r1, [r4, #36] + adcs r0, r0, lr + str r0, [r4, #40] + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + add r12, r4, #48 + str r0, [r4, #44] + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #36] @ 4-byte Reload + adcs r1, r6, #0 + adcs r2, r2, #0 + adc r3, r3, #0 + stm r12, {r0, r1, r2, r3} + add sp, sp, #124 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end113: + .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L + .cantunwind + .fnend + + .globl mcl_fp_mont8L + .align 2 + .type mcl_fp_mont8L,%function +mcl_fp_mont8L: @ @mcl_fp_mont8L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #724 + sub sp, sp, #724 + mov r7, r2 + ldr r5, [r3, #-4] + str r0, [sp, 
#60] @ 4-byte Spill + add r0, sp, #680 + str r3, [sp, #64] @ 4-byte Spill + str r1, [sp, #68] @ 4-byte Spill + mov r4, r3 + mov r11, r1 + ldr r2, [r7] + str r7, [sp, #76] @ 4-byte Spill + str r5, [sp, #72] @ 4-byte Spill + bl .LmulPv256x32(PLT) + ldr r0, [sp, #684] + ldr r9, [sp, #680] + mov r1, r4 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #688] + mul r2, r9, r5 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #692] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #712] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #708] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #704] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #700] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #696] + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #640 + bl .LmulPv256x32(PLT) + ldr r0, [sp, #672] + add r10, sp, #644 + ldr r4, [sp, #656] + ldr r6, [sp, #640] + mov r1, r11 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #668] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #664] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #660] + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r5, r8, r10} + ldr r2, [r7, #4] + add r0, sp, #600 + bl .LmulPv256x32(PLT) + adds r0, r6, r9 + ldr r2, [sp, #12] @ 4-byte Reload + mov r1, #0 + add r12, sp, #604 + ldr r9, [sp, #628] + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r8, r0 + ldr r8, [sp, #632] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r10, r10, r0 + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #600] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r11, r2, r0 + ldr r0, [sp, #52] @ 4-byte Reload + ldr r2, [sp, #20] @ 4-byte Reload + adcs r7, r2, r0 + ldr r0, [sp, #56] @ 4-byte Reload + ldr r2, [sp, #24] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r1, #0 + str r0, [sp, #24] @ 4-byte Spill + ldm r12, {r0, r1, r2, r3, r6, r12} + ldr lr, [sp, #48] @ 4-byte Reload + ldr r5, [sp, #44] @ 4-byte Reload + adds r4, lr, r4 + adcs r0, r5, r0 + str r0, [sp, #56] @ 4-byte Spill + adcs r0, r10, r1 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #44] @ 4-byte Spill + adcs r0, r11, r6 + ldr r6, [sp, #64] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r12 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + mov r1, r6 + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + mul r2, r4, r0 + add r0, sp, #560 + bl .LmulPv256x32(PLT) + ldr r0, [sp, #592] + ldr r5, [sp, #76] @ 4-byte Reload + ldr r1, [sp, #68] @ 4-byte Reload + ldr r7, [sp, #576] + ldr r10, [sp, #560] + ldr r11, [sp, #564] + ldr r8, [sp, #568] + ldr r9, [sp, #572] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #588] + ldr r2, [r5, #8] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #584] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #580] + str r0, [sp, #8] @ 4-byte Spill + add r0, sp, #520 + bl .LmulPv256x32(PLT) + adds r0, r4, r10 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #520 + ldr r4, [sp, 
#544] + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r10, r0, r8 + ldr r0, [sp, #48] @ 4-byte Reload + ldr r8, [sp, #552] + adcs r11, r0, r9 + ldr r0, [sp, #44] @ 4-byte Reload + ldr r9, [sp, #548] + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #56] @ 4-byte Reload + adds r7, r7, r0 + adcs r0, r10, r1 + mov r1, r6 + str r0, [sp, #56] @ 4-byte Spill + adcs r0, r11, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #480 + bl .LmulPv256x32(PLT) + ldr r0, [sp, #512] + ldr r2, [r5, #12] + ldr r1, [sp, #68] @ 4-byte Reload + ldr r4, [sp, #500] + ldr r6, [sp, #496] + ldr r10, [sp, #480] + ldr r11, [sp, #484] + ldr r8, [sp, #488] + ldr r9, [sp, #492] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #508] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #504] + str r0, [sp, #12] @ 4-byte Spill + add r0, sp, #440 + bl .LmulPv256x32(PLT) + adds r0, r7, r10 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #440 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r5, r0, r11 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r10, r0, r8 + ldr r0, [sp, #48] @ 4-byte Reload + ldr r8, [sp, #472] + adcs r11, r0, r9 + ldr r0, [sp, #44] @ 4-byte Reload + ldr r9, [sp, #468] + adcs r0, r0, r6 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #464] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r6, r0, r1 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #20] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r7, r5, r0 + adcs r0, r10, r1 + ldr r1, [sp, #64] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + adcs r0, r11, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r6, r4 + ldr 
r6, [sp, #72] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + mul r2, r7, r6 + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #400 + bl .LmulPv256x32(PLT) + ldr r0, [sp, #432] + ldr r5, [sp, #68] @ 4-byte Reload + ldr r4, [sp, #416] + ldr r10, [sp, #400] + ldr r11, [sp, #404] + ldr r8, [sp, #408] + ldr r9, [sp, #412] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #428] + mov r1, r5 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #424] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #420] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, sp, #360 + bl .LmulPv256x32(PLT) + adds r0, r7, r10 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #360 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r7, r0, r11 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r10, r0, r8 + ldr r0, [sp, #48] @ 4-byte Reload + ldr r8, [sp, #392] + adcs r11, r0, r9 + ldr r0, [sp, #44] @ 4-byte Reload + ldr r9, [sp, #388] + adcs r0, r0, r4 + ldr r4, [sp, #384] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r7, r7, r0 + adcs r0, r10, r1 + ldr r1, [sp, #64] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + adcs r0, r11, r2 + mul r2, r7, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #320 + bl .LmulPv256x32(PLT) + ldr r0, [sp, #352] + ldr r6, [sp, #340] + ldr r4, [sp, #336] + ldr r10, [sp, #320] + ldr r11, [sp, #324] + ldr r8, [sp, #328] + ldr r9, [sp, #332] + mov r1, r5 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #348] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #344] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #20] + add r0, sp, #280 + bl .LmulPv256x32(PLT) + adds r0, r7, r10 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #280 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r5, r0, r11 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r10, r0, r8 + ldr r0, [sp, #48] @ 4-byte Reload + ldr r8, [sp, #312] + adcs r11, r0, r9 + ldr r0, [sp, #44] @ 4-byte Reload + ldr r9, [sp, #308] + adcs r0, r0, r4 + ldr r4, [sp, #304] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] 
@ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r6, r0, r1 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #20] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r7, r5, r0 + ldr r5, [sp, #64] @ 4-byte Reload + adcs r0, r10, r1 + str r0, [sp, #56] @ 4-byte Spill + adcs r0, r11, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + mov r1, r5 + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r6, r4 + ldr r6, [sp, #72] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + mul r2, r7, r6 + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #240 + bl .LmulPv256x32(PLT) + ldr r0, [sp, #272] + ldr r1, [sp, #68] @ 4-byte Reload + ldr r4, [sp, #256] + ldr r10, [sp, #240] + ldr r11, [sp, #244] + ldr r8, [sp, #248] + ldr r9, [sp, #252] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #268] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #264] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #260] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, sp, #200 + bl .LmulPv256x32(PLT) + adds r0, r7, r10 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #200 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r7, r0, r11 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r10, r0, r8 + ldr r0, [sp, #48] @ 4-byte Reload + ldr r8, [sp, #232] + adcs r11, r0, r9 + ldr r0, [sp, #44] @ 4-byte Reload + ldr r9, [sp, #228] + adcs r0, r0, r4 + ldr r4, [sp, #224] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r7, r7, r0 + adcs r0, r10, r1 + mov r1, r5 + str r0, [sp, #56] @ 4-byte Spill + adcs r0, r11, r2 + mul r2, r7, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #160 + bl .LmulPv256x32(PLT) + ldr r0, [sp, #192] + ldr r1, [sp, #68] @ 4-byte Reload + ldr r6, 
[sp, #184] + ldr r4, [sp, #180] + ldr r5, [sp, #176] + ldr r10, [sp, #160] + ldr r11, [sp, #164] + ldr r8, [sp, #168] + ldr r9, [sp, #172] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #188] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, sp, #120 + bl .LmulPv256x32(PLT) + adds r0, r7, r10 + ldr r1, [sp, #52] @ 4-byte Reload + ldr r2, [sp, #16] @ 4-byte Reload + ldr r12, [sp, #124] + ldr r3, [sp, #128] + add lr, sp, #136 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + adcs r8, r1, r8 + ldr r1, [sp, #48] @ 4-byte Reload + adcs r9, r1, r9 + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r1, r5 + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r11, r1, r4 + ldr r1, [sp, #36] @ 4-byte Reload + ldr r4, [sp, #132] + adcs r1, r1, r6 + ldr r6, [sp, #152] + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + adcs r10, r1, r2 + ldr r1, [sp, #28] @ 4-byte Reload + ldr r2, [sp, #20] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #120] + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #24] @ 4-byte Reload + adc r1, r1, #0 + adds r5, r0, r2 + ldr r0, [sp, #72] @ 4-byte Reload + adcs r8, r8, r12 + str r1, [sp, #52] @ 4-byte Spill + adcs r3, r9, r3 + mul r7, r5, r0 + ldm lr, {r0, r1, r2, lr} + str r3, [sp, #48] @ 4-byte Spill + ldr r3, [sp, #76] @ 4-byte Reload + adcs r3, r3, r4 + ldr r4, [sp, #64] @ 4-byte Reload + adcs r9, r11, r0 + ldr r0, [sp, #68] @ 4-byte Reload + str r3, [sp, #44] @ 4-byte Spill + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #68] @ 4-byte Spill + adcs r0, r10, r2 + mov r2, r7 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r10, r0, r6 + mov r0, #0 + adc r11, r0, #0 + add r0, sp, #80 + bl .LmulPv256x32(PLT) + add r3, sp, #80 + ldm r3, {r0, r1, r2, r3} + adds r0, r5, r0 + ldr r0, [sp, #48] @ 4-byte Reload + adcs lr, r8, r1 + ldr r1, [sp, #68] @ 4-byte Reload + str lr, [sp, #40] @ 4-byte Spill + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r7, r0, r3 + ldr r0, [sp, #96] + str r7, [sp, #52] @ 4-byte Spill + adcs r9, r9, r0 + ldr r0, [sp, #100] + adcs r12, r1, r0 + ldr r0, [sp, #104] + ldr r1, [sp, #72] @ 4-byte Reload + str r12, [sp, #68] @ 4-byte Spill + adcs r8, r1, r0 + ldr r0, [sp, #108] + ldr r1, [sp, #76] @ 4-byte Reload + str r8, [sp, #72] @ 4-byte Spill + adcs r6, r1, r0 + ldr r0, [sp, #112] + adcs r5, r10, r0 + adc r0, r11, #0 + str r0, [sp, #76] @ 4-byte Spill + ldm r4, {r1, r2, r3, r11} + ldr r0, [r4, #16] + ldr r10, [r4, #24] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r4, #20] + subs r1, lr, r1 + ldr lr, [sp, #56] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r4, #28] + sbcs r2, lr, r2 + ldr r4, [sp, #48] @ 4-byte Reload + sbcs r3, r7, r3 + sbcs r7, r9, r11 + mov r11, r6 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + sbcs r0, r12, r0 + sbcs r12, r8, r4 + ldr r4, [sp, #64] @ 4-byte Reload + sbcs r8, r6, r10 + mov r10, r5 + sbcs r4, r5, r4 + ldr r5, [sp, #76] @ 4-byte Reload + sbc r6, r5, #0 + ldr r5, [sp, #40] @ 4-byte Reload + ands r6, r6, #1 + movne r2, lr + movne r1, r5 + ldr r5, [sp, #60] @ 4-byte Reload + str r1, [r5] + ldr r1, [sp, #52] @ 4-byte Reload + str r2, [r5, #4] + movne r3, r1 + ldr r1, [sp, #68] @ 4-byte Reload + cmp r6, #0 + movne r7, r9 + str r3, [r5, #8] + str r7, [r5, #12] + movne r0, r1 + str r0, [r5, #16] + ldr r0, [sp, #72] @ 4-byte 
Reload + movne r12, r0 + cmp r6, #0 + movne r8, r11 + movne r4, r10 + str r12, [r5, #20] + str r8, [r5, #24] + str r4, [r5, #28] + add sp, sp, #724 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end114: + .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L + .cantunwind + .fnend + + .globl mcl_fp_montNF8L + .align 2 + .type mcl_fp_montNF8L,%function +mcl_fp_montNF8L: @ @mcl_fp_montNF8L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #716 + sub sp, sp, #716 + mov r7, r2 + ldr r5, [r3, #-4] + str r0, [sp, #52] @ 4-byte Spill + add r0, sp, #672 + str r3, [sp, #60] @ 4-byte Spill + str r1, [sp, #68] @ 4-byte Spill + mov r4, r3 + mov r10, r1 + ldr r2, [r7] + str r7, [sp, #56] @ 4-byte Spill + str r5, [sp, #64] @ 4-byte Spill + bl .LmulPv256x32(PLT) + ldr r0, [sp, #676] + ldr r11, [sp, #672] + mov r1, r4 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #680] + mul r2, r11, r5 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #684] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #704] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #700] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #696] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #692] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #688] + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #632 + bl .LmulPv256x32(PLT) + ldr r0, [sp, #664] + ldr r2, [r7, #4] + ldr r4, [sp, #648] + ldr r6, [sp, #632] + ldr r8, [sp, #636] + ldr r5, [sp, #640] + ldr r9, [sp, #644] + mov r1, r10 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #660] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #656] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #652] + str r0, [sp, #4] @ 4-byte Spill + add r0, sp, #592 + bl .LmulPv256x32(PLT) + adds r0, r6, r11 + ldr r1, [sp, #4] @ 4-byte Reload + add r6, sp, #596 + ldr r12, [sp, #616] + ldr r3, [sp, #612] + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r8, r0 + ldr r8, [sp, #620] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r9, r9, r0 + ldr r0, [sp, #20] @ 4-byte Reload + adcs r11, r4, r0 + ldr r0, [sp, #24] @ 4-byte Reload + ldr r4, [sp, #592] + adcs r0, r1, r0 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r10, r1, r0 + ldr r0, [sp, #44] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r7, r1, r0 + ldr r0, [sp, #48] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adc r0, r1, r0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #624] + str r0, [sp, #20] @ 4-byte Spill + ldm r6, {r0, r1, r2, r6} + ldr lr, [sp, #40] @ 4-byte Reload + ldr r5, [sp, #36] @ 4-byte Reload + adds r4, lr, r4 + adcs r0, r5, r0 + ldr r5, [sp, #64] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r9, r1 + str r0, [sp, #44] @ 4-byte Spill + adcs r0, r11, r2 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + mul r2, r4, r5 + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r10, r3 + str r0, [sp, #32] @ 4-byte Spill + adcs r0, r7, r12 + ldr r7, [sp, #60] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + mov r1, r7 + adcs r0, r0, r8 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #552 + bl .LmulPv256x32(PLT) + ldr r0, [sp, #584] + ldr r1, [sp, #68] @ 4-byte Reload + ldr r9, [sp, 
#568] + ldr r10, [sp, #552] + ldr r11, [sp, #556] + ldr r8, [sp, #560] + ldr r6, [sp, #564] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #580] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #576] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #572] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + ldr r2, [r0, #8] + add r0, sp, #512 + bl .LmulPv256x32(PLT) + adds r0, r4, r10 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #516 + ldr r4, [sp, #536] + ldr r3, [sp, #512] + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r10, r0, r8 + ldr r0, [sp, #40] @ 4-byte Reload + ldr r8, [sp, #540] + adcs r11, r0, r6 + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #544] + str r0, [sp, #20] @ 4-byte Spill + ldm lr, {r0, r1, r2, r12, lr} + ldr r6, [sp, #48] @ 4-byte Reload + adds r9, r6, r3 + adcs r0, r10, r0 + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r11, r1 + mov r1, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r9, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #472 + bl .LmulPv256x32(PLT) + ldr r0, [sp, #504] + ldr r1, [sp, #68] @ 4-byte Reload + ldr r4, [sp, #492] + ldr r7, [sp, #488] + ldr r10, [sp, #472] + ldr r11, [sp, #476] + ldr r8, [sp, #480] + ldr r6, [sp, #484] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #500] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #496] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, sp, #432 + bl .LmulPv256x32(PLT) + adds r0, r9, r10 + ldr r1, [sp, #8] @ 4-byte Reload + ldr r3, [sp, #432] + add lr, sp, #436 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r5, r0, r11 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r10, r0, r8 + ldr r0, [sp, #40] @ 4-byte Reload + ldr r8, [sp, #460] + adcs r11, r0, r6 + ldr r0, [sp, #36] @ 4-byte Reload + adcs r7, r0, r7 + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #456] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r6, r0, r1 + ldr r0, [sp, #24] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, r1 + adds r9, r5, r3 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #464] + str r0, [sp, #20] @ 4-byte Spill + ldm lr, {r0, r1, r2, r12, lr} + adcs r0, r10, r0 + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r11, r1 + ldr r1, [sp, #60] @ 4-byte Reload + str r0, [sp, #44] @ 
4-byte Spill + adcs r0, r7, r2 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r6, lr + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + mul r2, r9, r0 + add r0, sp, #392 + bl .LmulPv256x32(PLT) + ldr r0, [sp, #424] + ldr r5, [sp, #56] @ 4-byte Reload + ldr r7, [sp, #68] @ 4-byte Reload + ldr r4, [sp, #408] + ldr r10, [sp, #392] + ldr r11, [sp, #396] + ldr r8, [sp, #400] + ldr r6, [sp, #404] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #420] + ldr r2, [r5, #16] + mov r1, r7 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #416] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #412] + str r0, [sp, #4] @ 4-byte Spill + add r0, sp, #352 + bl .LmulPv256x32(PLT) + adds r0, r9, r10 + ldr r1, [sp, #4] @ 4-byte Reload + ldr r3, [sp, #352] + add lr, sp, #356 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r9, r0, r11 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r10, r0, r8 + ldr r0, [sp, #40] @ 4-byte Reload + ldr r8, [sp, #380] + adcs r11, r0, r6 + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #376] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r6, r0, r1 + ldr r0, [sp, #24] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, r1 + adds r9, r9, r3 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #384] + str r0, [sp, #20] @ 4-byte Spill + ldm lr, {r0, r1, r2, r12, lr} + adcs r0, r10, r0 + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r11, r1 + ldr r1, [sp, #60] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r6, lr + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + mul r2, r9, r0 + add r0, sp, #312 + bl .LmulPv256x32(PLT) + ldr r0, [sp, #344] + ldr r2, [r5, #20] + ldr r4, [sp, #328] + ldr r10, [sp, #312] + ldr r11, [sp, #316] + ldr r8, [sp, #320] + ldr r6, [sp, #324] + mov r1, r7 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #340] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #336] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #332] + str r0, [sp, #4] @ 4-byte Spill + add r0, sp, #272 + bl .LmulPv256x32(PLT) + adds r0, r9, r10 + ldr r1, [sp, #4] @ 4-byte Reload + ldr r3, [sp, #272] + add lr, sp, #276 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r5, r0, r11 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r10, r0, r8 + ldr r0, [sp, #40] @ 4-byte Reload + ldr r8, [sp, #300] + adcs r11, r0, r6 + ldr r0, [sp, #36] @ 4-byte Reload + adcs r7, r0, r4 + ldr r0, [sp, #32] @ 4-byte Reload + ldr r4, [sp, #296] + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte 
Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r6, r0, r1 + ldr r0, [sp, #24] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, r1 + adds r9, r5, r3 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #304] + str r0, [sp, #20] @ 4-byte Spill + ldm lr, {r0, r1, r2, r12, lr} + ldr r5, [sp, #60] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r11, r1 + mov r1, r5 + str r0, [sp, #44] @ 4-byte Spill + adcs r0, r7, r2 + ldr r7, [sp, #64] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + mul r2, r9, r7 + adcs r0, r0, r12 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r6, lr + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #232 + bl .LmulPv256x32(PLT) + ldr r0, [sp, #264] + ldr r1, [sp, #68] @ 4-byte Reload + ldr r4, [sp, #248] + ldr r10, [sp, #232] + ldr r11, [sp, #236] + ldr r8, [sp, #240] + ldr r6, [sp, #244] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #260] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #256] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #252] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, sp, #192 + bl .LmulPv256x32(PLT) + adds r0, r9, r10 + ldr r1, [sp, #4] @ 4-byte Reload + ldr r3, [sp, #192] + add lr, sp, #196 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r9, r0, r11 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r10, r0, r8 + ldr r0, [sp, #40] @ 4-byte Reload + ldr r8, [sp, #220] + adcs r11, r0, r6 + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #216] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r6, r0, r1 + ldr r0, [sp, #24] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, r1 + adds r9, r9, r3 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #224] + str r0, [sp, #20] @ 4-byte Spill + ldm lr, {r0, r1, r2, r12, lr} + adcs r0, r10, r0 + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r11, r1 + mov r1, r5 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r9, r7 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r6, lr + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #152 + bl .LmulPv256x32(PLT) + ldr r0, [sp, #184] + ldr r1, [sp, #68] @ 4-byte Reload + ldr r5, [sp, #176] + ldr r4, [sp, #172] + ldr r7, [sp, #168] + ldr r10, [sp, #152] + ldr r11, [sp, #156] + ldr r8, [sp, #160] + ldr r6, [sp, #164] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #180] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #56] @ 
4-byte Reload + ldr r2, [r0, #28] + add r0, sp, #112 + bl .LmulPv256x32(PLT) + adds r0, r9, r10 + ldr r1, [sp, #44] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + add lr, sp, #128 + ldr r12, [sp, #116] + ldr r3, [sp, #120] + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r11 + adcs r1, r1, r8 + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r1, r6 + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adcs r11, r1, r7 + ldr r1, [sp, #32] @ 4-byte Reload + adcs r10, r1, r4 + ldr r1, [sp, #28] @ 4-byte Reload + ldr r4, [sp, #124] + adcs r1, r1, r5 + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #24] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #20] @ 4-byte Reload + adc r1, r1, r2 + ldr r2, [sp, #112] + str r1, [sp, #40] @ 4-byte Spill + adds r5, r0, r2 + ldr r0, [sp, #64] @ 4-byte Reload + mul r9, r5, r0 + ldm lr, {r0, r1, r2, r6, lr} + ldr r8, [sp, #68] @ 4-byte Reload + adcs r7, r8, r12 + ldr r8, [sp, #60] @ 4-byte Reload + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [sp, #56] @ 4-byte Reload + adcs r3, r7, r3 + adcs r11, r11, r4 + str r3, [sp, #56] @ 4-byte Spill + adcs r4, r10, r0 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r2 + mov r2, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r10, r0, r6 + add r0, sp, #72 + adc r7, lr, #0 + bl .LmulPv256x32(PLT) + add r3, sp, #72 + ldm r3, {r0, r1, r2, r3} + adds r0, r5, r0 + ldr r0, [sp, #36] @ 4-byte Reload + adcs r5, r0, r1 + ldr r0, [sp, #56] @ 4-byte Reload + ldr r1, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r11, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #88] + adcs r3, r4, r0 + ldr r0, [sp, #92] + str r3, [sp, #40] @ 4-byte Spill + adcs r6, r1, r0 + ldr r0, [sp, #96] + ldr r1, [sp, #68] @ 4-byte Reload + str r6, [sp, #64] @ 4-byte Spill + adcs r12, r1, r0 + ldr r0, [sp, #100] + ldr r1, [sp, #104] + str r12, [sp, #68] @ 4-byte Spill + adcs r11, r10, r0 + adc r4, r7, r1 + ldm r8, {r1, r2, r9, r10} + ldr r0, [r8, #20] + ldr r7, [r8, #16] + ldr lr, [r8, #28] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r8, #24] + str r0, [sp, #44] @ 4-byte Spill + mov r0, r5 + subs r5, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + sbcs r8, r1, r2 + ldr r2, [sp, #56] @ 4-byte Reload + sbcs r9, r2, r9 + sbcs r10, r3, r10 + ldr r3, [sp, #36] @ 4-byte Reload + sbcs r7, r6, r7 + sbcs r6, r12, r3 + ldr r3, [sp, #44] @ 4-byte Reload + sbcs r12, r11, r3 + sbc lr, r4, lr + cmp lr, #0 + movlt r5, r0 + ldr r0, [sp, #40] @ 4-byte Reload + movlt r8, r1 + movlt r9, r2 + cmp lr, #0 + movlt r10, r0 + ldr r0, [sp, #64] @ 4-byte Reload + movlt r7, r0 + ldr r0, [sp, #68] @ 4-byte Reload + movlt r6, r0 + cmp lr, #0 + movlt lr, r4 + ldr r4, [sp, #52] @ 4-byte Reload + movlt r12, r11 + add r0, r4, #20 + stm r4, {r5, r8, r9, r10} + str r7, [r4, #16] + stm r0, {r6, r12, lr} + add sp, sp, #716 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end115: + .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L + .cantunwind + .fnend + + .globl mcl_fp_montRed8L + .align 2 + .type mcl_fp_montRed8L,%function +mcl_fp_montRed8L: @ @mcl_fp_montRed8L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #420 + sub sp, sp, #420 + mov r5, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r2, 
[r1, #4] + ldr r4, [r1] + ldr r9, [r1, #40] + ldr r10, [r1, #44] + ldr r0, [r5] + ldr r11, [r5, #-4] + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [r1, #8] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [r5, #4] + str r2, [sp, #52] @ 4-byte Spill + ldr r2, [r1, #12] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r5, #8] + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [r1, #16] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r5, #12] + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [r1, #20] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r5, #16] + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [r1, #24] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r5, #20] + str r2, [sp, #40] @ 4-byte Spill + ldr r2, [r1, #28] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r5, #24] + str r2, [sp, #44] @ 4-byte Spill + mul r2, r4, r11 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r5, #28] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [r1, #32] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [r1, #36] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [r1, #48] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r1, #52] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r1, #56] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r1, #60] + mov r1, r5 + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #376 + bl .LmulPv256x32(PLT) + add lr, sp, #396 + ldr r8, [sp, #408] + add r6, sp, #384 + ldm lr, {r3, r12, lr} + ldr r7, [sp, #376] + ldr r1, [sp, #380] + ldm r6, {r0, r2, r6} + adds r4, r4, r7 + ldr r4, [sp, #56] @ 4-byte Reload + adcs r4, r4, r1 + ldr r1, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r4, r11 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, #0 + adcs r9, r9, #0 + str r0, [sp, #12] @ 4-byte Spill + adcs r0, r10, #0 + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #4] @ 4-byte Spill + add r0, sp, #336 + bl .LmulPv256x32(PLT) + add lr, sp, #356 + ldr r8, [sp, #368] + add r6, sp, #340 + ldm lr, {r3, r12, lr} + ldr r7, [sp, #336] + ldm r6, {r0, r1, r2, r6} + adds r4, r4, r7 + ldr r4, [sp, #56] @ 4-byte Reload + adcs r4, r4, r0 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r10, r0, r2 + ldr r0, [sp, #36] @ 4-byte Reload + mul r2, r4, r11 + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload 
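The mcl_fp_montRed8L body repeats one fixed pattern per limb: compute a correction word m = t[0] * nInv (the `mul r2, r4, r11` issued before each `.LmulPv256x32` call, with r11 loaded from p[-4]), fold m*p into the running value through an adcs chain, then advance the window by one limb. A minimal C sketch of a single step, assuming 32-bit limbs; montRed_step, nInv, and tLen are illustrative names, not mcl's interface:

    #include <stdint.h>

    enum { LIMBS = 8 };

    /* One reduction step: pick m so that t[0] + m*p[0] == 0 (mod 2^32),
     * then fold m*p into t.  Repeating this LIMBS times, reading the
     * result one limb higher each time, matches the loop structure of
     * the assembly above. */
    static void montRed_step(uint32_t *t, int tLen,
                             const uint32_t p[LIMBS], uint32_t nInv) {
        uint32_t m = t[0] * nInv;            /* the "mul r2, r4, r11" */
        uint64_t carry = 0;
        for (int j = 0; j < LIMBS; j++) {    /* mulPv call + adcs chain */
            uint64_t acc = (uint64_t)m * p[j] + t[j] + carry;
            t[j] = (uint32_t)acc;
            carry = acc >> 32;
        }
        for (int k = LIMBS; carry && k < tLen; k++) {  /* ripple carry */
            uint64_t acc = (uint64_t)t[k] + carry;
            t[k] = (uint32_t)acc;
            carry = acc >> 32;
        }
    }

After the last step the reduced value may still be one modulus too large, which is what the subtraction chain further down handles.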
+ adcs r0, r0, r8 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r9, #0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #4] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + add r0, sp, #296 + bl .LmulPv256x32(PLT) + add r8, sp, #320 + add lr, sp, #300 + ldm r8, {r6, r7, r8} + ldr r1, [sp, #296] + ldm lr, {r0, r2, r3, r12, lr} + adds r1, r4, r1 + ldr r1, [sp, #56] @ 4-byte Reload + adcs r4, r1, r0 + ldr r0, [sp, #52] @ 4-byte Reload + mov r1, r5 + adcs r10, r10, r2 + mul r2, r4, r11 + adcs r9, r0, r3 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + add r0, sp, #256 + bl .LmulPv256x32(PLT) + add lr, sp, #276 + ldr r8, [sp, #288] + add r6, sp, #260 + ldm lr, {r3, r12, lr} + ldr r7, [sp, #256] + ldm r6, {r0, r1, r2, r6} + adds r4, r4, r7 + adcs r4, r10, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r9, r9, r1 + mov r1, r5 + adcs r10, r0, r2 + ldr r0, [sp, #52] @ 4-byte Reload + mul r2, r4, r11 + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #216 + bl .LmulPv256x32(PLT) + add r8, sp, #240 + add lr, sp, #220 + ldm r8, {r6, r7, r8} + ldr r1, [sp, #216] + ldm lr, {r0, r2, r3, r12, lr} + adds r1, r4, r1 + adcs r4, r9, r0 + ldr r0, [sp, #56] @ 4-byte Reload + mov r1, r5 + adcs r10, r10, r2 + mul r2, r4, r11 + adcs r9, r0, r3 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, 
[sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #176 + bl .LmulPv256x32(PLT) + add lr, sp, #196 + ldr r8, [sp, #208] + add r6, sp, #180 + ldm lr, {r3, r12, lr} + ldr r7, [sp, #176] + ldm r6, {r0, r1, r2, r6} + adds r4, r4, r7 + adcs r4, r10, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r7, r9, r1 + mov r1, r5 + adcs r9, r0, r2 + ldr r0, [sp, #52] @ 4-byte Reload + mul r2, r4, r11 + adcs r6, r0, r6 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r10, r0, r3 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #136 + bl .LmulPv256x32(PLT) + add r12, sp, #136 + ldm r12, {r0, r1, r3, r12} + adds r0, r4, r0 + adcs r4, r7, r1 + ldr r7, [sp, #152] + ldr r0, [sp, #168] + adcs r1, r9, r3 + ldr r3, [sp, #160] + mul r2, r4, r11 + adcs r9, r6, r12 + ldr r6, [sp, #156] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #164] + adcs r10, r10, r7 + ldr r7, [sp, #56] @ 4-byte Reload + adcs r6, r7, r6 + ldr r7, [sp, #52] @ 4-byte Reload + adcs r8, r7, r3 + ldr r3, [sp, #48] @ 4-byte Reload + adcs r11, r3, r1 + ldr r1, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + add r0, sp, #96 + bl .LmulPv256x32(PLT) + add r3, sp, #96 + ldm r3, {r0, r1, r2, r3} + adds r0, r4, r0 + ldr r0, [sp, #32] @ 4-byte Reload + adcs r5, r0, r1 + ldr r0, [sp, #112] + ldr r1, [sp, #48] @ 4-byte Reload + adcs r9, r9, r2 + adcs r10, r10, r3 + adcs r3, r6, r0 + ldr r0, [sp, #116] + str r3, [sp, #36] @ 4-byte Spill + adcs lr, r8, r0 + ldr r0, [sp, #120] + str lr, [sp, #40] @ 4-byte Spill + adcs r7, r11, r0 + ldr r0, [sp, #124] + str r7, [sp, #44] @ 4-byte Spill + adcs r4, r1, r0 + ldr r0, [sp, #128] + ldr r1, [sp, #56] @ 4-byte Reload + str r4, [sp, #48] @ 4-byte Spill + adcs r12, r1, r0 + ldr r0, [sp, #52] @ 4-byte Reload + adc r8, r0, #0 + ldr r0, [sp, #88] @ 4-byte Reload + subs r1, r5, r0 + ldr r0, [sp, #84] @ 4-byte Reload + sbcs r2, r9, r0 + ldr r0, [sp, #80] @ 4-byte Reload + sbcs r6, r10, r0 + ldr r0, [sp, #64] @ 4-byte Reload + sbcs r0, r3, r0 + ldr r3, [sp, #68] @ 4-byte Reload + sbcs r11, lr, r3 + ldr r3, [sp, #72] @ 4-byte Reload + sbcs r3, r7, r3 + ldr r7, [sp, #76] @ 4-byte Reload + sbcs lr, r4, r7 + ldr r7, [sp, #60] @ 4-byte Reload + sbcs r4, r12, r7 + sbc r7, r8, #0 
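The subs/sbcs chain just above computes t - p limb by limb and leaves the final borrow in the flags; the `ands`/`movne` sequence that follows selects between the reduced and unreduced value. A hedged C equivalent (condSub and topCarry are illustrative names; the assembly keeps the extra top bit in a stack spill slot):

    #include <stdint.h>

    enum { LIMBS = 8 };

    /* d = t - p with a rippled borrow; keep t only when the subtraction
     * wrapped with no spare top bit, mirroring the sbc/ands/movne
     * selection above.  Montgomery reduction guarantees the true value
     * is below 2p, so exactly one of t and d is the canonical result. */
    static void condSub(uint32_t z[LIMBS], const uint32_t t[LIMBS],
                        const uint32_t p[LIMBS], uint32_t topCarry) {
        uint32_t d[LIMBS];
        uint32_t borrow = 0;
        for (int i = 0; i < LIMBS; i++) {
            uint64_t diff = (uint64_t)t[i] - p[i] - borrow;
            d[i] = (uint32_t)diff;
            borrow = (uint32_t)(diff >> 63);   /* 1 iff this limb borrowed */
        }
        uint32_t keepT = (topCarry - borrow) & 1;  /* "sbc ...; ands ..., #1" */
        for (int i = 0; i < LIMBS; i++)
            z[i] = keepT ? t[i] : d[i];
    }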
+ ands r7, r7, #1 + movne r1, r5 + ldr r5, [sp, #92] @ 4-byte Reload + movne r2, r9 + movne r6, r10 + cmp r7, #0 + str r1, [r5] + ldr r1, [sp, #36] @ 4-byte Reload + str r2, [r5, #4] + str r6, [r5, #8] + movne r0, r1 + str r0, [r5, #12] + ldr r0, [sp, #40] @ 4-byte Reload + movne r11, r0 + ldr r0, [sp, #44] @ 4-byte Reload + str r11, [r5, #16] + movne r3, r0 + ldr r0, [sp, #48] @ 4-byte Reload + cmp r7, #0 + movne r4, r12 + str r3, [r5, #20] + movne lr, r0 + str lr, [r5, #24] + str r4, [r5, #28] + add sp, sp, #420 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end116: + .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L + .cantunwind + .fnend + + .globl mcl_fp_addPre8L + .align 2 + .type mcl_fp_addPre8L,%function +mcl_fp_addPre8L: @ @mcl_fp_addPre8L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #16 + sub sp, sp, #16 + ldr r3, [r1, #4] + ldr r9, [r1] + ldr r10, [r1, #12] + ldr r11, [r1, #16] + ldr r8, [r1, #28] + str r3, [sp, #12] @ 4-byte Spill + ldr r3, [r1, #8] + str r3, [sp, #8] @ 4-byte Spill + ldr r3, [r1, #20] + str r3, [sp] @ 4-byte Spill + ldr r3, [r1, #24] + str r3, [sp, #4] @ 4-byte Spill + ldm r2, {r1, r3, r4, r5, r12, lr} + ldr r7, [sp, #12] @ 4-byte Reload + ldr r6, [r2, #24] + ldr r2, [r2, #28] + adds r1, r1, r9 + adcs r3, r3, r7 + ldr r7, [sp, #8] @ 4-byte Reload + adcs r4, r4, r7 + ldr r7, [sp] @ 4-byte Reload + adcs r5, r5, r10 + adcs r12, r12, r11 + adcs lr, lr, r7 + ldr r7, [sp, #4] @ 4-byte Reload + stm r0, {r1, r3, r4, r5, r12, lr} + adcs r6, r6, r7 + adcs r2, r2, r8 + str r6, [r0, #24] + str r2, [r0, #28] + mov r0, #0 + adc r0, r0, #0 + add sp, sp, #16 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end117: + .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L + .cantunwind + .fnend + + .globl mcl_fp_subPre8L + .align 2 + .type mcl_fp_subPre8L,%function +mcl_fp_subPre8L: @ @mcl_fp_subPre8L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #16 + sub sp, sp, #16 + ldr r3, [r2, #4] + ldr r9, [r2] + ldr r10, [r2, #12] + ldr r11, [r2, #16] + ldr r8, [r2, #28] + str r3, [sp, #12] @ 4-byte Spill + ldr r3, [r2, #8] + str r3, [sp, #8] @ 4-byte Spill + ldr r3, [r2, #20] + str r3, [sp] @ 4-byte Spill + ldr r3, [r2, #24] + str r3, [sp, #4] @ 4-byte Spill + ldm r1, {r2, r3, r4, r5, r12, lr} + ldr r7, [sp, #12] @ 4-byte Reload + ldr r6, [r1, #24] + ldr r1, [r1, #28] + subs r2, r2, r9 + sbcs r3, r3, r7 + ldr r7, [sp, #8] @ 4-byte Reload + sbcs r4, r4, r7 + ldr r7, [sp] @ 4-byte Reload + sbcs r5, r5, r10 + sbcs r12, r12, r11 + sbcs lr, lr, r7 + ldr r7, [sp, #4] @ 4-byte Reload + stm r0, {r2, r3, r4, r5, r12, lr} + sbcs r6, r6, r7 + sbcs r1, r1, r8 + str r6, [r0, #24] + str r1, [r0, #28] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + add sp, sp, #16 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end118: + .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L + .cantunwind + .fnend + + .globl mcl_fp_shr1_8L + .align 2 + .type mcl_fp_shr1_8L,%function +mcl_fp_shr1_8L: @ @mcl_fp_shr1_8L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, lr} + push {r4, r5, r6, r7, r8, lr} + ldr r3, [r1, #4] + ldr r12, [r1] + ldr lr, [r1, #12] + add r6, r1, #16 + ldr r2, [r1, #8] + ldm r6, {r4, r5, r6} + ldr r1, [r1, #28] + lsrs r7, r3, #1 + lsr r3, r3, #1 + rrx r12, r12 + lsrs r7, lr, #1 + orr r8, r3, r2, lsl #31 + lsr r7, lr, #1 + rrx r2, r2 + lsrs r3, r5, #1 + lsr r5, r5, #1 + str r12, [r0] + str r8, 
[r0, #4] + orr r7, r7, r4, lsl #31 + rrx r3, r4 + lsrs r4, r1, #1 + str r2, [r0, #8] + orr r5, r5, r6, lsl #31 + lsr r1, r1, #1 + add r2, r0, #16 + rrx r6, r6 + str r7, [r0, #12] + stm r2, {r3, r5, r6} + str r1, [r0, #28] + pop {r4, r5, r6, r7, r8, lr} + mov pc, lr +.Lfunc_end119: + .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L + .cantunwind + .fnend + + .globl mcl_fp_add8L + .align 2 + .type mcl_fp_add8L,%function +mcl_fp_add8L: @ @mcl_fp_add8L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #28 + sub sp, sp, #28 + ldr r7, [r1, #12] + ldr lr, [r1] + ldr r11, [r1, #4] + ldr r10, [r1, #8] + add r8, r2, #20 + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r1, #16] + str r7, [sp] @ 4-byte Spill + ldr r7, [r1, #20] + str r7, [sp, #8] @ 4-byte Spill + ldr r7, [r1, #24] + ldr r1, [r1, #28] + str r7, [sp, #12] @ 4-byte Spill + str r1, [sp, #4] @ 4-byte Spill + ldm r2, {r1, r4, r5, r12} + ldr r9, [r2, #16] + ldm r8, {r6, r7, r8} + ldr r2, [sp] @ 4-byte Reload + adds lr, r1, lr + adcs r1, r4, r11 + str lr, [r0] + adcs r4, r5, r10 + ldr r5, [sp, #16] @ 4-byte Reload + str r1, [sp, #24] @ 4-byte Spill + str r4, [sp, #20] @ 4-byte Spill + adcs r10, r12, r5 + adcs r5, r9, r2 + ldr r2, [sp, #8] @ 4-byte Reload + str r5, [sp, #16] @ 4-byte Spill + adcs r12, r6, r2 + ldr r6, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #4] @ 4-byte Reload + stmib r0, {r1, r4, r10} + mov r1, #0 + str r5, [r0, #16] + str r12, [r0, #20] + adcs r7, r7, r6 + mov r6, r12 + adcs r11, r8, r2 + str r7, [r0, #24] + mov r8, lr + adc r1, r1, #0 + str r11, [r0, #28] + str r1, [sp, #12] @ 4-byte Spill + ldm r3, {r1, r2, r9, r12, lr} + ldr r4, [r3, #20] + ldr r5, [r3, #24] + ldr r3, [r3, #28] + subs r1, r8, r1 + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [sp, #24] @ 4-byte Reload + sbcs r8, r1, r2 + ldr r1, [sp, #20] @ 4-byte Reload + sbcs r2, r1, r9 + ldr r1, [sp, #16] @ 4-byte Reload + sbcs r12, r10, r12 + sbcs lr, r1, lr + ldr r1, [sp, #12] @ 4-byte Reload + sbcs r4, r6, r4 + sbcs r5, r7, r5 + sbcs r6, r11, r3 + sbc r3, r1, #0 + tst r3, #1 + bne .LBB120_2 +@ BB#1: @ %nocarry + ldr r1, [sp, #8] @ 4-byte Reload + stm r0, {r1, r8} + add r1, r0, #8 + add r0, r0, #20 + stm r1, {r2, r12, lr} + stm r0, {r4, r5, r6} +.LBB120_2: @ %carry + add sp, sp, #28 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end120: + .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L + .cantunwind + .fnend + + .globl mcl_fp_addNF8L + .align 2 + .type mcl_fp_addNF8L,%function +mcl_fp_addNF8L: @ @mcl_fp_addNF8L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #32 + sub sp, sp, #32 + ldm r1, {r6, r8} + ldr r7, [r1, #8] + ldr r9, [r1, #28] + str r7, [sp, #28] @ 4-byte Spill + ldr r7, [r1, #12] + str r7, [sp, #24] @ 4-byte Spill + ldr r7, [r1, #16] + str r7, [sp, #12] @ 4-byte Spill + ldr r7, [r1, #20] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r1, #24] + str r7, [sp, #20] @ 4-byte Spill + ldm r2, {r1, r4, r5, r12, lr} + ldr r10, [r2, #20] + ldr r11, [r2, #24] + ldr r2, [r2, #28] + adds r7, r1, r6 + ldr r1, [sp, #28] @ 4-byte Reload + adcs r6, r4, r8 + ldr r4, [sp, #20] @ 4-byte Reload + str r7, [sp, #4] @ 4-byte Spill + str r6, [sp, #8] @ 4-byte Spill + adcs r8, r5, r1 + ldr r1, [sp, #24] @ 4-byte Reload + ldr r5, [sp, #12] @ 4-byte Reload + adcs r1, r12, r1 + adcs r12, lr, r5 + ldr r5, [sp, #16] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + adcs lr, r10, r5 + adcs r5, r11, r4 + ldr r4, [r3, #4] + ldr r11, 
[r3, #16] + str lr, [sp, #24] @ 4-byte Spill + adc r10, r2, r9 + ldr r2, [r3] + ldr r9, [r3, #12] + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [r3, #8] + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [r3, #20] + str r2, [sp] @ 4-byte Spill + ldr r2, [r3, #24] + ldr r3, [r3, #28] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [sp, #16] @ 4-byte Reload + subs r2, r7, r2 + sbcs r7, r6, r4 + ldr r4, [sp, #20] @ 4-byte Reload + sbcs r6, r8, r4 + sbcs r9, r1, r9 + ldr r1, [sp] @ 4-byte Reload + sbcs r4, r12, r11 + mov r11, r12 + sbcs r12, lr, r1 + ldr r1, [sp, #12] @ 4-byte Reload + sbcs lr, r5, r1 + ldr r1, [sp, #4] @ 4-byte Reload + sbc r3, r10, r3 + cmp r3, #0 + movlt r6, r8 + movlt r2, r1 + ldr r1, [sp, #8] @ 4-byte Reload + movlt r7, r1 + ldr r1, [sp, #28] @ 4-byte Reload + cmp r3, #0 + movlt r4, r11 + movlt r9, r1 + ldr r1, [sp, #24] @ 4-byte Reload + stm r0, {r2, r7} + str r6, [r0, #8] + str r9, [r0, #12] + movlt r12, r1 + cmp r3, #0 + add r1, r0, #16 + movlt lr, r5 + movlt r3, r10 + stm r1, {r4, r12, lr} + str r3, [r0, #28] + add sp, sp, #32 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end121: + .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L + .cantunwind + .fnend + + .globl mcl_fp_sub8L + .align 2 + .type mcl_fp_sub8L,%function +mcl_fp_sub8L: @ @mcl_fp_sub8L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #16 + sub sp, sp, #16 + ldm r2, {r12, lr} + ldr r4, [r2, #8] + ldr r9, [r2, #20] + ldr r10, [r2, #24] + add r8, r1, #12 + str r4, [sp, #12] @ 4-byte Spill + ldr r4, [r2, #12] + str r4, [sp, #8] @ 4-byte Spill + ldr r4, [r2, #16] + ldr r2, [r2, #28] + str r4, [sp] @ 4-byte Spill + str r2, [sp, #4] @ 4-byte Spill + ldm r1, {r4, r5, r11} + ldm r8, {r2, r7, r8} + ldr r6, [r1, #24] + ldr r1, [r1, #28] + subs r12, r4, r12 + ldr r4, [sp, #12] @ 4-byte Reload + sbcs lr, r5, lr + sbcs r11, r11, r4 + ldr r4, [sp, #8] @ 4-byte Reload + sbcs r2, r2, r4 + ldr r4, [sp] @ 4-byte Reload + sbcs r4, r7, r4 + ldr r7, [sp, #4] @ 4-byte Reload + stm r0, {r12, lr} + str r11, [r0, #8] + sbcs r5, r8, r9 + sbcs r6, r6, r10 + sbcs r7, r1, r7 + add r1, r0, #12 + stm r1, {r2, r4, r5, r6, r7} + mov r1, #0 + sbc r1, r1, #0 + tst r1, #1 + beq .LBB122_2 +@ BB#1: @ %carry + ldr r1, [r3] + add r10, r3, #12 + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [r3, #4] + str r1, [sp, #4] @ 4-byte Spill + ldr r1, [r3, #8] + str r1, [sp] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r1, [r3, #24] + ldr r3, [r3, #28] + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #8] @ 4-byte Reload + adds r1, r1, r12 + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [sp, #4] @ 4-byte Reload + adcs r12, r1, lr + ldr r1, [sp] @ 4-byte Reload + adcs lr, r1, r11 + ldr r1, [sp, #12] @ 4-byte Reload + adcs r2, r8, r2 + adcs r4, r9, r4 + adcs r5, r10, r5 + adcs r6, r1, r6 + ldr r1, [sp, #8] @ 4-byte Reload + adc r3, r3, r7 + stm r0, {r1, r12, lr} + add r1, r0, #12 + stm r1, {r2, r4, r5, r6} + str r3, [r0, #28] +.LBB122_2: @ %nocarry + add sp, sp, #16 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end122: + .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L + .cantunwind + .fnend + + .globl mcl_fp_subNF8L + .align 2 + .type mcl_fp_subNF8L,%function +mcl_fp_subNF8L: @ @mcl_fp_subNF8L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #36 + sub sp, sp, #36 + ldm r2, {r6, r8} + ldr r7, [r2, #8] + ldr r11, [r2, #12] + ldr r9, [r2, #28] + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #16] 
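mcl_fp_sub8L above subtracts limb by limb and only falls into its carry block when the final `sbc` reports a borrow, in which case the modulus is added back once. A C sketch of that add-back pattern, assuming 32-bit limbs (fp_sub is an illustrative name, not mcl's exported API):

    #include <stdint.h>

    enum { LIMBS = 8 };

    /* z = x - y mod p: raw limb subtraction, then z += p iff it borrowed,
     * matching the tst/beq split into %carry / %nocarry blocks above. */
    static void fp_sub(uint32_t z[LIMBS], const uint32_t x[LIMBS],
                       const uint32_t y[LIMBS], const uint32_t p[LIMBS]) {
        uint32_t borrow = 0;
        for (int i = 0; i < LIMBS; i++) {
            uint64_t d = (uint64_t)x[i] - y[i] - borrow;
            z[i] = (uint32_t)d;
            borrow = (uint32_t)(d >> 63);
        }
        if (borrow) {                      /* the %carry branch: z += p */
            uint64_t c = 0;
            for (int i = 0; i < LIMBS; i++) {
                uint64_t s = (uint64_t)z[i] + p[i] + c;
                z[i] = (uint32_t)s;
                c = s >> 32;
            }
        }
    }

The NF ("no final branch") variants that follow compute both candidates and select with conditional moves instead of branching, trading a few instructions for branch-free execution.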
+ str r7, [sp, #24] @ 4-byte Spill + ldr r7, [r2, #20] + str r7, [sp, #28] @ 4-byte Spill + ldr r7, [r2, #24] + str r7, [sp, #32] @ 4-byte Spill + ldm r1, {r2, r4, r5, r12, lr} + ldr r10, [r1, #20] + ldr r7, [r1, #24] + ldr r1, [r1, #28] + subs r6, r2, r6 + ldr r2, [sp, #20] @ 4-byte Reload + sbcs r8, r4, r8 + ldr r4, [sp, #24] @ 4-byte Reload + str r6, [sp, #16] @ 4-byte Spill + sbcs r5, r5, r2 + sbcs r2, r12, r11 + ldr r11, [r3, #12] + sbcs r12, lr, r4 + ldr r4, [sp, #28] @ 4-byte Reload + str r2, [sp, #20] @ 4-byte Spill + str r12, [sp, #24] @ 4-byte Spill + sbcs lr, r10, r4 + ldr r4, [sp, #32] @ 4-byte Reload + ldr r10, [r3, #16] + str lr, [sp, #28] @ 4-byte Spill + sbcs r4, r7, r4 + ldr r7, [r3] + sbc r1, r1, r9 + ldr r9, [r3, #8] + str r7, [sp, #4] @ 4-byte Spill + ldr r7, [r3, #4] + str r7, [sp] @ 4-byte Spill + ldr r7, [r3, #20] + str r7, [sp, #8] @ 4-byte Spill + ldr r7, [r3, #24] + ldr r3, [r3, #28] + str r3, [sp, #12] @ 4-byte Spill + ldr r3, [sp, #4] @ 4-byte Reload + str r7, [sp, #32] @ 4-byte Spill + adds r7, r6, r3 + ldr r3, [sp] @ 4-byte Reload + adcs r6, r8, r3 + ldr r3, [sp, #8] @ 4-byte Reload + adcs r9, r5, r9 + adcs r11, r2, r11 + adcs r2, r12, r10 + ldr r10, [sp, #16] @ 4-byte Reload + adcs r12, lr, r3 + ldr r3, [sp, #32] @ 4-byte Reload + adcs lr, r4, r3 + ldr r3, [sp, #12] @ 4-byte Reload + adc r3, r1, r3 + cmp r1, #0 + movge r9, r5 + ldr r5, [sp, #20] @ 4-byte Reload + movge r7, r10 + movge r6, r8 + cmp r1, #0 + str r7, [r0] + movge r11, r5 + ldr r5, [sp, #24] @ 4-byte Reload + movge r2, r5 + ldr r5, [sp, #28] @ 4-byte Reload + stmib r0, {r6, r9, r11} + movge r12, r5 + cmp r1, #0 + movge r3, r1 + movge lr, r4 + add r1, r0, #16 + stm r1, {r2, r12, lr} + str r3, [r0, #28] + add sp, sp, #36 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end123: + .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L + .cantunwind + .fnend + + .globl mcl_fpDbl_add8L + .align 2 + .type mcl_fpDbl_add8L,%function +mcl_fpDbl_add8L: @ @mcl_fpDbl_add8L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #68 + sub sp, sp, #68 + ldm r1, {r7, r9} + ldr r6, [r1, #8] + ldr r8, [r1, #12] + ldm r2, {r4, r12, lr} + ldr r5, [r2, #12] + adds r4, r4, r7 + str r4, [sp, #32] @ 4-byte Spill + ldr r4, [r2, #32] + adcs r7, r12, r9 + adcs r6, lr, r6 + add lr, r1, #16 + adcs r9, r5, r8 + ldr r5, [r2, #28] + add r8, r2, #16 + str r4, [sp, #36] @ 4-byte Spill + ldr r4, [r2, #36] + str r5, [sp, #28] @ 4-byte Spill + str r4, [sp, #40] @ 4-byte Spill + ldr r4, [r2, #40] + str r4, [sp, #44] @ 4-byte Spill + ldr r4, [r2, #44] + str r4, [sp, #48] @ 4-byte Spill + ldr r4, [r2, #48] + str r4, [sp, #52] @ 4-byte Spill + ldr r4, [r2, #52] + str r4, [sp, #56] @ 4-byte Spill + ldr r4, [r2, #56] + str r4, [sp, #60] @ 4-byte Spill + ldr r4, [r2, #60] + str r4, [sp, #64] @ 4-byte Spill + ldm r8, {r4, r5, r8} + ldr r2, [r1, #36] + ldr r10, [r1, #32] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #40] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #44] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #48] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r1, #52] + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #24] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r11, [sp, #32] @ 4-byte Reload + adcs r1, r4, r1 + str r11, [r0] + str r7, [r0, #4] + str r6, [r0, #8] + str r9, [r0, #12] + ldr r6, [sp, #8] @ 4-byte Reload + ldr r4, [sp, #16] @ 4-byte Reload + adcs r2, r5, r2 + 
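mcl_fpDbl_add8L, in progress here, runs a single carry chain across all sixteen limbs but reduces only the upper eight against the modulus: double-width values are kept below p * 2^256, so only the high half needs the conditional subtract. A sketch under those assumptions (fpDbl_add is an illustrative name):

    #include <stdint.h>

    enum { LIMBS = 8 };

    static void fpDbl_add(uint32_t z[2 * LIMBS], const uint32_t x[2 * LIMBS],
                          const uint32_t y[2 * LIMBS], const uint32_t p[LIMBS]) {
        uint64_t c = 0;
        for (int i = 0; i < 2 * LIMBS; i++) {   /* one long adcs chain */
            uint64_t s = (uint64_t)x[i] + y[i] + c;
            z[i] = (uint32_t)s;
            c = s >> 32;
        }
        /* conditionally subtract p from the high half, tracking c */
        uint32_t d[LIMBS];
        uint32_t borrow = 0;
        for (int i = 0; i < LIMBS; i++) {
            uint64_t t = (uint64_t)z[LIMBS + i] - p[i] - borrow;
            d[i] = (uint32_t)t;
            borrow = (uint32_t)(t >> 63);
        }
        if (c || !borrow)                       /* high half >= p */
            for (int i = 0; i < LIMBS; i++)
                z[LIMBS + i] = d[i];
    }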
str r1, [r0, #16] + str r2, [r0, #20] + adcs r1, r8, r12 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #36] @ 4-byte Reload + adcs r2, r2, lr + adcs r1, r1, r10 + str r2, [r0, #28] + ldr r2, [sp] @ 4-byte Reload + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r7, r1, r2 + ldr r1, [sp, #44] @ 4-byte Reload + ldr r2, [sp, #4] @ 4-byte Reload + adcs r2, r1, r2 + ldr r1, [sp, #48] @ 4-byte Reload + str r2, [sp, #44] @ 4-byte Spill + adcs r12, r1, r6 + ldr r1, [sp, #52] @ 4-byte Reload + ldr r6, [sp, #12] @ 4-byte Reload + str r12, [sp, #48] @ 4-byte Spill + adcs lr, r1, r6 + ldr r1, [sp, #56] @ 4-byte Reload + str lr, [sp, #52] @ 4-byte Spill + adcs r5, r1, r4 + ldr r1, [sp, #60] @ 4-byte Reload + ldr r4, [sp, #20] @ 4-byte Reload + str r5, [sp, #56] @ 4-byte Spill + adcs r8, r1, r4 + ldr r1, [sp, #64] @ 4-byte Reload + ldr r4, [sp, #24] @ 4-byte Reload + adcs r10, r1, r4 + mov r1, #0 + adc r1, r1, #0 + str r10, [sp, #60] @ 4-byte Spill + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [r3] + str r1, [sp, #24] @ 4-byte Spill + ldmib r3, {r4, r11} + ldr r6, [r3, #12] + ldr r1, [r3, #24] + ldr r9, [r3, #16] + str r6, [sp, #40] @ 4-byte Spill + ldr r6, [r3, #20] + ldr r3, [r3, #28] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #24] @ 4-byte Reload + str r3, [sp, #32] @ 4-byte Spill + ldr r3, [sp, #36] @ 4-byte Reload + subs r1, r3, r1 + sbcs r4, r7, r4 + sbcs r11, r2, r11 + ldr r2, [sp, #40] @ 4-byte Reload + sbcs r2, r12, r2 + sbcs r12, lr, r9 + mov r9, r8 + sbcs lr, r5, r6 + ldr r5, [sp, #28] @ 4-byte Reload + sbcs r6, r8, r5 + ldr r5, [sp, #32] @ 4-byte Reload + sbcs r8, r10, r5 + ldr r5, [sp, #64] @ 4-byte Reload + sbc r10, r5, #0 + ands r10, r10, #1 + movne r1, r3 + movne r4, r7 + str r1, [r0, #32] + ldr r1, [sp, #44] @ 4-byte Reload + str r4, [r0, #36] + movne r11, r1 + ldr r1, [sp, #48] @ 4-byte Reload + cmp r10, #0 + str r11, [r0, #40] + movne r2, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r2, [r0, #44] + movne r12, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r12, [r0, #48] + movne lr, r1 + ldr r1, [sp, #60] @ 4-byte Reload + cmp r10, #0 + movne r6, r9 + str lr, [r0, #52] + str r6, [r0, #56] + movne r8, r1 + str r8, [r0, #60] + add sp, sp, #68 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end124: + .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub8L + .align 2 + .type mcl_fpDbl_sub8L,%function +mcl_fpDbl_sub8L: @ @mcl_fpDbl_sub8L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #64 + sub sp, sp, #64 + ldr r7, [r2, #32] + str r7, [sp, #32] @ 4-byte Spill + ldr r7, [r2, #36] + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [r2, #40] + str r7, [sp, #40] @ 4-byte Spill + ldr r7, [r2, #44] + str r7, [sp, #44] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #52] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #60] @ 4-byte Spill + ldm r2, {r4, r5, r8} + ldr r6, [r2, #20] + ldr r7, [r2, #12] + ldr r9, [r2, #16] + ldr r11, [r2, #24] + ldr r10, [r2, #28] + str r6, [sp, #28] @ 4-byte Spill + ldm r1, {r2, r12, lr} + ldr r6, [r1, #12] + subs r4, r2, r4 + ldr r2, [r1, #32] + sbcs r5, r12, r5 + ldr r12, [r1, #36] + sbcs lr, lr, r8 + add r8, r1, #16 + sbcs r6, r6, r7 + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #40] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #44] + str r2, [sp, #8] @ 4-byte 
Spill + ldr r2, [r1, #48] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r1, #52] + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #24] @ 4-byte Spill + ldm r8, {r1, r2, r7, r8} + stm r0, {r4, r5, lr} + str r6, [r0, #12] + mov r4, #0 + ldr r6, [sp, #28] @ 4-byte Reload + ldr r5, [sp, #20] @ 4-byte Reload + sbcs r1, r1, r9 + sbcs r2, r2, r6 + str r1, [r0, #16] + sbcs r1, r7, r11 + str r2, [r0, #20] + ldr r2, [sp, #32] @ 4-byte Reload + ldr r7, [sp, #8] @ 4-byte Reload + str r1, [r0, #24] + sbcs r1, r8, r10 + str r1, [r0, #28] + ldr r1, [sp] @ 4-byte Reload + sbcs r1, r1, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #4] @ 4-byte Reload + sbcs r6, r12, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r6, [sp, #36] @ 4-byte Spill + sbcs r1, r1, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte Spill + sbcs r9, r7, r2 + ldr r2, [sp, #48] @ 4-byte Reload + ldr r7, [sp, #12] @ 4-byte Reload + sbcs r12, r7, r2 + ldr r2, [sp, #52] @ 4-byte Reload + ldr r7, [sp, #16] @ 4-byte Reload + str r12, [sp, #48] @ 4-byte Spill + sbcs lr, r7, r2 + ldr r2, [sp, #56] @ 4-byte Reload + str lr, [sp, #52] @ 4-byte Spill + sbcs r8, r5, r2 + ldr r2, [sp, #60] @ 4-byte Reload + ldr r5, [sp, #24] @ 4-byte Reload + sbcs r11, r5, r2 + sbc r2, r4, #0 + str r2, [sp, #60] @ 4-byte Spill + ldm r3, {r4, r5} + ldr r2, [r3, #8] + ldr r10, [r3, #20] + ldr r7, [r3, #24] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r3, #12] + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [r3, #16] + ldr r3, [r3, #28] + str r3, [sp, #56] @ 4-byte Spill + ldr r3, [sp, #32] @ 4-byte Reload + adds r4, r3, r4 + adcs r5, r6, r5 + ldr r6, [sp, #44] @ 4-byte Reload + adcs r6, r1, r6 + ldr r1, [sp, #28] @ 4-byte Reload + adcs r1, r9, r1 + adcs r2, r12, r2 + adcs r12, lr, r10 + adcs lr, r8, r7 + ldr r7, [sp, #56] @ 4-byte Reload + adc r10, r11, r7 + ldr r7, [sp, #60] @ 4-byte Reload + ands r7, r7, #1 + moveq r4, r3 + ldr r3, [sp, #36] @ 4-byte Reload + str r4, [r0, #32] + moveq r5, r3 + ldr r3, [sp, #40] @ 4-byte Reload + str r5, [r0, #36] + moveq r6, r3 + cmp r7, #0 + moveq r1, r9 + str r6, [r0, #40] + str r1, [r0, #44] + ldr r1, [sp, #48] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r2, [r0, #48] + moveq r12, r1 + cmp r7, #0 + moveq lr, r8 + moveq r10, r11 + str r12, [r0, #52] + str lr, [r0, #56] + str r10, [r0, #60] + add sp, sp, #64 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end125: + .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L + .cantunwind + .fnend + + .align 2 + .type .LmulPv288x32,%function +.LmulPv288x32: @ @mulPv288x32 + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r11, lr} + push {r4, r5, r6, r7, r8, r9, r11, lr} + ldr r12, [r1] + ldmib r1, {r3, lr} + ldr r9, [r1, #12] + umull r4, r8, lr, r2 + umull lr, r6, r12, r2 + mov r5, r4 + mov r7, r6 + str lr, [r0] + umull lr, r12, r9, r2 + umlal r7, r5, r3, r2 + str r5, [r0, #8] + str r7, [r0, #4] + umull r5, r7, r3, r2 + adds r3, r6, r5 + adcs r3, r7, r4 + adcs r3, r8, lr + str r3, [r0, #12] + ldr r3, [r1, #16] + umull r7, r6, r3, r2 + adcs r3, r12, r7 + str r3, [r0, #16] + ldr r3, [r1, #20] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #20] + ldr r3, [r1, #24] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #24] + ldr r3, [r1, #28] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #28] + ldr r1, [r1, #32] + umull r3, r7, r1, r2 + adcs r1, r5, r3 + adc r2, r7, #0 + str r1, [r0, #32] + 
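.LmulPv288x32 just above multiplies a 9-limb operand by one 32-bit word into ten output limbs using umull/umlal and an adc chain; the 9-limb routines that follow are built entirely from repeated calls to it. A minimal C model (mulPv is an illustrative name):

    #include <stdint.h>

    /* y[0..n] = x[0..n-1] * w: one scalar-times-vector column of the
     * schoolbook multiply (n = 9 for the 288-bit variant).  Each loop
     * iteration corresponds to one umull/umlal plus its carry add. */
    static void mulPv(uint32_t *y, const uint32_t *x, int n, uint32_t w) {
        uint64_t carry = 0;
        for (int i = 0; i < n; i++) {
            uint64_t acc = (uint64_t)x[i] * w + carry;
            y[i] = (uint32_t)acc;
            carry = acc >> 32;
        }
        y[n] = (uint32_t)carry;   /* extra top limb; y has n+1 slots */
    }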
str r2, [r0, #36]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end126:
+ .size .LmulPv288x32, .Lfunc_end126-.LmulPv288x32
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre9L
+ .align 2
+ .type mcl_fp_mulUnitPre9L,%function
+mcl_fp_mulUnitPre9L: @ @mcl_fp_mulUnitPre9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ .pad #40
+ sub sp, sp, #40
+ mov r4, r0
+ mov r0, sp
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #20
+ ldr r12, [sp, #36]
+ ldm lr, {r0, r3, r8, lr}
+ ldr r1, [sp, #16]
+ ldm sp, {r5, r6, r7}
+ ldr r2, [sp, #12]
+ stm r4, {r5, r6, r7}
+ str r2, [r4, #12]
+ str r1, [r4, #16]
+ add r1, r4, #20
+ stm r1, {r0, r3, r8, lr}
+ str r12, [r4, #36]
+ add sp, sp, #40
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end127:
+ .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre9L
+ .align 2
+ .type mcl_fpDbl_mulPre9L,%function
+mcl_fpDbl_mulPre9L: @ @mcl_fpDbl_mulPre9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #412
+ sub sp, sp, #412
+ mov r10, r2
+ mov r8, r0
+ add r0, sp, #368
+ str r1, [sp, #44] @ 4-byte Spill
+ mov r4, r1
+ ldr r2, [r10]
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #404]
+ ldr r1, [sp, #376]
+ ldr r2, [r10, #4]
+ ldr r9, [sp, #372]
+ ldr r11, [sp, #380]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #400]
+ str r1, [sp, #16] @ 4-byte Spill
+ mov r1, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #396]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #392]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #388]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [r8]
+ add r0, sp, #328
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #352
+ ldr r4, [sp, #364]
+ add r7, sp, #332
+ ldm lr, {r3, r12, lr}
+ ldr r6, [sp, #328]
+ ldm r7, {r0, r1, r2, r5, r7}
+ adds r6, r6, r9
+ str r6, [r8, #4]
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #12] @ 4-byte Spill
+ adcs r0, r1, r11
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r10, #8]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r4, #0
+ ldr r4, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #288
+ mov r1, r4
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #312
+ add lr, sp, #288
+ ldm r9, {r5, r6, r7, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r8, #8]
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ mov r1, r4
+ adcs r0, r2, r0
+ ldr r2, [r10, #12]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r9, #0
+ mov r9, r4
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #248
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #272
+ ldr r4, [sp, #284]
+ add r6, sp, #252
+ ldm lr, {r3, r12, lr}
+ ldr r7, [sp, #248]
+ ldr r5, [sp, #268]
+ ldm r6, {r0, r1, r2, r6}
+ adds r7, r7, r11
+ str r7, [r8, #12]
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r11, r0, r7
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r10, #16]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r4, #0
+ mov r4, r9
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #208
+ mov r1, r4
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #232
+ add lr, sp, #208
+ ldm r9, {r5, r6, r7, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r0, r11
+ str r0, [r8, #16]
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ mov r1, r4
+ adcs r0, r2, r0
+ ldr r2, [r10, #20]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r9, #0
+ mov r9, r4
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #168
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #192
+ ldr r4, [sp, #204]
+ add r6, sp, #172
+ ldm lr, {r3, r12, lr}
+ ldr r7, [sp, #168]
+ ldr r5, [sp, #188]
+ ldm r6, {r0, r1, r2, r6}
+ adds r7, r7, r11
+ str r7, [r8, #20]
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r11, r0, r7
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r10, #24]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ adc r0, r4, #0
+ mov r4, r9
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #128
+ mov r1, r4
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #152
+ add lr, sp, #128
+ ldm r9, {r5, r6, r7, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r0, r11
+ str r0, [r8, #24]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ mov r1, r4
+ adcs r0, r2, r0
+ ldr r2, [r10, #28]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #88
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #124]
+ add lr, sp, #112
+ add r7, sp, #92
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r5, r12, lr}
+ ldr r2, [sp, #88]
+ ldr r6, [sp, #108]
+ ldm r7, {r0, r1, r3, r7}
+ ldr r4, [sp, #40] @ 4-byte Reload
+ adds r2, r2, r11
+ adcs r9, r0, r4
+ ldr r0, [sp, #36] @ 4-byte Reload
+ str r2, [r8, #28]
+ ldr r2, [r10, #32]
+ adcs r10, r1, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r11, r3, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r7, r7, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r6, r6, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r5, r5, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r4, r0, #0
+ add r0, sp, #48
+ bl .LmulPv288x32(PLT)
+ add r3, sp, #48
+ ldm r3, {r0, r1, r2, r3}
+ ldr r12, [sp, #84]
+ ldr lr, [sp, #80]
+ adds r0, r0, r9
+ ldr r9, [sp, #76]
+ adcs r1, r1, r10
+ adcs r2, r2, r11
+ ldr r11, [sp, #72]
+ adcs r10, r3, r7
+ ldr r7, [sp, #64]
+ ldr r3, [sp, #68]
+ str r0, [r8, #32]
+ str r1, [r8, #36]
+ str r2, [r8, #40]
+ str r10, [r8, #44]
+ adcs r0, r7, r6
+ str r0, [r8, #48]
+ adcs r0, r3, r5
+ str r0, [r8, #52]
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [r8, #56]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [r8, #60]
+ adcs r0, lr, r4
+ adc r1, r12, #0
+ str r0, [r8, #64]
+ str r1, [r8, #68]
+ add sp, sp, #412
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end128:
+ .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre9L
+ .align 2
+ .type mcl_fpDbl_sqrPre9L,%function
+mcl_fpDbl_sqrPre9L: @ @mcl_fpDbl_sqrPre9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #412
+ sub sp, sp, #412
+ mov r5, r1
+ mov r4, r0
+ add r0, sp, #368
+ ldr r2, [r5]
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #404]
+ add r11, sp, #368
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #400]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #396]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #392]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #388]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r0, r10, r11}
+ ldr r1, [sp, #380]
+ ldr r2, [r5, #4]
+ str r1, [sp, #20] @ 4-byte Spill
+ str r0, [r4]
+ add r0, sp, #328
+ mov r1, r5
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #348
+ add r7, sp, #328
+ ldr r9, [sp, #364]
+ ldr r8, [sp, #360]
+ ldm lr, {r6, r12, lr}
+ ldm r7, {r0, r1, r2, r3, r7}
+ adds r0, r0, r10
+ str r0, [r4, #4]
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r2, [r5, #8]
+ adcs r0, r3, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #288
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #312
+ add lr, sp, #288
+ ldm r9, {r6, r7, r8, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r0, r10
+ str r0, [r4, #8]
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r2, [r5, #12]
+ adcs r0, r3, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #248
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #268
+ add r7, sp, #248
+ ldr r9, [sp, #284]
+ ldr r8, [sp, #280]
+ ldm lr, {r6, r12, lr}
+ ldm r7, {r0, r1, r2, r3, r7}
+ adds r0, r0, r10
+ str r0, [r4, #12]
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r2, [r5, #16]
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #208
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #232
+ add lr, sp, #208
+ ldm r9, {r6, r7, r8, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r0, r10
+ str r0, [r4, #16]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r2, [r5, #20]
+ adcs r0, r3, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #168
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #188
+ add r7, sp, #168
+ ldr r9, [sp, #204]
+ ldr r8, [sp, #200]
+ ldm lr, {r6, r12, lr}
+ ldm r7, {r0, r1, r2, r3, r7}
+ adds r0, r0, r10
+ str r0, [r4, #20]
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r2, [r5, #24]
+ adcs r0, r3, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #128
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #152
+ add lr, sp, #128
+ ldm r9, {r6, r7, r8, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r0, r10
+ str r0, [r4, #24]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r2, [r5, #28]
+ adcs r0, r3, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #88
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #124]
+ ldr r2, [sp, #88]
+ ldr r1, [sp, #92]
+ add r12, sp, #96
+ ldr lr, [sp, #116]
+ ldr r6, [sp, #112]
+ ldr r7, [sp, #108]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #120]
+ adds r2, r2, r10
+ adcs r10, r1, r11
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r12, {r0, r3, r12}
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r2, [r4, #28]
+ ldr r2, [r5, #32]
+ adcs r11, r0, r1
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r8, r3, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r9, r12, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #48
+ bl .LmulPv288x32(PLT)
+ add r3, sp, #48
+ add lr, sp, #72
+ ldm r3, {r0, r1, r2, r3}
+ ldr r12, [sp, #84]
+ adds r0, r0, r10
+ adcs r1, r1, r11
+ adcs r2, r2, r8
+ ldm lr, {r5, r8, lr}
+ ldr r6, [sp, #68]
+ ldr r7, [sp, #64]
+ adcs r3, r3, r9
+ add r9, r4, #32
+ stm r9, {r0, r1, r2}
+ str r3, [r4, #44]
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [r4, #48]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [r4, #52]
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [r4, #56]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [r4, #60]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ adc r1, r12, #0
+ str r0, [r4, #64]
+ str r1, [r4, #68]
+ add sp, sp, #412
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end129:
+ .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont9L
+ .align 2
+ .type mcl_fp_mont9L,%function
+mcl_fp_mont9L: @ @mcl_fp_mont9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5,
r6, r7, r8, r9, r10, r11, lr} + .pad #804 + sub sp, sp, #804 + str r2, [sp, #64] @ 4-byte Spill + ldr r6, [r3, #-4] + ldr r2, [r2] + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #760 + str r3, [sp, #76] @ 4-byte Spill + str r1, [sp, #68] @ 4-byte Spill + mov r4, r3 + mov r7, r1 + str r6, [sp, #72] @ 4-byte Spill + bl .LmulPv288x32(PLT) + ldr r0, [sp, #764] + ldr r5, [sp, #760] + mov r1, r4 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #768] + mul r2, r5, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #772] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #796] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #792] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #788] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #784] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #780] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #776] + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #720 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #756] + add r11, sp, #724 + ldr r4, [sp, #736] + ldr r9, [sp, #720] + mov r1, r7 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #752] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #748] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #744] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #740] + str r0, [sp, #4] @ 4-byte Spill + ldm r11, {r8, r10, r11} + ldr r6, [sp, #64] @ 4-byte Reload + add r0, sp, #680 + ldr r2, [r6, #4] + bl .LmulPv288x32(PLT) + adds r0, r9, r5 + ldr r2, [sp, #4] @ 4-byte Reload + mov r1, #0 + add lr, sp, #680 + ldr r9, [sp, #716] + ldr r0, [sp, #48] @ 4-byte Reload + adcs r5, r8, r0 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r10, r0 + ldr r10, [sp, #712] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r11, r0 + ldr r11, [sp, #708] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #704] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #8] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #12] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r7, r2, r0 + ldr r0, [sp, #52] @ 4-byte Reload + ldr r2, [sp, #16] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #20] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #20] @ 4-byte Spill + adc r8, r1, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r5, r5, r0 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #76] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r4 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #28] @ 4-byte Spill + adcs r0, r8, r9 + str r0, [sp, #24] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + mul r2, r5, r0 + add r0, sp, #640 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #676] + add r10, sp, #640 + ldr r11, [sp, #660] + ldr r7, [sp, #656] + str 
r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #672] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #668] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #664] + str r0, [sp, #4] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldr r2, [r6, #8] + ldr r6, [sp, #68] @ 4-byte Reload + add r0, sp, #600 + mov r1, r6 + bl .LmulPv288x32(PLT) + adds r0, r5, r4 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #600 + ldr r4, [sp, #624] + ldr r0, [sp, #56] @ 4-byte Reload + adcs r5, r0, r8 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #636] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #632] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #628] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r8, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r5, r5, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #76] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r4 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #28] @ 4-byte Spill + adcs r0, r8, r9 + str r0, [sp, #24] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + mul r2, r5, r0 + add r0, sp, #560 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #596] + add r10, sp, #560 + ldr r11, [sp, #580] + ldr r7, [sp, #576] + mov r1, r6 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #592] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #588] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #584] + str r0, [sp, #4] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldr r0, [sp, #64] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, sp, #520 + bl .LmulPv288x32(PLT) + adds r0, r5, r4 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #520 + ldr r4, [sp, #544] + ldr r0, [sp, #56] @ 4-byte Reload + adcs r5, r0, r8 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #556] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #552] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #548] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r6, r0, r1 + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, 
[sp, #12] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r8, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r5, r5, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #76] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + adcs r0, r6, lr + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r4 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #28] @ 4-byte Spill + adcs r0, r8, r9 + str r0, [sp, #24] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + mul r2, r5, r0 + add r0, sp, #480 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #516] + add r10, sp, #480 + ldr r11, [sp, #500] + ldr r7, [sp, #496] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #512] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #508] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #504] + str r0, [sp, #4] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldr r6, [sp, #64] @ 4-byte Reload + ldr r1, [sp, #68] @ 4-byte Reload + add r0, sp, #440 + ldr r2, [r6, #16] + bl .LmulPv288x32(PLT) + adds r0, r5, r4 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #440 + ldr r4, [sp, #464] + ldr r0, [sp, #56] @ 4-byte Reload + adcs r5, r0, r8 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #476] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #472] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #468] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r8, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r5, r5, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #76] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r4 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #28] @ 4-byte Spill + adcs r0, r8, r9 + str r0, [sp, #24] @ 4-byte Spill + mov r0, #0 + adc r0, r0, 
#0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + mul r2, r5, r0 + add r0, sp, #400 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #436] + add r10, sp, #400 + ldr r11, [sp, #420] + ldr r7, [sp, #416] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #432] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #428] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #424] + str r0, [sp, #4] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldr r2, [r6, #20] + ldr r1, [sp, #68] @ 4-byte Reload + add r0, sp, #360 + bl .LmulPv288x32(PLT) + adds r0, r5, r4 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #360 + ldr r4, [sp, #384] + ldr r0, [sp, #56] @ 4-byte Reload + adcs r5, r0, r8 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #396] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #392] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #388] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r6, r0, r1 + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r8, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r5, r5, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #76] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + adcs r0, r6, lr + ldr r6, [sp, #72] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r4 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + mul r2, r5, r6 + adcs r0, r0, r11 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #28] @ 4-byte Spill + adcs r0, r8, r9 + str r0, [sp, #24] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #320 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #356] + add r10, sp, #320 + ldr r11, [sp, #340] + ldr r7, [sp, #336] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #352] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #348] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #344] + str r0, [sp, #4] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldr r0, [sp, #64] @ 4-byte Reload + ldr r1, [sp, #68] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, sp, #280 + bl .LmulPv288x32(PLT) + adds r0, r5, r4 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #280 + ldr r4, [sp, #304] + ldr r0, [sp, #56] @ 4-byte Reload + adcs r5, r0, r8 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #316] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #312] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #308] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 
4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r8, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r5, r5, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r5, r6 + ldr r6, [sp, #76] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + mov r1, r6 + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r4 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #28] @ 4-byte Spill + adcs r0, r8, r9 + str r0, [sp, #24] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #240 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #276] + add r10, sp, #240 + ldr r11, [sp, #260] + ldr r7, [sp, #256] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #272] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #268] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #264] + str r0, [sp, #4] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldr r0, [sp, #64] @ 4-byte Reload + ldr r1, [sp, #68] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, sp, #200 + bl .LmulPv288x32(PLT) + adds r0, r5, r4 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #200 + ldr r4, [sp, #224] + ldr r0, [sp, #56] @ 4-byte Reload + adcs r5, r0, r8 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #236] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #232] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #228] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r8, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r5, r5, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r4 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, 
#32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #28] @ 4-byte Spill + adcs r0, r8, r9 + str r0, [sp, #24] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + mul r2, r5, r0 + add r0, sp, #160 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #196] + add r10, sp, #160 + ldr r11, [sp, #184] + ldr r6, [sp, #180] + ldr r7, [sp, #176] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #192] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #188] + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldr r0, [sp, #64] @ 4-byte Reload + ldr r1, [sp, #68] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, sp, #120 + bl .LmulPv288x32(PLT) + adds r0, r5, r4 + ldr r1, [sp, #8] @ 4-byte Reload + ldr r2, [sp, #120] + ldr lr, [sp, #124] + ldr r5, [sp, #128] + ldr r12, [sp, #132] + ldr r0, [sp, #56] @ 4-byte Reload + adcs r4, r0, r8 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r9, r0, r9 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r10, r0, r10 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + add r7, sp, #136 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r11, r0, r11 + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + adds r4, r4, r2 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r9, r9, lr + adcs r10, r10, r5 + mul r8, r4, r0 + ldm r7, {r0, r1, r2, r3, r6, r7} + ldr r5, [sp, #68] @ 4-byte Reload + adcs r5, r5, r12 + str r5, [sp, #36] @ 4-byte Spill + ldr r5, [sp, #64] @ 4-byte Reload + adcs r5, r5, r0 + adcs r0, r11, r1 + ldr r11, [sp, #76] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + mov r1, r11 + adcs r0, r0, r2 + mov r2, r8 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #72] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + add r0, sp, #80 + bl .LmulPv288x32(PLT) + add r3, sp, #80 + ldm r3, {r0, r1, r2, r3} + adds r0, r4, r0 + adcs r0, r9, r1 + ldr r1, [sp, #96] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r7, r10, r2 + str r7, [sp, #40] @ 4-byte Spill + adcs r8, r0, r3 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r10, r5, r1 + ldr r1, [sp, #100] + adcs r4, r0, r1 + ldr r1, [sp, #104] + ldr r0, [sp, #56] @ 4-byte Reload + str r4, [sp, #44] @ 4-byte Spill + adcs r6, r0, r1 + ldr r1, [sp, #108] + ldr r0, [sp, #68] @ 4-byte Reload + str r6, [sp, #48] @ 4-byte Spill + adcs r12, r0, r1 + ldr r1, [sp, #112] + ldr r0, [sp, #32] @ 4-byte Reload + str r12, [sp, #56] @ 4-byte Spill + adcs lr, r0, r1 + ldr r1, [sp, #116] + ldr r0, [sp, #72] @ 4-byte Reload + str lr, [sp, #68] @ 4-byte Spill + adcs r5, r0, r1 + ldr r0, [sp, #64] @ 4-byte Reload + str r5, [sp, #72] @ 4-byte Spill + adc r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + mov r0, r11 + ldmib r0, {r2, r3, r11} + ldr r1, [r0, #16] + 
ldr r9, [r0] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [r0, #20] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [r0, #24] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [r0, #28] + str r1, [sp, #36] @ 4-byte Spill + mov r1, r0 + ldr r0, [sp, #52] @ 4-byte Reload + subs r9, r0, r9 + sbcs r2, r7, r2 + sbcs r3, r8, r3 + sbcs r7, r10, r11 + ldr r11, [r1, #32] + ldr r1, [sp, #24] @ 4-byte Reload + sbcs r1, r4, r1 + ldr r4, [sp, #28] @ 4-byte Reload + sbcs r4, r6, r4 + ldr r6, [sp, #32] @ 4-byte Reload + sbcs r12, r12, r6 + ldr r6, [sp, #36] @ 4-byte Reload + sbcs lr, lr, r6 + sbcs r11, r5, r11 + ldr r5, [sp, #64] @ 4-byte Reload + sbc r6, r5, #0 + ldr r5, [sp, #60] @ 4-byte Reload + ands r6, r6, #1 + movne r9, r0 + ldr r0, [sp, #40] @ 4-byte Reload + movne r3, r8 + str r9, [r5] + movne r2, r0 + ldr r0, [sp, #44] @ 4-byte Reload + cmp r6, #0 + movne r7, r10 + str r2, [r5, #4] + str r3, [r5, #8] + str r7, [r5, #12] + movne r1, r0 + ldr r0, [sp, #48] @ 4-byte Reload + str r1, [r5, #16] + movne r4, r0 + ldr r0, [sp, #56] @ 4-byte Reload + cmp r6, #0 + str r4, [r5, #20] + movne r12, r0 + ldr r0, [sp, #68] @ 4-byte Reload + str r12, [r5, #24] + movne lr, r0 + ldr r0, [sp, #72] @ 4-byte Reload + str lr, [r5, #28] + movne r11, r0 + str r11, [r5, #32] + add sp, sp, #804 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end130: + .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L + .cantunwind + .fnend + + .globl mcl_fp_montNF9L + .align 2 + .type mcl_fp_montNF9L,%function +mcl_fp_montNF9L: @ @mcl_fp_montNF9L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #804 + sub sp, sp, #804 + add r12, sp, #60 + str r2, [sp, #72] @ 4-byte Spill + mov r4, r3 + mov r7, r1 + stm r12, {r0, r1, r3} + add r0, sp, #760 + ldr r6, [r3, #-4] + ldr r2, [r2] + str r6, [sp, #76] @ 4-byte Spill + bl .LmulPv288x32(PLT) + ldr r0, [sp, #764] + ldr r5, [sp, #760] + mov r1, r4 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #768] + mul r2, r5, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #772] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #796] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #792] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #788] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #784] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #780] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #776] + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #720 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #756] + add r10, sp, #724 + ldr r6, [sp, #736] + ldr r11, [sp, #720] + mov r1, r7 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #752] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #748] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #744] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #740] + str r0, [sp, #4] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r4, [sp, #72] @ 4-byte Reload + add r0, sp, #680 + ldr r2, [r4, #4] + bl .LmulPv288x32(PLT) + adds r0, r11, r5 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #680 + ldr r11, [sp, #704] + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r8, r0 + ldr r8, [sp, #716] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r9, r0 + ldr r9, [sp, #712] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r10, r0 + ldr r10, [sp, #708] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r5, r1, r0 + ldr r0, [sp, 
#32] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r7, r1, r0 + ldr r0, [sp, #52] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r1, r0 + str r0, [sp, #20] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r6, [sp, #48] @ 4-byte Reload + adds r6, r6, r0 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #68] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r5, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r11 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r8, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #640 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #676] + add r10, sp, #644 + ldr r7, [sp, #656] + ldr r11, [sp, #640] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #672] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #668] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #664] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #660] + str r0, [sp, #4] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r5, [sp, #64] @ 4-byte Reload + ldr r2, [r4, #8] + add r0, sp, #600 + mov r1, r5 + bl .LmulPv288x32(PLT) + adds r0, r6, r11 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #600 + ldr r11, [sp, #624] + ldr r0, [sp, #56] @ 4-byte Reload + adcs r4, r0, r8 + ldr r0, [sp, #52] @ 4-byte Reload + ldr r8, [sp, #636] + adcs r0, r0, r9 + ldr r9, [sp, #632] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #628] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #28] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r6, r4, r0 + ldr r0, [sp, #56] @ 4-byte Reload + ldr r4, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r11 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #32] @ 
4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r8, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #560 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #596] + add r10, sp, #564 + ldr r7, [sp, #576] + ldr r11, [sp, #560] + mov r1, r5 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #592] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #588] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #584] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #580] + str r0, [sp, #4] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, sp, #520 + bl .LmulPv288x32(PLT) + adds r0, r6, r11 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #520 + ldr r11, [sp, #544] + ldr r0, [sp, #56] @ 4-byte Reload + adcs r6, r0, r8 + ldr r0, [sp, #52] @ 4-byte Reload + ldr r8, [sp, #556] + adcs r0, r0, r9 + ldr r9, [sp, #552] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #548] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r5, r0, r1 + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #28] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r6, r6, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + adcs r0, r5, lr + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r11 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r8, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #480 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #516] + add r10, sp, #484 + ldr r7, [sp, #496] + ldr r11, [sp, #480] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #512] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #508] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #504] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #500] + str r0, [sp, #4] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r5, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #64] @ 4-byte Reload + add r0, sp, #440 + ldr r2, [r5, #16] + bl .LmulPv288x32(PLT) + adds r0, r6, r11 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #440 + ldr r11, [sp, #464] + ldr r0, [sp, #56] @ 4-byte Reload + adcs r4, r0, r8 + ldr r0, [sp, #52] @ 4-byte Reload + ldr r8, [sp, #476] + adcs r0, r0, r9 + ldr r9, [sp, #472] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #468] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 
4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #28] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r6, r4, r0 + ldr r0, [sp, #56] @ 4-byte Reload + ldr r4, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #68] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r6, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r11 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r8, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #400 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #436] + add r10, sp, #404 + ldr r7, [sp, #416] + ldr r11, [sp, #400] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #432] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #428] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #424] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #420] + str r0, [sp, #4] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r2, [r5, #20] + ldr r1, [sp, #64] @ 4-byte Reload + add r0, sp, #360 + bl .LmulPv288x32(PLT) + adds r0, r6, r11 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #360 + ldr r11, [sp, #384] + ldr r0, [sp, #56] @ 4-byte Reload + adcs r5, r0, r8 + ldr r0, [sp, #52] @ 4-byte Reload + ldr r8, [sp, #396] + adcs r0, r0, r9 + ldr r9, [sp, #392] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #388] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #28] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r6, r5, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #68] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r6, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r11 + 
str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r8, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #320 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #356] + add r10, sp, #324 + ldr r7, [sp, #336] + ldr r11, [sp, #320] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #352] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #348] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #344] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #340] + str r0, [sp, #4] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r5, [sp, #64] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, sp, #280 + mov r1, r5 + bl .LmulPv288x32(PLT) + adds r0, r6, r11 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #280 + ldr r11, [sp, #304] + ldr r0, [sp, #56] @ 4-byte Reload + adcs r4, r0, r8 + ldr r0, [sp, #52] @ 4-byte Reload + ldr r8, [sp, #316] + adcs r0, r0, r9 + ldr r9, [sp, #312] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #308] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #28] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r6, r4, r0 + ldr r0, [sp, #56] @ 4-byte Reload + ldr r4, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r11 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r8, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #240 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #276] + add r10, sp, #244 + ldr r7, [sp, #256] + ldr r11, [sp, #240] + mov r1, r5 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #272] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #268] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #264] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #260] + str r0, [sp, #4] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, sp, #200 + bl .LmulPv288x32(PLT) + adds r0, r6, r11 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #200 + ldr r11, [sp, #224] + ldr r0, [sp, #56] @ 4-byte Reload + adcs r5, r0, r8 + ldr r0, [sp, #52] @ 4-byte Reload + ldr r8, [sp, #236] + adcs r0, r0, r9 + ldr r9, [sp, #232] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, 
[sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #228] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r6, r0, r1 + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #28] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r5, r5, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r6, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + adcs r0, r7, r11 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r8, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + mul r2, r5, r0 + add r0, sp, #160 + bl .LmulPv288x32(PLT) + ldr r0, [sp, #196] + add r10, sp, #164 + ldr r4, [sp, #184] + ldr r6, [sp, #180] + ldr r7, [sp, #176] + ldr r11, [sp, #160] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #192] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #188] + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #64] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, sp, #120 + bl .LmulPv288x32(PLT) + adds r0, r5, r11 + ldr r1, [sp, #52] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + add lr, sp, #120 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r8 + add r8, sp, #136 + adcs r1, r1, r9 + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r10, r1, r10 + ldr r1, [sp, #44] @ 4-byte Reload + adcs r11, r1, r7 + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r1, r6 + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adcs r1, r1, r4 + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #28] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #24] @ 4-byte Reload + adc r1, r1, r2 + str r1, [sp, #44] @ 4-byte Spill + ldm lr, {r2, r12, lr} + ldr r4, [sp, #132] + adds r5, r0, r2 + ldr r0, [sp, #76] @ 4-byte Reload + mul r9, r5, r0 + ldm r8, {r0, r1, r2, r3, r6, r8} + ldr r7, [sp, #56] @ 4-byte Reload + adcs r7, r7, r12 + str r7, [sp, #32] @ 4-byte Spill + adcs r7, r10, lr + ldr r10, [sp, #68] @ 4-byte Reload + adcs r11, r11, r4 + ldr r4, [sp, #72] @ 4-byte Reload + str r7, [sp, #36] @ 4-byte Spill + adcs r0, r4, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r10 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + mov r2, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r4, 
r0, r3 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #76] @ 4-byte Spill + adc r0, r8, #0 + str r0, [sp, #72] @ 4-byte Spill + add r0, sp, #80 + bl .LmulPv288x32(PLT) + add r3, sp, #80 + ldm r3, {r0, r1, r2, r3} + adds r0, r5, r0 + ldr r0, [sp, #32] @ 4-byte Reload + adcs r9, r0, r1 + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #96] + str r9, [sp, #32] @ 4-byte Spill + adcs r2, r0, r2 + adcs r0, r11, r3 + str r2, [sp, #44] @ 4-byte Spill + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r7, r0, r1 + ldr r1, [sp, #100] + ldr r0, [sp, #56] @ 4-byte Reload + str r7, [sp, #48] @ 4-byte Spill + adcs r6, r0, r1 + ldr r1, [sp, #104] + ldr r0, [sp, #64] @ 4-byte Reload + adcs lr, r0, r1 + ldr r1, [sp, #108] + ldr r0, [sp, #76] @ 4-byte Reload + str lr, [sp, #56] @ 4-byte Spill + adcs r4, r4, r1 + ldr r1, [sp, #112] + str r4, [sp, #64] @ 4-byte Spill + adcs r5, r0, r1 + ldr r1, [sp, #116] + ldr r0, [sp, #72] @ 4-byte Reload + str r5, [sp, #76] @ 4-byte Spill + adc r12, r0, r1 + mov r0, r10 + ldr r1, [r0, #16] + ldr r8, [r0] + ldr r11, [r0, #4] + ldr r10, [r0, #8] + ldr r3, [r0, #12] + str r12, [sp, #72] @ 4-byte Spill + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [r0, #20] + subs r8, r9, r8 + ldr r9, [sp, #52] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [r0, #24] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [r0, #28] + ldr r0, [r0, #32] + str r1, [sp, #40] @ 4-byte Spill + sbcs r1, r2, r11 + sbcs r2, r9, r10 + mov r10, r6 + sbcs r3, r7, r3 + ldr r7, [sp, #24] @ 4-byte Reload + sbcs r7, r6, r7 + ldr r6, [sp, #28] @ 4-byte Reload + sbcs r11, lr, r6 + ldr r6, [sp, #36] @ 4-byte Reload + sbcs lr, r4, r6 + ldr r4, [sp, #40] @ 4-byte Reload + ldr r6, [sp, #44] @ 4-byte Reload + sbcs r4, r5, r4 + ldr r5, [sp, #32] @ 4-byte Reload + sbc r0, r12, r0 + asr r12, r0, #31 + cmp r12, #0 + movlt r8, r5 + ldr r5, [sp, #60] @ 4-byte Reload + movlt r1, r6 + movlt r2, r9 + cmp r12, #0 + movlt r7, r10 + str r8, [r5] + str r1, [r5, #4] + ldr r1, [sp, #48] @ 4-byte Reload + str r2, [r5, #8] + movlt r3, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r3, [r5, #12] + str r7, [r5, #16] + movlt r11, r1 + ldr r1, [sp, #64] @ 4-byte Reload + cmp r12, #0 + str r11, [r5, #20] + movlt lr, r1 + ldr r1, [sp, #76] @ 4-byte Reload + str lr, [r5, #24] + movlt r4, r1 + ldr r1, [sp, #72] @ 4-byte Reload + str r4, [r5, #28] + movlt r0, r1 + str r0, [r5, #32] + add sp, sp, #804 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end131: + .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L + .cantunwind + .fnend + + .globl mcl_fp_montRed9L + .align 2 + .type mcl_fp_montRed9L,%function +mcl_fp_montRed9L: @ @mcl_fp_montRed9L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #476 + sub sp, sp, #476 + mov r5, r2 + str r0, [sp, #104] @ 4-byte Spill + ldr r2, [r1, #4] + ldr r4, [r1] + ldr r11, [r1, #32] + ldr r10, [r1, #36] + ldr r0, [r5] + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [r1, #8] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [r5, #4] + str r2, [sp, #52] @ 4-byte Spill + ldr r2, [r1, #12] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [r5, #8] + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [r1, #16] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [r5, #12] + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [r1, #20] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r5, #16] + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [r1, #24] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r5, #20] + str r2, [sp, 
#40] @ 4-byte Spill + ldr r2, [r1, #28] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r5, #24] + str r2, [sp, #44] @ 4-byte Spill + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [r5, #-4] + str r0, [sp, #108] @ 4-byte Spill + mul r2, r4, r0 + ldr r0, [r5, #28] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r5, #32] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r1, #64] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [r1, #68] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r1, #40] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r1, #44] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r1, #48] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r1, #52] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r1, #56] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [r1, #60] + mov r1, r5 + str r0, [sp, #8] @ 4-byte Spill + add r0, sp, #432 + bl .LmulPv288x32(PLT) + ldr r1, [sp, #432] + add lr, sp, #436 + ldr r9, [sp, #468] + ldr r8, [sp, #464] + ldm lr, {r0, r2, r3, r6, r7, r12, lr} + adds r1, r4, r1 + ldr r1, [sp, #56] @ 4-byte Reload + adcs r4, r1, r0 + ldr r0, [sp, #52] @ 4-byte Reload + mov r1, r5 + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #108] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + mul r2, r4, r7 + adcs r0, r0, r12 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #44] @ 4-byte Spill + adcs r0, r11, r8 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r9, r10, r9 + adcs r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #4] @ 4-byte Spill + add r0, sp, #392 + bl .LmulPv288x32(PLT) + add r11, sp, #408 + add r6, sp, #392 + ldr r12, [sp, #428] + ldr lr, [sp, #424] + ldr r8, [sp, #420] + ldm r11, {r2, r10, r11} + ldm r6, {r0, r1, r3, r6} + adds r0, r4, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r4, r0, r1 + ldr r0, [sp, #52] @ 4-byte Reload + mov r1, r5 + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r4, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r11, r0, r11 + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r7 + str r0, [sp, #44] @ 4-byte Spill + adcs r0, r9, lr + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] 
@ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #4] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + add r0, sp, #352 + bl .LmulPv288x32(PLT) + add lr, sp, #372 + add r7, sp, #352 + ldr r10, [sp, #388] + ldr r9, [sp, #384] + ldm lr, {r6, r12, lr} + ldm r7, {r0, r1, r2, r3, r7} + adds r0, r4, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r4, r0, r1 + ldr r0, [sp, #52] @ 4-byte Reload + mov r1, r5 + adcs r0, r0, r2 + mul r2, r4, r8 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r11, r6 + mov r11, r8 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + add r0, sp, #312 + bl .LmulPv288x32(PLT) + add lr, sp, #332 + ldr r7, [sp, #348] + add r9, sp, #320 + ldm lr, {r6, r8, r12, lr} + ldr r1, [sp, #312] + ldr r3, [sp, #316] + ldm r9, {r0, r2, r9} + adds r1, r4, r1 + mov r4, r11 + ldr r1, [sp, #56] @ 4-byte Reload + adcs r10, r1, r3 + ldr r1, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r10, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, 
#60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #12] @ 4-byte Spill + add r0, sp, #272 + bl .LmulPv288x32(PLT) + add lr, sp, #272 + ldr r11, [sp, #308] + ldr r9, [sp, #304] + ldm lr, {r0, r1, r2, r3, r6, r7, r12, lr} + adds r0, r10, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r8, r0, r1 + ldr r0, [sp, #52] @ 4-byte Reload + mov r1, r5 + adcs r0, r0, r2 + mul r2, r8, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + mov r6, r4 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + add r0, sp, #232 + bl .LmulPv288x32(PLT) + add r11, sp, #256 + add lr, sp, #232 + ldm r11, {r7, r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r8, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r4, r0, r1 + ldr r0, [sp, #52] @ 4-byte Reload + mov r1, r5 + adcs r0, r0, r2 + mul r2, r4, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + add r0, sp, #192 + bl .LmulPv288x32(PLT) + add lr, sp, #212 + add r7, sp, #192 + ldr r9, [sp, #228] + ldr r8, [sp, #224] + ldm lr, {r6, r12, lr} + ldm r7, {r0, r1, r2, r3, r7} + adds r0, r4, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r4, r0, r1 + ldr r0, [sp, #52] @ 4-byte Reload + mov r1, r5 + adcs r10, r0, r2 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r11, r0, r3 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r7, r0, r7 + ldr r0, [sp, #40] @ 4-byte Reload + adcs r6, r0, r6 + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs 
r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #108] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + mul r2, r4, r8 + adcs r9, r0, r9 + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #152 + bl .LmulPv288x32(PLT) + add r12, sp, #152 + ldm r12, {r0, r1, r3, r12} + ldr lr, [sp, #188] + adds r0, r4, r0 + adcs r4, r10, r1 + ldr r1, [sp, #168] + adcs r11, r11, r3 + mul r2, r4, r8 + ldr r3, [sp, #180] + adcs r0, r7, r12 + ldr r7, [sp, #176] + ldr r12, [sp, #184] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #172] + adcs r10, r6, r1 + ldr r1, [sp, #64] @ 4-byte Reload + adcs r8, r1, r0 + ldr r0, [sp, #60] @ 4-byte Reload + mov r1, r5 + adcs r7, r0, r7 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r9, r12 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #112 + bl .LmulPv288x32(PLT) + add r3, sp, #112 + ldm r3, {r0, r1, r2, r3} + adds r0, r4, r0 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r6, r11, r1 + ldr r1, [sp, #128] + adcs r9, r0, r2 + ldr r0, [sp, #36] @ 4-byte Reload + adcs r11, r10, r3 + adcs lr, r8, r1 + ldr r1, [sp, #132] + str r11, [sp, #28] @ 4-byte Spill + str lr, [sp, #32] @ 4-byte Spill + adcs r7, r7, r1 + ldr r1, [sp, #136] + str r7, [sp, #44] @ 4-byte Spill + adcs r8, r0, r1 + ldr r1, [sp, #140] + ldr r0, [sp, #40] @ 4-byte Reload + str r8, [sp, #48] @ 4-byte Spill + adcs r4, r0, r1 + ldr r1, [sp, #144] + ldr r0, [sp, #56] @ 4-byte Reload + str r4, [sp, #52] @ 4-byte Spill + adcs r5, r0, r1 + ldr r1, [sp, #148] + ldr r0, [sp, #64] @ 4-byte Reload + str r5, [sp, #108] @ 4-byte Spill + adcs r12, r0, r1 + ldr r0, [sp, #60] @ 4-byte Reload + adc r10, r0, #0 + ldr r0, [sp, #100] @ 4-byte Reload + subs r2, r6, r0 + ldr r0, [sp, #96] @ 4-byte Reload + sbcs r3, r9, r0 + ldr r0, [sp, #92] @ 4-byte Reload + sbcs r1, r11, r0 + ldr r0, [sp, #76] @ 4-byte Reload + sbcs r11, lr, r0 + ldr r0, [sp, #80] @ 4-byte Reload + sbcs r0, r7, r0 + ldr r7, [sp, #84] @ 4-byte Reload + sbcs lr, r8, r7 + ldr r7, [sp, #88] @ 4-byte Reload + sbcs r8, r4, r7 + ldr r4, [sp, #68] @ 4-byte Reload + sbcs r4, r5, r4 + ldr r5, [sp, #72] @ 4-byte Reload + sbcs r5, r12, r5 + sbc r7, r10, #0 + ands r7, r7, #1 + movne r2, r6 + ldr r6, [sp, #104] @ 4-byte Reload + movne r3, r9 + str r2, [r6] + ldr r2, [sp, #28] @ 4-byte Reload + str r3, [r6, #4] + movne r1, r2 + cmp r7, #0 + str r1, [r6, #8] + ldr r1, [sp, #32] @ 4-byte Reload + movne r11, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r11, [r6, #12] + movne r0, r1 + str r0, [r6, #16] + ldr r0, [sp, #48] @ 4-byte Reload + movne lr, r0 + ldr r0, [sp, #52] @ 4-byte Reload + cmp r7, #0 + movne r5, r12 + str lr, [r6, #20] + movne r8, r0 + ldr r0, [sp, #108] @ 4-byte Reload + str r8, [r6, #24] + movne r4, r0 + str r4, [r6, #28] + str r5, [r6, #32] + add sp, sp, #476 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end132: + .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L + .cantunwind + .fnend + + .globl 
mcl_fp_addPre9L + .align 2 + .type mcl_fp_addPre9L,%function +mcl_fp_addPre9L: @ @mcl_fp_addPre9L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #16 + sub sp, sp, #16 + ldm r1, {r3, r12, lr} + ldr r9, [r1, #12] + ldmib r2, {r5, r6, r7} + ldr r4, [r2, #16] + ldr r8, [r2] + ldr r11, [r2, #28] + str r4, [sp, #12] @ 4-byte Spill + ldr r4, [r2, #20] + adds r10, r8, r3 + adcs r5, r5, r12 + ldr r12, [r1, #32] + ldr r8, [sp, #12] @ 4-byte Reload + str r10, [r0] + adcs lr, r6, lr + ldr r6, [r1, #20] + adcs r7, r7, r9 + str r4, [sp, #4] @ 4-byte Spill + ldr r4, [r2, #24] + ldr r2, [r2, #32] + ldr r3, [sp, #4] @ 4-byte Reload + str r4, [sp] @ 4-byte Spill + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #28] + ldr r4, [r1, #24] + ldr r1, [r1, #16] + adcs r1, r8, r1 + adcs r6, r3, r6 + ldr r3, [sp] @ 4-byte Reload + stmib r0, {r5, lr} + str r7, [r0, #12] + str r1, [r0, #16] + ldr r1, [sp, #8] @ 4-byte Reload + str r6, [r0, #20] + adcs r4, r3, r4 + adcs r2, r11, r2 + str r4, [r0, #24] + adcs r1, r1, r12 + str r2, [r0, #28] + str r1, [r0, #32] + mov r0, #0 + adc r0, r0, #0 + add sp, sp, #16 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end133: + .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L + .cantunwind + .fnend + + .globl mcl_fp_subPre9L + .align 2 + .type mcl_fp_subPre9L,%function +mcl_fp_subPre9L: @ @mcl_fp_subPre9L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #20 + sub sp, sp, #20 + ldr r3, [r2, #8] + add lr, r1, #16 + ldr r11, [r2, #4] + ldr r10, [r2, #12] + ldr r4, [r2] + str r3, [sp] @ 4-byte Spill + ldr r3, [r2, #16] + str r3, [sp, #4] @ 4-byte Spill + ldr r3, [r2, #20] + str r3, [sp, #8] @ 4-byte Spill + ldr r3, [r2, #24] + str r3, [sp, #12] @ 4-byte Spill + ldr r3, [r2, #28] + str r3, [sp, #16] @ 4-byte Spill + ldmib r1, {r5, r6, r7} + ldm lr, {r3, r12, lr} + ldr r9, [r1] + ldr r8, [r1, #28] + subs r4, r9, r4 + ldr r9, [r2, #32] + ldr r2, [sp] @ 4-byte Reload + sbcs r11, r5, r11 + ldr r5, [sp, #16] @ 4-byte Reload + sbcs r6, r6, r2 + sbcs r7, r7, r10 + ldr r10, [r1, #32] + ldr r1, [sp, #4] @ 4-byte Reload + sbcs r3, r3, r1 + ldr r1, [sp, #8] @ 4-byte Reload + sbcs r2, r12, r1 + ldr r1, [sp, #12] @ 4-byte Reload + stm r0, {r4, r11} + str r6, [r0, #8] + str r7, [r0, #12] + str r3, [r0, #16] + str r2, [r0, #20] + sbcs r1, lr, r1 + sbcs r5, r8, r5 + str r1, [r0, #24] + sbcs r1, r10, r9 + str r5, [r0, #28] + str r1, [r0, #32] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + add sp, sp, #20 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end134: + .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L + .cantunwind + .fnend + + .globl mcl_fp_shr1_9L + .align 2 + .type mcl_fp_shr1_9L,%function +mcl_fp_shr1_9L: @ @mcl_fp_shr1_9L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, lr} + push {r4, r5, r6, r7, r8, lr} + add r12, r1, #16 + ldr r2, [r1, #8] + ldr lr, [r1, #12] + ldm r12, {r4, r5, r6, r8, r12} + ldm r1, {r1, r3} + lsrs r7, r3, #1 + rrx r1, r1 + str r1, [r0] + lsr r1, r3, #1 + orr r1, r1, r2, lsl #31 + str r1, [r0, #4] + lsrs r1, lr, #1 + rrx r1, r2 + str r1, [r0, #8] + lsr r1, lr, #1 + orr r1, r1, r4, lsl #31 + str r1, [r0, #12] + lsrs r1, r5, #1 + rrx r1, r4 + str r1, [r0, #16] + lsr r1, r5, #1 + orr r1, r1, r6, lsl #31 + str r1, [r0, #20] + lsrs r1, r8, #1 + rrx r1, r6 + str r1, [r0, #24] + lsr r1, r8, #1 + orr r1, r1, r12, lsl #31 + str r1, [r0, #28] + lsr r1, r12, #1 + str r1, [r0, #32] + pop {r4, r5, 
r6, r7, r8, lr} + mov pc, lr +.Lfunc_end135: + .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L + .cantunwind + .fnend + + .globl mcl_fp_add9L + .align 2 + .type mcl_fp_add9L,%function +mcl_fp_add9L: @ @mcl_fp_add9L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #16 + sub sp, sp, #16 + ldm r1, {r12, lr} + ldr r5, [r2] + ldr r9, [r1, #8] + ldr r8, [r1, #12] + ldmib r2, {r4, r6, r7} + adds r12, r5, r12 + ldr r5, [r1, #24] + adcs lr, r4, lr + ldr r4, [r1, #20] + str r12, [sp, #8] @ 4-byte Spill + adcs r10, r6, r9 + ldr r6, [r1, #16] + adcs r9, r7, r8 + ldr r7, [r2, #16] + str r10, [sp, #4] @ 4-byte Spill + adcs r6, r7, r6 + ldr r7, [r2, #20] + adcs r7, r7, r4 + ldr r4, [r2, #24] + adcs r11, r4, r5 + ldr r5, [r1, #28] + ldr r4, [r2, #28] + ldr r1, [r1, #32] + ldr r2, [r2, #32] + adcs r8, r4, r5 + adcs r4, r2, r1 + mov r2, lr + add r1, r0, #16 + str r4, [r0, #32] + str r12, [r0] + stmib r0, {r2, r10} + str r9, [r0, #12] + stm r1, {r6, r7, r11} + mov r1, #0 + str r8, [r0, #28] + adc r1, r1, #0 + str r1, [sp, #12] @ 4-byte Spill + ldm r3, {r1, r5, lr} + ldr r10, [sp, #8] @ 4-byte Reload + ldr r12, [r3, #12] + subs r1, r10, r1 + str r1, [sp, #8] @ 4-byte Spill + sbcs r1, r2, r5 + ldr r5, [r3, #20] + str r1, [sp] @ 4-byte Spill + ldr r1, [sp, #4] @ 4-byte Reload + sbcs r2, r1, lr + ldr r1, [r3, #16] + sbcs r12, r9, r12 + sbcs r1, r6, r1 + ldr r6, [r3, #24] + sbcs r5, r7, r5 + ldr r7, [r3, #28] + ldr r3, [r3, #32] + sbcs r6, r11, r6 + sbcs r7, r8, r7 + sbcs r3, r4, r3 + ldr r4, [sp, #12] @ 4-byte Reload + sbc r4, r4, #0 + tst r4, #1 + bne .LBB136_2 +@ BB#1: @ %nocarry + str r3, [r0, #32] + ldr r3, [sp, #8] @ 4-byte Reload + str r3, [r0] + ldr r3, [sp] @ 4-byte Reload + str r3, [r0, #4] + str r2, [r0, #8] + str r12, [r0, #12] + add r0, r0, #16 + stm r0, {r1, r5, r6, r7} +.LBB136_2: @ %carry + add sp, sp, #16 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end136: + .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L + .cantunwind + .fnend + + .globl mcl_fp_addNF9L + .align 2 + .type mcl_fp_addNF9L,%function +mcl_fp_addNF9L: @ @mcl_fp_addNF9L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #52 + sub sp, sp, #52 + ldr r9, [r1] + ldmib r1, {r8, lr} + ldr r5, [r2] + ldr r12, [r1, #12] + ldmib r2, {r4, r6, r7} + ldr r10, [r3, #4] + adds r5, r5, r9 + adcs r9, r4, r8 + ldr r4, [r1, #16] + ldr r8, [r1, #20] + str r5, [sp, #16] @ 4-byte Spill + ldr r5, [r1, #24] + adcs r11, r6, lr + ldr lr, [sp, #16] @ 4-byte Reload + str r9, [sp, #28] @ 4-byte Spill + adcs r12, r7, r12 + ldr r7, [r2, #16] + str r12, [sp, #32] @ 4-byte Spill + adcs r6, r7, r4 + ldr r7, [r2, #20] + str r6, [sp, #36] @ 4-byte Spill + adcs r4, r7, r8 + ldr r7, [r2, #24] + ldr r8, [r3] + str r4, [sp, #40] @ 4-byte Spill + adcs r7, r7, r5 + ldr r5, [r2, #28] + ldr r2, [r2, #32] + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r1, #28] + ldr r1, [r1, #32] + adcs r7, r5, r7 + ldr r5, [r3, #8] + adc r1, r2, r1 + ldr r2, [r3, #16] + str r7, [sp, #44] @ 4-byte Spill + ldr r7, [r3, #12] + subs r8, lr, r8 + str r1, [sp, #24] @ 4-byte Spill + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r3, #20] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r3, #24] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r3, #28] + ldr r3, [r3, #32] + str r3, [sp] @ 4-byte Spill + ldr r3, [sp, #4] @ 4-byte Reload + str r2, [sp, #20] @ 4-byte Spill + sbcs r2, r9, r10 + sbcs r5, r11, r5 + sbcs r7, r12, r7 + sbcs r12, r6, r3 + ldr 
r3, [sp, #8] @ 4-byte Reload + sbcs r6, r4, r3 + ldr r4, [sp, #48] @ 4-byte Reload + ldr r3, [sp, #12] @ 4-byte Reload + sbcs r9, r4, r3 + ldr r3, [sp, #44] @ 4-byte Reload + ldr r4, [sp, #20] @ 4-byte Reload + sbcs r10, r3, r4 + ldr r3, [sp] @ 4-byte Reload + ldr r4, [sp, #28] @ 4-byte Reload + sbc r3, r1, r3 + asr r1, r3, #31 + cmp r1, #0 + movlt r8, lr + movlt r2, r4 + movlt r5, r11 + cmp r1, #0 + str r8, [r0] + str r2, [r0, #4] + ldr r2, [sp, #32] @ 4-byte Reload + str r5, [r0, #8] + movlt r7, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r7, [r0, #12] + movlt r12, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r12, [r0, #16] + movlt r6, r2 + cmp r1, #0 + ldr r1, [sp, #48] @ 4-byte Reload + str r6, [r0, #20] + movlt r9, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r9, [r0, #24] + movlt r10, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r10, [r0, #28] + movlt r3, r1 + str r3, [r0, #32] + add sp, sp, #52 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end137: + .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L + .cantunwind + .fnend + + .globl mcl_fp_sub9L + .align 2 + .type mcl_fp_sub9L,%function +mcl_fp_sub9L: @ @mcl_fp_sub9L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #24 + sub sp, sp, #24 + ldm r2, {r12, lr} + ldr r5, [r1] + ldr r8, [r2, #8] + ldr r9, [r2, #12] + ldmib r1, {r4, r6, r7} + subs r12, r5, r12 + ldr r5, [r2, #24] + sbcs lr, r4, lr + ldr r4, [r2, #20] + sbcs r8, r6, r8 + ldr r6, [r2, #16] + sbcs r9, r7, r9 + ldr r7, [r1, #16] + sbcs r10, r7, r6 + ldr r7, [r1, #20] + ldr r6, [r1, #28] + sbcs r7, r7, r4 + ldr r4, [r1, #24] + ldr r1, [r1, #32] + sbcs r4, r4, r5 + ldr r5, [r2, #28] + ldr r2, [r2, #32] + sbcs r5, r6, r5 + sbcs r1, r1, r2 + add r2, r0, #8 + str r1, [r0, #32] + stm r0, {r12, lr} + stm r2, {r8, r9, r10} + mov r2, #0 + str r7, [r0, #20] + str r4, [r0, #24] + str r5, [r0, #28] + sbc r2, r2, #0 + tst r2, #1 + beq .LBB138_2 +@ BB#1: @ %carry + ldr r2, [r3, #32] + ldr r6, [r3, #4] + ldr r11, [r3, #12] + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [r3, #8] + str r2, [sp] @ 4-byte Spill + ldr r2, [r3, #16] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r3, #20] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r3, #24] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r3, #28] + ldr r3, [r3] + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [sp] @ 4-byte Reload + adds r3, r3, r12 + adcs r6, r6, lr + adcs r8, r2, r8 + ldr r2, [sp, #4] @ 4-byte Reload + adcs r12, r11, r9 + adcs lr, r2, r10 + ldr r2, [sp, #8] @ 4-byte Reload + adcs r7, r2, r7 + ldr r2, [sp, #12] @ 4-byte Reload + adcs r4, r2, r4 + ldr r2, [sp, #16] @ 4-byte Reload + stm r0, {r3, r6, r8, r12, lr} + str r7, [r0, #20] + str r4, [r0, #24] + adcs r5, r2, r5 + ldr r2, [sp, #20] @ 4-byte Reload + str r5, [r0, #28] + adc r1, r2, r1 + str r1, [r0, #32] +.LBB138_2: @ %nocarry + add sp, sp, #24 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end138: + .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L + .cantunwind + .fnend + + .globl mcl_fp_subNF9L + .align 2 + .type mcl_fp_subNF9L,%function +mcl_fp_subNF9L: @ @mcl_fp_subNF9L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #52 + sub sp, sp, #52 + ldr r7, [r2, #32] + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r1, #32] + str r7, [sp, #44] @ 4-byte Spill + ldm r2, {r6, r8} + ldr r7, [r2, #8] + ldr r5, [r2, #16] + ldr r4, [r1, #16] + ldr r11, [r1, #20] + ldr r10, [r1, #24] + ldr r9, [r1, #28] + str r7, [sp, #36] 
@ 4-byte Spill + ldr r7, [r2, #12] + str r7, [sp, #32] @ 4-byte Spill + ldr r7, [r2, #20] + str r7, [sp, #40] @ 4-byte Spill + ldr r7, [r2, #24] + ldr r2, [r2, #28] + str r7, [sp, #24] @ 4-byte Spill + str r2, [sp, #28] @ 4-byte Spill + ldm r1, {r1, r2, r12, lr} + subs r6, r1, r6 + ldr r1, [sp, #36] @ 4-byte Reload + sbcs r7, r2, r8 + ldr r2, [sp, #44] @ 4-byte Reload + str r6, [sp, #12] @ 4-byte Spill + str r7, [sp, #16] @ 4-byte Spill + sbcs r8, r12, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r8, [sp, #20] @ 4-byte Spill + sbcs r12, lr, r1 + ldr r1, [sp, #40] @ 4-byte Reload + sbcs r5, r4, r5 + str r12, [sp, #32] @ 4-byte Spill + str r5, [sp, #36] @ 4-byte Spill + sbcs lr, r11, r1 + ldr r1, [sp, #24] @ 4-byte Reload + ldr r11, [r3, #16] + str lr, [sp, #40] @ 4-byte Spill + sbcs r4, r10, r1 + ldr r1, [sp, #28] @ 4-byte Reload + ldr r10, [r3, #20] + str r4, [sp, #24] @ 4-byte Spill + sbcs r9, r9, r1 + ldr r1, [sp, #48] @ 4-byte Reload + sbc r1, r2, r1 + ldr r2, [r3, #24] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [r3, #32] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [r3, #4] + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [r3, #8] + str r1, [sp, #4] @ 4-byte Spill + ldr r1, [r3, #12] + str r1, [sp] @ 4-byte Spill + ldr r1, [r3, #28] + ldr r3, [r3] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #8] @ 4-byte Reload + adds r3, r6, r3 + adcs r6, r7, r1 + ldr r1, [sp, #4] @ 4-byte Reload + adcs r7, r8, r1 + ldr r1, [sp] @ 4-byte Reload + adcs r1, r12, r1 + adcs r12, r5, r11 + adcs r5, lr, r10 + ldr r10, [sp, #12] @ 4-byte Reload + adcs lr, r4, r2 + ldr r2, [sp, #28] @ 4-byte Reload + ldr r4, [sp, #48] @ 4-byte Reload + adcs r8, r9, r2 + ldr r2, [sp, #44] @ 4-byte Reload + adc r11, r4, r2 + asr r2, r4, #31 + cmp r2, #0 + movge r3, r10 + str r3, [r0] + ldr r3, [sp, #16] @ 4-byte Reload + movge r6, r3 + ldr r3, [sp, #20] @ 4-byte Reload + str r6, [r0, #4] + movge r7, r3 + ldr r3, [sp, #32] @ 4-byte Reload + cmp r2, #0 + str r7, [r0, #8] + movge r1, r3 + str r1, [r0, #12] + ldr r1, [sp, #36] @ 4-byte Reload + movge r12, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r12, [r0, #16] + movge r5, r1 + ldr r1, [sp, #24] @ 4-byte Reload + cmp r2, #0 + movge r8, r9 + movge r11, r4 + str r5, [r0, #20] + movge lr, r1 + str lr, [r0, #24] + str r8, [r0, #28] + str r11, [r0, #32] + add sp, sp, #52 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end139: + .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L + .cantunwind + .fnend + + .globl mcl_fpDbl_add9L + .align 2 + .type mcl_fpDbl_add9L,%function +mcl_fpDbl_add9L: @ @mcl_fpDbl_add9L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #88 + sub sp, sp, #88 + ldm r1, {r7, r9} + ldr r8, [r1, #8] + ldr lr, [r1, #12] + ldm r2, {r4, r5, r6, r12} + add r11, r2, #16 + adds r4, r4, r7 + ldr r7, [r2, #28] + str r4, [sp, #36] @ 4-byte Spill + ldr r4, [r2, #64] + str r7, [sp, #24] @ 4-byte Spill + str r4, [sp, #76] @ 4-byte Spill + ldr r4, [r2, #68] + str r4, [sp, #80] @ 4-byte Spill + adcs r4, r5, r9 + str r4, [sp, #32] @ 4-byte Spill + adcs r4, r6, r8 + str r4, [sp, #28] @ 4-byte Spill + ldr r4, [r2, #32] + adcs r9, r12, lr + add lr, r1, #16 + str r4, [sp, #48] @ 4-byte Spill + ldr r4, [r2, #36] + str r4, [sp, #52] @ 4-byte Spill + ldr r4, [r2, #40] + str r4, [sp, #56] @ 4-byte Spill + ldr r4, [r2, #44] + str r4, [sp, #60] @ 4-byte Spill + ldr r4, [r2, #48] + str r4, [sp, #64] @ 4-byte Spill + ldr r4, [r2, #52] + str r4, [sp, #68] @ 4-byte Spill + ldr r4, [r2, #56] + str r4, [sp, 
#72] @ 4-byte Spill + ldr r4, [r2, #60] + str r4, [sp, #84] @ 4-byte Spill + ldm r11, {r5, r6, r11} + ldr r2, [r1, #64] + ldr r8, [r1, #32] + ldr r4, [r1, #36] + str r2, [sp, #40] @ 4-byte Spill + ldr r2, [r1, #68] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r1, #40] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #44] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #48] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #52] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #20] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r10, [sp, #36] @ 4-byte Reload + ldr r7, [sp, #32] @ 4-byte Reload + str r10, [r0] + str r7, [r0, #4] + ldr r7, [sp, #28] @ 4-byte Reload + adcs r1, r5, r1 + adcs r2, r6, r2 + str r7, [r0, #8] + str r9, [r0, #12] + str r1, [r0, #16] + str r2, [r0, #20] + ldr r2, [sp, #24] @ 4-byte Reload + adcs r1, r11, r12 + str r1, [r0, #24] + ldr r1, [sp, #48] @ 4-byte Reload + adcs r2, r2, lr + str r2, [r0, #28] + ldr r2, [sp, #52] @ 4-byte Reload + adcs r1, r1, r8 + str r1, [r0, #32] + ldr r1, [sp, #56] @ 4-byte Reload + adcs r4, r2, r4 + ldr r2, [sp] @ 4-byte Reload + adcs r5, r1, r2 + ldr r1, [sp, #60] @ 4-byte Reload + ldr r2, [sp, #4] @ 4-byte Reload + str r5, [sp, #56] @ 4-byte Spill + adcs lr, r1, r2 + ldr r1, [sp, #64] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + str lr, [sp, #60] @ 4-byte Spill + adcs r12, r1, r2 + ldr r1, [sp, #68] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + str r12, [sp, #64] @ 4-byte Spill + adcs r7, r1, r2 + ldr r1, [sp, #72] @ 4-byte Reload + ldr r2, [sp, #16] @ 4-byte Reload + str r7, [sp, #68] @ 4-byte Spill + adcs r8, r1, r2 + ldr r1, [sp, #84] @ 4-byte Reload + ldr r2, [sp, #20] @ 4-byte Reload + str r8, [sp, #72] @ 4-byte Spill + adcs r1, r1, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r6, r1, r2 + ldr r1, [sp, #80] @ 4-byte Reload + ldr r2, [sp, #44] @ 4-byte Reload + adcs r9, r1, r2 + mov r2, #0 + adc r1, r2, #0 + str r9, [sp, #76] @ 4-byte Spill + str r1, [sp, #80] @ 4-byte Spill + ldmib r3, {r2, r11} + ldr r1, [r3, #12] + ldr r10, [r3] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [r3, #16] + subs r10, r4, r10 + sbcs r2, r5, r2 + sbcs r11, lr, r11 + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [r3, #20] + ldr r5, [sp, #40] @ 4-byte Reload + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [r3, #24] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [r3, #28] + ldr r3, [r3, #32] + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + sbcs r1, r12, r1 + sbcs r12, r7, r5 + ldr r7, [sp, #44] @ 4-byte Reload + ldr r5, [sp, #84] @ 4-byte Reload + sbcs lr, r8, r7 + ldr r7, [sp, #48] @ 4-byte Reload + mov r8, r6 + sbcs r7, r5, r7 + ldr r5, [sp, #52] @ 4-byte Reload + sbcs r5, r6, r5 + sbcs r6, r9, r3 + ldr r3, [sp, #80] @ 4-byte Reload + sbc r9, r3, #0 + ldr r3, [sp, #56] @ 4-byte Reload + ands r9, r9, #1 + movne r10, r4 + str r10, [r0, #36] + movne r2, r3 + str r2, [r0, #40] + ldr r2, [sp, #60] @ 4-byte Reload + movne r11, r2 + ldr r2, [sp, #64] @ 4-byte Reload + cmp r9, #0 + str r11, [r0, #44] + movne r1, r2 + str r1, [r0, #48] + ldr r1, [sp, #68] @ 4-byte Reload + movne r12, r1 + ldr r1, [sp, #72] @ 4-byte Reload + str r12, [r0, #52] + movne lr, r1 + ldr r1, [sp, #84] @ 4-byte Reload + cmp r9, #0 + movne r5, r8 + str lr, [r0, #56] + movne r7, r1 + ldr r1, [sp, #76] @ 4-byte Reload + str r7, [r0, #60] + str r5, [r0, #64] + movne r6, r1 + str r6, [r0, #68] + add sp, sp, #88 + pop 
{r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end140: + .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub9L + .align 2 + .type mcl_fpDbl_sub9L,%function +mcl_fpDbl_sub9L: @ @mcl_fpDbl_sub9L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #80 + sub sp, sp, #80 + ldr r7, [r2, #64] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r2, #68] + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [r2, #32] + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r2, #40] + str r7, [sp, #52] @ 4-byte Spill + ldr r7, [r2, #44] + str r7, [sp, #40] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #60] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #64] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #68] @ 4-byte Spill + ldr r7, [r2, #36] + str r7, [sp, #44] @ 4-byte Spill + ldm r2, {r5, r6, r7, r8} + ldr r4, [r2, #16] + ldr r10, [r2, #24] + str r4, [sp, #20] @ 4-byte Spill + ldr r4, [r2, #20] + ldr r2, [r2, #28] + str r4, [sp, #24] @ 4-byte Spill + str r2, [sp, #32] @ 4-byte Spill + ldm r1, {r2, r12, lr} + ldr r4, [r1, #12] + ldr r11, [r1, #60] + subs r9, r2, r5 + ldr r2, [r1, #64] + sbcs r5, r12, r6 + sbcs r6, lr, r7 + add lr, r1, #16 + ldr r7, [r1, #36] + sbcs r4, r4, r8 + ldr r8, [r1, #32] + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [r1, #68] + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [r1, #40] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #44] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #48] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #52] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #16] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + str r9, [r0] + stmib r0, {r5, r6} + str r4, [r0, #12] + ldr r5, [sp, #20] @ 4-byte Reload + ldr r4, [sp, #24] @ 4-byte Reload + sbcs r1, r1, r5 + ldr r5, [sp, #16] @ 4-byte Reload + sbcs r2, r2, r4 + str r1, [r0, #16] + str r2, [r0, #20] + ldr r2, [sp, #32] @ 4-byte Reload + sbcs r1, r12, r10 + str r1, [r0, #24] + ldr r1, [sp, #48] @ 4-byte Reload + sbcs r2, lr, r2 + str r2, [r0, #28] + ldr r2, [sp, #44] @ 4-byte Reload + sbcs r1, r8, r1 + str r1, [r0, #32] + sbcs r1, r7, r2 + ldr r2, [sp, #52] @ 4-byte Reload + ldr r7, [sp] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + sbcs r4, r7, r2 + ldr r2, [sp, #40] @ 4-byte Reload + ldr r7, [sp, #4] @ 4-byte Reload + sbcs r9, r7, r2 + ldr r2, [sp, #56] @ 4-byte Reload + ldr r7, [sp, #8] @ 4-byte Reload + sbcs r12, r7, r2 + ldr r2, [sp, #60] @ 4-byte Reload + ldr r7, [sp, #12] @ 4-byte Reload + str r12, [sp, #56] @ 4-byte Spill + sbcs lr, r7, r2 + ldr r2, [sp, #64] @ 4-byte Reload + ldr r7, [sp, #36] @ 4-byte Reload + str lr, [sp, #60] @ 4-byte Spill + sbcs r10, r5, r2 + ldr r2, [sp, #68] @ 4-byte Reload + ldr r5, [sp, #28] @ 4-byte Reload + str r10, [sp, #64] @ 4-byte Spill + sbcs r6, r11, r2 + ldr r2, [sp, #72] @ 4-byte Reload + str r6, [sp, #68] @ 4-byte Spill + sbcs r8, r7, r2 + ldr r2, [sp, #76] @ 4-byte Reload + str r8, [sp, #44] @ 4-byte Spill + sbcs r11, r5, r2 + mov r2, #0 + sbc r2, r2, #0 + str r11, [sp, #76] @ 4-byte Spill + str r2, [sp, #72] @ 4-byte Spill + ldr r2, [r3, #32] + str r2, [sp, #52] @ 4-byte Spill + ldmib r3, {r5, r7} + ldr r2, [r3, #12] + str r2, [sp, #24] @ 4-byte Spill + ldr r2, [r3, #16] + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [r3, #20] + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [r3, #24] + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [r3, #28] + ldr r3, [r3] + adds r3, r1, r3 + 
str r2, [sp, #40] @ 4-byte Spill + ldr r2, [sp, #24] @ 4-byte Reload + adcs r5, r4, r5 + adcs r1, r9, r7 + ldr r7, [sp, #32] @ 4-byte Reload + adcs r2, r12, r2 + adcs r12, lr, r7 + ldr r7, [sp, #28] @ 4-byte Reload + adcs lr, r10, r7 + ldr r7, [sp, #36] @ 4-byte Reload + adcs r10, r6, r7 + ldr r6, [sp, #40] @ 4-byte Reload + ldr r7, [sp, #52] @ 4-byte Reload + adcs r6, r8, r6 + adc r11, r11, r7 + ldr r7, [sp, #72] @ 4-byte Reload + ands r8, r7, #1 + ldr r7, [sp, #48] @ 4-byte Reload + moveq r5, r4 + moveq r1, r9 + moveq r3, r7 + cmp r8, #0 + str r3, [r0, #36] + str r5, [r0, #40] + str r1, [r0, #44] + ldr r1, [sp, #56] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #60] @ 4-byte Reload + str r2, [r0, #48] + moveq r12, r1 + ldr r1, [sp, #64] @ 4-byte Reload + str r12, [r0, #52] + moveq lr, r1 + ldr r1, [sp, #68] @ 4-byte Reload + cmp r8, #0 + str lr, [r0, #56] + moveq r10, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r10, [r0, #60] + moveq r6, r1 + ldr r1, [sp, #76] @ 4-byte Reload + str r6, [r0, #64] + moveq r11, r1 + str r11, [r0, #68] + add sp, sp, #80 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end141: + .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L + .cantunwind + .fnend + + .align 2 + .type .LmulPv320x32,%function +.LmulPv320x32: @ @mulPv320x32 + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r11, lr} + push {r4, r5, r6, r7, r8, r9, r11, lr} + ldr r12, [r1] + ldmib r1, {r3, lr} + ldr r9, [r1, #12] + umull r4, r8, lr, r2 + umull lr, r6, r12, r2 + mov r5, r4 + mov r7, r6 + str lr, [r0] + umull lr, r12, r9, r2 + umlal r7, r5, r3, r2 + str r5, [r0, #8] + str r7, [r0, #4] + umull r5, r7, r3, r2 + adds r3, r6, r5 + adcs r3, r7, r4 + adcs r3, r8, lr + str r3, [r0, #12] + ldr r3, [r1, #16] + umull r7, r6, r3, r2 + adcs r3, r12, r7 + str r3, [r0, #16] + ldr r3, [r1, #20] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #20] + ldr r3, [r1, #24] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #24] + ldr r3, [r1, #28] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #28] + ldr r3, [r1, #32] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #32] + ldr r1, [r1, #36] + umull r3, r7, r1, r2 + adcs r1, r6, r3 + str r1, [r0, #36] + adc r1, r7, #0 + str r1, [r0, #40] + pop {r4, r5, r6, r7, r8, r9, r11, lr} + mov pc, lr +.Lfunc_end142: + .size .LmulPv320x32, .Lfunc_end142-.LmulPv320x32 + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre10L + .align 2 + .type mcl_fp_mulUnitPre10L,%function +mcl_fp_mulUnitPre10L: @ @mcl_fp_mulUnitPre10L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r11, lr} + push {r4, r5, r6, r7, r8, r9, r11, lr} + .pad #48 + sub sp, sp, #48 + mov r4, r0 + mov r0, sp + bl .LmulPv320x32(PLT) + ldr r12, [sp, #40] + ldr lr, [sp, #36] + ldr r8, [sp, #32] + ldr r9, [sp, #28] + ldr r0, [sp, #24] + ldr r1, [sp, #20] + ldm sp, {r6, r7} + add r5, sp, #8 + ldm r5, {r2, r3, r5} + stm r4, {r6, r7} + add r6, r4, #8 + stm r6, {r2, r3, r5} + str r1, [r4, #20] + str r0, [r4, #24] + str r9, [r4, #28] + str r8, [r4, #32] + str lr, [r4, #36] + str r12, [r4, #40] + add sp, sp, #48 + pop {r4, r5, r6, r7, r8, r9, r11, lr} + mov pc, lr +.Lfunc_end143: + .size mcl_fp_mulUnitPre10L, .Lfunc_end143-mcl_fp_mulUnitPre10L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre10L + .align 2 + .type mcl_fpDbl_mulPre10L,%function +mcl_fpDbl_mulPre10L: @ @mcl_fpDbl_mulPre10L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #156 + sub sp, sp, #156 + mov r6, r2 + mov r5, r1 + mov r4, r0 + bl 
mcl_fpDbl_mulPre5L(PLT) + add r0, r4, #40 + add r1, r5, #20 + add r2, r6, #20 + bl mcl_fpDbl_mulPre5L(PLT) + add r11, r6, #24 + ldr r7, [r6, #12] + ldr r8, [r6, #16] + ldr r1, [r6, #20] + ldm r11, {r0, r2, r10, r11} + ldm r6, {r6, r9, r12} + adds lr, r6, r1 + adcs r3, r9, r0 + mov r0, #0 + str lr, [sp, #72] @ 4-byte Spill + adcs r2, r12, r2 + str r3, [sp, #68] @ 4-byte Spill + adcs r12, r7, r10 + str r2, [sp, #64] @ 4-byte Spill + adcs r10, r8, r11 + str r12, [sp, #60] @ 4-byte Spill + adc r6, r0, #0 + ldr r0, [r5, #32] + str r10, [sp, #56] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r5, #36] + str r0, [sp, #52] @ 4-byte Spill + ldmib r5, {r8, r9, r11} + ldr r0, [r5, #16] + ldr r7, [r5, #20] + ldr r1, [r5, #28] + str lr, [sp, #76] + str r3, [sp, #80] + str r2, [sp, #84] + str r12, [sp, #88] + str r10, [sp, #92] + add r2, sp, #76 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r5, #24] + ldr r5, [r5] + adds r5, r5, r7 + adcs r7, r8, r0 + ldr r0, [sp, #48] @ 4-byte Reload + str r5, [sp, #96] + adcs r9, r9, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r7, [sp, #100] + str r9, [sp, #104] + adcs r11, r11, r0 + ldr r0, [sp, #52] @ 4-byte Reload + str r11, [sp, #108] + adcs r8, r1, r0 + mov r0, #0 + add r1, sp, #96 + adc r10, r0, #0 + add r0, sp, #116 + str r8, [sp, #112] + bl mcl_fpDbl_mulPre5L(PLT) + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #68] @ 4-byte Reload + cmp r6, #0 + ldr r2, [sp, #64] @ 4-byte Reload + ldr r3, [sp, #60] @ 4-byte Reload + moveq r5, r6 + moveq r8, r6 + moveq r11, r6 + moveq r9, r6 + moveq r7, r6 + str r5, [sp, #52] @ 4-byte Spill + adds r0, r5, r0 + ldr r5, [sp, #56] @ 4-byte Reload + adcs r1, r7, r1 + adcs r2, r9, r2 + adcs r3, r11, r3 + adcs r12, r8, r5 + mov r5, #0 + adc lr, r5, #0 + cmp r10, #0 + ldr r5, [sp, #52] @ 4-byte Reload + moveq r1, r7 + ldr r7, [sp, #136] + moveq r3, r11 + moveq r2, r9 + moveq r12, r8 + moveq lr, r10 + cmp r10, #0 + moveq r0, r5 + and r5, r6, r10 + ldr r6, [sp, #152] + adds r8, r0, r7 + ldr r7, [sp, #140] + adcs r10, r1, r7 + ldr r7, [sp, #144] + adcs r11, r2, r7 + ldr r7, [sp, #148] + adcs r0, r3, r7 + adcs r12, r12, r6 + str r0, [sp, #60] @ 4-byte Spill + adc r9, lr, r5 + ldm r4, {r5, r6, r7, lr} + ldr r1, [sp, #116] + ldr r2, [sp, #120] + ldr r0, [sp, #124] + ldr r3, [sp, #128] + subs r1, r1, r5 + sbcs r2, r2, r6 + ldr r6, [sp, #132] + sbcs r0, r0, r7 + ldr r7, [r4, #16] + sbcs lr, r3, lr + ldr r3, [r4, #20] + sbcs r5, r6, r7 + ldr r6, [r4, #32] + ldr r7, [r4, #52] + str r3, [sp, #72] @ 4-byte Spill + sbcs r3, r8, r3 + ldr r8, [r4, #56] + str r3, [sp, #44] @ 4-byte Spill + ldr r3, [r4, #24] + str r6, [sp, #28] @ 4-byte Spill + str r3, [sp, #68] @ 4-byte Spill + sbcs r3, r10, r3 + ldr r10, [r4, #44] + str r3, [sp, #56] @ 4-byte Spill + ldr r3, [r4, #28] + str r3, [sp, #64] @ 4-byte Spill + sbcs r3, r11, r3 + str r3, [sp, #52] @ 4-byte Spill + ldr r3, [sp, #60] @ 4-byte Reload + sbcs r3, r3, r6 + str r3, [sp, #48] @ 4-byte Spill + ldr r3, [r4, #36] + str r3, [sp, #60] @ 4-byte Spill + sbcs r3, r12, r3 + ldr r12, [r4, #64] + str r3, [sp, #40] @ 4-byte Spill + sbc r3, r9, #0 + ldr r9, [r4, #40] + str r3, [sp, #36] @ 4-byte Spill + ldr r3, [r4, #76] + subs r1, r1, r9 + sbcs r2, r2, r10 + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [r4, #48] + ldr r11, [sp, #32] @ 4-byte Reload + sbcs r0, r0, r2 + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [r4, #72] + str r0, [sp, #24] @ 4-byte Spill + sbcs r0, lr, r7 + ldr lr, [r4, #68] + str r0, [sp, #16] @ 4-byte Spill + sbcs r0, r5, r8 + ldr r5, [r4, #60] + ldr r6, [sp, #24] @ 4-byte 
Reload + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + sbcs r0, r0, r5 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + sbcs r0, r0, r12 + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + sbcs r0, r0, lr + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + sbcs r0, r0, r2 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + sbcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + sbc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adds r0, r0, r1 + ldr r1, [sp, #68] @ 4-byte Reload + str r0, [r4, #20] + ldr r0, [sp, #64] @ 4-byte Reload + adcs r1, r1, r11 + adcs r0, r0, r6 + str r1, [r4, #24] + ldr r1, [sp, #28] @ 4-byte Reload + ldr r6, [sp, #16] @ 4-byte Reload + str r0, [r4, #28] + ldr r0, [sp, #60] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #12] @ 4-byte Reload + str r1, [r4, #32] + ldr r1, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #8] @ 4-byte Reload + str r0, [r4, #36] + adcs r1, r9, r1 + ldr r0, [sp, #4] @ 4-byte Reload + str r1, [r4, #40] + ldr r1, [sp, #20] @ 4-byte Reload + adcs r0, r10, r0 + adcs r1, r1, r6 + str r0, [r4, #44] + ldr r0, [sp, #48] @ 4-byte Reload + ldr r6, [sp, #52] @ 4-byte Reload + str r1, [r4, #48] + ldr r1, [sp, #56] @ 4-byte Reload + adcs r0, r7, r0 + adcs r1, r8, r1 + adcs r5, r5, r6 + adcs r7, r12, #0 + add r12, r4, #52 + adcs r6, lr, #0 + stm r12, {r0, r1, r5, r7} + adcs r2, r2, #0 + str r6, [r4, #68] + adc r3, r3, #0 + str r2, [r4, #72] + str r3, [r4, #76] + add sp, sp, #156 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end144: + .size mcl_fpDbl_mulPre10L, .Lfunc_end144-mcl_fpDbl_mulPre10L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre10L + .align 2 + .type mcl_fpDbl_sqrPre10L,%function +mcl_fpDbl_sqrPre10L: @ @mcl_fpDbl_sqrPre10L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #156 + sub sp, sp, #156 + mov r5, r1 + mov r4, r0 + mov r2, r5 + bl mcl_fpDbl_mulPre5L(PLT) + add r1, r5, #20 + add r0, r4, #40 + mov r2, r1 + bl mcl_fpDbl_mulPre5L(PLT) + ldr lr, [r5, #32] + ldr r12, [r5, #36] + ldmib r5, {r2, r3, r6, r8} + ldr r0, [r5, #20] + ldr r7, [r5, #24] + ldr r1, [r5, #28] + ldr r5, [r5] + adds r5, r5, r0 + adcs r0, r2, r7 + str r5, [sp, #96] + str r5, [sp, #76] + adcs r1, r3, r1 + add r3, sp, #80 + str r0, [sp, #100] + adcs r2, r6, lr + str r1, [sp, #104] + adcs r6, r8, r12 + str r2, [sp, #108] + str r6, [sp, #112] + stm r3, {r0, r1, r2, r6} + lsr r3, r2, #31 + orr r3, r3, r6, lsl #1 + str r3, [sp, #72] @ 4-byte Spill + lsr r3, r1, #31 + lsl r1, r1, #1 + orr r1, r1, r0, lsr #31 + orr r2, r3, r2, lsl #1 + str r1, [sp, #64] @ 4-byte Spill + lsr r1, r5, #31 + str r2, [sp, #68] @ 4-byte Spill + add r2, sp, #76 + orr r11, r1, r0, lsl #1 + mov r0, #0 + add r1, sp, #96 + adc r7, r0, #0 + add r0, sp, #116 + bl mcl_fpDbl_mulPre5L(PLT) + ldr r10, [sp, #136] + ldr r9, [sp, #140] + ldr r8, [sp, #144] + ldr r0, [sp, #64] @ 4-byte Reload + ldr r2, [sp, #148] + ldr r1, [sp, #152] + adds r3, r10, r5, lsl #1 + adcs r5, r9, r11 + adcs r12, r8, r0 + ldr r0, [sp, #68] @ 4-byte Reload + adcs lr, r2, r0 + ldr r0, [sp, #72] @ 4-byte Reload + adcs r11, r1, r0 + adc r6, r7, r6, lsr #31 + cmp r7, #0 + moveq lr, r2 + moveq r12, r8 + moveq r11, r1 + moveq r6, r7 + moveq r5, r9 + cmp r7, #0 + add r7, sp, #116 + moveq r3, r10 + ldm r4, {r9, r10} + ldr r0, [r4, #8] + ldr r8, [r4, #12] + str r0, 
[sp, #72] @ 4-byte Spill + ldm r7, {r1, r2, r7} + ldr r0, [sp, #128] + subs r1, r1, r9 + ldr r9, [r4, #40] + sbcs r2, r2, r10 + ldr r10, [r4, #44] + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [sp, #72] @ 4-byte Reload + sbcs r7, r7, r2 + ldr r2, [r4, #48] + str r7, [sp, #44] @ 4-byte Spill + sbcs r8, r0, r8 + ldr r0, [r4, #16] + ldr r7, [sp, #132] + str r2, [sp, #16] @ 4-byte Spill + sbcs r0, r7, r0 + ldr r7, [r4, #52] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r4, #20] + sbcs r3, r3, r0 + str r3, [sp, #36] @ 4-byte Spill + ldr r3, [r4, #24] + str r3, [sp, #72] @ 4-byte Spill + sbcs r3, r5, r3 + ldr r5, [r4, #60] + str r3, [sp, #56] @ 4-byte Spill + ldr r3, [r4, #28] + str r3, [sp, #68] @ 4-byte Spill + sbcs r3, r12, r3 + ldr r12, [r4, #64] + str r3, [sp, #52] @ 4-byte Spill + ldr r3, [r4, #32] + str r3, [sp, #64] @ 4-byte Spill + sbcs r3, lr, r3 + ldr lr, [r4, #68] + str r3, [sp, #48] @ 4-byte Spill + ldr r3, [r4, #36] + str r3, [sp, #60] @ 4-byte Spill + sbcs r3, r11, r3 + str r3, [sp, #32] @ 4-byte Spill + sbc r3, r6, #0 + subs r1, r1, r9 + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #20] @ 4-byte Reload + str r3, [sp, #28] @ 4-byte Spill + ldr r3, [r4, #76] + sbcs r1, r1, r10 + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + ldr r11, [sp, #20] @ 4-byte Reload + sbcs r1, r1, r2 + ldr r2, [r4, #72] + str r1, [sp, #44] @ 4-byte Spill + sbcs r1, r8, r7 + ldr r8, [r4, #56] + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + ldr r6, [sp, #44] @ 4-byte Reload + sbcs r1, r1, r8 + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + sbcs r1, r1, r5 + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + sbcs r1, r1, r12 + str r1, [sp, #4] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + sbcs r1, r1, lr + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + sbcs r1, r1, r2 + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + sbcs r1, r1, r3 + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #28] @ 4-byte Reload + sbc r1, r1, #0 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #24] @ 4-byte Reload + adds r0, r0, r1 + ldr r1, [sp, #72] @ 4-byte Reload + str r0, [r4, #20] + ldr r0, [sp, #68] @ 4-byte Reload + adcs r1, r1, r11 + adcs r0, r0, r6 + str r1, [r4, #24] + ldr r1, [sp, #64] @ 4-byte Reload + ldr r6, [sp, #12] @ 4-byte Reload + str r0, [r4, #28] + ldr r0, [sp, #60] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #40] @ 4-byte Reload + str r1, [r4, #32] + ldr r1, [sp, #36] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #8] @ 4-byte Reload + str r0, [r4, #36] + adcs r1, r9, r1 + ldr r0, [sp, #4] @ 4-byte Reload + str r1, [r4, #40] + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r10, r0 + adcs r1, r1, r6 + str r0, [r4, #44] + ldr r0, [sp, #48] @ 4-byte Reload + ldr r6, [sp, #52] @ 4-byte Reload + str r1, [r4, #48] + ldr r1, [sp, #56] @ 4-byte Reload + adcs r0, r7, r0 + adcs r1, r8, r1 + adcs r5, r5, r6 + adcs r7, r12, #0 + add r12, r4, #52 + adcs r6, lr, #0 + stm r12, {r0, r1, r5, r7} + adcs r2, r2, #0 + str r6, [r4, #68] + adc r3, r3, #0 + str r2, [r4, #72] + str r3, [r4, #76] + add sp, sp, #156 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end145: + .size mcl_fpDbl_sqrPre10L, .Lfunc_end145-mcl_fpDbl_sqrPre10L + .cantunwind + .fnend + + .globl mcl_fp_mont10L + .align 2 + .type mcl_fp_mont10L,%function +mcl_fp_mont10L: @ @mcl_fp_mont10L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} 
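+@ The body below implements a word-serial (CIOS-style) Montgomery
+@ multiplication over 10 x 32-bit limbs (hence "mont10L"). Assuming the
+@ usual mcl argument order z = MontMul(x, y) mod p, i.e. r0 = z, r1 = x,
+@ r2 = y, r3 = p (inferred from the loads below, not stated in this file):
+@ the reduction constant p' = -p^-1 mod 2^32 is kept one word below the
+@ modulus and fetched with "ldr r5, [r3, #-4]". Each round calls
+@ .LmulPv320x32 to form the 11-word product x * y[i], accumulates it into
+@ the running total with an adcs chain, computes q = t[0] * p' (the
+@ "mul r2, ..." instructions), and adds q * p via a second .LmulPv320x32
+@ call so that t[0] becomes zero and the low word can be dropped. After
+@ all 10 rounds, a conditional subtraction of p (the subs/sbcs/movne
+@ pattern used throughout this file) produces the reduced result.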
+ .pad #28 + sub sp, sp, #28 + .pad #1024 + sub sp, sp, #1024 + mov r7, r2 + ldr r5, [r3, #-4] + str r0, [sp, #68] @ 4-byte Spill + add r0, sp, #1000 + str r3, [sp, #84] @ 4-byte Spill + str r1, [sp, #76] @ 4-byte Spill + mov r4, r3 + mov r6, r1 + ldr r2, [r7] + str r7, [sp, #72] @ 4-byte Spill + str r5, [sp, #80] @ 4-byte Spill + bl .LmulPv320x32(PLT) + ldr r0, [sp, #1004] + ldr r10, [sp, #1000] + mov r1, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1008] + mul r2, r10, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1012] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1040] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #1036] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1032] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1028] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1024] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1020] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1016] + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #952 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #992] + ldr r2, [r7, #4] + ldr r9, [sp, #968] + ldr r8, [sp, #952] + ldr r11, [sp, #956] + ldr r5, [sp, #960] + ldr r4, [sp, #964] + mov r1, r6 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #988] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #984] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #980] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #976] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #972] + str r0, [sp, #4] @ 4-byte Spill + add r0, sp, #904 + bl .LmulPv320x32(PLT) + ldr r1, [sp, #52] @ 4-byte Reload + adds r0, r8, r10 + ldr r2, [sp, #4] @ 4-byte Reload + add lr, sp, #908 + ldr r10, [sp, #944] + mov r0, #0 + adcs r1, r11, r1 + add r11, sp, #932 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r5, r1 + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r4, r1 + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #28] @ 4-byte Reload + adcs r1, r9, r1 + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #8] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #12] @ 4-byte Reload + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + ldm r11, {r5, r6, r11} + ldr r4, [sp, #904] + adcs r8, r2, r1 + adc r9, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #52] @ 4-byte Reload + adds r4, r7, r4 + ldr r7, [sp, #48] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + 
adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r11 + str r0, [sp, #32] @ 4-byte Spill + adcs r0, r9, r10 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r4, r0 + add r0, sp, #856 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #896] + add r11, sp, #856 + ldr r6, [sp, #880] + ldr r7, [sp, #876] + ldr r5, [sp, #872] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #892] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #888] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #884] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #8] + add r0, sp, #808 + bl .LmulPv320x32(PLT) + adds r0, r4, r8 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #808 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #848] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #832 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r8, r0, r1 + ldr r0, [sp, #24] @ 4-byte Reload + ldm r11, {r4, r5, r6, r11} + adc r9, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #64] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r11 + str r0, [sp, #32] @ 4-byte Spill + adcs r0, r9, r10 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #760 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #800] + add r11, sp, #760 + ldr r6, [sp, #784] + ldr r4, [sp, #780] + ldr r5, [sp, #776] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #796] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #792] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #788] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, sp, #712 + bl .LmulPv320x32(PLT) + adds r0, r7, r8 + ldr 
r1, [sp, #8] @ 4-byte Reload + add lr, sp, #716 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #752] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #740 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r8, r0, r1 + ldr r0, [sp, #24] @ 4-byte Reload + ldm r11, {r5, r6, r11} + ldr r4, [sp, #712] + adc r9, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #64] @ 4-byte Reload + adds r4, r7, r4 + ldr r7, [sp, #60] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r11 + str r0, [sp, #32] @ 4-byte Spill + adcs r0, r9, r10 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r4, r0 + add r0, sp, #664 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #704] + add r11, sp, #664 + ldr r6, [sp, #688] + ldr r7, [sp, #684] + ldr r5, [sp, #680] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #700] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #696] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #692] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, sp, #616 + bl .LmulPv320x32(PLT) + adds r0, r4, r8 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #616 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #656] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #640 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, 
r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r8, r0, r1 + ldr r0, [sp, #24] @ 4-byte Reload + ldm r11, {r4, r5, r6, r11} + adc r9, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #64] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r11 + str r0, [sp, #32] @ 4-byte Spill + adcs r0, r9, r10 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #568 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #608] + add r11, sp, #568 + ldr r6, [sp, #592] + ldr r4, [sp, #588] + ldr r5, [sp, #584] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #604] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #600] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #596] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #20] + add r0, sp, #520 + bl .LmulPv320x32(PLT) + adds r0, r7, r8 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #524 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #560] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #548 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r8, r0, r1 + ldr r0, [sp, #24] @ 4-byte Reload + ldm r11, {r5, r6, r11} + ldr r4, [sp, #520] + adc r9, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #64] @ 4-byte Reload + adds r4, r7, r4 + ldr r7, [sp, #60] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, 
[sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r11 + str r0, [sp, #32] @ 4-byte Spill + adcs r0, r9, r10 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r4, r0 + add r0, sp, #472 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #512] + add r11, sp, #472 + ldr r6, [sp, #496] + ldr r7, [sp, #492] + ldr r5, [sp, #488] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #508] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #504] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #500] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, sp, #424 + bl .LmulPv320x32(PLT) + adds r0, r4, r8 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #424 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #464] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #448 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r8, r0, r1 + ldr r0, [sp, #24] @ 4-byte Reload + ldm r11, {r4, r5, r6, r11} + adc r9, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #64] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r11 + str r0, [sp, #32] @ 4-byte Spill + adcs r0, r9, r10 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #376 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #416] + add r11, sp, #376 + ldr r6, [sp, #400] + ldr r4, [sp, #396] + ldr r5, [sp, #392] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #412] + 
str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #408] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #404] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, sp, #328 + bl .LmulPv320x32(PLT) + adds r0, r7, r8 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #332 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #368] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #356 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r8, r0, r1 + ldr r0, [sp, #24] @ 4-byte Reload + ldm r11, {r5, r6, r11} + ldr r4, [sp, #328] + adc r9, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #64] @ 4-byte Reload + adds r4, r7, r4 + ldr r7, [sp, #60] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r11 + str r0, [sp, #32] @ 4-byte Spill + adcs r0, r9, r10 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r4, r0 + add r0, sp, #280 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #320] + add r11, sp, #280 + ldr r6, [sp, #304] + ldr r7, [sp, #300] + ldr r5, [sp, #296] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #316] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #312] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #308] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, sp, #232 + bl .LmulPv320x32(PLT) + adds r0, r4, r8 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #232 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #272] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #256 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, 
#48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r8, r0, r1 + ldr r0, [sp, #24] @ 4-byte Reload + ldm r11, {r4, r5, r6, r11} + adc r9, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #64] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r11 + str r0, [sp, #32] @ 4-byte Spill + adcs r0, r9, r10 + str r0, [sp, #28] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #184 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #224] + add r11, sp, #184 + ldr r6, [sp, #208] + ldr r4, [sp, #204] + ldr r5, [sp, #200] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #220] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #216] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #212] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #36] + add r0, sp, #136 + bl .LmulPv320x32(PLT) + adds r0, r7, r8 + ldr r1, [sp, #60] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + add lr, sp, #136 + add r7, sp, #152 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + add r9, sp, #164 + adcs r10, r1, r10 + ldr r1, [sp, #56] @ 4-byte Reload + adcs r11, r1, r11 + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r5 + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r4 + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r1, r6 + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #12] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #28] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #24] @ 4-byte Reload + adc r1, r1, #0 + str r1, [sp, #44] @ 4-byte Spill + ldm lr, {r2, r6, r12, lr} + ldr r8, [sp, #176] + adds r4, r0, r2 + ldr r0, [sp, #80] @ 4-byte Reload + ldm r9, {r3, r5, r9} + adcs r6, r10, r6 + mul r2, r4, r0 + ldm r7, {r0, r1, r7} + str r6, [sp, #40] @ 
4-byte Spill + adcs r6, r11, r12 + ldr r11, [sp, #84] @ 4-byte Reload + str r6, [sp, #36] @ 4-byte Spill + ldr r6, [sp, #76] @ 4-byte Reload + adcs r10, r6, lr + ldr r6, [sp, #72] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r6, r0, r1 + ldr r0, [sp, #60] @ 4-byte Reload + mov r1, r11 + adcs r0, r0, r7 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r7, r0, r8 + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #88 + bl .LmulPv320x32(PLT) + add r3, sp, #88 + ldm r3, {r0, r1, r2, r3} + adds r0, r4, r0 + ldr r0, [sp, #40] @ 4-byte Reload + adcs r8, r0, r1 + ldr r0, [sp, #36] @ 4-byte Reload + adcs r4, r0, r2 + ldr r2, [sp, #104] + adcs r0, r10, r3 + str r4, [sp, #40] @ 4-byte Spill + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #108] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r5, r6, r2 + ldr r2, [sp, #112] + str r5, [sp, #48] @ 4-byte Spill + adcs r12, r0, r2 + ldr r2, [sp, #116] + ldr r0, [sp, #64] @ 4-byte Reload + str r12, [sp, #52] @ 4-byte Spill + adcs lr, r0, r2 + ldr r2, [sp, #120] + ldr r0, [sp, #76] @ 4-byte Reload + str lr, [sp, #60] @ 4-byte Spill + adcs r0, r0, r2 + ldr r2, [sp, #124] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #128] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r9, r7, r2 + adc r0, r0, #0 + str r9, [sp, #64] @ 4-byte Spill + str r0, [sp, #44] @ 4-byte Spill + mov r0, r11 + ldr r2, [r0, #16] + ldr r10, [r0] + ldr r3, [r0, #4] + ldr r1, [r0, #8] + ldr r6, [r0, #12] + ldr r7, [r0, #24] + ldr r11, [r0, #32] + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [r0, #20] + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [r0, #28] + ldr r0, [r0, #36] + str r2, [sp, #36] @ 4-byte Spill + mov r2, r8 + ldr r8, [sp, #56] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + subs r10, r2, r10 + sbcs r3, r4, r3 + ldr r4, [sp, #80] @ 4-byte Reload + sbcs r1, r8, r1 + sbcs r6, r4, r6 + sbcs r4, r5, r0 + ldr r0, [sp, #32] @ 4-byte Reload + sbcs r5, r12, r0 + ldr r0, [sp, #76] @ 4-byte Reload + sbcs r12, lr, r7 + ldr r7, [sp, #36] @ 4-byte Reload + sbcs lr, r0, r7 + ldr r0, [sp, #72] @ 4-byte Reload + ldr r7, [sp, #44] @ 4-byte Reload + sbcs r11, r0, r11 + ldr r0, [sp, #84] @ 4-byte Reload + sbcs r0, r9, r0 + ldr r9, [sp, #68] @ 4-byte Reload + sbc r7, r7, #0 + ands r7, r7, #1 + movne r10, r2 + ldr r2, [sp, #40] @ 4-byte Reload + movne r1, r8 + str r10, [r9] + movne r3, r2 + cmp r7, #0 + str r3, [r9, #4] + str r1, [r9, #8] + ldr r1, [sp, #80] @ 4-byte Reload + movne r6, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r6, [r9, #12] + movne r4, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r4, [r9, #16] + movne r5, r1 + ldr r1, [sp, #60] @ 4-byte Reload + cmp r7, #0 + str r5, [r9, #20] + movne r12, r1 + ldr r1, [sp, #76] @ 4-byte Reload + str r12, [r9, #24] + movne lr, r1 + ldr r1, [sp, #72] @ 4-byte Reload + str lr, [r9, #28] + movne r11, r1 + ldr r1, [sp, #64] @ 4-byte Reload + cmp r7, #0 + str r11, [r9, #32] + movne r0, r1 + str r0, [r9, #36] + add sp, sp, #28 + add sp, sp, #1024 + pop {r4, r5, 
r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end146: + .size mcl_fp_mont10L, .Lfunc_end146-mcl_fp_mont10L + .cantunwind + .fnend + + .globl mcl_fp_montNF10L + .align 2 + .type mcl_fp_montNF10L,%function +mcl_fp_montNF10L: @ @mcl_fp_montNF10L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #28 + sub sp, sp, #28 + .pad #1024 + sub sp, sp, #1024 + mov r7, r2 + ldr r5, [r3, #-4] + str r0, [sp, #68] @ 4-byte Spill + add r0, sp, #1000 + str r3, [sp, #84] @ 4-byte Spill + str r1, [sp, #76] @ 4-byte Spill + mov r4, r3 + mov r6, r1 + ldr r2, [r7] + str r7, [sp, #72] @ 4-byte Spill + str r5, [sp, #80] @ 4-byte Spill + bl .LmulPv320x32(PLT) + ldr r0, [sp, #1004] + ldr r10, [sp, #1000] + mov r1, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1008] + mul r2, r10, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1012] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1040] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #1036] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1032] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1028] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1024] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1020] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1016] + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #952 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #992] + ldr r2, [r7, #4] + ldr r9, [sp, #968] + ldr r8, [sp, #952] + ldr r11, [sp, #956] + ldr r5, [sp, #960] + ldr r4, [sp, #964] + mov r1, r6 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #988] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #984] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #980] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #976] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #972] + str r0, [sp, #4] @ 4-byte Spill + add r0, sp, #904 + bl .LmulPv320x32(PLT) + adds r0, r8, r10 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #908 + ldr r10, [sp, #940] + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r11, r0 + ldr r11, [sp, #936] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r5, r0 + ldr r5, [sp, #932] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #904] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r9, r0 + ldr r9, [sp, #944] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r7, r1, r0 + ldr r0, [sp, #56] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adc r8, r1, r0 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r6, [sp, #52] @ 4-byte Reload + adds r4, r6, r4 + ldr r6, [sp, #48] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte 
Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r7, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r10 + str r0, [sp, #32] @ 4-byte Spill + adc r0, r9, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r4, r0 + add r0, sp, #856 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #896] + add r11, sp, #856 + ldr r6, [sp, #880] + ldr r7, [sp, #876] + ldr r5, [sp, #872] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #892] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #888] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #884] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #8] + add r0, sp, #808 + bl .LmulPv320x32(PLT) + adds r0, r4, r8 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #808 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #848] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #844] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #832 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + ldm r11, {r4, r5, r11} + adc r8, r0, r1 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r6, [sp, #64] @ 4-byte Reload + adds r6, r6, r0 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r7, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r10 + str r0, [sp, #32] @ 4-byte Spill + adc r0, r9, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #760 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #800] + add r11, sp, #760 + ldr r5, [sp, #784] + ldr r7, [sp, #780] + ldr r4, [sp, #776] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #796] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #792] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #788] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, sp, #712 + bl 
.LmulPv320x32(PLT) + adds r0, r6, r8 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #716 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #752] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #748] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #744] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #712] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #740] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r8, r0, r1 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r6, [sp, #64] @ 4-byte Reload + adds r4, r6, r4 + ldr r6, [sp, #60] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r7, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r10 + str r0, [sp, #32] @ 4-byte Spill + adc r0, r9, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r4, r0 + add r0, sp, #664 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #704] + add r11, sp, #664 + ldr r6, [sp, #688] + ldr r7, [sp, #684] + ldr r5, [sp, #680] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #700] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #696] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #692] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, sp, #616 + bl .LmulPv320x32(PLT) + adds r0, r4, r8 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #616 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #656] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #652] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #640 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr 
r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + ldm r11, {r4, r5, r11} + adc r8, r0, r1 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r6, [sp, #64] @ 4-byte Reload + adds r6, r6, r0 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r7, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r10 + str r0, [sp, #32] @ 4-byte Spill + adc r0, r9, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #568 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #608] + add r11, sp, #568 + ldr r5, [sp, #592] + ldr r7, [sp, #588] + ldr r4, [sp, #584] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #604] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #600] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #596] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #20] + add r0, sp, #520 + bl .LmulPv320x32(PLT) + adds r0, r6, r8 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #524 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #560] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #556] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #552] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #520] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #548] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r8, r0, r1 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r6, [sp, #64] @ 4-byte Reload + adds r4, r6, r4 + ldr r6, [sp, #60] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r7, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r10 + 
str r0, [sp, #32] @ 4-byte Spill + adc r0, r9, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r4, r0 + add r0, sp, #472 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #512] + add r11, sp, #472 + ldr r6, [sp, #496] + ldr r7, [sp, #492] + ldr r5, [sp, #488] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #508] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #504] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #500] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, sp, #424 + bl .LmulPv320x32(PLT) + adds r0, r4, r8 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #424 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #464] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #460] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #448 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + ldm r11, {r4, r5, r11} + adc r8, r0, r1 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r6, [sp, #64] @ 4-byte Reload + adds r6, r6, r0 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r7, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r10 + str r0, [sp, #32] @ 4-byte Spill + adc r0, r9, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #376 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #416] + add r11, sp, #376 + ldr r5, [sp, #400] + ldr r7, [sp, #396] + ldr r4, [sp, #392] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #412] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #408] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #404] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, sp, #328 + bl .LmulPv320x32(PLT) + adds r0, r6, r8 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #332 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #368] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #364] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + 
adcs r0, r0, r11 + ldr r11, [sp, #360] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #328] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #356] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r8, r0, r1 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r6, [sp, #64] @ 4-byte Reload + adds r4, r6, r4 + ldr r6, [sp, #60] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r7, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r10 + str r0, [sp, #32] @ 4-byte Spill + adc r0, r9, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r4, r0 + add r0, sp, #280 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #320] + add r11, sp, #280 + ldr r6, [sp, #304] + ldr r7, [sp, #300] + ldr r5, [sp, #296] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #316] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #312] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #308] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, sp, #232 + bl .LmulPv320x32(PLT) + adds r0, r4, r8 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #232 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #272] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #268] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #256 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r7, r0, r1 + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + ldm r11, {r4, r5, r11} + adc r8, r0, r1 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r6, [sp, #64] @ 4-byte Reload + adds r6, r6, r0 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte 
Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #48] @ 4-byte Spill + adcs r0, r7, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r8, r10 + str r0, [sp, #32] @ 4-byte Spill + adc r0, r9, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #184 + bl .LmulPv320x32(PLT) + ldr r0, [sp, #224] + add r11, sp, #184 + ldr r5, [sp, #208] + ldr r7, [sp, #204] + ldr r4, [sp, #200] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #220] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #216] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #212] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [r0, #36] + add r0, sp, #136 + bl .LmulPv320x32(PLT) + adds r0, r6, r8 + ldr r1, [sp, #60] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + ldr lr, [sp, #140] + ldr r6, [sp, #144] + add r8, sp, #152 + ldr r12, [sp, #148] + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + adcs r9, r1, r10 + ldr r1, [sp, #56] @ 4-byte Reload + adcs r10, r1, r11 + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r4 + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r7 + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r1, r5 + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #28] @ 4-byte Reload + adc r1, r1, r2 + ldr r2, [sp, #136] + str r1, [sp, #48] @ 4-byte Spill + adds r4, r0, r2 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r9, r9, lr + adcs r11, r10, r6 + mul r1, r4, r0 + str r1, [sp, #44] @ 4-byte Spill + ldm r8, {r0, r1, r2, r3, r5, r7, r8} + ldr r6, [sp, #76] @ 4-byte Reload + adcs r10, r6, r12 + ldr r6, [sp, #72] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #84] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + mov r1, r5 + adcs r0, r0, r7 + str r0, [sp, #72] @ 4-byte Spill + add r0, sp, #88 + adc r8, r8, #0 + bl .LmulPv320x32(PLT) + add r3, sp, #88 + ldm r3, {r0, r1, r2, r3} + adds r0, r4, r0 + adcs r7, r9, r1 + ldr r0, [sp, #40] @ 4-byte Reload + adcs r9, r11, r2 + ldr r2, [sp, #104] + str r7, [sp, #48] @ 4-byte Spill + adcs lr, r10, r3 + str lr, [sp, #52] @ 4-byte Spill + adcs r6, r0, r2 + ldr r2, [sp, #108] + ldr r0, [sp, #80] @ 4-byte 
Reload + str r6, [sp, #56] @ 4-byte Spill + adcs r0, r0, r2 + ldr r2, [sp, #112] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r4, r0, r2 + ldr r2, [sp, #116] + ldr r0, [sp, #64] @ 4-byte Reload + str r4, [sp, #60] @ 4-byte Spill + adcs r12, r0, r2 + ldr r2, [sp, #120] + ldr r0, [sp, #76] @ 4-byte Reload + str r12, [sp, #64] @ 4-byte Spill + adcs r0, r0, r2 + ldr r2, [sp, #124] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r11, r0, r2 + ldr r2, [sp, #128] + mov r0, r5 + str r11, [sp, #72] @ 4-byte Spill + adc r1, r8, r2 + str r1, [sp, #44] @ 4-byte Spill + ldmib r0, {r2, r8} + ldr r5, [r0, #16] + ldr r10, [r0] + ldr r3, [r0, #12] + str r5, [sp, #28] @ 4-byte Spill + ldr r5, [r0, #20] + subs r10, r7, r10 + str r5, [sp, #32] @ 4-byte Spill + ldr r5, [r0, #24] + str r5, [sp, #36] @ 4-byte Spill + ldr r5, [r0, #28] + str r5, [sp, #40] @ 4-byte Spill + mov r5, r0 + sbcs r0, r9, r2 + sbcs r2, lr, r8 + ldr r8, [r5, #32] + sbcs r7, r6, r3 + ldr r3, [r5, #36] + ldr r6, [sp, #80] @ 4-byte Reload + ldr r5, [sp, #76] @ 4-byte Reload + str r3, [sp, #84] @ 4-byte Spill + ldr r3, [sp, #28] @ 4-byte Reload + sbcs r6, r6, r3 + ldr r3, [sp, #32] @ 4-byte Reload + sbcs lr, r4, r3 + ldr r3, [sp, #36] @ 4-byte Reload + sbcs r4, r12, r3 + ldr r3, [sp, #40] @ 4-byte Reload + sbcs r12, r5, r3 + ldr r3, [sp, #84] @ 4-byte Reload + ldr r5, [sp, #48] @ 4-byte Reload + sbcs r11, r11, r8 + ldr r8, [sp, #68] @ 4-byte Reload + sbc r3, r1, r3 + asr r1, r3, #31 + cmp r1, #0 + movlt r10, r5 + movlt r0, r9 + str r10, [r8] + str r0, [r8, #4] + ldr r0, [sp, #52] @ 4-byte Reload + movlt r2, r0 + ldr r0, [sp, #56] @ 4-byte Reload + cmp r1, #0 + str r2, [r8, #8] + movlt r7, r0 + ldr r0, [sp, #80] @ 4-byte Reload + str r7, [r8, #12] + movlt r6, r0 + ldr r0, [sp, #60] @ 4-byte Reload + str r6, [r8, #16] + movlt lr, r0 + ldr r0, [sp, #64] @ 4-byte Reload + cmp r1, #0 + str lr, [r8, #20] + movlt r4, r0 + ldr r0, [sp, #76] @ 4-byte Reload + str r4, [r8, #24] + movlt r12, r0 + ldr r0, [sp, #72] @ 4-byte Reload + str r12, [r8, #28] + movlt r11, r0 + ldr r0, [sp, #44] @ 4-byte Reload + cmp r1, #0 + str r11, [r8, #32] + movlt r3, r0 + str r3, [r8, #36] + add sp, sp, #28 + add sp, sp, #1024 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end147: + .size mcl_fp_montNF10L, .Lfunc_end147-mcl_fp_montNF10L + .cantunwind + .fnend + + .globl mcl_fp_montRed10L + .align 2 + .type mcl_fp_montRed10L,%function +mcl_fp_montRed10L: @ @mcl_fp_montRed10L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #612 + sub sp, sp, #612 + mov r5, r2 + str r0, [sp, #120] @ 4-byte Spill + ldr r2, [r1, #4] + ldr r9, [r1] + ldr r11, [r1, #16] + ldr r0, [r5] + str r2, [sp, #60] @ 4-byte Spill + ldr r2, [r1, #8] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [r5, #4] + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [r1, #12] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [r5, #8] + str r2, [sp, #52] @ 4-byte Spill + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [r5, #12] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [r5, #16] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [r5, #20] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [r5, #24] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [r5, #-4] + str r0, [sp, #124] @ 4-byte Spill + mul r2, r9, r0 + ldr r0, [r5, #28] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r5, #32] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r5, #36] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [r1, #64] + str 
r0, [sp, #64] @ 4-byte Spill + ldr r0, [r1, #68] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r1, #72] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r1, #76] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r1, #32] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r1, #36] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r1, #40] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r1, #48] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r1, #52] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r1, #56] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r1, #60] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r1, #44] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [r1, #28] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r1, #24] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r1, #20] + mov r1, r5 + str r0, [sp, #8] @ 4-byte Spill + add r0, sp, #560 + bl .LmulPv320x32(PLT) + add lr, sp, #584 + ldr r10, [sp, #600] + ldr r8, [sp, #596] + add r7, sp, #564 + ldm lr, {r6, r12, lr} + ldr r4, [sp, #560] + ldm r7, {r0, r1, r2, r3, r7} + adds r4, r9, r4 + ldr r4, [sp, #60] @ 4-byte Reload + adcs r4, r4, r0 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r9, r0, r1 + ldr r0, [sp, #52] @ 4-byte Reload + mov r1, r5 + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + adcs r0, r11, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + mul r2, r4, r0 + add r0, sp, #512 + bl .LmulPv320x32(PLT) + add r6, sp, #512 + ldr r12, [sp, #552] + ldr lr, [sp, #548] + ldr r2, [sp, #544] + ldr r10, [sp, #540] + ldr r11, [sp, #536] + ldr r7, [sp, #532] + ldr r8, [sp, #528] + ldm r6, {r1, r3, r6} + ldr r0, [sp, #524] + adds r1, r4, r1 + ldr r4, [sp, #124] @ 4-byte Reload + ldr r1, [sp, #60] @ 4-byte Reload + adcs r9, r9, r3 + adcs r1, r1, r6 + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #24] 
@ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r9, r4 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + add r0, sp, #464 + bl .LmulPv320x32(PLT) + ldr r1, [sp, #464] + ldr r0, [sp, #504] + add r12, sp, #468 + ldr r10, [sp, #500] + ldr r8, [sp, #496] + ldr lr, [sp, #492] + ldr r6, [sp, #488] + ldr r7, [sp, #484] + adds r1, r9, r1 + str r0, [sp, #4] @ 4-byte Spill + ldm r12, {r0, r2, r3, r12} + ldr r1, [sp, #60] @ 4-byte Reload + adcs r11, r1, r0 + ldr r0, [sp, #56] @ 4-byte Reload + ldr r1, [sp, #4] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #12] @ 4-byte Spill + add r0, sp, #416 + bl .LmulPv320x32(PLT) + add r7, sp, #416 + ldr r12, [sp, #456] + ldr lr, [sp, #452] + ldr r2, [sp, #448] + ldr r3, [sp, #444] + add r10, sp, #428 + ldm r7, {r1, r6, r7} + ldm r10, {r0, r8, r9, r10} + adds r1, r11, r1 + ldr r1, [sp, #60] @ 4-byte Reload + adcs r11, r1, r6 + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r7 + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #56] @ 
4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + add r0, sp, #368 + bl .LmulPv320x32(PLT) + add r10, sp, #400 + add r12, sp, #372 + ldm r10, {r8, r9, r10} + ldr r1, [sp, #368] + ldr lr, [sp, #396] + ldr r6, [sp, #392] + ldr r7, [sp, #388] + ldm r12, {r0, r2, r3, r12} + adds r1, r11, r1 + ldr r1, [sp, #60] @ 4-byte Reload + adcs r11, r1, r0 + ldr r0, [sp, #56] @ 4-byte Reload + mov r1, r5 + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #320 + bl .LmulPv320x32(PLT) + add r7, sp, #320 + ldr r12, [sp, #360] + ldr lr, [sp, #356] + ldr r2, [sp, #352] + ldr r3, [sp, #348] + add r10, sp, #332 + ldm r7, {r1, r6, r7} + ldm r10, {r0, r8, r9, r10} + adds r1, r11, r1 + ldr r1, [sp, #60] @ 4-byte Reload + adcs r11, r1, r6 + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r7 + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + 
adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #272 + bl .LmulPv320x32(PLT) + add r10, sp, #304 + add r12, sp, #276 + ldm r10, {r8, r9, r10} + ldr r1, [sp, #272] + ldr lr, [sp, #300] + ldr r6, [sp, #296] + ldr r7, [sp, #292] + ldm r12, {r0, r2, r3, r12} + adds r1, r11, r1 + ldr r1, [sp, #60] @ 4-byte Reload + adcs r11, r1, r0 + ldr r0, [sp, #56] @ 4-byte Reload + mov r1, r5 + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #224 + bl .LmulPv320x32(PLT) + add r10, sp, #240 + add r6, sp, #224 + ldr r12, [sp, #264] + ldr lr, [sp, #260] + ldr r8, [sp, #256] + ldr r9, [sp, #252] + ldm r10, {r0, r7, r10} + ldm r6, {r1, r2, r3, r6} + adds r1, r11, r1 + ldr r1, [sp, #60] @ 4-byte Reload + adcs r4, r1, r2 + ldr r1, [sp, #56] @ 4-byte Reload + adcs r11, r1, r3 + ldr r1, [sp, #52] @ 4-byte Reload + adcs r6, r1, r6 + ldr r1, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #124] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + mul r2, r4, r7 + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r9, r0, r9 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, 
#0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + add r0, sp, #176 + bl .LmulPv320x32(PLT) + add r12, sp, #176 + ldm r12, {r0, r1, r3, r12} + ldr lr, [sp, #216] + adds r0, r4, r0 + ldr r4, [sp, #76] @ 4-byte Reload + adcs r10, r11, r1 + ldr r1, [sp, #192] + adcs r0, r6, r3 + mul r2, r10, r7 + ldr r7, [sp, #200] + ldr r6, [sp, #204] + ldr r3, [sp, #208] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r12 + ldr r12, [sp, #212] + str r0, [sp, #44] @ 4-byte Spill + adcs r8, r4, r1 + ldr r0, [sp, #196] + ldr r1, [sp, #72] @ 4-byte Reload + adcs r11, r1, r0 + ldr r0, [sp, #68] @ 4-byte Reload + mov r1, r5 + adcs r9, r9, r7 + adcs r6, r0, r6 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + add r0, sp, #128 + bl .LmulPv320x32(PLT) + add r3, sp, #128 + ldm r3, {r0, r1, r2, r3} + adds r0, r10, r0 + ldr r0, [sp, #124] @ 4-byte Reload + adcs r10, r0, r1 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r1, r0, r2 + ldr r0, [sp, #144] + adcs r2, r8, r3 + ldr r3, [sp, #40] @ 4-byte Reload + str r1, [sp, #36] @ 4-byte Spill + str r2, [sp, #44] @ 4-byte Spill + adcs r7, r11, r0 + ldr r0, [sp, #148] + str r7, [sp, #48] @ 4-byte Spill + adcs r12, r9, r0 + ldr r0, [sp, #152] + str r12, [sp, #52] @ 4-byte Spill + adcs r4, r6, r0 + ldr r0, [sp, #156] + str r4, [sp, #56] @ 4-byte Spill + adcs r5, r3, r0 + ldr r0, [sp, #160] + ldr r3, [sp, #68] @ 4-byte Reload + str r5, [sp, #60] @ 4-byte Spill + adcs r6, r3, r0 + ldr r0, [sp, #164] + ldr r3, [sp, #64] @ 4-byte Reload + str r6, [sp, #68] @ 4-byte Spill + adcs r8, r3, r0 + ldr r0, [sp, #168] + ldr r3, [sp, #76] @ 4-byte Reload + str r8, [sp, #124] @ 4-byte Spill + adcs lr, r3, r0 + ldr r0, [sp, #72] @ 4-byte Reload + adc r11, r0, #0 + ldr r0, [sp, #116] @ 4-byte Reload + subs r3, r10, r0 + ldr r0, [sp, #112] @ 4-byte Reload + sbcs r0, r1, r0 + ldr r1, [sp, #108] @ 4-byte Reload + sbcs r1, r2, r1 + ldr r2, [sp, #92] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #96] @ 4-byte Reload + sbcs r12, r12, r7 + ldr r7, [sp, #100] @ 4-byte Reload + sbcs r7, r4, r7 + ldr r4, [sp, #104] @ 4-byte Reload + sbcs r4, r5, r4 + ldr r5, [sp, #80] @ 4-byte Reload + sbcs r5, r6, r5 + ldr r6, [sp, #84] @ 4-byte Reload + sbcs r9, r8, r6 + ldr r6, [sp, #88] @ 4-byte Reload + sbcs r8, lr, r6 + sbc r6, r11, #0 + ands r11, r6, #1 + ldr r6, [sp, #120] @ 4-byte Reload + movne r3, r10 + str r3, [r6] + ldr r3, [sp, #36] @ 4-byte Reload + movne r0, r3 + str r0, [r6, #4] + ldr r0, [sp, #44] @ 4-byte Reload + movne r1, r0 + ldr r0, [sp, #48] @ 4-byte Reload + cmp r11, #0 + str r1, [r6, #8] + movne r2, r0 + ldr r0, [sp, #52] @ 4-byte Reload + str r2, [r6, #12] + movne r12, r0 + ldr r0, [sp, #56] @ 4-byte Reload + str r12, [r6, #16] + movne r7, r0 + ldr r0, [sp, #60] @ 4-byte Reload + cmp r11, #0 + str r7, [r6, #20] + movne r4, r0 + ldr r0, [sp, #68] @ 4-byte Reload + str r4, [r6, #24] + movne r5, r0 + ldr r0, [sp, #124] @ 4-byte Reload + str r5, [r6, #28] + movne r9, r0 + cmp r11, #0 + movne r8, lr + str r9, 
[r6, #32] + str r8, [r6, #36] + add sp, sp, #612 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end148: + .size mcl_fp_montRed10L, .Lfunc_end148-mcl_fp_montRed10L + .cantunwind + .fnend + + .globl mcl_fp_addPre10L + .align 2 + .type mcl_fp_addPre10L,%function +mcl_fp_addPre10L: @ @mcl_fp_addPre10L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #16 + sub sp, sp, #16 + ldm r1, {r3, r8, lr} + ldr r9, [r1, #12] + ldmib r2, {r5, r6, r7, r10} + ldr r4, [r2, #20] + ldr r11, [r2] + str r4, [sp] @ 4-byte Spill + ldr r4, [r2, #24] + adds r12, r11, r3 + ldr r11, [r2, #32] + adcs r5, r5, r8 + ldr r8, [r1, #36] + adcs r6, r6, lr + add lr, r1, #16 + adcs r7, r7, r9 + str r4, [sp, #4] @ 4-byte Spill + ldr r4, [r2, #28] + ldr r2, [r2, #36] + str r4, [sp, #12] @ 4-byte Spill + str r2, [sp, #8] @ 4-byte Spill + ldm lr, {r1, r2, r3, r4, lr} + str r12, [r0] + stmib r0, {r5, r6} + str r7, [r0, #12] + ldr r7, [sp] @ 4-byte Reload + adcs r1, r10, r1 + str r1, [r0, #16] + ldr r1, [sp, #4] @ 4-byte Reload + adcs r2, r7, r2 + str r2, [r0, #20] + ldr r2, [sp, #8] @ 4-byte Reload + adcs r1, r1, r3 + str r1, [r0, #24] + ldr r1, [sp, #12] @ 4-byte Reload + adcs r1, r1, r4 + str r1, [r0, #28] + adcs r1, r11, lr + adcs r2, r2, r8 + str r1, [r0, #32] + str r2, [r0, #36] + mov r0, #0 + adc r0, r0, #0 + add sp, sp, #16 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end149: + .size mcl_fp_addPre10L, .Lfunc_end149-mcl_fp_addPre10L + .cantunwind + .fnend + + .globl mcl_fp_subPre10L + .align 2 + .type mcl_fp_subPre10L,%function +mcl_fp_subPre10L: @ @mcl_fp_subPre10L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #24 + sub sp, sp, #24 + ldr r3, [r2, #4] + ldr r7, [r2] + ldr r11, [r1] + ldr r6, [r1, #4] + ldr r9, [r2, #8] + ldr r5, [r1, #8] + ldr lr, [r2, #12] + ldr r4, [r1, #12] + ldr r12, [r1, #16] + ldr r8, [r1, #20] + ldr r10, [r1, #24] + str r3, [sp] @ 4-byte Spill + ldr r3, [r2, #16] + subs r7, r11, r7 + ldr r11, [r2, #32] + str r7, [r0] + str r3, [sp, #8] @ 4-byte Spill + ldr r3, [r2, #20] + str r3, [sp, #12] @ 4-byte Spill + ldr r3, [r2, #24] + str r3, [sp, #16] @ 4-byte Spill + ldr r3, [r2, #28] + ldr r2, [r2, #36] + str r3, [sp, #20] @ 4-byte Spill + ldr r3, [r1, #28] + str r3, [sp, #4] @ 4-byte Spill + ldr r3, [sp] @ 4-byte Reload + sbcs r6, r6, r3 + sbcs r5, r5, r9 + str r6, [r0, #4] + str r5, [r0, #8] + ldr r5, [sp, #8] @ 4-byte Reload + sbcs r4, r4, lr + ldr lr, [r1, #32] + ldr r1, [r1, #36] + str r4, [r0, #12] + ldr r4, [sp, #12] @ 4-byte Reload + sbcs r3, r12, r5 + str r3, [r0, #16] + ldr r3, [sp, #16] @ 4-byte Reload + sbcs r7, r8, r4 + str r7, [r0, #20] + ldr r7, [sp, #4] @ 4-byte Reload + sbcs r3, r10, r3 + str r3, [r0, #24] + ldr r3, [sp, #20] @ 4-byte Reload + sbcs r3, r7, r3 + str r3, [r0, #28] + sbcs r3, lr, r11 + sbcs r1, r1, r2 + str r3, [r0, #32] + str r1, [r0, #36] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + add sp, sp, #24 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end150: + .size mcl_fp_subPre10L, .Lfunc_end150-mcl_fp_subPre10L + .cantunwind + .fnend + + .globl mcl_fp_shr1_10L + .align 2 + .type mcl_fp_shr1_10L,%function +mcl_fp_shr1_10L: @ @mcl_fp_shr1_10L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r11, lr} + push {r4, r5, r6, r7, r8, r9, r11, lr} + ldr lr, [r1, #32] + ldr r12, [r1, #36] + ldr r8, [r1, #28] + ldm r1, {r1, r2, r3, r4, r5, r6, r9} + lsrs r7, r2, #1 + rrx r1, r1 + str 
r1, [r0] + lsr r1, r2, #1 + lsr r2, r12, #1 + orr r1, r1, r3, lsl #31 + str r1, [r0, #4] + lsrs r1, r4, #1 + rrx r1, r3 + str r1, [r0, #8] + lsr r1, r4, #1 + orr r1, r1, r5, lsl #31 + str r1, [r0, #12] + lsrs r1, r6, #1 + rrx r1, r5 + str r1, [r0, #16] + lsr r1, r6, #1 + orr r1, r1, r9, lsl #31 + str r1, [r0, #20] + lsrs r1, r8, #1 + rrx r1, r9 + str r1, [r0, #24] + lsr r1, r8, #1 + orr r1, r1, lr, lsl #31 + str r1, [r0, #28] + lsrs r1, r12, #1 + rrx r1, lr + str r1, [r0, #32] + str r2, [r0, #36] + pop {r4, r5, r6, r7, r8, r9, r11, lr} + mov pc, lr +.Lfunc_end151: + .size mcl_fp_shr1_10L, .Lfunc_end151-mcl_fp_shr1_10L + .cantunwind + .fnend + + .globl mcl_fp_add10L + .align 2 + .type mcl_fp_add10L,%function +mcl_fp_add10L: @ @mcl_fp_add10L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #20 + sub sp, sp, #20 + ldm r1, {r12, lr} + ldr r5, [r2] + ldr r10, [r1, #8] + ldr r8, [r1, #12] + ldmib r2, {r4, r6, r7} + adds r9, r5, r12 + ldr r5, [r1, #24] + adcs lr, r4, lr + ldr r4, [r1, #20] + adcs r6, r6, r10 + ldr r10, [r1, #36] + str lr, [sp] @ 4-byte Spill + str r6, [sp, #12] @ 4-byte Spill + adcs r12, r7, r8 + ldr r6, [r1, #16] + ldr r7, [r2, #16] + adcs r6, r7, r6 + ldr r7, [r2, #20] + str r6, [sp, #4] @ 4-byte Spill + adcs r8, r7, r4 + ldr r4, [r2, #24] + adcs r6, r4, r5 + ldr r4, [r1, #28] + ldr r5, [r2, #28] + str r6, [sp, #8] @ 4-byte Spill + adcs r7, r5, r4 + ldr r5, [r1, #32] + ldr r1, [r2, #32] + ldr r2, [r2, #36] + stm r0, {r9, lr} + mov lr, r12 + ldr r4, [sp, #4] @ 4-byte Reload + adcs r11, r1, r5 + add r1, r0, #24 + adcs r10, r2, r10 + ldr r2, [sp, #12] @ 4-byte Reload + str r2, [r0, #8] + str lr, [r0, #12] + str r4, [r0, #16] + str r8, [r0, #20] + stm r1, {r6, r7, r11} + mov r1, #0 + str r10, [r0, #36] + adc r1, r1, #0 + str r1, [sp, #16] @ 4-byte Spill + ldm r3, {r1, r6, r12} + ldr r5, [r3, #12] + subs r9, r9, r1 + ldr r1, [sp] @ 4-byte Reload + sbcs r6, r1, r6 + sbcs r1, r2, r12 + ldr r2, [sp, #8] @ 4-byte Reload + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [r3, #16] + sbcs r12, lr, r5 + sbcs lr, r4, r1 + ldr r1, [r3, #20] + ldr r4, [sp, #16] @ 4-byte Reload + sbcs r8, r8, r1 + ldr r1, [r3, #24] + sbcs r5, r2, r1 + ldr r2, [r3, #28] + sbcs r1, r7, r2 + ldr r2, [r3, #32] + ldr r7, [r3, #36] + sbcs r3, r11, r2 + sbcs r2, r10, r7 + sbc r4, r4, #0 + tst r4, #1 + bne .LBB152_2 +@ BB#1: @ %nocarry + ldr r4, [sp, #12] @ 4-byte Reload + str r9, [r0] + str r6, [r0, #4] + str r4, [r0, #8] + str r12, [r0, #12] + str lr, [r0, #16] + str r8, [r0, #20] + str r5, [r0, #24] + str r1, [r0, #28] + str r3, [r0, #32] + str r2, [r0, #36] +.LBB152_2: @ %carry + add sp, sp, #20 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end152: + .size mcl_fp_add10L, .Lfunc_end152-mcl_fp_add10L + .cantunwind + .fnend + + .globl mcl_fp_addNF10L + .align 2 + .type mcl_fp_addNF10L,%function +mcl_fp_addNF10L: @ @mcl_fp_addNF10L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #56 + sub sp, sp, #56 + ldr r9, [r1] + ldmib r1, {r8, lr} + ldr r5, [r2] + ldr r12, [r1, #12] + ldmib r2, {r4, r6, r7} + ldr r10, [r1, #24] + adds r9, r5, r9 + ldr r5, [r1, #16] + adcs r11, r4, r8 + ldr r8, [r1, #20] + str r9, [sp, #16] @ 4-byte Spill + adcs r6, r6, lr + str r11, [sp, #20] @ 4-byte Spill + str r6, [sp, #32] @ 4-byte Spill + adcs r6, r7, r12 + ldr r7, [r2, #16] + str r6, [sp, #24] @ 4-byte Spill + adcs r4, r7, r5 + ldr r7, [r2, #20] + ldr r5, [r2, #28] + str r4, [sp, #28] @ 
4-byte Spill + adcs r7, r7, r8 + str r7, [sp, #52] @ 4-byte Spill + ldr r7, [r2, #24] + adcs r7, r7, r10 + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r1, #28] + adcs r7, r5, r7 + ldr r5, [r1, #32] + ldr r1, [r1, #36] + str r7, [sp, #44] @ 4-byte Spill + ldr r7, [r2, #32] + ldr r2, [r2, #36] + adcs lr, r7, r5 + adc r1, r2, r1 + str lr, [sp, #36] @ 4-byte Spill + str r1, [sp, #40] @ 4-byte Spill + ldmib r3, {r1, r2, r12} + ldr r7, [r3, #20] + ldr r8, [r3] + ldr r10, [sp, #32] @ 4-byte Reload + ldr r5, [r3, #16] + str r7, [sp, #4] @ 4-byte Spill + ldr r7, [r3, #24] + subs r8, r9, r8 + sbcs r1, r11, r1 + ldr r11, [r3, #32] + sbcs r2, r10, r2 + sbcs r12, r6, r12 + str r7, [sp, #8] @ 4-byte Spill + ldr r7, [r3, #28] + ldr r3, [r3, #36] + sbcs r6, r4, r5 + ldr r4, [sp, #4] @ 4-byte Reload + ldr r5, [sp, #8] @ 4-byte Reload + str r3, [sp] @ 4-byte Spill + ldr r3, [sp, #52] @ 4-byte Reload + str r7, [sp, #12] @ 4-byte Spill + ldr r7, [sp, #12] @ 4-byte Reload + sbcs r3, r3, r4 + ldr r4, [sp, #48] @ 4-byte Reload + sbcs r4, r4, r5 + ldr r5, [sp, #44] @ 4-byte Reload + sbcs r9, r5, r7 + ldr r7, [sp, #40] @ 4-byte Reload + ldr r5, [sp] @ 4-byte Reload + sbcs r11, lr, r11 + sbc lr, r7, r5 + ldr r5, [sp, #16] @ 4-byte Reload + asr r7, lr, #31 + cmp r7, #0 + movlt r2, r10 + movlt r8, r5 + ldr r5, [sp, #20] @ 4-byte Reload + str r8, [r0] + movlt r1, r5 + cmp r7, #0 + str r1, [r0, #4] + ldr r1, [sp, #24] @ 4-byte Reload + str r2, [r0, #8] + movlt r12, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r12, [r0, #12] + movlt r6, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r6, [r0, #16] + movlt r3, r1 + ldr r1, [sp, #48] @ 4-byte Reload + cmp r7, #0 + str r3, [r0, #20] + movlt r4, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r4, [r0, #24] + movlt r9, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r9, [r0, #28] + movlt r11, r1 + ldr r1, [sp, #40] @ 4-byte Reload + cmp r7, #0 + str r11, [r0, #32] + movlt lr, r1 + str lr, [r0, #36] + add sp, sp, #56 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end153: + .size mcl_fp_addNF10L, .Lfunc_end153-mcl_fp_addNF10L + .cantunwind + .fnend + + .globl mcl_fp_sub10L + .align 2 + .type mcl_fp_sub10L,%function +mcl_fp_sub10L: @ @mcl_fp_sub10L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #36 + sub sp, sp, #36 + ldm r2, {r12, lr} + ldr r8, [r2, #8] + ldr r10, [r2, #12] + ldm r1, {r4, r5, r6, r7} + subs r4, r4, r12 + ldr r12, [r1, #36] + sbcs r9, r5, lr + ldr r5, [r2, #20] + str r4, [sp, #32] @ 4-byte Spill + ldr r4, [r2, #24] + sbcs lr, r6, r8 + ldr r6, [r2, #16] + sbcs r8, r7, r10 + ldr r7, [r1, #16] + sbcs r10, r7, r6 + ldr r6, [r1, #20] + sbcs r7, r6, r5 + ldr r5, [r1, #24] + ldr r6, [r1, #32] + str r7, [sp, #28] @ 4-byte Spill + sbcs r11, r5, r4 + ldr r4, [r2, #28] + ldr r5, [r1, #28] + sbcs r5, r5, r4 + ldr r4, [r2, #32] + ldr r2, [r2, #36] + sbcs r1, r6, r4 + mov r6, #0 + sbcs r2, r12, r2 + ldr r12, [sp, #32] @ 4-byte Reload + sbc r6, r6, #0 + tst r6, #1 + str r12, [r0] + stmib r0, {r9, lr} + str r8, [r0, #12] + str r10, [r0, #16] + str r7, [r0, #20] + mov r7, r11 + str r7, [r0, #24] + str r5, [r0, #28] + str r1, [r0, #32] + str r2, [r0, #36] + beq .LBB154_2 +@ BB#1: @ %carry + ldr r4, [r3, #32] + str r4, [sp, #20] @ 4-byte Spill + ldr r4, [r3, #36] + str r4, [sp, #24] @ 4-byte Spill + ldmib r3, {r4, r11} + ldr r6, [r3, #12] + str r6, [sp] @ 4-byte Spill + ldr r6, [r3, #16] + str r6, [sp, #4] @ 4-byte Spill + ldr r6, [r3, #20] + str r6, [sp, #8] @ 4-byte Spill + ldr r6, [r3, #24] + 
str r6, [sp, #12] @ 4-byte Spill + ldr r6, [r3, #28] + ldr r3, [r3] + adds r3, r3, r12 + str r6, [sp, #16] @ 4-byte Spill + adcs r4, r4, r9 + stm r0, {r3, r4} + adcs r3, r11, lr + str r3, [r0, #8] + ldr r3, [sp] @ 4-byte Reload + ldr r6, [sp, #8] @ 4-byte Reload + adcs r3, r3, r8 + str r3, [r0, #12] + ldr r3, [sp, #4] @ 4-byte Reload + adcs r3, r3, r10 + str r3, [r0, #16] + ldr r3, [sp, #28] @ 4-byte Reload + adcs r3, r6, r3 + str r3, [r0, #20] + ldr r3, [sp, #12] @ 4-byte Reload + adcs r3, r3, r7 + str r3, [r0, #24] + ldr r3, [sp, #16] @ 4-byte Reload + adcs r3, r3, r5 + str r3, [r0, #28] + ldr r3, [sp, #20] @ 4-byte Reload + adcs r1, r3, r1 + ldr r3, [sp, #24] @ 4-byte Reload + str r1, [r0, #32] + adc r2, r3, r2 + str r2, [r0, #36] +.LBB154_2: @ %nocarry + add sp, sp, #36 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end154: + .size mcl_fp_sub10L, .Lfunc_end154-mcl_fp_sub10L + .cantunwind + .fnend + + .globl mcl_fp_subNF10L + .align 2 + .type mcl_fp_subNF10L,%function +mcl_fp_subNF10L: @ @mcl_fp_subNF10L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #64 + sub sp, sp, #64 + mov r12, r0 + ldr r0, [r2, #32] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [r2, #36] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [r1, #32] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r1, #36] + str r0, [sp, #44] @ 4-byte Spill + ldm r2, {r4, r5} + ldr r0, [r2, #8] + ldr r7, [r2, #16] + ldr r8, [r2, #20] + ldr lr, [r1, #12] + ldr r6, [r1, #16] + ldr r11, [r1, #20] + ldr r9, [r1, #24] + ldr r10, [r1, #28] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r2, #12] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r2, #24] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r2, #28] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r1, #8] + ldm r1, {r1, r2} + subs r1, r1, r4 + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + sbcs r2, r2, r5 + str r2, [sp, #16] @ 4-byte Spill + sbcs r4, r0, r1 + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #40] @ 4-byte Reload + str r4, [sp, #20] @ 4-byte Spill + sbcs r5, lr, r0 + ldr r0, [sp, #48] @ 4-byte Reload + sbcs r7, r6, r7 + ldr r6, [sp, #44] @ 4-byte Reload + str r5, [sp, #28] @ 4-byte Spill + sbcs lr, r11, r8 + str r7, [sp, #32] @ 4-byte Spill + str lr, [sp, #36] @ 4-byte Spill + sbcs r8, r9, r0 + ldr r0, [sp, #56] @ 4-byte Reload + str r8, [sp, #48] @ 4-byte Spill + sbcs r9, r10, r0 + ldr r0, [sp, #60] @ 4-byte Reload + str r9, [sp, #56] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + sbc r1, r6, r1 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [r3, #32] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [r3, #36] + str r1, [sp, #44] @ 4-byte Spill + ldmib r3, {r1, r6} + ldr r11, [r3, #24] + ldr r10, [sp, #24] @ 4-byte Reload + str r6, [sp, #4] @ 4-byte Spill + ldr r6, [r3, #12] + str r6, [sp] @ 4-byte Spill + ldr r6, [r3, #16] + str r6, [sp, #8] @ 4-byte Spill + ldr r6, [r3, #20] + str r6, [sp, #12] @ 4-byte Spill + ldr r6, [r3, #28] + ldr r3, [r3] + adds r3, r10, r3 + adcs r1, r2, r1 + ldr r2, [sp, #4] @ 4-byte Reload + adcs r2, r4, r2 + ldr r4, [sp] @ 4-byte Reload + adcs r4, r5, r4 + ldr r5, [sp, #8] @ 4-byte Reload + adcs r5, r7, r5 + ldr r7, [sp, #12] @ 4-byte Reload + adcs r7, lr, r7 + adcs r11, r8, r11 + adcs r8, r9, r6 + ldr r6, [sp, #40] @ 4-byte Reload + adcs r9, r0, r6 + ldr r0, [sp, #52] @ 4-byte Reload + ldr r6, [sp, #44] @ 4-byte Reload + asr lr, r0, #31 + adc r6, r0, r6 + cmp lr, #0 + movge r3, r10 + str 
r3, [r12] + ldr r3, [sp, #16] @ 4-byte Reload + movge r1, r3 + str r1, [r12, #4] + ldr r1, [sp, #20] @ 4-byte Reload + movge r2, r1 + ldr r1, [sp, #28] @ 4-byte Reload + cmp lr, #0 + str r2, [r12, #8] + movge r4, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r4, [r12, #12] + movge r5, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r5, [r12, #16] + movge r7, r1 + ldr r1, [sp, #48] @ 4-byte Reload + cmp lr, #0 + str r7, [r12, #20] + movge r11, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r11, [r12, #24] + movge r8, r1 + ldr r1, [sp, #60] @ 4-byte Reload + str r8, [r12, #28] + movge r9, r1 + cmp lr, #0 + movge r6, r0 + str r9, [r12, #32] + str r6, [r12, #36] + add sp, sp, #64 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end155: + .size mcl_fp_subNF10L, .Lfunc_end155-mcl_fp_subNF10L + .cantunwind + .fnend + + .globl mcl_fpDbl_add10L + .align 2 + .type mcl_fpDbl_add10L,%function +mcl_fpDbl_add10L: @ @mcl_fpDbl_add10L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #104 + sub sp, sp, #104 + ldm r1, {r7, r9} + ldr r8, [r1, #8] + ldr r12, [r1, #12] + ldm r2, {r4, r5, r6, r10} + add lr, r1, #16 + adds r7, r4, r7 + ldr r4, [r2, #16] + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [r2, #64] + str r7, [sp, #88] @ 4-byte Spill + ldr r7, [r2, #68] + str r7, [sp, #100] @ 4-byte Spill + ldr r7, [r2, #72] + str r7, [sp, #96] @ 4-byte Spill + ldr r7, [r2, #76] + str r7, [sp, #92] @ 4-byte Spill + adcs r7, r5, r9 + str r7, [sp, #28] @ 4-byte Spill + adcs r7, r6, r8 + ldr r8, [r2, #20] + str r7, [sp, #24] @ 4-byte Spill + adcs r7, r10, r12 + add r10, r1, #32 + str r7, [sp, #32] @ 4-byte Spill + ldr r7, [r2, #32] + str r7, [sp, #60] @ 4-byte Spill + ldr r7, [r2, #36] + str r7, [sp, #64] @ 4-byte Spill + ldr r7, [r2, #40] + str r7, [sp, #68] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #84] @ 4-byte Spill + ldr r7, [r2, #44] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r2, #28] + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #24] + ldr r2, [r1, #64] + str r2, [sp, #40] @ 4-byte Spill + ldr r2, [r1, #68] + str r7, [sp] @ 4-byte Spill + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #52] @ 4-byte Spill + ldm r10, {r7, r9, r10} + ldr r2, [r1, #48] + ldr r5, [r1, #44] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #52] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #16] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r11, [sp, #36] @ 4-byte Reload + ldr r6, [sp, #28] @ 4-byte Reload + adcs r1, r4, r1 + str r11, [r0] + str r6, [r0, #4] + ldr r6, [sp, #24] @ 4-byte Reload + ldr r4, [sp, #32] @ 4-byte Reload + adcs r2, r8, r2 + str r6, [r0, #8] + str r4, [r0, #12] + str r1, [r0, #16] + ldr r1, [sp] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #20] @ 4-byte Reload + adcs r1, r1, r12 + str r1, [r0, #24] + ldr r1, [sp, #60] @ 4-byte Reload + adcs r2, r2, lr + str r2, [r0, #28] + ldr r2, [sp, #64] @ 4-byte Reload + adcs r1, r1, r7 + str r1, [r0, #32] + ldr r1, [sp, #68] @ 4-byte Reload + adcs r2, r2, r9 + str r2, [r0, #36] + ldr r2, [sp, #4] @ 4-byte Reload + adcs lr, r1, r10 + ldr r1, [sp, #56] @ 4-byte Reload + adcs r4, r1, r5 + ldr r1, [sp, #72] @ 4-byte Reload + str r4, [sp, #68] @ 4-byte Spill + adcs 
r12, r1, r2 + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + str r12, [sp, #72] @ 4-byte Spill + adcs r5, r1, r2 + ldr r1, [sp, #80] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + str r5, [sp, #76] @ 4-byte Spill + adcs r7, r1, r2 + ldr r1, [sp, #84] @ 4-byte Reload + ldr r2, [sp, #16] @ 4-byte Reload + str r7, [sp, #80] @ 4-byte Spill + adcs r9, r1, r2 + ldr r1, [sp, #88] @ 4-byte Reload + ldr r2, [sp, #40] @ 4-byte Reload + str r9, [sp, #84] @ 4-byte Spill + adcs r10, r1, r2 + ldr r1, [sp, #100] @ 4-byte Reload + ldr r2, [sp, #44] @ 4-byte Reload + str r10, [sp, #64] @ 4-byte Spill + adcs r1, r1, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [sp, #92] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #92] @ 4-byte Spill + mov r1, #0 + adc r1, r1, #0 + str r1, [sp, #88] @ 4-byte Spill + ldmib r3, {r1, r2, r8} + ldr r6, [r3, #16] + ldr r11, [r3] + str r6, [sp, #48] @ 4-byte Spill + ldr r6, [r3, #20] + subs r11, lr, r11 + sbcs r1, r4, r1 + sbcs r2, r12, r2 + sbcs r12, r5, r8 + ldr r8, [r3, #32] + ldr r5, [r3, #36] + str r6, [sp, #52] @ 4-byte Spill + ldr r6, [r3, #24] + str r6, [sp, #56] @ 4-byte Spill + ldr r6, [r3, #28] + ldr r3, [sp, #48] @ 4-byte Reload + str r6, [sp, #60] @ 4-byte Spill + sbcs r6, r7, r3 + ldr r3, [sp, #52] @ 4-byte Reload + ldr r4, [sp, #60] @ 4-byte Reload + sbcs r7, r9, r3 + ldr r3, [sp, #56] @ 4-byte Reload + sbcs r9, r10, r3 + ldr r3, [sp, #100] @ 4-byte Reload + sbcs r10, r3, r4 + ldr r3, [sp, #96] @ 4-byte Reload + ldr r4, [sp, #68] @ 4-byte Reload + sbcs r8, r3, r8 + ldr r3, [sp, #92] @ 4-byte Reload + sbcs r5, r3, r5 + ldr r3, [sp, #88] @ 4-byte Reload + sbc r3, r3, #0 + ands r3, r3, #1 + movne r11, lr + movne r1, r4 + str r11, [r0, #40] + str r1, [r0, #44] + ldr r1, [sp, #72] @ 4-byte Reload + movne r2, r1 + ldr r1, [sp, #76] @ 4-byte Reload + cmp r3, #0 + str r2, [r0, #48] + movne r12, r1 + ldr r1, [sp, #80] @ 4-byte Reload + str r12, [r0, #52] + movne r6, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r6, [r0, #56] + movne r7, r1 + ldr r1, [sp, #64] @ 4-byte Reload + cmp r3, #0 + str r7, [r0, #60] + movne r9, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r9, [r0, #64] + movne r10, r1 + ldr r1, [sp, #96] @ 4-byte Reload + str r10, [r0, #68] + movne r8, r1 + ldr r1, [sp, #92] @ 4-byte Reload + cmp r3, #0 + str r8, [r0, #72] + movne r5, r1 + str r5, [r0, #76] + add sp, sp, #104 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end156: + .size mcl_fpDbl_add10L, .Lfunc_end156-mcl_fpDbl_add10L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub10L + .align 2 + .type mcl_fpDbl_sub10L,%function +mcl_fpDbl_sub10L: @ @mcl_fpDbl_sub10L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #96 + sub sp, sp, #96 + ldr r7, [r2, #64] + str r7, [sp, #92] @ 4-byte Spill + ldr r7, [r2, #68] + str r7, [sp, #88] @ 4-byte Spill + ldr r7, [r2, #72] + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r2, #76] + str r7, [sp, #84] @ 4-byte Spill + ldr r7, [r2, #32] + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r2, #36] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r2, #40] + str r7, [sp, #60] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #64] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #68] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #76] @ 4-byte Spill 
+ ldr r7, [r2, #44] + str r7, [sp, #52] @ 4-byte Spill + ldm r2, {r6, r7, r8, r9} + ldm r1, {r12, lr} + ldr r4, [r1, #8] + ldr r10, [r2, #20] + ldr r5, [r1, #12] + subs r11, r12, r6 + ldr r6, [r2, #28] + sbcs r7, lr, r7 + add lr, r1, #16 + sbcs r8, r4, r8 + ldr r4, [r2, #16] + sbcs r5, r5, r9 + ldr r9, [r1, #32] + str r6, [sp, #28] @ 4-byte Spill + ldr r6, [r2, #24] + ldr r2, [r1, #64] + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [r1, #68] + str r6, [sp, #24] @ 4-byte Spill + ldr r6, [r1, #44] + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #40] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r1, #36] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #40] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #48] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #52] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #20] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + str r11, [r0] + stmib r0, {r7, r8} + str r5, [r0, #12] + ldr r7, [sp] @ 4-byte Reload + ldr r8, [r3, #20] + sbcs r1, r1, r4 + str r1, [r0, #16] + sbcs r2, r2, r10 + ldr r1, [sp, #24] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #28] @ 4-byte Reload + sbcs r1, r12, r1 + str r1, [r0, #24] + sbcs r2, lr, r2 + ldr r1, [sp, #48] @ 4-byte Reload + str r2, [r0, #28] + ldr r2, [sp, #56] @ 4-byte Reload + sbcs r1, r9, r1 + sbcs r2, r7, r2 + str r1, [r0, #32] + ldr r1, [sp, #60] @ 4-byte Reload + str r2, [r0, #36] + ldr r2, [sp, #4] @ 4-byte Reload + sbcs r12, r2, r1 + ldr r1, [sp, #52] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + str r12, [sp, #48] @ 4-byte Spill + sbcs r4, r6, r1 + ldr r1, [sp, #64] @ 4-byte Reload + sbcs r11, r2, r1 + ldr r1, [sp, #68] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + str r11, [sp, #52] @ 4-byte Spill + sbcs r6, r2, r1 + ldr r1, [sp, #72] @ 4-byte Reload + ldr r2, [sp, #16] @ 4-byte Reload + str r6, [sp, #64] @ 4-byte Spill + sbcs r7, r2, r1 + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [sp, #20] @ 4-byte Reload + str r7, [sp, #68] @ 4-byte Spill + sbcs r9, r2, r1 + ldr r1, [sp, #92] @ 4-byte Reload + ldr r2, [sp, #32] @ 4-byte Reload + str r9, [sp, #76] @ 4-byte Spill + sbcs r1, r2, r1 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + sbcs r1, r2, r1 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + sbcs r10, r2, r1 + ldr r1, [sp, #84] @ 4-byte Reload + ldr r2, [sp, #44] @ 4-byte Reload + str r10, [sp, #80] @ 4-byte Spill + sbcs lr, r2, r1 + mov r1, #0 + ldr r2, [r3, #4] + sbc r1, r1, #0 + str lr, [sp, #84] @ 4-byte Spill + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [r3, #32] + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [r3, #36] + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [r3, #8] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [r3, #12] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [r3, #16] + ldr r5, [sp, #28] @ 4-byte Reload + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [r3, #24] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [r3, #28] + ldr r3, [r3] + str r1, [sp, #44] @ 4-byte Spill + adds r1, r12, r3 + ldr r3, [sp, #32] @ 4-byte Reload + adcs r2, r4, r2 + adcs r3, r11, r3 + adcs r12, r6, r5 + ldr r6, [sp, #36] @ 4-byte Reload + ldr r5, [sp, #92] @ 4-byte Reload + adcs r6, r7, r6 + ldr r7, [sp, #40] @ 4-byte Reload + adcs r8, r9, r8 + adcs r9, r5, r7 + ldr r5, [sp, #44] @ 4-byte Reload + ldr r7, [sp, #88] @ 4-byte Reload + adcs r7, r7, r5 + ldr r5, 
[sp, #56] @ 4-byte Reload + adcs r11, r10, r5 + ldr r5, [sp, #60] @ 4-byte Reload + adc r10, lr, r5 + ldr r5, [sp, #72] @ 4-byte Reload + ands lr, r5, #1 + ldr r5, [sp, #48] @ 4-byte Reload + moveq r2, r4 + moveq r1, r5 + str r1, [r0, #40] + ldr r1, [sp, #52] @ 4-byte Reload + str r2, [r0, #44] + moveq r3, r1 + ldr r1, [sp, #64] @ 4-byte Reload + cmp lr, #0 + str r3, [r0, #48] + moveq r12, r1 + ldr r1, [sp, #68] @ 4-byte Reload + str r12, [r0, #52] + moveq r6, r1 + ldr r1, [sp, #76] @ 4-byte Reload + str r6, [r0, #56] + moveq r8, r1 + ldr r1, [sp, #92] @ 4-byte Reload + cmp lr, #0 + str r8, [r0, #60] + moveq r9, r1 + ldr r1, [sp, #88] @ 4-byte Reload + str r9, [r0, #64] + moveq r7, r1 + ldr r1, [sp, #80] @ 4-byte Reload + str r7, [r0, #68] + moveq r11, r1 + ldr r1, [sp, #84] @ 4-byte Reload + cmp lr, #0 + str r11, [r0, #72] + moveq r10, r1 + str r10, [r0, #76] + add sp, sp, #96 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end157: + .size mcl_fpDbl_sub10L, .Lfunc_end157-mcl_fpDbl_sub10L + .cantunwind + .fnend + + .align 2 + .type .LmulPv352x32,%function +.LmulPv352x32: @ @mulPv352x32 + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r11, lr} + push {r4, r5, r6, r7, r8, r9, r11, lr} + ldr r12, [r1] + ldmib r1, {r3, lr} + ldr r9, [r1, #12] + umull r4, r8, lr, r2 + umull lr, r6, r12, r2 + mov r5, r4 + mov r7, r6 + str lr, [r0] + umull lr, r12, r9, r2 + umlal r7, r5, r3, r2 + str r5, [r0, #8] + str r7, [r0, #4] + umull r5, r7, r3, r2 + adds r3, r6, r5 + adcs r3, r7, r4 + adcs r3, r8, lr + str r3, [r0, #12] + ldr r3, [r1, #16] + umull r7, r6, r3, r2 + adcs r3, r12, r7 + str r3, [r0, #16] + ldr r3, [r1, #20] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #20] + ldr r3, [r1, #24] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #24] + ldr r3, [r1, #28] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #28] + ldr r3, [r1, #32] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #32] + ldr r3, [r1, #36] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #36] + ldr r1, [r1, #40] + umull r3, r7, r1, r2 + adcs r1, r5, r3 + str r1, [r0, #40] + adc r1, r7, #0 + str r1, [r0, #44] + pop {r4, r5, r6, r7, r8, r9, r11, lr} + mov pc, lr +.Lfunc_end158: + .size .LmulPv352x32, .Lfunc_end158-.LmulPv352x32 + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre11L + .align 2 + .type mcl_fp_mulUnitPre11L,%function +mcl_fp_mulUnitPre11L: @ @mcl_fp_mulUnitPre11L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, lr} + push {r4, r5, r6, r7, r8, r9, r10, lr} + .pad #48 + sub sp, sp, #48 + mov r4, r0 + mov r0, sp + bl .LmulPv352x32(PLT) + ldr r12, [sp, #44] + ldr lr, [sp, #40] + ldr r8, [sp, #36] + ldr r9, [sp, #32] + ldr r10, [sp, #28] + ldr r1, [sp, #24] + ldr r5, [sp, #20] + ldr r6, [sp, #16] + ldr r7, [sp] + ldmib sp, {r2, r3} + ldr r0, [sp, #12] + str r7, [r4] + stmib r4, {r2, r3} + str r0, [r4, #12] + str r6, [r4, #16] + str r5, [r4, #20] + str r1, [r4, #24] + str r10, [r4, #28] + str r9, [r4, #32] + str r8, [r4, #36] + str lr, [r4, #40] + str r12, [r4, #44] + add sp, sp, #48 + pop {r4, r5, r6, r7, r8, r9, r10, lr} + mov pc, lr +.Lfunc_end159: + .size mcl_fp_mulUnitPre11L, .Lfunc_end159-mcl_fp_mulUnitPre11L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre11L + .align 2 + .type mcl_fpDbl_mulPre11L,%function +mcl_fpDbl_mulPre11L: @ @mcl_fpDbl_mulPre11L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #604 + sub sp, sp, #604 + mov r3, r2 + mov r4, r0 + add r0, sp, #552 + str r1, [sp, #68] 
@ 4-byte Spill + mov r5, r1 + ldr r2, [r3] + str r3, [sp, #64] @ 4-byte Spill + str r4, [sp, #60] @ 4-byte Spill + mov r6, r3 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #596] + ldr r1, [sp, #560] + ldr r2, [r6, #4] + ldr r11, [sp, #556] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #592] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #564] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #588] + str r1, [sp, #20] @ 4-byte Spill + mov r1, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #584] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #580] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #576] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #572] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #568] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #552] + str r0, [r4] + add r0, sp, #504 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #548] + add r10, sp, #532 + add r12, sp, #508 + mov r6, r4 + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r5, r8, r9, r10} + ldr r1, [sp, #504] + ldr lr, [sp, #528] + ldr r7, [sp, #524] + ldm r12, {r0, r2, r3, r12} + adds r1, r1, r11 + str r1, [r4, #4] + ldr r1, [sp, #28] @ 4-byte Reload + ldr r4, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r5, r0 + ldr r5, [sp, #64] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + ldr r2, [r5, #8] + adcs r0, r8, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #456 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #500] + add r10, sp, #484 + add r12, sp, #460 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #496] + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr lr, [sp, #480] + ldr r7, [sp, #476] + ldr r1, [sp, #456] + ldm r12, {r0, r2, r3, r12} + ldr r11, [sp, #16] @ 4-byte Reload + adds r1, r1, r11 + str r1, [r6, #8] + ldr r1, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #12] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, 
[sp, #12] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #408 + bl .LmulPv352x32(PLT) + add r10, sp, #444 + add lr, sp, #432 + add r12, sp, #412 + ldm r10, {r8, r9, r10} + ldm lr, {r6, r11, lr} + ldr r7, [sp, #428] + ldr r1, [sp, #408] + ldm r12, {r0, r2, r3, r12} + ldr r4, [sp, #16] @ 4-byte Reload + adds r1, r1, r4 + ldr r4, [sp, #60] @ 4-byte Reload + str r1, [r4, #12] + ldr r1, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #16] + ldr r5, [sp, #68] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + mov r1, r5 + adcs r0, r3, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #24] @ 4-byte Spill + adc r0, r10, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #360 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #404] + add r10, sp, #392 + add r12, sp, #364 + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr lr, [sp, #388] + ldr r6, [sp, #384] + ldr r7, [sp, #380] + ldr r1, [sp, #360] + ldm r12, {r0, r2, r3, r12} + ldr r11, [sp, #16] @ 4-byte Reload + adds r1, r1, r11 + str r1, [r4, #16] + ldr r1, [sp, #32] @ 4-byte Reload + ldr r4, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r4, #20] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #312 + bl .LmulPv352x32(PLT) + add r11, sp, #344 + add r12, sp, #316 + ldm r11, {r8, r9, r10, r11} + ldr lr, [sp, #340] + ldr r6, [sp, #336] + ldr r7, [sp, #332] + ldr r1, [sp, #312] + ldm r12, {r0, r2, r3, r12} + ldr r5, [sp, #16] @ 4-byte Reload + adds r1, r1, r5 + ldr r5, [sp, #60] @ 4-byte Reload + str r1, [r5, #20] + ldr r1, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r4, #24] + ldr r4, [sp, #68] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + mov r1, r4 + adcs r0, r3, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r12, r0 + 
str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #24] @ 4-byte Spill + adc r0, r11, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #264 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #308] + add r10, sp, #296 + add r12, sp, #268 + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr lr, [sp, #292] + ldr r6, [sp, #288] + ldr r7, [sp, #284] + ldr r1, [sp, #264] + ldm r12, {r0, r2, r3, r12} + ldr r11, [sp, #16] @ 4-byte Reload + adds r1, r1, r11 + str r1, [r5, #24] + ldr r1, [sp, #40] @ 4-byte Reload + ldr r5, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #28] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #216 + bl .LmulPv352x32(PLT) + add r10, sp, #252 + add lr, sp, #240 + add r12, sp, #220 + ldm r10, {r8, r9, r10} + ldm lr, {r6, r11, lr} + ldr r7, [sp, #236] + ldr r1, [sp, #216] + ldm r12, {r0, r2, r3, r12} + ldr r4, [sp, #16] @ 4-byte Reload + adds r1, r1, r4 + ldr r4, [sp, #60] @ 4-byte Reload + str r1, [r4, #28] + ldr r1, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #32] + ldr r5, [sp, #68] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + mov r1, r5 + adcs r0, r3, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #20] @ 4-byte Spill + adc r0, r10, #0 + str r0, [sp, #16] @ 4-byte Spill + add r0, sp, #168 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #212] + add r10, sp, #200 + add r12, sp, #172 + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr lr, [sp, #196] + 
ldr r6, [sp, #192] + ldr r7, [sp, #188] + ldr r1, [sp, #168] + ldm r12, {r0, r2, r3, r12} + ldr r11, [sp, #12] @ 4-byte Reload + adds r1, r1, r11 + ldr r11, [sp, #64] @ 4-byte Reload + str r1, [r4, #32] + ldr r1, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r11, #36] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #120 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #164] + add lr, sp, #152 + add r10, sp, #140 + add r8, sp, #128 + str r0, [sp, #16] @ 4-byte Spill + ldm lr, {r9, r12, lr} + ldm r10, {r0, r6, r10} + ldr r2, [sp, #120] + ldr r3, [sp, #124] + ldm r8, {r1, r7, r8} + ldr r5, [sp, #12] @ 4-byte Reload + adds r2, r2, r5 + ldr r5, [sp, #56] @ 4-byte Reload + str r2, [r4, #36] + ldr r2, [r11, #40] + adcs r11, r3, r5 + ldr r3, [sp, #52] @ 4-byte Reload + adcs r5, r1, r3 + ldr r1, [sp, #48] @ 4-byte Reload + adcs r7, r7, r1 + ldr r1, [sp, #44] @ 4-byte Reload + adcs r8, r8, r1 + ldr r1, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #68] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r10, r10, r0 + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + add r0, sp, #72 + bl .LmulPv352x32(PLT) + add r3, sp, #72 + ldm r3, {r0, r1, r2, r3} + ldr r9, [sp, #116] + ldr r6, [sp, #112] + adds r12, r0, r11 + add r11, sp, #88 + adcs lr, r1, r5 + adcs r2, r2, r7 + adcs r3, r3, r8 + ldr r8, [sp, #108] + ldm r11, {r0, r1, r5, r7, r11} + str r12, [r4, #40] + str lr, [r4, #44] + str r2, [r4, #48] + ldr r2, [sp, #40] @ 4-byte Reload + add r12, r4, #72 + str r3, [r4, #52] + adcs r0, r0, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r0, [r4, #56] + adcs r1, r1, r2 + ldr r2, [sp, #52] @ 4-byte Reload + adcs r0, r5, r10 + str r1, [r4, #60] + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [r4, #64] + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [r4, #68] + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r11, r0 + adcs r1, r8, r1 + adcs r2, r6, r2 + adc r3, r9, #0 + stm r12, {r0, r1, r2, r3} + add sp, sp, #604 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end160: + .size mcl_fpDbl_mulPre11L, .Lfunc_end160-mcl_fpDbl_mulPre11L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre11L + .align 2 + .type mcl_fpDbl_sqrPre11L,%function +mcl_fpDbl_sqrPre11L: @ 
@mcl_fpDbl_sqrPre11L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #596 + sub sp, sp, #596 + mov r5, r1 + mov r4, r0 + add r0, sp, #544 + ldr r2, [r5] + bl .LmulPv352x32(PLT) + ldr r0, [sp, #588] + ldr r1, [sp, #548] + ldr r2, [r5, #4] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #584] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #552] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #580] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #556] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #576] + str r1, [sp, #24] @ 4-byte Spill + mov r1, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #572] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #568] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #564] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #560] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #544] + str r0, [r4] + add r0, sp, #496 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #540] + add r10, sp, #520 + add lr, sp, #496 + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r6, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #20] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #4] + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #8] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #448 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #492] + add r10, sp, #476 + add lr, sp, #448 + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r6, r8, r9, r10} + ldr r7, [sp, #472] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #20] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #8] + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #12] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, 
#400 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #444] + add r10, sp, #428 + add lr, sp, #400 + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r6, r8, r9, r10} + ldr r7, [sp, #424] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #20] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #12] + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #16] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #352 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #396] + add r10, sp, #380 + add lr, sp, #352 + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r6, r8, r9, r10} + ldr r7, [sp, #376] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #20] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #16] + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #20] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #304 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #348] + add r10, sp, #332 + add lr, sp, #304 + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r6, r8, r9, r10} + ldr r7, [sp, #328] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #20] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #20] + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #24] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #40] 
@ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #256 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #300] + add r10, sp, #284 + add lr, sp, #256 + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r6, r8, r9, r10} + ldr r7, [sp, #280] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #20] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #24] + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #28] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #208 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #252] + add r10, sp, #236 + add lr, sp, #208 + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r6, r8, r9, r10} + ldr r7, [sp, #232] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #20] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #28] + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #32] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #160 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #204] + add r10, sp, #188 + add lr, sp, #160 + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r6, r8, r9, r10} + ldr r7, [sp, #184] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #16] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #32] + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #36] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, 
r3, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #112 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #156] + add lr, sp, #140 + add r12, sp, #124 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #152] + str r0, [sp, #8] @ 4-byte Spill + ldm lr, {r8, r11, lr} + ldr r9, [sp, #136] + ldr r2, [sp, #112] + ldr r7, [sp, #116] + ldr r6, [sp, #120] + ldm r12, {r0, r3, r12} + ldr r1, [sp, #16] @ 4-byte Reload + adds r2, r2, r1 + ldr r1, [sp, #60] @ 4-byte Reload + str r2, [r4, #36] + ldr r2, [r5, #40] + adcs r7, r7, r1 + ldr r1, [sp, #56] @ 4-byte Reload + adcs r6, r6, r1 + ldr r1, [sp, #52] @ 4-byte Reload + adcs r10, r0, r1 + ldr r0, [sp, #48] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r11, r11, r0 + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + add r0, sp, #64 + bl .LmulPv352x32(PLT) + add r3, sp, #64 + ldm r3, {r0, r1, r2, r3} + ldr r9, [sp, #108] + ldr r8, [sp, #104] + adds r12, r0, r7 + ldr r0, [sp, #16] @ 4-byte Reload + adcs lr, r1, r6 + adcs r2, r2, r10 + add r10, sp, #80 + adcs r3, r3, r0 + ldm r10, {r0, r1, r5, r6, r7, r10} + str r12, [r4, #40] + str lr, [r4, #44] + str r2, [r4, #48] + ldr r2, [sp, #20] @ 4-byte Reload + add r12, r4, #72 + str r3, [r4, #52] + adcs r0, r0, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r0, [r4, #56] + ldr r0, [sp, #48] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #52] @ 4-byte Reload + adcs r0, r5, r0 + str r1, [r4, #60] + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [r4, #64] + adcs r0, r6, r11 + str r0, [r4, #68] + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r7, r0 + adcs r1, r10, r1 + adcs r2, r8, r2 + adc r3, r9, #0 + stm r12, {r0, r1, r2, r3} + add sp, sp, #596 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end161: + .size mcl_fpDbl_sqrPre11L, .Lfunc_end161-mcl_fpDbl_sqrPre11L + .cantunwind + .fnend + + .globl mcl_fp_mont11L + .align 2 + .type mcl_fp_mont11L,%function +mcl_fp_mont11L: @ @mcl_fp_mont11L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #132 + sub sp, sp, #132 + .pad #1024 + sub sp, sp, #1024 + mov r7, r2 + ldr r5, [r3, #-4] + str r0, [sp, #72] @ 4-byte Spill + add r0, sp, #1104 + str r3, [sp, #92] @ 4-byte Spill + str r1, [sp, #84] @ 4-byte Spill + mov 
r4, r3 + mov r6, r1 + ldr r2, [r7] + str r7, [sp, #80] @ 4-byte Spill + str r5, [sp, #88] @ 4-byte Spill + bl .LmulPv352x32(PLT) + ldr r0, [sp, #1108] + ldr r8, [sp, #1104] + mov r1, r4 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1112] + mul r2, r8, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1116] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1148] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #1144] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #1140] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #1136] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1132] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1128] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1124] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1120] + str r0, [sp, #32] @ 4-byte Spill + add r0, sp, #1056 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #1100] + ldr r2, [r7, #4] + ldr r11, [sp, #1072] + ldr r5, [sp, #1056] + ldr r4, [sp, #1060] + ldr r10, [sp, #1064] + ldr r9, [sp, #1068] + mov r1, r6 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1096] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1092] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1088] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1084] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1080] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #1076] + str r0, [sp, #4] @ 4-byte Spill + add r0, sp, #1008 + bl .LmulPv352x32(PLT) + adds r0, r5, r8 + ldr r1, [sp, #60] @ 4-byte Reload + ldr r2, [sp, #4] @ 4-byte Reload + add lr, sp, #1008 + ldr r7, [sp, #1044] + ldr r6, [sp, #1040] + ldr r5, [sp, #1036] + ldr r0, [sp, #48] @ 4-byte Reload + adcs r8, r4, r0 + mov r0, #0 + ldr r4, [sp, #1032] + adcs r1, r10, r1 + ldr r10, [sp, #1052] + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r9, r1 + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + adcs r1, r11, r1 + ldr r11, [sp, #1048] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #8] @ 4-byte Reload + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #12] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r2, r1 + str r1, [sp, #28] @ 4-byte Spill + adc r9, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r8, r8, r0 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr 
r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r9, r10 + str r0, [sp, #32] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r8, r0 + add r0, sp, #960 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #1004] + ldr r1, [sp, #84] @ 4-byte Reload + ldr r5, [sp, #984] + ldr r6, [sp, #980] + ldr r9, [sp, #976] + ldr r10, [sp, #960] + ldr r11, [sp, #964] + ldr r7, [sp, #968] + ldr r4, [sp, #972] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1000] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #996] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #992] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #988] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + ldr r2, [r0, #8] + add r0, sp, #912 + bl .LmulPv352x32(PLT) + adds r0, r8, r10 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #916 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #940 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + ldm r11, {r5, r6, r7, r8, r11} + ldr r4, [sp, #912] + adc r10, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r9, [sp, #76] @ 4-byte Reload + adds r9, r9, r4 + ldr r4, [sp, #68] @ 4-byte Reload + adcs r0, r4, r0 + mov r4, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r10, r11 + str r0, [sp, #32] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + 
mul r2, r9, r0 + add r0, sp, #864 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #908] + add r11, sp, #864 + ldr r7, [sp, #888] + ldr r5, [sp, #884] + ldr r8, [sp, #880] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #904] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #900] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #896] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #892] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldr r0, [sp, #80] @ 4-byte Reload + ldr r1, [sp, #84] @ 4-byte Reload + ldr r6, [sp, #876] + ldr r2, [r0, #12] + add r0, sp, #816 + bl .LmulPv352x32(PLT) + adds r0, r4, r9 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #816 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #840 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldm r11, {r4, r5, r6, r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #76] @ 4-byte Reload + adds r8, r7, r0 + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #32] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r8, r0 + add r0, sp, #768 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #812] + ldr r1, [sp, #84] @ 4-byte Reload + ldr r5, [sp, #792] + ldr r6, [sp, #788] + ldr r9, [sp, #784] + ldr r10, [sp, #768] + ldr r11, [sp, #772] + ldr r7, [sp, #776] + ldr r4, [sp, #780] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #808] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #804] + str r0, [sp, #16] @ 
4-byte Spill + ldr r0, [sp, #800] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #796] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, sp, #720 + bl .LmulPv352x32(PLT) + adds r0, r8, r10 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #724 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #748 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + ldm r11, {r5, r6, r7, r8, r11} + ldr r4, [sp, #720] + adc r10, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r9, [sp, #76] @ 4-byte Reload + adds r9, r9, r4 + ldr r4, [sp, #68] @ 4-byte Reload + adcs r0, r4, r0 + mov r4, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r10, r11 + str r0, [sp, #32] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r9, r0 + add r0, sp, #672 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #716] + add r11, sp, #672 + ldr r7, [sp, #696] + ldr r5, [sp, #692] + ldr r8, [sp, #688] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #712] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #708] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #704] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #700] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldr r0, [sp, #80] @ 4-byte Reload + ldr r1, [sp, #84] @ 4-byte Reload + ldr r6, [sp, #684] + ldr r2, [r0, #20] + add r0, sp, #624 + bl .LmulPv352x32(PLT) + adds r0, r4, r9 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #624 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r11 
+ add r11, sp, #648 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldm r11, {r4, r5, r6, r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #76] @ 4-byte Reload + adds r8, r7, r0 + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #32] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r8, r0 + add r0, sp, #576 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #620] + ldr r1, [sp, #84] @ 4-byte Reload + ldr r5, [sp, #600] + ldr r6, [sp, #596] + ldr r9, [sp, #592] + ldr r10, [sp, #576] + ldr r11, [sp, #580] + ldr r7, [sp, #584] + ldr r4, [sp, #588] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #616] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #612] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #608] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #604] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, sp, #528 + bl .LmulPv352x32(PLT) + adds r0, r8, r10 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #532 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #556 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + 
str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + ldm r11, {r5, r6, r7, r8, r11} + ldr r4, [sp, #528] + adc r10, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r9, [sp, #76] @ 4-byte Reload + adds r9, r9, r4 + ldr r4, [sp, #68] @ 4-byte Reload + adcs r0, r4, r0 + mov r4, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r10, r11 + str r0, [sp, #32] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r9, r0 + add r0, sp, #480 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #524] + add r11, sp, #480 + ldr r7, [sp, #504] + ldr r5, [sp, #500] + ldr r8, [sp, #496] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #520] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #516] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #512] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #508] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldr r0, [sp, #80] @ 4-byte Reload + ldr r1, [sp, #84] @ 4-byte Reload + ldr r6, [sp, #492] + ldr r2, [r0, #28] + add r0, sp, #432 + bl .LmulPv352x32(PLT) + adds r0, r4, r9 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #432 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #456 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte 
Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldm r11, {r4, r5, r6, r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #76] @ 4-byte Reload + adds r8, r7, r0 + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #32] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r8, r0 + add r0, sp, #384 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #428] + ldr r1, [sp, #84] @ 4-byte Reload + ldr r5, [sp, #408] + ldr r6, [sp, #404] + ldr r9, [sp, #400] + ldr r10, [sp, #384] + ldr r11, [sp, #388] + ldr r7, [sp, #392] + ldr r4, [sp, #396] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #424] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #420] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #416] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #412] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, sp, #336 + bl .LmulPv352x32(PLT) + adds r0, r8, r10 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #340 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #364 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + ldm r11, {r5, r6, r7, r8, r11} + ldr r4, [sp, #336] + adc r10, r0, #0 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r9, [sp, #76] @ 4-byte Reload + adds r9, r9, 
r4 + ldr r4, [sp, #68] @ 4-byte Reload + adcs r0, r4, r0 + mov r4, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #36] @ 4-byte Spill + adcs r0, r10, r11 + str r0, [sp, #32] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r9, r0 + add r0, sp, #288 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #332] + add r11, sp, #288 + ldr r7, [sp, #312] + ldr r5, [sp, #308] + ldr r8, [sp, #304] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #328] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #324] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #320] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #316] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldr r0, [sp, #80] @ 4-byte Reload + ldr r1, [sp, #84] @ 4-byte Reload + ldr r6, [sp, #300] + ldr r2, [r0, #36] + add r0, sp, #240 + bl .LmulPv352x32(PLT) + adds r0, r4, r9 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #240 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #264 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldm r11, {r4, r5, r6, r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #76] @ 4-byte Reload + adds r8, r7, r0 + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 
4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #32] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r8, r0 + add r0, sp, #192 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #236] + ldr r1, [sp, #84] @ 4-byte Reload + ldr r5, [sp, #216] + ldr r6, [sp, #212] + ldr r9, [sp, #208] + ldr r10, [sp, #192] + ldr r11, [sp, #196] + ldr r7, [sp, #200] + ldr r4, [sp, #204] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #232] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #228] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #224] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #220] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + ldr r2, [r0, #40] + add r0, sp, #144 + bl .LmulPv352x32(PLT) + adds r0, r8, r10 + ldr r1, [sp, #68] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + add lr, sp, #144 + add r12, sp, #160 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r11 + adcs r10, r1, r7 + ldr r1, [sp, #64] @ 4-byte Reload + adcs r11, r1, r4 + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r9 + add r9, sp, #180 + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r6 + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r5 + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #12] @ 4-byte Reload + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #28] @ 4-byte Reload + adc r1, r1, #0 + str r1, [sp, #48] @ 4-byte Spill + ldm lr, {r2, r6, lr} + ldr r5, [sp, #156] + adds r4, r0, r2 + ldr r0, [sp, #88] @ 4-byte Reload + adcs r6, r10, r6 + mul r1, r4, r0 + str r1, [sp, #44] @ 4-byte Spill + ldm r9, {r7, r8, r9} + ldm r12, {r0, r1, r2, r3, r12} + str r6, [sp, #40] @ 4-byte Spill + adcs r6, r11, lr + ldr r10, [sp, #92] @ 4-byte Reload + str r6, [sp, #36] @ 4-byte Spill + ldr r6, [sp, #84] @ 4-byte Reload + adcs r11, r6, r5 + ldr r6, [sp, #80] @ 4-byte Reload + adcs r6, r6, r0 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r10 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r5, r0, r3 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #76] @ 4-byte 
Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r8, r0, r9 + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + add r0, sp, #96 + bl .LmulPv352x32(PLT) + add r7, sp, #96 + ldm r7, {r0, r1, r3, r7} + adds r0, r4, r0 + ldr r0, [sp, #40] @ 4-byte Reload + adcs lr, r0, r1 + ldr r0, [sp, #36] @ 4-byte Reload + str lr, [sp, #44] @ 4-byte Spill + adcs r1, r0, r3 + ldr r3, [sp, #112] + ldr r0, [sp, #84] @ 4-byte Reload + adcs r9, r11, r7 + str r1, [sp, #48] @ 4-byte Spill + adcs r6, r6, r3 + ldr r3, [sp, #116] + str r6, [sp, #52] @ 4-byte Spill + adcs r0, r0, r3 + ldr r3, [sp, #120] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r12, r0, r3 + ldr r3, [sp, #124] + ldr r0, [sp, #88] @ 4-byte Reload + str r12, [sp, #56] @ 4-byte Spill + adcs r5, r5, r3 + ldr r3, [sp, #128] + str r5, [sp, #60] @ 4-byte Spill + adcs r0, r0, r3 + ldr r3, [sp, #132] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r3 + ldr r3, [sp, #136] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + ldr r3, [sp, #140] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r8, r8, r3 + adc r0, r0, #0 + str r8, [sp, #68] @ 4-byte Spill + str r0, [sp, #64] @ 4-byte Spill + ldmib r10, {r3, r7} + ldr r4, [r10, #16] + ldr r11, [r10] + ldr r2, [r10, #12] + mov r0, r10 + str r4, [sp, #28] @ 4-byte Spill + ldr r4, [r10, #20] + subs r11, lr, r11 + ldr lr, [sp, #84] @ 4-byte Reload + str r4, [sp, #32] @ 4-byte Spill + ldr r4, [r10, #24] + str r4, [sp, #36] @ 4-byte Spill + ldr r4, [r10, #28] + sbcs r10, r1, r3 + mov r3, r9 + ldr r9, [r0, #32] + sbcs r1, r3, r7 + ldr r7, [r0, #36] + ldr r0, [r0, #40] + sbcs r2, r6, r2 + ldr r6, [sp, #36] @ 4-byte Reload + str r4, [sp, #40] @ 4-byte Spill + ldr r4, [sp, #28] @ 4-byte Reload + sbcs lr, lr, r4 + ldr r4, [sp, #32] @ 4-byte Reload + sbcs r4, r12, r4 + ldr r12, [sp, #88] @ 4-byte Reload + sbcs r5, r5, r6 + ldr r6, [sp, #40] @ 4-byte Reload + sbcs r12, r12, r6 + ldr r6, [sp, #80] @ 4-byte Reload + sbcs r9, r6, r9 + ldr r6, [sp, #76] @ 4-byte Reload + sbcs r7, r6, r7 + ldr r6, [sp, #64] @ 4-byte Reload + sbcs r0, r8, r0 + ldr r8, [sp, #72] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + sbc r6, r6, #0 + ands r6, r6, #1 + movne r11, r0 + ldr r0, [sp, #48] @ 4-byte Reload + movne r1, r3 + str r11, [r8] + movne r10, r0 + cmp r6, #0 + ldr r0, [sp, #92] @ 4-byte Reload + str r10, [r8, #4] + str r1, [r8, #8] + ldr r1, [sp, #52] @ 4-byte Reload + movne r2, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r2, [r8, #12] + movne lr, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str lr, [r8, #16] + movne r4, r1 + ldr r1, [sp, #60] @ 4-byte Reload + cmp r6, #0 + str r4, [r8, #20] + movne r5, r1 + ldr r1, [sp, #88] @ 4-byte Reload + str r5, [r8, #24] + movne r12, r1 + ldr r1, [sp, #80] @ 4-byte Reload + str r12, [r8, #28] + movne r9, r1 + ldr r1, [sp, #76] @ 4-byte Reload + cmp r6, #0 + str r9, [r8, #32] + movne r7, r1 + ldr r1, [sp, #68] @ 4-byte Reload + str r7, [r8, #36] + movne r0, r1 + str r0, [r8, #40] + add sp, sp, #132 + add sp, sp, #1024 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end162: + .size mcl_fp_mont11L, .Lfunc_end162-mcl_fp_mont11L + .cantunwind + .fnend + + .globl mcl_fp_montNF11L + .align 2 + .type mcl_fp_montNF11L,%function +mcl_fp_montNF11L: @ @mcl_fp_montNF11L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #124 + sub sp, sp, 
#124 + .pad #1024 + sub sp, sp, #1024 + str r2, [sp, #72] @ 4-byte Spill + ldr r5, [r3, #-4] + ldr r2, [r2] + add r6, sp, #1024 + str r0, [sp, #68] @ 4-byte Spill + str r3, [sp, #84] @ 4-byte Spill + str r1, [sp, #76] @ 4-byte Spill + mov r4, r3 + add r0, r6, #72 + str r5, [sp, #80] @ 4-byte Spill + bl .LmulPv352x32(PLT) + ldr r0, [sp, #1100] + ldr r10, [sp, #1096] + add r9, sp, #1024 + mov r1, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1104] + mul r2, r10, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1108] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1140] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #1136] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1132] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1128] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1124] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1120] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1116] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1112] + str r0, [sp, #24] @ 4-byte Spill + add r0, r9, #24 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #1092] + ldr r1, [sp, #76] @ 4-byte Reload + ldr r5, [sp, #1072] + ldr r7, [sp, #1068] + ldr r8, [sp, #1064] + ldr r11, [sp, #1048] + ldr r4, [sp, #1052] + ldr r6, [sp, #1056] + ldr r9, [sp, #1060] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1088] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1084] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1080] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #1076] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + ldr r2, [r0, #4] + add r0, sp, #1000 + bl .LmulPv352x32(PLT) + adds r0, r11, r10 + ldr r1, [sp, #4] @ 4-byte Reload + add r11, sp, #1024 + add lr, sp, #1000 + ldr r10, [sp, #1044] + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + ldm r11, {r4, r5, r6, r8, r11} + adc r9, r1, r0 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #52] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, 
#40] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #32] @ 4-byte Spill + adcs r0, r9, r11 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r10, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #952 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #996] + add r11, sp, #952 + ldr r6, [sp, #976] + ldr r4, [sp, #972] + ldr r8, [sp, #968] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #992] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #988] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #984] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #980] + str r0, [sp, #4] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r5, [sp, #964] + ldr r2, [r0, #8] + add r0, sp, #904 + bl .LmulPv352x32(PLT) + adds r0, r7, r9 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #908 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #948] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #932 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #24] @ 4-byte Spill + ldm r11, {r5, r6, r9, r11} + ldr r4, [sp, #904] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r8, [sp, #64] @ 4-byte Reload + ldr r7, [sp, #60] @ 4-byte Reload + adds r4, r8, r4 + adcs r0, r7, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r10, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r4, r0 + add r0, sp, #856 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #900] + add r11, sp, #856 + ldr r7, [sp, #880] + ldr r5, [sp, #876] + ldr r8, [sp, #872] 
+ str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #896] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #892] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #888] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #884] + str r0, [sp, #4] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r6, [sp, #868] + ldr r2, [r0, #12] + add r0, sp, #808 + bl .LmulPv352x32(PLT) + adds r0, r4, r9 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #808 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #852] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #832 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + ldm r11, {r4, r5, r6, r8, r11} + adc r9, r0, r1 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #64] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #32] @ 4-byte Spill + adcs r0, r9, r11 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r10, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #760 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #804] + add r11, sp, #760 + ldr r6, [sp, #784] + ldr r4, [sp, #780] + ldr r8, [sp, #776] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #800] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #796] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #792] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #788] + str r0, [sp, #4] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r5, [sp, #772] + ldr r2, [r0, #16] + add r0, sp, #712 + bl .LmulPv352x32(PLT) + adds r0, r7, r9 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #716 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #756] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 
4-byte Reload + adcs r0, r0, r11 + add r11, sp, #740 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #24] @ 4-byte Spill + ldm r11, {r5, r6, r9, r11} + ldr r4, [sp, #712] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r8, [sp, #64] @ 4-byte Reload + ldr r7, [sp, #60] @ 4-byte Reload + adds r4, r8, r4 + adcs r0, r7, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r10, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r4, r0 + add r0, sp, #664 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #708] + add r11, sp, #664 + ldr r7, [sp, #688] + ldr r5, [sp, #684] + ldr r8, [sp, #680] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #704] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #700] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #696] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #692] + str r0, [sp, #4] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r6, [sp, #676] + ldr r2, [r0, #20] + add r0, sp, #616 + bl .LmulPv352x32(PLT) + adds r0, r4, r9 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #616 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #660] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #640 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #40] @ 
4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + ldm r11, {r4, r5, r6, r8, r11} + adc r9, r0, r1 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #64] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #32] @ 4-byte Spill + adcs r0, r9, r11 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r10, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #568 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #612] + add r11, sp, #568 + ldr r6, [sp, #592] + ldr r4, [sp, #588] + ldr r8, [sp, #584] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #608] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #604] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #600] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #596] + str r0, [sp, #4] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r5, [sp, #580] + ldr r2, [r0, #24] + add r0, sp, #520 + bl .LmulPv352x32(PLT) + adds r0, r7, r9 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #524 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #564] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #548 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #24] @ 4-byte Spill + ldm r11, {r5, r6, r9, r11} + ldr r4, [sp, #520] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r8, [sp, #64] @ 4-byte Reload + ldr r7, [sp, #60] @ 4-byte Reload + adds r4, 
r8, r4 + adcs r0, r7, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r10, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r4, r0 + add r0, sp, #472 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #516] + add r11, sp, #472 + ldr r7, [sp, #496] + ldr r5, [sp, #492] + ldr r8, [sp, #488] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #512] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #508] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #504] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #500] + str r0, [sp, #4] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r6, [sp, #484] + ldr r2, [r0, #28] + add r0, sp, #424 + bl .LmulPv352x32(PLT) + adds r0, r4, r9 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #424 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #468] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #448 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + ldm r11, {r4, r5, r6, r8, r11} + adc r9, r0, r1 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #64] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, 
[sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #32] @ 4-byte Spill + adcs r0, r9, r11 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r10, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #376 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #420] + add r11, sp, #376 + ldr r6, [sp, #400] + ldr r4, [sp, #396] + ldr r8, [sp, #392] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #416] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #412] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #408] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #404] + str r0, [sp, #4] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r5, [sp, #388] + ldr r2, [r0, #32] + add r0, sp, #328 + bl .LmulPv352x32(PLT) + adds r0, r7, r9 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #332 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #372] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #356 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #24] @ 4-byte Spill + ldm r11, {r5, r6, r9, r11} + ldr r4, [sp, #328] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r8, [sp, #64] @ 4-byte Reload + ldr r7, [sp, #60] @ 4-byte Reload + adds r4, r8, r4 + adcs r0, r7, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r10, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r4, r0 + add r0, sp, #280 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #324] + add r11, sp, #280 + ldr r7, [sp, #304] + ldr r5, [sp, #300] + ldr r8, [sp, #296] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, 
[sp, #320] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #316] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #312] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #308] + str r0, [sp, #4] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r6, [sp, #292] + ldr r2, [r0, #36] + add r0, sp, #232 + bl .LmulPv352x32(PLT) + adds r0, r4, r9 + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #232 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #276] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #256 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + ldm r11, {r4, r5, r6, r8, r11} + adc r9, r0, r1 + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #64] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #32] @ 4-byte Spill + adcs r0, r9, r11 + str r0, [sp, #28] @ 4-byte Spill + adc r0, r10, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #184 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #228] + add r11, sp, #184 + ldr r6, [sp, #208] + ldr r4, [sp, #204] + ldr r8, [sp, #200] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #224] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #220] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #216] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #212] + str r0, [sp, #4] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r5, [sp, #196] + ldr r2, [r0, #40] + add r0, sp, #136 + bl .LmulPv352x32(PLT) + adds r0, r7, r9 + ldr r1, [sp, #60] @ 4-byte Reload + ldr r2, [sp, #4] @ 4-byte Reload + ldr lr, [sp, #140] + add r9, sp, #172 + add r12, sp, #152 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + adcs r11, r1, r11 + ldr r1, [sp, #56] @ 
4-byte Reload + adcs r10, r1, r5 + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r8 + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r4 + ldr r4, [sp, #148] + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #144] + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #8] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #12] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #28] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #24] @ 4-byte Reload + adc r1, r1, r2 + ldr r2, [sp, #136] + str r1, [sp, #44] @ 4-byte Spill + adds r5, r0, r2 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r11, r11, lr + adcs r6, r10, r6 + mul r1, r5, r0 + str r1, [sp, #40] @ 4-byte Spill + ldm r9, {r7, r8, r9} + ldm r12, {r0, r1, r2, r3, r12} + str r6, [sp, #32] @ 4-byte Spill + ldr r6, [sp, #76] @ 4-byte Reload + adcs r10, r6, r4 + ldr r4, [sp, #72] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #84] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #88 + adc r9, r9, #0 + bl .LmulPv352x32(PLT) + add r7, sp, #88 + ldm r7, {r0, r1, r3, r7} + adds r0, r5, r0 + ldr r0, [sp, #32] @ 4-byte Reload + adcs r8, r11, r1 + str r8, [sp, #28] @ 4-byte Spill + adcs r6, r0, r3 + ldr r3, [sp, #104] + ldr r0, [sp, #36] @ 4-byte Reload + adcs r2, r10, r7 + str r6, [sp, #44] @ 4-byte Spill + str r2, [sp, #48] @ 4-byte Spill + adcs r7, r0, r3 + ldr r3, [sp, #108] + ldr r0, [sp, #72] @ 4-byte Reload + str r7, [sp, #52] @ 4-byte Spill + adcs r0, r0, r3 + ldr r3, [sp, #112] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r5, r0, r3 + ldr r3, [sp, #116] + ldr r0, [sp, #64] @ 4-byte Reload + str r5, [sp, #56] @ 4-byte Spill + adcs lr, r0, r3 + ldr r3, [sp, #120] + ldr r0, [sp, #80] @ 4-byte Reload + str lr, [sp, #60] @ 4-byte Spill + adcs r0, r0, r3 + ldr r3, [sp, #124] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + ldr r3, [sp, #128] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r10, r0, r3 + ldr r3, [sp, #132] + str r10, [sp, #64] @ 4-byte Spill + adc r12, r9, r3 + mov r3, r4 + str r12, [sp, #40] @ 4-byte Spill + ldmib r3, {r0, r1, r9} + ldr r4, [r3, #16] + ldr r11, [r3] + str r4, [sp, #20] @ 4-byte Spill + ldr r4, [r3, #20] + subs r11, r8, r11 + ldr r8, [r3, #36] + sbcs r0, r6, r0 + sbcs r1, r2, r1 + sbcs r2, r7, r9 + ldr r9, [r3, #32] + ldr r7, [sp, #80] @ 4-byte Reload + str r4, [sp, #24] @ 4-byte Spill + ldr r4, [r3, #24] + str r4, [sp, #32] @ 4-byte Spill + ldr r4, [r3, #28] + ldr r3, [r3, #40] + str r4, [sp, #36] @ 4-byte 
Spill + str r3, [sp, #84] @ 4-byte Spill + ldr r3, [sp, #72] @ 4-byte Reload + ldr r4, [sp, #20] @ 4-byte Reload + ldr r6, [sp, #36] @ 4-byte Reload + sbcs r3, r3, r4 + ldr r4, [sp, #24] @ 4-byte Reload + sbcs r4, r5, r4 + ldr r5, [sp, #32] @ 4-byte Reload + sbcs r5, lr, r5 + sbcs lr, r7, r6 + ldr r7, [sp, #76] @ 4-byte Reload + ldr r6, [sp, #84] @ 4-byte Reload + sbcs r9, r7, r9 + ldr r7, [sp, #28] @ 4-byte Reload + sbcs r10, r10, r8 + ldr r8, [sp, #68] @ 4-byte Reload + sbc r12, r12, r6 + asr r6, r12, #31 + cmp r6, #0 + movlt r11, r7 + ldr r7, [sp, #44] @ 4-byte Reload + str r11, [r8] + movlt r0, r7 + str r0, [r8, #4] + ldr r0, [sp, #48] @ 4-byte Reload + movlt r1, r0 + ldr r0, [sp, #52] @ 4-byte Reload + cmp r6, #0 + str r1, [r8, #8] + movlt r2, r0 + ldr r0, [sp, #72] @ 4-byte Reload + str r2, [r8, #12] + movlt r3, r0 + ldr r0, [sp, #56] @ 4-byte Reload + str r3, [r8, #16] + movlt r4, r0 + ldr r0, [sp, #60] @ 4-byte Reload + cmp r6, #0 + str r4, [r8, #20] + movlt r5, r0 + ldr r0, [sp, #80] @ 4-byte Reload + str r5, [r8, #24] + movlt lr, r0 + ldr r0, [sp, #76] @ 4-byte Reload + str lr, [r8, #28] + movlt r9, r0 + ldr r0, [sp, #64] @ 4-byte Reload + cmp r6, #0 + movlt r10, r0 + ldr r0, [sp, #40] @ 4-byte Reload + movlt r12, r0 + add r0, r8, #32 + stm r0, {r9, r10, r12} + add sp, sp, #124 + add sp, sp, #1024 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end163: + .size mcl_fp_montNF11L, .Lfunc_end163-mcl_fp_montNF11L + .cantunwind + .fnend + + .globl mcl_fp_montRed11L + .align 2 + .type mcl_fp_montRed11L,%function +mcl_fp_montRed11L: @ @mcl_fp_montRed11L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #676 + sub sp, sp, #676 + mov r10, r2 + str r0, [sp, #136] @ 4-byte Spill + ldr r2, [r1, #4] + ldr r5, [r1] + ldr r0, [r10] + str r2, [sp, #64] @ 4-byte Spill + ldr r2, [r1, #8] + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [r10, #4] + str r2, [sp, #60] @ 4-byte Spill + ldr r2, [r1, #12] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [r10, #8] + str r2, [sp, #56] @ 4-byte Spill + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [r10, #12] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [r10, #16] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [r10, #20] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [r10, #24] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [r10, #-4] + str r0, [sp, #140] @ 4-byte Spill + mul r2, r5, r0 + ldr r0, [r10, #28] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [r10, #32] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [r10, #36] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [r10, #40] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [r1, #64] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r1, #68] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r1, #72] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r1, #76] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r1, #80] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r1, #84] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [r1, #32] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r1, #36] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r1, #40] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r1, #44] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r1, #48] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r1, #56] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r1, #60] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [r1, #52] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r1, #28] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r1, #24] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, 
[r1, #20] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [r1, #16] + mov r1, r10 + str r0, [sp, #8] @ 4-byte Spill + add r0, sp, #624 + bl .LmulPv352x32(PLT) + add r11, sp, #656 + add lr, sp, #624 + ldm r11, {r4, r8, r9, r11} + ldr r7, [sp, #652] + ldr r6, [sp, #648] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r5, r0 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r5, r0, r1 + ldr r0, [sp, #60] @ 4-byte Reload + mov r1, r10 + adcs r0, r0, r2 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + mul r2, r5, r0 + add r0, sp, #576 + bl .LmulPv352x32(PLT) + ldr r4, [sp, #576] + add r9, sp, #584 + ldr r12, [sp, #620] + ldr lr, [sp, #616] + ldr r2, [sp, #612] + ldr r3, [sp, #608] + ldr r11, [sp, #604] + ldr r7, [sp, #600] + ldr r6, [sp, #580] + ldm r9, {r0, r1, r8, r9} + adds r4, r5, r4 + ldr r4, [sp, #64] @ 4-byte Reload + adcs r5, r4, r6 + ldr r4, [sp, #60] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #140] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r10 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + mov r9, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r5, r4 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, 
r12 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #12] @ 4-byte Spill + add r0, sp, #528 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #572] + add r11, sp, #560 + add lr, sp, #528 + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r5, r8, r11} + ldr r6, [sp, #556] + ldr r7, [sp, #552] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r9, r0 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r1, r0, r1 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #8] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + mov r5, r1 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r1, r4 + mov r1, r10 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + add r0, sp, #480 + bl .LmulPv352x32(PLT) + ldr r4, [sp, #480] + add r9, sp, #488 + ldr r12, [sp, #524] + ldr lr, [sp, #520] + ldr r2, [sp, #516] + ldr r3, [sp, #512] + ldr r11, [sp, #508] + ldr r7, [sp, #504] + ldr r6, [sp, #484] + ldm r9, {r0, r1, r8, r9} + adds r4, r5, r4 + ldr r4, [sp, #64] @ 4-byte Reload + adcs r5, r4, r6 + ldr r4, [sp, #60] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #140] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r10 + str r0, [sp, #60] 
@ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r5, r4 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #432 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #476] + add r11, sp, #460 + add lr, sp, #432 + str r0, [sp, #16] @ 4-byte Spill + ldm r11, {r6, r8, r9, r11} + ldr r7, [sp, #456] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r5, r0 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r1, r0, r1 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r1, r4 + mov r4, r1 + mov r1, r10 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #384 + bl .LmulPv352x32(PLT) + ldr r6, [sp, #384] + add r9, sp, #392 + ldr r12, [sp, #428] + ldr lr, [sp, #424] + ldr r2, [sp, #420] + ldr r3, [sp, #416] + ldr r11, [sp, #412] + ldr r5, [sp, 
#408] + ldr r7, [sp, #388] + ldm r9, {r0, r1, r8, r9} + adds r4, r4, r6 + ldr r4, [sp, #64] @ 4-byte Reload + adcs r6, r4, r7 + ldr r4, [sp, #60] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #140] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r10 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + mov r5, r6 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r6, r4 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #336 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #380] + add r11, sp, #364 + add lr, sp, #336 + str r0, [sp, #24] @ 4-byte Spill + ldm r11, {r6, r8, r9, r11} + ldr r7, [sp, #360] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r5, r0 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r1, r0, r1 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r1, r4 + mov r4, r1 + mov r1, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + add r0, sp, #288 + bl .LmulPv352x32(PLT) + ldr r6, [sp, #288] + add r9, sp, #296 + ldr r12, [sp, #332] + ldr lr, [sp, #328] + ldr r2, [sp, #324] + ldr r3, [sp, #320] + ldr r11, [sp, #316] + ldr r5, [sp, #312] 
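+@ One round of word-serial Montgomery reduction: r2 held m = t0 * inv
+@ (inv = -p^{-1} mod 2^32, loaded from p[-4] and spilled to [sp, #140]
+@ earlier), and .LmulPv352x32 just wrote the 12-word product m*p at
+@ [sp, #288]. The adds/adcs chain below folds that product into the
+@ accumulator so the low word cancels and the reduction window slides up
+@ one limb; roughly, as a pseudo-C sketch:
+@   m = t[0] * inv;  t += (wide)m * p;  t >>= 32;
+@ After 11 such rounds, the trailing subs/sbcs + movne sequence performs
+@ the final conditional subtract of p.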
+ ldr r7, [sp, #292] + ldm r9, {r0, r1, r8, r9} + adds r4, r4, r6 + ldr r4, [sp, #64] @ 4-byte Reload + adcs r6, r4, r7 + ldr r4, [sp, #60] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #140] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r10 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + mov r5, r6 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r6, r4 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + add r0, sp, #240 + bl .LmulPv352x32(PLT) + ldr r0, [sp, #284] + add r11, sp, #264 + add lr, sp, #240 + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r6, r7, r8, r9, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r5, r0 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r5, r0, r1 + ldr r0, [sp, #60] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r5, r4 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r11, r0, r11 + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r10 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #192 + bl .LmulPv352x32(PLT) + add r6, sp, #192 + add r7, sp, #208 + ldm r6, {r0, r1, r3, r6} + ldr r12, [sp, #236] + ldr lr, [sp, #232] + adds r0, r5, r0 + ldr r0, [sp, #12] @ 4-byte Reload + adcs r8, r0, r1 + ldr r0, [sp, #16] @ 4-byte Reload + mul r2, r8, r4 + adcs r0, r0, r3 + ldr r3, [sp, #228] + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #224] + str r0, [sp, #52] @ 4-byte Spill + ldm r7, {r0, r1, r4, r7} + ldr r5, [sp, #88] @ 4-byte Reload + adcs r9, r5, r0 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r10 + str r0, [sp, #48] @ 4-byte 
Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r4, r0, r4 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r5, r0, r6 + ldr r0, [sp, #68] @ 4-byte Reload + adcs r11, r11, r3 + adcs r0, r0, lr + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r6, r0, #0 + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + add r0, sp, #144 + bl .LmulPv352x32(PLT) + add r3, sp, #144 + ldm r3, {r0, r1, r2, r3} + adds r0, r8, r0 + ldr r0, [sp, #140] @ 4-byte Reload + adcs r12, r0, r1 + ldr r0, [sp, #52] @ 4-byte Reload + ldr r1, [sp, #160] + str r12, [sp, #44] @ 4-byte Spill + adcs r2, r0, r2 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r3, r9, r3 + str r2, [sp, #52] @ 4-byte Spill + str r3, [sp, #56] @ 4-byte Spill + adcs r7, r0, r1 + ldr r1, [sp, #164] + ldr r0, [sp, #76] @ 4-byte Reload + str r7, [sp, #60] @ 4-byte Spill + adcs r8, r4, r1 + ldr r1, [sp, #168] + str r8, [sp, #64] @ 4-byte Spill + adcs r4, r0, r1 + ldr r1, [sp, #172] + ldr r0, [sp, #84] @ 4-byte Reload + str r4, [sp, #68] @ 4-byte Spill + adcs r5, r5, r1 + ldr r1, [sp, #176] + str r5, [sp, #72] @ 4-byte Spill + adcs r11, r11, r1 + ldr r1, [sp, #180] + str r11, [sp, #76] @ 4-byte Spill + adcs r9, r0, r1 + ldr r1, [sp, #184] + ldr r0, [sp, #88] @ 4-byte Reload + str r9, [sp, #84] @ 4-byte Spill + adcs lr, r0, r1 + ldr r1, [sp, #188] + str lr, [sp, #88] @ 4-byte Spill + adcs r0, r6, r1 + ldr r1, [sp, #128] @ 4-byte Reload + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + ldr r6, [sp, #140] @ 4-byte Reload + adc r10, r0, #0 + ldr r0, [sp, #132] @ 4-byte Reload + subs r0, r12, r0 + sbcs r1, r2, r1 + ldr r2, [sp, #124] @ 4-byte Reload + sbcs r2, r3, r2 + ldr r3, [sp, #108] @ 4-byte Reload + sbcs r3, r7, r3 + ldr r7, [sp, #112] @ 4-byte Reload + sbcs r12, r8, r7 + ldr r7, [sp, #116] @ 4-byte Reload + sbcs r8, r4, r7 + ldr r4, [sp, #120] @ 4-byte Reload + ldr r7, [sp, #96] @ 4-byte Reload + sbcs r4, r5, r4 + ldr r5, [sp, #92] @ 4-byte Reload + sbcs r5, r11, r5 + sbcs r11, r9, r7 + ldr r7, [sp, #100] @ 4-byte Reload + sbcs r9, lr, r7 + ldr r7, [sp, #104] @ 4-byte Reload + sbcs lr, r6, r7 + ldr r7, [sp, #44] @ 4-byte Reload + sbc r6, r10, #0 + ldr r10, [sp, #136] @ 4-byte Reload + ands r6, r6, #1 + movne r0, r7 + str r0, [r10] + ldr r0, [sp, #52] @ 4-byte Reload + movne r1, r0 + ldr r0, [sp, #56] @ 4-byte Reload + str r1, [r10, #4] + movne r2, r0 + ldr r0, [sp, #60] @ 4-byte Reload + cmp r6, #0 + str r2, [r10, #8] + movne r3, r0 + ldr r0, [sp, #64] @ 4-byte Reload + str r3, [r10, #12] + movne r12, r0 + ldr r0, [sp, #68] @ 4-byte Reload + str r12, [r10, #16] + movne r8, r0 + ldr r0, [sp, #72] @ 4-byte Reload + cmp r6, #0 + str r8, [r10, #20] + movne r4, r0 + ldr r0, [sp, #76] @ 4-byte Reload + str r4, [r10, #24] + movne r5, r0 + ldr r0, [sp, #84] @ 4-byte Reload + str r5, [r10, #28] + movne r11, r0 + ldr r0, [sp, #88] @ 4-byte Reload + cmp r6, #0 + str r11, [r10, #32] + movne r9, r0 + ldr r0, [sp, #140] @ 4-byte Reload + str r9, [r10, #36] + movne lr, r0 + str lr, [r10, #40] + add sp, sp, #676 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end164: + .size mcl_fp_montRed11L, .Lfunc_end164-mcl_fp_montRed11L + .cantunwind + .fnend + + .globl mcl_fp_addPre11L + .align 2 + .type mcl_fp_addPre11L,%function +mcl_fp_addPre11L: @ @mcl_fp_addPre11L + .fnstart +@ BB#0: + .save {r4, r5, r6, 
r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #20 + sub sp, sp, #20 + ldm r1, {r3, r12} + ldr r8, [r1, #8] + ldr r9, [r1, #12] + ldmib r2, {r5, r6, r7, r10} + ldr r4, [r2, #20] + ldr r11, [r2] + str r4, [sp] @ 4-byte Spill + ldr r4, [r2, #24] + adds lr, r11, r3 + ldr r3, [r2, #36] + ldr r11, [r2, #32] + adcs r5, r5, r12 + add r12, r1, #16 + adcs r6, r6, r8 + adcs r7, r7, r9 + add r9, r1, #32 + str r4, [sp, #4] @ 4-byte Spill + ldr r4, [r2, #28] + ldr r2, [r2, #40] + str r3, [sp, #8] @ 4-byte Spill + str r4, [sp, #16] @ 4-byte Spill + str r2, [sp, #12] @ 4-byte Spill + ldm r9, {r4, r8, r9} + ldm r12, {r1, r2, r3, r12} + str lr, [r0] + stmib r0, {r5, r6} + str r7, [r0, #12] + ldr r7, [sp] @ 4-byte Reload + adcs r1, r10, r1 + str r1, [r0, #16] + ldr r1, [sp, #4] @ 4-byte Reload + adcs r2, r7, r2 + str r2, [r0, #20] + ldr r2, [sp, #8] @ 4-byte Reload + adcs r1, r1, r3 + ldr r3, [sp, #12] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #16] @ 4-byte Reload + adcs r1, r1, r12 + str r1, [r0, #28] + adcs r1, r11, r4 + add r0, r0, #32 + adcs r2, r2, r8 + adcs r3, r3, r9 + stm r0, {r1, r2, r3} + mov r0, #0 + adc r0, r0, #0 + add sp, sp, #20 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end165: + .size mcl_fp_addPre11L, .Lfunc_end165-mcl_fp_addPre11L + .cantunwind + .fnend + + .globl mcl_fp_subPre11L + .align 2 + .type mcl_fp_subPre11L,%function +mcl_fp_subPre11L: @ @mcl_fp_subPre11L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #20 + sub sp, sp, #20 + ldmib r2, {r8, r12, lr} + ldr r3, [r2, #16] + ldr r7, [r2] + ldr r6, [r1] + ldr r5, [r1, #4] + ldr r4, [r1, #8] + ldr r11, [r2, #32] + ldr r10, [r2, #40] + ldr r9, [r1, #36] + str r3, [sp, #4] @ 4-byte Spill + ldr r3, [r2, #20] + subs r6, r6, r7 + ldr r7, [r2, #36] + sbcs r5, r5, r8 + ldr r8, [r1, #40] + sbcs r4, r4, r12 + str r3, [sp, #8] @ 4-byte Spill + ldr r3, [r2, #24] + str r7, [sp] @ 4-byte Spill + ldr r7, [r1, #32] + str r3, [sp, #12] @ 4-byte Spill + ldr r3, [r2, #28] + str r3, [sp, #16] @ 4-byte Spill + ldr r3, [r1, #12] + sbcs r12, r3, lr + add lr, r1, #16 + ldm lr, {r1, r2, r3, lr} + str r6, [r0] + str r5, [r0, #4] + str r4, [r0, #8] + ldr r4, [sp, #4] @ 4-byte Reload + ldr r6, [sp, #8] @ 4-byte Reload + str r12, [r0, #12] + sbcs r1, r1, r4 + str r1, [r0, #16] + ldr r1, [sp, #12] @ 4-byte Reload + sbcs r2, r2, r6 + str r2, [r0, #20] + ldr r2, [sp] @ 4-byte Reload + sbcs r1, r3, r1 + str r1, [r0, #24] + ldr r1, [sp, #16] @ 4-byte Reload + sbcs r1, lr, r1 + str r1, [r0, #28] + sbcs r1, r7, r11 + add r0, r0, #32 + sbcs r2, r9, r2 + sbcs r3, r8, r10 + stm r0, {r1, r2, r3} + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + add sp, sp, #20 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end166: + .size mcl_fp_subPre11L, .Lfunc_end166-mcl_fp_subPre11L + .cantunwind + .fnend + + .globl mcl_fp_shr1_11L + .align 2 + .type mcl_fp_shr1_11L,%function +mcl_fp_shr1_11L: @ @mcl_fp_shr1_11L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + ldmib r1, {r2, r3, r12, lr} + add r8, r1, #20 + add r11, r1, #32 + ldm r8, {r4, r5, r8} + ldr r7, [r1] + ldm r11, {r9, r10, r11} + lsrs r1, r12, #1 + lsr r6, r2, #1 + rrx r1, r3 + lsrs r2, r2, #1 + orr r6, r6, r3, lsl #31 + lsr r3, r11, #1 + rrx r2, r7 + stm r0, {r2, r6} + str r1, [r0, #8] + lsr r1, r12, #1 + lsr r2, r10, #1 + orr r1, r1, lr, lsl #31 + orr r2, r2, r11, lsl #31 + str r1, [r0, #12] + lsrs r1, r4, #1 
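+@ lsrs/rrx pair: the lsrs above shifted its limb right by one and left the
+@ shifted-out bit 0 in the carry flag; the rrx below rotates the next-lower
+@ limb right through carry, splicing adjacent words of the 11-word
+@ right-shift-by-1. The flag-free form used elsewhere in this function
+@ (lsr then orr ..., lsl #31) computes the same splice.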
+ rrx r1, lr + str r1, [r0, #16] + lsr r1, r4, #1 + orr r1, r1, r5, lsl #31 + str r1, [r0, #20] + lsrs r1, r8, #1 + rrx r1, r5 + str r1, [r0, #24] + lsr r1, r8, #1 + orr r1, r1, r9, lsl #31 + str r1, [r0, #28] + lsrs r1, r10, #1 + add r0, r0, #32 + rrx r1, r9 + stm r0, {r1, r2, r3} + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end167: + .size mcl_fp_shr1_11L, .Lfunc_end167-mcl_fp_shr1_11L + .cantunwind + .fnend + + .globl mcl_fp_add11L + .align 2 + .type mcl_fp_add11L,%function +mcl_fp_add11L: @ @mcl_fp_add11L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #32 + sub sp, sp, #32 + ldm r1, {r12, lr} + ldr r5, [r2] + ldr r8, [r1, #8] + ldr r9, [r1, #12] + ldmib r2, {r4, r6, r7} + adds r5, r5, r12 + ldr r12, [r1, #32] + adcs r4, r4, lr + str r5, [sp, #28] @ 4-byte Spill + ldr r5, [r1, #24] + ldr lr, [r1, #40] + adcs r6, r6, r8 + str r4, [sp, #24] @ 4-byte Spill + ldr r4, [r1, #20] + adcs r7, r7, r9 + str r6, [sp, #12] @ 4-byte Spill + ldr r6, [r1, #16] + str r7, [sp, #8] @ 4-byte Spill + ldr r7, [r2, #16] + adcs r9, r7, r6 + ldr r7, [r2, #20] + str r9, [sp] @ 4-byte Spill + adcs r7, r7, r4 + ldr r4, [r2, #24] + str r7, [sp, #4] @ 4-byte Spill + adcs r8, r4, r5 + ldr r4, [r1, #28] + ldr r5, [r2, #28] + adcs r6, r5, r4 + ldr r5, [r2, #32] + ldr r4, [r1, #36] + ldr r1, [r2, #36] + ldr r2, [r2, #40] + adcs r10, r5, r12 + ldr r12, [sp, #24] @ 4-byte Reload + adcs r1, r1, r4 + ldr r4, [sp, #8] @ 4-byte Reload + adcs r11, r2, lr + ldr r2, [sp, #28] @ 4-byte Reload + ldr lr, [sp, #12] @ 4-byte Reload + str r1, [sp, #20] @ 4-byte Spill + str r2, [r0] + str r12, [r0, #4] + str lr, [r0, #8] + str r4, [r0, #12] + str r9, [r0, #16] + str r7, [r0, #20] + str r8, [r0, #24] + str r6, [r0, #28] + str r10, [r0, #32] + str r1, [r0, #36] + mov r1, #0 + str r11, [r0, #40] + mov r9, r6 + adc r1, r1, #0 + str r1, [sp, #16] @ 4-byte Spill + ldm r3, {r1, r7} + ldr r5, [r3, #8] + ldr r6, [r3, #12] + subs r1, r2, r1 + ldr r2, [sp] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + sbcs r1, r12, r7 + str r1, [sp, #24] @ 4-byte Spill + sbcs r1, lr, r5 + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [r3, #16] + sbcs r5, r4, r6 + sbcs r7, r2, r1 + ldr r1, [r3, #20] + ldr r2, [sp, #4] @ 4-byte Reload + sbcs r4, r2, r1 + ldr r1, [r3, #24] + sbcs r12, r8, r1 + ldr r1, [r3, #28] + add r3, r3, #32 + sbcs lr, r9, r1 + ldm r3, {r1, r2, r3} + ldr r6, [sp, #20] @ 4-byte Reload + sbcs r1, r10, r1 + sbcs r2, r6, r2 + ldr r6, [sp, #16] @ 4-byte Reload + sbcs r3, r11, r3 + sbc r6, r6, #0 + tst r6, #1 + bne .LBB168_2 +@ BB#1: @ %nocarry + ldr r6, [sp, #28] @ 4-byte Reload + str r6, [r0] + ldr r6, [sp, #24] @ 4-byte Reload + str r6, [r0, #4] + ldr r6, [sp, #12] @ 4-byte Reload + str r6, [r0, #8] + str r5, [r0, #12] + str r7, [r0, #16] + str r4, [r0, #20] + str r12, [r0, #24] + str lr, [r0, #28] + add r0, r0, #32 + stm r0, {r1, r2, r3} +.LBB168_2: @ %carry + add sp, sp, #32 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end168: + .size mcl_fp_add11L, .Lfunc_end168-mcl_fp_add11L + .cantunwind + .fnend + + .globl mcl_fp_addNF11L + .align 2 + .type mcl_fp_addNF11L,%function +mcl_fp_addNF11L: @ @mcl_fp_addNF11L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #56 + sub sp, sp, #56 + ldm r1, {r5, r8, lr} + ldr r6, [r2] + ldr r12, [r1, #12] + ldmib r2, {r4, r7, r9} + ldr r11, [r1, #24] + adds r10, r6, r5 + adcs r4, r4, r8 + ldr r8, [r1, #20] + adcs r7, r7, lr + str 
r4, [sp, #32] @ 4-byte Spill + ldr r4, [r2, #16] + ldr lr, [r1, #36] + str r7, [sp, #40] @ 4-byte Spill + ldr r7, [r1, #16] + adcs r6, r9, r12 + ldr r12, [r2, #36] + str r6, [sp, #16] @ 4-byte Spill + adcs r7, r4, r7 + ldr r4, [r2, #28] + str r7, [sp, #44] @ 4-byte Spill + ldr r7, [r2, #20] + adcs r7, r7, r8 + str r7, [sp, #52] @ 4-byte Spill + ldr r7, [r2, #24] + adcs r8, r7, r11 + ldr r7, [r1, #28] + ldr r11, [r1, #40] + str r8, [sp, #20] @ 4-byte Spill + adcs r7, r4, r7 + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [r1, #32] + ldr r1, [r2, #32] + ldr r2, [r2, #40] + adcs r4, r1, r7 + adcs r1, r12, lr + str r4, [sp, #24] @ 4-byte Spill + str r1, [sp, #48] @ 4-byte Spill + adc r9, r2, r11 + ldmib r3, {r1, r2, lr} + ldr r5, [r3, #20] + ldr r11, [r3] + ldr r7, [r3, #16] + ldr r12, [r3, #24] + str r5, [sp, #12] @ 4-byte Spill + ldr r5, [r3, #28] + subs r11, r10, r11 + str r5, [sp, #28] @ 4-byte Spill + ldr r5, [sp, #32] @ 4-byte Reload + sbcs r1, r5, r1 + ldr r5, [sp, #40] @ 4-byte Reload + sbcs r2, r5, r2 + ldr r5, [r3, #32] + sbcs lr, r6, lr + ldr r6, [sp, #36] @ 4-byte Reload + str r5, [sp, #8] @ 4-byte Spill + ldr r5, [r3, #36] + ldr r3, [r3, #40] + str r3, [sp, #4] @ 4-byte Spill + ldr r3, [sp, #44] @ 4-byte Reload + str r5, [sp] @ 4-byte Spill + ldr r5, [sp, #12] @ 4-byte Reload + sbcs r7, r3, r7 + ldr r3, [sp, #52] @ 4-byte Reload + sbcs r3, r3, r5 + ldr r5, [sp, #28] @ 4-byte Reload + sbcs r12, r8, r12 + sbcs r8, r6, r5 + ldr r5, [sp, #8] @ 4-byte Reload + sbcs r4, r4, r5 + ldr r5, [sp] @ 4-byte Reload + str r4, [sp, #12] @ 4-byte Spill + ldr r4, [sp, #48] @ 4-byte Reload + sbcs r4, r4, r5 + ldr r5, [sp, #32] @ 4-byte Reload + str r4, [sp, #28] @ 4-byte Spill + ldr r4, [sp, #4] @ 4-byte Reload + sbc r6, r9, r4 + asr r4, r6, #31 + cmp r4, #0 + movlt r11, r10 + movlt r1, r5 + str r11, [r0] + str r1, [r0, #4] + ldr r1, [sp, #40] @ 4-byte Reload + movlt r2, r1 + ldr r1, [sp, #16] @ 4-byte Reload + cmp r4, #0 + str r2, [r0, #8] + ldr r2, [sp, #28] @ 4-byte Reload + movlt lr, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str lr, [r0, #12] + movlt r7, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r7, [r0, #16] + movlt r3, r1 + ldr r1, [sp, #20] @ 4-byte Reload + cmp r4, #0 + str r3, [r0, #20] + ldr r3, [sp, #12] @ 4-byte Reload + movlt r12, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r12, [r0, #24] + movlt r8, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r8, [r0, #28] + movlt r3, r1 + ldr r1, [sp, #48] @ 4-byte Reload + cmp r4, #0 + movlt r6, r9 + str r3, [r0, #32] + movlt r2, r1 + str r2, [r0, #36] + str r6, [r0, #40] + add sp, sp, #56 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end169: + .size mcl_fp_addNF11L, .Lfunc_end169-mcl_fp_addNF11L + .cantunwind + .fnend + + .globl mcl_fp_sub11L + .align 2 + .type mcl_fp_sub11L,%function +mcl_fp_sub11L: @ @mcl_fp_sub11L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #48 + sub sp, sp, #48 + mov r10, r3 + ldr r12, [r2] + ldr r9, [r2, #4] + ldr r8, [r2, #8] + ldr r3, [r2, #12] + ldm r1, {r4, r5, r6, r7} + subs r4, r4, r12 + sbcs r5, r5, r9 + str r4, [sp, #36] @ 4-byte Spill + ldr r4, [r2, #24] + sbcs r6, r6, r8 + str r5, [sp, #44] @ 4-byte Spill + ldr r5, [r2, #20] + add r8, r1, #32 + sbcs r12, r7, r3 + str r6, [sp, #40] @ 4-byte Spill + ldr r6, [r2, #16] + ldr r7, [r1, #16] + ldr r3, [sp, #36] @ 4-byte Reload + str r12, [sp, #24] @ 4-byte Spill + sbcs r11, r7, r6 + ldr r6, [r1, #20] + ldr r7, [r2, #40] + sbcs r9, r6, r5 + ldr r5, [r1, #24] + sbcs r6, r5, r4 + 
ldr r4, [r2, #28] + ldr r5, [r1, #28] + str r6, [sp, #28] @ 4-byte Spill + sbcs lr, r5, r4 + ldr r4, [r2, #36] + ldr r5, [r2, #32] + str lr, [sp, #20] @ 4-byte Spill + str r4, [sp, #32] @ 4-byte Spill + ldm r8, {r2, r4, r8} + str r3, [r0] + sbcs r1, r2, r5 + ldr r2, [sp, #32] @ 4-byte Reload + sbcs r2, r4, r2 + mov r4, r3 + ldr r3, [sp, #44] @ 4-byte Reload + sbcs r8, r8, r7 + mov r7, #0 + sbc r7, r7, #0 + tst r7, #1 + str r3, [r0, #4] + ldr r3, [sp, #40] @ 4-byte Reload + str r3, [r0, #8] + add r3, r0, #32 + str r12, [r0, #12] + str r11, [r0, #16] + str r9, [r0, #20] + str r6, [r0, #24] + str lr, [r0, #28] + stm r3, {r1, r2, r8} + beq .LBB170_2 +@ BB#1: @ %carry + ldr r3, [r10, #32] + str r3, [sp, #12] @ 4-byte Spill + ldr r3, [r10, #36] + str r3, [sp, #16] @ 4-byte Spill + ldr r3, [r10, #40] + str r3, [sp, #32] @ 4-byte Spill + ldmib r10, {r5, lr} + ldr r3, [r10, #20] + ldr r6, [sp, #44] @ 4-byte Reload + ldr r7, [r10, #12] + ldr r12, [r10, #16] + str r3, [sp] @ 4-byte Spill + ldr r3, [r10, #24] + str r3, [sp, #4] @ 4-byte Spill + ldr r3, [r10, #28] + str r3, [sp, #8] @ 4-byte Spill + ldr r3, [r10] + adds r3, r3, r4 + ldr r4, [sp, #40] @ 4-byte Reload + adcs r5, r5, r6 + stm r0, {r3, r5} + ldr r3, [sp, #24] @ 4-byte Reload + adcs r4, lr, r4 + str r4, [r0, #8] + adcs r3, r7, r3 + ldr r7, [sp, #4] @ 4-byte Reload + str r3, [r0, #12] + adcs r3, r12, r11 + str r3, [r0, #16] + ldr r3, [sp] @ 4-byte Reload + adcs r3, r3, r9 + str r3, [r0, #20] + ldr r3, [sp, #28] @ 4-byte Reload + adcs r3, r7, r3 + ldr r7, [sp, #8] @ 4-byte Reload + str r3, [r0, #24] + ldr r3, [sp, #20] @ 4-byte Reload + adcs r3, r7, r3 + str r3, [r0, #28] + ldr r3, [sp, #12] @ 4-byte Reload + add r0, r0, #32 + adcs r1, r3, r1 + ldr r3, [sp, #16] @ 4-byte Reload + adcs r2, r3, r2 + ldr r3, [sp, #32] @ 4-byte Reload + adc r3, r3, r8 + stm r0, {r1, r2, r3} +.LBB170_2: @ %nocarry + add sp, sp, #48 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end170: + .size mcl_fp_sub11L, .Lfunc_end170-mcl_fp_sub11L + .cantunwind + .fnend + + .globl mcl_fp_subNF11L + .align 2 + .type mcl_fp_subNF11L,%function +mcl_fp_subNF11L: @ @mcl_fp_subNF11L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #68 + sub sp, sp, #68 + mov r12, r0 + ldr r0, [r2, #32] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [r2, #36] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r2, #40] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r1, #32] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r1, #36] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r1, #40] + str r0, [sp, #44] @ 4-byte Spill + ldm r2, {r8, r10} + ldr r0, [r2, #8] + ldr r5, [r2, #16] + ldr r11, [r2, #20] + ldr lr, [r1, #16] + ldr r6, [r1, #20] + ldr r9, [r1, #24] + ldr r7, [r1, #28] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r2, #12] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r2, #24] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [r2, #28] + ldr r2, [r1, #8] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r1, #12] + ldm r1, {r1, r4} + subs r1, r1, r8 + sbcs r8, r4, r10 + ldr r4, [sp, #32] @ 4-byte Reload + str r8, [sp, #16] @ 4-byte Spill + sbcs r2, r2, r4 + str r2, [sp, #24] @ 4-byte Spill + ldr r2, [sp, #28] @ 4-byte Reload + sbcs r4, r0, r2 + ldr r0, [sp, #52] @ 4-byte Reload + ldr r2, [sp, #36] @ 4-byte Reload + sbcs r5, lr, r5 + ldr lr, [r3, #12] + str r4, [sp, #20] @ 4-byte Spill + sbcs r11, r6, r11 + mov r6, r1 + str r5, [sp, #28] @ 4-byte Spill + str r11, [sp, #32] @ 4-byte Spill + sbcs r0, r9, r0 + ldr r9, [sp, #24] @ 
4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + sbcs r0, r7, r0 + ldr r7, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + sbcs r0, r2, r0 + ldr r2, [sp, #40] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + sbcs r10, r2, r0 + ldr r2, [sp, #56] @ 4-byte Reload + str r10, [sp, #48] @ 4-byte Spill + sbc r0, r7, r2 + ldr r2, [r3, #36] + ldr r7, [r3, #4] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r3, #32] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r3, #40] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r3, #16] + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [r3, #8] + str r0, [sp] @ 4-byte Spill + ldr r0, [r3, #20] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [r3, #24] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [r3, #28] + ldr r3, [r3] + adds r1, r6, r3 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp] @ 4-byte Reload + ldr r3, [sp, #8] @ 4-byte Reload + adcs r7, r8, r7 + adcs r2, r9, r2 + adcs lr, r4, lr + adcs r4, r5, r0 + ldr r0, [sp, #4] @ 4-byte Reload + adcs r5, r11, r0 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r8, r0, r3 + ldr r3, [sp, #64] @ 4-byte Reload + ldr r0, [sp, #12] @ 4-byte Reload + adcs r11, r3, r0 + ldr r3, [sp, #60] @ 4-byte Reload + ldr r0, [sp, #40] @ 4-byte Reload + adcs r3, r3, r0 + str r3, [sp, #40] @ 4-byte Spill + ldr r3, [sp, #44] @ 4-byte Reload + adcs r0, r10, r3 + ldr r3, [sp, #36] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r10, r0, r3 + asr r3, r0, #31 + ldr r0, [sp, #16] @ 4-byte Reload + cmp r3, #0 + movge r1, r6 + movge r2, r9 + str r1, [r12] + ldr r1, [sp, #60] @ 4-byte Reload + movge r7, r0 + ldr r0, [sp, #20] @ 4-byte Reload + cmp r3, #0 + str r7, [r12, #4] + str r2, [r12, #8] + ldr r2, [sp, #48] @ 4-byte Reload + movge lr, r0 + ldr r0, [sp, #28] @ 4-byte Reload + str lr, [r12, #12] + movge r4, r0 + ldr r0, [sp, #32] @ 4-byte Reload + str r4, [r12, #16] + movge r5, r0 + ldr r0, [sp, #52] @ 4-byte Reload + cmp r3, #0 + str r5, [r12, #20] + movge r8, r0 + ldr r0, [sp, #64] @ 4-byte Reload + str r8, [r12, #24] + movge r11, r0 + ldr r0, [sp, #40] @ 4-byte Reload + movge r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + cmp r3, #0 + str r11, [r12, #28] + movge r1, r2 + ldr r2, [sp, #56] @ 4-byte Reload + movge r10, r2 + add r2, r12, #32 + stm r2, {r0, r1, r10} + add sp, sp, #68 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end171: + .size mcl_fp_subNF11L, .Lfunc_end171-mcl_fp_subNF11L + .cantunwind + .fnend + + .globl mcl_fpDbl_add11L + .align 2 + .type mcl_fpDbl_add11L,%function +mcl_fpDbl_add11L: @ @mcl_fpDbl_add11L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #120 + sub sp, sp, #120 + ldm r1, {r7, r12, lr} + ldr r8, [r1, #12] + ldm r2, {r4, r5, r6, r9} + ldr r10, [r2, #20] + adds r4, r4, r7 + adcs r7, r5, r12 + str r4, [sp, #40] @ 4-byte Spill + ldr r4, [r2, #64] + str r7, [sp, #28] @ 4-byte Spill + adcs r7, r6, lr + add lr, r1, #16 + str r7, [sp, #24] @ 4-byte Spill + adcs r7, r9, r8 + add r8, r1, #32 + str r7, [sp, #32] @ 4-byte Spill + ldr r7, [r2, #32] + str r4, [sp, #108] @ 4-byte Spill + ldr r4, [r2, #68] + str r7, [sp, #68] @ 4-byte Spill + ldr r7, [r2, #36] + str r4, [sp, #104] @ 4-byte Spill + ldr r4, [r2, #72] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r2, #40] + str r4, [sp, #96] @ 4-byte Spill + ldr r4, [r2, #76] + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [r2, #44] + str 
r4, [sp, #116] @ 4-byte Spill + ldr r4, [r2, #80] + str r7, [sp, #84] @ 4-byte Spill + ldr r7, [r2, #48] + str r4, [sp, #100] @ 4-byte Spill + ldr r4, [r2, #84] + str r7, [sp, #88] @ 4-byte Spill + ldr r7, [r2, #56] + str r4, [sp, #112] @ 4-byte Spill + ldr r4, [r2, #16] + str r7, [sp, #92] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r2, #28] + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #24] + ldr r2, [r1, #64] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r1, #68] + str r7, [sp, #16] @ 4-byte Spill + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #52] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [r1, #80] + str r2, [sp, #60] @ 4-byte Spill + ldr r2, [r1, #84] + str r2, [sp, #64] @ 4-byte Spill + ldm r8, {r5, r6, r8} + ldr r2, [r1, #44] + ldr r11, [r1, #52] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #48] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #12] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r9, [sp, #40] @ 4-byte Reload + ldr r7, [sp, #28] @ 4-byte Reload + adcs r1, r4, r1 + str r9, [r0] + str r7, [r0, #4] + ldr r7, [sp, #24] @ 4-byte Reload + ldr r4, [sp, #32] @ 4-byte Reload + adcs r2, r10, r2 + add r10, r3, #32 + str r7, [r0, #8] + str r4, [r0, #12] + str r1, [r0, #16] + ldr r1, [sp, #16] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #20] @ 4-byte Reload + ldr r7, [sp] @ 4-byte Reload + adcs r1, r1, r12 + str r1, [r0, #24] + adcs r2, r2, lr + ldr r1, [sp, #68] @ 4-byte Reload + str r2, [r0, #28] + ldr r2, [sp, #72] @ 4-byte Reload + adcs r1, r1, r5 + str r1, [r0, #32] + adcs r2, r2, r6 + ldr r1, [sp, #76] @ 4-byte Reload + str r2, [r0, #36] + ldr r2, [sp, #84] @ 4-byte Reload + adcs r1, r1, r8 + adcs r6, r2, r7 + str r1, [r0, #40] + ldr r1, [sp, #88] @ 4-byte Reload + ldr r2, [sp, #4] @ 4-byte Reload + ldr r7, [sp, #8] @ 4-byte Reload + str r6, [sp, #72] @ 4-byte Spill + adcs r4, r1, r2 + ldr r1, [sp, #80] @ 4-byte Reload + str r4, [sp, #76] @ 4-byte Spill + adcs r2, r1, r11 + ldr r1, [sp, #92] @ 4-byte Reload + str r2, [sp, #80] @ 4-byte Spill + adcs r5, r1, r7 + ldr r1, [sp, #36] @ 4-byte Reload + ldr r7, [sp, #12] @ 4-byte Reload + str r5, [sp, #92] @ 4-byte Spill + adcs r8, r1, r7 + ldr r1, [sp, #108] @ 4-byte Reload + ldr r7, [sp, #44] @ 4-byte Reload + str r8, [sp, #84] @ 4-byte Spill + adcs r1, r1, r7 + ldr r7, [sp, #48] @ 4-byte Reload + str r1, [sp, #108] @ 4-byte Spill + ldr r1, [sp, #104] @ 4-byte Reload + adcs r1, r1, r7 + ldr r7, [sp, #52] @ 4-byte Reload + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + adcs r12, r1, r7 + ldr r1, [sp, #116] @ 4-byte Reload + ldr r7, [sp, #56] @ 4-byte Reload + str r12, [sp, #96] @ 4-byte Spill + adcs r1, r1, r7 + ldr r7, [sp, #60] @ 4-byte Reload + str r1, [sp, #116] @ 4-byte Spill + ldr r1, [sp, #100] @ 4-byte Reload + adcs r1, r1, r7 + ldr r7, [sp, #64] @ 4-byte Reload + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #112] @ 4-byte Reload + adcs r1, r1, r7 + str r1, [sp, #112] @ 4-byte Spill + mov r1, #0 + adc r1, r1, #0 + str r1, [sp, #88] @ 4-byte Spill + ldmib r3, {r1, r9, lr} + ldr r7, [r3, #16] + ldr r11, [r3] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r3, #20] + subs r11, r6, r11 + sbcs r1, r4, r1 + sbcs r4, r2, r9 + sbcs r2, r5, lr + str r7, [sp, #60] @ 4-byte Spill + ldr r7, [r3, #24] + str r7, [sp, #64] @ 4-byte Spill + ldr r7, [r3, #28] + str r7, [sp, 
#68] @ 4-byte Spill + ldm r10, {r5, r9, r10} + ldr r3, [sp, #56] @ 4-byte Reload + ldr r6, [sp, #60] @ 4-byte Reload + sbcs r7, r8, r3 + ldr r3, [sp, #108] @ 4-byte Reload + sbcs r8, r3, r6 + ldr r3, [sp, #104] @ 4-byte Reload + ldr r6, [sp, #64] @ 4-byte Reload + sbcs r3, r3, r6 + ldr r6, [sp, #68] @ 4-byte Reload + sbcs r12, r12, r6 + ldr r6, [sp, #116] @ 4-byte Reload + sbcs lr, r6, r5 + ldr r5, [sp, #100] @ 4-byte Reload + ldr r6, [sp, #112] @ 4-byte Reload + sbcs r9, r5, r9 + ldr r5, [sp, #72] @ 4-byte Reload + sbcs r10, r6, r10 + ldr r6, [sp, #88] @ 4-byte Reload + sbc r6, r6, #0 + ands r6, r6, #1 + movne r11, r5 + ldr r5, [sp, #76] @ 4-byte Reload + str r11, [r0, #44] + movne r1, r5 + str r1, [r0, #48] + ldr r1, [sp, #80] @ 4-byte Reload + movne r4, r1 + ldr r1, [sp, #92] @ 4-byte Reload + cmp r6, #0 + str r4, [r0, #52] + movne r2, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r2, [r0, #56] + movne r7, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r7, [r0, #60] + movne r8, r1 + ldr r1, [sp, #104] @ 4-byte Reload + cmp r6, #0 + str r8, [r0, #64] + movne r3, r1 + ldr r1, [sp, #96] @ 4-byte Reload + str r3, [r0, #68] + movne r12, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r12, [r0, #72] + movne lr, r1 + ldr r1, [sp, #100] @ 4-byte Reload + cmp r6, #0 + str lr, [r0, #76] + movne r9, r1 + ldr r1, [sp, #112] @ 4-byte Reload + str r9, [r0, #80] + movne r10, r1 + str r10, [r0, #84] + add sp, sp, #120 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end172: + .size mcl_fpDbl_add11L, .Lfunc_end172-mcl_fpDbl_add11L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub11L + .align 2 + .type mcl_fpDbl_sub11L,%function +mcl_fpDbl_sub11L: @ @mcl_fpDbl_sub11L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #120 + sub sp, sp, #120 + ldr r7, [r2, #64] + str r7, [sp, #96] @ 4-byte Spill + ldr r7, [r2, #68] + str r7, [sp, #100] @ 4-byte Spill + ldr r7, [r2, #72] + str r7, [sp, #104] @ 4-byte Spill + ldr r7, [r2, #76] + str r7, [sp, #112] @ 4-byte Spill + ldr r7, [r2, #80] + str r7, [sp, #108] @ 4-byte Spill + ldr r7, [r2, #84] + str r7, [sp, #116] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #92] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #88] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #84] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r2, #44] + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [r2, #40] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r2, #36] + str r7, [sp, #68] @ 4-byte Spill + ldr r7, [r2] + ldmib r2, {r4, r8, r10} + ldm r1, {r5, r6, r12, lr} + ldr r9, [r2, #20] + subs r5, r5, r7 + ldr r7, [r2, #24] + sbcs r4, r6, r4 + str r5, [sp, #16] @ 4-byte Spill + ldr r5, [r2, #32] + str r4, [sp, #8] @ 4-byte Spill + ldr r4, [r2, #28] + sbcs r8, r12, r8 + str r7, [sp, #32] @ 4-byte Spill + sbcs r7, lr, r10 + add r10, r1, #32 + add lr, r1, #16 + str r5, [sp, #40] @ 4-byte Spill + str r7, [sp] @ 4-byte Spill + str r4, [sp, #36] @ 4-byte Spill + ldr r4, [r2, #16] + ldr r2, [r1, #64] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r1, #68] + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #52] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [r1, #80] + str r2, [sp, #60] @ 4-byte Spill + ldr r2, [r1, #84] + str r2, [sp, #64] @ 4-byte Spill + ldm r10, {r5, r6, r10} + ldr r2, [r1, #44] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r1, #48] + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #24] @ 4-byte 
Spill + ldr r2, [r1, #60] + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [r1, #52] + str r2, [sp, #4] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r11, [sp, #16] @ 4-byte Reload + ldr r7, [sp, #8] @ 4-byte Reload + str r11, [r0] + stmib r0, {r7, r8} + sbcs r1, r1, r4 + mov r8, #0 + ldr r4, [sp] @ 4-byte Reload + sbcs r2, r2, r9 + ldr r7, [sp, #4] @ 4-byte Reload + str r4, [r0, #12] + str r1, [r0, #16] + ldr r1, [sp, #32] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #36] @ 4-byte Reload + sbcs r1, r12, r1 + str r1, [r0, #24] + ldr r1, [sp, #40] @ 4-byte Reload + sbcs r2, lr, r2 + str r2, [r0, #28] + ldr r2, [sp, #68] @ 4-byte Reload + sbcs r1, r5, r1 + str r1, [r0, #32] + ldr r1, [sp, #72] @ 4-byte Reload + sbcs r2, r6, r2 + str r2, [r0, #36] + ldr r2, [sp, #12] @ 4-byte Reload + sbcs r1, r10, r1 + str r1, [r0, #40] + ldr r1, [sp, #76] @ 4-byte Reload + sbcs r4, r2, r1 + ldr r1, [sp, #80] @ 4-byte Reload + ldr r2, [sp, #20] @ 4-byte Reload + str r4, [sp, #40] @ 4-byte Spill + sbcs r2, r2, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r2, [sp, #68] @ 4-byte Spill + sbcs r9, r7, r1 + ldr r1, [sp, #88] @ 4-byte Reload + ldr r7, [sp, #24] @ 4-byte Reload + sbcs r12, r7, r1 + ldr r1, [sp, #92] @ 4-byte Reload + ldr r7, [sp, #28] @ 4-byte Reload + str r12, [sp, #80] @ 4-byte Spill + sbcs lr, r7, r1 + ldr r1, [sp, #96] @ 4-byte Reload + ldr r7, [sp, #44] @ 4-byte Reload + str lr, [sp, #84] @ 4-byte Spill + sbcs r5, r7, r1 + ldr r1, [sp, #100] @ 4-byte Reload + ldr r7, [sp, #48] @ 4-byte Reload + str r5, [sp, #96] @ 4-byte Spill + sbcs r6, r7, r1 + ldr r1, [sp, #104] @ 4-byte Reload + ldr r7, [sp, #52] @ 4-byte Reload + str r6, [sp, #100] @ 4-byte Spill + sbcs r11, r7, r1 + ldr r1, [sp, #112] @ 4-byte Reload + ldr r7, [sp, #56] @ 4-byte Reload + str r11, [sp, #104] @ 4-byte Spill + sbcs r1, r7, r1 + ldr r7, [sp, #60] @ 4-byte Reload + str r1, [sp, #112] @ 4-byte Spill + ldr r1, [sp, #108] @ 4-byte Reload + sbcs r10, r7, r1 + ldr r1, [sp, #116] @ 4-byte Reload + ldr r7, [sp, #64] @ 4-byte Reload + str r10, [sp, #108] @ 4-byte Spill + sbcs r1, r7, r1 + ldr r7, [r3, #4] + str r1, [sp, #116] @ 4-byte Spill + sbc r1, r8, #0 + ldr r8, [r3, #28] + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [r3, #32] + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [r3, #36] + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [r3, #40] + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [r3, #8] + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [r3, #12] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [r3, #16] + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [r3, #20] + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [r3, #24] + ldr r3, [r3] + str r1, [sp, #64] @ 4-byte Spill + adds r1, r4, r3 + ldr r3, [sp, #48] @ 4-byte Reload + ldr r4, [sp, #56] @ 4-byte Reload + adcs r7, r2, r7 + ldr r2, [sp, #52] @ 4-byte Reload + adcs r2, r9, r2 + adcs r3, r12, r3 + adcs r12, lr, r4 + ldr r4, [sp, #60] @ 4-byte Reload + adcs r4, r5, r4 + ldr r5, [sp, #64] @ 4-byte Reload + adcs lr, r6, r5 + ldr r6, [sp, #112] @ 4-byte Reload + ldr r5, [sp, #72] @ 4-byte Reload + adcs r8, r11, r8 + adcs r11, r6, r5 + ldr r6, [sp, #76] @ 4-byte Reload + ldr r5, [sp, #116] @ 4-byte Reload + adcs r10, r10, r6 + ldr r6, [sp, #88] @ 4-byte Reload + adc r6, r5, r6 + str r6, [sp, #88] @ 4-byte Spill + ldr r6, [sp, #92] @ 4-byte Reload + ands r5, r6, #1 + ldr r6, [sp, #40] @ 4-byte Reload + moveq r2, r9 + moveq r1, r6 + str r1, [r0, #44] + ldr r1, [sp, #68] @ 4-byte Reload + moveq r7, r1 + ldr r1, [sp, #80] @ 4-byte Reload + cmp r5, #0 + str r7, [r0, #48] + str r2, [r0, #52] + ldr r2, 
[sp, #88] @ 4-byte Reload + moveq r3, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r3, [r0, #56] + moveq r12, r1 + ldr r1, [sp, #96] @ 4-byte Reload + str r12, [r0, #60] + moveq r4, r1 + ldr r1, [sp, #100] @ 4-byte Reload + cmp r5, #0 + str r4, [r0, #64] + moveq lr, r1 + ldr r1, [sp, #104] @ 4-byte Reload + str lr, [r0, #68] + moveq r8, r1 + ldr r1, [sp, #112] @ 4-byte Reload + str r8, [r0, #72] + moveq r11, r1 + ldr r1, [sp, #108] @ 4-byte Reload + cmp r5, #0 + str r11, [r0, #76] + moveq r10, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r10, [r0, #80] + moveq r2, r1 + str r2, [r0, #84] + add sp, sp, #120 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end173: + .size mcl_fpDbl_sub11L, .Lfunc_end173-mcl_fpDbl_sub11L + .cantunwind + .fnend + + .align 2 + .type .LmulPv384x32,%function +.LmulPv384x32: @ @mulPv384x32 + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r11, lr} + push {r4, r5, r6, r7, r8, r9, r11, lr} + ldr r12, [r1] + ldmib r1, {r3, lr} + ldr r9, [r1, #12] + umull r4, r8, lr, r2 + umull lr, r6, r12, r2 + mov r5, r4 + mov r7, r6 + str lr, [r0] + umull lr, r12, r9, r2 + umlal r7, r5, r3, r2 + str r5, [r0, #8] + str r7, [r0, #4] + umull r5, r7, r3, r2 + adds r3, r6, r5 + adcs r3, r7, r4 + adcs r3, r8, lr + str r3, [r0, #12] + ldr r3, [r1, #16] + umull r7, r6, r3, r2 + adcs r3, r12, r7 + str r3, [r0, #16] + ldr r3, [r1, #20] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #20] + ldr r3, [r1, #24] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #24] + ldr r3, [r1, #28] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #28] + ldr r3, [r1, #32] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #32] + ldr r3, [r1, #36] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #36] + ldr r3, [r1, #40] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #40] + ldr r1, [r1, #44] + umull r3, r7, r1, r2 + adcs r1, r6, r3 + str r1, [r0, #44] + adc r1, r7, #0 + str r1, [r0, #48] + pop {r4, r5, r6, r7, r8, r9, r11, lr} + mov pc, lr +.Lfunc_end174: + .size .LmulPv384x32, .Lfunc_end174-.LmulPv384x32 + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre12L + .align 2 + .type mcl_fp_mulUnitPre12L,%function +mcl_fp_mulUnitPre12L: @ @mcl_fp_mulUnitPre12L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #60 + sub sp, sp, #60 + mov r4, r0 + mov r0, sp + bl .LmulPv384x32(PLT) + ldr r12, [sp, #48] + ldr lr, [sp, #44] + ldr r8, [sp, #40] + ldr r9, [sp, #36] + ldr r10, [sp, #32] + ldr r11, [sp, #28] + ldr r5, [sp, #24] + ldr r6, [sp, #20] + ldm sp, {r2, r3} + add r7, sp, #8 + ldm r7, {r0, r1, r7} + stm r4, {r2, r3} + add r2, r4, #8 + stm r2, {r0, r1, r7} + str r6, [r4, #20] + str r5, [r4, #24] + str r11, [r4, #28] + str r10, [r4, #32] + str r9, [r4, #36] + str r8, [r4, #40] + str lr, [r4, #44] + str r12, [r4, #48] + add sp, sp, #60 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end175: + .size mcl_fp_mulUnitPre12L, .Lfunc_end175-mcl_fp_mulUnitPre12L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre12L + .align 2 + .type mcl_fpDbl_mulPre12L,%function +mcl_fpDbl_mulPre12L: @ @mcl_fpDbl_mulPre12L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #196 + sub sp, sp, #196 + mov r6, r2 + mov r5, r1 + mov r4, r0 + bl mcl_fpDbl_mulPre6L(PLT) + add r0, r4, #48 + add r1, r5, #24 + add r2, r6, #24 + bl mcl_fpDbl_mulPre6L(PLT) + add lr, r6, #24 + ldr r8, [r6, #40] + ldr r9, [r6, #44] + ldr r2, [r6, #16] + ldr 
r3, [r6, #20] + ldm lr, {r0, r1, r12, lr} + ldm r6, {r6, r7, r10, r11} + adds r0, r6, r0 + adcs r1, r7, r1 + str r0, [sp, #80] @ 4-byte Spill + adcs r12, r10, r12 + str r1, [sp, #72] @ 4-byte Spill + ldr r10, [r5, #36] + adcs r0, r11, lr + add lr, r5, #8 + str r12, [sp, #68] @ 4-byte Spill + str r0, [sp, #92] @ 4-byte Spill + adcs r0, r2, r8 + str r0, [sp, #88] @ 4-byte Spill + adcs r0, r3, r9 + ldr r9, [r5, #32] + str r0, [sp, #84] @ 4-byte Spill + mov r0, #0 + adc r6, r0, #0 + ldr r0, [r5, #40] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r5, #44] + str r0, [sp, #76] @ 4-byte Spill + ldm lr, {r3, r11, lr} + ldr r8, [r5, #20] + ldr r0, [r5, #24] + ldr r2, [r5, #28] + ldm r5, {r5, r7} + adds r0, r5, r0 + ldr r5, [sp, #80] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + str r0, [sp, #124] + ldr r0, [sp, #64] @ 4-byte Reload + adcs r7, r7, r2 + add r2, sp, #100 + adcs r9, r3, r9 + str r7, [sp, #128] + adcs r11, r11, r10 + str r9, [sp, #132] + str r5, [sp, #100] + str r1, [sp, #104] + str r12, [sp, #108] + add r1, sp, #124 + str r11, [sp, #136] + adcs r10, lr, r0 + ldr r0, [sp, #76] @ 4-byte Reload + str r10, [sp, #140] + adcs r8, r8, r0 + ldr r0, [sp, #92] @ 4-byte Reload + str r8, [sp, #144] + str r0, [sp, #112] + ldr r0, [sp, #88] @ 4-byte Reload + str r0, [sp, #116] + ldr r0, [sp, #84] @ 4-byte Reload + str r0, [sp, #120] + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + add r0, sp, #148 + bl mcl_fpDbl_mulPre6L(PLT) + cmp r6, #0 + ldr r0, [sp, #96] @ 4-byte Reload + ldr r3, [sp, #92] @ 4-byte Reload + moveq r8, r6 + moveq r10, r6 + moveq r11, r6 + moveq r9, r6 + moveq r7, r6 + cmp r6, #0 + moveq r0, r6 + adds r2, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + ldr r5, [sp, #88] @ 4-byte Reload + adcs r1, r7, r0 + ldr r0, [sp, #68] @ 4-byte Reload + adcs r12, r9, r0 + adcs r3, r11, r3 + adcs lr, r10, r5 + ldr r5, [sp, #84] @ 4-byte Reload + adcs r0, r8, r5 + str r0, [sp, #92] @ 4-byte Spill + mov r0, #0 + adc r5, r0, #0 + ldr r0, [sp, #76] @ 4-byte Reload + cmp r0, #0 + and r6, r6, r0 + moveq r1, r7 + ldr r7, [sp, #96] @ 4-byte Reload + moveq r12, r9 + ldr r9, [sp, #92] @ 4-byte Reload + moveq lr, r10 + moveq r3, r11 + moveq r2, r7 + ldr r7, [sp, #172] + cmp r0, #0 + moveq r9, r8 + moveq r5, r0 + adds r8, r2, r7 + ldr r7, [sp, #176] + adcs r10, r1, r7 + ldr r7, [sp, #180] + adcs r0, r12, r7 + ldr r7, [sp, #184] + str r0, [sp, #96] @ 4-byte Spill + adcs r0, r3, r7 + ldr r7, [sp, #188] + str r0, [sp, #92] @ 4-byte Spill + adcs r0, lr, r7 + ldr r7, [sp, #192] + str r0, [sp, #84] @ 4-byte Spill + adcs r0, r9, r7 + ldr r7, [r4] + str r0, [sp, #80] @ 4-byte Spill + adc r0, r5, r6 + str r0, [sp, #76] @ 4-byte Spill + ldmib r4, {r6, r9, lr} + ldr r0, [sp, #148] + ldr r5, [sp, #152] + ldr r1, [sp, #156] + ldr r2, [sp, #160] + ldr r11, [r4, #24] + subs r3, r0, r7 + ldr r0, [r4, #16] + sbcs r12, r5, r6 + ldr r5, [r4, #68] + sbcs r6, r1, r9 + ldr r1, [sp, #164] + ldr r9, [r4, #32] + sbcs r2, r2, lr + ldr lr, [r4, #72] + str r5, [sp, #56] @ 4-byte Spill + sbcs r7, r1, r0 + ldr r0, [r4, #20] + ldr r1, [sp, #168] + sbcs r0, r1, r0 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + sbcs r0, r8, r11 + ldr r8, [r4, #28] + str r0, [sp, #60] @ 4-byte Spill + sbcs r0, r10, r8 + ldr r10, [r4, #52] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + sbcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r4, #36] + str r0, [sp, #96] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, 
[sp, #40] @ 4-byte Spill + ldr r0, [r4, #40] + str r0, [sp, #88] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [sp, #80] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r4, #44] + str r0, [sp, #92] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [r4, #92] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + str r1, [sp, #84] @ 4-byte Spill + sbc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r4, #48] + str r0, [sp, #80] @ 4-byte Spill + subs r0, r3, r0 + ldr r3, [r4, #80] + str r0, [sp, #24] @ 4-byte Spill + sbcs r0, r12, r10 + ldr r12, [r4, #76] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r4, #56] + str r0, [sp, #76] @ 4-byte Spill + sbcs r0, r6, r0 + ldr r6, [r4, #64] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r4, #60] + str r6, [sp, #44] @ 4-byte Spill + str r0, [sp, #72] @ 4-byte Spill + sbcs r0, r2, r0 + ldr r2, [r4, #84] + sbcs r7, r7, r6 + ldr r6, [sp, #64] @ 4-byte Reload + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [r4, #88] + str r2, [sp, #68] @ 4-byte Spill + sbcs r6, r6, r5 + ldr r5, [sp, #60] @ 4-byte Reload + sbcs r5, r5, lr + str r5, [sp] @ 4-byte Spill + ldr r5, [sp, #52] @ 4-byte Reload + sbcs r5, r5, r12 + str r5, [sp, #4] @ 4-byte Spill + ldr r5, [sp, #48] @ 4-byte Reload + sbcs r5, r5, r3 + str r5, [sp, #8] @ 4-byte Spill + ldr r5, [sp, #40] @ 4-byte Reload + sbcs r2, r5, r2 + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [sp, #36] @ 4-byte Reload + sbcs r2, r2, r0 + str r2, [sp, #52] @ 4-byte Spill + mov r2, r0 + ldr r0, [sp, #32] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + sbc r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adds r11, r11, r0 + ldr r0, [sp, #20] @ 4-byte Reload + str r11, [r4, #24] + adcs r8, r8, r0 + ldr r0, [sp, #16] @ 4-byte Reload + str r8, [r4, #28] + adcs r9, r9, r0 + ldr r0, [sp, #96] @ 4-byte Reload + str r9, [r4, #32] + adcs r5, r0, r1 + ldr r0, [sp, #88] @ 4-byte Reload + ldr r1, [sp] @ 4-byte Reload + str r5, [r4, #36] + ldr r5, [sp, #8] @ 4-byte Reload + adcs r7, r0, r7 + ldr r0, [sp, #92] @ 4-byte Reload + str r7, [r4, #40] + adcs r6, r0, r6 + ldr r0, [sp, #80] @ 4-byte Reload + str r6, [r4, #44] + adcs r0, r0, r1 + ldr r1, [sp, #4] @ 4-byte Reload + str r0, [r4, #48] + ldr r0, [sp, #76] @ 4-byte Reload + adcs r1, r10, r1 + adcs r0, r0, r5 + str r1, [r4, #52] + ldr r1, [sp, #72] @ 4-byte Reload + ldr r5, [sp, #48] @ 4-byte Reload + str r0, [r4, #56] + ldr r0, [sp, #44] @ 4-byte Reload + adcs r1, r1, r5 + ldr r5, [sp, #52] @ 4-byte Reload + str r1, [r4, #60] + ldr r1, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [r4, #64] + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [r4, #68] + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [r4, #72] + adcs r0, r12, #0 + str r0, [r4, #76] + adcs r0, r3, #0 + str r0, [r4, #80] + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [r4, #84] + adcs r0, r2, #0 + adc r1, r1, #0 + str r0, [r4, #88] + str r1, [r4, #92] + add sp, sp, #196 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end176: + .size mcl_fpDbl_mulPre12L, .Lfunc_end176-mcl_fpDbl_mulPre12L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre12L + .align 2 + .type mcl_fpDbl_sqrPre12L,%function +mcl_fpDbl_sqrPre12L: @ @mcl_fpDbl_sqrPre12L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #204 + sub sp, sp, 
#204 + mov r5, r1 + mov r4, r0 + mov r2, r5 + bl mcl_fpDbl_mulPre6L(PLT) + add r1, r5, #24 + add r0, r4, #48 + mov r2, r1 + bl mcl_fpDbl_mulPre6L(PLT) + ldr r10, [r5, #32] + ldr r9, [r5, #36] + ldr lr, [r5, #40] + ldr r12, [r5, #44] + ldr r3, [r5, #8] + ldr r2, [r5, #12] + ldr r1, [r5, #16] + ldr r11, [r5, #20] + ldr r6, [r5, #24] + ldr r0, [r5, #28] + ldm r5, {r5, r7} + adds r8, r5, r6 + adcs r6, r7, r0 + mov r0, #0 + str r8, [sp, #132] + str r8, [sp, #108] + adcs r10, r3, r10 + str r6, [sp, #136] + str r6, [sp, #112] + adcs r5, r2, r9 + add r2, sp, #108 + str r10, [sp, #140] + str r10, [sp, #116] + adcs r9, r1, lr + add r1, sp, #132 + str r5, [sp, #144] + str r5, [sp, #120] + adcs r7, r11, r12 + str r9, [sp, #148] + str r9, [sp, #124] + adc r11, r0, #0 + add r0, sp, #156 + str r7, [sp, #152] + str r7, [sp, #128] + bl mcl_fpDbl_mulPre6L(PLT) + adds r0, r9, r9 + ldr lr, [sp, #192] + ldr r12, [sp, #196] + ldr r9, [sp, #200] + orr r0, r0, r5, lsr #31 + str r0, [sp, #104] @ 4-byte Spill + adc r0, r7, r7 + str r0, [sp, #100] @ 4-byte Spill + adds r0, r10, r10 + ldr r10, [sp, #180] + adc r1, r5, r5 + orr r0, r0, r6, lsr #31 + str r1, [sp, #92] @ 4-byte Spill + adds r1, r8, r8 + ldr r8, [sp, #184] + adc r5, r6, r6 + ldr r6, [sp, #188] + adds r1, r10, r1 + str r1, [sp, #96] @ 4-byte Spill + adcs r3, r8, r5 + ldr r5, [sp, #100] @ 4-byte Reload + adcs r2, r6, r0 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r1, lr, r0 + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r12, r0 + adcs r5, r9, r5 + adc r7, r11, r7, lsr #31 + cmp r11, #0 + moveq r3, r8 + moveq r2, r6 + moveq r5, r9 + moveq r0, r12 + moveq r1, lr + cmp r11, #0 + ldr r6, [sp, #96] @ 4-byte Reload + mov r8, r3 + add r3, sp, #156 + str r0, [sp, #104] @ 4-byte Spill + str r1, [sp, #100] @ 4-byte Spill + str r2, [sp, #88] @ 4-byte Spill + mov r9, r5 + ldm r4, {r12, lr} + moveq r7, r11 + ldr r11, [r4, #8] + ldr r5, [r4, #12] + moveq r6, r10 + ldm r3, {r0, r1, r2, r3} + ldr r10, [r4, #64] + subs r12, r0, r12 + ldr r0, [r4, #16] + sbcs lr, r1, lr + ldr r1, [sp, #172] + sbcs r2, r2, r11 + ldr r11, [r4, #48] + sbcs r3, r3, r5 + ldr r5, [r4, #68] + sbcs r0, r1, r0 + ldr r1, [sp, #176] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r4, #20] + str r5, [sp, #60] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [sp, #88] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r4, #24] + str r0, [sp, #96] @ 4-byte Spill + sbcs r0, r6, r0 + ldr r6, [sp, #76] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r4, #28] + str r0, [sp, #72] @ 4-byte Spill + sbcs r0, r8, r0 + ldr r8, [r4, #56] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r4, #32] + str r0, [sp, #92] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [r4, #36] + str r0, [sp, #88] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [r4, #40] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + str r1, [sp, #100] @ 4-byte Spill + sbcs r0, r0, r1 + ldr r1, [r4, #92] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r4, #44] + str r1, [sp, #84] @ 4-byte Spill + str r0, [sp, #104] @ 4-byte Spill + sbcs r0, r9, r0 + ldr r9, [r4, #60] + str r0, [sp, #40] @ 4-byte Spill + sbc r0, r7, #0 + ldr r7, [r4, #52] + str r0, [sp, #36] @ 4-byte Spill + subs r0, r12, r11 + ldr r12, [r4, #76] + str r0, [sp, #32] @ 4-byte Spill + sbcs r0, lr, r7 + ldr lr, [r4, #72] + str r0, [sp, #28] @ 4-byte Spill + sbcs r0, r2, r8 + ldr r2, [r4, #84] + str r0, [sp, #24] @ 4-byte Spill + sbcs r0, r3, r9 + ldr r3, [r4, #80] + sbcs r6, r6, r10 + str r0, 
[sp, #20] @ 4-byte Spill + ldr r0, [r4, #88] + str r6, [sp, #4] @ 4-byte Spill + ldr r6, [sp, #68] @ 4-byte Reload + str r2, [sp, #80] @ 4-byte Spill + sbcs r5, r6, r5 + str r5, [sp, #8] @ 4-byte Spill + ldr r5, [sp, #64] @ 4-byte Reload + sbcs r5, r5, lr + str r5, [sp, #12] @ 4-byte Spill + ldr r5, [sp, #56] @ 4-byte Reload + sbcs r5, r5, r12 + str r5, [sp, #16] @ 4-byte Spill + ldr r5, [sp, #52] @ 4-byte Reload + sbcs r5, r5, r3 + str r5, [sp, #52] @ 4-byte Spill + ldr r5, [sp, #48] @ 4-byte Reload + sbcs r2, r5, r2 + ldr r5, [sp, #28] @ 4-byte Reload + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [sp, #44] @ 4-byte Reload + sbcs r2, r2, r0 + str r2, [sp, #64] @ 4-byte Spill + mov r2, r0 + ldr r0, [sp, #40] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + sbc r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adds r0, r0, r1 + ldr r1, [sp, #72] @ 4-byte Reload + str r0, [r4, #24] + ldr r0, [sp, #92] @ 4-byte Reload + adcs r6, r1, r5 + ldr r1, [sp, #24] @ 4-byte Reload + ldr r5, [sp, #20] @ 4-byte Reload + str r6, [r4, #28] + adcs r0, r0, r1 + ldr r1, [sp, #88] @ 4-byte Reload + str r0, [r4, #32] + ldr r0, [sp, #100] @ 4-byte Reload + adcs r6, r1, r5 + ldr r1, [sp, #4] @ 4-byte Reload + ldr r5, [sp, #8] @ 4-byte Reload + str r6, [r4, #36] + adcs r0, r0, r1 + ldr r1, [sp, #104] @ 4-byte Reload + str r0, [r4, #40] + ldr r0, [sp, #12] @ 4-byte Reload + adcs r5, r1, r5 + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r11, r0 + str r5, [r4, #44] + str r0, [r4, #48] + ldr r0, [sp, #52] @ 4-byte Reload + adcs r1, r7, r1 + str r1, [r4, #52] + ldr r1, [sp, #56] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [r4, #56] + ldr r0, [sp, #64] @ 4-byte Reload + adcs r1, r9, r1 + str r1, [r4, #60] + ldr r1, [sp, #68] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [r4, #64] + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [r4, #68] + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [r4, #72] + adcs r0, r12, #0 + str r0, [r4, #76] + adcs r0, r3, #0 + str r0, [r4, #80] + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [r4, #84] + adcs r0, r2, #0 + adc r1, r1, #0 + str r0, [r4, #88] + str r1, [r4, #92] + add sp, sp, #204 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end177: + .size mcl_fpDbl_sqrPre12L, .Lfunc_end177-mcl_fpDbl_sqrPre12L + .cantunwind + .fnend + + .globl mcl_fp_mont12L + .align 2 + .type mcl_fp_mont12L,%function +mcl_fp_mont12L: @ @mcl_fp_mont12L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #428 + sub sp, sp, #428 + .pad #1024 + sub sp, sp, #1024 + str r2, [sp, #92] @ 4-byte Spill + ldr r5, [r3, #-4] + ldr r2, [r2] + str r0, [sp, #68] @ 4-byte Spill + add r0, sp, #1392 + str r3, [sp, #100] @ 4-byte Spill + str r1, [sp, #96] @ 4-byte Spill + mov r4, r3 + str r5, [sp, #88] @ 4-byte Spill + bl .LmulPv384x32(PLT) + ldr r0, [sp, #1396] + ldr r6, [sp, #1392] + add r11, sp, #1024 + mov r1, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1400] + mul r2, r6, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1404] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #1440] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #1436] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #1432] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #1428] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #1424] + str r0, [sp, 
#52] @ 4-byte Spill + ldr r0, [sp, #1420] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1416] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1412] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1408] + str r0, [sp, #36] @ 4-byte Spill + add r0, r11, #312 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #1384] + ldr r1, [sp, #96] @ 4-byte Reload + ldr r5, [sp, #1360] + ldr r8, [sp, #1356] + ldr r7, [sp, #1352] + ldr r10, [sp, #1336] + ldr r9, [sp, #1340] + ldr r4, [sp, #1344] + ldr r11, [sp, #1348] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1380] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1376] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1372] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1368] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1364] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + ldr r2, [r0, #4] + add r0, sp, #1280 + bl .LmulPv384x32(PLT) + adds r0, r10, r6 + ldr r1, [sp, #64] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + ldr r3, [sp, #1296] + ldr r12, [sp, #1300] + ldr lr, [sp, #1304] + ldr r6, [sp, #1312] + ldr r10, [sp, #1328] + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r9, r0 + ldr r9, [sp, #1324] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #1280] + adcs r1, r11, r1 + ldr r11, [sp, #60] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + mov r0, #0 + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adcs r1, r7, r1 + ldr r7, [sp, #1316] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r8, r1 + ldr r8, [sp, #1320] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r5, r1 + ldr r5, [sp, #1308] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #1292] + adc r0, r0, #0 + adds r11, r11, r4 + ldr r4, [sp, #56] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #1288] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1284] + adcs r0, r4, r0 + mov r4, r11 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r7 + add r7, sp, #1024 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs 
r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #36] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r11, r0 + add r0, r7, #200 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #1272] + add r9, sp, #1232 + ldr r5, [sp, #1248] + ldr r8, [sp, #1244] + ldr r10, [sp, #1224] + ldr r11, [sp, #1228] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1268] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1264] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1260] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1256] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1252] + str r0, [sp, #8] @ 4-byte Spill + ldm r9, {r6, r7, r9} + ldr r0, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #96] @ 4-byte Reload + ldr r2, [r0, #8] + add r0, sp, #1168 + bl .LmulPv384x32(PLT) + adds r0, r4, r10 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #1168 + ldr r10, [sp, #1212] + ldr r4, [sp, #1192] + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #1216] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1200] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1208] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1204] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1196] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #84] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + add r5, sp, #1024 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte 
Reload + adcs r0, r0, r9 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #36] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r7, r0 + add r0, r5, #88 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #1160] + add r10, sp, #1120 + ldr r6, [sp, #1136] + ldr r9, [sp, #1132] + ldr r11, [sp, #1112] + ldr r7, [sp, #1116] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1156] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1152] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1148] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1144] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1140] + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r4, r5, r10} + ldr r0, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #96] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, sp, #1056 + bl .LmulPv384x32(PLT) + adds r0, r8, r11 + ldr r1, [sp, #8] @ 4-byte Reload + ldr r2, [sp, #1068] + ldr r3, [sp, #1072] + ldr r12, [sp, #1076] + ldr lr, [sp, #1080] + ldr r8, [sp, #1096] + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1092] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + ldr r11, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1056] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1084] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1104] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1100] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1088] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1064] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + adds r11, r11, r4 + ldr r4, [sp, #80] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1060] + adcs r0, r4, r0 + mov r4, r11 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs 
r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #36] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #1000 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #1048] + add r9, sp, #1008 + ldr r5, [sp, #1024] + ldr r8, [sp, #1020] + ldr r10, [sp, #1000] + ldr r11, [sp, #1004] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1044] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1040] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1036] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1032] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1028] + str r0, [sp, #8] @ 4-byte Spill + ldm r9, {r6, r7, r9} + ldr r0, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #96] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, sp, #944 + bl .LmulPv384x32(PLT) + adds r0, r4, r10 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #944 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #968 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldm r11, {r4, r5, r6, r8, r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #84] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 
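+@ One Montgomery round of mcl_fp_mont12L: .LmulPv384x32 has just written a
+@ 13-word partial product to the stack, and this ldr/adcs/str chain folds
+@ it limb by limb into the accumulator spilled at [sp, #36..#84]. The low
+@ accumulator word times the per-modulus constant (loaded from [r3, #-4]
+@ in the prologue and kept at [sp, #88]) then seeds the reduction call
+@ that follows.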
+ str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #36] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #888 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #936] + add r10, sp, #896 + ldr r6, [sp, #912] + ldr r9, [sp, #908] + ldr r11, [sp, #888] + ldr r7, [sp, #892] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #932] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #928] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #924] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #920] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #916] + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r4, r5, r10} + ldr r0, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #96] @ 4-byte Reload + ldr r2, [r0, #20] + add r0, sp, #832 + bl .LmulPv384x32(PLT) + adds r0, r8, r11 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #836 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #860 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r5, r6, r7, r8, r9, r10} + ldr r4, [sp, #832] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #84] @ 4-byte Reload + adds r11, r11, r4 + ldr r4, [sp, #80] @ 4-byte Reload + adcs r0, r4, r0 + mov r4, r11 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + 
str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #36] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #776 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #824] + add r9, sp, #784 + ldr r5, [sp, #800] + ldr r8, [sp, #796] + ldr r10, [sp, #776] + ldr r11, [sp, #780] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #820] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #816] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #812] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #808] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #804] + str r0, [sp, #8] @ 4-byte Spill + ldm r9, {r6, r7, r9} + ldr r0, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #96] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, sp, #720 + bl .LmulPv384x32(PLT) + adds r0, r4, r10 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #720 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #744 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldm r11, {r4, r5, r6, r8, r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #84] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #36] @ 
4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #664 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #712] + add r10, sp, #672 + ldr r6, [sp, #688] + ldr r9, [sp, #684] + ldr r11, [sp, #664] + ldr r7, [sp, #668] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #708] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #704] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #700] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #696] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #692] + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r4, r5, r10} + ldr r0, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #96] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, sp, #608 + bl .LmulPv384x32(PLT) + adds r0, r8, r11 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #612 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #636 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r5, r6, r7, r8, r9, r10} + ldr r4, [sp, #608] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #84] @ 4-byte Reload + adds r11, r11, r4 + ldr r4, [sp, #80] @ 4-byte Reload + adcs r0, r4, r0 + mov r4, r11 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #36] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, 
[sp, #88] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #552 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #600] + add r9, sp, #560 + ldr r5, [sp, #576] + ldr r8, [sp, #572] + ldr r10, [sp, #552] + ldr r11, [sp, #556] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #596] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #592] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #588] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #584] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #580] + str r0, [sp, #8] @ 4-byte Spill + ldm r9, {r6, r7, r9} + ldr r0, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #96] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, sp, #496 + bl .LmulPv384x32(PLT) + adds r0, r4, r10 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #496 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #520 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldm r11, {r4, r5, r6, r8, r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #84] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #36] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #440 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #488] + 
add r10, sp, #448 + ldr r6, [sp, #464] + ldr r9, [sp, #460] + ldr r11, [sp, #440] + ldr r7, [sp, #444] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #484] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #480] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #476] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #472] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #468] + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r4, r5, r10} + ldr r0, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #96] @ 4-byte Reload + ldr r2, [r0, #36] + add r0, sp, #384 + bl .LmulPv384x32(PLT) + adds r0, r8, r11 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #388 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #412 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r5, r6, r7, r8, r9, r10} + ldr r4, [sp, #384] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #84] @ 4-byte Reload + adds r11, r11, r4 + ldr r4, [sp, #80] @ 4-byte Reload + adcs r0, r4, r0 + mov r4, r11 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #88] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + mul r2, r11, r6 + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #36] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + add r0, sp, #328 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #376] + ldr r1, [sp, #96] @ 4-byte Reload + ldr r5, [sp, #348] + ldr r9, [sp, #344] + ldr r10, 
[sp, #328] + ldr r11, [sp, #332] + ldr r8, [sp, #336] + ldr r7, [sp, #340] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #372] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #368] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #364] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #360] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #356] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #352] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + ldr r2, [r0, #40] + add r0, sp, #272 + bl .LmulPv384x32(PLT) + adds r0, r4, r10 + ldr r2, [sp, #4] @ 4-byte Reload + add r12, sp, #288 + ldr lr, [sp, #276] + ldr r4, [sp, #284] + ldr r10, [sp, #312] + ldr r0, [sp, #84] @ 4-byte Reload + adcs r1, r0, r11 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #316] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #320] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #280] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #8] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #12] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #272] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + adds r0, r1, r2 + mul r11, r0, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r6, [sp, #308] + ldm r12, {r0, r1, r2, r3, r12} + ldr r7, [sp, #80] @ 4-byte Reload + adcs r7, r7, lr + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [sp, #76] @ 4-byte Reload + adcs r7, r7, r5 + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [sp, #72] @ 4-byte Reload + adcs r7, r7, r4 + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [sp, #64] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + mov r2, r11 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #216 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #264] + add r10, sp, #220 + ldr r6, [sp, #244] + ldr r7, [sp, #240] + ldr r8, [sp, 
#236] + ldr r9, [sp, #232] + ldr r11, [sp, #216] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #260] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #256] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #252] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #248] + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r4, r5, r10} + ldr r0, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #96] @ 4-byte Reload + ldr r2, [r0, #44] + add r0, sp, #160 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #84] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #160 + add r12, sp, #176 + adds r0, r0, r11 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r4, r0, r4 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r11, r0, r5 + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #196 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldm lr, {r2, r7, lr} + ldr r0, [sp, #88] @ 4-byte Reload + ldr r6, [sp, #172] + adds r4, r4, r2 + mul r1, r4, r0 + adcs r7, r11, r7 + str r1, [sp, #44] @ 4-byte Spill + ldm r10, {r5, r8, r9, r10} + ldm r12, {r0, r1, r2, r3, r12} + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [sp, #96] @ 4-byte Reload + adcs r11, r7, lr + ldr r7, [sp, #92] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #100] @ 4-byte Reload + str r7, [sp, #32] @ 4-byte Spill + ldr r7, [sp, #84] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r7, r0, r5 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r8, r0, r8 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r10, r0, r10 + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + add r0, sp, #104 + bl .LmulPv384x32(PLT) + add r5, sp, #104 + mov r3, r6 + ldm r5, {r0, r1, r2, r5} + adds r0, r4, r0 + ldr r0, [sp, #36] @ 4-byte Reload + adcs lr, r0, r1 + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #40] @ 4-byte Reload + adcs r4, r11, r2 + str lr, [sp, #44] @ 4-byte Spill + str r4, [sp, #48] @ 4-byte Spill + adcs r2, r0, r5 + ldr r0, [sp, #120] + str r2, [sp, #52] @ 4-byte Spill + adcs r5, r1, r0 + ldr r0, [sp, 
#124] + ldr r1, [sp, #80] @ 4-byte Reload + str r5, [sp, #56] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #128] + adcs r0, r1, r0 + ldr r1, [sp, #88] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #132] + adcs r12, r1, r0 + ldr r0, [sp, #136] + ldr r1, [sp, #92] @ 4-byte Reload + str r12, [sp, #60] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #96] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #140] + adcs r0, r7, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #144] + adcs r0, r8, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #148] + adcs r0, r1, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #152] + adcs r0, r10, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldmib r3, {r0, r1, r7, r10} + ldr r11, [r3] + ldr r6, [r3, #24] + ldr r9, [r3, #20] + ldr r8, [r3, #36] + subs r11, lr, r11 + str r6, [sp, #36] @ 4-byte Spill + ldr r6, [r3, #28] + ldr lr, [r3, #44] + sbcs r0, r4, r0 + ldr r4, [sp, #72] @ 4-byte Reload + sbcs r1, r2, r1 + sbcs r2, r5, r7 + ldr r7, [r3, #32] + ldr r5, [r3, #40] + ldr r3, [sp, #80] @ 4-byte Reload + str r6, [sp, #40] @ 4-byte Spill + sbcs r10, r3, r10 + ldr r3, [sp, #84] @ 4-byte Reload + sbcs r6, r3, r9 + ldr r3, [sp, #36] @ 4-byte Reload + ldr r9, [sp, #40] @ 4-byte Reload + sbcs r3, r12, r3 + ldr r12, [sp, #88] @ 4-byte Reload + sbcs r12, r12, r9 + sbcs r7, r4, r7 + ldr r4, [sp, #76] @ 4-byte Reload + str r7, [sp, #100] @ 4-byte Spill + ldr r7, [sp, #48] @ 4-byte Reload + sbcs r9, r4, r8 + ldr r4, [sp, #96] @ 4-byte Reload + sbcs r8, r4, r5 + ldr r4, [sp, #92] @ 4-byte Reload + ldr r5, [sp, #44] @ 4-byte Reload + sbcs lr, r4, lr + ldr r4, [sp, #64] @ 4-byte Reload + sbc r4, r4, #0 + ands r4, r4, #1 + movne r11, r5 + ldr r5, [sp, #68] @ 4-byte Reload + movne r0, r7 + str r11, [r5] + str r0, [r5, #4] + ldr r0, [sp, #52] @ 4-byte Reload + movne r1, r0 + ldr r0, [sp, #56] @ 4-byte Reload + cmp r4, #0 + str r1, [r5, #8] + ldr r1, [sp, #100] @ 4-byte Reload + movne r2, r0 + ldr r0, [sp, #80] @ 4-byte Reload + str r2, [r5, #12] + movne r10, r0 + ldr r0, [sp, #84] @ 4-byte Reload + str r10, [r5, #16] + movne r6, r0 + ldr r0, [sp, #60] @ 4-byte Reload + cmp r4, #0 + str r6, [r5, #20] + movne r3, r0 + ldr r0, [sp, #88] @ 4-byte Reload + str r3, [r5, #24] + movne r12, r0 + ldr r0, [sp, #72] @ 4-byte Reload + str r12, [r5, #28] + movne r1, r0 + ldr r0, [sp, #76] @ 4-byte Reload + cmp r4, #0 + str r1, [r5, #32] + movne r9, r0 + ldr r0, [sp, #96] @ 4-byte Reload + str r9, [r5, #36] + movne r8, r0 + ldr r0, [sp, #92] @ 4-byte Reload + str r8, [r5, #40] + movne lr, r0 + str lr, [r5, #44] + add sp, sp, #428 + add sp, sp, #1024 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end178: + .size mcl_fp_mont12L, .Lfunc_end178-mcl_fp_mont12L + .cantunwind + .fnend + + .globl mcl_fp_montNF12L + .align 2 + .type mcl_fp_montNF12L,%function +mcl_fp_montNF12L: @ @mcl_fp_montNF12L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #428 + sub sp, sp, #428 + .pad #1024 + sub sp, sp, #1024 + add r12, sp, #92 + mov r4, r3 + mov r7, r1 + stm r12, {r1, r2, r3} + str r0, [sp, #68] @ 4-byte Spill + add r0, sp, #1392 + ldr r5, [r3, #-4] + ldr r2, [r2] + str r5, [sp, #88] @ 4-byte Spill + bl .LmulPv384x32(PLT) + ldr r0, [sp, #1396] + ldr r8, [sp, #1392] + add r10, sp, #1024 + mov r1, r4 + str r0, [sp, #64] @ 
4-byte Spill + ldr r0, [sp, #1400] + mul r2, r8, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1404] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1440] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #1436] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #1432] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #1428] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #1424] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1420] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1416] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1412] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1408] + str r0, [sp, #36] @ 4-byte Spill + add r0, r10, #312 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #1384] + add r11, sp, #1344 + ldr r9, [sp, #1356] + ldr r4, [sp, #1336] + ldr r6, [sp, #1340] + mov r1, r7 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1380] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1376] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1372] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1368] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1364] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1360] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r5, r10, r11} + ldr r0, [sp, #96] @ 4-byte Reload + ldr r2, [r0, #4] + add r0, sp, #1280 + bl .LmulPv384x32(PLT) + adds r0, r4, r8 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #1280 + ldr r7, [sp, #1316] + ldr r4, [sp, #1304] + ldr r0, [sp, #64] @ 4-byte Reload + adcs r8, r6, r0 + ldr r0, [sp, #60] @ 4-byte Reload + ldr r6, [sp, #1312] + adcs r0, r5, r0 + ldr r5, [sp, #1308] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r10, r0 + ldr r10, [sp, #1324] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r11, r0 + ldr r11, [sp, #1328] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r9, r0 + ldr r9, [sp, #1320] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adc r0, r1, r0 + str r0, [sp, #32] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r8, r8, r0 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + add r5, sp, #1024 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, 
#36] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #40] @ 4-byte Spill + adc r0, r11, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r8, r0 + add r0, r5, #200 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #1272] + add r10, sp, #1232 + ldr r6, [sp, #1248] + ldr r9, [sp, #1244] + ldr r11, [sp, #1224] + ldr r7, [sp, #1228] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1268] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1264] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1260] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1256] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1252] + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r4, r5, r10} + ldr r0, [sp, #96] @ 4-byte Reload + ldr r1, [sp, #92] @ 4-byte Reload + ldr r2, [r0, #8] + add r0, sp, #1168 + bl .LmulPv384x32(PLT) + adds r0, r8, r11 + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1180] + ldr r3, [sp, #1184] + ldr r12, [sp, #1188] + ldr lr, [sp, #1192] + ldr r8, [sp, #1208] + ldr r11, [sp, #1216] + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1204] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1168] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1196] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #84] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1212] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1200] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, r1 + adds r10, r10, r4 + ldr r4, [sp, #80] @ 4-byte Reload + ldr r1, [sp, #1176] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1172] + adcs r0, r4, r0 + mov r4, r10 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + 
adcs r0, r0, r7 + add r7, sp, #1024 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + adc r0, r11, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r10, r0 + add r0, r7, #88 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #1160] + add r9, sp, #1120 + ldr r5, [sp, #1136] + ldr r8, [sp, #1132] + ldr r10, [sp, #1112] + ldr r11, [sp, #1116] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1156] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1152] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1148] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1144] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1140] + str r0, [sp, #12] @ 4-byte Spill + ldm r9, {r6, r7, r9} + ldr r0, [sp, #96] @ 4-byte Reload + ldr r1, [sp, #92] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, sp, #1056 + bl .LmulPv384x32(PLT) + adds r0, r4, r10 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #1056 + ldr r10, [sp, #1100] + ldr r4, [sp, #1080] + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #1104] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1088] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1096] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1092] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1084] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #36] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #84] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload 
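+@ Annotation (hand-added; the compiler-generated output carries no comments —
+@ the description below is an inference from the visible instruction pattern):
+@ each of the 12 rounds of mcl_fp_montNF12L repeats the same CIOS-style step:
+@ multiply the running sum's low word by r = -p^(-1) mod 2^32 (loaded once
+@ from [r3, #-4] and kept in a spill slot), call .LmulPv384x32(PLT) to form
+@ q*p, fold that product in with an adcs chain so the low word cancels and the
+@ sum shifts down one limb, then call .LmulPv384x32(PLT) again on the next
+@ 32-bit word of b and accumulate. The NF variant (presumably "non-full-bit",
+@ i.e. the modulus leaves headroom in its top word) shows up in the epilogue:
+@ the final conditional subtraction selects on the sign of the difference
+@ (asr #31 / movlt) instead of the carry-out (ands #1 / movne) used by
+@ mcl_fp_mont12L above.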
+ adcs r0, r0, r10 + str r0, [sp, #40] @ 4-byte Spill + adc r0, r11, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #1000 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #1048] + add r10, sp, #1008 + ldr r6, [sp, #1024] + ldr r9, [sp, #1020] + ldr r11, [sp, #1000] + ldr r7, [sp, #1004] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1044] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1040] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1036] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1032] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1028] + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r4, r5, r10} + ldr r0, [sp, #96] @ 4-byte Reload + ldr r1, [sp, #92] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, sp, #944 + bl .LmulPv384x32(PLT) + adds r0, r8, r11 + ldr r1, [sp, #12] @ 4-byte Reload + add r11, sp, #972 + add lr, sp, #948 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #36] @ 4-byte Spill + ldm r11, {r5, r6, r7, r8, r9, r11} + ldr r4, [sp, #944] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r10, [sp, #84] @ 4-byte Reload + adds r10, r10, r4 + ldr r4, [sp, #80] @ 4-byte Reload + adcs r0, r4, r0 + mov r4, r10 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + adc r0, r11, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r10, r0 + add r0, sp, #888 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #936] + add r9, sp, #896 + ldr r5, [sp, #912] + 
ldr r8, [sp, #908] + ldr r10, [sp, #888] + ldr r11, [sp, #892] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #932] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #928] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #924] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #920] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #916] + str r0, [sp, #12] @ 4-byte Spill + ldm r9, {r6, r7, r9} + ldr r0, [sp, #96] @ 4-byte Reload + ldr r1, [sp, #92] @ 4-byte Reload + ldr r2, [r0, #20] + add r0, sp, #832 + bl .LmulPv384x32(PLT) + adds r0, r4, r10 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #832 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #856 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #36] @ 4-byte Spill + ldm r11, {r4, r5, r6, r8, r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #84] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #40] @ 4-byte Spill + adc r0, r11, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #776 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #824] + add r10, sp, #784 + ldr r6, [sp, #800] + ldr r9, [sp, #796] + ldr r11, [sp, #776] + ldr r7, [sp, #780] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #820] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #816] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #812] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #808] + str r0, [sp, 
#16] @ 4-byte Spill + ldr r0, [sp, #804] + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r4, r5, r10} + ldr r0, [sp, #96] @ 4-byte Reload + ldr r1, [sp, #92] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, sp, #720 + bl .LmulPv384x32(PLT) + adds r0, r8, r11 + ldr r1, [sp, #12] @ 4-byte Reload + add r11, sp, #748 + add lr, sp, #724 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #36] @ 4-byte Spill + ldm r11, {r5, r6, r7, r8, r9, r11} + ldr r4, [sp, #720] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r10, [sp, #84] @ 4-byte Reload + adds r10, r10, r4 + ldr r4, [sp, #80] @ 4-byte Reload + adcs r0, r4, r0 + mov r4, r10 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + adc r0, r11, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r10, r0 + add r0, sp, #664 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #712] + add r9, sp, #672 + ldr r5, [sp, #688] + ldr r8, [sp, #684] + ldr r10, [sp, #664] + ldr r11, [sp, #668] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #708] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #704] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #700] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #696] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #692] + str r0, [sp, #12] @ 4-byte Spill + ldm r9, {r6, r7, r9} + ldr r0, [sp, #96] @ 4-byte Reload + ldr r1, [sp, #92] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, sp, #608 + bl .LmulPv384x32(PLT) + adds r0, r4, r10 + ldr r1, [sp, #12] @ 
4-byte Reload + add lr, sp, #608 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #632 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #36] @ 4-byte Spill + ldm r11, {r4, r5, r6, r8, r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #84] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #40] @ 4-byte Spill + adc r0, r11, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #552 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #600] + add r10, sp, #560 + ldr r6, [sp, #576] + ldr r9, [sp, #572] + ldr r11, [sp, #552] + ldr r7, [sp, #556] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #596] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #592] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #588] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #584] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #580] + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r4, r5, r10} + ldr r0, [sp, #96] @ 4-byte Reload + ldr r1, [sp, #92] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, sp, #496 + bl .LmulPv384x32(PLT) + adds r0, r8, r11 + ldr r1, [sp, #12] @ 4-byte Reload + add r11, sp, #524 + add lr, sp, #500 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, 
#76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #36] @ 4-byte Spill + ldm r11, {r5, r6, r7, r8, r9, r11} + ldr r4, [sp, #496] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r10, [sp, #84] @ 4-byte Reload + adds r10, r10, r4 + ldr r4, [sp, #80] @ 4-byte Reload + adcs r0, r4, r0 + mov r4, r10 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + adc r0, r11, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + mul r2, r10, r0 + add r0, sp, #440 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #488] + add r9, sp, #448 + ldr r5, [sp, #464] + ldr r8, [sp, #460] + ldr r10, [sp, #440] + ldr r11, [sp, #444] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #484] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #480] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #476] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #472] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #468] + str r0, [sp, #12] @ 4-byte Spill + ldm r9, {r6, r7, r9} + ldr r0, [sp, #96] @ 4-byte Reload + ldr r1, [sp, #92] @ 4-byte Reload + ldr r2, [r0, #36] + add r0, sp, #384 + bl .LmulPv384x32(PLT) + adds r0, r4, r10 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #384 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #408 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #60] @ 4-byte 
Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #36] @ 4-byte Spill + ldm r11, {r4, r5, r6, r8, r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #84] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #88] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + mul r2, r7, r4 + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #40] @ 4-byte Spill + adc r0, r11, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #328 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #376] + ldr r1, [sp, #92] @ 4-byte Reload + ldr r6, [sp, #348] + ldr r10, [sp, #344] + ldr r11, [sp, #328] + ldr r7, [sp, #332] + ldr r9, [sp, #336] + ldr r5, [sp, #340] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #372] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #368] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #364] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #360] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #356] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #352] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + ldr r2, [r0, #40] + add r0, sp, #272 + bl .LmulPv384x32(PLT) + adds r0, r8, r11 + ldr r1, [sp, #80] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + ldr lr, [sp, #276] + add r12, sp, #288 + ldr r8, [sp, #316] + ldr r11, [sp, #312] + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r7 + adcs r7, r1, r9 + ldr r1, [sp, #76] @ 4-byte Reload + ldr r9, [sp, #320] + adcs r1, r1, r5 + ldr r5, [sp, #280] + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r1, r10 + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #284] + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #12] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs 
r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adc r1, r1, r2 + ldr r2, [sp, #272] + str r1, [sp, #36] @ 4-byte Spill + adds r0, r0, r2 + adcs r7, r7, lr + mul r10, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r4, [sp, #308] + ldm r12, {r0, r1, r2, r3, r12} + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [sp, #76] @ 4-byte Reload + adcs r7, r7, r5 + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [sp, #72] @ 4-byte Reload + adcs r7, r7, r6 + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [sp, #64] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + mov r2, r10 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #36] @ 4-byte Spill + adc r0, r9, #0 + str r0, [sp, #32] @ 4-byte Spill + add r0, sp, #216 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #264] + ldr r1, [sp, #92] @ 4-byte Reload + ldr r5, [sp, #244] + ldr r6, [sp, #240] + ldr r8, [sp, #236] + ldr r9, [sp, #232] + ldr r10, [sp, #216] + ldr r7, [sp, #220] + ldr r4, [sp, #224] + ldr r11, [sp, #228] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #260] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #256] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #252] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #248] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + ldr r2, [r0, #44] + add r0, sp, #160 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #84] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + add r12, sp, #176 + ldr lr, [sp, #164] + adds r0, r0, r10 + add r10, sp, #200 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #172] + adcs r1, r1, r4 + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r1, r11 + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r9 + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r8 + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #168] + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r5 + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte 
Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + adc r1, r1, r2 + ldr r2, [sp, #160] + str r1, [sp, #48] @ 4-byte Spill + adds r4, r0, r2 + ldr r0, [sp, #88] @ 4-byte Reload + mul r1, r4, r0 + str r1, [sp, #44] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r11, [sp, #196] + ldm r12, {r0, r1, r2, r3, r12} + ldr r5, [sp, #96] @ 4-byte Reload + adcs r5, r5, lr + str r5, [sp, #36] @ 4-byte Spill + ldr r5, [sp, #92] @ 4-byte Reload + adcs r6, r5, r6 + ldr r5, [sp, #100] @ 4-byte Reload + str r6, [sp, #32] @ 4-byte Spill + ldr r6, [sp, #84] @ 4-byte Reload + adcs r7, r6, r7 + ldr r6, [sp, #80] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r11, r0, r11 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r8, r0, r8 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r9, r0, r9 + adc r0, r10, #0 + str r0, [sp, #92] @ 4-byte Spill + add r0, sp, #104 + bl .LmulPv384x32(PLT) + add r6, sp, #104 + ldm r6, {r0, r1, r2, r6} + adds r0, r4, r0 + ldr r0, [sp, #36] @ 4-byte Reload + adcs lr, r0, r1 + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #40] @ 4-byte Reload + adcs r10, r0, r2 + ldr r0, [sp, #120] + mov r2, r5 + adcs r3, r7, r6 + str r10, [sp, #52] @ 4-byte Spill + str r3, [sp, #56] @ 4-byte Spill + adcs r6, r1, r0 + ldr r0, [sp, #124] + ldr r1, [sp, #80] @ 4-byte Reload + str r6, [sp, #60] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #128] + adcs r0, r1, r0 + ldr r1, [sp, #88] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #132] + adcs r12, r1, r0 + ldr r0, [sp, #136] + ldr r1, [sp, #96] @ 4-byte Reload + str r12, [sp, #64] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #140] + adcs r0, r11, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #144] + adcs r0, r8, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #148] + adcs r0, r9, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #152] + adc r0, r1, r0 + str r0, [sp, #92] @ 4-byte Spill + ldmib r2, {r0, r1, r7, r9} + ldr r4, [r2, #24] + ldr r8, [r2] + ldr r5, [r2, #20] + str r4, [sp, #44] @ 4-byte Spill + ldr r4, [r2, #28] + subs r8, lr, r8 + sbcs r0, r10, r0 + sbcs r1, r3, r1 + sbcs r7, r6, r7 + str r4, [sp, #48] @ 4-byte Spill + mov r4, r2 + ldr r2, [r4, #44] + ldr r10, [r4, #32] + ldr r6, [r4, #36] + ldr r11, [r4, #40] + ldr r4, [sp, #48] @ 4-byte Reload + str r2, [sp, #40] @ 4-byte Spill + ldr r2, [sp, #76] @ 4-byte Reload + sbcs r9, r2, r9 + ldr r2, [sp, #80] @ 4-byte Reload + sbcs r5, r2, r5 + ldr r2, [sp, #44] @ 4-byte Reload + sbcs r3, r12, r2 + ldr r2, [sp, #84] @ 4-byte Reload + sbcs r12, r2, r4 + ldr r2, [sp, #88] @ 4-byte Reload + ldr r4, [sp, #40] @ 4-byte Reload + sbcs r10, r2, r10 + ldr r2, [sp, #72] @ 4-byte Reload + sbcs r2, r2, r6 + ldr r6, [sp, #52] @ 4-byte Reload + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [sp, #96] @ 4-byte Reload + sbcs r2, r2, r11 + ldr r11, [sp, #68] @ 4-byte Reload + str r2, [sp, #100] @ 4-byte Spill + 
ldr r2, [sp, #92] @ 4-byte Reload + sbc r2, r2, r4 + asr r4, r2, #31 + cmp r4, #0 + movlt r8, lr + movlt r0, r6 + str r8, [r11] + str r0, [r11, #4] + ldr r0, [sp, #56] @ 4-byte Reload + movlt r1, r0 + ldr r0, [sp, #60] @ 4-byte Reload + cmp r4, #0 + str r1, [r11, #8] + ldr r1, [sp, #100] @ 4-byte Reload + movlt r7, r0 + ldr r0, [sp, #76] @ 4-byte Reload + str r7, [r11, #12] + movlt r9, r0 + ldr r0, [sp, #80] @ 4-byte Reload + str r9, [r11, #16] + movlt r5, r0 + ldr r0, [sp, #64] @ 4-byte Reload + cmp r4, #0 + str r5, [r11, #20] + movlt r3, r0 + ldr r0, [sp, #84] @ 4-byte Reload + str r3, [r11, #24] + ldr r3, [sp, #48] @ 4-byte Reload + movlt r12, r0 + ldr r0, [sp, #88] @ 4-byte Reload + str r12, [r11, #28] + movlt r10, r0 + ldr r0, [sp, #72] @ 4-byte Reload + cmp r4, #0 + str r10, [r11, #32] + movlt r3, r0 + ldr r0, [sp, #96] @ 4-byte Reload + str r3, [r11, #36] + movlt r1, r0 + ldr r0, [sp, #92] @ 4-byte Reload + str r1, [r11, #40] + movlt r2, r0 + str r2, [r11, #44] + add sp, sp, #428 + add sp, sp, #1024 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end179: + .size mcl_fp_montNF12L, .Lfunc_end179-mcl_fp_montNF12L + .cantunwind + .fnend + + .globl mcl_fp_montRed12L + .align 2 + .type mcl_fp_montRed12L,%function +mcl_fp_montRed12L: @ @mcl_fp_montRed12L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #836 + sub sp, sp, #836 + mov r3, r2 + str r0, [sp, #148] @ 4-byte Spill + ldr r2, [r1, #4] + ldr r10, [r1] + ldr r0, [r3] + str r3, [sp, #152] @ 4-byte Spill + mov r5, r3 + str r2, [sp, #64] @ 4-byte Spill + ldr r2, [r1, #8] + str r0, [sp, #144] @ 4-byte Spill + ldr r0, [r3, #4] + str r2, [sp, #60] @ 4-byte Spill + ldr r2, [r1, #12] + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [r3, #8] + str r2, [sp, #56] @ 4-byte Spill + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [r3, #12] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [r3, #16] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [r3, #20] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [r3, #24] + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [r3, #-4] + str r0, [sp, #156] @ 4-byte Spill + mul r2, r10, r0 + ldr r0, [r3, #28] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [r3, #32] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [r3, #36] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [r3, #40] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [r3, #44] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [r1, #64] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r1, #68] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r1, #72] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r1, #76] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r1, #80] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r1, #84] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [r1, #88] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [r1, #92] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [r1, #32] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r1, #36] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r1, #40] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r1, #44] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r1, #48] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r1, #52] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r1, #56] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r1, #60] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [r1, #28] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r1, #24] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r1, #20] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [r1, #16] + mov r1, r3 + str r0, [sp, #8] @ 
4-byte Spill + add r0, sp, #776 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #824] + add r11, sp, #808 + add lr, sp, #776 + str r0, [sp, #4] @ 4-byte Spill + ldm r11, {r6, r8, r9, r11} + ldr r7, [sp, #804] + ldr r4, [sp, #800] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r10, r0 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r10, r0, r1 + ldr r0, [sp, #60] @ 4-byte Reload + ldr r1, [sp, #4] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #156] @ 4-byte Reload + mul r2, r10, r0 + add r0, sp, #720 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #768] + add lr, sp, #756 + add r9, sp, #732 + str r0, [sp, #4] @ 4-byte Spill + ldm lr, {r3, r12, lr} + ldr r4, [sp, #720] + ldr r6, [sp, #752] + ldr r11, [sp, #748] + ldr r2, [sp, #744] + ldr r1, [sp, #724] + ldr r7, [sp, #728] + ldm r9, {r0, r8, r9} + adds r4, r10, r4 + ldr r4, [sp, #64] @ 4-byte Reload + adcs r10, r4, r1 + ldr r1, [sp, #60] @ 4-byte Reload + mov r4, r5 + adcs r1, r1, r7 + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #4] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + 
adcs r0, r0, r3 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #156] @ 4-byte Reload + mul r2, r10, r0 + add r0, sp, #664 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #712] + add r11, sp, #696 + add lr, sp, #664 + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r6, r8, r9, r11} + ldr r7, [sp, #692] + ldr r5, [sp, #688] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r10, r0 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r10, r0, r1 + ldr r0, [sp, #60] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #156] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + mul r2, r10, r5 + adcs r0, r0, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 
4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + add r0, sp, #608 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #656] + add lr, sp, #644 + add r9, sp, #620 + str r0, [sp, #12] @ 4-byte Spill + ldm lr, {r3, r12, lr} + ldr r4, [sp, #608] + ldr r6, [sp, #640] + ldr r11, [sp, #636] + ldr r2, [sp, #632] + ldr r1, [sp, #612] + ldr r7, [sp, #616] + ldm r9, {r0, r8, r9} + adds r4, r10, r4 + ldr r4, [sp, #64] @ 4-byte Reload + adcs r10, r4, r1 + ldr r1, [sp, #60] @ 4-byte Reload + ldr r4, [sp, #152] @ 4-byte Reload + adcs r1, r1, r7 + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + mov r0, r5 + mul r2, r10, r0 + add r0, sp, #552 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #600] + add r11, sp, #584 + add lr, sp, #552 + str r0, [sp, #16] @ 4-byte Spill + ldm r11, {r6, r8, r9, r11} + ldr r7, [sp, #580] + ldr r5, [sp, #576] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r10, r0 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r10, r0, r1 + ldr r0, [sp, #60] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #156] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + mul r2, r10, r5 + adcs r0, r0, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, 
r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #496 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #544] + add lr, sp, #532 + add r9, sp, #508 + str r0, [sp, #20] @ 4-byte Spill + ldm lr, {r3, r12, lr} + ldr r4, [sp, #496] + ldr r6, [sp, #528] + ldr r11, [sp, #524] + ldr r2, [sp, #520] + ldr r1, [sp, #500] + ldr r7, [sp, #504] + ldm r9, {r0, r8, r9} + adds r4, r10, r4 + ldr r4, [sp, #64] @ 4-byte Reload + adcs r10, r4, r1 + ldr r1, [sp, #60] @ 4-byte Reload + ldr r4, [sp, #152] @ 4-byte Reload + adcs r1, r1, r7 + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r10, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #440 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #488] + add r11, sp, #472 + add lr, sp, #440 + str r0, [sp, #24] @ 4-byte Spill + ldm r11, {r6, r8, r9, r11} + ldr r7, [sp, #468] + ldr r5, [sp, #464] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r10, r0 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r10, r0, r1 + ldr r0, [sp, #60] @ 4-byte Reload + ldr r1, [sp, #24] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] 
@ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #156] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + mul r2, r10, r5 + adcs r0, r0, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + add r0, sp, #384 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #432] + add lr, sp, #420 + add r9, sp, #396 + str r0, [sp, #28] @ 4-byte Spill + ldm lr, {r3, r12, lr} + ldr r4, [sp, #384] + ldr r6, [sp, #416] + ldr r11, [sp, #412] + ldr r2, [sp, #408] + ldr r1, [sp, #388] + ldr r7, [sp, #392] + ldm r9, {r0, r8, r9} + adds r4, r10, r4 + ldr r4, [sp, #64] @ 4-byte Reload + adcs r10, r4, r1 + ldr r1, [sp, #60] @ 4-byte Reload + mov r4, r5 + adcs r1, r1, r7 + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r10, r4 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #152] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #328 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #376] + add r11, sp, #352 + add lr, sp, #328 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #372] + str r0, [sp, #28] @ 4-byte Spill + ldm r11, 
{r5, r7, r8, r9, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r10, r0 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r10, r0, r1 + ldr r0, [sp, #60] @ 4-byte Reload + ldr r1, [sp, #28] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r10, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + mov r5, r6 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #272 + bl .LmulPv384x32(PLT) + ldr r0, [sp, #320] + add lr, sp, #300 + add r6, sp, #272 + add r12, sp, #284 + str r0, [sp, #12] @ 4-byte Spill + ldm lr, {r4, r8, r9, r11, lr} + ldr r7, [sp, #296] + ldm r6, {r2, r3, r6} + ldm r12, {r0, r1, r12} + adds r2, r10, r2 + ldr r2, [sp, #64] @ 4-byte Reload + adcs r10, r2, r3 + ldr r2, [sp, #60] @ 4-byte Reload + adcs r6, r2, r6 + ldr r2, [sp, #56] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #156] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + mul r2, r10, r4 + adcs r0, r0, r8 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + mov r11, r5 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #216 + bl .LmulPv384x32(PLT) + add r7, sp, #216 + add lr, sp, #252 + ldm r7, {r0, r1, r3, r7} + ldr r8, [sp, #264] + adds r0, r10, r0 + adcs r10, r6, r1 + mul r0, r10, r4 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #156] @ 4-byte 
Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + add r7, sp, #232 + str r0, [sp, #52] @ 4-byte Spill + ldm lr, {r6, r12, lr} + ldm r7, {r0, r1, r2, r3, r7} + ldr r4, [sp, #96] @ 4-byte Reload + adcs r9, r4, r0 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r11 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r4, r0, r3 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r5, r0, r7 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r6, r0, r6 + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + add r0, sp, #160 + bl .LmulPv384x32(PLT) + add r3, sp, #160 + ldm r3, {r0, r1, r2, r3} + adds r0, r10, r0 + ldr r0, [sp, #156] @ 4-byte Reload + adcs r1, r0, r1 + ldr r0, [sp, #52] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + adcs r12, r0, r2 + ldr r2, [sp, #176] + ldr r0, [sp, #40] @ 4-byte Reload + adcs r3, r9, r3 + str r12, [sp, #52] @ 4-byte Spill + str r3, [sp, #56] @ 4-byte Spill + adcs r7, r0, r2 + ldr r2, [sp, #180] + ldr r0, [sp, #44] @ 4-byte Reload + str r7, [sp, #60] @ 4-byte Spill + adcs r8, r0, r2 + ldr r2, [sp, #184] + ldr r0, [sp, #84] @ 4-byte Reload + str r8, [sp, #64] @ 4-byte Spill + adcs r4, r4, r2 + ldr r2, [sp, #188] + str r4, [sp, #68] @ 4-byte Spill + adcs r5, r5, r2 + ldr r2, [sp, #192] + str r5, [sp, #72] @ 4-byte Spill + adcs r6, r6, r2 + ldr r2, [sp, #196] + str r6, [sp, #76] @ 4-byte Spill + adcs r9, r0, r2 + ldr r2, [sp, #200] + ldr r0, [sp, #96] @ 4-byte Reload + str r9, [sp, #84] @ 4-byte Spill + adcs r10, r0, r2 + ldr r2, [sp, #204] + ldr r0, [sp, #80] @ 4-byte Reload + str r10, [sp, #96] @ 4-byte Spill + adcs lr, r0, r2 + ldr r2, [sp, #208] + ldr r0, [sp, #92] @ 4-byte Reload + str lr, [sp, #156] @ 4-byte Spill + adcs r11, r0, r2 + ldr r0, [sp, #88] @ 4-byte Reload + ldr r2, [sp, #136] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #144] @ 4-byte Reload + subs r0, r1, r0 + ldr r1, [sp, #140] @ 4-byte Reload + sbcs r1, r12, r1 + sbcs r2, r3, r2 + ldr r3, [sp, #120] @ 4-byte Reload + sbcs r3, r7, r3 + ldr r7, [sp, #124] @ 4-byte Reload + sbcs r12, r8, r7 + ldr r7, [sp, #128] @ 4-byte Reload + sbcs r7, r4, r7 + ldr r4, [sp, #132] @ 4-byte Reload + sbcs r4, r5, r4 + ldr r5, [sp, #100] @ 4-byte Reload + sbcs r8, r6, r5 + ldr r6, [sp, #104] @ 4-byte Reload + sbcs r5, r9, r6 + ldr r6, [sp, #108] @ 4-byte Reload + str r5, [sp, #144] @ 4-byte Spill + ldr r5, [sp, #92] @ 4-byte Reload + sbcs r9, r10, r6 + ldr r6, [sp, #112] @ 4-byte Reload + sbcs r6, lr, r6 + mov lr, r11 + ldr r11, [sp, #148] @ 4-byte Reload + str r6, [sp, #152] @ 4-byte Spill + ldr r6, [sp, #116] @ 4-byte Reload + sbcs r10, lr, r6 + sbc r6, r5, #0 + ldr r5, [sp, #48] @ 4-byte Reload + ands r6, r6, #1 + movne r0, r5 + str r0, [r11] + ldr r0, [sp, #52] @ 4-byte Reload + movne r1, r0 + ldr r0, [sp, #56] @ 4-byte Reload + str r1, [r11, #4] + ldr r1, [sp, #156] @ 4-byte Reload + movne r2, r0 + ldr r0, [sp, #60] @ 4-byte Reload + cmp r6, #0 + str r2, [r11, #8] + ldr r2, [sp, #144] @ 4-byte Reload + movne r3, r0 + ldr r0, 
[sp, #64] @ 4-byte Reload + str r3, [r11, #12] + movne r12, r0 + ldr r0, [sp, #68] @ 4-byte Reload + str r12, [r11, #16] + movne r7, r0 + ldr r0, [sp, #72] @ 4-byte Reload + cmp r6, #0 + str r7, [r11, #20] + movne r4, r0 + ldr r0, [sp, #76] @ 4-byte Reload + str r4, [r11, #24] + movne r8, r0 + ldr r0, [sp, #84] @ 4-byte Reload + str r8, [r11, #28] + movne r2, r0 + ldr r0, [sp, #96] @ 4-byte Reload + cmp r6, #0 + movne r10, lr + str r2, [r11, #32] + movne r9, r0 + ldr r0, [sp, #152] @ 4-byte Reload + movne r0, r1 + str r9, [r11, #36] + str r0, [r11, #40] + str r10, [r11, #44] + add sp, sp, #836 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end180: + .size mcl_fp_montRed12L, .Lfunc_end180-mcl_fp_montRed12L + .cantunwind + .fnend + + .globl mcl_fp_addPre12L + .align 2 + .type mcl_fp_addPre12L,%function +mcl_fp_addPre12L: @ @mcl_fp_addPre12L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #28 + sub sp, sp, #28 + ldm r1, {r3, r12, lr} + ldr r9, [r1, #12] + ldmib r2, {r5, r6, r7} + ldr r4, [r2, #16] + ldr r11, [r2] + str r4, [sp] @ 4-byte Spill + ldr r4, [r2, #20] + adds r8, r11, r3 + ldr r3, [r2, #36] + ldr r11, [r2, #32] + adcs r5, r5, r12 + add r12, r1, #16 + adcs r6, r6, lr + add lr, r1, #32 + adcs r7, r7, r9 + str r4, [sp, #4] @ 4-byte Spill + ldr r4, [r2, #24] + str r3, [sp, #16] @ 4-byte Spill + ldr r3, [r2, #40] + str r4, [sp, #8] @ 4-byte Spill + ldr r4, [r2, #28] + ldr r2, [r2, #44] + str r3, [sp, #20] @ 4-byte Spill + str r4, [sp, #12] @ 4-byte Spill + str r2, [sp, #24] @ 4-byte Spill + ldm lr, {r4, r10, lr} + ldr r9, [r1, #44] + ldm r12, {r1, r2, r3, r12} + str r8, [r0] + stmib r0, {r5, r6} + str r7, [r0, #12] + ldr r5, [sp] @ 4-byte Reload + ldr r7, [sp, #24] @ 4-byte Reload + adcs r1, r5, r1 + ldr r5, [sp, #4] @ 4-byte Reload + str r1, [r0, #16] + ldr r1, [sp, #8] @ 4-byte Reload + adcs r2, r5, r2 + str r2, [r0, #20] + ldr r2, [sp, #12] @ 4-byte Reload + adcs r1, r1, r3 + ldr r3, [sp, #20] @ 4-byte Reload + str r1, [r0, #24] + adcs r2, r2, r12 + str r2, [r0, #28] + ldr r2, [sp, #16] @ 4-byte Reload + adcs r1, r11, r4 + add r0, r0, #32 + adcs r2, r2, r10 + adcs r3, r3, lr + adcs r7, r7, r9 + stm r0, {r1, r2, r3, r7} + mov r0, #0 + adc r0, r0, #0 + add sp, sp, #28 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end181: + .size mcl_fp_addPre12L, .Lfunc_end181-mcl_fp_addPre12L + .cantunwind + .fnend + + .globl mcl_fp_subPre12L + .align 2 + .type mcl_fp_subPre12L,%function +mcl_fp_subPre12L: @ @mcl_fp_subPre12L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #28 + sub sp, sp, #28 + ldmib r2, {r8, r12, lr} + ldr r3, [r2, #16] + ldr r7, [r2] + ldr r6, [r1] + ldr r5, [r1, #4] + ldr r4, [r1, #8] + ldr r11, [r2, #44] + ldr r9, [r1, #32] + ldr r10, [r1, #36] + str r3, [sp, #12] @ 4-byte Spill + ldr r3, [r2, #20] + subs r6, r6, r7 + ldr r7, [r2, #32] + sbcs r5, r5, r8 + ldr r8, [r1, #40] + sbcs r4, r4, r12 + add r12, r1, #16 + str r3, [sp, #16] @ 4-byte Spill + ldr r3, [r2, #24] + str r7, [sp] @ 4-byte Spill + ldr r7, [r2, #36] + str r3, [sp, #20] @ 4-byte Spill + ldr r3, [r2, #28] + str r7, [sp, #4] @ 4-byte Spill + ldr r7, [r2, #40] + str r3, [sp, #24] @ 4-byte Spill + ldr r3, [r1, #12] + str r7, [sp, #8] @ 4-byte Spill + ldr r7, [r1, #44] + sbcs lr, r3, lr + ldm r12, {r1, r2, r3, r12} + str r6, [r0] + str r5, [r0, #4] + str r4, [r0, #8] + ldr r4, [sp, #12] @ 4-byte Reload + ldr r6, [sp, #16] @ 4-byte Reload + 
str lr, [r0, #12] + sbcs r1, r1, r4 + str r1, [r0, #16] + sbcs r2, r2, r6 + ldr r1, [sp, #20] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #24] @ 4-byte Reload + sbcs r1, r3, r1 + ldr r3, [sp, #8] @ 4-byte Reload + str r1, [r0, #24] + sbcs r2, r12, r2 + ldr r1, [sp] @ 4-byte Reload + str r2, [r0, #28] + ldr r2, [sp, #4] @ 4-byte Reload + add r0, r0, #32 + sbcs r1, r9, r1 + sbcs r2, r10, r2 + sbcs r3, r8, r3 + sbcs r7, r7, r11 + stm r0, {r1, r2, r3, r7} + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + add sp, sp, #28 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end182: + .size mcl_fp_subPre12L, .Lfunc_end182-mcl_fp_subPre12L + .cantunwind + .fnend + + .globl mcl_fp_shr1_12L + .align 2 + .type mcl_fp_shr1_12L,%function +mcl_fp_shr1_12L: @ @mcl_fp_shr1_12L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #4 + sub sp, sp, #4 + add r6, r1, #20 + ldr r3, [r1, #8] + ldr r2, [r1, #12] + ldr lr, [r1, #16] + add r11, r1, #32 + ldm r6, {r4, r5, r6} + ldm r1, {r8, r12} + lsr r7, r12, #1 + orr r9, r7, r3, lsl #31 + ldm r11, {r7, r10, r11} + ldr r1, [r1, #44] + str r1, [sp] @ 4-byte Spill + lsr r1, r2, #1 + lsrs r2, r2, #1 + rrx r2, r3 + lsrs r3, r12, #1 + orr r1, r1, lr, lsl #31 + rrx r3, r8 + stm r0, {r3, r9} + str r2, [r0, #8] + str r1, [r0, #12] + lsrs r1, r4, #1 + lsr r2, r10, #1 + rrx r1, lr + orr r2, r2, r11, lsl #31 + str r1, [r0, #16] + lsr r1, r4, #1 + orr r1, r1, r5, lsl #31 + str r1, [r0, #20] + lsrs r1, r6, #1 + rrx r1, r5 + str r1, [r0, #24] + lsr r1, r6, #1 + orr r1, r1, r7, lsl #31 + str r1, [r0, #28] + lsrs r1, r10, #1 + add r0, r0, #32 + rrx r1, r7 + ldr r7, [sp] @ 4-byte Reload + lsrs r3, r7, #1 + lsr r7, r7, #1 + rrx r3, r11 + stm r0, {r1, r2, r3, r7} + add sp, sp, #4 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end183: + .size mcl_fp_shr1_12L, .Lfunc_end183-mcl_fp_shr1_12L + .cantunwind + .fnend + + .globl mcl_fp_add12L + .align 2 + .type mcl_fp_add12L,%function +mcl_fp_add12L: @ @mcl_fp_add12L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #44 + sub sp, sp, #44 + ldm r1, {r12, lr} + ldr r5, [r2] + ldr r8, [r1, #8] + ldr r9, [r1, #12] + ldmib r2, {r4, r6, r7} + ldr r11, [r1, #40] + adds r5, r5, r12 + ldr r12, [r2, #40] + adcs r4, r4, lr + str r5, [sp, #40] @ 4-byte Spill + ldr r5, [r1, #24] + ldr lr, [r1, #32] + adcs r6, r6, r8 + str r4, [sp, #36] @ 4-byte Spill + ldr r4, [r1, #20] + ldr r8, [r1, #36] + adcs r7, r7, r9 + str r6, [sp, #32] @ 4-byte Spill + ldr r6, [r1, #16] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r2, #16] + adcs r10, r7, r6 + ldr r6, [r2, #20] + adcs r7, r6, r4 + ldr r4, [r2, #24] + str r7, [sp, #12] @ 4-byte Spill + adcs r7, r4, r5 + ldr r4, [r1, #28] + ldr r5, [r2, #28] + str r7, [sp, #4] @ 4-byte Spill + adcs r6, r5, r4 + ldr r5, [r2, #32] + ldr r4, [r1, #44] + ldr r1, [r2, #36] + ldr r2, [r2, #44] + str r6, [sp, #8] @ 4-byte Spill + adcs r9, r5, lr + ldr lr, [sp, #32] @ 4-byte Reload + adcs r5, r1, r8 + ldr r1, [sp, #40] @ 4-byte Reload + ldr r8, [sp, #12] @ 4-byte Reload + adcs r11, r12, r11 + ldr r12, [sp, #36] @ 4-byte Reload + str r5, [sp, #28] @ 4-byte Spill + adcs r2, r2, r4 + ldr r4, [sp, #16] @ 4-byte Reload + str r2, [sp, #24] @ 4-byte Spill + str r1, [r0] + str r12, [r0, #4] + str lr, [r0, #8] + str r4, [r0, #12] + str r10, [r0, #16] + str r8, [r0, #20] + str r7, [r0, #24] + str r6, [r0, #28] + str r9, [r0, #32] + str r5, [r0, #36] + str r11, [r0, #40] + str 
r2, [r0, #44] + mov r2, #0 + adc r2, r2, #0 + str r2, [sp, #20] @ 4-byte Spill + ldm r3, {r2, r6, r7} + ldr r5, [r3, #12] + subs r1, r1, r2 + ldr r2, [sp, #4] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte Spill + sbcs r1, r12, r6 + str r1, [sp] @ 4-byte Spill + sbcs r1, lr, r7 + str r1, [sp, #36] @ 4-byte Spill + sbcs r1, r4, r5 + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [r3, #16] + sbcs r1, r10, r1 + add r10, r3, #36 + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [r3, #20] + sbcs r6, r8, r1 + ldr r1, [r3, #24] + sbcs lr, r2, r1 + ldr r2, [r3, #28] + ldr r1, [sp, #8] @ 4-byte Reload + sbcs r12, r1, r2 + ldr r2, [r3, #32] + ldm r10, {r1, r4, r10} + sbcs r7, r9, r2 + ldr r2, [sp, #28] @ 4-byte Reload + sbcs r2, r2, r1 + ldr r1, [sp, #24] @ 4-byte Reload + sbcs r3, r11, r4 + sbcs r5, r1, r10 + ldr r1, [sp, #20] @ 4-byte Reload + sbc r1, r1, #0 + tst r1, #1 + bne .LBB184_2 +@ BB#1: @ %nocarry + ldr r1, [sp, #40] @ 4-byte Reload + str r1, [r0] + ldr r1, [sp] @ 4-byte Reload + str r1, [r0, #4] + ldr r1, [sp, #36] @ 4-byte Reload + str r1, [r0, #8] + ldr r1, [sp, #32] @ 4-byte Reload + str r1, [r0, #12] + ldr r1, [sp, #16] @ 4-byte Reload + str r1, [r0, #16] + str r6, [r0, #20] + str lr, [r0, #24] + str r12, [r0, #28] + str r7, [r0, #32] + add r0, r0, #36 + stm r0, {r2, r3, r5} +.LBB184_2: @ %carry + add sp, sp, #44 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end184: + .size mcl_fp_add12L, .Lfunc_end184-mcl_fp_add12L + .cantunwind + .fnend + + .globl mcl_fp_addNF12L + .align 2 + .type mcl_fp_addNF12L,%function +mcl_fp_addNF12L: @ @mcl_fp_addNF12L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #60 + sub sp, sp, #60 + ldm r1, {r5, r8, lr} + ldr r6, [r2] + ldr r10, [r1, #12] + ldmib r2, {r4, r7, r9} + ldr r12, [r1, #20] + adds r6, r6, r5 + ldr r5, [r1, #24] + adcs r8, r4, r8 + ldr r4, [r2, #16] + str r6, [sp, #16] @ 4-byte Spill + adcs r7, r7, lr + add lr, r2, #32 + str r7, [sp, #24] @ 4-byte Spill + ldr r7, [r1, #16] + adcs r6, r9, r10 + str r6, [sp, #32] @ 4-byte Spill + ldr r6, [r1, #44] + adcs r7, r4, r7 + ldr r4, [r1, #40] + str r7, [sp, #40] @ 4-byte Spill + ldr r7, [r2, #20] + adcs r7, r7, r12 + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r2, #24] + adcs r7, r7, r5 + ldr r5, [r2, #28] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r1, #28] + adcs r7, r5, r7 + ldr r5, [r1, #36] + str r7, [sp, #52] @ 4-byte Spill + ldr r7, [r1, #32] + ldm lr, {r1, r12, lr} + ldr r2, [r2, #44] + adcs r1, r1, r7 + str r1, [sp, #20] @ 4-byte Spill + adcs r1, r12, r5 + str r1, [sp, #28] @ 4-byte Spill + adcs r1, lr, r4 + str r1, [sp, #36] @ 4-byte Spill + adc r1, r2, r6 + str r1, [sp, #44] @ 4-byte Spill + ldmib r3, {r1, r2, r6, r11} + ldr r7, [r3, #20] + ldr r4, [r3, #32] + ldr r9, [r3] + ldr r5, [sp, #16] @ 4-byte Reload + ldr lr, [r3, #24] + ldr r10, [r3, #28] + str r7, [sp, #12] @ 4-byte Spill + ldr r7, [sp, #24] @ 4-byte Reload + str r4, [sp, #8] @ 4-byte Spill + ldr r4, [r3, #36] + subs r9, r5, r9 + sbcs r1, r8, r1 + sbcs r2, r7, r2 + ldr r7, [sp, #32] @ 4-byte Reload + str r4, [sp] @ 4-byte Spill + ldr r4, [r3, #40] + sbcs r12, r7, r6 + ldr r7, [r3, #44] + ldr r3, [sp, #40] @ 4-byte Reload + str r4, [sp, #4] @ 4-byte Spill + ldr r4, [sp, #48] @ 4-byte Reload + ldr r6, [sp, #12] @ 4-byte Reload + sbcs r3, r3, r11 + sbcs r11, r4, r6 + ldr r4, [sp, #56] @ 4-byte Reload + ldr r6, [sp, #8] @ 4-byte Reload + sbcs lr, r4, lr + ldr r4, [sp, #52] @ 4-byte Reload + sbcs r10, r4, r10 + ldr r4, [sp, #20] @ 4-byte Reload + sbcs 
r4, r4, r6 + ldr r6, [sp] @ 4-byte Reload + str r4, [sp, #8] @ 4-byte Spill + ldr r4, [sp, #28] @ 4-byte Reload + sbcs r4, r4, r6 + ldr r6, [sp, #36] @ 4-byte Reload + str r4, [sp] @ 4-byte Spill + ldr r4, [sp, #4] @ 4-byte Reload + sbcs r6, r6, r4 + str r6, [sp, #12] @ 4-byte Spill + ldr r6, [sp, #44] @ 4-byte Reload + sbc r6, r6, r7 + asr r7, r6, #31 + cmp r7, #0 + movlt r9, r5 + movlt r1, r8 + str r9, [r0] + str r1, [r0, #4] + ldr r1, [sp, #24] @ 4-byte Reload + movlt r2, r1 + ldr r1, [sp, #32] @ 4-byte Reload + cmp r7, #0 + str r2, [r0, #8] + ldr r2, [sp, #8] @ 4-byte Reload + movlt r12, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r12, [r0, #12] + movlt r3, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r3, [r0, #16] + ldr r3, [sp, #12] @ 4-byte Reload + movlt r11, r1 + ldr r1, [sp, #56] @ 4-byte Reload + cmp r7, #0 + str r11, [r0, #20] + movlt lr, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str lr, [r0, #24] + movlt r10, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r10, [r0, #28] + movlt r2, r1 + ldr r1, [sp, #28] @ 4-byte Reload + cmp r7, #0 + ldr r7, [sp] @ 4-byte Reload + str r2, [r0, #32] + movlt r7, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r7, [r0, #36] + movlt r3, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r3, [r0, #40] + movlt r6, r1 + str r6, [r0, #44] + add sp, sp, #60 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end185: + .size mcl_fp_addNF12L, .Lfunc_end185-mcl_fp_addNF12L + .cantunwind + .fnend + + .globl mcl_fp_sub12L + .align 2 + .type mcl_fp_sub12L,%function +mcl_fp_sub12L: @ @mcl_fp_sub12L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #64 + sub sp, sp, #64 + ldr r9, [r2] + ldmib r2, {r8, r12, lr} + ldm r1, {r4, r5, r6, r7} + add r10, r1, #32 + subs r4, r4, r9 + sbcs r5, r5, r8 + str r4, [sp, #48] @ 4-byte Spill + ldr r4, [r2, #24] + sbcs r6, r6, r12 + str r5, [sp, #52] @ 4-byte Spill + ldr r5, [r2, #20] + sbcs r7, r7, lr + str r6, [sp, #56] @ 4-byte Spill + ldr r6, [r2, #16] + str r7, [sp, #60] @ 4-byte Spill + ldr r7, [r1, #16] + sbcs r11, r7, r6 + ldr r6, [r1, #20] + str r11, [sp, #28] @ 4-byte Spill + sbcs lr, r6, r5 + ldr r5, [r1, #24] + str lr, [sp, #40] @ 4-byte Spill + sbcs r7, r5, r4 + ldr r4, [r2, #28] + ldr r5, [r1, #28] + str r7, [sp, #44] @ 4-byte Spill + add r7, r2, #32 + sbcs r12, r5, r4 + str r12, [sp, #36] @ 4-byte Spill + ldm r7, {r4, r5, r6, r7} + ldm r10, {r2, r8, r9, r10} + ldr r1, [sp, #48] @ 4-byte Reload + sbcs r4, r2, r4 + ldr r2, [sp, #56] @ 4-byte Reload + str r1, [r0] + sbcs r8, r8, r5 + str r4, [sp, #32] @ 4-byte Spill + sbcs r6, r9, r6 + sbcs r7, r10, r7 + ldr r10, [sp, #52] @ 4-byte Reload + str r10, [r0, #4] + str r2, [r0, #8] + ldr r2, [sp, #60] @ 4-byte Reload + str r2, [r0, #12] + ldr r2, [sp, #44] @ 4-byte Reload + str r11, [r0, #16] + str lr, [r0, #20] + str r2, [r0, #24] + str r12, [r0, #28] + str r4, [r0, #32] + mov r4, #0 + str r8, [r0, #36] + str r6, [r0, #40] + str r7, [r0, #44] + sbc r4, r4, #0 + tst r4, #1 + beq .LBB186_2 +@ BB#1: @ %carry + ldr r5, [r3, #32] + ldr r4, [r3, #20] + ldr r12, [r3, #28] + ldr r9, [r3, #4] + ldr lr, [r3, #12] + ldr r11, [r3, #16] + str r5, [sp, #12] @ 4-byte Spill + ldr r5, [r3, #36] + str r4, [sp] @ 4-byte Spill + ldr r4, [r3, #24] + str r12, [sp, #8] @ 4-byte Spill + str r5, [sp, #16] @ 4-byte Spill + ldr r5, [r3, #40] + str r4, [sp, #4] @ 4-byte Spill + str r5, [sp, #20] @ 4-byte Spill + ldr r5, [r3, #44] + str r5, [sp, #24] @ 4-byte Spill + ldr r5, [r3, #8] + ldr r3, [r3] + adds r3, r3, r1 + ldr r1, 
[sp, #56] @ 4-byte Reload + adcs r4, r9, r10 + adcs r5, r5, r1 + ldr r1, [sp, #60] @ 4-byte Reload + stm r0, {r3, r4, r5} + ldr r3, [sp] @ 4-byte Reload + adcs r1, lr, r1 + str r1, [r0, #12] + ldr r1, [sp, #28] @ 4-byte Reload + adcs r1, r11, r1 + str r1, [r0, #16] + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r3, r1 + ldr r3, [sp, #20] @ 4-byte Reload + str r1, [r0, #20] + ldr r1, [sp, #4] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #8] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #36] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #12] @ 4-byte Reload + str r1, [r0, #28] + ldr r1, [sp, #32] @ 4-byte Reload + add r0, r0, #32 + adcs r1, r2, r1 + ldr r2, [sp, #16] @ 4-byte Reload + adcs r2, r2, r8 + adcs r3, r3, r6 + ldr r6, [sp, #24] @ 4-byte Reload + adc r7, r6, r7 + stm r0, {r1, r2, r3, r7} +.LBB186_2: @ %nocarry + add sp, sp, #64 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end186: + .size mcl_fp_sub12L, .Lfunc_end186-mcl_fp_sub12L + .cantunwind + .fnend + + .globl mcl_fp_subNF12L + .align 2 + .type mcl_fp_subNF12L,%function +mcl_fp_subNF12L: @ @mcl_fp_subNF12L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #56 + sub sp, sp, #56 + mov r12, r0 + ldr r0, [r2, #32] + add r11, r2, #8 + ldr r6, [r2] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [r2, #36] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r2, #40] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r2, #44] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r1, #32] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [r1, #36] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [r1, #40] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r1, #44] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r2, #4] + str r0, [sp, #36] @ 4-byte Spill + ldm r11, {r8, r10, r11} + ldr r0, [r2, #20] + ldr lr, [r1, #16] + ldr r7, [r1, #20] + ldr r5, [r1, #24] + ldr r4, [r1, #28] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r2, #24] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r2, #28] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r1, #12] + ldm r1, {r1, r2, r9} + subs r1, r1, r6 + ldr r6, [sp, #36] @ 4-byte Reload + sbcs r2, r2, r6 + sbcs r6, r9, r8 + mov r9, r2 + sbcs r10, r0, r10 + str r6, [sp, #4] @ 4-byte Spill + sbcs r0, lr, r11 + add r11, r3, #8 + ldr lr, [r3, #4] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + sbcs r0, r7, r0 + ldr r7, [sp, #8] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + sbcs r0, r5, r0 + ldr r5, [sp, #20] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + sbcs r0, r4, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + sbcs r0, r7, r0 + ldr r7, [sp, #12] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + sbcs r0, r7, r0 + ldr r7, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + sbcs r0, r7, r0 + ldr r7, [sp, #24] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + sbc r0, r5, r7 + ldr r7, [r3, #36] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r3, #32] + str r7, [sp, #12] @ 4-byte Spill + ldr r7, [r3, #40] + str r0, [sp] @ 4-byte Spill + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r3, #44] + str r7, [sp, #20] @ 4-byte Spill + ldm r11, {r7, r8, r11} + ldr r4, [r3, #28] + ldr r5, [r3, #20] + ldr r0, [r3, #24] + ldr r3, [r3] + str r4, [sp, #8] @ 4-byte Spill + mov r4, r1 + adds r1, r4, r3 + ldr r3, [sp, #36] @ 4-byte Reload + adcs r2, 
r9, lr + adcs lr, r6, r7 + adcs r6, r10, r8 + adcs r7, r3, r11 + ldr r3, [sp, #40] @ 4-byte Reload + adcs r8, r3, r5 + ldr r3, [sp, #44] @ 4-byte Reload + adcs r5, r3, r0 + ldr r3, [sp, #48] @ 4-byte Reload + ldr r0, [sp, #8] @ 4-byte Reload + adcs r11, r3, r0 + ldr r3, [sp, #52] @ 4-byte Reload + ldr r0, [sp] @ 4-byte Reload + adcs r3, r3, r0 + ldr r0, [sp, #12] @ 4-byte Reload + str r3, [sp, #8] @ 4-byte Spill + ldr r3, [sp, #32] @ 4-byte Reload + adcs r3, r3, r0 + ldr r0, [sp, #28] @ 4-byte Reload + str r3, [sp, #12] @ 4-byte Spill + ldr r3, [sp, #16] @ 4-byte Reload + adcs r0, r0, r3 + ldr r3, [sp, #20] @ 4-byte Reload + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r3, r0, r3 + str r3, [sp, #20] @ 4-byte Spill + asr r3, r0, #31 + ldr r0, [sp, #4] @ 4-byte Reload + cmp r3, #0 + movge r1, r4 + movge r2, r9 + str r1, [r12] + str r2, [r12, #4] + ldr r1, [sp, #8] @ 4-byte Reload + ldr r2, [sp, #32] @ 4-byte Reload + movge lr, r0 + ldr r0, [sp, #36] @ 4-byte Reload + cmp r3, #0 + movge r6, r10 + str lr, [r12, #8] + str r6, [r12, #12] + movge r7, r0 + ldr r0, [sp, #40] @ 4-byte Reload + str r7, [r12, #16] + ldr r7, [sp, #24] @ 4-byte Reload + movge r8, r0 + ldr r0, [sp, #44] @ 4-byte Reload + cmp r3, #0 + str r8, [r12, #20] + movge r5, r0 + ldr r0, [sp, #48] @ 4-byte Reload + str r5, [r12, #24] + movge r11, r0 + ldr r0, [sp, #52] @ 4-byte Reload + str r11, [r12, #28] + movge r1, r0 + cmp r3, #0 + ldr r3, [sp, #28] @ 4-byte Reload + ldr r0, [sp, #12] @ 4-byte Reload + movge r0, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [r12, #32] + add r1, r12, #36 + movge r2, r3 + ldr r3, [sp, #20] @ 4-byte Reload + movge r3, r7 + stm r1, {r0, r2, r3} + add sp, sp, #56 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end187: + .size mcl_fp_subNF12L, .Lfunc_end187-mcl_fp_subNF12L + .cantunwind + .fnend + + .globl mcl_fpDbl_add12L + .align 2 + .type mcl_fpDbl_add12L,%function +mcl_fpDbl_add12L: @ @mcl_fpDbl_add12L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #136 + sub sp, sp, #136 + ldm r1, {r7, r8, r12, lr} + ldm r2, {r4, r5, r6, r9} + ldr r10, [r2, #20] + adds r4, r4, r7 + str r4, [sp, #80] @ 4-byte Spill + ldr r4, [r2, #64] + str r4, [sp, #108] @ 4-byte Spill + ldr r4, [r2, #68] + str r4, [sp, #112] @ 4-byte Spill + ldr r4, [r2, #72] + str r4, [sp, #116] @ 4-byte Spill + ldr r4, [r2, #76] + str r4, [sp, #120] @ 4-byte Spill + ldr r4, [r2, #80] + str r4, [sp, #124] @ 4-byte Spill + ldr r4, [r2, #84] + str r4, [sp, #128] @ 4-byte Spill + ldr r4, [r2, #88] + str r4, [sp, #132] @ 4-byte Spill + ldr r4, [r2, #92] + str r4, [sp, #76] @ 4-byte Spill + adcs r4, r5, r8 + adcs r7, r6, r12 + ldr r6, [r2, #16] + str r4, [sp, #28] @ 4-byte Spill + str r7, [sp, #24] @ 4-byte Spill + adcs r7, r9, lr + add r9, r1, #32 + add lr, r1, #16 + str r7, [sp, #32] @ 4-byte Spill + ldr r7, [r2, #32] + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [r2, #36] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r2, #40] + str r7, [sp, #84] @ 4-byte Spill + ldr r7, [r2, #44] + str r7, [sp, #88] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #92] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #96] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #100] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #104] @ 4-byte Spill + ldr r7, [r2, #28] + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #24] + ldr r2, [r1, #64] + str r2, [sp, #40] @ 4-byte Spill + ldr r2, [r1, #68] + str r7, [sp, #16] @ 4-byte Spill + str r2, 
[sp, #44] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #52] @ 4-byte Spill + ldr r2, [r1, #80] + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [r1, #84] + str r2, [sp, #60] @ 4-byte Spill + ldr r2, [r1, #88] + str r2, [sp, #64] @ 4-byte Spill + ldr r2, [r1, #92] + str r2, [sp, #68] @ 4-byte Spill + ldm r9, {r4, r5, r8, r9} + ldr r2, [r1, #48] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #52] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #12] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r11, [sp, #80] @ 4-byte Reload + ldr r7, [sp, #28] @ 4-byte Reload + adcs r1, r6, r1 + str r11, [r0] + str r7, [r0, #4] + ldr r7, [sp, #24] @ 4-byte Reload + ldr r6, [sp, #32] @ 4-byte Reload + adcs r2, r10, r2 + ldr r10, [r3] + str r7, [r0, #8] + str r6, [r0, #12] + str r1, [r0, #16] + ldr r1, [sp, #16] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #20] @ 4-byte Reload + ldr r7, [sp] @ 4-byte Reload + adcs r1, r1, r12 + str r1, [r0, #24] + ldr r1, [sp, #36] @ 4-byte Reload + adcs r2, r2, lr + str r2, [r0, #28] + ldr r2, [sp, #72] @ 4-byte Reload + adcs r1, r1, r4 + str r1, [r0, #32] + ldr r1, [sp, #84] @ 4-byte Reload + adcs r2, r2, r5 + ldr r5, [r3, #12] + str r2, [r0, #36] + ldr r2, [sp, #88] @ 4-byte Reload + adcs r1, r1, r8 + str r1, [r0, #40] + ldr r1, [sp, #92] @ 4-byte Reload + adcs r2, r2, r9 + str r2, [r0, #44] + ldr r2, [sp, #4] @ 4-byte Reload + adcs r12, r1, r7 + ldr r1, [sp, #96] @ 4-byte Reload + str r12, [sp, #80] @ 4-byte Spill + adcs r8, r1, r2 + ldr r1, [sp, #100] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + str r8, [sp, #88] @ 4-byte Spill + adcs lr, r1, r2 + ldr r1, [sp, #104] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + str lr, [sp, #92] @ 4-byte Spill + adcs r4, r1, r2 + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [sp, #40] @ 4-byte Reload + str r4, [sp, #104] @ 4-byte Spill + adcs r9, r1, r2 + ldr r1, [sp, #112] @ 4-byte Reload + ldr r2, [sp, #44] @ 4-byte Reload + str r9, [sp, #96] @ 4-byte Spill + adcs r11, r1, r2 + ldr r1, [sp, #116] @ 4-byte Reload + ldr r2, [sp, #48] @ 4-byte Reload + str r11, [sp, #108] @ 4-byte Spill + adcs r6, r1, r2 + ldr r1, [sp, #120] @ 4-byte Reload + ldr r2, [sp, #52] @ 4-byte Reload + str r6, [sp, #112] @ 4-byte Spill + adcs r7, r1, r2 + ldr r1, [sp, #124] @ 4-byte Reload + ldr r2, [sp, #56] @ 4-byte Reload + str r7, [sp, #116] @ 4-byte Spill + adcs r1, r1, r2 + ldr r2, [sp, #60] @ 4-byte Reload + str r1, [sp, #124] @ 4-byte Spill + ldr r1, [sp, #128] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #64] @ 4-byte Reload + str r1, [sp, #120] @ 4-byte Spill + ldr r1, [sp, #132] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #68] @ 4-byte Reload + str r1, [sp, #128] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [r3, #8] + str r1, [sp, #132] @ 4-byte Spill + mov r1, #0 + adc r1, r1, #0 + subs r10, r12, r10 + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [r3, #4] + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [r3, #16] + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [r3, #20] + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [r3, #24] + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [r3, #28] + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + sbcs r1, r8, r1 + ldr r8, [r3, #40] + sbcs r2, lr, r2 + ldr lr, [r3, #32] + sbcs r12, r4, r5 + ldr r4, [r3, #36] + ldr r3, [r3, #44] + ldr r5, [sp, #72] @ 4-byte Reload + str r3, [sp, #64] @ 4-byte Spill + ldr r3, [sp, 
#68] @ 4-byte Reload + sbcs r3, r9, r3 + sbcs r9, r11, r5 + ldr r5, [sp, #76] @ 4-byte Reload + sbcs r5, r6, r5 + ldr r6, [sp, #84] @ 4-byte Reload + sbcs r6, r7, r6 + ldr r7, [sp, #124] @ 4-byte Reload + sbcs r11, r7, lr + ldr r7, [sp, #120] @ 4-byte Reload + sbcs lr, r7, r4 + ldr r7, [sp, #128] @ 4-byte Reload + ldr r4, [sp, #64] @ 4-byte Reload + sbcs r8, r7, r8 + ldr r7, [sp, #132] @ 4-byte Reload + sbcs r4, r7, r4 + ldr r7, [sp, #100] @ 4-byte Reload + str r4, [sp, #84] @ 4-byte Spill + ldr r4, [sp, #80] @ 4-byte Reload + sbc r7, r7, #0 + ands r7, r7, #1 + movne r10, r4 + ldr r4, [sp, #88] @ 4-byte Reload + str r10, [r0, #48] + movne r1, r4 + str r1, [r0, #52] + ldr r1, [sp, #92] @ 4-byte Reload + movne r2, r1 + ldr r1, [sp, #104] @ 4-byte Reload + cmp r7, #0 + str r2, [r0, #56] + ldr r2, [sp, #84] @ 4-byte Reload + movne r12, r1 + ldr r1, [sp, #96] @ 4-byte Reload + str r12, [r0, #60] + movne r3, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r3, [r0, #64] + movne r9, r1 + ldr r1, [sp, #112] @ 4-byte Reload + cmp r7, #0 + str r9, [r0, #68] + movne r5, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r5, [r0, #72] + movne r6, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r6, [r0, #76] + movne r11, r1 + ldr r1, [sp, #120] @ 4-byte Reload + cmp r7, #0 + str r11, [r0, #80] + movne lr, r1 + ldr r1, [sp, #128] @ 4-byte Reload + str lr, [r0, #84] + movne r8, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r8, [r0, #88] + movne r2, r1 + str r2, [r0, #92] + add sp, sp, #136 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end188: + .size mcl_fpDbl_add12L, .Lfunc_end188-mcl_fpDbl_add12L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub12L + .align 2 + .type mcl_fpDbl_sub12L,%function +mcl_fpDbl_sub12L: @ @mcl_fpDbl_sub12L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #136 + sub sp, sp, #136 + ldr r7, [r2, #64] + str r7, [sp, #128] @ 4-byte Spill + ldr r7, [r2, #68] + str r7, [sp, #104] @ 4-byte Spill + ldr r7, [r2, #72] + str r7, [sp, #132] @ 4-byte Spill + ldr r7, [r2, #76] + str r7, [sp, #108] @ 4-byte Spill + ldr r7, [r2, #80] + str r7, [sp, #112] @ 4-byte Spill + ldr r7, [r2, #84] + str r7, [sp, #116] @ 4-byte Spill + ldr r7, [r2, #88] + str r7, [sp, #124] @ 4-byte Spill + ldr r7, [r2, #92] + str r7, [sp, #120] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #100] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #96] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #92] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #88] @ 4-byte Spill + ldr r7, [r2, #44] + str r7, [sp, #84] @ 4-byte Spill + ldr r7, [r2, #40] + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r2] + ldmib r2, {r6, r9} + ldr r5, [r1] + ldr r8, [r2, #12] + ldmib r1, {r4, lr} + ldr r12, [r1, #12] + ldr r10, [r2, #20] + subs r5, r5, r7 + sbcs r4, r4, r6 + str r5, [sp, #32] @ 4-byte Spill + ldr r5, [r2, #36] + ldr r6, [r2, #16] + sbcs r7, lr, r9 + str r4, [sp, #24] @ 4-byte Spill + ldr r4, [r2, #32] + add r9, r1, #32 + add lr, r1, #16 + str r7, [sp, #12] @ 4-byte Spill + ldr r7, [r2, #28] + str r5, [sp, #44] @ 4-byte Spill + str r4, [sp, #40] @ 4-byte Spill + str r7, [sp, #36] @ 4-byte Spill + sbcs r7, r12, r8 + str r7, [sp, #8] @ 4-byte Spill + ldr r7, [r2, #24] + ldr r2, [r1, #64] + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [r1, #68] + str r7, [sp, #28] @ 4-byte Spill + str r2, [sp, #52] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #60] @ 4-byte Spill + ldr r2, [r1, #80] + str r2, 
[sp, #64] @ 4-byte Spill + ldr r2, [r1, #84] + str r2, [sp, #68] @ 4-byte Spill + ldr r2, [r1, #88] + str r2, [sp, #72] @ 4-byte Spill + ldr r2, [r1, #92] + str r2, [sp, #76] @ 4-byte Spill + ldm r9, {r4, r5, r8, r9} + ldr r2, [r1, #48] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #52] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #20] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r11, [sp, #32] @ 4-byte Reload + ldr r7, [sp, #24] @ 4-byte Reload + sbcs r1, r1, r6 + str r11, [r0] + str r7, [r0, #4] + ldr r7, [sp, #12] @ 4-byte Reload + ldr r6, [sp, #8] @ 4-byte Reload + sbcs r2, r2, r10 + str r7, [r0, #8] + str r6, [r0, #12] + str r1, [r0, #16] + ldr r1, [sp, #28] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #36] @ 4-byte Reload + ldr r7, [sp] @ 4-byte Reload + sbcs r1, r12, r1 + str r1, [r0, #24] + ldr r1, [sp, #40] @ 4-byte Reload + sbcs r2, lr, r2 + str r2, [r0, #28] + ldr r2, [sp, #44] @ 4-byte Reload + sbcs r1, r4, r1 + str r1, [r0, #32] + ldr r1, [sp, #80] @ 4-byte Reload + sbcs r2, r5, r2 + str r2, [r0, #36] + ldr r2, [sp, #84] @ 4-byte Reload + sbcs r1, r8, r1 + str r1, [r0, #40] + ldr r1, [sp, #88] @ 4-byte Reload + sbcs r2, r9, r2 + str r2, [r0, #44] + ldr r2, [sp, #4] @ 4-byte Reload + sbcs r9, r7, r1 + ldr r1, [sp, #92] @ 4-byte Reload + ldr r7, [sp, #16] @ 4-byte Reload + str r9, [sp, #40] @ 4-byte Spill + sbcs lr, r2, r1 + ldr r2, [sp, #96] @ 4-byte Reload + mov r1, #0 + str lr, [sp, #44] @ 4-byte Spill + sbcs r2, r7, r2 + ldr r7, [sp, #20] @ 4-byte Reload + str r2, [sp, #92] @ 4-byte Spill + ldr r2, [sp, #100] @ 4-byte Reload + sbcs r4, r7, r2 + ldr r2, [sp, #128] @ 4-byte Reload + ldr r7, [sp, #48] @ 4-byte Reload + str r4, [sp, #88] @ 4-byte Spill + sbcs r2, r7, r2 + ldr r7, [sp, #52] @ 4-byte Reload + str r2, [sp, #128] @ 4-byte Spill + ldr r2, [sp, #104] @ 4-byte Reload + sbcs r5, r7, r2 + ldr r2, [sp, #132] @ 4-byte Reload + ldr r7, [sp, #56] @ 4-byte Reload + str r5, [sp, #96] @ 4-byte Spill + sbcs r2, r7, r2 + ldr r7, [sp, #60] @ 4-byte Reload + str r2, [sp, #132] @ 4-byte Spill + ldr r2, [sp, #108] @ 4-byte Reload + sbcs r8, r7, r2 + ldr r2, [sp, #112] @ 4-byte Reload + ldr r7, [sp, #64] @ 4-byte Reload + str r8, [sp, #104] @ 4-byte Spill + sbcs r10, r7, r2 + ldr r2, [sp, #116] @ 4-byte Reload + ldr r7, [sp, #68] @ 4-byte Reload + str r10, [sp, #108] @ 4-byte Spill + sbcs r6, r7, r2 + ldr r2, [sp, #124] @ 4-byte Reload + ldr r7, [sp, #72] @ 4-byte Reload + str r6, [sp, #112] @ 4-byte Spill + sbcs r2, r7, r2 + ldr r7, [sp, #76] @ 4-byte Reload + str r2, [sp, #124] @ 4-byte Spill + ldr r2, [sp, #120] @ 4-byte Reload + sbcs r2, r7, r2 + sbc r1, r1, #0 + str r2, [sp, #120] @ 4-byte Spill + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [r3, #32] + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [r3, #36] + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [r3, #40] + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [r3, #44] + str r1, [sp, #116] @ 4-byte Spill + ldmib r3, {r1, r2, r12} + ldr r7, [r3, #16] + ldr r11, [r3, #20] + str r7, [sp, #64] @ 4-byte Spill + ldr r7, [r3, #24] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r3, #28] + ldr r3, [r3] + adds r3, r9, r3 + ldr r9, [sp, #92] @ 4-byte Reload + str r7, [sp, #68] @ 4-byte Spill + ldr r7, [sp, #72] @ 4-byte Reload + adcs r1, lr, r1 + ldr lr, [sp, #128] @ 4-byte Reload + adcs r2, r9, r2 + adcs r12, r4, r12 + ldr r4, [sp, #64] @ 4-byte Reload + adcs lr, lr, r4 + adcs r4, r5, r11 + ldr r5, [sp, #132] @ 4-byte Reload + ldr r11, [sp, #116] @ 4-byte 
Reload + adcs r5, r5, r7 + ldr r7, [sp, #68] @ 4-byte Reload + adcs r8, r8, r7 + ldr r7, [sp, #76] @ 4-byte Reload + adcs r10, r10, r7 + ldr r7, [sp, #80] @ 4-byte Reload + adcs r6, r6, r7 + ldr r7, [sp, #84] @ 4-byte Reload + str r6, [sp, #80] @ 4-byte Spill + ldr r6, [sp, #124] @ 4-byte Reload + adcs r6, r6, r7 + ldr r7, [sp, #40] @ 4-byte Reload + str r6, [sp, #84] @ 4-byte Spill + ldr r6, [sp, #120] @ 4-byte Reload + adc r6, r6, r11 + str r6, [sp, #116] @ 4-byte Spill + ldr r6, [sp, #100] @ 4-byte Reload + ands r6, r6, #1 + moveq r3, r7 + moveq r2, r9 + str r3, [r0, #48] + ldr r3, [sp, #44] @ 4-byte Reload + moveq r1, r3 + cmp r6, #0 + str r1, [r0, #52] + ldr r1, [sp, #88] @ 4-byte Reload + str r2, [r0, #56] + ldr r2, [sp, #80] @ 4-byte Reload + moveq r12, r1 + ldr r1, [sp, #128] @ 4-byte Reload + str r12, [r0, #60] + moveq lr, r1 + ldr r1, [sp, #96] @ 4-byte Reload + str lr, [r0, #64] + moveq r4, r1 + ldr r1, [sp, #132] @ 4-byte Reload + cmp r6, #0 + str r4, [r0, #68] + moveq r5, r1 + ldr r1, [sp, #104] @ 4-byte Reload + str r5, [r0, #72] + moveq r8, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r8, [r0, #76] + moveq r10, r1 + ldr r1, [sp, #112] @ 4-byte Reload + cmp r6, #0 + str r10, [r0, #80] + moveq r2, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r2, [r0, #84] + ldr r2, [sp, #84] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #120] @ 4-byte Reload + str r2, [r0, #88] + ldr r2, [sp, #116] @ 4-byte Reload + moveq r2, r1 + str r2, [r0, #92] + add sp, sp, #136 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end189: + .size mcl_fpDbl_sub12L, .Lfunc_end189-mcl_fpDbl_sub12L + .cantunwind + .fnend + + .align 2 + .type .LmulPv416x32,%function +.LmulPv416x32: @ @mulPv416x32 + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r11, lr} + push {r4, r5, r6, r7, r8, r9, r11, lr} + ldr r12, [r1] + ldmib r1, {r3, lr} + ldr r9, [r1, #12] + umull r4, r8, lr, r2 + umull lr, r6, r12, r2 + mov r5, r4 + mov r7, r6 + str lr, [r0] + umull lr, r12, r9, r2 + umlal r7, r5, r3, r2 + str r5, [r0, #8] + str r7, [r0, #4] + umull r5, r7, r3, r2 + adds r3, r6, r5 + adcs r3, r7, r4 + adcs r3, r8, lr + str r3, [r0, #12] + ldr r3, [r1, #16] + umull r7, r6, r3, r2 + adcs r3, r12, r7 + str r3, [r0, #16] + ldr r3, [r1, #20] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #20] + ldr r3, [r1, #24] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #24] + ldr r3, [r1, #28] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #28] + ldr r3, [r1, #32] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #32] + ldr r3, [r1, #36] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #36] + ldr r3, [r1, #40] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #40] + ldr r3, [r1, #44] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #44] + ldr r1, [r1, #48] + umull r3, r7, r1, r2 + adcs r1, r5, r3 + str r1, [r0, #48] + adc r1, r7, #0 + str r1, [r0, #52] + pop {r4, r5, r6, r7, r8, r9, r11, lr} + mov pc, lr +.Lfunc_end190: + .size .LmulPv416x32, .Lfunc_end190-.LmulPv416x32 + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre13L + .align 2 + .type mcl_fp_mulUnitPre13L,%function +mcl_fp_mulUnitPre13L: @ @mcl_fp_mulUnitPre13L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #68 + sub sp, sp, #68 + mov r4, r0 + add r0, sp, #8 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #60] + add r12, sp, #12 + ldr lr, [sp, #56] + ldr r8, [sp, #52] + ldr r9, [sp, #48] + ldr r10, [sp, #44] + ldr r11, [sp, #40] + ldr r5, [sp, #36] + ldr 
r6, [sp, #32] + ldr r7, [sp, #28] + ldr r3, [sp, #8] + str r0, [sp, #4] @ 4-byte Spill + ldm r12, {r0, r1, r2, r12} + str r3, [r4] + stmib r4, {r0, r1, r2, r12} + str r7, [r4, #20] + str r6, [r4, #24] + str r5, [r4, #28] + str r11, [r4, #32] + str r10, [r4, #36] + str r9, [r4, #40] + str r8, [r4, #44] + str lr, [r4, #48] + ldr r0, [sp, #4] @ 4-byte Reload + str r0, [r4, #52] + add sp, sp, #68 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end191: + .size mcl_fp_mulUnitPre13L, .Lfunc_end191-mcl_fp_mulUnitPre13L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre13L + .align 2 + .type mcl_fpDbl_mulPre13L,%function +mcl_fpDbl_mulPre13L: @ @mcl_fpDbl_mulPre13L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #820 + sub sp, sp, #820 + mov r7, r2 + mov r4, r0 + add r0, sp, #760 + str r1, [sp, #84] @ 4-byte Spill + mov r5, r1 + ldr r2, [r7] + str r7, [sp, #80] @ 4-byte Spill + str r4, [sp, #76] @ 4-byte Spill + bl .LmulPv416x32(PLT) + ldr r0, [sp, #812] + ldr r1, [sp, #764] + ldr r2, [r7, #4] + mov r6, r5 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #808] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #768] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #804] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #772] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #800] + str r1, [sp, #20] @ 4-byte Spill + mov r1, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #796] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #792] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #788] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #784] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #780] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #776] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #760] + str r0, [r4] + add r0, sp, #704 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #756] + add r10, sp, #728 + add lr, sp, #704 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #752] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #748] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #744] + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r5, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #24] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #4] + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r7, #8] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r6 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, 
[sp, #28] @ 4-byte Spill + add r0, sp, #648 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #700] + add lr, sp, #676 + add r9, sp, #656 + ldr r11, [sp, #692] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #696] + str r0, [sp, #16] @ 4-byte Spill + ldm lr, {r5, r7, r12, lr} + ldr r8, [sp, #648] + ldr r10, [sp, #652] + ldm r9, {r0, r1, r2, r3, r9} + ldr r6, [sp, #24] @ 4-byte Reload + adds r6, r8, r6 + str r6, [r4, #8] + mov r6, r4 + ldr r4, [sp, #40] @ 4-byte Reload + adcs r4, r10, r4 + str r4, [sp, #24] @ 4-byte Spill + ldr r4, [sp, #36] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #84] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r5, r0 + ldr r5, [sp, #80] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + ldr r2, [r5, #12] + adcs r0, r7, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #592 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #644] + add lr, sp, #612 + add r7, sp, #600 + ldr r8, [sp, #628] + ldr r11, [sp, #624] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #640] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #636] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #632] + str r0, [sp, #8] @ 4-byte Spill + ldm lr, {r3, r12, lr} + ldr r0, [sp, #592] + ldr r9, [sp, #596] + ldm r7, {r1, r2, r7} + ldr r10, [sp, #24] @ 4-byte Reload + adds r0, r0, r10 + str r0, [r6, #12] + ldr r0, [sp, #40] @ 4-byte Reload + adcs r6, r9, r0 + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #16] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #536 + bl .LmulPv416x32(PLT) + ldr 
r0, [sp, #588] + ldr r8, [sp, #536] + add r4, sp, #540 + ldr r11, [sp, #580] + ldr r9, [sp, #576] + ldr lr, [sp, #572] + ldr r5, [sp, #568] + ldr r10, [sp, #564] + ldr r12, [sp, #560] + ldr r3, [sp, #556] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #584] + adds r6, r8, r6 + str r0, [sp, #16] @ 4-byte Spill + ldm r4, {r0, r1, r2, r4} + ldr r7, [sp, #76] @ 4-byte Reload + str r6, [r7, #16] + ldr r6, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #80] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + ldr r2, [r4, #20] + adcs r0, r3, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r5, r0 + ldr r5, [sp, #84] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #480 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #532] + add r10, sp, #480 + add r12, sp, #492 + ldr r6, [sp, #516] + ldr r11, [sp, #512] + ldr lr, [sp, #508] + ldr r9, [sp, #504] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #528] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #524] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #520] + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r0, r1, r10} + ldm r12, {r2, r3, r12} + ldr r8, [sp, #24] @ 4-byte Reload + adds r0, r0, r8 + str r0, [r7, #20] + ldr r0, [sp, #44] @ 4-byte Reload + mov r7, r5 + adcs r0, r1, r0 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r4, #24] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #424 + bl 
.LmulPv416x32(PLT) + ldr r0, [sp, #476] + add r5, sp, #428 + ldr r11, [sp, #464] + ldr r9, [sp, #460] + ldr lr, [sp, #456] + ldr r10, [sp, #452] + ldr r12, [sp, #448] + ldr r3, [sp, #444] + ldr r8, [sp, #424] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #472] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #468] + str r0, [sp, #12] @ 4-byte Spill + ldm r5, {r0, r1, r2, r5} + ldr r4, [sp, #24] @ 4-byte Reload + adds r6, r8, r4 + ldr r4, [sp, #76] @ 4-byte Reload + str r6, [r4, #24] + ldr r6, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r5, r0 + ldr r5, [sp, #80] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + ldr r2, [r5, #28] + adcs r0, r3, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r7 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #368 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #420] + add r12, sp, #388 + add r10, sp, #368 + ldr lr, [sp, #408] + ldr r6, [sp, #404] + ldr r11, [sp, #400] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #416] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #412] + str r0, [sp, #12] @ 4-byte Spill + ldm r12, {r3, r9, r12} + ldr r7, [sp, #384] + ldm r10, {r0, r1, r10} + ldr r8, [sp, #24] @ 4-byte Reload + ldr r2, [sp, #380] + adds r0, r0, r8 + str r0, [r4, #28] + ldr r0, [sp, #52] @ 4-byte Reload + ldr r4, [sp, #84] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #32] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, 
#28] @ 4-byte Spill + add r0, sp, #312 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #364] + add r11, sp, #344 + add lr, sp, #316 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #360] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #356] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r7, r9, r11} + ldr r10, [sp, #340] + ldr r8, [sp, #312] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r5, [sp, #24] @ 4-byte Reload + adds r6, r8, r5 + ldr r5, [sp, #76] @ 4-byte Reload + str r6, [r5, #32] + ldr r6, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #80] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r6, #36] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #256 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #308] + add lr, sp, #288 + add r12, sp, #268 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #304] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #300] + str r0, [sp, #12] @ 4-byte Spill + ldm lr, {r7, r8, lr} + ldr r11, [sp, #284] + ldr r1, [sp, #256] + ldr r0, [sp, #260] + ldr r10, [sp, #264] + ldm r12, {r2, r3, r9, r12} + ldr r4, [sp, #24] @ 4-byte Reload + adds r1, r1, r4 + str r1, [r5, #36] + ldr r1, [sp, #60] @ 4-byte Reload + ldr r5, [sp, #84] @ 4-byte Reload + adcs r4, r0, r1 + ldr r0, [sp, #64] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r6, #40] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #200 + bl .LmulPv416x32(PLT) 
+ ldr r0, [sp, #252] + add r11, sp, #228 + add lr, sp, #204 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #248] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #244] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r7, r8, r10, r11} + ldr r9, [sp, #200] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r6, r9, r4 + ldr r4, [sp, #76] @ 4-byte Reload + str r6, [r4, #40] + ldr r6, [sp, #64] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #80] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r6, #44] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + add r0, sp, #144 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #196] + add r11, sp, #164 + add r12, sp, #152 + ldr lr, [sp, #184] + ldr r7, [sp, #180] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #192] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #188] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r5, r8, r10, r11} + ldr r2, [sp, #144] + ldr r1, [sp, #148] + ldm r12, {r0, r3, r12} + ldr r9, [sp, #24] @ 4-byte Reload + adds r2, r2, r9 + str r2, [r4, #44] + ldr r2, [r6, #48] + ldr r6, [sp, #20] @ 4-byte Reload + adcs r6, r1, r6 + ldr r1, [sp, #72] @ 4-byte Reload + adcs r9, r0, r1 + ldr r0, [sp, #68] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + add r0, sp, #88 + bl .LmulPv416x32(PLT) + add r3, sp, #88 + add r11, sp, #104 + ldm r3, {r0, r1, r2, r3} + adds r12, r0, r6 + ldr r0, [sp, #20] @ 4-byte Reload + adcs lr, r1, r9 + adcs r5, r2, r0 + ldr r0, 
[sp, #4] @ 4-byte Reload
+ adcs r6, r3, r0
+ ldr r0, [sp, #140]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldm r11, {r0, r1, r2, r3, r7, r8, r9, r10, r11}
+ str r12, [r4, #48]
+ str lr, [r4, #52]
+ str r5, [r4, #56]
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r6, [r4, #60]
+ ldr r6, [sp, #28] @ 4-byte Reload
+ add r12, r4, #80
+ adcs r0, r0, r5
+ adcs r1, r1, r6
+ str r0, [r4, #64]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r6, [sp, #84] @ 4-byte Reload
+ str r1, [r4, #68]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #72] @ 4-byte Reload
+ adcs r1, r3, r1
+ str r0, [r4, #72]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r3, [sp, #68] @ 4-byte Reload
+ str r1, [r4, #76]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r1, r8, r1
+ adcs r2, r9, r2
+ adcs r3, r10, r3
+ adcs r7, r11, r7
+ adc r6, r6, #0
+ stm r12, {r0, r1, r2, r3, r7}
+ str r6, [r4, #100]
+ add sp, sp, #820
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end192:
+ .size mcl_fpDbl_mulPre13L, .Lfunc_end192-mcl_fpDbl_mulPre13L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre13L
+ .align 2
+ .type mcl_fpDbl_sqrPre13L,%function
+mcl_fpDbl_sqrPre13L: @ @mcl_fpDbl_sqrPre13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #820
+ sub sp, sp, #820
+ mov r5, r1
+ mov r4, r0
+ add r0, sp, #760
+ ldr r2, [r5]
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #812]
+ ldr r1, [sp, #764]
+ ldr r2, [r5, #4]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #768]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #772]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r1, [sp, #32] @ 4-byte Spill
+ mov r1, r5
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #784]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #776]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #760]
+ str r0, [r4]
+ add r0, sp, #704
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #756]
+ add r10, sp, #728
+ add lr, sp, #704
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #752]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #748]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #36] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #4]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #8]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r10,
r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #648 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #700] + add lr, sp, #680 + add r11, sp, #656 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #696] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #692] + str r0, [sp, #24] @ 4-byte Spill + ldm lr, {r6, r12, lr} + ldr r8, [sp, #648] + ldr r10, [sp, #652] + ldm r11, {r0, r1, r2, r3, r9, r11} + ldr r7, [sp, #36] @ 4-byte Reload + adds r7, r8, r7 + str r7, [r4, #8] + ldr r7, [sp, #52] @ 4-byte Reload + adcs r7, r10, r7 + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #12] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #592 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #644] + add r9, sp, #620 + add lr, sp, #600 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #640] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #636] + str r0, [sp, #24] @ 4-byte Spill + ldm r9, {r6, r7, r8, r9} + ldr r0, [sp, #592] + ldr r11, [sp, #596] + ldm lr, {r1, r2, r3, r12, lr} + ldr r10, [sp, #36] @ 4-byte Reload + adds r0, r0, r10 + str r0, [r4, #12] + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #16] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, 
[sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #536 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #588] + add r12, sp, #540 + ldr r11, [sp, #576] + ldr lr, [sp, #572] + ldr r6, [sp, #568] + ldr r8, [sp, #536] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #584] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #580] + str r0, [sp, #24] @ 4-byte Spill + ldm r12, {r0, r1, r2, r3, r9, r10, r12} + ldr r7, [sp, #36] @ 4-byte Reload + adds r7, r8, r7 + str r7, [r4, #16] + ldr r7, [sp, #52] @ 4-byte Reload + adcs r7, r0, r7 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #20] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #480 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #532] + add r10, sp, #512 + add lr, sp, #484 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #528] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #524] + str r0, [sp, #24] @ 4-byte Spill + ldm r10, {r6, r8, r10} + ldr r9, [sp, #480] + ldr r11, [sp, #508] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r7, r9, r7 + str r7, [r4, #20] + ldr r7, [sp, #56] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #24] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #424 + bl .LmulPv416x32(PLT) + ldr r0, [sp, 
#476] + add r8, sp, #456 + add r12, sp, #432 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #472] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #468] + str r0, [sp, #24] @ 4-byte Spill + ldm r8, {r6, r7, r8} + ldr lr, [sp, #452] + ldr r10, [sp, #448] + ldr r0, [sp, #424] + ldr r11, [sp, #428] + ldm r12, {r1, r2, r3, r12} + ldr r9, [sp, #36] @ 4-byte Reload + adds r0, r0, r9 + str r0, [r4, #24] + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #28] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #368 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #420] + add r11, sp, #400 + add lr, sp, #372 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #416] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #412] + str r0, [sp, #24] @ 4-byte Spill + ldm r11, {r6, r8, r11} + ldr r10, [sp, #368] + ldm lr, {r0, r1, r2, r3, r9, r12, lr} + ldr r7, [sp, #36] @ 4-byte Reload + adds r7, r10, r7 + str r7, [r4, #28] + ldr r7, [sp, #64] @ 4-byte Reload + adcs r7, r0, r7 + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #32] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #312 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #364] + add r10, sp, #344 + add lr, sp, #316 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #360] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #356] + str r0, [sp, #24] @ 4-byte 
Spill + ldm r10, {r6, r8, r10} + ldr r9, [sp, #312] + ldr r11, [sp, #340] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r7, r9, r7 + str r7, [r4, #32] + ldr r7, [sp, #68] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #36] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #256 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #308] + add r8, sp, #288 + add r12, sp, #264 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #304] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #300] + str r0, [sp, #20] @ 4-byte Spill + ldm r8, {r6, r7, r8} + ldr lr, [sp, #284] + ldr r10, [sp, #280] + ldr r0, [sp, #256] + ldr r11, [sp, #260] + ldm r12, {r1, r2, r3, r12} + ldr r9, [sp, #36] @ 4-byte Reload + adds r0, r0, r9 + str r0, [r4, #36] + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #40] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #200 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #252] + add r10, sp, #228 + add r12, sp, #200 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #248] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #244] + str r0, [sp, #20] @ 4-byte Spill + ldm r10, {r6, r7, r8, r10} + ldr lr, [sp, #224] + ldr r9, [sp, #220] + ldm r12, {r0, r1, r2, r3, r12} + ldr r11, [sp, #32] @ 4-byte Reload + adds r0, r0, r11 + str 
r0, [r4, #40] + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #44] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #144 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #196] + add r12, sp, #148 + ldr r7, [sp, #180] + ldr r11, [sp, #176] + ldr r8, [sp, #172] + ldr lr, [sp, #168] + ldr r10, [sp, #164] + ldr r2, [sp, #144] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #192] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #188] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #184] + str r0, [sp, #16] @ 4-byte Spill + ldm r12, {r0, r1, r3, r12} + ldr r6, [sp, #32] @ 4-byte Reload + adds r2, r2, r6 + ldr r6, [sp, #84] @ 4-byte Reload + str r2, [r4, #44] + ldr r2, [r5, #48] + adcs r6, r0, r6 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r9, r1, r0 + ldr r0, [sp, #76] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + add r0, sp, #88 + bl .LmulPv416x32(PLT) + add r3, sp, #88 + add r11, sp, #104 + ldm r3, {r0, r1, r2, r3} + adds r12, r0, r6 + ldr r0, [sp, #12] @ 4-byte Reload + adcs lr, r1, r9 + adcs r5, r2, r0 + ldr r0, [sp, #8] @ 4-byte Reload + adcs r6, r3, r0 + ldr r0, [sp, #140] + str r0, [sp, #56] @ 4-byte Spill + ldm r11, {r0, r1, r2, r3, r7, r8, r9, r10, r11} + str r12, [r4, #48] + str lr, [r4, #52] + str r5, [r4, #56] + ldr r5, [sp, #32] @ 4-byte Reload + str r6, [r4, #60] + ldr r6, [sp, #36] @ 4-byte Reload + add r12, r4, #80 + 
adcs r0, r0, r5
+ adcs r1, r1, r6
+ str r0, [r4, #64]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r6, [sp, #56] @ 4-byte Reload
+ str r1, [r4, #68]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adcs r1, r3, r1
+ str r0, [r4, #72]
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r3, [sp, #76] @ 4-byte Reload
+ str r1, [r4, #76]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r1, r8, r1
+ adcs r2, r9, r2
+ adcs r3, r10, r3
+ adcs r7, r11, r7
+ adc r6, r6, #0
+ stm r12, {r0, r1, r2, r3, r7}
+ str r6, [r4, #100]
+ add sp, sp, #820
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end193:
+ .size mcl_fpDbl_sqrPre13L, .Lfunc_end193-mcl_fpDbl_sqrPre13L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont13L
+ .align 2
+ .type mcl_fp_mont13L,%function
+mcl_fp_mont13L: @ @mcl_fp_mont13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #548
+ sub sp, sp, #548
+ .pad #1024
+ sub sp, sp, #1024
+ add r12, sp, #100
+ add r6, sp, #1024
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, r6, #488
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #96] @ 4-byte Spill
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1516]
+ ldr r7, [sp, #1512]
+ mov r1, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1520]
+ mul r2, r7, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1524]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1564]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #1560]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1556]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1552]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1548]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1544]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1540]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1536]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1532]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1528]
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #1456
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1508]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r5, [sp, #1480]
+ ldr r10, [sp, #1476]
+ ldr r11, [sp, #1472]
+ ldr r6, [sp, #1456]
+ ldr r9, [sp, #1460]
+ ldr r8, [sp, #1464]
+ ldr r4, [sp, #1468]
+ add lr, sp, #1024
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1504]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1500]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1496]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1492]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1488]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1484]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, lr, #376
+ bl .LmulPv416x32(PLT)
+ adds r0, r6, r7
+ ldr r1, [sp, #36] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ ldr r3, [sp, #1416]
+ ldr r12, [sp, #1420]
+ ldr lr, [sp, #1424]
+ ldr r6, [sp, #1432]
+ ldr r7, [sp, #1436]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #1444]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #1440]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #1428]
+ adcs r1, r11, r1
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ ldr r11, [sp, #72] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r10, r1
+ ldr r10, [sp, #1448]
+
str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r5, r1 + ldr r5, [sp, #1400] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #92] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #1412] + adc r0, r0, #0 + adds r11, r11, r5 + ldr r5, [sp, #64] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #1408] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1452] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1404] + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #1344 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #1396] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r8, [sp, #1368] + ldr r9, [sp, #1364] + ldr r10, [sp, #1360] + ldr r11, [sp, #1344] + ldr r6, [sp, #1348] + ldr r7, [sp, #1352] + ldr r4, [sp, #1356] + add lr, sp, #1024 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1392] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1388] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1384] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1380] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1376] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1372] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #8] + add r0, lr, #264 + bl .LmulPv416x32(PLT) + adds r0, r5, r11 + ldr r1, [sp, #8] @ 4-byte Reload + ldr r5, [sp, #1288] + ldr r2, [sp, #1300] + ldr r3, [sp, #1304] + ldr r12, [sp, #1308] + ldr lr, [sp, #1312] + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1320] + str r0, [sp, 
#92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + ldr r11, [sp, #92] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1324] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1316] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1336] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1332] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1328] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1296] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + adds r11, r11, r5 + ldr r5, [sp, #88] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1340] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1292] + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #1232 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #1284] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r8, [sp, #1256] + ldr r9, [sp, #1252] + ldr r10, [sp, #1248] + ldr r11, [sp, #1232] + ldr r6, [sp, #1236] + ldr r7, [sp, #1240] + ldr r4, [sp, #1244] + add lr, sp, #1024 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1280] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1276] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1272] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1268] + str r0, [sp, #16] 
@ 4-byte Spill + ldr r0, [sp, #1264] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1260] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, lr, #152 + bl .LmulPv416x32(PLT) + adds r0, r5, r11 + ldr r1, [sp, #8] @ 4-byte Reload + ldr r5, [sp, #1176] + ldr r2, [sp, #1188] + ldr r3, [sp, #1192] + ldr r12, [sp, #1196] + ldr lr, [sp, #1200] + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1208] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + ldr r11, [sp, #92] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1212] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1204] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1224] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1220] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1216] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1184] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + adds r11, r11, r5 + ldr r5, [sp, #88] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1228] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1180] + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #1120 + bl .LmulPv416x32(PLT) 
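+@ The round structure here appears to be word-serial Montgomery (CIOS)
+@ multiplication: one .LmulPv416x32 call multiplies the operand by the next
+@ 32-bit word of the multiplier (e.g., ldr r2, [r0, #12] earlier in this
+@ round), the adcs chain accumulates it, and the following call multiplies
+@ the modulus by (low limb of the accumulator) * r5, where r5 was loaded
+@ from [r3, #-4] in the prologue (presumably -p^-1 mod 2^32), so the bottom
+@ limb cancels and the accumulator shifts down one limb. Inferred from the
+@ call pattern; the generated source carries no commentary.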
+ ldr r0, [sp, #1172] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r8, [sp, #1144] + ldr r9, [sp, #1140] + ldr r10, [sp, #1136] + ldr r11, [sp, #1120] + ldr r6, [sp, #1124] + ldr r7, [sp, #1128] + ldr r4, [sp, #1132] + add lr, sp, #1024 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1168] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1164] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1160] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1156] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1152] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1148] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, lr, #40 + bl .LmulPv416x32(PLT) + adds r0, r5, r11 + ldr r1, [sp, #8] @ 4-byte Reload + ldr r5, [sp, #1064] + ldr r2, [sp, #1076] + ldr r3, [sp, #1080] + ldr r12, [sp, #1084] + ldr lr, [sp, #1088] + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1096] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + ldr r11, [sp, #92] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1100] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1092] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1112] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1108] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1104] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1072] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + adds r11, r11, r5 + ldr r5, [sp, #88] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1116] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1068] + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] 
@ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #1008 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #1060] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r8, [sp, #1032] + ldr r9, [sp, #1028] + ldr r10, [sp, #1024] + ldr r11, [sp, #1008] + ldr r6, [sp, #1012] + ldr r7, [sp, #1016] + ldr r4, [sp, #1020] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1056] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1052] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1048] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1044] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1040] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1036] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #20] + add r0, sp, #952 + bl .LmulPv416x32(PLT) + adds r0, r5, r11 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #956 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #980 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1004] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r4, r6, r7, r8, r9, r10} + ldr r5, [sp, #952] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #92] @ 4-byte Reload + adds r11, r11, r5 + ldr r5, [sp, #88] @ 4-byte Reload + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte 
Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #896 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #948] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r8, [sp, #920] + ldr r9, [sp, #916] + ldr r10, [sp, #912] + ldr r11, [sp, #896] + ldr r6, [sp, #900] + ldr r7, [sp, #904] + ldr r4, [sp, #908] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #944] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #940] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #936] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #932] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #928] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #924] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, sp, #840 + bl .LmulPv416x32(PLT) + adds r0, r5, r11 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #844 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #868 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #892] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r4, r6, r7, r8, r9, r10} + ldr r5, [sp, #840] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #92] @ 4-byte Reload + adds r11, r11, r5 + ldr r5, [sp, #88] @ 4-byte Reload + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + 
ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #784 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #836] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r8, [sp, #808] + ldr r9, [sp, #804] + ldr r10, [sp, #800] + ldr r11, [sp, #784] + ldr r6, [sp, #788] + ldr r7, [sp, #792] + ldr r4, [sp, #796] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #832] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #828] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #824] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #820] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #816] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #812] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, sp, #728 + bl .LmulPv416x32(PLT) + adds r0, r5, r11 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #732 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #756 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #780] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r4, r6, r7, r8, r9, r10} + ldr r5, [sp, #728] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #92] @ 4-byte Reload + adds r11, r11, r5 + ldr r5, [sp, #88] @ 
4-byte Reload + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #672 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #724] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r8, [sp, #696] + ldr r9, [sp, #692] + ldr r10, [sp, #688] + ldr r11, [sp, #672] + ldr r6, [sp, #676] + ldr r7, [sp, #680] + ldr r4, [sp, #684] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #720] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #716] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #712] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #708] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #704] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #700] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, sp, #616 + bl .LmulPv416x32(PLT) + adds r0, r5, r11 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #620 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #644 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 
4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #668] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r4, r6, r7, r8, r9, r10} + ldr r5, [sp, #616] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #92] @ 4-byte Reload + adds r11, r11, r5 + ldr r5, [sp, #88] @ 4-byte Reload + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #560 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #612] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r8, [sp, #584] + ldr r9, [sp, #580] + ldr r10, [sp, #576] + ldr r11, [sp, #560] + ldr r6, [sp, #564] + ldr r7, [sp, #568] + ldr r4, [sp, #572] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #608] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #604] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #600] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #596] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #592] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #588] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #36] + add r0, sp, #504 + bl .LmulPv416x32(PLT) + adds r0, r5, r11 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #508 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #532 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, 
#28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #556] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #552] + str r0, [sp, #28] @ 4-byte Spill + ldm r10, {r4, r6, r8, r9, r10} + ldr r5, [sp, #504] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #92] @ 4-byte Reload + ldr r7, [sp, #88] @ 4-byte Reload + adds r5, r11, r5 + adcs r0, r7, r0 + str r5, [sp, #20] @ 4-byte Spill + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #96] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + mul r2, r5, r8 + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #448 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #500] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r5, [sp, #472] + ldr r9, [sp, #468] + ldr r10, [sp, #464] + ldr r11, [sp, #448] + ldr r6, [sp, #452] + ldr r7, [sp, #456] + ldr r4, [sp, #460] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #496] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #492] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #488] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #484] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #480] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #476] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #40] + add r0, sp, #392 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #20] @ 4-byte Reload + ldr r1, [sp, #88] @ 4-byte Reload + ldr r2, [sp, #4] @ 4-byte Reload + add lr, sp, #408 + adds r0, r0, r11 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + add r6, sp, #392 + adcs r11, r1, r7 + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r1, r4 + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r10 + add r10, sp, #432 + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r1, r9 + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r1, r5 + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #8] @ 4-byte Reload + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r2 + ldr 
r2, [sp, #12] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adc r1, r1, #0 + str r1, [sp, #36] @ 4-byte Spill + ldm r6, {r2, r5, r6} + ldr r4, [sp, #404] + adds r0, r0, r2 + mul r1, r0, r8 + adcs r5, r11, r5 + str r0, [sp, #92] @ 4-byte Spill + str r1, [sp, #28] @ 4-byte Spill + ldm r10, {r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + str r5, [sp, #88] @ 4-byte Spill + ldr r5, [sp, #84] @ 4-byte Reload + adcs r5, r5, r6 + str r5, [sp, #84] @ 4-byte Spill + ldr r5, [sp, #80] @ 4-byte Reload + adcs r4, r5, r4 + str r4, [sp, #80] @ 4-byte Spill + ldr r4, [sp, #76] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #36] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + add r0, sp, #336 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #388] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r6, [sp, #364] + ldr r8, [sp, #360] + ldr r9, [sp, #356] + ldr r10, [sp, #352] + ldr r7, [sp, #336] + ldr r4, [sp, #340] + ldr r11, [sp, #344] + ldr r5, [sp, #348] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #384] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #380] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #376] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #372] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #368] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #44] + add r0, sp, #280 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #84] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + add lr, sp, #296 + adds r0, r0, r7 + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #292] + adcs r11, r1, r11 + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r5 + ldr r5, [sp, #288] + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r1, r10 + add r10, sp, #320 + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r1, r9 + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r8 + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, 
#60] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #284] + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #12] @ 4-byte Reload + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #280] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + adc r1, r1, #0 + str r1, [sp, #28] @ 4-byte Spill + adds r1, r0, r2 + ldr r0, [sp, #96] @ 4-byte Reload + adcs r6, r11, r6 + str r1, [sp, #92] @ 4-byte Spill + mul r2, r1, r0 + str r2, [sp, #24] @ 4-byte Spill + ldm r10, {r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + str r6, [sp, #40] @ 4-byte Spill + ldr r6, [sp, #88] @ 4-byte Reload + adcs r5, r6, r5 + str r5, [sp, #36] @ 4-byte Spill + ldr r5, [sp, #84] @ 4-byte Reload + adcs r4, r5, r4 + str r4, [sp, #32] @ 4-byte Spill + ldr r4, [sp, #80] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #48] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #224 + bl .LmulPv416x32(PLT) + ldr r1, [sp, #276] + add r11, sp, #224 + ldr r4, [sp, #252] + ldr r8, [sp, #248] + ldr r9, [sp, #244] + ldr r10, [sp, #240] + add r0, sp, #168 + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #272] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #268] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #264] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #260] + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #256] + str r1, [sp, #8] @ 4-byte Spill + ldm r11, {r6, r7, r11} + ldr r1, [sp, #104] @ 4-byte Reload + ldr r5, [sp, #236] + ldr r2, [r1, #48] + ldr r1, [sp, #100] @ 4-byte Reload + bl .LmulPv416x32(PLT) + ldr r0, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #36] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + add lr, sp, #184 + adds r0, r0, r6 + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r7 + add r7, sp, #168 + adcs r1, r1, r11 + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + adcs r1, r1, r5 + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + adcs r1, r1, r10 + add r10, sp, #208 + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte 
Reload + adcs r1, r1, r9 + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r8 + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r1, r4 + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #12] @ 4-byte Reload + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adc r1, r1, #0 + str r1, [sp, #48] @ 4-byte Spill + ldm r7, {r2, r6, r7} + ldr r5, [sp, #180] + adds r4, r0, r2 + ldr r0, [sp, #96] @ 4-byte Reload + mul r1, r4, r0 + ldr r0, [sp, #220] + str r1, [sp, #44] @ 4-byte Spill + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #104] @ 4-byte Reload + adcs r11, r11, r6 + ldr r6, [sp, #100] @ 4-byte Reload + adcs r6, r6, r7 + str r6, [sp, #36] @ 4-byte Spill + ldr r6, [sp, #92] @ 4-byte Reload + adcs r5, r6, r5 + ldr r6, [sp, #88] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r8, r0, r8 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r9, r0, r9 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #108] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r6, r0, r1 + mov r0, #0 + mov r1, r10 + adc r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + add r0, sp, #112 + bl .LmulPv416x32(PLT) + add r3, sp, #112 + ldm r3, {r0, r1, r2, r3} + adds r0, r4, r0 + ldr r0, [sp, #36] @ 4-byte Reload + adcs r7, r11, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r7, [sp, #48] @ 4-byte Spill + adcs lr, r0, r2 + ldr r0, [sp, #128] + adcs r12, r5, r3 + str lr, [sp, #52] @ 4-byte Spill + str r12, [sp, #56] @ 4-byte Spill + adcs r4, r1, r0 + ldr r0, [sp, #132] + ldr r1, [sp, #84] @ 4-byte Reload + str r4, [sp, #60] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #88] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #136] + adcs r0, r1, r0 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #140] + adcs r0, r1, r0 + ldr r1, [sp, #96] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #144] + adcs r0, r1, r0 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #148] + adcs r0, r1, r0 + ldr r1, [sp, #104] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #152] + adcs r0, r8, r0 + str r0, [sp, #92] @ 
4-byte Spill + ldr r0, [sp, #156] + adcs r0, r9, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #160] + adcs r0, r1, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #164] + adcs r0, r6, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + mov r0, r10 + ldmib r0, {r1, r2, r3, r5} + ldr r6, [r0] + ldr r10, [r0, #20] + ldr r11, [r0, #28] + str r5, [sp, #40] @ 4-byte Spill + ldr r5, [r0, #24] + subs r6, r7, r6 + sbcs r9, lr, r1 + str r5, [sp, #44] @ 4-byte Spill + mov r5, r0 + sbcs r0, r12, r2 + ldr r2, [sp, #40] @ 4-byte Reload + ldr r1, [r5, #48] + sbcs r3, r4, r3 + ldr lr, [r5, #32] + ldr r12, [r5, #36] + ldr r8, [r5, #40] + ldr r4, [r5, #44] + ldr r5, [sp, #44] @ 4-byte Reload + str r1, [sp, #108] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + sbcs r1, r1, r2 + ldr r2, [sp, #76] @ 4-byte Reload + sbcs r7, r2, r10 + ldr r2, [sp, #80] @ 4-byte Reload + sbcs r2, r2, r5 + ldr r5, [sp, #84] @ 4-byte Reload + sbcs r10, r5, r11 + ldr r5, [sp, #88] @ 4-byte Reload + sbcs r11, r5, lr + ldr r5, [sp, #92] @ 4-byte Reload + sbcs r12, r5, r12 + ldr r5, [sp, #96] @ 4-byte Reload + sbcs lr, r5, r8 + ldr r5, [sp, #100] @ 4-byte Reload + sbcs r4, r5, r4 + ldr r5, [sp, #104] @ 4-byte Reload + str r4, [sp, #44] @ 4-byte Spill + ldr r4, [sp, #108] @ 4-byte Reload + sbcs r5, r5, r4 + str r5, [sp, #108] @ 4-byte Spill + ldr r5, [sp, #64] @ 4-byte Reload + sbc r5, r5, #0 + ands r8, r5, #1 + ldr r5, [sp, #48] @ 4-byte Reload + movne r6, r5 + ldr r5, [sp, #68] @ 4-byte Reload + str r6, [r5] + ldr r6, [sp, #52] @ 4-byte Reload + movne r9, r6 + ldr r6, [sp, #56] @ 4-byte Reload + str r9, [r5, #4] + movne r0, r6 + cmp r8, #0 + str r0, [r5, #8] + ldr r0, [sp, #60] @ 4-byte Reload + movne r3, r0 + ldr r0, [sp, #72] @ 4-byte Reload + str r3, [r5, #12] + movne r1, r0 + ldr r0, [sp, #76] @ 4-byte Reload + str r1, [r5, #16] + ldr r1, [sp, #44] @ 4-byte Reload + movne r7, r0 + ldr r0, [sp, #80] @ 4-byte Reload + cmp r8, #0 + str r7, [r5, #20] + movne r2, r0 + ldr r0, [sp, #84] @ 4-byte Reload + str r2, [r5, #24] + movne r10, r0 + ldr r0, [sp, #88] @ 4-byte Reload + str r10, [r5, #28] + movne r11, r0 + ldr r0, [sp, #92] @ 4-byte Reload + cmp r8, #0 + str r11, [r5, #32] + movne r12, r0 + ldr r0, [sp, #96] @ 4-byte Reload + str r12, [r5, #36] + movne lr, r0 + ldr r0, [sp, #100] @ 4-byte Reload + str lr, [r5, #40] + movne r1, r0 + ldr r0, [sp, #104] @ 4-byte Reload + cmp r8, #0 + str r1, [r5, #44] + ldr r1, [sp, #108] @ 4-byte Reload + movne r1, r0 + str r1, [r5, #48] + add sp, sp, #548 + add sp, sp, #1024 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end194: + .size mcl_fp_mont13L, .Lfunc_end194-mcl_fp_mont13L + .cantunwind + .fnend + + .globl mcl_fp_montNF13L + .align 2 + .type mcl_fp_montNF13L,%function +mcl_fp_montNF13L: @ @mcl_fp_montNF13L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #548 + sub sp, sp, #548 + .pad #1024 + sub sp, sp, #1024 + add r12, sp, #100 + add r6, sp, #1024 + mov r4, r3 + stm r12, {r1, r2, r3} + str r0, [sp, #72] @ 4-byte Spill + add r0, r6, #488 + ldr r5, [r3, #-4] + ldr r2, [r2] + str r5, [sp, #96] @ 4-byte Spill + bl .LmulPv416x32(PLT) + ldr r0, [sp, #1516] + ldr r8, [sp, #1512] + mov r1, r4 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #1520] + mul r2, r8, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #1524] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #1564] + str r0, [sp, #92] @ 4-byte Spill + 
ldr r0, [sp, #1560] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #1556] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #1552] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #1548] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1544] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1540] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1536] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1532] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1528] + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #1456 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #1508] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r10, [sp, #1480] + ldr r11, [sp, #1476] + ldr r6, [sp, #1472] + ldr r7, [sp, #1456] + ldr r9, [sp, #1460] + ldr r4, [sp, #1464] + ldr r5, [sp, #1468] + add lr, sp, #1024 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1504] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1500] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1496] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1492] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1488] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1484] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #4] + add r0, lr, #376 + bl .LmulPv416x32(PLT) + adds r0, r7, r8 + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1412] + ldr r3, [sp, #1416] + ldr r12, [sp, #1420] + ldr lr, [sp, #1424] + ldr r7, [sp, #1436] + ldr r8, [sp, #1440] + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r9, r0 + ldr r9, [sp, #1444] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #1400] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r5, r0 + ldr r5, [sp, #1428] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #1432] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r11, r0 + ldr r11, [sp, #76] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r10, r0 + ldr r10, [sp, #1448] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adc r0, r1, r0 + adds r11, r11, r4 + ldr r4, [sp, #68] @ 4-byte Reload + ldr r1, [sp, #1408] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1452] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1404] + adcs r0, r4, r0 + mov r4, r11 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, 
r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #1344 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #1396] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r8, [sp, #1368] + ldr r9, [sp, #1364] + ldr r10, [sp, #1360] + ldr r11, [sp, #1344] + ldr r6, [sp, #1348] + ldr r7, [sp, #1352] + ldr r5, [sp, #1356] + add lr, sp, #1024 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1392] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1388] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1384] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1380] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1376] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1372] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #8] + add r0, lr, #264 + bl .LmulPv416x32(PLT) + adds r0, r4, r11 + ldr r1, [sp, #12] @ 4-byte Reload + add r11, sp, #1312 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1340] + str r0, [sp, #36] @ 4-byte Spill + ldm r11, {r4, r5, r6, r8, r9, r10, r11} + ldr r0, [sp, #1288] + ldr r7, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #1292] + ldr r2, [sp, #1296] + ldr r3, [sp, #1300] + ldr r12, [sp, #1304] + ldr lr, [sp, #1308] + adds r7, r7, r0 + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs 
r0, r0, r3 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #1232 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #1284] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r9, [sp, #1256] + ldr r10, [sp, #1252] + ldr r11, [sp, #1248] + ldr r7, [sp, #1232] + ldr r5, [sp, #1236] + ldr r4, [sp, #1240] + ldr r6, [sp, #1244] + add lr, sp, #1024 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1280] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1276] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1272] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1268] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1264] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1260] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, lr, #152 + bl .LmulPv416x32(PLT) + adds r0, r8, r7 + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1188] + ldr r3, [sp, #1192] + ldr r12, [sp, #1196] + ldr lr, [sp, #1200] + ldr r7, [sp, #1212] + ldr r8, [sp, #1216] + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1204] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1176] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1208] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #92] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1224] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1220] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, r1 + adds r11, r11, r4 + ldr r4, [sp, #88] @ 4-byte Reload + ldr r1, [sp, #1184] + str r0, [sp, #40] @ 4-byte Spill + ldr 
r0, [sp, #1228] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1180] + adcs r0, r4, r0 + mov r4, r11 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #1120 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #1172] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r8, [sp, #1144] + ldr r9, [sp, #1140] + ldr r10, [sp, #1136] + ldr r11, [sp, #1120] + ldr r6, [sp, #1124] + ldr r7, [sp, #1128] + ldr r5, [sp, #1132] + add lr, sp, #1024 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1168] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1164] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1160] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1156] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1152] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1148] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, lr, #40 + bl .LmulPv416x32(PLT) + adds r0, r4, r11 + ldr r1, [sp, #12] @ 4-byte Reload + add r11, sp, #1088 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1116] + str r0, [sp, #36] @ 4-byte 
Spill + ldm r11, {r4, r5, r6, r8, r9, r10, r11} + ldr r0, [sp, #1064] + ldr r7, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #1068] + ldr r2, [sp, #1072] + ldr r3, [sp, #1076] + ldr r12, [sp, #1080] + ldr lr, [sp, #1084] + adds r7, r7, r0 + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #1008 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #1060] + add r11, sp, #1016 + ldr r9, [sp, #1032] + ldr r10, [sp, #1028] + ldr r7, [sp, #1008] + ldr r5, [sp, #1012] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1056] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1052] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1048] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1044] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1040] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1036] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r6, r11} + ldr r0, [sp, #104] @ 4-byte Reload + ldr r1, [sp, #100] @ 4-byte Reload + ldr r2, [r0, #20] + add r0, sp, #952 + bl .LmulPv416x32(PLT) + adds r0, r8, r7 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #956 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #980 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte 
Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1004] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r5, r6, r7, r8, r9, r10} + ldr r4, [sp, #952] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #92] @ 4-byte Reload + adds r11, r11, r4 + ldr r4, [sp, #88] @ 4-byte Reload + adcs r0, r4, r0 + mov r4, r11 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #896 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #948] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r8, [sp, #920] + ldr r9, [sp, #916] + ldr r10, [sp, #912] + ldr r11, [sp, #896] + ldr r6, [sp, #900] + ldr r7, [sp, #904] + ldr r5, [sp, #908] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #944] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #940] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #936] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #932] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #928] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #924] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, sp, #840 + bl .LmulPv416x32(PLT) + adds r0, r4, r11 + ldr r1, [sp, #12] @ 4-byte Reload + add r11, sp, #864 + add lr, sp, #840 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, 
[sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #892] + str r0, [sp, #36] @ 4-byte Spill + ldm r11, {r4, r5, r6, r8, r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #92] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #784 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #836] + add r11, sp, #792 + ldr r9, [sp, #808] + ldr r10, [sp, #804] + ldr r7, [sp, #784] + ldr r5, [sp, #788] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #832] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #828] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #824] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #820] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #816] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #812] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r6, r11} + ldr r0, [sp, #104] @ 4-byte Reload + ldr r1, [sp, #100] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, sp, #728 + bl .LmulPv416x32(PLT) + adds r0, r8, r7 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #732 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #756 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload 
+ adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #780] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r5, r6, r7, r8, r9, r10} + ldr r4, [sp, #728] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #92] @ 4-byte Reload + adds r11, r11, r4 + ldr r4, [sp, #88] @ 4-byte Reload + adcs r0, r4, r0 + mov r4, r11 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #672 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #724] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r8, [sp, #696] + ldr r9, [sp, #692] + ldr r10, [sp, #688] + ldr r11, [sp, #672] + ldr r6, [sp, #676] + ldr r7, [sp, #680] + ldr r5, [sp, #684] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #720] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #716] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #712] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #708] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #704] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #700] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, sp, #616 + bl .LmulPv416x32(PLT) + adds r0, r4, r11 + ldr r1, [sp, #12] @ 4-byte Reload + add r11, sp, #640 + add lr, sp, #616 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + 
ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #668] + str r0, [sp, #36] @ 4-byte Spill + ldm r11, {r4, r5, r6, r8, r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #92] @ 4-byte Reload + adds r7, r7, r0 + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #560 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #612] + add r11, sp, #568 + ldr r9, [sp, #584] + ldr r10, [sp, #580] + ldr r7, [sp, #560] + ldr r5, [sp, #564] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #608] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #604] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #600] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #596] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #592] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #588] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r6, r11} + ldr r0, [sp, #104] @ 4-byte Reload + ldr r1, [sp, #100] @ 4-byte Reload + ldr r2, [r0, #36] + add r0, sp, #504 + bl .LmulPv416x32(PLT) + adds r0, r8, r7 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #508 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #532 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #56] @ 
4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #556] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r5, r6, r7, r8, r9, r10} + ldr r4, [sp, #504] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #92] @ 4-byte Reload + adds r11, r11, r4 + ldr r4, [sp, #88] @ 4-byte Reload + adcs r0, r4, r0 + mov r4, r11 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #96] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + mul r2, r11, r8 + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #448 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #500] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r9, [sp, #468] + ldr r10, [sp, #464] + ldr r11, [sp, #448] + ldr r6, [sp, #452] + ldr r7, [sp, #456] + ldr r5, [sp, #460] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #496] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #492] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #488] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #484] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #480] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #476] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #472] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [r0, #40] + add r0, sp, #392 + bl .LmulPv416x32(PLT) + adds r0, r4, r11 + ldr r1, [sp, #88] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + add lr, sp, #408 + ldr r4, [sp, #400] + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #396] + adcs r1, r1, r7 + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r1, r5 + ldr r5, [sp, #404] + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r10 + add r10, sp, #432 + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r1, r9 + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #12] @ 4-byte Reload + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload 
+ adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adc r1, r1, r2 + ldr r2, [sp, #392] + str r1, [sp, #40] @ 4-byte Spill + adds r0, r0, r2 + mul r1, r0, r8 + str r0, [sp, #92] @ 4-byte Spill + str r1, [sp, #32] @ 4-byte Spill + ldm r10, {r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #88] @ 4-byte Reload + adcs r6, r11, r6 + str r6, [sp, #88] @ 4-byte Spill + ldr r6, [sp, #84] @ 4-byte Reload + adcs r4, r6, r4 + str r4, [sp, #84] @ 4-byte Spill + ldr r4, [sp, #80] @ 4-byte Reload + adcs r4, r4, r5 + str r4, [sp, #80] @ 4-byte Spill + ldr r4, [sp, #76] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + adc r0, r10, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #336 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #388] + add r9, sp, #344 + ldr r6, [sp, #364] + ldr r7, [sp, #360] + ldr r8, [sp, #356] + ldr r10, [sp, #336] + ldr r11, [sp, #340] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #384] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #380] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #376] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #372] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #368] + str r0, [sp, #12] @ 4-byte Spill + ldm r9, {r4, r5, r9} + ldr r0, [sp, #104] @ 4-byte Reload + ldr r1, [sp, #100] @ 4-byte Reload + ldr r2, [r0, #44] + add r0, sp, #280 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #84] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + add lr, sp, #296 + adds r0, r0, r10 + add r10, sp, #320 + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + adcs r1, r1, r4 + ldr r4, [sp, #288] + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r5 + ldr r5, [sp, #292] + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r1, r9 + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r1, r8 + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r7 + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #284] + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs 
r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adc r1, r1, r2 + ldr r2, [sp, #280] + str r1, [sp, #32] @ 4-byte Spill + adds r1, r0, r2 + ldr r0, [sp, #96] @ 4-byte Reload + str r1, [sp, #92] @ 4-byte Spill + mul r2, r1, r0 + str r2, [sp, #24] @ 4-byte Spill + ldm r10, {r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #88] @ 4-byte Reload + adcs r6, r11, r6 + str r6, [sp, #44] @ 4-byte Spill + ldr r6, [sp, #84] @ 4-byte Reload + adcs r4, r6, r4 + str r4, [sp, #40] @ 4-byte Spill + ldr r4, [sp, #80] @ 4-byte Reload + adcs r4, r4, r5 + str r4, [sp, #36] @ 4-byte Spill + ldr r4, [sp, #76] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + adc r0, r10, #0 + str r0, [sp, #48] @ 4-byte Spill + add r0, sp, #224 + bl .LmulPv416x32(PLT) + ldr r1, [sp, #276] + add r9, sp, #232 + ldr r6, [sp, #252] + ldr r7, [sp, #248] + ldr r8, [sp, #244] + ldr r10, [sp, #224] + ldr r11, [sp, #228] + add r0, sp, #168 + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #272] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #268] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #264] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #260] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #256] + str r1, [sp, #12] @ 4-byte Spill + ldm r9, {r4, r5, r9} + ldr r1, [sp, #104] @ 4-byte Reload + ldr r2, [r1, #48] + ldr r1, [sp, #100] @ 4-byte Reload + bl .LmulPv416x32(PLT) + ldr r0, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #40] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + add lr, sp, #184 + adds r0, r0, r10 + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r11 + adcs r1, r1, r4 + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adcs r1, r1, r5 + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + adcs r1, r1, r9 + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r1, r8 + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r7 + add r7, sp, #168 + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r1, r6 + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 
4-byte Reload + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adc r1, r1, r2 + str r1, [sp, #52] @ 4-byte Spill + ldm r7, {r2, r6, r7} + ldr r5, [sp, #180] + ldr r4, [sp, #216] + ldr r9, [sp, #212] + ldr r8, [sp, #208] + adds r10, r0, r2 + ldr r0, [sp, #96] @ 4-byte Reload + mul r1, r10, r0 + ldr r0, [sp, #220] + str r1, [sp, #48] @ 4-byte Spill + str r0, [sp, #36] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #104] @ 4-byte Reload + adcs r11, r11, r6 + ldr r6, [sp, #100] @ 4-byte Reload + adcs r7, r6, r7 + ldr r6, [sp, #92] @ 4-byte Reload + adcs r5, r6, r5 + ldr r6, [sp, #88] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r8, r0, r8 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #108] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + mov r1, r4 + adc r6, r0, #0 + add r0, sp, #112 + bl .LmulPv416x32(PLT) + add r3, sp, #112 + ldm r3, {r0, r1, r2, r3} + adds r0, r10, r0 + adcs r12, r11, r1 + ldr r0, [sp, #128] + ldr r1, [sp, #40] @ 4-byte Reload + adcs r2, r7, r2 + str r12, [sp, #52] @ 4-byte Spill + adcs lr, r5, r3 + str r2, [sp, #56] @ 4-byte Spill + str lr, [sp, #60] @ 4-byte Spill + adcs r9, r1, r0 + ldr r0, [sp, #132] + ldr r1, [sp, #44] @ 4-byte Reload + str r9, [sp, #64] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #136] + adcs r0, r1, r0 + ldr r1, [sp, #88] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #140] + adcs r0, r1, r0 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #144] + adcs r10, r1, r0 + ldr r0, [sp, #148] + ldr r1, [sp, #96] @ 4-byte Reload + str r10, [sp, #68] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #152] + adcs r0, r8, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #156] + adcs r0, r1, r0 + ldr r1, [sp, #104] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #160] + adcs r0, r1, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #164] + adc r0, r6, r0 + mov r6, r4 + str r0, [sp, #104] @ 4-byte Spill + ldmib r6, {r0, r1, r7} + ldr r5, [r6, #24] + ldr r4, [r6, #28] + ldr r3, [r6, #16] + ldr r11, [r6, #20] + str r5, [sp, #48] @ 4-byte Spill + ldr r5, [r6] + str r4, [sp, #44] @ 4-byte Spill + subs r5, r12, r5 + sbcs 
r8, r2, r0 + sbcs r2, lr, r1 + sbcs lr, r9, r7 + add r7, r6, #32 + ldm r7, {r0, r1, r7} + ldr r4, [r6, #44] + ldr r9, [r6, #48] + ldr r6, [sp, #76] @ 4-byte Reload + sbcs r3, r6, r3 + ldr r6, [sp, #80] @ 4-byte Reload + str r4, [sp, #40] @ 4-byte Spill + ldr r4, [sp, #48] @ 4-byte Reload + sbcs r12, r6, r11 + ldr r6, [sp, #84] @ 4-byte Reload + sbcs r11, r6, r4 + ldr r4, [sp, #44] @ 4-byte Reload + sbcs r10, r10, r4 + ldr r4, [sp, #88] @ 4-byte Reload + sbcs r4, r4, r0 + ldr r0, [sp, #92] @ 4-byte Reload + sbcs r6, r0, r1 + ldr r0, [sp, #96] @ 4-byte Reload + ldr r1, [sp, #40] @ 4-byte Reload + sbcs r7, r0, r7 + ldr r0, [sp, #100] @ 4-byte Reload + sbcs r0, r0, r1 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + sbc r9, r0, r9 + ldr r0, [sp, #52] @ 4-byte Reload + asr r1, r9, #31 + cmp r1, #0 + movlt r5, r0 + ldr r0, [sp, #72] @ 4-byte Reload + str r5, [r0] + ldr r5, [sp, #56] @ 4-byte Reload + movlt r8, r5 + ldr r5, [sp, #60] @ 4-byte Reload + str r8, [r0, #4] + movlt r2, r5 + cmp r1, #0 + str r2, [r0, #8] + ldr r2, [sp, #64] @ 4-byte Reload + movlt lr, r2 + ldr r2, [sp, #76] @ 4-byte Reload + str lr, [r0, #12] + movlt r3, r2 + ldr r2, [sp, #80] @ 4-byte Reload + str r3, [r0, #16] + ldr r3, [sp, #108] @ 4-byte Reload + movlt r12, r2 + ldr r2, [sp, #84] @ 4-byte Reload + cmp r1, #0 + str r12, [r0, #20] + movlt r11, r2 + ldr r2, [sp, #68] @ 4-byte Reload + str r11, [r0, #24] + movlt r10, r2 + ldr r2, [sp, #88] @ 4-byte Reload + str r10, [r0, #28] + movlt r4, r2 + ldr r2, [sp, #92] @ 4-byte Reload + cmp r1, #0 + str r4, [r0, #32] + movlt r6, r2 + ldr r2, [sp, #96] @ 4-byte Reload + str r6, [r0, #36] + movlt r7, r2 + ldr r2, [sp, #100] @ 4-byte Reload + str r7, [r0, #40] + movlt r3, r2 + cmp r1, #0 + ldr r1, [sp, #104] @ 4-byte Reload + str r3, [r0, #44] + movlt r9, r1 + str r9, [r0, #48] + add sp, sp, #548 + add sp, sp, #1024 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end195: + .size mcl_fp_montNF13L, .Lfunc_end195-mcl_fp_montNF13L + .cantunwind + .fnend + + .globl mcl_fp_montRed13L + .align 2 + .type mcl_fp_montRed13L,%function +mcl_fp_montRed13L: @ @mcl_fp_montRed13L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #908 + sub sp, sp, #908 + mov r3, r2 + str r0, [sp, #164] @ 4-byte Spill + ldr r2, [r1, #4] + ldr r11, [r1] + ldr r0, [r3] + str r3, [sp, #168] @ 4-byte Spill + str r2, [sp, #72] @ 4-byte Spill + ldr r2, [r1, #8] + str r0, [sp, #160] @ 4-byte Spill + ldr r0, [r3, #4] + str r2, [sp, #68] @ 4-byte Spill + ldr r2, [r1, #12] + str r0, [sp, #156] @ 4-byte Spill + ldr r0, [r3, #8] + str r2, [sp, #64] @ 4-byte Spill + str r0, [sp, #152] @ 4-byte Spill + ldr r0, [r3, #12] + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [r3, #16] + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [r3, #20] + str r0, [sp, #144] @ 4-byte Spill + ldr r0, [r3, #24] + str r0, [sp, #148] @ 4-byte Spill + ldr r0, [r3, #-4] + str r0, [sp, #172] @ 4-byte Spill + mul r2, r11, r0 + ldr r0, [r3, #28] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [r3, #32] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [r3, #36] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [r3, #40] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [r3, #44] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [r3, #48] + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [r1, #96] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [r1, #100] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [r1, #64] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r1, #72] + str r0, 
[sp, #80] @ 4-byte Spill + ldr r0, [r1, #76] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r1, #80] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [r1, #84] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [r1, #88] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [r1, #92] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [r1, #68] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [r1, #32] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r1, #36] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r1, #40] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r1, #44] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r1, #48] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r1, #52] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r1, #56] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r1, #60] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r1, #28] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r1, #24] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [r1, #20] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r1, #16] + mov r1, r3 + str r0, [sp, #12] @ 4-byte Spill + add r0, sp, #848 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #900] + add r10, sp, #872 + add lr, sp, #848 + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r4, r5, r6, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r11, r0 + ldr r0, [sp, #72] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #68] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #168] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + mov r0, #0 + adc 
r0, r0, #0 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #172] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #792 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #844] + add lr, sp, #832 + add r9, sp, #800 + str r0, [sp, #8] @ 4-byte Spill + ldm lr, {r3, r12, lr} + ldr r4, [sp, #792] + ldr r5, [sp, #828] + ldr r6, [sp, #824] + ldr r7, [sp, #820] + ldr r10, [sp, #816] + ldr r8, [sp, #812] + ldr r1, [sp, #796] + ldm r9, {r0, r2, r9} + adds r4, r11, r4 + ldr r4, [sp, #72] @ 4-byte Reload + adcs r11, r4, r1 + ldr r1, [sp, #68] @ 4-byte Reload + ldr r4, [sp, #172] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #168] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + add r0, sp, #736 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #788] + add r10, sp, #760 + add lr, sp, #736 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #784] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #780] + str r0, [sp, #4] @ 4-byte Spill + ldm r10, {r6, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r11, r0 + ldr r0, [sp, #72] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #68] @ 4-byte Reload + ldr r1, [sp, #4] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr 
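+@ montRed13L inner step: .LmulPv416x32 computes a 13x32-bit-by-32-bit
+@ product q*p, and this adcs chain folds it into the spilled accumulator
+@ words one 4-byte Reload/Spill pair at a time.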
+ str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #680 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #732] + add lr, sp, #720 + add r10, sp, #688 + str r0, [sp, #16] @ 4-byte Spill + ldm lr, {r3, r12, lr} + ldr r4, [sp, #680] + ldr r5, [sp, #716] + ldr r6, [sp, #712] + ldr r7, [sp, #708] + ldr r1, [sp, #684] + ldm r10, {r0, r2, r8, r9, r10} + adds r4, r11, r4 + ldr r4, [sp, #72] @ 4-byte Reload + adcs r11, r4, r1 + ldr r1, [sp, #68] @ 4-byte Reload + ldr r4, [sp, #172] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #168] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, 
[sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #624 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #676] + add r10, sp, #648 + add lr, sp, #624 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #672] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #668] + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r6, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r11, r0 + ldr r0, [sp, #72] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #68] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #568 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #620] + add lr, sp, #608 + add r10, sp, #576 + str r0, [sp, #24] @ 4-byte Spill + ldm lr, {r3, r12, lr} + ldr r4, [sp, #568] + ldr r5, [sp, #604] + ldr r6, [sp, #600] + ldr r7, [sp, #596] + ldr r1, [sp, #572] + ldm r10, {r0, r2, r8, r9, r10} + adds r4, r11, r4 + ldr r4, [sp, #72] @ 4-byte Reload + adcs r11, r4, r1 + ldr r1, [sp, #68] @ 4-byte 
Reload + ldr r4, [sp, #172] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #168] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + add r0, sp, #512 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #564] + add r10, sp, #536 + add lr, sp, #512 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #560] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #556] + str r0, [sp, #20] @ 4-byte Spill + ldm r10, {r6, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r11, r0 + ldr r0, [sp, #72] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #68] @ 4-byte Reload + ldr r1, [sp, #20] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 
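+@ accumulator words not yet reached by the current reduction step only
+@ absorb the carry (adcs rX, rX, #0) and are respilled for the next
+@ .LmulPv416x32 round.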
+ str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #456 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #508] + add lr, sp, #496 + add r10, sp, #464 + str r0, [sp, #32] @ 4-byte Spill + ldm lr, {r3, r12, lr} + ldr r4, [sp, #456] + ldr r5, [sp, #492] + ldr r6, [sp, #488] + ldr r7, [sp, #484] + ldr r1, [sp, #460] + ldm r10, {r0, r2, r8, r9, r10} + adds r4, r11, r4 + ldr r4, [sp, #72] @ 4-byte Reload + adcs r11, r4, r1 + ldr r1, [sp, #68] @ 4-byte Reload + ldr r4, [sp, #172] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #168] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #400 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #452] + add r10, sp, #424 + add lr, sp, #400 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #448] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #444] + str r0, [sp, #28] @ 4-byte Spill + ldm r10, {r6, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r11, r0 + ldr r0, [sp, #72] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #68] @ 4-byte Reload + ldr r1, [sp, #28] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, 
r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #344 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #396] + add lr, sp, #384 + add r10, sp, #352 + str r0, [sp, #40] @ 4-byte Spill + ldm lr, {r3, r12, lr} + ldr r4, [sp, #344] + ldr r5, [sp, #380] + ldr r6, [sp, #376] + ldr r7, [sp, #372] + ldr r1, [sp, #348] + ldm r10, {r0, r2, r8, r9, r10} + adds r4, r11, r4 + ldr r4, [sp, #72] @ 4-byte Reload + adcs r11, r4, r1 + ldr r1, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #168] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #172] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + mul r2, r11, r7 + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r8 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #288 + bl .LmulPv416x32(PLT) + ldr r0, [sp, #340] + add r10, sp, #312 + add lr, sp, #288 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #336] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #332] + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r4, r5, r6, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r11, r0 + ldr r0, [sp, #72] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #68] @ 
4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r4 + mov r4, r7 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + mul r2, r11, r4 + adcs r0, r0, r5 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r9 + mov r9, r8 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + add r0, sp, #232 + bl .LmulPv416x32(PLT) + add r7, sp, #232 + add lr, sp, #272 + ldm r7, {r0, r1, r3, r7} + ldr r8, [sp, #284] + adds r0, r11, r0 + ldr r0, [sp, #20] @ 4-byte Reload + adcs r11, r0, r1 + mul r0, r11, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #172] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r7 + add r7, sp, #256 + str r0, [sp, #60] @ 4-byte Spill + ldm lr, {r5, r12, lr} + ldr r6, [sp, #268] + ldm r7, {r1, r2, r7} + ldr r0, [sp, #248] + ldr r3, [sp, #108] @ 4-byte Reload + ldr r4, [sp, #252] + adcs r10, r3, r0 + ldr r0, [sp, #104] @ 4-byte Reload + adcs r4, r0, r4 + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r9 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r8, r0, r8 + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + add r0, sp, #176 + bl .LmulPv416x32(PLT) + add r3, sp, #176 + ldm r3, {r0, r1, r2, r3} + adds r0, r11, r0 + ldr r0, [sp, #172] @ 4-byte Reload + adcs r12, r0, r1 + ldr r0, [sp, #60] @ 4-byte Reload + ldr r1, [sp, #44] @ 4-byte Reload + str r12, [sp, #52] @ 4-byte Spill + adcs r2, r0, r2 + ldr r0, [sp, #192] + adcs r3, r10, r3 + str r2, [sp, #64] @ 4-byte Spill + str r3, [sp, #68] @ 4-byte Spill + adcs r7, r4, r0 + ldr r0, [sp, #196] + str r7, [sp, #72] @ 4-byte Spill + adcs r4, r1, 
r0 + ldr r0, [sp, #200] + ldr r1, [sp, #48] @ 4-byte Reload + str r4, [sp, #76] @ 4-byte Spill + adcs r5, r1, r0 + ldr r0, [sp, #204] + ldr r1, [sp, #56] @ 4-byte Reload + str r5, [sp, #80] @ 4-byte Spill + adcs r6, r1, r0 + ldr r0, [sp, #208] + ldr r1, [sp, #88] @ 4-byte Reload + str r6, [sp, #84] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #172] @ 4-byte Spill + ldr r0, [sp, #212] + adcs r11, r1, r0 + ldr r0, [sp, #216] + ldr r1, [sp, #100] @ 4-byte Reload + str r11, [sp, #92] @ 4-byte Spill + adcs r10, r1, r0 + ldr r0, [sp, #220] + ldr r1, [sp, #108] @ 4-byte Reload + str r10, [sp, #100] @ 4-byte Spill + adcs r9, r1, r0 + ldr r0, [sp, #224] + ldr r1, [sp, #104] @ 4-byte Reload + str r9, [sp, #108] @ 4-byte Spill + adcs r8, r8, r0 + ldr r0, [sp, #228] + str r8, [sp, #168] @ 4-byte Spill + adcs lr, r1, r0 + ldr r0, [sp, #96] @ 4-byte Reload + ldr r1, [sp, #156] @ 4-byte Reload + str lr, [sp, #104] @ 4-byte Spill + adc r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #160] @ 4-byte Reload + subs r0, r12, r0 + sbcs r1, r2, r1 + ldr r2, [sp, #152] @ 4-byte Reload + sbcs r2, r3, r2 + ldr r3, [sp, #136] @ 4-byte Reload + sbcs r3, r7, r3 + ldr r7, [sp, #140] @ 4-byte Reload + sbcs r12, r4, r7 + ldr r4, [sp, #144] @ 4-byte Reload + ldr r7, [sp, #172] @ 4-byte Reload + sbcs r4, r5, r4 + ldr r5, [sp, #148] @ 4-byte Reload + sbcs r5, r6, r5 + ldr r6, [sp, #112] @ 4-byte Reload + sbcs r6, r7, r6 + ldr r7, [sp, #116] @ 4-byte Reload + sbcs r7, r11, r7 + str r7, [sp, #160] @ 4-byte Spill + ldr r7, [sp, #120] @ 4-byte Reload + sbcs r11, r10, r7 + ldr r7, [sp, #124] @ 4-byte Reload + sbcs r9, r9, r7 + ldr r7, [sp, #128] @ 4-byte Reload + sbcs r10, r8, r7 + ldr r7, [sp, #132] @ 4-byte Reload + sbcs r8, lr, r7 + ldr r7, [sp, #96] @ 4-byte Reload + sbc r7, r7, #0 + ands lr, r7, #1 + ldr r7, [sp, #52] @ 4-byte Reload + movne r0, r7 + ldr r7, [sp, #164] @ 4-byte Reload + str r0, [r7] + ldr r0, [sp, #64] @ 4-byte Reload + movne r1, r0 + ldr r0, [sp, #68] @ 4-byte Reload + str r1, [r7, #4] + ldr r1, [sp, #92] @ 4-byte Reload + movne r2, r0 + ldr r0, [sp, #72] @ 4-byte Reload + cmp lr, #0 + str r2, [r7, #8] + movne r3, r0 + ldr r0, [sp, #76] @ 4-byte Reload + str r3, [r7, #12] + movne r12, r0 + ldr r0, [sp, #80] @ 4-byte Reload + str r12, [r7, #16] + movne r4, r0 + ldr r0, [sp, #84] @ 4-byte Reload + cmp lr, #0 + str r4, [r7, #20] + movne r5, r0 + ldr r0, [sp, #172] @ 4-byte Reload + str r5, [r7, #24] + movne r6, r0 + ldr r0, [sp, #160] @ 4-byte Reload + movne r0, r1 + str r6, [r7, #28] + cmp lr, #0 + str r0, [r7, #32] + ldr r0, [sp, #100] @ 4-byte Reload + movne r11, r0 + ldr r0, [sp, #108] @ 4-byte Reload + str r11, [r7, #36] + movne r9, r0 + ldr r0, [sp, #168] @ 4-byte Reload + str r9, [r7, #40] + movne r10, r0 + ldr r0, [sp, #104] @ 4-byte Reload + cmp lr, #0 + str r10, [r7, #44] + movne r8, r0 + str r8, [r7, #48] + add sp, sp, #908 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end196: + .size mcl_fp_montRed13L, .Lfunc_end196-mcl_fp_montRed13L + .cantunwind + .fnend + + .globl mcl_fp_addPre13L + .align 2 + .type mcl_fp_addPre13L,%function +mcl_fp_addPre13L: @ @mcl_fp_addPre13L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #36 + sub sp, sp, #36 + ldm r1, {r3, r12, lr} + ldr r9, [r1, #12] + ldmib r2, {r5, r6, r7} + ldr r11, [r2] + ldr r4, [r2, #16] + ldr r10, [r2, #32] + adds r8, r11, r3 + ldr r3, [r2, #48] + str r4, [sp, #8] @ 4-byte Spill + ldr r4, [r2, #20] + ldr 
r11, [r1, #44] + adcs r5, r5, r12 + add r12, r1, #16 + adcs r6, r6, lr + ldr lr, [r1, #32] + str r3, [sp, #32] @ 4-byte Spill + ldr r3, [r2, #44] + str r4, [sp, #12] @ 4-byte Spill + ldr r4, [r2, #24] + str r3, [sp, #28] @ 4-byte Spill + ldr r3, [r2, #40] + str r4, [sp, #20] @ 4-byte Spill + ldr r4, [r2, #28] + str r3, [sp, #16] @ 4-byte Spill + ldr r3, [r2, #36] + ldr r2, [r1, #36] + str r4, [sp, #24] @ 4-byte Spill + adcs r4, r7, r9 + ldr r7, [r1, #40] + ldr r9, [r1, #48] + str r3, [sp, #4] @ 4-byte Spill + str r2, [sp] @ 4-byte Spill + ldm r12, {r1, r2, r3, r12} + str r8, [r0] + stmib r0, {r5, r6} + str r4, [r0, #12] + ldr r5, [sp, #8] @ 4-byte Reload + ldr r4, [sp, #12] @ 4-byte Reload + ldr r6, [sp, #32] @ 4-byte Reload + adcs r1, r5, r1 + str r1, [r0, #16] + adcs r2, r4, r2 + ldr r1, [sp, #20] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #24] @ 4-byte Reload + adcs r1, r1, r3 + ldr r3, [sp] @ 4-byte Reload + adcs r2, r2, r12 + str r1, [r0, #24] + add r12, r0, #32 + str r2, [r0, #28] + ldr r2, [sp, #4] @ 4-byte Reload + adcs r1, r10, lr + adcs r2, r2, r3 + ldr r3, [sp, #16] @ 4-byte Reload + adcs r3, r3, r7 + ldr r7, [sp, #28] @ 4-byte Reload + adcs r7, r7, r11 + adcs r6, r6, r9 + stm r12, {r1, r2, r3, r7} + str r6, [r0, #48] + mov r0, #0 + adc r0, r0, #0 + add sp, sp, #36 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end197: + .size mcl_fp_addPre13L, .Lfunc_end197-mcl_fp_addPre13L + .cantunwind + .fnend + + .globl mcl_fp_subPre13L + .align 2 + .type mcl_fp_subPre13L,%function +mcl_fp_subPre13L: @ @mcl_fp_subPre13L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #36 + sub sp, sp, #36 + ldr r3, [r2, #16] + ldr r7, [r2] + ldr r6, [r1] + ldr r12, [r2, #4] + ldr r4, [r2, #8] + ldr r11, [r2, #12] + str r3, [sp, #12] @ 4-byte Spill + ldr r3, [r2, #20] + subs r7, r6, r7 + str r3, [sp, #20] @ 4-byte Spill + ldr r3, [r2, #24] + str r3, [sp, #24] @ 4-byte Spill + ldr r3, [r2, #28] + str r3, [sp, #28] @ 4-byte Spill + ldmib r1, {r5, lr} + ldr r6, [r2, #48] + ldr r3, [r1, #12] + ldr r10, [r2, #32] + ldr r8, [r1, #44] + ldr r9, [r1, #48] + str r6, [sp, #32] @ 4-byte Spill + ldr r6, [r2, #44] + sbcs r5, r5, r12 + add r12, r1, #16 + sbcs r4, lr, r4 + sbcs lr, r3, r11 + ldr r3, [r2, #36] + ldr r11, [r1, #36] + str r6, [sp, #16] @ 4-byte Spill + ldr r6, [r2, #40] + ldr r2, [r1, #40] + str r3, [sp, #4] @ 4-byte Spill + str r6, [sp, #8] @ 4-byte Spill + ldr r6, [r1, #32] + str r2, [sp] @ 4-byte Spill + ldm r12, {r1, r2, r3, r12} + str r7, [r0] + str r5, [r0, #4] + str r4, [r0, #8] + ldr r4, [sp, #12] @ 4-byte Reload + ldr r7, [sp, #20] @ 4-byte Reload + str lr, [r0, #12] + sbcs r1, r1, r4 + sbcs r2, r2, r7 + str r1, [r0, #16] + ldr r1, [sp, #24] @ 4-byte Reload + ldr r7, [sp] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #28] @ 4-byte Reload + sbcs r1, r3, r1 + ldr r3, [sp, #8] @ 4-byte Reload + sbcs r2, r12, r2 + str r1, [r0, #24] + add r12, r0, #32 + str r2, [r0, #28] + ldr r2, [sp, #4] @ 4-byte Reload + sbcs r1, r6, r10 + ldr r6, [sp, #32] @ 4-byte Reload + sbcs r2, r11, r2 + sbcs r3, r7, r3 + ldr r7, [sp, #16] @ 4-byte Reload + sbcs r7, r8, r7 + sbcs r6, r9, r6 + stm r12, {r1, r2, r3, r7} + str r6, [r0, #48] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + add sp, sp, #36 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end198: + .size mcl_fp_subPre13L, .Lfunc_end198-mcl_fp_subPre13L + .cantunwind + .fnend + + .globl mcl_fp_shr1_13L + .align 2 + .type mcl_fp_shr1_13L,%function 
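+@ mcl_fp_shr1_13L: one-bit logical right shift of a 13x32-bit value.
+@ Each output word is (w[i] >> 1) | (w[i+1] << 31); the lsrs/rrx pairs
+@ carry the shifted-out low bit between words via the carry flag.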
+mcl_fp_shr1_13L: @ @mcl_fp_shr1_13L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #8 + sub sp, sp, #8 + add r9, r1, #8 + ldm r9, {r2, r3, r4, r5, r8, r9} + ldm r1, {r10, lr} + ldr r12, [r1, #36] + lsr r7, lr, #1 + lsr r6, r3, #1 + lsrs r3, r3, #1 + orr r11, r7, r2, lsl #31 + ldr r7, [r1, #48] + rrx r2, r2 + lsrs r3, lr, #1 + rrx r3, r10 + str r7, [sp, #4] @ 4-byte Spill + ldr r7, [r1, #44] + str r7, [sp] @ 4-byte Spill + ldr r7, [r1, #40] + ldr r1, [r1, #32] + stm r0, {r3, r11} + str r2, [r0, #8] + orr r2, r6, r4, lsl #31 + str r2, [r0, #12] + lsrs r2, r5, #1 + ldr r6, [sp] @ 4-byte Reload + rrx r2, r4 + str r2, [r0, #16] + lsr r2, r5, #1 + orr r2, r2, r8, lsl #31 + str r2, [r0, #20] + lsrs r2, r9, #1 + rrx r2, r8 + str r2, [r0, #24] + lsr r2, r9, #1 + orr r2, r2, r1, lsl #31 + str r2, [r0, #28] + lsrs r2, r12, #1 + lsr r2, r12, #1 + rrx r1, r1 + lsrs r3, r6, #1 + add r12, r0, #32 + orr r2, r2, r7, lsl #31 + rrx r3, r7 + lsr r7, r6, #1 + ldr r6, [sp, #4] @ 4-byte Reload + orr r7, r7, r6, lsl #31 + lsr r6, r6, #1 + stm r12, {r1, r2, r3, r7} + str r6, [r0, #48] + add sp, sp, #8 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end199: + .size mcl_fp_shr1_13L, .Lfunc_end199-mcl_fp_shr1_13L + .cantunwind + .fnend + + .globl mcl_fp_add13L + .align 2 + .type mcl_fp_add13L,%function +mcl_fp_add13L: @ @mcl_fp_add13L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #44 + sub sp, sp, #44 + ldr r9, [r1] + ldmib r1, {r8, lr} + ldr r12, [r1, #12] + ldm r2, {r4, r5, r6, r7} + adds r11, r4, r9 + ldr r9, [r1, #24] + adcs r4, r5, r8 + ldr r5, [r1, #20] + adcs r6, r6, lr + str r4, [sp, #32] @ 4-byte Spill + ldr r4, [r1, #16] + mov lr, r11 + adcs r7, r7, r12 + str r6, [sp, #28] @ 4-byte Spill + ldr r6, [r2, #32] + str lr, [r0] + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #16] + adcs r8, r7, r4 + ldr r4, [r2, #20] + adcs r7, r4, r5 + ldr r5, [r2, #24] + ldr r4, [r1, #28] + str r7, [sp, #40] @ 4-byte Spill + adcs r7, r5, r9 + ldr r5, [r2, #28] + str r7, [sp, #4] @ 4-byte Spill + ldr r11, [sp, #4] @ 4-byte Reload + adcs r7, r5, r4 + ldr r5, [r1, #32] + ldr r4, [sp, #32] @ 4-byte Reload + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [sp, #20] @ 4-byte Reload + adcs r10, r6, r5 + ldr r6, [r1, #36] + ldr r5, [r2, #36] + str r4, [r0, #4] + str r10, [sp, #24] @ 4-byte Spill + adcs r9, r5, r6 + ldr r6, [r1, #40] + ldr r5, [r2, #40] + adcs r12, r5, r6 + ldr r6, [r1, #44] + ldr r5, [r2, #44] + ldr r1, [r1, #48] + ldr r2, [r2, #48] + adcs r6, r5, r6 + ldr r5, [sp, #28] @ 4-byte Reload + adcs r2, r2, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r6, [sp, #16] @ 4-byte Spill + str r2, [sp, #12] @ 4-byte Spill + str r5, [r0, #8] + str r7, [r0, #12] + str r8, [r0, #16] + str r1, [r0, #20] + ldr r1, [sp, #36] @ 4-byte Reload + str r11, [r0, #24] + str r1, [r0, #28] + str r10, [r0, #32] + str r9, [r0, #36] + str r12, [r0, #40] + str r6, [r0, #44] + str r2, [r0, #48] + mov r2, #0 + mov r10, r12 + adc r1, r2, #0 + str r1, [sp, #8] @ 4-byte Spill + ldm r3, {r2, r6} + ldr r1, [r3, #8] + ldr r12, [r3, #12] + subs r2, lr, r2 + str r2, [sp] @ 4-byte Spill + sbcs r2, r4, r6 + sbcs r1, r5, r1 + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [r3, #16] + sbcs r7, r7, r12 + add r12, r3, #32 + sbcs r8, r8, r1 + ldr r1, [r3, #20] + sbcs r1, r2, r1 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte 
Spill + ldr r1, [r3, #24] + sbcs r1, r11, r1 + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [r3, #28] + sbcs r5, r2, r1 + ldm r12, {r1, r2, r6, r11, r12} + ldr r3, [sp, #24] @ 4-byte Reload + sbcs r3, r3, r1 + ldr r1, [sp, #16] @ 4-byte Reload + sbcs r4, r9, r2 + sbcs lr, r10, r6 + ldr r6, [sp, #8] @ 4-byte Reload + sbcs r2, r1, r11 + ldr r1, [sp, #12] @ 4-byte Reload + sbcs r1, r1, r12 + sbc r6, r6, #0 + tst r6, #1 + bne .LBB200_2 +@ BB#1: @ %nocarry + mov r6, r7 + ldr r7, [sp] @ 4-byte Reload + add r12, r0, #32 + str r7, [r0] + ldr r7, [sp, #32] @ 4-byte Reload + str r7, [r0, #4] + ldr r7, [sp, #28] @ 4-byte Reload + str r7, [r0, #8] + ldr r7, [sp, #40] @ 4-byte Reload + str r6, [r0, #12] + str r8, [r0, #16] + str r7, [r0, #20] + ldr r7, [sp, #20] @ 4-byte Reload + str r7, [r0, #24] + str r5, [r0, #28] + stm r12, {r3, r4, lr} + str r2, [r0, #44] + str r1, [r0, #48] +.LBB200_2: @ %carry + add sp, sp, #44 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end200: + .size mcl_fp_add13L, .Lfunc_end200-mcl_fp_add13L + .cantunwind + .fnend + + .globl mcl_fp_addNF13L + .align 2 + .type mcl_fp_addNF13L,%function +mcl_fp_addNF13L: @ @mcl_fp_addNF13L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #64 + sub sp, sp, #64 + ldm r1, {r7, r8, lr} + ldr r6, [r2] + ldr r12, [r1, #12] + ldmib r2, {r4, r5, r9} + adds r10, r6, r7 + ldr r7, [r2, #16] + ldr r6, [r1, #24] + adcs r4, r4, r8 + adcs lr, r5, lr + ldr r5, [r1, #16] + str r4, [sp, #28] @ 4-byte Spill + ldr r4, [r1, #20] + adcs r9, r9, r12 + str lr, [sp, #8] @ 4-byte Spill + str r9, [sp, #12] @ 4-byte Spill + adcs r7, r7, r5 + ldr r5, [r2, #20] + str r7, [sp, #32] @ 4-byte Spill + adcs r7, r5, r4 + ldr r5, [r2, #24] + str r7, [sp, #36] @ 4-byte Spill + adcs r8, r5, r6 + ldr r6, [r1, #28] + ldr r5, [r2, #28] + str r8, [sp, #16] @ 4-byte Spill + adcs r7, r5, r6 + ldr r6, [r1, #32] + ldr r5, [r2, #32] + str r7, [sp, #40] @ 4-byte Spill + adcs r7, r5, r6 + ldr r6, [r1, #36] + ldr r5, [r2, #36] + str r7, [sp, #44] @ 4-byte Spill + adcs r7, r5, r6 + ldr r6, [r1, #40] + ldr r5, [r2, #40] + str r7, [sp, #56] @ 4-byte Spill + adcs r7, r5, r6 + ldr r6, [r1, #44] + ldr r5, [r2, #44] + ldr r1, [r1, #48] + ldr r2, [r2, #48] + str r7, [sp, #52] @ 4-byte Spill + adcs r7, r5, r6 + adc r1, r2, r1 + str r7, [sp, #48] @ 4-byte Spill + str r1, [sp, #60] @ 4-byte Spill + ldmib r3, {r1, r12} + ldr r2, [r3, #24] + ldr r7, [r3] + ldr r6, [r3, #12] + ldr r5, [r3, #16] + ldr r4, [r3, #20] + ldr r11, [r3, #28] + str r2, [sp, #24] @ 4-byte Spill + ldr r2, [sp, #28] @ 4-byte Reload + subs r7, r10, r7 + sbcs r2, r2, r1 + ldr r1, [r3, #40] + sbcs r12, lr, r12 + sbcs lr, r9, r6 + ldr r9, [r3, #32] + ldr r6, [r3, #36] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [r3, #44] + str r1, [sp] @ 4-byte Spill + ldr r1, [r3, #48] + str r1, [sp, #4] @ 4-byte Spill + ldr r1, [sp, #32] @ 4-byte Reload + sbcs r5, r1, r5 + ldr r1, [sp, #36] @ 4-byte Reload + sbcs r3, r1, r4 + ldr r1, [sp, #24] @ 4-byte Reload + sbcs r4, r8, r1 + ldr r1, [sp, #40] @ 4-byte Reload + sbcs r8, r1, r11 + ldr r1, [sp, #44] @ 4-byte Reload + sbcs r9, r1, r9 + ldr r1, [sp, #56] @ 4-byte Reload + sbcs r11, r1, r6 + ldr r1, [sp, #52] @ 4-byte Reload + ldr r6, [sp, #20] @ 4-byte Reload + sbcs r1, r1, r6 + ldr r6, [sp] @ 4-byte Reload + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + sbcs r1, r1, r6 + ldr r6, [sp, #4] @ 4-byte Reload + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + sbc r6, r1, 
r6 + asr r1, r6, #31 + cmp r1, #0 + movlt r7, r10 + str r7, [r0] + ldr r7, [sp, #28] @ 4-byte Reload + movlt r2, r7 + str r2, [r0, #4] + ldr r2, [sp, #8] @ 4-byte Reload + movlt r12, r2 + ldr r2, [sp, #12] @ 4-byte Reload + cmp r1, #0 + str r12, [r0, #8] + movlt lr, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str lr, [r0, #12] + movlt r5, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r5, [r0, #16] + movlt r3, r2 + ldr r2, [sp, #16] @ 4-byte Reload + cmp r1, #0 + str r3, [r0, #20] + ldr r3, [sp, #20] @ 4-byte Reload + movlt r4, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r4, [r0, #24] + movlt r8, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r8, [r0, #28] + movlt r9, r2 + ldr r2, [sp, #56] @ 4-byte Reload + cmp r1, #0 + str r9, [r0, #32] + movlt r11, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r11, [r0, #36] + movlt r3, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r3, [r0, #40] + ldr r3, [sp, #24] @ 4-byte Reload + movlt r3, r2 + cmp r1, #0 + ldr r1, [sp, #60] @ 4-byte Reload + str r3, [r0, #44] + movlt r6, r1 + str r6, [r0, #48] + add sp, sp, #64 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end201: + .size mcl_fp_addNF13L, .Lfunc_end201-mcl_fp_addNF13L + .cantunwind + .fnend + + .globl mcl_fp_sub13L + .align 2 + .type mcl_fp_sub13L,%function +mcl_fp_sub13L: @ @mcl_fp_sub13L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #56 + sub sp, sp, #56 + ldr r9, [r2] + ldmib r2, {r8, lr} + ldr r12, [r2, #12] + ldm r1, {r4, r5, r6, r7} + subs r11, r4, r9 + ldr r4, [r2, #24] + sbcs r5, r5, r8 + str r11, [sp, #28] @ 4-byte Spill + str r11, [r0] + sbcs r6, r6, lr + str r5, [sp, #52] @ 4-byte Spill + ldr r5, [r2, #20] + sbcs r7, r7, r12 + str r6, [sp, #48] @ 4-byte Spill + ldr r6, [r2, #16] + str r7, [sp, #44] @ 4-byte Spill + ldr r7, [r1, #16] + ldr r11, [sp, #44] @ 4-byte Reload + sbcs r10, r7, r6 + ldr r7, [r1, #20] + str r10, [sp, #36] @ 4-byte Spill + sbcs r12, r7, r5 + ldr r7, [r1, #24] + ldr r5, [r1, #28] + sbcs r8, r7, r4 + ldr r7, [r2, #28] + ldr r4, [r1, #36] + str r8, [sp, #40] @ 4-byte Spill + sbcs r9, r5, r7 + ldr r7, [r2, #32] + ldr r5, [r1, #32] + sbcs r5, r5, r7 + ldr r7, [r2, #36] + sbcs r6, r4, r7 + ldr r7, [r2, #40] + ldr r4, [r1, #40] + sbcs lr, r4, r7 + ldr r7, [r2, #44] + ldr r4, [r1, #44] + ldr r2, [r2, #48] + ldr r1, [r1, #48] + sbcs r7, r4, r7 + ldr r4, [sp, #52] @ 4-byte Reload + sbcs r2, r1, r2 + ldr r1, [sp, #48] @ 4-byte Reload + str r7, [sp, #32] @ 4-byte Spill + str r2, [sp, #24] @ 4-byte Spill + str r4, [r0, #4] + str r1, [r0, #8] + str r11, [r0, #12] + str r10, [r0, #16] + str r12, [r0, #20] + str r8, [r0, #24] + str r9, [r0, #28] + str r5, [r0, #32] + str r6, [r0, #36] + str lr, [r0, #40] + str r7, [r0, #44] + str r2, [r0, #48] + mov r2, #0 + sbc r2, r2, #0 + tst r2, #1 + beq .LBB202_2 +@ BB#1: @ %carry + ldr r2, [r3, #48] + ldr r7, [sp, #28] @ 4-byte Reload + ldr r10, [r3, #4] + ldr r8, [r3, #8] + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [r3, #12] + str r2, [sp] @ 4-byte Spill + ldr r2, [r3, #16] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r3, #20] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r3, #24] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r3, #28] + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [r3] + adds r2, r2, r7 + ldr r7, [r3, #44] + adcs r4, r10, r4 + ldr r10, [r3, #36] + str r7, [sp, #28] @ 4-byte Spill + ldr r7, [r3, #40] + ldr r3, [r3, #32] + str r7, [sp, #52] @ 4-byte Spill + adcs r7, r8, r1 + ldr r1, [sp] @ 4-byte Reload + stm r0, {r2, r4, r7} + ldr r2, [sp, #36] 
@ 4-byte Reload + ldr r7, [sp, #4] @ 4-byte Reload + adcs r1, r1, r11 + str r1, [r0, #12] + ldr r1, [sp, #8] @ 4-byte Reload + adcs r2, r7, r2 + str r2, [r0, #16] + adcs r2, r1, r12 + ldr r1, [sp, #12] @ 4-byte Reload + add r12, r0, #32 + str r2, [r0, #20] + ldr r2, [sp, #40] @ 4-byte Reload + adcs r2, r1, r2 + ldr r1, [sp, #16] @ 4-byte Reload + str r2, [r0, #24] + adcs r2, r1, r9 + ldr r1, [sp, #52] @ 4-byte Reload + str r2, [r0, #28] + adcs r2, r3, r5 + ldr r5, [sp, #20] @ 4-byte Reload + adcs r3, r10, r6 + ldr r6, [sp, #28] @ 4-byte Reload + adcs r7, r1, lr + ldr r1, [sp, #32] @ 4-byte Reload + adcs r6, r6, r1 + ldr r1, [sp, #24] @ 4-byte Reload + stm r12, {r2, r3, r7} + str r6, [r0, #44] + adc r1, r5, r1 + str r1, [r0, #48] +.LBB202_2: @ %nocarry + add sp, sp, #56 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end202: + .size mcl_fp_sub13L, .Lfunc_end202-mcl_fp_sub13L + .cantunwind + .fnend + + .globl mcl_fp_subNF13L + .align 2 + .type mcl_fp_subNF13L,%function +mcl_fp_subNF13L: @ @mcl_fp_subNF13L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #72 + sub sp, sp, #72 + mov r12, r0 + ldr r0, [r2, #32] + add r9, r1, #20 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r2, #36] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r2, #40] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r2, #44] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r2, #48] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r1, #32] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r1, #36] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [r1, #40] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r1, #44] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r1, #48] + str r0, [sp, #28] @ 4-byte Spill + ldm r2, {r7, r11} + ldr r0, [r2, #8] + ldr r10, [r2, #12] + ldr r8, [r2, #16] + ldr lr, [r1, #16] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [r2, #20] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r2, #24] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [r2, #28] + ldr r2, [r1, #8] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r1, #12] + ldm r9, {r4, r5, r9} + ldm r1, {r1, r6} + subs r7, r1, r7 + ldr r1, [sp, #52] @ 4-byte Reload + sbcs r6, r6, r11 + str r7, [sp] @ 4-byte Spill + str r6, [sp, #4] @ 4-byte Spill + sbcs r1, r2, r1 + ldr r2, [sp, #28] @ 4-byte Reload + sbcs r0, r0, r10 + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + sbcs r0, lr, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + sbcs r0, r4, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + sbcs r0, r5, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + sbcs r0, r9, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + sbcs r11, r1, r0 + ldr r0, [sp, #44] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + str r11, [sp, #20] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + sbcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + sbcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + sbc r0, r2, r1 + ldr r1, [r3, #40] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r3, #32] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [r3, #44] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r3, #36] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [r3, #48] + str r1, [sp, 
#32] @ 4-byte Spill + ldm r3, {r2, lr} + ldr r1, [r3, #20] + ldr r5, [r3, #8] + ldr r10, [sp, #8] @ 4-byte Reload + ldr r4, [r3, #12] + ldr r8, [r3, #24] + ldr r9, [r3, #28] + adds r2, r7, r2 + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [r3, #16] + adcs r3, r6, lr + ldr r6, [sp, #12] @ 4-byte Reload + adcs lr, r10, r5 + ldr r5, [sp, #48] @ 4-byte Reload + adcs r4, r5, r4 + ldr r5, [sp, #52] @ 4-byte Reload + adcs r5, r5, r1 + ldr r1, [sp, #56] @ 4-byte Reload + adcs r6, r1, r6 + ldr r1, [sp, #60] @ 4-byte Reload + adcs r7, r1, r8 + ldr r1, [sp, #64] @ 4-byte Reload + adcs r8, r1, r9 + ldr r1, [sp, #16] @ 4-byte Reload + adcs r9, r11, r1 + ldr r1, [sp, #44] @ 4-byte Reload + adcs r11, r1, r0 + ldr r1, [sp, #68] @ 4-byte Reload + ldr r0, [sp, #24] @ 4-byte Reload + adcs r1, r1, r0 + ldr r0, [sp, #36] @ 4-byte Reload + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r1, r0, r1 + str r1, [sp, #32] @ 4-byte Spill + asr r1, r0, #31 + ldr r0, [sp] @ 4-byte Reload + cmp r1, #0 + movge lr, r10 + movge r2, r0 + ldr r0, [sp, #4] @ 4-byte Reload + str r2, [r12] + ldr r2, [sp, #24] @ 4-byte Reload + movge r3, r0 + ldr r0, [sp, #48] @ 4-byte Reload + cmp r1, #0 + str r3, [r12, #4] + str lr, [r12, #8] + movge r4, r0 + ldr r0, [sp, #52] @ 4-byte Reload + str r4, [r12, #12] + movge r5, r0 + ldr r0, [sp, #56] @ 4-byte Reload + str r5, [r12, #16] + movge r6, r0 + ldr r0, [sp, #60] @ 4-byte Reload + cmp r1, #0 + str r6, [r12, #20] + movge r7, r0 + ldr r0, [sp, #64] @ 4-byte Reload + str r7, [r12, #24] + movge r8, r0 + ldr r0, [sp, #20] @ 4-byte Reload + str r8, [r12, #28] + movge r9, r0 + ldr r0, [sp, #44] @ 4-byte Reload + cmp r1, #0 + str r9, [r12, #32] + movge r11, r0 + ldr r0, [sp, #68] @ 4-byte Reload + str r11, [r12, #36] + movge r2, r0 + ldr r0, [sp, #28] @ 4-byte Reload + str r2, [r12, #40] + ldr r2, [sp, #36] @ 4-byte Reload + movge r0, r2 + cmp r1, #0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [r12, #44] + ldr r0, [sp, #32] @ 4-byte Reload + movge r0, r1 + str r0, [r12, #48] + add sp, sp, #72 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end203: + .size mcl_fp_subNF13L, .Lfunc_end203-mcl_fp_subNF13L + .cantunwind + .fnend + + .globl mcl_fpDbl_add13L + .align 2 + .type mcl_fpDbl_add13L,%function +mcl_fpDbl_add13L: @ @mcl_fpDbl_add13L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #152 + sub sp, sp, #152 + ldm r1, {r7, r8, lr} + ldr r12, [r1, #12] + ldm r2, {r4, r5, r6, r9} + add r10, r1, #32 + adds r4, r4, r7 + str r4, [sp, #84] @ 4-byte Spill + ldr r4, [r2, #96] + str r4, [sp, #144] @ 4-byte Spill + ldr r4, [r2, #100] + str r4, [sp, #148] @ 4-byte Spill + adcs r4, r5, r8 + ldr r8, [r2, #16] + adcs r7, r6, lr + str r4, [sp, #72] @ 4-byte Spill + add lr, r1, #16 + str r7, [sp, #68] @ 4-byte Spill + ldr r7, [r2, #64] + str r7, [sp, #112] @ 4-byte Spill + ldr r7, [r2, #68] + str r7, [sp, #116] @ 4-byte Spill + ldr r7, [r2, #72] + str r7, [sp, #124] @ 4-byte Spill + ldr r7, [r2, #76] + str r7, [sp, #120] @ 4-byte Spill + ldr r7, [r2, #80] + str r7, [sp, #128] @ 4-byte Spill + ldr r7, [r2, #84] + str r7, [sp, #132] @ 4-byte Spill + ldr r7, [r2, #88] + str r7, [sp, #136] @ 4-byte Spill + ldr r7, [r2, #92] + str r7, [sp, #140] @ 4-byte Spill + adcs r7, r9, r12 + str r7, [sp, #28] @ 4-byte Spill + ldr r7, [r2, #32] + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [r2, 
#36] + str r7, [sp, #64] @ 4-byte Spill + ldr r7, [r2, #40] + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [r2, #44] + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #88] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #92] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #96] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #100] @ 4-byte Spill + ldr r7, [r2, #28] + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #24] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r2, #20] + ldr r2, [r1, #96] + str r2, [sp, #104] @ 4-byte Spill + ldr r2, [r1, #100] + str r7, [sp, #12] @ 4-byte Spill + str r2, [sp, #108] @ 4-byte Spill + ldr r2, [r1, #64] + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #40] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r1, #80] + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [r1, #84] + str r2, [sp, #52] @ 4-byte Spill + ldr r2, [r1, #88] + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [r1, #92] + str r2, [sp, #60] @ 4-byte Spill + ldr r2, [r1, #68] + str r2, [sp, #24] @ 4-byte Spill + ldm r10, {r4, r5, r6, r9, r10} + ldr r2, [r1, #52] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #8] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r11, [sp, #84] @ 4-byte Reload + ldr r7, [sp, #72] @ 4-byte Reload + str r11, [r0] + str r7, [r0, #4] + ldr r7, [sp, #68] @ 4-byte Reload + adcs r1, r8, r1 + str r7, [r0, #8] + ldr r7, [sp, #28] @ 4-byte Reload + str r7, [r0, #12] + ldr r7, [sp, #12] @ 4-byte Reload + str r1, [r0, #16] + ldr r1, [sp, #16] @ 4-byte Reload + adcs r2, r7, r2 + ldr r7, [sp] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #20] @ 4-byte Reload + adcs r1, r1, r12 + str r1, [r0, #24] + ldr r1, [sp, #36] @ 4-byte Reload + adcs r2, r2, lr + str r2, [r0, #28] + ldr r2, [sp, #64] @ 4-byte Reload + adcs r1, r1, r4 + str r1, [r0, #32] + ldr r1, [sp, #76] @ 4-byte Reload + adcs r2, r2, r5 + str r2, [r0, #36] + ldr r2, [sp, #80] @ 4-byte Reload + adcs r1, r1, r6 + str r1, [r0, #40] + ldr r1, [sp, #88] @ 4-byte Reload + adcs r2, r2, r9 + str r2, [r0, #44] + ldr r2, [sp, #92] @ 4-byte Reload + adcs r1, r1, r10 + str r1, [r0, #48] + ldr r1, [sp, #96] @ 4-byte Reload + adcs r6, r2, r7 + ldr r2, [sp, #4] @ 4-byte Reload + str r6, [sp, #88] @ 4-byte Spill + adcs r5, r1, r2 + ldr r1, [sp, #100] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + str r5, [sp, #92] @ 4-byte Spill + adcs r4, r1, r2 + ldr r1, [sp, #112] @ 4-byte Reload + ldr r2, [sp, #32] @ 4-byte Reload + str r4, [sp, #96] @ 4-byte Spill + adcs r7, r1, r2 + ldr r1, [sp, #116] @ 4-byte Reload + ldr r2, [sp, #24] @ 4-byte Reload + str r7, [sp, #112] @ 4-byte Spill + adcs lr, r1, r2 + ldr r1, [sp, #124] @ 4-byte Reload + ldr r2, [sp, #40] @ 4-byte Reload + str lr, [sp, #100] @ 4-byte Spill + adcs r1, r1, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #124] @ 4-byte Spill + ldr r1, [sp, #120] @ 4-byte Reload + adcs r8, r1, r2 + ldr r1, [sp, #128] @ 4-byte Reload + ldr r2, [sp, #48] @ 4-byte Reload + str r8, [sp, #116] @ 4-byte Spill + adcs r1, r1, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r1, [sp, #128] @ 4-byte Spill + ldr r1, [sp, #132] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #56] @ 4-byte Reload + str r1, [sp, #132] @ 4-byte Spill + ldr r1, [sp, #136] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #60] @ 4-byte Reload + str r1, [sp, #136] @ 4-byte Spill + ldr r1, [sp, #140] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #104] @ 4-byte Reload + str r1, 
[sp, #140] @ 4-byte Spill + ldr r1, [sp, #144] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #108] @ 4-byte Reload + str r1, [sp, #144] @ 4-byte Spill + ldr r1, [sp, #148] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #148] @ 4-byte Spill + mov r1, #0 + adc r1, r1, #0 + str r1, [sp, #108] @ 4-byte Spill + ldmib r3, {r2, r9, r12} + ldr r1, [r3, #20] + ldr r11, [r3] + ldr r10, [r3, #16] + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [r3, #24] + subs r11, r6, r11 + sbcs r2, r5, r2 + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [r3, #28] + str r1, [sp, #120] @ 4-byte Spill + sbcs r1, r4, r9 + add r9, r3, #32 + sbcs r12, r7, r12 + ldm r9, {r5, r7, r9} + ldr r4, [r3, #44] + ldr r3, [r3, #48] + ldr r6, [sp, #84] @ 4-byte Reload + sbcs r10, lr, r10 + str r3, [sp, #80] @ 4-byte Spill + ldr r3, [sp, #124] @ 4-byte Reload + str r4, [sp, #76] @ 4-byte Spill + sbcs lr, r3, r6 + ldr r3, [sp, #104] @ 4-byte Reload + ldr r6, [sp, #120] @ 4-byte Reload + sbcs r4, r8, r3 + ldr r3, [sp, #128] @ 4-byte Reload + sbcs r6, r3, r6 + ldr r3, [sp, #132] @ 4-byte Reload + sbcs r5, r3, r5 + ldr r3, [sp, #136] @ 4-byte Reload + sbcs r8, r3, r7 + ldr r3, [sp, #140] @ 4-byte Reload + ldr r7, [sp, #76] @ 4-byte Reload + sbcs r9, r3, r9 + ldr r3, [sp, #144] @ 4-byte Reload + sbcs r3, r3, r7 + ldr r7, [sp, #80] @ 4-byte Reload + str r3, [sp, #120] @ 4-byte Spill + ldr r3, [sp, #148] @ 4-byte Reload + sbcs r3, r3, r7 + ldr r7, [sp, #88] @ 4-byte Reload + str r3, [sp, #104] @ 4-byte Spill + ldr r3, [sp, #108] @ 4-byte Reload + sbc r3, r3, #0 + ands r3, r3, #1 + movne r11, r7 + ldr r7, [sp, #92] @ 4-byte Reload + str r11, [r0, #52] + movne r2, r7 + str r2, [r0, #56] + ldr r2, [sp, #96] @ 4-byte Reload + movne r1, r2 + cmp r3, #0 + ldr r2, [sp, #120] @ 4-byte Reload + str r1, [r0, #60] + ldr r1, [sp, #112] @ 4-byte Reload + movne r12, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r12, [r0, #64] + movne r10, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r10, [r0, #68] + movne lr, r1 + ldr r1, [sp, #116] @ 4-byte Reload + cmp r3, #0 + str lr, [r0, #72] + movne r4, r1 + ldr r1, [sp, #128] @ 4-byte Reload + str r4, [r0, #76] + movne r6, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r6, [r0, #80] + movne r5, r1 + ldr r1, [sp, #136] @ 4-byte Reload + cmp r3, #0 + str r5, [r0, #84] + movne r8, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r8, [r0, #88] + movne r9, r1 + ldr r1, [sp, #144] @ 4-byte Reload + str r9, [r0, #92] + movne r2, r1 + ldr r1, [sp, #148] @ 4-byte Reload + cmp r3, #0 + ldr r3, [sp, #104] @ 4-byte Reload + str r2, [r0, #96] + movne r3, r1 + str r3, [r0, #100] + add sp, sp, #152 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end204: + .size mcl_fpDbl_add13L, .Lfunc_end204-mcl_fpDbl_add13L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub13L + .align 2 + .type mcl_fpDbl_sub13L,%function +mcl_fpDbl_sub13L: @ @mcl_fpDbl_sub13L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #152 + sub sp, sp, #152 + ldr r7, [r2, #96] + add r10, r1, #32 + str r7, [sp, #144] @ 4-byte Spill + ldr r7, [r2, #100] + str r7, [sp, #148] @ 4-byte Spill + ldr r7, [r2, #64] + str r7, [sp, #124] @ 4-byte Spill + ldr r7, [r2, #72] + str r7, [sp, #112] @ 4-byte Spill + ldr r7, [r2, #76] + str r7, [sp, #140] @ 4-byte Spill + ldr r7, [r2, #80] + str r7, [sp, #132] @ 4-byte Spill + ldr r7, [r2, #84] + str r7, [sp, #128] @ 4-byte Spill + ldr r7, [r2, #88] + str r7, [sp, #116] @ 4-byte Spill + ldr r7, [r2, #92] + str r7, [sp, #136] @ 4-byte Spill + ldr r7, [r2, 
#68] + str r7, [sp, #108] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #120] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #104] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #100] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #96] @ 4-byte Spill + ldr r7, [r2, #44] + str r7, [sp, #92] @ 4-byte Spill + ldm r2, {r9, lr} + ldr r6, [r1] + ldr r5, [r1, #4] + ldr r12, [r2, #8] + ldr r4, [r1, #8] + ldr r8, [r2, #12] + ldr r7, [r1, #12] + subs r6, r6, r9 + str r6, [sp, #32] @ 4-byte Spill + ldr r6, [r2, #40] + str r6, [sp, #80] @ 4-byte Spill + sbcs r6, r5, lr + add lr, r1, #16 + str r6, [sp, #28] @ 4-byte Spill + ldr r6, [r2, #36] + str r6, [sp, #48] @ 4-byte Spill + sbcs r6, r4, r12 + sbcs r7, r7, r8 + str r6, [sp, #20] @ 4-byte Spill + ldr r6, [r2, #32] + ldr r8, [r2, #16] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r2, #28] + str r6, [sp, #40] @ 4-byte Spill + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [r2, #24] + str r7, [sp, #24] @ 4-byte Spill + ldr r7, [r2, #20] + ldr r2, [r1, #96] + str r2, [sp, #84] @ 4-byte Spill + ldr r2, [r1, #100] + str r7, [sp, #12] @ 4-byte Spill + str r2, [sp, #88] @ 4-byte Spill + ldr r2, [r1, #64] + str r2, [sp, #52] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #60] @ 4-byte Spill + ldr r2, [r1, #80] + str r2, [sp, #64] @ 4-byte Spill + ldr r2, [r1, #84] + str r2, [sp, #68] @ 4-byte Spill + ldr r2, [r1, #88] + str r2, [sp, #72] @ 4-byte Spill + ldr r2, [r1, #92] + str r2, [sp, #76] @ 4-byte Spill + ldr r2, [r1, #68] + str r2, [sp, #44] @ 4-byte Spill + ldm r10, {r4, r5, r6, r9, r10} + ldr r2, [r1, #52] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #8] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r11, [sp, #32] @ 4-byte Reload + ldr r7, [sp, #28] @ 4-byte Reload + str r11, [r0] + str r7, [r0, #4] + ldr r7, [sp, #20] @ 4-byte Reload + sbcs r1, r1, r8 + str r7, [r0, #8] + ldr r7, [sp, #16] @ 4-byte Reload + str r7, [r0, #12] + ldr r7, [sp, #12] @ 4-byte Reload + str r1, [r0, #16] + ldr r1, [sp, #24] @ 4-byte Reload + sbcs r2, r2, r7 + ldr r7, [sp] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #36] @ 4-byte Reload + sbcs r1, r12, r1 + str r1, [r0, #24] + ldr r1, [sp, #40] @ 4-byte Reload + sbcs r2, lr, r2 + str r2, [r0, #28] + ldr r2, [sp, #48] @ 4-byte Reload + sbcs r1, r4, r1 + str r1, [r0, #32] + ldr r1, [sp, #80] @ 4-byte Reload + sbcs r2, r5, r2 + str r2, [r0, #36] + ldr r2, [sp, #92] @ 4-byte Reload + sbcs r1, r6, r1 + str r1, [r0, #40] + ldr r1, [sp, #96] @ 4-byte Reload + sbcs r2, r9, r2 + str r2, [r0, #44] + ldr r2, [sp, #100] @ 4-byte Reload + sbcs r1, r10, r1 + add r10, r3, #16 + str r1, [r0, #48] + ldr r1, [sp, #104] @ 4-byte Reload + sbcs r9, r7, r2 + ldr r2, [sp, #4] @ 4-byte Reload + ldr r7, [sp, #52] @ 4-byte Reload + sbcs r11, r2, r1 + ldr r1, [sp, #120] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + sbcs r1, r2, r1 + ldr r2, [sp, #124] @ 4-byte Reload + str r1, [sp, #120] @ 4-byte Spill + mov r1, #0 + sbcs r6, r7, r2 + ldr r2, [sp, #108] @ 4-byte Reload + ldr r7, [sp, #44] @ 4-byte Reload + str r6, [sp, #92] @ 4-byte Spill + sbcs r2, r7, r2 + ldr r7, [sp, #56] @ 4-byte Reload + str r2, [sp, #124] @ 4-byte Spill + ldr r2, [sp, #112] @ 4-byte Reload + sbcs r8, r7, r2 + ldr r2, [sp, #140] @ 4-byte Reload + ldr r7, [sp, #60] @ 4-byte Reload + str r8, [sp, #96] @ 4-byte Spill + sbcs r2, r7, r2 + ldr r7, [sp, #64] @ 4-byte Reload + str r2, [sp, #140] @ 4-byte Spill + ldr r2, [sp, #132] @ 
4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #68] @ 4-byte Reload + str r2, [sp, #132] @ 4-byte Spill + ldr r2, [sp, #128] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #72] @ 4-byte Reload + str r2, [sp, #128] @ 4-byte Spill + ldr r2, [sp, #116] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #76] @ 4-byte Reload + str r2, [sp, #116] @ 4-byte Spill + ldr r2, [sp, #136] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #84] @ 4-byte Reload + str r2, [sp, #136] @ 4-byte Spill + ldr r2, [sp, #144] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #88] @ 4-byte Reload + str r2, [sp, #144] @ 4-byte Spill + ldr r2, [sp, #148] @ 4-byte Reload + sbcs r2, r7, r2 + mov r7, r9 + mov r9, r11 + sbc r1, r1, #0 + str r2, [sp, #148] @ 4-byte Spill + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [r3, #32] + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [r3, #36] + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [r3, #40] + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [r3, #44] + str r1, [sp, #108] @ 4-byte Spill + ldr r1, [r3, #48] + str r1, [sp, #112] @ 4-byte Spill + ldm r3, {r1, r2, r12, lr} + ldm r10, {r3, r4, r5, r10} + ldr r11, [sp, #120] @ 4-byte Reload + adds r1, r7, r1 + adcs r2, r9, r2 + adcs r12, r11, r12 + ldr r11, [sp, #112] @ 4-byte Reload + adcs lr, r6, lr + ldr r6, [sp, #124] @ 4-byte Reload + adcs r3, r6, r3 + ldr r6, [sp, #140] @ 4-byte Reload + adcs r4, r8, r4 + adcs r8, r6, r5 + ldr r5, [sp, #132] @ 4-byte Reload + ldr r6, [sp, #84] @ 4-byte Reload + adcs r10, r5, r10 + ldr r5, [sp, #128] @ 4-byte Reload + adcs r5, r5, r6 + ldr r6, [sp, #88] @ 4-byte Reload + str r5, [sp, #84] @ 4-byte Spill + ldr r5, [sp, #116] @ 4-byte Reload + adcs r5, r5, r6 + ldr r6, [sp, #104] @ 4-byte Reload + str r5, [sp, #88] @ 4-byte Spill + ldr r5, [sp, #136] @ 4-byte Reload + adcs r5, r5, r6 + ldr r6, [sp, #108] @ 4-byte Reload + str r5, [sp, #104] @ 4-byte Spill + ldr r5, [sp, #144] @ 4-byte Reload + adcs r5, r5, r6 + str r5, [sp, #108] @ 4-byte Spill + ldr r5, [sp, #148] @ 4-byte Reload + adc r5, r5, r11 + str r5, [sp, #112] @ 4-byte Spill + ldr r5, [sp, #100] @ 4-byte Reload + ands r5, r5, #1 + moveq r1, r7 + moveq r2, r9 + str r1, [r0, #52] + ldr r1, [sp, #120] @ 4-byte Reload + str r2, [r0, #56] + ldr r2, [sp, #84] @ 4-byte Reload + moveq r12, r1 + ldr r1, [sp, #92] @ 4-byte Reload + cmp r5, #0 + str r12, [r0, #60] + moveq lr, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str lr, [r0, #64] + moveq r3, r1 + ldr r1, [sp, #96] @ 4-byte Reload + str r3, [r0, #68] + ldr r3, [sp, #112] @ 4-byte Reload + moveq r4, r1 + ldr r1, [sp, #140] @ 4-byte Reload + cmp r5, #0 + str r4, [r0, #72] + moveq r8, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r8, [r0, #76] + moveq r10, r1 + ldr r1, [sp, #128] @ 4-byte Reload + str r10, [r0, #80] + moveq r2, r1 + ldr r1, [sp, #116] @ 4-byte Reload + cmp r5, #0 + str r2, [r0, #84] + ldr r2, [sp, #88] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #136] @ 4-byte Reload + str r2, [r0, #88] + ldr r2, [sp, #104] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #144] @ 4-byte Reload + str r2, [r0, #92] + ldr r2, [sp, #108] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #148] @ 4-byte Reload + cmp r5, #0 + str r2, [r0, #96] + moveq r3, r1 + str r3, [r0, #100] + add sp, sp, #152 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end205: + .size mcl_fpDbl_sub13L, .Lfunc_end205-mcl_fpDbl_sub13L + .cantunwind + .fnend + + .align 2 + .type .LmulPv448x32,%function +.LmulPv448x32: @ @mulPv448x32 + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r11, lr} + push {r4, r5, r6, r7, r8, r9, 
r11, lr} + ldr r12, [r1] + ldmib r1, {r3, lr} + ldr r9, [r1, #12] + umull r4, r8, lr, r2 + umull lr, r6, r12, r2 + mov r5, r4 + mov r7, r6 + str lr, [r0] + umull lr, r12, r9, r2 + umlal r7, r5, r3, r2 + str r5, [r0, #8] + str r7, [r0, #4] + umull r5, r7, r3, r2 + adds r3, r6, r5 + adcs r3, r7, r4 + adcs r3, r8, lr + str r3, [r0, #12] + ldr r3, [r1, #16] + umull r7, r6, r3, r2 + adcs r3, r12, r7 + str r3, [r0, #16] + ldr r3, [r1, #20] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #20] + ldr r3, [r1, #24] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #24] + ldr r3, [r1, #28] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #28] + ldr r3, [r1, #32] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #32] + ldr r3, [r1, #36] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #36] + ldr r3, [r1, #40] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #40] + ldr r3, [r1, #44] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #44] + ldr r3, [r1, #48] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #48] + ldr r1, [r1, #52] + umull r3, r7, r1, r2 + adcs r1, r6, r3 + str r1, [r0, #52] + adc r1, r7, #0 + str r1, [r0, #56] + pop {r4, r5, r6, r7, r8, r9, r11, lr} + mov pc, lr +.Lfunc_end206: + .size .LmulPv448x32, .Lfunc_end206-.LmulPv448x32 + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre14L + .align 2 + .type mcl_fp_mulUnitPre14L,%function +mcl_fp_mulUnitPre14L: @ @mcl_fp_mulUnitPre14L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #76 + sub sp, sp, #76 + mov r4, r0 + add r0, sp, #8 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #64] + add lr, sp, #8 + ldr r8, [sp, #56] + ldr r9, [sp, #52] + ldr r10, [sp, #48] + ldr r11, [sp, #44] + ldr r5, [sp, #40] + ldr r6, [sp, #36] + ldr r7, [sp, #32] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #60] + str r0, [sp] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + stm r4, {r0, r1, r2, r3, r12, lr} + str r7, [r4, #24] + str r6, [r4, #28] + str r5, [r4, #32] + str r11, [r4, #36] + str r10, [r4, #40] + str r9, [r4, #44] + str r8, [r4, #48] + ldr r0, [sp] @ 4-byte Reload + str r0, [r4, #52] + ldr r0, [sp, #4] @ 4-byte Reload + str r0, [r4, #56] + add sp, sp, #76 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end207: + .size mcl_fp_mulUnitPre14L, .Lfunc_end207-mcl_fp_mulUnitPre14L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre14L + .align 2 + .type mcl_fpDbl_mulPre14L,%function +mcl_fpDbl_mulPre14L: @ @mcl_fpDbl_mulPre14L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #228 + sub sp, sp, #228 + mov r6, r2 + mov r5, r1 + mov r4, r0 + bl mcl_fpDbl_mulPre7L(PLT) + add r0, r4, #56 + add r1, r5, #28 + add r2, r6, #28 + bl mcl_fpDbl_mulPre7L(PLT) + ldr r0, [r6, #32] + add r11, r6, #36 + str r0, [sp, #104] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [r6, #52] + ldr r12, [r6] + str r0, [sp, #112] @ 4-byte Spill + ldmib r6, {r1, r2, r3, r7} + ldr r0, [r6, #28] + ldr lr, [r6, #24] + ldr r6, [r6, #20] + adds r0, r12, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r1, r0 + str r0, [sp, #104] @ 4-byte Spill + adcs r0, r2, r8 + str r0, [sp, #100] @ 4-byte Spill + adcs r0, r3, r9 + str r0, [sp, #96] @ 4-byte Spill + adcs r0, r7, r10 + str r0, [sp, #92] @ 4-byte Spill + adcs r0, r6, r11 + add r11, r5, #32 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, lr, r0 + add lr, r5, #12 
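+@ One-level Karatsuba split: the two mcl_fpDbl_mulPre7L calls above produced
+@ the low and high 7-limb half-products of the 14-limb operands; the
+@ adds/adcs around this point form the 7-limb half-sums (carry-outs kept in
+@ r6 and [sp, #80]) that feed the third mcl_fpDbl_mulPre7L call on sp+172,
+@ whose middle product is folded back in below.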
+ str r0, [sp, #84] @ 4-byte Spill + mov r0, #0 + ldm r11, {r8, r10, r11} + ldr r7, [r5] + ldr r3, [r5, #4] + ldr r2, [r5, #8] + adc r6, r0, #0 + ldr r0, [r5, #44] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r5, #48] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r5, #52] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r5, #28] + ldm lr, {r1, r9, r12, lr} + adds r0, r7, r0 + str r0, [sp, #112] @ 4-byte Spill + str r0, [sp, #144] + ldr r0, [sp, #72] @ 4-byte Reload + adcs r7, r3, r8 + adcs r10, r2, r10 + add r2, sp, #116 + str r7, [sp, #148] + adcs r11, r1, r11 + add r1, sp, #144 + str r10, [sp, #152] + str r11, [sp, #156] + adcs r5, r9, r0 + ldr r0, [sp, #76] @ 4-byte Reload + str r5, [sp, #160] + adcs r9, r12, r0 + ldr r0, [sp, #80] @ 4-byte Reload + str r9, [sp, #164] + adcs r8, lr, r0 + ldr r0, [sp, #108] @ 4-byte Reload + str r8, [sp, #168] + str r0, [sp, #116] + ldr r0, [sp, #104] @ 4-byte Reload + str r0, [sp, #120] + ldr r0, [sp, #100] @ 4-byte Reload + str r0, [sp, #124] + ldr r0, [sp, #96] @ 4-byte Reload + str r0, [sp, #128] + ldr r0, [sp, #92] @ 4-byte Reload + str r0, [sp, #132] + ldr r0, [sp, #88] @ 4-byte Reload + str r0, [sp, #136] + ldr r0, [sp, #84] @ 4-byte Reload + str r0, [sp, #140] + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + add r0, sp, #172 + bl mcl_fpDbl_mulPre7L(PLT) + ldr r0, [sp, #108] @ 4-byte Reload + cmp r6, #0 + ldr r2, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #100] @ 4-byte Reload + moveq r8, r6 + moveq r9, r6 + moveq r5, r6 + moveq r11, r6 + moveq r10, r6 + cmp r6, #0 + moveq r2, r6 + moveq r7, r6 + str r2, [sp, #112] @ 4-byte Spill + str r7, [sp, #76] @ 4-byte Spill + adds r3, r2, r0 + ldr r0, [sp, #104] @ 4-byte Reload + ldr r2, [sp, #92] @ 4-byte Reload + adcs r0, r7, r0 + ldr r7, [sp, #88] @ 4-byte Reload + adcs lr, r10, r1 + ldr r1, [sp, #96] @ 4-byte Reload + adcs r1, r11, r1 + adcs r2, r5, r2 + adcs r12, r9, r7 + ldr r7, [sp, #84] @ 4-byte Reload + adcs r7, r8, r7 + str r7, [sp, #104] @ 4-byte Spill + mov r7, #0 + adc r7, r7, #0 + str r7, [sp, #108] @ 4-byte Spill + ldr r7, [sp, #80] @ 4-byte Reload + cmp r7, #0 + moveq r2, r5 + ldr r5, [sp, #76] @ 4-byte Reload + moveq r1, r11 + moveq lr, r10 + ldr r11, [sp, #104] @ 4-byte Reload + moveq r0, r5 + ldr r5, [sp, #112] @ 4-byte Reload + moveq r3, r5 + cmp r7, #0 + ldr r5, [sp, #108] @ 4-byte Reload + moveq r5, r7 + and r7, r6, r7 + ldr r6, [sp, #200] + moveq r12, r9 + moveq r11, r8 + adds r10, r3, r6 + ldr r3, [sp, #204] + adcs r8, r0, r3 + ldr r0, [sp, #208] + add r3, sp, #172 + adcs r9, lr, r0 + ldr r0, [sp, #212] + ldr lr, [r4] + adcs r0, r1, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #216] + adcs r0, r2, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #220] + adcs r0, r12, r0 + ldr r12, [r4, #4] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #224] + adcs r0, r11, r0 + ldr r11, [r4, #12] + str r0, [sp, #92] @ 4-byte Spill + adc r0, r5, r7 + ldr r5, [r4, #8] + str r0, [sp, #88] @ 4-byte Spill + ldm r3, {r0, r1, r2, r3} + subs lr, r0, lr + sbcs r12, r1, r12 + ldr r1, [sp, #188] + sbcs r5, r2, r5 + ldr r2, [r4, #36] + sbcs r0, r3, r11 + ldr r3, [sp, #104] @ 4-byte Reload + ldr r11, [r4, #60] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r4, #16] + str r2, [sp, #112] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [sp, #192] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r4, #20] + sbcs r0, r1, r0 + ldr r1, [sp, #196] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r4, #24] + sbcs r6, r1, r0 + ldr r0, [r4, #28] + sbcs r7, r10, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr 
r0, [r4, #32] + ldr r10, [r4, #56] + sbcs r8, r8, r0 + str r0, [sp, #44] @ 4-byte Spill + sbcs r9, r9, r2 + ldr r2, [r4, #40] + sbcs r0, r3, r2 + str r2, [sp, #108] @ 4-byte Spill + ldr r2, [r4, #44] + ldr r3, [sp, #100] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + sbcs r0, r3, r2 + str r2, [sp, #104] @ 4-byte Spill + ldr r2, [r4, #48] + ldr r3, [sp, #96] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + sbcs r0, r3, r2 + str r2, [sp, #100] @ 4-byte Spill + ldr r2, [r4, #52] + ldr r3, [sp, #92] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + sbcs r0, r3, r2 + str r2, [sp, #96] @ 4-byte Spill + ldr r2, [sp, #88] @ 4-byte Reload + ldr r3, [r4, #68] + str r0, [sp, #56] @ 4-byte Spill + sbc r0, r2, #0 + str r0, [sp, #52] @ 4-byte Spill + subs r0, lr, r10 + ldr lr, [r4, #76] + str r0, [sp, #48] @ 4-byte Spill + sbcs r0, r12, r11 + ldr r12, [r4, #72] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r4, #64] + str r0, [sp, #36] @ 4-byte Spill + sbcs r0, r5, r0 + ldr r5, [sp, #84] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + sbcs r0, r5, r3 + ldr r5, [r4, #80] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + sbcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + sbcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + sbcs r0, r6, r5 + ldr r6, [r4, #84] + str r0, [sp, #24] @ 4-byte Spill + sbcs r0, r7, r6 + str r6, [sp, #92] @ 4-byte Spill + ldr r6, [r4, #88] + str r0, [sp, #20] @ 4-byte Spill + sbcs r0, r8, r6 + str r6, [sp, #88] @ 4-byte Spill + ldr r6, [r4, #92] + str r0, [sp, #16] @ 4-byte Spill + sbcs r0, r9, r6 + add r9, r4, #96 + str r6, [sp, #84] @ 4-byte Spill + str r0, [sp, #12] @ 4-byte Spill + ldm r9, {r6, r7, r8, r9} + ldr r0, [sp, #80] @ 4-byte Reload + ldr r1, [sp, #48] @ 4-byte Reload + ldr r2, [sp, #40] @ 4-byte Reload + sbcs r0, r0, r6 + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + sbcs r0, r0, r7 + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + sbcs r0, r0, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + sbcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + sbc r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adds r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [r4, #28] + ldr r0, [sp, #112] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [r4, #32] + ldr r1, [sp, #108] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r0, [r4, #36] + ldr r0, [sp, #104] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #72] @ 4-byte Reload + str r1, [r4, #40] + ldr r1, [sp, #100] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #64] @ 4-byte Reload + str r0, [r4, #44] + ldr r0, [sp, #96] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [r4, #48] + ldr r1, [sp, #20] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #12] @ 4-byte Reload + str r0, [r4, #52] + adcs r1, r10, r1 + ldr r0, [sp, #16] @ 4-byte Reload + str r1, [r4, #56] + ldr r1, [sp, #36] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [r4, #60] + adcs r1, r1, r2 + ldr r0, [sp, #4] @ 4-byte Reload + str r1, [r4, #64] + ldr r1, [sp, #8] @ 4-byte Reload + adcs r0, r3, r0 + adcs r1, r12, r1 + str r0, [r4, #68] + ldr r0, [sp, #60] @ 4-byte Reload + add r12, r4, #92 + str r1, [r4, #72] + ldr r1, [sp, #68] @ 4-byte Reload + adcs r0, lr, r0 + adcs r1, r5, r1 + str r0, [r4, #76] + ldr r0, [sp, #92] @ 4-byte Reload + str r1, [r4, #80] + 
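+@ Tail of the recombination: the remaining adcs chain propagates the final
+@ carries of the middle product into the top limbs [r4, #84]..[r4, #108].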
ldr r1, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [r4, #84] + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [r4, #88] + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + adcs r1, r6, #0 + adcs r2, r7, #0 + adcs r3, r8, #0 + adc r7, r9, #0 + stm r12, {r0, r1, r2, r3, r7} + add sp, sp, #228 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end208: + .size mcl_fpDbl_mulPre14L, .Lfunc_end208-mcl_fpDbl_mulPre14L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre14L + .align 2 + .type mcl_fpDbl_sqrPre14L,%function +mcl_fpDbl_sqrPre14L: @ @mcl_fpDbl_sqrPre14L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #220 + sub sp, sp, #220 + mov r5, r1 + mov r4, r0 + mov r2, r5 + bl mcl_fpDbl_mulPre7L(PLT) + add r1, r5, #28 + add r0, r4, #56 + mov r2, r1 + bl mcl_fpDbl_mulPre7L(PLT) + ldr r0, [r5, #44] + ldr r11, [r5, #32] + ldr r10, [r5, #36] + ldr r8, [r5, #40] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [r5, #48] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [r5, #52] + str r0, [sp, #104] @ 4-byte Spill + ldm r5, {r6, r7} + ldr r0, [r5, #28] + ldr r3, [r5, #8] + ldr r2, [r5, #12] + ldr r12, [r5, #16] + ldr lr, [r5, #24] + ldr r1, [r5, #20] + ldr r5, [sp, #96] @ 4-byte Reload + adds r9, r6, r0 + adcs r0, r7, r11 + ldr r7, [sp, #100] @ 4-byte Reload + str r9, [sp, #136] + str r9, [sp, #108] + adcs r3, r3, r10 + str r0, [sp, #140] + str r0, [sp, #112] + adcs r2, r2, r8 + str r3, [sp, #144] + str r3, [sp, #116] + adcs r6, r12, r5 + str r2, [sp, #148] + str r2, [sp, #120] + adcs r1, r1, r7 + ldr r7, [sp, #104] @ 4-byte Reload + str r6, [sp, #152] + str r6, [sp, #124] + lsr r5, r1, #31 + str r1, [sp, #156] + str r1, [sp, #128] + adcs r8, lr, r7 + orr r5, r5, r8, lsl #1 + str r8, [sp, #160] + str r8, [sp, #132] + str r5, [sp, #104] @ 4-byte Spill + lsr r5, r6, #31 + orr r1, r5, r1, lsl #1 + str r1, [sp, #100] @ 4-byte Spill + lsr r1, r2, #31 + orr r1, r1, r6, lsl #1 + str r1, [sp, #96] @ 4-byte Spill + lsr r1, r3, #31 + orr r1, r1, r2, lsl #1 + add r2, sp, #108 + str r1, [sp, #92] @ 4-byte Spill + lsr r1, r0, #31 + orr r1, r1, r3, lsl #1 + str r1, [sp, #84] @ 4-byte Spill + lsr r1, r9, #31 + orr r0, r1, r0, lsl #1 + add r1, sp, #136 + str r0, [sp, #76] @ 4-byte Spill + mov r0, #0 + adc r6, r0, #0 + add r0, sp, #164 + bl mcl_fpDbl_mulPre7L(PLT) + add lr, sp, #204 + add r7, sp, #192 + ldm lr, {r5, r10, r11, lr} + ldm r7, {r0, r1, r7} + ldr r2, [sp, #100] @ 4-byte Reload + ldr r3, [sp, #104] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + adds r0, r0, r9, lsl #1 + mov r9, r1 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r12, r7, r0 + ldr r0, [sp, #92] @ 4-byte Reload + adcs r1, r5, r0 + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r10, r0 + adcs r2, r11, r2 + adcs r3, lr, r3 + adc r8, r6, r8, lsr #31 + cmp r6, #0 + moveq r0, r10 + moveq r1, r5 + moveq r3, lr + moveq r2, r11 + moveq r12, r7 + cmp r6, #0 + ldr lr, [r4] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + moveq r8, r6 + str r2, [sp, #100] @ 4-byte Spill + mov r5, r3 + ldr r3, [sp, #76] @ 4-byte Reload + ldr r2, [sp, #80] @ 4-byte Reload + str r1, [sp, #96] @ 4-byte Spill + mov r7, r8 + add r8, sp, #164 + moveq r3, r9 + ldmib r4, {r9, r10, r11} + moveq r2, r0 + ldm r8, {r0, r1, r8} + ldr r6, [sp, #176] + subs lr, r0, lr + sbcs r0, r1, r9 + ldr r1, [sp, #180] + str r0, [sp, #60] @ 4-byte 
Spill + sbcs r0, r8, r10 + ldr r10, [r4, #56] + str r0, [sp, #76] @ 4-byte Spill + sbcs r0, r6, r11 + ldr r11, [r4, #60] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r4, #16] + sbcs r0, r1, r0 + ldr r1, [sp, #184] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r4, #20] + sbcs r0, r1, r0 + ldr r1, [sp, #188] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r4, #24] + sbcs r6, r1, r0 + ldr r1, [r4, #28] + ldr r0, [r4, #32] + sbcs r9, r2, r1 + str r0, [sp, #92] @ 4-byte Spill + ldr r2, [sp, #96] @ 4-byte Reload + sbcs r8, r3, r0 + ldr r0, [r4, #36] + ldr r3, [r4, #68] + str r0, [sp, #88] @ 4-byte Spill + sbcs r0, r12, r0 + ldr r12, [r4, #72] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r4, #40] + str r0, [sp, #84] @ 4-byte Spill + sbcs r0, r2, r0 + ldr r2, [r4, #44] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + str r2, [sp, #96] @ 4-byte Spill + sbcs r0, r0, r2 + ldr r2, [sp, #100] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r4, #48] + str r0, [sp, #104] @ 4-byte Spill + sbcs r0, r2, r0 + ldr r2, [r4, #64] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [r4, #52] + str r2, [sp, #32] @ 4-byte Spill + str r0, [sp, #100] @ 4-byte Spill + sbcs r0, r5, r0 + ldr r5, [r4, #80] + str r0, [sp, #44] @ 4-byte Spill + sbc r0, r7, #0 + str r0, [sp, #40] @ 4-byte Spill + subs r0, lr, r10 + ldr lr, [r4, #76] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + sbcs r0, r0, r11 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + sbcs r0, r0, r2 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + sbcs r0, r0, r3 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + sbcs r0, r0, r12 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + sbcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + sbcs r0, r6, r5 + ldr r6, [sp, #48] @ 4-byte Reload + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r4, #84] + str r0, [sp, #80] @ 4-byte Spill + sbcs r0, r9, r0 + add r9, r4, #96 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [r4, #88] + str r0, [sp, #76] @ 4-byte Spill + sbcs r0, r8, r0 + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [r4, #92] + str r0, [sp, #72] @ 4-byte Spill + sbcs r0, r6, r0 + str r0, [sp, #48] @ 4-byte Spill + ldm r9, {r6, r7, r8, r9} + ldr r0, [sp, #68] @ 4-byte Reload + ldr r2, [sp, #60] @ 4-byte Reload + sbcs r0, r0, r6 + str r0, [sp] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + sbcs r0, r0, r7 + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + sbcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + sbcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + sbc r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adds r0, r1, r0 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [r4, #28] + ldr r0, [sp, #88] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [r4, #32] + ldr r1, [sp, #84] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r0, [r4, #36] + ldr r0, [sp, #96] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [r4, #40] + ldr r1, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #56] @ 4-byte Reload + str r0, [r4, #44] + ldr r0, [sp, #100] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [r4, #48] + ldr r1, [sp, #12] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r0, [r4, #52] + adcs r1, r10, 
r1 + ldr r0, [sp, #8] @ 4-byte Reload + str r1, [r4, #56] + ldr r1, [sp, #32] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [r4, #60] + adcs r1, r1, r2 + ldr r0, [sp] @ 4-byte Reload + str r1, [r4, #64] + ldr r1, [sp, #4] @ 4-byte Reload + adcs r0, r3, r0 + adcs r1, r12, r1 + str r0, [r4, #68] + ldr r0, [sp, #52] @ 4-byte Reload + add r12, r4, #92 + str r1, [r4, #72] + ldr r1, [sp, #64] @ 4-byte Reload + adcs r0, lr, r0 + adcs r1, r5, r1 + str r0, [r4, #76] + ldr r0, [sp, #80] @ 4-byte Reload + str r1, [r4, #80] + ldr r1, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [r4, #84] + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [r4, #88] + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + adcs r1, r6, #0 + adcs r2, r7, #0 + adcs r3, r8, #0 + adc r7, r9, #0 + stm r12, {r0, r1, r2, r3, r7} + add sp, sp, #220 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end209: + .size mcl_fpDbl_sqrPre14L, .Lfunc_end209-mcl_fpDbl_sqrPre14L + .cantunwind + .fnend + + .globl mcl_fp_mont14L + .align 2 + .type mcl_fp_mont14L,%function +mcl_fp_mont14L: @ @mcl_fp_mont14L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #892 + sub sp, sp, #892 + .pad #1024 + sub sp, sp, #1024 + add r12, sp, #108 + add r7, sp, #1024 + mov r4, r3 + stm r12, {r1, r2, r3} + str r0, [sp, #72] @ 4-byte Spill + add r0, r7, #824 + ldr r6, [r3, #-4] + ldr r2, [r2] + str r6, [sp, #104] @ 4-byte Spill + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1852] + ldr r5, [sp, #1848] + add r8, sp, #1024 + mov r1, r4 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #1856] + mul r2, r5, r6 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #1860] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #1904] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #1900] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #1896] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #1892] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #1888] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #1884] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #1880] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1876] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1872] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1868] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1864] + str r0, [sp, #36] @ 4-byte Spill + add r0, r8, #760 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1840] + ldr r1, [sp, #108] @ 4-byte Reload + ldr r10, [sp, #1808] + ldr r11, [sp, #1804] + ldr r7, [sp, #1800] + ldr r9, [sp, #1784] + ldr r4, [sp, #1788] + ldr r6, [sp, #1792] + ldr r8, [sp, #1796] + add lr, sp, #1024 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1836] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1832] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1828] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1824] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1820] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1816] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1812] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + ldr r2, [r0, #4] + add r0, lr, #696 + bl .LmulPv448x32(PLT) + adds r0, r9, r5 + ldr r1, [sp, #48] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + ldr r3, [sp, #1736] + ldr r12, [sp, #1740] + ldr lr, [sp, #1744] + ldr r5, [sp, #1752] + ldr r9, [sp, #1760] + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #1748] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r6, 
r0 + ldr r6, [sp, #1720] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r8, r0 + ldr r8, [sp, #1756] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r7, r0 + ldr r7, [sp, #76] @ 4-byte Reload + adcs r1, r11, r1 + str r0, [sp, #36] @ 4-byte Spill + mov r0, #0 + ldr r11, [sp, #80] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r10, r1 + ldr r10, [sp, #1764] + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #92] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #100] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #1732] + adc r0, r0, #0 + adds r6, r11, r6 + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #1728] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1776] + str r6, [sp, #20] @ 4-byte Spill + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1772] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #1768] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #1724] + adcs r0, r7, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #4] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r6, r0 + add r6, sp, #1024 + add r0, r6, #632 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1712] + add r11, sp, #1664 + ldr r8, [sp, #1684] + ldr r9, [sp, #1680] + ldr r10, [sp, #1676] + ldr r4, [sp, #1656] + ldr r7, [sp, #1660] + add lr, sp, #1024 + str r0, [sp, #36] @ 
4-byte Spill + ldr r0, [sp, #1708] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1704] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1700] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1696] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1692] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1688] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r5, r6, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #8] + add r0, lr, #568 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #20] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + ldr r2, [sp, #1604] + ldr r3, [sp, #1608] + ldr r12, [sp, #1612] + ldr lr, [sp, #1616] + adds r0, r0, r4 + ldr r4, [sp, #1620] + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1624] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + ldr r7, [sp, #96] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1592] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #100] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1636] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1632] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1628] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1600] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + adds r6, r11, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1648] + str r6, [sp, #20] @ 4-byte Spill + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1644] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1640] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1596] + adcs r0, r7, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte 
Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r6, r0 + add r6, sp, #1024 + add r0, r6, #504 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1584] + add r11, sp, #1536 + ldr r8, [sp, #1556] + ldr r9, [sp, #1552] + ldr r10, [sp, #1548] + ldr r4, [sp, #1528] + ldr r7, [sp, #1532] + add lr, sp, #1024 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1580] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1576] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1572] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1568] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1564] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1560] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r5, r6, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, lr, #440 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #20] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + ldr r2, [sp, #1476] + ldr r3, [sp, #1480] + ldr r12, [sp, #1484] + ldr lr, [sp, #1488] + adds r0, r0, r4 + ldr r4, [sp, #1492] + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1496] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + ldr r7, [sp, #96] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1464] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #100] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1508] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1504] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1500] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1472] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + adds r6, r11, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1520] + str r6, [sp, #20] @ 4-byte Spill + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1516] + str r0, [sp, #32] @ 4-byte Spill + ldr 
r0, [sp, #1512] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1468] + adcs r0, r7, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r6, r0 + add r6, sp, #1024 + add r0, r6, #376 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1456] + add r11, sp, #1408 + ldr r8, [sp, #1428] + ldr r9, [sp, #1424] + ldr r10, [sp, #1420] + ldr r4, [sp, #1400] + ldr r7, [sp, #1404] + add lr, sp, #1024 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1452] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1448] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1444] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1440] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1436] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1432] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r5, r6, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, lr, #312 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #20] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + ldr r2, [sp, #1348] + ldr r3, [sp, #1352] + ldr r12, [sp, #1356] + ldr lr, [sp, #1360] + adds r0, r0, r4 + ldr r4, [sp, #1364] + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1368] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + ldr r7, [sp, #96] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1336] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #100] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1380] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1376] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1372] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + 
str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1344] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + adds r6, r11, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1392] + str r6, [sp, #20] @ 4-byte Spill + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1388] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1384] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1340] + adcs r0, r7, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r6, r0 + add r6, sp, #1024 + add r0, r6, #248 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1328] + add r11, sp, #1280 + ldr r8, [sp, #1300] + ldr r9, [sp, #1296] + ldr r10, [sp, #1292] + ldr r4, [sp, #1272] + ldr r7, [sp, #1276] + add lr, sp, #1024 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1324] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1320] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1316] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1312] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1308] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1304] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r5, r6, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #20] + add r0, lr, #184 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #20] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + ldr r2, [sp, #1220] + ldr r3, [sp, #1224] + ldr r12, [sp, #1228] + ldr lr, [sp, #1232] + adds r0, r0, r4 + ldr r4, [sp, #1236] + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte 
Reload + adcs r0, r0, r5 + ldr r5, [sp, #1240] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + ldr r7, [sp, #96] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1208] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #100] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1252] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1248] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1244] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1216] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + adds r6, r11, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1264] + str r6, [sp, #20] @ 4-byte Spill + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1260] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1256] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1212] + adcs r0, r7, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r6, r0 + add r6, sp, #1024 + add r0, r6, #120 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1200] + add r11, sp, #1152 + ldr r8, [sp, #1172] + ldr r9, [sp, #1168] + ldr r10, [sp, #1164] + ldr r4, [sp, 
#1144] + ldr r7, [sp, #1148] + add lr, sp, #1024 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1196] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1192] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1188] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1184] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1180] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1176] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r5, r6, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, lr, #56 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #20] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + ldr r2, [sp, #1092] + ldr r3, [sp, #1096] + ldr r12, [sp, #1100] + ldr lr, [sp, #1104] + adds r0, r0, r4 + ldr r4, [sp, #1108] + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1112] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + ldr r7, [sp, #96] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1080] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #100] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1124] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1120] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1116] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1088] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + adds r6, r11, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1136] + str r6, [sp, #20] @ 4-byte Spill + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1132] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1128] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1084] + adcs r0, r7, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, 
r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #1016 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1072] + add r11, sp, #1024 + ldr r8, [sp, #1044] + ldr r9, [sp, #1040] + ldr r10, [sp, #1036] + ldr r4, [sp, #1016] + ldr r7, [sp, #1020] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1068] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1064] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1060] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1056] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1052] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1048] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r5, r6, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, sp, #952 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #20] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #956 + adds r0, r0, r4 + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #980 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1008] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1004] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1000] + str r0, [sp, #28] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #952] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #100] @ 4-byte Reload + ldr r7, [sp, #96] @ 4-byte Reload + adds r6, r11, r6 + adcs r0, r7, r0 + str r6, [sp, #20] @ 4-byte Spill + str r0, 
[sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #888 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #944] + add r11, sp, #896 + ldr r8, [sp, #916] + ldr r9, [sp, #912] + ldr r10, [sp, #908] + ldr r4, [sp, #888] + ldr r7, [sp, #892] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #940] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #936] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #932] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #928] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #924] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #920] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r5, r6, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, sp, #824 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #20] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #828 + adds r0, r0, r4 + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #852 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + 
ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #880] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #876] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #872] + str r0, [sp, #28] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #824] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #100] @ 4-byte Reload + ldr r7, [sp, #96] @ 4-byte Reload + adds r6, r11, r6 + adcs r0, r7, r0 + str r6, [sp, #20] @ 4-byte Spill + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #760 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #816] + add r11, sp, #768 + ldr r8, [sp, #788] + ldr r9, [sp, #784] + ldr r10, [sp, #780] + ldr r4, [sp, #760] + ldr r7, [sp, #764] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #812] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #808] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #804] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #800] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #796] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #792] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r5, r6, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #36] + add r0, sp, #696 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #20] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #700 + adds r0, r0, r4 + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #724 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + 
ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #752] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #748] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #744] + str r0, [sp, #28] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #696] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #100] @ 4-byte Reload + ldr r7, [sp, #96] @ 4-byte Reload + adds r6, r11, r6 + adcs r0, r7, r0 + str r6, [sp, #20] @ 4-byte Spill + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #104] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + mul r2, r6, r5 + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #632 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #688] + add r11, sp, #632 + ldr r6, [sp, #656] + ldr r4, [sp, #652] + ldr r7, [sp, #648] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #684] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #680] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #676] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #672] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #668] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #664] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #660] + str r0, [sp, #4] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + 
ldr r2, [r0, #40] + add r0, sp, #568 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #20] @ 4-byte Reload + ldr r1, [sp, #4] @ 4-byte Reload + add lr, sp, #584 + adds r0, r0, r8 + ldr r0, [sp, #100] @ 4-byte Reload + adcs r2, r0, r9 + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #608 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #568 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldm r11, {r4, r6, r7, r11} + adds r0, r2, r4 + mul r1, r0, r5 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #624] + str r1, [sp, #32] @ 4-byte Spill + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r5, [sp, #96] @ 4-byte Reload + adcs r6, r5, r6 + ldr r5, [sp, #92] @ 4-byte Reload + str r6, [sp, #96] @ 4-byte Spill + adcs r6, r5, r7 + ldr r5, [sp, #88] @ 4-byte Reload + str r6, [sp, #92] @ 4-byte Spill + adcs r6, r5, r11 + ldr r5, [sp, #84] @ 4-byte Reload + str r6, [sp, #88] @ 4-byte Spill + adcs r0, r5, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #504 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #560] + add r10, sp, #504 + ldr r11, [sp, #532] + ldr 
r4, [sp, #528] + ldr r6, [sp, #524] + ldr r7, [sp, #520] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #556] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #552] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #548] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #544] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #540] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #536] + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r5, r8, r9, r10} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #44] + add r0, sp, #440 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #100] @ 4-byte Reload + ldr r1, [sp, #92] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + add lr, sp, #456 + adds r0, r0, r5 + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r8 + adcs r1, r1, r9 + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + adcs r1, r1, r10 + add r10, sp, #480 + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r1, r7 + add r7, sp, #440 + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r6 + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r1, r4 + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r1, r11 + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #12] @ 4-byte Reload + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + adc r1, r1, #0 + str r1, [sp, #24] @ 4-byte Spill + ldm r7, {r4, r6, r7} + ldr r5, [sp, #452] + adds r1, r0, r4 + ldr r0, [sp, #104] @ 4-byte Reload + str r1, [sp, #100] @ 4-byte Spill + mul r2, r1, r0 + ldr r0, [sp, #496] + str r2, [sp, #20] @ 4-byte Spill + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #96] @ 4-byte Reload + adcs r6, r11, r6 + str r6, [sp, #48] @ 4-byte Spill + ldr r6, [sp, #92] @ 4-byte Reload + adcs r6, r6, r7 + str r6, [sp, #44] @ 4-byte Spill + ldr r6, [sp, #88] @ 4-byte Reload + adcs r5, r6, r5 + str r5, [sp, #40] @ 4-byte Spill + ldr r5, [sp, #84] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #52] @ 
4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + add r0, sp, #376 + bl .LmulPv448x32(PLT) + ldr r1, [sp, #432] + ldr r8, [sp, #404] + ldr r9, [sp, #400] + ldr r10, [sp, #396] + ldr r11, [sp, #392] + ldr r6, [sp, #376] + ldr r5, [sp, #380] + ldr r7, [sp, #384] + ldr r4, [sp, #388] + add r0, sp, #312 + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #428] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #424] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #420] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #416] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #412] + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #408] + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [sp, #112] @ 4-byte Reload + ldr r2, [r1, #48] + ldr r1, [sp, #108] @ 4-byte Reload + bl .LmulPv448x32(PLT) + ldr r0, [sp, #100] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #316 + adds r0, r0, r6 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #340 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #368] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #364] + str r0, [sp, #28] @ 4-byte Spill + ldm r10, {r4, r6, r7, r8, r9, r10} + ldr r5, [sp, #312] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #100] @ 4-byte Reload + adds r11, r11, r5 + ldr r5, [sp, #48] @ 4-byte Reload + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #92] @ 
4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #248 + bl .LmulPv448x32(PLT) + ldr r1, [sp, #304] + ldr r10, [sp, #272] + ldr r11, [sp, #268] + ldr r8, [sp, #264] + ldr r6, [sp, #248] + ldr r7, [sp, #252] + ldr r4, [sp, #256] + ldr r9, [sp, #260] + add r0, sp, #184 + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #300] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #296] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #292] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #288] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #284] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #280] + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #276] + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [sp, #112] @ 4-byte Reload + ldr r2, [r1, #52] + ldr r1, [sp, #108] @ 4-byte Reload + bl .LmulPv448x32(PLT) + adds r0, r5, r6 + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #200 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r3, r0, r7 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r8 + add r8, sp, #184 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #224 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldm r8, {r4, r7, r8} + ldr r0, [sp, #104] @ 
4-byte Reload + ldr r5, [sp, #196] + adds r4, r3, r4 + mul r1, r4, r0 + ldr r0, [sp, #240] + str r1, [sp, #48] @ 4-byte Spill + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #236] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r6, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #112] @ 4-byte Reload + adcs r11, r11, r7 + ldr r7, [sp, #108] @ 4-byte Reload + adcs r8, r7, r8 + ldr r7, [sp, #52] @ 4-byte Reload + adcs r5, r7, r5 + ldr r7, [sp, #100] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r9, r0, r9 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #116] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r10 + str r0, [sp, #68] @ 4-byte Spill + mov r0, #0 + adc r7, r0, #0 + add r0, sp, #120 + bl .LmulPv448x32(PLT) + add r3, sp, #120 + ldm r3, {r0, r1, r2, r3} + adds r0, r4, r0 + adcs r4, r11, r1 + ldr r0, [sp, #136] + ldr r1, [sp, #40] @ 4-byte Reload + adcs r6, r8, r2 + str r4, [sp, #36] @ 4-byte Spill + adcs r12, r5, r3 + str r6, [sp, #48] @ 4-byte Spill + str r12, [sp, #56] @ 4-byte Spill + adcs r8, r1, r0 + ldr r0, [sp, #140] + ldr r1, [sp, #44] @ 4-byte Reload + str r8, [sp, #64] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #144] + adcs r0, r1, r0 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #148] + adcs r0, r1, r0 + ldr r1, [sp, #96] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #152] + adcs r0, r1, r0 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #156] + adcs r0, r1, r0 + ldr r1, [sp, #104] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #160] + adcs r0, r1, r0 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #164] + adcs r0, r9, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #168] + adcs r0, r1, r0 + ldr r1, [sp, #112] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #172] + adcs r0, r1, r0 + ldr r1, [sp, #68] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #176] + adcs r0, r1, r0 + str r0, [sp, #112] @ 4-byte Spill + adc r0, r7, #0 + mov r7, r10 + str r0, [sp, #60] @ 4-byte Spill + ldmib r7, {r1, r2, r3, r10, r11, lr} + ldr r5, [r7] + ldr r0, [r7, #28] + ldr r9, [r7, #44] + subs r5, r4, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r7, #40] + sbcs r6, r6, r1 + ldr r1, [r7, #32] + ldr r4, [sp, #68] @ 4-byte Reload + sbcs r2, r12, r2 + sbcs r12, r8, r3 + ldr r3, [r7, #48] + ldr r8, [r7, #36] + str r3, [sp, #52] @ 4-byte Spill + ldr r3, [r7, #52] + ldr r7, [sp, #84] @ 4-byte Reload + str r3, [sp, #116] @ 4-byte Spill + ldr 
r3, [sp, #80] @ 4-byte Reload + sbcs r10, r3, r10 + ldr r3, [sp, #76] @ 4-byte Reload + sbcs r3, r3, r11 + sbcs lr, r7, lr + ldr r7, [sp, #88] @ 4-byte Reload + sbcs r4, r7, r4 + ldr r7, [sp, #92] @ 4-byte Reload + sbcs r7, r7, r1 + ldr r1, [sp, #96] @ 4-byte Reload + sbcs r8, r1, r8 + ldr r1, [sp, #100] @ 4-byte Reload + sbcs r11, r1, r0 + ldr r0, [sp, #104] @ 4-byte Reload + ldr r1, [sp, #52] @ 4-byte Reload + sbcs r9, r0, r9 + ldr r0, [sp, #108] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + sbcs r0, r0, r1 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + sbc r0, r0, #0 + ands r1, r0, #1 + ldr r0, [sp, #36] @ 4-byte Reload + movne r5, r0 + ldr r0, [sp, #72] @ 4-byte Reload + str r5, [r0] + ldr r5, [sp, #48] @ 4-byte Reload + movne r6, r5 + ldr r5, [sp, #56] @ 4-byte Reload + str r6, [r0, #4] + movne r2, r5 + cmp r1, #0 + str r2, [r0, #8] + ldr r2, [sp, #64] @ 4-byte Reload + movne r12, r2 + ldr r2, [sp, #80] @ 4-byte Reload + str r12, [r0, #12] + movne r10, r2 + ldr r2, [sp, #76] @ 4-byte Reload + str r10, [r0, #16] + movne r3, r2 + ldr r2, [sp, #84] @ 4-byte Reload + cmp r1, #0 + str r3, [r0, #20] + movne lr, r2 + ldr r2, [sp, #88] @ 4-byte Reload + str lr, [r0, #24] + movne r4, r2 + ldr r2, [sp, #92] @ 4-byte Reload + str r4, [r0, #28] + movne r7, r2 + ldr r2, [sp, #96] @ 4-byte Reload + cmp r1, #0 + str r7, [r0, #32] + movne r8, r2 + ldr r2, [sp, #100] @ 4-byte Reload + str r8, [r0, #36] + movne r11, r2 + ldr r2, [sp, #104] @ 4-byte Reload + str r11, [r0, #40] + movne r9, r2 + cmp r1, #0 + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [sp, #68] @ 4-byte Reload + str r9, [r0, #44] + movne r2, r1 + ldr r1, [sp, #112] @ 4-byte Reload + str r2, [r0, #48] + ldr r2, [sp, #116] @ 4-byte Reload + movne r2, r1 + str r2, [r0, #52] + add sp, sp, #892 + add sp, sp, #1024 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end210: + .size mcl_fp_mont14L, .Lfunc_end210-mcl_fp_mont14L + .cantunwind + .fnend + + .globl mcl_fp_montNF14L + .align 2 + .type mcl_fp_montNF14L,%function +mcl_fp_montNF14L: @ @mcl_fp_montNF14L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #892 + sub sp, sp, #892 + .pad #1024 + sub sp, sp, #1024 + add r12, sp, #108 + add r6, sp, #1024 + mov r4, r3 + stm r12, {r1, r2, r3} + str r0, [sp, #76] @ 4-byte Spill + add r0, r6, #824 + ldr r5, [r3, #-4] + ldr r2, [r2] + str r5, [sp, #104] @ 4-byte Spill + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1852] + ldr r8, [sp, #1848] + add r10, sp, #1024 + mov r1, r4 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #1856] + mul r2, r8, r5 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #1860] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #1904] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #1900] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #1896] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #1892] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #1888] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #1884] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #1880] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1876] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1872] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1868] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1864] + str r0, [sp, #40] @ 4-byte Spill + add r0, r10, #760 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1840] + ldr r1, [sp, #108] @ 4-byte 
Reload + ldr r11, [sp, #1808] + ldr r6, [sp, #1804] + ldr r7, [sp, #1800] + ldr r5, [sp, #1784] + ldr r9, [sp, #1788] + ldr r10, [sp, #1792] + ldr r4, [sp, #1796] + add lr, sp, #1024 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1836] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1832] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1828] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1824] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1820] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1816] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1812] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + ldr r2, [r0, #4] + add r0, lr, #696 + bl .LmulPv448x32(PLT) + adds r0, r5, r8 + ldr r1, [sp, #12] @ 4-byte Reload + ldr r5, [sp, #1720] + ldr r2, [sp, #1732] + ldr r3, [sp, #1736] + ldr r12, [sp, #1740] + ldr lr, [sp, #1744] + ldr r8, [sp, #1760] + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r9, r0 + ldr r9, [sp, #1764] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r10, r0 + ldr r10, [sp, #1768] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #1748] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r7, r0 + ldr r7, [sp, #1756] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #1752] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r11, r0 + ldr r11, [sp, #80] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adc r0, r1, r0 + adds r11, r11, r5 + ldr r5, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #1728] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1776] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1772] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1724] + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r7 + str 
r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + add r8, sp, #1024 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r11, r0 + add r0, r8, #632 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1712] + add r11, sp, #1664 + ldr r9, [sp, #1680] + ldr r10, [sp, #1676] + ldr r6, [sp, #1656] + ldr r7, [sp, #1660] + add lr, sp, #1024 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1708] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1704] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1700] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1696] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1692] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1688] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1684] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #8] + add r0, lr, #568 + bl .LmulPv448x32(PLT) + adds r0, r5, r6 + ldr r1, [sp, #12] @ 4-byte Reload + ldr r5, [sp, #1592] + ldr r2, [sp, #1604] + ldr r3, [sp, #1608] + ldr r12, [sp, #1612] + ldr lr, [sp, #1616] + ldr r6, [sp, #1624] + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1628] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1620] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1632] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #100] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1640] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1636] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, r1 + adds r11, r11, r5 + ldr r5, [sp, #96] @ 4-byte Reload + ldr r1, [sp, #1600] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1648] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1644] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1596] + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr 
r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r8 + add r8, sp, #1024 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r11, r0 + add r0, r8, #504 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1584] + add r11, sp, #1536 + ldr r9, [sp, #1552] + ldr r10, [sp, #1548] + ldr r6, [sp, #1528] + ldr r7, [sp, #1532] + add lr, sp, #1024 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1580] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1576] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1572] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1568] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1564] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1560] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1556] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, lr, #440 + bl .LmulPv448x32(PLT) + adds r0, r5, r6 + ldr r1, [sp, #12] @ 4-byte Reload + ldr r5, [sp, #1464] + ldr r2, [sp, #1476] + ldr r3, [sp, #1480] + ldr r12, [sp, #1484] + ldr lr, [sp, #1488] + ldr r6, [sp, #1496] + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1500] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1492] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1504] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #100] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1512] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1508] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, 
#52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, r1 + adds r11, r11, r5 + ldr r5, [sp, #96] @ 4-byte Reload + ldr r1, [sp, #1472] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1520] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1516] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1468] + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r8 + add r8, sp, #1024 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r11, r0 + add r0, r8, #376 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1456] + add r11, sp, #1408 + ldr r9, [sp, #1424] + ldr r10, [sp, #1420] + ldr r6, [sp, #1400] + ldr r7, [sp, #1404] + add lr, sp, #1024 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1452] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1448] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1444] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1440] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1436] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1432] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1428] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, lr, #312 + bl .LmulPv448x32(PLT) + adds r0, r5, r6 + ldr r1, [sp, #12] @ 4-byte Reload + ldr r5, [sp, #1336] + ldr r2, [sp, #1348] + ldr r3, [sp, #1352] + ldr r12, [sp, #1356] + ldr lr, [sp, #1360] + ldr r6, [sp, #1368] + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1372] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1364] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1376] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #100] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1384] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1380] + str r0, 
[sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, r1 + adds r11, r11, r5 + ldr r5, [sp, #96] @ 4-byte Reload + ldr r1, [sp, #1344] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1392] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1388] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1340] + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r8 + add r8, sp, #1024 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r11, r0 + add r0, r8, #248 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1328] + add r11, sp, #1280 + ldr r9, [sp, #1296] + ldr r10, [sp, #1292] + ldr r6, [sp, #1272] + ldr r7, [sp, #1276] + add lr, sp, #1024 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1324] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1320] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1316] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1312] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1308] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1304] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1300] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #20] + add r0, lr, #184 + bl .LmulPv448x32(PLT) + adds r0, r5, r6 + ldr r1, [sp, #12] @ 4-byte Reload + ldr r5, [sp, #1208] + ldr r2, [sp, #1220] + ldr r3, [sp, #1224] + ldr r12, [sp, #1228] + ldr lr, [sp, #1232] + ldr r6, [sp, #1240] 
+ ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1244] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1236] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1248] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #100] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1256] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1252] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, r1 + adds r11, r11, r5 + ldr r5, [sp, #96] @ 4-byte Reload + ldr r1, [sp, #1216] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1264] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1260] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1212] + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r8 + add r8, sp, #1024 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r11, r0 + add r0, r8, #120 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1200] + add r11, sp, #1152 + ldr r9, [sp, #1168] + ldr r10, [sp, #1164] + ldr r6, [sp, #1144] + ldr r7, [sp, #1148] + add lr, sp, #1024 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1196] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1192] + str 
r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1188] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1184] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1180] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1176] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1172] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, lr, #56 + bl .LmulPv448x32(PLT) + adds r0, r5, r6 + ldr r1, [sp, #12] @ 4-byte Reload + ldr r5, [sp, #1080] + ldr r2, [sp, #1092] + ldr r3, [sp, #1096] + ldr r12, [sp, #1100] + ldr lr, [sp, #1104] + ldr r6, [sp, #1112] + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1116] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1108] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1120] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #100] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1128] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1124] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, r1 + adds r11, r11, r5 + ldr r5, [sp, #96] @ 4-byte Reload + ldr r1, [sp, #1088] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1136] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1132] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1084] + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte 
Reload + adcs r0, r0, r10 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #1016 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1072] + add r11, sp, #1024 + ldr r9, [sp, #1040] + ldr r10, [sp, #1036] + ldr r6, [sp, #1016] + ldr r7, [sp, #1020] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1068] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1064] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1060] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1056] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1052] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1048] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1044] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, sp, #952 + bl .LmulPv448x32(PLT) + adds r0, r5, r6 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #956 + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #980 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1008] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1004] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r4, r6, r7, r8, r9, r10} + ldr r5, [sp, #952] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #100] @ 4-byte Reload + adds r11, r11, r5 + ldr r5, [sp, #96] @ 4-byte Reload + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr 
r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #888 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #944] + add r11, sp, #896 + ldr r9, [sp, #912] + ldr r10, [sp, #908] + ldr r6, [sp, #888] + ldr r7, [sp, #892] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #940] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #936] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #932] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #928] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #924] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #920] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #916] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, sp, #824 + bl .LmulPv448x32(PLT) + adds r0, r5, r6 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #828 + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #852 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #880] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #876] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r4, r6, r7, r8, r9, r10} + ldr r5, [sp, #824] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #100] @ 4-byte Reload + adds r11, r11, r5 + ldr r5, [sp, #96] @ 4-byte Reload + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload 
+ adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #760 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #816] + add r11, sp, #768 + ldr r9, [sp, #784] + ldr r10, [sp, #780] + ldr r6, [sp, #760] + ldr r7, [sp, #764] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #812] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #808] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #804] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #800] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #796] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #792] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #788] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #36] + add r0, sp, #696 + bl .LmulPv448x32(PLT) + adds r0, r5, r6 + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #700 + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #724 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + 
adc r0, r0, r1 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #752] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #748] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #744] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r4, r6, r8, r9, r10} + ldr r5, [sp, #696] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #100] @ 4-byte Reload + ldr r7, [sp, #96] @ 4-byte Reload + adds r5, r11, r5 + adcs r0, r7, r0 + str r5, [sp, #24] @ 4-byte Spill + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #104] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + mul r2, r5, r9 + adcs r0, r0, r10 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #632 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #688] + add r11, sp, #640 + ldr r5, [sp, #656] + ldr r10, [sp, #652] + ldr r6, [sp, #632] + ldr r7, [sp, #636] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #684] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #680] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #676] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #672] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #668] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #664] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #660] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #40] + add r0, sp, #568 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #24] @ 4-byte Reload + ldr r1, [sp, #96] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + add lr, sp, #584 + adds r0, r0, r6 + ldr r6, [sp, #580] + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #572] + adcs r1, r1, r4 + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [sp, #92] @ 4-byte Reload + adcs r1, r1, r8 + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + adcs r1, r1, r11 + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r1, r10 + add r10, sp, #608 + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r5 + ldr r5, [sp, #576] + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #12] @ 4-byte Reload + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs 
r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adc r1, r1, r2 + ldr r2, [sp, #568] + str r1, [sp, #44] @ 4-byte Spill + adds r0, r0, r2 + mul r1, r0, r9 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #624] + str r1, [sp, #36] @ 4-byte Spill + str r0, [sp, #40] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #96] @ 4-byte Reload + adcs r7, r11, r7 + str r7, [sp, #96] @ 4-byte Spill + ldr r7, [sp, #92] @ 4-byte Reload + adcs r5, r7, r5 + str r5, [sp, #92] @ 4-byte Spill + ldr r5, [sp, #88] @ 4-byte Reload + adcs r5, r5, r6 + str r5, [sp, #88] @ 4-byte Spill + ldr r5, [sp, #84] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #504 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #560] + add r10, sp, #508 + ldr r7, [sp, #532] + ldr r8, [sp, #528] + ldr r9, [sp, #524] + ldr r11, [sp, #504] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #556] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #552] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #548] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #544] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #540] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #536] + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r4, r5, r6, r10} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [r0, #44] + add r0, sp, #440 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #100] @ 4-byte Reload + ldr r1, [sp, #92] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + add lr, sp, #456 + adds r0, r0, r11 + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + adcs r1, r1, r5 + ldr r5, [sp, #448] + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #452] + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r1, r10 + add r10, sp, #480 + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r9 + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #72] @ 
4-byte Reload + adcs r1, r1, r8 + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r1, r7 + ldr r7, [sp, #444] + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #40] @ 4-byte Reload + adc r1, r1, r2 + ldr r2, [sp, #440] + str r1, [sp, #36] @ 4-byte Spill + adds r1, r0, r2 + ldr r0, [sp, #104] @ 4-byte Reload + str r1, [sp, #100] @ 4-byte Spill + mul r2, r1, r0 + ldr r0, [sp, #496] + str r2, [sp, #24] @ 4-byte Spill + str r0, [sp, #20] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #96] @ 4-byte Reload + adcs r7, r11, r7 + str r7, [sp, #52] @ 4-byte Spill + ldr r7, [sp, #92] @ 4-byte Reload + adcs r5, r7, r5 + str r5, [sp, #48] @ 4-byte Spill + ldr r5, [sp, #88] @ 4-byte Reload + adcs r5, r5, r6 + str r5, [sp, #44] @ 4-byte Spill + ldr r5, [sp, #84] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #376 + bl .LmulPv448x32(PLT) + ldr r1, [sp, #432] + add r10, sp, #380 + ldr r7, [sp, #404] + ldr r8, [sp, #400] + ldr r9, [sp, #396] + ldr r11, [sp, #376] + add r0, sp, #312 + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #428] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #424] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #420] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #416] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #412] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #408] + str r1, [sp, #12] @ 4-byte Spill + ldm r10, {r4, r5, r6, r10} + ldr r1, [sp, #112] @ 4-byte Reload + ldr r2, [r1, #48] + ldr r1, [sp, #108] @ 4-byte Reload + bl .LmulPv448x32(PLT) + ldr r0, [sp, #100] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #316 + adds r0, r0, r11 + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #48] @ 
4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #340 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #368] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #364] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r4, r6, r7, r8, r9, r10} + ldr r5, [sp, #312] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #100] @ 4-byte Reload + adds r11, r11, r5 + ldr r5, [sp, #52] @ 4-byte Reload + adcs r0, r5, r0 + mov r5, r11 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #248 + bl .LmulPv448x32(PLT) + ldr r1, [sp, #304] + ldr r10, [sp, #272] + ldr r11, [sp, #268] + ldr r8, [sp, #264] + ldr r6, [sp, #248] + ldr r7, [sp, #252] + ldr r4, [sp, #256] + ldr r9, [sp, #260] + add r0, sp, #184 + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #300] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #296] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #292] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #288] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #284] + str r1, [sp, 
#20] @ 4-byte Spill + ldr r1, [sp, #280] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #276] + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #112] @ 4-byte Reload + ldr r2, [r1, #52] + ldr r1, [sp, #108] @ 4-byte Reload + bl .LmulPv448x32(PLT) + adds r0, r5, r6 + ldr r1, [sp, #52] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + add lr, sp, #200 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r7 + adcs r1, r1, r4 + str r1, [sp, #112] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r9 + str r1, [sp, #108] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + adcs r1, r1, r8 + add r8, sp, #184 + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #100] @ 4-byte Reload + adcs r1, r1, r11 + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + adcs r1, r1, r10 + add r10, sp, #224 + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [sp, #92] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adc r1, r1, r2 + str r1, [sp, #60] @ 4-byte Spill + ldm r8, {r2, r7, r8} + ldr r6, [sp, #196] + adds r4, r0, r2 + ldr r0, [sp, #104] @ 4-byte Reload + mul r1, r4, r0 + ldr r0, [sp, #240] + str r1, [sp, #52] @ 4-byte Spill + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #236] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r5, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #112] @ 4-byte Reload + adcs r11, r11, r7 + ldr r7, [sp, #108] @ 4-byte Reload + adcs r8, r7, r8 + ldr r7, [sp, #56] @ 4-byte Reload + adcs r6, r7, r6 + ldr r7, [sp, #100] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r7, r0, r5 + ldr r0, [sp, #68] @ 4-byte Reload + ldr r5, [sp, #116] @ 4-byte Reload + adcs r9, r0, r9 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + add r0, sp, #120 + bl .LmulPv448x32(PLT) + add r3, sp, #120 + ldm r3, {r0, r1, r2, r3} + adds r0, r4, r0 + mov r4, r5 + adcs r11, r11, r1 + ldr r0, [sp, #136] + ldr r1, [sp, #40] @ 4-byte Reload 
+ adcs r2, r8, r2 + str r11, [sp, #44] @ 4-byte Spill + adcs lr, r6, r3 + str r2, [sp, #52] @ 4-byte Spill + str lr, [sp, #60] @ 4-byte Spill + adcs r8, r1, r0 + ldr r0, [sp, #140] + ldr r1, [sp, #48] @ 4-byte Reload + str r8, [sp, #64] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #144] + adcs r0, r1, r0 + ldr r1, [sp, #92] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #148] + adcs r0, r1, r0 + ldr r1, [sp, #96] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #152] + adcs r0, r1, r0 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #156] + adcs r10, r1, r0 + ldr r0, [sp, #160] + ldr r1, [sp, #104] @ 4-byte Reload + str r10, [sp, #68] @ 4-byte Spill + adcs r0, r7, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #164] + adcs r0, r9, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #168] + adcs r0, r1, r0 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #172] + adcs r0, r1, r0 + ldr r1, [sp, #112] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #176] + adc r0, r1, r0 + str r0, [sp, #112] @ 4-byte Spill + ldmib r4, {r0, r1, r7, r9, r12} + ldr r6, [r4] + ldr r3, [r4, #24] + ldr r5, [r4, #28] + subs r6, r11, r6 + str r3, [sp, #72] @ 4-byte Spill + add r11, r4, #32 + sbcs r3, r2, r0 + sbcs r2, lr, r1 + ldm r11, {r0, r1, r11} + sbcs lr, r8, r7 + ldr r7, [r4, #44] + ldr r8, [r4, #52] + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r4, #48] + ldr r4, [sp, #80] @ 4-byte Reload + sbcs r9, r4, r9 + ldr r4, [sp, #84] @ 4-byte Reload + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [sp, #72] @ 4-byte Reload + sbcs r12, r4, r12 + ldr r4, [sp, #88] @ 4-byte Reload + sbcs r4, r4, r7 + ldr r7, [sp, #92] @ 4-byte Reload + sbcs r5, r7, r5 + sbcs r7, r10, r0 + ldr r0, [sp, #96] @ 4-byte Reload + sbcs r10, r0, r1 + ldr r0, [sp, #100] @ 4-byte Reload + ldr r1, [sp, #48] @ 4-byte Reload + sbcs r11, r0, r11 + ldr r0, [sp, #104] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + sbcs r0, r0, r1 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + sbc r8, r0, r8 + ldr r0, [sp, #44] @ 4-byte Reload + asr r1, r8, #31 + cmp r1, #0 + movlt r6, r0 + ldr r0, [sp, #76] @ 4-byte Reload + str r6, [r0] + ldr r6, [sp, #52] @ 4-byte Reload + movlt r3, r6 + str r3, [r0, #4] + ldr r3, [sp, #60] @ 4-byte Reload + movlt r2, r3 + cmp r1, #0 + ldr r3, [sp, #72] @ 4-byte Reload + str r2, [r0, #8] + ldr r2, [sp, #64] @ 4-byte Reload + movlt lr, r2 + ldr r2, [sp, #80] @ 4-byte Reload + str lr, [r0, #12] + movlt r9, r2 + ldr r2, [sp, #84] @ 4-byte Reload + str r9, [r0, #16] + movlt r12, r2 + ldr r2, [sp, #88] @ 4-byte Reload + cmp r1, #0 + str r12, [r0, #20] + movlt r4, r2 + ldr r2, [sp, #92] @ 4-byte Reload + str r4, [r0, #24] + movlt r5, r2 + ldr r2, [sp, #68] @ 4-byte Reload + str r5, [r0, #28] + movlt r7, r2 + ldr r2, [sp, #96] @ 4-byte Reload + cmp r1, #0 + str r7, [r0, #32] + movlt r10, r2 + ldr r2, [sp, #100] @ 4-byte Reload + str r10, [r0, #36] + movlt r11, r2 + ldr r2, [sp, #104] @ 4-byte Reload + str r11, [r0, #40] + movlt r3, r2 + cmp r1, #0 + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [sp, #116] @ 4-byte Reload + str r3, [r0, #44] + movlt r2, r1 + ldr r1, [sp, #112] @ 4-byte Reload + str r2, [r0, #48] + movlt r8, r1 + str r8, [r0, #52] + add sp, sp, #892 + add sp, sp, #1024 + pop {r4, r5, r6, r7, r8, r9, 
r10, r11, lr} + mov pc, lr +.Lfunc_end211: + .size mcl_fp_montNF14L, .Lfunc_end211-mcl_fp_montNF14L + .cantunwind + .fnend + + .globl mcl_fp_montRed14L + .align 2 + .type mcl_fp_montRed14L,%function +mcl_fp_montRed14L: @ @mcl_fp_montRed14L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #68 + sub sp, sp, #68 + .pad #1024 + sub sp, sp, #1024 + mov r3, r2 + str r0, [sp, #180] @ 4-byte Spill + ldr r2, [r1, #4] + ldr r6, [r1] + ldr r0, [r3] + str r3, [sp, #184] @ 4-byte Spill + str r2, [sp, #88] @ 4-byte Spill + ldr r2, [r1, #8] + str r0, [sp, #176] @ 4-byte Spill + ldr r0, [r3, #4] + str r2, [sp, #84] @ 4-byte Spill + ldr r2, [r1, #12] + str r0, [sp, #172] @ 4-byte Spill + ldr r0, [r3, #8] + str r2, [sp, #80] @ 4-byte Spill + str r0, [sp, #168] @ 4-byte Spill + ldr r0, [r3, #12] + str r0, [sp, #152] @ 4-byte Spill + ldr r0, [r3, #16] + str r0, [sp, #156] @ 4-byte Spill + ldr r0, [r3, #20] + str r0, [sp, #160] @ 4-byte Spill + ldr r0, [r3, #24] + str r0, [sp, #164] @ 4-byte Spill + ldr r0, [r3, #-4] + str r0, [sp, #188] @ 4-byte Spill + mul r2, r6, r0 + ldr r0, [r3, #28] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [r3, #32] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [r3, #36] + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [r3, #40] + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [r3, #44] + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [r3, #48] + str r0, [sp, #144] @ 4-byte Spill + ldr r0, [r3, #52] + str r0, [sp, #148] @ 4-byte Spill + ldr r0, [r1, #96] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [r1, #100] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [r1, #104] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [r1, #108] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [r1, #64] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r1, #68] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r1, #72] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r1, #80] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [r1, #84] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [r1, #88] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [r1, #92] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [r1, #76] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r1, #32] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r1, #36] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r1, #40] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r1, #44] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r1, #48] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r1, #52] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r1, #56] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r1, #60] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r1, #28] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [r1, #24] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r1, #20] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [r1, #16] + mov r1, r3 + str r0, [sp, #12] @ 4-byte Spill + add r0, sp, #1024 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1080] + ldr r8, [sp, #1024] + ldr r1, [sp, #1032] + ldr r2, [sp, #1036] + ldr r3, [sp, #1040] + ldr r12, [sp, #1044] + ldr lr, [sp, #1048] + ldr r4, [sp, #1052] + ldr r5, [sp, #1056] + ldr r7, [sp, #1060] + ldr r9, [sp, #1064] + ldr r10, [sp, #1068] + ldr r11, [sp, #1072] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1076] + adds r6, r6, r8 + ldr r6, [sp, #88] @ 4-byte Reload + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #1028] + adcs r8, r6, r0 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs 
r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #184] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #188] @ 4-byte Reload + mul r2, r8, r0 + add r0, sp, #960 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #1016] + add lr, sp, #996 + add r10, sp, #964 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1012] + str r0, [sp, #8] @ 4-byte Spill + ldm lr, {r4, r5, r12, lr} + ldr r6, [sp, #960] + ldr r7, [sp, #992] + ldr r11, [sp, #988] + ldr r3, [sp, #984] + ldm r10, {r0, r1, r2, r9, r10} + adds r6, r8, r6 + ldr r6, [sp, #88] @ 4-byte Reload + adcs r8, r6, r0 + ldr r0, [sp, #84] @ 4-byte Reload + ldr r6, [sp, #188] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r8, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r11 + mov r11, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 
4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #184] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #896 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #952] + add r10, sp, #924 + add lr, sp, #900 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #948] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #944] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #940] + str r0, [sp, #4] @ 4-byte Spill + ldm r10, {r7, r8, r9, r10} + ldr r4, [sp, #896] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r4, r11, r4 + ldr r4, [sp, #88] @ 4-byte Reload + adcs r4, r4, r0 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #4] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r4, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #68] @ 4-byte 
Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #832 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #888] + add lr, sp, #872 + add r11, sp, #832 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #884] + str r0, [sp, #16] @ 4-byte Spill + ldm lr, {r5, r12, lr} + ldr r6, [sp, #868] + ldr r7, [sp, #864] + ldm r11, {r0, r1, r2, r3, r8, r9, r10, r11} + adds r0, r4, r0 + ldr r4, [sp, #188] @ 4-byte Reload + ldr r0, [sp, #88] @ 4-byte Reload + adcs r1, r0, r1 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r11 + mov r11, r1 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #184] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r1, r4 + mov r1, r5 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] 
@ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #768 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #824] + add r10, sp, #796 + add lr, sp, #784 + add r9, sp, #768 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #820] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #816] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #812] + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r6, r7, r8, r10} + ldm lr, {r3, r12, lr} + ldm r9, {r0, r1, r2, r9} + adds r0, r11, r0 + ldr r0, [sp, #88] @ 4-byte Reload + adcs r1, r0, r1 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #12] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + mov r10, r1 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r1, r4 + mov r1, r5 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + add r0, sp, #704 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #760] + add lr, sp, #744 + add r9, sp, #708 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #756] + str r0, [sp, #24] @ 4-byte Spill + ldm lr, {r5, r12, lr} + ldr r4, [sp, #704] + ldr r6, [sp, #740] + ldr r7, [sp, #736] + ldr r11, [sp, #732] + ldr r3, [sp, #728] + ldm r9, {r0, r1, r2, r8, r9} + adds r4, r10, r4 + ldr r4, [sp, #88] @ 4-byte Reload + adcs r4, r4, r0 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 
4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r11 + mov r11, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #188] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + mul r2, r4, r5 + ldr r4, [sp, #184] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #640 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #696] + add r10, sp, #664 + add lr, sp, #640 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #692] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #688] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #684] + str r0, [sp, #20] @ 4-byte Spill + ldm r10, {r6, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r11, r0 + ldr r0, [sp, #88] @ 4-byte Reload + adcs r1, r0, r1 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + mov r10, r1 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r0, [sp, #76] @ 
4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r1, r5 + mov r1, r4 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #576 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #632] + add lr, sp, #616 + add r9, sp, #580 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #628] + str r0, [sp, #32] @ 4-byte Spill + ldm lr, {r5, r12, lr} + ldr r4, [sp, #576] + ldr r6, [sp, #612] + ldr r7, [sp, #608] + ldr r11, [sp, #604] + ldr r3, [sp, #600] + ldm r9, {r0, r1, r2, r8, r9} + adds r4, r10, r4 + ldr r4, [sp, #88] @ 4-byte Reload + adcs r10, r4, r0 + ldr r0, [sp, #84] @ 4-byte Reload + ldr r4, [sp, #188] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r10, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #184] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r9 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #512 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #568] + add r11, sp, #536 + add lr, sp, #512 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #564] + str r0, [sp, 
#36] @ 4-byte Spill + ldr r0, [sp, #560] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #556] + str r0, [sp, #28] @ 4-byte Spill + ldm r11, {r5, r6, r7, r8, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r10, r0 + ldr r0, [sp, #88] @ 4-byte Reload + adcs r10, r0, r1 + ldr r0, [sp, #84] @ 4-byte Reload + ldr r1, [sp, #28] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r10, r4 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + mov r5, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + add r0, sp, #448 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #504] + add lr, sp, #484 + add r9, sp, #452 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #500] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #496] + str r0, [sp, #36] @ 4-byte Spill + ldm lr, {r6, r12, lr} + ldr r4, [sp, #448] + ldr r7, [sp, #480] + ldr r11, [sp, #476] + ldr r3, [sp, #472] + ldm r9, {r0, r1, r2, r8, r9} + adds r4, r10, r4 + ldr r4, [sp, #88] @ 4-byte Reload + adcs r10, r4, r0 + ldr r0, [sp, #84] @ 4-byte Reload + ldr r4, [sp, #188] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r10, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte 
Reload + adcs r0, r0, lr + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + add r0, sp, #384 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #440] + add r11, sp, #408 + add lr, sp, #384 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #436] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #432] + str r0, [sp, #40] @ 4-byte Spill + ldm r11, {r5, r6, r7, r8, r9, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r10, r0 + ldr r0, [sp, #88] @ 4-byte Reload + adcs r10, r0, r1 + ldr r0, [sp, #84] @ 4-byte Reload + ldr r1, [sp, #40] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r10, r4 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #184] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r7 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + add r0, sp, #320 + bl .LmulPv448x32(PLT) + ldr r0, [sp, #376] + add r9, sp, #348 + ldr r11, [sp, #364] + ldr r8, [sp, #360] + add lr, sp, #328 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #372] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #368] + str r0, [sp, #16] @ 4-byte Spill + ldm r9, {r4, r6, r9} + ldr r3, [sp, #320] + ldr r5, [sp, #324] + ldm lr, {r0, r1, r2, r12, lr} + adds r3, r10, r3 + ldr r3, [sp, #88] @ 4-byte Reload + adcs r5, r3, r5 + ldr r3, [sp, #84] @ 4-byte Reload + adcs r10, r3, r0 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 
4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #188] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + mul r2, r5, r6 + adcs r0, r0, r9 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + mov r11, r7 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r11 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + add r0, sp, #256 + bl .LmulPv448x32(PLT) + add r7, sp, #256 + add r12, sp, #272 + ldm r7, {r0, r1, r3, r7} + ldr r9, [sp, #312] + ldr r8, [sp, #308] + ldr lr, [sp, #304] + adds r0, r5, r0 + ldr r5, [sp, #300] + adcs r10, r10, r1 + mul r0, r10, r6 + ldr r6, [sp, #296] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #188] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #292] + str r0, [sp, #68] @ 4-byte Spill + ldm r12, {r0, r1, r2, r3, r12} + ldr r4, [sp, #120] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r11 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r4, r0, r2 + ldr r0, [sp, #108] @ 4-byte Reload + ldr r2, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r8, r0, r9 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + add r0, sp, #192 + bl .LmulPv448x32(PLT) + add r3, sp, #192 + ldm r3, {r0, r1, r2, r3} + adds r0, r10, r0 + ldr r0, [sp, #188] @ 4-byte Reload + adcs lr, r0, r1 + ldr r0, [sp, #68] @ 4-byte Reload + ldr r1, [sp, #48] @ 4-byte Reload + str lr, [sp, #72] @ 4-byte Spill + adcs r2, r0, r2 + ldr r0, [sp, #44] @ 4-byte Reload + str r2, [sp, #76] @ 4-byte Spill + adcs r3, r0, r3 + ldr r0, [sp, #208] + str r3, 
[sp, #80] @ 4-byte Spill + adcs r7, r1, r0 + ldr r0, [sp, #212] + ldr r1, [sp, #52] @ 4-byte Reload + str r7, [sp, #84] @ 4-byte Spill + adcs r4, r4, r0 + ldr r0, [sp, #216] + str r4, [sp, #88] @ 4-byte Spill + adcs r5, r1, r0 + ldr r0, [sp, #220] + ldr r1, [sp, #56] @ 4-byte Reload + str r5, [sp, #92] @ 4-byte Spill + adcs r6, r1, r0 + ldr r0, [sp, #224] + ldr r1, [sp, #60] @ 4-byte Reload + str r6, [sp, #96] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #184] @ 4-byte Spill + ldr r0, [sp, #228] + adcs r11, r1, r0 + ldr r0, [sp, #232] + ldr r1, [sp, #108] @ 4-byte Reload + str r11, [sp, #100] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #188] @ 4-byte Spill + ldr r0, [sp, #236] + adcs r10, r1, r0 + ldr r0, [sp, #240] + ldr r1, [sp, #120] @ 4-byte Reload + str r10, [sp, #108] @ 4-byte Spill + adcs r9, r1, r0 + ldr r0, [sp, #244] + ldr r1, [sp, #112] @ 4-byte Reload + str r9, [sp, #116] @ 4-byte Spill + adcs r8, r8, r0 + ldr r0, [sp, #248] + str r8, [sp, #120] @ 4-byte Spill + adcs r12, r1, r0 + ldr r0, [sp, #104] @ 4-byte Reload + ldr r1, [sp, #172] @ 4-byte Reload + str r12, [sp, #112] @ 4-byte Spill + adc r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #176] @ 4-byte Reload + subs r0, lr, r0 + sbcs r1, r2, r1 + ldr r2, [sp, #168] @ 4-byte Reload + sbcs r2, r3, r2 + ldr r3, [sp, #152] @ 4-byte Reload + sbcs r3, r7, r3 + ldr r7, [sp, #156] @ 4-byte Reload + sbcs lr, r4, r7 + ldr r4, [sp, #160] @ 4-byte Reload + ldr r7, [sp, #184] @ 4-byte Reload + sbcs r4, r5, r4 + ldr r5, [sp, #164] @ 4-byte Reload + sbcs r5, r6, r5 + ldr r6, [sp, #124] @ 4-byte Reload + sbcs r6, r7, r6 + ldr r7, [sp, #128] @ 4-byte Reload + sbcs r7, r11, r7 + ldr r11, [sp, #188] @ 4-byte Reload + str r7, [sp, #172] @ 4-byte Spill + ldr r7, [sp, #132] @ 4-byte Reload + sbcs r11, r11, r7 + ldr r7, [sp, #136] @ 4-byte Reload + sbcs r7, r10, r7 + str r7, [sp, #176] @ 4-byte Spill + ldr r7, [sp, #140] @ 4-byte Reload + sbcs r9, r9, r7 + ldr r7, [sp, #144] @ 4-byte Reload + sbcs r10, r8, r7 + ldr r7, [sp, #148] @ 4-byte Reload + sbcs r8, r12, r7 + ldr r7, [sp, #104] @ 4-byte Reload + sbc r7, r7, #0 + ands r12, r7, #1 + ldr r7, [sp, #72] @ 4-byte Reload + movne r0, r7 + ldr r7, [sp, #180] @ 4-byte Reload + str r0, [r7] + ldr r0, [sp, #76] @ 4-byte Reload + movne r1, r0 + ldr r0, [sp, #80] @ 4-byte Reload + str r1, [r7, #4] + ldr r1, [sp, #100] @ 4-byte Reload + movne r2, r0 + ldr r0, [sp, #84] @ 4-byte Reload + cmp r12, #0 + str r2, [r7, #8] + movne r3, r0 + ldr r0, [sp, #88] @ 4-byte Reload + str r3, [r7, #12] + movne lr, r0 + ldr r0, [sp, #92] @ 4-byte Reload + str lr, [r7, #16] + movne r4, r0 + ldr r0, [sp, #96] @ 4-byte Reload + cmp r12, #0 + str r4, [r7, #20] + movne r5, r0 + ldr r0, [sp, #184] @ 4-byte Reload + str r5, [r7, #24] + movne r6, r0 + ldr r0, [sp, #172] @ 4-byte Reload + movne r0, r1 + str r6, [r7, #28] + cmp r12, #0 + str r0, [r7, #32] + ldr r0, [sp, #188] @ 4-byte Reload + movne r11, r0 + ldr r0, [sp, #108] @ 4-byte Reload + str r11, [r7, #36] + ldr r11, [sp, #176] @ 4-byte Reload + movne r11, r0 + ldr r0, [sp, #116] @ 4-byte Reload + str r11, [r7, #40] + movne r9, r0 + ldr r0, [sp, #120] @ 4-byte Reload + cmp r12, #0 + str r9, [r7, #44] + movne r10, r0 + ldr r0, [sp, #112] @ 4-byte Reload + str r10, [r7, #48] + movne r8, r0 + str r8, [r7, #52] + add sp, sp, #68 + add sp, sp, #1024 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end212: + .size mcl_fp_montRed14L, .Lfunc_end212-mcl_fp_montRed14L + 
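+@ [annotation, not in the generated source] mcl_fp_montRed14L above appears to be the
+@ Montgomery reduction for 14 x 32-bit (448-bit) operands: repeated .LmulPv448x32 calls
+@ fold in one reduction word per pass, followed by a final conditional subtraction of the
+@ modulus. The mcl_fp_*Pre14L helpers that follow add/subtract 14-limb values without any
+@ modular reduction and return the raw carry/borrow in r0.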
.cantunwind + .fnend + + .globl mcl_fp_addPre14L + .align 2 + .type mcl_fp_addPre14L,%function +mcl_fp_addPre14L: @ @mcl_fp_addPre14L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #44 + sub sp, sp, #44 + ldm r1, {r3, r12, lr} + ldr r9, [r1, #12] + ldmib r2, {r5, r6, r7} + ldr r11, [r2] + ldr r4, [r2, #16] + ldr r10, [r1, #44] + adds r8, r11, r3 + ldr r3, [r2, #32] + str r4, [sp, #4] @ 4-byte Spill + ldr r4, [r2, #20] + ldr r11, [r1, #48] + adcs r5, r5, r12 + add r12, r1, #16 + adcs r6, r6, lr + ldr lr, [r1, #40] + adcs r7, r7, r9 + ldr r9, [r1, #52] + str r3, [sp, #16] @ 4-byte Spill + ldr r3, [r2, #36] + str r4, [sp, #8] @ 4-byte Spill + ldr r4, [r2, #24] + str r3, [sp, #24] @ 4-byte Spill + ldr r3, [r2, #40] + str r4, [sp, #12] @ 4-byte Spill + ldr r4, [r2, #28] + str r3, [sp, #28] @ 4-byte Spill + ldr r3, [r2, #44] + str r4, [sp, #20] @ 4-byte Spill + ldr r4, [r1, #32] + str r3, [sp, #32] @ 4-byte Spill + ldr r3, [r2, #48] + ldr r2, [r2, #52] + str r2, [sp, #40] @ 4-byte Spill + ldr r2, [r1, #36] + str r3, [sp, #36] @ 4-byte Spill + str r2, [sp] @ 4-byte Spill + ldm r12, {r1, r2, r3, r12} + str r8, [r0] + stmib r0, {r5, r6} + str r7, [r0, #12] + ldr r5, [sp, #4] @ 4-byte Reload + ldr r7, [sp, #32] @ 4-byte Reload + ldr r6, [sp, #36] @ 4-byte Reload + adcs r1, r5, r1 + ldr r5, [sp, #8] @ 4-byte Reload + str r1, [r0, #16] + ldr r1, [sp, #12] @ 4-byte Reload + adcs r2, r5, r2 + ldr r5, [sp, #40] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #20] @ 4-byte Reload + adcs r1, r1, r3 + ldr r3, [sp] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #16] @ 4-byte Reload + adcs r2, r2, r12 + add r12, r0, #32 + str r2, [r0, #28] + ldr r2, [sp, #24] @ 4-byte Reload + adcs r1, r1, r4 + adcs r2, r2, r3 + ldr r3, [sp, #28] @ 4-byte Reload + adcs r3, r3, lr + adcs r7, r7, r10 + adcs r6, r6, r11 + stm r12, {r1, r2, r3, r7} + adcs r5, r5, r9 + str r6, [r0, #48] + str r5, [r0, #52] + mov r0, #0 + adc r0, r0, #0 + add sp, sp, #44 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end213: + .size mcl_fp_addPre14L, .Lfunc_end213-mcl_fp_addPre14L + .cantunwind + .fnend + + .globl mcl_fp_subPre14L + .align 2 + .type mcl_fp_subPre14L,%function +mcl_fp_subPre14L: @ @mcl_fp_subPre14L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #44 + sub sp, sp, #44 + ldmib r2, {r10, r11} + ldr r3, [r2, #16] + ldr r7, [r1] + ldr r6, [r2, #12] + str r3, [sp, #28] @ 4-byte Spill + ldr r3, [r2, #20] + str r3, [sp, #32] @ 4-byte Spill + ldr r3, [r2, #24] + str r3, [sp, #36] @ 4-byte Spill + ldr r3, [r2, #28] + str r3, [sp, #40] @ 4-byte Spill + ldr r3, [r2] + ldmib r1, {r4, r5, r12} + subs lr, r7, r3 + ldr r3, [r2, #32] + sbcs r4, r4, r10 + sbcs r5, r5, r11 + add r11, r1, #32 + sbcs r6, r12, r6 + add r12, r1, #16 + str r3, [sp, #4] @ 4-byte Spill + ldr r3, [r2, #36] + str r3, [sp, #8] @ 4-byte Spill + ldr r3, [r2, #40] + str r3, [sp, #12] @ 4-byte Spill + ldr r3, [r2, #44] + str r3, [sp, #16] @ 4-byte Spill + ldr r3, [r2, #48] + ldr r2, [r2, #52] + str r3, [sp, #20] @ 4-byte Spill + str r2, [sp, #24] @ 4-byte Spill + ldm r11, {r7, r10, r11} + ldr r2, [r1, #52] + ldr r8, [r1, #44] + ldr r9, [r1, #48] + str r2, [sp] @ 4-byte Spill + ldm r12, {r1, r2, r3, r12} + str lr, [r0] + stmib r0, {r4, r5} + str r6, [r0, #12] + ldr r5, [sp, #28] @ 4-byte Reload + ldr r6, [sp, #32] @ 4-byte Reload + ldr r4, [sp] @ 4-byte Reload + sbcs r1, r1, r5 + ldr r5, [sp, #24] @ 4-byte Reload + 
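+@ [annotation] mcl_fp_subPre14L finishes by storing the difference limbs and materializing
+@ the borrow in r0 (sbc/and). mcl_fp_shr1_14L, next, shifts a 14-limb value right by one
+@ bit, pairing lsrs with rrx so each limb picks up the bit shifted out of the limb above.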
str r1, [r0, #16] + ldr r1, [sp, #36] @ 4-byte Reload + sbcs r2, r2, r6 + ldr r6, [sp, #20] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #40] @ 4-byte Reload + sbcs r1, r3, r1 + ldr r3, [sp, #12] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #4] @ 4-byte Reload + sbcs r2, r12, r2 + add r12, r0, #32 + str r2, [r0, #28] + ldr r2, [sp, #8] @ 4-byte Reload + sbcs r1, r7, r1 + ldr r7, [sp, #16] @ 4-byte Reload + sbcs r2, r10, r2 + sbcs r3, r11, r3 + sbcs r7, r8, r7 + sbcs r6, r9, r6 + stm r12, {r1, r2, r3, r7} + sbcs r5, r4, r5 + str r6, [r0, #48] + str r5, [r0, #52] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + add sp, sp, #44 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end214: + .size mcl_fp_subPre14L, .Lfunc_end214-mcl_fp_subPre14L + .cantunwind + .fnend + + .globl mcl_fp_shr1_14L + .align 2 + .type mcl_fp_shr1_14L,%function +mcl_fp_shr1_14L: @ @mcl_fp_shr1_14L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #16 + sub sp, sp, #16 + add r9, r1, #8 + add r12, r1, #32 + ldm r9, {r2, r3, r4, r5, r6, r9} + ldm r1, {r7, lr} + str r7, [sp, #4] @ 4-byte Spill + lsr r7, lr, #1 + orr r7, r7, r2, lsl #31 + str r7, [sp] @ 4-byte Spill + ldm r12, {r7, r11, r12} + ldr r10, [r1, #48] + ldr r8, [r1, #44] + ldr r1, [r1, #52] + str r1, [sp, #12] @ 4-byte Spill + lsr r1, r3, #1 + lsrs r3, r3, #1 + str r10, [sp, #8] @ 4-byte Spill + rrx r2, r2 + lsrs r3, lr, #1 + orr r1, r1, r4, lsl #31 + ldr r3, [sp, #4] @ 4-byte Reload + rrx r3, r3 + str r3, [r0] + ldr r3, [sp] @ 4-byte Reload + str r3, [r0, #4] + str r2, [r0, #8] + str r1, [r0, #12] + lsrs r1, r5, #1 + lsr r2, r11, #1 + rrx r1, r4 + ldr r4, [sp, #8] @ 4-byte Reload + orr r2, r2, r12, lsl #31 + str r1, [r0, #16] + lsr r1, r5, #1 + ldr r5, [sp, #12] @ 4-byte Reload + orr r1, r1, r6, lsl #31 + str r1, [r0, #20] + lsrs r1, r9, #1 + rrx r1, r6 + str r1, [r0, #24] + lsr r1, r9, #1 + orr r1, r1, r7, lsl #31 + str r1, [r0, #28] + lsrs r1, r11, #1 + rrx r1, r7 + lsrs r3, r8, #1 + lsr r7, r8, #1 + rrx r3, r12 + lsrs r6, r5, #1 + orr r7, r7, r4, lsl #31 + add r12, r0, #32 + lsr r5, r5, #1 + rrx r6, r4 + stm r12, {r1, r2, r3, r7} + str r6, [r0, #48] + str r5, [r0, #52] + add sp, sp, #16 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end215: + .size mcl_fp_shr1_14L, .Lfunc_end215-mcl_fp_shr1_14L + .cantunwind + .fnend + + .globl mcl_fp_add14L + .align 2 + .type mcl_fp_add14L,%function +mcl_fp_add14L: @ @mcl_fp_add14L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #52 + sub sp, sp, #52 + ldr r9, [r1] + ldmib r1, {r8, lr} + ldr r12, [r1, #12] + ldm r2, {r4, r5, r6, r7} + adds r9, r4, r9 + ldr r4, [r1, #24] + adcs r10, r5, r8 + ldr r5, [r1, #20] + str r9, [r0] + adcs r6, r6, lr + mov lr, r10 + adcs r7, r7, r12 + str r6, [sp, #32] @ 4-byte Spill + ldr r6, [r1, #16] + str lr, [r0, #4] + str r7, [sp, #28] @ 4-byte Spill + ldr r7, [r2, #16] + adcs r7, r7, r6 + ldr r6, [r2, #44] + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r2, #20] + adcs r7, r7, r5 + ldr r5, [r2, #28] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r2, #24] + ldr r10, [sp, #16] @ 4-byte Reload + adcs r7, r7, r4 + ldr r4, [sp, #32] @ 4-byte Reload + str r7, [sp, #44] @ 4-byte Spill + ldr r7, [r1, #28] + str r4, [r0, #8] + adcs r7, r5, r7 + ldr r5, [r2, #32] + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [r1, #32] + adcs r7, r5, r7 + ldr r5, [r2, #36] + str r7, [sp, #40] @ 4-byte Spill + ldr r7, [r1, #36] + adcs r11, 
r5, r7 + ldr r7, [r1, #40] + ldr r5, [r2, #40] + str r11, [sp, #24] @ 4-byte Spill + adcs r8, r5, r7 + ldr r7, [r1, #44] + ldr r5, [sp, #28] @ 4-byte Reload + str r8, [sp, #12] @ 4-byte Spill + adcs r12, r6, r7 + ldr r7, [r1, #48] + ldr r6, [r2, #48] + ldr r1, [r1, #52] + ldr r2, [r2, #52] + str r5, [r0, #12] + str r12, [sp, #8] @ 4-byte Spill + adcs r6, r6, r7 + adcs r2, r2, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r1, [r0, #16] + ldr r1, [sp, #44] @ 4-byte Reload + str r10, [r0, #20] + str r1, [r0, #24] + ldr r1, [sp, #36] @ 4-byte Reload + str r1, [r0, #28] + ldr r1, [sp, #40] @ 4-byte Reload + str r1, [r0, #32] + str r11, [r0, #36] + str r8, [r0, #40] + str r12, [r0, #44] + str r6, [r0, #48] + str r2, [r0, #52] + mov r8, r2 + mov r2, #0 + mov r12, r6 + add r11, r3, #32 + adc r1, r2, #0 + str r1, [sp, #20] @ 4-byte Spill + ldm r3, {r6, r7} + ldr r1, [r3, #8] + ldr r2, [r3, #12] + subs r6, r9, r6 + sbcs r7, lr, r7 + str r6, [sp, #4] @ 4-byte Spill + sbcs r1, r4, r1 + str r7, [sp] @ 4-byte Spill + str r1, [sp, #32] @ 4-byte Spill + sbcs r1, r5, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [r3, #16] + sbcs r1, r2, r1 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [r3, #20] + sbcs r10, r10, r1 + ldr r1, [r3, #24] + sbcs r1, r2, r1 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [r3, #28] + sbcs r5, r2, r1 + ldm r11, {r1, r2, r6, r7, r11} + ldr r9, [r3, #52] + ldr r3, [sp, #40] @ 4-byte Reload + sbcs r3, r3, r1 + ldr r1, [sp, #24] @ 4-byte Reload + sbcs lr, r1, r2 + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #20] @ 4-byte Reload + sbcs r4, r1, r6 + ldr r1, [sp, #8] @ 4-byte Reload + sbcs r7, r1, r7 + sbcs r6, r12, r11 + sbcs r1, r8, r9 + sbc r2, r2, #0 + tst r2, #1 + bne .LBB216_2 +@ BB#1: @ %nocarry + ldr r2, [sp, #4] @ 4-byte Reload + str r2, [r0] + ldr r2, [sp] @ 4-byte Reload + str r2, [r0, #4] + ldr r2, [sp, #32] @ 4-byte Reload + str r2, [r0, #8] + ldr r2, [sp, #28] @ 4-byte Reload + str r2, [r0, #12] + ldr r2, [sp, #48] @ 4-byte Reload + str r2, [r0, #16] + ldr r2, [sp, #44] @ 4-byte Reload + str r10, [r0, #20] + str r2, [r0, #24] + str r5, [r0, #28] + str r3, [r0, #32] + str lr, [r0, #36] + str r4, [r0, #40] + str r7, [r0, #44] + str r6, [r0, #48] + str r1, [r0, #52] +.LBB216_2: @ %carry + add sp, sp, #52 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end216: + .size mcl_fp_add14L, .Lfunc_end216-mcl_fp_add14L + .cantunwind + .fnend + + .globl mcl_fp_addNF14L + .align 2 + .type mcl_fp_addNF14L,%function +mcl_fp_addNF14L: @ @mcl_fp_addNF14L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #80 + sub sp, sp, #80 + ldm r1, {r7, r8, lr} + ldr r12, [r1, #12] + ldm r2, {r4, r5, r6, r10} + adds r4, r4, r7 + ldr r7, [r2, #16] + adcs r5, r5, r8 + str r4, [sp, #36] @ 4-byte Spill + ldr r4, [r1, #24] + adcs lr, r6, lr + ldr r6, [r1, #16] + str r5, [sp, #40] @ 4-byte Spill + ldr r5, [r1, #20] + adcs r9, r10, r12 + str lr, [sp, #12] @ 4-byte Spill + str r9, [sp, #16] @ 4-byte Spill + adcs r7, r7, r6 + ldr r6, [r2, #20] + str r7, [sp, #44] @ 4-byte Spill + adcs r7, r6, r5 + ldr r6, [r2, #24] + ldr r5, [r2, #28] + str r7, [sp, #48] @ 4-byte Spill + adcs r8, r6, r4 + ldr r6, [r1, #28] + str r8, [sp, #20] @ 4-byte Spill + adcs r7, r5, r6 + ldr r6, [r1, #32] + ldr r5, [r2, #32] + str r7, [sp, #52] @ 4-byte Spill + adcs r7, r5, r6 + ldr r6, [r1, #36] + ldr r5, [r2, #36] + str r7, [sp, #56] @ 4-byte 
Spill + adcs r7, r5, r6 + ldr r6, [r1, #40] + ldr r5, [r2, #40] + str r7, [sp, #68] @ 4-byte Spill + adcs r7, r5, r6 + ldr r6, [r1, #44] + ldr r5, [r2, #44] + str r7, [sp, #64] @ 4-byte Spill + adcs r7, r5, r6 + ldr r6, [r1, #48] + ldr r5, [r2, #48] + ldr r1, [r1, #52] + ldr r2, [r2, #52] + str r7, [sp, #60] @ 4-byte Spill + adcs r7, r5, r6 + adc r1, r2, r1 + str r7, [sp, #76] @ 4-byte Spill + str r1, [sp, #72] @ 4-byte Spill + ldmib r3, {r1, r4, r6} + ldr r2, [r3, #24] + ldr r7, [r3] + ldr r5, [r3, #16] + ldr r11, [r3, #20] + ldr r10, [r3, #40] + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [r3, #28] + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [sp, #36] @ 4-byte Reload + subs r7, r2, r7 + ldr r2, [sp, #40] @ 4-byte Reload + sbcs r2, r2, r1 + ldr r1, [r3, #36] + sbcs r12, lr, r4 + sbcs lr, r9, r6 + ldr r9, [r3, #32] + ldr r6, [sp, #32] @ 4-byte Reload + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [r3, #44] + str r1, [sp] @ 4-byte Spill + ldr r1, [r3, #48] + str r1, [sp, #4] @ 4-byte Spill + ldr r1, [r3, #52] + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + sbcs r5, r1, r5 + ldr r1, [sp, #48] @ 4-byte Reload + sbcs r3, r1, r11 + ldr r1, [sp, #28] @ 4-byte Reload + sbcs r4, r8, r1 + ldr r1, [sp, #52] @ 4-byte Reload + sbcs r8, r1, r6 + ldr r1, [sp, #56] @ 4-byte Reload + ldr r6, [sp, #24] @ 4-byte Reload + sbcs r11, r1, r9 + ldr r1, [sp, #68] @ 4-byte Reload + sbcs r9, r1, r6 + ldr r1, [sp, #64] @ 4-byte Reload + ldr r6, [sp] @ 4-byte Reload + sbcs r1, r1, r10 + ldr r10, [sp, #36] @ 4-byte Reload + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + sbcs r1, r1, r6 + ldr r6, [sp, #4] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + sbcs r1, r1, r6 + ldr r6, [sp, #8] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + sbc r6, r1, r6 + asr r1, r6, #31 + cmp r1, #0 + movlt r7, r10 + str r7, [r0] + ldr r7, [sp, #40] @ 4-byte Reload + movlt r2, r7 + str r2, [r0, #4] + ldr r2, [sp, #12] @ 4-byte Reload + movlt r12, r2 + ldr r2, [sp, #16] @ 4-byte Reload + cmp r1, #0 + str r12, [r0, #8] + movlt lr, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str lr, [r0, #12] + movlt r5, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r5, [r0, #16] + movlt r3, r2 + ldr r2, [sp, #20] @ 4-byte Reload + cmp r1, #0 + str r3, [r0, #20] + ldr r3, [sp, #24] @ 4-byte Reload + movlt r4, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r4, [r0, #24] + movlt r8, r2 + ldr r2, [sp, #56] @ 4-byte Reload + str r8, [r0, #28] + movlt r11, r2 + ldr r2, [sp, #68] @ 4-byte Reload + cmp r1, #0 + str r11, [r0, #32] + movlt r9, r2 + ldr r2, [sp, #64] @ 4-byte Reload + str r9, [r0, #36] + movlt r3, r2 + ldr r2, [sp, #60] @ 4-byte Reload + str r3, [r0, #40] + ldr r3, [sp, #28] @ 4-byte Reload + movlt r3, r2 + cmp r1, #0 + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [sp, #32] @ 4-byte Reload + str r3, [r0, #44] + movlt r2, r1 + ldr r1, [sp, #72] @ 4-byte Reload + str r2, [r0, #48] + movlt r6, r1 + str r6, [r0, #52] + add sp, sp, #80 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end217: + .size mcl_fp_addNF14L, .Lfunc_end217-mcl_fp_addNF14L + .cantunwind + .fnend + + .globl mcl_fp_sub14L + .align 2 + .type mcl_fp_sub14L,%function +mcl_fp_sub14L: @ @mcl_fp_sub14L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #60 + sub sp, sp, #60 + ldr r9, [r2] + ldmib r2, {r8, lr} + ldr r5, [r1] + ldr r12, [r2, #12] + ldmib r1, {r4, r6, r7} + subs r5, r5, r9 + 
sbcs r4, r4, r8 + str r5, [sp, #52] @ 4-byte Spill + ldr r5, [r2, #24] + sbcs r6, r6, lr + str r4, [sp, #48] @ 4-byte Spill + ldr r4, [r2, #20] + sbcs r7, r7, r12 + str r6, [sp, #56] @ 4-byte Spill + ldr r6, [r2, #16] + str r7, [sp, #44] @ 4-byte Spill + ldr r7, [r1, #16] + sbcs r8, r7, r6 + ldr r7, [r1, #20] + ldr r6, [r1, #28] + str r8, [sp, #40] @ 4-byte Spill + sbcs r10, r7, r4 + ldr r7, [r1, #24] + ldr r4, [r1, #40] + str r10, [sp, #36] @ 4-byte Spill + sbcs r9, r7, r5 + ldr r7, [r2, #28] + sbcs r11, r6, r7 + ldr r7, [r2, #32] + ldr r6, [r1, #32] + str r11, [sp, #32] @ 4-byte Spill + sbcs r12, r6, r7 + ldr r7, [r2, #36] + ldr r6, [r1, #36] + str r12, [sp, #28] @ 4-byte Spill + sbcs r6, r6, r7 + ldr r7, [r2, #40] + sbcs r5, r4, r7 + ldr r7, [r2, #44] + ldr r4, [r1, #44] + str r5, [sp, #24] @ 4-byte Spill + sbcs lr, r4, r7 + ldr r4, [r2, #48] + ldr r7, [r1, #48] + ldr r2, [r2, #52] + ldr r1, [r1, #52] + sbcs r7, r7, r4 + ldr r4, [sp, #44] @ 4-byte Reload + sbcs r2, r1, r2 + ldr r1, [sp, #52] @ 4-byte Reload + str r1, [r0] + ldr r1, [sp, #48] @ 4-byte Reload + str r1, [r0, #4] + ldr r1, [sp, #56] @ 4-byte Reload + str r1, [r0, #8] + str r4, [r0, #12] + str r8, [r0, #16] + mov r1, lr + add r8, r0, #24 + str r10, [r0, #20] + stm r8, {r9, r11, r12} + str r6, [r0, #36] + str r5, [r0, #40] + str r1, [r0, #44] + str r7, [r0, #48] + mov r8, r2 + str r2, [r0, #52] + mov r2, #0 + sbc r2, r2, #0 + tst r2, #1 + beq .LBB218_2 +@ BB#1: @ %carry + ldr r2, [r3, #52] + ldr r5, [r3, #48] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [sp, #52] @ 4-byte Reload + ldr lr, [r3, #4] + ldr r12, [r3, #8] + ldr r10, [r3, #12] + ldr r11, [r3, #40] + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [r3, #16] + str r5, [sp, #52] @ 4-byte Spill + ldr r5, [sp, #48] @ 4-byte Reload + str r2, [sp] @ 4-byte Spill + ldr r2, [r3, #20] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r3, #24] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r3, #28] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r3] + adds r2, r2, r7 + ldr r7, [sp, #56] @ 4-byte Reload + adcs lr, lr, r5 + ldr r5, [r3, #44] + adcs r7, r12, r7 + add r12, r0, #32 + str r5, [sp, #48] @ 4-byte Spill + adcs r5, r10, r4 + ldr r10, [r3, #36] + ldr r3, [r3, #32] + stm r0, {r2, lr} + str r7, [r0, #8] + ldr r2, [sp, #40] @ 4-byte Reload + ldr r7, [sp] @ 4-byte Reload + ldr r4, [sp, #36] @ 4-byte Reload + str r5, [r0, #12] + ldr r5, [sp, #52] @ 4-byte Reload + adcs r2, r7, r2 + ldr r7, [sp, #4] @ 4-byte Reload + str r2, [r0, #16] + ldr r2, [sp, #8] @ 4-byte Reload + adcs r4, r7, r4 + ldr r7, [sp, #12] @ 4-byte Reload + adcs r2, r2, r9 + str r4, [r0, #20] + str r2, [r0, #24] + ldr r2, [sp, #32] @ 4-byte Reload + adcs r2, r7, r2 + ldr r7, [sp, #24] @ 4-byte Reload + str r2, [r0, #28] + ldr r2, [sp, #28] @ 4-byte Reload + adcs r2, r3, r2 + adcs r3, r10, r6 + ldr r6, [sp, #48] @ 4-byte Reload + adcs r7, r11, r7 + adcs r6, r6, r1 + ldr r1, [sp, #16] @ 4-byte Reload + adcs r5, r5, r1 + ldr r1, [sp, #20] @ 4-byte Reload + stm r12, {r2, r3, r7} + str r6, [r0, #44] + str r5, [r0, #48] + adc r1, r1, r8 + str r1, [r0, #52] +.LBB218_2: @ %nocarry + add sp, sp, #60 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end218: + .size mcl_fp_sub14L, .Lfunc_end218-mcl_fp_sub14L + .cantunwind + .fnend + + .globl mcl_fp_subNF14L + .align 2 + .type mcl_fp_subNF14L,%function +mcl_fp_subNF14L: @ @mcl_fp_subNF14L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #80 + sub sp, sp, #80 + mov r12, r0 + ldr r0, [r2, #32] + add 
r7, r1, #16 + ldr r9, [r2] + ldr r11, [r2, #20] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r2, #36] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r2, #40] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r2, #44] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r2, #48] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r2, #52] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r1, #52] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r1, #48] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r1, #44] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r1, #40] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r1, #36] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [r2, #4] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r2, #8] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [r2, #12] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r2, #16] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [r2, #24] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r2, #28] + str r0, [sp, #44] @ 4-byte Spill + ldmib r1, {r2, r8, lr} + ldm r7, {r4, r5, r6, r7} + ldr r10, [r1] + ldr r0, [sp, #56] @ 4-byte Reload + ldr r1, [r1, #32] + subs r10, r10, r9 + sbcs r9, r2, r0 + ldr r0, [sp, #52] @ 4-byte Reload + str r10, [sp] @ 4-byte Spill + str r9, [sp, #4] @ 4-byte Spill + sbcs r0, r8, r0 + add r8, r3, #20 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + sbcs r0, lr, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + sbcs r0, r4, r0 + str r0, [sp, #56] @ 4-byte Spill + sbcs r0, r5, r11 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + sbcs r0, r6, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + sbcs r0, r7, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + sbcs r11, r1, r0 + ldr r0, [sp, #48] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + str r11, [sp, #20] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + sbcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + sbcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + sbcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + sbc r0, r1, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r3, #32] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [r3, #36] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r3, #40] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r3, #44] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r3, #48] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r3, #52] + str r0, [sp, #36] @ 4-byte Spill + ldm r3, {r2, r4, r6} + ldr r5, [r3, #12] + ldr lr, [r3, #16] + ldm r8, {r0, r7, r8} + ldr r3, [sp, #56] @ 4-byte Reload + adds r1, r10, r2 + ldr r10, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #52] @ 4-byte Reload + adcs r4, r9, r4 + adcs r6, r10, r6 + adcs r2, r2, r5 + ldr r5, [sp, #60] @ 4-byte Reload + adcs r3, r3, lr + adcs lr, r5, r0 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r5, r0, r7 + ldr r0, [sp, #44] @ 4-byte Reload + ldr r7, [sp, #16] @ 4-byte Reload + adcs r8, r0, r8 + ldr r0, [sp, #8] @ 4-byte Reload + adcs r9, r11, r0 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r11, r0, r7 + ldr r0, [sp, #68] @ 4-byte Reload + ldr r7, [sp, #24] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #28] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, 
r7 + ldr r7, [sp, #32] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #36] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r7, r0, r7 + str r7, [sp, #36] @ 4-byte Spill + asr r7, r0, #31 + ldr r0, [sp] @ 4-byte Reload + cmp r7, #0 + movge r6, r10 + movge r1, r0 + ldr r0, [sp, #4] @ 4-byte Reload + str r1, [r12] + ldr r1, [sp, #24] @ 4-byte Reload + movge r4, r0 + ldr r0, [sp, #52] @ 4-byte Reload + cmp r7, #0 + str r4, [r12, #4] + str r6, [r12, #8] + movge r2, r0 + ldr r0, [sp, #56] @ 4-byte Reload + str r2, [r12, #12] + movge r3, r0 + ldr r0, [sp, #60] @ 4-byte Reload + str r3, [r12, #16] + movge lr, r0 + ldr r0, [sp, #64] @ 4-byte Reload + cmp r7, #0 + str lr, [r12, #20] + movge r5, r0 + ldr r0, [sp, #44] @ 4-byte Reload + str r5, [r12, #24] + movge r8, r0 + ldr r0, [sp, #20] @ 4-byte Reload + str r8, [r12, #28] + movge r9, r0 + ldr r0, [sp, #48] @ 4-byte Reload + cmp r7, #0 + str r9, [r12, #32] + movge r11, r0 + ldr r0, [sp, #68] @ 4-byte Reload + str r11, [r12, #36] + movge r1, r0 + ldr r0, [sp, #72] @ 4-byte Reload + str r1, [r12, #40] + ldr r1, [sp, #28] @ 4-byte Reload + movge r1, r0 + ldr r0, [sp, #76] @ 4-byte Reload + cmp r7, #0 + str r1, [r12, #44] + ldr r1, [sp, #32] @ 4-byte Reload + movge r1, r0 + ldr r0, [sp, #36] @ 4-byte Reload + str r1, [r12, #48] + ldr r1, [sp, #40] @ 4-byte Reload + movge r0, r1 + str r0, [r12, #52] + add sp, sp, #80 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end219: + .size mcl_fp_subNF14L, .Lfunc_end219-mcl_fp_subNF14L + .cantunwind + .fnend + + .globl mcl_fpDbl_add14L + .align 2 + .type mcl_fpDbl_add14L,%function +mcl_fpDbl_add14L: @ @mcl_fpDbl_add14L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #168 + sub sp, sp, #168 + ldr r7, [r1] + ldmib r1, {r6, lr} + ldr r12, [r1, #12] + ldm r2, {r4, r5, r8, r9} + add r10, r1, #32 + adds r4, r4, r7 + str r4, [sp, #92] @ 4-byte Spill + ldr r4, [r2, #96] + str r4, [sp, #152] @ 4-byte Spill + ldr r4, [r2, #100] + str r4, [sp, #156] @ 4-byte Spill + ldr r4, [r2, #104] + str r4, [sp, #160] @ 4-byte Spill + ldr r4, [r2, #108] + str r4, [sp, #164] @ 4-byte Spill + adcs r4, r5, r6 + adcs r7, r8, lr + str r4, [sp, #68] @ 4-byte Spill + add lr, r1, #16 + str r7, [sp, #64] @ 4-byte Spill + adcs r7, r9, r12 + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [r2, #64] + str r7, [sp, #124] @ 4-byte Spill + ldr r7, [r2, #68] + str r7, [sp, #128] @ 4-byte Spill + ldr r7, [r2, #72] + str r7, [sp, #136] @ 4-byte Spill + ldr r7, [r2, #80] + str r7, [sp, #140] @ 4-byte Spill + ldr r7, [r2, #84] + str r7, [sp, #144] @ 4-byte Spill + ldr r7, [r2, #88] + str r7, [sp, #132] @ 4-byte Spill + ldr r7, [r2, #92] + str r7, [sp, #148] @ 4-byte Spill + ldr r7, [r2, #76] + str r7, [sp, #120] @ 4-byte Spill + ldr r7, [r2, #32] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r2, #36] + str r7, [sp, #60] @ 4-byte Spill + ldr r7, [r2, #40] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r2, #44] + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #84] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #88] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #96] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #100] @ 4-byte Spill + ldr r7, [r2, #28] + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #24] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r2, #20] + ldr r2, [r2, #16] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #96] + 
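+@ [annotation] mcl_fpDbl_add14L operates on double-width (28-limb) inputs: the high words
+@ of both operands are spilled to the stack here, a single adcs chain forms the full sum,
+@ and the upper 14 limbs are then conditionally reduced by the modulus pointed to by r3.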
str r7, [sp, #12] @ 4-byte Spill + str r2, [sp, #104] @ 4-byte Spill + ldr r2, [r1, #100] + str r2, [sp, #108] @ 4-byte Spill + ldr r2, [r1, #104] + str r2, [sp, #112] @ 4-byte Spill + ldr r2, [r1, #108] + str r2, [sp, #116] @ 4-byte Spill + ldr r2, [r1, #64] + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [r1, #68] + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [r1, #80] + str r2, [sp, #40] @ 4-byte Spill + ldr r2, [r1, #84] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r1, #88] + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [r1, #92] + str r2, [sp, #52] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #24] @ 4-byte Spill + ldm r10, {r4, r5, r6, r10} + ldr r2, [r1, #56] + ldr r8, [r1, #48] + ldr r9, [r1, #52] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #8] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r11, [sp, #92] @ 4-byte Reload + ldr r7, [sp, #68] @ 4-byte Reload + str r11, [r0] + str r7, [r0, #4] + ldr r7, [sp, #64] @ 4-byte Reload + str r7, [r0, #8] + ldr r7, [sp] @ 4-byte Reload + adcs r1, r7, r1 + ldr r7, [sp, #76] @ 4-byte Reload + str r7, [r0, #12] + ldr r7, [sp, #12] @ 4-byte Reload + str r1, [r0, #16] + ldr r1, [sp, #16] @ 4-byte Reload + adcs r2, r7, r2 + ldr r7, [sp, #4] @ 4-byte Reload + str r2, [r0, #20] + adcs r1, r1, r12 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #56] @ 4-byte Reload + adcs r2, r2, lr + str r2, [r0, #28] + adcs r1, r1, r4 + ldr r2, [sp, #60] @ 4-byte Reload + str r1, [r0, #32] + ldr r1, [sp, #72] @ 4-byte Reload + adcs r2, r2, r5 + str r2, [r0, #36] + adcs r1, r1, r6 + ldr r2, [sp, #80] @ 4-byte Reload + str r1, [r0, #40] + ldr r1, [sp, #84] @ 4-byte Reload + adcs r2, r2, r10 + str r2, [r0, #44] + adcs r1, r1, r8 + ldr r2, [sp, #88] @ 4-byte Reload + str r1, [r0, #48] + ldr r1, [sp, #96] @ 4-byte Reload + adcs r2, r2, r9 + adcs r6, r1, r7 + str r2, [r0, #52] + ldr r1, [sp, #100] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + str r6, [sp, #84] @ 4-byte Spill + adcs r5, r1, r2 + ldr r1, [sp, #124] @ 4-byte Reload + ldr r2, [sp, #28] @ 4-byte Reload + str r5, [sp, #88] @ 4-byte Spill + adcs r4, r1, r2 + ldr r1, [sp, #128] @ 4-byte Reload + ldr r2, [sp, #32] @ 4-byte Reload + str r4, [sp, #96] @ 4-byte Spill + adcs r7, r1, r2 + ldr r1, [sp, #136] @ 4-byte Reload + ldr r2, [sp, #36] @ 4-byte Reload + str r7, [sp, #100] @ 4-byte Spill + adcs lr, r1, r2 + ldr r1, [sp, #120] @ 4-byte Reload + ldr r2, [sp, #24] @ 4-byte Reload + str lr, [sp, #92] @ 4-byte Spill + adcs r1, r1, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #136] @ 4-byte Spill + ldr r1, [sp, #140] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #140] @ 4-byte Spill + ldr r1, [sp, #144] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r1, [sp, #144] @ 4-byte Spill + ldr r1, [sp, #132] @ 4-byte Reload + adcs r8, r1, r2 + ldr r1, [sp, #148] @ 4-byte Reload + ldr r2, [sp, #52] @ 4-byte Reload + str r8, [sp, #124] @ 4-byte Spill + adcs r1, r1, r2 + ldr r2, [sp, #104] @ 4-byte Reload + str r1, [sp, #148] @ 4-byte Spill + ldr r1, [sp, #152] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #108] @ 4-byte Reload + str r1, [sp, #152] @ 4-byte Spill + ldr r1, [sp, #156] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #112] @ 4-byte Reload + str r1, [sp, #156] @ 4-byte Spill + ldr r1, [sp, #160] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #116] @ 4-byte Reload + str r1, [sp, #160] @ 4-byte Spill + ldr r1, [sp, #164] @ 4-byte 
Reload + adcs r1, r1, r2 + str r1, [sp, #164] @ 4-byte Spill + mov r1, #0 + adc r1, r1, #0 + str r1, [sp, #120] @ 4-byte Spill + ldmib r3, {r2, r12} + ldr r1, [r3, #16] + ldr r11, [r3] + ldr r9, [r3, #12] + ldr r10, [r3, #36] + str r1, [sp, #112] @ 4-byte Spill + ldr r1, [r3, #20] + subs r11, r6, r11 + sbcs r2, r5, r2 + sbcs r12, r4, r12 + sbcs r4, r7, r9 + ldr r7, [r3, #32] + str r1, [sp, #116] @ 4-byte Spill + ldr r1, [r3, #24] + ldr r6, [sp, #116] @ 4-byte Reload + str r1, [sp, #128] @ 4-byte Spill + ldr r1, [r3, #28] + ldr r5, [sp, #128] @ 4-byte Reload + str r1, [sp, #132] @ 4-byte Spill + ldr r1, [r3, #40] + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [r3, #44] + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [r3, #48] + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [r3, #52] + str r1, [sp, #108] @ 4-byte Spill + ldr r1, [sp, #112] @ 4-byte Reload + sbcs r3, lr, r1 + ldr r1, [sp, #136] @ 4-byte Reload + sbcs lr, r1, r6 + ldr r1, [sp, #140] @ 4-byte Reload + ldr r6, [sp, #132] @ 4-byte Reload + sbcs r5, r1, r5 + ldr r1, [sp, #144] @ 4-byte Reload + sbcs r6, r1, r6 + ldr r1, [sp, #148] @ 4-byte Reload + sbcs r8, r8, r7 + ldr r7, [sp, #76] @ 4-byte Reload + sbcs r9, r1, r10 + ldr r1, [sp, #152] @ 4-byte Reload + sbcs r10, r1, r7 + ldr r1, [sp, #156] @ 4-byte Reload + ldr r7, [sp, #80] @ 4-byte Reload + sbcs r1, r1, r7 + ldr r7, [sp, #104] @ 4-byte Reload + str r1, [sp, #128] @ 4-byte Spill + ldr r1, [sp, #160] @ 4-byte Reload + sbcs r1, r1, r7 + ldr r7, [sp, #108] @ 4-byte Reload + str r1, [sp, #132] @ 4-byte Spill + ldr r1, [sp, #164] @ 4-byte Reload + sbcs r1, r1, r7 + ldr r7, [sp, #84] @ 4-byte Reload + str r1, [sp, #116] @ 4-byte Spill + ldr r1, [sp, #120] @ 4-byte Reload + sbc r1, r1, #0 + ands r1, r1, #1 + movne r11, r7 + ldr r7, [sp, #88] @ 4-byte Reload + str r11, [r0, #56] + movne r2, r7 + ldr r7, [sp, #116] @ 4-byte Reload + str r2, [r0, #60] + ldr r2, [sp, #96] @ 4-byte Reload + movne r12, r2 + ldr r2, [sp, #100] @ 4-byte Reload + cmp r1, #0 + str r12, [r0, #64] + movne r4, r2 + ldr r2, [sp, #92] @ 4-byte Reload + str r4, [r0, #68] + movne r3, r2 + ldr r2, [sp, #136] @ 4-byte Reload + str r3, [r0, #72] + ldr r3, [sp, #128] @ 4-byte Reload + movne lr, r2 + ldr r2, [sp, #140] @ 4-byte Reload + cmp r1, #0 + str lr, [r0, #76] + movne r5, r2 + ldr r2, [sp, #144] @ 4-byte Reload + str r5, [r0, #80] + movne r6, r2 + ldr r2, [sp, #124] @ 4-byte Reload + str r6, [r0, #84] + movne r8, r2 + ldr r2, [sp, #148] @ 4-byte Reload + cmp r1, #0 + str r8, [r0, #88] + movne r9, r2 + ldr r2, [sp, #152] @ 4-byte Reload + str r9, [r0, #92] + movne r10, r2 + ldr r2, [sp, #156] @ 4-byte Reload + str r10, [r0, #96] + movne r3, r2 + cmp r1, #0 + ldr r1, [sp, #160] @ 4-byte Reload + ldr r2, [sp, #132] @ 4-byte Reload + str r3, [r0, #100] + movne r2, r1 + ldr r1, [sp, #164] @ 4-byte Reload + str r2, [r0, #104] + movne r7, r1 + str r7, [r0, #108] + add sp, sp, #168 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end220: + .size mcl_fpDbl_add14L, .Lfunc_end220-mcl_fpDbl_add14L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub14L + .align 2 + .type mcl_fpDbl_sub14L,%function +mcl_fpDbl_sub14L: @ @mcl_fpDbl_sub14L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #168 + sub sp, sp, #168 + ldr r7, [r2, #96] + add r9, r1, #32 + str r7, [sp, #160] @ 4-byte Spill + ldr r7, [r2, #100] + str r7, [sp, #156] @ 4-byte Spill + ldr r7, [r2, #104] + str r7, [sp, #140] @ 4-byte Spill + ldr r7, [r2, #108] + str r7, [sp, #164] @ 4-byte Spill + 
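+@ [annotation] mcl_fpDbl_sub14L mirrors mcl_fpDbl_add14L: it spills the subtrahend's high
+@ words first, runs one long sbcs chain over all 28 limbs, and when the chain borrows it
+@ adds the modulus (r3) back into the upper half via the moveq-selected results.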
ldr r7, [r2, #64] + str r7, [sp, #128] @ 4-byte Spill + ldr r7, [r2, #68] + str r7, [sp, #136] @ 4-byte Spill + ldr r7, [r2, #72] + str r7, [sp, #144] @ 4-byte Spill + ldr r7, [r2, #80] + str r7, [sp, #148] @ 4-byte Spill + ldr r7, [r2, #84] + str r7, [sp, #152] @ 4-byte Spill + ldr r7, [r2, #88] + str r7, [sp, #124] @ 4-byte Spill + ldr r7, [r2, #92] + str r7, [sp, #132] @ 4-byte Spill + ldr r7, [r2, #76] + str r7, [sp, #120] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #116] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #112] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #108] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #104] @ 4-byte Spill + ldm r2, {r5, r8, r12, lr} + ldr r6, [r1] + ldmib r1, {r4, r7, r10} + subs r5, r6, r5 + sbcs r4, r4, r8 + str r5, [sp, #32] @ 4-byte Spill + ldr r5, [r2, #44] + sbcs r7, r7, r12 + str r4, [sp, #28] @ 4-byte Spill + ldr r4, [r2, #40] + str r7, [sp, #24] @ 4-byte Spill + ldr r7, [r2, #36] + str r5, [sp, #84] @ 4-byte Spill + str r4, [sp, #80] @ 4-byte Spill + str r7, [sp, #48] @ 4-byte Spill + sbcs r7, r10, lr + ldr r10, [r2, #16] + add lr, r1, #16 + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #32] + str r7, [sp, #40] @ 4-byte Spill + ldr r7, [r2, #28] + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [r2, #24] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r2, #20] + ldr r2, [r1, #96] + str r2, [sp, #88] @ 4-byte Spill + ldr r2, [r1, #100] + str r7, [sp, #12] @ 4-byte Spill + str r2, [sp, #92] @ 4-byte Spill + ldr r2, [r1, #104] + str r2, [sp, #96] @ 4-byte Spill + ldr r2, [r1, #108] + str r2, [sp, #100] @ 4-byte Spill + ldr r2, [r1, #64] + str r2, [sp, #52] @ 4-byte Spill + ldr r2, [r1, #68] + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #60] @ 4-byte Spill + ldr r2, [r1, #80] + str r2, [sp, #64] @ 4-byte Spill + ldr r2, [r1, #84] + str r2, [sp, #68] @ 4-byte Spill + ldr r2, [r1, #88] + str r2, [sp, #72] @ 4-byte Spill + ldr r2, [r1, #92] + str r2, [sp, #76] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #44] @ 4-byte Spill + ldm r9, {r4, r5, r6, r8, r9} + ldr r2, [r1, #52] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #8] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r11, [sp, #32] @ 4-byte Reload + ldr r7, [sp, #28] @ 4-byte Reload + str r11, [r0] + str r7, [r0, #4] + ldr r7, [sp, #24] @ 4-byte Reload + sbcs r1, r1, r10 + str r7, [r0, #8] + ldr r7, [sp, #20] @ 4-byte Reload + str r7, [r0, #12] + ldr r7, [sp, #12] @ 4-byte Reload + str r1, [r0, #16] + ldr r1, [sp, #16] @ 4-byte Reload + sbcs r2, r2, r7 + ldr r7, [sp] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #36] @ 4-byte Reload + sbcs r1, r12, r1 + str r1, [r0, #24] + ldr r1, [sp, #40] @ 4-byte Reload + sbcs r2, lr, r2 + add lr, r3, #8 + str r2, [r0, #28] + ldr r2, [sp, #48] @ 4-byte Reload + sbcs r1, r4, r1 + str r1, [r0, #32] + ldr r1, [sp, #80] @ 4-byte Reload + sbcs r2, r5, r2 + str r2, [r0, #36] + ldr r2, [sp, #84] @ 4-byte Reload + sbcs r1, r6, r1 + str r1, [r0, #40] + ldr r1, [sp, #104] @ 4-byte Reload + sbcs r2, r8, r2 + str r2, [r0, #44] + ldr r2, [sp, #108] @ 4-byte Reload + sbcs r1, r9, r1 + str r1, [r0, #48] + ldr r1, [sp, #112] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #4] @ 4-byte Reload + str r2, [r0, #52] + ldr r2, [sp, #8] @ 4-byte Reload + sbcs r10, r7, r1 + ldr r1, [sp, #116] @ 4-byte Reload + ldr r7, [sp, #60] @ 4-byte Reload + str r10, [sp, #80] @ 4-byte Spill + sbcs r11, r2, r1 + ldr r1, [sp, #128] @ 4-byte Reload + ldr r2, [sp, #52] @ 4-byte 
Reload + str r11, [sp, #84] @ 4-byte Spill + sbcs r1, r2, r1 + ldr r2, [sp, #56] @ 4-byte Reload + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [sp, #136] @ 4-byte Reload + sbcs r1, r2, r1 + ldr r2, [sp, #144] @ 4-byte Reload + str r1, [sp, #136] @ 4-byte Spill + mov r1, #0 + sbcs r2, r7, r2 + ldr r7, [sp, #44] @ 4-byte Reload + str r2, [sp, #128] @ 4-byte Spill + ldr r2, [sp, #120] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #64] @ 4-byte Reload + str r2, [sp, #144] @ 4-byte Spill + ldr r2, [sp, #148] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #68] @ 4-byte Reload + str r2, [sp, #148] @ 4-byte Spill + ldr r2, [sp, #152] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #72] @ 4-byte Reload + str r2, [sp, #152] @ 4-byte Spill + ldr r2, [sp, #124] @ 4-byte Reload + sbcs r9, r7, r2 + ldr r2, [sp, #132] @ 4-byte Reload + ldr r7, [sp, #76] @ 4-byte Reload + str r9, [sp, #108] @ 4-byte Spill + sbcs r2, r7, r2 + ldr r7, [sp, #88] @ 4-byte Reload + str r2, [sp, #132] @ 4-byte Spill + ldr r2, [sp, #160] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #92] @ 4-byte Reload + str r2, [sp, #160] @ 4-byte Spill + ldr r2, [sp, #156] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #96] @ 4-byte Reload + str r2, [sp, #156] @ 4-byte Spill + ldr r2, [sp, #140] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #100] @ 4-byte Reload + str r2, [sp, #140] @ 4-byte Spill + ldr r2, [sp, #164] @ 4-byte Reload + sbcs r2, r7, r2 + sbc r1, r1, #0 + str r2, [sp, #164] @ 4-byte Spill + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [r3, #32] + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [r3, #36] + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [r3, #40] + str r1, [sp, #112] @ 4-byte Spill + ldr r1, [r3, #44] + str r1, [sp, #116] @ 4-byte Spill + ldr r1, [r3, #48] + str r1, [sp, #120] @ 4-byte Spill + ldr r1, [r3, #52] + str r1, [sp, #124] @ 4-byte Spill + ldm r3, {r2, r5} + ldm lr, {r4, r6, lr} + ldr r7, [r3, #24] + ldr r8, [r3, #28] + ldr r12, [r3, #20] + ldr r3, [sp, #128] @ 4-byte Reload + adds r1, r10, r2 + ldr r10, [sp, #104] @ 4-byte Reload + ldr r2, [sp, #136] @ 4-byte Reload + adcs r5, r11, r5 + ldr r11, [sp, #124] @ 4-byte Reload + adcs r4, r10, r4 + adcs r2, r2, r6 + ldr r6, [sp, #144] @ 4-byte Reload + adcs r3, r3, lr + adcs r12, r6, r12 + ldr r6, [sp, #148] @ 4-byte Reload + adcs lr, r6, r7 + ldr r6, [sp, #152] @ 4-byte Reload + ldr r7, [sp, #132] @ 4-byte Reload + adcs r8, r6, r8 + ldr r6, [sp, #92] @ 4-byte Reload + adcs r9, r9, r6 + ldr r6, [sp, #96] @ 4-byte Reload + adcs r6, r7, r6 + ldr r7, [sp, #160] @ 4-byte Reload + str r6, [sp, #96] @ 4-byte Spill + ldr r6, [sp, #112] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #116] @ 4-byte Reload + str r7, [sp, #112] @ 4-byte Spill + ldr r7, [sp, #156] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #120] @ 4-byte Reload + str r7, [sp, #116] @ 4-byte Spill + ldr r7, [sp, #140] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #80] @ 4-byte Reload + str r7, [sp, #120] @ 4-byte Spill + ldr r7, [sp, #164] @ 4-byte Reload + adc r7, r7, r11 + str r7, [sp, #124] @ 4-byte Spill + ldr r7, [sp, #100] @ 4-byte Reload + ands r7, r7, #1 + moveq r1, r6 + moveq r4, r10 + ldr r6, [sp, #124] @ 4-byte Reload + str r1, [r0, #56] + ldr r1, [sp, #84] @ 4-byte Reload + moveq r5, r1 + ldr r1, [sp, #136] @ 4-byte Reload + cmp r7, #0 + str r5, [r0, #60] + str r4, [r0, #64] + moveq r2, r1 + ldr r1, [sp, #128] @ 4-byte Reload + str r2, [r0, #68] + ldr r2, [sp, #96] @ 4-byte Reload + moveq r3, r1 + ldr r1, [sp, #144] @ 4-byte Reload + str r3, [r0, #72] + ldr r3, [sp, #116] @ 
4-byte Reload + moveq r12, r1 + ldr r1, [sp, #148] @ 4-byte Reload + cmp r7, #0 + str r12, [r0, #76] + moveq lr, r1 + ldr r1, [sp, #152] @ 4-byte Reload + str lr, [r0, #80] + moveq r8, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r8, [r0, #84] + moveq r9, r1 + ldr r1, [sp, #132] @ 4-byte Reload + cmp r7, #0 + str r9, [r0, #88] + moveq r2, r1 + ldr r1, [sp, #160] @ 4-byte Reload + str r2, [r0, #92] + ldr r2, [sp, #112] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #156] @ 4-byte Reload + moveq r3, r1 + ldr r1, [sp, #140] @ 4-byte Reload + cmp r7, #0 + ldr r7, [sp, #120] @ 4-byte Reload + moveq r7, r1 + ldr r1, [sp, #164] @ 4-byte Reload + moveq r6, r1 + add r1, r0, #96 + stm r1, {r2, r3, r7} + str r6, [r0, #108] + add sp, sp, #168 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end221: + .size mcl_fpDbl_sub14L, .Lfunc_end221-mcl_fpDbl_sub14L + .cantunwind + .fnend + + .align 2 + .type .LmulPv480x32,%function +.LmulPv480x32: @ @mulPv480x32 + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r11, lr} + push {r4, r5, r6, r7, r8, r9, r11, lr} + ldr r12, [r1] + ldmib r1, {r3, lr} + ldr r9, [r1, #12] + umull r4, r8, lr, r2 + umull lr, r6, r12, r2 + mov r5, r4 + mov r7, r6 + str lr, [r0] + umull lr, r12, r9, r2 + umlal r7, r5, r3, r2 + str r5, [r0, #8] + str r7, [r0, #4] + umull r5, r7, r3, r2 + adds r3, r6, r5 + adcs r3, r7, r4 + adcs r3, r8, lr + str r3, [r0, #12] + ldr r3, [r1, #16] + umull r7, r6, r3, r2 + adcs r3, r12, r7 + str r3, [r0, #16] + ldr r3, [r1, #20] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #20] + ldr r3, [r1, #24] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #24] + ldr r3, [r1, #28] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #28] + ldr r3, [r1, #32] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #32] + ldr r3, [r1, #36] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #36] + ldr r3, [r1, #40] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #40] + ldr r3, [r1, #44] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #44] + ldr r3, [r1, #48] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #48] + ldr r3, [r1, #52] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #52] + ldr r1, [r1, #56] + umull r3, r7, r1, r2 + adcs r1, r5, r3 + str r1, [r0, #56] + adc r1, r7, #0 + str r1, [r0, #60] + pop {r4, r5, r6, r7, r8, r9, r11, lr} + mov pc, lr +.Lfunc_end222: + .size .LmulPv480x32, .Lfunc_end222-.LmulPv480x32 + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre15L + .align 2 + .type mcl_fp_mulUnitPre15L,%function +mcl_fp_mulUnitPre15L: @ @mcl_fp_mulUnitPre15L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #84 + sub sp, sp, #84 + mov r4, r0 + add r0, sp, #16 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #76] + add r11, sp, #48 + add lr, sp, #20 + ldr r9, [sp, #64] + ldr r10, [sp, #60] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #72] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #68] + str r0, [sp, #4] @ 4-byte Spill + ldm r11, {r6, r8, r11} + ldr r7, [sp, #44] + ldr r5, [sp, #40] + ldr r1, [sp, #16] + ldm lr, {r0, r2, r3, r12, lr} + str r1, [r4] + stmib r4, {r0, r2, r3, r12, lr} + add r0, r4, #32 + str r5, [r4, #24] + str r7, [r4, #28] + stm r0, {r6, r8, r11} + str r10, [r4, #44] + str r9, [r4, #48] + ldr r0, [sp, #4] @ 4-byte Reload + str r0, [r4, #52] + ldr r0, [sp, #8] @ 4-byte Reload + str r0, [r4, #56] + ldr r0, [sp, #12] @ 4-byte Reload + str r0, [r4, #60] + add sp, sp, #84 + pop {r4, r5, r6, r7, r8, r9, r10, r11, 
lr} + mov pc, lr +.Lfunc_end223: + .size mcl_fp_mulUnitPre15L, .Lfunc_end223-mcl_fp_mulUnitPre15L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre15L + .align 2 + .type mcl_fpDbl_mulPre15L,%function +mcl_fpDbl_mulPre15L: @ @mcl_fpDbl_mulPre15L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #44 + sub sp, sp, #44 + .pad #1024 + sub sp, sp, #1024 + mov r3, r2 + mov r4, r0 + add r0, sp, #1000 + str r1, [sp, #96] @ 4-byte Spill + mov r8, r1 + ldr r2, [r3] + str r3, [sp, #92] @ 4-byte Spill + str r4, [sp, #100] @ 4-byte Spill + mov r6, r3 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1060] + ldr r1, [sp, #1004] + ldr r2, [r6, #4] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #1056] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #1008] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #1052] + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #1012] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #1048] + str r1, [sp, #28] @ 4-byte Spill + mov r1, r8 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #1044] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #1040] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #1036] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #1032] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1028] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1024] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1020] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1016] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1000] + str r0, [r4] + add r0, sp, #936 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #996] + add r10, sp, #960 + add lr, sp, #936 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #992] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #988] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #984] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #980] + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r5, r6, r7, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #24] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #4] + ldr r0, [sp, #52] @ 4-byte Reload + adcs r4, r1, r0 + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #92] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + ldr r2, [r6, #8] + adcs r0, r7, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r8 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str 
r0, [sp, #36] @ 4-byte Spill + add r0, sp, #872 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #932] + ldr r8, [sp, #872] + add r12, sp, #880 + ldr lr, [sp, #912] + ldr r7, [sp, #908] + ldr r11, [sp, #904] + ldr r9, [sp, #900] + ldr r10, [sp, #876] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #928] + adds r4, r8, r4 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #924] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #920] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #916] + str r0, [sp, #12] @ 4-byte Spill + ldm r12, {r0, r1, r2, r3, r12} + ldr r5, [sp, #100] @ 4-byte Reload + str r4, [r5, #8] + ldr r4, [sp, #52] @ 4-byte Reload + adcs r4, r10, r4 + str r4, [sp, #32] @ 4-byte Spill + ldr r4, [sp, #48] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #96] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r6, #12] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #808 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #868] + add r9, sp, #836 + add lr, sp, #816 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #864] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #860] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #856] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #852] + str r0, [sp, #12] @ 4-byte Spill + ldm r9, {r6, r7, r8, r9} + ldr r0, [sp, #808] + ldr r11, [sp, #812] + ldm lr, {r1, r2, r3, r12, lr} + ldr r10, [sp, #32] @ 4-byte Reload + adds r0, r0, r10 + str r0, [r5, #12] + ldr r0, [sp, #52] @ 4-byte Reload + ldr r5, [sp, #92] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #16] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r6, r0 + mov r6, r4 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + 
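+@ [annotation] mcl_fpDbl_mulPre15L appears to be schoolbook multiplication: each iteration
+@ calls .LmulPv480x32 to multiply the 15-limb operand by one 32-bit word of the other
+@ input, stores one completed output word, and folds the remaining partial-product words
+@ into the accumulator with an adcs chain.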
adcs r0, r8, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #744 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #804] + add lr, sp, #768 + add r12, sp, #748 + ldr r11, [sp, #780] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #800] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #796] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #792] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #788] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #784] + str r0, [sp, #8] @ 4-byte Spill + ldm lr, {r9, r10, lr} + ldr r8, [sp, #744] + ldm r12, {r0, r1, r2, r3, r12} + ldr r4, [sp, #32] @ 4-byte Reload + ldr r7, [sp, #100] @ 4-byte Reload + adds r4, r8, r4 + str r4, [r7, #16] + ldr r4, [sp, #52] @ 4-byte Reload + mov r7, r5 + adcs r4, r0, r4 + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #20] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #680 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #740] + ldr r9, [sp, #680] + add lr, sp, #684 + ldr r10, [sp, #720] + ldr r8, [sp, #716] + ldr r11, [sp, #712] + ldr r6, [sp, #708] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #736] + adds r4, r9, r4 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #732] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #728] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #724] + str r0, [sp, #12] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r5, [sp, #100] @ 4-byte Reload + str r4, [r5, #20] + ldr r4, [sp, #52] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #96] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, 
[sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r7, #24] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #616 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #676] + add r8, sp, #648 + add lr, sp, #624 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #672] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #668] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #664] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #660] + str r0, [sp, #12] @ 4-byte Spill + ldm r8, {r6, r7, r8} + ldr r10, [sp, #644] + ldr r0, [sp, #616] + ldr r11, [sp, #620] + ldm lr, {r1, r2, r3, r12, lr} + ldr r9, [sp, #32] @ 4-byte Reload + adds r0, r0, r9 + str r0, [r5, #24] + ldr r0, [sp, #56] @ 4-byte Reload + ldr r5, [sp, #92] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #28] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #552 + bl 
.LmulPv480x32(PLT) + ldr r0, [sp, #612] + add r11, sp, #584 + add r12, sp, #556 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #608] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #604] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #600] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #596] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r6, r7, r11} + ldr lr, [sp, #580] + ldr r9, [sp, #576] + ldr r10, [sp, #552] + ldm r12, {r0, r1, r2, r3, r12} + ldr r4, [sp, #32] @ 4-byte Reload + ldr r8, [sp, #100] @ 4-byte Reload + adds r4, r10, r4 + str r4, [r8, #28] + ldr r4, [sp, #60] @ 4-byte Reload + adcs r4, r0, r4 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #32] + ldr r5, [sp, #96] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #488 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #548] + ldr r9, [sp, #488] + add lr, sp, #492 + mov r6, r8 + ldr r10, [sp, #524] + ldr r11, [sp, #520] + ldr r7, [sp, #516] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #544] + adds r4, r9, r4 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #540] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #536] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #532] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #528] + str r0, [sp, #8] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + str r4, [r6, #32] + ldr r4, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #92] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r4, #36] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #60] @ 4-byte Spill + 
ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #424 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #484] + add r8, sp, #456 + add lr, sp, #432 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #480] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #476] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #472] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #468] + str r0, [sp, #12] @ 4-byte Spill + ldm r8, {r5, r7, r8} + ldr r10, [sp, #452] + ldr r0, [sp, #424] + ldr r11, [sp, #428] + ldm lr, {r1, r2, r3, r12, lr} + ldr r9, [sp, #32] @ 4-byte Reload + adds r0, r0, r9 + str r0, [r6, #36] + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r4, #40] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r5, r0 + ldr r5, [sp, #96] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r7, r0 + mov r7, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #360 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #420] + add r12, sp, #364 + ldr r11, [sp, #396] + ldr r6, [sp, #392] + ldr lr, [sp, #388] + ldr r9, [sp, #384] + ldr r10, [sp, #360] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #416] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #412] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #408] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #404] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #400] + str r0, [sp, #8] @ 4-byte Spill + ldm r12, {r0, r1, r2, r3, r12} + ldr r4, [sp, #32] @ 4-byte Reload + ldr r8, [sp, #100] @ 4-byte Reload + adds r4, r10, r4 + str r4, [r8, #40] + ldr r4, [sp, #72] @ 4-byte Reload + adcs r4, r0, r4 + ldr r0, [sp, #76] @ 4-byte Reload + adcs 
r0, r1, r0 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r7, #44] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + add r0, sp, #296 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #356] + ldr r9, [sp, #296] + add lr, sp, #300 + mov r5, r8 + ldr r10, [sp, #336] + ldr r7, [sp, #332] + ldr r11, [sp, #328] + ldr r6, [sp, #324] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #352] + adds r4, r9, r4 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #348] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #344] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #340] + str r0, [sp, #8] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + str r4, [r5, #44] + ldr r4, [sp, #76] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #92] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r4, #48] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #96] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #232 
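+@ Schoolbook row 12: the call below multiplies the full 15-limb operand in
+@ r1 by the single 32-bit limb loaded above from byte offset #48 of the
+@ other operand (r2), leaving a 16-limb partial product at sp+232 that the
+@ adcs chains that follow fold into the spilled accumulator.
+@ (.LmulPv480x32 is taken, from its name and its uses here, to be the
+@ 480-bit x 32-bit multiply helper.)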
+ bl .LmulPv480x32(PLT) + ldr r0, [sp, #292] + add lr, sp, #240 + ldr r8, [sp, #268] + ldr r7, [sp, #264] + ldr r10, [sp, #260] + ldr r3, [sp, #232] + ldr r11, [sp, #236] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #288] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #284] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #280] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #276] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #272] + str r0, [sp, #4] @ 4-byte Spill + ldm lr, {r0, r1, r2, r12, lr} + ldr r9, [sp, #28] @ 4-byte Reload + adds r3, r3, r9 + add r9, sp, #168 + str r3, [r5, #48] + ldr r3, [r4, #52] + ldr r4, [sp, #88] @ 4-byte Reload + adcs r4, r11, r4 + str r4, [sp, #32] @ 4-byte Spill + ldr r4, [sp, #84] @ 4-byte Reload + adcs r11, r0, r4 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #4] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r2, r0 + mov r2, r3 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r6 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + mov r0, r9 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #228] + add r12, sp, #172 + ldr r6, [sp, #204] + ldr r4, [sp, #200] + ldr lr, [sp, #196] + ldr r8, [sp, #192] + ldr r9, [sp, #188] + ldr r2, [sp, #168] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #224] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #220] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #216] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #212] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #208] + str r0, [sp, #4] @ 4-byte Spill + ldm r12, {r0, r1, r3, r12} + ldr r7, [sp, #32] @ 4-byte Reload + adds r2, r2, r7 + str r2, [r5, #52] + adcs r5, r0, r11 + ldr r0, [sp, #88] @ 4-byte Reload + ldr r2, [sp, #92] @ 4-byte Reload + adcs r7, r1, r0 + ldr r0, [sp, #84] @ 4-byte Reload + ldr r1, [sp, #4] @ 4-byte Reload + ldr r2, [r2, #56] + adcs r10, r3, r0 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r11, r12, r0 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #8] @ 
4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #96] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + add r0, sp, #104 + bl .LmulPv480x32(PLT) + add r3, sp, #104 + add r12, sp, #120 + ldm r3, {r0, r1, r2, r3} + adds r6, r0, r5 + ldr r0, [sp, #164] + adcs lr, r1, r7 + adcs r4, r2, r10 + adcs r7, r3, r11 + add r11, sp, #136 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #160] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #156] + str r0, [sp, #52] @ 4-byte Spill + ldm r11, {r5, r8, r9, r10, r11} + ldm r12, {r1, r2, r3, r12} + ldr r0, [sp, #100] @ 4-byte Reload + str r6, [r0, #56] + ldr r6, [sp, #28] @ 4-byte Reload + str lr, [r0, #60] + str r4, [r0, #64] + str r7, [r0, #68] + ldr r7, [sp, #80] @ 4-byte Reload + ldr r4, [sp, #56] @ 4-byte Reload + adcs r6, r1, r6 + ldr r1, [sp, #32] @ 4-byte Reload + str r6, [r0, #72] + ldr r6, [sp, #76] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #60] @ 4-byte Reload + str r1, [r0, #76] + ldr r1, [sp, #64] @ 4-byte Reload + adcs r2, r3, r2 + ldr r3, [sp, #84] @ 4-byte Reload + str r2, [r0, #80] + ldr r2, [sp, #68] @ 4-byte Reload + adcs r1, r12, r1 + str r1, [r0, #84] + ldr r1, [sp, #92] @ 4-byte Reload + adcs r12, r5, r2 + ldr r2, [sp, #88] @ 4-byte Reload + ldr r5, [sp, #52] @ 4-byte Reload + adcs r1, r8, r1 + str r12, [r0, #88] + add r12, r0, #92 + adcs r2, r9, r2 + adcs r3, r10, r3 + adcs r7, r11, r7 + adcs r6, r5, r6 + ldr r5, [sp, #72] @ 4-byte Reload + adcs r5, r4, r5 + ldr r4, [sp, #96] @ 4-byte Reload + stm r12, {r1, r2, r3, r7} + str r6, [r0, #108] + str r5, [r0, #112] + adc r4, r4, #0 + str r4, [r0, #116] + add sp, sp, #44 + add sp, sp, #1024 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end224: + .size mcl_fpDbl_mulPre15L, .Lfunc_end224-mcl_fpDbl_mulPre15L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre15L + .align 2 + .type mcl_fpDbl_sqrPre15L,%function +mcl_fpDbl_sqrPre15L: @ @mcl_fpDbl_sqrPre15L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #44 + sub sp, sp, #44 + .pad #1024 + sub sp, sp, #1024 + mov r5, r1 + mov r4, r0 + add r0, sp, #1000 + ldr r2, [r5] + str r4, [sp, #100] @ 4-byte Spill + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1060] + ldr r1, [sp, #1004] + ldr r2, [r5, #4] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #1056] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #1008] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #1052] + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #1012] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #1048] + str r1, [sp, #36] @ 4-byte Spill + mov r1, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #1044] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #1040] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #1036] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #1032] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #1028] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #1024] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1020] + str r0, [sp, 
#52] @ 4-byte Spill + ldr r0, [sp, #1016] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1000] + str r0, [r4] + add r0, sp, #936 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #996] + add r10, sp, #960 + add lr, sp, #936 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #992] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #988] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #984] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #980] + str r0, [sp, #20] @ 4-byte Spill + ldm r10, {r6, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #32] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #4] + ldr r0, [sp, #60] @ 4-byte Reload + adcs r4, r1, r0 + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #20] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #8] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #872 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #932] + add r12, sp, #896 + ldr lr, [sp, #912] + ldr r6, [sp, #908] + add r10, sp, #876 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #928] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #924] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #920] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #916] + str r0, [sp, #24] @ 4-byte Spill + ldm r12, {r9, r11, r12} + ldr r8, [sp, #872] + ldm r10, {r0, r1, r2, r3, r10} + ldr r7, [sp, #100] @ 4-byte Reload + adds r4, r8, r4 + str r4, [r7, #8] + ldr r4, [sp, #60] @ 4-byte Reload + adcs r4, r0, r4 + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #12] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #84] 
@ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #808 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #868] + add r10, sp, #836 + add lr, sp, #812 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #864] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #860] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #856] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #852] + str r0, [sp, #20] @ 4-byte Spill + ldm r10, {r6, r8, r9, r10} + ldr r11, [sp, #808] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r7, r11, r4 + ldr r4, [sp, #100] @ 4-byte Reload + str r7, [r4, #12] + ldr r7, [sp, #60] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #16] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #744 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #804] + add r8, sp, #776 + add lr, sp, #764 + add r12, sp, #744 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #800] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #796] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #792] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #788] + str r0, [sp, #20] @ 4-byte Spill + ldm r8, {r6, r7, r8} + ldm lr, {r9, r10, lr} + ldm r12, {r0, r1, r2, r3, r12} + ldr r11, [sp, #40] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #16] + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #20] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #68] @ 
4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #680 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #740] + add r8, sp, #712 + add lr, sp, #684 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #736] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #732] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #728] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #724] + str r0, [sp, #20] @ 4-byte Spill + ldm r8, {r4, r6, r8} + ldr r11, [sp, #708] + ldr r10, [sp, #680] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #40] @ 4-byte Reload + ldr r9, [sp, #100] @ 4-byte Reload + adds r7, r10, r7 + str r7, [r9, #20] + ldr r7, [sp, #60] @ 4-byte Reload + adcs r7, r0, r7 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #24] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #616 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #676] + add r10, sp, #644 + add lr, sp, #620 + mov r4, r9 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #672] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #668] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #664] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #660] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, 
[sp, #656] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r6, r8, r10} + ldr r11, [sp, #616] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r7, r11, r7 + str r7, [r4, #24] + ldr r7, [sp, #64] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #28] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #552 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #612] + add r8, sp, #584 + add lr, sp, #572 + add r12, sp, #552 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #608] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #604] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #600] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #596] + str r0, [sp, #20] @ 4-byte Spill + ldm r8, {r6, r7, r8} + ldm lr, {r9, r10, lr} + ldm r12, {r0, r1, r2, r3, r12} + ldr r11, [sp, #40] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #28] + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #32] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, 
[sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #488 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #548] + add r8, sp, #520 + add lr, sp, #492 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #544] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #540] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #536] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #532] + str r0, [sp, #20] @ 4-byte Spill + ldm r8, {r4, r6, r8} + ldr r11, [sp, #516] + ldr r10, [sp, #488] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #40] @ 4-byte Reload + ldr r9, [sp, #100] @ 4-byte Reload + adds r7, r10, r7 + str r7, [r9, #32] + ldr r7, [sp, #72] @ 4-byte Reload + adcs r7, r0, r7 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #36] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #424 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #484] + add r10, sp, #452 + add lr, sp, #428 + mov r4, r9 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #480] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #476] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #472] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #468] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #464] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r6, r8, r10} + ldr r11, [sp, #424] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r7, r11, r7 + str r7, [r4, #36] + ldr r7, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #40] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r8, 
r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #360 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #420] + add r8, sp, #392 + add lr, sp, #380 + add r12, sp, #360 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #416] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #412] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #408] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #404] + str r0, [sp, #16] @ 4-byte Spill + ldm r8, {r6, r7, r8} + ldm lr, {r9, r10, lr} + ldm r12, {r0, r1, r2, r3, r12} + ldr r11, [sp, #40] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #40] + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #44] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #296 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #356] + add r9, sp, #328 + add lr, sp, #300 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #352] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #348] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #344] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #340] + str r0, [sp, #16] @ 4-byte Spill + ldm r9, {r6, r8, r9} + ldr r11, [sp, #324] + ldr r10, [sp, #296] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #36] @ 4-byte Reload + adds r7, r10, r7 + str r7, [r4, #44] + ldr r7, [sp, #84] @ 4-byte Reload + adcs r7, r0, r7 + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + 
str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #48] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #232 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #292] + add r11, sp, #256 + add lr, sp, #236 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #288] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #284] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #280] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #276] + str r0, [sp, #16] @ 4-byte Spill + ldm r11, {r6, r8, r9, r10, r11} + ldr r12, [sp, #232] + ldm lr, {r0, r1, r2, r3, lr} + adds r7, r12, r7 + ldr r12, [r5, #52] + str r7, [r4, #48] + ldr r7, [sp, #96] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r7, r1, r0 + ldr r0, [sp, #88] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r2, r0 + mov r2, r12 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #168 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #228] + add lr, sp, #196 + add r12, sp, #172 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #224] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #220] + str r0, [sp, #24] @ 
4-byte Spill + ldr r0, [sp, #216] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #212] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #208] + str r0, [sp, #12] @ 4-byte Spill + ldm lr, {r8, r11, lr} + ldr r9, [sp, #192] + ldr r10, [sp, #188] + ldr r2, [sp, #168] + ldm r12, {r0, r1, r3, r12} + ldr r6, [sp, #40] @ 4-byte Reload + adds r2, r2, r6 + add r6, sp, #104 + str r2, [r4, #52] + adcs r4, r0, r7 + ldr r0, [sp, #96] @ 4-byte Reload + ldr r2, [r5, #56] + adcs r0, r1, r0 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r7, r3, r0 + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + mov r0, r6 + bl .LmulPv480x32(PLT) + add r3, sp, #104 + add r11, sp, #136 + add r12, sp, #120 + ldm r3, {r0, r1, r2, r3} + adds r6, r0, r4 + ldr r0, [sp, #8] @ 4-byte Reload + adcs lr, r1, r0 + ldr r0, [sp, #4] @ 4-byte Reload + adcs r4, r2, r7 + adcs r7, r3, r0 + ldr r0, [sp, #164] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #160] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #156] + str r0, [sp, #52] @ 4-byte Spill + ldm r11, {r5, r8, r9, r10, r11} + ldm r12, {r1, r2, r3, r12} + ldr r0, [sp, #100] @ 4-byte Reload + str r6, [r0, #56] + ldr r6, [sp, #36] @ 4-byte Reload + str lr, [r0, #60] + str r4, [r0, #64] + str r7, [r0, #68] + ldr r7, [sp, #84] @ 4-byte Reload + ldr r4, [sp, #56] @ 4-byte Reload + adcs r6, r1, r6 + ldr r1, [sp, #40] @ 4-byte Reload + str r6, [r0, #72] + ldr r6, [sp, #80] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #64] @ 4-byte Reload + str r1, [r0, #76] + ldr r1, [sp, #68] @ 4-byte Reload + adcs r2, r3, r2 + ldr r3, [sp, #88] @ 4-byte Reload + str r2, [r0, #80] + ldr r2, [sp, #72] @ 4-byte Reload + adcs r1, r12, r1 + str r1, [r0, #84] + ldr r1, [sp, #96] @ 4-byte Reload + adcs r12, r5, r2 + ldr r2, [sp, #92] @ 4-byte Reload + ldr r5, [sp, #52] @ 4-byte Reload + adcs r1, r8, r1 + str r12, [r0, #88] + add r12, r0, #92 + adcs r2, r9, r2 + adcs r3, r10, r3 + adcs r7, r11, r7 + adcs r6, r5, r6 + ldr r5, [sp, #76] @ 4-byte Reload + adcs r5, r4, r5 + ldr r4, [sp, #60] @ 4-byte Reload + stm r12, {r1, r2, r3, r7} + str r6, [r0, #108] + str r5, [r0, #112] + adc r4, r4, #0 + str r4, [r0, #116] + add sp, sp, #44 + add sp, sp, #1024 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end225: + .size mcl_fpDbl_sqrPre15L, .Lfunc_end225-mcl_fpDbl_sqrPre15L 
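+@ mcl_fpDbl_sqrPre15L (ended above) squares one 15-limb (480-bit) operand
+@ into a 30-limb result with no modular reduction: fifteen .LmulPv480x32
+@ calls, one per limb of the operand, accumulated with adcs carry chains
+@ through the 4-byte stack spill slots.
+@ mcl_fp_mont15L (below) reads as word-serial Montgomery multiplication;
+@ the word at [r3, #-4] is assumed to hold -p^{-1} mod 2^32, so that
+@ "mul r2, r5, r6" yields the quotient digit that zeroes the low limb
+@ once q*p is added in.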
+ .cantunwind + .fnend + + .globl mcl_fp_mont15L + .align 2 + .type mcl_fp_mont15L,%function +mcl_fp_mont15L: @ @mcl_fp_mont15L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #12 + sub sp, sp, #12 + .pad #2048 + sub sp, sp, #2048 + add r12, sp, #124 + add r7, sp, #1024 + mov r4, r3 + stm r12, {r1, r2, r3} + str r0, [sp, #88] @ 4-byte Spill + add r0, r7, #968 + ldr r6, [r3, #-4] + ldr r2, [r2] + str r6, [sp, #120] @ 4-byte Spill + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1996] + ldr r5, [sp, #1992] + add r7, sp, #1024 + mov r1, r4 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #2000] + mul r2, r5, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #2004] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #2052] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #2048] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #2044] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #2040] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #2036] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #2032] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #2028] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #2024] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #2020] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #2016] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #2012] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2008] + str r0, [sp, #48] @ 4-byte Spill + add r0, r7, #904 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1988] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r9, [sp, #1952] + ldr r6, [sp, #1948] + ldr r8, [sp, #1944] + ldr r4, [sp, #1928] + ldr r10, [sp, #1932] + ldr r11, [sp, #1936] + ldr r7, [sp, #1940] + add lr, sp, #1024 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1984] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1980] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1976] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1972] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1968] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1964] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1960] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1956] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #4] + add r0, lr, #840 + bl .LmulPv480x32(PLT) + adds r0, r4, r5 + ldr r1, [sp, #64] @ 4-byte Reload + ldr r2, [sp, #20] @ 4-byte Reload + ldr r3, [sp, #1880] + ldr r12, [sp, #1884] + ldr lr, [sp, #1888] + ldr r4, [sp, #1892] + ldr r5, [sp, #1896] + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r10, r0 + ldr r10, [sp, #1908] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r11, r0 + ldr r11, [sp, #92] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r7, r0 + ldr r7, [sp, #84] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r8, r0 + ldr r8, [sp, #1900] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #1864] + adcs r1, r9, r1 + str r0, [sp, #56] @ 4-byte Spill + mov r0, #0 + ldr r9, [sp, #1904] + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, 
[sp, #76] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #100] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #104] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #108] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #52] @ 4-byte Reload + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #112] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #60] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #116] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #1876] + adc r0, r0, #0 + adds r6, r11, r6 + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #1872] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1924] + str r6, [sp, #24] @ 4-byte Spill + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1920] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1916] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1912] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #1868] + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #1024 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, r10, #776 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1860] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #1828] + ldr r11, [sp, #1824] + ldr r8, [sp, #1820] + ldr r4, [sp, #1816] + ldr r5, [sp, #1800] + ldr r7, [sp, #1804] + ldr r9, [sp, #1808] + ldr r10, [sp, #1812] + add lr, sp, #1024 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1856] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1852] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1848] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1844] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1840] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1836] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1832] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, 
[r0, #8] + add r0, lr, #712 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #24] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1748] + ldr r3, [sp, #1752] + ldr r12, [sp, #1756] + ldr lr, [sp, #1760] + adds r0, r0, r5 + ldr r5, [sp, #1768] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1776] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + ldr r7, [sp, #112] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1780] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1764] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1772] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #116] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1736] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1744] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + adds r6, r11, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1796] + str r6, [sp, #32] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1792] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1788] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1784] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1740] + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #1024 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + 
str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, r10, #648 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1732] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #1700] + ldr r11, [sp, #1696] + ldr r8, [sp, #1692] + ldr r4, [sp, #1688] + ldr r5, [sp, #1672] + ldr r7, [sp, #1676] + ldr r9, [sp, #1680] + ldr r10, [sp, #1684] + add lr, sp, #1024 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1728] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1724] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1720] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1716] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1712] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1708] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1704] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, lr, #584 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1620] + ldr r3, [sp, #1624] + ldr r12, [sp, #1628] + ldr lr, [sp, #1632] + adds r0, r0, r5 + ldr r5, [sp, #1640] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1648] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + ldr r7, [sp, #112] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1652] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1636] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1644] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #116] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1608] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1616] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + adds r6, r11, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1668] + str r6, [sp, #32] @ 4-byte Spill + str r0, [sp, #48] 
@ 4-byte Spill + ldr r0, [sp, #1664] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1660] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1656] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1612] + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #1024 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, r10, #520 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1604] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #1572] + ldr r11, [sp, #1568] + ldr r8, [sp, #1564] + ldr r4, [sp, #1560] + ldr r5, [sp, #1544] + ldr r7, [sp, #1548] + ldr r9, [sp, #1552] + ldr r10, [sp, #1556] + add lr, sp, #1024 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1600] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1596] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1592] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1588] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1584] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1580] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1576] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, lr, #456 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1492] + ldr r3, [sp, #1496] + ldr r12, [sp, #1500] + ldr lr, [sp, #1504] + adds r0, r0, r5 + ldr r5, [sp, #1512] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1520] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + ldr r7, [sp, #112] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1524] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1508] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1516] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #116] @ 4-byte Reload + str 
r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1480] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1488] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + adds r6, r11, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1540] + str r6, [sp, #32] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1536] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1532] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1528] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1484] + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #1024 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, r10, #392 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1476] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #1444] + ldr r11, [sp, #1440] + ldr r8, [sp, #1436] + ldr r4, [sp, #1432] + ldr r5, [sp, #1416] + ldr r7, [sp, #1420] + ldr r9, [sp, #1424] + ldr r10, [sp, #1428] + add lr, sp, #1024 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1472] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, 
[sp, #1468] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1464] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1460] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1456] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1452] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1448] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #20] + add r0, lr, #328 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1364] + ldr r3, [sp, #1368] + ldr r12, [sp, #1372] + ldr lr, [sp, #1376] + adds r0, r0, r5 + ldr r5, [sp, #1384] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1392] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + ldr r7, [sp, #112] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1396] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1380] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1388] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #116] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1352] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1360] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + adds r6, r11, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1412] + str r6, [sp, #32] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1408] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1404] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1400] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1356] + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + 
ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #1024 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, r10, #264 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1348] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #1316] + ldr r11, [sp, #1312] + ldr r8, [sp, #1308] + ldr r4, [sp, #1304] + ldr r5, [sp, #1288] + ldr r7, [sp, #1292] + ldr r9, [sp, #1296] + ldr r10, [sp, #1300] + add lr, sp, #1024 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1344] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1340] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1336] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1332] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1328] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1324] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1320] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, lr, #200 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1236] + ldr r3, [sp, #1240] + ldr r12, [sp, #1244] + ldr lr, [sp, #1248] + adds r0, r0, r5 + ldr r5, [sp, #1256] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1264] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + ldr r7, [sp, #112] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1268] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1252] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1260] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #116] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1224] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + 
adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1232] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + adds r6, r11, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1284] + str r6, [sp, #32] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1280] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1276] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1272] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1228] + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #1024 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, r10, #136 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1220] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #1188] + ldr r11, [sp, #1184] + ldr r8, [sp, #1180] + ldr r4, [sp, #1176] + ldr r5, [sp, #1160] + ldr r7, [sp, #1164] + ldr r9, [sp, #1168] + ldr r10, [sp, #1172] + add lr, sp, #1024 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1216] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1212] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1208] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1204] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1200] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1196] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1192] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, lr, #72 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1108] + ldr r3, [sp, #1112] + ldr r12, [sp, #1116] + ldr lr, [sp, #1120] + adds r0, r0, r5 + ldr r5, [sp, #1128] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1136] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + ldr r7, [sp, #112] @ 4-byte Reload + adcs r0, r0, r10 + 
ldr r10, [sp, #1140] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1124] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1132] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #116] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1096] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1104] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + adds r6, r11, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1156] + str r6, [sp, #32] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1152] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1148] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1144] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1100] + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #1024 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, r10, #8 + bl 
.LmulPv480x32(PLT) + ldr r0, [sp, #1092] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #1060] + ldr r11, [sp, #1056] + ldr r8, [sp, #1052] + ldr r4, [sp, #1048] + ldr r5, [sp, #1032] + ldr r7, [sp, #1036] + ldr r9, [sp, #1040] + ldr r10, [sp, #1044] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1088] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1084] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1080] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1076] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1072] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1068] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1064] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, sp, #968 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + add lr, sp, #972 + adds r0, r0, r5 + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #996 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1028] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1024] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1020] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1016] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #968] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #116] @ 4-byte Reload + ldr r7, [sp, #112] @ 4-byte Reload + adds r6, r11, r6 + adcs r0, r7, r0 + str r6, [sp, #32] @ 4-byte Spill + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, 
r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #904 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #964] + add r11, sp, #920 + add r10, sp, #904 + ldr r6, [sp, #932] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #960] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #956] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #952] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #948] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #944] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #940] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #936] + str r0, [sp, #16] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldm r10, {r5, r7, r9, r10} + ldr r0, [sp, #128] @ 4-byte Reload + ldr r1, [sp, #124] @ 4-byte Reload + ldr r2, [r0, #36] + add r0, sp, #840 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + add lr, sp, #844 + adds r0, r0, r5 + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #880 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r11 + add r11, sp, #868 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs 
r0, r0, r1 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #900] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #896] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #892] + str r0, [sp, #40] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldm r11, {r4, r5, r11} + ldr r6, [sp, #840] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #116] @ 4-byte Reload + adds r6, r7, r6 + ldr r7, [sp, #112] @ 4-byte Reload + str r6, [sp, #32] @ 4-byte Spill + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #120] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + mul r2, r6, r11 + adcs r0, r0, r8 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + add r0, sp, #776 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #836] + add r10, sp, #776 + ldr r4, [sp, #800] + ldr r5, [sp, #796] + ldr r6, [sp, #792] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #832] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #828] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #824] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #820] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #816] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #812] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #808] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #804] + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r7, r8, r9, r10} + ldr r0, [sp, #128] @ 4-byte Reload + ldr r1, [sp, #124] @ 4-byte Reload + ldr r2, [r0, #40] + add r0, sp, #712 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #112] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + add lr, sp, #728 + adds r0, r0, r7 + ldr r7, [sp, #724] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r8 + adcs r1, r1, r9 + str r1, [sp, #112] @ 4-byte Spill + ldr r1, [sp, #108] @ 4-byte Reload + adcs r1, r1, r10 + add r10, sp, #752 + str r1, [sp, #108] @ 4-byte Spill + ldr r1, [sp, #104] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #716] + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [sp, #100] @ 4-byte Reload + adcs r1, r1, r5 + ldr r5, [sp, #720] + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte 
Reload + adcs r1, r1, r4 + ldr r4, [sp, #712] + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [sp, #92] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adc r1, r1, #0 + adds r0, r0, r4 + str r1, [sp, #52] @ 4-byte Spill + mul r1, r0, r11 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #772] + str r1, [sp, #44] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #768] + str r0, [sp, #40] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #112] @ 4-byte Reload + adcs r6, r11, r6 + str r6, [sp, #112] @ 4-byte Spill + ldr r6, [sp, #108] @ 4-byte Reload + adcs r5, r6, r5 + str r5, [sp, #108] @ 4-byte Spill + ldr r5, [sp, #104] @ 4-byte Reload + adcs r5, r5, r7 + str r5, [sp, #104] @ 4-byte Spill + ldr r5, [sp, #100] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + add r0, sp, #648 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #708] + add r10, sp, #648 + ldr r11, [sp, #676] + ldr r4, [sp, #672] + ldr r6, [sp, #668] + ldr r5, [sp, #664] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #704] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #700] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #696] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #692] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #688] + str 
r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #684] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #680] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r7, r8, r9, r10} + ldr r0, [sp, #128] @ 4-byte Reload + ldr r1, [sp, #124] @ 4-byte Reload + ldr r2, [r0, #44] + add r0, sp, #584 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #116] @ 4-byte Reload + ldr r1, [sp, #108] @ 4-byte Reload + ldr r2, [sp, #16] @ 4-byte Reload + add lr, sp, #600 + adds r0, r0, r7 + add r7, sp, #584 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r8 + adcs r1, r1, r9 + str r1, [sp, #112] @ 4-byte Spill + ldr r1, [sp, #104] @ 4-byte Reload + adcs r1, r1, r10 + add r10, sp, #624 + str r1, [sp, #108] @ 4-byte Spill + ldr r1, [sp, #100] @ 4-byte Reload + adcs r1, r1, r5 + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + adcs r1, r1, r6 + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #92] @ 4-byte Reload + adcs r1, r1, r4 + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r1, r11 + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adc r1, r1, #0 + str r1, [sp, #32] @ 4-byte Spill + ldm r7, {r4, r5, r6, r7} + adds r1, r0, r4 + ldr r0, [sp, #120] @ 4-byte Reload + str r1, [sp, #116] @ 4-byte Spill + mul r2, r1, r0 + ldr r0, [sp, #644] + str r2, [sp, #28] @ 4-byte Spill + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #640] + str r0, [sp, #20] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #112] @ 4-byte Reload + adcs r5, r11, r5 + str r5, [sp, #64] @ 4-byte Spill + ldr r5, [sp, #108] @ 4-byte Reload + adcs r5, r5, r6 + str r5, [sp, #60] @ 4-byte Spill + ldr r5, [sp, #104] @ 4-byte Reload + adcs r5, r5, r7 + str r5, [sp, #56] @ 4-byte Spill + ldr r5, [sp, #100] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + 
adcs r0, r0, r9 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + add r0, sp, #520 + bl .LmulPv480x32(PLT) + ldr r1, [sp, #580] + add r11, sp, #524 + ldr r10, [sp, #548] + ldr r5, [sp, #544] + ldr r6, [sp, #540] + ldr r7, [sp, #520] + add r0, sp, #456 + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #576] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #572] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #568] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #564] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #560] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #556] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #552] + str r1, [sp, #16] @ 4-byte Spill + ldm r11, {r4, r8, r9, r11} + ldr r1, [sp, #128] @ 4-byte Reload + ldr r2, [r1, #48] + ldr r1, [sp, #124] @ 4-byte Reload + bl .LmulPv480x32(PLT) + ldr r0, [sp, #116] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + add lr, sp, #460 + adds r0, r0, r7 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #484 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #516] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #512] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #508] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #504] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #456] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #116] @ 4-byte Reload + ldr r7, [sp, #64] @ 4-byte Reload + adds r6, r11, r6 + adcs r0, r7, r0 + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 
4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #392 + bl .LmulPv480x32(PLT) + ldr r1, [sp, #452] + ldr r6, [sp, #420] + ldr r7, [sp, #416] + ldr r9, [sp, #412] + ldr r4, [sp, #408] + ldr r10, [sp, #392] + ldr r11, [sp, #396] + ldr r8, [sp, #400] + ldr r5, [sp, #404] + add r0, sp, #328 + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #448] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #444] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #440] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #436] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #432] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #428] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #424] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #128] @ 4-byte Reload + ldr r2, [r1, #52] + ldr r1, [sp, #124] @ 4-byte Reload + bl .LmulPv480x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + add lr, sp, #332 + adds r0, r0, r10 + add r10, sp, #356 + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte 
Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #388] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #384] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #380] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #376] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #328] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #68] @ 4-byte Reload + ldr r7, [sp, #64] @ 4-byte Reload + adds r6, r11, r6 + adcs r0, r7, r0 + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #264 + bl .LmulPv480x32(PLT) + ldr r1, [sp, #324] + add r9, sp, #276 + ldr r6, [sp, #292] + ldr r7, [sp, #288] + ldr r10, [sp, #264] + ldr r11, [sp, #268] + ldr r5, [sp, #272] + add r0, sp, #200 + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #320] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #316] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #312] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #308] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #304] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #300] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #296] + str r1, [sp, #16] @ 4-byte Spill + ldm r9, {r4, r8, r9} + ldr r1, [sp, #128] @ 4-byte Reload + ldr r2, [r1, #56] + ldr r1, [sp, #124] @ 4-byte Reload + bl .LmulPv480x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #64] @ 4-byte Reload + ldr r2, [sp, #16] @ 4-byte Reload + add lr, sp, #216 + 
adds r0, r0, r10 + ldr r10, [sp, #212] + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r11 + adcs r1, r1, r5 + ldr r5, [sp, #208] + str r1, [sp, #128] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r4 + ldr r4, [sp, #200] + str r1, [sp, #124] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r8 + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r9 + add r9, sp, #240 + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #116] @ 4-byte Reload + adcs r1, r1, r7 + str r1, [sp, #116] @ 4-byte Spill + ldr r1, [sp, #112] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #204] + str r1, [sp, #112] @ 4-byte Spill + ldr r1, [sp, #108] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #108] @ 4-byte Spill + ldr r1, [sp, #104] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [sp, #100] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [sp, #92] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adc r1, r1, #0 + adds r7, r0, r4 + ldr r0, [sp, #120] @ 4-byte Reload + str r1, [sp, #72] @ 4-byte Spill + mul r1, r7, r0 + ldr r0, [sp, #260] + str r1, [sp, #60] @ 4-byte Spill + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #256] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #252] + str r0, [sp, #120] @ 4-byte Spill + ldm r9, {r4, r8, r9} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #128] @ 4-byte Reload + adcs r11, r11, r6 + ldr r6, [sp, #124] @ 4-byte Reload + adcs r5, r6, r5 + ldr r6, [sp, #68] @ 4-byte Reload + adcs r10, r6, r10 + ldr r6, [sp, #64] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #120] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #60] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r8, r0, r8 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r6, r0, r9 + ldr r0, [sp, #80] @ 4-byte Reload + ldr r9, [sp, #132] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r9 + str r0, [sp, #128] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + add r0, sp, #136 + bl .LmulPv480x32(PLT) 
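+ @ End of the word-serial loop of mcl_fp_mont15L: the .LmulPv480x32 call
+ @ above writes the last m*p partial product to sp+136, where m is the low
+ @ accumulator word multiplied by the Montgomery constant reloaded from
+ @ [sp, #120]. The tail below folds that product into the accumulator
+ @ (adds/adcs), trial-subtracts the modulus (subs/sbcs), and uses the
+ @ resulting borrow (ands/movne) to select acc or acc - p as the final
+ @ 15-word result: the conditional subtraction that closes a Montgomery
+ @ multiplication.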
+ add r3, sp, #136 + ldm r3, {r0, r1, r2, r3} + adds r0, r7, r0 + adcs r11, r11, r1 + ldr r0, [sp, #152] + ldr r1, [sp, #48] @ 4-byte Reload + adcs lr, r5, r2 + mov r5, r9 + str r11, [sp, #44] @ 4-byte Spill + adcs r10, r10, r3 + str lr, [sp, #52] @ 4-byte Spill + str r10, [sp, #60] @ 4-byte Spill + adcs r4, r1, r0 + ldr r0, [sp, #156] + ldr r1, [sp, #56] @ 4-byte Reload + str r4, [sp, #76] @ 4-byte Spill + adcs r12, r1, r0 + ldr r0, [sp, #160] + ldr r1, [sp, #64] @ 4-byte Reload + str r12, [sp, #56] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #68] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #164] + adcs r0, r1, r0 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #168] + adcs r0, r1, r0 + ldr r1, [sp, #112] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #172] + adcs r0, r1, r0 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #176] + adcs r0, r1, r0 + ldr r1, [sp, #120] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #180] + adcs r0, r8, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #184] + adcs r0, r6, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #188] + adcs r0, r1, r0 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #192] + adcs r0, r1, r0 + ldr r1, [sp, #128] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #196] + adcs r0, r1, r0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldmib r5, {r1, r2} + ldr r3, [r5, #16] + ldr r7, [r5] + ldr r0, [r5, #12] + ldr r6, [r5, #20] + ldr r9, [r5, #24] + ldr r8, [r5, #32] + str r3, [sp, #80] @ 4-byte Spill + ldr r3, [r5, #28] + subs r7, r11, r7 + add r11, r5, #36 + str r3, [sp, #84] @ 4-byte Spill + sbcs r3, lr, r1 + sbcs lr, r10, r2 + ldm r11, {r1, r10, r11} + sbcs r4, r4, r0 + ldr r0, [r5, #48] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r5, #52] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r5, #56] + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + sbcs r2, r12, r0 + ldr r0, [sp, #92] @ 4-byte Reload + sbcs r12, r0, r6 + ldr r0, [sp, #96] @ 4-byte Reload + ldr r6, [sp, #84] @ 4-byte Reload + sbcs r5, r0, r9 + ldr r0, [sp, #100] @ 4-byte Reload + sbcs r6, r0, r6 + ldr r0, [sp, #104] @ 4-byte Reload + sbcs r8, r0, r8 + ldr r0, [sp, #108] @ 4-byte Reload + sbcs r9, r0, r1 + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #64] @ 4-byte Reload + sbcs r10, r0, r10 + ldr r0, [sp, #116] @ 4-byte Reload + sbcs r11, r0, r11 + ldr r0, [sp, #120] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #68] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + sbcs r0, r0, r1 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + sbc r0, r0, #0 + ands r1, r0, #1 + ldr r0, [sp, #44] @ 4-byte Reload + movne r7, r0 + ldr r0, [sp, #88] @ 4-byte Reload + str r7, [r0] + ldr r7, [sp, #52] @ 4-byte Reload + movne r3, r7 + str r3, [r0, #4] + ldr r3, [sp, #60] @ 4-byte Reload + movne lr, r3 + ldr r3, [sp, #76] @ 4-byte Reload + cmp r1, #0 + str lr, [r0, #8] + movne r4, r3 + ldr r3, [sp, #56] @ 4-byte Reload + str r4, [r0, #12] + movne r2, r3 + str r2, [r0, #16] + ldr r2, [sp, #92] @ 4-byte Reload + movne r12, r2 + ldr r2, [sp, #96] @ 4-byte Reload + cmp r1, #0 + str r12, [r0, #20] 
+ movne r5, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ str r5, [r0, #24]
+ movne r6, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r6, [r0, #28]
+ movne r8, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #32]
+ movne r9, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str r9, [r0, #36]
+ movne r10, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r10, [r0, #40]
+ movne r11, r2
+ cmp r1, #0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r11, [r0, #44]
+ movne r2, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r2, [r0, #48]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r2, [r0, #52]
+ ldr r2, [sp, #132] @ 4-byte Reload
+ movne r2, r1
+ str r2, [r0, #56]
+ add sp, sp, #12
+ add sp, sp, #2048
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end226:
+ .size mcl_fp_mont15L, .Lfunc_end226-mcl_fp_mont15L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF15L
+ .align 2
+ .type mcl_fp_montNF15L,%function
+mcl_fp_montNF15L: @ @mcl_fp_montNF15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #4
+ sub sp, sp, #4
+ .pad #2048
+ sub sp, sp, #2048
+ add r12, sp, #116
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #76] @ 4-byte Spill
+ add r0, sp, #1984
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #112] @ 4-byte Spill
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1984]
+ ldr r1, [sp, #1988]
+ str r0, [sp, #60] @ 4-byte Spill
+ mul r2, r0, r5
+ ldr r0, [sp, #2044]
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #1992]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #2040]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #1996]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #2036]
+ str r1, [sp, #80] @ 4-byte Spill
+ mov r1, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #2032]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #2028]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #2024]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #2020]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #2016]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #2012]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2008]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2004]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2000]
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #1920
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1980]
+ add r7, sp, #1936
+ add r11, sp, #1920
+ ldr r6, [sp, #1948]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1976]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1972]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1968]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1964]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1960]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1956]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1952]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r7, {r4, r5, r7}
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r8, [sp, #1932]
+ ldr r2, [r0, #4]
+ add r0, sp, #1856
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1868]
+ ldr r3, [sp, #1872]
+ ldr r12, [sp, #1876]
+ ldr lr, [sp, #1880]
+ adds r0, r9, r0
+ ldr r9, [sp, #1896]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #1900]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #88] @ 4-byte Reload
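+@ (annotation, not part of the generated source) Above is the prologue of
+@ mcl_fp_montNF15L: r5 is loaded from [r3, #-4], which appears to hold
+@ n' = -p^(-1) mod 2^32 just below the modulus array, and "mul r2, r0, r5"
+@ forms the Montgomery digit m = t[0]*n' mod 2^32, so the m*p product
+@ computed by the following .LmulPv480x32 call cancels the low word of the
+@ accumulator.
+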
str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r8, r0 + ldr r8, [sp, #1892] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #1884] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r5, r0 + ldr r5, [sp, #1888] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r7, r0 + ldr r7, [sp, #84] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #1856] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adc r0, r1, r0 + adds r6, r11, r6 + ldr r1, [sp, #1864] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1916] + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1912] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1908] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1904] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #1860] + adcs r0, r7, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #1792 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1852] + add r11, sp, #1808 + add r10, sp, #1792 + ldr r6, [sp, #1820] + str r0, [sp, #44] @ 4-byte 
Spill + ldr r0, [sp, #1848] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1844] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1840] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1836] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1832] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1828] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1824] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldm r10, {r5, r7, r9, r10} + ldr r0, [sp, #120] @ 4-byte Reload + ldr r1, [sp, #116] @ 4-byte Reload + ldr r2, [r0, #8] + add r0, sp, #1728 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1740] + ldr r3, [sp, #1744] + ldr r12, [sp, #1748] + ldr lr, [sp, #1752] + adds r0, r0, r5 + ldr r5, [sp, #1760] + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1768] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + ldr r7, [sp, #104] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1772] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1756] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1764] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #108] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1728] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, r1 + adds r6, r11, r6 + ldr r1, [sp, #1736] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1788] + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1784] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1780] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1776] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1732] + adcs r0, r7, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 
4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #1664 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1724] + add r11, sp, #1680 + add r10, sp, #1664 + ldr r6, [sp, #1692] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1720] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1716] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1712] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1708] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1704] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1700] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1696] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldm r10, {r5, r7, r9, r10} + ldr r0, [sp, #120] @ 4-byte Reload + ldr r1, [sp, #116] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, sp, #1600 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1612] + ldr r3, [sp, #1616] + ldr r12, [sp, #1620] + ldr lr, [sp, #1624] + adds r0, r0, r5 + ldr r5, [sp, #1632] + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1640] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + ldr r7, [sp, #104] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1644] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1628] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1636] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #108] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1600] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, r1 
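+@ (annotation, not part of the generated source) Each repeated block of this
+@ shape consumes one more 32-bit word of the multiplier: it calls
+@ .LmulPv480x32 twice per word, once for x*y[i] and once for m*p, and folds
+@ the 16-word partial products into the spilled accumulator with adcs; only
+@ the stack offsets differ from block to block.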
+ adds r6, r11, r6 + ldr r1, [sp, #1608] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1660] + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1656] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1652] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1648] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1604] + adcs r0, r7, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #1536 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1596] + add r11, sp, #1552 + add r10, sp, #1536 + ldr r6, [sp, #1564] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1592] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1588] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1584] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1580] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1576] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1572] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1568] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldm r10, {r5, r7, r9, r10} + ldr r0, [sp, #120] @ 4-byte Reload + ldr r1, [sp, #116] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, sp, #1472 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1484] + ldr r3, [sp, #1488] + ldr r12, [sp, #1492] + ldr lr, [sp, #1496] + adds r0, r0, r5 + ldr r5, [sp, #1504] + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1512] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + ldr r7, [sp, #104] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1516] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1500] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1508] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #108] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 
4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1472] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, r1 + adds r6, r11, r6 + ldr r1, [sp, #1480] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1532] + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1528] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1524] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1520] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1476] + adcs r0, r7, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #1408 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1468] + add r11, sp, #1424 + add r10, sp, #1408 + ldr r6, [sp, #1436] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1464] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1460] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1456] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1452] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1448] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1444] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1440] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldm r10, {r5, r7, r9, r10} + ldr r0, [sp, #120] @ 4-byte Reload + ldr r1, [sp, 
#116] @ 4-byte Reload + ldr r2, [r0, #20] + add r0, sp, #1344 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1356] + ldr r3, [sp, #1360] + ldr r12, [sp, #1364] + ldr lr, [sp, #1368] + adds r0, r0, r5 + ldr r5, [sp, #1376] + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1384] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + ldr r7, [sp, #104] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1388] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1372] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1380] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #108] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1344] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, r1 + adds r6, r11, r6 + ldr r1, [sp, #1352] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1404] + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1400] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1396] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1392] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1348] + adcs r0, r7, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, 
r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #1280 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1340] + add r11, sp, #1296 + add r10, sp, #1280 + ldr r6, [sp, #1308] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1336] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1332] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1328] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1324] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1320] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1316] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1312] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldm r10, {r5, r7, r9, r10} + ldr r0, [sp, #120] @ 4-byte Reload + ldr r1, [sp, #116] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, sp, #1216 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1228] + ldr r3, [sp, #1232] + ldr r12, [sp, #1236] + ldr lr, [sp, #1240] + adds r0, r0, r5 + ldr r5, [sp, #1248] + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1256] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + ldr r7, [sp, #104] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1260] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1244] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1252] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #108] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1216] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, r1 + adds r6, r11, r6 + ldr r1, [sp, #1224] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1276] + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1272] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1268] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1264] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1220] + adcs r0, r7, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + 
str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #1152 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1212] + add r11, sp, #1168 + add r10, sp, #1152 + ldr r6, [sp, #1180] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1208] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1204] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1200] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1196] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1192] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1188] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1184] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldm r10, {r5, r7, r9, r10} + ldr r0, [sp, #120] @ 4-byte Reload + ldr r1, [sp, #116] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, sp, #1088 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1100] + ldr r3, [sp, #1104] + ldr r12, [sp, #1108] + ldr lr, [sp, #1112] + adds r0, r0, r5 + ldr r5, [sp, #1120] + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1128] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + ldr r7, [sp, #104] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1132] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1116] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1124] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #108] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1088] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, 
r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, r1 + adds r6, r11, r6 + ldr r1, [sp, #1096] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1148] + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1144] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1140] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1136] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1092] + adcs r0, r7, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #1024 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1084] + add r11, sp, #1040 + add r10, sp, #1024 + ldr r6, [sp, #1052] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1080] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1076] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1072] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1068] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1064] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1060] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1056] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldm r10, {r5, r7, r9, r10} + ldr r0, [sp, #120] @ 4-byte Reload + ldr r1, [sp, #116] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, sp, #960 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #964 + adds r0, r0, r5 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #988 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 
4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1020] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1016] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1012] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1008] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #960] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #108] @ 4-byte Reload + ldr r7, [sp, #104] @ 4-byte Reload + adds r6, r11, r6 + adcs r0, r7, r0 + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #896 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #956] + add r11, sp, #912 + add r10, sp, #896 + ldr r6, [sp, #924] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #952] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #948] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #944] + str r0, 
[sp, #32] @ 4-byte Spill + ldr r0, [sp, #940] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #936] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #932] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #928] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r8, r11} + ldm r10, {r5, r7, r9, r10} + ldr r0, [sp, #120] @ 4-byte Reload + ldr r1, [sp, #116] @ 4-byte Reload + ldr r2, [r0, #36] + add r0, sp, #832 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #836 + adds r0, r0, r5 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #860 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #892] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #888] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #884] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #880] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #832] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #108] @ 4-byte Reload + ldr r7, [sp, #104] @ 4-byte Reload + adds r6, r11, r6 + adcs r0, r7, r0 + ldr r7, [sp, #112] @ 4-byte Reload + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r6, r7 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill 
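+@ (annotation, not part of the generated source) The same per-word step
+@ continues below for the remaining words of y (offsets #40 through #56).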
+ ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + add r0, sp, #768 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #828] + add r11, sp, #768 + ldr r6, [sp, #792] + ldr r5, [sp, #788] + ldr r8, [sp, #784] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #824] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #820] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #816] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #812] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #808] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #804] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #800] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #796] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldr r0, [sp, #120] @ 4-byte Reload + ldr r1, [sp, #116] @ 4-byte Reload + ldr r4, [sp, #780] + ldr r2, [r0, #40] + add r0, sp, #704 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #720 + adds r0, r0, r9 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r2, r0, r10 + ldr r0, [sp, #104] @ 4-byte Reload + add r10, sp, #744 + adcs r0, r0, r11 + ldr r11, [sp, #708] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #716] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #704] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #712] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #48] @ 4-byte Spill + adds r0, r2, r5 + mul r1, r0, r7 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #764] + str r1, [sp, #40] @ 4-byte Spill + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #760] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r4, r5, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #104] @ 4-byte Reload + adcs r7, r7, r11 + str r7, [sp, #104] @ 4-byte Spill + ldr 
r7, [sp, #100] @ 4-byte Reload + adcs r6, r7, r6 + str r6, [sp, #100] @ 4-byte Spill + ldr r6, [sp, #96] @ 4-byte Reload + adcs r6, r6, r8 + str r6, [sp, #96] @ 4-byte Spill + ldr r6, [sp, #92] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #640 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #700] + add r7, sp, #656 + add r11, sp, #640 + ldr r4, [sp, #668] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #696] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #692] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #688] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #684] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #680] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #676] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #672] + str r0, [sp, #12] @ 4-byte Spill + ldm r7, {r5, r6, r7} + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #120] @ 4-byte Reload + ldr r1, [sp, #116] @ 4-byte Reload + ldr r2, [r0, #44] + add r0, sp, #576 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #108] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #592 + adds r0, r0, r8 + ldr r0, [sp, #104] @ 4-byte Reload + adcs r2, r0, r9 + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #616 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r7 + add r7, sp, #576 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + 
str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #40] @ 4-byte Spill + ldm r7, {r4, r6, r7} + ldr r0, [sp, #112] @ 4-byte Reload + ldr r5, [sp, #588] + adds r1, r2, r4 + mul r2, r1, r0 + ldr r0, [sp, #636] + str r1, [sp, #108] @ 4-byte Spill + str r2, [sp, #24] @ 4-byte Spill + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #632] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #104] @ 4-byte Reload + adcs r6, r11, r6 + str r6, [sp, #60] @ 4-byte Spill + ldr r6, [sp, #100] @ 4-byte Reload + adcs r6, r6, r7 + str r6, [sp, #56] @ 4-byte Spill + ldr r6, [sp, #96] @ 4-byte Reload + adcs r5, r6, r5 + str r5, [sp, #52] @ 4-byte Spill + ldr r5, [sp, #92] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + add r0, sp, #512 + bl .LmulPv480x32(PLT) + ldr r1, [sp, #572] + add r11, sp, #520 + ldr r8, [sp, #540] + ldr r9, [sp, #536] + ldr r10, [sp, #532] + ldr r6, [sp, #512] + ldr r7, [sp, #516] + add r0, sp, #448 + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #568] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #564] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #560] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #556] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #552] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #548] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #544] + str r1, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r5, r11} + ldr r1, [sp, #120] @ 4-byte Reload + ldr r2, [r1, #48] + ldr r1, [sp, #116] @ 4-byte Reload + bl .LmulPv480x32(PLT) + ldr r0, [sp, #108] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #452 + adds r0, r0, r6 + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #476 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs 
r0, r0, r8 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #508] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #504] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #500] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #496] + str r0, [sp, #28] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #448] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #108] @ 4-byte Reload + ldr r7, [sp, #60] @ 4-byte Reload + adds r6, r11, r6 + adcs r0, r7, r0 + str r6, [sp, #24] @ 4-byte Spill + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #384 + bl .LmulPv480x32(PLT) + ldr r1, [sp, #444] + add r9, sp, #396 + ldr r11, [sp, #412] + ldr r8, [sp, #408] + ldr r5, [sp, #384] + ldr r4, [sp, #388] + ldr r10, [sp, #392] + add r0, sp, #320 + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #440] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #436] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #432] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #428] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #424] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #420] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, 
#416] + str r1, [sp, #12] @ 4-byte Spill + ldm r9, {r6, r7, r9} + ldr r1, [sp, #120] @ 4-byte Reload + ldr r2, [r1, #52] + ldr r1, [sp, #116] @ 4-byte Reload + bl .LmulPv480x32(PLT) + ldr r0, [sp, #24] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #324 + adds r0, r0, r5 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #348 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #380] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #376] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #372] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #368] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #320] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #64] @ 4-byte Reload + ldr r7, [sp, #60] @ 4-byte Reload + adds r6, r11, r6 + adcs r0, r7, r0 + str r6, [sp, #24] @ 4-byte Spill + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str 
r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #256 + bl .LmulPv480x32(PLT) + ldr r1, [sp, #316] + add r11, sp, #260 + ldr r8, [sp, #284] + ldr r9, [sp, #280] + ldr r10, [sp, #276] + ldr r7, [sp, #256] + add r0, sp, #192 + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #312] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #308] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #304] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #300] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #296] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #292] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #288] + str r1, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r5, r6, r11} + ldr r1, [sp, #120] @ 4-byte Reload + ldr r2, [r1, #56] + ldr r1, [sp, #116] @ 4-byte Reload + bl .LmulPv480x32(PLT) + ldr r0, [sp, #24] @ 4-byte Reload + ldr r1, [sp, #60] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + add lr, sp, #208 + adds r0, r0, r7 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + adcs r1, r1, r5 + str r1, [sp, #120] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r6 + str r1, [sp, #116] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r11 + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + adcs r1, r1, r10 + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #108] @ 4-byte Reload + adcs r1, r1, r9 + add r9, sp, #192 + str r1, [sp, #108] @ 4-byte Spill + ldr r1, [sp, #104] @ 4-byte Reload + adcs r1, r1, r8 + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [sp, #100] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [sp, #92] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adc r1, r1, r2 + str r1, [sp, #68] @ 4-byte Spill + ldm r9, {r4, r8, r9} + ldr r7, [sp, #204] + ldr r10, [sp, #236] + adds r5, r0, r4 + ldr r0, [sp, #112] @ 4-byte Reload + ldr r4, [sp, #232] + mul r1, r5, r0 + ldr r0, [sp, #252] + str r1, [sp, #56] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #248] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #244] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #240] + str r0, [sp, #36] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #120] @ 4-byte Reload + ldr r6, [sp, #116] @ 4-byte Reload + adcs r8, r11, r8 + adcs r9, r6, r9 + ldr r6, [sp, #64] @ 4-byte Reload + adcs r7, r6, r7 + ldr r6, [sp, #60] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str 
r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #56] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r6, r0, r10 + ldr r0, [sp, #80] @ 4-byte Reload + ldr r10, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #112] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r10 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + add r0, sp, #128 + bl .LmulPv480x32(PLT) + add r3, sp, #128 + ldm r3, {r0, r1, r2, r3} + adds r0, r5, r0 + adcs r11, r8, r1 + ldr r0, [sp, #144] + ldr r1, [sp, #64] @ 4-byte Reload + adcs lr, r9, r2 + str r11, [sp, #40] @ 4-byte Spill + adcs r8, r7, r3 + str lr, [sp, #48] @ 4-byte Spill + str r8, [sp, #56] @ 4-byte Spill + adcs r4, r1, r0 + ldr r0, [sp, #148] + ldr r1, [sp, #44] @ 4-byte Reload + str r4, [sp, #64] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #152] + adcs r0, r1, r0 + ldr r1, [sp, #60] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #156] + adcs r0, r1, r0 + ldr r1, [sp, #96] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #160] + adcs r0, r1, r0 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #164] + adcs r0, r1, r0 + ldr r1, [sp, #104] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #168] + adcs r0, r1, r0 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #172] + adcs r0, r6, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #176] + adcs r0, r1, r0 + ldr r1, [sp, #112] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #180] + adcs r0, r1, r0 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #184] + adcs r0, r1, r0 + ldr r1, [sp, #120] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #188] + adc r0, r1, r0 + mov r1, r10 + add r10, r1, #20 + str r0, [sp, #120] @ 4-byte Spill + ldmib r1, {r0, r6} + ldr r2, [r1, #12] + ldr r12, [r1, #16] + ldm r10, {r5, r9, r10} + ldr r7, [r1] + subs r7, r11, r7 + ldr r11, [r1, #36] + sbcs r3, lr, r0 + ldr r0, [r1, #32] + sbcs lr, r8, r6 + ldr r8, [r1, #40] + sbcs r4, r4, r2 + ldr r2, [r1, #44] + str r2, [sp, #68] @ 4-byte Spill + ldr r2, [r1, #48] + str r2, [sp, #72] @ 4-byte Spill + ldr r2, [r1, #52] + ldr r1, [r1, #56] + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + str r2, [sp, #52] @ 4-byte Spill + sbcs r2, r1, r12 + ldr r1, [sp, #84] @ 4-byte Reload + sbcs r12, r1, r5 + ldr r1, [sp, #88] @ 4-byte Reload + sbcs r5, r1, r9 + ldr r1, [sp, #92] @ 4-byte Reload + sbcs r6, r1, r10 + ldr r1, [sp, #96] @ 4-byte Reload + sbcs r9, r1, r0 + ldr r0, [sp, #100] @ 4-byte Reload + ldr r1, [sp, #68] @ 4-byte Reload + sbcs r10, r0, r11 + ldr r0, [sp, #104] @ 4-byte Reload + sbcs r11, r0, r8 + ldr r0, [sp, #108] @ 
4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #72] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #60] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + sbc r8, r0, r1 + ldr r0, [sp, #40] @ 4-byte Reload + asr r1, r8, #31 + cmp r1, #0 + movlt r7, r0 + ldr r0, [sp, #76] @ 4-byte Reload + str r7, [r0] + ldr r7, [sp, #48] @ 4-byte Reload + movlt r3, r7 + str r3, [r0, #4] + ldr r3, [sp, #56] @ 4-byte Reload + movlt lr, r3 + ldr r3, [sp, #64] @ 4-byte Reload + cmp r1, #0 + str lr, [r0, #8] + movlt r4, r3 + ldr r3, [sp, #80] @ 4-byte Reload + str r4, [r0, #12] + movlt r2, r3 + ldr r3, [sp, #68] @ 4-byte Reload + str r2, [r0, #16] + ldr r2, [sp, #84] @ 4-byte Reload + movlt r12, r2 + ldr r2, [sp, #88] @ 4-byte Reload + cmp r1, #0 + str r12, [r0, #20] + movlt r5, r2 + ldr r2, [sp, #92] @ 4-byte Reload + str r5, [r0, #24] + movlt r6, r2 + ldr r2, [sp, #96] @ 4-byte Reload + str r6, [r0, #28] + movlt r9, r2 + ldr r2, [sp, #100] @ 4-byte Reload + cmp r1, #0 + str r9, [r0, #32] + movlt r10, r2 + ldr r2, [sp, #104] @ 4-byte Reload + str r10, [r0, #36] + movlt r11, r2 + ldr r2, [sp, #108] @ 4-byte Reload + str r11, [r0, #40] + movlt r3, r2 + cmp r1, #0 + ldr r1, [sp, #112] @ 4-byte Reload + ldr r2, [sp, #72] @ 4-byte Reload + str r3, [r0, #44] + movlt r2, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str r2, [r0, #48] + ldr r2, [sp, #124] @ 4-byte Reload + movlt r2, r1 + ldr r1, [sp, #120] @ 4-byte Reload + str r2, [r0, #52] + movlt r8, r1 + str r8, [r0, #56] + add sp, sp, #4 + add sp, sp, #2048 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end227: + .size mcl_fp_montNF15L, .Lfunc_end227-mcl_fp_montNF15L + .cantunwind + .fnend + + .globl mcl_fp_montRed15L + .align 2 + .type mcl_fp_montRed15L,%function +mcl_fp_montRed15L: @ @mcl_fp_montRed15L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #148 + sub sp, sp, #148 + .pad #1024 + sub sp, sp, #1024 + mov r3, r2 + str r0, [sp, #192] @ 4-byte Spill + ldr r2, [r1, #4] + ldr r7, [r1] + ldr r0, [r3] + str r3, [sp, #200] @ 4-byte Spill + str r2, [sp, #108] @ 4-byte Spill + ldr r2, [r1, #8] + str r0, [sp, #188] @ 4-byte Spill + ldr r0, [r3, #4] + str r2, [sp, #104] @ 4-byte Spill + ldr r2, [r1, #12] + str r0, [sp, #184] @ 4-byte Spill + ldr r0, [r3, #8] + str r2, [sp, #100] @ 4-byte Spill + str r0, [sp, #180] @ 4-byte Spill + ldr r0, [r3, #12] + str r0, [sp, #164] @ 4-byte Spill + ldr r0, [r3, #16] + str r0, [sp, #168] @ 4-byte Spill + ldr r0, [r3, #20] + str r0, [sp, #172] @ 4-byte Spill + ldr r0, [r3, #24] + str r0, [sp, #176] @ 4-byte Spill + ldr r0, [r3, #-4] + str r0, [sp, #204] @ 4-byte Spill + mul r2, r7, r0 + ldr r0, [r3, #28] + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [r3, #32] + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [r3, #36] + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [r3, #40] + str r0, [sp, #144] @ 4-byte Spill + ldr r0, [r3, #44] + str r0, [sp, #148] @ 4-byte Spill + ldr r0, [r3, #48] + str r0, [sp, #152] @ 4-byte Spill + ldr r0, [r3, #52] + str r0, [sp, #156] @ 4-byte Spill + ldr r0, [r3, #56] + str r0, [sp, #160] @ 4-byte Spill + ldr r0, [r1, #96] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [r1, #100] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [r1, #104] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [r1, 
#108] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [r1, #112] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [r1, #116] + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [r1, #64] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r1, #68] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r1, #72] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r1, #76] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r1, #80] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [r1, #88] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [r1, #92] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [r1, #84] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r1, #32] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [r1, #36] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r1, #40] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r1, #44] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r1, #48] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r1, #52] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r1, #56] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r1, #60] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r1, #28] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r1, #24] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [r1, #20] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r1, #16] + mov r1, r3 + str r0, [sp, #16] @ 4-byte Spill + add r0, sp, #1104 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1164] + ldr r9, [sp, #1104] + ldr r1, [sp, #1112] + ldr r2, [sp, #1116] + ldr r3, [sp, #1120] + ldr r12, [sp, #1124] + ldr lr, [sp, #1128] + ldr r4, [sp, #1132] + ldr r5, [sp, #1136] + ldr r6, [sp, #1140] + ldr r8, [sp, #1144] + ldr r10, [sp, #1148] + ldr r11, [sp, #1152] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1160] + adds r7, r7, r9 + ldr r7, [sp, #108] @ 4-byte Reload + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1156] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #1108] + adcs r9, r7, r0 + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #200] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte 
Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #196] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #196] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + mul r2, r9, r0 + add r0, sp, #1040 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1100] + ldr r4, [sp, #1040] + ldr r1, [sp, #1048] + ldr r2, [sp, #1052] + ldr r8, [sp, #1056] + ldr r3, [sp, #1060] + ldr r10, [sp, #1064] + ldr r11, [sp, #1068] + ldr r12, [sp, #1072] + ldr r7, [sp, #1076] + ldr r6, [sp, #1080] + ldr lr, [sp, #1084] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1096] + adds r4, r9, r4 + ldr r4, [sp, #108] @ 4-byte Reload + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1092] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #1088] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #1044] + adcs r9, r4, r0 + ldr r0, [sp, #104] @ 4-byte Reload + ldr r4, [sp, #204] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #4] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r9, r4 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r11 + mov r11, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte 
Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #196] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + add r0, sp, #976 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #1036] + add lr, sp, #1000 + add r10, sp, #976 + ldr r5, [sp, #1020] + ldr r6, [sp, #1016] + ldr r7, [sp, #1012] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1032] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1028] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1024] + str r0, [sp, #8] @ 4-byte Spill + ldm lr, {r3, r12, lr} + ldr r9, [sp, #996] + ldr r2, [sp, #992] + ldm r10, {r0, r1, r8, r10} + adds r0, r11, r0 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r1, r0, r1 + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r1 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #8] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #12] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r1, r4 + ldr r1, [sp, #200] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, 
r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #196] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + add r0, sp, #912 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #972] + ldr r4, [sp, #912] + add lr, sp, #916 + ldr r11, [sp, #960] + ldr r5, [sp, #956] + ldr r6, [sp, #952] + ldr r7, [sp, #948] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #968] + adds r4, r8, r4 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #964] + str r0, [sp, #16] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r9, r10, r12, lr} + ldr r4, [sp, #108] @ 4-byte Reload + adcs r4, r4, r0 + ldr r0, [sp, #104] @ 4-byte Reload + str r4, [sp, #12] @ 4-byte Spill + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #200] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #204] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + mul r2, r4, r5 + adcs r0, r0, r11 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #196] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + add r0, sp, #848 + bl 
.LmulPv480x32(PLT) + ldr r0, [sp, #908] + add r10, sp, #872 + add lr, sp, #848 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #904] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #900] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #896] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #892] + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r4, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #12] @ 4-byte Reload + adds r0, r11, r0 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r1, r0, r1 + ldr r0, [sp, #104] @ 4-byte Reload + mov r11, r1 + adcs r0, r0, r2 + ldr r2, [sp, #8] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r1, r5 + mov r1, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #196] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #784 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #844] + ldr r4, [sp, #784] + add r10, sp, #788 + ldr lr, [sp, #832] + ldr r5, [sp, #828] + ldr r6, [sp, #824] + ldr r7, [sp, #820] + ldr r12, [sp, #816] + ldr r3, [sp, #812] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #840] + adds r4, r11, r4 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #836] + str r0, [sp, #24] @ 4-byte Spill + ldm r10, {r0, r1, r2, r8, r9, r10} + ldr r4, [sp, #108] @ 4-byte Reload + adcs r11, r4, r0 + ldr r0, [sp, #104] @ 4-byte Reload + ldr r4, [sp, #204] @ 4-byte Reload + str 
r11, [sp, #20] @ 4-byte Spill + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #200] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #196] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #720 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #780] + add r10, sp, #744 + add lr, sp, #720 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #776] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #772] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #768] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #764] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r6, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #20] @ 4-byte Reload + adds r0, r11, r0 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r1, r0, r1 + ldr r0, [sp, #104] @ 4-byte Reload + mov r11, r1 + adcs r0, r0, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, 
[sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r1, r4 + mov r1, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #196] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #656 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #716] + ldr r4, [sp, #656] + add r10, sp, #660 + ldr lr, [sp, #704] + ldr r5, [sp, #700] + ldr r6, [sp, #696] + ldr r7, [sp, #692] + ldr r12, [sp, #688] + ldr r3, [sp, #684] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #712] + adds r4, r11, r4 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #708] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r0, r1, r2, r8, r9, r10} + ldr r4, [sp, #108] @ 4-byte Reload + adcs r11, r4, r0 + ldr r0, [sp, #104] @ 4-byte Reload + ldr r4, [sp, #200] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, 
#0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #196] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + mul r2, r11, r0 + add r0, sp, #592 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #652] + add r10, sp, #616 + add lr, sp, #592 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #648] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #644] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #640] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r5, r6, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r11, r0 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #104] @ 4-byte Reload + ldr r1, [sp, #32] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #204] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + mul r2, r11, r5 + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #196] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + add r0, sp, #528 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #588] + ldr r4, [sp, #528] + add r10, sp, #532 + ldr lr, [sp, #572] + ldr r6, [sp, #568] + ldr r7, [sp, #564] + ldr r12, [sp, #560] + ldr r3, [sp, #556] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #584] + adds 
r4, r11, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #580] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #576] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r0, r1, r2, r8, r9, r10} + ldr r4, [sp, #108] @ 4-byte Reload + adcs r11, r4, r0 + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #200] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r9 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #196] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #464 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #524] + add r10, sp, #488 + add lr, sp, #464 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #520] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #516] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #512] + str r0, [sp, #40] @ 4-byte Spill + ldm r10, {r4, r5, r6, r7, r8, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r11, r0 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #104] @ 4-byte Reload + ldr r1, [sp, #40] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #204] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r11, r5 + adcs r0, r0, r6 + mov r6, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, 
#88] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r6 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #196] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #400 + bl .LmulPv480x32(PLT) + ldr r0, [sp, #460] + ldr r4, [sp, #400] + add r10, sp, #404 + ldr lr, [sp, #440] + ldr r7, [sp, #436] + ldr r12, [sp, #432] + ldr r3, [sp, #428] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #456] + adds r4, r11, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #452] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #448] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #444] + str r0, [sp, #40] @ 4-byte Spill + ldm r10, {r0, r1, r2, r8, r9, r10} + ldr r4, [sp, #108] @ 4-byte Reload + adcs r11, r4, r0 + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r6 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #196] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #336 + bl .LmulPv480x32(PLT) + ldr r0, [sp, 
#396] + add r10, sp, #360 + add lr, sp, #336 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #392] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #388] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #384] + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r4, r5, r6, r7, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r11, r0 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #104] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #204] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + mul r2, r11, r6 + adcs r0, r0, r7 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r8 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + add r0, sp, #272 + bl .LmulPv480x32(PLT) + add r5, sp, #272 + add lr, sp, #288 + ldm r5, {r0, r1, r3, r5} + ldr r9, [sp, #332] + ldr r8, [sp, #328] + ldr r7, [sp, #312] + adds r0, r11, r0 + ldr r11, [sp, #324] + ldr r0, [sp, #24] @ 4-byte Reload + adcs r10, r0, r1 + mul r0, r10, r6 + ldr r6, [sp, #316] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #320] + str r0, [sp, #76] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r4, [sp, #196] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #200] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r4, r0, r2 + ldr r0, [sp, #120] @ 4-byte Reload + ldr r2, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #112] @ 4-byte 
Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r8, r0, r9 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + add r0, sp, #208 + bl .LmulPv480x32(PLT) + add r3, sp, #208 + ldm r3, {r0, r1, r2, r3} + adds r0, r10, r0 + ldr r0, [sp, #204] @ 4-byte Reload + adcs lr, r0, r1 + ldr r0, [sp, #76] @ 4-byte Reload + ldr r1, [sp, #52] @ 4-byte Reload + str lr, [sp, #80] @ 4-byte Spill + adcs r2, r0, r2 + ldr r0, [sp, #48] @ 4-byte Reload + str r2, [sp, #84] @ 4-byte Spill + adcs r3, r0, r3 + ldr r0, [sp, #224] + str r3, [sp, #88] @ 4-byte Spill + adcs r7, r1, r0 + ldr r0, [sp, #228] + ldr r1, [sp, #56] @ 4-byte Reload + str r7, [sp, #92] @ 4-byte Spill + adcs r4, r4, r0 + ldr r0, [sp, #232] + str r4, [sp, #96] @ 4-byte Spill + adcs r5, r1, r0 + ldr r0, [sp, #236] + ldr r1, [sp, #60] @ 4-byte Reload + str r5, [sp, #100] @ 4-byte Spill + adcs r6, r1, r0 + ldr r0, [sp, #240] + ldr r1, [sp, #64] @ 4-byte Reload + str r6, [sp, #104] @ 4-byte Spill + adcs r11, r1, r0 + ldr r0, [sp, #244] + ldr r1, [sp, #68] @ 4-byte Reload + str r11, [sp, #108] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #196] @ 4-byte Reload + str r0, [sp, #200] @ 4-byte Spill + ldr r0, [sp, #248] + adcs r0, r1, r0 + ldr r1, [sp, #112] @ 4-byte Reload + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [sp, #252] + adcs r0, r1, r0 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #256] + adcs r10, r1, r0 + ldr r0, [sp, #260] + ldr r1, [sp, #128] @ 4-byte Reload + str r10, [sp, #124] @ 4-byte Spill + adcs r9, r1, r0 + ldr r0, [sp, #264] + ldr r1, [sp, #120] @ 4-byte Reload + str r9, [sp, #128] @ 4-byte Spill + adcs r8, r8, r0 + ldr r0, [sp, #268] + adcs r12, r1, r0 + ldr r0, [sp, #116] @ 4-byte Reload + ldr r1, [sp, #184] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #188] @ 4-byte Reload + subs r0, lr, r0 + sbcs r1, r2, r1 + ldr r2, [sp, #180] @ 4-byte Reload + sbcs r2, r3, r2 + ldr r3, [sp, #164] @ 4-byte Reload + sbcs r3, r7, r3 + ldr r7, [sp, #168] @ 4-byte Reload + sbcs lr, r4, r7 + ldr r4, [sp, #172] @ 4-byte Reload + ldr r7, [sp, #136] @ 4-byte Reload + sbcs r4, r5, r4 + ldr r5, [sp, #176] @ 4-byte Reload + sbcs r5, r6, r5 + ldr r6, [sp, #132] @ 4-byte Reload + sbcs r6, r11, r6 + ldr r11, [sp, #200] @ 4-byte Reload + str r6, [sp, #172] @ 4-byte Spill + sbcs r6, r11, r7 + ldr r7, [sp, #140] @ 4-byte Reload + ldr r11, [sp, #204] @ 4-byte Reload + str r6, [sp, #176] @ 4-byte Spill + ldr r6, [sp, #196] @ 4-byte Reload + sbcs r6, r6, r7 + ldr r7, [sp, #144] @ 4-byte Reload + str r6, [sp, #180] @ 4-byte Spill + sbcs r6, r11, r7 + ldr r7, [sp, #148] @ 4-byte Reload + str r6, [sp, #184] @ 4-byte Spill + sbcs r6, r10, r7 + ldr r7, [sp, #152] @ 4-byte Reload + mov r10, r8 + str r6, [sp, #188] @ 4-byte Spill + sbcs r6, r9, r7 + ldr r7, [sp, #156] @ 4-byte Reload + sbcs r11, r8, r7 + ldr r7, [sp, #160] @ 4-byte Reload + mov r8, r12 + sbcs r9, r12, r7 + ldr r7, [sp, #120] @ 4-byte Reload + sbc r7, r7, #0 + ands r12, r7, #1 + ldr r7, [sp, #80] @ 4-byte Reload + movne r0, r7 + ldr r7, [sp, #192] @ 4-byte Reload + str r0, [r7] + ldr r0, [sp, #84] @ 4-byte Reload + movne r1, r0 + ldr r0, [sp, #88] @ 4-byte Reload + str r1, [r7, #4] + ldr r1, [sp, #108] @ 4-byte Reload + 
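+@ Note (annotation, inferred from the surrounding code): r12 holds the final borrow
+@ of the trial subtraction t - p computed above (sbc r7, r7, #0; ands r12, r7, #1).
+@ The movne run below, predicated on r12 != 0, writes back the saved pre-subtraction
+@ limbs instead, in effect selecting result = (t >= p) ? t - p : t without a branch.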
movne r2, r0 + ldr r0, [sp, #92] @ 4-byte Reload + cmp r12, #0 + str r2, [r7, #8] + movne r3, r0 + ldr r0, [sp, #96] @ 4-byte Reload + str r3, [r7, #12] + movne lr, r0 + ldr r0, [sp, #100] @ 4-byte Reload + str lr, [r7, #16] + movne r4, r0 + ldr r0, [sp, #104] @ 4-byte Reload + cmp r12, #0 + str r4, [r7, #20] + movne r5, r0 + ldr r0, [sp, #172] @ 4-byte Reload + movne r0, r1 + str r5, [r7, #24] + ldr r1, [sp, #176] @ 4-byte Reload + str r0, [r7, #28] + ldr r0, [sp, #200] @ 4-byte Reload + movne r1, r0 + ldr r0, [sp, #196] @ 4-byte Reload + cmp r12, #0 + str r1, [r7, #32] + ldr r1, [sp, #180] @ 4-byte Reload + movne r1, r0 + ldr r0, [sp, #204] @ 4-byte Reload + str r1, [r7, #36] + ldr r1, [sp, #184] @ 4-byte Reload + movne r1, r0 + ldr r0, [sp, #188] @ 4-byte Reload + str r1, [r7, #40] + ldr r1, [sp, #124] @ 4-byte Reload + movne r0, r1 + cmp r12, #0 + str r0, [r7, #44] + ldr r0, [sp, #128] @ 4-byte Reload + movne r11, r10 + movne r9, r8 + movne r6, r0 + str r6, [r7, #48] + str r11, [r7, #52] + str r9, [r7, #56] + add sp, sp, #148 + add sp, sp, #1024 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end228: + .size mcl_fp_montRed15L, .Lfunc_end228-mcl_fp_montRed15L + .cantunwind + .fnend + + .globl mcl_fp_addPre15L + .align 2 + .type mcl_fp_addPre15L,%function +mcl_fp_addPre15L: @ @mcl_fp_addPre15L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #52 + sub sp, sp, #52 + ldm r1, {r3, r7, r11} + ldr r10, [r2] + ldr r5, [r2, #16] + ldr r6, [r2, #4] + ldr r4, [r2, #8] + ldr r12, [r2, #12] + ldr r8, [r1, #12] + ldr r9, [r1, #56] + adds lr, r10, r3 + ldr r3, [r2, #32] + str r5, [sp, #8] @ 4-byte Spill + ldr r5, [r2, #20] + ldr r10, [r1, #44] + adcs r6, r6, r7 + adcs r4, r4, r11 + ldr r11, [r1, #40] + adcs r7, r12, r8 + add r12, r1, #16 + ldr r8, [r1, #52] + str r3, [sp, #20] @ 4-byte Spill + ldr r3, [r2, #36] + str r5, [sp, #12] @ 4-byte Spill + ldr r5, [r2, #24] + str r3, [sp, #28] @ 4-byte Spill + ldr r3, [r2, #40] + str r5, [sp, #16] @ 4-byte Spill + ldr r5, [r2, #28] + str r3, [sp, #32] @ 4-byte Spill + ldr r3, [r2, #44] + str r5, [sp, #24] @ 4-byte Spill + ldr r5, [r1, #32] + str r3, [sp, #36] @ 4-byte Spill + ldr r3, [r2, #48] + str r3, [sp, #40] @ 4-byte Spill + ldr r3, [r2, #52] + ldr r2, [r2, #56] + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [r1, #36] + str r3, [sp, #44] @ 4-byte Spill + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #48] + str r2, [sp, #4] @ 4-byte Spill + ldm r12, {r1, r2, r3, r12} + str lr, [r0] + str r6, [r0, #4] + ldr r6, [sp, #8] @ 4-byte Reload + str r4, [r0, #8] + str r7, [r0, #12] + ldr r7, [sp, #12] @ 4-byte Reload + ldr r4, [sp, #48] @ 4-byte Reload + adcs r1, r6, r1 + ldr r6, [sp, #40] @ 4-byte Reload + adcs r2, r7, r2 + str r1, [r0, #16] + ldr r1, [sp, #16] @ 4-byte Reload + ldr r7, [sp, #36] @ 4-byte Reload + str r2, [r0, #20] + ldr r2, [sp, #24] @ 4-byte Reload + adcs r1, r1, r3 + ldr r3, [sp] @ 4-byte Reload + adcs r2, r2, r12 + str r1, [r0, #24] + ldr r1, [sp, #20] @ 4-byte Reload + add r12, r0, #32 + str r2, [r0, #28] + ldr r2, [sp, #28] @ 4-byte Reload + adcs r1, r1, r5 + ldr r5, [sp, #4] @ 4-byte Reload + adcs r2, r2, r3 + ldr r3, [sp, #32] @ 4-byte Reload + adcs r3, r3, r11 + adcs r7, r7, r10 + adcs r6, r6, r5 + ldr r5, [sp, #44] @ 4-byte Reload + stm r12, {r1, r2, r3, r7} + str r6, [r0, #48] + adcs r5, r5, r8 + adcs r4, r4, r9 + str r5, [r0, #52] + str r4, [r0, #56] + mov r0, #0 + adc r0, r0, #0 + add sp, sp, #52 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, 
lr +.Lfunc_end229: + .size mcl_fp_addPre15L, .Lfunc_end229-mcl_fp_addPre15L + .cantunwind + .fnend + + .globl mcl_fp_subPre15L + .align 2 + .type mcl_fp_subPre15L,%function +mcl_fp_subPre15L: @ @mcl_fp_subPre15L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #56 + sub sp, sp, #56 + ldm r2, {r3, r11} + ldr r7, [r1] + ldr r5, [r2, #8] + ldr r6, [r2, #12] + ldmib r1, {r4, r12, lr} + ldr r8, [r1, #32] + ldr r10, [r1, #52] + subs r3, r7, r3 + ldr r7, [r2, #24] + str r3, [sp, #24] @ 4-byte Spill + ldr r3, [r2, #32] + sbcs r4, r4, r11 + sbcs r5, r12, r5 + add r12, r1, #16 + sbcs r11, lr, r6 + ldr r6, [r2, #20] + ldr lr, [r2, #16] + str r3, [sp, #28] @ 4-byte Spill + ldr r3, [r2, #36] + str r3, [sp, #32] @ 4-byte Spill + ldr r3, [r2, #40] + str r3, [sp, #36] @ 4-byte Spill + ldr r3, [r2, #44] + str r3, [sp, #40] @ 4-byte Spill + ldr r3, [r2, #48] + str r3, [sp, #44] @ 4-byte Spill + ldr r3, [r2, #52] + str r3, [sp, #48] @ 4-byte Spill + ldr r3, [r2, #56] + str r3, [sp, #52] @ 4-byte Spill + ldr r3, [r2, #28] + ldr r2, [r1, #36] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #40] + str r3, [sp, #20] @ 4-byte Spill + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #44] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #48] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #16] @ 4-byte Spill + ldm r12, {r1, r2, r3, r12} + ldr r9, [sp, #24] @ 4-byte Reload + sbcs r1, r1, lr + str r9, [r0] + stmib r0, {r4, r5} + str r11, [r0, #12] + sbcs r2, r2, r6 + str r1, [r0, #16] + ldr r6, [sp, #44] @ 4-byte Reload + ldr r5, [sp, #48] @ 4-byte Reload + ldr r4, [sp, #52] @ 4-byte Reload + sbcs r1, r3, r7 + str r2, [r0, #20] + ldr r2, [sp, #20] @ 4-byte Reload + ldr r3, [sp, #36] @ 4-byte Reload + ldr r7, [sp, #40] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #28] @ 4-byte Reload + sbcs r2, r12, r2 + sbcs r12, r8, r1 + str r2, [r0, #28] + ldr r2, [sp, #32] @ 4-byte Reload + ldr r1, [sp] @ 4-byte Reload + str r12, [r0, #32] + sbcs r2, r1, r2 + ldr r1, [sp, #4] @ 4-byte Reload + sbcs r3, r1, r3 + ldr r1, [sp, #8] @ 4-byte Reload + sbcs r7, r1, r7 + ldr r1, [sp, #12] @ 4-byte Reload + sbcs r6, r1, r6 + ldr r1, [sp, #16] @ 4-byte Reload + sbcs r5, r10, r5 + sbcs r4, r1, r4 + add r1, r0, #36 + stm r1, {r2, r3, r7} + str r6, [r0, #48] + str r5, [r0, #52] + str r4, [r0, #56] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + add sp, sp, #56 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end230: + .size mcl_fp_subPre15L, .Lfunc_end230-mcl_fp_subPre15L + .cantunwind + .fnend + + .globl mcl_fp_shr1_15L + .align 2 + .type mcl_fp_shr1_15L,%function +mcl_fp_shr1_15L: @ @mcl_fp_shr1_15L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #24 + sub sp, sp, #24 + ldmib r1, {r2, r3, r4, r5, r6, r10} + ldr r7, [r1] + ldr r11, [r1, #52] + ldr r8, [r1, #28] + ldr lr, [r1, #32] + ldr r12, [r1, #36] + ldr r9, [r1, #44] + str r7, [sp, #4] @ 4-byte Spill + lsr r7, r2, #1 + str r11, [sp, #16] @ 4-byte Spill + orr r7, r7, r3, lsl #31 + str r7, [sp] @ 4-byte Spill + ldr r7, [r1, #40] + str r7, [sp, #8] @ 4-byte Spill + ldr r7, [r1, #48] + ldr r1, [r1, #56] + str r1, [sp, #20] @ 4-byte Spill + lsr r1, r4, #1 + lsrs r4, r4, #1 + str r7, [sp, #12] @ 4-byte Spill + rrx r3, r3 + lsrs r2, r2, #1 + orr r1, r1, r5, lsl #31 + ldr r2, [sp, #4] @ 4-byte Reload + rrx r2, r2 + str r2, [r0] + ldr r2, [sp] @ 4-byte Reload + stmib r0, {r2, r3} + str r1, [r0, #12] + lsrs r1, r6, #1 
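+@ Note (annotation): the 480-bit right shift is done limb by limb; each limb is
+@ lsr'd by 1 and or'd with the next limb's low bit (lsl #31), while the lsrs/rrx
+@ pairs propagate the dropped bit through the carry flag.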
+ lsr r2, r12, #1 + rrx r1, r5 + ldr r7, [sp, #8] @ 4-byte Reload + ldr r5, [sp, #16] @ 4-byte Reload + ldr r4, [sp, #12] @ 4-byte Reload + str r1, [r0, #16] + lsr r1, r6, #1 + orr r1, r1, r10, lsl #31 + str r1, [r0, #20] + lsrs r1, r8, #1 + rrx r1, r10 + orr r2, r2, r7, lsl #31 + str r1, [r0, #24] + lsr r1, r8, #1 + orr r1, r1, lr, lsl #31 + str r1, [r0, #28] + lsrs r1, r12, #1 + add r12, r0, #32 + rrx r1, lr + lsrs r3, r9, #1 + rrx r3, r7 + lsrs r6, r5, #1 + lsr r7, r9, #1 + lsr r5, r5, #1 + orr r7, r7, r4, lsl #31 + rrx r6, r4 + ldr r4, [sp, #20] @ 4-byte Reload + stm r12, {r1, r2, r3, r7} + str r6, [r0, #48] + orr r5, r5, r4, lsl #31 + lsr r4, r4, #1 + str r5, [r0, #52] + str r4, [r0, #56] + add sp, sp, #24 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end231: + .size mcl_fp_shr1_15L, .Lfunc_end231-mcl_fp_shr1_15L + .cantunwind + .fnend + + .globl mcl_fp_add15L + .align 2 + .type mcl_fp_add15L,%function +mcl_fp_add15L: @ @mcl_fp_add15L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #60 + sub sp, sp, #60 + ldr r9, [r1] + ldmib r1, {r8, lr} + ldr r12, [r1, #12] + ldm r2, {r4, r5, r6, r7} + adds r10, r4, r9 + ldr r4, [r1, #24] + adcs r11, r5, r8 + ldr r5, [r1, #20] + mov r8, r10 + adcs r6, r6, lr + mov lr, r11 + str r8, [r0] + adcs r9, r7, r12 + str r6, [sp, #40] @ 4-byte Spill + ldr r6, [r1, #16] + ldr r7, [r2, #16] + str lr, [r0, #4] + str r9, [sp, #8] @ 4-byte Spill + adcs r7, r7, r6 + ldr r6, [r2, #48] + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [r2, #20] + adcs r7, r7, r5 + ldr r5, [r2, #28] + str r7, [sp, #32] @ 4-byte Spill + ldr r7, [r2, #24] + ldr r10, [sp, #32] @ 4-byte Reload + adcs r7, r7, r4 + ldr r4, [r2, #32] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r1, #28] + adcs r7, r5, r7 + str r7, [sp, #12] @ 4-byte Spill + ldr r7, [r1, #32] + ldr r11, [sp, #12] @ 4-byte Reload + adcs r7, r4, r7 + ldr r4, [r2, #36] + str r7, [sp, #52] @ 4-byte Spill + ldr r7, [r1, #36] + adcs r7, r4, r7 + ldr r4, [r2, #40] + str r7, [sp, #44] @ 4-byte Spill + ldr r7, [r1, #40] + adcs r7, r4, r7 + ldr r4, [r2, #44] + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r1, #44] + adcs r5, r4, r7 + ldr r7, [r1, #48] + ldr r4, [sp, #40] @ 4-byte Reload + str r5, [sp, #28] @ 4-byte Spill + adcs r12, r6, r7 + ldr r7, [r1, #52] + ldr r6, [r2, #52] + ldr r1, [r1, #56] + ldr r2, [r2, #56] + str r4, [r0, #8] + str r9, [r0, #12] + ldr r9, [sp, #36] @ 4-byte Reload + adcs r6, r6, r7 + str r9, [r0, #16] + str r10, [r0, #20] + add r7, r0, #40 + adcs r2, r2, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r6, [sp, #24] @ 4-byte Spill + str r2, [sp, #20] @ 4-byte Spill + str r1, [r0, #24] + ldr r1, [sp, #52] @ 4-byte Reload + str r11, [r0, #28] + str r1, [r0, #32] + ldr r1, [sp, #44] @ 4-byte Reload + str r1, [r0, #36] + ldr r1, [sp, #48] @ 4-byte Reload + stm r7, {r1, r5, r12} + str r6, [r0, #52] + str r2, [r0, #56] + mov r2, #0 + adc r1, r2, #0 + str r1, [sp, #16] @ 4-byte Spill + ldm r3, {r6, r7} + ldr r1, [r3, #8] + ldr r2, [r3, #12] + subs r5, r8, r6 + sbcs r7, lr, r7 + str r5, [sp, #4] @ 4-byte Spill + sbcs r1, r4, r1 + str r7, [sp] @ 4-byte Spill + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #8] @ 4-byte Reload + sbcs r1, r1, r2 + ldr r2, [sp, #56] @ 4-byte Reload + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [r3, #16] + sbcs r9, r9, r1 + ldr r1, [r3, #20] + sbcs r1, r10, r1 + add r10, r3, #32 + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [r3, #24] + sbcs r1, r2, r1 + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [r3, #28] + 
sbcs r11, r11, r1 + ldm r10, {r1, r2, r6, r10} + ldr r5, [sp, #52] @ 4-byte Reload + ldr r8, [r3, #48] + ldr r7, [r3, #52] + ldr r3, [r3, #56] + sbcs r1, r5, r1 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + sbcs r4, r1, r2 + ldr r1, [sp, #48] @ 4-byte Reload + sbcs r2, r1, r6 + ldr r1, [sp, #28] @ 4-byte Reload + sbcs lr, r1, r10 + ldr r1, [sp, #24] @ 4-byte Reload + sbcs r6, r12, r8 + sbcs r5, r1, r7 + ldr r1, [sp, #20] @ 4-byte Reload + sbcs r1, r1, r3 + ldr r3, [sp, #16] @ 4-byte Reload + sbc r3, r3, #0 + tst r3, #1 + bne .LBB232_2 +@ BB#1: @ %nocarry + ldr r3, [sp, #4] @ 4-byte Reload + str r3, [r0] + ldr r3, [sp] @ 4-byte Reload + str r3, [r0, #4] + ldr r3, [sp, #40] @ 4-byte Reload + str r3, [r0, #8] + ldr r3, [sp, #8] @ 4-byte Reload + str r3, [r0, #12] + ldr r3, [sp, #36] @ 4-byte Reload + str r9, [r0, #16] + str r3, [r0, #20] + ldr r3, [sp, #56] @ 4-byte Reload + str r3, [r0, #24] + ldr r3, [sp, #52] @ 4-byte Reload + str r11, [r0, #28] + str r3, [r0, #32] + str r4, [r0, #36] + str r2, [r0, #40] + str lr, [r0, #44] + str r6, [r0, #48] + str r5, [r0, #52] + str r1, [r0, #56] +.LBB232_2: @ %carry + add sp, sp, #60 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end232: + .size mcl_fp_add15L, .Lfunc_end232-mcl_fp_add15L + .cantunwind + .fnend + + .globl mcl_fp_addNF15L + .align 2 + .type mcl_fp_addNF15L,%function +mcl_fp_addNF15L: @ @mcl_fp_addNF15L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #92 + sub sp, sp, #92 + ldr r9, [r1] + ldmib r1, {r8, lr} + ldr r12, [r1, #12] + ldm r2, {r4, r5, r6, r7} + add r11, r3, #32 + adds r10, r4, r9 + ldr r4, [r1, #24] + adcs r9, r5, r8 + ldr r5, [r1, #20] + str r10, [sp, #20] @ 4-byte Spill + adcs lr, r6, lr + ldr r6, [r1, #16] + str r9, [sp, #24] @ 4-byte Spill + adcs r8, r7, r12 + ldr r7, [r2, #16] + str lr, [sp, #28] @ 4-byte Spill + str r8, [sp, #32] @ 4-byte Spill + adcs r7, r7, r6 + ldr r6, [r2, #28] + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r2, #20] + adcs r7, r7, r5 + str r7, [sp, #52] @ 4-byte Spill + ldr r7, [r2, #24] + adcs r7, r7, r4 + str r7, [sp, #64] @ 4-byte Spill + ldr r7, [r1, #28] + adcs r7, r6, r7 + ldr r6, [r2, #32] + str r7, [sp, #60] @ 4-byte Spill + ldr r7, [r1, #32] + adcs r7, r6, r7 + ldr r6, [r2, #36] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r1, #36] + adcs r7, r6, r7 + ldr r6, [r2, #40] + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [r1, #40] + adcs r7, r6, r7 + ldr r6, [r2, #44] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r1, #44] + adcs r7, r6, r7 + ldr r6, [r2, #48] + str r7, [sp, #68] @ 4-byte Spill + ldr r7, [r1, #48] + adcs r7, r6, r7 + ldr r6, [r2, #52] + ldr r2, [r2, #56] + str r7, [sp, #88] @ 4-byte Spill + ldr r7, [r1, #52] + ldr r1, [r1, #56] + adcs r7, r6, r7 + adc r1, r2, r1 + str r7, [sp, #84] @ 4-byte Spill + str r1, [sp, #80] @ 4-byte Spill + ldmib r3, {r1, r5, r7} + ldr r2, [r3, #16] + ldr r4, [r3] + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [r3, #20] + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [r3, #24] + str r2, [sp, #40] @ 4-byte Spill + ldr r2, [r3, #28] + str r2, [sp, #44] @ 4-byte Spill + subs r2, r10, r4 + sbcs r12, r9, r1 + ldm r11, {r9, r10, r11} + ldr r1, [r3, #44] + ldr r4, [sp, #36] @ 4-byte Reload + sbcs lr, lr, r5 + ldr r5, [sp, #64] @ 4-byte Reload + sbcs r6, r8, r7 + ldr r7, [sp, #60] @ 4-byte Reload + str r1, [sp] @ 4-byte Spill + ldr r1, [r3, #48] + str r1, [sp, #4] @ 4-byte Spill + ldr r1, [r3, #52] + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [r3, #56] + 
ldr r3, [sp, #16] @ 4-byte Reload + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #48] @ 4-byte Reload + sbcs r3, r1, r3 + ldr r1, [sp, #52] @ 4-byte Reload + sbcs r4, r1, r4 + ldr r1, [sp, #40] @ 4-byte Reload + sbcs r5, r5, r1 + ldr r1, [sp, #44] @ 4-byte Reload + sbcs r8, r7, r1 + ldr r1, [sp, #56] @ 4-byte Reload + ldr r7, [sp] @ 4-byte Reload + sbcs r9, r1, r9 + ldr r1, [sp, #76] @ 4-byte Reload + sbcs r10, r1, r10 + ldr r1, [sp, #72] @ 4-byte Reload + sbcs r1, r1, r11 + ldr r11, [sp, #20] @ 4-byte Reload + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + sbcs r1, r1, r7 + ldr r7, [sp, #4] @ 4-byte Reload + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + sbcs r1, r1, r7 + ldr r7, [sp, #8] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + sbcs r1, r1, r7 + ldr r7, [sp, #12] @ 4-byte Reload + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + sbc r7, r1, r7 + asr r1, r7, #31 + cmp r1, #0 + movlt r2, r11 + str r2, [r0] + ldr r2, [sp, #24] @ 4-byte Reload + movlt r12, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r12, [r0, #4] + movlt lr, r2 + ldr r2, [sp, #32] @ 4-byte Reload + cmp r1, #0 + str lr, [r0, #8] + movlt r6, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r6, [r0, #12] + movlt r3, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r3, [r0, #16] + ldr r3, [sp, #16] @ 4-byte Reload + movlt r4, r2 + ldr r2, [sp, #64] @ 4-byte Reload + cmp r1, #0 + str r4, [r0, #20] + movlt r5, r2 + ldr r2, [sp, #60] @ 4-byte Reload + str r5, [r0, #24] + movlt r8, r2 + ldr r2, [sp, #56] @ 4-byte Reload + str r8, [r0, #28] + movlt r9, r2 + ldr r2, [sp, #76] @ 4-byte Reload + cmp r1, #0 + str r9, [r0, #32] + movlt r10, r2 + ldr r2, [sp, #72] @ 4-byte Reload + str r10, [r0, #36] + movlt r3, r2 + ldr r2, [sp, #68] @ 4-byte Reload + str r3, [r0, #40] + ldr r3, [sp, #36] @ 4-byte Reload + movlt r3, r2 + cmp r1, #0 + ldr r1, [sp, #88] @ 4-byte Reload + ldr r2, [sp, #40] @ 4-byte Reload + str r3, [r0, #44] + movlt r2, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r2, [r0, #48] + ldr r2, [sp, #44] @ 4-byte Reload + movlt r2, r1 + ldr r1, [sp, #80] @ 4-byte Reload + str r2, [r0, #52] + movlt r7, r1 + str r7, [r0, #56] + add sp, sp, #92 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end233: + .size mcl_fp_addNF15L, .Lfunc_end233-mcl_fp_addNF15L + .cantunwind + .fnend + + .globl mcl_fp_sub15L + .align 2 + .type mcl_fp_sub15L,%function +mcl_fp_sub15L: @ @mcl_fp_sub15L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #64 + sub sp, sp, #64 + ldr r9, [r2] + ldmib r2, {r8, lr} + ldr r5, [r1] + ldr r12, [r2, #12] + ldmib r1, {r4, r6, r7} + subs r5, r5, r9 + sbcs r4, r4, r8 + str r5, [sp, #48] @ 4-byte Spill + ldr r5, [r2, #24] + sbcs r6, r6, lr + str r4, [sp, #60] @ 4-byte Spill + ldr r4, [r2, #20] + sbcs r7, r7, r12 + str r6, [sp, #56] @ 4-byte Spill + ldr r6, [r2, #16] + str r7, [sp, #52] @ 4-byte Spill + ldr r7, [r1, #16] + sbcs r9, r7, r6 + ldr r7, [r1, #20] + ldr r6, [r1, #28] + str r9, [sp, #40] @ 4-byte Spill + sbcs r7, r7, r4 + str r7, [sp, #44] @ 4-byte Spill + ldr r7, [r1, #24] + sbcs r5, r7, r5 + ldr r7, [r2, #28] + sbcs r10, r6, r7 + ldr r7, [r2, #32] + ldr r6, [r1, #32] + str r10, [sp, #36] @ 4-byte Spill + sbcs r11, r6, r7 + ldr r7, [r2, #36] + ldr r6, [r1, #36] + str r11, [sp, #32] @ 4-byte Spill + sbcs lr, r6, r7 + ldr r7, [r2, #40] + ldr r6, [r1, #40] + str lr, [sp, #28] @ 4-byte Spill + sbcs r12, r6, r7 + ldr r7, 
[r2, #44] + ldr r6, [r1, #44] + str r12, [sp, #24] @ 4-byte Spill + sbcs r4, r6, r7 + ldr r6, [r2, #48] + ldr r7, [r1, #48] + sbcs r8, r7, r6 + ldr r6, [r2, #52] + ldr r7, [r1, #52] + ldr r2, [r2, #56] + ldr r1, [r1, #56] + sbcs r6, r7, r6 + ldr r7, [sp, #48] @ 4-byte Reload + sbcs r2, r1, r2 + ldr r1, [sp, #60] @ 4-byte Reload + str r2, [sp, #20] @ 4-byte Spill + str r7, [r0] + str r1, [r0, #4] + ldr r1, [sp, #56] @ 4-byte Reload + str r1, [r0, #8] + ldr r1, [sp, #52] @ 4-byte Reload + str r1, [r0, #12] + str r9, [r0, #16] + mov r9, r6 + mov r6, r5 + ldr r5, [sp, #44] @ 4-byte Reload + mov r1, r4 + str r5, [r0, #20] + str r6, [r0, #24] + str r10, [r0, #28] + str r11, [r0, #32] + str lr, [r0, #36] + str r12, [r0, #40] + add r12, r0, #44 + stm r12, {r1, r8, r9} + str r2, [r0, #56] + mov r2, #0 + sbc r2, r2, #0 + tst r2, #1 + beq .LBB234_2 +@ BB#1: @ %carry + ldr r2, [r3, #56] + str r2, [sp, #16] @ 4-byte Spill + ldmib r3, {r2, lr} + ldr r4, [r3, #16] + ldr r12, [r3, #12] + str r4, [sp] @ 4-byte Spill + ldr r4, [r3, #20] + str r4, [sp, #4] @ 4-byte Spill + ldr r4, [r3, #24] + str r4, [sp, #8] @ 4-byte Spill + ldr r4, [r3, #28] + str r4, [sp, #12] @ 4-byte Spill + ldr r4, [r3] + adds r4, r4, r7 + ldr r7, [r3, #52] + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [sp, #60] @ 4-byte Reload + adcs r11, r2, r7 + ldr r2, [r3, #48] + str r2, [sp, #60] @ 4-byte Spill + ldr r2, [sp, #56] @ 4-byte Reload + adcs r7, lr, r2 + ldr r2, [r3, #44] + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [sp, #52] @ 4-byte Reload + adcs r2, r12, r2 + add r12, r3, #32 + ldm r12, {r3, r10, r12} + stm r0, {r4, r11} + str r7, [r0, #8] + str r2, [r0, #12] + ldr r7, [sp, #40] @ 4-byte Reload + ldr r4, [sp] @ 4-byte Reload + ldr r2, [sp, #4] @ 4-byte Reload + adcs r4, r4, r7 + ldr r7, [sp, #8] @ 4-byte Reload + adcs r2, r2, r5 + str r4, [r0, #16] + str r2, [r0, #20] + ldr r2, [sp, #36] @ 4-byte Reload + adcs r4, r7, r6 + ldr r7, [sp, #12] @ 4-byte Reload + str r4, [r0, #24] + adcs r2, r7, r2 + ldr r7, [sp, #24] @ 4-byte Reload + str r2, [r0, #28] + ldr r2, [sp, #32] @ 4-byte Reload + adcs lr, r3, r2 + ldr r3, [sp, #28] @ 4-byte Reload + ldr r2, [sp, #56] @ 4-byte Reload + str lr, [r0, #32] + adcs r3, r10, r3 + adcs r7, r12, r7 + str r3, [r0, #36] + adcs r6, r2, r1 + ldr r1, [sp, #60] @ 4-byte Reload + ldr r2, [sp, #16] @ 4-byte Reload + str r7, [r0, #40] + str r6, [r0, #44] + adcs r5, r1, r8 + ldr r1, [sp, #48] @ 4-byte Reload + str r5, [r0, #48] + adcs r4, r1, r9 + ldr r1, [sp, #20] @ 4-byte Reload + str r4, [r0, #52] + adc r1, r2, r1 + str r1, [r0, #56] +.LBB234_2: @ %nocarry + add sp, sp, #64 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end234: + .size mcl_fp_sub15L, .Lfunc_end234-mcl_fp_sub15L + .cantunwind + .fnend + + .globl mcl_fp_subNF15L + .align 2 + .type mcl_fp_subNF15L,%function +mcl_fp_subNF15L: @ @mcl_fp_subNF15L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #84 + sub sp, sp, #84 + mov r12, r0 + ldr r0, [r2, #32] + add r9, r2, #8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r2, #36] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r2, #40] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r2, #44] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r2, #48] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r2, #52] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r2, #56] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r1, #56] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r1, #52] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r1, #48] + str 
r0, [sp, #24] @ 4-byte Spill + ldr r0, [r1, #44] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r1, #40] + str r0, [sp, #16] @ 4-byte Spill + ldm r2, {r10, r11} + ldm r9, {r5, r6, r7, r9} + ldr r0, [r2, #28] + ldr r8, [r2, #24] + ldr r2, [r1] + str r0, [sp, #64] @ 4-byte Spill + ldmib r1, {r0, lr} + ldr r4, [r1, #12] + subs r2, r2, r10 + add r10, r3, #12 + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #36] + sbcs r11, r0, r11 + ldr r0, [r1, #32] + sbcs lr, lr, r5 + ldr r5, [r1, #28] + str r11, [sp] @ 4-byte Spill + sbcs r6, r4, r6 + str r6, [sp, #48] @ 4-byte Spill + ldr r6, [r1, #16] + sbcs r7, r6, r7 + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r1, #24] + ldr r1, [r1, #20] + sbcs r1, r1, r9 + str r1, [sp, #52] @ 4-byte Spill + sbcs r1, r7, r8 + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + sbcs r1, r5, r1 + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + sbcs r0, r2, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + sbcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + sbcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + sbcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + sbcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + sbc r0, r1, r0 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r3, #32] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [r3, #36] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [r3, #40] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r3, #44] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r3, #48] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r3, #52] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r3, #56] + str r0, [sp, #32] @ 4-byte Spill + ldm r3, {r2, r5, r7} + ldm r10, {r6, r9, r10} + ldr r8, [sp, #8] @ 4-byte Reload + ldr r4, [sp, #48] @ 4-byte Reload + ldr r0, [r3, #28] + ldr r1, [r3, #24] + adds r2, r8, r2 + adcs r3, r11, r5 + mov r11, lr + ldr r5, [sp, #56] @ 4-byte Reload + adcs lr, r11, r7 + ldr r7, [sp, #60] @ 4-byte Reload + adcs r4, r4, r6 + ldr r6, [sp, #52] @ 4-byte Reload + adcs r5, r5, r9 + adcs r6, r6, r10 + adcs r7, r7, r1 + ldr r1, [sp, #64] @ 4-byte Reload + adcs r9, r1, r0 + ldr r1, [sp, #68] @ 4-byte Reload + ldr r0, [sp, #4] @ 4-byte Reload + adcs r10, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #72] @ 4-byte Reload + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #76] @ 4-byte Reload + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #80] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r1, r0, r1 + str r1, [sp, #32] @ 4-byte Spill + asr r1, r0, #31 + ldr r0, [sp] @ 4-byte Reload + cmp r1, #0 + movge r2, r8 + movge lr, r11 + str r2, [r12] + ldr r2, [sp, #12] @ 4-byte Reload + movge r3, r0 + ldr r0, [sp, #48] @ 
4-byte Reload + cmp r1, #0 + str r3, [r12, #4] + str lr, [r12, #8] + movge r4, r0 + ldr r0, [sp, #56] @ 4-byte Reload + str r4, [r12, #12] + movge r5, r0 + ldr r0, [sp, #52] @ 4-byte Reload + str r5, [r12, #16] + movge r6, r0 + ldr r0, [sp, #60] @ 4-byte Reload + cmp r1, #0 + str r6, [r12, #20] + movge r7, r0 + ldr r0, [sp, #64] @ 4-byte Reload + str r7, [r12, #24] + movge r9, r0 + ldr r0, [sp, #68] @ 4-byte Reload + str r9, [r12, #28] + movge r10, r0 + ldr r0, [sp, #44] @ 4-byte Reload + cmp r1, #0 + str r10, [r12, #32] + movge r2, r0 + ldr r0, [sp, #72] @ 4-byte Reload + str r2, [r12, #36] + ldr r2, [sp, #16] @ 4-byte Reload + movge r2, r0 + ldr r0, [sp, #76] @ 4-byte Reload + str r2, [r12, #40] + ldr r2, [sp, #20] @ 4-byte Reload + movge r2, r0 + ldr r0, [sp, #80] @ 4-byte Reload + cmp r1, #0 + ldr r1, [sp, #24] @ 4-byte Reload + str r2, [r12, #44] + movge r1, r0 + ldr r0, [sp, #40] @ 4-byte Reload + str r1, [r12, #48] + ldr r1, [sp, #28] @ 4-byte Reload + movge r1, r0 + ldr r0, [sp, #32] @ 4-byte Reload + str r1, [r12, #52] + ldr r1, [sp, #36] @ 4-byte Reload + movge r0, r1 + str r0, [r12, #56] + add sp, sp, #84 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end235: + .size mcl_fp_subNF15L, .Lfunc_end235-mcl_fp_subNF15L + .cantunwind + .fnend + + .globl mcl_fpDbl_add15L + .align 2 + .type mcl_fpDbl_add15L,%function +mcl_fpDbl_add15L: @ @mcl_fpDbl_add15L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #184 + sub sp, sp, #184 + ldm r1, {r7, r8, lr} + ldr r12, [r1, #12] + ldm r2, {r4, r5, r6, r10} + adds r4, r4, r7 + str r4, [sp, #100] @ 4-byte Spill + ldr r4, [r2, #96] + str r4, [sp, #148] @ 4-byte Spill + ldr r4, [r2, #100] + str r4, [sp, #164] @ 4-byte Spill + ldr r4, [r2, #104] + str r4, [sp, #168] @ 4-byte Spill + ldr r4, [r2, #108] + str r4, [sp, #172] @ 4-byte Spill + ldr r4, [r2, #112] + str r4, [sp, #176] @ 4-byte Spill + ldr r4, [r2, #116] + str r4, [sp, #180] @ 4-byte Spill + adcs r4, r5, r8 + adcs r7, r6, lr + str r4, [sp, #68] @ 4-byte Spill + add lr, r1, #16 + str r7, [sp, #64] @ 4-byte Spill + adcs r7, r10, r12 + add r10, r1, #32 + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [r2, #64] + str r7, [sp, #132] @ 4-byte Spill + ldr r7, [r2, #68] + str r7, [sp, #136] @ 4-byte Spill + ldr r7, [r2, #72] + str r7, [sp, #140] @ 4-byte Spill + ldr r7, [r2, #76] + str r7, [sp, #144] @ 4-byte Spill + ldr r7, [r2, #80] + str r7, [sp, #152] @ 4-byte Spill + ldr r7, [r2, #88] + str r7, [sp, #156] @ 4-byte Spill + ldr r7, [r2, #92] + str r7, [sp, #160] @ 4-byte Spill + ldr r7, [r2, #84] + str r7, [sp, #128] @ 4-byte Spill + ldr r7, [r2, #32] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r2, #36] + str r7, [sp, #60] @ 4-byte Spill + ldr r7, [r2, #40] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r2, #44] + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #84] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #88] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #92] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #96] @ 4-byte Spill + ldr r7, [r2, #28] + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #24] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r2, #20] + ldr r2, [r2, #16] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #96] + str r7, [sp, #12] @ 4-byte Spill + str r2, [sp, #104] @ 4-byte Spill + ldr r2, [r1, #100] + str r2, [sp, #108] @ 4-byte Spill + ldr r2, [r1, #104] + str r2, [sp, #112] @ 4-byte Spill + ldr r2, [r1, #108] + str r2, [sp, #116] @ 4-byte Spill + ldr r2, 
[r1, #112] + str r2, [sp, #120] @ 4-byte Spill + ldr r2, [r1, #116] + str r2, [sp, #124] @ 4-byte Spill + ldr r2, [r1, #64] + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [r1, #68] + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #40] @ 4-byte Spill + ldr r2, [r1, #80] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r1, #88] + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [r1, #92] + str r2, [sp, #52] @ 4-byte Spill + ldr r2, [r1, #84] + str r2, [sp, #24] @ 4-byte Spill + ldm r10, {r4, r5, r6, r8, r9, r10} + ldr r2, [r1, #56] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #4] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r11, [sp, #100] @ 4-byte Reload + ldr r7, [sp, #68] @ 4-byte Reload + str r11, [r0] + str r7, [r0, #4] + ldr r7, [sp, #64] @ 4-byte Reload + add r11, r3, #32 + str r7, [r0, #8] + ldr r7, [sp, #8] @ 4-byte Reload + adcs r1, r7, r1 + ldr r7, [sp, #76] @ 4-byte Reload + str r7, [r0, #12] + ldr r7, [sp, #12] @ 4-byte Reload + str r1, [r0, #16] + ldr r1, [sp, #16] @ 4-byte Reload + adcs r2, r7, r2 + ldr r7, [sp] @ 4-byte Reload + str r2, [r0, #20] + adcs r1, r1, r12 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #56] @ 4-byte Reload + adcs r2, r2, lr + str r2, [r0, #28] + adcs r1, r1, r4 + ldr r2, [sp, #60] @ 4-byte Reload + str r1, [r0, #32] + ldr r1, [sp, #72] @ 4-byte Reload + adcs r2, r2, r5 + str r2, [r0, #36] + adcs r1, r1, r6 + ldr r2, [sp, #80] @ 4-byte Reload + str r1, [r0, #40] + ldr r1, [sp, #84] @ 4-byte Reload + adcs r2, r2, r8 + str r2, [r0, #44] + adcs r1, r1, r9 + ldr r2, [sp, #88] @ 4-byte Reload + str r1, [r0, #48] + ldr r1, [sp, #92] @ 4-byte Reload + adcs r2, r2, r10 + adcs r1, r1, r7 + str r2, [r0, #52] + ldr r2, [sp, #96] @ 4-byte Reload + ldr r7, [sp, #4] @ 4-byte Reload + str r1, [r0, #56] + ldr r1, [sp, #132] @ 4-byte Reload + adcs r12, r2, r7 + ldr r2, [sp, #28] @ 4-byte Reload + str r12, [sp, #84] @ 4-byte Spill + adcs r9, r1, r2 + ldr r1, [sp, #136] @ 4-byte Reload + ldr r2, [sp, #32] @ 4-byte Reload + str r9, [sp, #88] @ 4-byte Spill + adcs r6, r1, r2 + ldr r1, [sp, #140] @ 4-byte Reload + ldr r2, [sp, #36] @ 4-byte Reload + str r6, [sp, #96] @ 4-byte Spill + adcs r7, r1, r2 + ldr r1, [sp, #144] @ 4-byte Reload + ldr r2, [sp, #40] @ 4-byte Reload + str r7, [sp, #132] @ 4-byte Spill + adcs r4, r1, r2 + ldr r1, [sp, #152] @ 4-byte Reload + ldr r2, [sp, #44] @ 4-byte Reload + str r4, [sp, #92] @ 4-byte Spill + adcs r5, r1, r2 + ldr r1, [sp, #128] @ 4-byte Reload + ldr r2, [sp, #24] @ 4-byte Reload + str r5, [sp, #100] @ 4-byte Spill + adcs r1, r1, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r1, [sp, #152] @ 4-byte Spill + ldr r1, [sp, #156] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r1, [sp, #156] @ 4-byte Spill + ldr r1, [sp, #160] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #104] @ 4-byte Reload + str r1, [sp, #160] @ 4-byte Spill + ldr r1, [sp, #148] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #108] @ 4-byte Reload + str r1, [sp, #148] @ 4-byte Spill + ldr r1, [sp, #164] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #112] @ 4-byte Reload + str r1, [sp, #164] @ 4-byte Spill + ldr r1, [sp, #168] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #116] @ 4-byte Reload + str r1, [sp, #168] @ 4-byte Spill + ldr r1, [sp, #172] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #120] @ 4-byte Reload + str r1, [sp, #172] @ 4-byte Spill + ldr r1, [sp, #176] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, 
[sp, #124] @ 4-byte Reload + str r1, [sp, #176] @ 4-byte Spill + ldr r1, [sp, #180] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #180] @ 4-byte Spill + mov r1, #0 + adc r1, r1, #0 + str r1, [sp, #128] @ 4-byte Spill + ldmib r3, {r2, lr} + ldr r1, [r3, #16] + ldr r8, [r3, #12] + str r1, [sp, #124] @ 4-byte Spill + ldr r1, [r3, #20] + str r1, [sp, #136] @ 4-byte Spill + ldr r1, [r3, #24] + str r1, [sp, #140] @ 4-byte Spill + ldr r1, [r3, #28] + str r1, [sp, #144] @ 4-byte Spill + ldr r1, [r3] + subs r1, r12, r1 + sbcs r12, r9, r2 + ldm r11, {r9, r10, r11} + ldr r2, [r3, #44] + sbcs lr, r6, lr + sbcs r6, r7, r8 + ldr r7, [sp, #144] @ 4-byte Reload + str r2, [sp, #104] @ 4-byte Spill + ldr r2, [r3, #48] + str r2, [sp, #108] @ 4-byte Spill + ldr r2, [r3, #52] + str r2, [sp, #112] @ 4-byte Spill + ldr r2, [r3, #56] + str r2, [sp, #120] @ 4-byte Spill + ldr r2, [sp, #124] @ 4-byte Reload + sbcs r3, r4, r2 + ldr r2, [sp, #136] @ 4-byte Reload + sbcs r4, r5, r2 + ldr r2, [sp, #152] @ 4-byte Reload + ldr r5, [sp, #140] @ 4-byte Reload + sbcs r5, r2, r5 + ldr r2, [sp, #156] @ 4-byte Reload + sbcs r8, r2, r7 + ldr r2, [sp, #160] @ 4-byte Reload + ldr r7, [sp, #104] @ 4-byte Reload + sbcs r9, r2, r9 + ldr r2, [sp, #148] @ 4-byte Reload + sbcs r10, r2, r10 + ldr r2, [sp, #164] @ 4-byte Reload + sbcs r2, r2, r11 + ldr r11, [sp, #84] @ 4-byte Reload + str r2, [sp, #116] @ 4-byte Spill + ldr r2, [sp, #168] @ 4-byte Reload + sbcs r2, r2, r7 + ldr r7, [sp, #108] @ 4-byte Reload + str r2, [sp, #124] @ 4-byte Spill + ldr r2, [sp, #172] @ 4-byte Reload + sbcs r2, r2, r7 + ldr r7, [sp, #112] @ 4-byte Reload + str r2, [sp, #136] @ 4-byte Spill + ldr r2, [sp, #176] @ 4-byte Reload + sbcs r2, r2, r7 + ldr r7, [sp, #120] @ 4-byte Reload + str r2, [sp, #140] @ 4-byte Spill + ldr r2, [sp, #180] @ 4-byte Reload + sbcs r2, r2, r7 + str r2, [sp, #144] @ 4-byte Spill + ldr r2, [sp, #128] @ 4-byte Reload + sbc r2, r2, #0 + ands r2, r2, #1 + movne r1, r11 + str r1, [r0, #60] + ldr r1, [sp, #88] @ 4-byte Reload + movne r12, r1 + ldr r1, [sp, #96] @ 4-byte Reload + str r12, [r0, #64] + movne lr, r1 + ldr r1, [sp, #132] @ 4-byte Reload + cmp r2, #0 + str lr, [r0, #68] + movne r6, r1 + ldr r1, [sp, #92] @ 4-byte Reload + str r6, [r0, #72] + movne r3, r1 + ldr r1, [sp, #100] @ 4-byte Reload + str r3, [r0, #76] + ldr r3, [sp, #116] @ 4-byte Reload + movne r4, r1 + ldr r1, [sp, #152] @ 4-byte Reload + cmp r2, #0 + str r4, [r0, #80] + movne r5, r1 + ldr r1, [sp, #156] @ 4-byte Reload + str r5, [r0, #84] + movne r8, r1 + ldr r1, [sp, #160] @ 4-byte Reload + str r8, [r0, #88] + movne r9, r1 + ldr r1, [sp, #148] @ 4-byte Reload + cmp r2, #0 + str r9, [r0, #92] + movne r10, r1 + ldr r1, [sp, #164] @ 4-byte Reload + str r10, [r0, #96] + movne r3, r1 + ldr r1, [sp, #168] @ 4-byte Reload + str r3, [r0, #100] + ldr r3, [sp, #124] @ 4-byte Reload + movne r3, r1 + ldr r1, [sp, #172] @ 4-byte Reload + cmp r2, #0 + ldr r2, [sp, #136] @ 4-byte Reload + str r3, [r0, #104] + movne r2, r1 + ldr r1, [sp, #176] @ 4-byte Reload + str r2, [r0, #108] + ldr r2, [sp, #140] @ 4-byte Reload + movne r2, r1 + ldr r1, [sp, #180] @ 4-byte Reload + str r2, [r0, #112] + ldr r2, [sp, #144] @ 4-byte Reload + movne r2, r1 + str r2, [r0, #116] + add sp, sp, #184 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end236: + .size mcl_fpDbl_add15L, .Lfunc_end236-mcl_fpDbl_add15L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub15L + .align 2 + .type mcl_fpDbl_sub15L,%function +mcl_fpDbl_sub15L: @ @mcl_fpDbl_sub15L + .fnstart +@ BB#0: + .save {r4, r5, 
r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #184 + sub sp, sp, #184 + ldr r7, [r2, #96] + ldr r9, [r2] + add r10, r1, #32 + str r7, [sp, #136] @ 4-byte Spill + ldr r7, [r2, #100] + str r7, [sp, #144] @ 4-byte Spill + ldr r7, [r2, #104] + str r7, [sp, #168] @ 4-byte Spill + ldr r7, [r2, #108] + str r7, [sp, #172] @ 4-byte Spill + ldr r7, [r2, #112] + str r7, [sp, #176] @ 4-byte Spill + ldr r7, [r2, #116] + str r7, [sp, #180] @ 4-byte Spill + ldr r7, [r2, #64] + str r7, [sp, #132] @ 4-byte Spill + ldr r7, [r2, #68] + str r7, [sp, #148] @ 4-byte Spill + ldr r7, [r2, #72] + str r7, [sp, #152] @ 4-byte Spill + ldr r7, [r2, #76] + str r7, [sp, #156] @ 4-byte Spill + ldr r7, [r2, #80] + str r7, [sp, #160] @ 4-byte Spill + ldr r7, [r2, #88] + str r7, [sp, #164] @ 4-byte Spill + ldr r7, [r2, #92] + str r7, [sp, #140] @ 4-byte Spill + ldr r7, [r2, #84] + str r7, [sp, #128] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #124] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #120] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #116] @ 4-byte Spill + ldmib r2, {r8, lr} + ldr r5, [r1] + ldr r12, [r2, #12] + ldmib r1, {r4, r6, r7} + subs r5, r5, r9 + sbcs r4, r4, r8 + str r5, [sp, #36] @ 4-byte Spill + ldr r5, [r2, #48] + sbcs r6, r6, lr + str r4, [sp, #28] @ 4-byte Spill + ldr r4, [r2, #44] + add lr, r1, #16 + sbcs r7, r7, r12 + str r6, [sp, #24] @ 4-byte Spill + ldr r6, [r2, #40] + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #36] + str r5, [sp, #88] @ 4-byte Spill + str r4, [sp, #84] @ 4-byte Spill + str r6, [sp, #80] @ 4-byte Spill + str r7, [sp, #44] @ 4-byte Spill + ldr r7, [r2, #32] + str r7, [sp, #40] @ 4-byte Spill + ldr r7, [r2, #28] + str r7, [sp, #32] @ 4-byte Spill + ldr r7, [r2, #24] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r2, #20] + ldr r2, [r2, #16] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #96] + str r7, [sp, #12] @ 4-byte Spill + str r2, [sp, #92] @ 4-byte Spill + ldr r2, [r1, #100] + str r2, [sp, #96] @ 4-byte Spill + ldr r2, [r1, #104] + str r2, [sp, #100] @ 4-byte Spill + ldr r2, [r1, #108] + str r2, [sp, #104] @ 4-byte Spill + ldr r2, [r1, #112] + str r2, [sp, #108] @ 4-byte Spill + ldr r2, [r1, #116] + str r2, [sp, #112] @ 4-byte Spill + ldr r2, [r1, #64] + str r2, [sp, #52] @ 4-byte Spill + ldr r2, [r1, #68] + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #60] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #64] @ 4-byte Spill + ldr r2, [r1, #80] + str r2, [sp, #68] @ 4-byte Spill + ldr r2, [r1, #88] + str r2, [sp, #72] @ 4-byte Spill + ldr r2, [r1, #92] + str r2, [sp, #76] @ 4-byte Spill + ldr r2, [r1, #84] + str r2, [sp, #48] @ 4-byte Spill + ldm r10, {r4, r5, r6, r8, r9, r10} + ldr r2, [r1, #56] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #4] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r11, [sp, #36] @ 4-byte Reload + ldr r7, [sp, #28] @ 4-byte Reload + str r11, [r0] + str r7, [r0, #4] + ldr r7, [sp, #24] @ 4-byte Reload + ldr r11, [r3, #32] + str r7, [r0, #8] + ldr r7, [sp, #8] @ 4-byte Reload + sbcs r1, r1, r7 + ldr r7, [sp, #20] @ 4-byte Reload + str r7, [r0, #12] + ldr r7, [sp, #12] @ 4-byte Reload + str r1, [r0, #16] + ldr r1, [sp, #16] @ 4-byte Reload + sbcs r2, r2, r7 + ldr r7, [sp] @ 4-byte Reload + str r2, [r0, #20] + sbcs r1, r12, r1 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #40] @ 4-byte Reload + sbcs r2, lr, r2 + str r2, [r0, #28] + sbcs r1, r4, r1 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [r0, #32] + ldr r1, [sp, #80] @ 
4-byte Reload + sbcs r2, r5, r2 + str r2, [r0, #36] + sbcs r1, r6, r1 + ldr r2, [sp, #84] @ 4-byte Reload + str r1, [r0, #40] + ldr r1, [sp, #88] @ 4-byte Reload + sbcs r2, r8, r2 + str r2, [r0, #44] + sbcs r1, r9, r1 + ldr r2, [sp, #116] @ 4-byte Reload + str r1, [r0, #48] + ldr r1, [sp, #120] @ 4-byte Reload + sbcs r2, r10, r2 + sbcs r1, r7, r1 + str r2, [r0, #52] + ldr r2, [sp, #124] @ 4-byte Reload + ldr r7, [sp, #4] @ 4-byte Reload + str r1, [r0, #56] + ldr r1, [sp, #132] @ 4-byte Reload + sbcs lr, r7, r2 + ldr r2, [sp, #52] @ 4-byte Reload + ldr r7, [sp, #68] @ 4-byte Reload + sbcs r9, r2, r1 + ldr r1, [sp, #148] @ 4-byte Reload + ldr r2, [sp, #56] @ 4-byte Reload + str r9, [sp, #88] @ 4-byte Spill + sbcs r1, r2, r1 + ldr r2, [sp, #60] @ 4-byte Reload + str r1, [sp, #132] @ 4-byte Spill + ldr r1, [sp, #152] @ 4-byte Reload + sbcs r1, r2, r1 + ldr r2, [sp, #64] @ 4-byte Reload + str r1, [sp, #148] @ 4-byte Spill + ldr r1, [sp, #156] @ 4-byte Reload + sbcs r1, r2, r1 + ldr r2, [sp, #160] @ 4-byte Reload + str r1, [sp, #152] @ 4-byte Spill + mov r1, #0 + sbcs r2, r7, r2 + ldr r7, [sp, #48] @ 4-byte Reload + str r2, [sp, #156] @ 4-byte Spill + ldr r2, [sp, #128] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #72] @ 4-byte Reload + str r2, [sp, #160] @ 4-byte Spill + ldr r2, [sp, #164] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #76] @ 4-byte Reload + str r2, [sp, #164] @ 4-byte Spill + ldr r2, [sp, #140] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #92] @ 4-byte Reload + str r2, [sp, #140] @ 4-byte Spill + ldr r2, [sp, #136] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #96] @ 4-byte Reload + str r2, [sp, #136] @ 4-byte Spill + ldr r2, [sp, #144] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #100] @ 4-byte Reload + str r2, [sp, #144] @ 4-byte Spill + ldr r2, [sp, #168] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #104] @ 4-byte Reload + str r2, [sp, #168] @ 4-byte Spill + ldr r2, [sp, #172] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #108] @ 4-byte Reload + str r2, [sp, #172] @ 4-byte Spill + ldr r2, [sp, #176] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #112] @ 4-byte Reload + str r2, [sp, #176] @ 4-byte Spill + ldr r2, [sp, #180] @ 4-byte Reload + sbcs r2, r7, r2 + sbc r1, r1, #0 + str r2, [sp, #180] @ 4-byte Spill + str r1, [sp, #108] @ 4-byte Spill + ldr r1, [r3, #36] + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [r3, #40] + str r1, [sp, #112] @ 4-byte Spill + ldr r1, [r3, #44] + str r1, [sp, #116] @ 4-byte Spill + ldr r1, [r3, #48] + str r1, [sp, #120] @ 4-byte Spill + ldr r1, [r3, #52] + str r1, [sp, #124] @ 4-byte Spill + ldr r1, [r3, #56] + str r1, [sp, #128] @ 4-byte Spill + ldm r3, {r2, r5, r6} + ldr r4, [r3, #12] + ldr r12, [r3, #16] + ldr r8, [r3, #20] + ldr r10, [r3, #28] + ldr r7, [r3, #24] + ldr r3, [sp, #152] @ 4-byte Reload + adds r1, lr, r2 + ldr r2, [sp, #132] @ 4-byte Reload + adcs r5, r9, r5 + adcs r6, r2, r6 + ldr r2, [sp, #148] @ 4-byte Reload + adcs r2, r2, r4 + ldr r4, [sp, #156] @ 4-byte Reload + adcs r3, r3, r12 + adcs r12, r4, r8 + ldr r4, [sp, #160] @ 4-byte Reload + adcs r8, r4, r7 + ldr r4, [sp, #164] @ 4-byte Reload + ldr r7, [sp, #140] @ 4-byte Reload + adcs r9, r4, r10 + ldr r4, [sp, #104] @ 4-byte Reload + ldr r10, [sp, #128] @ 4-byte Reload + adcs r11, r7, r11 + ldr r7, [sp, #136] @ 4-byte Reload + adcs r7, r7, r4 + ldr r4, [sp, #112] @ 4-byte Reload + str r7, [sp, #104] @ 4-byte Spill + ldr r7, [sp, #144] @ 4-byte Reload + adcs r7, r7, r4 + ldr r4, [sp, #116] @ 4-byte Reload + str r7, [sp, #112] @ 4-byte Spill + ldr r7, 
[sp, #168] @ 4-byte Reload + adcs r7, r7, r4 + ldr r4, [sp, #120] @ 4-byte Reload + str r7, [sp, #116] @ 4-byte Spill + ldr r7, [sp, #172] @ 4-byte Reload + adcs r7, r7, r4 + ldr r4, [sp, #124] @ 4-byte Reload + str r7, [sp, #120] @ 4-byte Spill + ldr r7, [sp, #176] @ 4-byte Reload + adcs r7, r7, r4 + str r7, [sp, #124] @ 4-byte Spill + ldr r7, [sp, #180] @ 4-byte Reload + adc r7, r7, r10 + str r7, [sp, #128] @ 4-byte Spill + ldr r7, [sp, #108] @ 4-byte Reload + ands r7, r7, #1 + moveq r1, lr + str r1, [r0, #60] + ldr r1, [sp, #88] @ 4-byte Reload + moveq r5, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r5, [r0, #64] + moveq r6, r1 + ldr r1, [sp, #148] @ 4-byte Reload + cmp r7, #0 + str r6, [r0, #68] + moveq r2, r1 + ldr r1, [sp, #152] @ 4-byte Reload + str r2, [r0, #72] + ldr r2, [sp, #104] @ 4-byte Reload + moveq r3, r1 + ldr r1, [sp, #156] @ 4-byte Reload + str r3, [r0, #76] + moveq r12, r1 + ldr r1, [sp, #160] @ 4-byte Reload + cmp r7, #0 + str r12, [r0, #80] + moveq r8, r1 + ldr r1, [sp, #164] @ 4-byte Reload + str r8, [r0, #84] + moveq r9, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r9, [r0, #88] + moveq r11, r1 + ldr r1, [sp, #136] @ 4-byte Reload + cmp r7, #0 + str r11, [r0, #92] + moveq r2, r1 + ldr r1, [sp, #144] @ 4-byte Reload + str r2, [r0, #96] + ldr r2, [sp, #112] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #168] @ 4-byte Reload + str r2, [r0, #100] + ldr r2, [sp, #116] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #172] @ 4-byte Reload + cmp r7, #0 + str r2, [r0, #104] + ldr r2, [sp, #120] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #176] @ 4-byte Reload + str r2, [r0, #108] + ldr r2, [sp, #124] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #180] @ 4-byte Reload + str r2, [r0, #112] + ldr r2, [sp, #128] @ 4-byte Reload + moveq r2, r1 + str r2, [r0, #116] + add sp, sp, #184 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end237: + .size mcl_fpDbl_sub15L, .Lfunc_end237-mcl_fpDbl_sub15L + .cantunwind + .fnend + + .align 2 + .type .LmulPv512x32,%function +.LmulPv512x32: @ @mulPv512x32 + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r11, lr} + push {r4, r5, r6, r7, r8, r9, r11, lr} + ldr r12, [r1] + ldmib r1, {r3, lr} + ldr r9, [r1, #12] + umull r4, r8, lr, r2 + umull lr, r6, r12, r2 + mov r5, r4 + mov r7, r6 + str lr, [r0] + umull lr, r12, r9, r2 + umlal r7, r5, r3, r2 + str r5, [r0, #8] + str r7, [r0, #4] + umull r5, r7, r3, r2 + adds r3, r6, r5 + adcs r3, r7, r4 + adcs r3, r8, lr + str r3, [r0, #12] + ldr r3, [r1, #16] + umull r7, r6, r3, r2 + adcs r3, r12, r7 + str r3, [r0, #16] + ldr r3, [r1, #20] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #20] + ldr r3, [r1, #24] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #24] + ldr r3, [r1, #28] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #28] + ldr r3, [r1, #32] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #32] + ldr r3, [r1, #36] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #36] + ldr r3, [r1, #40] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #40] + ldr r3, [r1, #44] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #44] + ldr r3, [r1, #48] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #48] + ldr r3, [r1, #52] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #52] + ldr r3, [r1, #56] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #56] + ldr r1, [r1, #60] + umull r3, r7, r1, r2 + adcs r1, r6, r3 + str r1, [r0, #60] + adc r1, r7, #0 + str r1, [r0, #64] + pop {r4, r5, r6, r7, r8, r9, r11, lr} + mov pc, lr 
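+@ .LmulPv512x32 multiplies the 16-limb (512-bit) integer at r1 by the 32-bit
+@ word in r2, writing a 17-limb result to r0: umull/umlal form each 64-bit
+@ partial product and the adcs chain folds every high half into the next
+@ limb's low half, with the final carry stored to [r0, #64].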
+.Lfunc_end238: + .size .LmulPv512x32, .Lfunc_end238-.LmulPv512x32 + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre16L + .align 2 + .type mcl_fp_mulUnitPre16L,%function +mcl_fp_mulUnitPre16L: @ @mcl_fp_mulUnitPre16L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #92 + sub sp, sp, #92 + mov r4, r0 + add r0, sp, #16 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #76] + add r11, sp, #40 + add lr, sp, #16 + ldr r10, [sp, #80] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #72] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #68] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #64] + str r0, [sp] @ 4-byte Spill + ldm r11, {r5, r6, r7, r8, r9, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + stm r4, {r0, r1, r2, r3, r12, lr} + add r0, r4, #24 + str r10, [r4, #64] + stm r0, {r5, r6, r7, r8, r9, r11} + ldr r0, [sp] @ 4-byte Reload + str r0, [r4, #48] + ldr r0, [sp, #4] @ 4-byte Reload + str r0, [r4, #52] + ldr r0, [sp, #8] @ 4-byte Reload + str r0, [r4, #56] + ldr r0, [sp, #12] @ 4-byte Reload + str r0, [r4, #60] + add sp, sp, #92 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end239: + .size mcl_fp_mulUnitPre16L, .Lfunc_end239-mcl_fp_mulUnitPre16L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre16L + .align 2 + .type mcl_fpDbl_mulPre16L,%function +mcl_fpDbl_mulPre16L: @ @mcl_fpDbl_mulPre16L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #276 + sub sp, sp, #276 + mov r6, r2 + mov r5, r1 + mov r4, r0 + bl mcl_fpDbl_mulPre8L(PLT) + add r0, r4, #64 + add r1, r5, #32 + add r2, r6, #32 + bl mcl_fpDbl_mulPre8L(PLT) + add r11, r6, #32 + ldm r11, {r9, r10, r11} + ldr r0, [r6, #44] + ldr r8, [r6, #60] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [r6, #48] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [r6, #52] + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [r6, #56] + str r0, [sp, #144] @ 4-byte Spill + ldm r6, {r0, r1, r2, r3, r12, lr} + ldr r7, [r6, #24] + ldr r6, [r6, #28] + adds r0, r0, r9 + str r0, [sp, #136] @ 4-byte Spill + adcs r0, r1, r10 + str r0, [sp, #132] @ 4-byte Spill + adcs r0, r2, r11 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, lr, r0 + add lr, r5, #44 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #144] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #112] @ 4-byte Spill + adcs r0, r6, r8 + str r0, [sp, #108] @ 4-byte Spill + mov r0, #0 + ldm r5, {r8, r10, r11} + ldr r7, [r5, #32] + ldr r3, [r5, #36] + ldr r2, [r5, #40] + adc r6, r0, #0 + ldr r0, [r5, #12] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [r5, #16] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [r5, #20] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [r5, #24] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [r5, #28] + str r0, [sp, #104] @ 4-byte Spill + ldm lr, {r0, r1, r12, lr} + ldr r9, [r5, #60] + adds r5, r8, r7 + adcs r3, r10, r3 + str r5, [sp, #180] + str r5, [sp, #144] @ 4-byte Spill + adcs r8, r11, r2 + ldr r2, [sp, #88] @ 4-byte Reload + str r3, [sp, #184] + str r3, [sp, #140] @ 4-byte Spill + str r8, [sp, #188] + adcs r11, r2, r0 + ldr r0, [sp, #92] @ 4-byte Reload + add r2, sp, #148 + str r11, [sp, #192] + adcs r5, r0, r1 + ldr r0, [sp, #96] @ 4-byte Reload + add r1, sp, #180 + str r5, [sp, #196] + adcs r7, r0, r12 + ldr r0, [sp, #100] @ 
4-byte Reload + str r7, [sp, #200] + adcs r10, r0, lr + ldr r0, [sp, #104] @ 4-byte Reload + str r10, [sp, #204] + adcs r0, r0, r9 + str r0, [sp, #208] + mov r9, r0 + ldr r0, [sp, #136] @ 4-byte Reload + str r0, [sp, #148] + ldr r0, [sp, #132] @ 4-byte Reload + str r0, [sp, #152] + ldr r0, [sp, #128] @ 4-byte Reload + str r0, [sp, #156] + ldr r0, [sp, #124] @ 4-byte Reload + str r0, [sp, #160] + ldr r0, [sp, #120] @ 4-byte Reload + str r0, [sp, #164] + ldr r0, [sp, #116] @ 4-byte Reload + str r0, [sp, #168] + ldr r0, [sp, #112] @ 4-byte Reload + str r0, [sp, #172] + ldr r0, [sp, #108] @ 4-byte Reload + str r0, [sp, #176] + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + add r0, sp, #212 + bl mcl_fpDbl_mulPre8L(PLT) + ldr r0, [sp, #136] @ 4-byte Reload + cmp r6, #0 + ldr r1, [sp, #144] @ 4-byte Reload + ldr r2, [sp, #140] @ 4-byte Reload + ldr r3, [sp, #124] @ 4-byte Reload + moveq r9, r6 + moveq r10, r6 + moveq r7, r6 + moveq r5, r6 + moveq r11, r6 + cmp r6, #0 + moveq r1, r6 + moveq r8, r6 + moveq r2, r6 + str r9, [sp, #104] @ 4-byte Spill + str r1, [sp, #144] @ 4-byte Spill + str r2, [sp, #140] @ 4-byte Spill + str r8, [sp, #96] @ 4-byte Spill + adds r12, r1, r0 + ldr r1, [sp, #132] @ 4-byte Reload + adcs lr, r2, r1 + ldr r2, [sp, #128] @ 4-byte Reload + adcs r2, r8, r2 + ldr r8, [sp, #104] @ 4-byte Reload + adcs r9, r11, r3 + ldr r3, [sp, #120] @ 4-byte Reload + adcs r1, r5, r3 + ldr r3, [sp, #116] @ 4-byte Reload + adcs r0, r7, r3 + ldr r3, [sp, #112] @ 4-byte Reload + adcs r3, r10, r3 + str r3, [sp, #124] @ 4-byte Spill + ldr r3, [sp, #108] @ 4-byte Reload + adcs r3, r8, r3 + ldr r8, [sp, #124] @ 4-byte Reload + str r3, [sp, #128] @ 4-byte Spill + mov r3, #0 + adc r3, r3, #0 + str r3, [sp, #136] @ 4-byte Spill + ldr r3, [sp, #100] @ 4-byte Reload + cmp r3, #0 + moveq r0, r7 + moveq r1, r5 + moveq r9, r11 + ldr r5, [sp, #136] @ 4-byte Reload + ldr r7, [sp, #128] @ 4-byte Reload + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + moveq r2, r0 + ldr r0, [sp, #140] @ 4-byte Reload + moveq lr, r0 + ldr r0, [sp, #144] @ 4-byte Reload + cmp r3, #0 + moveq r5, r3 + and r3, r6, r3 + ldr r6, [sp, #244] + moveq r8, r10 + moveq r12, r0 + ldr r0, [sp, #104] @ 4-byte Reload + moveq r7, r0 + adds r0, r12, r6 + add r6, sp, #216 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #248] + adcs r0, lr, r0 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #252] + adcs r10, r2, r0 + ldr r0, [sp, #256] + adcs r0, r9, r0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #260] + adcs r0, r1, r0 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #264] + adcs r0, r1, r0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #268] + adcs r0, r8, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #272] + adcs r0, r7, r0 + str r0, [sp, #112] @ 4-byte Spill + adc r0, r5, r3 + str r0, [sp, #108] @ 4-byte Spill + ldm r4, {r1, r12, lr} + ldr r5, [sp, #212] + ldr r8, [r4, #12] + ldm r6, {r2, r3, r6} + ldr r0, [sp, #236] + ldr r7, [sp, #240] + ldr r9, [r4, #72] + subs r1, r5, r1 + ldr r5, [sp, #228] + sbcs r2, r2, r12 + sbcs r12, r3, lr + ldr r3, [sp, #140] @ 4-byte Reload + sbcs r11, r6, r8 + ldr r6, [r4, #16] + ldr r8, [r4, #68] + sbcs lr, r5, r6 + ldr r5, [r4, #20] + ldr r6, [sp, #232] + sbcs r5, r6, r5 + ldr r6, [r4, #24] + sbcs r6, r0, r6 + ldr r0, [r4, #28] + sbcs r0, r7, r0 + ldr r7, [r4, #32] + sbcs r3, r3, r7 + str r7, [sp, #144] @ 4-byte Spill + ldr r7, [r4, #36] + str r3, [sp, #84] @ 4-byte Spill + ldr r3, [sp, #136] @ 4-byte Reload 
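+@ mcl_fpDbl_mulPre16L appears to be one level of Karatsuba on top of the
+@ 8-limb multiplier: three mcl_fpDbl_mulPre8L calls (low*low, high*high, and
+@ the half-sums into the scratch buffer at sp+#212), after which the sbcs
+@ chains here subtract the low and high products from the middle product
+@ before it is added back into the result starting at r4+#32.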
+ str r7, [sp, #140] @ 4-byte Spill + sbcs r3, r3, r7 + ldr r7, [r4, #40] + str r3, [sp, #76] @ 4-byte Spill + sbcs r3, r10, r7 + str r7, [sp, #136] @ 4-byte Spill + ldr r7, [r4, #44] + ldr r10, [r4, #76] + str r3, [sp, #72] @ 4-byte Spill + ldr r3, [sp, #128] @ 4-byte Reload + str r7, [sp, #132] @ 4-byte Spill + sbcs r3, r3, r7 + ldr r7, [r4, #48] + str r3, [sp, #68] @ 4-byte Spill + ldr r3, [sp, #124] @ 4-byte Reload + str r7, [sp, #128] @ 4-byte Spill + sbcs r3, r3, r7 + ldr r7, [r4, #52] + str r3, [sp, #64] @ 4-byte Spill + ldr r3, [sp, #120] @ 4-byte Reload + str r7, [sp, #124] @ 4-byte Spill + sbcs r3, r3, r7 + ldr r7, [r4, #56] + str r3, [sp, #60] @ 4-byte Spill + ldr r3, [sp, #116] @ 4-byte Reload + str r7, [sp, #120] @ 4-byte Spill + sbcs r3, r3, r7 + ldr r7, [r4, #60] + str r3, [sp, #56] @ 4-byte Spill + ldr r3, [sp, #112] @ 4-byte Reload + str r7, [sp, #116] @ 4-byte Spill + sbcs r3, r3, r7 + str r3, [sp, #52] @ 4-byte Spill + ldr r3, [sp, #108] @ 4-byte Reload + sbc r3, r3, #0 + str r3, [sp, #48] @ 4-byte Spill + ldr r3, [r4, #64] + subs r1, r1, r3 + str r3, [sp, #80] @ 4-byte Spill + str r1, [sp, #44] @ 4-byte Spill + sbcs r1, r2, r8 + str r1, [sp, #40] @ 4-byte Spill + sbcs r1, r12, r9 + add r12, r4, #104 + str r1, [sp, #36] @ 4-byte Spill + sbcs r1, r11, r10 + ldr r11, [r4, #80] + str r1, [sp, #32] @ 4-byte Spill + sbcs r1, lr, r11 + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [r4, #84] + str r1, [sp, #112] @ 4-byte Spill + sbcs r1, r5, r1 + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [r4, #88] + str r1, [sp, #108] @ 4-byte Spill + sbcs r1, r6, r1 + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [r4, #92] + sbcs r0, r0, r1 + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [r4, #100] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r4, #96] + str r1, [sp, #96] @ 4-byte Spill + str r0, [sp, #100] @ 4-byte Spill + ldm r12, {r2, r3, r12} + ldr r7, [sp, #84] @ 4-byte Reload + ldr lr, [r4, #116] + ldr r5, [r4, #120] + ldr r6, [r4, #124] + sbcs r0, r7, r0 + str r12, [sp, #92] @ 4-byte Spill + str r6, [sp, #88] @ 4-byte Spill + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + sbcs r0, r0, r2 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + sbcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + sbcs r0, r0, r12 + mov r12, lr + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + sbcs r0, r0, lr + mov lr, r5 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + sbcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + sbcs r7, r0, r6 + ldr r0, [sp, #48] @ 4-byte Reload + ldr r6, [sp, #40] @ 4-byte Reload + sbc r5, r0, #0 + ldr r0, [sp, #144] @ 4-byte Reload + adds r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [r4, #32] + ldr r0, [sp, #136] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #36] @ 4-byte Reload + str r1, [r4, #36] + ldr r1, [sp, #132] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #32] @ 4-byte Reload + str r0, [r4, #40] + ldr r0, [sp, #128] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #28] @ 4-byte Reload + str r1, [r4, #44] + ldr r1, [sp, #124] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #24] @ 4-byte Reload + str r0, [r4, #48] + ldr r0, [sp, #120] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #20] @ 4-byte Reload + str r1, [r4, #52] + ldr r1, [sp, #116] @ 4-byte Reload + adcs r0, r0, r6 + 
ldr r6, [sp, #16] @ 4-byte Reload + str r0, [r4, #56] + ldr r0, [sp, #80] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #4] @ 4-byte Reload + str r1, [r4, #60] + ldr r1, [sp, #8] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #76] @ 4-byte Reload + str r0, [r4, #64] + adcs r1, r8, r1 + ldr r0, [sp, #12] @ 4-byte Reload + str r1, [r4, #68] + ldr r1, [sp, #68] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [r4, #72] + adcs r1, r10, r1 + ldr r0, [sp, #72] @ 4-byte Reload + str r1, [r4, #76] + ldr r1, [sp, #112] @ 4-byte Reload + adcs r0, r11, r0 + adcs r1, r1, r6 + str r0, [r4, #80] + ldr r0, [sp, #108] @ 4-byte Reload + ldr r6, [sp, #84] @ 4-byte Reload + str r1, [r4, #84] + ldr r1, [sp, #104] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [r4, #88] + adcs r1, r1, r7 + ldr r0, [sp, #100] @ 4-byte Reload + str r1, [r4, #92] + ldr r1, [sp, #96] @ 4-byte Reload + adcs r8, r0, r5 + ldr r5, [sp, #92] @ 4-byte Reload + ldr r0, [sp, #88] @ 4-byte Reload + adcs r1, r1, #0 + str r8, [r4, #96] + adcs r2, r2, #0 + adcs r3, r3, #0 + adcs r7, r5, #0 + adcs r6, r12, #0 + adcs r5, lr, #0 + adc r12, r0, #0 + add r0, r4, #100 + stm r0, {r1, r2, r3, r7} + str r6, [r4, #116] + str r5, [r4, #120] + str r12, [r4, #124] + add sp, sp, #276 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end240: + .size mcl_fpDbl_mulPre16L, .Lfunc_end240-mcl_fpDbl_mulPre16L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre16L + .align 2 + .type mcl_fpDbl_sqrPre16L,%function +mcl_fpDbl_sqrPre16L: @ @mcl_fpDbl_sqrPre16L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #276 + sub sp, sp, #276 + mov r5, r1 + mov r4, r0 + mov r2, r5 + bl mcl_fpDbl_mulPre8L(PLT) + add r1, r5, #32 + add r0, r4, #64 + mov r2, r1 + bl mcl_fpDbl_mulPre8L(PLT) + ldm r5, {r8, r9, r10} + ldr r0, [r5, #12] + ldr r6, [r5, #32] + ldr r7, [r5, #36] + ldr r3, [r5, #40] + add lr, r5, #44 + ldr r11, [r5, #16] + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [r5, #20] + adds r6, r8, r6 + adcs r7, r9, r7 + adcs r3, r10, r3 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [r5, #24] + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [r5, #28] + str r0, [sp, #144] @ 4-byte Spill + ldm lr, {r0, r1, r2, r12, lr} + ldr r5, [sp, #136] @ 4-byte Reload + str r6, [sp, #180] + str r7, [sp, #184] + str r6, [sp, #148] + str r3, [sp, #128] @ 4-byte Spill + str r3, [sp, #188] + str r7, [sp, #152] + adcs r10, r5, r0 + ldr r0, [sp, #140] @ 4-byte Reload + adcs r11, r11, r1 + str r10, [sp, #192] + add r1, sp, #180 + str r11, [sp, #196] + adcs r8, r0, r2 + ldr r0, [sp, #132] @ 4-byte Reload + add r2, sp, #148 + str r8, [sp, #200] + adcs r9, r0, r12 + ldr r0, [sp, #144] @ 4-byte Reload + str r9, [sp, #204] + adcs r5, r0, lr + add r0, sp, #156 + str r5, [sp, #208] + stm r0, {r3, r10, r11} + mov r0, #0 + str r8, [sp, #168] + str r9, [sp, #172] + str r5, [sp, #176] + adc r0, r0, #0 + str r0, [sp, #136] @ 4-byte Spill + add r0, sp, #212 + bl mcl_fpDbl_mulPre8L(PLT) + ldr r0, [sp, #128] @ 4-byte Reload + adds r2, r6, r6 + ldr r1, [sp, #244] + ldr r6, [sp, #248] + ldr lr, [sp, #264] + ldr r12, [sp, #268] + adcs r3, r7, r7 + adcs r7, r0, r0 + str r1, [sp, #128] @ 4-byte Spill + str r6, [sp, #116] @ 4-byte Spill + str r12, [sp, #108] @ 4-byte Spill + adcs r10, r10, r10 + adcs r0, r11, r11 + ldr r11, [sp, #252] + str r0, [sp, #144] @ 4-byte Spill + adcs r0, r8, r8 + ldr r8, [sp, #260] + str r0, [sp, #140] @ 4-byte Spill + adcs r0, r9, r9 + ldr r9, [sp, #256] + str r0, [sp, #120] @ 4-byte Spill + adc r0, r5, r5 + adds r2, 
r1, r2 + adcs r1, r6, r3 + str r2, [sp, #132] @ 4-byte Spill + ldr r6, [sp, #120] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #272] + str r1, [sp, #124] @ 4-byte Spill + ldr r1, [sp, #144] @ 4-byte Reload + adcs r7, r11, r7 + adcs r3, r9, r10 + adcs r2, r8, r1 + ldr r1, [sp, #140] @ 4-byte Reload + adcs r1, lr, r1 + adcs r10, r12, r6 + ldr r6, [sp, #112] @ 4-byte Reload + adcs r12, r0, r6 + mov r6, r0 + ldr r0, [sp, #136] @ 4-byte Reload + adc r5, r0, r5, lsr #31 + cmp r0, #0 + moveq r1, lr + moveq r2, r8 + moveq r3, r9 + moveq r7, r11 + str r1, [sp, #144] @ 4-byte Spill + ldr r1, [sp, #108] @ 4-byte Reload + str r2, [sp, #140] @ 4-byte Spill + ldr r2, [sp, #128] @ 4-byte Reload + str r3, [sp, #120] @ 4-byte Spill + add r3, sp, #216 + moveq r10, r1 + ldr r1, [sp, #116] @ 4-byte Reload + cmp r0, #0 + moveq r12, r6 + ldr r6, [sp, #124] @ 4-byte Reload + moveq r5, r0 + str r12, [sp, #112] @ 4-byte Spill + moveq r6, r1 + ldr r1, [sp, #132] @ 4-byte Reload + ldm r4, {r12, lr} + ldr r9, [sp, #212] + ldr r11, [r4, #8] + ldr r8, [r4, #12] + moveq r1, r2 + ldm r3, {r0, r2, r3} + subs r12, r9, r12 + sbcs r9, r0, lr + ldr r0, [r4, #16] + sbcs r11, r2, r11 + ldr r2, [sp, #228] + sbcs lr, r3, r8 + ldr r8, [r4, #68] + sbcs r0, r2, r0 + ldr r2, [sp, #232] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [r4, #20] + sbcs r0, r2, r0 + ldr r2, [sp, #236] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [r4, #24] + sbcs r0, r2, r0 + ldr r2, [sp, #240] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [r4, #28] + sbcs r3, r2, r0 + ldr r0, [r4, #32] + str r0, [sp, #136] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [sp, #120] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r4, #36] + str r0, [sp, #132] @ 4-byte Spill + sbcs r0, r6, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r4, #40] + str r0, [sp, #128] @ 4-byte Spill + sbcs r0, r7, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r4, #44] + str r0, [sp, #124] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r4, #48] + str r0, [sp, #120] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [r4, #52] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #144] @ 4-byte Reload + str r1, [sp, #140] @ 4-byte Spill + sbcs r0, r0, r1 + ldr r1, [sp, #112] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [r4, #56] + str r0, [sp, #144] @ 4-byte Spill + sbcs r0, r10, r0 + ldr r10, [r4, #76] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r4, #60] + str r0, [sp, #116] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [sp, #104] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + sbc r0, r5, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r4, #64] + str r0, [sp, #80] @ 4-byte Spill + subs r0, r12, r0 + add r12, r4, #104 + str r0, [sp, #44] @ 4-byte Spill + sbcs r0, r9, r8 + ldr r9, [r4, #72] + str r0, [sp, #40] @ 4-byte Spill + sbcs r0, r11, r9 + ldr r11, [r4, #80] + str r0, [sp, #36] @ 4-byte Spill + sbcs r0, lr, r10 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + sbcs r0, r0, r11 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r4, #84] + str r0, [sp, #112] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r4, #88] + str r0, [sp, #108] @ 4-byte Spill + sbcs r0, r1, r0 + ldr r1, [r4, #100] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r4, #92] + str r1, [sp, #96] @ 4-byte Spill + str r0, [sp, #104] @ 4-byte Spill + sbcs r0, r3, r0 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r4, #96] + str r0, 
[sp, #100] @ 4-byte Spill + ldm r12, {r2, r3, r12} + ldr r7, [sp, #84] @ 4-byte Reload + ldr lr, [r4, #116] + ldr r5, [r4, #120] + ldr r6, [r4, #124] + sbcs r0, r7, r0 + str r12, [sp, #92] @ 4-byte Spill + str r6, [sp, #88] @ 4-byte Spill + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + sbcs r0, r0, r2 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + sbcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + sbcs r0, r0, r12 + mov r12, lr + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + sbcs r0, r0, lr + mov lr, r5 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + sbcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + sbcs r7, r0, r6 + ldr r0, [sp, #48] @ 4-byte Reload + ldr r6, [sp, #40] @ 4-byte Reload + sbc r5, r0, #0 + ldr r0, [sp, #136] @ 4-byte Reload + adds r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [r4, #32] + ldr r0, [sp, #128] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #36] @ 4-byte Reload + str r1, [r4, #36] + ldr r1, [sp, #124] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #32] @ 4-byte Reload + str r0, [r4, #40] + ldr r0, [sp, #120] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #28] @ 4-byte Reload + str r1, [r4, #44] + ldr r1, [sp, #140] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #24] @ 4-byte Reload + str r0, [r4, #48] + ldr r0, [sp, #144] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #20] @ 4-byte Reload + str r1, [r4, #52] + ldr r1, [sp, #116] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #16] @ 4-byte Reload + str r0, [r4, #56] + ldr r0, [sp, #80] @ 4-byte Reload + adcs r1, r1, r6 + ldr r6, [sp, #4] @ 4-byte Reload + str r1, [r4, #60] + ldr r1, [sp, #8] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #76] @ 4-byte Reload + str r0, [r4, #64] + adcs r1, r8, r1 + ldr r0, [sp, #12] @ 4-byte Reload + str r1, [r4, #68] + ldr r1, [sp, #68] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [r4, #72] + adcs r1, r10, r1 + ldr r0, [sp, #72] @ 4-byte Reload + str r1, [r4, #76] + ldr r1, [sp, #112] @ 4-byte Reload + adcs r0, r11, r0 + adcs r1, r1, r6 + str r0, [r4, #80] + ldr r0, [sp, #108] @ 4-byte Reload + ldr r6, [sp, #84] @ 4-byte Reload + str r1, [r4, #84] + ldr r1, [sp, #104] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [r4, #88] + adcs r1, r1, r7 + ldr r0, [sp, #100] @ 4-byte Reload + str r1, [r4, #92] + ldr r1, [sp, #96] @ 4-byte Reload + adcs r8, r0, r5 + ldr r5, [sp, #92] @ 4-byte Reload + ldr r0, [sp, #88] @ 4-byte Reload + adcs r1, r1, #0 + str r8, [r4, #96] + adcs r2, r2, #0 + adcs r3, r3, #0 + adcs r7, r5, #0 + adcs r6, r12, #0 + adcs r5, lr, #0 + adc r12, r0, #0 + add r0, r4, #100 + stm r0, {r1, r2, r3, r7} + str r6, [r4, #116] + str r5, [r4, #120] + str r12, [r4, #124] + add sp, sp, #276 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end241: + .size mcl_fpDbl_sqrPre16L, .Lfunc_end241-mcl_fpDbl_sqrPre16L + .cantunwind + .fnend + + .globl mcl_fp_mont16L + .align 2 + .type mcl_fp_mont16L,%function +mcl_fp_mont16L: @ @mcl_fp_mont16L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #404 + sub sp, sp, #404 + .pad #2048 + sub sp, sp, #2048 + add r12, sp, #132 + add r6, sp, #2048 + mov r4, r3 + stm r12, {r1, r2, r3} + str r0, [sp, #92] @ 4-byte Spill + add r0, r6, #328 + ldr r5, 
[r3, #-4] + ldr r2, [r2] + str r5, [sp, #128] @ 4-byte Spill + bl .LmulPv512x32(PLT) + ldr r0, [sp, #2376] + ldr r1, [sp, #2380] + str r0, [sp, #72] @ 4-byte Spill + mul r2, r0, r5 + ldr r0, [sp, #2440] + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #2384] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #2436] + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [sp, #2388] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #2432] + str r1, [sp, #88] @ 4-byte Spill + mov r1, r4 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #2428] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #2424] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #2420] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #2416] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #2412] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #2408] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #2404] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #2400] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #2396] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2392] + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #2304 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #2368] + ldr r1, [sp, #132] @ 4-byte Reload + ldr r7, [sp, #2332] + ldr r4, [sp, #2328] + ldr r8, [sp, #2324] + ldr r11, [sp, #2320] + ldr r9, [sp, #2304] + ldr r10, [sp, #2308] + ldr r6, [sp, #2312] + ldr r5, [sp, #2316] + add lr, sp, #2048 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #2364] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2360] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2356] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2352] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #2348] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2344] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2340] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #2336] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + ldr r2, [r0, #4] + add r0, lr, #184 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #68] @ 4-byte Reload + ldr r2, [sp, #20] @ 4-byte Reload + ldr r3, [sp, #2248] + ldr r12, [sp, #2252] + ldr lr, [sp, #2256] + adds r0, r9, r0 + ldr r9, [sp, #2272] + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r10, r0 + ldr r10, [sp, #2276] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + ldr r6, [sp, #96] @ 4-byte Reload + adcs r0, r5, r0 + ldr r5, [sp, #2264] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r11, r0 + ldr r11, [sp, #100] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r8, r0 + ldr r8, [sp, #2268] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #2260] + adcs r1, r7, r1 + str r0, [sp, #60] @ 4-byte Spill + mov r0, #0 + ldr r7, [sp, #2232] + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #104] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #108] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, 
[sp, #40] @ 4-byte Reload + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #112] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #48] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #116] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #56] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #120] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #64] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #124] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #2244] + adc r0, r0, #0 + adds r7, r11, r7 + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #2240] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2296] + str r7, [sp, #24] @ 4-byte Spill + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2292] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #2288] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #2284] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #2280] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #2236] + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #2160 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #2224] + ldr r1, [sp, #132] @ 4-byte Reload + ldr r5, [sp, #2188] + ldr r6, [sp, #2184] + ldr r8, [sp, #2180] + ldr r9, [sp, #2176] + ldr r10, [sp, #2160] + ldr r11, [sp, #2164] + ldr r4, [sp, #2168] + ldr r7, [sp, #2172] + add lr, sp, #2048 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2220] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2216] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2212] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2208] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #2204] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2200] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2196] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #2192] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + ldr 
r2, [r0, #8] + add r0, lr, #40 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #24] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #2100] + ldr r3, [sp, #2104] + ldr r12, [sp, #2108] + ldr lr, [sp, #2112] + adds r0, r0, r10 + ldr r10, [sp, #2132] + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + ldr r11, [sp, #124] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #2116] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #2088] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #2128] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #2124] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #120] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #2120] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #2096] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + adds r7, r11, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2152] + str r7, [sp, #32] @ 4-byte Spill + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2148] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2144] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2140] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2136] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #2092] + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, 
[sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #2016 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #2080] + ldr r1, [sp, #132] @ 4-byte Reload + ldr r5, [sp, #2044] + ldr r6, [sp, #2040] + ldr r8, [sp, #2036] + ldr r9, [sp, #2032] + ldr r10, [sp, #2016] + ldr r11, [sp, #2020] + ldr r4, [sp, #2024] + ldr r7, [sp, #2028] + add lr, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2076] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2072] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2068] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2064] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #2060] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2056] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #2052] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #2048] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, lr, #920 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1956] + ldr r3, [sp, #1960] + ldr r12, [sp, #1964] + ldr lr, [sp, #1968] + adds r0, r0, r10 + ldr r10, [sp, #1988] + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + ldr r11, [sp, #124] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1972] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1944] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1984] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1980] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #120] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1976] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr 
r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1952] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + adds r7, r11, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2008] + str r7, [sp, #32] @ 4-byte Spill + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2004] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2000] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1996] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1992] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1948] + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #1872 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1936] + ldr r1, [sp, #132] @ 4-byte Reload + ldr r5, [sp, #1900] + ldr r6, [sp, #1896] + ldr r8, [sp, #1892] + ldr r9, [sp, #1888] + ldr r10, [sp, #1872] + ldr r11, [sp, #1876] + ldr r4, [sp, #1880] + ldr r7, [sp, #1884] + add lr, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1932] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1928] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1924] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1920] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1916] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1912] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1908] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1904] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, lr, #776 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1812] + ldr r3, [sp, #1816] + ldr r12, [sp, #1820] + ldr lr, [sp, #1824] + adds r0, 
r0, r10 + ldr r10, [sp, #1844] + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + ldr r11, [sp, #124] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1828] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1800] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1840] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1836] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #120] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1832] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1808] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + adds r7, r11, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1864] + str r7, [sp, #32] @ 4-byte Spill + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1860] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1856] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1852] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1848] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1804] + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 
4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #1728 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1792] + ldr r1, [sp, #132] @ 4-byte Reload + ldr r5, [sp, #1756] + ldr r6, [sp, #1752] + ldr r8, [sp, #1748] + ldr r9, [sp, #1744] + ldr r10, [sp, #1728] + ldr r11, [sp, #1732] + ldr r4, [sp, #1736] + ldr r7, [sp, #1740] + add lr, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1788] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1784] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1780] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1776] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1772] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1768] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1764] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1760] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + ldr r2, [r0, #20] + add r0, lr, #632 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1668] + ldr r3, [sp, #1672] + ldr r12, [sp, #1676] + ldr lr, [sp, #1680] + adds r0, r0, r10 + ldr r10, [sp, #1700] + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + ldr r11, [sp, #124] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1684] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1656] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1696] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1692] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #120] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1688] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + 
adcs r0, r0, r1 + ldr r1, [sp, #1664] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + adds r7, r11, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1720] + str r7, [sp, #32] @ 4-byte Spill + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1716] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1712] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1708] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1704] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1660] + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #1584 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1648] + ldr r1, [sp, #132] @ 4-byte Reload + ldr r5, [sp, #1612] + ldr r6, [sp, #1608] + ldr r8, [sp, #1604] + ldr r9, [sp, #1600] + ldr r10, [sp, #1584] + ldr r11, [sp, #1588] + ldr r4, [sp, #1592] + ldr r7, [sp, #1596] + add lr, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1644] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1640] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1636] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1632] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1628] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1624] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1620] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1616] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, lr, #488 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1524] + ldr r3, [sp, #1528] + ldr r12, [sp, #1532] + ldr lr, [sp, #1536] + adds r0, r0, r10 + ldr r10, [sp, #1556] + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + ldr r11, [sp, #124] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, 
#1540] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1512] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1552] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1548] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #120] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1544] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1520] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + adds r7, r11, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1576] + str r7, [sp, #32] @ 4-byte Spill + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1572] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1568] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1564] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1560] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1516] + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + 
ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #1440 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1504] + ldr r1, [sp, #132] @ 4-byte Reload + ldr r5, [sp, #1468] + ldr r6, [sp, #1464] + ldr r8, [sp, #1460] + ldr r9, [sp, #1456] + ldr r10, [sp, #1440] + ldr r11, [sp, #1444] + ldr r4, [sp, #1448] + ldr r7, [sp, #1452] + add lr, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1500] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1496] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1492] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1488] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1484] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1480] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1476] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1472] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, lr, #344 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1380] + ldr r3, [sp, #1384] + ldr r12, [sp, #1388] + ldr lr, [sp, #1392] + adds r0, r0, r10 + ldr r10, [sp, #1412] + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + ldr r11, [sp, #124] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1396] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1368] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1408] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1404] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #120] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1400] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1376] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + adds r7, r11, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1432] + str r7, [sp, #32] @ 4-byte 
Spill + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1428] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1424] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1420] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1416] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1372] + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #1296 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1360] + ldr r1, [sp, #132] @ 4-byte Reload + ldr r5, [sp, #1324] + ldr r6, [sp, #1320] + ldr r8, [sp, #1316] + ldr r9, [sp, #1312] + ldr r10, [sp, #1296] + ldr r11, [sp, #1300] + ldr r4, [sp, #1304] + ldr r7, [sp, #1308] + add lr, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1356] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1352] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1348] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1344] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1340] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1336] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1332] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1328] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, lr, #200 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1236] + ldr r3, [sp, #1240] + ldr r12, [sp, #1244] + ldr lr, [sp, #1248] + adds r0, r0, r10 + ldr r10, [sp, #1268] + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + ldr r11, [sp, #124] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1252] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1224] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, 
#1264] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1260] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #120] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1256] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1232] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + adds r7, r11, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1288] + str r7, [sp, #32] @ 4-byte Spill + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1284] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1280] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1276] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1272] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1228] + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, 
[sp, #60] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #1152 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1216] + ldr r1, [sp, #132] @ 4-byte Reload + ldr r5, [sp, #1180] + ldr r6, [sp, #1176] + ldr r8, [sp, #1172] + ldr r9, [sp, #1168] + ldr r10, [sp, #1152] + ldr r11, [sp, #1156] + ldr r4, [sp, #1160] + ldr r7, [sp, #1164] + add lr, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1212] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1208] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1204] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1200] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1196] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1192] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1188] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1184] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + ldr r2, [r0, #36] + add r0, lr, #56 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1092] + ldr r3, [sp, #1096] + ldr r12, [sp, #1100] + ldr lr, [sp, #1104] + adds r0, r0, r10 + ldr r10, [sp, #1124] + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + ldr r11, [sp, #124] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1108] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1080] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1120] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1116] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #120] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1112] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1088] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + adds r7, r11, r7 + ldr r11, [sp, #128] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1144] + str r7, [sp, #32] @ 4-byte Spill + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1140] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1136] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1132] + str r0, 
[sp, #40] @ 4-byte Spill + ldr r0, [sp, #1128] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1084] + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r7, r11 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #1008 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1072] + add r10, sp, #1008 + ldr r4, [sp, #1032] + ldr r5, [sp, #1028] + ldr r6, [sp, #1024] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1068] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1064] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1060] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1056] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1052] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1048] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1044] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1040] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1036] + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r7, r8, r9, r10} + ldr r0, [sp, #136] @ 4-byte Reload + ldr r1, [sp, #132] @ 4-byte Reload + ldr r2, [r0, #40] + add r0, sp, #936 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #952 + adds r0, r0, r7 + ldr r7, [sp, #948] + ldr r0, [sp, #124] @ 4-byte Reload + adcs r2, r0, r8 + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #976 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #940] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #944] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #936] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte 
Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + adds r0, r2, r4 + mul r1, r0, r11 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #1000] + str r1, [sp, #48] @ 4-byte Spill + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #996] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #992] + str r0, [sp, #40] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #120] @ 4-byte Reload + adcs r6, r11, r6 + str r6, [sp, #120] @ 4-byte Spill + ldr r6, [sp, #116] @ 4-byte Reload + adcs r5, r6, r5 + str r5, [sp, #116] @ 4-byte Spill + ldr r5, [sp, #112] @ 4-byte Reload + adcs r5, r5, r7 + str r5, [sp, #112] @ 4-byte Spill + ldr r5, [sp, #108] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + add r0, sp, #864 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #928] + add r10, sp, #864 + ldr r11, [sp, #892] + ldr r4, [sp, #888] + ldr r5, [sp, #884] + ldr r6, [sp, #880] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #924] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #920] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #916] + str r0, [sp, #36] @ 4-byte Spill + ldr 
r0, [sp, #912] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #908] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #904] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #900] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #896] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r7, r8, r9, r10} + ldr r0, [sp, #136] @ 4-byte Reload + ldr r1, [sp, #132] @ 4-byte Reload + ldr r2, [r0, #44] + add r0, sp, #792 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #124] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + add lr, sp, #808 + adds r0, r0, r7 + ldr r7, [sp, #804] + ldr r0, [sp, #120] @ 4-byte Reload + adcs r2, r0, r8 + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #832 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #796] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #800] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #792] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + adds r1, r2, r4 + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + str r1, [sp, #124] @ 4-byte Spill + mul r2, r1, r0 + ldr r0, [sp, #856] + str r2, [sp, #28] @ 4-byte Spill + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #852] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #848] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #120] @ 4-byte Reload + adcs r6, r11, r6 + str r6, [sp, #72] @ 4-byte Spill + ldr r6, [sp, #116] @ 4-byte Reload + adcs r5, r6, r5 + str r5, [sp, #68] @ 4-byte Spill + ldr r5, [sp, #112] @ 4-byte Reload + adcs r5, r5, r7 + str r5, [sp, #64] @ 4-byte Spill + ldr r5, [sp, #108] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + 
adcs r0, r0, r12 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + add r0, sp, #720 + bl .LmulPv512x32(PLT) + ldr r1, [sp, #784] + add r10, sp, #720 + ldr r5, [sp, #748] + ldr r6, [sp, #744] + ldr r7, [sp, #740] + ldr r11, [sp, #736] + add r0, sp, #648 + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #780] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #776] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #772] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #768] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #764] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #760] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #756] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #752] + str r1, [sp, #16] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r1, [sp, #136] @ 4-byte Reload + ldr r4, [sp, #732] + ldr r2, [r1, #48] + ldr r1, [sp, #132] @ 4-byte Reload + bl .LmulPv512x32(PLT) + ldr r0, [sp, #124] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + add lr, sp, #652 + adds r0, r0, r8 + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #676 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr 
r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #712] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #708] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #704] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #700] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #696] + str r0, [sp, #28] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #648] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #124] @ 4-byte Reload + ldr r7, [sp, #72] @ 4-byte Reload + adds r6, r11, r6 + adcs r0, r7, r0 + str r6, [sp, #32] @ 4-byte Spill + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #576 + bl .LmulPv512x32(PLT) + ldr r1, [sp, #640] + add r11, sp, #584 + ldr r6, [sp, #604] + ldr r5, [sp, #600] + ldr r8, [sp, #596] + ldr r9, [sp, #576] + ldr r10, [sp, #580] + add r0, sp, #504 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #636] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #632] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #628] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #624] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #620] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #616] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #612] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #608] + str r1, [sp, #16] @ 4-byte Spill + ldm r11, {r4, r7, r11} + ldr r1, [sp, #136] @ 4-byte Reload + ldr r2, [r1, #52] + ldr r1, [sp, #132] @ 4-byte Reload + bl .LmulPv512x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + add lr, sp, #508 + adds r0, r0, r9 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #532 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + 
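+ @ [editorial note] Every unrolled round of mcl_fp_mont16L has the same
+ @ shape: t += x[i]*y via .LmulPv512x32, then q = t0 * inv (inv reloaded
+ @ from [sp, #128]) and a second .LmulPv512x32 to add q*p, i.e. the usual
+ @ word-serial Montgomery step; only the stack offsets differ per round.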
ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #568] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #564] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #560] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #556] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #552] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #504] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #76] @ 4-byte Reload + ldr r7, [sp, #72] @ 4-byte Reload + adds r6, r11, r6 + adcs r0, r7, r0 + str r6, [sp, #32] @ 4-byte Spill + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, 
r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #432 + bl .LmulPv512x32(PLT) + ldr r1, [sp, #496] + add r11, sp, #440 + ldr r6, [sp, #460] + ldr r5, [sp, #456] + ldr r8, [sp, #452] + ldr r9, [sp, #432] + ldr r10, [sp, #436] + add r0, sp, #360 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #492] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #488] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #484] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #480] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #476] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #472] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #468] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #464] + str r1, [sp, #16] @ 4-byte Spill + ldm r11, {r4, r7, r11} + ldr r1, [sp, #136] @ 4-byte Reload + ldr r2, [r1, #56] + ldr r1, [sp, #132] @ 4-byte Reload + bl .LmulPv512x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + add lr, sp, #364 + adds r0, r0, r9 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #388 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #424] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #420] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #416] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #412] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #408] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #360] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #76] @ 4-byte Reload + ldr r7, [sp, #72] @ 4-byte Reload + adds r6, 
r11, r6 + adcs r0, r7, r0 + str r6, [sp, #32] @ 4-byte Spill + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #288 + bl .LmulPv512x32(PLT) + ldr r1, [sp, #352] + add r11, sp, #296 + ldr r7, [sp, #316] + ldr r9, [sp, #288] + ldr r5, [sp, #292] + add r0, sp, #216 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #348] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #344] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #340] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #336] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #332] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #328] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #324] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #320] + str r1, [sp, #16] @ 4-byte Spill + ldm r11, {r4, r6, r8, r10, r11} + ldr r1, [sp, #136] @ 4-byte Reload + ldr r2, [r1, #60] + ldr r1, [sp, #132] @ 4-byte Reload + bl .LmulPv512x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #72] @ 4-byte Reload + ldr r2, [sp, #16] @ 4-byte Reload + add lr, sp, #232 + adds r0, r0, r9 + add r9, sp, #216 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + adcs r1, r1, r4 + str r1, [sp, #136] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r1, r6 + str r1, [sp, #132] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r8 + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r10 + add r10, sp, #256 + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r11 + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #124] @ 4-byte Reload + adcs r1, r1, r7 + str r1, [sp, #124] @ 4-byte Spill + ldr r1, [sp, #120] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #120] @ 4-byte Spill + ldr r1, [sp, #116] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, 
[sp, #116] @ 4-byte Spill + ldr r1, [sp, #112] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #112] @ 4-byte Spill + ldr r1, [sp, #108] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #108] @ 4-byte Spill + ldr r1, [sp, #104] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [sp, #100] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adc r1, r1, #0 + str r1, [sp, #80] @ 4-byte Spill + ldm r9, {r4, r7, r9} + ldr r5, [sp, #228] + adds r8, r0, r4 + ldr r0, [sp, #128] @ 4-byte Reload + mul r1, r8, r0 + ldr r0, [sp, #280] + str r1, [sp, #64] @ 4-byte Spill + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #276] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #272] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #268] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r4, r6, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #136] @ 4-byte Reload + adcs r11, r11, r7 + ldr r7, [sp, #132] @ 4-byte Reload + adcs r9, r7, r9 + ldr r7, [sp, #76] @ 4-byte Reload + adcs r5, r7, r5 + ldr r7, [sp, #72] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #64] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #140] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r10, r0, r1 + ldr r0, [sp, #84] @ 4-byte Reload + ldr r1, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r6 + str r0, [sp, #84] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + add r0, sp, #144 + bl .LmulPv512x32(PLT) + add r3, sp, #144 + ldm r3, {r0, r1, r2, r3} + adds r0, r8, r0 + adcs r7, r11, r1 + ldr r0, [sp, #160] + ldr r1, [sp, #76] @ 4-byte Reload + adcs r8, r9, r2 + str r7, [sp, #56] @ 4-byte Spill + adcs r5, r5, r3 + mov r3, r6 + str r8, [sp, #64] @ 4-byte Spill + str r5, [sp, #72] @ 4-byte Spill + adcs r4, r1, r0 + ldr r0, [sp, #164] + ldr r1, [sp, #48] @ 4-byte Reload + str r4, [sp, #76] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #168] 
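+ @ [editorial note] Final round: the last product limbs at [sp, #160] up
+ @ to [sp, #208] are absorbed here, then the subs/sbcs chain below forms
+ @ t - p and the movne sequence keeps t whenever the subtraction borrows,
+ @ the usual Montgomery conditional final subtraction.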
+ adcs lr, r1, r0 + ldr r0, [sp, #172] + ldr r1, [sp, #60] @ 4-byte Reload + str lr, [sp, #52] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #68] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #176] + adcs r0, r1, r0 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #180] + adcs r0, r1, r0 + ldr r1, [sp, #120] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #184] + adcs r0, r1, r0 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #188] + adcs r0, r1, r0 + ldr r1, [sp, #128] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #192] + adcs r0, r1, r0 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #196] + adcs r0, r1, r0 + ldr r1, [sp, #136] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #200] + adcs r0, r10, r0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #204] + adcs r0, r1, r0 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #208] + adcs r0, r1, r0 + ldr r1, [r3] + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adc r0, r0, #0 + subs r12, r7, r1 + str r0, [sp, #68] @ 4-byte Spill + ldmib r3, {r0, r2, r6} + ldr r1, [r3, #32] + ldr r11, [r3, #40] + ldr r9, [r3, #28] + sbcs r7, r8, r0 + ldr r0, [r3, #36] + sbcs r5, r5, r2 + ldr r2, [sp, #96] @ 4-byte Reload + sbcs r10, r4, r6 + ldr r6, [r3, #20] + ldr r4, [r3, #24] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r3, #44] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [r3, #48] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r3, #52] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r3, #56] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [r3, #60] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r3, #16] + sbcs r2, r2, r0 + ldr r0, [sp, #100] @ 4-byte Reload + sbcs r3, lr, r6 + ldr r6, [sp, #64] @ 4-byte Reload + sbcs lr, r0, r4 + ldr r0, [sp, #104] @ 4-byte Reload + sbcs r4, r0, r9 + ldr r0, [sp, #108] @ 4-byte Reload + sbcs r8, r0, r1 + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #44] @ 4-byte Reload + sbcs r9, r0, r1 + ldr r0, [sp, #116] @ 4-byte Reload + ldr r1, [sp, #60] @ 4-byte Reload + sbcs r11, r0, r11 + ldr r0, [sp, #120] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #80] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #88] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + sbcs r0, r0, r1 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + sbc r0, r0, #0 + ands r1, r0, #1 + ldr r0, [sp, #56] @ 4-byte Reload + movne r7, r6 + movne r12, r0 + ldr r0, [sp, #92] @ 4-byte Reload + str r12, [r0] + str r7, [r0, #4] + ldr r7, [sp, #72] @ 4-byte Reload + movne r5, r7 + ldr r7, [sp, #76] @ 4-byte Reload + cmp r1, #0 + str r5, [r0, #8] + movne r10, r7 + ldr r7, [sp, #96] @ 4-byte Reload + str r10, [r0, #12] + movne r2, r7 + str r2, [r0, #16] + ldr r2, [sp, #52] @ 4-byte Reload + movne r3, r2 + ldr r2, [sp, #100] @ 4-byte Reload + cmp r1, #0 + str r3, [r0, #20] + ldr r3, [sp, #60] @ 4-byte Reload + movne lr, r2 + ldr r2, [sp, #104] @ 4-byte Reload + str lr, [r0, #24] + movne r4, r2 + ldr r2, [sp, #108] @ 4-byte 
Reload + str r4, [r0, #28] + movne r8, r2 + ldr r2, [sp, #112] @ 4-byte Reload + cmp r1, #0 + str r8, [r0, #32] + movne r9, r2 + ldr r2, [sp, #116] @ 4-byte Reload + str r9, [r0, #36] + movne r11, r2 + ldr r2, [sp, #120] @ 4-byte Reload + str r11, [r0, #40] + movne r3, r2 + ldr r2, [sp, #124] @ 4-byte Reload + cmp r1, #0 + str r3, [r0, #44] + ldr r3, [sp, #80] @ 4-byte Reload + movne r3, r2 + ldr r2, [sp, #128] @ 4-byte Reload + str r3, [r0, #48] + ldr r3, [sp, #84] @ 4-byte Reload + movne r3, r2 + ldr r2, [sp, #132] @ 4-byte Reload + str r3, [r0, #52] + ldr r3, [sp, #88] @ 4-byte Reload + movne r3, r2 + cmp r1, #0 + ldr r1, [sp, #136] @ 4-byte Reload + ldr r2, [sp, #140] @ 4-byte Reload + str r3, [r0, #56] + movne r2, r1 + str r2, [r0, #60] + add sp, sp, #404 + add sp, sp, #2048 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end242: + .size mcl_fp_mont16L, .Lfunc_end242-mcl_fp_mont16L + .cantunwind + .fnend + + .globl mcl_fp_montNF16L + .align 2 + .type mcl_fp_montNF16L,%function +mcl_fp_montNF16L: @ @mcl_fp_montNF16L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #396 + sub sp, sp, #396 + .pad #2048 + sub sp, sp, #2048 + add r12, sp, #124 + mov r4, r3 + stm r12, {r1, r2, r3} + str r0, [sp, #92] @ 4-byte Spill + add r0, sp, #2368 + ldr r5, [r3, #-4] + ldr r2, [r2] + str r5, [sp, #120] @ 4-byte Spill + bl .LmulPv512x32(PLT) + ldr r0, [sp, #2368] + ldr r1, [sp, #2372] + add r9, sp, #2048 + str r0, [sp, #68] @ 4-byte Spill + mul r2, r0, r5 + ldr r0, [sp, #2432] + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #2376] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #2428] + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #2380] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #2424] + str r1, [sp, #80] @ 4-byte Spill + mov r1, r4 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #2420] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #2416] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #2412] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #2408] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #2404] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #2400] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #2396] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #2392] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2388] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2384] + str r0, [sp, #40] @ 4-byte Spill + add r0, r9, #248 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #2360] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r5, [sp, #2324] + ldr r6, [sp, #2320] + ldr r7, [sp, #2316] + ldr r8, [sp, #2312] + ldr r10, [sp, #2296] + ldr r11, [sp, #2300] + ldr r4, [sp, #2304] + ldr r9, [sp, #2308] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2356] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2352] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #2348] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2344] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2340] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #2336] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #2332] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #2328] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #4] + add r0, sp, #2224 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #68] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #2236] + ldr r3, [sp, #2240] + ldr r12, [sp, #2244] + ldr lr, [sp, #2248] + adds r0, r10, r0 + ldr r10, [sp, #2268] + ldr r0, [sp, 
#88] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + ldr r11, [sp, #88] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #2252] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r9, r0 + ldr r9, [sp, #2264] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r8, r0 + ldr r8, [sp, #2260] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r7, r0 + ldr r7, [sp, #84] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #2224] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r5, r0 + ldr r5, [sp, #2256] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adc r0, r1, r0 + adds r6, r11, r6 + ldr r1, [sp, #2232] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2288] + str r6, [sp, #20] @ 4-byte Spill + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #2284] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #2280] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #2276] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #2272] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #2228] + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #4] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #2048 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte 
Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, r4, #104 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #2216] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #2180] + ldr r7, [sp, #2176] + ldr r5, [sp, #2172] + ldr r8, [sp, #2168] + ldr r9, [sp, #2152] + ldr r10, [sp, #2156] + ldr r11, [sp, #2160] + ldr r4, [sp, #2164] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2212] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2208] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2204] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #2200] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2196] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2192] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #2188] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #2184] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #8] + add r0, sp, #2080 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #20] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #2092] + ldr r3, [sp, #2096] + ldr r12, [sp, #2100] + ldr lr, [sp, #2104] + adds r0, r0, r9 + ldr r9, [sp, #2120] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #2124] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #116] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #2108] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #2116] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #2112] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #112] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #2080] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, r1 + adds r6, r11, r6 + ldr r1, [sp, #2088] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2144] + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2140] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, 
#2136] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2132] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #2128] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2084] + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #1024 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, r4, #984 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #2072] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #2036] + ldr r7, [sp, #2032] + ldr r5, [sp, #2028] + ldr r8, [sp, #2024] + ldr r9, [sp, #2008] + ldr r10, [sp, #2012] + ldr r11, [sp, #2016] + ldr r4, [sp, #2020] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2068] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2064] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2060] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #2056] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2052] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #2048] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #2044] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #2040] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, sp, #1936 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1948] + ldr r3, [sp, #1952] + ldr r12, [sp, #1956] + ldr lr, [sp, #1960] + adds r0, r0, r9 + ldr r9, [sp, #1976] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1980] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #116] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1964] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1972] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1968] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, 
[sp, #112] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1936] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, r1 + adds r6, r11, r6 + ldr r1, [sp, #1944] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2000] + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1996] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1992] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1988] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1984] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1940] + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #1024 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, r4, #840 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1928] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #1892] + ldr r7, [sp, #1888] + ldr r5, [sp, #1884] + ldr r8, [sp, #1880] + ldr r9, [sp, #1864] + ldr r10, [sp, #1868] + ldr r11, [sp, #1872] 
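+ @ [editorial note] Reload of the 17-word .LmulPv512x32 output for this
+ @ round, spilled from [sp, #1864] upward. mcl_fp_montNF16L appears to be
+ @ mcl's variant for moduli whose top bit is clear, letting it skip the
+ @ full-bit carry handling; the word-serial round structure is otherwise
+ @ the same as in mcl_fp_mont16L.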
+ ldr r4, [sp, #1876] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1924] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1920] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1916] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1912] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1908] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1904] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1900] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1896] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, sp, #1792 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1804] + ldr r3, [sp, #1808] + ldr r12, [sp, #1812] + ldr lr, [sp, #1816] + adds r0, r0, r9 + ldr r9, [sp, #1832] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1836] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #116] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1820] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1828] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1824] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #112] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1792] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, r1 + adds r6, r11, r6 + ldr r1, [sp, #1800] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1856] + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1852] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1848] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1844] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1840] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1796] + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte 
Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #1024 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, r4, #696 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1784] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #1748] + ldr r7, [sp, #1744] + ldr r5, [sp, #1740] + ldr r8, [sp, #1736] + ldr r9, [sp, #1720] + ldr r10, [sp, #1724] + ldr r11, [sp, #1728] + ldr r4, [sp, #1732] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1780] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1776] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1772] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1768] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1764] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1760] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1756] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1752] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #20] + add r0, sp, #1648 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1660] + ldr r3, [sp, #1664] + ldr r12, [sp, #1668] + ldr lr, [sp, #1672] + adds r0, r0, r9 + ldr r9, [sp, #1688] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1692] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #116] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1676] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1684] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1680] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #112] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1648] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 
4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, r1 + adds r6, r11, r6 + ldr r1, [sp, #1656] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1712] + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1708] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1704] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1700] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1696] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1652] + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #1024 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, r4, #552 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1640] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #1604] + ldr r7, [sp, #1600] + ldr r5, [sp, #1596] + ldr r8, [sp, #1592] + ldr r9, [sp, #1576] + ldr r10, [sp, #1580] + ldr r11, [sp, #1584] + ldr r4, [sp, #1588] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1636] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1632] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1628] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1624] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1620] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1616] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1612] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1608] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, sp, #1504 + bl .LmulPv512x32(PLT) + ldr r0, 
[sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1516] + ldr r3, [sp, #1520] + ldr r12, [sp, #1524] + ldr lr, [sp, #1528] + adds r0, r0, r9 + ldr r9, [sp, #1544] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1548] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #116] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1532] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1540] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1536] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #112] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1504] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, r1 + adds r6, r11, r6 + ldr r1, [sp, #1512] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1568] + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1564] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1560] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1556] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1552] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1508] + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #1024 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr 
r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, r4, #408 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1496] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #1460] + ldr r7, [sp, #1456] + ldr r5, [sp, #1452] + ldr r8, [sp, #1448] + ldr r9, [sp, #1432] + ldr r10, [sp, #1436] + ldr r11, [sp, #1440] + ldr r4, [sp, #1444] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1492] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1488] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1484] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1480] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1476] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1472] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1468] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1464] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, sp, #1360 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1372] + ldr r3, [sp, #1376] + ldr r12, [sp, #1380] + ldr lr, [sp, #1384] + adds r0, r0, r9 + ldr r9, [sp, #1400] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1404] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #116] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1388] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1396] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1392] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #112] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1360] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, r1 + adds r6, r11, r6 + ldr 
r1, [sp, #1368] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1424] + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1420] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1416] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1412] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1408] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1364] + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #1024 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, r4, #264 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1352] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #1316] + ldr r7, [sp, #1312] + ldr r5, [sp, #1308] + ldr r8, [sp, #1304] + ldr r9, [sp, #1288] + ldr r10, [sp, #1292] + ldr r11, [sp, #1296] + ldr r4, [sp, #1300] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1348] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1344] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1340] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1336] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1332] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1328] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1324] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1320] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, sp, #1216 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1228] + ldr r3, [sp, #1232] + ldr r12, [sp, #1236] + ldr lr, [sp, #1240] + adds r0, r0, r9 + ldr r9, [sp, #1256] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1260] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #116] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1244] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1252] + 
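+ @ [editorial note] The same accumulate-then-reduce pattern continues
+ @ below for the remaining words of x, with only the scratch offsets
+ @ changed; the generator unrolls all 16 words of the 512-bit operand
+ @ instead of looping.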
str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1248] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #112] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1216] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, r1 + adds r6, r11, r6 + ldr r1, [sp, #1224] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1280] + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1276] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1272] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1268] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1264] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1220] + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #1024 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, r4, #120 + bl .LmulPv512x32(PLT) + 
ldr r0, [sp, #1208] + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #1172] + ldr r7, [sp, #1168] + ldr r5, [sp, #1164] + ldr r8, [sp, #1160] + ldr r9, [sp, #1144] + ldr r10, [sp, #1148] + ldr r11, [sp, #1152] + ldr r4, [sp, #1156] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1204] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1200] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1196] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1192] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1188] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1184] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1180] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1176] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r2, [r0, #36] + add r0, sp, #1072 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + ldr r2, [sp, #1084] + ldr r3, [sp, #1088] + ldr r12, [sp, #1092] + ldr lr, [sp, #1096] + adds r0, r0, r9 + ldr r9, [sp, #1112] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1116] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r11 + ldr r11, [sp, #116] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1100] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1108] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1104] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #112] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1072] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, r1 + adds r6, r11, r6 + ldr r1, [sp, #1080] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1136] + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1132] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1128] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1124] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1120] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1076] + adcs r0, r7, r0 + ldr r7, [sp, #120] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, 
#104] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r6, r7 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + add r0, sp, #1000 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1064] + add r11, sp, #1000 + ldr r6, [sp, #1024] + ldr r5, [sp, #1020] + ldr r8, [sp, #1016] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1060] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1056] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1052] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1048] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1044] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1040] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1036] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1032] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1028] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldr r0, [sp, #128] @ 4-byte Reload + ldr r1, [sp, #124] @ 4-byte Reload + ldr r4, [sp, #1012] + ldr r2, [r0, #40] + add r0, sp, #928 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + add lr, sp, #944 + adds r0, r0, r9 + ldr r0, [sp, #116] @ 4-byte Reload + adcs r2, r0, r10 + ldr r0, [sp, #112] @ 4-byte Reload + add r10, sp, #968 + adcs r0, r0, r11 + ldr r11, [sp, #932] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #940] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #928] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #936] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, 
r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #52] @ 4-byte Spill + adds r0, r2, r5 + mul r1, r0, r7 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #992] + str r1, [sp, #44] @ 4-byte Spill + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #988] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #984] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r4, r5, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #112] @ 4-byte Reload + adcs r7, r7, r11 + str r7, [sp, #112] @ 4-byte Spill + ldr r7, [sp, #108] @ 4-byte Reload + adcs r6, r7, r6 + str r6, [sp, #108] @ 4-byte Spill + ldr r6, [sp, #104] @ 4-byte Reload + adcs r6, r6, r8 + str r6, [sp, #104] @ 4-byte Spill + ldr r6, [sp, #100] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + add r0, sp, #856 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #920] + add r11, sp, #856 + ldr r4, [sp, #884] + ldr r7, [sp, #880] + ldr r5, [sp, #876] + ldr r6, [sp, #872] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #916] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #912] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #908] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #904] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #900] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #896] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #892] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #888] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #128] @ 4-byte Reload + ldr r1, [sp, #124] @ 4-byte Reload + ldr r2, [r0, #44] + add r0, sp, #784 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #116] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #800 + adds r0, r0, r8 + ldr r0, [sp, #112] @ 4-byte Reload + 
adcs r2, r0, r9 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #824 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #788] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #792] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #796] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #784] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, r1 + adds r1, r2, r4 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + str r1, [sp, #116] @ 4-byte Spill + mul r2, r1, r0 + ldr r0, [sp, #848] + str r2, [sp, #24] @ 4-byte Spill + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #844] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #840] + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r4, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #112] @ 4-byte Reload + adcs r6, r11, r6 + str r6, [sp, #68] @ 4-byte Spill + ldr r6, [sp, #108] @ 4-byte Reload + adcs r5, r6, r5 + str r5, [sp, #64] @ 4-byte Spill + ldr r5, [sp, #104] @ 4-byte Reload + adcs r5, r5, r7 + str r5, [sp, #60] @ 4-byte Spill + ldr r5, [sp, #100] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr 
r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + add r0, sp, #712 + bl .LmulPv512x32(PLT) + ldr r1, [sp, #776] + ldr r11, [sp, #740] + ldr r8, [sp, #736] + ldr r9, [sp, #732] + ldr r10, [sp, #728] + ldr r6, [sp, #712] + ldr r7, [sp, #716] + ldr r5, [sp, #720] + ldr r4, [sp, #724] + add r0, sp, #640 + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #772] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #768] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #764] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #760] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #756] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #752] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #748] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #744] + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #128] @ 4-byte Reload + ldr r2, [r1, #48] + ldr r1, [sp, #124] @ 4-byte Reload + bl .LmulPv512x32(PLT) + ldr r0, [sp, #116] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #644 + adds r0, r0, r6 + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #668 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #704] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #700] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #696] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #692] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #688] + str r0, [sp, #24] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #640] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #116] @ 4-byte Reload + ldr r7, [sp, #68] @ 4-byte Reload + adds r6, r11, r6 + adcs r0, r7, r0 + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, 
#60] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #568 + bl .LmulPv512x32(PLT) + ldr r1, [sp, #632] + ldr r6, [sp, #596] + ldr r7, [sp, #592] + ldr r8, [sp, #588] + ldr r5, [sp, #584] + ldr r9, [sp, #568] + ldr r10, [sp, #572] + ldr r4, [sp, #576] + ldr r11, [sp, #580] + add r0, sp, #496 + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #628] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #624] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #620] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #616] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #612] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #608] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #604] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #600] + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #128] @ 4-byte Reload + ldr r2, [r1, #52] + ldr r1, [sp, #124] @ 4-byte Reload + bl .LmulPv512x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #500 + adds r0, r0, r9 + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #524 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte 
Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #560] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #556] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #552] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #548] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #544] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #496] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #72] @ 4-byte Reload + ldr r7, [sp, #68] @ 4-byte Reload + adds r6, r11, r6 + adcs r0, r7, r0 + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #424 + bl .LmulPv512x32(PLT) + ldr r1, [sp, #488] + ldr r6, [sp, #452] + ldr r7, [sp, #448] + ldr r8, [sp, #444] + ldr r5, [sp, #440] + ldr r9, [sp, #424] + ldr r10, [sp, #428] + ldr r4, [sp, #432] + ldr r11, [sp, #436] + add r0, sp, #352 + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #484] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #480] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #476] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #472] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #468] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #464] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #460] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #456] + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #128] @ 4-byte Reload + ldr r2, [r1, #56] + ldr r1, [sp, #124] @ 4-byte Reload 
+ bl .LmulPv512x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #356 + adds r0, r0, r9 + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #380 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #416] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #412] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #408] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #404] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #400] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r6, [sp, #352] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #72] @ 4-byte Reload + ldr r7, [sp, #68] @ 4-byte Reload + adds r6, r11, r6 + adcs r0, r7, r0 + str r6, [sp, #28] @ 4-byte Spill + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, 
[sp, #40] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + mul r2, r6, r0 + add r0, sp, #280 + bl .LmulPv512x32(PLT) + ldr r1, [sp, #344] + add r11, sp, #284 + ldr r8, [sp, #308] + ldr r9, [sp, #304] + ldr r10, [sp, #300] + ldr r7, [sp, #280] + add r0, sp, #208 + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #340] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #336] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #332] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #328] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #324] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #320] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #316] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #312] + str r1, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r5, r6, r11} + ldr r1, [sp, #128] @ 4-byte Reload + ldr r2, [r1, #60] + ldr r1, [sp, #124] @ 4-byte Reload + bl .LmulPv512x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #68] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + add lr, sp, #224 + adds r0, r0, r7 + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r4 + adcs r1, r1, r5 + str r1, [sp, #128] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r6 + str r1, [sp, #124] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r11 + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adcs r1, r1, r10 + add r10, sp, #248 + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #52] @ 4-byte Reload + adcs r1, r1, r9 + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #116] @ 4-byte Reload + adcs r1, r1, r8 + add r8, sp, #208 + str r1, [sp, #116] @ 4-byte Spill + ldr r1, [sp, #112] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #112] @ 4-byte Spill + ldr r1, [sp, #108] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #108] @ 4-byte Spill + ldr r1, [sp, #104] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [sp, #100] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #96] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adc r1, r1, r2 + str r1, [sp, #76] @ 4-byte Spill + ldm r8, {r4, r5, r6, r8} + adds r9, r0, r4 + ldr r0, [sp, #120] @ 4-byte Reload + mul r1, r9, r0 + ldr r0, [sp, #272] + str r1, [sp, #60] @ 4-byte Spill + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #268] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #264] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #260] + str r0, [sp, #40] @ 4-byte Spill + ldm r10, {r4, r7, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #128] @ 
4-byte Reload + adcs r11, r11, r5 + ldr r5, [sp, #124] @ 4-byte Reload + adcs r6, r5, r6 + ldr r5, [sp, #72] @ 4-byte Reload + adcs r8, r5, r8 + ldr r5, [sp, #68] @ 4-byte Reload + adcs r5, r5, r0 + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #60] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #132] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r7 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r10, r0, #0 + add r0, sp, #136 + bl .LmulPv512x32(PLT) + add r3, sp, #136 + ldm r3, {r0, r1, r2, r3} + adds r0, r9, r0 + ldr r0, [sp, #152] + adcs r4, r11, r1 + ldr r1, [sp, #52] @ 4-byte Reload + adcs r6, r6, r2 + str r4, [sp, #48] @ 4-byte Spill + adcs lr, r8, r3 + mov r3, r7 + str r6, [sp, #56] @ 4-byte Spill + str lr, [sp, #60] @ 4-byte Spill + adcs r5, r5, r0 + ldr r0, [sp, #156] + str r5, [sp, #68] @ 4-byte Spill + adcs r9, r1, r0 + ldr r0, [sp, #160] + ldr r1, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #72] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #164] + adcs r0, r1, r0 + ldr r1, [sp, #108] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #168] + adcs r0, r1, r0 + ldr r1, [sp, #112] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #172] + adcs r0, r1, r0 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #176] + adcs r0, r1, r0 + ldr r1, [sp, #120] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #180] + adcs r0, r1, r0 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #184] + adcs r0, r1, r0 + ldr r1, [sp, #128] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #188] + adcs r0, r1, r0 + ldr r1, [sp, #100] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #192] + adcs r0, r1, r0 + ldr r1, [sp, #104] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #196] + adcs r0, r1, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #200] + adc r10, r10, r0 + ldm r3, {r0, r7} + ldr r1, [r3, #8] + ldr r2, [r3, #12] + subs r12, r4, r0 + ldr r0, [r3, #32] + sbcs r4, r6, r7 + ldr r7, [r3, #60] + sbcs r6, lr, r1 + add lr, r3, #16 + ldr r1, [r3, #28] + sbcs r8, r5, r2 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r3, #36] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r3, #40] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r3, #44] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r3, #48] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [r3, #52] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, 
[r3, #56] + str r0, [sp, #52] @ 4-byte Spill + ldm lr, {r0, r5, lr} + ldr r11, [sp, #84] @ 4-byte Reload + sbcs r2, r9, r0 + ldr r0, [sp, #96] @ 4-byte Reload + sbcs r3, r0, r5 + ldr r0, [sp, #108] @ 4-byte Reload + sbcs lr, r11, lr + sbcs r5, r0, r1 + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #64] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #72] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #76] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #80] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #88] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + sbcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + sbcs r0, r0, r1 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + sbc r1, r10, r7 + ldr r7, [sp, #56] @ 4-byte Reload + cmp r1, #0 + movlt r12, r0 + ldr r0, [sp, #92] @ 4-byte Reload + movlt r4, r7 + ldr r7, [sp, #52] @ 4-byte Reload + str r12, [r0] + str r4, [r0, #4] + ldr r4, [sp, #60] @ 4-byte Reload + ldr r12, [sp, #64] @ 4-byte Reload + movlt r6, r4 + cmp r1, #0 + ldr r4, [sp, #88] @ 4-byte Reload + str r6, [r0, #8] + ldr r6, [sp, #68] @ 4-byte Reload + movlt r2, r9 + movlt r8, r6 + ldr r6, [sp, #76] @ 4-byte Reload + str r8, [r0, #12] + str r2, [r0, #16] + ldr r2, [sp, #96] @ 4-byte Reload + movlt r3, r2 + ldr r2, [sp, #108] @ 4-byte Reload + cmp r1, #0 + movlt lr, r11 + str r3, [r0, #20] + ldr r3, [sp, #132] @ 4-byte Reload + str lr, [r0, #24] + ldr lr, [sp, #72] @ 4-byte Reload + movlt r5, r2 + ldr r2, [sp, #112] @ 4-byte Reload + str r5, [r0, #28] + ldr r5, [sp, #80] @ 4-byte Reload + movlt r12, r2 + ldr r2, [sp, #124] @ 4-byte Reload + cmp r1, #0 + str r12, [r0, #32] + movlt lr, r2 + ldr r2, [sp, #120] @ 4-byte Reload + str lr, [r0, #36] + movlt r6, r2 + ldr r2, [sp, #116] @ 4-byte Reload + str r6, [r0, #40] + movlt r5, r2 + ldr r2, [sp, #128] @ 4-byte Reload + cmp r1, #0 + str r5, [r0, #44] + movlt r4, r2 + ldr r2, [sp, #100] @ 4-byte Reload + str r4, [r0, #48] + movlt r3, r2 + ldr r2, [sp, #104] @ 4-byte Reload + str r3, [r0, #52] + movlt r7, r2 + cmp r1, #0 + movlt r1, r10 + str r7, [r0, #56] + str r1, [r0, #60] + add sp, sp, #396 + add sp, sp, #2048 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end243: + .size mcl_fp_montNF16L, .Lfunc_end243-mcl_fp_montNF16L + .cantunwind + .fnend + + .globl mcl_fp_montRed16L + .align 2 + .type mcl_fp_montRed16L,%function +mcl_fp_montRed16L: @ @mcl_fp_montRed16L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #356 + sub sp, sp, #356 + .pad #1024 + sub sp, sp, #1024 + mov r3, r2 + str r0, [sp, #200] @ 4-byte Spill + ldr r2, [r1, #4] + ldr r7, [r1] + add r10, sp, #1024 + ldr r0, [r3] + str r3, [sp, #216] @ 4-byte Spill + str r2, [sp, #112] @ 4-byte Spill + ldr r2, [r1, #8] + str r0, [sp, #192] @ 4-byte Spill + ldr r0, [r3, #4] + str r2, [sp, #108] @ 4-byte Spill + ldr r2, [r1, #12] + str r0, [sp, #188] @ 4-byte Spill + ldr r0, [r3, #8] + str r2, [sp, #104] @ 4-byte Spill + str r0, [sp, #184] @ 4-byte Spill + ldr r0, [r3, #12] + str r0, [sp, #168] @ 4-byte Spill + ldr 
r0, [r3, #16] + str r0, [sp, #172] @ 4-byte Spill + ldr r0, [r3, #20] + str r0, [sp, #176] @ 4-byte Spill + ldr r0, [r3, #24] + str r0, [sp, #180] @ 4-byte Spill + ldr r0, [r3, #-4] + str r0, [sp, #220] @ 4-byte Spill + mul r2, r7, r0 + ldr r0, [r3, #60] + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [r3, #32] + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [r3, #36] + str r0, [sp, #144] @ 4-byte Spill + ldr r0, [r3, #40] + str r0, [sp, #148] @ 4-byte Spill + ldr r0, [r3, #44] + str r0, [sp, #152] @ 4-byte Spill + ldr r0, [r3, #48] + str r0, [sp, #156] @ 4-byte Spill + ldr r0, [r3, #52] + str r0, [sp, #160] @ 4-byte Spill + ldr r0, [r3, #56] + str r0, [sp, #164] @ 4-byte Spill + ldr r0, [r3, #28] + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [r1, #96] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [r1, #100] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [r1, #104] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [r1, #108] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [r1, #112] + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [r1, #116] + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [r1, #120] + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [r1, #124] + str r0, [sp, #212] @ 4-byte Spill + ldr r0, [r1, #64] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r1, #68] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r1, #72] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r1, #76] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r1, #80] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [r1, #84] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [r1, #88] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [r1, #92] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [r1, #32] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r1, #36] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [r1, #40] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r1, #44] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r1, #48] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r1, #52] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r1, #56] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r1, #60] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r1, #28] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [r1, #24] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r1, #20] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r1, #16] + mov r1, r3 + str r0, [sp, #16] @ 4-byte Spill + add r0, r10, #280 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1368] + ldr r10, [sp, #1304] + ldr r1, [sp, #1312] + ldr r2, [sp, #1316] + ldr r3, [sp, #1320] + ldr r12, [sp, #1324] + ldr lr, [sp, #1328] + ldr r4, [sp, #1332] + ldr r5, [sp, #1336] + ldr r6, [sp, #1340] + ldr r8, [sp, #1344] + ldr r9, [sp, #1348] + ldr r11, [sp, #1352] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1364] + adds r7, r7, r10 + ldr r7, [sp, #112] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1360] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1356] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #1308] + adcs r10, r7, r0 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload 
+ adcs r0, r0, r5 + ldr r5, [sp, #216] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #208] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [sp, #212] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #212] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + mul r2, r10, r0 + add r0, sp, #1232 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1296] + ldr r4, [sp, #1232] + ldr r1, [sp, #1240] + ldr r2, [sp, #1244] + ldr r3, [sp, #1248] + ldr r9, [sp, #1252] + ldr r12, [sp, #1256] + ldr r11, [sp, #1260] + ldr lr, [sp, #1264] + ldr r6, [sp, #1268] + ldr r7, [sp, #1272] + ldr r8, [sp, #1276] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1292] + adds r4, r10, r4 + ldr r4, [sp, #112] @ 4-byte Reload + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1288] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1284] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #1280] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #1236] + adcs r10, r4, r0 + ldr r0, [sp, #108] @ 4-byte Reload + ldr r4, [sp, #220] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #4] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r10, r4 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + 
adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + mov r11, r10 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r8 + add r8, sp, #1024 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #208] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [sp, #212] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #212] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + add r0, r8, #136 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1224] + add r12, sp, #1168 + ldr r9, [sp, #1204] + ldr r7, [sp, #1200] + ldr r6, [sp, #1196] + ldr r5, [sp, #1192] + ldr lr, [sp, #1188] + ldr r10, [sp, #1184] + ldr r8, [sp, #1164] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1220] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1216] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1212] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1208] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #1160] + ldm r12, {r1, r2, r3, r12} + adds r0, r11, r0 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r8, r0, r8 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r8, r4 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 
4-byte Reload + adcs r0, r0, r10 + mov r10, r8 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #216] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #208] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [sp, #212] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #212] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + add r0, sp, #1088 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1152] + add r9, sp, #1120 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1148] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1144] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1140] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1136] + str r0, [sp, #12] @ 4-byte Spill + ldm r9, {r6, r7, r8, r9} + ldr r4, [sp, #1088] + ldr r0, [sp, #1092] + ldr r1, [sp, #1096] + ldr r2, [sp, #1100] + ldr r3, [sp, #1104] + ldr r12, [sp, #1108] + ldr lr, [sp, #1112] + ldr r11, [sp, #1116] + adds r4, r10, r4 + ldr r4, [sp, #112] @ 4-byte Reload + adcs r10, r4, r0 + ldr r0, [sp, #108] @ 4-byte Reload + ldr r4, [sp, #220] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r10, r4 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs 
r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + mov r8, r10 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #208] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [sp, #212] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #212] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, sp, #1016 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1080] + add r11, sp, #1016 + ldr r6, [sp, #1060] + ldr r7, [sp, #1056] + ldr r5, [sp, #1052] + ldr lr, [sp, #1048] + ldr r12, [sp, #1044] + ldr r10, [sp, #1040] + ldr r9, [sp, #1036] + ldr r3, [sp, #1032] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1076] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1072] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1068] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1064] + str r0, [sp, #16] @ 4-byte Spill + ldm r11, {r0, r1, r2, r11} + adds r0, r8, r0 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r1, r0, r1 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + mov r10, r1 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, 
lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #216] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r1, r4 + mov r1, r5 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #208] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [sp, #212] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #212] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #944 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #1008] + add r9, sp, #976 + add lr, sp, #948 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1004] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1000] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #996] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #992] + str r0, [sp, #20] @ 4-byte Spill + ldm r9, {r6, r7, r8, r9} + ldr r4, [sp, #944] + ldr r11, [sp, #972] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r4, r10, r4 + ldr r4, [sp, #112] @ 4-byte Reload + adcs r10, r4, r0 + ldr r0, [sp, #108] @ 4-byte Reload + ldr r4, [sp, #220] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r10, r4 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r11 + mov r11, r10 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, 
r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #208] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [sp, #212] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #212] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, sp, #872 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #936] + add lr, sp, #888 + add r8, sp, #872 + ldr r6, [sp, #916] + ldr r7, [sp, #912] + ldr r5, [sp, #908] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #932] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #928] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #924] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #920] + str r0, [sp, #24] @ 4-byte Spill + ldm lr, {r3, r9, r10, r12, lr} + ldm r8, {r0, r1, r2, r8} + adds r0, r11, r0 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #108] @ 4-byte Reload + ldr r1, [sp, #24] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #216] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #88] @ 
4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #208] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [sp, #212] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #212] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + add r0, sp, #800 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #864] + add r10, sp, #828 + add lr, sp, #804 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #860] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #856] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #852] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #848] + str r0, [sp, #28] @ 4-byte Spill + ldm r10, {r6, r7, r8, r9, r10} + ldr r4, [sp, #800] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r4, r11, r4 + ldr r4, [sp, #112] @ 4-byte Reload + adcs r11, r4, r0 + ldr r0, [sp, #108] @ 4-byte Reload + ldr r4, [sp, #220] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, 
#128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #208] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [sp, #212] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #212] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + add r0, sp, #728 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #792] + add r8, sp, #760 + add lr, sp, #748 + add r12, sp, #728 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #788] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #784] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #780] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #776] + str r0, [sp, #32] @ 4-byte Spill + ldm r8, {r5, r6, r7, r8} + ldm lr, {r9, r10, lr} + ldm r12, {r0, r1, r2, r3, r12} + adds r0, r11, r0 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #108] @ 4-byte Reload + ldr r1, [sp, #32] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #216] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #208] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [sp, #212] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #212] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #656 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #720] + add r10, sp, #684 + add lr, sp, #660 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #716] + str 
r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #712] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #708] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #704] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r6, r7, r8, r9, r10} + ldr r4, [sp, #656] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r4, r11, r4 + ldr r4, [sp, #112] @ 4-byte Reload + adcs r11, r4, r0 + ldr r0, [sp, #108] @ 4-byte Reload + ldr r4, [sp, #220] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #208] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [sp, #212] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #212] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #584 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #648] + add r8, sp, #616 + add lr, sp, #604 + add r12, sp, #584 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #644] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #640] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #636] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #632] + str r0, [sp, #40] @ 4-byte Spill + ldm r8, {r5, r6, r7, r8} + ldm lr, {r9, r10, lr} + ldm r12, {r0, r1, r2, r3, r12} + adds r0, r11, r0 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #108] @ 4-byte Reload + ldr r1, [sp, #40] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr 
r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #216] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #208] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [sp, #212] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #212] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + add r0, sp, #512 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #576] + add r10, sp, #540 + add lr, sp, #516 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #572] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #568] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #564] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #560] + str r0, [sp, #44] @ 4-byte Spill + ldm r10, {r6, r7, r8, r9, r10} + ldr r4, [sp, #512] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r4, r11, r4 + ldr r4, [sp, #112] @ 4-byte Reload + adcs r11, r4, r0 + ldr r0, [sp, #108] @ 4-byte Reload + ldr r4, [sp, #220] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, 
[sp, #56] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #60] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #208] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [sp, #212] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #212] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + add r0, sp, #440 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #504] + add r8, sp, #472 + add lr, sp, #460 + add r12, sp, #440 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #500] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #496] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #492] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #488] + str r0, [sp, #48] @ 4-byte Spill + ldm r8, {r5, r6, r7, r8} + ldm lr, {r9, r10, lr} + ldm r12, {r0, r1, r2, r3, r12} + adds r0, r11, r0 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #108] @ 4-byte Reload + ldr r1, [sp, #48] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #216] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #64] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #60] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #208] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #212] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + add r0, sp, #368 + bl .LmulPv512x32(PLT) + ldr r0, [sp, #432] + add r10, sp, #396 + add lr, sp, #372 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #428] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #424] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #420] + str r0, [sp, #24] @ 4-byte Spill + ldr 
r0, [sp, #416] + str r0, [sp, #20] @ 4-byte Spill + ldm r10, {r6, r7, r8, r9, r10} + ldr r4, [sp, #368] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r4, r11, r4 + ldr r4, [sp, #112] @ 4-byte Reload + adcs r4, r4, r0 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #104] @ 4-byte Reload + ldr r1, [sp, #20] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #212] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #220] @ 4-byte Reload + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + mul r2, r4, r6 + adcs r0, r0, r7 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + add r0, sp, #296 + bl .LmulPv512x32(PLT) + add r5, sp, #296 + add r7, sp, #336 + add lr, sp, #312 + ldm r5, {r0, r1, r3, r5} + ldr r9, [sp, #356] + adds r0, r4, r0 + adcs r8, r11, r1 + ldr r11, [sp, #352] + mul r0, r8, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #360] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #348] + str r0, [sp, #84] @ 4-byte Spill + ldm r7, {r4, r6, r7} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r10, [sp, #212] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #208] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #204] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #80] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #212] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs 
r0, r0, r5 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r10, r0, r11 + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r6, r0, r1 + ldr r0, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #216] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + add r0, sp, #224 + bl .LmulPv512x32(PLT) + add r3, sp, #224 + ldm r3, {r0, r1, r2, r3} + adds r0, r8, r0 + ldr r0, [sp, #220] @ 4-byte Reload + adcs r12, r0, r1 + ldr r0, [sp, #84] @ 4-byte Reload + ldr r1, [sp, #60] @ 4-byte Reload + str r12, [sp, #92] @ 4-byte Spill + adcs r2, r0, r2 + ldr r0, [sp, #52] @ 4-byte Reload + str r2, [sp, #96] @ 4-byte Spill + adcs lr, r0, r3 + ldr r0, [sp, #240] + str lr, [sp, #100] @ 4-byte Spill + adcs r4, r1, r0 + ldr r0, [sp, #244] + ldr r1, [sp, #64] @ 4-byte Reload + str r4, [sp, #104] @ 4-byte Spill + adcs r5, r1, r0 + ldr r0, [sp, #248] + ldr r1, [sp, #68] @ 4-byte Reload + str r5, [sp, #108] @ 4-byte Spill + adcs r7, r1, r0 + ldr r0, [sp, #252] + ldr r1, [sp, #208] @ 4-byte Reload + str r7, [sp, #112] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #204] @ 4-byte Reload + str r0, [sp, #208] @ 4-byte Spill + ldr r0, [sp, #256] + adcs r0, r1, r0 + ldr r1, [sp, #72] @ 4-byte Reload + str r0, [sp, #204] @ 4-byte Spill + ldr r0, [sp, #260] + adcs r11, r1, r0 + ldr r0, [sp, #264] + ldr r1, [sp, #76] @ 4-byte Reload + str r11, [sp, #116] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #212] @ 4-byte Reload + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #268] + adcs r0, r1, r0 + ldr r1, [sp, #128] @ 4-byte Reload + str r0, [sp, #212] @ 4-byte Spill + ldr r0, [sp, #272] + adcs r0, r1, r0 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #276] + adcs r10, r10, r0 + ldr r0, [sp, #280] + str r10, [sp, #128] @ 4-byte Spill + adcs r8, r1, r0 + ldr r0, [sp, #284] + ldr r1, [sp, #124] @ 4-byte Reload + str r8, [sp, #132] @ 4-byte Spill + adcs r6, r6, r0 + ldr r0, [sp, #288] + adcs r3, r1, r0 + ldr r0, [sp, #120] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #192] @ 4-byte Reload + subs r1, r12, r0 + ldr r0, [sp, #188] @ 4-byte Reload + sbcs r2, r2, r0 + ldr r0, [sp, #184] @ 4-byte Reload + sbcs r12, lr, r0 + ldr r0, [sp, #168] @ 4-byte Reload + sbcs lr, r4, r0 + ldr r0, [sp, #172] @ 4-byte Reload + sbcs r4, r5, r0 + ldr r0, [sp, #176] @ 4-byte Reload + sbcs r5, r7, r0 + ldr r0, [sp, #180] @ 4-byte Reload + ldr r7, [sp, #208] @ 4-byte Reload + sbcs r9, r7, r0 + ldr r0, [sp, #136] @ 4-byte Reload + ldr r7, [sp, #204] @ 4-byte Reload + sbcs r0, r7, r0 + ldr r7, [sp, #212] @ 4-byte Reload + str r0, [sp, #172] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + sbcs r0, r11, r0 + ldr r11, [sp, #216] @ 4-byte Reload + str r0, [sp, #176] @ 4-byte Spill + ldr r0, [sp, #144] @ 4-byte Reload + sbcs r0, r11, r0 + ldr r11, [sp, #220] @ 4-byte Reload + str r0, [sp, #180] @ 4-byte Spill + ldr r0, [sp, #148] @ 4-byte Reload + sbcs r0, r7, r0 + str r0, [sp, #184] @ 4-byte Spill + ldr r0, [sp, #152] @ 4-byte Reload + sbcs r0, r11, r0 + str r0, [sp, #188] @ 4-byte Spill + ldr r0, [sp, #156] @ 4-byte Reload + sbcs r0, r10, r0 + mov r10, r6 + str r0, [sp, #192] @ 4-byte Spill + ldr r0, [sp, #160] @ 4-byte Reload + sbcs r7, r8, r0 + ldr r0, [sp, #164] @ 4-byte Reload + mov r8, r3 + sbcs r11, r6, r0 + ldr r0, [sp, #196] @ 4-byte Reload + sbcs r6, 
r3, r0
+	ldr r0, [sp, #124] @ 4-byte Reload
+	sbc r3, r0, #0
+	ldr r0, [sp, #92] @ 4-byte Reload
+	ands r3, r3, #1
+	movne r1, r0
+	ldr r0, [sp, #200] @ 4-byte Reload
+	str r1, [r0]
+	ldr r1, [sp, #96] @ 4-byte Reload
+	movne r2, r1
+	ldr r1, [sp, #100] @ 4-byte Reload
+	str r2, [r0, #4]
+	ldr r2, [sp, #172] @ 4-byte Reload
+	movne r12, r1
+	ldr r1, [sp, #104] @ 4-byte Reload
+	cmp r3, #0
+	str r12, [r0, #8]
+	movne lr, r1
+	ldr r1, [sp, #108] @ 4-byte Reload
+	str lr, [r0, #12]
+	movne r4, r1
+	ldr r1, [sp, #112] @ 4-byte Reload
+	str r4, [r0, #16]
+	movne r5, r1
+	ldr r1, [sp, #208] @ 4-byte Reload
+	cmp r3, #0
+	str r5, [r0, #20]
+	movne r9, r1
+	ldr r1, [sp, #204] @ 4-byte Reload
+	str r9, [r0, #24]
+	movne r2, r1
+	ldr r1, [sp, #176] @ 4-byte Reload
+	str r2, [r0, #28]
+	ldr r2, [sp, #116] @ 4-byte Reload
+	movne r1, r2
+	cmp r3, #0
+	ldr r2, [sp, #180] @ 4-byte Reload
+	str r1, [r0, #32]
+	ldr r1, [sp, #216] @ 4-byte Reload
+	movne r2, r1
+	ldr r1, [sp, #212] @ 4-byte Reload
+	str r2, [r0, #36]
+	ldr r2, [sp, #184] @ 4-byte Reload
+	movne r2, r1
+	ldr r1, [sp, #220] @ 4-byte Reload
+	str r2, [r0, #40]
+	ldr r2, [sp, #188] @ 4-byte Reload
+	movne r2, r1
+	cmp r3, #0
+	ldr r1, [sp, #192] @ 4-byte Reload
+	str r2, [r0, #44]
+	ldr r2, [sp, #128] @ 4-byte Reload
+	movne r11, r10
+	movne r1, r2
+	str r1, [r0, #48]
+	ldr r1, [sp, #132] @ 4-byte Reload
+	movne r7, r1
+	cmp r3, #0
+	movne r6, r8
+	str r7, [r0, #52]
+	str r11, [r0, #56]
+	str r6, [r0, #60]
+	add sp, sp, #356
+	add sp, sp, #1024
+	pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+	mov pc, lr
+.Lfunc_end244:
+	.size mcl_fp_montRed16L, .Lfunc_end244-mcl_fp_montRed16L
+	.cantunwind
+	.fnend
+
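+@ mcl_fp_addPre16L: raw addition of two 16-limb (512-bit) little-endian
+@ operands with carry propagation; the final carry is returned in r0.
+@ A minimal C sketch of the same semantics (illustrative only; the name
+@ addPre16 is hypothetical, not part of the generated file):
+@   uint32_t addPre16(uint32_t z[16], const uint32_t x[16], const uint32_t y[16]) {
+@       uint64_t c = 0;
+@       for (int i = 0; i < 16; ++i) {
+@           c += (uint64_t)x[i] + y[i];  /* limb add plus incoming carry */
+@           z[i] = (uint32_t)c;
+@           c >>= 32;                    /* carry out of this limb */
+@       }
+@       return (uint32_t)c;
+@   }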
lr, r1, r2 + ldr r1, [sp, #40] @ 4-byte Reload + ldr r2, [sp, #4] @ 4-byte Reload + str lr, [r0, #36] + adcs r3, r1, r2 + ldr r1, [sp, #44] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + str r3, [r0, #40] + adcs r7, r1, r2 + ldr r1, [sp, #48] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + str r7, [r0, #44] + adcs r6, r1, r2 + ldr r1, [sp, #52] @ 4-byte Reload + ldr r2, [sp, #16] @ 4-byte Reload + str r6, [r0, #48] + adcs r5, r1, r11 + ldr r1, [sp, #56] @ 4-byte Reload + str r5, [r0, #52] + adcs r4, r1, r2 + ldr r1, [sp, #60] @ 4-byte Reload + ldr r2, [sp, #20] @ 4-byte Reload + str r4, [r0, #56] + adcs r1, r1, r2 + str r1, [r0, #60] + mov r0, #0 + adc r0, r0, #0 + add sp, sp, #64 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end245: + .size mcl_fp_addPre16L, .Lfunc_end245-mcl_fp_addPre16L + .cantunwind + .fnend + + .globl mcl_fp_subPre16L + .align 2 + .type mcl_fp_subPre16L,%function +mcl_fp_subPre16L: @ @mcl_fp_subPre16L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #64 + sub sp, sp, #64 + ldm r2, {r3, r8} + ldr r5, [r2, #8] + ldr r6, [r2, #12] + ldm r1, {r7, r12, lr} + ldr r4, [r1, #12] + ldr r9, [r1, #32] + ldr r11, [r1, #52] + subs r3, r7, r3 + str r3, [sp, #28] @ 4-byte Spill + ldr r3, [r2, #32] + sbcs r7, r12, r8 + ldr r8, [r2, #24] + add r12, r1, #16 + sbcs r5, lr, r5 + ldr lr, [r2, #16] + sbcs r6, r4, r6 + ldr r4, [r2, #20] + str r3, [sp, #32] @ 4-byte Spill + ldr r3, [r2, #36] + str r3, [sp, #36] @ 4-byte Spill + ldr r3, [r2, #40] + str r3, [sp, #40] @ 4-byte Spill + ldr r3, [r2, #44] + str r3, [sp, #44] @ 4-byte Spill + ldr r3, [r2, #48] + str r3, [sp, #48] @ 4-byte Spill + ldr r3, [r2, #52] + str r3, [sp, #52] @ 4-byte Spill + ldr r3, [r2, #56] + str r3, [sp, #56] @ 4-byte Spill + ldr r3, [r2, #60] + str r3, [sp, #60] @ 4-byte Spill + ldr r3, [r2, #28] + ldr r2, [r1, #36] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #40] + str r3, [sp, #24] @ 4-byte Spill + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #44] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #48] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #20] @ 4-byte Spill + ldm r12, {r1, r2, r3, r12} + ldr r10, [sp, #28] @ 4-byte Reload + sbcs r1, r1, lr + str r10, [r0] + str r7, [r0, #4] + str r5, [r0, #8] + str r6, [r0, #12] + sbcs r2, r2, r4 + str r1, [r0, #16] + str r2, [r0, #20] + sbcs r1, r3, r8 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #32] @ 4-byte Reload + sbcs r2, r12, r2 + sbcs r12, r9, r1 + str r2, [r0, #28] + ldr r1, [sp, #36] @ 4-byte Reload + ldr r2, [sp] @ 4-byte Reload + str r12, [r0, #32] + sbcs lr, r2, r1 + ldr r1, [sp, #40] @ 4-byte Reload + ldr r2, [sp, #4] @ 4-byte Reload + str lr, [r0, #36] + sbcs r3, r2, r1 + ldr r1, [sp, #44] @ 4-byte Reload + ldr r2, [sp, #8] @ 4-byte Reload + str r3, [r0, #40] + sbcs r7, r2, r1 + ldr r1, [sp, #48] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + str r7, [r0, #44] + sbcs r6, r2, r1 + ldr r1, [sp, #52] @ 4-byte Reload + ldr r2, [sp, #16] @ 4-byte Reload + str r6, [r0, #48] + sbcs r5, r11, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r5, [r0, #52] + sbcs r4, r2, r1 + ldr r1, [sp, #60] @ 4-byte Reload + ldr r2, [sp, #20] @ 4-byte Reload + str r4, [r0, #56] + sbcs r1, r2, r1 + str r1, [r0, #60] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + add sp, sp, #64 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end246: + .size 
+	.globl	mcl_fp_subPre16L
+	.align	2
+	.type	mcl_fp_subPre16L,%function
+mcl_fp_subPre16L: @ @mcl_fp_subPre16L
+	.fnstart
+@ BB#0:
+	.save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+	push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+	.pad #64
+	sub sp, sp, #64
+	ldm r2, {r3, r8}
+	ldr r5, [r2, #8]
+	ldr r6, [r2, #12]
+	ldm r1, {r7, r12, lr}
+	ldr r4, [r1, #12]
+	ldr r9, [r1, #32]
+	ldr r11, [r1, #52]
+	subs r3, r7, r3
+	str r3, [sp, #28] @ 4-byte Spill
+	ldr r3, [r2, #32]
+	sbcs r7, r12, r8
+	ldr r8, [r2, #24]
+	add r12, r1, #16
+	sbcs r5, lr, r5
+	ldr lr, [r2, #16]
+	sbcs r6, r4, r6
+	ldr r4, [r2, #20]
+	str r3, [sp, #32] @ 4-byte Spill
+	ldr r3, [r2, #36]
+	str r3, [sp, #36] @ 4-byte Spill
+	ldr r3, [r2, #40]
+	str r3, [sp, #40] @ 4-byte Spill
+	ldr r3, [r2, #44]
+	str r3, [sp, #44] @ 4-byte Spill
+	ldr r3, [r2, #48]
+	str r3, [sp, #48] @ 4-byte Spill
+	ldr r3, [r2, #52]
+	str r3, [sp, #52] @ 4-byte Spill
+	ldr r3, [r2, #56]
+	str r3, [sp, #56] @ 4-byte Spill
+	ldr r3, [r2, #60]
+	str r3, [sp, #60] @ 4-byte Spill
+	ldr r3, [r2, #28]
+	ldr r2, [r1, #36]
+	str r2, [sp] @ 4-byte Spill
+	ldr r2, [r1, #40]
+	str r3, [sp, #24] @ 4-byte Spill
+	str r2, [sp, #4] @ 4-byte Spill
+	ldr r2, [r1, #44]
+	str r2, [sp, #8] @ 4-byte Spill
+	ldr r2, [r1, #48]
+	str r2, [sp, #12] @ 4-byte Spill
+	ldr r2, [r1, #56]
+	str r2, [sp, #16] @ 4-byte Spill
+	ldr r2, [r1, #60]
+	str r2, [sp, #20] @ 4-byte Spill
+	ldm r12, {r1, r2, r3, r12}
+	ldr r10, [sp, #28] @ 4-byte Reload
+	sbcs r1, r1, lr
+	str r10, [r0]
+	str r7, [r0, #4]
+	str r5, [r0, #8]
+	str r6, [r0, #12]
+	sbcs r2, r2, r4
+	str r1, [r0, #16]
+	str r2, [r0, #20]
+	sbcs r1, r3, r8
+	ldr r2, [sp, #24] @ 4-byte Reload
+	str r1, [r0, #24]
+	ldr r1, [sp, #32] @ 4-byte Reload
+	sbcs r2, r12, r2
+	sbcs r12, r9, r1
+	str r2, [r0, #28]
+	ldr r1, [sp, #36] @ 4-byte Reload
+	ldr r2, [sp] @ 4-byte Reload
+	str r12, [r0, #32]
+	sbcs lr, r2, r1
+	ldr r1, [sp, #40] @ 4-byte Reload
+	ldr r2, [sp, #4] @ 4-byte Reload
+	str lr, [r0, #36]
+	sbcs r3, r2, r1
+	ldr r1, [sp, #44] @ 4-byte Reload
+	ldr r2, [sp, #8] @ 4-byte Reload
+	str r3, [r0, #40]
+	sbcs r7, r2, r1
+	ldr r1, [sp, #48] @ 4-byte Reload
+	ldr r2, [sp, #12] @ 4-byte Reload
+	str r7, [r0, #44]
+	sbcs r6, r2, r1
+	ldr r1, [sp, #52] @ 4-byte Reload
+	ldr r2, [sp, #16] @ 4-byte Reload
+	str r6, [r0, #48]
+	sbcs r5, r11, r1
+	ldr r1, [sp, #56] @ 4-byte Reload
+	str r5, [r0, #52]
+	sbcs r4, r2, r1
+	ldr r1, [sp, #60] @ 4-byte Reload
+	ldr r2, [sp, #20] @ 4-byte Reload
+	str r4, [r0, #56]
+	sbcs r1, r2, r1
+	str r1, [r0, #60]
+	mov r0, #0
+	sbc r0, r0, #0
+	and r0, r0, #1
+	add sp, sp, #64
+	pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+	mov pc, lr
+.Lfunc_end246:
+	.size mcl_fp_subPre16L, .Lfunc_end246-mcl_fp_subPre16L
+	.cantunwind
+	.fnend
+
+	.globl	mcl_fp_shr1_16L
+	.align	2
+	.type	mcl_fp_shr1_16L,%function
+mcl_fp_shr1_16L: @ @mcl_fp_shr1_16L
+	.fnstart
+@ BB#0:
+	.save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+	push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+	.pad #24
+	sub sp, sp, #24
+	ldr r3, [r1, #16]
+	ldr r2, [r1, #20]
+	ldr r12, [r1, #24]
+	ldr r11, [r1, #28]
+	ldm r1, {r4, r5, r6, r7}
+	ldr r8, [r1, #56]
+	ldr lr, [r1, #32]
+	ldr r9, [r1, #36]
+	ldr r10, [r1, #40]
+	str r4, [sp, #4] @ 4-byte Spill
+	lsr r4, r5, #1
+	str r8, [sp, #16] @ 4-byte Spill
+	orr r4, r4, r6, lsl #31
+	str r4, [sp] @ 4-byte Spill
+	ldr r4, [r1, #44]
+	str r4, [sp, #8] @ 4-byte Spill
+	ldr r4, [r1, #48]
+	str r4, [sp, #12] @ 4-byte Spill
+	ldr r4, [r1, #52]
+	ldr r1, [r1, #60]
+	str r1, [sp, #20] @ 4-byte Spill
+	lsr r1, r7, #1
+	lsrs r7, r7, #1
+	rrx r6, r6
+	lsrs r5, r5, #1
+	orr r1, r1, r3, lsl #31
+	ldr r5, [sp, #4] @ 4-byte Reload
+	rrx r5, r5
+	str r5, [r0]
+	ldr r5, [sp] @ 4-byte Reload
+	stmib r0, {r5, r6}
+	str r1, [r0, #12]
+	lsrs r1, r2, #1
+	rrx r1, r3
+	str r1, [r0, #16]
+	lsr r1, r2, #1
+	lsr r2, r9, #1
+	orr r1, r1, r12, lsl #31
+	str r1, [r0, #20]
+	lsrs r1, r11, #1
+	rrx r1, r12
+	str r1, [r0, #24]
+	lsr r1, r11, #1
+	orr r1, r1, lr, lsl #31
+	str r1, [r0, #28]
+	lsrs r1, r9, #1
+	ldr r1, [sp, #8] @ 4-byte Reload
+	rrx r12, lr
+	orr lr, r2, r10, lsl #31
+	mov r2, r4
+	lsr r5, r2, #1
+	str r12, [r0, #32]
+	str lr, [r0, #36]
+	lsrs r3, r1, #1
+	lsr r7, r1, #1
+	ldr r1, [sp, #12] @ 4-byte Reload
+	rrx r3, r10
+	lsrs r6, r2, #1
+	ldr r2, [sp, #16] @ 4-byte Reload
+	str r3, [r0, #40]
+	orr r7, r7, r1, lsl #31
+	rrx r6, r1
+	ldr r1, [sp, #20] @ 4-byte Reload
+	orr r5, r5, r2, lsl #31
+	str r7, [r0, #44]
+	str r6, [r0, #48]
+	str r5, [r0, #52]
+	lsrs r4, r1, #1
+	lsr r1, r1, #1
+	rrx r4, r2
+	str r4, [r0, #56]
+	str r1, [r0, #60]
+	add sp, sp, #24
+	pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+	mov pc, lr
+.Lfunc_end247:
+	.size mcl_fp_shr1_16L, .Lfunc_end247-mcl_fp_shr1_16L
+	.cantunwind
+	.fnend
+
+	.globl	mcl_fp_add16L
+	.align	2
+	.type	mcl_fp_add16L,%function
+mcl_fp_add16L: @ @mcl_fp_add16L
+	.fnstart
+@ BB#0:
+	.save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+	push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+	.pad #60
+	sub sp, sp, #60
+	ldr r9, [r1]
+	ldmib r1, {r8, lr}
+	ldr r12, [r1, #12]
+	ldm r2, {r4, r5, r6, r7}
+	adds r9, r4, r9
+	ldr r4, [r1, #24]
+	adcs r5, r5, r8
+	mov r8, r9
+	adcs r6, r6, lr
+	str r5, [sp, #36] @ 4-byte Spill
+	ldr r5, [r1, #20]
+	str r8, [r0]
+	adcs r10, r7, r12
+	str r6, [sp, #32] @ 4-byte Spill
+	ldr r6, [r1, #16]
+	ldr r7, [r2, #16]
+	ldr lr, [sp, #36] @ 4-byte Reload
+	str r10, [sp] @ 4-byte Spill
+	adcs r7, r7, r6
+	ldr r6, [r1, #28]
+	str lr, [r0, #4]
+	str r7, [sp, #28] @ 4-byte Spill
+	ldr r7, [r2, #20]
+	ldr r9, [sp, #28] @ 4-byte Reload
+	adcs r7, r7, r5
+	ldr r5, [r2, #28]
+	str r7, [sp, #24] @ 4-byte Spill
+	ldr r7, [r2, #24]
+	adcs r7, r7, r4
+	ldr r4, [r2, #32]
+	str r7, [sp, #52] @ 4-byte Spill
+	adcs r7, r5, r6
+	ldr r5, [r1, #32]
+	str r7, [sp, #40] @ 4-byte Spill
+	adcs r7, r4, r5
+	ldr r5, [r1, #36]
+	ldr r4, [r2, #36]
+	str r7, [sp, #48] @ 4-byte Spill
+	adcs r7, r4, r5
+	ldr r5, [r1, #40]
+	ldr r4, [r2, #40]
+	str r7, [sp, #56] @ 4-byte Spill
+	adcs r7, r4, r5
+	ldr r5, [r1, #44]
+	ldr r4, [r2, #44]
+	str r7, [sp, #44] @ 4-byte Spill
+	ldr r7, [r2, #48]
+	adcs r11, r4, r5
+	ldr r4, [r1, #48]
+	ldr r5, [r2, #52]
+	str r11, [sp, #20] @ 4-byte Spill
+	adcs r12, r7, r4
+	ldr r7, [r1, #52]
+	ldr r4, [sp, #32] @ 4-byte
Reload + str r12, [sp, #16] @ 4-byte Spill + adcs r6, r5, r7 + ldr r7, [r1, #56] + ldr r5, [r2, #56] + ldr r1, [r1, #60] + ldr r2, [r2, #60] + str r4, [r0, #8] + str r10, [r0, #12] + ldr r10, [sp, #24] @ 4-byte Reload + str r9, [r0, #16] + str r6, [sp, #4] @ 4-byte Spill + adcs r5, r5, r7 + str r10, [r0, #20] + add r7, r0, #40 + adcs r2, r2, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r2, [sp, #8] @ 4-byte Spill + str r1, [r0, #24] + ldr r1, [sp, #40] @ 4-byte Reload + str r1, [r0, #28] + ldr r1, [sp, #48] @ 4-byte Reload + str r1, [r0, #32] + ldr r1, [sp, #56] @ 4-byte Reload + str r1, [r0, #36] + ldr r1, [sp, #44] @ 4-byte Reload + stm r7, {r1, r11, r12} + str r6, [r0, #52] + str r5, [r0, #56] + str r2, [r0, #60] + mov r2, #0 + mov r12, r5 + add r11, r3, #32 + adc r1, r2, #0 + str r1, [sp, #12] @ 4-byte Spill + ldm r3, {r5, r7} + ldr r1, [r3, #8] + ldr r2, [r3, #12] + subs r8, r8, r5 + sbcs lr, lr, r7 + sbcs r1, r4, r1 + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp] @ 4-byte Reload + sbcs r1, r1, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [r3, #16] + sbcs r1, r9, r1 + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [r3, #20] + sbcs r1, r10, r1 + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [r3, #24] + sbcs r1, r2, r1 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [r3, #28] + sbcs r1, r2, r1 + str r1, [sp, #40] @ 4-byte Spill + ldm r11, {r1, r2, r5, r7, r9, r10, r11} + ldr r6, [sp, #48] @ 4-byte Reload + ldr r3, [r3, #60] + sbcs r1, r6, r1 + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + sbcs r1, r1, r2 + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #44] @ 4-byte Reload + sbcs r2, r1, r5 + ldr r1, [sp, #20] @ 4-byte Reload + sbcs r5, r1, r7 + ldr r1, [sp, #16] @ 4-byte Reload + sbcs r7, r1, r9 + ldr r1, [sp, #4] @ 4-byte Reload + sbcs r4, r1, r10 + ldr r1, [sp, #8] @ 4-byte Reload + sbcs r6, r12, r11 + sbcs r1, r1, r3 + ldr r3, [sp, #12] @ 4-byte Reload + sbc r3, r3, #0 + tst r3, #1 + bne .LBB248_2 +@ BB#1: @ %nocarry + stm r0, {r8, lr} + ldr r3, [sp, #36] @ 4-byte Reload + str r3, [r0, #8] + ldr r3, [sp, #32] @ 4-byte Reload + str r3, [r0, #12] + ldr r3, [sp, #28] @ 4-byte Reload + str r3, [r0, #16] + ldr r3, [sp, #24] @ 4-byte Reload + str r3, [r0, #20] + ldr r3, [sp, #52] @ 4-byte Reload + str r3, [r0, #24] + ldr r3, [sp, #40] @ 4-byte Reload + str r3, [r0, #28] + ldr r3, [sp, #48] @ 4-byte Reload + str r3, [r0, #32] + ldr r3, [sp, #56] @ 4-byte Reload + str r3, [r0, #36] + add r3, r0, #40 + stm r3, {r2, r5, r7} + str r4, [r0, #52] + str r6, [r0, #56] + str r1, [r0, #60] +.LBB248_2: @ %carry + add sp, sp, #60 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end248: + .size mcl_fp_add16L, .Lfunc_end248-mcl_fp_add16L + .cantunwind + .fnend + + .globl mcl_fp_addNF16L + .align 2 + .type mcl_fp_addNF16L,%function +mcl_fp_addNF16L: @ @mcl_fp_addNF16L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #88 + sub sp, sp, #88 + mov r12, r0 + ldm r1, {r0, r9} + ldr r8, [r1, #8] + ldr lr, [r1, #12] + ldm r2, {r4, r5, r6, r7} + adds r10, r4, r0 + ldr r4, [r1, #20] + ldr r0, [r1, #24] + adcs r9, r5, r9 + ldr r5, [r1, #16] + adcs r8, r6, r8 + str r9, [sp, #4] @ 4-byte Spill + adcs r6, r7, lr + ldr r7, [r2, #16] + str r8, [sp, #8] @ 4-byte Spill + str r6, [sp, #16] @ 4-byte Spill + adcs r7, r7, r5 + str r7, [sp, #52] @ 4-byte Spill + ldr r7, [r2, #20] + adcs r7, r7, r4 + str r7, [sp, #44] @ 4-byte Spill + ldr r7, [r2, 
#24] + adcs r0, r7, r0 + ldr r7, [r2, #28] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r1, #28] + adcs r0, r7, r0 + ldr r7, [r2, #32] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r1, #32] + adcs r0, r7, r0 + ldr r7, [r2, #36] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r1, #36] + adcs r0, r7, r0 + ldr r7, [r2, #40] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r1, #40] + adcs r0, r7, r0 + ldr r7, [r2, #44] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r1, #44] + adcs r0, r7, r0 + ldr r7, [r2, #48] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r1, #48] + adcs r0, r7, r0 + ldr r7, [r2, #52] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r1, #52] + adcs r0, r7, r0 + ldr r7, [r2, #56] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [r1, #56] + adcs r0, r7, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r1, #60] + ldr r1, [r2, #60] + adc r11, r1, r0 + ldm r3, {r0, r7} + ldr r1, [r3, #8] + ldr r4, [r3, #12] + subs lr, r10, r0 + ldr r0, [r3, #32] + sbcs r5, r9, r7 + ldr r9, [sp, #44] @ 4-byte Reload + sbcs r7, r8, r1 + ldr r1, [sp, #52] @ 4-byte Reload + sbcs r8, r6, r4 + ldr r4, [r3, #24] + ldr r6, [r3, #20] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [r3, #36] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r3, #40] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r3, #44] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r3, #48] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r3, #52] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r3, #56] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r3, #60] + str r0, [sp] @ 4-byte Spill + ldr r0, [r3, #28] + ldr r3, [r3, #16] + sbcs r1, r1, r3 + ldr r3, [sp, #48] @ 4-byte Reload + sbcs r2, r9, r6 + ldr r6, [sp, #12] @ 4-byte Reload + sbcs r3, r3, r4 + ldr r4, [sp, #84] @ 4-byte Reload + sbcs r4, r4, r0 + ldr r0, [sp, #80] @ 4-byte Reload + sbcs r0, r0, r6 + ldr r6, [sp, #20] @ 4-byte Reload + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + sbcs r0, r0, r6 + ldr r6, [sp, #24] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + sbcs r0, r0, r6 + ldr r6, [sp, #28] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + sbcs r0, r0, r6 + ldr r6, [sp, #32] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + sbcs r0, r0, r6 + ldr r6, [sp, #36] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + sbcs r0, r0, r6 + ldr r6, [sp, #40] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + sbcs r0, r0, r6 + ldr r6, [sp, #4] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp] @ 4-byte Reload + sbc r0, r11, r0 + cmp r0, #0 + movlt lr, r10 + movlt r5, r6 + ldr r6, [sp, #28] @ 4-byte Reload + str lr, [r12] + str r5, [r12, #4] + ldr r5, [sp, #8] @ 4-byte Reload + ldr lr, [sp, #12] @ 4-byte Reload + movlt r7, r5 + cmp r0, #0 + ldr r5, [sp, #32] @ 4-byte Reload + str r7, [r12, #8] + ldr r7, [sp, #16] @ 4-byte Reload + movlt r2, r9 + movlt r8, r7 + ldr r7, [sp, #52] @ 4-byte Reload + str r8, [r12, #12] + movlt r1, r7 + cmp r0, #0 + ldr r7, [sp, #24] @ 4-byte Reload + str r1, [r12, #16] + ldr r1, [sp, #48] @ 4-byte Reload + str r2, [r12, #20] + ldr r2, [sp, #40] @ 4-byte Reload + movlt r3, r1 + ldr r1, [sp, #84] @ 4-byte Reload + str r3, [r12, #24] + ldr r3, [sp, #20] @ 4-byte Reload + movlt r4, r1 + ldr r1, [sp, #80] @ 4-byte Reload + str r4, [r12, #28] + ldr r4, [sp, #36] @ 4-byte Reload + movlt lr, r1 + ldr r1, [sp, #76] @ 4-byte Reload + cmp r0, #0 + str lr, [r12, #32] 
+ movlt r3, r1 + ldr r1, [sp, #72] @ 4-byte Reload + str r3, [r12, #36] + movlt r7, r1 + ldr r1, [sp, #68] @ 4-byte Reload + str r7, [r12, #40] + movlt r6, r1 + ldr r1, [sp, #64] @ 4-byte Reload + cmp r0, #0 + str r6, [r12, #44] + movlt r5, r1 + ldr r1, [sp, #60] @ 4-byte Reload + str r5, [r12, #48] + movlt r4, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r4, [r12, #52] + movlt r2, r1 + cmp r0, #0 + movlt r0, r11 + str r2, [r12, #56] + str r0, [r12, #60] + add sp, sp, #88 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end249: + .size mcl_fp_addNF16L, .Lfunc_end249-mcl_fp_addNF16L + .cantunwind + .fnend + + .globl mcl_fp_sub16L + .align 2 + .type mcl_fp_sub16L,%function +mcl_fp_sub16L: @ @mcl_fp_sub16L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #84 + sub sp, sp, #84 + ldr r9, [r2] + ldmib r2, {r8, lr} + ldr r5, [r1] + ldr r12, [r2, #12] + ldmib r1, {r4, r6, r7} + subs r5, r5, r9 + sbcs r4, r4, r8 + str r5, [sp, #60] @ 4-byte Spill + ldr r5, [r2, #24] + sbcs r6, r6, lr + str r4, [sp, #64] @ 4-byte Spill + ldr r4, [r2, #20] + sbcs r7, r7, r12 + str r6, [sp, #68] @ 4-byte Spill + ldr r6, [r2, #16] + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [r1, #16] + sbcs r7, r7, r6 + ldr r6, [r1, #28] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r1, #20] + sbcs r7, r7, r4 + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r1, #24] + sbcs r7, r7, r5 + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r2, #28] + sbcs r11, r6, r7 + ldr r7, [r2, #32] + ldr r6, [r1, #32] + sbcs r10, r6, r7 + ldr r7, [r2, #36] + ldr r6, [r1, #36] + sbcs r8, r6, r7 + ldr r7, [r2, #40] + ldr r6, [r1, #40] + str r8, [sp, #52] @ 4-byte Spill + sbcs r5, r6, r7 + ldr r7, [r2, #44] + ldr r6, [r1, #44] + str r5, [sp, #48] @ 4-byte Spill + sbcs r4, r6, r7 + ldr r6, [r2, #48] + ldr r7, [r1, #48] + str r4, [sp, #44] @ 4-byte Spill + sbcs lr, r7, r6 + ldr r6, [r2, #52] + ldr r7, [r1, #52] + str lr, [sp, #40] @ 4-byte Spill + sbcs r9, r7, r6 + ldr r6, [r2, #56] + ldr r7, [r1, #56] + ldr r2, [r2, #60] + ldr r1, [r1, #60] + sbcs r6, r7, r6 + sbcs r12, r1, r2 + ldr r1, [sp, #60] @ 4-byte Reload + mov r2, #0 + str r6, [sp, #36] @ 4-byte Spill + sbc r2, r2, #0 + str r12, [sp, #32] @ 4-byte Spill + tst r2, #1 + str r1, [r0] + ldr r1, [sp, #64] @ 4-byte Reload + str r1, [r0, #4] + ldr r1, [sp, #68] @ 4-byte Reload + str r1, [r0, #8] + ldr r1, [sp, #76] @ 4-byte Reload + str r1, [r0, #12] + ldr r1, [sp, #72] @ 4-byte Reload + str r1, [r0, #16] + ldr r1, [sp, #80] @ 4-byte Reload + str r1, [r0, #20] + ldr r1, [sp, #56] @ 4-byte Reload + str r1, [r0, #24] + str r11, [r0, #28] + str r10, [r0, #32] + str r8, [r0, #36] + str r5, [r0, #40] + str r4, [r0, #44] + str lr, [r0, #48] + str r9, [r0, #52] + str r6, [r0, #56] + str r12, [r0, #60] + beq .LBB250_2 +@ BB#1: @ %carry + ldr r2, [r3, #32] + ldr r8, [r3, #60] + str r11, [sp] @ 4-byte Spill + ldr r5, [r3] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r3, #36] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r3, #40] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r3, #44] + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [r3, #48] + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [r3, #52] + str r2, [sp, #24] @ 4-byte Spill + ldr r2, [r3, #56] + str r2, [sp, #28] @ 4-byte Spill + ldmib r3, {r4, r11, r12} + ldr r1, [sp, #60] @ 4-byte Reload + ldr r7, [sp, #76] @ 4-byte Reload + ldr lr, [r3, #20] + ldr r2, [sp, #80] @ 4-byte Reload + adds r5, r5, r1 + ldr r1, [sp, #64] @ 4-byte Reload + str r5, [r0] + adcs r4, r4, r1 + ldr r1, [sp, #68] @ 
4-byte Reload + str r4, [r0, #4] + adcs r1, r11, r1 + ldr r11, [r3, #24] + adcs r6, r12, r7 + str r1, [r0, #8] + ldr r12, [r3, #28] + ldr r3, [r3, #16] + ldr r1, [sp, #72] @ 4-byte Reload + ldr r7, [sp, #44] @ 4-byte Reload + str r6, [r0, #12] + adcs r1, r3, r1 + str r1, [r0, #16] + ldr r1, [sp, #56] @ 4-byte Reload + adcs r3, lr, r2 + ldr r2, [sp] @ 4-byte Reload + str r3, [r0, #20] + adcs r1, r11, r1 + str r1, [r0, #24] + ldr r1, [sp, #4] @ 4-byte Reload + adcs r3, r12, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r3, [r0, #28] + ldr r3, [sp, #48] @ 4-byte Reload + adcs lr, r1, r10 + ldr r1, [sp, #8] @ 4-byte Reload + str lr, [r0, #32] + adcs r2, r1, r2 + ldr r1, [sp, #12] @ 4-byte Reload + adcs r3, r1, r3 + ldr r1, [sp, #16] @ 4-byte Reload + adcs r6, r1, r7 + ldr r7, [sp, #40] @ 4-byte Reload + ldr r1, [sp, #20] @ 4-byte Reload + adcs r5, r1, r7 + ldr r1, [sp, #24] @ 4-byte Reload + ldr r7, [sp, #36] @ 4-byte Reload + adcs r4, r1, r9 + ldr r1, [sp, #28] @ 4-byte Reload + adcs r7, r1, r7 + ldr r1, [sp, #32] @ 4-byte Reload + adc r12, r8, r1 + add r1, r0, #36 + stm r1, {r2, r3, r6} + str r5, [r0, #48] + add r0, r0, #52 + stm r0, {r4, r7, r12} +.LBB250_2: @ %nocarry + add sp, sp, #84 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end250: + .size mcl_fp_sub16L, .Lfunc_end250-mcl_fp_sub16L + .cantunwind + .fnend + + .globl mcl_fp_subNF16L + .align 2 + .type mcl_fp_subNF16L,%function +mcl_fp_subNF16L: @ @mcl_fp_subNF16L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #92 + sub sp, sp, #92 + ldr r7, [r2, #32] + add r9, r2, #8 + str r7, [sp, #68] @ 4-byte Spill + ldr r7, [r2, #36] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r2, #40] + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [r2, #44] + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #84] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #88] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [r1, #60] + str r7, [sp, #32] @ 4-byte Spill + ldr r7, [r1, #56] + str r7, [sp, #28] @ 4-byte Spill + ldr r7, [r1, #52] + str r7, [sp, #24] @ 4-byte Spill + ldr r7, [r1, #48] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r1, #44] + str r7, [sp, #12] @ 4-byte Spill + ldm r2, {r10, r11} + ldm r9, {r5, r6, r7, r9} + ldr r4, [r2, #24] + ldr r2, [r2, #28] + str r4, [sp, #60] @ 4-byte Spill + str r2, [sp, #64] @ 4-byte Spill + ldm r1, {r2, r12, lr} + ldr r4, [r1, #12] + ldr r8, [r1, #40] + subs r2, r2, r10 + str r2, [sp, #40] @ 4-byte Spill + sbcs r2, r12, r11 + ldr r12, [r1, #36] + sbcs lr, lr, r5 + str r2, [sp, #20] @ 4-byte Spill + ldr r5, [r1, #32] + ldr r2, [sp, #60] @ 4-byte Reload + sbcs r4, r4, r6 + ldr r6, [r1, #16] + str lr, [sp] @ 4-byte Spill + str r4, [sp, #44] @ 4-byte Spill + sbcs r4, r6, r7 + ldr r7, [r1, #20] + str r4, [sp, #52] @ 4-byte Spill + sbcs r4, r7, r9 + ldr r7, [r1, #28] + ldr r1, [r1, #24] + str r4, [sp, #48] @ 4-byte Spill + sbcs r1, r1, r2 + ldr r2, [sp, #12] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + sbcs r1, r7, r1 + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + sbcs r1, r5, r1 + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + sbcs r1, r12, r1 + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + sbcs r1, r8, r1 + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + sbcs r1, r2, r1 + ldr r2, [sp, #16] @ 
4-byte Reload + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + sbcs r1, r2, r1 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + sbcs r1, r2, r1 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + sbcs r1, r2, r1 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #36] @ 4-byte Reload + sbc r2, r2, r1 + ldr r1, [r3, #32] + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [r3, #36] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [r3, #40] + str r1, [sp, #4] @ 4-byte Spill + ldr r1, [r3, #44] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [r3, #48] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [r3, #52] + str r1, [sp, #8] @ 4-byte Spill + ldr r1, [r3, #56] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [r3, #60] + str r1, [sp, #36] @ 4-byte Spill + ldm r3, {r1, r4, r5, r6, r7, r8, r9, r10} + ldr r3, [sp, #40] @ 4-byte Reload + ldr r11, [sp, #20] @ 4-byte Reload + adds r1, r3, r1 + adcs r3, r11, r4 + ldr r4, [sp, #52] @ 4-byte Reload + adcs r12, lr, r5 + ldr r5, [sp, #44] @ 4-byte Reload + adcs lr, r5, r6 + ldr r5, [sp, #48] @ 4-byte Reload + ldr r6, [sp, #60] @ 4-byte Reload + adcs r4, r4, r7 + ldr r7, [sp, #64] @ 4-byte Reload + adcs r5, r5, r8 + ldr r8, [sp, #88] @ 4-byte Reload + adcs r9, r6, r9 + ldr r6, [sp, #12] @ 4-byte Reload + adcs r10, r7, r10 + ldr r7, [sp, #68] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #16] @ 4-byte Reload + str r7, [sp, #12] @ 4-byte Spill + ldr r7, [sp, #72] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #4] @ 4-byte Reload + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [sp, #76] @ 4-byte Reload + adcs r6, r7, r6 + ldr r7, [sp, #80] @ 4-byte Reload + str r6, [sp, #4] @ 4-byte Spill + ldr r6, [sp, #24] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #28] @ 4-byte Reload + str r7, [sp, #24] @ 4-byte Spill + ldr r7, [sp, #84] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #40] @ 4-byte Reload + str r7, [sp, #28] @ 4-byte Spill + ldr r7, [sp, #8] @ 4-byte Reload + adcs r7, r8, r7 + ldr r8, [sp, #32] @ 4-byte Reload + str r7, [sp, #8] @ 4-byte Spill + ldr r7, [sp, #56] @ 4-byte Reload + adcs r7, r7, r8 + str r7, [sp, #32] @ 4-byte Spill + ldr r7, [sp, #36] @ 4-byte Reload + adc r7, r2, r7 + cmp r2, #0 + movge r1, r6 + movge r3, r11 + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [sp, #4] @ 4-byte Reload + ldr r6, [sp, #24] @ 4-byte Reload + str r1, [r0] + ldr r1, [sp] @ 4-byte Reload + str r3, [r0, #4] + ldr r3, [sp, #8] @ 4-byte Reload + movge r12, r1 + ldr r1, [sp, #44] @ 4-byte Reload + cmp r2, #0 + str r12, [r0, #8] + ldr r12, [sp, #12] @ 4-byte Reload + movge lr, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str lr, [r0, #12] + ldr lr, [sp, #16] @ 4-byte Reload + movge r4, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r4, [r0, #16] + ldr r4, [sp, #32] @ 4-byte Reload + movge r5, r1 + ldr r1, [sp, #60] @ 4-byte Reload + cmp r2, #0 + str r5, [r0, #20] + ldr r5, [sp, #28] @ 4-byte Reload + movge r9, r1 + ldr r1, [sp, #64] @ 4-byte Reload + str r9, [r0, #24] + movge r10, r1 + ldr r1, [sp, #68] @ 4-byte Reload + str r10, [r0, #28] + movge r12, r1 + ldr r1, [sp, #72] @ 4-byte Reload + cmp r2, #0 + str r12, [r0, #32] + movge lr, r1 + ldr r1, [sp, #76] @ 4-byte Reload + str lr, [r0, #36] + movge r7, r1 + ldr r1, [sp, #80] @ 4-byte Reload + str r7, [r0, #40] + movge r6, r1 + ldr r1, [sp, #84] @ 4-byte Reload + cmp r2, #0 + str r6, [r0, #44] + movge r5, r1 + ldr r1, [sp, #88] @ 4-byte Reload + str r5, [r0, #48] + 
movge r3, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r3, [r0, #52] + movge r4, r1 + ldr r1, [sp, #36] @ 4-byte Reload + cmp r2, #0 + movge r1, r2 + str r4, [r0, #56] + str r1, [r0, #60] + add sp, sp, #92 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end251: + .size mcl_fp_subNF16L, .Lfunc_end251-mcl_fp_subNF16L + .cantunwind + .fnend + + .globl mcl_fpDbl_add16L + .align 2 + .type mcl_fpDbl_add16L,%function +mcl_fpDbl_add16L: @ @mcl_fpDbl_add16L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #200 + sub sp, sp, #200 + ldm r1, {r7, r8, lr} + ldr r12, [r1, #12] + ldm r2, {r4, r5, r6, r9} + add r10, r1, #32 + adds r4, r4, r7 + str r4, [sp, #100] @ 4-byte Spill + ldr r4, [r2, #96] + str r4, [sp, #164] @ 4-byte Spill + ldr r4, [r2, #100] + str r4, [sp, #160] @ 4-byte Spill + ldr r4, [r2, #104] + str r4, [sp, #156] @ 4-byte Spill + ldr r4, [r2, #108] + str r4, [sp, #180] @ 4-byte Spill + ldr r4, [r2, #112] + str r4, [sp, #184] @ 4-byte Spill + ldr r4, [r2, #116] + str r4, [sp, #188] @ 4-byte Spill + ldr r4, [r2, #120] + str r4, [sp, #192] @ 4-byte Spill + ldr r4, [r2, #124] + str r4, [sp, #196] @ 4-byte Spill + adcs r4, r5, r8 + adcs r7, r6, lr + str r4, [sp, #68] @ 4-byte Spill + add lr, r1, #16 + str r7, [sp, #64] @ 4-byte Spill + adcs r7, r9, r12 + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [r2, #64] + str r7, [sp, #136] @ 4-byte Spill + ldr r7, [r2, #68] + str r7, [sp, #140] @ 4-byte Spill + ldr r7, [r2, #72] + str r7, [sp, #144] @ 4-byte Spill + ldr r7, [r2, #76] + str r7, [sp, #148] @ 4-byte Spill + ldr r7, [r2, #80] + str r7, [sp, #168] @ 4-byte Spill + ldr r7, [r2, #84] + str r7, [sp, #152] @ 4-byte Spill + ldr r7, [r2, #88] + str r7, [sp, #172] @ 4-byte Spill + ldr r7, [r2, #92] + str r7, [sp, #176] @ 4-byte Spill + ldr r7, [r2, #32] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r2, #36] + str r7, [sp, #60] @ 4-byte Spill + ldr r7, [r2, #40] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r2, #44] + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #84] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #88] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #92] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #96] @ 4-byte Spill + ldr r7, [r2, #28] + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #24] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r2, #20] + ldr r2, [r2, #16] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #96] + str r7, [sp, #12] @ 4-byte Spill + str r2, [sp, #104] @ 4-byte Spill + ldr r2, [r1, #100] + str r2, [sp, #108] @ 4-byte Spill + ldr r2, [r1, #104] + str r2, [sp, #112] @ 4-byte Spill + ldr r2, [r1, #108] + str r2, [sp, #116] @ 4-byte Spill + ldr r2, [r1, #112] + str r2, [sp, #120] @ 4-byte Spill + ldr r2, [r1, #116] + str r2, [sp, #124] @ 4-byte Spill + ldr r2, [r1, #120] + str r2, [sp, #128] @ 4-byte Spill + ldr r2, [r1, #124] + str r2, [sp, #132] @ 4-byte Spill + ldr r2, [r1, #64] + str r2, [sp, #24] @ 4-byte Spill + ldr r2, [r1, #68] + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [r1, #80] + str r2, [sp, #40] @ 4-byte Spill + ldr r2, [r1, #84] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r1, #88] + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [r1, #92] + str r2, [sp, #52] @ 4-byte Spill + ldm r10, {r4, r5, r6, r8, r9, r10} + ldr r2, [r1, #56] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #4] @ 4-byte Spill + ldm lr, {r1, r2, 
r12, lr} + ldr r11, [sp, #100] @ 4-byte Reload + ldr r7, [sp, #68] @ 4-byte Reload + str r11, [r0] + str r7, [r0, #4] + ldr r7, [sp, #64] @ 4-byte Reload + str r7, [r0, #8] + ldr r7, [sp, #8] @ 4-byte Reload + adcs r1, r7, r1 + ldr r7, [sp, #76] @ 4-byte Reload + str r7, [r0, #12] + ldr r7, [sp, #12] @ 4-byte Reload + str r1, [r0, #16] + ldr r1, [sp, #16] @ 4-byte Reload + adcs r2, r7, r2 + ldr r7, [sp] @ 4-byte Reload + str r2, [r0, #20] + adcs r1, r1, r12 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #56] @ 4-byte Reload + adcs r2, r2, lr + str r2, [r0, #28] + adcs r1, r1, r4 + ldr r2, [sp, #60] @ 4-byte Reload + str r1, [r0, #32] + ldr r1, [sp, #72] @ 4-byte Reload + adcs r2, r2, r5 + str r2, [r0, #36] + adcs r1, r1, r6 + ldr r2, [sp, #80] @ 4-byte Reload + str r1, [r0, #40] + ldr r1, [sp, #84] @ 4-byte Reload + adcs r2, r2, r8 + str r2, [r0, #44] + adcs r1, r1, r9 + ldr r2, [sp, #88] @ 4-byte Reload + str r1, [r0, #48] + ldr r1, [sp, #92] @ 4-byte Reload + adcs r2, r2, r10 + adcs r1, r1, r7 + str r2, [r0, #52] + ldr r2, [sp, #96] @ 4-byte Reload + ldr r7, [sp, #4] @ 4-byte Reload + str r1, [r0, #56] + ldr r1, [sp, #136] @ 4-byte Reload + adcs r2, r2, r7 + ldr r7, [sp, #24] @ 4-byte Reload + str r2, [r0, #60] + ldr r2, [sp, #28] @ 4-byte Reload + adcs r12, r1, r7 + ldr r1, [sp, #140] @ 4-byte Reload + str r12, [sp, #92] @ 4-byte Spill + adcs r9, r1, r2 + ldr r1, [sp, #144] @ 4-byte Reload + ldr r2, [sp, #32] @ 4-byte Reload + str r9, [sp, #96] @ 4-byte Spill + adcs r8, r1, r2 + ldr r1, [sp, #148] @ 4-byte Reload + ldr r2, [sp, #36] @ 4-byte Reload + str r8, [sp, #100] @ 4-byte Spill + adcs r4, r1, r2 + ldr r1, [sp, #168] @ 4-byte Reload + ldr r2, [sp, #40] @ 4-byte Reload + str r4, [sp, #136] @ 4-byte Spill + adcs r1, r1, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #168] @ 4-byte Spill + ldr r1, [sp, #152] @ 4-byte Reload + adcs r10, r1, r2 + ldr r1, [sp, #172] @ 4-byte Reload + ldr r2, [sp, #48] @ 4-byte Reload + str r10, [sp, #88] @ 4-byte Spill + adcs r1, r1, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r1, [sp, #172] @ 4-byte Spill + ldr r1, [sp, #176] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #104] @ 4-byte Reload + str r1, [sp, #176] @ 4-byte Spill + ldr r1, [sp, #164] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #108] @ 4-byte Reload + str r1, [sp, #164] @ 4-byte Spill + ldr r1, [sp, #160] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #112] @ 4-byte Reload + str r1, [sp, #160] @ 4-byte Spill + ldr r1, [sp, #156] @ 4-byte Reload + adcs r11, r1, r2 + ldr r1, [sp, #180] @ 4-byte Reload + ldr r2, [sp, #116] @ 4-byte Reload + str r11, [sp, #140] @ 4-byte Spill + adcs r1, r1, r2 + ldr r2, [sp, #120] @ 4-byte Reload + str r1, [sp, #180] @ 4-byte Spill + ldr r1, [sp, #184] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #124] @ 4-byte Reload + str r1, [sp, #184] @ 4-byte Spill + ldr r1, [sp, #188] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #128] @ 4-byte Reload + str r1, [sp, #188] @ 4-byte Spill + ldr r1, [sp, #192] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #132] @ 4-byte Reload + str r1, [sp, #192] @ 4-byte Spill + ldr r1, [sp, #196] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #196] @ 4-byte Spill + mov r1, #0 + adc r1, r1, #0 + str r1, [sp, #128] @ 4-byte Spill + ldm r3, {r2, r7} + ldr r1, [r3, #36] + ldr r6, [r3, #8] + ldr r5, [r3, #12] + str r1, [sp, #120] @ 4-byte Spill + ldr r1, [r3, #40] + subs r12, r12, r2 + ldr r2, [sp, #168] @ 4-byte Reload + sbcs lr, r9, r7 + sbcs r7, r8, r6 + ldr r8, [r3, #32] + ldr r6, [r3, 
#24] + sbcs r9, r4, r5 + ldr r5, [r3, #28] + str r1, [sp, #124] @ 4-byte Spill + ldr r1, [r3, #44] + str r1, [sp, #132] @ 4-byte Spill + ldr r1, [r3, #48] + str r1, [sp, #144] @ 4-byte Spill + ldr r1, [r3, #52] + str r1, [sp, #148] @ 4-byte Spill + ldr r1, [r3, #56] + str r1, [sp, #152] @ 4-byte Spill + ldr r1, [r3, #60] + str r1, [sp, #156] @ 4-byte Spill + ldr r1, [r3, #20] + ldr r3, [r3, #16] + sbcs r2, r2, r3 + sbcs r3, r10, r1 + ldr r1, [sp, #172] @ 4-byte Reload + sbcs r4, r1, r6 + ldr r1, [sp, #176] @ 4-byte Reload + ldr r6, [sp, #120] @ 4-byte Reload + sbcs r5, r1, r5 + ldr r1, [sp, #164] @ 4-byte Reload + sbcs r8, r1, r8 + ldr r1, [sp, #160] @ 4-byte Reload + sbcs r10, r1, r6 + ldr r1, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #132] @ 4-byte Reload + sbcs r11, r11, r1 + ldr r1, [sp, #180] @ 4-byte Reload + sbcs r1, r1, r6 + ldr r6, [sp, #144] @ 4-byte Reload + str r1, [sp, #132] @ 4-byte Spill + ldr r1, [sp, #184] @ 4-byte Reload + sbcs r1, r1, r6 + ldr r6, [sp, #148] @ 4-byte Reload + str r1, [sp, #144] @ 4-byte Spill + ldr r1, [sp, #188] @ 4-byte Reload + sbcs r1, r1, r6 + ldr r6, [sp, #152] @ 4-byte Reload + str r1, [sp, #148] @ 4-byte Spill + ldr r1, [sp, #192] @ 4-byte Reload + sbcs r1, r1, r6 + ldr r6, [sp, #156] @ 4-byte Reload + str r1, [sp, #152] @ 4-byte Spill + ldr r1, [sp, #196] @ 4-byte Reload + sbcs r1, r1, r6 + ldr r6, [sp, #92] @ 4-byte Reload + str r1, [sp, #156] @ 4-byte Spill + ldr r1, [sp, #128] @ 4-byte Reload + sbc r1, r1, #0 + ands r1, r1, #1 + movne r12, r6 + ldr r6, [sp, #96] @ 4-byte Reload + str r12, [r0, #64] + movne lr, r6 + ldr r6, [sp, #100] @ 4-byte Reload + str lr, [r0, #68] + movne r7, r6 + cmp r1, #0 + str r7, [r0, #72] + ldr r7, [sp, #136] @ 4-byte Reload + movne r9, r7 + ldr r7, [sp, #168] @ 4-byte Reload + str r9, [r0, #76] + movne r2, r7 + str r2, [r0, #80] + ldr r2, [sp, #88] @ 4-byte Reload + movne r3, r2 + ldr r2, [sp, #172] @ 4-byte Reload + cmp r1, #0 + str r3, [r0, #84] + ldr r3, [sp, #132] @ 4-byte Reload + movne r4, r2 + ldr r2, [sp, #176] @ 4-byte Reload + str r4, [r0, #88] + movne r5, r2 + ldr r2, [sp, #164] @ 4-byte Reload + str r5, [r0, #92] + movne r8, r2 + ldr r2, [sp, #160] @ 4-byte Reload + cmp r1, #0 + str r8, [r0, #96] + movne r10, r2 + ldr r2, [sp, #140] @ 4-byte Reload + str r10, [r0, #100] + movne r11, r2 + ldr r2, [sp, #180] @ 4-byte Reload + str r11, [r0, #104] + movne r3, r2 + ldr r2, [sp, #184] @ 4-byte Reload + cmp r1, #0 + str r3, [r0, #108] + ldr r3, [sp, #144] @ 4-byte Reload + movne r3, r2 + ldr r2, [sp, #188] @ 4-byte Reload + str r3, [r0, #112] + ldr r3, [sp, #148] @ 4-byte Reload + movne r3, r2 + ldr r2, [sp, #192] @ 4-byte Reload + str r3, [r0, #116] + ldr r3, [sp, #152] @ 4-byte Reload + movne r3, r2 + cmp r1, #0 + ldr r1, [sp, #196] @ 4-byte Reload + ldr r2, [sp, #156] @ 4-byte Reload + str r3, [r0, #120] + movne r2, r1 + str r2, [r0, #124] + add sp, sp, #200 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end252: + .size mcl_fpDbl_add16L, .Lfunc_end252-mcl_fpDbl_add16L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub16L + .align 2 + .type mcl_fpDbl_sub16L,%function +mcl_fpDbl_sub16L: @ @mcl_fpDbl_sub16L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #200 + sub sp, sp, #200 + ldr r7, [r2, #96] + ldr r9, [r2] + add r10, r1, #32 + str r7, [sp, #168] @ 4-byte Spill + ldr r7, [r2, #100] + str r7, [sp, #172] @ 4-byte Spill + ldr r7, [r2, #104] + str r7, [sp, #176] @ 4-byte Spill + ldr r7, [r2, #108] + str r7, [sp, #180] @ 
4-byte Spill + ldr r7, [r2, #112] + str r7, [sp, #184] @ 4-byte Spill + ldr r7, [r2, #116] + str r7, [sp, #188] @ 4-byte Spill + ldr r7, [r2, #120] + str r7, [sp, #192] @ 4-byte Spill + ldr r7, [r2, #124] + str r7, [sp, #196] @ 4-byte Spill + ldr r7, [r2, #64] + str r7, [sp, #136] @ 4-byte Spill + ldr r7, [r2, #68] + str r7, [sp, #140] @ 4-byte Spill + ldr r7, [r2, #72] + str r7, [sp, #144] @ 4-byte Spill + ldr r7, [r2, #76] + str r7, [sp, #148] @ 4-byte Spill + ldr r7, [r2, #80] + str r7, [sp, #152] @ 4-byte Spill + ldr r7, [r2, #84] + str r7, [sp, #156] @ 4-byte Spill + ldr r7, [r2, #88] + str r7, [sp, #160] @ 4-byte Spill + ldr r7, [r2, #92] + str r7, [sp, #164] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #132] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #128] @ 4-byte Spill + ldmib r2, {r8, r12, lr} + ldm r1, {r4, r5, r6, r7} + subs r4, r4, r9 + str r4, [sp, #36] @ 4-byte Spill + ldr r4, [r2, #52] + str r4, [sp, #92] @ 4-byte Spill + sbcs r4, r5, r8 + sbcs r6, r6, r12 + str r4, [sp, #32] @ 4-byte Spill + ldr r4, [r2, #48] + sbcs r7, r7, lr + str r6, [sp, #24] @ 4-byte Spill + ldr r6, [r2, #44] + add lr, r1, #16 + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #40] + str r4, [sp, #88] @ 4-byte Spill + str r6, [sp, #84] @ 4-byte Spill + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r2, #36] + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r2, #32] + str r7, [sp, #40] @ 4-byte Spill + ldr r7, [r2, #28] + str r7, [sp, #28] @ 4-byte Spill + ldr r7, [r2, #24] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r2, #20] + ldr r2, [r2, #16] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #96] + str r7, [sp, #12] @ 4-byte Spill + str r2, [sp, #96] @ 4-byte Spill + ldr r2, [r1, #100] + str r2, [sp, #100] @ 4-byte Spill + ldr r2, [r1, #104] + str r2, [sp, #104] @ 4-byte Spill + ldr r2, [r1, #108] + str r2, [sp, #108] @ 4-byte Spill + ldr r2, [r1, #112] + str r2, [sp, #112] @ 4-byte Spill + ldr r2, [r1, #116] + str r2, [sp, #116] @ 4-byte Spill + ldr r2, [r1, #120] + str r2, [sp, #120] @ 4-byte Spill + ldr r2, [r1, #124] + str r2, [sp, #124] @ 4-byte Spill + ldr r2, [r1, #64] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r1, #68] + str r2, [sp, #52] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #60] @ 4-byte Spill + ldr r2, [r1, #80] + str r2, [sp, #64] @ 4-byte Spill + ldr r2, [r1, #84] + str r2, [sp, #68] @ 4-byte Spill + ldr r2, [r1, #88] + str r2, [sp, #72] @ 4-byte Spill + ldr r2, [r1, #92] + str r2, [sp, #76] @ 4-byte Spill + ldm r10, {r4, r5, r6, r8, r9, r10} + ldr r2, [r1, #56] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #4] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r11, [sp, #36] @ 4-byte Reload + ldr r7, [sp, #32] @ 4-byte Reload + str r11, [r0] + str r7, [r0, #4] + ldr r7, [sp, #24] @ 4-byte Reload + add r11, r3, #12 + str r7, [r0, #8] + ldr r7, [sp, #8] @ 4-byte Reload + sbcs r1, r1, r7 + ldr r7, [sp, #20] @ 4-byte Reload + str r7, [r0, #12] + ldr r7, [sp, #12] @ 4-byte Reload + str r1, [r0, #16] + ldr r1, [sp, #16] @ 4-byte Reload + sbcs r2, r2, r7 + ldr r7, [sp] @ 4-byte Reload + str r2, [r0, #20] + sbcs r1, r12, r1 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #40] @ 4-byte Reload + sbcs r2, lr, r2 + str r2, [r0, #28] + sbcs r1, r4, r1 + ldr r2, [sp, #48] @ 4-byte Reload + str r1, [r0, #32] + ldr r1, [sp, #80] @ 4-byte Reload + sbcs r2, r5, r2 + ldr r5, [sp, #72] @ 4-byte Reload + str r2, [r0, #36] + sbcs r1, r6, r1 + ldr r2, [sp, #84] @ 4-byte Reload + str r1, [r0, 
#40] + ldr r1, [sp, #88] @ 4-byte Reload + sbcs r2, r8, r2 + str r2, [r0, #44] + sbcs r1, r9, r1 + ldr r2, [sp, #92] @ 4-byte Reload + str r1, [r0, #48] + ldr r1, [sp, #128] @ 4-byte Reload + sbcs r2, r10, r2 + sbcs r1, r7, r1 + str r2, [r0, #52] + ldr r2, [sp, #132] @ 4-byte Reload + ldr r7, [sp, #4] @ 4-byte Reload + str r1, [r0, #56] + ldr r1, [sp, #136] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #44] @ 4-byte Reload + str r2, [r0, #60] + ldr r2, [sp, #52] @ 4-byte Reload + sbcs r9, r7, r1 + ldr r1, [sp, #140] @ 4-byte Reload + ldr r7, [sp, #96] @ 4-byte Reload + str r9, [sp, #80] @ 4-byte Spill + sbcs r1, r2, r1 + ldr r2, [sp, #56] @ 4-byte Reload + str r1, [sp, #132] @ 4-byte Spill + ldr r1, [sp, #144] @ 4-byte Reload + sbcs r12, r2, r1 + ldr r1, [sp, #148] @ 4-byte Reload + ldr r2, [sp, #60] @ 4-byte Reload + str r12, [sp, #84] @ 4-byte Spill + sbcs lr, r2, r1 + ldr r1, [sp, #152] @ 4-byte Reload + ldr r2, [sp, #64] @ 4-byte Reload + str lr, [sp, #88] @ 4-byte Spill + sbcs r4, r2, r1 + ldr r1, [sp, #156] @ 4-byte Reload + ldr r2, [sp, #68] @ 4-byte Reload + str r4, [sp, #92] @ 4-byte Spill + sbcs r1, r2, r1 + ldr r2, [sp, #160] @ 4-byte Reload + str r1, [sp, #156] @ 4-byte Spill + mov r1, #0 + sbcs r2, r5, r2 + ldr r5, [sp, #76] @ 4-byte Reload + str r2, [sp, #160] @ 4-byte Spill + ldr r2, [sp, #164] @ 4-byte Reload + sbcs r2, r5, r2 + str r2, [sp, #164] @ 4-byte Spill + ldr r2, [sp, #168] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #100] @ 4-byte Reload + str r2, [sp, #168] @ 4-byte Spill + ldr r2, [sp, #172] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #104] @ 4-byte Reload + str r2, [sp, #172] @ 4-byte Spill + ldr r2, [sp, #176] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #108] @ 4-byte Reload + str r2, [sp, #176] @ 4-byte Spill + ldr r2, [sp, #180] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #112] @ 4-byte Reload + str r2, [sp, #180] @ 4-byte Spill + ldr r2, [sp, #184] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #116] @ 4-byte Reload + str r2, [sp, #184] @ 4-byte Spill + ldr r2, [sp, #188] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #120] @ 4-byte Reload + str r2, [sp, #188] @ 4-byte Spill + ldr r2, [sp, #192] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #124] @ 4-byte Reload + str r2, [sp, #192] @ 4-byte Spill + ldr r2, [sp, #196] @ 4-byte Reload + sbcs r2, r7, r2 + sbc r1, r1, #0 + str r2, [sp, #196] @ 4-byte Spill + str r1, [sp, #128] @ 4-byte Spill + ldr r1, [r3, #32] + str r1, [sp, #116] @ 4-byte Spill + ldr r1, [r3, #36] + str r1, [sp, #120] @ 4-byte Spill + ldr r1, [r3, #40] + str r1, [sp, #136] @ 4-byte Spill + ldr r1, [r3, #44] + str r1, [sp, #140] @ 4-byte Spill + ldr r1, [r3, #48] + str r1, [sp, #144] @ 4-byte Spill + ldr r1, [r3, #52] + str r1, [sp, #148] @ 4-byte Spill + ldr r1, [r3, #56] + str r1, [sp, #152] @ 4-byte Spill + ldr r1, [r3, #60] + str r1, [sp, #124] @ 4-byte Spill + ldm r3, {r2, r6, r7} + ldm r11, {r5, r8, r11} + ldr r1, [r3, #28] + ldr r10, [r3, #24] + str r1, [sp, #112] @ 4-byte Spill + adds r1, r9, r2 + ldr r9, [sp, #132] @ 4-byte Reload + adcs r2, r9, r6 + ldr r6, [sp, #164] @ 4-byte Reload + adcs r3, r12, r7 + ldr r7, [sp, #112] @ 4-byte Reload + adcs r12, lr, r5 + ldr r5, [sp, #160] @ 4-byte Reload + adcs lr, r4, r8 + ldr r4, [sp, #156] @ 4-byte Reload + adcs r4, r4, r11 + adcs r5, r5, r10 + adcs r8, r6, r7 + ldr r7, [sp, #168] @ 4-byte Reload + ldr r6, [sp, #116] @ 4-byte Reload + adcs r11, r7, r6 + ldr r7, [sp, #172] @ 4-byte Reload + ldr r6, [sp, #120] @ 4-byte Reload + adcs r6, r7, r6 + ldr r7, [sp, 
#176] @ 4-byte Reload + str r6, [sp, #120] @ 4-byte Spill + ldr r6, [sp, #136] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #140] @ 4-byte Reload + str r7, [sp, #136] @ 4-byte Spill + ldr r7, [sp, #180] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #144] @ 4-byte Reload + str r7, [sp, #140] @ 4-byte Spill + ldr r7, [sp, #184] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #148] @ 4-byte Reload + str r7, [sp, #144] @ 4-byte Spill + ldr r7, [sp, #188] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #152] @ 4-byte Reload + str r7, [sp, #148] @ 4-byte Spill + ldr r7, [sp, #192] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #124] @ 4-byte Reload + str r7, [sp, #152] @ 4-byte Spill + ldr r7, [sp, #196] @ 4-byte Reload + adc r7, r7, r6 + ldr r6, [sp, #80] @ 4-byte Reload + str r7, [sp, #124] @ 4-byte Spill + ldr r7, [sp, #128] @ 4-byte Reload + ands r10, r7, #1 + moveq r1, r6 + moveq r2, r9 + str r1, [r0, #64] + ldr r1, [sp, #84] @ 4-byte Reload + str r2, [r0, #68] + ldr r2, [sp, #120] @ 4-byte Reload + moveq r3, r1 + ldr r1, [sp, #88] @ 4-byte Reload + cmp r10, #0 + str r3, [r0, #72] + moveq r12, r1 + ldr r1, [sp, #92] @ 4-byte Reload + str r12, [r0, #76] + moveq lr, r1 + ldr r1, [sp, #156] @ 4-byte Reload + str lr, [r0, #80] + moveq r4, r1 + ldr r1, [sp, #160] @ 4-byte Reload + cmp r10, #0 + str r4, [r0, #84] + moveq r5, r1 + ldr r1, [sp, #164] @ 4-byte Reload + str r5, [r0, #88] + moveq r8, r1 + ldr r1, [sp, #168] @ 4-byte Reload + str r8, [r0, #92] + moveq r11, r1 + ldr r1, [sp, #172] @ 4-byte Reload + cmp r10, #0 + str r11, [r0, #96] + moveq r2, r1 + ldr r1, [sp, #176] @ 4-byte Reload + str r2, [r0, #100] + ldr r2, [sp, #136] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #180] @ 4-byte Reload + str r2, [r0, #104] + ldr r2, [sp, #140] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #184] @ 4-byte Reload + cmp r10, #0 + str r2, [r0, #108] + ldr r2, [sp, #144] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #188] @ 4-byte Reload + str r2, [r0, #112] + ldr r2, [sp, #148] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #192] @ 4-byte Reload + str r2, [r0, #116] + ldr r2, [sp, #152] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #196] @ 4-byte Reload + cmp r10, #0 + str r2, [r0, #120] + ldr r2, [sp, #124] @ 4-byte Reload + moveq r2, r1 + str r2, [r0, #124] + add sp, sp, #200 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end253: + .size mcl_fpDbl_sub16L, .Lfunc_end253-mcl_fpDbl_sub16L + .cantunwind + .fnend + + .align 2 + .type .LmulPv544x32,%function +.LmulPv544x32: @ @mulPv544x32 + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r11, lr} + push {r4, r5, r6, r7, r8, r9, r11, lr} + ldr r12, [r1] + ldmib r1, {r3, lr} + ldr r9, [r1, #12] + umull r4, r8, lr, r2 + umull lr, r6, r12, r2 + mov r5, r4 + mov r7, r6 + str lr, [r0] + umull lr, r12, r9, r2 + umlal r7, r5, r3, r2 + str r5, [r0, #8] + str r7, [r0, #4] + umull r5, r7, r3, r2 + adds r3, r6, r5 + adcs r3, r7, r4 + adcs r3, r8, lr + str r3, [r0, #12] + ldr r3, [r1, #16] + umull r7, r6, r3, r2 + adcs r3, r12, r7 + str r3, [r0, #16] + ldr r3, [r1, #20] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #20] + ldr r3, [r1, #24] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #24] + ldr r3, [r1, #28] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #28] + ldr r3, [r1, #32] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #32] + ldr r3, [r1, #36] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #36] + ldr r3, [r1, #40] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #40] + ldr r3, 
[r1, #44] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #44] + ldr r3, [r1, #48] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #48] + ldr r3, [r1, #52] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #52] + ldr r3, [r1, #56] + umull r7, r6, r3, r2 + adcs r3, r5, r7 + str r3, [r0, #56] + ldr r3, [r1, #60] + umull r7, r5, r3, r2 + adcs r3, r6, r7 + str r3, [r0, #60] + ldr r1, [r1, #64] + umull r3, r7, r1, r2 + adcs r1, r5, r3 + adc r2, r7, #0 + str r1, [r0, #64] + str r2, [r0, #68] + pop {r4, r5, r6, r7, r8, r9, r11, lr} + mov pc, lr +.Lfunc_end254: + .size .LmulPv544x32, .Lfunc_end254-.LmulPv544x32 + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre17L + .align 2 + .type mcl_fp_mulUnitPre17L,%function +mcl_fp_mulUnitPre17L: @ @mcl_fp_mulUnitPre17L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #100 + sub sp, sp, #100 + mov r4, r0 + add r0, sp, #24 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #92] + add r11, sp, #48 + add lr, sp, #24 + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #88] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #84] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #80] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #76] + str r0, [sp, #4] @ 4-byte Spill + ldm r11, {r5, r6, r7, r8, r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + stm r4, {r0, r1, r2, r3, r12, lr} + add r0, r4, #24 + stm r0, {r5, r6, r7, r8, r9, r10, r11} + ldr r0, [sp, #4] @ 4-byte Reload + str r0, [r4, #52] + ldr r0, [sp, #8] @ 4-byte Reload + str r0, [r4, #56] + ldr r0, [sp, #12] @ 4-byte Reload + str r0, [r4, #60] + ldr r0, [sp, #16] @ 4-byte Reload + str r0, [r4, #64] + ldr r0, [sp, #20] @ 4-byte Reload + str r0, [r4, #68] + add sp, sp, #100 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end255: + .size mcl_fp_mulUnitPre17L, .Lfunc_end255-mcl_fp_mulUnitPre17L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre17L + .align 2 + .type mcl_fpDbl_mulPre17L,%function +mcl_fpDbl_mulPre17L: @ @mcl_fpDbl_mulPre17L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #340 + sub sp, sp, #340 + .pad #1024 + sub sp, sp, #1024 + mov r9, r2 + add r6, sp, #1024 + mov r4, r0 + str r1, [sp, #128] @ 4-byte Spill + mov r5, r1 + ldr r2, [r9] + add r0, r6, #264 + str r9, [sp, #124] @ 4-byte Spill + str r4, [sp, #132] @ 4-byte Spill + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1356] + ldr r1, [sp, #1292] + ldr r2, [r9, #4] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #1352] + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #1296] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #1348] + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #1300] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #1344] + str r1, [sp, #44] @ 4-byte Spill + mov r1, r5 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #1340] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #1336] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #1332] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #1328] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #1324] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #1320] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #1316] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #1312] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #1308] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #1304] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #1288] + str r0, [r4] + add r0, sp, #1216 + bl .LmulPv544x32(PLT) + ldr r0, 
[sp, #1284] + add lr, sp, #1216 + ldr r10, [sp, #1256] + ldr r8, [sp, #1252] + ldr r7, [sp, #1248] + ldr r6, [sp, #1244] + ldr r5, [sp, #1240] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1280] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1276] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1272] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1268] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1264] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1260] + str r0, [sp, #28] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #56] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #4] + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r9, #8] + add r9, sp, #1024 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #128] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r6 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, r9, #120 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1212] + ldr r9, [sp, #56] @ 4-byte Reload + ldr r8, [sp, #1184] + ldr r7, [sp, #1180] + ldr r11, [sp, #1176] + ldr r5, [sp, #1172] + ldr lr, [sp, #1168] + ldr r10, [sp, #1164] + ldr r12, [sp, #1160] + ldr r1, [sp, #1148] + ldr r2, [sp, #1152] + ldr r3, [sp, #1156] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1208] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1204] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1200] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1196] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1192] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1188] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1144] + adds r0, r0, r9 + str r0, [r4, #8] + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte 
Reload + adcs r0, r12, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r5, r0 + ldr r5, [sp, #124] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + ldr r2, [r5, #12] + adcs r0, r11, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r6 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #1072 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1140] + add lr, sp, #1072 + ldr r10, [sp, #1112] + ldr r9, [sp, #1108] + ldr r8, [sp, #1104] + ldr r7, [sp, #1100] + ldr r6, [sp, #1096] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1136] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1132] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1128] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1124] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1120] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1116] + str r0, [sp, #28] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #56] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #12] + ldr r0, [sp, #80] @ 4-byte Reload + ldr r4, [sp, #128] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #16] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, 
#120] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #1000 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1068] + add r11, sp, #1024 + add lr, sp, #1000 + ldr r6, [sp, #1040] + ldr r5, [sp, #1036] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1064] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1060] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1056] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1052] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1048] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1044] + str r0, [sp, #28] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #56] @ 4-byte Reload + ldr r8, [sp, #132] @ 4-byte Reload + adds r0, r0, r7 + str r0, [r8, #16] + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r5, r0 + ldr r5, [sp, #124] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + ldr r2, [r5, #20] + adcs r0, r6, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #928 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #996] + add r11, sp, #952 + add lr, sp, #928 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #992] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #988] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #984] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #980] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #976] + str r0, [sp, #32] @ 4-byte Spill + ldm r11, {r6, r7, r8, r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r4, [sp, #56] @ 4-byte Reload + adds r0, r0, r4 + ldr r4, [sp, #132] @ 4-byte Reload + str r0, [r4, #20] + ldr r0, 
[sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r5, #24] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #128] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r6 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #856 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #924] + add r11, sp, #880 + add lr, sp, #856 + ldr r7, [sp, #896] + ldr r5, [sp, #892] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #920] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #916] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #912] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #908] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #904] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #900] + str r0, [sp, #28] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r8, [sp, #56] @ 4-byte Reload + adds r0, r0, r8 + str r0, [r4, #24] + ldr r0, [sp, #80] @ 4-byte Reload + ldr r4, [sp, #124] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r4, #28] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r5, r0 + mov r5, r6 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill 
+ ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #784 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #852] + add r10, sp, #808 + add lr, sp, #784 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #848] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #844] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #840] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #836] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #832] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #828] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #824] + str r0, [sp, #24] @ 4-byte Spill + ldm r10, {r6, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r7, [sp, #56] @ 4-byte Reload + ldr r11, [sp, #132] @ 4-byte Reload + adds r0, r0, r7 + str r0, [r11, #28] + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r4, #32] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #712 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #780] + add r8, sp, #748 + add r11, sp, #736 + add lr, sp, #712 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #776] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #772] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #768] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #764] 
+ str r0, [sp, #36] @ 4-byte Spill + ldm r8, {r4, r6, r7, r8} + ldm r11, {r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r5, [sp, #56] @ 4-byte Reload + adds r0, r0, r5 + ldr r5, [sp, #132] @ 4-byte Reload + str r0, [r5, #32] + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #128] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #124] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + ldr r2, [r6, #36] + adcs r0, r7, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #640 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #708] + add r10, sp, #664 + add lr, sp, #640 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #704] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #700] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #696] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #692] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #688] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #684] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #680] + str r0, [sp, #24] @ 4-byte Spill + ldm r10, {r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #56] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r5, #36] + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r6, #40] + mov r6, r4 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #92] @ 
4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #568 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #636] + add r11, sp, #592 + add lr, sp, #568 + ldr r7, [sp, #608] + ldr r4, [sp, #604] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #632] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #628] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #624] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #620] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #616] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #612] + str r0, [sp, #24] @ 4-byte Spill + ldm r11, {r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r8, [sp, #56] @ 4-byte Reload + adds r0, r0, r8 + str r0, [r5, #40] + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #124] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + ldr r2, [r4, #44] + adcs r0, r7, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r6 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + add r0, sp, #496 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #564] + add r10, sp, #520 + add lr, sp, #496 + str r0, [sp, #44] @ 4-byte Spill 
+ ldr r0, [sp, #560] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #556] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #552] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #548] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #544] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #540] + str r0, [sp, #20] @ 4-byte Spill + ldm r10, {r6, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #48] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r5, #44] + ldr r0, [sp, #100] @ 4-byte Reload + ldr r5, [sp, #128] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r4, #48] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r8, r0 + mov r8, r4 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #424 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #492] + add lr, sp, #428 + ldr r9, [sp, #460] + ldr r7, [sp, #456] + ldr r11, [sp, #452] + ldr r10, [sp, #448] + ldr r3, [sp, #424] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #488] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #484] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #480] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #476] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #472] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #468] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #464] + str r0, [sp, #20] @ 4-byte Spill + ldm lr, {r0, r1, r2, r12, lr} + ldr r6, [sp, #48] @ 4-byte Reload + ldr r4, [sp, #120] @ 4-byte Reload + adds r3, r3, r6 + ldr r6, [sp, #132] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + str r3, [r6, #48] + ldr r3, [r8, #52] + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r2, r0 + mov r2, r3 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, lr, r0 + 
str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r5 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #352 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #420] + add r11, sp, #380 + add r12, sp, #356 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #416] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #412] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #408] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #404] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #400] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #396] + str r0, [sp, #28] @ 4-byte Spill + ldm r11, {r4, r9, r10, r11} + ldr r5, [sp, #376] + ldr lr, [sp, #352] + ldm r12, {r0, r1, r2, r3, r12} + ldr r7, [sp, #52] @ 4-byte Reload + adds r7, lr, r7 + ldr lr, [r8, #56] + str r7, [r6, #52] + ldr r6, [sp, #120] @ 4-byte Reload + add r7, sp, #280 + adcs r0, r0, r6 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r2, r0 + mov r2, lr + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #128] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload 
+ adcs r0, r1, r0 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + mov r0, r7 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #348] + add r8, sp, #316 + add r11, sp, #304 + add lr, sp, #280 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #344] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #340] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #336] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #332] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #328] + str r0, [sp, #28] @ 4-byte Spill + ldm r8, {r6, r7, r8} + ldm r11, {r9, r10, r11} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r5, [sp, #52] @ 4-byte Reload + adds r0, r0, r5 + ldr r5, [sp, #132] @ 4-byte Reload + str r0, [r5, #56] + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r8, r0 + ldr r8, [sp, #124] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + ldr r2, [r8, #60] + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #208 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #276] + add lr, sp, #228 + add r12, sp, #212 + ldr r6, [sp, #248] + ldr r9, [sp, #244] + ldr r4, [sp, #240] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #272] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #268] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #264] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #260] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #256] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #252] + str r0, [sp, #16] @ 4-byte Spill + ldm lr, {r10, r11, lr} + ldr r3, [sp, #208] + ldm r12, {r0, r1, r2, r12} + ldr r7, [sp, #88] @ 4-byte Reload + adds r3, r3, r7 + str r3, [r5, #60] + ldr r5, [sp, #120] @ 4-byte Reload + ldr r3, [r8, #64] + adcs r8, r0, r5 + ldr r0, [sp, #116] @ 4-byte Reload + adcs r5, r1, r0 + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, 
#16] @ 4-byte Reload + adcs r0, r2, r0 + mov r2, r3 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #128] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + add r0, sp, #136 + bl .LmulPv544x32(PLT) + add r3, sp, #136 + add r11, sp, #172 + add lr, sp, #152 + ldm r3, {r0, r1, r2, r3} + adds r7, r0, r8 + ldr r0, [sp, #12] @ 4-byte Reload + adcs r6, r1, r5 + adcs r5, r2, r0 + ldr r0, [sp, #8] @ 4-byte Reload + adcs r4, r3, r0 + ldr r0, [sp, #204] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #200] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #196] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #192] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #188] + str r0, [sp, #72] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldm lr, {r0, r2, r3, r12, lr} + ldr r1, [sp, #132] @ 4-byte Reload + str r7, [r1, #64] + str r6, [r1, #68] + str r5, [r1, #72] + ldr r5, [sp, #44] @ 4-byte Reload + str r4, [r1, #76] + ldr r4, [sp, #48] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [r1, #80] + ldr r0, [sp, #52] @ 4-byte Reload + adcs r2, r2, r4 + str r2, [r1, #84] + ldr r2, [sp, #88] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [r1, #88] + ldr r0, [sp, #92] @ 4-byte Reload + adcs r2, r12, r2 + str r2, [r1, #92] + ldr r2, [sp, #96] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [r1, #96] + ldr r0, [sp, #100] @ 4-byte Reload + adcs r2, r8, r2 + str r2, [r1, #100] + ldr r2, [sp, #104] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [r1, #104] + ldr r0, [sp, #108] @ 4-byte Reload + adcs r2, r10, r2 + str r2, [r1, #108] + ldr r2, [sp, #72] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [r1, #112] + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #76] @ 4-byte Reload + str r0, [r1, #116] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #80] @ 4-byte Reload + str r0, [r1, #120] + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #84] @ 4-byte Reload + str r0, [r1, #124] + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #128] @ 4-byte Reload + str r0, [r1, #128] + adc r2, r2, #0 + str r2, [r1, #132] + add sp, sp, #340 + add sp, sp, #1024 + pop {r4, r5, r6, r7, 
r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end256: + .size mcl_fpDbl_mulPre17L, .Lfunc_end256-mcl_fpDbl_mulPre17L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre17L + .align 2 + .type mcl_fpDbl_sqrPre17L,%function +mcl_fpDbl_sqrPre17L: @ @mcl_fpDbl_sqrPre17L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #332 + sub sp, sp, #332 + .pad #1024 + sub sp, sp, #1024 + mov r7, r1 + mov r4, r0 + add r0, sp, #1280 + ldr r2, [r7] + str r7, [sp, #120] @ 4-byte Spill + str r4, [sp, #124] @ 4-byte Spill + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1348] + ldr r1, [sp, #1284] + ldr r2, [r7, #4] + add r11, sp, #1024 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #1344] + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #1288] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #1340] + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #1292] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #1336] + str r1, [sp, #40] @ 4-byte Spill + mov r1, r7 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #1332] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #1328] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #1324] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #1320] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #1316] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #1312] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #1308] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1304] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #1300] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #1296] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #1280] + str r0, [r4] + add r0, r11, #184 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1276] + add r10, sp, #1232 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1272] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1268] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1264] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1260] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1256] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1252] + str r0, [sp, #24] @ 4-byte Spill + ldm r10, {r5, r6, r8, r9, r10} + ldr r0, [sp, #1208] + ldr r11, [sp, #52] @ 4-byte Reload + ldr lr, [sp, #1228] + ldr r12, [sp, #1224] + ldr r1, [sp, #1212] + ldr r2, [sp, #1216] + ldr r3, [sp, #1220] + adds r0, r0, r11 + str r0, [r4, #4] + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r7, #8] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #100] @ 
4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r7 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #1136 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1204] + add r12, sp, #1136 + ldr r6, [sp, #1176] + ldr r4, [sp, #1172] + ldr lr, [sp, #1168] + ldr r11, [sp, #1164] + ldr r10, [sp, #1160] + ldr r9, [sp, #1156] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1200] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1196] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1192] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1188] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1184] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1180] + str r0, [sp, #24] @ 4-byte Spill + ldm r12, {r0, r1, r2, r3, r12} + ldr r5, [sp, #52] @ 4-byte Reload + ldr r8, [sp, #124] @ 4-byte Reload + adds r0, r0, r5 + str r0, [r8, #8] + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r7, #12] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r4, r0 + add r4, sp, #1024 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r7 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, r4, #40 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1132] + add r11, sp, #1088 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1128] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1124] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1120] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1116] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1112] + str r0, [sp, #28] @ 4-byte Spill + ldm r11, {r5, r6, r8, r9, 
r10, r11} + ldr r0, [sp, #1064] + ldr r4, [sp, #52] @ 4-byte Reload + ldr lr, [sp, #1084] + ldr r12, [sp, #1080] + ldr r1, [sp, #1068] + ldr r2, [sp, #1072] + ldr r3, [sp, #1076] + adds r0, r0, r4 + ldr r4, [sp, #124] @ 4-byte Reload + str r0, [r4, #12] + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r7, #16] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r7 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #992 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1060] + add lr, sp, #1012 + add r12, sp, #992 + ldr r6, [sp, #1032] + ldr r5, [sp, #1028] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1056] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1052] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1048] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1044] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1040] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1036] + str r0, [sp, #24] @ 4-byte Spill + ldm lr, {r9, r10, r11, lr} + ldm r12, {r0, r1, r2, r3, r12} + ldr r8, [sp, #52] @ 4-byte Reload + adds r0, r0, r8 + str r0, [r4, #16] + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r7, #20] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + 
adcs r0, r6, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r7 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #920 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #988] + add r10, sp, #944 + add lr, sp, #920 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #984] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #980] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #976] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #972] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #968] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #964] + str r0, [sp, #24] @ 4-byte Spill + ldm r10, {r5, r6, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #52] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #20] + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r7, #24] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r7 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #848 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #916] + add lr, sp, #868 + add r12, sp, #848 + ldr r6, [sp, #888] + ldr r5, [sp, #884] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #912] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #908] + str r0, [sp, #40] @ 4-byte 
Spill + ldr r0, [sp, #904] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #900] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #896] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #892] + str r0, [sp, #24] @ 4-byte Spill + ldm lr, {r9, r10, r11, lr} + ldm r12, {r0, r1, r2, r3, r12} + ldr r8, [sp, #52] @ 4-byte Reload + adds r0, r0, r8 + str r0, [r4, #24] + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r7, #28] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r7 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #776 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #844] + add r10, sp, #800 + add lr, sp, #776 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #840] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #836] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #832] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #828] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #824] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #820] + str r0, [sp, #24] @ 4-byte Spill + ldm r10, {r5, r6, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #52] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #28] + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r7, #32] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] 
@ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r7 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #704 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #772] + add lr, sp, #724 + add r12, sp, #704 + ldr r6, [sp, #744] + ldr r5, [sp, #740] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #768] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #764] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #760] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #756] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #752] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #748] + str r0, [sp, #24] @ 4-byte Spill + ldm lr, {r9, r10, r11, lr} + ldm r12, {r0, r1, r2, r3, r12} + ldr r8, [sp, #52] @ 4-byte Reload + adds r0, r0, r8 + str r0, [r4, #32] + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r7, #36] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r7 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #632 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #700] + add r10, sp, #656 + add lr, sp, #632 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, 
[sp, #696] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #692] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #688] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #684] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #680] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #676] + str r0, [sp, #24] @ 4-byte Spill + ldm r10, {r5, r6, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #52] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #36] + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [r7, #40] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r7 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #560 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #628] + add r7, sp, #596 + add lr, sp, #580 + add r12, sp, #560 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #624] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #620] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #616] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #612] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #608] + str r0, [sp, #24] @ 4-byte Spill + ldm r7, {r5, r6, r7} + ldm lr, {r9, r10, r11, lr} + ldm r12, {r0, r1, r2, r3, r12} + ldr r8, [sp, #52] @ 4-byte Reload + adds r0, r0, r8 + str r0, [r4, #40] + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs 
r0, lr, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #120] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + ldr r2, [r1, #44] + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + add r0, sp, #488 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #556] + add r10, sp, #512 + add lr, sp, #488 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #552] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #548] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #544] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #540] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #536] + str r0, [sp, #20] @ 4-byte Spill + ldm r10, {r5, r6, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #44] @ 4-byte Reload + adds r0, r0, r11 + str r0, [r4, #44] + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r9, r0 + ldr r9, [sp, #120] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + ldr r2, [r9, #48] + adcs r0, r10, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #416 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #484] + add r10, sp, #444 + add lr, sp, #420 + mov r8, r4 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, 
#480] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #476] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #472] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #468] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #464] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #460] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #456] + str r0, [sp, #20] @ 4-byte Spill + ldm r10, {r5, r6, r10} + ldr r11, [sp, #440] + ldr r12, [sp, #416] + ldm lr, {r0, r1, r2, r3, lr} + ldr r7, [sp, #44] @ 4-byte Reload + adds r7, r12, r7 + str r7, [r4, #48] + ldr r7, [sp, #116] @ 4-byte Reload + mov r4, r9 + add r9, sp, #344 + ldr r12, [r4, #52] + adcs r7, r0, r7 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r2, r0 + mov r2, r12 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r6, r0 + mov r6, r4 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + mov r0, r9 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #412] + add r11, sp, #368 + add r12, sp, #348 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #408] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #404] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #400] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #396] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #392] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #388] + str r0, [sp, #24] @ 4-byte Spill + ldm r11, {r4, r5, r9, r10, r11} + ldr lr, [sp, #344] + ldm r12, {r0, r1, r2, r3, r12} + adds r7, lr, r7 + str r7, [r8, #52] + mov r7, r6 + ldr r6, [sp, #116] @ 4-byte Reload + add r8, sp, #272 + ldr lr, [r7, #56] + adcs r0, r0, r6 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r2, r0 + mov r2, lr + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, 
#96] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r7 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + mov r0, r8 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #340] + add r8, sp, #308 + add lr, sp, #292 + add r12, sp, #272 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #336] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #332] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #328] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #324] + str r0, [sp, #28] @ 4-byte Spill + ldm r8, {r5, r6, r7, r8} + ldm lr, {r9, r10, r11, lr} + ldm r12, {r0, r1, r2, r3, r12} + ldr r4, [sp, #48] @ 4-byte Reload + adds r0, r0, r4 + ldr r4, [sp, #124] @ 4-byte Reload + str r0, [r4, #56] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r12, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #120] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + ldr r2, [r6, #60] + adcs r0, r7, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 
4-byte Spill + add r0, sp, #200 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #268] + add r9, sp, #232 + add lr, sp, #204 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #264] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #260] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #256] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #252] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #248] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #244] + str r0, [sp, #12] @ 4-byte Spill + ldm r9, {r5, r8, r9} + ldr r10, [sp, #228] + ldr r12, [sp, #200] + ldm lr, {r0, r1, r2, r3, r11, lr} + ldr r7, [sp, #80] @ 4-byte Reload + adds r7, r12, r7 + ldr r12, [r6, #64] + str r7, [r4, #60] + ldr r4, [sp, #116] @ 4-byte Reload + adcs r7, r0, r4 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r4, r1, r0 + ldr r0, [sp, #108] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adcs r0, r2, r0 + mov r2, r12 + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r10, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r1, r0 + mov r1, r6 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #116] @ 4-byte Spill + add r0, sp, #128 + bl .LmulPv544x32(PLT) + add r3, sp, #128 + add r11, sp, #164 + add lr, sp, #144 + ldm r3, {r0, r1, r2, r3} + adds r7, r0, r7 + ldr r0, [sp, #8] @ 4-byte Reload + adcs r6, r1, r4 + adcs r5, r2, r0 + ldr r0, [sp, #4] @ 4-byte Reload + adcs r4, r3, r0 + ldr r0, [sp, #196] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #192] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #188] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #184] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #180] + str r0, [sp, #64] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldm lr, {r0, r2, r3, r12, lr} + ldr r1, [sp, #124] @ 4-byte Reload + str r7, [r1, #64] + str r6, [r1, #68] + str r5, [r1, #72] + ldr r5, [sp, #40] @ 4-byte Reload + str r4, [r1, #76] + ldr r4, [sp, #44] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [r1, #80] + ldr r0, [sp, #48] @ 4-byte Reload + adcs r2, r2, r4 + str r2, [r1, #84] + ldr r2, [sp, #80] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [r1, #88] + ldr r0, [sp, #84] @ 4-byte Reload + adcs r2, r12, r2 + str r2, [r1, #92] + ldr r2, [sp, #88] @ 4-byte Reload + adcs r0, lr, r0 + str r0, [r1, #96] + ldr r0, [sp, #92] @ 4-byte Reload + 
adcs r2, r8, r2 + str r2, [r1, #100] + ldr r2, [sp, #96] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [r1, #104] + ldr r0, [sp, #100] @ 4-byte Reload + adcs r2, r10, r2 + str r2, [r1, #108] + ldr r2, [sp, #64] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [r1, #112] + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #68] @ 4-byte Reload + str r0, [r1, #116] + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #72] @ 4-byte Reload + str r0, [r1, #120] + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #76] @ 4-byte Reload + str r0, [r1, #124] + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r2, r0 + ldr r2, [sp, #120] @ 4-byte Reload + str r0, [r1, #128] + adc r2, r2, #0 + str r2, [r1, #132] + add sp, sp, #332 + add sp, sp, #1024 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end257: + .size mcl_fpDbl_sqrPre17L, .Lfunc_end257-mcl_fpDbl_sqrPre17L + .cantunwind + .fnend + + .globl mcl_fp_mont17L + .align 2 + .type mcl_fp_mont17L,%function +mcl_fp_mont17L: @ @mcl_fp_mont17L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #556 + sub sp, sp, #556 + .pad #2048 + sub sp, sp, #2048 + add r12, sp, #140 + mov r4, r3 + stm r12, {r1, r2, r3} + str r0, [sp, #96] @ 4-byte Spill + add r0, sp, #2528 + ldr r5, [r3, #-4] + ldr r2, [r2] + str r5, [sp, #136] @ 4-byte Spill + bl .LmulPv544x32(PLT) + ldr r0, [sp, #2528] + ldr r1, [sp, #2532] + mul r2, r0, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #2596] + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [sp, #2536] + add r5, sp, #2048 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #2592] + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #2540] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #2588] + str r1, [sp, #92] @ 4-byte Spill + mov r1, r4 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #2584] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #2580] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #2576] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #2572] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #2568] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #2564] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #2560] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #2556] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #2552] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #2548] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2544] + str r0, [sp, #48] @ 4-byte Spill + add r0, r5, #408 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #2524] + ldr r1, [sp, #140] @ 4-byte Reload + ldr r4, [sp, #2484] + ldr r10, [sp, #2480] + ldr r6, [sp, #2476] + ldr r7, [sp, #2472] + ldr r11, [sp, #2456] + ldr r9, [sp, #2460] + ldr r5, [sp, #2464] + ldr r8, [sp, #2468] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #2520] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #2516] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2512] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2508] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2504] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #2500] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2496] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2492] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #2488] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #144] @ 4-byte Reload + ldr r2, [r0, #4] + add r0, sp, #2384 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #80] @ 4-byte Reload + ldr r1, [sp, #76] @ 4-byte Reload + ldr r2, [sp, #20] @ 4-byte 
Reload + ldr r3, [sp, #2400] + ldr r12, [sp, #2404] + ldr lr, [sp, #2408] + adds r0, r11, r0 + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r9, r0 + ldr r9, [sp, #2424] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + ldr r11, [sp, #104] @ 4-byte Reload + adcs r0, r5, r0 + ldr r5, [sp, #2416] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r8, r0 + ldr r8, [sp, #2384] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r7, r0 + ldr r7, [sp, #100] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #2420] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r10, r0 + ldr r10, [sp, #2428] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r4, r0 + ldr r4, [sp, #2412] + adcs r1, r2, r1 + ldr r2, [sp, #24] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + mov r0, #0 + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #108] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #112] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #116] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #120] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #52] @ 4-byte Reload + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #124] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #60] @ 4-byte Reload + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #128] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #68] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [sp, #132] @ 4-byte Reload + adcs r1, r2, r1 + ldr r2, [sp, #2396] + adc r0, r0, #0 + adds r8, r11, r8 + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #2392] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2452] + str r8, [sp, #24] @ 4-byte Spill + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2448] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #2444] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #2440] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #2436] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #2432] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #2388] + adcs r0, r7, r0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #4] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #2048 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte 
Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #148] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + mul r2, r8, r0 + add r0, r4, #264 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #2380] + add r10, sp, #2320 + ldr r7, [sp, #2340] + ldr r6, [sp, #2336] + ldr r4, [sp, #2312] + ldr r11, [sp, #2316] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2376] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2372] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2368] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2364] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2360] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #2356] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2352] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2348] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #2344] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r5, r8, r9, r10} + ldr r0, [sp, #144] @ 4-byte Reload + ldr r1, [sp, #140] @ 4-byte Reload + ldr r2, [r0, #8] + add r0, sp, #2240 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #24] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #2252] + ldr r3, [sp, #2256] + ldr r12, [sp, #2260] + ldr lr, [sp, #2264] + adds r0, r0, r4 + ldr r4, [sp, #2268] + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r11, [sp, #132] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #2272] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #2240] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #2280] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #2284] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #2276] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #128] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs 
r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #2248] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + adds r8, r11, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #2308] + str r8, [sp, #36] @ 4-byte Spill + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2304] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2300] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2296] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2292] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2288] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2244] + adcs r0, r7, r0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #2048 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #148] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + mul r2, r8, r0 + add r0, r4, #120 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #2236] + add r10, sp, #2176 + ldr r7, [sp, #2196] + ldr r6, [sp, #2192] + ldr r4, [sp, #2168] + ldr r11, [sp, #2172] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2232] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2228] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2224] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2220] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2216] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2212] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2208] + str r0, [sp, #24] 
@ 4-byte Spill + ldr r0, [sp, #2204] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #2200] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r5, r8, r9, r10} + ldr r0, [sp, #144] @ 4-byte Reload + ldr r1, [sp, #140] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, sp, #2096 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #2108] + ldr r3, [sp, #2112] + ldr r12, [sp, #2116] + ldr lr, [sp, #2120] + adds r0, r0, r4 + ldr r4, [sp, #2124] + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r11, [sp, #132] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #2128] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #2096] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #2136] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #2140] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #2132] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #128] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #2104] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + adds r8, r11, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #2164] + str r8, [sp, #36] @ 4-byte Spill + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2160] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2156] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2152] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2148] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2144] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2100] + adcs r0, r7, r0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte 
Reload + adcs r0, r0, lr + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #1024 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #148] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + mul r2, r8, r0 + add r0, r4, #1000 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #2092] + add r10, sp, #2032 + ldr r7, [sp, #2052] + ldr r6, [sp, #2048] + ldr r4, [sp, #2024] + ldr r11, [sp, #2028] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2088] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2084] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2080] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2076] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2072] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2068] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2064] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #2060] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #2056] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r5, r8, r9, r10} + ldr r0, [sp, #144] @ 4-byte Reload + ldr r1, [sp, #140] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, sp, #1952 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1964] + ldr r3, [sp, #1968] + ldr r12, [sp, #1972] + ldr lr, [sp, #1976] + adds r0, r0, r4 + ldr r4, [sp, #1980] + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r11, [sp, #132] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1984] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1952] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1992] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1996] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1988] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #128] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, 
#100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1960] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + adds r8, r11, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #2020] + str r8, [sp, #36] @ 4-byte Spill + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2016] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2012] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2008] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2004] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2000] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1956] + adcs r0, r7, r0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #1024 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #148] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + mul r2, r8, r0 + add r0, r4, #856 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1948] + add r10, sp, #1888 + ldr r7, [sp, #1908] + ldr r6, [sp, #1904] + ldr r4, [sp, #1880] + 
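+ @ annotation (not in the generated source): .LmulPv544x32 appears to
+ @ multiply a 17x32-bit (544-bit) vector by the 32-bit scalar passed in r2,
+ @ writing the 18-word product to the buffer passed in r0; the ldr/str
+ @ pairs around here stage that product from the stack before it is folded
+ @ into the running accumulator below.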
ldr r11, [sp, #1884] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1944] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1940] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1936] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1932] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1928] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1924] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1920] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1916] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1912] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r5, r8, r9, r10} + ldr r0, [sp, #144] @ 4-byte Reload + ldr r1, [sp, #140] @ 4-byte Reload + ldr r2, [r0, #20] + add r0, sp, #1808 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1820] + ldr r3, [sp, #1824] + ldr r12, [sp, #1828] + ldr lr, [sp, #1832] + adds r0, r0, r4 + ldr r4, [sp, #1836] + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r11, [sp, #132] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1840] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1808] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1848] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1852] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1844] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #128] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1816] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + adds r8, r11, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1876] + str r8, [sp, #36] @ 4-byte Spill + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1872] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1868] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1864] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1860] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1856] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1812] + adcs r0, r7, r0 + str r0, [sp, #132] @ 
4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #1024 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #148] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + mul r2, r8, r0 + add r0, r4, #712 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1804] + add r10, sp, #1744 + ldr r7, [sp, #1764] + ldr r6, [sp, #1760] + ldr r4, [sp, #1736] + ldr r11, [sp, #1740] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1800] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1796] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1792] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1788] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1784] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1780] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1776] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1772] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1768] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r5, r8, r9, r10} + ldr r0, [sp, #144] @ 4-byte Reload + ldr r1, [sp, #140] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, sp, #1664 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1676] + ldr r3, [sp, #1680] + ldr r12, [sp, #1684] + ldr lr, [sp, #1688] + adds r0, r0, r4 + ldr r4, [sp, #1692] + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r11, [sp, #132] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1696] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1664] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1704] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1708] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte 
Reload + adcs r0, r0, r6 + ldr r6, [sp, #1700] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #128] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1672] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + adds r8, r11, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1732] + str r8, [sp, #36] @ 4-byte Spill + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1728] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1724] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1720] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1716] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1712] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1668] + adcs r0, r7, r0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #1024 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 
4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #148] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + mul r2, r8, r0 + add r0, r4, #568 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1660] + add r10, sp, #1600 + ldr r7, [sp, #1620] + ldr r6, [sp, #1616] + ldr r4, [sp, #1592] + ldr r11, [sp, #1596] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1656] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1652] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1648] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1644] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1640] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1636] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1632] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1628] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1624] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r5, r8, r9, r10} + ldr r0, [sp, #144] @ 4-byte Reload + ldr r1, [sp, #140] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, sp, #1520 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1532] + ldr r3, [sp, #1536] + ldr r12, [sp, #1540] + ldr lr, [sp, #1544] + adds r0, r0, r4 + ldr r4, [sp, #1548] + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r11, [sp, #132] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1552] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1520] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1560] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1564] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1556] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #128] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1528] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + adds r8, r11, r8 + str r0, [sp, #60] @ 4-byte 
Spill + ldr r0, [sp, #1588] + str r8, [sp, #36] @ 4-byte Spill + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1584] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1580] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1576] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1572] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1568] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1524] + adcs r0, r7, r0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #1024 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #148] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + mul r2, r8, r0 + add r0, r4, #424 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1516] + add r10, sp, #1456 + ldr r7, [sp, #1476] + ldr r6, [sp, #1472] + ldr r4, [sp, #1448] + ldr r11, [sp, #1452] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1512] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1508] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1504] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1500] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1496] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1492] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1488] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1484] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1480] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r5, r8, r9, r10} + ldr r0, [sp, #144] @ 4-byte Reload + ldr r1, [sp, #140] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, sp, #1376 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1388] + ldr r3, [sp, #1392] + ldr r12, [sp, #1396] + ldr lr, [sp, #1400] + adds r0, r0, r4 + ldr r4, [sp, #1404] + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r11, [sp, #132] @ 4-byte Reload + 
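+ @ the long adcs chains here propagate a single carry across all 17
+ @ accumulator limbs; the accumulator lives in the spill slots between
+ @ [sp, #60] and [sp, #132], so each step is reload / adcs / spill.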
adcs r0, r0, r5 + ldr r5, [sp, #1408] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1376] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1416] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1420] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1412] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #128] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1384] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + adds r8, r11, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1444] + str r8, [sp, #36] @ 4-byte Spill + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1440] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1436] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1432] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1428] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1424] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1380] + adcs r0, r7, r0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #1024 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, 
[sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #148] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + mul r2, r8, r0 + add r0, r4, #280 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1372] + add r10, sp, #1312 + ldr r7, [sp, #1332] + ldr r6, [sp, #1328] + ldr r4, [sp, #1304] + ldr r11, [sp, #1308] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1368] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1364] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1360] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1356] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1352] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1348] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1344] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1340] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1336] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r5, r8, r9, r10} + ldr r0, [sp, #144] @ 4-byte Reload + ldr r1, [sp, #140] @ 4-byte Reload + ldr r2, [r0, #36] + add r0, sp, #1232 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #1244] + ldr r3, [sp, #1248] + ldr r12, [sp, #1252] + ldr lr, [sp, #1256] + adds r0, r0, r4 + ldr r4, [sp, #1260] + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + ldr r11, [sp, #132] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #1264] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r8 + ldr r8, [sp, #1232] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #1272] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + ldr r10, [sp, #1276] + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1268] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #128] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 
4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #1240] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + adds r8, r11, r8 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #1300] + str r8, [sp, #36] @ 4-byte Spill + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1296] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1292] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1288] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1284] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1280] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1236] + adcs r0, r7, r0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + add r4, sp, #1024 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #136] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + mul r2, r8, r5 + adcs r0, r0, r6 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #148] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, r4, #136 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1228] + ldr r1, [sp, #140] @ 4-byte Reload + ldr r11, [sp, #1184] + ldr r4, [sp, #1180] + ldr r6, [sp, #1176] + ldr r7, [sp, #1160] + ldr r8, [sp, #1164] + ldr r9, [sp, #1168] + ldr r10, [sp, #1172] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1224] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1220] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1216] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1212] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1208] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1204] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1200] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1196] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1192] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, 
#1188] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #144] @ 4-byte Reload + ldr r2, [r0, #40] + add r0, sp, #1088 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #36] @ 4-byte Reload + ldr r1, [sp, #128] @ 4-byte Reload + ldr r2, [sp, #12] @ 4-byte Reload + add lr, sp, #1104 + adds r0, r0, r7 + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r8 + add r8, sp, #1088 + adcs r1, r1, r9 + str r1, [sp, #128] @ 4-byte Spill + ldr r1, [sp, #124] @ 4-byte Reload + adcs r1, r1, r10 + str r1, [sp, #124] @ 4-byte Spill + ldr r1, [sp, #120] @ 4-byte Reload + adcs r1, r1, r6 + str r1, [sp, #120] @ 4-byte Spill + ldr r1, [sp, #116] @ 4-byte Reload + adcs r1, r1, r4 + str r1, [sp, #116] @ 4-byte Spill + ldr r1, [sp, #112] @ 4-byte Reload + adcs r1, r1, r11 + str r1, [sp, #112] @ 4-byte Spill + ldr r1, [sp, #108] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #16] @ 4-byte Reload + str r1, [sp, #108] @ 4-byte Spill + ldr r1, [sp, #104] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [sp, #100] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #92] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #56] @ 4-byte Reload + str r1, [sp, #68] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #64] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adc r1, r1, #0 + str r1, [sp, #60] @ 4-byte Spill + ldm r8, {r4, r6, r8} + ldr r7, [sp, #1100] + ldr r10, [sp, #1140] + ldr r9, [sp, #1136] + adds r0, r0, r4 + ldr r4, [sp, #1128] + mul r1, r0, r5 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #1156] + ldr r5, [sp, #1132] + str r1, [sp, #52] @ 4-byte Spill + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1152] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1148] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1144] + str r0, [sp, #40] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #128] @ 4-byte Reload + adcs r6, r11, r6 + str r6, [sp, #128] @ 4-byte Spill + ldr r6, [sp, #124] @ 4-byte Reload + adcs r6, r6, r8 + str r6, [sp, #124] @ 4-byte Spill + ldr r6, [sp, #120] @ 4-byte Reload + adcs r6, r6, r7 + str r6, [sp, #120] @ 4-byte Spill + ldr r6, [sp, #116] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte 
Reload + adcs r0, r0, lr + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #148] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #1016 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1084] + add r10, sp, #1016 + ldr r11, [sp, #1044] + ldr r4, [sp, #1040] + ldr r5, [sp, #1036] + ldr r6, [sp, #1032] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1080] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1076] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1072] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1068] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1064] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1060] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1056] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1052] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1048] + str r0, [sp, #16] @ 4-byte Spill + ldm r10, {r7, r8, r9, r10} + ldr r0, [sp, #144] @ 4-byte Reload + ldr r1, [sp, #140] @ 4-byte Reload + ldr r2, [r0, #44] + add r0, sp, #944 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #132] @ 4-byte Reload + ldr r1, [sp, #124] @ 4-byte Reload + ldr r2, [sp, #16] @ 4-byte Reload + add lr, sp, #960 + adds r0, r0, r7 + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r8 + add r8, sp, #944 + adcs r1, r1, r9 + str r1, [sp, #128] @ 4-byte Spill + ldr r1, [sp, #120] @ 4-byte Reload + adcs r1, r1, r10 + add r10, sp, #984 + str r1, [sp, #124] @ 4-byte Spill + ldr r1, [sp, #116] @ 4-byte Reload + adcs r1, r1, r6 + str r1, [sp, #120] @ 4-byte Spill + ldr r1, [sp, #112] @ 4-byte Reload + adcs r1, r1, r5 + str r1, [sp, #116] @ 4-byte Spill + ldr r1, [sp, #108] @ 4-byte Reload + adcs r1, r1, r4 + str r1, [sp, #112] @ 4-byte Spill + ldr r1, [sp, #104] @ 4-byte Reload + adcs r1, r1, r11 + str r1, [sp, #108] @ 4-byte Spill + ldr r1, [sp, #100] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [sp, #92] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #32] @ 4-byte Reload + str r1, [sp, #88] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r1, 
r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #56] @ 4-byte Reload + adc r1, r1, #0 + str r1, [sp, #32] @ 4-byte Spill + ldm r8, {r4, r6, r8} + ldr r7, [sp, #956] + adds r1, r0, r4 + ldr r0, [sp, #136] @ 4-byte Reload + str r1, [sp, #132] @ 4-byte Spill + mul r2, r1, r0 + ldr r0, [sp, #1012] + str r2, [sp, #28] @ 4-byte Spill + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1008] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1004] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1000] + str r0, [sp, #12] @ 4-byte Spill + ldm r10, {r4, r5, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #128] @ 4-byte Reload + adcs r6, r11, r6 + str r6, [sp, #80] @ 4-byte Spill + ldr r6, [sp, #124] @ 4-byte Reload + adcs r6, r6, r8 + str r6, [sp, #76] @ 4-byte Spill + ldr r6, [sp, #120] @ 4-byte Reload + adcs r6, r6, r7 + str r6, [sp, #72] @ 4-byte Spill + ldr r6, [sp, #116] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #148] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + add r0, sp, #872 + bl .LmulPv544x32(PLT) + ldr r1, [sp, #940] + add r11, sp, #880 + ldr r5, [sp, #900] + ldr r4, [sp, #896] + ldr r9, [sp, #872] + ldr r10, [sp, #876] + add r0, sp, #800 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #936] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #932] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #928] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #924] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #920] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #916] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #912] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #908] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #904] + str r1, [sp, #16] @ 4-byte Spill + ldm r11, {r6, r7, r8, r11} + ldr r1, [sp, #144] @ 4-byte Reload + ldr r2, [r1, #48] + ldr r1, [sp, #140] @ 4-byte Reload + bl .LmulPv544x32(PLT) + ldr r0, 
[sp, #132] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + add lr, sp, #804 + adds r0, r0, r9 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #828 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #868] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #864] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #860] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #856] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #852] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #848] + str r0, [sp, #28] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r7, [sp, #800] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #132] @ 4-byte Reload + ldr r6, [sp, #80] @ 4-byte Reload + adds r7, r11, r7 + adcs r0, r6, r0 + str r7, [sp, #32] @ 4-byte Spill + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + 
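+ @ each outer round fetches the next 32-bit multiplier limb in order
+ @ (offsets #0, #4, ... #64) from the operand pointer reloaded from
+ @ [sp, #144], multiplies it into the 17-limb operand via .LmulPv544x32,
+ @ and accumulates the product in chains like the one here.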
adcs r0, r0, r10 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #148] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #728 + bl .LmulPv544x32(PLT) + ldr r1, [sp, #796] + add r9, sp, #732 + ldr r5, [sp, #756] + ldr r11, [sp, #752] + ldr r8, [sp, #748] + ldr r10, [sp, #728] + add r0, sp, #656 + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #792] + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #788] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #784] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #780] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #776] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #772] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #768] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #764] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #760] + str r1, [sp, #16] @ 4-byte Spill + ldm r9, {r4, r6, r7, r9} + ldr r1, [sp, #144] @ 4-byte Reload + ldr r2, [r1, #52] + ldr r1, [sp, #140] @ 4-byte Reload + bl .LmulPv544x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + add lr, sp, #660 + adds r0, r0, r10 + add r10, sp, #684 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, 
#104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #724] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #720] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #716] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #712] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #708] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #704] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r7, [sp, #656] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #84] @ 4-byte Reload + ldr r6, [sp, #80] @ 4-byte Reload + adds r7, r11, r7 + adcs r0, r6, r0 + str r7, [sp, #32] @ 4-byte Spill + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #148] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #584 + bl .LmulPv544x32(PLT) + ldr r1, [sp, #652] + add r9, sp, #588 + ldr r5, [sp, #612] + ldr r11, [sp, #608] + ldr r8, [sp, #604] + ldr r10, [sp, #584] + add r0, sp, #512 + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #648] + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #644] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #640] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #636] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #632] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #628] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #624] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #620] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #616] + str r1, [sp, #16] @ 4-byte Spill + ldm r9, {r4, r6, r7, r9} + ldr r1, [sp, #144] @ 4-byte Reload + ldr r2, [r1, 
#56] + ldr r1, [sp, #140] @ 4-byte Reload + bl .LmulPv544x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + add lr, sp, #516 + adds r0, r0, r10 + add r10, sp, #540 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #580] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #576] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #572] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #568] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #564] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #560] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r7, [sp, #512] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #84] @ 4-byte Reload + ldr r6, [sp, #80] @ 4-byte Reload + adds r7, r11, r7 + adcs r0, r6, r0 + str r7, [sp, #32] @ 4-byte Spill + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r9 
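+ @ the "mul r2, rN, r0" issued before every second .LmulPv544x32 call
+ @ (r0 reloaded from [sp, #136]) is consistent with a Montgomery reduction
+ @ step: it forms q = t[0] * (-p^-1 mod 2^32) so that adding q*p clears the
+ @ low word of the accumulator ahead of the implicit one-word shift.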
+ str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #148] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #440 + bl .LmulPv544x32(PLT) + ldr r1, [sp, #508] + add r9, sp, #444 + ldr r5, [sp, #468] + ldr r11, [sp, #464] + ldr r8, [sp, #460] + ldr r10, [sp, #440] + add r0, sp, #368 + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #504] + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #500] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #496] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #492] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #488] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #484] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #480] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #476] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #472] + str r1, [sp, #16] @ 4-byte Spill + ldm r9, {r4, r6, r7, r9} + ldr r1, [sp, #144] @ 4-byte Reload + ldr r2, [r1, #60] + ldr r1, [sp, #140] @ 4-byte Reload + bl .LmulPv544x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #16] @ 4-byte Reload + add lr, sp, #372 + adds r0, r0, r10 + add r10, sp, #396 + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte 
Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #436] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #432] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #428] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #424] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #420] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #416] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r7, [sp, #368] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #84] @ 4-byte Reload + ldr r6, [sp, #80] @ 4-byte Reload + adds r7, r11, r7 + adcs r0, r6, r0 + str r7, [sp, #32] @ 4-byte Spill + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #148] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #296 + bl .LmulPv544x32(PLT) + ldr r1, [sp, #364] + add r11, sp, #312 + add r7, sp, #300 + ldr r9, [sp, #324] + add r0, sp, #224 + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [sp, #360] + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #356] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #352] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #348] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #344] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #340] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #336] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #332] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #328] + str r1, [sp, #16] @ 4-byte Spill + ldm r11, {r4, r10, r11} + ldr r8, [sp, #296] + 
ldm r7, {r5, r6, r7} + ldr r1, [sp, #144] @ 4-byte Reload + ldr r2, [r1, #64] + ldr r1, [sp, #140] @ 4-byte Reload + bl .LmulPv544x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #80] @ 4-byte Reload + ldr r2, [sp, #16] @ 4-byte Reload + add lr, sp, #240 + adds r0, r0, r8 + ldr r8, [sp, #232] + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #268] + adcs r1, r1, r6 + str r1, [sp, #144] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + adcs r1, r1, r7 + ldr r7, [sp, #236] + str r1, [sp, #140] @ 4-byte Spill + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r1, r4 + ldr r4, [sp, #224] + str r1, [sp, #84] @ 4-byte Spill + ldr r1, [sp, #68] @ 4-byte Reload + adcs r1, r1, r10 + str r1, [sp, #80] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + adcs r1, r1, r11 + ldr r11, [sp, #228] + str r1, [sp, #76] @ 4-byte Spill + ldr r1, [sp, #60] @ 4-byte Reload + adcs r1, r1, r9 + str r1, [sp, #72] @ 4-byte Spill + ldr r1, [sp, #132] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [sp, #132] @ 4-byte Spill + ldr r1, [sp, #128] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r1, [sp, #128] @ 4-byte Spill + ldr r1, [sp, #124] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #28] @ 4-byte Reload + str r1, [sp, #124] @ 4-byte Spill + ldr r1, [sp, #120] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #36] @ 4-byte Reload + str r1, [sp, #120] @ 4-byte Spill + ldr r1, [sp, #116] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #116] @ 4-byte Spill + ldr r1, [sp, #112] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #44] @ 4-byte Reload + str r1, [sp, #112] @ 4-byte Spill + ldr r1, [sp, #108] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r1, [sp, #108] @ 4-byte Spill + ldr r1, [sp, #104] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r1, [sp, #104] @ 4-byte Spill + ldr r1, [sp, #100] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #56] @ 4-byte Reload + str r1, [sp, #100] @ 4-byte Spill + ldr r1, [sp, #92] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #92] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + adc r1, r1, #0 + adds r9, r0, r4 + ldr r0, [sp, #136] @ 4-byte Reload + ldr r4, [sp, #264] + str r1, [sp, #88] @ 4-byte Spill + mul r1, r9, r0 + ldr r0, [sp, #292] + str r1, [sp, #68] @ 4-byte Spill + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #288] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #284] + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #280] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #276] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #272] + str r0, [sp, #36] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r10, [sp, #144] @ 4-byte Reload + ldr r6, [sp, #140] @ 4-byte Reload + adcs r11, r10, r11 + adcs r10, r6, r8 + ldr r6, [sp, #84] @ 4-byte Reload + adcs r7, r6, r7 + ldr r6, [sp, #80] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #68] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #84] @ 4-byte Spill + ldr 
r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #148] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r8, r0, r1 + ldr r0, [sp, #108] @ 4-byte Reload + ldr r1, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #136] @ 4-byte Reload + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #60] @ 4-byte Reload + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #64] @ 4-byte Reload + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #144] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + add r0, sp, #152 + bl .LmulPv544x32(PLT) + add r3, sp, #152 + ldm r3, {r0, r1, r2, r3} + adds r0, r9, r0 + adcs r4, r11, r1 + ldr r0, [sp, #168] + ldr r1, [sp, #44] @ 4-byte Reload + adcs r6, r10, r2 + str r4, [sp, #52] @ 4-byte Spill + adcs r9, r7, r3 + mov r3, r5 + str r6, [sp, #60] @ 4-byte Spill + str r9, [sp, #68] @ 4-byte Spill + adcs lr, r1, r0 + ldr r0, [sp, #172] + ldr r1, [sp, #48] @ 4-byte Reload + str lr, [sp, #72] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #176] + adcs r0, r1, r0 + ldr r1, [sp, #76] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #180] + adcs r0, r1, r0 + ldr r1, [sp, #80] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #184] + adcs r0, r1, r0 + ldr r1, [sp, #84] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #188] + adcs r0, r1, r0 + ldr r1, [sp, #120] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #192] + adcs r11, r1, r0 + ldr r0, [sp, #196] + ldr r1, [sp, #124] @ 4-byte Reload + str r11, [sp, #76] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #128] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #200] + adcs r0, r8, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #204] + adcs r0, r1, r0 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #208] + adcs r0, r1, r0 + ldr r1, [sp, #136] @ 4-byte Reload + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #212] + adcs r0, r1, r0 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #216] + adcs r0, r1, r0 + ldr r1, [sp, #144] @ 4-byte Reload + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #220] + adcs r0, r1, r0 + str r0, [sp, #144] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + ldm r3, {r1, r2, r7} + ldr r0, [r3, #64] + ldr r5, [r3, #12] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [r3, #36] + subs r12, r4, r1 + ldr r1, [r3, #40] + sbcs r4, r6, r2 + ldr r2, [sp, #100] @ 4-byte Reload + sbcs r6, r9, r7 + ldr r7, [r3, #32] + ldr r9, [r3, #28] + sbcs r10, lr, r5 + ldr r5, [r3, #16] + ldr lr, [r3, #24] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r3, #44] + sbcs r2, r2, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r3, #48] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r3, #52] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r3, #56] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [r3, #60] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [r3, #20] + 
ldr r3, [sp, #104] @ 4-byte Reload
+ sbcs r3, r3, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs lr, r0, lr
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbcs r5, r0, r9
+ ldr r0, [sp, #116] @ 4-byte Reload
+ sbcs r8, r0, r7
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ sbcs r9, r11, r0
+ ldr r0, [sp, #120] @ 4-byte Reload
+ sbcs r11, r0, r1
+ ldr r0, [sp, #124] @ 4-byte Reload
+ ldr r1, [sp, #56] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ sbcs r0, r0, r1
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbc r0, r0, #0
+ ands r1, r0, #1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ movne r4, r7
+ movne r12, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r12, [r0]
+ str r4, [r0, #4]
+ ldr r4, [sp, #68] @ 4-byte Reload
+ movne r6, r4
+ cmp r1, #0
+ str r6, [r0, #8]
+ ldr r6, [sp, #72] @ 4-byte Reload
+ movne r10, r6
+ ldr r6, [sp, #100] @ 4-byte Reload
+ str r10, [r0, #12]
+ movne r2, r6
+ str r2, [r0, #16]
+ ldr r2, [sp, #104] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #20]
+ ldr r3, [sp, #56] @ 4-byte Reload
+ movne lr, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str lr, [r0, #24]
+ movne r5, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r5, [r0, #28]
+ movne r8, r2
+ ldr r2, [sp, #76] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #32]
+ movne r9, r2
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r9, [r0, #36]
+ movne r11, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ str r11, [r0, #40]
+ movne r3, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #44]
+ ldr r3, [sp, #80] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ str r3, [r0, #48]
+ ldr r3, [sp, #84] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #136] @ 4-byte Reload
+ str r3, [r0, #52]
+ ldr r3, [sp, #88] @ 4-byte Reload
+ movne r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [sp, #92] @ 4-byte Reload
+ str r3, [r0, #56]
+ movne r2, r1
+ ldr r1, [sp, #144] @ 4-byte Reload
+ str r2, [r0, #60]
+ ldr r2, [sp, #148] @ 4-byte Reload
+ movne r2, r1
+ str r2, [r0, #64]
+ add sp, sp, #556
+ add sp, sp, #2048
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end258:
+ .size mcl_fp_mont17L, .Lfunc_end258-mcl_fp_mont17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF17L
+ .align 2
+ .type mcl_fp_montNF17L,%function
+mcl_fp_montNF17L: @ @mcl_fp_montNF17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #548
+ sub sp, sp, #548
+ .pad #2048
+ sub sp, sp, #2048
+ add r12, sp, #132
+ add r6, sp, #2048
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #92] @ 4-byte Spill
+ add r0, r6, #472
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #128] @ 4-byte Spill
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2520]
+ ldr r1, [sp, #2524]
+ str r0, [sp, #72] @ 4-byte Spill
+ mul r2, r0, r5
+ ldr r0, [sp, #2588]
+ str r1, [sp, #100] @ 4-byte Spill
+
ldr r1, [sp, #2528] + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #2584] + str r1, [sp, #96] @ 4-byte Spill + ldr r1, [sp, #2532] + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #2580] + str r1, [sp, #88] @ 4-byte Spill + mov r1, r4 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #2576] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #2572] + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #2568] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #2564] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #2560] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #2556] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #2552] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #2548] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #2544] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2540] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2536] + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #2448 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #2516] + add r11, sp, #2448 + ldr r9, [sp, #2476] + ldr r4, [sp, #2472] + ldr r7, [sp, #2468] + ldr r6, [sp, #2464] + add lr, sp, #2048 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #2512] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2508] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2504] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #2500] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2496] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2492] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #2488] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #2484] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #2480] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r10, r11} + ldr r0, [sp, #136] @ 4-byte Reload + ldr r1, [sp, #132] @ 4-byte Reload + ldr r5, [sp, #2460] + ldr r2, [r0, #4] + add r0, lr, #328 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adds r0, r8, r0 + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r10, r0 + add r10, sp, #2416 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r5, r0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r7, r0 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r4, r0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r9, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, 
[sp, #44] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #60] @ 4-byte Reload + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adc r0, r1, r0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #2444] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2440] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #2436] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #2432] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #2428] + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r7, [sp, #2376] + ldr r6, [sp, #100] @ 4-byte Reload + ldr r0, [sp, #2380] + ldr r1, [sp, #2384] + ldr r2, [sp, #2388] + ldr r3, [sp, #2392] + ldr r12, [sp, #2396] + ldr lr, [sp, #2400] + ldr r4, [sp, #2404] + ldr r5, [sp, #2408] + ldr r11, [sp, #2412] + adds r7, r6, r7 + ldr r6, [sp, #96] @ 4-byte Reload + str r7, [sp, #24] @ 4-byte Spill + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #2304 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #2372] + add r11, sp, #2304 + ldr r4, [sp, #2332] + ldr r5, [sp, #2328] + ldr r6, [sp, #2324] + ldr r7, [sp, #2320] + add lr, sp, #2048 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2368] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2364] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2360] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2356] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #2352] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #2348] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2344] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #2340] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #2336] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #136] @ 4-byte Reload + ldr r1, [sp, #132] @ 4-byte Reload + ldr r2, [r0, #8] + add r0, lr, #184 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #24] @ 4-byte 
Reload + ldr r1, [sp, #12] @ 4-byte Reload + adds r0, r0, r8 + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #2272 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2300] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2296] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2292] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2288] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2284] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r7, [sp, #2232] + ldr r6, [sp, #124] @ 4-byte Reload + ldr r0, [sp, #2236] + ldr r1, [sp, #2240] + ldr r2, [sp, #2244] + ldr r3, [sp, #2248] + ldr r12, [sp, #2252] + ldr lr, [sp, #2256] + ldr r4, [sp, #2260] + ldr r5, [sp, #2264] + ldr r11, [sp, #2268] + adds r7, r6, r7 + ldr r6, [sp, #120] @ 4-byte Reload + str r7, [sp, #32] @ 4-byte Spill + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + 
str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #2160 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #2228] + add r11, sp, #2160 + ldr r4, [sp, #2188] + ldr r5, [sp, #2184] + ldr r6, [sp, #2180] + ldr r7, [sp, #2176] + add lr, sp, #2048 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2224] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2220] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2216] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2212] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #2208] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2204] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #2200] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #2196] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #2192] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #136] @ 4-byte Reload + ldr r1, [sp, #132] @ 4-byte Reload + ldr r2, [r0, #12] + add r0, lr, #40 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adds r0, r0, r8 + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #2128 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + 
ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2156] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2152] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2148] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2144] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2140] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r7, [sp, #2088] + ldr r6, [sp, #124] @ 4-byte Reload + ldr r0, [sp, #2092] + ldr r1, [sp, #2096] + ldr r2, [sp, #2100] + ldr r3, [sp, #2104] + ldr r12, [sp, #2108] + ldr lr, [sp, #2112] + ldr r4, [sp, #2116] + ldr r5, [sp, #2120] + ldr r11, [sp, #2124] + adds r7, r6, r7 + ldr r6, [sp, #120] @ 4-byte Reload + str r7, [sp, #32] @ 4-byte Spill + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #2016 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #2084] + add r11, sp, #2016 + ldr r4, [sp, #2044] + ldr r5, [sp, #2040] + ldr r6, [sp, #2036] + ldr r7, [sp, #2032] + add lr, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2080] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2076] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2072] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #2068] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #2064] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #2060] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #2056] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #2052] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #2048] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #136] @ 4-byte Reload + ldr r1, [sp, #132] @ 4-byte Reload + ldr r2, [r0, #16] + add r0, lr, #920 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adds r0, r0, r8 + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #124] @ 
4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #1984 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #2012] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #2008] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #2004] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #2000] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1996] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r7, [sp, #1944] + ldr r6, [sp, #124] @ 4-byte Reload + ldr r0, [sp, #1948] + ldr r1, [sp, #1952] + ldr r2, [sp, #1956] + ldr r3, [sp, #1960] + ldr r12, [sp, #1964] + ldr lr, [sp, #1968] + ldr r4, [sp, #1972] + ldr r5, [sp, #1976] + ldr r11, [sp, #1980] + adds r7, r6, r7 + ldr r6, [sp, #120] @ 4-byte Reload + str r7, [sp, #32] @ 4-byte Spill + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, 
#68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #1872 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1940] + add r11, sp, #1872 + ldr r4, [sp, #1900] + ldr r5, [sp, #1896] + ldr r6, [sp, #1892] + ldr r7, [sp, #1888] + add lr, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1936] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1932] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1928] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1924] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1920] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1916] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1912] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1908] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1904] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #136] @ 4-byte Reload + ldr r1, [sp, #132] @ 4-byte Reload + ldr r2, [r0, #20] + add r0, lr, #776 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adds r0, r0, r8 + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #1840 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1868] + str r0, [sp, #52] @ 4-byte 
Spill + ldr r0, [sp, #1864] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1860] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1856] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1852] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r7, [sp, #1800] + ldr r6, [sp, #124] @ 4-byte Reload + ldr r0, [sp, #1804] + ldr r1, [sp, #1808] + ldr r2, [sp, #1812] + ldr r3, [sp, #1816] + ldr r12, [sp, #1820] + ldr lr, [sp, #1824] + ldr r4, [sp, #1828] + ldr r5, [sp, #1832] + ldr r11, [sp, #1836] + adds r7, r6, r7 + ldr r6, [sp, #120] @ 4-byte Reload + str r7, [sp, #32] @ 4-byte Spill + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #1728 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1796] + add r11, sp, #1728 + ldr r4, [sp, #1756] + ldr r5, [sp, #1752] + ldr r6, [sp, #1748] + ldr r7, [sp, #1744] + add lr, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1792] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1788] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1784] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1780] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1776] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1772] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1768] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1764] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1760] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #136] @ 4-byte Reload + ldr r1, [sp, #132] @ 4-byte Reload + ldr r2, [r0, #24] + add r0, lr, #632 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adds r0, r0, r8 + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #1696 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, 
#116] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1724] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1720] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1716] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1712] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1708] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r7, [sp, #1656] + ldr r6, [sp, #124] @ 4-byte Reload + ldr r0, [sp, #1660] + ldr r1, [sp, #1664] + ldr r2, [sp, #1668] + ldr r3, [sp, #1672] + ldr r12, [sp, #1676] + ldr lr, [sp, #1680] + ldr r4, [sp, #1684] + ldr r5, [sp, #1688] + ldr r11, [sp, #1692] + adds r7, r6, r7 + ldr r6, [sp, #120] @ 4-byte Reload + str r7, [sp, #32] @ 4-byte Spill + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte 
Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #1584 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1652] + add r11, sp, #1584 + ldr r4, [sp, #1612] + ldr r5, [sp, #1608] + ldr r6, [sp, #1604] + ldr r7, [sp, #1600] + add lr, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1648] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1644] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1640] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1636] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1632] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1628] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1624] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1620] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1616] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #136] @ 4-byte Reload + ldr r1, [sp, #132] @ 4-byte Reload + ldr r2, [r0, #28] + add r0, lr, #488 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adds r0, r0, r8 + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #1552 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1580] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1576] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1572] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1568] 
+ str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1564] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r7, [sp, #1512] + ldr r6, [sp, #124] @ 4-byte Reload + ldr r0, [sp, #1516] + ldr r1, [sp, #1520] + ldr r2, [sp, #1524] + ldr r3, [sp, #1528] + ldr r12, [sp, #1532] + ldr lr, [sp, #1536] + ldr r4, [sp, #1540] + ldr r5, [sp, #1544] + ldr r11, [sp, #1548] + adds r7, r6, r7 + ldr r6, [sp, #120] @ 4-byte Reload + str r7, [sp, #32] @ 4-byte Spill + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #1440 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1508] + add r11, sp, #1440 + ldr r4, [sp, #1468] + ldr r5, [sp, #1464] + ldr r6, [sp, #1460] + ldr r7, [sp, #1456] + add lr, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1504] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1500] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1496] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1492] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1488] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1484] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1480] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1476] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1472] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #136] @ 4-byte Reload + ldr r1, [sp, #132] @ 4-byte Reload + ldr r2, [r0, #32] + add r0, lr, #344 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adds r0, r0, r8 + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #1408 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r7 + str r0, 
[sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1436] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1432] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1428] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1424] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1420] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r7, [sp, #1368] + ldr r6, [sp, #124] @ 4-byte Reload + ldr r0, [sp, #1372] + ldr r1, [sp, #1376] + ldr r2, [sp, #1380] + ldr r3, [sp, #1384] + ldr r12, [sp, #1388] + ldr lr, [sp, #1392] + ldr r4, [sp, #1396] + ldr r5, [sp, #1400] + ldr r11, [sp, #1404] + adds r7, r6, r7 + ldr r6, [sp, #120] @ 4-byte Reload + str r7, [sp, #32] @ 4-byte Spill + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, 
r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #1296 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1364] + add r11, sp, #1296 + ldr r4, [sp, #1324] + ldr r5, [sp, #1320] + ldr r6, [sp, #1316] + ldr r7, [sp, #1312] + add lr, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1360] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1356] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1352] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1348] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1344] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1340] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1336] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1332] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1328] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #136] @ 4-byte Reload + ldr r1, [sp, #132] @ 4-byte Reload + ldr r2, [r0, #36] + add r0, lr, #200 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + adds r0, r0, r8 + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #1264 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #1292] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1288] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1284] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1280] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1276] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r8, r9, r10} + ldr r7, [sp, #1224] + ldr 
r6, [sp, #124] @ 4-byte Reload + ldr r0, [sp, #1228] + ldr r1, [sp, #1232] + ldr r2, [sp, #1236] + ldr r3, [sp, #1240] + ldr r12, [sp, #1244] + ldr lr, [sp, #1248] + ldr r4, [sp, #1252] + ldr r5, [sp, #1256] + ldr r11, [sp, #1260] + adds r7, r6, r7 + ldr r6, [sp, #120] @ 4-byte Reload + str r7, [sp, #32] @ 4-byte Spill + adcs r0, r6, r0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #128] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + mul r2, r7, r5 + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #1152 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1220] + add r11, sp, #1152 + ldr r4, [sp, #1176] + ldr r6, [sp, #1172] + ldr r7, [sp, #1168] + add lr, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1216] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1212] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1208] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1204] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1200] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1196] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1192] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1188] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1184] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1180] + str r0, [sp, #8] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #136] @ 4-byte Reload + ldr r1, [sp, #132] @ 4-byte Reload + ldr r2, [r0, #40] + add r0, lr, #56 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #32] @ 4-byte Reload + ldr r1, [sp, #8] @ 4-byte Reload + adds r0, r0, r8 + ldr r8, [sp, #1092] + ldr r0, [sp, #124] @ 4-byte Reload + adcs r2, r0, r9 + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #1120 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #1084] + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #1088] + 
str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r4 + ldr r4, [sp, #1080] + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #56] @ 4-byte Spill + adds r0, r2, r4 + mul r1, r0, r5 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #1148] + str r1, [sp, #48] @ 4-byte Spill + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #1144] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1140] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1136] + str r0, [sp, #36] @ 4-byte Spill + ldm r10, {r4, r5, r9, r10} + ldr r11, [sp, #120] @ 4-byte Reload + ldr r0, [sp, #1096] + ldr r1, [sp, #1100] + ldr r2, [sp, #1104] + ldr r3, [sp, #1108] + ldr r12, [sp, #1112] + ldr lr, [sp, #1116] + adcs r7, r11, r7 + str r7, [sp, #120] @ 4-byte Spill + ldr r7, [sp, #116] @ 4-byte Reload + adcs r6, r7, r6 + str r6, [sp, #116] @ 4-byte Spill + ldr r6, [sp, #112] @ 4-byte Reload + adcs r6, r6, r8 + str r6, [sp, #112] @ 4-byte Spill + ldr r6, [sp, #108] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 
4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + add r0, sp, #1008 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1076] + add r11, sp, #1008 + ldr r4, [sp, #1036] + ldr r5, [sp, #1032] + ldr r6, [sp, #1028] + ldr r7, [sp, #1024] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #1072] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1068] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1064] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1060] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1056] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1052] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1048] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1044] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1040] + str r0, [sp, #12] @ 4-byte Spill + ldm r11, {r8, r9, r10, r11} + ldr r0, [sp, #136] @ 4-byte Reload + ldr r1, [sp, #132] @ 4-byte Reload + ldr r2, [r0, #44] + add r0, sp, #936 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #124] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #952 + adds r0, r0, r8 + add r8, sp, #936 + ldr r0, [sp, #120] @ 4-byte Reload + adcs r2, r0, r9 + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #976 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #48] @ 4-byte Spill + ldm r8, {r4, r6, r7, r8} + ldr r0, [sp, #128] @ 4-byte Reload + adds r1, r2, r4 + mul r2, r1, r0 + ldr r0, [sp, #1004] + str r1, [sp, #124] @ 4-byte Spill + str r2, [sp, #24] @ 4-byte Spill + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1000] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #996] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #992] + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r4, r5, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #120] @ 4-byte Reload + adcs r6, r11, r6 + str r6, [sp, #76] @ 4-byte Spill + ldr r6, 
[sp, #116] @ 4-byte Reload + adcs r6, r6, r7 + str r6, [sp, #72] @ 4-byte Spill + ldr r6, [sp, #112] @ 4-byte Reload + adcs r6, r6, r8 + str r6, [sp, #68] @ 4-byte Spill + ldr r6, [sp, #108] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #24] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #80] @ 4-byte Spill + add r0, sp, #864 + bl .LmulPv544x32(PLT) + ldr r1, [sp, #932] + ldr r5, [sp, #892] + ldr r7, [sp, #888] + ldr r4, [sp, #884] + ldr r9, [sp, #880] + ldr r8, [sp, #864] + ldr r11, [sp, #868] + ldr r10, [sp, #872] + ldr r6, [sp, #876] + add r0, sp, #792 + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #928] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #924] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #920] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #916] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #912] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #908] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #904] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #900] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #896] + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #136] @ 4-byte Reload + ldr r2, [r1, #48] + ldr r1, [sp, #132] @ 4-byte Reload + bl .LmulPv544x32(PLT) + ldr r0, [sp, #124] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #796 + adds r0, r0, r8 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r10 + add r10, sp, #820 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 
4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #860] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #856] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #852] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #848] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #844] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #840] + str r0, [sp, #24] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r7, [sp, #792] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #124] @ 4-byte Reload + ldr r6, [sp, #76] @ 4-byte Reload + adds r7, r11, r7 + adcs r0, r6, r0 + str r7, [sp, #28] @ 4-byte Spill + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #720 + bl .LmulPv544x32(PLT) + ldr r1, [sp, #788] + add r11, sp, #728 + ldr r5, [sp, #748] + ldr r9, [sp, #744] + ldr r10, [sp, #720] + ldr r6, [sp, #724] + add r0, sp, #648 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #784] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, 
[sp, #780] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #776] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #772] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #768] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #764] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #760] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #756] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #752] + str r1, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r7, r8, r11} + ldr r1, [sp, #136] @ 4-byte Reload + ldr r2, [r1, #52] + ldr r1, [sp, #132] @ 4-byte Reload + bl .LmulPv544x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #652 + adds r0, r0, r10 + add r10, sp, #676 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #716] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #712] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #708] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #704] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #700] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #696] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r7, [sp, #648] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #80] @ 4-byte Reload + ldr r6, [sp, #76] @ 4-byte Reload + adds r7, r11, r7 + adcs r0, r6, r0 + str r7, [sp, #28] @ 4-byte Spill + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 
4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #576 + bl .LmulPv544x32(PLT) + ldr r1, [sp, #644] + add r11, sp, #584 + ldr r5, [sp, #604] + ldr r9, [sp, #600] + ldr r10, [sp, #576] + ldr r6, [sp, #580] + add r0, sp, #504 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #640] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #636] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #632] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #628] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #624] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #620] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #616] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #612] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #608] + str r1, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r7, r8, r11} + ldr r1, [sp, #136] @ 4-byte Reload + ldr r2, [r1, #56] + ldr r1, [sp, #132] @ 4-byte Reload + bl .LmulPv544x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #508 + adds r0, r0, r10 + add r10, sp, #532 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte 
Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #572] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #568] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #564] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #560] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #556] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #552] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r7, [sp, #504] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #80] @ 4-byte Reload + ldr r6, [sp, #76] @ 4-byte Reload + adds r7, r11, r7 + adcs r0, r6, r0 + str r7, [sp, #28] @ 4-byte Spill + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #432 + bl .LmulPv544x32(PLT) + ldr r1, [sp, #500] + add r11, sp, #440 + ldr r5, [sp, #460] + ldr r9, [sp, #456] + ldr r10, [sp, #432] + ldr r6, [sp, #436] + add r0, sp, #360 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #496] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #492] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #488] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #484] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #480] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #476] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #472] + str r1, [sp, #20] @ 
4-byte Spill + ldr r1, [sp, #468] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #464] + str r1, [sp, #12] @ 4-byte Spill + ldm r11, {r4, r7, r8, r11} + ldr r1, [sp, #136] @ 4-byte Reload + ldr r2, [r1, #60] + ldr r1, [sp, #132] @ 4-byte Reload + bl .LmulPv544x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #364 + adds r0, r0, r10 + add r10, sp, #388 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #428] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #424] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #420] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #416] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #412] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #408] + str r0, [sp, #32] @ 4-byte Spill + ldm r10, {r4, r5, r8, r9, r10} + ldr r7, [sp, #360] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #80] @ 4-byte Reload + ldr r6, [sp, #76] @ 4-byte Reload + adds r7, r11, r7 + adcs r0, r6, r0 + str r7, [sp, #28] @ 4-byte Spill + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte 
Reload + adcs r0, r0, r8 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + mul r2, r7, r0 + add r0, sp, #288 + bl .LmulPv544x32(PLT) + ldr r1, [sp, #356] + add r8, sp, #288 + ldr r9, [sp, #316] + ldr r10, [sp, #312] + ldr r11, [sp, #308] + ldr r6, [sp, #304] + add r0, sp, #216 + str r1, [sp, #52] @ 4-byte Spill + ldr r1, [sp, #352] + str r1, [sp, #48] @ 4-byte Spill + ldr r1, [sp, #348] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #344] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #340] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #336] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #332] + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [sp, #328] + str r1, [sp, #20] @ 4-byte Spill + ldr r1, [sp, #324] + str r1, [sp, #16] @ 4-byte Spill + ldr r1, [sp, #320] + str r1, [sp, #12] @ 4-byte Spill + ldm r8, {r4, r5, r8} + ldr r1, [sp, #136] @ 4-byte Reload + ldr r7, [sp, #300] + ldr r2, [r1, #64] + ldr r1, [sp, #132] @ 4-byte Reload + bl .LmulPv544x32(PLT) + ldr r0, [sp, #28] @ 4-byte Reload + ldr r1, [sp, #12] @ 4-byte Reload + add lr, sp, #232 + adds r0, r0, r4 + ldr r0, [sp, #80] @ 4-byte Reload + adcs r2, r0, r5 + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r9 + add r9, sp, #216 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + 
adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adc r0, r0, r1 + str r0, [sp, #84] @ 4-byte Spill + ldm r9, {r4, r8, r9} + ldr r0, [sp, #128] @ 4-byte Reload + ldr r7, [sp, #228] + ldr r5, [sp, #260] + adds r11, r2, r4 + ldr r4, [sp, #256] + mul r1, r11, r0 + ldr r0, [sp, #284] + str r1, [sp, #64] @ 4-byte Spill + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #280] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #276] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #272] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #268] + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #264] + str r0, [sp, #32] @ 4-byte Spill + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r10, [sp, #136] @ 4-byte Reload + ldr r6, [sp, #132] @ 4-byte Reload + adcs r8, r10, r8 + ldr r10, [sp, #140] @ 4-byte Reload + adcs r9, r6, r9 + ldr r6, [sp, #80] @ 4-byte Reload + adcs r7, r6, r7 + ldr r6, [sp, #76] @ 4-byte Reload + adcs r0, r6, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #64] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #128] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r6, r0, r1 + ldr r0, [sp, #88] @ 4-byte Reload + ldr r1, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r10 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + add r0, sp, #144 + bl .LmulPv544x32(PLT) + add r3, sp, #144 + ldm r3, {r0, r1, r2, r3} + adds r0, r11, r0 + adcs r4, r8, r1 + ldr r0, [sp, #160] + ldr r1, [sp, #44] @ 4-byte Reload + adcs r8, r9, r2 + str r4, [sp, #52] @ 4-byte Spill + adcs r9, r7, r3 + mov r3, r10 + str r8, [sp, #60] @ 4-byte Spill + str r9, [sp, #64] @ 4-byte Spill + adcs r5, r1, r0 + ldr r0, [sp, #164] + ldr r1, [sp, #48] @ 4-byte Reload + str r5, [sp, #68] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #168] + adcs lr, r1, r0 + ldr r0, [sp, #172] + ldr r1, [sp, #72] @ 4-byte Reload + str lr, [sp, #48] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #76] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #176] + adcs r0, r1, r0 + ldr r1, [sp, #80] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #180] + adcs r0, r1, r0 + ldr r1, [sp, #116] @ 4-byte Reload + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #184] + adcs r0, r1, r0 + ldr 
r1, [sp, #120] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #188] + adcs r0, r1, r0 + ldr r1, [sp, #124] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #192] + adcs r0, r1, r0 + ldr r1, [sp, #128] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #196] + adcs r0, r1, r0 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #200] + adcs r0, r6, r0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #204] + adcs r0, r1, r0 + ldr r1, [sp, #136] @ 4-byte Reload + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #208] + adcs r0, r1, r0 + ldr r1, [sp, #88] @ 4-byte Reload + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #212] + adc r1, r1, r0 + str r1, [sp, #88] @ 4-byte Spill + ldm r3, {r0, r2, r7} + ldr r6, [r3, #12] + ldr r11, [r3, #36] + ldr r10, [r3, #32] + subs r12, r4, r0 + ldr r0, [r3, #64] + sbcs r4, r8, r2 + ldr r2, [sp, #96] @ 4-byte Reload + sbcs r8, r9, r7 + ldr r7, [r3, #20] + sbcs r9, r5, r6 + ldr r6, [r3, #24] + ldr r5, [r3, #28] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r3, #40] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [r3, #44] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r3, #48] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r3, #52] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r3, #56] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r3, #60] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r3, #16] + sbcs r2, r2, r0 + ldr r0, [sp, #100] @ 4-byte Reload + sbcs r3, lr, r7 + ldr r7, [sp, #56] @ 4-byte Reload + sbcs lr, r0, r6 + ldr r0, [sp, #104] @ 4-byte Reload + sbcs r5, r0, r5 + ldr r0, [sp, #108] @ 4-byte Reload + sbcs r6, r0, r10 + ldr r0, [sp, #112] @ 4-byte Reload + sbcs r11, r0, r11 + ldr r0, [sp, #116] @ 4-byte Reload + sbcs r0, r0, r7 + ldr r7, [sp, #72] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + sbcs r0, r0, r7 + ldr r7, [sp, #76] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + sbcs r0, r0, r7 + ldr r7, [sp, #80] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + sbcs r0, r0, r7 + ldr r7, [sp, #84] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + sbcs r0, r0, r7 + ldr r7, [sp, #40] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + sbcs r0, r0, r7 + ldr r7, [sp, #60] @ 4-byte Reload + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + sbc r10, r1, r0 + ldr r0, [sp, #52] @ 4-byte Reload + asr r1, r10, #31 + cmp r1, #0 + movlt r4, r7 + movlt r12, r0 + ldr r0, [sp, #92] @ 4-byte Reload + str r12, [r0] + str r4, [r0, #4] + ldr r4, [sp, #64] @ 4-byte Reload + movlt r8, r4 + ldr r4, [sp, #68] @ 4-byte Reload + cmp r1, #0 + str r8, [r0, #8] + movlt r9, r4 + ldr r4, [sp, #96] @ 4-byte Reload + str r9, [r0, #12] + movlt r2, r4 + str r2, [r0, #16] + ldr r2, [sp, #48] @ 4-byte Reload + movlt r3, r2 + ldr r2, [sp, #100] @ 4-byte Reload + cmp r1, #0 + str r3, [r0, #20] + ldr r3, [sp, #56] @ 4-byte Reload + movlt lr, r2 + ldr r2, [sp, #104] @ 4-byte Reload + str lr, [r0, #24] + movlt r5, r2 + ldr r2, [sp, #108] @ 4-byte Reload + str r5, [r0, #28] + movlt r6, r2 + ldr r2, [sp, #112] @ 4-byte Reload + cmp r1, #0 + str r6, [r0, #32] + movlt r11, r2 + ldr r2, [sp, #116] @ 4-byte Reload + str r11, [r0, #36] + movlt r3, r2 + ldr r2, [sp, #120] @ 4-byte Reload + str r3, [r0, #40] + ldr r3, [sp, #72] @ 4-byte Reload + movlt r3, r2 + ldr r2, [sp, #124] @ 4-byte Reload 
+ cmp r1, #0
+ str r3, [r0, #44]
+ ldr r3, [sp, #76] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ str r3, [r0, #48]
+ ldr r3, [sp, #80] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ str r3, [r0, #52]
+ ldr r3, [sp, #84] @ 4-byte Reload
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [sp, #140] @ 4-byte Reload
+ str r3, [r0, #56]
+ movlt r2, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r2, [r0, #60]
+ movlt r10, r1
+ str r10, [r0, #64]
+ add sp, sp, #548
+ add sp, sp, #2048
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end259:
+ .size mcl_fp_montNF17L, .Lfunc_end259-mcl_fp_montNF17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed17L
+ .align 2
+ .type mcl_fp_montRed17L,%function
+mcl_fp_montRed17L: @ @mcl_fp_montRed17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #444
+ sub sp, sp, #444
+ .pad #1024
+ sub sp, sp, #1024
+ mov r3, r2
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r7, [r1]
+ ldr r0, [r3]
+ str r3, [sp, #236] @ 4-byte Spill
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #200] @ 4-byte Spill
+ ldr r0, [r3, #4]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [r3, #8]
+ str r2, [sp, #108] @ 4-byte Spill
+ str r0, [sp, #192] @ 4-byte Spill
+ ldr r0, [r3, #12]
+ str r0, [sp, #176] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ str r0, [sp, #180] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ str r0, [sp, #184] @ 4-byte Spill
+ ldr r0, [r3, #24]
+ str r0, [sp, #188] @ 4-byte Spill
+ ldr r0, [r3, #-4]
+ str r0, [sp, #232] @ 4-byte Spill
+ mul r2, r7, r0
+ ldr r0, [r3, #60]
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [r3, #64]
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [r3, #28]
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #152] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #156] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #160] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #164] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #168] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #172] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldr r0, [r1, #128]
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [r1, #132]
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [r1, #96]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r1, #104]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [r1, #108]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r1, #112]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [r1, #116]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r1, #120]
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [r1, #124]
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [r1, #100]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #80]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r1, #84]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r1, #88]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r1, #92]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp,
#44] @ 4-byte Spill + ldr r0, [r1, #52] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r1, #56] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r1, #60] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r1, #28] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [r1, #24] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [r1, #20] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r1, #16] + mov r1, r3 + str r0, [sp, #20] @ 4-byte Spill + add r0, sp, #1392 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1460] + ldr r11, [sp, #1392] + ldr r1, [sp, #1400] + ldr r2, [sp, #1404] + ldr r3, [sp, #1408] + ldr r12, [sp, #1412] + ldr lr, [sp, #1416] + ldr r4, [sp, #1420] + ldr r5, [sp, #1424] + ldr r6, [sp, #1428] + ldr r8, [sp, #1432] + ldr r9, [sp, #1436] + ldr r10, [sp, #1440] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1456] + adds r7, r7, r11 + ldr r7, [sp, #116] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1452] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1448] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1444] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #1396] + adcs r7, r7, r0 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #20] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #236] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + add r9, sp, #1024 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs 
r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #216] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #224] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #228] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #228] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #232] @ 4-byte Reload + mul r2, r7, r0 + add r0, r9, #296 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1388] + ldr r9, [sp, #1320] + ldr r1, [sp, #1328] + ldr r2, [sp, #1332] + ldr r3, [sp, #1336] + ldr r12, [sp, #1340] + ldr r10, [sp, #1344] + ldr lr, [sp, #1348] + ldr r4, [sp, #1352] + ldr r5, [sp, #1356] + ldr r8, [sp, #1360] + ldr r11, [sp, #1364] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1384] + adds r7, r7, r9 + ldr r7, [sp, #116] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1380] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1376] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1372] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #1368] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #1324] + adcs r7, r7, r0 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #4] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #232] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + mul r2, r7, r5 + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, 
r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #216] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #224] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #228] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #228] @ 4-byte Spill + ldr r0, [sp, #28] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #32] @ 4-byte Spill + add r0, sp, #1248 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1316] + add r10, sp, #1280 + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1312] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1308] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1304] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1300] + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #1296] + str r0, [sp, #8] @ 4-byte Spill + ldm r10, {r4, r6, r9, r10} + ldr r8, [sp, #1248] + ldr r0, [sp, #1252] + ldr r1, [sp, #1256] + ldr r2, [sp, #1260] + ldr r3, [sp, #1264] + ldr r12, [sp, #1268] + ldr lr, [sp, #1272] + ldr r11, [sp, #1276] + adds r7, r7, r8 + ldr r7, [sp, #116] @ 4-byte Reload + adcs r7, r7, r0 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r7, r5 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + add r9, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #12] @ 4-byte Reload + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #236] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, 
#92] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #216] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #224] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #228] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #228] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #36] @ 4-byte Spill + add r0, r9, #152 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1244] + ldr r9, [sp, #1176] + ldr r1, [sp, #1184] + ldr r2, [sp, #1188] + ldr r3, [sp, #1192] + ldr r12, [sp, #1196] + ldr lr, [sp, #1200] + ldr r4, [sp, #1204] + ldr r5, [sp, #1208] + ldr r6, [sp, #1212] + ldr r8, [sp, #1216] + ldr r10, [sp, #1220] + ldr r11, [sp, #1224] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1240] + adds r7, r7, r9 + ldr r7, [sp, #116] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1236] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1232] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1228] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1180] + adcs r7, r7, r0 + ldr r0, [sp, #112] @ 4-byte Reload + str r7, [sp, #12] @ 4-byte Spill + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #236] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #232] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + mul r2, r7, r6 + adcs r0, r0, r8 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte 
Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #216] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #224] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #228] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #228] @ 4-byte Spill + ldr r0, [sp, #36] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #40] @ 4-byte Spill + add r0, sp, #1104 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1172] + ldr r4, [sp, #1104] + ldr r9, [sp, #12] @ 4-byte Reload + ldr r1, [sp, #1112] + ldr r2, [sp, #1116] + ldr r3, [sp, #1120] + ldr r12, [sp, #1124] + ldr r10, [sp, #1128] + ldr r11, [sp, #1132] + ldr lr, [sp, #1136] + ldr r7, [sp, #1140] + ldr r8, [sp, #1144] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1168] + adds r4, r9, r4 + ldr r4, [sp, #116] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1164] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1160] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1156] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #1152] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #1148] + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #1108] + adcs r4, r4, r0 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #8] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r4, r6 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + mov r7, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r8 + add r8, sp, #1024 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 
4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r5 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #216] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #224] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #228] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #228] @ 4-byte Spill + ldr r0, [sp, #40] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #44] @ 4-byte Spill + add r0, r8, #8 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1100] + ldr r8, [sp, #1032] + ldr r1, [sp, #1040] + ldr r2, [sp, #1044] + ldr r3, [sp, #1048] + ldr r12, [sp, #1052] + ldr lr, [sp, #1056] + ldr r4, [sp, #1060] + ldr r5, [sp, #1064] + ldr r6, [sp, #1068] + ldr r9, [sp, #1072] + ldr r10, [sp, #1076] + ldr r11, [sp, #1080] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1096] + adds r7, r7, r8 + ldr r7, [sp, #116] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1092] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1088] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1084] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1036] + adcs r7, r7, r0 + ldr r0, [sp, #112] @ 4-byte Reload + str r7, [sp, #20] @ 4-byte Spill + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #232] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + mul r2, r7, r5 + adcs r0, r0, r6 + ldr r6, [sp, #236] @ 4-byte Reload + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, 
r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r6 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #216] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #224] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #228] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #228] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #48] @ 4-byte Spill + add r0, sp, #960 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #1028] + add lr, sp, #984 + add r12, sp, #964 + ldr r8, [sp, #1000] + ldr r7, [sp, #996] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #1024] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #1020] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #1016] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #1012] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #1008] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #1004] + str r0, [sp, #16] @ 4-byte Spill + ldm lr, {r10, r11, lr} + ldr r4, [sp, #960] + ldm r12, {r0, r1, r2, r3, r12} + ldr r9, [sp, #20] @ 4-byte Reload + adds r4, r9, r4 + ldr r4, [sp, #116] @ 4-byte Reload + adcs r4, r4, r0 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r4, r5 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, r7 + mov r7, r4 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, 
#32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r6 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #216] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #224] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #228] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #228] @ 4-byte Spill + ldr r0, [sp, #48] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + add r0, sp, #888 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #956] + add r11, sp, #916 + add lr, sp, #892 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #952] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #948] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #944] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #940] + str r0, [sp, #32] @ 4-byte Spill + ldm r11, {r4, r5, r6, r9, r10, r11} + ldr r8, [sp, #888] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r7, r7, r8 + ldr r7, [sp, #116] @ 4-byte Reload + adcs r7, r7, r0 + ldr r0, [sp, #112] @ 4-byte Reload + str r7, [sp, #28] @ 4-byte Spill + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #232] @ 4-byte Reload + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + mul r2, r7, r5 + ldr r7, [sp, #236] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r11 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload 
+ str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r7 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #216] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #224] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #228] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #228] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + add r0, sp, #816 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #884] + add lr, sp, #840 + add r12, sp, #820 + ldr r8, [sp, #856] + ldr r6, [sp, #852] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #880] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #876] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #872] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #868] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #864] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #860] + str r0, [sp, #24] @ 4-byte Spill + ldm lr, {r10, r11, lr} + ldr r4, [sp, #816] + ldm r12, {r0, r1, r2, r3, r12} + ldr r9, [sp, #28] @ 4-byte Reload + adds r4, r9, r4 + ldr r4, [sp, #116] @ 4-byte Reload + adcs r4, r4, r0 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r4, r5 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r11 + mov r11, r4 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs 
r0, r0, r1 + mov r1, r7 + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #216] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #224] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #228] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #228] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + add r0, sp, #744 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #812] + add r10, sp, #768 + add lr, sp, #744 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #808] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #804] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #800] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #796] + str r0, [sp, #40] @ 4-byte Spill + ldm r10, {r4, r5, r6, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r11, r0 + ldr r0, [sp, #116] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #40] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #232] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + mul r2, r11, r5 + adcs r0, r0, r6 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #236] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #216] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #220] @ 4-byte 
Spill + ldr r0, [sp, #224] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #228] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #228] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #64] @ 4-byte Spill + add r0, sp, #672 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #740] + add r9, sp, #704 + add r12, sp, #676 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #736] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #732] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #728] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #724] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #720] + str r0, [sp, #40] @ 4-byte Spill + ldm r9, {r6, r7, r8, r9} + ldr r4, [sp, #672] + ldr lr, [sp, #700] + ldr r10, [sp, #696] + ldm r12, {r0, r1, r2, r3, r12} + adds r4, r11, r4 + ldr r4, [sp, #116] @ 4-byte Reload + adcs r11, r4, r0 + ldr r0, [sp, #112] @ 4-byte Reload + ldr r4, [sp, #236] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r5 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #44] @ 4-byte Reload + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #60] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #216] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #224] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #228] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #228] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #68] @ 4-byte Spill + add r0, sp, #600 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #668] + add r10, sp, #624 + add lr, sp, #600 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #664] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #660] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #656] + str r0, [sp, #52] @ 4-byte 
Spill + ldr r0, [sp, #652] + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #648] + str r0, [sp, #44] @ 4-byte Spill + ldm r10, {r5, r6, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r11, r0 + ldr r0, [sp, #116] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #44] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r9 + ldr r9, [sp, #232] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + mul r2, r11, r9 + adcs r0, r0, r10 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #60] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #64] @ 4-byte Reload + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r4 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #216] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #224] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #228] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #228] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #72] @ 4-byte Spill + add r0, sp, #528 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #596] + add r8, sp, #560 + add r12, sp, #532 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #592] + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #588] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #584] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #580] + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #576] + str r0, [sp, #48] @ 4-byte Spill + ldm r8, {r5, r6, r7, r8} + ldr r4, [sp, #528] + ldr lr, [sp, #556] + ldr r10, [sp, #552] + ldm r12, {r0, r1, r2, r3, r12} + adds r4, r11, r4 + ldr r4, [sp, #116] @ 4-byte Reload + adcs r11, r4, r0 + ldr r0, [sp, #112] @ 4-byte Reload + mov r4, r9 + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str 
r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #60] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #64] @ 4-byte Reload + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #68] @ 4-byte Reload + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #236] @ 4-byte Reload + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #216] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #224] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #228] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #228] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + add r0, sp, #456 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #524] + add r10, sp, #480 + add lr, sp, #456 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #520] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #516] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #512] + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #508] + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #504] + str r0, [sp, #52] @ 4-byte Spill + ldm r10, {r5, r6, r7, r8, r9, r10} + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r0, r11, r0 + ldr r0, [sp, #116] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #112] @ 4-byte Reload + ldr r1, [sp, #52] @ 4-byte Reload + adcs r0, r0, r2 + mul r2, r11, r4 + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #108] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r6 + ldr r6, [sp, #236] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r7 + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #56] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #60] @ 4-byte 
Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #72] @ 4-byte Reload + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #68] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #64] @ 4-byte Reload + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #216] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r6 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #224] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #56] @ 4-byte Spill + ldr r0, [sp, #228] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #52] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #76] @ 4-byte Spill + add r0, sp, #384 + bl .LmulPv544x32(PLT) + ldr r0, [sp, #452] + add r10, sp, #412 + add lr, sp, #388 + str r0, [sp, #48] @ 4-byte Spill + ldr r0, [sp, #448] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [sp, #444] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #440] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #436] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #432] + str r0, [sp, #24] @ 4-byte Spill + ldm r10, {r5, r7, r8, r9, r10} + ldr r4, [sp, #384] + ldm lr, {r0, r1, r2, r3, r12, lr} + adds r4, r11, r4 + ldr r4, [sp, #116] @ 4-byte Reload + adcs r4, r4, r0 + ldr r0, [sp, #112] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #108] @ 4-byte Reload + ldr r1, [sp, #24] @ 4-byte Reload + adcs r0, r0, r2 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #228] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #232] @ 4-byte Reload + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + mul r2, r4, r5 + adcs r0, r0, r7 + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #104] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r10 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #128] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #124] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #120] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #116] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #48] @ 4-byte Reload + str r0, [sp, #112] @ 4-byte Spill + ldr r0, [sp, #60] @ 4-byte Reload + adcs r0, r0, r1 + mov r1, r6 + str r0, [sp, #108] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #104] @ 4-byte Spill + ldr r0, [sp, #52] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #96] @ 4-byte Spill + add r0, sp, #312 + bl .LmulPv544x32(PLT) + add r6, sp, #312 + add r10, sp, #356 + add lr, sp, #328 + ldm r6, {r0, r1, r3, r6} 
+ adds r0, r4, r0 + adcs r7, r11, r1 + mul r0, r7, r5 + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #380] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adcs r0, r0, r3 + str r0, [sp, #232] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #376] + str r0, [sp, #64] @ 4-byte Spill + ldm r10, {r4, r5, r6, r8, r10} + ldr r9, [sp, #352] + ldm lr, {r0, r1, r2, r3, r12, lr} + ldr r11, [sp, #228] @ 4-byte Reload + adcs r0, r11, r0 + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #224] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #64] @ 4-byte Reload + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #220] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #88] @ 4-byte Reload + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #216] @ 4-byte Reload + adcs r11, r0, r3 + ldr r0, [sp, #140] @ 4-byte Reload + adcs r0, r0, r12 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #136] @ 4-byte Reload + adcs r0, r0, lr + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #132] @ 4-byte Reload + adcs r0, r0, r9 + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #128] @ 4-byte Reload + adcs r0, r0, r4 + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #124] @ 4-byte Reload + adcs r0, r0, r5 + str r0, [sp, #136] @ 4-byte Spill + ldr r0, [sp, #120] @ 4-byte Reload + adcs r0, r0, r6 + str r0, [sp, #228] @ 4-byte Spill + ldr r0, [sp, #116] @ 4-byte Reload + adcs r0, r0, r8 + str r0, [sp, #140] @ 4-byte Spill + ldr r0, [sp, #112] @ 4-byte Reload + adcs r10, r0, r10 + ldr r0, [sp, #108] @ 4-byte Reload + adcs r8, r0, r1 + ldr r0, [sp, #104] @ 4-byte Reload + ldr r1, [sp, #80] @ 4-byte Reload + adcs r6, r0, r1 + ldr r0, [sp, #100] @ 4-byte Reload + ldr r1, [sp, #236] @ 4-byte Reload + adcs r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #128] @ 4-byte Spill + add r0, sp, #240 + bl .LmulPv544x32(PLT) + add r3, sp, #240 + ldm r3, {r0, r1, r2, r3} + adds r0, r7, r0 + ldr r0, [sp, #232] @ 4-byte Reload + adcs r9, r0, r1 + ldr r0, [sp, #92] @ 4-byte Reload + ldr r1, [sp, #72] @ 4-byte Reload + str r9, [sp, #100] @ 4-byte Spill + adcs r12, r0, r2 + ldr r0, [sp, #68] @ 4-byte Reload + str r12, [sp, #104] @ 4-byte Spill + adcs lr, r0, r3 + ldr r0, [sp, #256] + str lr, [sp, #108] @ 4-byte Spill + adcs r4, r1, r0 + ldr r0, [sp, #260] + ldr r1, [sp, #76] @ 4-byte Reload + str r4, [sp, #112] @ 4-byte Spill + adcs r5, r1, r0 + ldr r0, [sp, #264] + ldr r1, [sp, #84] @ 4-byte Reload + str r5, [sp, #116] @ 4-byte Spill + adcs r11, r11, r0 + ldr r0, [sp, #268] + str r11, [sp, #120] @ 4-byte Spill + adcs r7, r1, r0 + ldr r0, [sp, #272] + ldr r1, [sp, #224] @ 4-byte Reload + str r7, [sp, #124] @ 4-byte Spill + adcs r0, r1, r0 + ldr r1, [sp, #220] @ 4-byte Reload + str r0, [sp, #224] @ 4-byte Spill + ldr r0, [sp, #276] + adcs r0, r1, r0 + ldr r1, [sp, #216] @ 4-byte Reload + str r0, [sp, #220] @ 4-byte Spill + ldr r0, [sp, #280] + adcs r0, r1, r0 + ldr r1, [sp, #136] @ 4-byte Reload + str r0, [sp, #216] @ 4-byte Spill + ldr r0, [sp, #284] + adcs r0, r1, r0 + ldr r1, [sp, #228] @ 4-byte Reload + str r0, [sp, #232] @ 4-byte Spill + ldr r0, [sp, #288] + adcs r0, r1, r0 + ldr r1, [sp, #140] @ 4-byte Reload + str r0, [sp, #228] @ 4-byte Spill + ldr r0, [sp, #292] + adcs r0, r1, r0 + ldr r1, [sp, #132] @ 4-byte Reload + str r0, [sp, #236] @ 4-byte Spill + ldr r0, [sp, #296] + adcs r10, r10, r0 + ldr r0, [sp, #300] + str r10, [sp, #136] @ 4-byte Spill + adcs r8, r8, r0 + ldr r0, 
[sp, #304] + str r8, [sp, #140] @ 4-byte Spill + adcs r6, r6, r0 + ldr r0, [sp, #308] + adcs r2, r1, r0 + ldr r0, [sp, #128] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #132] @ 4-byte Spill + ldr r0, [sp, #200] @ 4-byte Reload + subs r1, r9, r0 + ldr r0, [sp, #196] @ 4-byte Reload + sbcs r3, r12, r0 + ldr r0, [sp, #192] @ 4-byte Reload + sbcs r12, lr, r0 + ldr r0, [sp, #176] @ 4-byte Reload + sbcs lr, r4, r0 + ldr r0, [sp, #180] @ 4-byte Reload + sbcs r4, r5, r0 + ldr r0, [sp, #184] @ 4-byte Reload + sbcs r5, r11, r0 + ldr r0, [sp, #188] @ 4-byte Reload + ldr r11, [sp, #224] @ 4-byte Reload + sbcs r9, r7, r0 + ldr r0, [sp, #148] @ 4-byte Reload + ldr r7, [sp, #220] @ 4-byte Reload + sbcs r0, r11, r0 + ldr r11, [sp, #232] @ 4-byte Reload + str r0, [sp, #176] @ 4-byte Spill + ldr r0, [sp, #144] @ 4-byte Reload + sbcs r0, r7, r0 + ldr r7, [sp, #216] @ 4-byte Reload + str r0, [sp, #180] @ 4-byte Spill + ldr r0, [sp, #152] @ 4-byte Reload + sbcs r0, r7, r0 + ldr r7, [sp, #228] @ 4-byte Reload + str r0, [sp, #184] @ 4-byte Spill + ldr r0, [sp, #156] @ 4-byte Reload + sbcs r0, r11, r0 + ldr r11, [sp, #236] @ 4-byte Reload + str r0, [sp, #188] @ 4-byte Spill + ldr r0, [sp, #160] @ 4-byte Reload + sbcs r0, r7, r0 + str r0, [sp, #192] @ 4-byte Spill + ldr r0, [sp, #164] @ 4-byte Reload + sbcs r0, r11, r0 + str r0, [sp, #196] @ 4-byte Spill + ldr r0, [sp, #168] @ 4-byte Reload + sbcs r0, r10, r0 + mov r10, r6 + str r0, [sp, #200] @ 4-byte Spill + ldr r0, [sp, #172] @ 4-byte Reload + sbcs r7, r8, r0 + ldr r0, [sp, #204] @ 4-byte Reload + mov r8, r2 + sbcs r11, r6, r0 + ldr r0, [sp, #208] @ 4-byte Reload + sbcs r6, r2, r0 + ldr r0, [sp, #132] @ 4-byte Reload + sbc r2, r0, #0 + ldr r0, [sp, #100] @ 4-byte Reload + ands r2, r2, #1 + movne r1, r0 + ldr r0, [sp, #212] @ 4-byte Reload + str r1, [r0] + ldr r1, [sp, #104] @ 4-byte Reload + movne r3, r1 + ldr r1, [sp, #108] @ 4-byte Reload + str r3, [r0, #4] + ldr r3, [sp, #176] @ 4-byte Reload + movne r12, r1 + ldr r1, [sp, #112] @ 4-byte Reload + cmp r2, #0 + str r12, [r0, #8] + movne lr, r1 + ldr r1, [sp, #116] @ 4-byte Reload + str lr, [r0, #12] + movne r4, r1 + ldr r1, [sp, #120] @ 4-byte Reload + str r4, [r0, #16] + movne r5, r1 + ldr r1, [sp, #124] @ 4-byte Reload + cmp r2, #0 + str r5, [r0, #20] + movne r9, r1 + ldr r1, [sp, #224] @ 4-byte Reload + str r9, [r0, #24] + movne r3, r1 + ldr r1, [sp, #220] @ 4-byte Reload + str r3, [r0, #28] + ldr r3, [sp, #180] @ 4-byte Reload + movne r3, r1 + ldr r1, [sp, #216] @ 4-byte Reload + cmp r2, #0 + str r3, [r0, #32] + ldr r3, [sp, #184] @ 4-byte Reload + movne r3, r1 + ldr r1, [sp, #232] @ 4-byte Reload + str r3, [r0, #36] + ldr r3, [sp, #188] @ 4-byte Reload + movne r3, r1 + ldr r1, [sp, #228] @ 4-byte Reload + str r3, [r0, #40] + ldr r3, [sp, #192] @ 4-byte Reload + movne r3, r1 + ldr r1, [sp, #236] @ 4-byte Reload + cmp r2, #0 + str r3, [r0, #44] + ldr r3, [sp, #196] @ 4-byte Reload + movne r3, r1 + ldr r1, [sp, #200] @ 4-byte Reload + str r3, [r0, #48] + ldr r3, [sp, #136] @ 4-byte Reload + movne r1, r3 + str r1, [r0, #52] + ldr r1, [sp, #140] @ 4-byte Reload + movne r7, r1 + cmp r2, #0 + movne r11, r10 + movne r6, r8 + str r7, [r0, #56] + str r11, [r0, #60] + str r6, [r0, #64] + add sp, sp, #444 + add sp, sp, #1024 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end260: + .size mcl_fp_montRed17L, .Lfunc_end260-mcl_fp_montRed17L + .cantunwind + .fnend + + .globl mcl_fp_addPre17L + .align 2 + .type mcl_fp_addPre17L,%function +mcl_fp_addPre17L: @ @mcl_fp_addPre17L + .fnstart +@ BB#0: + 
.save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #76 + sub sp, sp, #76 + ldm r1, {r3, lr} + ldr r8, [r1, #8] + ldr r5, [r1, #12] + ldm r2, {r6, r7, r12} + ldr r4, [r2, #12] + add r10, r2, #16 + adds r3, r6, r3 + str r3, [sp, #32] @ 4-byte Spill + ldr r3, [r2, #64] + str r3, [sp, #72] @ 4-byte Spill + adcs r3, r7, lr + add lr, r1, #16 + str r3, [sp, #28] @ 4-byte Spill + ldr r3, [r2, #32] + adcs r6, r12, r8 + adcs r8, r4, r5 + str r3, [sp, #36] @ 4-byte Spill + ldr r3, [r2, #36] + str r3, [sp, #40] @ 4-byte Spill + ldr r3, [r2, #40] + str r3, [sp, #44] @ 4-byte Spill + ldr r3, [r2, #44] + str r3, [sp, #48] @ 4-byte Spill + ldr r3, [r2, #48] + str r3, [sp, #52] @ 4-byte Spill + ldr r3, [r2, #52] + str r3, [sp, #56] @ 4-byte Spill + ldr r3, [r2, #56] + str r3, [sp, #60] @ 4-byte Spill + ldr r3, [r2, #60] + str r3, [sp, #64] @ 4-byte Spill + ldr r3, [r2, #28] + str r3, [sp, #24] @ 4-byte Spill + ldm r10, {r4, r5, r10} + ldr r2, [r1, #64] + ldr r11, [r1, #60] + str r2, [sp, #68] @ 4-byte Spill + ldr r2, [r1, #36] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #40] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #44] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #48] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r1, #52] + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #20] @ 4-byte Spill + ldm lr, {r1, r2, r3, r12, lr} + ldr r9, [sp, #32] @ 4-byte Reload + ldr r7, [sp, #28] @ 4-byte Reload + adcs r1, r4, r1 + str r9, [r0] + str r7, [r0, #4] + str r6, [r0, #8] + str r8, [r0, #12] + ldr r7, [sp, #8] @ 4-byte Reload + ldr r6, [sp, #12] @ 4-byte Reload + ldr r4, [sp, #20] @ 4-byte Reload + adcs r2, r5, r2 + str r1, [r0, #16] + ldr r5, [sp, #16] @ 4-byte Reload + adcs r1, r10, r3 + str r2, [r0, #20] + ldr r2, [sp, #24] @ 4-byte Reload + ldr r3, [sp, #4] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #36] @ 4-byte Reload + adcs r2, r2, r12 + adcs r12, r1, lr + str r2, [r0, #28] + ldr r1, [sp, #40] @ 4-byte Reload + ldr r2, [sp] @ 4-byte Reload + str r12, [r0, #32] + add r12, r0, #36 + adcs r2, r1, r2 + ldr r1, [sp, #44] @ 4-byte Reload + adcs r3, r1, r3 + ldr r1, [sp, #48] @ 4-byte Reload + adcs r7, r1, r7 + ldr r1, [sp, #52] @ 4-byte Reload + adcs r6, r1, r6 + ldr r1, [sp, #56] @ 4-byte Reload + adcs r5, r1, r5 + ldr r1, [sp, #60] @ 4-byte Reload + adcs r4, r1, r4 + ldr r1, [sp, #64] @ 4-byte Reload + stm r12, {r2, r3, r7} + str r6, [r0, #48] + str r5, [r0, #52] + str r4, [r0, #56] + ldr r2, [sp, #68] @ 4-byte Reload + adcs r1, r1, r11 + str r1, [r0, #60] + ldr r1, [sp, #72] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [r0, #64] + mov r0, #0 + adc r0, r0, #0 + add sp, sp, #76 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end261: + .size mcl_fp_addPre17L, .Lfunc_end261-mcl_fp_addPre17L + .cantunwind + .fnend + + .globl mcl_fp_subPre17L + .align 2 + .type mcl_fp_subPre17L,%function +mcl_fp_subPre17L: @ @mcl_fp_subPre17L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #76 + sub sp, sp, #76 + ldm r2, {r3, lr} + ldr r8, [r2, #8] + ldr r5, [r2, #12] + ldm r1, {r6, r7, r12} + ldr r4, [r1, #12] + add r10, r2, #16 + subs r3, r6, r3 + str r3, [sp, #32] @ 4-byte Spill + ldr r3, [r2, #64] + str r3, [sp, #72] @ 4-byte Spill + sbcs r3, r7, lr + add lr, r1, #16 + str r3, [sp, #28] @ 4-byte Spill + ldr r3, [r2, #32] + sbcs r6, r12, r8 + sbcs r8, r4, r5 + str r3, [sp, #36] @ 4-byte Spill + ldr r3, [r2, #36] + str r3, [sp, #40] @ 4-byte Spill + ldr r3, 
[r2, #40] + str r3, [sp, #44] @ 4-byte Spill + ldr r3, [r2, #44] + str r3, [sp, #48] @ 4-byte Spill + ldr r3, [r2, #48] + str r3, [sp, #52] @ 4-byte Spill + ldr r3, [r2, #52] + str r3, [sp, #56] @ 4-byte Spill + ldr r3, [r2, #56] + str r3, [sp, #60] @ 4-byte Spill + ldr r3, [r2, #60] + str r3, [sp, #64] @ 4-byte Spill + ldr r3, [r2, #28] + str r3, [sp, #24] @ 4-byte Spill + ldm r10, {r4, r5, r10} + ldr r2, [r1, #64] + ldr r11, [r1, #60] + str r2, [sp, #68] @ 4-byte Spill + ldr r2, [r1, #36] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #40] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r1, #44] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #48] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r1, #52] + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [r1, #56] + str r2, [sp, #20] @ 4-byte Spill + ldm lr, {r1, r2, r3, r12, lr} + ldr r9, [sp, #32] @ 4-byte Reload + ldr r7, [sp, #28] @ 4-byte Reload + sbcs r1, r1, r4 + str r9, [r0] + str r7, [r0, #4] + str r6, [r0, #8] + str r8, [r0, #12] + ldr r7, [sp, #8] @ 4-byte Reload + ldr r6, [sp, #12] @ 4-byte Reload + ldr r4, [sp, #20] @ 4-byte Reload + sbcs r2, r2, r5 + str r1, [r0, #16] + ldr r5, [sp, #16] @ 4-byte Reload + sbcs r1, r3, r10 + str r2, [r0, #20] + ldr r2, [sp, #24] @ 4-byte Reload + ldr r3, [sp, #4] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #36] @ 4-byte Reload + sbcs r2, r12, r2 + sbcs r12, lr, r1 + str r2, [r0, #28] + ldr r1, [sp, #40] @ 4-byte Reload + ldr r2, [sp] @ 4-byte Reload + str r12, [r0, #32] + add r12, r0, #36 + sbcs r2, r2, r1 + ldr r1, [sp, #44] @ 4-byte Reload + sbcs r3, r3, r1 + ldr r1, [sp, #48] @ 4-byte Reload + sbcs r7, r7, r1 + ldr r1, [sp, #52] @ 4-byte Reload + sbcs r6, r6, r1 + ldr r1, [sp, #56] @ 4-byte Reload + sbcs r5, r5, r1 + ldr r1, [sp, #60] @ 4-byte Reload + sbcs r4, r4, r1 + ldr r1, [sp, #64] @ 4-byte Reload + stm r12, {r2, r3, r7} + str r6, [r0, #48] + str r5, [r0, #52] + str r4, [r0, #56] + ldr r2, [sp, #68] @ 4-byte Reload + sbcs r1, r11, r1 + str r1, [r0, #60] + ldr r1, [sp, #72] @ 4-byte Reload + sbcs r1, r2, r1 + str r1, [r0, #64] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + add sp, sp, #76 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end262: + .size mcl_fp_subPre17L, .Lfunc_end262-mcl_fp_subPre17L + .cantunwind + .fnend + + .globl mcl_fp_shr1_17L + .align 2 + .type mcl_fp_shr1_17L,%function +mcl_fp_shr1_17L: @ @mcl_fp_shr1_17L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #20 + sub sp, sp, #20 + ldr r4, [r1, #4] + ldr r3, [r1, #8] + add r9, r1, #32 + ldr r2, [r1, #12] + ldr r11, [r1] + lsr r7, r4, #1 + lsr lr, r2, #1 + lsrs r2, r2, #1 + orr r10, r7, r3, lsl #31 + ldr r7, [r1, #64] + rrx r12, r3 + lsrs r3, r4, #1 + add r4, r1, #16 + rrx r11, r11 + str r7, [sp, #16] @ 4-byte Spill + ldm r9, {r5, r7, r9} + ldr r6, [r1, #48] + ldr r8, [r1, #44] + str r6, [sp] @ 4-byte Spill + ldr r6, [r1, #52] + str r6, [sp, #4] @ 4-byte Spill + ldr r6, [r1, #56] + str r6, [sp, #8] @ 4-byte Spill + ldr r6, [r1, #60] + str r6, [sp, #12] @ 4-byte Spill + ldm r4, {r1, r2, r3, r4} + str r11, [r0] + stmib r0, {r10, r12} + orr r6, lr, r1, lsl #31 + str r6, [r0, #12] + lsrs r6, r2, #1 + rrx r1, r1 + str r1, [r0, #16] + lsr r1, r2, #1 + ldr r2, [sp, #4] @ 4-byte Reload + orr r1, r1, r3, lsl #31 + str r1, [r0, #20] + lsrs r1, r4, #1 + rrx r1, r3 + ldr r3, [sp] @ 4-byte Reload + str r1, [r0, #24] + lsr r1, r4, #1 + orr r1, r1, r5, lsl #31 + str r1, [r0, #28] + lsrs r1, r7, #1 + rrx r1, r5 + str r1, [r0, #32] + lsr r1, r7, #1 
+ orr r1, r1, r9, lsl #31 + str r1, [r0, #36] + lsrs r1, r8, #1 + rrx r1, r9 + str r1, [r0, #40] + lsr r1, r8, #1 + orr r1, r1, r3, lsl #31 + str r1, [r0, #44] + lsrs r1, r2, #1 + rrx r1, r3 + ldr r3, [sp, #8] @ 4-byte Reload + str r1, [r0, #48] + lsr r1, r2, #1 + ldr r2, [sp, #12] @ 4-byte Reload + orr r1, r1, r3, lsl #31 + str r1, [r0, #52] + lsrs r1, r2, #1 + rrx r1, r3 + str r1, [r0, #56] + lsr r1, r2, #1 + ldr r2, [sp, #16] @ 4-byte Reload + orr r1, r1, r2, lsl #31 + str r1, [r0, #60] + lsr r1, r2, #1 + str r1, [r0, #64] + add sp, sp, #20 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end263: + .size mcl_fp_shr1_17L, .Lfunc_end263-mcl_fp_shr1_17L + .cantunwind + .fnend + + .globl mcl_fp_add17L + .align 2 + .type mcl_fp_add17L,%function +mcl_fp_add17L: @ @mcl_fp_add17L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #64 + sub sp, sp, #64 + ldr r9, [r1] + ldmib r1, {r8, lr} + ldr r12, [r1, #12] + ldm r2, {r4, r5, r6, r7} + adds r9, r4, r9 + ldr r4, [r1, #24] + adcs r5, r5, r8 + mov r8, r9 + adcs r6, r6, lr + str r5, [sp, #32] @ 4-byte Spill + ldr r5, [r1, #20] + str r8, [r0] + adcs r7, r7, r12 + str r6, [sp, #28] @ 4-byte Spill + ldr r6, [r1, #16] + ldr lr, [sp, #32] @ 4-byte Reload + str r7, [sp, #24] @ 4-byte Spill + ldr r7, [r2, #16] + str lr, [r0, #4] + adcs r10, r7, r6 + ldr r7, [r2, #20] + ldr r6, [r2, #28] + str r10, [sp, #4] @ 4-byte Spill + adcs r7, r7, r5 + ldr r5, [r2, #44] + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #24] + ldr r9, [sp, #20] @ 4-byte Reload + adcs r7, r7, r4 + ldr r4, [r2, #48] + str r7, [sp, #60] @ 4-byte Spill + ldr r7, [r1, #28] + adcs r7, r6, r7 + ldr r6, [r2, #32] + str r7, [sp, #12] @ 4-byte Spill + ldr r7, [r1, #32] + adcs r7, r6, r7 + ldr r6, [r2, #36] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r1, #36] + adcs r7, r6, r7 + ldr r6, [r2, #40] + str r7, [sp, #40] @ 4-byte Spill + ldr r7, [r1, #40] + adcs r7, r6, r7 + ldr r6, [r1, #44] + str r7, [sp, #52] @ 4-byte Spill + adcs r7, r5, r6 + ldr r5, [r1, #48] + ldr r6, [r2, #56] + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r2, #52] + adcs r11, r4, r5 + ldr r4, [r1, #52] + ldr r5, [sp, #24] @ 4-byte Reload + str r11, [sp, #8] @ 4-byte Spill + adcs r7, r7, r4 + ldr r4, [sp, #28] @ 4-byte Reload + str r7, [sp, #44] @ 4-byte Spill + ldr r7, [r1, #56] + str r4, [r0, #8] + str r5, [r0, #12] + str r10, [r0, #16] + str r9, [r0, #20] + ldr r10, [sp, #12] @ 4-byte Reload + adcs r12, r6, r7 + ldr r7, [r1, #60] + ldr r6, [r2, #60] + ldr r1, [r1, #64] + ldr r2, [r2, #64] + adcs r6, r6, r7 + adcs r2, r2, r1 + ldr r1, [sp, #60] @ 4-byte Reload + str r2, [sp, #36] @ 4-byte Spill + str r1, [r0, #24] + ldr r1, [sp, #56] @ 4-byte Reload + str r10, [r0, #28] + str r2, [r0, #64] + mov r2, #0 + str r1, [r0, #32] + ldr r1, [sp, #40] @ 4-byte Reload + str r1, [r0, #36] + ldr r1, [sp, #52] @ 4-byte Reload + str r1, [r0, #40] + ldr r1, [sp, #48] @ 4-byte Reload + str r1, [r0, #44] + ldr r1, [sp, #44] @ 4-byte Reload + str r11, [r0, #48] + mov r11, r12 + str r1, [r0, #52] + adc r1, r2, #0 + str r12, [r0, #56] + str r6, [r0, #60] + mov r12, r6 + str r1, [sp, #16] @ 4-byte Spill + ldm r3, {r6, r7} + ldr r1, [r3, #8] + ldr r2, [r3, #12] + subs r6, r8, r6 + sbcs r7, lr, r7 + str r6, [sp] @ 4-byte Spill + sbcs r1, r4, r1 + str r7, [sp, #32] @ 4-byte Spill + str r1, [sp, #28] @ 4-byte Spill + sbcs r1, r5, r2 + ldr r2, [sp, #4] @ 4-byte Reload + str r1, [sp, #24] @ 4-byte Spill + ldr r1, [r3, #16] + sbcs r1, r2, r1 + ldr r2, [sp, #60] @ 
4-byte Reload + str r1, [sp, #4] @ 4-byte Spill + ldr r1, [r3, #20] + sbcs r9, r9, r1 + ldr r1, [r3, #24] + sbcs r1, r2, r1 + ldr r2, [sp, #56] @ 4-byte Reload + str r1, [sp, #60] @ 4-byte Spill + ldr r1, [r3, #28] + sbcs r10, r10, r1 + ldr r1, [r3, #32] + sbcs r1, r2, r1 + ldr r2, [sp, #40] @ 4-byte Reload + str r1, [sp, #56] @ 4-byte Spill + ldr r1, [r3, #36] + sbcs r1, r2, r1 + ldr r2, [sp, #52] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [r3, #40] + sbcs lr, r2, r1 + ldr r1, [r3, #44] + ldr r2, [sp, #48] @ 4-byte Reload + sbcs r8, r2, r1 + ldr r1, [r3, #48] + ldr r2, [sp, #8] @ 4-byte Reload + sbcs r4, r2, r1 + ldr r1, [r3, #52] + ldr r2, [sp, #44] @ 4-byte Reload + sbcs r5, r2, r1 + ldr r1, [r3, #56] + ldr r2, [sp, #36] @ 4-byte Reload + sbcs r7, r11, r1 + ldr r1, [r3, #60] + sbcs r6, r12, r1 + ldr r1, [r3, #64] + sbcs r1, r2, r1 + ldr r2, [sp, #16] @ 4-byte Reload + sbc r2, r2, #0 + tst r2, #1 + bne .LBB264_2 +@ BB#1: @ %nocarry + ldr r2, [sp] @ 4-byte Reload + str r2, [r0] + ldr r2, [sp, #32] @ 4-byte Reload + str r2, [r0, #4] + ldr r2, [sp, #28] @ 4-byte Reload + str r2, [r0, #8] + ldr r2, [sp, #24] @ 4-byte Reload + str r2, [r0, #12] + ldr r2, [sp, #4] @ 4-byte Reload + str r2, [r0, #16] + ldr r2, [sp, #60] @ 4-byte Reload + str r9, [r0, #20] + str r2, [r0, #24] + str r10, [r0, #28] + str r1, [r0, #64] + ldr r1, [sp, #56] @ 4-byte Reload + str r1, [r0, #32] + ldr r1, [sp, #40] @ 4-byte Reload + str r1, [r0, #36] + add r1, r0, #48 + str lr, [r0, #40] + str r8, [r0, #44] + stm r1, {r4, r5, r7} + str r6, [r0, #60] +.LBB264_2: @ %carry + add sp, sp, #64 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end264: + .size mcl_fp_add17L, .Lfunc_end264-mcl_fp_add17L + .cantunwind + .fnend + + .globl mcl_fp_addNF17L + .align 2 + .type mcl_fp_addNF17L,%function +mcl_fp_addNF17L: @ @mcl_fp_addNF17L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #96 + sub sp, sp, #96 + ldr r9, [r1] + ldmib r1, {r8, lr} + ldr r12, [r1, #12] + ldm r2, {r4, r5, r6, r7} + adds r11, r4, r9 + ldr r4, [r1, #24] + adcs r10, r5, r8 + ldr r5, [r1, #20] + str r11, [sp, #8] @ 4-byte Spill + adcs r8, r6, lr + ldr r6, [r1, #16] + str r10, [sp, #16] @ 4-byte Spill + adcs r9, r7, r12 + ldr r7, [r2, #16] + str r8, [sp, #20] @ 4-byte Spill + str r9, [sp, #24] @ 4-byte Spill + adcs r7, r7, r6 + ldr r6, [r2, #28] + str r7, [sp, #48] @ 4-byte Spill + ldr r7, [r2, #20] + adcs lr, r7, r5 + ldr r7, [r2, #24] + str lr, [sp, #4] @ 4-byte Spill + adcs r7, r7, r4 + str r7, [sp, #60] @ 4-byte Spill + ldr r7, [r1, #28] + adcs r7, r6, r7 + ldr r6, [r2, #32] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r1, #32] + adcs r7, r6, r7 + ldr r6, [r2, #36] + str r7, [sp, #52] @ 4-byte Spill + ldr r7, [r1, #36] + adcs r7, r6, r7 + ldr r6, [r2, #40] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r1, #40] + adcs r7, r6, r7 + ldr r6, [r2, #44] + str r7, [sp, #68] @ 4-byte Spill + ldr r7, [r1, #44] + adcs r7, r6, r7 + ldr r6, [r2, #48] + str r7, [sp, #64] @ 4-byte Spill + ldr r7, [r1, #48] + adcs r7, r6, r7 + ldr r6, [r2, #52] + str r7, [sp, #84] @ 4-byte Spill + ldr r7, [r1, #52] + adcs r7, r6, r7 + ldr r6, [r2, #56] + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r1, #56] + adcs r7, r6, r7 + ldr r6, [r2, #60] + ldr r2, [r2, #64] + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [r1, #60] + ldr r1, [r1, #64] + adcs r7, r6, r7 + adc r1, r2, r1 + str r7, [sp, #92] @ 4-byte Spill + str r1, [sp, #88] @ 4-byte Spill + ldm r3, {r1, r7} + ldr r6, [r3, #8] + ldr 
r5, [r3, #12] + ldr r2, [sp, #48] @ 4-byte Reload + subs r12, r11, r1 + ldr r1, [r3, #64] + ldr r11, [r3, #36] + sbcs r4, r10, r7 + ldr r10, [r3, #32] + ldr r7, [r3, #24] + sbcs r6, r8, r6 + sbcs r9, r9, r5 + ldr r5, [r3, #28] + str r1, [sp] @ 4-byte Spill + ldr r1, [r3, #40] + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [r3, #44] + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [r3, #48] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [r3, #52] + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [r3, #56] + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [r3, #60] + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [r3, #20] + ldr r3, [r3, #16] + sbcs r2, r2, r3 + sbcs r3, lr, r1 + ldr r1, [sp, #60] @ 4-byte Reload + sbcs lr, r1, r7 + ldr r1, [sp, #56] @ 4-byte Reload + ldr r7, [sp, #12] @ 4-byte Reload + sbcs r5, r1, r5 + ldr r1, [sp, #52] @ 4-byte Reload + sbcs r8, r1, r10 + ldr r1, [sp, #72] @ 4-byte Reload + sbcs r11, r1, r11 + ldr r1, [sp, #68] @ 4-byte Reload + sbcs r1, r1, r7 + ldr r7, [sp, #28] @ 4-byte Reload + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #64] @ 4-byte Reload + sbcs r1, r1, r7 + ldr r7, [sp, #32] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + ldr r1, [sp, #84] @ 4-byte Reload + sbcs r1, r1, r7 + ldr r7, [sp, #36] @ 4-byte Reload + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [sp, #80] @ 4-byte Reload + sbcs r1, r1, r7 + ldr r7, [sp, #40] @ 4-byte Reload + str r1, [sp, #36] @ 4-byte Spill + ldr r1, [sp, #76] @ 4-byte Reload + sbcs r1, r1, r7 + ldr r7, [sp, #44] @ 4-byte Reload + str r1, [sp, #40] @ 4-byte Spill + ldr r1, [sp, #92] @ 4-byte Reload + sbcs r1, r1, r7 + ldr r7, [sp] @ 4-byte Reload + str r1, [sp, #44] @ 4-byte Spill + ldr r1, [sp, #88] @ 4-byte Reload + sbc r10, r1, r7 + ldr r7, [sp, #8] @ 4-byte Reload + asr r1, r10, #31 + cmp r1, #0 + movlt r12, r7 + ldr r7, [sp, #16] @ 4-byte Reload + str r12, [r0] + movlt r4, r7 + str r4, [r0, #4] + ldr r4, [sp, #20] @ 4-byte Reload + movlt r6, r4 + cmp r1, #0 + str r6, [r0, #8] + ldr r6, [sp, #24] @ 4-byte Reload + movlt r9, r6 + ldr r6, [sp, #48] @ 4-byte Reload + str r9, [r0, #12] + movlt r2, r6 + str r2, [r0, #16] + ldr r2, [sp, #4] @ 4-byte Reload + movlt r3, r2 + ldr r2, [sp, #60] @ 4-byte Reload + cmp r1, #0 + str r3, [r0, #20] + ldr r3, [sp, #12] @ 4-byte Reload + movlt lr, r2 + ldr r2, [sp, #56] @ 4-byte Reload + str lr, [r0, #24] + movlt r5, r2 + ldr r2, [sp, #52] @ 4-byte Reload + str r5, [r0, #28] + movlt r8, r2 + ldr r2, [sp, #72] @ 4-byte Reload + cmp r1, #0 + str r8, [r0, #32] + movlt r11, r2 + ldr r2, [sp, #68] @ 4-byte Reload + str r11, [r0, #36] + movlt r3, r2 + ldr r2, [sp, #64] @ 4-byte Reload + str r3, [r0, #40] + ldr r3, [sp, #28] @ 4-byte Reload + movlt r3, r2 + ldr r2, [sp, #84] @ 4-byte Reload + cmp r1, #0 + str r3, [r0, #44] + ldr r3, [sp, #32] @ 4-byte Reload + movlt r3, r2 + ldr r2, [sp, #80] @ 4-byte Reload + str r3, [r0, #48] + ldr r3, [sp, #36] @ 4-byte Reload + movlt r3, r2 + ldr r2, [sp, #76] @ 4-byte Reload + str r3, [r0, #52] + ldr r3, [sp, #40] @ 4-byte Reload + movlt r3, r2 + cmp r1, #0 + ldr r1, [sp, #92] @ 4-byte Reload + ldr r2, [sp, #44] @ 4-byte Reload + str r3, [r0, #56] + movlt r2, r1 + ldr r1, [sp, #88] @ 4-byte Reload + str r2, [r0, #60] + movlt r10, r1 + str r10, [r0, #64] + add sp, sp, #96 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end265: + .size mcl_fp_addNF17L, .Lfunc_end265-mcl_fp_addNF17L + .cantunwind + .fnend + + .globl mcl_fp_sub17L + .align 2 + .type mcl_fp_sub17L,%function +mcl_fp_sub17L: @ @mcl_fp_sub17L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, 
r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #92 + sub sp, sp, #92 + ldm r2, {r8, r9, lr} + ldr r12, [r2, #12] + ldm r1, {r4, r5, r6, r7} + subs r4, r4, r8 + sbcs r5, r5, r9 + str r4, [sp, #68] @ 4-byte Spill + ldr r4, [r2, #24] + sbcs r6, r6, lr + str r5, [sp, #88] @ 4-byte Spill + ldr r5, [r2, #20] + sbcs r7, r7, r12 + str r6, [sp, #84] @ 4-byte Spill + ldr r6, [r2, #16] + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r1, #16] + sbcs r7, r7, r6 + ldr r6, [r1, #28] + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [r1, #20] + sbcs r7, r7, r5 + ldr r5, [r1, #44] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r1, #24] + sbcs r11, r7, r4 + ldr r7, [r2, #28] + ldr r4, [r2, #52] + sbcs r10, r6, r7 + ldr r7, [r2, #32] + ldr r6, [r1, #32] + str r10, [sp, #60] @ 4-byte Spill + sbcs r9, r6, r7 + ldr r7, [r2, #36] + ldr r6, [r1, #36] + str r9, [sp, #56] @ 4-byte Spill + sbcs r7, r6, r7 + ldr r6, [r1, #40] + str r7, [sp, #64] @ 4-byte Spill + ldr r7, [r2, #40] + sbcs r8, r6, r7 + ldr r7, [r2, #44] + str r8, [sp, #52] @ 4-byte Spill + sbcs lr, r5, r7 + ldr r7, [r2, #48] + ldr r5, [r1, #48] + str lr, [sp, #48] @ 4-byte Spill + sbcs r6, r5, r7 + ldr r5, [r1, #52] + sbcs r7, r5, r4 + ldr r4, [r2, #56] + ldr r5, [r1, #56] + str r7, [sp, #44] @ 4-byte Spill + sbcs r12, r5, r4 + ldr r4, [r2, #60] + ldr r5, [r1, #60] + ldr r2, [r2, #64] + ldr r1, [r1, #64] + str r12, [sp, #40] @ 4-byte Spill + sbcs r4, r5, r4 + ldr r5, [sp, #64] @ 4-byte Reload + sbcs r1, r1, r2 + ldr r2, [sp, #68] @ 4-byte Reload + str r2, [r0] + ldr r2, [sp, #88] @ 4-byte Reload + str r2, [r0, #4] + ldr r2, [sp, #84] @ 4-byte Reload + str r2, [r0, #8] + ldr r2, [sp, #80] @ 4-byte Reload + str r2, [r0, #12] + ldr r2, [sp, #76] @ 4-byte Reload + str r2, [r0, #16] + ldr r2, [sp, #72] @ 4-byte Reload + str r2, [r0, #20] + add r2, r0, #36 + str r11, [r0, #24] + str r10, [r0, #28] + str r1, [r0, #64] + str r9, [r0, #32] + stm r2, {r5, r8, lr} + add r2, r0, #48 + stm r2, {r6, r7, r12} + mov r2, #0 + str r4, [r0, #60] + sbc r2, r2, #0 + tst r2, #1 + beq .LBB266_2 +@ BB#1: @ %carry + ldr r2, [r3, #64] + mov r9, r4 + str r2, [sp, #36] @ 4-byte Spill + ldm r3, {r4, r12} + ldr r2, [sp, #68] @ 4-byte Reload + str r6, [sp, #28] @ 4-byte Spill + ldr r7, [r3, #8] + str r1, [sp, #32] @ 4-byte Spill + ldr r1, [r3, #12] + ldr lr, [r3, #20] + adds r8, r4, r2 + ldr r2, [r3, #32] + str r8, [r0] + str r2, [sp] @ 4-byte Spill + ldr r2, [r3, #36] + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [r3, #40] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r3, #44] + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r3, #48] + str r2, [sp, #16] @ 4-byte Spill + ldr r2, [r3, #52] + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [r3, #56] + str r2, [sp, #24] @ 4-byte Spill + ldr r2, [r3, #60] + str r2, [sp, #68] @ 4-byte Spill + ldr r2, [sp, #88] @ 4-byte Reload + adcs r6, r12, r2 + ldr r2, [sp, #84] @ 4-byte Reload + adcs r7, r7, r2 + ldr r2, [sp, #80] @ 4-byte Reload + adcs r4, r1, r2 + ldr r2, [r3, #28] + ldr r1, [r3, #24] + ldr r3, [r3, #16] + stmib r0, {r6, r7} + ldr r7, [sp, #76] @ 4-byte Reload + str r4, [r0, #12] + ldr r6, [sp, #16] @ 4-byte Reload + ldr r4, [sp, #24] @ 4-byte Reload + adcs r3, r3, r7 + ldr r7, [sp, #72] @ 4-byte Reload + str r3, [r0, #16] + ldr r3, [sp, #60] @ 4-byte Reload + adcs r7, lr, r7 + adcs r1, r1, r11 + str r7, [r0, #20] + ldr r7, [sp, #12] @ 4-byte Reload + adcs r3, r2, r3 + str r1, [r0, #24] + ldr r1, [sp, #56] @ 4-byte Reload + ldr r2, [sp] @ 4-byte Reload + str r3, [r0, #28] + ldr r3, [sp, #8] @ 4-byte Reload + adcs r12, r2, r1 + 
ldr r1, [sp, #4] @ 4-byte Reload + str r12, [r0, #32] + add r12, r0, #36 + adcs r2, r1, r5 + ldr r1, [sp, #52] @ 4-byte Reload + ldr r5, [sp, #20] @ 4-byte Reload + adcs r3, r3, r1 + ldr r1, [sp, #48] @ 4-byte Reload + adcs r7, r7, r1 + ldr r1, [sp, #28] @ 4-byte Reload + adcs r6, r6, r1 + ldr r1, [sp, #44] @ 4-byte Reload + adcs r5, r5, r1 + ldr r1, [sp, #40] @ 4-byte Reload + adcs r4, r4, r1 + ldr r1, [sp, #68] @ 4-byte Reload + stm r12, {r2, r3, r7} + str r6, [r0, #48] + str r5, [r0, #52] + str r4, [r0, #56] + ldr r2, [sp, #32] @ 4-byte Reload + adcs r1, r1, r9 + str r1, [r0, #60] + ldr r1, [sp, #36] @ 4-byte Reload + adc r1, r1, r2 + str r1, [r0, #64] +.LBB266_2: @ %nocarry + add sp, sp, #92 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end266: + .size mcl_fp_sub17L, .Lfunc_end266-mcl_fp_sub17L + .cantunwind + .fnend + + .globl mcl_fp_subNF17L + .align 2 + .type mcl_fp_subNF17L,%function +mcl_fp_subNF17L: @ @mcl_fp_subNF17L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #104 + sub sp, sp, #104 + mov r12, r0 + ldr r0, [r2, #64] + ldr r11, [r2] + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r1, #64] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r2, #32] + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [r2, #36] + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [r2, #40] + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [r2, #44] + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [r2, #48] + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [r2, #52] + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [r2, #56] + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [r2, #60] + str r0, [sp, #100] @ 4-byte Spill + ldr r0, [r1, #60] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r1, #56] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r1, #52] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r1, #48] + str r0, [sp, #24] @ 4-byte Spill + ldmib r2, {r5, r6, r7, r8, r9, r10} + ldr r0, [r2, #28] + ldr r2, [r1] + str r0, [sp, #64] @ 4-byte Spill + ldmib r1, {r0, lr} + ldr r4, [r1, #12] + subs r2, r2, r11 + add r11, r3, #8 + str r2, [sp, #12] @ 4-byte Spill + ldr r2, [r1, #44] + sbcs r0, r0, r5 + ldr r5, [r1, #40] + str r0, [sp, #8] @ 4-byte Spill + sbcs r0, lr, r6 + ldr r6, [r1, #36] + str r0, [sp, #48] @ 4-byte Spill + sbcs r0, r4, r7 + ldr r7, [r1, #16] + str r0, [sp, #52] @ 4-byte Spill + sbcs r0, r7, r8 + ldr r7, [r1, #20] + str r0, [sp, #56] @ 4-byte Spill + sbcs r0, r7, r9 + ldr r7, [r1, #24] + str r0, [sp, #60] @ 4-byte Spill + sbcs r0, r7, r10 + ldr r7, [r1, #32] + ldr r1, [r1, #28] + str r0, [sp, #68] @ 4-byte Spill + ldr r0, [sp, #64] @ 4-byte Reload + sbcs r0, r1, r0 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #72] @ 4-byte Reload + sbcs r0, r7, r0 + str r0, [sp, #72] @ 4-byte Spill + ldr r0, [sp, #76] @ 4-byte Reload + sbcs r0, r6, r0 + str r0, [sp, #76] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + sbcs r0, r5, r0 + str r0, [sp, #80] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + sbcs r0, r2, r0 + str r0, [sp, #84] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + sbcs r0, r1, r0 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #88] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + sbcs r0, r1, r0 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #92] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + sbcs r0, r1, r0 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #96] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + sbcs r0, r1, r0 + ldr r1, [sp, #40] @ 4-byte Reload + 
str r0, [sp, #100] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + sbc r0, r1, r0 + str r0, [sp, #44] @ 4-byte Spill + ldr r0, [r3, #64] + str r0, [sp, #40] @ 4-byte Spill + ldr r0, [r3, #36] + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [r3, #40] + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [r3, #44] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r3, #48] + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [r3, #52] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r3, #56] + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r3, #60] + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r3, #32] + str r0, [sp] @ 4-byte Spill + ldm r3, {r2, r7} + ldm r11, {r1, r4, r5, r6, r11} + ldr r8, [sp, #12] @ 4-byte Reload + ldr r10, [sp, #8] @ 4-byte Reload + ldr r0, [r3, #28] + adds r2, r8, r2 + adcs r3, r10, r7 + ldr r7, [sp, #48] @ 4-byte Reload + adcs lr, r7, r1 + ldr r1, [sp, #52] @ 4-byte Reload + adcs r4, r1, r4 + ldr r1, [sp, #56] @ 4-byte Reload + adcs r5, r1, r5 + ldr r1, [sp, #60] @ 4-byte Reload + adcs r6, r1, r6 + ldr r1, [sp, #68] @ 4-byte Reload + adcs r7, r1, r11 + ldr r1, [sp, #64] @ 4-byte Reload + adcs r9, r1, r0 + ldr r0, [sp, #72] @ 4-byte Reload + ldr r1, [sp] @ 4-byte Reload + adcs r11, r0, r1 + ldr r0, [sp, #76] @ 4-byte Reload + ldr r1, [sp, #4] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #16] @ 4-byte Reload + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #80] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #20] @ 4-byte Reload + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #84] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #24] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #88] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #28] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #92] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #32] @ 4-byte Reload + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [sp, #96] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #36] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [sp, #100] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #40] @ 4-byte Reload + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + adc r1, r0, r1 + str r1, [sp, #40] @ 4-byte Spill + asr r1, r0, #31 + ldr r0, [sp, #48] @ 4-byte Reload + cmp r1, #0 + movge r2, r8 + movge r3, r10 + str r2, [r12] + ldr r2, [sp, #4] @ 4-byte Reload + str r3, [r12, #4] + movge lr, r0 + ldr r0, [sp, #52] @ 4-byte Reload + cmp r1, #0 + str lr, [r12, #8] + movge r4, r0 + ldr r0, [sp, #56] @ 4-byte Reload + str r4, [r12, #12] + movge r5, r0 + ldr r0, [sp, #60] @ 4-byte Reload + str r5, [r12, #16] + movge r6, r0 + ldr r0, [sp, #68] @ 4-byte Reload + cmp r1, #0 + str r6, [r12, #20] + movge r7, r0 + ldr r0, [sp, #64] @ 4-byte Reload + str r7, [r12, #24] + movge r9, r0 + ldr r0, [sp, #72] @ 4-byte Reload + str r9, [r12, #28] + movge r11, r0 + ldr r0, [sp, #76] @ 4-byte Reload + cmp r1, #0 + str r11, [r12, #32] + movge r2, r0 + ldr r0, [sp, #80] @ 4-byte Reload + str r2, [r12, #36] + ldr r2, [sp, #16] @ 4-byte Reload + movge r2, r0 + ldr r0, [sp, #84] @ 4-byte Reload + str r2, [r12, #40] + ldr r2, [sp, #20] @ 4-byte Reload + movge r2, r0 + ldr r0, [sp, #88] @ 4-byte Reload + cmp r1, #0 + str r2, [r12, #44] + ldr r2, [sp, #24] @ 4-byte Reload + movge r2, r0 + ldr r0, [sp, #92] @ 4-byte Reload + str r2, [r12, #48] + ldr r2, [sp, #28] @ 4-byte Reload + movge r2, r0 + ldr r0, [sp, #96] @ 4-byte Reload + str r2, [r12, #52] + ldr r2, [sp, #32] @ 4-byte Reload + movge r2, r0 + ldr r0, [sp, #100] @ 4-byte Reload + cmp r1, #0 + ldr r1, [sp, #36] @ 
4-byte Reload + str r2, [r12, #56] + movge r1, r0 + ldr r0, [sp, #40] @ 4-byte Reload + str r1, [r12, #60] + ldr r1, [sp, #44] @ 4-byte Reload + movge r0, r1 + str r0, [r12, #64] + add sp, sp, #104 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end267: + .size mcl_fp_subNF17L, .Lfunc_end267-mcl_fp_subNF17L + .cantunwind + .fnend + + .globl mcl_fpDbl_add17L + .align 2 + .type mcl_fpDbl_add17L,%function +mcl_fpDbl_add17L: @ @mcl_fpDbl_add17L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #216 + sub sp, sp, #216 + ldm r1, {r7, r8, lr} + ldr r12, [r1, #12] + ldm r2, {r4, r5, r6, r9} + add r10, r1, #32 + adds r4, r4, r7 + str r4, [sp, #104] @ 4-byte Spill + ldr r4, [r2, #128] + str r4, [sp, #208] @ 4-byte Spill + ldr r4, [r2, #132] + str r4, [sp, #212] @ 4-byte Spill + adcs r4, r5, r8 + adcs r7, r6, lr + str r4, [sp, #100] @ 4-byte Spill + add lr, r1, #16 + str r7, [sp, #96] @ 4-byte Spill + ldr r7, [r2, #96] + str r7, [sp, #176] @ 4-byte Spill + ldr r7, [r2, #100] + str r7, [sp, #180] @ 4-byte Spill + ldr r7, [r2, #104] + str r7, [sp, #184] @ 4-byte Spill + ldr r7, [r2, #108] + str r7, [sp, #188] @ 4-byte Spill + ldr r7, [r2, #112] + str r7, [sp, #192] @ 4-byte Spill + ldr r7, [r2, #116] + str r7, [sp, #196] @ 4-byte Spill + ldr r7, [r2, #120] + str r7, [sp, #200] @ 4-byte Spill + ldr r7, [r2, #124] + str r7, [sp, #204] @ 4-byte Spill + adcs r7, r9, r12 + str r7, [sp, #68] @ 4-byte Spill + ldr r7, [r2, #64] + str r7, [sp, #144] @ 4-byte Spill + ldr r7, [r2, #68] + str r7, [sp, #148] @ 4-byte Spill + ldr r7, [r2, #72] + str r7, [sp, #152] @ 4-byte Spill + ldr r7, [r2, #76] + str r7, [sp, #156] @ 4-byte Spill + ldr r7, [r2, #80] + str r7, [sp, #160] @ 4-byte Spill + ldr r7, [r2, #84] + str r7, [sp, #168] @ 4-byte Spill + ldr r7, [r2, #88] + str r7, [sp, #164] @ 4-byte Spill + ldr r7, [r2, #92] + str r7, [sp, #172] @ 4-byte Spill + ldr r7, [r2, #32] + str r7, [sp, #56] @ 4-byte Spill + ldr r7, [r2, #36] + str r7, [sp, #60] @ 4-byte Spill + ldr r7, [r2, #40] + str r7, [sp, #64] @ 4-byte Spill + ldr r7, [r2, #44] + str r7, [sp, #72] @ 4-byte Spill + ldr r7, [r2, #48] + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [r2, #52] + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r2, #56] + str r7, [sp, #88] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #92] @ 4-byte Spill + ldr r7, [r2, #28] + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #24] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r2, #20] + ldr r2, [r2, #16] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #128] + str r7, [sp, #12] @ 4-byte Spill + str r2, [sp, #136] @ 4-byte Spill + ldr r2, [r1, #132] + str r2, [sp, #140] @ 4-byte Spill + ldr r2, [r1, #96] + str r2, [sp, #108] @ 4-byte Spill + ldr r2, [r1, #104] + str r2, [sp, #112] @ 4-byte Spill + ldr r2, [r1, #108] + str r2, [sp, #116] @ 4-byte Spill + ldr r2, [r1, #112] + str r2, [sp, #120] @ 4-byte Spill + ldr r2, [r1, #116] + str r2, [sp, #124] @ 4-byte Spill + ldr r2, [r1, #120] + str r2, [sp, #128] @ 4-byte Spill + ldr r2, [r1, #124] + str r2, [sp, #132] @ 4-byte Spill + ldr r2, [r1, #100] + str r2, [sp, #84] @ 4-byte Spill + ldr r2, [r1, #64] + str r2, [sp, #24] @ 4-byte Spill + ldr r2, [r1, #68] + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #32] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [r1, #80] + str r2, [sp, #40] @ 4-byte Spill + ldr r2, [r1, #84] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r1, #88] + str r2, [sp, #48] @ 4-byte 
Spill + ldr r2, [r1, #92] + str r2, [sp, #52] @ 4-byte Spill + ldm r10, {r4, r5, r6, r8, r9, r10} + ldr r2, [r1, #56] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #4] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r11, [sp, #104] @ 4-byte Reload + ldr r7, [sp, #100] @ 4-byte Reload + str r11, [r0] + str r7, [r0, #4] + ldr r7, [sp, #96] @ 4-byte Reload + str r7, [r0, #8] + ldr r7, [sp, #8] @ 4-byte Reload + adcs r1, r7, r1 + ldr r7, [sp, #68] @ 4-byte Reload + str r7, [r0, #12] + ldr r7, [sp, #12] @ 4-byte Reload + str r1, [r0, #16] + ldr r1, [sp, #16] @ 4-byte Reload + adcs r2, r7, r2 + ldr r7, [sp] @ 4-byte Reload + str r2, [r0, #20] + adcs r1, r1, r12 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #56] @ 4-byte Reload + adcs r2, r2, lr + str r2, [r0, #28] + adcs r1, r1, r4 + ldr r2, [sp, #60] @ 4-byte Reload + str r1, [r0, #32] + ldr r1, [sp, #64] @ 4-byte Reload + adcs r2, r2, r5 + str r2, [r0, #36] + adcs r1, r1, r6 + ldr r2, [sp, #72] @ 4-byte Reload + str r1, [r0, #40] + ldr r1, [sp, #76] @ 4-byte Reload + adcs r2, r2, r8 + str r2, [r0, #44] + adcs r1, r1, r9 + ldr r2, [sp, #80] @ 4-byte Reload + str r1, [r0, #48] + ldr r1, [sp, #88] @ 4-byte Reload + adcs r2, r2, r10 + adcs r1, r1, r7 + str r2, [r0, #52] + ldr r2, [sp, #92] @ 4-byte Reload + ldr r7, [sp, #4] @ 4-byte Reload + str r1, [r0, #56] + ldr r1, [sp, #144] @ 4-byte Reload + adcs r2, r2, r7 + ldr r7, [sp, #24] @ 4-byte Reload + str r2, [r0, #60] + ldr r2, [sp, #148] @ 4-byte Reload + adcs r1, r1, r7 + ldr r7, [sp, #28] @ 4-byte Reload + str r1, [r0, #64] + ldr r1, [sp, #152] @ 4-byte Reload + adcs r12, r2, r7 + ldr r2, [sp, #32] @ 4-byte Reload + str r12, [sp, #96] @ 4-byte Spill + adcs r9, r1, r2 + ldr r1, [sp, #156] @ 4-byte Reload + ldr r2, [sp, #36] @ 4-byte Reload + str r9, [sp, #100] @ 4-byte Spill + adcs r8, r1, r2 + ldr r1, [sp, #160] @ 4-byte Reload + ldr r2, [sp, #40] @ 4-byte Reload + str r8, [sp, #104] @ 4-byte Spill + adcs r4, r1, r2 + ldr r1, [sp, #168] @ 4-byte Reload + ldr r2, [sp, #44] @ 4-byte Reload + str r4, [sp, #144] @ 4-byte Spill + adcs r1, r1, r2 + ldr r2, [sp, #48] @ 4-byte Reload + str r1, [sp, #168] @ 4-byte Spill + ldr r1, [sp, #164] @ 4-byte Reload + adcs lr, r1, r2 + ldr r1, [sp, #172] @ 4-byte Reload + ldr r2, [sp, #52] @ 4-byte Reload + str lr, [sp, #92] @ 4-byte Spill + adcs r1, r1, r2 + ldr r2, [sp, #108] @ 4-byte Reload + str r1, [sp, #172] @ 4-byte Spill + ldr r1, [sp, #176] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #84] @ 4-byte Reload + str r1, [sp, #176] @ 4-byte Spill + ldr r1, [sp, #180] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #112] @ 4-byte Reload + str r1, [sp, #180] @ 4-byte Spill + ldr r1, [sp, #184] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #116] @ 4-byte Reload + str r1, [sp, #184] @ 4-byte Spill + ldr r1, [sp, #188] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #120] @ 4-byte Reload + str r1, [sp, #188] @ 4-byte Spill + ldr r1, [sp, #192] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #124] @ 4-byte Reload + str r1, [sp, #192] @ 4-byte Spill + ldr r1, [sp, #196] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #128] @ 4-byte Reload + str r1, [sp, #196] @ 4-byte Spill + ldr r1, [sp, #200] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #132] @ 4-byte Reload + str r1, [sp, #200] @ 4-byte Spill + ldr r1, [sp, #204] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #136] @ 4-byte Reload + str r1, [sp, #204] @ 4-byte Spill + ldr r1, [sp, #208] @ 4-byte Reload + adcs r1, r1, r2 + ldr r2, [sp, #140] @ 4-byte 
Reload + str r1, [sp, #208] @ 4-byte Spill + ldr r1, [sp, #212] @ 4-byte Reload + adcs r1, r1, r2 + str r1, [sp, #212] @ 4-byte Spill + mov r1, #0 + adc r1, r1, #0 + str r1, [sp, #140] @ 4-byte Spill + ldm r3, {r2, r7} + ldr r1, [r3, #64] + ldr r6, [r3, #8] + ldr r5, [r3, #12] + ldr r10, [r3, #36] + ldr r11, [r3, #40] + str r1, [sp, #164] @ 4-byte Spill + ldr r1, [r3, #44] + subs r12, r12, r2 + sbcs r7, r9, r7 + sbcs r6, r8, r6 + add r8, r3, #20 + sbcs r9, r4, r5 + str r1, [sp, #136] @ 4-byte Spill + ldr r1, [r3, #48] + str r1, [sp, #148] @ 4-byte Spill + ldr r1, [r3, #52] + str r1, [sp, #152] @ 4-byte Spill + ldr r1, [r3, #56] + str r1, [sp, #156] @ 4-byte Spill + ldr r1, [r3, #60] + str r1, [sp, #160] @ 4-byte Spill + ldm r8, {r1, r4, r5, r8} + ldr r3, [r3, #16] + ldr r2, [sp, #168] @ 4-byte Reload + sbcs r2, r2, r3 + sbcs r3, lr, r1 + ldr r1, [sp, #172] @ 4-byte Reload + sbcs lr, r1, r4 + ldr r1, [sp, #176] @ 4-byte Reload + sbcs r4, r1, r5 + ldr r1, [sp, #180] @ 4-byte Reload + ldr r5, [sp, #136] @ 4-byte Reload + sbcs r8, r1, r8 + ldr r1, [sp, #184] @ 4-byte Reload + sbcs r10, r1, r10 + ldr r1, [sp, #188] @ 4-byte Reload + sbcs r11, r1, r11 + ldr r1, [sp, #192] @ 4-byte Reload + sbcs r1, r1, r5 + ldr r5, [sp, #148] @ 4-byte Reload + str r1, [sp, #136] @ 4-byte Spill + ldr r1, [sp, #196] @ 4-byte Reload + sbcs r1, r1, r5 + ldr r5, [sp, #152] @ 4-byte Reload + str r1, [sp, #148] @ 4-byte Spill + ldr r1, [sp, #200] @ 4-byte Reload + sbcs r1, r1, r5 + ldr r5, [sp, #156] @ 4-byte Reload + str r1, [sp, #152] @ 4-byte Spill + ldr r1, [sp, #204] @ 4-byte Reload + sbcs r1, r1, r5 + ldr r5, [sp, #160] @ 4-byte Reload + str r1, [sp, #156] @ 4-byte Spill + ldr r1, [sp, #208] @ 4-byte Reload + sbcs r1, r1, r5 + ldr r5, [sp, #164] @ 4-byte Reload + str r1, [sp, #160] @ 4-byte Spill + ldr r1, [sp, #212] @ 4-byte Reload + sbcs r1, r1, r5 + ldr r5, [sp, #96] @ 4-byte Reload + str r1, [sp, #164] @ 4-byte Spill + ldr r1, [sp, #140] @ 4-byte Reload + sbc r1, r1, #0 + ands r1, r1, #1 + movne r12, r5 + ldr r5, [sp, #100] @ 4-byte Reload + str r12, [r0, #68] + movne r7, r5 + str r7, [r0, #72] + ldr r7, [sp, #104] @ 4-byte Reload + movne r6, r7 + ldr r7, [sp, #144] @ 4-byte Reload + cmp r1, #0 + str r6, [r0, #76] + movne r9, r7 + ldr r7, [sp, #168] @ 4-byte Reload + str r9, [r0, #80] + movne r2, r7 + str r2, [r0, #84] + ldr r2, [sp, #92] @ 4-byte Reload + movne r3, r2 + ldr r2, [sp, #172] @ 4-byte Reload + cmp r1, #0 + str r3, [r0, #88] + ldr r3, [sp, #136] @ 4-byte Reload + movne lr, r2 + ldr r2, [sp, #176] @ 4-byte Reload + str lr, [r0, #92] + movne r4, r2 + ldr r2, [sp, #180] @ 4-byte Reload + str r4, [r0, #96] + movne r8, r2 + ldr r2, [sp, #184] @ 4-byte Reload + cmp r1, #0 + str r8, [r0, #100] + movne r10, r2 + ldr r2, [sp, #188] @ 4-byte Reload + str r10, [r0, #104] + movne r11, r2 + ldr r2, [sp, #192] @ 4-byte Reload + str r11, [r0, #108] + movne r3, r2 + ldr r2, [sp, #196] @ 4-byte Reload + cmp r1, #0 + str r3, [r0, #112] + ldr r3, [sp, #148] @ 4-byte Reload + movne r3, r2 + ldr r2, [sp, #200] @ 4-byte Reload + str r3, [r0, #116] + ldr r3, [sp, #152] @ 4-byte Reload + movne r3, r2 + ldr r2, [sp, #204] @ 4-byte Reload + str r3, [r0, #120] + ldr r3, [sp, #156] @ 4-byte Reload + movne r3, r2 + cmp r1, #0 + ldr r1, [sp, #208] @ 4-byte Reload + ldr r2, [sp, #160] @ 4-byte Reload + str r3, [r0, #124] + ldr r3, [sp, #164] @ 4-byte Reload + movne r2, r1 + ldr r1, [sp, #212] @ 4-byte Reload + str r2, [r0, #128] + movne r3, r1 + str r3, [r0, #132] + add sp, sp, #216 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} 
+ mov pc, lr +.Lfunc_end268: + .size mcl_fpDbl_add17L, .Lfunc_end268-mcl_fpDbl_add17L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub17L + .align 2 + .type mcl_fpDbl_sub17L,%function +mcl_fpDbl_sub17L: @ @mcl_fpDbl_sub17L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #216 + sub sp, sp, #216 + ldr r7, [r2, #128] + add r10, r1, #32 + str r7, [sp, #208] @ 4-byte Spill + ldr r7, [r2, #132] + str r7, [sp, #212] @ 4-byte Spill + ldr r7, [r2, #96] + str r7, [sp, #188] @ 4-byte Spill + ldr r7, [r2, #104] + str r7, [sp, #164] @ 4-byte Spill + ldr r7, [r2, #108] + str r7, [sp, #168] @ 4-byte Spill + ldr r7, [r2, #112] + str r7, [sp, #192] @ 4-byte Spill + ldr r7, [r2, #116] + str r7, [sp, #196] @ 4-byte Spill + ldr r7, [r2, #120] + str r7, [sp, #200] @ 4-byte Spill + ldr r7, [r2, #124] + str r7, [sp, #204] @ 4-byte Spill + ldr r7, [r2, #100] + str r7, [sp, #156] @ 4-byte Spill + ldr r7, [r2, #64] + str r7, [sp, #144] @ 4-byte Spill + ldr r7, [r2, #68] + str r7, [sp, #148] @ 4-byte Spill + ldr r7, [r2, #72] + str r7, [sp, #152] @ 4-byte Spill + ldr r7, [r2, #76] + str r7, [sp, #160] @ 4-byte Spill + ldr r7, [r2, #80] + str r7, [sp, #172] @ 4-byte Spill + ldr r7, [r2, #84] + str r7, [sp, #176] @ 4-byte Spill + ldr r7, [r2, #88] + str r7, [sp, #180] @ 4-byte Spill + ldr r7, [r2, #92] + str r7, [sp, #184] @ 4-byte Spill + ldr r7, [r2, #60] + str r7, [sp, #140] @ 4-byte Spill + ldm r2, {r6, r8, r12, lr} + ldm r1, {r4, r5, r7, r9} + subs r4, r4, r6 + str r4, [sp, #36] @ 4-byte Spill + ldr r4, [r2, #56] + str r4, [sp, #128] @ 4-byte Spill + sbcs r4, r5, r8 + sbcs r7, r7, r12 + str r4, [sp, #32] @ 4-byte Spill + ldr r4, [r2, #52] + str r7, [sp, #28] @ 4-byte Spill + ldr r7, [r2, #48] + str r4, [sp, #96] @ 4-byte Spill + str r7, [sp, #88] @ 4-byte Spill + sbcs r7, r9, lr + add lr, r1, #16 + str r7, [sp, #24] @ 4-byte Spill + ldr r7, [r2, #44] + str r7, [sp, #84] @ 4-byte Spill + ldr r7, [r2, #40] + str r7, [sp, #80] @ 4-byte Spill + ldr r7, [r2, #36] + str r7, [sp, #76] @ 4-byte Spill + ldr r7, [r2, #32] + str r7, [sp, #40] @ 4-byte Spill + ldr r7, [r2, #28] + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [r2, #24] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r2, #20] + ldr r2, [r2, #16] + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [r1, #128] + str r7, [sp, #12] @ 4-byte Spill + str r2, [sp, #132] @ 4-byte Spill + ldr r2, [r1, #132] + str r2, [sp, #136] @ 4-byte Spill + ldr r2, [r1, #96] + str r2, [sp, #100] @ 4-byte Spill + ldr r2, [r1, #104] + str r2, [sp, #104] @ 4-byte Spill + ldr r2, [r1, #108] + str r2, [sp, #108] @ 4-byte Spill + ldr r2, [r1, #112] + str r2, [sp, #112] @ 4-byte Spill + ldr r2, [r1, #116] + str r2, [sp, #116] @ 4-byte Spill + ldr r2, [r1, #120] + str r2, [sp, #120] @ 4-byte Spill + ldr r2, [r1, #124] + str r2, [sp, #124] @ 4-byte Spill + ldr r2, [r1, #100] + str r2, [sp, #92] @ 4-byte Spill + ldr r2, [r1, #64] + str r2, [sp, #44] @ 4-byte Spill + ldr r2, [r1, #68] + str r2, [sp, #48] @ 4-byte Spill + ldr r2, [r1, #72] + str r2, [sp, #52] @ 4-byte Spill + ldr r2, [r1, #76] + str r2, [sp, #56] @ 4-byte Spill + ldr r2, [r1, #80] + str r2, [sp, #60] @ 4-byte Spill + ldr r2, [r1, #84] + str r2, [sp, #64] @ 4-byte Spill + ldr r2, [r1, #88] + str r2, [sp, #68] @ 4-byte Spill + ldr r2, [r1, #92] + str r2, [sp, #72] @ 4-byte Spill + ldm r10, {r4, r5, r6, r8, r9, r10} + ldr r2, [r1, #56] + str r2, [sp] @ 4-byte Spill + ldr r2, [r1, #60] + str r2, [sp, #4] @ 4-byte Spill + ldm lr, {r1, r2, r12, lr} + ldr r11, [sp, #36] @ 
4-byte Reload + ldr r7, [sp, #32] @ 4-byte Reload + str r11, [r0] + str r7, [r0, #4] + ldr r7, [sp, #28] @ 4-byte Reload + str r7, [r0, #8] + ldr r7, [sp, #8] @ 4-byte Reload + sbcs r1, r1, r7 + ldr r7, [sp, #24] @ 4-byte Reload + str r7, [r0, #12] + ldr r7, [sp, #12] @ 4-byte Reload + str r1, [r0, #16] + ldr r1, [sp, #16] @ 4-byte Reload + sbcs r2, r2, r7 + ldr r7, [sp] @ 4-byte Reload + str r2, [r0, #20] + sbcs r1, r12, r1 + ldr r2, [sp, #20] @ 4-byte Reload + str r1, [r0, #24] + ldr r1, [sp, #40] @ 4-byte Reload + sbcs r2, lr, r2 + add lr, r3, #8 + str r2, [r0, #28] + sbcs r1, r4, r1 + ldr r2, [sp, #76] @ 4-byte Reload + str r1, [r0, #32] + ldr r1, [sp, #80] @ 4-byte Reload + sbcs r2, r5, r2 + str r2, [r0, #36] + sbcs r1, r6, r1 + ldr r2, [sp, #84] @ 4-byte Reload + str r1, [r0, #40] + ldr r1, [sp, #88] @ 4-byte Reload + sbcs r2, r8, r2 + sbcs r1, r9, r1 + str r2, [r0, #44] + ldr r2, [sp, #96] @ 4-byte Reload + add r9, r3, #20 + str r1, [r0, #48] + ldr r1, [sp, #128] @ 4-byte Reload + sbcs r2, r10, r2 + sbcs r1, r7, r1 + str r2, [r0, #52] + ldr r2, [sp, #140] @ 4-byte Reload + ldr r7, [sp, #4] @ 4-byte Reload + str r1, [r0, #56] + ldr r1, [sp, #144] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #44] @ 4-byte Reload + str r2, [r0, #60] + ldr r2, [sp, #148] @ 4-byte Reload + sbcs r1, r7, r1 + ldr r7, [sp, #48] @ 4-byte Reload + str r1, [r0, #64] + ldr r1, [sp, #152] @ 4-byte Reload + sbcs r5, r7, r2 + ldr r2, [sp, #52] @ 4-byte Reload + ldr r7, [sp, #100] @ 4-byte Reload + sbcs r10, r2, r1 + ldr r1, [sp, #160] @ 4-byte Reload + ldr r2, [sp, #56] @ 4-byte Reload + str r10, [sp, #96] @ 4-byte Spill + sbcs r1, r2, r1 + ldr r2, [sp, #60] @ 4-byte Reload + str r1, [sp, #160] @ 4-byte Spill + ldr r1, [sp, #172] @ 4-byte Reload + sbcs r1, r2, r1 + ldr r2, [sp, #64] @ 4-byte Reload + str r1, [sp, #172] @ 4-byte Spill + ldr r1, [sp, #176] @ 4-byte Reload + sbcs r1, r2, r1 + ldr r2, [sp, #68] @ 4-byte Reload + str r1, [sp, #176] @ 4-byte Spill + ldr r1, [sp, #180] @ 4-byte Reload + sbcs r1, r2, r1 + ldr r2, [sp, #72] @ 4-byte Reload + str r1, [sp, #180] @ 4-byte Spill + ldr r1, [sp, #184] @ 4-byte Reload + sbcs r1, r2, r1 + ldr r2, [sp, #188] @ 4-byte Reload + str r1, [sp, #184] @ 4-byte Spill + mov r1, #0 + sbcs r2, r7, r2 + ldr r7, [sp, #92] @ 4-byte Reload + str r2, [sp, #188] @ 4-byte Spill + ldr r2, [sp, #156] @ 4-byte Reload + sbcs r11, r7, r2 + ldr r2, [sp, #164] @ 4-byte Reload + ldr r7, [sp, #104] @ 4-byte Reload + str r11, [sp, #128] @ 4-byte Spill + sbcs r2, r7, r2 + ldr r7, [sp, #108] @ 4-byte Reload + str r2, [sp, #164] @ 4-byte Spill + ldr r2, [sp, #168] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #112] @ 4-byte Reload + str r2, [sp, #168] @ 4-byte Spill + ldr r2, [sp, #192] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #116] @ 4-byte Reload + str r2, [sp, #192] @ 4-byte Spill + ldr r2, [sp, #196] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #120] @ 4-byte Reload + str r2, [sp, #196] @ 4-byte Spill + ldr r2, [sp, #200] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #124] @ 4-byte Reload + str r2, [sp, #200] @ 4-byte Spill + ldr r2, [sp, #204] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #132] @ 4-byte Reload + str r2, [sp, #204] @ 4-byte Spill + ldr r2, [sp, #208] @ 4-byte Reload + sbcs r2, r7, r2 + ldr r7, [sp, #136] @ 4-byte Reload + str r2, [sp, #208] @ 4-byte Spill + ldr r2, [sp, #212] @ 4-byte Reload + sbcs r2, r7, r2 + sbc r1, r1, #0 + str r2, [sp, #212] @ 4-byte Spill + str r1, [sp, #124] @ 4-byte Spill + ldr r1, [r3, #64] + str r1, [sp, #156] @ 4-byte 
Spill + ldr r1, [r3, #36] + str r1, [sp, #120] @ 4-byte Spill + ldr r1, [r3, #40] + str r1, [sp, #132] @ 4-byte Spill + ldr r1, [r3, #44] + str r1, [sp, #136] @ 4-byte Spill + ldr r1, [r3, #48] + str r1, [sp, #140] @ 4-byte Spill + ldr r1, [r3, #52] + str r1, [sp, #144] @ 4-byte Spill + ldr r1, [r3, #56] + str r1, [sp, #148] @ 4-byte Spill + ldr r1, [r3, #60] + str r1, [sp, #152] @ 4-byte Spill + ldr r1, [r3, #32] + str r1, [sp, #116] @ 4-byte Spill + ldm r3, {r2, r7} + ldm lr, {r6, r12, lr} + ldm r9, {r4, r8, r9} + ldr r3, [sp, #160] @ 4-byte Reload + adds r1, r5, r2 + adcs r2, r10, r7 + ldr r7, [sp, #164] @ 4-byte Reload + adcs r3, r3, r6 + ldr r6, [sp, #172] @ 4-byte Reload + adcs r12, r6, r12 + ldr r6, [sp, #176] @ 4-byte Reload + adcs lr, r6, lr + ldr r6, [sp, #180] @ 4-byte Reload + adcs r4, r6, r4 + ldr r6, [sp, #184] @ 4-byte Reload + adcs r8, r6, r8 + ldr r6, [sp, #188] @ 4-byte Reload + adcs r9, r6, r9 + ldr r6, [sp, #116] @ 4-byte Reload + adcs r10, r11, r6 + ldr r6, [sp, #120] @ 4-byte Reload + ldr r11, [sp, #156] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #132] @ 4-byte Reload + str r7, [sp, #120] @ 4-byte Spill + ldr r7, [sp, #168] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #136] @ 4-byte Reload + str r7, [sp, #132] @ 4-byte Spill + ldr r7, [sp, #192] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #140] @ 4-byte Reload + str r7, [sp, #136] @ 4-byte Spill + ldr r7, [sp, #196] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #144] @ 4-byte Reload + str r7, [sp, #140] @ 4-byte Spill + ldr r7, [sp, #200] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #148] @ 4-byte Reload + str r7, [sp, #144] @ 4-byte Spill + ldr r7, [sp, #204] @ 4-byte Reload + adcs r7, r7, r6 + ldr r6, [sp, #152] @ 4-byte Reload + str r7, [sp, #148] @ 4-byte Spill + ldr r7, [sp, #208] @ 4-byte Reload + adcs r7, r7, r6 + str r7, [sp, #152] @ 4-byte Spill + ldr r7, [sp, #212] @ 4-byte Reload + adc r7, r7, r11 + str r7, [sp, #156] @ 4-byte Spill + ldr r7, [sp, #124] @ 4-byte Reload + ands r7, r7, #1 + moveq r1, r5 + str r1, [r0, #68] + ldr r1, [sp, #96] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #160] @ 4-byte Reload + str r2, [r0, #72] + ldr r2, [sp, #120] @ 4-byte Reload + moveq r3, r1 + ldr r1, [sp, #172] @ 4-byte Reload + cmp r7, #0 + str r3, [r0, #76] + ldr r3, [sp, #156] @ 4-byte Reload + moveq r12, r1 + ldr r1, [sp, #176] @ 4-byte Reload + str r12, [r0, #80] + moveq lr, r1 + ldr r1, [sp, #180] @ 4-byte Reload + str lr, [r0, #84] + moveq r4, r1 + ldr r1, [sp, #184] @ 4-byte Reload + cmp r7, #0 + str r4, [r0, #88] + moveq r8, r1 + ldr r1, [sp, #188] @ 4-byte Reload + str r8, [r0, #92] + moveq r9, r1 + ldr r1, [sp, #128] @ 4-byte Reload + str r9, [r0, #96] + moveq r10, r1 + ldr r1, [sp, #164] @ 4-byte Reload + cmp r7, #0 + str r10, [r0, #100] + moveq r2, r1 + ldr r1, [sp, #168] @ 4-byte Reload + str r2, [r0, #104] + ldr r2, [sp, #132] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #192] @ 4-byte Reload + str r2, [r0, #108] + ldr r2, [sp, #136] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #196] @ 4-byte Reload + cmp r7, #0 + str r2, [r0, #112] + ldr r2, [sp, #140] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #200] @ 4-byte Reload + str r2, [r0, #116] + ldr r2, [sp, #144] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #204] @ 4-byte Reload + str r2, [r0, #120] + ldr r2, [sp, #148] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #208] @ 4-byte Reload + cmp r7, #0 + str r2, [r0, #124] + ldr r2, [sp, #152] @ 4-byte Reload + moveq r2, r1 + ldr r1, [sp, #212] @ 4-byte Reload + str r2, [r0, #128] + 
moveq r3, r1 + str r3, [r0, #132] + add sp, sp, #216 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end269: + .size mcl_fpDbl_sub17L, .Lfunc_end269-mcl_fpDbl_sub17L + .cantunwind + .fnend + + + .section ".note.GNU-stack","",%progbits + .eabi_attribute 30, 2 @ Tag_ABI_optimization_goals diff --git a/vendor/github.com/byzantine-lab/mcl/src/asm/low_arm.s b/vendor/github.com/byzantine-lab/mcl/src/asm/low_arm.s new file mode 100644 index 000000000..1ed2a1233 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/asm/low_arm.s @@ -0,0 +1,154 @@ + .arch armv7-a + + .align 2 + .global mcl_fp_addPre64 +mcl_fp_addPre64: + ldm r1, {r3, r12} + ldm r2, {r1, r2} + adds r1, r1, r3 + adc r2, r2, r12 + stm r0, {r1, r2} + bx lr + + + .align 2 + .global mcl_fp_addPre96 +mcl_fp_addPre96: + push {r4, lr} + ldm r1, {r1, r3, r12} + ldm r2, {r2, r4, lr} + adds r1, r1, r2 + adcs r3, r3, r4 + adc r12, r12, lr + stm r0, {r1, r3, r12} + pop {r4, lr} + bx lr + +# slower + .align 2 + .global mcl_fp_addPre96_2 +mcl_fp_addPre96_2: + ldr r3, [r1], #4 + ldr r12, [r2], #4 + adds r3, r3, r12 + str r3, [r0], #4 + + ldm r1, {r1, r3} + ldm r2, {r2, r12} + adcs r1, r1, r2 + adcs r3, r3, r12 + stm r0, {r1, r3} + bx lr + + .globl mcl_fp_addPre128 + .align 2 +mcl_fp_addPre128: + push {r4, lr} + ldm r1!, {r3, r4} + ldm r2!, {r12, lr} + adds r3, r3, r12 + adcs r4, r4, lr + stm r0!, {r3, r4} + ldm r1, {r3, r4} + ldm r2, {r12, lr} + adcs r3, r3, r12 + adcs r4, r4, lr + stm r0, {r3, r4} + pop {r4, lr} + bx lr + + # almost same + .globl mcl_fp_addPre128_2 + .align 2 +mcl_fp_addPre128_2: + push {r4, r5, r6, lr} + ldm r1, {r1, r3, r4, r5} + ldm r2, {r2, r6, r12, lr} + adds r1, r1, r2 + adcs r3, r3, r6 + adcs r4, r4, r12 + adcs r5, r5, lr + stm r0, {r1, r3, r4, r5} + pop {r4, r5, r6, lr} + bx lr + + .globl mcl_fp_addPre160 + .align 2 +mcl_fp_addPre160: + push {r4, lr} + ldm r1!, {r3, r4} + ldm r2!, {r12, lr} + adds r3, r3, r12 + adcs r4, r4, lr + stm r0!, {r3, r4} + ldm r1, {r1, r3, r4} + ldm r2, {r2, r12, lr} + adcs r1, r1, r2 + adcs r3, r3, r12 + adcs r4, r4, lr + stm r0, {r1, r3, r4} + pop {r4, lr} + bx lr + + .globl mcl_fp_addPre192 + .align 2 +mcl_fp_addPre192: + push {r4, r5, r6, lr} + ldm r1!, {r3, r4, r5} + ldm r2!, {r6, r12, lr} + adds r3, r3, r6 + adcs r4, r4, r12 + adcs r5, r5, lr + stm r0!, {r3, r4, r5} + + ldm r1, {r3, r4, r5} + ldm r2, {r6, r12, lr} + adcs r3, r3, r6 + adcs r4, r4, r12 + adcs r5, r5, lr + stm r0, {r3, r4, r5} + pop {r4, r5, r6, lr} + bx lr + + .globl mcl_fp_addPre224 + .align 2 +mcl_fp_addPre224: + push {r4, r5, r6, lr} + ldm r1!, {r3, r4, r5} + ldm r2!, {r6, r12, lr} + adds r3, r3, r6 + adcs r4, r4, r12 + adcs r5, r5, lr + stm r0!, {r3, r4, r5} + + ldm r1, {r1, r3, r4, r5} + ldm r2, {r2, r6, r12, lr} + adcs r1, r1, r2 + adcs r3, r3, r6 + adcs r4, r4, r12 + adcs r5, r5, lr + stm r0, {r1, r3, r4, r5} + pop {r4, r5, r6, lr} + bx lr + + .globl mcl_fp_addPre256 + .align 2 +mcl_fp_addPre256: + push {r4, r5, r6, r7, r8, lr} + ldm r1!, {r3, r4, r5, r6} + ldm r2!, {r7, r8, r12, lr} + adds r3, r3, r7 + adcs r4, r4, r8 + adcs r5, r5, r12 + adcs r6, r6, lr + stm r0!, {r3, r4, r5, r6} + + ldm r1, {r3, r4, r5, r6} + ldm r2, {r7, r8, r12, lr} + adcs r3, r3, r7 + adcs r4, r4, r8 + adcs r5, r5, r12 + adcs r6, r6, lr + stm r0, {r3, r4, r5, r6} + pop {r4, r5, r6, r7, r8, lr} + bx lr diff --git a/vendor/github.com/byzantine-lab/mcl/src/asm/low_x86-64.asm b/vendor/github.com/byzantine-lab/mcl/src/asm/low_x86-64.asm new file mode 100644 index 000000000..b09b9dcd3 --- /dev/null +++ 
b/vendor/github.com/byzantine-lab/mcl/src/asm/low_x86-64.asm @@ -0,0 +1,153 @@ + +; Linux rdi rsi rdx rcx +; Win rcx rdx r8 r9 + +%ifdef _WIN64 + %define p1org rcx + %define p2org rdx + %define p3org r8 + %define p4org r9 +%else + %define p1org rdi + %define p2org rsi + %define p3org rdx + %define p4org rcx +%endif + +%imacro proc 1 +global %1 +%1: +%endmacro + +segment .text + +%imacro addPre 1 + mov rax, [p2org] + add rax, [p3org] + mov [p1org], rax +%assign i 1 +%rep %1 + mov rax, [p2org + i * 8] + adc rax, [p3org + i * 8] + mov [p1org + i * 8], rax +%assign i (i+1) +%endrep + setc al + movzx eax, al + ret +%endmacro + +%imacro subNC 1 + mov rax, [p2org] + sub rax, [p3org] + mov [p1org], rax +%assign i 1 +%rep %1 + mov rax, [p2org + i * 8] + sbb rax, [p3org + i * 8] + mov [p1org + i * 8], rax +%assign i (i+1) +%endrep + setc al + movzx eax, al + ret +%endmacro + +proc mcl_fp_addPre64 + addPre 0 +proc mcl_fp_addPre128 + addPre 1 +proc mcl_fp_addPre192 + addPre 2 +proc mcl_fp_addPre256 + addPre 3 +proc mcl_fp_addPre320 + addPre 4 +proc mcl_fp_addPre384 + addPre 5 +proc mcl_fp_addPre448 + addPre 6 +proc mcl_fp_addPre512 + addPre 7 +proc mcl_fp_addPre576 + addPre 8 +proc mcl_fp_addPre640 + addPre 9 +proc mcl_fp_addPre704 + addPre 10 +proc mcl_fp_addPre768 + addPre 11 +proc mcl_fp_addPre832 + addPre 12 +proc mcl_fp_addPre896 + addPre 13 +proc mcl_fp_addPre960 + addPre 14 +proc mcl_fp_addPre1024 + addPre 15 +proc mcl_fp_addPre1088 + addPre 16 +proc mcl_fp_addPre1152 + addPre 17 +proc mcl_fp_addPre1216 + addPre 18 +proc mcl_fp_addPre1280 + addPre 19 +proc mcl_fp_addPre1344 + addPre 20 +proc mcl_fp_addPre1408 + addPre 21 +proc mcl_fp_addPre1472 + addPre 22 +proc mcl_fp_addPre1536 + addPre 23 + +proc mcl_fp_subNC64 + subNC 0 +proc mcl_fp_subNC128 + subNC 1 +proc mcl_fp_subNC192 + subNC 2 +proc mcl_fp_subNC256 + subNC 3 +proc mcl_fp_subNC320 + subNC 4 +proc mcl_fp_subNC384 + subNC 5 +proc mcl_fp_subNC448 + subNC 6 +proc mcl_fp_subNC512 + subNC 7 +proc mcl_fp_subNC576 + subNC 8 +proc mcl_fp_subNC640 + subNC 9 +proc mcl_fp_subNC704 + subNC 10 +proc mcl_fp_subNC768 + subNC 11 +proc mcl_fp_subNC832 + subNC 12 +proc mcl_fp_subNC896 + subNC 13 +proc mcl_fp_subNC960 + subNC 14 +proc mcl_fp_subNC1024 + subNC 15 +proc mcl_fp_subNC1088 + subNC 16 +proc mcl_fp_subNC1152 + subNC 17 +proc mcl_fp_subNC1216 + subNC 18 +proc mcl_fp_subNC1280 + subNC 19 +proc mcl_fp_subNC1344 + subNC 20 +proc mcl_fp_subNC1408 + subNC 21 +proc mcl_fp_subNC1472 + subNC 22 +proc mcl_fp_subNC1536 + subNC 23 + diff --git a/vendor/github.com/byzantine-lab/mcl/src/asm/low_x86.asm b/vendor/github.com/byzantine-lab/mcl/src/asm/low_x86.asm new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/byzantine-lab/mcl/src/asm/x86-64.bmi2.s b/vendor/github.com/byzantine-lab/mcl/src/asm/x86-64.bmi2.s new file mode 100644 index 000000000..e12174ac6 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/asm/x86-64.bmi2.s @@ -0,0 +1,14155 @@ + .text + .file "" + .globl makeNIST_P192Lbmi2 + .align 16, 0x90 + .type makeNIST_P192Lbmi2,@function +makeNIST_P192Lbmi2: # @makeNIST_P192Lbmi2 +# BB#0: + movq $-1, %rax + movq $-2, %rdx + movq $-1, %rcx + retq +.Lfunc_end0: + .size makeNIST_P192Lbmi2, .Lfunc_end0-makeNIST_P192Lbmi2 + + .globl mcl_fpDbl_mod_NIST_P192Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mod_NIST_P192Lbmi2,@function +mcl_fpDbl_mod_NIST_P192Lbmi2: # @mcl_fpDbl_mod_NIST_P192Lbmi2 +# BB#0: + pushq %r14 + pushq %rbx + movq 16(%rsi), %r10 + movq 24(%rsi), %r8 + movq 40(%rsi), %r9 + movq 8(%rsi), %rax + addq %r9, %rax + 
adcq $0, %r10 + sbbq %rcx, %rcx + andl $1, %ecx + movq 32(%rsi), %r11 + movq (%rsi), %r14 + addq %r8, %r14 + adcq %r11, %rax + adcq %r9, %r10 + adcq $0, %rcx + addq %r9, %r14 + adcq %r8, %rax + adcq %r11, %r10 + adcq $0, %rcx + addq %rcx, %r14 + adcq %rax, %rcx + adcq $0, %r10 + sbbq %rax, %rax + andl $1, %eax + movq %r14, %rsi + addq $1, %rsi + movq %rcx, %rdx + adcq $1, %rdx + movq %r10, %rbx + adcq $0, %rbx + adcq $-1, %rax + andl $1, %eax + cmovneq %r14, %rsi + movq %rsi, (%rdi) + testb %al, %al + cmovneq %rcx, %rdx + movq %rdx, 8(%rdi) + cmovneq %r10, %rbx + movq %rbx, 16(%rdi) + popq %rbx + popq %r14 + retq +.Lfunc_end1: + .size mcl_fpDbl_mod_NIST_P192Lbmi2, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192Lbmi2 + + .globl mcl_fp_sqr_NIST_P192Lbmi2 + .align 16, 0x90 + .type mcl_fp_sqr_NIST_P192Lbmi2,@function +mcl_fp_sqr_NIST_P192Lbmi2: # @mcl_fp_sqr_NIST_P192Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, -8(%rsp) # 8-byte Spill + movq 16(%rsi), %r8 + movq (%rsi), %rcx + movq 8(%rsi), %rsi + movq %r8, %rdx + mulxq %rsi, %r14, %rbx + movq %rbx, -16(%rsp) # 8-byte Spill + movq %rsi, %rdx + mulxq %rsi, %r13, %r15 + movq %rsi, %rdx + mulxq %rcx, %r12, %rsi + addq %rsi, %r13 + adcq %r14, %r15 + adcq $0, %rbx + movq %rcx, %rdx + mulxq %rcx, %r9, %rax + addq %r12, %rax + movq %r8, %rdx + mulxq %rcx, %rbp, %r11 + adcq %rbp, %rsi + movq %r11, %r10 + adcq $0, %r10 + addq %r12, %rax + adcq %r13, %rsi + adcq %r15, %r10 + adcq $0, %rbx + movq %r8, %rdx + mulxq %r8, %rcx, %rdi + addq %r14, %r11 + adcq -16(%rsp), %rcx # 8-byte Folded Reload + adcq $0, %rdi + addq %rbp, %rsi + adcq %r10, %r11 + adcq %rbx, %rcx + adcq $0, %rdi + addq %rdi, %rax + adcq $0, %rsi + sbbq %rdx, %rdx + andl $1, %edx + addq %r11, %r9 + adcq %rcx, %rax + adcq %rdi, %rsi + adcq $0, %rdx + addq %rdi, %r9 + adcq %r11, %rax + adcq %rcx, %rsi + adcq $0, %rdx + addq %rdx, %r9 + adcq %rax, %rdx + adcq $0, %rsi + sbbq %rax, %rax + andl $1, %eax + movq %r9, %rcx + addq $1, %rcx + movq %rdx, %rdi + adcq $1, %rdi + movq %rsi, %rbp + adcq $0, %rbp + adcq $-1, %rax + andl $1, %eax + cmovneq %r9, %rcx + movq -8(%rsp), %rbx # 8-byte Reload + movq %rcx, (%rbx) + testb %al, %al + cmovneq %rdx, %rdi + movq %rdi, 8(%rbx) + cmovneq %rsi, %rbp + movq %rbp, 16(%rbx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end2: + .size mcl_fp_sqr_NIST_P192Lbmi2, .Lfunc_end2-mcl_fp_sqr_NIST_P192Lbmi2 + + .globl mcl_fp_mulNIST_P192Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulNIST_P192Lbmi2,@function +mcl_fp_mulNIST_P192Lbmi2: # @mcl_fp_mulNIST_P192Lbmi2 +# BB#0: + pushq %r14 + pushq %rbx + subq $56, %rsp + movq %rdi, %r14 + leaq 8(%rsp), %rdi + callq mcl_fpDbl_mulPre3Lbmi2@PLT + movq 24(%rsp), %r9 + movq 32(%rsp), %r8 + movq 48(%rsp), %rdi + movq 16(%rsp), %rbx + addq %rdi, %rbx + adcq $0, %r9 + sbbq %rcx, %rcx + andl $1, %ecx + movq 40(%rsp), %rsi + movq 8(%rsp), %rdx + addq %r8, %rdx + adcq %rsi, %rbx + adcq %rdi, %r9 + adcq $0, %rcx + addq %rdi, %rdx + adcq %r8, %rbx + adcq %rsi, %r9 + adcq $0, %rcx + addq %rcx, %rdx + adcq %rbx, %rcx + adcq $0, %r9 + sbbq %rsi, %rsi + andl $1, %esi + movq %rdx, %rdi + addq $1, %rdi + movq %rcx, %rbx + adcq $1, %rbx + movq %r9, %rax + adcq $0, %rax + adcq $-1, %rsi + andl $1, %esi + cmovneq %rdx, %rdi + movq %rdi, (%r14) + testb %sil, %sil + cmovneq %rcx, %rbx + movq %rbx, 8(%r14) + cmovneq %r9, %rax + movq %rax, 16(%r14) + addq $56, %rsp + popq %rbx + popq %r14 + retq +.Lfunc_end3: + .size mcl_fp_mulNIST_P192Lbmi2, 
.Lfunc_end3-mcl_fp_mulNIST_P192Lbmi2 + + .globl mcl_fpDbl_mod_NIST_P521Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mod_NIST_P521Lbmi2,@function +mcl_fpDbl_mod_NIST_P521Lbmi2: # @mcl_fpDbl_mod_NIST_P521Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 120(%rsi), %r9 + movq 128(%rsi), %r14 + movq %r14, %r8 + shldq $55, %r9, %r8 + movq 112(%rsi), %r10 + shldq $55, %r10, %r9 + movq 104(%rsi), %r11 + shldq $55, %r11, %r10 + movq 96(%rsi), %r15 + shldq $55, %r15, %r11 + movq 88(%rsi), %r12 + shldq $55, %r12, %r15 + movq 80(%rsi), %rcx + shldq $55, %rcx, %r12 + movq 64(%rsi), %rbx + movq 72(%rsi), %rax + shldq $55, %rax, %rcx + shrq $9, %r14 + shldq $55, %rbx, %rax + andl $511, %ebx # imm = 0x1FF + addq (%rsi), %rax + adcq 8(%rsi), %rcx + adcq 16(%rsi), %r12 + adcq 24(%rsi), %r15 + adcq 32(%rsi), %r11 + adcq 40(%rsi), %r10 + adcq 48(%rsi), %r9 + adcq 56(%rsi), %r8 + adcq %r14, %rbx + movq %rbx, %rsi + shrq $9, %rsi + andl $1, %esi + addq %rax, %rsi + adcq $0, %rcx + adcq $0, %r12 + adcq $0, %r15 + adcq $0, %r11 + adcq $0, %r10 + adcq $0, %r9 + adcq $0, %r8 + adcq $0, %rbx + movq %rsi, %rax + andq %r12, %rax + andq %r15, %rax + andq %r11, %rax + andq %r10, %rax + andq %r9, %rax + andq %r8, %rax + movq %rbx, %rdx + orq $-512, %rdx # imm = 0xFFFFFFFFFFFFFE00 + andq %rax, %rdx + andq %rcx, %rdx + cmpq $-1, %rdx + je .LBB4_1 +# BB#3: # %nonzero + movq %rsi, (%rdi) + movq %rcx, 8(%rdi) + movq %r12, 16(%rdi) + movq %r15, 24(%rdi) + movq %r11, 32(%rdi) + movq %r10, 40(%rdi) + movq %r9, 48(%rdi) + movq %r8, 56(%rdi) + andl $511, %ebx # imm = 0x1FF + movq %rbx, 64(%rdi) + jmp .LBB4_2 +.LBB4_1: # %zero + movq $0, 64(%rdi) + movq $0, 56(%rdi) + movq $0, 48(%rdi) + movq $0, 40(%rdi) + movq $0, 32(%rdi) + movq $0, 24(%rdi) + movq $0, 16(%rdi) + movq $0, 8(%rdi) + movq $0, (%rdi) +.LBB4_2: # %zero + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq +.Lfunc_end4: + .size mcl_fpDbl_mod_NIST_P521Lbmi2, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521Lbmi2 + + .globl mcl_fp_mulUnitPre1Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre1Lbmi2,@function +mcl_fp_mulUnitPre1Lbmi2: # @mcl_fp_mulUnitPre1Lbmi2 +# BB#0: + mulxq (%rsi), %rcx, %rax + movq %rcx, (%rdi) + movq %rax, 8(%rdi) + retq +.Lfunc_end5: + .size mcl_fp_mulUnitPre1Lbmi2, .Lfunc_end5-mcl_fp_mulUnitPre1Lbmi2 + + .globl mcl_fpDbl_mulPre1Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre1Lbmi2,@function +mcl_fpDbl_mulPre1Lbmi2: # @mcl_fpDbl_mulPre1Lbmi2 +# BB#0: + movq (%rdx), %rdx + mulxq (%rsi), %rcx, %rax + movq %rcx, (%rdi) + movq %rax, 8(%rdi) + retq +.Lfunc_end6: + .size mcl_fpDbl_mulPre1Lbmi2, .Lfunc_end6-mcl_fpDbl_mulPre1Lbmi2 + + .globl mcl_fpDbl_sqrPre1Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre1Lbmi2,@function +mcl_fpDbl_sqrPre1Lbmi2: # @mcl_fpDbl_sqrPre1Lbmi2 +# BB#0: + movq (%rsi), %rdx + mulxq %rdx, %rcx, %rax + movq %rcx, (%rdi) + movq %rax, 8(%rdi) + retq +.Lfunc_end7: + .size mcl_fpDbl_sqrPre1Lbmi2, .Lfunc_end7-mcl_fpDbl_sqrPre1Lbmi2 + + .globl mcl_fp_mont1Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont1Lbmi2,@function +mcl_fp_mont1Lbmi2: # @mcl_fp_mont1Lbmi2 +# BB#0: + movq %rdx, %rax + movq (%rsi), %rdx + mulxq (%rax), %rsi, %r8 + movq -8(%rcx), %rdx + imulq %rsi, %rdx + movq (%rcx), %rcx + mulxq %rcx, %rdx, %rax + addq %rsi, %rdx + adcq %r8, %rax + sbbq %rdx, %rdx + andl $1, %edx + movq %rax, %rsi + subq %rcx, %rsi + sbbq $0, %rdx + testb $1, %dl + cmovneq %rax, %rsi + movq %rsi, (%rdi) + retq +.Lfunc_end8: + .size mcl_fp_mont1Lbmi2, .Lfunc_end8-mcl_fp_mont1Lbmi2 + + .globl mcl_fp_montNF1Lbmi2 + .align 16, 0x90 + .type 
mcl_fp_montNF1Lbmi2,@function +mcl_fp_montNF1Lbmi2: # @mcl_fp_montNF1Lbmi2 +# BB#0: + movq %rdx, %rax + movq (%rsi), %rdx + mulxq (%rax), %rsi, %r8 + movq -8(%rcx), %rdx + imulq %rsi, %rdx + movq (%rcx), %rcx + mulxq %rcx, %rdx, %rax + addq %rsi, %rdx + adcq %r8, %rax + movq %rax, %rdx + subq %rcx, %rdx + cmovsq %rax, %rdx + movq %rdx, (%rdi) + retq +.Lfunc_end9: + .size mcl_fp_montNF1Lbmi2, .Lfunc_end9-mcl_fp_montNF1Lbmi2 + + .globl mcl_fp_montRed1Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed1Lbmi2,@function +mcl_fp_montRed1Lbmi2: # @mcl_fp_montRed1Lbmi2 +# BB#0: + movq (%rsi), %rcx + movq -8(%rdx), %rax + imulq %rcx, %rax + movq (%rdx), %r8 + movq %rax, %rdx + mulxq %r8, %rax, %rdx + addq %rcx, %rax + adcq 8(%rsi), %rdx + sbbq %rax, %rax + andl $1, %eax + movq %rdx, %rcx + subq %r8, %rcx + sbbq $0, %rax + testb $1, %al + cmovneq %rdx, %rcx + movq %rcx, (%rdi) + retq +.Lfunc_end10: + .size mcl_fp_montRed1Lbmi2, .Lfunc_end10-mcl_fp_montRed1Lbmi2 + + .globl mcl_fp_addPre1Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre1Lbmi2,@function +mcl_fp_addPre1Lbmi2: # @mcl_fp_addPre1Lbmi2 +# BB#0: + movq (%rdx), %rax + addq (%rsi), %rax + movq %rax, (%rdi) + sbbq %rax, %rax + andl $1, %eax + retq +.Lfunc_end11: + .size mcl_fp_addPre1Lbmi2, .Lfunc_end11-mcl_fp_addPre1Lbmi2 + + .globl mcl_fp_subPre1Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre1Lbmi2,@function +mcl_fp_subPre1Lbmi2: # @mcl_fp_subPre1Lbmi2 +# BB#0: + movq (%rsi), %rcx + xorl %eax, %eax + subq (%rdx), %rcx + movq %rcx, (%rdi) + sbbq $0, %rax + andl $1, %eax + retq +.Lfunc_end12: + .size mcl_fp_subPre1Lbmi2, .Lfunc_end12-mcl_fp_subPre1Lbmi2 + + .globl mcl_fp_shr1_1Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_1Lbmi2,@function +mcl_fp_shr1_1Lbmi2: # @mcl_fp_shr1_1Lbmi2 +# BB#0: + movq (%rsi), %rax + shrq %rax + movq %rax, (%rdi) + retq +.Lfunc_end13: + .size mcl_fp_shr1_1Lbmi2, .Lfunc_end13-mcl_fp_shr1_1Lbmi2 + + .globl mcl_fp_add1Lbmi2 + .align 16, 0x90 + .type mcl_fp_add1Lbmi2,@function +mcl_fp_add1Lbmi2: # @mcl_fp_add1Lbmi2 +# BB#0: + movq (%rdx), %rax + addq (%rsi), %rax + movq %rax, (%rdi) + sbbq %rdx, %rdx + andl $1, %edx + subq (%rcx), %rax + sbbq $0, %rdx + testb $1, %dl + jne .LBB14_2 +# BB#1: # %nocarry + movq %rax, (%rdi) +.LBB14_2: # %carry + retq +.Lfunc_end14: + .size mcl_fp_add1Lbmi2, .Lfunc_end14-mcl_fp_add1Lbmi2 + + .globl mcl_fp_addNF1Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF1Lbmi2,@function +mcl_fp_addNF1Lbmi2: # @mcl_fp_addNF1Lbmi2 +# BB#0: + movq (%rdx), %rax + addq (%rsi), %rax + movq %rax, %rdx + subq (%rcx), %rdx + cmovsq %rax, %rdx + movq %rdx, (%rdi) + retq +.Lfunc_end15: + .size mcl_fp_addNF1Lbmi2, .Lfunc_end15-mcl_fp_addNF1Lbmi2 + + .globl mcl_fp_sub1Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub1Lbmi2,@function +mcl_fp_sub1Lbmi2: # @mcl_fp_sub1Lbmi2 +# BB#0: + movq (%rsi), %rax + xorl %esi, %esi + subq (%rdx), %rax + movq %rax, (%rdi) + sbbq $0, %rsi + testb $1, %sil + jne .LBB16_2 +# BB#1: # %nocarry + retq +.LBB16_2: # %carry + addq (%rcx), %rax + movq %rax, (%rdi) + retq +.Lfunc_end16: + .size mcl_fp_sub1Lbmi2, .Lfunc_end16-mcl_fp_sub1Lbmi2 + + .globl mcl_fp_subNF1Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF1Lbmi2,@function +mcl_fp_subNF1Lbmi2: # @mcl_fp_subNF1Lbmi2 +# BB#0: + movq (%rsi), %rax + subq (%rdx), %rax + movq %rax, %rdx + sarq $63, %rdx + andq (%rcx), %rdx + addq %rax, %rdx + movq %rdx, (%rdi) + retq +.Lfunc_end17: + .size mcl_fp_subNF1Lbmi2, .Lfunc_end17-mcl_fp_subNF1Lbmi2 + + .globl mcl_fpDbl_add1Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add1Lbmi2,@function +mcl_fpDbl_add1Lbmi2: # @mcl_fpDbl_add1Lbmi2 
+# BB#0: + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + movq %rax, (%rdi) + sbbq %rax, %rax + andl $1, %eax + movq %rdx, %rsi + subq (%rcx), %rsi + sbbq $0, %rax + testb $1, %al + cmovneq %rdx, %rsi + movq %rsi, 8(%rdi) + retq +.Lfunc_end18: + .size mcl_fpDbl_add1Lbmi2, .Lfunc_end18-mcl_fpDbl_add1Lbmi2 + + .globl mcl_fpDbl_sub1Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub1Lbmi2,@function +mcl_fpDbl_sub1Lbmi2: # @mcl_fpDbl_sub1Lbmi2 +# BB#0: + movq (%rsi), %rax + movq 8(%rsi), %r8 + xorl %esi, %esi + subq (%rdx), %rax + sbbq 8(%rdx), %r8 + movq %rax, (%rdi) + movl $0, %eax + sbbq $0, %rax + testb $1, %al + cmovneq (%rcx), %rsi + addq %r8, %rsi + movq %rsi, 8(%rdi) + retq +.Lfunc_end19: + .size mcl_fpDbl_sub1Lbmi2, .Lfunc_end19-mcl_fpDbl_sub1Lbmi2 + + .globl mcl_fp_mulUnitPre2Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre2Lbmi2,@function +mcl_fp_mulUnitPre2Lbmi2: # @mcl_fp_mulUnitPre2Lbmi2 +# BB#0: + mulxq 8(%rsi), %rax, %rcx + mulxq (%rsi), %rdx, %rsi + movq %rdx, (%rdi) + addq %rax, %rsi + movq %rsi, 8(%rdi) + adcq $0, %rcx + movq %rcx, 16(%rdi) + retq +.Lfunc_end20: + .size mcl_fp_mulUnitPre2Lbmi2, .Lfunc_end20-mcl_fp_mulUnitPre2Lbmi2 + + .globl mcl_fpDbl_mulPre2Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre2Lbmi2,@function +mcl_fpDbl_mulPre2Lbmi2: # @mcl_fpDbl_mulPre2Lbmi2 +# BB#0: + movq %rdx, %r10 + movq (%rsi), %r11 + movq 8(%rsi), %r8 + movq (%r10), %rsi + movq %r11, %rdx + mulxq %rsi, %rdx, %r9 + movq %rdx, (%rdi) + movq %r8, %rdx + mulxq %rsi, %rsi, %rax + addq %r9, %rsi + adcq $0, %rax + movq 8(%r10), %rcx + movq %r11, %rdx + mulxq %rcx, %rdx, %r9 + addq %rsi, %rdx + movq %rdx, 8(%rdi) + movq %r8, %rdx + mulxq %rcx, %rdx, %rcx + adcq %rax, %rdx + sbbq %rax, %rax + andl $1, %eax + addq %r9, %rdx + movq %rdx, 16(%rdi) + adcq %rcx, %rax + movq %rax, 24(%rdi) + retq +.Lfunc_end21: + .size mcl_fpDbl_mulPre2Lbmi2, .Lfunc_end21-mcl_fpDbl_mulPre2Lbmi2 + + .globl mcl_fpDbl_sqrPre2Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre2Lbmi2,@function +mcl_fpDbl_sqrPre2Lbmi2: # @mcl_fpDbl_sqrPre2Lbmi2 +# BB#0: + movq (%rsi), %rax + movq 8(%rsi), %rcx + movq %rax, %rdx + mulxq %rax, %rdx, %rsi + movq %rdx, (%rdi) + movq %rcx, %rdx + mulxq %rax, %rdx, %r8 + addq %rdx, %rsi + movq %r8, %rax + adcq $0, %rax + addq %rdx, %rsi + movq %rsi, 8(%rdi) + movq %rcx, %rdx + mulxq %rcx, %rdx, %rcx + adcq %rax, %rdx + sbbq %rax, %rax + andl $1, %eax + addq %r8, %rdx + movq %rdx, 16(%rdi) + adcq %rcx, %rax + movq %rax, 24(%rdi) + retq +.Lfunc_end22: + .size mcl_fpDbl_sqrPre2Lbmi2, .Lfunc_end22-mcl_fpDbl_sqrPre2Lbmi2 + + .globl mcl_fp_mont2Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont2Lbmi2,@function +mcl_fp_mont2Lbmi2: # @mcl_fp_mont2Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq (%rsi), %r8 + movq 8(%rsi), %r9 + movq (%rdx), %rax + movq 8(%rdx), %r11 + movq %r9, %rdx + mulxq %rax, %r10, %r13 + movq %r8, %rdx + mulxq %rax, %r14, %rsi + addq %r10, %rsi + adcq $0, %r13 + movq -8(%rcx), %rbp + movq (%rcx), %r10 + movq %r14, %rdx + imulq %rbp, %rdx + movq 8(%rcx), %r15 + mulxq %r15, %r12, %rcx + mulxq %r10, %rdx, %rbx + addq %r12, %rbx + adcq $0, %rcx + addq %r14, %rdx + adcq %rsi, %rbx + adcq %r13, %rcx + sbbq %rsi, %rsi + andl $1, %esi + movq %r11, %rdx + mulxq %r9, %r9, %r14 + movq %r11, %rdx + mulxq %r8, %r8, %rax + addq %r9, %rax + adcq $0, %r14 + addq %rbx, %r8 + adcq %rcx, %rax + adcq %rsi, %r14 + sbbq %rsi, %rsi + andl $1, %esi + imulq %r8, %rbp + movq %rbp, %rdx + mulxq %r15, %rcx, %rbx + movq %rbp, %rdx + mulxq 
%r10, %rdx, %rbp + addq %rcx, %rbp + adcq $0, %rbx + addq %r8, %rdx + adcq %rax, %rbp + adcq %r14, %rbx + adcq $0, %rsi + movq %rbp, %rax + subq %r10, %rax + movq %rbx, %rcx + sbbq %r15, %rcx + sbbq $0, %rsi + andl $1, %esi + cmovneq %rbx, %rcx + testb %sil, %sil + cmovneq %rbp, %rax + movq %rax, (%rdi) + movq %rcx, 8(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end23: + .size mcl_fp_mont2Lbmi2, .Lfunc_end23-mcl_fp_mont2Lbmi2 + + .globl mcl_fp_montNF2Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF2Lbmi2,@function +mcl_fp_montNF2Lbmi2: # @mcl_fp_montNF2Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq (%rsi), %r8 + movq 8(%rsi), %r9 + movq (%rdx), %rax + movq 8(%rdx), %r11 + movq %r9, %rdx + mulxq %rax, %r10, %rsi + movq %r8, %rdx + mulxq %rax, %r15, %r13 + addq %r10, %r13 + adcq $0, %rsi + movq -8(%rcx), %rbp + movq (%rcx), %r10 + movq %r15, %rdx + imulq %rbp, %rdx + movq 8(%rcx), %r14 + mulxq %r10, %rcx, %r12 + addq %r15, %rcx + mulxq %r14, %rbx, %rcx + adcq %r13, %rbx + adcq $0, %rsi + addq %r12, %rbx + adcq %rcx, %rsi + movq %r11, %rdx + mulxq %r9, %r9, %rcx + movq %r11, %rdx + mulxq %r8, %r8, %rax + addq %r9, %rax + adcq $0, %rcx + addq %rbx, %r8 + adcq %rsi, %rax + adcq $0, %rcx + imulq %r8, %rbp + movq %rbp, %rdx + mulxq %r14, %rbx, %rsi + movq %rbp, %rdx + mulxq %r10, %rbp, %rdx + addq %r8, %rbp + adcq %rax, %rbx + adcq $0, %rcx + addq %rdx, %rbx + adcq %rsi, %rcx + movq %rbx, %rax + subq %r10, %rax + movq %rcx, %rdx + sbbq %r14, %rdx + cmovsq %rbx, %rax + movq %rax, (%rdi) + cmovsq %rcx, %rdx + movq %rdx, 8(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end24: + .size mcl_fp_montNF2Lbmi2, .Lfunc_end24-mcl_fp_montNF2Lbmi2 + + .globl mcl_fp_montRed2Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed2Lbmi2,@function +mcl_fp_montRed2Lbmi2: # @mcl_fp_montRed2Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq -8(%rdx), %r15 + movq (%rdx), %r8 + movq (%rsi), %r10 + movq %r10, %rcx + imulq %r15, %rcx + movq 8(%rdx), %r9 + movq %rcx, %rdx + mulxq %r9, %r11, %r14 + movq %rcx, %rdx + mulxq %r8, %rcx, %rax + addq %r11, %rax + adcq $0, %r14 + movq 24(%rsi), %r11 + addq %r10, %rcx + adcq 8(%rsi), %rax + adcq 16(%rsi), %r14 + adcq $0, %r11 + sbbq %rcx, %rcx + andl $1, %ecx + imulq %rax, %r15 + movq %r15, %rdx + mulxq %r9, %r10, %rbx + movq %r15, %rdx + mulxq %r8, %rsi, %rdx + addq %r10, %rdx + adcq $0, %rbx + addq %rax, %rsi + adcq %r14, %rdx + adcq %r11, %rbx + adcq $0, %rcx + movq %rdx, %rax + subq %r8, %rax + movq %rbx, %rsi + sbbq %r9, %rsi + sbbq $0, %rcx + andl $1, %ecx + cmovneq %rbx, %rsi + testb %cl, %cl + cmovneq %rdx, %rax + movq %rax, (%rdi) + movq %rsi, 8(%rdi) + popq %rbx + popq %r14 + popq %r15 + retq +.Lfunc_end25: + .size mcl_fp_montRed2Lbmi2, .Lfunc_end25-mcl_fp_montRed2Lbmi2 + + .globl mcl_fp_addPre2Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre2Lbmi2,@function +mcl_fp_addPre2Lbmi2: # @mcl_fp_addPre2Lbmi2 +# BB#0: + movq (%rdx), %rax + movq 8(%rdx), %rcx + addq (%rsi), %rax + adcq 8(%rsi), %rcx + movq %rax, (%rdi) + movq %rcx, 8(%rdi) + sbbq %rax, %rax + andl $1, %eax + retq +.Lfunc_end26: + .size mcl_fp_addPre2Lbmi2, .Lfunc_end26-mcl_fp_addPre2Lbmi2 + + .globl mcl_fp_subPre2Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre2Lbmi2,@function +mcl_fp_subPre2Lbmi2: # @mcl_fp_subPre2Lbmi2 +# BB#0: + movq (%rsi), %rcx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rcx + sbbq 8(%rdx), %rsi + movq %rcx, (%rdi) + movq %rsi, 8(%rdi) + sbbq 
$0, %rax + andl $1, %eax + retq +.Lfunc_end27: + .size mcl_fp_subPre2Lbmi2, .Lfunc_end27-mcl_fp_subPre2Lbmi2 + + .globl mcl_fp_shr1_2Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_2Lbmi2,@function +mcl_fp_shr1_2Lbmi2: # @mcl_fp_shr1_2Lbmi2 +# BB#0: + movq (%rsi), %rax + movq 8(%rsi), %rcx + shrdq $1, %rcx, %rax + movq %rax, (%rdi) + shrq %rcx + movq %rcx, 8(%rdi) + retq +.Lfunc_end28: + .size mcl_fp_shr1_2Lbmi2, .Lfunc_end28-mcl_fp_shr1_2Lbmi2 + + .globl mcl_fp_add2Lbmi2 + .align 16, 0x90 + .type mcl_fp_add2Lbmi2,@function +mcl_fp_add2Lbmi2: # @mcl_fp_add2Lbmi2 +# BB#0: + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq $0, %rsi + testb $1, %sil + jne .LBB29_2 +# BB#1: # %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) +.LBB29_2: # %carry + retq +.Lfunc_end29: + .size mcl_fp_add2Lbmi2, .Lfunc_end29-mcl_fp_add2Lbmi2 + + .globl mcl_fp_addNF2Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF2Lbmi2,@function +mcl_fp_addNF2Lbmi2: # @mcl_fp_addNF2Lbmi2 +# BB#0: + movq (%rdx), %rax + movq 8(%rdx), %r8 + addq (%rsi), %rax + adcq 8(%rsi), %r8 + movq %rax, %rsi + subq (%rcx), %rsi + movq %r8, %rdx + sbbq 8(%rcx), %rdx + testq %rdx, %rdx + cmovsq %rax, %rsi + movq %rsi, (%rdi) + cmovsq %r8, %rdx + movq %rdx, 8(%rdi) + retq +.Lfunc_end30: + .size mcl_fp_addNF2Lbmi2, .Lfunc_end30-mcl_fp_addNF2Lbmi2 + + .globl mcl_fp_sub2Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub2Lbmi2,@function +mcl_fp_sub2Lbmi2: # @mcl_fp_sub2Lbmi2 +# BB#0: + movq (%rsi), %rax + movq 8(%rsi), %r8 + xorl %esi, %esi + subq (%rdx), %rax + sbbq 8(%rdx), %r8 + movq %rax, (%rdi) + movq %r8, 8(%rdi) + sbbq $0, %rsi + testb $1, %sil + jne .LBB31_2 +# BB#1: # %nocarry + retq +.LBB31_2: # %carry + movq 8(%rcx), %rdx + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %r8, %rdx + movq %rdx, 8(%rdi) + retq +.Lfunc_end31: + .size mcl_fp_sub2Lbmi2, .Lfunc_end31-mcl_fp_sub2Lbmi2 + + .globl mcl_fp_subNF2Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF2Lbmi2,@function +mcl_fp_subNF2Lbmi2: # @mcl_fp_subNF2Lbmi2 +# BB#0: + movq (%rsi), %r8 + movq 8(%rsi), %rsi + subq (%rdx), %r8 + sbbq 8(%rdx), %rsi + movq %rsi, %rdx + sarq $63, %rdx + movq 8(%rcx), %rax + andq %rdx, %rax + andq (%rcx), %rdx + addq %r8, %rdx + movq %rdx, (%rdi) + adcq %rsi, %rax + movq %rax, 8(%rdi) + retq +.Lfunc_end32: + .size mcl_fp_subNF2Lbmi2, .Lfunc_end32-mcl_fp_subNF2Lbmi2 + + .globl mcl_fpDbl_add2Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add2Lbmi2,@function +mcl_fpDbl_add2Lbmi2: # @mcl_fpDbl_add2Lbmi2 +# BB#0: + movq 24(%rdx), %r8 + movq 24(%rsi), %r9 + movq 16(%rdx), %r10 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r10 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + adcq %r8, %r9 + sbbq %rax, %rax + andl $1, %eax + movq %r10, %rdx + subq (%rcx), %rdx + movq %r9, %rsi + sbbq 8(%rcx), %rsi + sbbq $0, %rax + andl $1, %eax + cmovneq %r10, %rdx + movq %rdx, 16(%rdi) + testb %al, %al + cmovneq %r9, %rsi + movq %rsi, 24(%rdi) + retq +.Lfunc_end33: + .size mcl_fpDbl_add2Lbmi2, .Lfunc_end33-mcl_fpDbl_add2Lbmi2 + + .globl mcl_fpDbl_sub2Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub2Lbmi2,@function +mcl_fpDbl_sub2Lbmi2: # @mcl_fpDbl_sub2Lbmi2 +# BB#0: + movq 24(%rdx), %r8 + movq 24(%rsi), %r9 + movq 16(%rsi), %r10 + movq (%rsi), %r11 + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %r11 + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %r10 + movq %r11, (%rdi) + movq %rsi, 8(%rdi) + sbbq %r8, %r9 + movl $0, %edx 
+ sbbq $0, %rdx + andl $1, %edx + movq (%rcx), %rsi + cmoveq %rax, %rsi + testb %dl, %dl + cmovneq 8(%rcx), %rax + addq %r10, %rsi + movq %rsi, 16(%rdi) + adcq %r9, %rax + movq %rax, 24(%rdi) + retq +.Lfunc_end34: + .size mcl_fpDbl_sub2Lbmi2, .Lfunc_end34-mcl_fpDbl_sub2Lbmi2 + + .globl mcl_fp_mulUnitPre3Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre3Lbmi2,@function +mcl_fp_mulUnitPre3Lbmi2: # @mcl_fp_mulUnitPre3Lbmi2 +# BB#0: + mulxq 16(%rsi), %r8, %rcx + mulxq 8(%rsi), %r9, %rax + mulxq (%rsi), %rdx, %rsi + movq %rdx, (%rdi) + addq %r9, %rsi + movq %rsi, 8(%rdi) + adcq %r8, %rax + movq %rax, 16(%rdi) + adcq $0, %rcx + movq %rcx, 24(%rdi) + retq +.Lfunc_end35: + .size mcl_fp_mulUnitPre3Lbmi2, .Lfunc_end35-mcl_fp_mulUnitPre3Lbmi2 + + .globl mcl_fpDbl_mulPre3Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre3Lbmi2,@function +mcl_fpDbl_mulPre3Lbmi2: # @mcl_fpDbl_mulPre3Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq %rdx, %r9 + movq (%rsi), %r10 + movq 8(%rsi), %r8 + movq (%r9), %rax + movq %r10, %rdx + mulxq %rax, %rdx, %r14 + movq 16(%rsi), %r11 + movq %rdx, (%rdi) + movq %r11, %rdx + mulxq %rax, %rsi, %rbx + movq %r8, %rdx + mulxq %rax, %rax, %rcx + addq %r14, %rax + adcq %rsi, %rcx + adcq $0, %rbx + movq 8(%r9), %rsi + movq %r10, %rdx + mulxq %rsi, %rdx, %r14 + addq %rax, %rdx + movq %rdx, 8(%rdi) + movq %r11, %rdx + mulxq %rsi, %rax, %r15 + movq %r8, %rdx + mulxq %rsi, %rsi, %rdx + adcq %rcx, %rsi + adcq %rbx, %rax + sbbq %rcx, %rcx + andl $1, %ecx + addq %r14, %rsi + adcq %rdx, %rax + adcq %r15, %rcx + movq 16(%r9), %rbx + movq %r10, %rdx + mulxq %rbx, %rdx, %r9 + addq %rsi, %rdx + movq %rdx, 16(%rdi) + movq %r11, %rdx + mulxq %rbx, %rsi, %r10 + movq %r8, %rdx + mulxq %rbx, %rbx, %rdx + adcq %rax, %rbx + adcq %rcx, %rsi + sbbq %rax, %rax + andl $1, %eax + addq %r9, %rbx + movq %rbx, 24(%rdi) + adcq %rdx, %rsi + movq %rsi, 32(%rdi) + adcq %r10, %rax + movq %rax, 40(%rdi) + popq %rbx + popq %r14 + popq %r15 + retq +.Lfunc_end36: + .size mcl_fpDbl_mulPre3Lbmi2, .Lfunc_end36-mcl_fpDbl_mulPre3Lbmi2 + + .globl mcl_fpDbl_sqrPre3Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre3Lbmi2,@function +mcl_fpDbl_sqrPre3Lbmi2: # @mcl_fpDbl_sqrPre3Lbmi2 +# BB#0: + pushq %r14 + pushq %rbx + movq 16(%rsi), %r10 + movq (%rsi), %rcx + movq 8(%rsi), %rsi + movq %rcx, %rdx + mulxq %rcx, %rdx, %rax + movq %rdx, (%rdi) + movq %r10, %rdx + mulxq %rcx, %r11, %r8 + movq %rsi, %rdx + mulxq %rcx, %rdx, %r14 + addq %rdx, %rax + movq %r14, %rbx + adcq %r11, %rbx + movq %r8, %rcx + adcq $0, %rcx + addq %rdx, %rax + movq %rax, 8(%rdi) + movq %r10, %rdx + mulxq %rsi, %rax, %r9 + movq %rsi, %rdx + mulxq %rsi, %rsi, %rdx + adcq %rbx, %rsi + adcq %rax, %rcx + sbbq %rbx, %rbx + andl $1, %ebx + addq %r14, %rsi + adcq %rdx, %rcx + adcq %r9, %rbx + addq %r11, %rsi + movq %rsi, 16(%rdi) + movq %r10, %rdx + mulxq %r10, %rsi, %rdx + adcq %rax, %rcx + adcq %rbx, %rsi + sbbq %rax, %rax + andl $1, %eax + addq %r8, %rcx + movq %rcx, 24(%rdi) + adcq %r9, %rsi + movq %rsi, 32(%rdi) + adcq %rdx, %rax + movq %rax, 40(%rdi) + popq %rbx + popq %r14 + retq +.Lfunc_end37: + .size mcl_fpDbl_sqrPre3Lbmi2, .Lfunc_end37-mcl_fpDbl_sqrPre3Lbmi2 + + .globl mcl_fp_mont3Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont3Lbmi2,@function +mcl_fp_mont3Lbmi2: # @mcl_fp_mont3Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %r15 + movq %r15, -32(%rsp) # 8-byte Spill + movq %rdi, -24(%rsp) # 8-byte Spill + movq 16(%rsi), %rdi + movq %rdi, -56(%rsp) # 8-byte Spill + movq (%r15), %rax + movq %rdi, 
%rdx + mulxq %rax, %r14, %r11 + movq (%rsi), %r12 + movq %r12, -48(%rsp) # 8-byte Spill + movq 8(%rsi), %rdx + movq %rdx, -16(%rsp) # 8-byte Spill + mulxq %rax, %rbx, %r8 + movq %r12, %rdx + mulxq %rax, %r9, %rdi + addq %rbx, %rdi + adcq %r14, %r8 + adcq $0, %r11 + movq -8(%rcx), %r13 + movq (%rcx), %rbx + movq %rbx, -8(%rsp) # 8-byte Spill + movq %r9, %rdx + imulq %r13, %rdx + movq 8(%rcx), %rax + movq %rax, -40(%rsp) # 8-byte Spill + mulxq %rax, %rax, %r10 + mulxq %rbx, %rsi, %rbx + addq %rax, %rbx + movq 16(%rcx), %rbp + mulxq %rbp, %rcx, %rax + movq %rbp, %r14 + adcq %r10, %rcx + adcq $0, %rax + addq %r9, %rsi + adcq %rdi, %rbx + movq 8(%r15), %rdx + adcq %r8, %rcx + adcq %r11, %rax + sbbq %r9, %r9 + andl $1, %r9d + movq -56(%rsp), %r15 # 8-byte Reload + mulxq %r15, %r11, %rdi + mulxq -16(%rsp), %r10, %rsi # 8-byte Folded Reload + mulxq %r12, %r8, %rbp + addq %r10, %rbp + adcq %r11, %rsi + adcq $0, %rdi + addq %rbx, %r8 + adcq %rcx, %rbp + adcq %rax, %rsi + adcq %r9, %rdi + sbbq %r11, %r11 + andl $1, %r11d + movq %r8, %rdx + imulq %r13, %rdx + mulxq %r14, %r9, %rcx + movq %r14, %r12 + movq -40(%rsp), %r14 # 8-byte Reload + mulxq %r14, %r10, %rax + mulxq -8(%rsp), %rdx, %rbx # 8-byte Folded Reload + addq %r10, %rbx + adcq %r9, %rax + adcq $0, %rcx + addq %r8, %rdx + adcq %rbp, %rbx + adcq %rsi, %rax + adcq %rdi, %rcx + adcq $0, %r11 + movq -32(%rsp), %rdx # 8-byte Reload + movq 16(%rdx), %rdx + mulxq %r15, %r9, %rsi + mulxq -16(%rsp), %r10, %r15 # 8-byte Folded Reload + mulxq -48(%rsp), %r8, %rdi # 8-byte Folded Reload + addq %r10, %rdi + adcq %r9, %r15 + adcq $0, %rsi + addq %rbx, %r8 + adcq %rax, %rdi + adcq %rcx, %r15 + adcq %r11, %rsi + sbbq %rbx, %rbx + andl $1, %ebx + imulq %r8, %r13 + movq %r13, %rdx + mulxq %r12, %r9, %rbp + movq %r13, %rdx + mulxq %r14, %r10, %rax + movq %r13, %rdx + movq -8(%rsp), %rcx # 8-byte Reload + mulxq %rcx, %r11, %rdx + addq %r10, %rdx + adcq %r9, %rax + adcq $0, %rbp + addq %r8, %r11 + adcq %rdi, %rdx + adcq %r15, %rax + adcq %rsi, %rbp + adcq $0, %rbx + movq %rdx, %rsi + subq %rcx, %rsi + movq %rax, %rdi + sbbq %r14, %rdi + movq %rbp, %rcx + sbbq %r12, %rcx + sbbq $0, %rbx + andl $1, %ebx + cmovneq %rbp, %rcx + testb %bl, %bl + cmovneq %rdx, %rsi + movq -24(%rsp), %rdx # 8-byte Reload + movq %rsi, (%rdx) + cmovneq %rax, %rdi + movq %rdi, 8(%rdx) + movq %rcx, 16(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end38: + .size mcl_fp_mont3Lbmi2, .Lfunc_end38-mcl_fp_mont3Lbmi2 + + .globl mcl_fp_montNF3Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF3Lbmi2,@function +mcl_fp_montNF3Lbmi2: # @mcl_fp_montNF3Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq %rdx, %r10 + movq %r10, -16(%rsp) # 8-byte Spill + movq %rdi, -8(%rsp) # 8-byte Spill + movq (%rsi), %rcx + movq %rcx, -24(%rsp) # 8-byte Spill + movq 8(%rsi), %rdi + movq %rdi, -32(%rsp) # 8-byte Spill + movq (%r10), %rax + movq %rdi, %rdx + mulxq %rax, %rbx, %r14 + movq %rcx, %rdx + mulxq %rax, %r15, %r12 + movq 16(%rsi), %r11 + addq %rbx, %r12 + movq %r11, %rdx + mulxq %rax, %rsi, %rbx + adcq %r14, %rsi + adcq $0, %rbx + movq -8(%r8), %r9 + movq (%r8), %r14 + movq %r15, %rdx + imulq %r9, %rdx + mulxq %r14, %rbp, %r13 + addq %r15, %rbp + movq 8(%r8), %r15 + mulxq %r15, %rdi, %rbp + adcq %r12, %rdi + movq 16(%r8), %r12 + mulxq %r12, %rax, %r8 + adcq %rsi, %rax + adcq $0, %rbx + addq %r13, %rdi + movq 8(%r10), %rdx + adcq %rbp, %rax + adcq %r8, %rbx + movq -32(%rsp), %r10 # 8-byte Reload + mulxq 
%r10, %rsi, %r8 + mulxq %rcx, %r13, %rbp + addq %rsi, %rbp + mulxq %r11, %rcx, %rsi + adcq %r8, %rcx + adcq $0, %rsi + addq %rdi, %r13 + adcq %rax, %rbp + adcq %rbx, %rcx + adcq $0, %rsi + movq %r13, %rdx + imulq %r9, %rdx + mulxq %r14, %rdi, %rbx + addq %r13, %rdi + mulxq %r15, %rax, %rdi + adcq %rbp, %rax + mulxq %r12, %rbp, %rdx + adcq %rcx, %rbp + adcq $0, %rsi + addq %rbx, %rax + adcq %rdi, %rbp + adcq %rdx, %rsi + movq -16(%rsp), %rcx # 8-byte Reload + movq 16(%rcx), %rdx + mulxq %r10, %rbx, %r8 + mulxq -24(%rsp), %r10, %rdi # 8-byte Folded Reload + addq %rbx, %rdi + mulxq %r11, %rcx, %rbx + adcq %r8, %rcx + adcq $0, %rbx + addq %rax, %r10 + adcq %rbp, %rdi + adcq %rsi, %rcx + adcq $0, %rbx + imulq %r10, %r9 + movq %r9, %rdx + mulxq %r14, %rdx, %r8 + addq %r10, %rdx + movq %r9, %rdx + mulxq %r12, %rbp, %rsi + movq %r9, %rdx + mulxq %r15, %rax, %rdx + adcq %rdi, %rax + adcq %rcx, %rbp + adcq $0, %rbx + addq %r8, %rax + adcq %rdx, %rbp + adcq %rsi, %rbx + movq %rax, %rcx + subq %r14, %rcx + movq %rbp, %rdx + sbbq %r15, %rdx + movq %rbx, %rsi + sbbq %r12, %rsi + movq %rsi, %rdi + sarq $63, %rdi + cmovsq %rax, %rcx + movq -8(%rsp), %rax # 8-byte Reload + movq %rcx, (%rax) + cmovsq %rbp, %rdx + movq %rdx, 8(%rax) + cmovsq %rbx, %rsi + movq %rsi, 16(%rax) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end39: + .size mcl_fp_montNF3Lbmi2, .Lfunc_end39-mcl_fp_montNF3Lbmi2 + + .globl mcl_fp_montRed3Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed3Lbmi2,@function +mcl_fp_montRed3Lbmi2: # @mcl_fp_montRed3Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rdi, -8(%rsp) # 8-byte Spill + movq -8(%rcx), %r15 + movq (%rcx), %r9 + movq (%rsi), %rbx + movq %rbx, %rdx + imulq %r15, %rdx + movq 16(%rcx), %rax + movq %rax, -16(%rsp) # 8-byte Spill + mulxq %rax, %r14, %r11 + movq %rax, %rbp + movq 8(%rcx), %r10 + mulxq %r10, %rax, %r13 + mulxq %r9, %rdx, %rcx + addq %rax, %rcx + adcq %r14, %r13 + adcq $0, %r11 + movq 40(%rsi), %r14 + movq 32(%rsi), %r12 + addq %rbx, %rdx + adcq 8(%rsi), %rcx + adcq 16(%rsi), %r13 + adcq 24(%rsi), %r11 + adcq $0, %r12 + adcq $0, %r14 + sbbq %rsi, %rsi + andl $1, %esi + movq %rcx, %rdx + imulq %r15, %rdx + mulxq %rbp, %rbp, %rdi + mulxq %r10, %r8, %rbx + mulxq %r9, %rdx, %rax + addq %r8, %rax + adcq %rbp, %rbx + adcq $0, %rdi + addq %rcx, %rdx + adcq %r13, %rax + adcq %r11, %rbx + adcq %r12, %rdi + adcq $0, %r14 + adcq $0, %rsi + imulq %rax, %r15 + movq %r15, %rdx + movq -16(%rsp), %r13 # 8-byte Reload + mulxq %r13, %r8, %rcx + movq %r15, %rdx + mulxq %r10, %r11, %r12 + movq %r15, %rdx + mulxq %r9, %r15, %rdx + addq %r11, %rdx + adcq %r8, %r12 + adcq $0, %rcx + addq %rax, %r15 + adcq %rbx, %rdx + adcq %rdi, %r12 + adcq %r14, %rcx + adcq $0, %rsi + movq %rdx, %rax + subq %r9, %rax + movq %r12, %rdi + sbbq %r10, %rdi + movq %rcx, %rbp + sbbq %r13, %rbp + sbbq $0, %rsi + andl $1, %esi + cmovneq %rcx, %rbp + testb %sil, %sil + cmovneq %rdx, %rax + movq -8(%rsp), %rcx # 8-byte Reload + movq %rax, (%rcx) + cmovneq %r12, %rdi + movq %rdi, 8(%rcx) + movq %rbp, 16(%rcx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end40: + .size mcl_fp_montRed3Lbmi2, .Lfunc_end40-mcl_fp_montRed3Lbmi2 + + .globl mcl_fp_addPre3Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre3Lbmi2,@function +mcl_fp_addPre3Lbmi2: # @mcl_fp_addPre3Lbmi2 +# BB#0: + movq 16(%rdx), %rax + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + adcq 
16(%rsi), %rax + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %rax, 16(%rdi) + sbbq %rax, %rax + andl $1, %eax + retq +.Lfunc_end41: + .size mcl_fp_addPre3Lbmi2, .Lfunc_end41-mcl_fp_addPre3Lbmi2 + + .globl mcl_fp_subPre3Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre3Lbmi2,@function +mcl_fp_subPre3Lbmi2: # @mcl_fp_subPre3Lbmi2 +# BB#0: + movq 16(%rsi), %r8 + movq (%rsi), %rcx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rcx + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %r8 + movq %rcx, (%rdi) + movq %rsi, 8(%rdi) + movq %r8, 16(%rdi) + sbbq $0, %rax + andl $1, %eax + retq +.Lfunc_end42: + .size mcl_fp_subPre3Lbmi2, .Lfunc_end42-mcl_fp_subPre3Lbmi2 + + .globl mcl_fp_shr1_3Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_3Lbmi2,@function +mcl_fp_shr1_3Lbmi2: # @mcl_fp_shr1_3Lbmi2 +# BB#0: + movq 16(%rsi), %rax + movq (%rsi), %rcx + movq 8(%rsi), %rdx + shrdq $1, %rdx, %rcx + movq %rcx, (%rdi) + shrdq $1, %rax, %rdx + movq %rdx, 8(%rdi) + shrq %rax + movq %rax, 16(%rdi) + retq +.Lfunc_end43: + .size mcl_fp_shr1_3Lbmi2, .Lfunc_end43-mcl_fp_shr1_3Lbmi2 + + .globl mcl_fp_add3Lbmi2 + .align 16, 0x90 + .type mcl_fp_add3Lbmi2,@function +mcl_fp_add3Lbmi2: # @mcl_fp_add3Lbmi2 +# BB#0: + movq 16(%rdx), %r8 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r8 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r8, 16(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne .LBB44_2 +# BB#1: # %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r8, 16(%rdi) +.LBB44_2: # %carry + retq +.Lfunc_end44: + .size mcl_fp_add3Lbmi2, .Lfunc_end44-mcl_fp_add3Lbmi2 + + .globl mcl_fp_addNF3Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF3Lbmi2,@function +mcl_fp_addNF3Lbmi2: # @mcl_fp_addNF3Lbmi2 +# BB#0: + movq 16(%rdx), %r8 + movq (%rdx), %r10 + movq 8(%rdx), %r9 + addq (%rsi), %r10 + adcq 8(%rsi), %r9 + adcq 16(%rsi), %r8 + movq %r10, %rsi + subq (%rcx), %rsi + movq %r9, %rdx + sbbq 8(%rcx), %rdx + movq %r8, %rax + sbbq 16(%rcx), %rax + movq %rax, %rcx + sarq $63, %rcx + cmovsq %r10, %rsi + movq %rsi, (%rdi) + cmovsq %r9, %rdx + movq %rdx, 8(%rdi) + cmovsq %r8, %rax + movq %rax, 16(%rdi) + retq +.Lfunc_end45: + .size mcl_fp_addNF3Lbmi2, .Lfunc_end45-mcl_fp_addNF3Lbmi2 + + .globl mcl_fp_sub3Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub3Lbmi2,@function +mcl_fp_sub3Lbmi2: # @mcl_fp_sub3Lbmi2 +# BB#0: + movq 16(%rsi), %r8 + movq (%rsi), %rax + movq 8(%rsi), %r9 + xorl %esi, %esi + subq (%rdx), %rax + sbbq 8(%rdx), %r9 + sbbq 16(%rdx), %r8 + movq %rax, (%rdi) + movq %r9, 8(%rdi) + movq %r8, 16(%rdi) + sbbq $0, %rsi + testb $1, %sil + jne .LBB46_2 +# BB#1: # %nocarry + retq +.LBB46_2: # %carry + movq 8(%rcx), %rdx + movq 16(%rcx), %rsi + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %r9, %rdx + movq %rdx, 8(%rdi) + adcq %r8, %rsi + movq %rsi, 16(%rdi) + retq +.Lfunc_end46: + .size mcl_fp_sub3Lbmi2, .Lfunc_end46-mcl_fp_sub3Lbmi2 + + .globl mcl_fp_subNF3Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF3Lbmi2,@function +mcl_fp_subNF3Lbmi2: # @mcl_fp_subNF3Lbmi2 +# BB#0: + movq 16(%rsi), %r10 + movq (%rsi), %r8 + movq 8(%rsi), %r9 + subq (%rdx), %r8 + sbbq 8(%rdx), %r9 + sbbq 16(%rdx), %r10 + movq %r10, %rdx + sarq $63, %rdx + movq %rdx, %rsi + shldq $1, %r10, %rsi + andq (%rcx), %rsi + movq 16(%rcx), %rax + andq %rdx, %rax + andq 8(%rcx), %rdx + addq %r8, %rsi + movq %rsi, (%rdi) + adcq %r9, %rdx + movq %rdx, 8(%rdi) + adcq %r10, %rax + movq %rax, 16(%rdi) + retq +.Lfunc_end47: + .size mcl_fp_subNF3Lbmi2, 
.Lfunc_end47-mcl_fp_subNF3Lbmi2 + + .globl mcl_fpDbl_add3Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add3Lbmi2,@function +mcl_fpDbl_add3Lbmi2: # @mcl_fpDbl_add3Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 40(%rdx), %r10 + movq 40(%rsi), %r8 + movq 32(%rdx), %r11 + movq 24(%rdx), %r14 + movq 24(%rsi), %r15 + movq 32(%rsi), %r9 + movq 16(%rdx), %rbx + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rbx + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %rbx, 16(%rdi) + adcq %r14, %r15 + adcq %r11, %r9 + adcq %r10, %r8 + sbbq %rax, %rax + andl $1, %eax + movq %r15, %rdx + subq (%rcx), %rdx + movq %r9, %rsi + sbbq 8(%rcx), %rsi + movq %r8, %rbx + sbbq 16(%rcx), %rbx + sbbq $0, %rax + andl $1, %eax + cmovneq %r15, %rdx + movq %rdx, 24(%rdi) + testb %al, %al + cmovneq %r9, %rsi + movq %rsi, 32(%rdi) + cmovneq %r8, %rbx + movq %rbx, 40(%rdi) + popq %rbx + popq %r14 + popq %r15 + retq +.Lfunc_end48: + .size mcl_fpDbl_add3Lbmi2, .Lfunc_end48-mcl_fpDbl_add3Lbmi2 + + .globl mcl_fpDbl_sub3Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub3Lbmi2,@function +mcl_fpDbl_sub3Lbmi2: # @mcl_fpDbl_sub3Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 40(%rdx), %r10 + movq 40(%rsi), %r8 + movq 32(%rsi), %r9 + movq 24(%rsi), %r11 + movq 16(%rsi), %r14 + movq (%rsi), %rbx + movq 8(%rsi), %rax + xorl %esi, %esi + subq (%rdx), %rbx + sbbq 8(%rdx), %rax + movq 24(%rdx), %r15 + movq 32(%rdx), %r12 + sbbq 16(%rdx), %r14 + movq %rbx, (%rdi) + movq %rax, 8(%rdi) + movq %r14, 16(%rdi) + sbbq %r15, %r11 + sbbq %r12, %r9 + sbbq %r10, %r8 + movl $0, %eax + sbbq $0, %rax + andl $1, %eax + movq (%rcx), %rdx + cmoveq %rsi, %rdx + testb %al, %al + movq 16(%rcx), %rax + cmoveq %rsi, %rax + cmovneq 8(%rcx), %rsi + addq %r11, %rdx + movq %rdx, 24(%rdi) + adcq %r9, %rsi + movq %rsi, 32(%rdi) + adcq %r8, %rax + movq %rax, 40(%rdi) + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq +.Lfunc_end49: + .size mcl_fpDbl_sub3Lbmi2, .Lfunc_end49-mcl_fpDbl_sub3Lbmi2 + + .globl mcl_fp_mulUnitPre4Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre4Lbmi2,@function +mcl_fp_mulUnitPre4Lbmi2: # @mcl_fp_mulUnitPre4Lbmi2 +# BB#0: + mulxq 24(%rsi), %r8, %r11 + mulxq 16(%rsi), %r9, %rax + mulxq 8(%rsi), %r10, %rcx + mulxq (%rsi), %rdx, %rsi + movq %rdx, (%rdi) + addq %r10, %rsi + movq %rsi, 8(%rdi) + adcq %r9, %rcx + movq %rcx, 16(%rdi) + adcq %r8, %rax + movq %rax, 24(%rdi) + adcq $0, %r11 + movq %r11, 32(%rdi) + retq +.Lfunc_end50: + .size mcl_fp_mulUnitPre4Lbmi2, .Lfunc_end50-mcl_fp_mulUnitPre4Lbmi2 + + .globl mcl_fpDbl_mulPre4Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre4Lbmi2,@function +mcl_fpDbl_mulPre4Lbmi2: # @mcl_fpDbl_mulPre4Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq (%rsi), %r14 + movq 8(%rsi), %r10 + movq (%rdx), %rcx + movq %rdx, %rbp + movq %r14, %rdx + mulxq %rcx, %rdx, %r15 + movq 24(%rsi), %r11 + movq 16(%rsi), %r9 + movq %rdx, (%rdi) + movq %r10, %rdx + mulxq %rcx, %rbx, %r12 + addq %r15, %rbx + movq %r9, %rdx + mulxq %rcx, %r13, %r15 + adcq %r12, %r13 + movq %r11, %rdx + mulxq %rcx, %rcx, %r12 + adcq %r15, %rcx + adcq $0, %r12 + movq 8(%rbp), %rax + movq %r14, %rdx + mulxq %rax, %r8, %rdx + movq %rdx, -8(%rsp) # 8-byte Spill + addq %rbx, %r8 + movq %r10, %rdx + mulxq %rax, %r15, %rdx + movq %rdx, -16(%rsp) # 8-byte Spill + adcq %r13, %r15 + movq %r9, %rdx + mulxq %rax, %rbx, %r13 + adcq %rcx, %rbx + movq %r11, %rdx + mulxq %rax, %rcx, %rax + adcq %r12, %rcx + sbbq %r12, %r12 + andl $1, %r12d + 
addq -8(%rsp), %r15 # 8-byte Folded Reload + adcq -16(%rsp), %rbx # 8-byte Folded Reload + adcq %r13, %rcx + movq %r8, 8(%rdi) + adcq %rax, %r12 + movq %rbp, %r13 + movq 16(%r13), %rax + movq %r14, %rdx + mulxq %rax, %rdx, %r8 + addq %r15, %rdx + movq %rdx, 16(%rdi) + movq %r10, %rdx + mulxq %rax, %rbp, %r10 + adcq %rbx, %rbp + movq %r11, %rdx + mulxq %rax, %r14, %r11 + movq %r9, %rdx + mulxq %rax, %r15, %rdx + adcq %rcx, %r15 + adcq %r12, %r14 + sbbq %rcx, %rcx + andl $1, %ecx + addq %r8, %rbp + adcq %r10, %r15 + adcq %rdx, %r14 + adcq %r11, %rcx + movq 24(%r13), %rdx + mulxq 24(%rsi), %rbx, %r8 + mulxq (%rsi), %rax, %r9 + addq %rbp, %rax + mulxq 16(%rsi), %rbp, %r10 + mulxq 8(%rsi), %rsi, %rdx + movq %rax, 24(%rdi) + adcq %r15, %rsi + adcq %r14, %rbp + adcq %rcx, %rbx + sbbq %rax, %rax + andl $1, %eax + addq %r9, %rsi + movq %rsi, 32(%rdi) + adcq %rdx, %rbp + movq %rbp, 40(%rdi) + adcq %r10, %rbx + movq %rbx, 48(%rdi) + adcq %r8, %rax + movq %rax, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end51: + .size mcl_fpDbl_mulPre4Lbmi2, .Lfunc_end51-mcl_fpDbl_mulPre4Lbmi2 + + .globl mcl_fpDbl_sqrPre4Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre4Lbmi2,@function +mcl_fpDbl_sqrPre4Lbmi2: # @mcl_fpDbl_sqrPre4Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 24(%rsi), %r8 + movq 16(%rsi), %r9 + movq (%rsi), %rcx + movq 8(%rsi), %rax + movq %rcx, %rdx + mulxq %rcx, %rdx, %r11 + movq %rdx, (%rdi) + movq %r9, %rdx + mulxq %rcx, %rbp, %r10 + movq %rbp, -16(%rsp) # 8-byte Spill + movq %r10, -8(%rsp) # 8-byte Spill + movq %rax, %rdx + mulxq %rcx, %r12, %r15 + addq %r12, %r11 + movq %r15, %rbx + adcq %rbp, %rbx + movq %r8, %rdx + mulxq %rcx, %rcx, %r13 + adcq %r10, %rcx + adcq $0, %r13 + addq %r12, %r11 + movq %rax, %rdx + mulxq %rax, %rbp, %r12 + adcq %rbx, %rbp + movq %r8, %rdx + mulxq %rax, %r10, %rbx + movq %r9, %rdx + mulxq %rax, %r14, %rdx + adcq %r14, %rcx + adcq %r13, %r10 + sbbq %rax, %rax + andl $1, %eax + addq %r15, %rbp + adcq %r12, %rcx + adcq %rdx, %r10 + movq %rdx, %r12 + adcq %rbx, %rax + movq %r11, 8(%rdi) + addq -16(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 16(%rdi) + movq %r8, %rdx + mulxq %r9, %r11, %r8 + movq %r9, %rdx + mulxq %r9, %r15, %rdx + adcq %r14, %rcx + adcq %r10, %r15 + adcq %rax, %r11 + sbbq %rax, %rax + andl $1, %eax + addq -8(%rsp), %rcx # 8-byte Folded Reload + adcq %r12, %r15 + adcq %rdx, %r11 + adcq %r8, %rax + movq 24(%rsi), %rdx + mulxq 16(%rsi), %rbx, %r8 + mulxq 8(%rsi), %rbp, %r9 + mulxq (%rsi), %rsi, %r10 + addq %rcx, %rsi + movq %rsi, 24(%rdi) + adcq %r15, %rbp + adcq %r11, %rbx + mulxq %rdx, %rdx, %rcx + adcq %rax, %rdx + sbbq %rax, %rax + andl $1, %eax + addq %r10, %rbp + movq %rbp, 32(%rdi) + adcq %r9, %rbx + movq %rbx, 40(%rdi) + adcq %r8, %rdx + movq %rdx, 48(%rdi) + adcq %rcx, %rax + movq %rax, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end52: + .size mcl_fpDbl_sqrPre4Lbmi2, .Lfunc_end52-mcl_fpDbl_sqrPre4Lbmi2 + + .globl mcl_fp_mont4Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont4Lbmi2,@function +mcl_fp_mont4Lbmi2: # @mcl_fp_mont4Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, -32(%rsp) # 8-byte Spill + movq %rdi, -88(%rsp) # 8-byte Spill + movq 24(%rsi), %rdi + movq %rdi, -40(%rsp) # 8-byte Spill + movq (%rdx), %rax + movq %rdi, %rdx + mulxq %rax, %r10, %r15 + movq 16(%rsi), %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + mulxq %rax, %rbx, %r11 + 
movq (%rsi), %rdi + movq %rdi, -56(%rsp) # 8-byte Spill + movq 8(%rsi), %rdx + movq %rdx, -64(%rsp) # 8-byte Spill + mulxq %rax, %rbp, %r14 + movq %rdi, %rdx + mulxq %rax, %r13, %r12 + addq %rbp, %r12 + adcq %rbx, %r14 + adcq %r10, %r11 + adcq $0, %r15 + movq -8(%rcx), %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq (%rcx), %rdi + movq %rdi, -24(%rsp) # 8-byte Spill + movq %r13, %rdx + imulq %rax, %rdx + movq 24(%rcx), %rsi + movq %rsi, -72(%rsp) # 8-byte Spill + movq 16(%rcx), %rbp + movq %rbp, -8(%rsp) # 8-byte Spill + movq 8(%rcx), %rax + movq %rax, -80(%rsp) # 8-byte Spill + mulxq %rsi, %r10, %r8 + mulxq %rbp, %r9, %rbx + mulxq %rax, %rsi, %rcx + mulxq %rdi, %rdx, %rbp + addq %rsi, %rbp + adcq %r9, %rcx + adcq %r10, %rbx + adcq $0, %r8 + addq %r13, %rdx + adcq %r12, %rbp + adcq %r14, %rcx + adcq %r11, %rbx + adcq %r15, %r8 + sbbq %rax, %rax + andl $1, %eax + movq -32(%rsp), %rdx # 8-byte Reload + movq 8(%rdx), %rdx + mulxq -40(%rsp), %r12, %r14 # 8-byte Folded Reload + mulxq -48(%rsp), %r15, %r11 # 8-byte Folded Reload + mulxq -64(%rsp), %r9, %rdi # 8-byte Folded Reload + mulxq -56(%rsp), %r10, %rsi # 8-byte Folded Reload + addq %r9, %rsi + adcq %r15, %rdi + adcq %r12, %r11 + adcq $0, %r14 + addq %rbp, %r10 + adcq %rcx, %rsi + adcq %rbx, %rdi + adcq %r8, %r11 + adcq %rax, %r14 + sbbq %rbx, %rbx + andl $1, %ebx + movq %r10, %rdx + imulq -16(%rsp), %rdx # 8-byte Folded Reload + mulxq -72(%rsp), %r15, %r9 # 8-byte Folded Reload + mulxq -8(%rsp), %r12, %r8 # 8-byte Folded Reload + movq -80(%rsp), %r13 # 8-byte Reload + mulxq %r13, %rbp, %rcx + mulxq -24(%rsp), %rdx, %rax # 8-byte Folded Reload + addq %rbp, %rax + adcq %r12, %rcx + adcq %r15, %r8 + adcq $0, %r9 + addq %r10, %rdx + adcq %rsi, %rax + adcq %rdi, %rcx + adcq %r11, %r8 + adcq %r14, %r9 + adcq $0, %rbx + movq -32(%rsp), %rdx # 8-byte Reload + movq 16(%rdx), %rdx + mulxq -40(%rsp), %r15, %r11 # 8-byte Folded Reload + mulxq -48(%rsp), %r12, %r14 # 8-byte Folded Reload + mulxq -64(%rsp), %rsi, %rbp # 8-byte Folded Reload + mulxq -56(%rsp), %r10, %rdi # 8-byte Folded Reload + addq %rsi, %rdi + adcq %r12, %rbp + adcq %r15, %r14 + adcq $0, %r11 + addq %rax, %r10 + adcq %rcx, %rdi + adcq %r8, %rbp + adcq %r9, %r14 + adcq %rbx, %r11 + sbbq %rbx, %rbx + movq %r10, %rdx + imulq -16(%rsp), %rdx # 8-byte Folded Reload + mulxq %r13, %rcx, %rsi + mulxq -24(%rsp), %r8, %rax # 8-byte Folded Reload + addq %rcx, %rax + mulxq -8(%rsp), %rcx, %r15 # 8-byte Folded Reload + adcq %rsi, %rcx + movq -72(%rsp), %r13 # 8-byte Reload + mulxq %r13, %r9, %rsi + adcq %r15, %r9 + adcq $0, %rsi + andl $1, %ebx + addq %r10, %r8 + adcq %rdi, %rax + adcq %rbp, %rcx + adcq %r14, %r9 + adcq %r11, %rsi + adcq $0, %rbx + movq -32(%rsp), %rdx # 8-byte Reload + movq 24(%rdx), %rdx + mulxq -40(%rsp), %r11, %r8 # 8-byte Folded Reload + mulxq -48(%rsp), %r15, %rdi # 8-byte Folded Reload + mulxq -64(%rsp), %r12, %r14 # 8-byte Folded Reload + mulxq -56(%rsp), %r10, %rbp # 8-byte Folded Reload + addq %r12, %rbp + adcq %r15, %r14 + adcq %r11, %rdi + adcq $0, %r8 + addq %rax, %r10 + adcq %rcx, %rbp + adcq %r9, %r14 + adcq %rsi, %rdi + adcq %rbx, %r8 + sbbq %rax, %rax + andl $1, %eax + movq -16(%rsp), %rdx # 8-byte Reload + imulq %r10, %rdx + mulxq %r13, %rcx, %rsi + movq %rcx, -16(%rsp) # 8-byte Spill + mulxq -8(%rsp), %r11, %rbx # 8-byte Folded Reload + mulxq -80(%rsp), %r15, %rcx # 8-byte Folded Reload + movq -24(%rsp), %r9 # 8-byte Reload + mulxq %r9, %r12, %r13 + addq %r15, %r13 + adcq %r11, %rcx + adcq -16(%rsp), %rbx # 8-byte Folded Reload + adcq $0, %rsi + addq %r10, 
%r12 + adcq %rbp, %r13 + adcq %r14, %rcx + adcq %rdi, %rbx + adcq %r8, %rsi + adcq $0, %rax + movq %r13, %rdi + subq %r9, %rdi + movq %rcx, %rbp + sbbq -80(%rsp), %rbp # 8-byte Folded Reload + movq %rbx, %r8 + sbbq -8(%rsp), %r8 # 8-byte Folded Reload + movq %rsi, %rdx + sbbq -72(%rsp), %rdx # 8-byte Folded Reload + sbbq $0, %rax + andl $1, %eax + cmovneq %rsi, %rdx + testb %al, %al + cmovneq %r13, %rdi + movq -88(%rsp), %rax # 8-byte Reload + movq %rdi, (%rax) + cmovneq %rcx, %rbp + movq %rbp, 8(%rax) + cmovneq %rbx, %r8 + movq %r8, 16(%rax) + movq %rdx, 24(%rax) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end53: + .size mcl_fp_mont4Lbmi2, .Lfunc_end53-mcl_fp_mont4Lbmi2 + + .globl mcl_fp_montNF4Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF4Lbmi2,@function +mcl_fp_montNF4Lbmi2: # @mcl_fp_montNF4Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, -32(%rsp) # 8-byte Spill + movq %rdi, -80(%rsp) # 8-byte Spill + movq (%rsi), %rdi + movq %rdi, -64(%rsp) # 8-byte Spill + movq 8(%rsi), %rbp + movq %rbp, -72(%rsp) # 8-byte Spill + movq (%rdx), %rax + movq %rdx, %r15 + movq %rbp, %rdx + mulxq %rax, %rbp, %r9 + movq %rdi, %rdx + mulxq %rax, %r12, %rbx + movq 16(%rsi), %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + addq %rbp, %rbx + mulxq %rax, %r14, %rbp + adcq %r9, %r14 + movq 24(%rsi), %rdx + movq %rdx, -16(%rsp) # 8-byte Spill + mulxq %rax, %r8, %rdi + adcq %rbp, %r8 + adcq $0, %rdi + movq -8(%rcx), %r13 + movq (%rcx), %rax + movq %rax, -56(%rsp) # 8-byte Spill + movq %r12, %rdx + imulq %r13, %rdx + mulxq %rax, %rax, %r11 + addq %r12, %rax + movq 8(%rcx), %rax + movq %rax, -24(%rsp) # 8-byte Spill + mulxq %rax, %rbp, %r10 + adcq %rbx, %rbp + movq 16(%rcx), %rax + movq %rax, -40(%rsp) # 8-byte Spill + mulxq %rax, %rsi, %rbx + adcq %r14, %rsi + movq 24(%rcx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + mulxq %rax, %rcx, %rdx + adcq %r8, %rcx + adcq $0, %rdi + addq %r11, %rbp + adcq %r10, %rsi + adcq %rbx, %rcx + adcq %rdx, %rdi + movq 8(%r15), %rdx + movq -72(%rsp), %r12 # 8-byte Reload + mulxq %r12, %rbx, %r9 + movq -64(%rsp), %r15 # 8-byte Reload + mulxq %r15, %r10, %r11 + addq %rbx, %r11 + mulxq -48(%rsp), %rax, %r8 # 8-byte Folded Reload + adcq %r9, %rax + mulxq -16(%rsp), %r9, %rbx # 8-byte Folded Reload + adcq %r8, %r9 + adcq $0, %rbx + addq %rbp, %r10 + adcq %rsi, %r11 + adcq %rcx, %rax + adcq %rdi, %r9 + adcq $0, %rbx + movq %r10, %rdx + imulq %r13, %rdx + movq -56(%rsp), %r14 # 8-byte Reload + mulxq %r14, %rcx, %r8 + addq %r10, %rcx + mulxq -24(%rsp), %r10, %rdi # 8-byte Folded Reload + adcq %r11, %r10 + mulxq -40(%rsp), %rcx, %rsi # 8-byte Folded Reload + adcq %rax, %rcx + mulxq -8(%rsp), %rax, %rdx # 8-byte Folded Reload + adcq %r9, %rax + adcq $0, %rbx + addq %r8, %r10 + adcq %rdi, %rcx + adcq %rsi, %rax + adcq %rdx, %rbx + movq -32(%rsp), %rdx # 8-byte Reload + movq 16(%rdx), %rdx + mulxq %r12, %rsi, %r8 + mulxq %r15, %r11, %rbp + addq %rsi, %rbp + movq -48(%rsp), %r12 # 8-byte Reload + mulxq %r12, %rdi, %r9 + adcq %r8, %rdi + mulxq -16(%rsp), %r8, %rsi # 8-byte Folded Reload + adcq %r9, %r8 + adcq $0, %rsi + addq %r10, %r11 + adcq %rcx, %rbp + adcq %rax, %rdi + adcq %rbx, %r8 + adcq $0, %rsi + movq %r11, %rdx + imulq %r13, %rdx + mulxq %r14, %rax, %r10 + addq %r11, %rax + movq -24(%rsp), %r14 # 8-byte Reload + mulxq %r14, %r9, %rbx + adcq %rbp, %r9 + movq -40(%rsp), %r15 # 8-byte Reload + mulxq %r15, %rax, %rbp + adcq %rdi, %rax + mulxq -8(%rsp), %rcx, %rdx # 8-byte Folded Reload + adcq 
%r8, %rcx + adcq $0, %rsi + addq %r10, %r9 + adcq %rbx, %rax + adcq %rbp, %rcx + adcq %rdx, %rsi + movq -32(%rsp), %rdx # 8-byte Reload + movq 24(%rdx), %rdx + mulxq -72(%rsp), %rbx, %r8 # 8-byte Folded Reload + mulxq -64(%rsp), %r11, %rbp # 8-byte Folded Reload + addq %rbx, %rbp + mulxq %r12, %rdi, %r10 + adcq %r8, %rdi + mulxq -16(%rsp), %r8, %rbx # 8-byte Folded Reload + adcq %r10, %r8 + adcq $0, %rbx + addq %r9, %r11 + adcq %rax, %rbp + adcq %rcx, %rdi + adcq %rsi, %r8 + adcq $0, %rbx + imulq %r11, %r13 + movq %r13, %rdx + movq -56(%rsp), %r12 # 8-byte Reload + mulxq %r12, %rcx, %r9 + addq %r11, %rcx + movq %r13, %rdx + mulxq %r14, %r11, %r10 + adcq %rbp, %r11 + movq %r13, %rdx + movq %r15, %rsi + mulxq %rsi, %rax, %rcx + adcq %rdi, %rax + movq %r13, %rdx + movq -8(%rsp), %rbp # 8-byte Reload + mulxq %rbp, %r15, %rdx + adcq %r8, %r15 + adcq $0, %rbx + addq %r9, %r11 + adcq %r10, %rax + adcq %rcx, %r15 + adcq %rdx, %rbx + movq %r11, %rcx + subq %r12, %rcx + movq %rax, %rdx + sbbq %r14, %rdx + movq %r15, %rdi + sbbq %rsi, %rdi + movq %rbx, %rsi + sbbq %rbp, %rsi + cmovsq %r11, %rcx + movq -80(%rsp), %rbp # 8-byte Reload + movq %rcx, (%rbp) + cmovsq %rax, %rdx + movq %rdx, 8(%rbp) + cmovsq %r15, %rdi + movq %rdi, 16(%rbp) + cmovsq %rbx, %rsi + movq %rsi, 24(%rbp) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end54: + .size mcl_fp_montNF4Lbmi2, .Lfunc_end54-mcl_fp_montNF4Lbmi2 + + .globl mcl_fp_montRed4Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed4Lbmi2,@function +mcl_fp_montRed4Lbmi2: # @mcl_fp_montRed4Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rdi, -56(%rsp) # 8-byte Spill + movq -8(%rcx), %r13 + movq (%rcx), %rax + movq %rax, -24(%rsp) # 8-byte Spill + movq (%rsi), %r10 + movq %r10, %rdx + imulq %r13, %rdx + movq 24(%rcx), %rdi + movq %rdi, -48(%rsp) # 8-byte Spill + mulxq %rdi, %r9, %r15 + movq %rdi, %r14 + movq 16(%rcx), %rdi + movq %rdi, -8(%rsp) # 8-byte Spill + mulxq %rdi, %rdi, %rbx + movq 8(%rcx), %rcx + movq %rcx, -16(%rsp) # 8-byte Spill + mulxq %rcx, %rcx, %r8 + mulxq %rax, %rdx, %rbp + addq %rcx, %rbp + adcq %rdi, %r8 + adcq %r9, %rbx + adcq $0, %r15 + movq 56(%rsi), %r11 + movq 48(%rsi), %rcx + addq %r10, %rdx + movq 40(%rsi), %r12 + adcq 8(%rsi), %rbp + adcq 16(%rsi), %r8 + adcq 24(%rsi), %rbx + adcq 32(%rsi), %r15 + adcq $0, %r12 + adcq $0, %rcx + movq %rcx, -32(%rsp) # 8-byte Spill + adcq $0, %r11 + sbbq %rsi, %rsi + andl $1, %esi + movq %rbp, %rdx + imulq %r13, %rdx + mulxq %r14, %rax, %r9 + movq %rax, -40(%rsp) # 8-byte Spill + mulxq -8(%rsp), %r14, %rdi # 8-byte Folded Reload + mulxq -16(%rsp), %r10, %rcx # 8-byte Folded Reload + mulxq -24(%rsp), %rdx, %rax # 8-byte Folded Reload + addq %r10, %rax + adcq %r14, %rcx + adcq -40(%rsp), %rdi # 8-byte Folded Reload + adcq $0, %r9 + addq %rbp, %rdx + adcq %r8, %rax + adcq %rbx, %rcx + adcq %r15, %rdi + adcq %r12, %r9 + adcq $0, -32(%rsp) # 8-byte Folded Spill + adcq $0, %r11 + movq %r11, -40(%rsp) # 8-byte Spill + adcq $0, %rsi + movq %rax, %rdx + imulq %r13, %rdx + movq -48(%rsp), %r15 # 8-byte Reload + mulxq %r15, %rbp, %r8 + movq %rbp, -64(%rsp) # 8-byte Spill + movq -8(%rsp), %r11 # 8-byte Reload + mulxq %r11, %rbx, %r10 + movq %rbx, -72(%rsp) # 8-byte Spill + mulxq -16(%rsp), %r12, %rbp # 8-byte Folded Reload + movq -24(%rsp), %r14 # 8-byte Reload + mulxq %r14, %rdx, %rbx + addq %r12, %rbx + adcq -72(%rsp), %rbp # 8-byte Folded Reload + adcq -64(%rsp), %r10 # 8-byte Folded Reload + adcq $0, %r8 + addq 
%rax, %rdx + adcq %rcx, %rbx + adcq %rdi, %rbp + adcq %r9, %r10 + adcq -32(%rsp), %r8 # 8-byte Folded Reload + adcq $0, -40(%rsp) # 8-byte Folded Spill + adcq $0, %rsi + imulq %rbx, %r13 + movq %r13, %rdx + mulxq %r15, %rax, %rdi + movq %rax, -32(%rsp) # 8-byte Spill + movq %r13, %rdx + mulxq %r11, %r9, %rax + movq %r13, %rdx + movq -16(%rsp), %r11 # 8-byte Reload + mulxq %r11, %r12, %rcx + movq %r13, %rdx + mulxq %r14, %r15, %r13 + addq %r12, %r13 + adcq %r9, %rcx + adcq -32(%rsp), %rax # 8-byte Folded Reload + adcq $0, %rdi + addq %rbx, %r15 + adcq %rbp, %r13 + adcq %r10, %rcx + adcq %r8, %rax + adcq -40(%rsp), %rdi # 8-byte Folded Reload + adcq $0, %rsi + movq %r13, %rdx + subq %r14, %rdx + movq %rcx, %rbp + sbbq %r11, %rbp + movq %rax, %r8 + sbbq -8(%rsp), %r8 # 8-byte Folded Reload + movq %rdi, %rbx + sbbq -48(%rsp), %rbx # 8-byte Folded Reload + sbbq $0, %rsi + andl $1, %esi + cmovneq %rdi, %rbx + testb %sil, %sil + cmovneq %r13, %rdx + movq -56(%rsp), %rsi # 8-byte Reload + movq %rdx, (%rsi) + cmovneq %rcx, %rbp + movq %rbp, 8(%rsi) + cmovneq %rax, %r8 + movq %r8, 16(%rsi) + movq %rbx, 24(%rsi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end55: + .size mcl_fp_montRed4Lbmi2, .Lfunc_end55-mcl_fp_montRed4Lbmi2 + + .globl mcl_fp_addPre4Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre4Lbmi2,@function +mcl_fp_addPre4Lbmi2: # @mcl_fp_addPre4Lbmi2 +# BB#0: + movq 24(%rdx), %r8 + movq 24(%rsi), %r9 + movq 16(%rdx), %rax + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rax + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %rax, 16(%rdi) + adcq %r8, %r9 + movq %r9, 24(%rdi) + sbbq %rax, %rax + andl $1, %eax + retq +.Lfunc_end56: + .size mcl_fp_addPre4Lbmi2, .Lfunc_end56-mcl_fp_addPre4Lbmi2 + + .globl mcl_fp_subPre4Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre4Lbmi2,@function +mcl_fp_subPre4Lbmi2: # @mcl_fp_subPre4Lbmi2 +# BB#0: + movq 24(%rdx), %r8 + movq 24(%rsi), %r9 + movq 16(%rsi), %r10 + movq (%rsi), %rcx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rcx + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %r10 + movq %rcx, (%rdi) + movq %rsi, 8(%rdi) + movq %r10, 16(%rdi) + sbbq %r8, %r9 + movq %r9, 24(%rdi) + sbbq $0, %rax + andl $1, %eax + retq +.Lfunc_end57: + .size mcl_fp_subPre4Lbmi2, .Lfunc_end57-mcl_fp_subPre4Lbmi2 + + .globl mcl_fp_shr1_4Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_4Lbmi2,@function +mcl_fp_shr1_4Lbmi2: # @mcl_fp_shr1_4Lbmi2 +# BB#0: + movq 24(%rsi), %rax + movq 16(%rsi), %rcx + movq (%rsi), %rdx + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rdx + movq %rdx, (%rdi) + shrdq $1, %rcx, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rax, %rcx + movq %rcx, 16(%rdi) + shrq %rax + movq %rax, 24(%rdi) + retq +.Lfunc_end58: + .size mcl_fp_shr1_4Lbmi2, .Lfunc_end58-mcl_fp_shr1_4Lbmi2 + + .globl mcl_fp_add4Lbmi2 + .align 16, 0x90 + .type mcl_fp_add4Lbmi2,@function +mcl_fp_add4Lbmi2: # @mcl_fp_add4Lbmi2 +# BB#0: + movq 24(%rdx), %r10 + movq 24(%rsi), %r8 + movq 16(%rdx), %r9 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r9 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r9, 16(%rdi) + adcq %r10, %r8 + movq %r8, 24(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r9 + sbbq 24(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne .LBB59_2 +# BB#1: # %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r9, 16(%rdi) + movq %r8, 24(%rdi) +.LBB59_2: # %carry + retq +.Lfunc_end59: + .size mcl_fp_add4Lbmi2, 
.Lfunc_end59-mcl_fp_add4Lbmi2 + + .globl mcl_fp_addNF4Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF4Lbmi2,@function +mcl_fp_addNF4Lbmi2: # @mcl_fp_addNF4Lbmi2 +# BB#0: + pushq %rbx + movq 24(%rdx), %r8 + movq 16(%rdx), %r9 + movq (%rdx), %r11 + movq 8(%rdx), %r10 + addq (%rsi), %r11 + adcq 8(%rsi), %r10 + adcq 16(%rsi), %r9 + adcq 24(%rsi), %r8 + movq %r11, %rsi + subq (%rcx), %rsi + movq %r10, %rdx + sbbq 8(%rcx), %rdx + movq %r9, %rax + sbbq 16(%rcx), %rax + movq %r8, %rbx + sbbq 24(%rcx), %rbx + testq %rbx, %rbx + cmovsq %r11, %rsi + movq %rsi, (%rdi) + cmovsq %r10, %rdx + movq %rdx, 8(%rdi) + cmovsq %r9, %rax + movq %rax, 16(%rdi) + cmovsq %r8, %rbx + movq %rbx, 24(%rdi) + popq %rbx + retq +.Lfunc_end60: + .size mcl_fp_addNF4Lbmi2, .Lfunc_end60-mcl_fp_addNF4Lbmi2 + + .globl mcl_fp_sub4Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub4Lbmi2,@function +mcl_fp_sub4Lbmi2: # @mcl_fp_sub4Lbmi2 +# BB#0: + movq 24(%rdx), %r10 + movq 24(%rsi), %r8 + movq 16(%rsi), %r9 + movq (%rsi), %rax + movq 8(%rsi), %r11 + xorl %esi, %esi + subq (%rdx), %rax + sbbq 8(%rdx), %r11 + sbbq 16(%rdx), %r9 + movq %rax, (%rdi) + movq %r11, 8(%rdi) + movq %r9, 16(%rdi) + sbbq %r10, %r8 + movq %r8, 24(%rdi) + sbbq $0, %rsi + testb $1, %sil + jne .LBB61_2 +# BB#1: # %nocarry + retq +.LBB61_2: # %carry + movq 24(%rcx), %r10 + movq 8(%rcx), %rsi + movq 16(%rcx), %rdx + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %r11, %rsi + movq %rsi, 8(%rdi) + adcq %r9, %rdx + movq %rdx, 16(%rdi) + adcq %r8, %r10 + movq %r10, 24(%rdi) + retq +.Lfunc_end61: + .size mcl_fp_sub4Lbmi2, .Lfunc_end61-mcl_fp_sub4Lbmi2 + + .globl mcl_fp_subNF4Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF4Lbmi2,@function +mcl_fp_subNF4Lbmi2: # @mcl_fp_subNF4Lbmi2 +# BB#0: + pushq %rbx + movq 24(%rsi), %r11 + movq 16(%rsi), %r8 + movq (%rsi), %r9 + movq 8(%rsi), %r10 + subq (%rdx), %r9 + sbbq 8(%rdx), %r10 + sbbq 16(%rdx), %r8 + sbbq 24(%rdx), %r11 + movq %r11, %rdx + sarq $63, %rdx + movq 24(%rcx), %rsi + andq %rdx, %rsi + movq 16(%rcx), %rax + andq %rdx, %rax + movq 8(%rcx), %rbx + andq %rdx, %rbx + andq (%rcx), %rdx + addq %r9, %rdx + movq %rdx, (%rdi) + adcq %r10, %rbx + movq %rbx, 8(%rdi) + adcq %r8, %rax + movq %rax, 16(%rdi) + adcq %r11, %rsi + movq %rsi, 24(%rdi) + popq %rbx + retq +.Lfunc_end62: + .size mcl_fp_subNF4Lbmi2, .Lfunc_end62-mcl_fp_subNF4Lbmi2 + + .globl mcl_fpDbl_add4Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add4Lbmi2,@function +mcl_fpDbl_add4Lbmi2: # @mcl_fpDbl_add4Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r9 + movq 56(%rsi), %r8 + movq 48(%rdx), %r10 + movq 48(%rsi), %r12 + movq 40(%rdx), %r11 + movq 32(%rdx), %r14 + movq 24(%rdx), %r15 + movq 16(%rdx), %rbx + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rbx + movq 40(%rsi), %r13 + movq 24(%rsi), %rbp + movq 32(%rsi), %rsi + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %rbx, 16(%rdi) + adcq %r15, %rbp + movq %rbp, 24(%rdi) + adcq %r14, %rsi + adcq %r11, %r13 + adcq %r10, %r12 + adcq %r9, %r8 + sbbq %rax, %rax + andl $1, %eax + movq %rsi, %rdx + subq (%rcx), %rdx + movq %r13, %rbp + sbbq 8(%rcx), %rbp + movq %r12, %rbx + sbbq 16(%rcx), %rbx + movq %r8, %r9 + sbbq 24(%rcx), %r9 + sbbq $0, %rax + andl $1, %eax + cmovneq %rsi, %rdx + movq %rdx, 32(%rdi) + testb %al, %al + cmovneq %r13, %rbp + movq %rbp, 40(%rdi) + cmovneq %r12, %rbx + movq %rbx, 48(%rdi) + cmovneq %r8, %r9 + movq %r9, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq 
+.Lfunc_end63: + .size mcl_fpDbl_add4Lbmi2, .Lfunc_end63-mcl_fpDbl_add4Lbmi2 + + .globl mcl_fpDbl_sub4Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub4Lbmi2,@function +mcl_fpDbl_sub4Lbmi2: # @mcl_fpDbl_sub4Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r9 + movq 56(%rsi), %r8 + movq 48(%rdx), %r10 + movq 24(%rdx), %r11 + movq (%rsi), %rbx + xorl %eax, %eax + subq (%rdx), %rbx + movq %rbx, (%rdi) + movq 8(%rsi), %rbx + sbbq 8(%rdx), %rbx + movq %rbx, 8(%rdi) + movq 16(%rsi), %rbx + sbbq 16(%rdx), %rbx + movq %rbx, 16(%rdi) + movq 24(%rsi), %rbx + sbbq %r11, %rbx + movq 40(%rdx), %r11 + movq 32(%rdx), %rdx + movq %rbx, 24(%rdi) + movq 32(%rsi), %r12 + sbbq %rdx, %r12 + movq 48(%rsi), %r14 + movq 40(%rsi), %r15 + sbbq %r11, %r15 + sbbq %r10, %r14 + sbbq %r9, %r8 + movl $0, %edx + sbbq $0, %rdx + andl $1, %edx + movq (%rcx), %rsi + cmoveq %rax, %rsi + testb %dl, %dl + movq 16(%rcx), %rdx + cmoveq %rax, %rdx + movq 24(%rcx), %rbx + cmoveq %rax, %rbx + cmovneq 8(%rcx), %rax + addq %r12, %rsi + movq %rsi, 32(%rdi) + adcq %r15, %rax + movq %rax, 40(%rdi) + adcq %r14, %rdx + movq %rdx, 48(%rdi) + adcq %r8, %rbx + movq %rbx, 56(%rdi) + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq +.Lfunc_end64: + .size mcl_fpDbl_sub4Lbmi2, .Lfunc_end64-mcl_fpDbl_sub4Lbmi2 + + .globl mcl_fp_mulUnitPre5Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre5Lbmi2,@function +mcl_fp_mulUnitPre5Lbmi2: # @mcl_fp_mulUnitPre5Lbmi2 +# BB#0: + pushq %r14 + pushq %rbx + mulxq 32(%rsi), %r8, %r11 + mulxq 24(%rsi), %r9, %rax + mulxq 16(%rsi), %r10, %rcx + mulxq 8(%rsi), %r14, %rbx + mulxq (%rsi), %rdx, %rsi + movq %rdx, (%rdi) + addq %r14, %rsi + movq %rsi, 8(%rdi) + adcq %r10, %rbx + movq %rbx, 16(%rdi) + adcq %r9, %rcx + movq %rcx, 24(%rdi) + adcq %r8, %rax + movq %rax, 32(%rdi) + adcq $0, %r11 + movq %r11, 40(%rdi) + popq %rbx + popq %r14 + retq +.Lfunc_end65: + .size mcl_fp_mulUnitPre5Lbmi2, .Lfunc_end65-mcl_fp_mulUnitPre5Lbmi2 + + .globl mcl_fpDbl_mulPre5Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre5Lbmi2,@function +mcl_fpDbl_mulPre5Lbmi2: # @mcl_fpDbl_mulPre5Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rdi, -8(%rsp) # 8-byte Spill + movq (%rsi), %r11 + movq 8(%rsi), %r10 + movq (%rdx), %rcx + movq %r10, %rdx + mulxq %rcx, %rax, %r14 + movq %r11, %rdx + mulxq %rcx, %rdx, %rbx + movq %rdx, -24(%rsp) # 8-byte Spill + movq 24(%rsi), %rbp + movq %rbp, -16(%rsp) # 8-byte Spill + movq 16(%rsi), %r15 + addq %rax, %rbx + movq %r15, %rdx + mulxq %rcx, %rax, %r13 + adcq %r14, %rax + movq %rbp, %rdx + mulxq %rcx, %r8, %r12 + adcq %r13, %r8 + movq 32(%rsi), %r14 + movq %r14, %rdx + mulxq %rcx, %r9, %r13 + adcq %r12, %r9 + movq -24(%rsp), %rcx # 8-byte Reload + movq %rcx, (%rdi) + adcq $0, %r13 + movq -48(%rsp), %rdi # 8-byte Reload + movq 8(%rdi), %rbp + movq %r11, %rdx + mulxq %rbp, %r12, %r11 + addq %rbx, %r12 + movq %r10, %rdx + mulxq %rbp, %rbx, %rcx + movq %rcx, -24(%rsp) # 8-byte Spill + adcq %rax, %rbx + movq %r15, %rdx + mulxq %rbp, %rcx, %r10 + adcq %r8, %rcx + movq -16(%rsp), %rdx # 8-byte Reload + mulxq %rbp, %rax, %r8 + adcq %r9, %rax + movq %r14, %rdx + mulxq %rbp, %r15, %rdx + adcq %r13, %r15 + sbbq %r14, %r14 + andl $1, %r14d + addq %r11, %rbx + movq -8(%rsp), %rbp # 8-byte Reload + movq %r12, 8(%rbp) + adcq -24(%rsp), %rcx # 8-byte Folded Reload + adcq %r10, %rax + adcq %r8, %r15 + adcq %rdx, %r14 + movq (%rsi), %rdx + movq %rdx, -24(%rsp) # 8-byte Spill + movq 8(%rsi), %r8 + movq %r8, 
-16(%rsp) # 8-byte Spill + movq 16(%rdi), %rbp + mulxq %rbp, %r12, %rdx + movq %rdx, -32(%rsp) # 8-byte Spill + addq %rbx, %r12 + movq %r8, %rdx + mulxq %rbp, %rbx, %rdx + movq %rdx, -40(%rsp) # 8-byte Spill + adcq %rcx, %rbx + movq 16(%rsi), %r11 + movq %r11, %rdx + mulxq %rbp, %rcx, %rdx + movq %rdx, -56(%rsp) # 8-byte Spill + adcq %rax, %rcx + movq 24(%rsi), %r13 + movq %r13, %rdx + mulxq %rbp, %r9, %r10 + adcq %r15, %r9 + movq 32(%rsi), %r15 + movq %r15, %rdx + mulxq %rbp, %r8, %rdx + adcq %r14, %r8 + sbbq %r14, %r14 + andl $1, %r14d + addq -32(%rsp), %rbx # 8-byte Folded Reload + adcq -40(%rsp), %rcx # 8-byte Folded Reload + adcq -56(%rsp), %r9 # 8-byte Folded Reload + adcq %r10, %r8 + adcq %rdx, %r14 + movq -8(%rsp), %r10 # 8-byte Reload + movq %r12, 16(%r10) + movq %rdi, %rbp + movq 24(%rbp), %rax + movq -24(%rsp), %rdx # 8-byte Reload + mulxq %rax, %r12, %rdi + addq %rbx, %r12 + movq -16(%rsp), %rdx # 8-byte Reload + mulxq %rax, %rbx, %rdx + movq %rdx, -16(%rsp) # 8-byte Spill + adcq %rcx, %rbx + movq %r11, %rdx + mulxq %rax, %rcx, %r11 + adcq %r9, %rcx + movq %r13, %rdx + mulxq %rax, %r13, %r9 + adcq %r8, %r13 + movq %r15, %rdx + mulxq %rax, %r8, %rdx + adcq %r14, %r8 + sbbq %r14, %r14 + andl $1, %r14d + addq %rdi, %rbx + movq %r12, 24(%r10) + movq %r10, %rdi + adcq -16(%rsp), %rcx # 8-byte Folded Reload + adcq %r11, %r13 + adcq %r9, %r8 + adcq %rdx, %r14 + movq 32(%rbp), %rdx + mulxq 8(%rsi), %rax, %r9 + mulxq (%rsi), %rbp, %r10 + addq %rbx, %rbp + adcq %rcx, %rax + mulxq 16(%rsi), %rbx, %r15 + adcq %r13, %rbx + mulxq 32(%rsi), %rcx, %r11 + mulxq 24(%rsi), %rsi, %rdx + movq %rbp, 32(%rdi) + adcq %r8, %rsi + adcq %r14, %rcx + sbbq %rbp, %rbp + andl $1, %ebp + addq %r10, %rax + movq %rax, 40(%rdi) + adcq %r9, %rbx + movq %rbx, 48(%rdi) + adcq %r15, %rsi + movq %rsi, 56(%rdi) + adcq %rdx, %rcx + movq %rcx, 64(%rdi) + adcq %r11, %rbp + movq %rbp, 72(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end66: + .size mcl_fpDbl_mulPre5Lbmi2, .Lfunc_end66-mcl_fpDbl_mulPre5Lbmi2 + + .globl mcl_fpDbl_sqrPre5Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre5Lbmi2,@function +mcl_fpDbl_sqrPre5Lbmi2: # @mcl_fpDbl_sqrPre5Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 16(%rsi), %r11 + movq (%rsi), %rax + movq 8(%rsi), %rcx + movq %r11, %rdx + mulxq %rax, %rbx, %r15 + movq 32(%rsi), %r9 + movq %r9, -8(%rsp) # 8-byte Spill + movq 24(%rsi), %r13 + movq %rcx, %rdx + mulxq %rax, %r12, %rbp + movq %rbp, -16(%rsp) # 8-byte Spill + movq %rax, %rdx + mulxq %rax, %rdx, %r14 + movq %rdx, -24(%rsp) # 8-byte Spill + addq %r12, %r14 + adcq %rbp, %rbx + movq %r13, %rdx + mulxq %rax, %r8, %r10 + adcq %r15, %r8 + movq %r9, %rdx + mulxq %rax, %rbp, %r15 + adcq %r10, %rbp + movq -24(%rsp), %rax # 8-byte Reload + movq %rax, (%rdi) + adcq $0, %r15 + addq %r12, %r14 + movq %rcx, %rdx + mulxq %rcx, %rax, %rdx + movq %rdx, -24(%rsp) # 8-byte Spill + adcq %rbx, %rax + movq %r11, %rdx + mulxq %rcx, %rbx, %r10 + adcq %r8, %rbx + movq %r13, %rdx + mulxq %rcx, %r13, %r8 + adcq %rbp, %r13 + movq %r9, %rdx + mulxq %rcx, %r12, %rcx + adcq %r15, %r12 + sbbq %r15, %r15 + andl $1, %r15d + addq -16(%rsp), %rax # 8-byte Folded Reload + movq %r14, 8(%rdi) + adcq -24(%rsp), %rbx # 8-byte Folded Reload + adcq %r10, %r13 + adcq %r8, %r12 + adcq %rcx, %r15 + movq (%rsi), %r9 + movq 8(%rsi), %r10 + movq %r9, %rdx + mulxq %r11, %rbp, %rcx + movq %rcx, -16(%rsp) # 8-byte Spill + addq %rax, %rbp + movq %r10, %rdx + mulxq %r11, %rax, %r8 + adcq %rbx, 
%rax + movq %r11, %rdx + mulxq %r11, %r14, %rcx + movq %rcx, -24(%rsp) # 8-byte Spill + adcq %r13, %r14 + movq 24(%rsi), %rcx + movq %rcx, %rdx + mulxq %r11, %rbx, %r13 + adcq %r12, %rbx + movq -8(%rsp), %rdx # 8-byte Reload + mulxq %r11, %r12, %rdx + adcq %r15, %r12 + sbbq %r15, %r15 + andl $1, %r15d + addq -16(%rsp), %rax # 8-byte Folded Reload + adcq %r8, %r14 + movq %rbp, 16(%rdi) + adcq -24(%rsp), %rbx # 8-byte Folded Reload + adcq %r13, %r12 + adcq %rdx, %r15 + movq %r10, %rdx + mulxq %rcx, %r10, %rdx + movq %rdx, -8(%rsp) # 8-byte Spill + movq %r9, %rdx + mulxq %rcx, %r13, %rdx + movq %rdx, -16(%rsp) # 8-byte Spill + addq %rax, %r13 + movq 16(%rsi), %r8 + movq 32(%rsi), %rax + adcq %r14, %r10 + movq %r8, %rdx + mulxq %rcx, %r9, %r14 + adcq %rbx, %r9 + movq %rcx, %rdx + mulxq %rcx, %r11, %rbp + adcq %r12, %r11 + movq %rax, %rdx + mulxq %rcx, %r12, %rdx + adcq %r15, %r12 + sbbq %rbx, %rbx + andl $1, %ebx + addq -16(%rsp), %r10 # 8-byte Folded Reload + movq %r13, 24(%rdi) + adcq -8(%rsp), %r9 # 8-byte Folded Reload + adcq %r14, %r11 + adcq %rbp, %r12 + adcq %rdx, %rbx + movq %rax, %rdx + mulxq 24(%rsi), %rbp, %r14 + movq %rax, %rdx + mulxq (%rsi), %rcx, %r15 + addq %r10, %rcx + movq %rax, %rdx + mulxq 8(%rsi), %rsi, %r10 + movq %rcx, 32(%rdi) + adcq %r9, %rsi + movq %r8, %rdx + mulxq %rax, %rcx, %r8 + adcq %r11, %rcx + adcq %r12, %rbp + movq %rax, %rdx + mulxq %rax, %rdx, %rax + adcq %rbx, %rdx + sbbq %rbx, %rbx + andl $1, %ebx + addq %r15, %rsi + movq %rsi, 40(%rdi) + adcq %r10, %rcx + movq %rcx, 48(%rdi) + adcq %r8, %rbp + movq %rbp, 56(%rdi) + adcq %r14, %rdx + movq %rdx, 64(%rdi) + adcq %rax, %rbx + movq %rbx, 72(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end67: + .size mcl_fpDbl_sqrPre5Lbmi2, .Lfunc_end67-mcl_fpDbl_sqrPre5Lbmi2 + + .globl mcl_fp_mont5Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont5Lbmi2,@function +mcl_fp_mont5Lbmi2: # @mcl_fp_mont5Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rdi, -112(%rsp) # 8-byte Spill + movq 32(%rsi), %rdi + movq %rdi, -64(%rsp) # 8-byte Spill + movq (%rdx), %rax + movq %rdi, %rdx + mulxq %rax, %r10, %rbx + movq 24(%rsi), %rdx + movq %rdx, -72(%rsp) # 8-byte Spill + mulxq %rax, %r12, %r14 + movq 16(%rsi), %rdx + movq %rdx, -80(%rsp) # 8-byte Spill + mulxq %rax, %r13, %r11 + movq (%rsi), %rbp + movq %rbp, -88(%rsp) # 8-byte Spill + movq 8(%rsi), %rdx + movq %rdx, -96(%rsp) # 8-byte Spill + mulxq %rax, %rdi, %r9 + movq %rbp, %rdx + mulxq %rax, %r15, %r8 + addq %rdi, %r8 + adcq %r13, %r9 + adcq %r12, %r11 + adcq %r10, %r14 + adcq $0, %rbx + movq %rbx, -104(%rsp) # 8-byte Spill + movq -8(%rcx), %rax + movq %rax, -48(%rsp) # 8-byte Spill + movq %r15, %rdx + imulq %rax, %rdx + movq (%rcx), %rsi + movq %rsi, -32(%rsp) # 8-byte Spill + movq 32(%rcx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + mulxq %rax, %rax, %r12 + movq %rax, -120(%rsp) # 8-byte Spill + movq 24(%rcx), %rax + movq %rax, -16(%rsp) # 8-byte Spill + mulxq %rax, %r13, %r10 + movq 8(%rcx), %rax + movq %rax, -24(%rsp) # 8-byte Spill + mulxq %rax, %rdi, %rbp + mulxq %rsi, %rax, %rbx + addq %rdi, %rbx + movq 16(%rcx), %rcx + movq %rcx, -40(%rsp) # 8-byte Spill + mulxq %rcx, %rdi, %rcx + adcq %rbp, %rdi + adcq %r13, %rcx + adcq -120(%rsp), %r10 # 8-byte Folded Reload + adcq $0, %r12 + addq %r15, %rax + adcq %r8, %rbx + adcq %r9, %rdi + adcq %r11, %rcx + adcq %r14, %r10 + adcq -104(%rsp), %r12 # 8-byte Folded Reload + sbbq %rbp, %rbp + andl $1, %ebp + 
movq -56(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rdx + mulxq -64(%rsp), %rax, %r14 # 8-byte Folded Reload + movq %rax, -104(%rsp) # 8-byte Spill + mulxq -72(%rsp), %rax, %r15 # 8-byte Folded Reload + movq %rax, -120(%rsp) # 8-byte Spill + mulxq -80(%rsp), %r13, %r9 # 8-byte Folded Reload + mulxq -96(%rsp), %r8, %rsi # 8-byte Folded Reload + mulxq -88(%rsp), %r11, %rax # 8-byte Folded Reload + addq %r8, %rax + adcq %r13, %rsi + adcq -120(%rsp), %r9 # 8-byte Folded Reload + adcq -104(%rsp), %r15 # 8-byte Folded Reload + adcq $0, %r14 + addq %rbx, %r11 + adcq %rdi, %rax + adcq %rcx, %rsi + adcq %r10, %r9 + adcq %r12, %r15 + adcq %rbp, %r14 + sbbq %r12, %r12 + andl $1, %r12d + movq %r11, %rdx + imulq -48(%rsp), %rdx # 8-byte Folded Reload + mulxq -8(%rsp), %rcx, %r10 # 8-byte Folded Reload + movq %rcx, -104(%rsp) # 8-byte Spill + mulxq -16(%rsp), %rcx, %rdi # 8-byte Folded Reload + movq %rcx, -120(%rsp) # 8-byte Spill + mulxq -40(%rsp), %r13, %rcx # 8-byte Folded Reload + mulxq -24(%rsp), %r8, %rbx # 8-byte Folded Reload + mulxq -32(%rsp), %rdx, %rbp # 8-byte Folded Reload + addq %r8, %rbp + adcq %r13, %rbx + adcq -120(%rsp), %rcx # 8-byte Folded Reload + adcq -104(%rsp), %rdi # 8-byte Folded Reload + adcq $0, %r10 + addq %r11, %rdx + adcq %rax, %rbp + adcq %rsi, %rbx + adcq %r9, %rcx + adcq %r15, %rdi + adcq %r14, %r10 + adcq $0, %r12 + movq -56(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rdx + mulxq -64(%rsp), %rax, %r15 # 8-byte Folded Reload + movq %rax, -104(%rsp) # 8-byte Spill + mulxq -72(%rsp), %rax, %r11 # 8-byte Folded Reload + movq %rax, -120(%rsp) # 8-byte Spill + mulxq -80(%rsp), %r13, %r9 # 8-byte Folded Reload + mulxq -96(%rsp), %rsi, %r8 # 8-byte Folded Reload + mulxq -88(%rsp), %r14, %rax # 8-byte Folded Reload + addq %rsi, %rax + adcq %r13, %r8 + adcq -120(%rsp), %r9 # 8-byte Folded Reload + adcq -104(%rsp), %r11 # 8-byte Folded Reload + adcq $0, %r15 + addq %rbp, %r14 + adcq %rbx, %rax + adcq %rcx, %r8 + adcq %rdi, %r9 + adcq %r10, %r11 + adcq %r12, %r15 + sbbq %r13, %r13 + andl $1, %r13d + movq %r14, %rdx + imulq -48(%rsp), %rdx # 8-byte Folded Reload + mulxq -8(%rsp), %rcx, %r12 # 8-byte Folded Reload + movq %rcx, -104(%rsp) # 8-byte Spill + mulxq -16(%rsp), %rcx, %r10 # 8-byte Folded Reload + movq %rcx, -120(%rsp) # 8-byte Spill + mulxq -40(%rsp), %rdi, %rsi # 8-byte Folded Reload + mulxq -24(%rsp), %rcx, %rbx # 8-byte Folded Reload + mulxq -32(%rsp), %rdx, %rbp # 8-byte Folded Reload + addq %rcx, %rbp + adcq %rdi, %rbx + adcq -120(%rsp), %rsi # 8-byte Folded Reload + adcq -104(%rsp), %r10 # 8-byte Folded Reload + adcq $0, %r12 + addq %r14, %rdx + adcq %rax, %rbp + adcq %r8, %rbx + adcq %r9, %rsi + adcq %r11, %r10 + adcq %r15, %r12 + adcq $0, %r13 + movq -56(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rdx + mulxq -64(%rsp), %rcx, %rax # 8-byte Folded Reload + movq %rcx, -120(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + mulxq -72(%rsp), %r11, %r14 # 8-byte Folded Reload + mulxq -80(%rsp), %r8, %r9 # 8-byte Folded Reload + mulxq -96(%rsp), %rax, %rdi # 8-byte Folded Reload + mulxq -88(%rsp), %r15, %rcx # 8-byte Folded Reload + addq %rax, %rcx + adcq %r8, %rdi + adcq %r11, %r9 + adcq -120(%rsp), %r14 # 8-byte Folded Reload + movq -104(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %rbp, %r15 + adcq %rbx, %rcx + adcq %rsi, %rdi + adcq %r10, %r9 + adcq %r12, %r14 + adcq %r13, %rax + movq %rax, -104(%rsp) # 8-byte Spill + sbbq %r12, %r12 + andl $1, %r12d + movq %r15, %rdx + imulq -48(%rsp), %rdx # 8-byte Folded Reload + mulxq -8(%rsp), %rax, %rbp # 
8-byte Folded Reload + movq %rax, -120(%rsp) # 8-byte Spill + mulxq -16(%rsp), %r13, %r10 # 8-byte Folded Reload + mulxq -40(%rsp), %rbx, %r8 # 8-byte Folded Reload + mulxq -24(%rsp), %rsi, %r11 # 8-byte Folded Reload + mulxq -32(%rsp), %rdx, %rax # 8-byte Folded Reload + addq %rsi, %rax + adcq %rbx, %r11 + adcq %r13, %r8 + adcq -120(%rsp), %r10 # 8-byte Folded Reload + adcq $0, %rbp + addq %r15, %rdx + adcq %rcx, %rax + adcq %rdi, %r11 + adcq %r9, %r8 + adcq %r14, %r10 + adcq -104(%rsp), %rbp # 8-byte Folded Reload + adcq $0, %r12 + movq -56(%rsp), %rcx # 8-byte Reload + movq 32(%rcx), %rdx + mulxq -64(%rsp), %rcx, %r14 # 8-byte Folded Reload + movq %rcx, -56(%rsp) # 8-byte Spill + mulxq -72(%rsp), %rcx, %rbx # 8-byte Folded Reload + movq %rcx, -64(%rsp) # 8-byte Spill + mulxq -80(%rsp), %rsi, %r15 # 8-byte Folded Reload + mulxq -96(%rsp), %rcx, %r9 # 8-byte Folded Reload + mulxq -88(%rsp), %r13, %rdi # 8-byte Folded Reload + addq %rcx, %rdi + adcq %rsi, %r9 + adcq -64(%rsp), %r15 # 8-byte Folded Reload + adcq -56(%rsp), %rbx # 8-byte Folded Reload + adcq $0, %r14 + addq %rax, %r13 + adcq %r11, %rdi + adcq %r8, %r9 + adcq %r10, %r15 + adcq %rbp, %rbx + adcq %r12, %r14 + sbbq %rax, %rax + movq -48(%rsp), %rdx # 8-byte Reload + imulq %r13, %rdx + mulxq -32(%rsp), %r10, %rcx # 8-byte Folded Reload + mulxq -24(%rsp), %r8, %rsi # 8-byte Folded Reload + addq %rcx, %r8 + mulxq -40(%rsp), %rbp, %r11 # 8-byte Folded Reload + adcq %rsi, %rbp + mulxq -16(%rsp), %rcx, %r12 # 8-byte Folded Reload + adcq %r11, %rcx + mulxq -8(%rsp), %rsi, %r11 # 8-byte Folded Reload + adcq %r12, %rsi + adcq $0, %r11 + andl $1, %eax + addq %r13, %r10 + adcq %rdi, %r8 + adcq %r9, %rbp + adcq %r15, %rcx + adcq %rbx, %rsi + adcq %r14, %r11 + adcq $0, %rax + movq %r8, %rdi + subq -32(%rsp), %rdi # 8-byte Folded Reload + movq %rbp, %rbx + sbbq -24(%rsp), %rbx # 8-byte Folded Reload + movq %rcx, %r9 + sbbq -40(%rsp), %r9 # 8-byte Folded Reload + movq %rsi, %rdx + sbbq -16(%rsp), %rdx # 8-byte Folded Reload + movq %r11, %r10 + sbbq -8(%rsp), %r10 # 8-byte Folded Reload + sbbq $0, %rax + andl $1, %eax + cmovneq %rsi, %rdx + testb %al, %al + cmovneq %r8, %rdi + movq -112(%rsp), %rax # 8-byte Reload + movq %rdi, (%rax) + cmovneq %rbp, %rbx + movq %rbx, 8(%rax) + cmovneq %rcx, %r9 + movq %r9, 16(%rax) + movq %rdx, 24(%rax) + cmovneq %r11, %r10 + movq %r10, 32(%rax) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end68: + .size mcl_fp_mont5Lbmi2, .Lfunc_end68-mcl_fp_mont5Lbmi2 + + .globl mcl_fp_montNF5Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF5Lbmi2,@function +mcl_fp_montNF5Lbmi2: # @mcl_fp_montNF5Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, -16(%rsp) # 8-byte Spill + movq %rdi, -104(%rsp) # 8-byte Spill + movq (%rsi), %r13 + movq %r13, -64(%rsp) # 8-byte Spill + movq 8(%rsi), %rbp + movq %rbp, -24(%rsp) # 8-byte Spill + movq (%rdx), %rax + movq %rbp, %rdx + mulxq %rax, %rbp, %r9 + movq %r13, %rdx + mulxq %rax, %r8, %r10 + movq 16(%rsi), %rdx + movq %rdx, -32(%rsp) # 8-byte Spill + addq %rbp, %r10 + mulxq %rax, %rbp, %rbx + adcq %r9, %rbp + movq 24(%rsi), %rdx + movq %rdx, -40(%rsp) # 8-byte Spill + mulxq %rax, %r15, %r9 + adcq %rbx, %r15 + movq 32(%rsi), %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + mulxq %rax, %rax, %r11 + adcq %r9, %rax + adcq $0, %r11 + movq -8(%rcx), %rsi + movq %rsi, -72(%rsp) # 8-byte Spill + movq %r8, %rdx + imulq %rsi, %rdx + movq (%rcx), %rsi + movq %rsi, -88(%rsp) # 8-byte Spill + mulxq %rsi, %rbx, %r14 
+ addq %r8, %rbx + movq 8(%rcx), %rsi + movq %rsi, -80(%rsp) # 8-byte Spill + mulxq %rsi, %rbx, %r12 + adcq %r10, %rbx + movq 16(%rcx), %rsi + movq %rsi, -96(%rsp) # 8-byte Spill + mulxq %rsi, %r10, %rdi + adcq %rbp, %r10 + movq 24(%rcx), %rsi + movq %rsi, -56(%rsp) # 8-byte Spill + mulxq %rsi, %r9, %rbp + adcq %r15, %r9 + movq 32(%rcx), %rcx + movq %rcx, -8(%rsp) # 8-byte Spill + mulxq %rcx, %r8, %rcx + adcq %rax, %r8 + adcq $0, %r11 + addq %r14, %rbx + adcq %r12, %r10 + adcq %rdi, %r9 + adcq %rbp, %r8 + adcq %rcx, %r11 + movq -16(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rdx + mulxq -24(%rsp), %rcx, %rsi # 8-byte Folded Reload + mulxq %r13, %r14, %rax + addq %rcx, %rax + mulxq -32(%rsp), %rcx, %rdi # 8-byte Folded Reload + adcq %rsi, %rcx + mulxq -40(%rsp), %rsi, %r15 # 8-byte Folded Reload + adcq %rdi, %rsi + mulxq -48(%rsp), %rdi, %rbp # 8-byte Folded Reload + adcq %r15, %rdi + adcq $0, %rbp + addq %rbx, %r14 + adcq %r10, %rax + adcq %r9, %rcx + adcq %r8, %rsi + adcq %r11, %rdi + adcq $0, %rbp + movq %r14, %rdx + movq -72(%rsp), %r12 # 8-byte Reload + imulq %r12, %rdx + mulxq -88(%rsp), %rbx, %r15 # 8-byte Folded Reload + addq %r14, %rbx + movq -80(%rsp), %r13 # 8-byte Reload + mulxq %r13, %r8, %rbx + adcq %rax, %r8 + mulxq -96(%rsp), %r9, %rax # 8-byte Folded Reload + adcq %rcx, %r9 + mulxq -56(%rsp), %r10, %rcx # 8-byte Folded Reload + adcq %rsi, %r10 + mulxq -8(%rsp), %r11, %rdx # 8-byte Folded Reload + adcq %rdi, %r11 + adcq $0, %rbp + addq %r15, %r8 + adcq %rbx, %r9 + adcq %rax, %r10 + adcq %rcx, %r11 + adcq %rdx, %rbp + movq -16(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rdx + mulxq -24(%rsp), %rcx, %rax # 8-byte Folded Reload + mulxq -64(%rsp), %r14, %rsi # 8-byte Folded Reload + addq %rcx, %rsi + mulxq -32(%rsp), %rbx, %rcx # 8-byte Folded Reload + adcq %rax, %rbx + mulxq -40(%rsp), %rdi, %r15 # 8-byte Folded Reload + adcq %rcx, %rdi + mulxq -48(%rsp), %rcx, %rax # 8-byte Folded Reload + adcq %r15, %rcx + adcq $0, %rax + addq %r8, %r14 + adcq %r9, %rsi + adcq %r10, %rbx + adcq %r11, %rdi + adcq %rbp, %rcx + adcq $0, %rax + movq %r14, %rdx + imulq %r12, %rdx + movq -88(%rsp), %r12 # 8-byte Reload + mulxq %r12, %rbp, %r15 + addq %r14, %rbp + mulxq %r13, %r8, %rbp + adcq %rsi, %r8 + movq -96(%rsp), %r13 # 8-byte Reload + mulxq %r13, %r9, %rsi + adcq %rbx, %r9 + mulxq -56(%rsp), %r10, %rbx # 8-byte Folded Reload + adcq %rdi, %r10 + mulxq -8(%rsp), %r11, %rdx # 8-byte Folded Reload + adcq %rcx, %r11 + adcq $0, %rax + addq %r15, %r8 + adcq %rbp, %r9 + adcq %rsi, %r10 + adcq %rbx, %r11 + adcq %rdx, %rax + movq -16(%rsp), %rcx # 8-byte Reload + movq 24(%rcx), %rdx + mulxq -24(%rsp), %rdi, %rsi # 8-byte Folded Reload + mulxq -64(%rsp), %r14, %rcx # 8-byte Folded Reload + addq %rdi, %rcx + mulxq -32(%rsp), %rbx, %rdi # 8-byte Folded Reload + adcq %rsi, %rbx + mulxq -40(%rsp), %rsi, %r15 # 8-byte Folded Reload + adcq %rdi, %rsi + mulxq -48(%rsp), %rdi, %rbp # 8-byte Folded Reload + adcq %r15, %rdi + adcq $0, %rbp + addq %r8, %r14 + adcq %r9, %rcx + adcq %r10, %rbx + adcq %r11, %rsi + adcq %rax, %rdi + adcq $0, %rbp + movq %r14, %rdx + imulq -72(%rsp), %rdx # 8-byte Folded Reload + mulxq %r12, %rax, %r11 + addq %r14, %rax + mulxq -80(%rsp), %r8, %r14 # 8-byte Folded Reload + adcq %rcx, %r8 + mulxq %r13, %r9, %rax + adcq %rbx, %r9 + movq -56(%rsp), %r12 # 8-byte Reload + mulxq %r12, %r10, %rbx + adcq %rsi, %r10 + mulxq -8(%rsp), %rcx, %rdx # 8-byte Folded Reload + adcq %rdi, %rcx + adcq $0, %rbp + addq %r11, %r8 + adcq %r14, %r9 + adcq %rax, %r10 + adcq %rbx, %rcx + adcq %rdx, %rbp + 
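+# Last outer iteration of mcl_fp_montNF5Lbmi2: b[4] is loaded from 32(%rax)
+# just below and a*b[4] is folded into the accumulator. This variant tracks
+# no explicit carry word; the epilogue selects t or t - p branchlessly via
+# sarq $63 / cmovsq on the sign of the trial subtraction.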
movq -16(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rdx + mulxq -24(%rsp), %rdi, %rbx # 8-byte Folded Reload + mulxq -64(%rsp), %r14, %rsi # 8-byte Folded Reload + addq %rdi, %rsi + mulxq -32(%rsp), %rdi, %rax # 8-byte Folded Reload + adcq %rbx, %rdi + mulxq -40(%rsp), %rbx, %r15 # 8-byte Folded Reload + adcq %rax, %rbx + mulxq -48(%rsp), %r11, %rax # 8-byte Folded Reload + adcq %r15, %r11 + adcq $0, %rax + addq %r8, %r14 + adcq %r9, %rsi + adcq %r10, %rdi + adcq %rcx, %rbx + adcq %rbp, %r11 + adcq $0, %rax + movq -72(%rsp), %rdx # 8-byte Reload + imulq %r14, %rdx + movq -88(%rsp), %r10 # 8-byte Reload + mulxq %r10, %rcx, %rbp + movq %rbp, -16(%rsp) # 8-byte Spill + addq %r14, %rcx + movq -80(%rsp), %r9 # 8-byte Reload + mulxq %r9, %r14, %rcx + movq %rcx, -24(%rsp) # 8-byte Spill + adcq %rsi, %r14 + movq %r13, %r8 + mulxq %r8, %r15, %r13 + adcq %rdi, %r15 + mulxq %r12, %rbp, %rcx + adcq %rbx, %rbp + movq -8(%rsp), %rbx # 8-byte Reload + mulxq %rbx, %r12, %rdx + adcq %r11, %r12 + adcq $0, %rax + addq -16(%rsp), %r14 # 8-byte Folded Reload + adcq -24(%rsp), %r15 # 8-byte Folded Reload + adcq %r13, %rbp + adcq %rcx, %r12 + adcq %rdx, %rax + movq %r14, %rcx + subq %r10, %rcx + movq %r15, %rsi + sbbq %r9, %rsi + movq %rbp, %rdi + sbbq %r8, %rdi + movq %r12, %r8 + sbbq -56(%rsp), %r8 # 8-byte Folded Reload + movq %rax, %rdx + sbbq %rbx, %rdx + movq %rdx, %rbx + sarq $63, %rbx + cmovsq %r14, %rcx + movq -104(%rsp), %rbx # 8-byte Reload + movq %rcx, (%rbx) + cmovsq %r15, %rsi + movq %rsi, 8(%rbx) + cmovsq %rbp, %rdi + movq %rdi, 16(%rbx) + cmovsq %r12, %r8 + movq %r8, 24(%rbx) + cmovsq %rax, %rdx + movq %rdx, 32(%rbx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end69: + .size mcl_fp_montNF5Lbmi2, .Lfunc_end69-mcl_fp_montNF5Lbmi2 + + .globl mcl_fp_montRed5Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed5Lbmi2,@function +mcl_fp_montRed5Lbmi2: # @mcl_fp_montRed5Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rdi, -80(%rsp) # 8-byte Spill + movq -8(%rcx), %rax + movq %rax, -64(%rsp) # 8-byte Spill + movq (%rcx), %rbx + movq %rbx, -8(%rsp) # 8-byte Spill + movq (%rsi), %r9 + movq %r9, %rdx + imulq %rax, %rdx + movq %rax, %r15 + movq 32(%rcx), %rax + movq %rax, -40(%rsp) # 8-byte Spill + mulxq %rax, %r8, %r13 + movq 24(%rcx), %r12 + movq %r12, -32(%rsp) # 8-byte Spill + mulxq %r12, %r10, %r14 + movq 16(%rcx), %rax + movq %rax, -48(%rsp) # 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, -16(%rsp) # 8-byte Spill + mulxq %rax, %rdi, %rbp + mulxq %rcx, %rax, %r11 + mulxq %rbx, %rdx, %rcx + addq %rax, %rcx + adcq %rdi, %r11 + adcq %r10, %rbp + adcq %r8, %r14 + adcq $0, %r13 + addq %r9, %rdx + movq 72(%rsi), %rax + movq 64(%rsi), %rdx + adcq 8(%rsi), %rcx + adcq 16(%rsi), %r11 + adcq 24(%rsi), %rbp + adcq 32(%rsi), %r14 + adcq 40(%rsi), %r13 + movq 56(%rsi), %rdi + movq 48(%rsi), %rsi + adcq $0, %rsi + movq %rsi, -88(%rsp) # 8-byte Spill + adcq $0, %rdi + movq %rdi, -72(%rsp) # 8-byte Spill + adcq $0, %rdx + movq %rdx, -56(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, -24(%rsp) # 8-byte Spill + sbbq %rsi, %rsi + andl $1, %esi + movq %rcx, %rdx + imulq %r15, %rdx + mulxq -40(%rsp), %rax, %r15 # 8-byte Folded Reload + movq %rax, -96(%rsp) # 8-byte Spill + mulxq %r12, %rax, %r10 + movq %rax, -104(%rsp) # 8-byte Spill + movq -48(%rsp), %r12 # 8-byte Reload + mulxq %r12, %rbx, %r8 + mulxq -16(%rsp), %r9, %rdi # 8-byte Folded Reload + mulxq -8(%rsp), %rdx, %rax # 8-byte Folded Reload + addq %r9, 
%rax + adcq %rbx, %rdi + adcq -104(%rsp), %r8 # 8-byte Folded Reload + adcq -96(%rsp), %r10 # 8-byte Folded Reload + adcq $0, %r15 + addq %rcx, %rdx + adcq %r11, %rax + adcq %rbp, %rdi + adcq %r14, %r8 + adcq %r13, %r10 + adcq -88(%rsp), %r15 # 8-byte Folded Reload + adcq $0, -72(%rsp) # 8-byte Folded Spill + adcq $0, -56(%rsp) # 8-byte Folded Spill + adcq $0, -24(%rsp) # 8-byte Folded Spill + adcq $0, %rsi + movq %rax, %rdx + imulq -64(%rsp), %rdx # 8-byte Folded Reload + mulxq -40(%rsp), %rcx, %r13 # 8-byte Folded Reload + movq %rcx, -88(%rsp) # 8-byte Spill + mulxq -32(%rsp), %rcx, %r14 # 8-byte Folded Reload + movq %rcx, -96(%rsp) # 8-byte Spill + mulxq %r12, %r11, %rbx + mulxq -16(%rsp), %r9, %rbp # 8-byte Folded Reload + mulxq -8(%rsp), %rdx, %rcx # 8-byte Folded Reload + addq %r9, %rcx + adcq %r11, %rbp + adcq -96(%rsp), %rbx # 8-byte Folded Reload + adcq -88(%rsp), %r14 # 8-byte Folded Reload + adcq $0, %r13 + addq %rax, %rdx + adcq %rdi, %rcx + adcq %r8, %rbp + adcq %r10, %rbx + adcq %r15, %r14 + adcq -72(%rsp), %r13 # 8-byte Folded Reload + adcq $0, -56(%rsp) # 8-byte Folded Spill + adcq $0, -24(%rsp) # 8-byte Folded Spill + adcq $0, %rsi + movq %rcx, %rdx + imulq -64(%rsp), %rdx # 8-byte Folded Reload + movq -40(%rsp), %r9 # 8-byte Reload + mulxq %r9, %rax, %r12 + movq %rax, -72(%rsp) # 8-byte Spill + mulxq -32(%rsp), %rax, %r10 # 8-byte Folded Reload + movq %rax, -88(%rsp) # 8-byte Spill + mulxq -48(%rsp), %r8, %r11 # 8-byte Folded Reload + mulxq -16(%rsp), %rdi, %r15 # 8-byte Folded Reload + mulxq -8(%rsp), %rdx, %rax # 8-byte Folded Reload + addq %rdi, %rax + adcq %r8, %r15 + adcq -88(%rsp), %r11 # 8-byte Folded Reload + adcq -72(%rsp), %r10 # 8-byte Folded Reload + adcq $0, %r12 + addq %rcx, %rdx + adcq %rbp, %rax + adcq %rbx, %r15 + adcq %r14, %r11 + adcq %r13, %r10 + adcq -56(%rsp), %r12 # 8-byte Folded Reload + adcq $0, -24(%rsp) # 8-byte Folded Spill + adcq $0, %rsi + movq -64(%rsp), %rdx # 8-byte Reload + imulq %rax, %rdx + mulxq %r9, %rdi, %rcx + movq %rdi, -56(%rsp) # 8-byte Spill + mulxq -32(%rsp), %rbp, %rdi # 8-byte Folded Reload + movq %rbp, -64(%rsp) # 8-byte Spill + mulxq -48(%rsp), %r13, %rbp # 8-byte Folded Reload + mulxq -8(%rsp), %r8, %r9 # 8-byte Folded Reload + movq -16(%rsp), %r14 # 8-byte Reload + mulxq %r14, %rbx, %rdx + addq %r9, %rbx + adcq %r13, %rdx + adcq -64(%rsp), %rbp # 8-byte Folded Reload + adcq -56(%rsp), %rdi # 8-byte Folded Reload + adcq $0, %rcx + addq %rax, %r8 + adcq %r15, %rbx + adcq %r11, %rdx + adcq %r10, %rbp + adcq %r12, %rdi + adcq -24(%rsp), %rcx # 8-byte Folded Reload + adcq $0, %rsi + movq %rbx, %rax + subq -8(%rsp), %rax # 8-byte Folded Reload + movq %rdx, %r8 + sbbq %r14, %r8 + movq %rbp, %r9 + sbbq -48(%rsp), %r9 # 8-byte Folded Reload + movq %rdi, %r10 + sbbq -32(%rsp), %r10 # 8-byte Folded Reload + movq %rcx, %r11 + sbbq -40(%rsp), %r11 # 8-byte Folded Reload + sbbq $0, %rsi + andl $1, %esi + cmovneq %rcx, %r11 + testb %sil, %sil + cmovneq %rbx, %rax + movq -80(%rsp), %rcx # 8-byte Reload + movq %rax, (%rcx) + cmovneq %rdx, %r8 + movq %r8, 8(%rcx) + cmovneq %rbp, %r9 + movq %r9, 16(%rcx) + cmovneq %rdi, %r10 + movq %r10, 24(%rcx) + movq %r11, 32(%rcx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end70: + .size mcl_fp_montRed5Lbmi2, .Lfunc_end70-mcl_fp_montRed5Lbmi2 + + .globl mcl_fp_addPre5Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre5Lbmi2,@function +mcl_fp_addPre5Lbmi2: # @mcl_fp_addPre5Lbmi2 +# BB#0: + movq 32(%rdx), %r8 + movq 24(%rdx), %r9 + movq 24(%rsi), %r11 + movq 32(%rsi), 
%r10 + movq 16(%rdx), %rcx + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rcx + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %rcx, 16(%rdi) + adcq %r9, %r11 + movq %r11, 24(%rdi) + adcq %r8, %r10 + movq %r10, 32(%rdi) + sbbq %rax, %rax + andl $1, %eax + retq +.Lfunc_end71: + .size mcl_fp_addPre5Lbmi2, .Lfunc_end71-mcl_fp_addPre5Lbmi2 + + .globl mcl_fp_subPre5Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre5Lbmi2,@function +mcl_fp_subPre5Lbmi2: # @mcl_fp_subPre5Lbmi2 +# BB#0: + pushq %rbx + movq 32(%rsi), %r10 + movq 24(%rdx), %r8 + movq 32(%rdx), %r9 + movq 24(%rsi), %r11 + movq 16(%rsi), %rcx + movq (%rsi), %rbx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rbx + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %rcx + movq %rbx, (%rdi) + movq %rsi, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r8, %r11 + movq %r11, 24(%rdi) + sbbq %r9, %r10 + movq %r10, 32(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + retq +.Lfunc_end72: + .size mcl_fp_subPre5Lbmi2, .Lfunc_end72-mcl_fp_subPre5Lbmi2 + + .globl mcl_fp_shr1_5Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_5Lbmi2,@function +mcl_fp_shr1_5Lbmi2: # @mcl_fp_shr1_5Lbmi2 +# BB#0: + movq 32(%rsi), %r8 + movq 24(%rsi), %rcx + movq 16(%rsi), %rdx + movq (%rsi), %rax + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rax + movq %rax, (%rdi) + shrdq $1, %rdx, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rcx, %rdx + movq %rdx, 16(%rdi) + shrdq $1, %r8, %rcx + movq %rcx, 24(%rdi) + shrq %r8 + movq %r8, 32(%rdi) + retq +.Lfunc_end73: + .size mcl_fp_shr1_5Lbmi2, .Lfunc_end73-mcl_fp_shr1_5Lbmi2 + + .globl mcl_fp_add5Lbmi2 + .align 16, 0x90 + .type mcl_fp_add5Lbmi2,@function +mcl_fp_add5Lbmi2: # @mcl_fp_add5Lbmi2 +# BB#0: + pushq %rbx + movq 32(%rdx), %r11 + movq 24(%rdx), %rbx + movq 24(%rsi), %r9 + movq 32(%rsi), %r8 + movq 16(%rdx), %r10 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r10 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + adcq %rbx, %r9 + movq %r9, 24(%rdi) + adcq %r11, %r8 + movq %r8, 32(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r10 + sbbq 24(%rcx), %r9 + sbbq 32(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne .LBB74_2 +# BB#1: # %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + movq %r9, 24(%rdi) + movq %r8, 32(%rdi) +.LBB74_2: # %carry + popq %rbx + retq +.Lfunc_end74: + .size mcl_fp_add5Lbmi2, .Lfunc_end74-mcl_fp_add5Lbmi2 + + .globl mcl_fp_addNF5Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF5Lbmi2,@function +mcl_fp_addNF5Lbmi2: # @mcl_fp_addNF5Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 32(%rdx), %r8 + movq 24(%rdx), %r9 + movq 16(%rdx), %r10 + movq (%rdx), %r14 + movq 8(%rdx), %r11 + addq (%rsi), %r14 + adcq 8(%rsi), %r11 + adcq 16(%rsi), %r10 + adcq 24(%rsi), %r9 + adcq 32(%rsi), %r8 + movq %r14, %rsi + subq (%rcx), %rsi + movq %r11, %rdx + sbbq 8(%rcx), %rdx + movq %r10, %rbx + sbbq 16(%rcx), %rbx + movq %r9, %r15 + sbbq 24(%rcx), %r15 + movq %r8, %rax + sbbq 32(%rcx), %rax + movq %rax, %rcx + sarq $63, %rcx + cmovsq %r14, %rsi + movq %rsi, (%rdi) + cmovsq %r11, %rdx + movq %rdx, 8(%rdi) + cmovsq %r10, %rbx + movq %rbx, 16(%rdi) + cmovsq %r9, %r15 + movq %r15, 24(%rdi) + cmovsq %r8, %rax + movq %rax, 32(%rdi) + popq %rbx + popq %r14 + popq %r15 + retq +.Lfunc_end75: + .size mcl_fp_addNF5Lbmi2, .Lfunc_end75-mcl_fp_addNF5Lbmi2 + + .globl mcl_fp_sub5Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub5Lbmi2,@function +mcl_fp_sub5Lbmi2: # @mcl_fp_sub5Lbmi2 +# 
BB#0: + pushq %r14 + pushq %rbx + movq 32(%rsi), %r8 + movq 24(%rdx), %r11 + movq 32(%rdx), %r14 + movq 24(%rsi), %r9 + movq 16(%rsi), %r10 + movq (%rsi), %rax + movq 8(%rsi), %rsi + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %r10 + movq %rax, (%rdi) + movq %rsi, 8(%rdi) + movq %r10, 16(%rdi) + sbbq %r11, %r9 + movq %r9, 24(%rdi) + sbbq %r14, %r8 + movq %r8, 32(%rdi) + sbbq $0, %rbx + testb $1, %bl + je .LBB76_2 +# BB#1: # %carry + movq 32(%rcx), %r11 + movq 24(%rcx), %r14 + movq 8(%rcx), %rdx + movq 16(%rcx), %rbx + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %rsi, %rdx + movq %rdx, 8(%rdi) + adcq %r10, %rbx + movq %rbx, 16(%rdi) + adcq %r9, %r14 + movq %r14, 24(%rdi) + adcq %r8, %r11 + movq %r11, 32(%rdi) +.LBB76_2: # %nocarry + popq %rbx + popq %r14 + retq +.Lfunc_end76: + .size mcl_fp_sub5Lbmi2, .Lfunc_end76-mcl_fp_sub5Lbmi2 + + .globl mcl_fp_subNF5Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF5Lbmi2,@function +mcl_fp_subNF5Lbmi2: # @mcl_fp_subNF5Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 32(%rsi), %r11 + movq 24(%rsi), %r8 + movq 16(%rsi), %r9 + movq (%rsi), %r10 + movq 8(%rsi), %r14 + subq (%rdx), %r10 + sbbq 8(%rdx), %r14 + sbbq 16(%rdx), %r9 + sbbq 24(%rdx), %r8 + sbbq 32(%rdx), %r11 + movq %r11, %rax + sarq $63, %rax + movq %rax, %rdx + shldq $1, %r11, %rdx + movq 8(%rcx), %rbx + andq %rdx, %rbx + andq (%rcx), %rdx + movq 32(%rcx), %r15 + andq %rax, %r15 + rorxq $63, %rax, %rsi + andq 24(%rcx), %rax + andq 16(%rcx), %rsi + addq %r10, %rdx + movq %rdx, (%rdi) + adcq %r14, %rbx + movq %rbx, 8(%rdi) + adcq %r9, %rsi + movq %rsi, 16(%rdi) + adcq %r8, %rax + movq %rax, 24(%rdi) + adcq %r11, %r15 + movq %r15, 32(%rdi) + popq %rbx + popq %r14 + popq %r15 + retq +.Lfunc_end77: + .size mcl_fp_subNF5Lbmi2, .Lfunc_end77-mcl_fp_subNF5Lbmi2 + + .globl mcl_fpDbl_add5Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add5Lbmi2,@function +mcl_fpDbl_add5Lbmi2: # @mcl_fpDbl_add5Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 72(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 64(%rdx), %r11 + movq 56(%rdx), %r14 + movq 48(%rdx), %r15 + movq 24(%rsi), %rbp + movq 32(%rsi), %r13 + movq 16(%rdx), %r12 + movq (%rdx), %rbx + movq 8(%rdx), %rax + addq (%rsi), %rbx + adcq 8(%rsi), %rax + adcq 16(%rsi), %r12 + adcq 24(%rdx), %rbp + adcq 32(%rdx), %r13 + movq 40(%rdx), %r9 + movq %rbx, (%rdi) + movq 72(%rsi), %r8 + movq %rax, 8(%rdi) + movq 64(%rsi), %r10 + movq %r12, 16(%rdi) + movq 56(%rsi), %r12 + movq %rbp, 24(%rdi) + movq 48(%rsi), %rbp + movq 40(%rsi), %rbx + movq %r13, 32(%rdi) + adcq %r9, %rbx + adcq %r15, %rbp + adcq %r14, %r12 + adcq %r11, %r10 + adcq -8(%rsp), %r8 # 8-byte Folded Reload + sbbq %rsi, %rsi + andl $1, %esi + movq %rbx, %rax + subq (%rcx), %rax + movq %rbp, %rdx + sbbq 8(%rcx), %rdx + movq %r12, %r9 + sbbq 16(%rcx), %r9 + movq %r10, %r11 + sbbq 24(%rcx), %r11 + movq %r8, %r14 + sbbq 32(%rcx), %r14 + sbbq $0, %rsi + andl $1, %esi + cmovneq %rbx, %rax + movq %rax, 40(%rdi) + testb %sil, %sil + cmovneq %rbp, %rdx + movq %rdx, 48(%rdi) + cmovneq %r12, %r9 + movq %r9, 56(%rdi) + cmovneq %r10, %r11 + movq %r11, 64(%rdi) + cmovneq %r8, %r14 + movq %r14, 72(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end78: + .size mcl_fpDbl_add5Lbmi2, .Lfunc_end78-mcl_fpDbl_add5Lbmi2 + + .globl mcl_fpDbl_sub5Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub5Lbmi2,@function +mcl_fpDbl_sub5Lbmi2: # @mcl_fpDbl_sub5Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + 
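+# mcl_fpDbl_sub5Lbmi2: 10-limb double-width subtraction. The low five limbs
+# of a - b are stored directly; if the subtraction borrows, the 5-limb
+# modulus is conditionally added back into the high half (cmoveq zeroes the
+# addend limbs when there was no borrow).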
pushq %r13 + pushq %r12 + pushq %rbx + movq 72(%rdx), %r9 + movq 64(%rdx), %r10 + movq 56(%rdx), %r14 + movq 16(%rsi), %r8 + movq (%rsi), %r15 + movq 8(%rsi), %r11 + xorl %eax, %eax + subq (%rdx), %r15 + sbbq 8(%rdx), %r11 + sbbq 16(%rdx), %r8 + movq 24(%rsi), %r12 + sbbq 24(%rdx), %r12 + movq %r15, (%rdi) + movq 32(%rsi), %rbx + sbbq 32(%rdx), %rbx + movq %r11, 8(%rdi) + movq 48(%rdx), %r15 + movq 40(%rdx), %rdx + movq %r8, 16(%rdi) + movq 72(%rsi), %r8 + movq %r12, 24(%rdi) + movq 64(%rsi), %r11 + movq %rbx, 32(%rdi) + movq 40(%rsi), %rbp + sbbq %rdx, %rbp + movq 56(%rsi), %r12 + movq 48(%rsi), %r13 + sbbq %r15, %r13 + sbbq %r14, %r12 + sbbq %r10, %r11 + sbbq %r9, %r8 + movl $0, %edx + sbbq $0, %rdx + andl $1, %edx + movq (%rcx), %rsi + cmoveq %rax, %rsi + testb %dl, %dl + movq 16(%rcx), %rdx + cmoveq %rax, %rdx + movq 8(%rcx), %rbx + cmoveq %rax, %rbx + movq 32(%rcx), %r9 + cmoveq %rax, %r9 + cmovneq 24(%rcx), %rax + addq %rbp, %rsi + movq %rsi, 40(%rdi) + adcq %r13, %rbx + movq %rbx, 48(%rdi) + adcq %r12, %rdx + movq %rdx, 56(%rdi) + adcq %r11, %rax + movq %rax, 64(%rdi) + adcq %r8, %r9 + movq %r9, 72(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end79: + .size mcl_fpDbl_sub5Lbmi2, .Lfunc_end79-mcl_fpDbl_sub5Lbmi2 + + .globl mcl_fp_mulUnitPre6Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre6Lbmi2,@function +mcl_fp_mulUnitPre6Lbmi2: # @mcl_fp_mulUnitPre6Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + mulxq 40(%rsi), %r8, %r11 + mulxq 32(%rsi), %r9, %r12 + mulxq 24(%rsi), %r10, %rcx + mulxq 16(%rsi), %r14, %rbx + mulxq 8(%rsi), %r15, %rax + mulxq (%rsi), %rdx, %rsi + movq %rdx, (%rdi) + addq %r15, %rsi + movq %rsi, 8(%rdi) + adcq %r14, %rax + movq %rax, 16(%rdi) + adcq %r10, %rbx + movq %rbx, 24(%rdi) + adcq %r9, %rcx + movq %rcx, 32(%rdi) + adcq %r8, %r12 + movq %r12, 40(%rdi) + adcq $0, %r11 + movq %r11, 48(%rdi) + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq +.Lfunc_end80: + .size mcl_fp_mulUnitPre6Lbmi2, .Lfunc_end80-mcl_fp_mulUnitPre6Lbmi2 + + .globl mcl_fpDbl_mulPre6Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre6Lbmi2,@function +mcl_fpDbl_mulPre6Lbmi2: # @mcl_fpDbl_mulPre6Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %r11 + movq %r11, -16(%rsp) # 8-byte Spill + movq %rdi, -8(%rsp) # 8-byte Spill + movq (%rsi), %r15 + movq 8(%rsi), %rcx + movq %rcx, -24(%rsp) # 8-byte Spill + movq (%r11), %rax + movq %rcx, %rdx + mulxq %rax, %rcx, %r14 + movq %r15, %rdx + mulxq %rax, %rdx, %rbp + movq %rdx, -48(%rsp) # 8-byte Spill + movq 24(%rsi), %rbx + movq %rbx, -32(%rsp) # 8-byte Spill + movq 16(%rsi), %rdx + movq %rdx, -40(%rsp) # 8-byte Spill + addq %rcx, %rbp + mulxq %rax, %rcx, %r12 + adcq %r14, %rcx + movq %rbx, %rdx + mulxq %rax, %rbx, %r14 + adcq %r12, %rbx + movq 32(%rsi), %r12 + movq %r12, %rdx + mulxq %rax, %r8, %r13 + adcq %r14, %r8 + movq 40(%rsi), %r14 + movq %r14, %rdx + mulxq %rax, %r9, %r10 + adcq %r13, %r9 + movq -48(%rsp), %rax # 8-byte Reload + movq %rax, (%rdi) + adcq $0, %r10 + movq 8(%r11), %rdi + movq %r15, %rdx + mulxq %rdi, %r13, %rax + movq %rax, -48(%rsp) # 8-byte Spill + addq %rbp, %r13 + movq -24(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %rbp, %rax + movq %rax, -24(%rsp) # 8-byte Spill + adcq %rcx, %rbp + movq -40(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %rax, %r11 + adcq %rbx, %rax + movq -32(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %rbx, %rcx + movq %rcx, -32(%rsp) # 8-byte Spill + adcq %r8, %rbx + movq %r12, %rdx + mulxq 
%rdi, %rcx, %r8 + adcq %r9, %rcx + movq %r14, %rdx + mulxq %rdi, %r12, %rdx + adcq %r10, %r12 + sbbq %r15, %r15 + andl $1, %r15d + addq -48(%rsp), %rbp # 8-byte Folded Reload + adcq -24(%rsp), %rax # 8-byte Folded Reload + adcq %r11, %rbx + movq -8(%rsp), %rdi # 8-byte Reload + movq %r13, 8(%rdi) + adcq -32(%rsp), %rcx # 8-byte Folded Reload + adcq %r8, %r12 + adcq %rdx, %r15 + movq (%rsi), %rdx + movq %rdx, -32(%rsp) # 8-byte Spill + movq 8(%rsi), %r8 + movq %r8, -24(%rsp) # 8-byte Spill + movq -16(%rsp), %r14 # 8-byte Reload + movq 16(%r14), %rdi + mulxq %rdi, %r13, %rdx + movq %rdx, -72(%rsp) # 8-byte Spill + addq %rbp, %r13 + movq %r8, %rdx + mulxq %rdi, %r8, %rdx + movq %rdx, -80(%rsp) # 8-byte Spill + adcq %rax, %r8 + movq 16(%rsi), %rdx + movq %rdx, -40(%rsp) # 8-byte Spill + mulxq %rdi, %r11, %rax + movq %rax, -88(%rsp) # 8-byte Spill + adcq %rbx, %r11 + movq 24(%rsi), %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + mulxq %rdi, %rax, %rbx + adcq %rcx, %rax + movq 32(%rsi), %rdx + movq %rdx, -56(%rsp) # 8-byte Spill + mulxq %rdi, %r10, %rcx + adcq %r12, %r10 + movq 40(%rsi), %rdx + movq %rdx, -64(%rsp) # 8-byte Spill + mulxq %rdi, %r9, %rdx + adcq %r15, %r9 + sbbq %rbp, %rbp + andl $1, %ebp + addq -72(%rsp), %r8 # 8-byte Folded Reload + adcq -80(%rsp), %r11 # 8-byte Folded Reload + adcq -88(%rsp), %rax # 8-byte Folded Reload + adcq %rbx, %r10 + adcq %rcx, %r9 + adcq %rdx, %rbp + movq -8(%rsp), %rcx # 8-byte Reload + movq %r13, 16(%rcx) + movq 24(%r14), %rdi + movq -32(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %r12, %rcx + movq %rcx, -32(%rsp) # 8-byte Spill + addq %r8, %r12 + movq -24(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %rbx, %rcx + movq %rcx, -24(%rsp) # 8-byte Spill + adcq %r11, %rbx + movq -40(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %rcx, %r11 + adcq %rax, %rcx + movq -48(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %r14, %rax + movq %rax, -40(%rsp) # 8-byte Spill + adcq %r10, %r14 + movq -56(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %r8, %rax + adcq %r9, %r8 + movq -64(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %r13, %rdx + adcq %rbp, %r13 + sbbq %r15, %r15 + andl $1, %r15d + addq -32(%rsp), %rbx # 8-byte Folded Reload + adcq -24(%rsp), %rcx # 8-byte Folded Reload + adcq %r11, %r14 + movq -8(%rsp), %rdi # 8-byte Reload + movq %r12, 24(%rdi) + adcq -40(%rsp), %r8 # 8-byte Folded Reload + adcq %rax, %r13 + adcq %rdx, %r15 + movq (%rsi), %rdx + movq %rdx, -32(%rsp) # 8-byte Spill + movq 8(%rsi), %rbp + movq %rbp, -24(%rsp) # 8-byte Spill + movq -16(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rdi + mulxq %rdi, %r12, %rax + movq %rax, -40(%rsp) # 8-byte Spill + addq %rbx, %r12 + movq %rbp, %rdx + mulxq %rdi, %rbx, %rax + movq %rax, -48(%rsp) # 8-byte Spill + adcq %rcx, %rbx + movq 16(%rsi), %r11 + movq %r11, %rdx + mulxq %rdi, %rax, %rcx + movq %rcx, -56(%rsp) # 8-byte Spill + adcq %r14, %rax + movq 24(%rsi), %r14 + movq %r14, %rdx + mulxq %rdi, %rbp, %rcx + movq %rcx, -64(%rsp) # 8-byte Spill + adcq %r8, %rbp + movq 32(%rsi), %r8 + movq %r8, %rdx + mulxq %rdi, %rcx, %r10 + adcq %r13, %rcx + movq 40(%rsi), %r13 + movq %r13, %rdx + mulxq %rdi, %r9, %rdx + adcq %r15, %r9 + sbbq %rsi, %rsi + andl $1, %esi + addq -40(%rsp), %rbx # 8-byte Folded Reload + adcq -48(%rsp), %rax # 8-byte Folded Reload + adcq -56(%rsp), %rbp # 8-byte Folded Reload + adcq -64(%rsp), %rcx # 8-byte Folded Reload + adcq %r10, %r9 + adcq %rdx, %rsi + movq -8(%rsp), %r10 # 8-byte Reload + movq %r12, 32(%r10) + movq -16(%rsp), %rdx # 8-byte Reload + movq 40(%rdx), %rdi + movq -32(%rsp), %rdx # 8-byte Reload + 
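+# Final pass of the 6x6 schoolbook product in mcl_fpDbl_mulPre6Lbmi2:
+# b[5] was just loaded into %rdi from 40(%rdx); each of the six passes
+# emits one low word of the 12-word result and carries a six-word tail
+# into the next pass.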
mulxq %rdi, %r15, %rdx + movq %rdx, -16(%rsp) # 8-byte Spill + addq %rbx, %r15 + movq -24(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %rbx, %r12 + adcq %rax, %rbx + movq %r11, %rdx + mulxq %rdi, %rax, %r11 + adcq %rbp, %rax + movq %r14, %rdx + mulxq %rdi, %rbp, %r14 + adcq %rcx, %rbp + movq %r8, %rdx + mulxq %rdi, %rcx, %r8 + adcq %r9, %rcx + movq %r13, %rdx + mulxq %rdi, %rdi, %r9 + adcq %rsi, %rdi + sbbq %rsi, %rsi + andl $1, %esi + addq -16(%rsp), %rbx # 8-byte Folded Reload + movq %r15, 40(%r10) + movq %rbx, 48(%r10) + adcq %r12, %rax + movq %rax, 56(%r10) + adcq %r11, %rbp + movq %rbp, 64(%r10) + adcq %r14, %rcx + movq %rcx, 72(%r10) + adcq %r8, %rdi + movq %rdi, 80(%r10) + adcq %r9, %rsi + movq %rsi, 88(%r10) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end81: + .size mcl_fpDbl_mulPre6Lbmi2, .Lfunc_end81-mcl_fpDbl_mulPre6Lbmi2 + + .globl mcl_fpDbl_sqrPre6Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre6Lbmi2,@function +mcl_fpDbl_sqrPre6Lbmi2: # @mcl_fpDbl_sqrPre6Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, %r9 + movq %r9, -8(%rsp) # 8-byte Spill + movq 16(%rsi), %rdx + movq %rdx, -24(%rsp) # 8-byte Spill + movq (%rsi), %rcx + movq 8(%rsi), %rax + mulxq %rcx, %r10, %r8 + movq 24(%rsi), %rbp + movq %rbp, -32(%rsp) # 8-byte Spill + movq %rax, %rdx + mulxq %rcx, %r11, %rbx + movq %rbx, -16(%rsp) # 8-byte Spill + movq %rcx, %rdx + mulxq %rcx, %rdx, %r14 + movq %rdx, -40(%rsp) # 8-byte Spill + addq %r11, %r14 + adcq %rbx, %r10 + movq %rbp, %rdx + mulxq %rcx, %r15, %rbp + adcq %r8, %r15 + movq 32(%rsi), %rbx + movq %rbx, %rdx + mulxq %rcx, %r8, %r13 + adcq %rbp, %r8 + movq 40(%rsi), %rdi + movq %rdi, %rdx + mulxq %rcx, %rcx, %r12 + adcq %r13, %rcx + movq -40(%rsp), %rdx # 8-byte Reload + movq %rdx, (%r9) + adcq $0, %r12 + addq %r11, %r14 + movq %rax, %rdx + mulxq %rax, %rbp, %rdx + movq %rdx, -40(%rsp) # 8-byte Spill + adcq %r10, %rbp + movq -24(%rsp), %rdx # 8-byte Reload + mulxq %rax, %r13, %r10 + adcq %r15, %r13 + movq -32(%rsp), %rdx # 8-byte Reload + mulxq %rax, %r15, %rdx + movq %rdx, -24(%rsp) # 8-byte Spill + adcq %r8, %r15 + movq %rbx, %rdx + mulxq %rax, %rbx, %r8 + adcq %rcx, %rbx + movq %rdi, %rdx + mulxq %rax, %r11, %rax + adcq %r12, %r11 + sbbq %r12, %r12 + andl $1, %r12d + addq -16(%rsp), %rbp # 8-byte Folded Reload + adcq -40(%rsp), %r13 # 8-byte Folded Reload + movq %r14, 8(%r9) + adcq %r10, %r15 + adcq -24(%rsp), %rbx # 8-byte Folded Reload + adcq %r8, %r11 + adcq %rax, %r12 + movq (%rsi), %rdx + movq %rdx, -32(%rsp) # 8-byte Spill + movq 8(%rsi), %rdi + movq %rdi, -24(%rsp) # 8-byte Spill + movq 16(%rsi), %rcx + mulxq %rcx, %rax, %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + addq %rbp, %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq %rdi, %rdx + mulxq %rcx, %rbp, %rax + movq %rax, -56(%rsp) # 8-byte Spill + adcq %r13, %rbp + movq %rcx, %rdx + mulxq %rcx, %r13, %rax + movq %rax, -64(%rsp) # 8-byte Spill + adcq %r15, %r13 + movq 24(%rsi), %rax + movq %rax, %rdx + mulxq %rcx, %r8, %rdi + movq %rdi, -40(%rsp) # 8-byte Spill + adcq %r8, %rbx + movq 32(%rsi), %r10 + movq %r10, %rdx + mulxq %rcx, %r14, %r15 + adcq %r11, %r14 + movq 40(%rsi), %r11 + movq %r11, %rdx + mulxq %rcx, %r9, %rdx + adcq %r12, %r9 + sbbq %rcx, %rcx + andl $1, %ecx + addq -48(%rsp), %rbp # 8-byte Folded Reload + adcq -56(%rsp), %r13 # 8-byte Folded Reload + adcq -64(%rsp), %rbx # 8-byte Folded Reload + adcq %rdi, %r14 + adcq %r15, %r9 + adcq %rdx, %rcx + movq -32(%rsp), %rdx # 8-byte Reload + mulxq %rax, 
%rdi, %rdx + movq %rdx, -32(%rsp) # 8-byte Spill + addq %rbp, %rdi + movq -24(%rsp), %rdx # 8-byte Reload + mulxq %rax, %r15, %rbp + adcq %r13, %r15 + adcq %r8, %rbx + movq %rax, %rdx + mulxq %rax, %r8, %rdx + movq %rdx, -24(%rsp) # 8-byte Spill + adcq %r14, %r8 + movq %r10, %rdx + mulxq %rax, %r12, %r10 + adcq %r9, %r12 + movq %r11, %rdx + mulxq %rax, %r13, %rax + adcq %rcx, %r13 + sbbq %r9, %r9 + andl $1, %r9d + addq -32(%rsp), %r15 # 8-byte Folded Reload + adcq %rbp, %rbx + movq -8(%rsp), %rdx # 8-byte Reload + movq -16(%rsp), %rbp # 8-byte Reload + movq %rbp, 16(%rdx) + movq %rdi, 24(%rdx) + adcq -40(%rsp), %r8 # 8-byte Folded Reload + adcq -24(%rsp), %r12 # 8-byte Folded Reload + adcq %r10, %r13 + adcq %rax, %r9 + movq (%rsi), %rcx + movq 8(%rsi), %rdi + movq %rdi, -24(%rsp) # 8-byte Spill + movq 32(%rsi), %rax + movq %rcx, %rdx + mulxq %rax, %rdx, %rbp + movq %rbp, -40(%rsp) # 8-byte Spill + addq %r15, %rdx + movq %rdx, -16(%rsp) # 8-byte Spill + movq %rdi, %rdx + mulxq %rax, %r15, %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + adcq %rbx, %r15 + movq 16(%rsi), %r10 + movq %r10, %rdx + mulxq %rax, %r14, %rbx + adcq %r8, %r14 + movq 24(%rsi), %r8 + movq %r8, %rdx + mulxq %rax, %rbp, %rdi + adcq %r12, %rbp + movq %rax, %rdx + mulxq %rax, %r11, %r12 + adcq %r13, %r11 + movq 40(%rsi), %rsi + movq %rsi, %rdx + mulxq %rax, %r13, %rdx + movq %rdx, -32(%rsp) # 8-byte Spill + adcq %r13, %r9 + sbbq %rax, %rax + andl $1, %eax + addq -40(%rsp), %r15 # 8-byte Folded Reload + adcq -48(%rsp), %r14 # 8-byte Folded Reload + adcq %rbx, %rbp + adcq %rdi, %r11 + adcq %r12, %r9 + adcq %rdx, %rax + movq %rcx, %rdx + mulxq %rsi, %r12, %rcx + addq %r15, %r12 + movq -24(%rsp), %rdx # 8-byte Reload + mulxq %rsi, %rdi, %r15 + adcq %r14, %rdi + movq %r10, %rdx + mulxq %rsi, %rbx, %r10 + adcq %rbp, %rbx + movq %r8, %rdx + mulxq %rsi, %rbp, %r8 + adcq %r11, %rbp + adcq %r13, %r9 + movq %rsi, %rdx + mulxq %rsi, %rsi, %r11 + adcq %rax, %rsi + sbbq %rax, %rax + andl $1, %eax + addq %rcx, %rdi + movq -8(%rsp), %rdx # 8-byte Reload + movq -16(%rsp), %rcx # 8-byte Reload + movq %rcx, 32(%rdx) + movq %r12, 40(%rdx) + movq %rdi, 48(%rdx) + adcq %r15, %rbx + movq %rbx, 56(%rdx) + adcq %r10, %rbp + movq %rbp, 64(%rdx) + adcq %r8, %r9 + movq %r9, 72(%rdx) + adcq -32(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 80(%rdx) + adcq %r11, %rax + movq %rax, 88(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end82: + .size mcl_fpDbl_sqrPre6Lbmi2, .Lfunc_end82-mcl_fpDbl_sqrPre6Lbmi2 + + .globl mcl_fp_mont6Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont6Lbmi2,@function +mcl_fp_mont6Lbmi2: # @mcl_fp_mont6Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $32, %rsp + movq %rdx, -32(%rsp) # 8-byte Spill + movq %rdi, -104(%rsp) # 8-byte Spill + movq 40(%rsi), %rdi + movq %rdi, -40(%rsp) # 8-byte Spill + movq (%rdx), %rax + movq %rdi, %rdx + mulxq %rax, %r11, %r14 + movq 32(%rsi), %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + mulxq %rax, %r15, %rbx + movq 24(%rsi), %rdx + movq %rdx, -80(%rsp) # 8-byte Spill + movq 16(%rsi), %rdi + movq %rdi, -72(%rsp) # 8-byte Spill + movq (%rsi), %rbp + movq %rbp, -56(%rsp) # 8-byte Spill + movq 8(%rsi), %rsi + movq %rsi, -64(%rsp) # 8-byte Spill + mulxq %rax, %r8, %r12 + movq %rdi, %rdx + mulxq %rax, %r9, %r10 + movq %rsi, %rdx + mulxq %rax, %rdi, %r13 + movq %rbp, %rdx + mulxq %rax, %rdx, %rbp + movq %rdx, -96(%rsp) # 8-byte Spill + addq %rdi, %rbp + adcq %r9, %r13 + adcq %r8, %r10 + adcq %r15, %r12 + adcq %r11, 
%rbx + movq %rbx, %rdi + adcq $0, %r14 + movq %r14, -88(%rsp) # 8-byte Spill + movq -8(%rcx), %rax + movq %rax, -24(%rsp) # 8-byte Spill + imulq %rax, %rdx + movq (%rcx), %rsi + movq %rsi, (%rsp) # 8-byte Spill + movq 40(%rcx), %rax + movq %rax, 24(%rsp) # 8-byte Spill + mulxq %rax, %rax, %r9 + movq %rax, -112(%rsp) # 8-byte Spill + movq 16(%rcx), %rax + movq %rax, 16(%rsp) # 8-byte Spill + mulxq %rax, %r8, %r11 + movq 8(%rcx), %rax + movq %rax, 8(%rsp) # 8-byte Spill + mulxq %rax, %rax, %r14 + mulxq %rsi, %r15, %rsi + addq %rax, %rsi + adcq %r8, %r14 + movq 24(%rcx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + mulxq %rax, %rbx, %r8 + adcq %r11, %rbx + movq 32(%rcx), %rax + movq %rax, -16(%rsp) # 8-byte Spill + mulxq %rax, %rax, %rcx + adcq %r8, %rax + adcq -112(%rsp), %rcx # 8-byte Folded Reload + adcq $0, %r9 + addq -96(%rsp), %r15 # 8-byte Folded Reload + adcq %rbp, %rsi + adcq %r13, %r14 + adcq %r10, %rbx + adcq %r12, %rax + adcq %rdi, %rcx + adcq -88(%rsp), %r9 # 8-byte Folded Reload + movq %r9, -96(%rsp) # 8-byte Spill + sbbq %r12, %r12 + andl $1, %r12d + movq -32(%rsp), %rdx # 8-byte Reload + movq 8(%rdx), %rdx + mulxq -40(%rsp), %rdi, %rbp # 8-byte Folded Reload + movq %rdi, -112(%rsp) # 8-byte Spill + movq %rbp, -88(%rsp) # 8-byte Spill + mulxq -48(%rsp), %rdi, %r13 # 8-byte Folded Reload + movq %rdi, -120(%rsp) # 8-byte Spill + mulxq -80(%rsp), %rdi, %r15 # 8-byte Folded Reload + movq %rdi, -128(%rsp) # 8-byte Spill + mulxq -64(%rsp), %r8, %rdi # 8-byte Folded Reload + mulxq -56(%rsp), %rbp, %r10 # 8-byte Folded Reload + addq %r8, %r10 + mulxq -72(%rsp), %r9, %r11 # 8-byte Folded Reload + adcq %rdi, %r9 + adcq -128(%rsp), %r11 # 8-byte Folded Reload + adcq -120(%rsp), %r15 # 8-byte Folded Reload + adcq -112(%rsp), %r13 # 8-byte Folded Reload + movq -88(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %rsi, %rbp + adcq %r14, %r10 + adcq %rbx, %r9 + adcq %rax, %r11 + adcq %rcx, %r15 + adcq -96(%rsp), %r13 # 8-byte Folded Reload + adcq %r12, %rdx + movq %rdx, -88(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -96(%rsp) # 8-byte Spill + movq %rbp, %rdx + imulq -24(%rsp), %rdx # 8-byte Folded Reload + mulxq 24(%rsp), %rax, %r12 # 8-byte Folded Reload + movq %rax, -112(%rsp) # 8-byte Spill + mulxq -16(%rsp), %rax, %r14 # 8-byte Folded Reload + movq %rax, -120(%rsp) # 8-byte Spill + mulxq 8(%rsp), %rcx, %rsi # 8-byte Folded Reload + mulxq (%rsp), %rax, %r8 # 8-byte Folded Reload + addq %rcx, %r8 + mulxq 16(%rsp), %rdi, %rbx # 8-byte Folded Reload + adcq %rsi, %rdi + mulxq -8(%rsp), %rcx, %rsi # 8-byte Folded Reload + adcq %rbx, %rcx + adcq -120(%rsp), %rsi # 8-byte Folded Reload + adcq -112(%rsp), %r14 # 8-byte Folded Reload + adcq $0, %r12 + addq %rbp, %rax + adcq %r10, %r8 + adcq %r9, %rdi + adcq %r11, %rcx + adcq %r15, %rsi + adcq %r13, %r14 + adcq -88(%rsp), %r12 # 8-byte Folded Reload + adcq $0, -96(%rsp) # 8-byte Folded Spill + movq -32(%rsp), %rdx # 8-byte Reload + movq 16(%rdx), %rdx + mulxq -40(%rsp), %rbp, %rax # 8-byte Folded Reload + movq %rbp, -112(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + mulxq -48(%rsp), %rax, %r13 # 8-byte Folded Reload + movq %rax, -120(%rsp) # 8-byte Spill + mulxq -80(%rsp), %rbp, %r15 # 8-byte Folded Reload + mulxq -64(%rsp), %r9, %rbx # 8-byte Folded Reload + mulxq -56(%rsp), %rax, %r11 # 8-byte Folded Reload + addq %r9, %r11 + mulxq -72(%rsp), %r9, %r10 # 8-byte Folded Reload + adcq %rbx, %r9 + adcq %rbp, %r10 + adcq -120(%rsp), %r15 # 8-byte Folded Reload + adcq -112(%rsp), %r13 # 8-byte Folded Reload + 
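+# mcl_fp_mont6Lbmi2 appears to follow the interleaved (CIOS-style) pattern:
+# for each multiplier word b[i] it adds a*b[i] into a seven-word accumulator,
+# then forms m = t[0] * p' (imulq -24(%rsp)) and adds m*p so one word of the
+# accumulator shifts out.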
movq -88(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %r8, %rax + movq %rax, -112(%rsp) # 8-byte Spill + movq %rax, %rbp + adcq %rdi, %r11 + adcq %rcx, %r9 + adcq %rsi, %r10 + adcq %r14, %r15 + adcq %r12, %r13 + adcq -96(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -88(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -96(%rsp) # 8-byte Spill + movq %rbp, %rdx + imulq -24(%rsp), %rdx # 8-byte Folded Reload + mulxq 24(%rsp), %rax, %r8 # 8-byte Folded Reload + movq %rax, -120(%rsp) # 8-byte Spill + mulxq -16(%rsp), %r12, %r14 # 8-byte Folded Reload + mulxq 8(%rsp), %rcx, %rsi # 8-byte Folded Reload + mulxq (%rsp), %rax, %rbx # 8-byte Folded Reload + addq %rcx, %rbx + mulxq 16(%rsp), %rbp, %rdi # 8-byte Folded Reload + adcq %rsi, %rbp + mulxq -8(%rsp), %rcx, %rsi # 8-byte Folded Reload + adcq %rdi, %rcx + adcq %r12, %rsi + adcq -120(%rsp), %r14 # 8-byte Folded Reload + adcq $0, %r8 + addq -112(%rsp), %rax # 8-byte Folded Reload + adcq %r11, %rbx + adcq %r9, %rbp + adcq %r10, %rcx + adcq %r15, %rsi + adcq %r13, %r14 + adcq -88(%rsp), %r8 # 8-byte Folded Reload + movq %r8, -112(%rsp) # 8-byte Spill + movq -96(%rsp), %r13 # 8-byte Reload + adcq $0, %r13 + movq -32(%rsp), %rdx # 8-byte Reload + movq 24(%rdx), %rdx + mulxq -40(%rsp), %rdi, %rax # 8-byte Folded Reload + movq %rdi, -96(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + mulxq -48(%rsp), %rdi, %rax # 8-byte Folded Reload + movq %rdi, -120(%rsp) # 8-byte Spill + mulxq -80(%rsp), %r15, %r12 # 8-byte Folded Reload + mulxq -64(%rsp), %r8, %r11 # 8-byte Folded Reload + mulxq -56(%rsp), %r10, %rdi # 8-byte Folded Reload + addq %r8, %rdi + mulxq -72(%rsp), %r8, %r9 # 8-byte Folded Reload + adcq %r11, %r8 + adcq %r15, %r9 + adcq -120(%rsp), %r12 # 8-byte Folded Reload + adcq -96(%rsp), %rax # 8-byte Folded Reload + movq -88(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %rbx, %r10 + adcq %rbp, %rdi + adcq %rcx, %r8 + adcq %rsi, %r9 + adcq %r14, %r12 + adcq -112(%rsp), %rax # 8-byte Folded Reload + movq %rax, -96(%rsp) # 8-byte Spill + adcq %r13, %rdx + movq %rdx, -88(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, %r14 + movq %r10, %rdx + imulq -24(%rsp), %rdx # 8-byte Folded Reload + mulxq 24(%rsp), %rax, %r13 # 8-byte Folded Reload + movq %rax, -112(%rsp) # 8-byte Spill + mulxq -16(%rsp), %rax, %r11 # 8-byte Folded Reload + movq %rax, -120(%rsp) # 8-byte Spill + mulxq 8(%rsp), %rbp, %rsi # 8-byte Folded Reload + mulxq (%rsp), %rcx, %rbx # 8-byte Folded Reload + addq %rbp, %rbx + mulxq 16(%rsp), %rbp, %rax # 8-byte Folded Reload + adcq %rsi, %rbp + mulxq -8(%rsp), %rsi, %r15 # 8-byte Folded Reload + adcq %rax, %rsi + adcq -120(%rsp), %r15 # 8-byte Folded Reload + adcq -112(%rsp), %r11 # 8-byte Folded Reload + adcq $0, %r13 + addq %r10, %rcx + adcq %rdi, %rbx + adcq %r8, %rbp + adcq %r9, %rsi + adcq %r12, %r15 + adcq -96(%rsp), %r11 # 8-byte Folded Reload + adcq -88(%rsp), %r13 # 8-byte Folded Reload + movq %r14, %rdi + adcq $0, %rdi + movq -32(%rsp), %rcx # 8-byte Reload + movq 32(%rcx), %rdx + mulxq -40(%rsp), %rcx, %rax # 8-byte Folded Reload + movq %rcx, -96(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + mulxq -48(%rsp), %rax, %r12 # 8-byte Folded Reload + movq %rax, -112(%rsp) # 8-byte Spill + mulxq -80(%rsp), %rax, %r14 # 8-byte Folded Reload + movq %rax, -120(%rsp) # 8-byte Spill + mulxq -64(%rsp), %rcx, %r9 # 8-byte Folded Reload + mulxq -56(%rsp), %rax, %r8 # 8-byte Folded Reload + addq %rcx, %r8 + mulxq -72(%rsp), %rcx, %r10 # 8-byte Folded Reload + adcq %r9, 
%rcx + adcq -120(%rsp), %r10 # 8-byte Folded Reload + adcq -112(%rsp), %r14 # 8-byte Folded Reload + adcq -96(%rsp), %r12 # 8-byte Folded Reload + movq -88(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %rbx, %rax + movq %rax, -112(%rsp) # 8-byte Spill + movq %rax, %rbx + adcq %rbp, %r8 + adcq %rsi, %rcx + adcq %r15, %r10 + adcq %r11, %r14 + adcq %r13, %r12 + adcq %rdi, %rdx + movq %rdx, -88(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -96(%rsp) # 8-byte Spill + movq %rbx, %rdx + imulq -24(%rsp), %rdx # 8-byte Folded Reload + mulxq 24(%rsp), %rax, %r15 # 8-byte Folded Reload + movq %rax, -120(%rsp) # 8-byte Spill + mulxq -16(%rsp), %r13, %r11 # 8-byte Folded Reload + mulxq 8(%rsp), %rsi, %rax # 8-byte Folded Reload + mulxq (%rsp), %rdi, %rbx # 8-byte Folded Reload + addq %rsi, %rbx + mulxq 16(%rsp), %rbp, %r9 # 8-byte Folded Reload + adcq %rax, %rbp + mulxq -8(%rsp), %rax, %rsi # 8-byte Folded Reload + adcq %r9, %rax + adcq %r13, %rsi + adcq -120(%rsp), %r11 # 8-byte Folded Reload + adcq $0, %r15 + addq -112(%rsp), %rdi # 8-byte Folded Reload + adcq %r8, %rbx + adcq %rcx, %rbp + adcq %r10, %rax + adcq %r14, %rsi + adcq %r12, %r11 + adcq -88(%rsp), %r15 # 8-byte Folded Reload + movq -96(%rsp), %r8 # 8-byte Reload + adcq $0, %r8 + movq -32(%rsp), %rcx # 8-byte Reload + movq 40(%rcx), %rdx + mulxq -40(%rsp), %rdi, %rcx # 8-byte Folded Reload + movq %rdi, -88(%rsp) # 8-byte Spill + movq %rcx, -32(%rsp) # 8-byte Spill + mulxq -48(%rsp), %rdi, %rcx # 8-byte Folded Reload + movq %rdi, -48(%rsp) # 8-byte Spill + movq %rcx, -40(%rsp) # 8-byte Spill + mulxq -80(%rsp), %rcx, %r14 # 8-byte Folded Reload + movq %rcx, -80(%rsp) # 8-byte Spill + mulxq -72(%rsp), %rdi, %r12 # 8-byte Folded Reload + mulxq -64(%rsp), %rcx, %r10 # 8-byte Folded Reload + mulxq -56(%rsp), %r13, %r9 # 8-byte Folded Reload + addq %rcx, %r9 + adcq %rdi, %r10 + adcq -80(%rsp), %r12 # 8-byte Folded Reload + adcq -48(%rsp), %r14 # 8-byte Folded Reload + movq -40(%rsp), %rdx # 8-byte Reload + adcq -88(%rsp), %rdx # 8-byte Folded Reload + movq -32(%rsp), %rcx # 8-byte Reload + adcq $0, %rcx + addq %rbx, %r13 + adcq %rbp, %r9 + adcq %rax, %r10 + adcq %rsi, %r12 + adcq %r11, %r14 + adcq %r15, %rdx + movq %rdx, -40(%rsp) # 8-byte Spill + adcq %r8, %rcx + movq %rcx, -32(%rsp) # 8-byte Spill + sbbq %rcx, %rcx + movq -24(%rsp), %rdx # 8-byte Reload + imulq %r13, %rdx + mulxq (%rsp), %r11, %rax # 8-byte Folded Reload + mulxq 8(%rsp), %rdi, %rbx # 8-byte Folded Reload + addq %rax, %rdi + mulxq 16(%rsp), %rsi, %rax # 8-byte Folded Reload + adcq %rbx, %rsi + mulxq -8(%rsp), %rbx, %rbp # 8-byte Folded Reload + adcq %rax, %rbx + mulxq -16(%rsp), %rax, %r15 # 8-byte Folded Reload + adcq %rbp, %rax + mulxq 24(%rsp), %rbp, %rdx # 8-byte Folded Reload + adcq %r15, %rbp + adcq $0, %rdx + andl $1, %ecx + addq %r13, %r11 + adcq %r9, %rdi + adcq %r10, %rsi + adcq %r12, %rbx + adcq %r14, %rax + adcq -40(%rsp), %rbp # 8-byte Folded Reload + adcq -32(%rsp), %rdx # 8-byte Folded Reload + adcq $0, %rcx + movq %rdi, %r8 + subq (%rsp), %r8 # 8-byte Folded Reload + movq %rsi, %r9 + sbbq 8(%rsp), %r9 # 8-byte Folded Reload + movq %rbx, %r10 + sbbq 16(%rsp), %r10 # 8-byte Folded Reload + movq %rax, %r11 + sbbq -8(%rsp), %r11 # 8-byte Folded Reload + movq %rbp, %r14 + sbbq -16(%rsp), %r14 # 8-byte Folded Reload + movq %rdx, %r15 + sbbq 24(%rsp), %r15 # 8-byte Folded Reload + sbbq $0, %rcx + andl $1, %ecx + cmovneq %rax, %r11 + testb %cl, %cl + cmovneq %rdi, %r8 + movq -104(%rsp), %rax # 8-byte Reload + movq %r8, (%rax) + cmovneq %rsi, %r9 
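+# Branchless final reduction of the Montgomery result: the subq/sbbq chain
+# above computed t - p and its borrow; when the borrow flag (tested via %cl)
+# is set, cmovneq keeps the unreduced t, otherwise t - p is written back.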
+ movq %r9, 8(%rax) + cmovneq %rbx, %r10 + movq %r10, 16(%rax) + movq %r11, 24(%rax) + cmovneq %rbp, %r14 + movq %r14, 32(%rax) + cmovneq %rdx, %r15 + movq %r15, 40(%rax) + addq $32, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end83: + .size mcl_fp_mont6Lbmi2, .Lfunc_end83-mcl_fp_mont6Lbmi2 + + .globl mcl_fp_montNF6Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF6Lbmi2,@function +mcl_fp_montNF6Lbmi2: # @mcl_fp_montNF6Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rdi, -120(%rsp) # 8-byte Spill + movq (%rsi), %rax + movq %rax, -64(%rsp) # 8-byte Spill + movq 8(%rsi), %rdi + movq %rdi, -80(%rsp) # 8-byte Spill + movq (%rdx), %rbp + movq %rdi, %rdx + mulxq %rbp, %rdi, %rbx + movq %rax, %rdx + mulxq %rbp, %r9, %r14 + movq 16(%rsi), %rdx + movq %rdx, -88(%rsp) # 8-byte Spill + addq %rdi, %r14 + mulxq %rbp, %rdi, %r8 + adcq %rbx, %rdi + movq 24(%rsi), %rdx + movq %rdx, -96(%rsp) # 8-byte Spill + mulxq %rbp, %rbx, %r10 + adcq %r8, %rbx + movq 32(%rsi), %rdx + movq %rdx, -104(%rsp) # 8-byte Spill + mulxq %rbp, %r8, %r11 + adcq %r10, %r8 + movq 40(%rsi), %rdx + movq %rdx, -112(%rsp) # 8-byte Spill + mulxq %rbp, %rsi, %r15 + adcq %r11, %rsi + adcq $0, %r15 + movq -8(%rcx), %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq %r9, %rdx + imulq %rax, %rdx + movq (%rcx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + mulxq %rax, %rbp, %rax + movq %rax, -128(%rsp) # 8-byte Spill + addq %r9, %rbp + movq 8(%rcx), %rax + movq %rax, -24(%rsp) # 8-byte Spill + mulxq %rax, %r12, %r9 + adcq %r14, %r12 + movq 16(%rcx), %rax + movq %rax, -32(%rsp) # 8-byte Spill + mulxq %rax, %r14, %rax + adcq %rdi, %r14 + movq 24(%rcx), %rdi + movq %rdi, -40(%rsp) # 8-byte Spill + mulxq %rdi, %r13, %rdi + adcq %rbx, %r13 + movq 32(%rcx), %rbp + movq %rbp, -48(%rsp) # 8-byte Spill + mulxq %rbp, %r11, %rbx + adcq %r8, %r11 + movq 40(%rcx), %rcx + movq %rcx, -56(%rsp) # 8-byte Spill + mulxq %rcx, %r10, %rcx + adcq %rsi, %r10 + adcq $0, %r15 + addq -128(%rsp), %r12 # 8-byte Folded Reload + adcq %r9, %r14 + adcq %rax, %r13 + adcq %rdi, %r11 + adcq %rbx, %r10 + adcq %rcx, %r15 + movq -72(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rdx + mulxq -80(%rsp), %rcx, %rsi # 8-byte Folded Reload + mulxq -64(%rsp), %rbx, %rax # 8-byte Folded Reload + addq %rcx, %rax + mulxq -88(%rsp), %rcx, %rdi # 8-byte Folded Reload + adcq %rsi, %rcx + mulxq -96(%rsp), %rsi, %r8 # 8-byte Folded Reload + adcq %rdi, %rsi + mulxq -104(%rsp), %rdi, %rbp # 8-byte Folded Reload + movq %rbp, -128(%rsp) # 8-byte Spill + adcq %r8, %rdi + mulxq -112(%rsp), %r8, %r9 # 8-byte Folded Reload + adcq -128(%rsp), %r8 # 8-byte Folded Reload + adcq $0, %r9 + addq %r12, %rbx + adcq %r14, %rax + adcq %r13, %rcx + adcq %r11, %rsi + adcq %r10, %rdi + adcq %r15, %r8 + adcq $0, %r9 + movq %rbx, %rdx + imulq -16(%rsp), %rdx # 8-byte Folded Reload + mulxq -8(%rsp), %rbp, %r13 # 8-byte Folded Reload + addq %rbx, %rbp + mulxq -24(%rsp), %r11, %rbx # 8-byte Folded Reload + adcq %rax, %r11 + mulxq -32(%rsp), %r14, %rax # 8-byte Folded Reload + adcq %rcx, %r14 + mulxq -40(%rsp), %r10, %rcx # 8-byte Folded Reload + adcq %rsi, %r10 + mulxq -48(%rsp), %r15, %rsi # 8-byte Folded Reload + adcq %rdi, %r15 + mulxq -56(%rsp), %r12, %rdx # 8-byte Folded Reload + adcq %r8, %r12 + adcq $0, %r9 + addq %r13, %r11 + adcq %rbx, %r14 + adcq %rax, %r10 + adcq %rcx, %r15 + adcq %rsi, %r12 + adcq %rdx, %r9 + movq -72(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rdx + mulxq 
-80(%rsp), %rcx, %rax # 8-byte Folded Reload + mulxq -64(%rsp), %r13, %rdi # 8-byte Folded Reload + addq %rcx, %rdi + mulxq -88(%rsp), %rbx, %rcx # 8-byte Folded Reload + adcq %rax, %rbx + mulxq -96(%rsp), %rsi, %rbp # 8-byte Folded Reload + adcq %rcx, %rsi + mulxq -104(%rsp), %rax, %rcx # 8-byte Folded Reload + movq %rcx, -128(%rsp) # 8-byte Spill + adcq %rbp, %rax + mulxq -112(%rsp), %r8, %rcx # 8-byte Folded Reload + adcq -128(%rsp), %r8 # 8-byte Folded Reload + adcq $0, %rcx + addq %r11, %r13 + adcq %r14, %rdi + adcq %r10, %rbx + adcq %r15, %rsi + adcq %r12, %rax + adcq %r9, %r8 + adcq $0, %rcx + movq %r13, %rdx + imulq -16(%rsp), %rdx # 8-byte Folded Reload + mulxq -8(%rsp), %rbp, %r12 # 8-byte Folded Reload + addq %r13, %rbp + mulxq -24(%rsp), %r11, %rbp # 8-byte Folded Reload + adcq %rdi, %r11 + mulxq -32(%rsp), %r9, %rdi # 8-byte Folded Reload + adcq %rbx, %r9 + mulxq -40(%rsp), %r10, %rbx # 8-byte Folded Reload + adcq %rsi, %r10 + mulxq -48(%rsp), %r14, %rsi # 8-byte Folded Reload + adcq %rax, %r14 + mulxq -56(%rsp), %r15, %rax # 8-byte Folded Reload + adcq %r8, %r15 + adcq $0, %rcx + addq %r12, %r11 + adcq %rbp, %r9 + adcq %rdi, %r10 + adcq %rbx, %r14 + adcq %rsi, %r15 + adcq %rax, %rcx + movq -72(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rdx + mulxq -80(%rsp), %rsi, %rax # 8-byte Folded Reload + mulxq -64(%rsp), %r13, %rbx # 8-byte Folded Reload + addq %rsi, %rbx + mulxq -88(%rsp), %rdi, %rbp # 8-byte Folded Reload + adcq %rax, %rdi + mulxq -96(%rsp), %rsi, %r8 # 8-byte Folded Reload + adcq %rbp, %rsi + mulxq -104(%rsp), %rax, %rbp # 8-byte Folded Reload + adcq %r8, %rax + mulxq -112(%rsp), %r8, %r12 # 8-byte Folded Reload + adcq %rbp, %r8 + adcq $0, %r12 + addq %r11, %r13 + adcq %r9, %rbx + adcq %r10, %rdi + adcq %r14, %rsi + adcq %r15, %rax + adcq %rcx, %r8 + adcq $0, %r12 + movq %r13, %rdx + imulq -16(%rsp), %rdx # 8-byte Folded Reload + mulxq -8(%rsp), %rbp, %rcx # 8-byte Folded Reload + addq %r13, %rbp + mulxq -24(%rsp), %r11, %rbp # 8-byte Folded Reload + adcq %rbx, %r11 + mulxq -32(%rsp), %r9, %rbx # 8-byte Folded Reload + adcq %rdi, %r9 + mulxq -40(%rsp), %r10, %rdi # 8-byte Folded Reload + adcq %rsi, %r10 + mulxq -48(%rsp), %r14, %rsi # 8-byte Folded Reload + adcq %rax, %r14 + mulxq -56(%rsp), %r15, %rax # 8-byte Folded Reload + adcq %r8, %r15 + adcq $0, %r12 + addq %rcx, %r11 + adcq %rbp, %r9 + adcq %rbx, %r10 + adcq %rdi, %r14 + adcq %rsi, %r15 + adcq %rax, %r12 + movq -72(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rdx + mulxq -80(%rsp), %rsi, %rcx # 8-byte Folded Reload + mulxq -64(%rsp), %r13, %rax # 8-byte Folded Reload + addq %rsi, %rax + mulxq -88(%rsp), %rbx, %rsi # 8-byte Folded Reload + adcq %rcx, %rbx + mulxq -96(%rsp), %rdi, %rcx # 8-byte Folded Reload + adcq %rsi, %rdi + mulxq -104(%rsp), %rsi, %rbp # 8-byte Folded Reload + adcq %rcx, %rsi + mulxq -112(%rsp), %r8, %rcx # 8-byte Folded Reload + adcq %rbp, %r8 + adcq $0, %rcx + addq %r11, %r13 + adcq %r9, %rax + adcq %r10, %rbx + adcq %r14, %rdi + adcq %r15, %rsi + adcq %r12, %r8 + adcq $0, %rcx + movq %r13, %rdx + imulq -16(%rsp), %rdx # 8-byte Folded Reload + mulxq -8(%rsp), %rbp, %r9 # 8-byte Folded Reload + addq %r13, %rbp + mulxq -24(%rsp), %r13, %rbp # 8-byte Folded Reload + adcq %rax, %r13 + mulxq -32(%rsp), %r11, %rax # 8-byte Folded Reload + adcq %rbx, %r11 + mulxq -40(%rsp), %r10, %rbx # 8-byte Folded Reload + adcq %rdi, %r10 + mulxq -48(%rsp), %r14, %rdi # 8-byte Folded Reload + adcq %rsi, %r14 + mulxq -56(%rsp), %rsi, %rdx # 8-byte Folded Reload + adcq %r8, %rsi + adcq $0, %rcx + addq %r9, 
%r13 + adcq %rbp, %r11 + adcq %rax, %r10 + adcq %rbx, %r14 + adcq %rdi, %rsi + adcq %rdx, %rcx + movq -72(%rsp), %rax # 8-byte Reload + movq 40(%rax), %rdx + mulxq -80(%rsp), %rdi, %rax # 8-byte Folded Reload + mulxq -64(%rsp), %r8, %rbx # 8-byte Folded Reload + addq %rdi, %rbx + mulxq -88(%rsp), %rdi, %rbp # 8-byte Folded Reload + adcq %rax, %rdi + mulxq -96(%rsp), %r15, %rax # 8-byte Folded Reload + adcq %rbp, %r15 + mulxq -104(%rsp), %r12, %rbp # 8-byte Folded Reload + adcq %rax, %r12 + mulxq -112(%rsp), %r9, %rax # 8-byte Folded Reload + adcq %rbp, %r9 + adcq $0, %rax + addq %r13, %r8 + adcq %r11, %rbx + adcq %r10, %rdi + adcq %r14, %r15 + adcq %rsi, %r12 + adcq %rcx, %r9 + adcq $0, %rax + movq -16(%rsp), %rdx # 8-byte Reload + imulq %r8, %rdx + mulxq -8(%rsp), %rcx, %rsi # 8-byte Folded Reload + movq %rsi, -16(%rsp) # 8-byte Spill + addq %r8, %rcx + movq -24(%rsp), %r11 # 8-byte Reload + mulxq %r11, %r8, %rcx + movq %rcx, -64(%rsp) # 8-byte Spill + adcq %rbx, %r8 + movq -32(%rsp), %r10 # 8-byte Reload + mulxq %r10, %rsi, %rcx + movq %rcx, -72(%rsp) # 8-byte Spill + adcq %rdi, %rsi + movq -40(%rsp), %r13 # 8-byte Reload + mulxq %r13, %rdi, %rcx + movq %rcx, -80(%rsp) # 8-byte Spill + adcq %r15, %rdi + movq -48(%rsp), %rcx # 8-byte Reload + mulxq %rcx, %r15, %rbx + adcq %r12, %r15 + movq -56(%rsp), %r14 # 8-byte Reload + mulxq %r14, %r12, %rbp + adcq %r9, %r12 + adcq $0, %rax + addq -16(%rsp), %r8 # 8-byte Folded Reload + adcq -64(%rsp), %rsi # 8-byte Folded Reload + adcq -72(%rsp), %rdi # 8-byte Folded Reload + adcq -80(%rsp), %r15 # 8-byte Folded Reload + adcq %rbx, %r12 + adcq %rbp, %rax + movq %r8, %rbp + subq -8(%rsp), %rbp # 8-byte Folded Reload + movq %rsi, %rbx + sbbq %r11, %rbx + movq %rdi, %r11 + sbbq %r10, %r11 + movq %r15, %r10 + sbbq %r13, %r10 + movq %r12, %r9 + sbbq %rcx, %r9 + movq %rax, %rcx + sbbq %r14, %rcx + movq %rcx, %rdx + sarq $63, %rdx + cmovsq %r8, %rbp + movq -120(%rsp), %rdx # 8-byte Reload + movq %rbp, (%rdx) + cmovsq %rsi, %rbx + movq %rbx, 8(%rdx) + cmovsq %rdi, %r11 + movq %r11, 16(%rdx) + cmovsq %r15, %r10 + movq %r10, 24(%rdx) + cmovsq %r12, %r9 + movq %r9, 32(%rdx) + cmovsq %rax, %rcx + movq %rcx, 40(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end84: + .size mcl_fp_montNF6Lbmi2, .Lfunc_end84-mcl_fp_montNF6Lbmi2 + + .globl mcl_fp_montRed6Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed6Lbmi2,@function +mcl_fp_montRed6Lbmi2: # @mcl_fp_montRed6Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + pushq %rax + movq %rdx, %rcx + movq %rdi, -104(%rsp) # 8-byte Spill + movq -8(%rcx), %rax + movq %rax, -56(%rsp) # 8-byte Spill + movq (%rcx), %rdi + movq %rdi, -8(%rsp) # 8-byte Spill + movq (%rsi), %r14 + movq %r14, %rdx + imulq %rax, %rdx + movq 40(%rcx), %rax + movq %rax, -24(%rsp) # 8-byte Spill + mulxq %rax, %rbx, %r12 + movq 32(%rcx), %rax + movq %rax, -16(%rsp) # 8-byte Spill + mulxq %rax, %r10, %r11 + movq 24(%rcx), %rax + movq %rax, -32(%rsp) # 8-byte Spill + movq 16(%rcx), %rbp + movq %rbp, -40(%rsp) # 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, (%rsp) # 8-byte Spill + mulxq %rax, %r15, %r8 + mulxq %rbp, %r13, %rbp + mulxq %rcx, %rax, %r9 + mulxq %rdi, %rdx, %rcx + addq %rax, %rcx + adcq %r13, %r9 + adcq %r15, %rbp + adcq %r10, %r8 + adcq %rbx, %r11 + adcq $0, %r12 + addq %r14, %rdx + adcq 8(%rsi), %rcx + adcq 16(%rsi), %r9 + adcq 24(%rsi), %rbp + adcq 32(%rsi), %r8 + adcq 40(%rsi), %r11 + movq %r11, -88(%rsp) # 8-byte Spill + adcq 48(%rsi), %r12 + movq %r12, 
-80(%rsp) # 8-byte Spill + movq 88(%rsi), %r10 + movq 80(%rsi), %rdx + movq 72(%rsi), %rdi + movq 64(%rsi), %rax + movq 56(%rsi), %rsi + adcq $0, %rsi + movq %rsi, -112(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, -96(%rsp) # 8-byte Spill + adcq $0, %rdi + movq %rdi, -72(%rsp) # 8-byte Spill + adcq $0, %rdx + movq %rdx, -64(%rsp) # 8-byte Spill + adcq $0, %r10 + movq %r10, -48(%rsp) # 8-byte Spill + sbbq %r12, %r12 + andl $1, %r12d + movq %rcx, %rdx + imulq -56(%rsp), %rdx # 8-byte Folded Reload + mulxq -24(%rsp), %rax, %r13 # 8-byte Folded Reload + movq %rax, -120(%rsp) # 8-byte Spill + mulxq -16(%rsp), %rax, %r15 # 8-byte Folded Reload + movq %rax, -128(%rsp) # 8-byte Spill + mulxq -32(%rsp), %r11, %r14 # 8-byte Folded Reload + mulxq -40(%rsp), %rbx, %r10 # 8-byte Folded Reload + mulxq (%rsp), %rsi, %rdi # 8-byte Folded Reload + mulxq -8(%rsp), %rdx, %rax # 8-byte Folded Reload + addq %rsi, %rax + adcq %rbx, %rdi + adcq %r11, %r10 + adcq -128(%rsp), %r14 # 8-byte Folded Reload + adcq -120(%rsp), %r15 # 8-byte Folded Reload + adcq $0, %r13 + addq %rcx, %rdx + adcq %r9, %rax + adcq %rbp, %rdi + adcq %r8, %r10 + adcq -88(%rsp), %r14 # 8-byte Folded Reload + adcq -80(%rsp), %r15 # 8-byte Folded Reload + adcq -112(%rsp), %r13 # 8-byte Folded Reload + adcq $0, -96(%rsp) # 8-byte Folded Spill + adcq $0, -72(%rsp) # 8-byte Folded Spill + adcq $0, -64(%rsp) # 8-byte Folded Spill + adcq $0, -48(%rsp) # 8-byte Folded Spill + adcq $0, %r12 + movq %rax, %rdx + movq -56(%rsp), %r11 # 8-byte Reload + imulq %r11, %rdx + mulxq -24(%rsp), %rsi, %rcx # 8-byte Folded Reload + movq %rsi, -112(%rsp) # 8-byte Spill + movq %rcx, -80(%rsp) # 8-byte Spill + mulxq -16(%rsp), %rsi, %rcx # 8-byte Folded Reload + movq %rsi, -120(%rsp) # 8-byte Spill + movq %rcx, -88(%rsp) # 8-byte Spill + mulxq -32(%rsp), %rcx, %rbx # 8-byte Folded Reload + movq %rcx, -128(%rsp) # 8-byte Spill + mulxq -40(%rsp), %rcx, %r9 # 8-byte Folded Reload + mulxq (%rsp), %rsi, %rbp # 8-byte Folded Reload + mulxq -8(%rsp), %rdx, %r8 # 8-byte Folded Reload + addq %rsi, %r8 + adcq %rcx, %rbp + adcq -128(%rsp), %r9 # 8-byte Folded Reload + adcq -120(%rsp), %rbx # 8-byte Folded Reload + movq -88(%rsp), %rsi # 8-byte Reload + adcq -112(%rsp), %rsi # 8-byte Folded Reload + movq -80(%rsp), %rcx # 8-byte Reload + adcq $0, %rcx + addq %rax, %rdx + adcq %rdi, %r8 + adcq %r10, %rbp + adcq %r14, %r9 + adcq %r15, %rbx + adcq %r13, %rsi + movq %rsi, -88(%rsp) # 8-byte Spill + adcq -96(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -80(%rsp) # 8-byte Spill + adcq $0, -72(%rsp) # 8-byte Folded Spill + movq -64(%rsp), %rax # 8-byte Reload + adcq $0, %rax + adcq $0, -48(%rsp) # 8-byte Folded Spill + adcq $0, %r12 + movq %r8, %rdx + imulq %r11, %rdx + mulxq -24(%rsp), %rsi, %rcx # 8-byte Folded Reload + movq %rsi, -64(%rsp) # 8-byte Spill + movq %rcx, -96(%rsp) # 8-byte Spill + mulxq -16(%rsp), %rcx, %r11 # 8-byte Folded Reload + movq %rcx, -112(%rsp) # 8-byte Spill + mulxq -32(%rsp), %r10, %r14 # 8-byte Folded Reload + mulxq -40(%rsp), %r13, %r15 # 8-byte Folded Reload + mulxq (%rsp), %rsi, %rdi # 8-byte Folded Reload + mulxq -8(%rsp), %rdx, %rcx # 8-byte Folded Reload + addq %rsi, %rcx + adcq %r13, %rdi + adcq %r10, %r15 + adcq -112(%rsp), %r14 # 8-byte Folded Reload + adcq -64(%rsp), %r11 # 8-byte Folded Reload + movq -96(%rsp), %rsi # 8-byte Reload + adcq $0, %rsi + addq %r8, %rdx + adcq %rbp, %rcx + adcq %r9, %rdi + adcq %rbx, %r15 + adcq -88(%rsp), %r14 # 8-byte Folded Reload + adcq -80(%rsp), %r11 # 8-byte Folded Reload + adcq -72(%rsp), %rsi # 8-byte 
Folded Reload + movq %rsi, -96(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, -64(%rsp) # 8-byte Spill + movq -48(%rsp), %rax # 8-byte Reload + adcq $0, %rax + adcq $0, %r12 + movq %rcx, %rdx + imulq -56(%rsp), %rdx # 8-byte Folded Reload + mulxq -24(%rsp), %rbp, %rsi # 8-byte Folded Reload + movq %rbp, -48(%rsp) # 8-byte Spill + movq %rsi, -72(%rsp) # 8-byte Spill + mulxq -16(%rsp), %rbp, %rsi # 8-byte Folded Reload + movq %rbp, -88(%rsp) # 8-byte Spill + movq %rsi, -80(%rsp) # 8-byte Spill + mulxq -32(%rsp), %rsi, %r13 # 8-byte Folded Reload + movq %rsi, -112(%rsp) # 8-byte Spill + movq -40(%rsp), %r9 # 8-byte Reload + mulxq %r9, %r10, %rbp + mulxq (%rsp), %rsi, %r8 # 8-byte Folded Reload + mulxq -8(%rsp), %rdx, %rbx # 8-byte Folded Reload + addq %rsi, %rbx + adcq %r10, %r8 + adcq -112(%rsp), %rbp # 8-byte Folded Reload + adcq -88(%rsp), %r13 # 8-byte Folded Reload + movq -80(%rsp), %r10 # 8-byte Reload + adcq -48(%rsp), %r10 # 8-byte Folded Reload + movq -72(%rsp), %rsi # 8-byte Reload + adcq $0, %rsi + addq %rcx, %rdx + adcq %rdi, %rbx + adcq %r15, %r8 + adcq %r14, %rbp + adcq %r11, %r13 + adcq -96(%rsp), %r10 # 8-byte Folded Reload + movq %r10, -80(%rsp) # 8-byte Spill + adcq -64(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -72(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, -48(%rsp) # 8-byte Spill + adcq $0, %r12 + movq -56(%rsp), %rdx # 8-byte Reload + imulq %rbx, %rdx + mulxq -24(%rsp), %rax, %r10 # 8-byte Folded Reload + movq %rax, -56(%rsp) # 8-byte Spill + mulxq %r9, %rsi, %r14 + mulxq -8(%rsp), %r11, %rdi # 8-byte Folded Reload + mulxq (%rsp), %rax, %r9 # 8-byte Folded Reload + addq %rdi, %rax + adcq %rsi, %r9 + movq -32(%rsp), %r15 # 8-byte Reload + mulxq %r15, %rsi, %rdi + adcq %r14, %rsi + mulxq -16(%rsp), %rdx, %r14 # 8-byte Folded Reload + adcq %rdi, %rdx + adcq -56(%rsp), %r14 # 8-byte Folded Reload + adcq $0, %r10 + addq %rbx, %r11 + adcq %r8, %rax + adcq %rbp, %r9 + adcq %r13, %rsi + adcq -80(%rsp), %rdx # 8-byte Folded Reload + adcq -72(%rsp), %r14 # 8-byte Folded Reload + adcq -48(%rsp), %r10 # 8-byte Folded Reload + adcq $0, %r12 + movq %rax, %rcx + subq -8(%rsp), %rcx # 8-byte Folded Reload + movq %r9, %rdi + sbbq (%rsp), %rdi # 8-byte Folded Reload + movq %rsi, %rbp + sbbq -40(%rsp), %rbp # 8-byte Folded Reload + movq %rdx, %rbx + sbbq %r15, %rbx + movq %r14, %r8 + sbbq -16(%rsp), %r8 # 8-byte Folded Reload + movq %r10, %r15 + sbbq -24(%rsp), %r15 # 8-byte Folded Reload + sbbq $0, %r12 + andl $1, %r12d + cmovneq %r10, %r15 + testb %r12b, %r12b + cmovneq %rax, %rcx + movq -104(%rsp), %rax # 8-byte Reload + movq %rcx, (%rax) + cmovneq %r9, %rdi + movq %rdi, 8(%rax) + cmovneq %rsi, %rbp + movq %rbp, 16(%rax) + cmovneq %rdx, %rbx + movq %rbx, 24(%rax) + cmovneq %r14, %r8 + movq %r8, 32(%rax) + movq %r15, 40(%rax) + addq $8, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end85: + .size mcl_fp_montRed6Lbmi2, .Lfunc_end85-mcl_fp_montRed6Lbmi2 + + .globl mcl_fp_addPre6Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre6Lbmi2,@function +mcl_fp_addPre6Lbmi2: # @mcl_fp_addPre6Lbmi2 +# BB#0: + pushq %r14 + pushq %rbx + movq 40(%rdx), %r8 + movq 40(%rsi), %r11 + movq 32(%rdx), %r9 + movq 24(%rdx), %r10 + movq 24(%rsi), %rax + movq 32(%rsi), %r14 + movq 16(%rdx), %rbx + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rbx + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %rbx, 16(%rdi) + adcq %r10, %rax + movq %rax, 24(%rdi) + adcq %r9, %r14 + movq %r14, 32(%rdi) + adcq %r8, %r11 + 
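+# mcl_fp_addPre6Lbmi2 performs a plain 6-limb add with no modular reduction;
+# the sbbq/andl sequence just below materializes the carry-out in %rax as
+# the function's 0/1 return value.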
movq %r11, 40(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r14 + retq +.Lfunc_end86: + .size mcl_fp_addPre6Lbmi2, .Lfunc_end86-mcl_fp_addPre6Lbmi2 + + .globl mcl_fp_subPre6Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre6Lbmi2,@function +mcl_fp_subPre6Lbmi2: # @mcl_fp_subPre6Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 40(%rdx), %r8 + movq 40(%rsi), %r9 + movq 32(%rsi), %r10 + movq 24(%rsi), %r11 + movq 16(%rsi), %rcx + movq (%rsi), %rbx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rbx + sbbq 8(%rdx), %rsi + movq 24(%rdx), %r14 + movq 32(%rdx), %r15 + sbbq 16(%rdx), %rcx + movq %rbx, (%rdi) + movq %rsi, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r14, %r11 + movq %r11, 24(%rdi) + sbbq %r15, %r10 + movq %r10, 32(%rdi) + sbbq %r8, %r9 + movq %r9, 40(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + popq %r14 + popq %r15 + retq +.Lfunc_end87: + .size mcl_fp_subPre6Lbmi2, .Lfunc_end87-mcl_fp_subPre6Lbmi2 + + .globl mcl_fp_shr1_6Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_6Lbmi2,@function +mcl_fp_shr1_6Lbmi2: # @mcl_fp_shr1_6Lbmi2 +# BB#0: + movq 40(%rsi), %r8 + movq 32(%rsi), %r9 + movq 24(%rsi), %rdx + movq 16(%rsi), %rax + movq (%rsi), %rcx + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rcx + movq %rcx, (%rdi) + shrdq $1, %rax, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rdx, %rax + movq %rax, 16(%rdi) + shrdq $1, %r9, %rdx + movq %rdx, 24(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 32(%rdi) + shrq %r8 + movq %r8, 40(%rdi) + retq +.Lfunc_end88: + .size mcl_fp_shr1_6Lbmi2, .Lfunc_end88-mcl_fp_shr1_6Lbmi2 + + .globl mcl_fp_add6Lbmi2 + .align 16, 0x90 + .type mcl_fp_add6Lbmi2,@function +mcl_fp_add6Lbmi2: # @mcl_fp_add6Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 40(%rdx), %r14 + movq 40(%rsi), %r8 + movq 32(%rdx), %r15 + movq 24(%rdx), %rbx + movq 24(%rsi), %r10 + movq 32(%rsi), %r9 + movq 16(%rdx), %r11 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r11 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r11, 16(%rdi) + adcq %rbx, %r10 + movq %r10, 24(%rdi) + adcq %r15, %r9 + movq %r9, 32(%rdi) + adcq %r14, %r8 + movq %r8, 40(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r11 + sbbq 24(%rcx), %r10 + sbbq 32(%rcx), %r9 + sbbq 40(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne .LBB89_2 +# BB#1: # %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r11, 16(%rdi) + movq %r10, 24(%rdi) + movq %r9, 32(%rdi) + movq %r8, 40(%rdi) +.LBB89_2: # %carry + popq %rbx + popq %r14 + popq %r15 + retq +.Lfunc_end89: + .size mcl_fp_add6Lbmi2, .Lfunc_end89-mcl_fp_add6Lbmi2 + + .globl mcl_fp_addNF6Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF6Lbmi2,@function +mcl_fp_addNF6Lbmi2: # @mcl_fp_addNF6Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 40(%rdx), %r8 + movq 32(%rdx), %r9 + movq 24(%rdx), %r10 + movq 16(%rdx), %r11 + movq (%rdx), %r15 + movq 8(%rdx), %r14 + addq (%rsi), %r15 + adcq 8(%rsi), %r14 + adcq 16(%rsi), %r11 + adcq 24(%rsi), %r10 + adcq 32(%rsi), %r9 + adcq 40(%rsi), %r8 + movq %r15, %rsi + subq (%rcx), %rsi + movq %r14, %rbx + sbbq 8(%rcx), %rbx + movq %r11, %rdx + sbbq 16(%rcx), %rdx + movq %r10, %r13 + sbbq 24(%rcx), %r13 + movq %r9, %r12 + sbbq 32(%rcx), %r12 + movq %r8, %rax + sbbq 40(%rcx), %rax + movq %rax, %rcx + sarq $63, %rcx + cmovsq %r15, %rsi + movq %rsi, (%rdi) + cmovsq %r14, %rbx + movq %rbx, 8(%rdi) + cmovsq %r11, %rdx + movq %rdx, 16(%rdi) + cmovsq %r10, %r13 + movq %r13, 24(%rdi) + cmovsq %r9, %r12 + movq 
%r12, 32(%rdi) + cmovsq %r8, %rax + movq %rax, 40(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end90: + .size mcl_fp_addNF6Lbmi2, .Lfunc_end90-mcl_fp_addNF6Lbmi2 + + .globl mcl_fp_sub6Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub6Lbmi2,@function +mcl_fp_sub6Lbmi2: # @mcl_fp_sub6Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 40(%rdx), %r14 + movq 40(%rsi), %r8 + movq 32(%rsi), %r9 + movq 24(%rsi), %r10 + movq 16(%rsi), %r11 + movq (%rsi), %rax + movq 8(%rsi), %rsi + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %rsi + movq 24(%rdx), %r15 + movq 32(%rdx), %r12 + sbbq 16(%rdx), %r11 + movq %rax, (%rdi) + movq %rsi, 8(%rdi) + movq %r11, 16(%rdi) + sbbq %r15, %r10 + movq %r10, 24(%rdi) + sbbq %r12, %r9 + movq %r9, 32(%rdi) + sbbq %r14, %r8 + movq %r8, 40(%rdi) + sbbq $0, %rbx + testb $1, %bl + je .LBB91_2 +# BB#1: # %carry + movq 40(%rcx), %r14 + movq 32(%rcx), %r15 + movq 24(%rcx), %r12 + movq 8(%rcx), %rbx + movq 16(%rcx), %rdx + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %rsi, %rbx + movq %rbx, 8(%rdi) + adcq %r11, %rdx + movq %rdx, 16(%rdi) + adcq %r10, %r12 + movq %r12, 24(%rdi) + adcq %r9, %r15 + movq %r15, 32(%rdi) + adcq %r8, %r14 + movq %r14, 40(%rdi) +.LBB91_2: # %nocarry + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq +.Lfunc_end91: + .size mcl_fp_sub6Lbmi2, .Lfunc_end91-mcl_fp_sub6Lbmi2 + + .globl mcl_fp_subNF6Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF6Lbmi2,@function +mcl_fp_subNF6Lbmi2: # @mcl_fp_subNF6Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 40(%rsi), %r15 + movq 32(%rsi), %r8 + movq 24(%rsi), %r9 + movq 16(%rsi), %r10 + movq (%rsi), %r11 + movq 8(%rsi), %r14 + subq (%rdx), %r11 + sbbq 8(%rdx), %r14 + sbbq 16(%rdx), %r10 + sbbq 24(%rdx), %r9 + sbbq 32(%rdx), %r8 + sbbq 40(%rdx), %r15 + movq %r15, %rdx + sarq $63, %rdx + movq %rdx, %rbx + addq %rbx, %rbx + movq %rdx, %rsi + adcq %rsi, %rsi + andq 8(%rcx), %rsi + movq %r15, %rax + shrq $63, %rax + orq %rbx, %rax + andq (%rcx), %rax + movq 40(%rcx), %r12 + andq %rdx, %r12 + movq 32(%rcx), %r13 + andq %rdx, %r13 + movq 24(%rcx), %rbx + andq %rdx, %rbx + andq 16(%rcx), %rdx + addq %r11, %rax + movq %rax, (%rdi) + adcq %r14, %rsi + movq %rsi, 8(%rdi) + adcq %r10, %rdx + movq %rdx, 16(%rdi) + adcq %r9, %rbx + movq %rbx, 24(%rdi) + adcq %r8, %r13 + movq %r13, 32(%rdi) + adcq %r15, %r12 + movq %r12, 40(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end92: + .size mcl_fp_subNF6Lbmi2, .Lfunc_end92-mcl_fp_subNF6Lbmi2 + + .globl mcl_fpDbl_add6Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add6Lbmi2,@function +mcl_fpDbl_add6Lbmi2: # @mcl_fpDbl_add6Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 88(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 80(%rdx), %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq 72(%rdx), %r14 + movq 64(%rdx), %r15 + movq 24(%rsi), %rbp + movq 32(%rsi), %r13 + movq 16(%rdx), %r12 + movq (%rdx), %rbx + movq 8(%rdx), %rax + addq (%rsi), %rbx + adcq 8(%rsi), %rax + adcq 16(%rsi), %r12 + adcq 24(%rdx), %rbp + adcq 32(%rdx), %r13 + movq 56(%rdx), %r11 + movq 48(%rdx), %r9 + movq 40(%rdx), %rdx + movq %rbx, (%rdi) + movq 88(%rsi), %r8 + movq %rax, 8(%rdi) + movq 80(%rsi), %r10 + movq %r12, 16(%rdi) + movq 72(%rsi), %r12 + movq %rbp, 24(%rdi) + movq 40(%rsi), %rax + adcq %rdx, %rax + movq 64(%rsi), %rdx + movq %r13, 32(%rdi) + movq 56(%rsi), %r13 + movq 48(%rsi), %rbp + adcq %r9, %rbp + movq %rax, 40(%rdi) + adcq 
%r11, %r13 + adcq %r15, %rdx + adcq %r14, %r12 + adcq -16(%rsp), %r10 # 8-byte Folded Reload + adcq -8(%rsp), %r8 # 8-byte Folded Reload + sbbq %rax, %rax + andl $1, %eax + movq %rbp, %rsi + subq (%rcx), %rsi + movq %r13, %rbx + sbbq 8(%rcx), %rbx + movq %rdx, %r9 + sbbq 16(%rcx), %r9 + movq %r12, %r11 + sbbq 24(%rcx), %r11 + movq %r10, %r14 + sbbq 32(%rcx), %r14 + movq %r8, %r15 + sbbq 40(%rcx), %r15 + sbbq $0, %rax + andl $1, %eax + cmovneq %rbp, %rsi + movq %rsi, 48(%rdi) + testb %al, %al + cmovneq %r13, %rbx + movq %rbx, 56(%rdi) + cmovneq %rdx, %r9 + movq %r9, 64(%rdi) + cmovneq %r12, %r11 + movq %r11, 72(%rdi) + cmovneq %r10, %r14 + movq %r14, 80(%rdi) + cmovneq %r8, %r15 + movq %r15, 88(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end93: + .size mcl_fpDbl_add6Lbmi2, .Lfunc_end93-mcl_fpDbl_add6Lbmi2 + + .globl mcl_fpDbl_sub6Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub6Lbmi2,@function +mcl_fpDbl_sub6Lbmi2: # @mcl_fpDbl_sub6Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 88(%rdx), %r9 + movq 80(%rdx), %r10 + movq 72(%rdx), %r14 + movq 16(%rsi), %r8 + movq (%rsi), %r15 + movq 8(%rsi), %r11 + xorl %eax, %eax + subq (%rdx), %r15 + sbbq 8(%rdx), %r11 + sbbq 16(%rdx), %r8 + movq 24(%rsi), %rbx + sbbq 24(%rdx), %rbx + movq 32(%rsi), %r12 + sbbq 32(%rdx), %r12 + movq 64(%rdx), %r13 + movq %r15, (%rdi) + movq 56(%rdx), %rbp + movq %r11, 8(%rdi) + movq 48(%rdx), %r15 + movq 40(%rdx), %rdx + movq %r8, 16(%rdi) + movq 88(%rsi), %r8 + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %rdx, %rbx + movq 80(%rsi), %r11 + movq %r12, 32(%rdi) + movq 48(%rsi), %rdx + sbbq %r15, %rdx + movq 72(%rsi), %r15 + movq %rbx, 40(%rdi) + movq 64(%rsi), %r12 + movq 56(%rsi), %rsi + sbbq %rbp, %rsi + sbbq %r13, %r12 + sbbq %r14, %r15 + sbbq %r10, %r11 + sbbq %r9, %r8 + movl $0, %ebp + sbbq $0, %rbp + andl $1, %ebp + movq (%rcx), %r14 + cmoveq %rax, %r14 + testb %bpl, %bpl + movq 16(%rcx), %r9 + cmoveq %rax, %r9 + movq 8(%rcx), %rbp + cmoveq %rax, %rbp + movq 40(%rcx), %r10 + cmoveq %rax, %r10 + movq 32(%rcx), %rbx + cmoveq %rax, %rbx + cmovneq 24(%rcx), %rax + addq %rdx, %r14 + movq %r14, 48(%rdi) + adcq %rsi, %rbp + movq %rbp, 56(%rdi) + adcq %r12, %r9 + movq %r9, 64(%rdi) + adcq %r15, %rax + movq %rax, 72(%rdi) + adcq %r11, %rbx + movq %rbx, 80(%rdi) + adcq %r8, %r10 + movq %r10, 88(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end94: + .size mcl_fpDbl_sub6Lbmi2, .Lfunc_end94-mcl_fpDbl_sub6Lbmi2 + + .globl mcl_fp_mulUnitPre7Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre7Lbmi2,@function +mcl_fp_mulUnitPre7Lbmi2: # @mcl_fp_mulUnitPre7Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + mulxq 48(%rsi), %r8, %r11 + mulxq 40(%rsi), %r9, %r13 + mulxq 32(%rsi), %r10, %rcx + mulxq 8(%rsi), %r12, %r14 + mulxq (%rsi), %r15, %rbx + addq %r12, %rbx + mulxq 24(%rsi), %r12, %rax + mulxq 16(%rsi), %rdx, %rsi + movq %r15, (%rdi) + movq %rbx, 8(%rdi) + adcq %r14, %rdx + movq %rdx, 16(%rdi) + adcq %r12, %rsi + movq %rsi, 24(%rdi) + adcq %r10, %rax + movq %rax, 32(%rdi) + adcq %r9, %rcx + movq %rcx, 40(%rdi) + adcq %r8, %r13 + movq %r13, 48(%rdi) + adcq $0, %r11 + movq %r11, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end95: + .size mcl_fp_mulUnitPre7Lbmi2, .Lfunc_end95-mcl_fp_mulUnitPre7Lbmi2 + + .globl mcl_fpDbl_mulPre7Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre7Lbmi2,@function +mcl_fpDbl_mulPre7Lbmi2: # 
@mcl_fpDbl_mulPre7Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %r14 + movq %r14, -8(%rsp) # 8-byte Spill + movq %rsi, %r8 + movq %rdi, %r13 + movq %r13, -16(%rsp) # 8-byte Spill + movq (%r8), %rcx + movq %rcx, -56(%rsp) # 8-byte Spill + movq 8(%r8), %rax + movq %rax, -24(%rsp) # 8-byte Spill + movq (%r14), %rsi + movq %rax, %rdx + mulxq %rsi, %rbp, %rax + movq %rcx, %rdx + mulxq %rsi, %rdx, %rcx + movq %rdx, -64(%rsp) # 8-byte Spill + movq 24(%r8), %rdi + movq %rdi, -32(%rsp) # 8-byte Spill + movq 16(%r8), %rdx + movq %rdx, -40(%rsp) # 8-byte Spill + addq %rbp, %rcx + mulxq %rsi, %rbx, %rbp + adcq %rax, %rbx + movq %rdi, %rdx + mulxq %rsi, %r12, %rax + adcq %rbp, %r12 + movq 32(%r8), %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + mulxq %rsi, %r9, %rbp + adcq %rax, %r9 + movq 40(%r8), %rdi + movq %rdi, %rdx + mulxq %rsi, %r10, %rax + adcq %rbp, %r10 + movq 48(%r8), %r15 + movq %r15, %rdx + mulxq %rsi, %rsi, %r11 + adcq %rax, %rsi + movq -64(%rsp), %rax # 8-byte Reload + movq %rax, (%r13) + adcq $0, %r11 + movq 8(%r14), %r13 + movq -56(%rsp), %rdx # 8-byte Reload + mulxq %r13, %r14, %rax + movq %rax, -56(%rsp) # 8-byte Spill + addq %rcx, %r14 + movq -24(%rsp), %rdx # 8-byte Reload + mulxq %r13, %rcx, %rax + movq %rax, -24(%rsp) # 8-byte Spill + adcq %rbx, %rcx + movq -40(%rsp), %rdx # 8-byte Reload + mulxq %r13, %rbx, %rax + movq %rax, -40(%rsp) # 8-byte Spill + adcq %r12, %rbx + movq -32(%rsp), %rdx # 8-byte Reload + mulxq %r13, %rbp, %rax + movq %rax, -32(%rsp) # 8-byte Spill + adcq %r9, %rbp + movq -48(%rsp), %rdx # 8-byte Reload + mulxq %r13, %rax, %r9 + adcq %r10, %rax + movq %rdi, %rdx + mulxq %r13, %r10, %rdi + adcq %rsi, %r10 + movq %r15, %rdx + mulxq %r13, %r13, %rdx + adcq %r11, %r13 + sbbq %r12, %r12 + andl $1, %r12d + addq -56(%rsp), %rcx # 8-byte Folded Reload + adcq -24(%rsp), %rbx # 8-byte Folded Reload + adcq -40(%rsp), %rbp # 8-byte Folded Reload + adcq -32(%rsp), %rax # 8-byte Folded Reload + adcq %r9, %r10 + movq -16(%rsp), %rsi # 8-byte Reload + movq %r14, 8(%rsi) + adcq %rdi, %r13 + adcq %rdx, %r12 + movq (%r8), %rsi + movq %rsi, -32(%rsp) # 8-byte Spill + movq 8(%r8), %r11 + movq %r11, -24(%rsp) # 8-byte Spill + movq -8(%rsp), %rdx # 8-byte Reload + movq 16(%rdx), %rdi + movq %rsi, %rdx + mulxq %rdi, %r9, %rdx + movq %rdx, -72(%rsp) # 8-byte Spill + addq %rcx, %r9 + movq %r11, %rdx + mulxq %rdi, %r14, %rcx + movq %rcx, -80(%rsp) # 8-byte Spill + adcq %rbx, %r14 + movq 16(%r8), %rdx + movq %rdx, -40(%rsp) # 8-byte Spill + mulxq %rdi, %rsi, %rcx + movq %rcx, -88(%rsp) # 8-byte Spill + adcq %rbp, %rsi + movq 24(%r8), %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + mulxq %rdi, %rbp, %rcx + movq %rcx, -96(%rsp) # 8-byte Spill + adcq %rax, %rbp + movq 32(%r8), %rdx + movq %rdx, -56(%rsp) # 8-byte Spill + mulxq %rdi, %r11, %rax + movq %rax, -104(%rsp) # 8-byte Spill + adcq %r10, %r11 + movq 40(%r8), %rdx + movq %rdx, -64(%rsp) # 8-byte Spill + mulxq %rdi, %r15, %rax + adcq %r13, %r15 + movq 48(%r8), %r13 + movq %r13, %rdx + mulxq %rdi, %rcx, %rdx + adcq %r12, %rcx + sbbq %rbx, %rbx + andl $1, %ebx + addq -72(%rsp), %r14 # 8-byte Folded Reload + adcq -80(%rsp), %rsi # 8-byte Folded Reload + adcq -88(%rsp), %rbp # 8-byte Folded Reload + adcq -96(%rsp), %r11 # 8-byte Folded Reload + adcq -104(%rsp), %r15 # 8-byte Folded Reload + adcq %rax, %rcx + adcq %rdx, %rbx + movq -16(%rsp), %rax # 8-byte Reload + movq %r9, 16(%rax) + movq -8(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rdi + movq -32(%rsp), %rdx # 8-byte Reload + 
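+# Note: mcl_fpDbl_mulPre7Lbmi2 is plain schoolbook multiplication into a
+# 14-limb result. Each pass reloads one multiplier limb into %rdi and a[0]
+# into %rdx (as just above), forms seven mulxq partial products, folds them
+# into the spilled accumulator, and stores one finished output limb.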
mulxq %rdi, %r9, %rax + movq %rax, -32(%rsp) # 8-byte Spill + addq %r14, %r9 + movq -24(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %rax, %rdx + movq %rdx, -24(%rsp) # 8-byte Spill + adcq %rsi, %rax + movq -40(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %r14, %rdx + movq %rdx, -40(%rsp) # 8-byte Spill + adcq %rbp, %r14 + movq -48(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %r10, %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + adcq %r11, %r10 + movq -56(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %rbp, %rsi + adcq %r15, %rbp + movq -64(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %r11, %r15 + adcq %rcx, %r11 + movq %r13, %rdx + mulxq %rdi, %r13, %rcx + adcq %rbx, %r13 + sbbq %r12, %r12 + andl $1, %r12d + addq -32(%rsp), %rax # 8-byte Folded Reload + adcq -24(%rsp), %r14 # 8-byte Folded Reload + adcq -40(%rsp), %r10 # 8-byte Folded Reload + adcq -48(%rsp), %rbp # 8-byte Folded Reload + adcq %rsi, %r11 + movq -16(%rsp), %rdi # 8-byte Reload + movq %r9, 24(%rdi) + adcq %r15, %r13 + adcq %rcx, %r12 + movq (%r8), %rdx + movq %rdx, -32(%rsp) # 8-byte Spill + movq 8(%r8), %rbx + movq %rbx, -24(%rsp) # 8-byte Spill + movq -8(%rsp), %rcx # 8-byte Reload + movq 32(%rcx), %rcx + mulxq %rcx, %rsi, %rdx + movq %rdx, -80(%rsp) # 8-byte Spill + addq %rax, %rsi + movq %rbx, %rdx + mulxq %rcx, %r9, %rax + movq %rax, -88(%rsp) # 8-byte Spill + adcq %r14, %r9 + movq 16(%r8), %rdx + movq %rdx, -40(%rsp) # 8-byte Spill + mulxq %rcx, %rax, %rdx + movq %rdx, -96(%rsp) # 8-byte Spill + adcq %r10, %rax + movq 24(%r8), %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + mulxq %rcx, %r15, %rdx + movq %rdx, -104(%rsp) # 8-byte Spill + adcq %rbp, %r15 + movq 32(%r8), %rdx + movq %rdx, -56(%rsp) # 8-byte Spill + mulxq %rcx, %r10, %rbp + adcq %r11, %r10 + movq 40(%r8), %rdx + movq %rdx, -64(%rsp) # 8-byte Spill + mulxq %rcx, %r11, %rbx + adcq %r13, %r11 + movq 48(%r8), %rdx + movq %rdx, -72(%rsp) # 8-byte Spill + mulxq %rcx, %r14, %rcx + adcq %r12, %r14 + sbbq %r12, %r12 + andl $1, %r12d + addq -80(%rsp), %r9 # 8-byte Folded Reload + adcq -88(%rsp), %rax # 8-byte Folded Reload + adcq -96(%rsp), %r15 # 8-byte Folded Reload + adcq -104(%rsp), %r10 # 8-byte Folded Reload + adcq %rbp, %r11 + adcq %rbx, %r14 + adcq %rcx, %r12 + movq %rsi, 32(%rdi) + movq -8(%rsp), %rsi # 8-byte Reload + movq 40(%rsi), %rdi + movq -32(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %r13, %rcx + movq %rcx, -32(%rsp) # 8-byte Spill + addq %r9, %r13 + movq -24(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %rcx, %rdx + movq %rdx, -24(%rsp) # 8-byte Spill + adcq %rax, %rcx + movq -40(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %rax, %rdx + movq %rdx, -40(%rsp) # 8-byte Spill + adcq %r15, %rax + movq -48(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %rbx, %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + adcq %r10, %rbx + movq -56(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %rbp, %r15 + adcq %r11, %rbp + movq -64(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %r9, %r11 + adcq %r14, %r9 + movq -72(%rsp), %rdx # 8-byte Reload + mulxq %rdi, %r10, %rdx + adcq %r12, %r10 + sbbq %rdi, %rdi + andl $1, %edi + addq -32(%rsp), %rcx # 8-byte Folded Reload + adcq -24(%rsp), %rax # 8-byte Folded Reload + adcq -40(%rsp), %rbx # 8-byte Folded Reload + adcq -48(%rsp), %rbp # 8-byte Folded Reload + adcq %r15, %r9 + movq -16(%rsp), %r14 # 8-byte Reload + movq %r13, 40(%r14) + adcq %r11, %r10 + adcq %rdx, %rdi + movq 48(%rsi), %rdx + mulxq (%r8), %r11, %rsi + movq %rsi, -8(%rsp) # 8-byte Spill + addq %rcx, %r11 + mulxq 8(%r8), %rsi, %r15 + adcq %rax, %rsi + mulxq 16(%r8), %rcx, %rax + movq %rax, -24(%rsp) # 8-byte 
Spill + adcq %rbx, %rcx + mulxq 24(%r8), %rbx, %r12 + adcq %rbp, %rbx + mulxq 32(%r8), %rbp, %r13 + adcq %r9, %rbp + mulxq 40(%r8), %rax, %r9 + adcq %r10, %rax + mulxq 48(%r8), %rdx, %r8 + adcq %rdi, %rdx + sbbq %r10, %r10 + andl $1, %r10d + addq -8(%rsp), %rsi # 8-byte Folded Reload + adcq %r15, %rcx + movq %r11, 48(%r14) + movq %rsi, 56(%r14) + movq %rcx, 64(%r14) + adcq -24(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, 72(%r14) + adcq %r12, %rbp + movq %rbp, 80(%r14) + adcq %r13, %rax + movq %rax, 88(%r14) + adcq %r9, %rdx + movq %rdx, 96(%r14) + adcq %r8, %r10 + movq %r10, 104(%r14) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end96: + .size mcl_fpDbl_mulPre7Lbmi2, .Lfunc_end96-mcl_fpDbl_mulPre7Lbmi2 + + .globl mcl_fpDbl_sqrPre7Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre7Lbmi2,@function +mcl_fpDbl_sqrPre7Lbmi2: # @mcl_fpDbl_sqrPre7Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, -8(%rsp) # 8-byte Spill + movq 16(%rsi), %rdx + movq %rdx, -24(%rsp) # 8-byte Spill + movq (%rsi), %rcx + movq 8(%rsi), %rax + mulxq %rcx, %r8, %r10 + movq 24(%rsi), %rbx + movq %rbx, -32(%rsp) # 8-byte Spill + movq %rax, %rdx + mulxq %rcx, %r12, %rbp + movq %rbp, -16(%rsp) # 8-byte Spill + movq %rcx, %rdx + mulxq %rcx, %rdx, %rdi + movq %rdx, -48(%rsp) # 8-byte Spill + addq %r12, %rdi + adcq %rbp, %r8 + movq %rbx, %rdx + mulxq %rcx, %rbp, %r9 + adcq %r10, %rbp + movq 32(%rsi), %rdx + movq %rdx, -40(%rsp) # 8-byte Spill + mulxq %rcx, %r11, %r14 + adcq %r9, %r11 + movq 40(%rsi), %rdx + movq %rdx, -56(%rsp) # 8-byte Spill + mulxq %rcx, %r10, %r15 + adcq %r14, %r10 + movq 48(%rsi), %r14 + movq %r14, %rdx + mulxq %rcx, %rcx, %r13 + adcq %r15, %rcx + movq -8(%rsp), %rdx # 8-byte Reload + movq -48(%rsp), %rbx # 8-byte Reload + movq %rbx, (%rdx) + adcq $0, %r13 + addq %r12, %rdi + movq %rax, %rdx + mulxq %rax, %r12, %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + adcq %r8, %r12 + movq -24(%rsp), %rdx # 8-byte Reload + mulxq %rax, %r8, %rdx + movq %rdx, -24(%rsp) # 8-byte Spill + adcq %rbp, %r8 + movq -32(%rsp), %rdx # 8-byte Reload + mulxq %rax, %r9, %rbp + adcq %r11, %r9 + movq -40(%rsp), %rdx # 8-byte Reload + mulxq %rax, %r15, %rdx + movq %rdx, -32(%rsp) # 8-byte Spill + adcq %r10, %r15 + movq -56(%rsp), %rdx # 8-byte Reload + mulxq %rax, %r11, %rbx + adcq %rcx, %r11 + movq %r14, %rdx + mulxq %rax, %r14, %rax + adcq %r13, %r14 + sbbq %r13, %r13 + andl $1, %r13d + addq -16(%rsp), %r12 # 8-byte Folded Reload + adcq -48(%rsp), %r8 # 8-byte Folded Reload + adcq -24(%rsp), %r9 # 8-byte Folded Reload + adcq %rbp, %r15 + movq -8(%rsp), %rcx # 8-byte Reload + movq %rdi, 8(%rcx) + adcq -32(%rsp), %r11 # 8-byte Folded Reload + adcq %rbx, %r14 + adcq %rax, %r13 + movq (%rsi), %rdx + movq %rdx, -32(%rsp) # 8-byte Spill + movq 8(%rsi), %rcx + movq %rcx, -24(%rsp) # 8-byte Spill + movq 16(%rsi), %rbx + mulxq %rbx, %rax, %rdx + movq %rdx, -64(%rsp) # 8-byte Spill + addq %r12, %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq %rcx, %rdx + mulxq %rbx, %r10, %rax + movq %rax, -72(%rsp) # 8-byte Spill + adcq %r8, %r10 + movq %rbx, %rdx + mulxq %rbx, %r12, %rax + movq %rax, -80(%rsp) # 8-byte Spill + adcq %r9, %r12 + movq 24(%rsi), %rax + movq %rax, %rdx + mulxq %rbx, %r8, %rdi + movq %rdi, -56(%rsp) # 8-byte Spill + adcq %r8, %r15 + movq 32(%rsi), %rdx + movq %rdx, -40(%rsp) # 8-byte Spill + mulxq %rbx, %rcx, %rdx + movq %rdx, -88(%rsp) # 8-byte Spill + adcq %r11, %rcx + movq 40(%rsi), %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + 
mulxq %rbx, %rbp, %r11 + adcq %r14, %rbp + movq 48(%rsi), %r14 + movq %r14, %rdx + mulxq %rbx, %r9, %rdx + adcq %r13, %r9 + sbbq %rbx, %rbx + andl $1, %ebx + addq -64(%rsp), %r10 # 8-byte Folded Reload + adcq -72(%rsp), %r12 # 8-byte Folded Reload + adcq -80(%rsp), %r15 # 8-byte Folded Reload + adcq %rdi, %rcx + adcq -88(%rsp), %rbp # 8-byte Folded Reload + adcq %r11, %r9 + adcq %rdx, %rbx + movq -32(%rsp), %rdx # 8-byte Reload + mulxq %rax, %rdi, %rdx + movq %rdx, -32(%rsp) # 8-byte Spill + addq %r10, %rdi + movq -24(%rsp), %rdx # 8-byte Reload + mulxq %rax, %r11, %rdx + movq %rdx, -24(%rsp) # 8-byte Spill + adcq %r12, %r11 + adcq %r8, %r15 + movq %rax, %rdx + mulxq %rax, %r8, %rdx + movq %rdx, -64(%rsp) # 8-byte Spill + adcq %rcx, %r8 + movq -40(%rsp), %rdx # 8-byte Reload + mulxq %rax, %r13, %rcx + movq %rcx, -40(%rsp) # 8-byte Spill + adcq %rbp, %r13 + movq -48(%rsp), %rdx # 8-byte Reload + mulxq %rax, %r12, %rbp + adcq %r9, %r12 + movq %r14, %rdx + mulxq %rax, %rcx, %rax + adcq %rbx, %rcx + sbbq %r10, %r10 + andl $1, %r10d + addq -32(%rsp), %r11 # 8-byte Folded Reload + adcq -24(%rsp), %r15 # 8-byte Folded Reload + adcq -56(%rsp), %r8 # 8-byte Folded Reload + adcq -64(%rsp), %r13 # 8-byte Folded Reload + movq -8(%rsp), %rdx # 8-byte Reload + movq -16(%rsp), %rbx # 8-byte Reload + movq %rbx, 16(%rdx) + movq %rdi, 24(%rdx) + adcq -40(%rsp), %r12 # 8-byte Folded Reload + adcq %rbp, %rcx + adcq %rax, %r10 + movq (%rsi), %rdx + movq %rdx, -32(%rsp) # 8-byte Spill + movq 8(%rsi), %rdi + movq %rdi, -24(%rsp) # 8-byte Spill + movq 32(%rsi), %rbx + mulxq %rbx, %rax, %rdx + movq %rdx, -80(%rsp) # 8-byte Spill + addq %r11, %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq %rdi, %rdx + mulxq %rbx, %r9, %rax + movq %rax, -88(%rsp) # 8-byte Spill + adcq %r15, %r9 + movq 16(%rsi), %rdx + movq %rdx, -40(%rsp) # 8-byte Spill + mulxq %rbx, %r15, %rax + movq %rax, -96(%rsp) # 8-byte Spill + adcq %r8, %r15 + movq 24(%rsi), %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + mulxq %rbx, %r8, %rbp + adcq %r13, %r8 + movq %rbx, %rdx + mulxq %rbx, %r13, %r14 + adcq %r12, %r13 + movq 40(%rsi), %rax + movq %rax, %rdx + mulxq %rbx, %rdx, %rdi + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rdi, -56(%rsp) # 8-byte Spill + adcq %rdx, %rcx + movq 48(%rsi), %rdx + movq %rdx, -64(%rsp) # 8-byte Spill + mulxq %rbx, %r11, %rdx + adcq %r10, %r11 + sbbq %r12, %r12 + andl $1, %r12d + addq -80(%rsp), %r9 # 8-byte Folded Reload + adcq -88(%rsp), %r15 # 8-byte Folded Reload + adcq -96(%rsp), %r8 # 8-byte Folded Reload + adcq %rbp, %r13 + adcq %r14, %rcx + adcq %rdi, %r11 + adcq %rdx, %r12 + movq -32(%rsp), %rdx # 8-byte Reload + mulxq %rax, %r14, %rdi + addq %r9, %r14 + movq -24(%rsp), %rdx # 8-byte Reload + mulxq %rax, %rbx, %rdx + movq %rdx, -24(%rsp) # 8-byte Spill + adcq %r15, %rbx + movq -40(%rsp), %rdx # 8-byte Reload + mulxq %rax, %rbp, %rdx + movq %rdx, -32(%rsp) # 8-byte Spill + adcq %r8, %rbp + movq -48(%rsp), %rdx # 8-byte Reload + mulxq %rax, %r10, %r15 + adcq %r13, %r10 + adcq -72(%rsp), %rcx # 8-byte Folded Reload + movq %rax, %rdx + mulxq %rax, %r9, %r13 + adcq %r11, %r9 + movq -64(%rsp), %rdx # 8-byte Reload + mulxq %rax, %rax, %r11 + adcq %r12, %rax + sbbq %r8, %r8 + andl $1, %r8d + addq %rdi, %rbx + adcq -24(%rsp), %rbp # 8-byte Folded Reload + adcq -32(%rsp), %r10 # 8-byte Folded Reload + adcq %r15, %rcx + movq -8(%rsp), %rdi # 8-byte Reload + movq -16(%rsp), %rdx # 8-byte Reload + movq %rdx, 32(%rdi) + movq %r14, 40(%rdi) + adcq -56(%rsp), %r9 # 8-byte Folded Reload + adcq %r13, %rax + adcq %r11, %r8 + movq 
48(%rsi), %rdx + mulxq (%rsi), %r12, %r11 + addq %rbx, %r12 + mulxq 8(%rsi), %rbx, %r14 + adcq %rbp, %rbx + mulxq 16(%rsi), %rbp, %r15 + adcq %r10, %rbp + mulxq 24(%rsi), %rdi, %r10 + adcq %rcx, %rdi + mulxq 32(%rsi), %rcx, %r13 + adcq %r9, %rcx + mulxq 40(%rsi), %rsi, %r9 + adcq %rax, %rsi + mulxq %rdx, %rdx, %rax + adcq %r8, %rdx + sbbq %r8, %r8 + andl $1, %r8d + addq %r11, %rbx + adcq %r14, %rbp + movq -8(%rsp), %r11 # 8-byte Reload + movq %r12, 48(%r11) + movq %rbx, 56(%r11) + movq %rbp, 64(%r11) + adcq %r15, %rdi + movq %rdi, 72(%r11) + adcq %r10, %rcx + movq %rcx, 80(%r11) + adcq %r13, %rsi + movq %rsi, 88(%r11) + adcq %r9, %rdx + movq %rdx, 96(%r11) + adcq %rax, %r8 + movq %r8, 104(%r11) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end97: + .size mcl_fpDbl_sqrPre7Lbmi2, .Lfunc_end97-mcl_fpDbl_sqrPre7Lbmi2 + + .globl mcl_fp_mont7Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont7Lbmi2,@function +mcl_fp_mont7Lbmi2: # @mcl_fp_mont7Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $56, %rsp + movq %rdx, -16(%rsp) # 8-byte Spill + movq %rdi, -120(%rsp) # 8-byte Spill + movq 48(%rsi), %rdi + movq %rdi, -24(%rsp) # 8-byte Spill + movq (%rdx), %rax + movq %rdi, %rdx + mulxq %rax, %rdx, %r8 + movq %rdx, 48(%rsp) # 8-byte Spill + movq 40(%rsi), %rdx + movq %rdx, -32(%rsp) # 8-byte Spill + mulxq %rax, %rdx, %r9 + movq %rdx, 40(%rsp) # 8-byte Spill + movq 32(%rsi), %rdx + movq %rdx, -72(%rsp) # 8-byte Spill + movq 24(%rsi), %r11 + movq %r11, -64(%rsp) # 8-byte Spill + movq 16(%rsi), %r10 + movq %r10, -56(%rsp) # 8-byte Spill + movq (%rsi), %r15 + movq %r15, -40(%rsp) # 8-byte Spill + movq 8(%rsi), %rsi + movq %rsi, -48(%rsp) # 8-byte Spill + mulxq %rax, %r13, %rdi + movq %r11, %rdx + mulxq %rax, %r14, %rbp + movq %r10, %rdx + mulxq %rax, %r12, %rbx + movq %rsi, %rdx + mulxq %rax, %r10, %rsi + movq %r15, %rdx + mulxq %rax, %r15, %r11 + addq %r10, %r11 + adcq %r12, %rsi + movq %rsi, -112(%rsp) # 8-byte Spill + adcq %r14, %rbx + movq %rbx, -104(%rsp) # 8-byte Spill + adcq %r13, %rbp + movq %rbp, -96(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, -88(%rsp) # 8-byte Spill + adcq 48(%rsp), %r9 # 8-byte Folded Reload + movq %r9, -80(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, %r13 + movq -8(%rcx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq %r15, %rdx + imulq %rax, %rdx + movq (%rcx), %rdi + movq %rdi, 24(%rsp) # 8-byte Spill + movq 32(%rcx), %rax + movq %rax, 48(%rsp) # 8-byte Spill + mulxq %rax, %rbx, %r9 + movq 16(%rcx), %rsi + movq %rsi, 40(%rsp) # 8-byte Spill + mulxq %rsi, %r14, %rbp + movq 8(%rcx), %rax + movq %rax, 32(%rsp) # 8-byte Spill + mulxq %rax, %rsi, %rax + mulxq %rdi, %r8, %r12 + addq %rsi, %r12 + adcq %r14, %rax + movq %rax, %rsi + movq 24(%rcx), %rax + movq %rax, 16(%rsp) # 8-byte Spill + mulxq %rax, %r10, %r14 + adcq %rbp, %r10 + adcq %rbx, %r14 + movq 40(%rcx), %rax + movq %rax, 8(%rsp) # 8-byte Spill + mulxq %rax, %rbp, %rdi + adcq %r9, %rbp + movq 48(%rcx), %rax + movq %rax, (%rsp) # 8-byte Spill + mulxq %rax, %rax, %rbx + adcq %rdi, %rax + adcq $0, %rbx + addq %r15, %r8 + adcq %r11, %r12 + adcq -112(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -112(%rsp) # 8-byte Spill + adcq -104(%rsp), %r10 # 8-byte Folded Reload + adcq -96(%rsp), %r14 # 8-byte Folded Reload + adcq -88(%rsp), %rbp # 8-byte Folded Reload + adcq -80(%rsp), %rax # 8-byte Folded Reload + adcq %r13, %rbx + sbbq %r11, %r11 + andl $1, %r11d + movq -16(%rsp), %rcx # 8-byte Reload + movq 
8(%rcx), %rdx + mulxq -24(%rsp), %rdi, %rcx # 8-byte Folded Reload + movq %rdi, -96(%rsp) # 8-byte Spill + movq %rcx, -80(%rsp) # 8-byte Spill + mulxq -32(%rsp), %rsi, %rcx # 8-byte Folded Reload + movq %rsi, -128(%rsp) # 8-byte Spill + movq %rcx, -88(%rsp) # 8-byte Spill + mulxq -48(%rsp), %r9, %r8 # 8-byte Folded Reload + mulxq -40(%rsp), %rsi, %rcx # 8-byte Folded Reload + movq %rsi, -104(%rsp) # 8-byte Spill + addq %r9, %rcx + movq %rcx, %rdi + mulxq -56(%rsp), %rcx, %r9 # 8-byte Folded Reload + adcq %r8, %rcx + movq %rcx, %rsi + mulxq -64(%rsp), %r13, %rcx # 8-byte Folded Reload + adcq %r9, %r13 + mulxq -72(%rsp), %r8, %r15 # 8-byte Folded Reload + adcq %rcx, %r8 + adcq -128(%rsp), %r15 # 8-byte Folded Reload + movq -88(%rsp), %rdx # 8-byte Reload + adcq -96(%rsp), %rdx # 8-byte Folded Reload + movq -80(%rsp), %rcx # 8-byte Reload + adcq $0, %rcx + movq -104(%rsp), %r9 # 8-byte Reload + addq %r12, %r9 + movq %r9, -104(%rsp) # 8-byte Spill + adcq -112(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, %r12 + adcq %r10, %rsi + movq %rsi, -128(%rsp) # 8-byte Spill + adcq %r14, %r13 + adcq %rbp, %r8 + adcq %rax, %r15 + adcq %rbx, %rdx + movq %rdx, -88(%rsp) # 8-byte Spill + adcq %r11, %rcx + movq %rcx, -80(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -112(%rsp) # 8-byte Spill + movq %r9, %rdx + imulq -8(%rsp), %rdx # 8-byte Folded Reload + mulxq (%rsp), %r10, %rax # 8-byte Folded Reload + movq %rax, -96(%rsp) # 8-byte Spill + mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload + mulxq 32(%rsp), %rdi, %rbx # 8-byte Folded Reload + mulxq 24(%rsp), %r14, %r9 # 8-byte Folded Reload + addq %rdi, %r9 + mulxq 40(%rsp), %rbp, %r11 # 8-byte Folded Reload + adcq %rbx, %rbp + adcq %rcx, %r11 + mulxq 48(%rsp), %rbx, %rsi # 8-byte Folded Reload + adcq %rax, %rbx + mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload + adcq %rsi, %rax + adcq %r10, %rcx + movq -96(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq -104(%rsp), %r14 # 8-byte Folded Reload + adcq %r12, %r9 + adcq -128(%rsp), %rbp # 8-byte Folded Reload + adcq %r13, %r11 + adcq %r8, %rbx + adcq %r15, %rax + adcq -88(%rsp), %rcx # 8-byte Folded Reload + adcq -80(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -96(%rsp) # 8-byte Spill + adcq $0, -112(%rsp) # 8-byte Folded Spill + movq -16(%rsp), %rdx # 8-byte Reload + movq 16(%rdx), %rdx + mulxq -24(%rsp), %rdi, %rsi # 8-byte Folded Reload + movq %rdi, -104(%rsp) # 8-byte Spill + movq %rsi, -80(%rsp) # 8-byte Spill + mulxq -32(%rsp), %rdi, %rsi # 8-byte Folded Reload + movq %rdi, -128(%rsp) # 8-byte Spill + movq %rsi, -88(%rsp) # 8-byte Spill + mulxq -56(%rsp), %rdi, %r10 # 8-byte Folded Reload + mulxq -48(%rsp), %rsi, %r13 # 8-byte Folded Reload + mulxq -40(%rsp), %r8, %r15 # 8-byte Folded Reload + addq %rsi, %r15 + adcq %rdi, %r13 + mulxq -64(%rsp), %r12, %rsi # 8-byte Folded Reload + adcq %r10, %r12 + mulxq -72(%rsp), %r10, %r14 # 8-byte Folded Reload + adcq %rsi, %r10 + adcq -128(%rsp), %r14 # 8-byte Folded Reload + movq -88(%rsp), %rsi # 8-byte Reload + adcq -104(%rsp), %rsi # 8-byte Folded Reload + movq -80(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %r9, %r8 + movq %r8, -104(%rsp) # 8-byte Spill + adcq %rbp, %r15 + adcq %r11, %r13 + adcq %rbx, %r12 + adcq %rax, %r10 + adcq %rcx, %r14 + adcq -96(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -88(%rsp) # 8-byte Spill + adcq -112(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, %rbx + movq %r8, %rdx + imulq -8(%rsp), %rdx # 8-byte Folded Reload + 
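+# Note: classic CIOS Montgomery step. The imulq just above computed
+# m = t0 * n0' (mod 2^64), with n0' = -p^{-1} mod 2^64 spilled to -8(%rsp)
+# from -8(%rcx) in the prologue; the mulxq chain below adds m*p so the low
+# limb of the accumulator cancels and the window slides up one limb.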
mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload + movq %rcx, -112(%rsp) # 8-byte Spill + movq %rax, -96(%rsp) # 8-byte Spill + mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload + mulxq 32(%rsp), %rbp, %rsi # 8-byte Folded Reload + mulxq 24(%rsp), %r11, %r8 # 8-byte Folded Reload + addq %rbp, %r8 + mulxq 40(%rsp), %rbp, %r9 # 8-byte Folded Reload + adcq %rsi, %rbp + adcq %rcx, %r9 + mulxq 48(%rsp), %rsi, %rdi # 8-byte Folded Reload + adcq %rax, %rsi + mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload + adcq %rdi, %rax + adcq -112(%rsp), %rcx # 8-byte Folded Reload + movq -96(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq -104(%rsp), %r11 # 8-byte Folded Reload + adcq %r15, %r8 + adcq %r13, %rbp + adcq %r12, %r9 + adcq %r10, %rsi + adcq %r14, %rax + adcq -88(%rsp), %rcx # 8-byte Folded Reload + adcq -80(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -96(%rsp) # 8-byte Spill + adcq $0, %rbx + movq %rbx, -88(%rsp) # 8-byte Spill + movq -16(%rsp), %rdx # 8-byte Reload + movq 24(%rdx), %rdx + mulxq -24(%rsp), %rbx, %rdi # 8-byte Folded Reload + movq %rbx, -112(%rsp) # 8-byte Spill + movq %rdi, -80(%rsp) # 8-byte Spill + mulxq -32(%rsp), %rdi, %r13 # 8-byte Folded Reload + movq %rdi, -128(%rsp) # 8-byte Spill + mulxq -56(%rsp), %r10, %r11 # 8-byte Folded Reload + mulxq -48(%rsp), %rdi, %r15 # 8-byte Folded Reload + mulxq -40(%rsp), %rbx, %r12 # 8-byte Folded Reload + movq %rbx, -104(%rsp) # 8-byte Spill + addq %rdi, %r12 + adcq %r10, %r15 + mulxq -64(%rsp), %rbx, %rdi # 8-byte Folded Reload + adcq %r11, %rbx + mulxq -72(%rsp), %r10, %r14 # 8-byte Folded Reload + adcq %rdi, %r10 + adcq -128(%rsp), %r14 # 8-byte Folded Reload + adcq -112(%rsp), %r13 # 8-byte Folded Reload + movq -80(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + movq -104(%rsp), %rdi # 8-byte Reload + addq %r8, %rdi + movq %rdi, -104(%rsp) # 8-byte Spill + adcq %rbp, %r12 + adcq %r9, %r15 + adcq %rsi, %rbx + adcq %rax, %r10 + adcq %rcx, %r14 + adcq -96(%rsp), %r13 # 8-byte Folded Reload + adcq -88(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -96(%rsp) # 8-byte Spill + movq %rdi, %rdx + imulq -8(%rsp), %rdx # 8-byte Folded Reload + mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload + movq %rcx, -112(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload + mulxq 32(%rsp), %rbp, %rsi # 8-byte Folded Reload + mulxq 24(%rsp), %r11, %r8 # 8-byte Folded Reload + addq %rbp, %r8 + mulxq 40(%rsp), %rbp, %r9 # 8-byte Folded Reload + adcq %rsi, %rbp + adcq %rcx, %r9 + mulxq 48(%rsp), %rsi, %rdi # 8-byte Folded Reload + adcq %rax, %rsi + mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload + adcq %rdi, %rax + adcq -112(%rsp), %rcx # 8-byte Folded Reload + movq -88(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq -104(%rsp), %r11 # 8-byte Folded Reload + adcq %r12, %r8 + adcq %r15, %rbp + adcq %rbx, %r9 + adcq %r10, %rsi + adcq %r14, %rax + adcq %r13, %rcx + adcq -80(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -88(%rsp) # 8-byte Spill + adcq $0, -96(%rsp) # 8-byte Folded Spill + movq -16(%rsp), %rdx # 8-byte Reload + movq 32(%rdx), %rdx + mulxq -24(%rsp), %rbx, %rdi # 8-byte Folded Reload + movq %rbx, -104(%rsp) # 8-byte Spill + movq %rdi, -80(%rsp) # 8-byte Spill + mulxq -32(%rsp), %rdi, %r11 # 8-byte Folded Reload + movq %rdi, -112(%rsp) # 8-byte Spill + mulxq -56(%rsp), %r10, %r13 # 8-byte Folded Reload + mulxq -48(%rsp), %rdi, %r15 # 8-byte Folded Reload + mulxq -40(%rsp), %rbx, %r12 # 8-byte Folded Reload + 
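+# Note: mulxq (BMI2) leaves FLAGS untouched, so the adcq carry chain that
+# starts below can run interleaved with further mulxq partial products
+# without the compiler having to save and restore the carry.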
addq %rdi, %r12 + adcq %r10, %r15 + mulxq -64(%rsp), %r10, %rdi # 8-byte Folded Reload + adcq %r13, %r10 + mulxq -72(%rsp), %r13, %r14 # 8-byte Folded Reload + adcq %rdi, %r13 + adcq -112(%rsp), %r14 # 8-byte Folded Reload + adcq -104(%rsp), %r11 # 8-byte Folded Reload + movq -80(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %r8, %rbx + movq %rbx, -112(%rsp) # 8-byte Spill + adcq %rbp, %r12 + adcq %r9, %r15 + adcq %rsi, %r10 + adcq %rax, %r13 + adcq %rcx, %r14 + adcq -88(%rsp), %r11 # 8-byte Folded Reload + movq %r11, -128(%rsp) # 8-byte Spill + adcq -96(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -104(%rsp) # 8-byte Spill + movq %rbx, %rdx + imulq -8(%rsp), %rdx # 8-byte Folded Reload + mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload + movq %rcx, -88(%rsp) # 8-byte Spill + movq %rax, -96(%rsp) # 8-byte Spill + mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload + mulxq 32(%rsp), %rbp, %rsi # 8-byte Folded Reload + mulxq 24(%rsp), %r9, %r11 # 8-byte Folded Reload + addq %rbp, %r11 + mulxq 40(%rsp), %rbp, %r8 # 8-byte Folded Reload + adcq %rsi, %rbp + adcq %rcx, %r8 + mulxq 48(%rsp), %rsi, %rdi # 8-byte Folded Reload + adcq %rax, %rsi + mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload + adcq %rdi, %rax + adcq -88(%rsp), %rcx # 8-byte Folded Reload + movq -96(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq -112(%rsp), %r9 # 8-byte Folded Reload + adcq %r12, %r11 + adcq %r15, %rbp + adcq %r10, %r8 + adcq %r13, %rsi + adcq %r14, %rax + adcq -128(%rsp), %rcx # 8-byte Folded Reload + adcq -80(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -96(%rsp) # 8-byte Spill + adcq $0, -104(%rsp) # 8-byte Folded Spill + movq -16(%rsp), %rdx # 8-byte Reload + movq 40(%rdx), %rdx + mulxq -24(%rsp), %rbx, %rdi # 8-byte Folded Reload + movq %rbx, -112(%rsp) # 8-byte Spill + movq %rdi, -80(%rsp) # 8-byte Spill + mulxq -32(%rsp), %rbx, %rdi # 8-byte Folded Reload + movq %rbx, -128(%rsp) # 8-byte Spill + movq %rdi, -88(%rsp) # 8-byte Spill + mulxq -56(%rsp), %rbx, %r10 # 8-byte Folded Reload + mulxq -48(%rsp), %rdi, %r13 # 8-byte Folded Reload + mulxq -40(%rsp), %r9, %r12 # 8-byte Folded Reload + addq %rdi, %r12 + adcq %rbx, %r13 + mulxq -64(%rsp), %r15, %rdi # 8-byte Folded Reload + adcq %r10, %r15 + mulxq -72(%rsp), %r10, %r14 # 8-byte Folded Reload + adcq %rdi, %r10 + adcq -128(%rsp), %r14 # 8-byte Folded Reload + movq -88(%rsp), %rdi # 8-byte Reload + adcq -112(%rsp), %rdi # 8-byte Folded Reload + movq -80(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %r11, %r9 + movq %r9, -112(%rsp) # 8-byte Spill + adcq %rbp, %r12 + adcq %r8, %r13 + adcq %rsi, %r15 + adcq %rax, %r10 + adcq %rcx, %r14 + adcq -96(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, -88(%rsp) # 8-byte Spill + adcq -104(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -104(%rsp) # 8-byte Spill + movq %r9, %rdx + imulq -8(%rsp), %rdx # 8-byte Folded Reload + mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload + movq %rcx, -128(%rsp) # 8-byte Spill + movq %rax, -96(%rsp) # 8-byte Spill + mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload + mulxq 32(%rsp), %rdi, %rsi # 8-byte Folded Reload + mulxq 24(%rsp), %r11, %rbx # 8-byte Folded Reload + addq %rdi, %rbx + mulxq 40(%rsp), %r8, %r9 # 8-byte Folded Reload + adcq %rsi, %r8 + adcq %rcx, %r9 + mulxq 48(%rsp), %rdi, %rbp # 8-byte Folded Reload + adcq %rax, %rdi + mulxq 8(%rsp), %rcx, %rsi # 8-byte Folded Reload + adcq %rbp, %rcx + adcq -128(%rsp), %rsi # 
8-byte Folded Reload + movq -96(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq -112(%rsp), %r11 # 8-byte Folded Reload + adcq %r12, %rbx + adcq %r13, %r8 + adcq %r15, %r9 + adcq %r10, %rdi + adcq %r14, %rcx + adcq -88(%rsp), %rsi # 8-byte Folded Reload + adcq -80(%rsp), %rax # 8-byte Folded Reload + movq %rax, -96(%rsp) # 8-byte Spill + movq -104(%rsp), %r12 # 8-byte Reload + adcq $0, %r12 + movq -16(%rsp), %rax # 8-byte Reload + movq 48(%rax), %rdx + mulxq -24(%rsp), %rbp, %rax # 8-byte Folded Reload + movq %rbp, -80(%rsp) # 8-byte Spill + movq %rax, -16(%rsp) # 8-byte Spill + mulxq -32(%rsp), %rbp, %rax # 8-byte Folded Reload + movq %rbp, -88(%rsp) # 8-byte Spill + movq %rax, -24(%rsp) # 8-byte Spill + mulxq -72(%rsp), %rbp, %rax # 8-byte Folded Reload + movq %rbp, -72(%rsp) # 8-byte Spill + movq %rax, -32(%rsp) # 8-byte Spill + mulxq -64(%rsp), %r13, %rbp # 8-byte Folded Reload + mulxq -56(%rsp), %r14, %r15 # 8-byte Folded Reload + mulxq -48(%rsp), %rax, %r11 # 8-byte Folded Reload + mulxq -40(%rsp), %rdx, %r10 # 8-byte Folded Reload + movq %rdx, -40(%rsp) # 8-byte Spill + addq %rax, %r10 + adcq %r14, %r11 + adcq %r13, %r15 + adcq -72(%rsp), %rbp # 8-byte Folded Reload + movq -32(%rsp), %r14 # 8-byte Reload + adcq -88(%rsp), %r14 # 8-byte Folded Reload + movq -24(%rsp), %rdx # 8-byte Reload + adcq -80(%rsp), %rdx # 8-byte Folded Reload + movq -16(%rsp), %rax # 8-byte Reload + adcq $0, %rax + movq -40(%rsp), %r13 # 8-byte Reload + addq %rbx, %r13 + movq %r13, -40(%rsp) # 8-byte Spill + adcq %r8, %r10 + adcq %r9, %r11 + adcq %rdi, %r15 + adcq %rcx, %rbp + movq %rbp, -48(%rsp) # 8-byte Spill + adcq %rsi, %r14 + movq %r14, -32(%rsp) # 8-byte Spill + adcq -96(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -24(%rsp) # 8-byte Spill + adcq %r12, %rax + movq %rax, -16(%rsp) # 8-byte Spill + sbbq %rdi, %rdi + movq -8(%rsp), %rdx # 8-byte Reload + imulq %r13, %rdx + mulxq 16(%rsp), %rbp, %rsi # 8-byte Folded Reload + mulxq 32(%rsp), %rcx, %rbx # 8-byte Folded Reload + mulxq 24(%rsp), %r13, %rax # 8-byte Folded Reload + addq %rcx, %rax + mulxq 40(%rsp), %rcx, %r9 # 8-byte Folded Reload + adcq %rbx, %rcx + adcq %rbp, %r9 + mulxq 48(%rsp), %rbp, %rbx # 8-byte Folded Reload + adcq %rsi, %rbp + mulxq 8(%rsp), %rsi, %r14 # 8-byte Folded Reload + adcq %rbx, %rsi + mulxq (%rsp), %rdx, %rbx # 8-byte Folded Reload + adcq %r14, %rdx + adcq $0, %rbx + andl $1, %edi + addq -40(%rsp), %r13 # 8-byte Folded Reload + adcq %r10, %rax + adcq %r11, %rcx + adcq %r15, %r9 + adcq -48(%rsp), %rbp # 8-byte Folded Reload + adcq -32(%rsp), %rsi # 8-byte Folded Reload + adcq -24(%rsp), %rdx # 8-byte Folded Reload + adcq -16(%rsp), %rbx # 8-byte Folded Reload + adcq $0, %rdi + movq %rax, %r8 + subq 24(%rsp), %r8 # 8-byte Folded Reload + movq %rcx, %r10 + sbbq 32(%rsp), %r10 # 8-byte Folded Reload + movq %r9, %r11 + sbbq 40(%rsp), %r11 # 8-byte Folded Reload + movq %rbp, %r14 + sbbq 16(%rsp), %r14 # 8-byte Folded Reload + movq %rsi, %r15 + sbbq 48(%rsp), %r15 # 8-byte Folded Reload + movq %rdx, %r12 + sbbq 8(%rsp), %r12 # 8-byte Folded Reload + movq %rbx, %r13 + sbbq (%rsp), %r13 # 8-byte Folded Reload + sbbq $0, %rdi + andl $1, %edi + cmovneq %rbx, %r13 + testb %dil, %dil + cmovneq %rax, %r8 + movq -120(%rsp), %rax # 8-byte Reload + movq %r8, (%rax) + cmovneq %rcx, %r10 + movq %r10, 8(%rax) + cmovneq %r9, %r11 + movq %r11, 16(%rax) + cmovneq %rbp, %r14 + movq %r14, 24(%rax) + cmovneq %rsi, %r15 + movq %r15, 32(%rax) + cmovneq %rdx, %r12 + movq %r12, 40(%rax) + movq %r13, 48(%rax) + addq $56, %rsp + popq %rbx + popq %r12 
+ popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end98: + .size mcl_fp_mont7Lbmi2, .Lfunc_end98-mcl_fp_mont7Lbmi2 + + .globl mcl_fp_montNF7Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF7Lbmi2,@function +mcl_fp_montNF7Lbmi2: # @mcl_fp_montNF7Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $40, %rsp + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rdi, -104(%rsp) # 8-byte Spill + movq (%rsi), %rax + movq %rax, -32(%rsp) # 8-byte Spill + movq 8(%rsi), %rdi + movq %rdi, -48(%rsp) # 8-byte Spill + movq (%rdx), %rbp + movq %rdi, %rdx + mulxq %rbp, %rdi, %rbx + movq %rax, %rdx + mulxq %rbp, %r8, %r14 + movq 16(%rsi), %rdx + movq %rdx, -56(%rsp) # 8-byte Spill + addq %rdi, %r14 + mulxq %rbp, %r15, %rax + adcq %rbx, %r15 + movq 24(%rsi), %rdx + movq %rdx, -64(%rsp) # 8-byte Spill + mulxq %rbp, %rbx, %rdi + adcq %rax, %rbx + movq 32(%rsi), %rdx + movq %rdx, -72(%rsp) # 8-byte Spill + mulxq %rbp, %r11, %rax + adcq %rdi, %r11 + movq 40(%rsi), %rdx + movq %rdx, -80(%rsp) # 8-byte Spill + mulxq %rbp, %r9, %rdi + adcq %rax, %r9 + movq 48(%rsi), %rdx + movq %rdx, -88(%rsp) # 8-byte Spill + mulxq %rbp, %r10, %rbp + adcq %rdi, %r10 + adcq $0, %rbp + movq -8(%rcx), %rax + movq %rax, 8(%rsp) # 8-byte Spill + movq %r8, %rdx + imulq %rax, %rdx + movq (%rcx), %rax + movq %rax, 24(%rsp) # 8-byte Spill + mulxq %rax, %rax, %rsi + movq %rsi, -96(%rsp) # 8-byte Spill + addq %r8, %rax + movq 8(%rcx), %rax + movq %rax, 16(%rsp) # 8-byte Spill + mulxq %rax, %r8, %rsi + movq %rsi, -112(%rsp) # 8-byte Spill + adcq %r14, %r8 + movq 16(%rcx), %rax + movq %rax, (%rsp) # 8-byte Spill + mulxq %rax, %rsi, %r13 + adcq %r15, %rsi + movq 24(%rcx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + mulxq %rax, %r12, %rax + adcq %rbx, %r12 + movq 32(%rcx), %rdi + movq %rdi, -16(%rsp) # 8-byte Spill + mulxq %rdi, %r15, %rbx + adcq %r11, %r15 + movq 40(%rcx), %rdi + movq %rdi, -24(%rsp) # 8-byte Spill + mulxq %rdi, %r14, %rdi + adcq %r9, %r14 + movq 48(%rcx), %rcx + movq %rcx, 32(%rsp) # 8-byte Spill + mulxq %rcx, %r11, %rcx + adcq %r10, %r11 + adcq $0, %rbp + addq -96(%rsp), %r8 # 8-byte Folded Reload + movq %r8, -96(%rsp) # 8-byte Spill + adcq -112(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -112(%rsp) # 8-byte Spill + adcq %r13, %r12 + adcq %rax, %r15 + adcq %rbx, %r14 + adcq %rdi, %r11 + adcq %rcx, %rbp + movq -40(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rdx + mulxq -48(%rsp), %rcx, %rsi # 8-byte Folded Reload + mulxq -32(%rsp), %r13, %rax # 8-byte Folded Reload + addq %rcx, %rax + mulxq -56(%rsp), %rcx, %rdi # 8-byte Folded Reload + adcq %rsi, %rcx + mulxq -64(%rsp), %rsi, %r8 # 8-byte Folded Reload + adcq %rdi, %rsi + mulxq -72(%rsp), %rdi, %r9 # 8-byte Folded Reload + adcq %r8, %rdi + mulxq -80(%rsp), %r8, %rbx # 8-byte Folded Reload + adcq %r9, %r8 + mulxq -88(%rsp), %r9, %r10 # 8-byte Folded Reload + adcq %rbx, %r9 + adcq $0, %r10 + addq -96(%rsp), %r13 # 8-byte Folded Reload + adcq -112(%rsp), %rax # 8-byte Folded Reload + adcq %r12, %rcx + adcq %r15, %rsi + adcq %r14, %rdi + adcq %r11, %r8 + adcq %rbp, %r9 + adcq $0, %r10 + movq %r13, %rdx + imulq 8(%rsp), %rdx # 8-byte Folded Reload + mulxq 24(%rsp), %rbp, %rbx # 8-byte Folded Reload + movq %rbx, -96(%rsp) # 8-byte Spill + addq %r13, %rbp + mulxq 16(%rsp), %rbp, %r14 # 8-byte Folded Reload + adcq %rax, %rbp + mulxq (%rsp), %rax, %r11 # 8-byte Folded Reload + adcq %rcx, %rax + mulxq -8(%rsp), %r12, %rcx # 8-byte Folded Reload + adcq %rsi, %r12 + mulxq -16(%rsp), %r15, %rbx # 8-byte Folded Reload + adcq %rdi, 
%r15 + mulxq -24(%rsp), %r13, %rdi # 8-byte Folded Reload + adcq %r8, %r13 + mulxq 32(%rsp), %rsi, %rdx # 8-byte Folded Reload + adcq %r9, %rsi + adcq $0, %r10 + addq -96(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, -96(%rsp) # 8-byte Spill + adcq %r14, %rax + movq %rax, -112(%rsp) # 8-byte Spill + adcq %r11, %r12 + adcq %rcx, %r15 + adcq %rbx, %r13 + adcq %rdi, %rsi + adcq %rdx, %r10 + movq -40(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rdx + mulxq -48(%rsp), %rcx, %rax # 8-byte Folded Reload + mulxq -32(%rsp), %r14, %rdi # 8-byte Folded Reload + addq %rcx, %rdi + mulxq -56(%rsp), %rbp, %rcx # 8-byte Folded Reload + adcq %rax, %rbp + mulxq -64(%rsp), %rbx, %r8 # 8-byte Folded Reload + adcq %rcx, %rbx + mulxq -72(%rsp), %rax, %r9 # 8-byte Folded Reload + adcq %r8, %rax + mulxq -80(%rsp), %r8, %rcx # 8-byte Folded Reload + movq %rcx, -120(%rsp) # 8-byte Spill + adcq %r9, %r8 + mulxq -88(%rsp), %r9, %r11 # 8-byte Folded Reload + adcq -120(%rsp), %r9 # 8-byte Folded Reload + adcq $0, %r11 + addq -96(%rsp), %r14 # 8-byte Folded Reload + adcq -112(%rsp), %rdi # 8-byte Folded Reload + adcq %r12, %rbp + adcq %r15, %rbx + adcq %r13, %rax + adcq %rsi, %r8 + adcq %r10, %r9 + adcq $0, %r11 + movq %r14, %rdx + imulq 8(%rsp), %rdx # 8-byte Folded Reload + mulxq 24(%rsp), %rsi, %rcx # 8-byte Folded Reload + movq %rcx, -96(%rsp) # 8-byte Spill + addq %r14, %rsi + mulxq 16(%rsp), %rsi, %r13 # 8-byte Folded Reload + adcq %rdi, %rsi + mulxq (%rsp), %rdi, %r15 # 8-byte Folded Reload + adcq %rbp, %rdi + mulxq -8(%rsp), %rcx, %rbp # 8-byte Folded Reload + adcq %rbx, %rcx + mulxq -16(%rsp), %r14, %rbx # 8-byte Folded Reload + adcq %rax, %r14 + mulxq -24(%rsp), %r12, %rax # 8-byte Folded Reload + adcq %r8, %r12 + mulxq 32(%rsp), %r10, %rdx # 8-byte Folded Reload + adcq %r9, %r10 + adcq $0, %r11 + addq -96(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -96(%rsp) # 8-byte Spill + adcq %r13, %rdi + movq %rdi, -112(%rsp) # 8-byte Spill + adcq %r15, %rcx + adcq %rbp, %r14 + adcq %rbx, %r12 + adcq %rax, %r10 + adcq %rdx, %r11 + movq -40(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rdx + mulxq -48(%rsp), %rsi, %rax # 8-byte Folded Reload + mulxq -32(%rsp), %r15, %rbp # 8-byte Folded Reload + addq %rsi, %rbp + mulxq -56(%rsp), %rbx, %rdi # 8-byte Folded Reload + adcq %rax, %rbx + mulxq -64(%rsp), %rsi, %rax # 8-byte Folded Reload + adcq %rdi, %rsi + mulxq -72(%rsp), %rdi, %r9 # 8-byte Folded Reload + adcq %rax, %rdi + mulxq -80(%rsp), %r8, %rax # 8-byte Folded Reload + adcq %r9, %r8 + mulxq -88(%rsp), %r9, %r13 # 8-byte Folded Reload + adcq %rax, %r9 + adcq $0, %r13 + addq -96(%rsp), %r15 # 8-byte Folded Reload + adcq -112(%rsp), %rbp # 8-byte Folded Reload + adcq %rcx, %rbx + adcq %r14, %rsi + adcq %r12, %rdi + adcq %r10, %r8 + adcq %r11, %r9 + adcq $0, %r13 + movq %r15, %rdx + imulq 8(%rsp), %rdx # 8-byte Folded Reload + mulxq 24(%rsp), %rcx, %rax # 8-byte Folded Reload + movq %rax, -96(%rsp) # 8-byte Spill + addq %r15, %rcx + mulxq 16(%rsp), %rcx, %r11 # 8-byte Folded Reload + adcq %rbp, %rcx + mulxq (%rsp), %rbp, %r10 # 8-byte Folded Reload + adcq %rbx, %rbp + mulxq -8(%rsp), %rax, %rbx # 8-byte Folded Reload + adcq %rsi, %rax + mulxq -16(%rsp), %r14, %rsi # 8-byte Folded Reload + adcq %rdi, %r14 + mulxq -24(%rsp), %r15, %rdi # 8-byte Folded Reload + adcq %r8, %r15 + mulxq 32(%rsp), %r12, %rdx # 8-byte Folded Reload + adcq %r9, %r12 + adcq $0, %r13 + addq -96(%rsp), %rcx # 8-byte Folded Reload + adcq %r11, %rbp + movq %rbp, -96(%rsp) # 8-byte Spill + adcq %r10, %rax + movq %rax, -112(%rsp) # 8-byte Spill + 
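+# Note: the NF variant tracks no 0/1 overflow word during these folds
+# (contrast the sbbq/andl $1 pairs in mcl_fp_mont7Lbmi2); its epilogue
+# instead picks the canonical result by sign, via sarq $63 and a cmovsq
+# chain, after a trial subtraction of p.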
adcq %rbx, %r14 + adcq %rsi, %r15 + adcq %rdi, %r12 + adcq %rdx, %r13 + movq -40(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rdx + mulxq -48(%rsp), %rsi, %rdi # 8-byte Folded Reload + mulxq -32(%rsp), %r11, %r8 # 8-byte Folded Reload + addq %rsi, %r8 + mulxq -56(%rsp), %rbx, %rsi # 8-byte Folded Reload + adcq %rdi, %rbx + mulxq -64(%rsp), %rbp, %rdi # 8-byte Folded Reload + adcq %rsi, %rbp + mulxq -72(%rsp), %rsi, %r9 # 8-byte Folded Reload + adcq %rdi, %rsi + mulxq -80(%rsp), %rdi, %rax # 8-byte Folded Reload + adcq %r9, %rdi + mulxq -88(%rsp), %r9, %r10 # 8-byte Folded Reload + adcq %rax, %r9 + adcq $0, %r10 + addq %rcx, %r11 + adcq -96(%rsp), %r8 # 8-byte Folded Reload + adcq -112(%rsp), %rbx # 8-byte Folded Reload + adcq %r14, %rbp + adcq %r15, %rsi + adcq %r12, %rdi + adcq %r13, %r9 + adcq $0, %r10 + movq %r11, %rdx + imulq 8(%rsp), %rdx # 8-byte Folded Reload + mulxq 24(%rsp), %rcx, %rax # 8-byte Folded Reload + movq %rax, -96(%rsp) # 8-byte Spill + addq %r11, %rcx + mulxq 16(%rsp), %rcx, %r13 # 8-byte Folded Reload + adcq %r8, %rcx + mulxq (%rsp), %rax, %r8 # 8-byte Folded Reload + adcq %rbx, %rax + mulxq -8(%rsp), %rbx, %r11 # 8-byte Folded Reload + adcq %rbp, %rbx + mulxq -16(%rsp), %r14, %rbp # 8-byte Folded Reload + adcq %rsi, %r14 + mulxq -24(%rsp), %r15, %rsi # 8-byte Folded Reload + adcq %rdi, %r15 + mulxq 32(%rsp), %r12, %rdx # 8-byte Folded Reload + adcq %r9, %r12 + adcq $0, %r10 + addq -96(%rsp), %rcx # 8-byte Folded Reload + adcq %r13, %rax + movq %rax, -96(%rsp) # 8-byte Spill + adcq %r8, %rbx + movq %rbx, -112(%rsp) # 8-byte Spill + adcq %r11, %r14 + adcq %rbp, %r15 + adcq %rsi, %r12 + adcq %rdx, %r10 + movq -40(%rsp), %rax # 8-byte Reload + movq 40(%rax), %rdx + mulxq -48(%rsp), %rsi, %rax # 8-byte Folded Reload + mulxq -32(%rsp), %r11, %rbp # 8-byte Folded Reload + addq %rsi, %rbp + mulxq -56(%rsp), %rbx, %rdi # 8-byte Folded Reload + adcq %rax, %rbx + mulxq -64(%rsp), %rsi, %rax # 8-byte Folded Reload + adcq %rdi, %rsi + mulxq -72(%rsp), %rdi, %r9 # 8-byte Folded Reload + adcq %rax, %rdi + mulxq -80(%rsp), %r8, %rax # 8-byte Folded Reload + adcq %r9, %r8 + mulxq -88(%rsp), %r9, %r13 # 8-byte Folded Reload + adcq %rax, %r9 + adcq $0, %r13 + addq %rcx, %r11 + adcq -96(%rsp), %rbp # 8-byte Folded Reload + adcq -112(%rsp), %rbx # 8-byte Folded Reload + adcq %r14, %rsi + adcq %r15, %rdi + adcq %r12, %r8 + adcq %r10, %r9 + adcq $0, %r13 + movq %r11, %rdx + imulq 8(%rsp), %rdx # 8-byte Folded Reload + mulxq 24(%rsp), %rcx, %rax # 8-byte Folded Reload + movq %rax, -112(%rsp) # 8-byte Spill + addq %r11, %rcx + mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload + movq %rax, -120(%rsp) # 8-byte Spill + adcq %rbp, %rcx + mulxq (%rsp), %rax, %rbp # 8-byte Folded Reload + movq %rbp, -128(%rsp) # 8-byte Spill + adcq %rbx, %rax + movq %rax, -96(%rsp) # 8-byte Spill + mulxq -8(%rsp), %r14, %rbp # 8-byte Folded Reload + adcq %rsi, %r14 + mulxq -16(%rsp), %r11, %r12 # 8-byte Folded Reload + adcq %rdi, %r11 + mulxq -24(%rsp), %r10, %rbx # 8-byte Folded Reload + adcq %r8, %r10 + mulxq 32(%rsp), %rdi, %rax # 8-byte Folded Reload + adcq %r9, %rdi + adcq $0, %r13 + addq -112(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -112(%rsp) # 8-byte Spill + movq -96(%rsp), %rcx # 8-byte Reload + adcq -120(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -96(%rsp) # 8-byte Spill + adcq -128(%rsp), %r14 # 8-byte Folded Reload + adcq %rbp, %r11 + adcq %r12, %r10 + adcq %rbx, %rdi + adcq %rax, %r13 + movq -40(%rsp), %rax # 8-byte Reload + movq 48(%rax), %rdx + mulxq -48(%rsp), %rbp, %r9 # 8-byte Folded 
Reload + mulxq -32(%rsp), %r8, %rax # 8-byte Folded Reload + addq %rbp, %rax + mulxq -56(%rsp), %rbx, %rcx # 8-byte Folded Reload + adcq %r9, %rbx + mulxq -64(%rsp), %rbp, %r9 # 8-byte Folded Reload + adcq %rcx, %rbp + mulxq -72(%rsp), %rcx, %r12 # 8-byte Folded Reload + adcq %r9, %rcx + mulxq -80(%rsp), %r15, %rsi # 8-byte Folded Reload + movq %rsi, -32(%rsp) # 8-byte Spill + adcq %r12, %r15 + mulxq -88(%rsp), %r12, %r9 # 8-byte Folded Reload + adcq -32(%rsp), %r12 # 8-byte Folded Reload + adcq $0, %r9 + addq -112(%rsp), %r8 # 8-byte Folded Reload + adcq -96(%rsp), %rax # 8-byte Folded Reload + adcq %r14, %rbx + adcq %r11, %rbp + adcq %r10, %rcx + adcq %rdi, %r15 + adcq %r13, %r12 + adcq $0, %r9 + movq 8(%rsp), %rdx # 8-byte Reload + imulq %r8, %rdx + mulxq 24(%rsp), %rdi, %rsi # 8-byte Folded Reload + movq %rsi, 8(%rsp) # 8-byte Spill + addq %r8, %rdi + mulxq 16(%rsp), %r8, %rsi # 8-byte Folded Reload + movq %rsi, -32(%rsp) # 8-byte Spill + adcq %rax, %r8 + movq (%rsp), %r11 # 8-byte Reload + mulxq %r11, %rsi, %rax + movq %rax, -40(%rsp) # 8-byte Spill + adcq %rbx, %rsi + movq -8(%rsp), %r14 # 8-byte Reload + mulxq %r14, %rdi, %rax + movq %rax, -48(%rsp) # 8-byte Spill + adcq %rbp, %rdi + movq -16(%rsp), %rbp # 8-byte Reload + mulxq %rbp, %rax, %rbx + movq %rbx, -56(%rsp) # 8-byte Spill + adcq %rcx, %rax + movq -24(%rsp), %rbx # 8-byte Reload + mulxq %rbx, %rcx, %r13 + adcq %r15, %rcx + mulxq 32(%rsp), %rdx, %r15 # 8-byte Folded Reload + adcq %r12, %rdx + adcq $0, %r9 + addq 8(%rsp), %r8 # 8-byte Folded Reload + adcq -32(%rsp), %rsi # 8-byte Folded Reload + adcq -40(%rsp), %rdi # 8-byte Folded Reload + adcq -48(%rsp), %rax # 8-byte Folded Reload + adcq -56(%rsp), %rcx # 8-byte Folded Reload + adcq %r13, %rdx + adcq %r15, %r9 + movq %r8, %r13 + subq 24(%rsp), %r13 # 8-byte Folded Reload + movq %rsi, %r12 + sbbq 16(%rsp), %r12 # 8-byte Folded Reload + movq %rdi, %r10 + sbbq %r11, %r10 + movq %rax, %r11 + sbbq %r14, %r11 + movq %rcx, %r14 + sbbq %rbp, %r14 + movq %rdx, %r15 + sbbq %rbx, %r15 + movq %r9, %rbp + sbbq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, %rbx + sarq $63, %rbx + cmovsq %r8, %r13 + movq -104(%rsp), %rbx # 8-byte Reload + movq %r13, (%rbx) + cmovsq %rsi, %r12 + movq %r12, 8(%rbx) + cmovsq %rdi, %r10 + movq %r10, 16(%rbx) + cmovsq %rax, %r11 + movq %r11, 24(%rbx) + cmovsq %rcx, %r14 + movq %r14, 32(%rbx) + cmovsq %rdx, %r15 + movq %r15, 40(%rbx) + cmovsq %r9, %rbp + movq %rbp, 48(%rbx) + addq $40, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end99: + .size mcl_fp_montNF7Lbmi2, .Lfunc_end99-mcl_fp_montNF7Lbmi2 + + .globl mcl_fp_montRed7Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed7Lbmi2,@function +mcl_fp_montRed7Lbmi2: # @mcl_fp_montRed7Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $64, %rsp + movq %rdx, %rcx + movq %rdi, -88(%rsp) # 8-byte Spill + movq -8(%rcx), %rax + movq %rax, -24(%rsp) # 8-byte Spill + movq (%rcx), %rbx + movq %rbx, 32(%rsp) # 8-byte Spill + movq (%rsi), %rdx + movq %rdx, 48(%rsp) # 8-byte Spill + imulq %rax, %rdx + movq 48(%rcx), %rax + movq %rax, 16(%rsp) # 8-byte Spill + mulxq %rax, %rdi, %rax + movq %rdi, 40(%rsp) # 8-byte Spill + movq %rax, -48(%rsp) # 8-byte Spill + movq 40(%rcx), %r8 + movq %r8, (%rsp) # 8-byte Spill + movq 32(%rcx), %r9 + movq %r9, 24(%rsp) # 8-byte Spill + movq 24(%rcx), %rbp + movq %rbp, 8(%rsp) # 8-byte Spill + movq 16(%rcx), %rdi + movq %rdi, 56(%rsp) # 8-byte Spill + movq 8(%rcx), %rax + movq %rax, -16(%rsp) # 8-byte 
Spill + mulxq %r8, %r10, %r11 + mulxq %r9, %r14, %r9 + mulxq %rbp, %r8, %r13 + mulxq %rdi, %rcx, %r12 + mulxq %rax, %rbp, %r15 + mulxq %rbx, %rdx, %rdi + addq %rbp, %rdi + adcq %rcx, %r15 + adcq %r8, %r12 + adcq %r14, %r13 + adcq %r10, %r9 + adcq 40(%rsp), %r11 # 8-byte Folded Reload + movq -48(%rsp), %rcx # 8-byte Reload + adcq $0, %rcx + addq 48(%rsp), %rdx # 8-byte Folded Reload + adcq 8(%rsi), %rdi + adcq 16(%rsi), %r15 + adcq 24(%rsi), %r12 + adcq 32(%rsi), %r13 + adcq 40(%rsi), %r9 + movq %r9, -96(%rsp) # 8-byte Spill + adcq 48(%rsi), %r11 + movq %r11, -72(%rsp) # 8-byte Spill + adcq 56(%rsi), %rcx + movq %rcx, -48(%rsp) # 8-byte Spill + movq 104(%rsi), %r8 + movq 96(%rsi), %rdx + movq 88(%rsi), %rbp + movq 80(%rsi), %rbx + movq 72(%rsi), %rcx + movq 64(%rsi), %rsi + adcq $0, %rsi + movq %rsi, -104(%rsp) # 8-byte Spill + adcq $0, %rcx + movq %rcx, -80(%rsp) # 8-byte Spill + adcq $0, %rbx + movq %rbx, -40(%rsp) # 8-byte Spill + adcq $0, %rbp + movq %rbp, -32(%rsp) # 8-byte Spill + adcq $0, %rdx + movq %rdx, -8(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 48(%rsp) # 8-byte Spill + sbbq %rcx, %rcx + andl $1, %ecx + movq %rcx, 40(%rsp) # 8-byte Spill + movq %rdi, %rdx + movq -24(%rsp), %r9 # 8-byte Reload + imulq %r9, %rdx + mulxq 16(%rsp), %rsi, %rcx # 8-byte Folded Reload + movq %rsi, -112(%rsp) # 8-byte Spill + movq %rcx, -56(%rsp) # 8-byte Spill + mulxq (%rsp), %rsi, %rcx # 8-byte Folded Reload + movq %rsi, -120(%rsp) # 8-byte Spill + movq %rcx, -64(%rsp) # 8-byte Spill + movq 24(%rsp), %rbx # 8-byte Reload + mulxq %rbx, %rcx, %rbp + movq %rcx, -128(%rsp) # 8-byte Spill + mulxq 8(%rsp), %r10, %r14 # 8-byte Folded Reload + mulxq 56(%rsp), %rsi, %r11 # 8-byte Folded Reload + mulxq %rax, %rcx, %r8 + mulxq 32(%rsp), %rdx, %rax # 8-byte Folded Reload + addq %rcx, %rax + adcq %rsi, %r8 + adcq %r10, %r11 + adcq -128(%rsp), %r14 # 8-byte Folded Reload + adcq -120(%rsp), %rbp # 8-byte Folded Reload + movq -64(%rsp), %rsi # 8-byte Reload + adcq -112(%rsp), %rsi # 8-byte Folded Reload + movq -56(%rsp), %rcx # 8-byte Reload + adcq $0, %rcx + addq %rdi, %rdx + adcq %r15, %rax + adcq %r12, %r8 + adcq %r13, %r11 + adcq -96(%rsp), %r14 # 8-byte Folded Reload + adcq -72(%rsp), %rbp # 8-byte Folded Reload + adcq -48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -64(%rsp) # 8-byte Spill + adcq -104(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -56(%rsp) # 8-byte Spill + adcq $0, -80(%rsp) # 8-byte Folded Spill + adcq $0, -40(%rsp) # 8-byte Folded Spill + adcq $0, -32(%rsp) # 8-byte Folded Spill + adcq $0, -8(%rsp) # 8-byte Folded Spill + adcq $0, 48(%rsp) # 8-byte Folded Spill + adcq $0, 40(%rsp) # 8-byte Folded Spill + movq %rax, %rdx + imulq %r9, %rdx + mulxq 16(%rsp), %rsi, %rcx # 8-byte Folded Reload + movq %rsi, -96(%rsp) # 8-byte Spill + movq %rcx, -48(%rsp) # 8-byte Spill + movq (%rsp), %r15 # 8-byte Reload + mulxq %r15, %rsi, %rcx + movq %rsi, -104(%rsp) # 8-byte Spill + movq %rcx, -72(%rsp) # 8-byte Spill + mulxq %rbx, %rcx, %r13 + movq %rcx, -112(%rsp) # 8-byte Spill + mulxq 8(%rsp), %rbx, %r12 # 8-byte Folded Reload + mulxq 56(%rsp), %rdi, %r9 # 8-byte Folded Reload + mulxq -16(%rsp), %rsi, %r10 # 8-byte Folded Reload + mulxq 32(%rsp), %rdx, %rcx # 8-byte Folded Reload + addq %rsi, %rcx + adcq %rdi, %r10 + adcq %rbx, %r9 + adcq -112(%rsp), %r12 # 8-byte Folded Reload + adcq -104(%rsp), %r13 # 8-byte Folded Reload + movq -72(%rsp), %rdi # 8-byte Reload + adcq -96(%rsp), %rdi # 8-byte Folded Reload + movq -48(%rsp), %rsi # 8-byte Reload + adcq $0, %rsi + addq %rax, %rdx + adcq %r8, %rcx 
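+# Note: mcl_fp_montRed7Lbmi2 reduces a 14-limb input in place: every round
+# recomputes m = t0 * n0' and adds m*p so the bottom limb cancels (the
+# addq into %rdx just above), carrying the pending high limbs forward;
+# after seven rounds a trial subtraction of p yields the canonical result.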
+ adcq %r11, %r10 + adcq %r14, %r9 + adcq %rbp, %r12 + adcq -64(%rsp), %r13 # 8-byte Folded Reload + adcq -56(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, -72(%rsp) # 8-byte Spill + adcq -80(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -48(%rsp) # 8-byte Spill + adcq $0, -40(%rsp) # 8-byte Folded Spill + adcq $0, -32(%rsp) # 8-byte Folded Spill + adcq $0, -8(%rsp) # 8-byte Folded Spill + adcq $0, 48(%rsp) # 8-byte Folded Spill + adcq $0, 40(%rsp) # 8-byte Folded Spill + movq %rcx, %rdx + imulq -24(%rsp), %rdx # 8-byte Folded Reload + mulxq 16(%rsp), %rsi, %rax # 8-byte Folded Reload + movq %rsi, -80(%rsp) # 8-byte Spill + movq %rax, -56(%rsp) # 8-byte Spill + mulxq %r15, %rsi, %rax + movq %rsi, -96(%rsp) # 8-byte Spill + movq %rax, -64(%rsp) # 8-byte Spill + mulxq 24(%rsp), %r8, %r15 # 8-byte Folded Reload + mulxq 8(%rsp), %r14, %rbp # 8-byte Folded Reload + mulxq 56(%rsp), %rdi, %rbx # 8-byte Folded Reload + mulxq -16(%rsp), %rsi, %r11 # 8-byte Folded Reload + mulxq 32(%rsp), %rdx, %rax # 8-byte Folded Reload + addq %rsi, %rax + adcq %rdi, %r11 + adcq %r14, %rbx + adcq %r8, %rbp + adcq -96(%rsp), %r15 # 8-byte Folded Reload + movq -64(%rsp), %rdi # 8-byte Reload + adcq -80(%rsp), %rdi # 8-byte Folded Reload + movq -56(%rsp), %rsi # 8-byte Reload + adcq $0, %rsi + addq %rcx, %rdx + adcq %r10, %rax + adcq %r9, %r11 + adcq %r12, %rbx + adcq %r13, %rbp + adcq -72(%rsp), %r15 # 8-byte Folded Reload + adcq -48(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, -64(%rsp) # 8-byte Spill + adcq -40(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -56(%rsp) # 8-byte Spill + adcq $0, -32(%rsp) # 8-byte Folded Spill + adcq $0, -8(%rsp) # 8-byte Folded Spill + adcq $0, 48(%rsp) # 8-byte Folded Spill + adcq $0, 40(%rsp) # 8-byte Folded Spill + movq %rax, %rdx + imulq -24(%rsp), %rdx # 8-byte Folded Reload + movq 16(%rsp), %r10 # 8-byte Reload + mulxq %r10, %rsi, %rcx + movq %rsi, -72(%rsp) # 8-byte Spill + movq %rcx, -40(%rsp) # 8-byte Spill + mulxq (%rsp), %rsi, %rcx # 8-byte Folded Reload + movq %rsi, -80(%rsp) # 8-byte Spill + movq %rcx, -48(%rsp) # 8-byte Spill + mulxq 24(%rsp), %rsi, %rcx # 8-byte Folded Reload + movq %rsi, -96(%rsp) # 8-byte Spill + mulxq 8(%rsp), %r12, %r13 # 8-byte Folded Reload + mulxq 56(%rsp), %r8, %r14 # 8-byte Folded Reload + mulxq -16(%rsp), %rsi, %r9 # 8-byte Folded Reload + mulxq 32(%rsp), %rdx, %rdi # 8-byte Folded Reload + addq %rsi, %rdi + adcq %r8, %r9 + adcq %r12, %r14 + adcq -96(%rsp), %r13 # 8-byte Folded Reload + adcq -80(%rsp), %rcx # 8-byte Folded Reload + movq -48(%rsp), %r8 # 8-byte Reload + adcq -72(%rsp), %r8 # 8-byte Folded Reload + movq -40(%rsp), %rsi # 8-byte Reload + adcq $0, %rsi + addq %rax, %rdx + adcq %r11, %rdi + adcq %rbx, %r9 + adcq %rbp, %r14 + adcq %r15, %r13 + adcq -64(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -64(%rsp) # 8-byte Spill + adcq -56(%rsp), %r8 # 8-byte Folded Reload + movq %r8, -48(%rsp) # 8-byte Spill + adcq -32(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -40(%rsp) # 8-byte Spill + adcq $0, -8(%rsp) # 8-byte Folded Spill + adcq $0, 48(%rsp) # 8-byte Folded Spill + adcq $0, 40(%rsp) # 8-byte Folded Spill + movq %rdi, %rdx + imulq -24(%rsp), %rdx # 8-byte Folded Reload + mulxq %r10, %rcx, %rax + movq %rcx, -72(%rsp) # 8-byte Spill + movq %rax, -32(%rsp) # 8-byte Spill + mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload + movq %rcx, -80(%rsp) # 8-byte Spill + movq %rax, -56(%rsp) # 8-byte Spill + mulxq 24(%rsp), %rax, %rcx # 8-byte Folded Reload + movq %rax, -96(%rsp) # 8-byte Spill + movq 8(%rsp), %r12 # 8-byte Reload + 
mulxq %r12, %rax, %r15 + movq %rax, -104(%rsp) # 8-byte Spill + mulxq 56(%rsp), %rsi, %r11 # 8-byte Folded Reload + movq -16(%rsp), %r10 # 8-byte Reload + mulxq %r10, %rax, %rbp + movq 32(%rsp), %rbx # 8-byte Reload + mulxq %rbx, %rdx, %r8 + addq %rax, %r8 + adcq %rsi, %rbp + adcq -104(%rsp), %r11 # 8-byte Folded Reload + adcq -96(%rsp), %r15 # 8-byte Folded Reload + adcq -80(%rsp), %rcx # 8-byte Folded Reload + movq -56(%rsp), %rsi # 8-byte Reload + adcq -72(%rsp), %rsi # 8-byte Folded Reload + movq -32(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %rdi, %rdx + adcq %r9, %r8 + adcq %r14, %rbp + adcq %r13, %r11 + adcq -64(%rsp), %r15 # 8-byte Folded Reload + adcq -48(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -48(%rsp) # 8-byte Spill + adcq -40(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -56(%rsp) # 8-byte Spill + adcq -8(%rsp), %rax # 8-byte Folded Reload + movq %rax, -32(%rsp) # 8-byte Spill + adcq $0, 48(%rsp) # 8-byte Folded Spill + adcq $0, 40(%rsp) # 8-byte Folded Spill + movq -24(%rsp), %rdx # 8-byte Reload + imulq %r8, %rdx + mulxq %r12, %rax, %r13 + mulxq %r10, %rcx, %rdi + mulxq %rbx, %r12, %r14 + addq %rcx, %r14 + mulxq 56(%rsp), %rcx, %r10 # 8-byte Folded Reload + adcq %rdi, %rcx + adcq %rax, %r10 + mulxq 24(%rsp), %rax, %r9 # 8-byte Folded Reload + adcq %r13, %rax + mulxq (%rsp), %rdi, %r13 # 8-byte Folded Reload + adcq %r9, %rdi + mulxq 16(%rsp), %rdx, %rsi # 8-byte Folded Reload + adcq %r13, %rdx + adcq $0, %rsi + addq %r8, %r12 + adcq %rbp, %r14 + adcq %r11, %rcx + adcq %r15, %r10 + adcq -48(%rsp), %rax # 8-byte Folded Reload + adcq -56(%rsp), %rdi # 8-byte Folded Reload + adcq -32(%rsp), %rdx # 8-byte Folded Reload + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq 40(%rsp), %rbx # 8-byte Reload + adcq $0, %rbx + movq %r14, %rbp + subq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rcx, %r13 + sbbq -16(%rsp), %r13 # 8-byte Folded Reload + movq %r10, %r8 + sbbq 56(%rsp), %r8 # 8-byte Folded Reload + movq %rax, %r9 + sbbq 8(%rsp), %r9 # 8-byte Folded Reload + movq %rdi, %r11 + sbbq 24(%rsp), %r11 # 8-byte Folded Reload + movq %rdx, %r15 + sbbq (%rsp), %r15 # 8-byte Folded Reload + movq %rsi, %r12 + sbbq 16(%rsp), %r12 # 8-byte Folded Reload + sbbq $0, %rbx + andl $1, %ebx + cmovneq %rsi, %r12 + testb %bl, %bl + cmovneq %r14, %rbp + movq -88(%rsp), %rsi # 8-byte Reload + movq %rbp, (%rsi) + cmovneq %rcx, %r13 + movq %r13, 8(%rsi) + cmovneq %r10, %r8 + movq %r8, 16(%rsi) + cmovneq %rax, %r9 + movq %r9, 24(%rsi) + cmovneq %rdi, %r11 + movq %r11, 32(%rsi) + cmovneq %rdx, %r15 + movq %r15, 40(%rsi) + movq %r12, 48(%rsi) + addq $64, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end100: + .size mcl_fp_montRed7Lbmi2, .Lfunc_end100-mcl_fp_montRed7Lbmi2 + + .globl mcl_fp_addPre7Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre7Lbmi2,@function +mcl_fp_addPre7Lbmi2: # @mcl_fp_addPre7Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r8 + movq 48(%rsi), %r14 + movq 40(%rdx), %r9 + movq 40(%rsi), %r15 + movq 32(%rdx), %r10 + movq 24(%rdx), %r11 + movq 16(%rdx), %r12 + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + movq 24(%rsi), %rax + movq 32(%rsi), %rbx + adcq 16(%rsi), %r12 + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %r12, 16(%rdi) + adcq %r11, %rax + movq %rax, 24(%rdi) + adcq %r10, %rbx + movq %rbx, 32(%rdi) + adcq %r9, %r15 + movq %r15, 40(%rdi) + adcq %r8, %r14 + movq %r14, 48(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r14 + 
popq %r15 + retq +.Lfunc_end101: + .size mcl_fp_addPre7Lbmi2, .Lfunc_end101-mcl_fp_addPre7Lbmi2 + + .globl mcl_fp_subPre7Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre7Lbmi2,@function +mcl_fp_subPre7Lbmi2: # @mcl_fp_subPre7Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r8 + movq 48(%rsi), %r10 + movq 40(%rdx), %r9 + movq 40(%rsi), %r15 + movq 24(%rdx), %r11 + movq 32(%rdx), %r14 + movq (%rsi), %rbx + movq 8(%rsi), %r12 + xorl %eax, %eax + subq (%rdx), %rbx + sbbq 8(%rdx), %r12 + movq 16(%rsi), %rcx + sbbq 16(%rdx), %rcx + movq 32(%rsi), %rdx + movq 24(%rsi), %rsi + movq %rbx, (%rdi) + movq %r12, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r11, %rsi + movq %rsi, 24(%rdi) + sbbq %r14, %rdx + movq %rdx, 32(%rdi) + sbbq %r9, %r15 + movq %r15, 40(%rdi) + sbbq %r8, %r10 + movq %r10, 48(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq +.Lfunc_end102: + .size mcl_fp_subPre7Lbmi2, .Lfunc_end102-mcl_fp_subPre7Lbmi2 + + .globl mcl_fp_shr1_7Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_7Lbmi2,@function +mcl_fp_shr1_7Lbmi2: # @mcl_fp_shr1_7Lbmi2 +# BB#0: + movq 48(%rsi), %r8 + movq 40(%rsi), %r9 + movq 32(%rsi), %r10 + movq 24(%rsi), %rax + movq 16(%rsi), %rcx + movq (%rsi), %rdx + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rdx + movq %rdx, (%rdi) + shrdq $1, %rcx, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rax, %rcx + movq %rcx, 16(%rdi) + shrdq $1, %r10, %rax + movq %rax, 24(%rdi) + shrdq $1, %r9, %r10 + movq %r10, 32(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 40(%rdi) + shrq %r8 + movq %r8, 48(%rdi) + retq +.Lfunc_end103: + .size mcl_fp_shr1_7Lbmi2, .Lfunc_end103-mcl_fp_shr1_7Lbmi2 + + .globl mcl_fp_add7Lbmi2 + .align 16, 0x90 + .type mcl_fp_add7Lbmi2,@function +mcl_fp_add7Lbmi2: # @mcl_fp_add7Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r14 + movq 48(%rsi), %r8 + movq 40(%rdx), %r15 + movq 40(%rsi), %r9 + movq 32(%rdx), %r12 + movq 24(%rdx), %r13 + movq 16(%rdx), %r10 + movq (%rdx), %r11 + movq 8(%rdx), %rdx + addq (%rsi), %r11 + adcq 8(%rsi), %rdx + movq 24(%rsi), %rax + movq 32(%rsi), %rbx + adcq 16(%rsi), %r10 + movq %r11, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + adcq %r13, %rax + movq %rax, 24(%rdi) + adcq %r12, %rbx + movq %rbx, 32(%rdi) + adcq %r15, %r9 + movq %r9, 40(%rdi) + adcq %r14, %r8 + movq %r8, 48(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %r11 + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r10 + sbbq 24(%rcx), %rax + sbbq 32(%rcx), %rbx + sbbq 40(%rcx), %r9 + sbbq 48(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne .LBB104_2 +# BB#1: # %nocarry + movq %r11, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + movq %rax, 24(%rdi) + movq %rbx, 32(%rdi) + movq %r9, 40(%rdi) + movq %r8, 48(%rdi) +.LBB104_2: # %carry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end104: + .size mcl_fp_add7Lbmi2, .Lfunc_end104-mcl_fp_add7Lbmi2 + + .globl mcl_fp_addNF7Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF7Lbmi2,@function +mcl_fp_addNF7Lbmi2: # @mcl_fp_addNF7Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r9 + movq 40(%rdx), %rbp + movq 32(%rdx), %r10 + movq 24(%rdx), %r11 + movq 16(%rdx), %r14 + movq (%rdx), %r12 + movq 8(%rdx), %r15 + addq (%rsi), %r12 + adcq 8(%rsi), %r15 + adcq 16(%rsi), %r14 + adcq 24(%rsi), %r11 + adcq 32(%rsi), %r10 + adcq 40(%rsi), %rbp + movq %rbp, -8(%rsp) # 8-byte Spill + adcq 48(%rsi), %r9 + movq %r12, %rsi + subq (%rcx), %rsi + movq %r15, %rdx + sbbq 
8(%rcx), %rdx + movq %r14, %rax + sbbq 16(%rcx), %rax + movq %r11, %rbx + sbbq 24(%rcx), %rbx + movq %r10, %r13 + sbbq 32(%rcx), %r13 + sbbq 40(%rcx), %rbp + movq %r9, %r8 + sbbq 48(%rcx), %r8 + movq %r8, %rcx + sarq $63, %rcx + cmovsq %r12, %rsi + movq %rsi, (%rdi) + cmovsq %r15, %rdx + movq %rdx, 8(%rdi) + cmovsq %r14, %rax + movq %rax, 16(%rdi) + cmovsq %r11, %rbx + movq %rbx, 24(%rdi) + cmovsq %r10, %r13 + movq %r13, 32(%rdi) + cmovsq -8(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 40(%rdi) + cmovsq %r9, %r8 + movq %r8, 48(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end105: + .size mcl_fp_addNF7Lbmi2, .Lfunc_end105-mcl_fp_addNF7Lbmi2 + + .globl mcl_fp_sub7Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub7Lbmi2,@function +mcl_fp_sub7Lbmi2: # @mcl_fp_sub7Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r14 + movq 48(%rsi), %r8 + movq 40(%rdx), %r15 + movq 40(%rsi), %r9 + movq 32(%rdx), %r12 + movq (%rsi), %rax + movq 8(%rsi), %r11 + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %r11 + movq 16(%rsi), %r13 + sbbq 16(%rdx), %r13 + movq 32(%rsi), %r10 + movq 24(%rsi), %rsi + sbbq 24(%rdx), %rsi + movq %rax, (%rdi) + movq %r11, 8(%rdi) + movq %r13, 16(%rdi) + movq %rsi, 24(%rdi) + sbbq %r12, %r10 + movq %r10, 32(%rdi) + sbbq %r15, %r9 + movq %r9, 40(%rdi) + sbbq %r14, %r8 + movq %r8, 48(%rdi) + sbbq $0, %rbx + testb $1, %bl + je .LBB106_2 +# BB#1: # %carry + movq 48(%rcx), %r14 + movq 40(%rcx), %r15 + movq 32(%rcx), %r12 + movq 24(%rcx), %rbx + movq 8(%rcx), %rdx + movq 16(%rcx), %rbp + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %r11, %rdx + movq %rdx, 8(%rdi) + adcq %r13, %rbp + movq %rbp, 16(%rdi) + adcq %rsi, %rbx + movq %rbx, 24(%rdi) + adcq %r10, %r12 + movq %r12, 32(%rdi) + adcq %r9, %r15 + movq %r15, 40(%rdi) + adcq %r8, %r14 + movq %r14, 48(%rdi) +.LBB106_2: # %nocarry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end106: + .size mcl_fp_sub7Lbmi2, .Lfunc_end106-mcl_fp_sub7Lbmi2 + + .globl mcl_fp_subNF7Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF7Lbmi2,@function +mcl_fp_subNF7Lbmi2: # @mcl_fp_subNF7Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 48(%rsi), %r12 + movq 40(%rsi), %rax + movq 32(%rsi), %r9 + movq 24(%rsi), %r10 + movq 16(%rsi), %r11 + movq (%rsi), %r14 + movq 8(%rsi), %r15 + subq (%rdx), %r14 + sbbq 8(%rdx), %r15 + sbbq 16(%rdx), %r11 + sbbq 24(%rdx), %r10 + sbbq 32(%rdx), %r9 + sbbq 40(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + sbbq 48(%rdx), %r12 + movq %r12, %rax + sarq $63, %rax + movq %rax, %rsi + shldq $1, %r12, %rsi + andq (%r8), %rsi + movq 48(%r8), %r13 + andq %rax, %r13 + movq 40(%r8), %rbx + andq %rax, %rbx + movq 32(%r8), %rdx + andq %rax, %rdx + movq 24(%r8), %rbp + andq %rax, %rbp + movq 16(%r8), %rcx + andq %rax, %rcx + andq 8(%r8), %rax + addq %r14, %rsi + adcq %r15, %rax + movq %rsi, (%rdi) + movq %rax, 8(%rdi) + adcq %r11, %rcx + movq %rcx, 16(%rdi) + adcq %r10, %rbp + movq %rbp, 24(%rdi) + adcq %r9, %rdx + movq %rdx, 32(%rdi) + adcq -8(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, 40(%rdi) + adcq %r12, %r13 + movq %r13, 48(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end107: + .size mcl_fp_subNF7Lbmi2, .Lfunc_end107-mcl_fp_subNF7Lbmi2 + + .globl mcl_fpDbl_add7Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add7Lbmi2,@function +mcl_fpDbl_add7Lbmi2: # @mcl_fpDbl_add7Lbmi2 +# BB#0: + 
pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 104(%rdx), %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq 96(%rdx), %rax + movq %rax, -24(%rsp) # 8-byte Spill + movq 88(%rdx), %r11 + movq 80(%rdx), %r14 + movq 24(%rsi), %r15 + movq 32(%rsi), %r12 + movq 16(%rdx), %r9 + movq (%rdx), %rax + movq 8(%rdx), %rbx + addq (%rsi), %rax + adcq 8(%rsi), %rbx + adcq 16(%rsi), %r9 + adcq 24(%rdx), %r15 + adcq 32(%rdx), %r12 + movq 72(%rdx), %r13 + movq 64(%rdx), %rbp + movq %rax, (%rdi) + movq 56(%rdx), %r10 + movq %rbx, 8(%rdi) + movq 48(%rdx), %rcx + movq 40(%rdx), %rdx + movq %r9, 16(%rdi) + movq 104(%rsi), %r9 + movq %r15, 24(%rdi) + movq 40(%rsi), %rbx + adcq %rdx, %rbx + movq 96(%rsi), %r15 + movq %r12, 32(%rdi) + movq 48(%rsi), %rdx + adcq %rcx, %rdx + movq 88(%rsi), %rax + movq %rbx, 40(%rdi) + movq 56(%rsi), %rcx + adcq %r10, %rcx + movq 80(%rsi), %r12 + movq %rdx, 48(%rdi) + movq 72(%rsi), %rdx + movq 64(%rsi), %rsi + adcq %rbp, %rsi + adcq %r13, %rdx + adcq %r14, %r12 + adcq %r11, %rax + movq %rax, -8(%rsp) # 8-byte Spill + adcq -24(%rsp), %r15 # 8-byte Folded Reload + movq %r15, -24(%rsp) # 8-byte Spill + adcq -16(%rsp), %r9 # 8-byte Folded Reload + sbbq %rbp, %rbp + andl $1, %ebp + movq %rcx, %rbx + subq (%r8), %rbx + movq %rsi, %r10 + sbbq 8(%r8), %r10 + movq %rdx, %r11 + sbbq 16(%r8), %r11 + movq %r12, %r14 + sbbq 24(%r8), %r14 + movq -8(%rsp), %r13 # 8-byte Reload + sbbq 32(%r8), %r13 + sbbq 40(%r8), %r15 + movq %r9, %rax + sbbq 48(%r8), %rax + sbbq $0, %rbp + andl $1, %ebp + cmovneq %rcx, %rbx + movq %rbx, 56(%rdi) + testb %bpl, %bpl + cmovneq %rsi, %r10 + movq %r10, 64(%rdi) + cmovneq %rdx, %r11 + movq %r11, 72(%rdi) + cmovneq %r12, %r14 + movq %r14, 80(%rdi) + cmovneq -8(%rsp), %r13 # 8-byte Folded Reload + movq %r13, 88(%rdi) + cmovneq -24(%rsp), %r15 # 8-byte Folded Reload + movq %r15, 96(%rdi) + cmovneq %r9, %rax + movq %rax, 104(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end108: + .size mcl_fpDbl_add7Lbmi2, .Lfunc_end108-mcl_fpDbl_add7Lbmi2 + + .globl mcl_fpDbl_sub7Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub7Lbmi2,@function +mcl_fpDbl_sub7Lbmi2: # @mcl_fpDbl_sub7Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 104(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 96(%rdx), %r10 + movq 88(%rdx), %r14 + movq 16(%rsi), %rax + movq (%rsi), %r15 + movq 8(%rsi), %r11 + xorl %ecx, %ecx + subq (%rdx), %r15 + sbbq 8(%rdx), %r11 + sbbq 16(%rdx), %rax + movq 24(%rsi), %rbx + sbbq 24(%rdx), %rbx + movq 32(%rsi), %r12 + sbbq 32(%rdx), %r12 + movq 80(%rdx), %r13 + movq 72(%rdx), %rbp + movq %r15, (%rdi) + movq 64(%rdx), %r9 + movq %r11, 8(%rdi) + movq 56(%rdx), %r15 + movq %rax, 16(%rdi) + movq 48(%rdx), %r11 + movq 40(%rdx), %rdx + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %rdx, %rbx + movq 104(%rsi), %rax + movq %r12, 32(%rdi) + movq 48(%rsi), %r12 + sbbq %r11, %r12 + movq 96(%rsi), %r11 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rdx + sbbq %r15, %rdx + movq 88(%rsi), %r15 + movq %r12, 48(%rdi) + movq 64(%rsi), %rbx + sbbq %r9, %rbx + movq 80(%rsi), %r12 + movq 72(%rsi), %r9 + sbbq %rbp, %r9 + sbbq %r13, %r12 + sbbq %r14, %r15 + sbbq %r10, %r11 + sbbq -8(%rsp), %rax # 8-byte Folded Reload + movq %rax, -8(%rsp) # 8-byte Spill + movl $0, %ebp + sbbq $0, %rbp + andl $1, %ebp + movq (%r8), %r10 + cmoveq %rcx, %r10 + testb %bpl, %bpl + movq 16(%r8), %rbp + cmoveq %rcx, %rbp + movq 8(%r8), %rsi + cmoveq %rcx, %rsi + 
movq 48(%r8), %r14 + cmoveq %rcx, %r14 + movq 40(%r8), %r13 + cmoveq %rcx, %r13 + movq 32(%r8), %rax + cmoveq %rcx, %rax + cmovneq 24(%r8), %rcx + addq %rdx, %r10 + adcq %rbx, %rsi + movq %r10, 56(%rdi) + movq %rsi, 64(%rdi) + adcq %r9, %rbp + movq %rbp, 72(%rdi) + adcq %r12, %rcx + movq %rcx, 80(%rdi) + adcq %r15, %rax + movq %rax, 88(%rdi) + adcq %r11, %r13 + movq %r13, 96(%rdi) + adcq -8(%rsp), %r14 # 8-byte Folded Reload + movq %r14, 104(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end109: + .size mcl_fpDbl_sub7Lbmi2, .Lfunc_end109-mcl_fpDbl_sub7Lbmi2 + + .align 16, 0x90 + .type .LmulPv512x64,@function +.LmulPv512x64: # @mulPv512x64 +# BB#0: + mulxq (%rsi), %rcx, %rax + movq %rcx, (%rdi) + mulxq 8(%rsi), %rcx, %r8 + addq %rax, %rcx + movq %rcx, 8(%rdi) + mulxq 16(%rsi), %rcx, %r9 + adcq %r8, %rcx + movq %rcx, 16(%rdi) + mulxq 24(%rsi), %rax, %rcx + adcq %r9, %rax + movq %rax, 24(%rdi) + mulxq 32(%rsi), %rax, %r8 + adcq %rcx, %rax + movq %rax, 32(%rdi) + mulxq 40(%rsi), %rcx, %r9 + adcq %r8, %rcx + movq %rcx, 40(%rdi) + mulxq 48(%rsi), %rax, %rcx + adcq %r9, %rax + movq %rax, 48(%rdi) + mulxq 56(%rsi), %rax, %rdx + adcq %rcx, %rax + movq %rax, 56(%rdi) + adcq $0, %rdx + movq %rdx, 64(%rdi) + movq %rdi, %rax + retq +.Lfunc_end110: + .size .LmulPv512x64, .Lfunc_end110-.LmulPv512x64 + + .globl mcl_fp_mulUnitPre8Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre8Lbmi2,@function +mcl_fp_mulUnitPre8Lbmi2: # @mcl_fp_mulUnitPre8Lbmi2 +# BB#0: + pushq %rbx + subq $80, %rsp + movq %rdi, %rbx + leaq 8(%rsp), %rdi + callq .LmulPv512x64 + movq 72(%rsp), %r8 + movq 64(%rsp), %r9 + movq 56(%rsp), %r10 + movq 48(%rsp), %r11 + movq 40(%rsp), %rdi + movq 32(%rsp), %rax + movq 24(%rsp), %rcx + movq 8(%rsp), %rdx + movq 16(%rsp), %rsi + movq %rdx, (%rbx) + movq %rsi, 8(%rbx) + movq %rcx, 16(%rbx) + movq %rax, 24(%rbx) + movq %rdi, 32(%rbx) + movq %r11, 40(%rbx) + movq %r10, 48(%rbx) + movq %r9, 56(%rbx) + movq %r8, 64(%rbx) + addq $80, %rsp + popq %rbx + retq +.Lfunc_end111: + .size mcl_fp_mulUnitPre8Lbmi2, .Lfunc_end111-mcl_fp_mulUnitPre8Lbmi2 + + .globl mcl_fpDbl_mulPre8Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre8Lbmi2,@function +mcl_fpDbl_mulPre8Lbmi2: # @mcl_fpDbl_mulPre8Lbmi2 +# BB#0: + pushq %rbp + movq %rsp, %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $200, %rsp + movq %rdx, %rbx + movq %rsi, %r15 + movq %rdi, %r14 + callq mcl_fpDbl_mulPre4Lbmi2@PLT + leaq 64(%r14), %rdi + leaq 32(%r15), %rsi + leaq 32(%rbx), %rdx + callq mcl_fpDbl_mulPre4Lbmi2@PLT + movq 56(%rbx), %r10 + movq 48(%rbx), %rcx + movq (%rbx), %rdx + movq 8(%rbx), %rsi + addq 32(%rbx), %rdx + adcq 40(%rbx), %rsi + adcq 16(%rbx), %rcx + adcq 24(%rbx), %r10 + pushfq + popq %r8 + xorl %r9d, %r9d + movq 56(%r15), %rdi + movq 48(%r15), %r13 + movq (%r15), %r12 + movq 8(%r15), %rbx + addq 32(%r15), %r12 + adcq 40(%r15), %rbx + adcq 16(%r15), %r13 + adcq 24(%r15), %rdi + movl $0, %eax + cmovbq %r10, %rax + movq %rax, -176(%rbp) # 8-byte Spill + movl $0, %eax + cmovbq %rcx, %rax + movq %rax, -184(%rbp) # 8-byte Spill + movl $0, %eax + cmovbq %rsi, %rax + movq %rax, -192(%rbp) # 8-byte Spill + movl $0, %eax + cmovbq %rdx, %rax + movq %rax, -200(%rbp) # 8-byte Spill + sbbq %r15, %r15 + movq %r12, -136(%rbp) + movq %rbx, -128(%rbp) + movq %r13, -120(%rbp) + movq %rdi, -112(%rbp) + movq %rdx, -168(%rbp) + movq %rsi, -160(%rbp) + movq %rcx, -152(%rbp) + movq %r10, -144(%rbp) + pushq %r8 + popfq + cmovaeq %r9, %rdi + movq %rdi, -216(%rbp) # 8-byte Spill + cmovaeq 
%r9, %r13 + cmovaeq %r9, %rbx + cmovaeq %r9, %r12 + sbbq %rax, %rax + movq %rax, -208(%rbp) # 8-byte Spill + leaq -104(%rbp), %rdi + leaq -136(%rbp), %rsi + leaq -168(%rbp), %rdx + callq mcl_fpDbl_mulPre4Lbmi2@PLT + addq -200(%rbp), %r12 # 8-byte Folded Reload + adcq -192(%rbp), %rbx # 8-byte Folded Reload + adcq -184(%rbp), %r13 # 8-byte Folded Reload + movq -216(%rbp), %r10 # 8-byte Reload + adcq -176(%rbp), %r10 # 8-byte Folded Reload + sbbq %rax, %rax + andl $1, %eax + movq -208(%rbp), %rdx # 8-byte Reload + andl %edx, %r15d + andl $1, %r15d + addq -72(%rbp), %r12 + adcq -64(%rbp), %rbx + adcq -56(%rbp), %r13 + adcq -48(%rbp), %r10 + adcq %rax, %r15 + movq -80(%rbp), %rax + movq -88(%rbp), %rcx + movq -104(%rbp), %rsi + movq -96(%rbp), %rdx + subq (%r14), %rsi + sbbq 8(%r14), %rdx + sbbq 16(%r14), %rcx + sbbq 24(%r14), %rax + movq 32(%r14), %rdi + movq %rdi, -184(%rbp) # 8-byte Spill + movq 40(%r14), %r8 + movq %r8, -176(%rbp) # 8-byte Spill + sbbq %rdi, %r12 + sbbq %r8, %rbx + movq 48(%r14), %rdi + movq %rdi, -192(%rbp) # 8-byte Spill + sbbq %rdi, %r13 + movq 56(%r14), %rdi + movq %rdi, -200(%rbp) # 8-byte Spill + sbbq %rdi, %r10 + sbbq $0, %r15 + movq 64(%r14), %r11 + subq %r11, %rsi + movq 72(%r14), %rdi + movq %rdi, -208(%rbp) # 8-byte Spill + sbbq %rdi, %rdx + movq 80(%r14), %rdi + movq %rdi, -216(%rbp) # 8-byte Spill + sbbq %rdi, %rcx + movq 88(%r14), %rdi + movq %rdi, -224(%rbp) # 8-byte Spill + sbbq %rdi, %rax + movq 96(%r14), %rdi + movq %rdi, -232(%rbp) # 8-byte Spill + sbbq %rdi, %r12 + movq 104(%r14), %rdi + sbbq %rdi, %rbx + movq 112(%r14), %r8 + sbbq %r8, %r13 + movq 120(%r14), %r9 + sbbq %r9, %r10 + sbbq $0, %r15 + addq -184(%rbp), %rsi # 8-byte Folded Reload + adcq -176(%rbp), %rdx # 8-byte Folded Reload + movq %rsi, 32(%r14) + adcq -192(%rbp), %rcx # 8-byte Folded Reload + movq %rdx, 40(%r14) + adcq -200(%rbp), %rax # 8-byte Folded Reload + movq %rcx, 48(%r14) + adcq %r11, %r12 + movq %rax, 56(%r14) + movq %r12, 64(%r14) + adcq -208(%rbp), %rbx # 8-byte Folded Reload + movq %rbx, 72(%r14) + adcq -216(%rbp), %r13 # 8-byte Folded Reload + movq %r13, 80(%r14) + adcq -224(%rbp), %r10 # 8-byte Folded Reload + movq %r10, 88(%r14) + adcq -232(%rbp), %r15 # 8-byte Folded Reload + movq %r15, 96(%r14) + adcq $0, %rdi + movq %rdi, 104(%r14) + adcq $0, %r8 + movq %r8, 112(%r14) + adcq $0, %r9 + movq %r9, 120(%r14) + addq $200, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end112: + .size mcl_fpDbl_mulPre8Lbmi2, .Lfunc_end112-mcl_fpDbl_mulPre8Lbmi2 + + .globl mcl_fpDbl_sqrPre8Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre8Lbmi2,@function +mcl_fpDbl_sqrPre8Lbmi2: # @mcl_fpDbl_sqrPre8Lbmi2 +# BB#0: + pushq %rbp + movq %rsp, %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $200, %rsp + movq %rsi, %r14 + movq %rdi, %rbx + movq %r14, %rdx + callq mcl_fpDbl_mulPre4Lbmi2@PLT + leaq 64(%rbx), %rdi + leaq 32(%r14), %rsi + movq %rsi, %rdx + callq mcl_fpDbl_mulPre4Lbmi2@PLT + movq (%r14), %r12 + movq 8(%r14), %r15 + addq 32(%r14), %r12 + adcq 40(%r14), %r15 + pushfq + popq %rax + movq %r12, -136(%rbp) + movq %r12, -168(%rbp) + addq %r12, %r12 + movq %r15, -128(%rbp) + movq %r15, -160(%rbp) + adcq %r15, %r15 + pushfq + popq %rcx + movq 56(%r14), %r13 + movq 48(%r14), %rdx + pushq %rax + popfq + adcq 16(%r14), %rdx + adcq 24(%r14), %r13 + pushfq + popq %r8 + pushfq + popq %rsi + pushfq + popq %rdi + sbbq %rax, %rax + movq %rax, -184(%rbp) # 8-byte Spill + xorl %eax, %eax + pushq %rdi + popfq + cmovaeq %rax, %r15 + movq 
%r15, -176(%rbp) # 8-byte Spill + cmovaeq %rax, %r12 + movq %rdx, -120(%rbp) + movq %rdx, -152(%rbp) + movq %rdx, %r15 + pushq %rcx + popfq + adcq %r15, %r15 + movq %r13, %r14 + movq %r13, -112(%rbp) + movq %r13, -144(%rbp) + adcq %r13, %r13 + pushq %rsi + popfq + cmovaeq %rax, %r13 + cmovaeq %rax, %r15 + shrq $63, %r14 + pushq %r8 + popfq + cmovaeq %rax, %r14 + leaq -104(%rbp), %rdi + leaq -136(%rbp), %rsi + leaq -168(%rbp), %rdx + callq mcl_fpDbl_mulPre4Lbmi2@PLT + movq -184(%rbp), %rax # 8-byte Reload + andl $1, %eax + addq -72(%rbp), %r12 + movq -176(%rbp), %r8 # 8-byte Reload + adcq -64(%rbp), %r8 + adcq -56(%rbp), %r15 + adcq -48(%rbp), %r13 + adcq %r14, %rax + movq %rax, %rdi + movq -80(%rbp), %rax + movq -88(%rbp), %rcx + movq -104(%rbp), %rsi + movq -96(%rbp), %rdx + subq (%rbx), %rsi + sbbq 8(%rbx), %rdx + sbbq 16(%rbx), %rcx + sbbq 24(%rbx), %rax + movq 32(%rbx), %r10 + movq %r10, -184(%rbp) # 8-byte Spill + movq 40(%rbx), %r9 + movq %r9, -176(%rbp) # 8-byte Spill + sbbq %r10, %r12 + sbbq %r9, %r8 + movq %r8, %r10 + movq 48(%rbx), %r8 + movq %r8, -192(%rbp) # 8-byte Spill + sbbq %r8, %r15 + movq 56(%rbx), %r8 + movq %r8, -200(%rbp) # 8-byte Spill + sbbq %r8, %r13 + sbbq $0, %rdi + movq 64(%rbx), %r11 + subq %r11, %rsi + movq 72(%rbx), %r8 + movq %r8, -208(%rbp) # 8-byte Spill + sbbq %r8, %rdx + movq 80(%rbx), %r8 + movq %r8, -216(%rbp) # 8-byte Spill + sbbq %r8, %rcx + movq 88(%rbx), %r8 + movq %r8, -224(%rbp) # 8-byte Spill + sbbq %r8, %rax + movq 96(%rbx), %r8 + movq %r8, -232(%rbp) # 8-byte Spill + sbbq %r8, %r12 + movq 104(%rbx), %r14 + sbbq %r14, %r10 + movq 112(%rbx), %r8 + sbbq %r8, %r15 + movq 120(%rbx), %r9 + sbbq %r9, %r13 + sbbq $0, %rdi + addq -184(%rbp), %rsi # 8-byte Folded Reload + adcq -176(%rbp), %rdx # 8-byte Folded Reload + movq %rsi, 32(%rbx) + adcq -192(%rbp), %rcx # 8-byte Folded Reload + movq %rdx, 40(%rbx) + adcq -200(%rbp), %rax # 8-byte Folded Reload + movq %rcx, 48(%rbx) + adcq %r11, %r12 + movq %rax, 56(%rbx) + movq %r12, 64(%rbx) + adcq -208(%rbp), %r10 # 8-byte Folded Reload + movq %r10, 72(%rbx) + adcq -216(%rbp), %r15 # 8-byte Folded Reload + movq %r15, 80(%rbx) + adcq -224(%rbp), %r13 # 8-byte Folded Reload + movq %r13, 88(%rbx) + adcq -232(%rbp), %rdi # 8-byte Folded Reload + movq %rdi, 96(%rbx) + adcq $0, %r14 + movq %r14, 104(%rbx) + adcq $0, %r8 + movq %r8, 112(%rbx) + adcq $0, %r9 + movq %r9, 120(%rbx) + addq $200, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end113: + .size mcl_fpDbl_sqrPre8Lbmi2, .Lfunc_end113-mcl_fpDbl_sqrPre8Lbmi2 + + .globl mcl_fp_mont8Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont8Lbmi2,@function +mcl_fp_mont8Lbmi2: # @mcl_fp_mont8Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $1256, %rsp # imm = 0x4E8 + movq %rcx, %r13 + movq %r13, 40(%rsp) # 8-byte Spill + movq %rdx, 16(%rsp) # 8-byte Spill + movq %rsi, 24(%rsp) # 8-byte Spill + movq %rdi, (%rsp) # 8-byte Spill + movq -8(%r13), %rbx + movq %rbx, 32(%rsp) # 8-byte Spill + movq (%rdx), %rdx + leaq 1184(%rsp), %rdi + callq .LmulPv512x64 + movq 1184(%rsp), %r15 + movq 1192(%rsp), %r14 + movq %r15, %rdx + imulq %rbx, %rdx + movq 1248(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + movq 1240(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 1232(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 1224(%rsp), %rax + movq %rax, 72(%rsp) # 8-byte Spill + movq 1216(%rsp), %r12 + movq 1208(%rsp), %rbx + movq 1200(%rsp), %rbp + leaq 1112(%rsp), %rdi + movq %r13, %rsi + 
callq .LmulPv512x64 + addq 1112(%rsp), %r15 + adcq 1120(%rsp), %r14 + adcq 1128(%rsp), %rbp + movq %rbp, 8(%rsp) # 8-byte Spill + adcq 1136(%rsp), %rbx + movq %rbx, 48(%rsp) # 8-byte Spill + adcq 1144(%rsp), %r12 + movq %r12, 64(%rsp) # 8-byte Spill + movq 72(%rsp), %r13 # 8-byte Reload + adcq 1152(%rsp), %r13 + movq 88(%rsp), %rbx # 8-byte Reload + adcq 1160(%rsp), %rbx + movq 80(%rsp), %rbp # 8-byte Reload + adcq 1168(%rsp), %rbp + movq 96(%rsp), %rax # 8-byte Reload + adcq 1176(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + sbbq %r15, %r15 + movq 16(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rdx + leaq 1040(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + andl $1, %r15d + addq 1040(%rsp), %r14 + movq 8(%rsp), %rax # 8-byte Reload + adcq 1048(%rsp), %rax + movq %rax, 8(%rsp) # 8-byte Spill + movq 48(%rsp), %rax # 8-byte Reload + adcq 1056(%rsp), %rax + movq %rax, %r12 + movq 64(%rsp), %rax # 8-byte Reload + adcq 1064(%rsp), %rax + movq %rax, 64(%rsp) # 8-byte Spill + adcq 1072(%rsp), %r13 + movq %r13, 72(%rsp) # 8-byte Spill + adcq 1080(%rsp), %rbx + movq %rbx, 88(%rsp) # 8-byte Spill + adcq 1088(%rsp), %rbp + movq 96(%rsp), %rax # 8-byte Reload + adcq 1096(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + adcq 1104(%rsp), %r15 + movq %r15, 56(%rsp) # 8-byte Spill + sbbq %r15, %r15 + movq %r14, %rdx + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 968(%rsp), %rdi + movq 40(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + andl $1, %r15d + addq 968(%rsp), %r14 + movq 8(%rsp), %r13 # 8-byte Reload + adcq 976(%rsp), %r13 + adcq 984(%rsp), %r12 + movq %r12, 48(%rsp) # 8-byte Spill + movq 64(%rsp), %r14 # 8-byte Reload + adcq 992(%rsp), %r14 + movq 72(%rsp), %rbx # 8-byte Reload + adcq 1000(%rsp), %rbx + movq 88(%rsp), %rax # 8-byte Reload + adcq 1008(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + adcq 1016(%rsp), %rbp + movq %rbp, %r12 + movq 96(%rsp), %rbp # 8-byte Reload + adcq 1024(%rsp), %rbp + movq 56(%rsp), %rax # 8-byte Reload + adcq 1032(%rsp), %rax + movq %rax, 56(%rsp) # 8-byte Spill + adcq $0, %r15 + movq 16(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rdx + leaq 896(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq %r13, %rcx + addq 896(%rsp), %rcx + movq 48(%rsp), %r13 # 8-byte Reload + adcq 904(%rsp), %r13 + adcq 912(%rsp), %r14 + adcq 920(%rsp), %rbx + movq %rbx, 72(%rsp) # 8-byte Spill + movq 88(%rsp), %rax # 8-byte Reload + adcq 928(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + adcq 936(%rsp), %r12 + movq %r12, 80(%rsp) # 8-byte Spill + adcq 944(%rsp), %rbp + movq %rbp, 96(%rsp) # 8-byte Spill + movq 56(%rsp), %r12 # 8-byte Reload + adcq 952(%rsp), %r12 + adcq 960(%rsp), %r15 + sbbq %rbx, %rbx + movq %rcx, %rdx + movq %rcx, %rbp + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 824(%rsp), %rdi + movq 40(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + andl $1, %ebx + addq 824(%rsp), %rbp + adcq 832(%rsp), %r13 + movq %r13, 48(%rsp) # 8-byte Spill + adcq 840(%rsp), %r14 + movq %r14, 64(%rsp) # 8-byte Spill + movq 72(%rsp), %r13 # 8-byte Reload + adcq 848(%rsp), %r13 + movq 88(%rsp), %rbp # 8-byte Reload + adcq 856(%rsp), %rbp + movq 80(%rsp), %r14 # 8-byte Reload + adcq 864(%rsp), %r14 + movq 96(%rsp), %rax # 8-byte Reload + adcq 872(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + adcq 880(%rsp), %r12 + adcq 888(%rsp), %r15 + adcq $0, %rbx + movq 16(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rdx + leaq 752(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 
48(%rsp), %rax # 8-byte Reload + addq 752(%rsp), %rax + movq 64(%rsp), %rcx # 8-byte Reload + adcq 760(%rsp), %rcx + movq %rcx, 64(%rsp) # 8-byte Spill + adcq 768(%rsp), %r13 + movq %r13, 72(%rsp) # 8-byte Spill + adcq 776(%rsp), %rbp + movq %rbp, 88(%rsp) # 8-byte Spill + adcq 784(%rsp), %r14 + movq %r14, 80(%rsp) # 8-byte Spill + movq 96(%rsp), %rbp # 8-byte Reload + adcq 792(%rsp), %rbp + adcq 800(%rsp), %r12 + adcq 808(%rsp), %r15 + adcq 816(%rsp), %rbx + movq %rbx, 48(%rsp) # 8-byte Spill + sbbq %r13, %r13 + movq %rax, %rdx + movq %rax, %rbx + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 680(%rsp), %rdi + movq 40(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq %r13, %rax + andl $1, %eax + addq 680(%rsp), %rbx + movq 64(%rsp), %r14 # 8-byte Reload + adcq 688(%rsp), %r14 + movq 72(%rsp), %rcx # 8-byte Reload + adcq 696(%rsp), %rcx + movq %rcx, 72(%rsp) # 8-byte Spill + movq 88(%rsp), %r13 # 8-byte Reload + adcq 704(%rsp), %r13 + movq 80(%rsp), %rbx # 8-byte Reload + adcq 712(%rsp), %rbx + adcq 720(%rsp), %rbp + movq %rbp, 96(%rsp) # 8-byte Spill + movq %r12, %rbp + adcq 728(%rsp), %rbp + adcq 736(%rsp), %r15 + movq 48(%rsp), %r12 # 8-byte Reload + adcq 744(%rsp), %r12 + adcq $0, %rax + movq %rax, 64(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rdx + leaq 608(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq %r14, %rax + addq 608(%rsp), %rax + movq 72(%rsp), %r14 # 8-byte Reload + adcq 616(%rsp), %r14 + adcq 624(%rsp), %r13 + movq %r13, 88(%rsp) # 8-byte Spill + adcq 632(%rsp), %rbx + movq %rbx, %r13 + movq 96(%rsp), %rcx # 8-byte Reload + adcq 640(%rsp), %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + adcq 648(%rsp), %rbp + movq %rbp, 56(%rsp) # 8-byte Spill + adcq 656(%rsp), %r15 + adcq 664(%rsp), %r12 + movq %r12, 48(%rsp) # 8-byte Spill + movq 64(%rsp), %rcx # 8-byte Reload + adcq 672(%rsp), %rcx + movq %rcx, 64(%rsp) # 8-byte Spill + sbbq %rbp, %rbp + movq %rax, %rdx + movq %rax, %rbx + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 536(%rsp), %rdi + movq 40(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq %rbp, %rax + andl $1, %eax + addq 536(%rsp), %rbx + adcq 544(%rsp), %r14 + movq %r14, 72(%rsp) # 8-byte Spill + movq 88(%rsp), %rbx # 8-byte Reload + adcq 552(%rsp), %rbx + adcq 560(%rsp), %r13 + movq 96(%rsp), %rbp # 8-byte Reload + adcq 568(%rsp), %rbp + movq 56(%rsp), %r12 # 8-byte Reload + adcq 576(%rsp), %r12 + adcq 584(%rsp), %r15 + movq 48(%rsp), %rcx # 8-byte Reload + adcq 592(%rsp), %rcx + movq %rcx, 48(%rsp) # 8-byte Spill + movq 64(%rsp), %r14 # 8-byte Reload + adcq 600(%rsp), %r14 + adcq $0, %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 40(%rax), %rdx + leaq 464(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 72(%rsp), %rax # 8-byte Reload + addq 464(%rsp), %rax + adcq 472(%rsp), %rbx + adcq 480(%rsp), %r13 + movq %r13, 80(%rsp) # 8-byte Spill + adcq 488(%rsp), %rbp + movq %rbp, 96(%rsp) # 8-byte Spill + adcq 496(%rsp), %r12 + adcq 504(%rsp), %r15 + movq %r15, 72(%rsp) # 8-byte Spill + movq 48(%rsp), %r15 # 8-byte Reload + adcq 512(%rsp), %r15 + adcq 520(%rsp), %r14 + movq %r14, 64(%rsp) # 8-byte Spill + movq 88(%rsp), %r14 # 8-byte Reload + adcq 528(%rsp), %r14 + sbbq %r13, %r13 + movq %rax, %rdx + movq %rax, %rbp + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 392(%rsp), %rdi + movq 40(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq %r13, %rax + andl $1, %eax + addq 392(%rsp), %rbp + adcq 400(%rsp), 
%rbx + movq %rbx, 88(%rsp) # 8-byte Spill + movq 80(%rsp), %rbp # 8-byte Reload + adcq 408(%rsp), %rbp + movq 96(%rsp), %rbx # 8-byte Reload + adcq 416(%rsp), %rbx + adcq 424(%rsp), %r12 + movq 72(%rsp), %r13 # 8-byte Reload + adcq 432(%rsp), %r13 + adcq 440(%rsp), %r15 + movq %r15, 48(%rsp) # 8-byte Spill + movq 64(%rsp), %r15 # 8-byte Reload + adcq 448(%rsp), %r15 + adcq 456(%rsp), %r14 + adcq $0, %rax + movq %rax, 72(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 48(%rax), %rdx + leaq 320(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 88(%rsp), %rax # 8-byte Reload + addq 320(%rsp), %rax + adcq 328(%rsp), %rbp + movq %rbp, 80(%rsp) # 8-byte Spill + adcq 336(%rsp), %rbx + movq %rbx, 96(%rsp) # 8-byte Spill + movq %r12, %rbp + adcq 344(%rsp), %rbp + adcq 352(%rsp), %r13 + movq 48(%rsp), %r12 # 8-byte Reload + adcq 360(%rsp), %r12 + adcq 368(%rsp), %r15 + movq %r15, 64(%rsp) # 8-byte Spill + adcq 376(%rsp), %r14 + movq %r14, 88(%rsp) # 8-byte Spill + movq 72(%rsp), %rcx # 8-byte Reload + adcq 384(%rsp), %rcx + movq %rcx, 72(%rsp) # 8-byte Spill + sbbq %r15, %r15 + movq %rax, %rdx + movq %rax, %rbx + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 248(%rsp), %rdi + movq 40(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + andl $1, %r15d + addq 248(%rsp), %rbx + movq 80(%rsp), %rax # 8-byte Reload + adcq 256(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 96(%rsp), %r14 # 8-byte Reload + adcq 264(%rsp), %r14 + adcq 272(%rsp), %rbp + movq %rbp, 56(%rsp) # 8-byte Spill + movq %r13, %rbx + adcq 280(%rsp), %rbx + movq %r12, %rbp + adcq 288(%rsp), %rbp + movq 64(%rsp), %r13 # 8-byte Reload + adcq 296(%rsp), %r13 + movq 88(%rsp), %rax # 8-byte Reload + adcq 304(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 72(%rsp), %r12 # 8-byte Reload + adcq 312(%rsp), %r12 + adcq $0, %r15 + movq 16(%rsp), %rax # 8-byte Reload + movq 56(%rax), %rdx + leaq 176(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 80(%rsp), %rax # 8-byte Reload + addq 176(%rsp), %rax + adcq 184(%rsp), %r14 + movq %r14, 96(%rsp) # 8-byte Spill + movq 56(%rsp), %rcx # 8-byte Reload + adcq 192(%rsp), %rcx + movq %rcx, 56(%rsp) # 8-byte Spill + adcq 200(%rsp), %rbx + movq %rbx, 72(%rsp) # 8-byte Spill + adcq 208(%rsp), %rbp + adcq 216(%rsp), %r13 + movq %r13, 64(%rsp) # 8-byte Spill + movq 88(%rsp), %r14 # 8-byte Reload + adcq 224(%rsp), %r14 + adcq 232(%rsp), %r12 + adcq 240(%rsp), %r15 + sbbq %rbx, %rbx + movq 32(%rsp), %rdx # 8-byte Reload + imulq %rax, %rdx + movq %rax, %r13 + leaq 104(%rsp), %rdi + movq 40(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + andl $1, %ebx + addq 104(%rsp), %r13 + movq 96(%rsp), %rcx # 8-byte Reload + adcq 112(%rsp), %rcx + movq 56(%rsp), %rdx # 8-byte Reload + adcq 120(%rsp), %rdx + movq 72(%rsp), %rsi # 8-byte Reload + adcq 128(%rsp), %rsi + movq %rbp, %rdi + adcq 136(%rsp), %rdi + movq %rdi, 48(%rsp) # 8-byte Spill + movq 64(%rsp), %r8 # 8-byte Reload + adcq 144(%rsp), %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq %r14, %r9 + adcq 152(%rsp), %r9 + movq %r9, 88(%rsp) # 8-byte Spill + adcq 160(%rsp), %r12 + adcq 168(%rsp), %r15 + adcq $0, %rbx + movq %rcx, %rax + movq %rcx, %r11 + movq 40(%rsp), %rbp # 8-byte Reload + subq (%rbp), %rax + movq %rdx, %rcx + movq %rdx, %r14 + sbbq 8(%rbp), %rcx + movq %rsi, %rdx + movq %rsi, %r13 + sbbq 16(%rbp), %rdx + movq %rdi, %rsi + sbbq 24(%rbp), %rsi + movq %r8, %rdi + sbbq 32(%rbp), %rdi + movq %r9, %r10 + sbbq 40(%rbp), %r10 + movq %r12, %r8 + sbbq 48(%rbp), 
%r8 + movq %r15, %r9 + sbbq 56(%rbp), %r9 + sbbq $0, %rbx + andl $1, %ebx + cmovneq %r15, %r9 + testb %bl, %bl + cmovneq %r11, %rax + movq (%rsp), %rbx # 8-byte Reload + movq %rax, (%rbx) + cmovneq %r14, %rcx + movq %rcx, 8(%rbx) + cmovneq %r13, %rdx + movq %rdx, 16(%rbx) + cmovneq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 24(%rbx) + cmovneq 64(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rbx) + cmovneq 88(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 40(%rbx) + cmovneq %r12, %r8 + movq %r8, 48(%rbx) + movq %r9, 56(%rbx) + addq $1256, %rsp # imm = 0x4E8 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end114: + .size mcl_fp_mont8Lbmi2, .Lfunc_end114-mcl_fp_mont8Lbmi2 + + .globl mcl_fp_montNF8Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF8Lbmi2,@function +mcl_fp_montNF8Lbmi2: # @mcl_fp_montNF8Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $1240, %rsp # imm = 0x4D8 + movq %rcx, 56(%rsp) # 8-byte Spill + movq %rdx, 16(%rsp) # 8-byte Spill + movq %rsi, 24(%rsp) # 8-byte Spill + movq %rdi, (%rsp) # 8-byte Spill + movq -8(%rcx), %rbx + movq %rbx, 32(%rsp) # 8-byte Spill + movq (%rdx), %rdx + leaq 1168(%rsp), %rdi + callq .LmulPv512x64 + movq 1168(%rsp), %r15 + movq 1176(%rsp), %r12 + movq %r15, %rdx + imulq %rbx, %rdx + movq 1232(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 1224(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 1216(%rsp), %r13 + movq 1208(%rsp), %rax + movq %rax, 72(%rsp) # 8-byte Spill + movq 1200(%rsp), %r14 + movq 1192(%rsp), %rbp + movq 1184(%rsp), %rbx + leaq 1096(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 1096(%rsp), %r15 + adcq 1104(%rsp), %r12 + movq %r12, 64(%rsp) # 8-byte Spill + adcq 1112(%rsp), %rbx + adcq 1120(%rsp), %rbp + adcq 1128(%rsp), %r14 + movq %r14, %r12 + movq 72(%rsp), %r14 # 8-byte Reload + adcq 1136(%rsp), %r14 + adcq 1144(%rsp), %r13 + movq 80(%rsp), %rax # 8-byte Reload + adcq 1152(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 48(%rsp), %rax # 8-byte Reload + adcq 1160(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rdx + leaq 1024(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 1088(%rsp), %r15 + movq 64(%rsp), %rax # 8-byte Reload + addq 1024(%rsp), %rax + adcq 1032(%rsp), %rbx + movq %rbx, 8(%rsp) # 8-byte Spill + movq %rbp, %rbx + adcq 1040(%rsp), %rbx + adcq 1048(%rsp), %r12 + adcq 1056(%rsp), %r14 + movq %r14, 72(%rsp) # 8-byte Spill + movq %r13, %rbp + adcq 1064(%rsp), %rbp + movq 80(%rsp), %rcx # 8-byte Reload + adcq 1072(%rsp), %rcx + movq %rcx, 80(%rsp) # 8-byte Spill + movq 48(%rsp), %r14 # 8-byte Reload + adcq 1080(%rsp), %r14 + adcq $0, %r15 + movq %rax, %rdx + movq %rax, %r13 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 952(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 952(%rsp), %r13 + movq 8(%rsp), %rax # 8-byte Reload + adcq 960(%rsp), %rax + movq %rax, 8(%rsp) # 8-byte Spill + adcq 968(%rsp), %rbx + movq %rbx, 64(%rsp) # 8-byte Spill + movq %r12, %rbx + adcq 976(%rsp), %rbx + movq 72(%rsp), %r12 # 8-byte Reload + adcq 984(%rsp), %r12 + adcq 992(%rsp), %rbp + movq %rbp, 40(%rsp) # 8-byte Spill + movq 80(%rsp), %r13 # 8-byte Reload + adcq 1000(%rsp), %r13 + movq %r14, %rbp + adcq 1008(%rsp), %rbp + adcq 1016(%rsp), %r15 + movq 16(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rdx + leaq 880(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + 
callq .LmulPv512x64 + movq 944(%rsp), %r14 + movq 8(%rsp), %rax # 8-byte Reload + addq 880(%rsp), %rax + movq 64(%rsp), %rcx # 8-byte Reload + adcq 888(%rsp), %rcx + movq %rcx, 64(%rsp) # 8-byte Spill + adcq 896(%rsp), %rbx + adcq 904(%rsp), %r12 + movq %r12, 72(%rsp) # 8-byte Spill + movq 40(%rsp), %rcx # 8-byte Reload + adcq 912(%rsp), %rcx + movq %rcx, 40(%rsp) # 8-byte Spill + adcq 920(%rsp), %r13 + movq %r13, 80(%rsp) # 8-byte Spill + adcq 928(%rsp), %rbp + movq %rbp, 48(%rsp) # 8-byte Spill + adcq 936(%rsp), %r15 + adcq $0, %r14 + movq %rax, %rdx + movq %rax, %rbp + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 808(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 808(%rsp), %rbp + movq 64(%rsp), %r13 # 8-byte Reload + adcq 816(%rsp), %r13 + movq %rbx, %r12 + adcq 824(%rsp), %r12 + movq 72(%rsp), %rbx # 8-byte Reload + adcq 832(%rsp), %rbx + movq 40(%rsp), %rbp # 8-byte Reload + adcq 840(%rsp), %rbp + movq 80(%rsp), %rax # 8-byte Reload + adcq 848(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 48(%rsp), %rax # 8-byte Reload + adcq 856(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + adcq 864(%rsp), %r15 + adcq 872(%rsp), %r14 + movq 16(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rdx + leaq 736(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 800(%rsp), %rax + movq %r13, %rcx + addq 736(%rsp), %rcx + adcq 744(%rsp), %r12 + movq %r12, 40(%rsp) # 8-byte Spill + adcq 752(%rsp), %rbx + movq %rbx, 72(%rsp) # 8-byte Spill + adcq 760(%rsp), %rbp + movq %rbp, %r13 + movq 80(%rsp), %rbp # 8-byte Reload + adcq 768(%rsp), %rbp + movq 48(%rsp), %rbx # 8-byte Reload + adcq 776(%rsp), %rbx + adcq 784(%rsp), %r15 + adcq 792(%rsp), %r14 + adcq $0, %rax + movq %rax, 64(%rsp) # 8-byte Spill + movq %rcx, %rdx + movq %rcx, %r12 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 664(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 664(%rsp), %r12 + movq 40(%rsp), %rax # 8-byte Reload + adcq 672(%rsp), %rax + movq %rax, 40(%rsp) # 8-byte Spill + movq 72(%rsp), %rax # 8-byte Reload + adcq 680(%rsp), %rax + movq %rax, 72(%rsp) # 8-byte Spill + adcq 688(%rsp), %r13 + adcq 696(%rsp), %rbp + movq %rbp, 80(%rsp) # 8-byte Spill + adcq 704(%rsp), %rbx + adcq 712(%rsp), %r15 + adcq 720(%rsp), %r14 + movq 64(%rsp), %r12 # 8-byte Reload + adcq 728(%rsp), %r12 + movq 16(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rdx + leaq 592(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 656(%rsp), %rcx + movq 40(%rsp), %rax # 8-byte Reload + addq 592(%rsp), %rax + movq 72(%rsp), %rbp # 8-byte Reload + adcq 600(%rsp), %rbp + adcq 608(%rsp), %r13 + movq %r13, 40(%rsp) # 8-byte Spill + movq 80(%rsp), %r13 # 8-byte Reload + adcq 616(%rsp), %r13 + adcq 624(%rsp), %rbx + adcq 632(%rsp), %r15 + adcq 640(%rsp), %r14 + adcq 648(%rsp), %r12 + movq %r12, 64(%rsp) # 8-byte Spill + adcq $0, %rcx + movq %rcx, 80(%rsp) # 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 520(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 520(%rsp), %r12 + adcq 528(%rsp), %rbp + movq %rbp, 72(%rsp) # 8-byte Spill + movq 40(%rsp), %r12 # 8-byte Reload + adcq 536(%rsp), %r12 + movq %r13, %rbp + adcq 544(%rsp), %rbp + adcq 552(%rsp), %rbx + adcq 560(%rsp), %r15 + adcq 568(%rsp), %r14 + movq 64(%rsp), %r13 # 8-byte Reload + adcq 576(%rsp), %r13 + movq 80(%rsp), %rax # 8-byte Reload + adcq 584(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + 
movq 16(%rsp), %rax # 8-byte Reload + movq 40(%rax), %rdx + leaq 448(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 512(%rsp), %rcx + movq 72(%rsp), %rax # 8-byte Reload + addq 448(%rsp), %rax + adcq 456(%rsp), %r12 + movq %r12, 40(%rsp) # 8-byte Spill + adcq 464(%rsp), %rbp + adcq 472(%rsp), %rbx + adcq 480(%rsp), %r15 + adcq 488(%rsp), %r14 + adcq 496(%rsp), %r13 + movq %r13, 64(%rsp) # 8-byte Spill + movq 80(%rsp), %r13 # 8-byte Reload + adcq 504(%rsp), %r13 + adcq $0, %rcx + movq %rcx, 72(%rsp) # 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 376(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 376(%rsp), %r12 + movq 40(%rsp), %rax # 8-byte Reload + adcq 384(%rsp), %rax + movq %rax, 40(%rsp) # 8-byte Spill + adcq 392(%rsp), %rbp + adcq 400(%rsp), %rbx + adcq 408(%rsp), %r15 + adcq 416(%rsp), %r14 + movq 64(%rsp), %r12 # 8-byte Reload + adcq 424(%rsp), %r12 + adcq 432(%rsp), %r13 + movq 72(%rsp), %rax # 8-byte Reload + adcq 440(%rsp), %rax + movq %rax, 72(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 48(%rax), %rdx + leaq 304(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 368(%rsp), %rcx + movq 40(%rsp), %rax # 8-byte Reload + addq 304(%rsp), %rax + adcq 312(%rsp), %rbp + movq %rbp, 80(%rsp) # 8-byte Spill + adcq 320(%rsp), %rbx + adcq 328(%rsp), %r15 + adcq 336(%rsp), %r14 + adcq 344(%rsp), %r12 + movq %r12, 64(%rsp) # 8-byte Spill + adcq 352(%rsp), %r13 + movq 72(%rsp), %rbp # 8-byte Reload + adcq 360(%rsp), %rbp + adcq $0, %rcx + movq %rcx, 48(%rsp) # 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 232(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 232(%rsp), %r12 + movq 80(%rsp), %rax # 8-byte Reload + adcq 240(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + adcq 248(%rsp), %rbx + adcq 256(%rsp), %r15 + adcq 264(%rsp), %r14 + movq 64(%rsp), %r12 # 8-byte Reload + adcq 272(%rsp), %r12 + adcq 280(%rsp), %r13 + adcq 288(%rsp), %rbp + movq %rbp, 72(%rsp) # 8-byte Spill + movq 48(%rsp), %rbp # 8-byte Reload + adcq 296(%rsp), %rbp + movq 16(%rsp), %rax # 8-byte Reload + movq 56(%rax), %rdx + leaq 160(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 224(%rsp), %rcx + movq 80(%rsp), %rax # 8-byte Reload + addq 160(%rsp), %rax + adcq 168(%rsp), %rbx + movq %rbx, 48(%rsp) # 8-byte Spill + adcq 176(%rsp), %r15 + adcq 184(%rsp), %r14 + adcq 192(%rsp), %r12 + movq %r12, 64(%rsp) # 8-byte Spill + adcq 200(%rsp), %r13 + movq 72(%rsp), %rbx # 8-byte Reload + adcq 208(%rsp), %rbx + adcq 216(%rsp), %rbp + movq %rbp, %r12 + adcq $0, %rcx + movq %rcx, 80(%rsp) # 8-byte Spill + movq 32(%rsp), %rdx # 8-byte Reload + imulq %rax, %rdx + movq %rax, %rbp + leaq 88(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 88(%rsp), %rbp + movq 48(%rsp), %r11 # 8-byte Reload + adcq 96(%rsp), %r11 + adcq 104(%rsp), %r15 + adcq 112(%rsp), %r14 + movq 64(%rsp), %rsi # 8-byte Reload + adcq 120(%rsp), %rsi + movq %rsi, 64(%rsp) # 8-byte Spill + adcq 128(%rsp), %r13 + adcq 136(%rsp), %rbx + movq %rbx, 72(%rsp) # 8-byte Spill + adcq 144(%rsp), %r12 + movq 80(%rsp), %r8 # 8-byte Reload + adcq 152(%rsp), %r8 + movq %r11, %rax + movq 56(%rsp), %rbp # 8-byte Reload + subq (%rbp), %rax + movq %r15, %rcx + sbbq 8(%rbp), %rcx + movq %r14, %rdx + sbbq 16(%rbp), %rdx + sbbq 24(%rbp), %rsi + movq %r13, %rdi + sbbq 32(%rbp), 
%rdi + movq %rbx, %r9 + sbbq 40(%rbp), %r9 + movq %r12, %r10 + sbbq 48(%rbp), %r10 + movq %rbp, %rbx + movq %r8, %rbp + sbbq 56(%rbx), %rbp + testq %rbp, %rbp + cmovsq %r11, %rax + movq (%rsp), %rbx # 8-byte Reload + movq %rax, (%rbx) + cmovsq %r15, %rcx + movq %rcx, 8(%rbx) + cmovsq %r14, %rdx + movq %rdx, 16(%rbx) + cmovsq 64(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 24(%rbx) + cmovsq %r13, %rdi + movq %rdi, 32(%rbx) + cmovsq 72(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 40(%rbx) + cmovsq %r12, %r10 + movq %r10, 48(%rbx) + cmovsq %r8, %rbp + movq %rbp, 56(%rbx) + addq $1240, %rsp # imm = 0x4D8 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end115: + .size mcl_fp_montNF8Lbmi2, .Lfunc_end115-mcl_fp_montNF8Lbmi2 + + .globl mcl_fp_montRed8Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed8Lbmi2,@function +mcl_fp_montRed8Lbmi2: # @mcl_fp_montRed8Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $776, %rsp # imm = 0x308 + movq %rdx, %rax + movq %rax, 112(%rsp) # 8-byte Spill + movq %rdi, 72(%rsp) # 8-byte Spill + movq -8(%rax), %rcx + movq %rcx, 128(%rsp) # 8-byte Spill + movq (%rsi), %r15 + movq 8(%rsi), %rdx + movq %rdx, 184(%rsp) # 8-byte Spill + movq %r15, %rdx + imulq %rcx, %rdx + movq 120(%rsi), %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + movq 112(%rsi), %rcx + movq %rcx, 136(%rsp) # 8-byte Spill + movq 104(%rsi), %rcx + movq %rcx, 120(%rsp) # 8-byte Spill + movq 96(%rsi), %rcx + movq %rcx, 168(%rsp) # 8-byte Spill + movq 88(%rsi), %rcx + movq %rcx, 176(%rsp) # 8-byte Spill + movq 80(%rsi), %rcx + movq %rcx, 160(%rsp) # 8-byte Spill + movq 72(%rsi), %rcx + movq %rcx, 192(%rsp) # 8-byte Spill + movq 64(%rsi), %r13 + movq 56(%rsi), %rcx + movq %rcx, 144(%rsp) # 8-byte Spill + movq 48(%rsi), %r14 + movq 40(%rsi), %rcx + movq %rcx, 152(%rsp) # 8-byte Spill + movq 32(%rsi), %r12 + movq 24(%rsi), %rbx + movq 16(%rsi), %rbp + movq %rax, %rcx + movq (%rcx), %rax + movq %rax, 16(%rsp) # 8-byte Spill + movq 56(%rcx), %rax + movq %rax, 64(%rsp) # 8-byte Spill + movq 48(%rcx), %rax + movq %rax, 56(%rsp) # 8-byte Spill + movq 40(%rcx), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 32(%rcx), %rax + movq %rax, 40(%rsp) # 8-byte Spill + movq 24(%rcx), %rax + movq %rax, 32(%rsp) # 8-byte Spill + movq 16(%rcx), %rax + movq %rax, 24(%rsp) # 8-byte Spill + movq 8(%rcx), %rax + movq %rax, 8(%rsp) # 8-byte Spill + movq %rcx, %rsi + leaq 704(%rsp), %rdi + callq .LmulPv512x64 + addq 704(%rsp), %r15 + movq 184(%rsp), %rcx # 8-byte Reload + adcq 712(%rsp), %rcx + adcq 720(%rsp), %rbp + movq %rbp, 80(%rsp) # 8-byte Spill + adcq 728(%rsp), %rbx + movq %rbx, 88(%rsp) # 8-byte Spill + adcq 736(%rsp), %r12 + movq %r12, 104(%rsp) # 8-byte Spill + movq 152(%rsp), %rax # 8-byte Reload + adcq 744(%rsp), %rax + movq %rax, 152(%rsp) # 8-byte Spill + adcq 752(%rsp), %r14 + movq %r14, %r12 + movq 144(%rsp), %rax # 8-byte Reload + adcq 760(%rsp), %rax + movq %rax, 144(%rsp) # 8-byte Spill + adcq 768(%rsp), %r13 + movq %r13, 184(%rsp) # 8-byte Spill + adcq $0, 192(%rsp) # 8-byte Folded Spill + movq 160(%rsp), %r15 # 8-byte Reload + adcq $0, %r15 + adcq $0, 176(%rsp) # 8-byte Folded Spill + adcq $0, 168(%rsp) # 8-byte Folded Spill + adcq $0, 120(%rsp) # 8-byte Folded Spill + movq 136(%rsp), %r13 # 8-byte Reload + adcq $0, %r13 + movq 96(%rsp), %r14 # 8-byte Reload + adcq $0, %r14 + sbbq %rbx, %rbx + movq %rcx, %rbp + movq %rbp, %rdx + imulq 128(%rsp), %rdx # 8-byte Folded Reload + leaq 632(%rsp), %rdi + movq 112(%rsp), %rsi # 8-byte 
Reload + callq .LmulPv512x64 + andl $1, %ebx + movq %rbx, %rax + addq 632(%rsp), %rbp + movq 80(%rsp), %rsi # 8-byte Reload + adcq 640(%rsp), %rsi + movq 88(%rsp), %rcx # 8-byte Reload + adcq 648(%rsp), %rcx + movq %rcx, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %rcx # 8-byte Reload + adcq 656(%rsp), %rcx + movq %rcx, 104(%rsp) # 8-byte Spill + movq 152(%rsp), %rcx # 8-byte Reload + adcq 664(%rsp), %rcx + movq %rcx, 152(%rsp) # 8-byte Spill + adcq 672(%rsp), %r12 + movq 144(%rsp), %rcx # 8-byte Reload + adcq 680(%rsp), %rcx + movq %rcx, 144(%rsp) # 8-byte Spill + movq 184(%rsp), %rcx # 8-byte Reload + adcq 688(%rsp), %rcx + movq %rcx, 184(%rsp) # 8-byte Spill + movq 192(%rsp), %rcx # 8-byte Reload + adcq 696(%rsp), %rcx + movq %rcx, 192(%rsp) # 8-byte Spill + adcq $0, %r15 + movq %r15, 160(%rsp) # 8-byte Spill + movq 176(%rsp), %rbx # 8-byte Reload + adcq $0, %rbx + movq 168(%rsp), %r15 # 8-byte Reload + adcq $0, %r15 + adcq $0, 120(%rsp) # 8-byte Folded Spill + adcq $0, %r13 + movq %r13, 136(%rsp) # 8-byte Spill + adcq $0, %r14 + movq %r14, 96(%rsp) # 8-byte Spill + movq %rax, %rbp + adcq $0, %rbp + movq %rsi, %rdx + movq %rsi, %r14 + imulq 128(%rsp), %rdx # 8-byte Folded Reload + leaq 560(%rsp), %rdi + movq 112(%rsp), %r13 # 8-byte Reload + movq %r13, %rsi + callq .LmulPv512x64 + addq 560(%rsp), %r14 + movq 88(%rsp), %rcx # 8-byte Reload + adcq 568(%rsp), %rcx + movq 104(%rsp), %rax # 8-byte Reload + adcq 576(%rsp), %rax + movq %rax, 104(%rsp) # 8-byte Spill + movq 152(%rsp), %rax # 8-byte Reload + adcq 584(%rsp), %rax + movq %rax, 152(%rsp) # 8-byte Spill + adcq 592(%rsp), %r12 + movq %r12, 88(%rsp) # 8-byte Spill + movq 144(%rsp), %r14 # 8-byte Reload + adcq 600(%rsp), %r14 + movq 184(%rsp), %rax # 8-byte Reload + adcq 608(%rsp), %rax + movq %rax, 184(%rsp) # 8-byte Spill + movq 192(%rsp), %rax # 8-byte Reload + adcq 616(%rsp), %rax + movq %rax, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %rax # 8-byte Reload + adcq 624(%rsp), %rax + movq %rax, 160(%rsp) # 8-byte Spill + adcq $0, %rbx + movq %rbx, 176(%rsp) # 8-byte Spill + adcq $0, %r15 + movq %r15, 168(%rsp) # 8-byte Spill + movq 120(%rsp), %rbx # 8-byte Reload + adcq $0, %rbx + movq 136(%rsp), %r15 # 8-byte Reload + adcq $0, %r15 + adcq $0, 96(%rsp) # 8-byte Folded Spill + adcq $0, %rbp + movq %rbp, 80(%rsp) # 8-byte Spill + movq %rcx, %rbp + movq %rbp, %rdx + movq 128(%rsp), %r12 # 8-byte Reload + imulq %r12, %rdx + leaq 488(%rsp), %rdi + movq %r13, %rsi + callq .LmulPv512x64 + addq 488(%rsp), %rbp + movq 104(%rsp), %rax # 8-byte Reload + adcq 496(%rsp), %rax + movq 152(%rsp), %rbp # 8-byte Reload + adcq 504(%rsp), %rbp + movq 88(%rsp), %rcx # 8-byte Reload + adcq 512(%rsp), %rcx + movq %rcx, 88(%rsp) # 8-byte Spill + adcq 520(%rsp), %r14 + movq 184(%rsp), %rcx # 8-byte Reload + adcq 528(%rsp), %rcx + movq %rcx, 184(%rsp) # 8-byte Spill + movq 192(%rsp), %rcx # 8-byte Reload + adcq 536(%rsp), %rcx + movq %rcx, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %r13 # 8-byte Reload + adcq 544(%rsp), %r13 + movq 176(%rsp), %rcx # 8-byte Reload + adcq 552(%rsp), %rcx + movq %rcx, 176(%rsp) # 8-byte Spill + adcq $0, 168(%rsp) # 8-byte Folded Spill + adcq $0, %rbx + movq %rbx, 120(%rsp) # 8-byte Spill + movq %r15, %rbx + adcq $0, %rbx + adcq $0, 96(%rsp) # 8-byte Folded Spill + adcq $0, 80(%rsp) # 8-byte Folded Spill + movq %rax, %rdx + movq %rax, %r15 + imulq %r12, %rdx + leaq 416(%rsp), %rdi + movq 112(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 416(%rsp), %r15 + adcq 424(%rsp), %rbp + movq %rbp, %rax + movq 88(%rsp), %rcx # 
8-byte Reload + adcq 432(%rsp), %rcx + movq %rcx, 88(%rsp) # 8-byte Spill + movq %r14, %r12 + adcq 440(%rsp), %r12 + movq 184(%rsp), %r14 # 8-byte Reload + adcq 448(%rsp), %r14 + movq 192(%rsp), %rbp # 8-byte Reload + adcq 456(%rsp), %rbp + adcq 464(%rsp), %r13 + movq 176(%rsp), %rcx # 8-byte Reload + adcq 472(%rsp), %rcx + movq %rcx, 176(%rsp) # 8-byte Spill + movq 168(%rsp), %rcx # 8-byte Reload + adcq 480(%rsp), %rcx + movq %rcx, 168(%rsp) # 8-byte Spill + adcq $0, 120(%rsp) # 8-byte Folded Spill + adcq $0, %rbx + movq %rbx, 136(%rsp) # 8-byte Spill + movq 96(%rsp), %r15 # 8-byte Reload + adcq $0, %r15 + adcq $0, 80(%rsp) # 8-byte Folded Spill + movq %rax, %rbx + movq %rbx, %rdx + imulq 128(%rsp), %rdx # 8-byte Folded Reload + leaq 344(%rsp), %rdi + movq 112(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 344(%rsp), %rbx + movq 88(%rsp), %rax # 8-byte Reload + adcq 352(%rsp), %rax + adcq 360(%rsp), %r12 + movq %r12, 144(%rsp) # 8-byte Spill + adcq 368(%rsp), %r14 + movq %r14, 184(%rsp) # 8-byte Spill + adcq 376(%rsp), %rbp + movq %rbp, 192(%rsp) # 8-byte Spill + adcq 384(%rsp), %r13 + movq %r13, 160(%rsp) # 8-byte Spill + movq 176(%rsp), %r13 # 8-byte Reload + adcq 392(%rsp), %r13 + movq 168(%rsp), %r12 # 8-byte Reload + adcq 400(%rsp), %r12 + movq 120(%rsp), %r14 # 8-byte Reload + adcq 408(%rsp), %r14 + movq 136(%rsp), %rbp # 8-byte Reload + adcq $0, %rbp + movq %r15, %rbx + adcq $0, %rbx + adcq $0, 80(%rsp) # 8-byte Folded Spill + movq %rax, %rdx + movq %rax, %r15 + imulq 128(%rsp), %rdx # 8-byte Folded Reload + leaq 272(%rsp), %rdi + movq 112(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 272(%rsp), %r15 + movq 144(%rsp), %rcx # 8-byte Reload + adcq 280(%rsp), %rcx + movq 184(%rsp), %rax # 8-byte Reload + adcq 288(%rsp), %rax + movq %rax, 184(%rsp) # 8-byte Spill + movq 192(%rsp), %rax # 8-byte Reload + adcq 296(%rsp), %rax + movq %rax, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %rax # 8-byte Reload + adcq 304(%rsp), %rax + movq %rax, 160(%rsp) # 8-byte Spill + adcq 312(%rsp), %r13 + movq %r13, 176(%rsp) # 8-byte Spill + adcq 320(%rsp), %r12 + movq %r12, 168(%rsp) # 8-byte Spill + adcq 328(%rsp), %r14 + movq %r14, %r13 + adcq 336(%rsp), %rbp + movq %rbp, %r12 + adcq $0, %rbx + movq %rbx, %r14 + movq 80(%rsp), %r15 # 8-byte Reload + adcq $0, %r15 + movq 128(%rsp), %rdx # 8-byte Reload + movq %rcx, %rbx + imulq %rbx, %rdx + leaq 200(%rsp), %rdi + movq 112(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 200(%rsp), %rbx + movq 184(%rsp), %rax # 8-byte Reload + adcq 208(%rsp), %rax + movq %rax, 184(%rsp) # 8-byte Spill + movq 192(%rsp), %r8 # 8-byte Reload + adcq 216(%rsp), %r8 + movq %r8, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %rdx # 8-byte Reload + adcq 224(%rsp), %rdx + movq 176(%rsp), %rsi # 8-byte Reload + adcq 232(%rsp), %rsi + movq 168(%rsp), %rdi # 8-byte Reload + adcq 240(%rsp), %rdi + movq %r13, %rbp + adcq 248(%rsp), %rbp + movq %r12, %rbx + adcq 256(%rsp), %rbx + movq %rbx, 136(%rsp) # 8-byte Spill + movq %r14, %r9 + adcq 264(%rsp), %r9 + adcq $0, %r15 + movq %r15, %r10 + subq 16(%rsp), %rax # 8-byte Folded Reload + movq %r8, %rcx + sbbq 8(%rsp), %rcx # 8-byte Folded Reload + movq %rdx, %r13 + sbbq 24(%rsp), %r13 # 8-byte Folded Reload + movq %rsi, %r12 + sbbq 32(%rsp), %r12 # 8-byte Folded Reload + movq %rdi, %r14 + sbbq 40(%rsp), %r14 # 8-byte Folded Reload + movq %rbp, %r11 + sbbq 48(%rsp), %r11 # 8-byte Folded Reload + movq %rbx, %r8 + sbbq 56(%rsp), %r8 # 8-byte Folded Reload + movq %r9, %r15 + sbbq 64(%rsp), %r9 # 8-byte Folded Reload + 
sbbq $0, %r10 + andl $1, %r10d + cmovneq %r15, %r9 + testb %r10b, %r10b + cmovneq 184(%rsp), %rax # 8-byte Folded Reload + movq 72(%rsp), %rbx # 8-byte Reload + movq %rax, (%rbx) + cmovneq 192(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 8(%rbx) + cmovneq %rdx, %r13 + movq %r13, 16(%rbx) + cmovneq %rsi, %r12 + movq %r12, 24(%rbx) + cmovneq %rdi, %r14 + movq %r14, 32(%rbx) + cmovneq %rbp, %r11 + movq %r11, 40(%rbx) + cmovneq 136(%rsp), %r8 # 8-byte Folded Reload + movq %r8, 48(%rbx) + movq %r9, 56(%rbx) + addq $776, %rsp # imm = 0x308 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end116: + .size mcl_fp_montRed8Lbmi2, .Lfunc_end116-mcl_fp_montRed8Lbmi2 + + .globl mcl_fp_addPre8Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre8Lbmi2,@function +mcl_fp_addPre8Lbmi2: # @mcl_fp_addPre8Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r8 + movq 56(%rsi), %r15 + movq 48(%rdx), %r9 + movq 48(%rsi), %r12 + movq 40(%rdx), %r10 + movq 32(%rdx), %r11 + movq 24(%rdx), %r14 + movq 16(%rdx), %rbx + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rbx + movq 40(%rsi), %r13 + movq 24(%rsi), %rax + movq 32(%rsi), %rsi + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %rbx, 16(%rdi) + adcq %r14, %rax + movq %rax, 24(%rdi) + adcq %r11, %rsi + movq %rsi, 32(%rdi) + adcq %r10, %r13 + movq %r13, 40(%rdi) + adcq %r9, %r12 + movq %r12, 48(%rdi) + adcq %r8, %r15 + movq %r15, 56(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end117: + .size mcl_fp_addPre8Lbmi2, .Lfunc_end117-mcl_fp_addPre8Lbmi2 + + .globl mcl_fp_subPre8Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre8Lbmi2,@function +mcl_fp_subPre8Lbmi2: # @mcl_fp_subPre8Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r8 + movq 56(%rsi), %r15 + movq 48(%rdx), %r9 + movq 40(%rdx), %r10 + movq 24(%rdx), %r11 + movq 32(%rdx), %r14 + movq (%rsi), %rbx + movq 8(%rsi), %r12 + xorl %eax, %eax + subq (%rdx), %rbx + sbbq 8(%rdx), %r12 + movq 16(%rsi), %rcx + sbbq 16(%rdx), %rcx + movq 48(%rsi), %r13 + movq 40(%rsi), %rdx + movq 32(%rsi), %rbp + movq 24(%rsi), %rsi + movq %rbx, (%rdi) + movq %r12, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r11, %rsi + movq %rsi, 24(%rdi) + sbbq %r14, %rbp + movq %rbp, 32(%rdi) + sbbq %r10, %rdx + movq %rdx, 40(%rdi) + sbbq %r9, %r13 + movq %r13, 48(%rdi) + sbbq %r8, %r15 + movq %r15, 56(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end118: + .size mcl_fp_subPre8Lbmi2, .Lfunc_end118-mcl_fp_subPre8Lbmi2 + + .globl mcl_fp_shr1_8Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_8Lbmi2,@function +mcl_fp_shr1_8Lbmi2: # @mcl_fp_shr1_8Lbmi2 +# BB#0: + movq 56(%rsi), %r8 + movq 48(%rsi), %r9 + movq 40(%rsi), %r10 + movq 32(%rsi), %r11 + movq 24(%rsi), %rcx + movq 16(%rsi), %rdx + movq (%rsi), %rax + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rax + movq %rax, (%rdi) + shrdq $1, %rdx, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rcx, %rdx + movq %rdx, 16(%rdi) + shrdq $1, %r11, %rcx + movq %rcx, 24(%rdi) + shrdq $1, %r10, %r11 + movq %r11, 32(%rdi) + shrdq $1, %r9, %r10 + movq %r10, 40(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 48(%rdi) + shrq %r8 + movq %r8, 56(%rdi) + retq +.Lfunc_end119: + .size mcl_fp_shr1_8Lbmi2, .Lfunc_end119-mcl_fp_shr1_8Lbmi2 + + .globl mcl_fp_add8Lbmi2 + .align 16, 0x90 + .type mcl_fp_add8Lbmi2,@function 
+mcl_fp_add8Lbmi2: # @mcl_fp_add8Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r15 + movq 56(%rsi), %r8 + movq 48(%rdx), %r12 + movq 48(%rsi), %r9 + movq 40(%rsi), %r13 + movq 24(%rsi), %r11 + movq 32(%rsi), %r10 + movq (%rdx), %r14 + movq 8(%rdx), %rbx + addq (%rsi), %r14 + adcq 8(%rsi), %rbx + movq 16(%rdx), %rax + adcq 16(%rsi), %rax + adcq 24(%rdx), %r11 + movq 40(%rdx), %rsi + adcq 32(%rdx), %r10 + movq %r14, (%rdi) + movq %rbx, 8(%rdi) + movq %rax, 16(%rdi) + movq %r11, 24(%rdi) + movq %r10, 32(%rdi) + adcq %r13, %rsi + movq %rsi, 40(%rdi) + adcq %r12, %r9 + movq %r9, 48(%rdi) + adcq %r15, %r8 + movq %r8, 56(%rdi) + sbbq %rdx, %rdx + andl $1, %edx + subq (%rcx), %r14 + sbbq 8(%rcx), %rbx + sbbq 16(%rcx), %rax + sbbq 24(%rcx), %r11 + sbbq 32(%rcx), %r10 + sbbq 40(%rcx), %rsi + sbbq 48(%rcx), %r9 + sbbq 56(%rcx), %r8 + sbbq $0, %rdx + testb $1, %dl + jne .LBB120_2 +# BB#1: # %nocarry + movq %r14, (%rdi) + movq %rbx, 8(%rdi) + movq %rax, 16(%rdi) + movq %r11, 24(%rdi) + movq %r10, 32(%rdi) + movq %rsi, 40(%rdi) + movq %r9, 48(%rdi) + movq %r8, 56(%rdi) +.LBB120_2: # %carry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end120: + .size mcl_fp_add8Lbmi2, .Lfunc_end120-mcl_fp_add8Lbmi2 + + .globl mcl_fp_addNF8Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF8Lbmi2,@function +mcl_fp_addNF8Lbmi2: # @mcl_fp_addNF8Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r8 + movq 48(%rdx), %rbp + movq 40(%rdx), %rbx + movq 32(%rdx), %rax + movq 24(%rdx), %r11 + movq 16(%rdx), %r15 + movq (%rdx), %r13 + movq 8(%rdx), %r12 + addq (%rsi), %r13 + adcq 8(%rsi), %r12 + adcq 16(%rsi), %r15 + adcq 24(%rsi), %r11 + adcq 32(%rsi), %rax + movq %rax, -24(%rsp) # 8-byte Spill + movq %rax, %r10 + adcq 40(%rsi), %rbx + movq %rbx, -16(%rsp) # 8-byte Spill + movq %rbx, %r9 + adcq 48(%rsi), %rbp + movq %rbp, -8(%rsp) # 8-byte Spill + movq %rbp, %rax + adcq 56(%rsi), %r8 + movq %r13, %rsi + subq (%rcx), %rsi + movq %r12, %rdx + sbbq 8(%rcx), %rdx + movq %r15, %rbx + sbbq 16(%rcx), %rbx + movq %r11, %r14 + sbbq 24(%rcx), %r14 + movq %r10, %rbp + sbbq 32(%rcx), %rbp + movq %r9, %r10 + sbbq 40(%rcx), %r10 + movq %rax, %r9 + sbbq 48(%rcx), %r9 + movq %r8, %rax + sbbq 56(%rcx), %rax + testq %rax, %rax + cmovsq %r13, %rsi + movq %rsi, (%rdi) + cmovsq %r12, %rdx + movq %rdx, 8(%rdi) + cmovsq %r15, %rbx + movq %rbx, 16(%rdi) + cmovsq %r11, %r14 + movq %r14, 24(%rdi) + cmovsq -24(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 32(%rdi) + cmovsq -16(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 40(%rdi) + cmovsq -8(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 48(%rdi) + cmovsq %r8, %rax + movq %rax, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end121: + .size mcl_fp_addNF8Lbmi2, .Lfunc_end121-mcl_fp_addNF8Lbmi2 + + .globl mcl_fp_sub8Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub8Lbmi2,@function +mcl_fp_sub8Lbmi2: # @mcl_fp_sub8Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r12 + movq 56(%rsi), %r8 + movq 48(%rdx), %r13 + movq (%rsi), %rax + movq 8(%rsi), %r10 + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %r10 + movq 16(%rsi), %r11 + sbbq 16(%rdx), %r11 + movq 24(%rsi), %r15 + sbbq 24(%rdx), %r15 + movq 32(%rsi), %r14 + sbbq 32(%rdx), %r14 + movq 48(%rsi), %r9 + movq 40(%rsi), %rsi + sbbq 40(%rdx), %rsi + movq %rax, (%rdi) + movq %r10, 8(%rdi) + movq %r11, 16(%rdi) + movq %r15, 24(%rdi) + 
movq %r14, 32(%rdi) + movq %rsi, 40(%rdi) + sbbq %r13, %r9 + movq %r9, 48(%rdi) + sbbq %r12, %r8 + movq %r8, 56(%rdi) + sbbq $0, %rbx + testb $1, %bl + je .LBB122_2 +# BB#1: # %carry + addq (%rcx), %rax + movq %rax, (%rdi) + movq 8(%rcx), %rax + adcq %r10, %rax + movq %rax, 8(%rdi) + movq 16(%rcx), %rax + adcq %r11, %rax + movq %rax, 16(%rdi) + movq 24(%rcx), %rax + adcq %r15, %rax + movq %rax, 24(%rdi) + movq 32(%rcx), %rax + adcq %r14, %rax + movq %rax, 32(%rdi) + movq 40(%rcx), %rax + adcq %rsi, %rax + movq %rax, 40(%rdi) + movq 48(%rcx), %rax + adcq %r9, %rax + movq %rax, 48(%rdi) + movq 56(%rcx), %rax + adcq %r8, %rax + movq %rax, 56(%rdi) +.LBB122_2: # %nocarry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end122: + .size mcl_fp_sub8Lbmi2, .Lfunc_end122-mcl_fp_sub8Lbmi2 + + .globl mcl_fp_subNF8Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF8Lbmi2,@function +mcl_fp_subNF8Lbmi2: # @mcl_fp_subNF8Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq %rdi, %r9 + movq 56(%rsi), %r14 + movq 48(%rsi), %rax + movq 40(%rsi), %rcx + movq 32(%rsi), %rdi + movq 24(%rsi), %r11 + movq 16(%rsi), %r15 + movq (%rsi), %r13 + movq 8(%rsi), %r12 + subq (%rdx), %r13 + sbbq 8(%rdx), %r12 + sbbq 16(%rdx), %r15 + sbbq 24(%rdx), %r11 + sbbq 32(%rdx), %rdi + movq %rdi, -24(%rsp) # 8-byte Spill + sbbq 40(%rdx), %rcx + movq %rcx, -16(%rsp) # 8-byte Spill + sbbq 48(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + sbbq 56(%rdx), %r14 + movq %r14, %rsi + sarq $63, %rsi + movq 56(%r8), %r10 + andq %rsi, %r10 + movq 48(%r8), %rbx + andq %rsi, %rbx + movq 40(%r8), %rdi + andq %rsi, %rdi + movq 32(%r8), %rbp + andq %rsi, %rbp + movq 24(%r8), %rdx + andq %rsi, %rdx + movq 16(%r8), %rcx + andq %rsi, %rcx + movq 8(%r8), %rax + andq %rsi, %rax + andq (%r8), %rsi + addq %r13, %rsi + adcq %r12, %rax + movq %rsi, (%r9) + adcq %r15, %rcx + movq %rax, 8(%r9) + movq %rcx, 16(%r9) + adcq %r11, %rdx + movq %rdx, 24(%r9) + adcq -24(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 32(%r9) + adcq -16(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 40(%r9) + adcq -8(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, 48(%r9) + adcq %r14, %r10 + movq %r10, 56(%r9) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end123: + .size mcl_fp_subNF8Lbmi2, .Lfunc_end123-mcl_fp_subNF8Lbmi2 + + .globl mcl_fpDbl_add8Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add8Lbmi2,@function +mcl_fpDbl_add8Lbmi2: # @mcl_fpDbl_add8Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 120(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 112(%rdx), %rax + movq %rax, -24(%rsp) # 8-byte Spill + movq 104(%rdx), %rax + movq %rax, -32(%rsp) # 8-byte Spill + movq 96(%rdx), %r14 + movq 24(%rsi), %r15 + movq 32(%rsi), %r11 + movq 16(%rdx), %r12 + movq (%rdx), %rbx + movq 8(%rdx), %rax + addq (%rsi), %rbx + adcq 8(%rsi), %rax + adcq 16(%rsi), %r12 + adcq 24(%rdx), %r15 + adcq 32(%rdx), %r11 + movq 88(%rdx), %rbp + movq 80(%rdx), %r13 + movq %rbx, (%rdi) + movq 72(%rdx), %r10 + movq %rax, 8(%rdi) + movq 64(%rdx), %r9 + movq %r12, 16(%rdi) + movq 40(%rdx), %r12 + movq %r15, 24(%rdi) + movq 40(%rsi), %rbx + adcq %r12, %rbx + movq 56(%rdx), %r15 + movq 48(%rdx), %r12 + movq %r11, 32(%rdi) + movq 48(%rsi), %rdx + adcq %r12, %rdx + movq 120(%rsi), %r12 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rax + adcq %r15, %rax + movq 112(%rsi), %rcx + movq %rdx, 48(%rdi) + movq 64(%rsi), %rbx + adcq 
%r9, %rbx + movq 104(%rsi), %rdx + movq %rax, 56(%rdi) + movq 72(%rsi), %r9 + adcq %r10, %r9 + movq 80(%rsi), %r11 + adcq %r13, %r11 + movq 96(%rsi), %rax + movq 88(%rsi), %r15 + adcq %rbp, %r15 + adcq %r14, %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq %rdx, %rax + adcq -32(%rsp), %rax # 8-byte Folded Reload + movq %rax, -32(%rsp) # 8-byte Spill + adcq -24(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -24(%rsp) # 8-byte Spill + adcq -8(%rsp), %r12 # 8-byte Folded Reload + movq %r12, -8(%rsp) # 8-byte Spill + sbbq %rbp, %rbp + andl $1, %ebp + movq %rbx, %rsi + subq (%r8), %rsi + movq %r9, %rdx + sbbq 8(%r8), %rdx + movq %r11, %r10 + sbbq 16(%r8), %r10 + movq %r15, %r14 + sbbq 24(%r8), %r14 + movq -16(%rsp), %r13 # 8-byte Reload + sbbq 32(%r8), %r13 + movq %rax, %r12 + sbbq 40(%r8), %r12 + movq %rcx, %rax + sbbq 48(%r8), %rax + movq -8(%rsp), %rcx # 8-byte Reload + sbbq 56(%r8), %rcx + sbbq $0, %rbp + andl $1, %ebp + cmovneq %rbx, %rsi + movq %rsi, 64(%rdi) + testb %bpl, %bpl + cmovneq %r9, %rdx + movq %rdx, 72(%rdi) + cmovneq %r11, %r10 + movq %r10, 80(%rdi) + cmovneq %r15, %r14 + movq %r14, 88(%rdi) + cmovneq -16(%rsp), %r13 # 8-byte Folded Reload + movq %r13, 96(%rdi) + cmovneq -32(%rsp), %r12 # 8-byte Folded Reload + movq %r12, 104(%rdi) + cmovneq -24(%rsp), %rax # 8-byte Folded Reload + movq %rax, 112(%rdi) + cmovneq -8(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 120(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end124: + .size mcl_fpDbl_add8Lbmi2, .Lfunc_end124-mcl_fpDbl_add8Lbmi2 + + .globl mcl_fpDbl_sub8Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub8Lbmi2,@function +mcl_fpDbl_sub8Lbmi2: # @mcl_fpDbl_sub8Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r15 + movq 120(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 112(%rdx), %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq 104(%rdx), %rax + movq %rax, -24(%rsp) # 8-byte Spill + movq 16(%rsi), %r9 + movq (%rsi), %r12 + movq 8(%rsi), %r14 + xorl %r8d, %r8d + subq (%rdx), %r12 + sbbq 8(%rdx), %r14 + sbbq 16(%rdx), %r9 + movq 24(%rsi), %rbx + sbbq 24(%rdx), %rbx + movq 32(%rsi), %r13 + sbbq 32(%rdx), %r13 + movq 96(%rdx), %rbp + movq 88(%rdx), %r11 + movq %r12, (%rdi) + movq 80(%rdx), %r12 + movq %r14, 8(%rdi) + movq 72(%rdx), %r10 + movq %r9, 16(%rdi) + movq 40(%rdx), %r9 + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %r9, %rbx + movq 48(%rdx), %r9 + movq %r13, 32(%rdi) + movq 48(%rsi), %r14 + sbbq %r9, %r14 + movq 64(%rdx), %r13 + movq 56(%rdx), %r9 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rdx + sbbq %r9, %rdx + movq 120(%rsi), %rcx + movq %r14, 48(%rdi) + movq 64(%rsi), %rbx + sbbq %r13, %rbx + movq 112(%rsi), %rax + movq %rdx, 56(%rdi) + movq 72(%rsi), %r9 + sbbq %r10, %r9 + movq 80(%rsi), %r13 + sbbq %r12, %r13 + movq 88(%rsi), %r12 + sbbq %r11, %r12 + movq 104(%rsi), %rdx + movq 96(%rsi), %r14 + sbbq %rbp, %r14 + sbbq -24(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -24(%rsp) # 8-byte Spill + sbbq -16(%rsp), %rax # 8-byte Folded Reload + movq %rax, -16(%rsp) # 8-byte Spill + sbbq -8(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -8(%rsp) # 8-byte Spill + movl $0, %ebp + sbbq $0, %rbp + andl $1, %ebp + movq (%r15), %r11 + cmoveq %r8, %r11 + testb %bpl, %bpl + movq 16(%r15), %rbp + cmoveq %r8, %rbp + movq 8(%r15), %rsi + cmoveq %r8, %rsi + movq 56(%r15), %r10 + cmoveq %r8, %r10 + movq 48(%r15), %rdx + cmoveq %r8, %rdx + movq 40(%r15), %rcx + cmoveq %r8, %rcx + movq 32(%r15), %rax + cmoveq %r8, %rax + 
cmovneq 24(%r15), %r8 + addq %rbx, %r11 + adcq %r9, %rsi + movq %r11, 64(%rdi) + adcq %r13, %rbp + movq %rsi, 72(%rdi) + movq %rbp, 80(%rdi) + adcq %r12, %r8 + movq %r8, 88(%rdi) + adcq %r14, %rax + movq %rax, 96(%rdi) + adcq -24(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 104(%rdi) + adcq -16(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 112(%rdi) + adcq -8(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 120(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end125: + .size mcl_fpDbl_sub8Lbmi2, .Lfunc_end125-mcl_fpDbl_sub8Lbmi2 + + .align 16, 0x90 + .type .LmulPv576x64,@function +.LmulPv576x64: # @mulPv576x64 +# BB#0: + mulxq (%rsi), %rcx, %rax + movq %rcx, (%rdi) + mulxq 8(%rsi), %rcx, %r8 + addq %rax, %rcx + movq %rcx, 8(%rdi) + mulxq 16(%rsi), %rcx, %r9 + adcq %r8, %rcx + movq %rcx, 16(%rdi) + mulxq 24(%rsi), %rax, %rcx + adcq %r9, %rax + movq %rax, 24(%rdi) + mulxq 32(%rsi), %rax, %r8 + adcq %rcx, %rax + movq %rax, 32(%rdi) + mulxq 40(%rsi), %rcx, %r9 + adcq %r8, %rcx + movq %rcx, 40(%rdi) + mulxq 48(%rsi), %rax, %rcx + adcq %r9, %rax + movq %rax, 48(%rdi) + mulxq 56(%rsi), %rax, %r8 + adcq %rcx, %rax + movq %rax, 56(%rdi) + mulxq 64(%rsi), %rax, %rcx + adcq %r8, %rax + movq %rax, 64(%rdi) + adcq $0, %rcx + movq %rcx, 72(%rdi) + movq %rdi, %rax + retq +.Lfunc_end126: + .size .LmulPv576x64, .Lfunc_end126-.LmulPv576x64 + + .globl mcl_fp_mulUnitPre9Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre9Lbmi2,@function +mcl_fp_mulUnitPre9Lbmi2: # @mcl_fp_mulUnitPre9Lbmi2 +# BB#0: + pushq %r14 + pushq %rbx + subq $88, %rsp + movq %rdi, %rbx + leaq 8(%rsp), %rdi + callq .LmulPv576x64 + movq 80(%rsp), %r8 + movq 72(%rsp), %r9 + movq 64(%rsp), %r10 + movq 56(%rsp), %r11 + movq 48(%rsp), %r14 + movq 40(%rsp), %rax + movq 32(%rsp), %rcx + movq 24(%rsp), %rdx + movq 8(%rsp), %rsi + movq 16(%rsp), %rdi + movq %rsi, (%rbx) + movq %rdi, 8(%rbx) + movq %rdx, 16(%rbx) + movq %rcx, 24(%rbx) + movq %rax, 32(%rbx) + movq %r14, 40(%rbx) + movq %r11, 48(%rbx) + movq %r10, 56(%rbx) + movq %r9, 64(%rbx) + movq %r8, 72(%rbx) + addq $88, %rsp + popq %rbx + popq %r14 + retq +.Lfunc_end127: + .size mcl_fp_mulUnitPre9Lbmi2, .Lfunc_end127-mcl_fp_mulUnitPre9Lbmi2 + + .globl mcl_fpDbl_mulPre9Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre9Lbmi2,@function +mcl_fpDbl_mulPre9Lbmi2: # @mcl_fpDbl_mulPre9Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $808, %rsp # imm = 0x328 + movq %rdx, %rax + movq %rax, 64(%rsp) # 8-byte Spill + movq %rsi, 72(%rsp) # 8-byte Spill + movq %rdi, %r12 + movq %r12, 80(%rsp) # 8-byte Spill + movq (%rax), %rdx + movq %rax, %rbx + leaq 728(%rsp), %rdi + movq %rsi, %rbp + callq .LmulPv576x64 + movq 800(%rsp), %r13 + movq 792(%rsp), %rax + movq %rax, 56(%rsp) # 8-byte Spill + movq 784(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 776(%rsp), %rax + movq %rax, 40(%rsp) # 8-byte Spill + movq 768(%rsp), %rax + movq %rax, 32(%rsp) # 8-byte Spill + movq 760(%rsp), %rax + movq %rax, 24(%rsp) # 8-byte Spill + movq 752(%rsp), %rax + movq %rax, 16(%rsp) # 8-byte Spill + movq 744(%rsp), %rax + movq %rax, 8(%rsp) # 8-byte Spill + movq 728(%rsp), %rax + movq 736(%rsp), %r14 + movq %rax, (%r12) + movq 8(%rbx), %rdx + leaq 648(%rsp), %rdi + movq %rbp, %rsi + callq .LmulPv576x64 + movq 720(%rsp), %r8 + movq 712(%rsp), %rcx + movq 704(%rsp), %rdx + movq 696(%rsp), %rsi + movq 688(%rsp), %rdi + movq 680(%rsp), %rbp + addq 648(%rsp), %r14 + movq 672(%rsp), %rax + movq 656(%rsp), %rbx + movq 664(%rsp), %r15 + 
movq %r14, 8(%r12) + adcq 8(%rsp), %rbx # 8-byte Folded Reload + adcq 16(%rsp), %r15 # 8-byte Folded Reload + adcq 24(%rsp), %rax # 8-byte Folded Reload + movq %rax, %r14 + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 16(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 24(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 32(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 40(%rsp) # 8-byte Spill + adcq %r13, %rcx + movq %rcx, 48(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 56(%rsp) # 8-byte Spill + movq 64(%rsp), %r13 # 8-byte Reload + movq 16(%r13), %rdx + leaq 568(%rsp), %rdi + movq 72(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 640(%rsp), %r8 + movq 632(%rsp), %r9 + movq 624(%rsp), %r10 + movq 616(%rsp), %rdi + movq 608(%rsp), %rbp + movq 600(%rsp), %rcx + addq 568(%rsp), %rbx + movq 592(%rsp), %rdx + movq 576(%rsp), %r12 + movq 584(%rsp), %rsi + movq 80(%rsp), %rax # 8-byte Reload + movq %rbx, 16(%rax) + adcq %r15, %r12 + adcq %r14, %rsi + movq %rsi, (%rsp) # 8-byte Spill + adcq 16(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 8(%rsp) # 8-byte Spill + adcq 24(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 48(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 56(%rsp) # 8-byte Spill + movq 24(%r13), %rdx + leaq 488(%rsp), %rdi + movq 72(%rsp), %r15 # 8-byte Reload + movq %r15, %rsi + callq .LmulPv576x64 + movq 560(%rsp), %r8 + movq 552(%rsp), %rcx + movq 544(%rsp), %rdx + movq 536(%rsp), %rsi + movq 528(%rsp), %rdi + movq 520(%rsp), %rbp + addq 488(%rsp), %r12 + movq 512(%rsp), %rax + movq 496(%rsp), %rbx + movq 504(%rsp), %r13 + movq 80(%rsp), %r14 # 8-byte Reload + movq %r12, 24(%r14) + adcq (%rsp), %rbx # 8-byte Folded Reload + adcq 8(%rsp), %r13 # 8-byte Folded Reload + adcq 16(%rsp), %rax # 8-byte Folded Reload + movq %rax, 8(%rsp) # 8-byte Spill + adcq 24(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 48(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 56(%rsp) # 8-byte Spill + movq 64(%rsp), %r12 # 8-byte Reload + movq 32(%r12), %rdx + leaq 408(%rsp), %rdi + movq %r15, %rsi + callq .LmulPv576x64 + movq 480(%rsp), %r8 + movq 472(%rsp), %r9 + movq 464(%rsp), %rdx + movq 456(%rsp), %rsi + movq 448(%rsp), %rdi + movq 440(%rsp), %rbp + addq 408(%rsp), %rbx + movq 432(%rsp), %rax + movq 416(%rsp), %r15 + movq 424(%rsp), %rcx + movq %rbx, 32(%r14) + adcq %r13, %r15 + adcq 8(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, (%rsp) # 8-byte Spill + adcq 16(%rsp), %rax # 8-byte Folded Reload + movq %rax, 8(%rsp) # 8-byte Spill + adcq 24(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 40(%rsp) # 
8-byte Spill + adcq 56(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 48(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 56(%rsp) # 8-byte Spill + movq %r12, %r14 + movq 40(%r14), %rdx + leaq 328(%rsp), %rdi + movq 72(%rsp), %r13 # 8-byte Reload + movq %r13, %rsi + callq .LmulPv576x64 + movq 400(%rsp), %r8 + movq 392(%rsp), %r9 + movq 384(%rsp), %rsi + movq 376(%rsp), %rdi + movq 368(%rsp), %rbx + movq 360(%rsp), %rbp + addq 328(%rsp), %r15 + movq 352(%rsp), %rcx + movq 336(%rsp), %r12 + movq 344(%rsp), %rdx + movq 80(%rsp), %rax # 8-byte Reload + movq %r15, 40(%rax) + adcq (%rsp), %r12 # 8-byte Folded Reload + adcq 8(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, (%rsp) # 8-byte Spill + adcq 16(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 8(%rsp) # 8-byte Spill + adcq 24(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 48(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 56(%rsp) # 8-byte Spill + movq 48(%r14), %rdx + leaq 248(%rsp), %rdi + movq %r13, %rsi + movq %r13, %r15 + callq .LmulPv576x64 + movq 320(%rsp), %r8 + movq 312(%rsp), %r9 + movq 304(%rsp), %rsi + movq 296(%rsp), %rdi + movq 288(%rsp), %rbx + movq 280(%rsp), %rbp + addq 248(%rsp), %r12 + movq 272(%rsp), %rcx + movq 256(%rsp), %r13 + movq 264(%rsp), %rdx + movq 80(%rsp), %rax # 8-byte Reload + movq %r12, 48(%rax) + adcq (%rsp), %r13 # 8-byte Folded Reload + adcq 8(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, (%rsp) # 8-byte Spill + adcq 16(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 8(%rsp) # 8-byte Spill + adcq 24(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 48(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 56(%rsp) # 8-byte Spill + movq 56(%r14), %rdx + leaq 168(%rsp), %rdi + movq %r15, %rsi + callq .LmulPv576x64 + movq 240(%rsp), %rcx + movq 232(%rsp), %rdx + movq 224(%rsp), %rsi + movq 216(%rsp), %rdi + movq 208(%rsp), %rbx + addq 168(%rsp), %r13 + movq 200(%rsp), %r12 + movq 192(%rsp), %rbp + movq 176(%rsp), %r14 + movq 184(%rsp), %r15 + movq 80(%rsp), %rax # 8-byte Reload + movq %r13, 56(%rax) + adcq (%rsp), %r14 # 8-byte Folded Reload + adcq 8(%rsp), %r15 # 8-byte Folded Reload + adcq 16(%rsp), %rbp # 8-byte Folded Reload + adcq 24(%rsp), %r12 # 8-byte Folded Reload + adcq 32(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, %r13 + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 48(%rsp) # 8-byte Spill + adcq $0, %rcx + movq %rcx, 56(%rsp) # 8-byte Spill + movq 64(%rsp), %rax # 8-byte Reload + movq 64(%rax), %rdx + leaq 88(%rsp), %rdi + movq 72(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 88(%rsp), %r14 + adcq 96(%rsp), %r15 + movq 160(%rsp), %r8 + adcq 104(%rsp), %rbp + movq 152(%rsp), %r9 + movq 144(%rsp), %rdx + movq 136(%rsp), %rsi + movq 128(%rsp), %rdi + movq 120(%rsp), %rbx + movq 
112(%rsp), %rax + movq 80(%rsp), %rcx # 8-byte Reload + movq %r14, 64(%rcx) + movq %r15, 72(%rcx) + adcq %r12, %rax + movq %rbp, 80(%rcx) + movq %rax, 88(%rcx) + adcq %r13, %rbx + movq %rbx, 96(%rcx) + adcq 32(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 104(%rcx) + adcq 40(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 112(%rcx) + adcq 48(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 120(%rcx) + adcq 56(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 128(%rcx) + adcq $0, %r8 + movq %r8, 136(%rcx) + addq $808, %rsp # imm = 0x328 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end128: + .size mcl_fpDbl_mulPre9Lbmi2, .Lfunc_end128-mcl_fpDbl_mulPre9Lbmi2 + + .globl mcl_fpDbl_sqrPre9Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre9Lbmi2,@function +mcl_fpDbl_sqrPre9Lbmi2: # @mcl_fpDbl_sqrPre9Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $808, %rsp # imm = 0x328 + movq %rsi, %r15 + movq %r15, 80(%rsp) # 8-byte Spill + movq %rdi, %r14 + movq %r14, 72(%rsp) # 8-byte Spill + movq (%r15), %rdx + leaq 728(%rsp), %rdi + callq .LmulPv576x64 + movq 800(%rsp), %rax + movq %rax, 64(%rsp) # 8-byte Spill + movq 792(%rsp), %rax + movq %rax, 56(%rsp) # 8-byte Spill + movq 784(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 776(%rsp), %rax + movq %rax, 40(%rsp) # 8-byte Spill + movq 768(%rsp), %rax + movq %rax, 32(%rsp) # 8-byte Spill + movq 760(%rsp), %rax + movq %rax, 24(%rsp) # 8-byte Spill + movq 752(%rsp), %rax + movq %rax, 16(%rsp) # 8-byte Spill + movq 744(%rsp), %rax + movq %rax, 8(%rsp) # 8-byte Spill + movq 728(%rsp), %rax + movq 736(%rsp), %r12 + movq %rax, (%r14) + movq 8(%r15), %rdx + leaq 648(%rsp), %rdi + movq %r15, %rsi + callq .LmulPv576x64 + movq 720(%rsp), %r8 + movq 712(%rsp), %rcx + movq 704(%rsp), %rdx + movq 696(%rsp), %rsi + movq 688(%rsp), %rdi + movq 680(%rsp), %rbp + addq 648(%rsp), %r12 + movq 672(%rsp), %rax + movq 656(%rsp), %rbx + movq 664(%rsp), %r13 + movq %r12, 8(%r14) + adcq 8(%rsp), %rbx # 8-byte Folded Reload + adcq 16(%rsp), %r13 # 8-byte Folded Reload + adcq 24(%rsp), %rax # 8-byte Folded Reload + movq %rax, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 48(%rsp) # 8-byte Spill + adcq 64(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 56(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq 16(%r15), %rdx + leaq 568(%rsp), %rdi + movq %r15, %rsi + callq .LmulPv576x64 + movq 640(%rsp), %r8 + movq 632(%rsp), %rcx + movq 624(%rsp), %rdx + movq 616(%rsp), %rsi + movq 608(%rsp), %rdi + movq 600(%rsp), %rbp + addq 568(%rsp), %rbx + movq 592(%rsp), %rax + movq 576(%rsp), %r14 + movq 584(%rsp), %r12 + movq 72(%rsp), %r15 # 8-byte Reload + movq %rbx, 16(%r15) + adcq %r13, %r14 + adcq 16(%rsp), %r12 # 8-byte Folded Reload + adcq 24(%rsp), %rax # 8-byte Folded Reload + movq %rax, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 48(%rsp) # 8-byte Spill + adcq 64(%rsp), %rcx # 8-byte Folded Reload + movq 
%rcx, 56(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq 80(%rsp), %rsi # 8-byte Reload + movq 24(%rsi), %rdx + leaq 488(%rsp), %rdi + callq .LmulPv576x64 + movq 560(%rsp), %r8 + movq 552(%rsp), %rcx + movq 544(%rsp), %rdx + movq 536(%rsp), %rsi + movq 528(%rsp), %rdi + movq 520(%rsp), %rbp + addq 488(%rsp), %r14 + movq 512(%rsp), %rax + movq 496(%rsp), %rbx + movq 504(%rsp), %r13 + movq %r14, 24(%r15) + adcq %r12, %rbx + adcq 16(%rsp), %r13 # 8-byte Folded Reload + adcq 24(%rsp), %rax # 8-byte Folded Reload + movq %rax, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 48(%rsp) # 8-byte Spill + adcq 64(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 56(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq 80(%rsp), %rsi # 8-byte Reload + movq 32(%rsi), %rdx + leaq 408(%rsp), %rdi + callq .LmulPv576x64 + movq 480(%rsp), %r8 + movq 472(%rsp), %rcx + movq 464(%rsp), %rdx + movq 456(%rsp), %rsi + movq 448(%rsp), %rdi + movq 440(%rsp), %rbp + addq 408(%rsp), %rbx + movq 432(%rsp), %rax + movq 416(%rsp), %r14 + movq 424(%rsp), %r12 + movq %rbx, 32(%r15) + adcq %r13, %r14 + adcq 16(%rsp), %r12 # 8-byte Folded Reload + adcq 24(%rsp), %rax # 8-byte Folded Reload + movq %rax, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 48(%rsp) # 8-byte Spill + adcq 64(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 56(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq 80(%rsp), %rsi # 8-byte Reload + movq 40(%rsi), %rdx + leaq 328(%rsp), %rdi + callq .LmulPv576x64 + movq 400(%rsp), %r8 + movq 392(%rsp), %rcx + movq 384(%rsp), %rdx + movq 376(%rsp), %rsi + movq 368(%rsp), %rdi + movq 360(%rsp), %rbp + addq 328(%rsp), %r14 + movq 352(%rsp), %rax + movq 336(%rsp), %rbx + movq 344(%rsp), %r13 + movq %r14, 40(%r15) + adcq %r12, %rbx + adcq 16(%rsp), %r13 # 8-byte Folded Reload + adcq 24(%rsp), %rax # 8-byte Folded Reload + movq %rax, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 48(%rsp) # 8-byte Spill + adcq 64(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 56(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq 80(%rsp), %rsi # 8-byte Reload + movq 48(%rsi), %rdx + leaq 248(%rsp), %rdi + callq .LmulPv576x64 + movq 320(%rsp), %r8 + movq 312(%rsp), %rcx + movq 304(%rsp), %rdx + movq 296(%rsp), %rsi + movq 288(%rsp), %rdi + movq 280(%rsp), %rbp + addq 248(%rsp), %rbx + movq 272(%rsp), %rax + movq 256(%rsp), %r12 + movq 264(%rsp), %r14 + movq %rbx, 48(%r15) + adcq %r13, %r12 + adcq 16(%rsp), %r14 # 8-byte Folded Reload + adcq 24(%rsp), %rax # 8-byte Folded Reload + movq %rax, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%rsp) # 8-byte Spill + 
adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 48(%rsp) # 8-byte Spill + adcq 64(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 56(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq 80(%rsp), %rsi # 8-byte Reload + movq 56(%rsi), %rdx + leaq 168(%rsp), %rdi + callq .LmulPv576x64 + movq 240(%rsp), %r8 + movq 232(%rsp), %rdx + movq 224(%rsp), %rsi + movq 216(%rsp), %rdi + movq 208(%rsp), %rbx + movq 200(%rsp), %rcx + addq 168(%rsp), %r12 + movq 192(%rsp), %r15 + movq 176(%rsp), %r13 + movq 184(%rsp), %rbp + movq 72(%rsp), %rax # 8-byte Reload + movq %r12, 56(%rax) + adcq %r14, %r13 + adcq 16(%rsp), %rbp # 8-byte Folded Reload + adcq 24(%rsp), %r15 # 8-byte Folded Reload + adcq 32(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, %r12 + adcq 40(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, %r14 + adcq 48(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 48(%rsp) # 8-byte Spill + adcq 64(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 56(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq 80(%rsp), %rsi # 8-byte Reload + movq 64(%rsi), %rdx + leaq 88(%rsp), %rdi + callq .LmulPv576x64 + addq 88(%rsp), %r13 + adcq 96(%rsp), %rbp + movq 160(%rsp), %r8 + adcq 104(%rsp), %r15 + movq 152(%rsp), %r9 + movq 144(%rsp), %rdx + movq 136(%rsp), %rsi + movq 128(%rsp), %rdi + movq 120(%rsp), %rbx + movq 112(%rsp), %rax + movq 72(%rsp), %rcx # 8-byte Reload + movq %r13, 64(%rcx) + movq %rbp, 72(%rcx) + adcq %r12, %rax + movq %r15, 80(%rcx) + movq %rax, 88(%rcx) + adcq %r14, %rbx + movq %rbx, 96(%rcx) + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 104(%rcx) + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 112(%rcx) + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 120(%rcx) + adcq 64(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 128(%rcx) + adcq $0, %r8 + movq %r8, 136(%rcx) + addq $808, %rsp # imm = 0x328 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end129: + .size mcl_fpDbl_sqrPre9Lbmi2, .Lfunc_end129-mcl_fpDbl_sqrPre9Lbmi2 + + .globl mcl_fp_mont9Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont9Lbmi2,@function +mcl_fp_mont9Lbmi2: # @mcl_fp_mont9Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $1560, %rsp # imm = 0x618 + movq %rcx, 56(%rsp) # 8-byte Spill + movq %rdx, 32(%rsp) # 8-byte Spill + movq %rsi, 24(%rsp) # 8-byte Spill + movq %rdi, (%rsp) # 8-byte Spill + movq -8(%rcx), %rbx + movq %rbx, 16(%rsp) # 8-byte Spill + movq (%rdx), %rdx + leaq 1480(%rsp), %rdi + callq .LmulPv576x64 + movq 1480(%rsp), %r14 + movq 1488(%rsp), %r15 + movq %r14, %rdx + imulq %rbx, %rdx + movq 1552(%rsp), %rax + movq %rax, 112(%rsp) # 8-byte Spill + movq 1544(%rsp), %rax + movq %rax, 104(%rsp) # 8-byte Spill + movq 1536(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 1528(%rsp), %r12 + movq 1520(%rsp), %r13 + movq 1512(%rsp), %rbx + movq 1504(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 1496(%rsp), %rbp + leaq 1400(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 1400(%rsp), %r14 + adcq 1408(%rsp), %r15 + adcq 1416(%rsp), %rbp + movq %rbp, 96(%rsp) # 8-byte Spill + movq 80(%rsp), %rax # 8-byte Reload + adcq 1424(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + 
adcq 1432(%rsp), %rbx + movq %rbx, 40(%rsp) # 8-byte Spill + adcq 1440(%rsp), %r13 + movq %r13, 64(%rsp) # 8-byte Spill + adcq 1448(%rsp), %r12 + movq %r12, 48(%rsp) # 8-byte Spill + movq 88(%rsp), %rbx # 8-byte Reload + adcq 1456(%rsp), %rbx + movq 104(%rsp), %r14 # 8-byte Reload + adcq 1464(%rsp), %r14 + movq 112(%rsp), %r13 # 8-byte Reload + adcq 1472(%rsp), %r13 + sbbq %rbp, %rbp + movq 32(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rdx + leaq 1320(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %ebp + addq 1320(%rsp), %r15 + movq 96(%rsp), %rax # 8-byte Reload + adcq 1328(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + movq 80(%rsp), %rax # 8-byte Reload + adcq 1336(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 40(%rsp), %r12 # 8-byte Reload + adcq 1344(%rsp), %r12 + movq 64(%rsp), %rax # 8-byte Reload + adcq 1352(%rsp), %rax + movq %rax, 64(%rsp) # 8-byte Spill + movq 48(%rsp), %rax # 8-byte Reload + adcq 1360(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + adcq 1368(%rsp), %rbx + adcq 1376(%rsp), %r14 + movq %r14, 104(%rsp) # 8-byte Spill + adcq 1384(%rsp), %r13 + movq %r13, 112(%rsp) # 8-byte Spill + adcq 1392(%rsp), %rbp + sbbq %r14, %r14 + movq %r15, %rdx + imulq 16(%rsp), %rdx # 8-byte Folded Reload + leaq 1240(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq %r14, %rax + andl $1, %eax + addq 1240(%rsp), %r15 + movq 96(%rsp), %rcx # 8-byte Reload + adcq 1248(%rsp), %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + movq 80(%rsp), %r14 # 8-byte Reload + adcq 1256(%rsp), %r14 + adcq 1264(%rsp), %r12 + movq %r12, 40(%rsp) # 8-byte Spill + movq 64(%rsp), %r12 # 8-byte Reload + adcq 1272(%rsp), %r12 + movq 48(%rsp), %r13 # 8-byte Reload + adcq 1280(%rsp), %r13 + adcq 1288(%rsp), %rbx + movq %rbx, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %r15 # 8-byte Reload + adcq 1296(%rsp), %r15 + movq 112(%rsp), %rbx # 8-byte Reload + adcq 1304(%rsp), %rbx + adcq 1312(%rsp), %rbp + adcq $0, %rax + movq %rax, 72(%rsp) # 8-byte Spill + movq 32(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rdx + leaq 1160(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 96(%rsp), %rax # 8-byte Reload + addq 1160(%rsp), %rax + adcq 1168(%rsp), %r14 + movq %r14, 80(%rsp) # 8-byte Spill + movq 40(%rsp), %r14 # 8-byte Reload + adcq 1176(%rsp), %r14 + adcq 1184(%rsp), %r12 + movq %r12, 64(%rsp) # 8-byte Spill + movq %r13, %r12 + adcq 1192(%rsp), %r12 + movq 88(%rsp), %rcx # 8-byte Reload + adcq 1200(%rsp), %rcx + movq %rcx, 88(%rsp) # 8-byte Spill + adcq 1208(%rsp), %r15 + movq %r15, %r13 + adcq 1216(%rsp), %rbx + movq %rbx, 112(%rsp) # 8-byte Spill + adcq 1224(%rsp), %rbp + movq 72(%rsp), %rcx # 8-byte Reload + adcq 1232(%rsp), %rcx + movq %rcx, 72(%rsp) # 8-byte Spill + sbbq %r15, %r15 + movq %rax, %rdx + movq %rax, %rbx + imulq 16(%rsp), %rdx # 8-byte Folded Reload + leaq 1080(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq %r15, %rax + andl $1, %eax + addq 1080(%rsp), %rbx + movq 80(%rsp), %rcx # 8-byte Reload + adcq 1088(%rsp), %rcx + movq %rcx, 80(%rsp) # 8-byte Spill + movq %r14, %r15 + adcq 1096(%rsp), %r15 + movq 64(%rsp), %r14 # 8-byte Reload + adcq 1104(%rsp), %r14 + movq %r12, %rbx + adcq 1112(%rsp), %rbx + movq 88(%rsp), %rcx # 8-byte Reload + adcq 1120(%rsp), %rcx + movq %rcx, 88(%rsp) # 8-byte Spill + adcq 1128(%rsp), %r13 + movq %r13, 104(%rsp) # 8-byte Spill + movq 112(%rsp), %r13 # 8-byte Reload + adcq 1136(%rsp), %r13 + adcq 1144(%rsp), %rbp + movq 72(%rsp), %r12 # 
8-byte Reload + adcq 1152(%rsp), %r12 + adcq $0, %rax + movq %rax, 96(%rsp) # 8-byte Spill + movq 32(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rdx + leaq 1000(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 80(%rsp), %rax # 8-byte Reload + addq 1000(%rsp), %rax + adcq 1008(%rsp), %r15 + movq %r15, 40(%rsp) # 8-byte Spill + adcq 1016(%rsp), %r14 + movq %r14, %r15 + adcq 1024(%rsp), %rbx + movq %rbx, 48(%rsp) # 8-byte Spill + movq 88(%rsp), %r14 # 8-byte Reload + adcq 1032(%rsp), %r14 + movq 104(%rsp), %rcx # 8-byte Reload + adcq 1040(%rsp), %rcx + movq %rcx, 104(%rsp) # 8-byte Spill + adcq 1048(%rsp), %r13 + movq %r13, 112(%rsp) # 8-byte Spill + adcq 1056(%rsp), %rbp + adcq 1064(%rsp), %r12 + movq 96(%rsp), %rcx # 8-byte Reload + adcq 1072(%rsp), %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + sbbq %rbx, %rbx + movq %rax, %rdx + movq %rax, %r13 + imulq 16(%rsp), %rdx # 8-byte Folded Reload + leaq 920(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %ebx + movq %rbx, %rax + addq 920(%rsp), %r13 + movq 40(%rsp), %rcx # 8-byte Reload + adcq 928(%rsp), %rcx + movq %rcx, 40(%rsp) # 8-byte Spill + adcq 936(%rsp), %r15 + movq %r15, 64(%rsp) # 8-byte Spill + movq 48(%rsp), %r15 # 8-byte Reload + adcq 944(%rsp), %r15 + movq %r14, %r13 + adcq 952(%rsp), %r13 + movq 104(%rsp), %r14 # 8-byte Reload + adcq 960(%rsp), %r14 + movq 112(%rsp), %rbx # 8-byte Reload + adcq 968(%rsp), %rbx + adcq 976(%rsp), %rbp + adcq 984(%rsp), %r12 + movq 96(%rsp), %rcx # 8-byte Reload + adcq 992(%rsp), %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 32(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rdx + leaq 840(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 40(%rsp), %rax # 8-byte Reload + addq 840(%rsp), %rax + movq 64(%rsp), %rcx # 8-byte Reload + adcq 848(%rsp), %rcx + movq %rcx, 64(%rsp) # 8-byte Spill + adcq 856(%rsp), %r15 + adcq 864(%rsp), %r13 + movq %r13, 88(%rsp) # 8-byte Spill + adcq 872(%rsp), %r14 + movq %r14, 104(%rsp) # 8-byte Spill + adcq 880(%rsp), %rbx + movq %rbx, 112(%rsp) # 8-byte Spill + adcq 888(%rsp), %rbp + adcq 896(%rsp), %r12 + movq 96(%rsp), %r13 # 8-byte Reload + adcq 904(%rsp), %r13 + movq 80(%rsp), %rcx # 8-byte Reload + adcq 912(%rsp), %rcx + movq %rcx, 80(%rsp) # 8-byte Spill + sbbq %rbx, %rbx + movq %rax, %rdx + movq %rax, %r14 + imulq 16(%rsp), %rdx # 8-byte Folded Reload + leaq 760(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %ebx + movq %rbx, %rax + addq 760(%rsp), %r14 + movq 64(%rsp), %rcx # 8-byte Reload + adcq 768(%rsp), %rcx + movq %rcx, 64(%rsp) # 8-byte Spill + adcq 776(%rsp), %r15 + movq 88(%rsp), %r14 # 8-byte Reload + adcq 784(%rsp), %r14 + movq 104(%rsp), %rcx # 8-byte Reload + adcq 792(%rsp), %rcx + movq %rcx, 104(%rsp) # 8-byte Spill + movq 112(%rsp), %rcx # 8-byte Reload + adcq 800(%rsp), %rcx + movq %rcx, 112(%rsp) # 8-byte Spill + adcq 808(%rsp), %rbp + movq %r12, %rbx + adcq 816(%rsp), %rbx + movq %r13, %r12 + adcq 824(%rsp), %r12 + movq 80(%rsp), %r13 # 8-byte Reload + adcq 832(%rsp), %r13 + adcq $0, %rax + movq %rax, 40(%rsp) # 8-byte Spill + movq 32(%rsp), %rax # 8-byte Reload + movq 40(%rax), %rdx + leaq 680(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 64(%rsp), %rax # 8-byte Reload + addq 680(%rsp), %rax + adcq 688(%rsp), %r15 + movq %r15, 48(%rsp) # 8-byte Spill + adcq 696(%rsp), %r14 + movq %r14, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %rcx 
# 8-byte Reload + adcq 704(%rsp), %rcx + movq %rcx, 104(%rsp) # 8-byte Spill + movq 112(%rsp), %r15 # 8-byte Reload + adcq 712(%rsp), %r15 + adcq 720(%rsp), %rbp + adcq 728(%rsp), %rbx + movq %rbx, 72(%rsp) # 8-byte Spill + adcq 736(%rsp), %r12 + movq %r12, 96(%rsp) # 8-byte Spill + adcq 744(%rsp), %r13 + movq %r13, 80(%rsp) # 8-byte Spill + movq 40(%rsp), %r13 # 8-byte Reload + adcq 752(%rsp), %r13 + sbbq %r14, %r14 + movq %rax, %rdx + movq %rax, %rbx + imulq 16(%rsp), %rdx # 8-byte Folded Reload + leaq 600(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %r14d + addq 600(%rsp), %rbx + movq 48(%rsp), %rax # 8-byte Reload + adcq 608(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 88(%rsp), %rax # 8-byte Reload + adcq 616(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %rbx # 8-byte Reload + adcq 624(%rsp), %rbx + adcq 632(%rsp), %r15 + movq %r15, 112(%rsp) # 8-byte Spill + adcq 640(%rsp), %rbp + movq 72(%rsp), %r12 # 8-byte Reload + adcq 648(%rsp), %r12 + movq 96(%rsp), %rax # 8-byte Reload + adcq 656(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + movq 80(%rsp), %r15 # 8-byte Reload + adcq 664(%rsp), %r15 + adcq 672(%rsp), %r13 + adcq $0, %r14 + movq %r14, 64(%rsp) # 8-byte Spill + movq 32(%rsp), %rax # 8-byte Reload + movq 48(%rax), %rdx + leaq 520(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 48(%rsp), %rax # 8-byte Reload + addq 520(%rsp), %rax + movq 88(%rsp), %r14 # 8-byte Reload + adcq 528(%rsp), %r14 + adcq 536(%rsp), %rbx + movq %rbx, 104(%rsp) # 8-byte Spill + movq 112(%rsp), %rcx # 8-byte Reload + adcq 544(%rsp), %rcx + movq %rcx, 112(%rsp) # 8-byte Spill + adcq 552(%rsp), %rbp + adcq 560(%rsp), %r12 + movq %r12, 72(%rsp) # 8-byte Spill + movq 96(%rsp), %r12 # 8-byte Reload + adcq 568(%rsp), %r12 + adcq 576(%rsp), %r15 + movq %r15, 80(%rsp) # 8-byte Spill + adcq 584(%rsp), %r13 + movq %r13, 40(%rsp) # 8-byte Spill + movq 64(%rsp), %r15 # 8-byte Reload + adcq 592(%rsp), %r15 + sbbq %rbx, %rbx + movq %rax, %rdx + movq %rax, %r13 + imulq 16(%rsp), %rdx # 8-byte Folded Reload + leaq 440(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %ebx + movq %rbx, %rax + addq 440(%rsp), %r13 + adcq 448(%rsp), %r14 + movq %r14, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %r14 # 8-byte Reload + adcq 456(%rsp), %r14 + movq 112(%rsp), %rbx # 8-byte Reload + adcq 464(%rsp), %rbx + adcq 472(%rsp), %rbp + movq %rbp, 8(%rsp) # 8-byte Spill + movq 72(%rsp), %rcx # 8-byte Reload + adcq 480(%rsp), %rcx + movq %rcx, 72(%rsp) # 8-byte Spill + adcq 488(%rsp), %r12 + movq %r12, 96(%rsp) # 8-byte Spill + movq 80(%rsp), %rbp # 8-byte Reload + adcq 496(%rsp), %rbp + movq 40(%rsp), %r12 # 8-byte Reload + adcq 504(%rsp), %r12 + adcq 512(%rsp), %r15 + movq %r15, %r13 + adcq $0, %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 32(%rsp), %rax # 8-byte Reload + movq 56(%rax), %rdx + leaq 360(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 88(%rsp), %rax # 8-byte Reload + addq 360(%rsp), %rax + adcq 368(%rsp), %r14 + adcq 376(%rsp), %rbx + movq %rbx, 112(%rsp) # 8-byte Spill + movq 8(%rsp), %rcx # 8-byte Reload + adcq 384(%rsp), %rcx + movq %rcx, 8(%rsp) # 8-byte Spill + movq 72(%rsp), %rbx # 8-byte Reload + adcq 392(%rsp), %rbx + movq 96(%rsp), %r15 # 8-byte Reload + adcq 400(%rsp), %r15 + adcq 408(%rsp), %rbp + movq %rbp, 80(%rsp) # 8-byte Spill + adcq 416(%rsp), %r12 + movq %r12, %rbp + adcq 424(%rsp), %r13 + movq %r13, 64(%rsp) # 8-byte Spill + movq 
48(%rsp), %rcx # 8-byte Reload + adcq 432(%rsp), %rcx + movq %rcx, 48(%rsp) # 8-byte Spill + sbbq %r13, %r13 + movq %rax, %rdx + movq %rax, %r12 + imulq 16(%rsp), %rdx # 8-byte Folded Reload + leaq 280(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %r13d + addq 280(%rsp), %r12 + adcq 288(%rsp), %r14 + movq %r14, 104(%rsp) # 8-byte Spill + movq 112(%rsp), %rax # 8-byte Reload + adcq 296(%rsp), %rax + movq %rax, 112(%rsp) # 8-byte Spill + movq 8(%rsp), %r14 # 8-byte Reload + adcq 304(%rsp), %r14 + adcq 312(%rsp), %rbx + movq %rbx, 72(%rsp) # 8-byte Spill + adcq 320(%rsp), %r15 + movq %r15, 96(%rsp) # 8-byte Spill + movq 80(%rsp), %rbx # 8-byte Reload + adcq 328(%rsp), %rbx + adcq 336(%rsp), %rbp + movq %rbp, 40(%rsp) # 8-byte Spill + movq 64(%rsp), %r12 # 8-byte Reload + adcq 344(%rsp), %r12 + movq 48(%rsp), %rbp # 8-byte Reload + adcq 352(%rsp), %rbp + adcq $0, %r13 + movq 32(%rsp), %rax # 8-byte Reload + movq 64(%rax), %rdx + leaq 200(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 104(%rsp), %rax # 8-byte Reload + addq 200(%rsp), %rax + movq 112(%rsp), %r15 # 8-byte Reload + adcq 208(%rsp), %r15 + adcq 216(%rsp), %r14 + movq %r14, 8(%rsp) # 8-byte Spill + movq 72(%rsp), %r14 # 8-byte Reload + adcq 224(%rsp), %r14 + movq 96(%rsp), %rcx # 8-byte Reload + adcq 232(%rsp), %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + adcq 240(%rsp), %rbx + movq %rbx, 80(%rsp) # 8-byte Spill + movq 40(%rsp), %rcx # 8-byte Reload + adcq 248(%rsp), %rcx + movq %rcx, 40(%rsp) # 8-byte Spill + adcq 256(%rsp), %r12 + movq %r12, 64(%rsp) # 8-byte Spill + adcq 264(%rsp), %rbp + movq %rbp, 48(%rsp) # 8-byte Spill + adcq 272(%rsp), %r13 + sbbq %rbx, %rbx + movq 16(%rsp), %rdx # 8-byte Reload + imulq %rax, %rdx + movq %rax, %r12 + leaq 120(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %ebx + addq 120(%rsp), %r12 + adcq 128(%rsp), %r15 + movq 8(%rsp), %rbp # 8-byte Reload + adcq 136(%rsp), %rbp + movq %r14, %rcx + adcq 144(%rsp), %rcx + movq %rcx, 72(%rsp) # 8-byte Spill + movq 96(%rsp), %r8 # 8-byte Reload + adcq 152(%rsp), %r8 + movq %r8, 96(%rsp) # 8-byte Spill + movq 80(%rsp), %r9 # 8-byte Reload + adcq 160(%rsp), %r9 + movq %r9, 80(%rsp) # 8-byte Spill + movq 40(%rsp), %r10 # 8-byte Reload + adcq 168(%rsp), %r10 + movq %r10, 40(%rsp) # 8-byte Spill + movq 64(%rsp), %rdi # 8-byte Reload + adcq 176(%rsp), %rdi + movq %rdi, 64(%rsp) # 8-byte Spill + movq 48(%rsp), %r14 # 8-byte Reload + adcq 184(%rsp), %r14 + adcq 192(%rsp), %r13 + adcq $0, %rbx + movq %r15, %rsi + movq %r15, %r12 + movq 56(%rsp), %rdx # 8-byte Reload + subq (%rdx), %rsi + movq %rbp, %rax + movq %rbp, %r15 + sbbq 8(%rdx), %rax + movq %rcx, %rbp + sbbq 16(%rdx), %rbp + movq %r8, %rcx + sbbq 24(%rdx), %rcx + movq %r9, %r8 + sbbq 32(%rdx), %r8 + movq %r10, %r11 + sbbq 40(%rdx), %r11 + movq %rdi, %r10 + sbbq 48(%rdx), %r10 + movq %r14, %rdi + sbbq 56(%rdx), %rdi + movq %r13, %r9 + sbbq 64(%rdx), %r9 + sbbq $0, %rbx + andl $1, %ebx + cmovneq %r13, %r9 + testb %bl, %bl + cmovneq %r12, %rsi + movq (%rsp), %rbx # 8-byte Reload + movq %rsi, (%rbx) + cmovneq %r15, %rax + movq %rax, 8(%rbx) + cmovneq 72(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 16(%rbx) + cmovneq 96(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 24(%rbx) + cmovneq 80(%rsp), %r8 # 8-byte Folded Reload + movq %r8, 32(%rbx) + cmovneq 40(%rsp), %r11 # 8-byte Folded Reload + movq %r11, 40(%rbx) + cmovneq 64(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 48(%rbx) + cmovneq %r14, %rdi + movq 
%rdi, 56(%rbx) + movq %r9, 64(%rbx) + addq $1560, %rsp # imm = 0x618 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end130: + .size mcl_fp_mont9Lbmi2, .Lfunc_end130-mcl_fp_mont9Lbmi2 + + .globl mcl_fp_montNF9Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF9Lbmi2,@function +mcl_fp_montNF9Lbmi2: # @mcl_fp_montNF9Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $1560, %rsp # imm = 0x618 + movq %rcx, 64(%rsp) # 8-byte Spill + movq %rdx, 16(%rsp) # 8-byte Spill + movq %rsi, 24(%rsp) # 8-byte Spill + movq %rdi, (%rsp) # 8-byte Spill + movq -8(%rcx), %rbx + movq %rbx, 32(%rsp) # 8-byte Spill + movq (%rdx), %rdx + leaq 1480(%rsp), %rdi + callq .LmulPv576x64 + movq 1480(%rsp), %r12 + movq 1488(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq %r12, %rdx + imulq %rbx, %rdx + movq 1552(%rsp), %rax + movq %rax, 112(%rsp) # 8-byte Spill + movq 1544(%rsp), %r13 + movq 1536(%rsp), %rax + movq %rax, 72(%rsp) # 8-byte Spill + movq 1528(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 1520(%rsp), %r14 + movq 1512(%rsp), %r15 + movq 1504(%rsp), %rbx + movq 1496(%rsp), %rbp + leaq 1400(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 1400(%rsp), %r12 + movq 88(%rsp), %rax # 8-byte Reload + adcq 1408(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + adcq 1416(%rsp), %rbp + movq %rbp, 8(%rsp) # 8-byte Spill + adcq 1424(%rsp), %rbx + movq %rbx, 104(%rsp) # 8-byte Spill + adcq 1432(%rsp), %r15 + movq %r15, 56(%rsp) # 8-byte Spill + adcq 1440(%rsp), %r14 + movq %r14, 40(%rsp) # 8-byte Spill + movq 48(%rsp), %rbx # 8-byte Reload + adcq 1448(%rsp), %rbx + movq 72(%rsp), %r12 # 8-byte Reload + adcq 1456(%rsp), %r12 + adcq 1464(%rsp), %r13 + movq %r13, 96(%rsp) # 8-byte Spill + movq 112(%rsp), %rbp # 8-byte Reload + adcq 1472(%rsp), %rbp + movq 16(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rdx + leaq 1320(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 1392(%rsp), %rax + movq 88(%rsp), %rcx # 8-byte Reload + addq 1320(%rsp), %rcx + movq 8(%rsp), %r15 # 8-byte Reload + adcq 1328(%rsp), %r15 + movq 104(%rsp), %r14 # 8-byte Reload + adcq 1336(%rsp), %r14 + movq 56(%rsp), %rdx # 8-byte Reload + adcq 1344(%rsp), %rdx + movq %rdx, 56(%rsp) # 8-byte Spill + movq 40(%rsp), %r13 # 8-byte Reload + adcq 1352(%rsp), %r13 + adcq 1360(%rsp), %rbx + movq %rbx, 48(%rsp) # 8-byte Spill + adcq 1368(%rsp), %r12 + movq %r12, 72(%rsp) # 8-byte Spill + movq 96(%rsp), %rdx # 8-byte Reload + adcq 1376(%rsp), %rdx + movq %rdx, 96(%rsp) # 8-byte Spill + adcq 1384(%rsp), %rbp + movq %rbp, 112(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, %rbp + movq %rcx, %rdx + movq %rcx, %rbx + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 1240(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 1240(%rsp), %rbx + adcq 1248(%rsp), %r15 + movq %r15, 8(%rsp) # 8-byte Spill + adcq 1256(%rsp), %r14 + movq %r14, 104(%rsp) # 8-byte Spill + movq 56(%rsp), %r12 # 8-byte Reload + adcq 1264(%rsp), %r12 + adcq 1272(%rsp), %r13 + movq %r13, %r14 + movq 48(%rsp), %r13 # 8-byte Reload + adcq 1280(%rsp), %r13 + movq 72(%rsp), %rbx # 8-byte Reload + adcq 1288(%rsp), %rbx + movq 96(%rsp), %r15 # 8-byte Reload + adcq 1296(%rsp), %r15 + movq 112(%rsp), %rax # 8-byte Reload + adcq 1304(%rsp), %rax + movq %rax, 112(%rsp) # 8-byte Spill + adcq 1312(%rsp), %rbp + movq %rbp, 80(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rdx + leaq 1160(%rsp), %rdi + 
movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 1232(%rsp), %rax + movq 8(%rsp), %rcx # 8-byte Reload + addq 1160(%rsp), %rcx + movq 104(%rsp), %rbp # 8-byte Reload + adcq 1168(%rsp), %rbp + adcq 1176(%rsp), %r12 + movq %r12, 56(%rsp) # 8-byte Spill + adcq 1184(%rsp), %r14 + adcq 1192(%rsp), %r13 + movq %r13, %r12 + adcq 1200(%rsp), %rbx + movq %rbx, 72(%rsp) # 8-byte Spill + adcq 1208(%rsp), %r15 + movq %r15, 96(%rsp) # 8-byte Spill + movq 112(%rsp), %rbx # 8-byte Reload + adcq 1216(%rsp), %rbx + movq 80(%rsp), %rdx # 8-byte Reload + adcq 1224(%rsp), %rdx + movq %rdx, 80(%rsp) # 8-byte Spill + movq %rax, %r15 + adcq $0, %r15 + movq %rcx, %rdx + movq %rcx, %r13 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 1080(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 1080(%rsp), %r13 + adcq 1088(%rsp), %rbp + movq %rbp, 104(%rsp) # 8-byte Spill + movq 56(%rsp), %r13 # 8-byte Reload + adcq 1096(%rsp), %r13 + adcq 1104(%rsp), %r14 + adcq 1112(%rsp), %r12 + movq %r12, 48(%rsp) # 8-byte Spill + movq 72(%rsp), %r12 # 8-byte Reload + adcq 1120(%rsp), %r12 + movq 96(%rsp), %rbp # 8-byte Reload + adcq 1128(%rsp), %rbp + adcq 1136(%rsp), %rbx + movq %rbx, 112(%rsp) # 8-byte Spill + movq 80(%rsp), %rbx # 8-byte Reload + adcq 1144(%rsp), %rbx + adcq 1152(%rsp), %r15 + movq 16(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rdx + leaq 1000(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 1072(%rsp), %rax + movq 104(%rsp), %rcx # 8-byte Reload + addq 1000(%rsp), %rcx + adcq 1008(%rsp), %r13 + movq %r13, 56(%rsp) # 8-byte Spill + adcq 1016(%rsp), %r14 + movq %r14, 40(%rsp) # 8-byte Spill + movq 48(%rsp), %r14 # 8-byte Reload + adcq 1024(%rsp), %r14 + adcq 1032(%rsp), %r12 + adcq 1040(%rsp), %rbp + movq %rbp, 96(%rsp) # 8-byte Spill + movq 112(%rsp), %r13 # 8-byte Reload + adcq 1048(%rsp), %r13 + adcq 1056(%rsp), %rbx + movq %rbx, 80(%rsp) # 8-byte Spill + adcq 1064(%rsp), %r15 + movq %r15, 88(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, 104(%rsp) # 8-byte Spill + movq %rcx, %rdx + movq %rcx, %rbx + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 920(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 920(%rsp), %rbx + movq 56(%rsp), %rax # 8-byte Reload + adcq 928(%rsp), %rax + movq %rax, 56(%rsp) # 8-byte Spill + movq 40(%rsp), %rbp # 8-byte Reload + adcq 936(%rsp), %rbp + movq %r14, %rbx + adcq 944(%rsp), %rbx + adcq 952(%rsp), %r12 + movq 96(%rsp), %rax # 8-byte Reload + adcq 960(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + adcq 968(%rsp), %r13 + movq %r13, %r15 + movq 80(%rsp), %r13 # 8-byte Reload + adcq 976(%rsp), %r13 + movq 88(%rsp), %r14 # 8-byte Reload + adcq 984(%rsp), %r14 + movq 104(%rsp), %rax # 8-byte Reload + adcq 992(%rsp), %rax + movq %rax, 104(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rdx + leaq 840(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 912(%rsp), %rax + movq 56(%rsp), %rcx # 8-byte Reload + addq 840(%rsp), %rcx + adcq 848(%rsp), %rbp + movq %rbp, 40(%rsp) # 8-byte Spill + adcq 856(%rsp), %rbx + movq %rbx, 48(%rsp) # 8-byte Spill + adcq 864(%rsp), %r12 + movq 96(%rsp), %rbp # 8-byte Reload + adcq 872(%rsp), %rbp + adcq 880(%rsp), %r15 + movq %r15, 112(%rsp) # 8-byte Spill + adcq 888(%rsp), %r13 + adcq 896(%rsp), %r14 + movq %r14, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %rdx # 8-byte Reload + adcq 904(%rsp), %rdx + movq %rdx, 104(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, %r14 + movq %rcx, 
%rdx + movq %rcx, %rbx + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 760(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 760(%rsp), %rbx + movq 40(%rsp), %rax # 8-byte Reload + adcq 768(%rsp), %rax + movq %rax, 40(%rsp) # 8-byte Spill + movq 48(%rsp), %r15 # 8-byte Reload + adcq 776(%rsp), %r15 + adcq 784(%rsp), %r12 + movq %r12, 72(%rsp) # 8-byte Spill + movq %rbp, %rbx + adcq 792(%rsp), %rbx + movq 112(%rsp), %rbp # 8-byte Reload + adcq 800(%rsp), %rbp + adcq 808(%rsp), %r13 + movq 88(%rsp), %rax # 8-byte Reload + adcq 816(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %r12 # 8-byte Reload + adcq 824(%rsp), %r12 + adcq 832(%rsp), %r14 + movq 16(%rsp), %rax # 8-byte Reload + movq 40(%rax), %rdx + leaq 680(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 752(%rsp), %rcx + movq 40(%rsp), %rax # 8-byte Reload + addq 680(%rsp), %rax + adcq 688(%rsp), %r15 + movq %r15, 48(%rsp) # 8-byte Spill + movq 72(%rsp), %rdx # 8-byte Reload + adcq 696(%rsp), %rdx + movq %rdx, 72(%rsp) # 8-byte Spill + adcq 704(%rsp), %rbx + movq %rbx, 96(%rsp) # 8-byte Spill + adcq 712(%rsp), %rbp + movq %rbp, 112(%rsp) # 8-byte Spill + adcq 720(%rsp), %r13 + movq %r13, %r15 + movq 88(%rsp), %rbx # 8-byte Reload + adcq 728(%rsp), %rbx + adcq 736(%rsp), %r12 + movq %r12, 104(%rsp) # 8-byte Spill + adcq 744(%rsp), %r14 + movq %r14, 40(%rsp) # 8-byte Spill + adcq $0, %rcx + movq %rcx, 56(%rsp) # 8-byte Spill + movq %rax, %rdx + movq %rax, %r13 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 600(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 600(%rsp), %r13 + movq 48(%rsp), %r13 # 8-byte Reload + adcq 608(%rsp), %r13 + movq 72(%rsp), %r12 # 8-byte Reload + adcq 616(%rsp), %r12 + movq 96(%rsp), %rbp # 8-byte Reload + adcq 624(%rsp), %rbp + movq 112(%rsp), %rax # 8-byte Reload + adcq 632(%rsp), %rax + movq %rax, 112(%rsp) # 8-byte Spill + adcq 640(%rsp), %r15 + movq %r15, 80(%rsp) # 8-byte Spill + adcq 648(%rsp), %rbx + movq %rbx, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %r14 # 8-byte Reload + adcq 656(%rsp), %r14 + movq 40(%rsp), %rbx # 8-byte Reload + adcq 664(%rsp), %rbx + movq 56(%rsp), %r15 # 8-byte Reload + adcq 672(%rsp), %r15 + movq 16(%rsp), %rax # 8-byte Reload + movq 48(%rax), %rdx + leaq 520(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 592(%rsp), %rcx + movq %r13, %rax + addq 520(%rsp), %rax + adcq 528(%rsp), %r12 + movq %r12, 72(%rsp) # 8-byte Spill + movq %rbp, %r12 + adcq 536(%rsp), %r12 + movq 112(%rsp), %rbp # 8-byte Reload + adcq 544(%rsp), %rbp + movq 80(%rsp), %rdx # 8-byte Reload + adcq 552(%rsp), %rdx + movq %rdx, 80(%rsp) # 8-byte Spill + movq 88(%rsp), %rdx # 8-byte Reload + adcq 560(%rsp), %rdx + movq %rdx, 88(%rsp) # 8-byte Spill + adcq 568(%rsp), %r14 + movq %r14, 104(%rsp) # 8-byte Spill + adcq 576(%rsp), %rbx + movq %rbx, 40(%rsp) # 8-byte Spill + adcq 584(%rsp), %r15 + movq %r15, 56(%rsp) # 8-byte Spill + adcq $0, %rcx + movq %rcx, %r13 + movq %rax, %rdx + movq %rax, %r14 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 440(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 440(%rsp), %r14 + movq 72(%rsp), %rax # 8-byte Reload + adcq 448(%rsp), %rax + movq %rax, 72(%rsp) # 8-byte Spill + adcq 456(%rsp), %r12 + adcq 464(%rsp), %rbp + movq %rbp, 112(%rsp) # 8-byte Spill + movq 80(%rsp), %r14 # 8-byte Reload + adcq 472(%rsp), %r14 + movq 88(%rsp), %r15 # 8-byte Reload + adcq 480(%rsp), %r15 + movq 104(%rsp), 
%rbp # 8-byte Reload + adcq 488(%rsp), %rbp + movq 40(%rsp), %rbx # 8-byte Reload + adcq 496(%rsp), %rbx + movq 56(%rsp), %rax # 8-byte Reload + adcq 504(%rsp), %rax + movq %rax, 56(%rsp) # 8-byte Spill + adcq 512(%rsp), %r13 + movq 16(%rsp), %rax # 8-byte Reload + movq 56(%rax), %rdx + leaq 360(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 432(%rsp), %rcx + movq 72(%rsp), %rax # 8-byte Reload + addq 360(%rsp), %rax + adcq 368(%rsp), %r12 + movq %r12, 96(%rsp) # 8-byte Spill + movq 112(%rsp), %rdx # 8-byte Reload + adcq 376(%rsp), %rdx + movq %rdx, 112(%rsp) # 8-byte Spill + adcq 384(%rsp), %r14 + movq %r14, 80(%rsp) # 8-byte Spill + adcq 392(%rsp), %r15 + movq %r15, 88(%rsp) # 8-byte Spill + adcq 400(%rsp), %rbp + movq %rbp, 104(%rsp) # 8-byte Spill + adcq 408(%rsp), %rbx + movq %rbx, 40(%rsp) # 8-byte Spill + movq 56(%rsp), %r14 # 8-byte Reload + adcq 416(%rsp), %r14 + adcq 424(%rsp), %r13 + movq %r13, %r15 + adcq $0, %rcx + movq %rcx, 72(%rsp) # 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 280(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 280(%rsp), %r12 + movq 96(%rsp), %rax # 8-byte Reload + adcq 288(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + movq 112(%rsp), %rbp # 8-byte Reload + adcq 296(%rsp), %rbp + movq 80(%rsp), %rax # 8-byte Reload + adcq 304(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 88(%rsp), %r13 # 8-byte Reload + adcq 312(%rsp), %r13 + movq 104(%rsp), %r12 # 8-byte Reload + adcq 320(%rsp), %r12 + movq 40(%rsp), %rbx # 8-byte Reload + adcq 328(%rsp), %rbx + adcq 336(%rsp), %r14 + movq %r14, 56(%rsp) # 8-byte Spill + adcq 344(%rsp), %r15 + movq %r15, 48(%rsp) # 8-byte Spill + movq 72(%rsp), %r14 # 8-byte Reload + adcq 352(%rsp), %r14 + movq 16(%rsp), %rax # 8-byte Reload + movq 64(%rax), %rdx + leaq 200(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 272(%rsp), %rcx + movq 96(%rsp), %rax # 8-byte Reload + addq 200(%rsp), %rax + adcq 208(%rsp), %rbp + movq %rbp, 112(%rsp) # 8-byte Spill + movq 80(%rsp), %rbp # 8-byte Reload + adcq 216(%rsp), %rbp + adcq 224(%rsp), %r13 + movq %r13, 88(%rsp) # 8-byte Spill + adcq 232(%rsp), %r12 + movq %r12, 104(%rsp) # 8-byte Spill + adcq 240(%rsp), %rbx + movq %rbx, 40(%rsp) # 8-byte Spill + movq 56(%rsp), %r15 # 8-byte Reload + adcq 248(%rsp), %r15 + movq 48(%rsp), %r12 # 8-byte Reload + adcq 256(%rsp), %r12 + adcq 264(%rsp), %r14 + adcq $0, %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + movq 32(%rsp), %rdx # 8-byte Reload + imulq %rax, %rdx + movq %rax, %rbx + leaq 120(%rsp), %rdi + movq 64(%rsp), %r13 # 8-byte Reload + movq %r13, %rsi + callq .LmulPv576x64 + addq 120(%rsp), %rbx + movq 112(%rsp), %rcx # 8-byte Reload + adcq 128(%rsp), %rcx + movq %rbp, %rdx + adcq 136(%rsp), %rdx + movq 88(%rsp), %rsi # 8-byte Reload + adcq 144(%rsp), %rsi + movq %rsi, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %rdi # 8-byte Reload + adcq 152(%rsp), %rdi + movq %rdi, 104(%rsp) # 8-byte Spill + movq 40(%rsp), %rbx # 8-byte Reload + adcq 160(%rsp), %rbx + movq %rbx, 40(%rsp) # 8-byte Spill + movq %r15, %r8 + adcq 168(%rsp), %r8 + movq %r8, 56(%rsp) # 8-byte Spill + movq %r12, %r15 + adcq 176(%rsp), %r15 + adcq 184(%rsp), %r14 + movq 96(%rsp), %r9 # 8-byte Reload + adcq 192(%rsp), %r9 + movq %rcx, %rax + movq %rcx, %r11 + movq %r13, %rbp + subq (%rbp), %rax + movq %rdx, %rcx + movq %rdx, %r12 + sbbq 8(%rbp), %rcx + movq %rsi, %rdx + sbbq 16(%rbp), %rdx + movq %rdi, %rsi + sbbq 24(%rbp), %rsi 
+ movq %rbx, %rdi + sbbq 32(%rbp), %rdi + movq %r8, %r10 + sbbq 40(%rbp), %r10 + movq %r15, %r13 + sbbq 48(%rbp), %r13 + movq %r14, %r8 + sbbq 56(%rbp), %r8 + movq %rbp, %rbx + movq %r9, %rbp + sbbq 64(%rbx), %rbp + movq %rbp, %rbx + sarq $63, %rbx + cmovsq %r11, %rax + movq (%rsp), %rbx # 8-byte Reload + movq %rax, (%rbx) + cmovsq %r12, %rcx + movq %rcx, 8(%rbx) + cmovsq 88(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 16(%rbx) + cmovsq 104(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 24(%rbx) + cmovsq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rbx) + cmovsq 56(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 40(%rbx) + cmovsq %r15, %r13 + movq %r13, 48(%rbx) + cmovsq %r14, %r8 + movq %r8, 56(%rbx) + cmovsq %r9, %rbp + movq %rbp, 64(%rbx) + addq $1560, %rsp # imm = 0x618 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end131: + .size mcl_fp_montNF9Lbmi2, .Lfunc_end131-mcl_fp_montNF9Lbmi2 + + .globl mcl_fp_montRed9Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed9Lbmi2,@function +mcl_fp_montRed9Lbmi2: # @mcl_fp_montRed9Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $936, %rsp # imm = 0x3A8 + movq %rdx, %rax + movq %rax, 128(%rsp) # 8-byte Spill + movq %rdi, 80(%rsp) # 8-byte Spill + movq -8(%rax), %rcx + movq %rcx, 120(%rsp) # 8-byte Spill + movq (%rsi), %r14 + movq 8(%rsi), %rdx + movq %rdx, 192(%rsp) # 8-byte Spill + movq %r14, %rdx + imulq %rcx, %rdx + movq 136(%rsi), %rcx + movq %rcx, 112(%rsp) # 8-byte Spill + movq 128(%rsi), %rcx + movq %rcx, 152(%rsp) # 8-byte Spill + movq 120(%rsi), %rcx + movq %rcx, 104(%rsp) # 8-byte Spill + movq 112(%rsi), %rcx + movq %rcx, 144(%rsp) # 8-byte Spill + movq 104(%rsi), %rcx + movq %rcx, 184(%rsp) # 8-byte Spill + movq 96(%rsi), %rcx + movq %rcx, 208(%rsp) # 8-byte Spill + movq 88(%rsi), %rcx + movq %rcx, 200(%rsp) # 8-byte Spill + movq 80(%rsi), %rcx + movq %rcx, 160(%rsp) # 8-byte Spill + movq 72(%rsi), %r12 + movq 64(%rsi), %rcx + movq %rcx, 176(%rsp) # 8-byte Spill + movq 56(%rsi), %rcx + movq %rcx, 168(%rsp) # 8-byte Spill + movq 48(%rsi), %rcx + movq %rcx, 136(%rsp) # 8-byte Spill + movq 40(%rsi), %rbp + movq 32(%rsi), %rbx + movq 24(%rsi), %r13 + movq 16(%rsi), %r15 + movq %rax, %rcx + movq (%rcx), %rax + movq %rax, 16(%rsp) # 8-byte Spill + movq 64(%rcx), %rax + movq %rax, 72(%rsp) # 8-byte Spill + movq 56(%rcx), %rax + movq %rax, 64(%rsp) # 8-byte Spill + movq 48(%rcx), %rax + movq %rax, 56(%rsp) # 8-byte Spill + movq 40(%rcx), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 32(%rcx), %rax + movq %rax, 40(%rsp) # 8-byte Spill + movq 24(%rcx), %rax + movq %rax, 32(%rsp) # 8-byte Spill + movq 16(%rcx), %rax + movq %rax, 24(%rsp) # 8-byte Spill + movq 8(%rcx), %rax + movq %rax, 8(%rsp) # 8-byte Spill + movq %rcx, %rsi + leaq 856(%rsp), %rdi + callq .LmulPv576x64 + addq 856(%rsp), %r14 + movq 192(%rsp), %rcx # 8-byte Reload + adcq 864(%rsp), %rcx + adcq 872(%rsp), %r15 + adcq 880(%rsp), %r13 + adcq 888(%rsp), %rbx + movq %rbx, 88(%rsp) # 8-byte Spill + adcq 896(%rsp), %rbp + movq %rbp, 96(%rsp) # 8-byte Spill + movq 136(%rsp), %rax # 8-byte Reload + adcq 904(%rsp), %rax + movq %rax, 136(%rsp) # 8-byte Spill + movq 168(%rsp), %rax # 8-byte Reload + adcq 912(%rsp), %rax + movq %rax, 168(%rsp) # 8-byte Spill + movq 176(%rsp), %rax # 8-byte Reload + adcq 920(%rsp), %rax + movq %rax, 176(%rsp) # 8-byte Spill + adcq 928(%rsp), %r12 + movq %r12, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %rbp # 8-byte Reload + adcq $0, %rbp + adcq $0, 200(%rsp) # 
8-byte Folded Spill + adcq $0, 208(%rsp) # 8-byte Folded Spill + adcq $0, 184(%rsp) # 8-byte Folded Spill + adcq $0, 144(%rsp) # 8-byte Folded Spill + adcq $0, 104(%rsp) # 8-byte Folded Spill + adcq $0, 152(%rsp) # 8-byte Folded Spill + movq 112(%rsp), %r14 # 8-byte Reload + adcq $0, %r14 + sbbq %r12, %r12 + movq %rcx, %rdx + movq %rcx, %rbx + imulq 120(%rsp), %rdx # 8-byte Folded Reload + leaq 776(%rsp), %rdi + movq 128(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %r12d + addq 776(%rsp), %rbx + adcq 784(%rsp), %r15 + adcq 792(%rsp), %r13 + movq %r13, (%rsp) # 8-byte Spill + movq 88(%rsp), %rax # 8-byte Reload + adcq 800(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 96(%rsp), %rax # 8-byte Reload + adcq 808(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + movq 136(%rsp), %rax # 8-byte Reload + adcq 816(%rsp), %rax + movq %rax, 136(%rsp) # 8-byte Spill + movq 168(%rsp), %rax # 8-byte Reload + adcq 824(%rsp), %rax + movq %rax, 168(%rsp) # 8-byte Spill + movq 176(%rsp), %rax # 8-byte Reload + adcq 832(%rsp), %rax + movq %rax, 176(%rsp) # 8-byte Spill + movq 192(%rsp), %rax # 8-byte Reload + adcq 840(%rsp), %rax + movq %rax, 192(%rsp) # 8-byte Spill + adcq 848(%rsp), %rbp + movq %rbp, 160(%rsp) # 8-byte Spill + movq 200(%rsp), %r13 # 8-byte Reload + adcq $0, %r13 + adcq $0, 208(%rsp) # 8-byte Folded Spill + adcq $0, 184(%rsp) # 8-byte Folded Spill + adcq $0, 144(%rsp) # 8-byte Folded Spill + adcq $0, 104(%rsp) # 8-byte Folded Spill + movq 152(%rsp), %rbx # 8-byte Reload + adcq $0, %rbx + adcq $0, %r14 + movq %r14, 112(%rsp) # 8-byte Spill + adcq $0, %r12 + movq %r15, %rdx + imulq 120(%rsp), %rdx # 8-byte Folded Reload + leaq 696(%rsp), %rdi + movq 128(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 696(%rsp), %r15 + movq (%rsp), %rcx # 8-byte Reload + adcq 704(%rsp), %rcx + movq 88(%rsp), %rax # 8-byte Reload + adcq 712(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 96(%rsp), %rax # 8-byte Reload + adcq 720(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + movq 136(%rsp), %rbp # 8-byte Reload + adcq 728(%rsp), %rbp + movq 168(%rsp), %r14 # 8-byte Reload + adcq 736(%rsp), %r14 + movq 176(%rsp), %r15 # 8-byte Reload + adcq 744(%rsp), %r15 + movq 192(%rsp), %rax # 8-byte Reload + adcq 752(%rsp), %rax + movq %rax, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %rax # 8-byte Reload + adcq 760(%rsp), %rax + movq %rax, 160(%rsp) # 8-byte Spill + adcq 768(%rsp), %r13 + movq %r13, 200(%rsp) # 8-byte Spill + adcq $0, 208(%rsp) # 8-byte Folded Spill + movq 184(%rsp), %r13 # 8-byte Reload + adcq $0, %r13 + adcq $0, 144(%rsp) # 8-byte Folded Spill + adcq $0, 104(%rsp) # 8-byte Folded Spill + adcq $0, %rbx + movq %rbx, 152(%rsp) # 8-byte Spill + adcq $0, 112(%rsp) # 8-byte Folded Spill + adcq $0, %r12 + movq %rcx, %rbx + movq %rbx, %rdx + imulq 120(%rsp), %rdx # 8-byte Folded Reload + leaq 616(%rsp), %rdi + movq 128(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 616(%rsp), %rbx + movq 88(%rsp), %rax # 8-byte Reload + adcq 624(%rsp), %rax + movq 96(%rsp), %rcx # 8-byte Reload + adcq 632(%rsp), %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + adcq 640(%rsp), %rbp + movq %rbp, 136(%rsp) # 8-byte Spill + adcq 648(%rsp), %r14 + movq %r14, 168(%rsp) # 8-byte Spill + adcq 656(%rsp), %r15 + movq 192(%rsp), %r14 # 8-byte Reload + adcq 664(%rsp), %r14 + movq 160(%rsp), %rbp # 8-byte Reload + adcq 672(%rsp), %rbp + movq 200(%rsp), %rcx # 8-byte Reload + adcq 680(%rsp), %rcx + movq %rcx, 200(%rsp) # 8-byte Spill + movq 208(%rsp), %rcx # 8-byte Reload + adcq 688(%rsp), 
%rcx + movq %rcx, 208(%rsp) # 8-byte Spill + adcq $0, %r13 + movq %r13, 184(%rsp) # 8-byte Spill + adcq $0, 144(%rsp) # 8-byte Folded Spill + adcq $0, 104(%rsp) # 8-byte Folded Spill + adcq $0, 152(%rsp) # 8-byte Folded Spill + adcq $0, 112(%rsp) # 8-byte Folded Spill + adcq $0, %r12 + movq %rax, %rbx + movq %rbx, %rdx + imulq 120(%rsp), %rdx # 8-byte Folded Reload + leaq 536(%rsp), %rdi + movq 128(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 536(%rsp), %rbx + movq 96(%rsp), %rax # 8-byte Reload + adcq 544(%rsp), %rax + movq 136(%rsp), %rcx # 8-byte Reload + adcq 552(%rsp), %rcx + movq %rcx, 136(%rsp) # 8-byte Spill + movq 168(%rsp), %rcx # 8-byte Reload + adcq 560(%rsp), %rcx + movq %rcx, 168(%rsp) # 8-byte Spill + adcq 568(%rsp), %r15 + movq %r15, 176(%rsp) # 8-byte Spill + adcq 576(%rsp), %r14 + movq %r14, 192(%rsp) # 8-byte Spill + adcq 584(%rsp), %rbp + movq %rbp, 160(%rsp) # 8-byte Spill + movq 200(%rsp), %r13 # 8-byte Reload + adcq 592(%rsp), %r13 + movq 208(%rsp), %r15 # 8-byte Reload + adcq 600(%rsp), %r15 + movq 184(%rsp), %rbp # 8-byte Reload + adcq 608(%rsp), %rbp + movq 144(%rsp), %rbx # 8-byte Reload + adcq $0, %rbx + adcq $0, 104(%rsp) # 8-byte Folded Spill + adcq $0, 152(%rsp) # 8-byte Folded Spill + adcq $0, 112(%rsp) # 8-byte Folded Spill + adcq $0, %r12 + movq %rax, %rdx + movq %rax, %r14 + imulq 120(%rsp), %rdx # 8-byte Folded Reload + leaq 456(%rsp), %rdi + movq 128(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 456(%rsp), %r14 + movq 136(%rsp), %rax # 8-byte Reload + adcq 464(%rsp), %rax + movq 168(%rsp), %rcx # 8-byte Reload + adcq 472(%rsp), %rcx + movq %rcx, 168(%rsp) # 8-byte Spill + movq 176(%rsp), %rcx # 8-byte Reload + adcq 480(%rsp), %rcx + movq %rcx, 176(%rsp) # 8-byte Spill + movq 192(%rsp), %rcx # 8-byte Reload + adcq 488(%rsp), %rcx + movq %rcx, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %rcx # 8-byte Reload + adcq 496(%rsp), %rcx + movq %rcx, 160(%rsp) # 8-byte Spill + adcq 504(%rsp), %r13 + movq %r13, 200(%rsp) # 8-byte Spill + adcq 512(%rsp), %r15 + movq %r15, 208(%rsp) # 8-byte Spill + adcq 520(%rsp), %rbp + movq %rbp, 184(%rsp) # 8-byte Spill + adcq 528(%rsp), %rbx + movq %rbx, 144(%rsp) # 8-byte Spill + movq 104(%rsp), %r14 # 8-byte Reload + adcq $0, %r14 + movq 152(%rsp), %r13 # 8-byte Reload + adcq $0, %r13 + movq 112(%rsp), %rbx # 8-byte Reload + adcq $0, %rbx + adcq $0, %r12 + movq %rax, %rdx + movq %rax, %r15 + imulq 120(%rsp), %rdx # 8-byte Folded Reload + leaq 376(%rsp), %rdi + movq 128(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 376(%rsp), %r15 + movq 168(%rsp), %rax # 8-byte Reload + adcq 384(%rsp), %rax + movq 176(%rsp), %rcx # 8-byte Reload + adcq 392(%rsp), %rcx + movq %rcx, 176(%rsp) # 8-byte Spill + movq 192(%rsp), %rcx # 8-byte Reload + adcq 400(%rsp), %rcx + movq %rcx, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %rbp # 8-byte Reload + adcq 408(%rsp), %rbp + movq 200(%rsp), %rcx # 8-byte Reload + adcq 416(%rsp), %rcx + movq %rcx, 200(%rsp) # 8-byte Spill + movq 208(%rsp), %rcx # 8-byte Reload + adcq 424(%rsp), %rcx + movq %rcx, 208(%rsp) # 8-byte Spill + movq 184(%rsp), %rcx # 8-byte Reload + adcq 432(%rsp), %rcx + movq %rcx, 184(%rsp) # 8-byte Spill + movq 144(%rsp), %r15 # 8-byte Reload + adcq 440(%rsp), %r15 + adcq 448(%rsp), %r14 + movq %r14, 104(%rsp) # 8-byte Spill + adcq $0, %r13 + movq %r13, %r14 + adcq $0, %rbx + movq %rbx, 112(%rsp) # 8-byte Spill + adcq $0, %r12 + movq %rax, %rbx + movq %rbx, %rdx + imulq 120(%rsp), %rdx # 8-byte Folded Reload + leaq 296(%rsp), %rdi + movq 128(%rsp), %rsi # 
8-byte Reload + callq .LmulPv576x64 + addq 296(%rsp), %rbx + movq 176(%rsp), %rax # 8-byte Reload + adcq 304(%rsp), %rax + movq 192(%rsp), %r13 # 8-byte Reload + adcq 312(%rsp), %r13 + adcq 320(%rsp), %rbp + movq 200(%rsp), %rcx # 8-byte Reload + adcq 328(%rsp), %rcx + movq %rcx, 200(%rsp) # 8-byte Spill + movq 208(%rsp), %rcx # 8-byte Reload + adcq 336(%rsp), %rcx + movq %rcx, 208(%rsp) # 8-byte Spill + movq 184(%rsp), %rcx # 8-byte Reload + adcq 344(%rsp), %rcx + movq %rcx, 184(%rsp) # 8-byte Spill + adcq 352(%rsp), %r15 + movq %r15, 144(%rsp) # 8-byte Spill + movq 104(%rsp), %r15 # 8-byte Reload + adcq 360(%rsp), %r15 + adcq 368(%rsp), %r14 + movq %r14, 152(%rsp) # 8-byte Spill + movq 112(%rsp), %r14 # 8-byte Reload + adcq $0, %r14 + adcq $0, %r12 + movq 120(%rsp), %rdx # 8-byte Reload + imulq %rax, %rdx + movq %rax, %rbx + leaq 216(%rsp), %rdi + movq 128(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 216(%rsp), %rbx + movq %r13, %rsi + adcq 224(%rsp), %rsi + movq %rsi, 192(%rsp) # 8-byte Spill + adcq 232(%rsp), %rbp + movq %rbp, 160(%rsp) # 8-byte Spill + movq 200(%rsp), %r9 # 8-byte Reload + adcq 240(%rsp), %r9 + movq %r9, 200(%rsp) # 8-byte Spill + movq 208(%rsp), %r8 # 8-byte Reload + adcq 248(%rsp), %r8 + movq %r8, 208(%rsp) # 8-byte Spill + movq 184(%rsp), %rbx # 8-byte Reload + adcq 256(%rsp), %rbx + movq 144(%rsp), %rax # 8-byte Reload + adcq 264(%rsp), %rax + movq %r15, %rcx + adcq 272(%rsp), %rcx + movq 152(%rsp), %rdx # 8-byte Reload + adcq 280(%rsp), %rdx + movq %rdx, 152(%rsp) # 8-byte Spill + adcq 288(%rsp), %r14 + movq %r14, %r11 + adcq $0, %r12 + subq 16(%rsp), %rsi # 8-byte Folded Reload + movq %rbp, %rdi + sbbq 8(%rsp), %rdi # 8-byte Folded Reload + movq %r9, %rbp + sbbq 24(%rsp), %rbp # 8-byte Folded Reload + movq %r8, %r13 + sbbq 32(%rsp), %r13 # 8-byte Folded Reload + movq %rbx, %r15 + sbbq 40(%rsp), %r15 # 8-byte Folded Reload + movq %rax, %r14 + sbbq 48(%rsp), %r14 # 8-byte Folded Reload + movq %rcx, %r10 + sbbq 56(%rsp), %r10 # 8-byte Folded Reload + movq %rdx, %r8 + sbbq 64(%rsp), %r8 # 8-byte Folded Reload + movq %r11, %r9 + sbbq 72(%rsp), %r9 # 8-byte Folded Reload + sbbq $0, %r12 + andl $1, %r12d + cmovneq %r11, %r9 + testb %r12b, %r12b + cmovneq 192(%rsp), %rsi # 8-byte Folded Reload + movq 80(%rsp), %rdx # 8-byte Reload + movq %rsi, (%rdx) + cmovneq 160(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 8(%rdx) + cmovneq 200(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 16(%rdx) + cmovneq 208(%rsp), %r13 # 8-byte Folded Reload + movq %r13, 24(%rdx) + cmovneq %rbx, %r15 + movq %r15, 32(%rdx) + cmovneq %rax, %r14 + movq %r14, 40(%rdx) + cmovneq %rcx, %r10 + movq %r10, 48(%rdx) + cmovneq 152(%rsp), %r8 # 8-byte Folded Reload + movq %r8, 56(%rdx) + movq %r9, 64(%rdx) + addq $936, %rsp # imm = 0x3A8 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end132: + .size mcl_fp_montRed9Lbmi2, .Lfunc_end132-mcl_fp_montRed9Lbmi2 + + .globl mcl_fp_addPre9Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre9Lbmi2,@function +mcl_fp_addPre9Lbmi2: # @mcl_fp_addPre9Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 64(%rdx), %r8 + movq 64(%rsi), %r15 + movq 56(%rsi), %r9 + movq 48(%rsi), %r10 + movq 40(%rsi), %r11 + movq 24(%rsi), %r12 + movq 32(%rsi), %r14 + movq (%rdx), %rbx + movq 8(%rdx), %rcx + addq (%rsi), %rbx + adcq 8(%rsi), %rcx + movq 16(%rdx), %rax + adcq 16(%rsi), %rax + adcq 24(%rdx), %r12 + movq 56(%rdx), %r13 + movq 48(%rdx), %rsi + movq 40(%rdx), %rbp + movq 32(%rdx), %rdx + 
movq %rbx, (%rdi) + movq %rcx, 8(%rdi) + movq %rax, 16(%rdi) + movq %r12, 24(%rdi) + adcq %r14, %rdx + movq %rdx, 32(%rdi) + adcq %r11, %rbp + movq %rbp, 40(%rdi) + adcq %r10, %rsi + movq %rsi, 48(%rdi) + adcq %r9, %r13 + movq %r13, 56(%rdi) + adcq %r8, %r15 + movq %r15, 64(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end133: + .size mcl_fp_addPre9Lbmi2, .Lfunc_end133-mcl_fp_addPre9Lbmi2 + + .globl mcl_fp_subPre9Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre9Lbmi2,@function +mcl_fp_subPre9Lbmi2: # @mcl_fp_subPre9Lbmi2 +# BB#0: + movq 32(%rdx), %r8 + movq (%rsi), %rcx + xorl %eax, %eax + subq (%rdx), %rcx + movq %rcx, (%rdi) + movq 8(%rsi), %rcx + sbbq 8(%rdx), %rcx + movq %rcx, 8(%rdi) + movq 16(%rsi), %rcx + sbbq 16(%rdx), %rcx + movq %rcx, 16(%rdi) + movq 24(%rsi), %rcx + sbbq 24(%rdx), %rcx + movq %rcx, 24(%rdi) + movq 32(%rsi), %rcx + sbbq %r8, %rcx + movq 40(%rdx), %r8 + movq %rcx, 32(%rdi) + movq 40(%rsi), %rcx + sbbq %r8, %rcx + movq 48(%rdx), %r8 + movq %rcx, 40(%rdi) + movq 48(%rsi), %rcx + sbbq %r8, %rcx + movq 56(%rdx), %r8 + movq %rcx, 48(%rdi) + movq 56(%rsi), %rcx + sbbq %r8, %rcx + movq %rcx, 56(%rdi) + movq 64(%rdx), %rcx + movq 64(%rsi), %rdx + sbbq %rcx, %rdx + movq %rdx, 64(%rdi) + sbbq $0, %rax + andl $1, %eax + retq +.Lfunc_end134: + .size mcl_fp_subPre9Lbmi2, .Lfunc_end134-mcl_fp_subPre9Lbmi2 + + .globl mcl_fp_shr1_9Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_9Lbmi2,@function +mcl_fp_shr1_9Lbmi2: # @mcl_fp_shr1_9Lbmi2 +# BB#0: + pushq %rbx + movq 64(%rsi), %r8 + movq 56(%rsi), %r9 + movq 48(%rsi), %r10 + movq 40(%rsi), %r11 + movq 32(%rsi), %rcx + movq 24(%rsi), %rdx + movq 16(%rsi), %rax + movq (%rsi), %rbx + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rbx + movq %rbx, (%rdi) + shrdq $1, %rax, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rdx, %rax + movq %rax, 16(%rdi) + shrdq $1, %rcx, %rdx + movq %rdx, 24(%rdi) + shrdq $1, %r11, %rcx + movq %rcx, 32(%rdi) + shrdq $1, %r10, %r11 + movq %r11, 40(%rdi) + shrdq $1, %r9, %r10 + movq %r10, 48(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 56(%rdi) + shrq %r8 + movq %r8, 64(%rdi) + popq %rbx + retq +.Lfunc_end135: + .size mcl_fp_shr1_9Lbmi2, .Lfunc_end135-mcl_fp_shr1_9Lbmi2 + + .globl mcl_fp_add9Lbmi2 + .align 16, 0x90 + .type mcl_fp_add9Lbmi2,@function +mcl_fp_add9Lbmi2: # @mcl_fp_add9Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 64(%rdx), %r12 + movq 64(%rsi), %r8 + movq 56(%rsi), %r13 + movq 48(%rsi), %r9 + movq 40(%rsi), %r10 + movq 24(%rsi), %r14 + movq 32(%rsi), %r11 + movq (%rdx), %rbx + movq 8(%rdx), %r15 + addq (%rsi), %rbx + adcq 8(%rsi), %r15 + movq 16(%rdx), %rax + adcq 16(%rsi), %rax + adcq 24(%rdx), %r14 + adcq 32(%rdx), %r11 + adcq 40(%rdx), %r10 + movq 56(%rdx), %rsi + adcq 48(%rdx), %r9 + movq %rbx, (%rdi) + movq %r15, 8(%rdi) + movq %rax, 16(%rdi) + movq %r14, 24(%rdi) + movq %r11, 32(%rdi) + movq %r10, 40(%rdi) + movq %r9, 48(%rdi) + adcq %r13, %rsi + movq %rsi, 56(%rdi) + adcq %r12, %r8 + movq %r8, 64(%rdi) + sbbq %rdx, %rdx + andl $1, %edx + subq (%rcx), %rbx + sbbq 8(%rcx), %r15 + sbbq 16(%rcx), %rax + sbbq 24(%rcx), %r14 + sbbq 32(%rcx), %r11 + sbbq 40(%rcx), %r10 + sbbq 48(%rcx), %r9 + sbbq 56(%rcx), %rsi + sbbq 64(%rcx), %r8 + sbbq $0, %rdx + testb $1, %dl + jne .LBB136_2 +# BB#1: # %nocarry + movq %rbx, (%rdi) + movq %r15, 8(%rdi) + movq %rax, 16(%rdi) + movq %r14, 24(%rdi) + movq %r11, 32(%rdi) + movq %r10, 40(%rdi) + movq %r9, 48(%rdi) + movq %rsi, 56(%rdi) + movq %r8, 64(%rdi) +.LBB136_2: # 
%carry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end136: + .size mcl_fp_add9Lbmi2, .Lfunc_end136-mcl_fp_add9Lbmi2 + + .globl mcl_fp_addNF9Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF9Lbmi2,@function +mcl_fp_addNF9Lbmi2: # @mcl_fp_addNF9Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, %r8 + movq 64(%rdx), %r10 + movq 56(%rdx), %r11 + movq 48(%rdx), %r9 + movq 40(%rdx), %rax + movq 32(%rdx), %rdi + movq 24(%rdx), %rbp + movq 16(%rdx), %r15 + movq (%rdx), %rbx + movq 8(%rdx), %r13 + addq (%rsi), %rbx + adcq 8(%rsi), %r13 + adcq 16(%rsi), %r15 + adcq 24(%rsi), %rbp + movq %rbp, -40(%rsp) # 8-byte Spill + adcq 32(%rsi), %rdi + movq %rdi, -16(%rsp) # 8-byte Spill + adcq 40(%rsi), %rax + movq %rax, -8(%rsp) # 8-byte Spill + adcq 48(%rsi), %r9 + movq %r9, -32(%rsp) # 8-byte Spill + movq %r9, %rdi + adcq 56(%rsi), %r11 + movq %r11, -24(%rsp) # 8-byte Spill + movq %r11, %rax + adcq 64(%rsi), %r10 + movq %r10, %r9 + movq %rbx, %rsi + subq (%rcx), %rsi + movq %r13, %rdx + sbbq 8(%rcx), %rdx + movq %r15, %r12 + sbbq 16(%rcx), %r12 + sbbq 24(%rcx), %rbp + movq -16(%rsp), %r14 # 8-byte Reload + sbbq 32(%rcx), %r14 + movq -8(%rsp), %r11 # 8-byte Reload + sbbq 40(%rcx), %r11 + movq %rdi, %r10 + sbbq 48(%rcx), %r10 + movq %rax, %rdi + sbbq 56(%rcx), %rdi + movq %r9, %rax + sbbq 64(%rcx), %rax + movq %rax, %rcx + sarq $63, %rcx + cmovsq %rbx, %rsi + movq %rsi, (%r8) + cmovsq %r13, %rdx + movq %rdx, 8(%r8) + cmovsq %r15, %r12 + movq %r12, 16(%r8) + cmovsq -40(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%r8) + cmovsq -16(%rsp), %r14 # 8-byte Folded Reload + movq %r14, 32(%r8) + cmovsq -8(%rsp), %r11 # 8-byte Folded Reload + movq %r11, 40(%r8) + cmovsq -32(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 48(%r8) + cmovsq -24(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 56(%r8) + cmovsq %r9, %rax + movq %rax, 64(%r8) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end137: + .size mcl_fp_addNF9Lbmi2, .Lfunc_end137-mcl_fp_addNF9Lbmi2 + + .globl mcl_fp_sub9Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub9Lbmi2,@function +mcl_fp_sub9Lbmi2: # @mcl_fp_sub9Lbmi2 +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 64(%rdx), %r13 + movq (%rsi), %rax + movq 8(%rsi), %r9 + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %r9 + movq 16(%rsi), %r10 + sbbq 16(%rdx), %r10 + movq 24(%rsi), %r11 + sbbq 24(%rdx), %r11 + movq 32(%rsi), %r12 + sbbq 32(%rdx), %r12 + movq 40(%rsi), %r14 + sbbq 40(%rdx), %r14 + movq 48(%rsi), %r15 + sbbq 48(%rdx), %r15 + movq 64(%rsi), %r8 + movq 56(%rsi), %rsi + sbbq 56(%rdx), %rsi + movq %rax, (%rdi) + movq %r9, 8(%rdi) + movq %r10, 16(%rdi) + movq %r11, 24(%rdi) + movq %r12, 32(%rdi) + movq %r14, 40(%rdi) + movq %r15, 48(%rdi) + movq %rsi, 56(%rdi) + sbbq %r13, %r8 + movq %r8, 64(%rdi) + sbbq $0, %rbx + testb $1, %bl + je .LBB138_2 +# BB#1: # %carry + addq (%rcx), %rax + movq %rax, (%rdi) + movq 8(%rcx), %rax + adcq %r9, %rax + movq %rax, 8(%rdi) + movq 16(%rcx), %rax + adcq %r10, %rax + movq %rax, 16(%rdi) + movq 24(%rcx), %rax + adcq %r11, %rax + movq %rax, 24(%rdi) + movq 32(%rcx), %rax + adcq %r12, %rax + movq %rax, 32(%rdi) + movq 40(%rcx), %rax + adcq %r14, %rax + movq %rax, 40(%rdi) + movq 48(%rcx), %rax + adcq %r15, %rax + movq %rax, 48(%rdi) + movq 56(%rcx), %rax + adcq %rsi, %rax + movq %rax, 56(%rdi) + movq 64(%rcx), %rax + adcq %r8, %rax + movq %rax, 64(%rdi) +.LBB138_2: # %nocarry + popq %rbx + popq %r12 + popq %r13 + popq 
%r14 + popq %r15 + retq +.Lfunc_end138: + .size mcl_fp_sub9Lbmi2, .Lfunc_end138-mcl_fp_sub9Lbmi2 + + .globl mcl_fp_subNF9Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF9Lbmi2,@function +mcl_fp_subNF9Lbmi2: # @mcl_fp_subNF9Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq %rdi, %r10 + movq 64(%rsi), %r14 + movq 56(%rsi), %rax + movq 48(%rsi), %rcx + movq 40(%rsi), %rdi + movq 32(%rsi), %rbp + movq 24(%rsi), %rbx + movq 16(%rsi), %r15 + movq (%rsi), %r13 + movq 8(%rsi), %r12 + subq (%rdx), %r13 + sbbq 8(%rdx), %r12 + sbbq 16(%rdx), %r15 + sbbq 24(%rdx), %rbx + movq %rbx, -40(%rsp) # 8-byte Spill + sbbq 32(%rdx), %rbp + movq %rbp, -32(%rsp) # 8-byte Spill + sbbq 40(%rdx), %rdi + movq %rdi, -24(%rsp) # 8-byte Spill + sbbq 48(%rdx), %rcx + movq %rcx, -16(%rsp) # 8-byte Spill + sbbq 56(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + sbbq 64(%rdx), %r14 + movq %r14, %rdx + sarq $63, %rdx + movq %rdx, %rbp + shldq $1, %r14, %rbp + movq 24(%r8), %rbx + andq %rbp, %rbx + movq 8(%r8), %rdi + andq %rbp, %rdi + andq (%r8), %rbp + movq 64(%r8), %r11 + andq %rdx, %r11 + rorxq $63, %rdx, %rax + andq 56(%r8), %rdx + movq 48(%r8), %r9 + andq %rax, %r9 + movq 40(%r8), %rsi + andq %rax, %rsi + movq 32(%r8), %rcx + andq %rax, %rcx + andq 16(%r8), %rax + addq %r13, %rbp + adcq %r12, %rdi + movq %rbp, (%r10) + adcq %r15, %rax + movq %rdi, 8(%r10) + adcq -40(%rsp), %rbx # 8-byte Folded Reload + movq %rax, 16(%r10) + movq %rbx, 24(%r10) + adcq -32(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 32(%r10) + adcq -24(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%r10) + adcq -16(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 48(%r10) + adcq -8(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 56(%r10) + adcq %r14, %r11 + movq %r11, 64(%r10) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end139: + .size mcl_fp_subNF9Lbmi2, .Lfunc_end139-mcl_fp_subNF9Lbmi2 + + .globl mcl_fpDbl_add9Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add9Lbmi2,@function +mcl_fpDbl_add9Lbmi2: # @mcl_fpDbl_add9Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r15 + movq 136(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 128(%rdx), %rax + movq %rax, -40(%rsp) # 8-byte Spill + movq 120(%rdx), %r10 + movq 112(%rdx), %r11 + movq 24(%rsi), %rcx + movq 32(%rsi), %r14 + movq 16(%rdx), %rbp + movq (%rdx), %rax + movq 8(%rdx), %rbx + addq (%rsi), %rax + adcq 8(%rsi), %rbx + adcq 16(%rsi), %rbp + adcq 24(%rdx), %rcx + adcq 32(%rdx), %r14 + movq 104(%rdx), %r9 + movq 96(%rdx), %r13 + movq %rax, (%rdi) + movq 88(%rdx), %r8 + movq %rbx, 8(%rdi) + movq 80(%rdx), %r12 + movq %rbp, 16(%rdi) + movq 40(%rdx), %rax + movq %rcx, 24(%rdi) + movq 40(%rsi), %rbp + adcq %rax, %rbp + movq 48(%rdx), %rcx + movq %r14, 32(%rdi) + movq 48(%rsi), %rax + adcq %rcx, %rax + movq 56(%rdx), %r14 + movq %rbp, 40(%rdi) + movq 56(%rsi), %rbp + adcq %r14, %rbp + movq 72(%rdx), %rcx + movq 64(%rdx), %rdx + movq %rax, 48(%rdi) + movq 64(%rsi), %rax + adcq %rdx, %rax + movq 136(%rsi), %rbx + movq %rbp, 56(%rdi) + movq 72(%rsi), %rbp + adcq %rcx, %rbp + movq 128(%rsi), %rcx + movq %rax, 64(%rdi) + movq 80(%rsi), %rdx + adcq %r12, %rdx + movq 88(%rsi), %r12 + adcq %r8, %r12 + movq 96(%rsi), %r14 + adcq %r13, %r14 + movq %r14, -48(%rsp) # 8-byte Spill + movq 104(%rsi), %rax + adcq %r9, %rax + movq %rax, -32(%rsp) # 8-byte Spill + movq 120(%rsi), %rax + movq 112(%rsi), %rsi + adcq %r11, %rsi + movq %rsi, -24(%rsp) # 8-byte 
Spill + adcq %r10, %rax + movq %rax, -16(%rsp) # 8-byte Spill + adcq -40(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -40(%rsp) # 8-byte Spill + adcq -8(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, -8(%rsp) # 8-byte Spill + sbbq %r9, %r9 + andl $1, %r9d + movq %rbp, %r10 + subq (%r15), %r10 + movq %rdx, %r11 + sbbq 8(%r15), %r11 + movq %r12, %rbx + sbbq 16(%r15), %rbx + sbbq 24(%r15), %r14 + movq -32(%rsp), %r13 # 8-byte Reload + sbbq 32(%r15), %r13 + movq -24(%rsp), %rsi # 8-byte Reload + sbbq 40(%r15), %rsi + movq -16(%rsp), %rax # 8-byte Reload + sbbq 48(%r15), %rax + sbbq 56(%r15), %rcx + movq -8(%rsp), %r8 # 8-byte Reload + sbbq 64(%r15), %r8 + sbbq $0, %r9 + andl $1, %r9d + cmovneq %rbp, %r10 + movq %r10, 72(%rdi) + testb %r9b, %r9b + cmovneq %rdx, %r11 + movq %r11, 80(%rdi) + cmovneq %r12, %rbx + movq %rbx, 88(%rdi) + cmovneq -48(%rsp), %r14 # 8-byte Folded Reload + movq %r14, 96(%rdi) + cmovneq -32(%rsp), %r13 # 8-byte Folded Reload + movq %r13, 104(%rdi) + cmovneq -24(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 112(%rdi) + cmovneq -16(%rsp), %rax # 8-byte Folded Reload + movq %rax, 120(%rdi) + cmovneq -40(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 128(%rdi) + cmovneq -8(%rsp), %r8 # 8-byte Folded Reload + movq %r8, 136(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end140: + .size mcl_fpDbl_add9Lbmi2, .Lfunc_end140-mcl_fpDbl_add9Lbmi2 + + .globl mcl_fpDbl_sub9Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub9Lbmi2,@function +mcl_fpDbl_sub9Lbmi2: # @mcl_fpDbl_sub9Lbmi2 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r14 + movq 136(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 128(%rdx), %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq 120(%rdx), %rax + movq %rax, -24(%rsp) # 8-byte Spill + movq 16(%rsi), %r11 + movq (%rsi), %r12 + movq 8(%rsi), %r13 + xorl %r9d, %r9d + subq (%rdx), %r12 + sbbq 8(%rdx), %r13 + sbbq 16(%rdx), %r11 + movq 24(%rsi), %rbx + sbbq 24(%rdx), %rbx + movq 32(%rsi), %rbp + sbbq 32(%rdx), %rbp + movq 112(%rdx), %r10 + movq 104(%rdx), %rcx + movq %r12, (%rdi) + movq 96(%rdx), %rax + movq %r13, 8(%rdi) + movq 88(%rdx), %r13 + movq %r11, 16(%rdi) + movq 40(%rdx), %r11 + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %r11, %rbx + movq 48(%rdx), %r11 + movq %rbp, 32(%rdi) + movq 48(%rsi), %rbp + sbbq %r11, %rbp + movq 56(%rdx), %r11 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rbx + sbbq %r11, %rbx + movq 64(%rdx), %r11 + movq %rbp, 48(%rdi) + movq 64(%rsi), %rbp + sbbq %r11, %rbp + movq 80(%rdx), %r8 + movq 72(%rdx), %r11 + movq %rbx, 56(%rdi) + movq 72(%rsi), %r15 + sbbq %r11, %r15 + movq 136(%rsi), %rdx + movq %rbp, 64(%rdi) + movq 80(%rsi), %rbp + sbbq %r8, %rbp + movq 88(%rsi), %r12 + sbbq %r13, %r12 + movq 96(%rsi), %r13 + sbbq %rax, %r13 + movq 104(%rsi), %rax + sbbq %rcx, %rax + movq %rax, -40(%rsp) # 8-byte Spill + movq 112(%rsi), %rax + sbbq %r10, %rax + movq %rax, -32(%rsp) # 8-byte Spill + movq 128(%rsi), %rax + movq 120(%rsi), %rcx + sbbq -24(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -24(%rsp) # 8-byte Spill + sbbq -16(%rsp), %rax # 8-byte Folded Reload + movq %rax, -16(%rsp) # 8-byte Spill + sbbq -8(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -8(%rsp) # 8-byte Spill + movl $0, %r8d + sbbq $0, %r8 + andl $1, %r8d + movq (%r14), %r10 + cmoveq %r9, %r10 + testb %r8b, %r8b + movq 16(%r14), %r8 + cmoveq %r9, %r8 + movq 8(%r14), %rdx + cmoveq %r9, %rdx + movq 64(%r14), %rbx + cmoveq %r9, %rbx + movq 56(%r14), %r11 + cmoveq %r9, 
%r11 + movq 48(%r14), %rsi + cmoveq %r9, %rsi + movq 40(%r14), %rcx + cmoveq %r9, %rcx + movq 32(%r14), %rax + cmoveq %r9, %rax + cmovneq 24(%r14), %r9 + addq %r15, %r10 + adcq %rbp, %rdx + movq %r10, 72(%rdi) + adcq %r12, %r8 + movq %rdx, 80(%rdi) + adcq %r13, %r9 + movq %r8, 88(%rdi) + movq %r9, 96(%rdi) + adcq -40(%rsp), %rax # 8-byte Folded Reload + movq %rax, 104(%rdi) + adcq -32(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 112(%rdi) + adcq -24(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 120(%rdi) + adcq -16(%rsp), %r11 # 8-byte Folded Reload + movq %r11, 128(%rdi) + adcq -8(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, 136(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end141: + .size mcl_fpDbl_sub9Lbmi2, .Lfunc_end141-mcl_fpDbl_sub9Lbmi2 + + + .section ".note.GNU-stack","",@progbits diff --git a/vendor/github.com/byzantine-lab/mcl/src/asm/x86-64.s b/vendor/github.com/byzantine-lab/mcl/src/asm/x86-64.s new file mode 100644 index 000000000..aa677d2ea --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/asm/x86-64.s @@ -0,0 +1,16652 @@ + .text + .file "" + .globl makeNIST_P192L + .align 16, 0x90 + .type makeNIST_P192L,@function +makeNIST_P192L: # @makeNIST_P192L +# BB#0: + movq $-1, %rax + movq $-2, %rdx + movq $-1, %rcx + retq +.Lfunc_end0: + .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L + + .globl mcl_fpDbl_mod_NIST_P192L + .align 16, 0x90 + .type mcl_fpDbl_mod_NIST_P192L,@function +mcl_fpDbl_mod_NIST_P192L: # @mcl_fpDbl_mod_NIST_P192L +# BB#0: + pushq %r14 + pushq %rbx + movq 16(%rsi), %r10 + movq 24(%rsi), %r8 + movq 40(%rsi), %r9 + movq 8(%rsi), %rax + addq %r9, %rax + adcq $0, %r10 + sbbq %rcx, %rcx + andl $1, %ecx + movq 32(%rsi), %r11 + movq (%rsi), %r14 + addq %r8, %r14 + adcq %r11, %rax + adcq %r9, %r10 + adcq $0, %rcx + addq %r9, %r14 + adcq %r8, %rax + adcq %r11, %r10 + adcq $0, %rcx + addq %rcx, %r14 + adcq %rax, %rcx + adcq $0, %r10 + sbbq %rax, %rax + andl $1, %eax + movq %r14, %rsi + addq $1, %rsi + movq %rcx, %rdx + adcq $1, %rdx + movq %r10, %rbx + adcq $0, %rbx + adcq $-1, %rax + andl $1, %eax + cmovneq %r14, %rsi + movq %rsi, (%rdi) + testb %al, %al + cmovneq %rcx, %rdx + movq %rdx, 8(%rdi) + cmovneq %r10, %rbx + movq %rbx, 16(%rdi) + popq %rbx + popq %r14 + retq +.Lfunc_end1: + .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L + + .globl mcl_fp_sqr_NIST_P192L + .align 16, 0x90 + .type mcl_fp_sqr_NIST_P192L,@function +mcl_fp_sqr_NIST_P192L: # @mcl_fp_sqr_NIST_P192L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, -8(%rsp) # 8-byte Spill + movq 16(%rsi), %r11 + movq (%rsi), %rbx + movq 8(%rsi), %rcx + movq %r11, %rax + mulq %rcx + movq %rdx, %rdi + movq %rax, %r14 + movq %rcx, %rax + mulq %rcx + movq %rdx, %r15 + movq %rax, %r12 + movq %rcx, %rax + mulq %rbx + movq %rax, %r13 + movq %rdx, %rcx + addq %rcx, %r12 + adcq %r14, %r15 + movq %rdi, %r10 + adcq $0, %r10 + movq %r11, %rax + mulq %rbx + movq %rdx, %r9 + movq %rax, %rbp + movq %rbx, %rax + mulq %rbx + movq %rax, %r8 + movq %rdx, %rsi + addq %r13, %rsi + adcq %rbp, %rcx + movq %r9, %rbx + adcq $0, %rbx + addq %r13, %rsi + adcq %r12, %rcx + adcq %r15, %rbx + adcq $0, %r10 + movq %r11, %rax + mulq %r11 + addq %r14, %r9 + adcq %rdi, %rax + adcq $0, %rdx + addq %rbp, %rcx + adcq %rbx, %r9 + adcq %r10, %rax + adcq $0, %rdx + addq %rdx, %rsi + adcq $0, %rcx + sbbq %rbp, %rbp + andl $1, %ebp + addq %r9, %r8 + adcq %rax, %rsi + adcq %rdx, %rcx + adcq $0, %rbp + addq %rdx, %r8 + adcq %r9, 
%rsi + adcq %rax, %rcx + adcq $0, %rbp + addq %rbp, %r8 + adcq %rsi, %rbp + adcq $0, %rcx + sbbq %rax, %rax + andl $1, %eax + movq %r8, %rdx + addq $1, %rdx + movq %rbp, %rsi + adcq $1, %rsi + movq %rcx, %rdi + adcq $0, %rdi + adcq $-1, %rax + andl $1, %eax + cmovneq %r8, %rdx + movq -8(%rsp), %rbx # 8-byte Reload + movq %rdx, (%rbx) + testb %al, %al + cmovneq %rbp, %rsi + movq %rsi, 8(%rbx) + cmovneq %rcx, %rdi + movq %rdi, 16(%rbx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end2: + .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L + + .globl mcl_fp_mulNIST_P192L + .align 16, 0x90 + .type mcl_fp_mulNIST_P192L,@function +mcl_fp_mulNIST_P192L: # @mcl_fp_mulNIST_P192L +# BB#0: + pushq %r14 + pushq %rbx + subq $56, %rsp + movq %rdi, %r14 + leaq 8(%rsp), %rdi + callq mcl_fpDbl_mulPre3L@PLT + movq 24(%rsp), %r9 + movq 32(%rsp), %r8 + movq 48(%rsp), %rdi + movq 16(%rsp), %rbx + addq %rdi, %rbx + adcq $0, %r9 + sbbq %rcx, %rcx + andl $1, %ecx + movq 40(%rsp), %rsi + movq 8(%rsp), %rdx + addq %r8, %rdx + adcq %rsi, %rbx + adcq %rdi, %r9 + adcq $0, %rcx + addq %rdi, %rdx + adcq %r8, %rbx + adcq %rsi, %r9 + adcq $0, %rcx + addq %rcx, %rdx + adcq %rbx, %rcx + adcq $0, %r9 + sbbq %rsi, %rsi + andl $1, %esi + movq %rdx, %rdi + addq $1, %rdi + movq %rcx, %rbx + adcq $1, %rbx + movq %r9, %rax + adcq $0, %rax + adcq $-1, %rsi + andl $1, %esi + cmovneq %rdx, %rdi + movq %rdi, (%r14) + testb %sil, %sil + cmovneq %rcx, %rbx + movq %rbx, 8(%r14) + cmovneq %r9, %rax + movq %rax, 16(%r14) + addq $56, %rsp + popq %rbx + popq %r14 + retq +.Lfunc_end3: + .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L + + .globl mcl_fpDbl_mod_NIST_P521L + .align 16, 0x90 + .type mcl_fpDbl_mod_NIST_P521L,@function +mcl_fpDbl_mod_NIST_P521L: # @mcl_fpDbl_mod_NIST_P521L +# BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 120(%rsi), %r9 + movq 128(%rsi), %r14 + movq %r14, %r8 + shldq $55, %r9, %r8 + movq 112(%rsi), %r10 + shldq $55, %r10, %r9 + movq 104(%rsi), %r11 + shldq $55, %r11, %r10 + movq 96(%rsi), %r15 + shldq $55, %r15, %r11 + movq 88(%rsi), %r12 + shldq $55, %r12, %r15 + movq 80(%rsi), %rcx + shldq $55, %rcx, %r12 + movq 64(%rsi), %rbx + movq 72(%rsi), %rax + shldq $55, %rax, %rcx + shrq $9, %r14 + shldq $55, %rbx, %rax + andl $511, %ebx # imm = 0x1FF + addq (%rsi), %rax + adcq 8(%rsi), %rcx + adcq 16(%rsi), %r12 + adcq 24(%rsi), %r15 + adcq 32(%rsi), %r11 + adcq 40(%rsi), %r10 + adcq 48(%rsi), %r9 + adcq 56(%rsi), %r8 + adcq %r14, %rbx + movq %rbx, %rsi + shrq $9, %rsi + andl $1, %esi + addq %rax, %rsi + adcq $0, %rcx + adcq $0, %r12 + adcq $0, %r15 + adcq $0, %r11 + adcq $0, %r10 + adcq $0, %r9 + adcq $0, %r8 + adcq $0, %rbx + movq %rsi, %rax + andq %r12, %rax + andq %r15, %rax + andq %r11, %rax + andq %r10, %rax + andq %r9, %rax + andq %r8, %rax + movq %rbx, %rdx + orq $-512, %rdx # imm = 0xFFFFFFFFFFFFFE00 + andq %rax, %rdx + andq %rcx, %rdx + cmpq $-1, %rdx + je .LBB4_1 +# BB#3: # %nonzero + movq %rsi, (%rdi) + movq %rcx, 8(%rdi) + movq %r12, 16(%rdi) + movq %r15, 24(%rdi) + movq %r11, 32(%rdi) + movq %r10, 40(%rdi) + movq %r9, 48(%rdi) + movq %r8, 56(%rdi) + andl $511, %ebx # imm = 0x1FF + movq %rbx, 64(%rdi) + jmp .LBB4_2 +.LBB4_1: # %zero + movq $0, 64(%rdi) + movq $0, 56(%rdi) + movq $0, 48(%rdi) + movq $0, 40(%rdi) + movq $0, 32(%rdi) + movq $0, 24(%rdi) + movq $0, 16(%rdi) + movq $0, 8(%rdi) + movq $0, (%rdi) +.LBB4_2: # %zero + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq +.Lfunc_end4: + .size mcl_fpDbl_mod_NIST_P521L, 
.Lfunc_end4-mcl_fpDbl_mod_NIST_P521L + + .globl mcl_fp_mulUnitPre1L + .align 16, 0x90 + .type mcl_fp_mulUnitPre1L,@function +mcl_fp_mulUnitPre1L: # @mcl_fp_mulUnitPre1L +# BB#0: + movq %rdx, %rax + mulq (%rsi) + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + retq +.Lfunc_end5: + .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L + + .globl mcl_fpDbl_mulPre1L + .align 16, 0x90 + .type mcl_fpDbl_mulPre1L,@function +mcl_fpDbl_mulPre1L: # @mcl_fpDbl_mulPre1L +# BB#0: + movq (%rdx), %rax + mulq (%rsi) + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + retq +.Lfunc_end6: + .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L + + .globl mcl_fpDbl_sqrPre1L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre1L,@function +mcl_fpDbl_sqrPre1L: # @mcl_fpDbl_sqrPre1L +# BB#0: + movq (%rsi), %rax + mulq %rax + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + retq +.Lfunc_end7: + .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L + + .globl mcl_fp_mont1L + .align 16, 0x90 + .type mcl_fp_mont1L,@function +mcl_fp_mont1L: # @mcl_fp_mont1L +# BB#0: + movq (%rsi), %rax + mulq (%rdx) + movq %rax, %rsi + movq %rdx, %r8 + movq -8(%rcx), %rax + imulq %rsi, %rax + movq (%rcx), %rcx + mulq %rcx + addq %rsi, %rax + adcq %r8, %rdx + sbbq %rax, %rax + andl $1, %eax + movq %rdx, %rsi + subq %rcx, %rsi + sbbq $0, %rax + testb $1, %al + cmovneq %rdx, %rsi + movq %rsi, (%rdi) + retq +.Lfunc_end8: + .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L + + .globl mcl_fp_montNF1L + .align 16, 0x90 + .type mcl_fp_montNF1L,@function +mcl_fp_montNF1L: # @mcl_fp_montNF1L +# BB#0: + movq (%rsi), %rax + mulq (%rdx) + movq %rax, %rsi + movq %rdx, %r8 + movq -8(%rcx), %rax + imulq %rsi, %rax + movq (%rcx), %rcx + mulq %rcx + addq %rsi, %rax + adcq %r8, %rdx + movq %rdx, %rax + subq %rcx, %rax + cmovsq %rdx, %rax + movq %rax, (%rdi) + retq +.Lfunc_end9: + .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L + + .globl mcl_fp_montRed1L + .align 16, 0x90 + .type mcl_fp_montRed1L,@function +mcl_fp_montRed1L: # @mcl_fp_montRed1L +# BB#0: + movq (%rsi), %rcx + movq -8(%rdx), %rax + imulq %rcx, %rax + movq (%rdx), %r8 + mulq %r8 + addq %rcx, %rax + adcq 8(%rsi), %rdx + sbbq %rax, %rax + andl $1, %eax + movq %rdx, %rcx + subq %r8, %rcx + sbbq $0, %rax + testb $1, %al + cmovneq %rdx, %rcx + movq %rcx, (%rdi) + retq +.Lfunc_end10: + .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L + + .globl mcl_fp_addPre1L + .align 16, 0x90 + .type mcl_fp_addPre1L,@function +mcl_fp_addPre1L: # @mcl_fp_addPre1L +# BB#0: + movq (%rdx), %rax + addq (%rsi), %rax + movq %rax, (%rdi) + sbbq %rax, %rax + andl $1, %eax + retq +.Lfunc_end11: + .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L + + .globl mcl_fp_subPre1L + .align 16, 0x90 + .type mcl_fp_subPre1L,@function +mcl_fp_subPre1L: # @mcl_fp_subPre1L +# BB#0: + movq (%rsi), %rcx + xorl %eax, %eax + subq (%rdx), %rcx + movq %rcx, (%rdi) + sbbq $0, %rax + andl $1, %eax + retq +.Lfunc_end12: + .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L + + .globl mcl_fp_shr1_1L + .align 16, 0x90 + .type mcl_fp_shr1_1L,@function +mcl_fp_shr1_1L: # @mcl_fp_shr1_1L +# BB#0: + movq (%rsi), %rax + shrq %rax + movq %rax, (%rdi) + retq +.Lfunc_end13: + .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L + + .globl mcl_fp_add1L + .align 16, 0x90 + .type mcl_fp_add1L,@function +mcl_fp_add1L: # @mcl_fp_add1L +# BB#0: + movq (%rdx), %rax + addq (%rsi), %rax + movq %rax, (%rdi) + sbbq %rdx, %rdx + andl $1, %edx + subq (%rcx), %rax + sbbq $0, %rdx + testb $1, %dl + jne .LBB14_2 +# BB#1: # %nocarry + movq %rax, (%rdi) +.LBB14_2: # %carry + retq 
+.Lfunc_end14: + .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L + + .globl mcl_fp_addNF1L + .align 16, 0x90 + .type mcl_fp_addNF1L,@function +mcl_fp_addNF1L: # @mcl_fp_addNF1L +# BB#0: + movq (%rdx), %rax + addq (%rsi), %rax + movq %rax, %rdx + subq (%rcx), %rdx + cmovsq %rax, %rdx + movq %rdx, (%rdi) + retq +.Lfunc_end15: + .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L + + .globl mcl_fp_sub1L + .align 16, 0x90 + .type mcl_fp_sub1L,@function +mcl_fp_sub1L: # @mcl_fp_sub1L +# BB#0: + movq (%rsi), %rax + xorl %esi, %esi + subq (%rdx), %rax + movq %rax, (%rdi) + sbbq $0, %rsi + testb $1, %sil + jne .LBB16_2 +# BB#1: # %nocarry + retq +.LBB16_2: # %carry + addq (%rcx), %rax + movq %rax, (%rdi) + retq +.Lfunc_end16: + .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L + + .globl mcl_fp_subNF1L + .align 16, 0x90 + .type mcl_fp_subNF1L,@function +mcl_fp_subNF1L: # @mcl_fp_subNF1L +# BB#0: + movq (%rsi), %rax + subq (%rdx), %rax + movq %rax, %rdx + sarq $63, %rdx + andq (%rcx), %rdx + addq %rax, %rdx + movq %rdx, (%rdi) + retq +.Lfunc_end17: + .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L + + .globl mcl_fpDbl_add1L + .align 16, 0x90 + .type mcl_fpDbl_add1L,@function +mcl_fpDbl_add1L: # @mcl_fpDbl_add1L +# BB#0: + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + movq %rax, (%rdi) + sbbq %rax, %rax + andl $1, %eax + movq %rdx, %rsi + subq (%rcx), %rsi + sbbq $0, %rax + testb $1, %al + cmovneq %rdx, %rsi + movq %rsi, 8(%rdi) + retq +.Lfunc_end18: + .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L + + .globl mcl_fpDbl_sub1L + .align 16, 0x90 + .type mcl_fpDbl_sub1L,@function +mcl_fpDbl_sub1L: # @mcl_fpDbl_sub1L +# BB#0: + movq (%rsi), %rax + movq 8(%rsi), %r8 + xorl %esi, %esi + subq (%rdx), %rax + sbbq 8(%rdx), %r8 + movq %rax, (%rdi) + movl $0, %eax + sbbq $0, %rax + testb $1, %al + cmovneq (%rcx), %rsi + addq %r8, %rsi + movq %rsi, 8(%rdi) + retq +.Lfunc_end19: + .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L + + .globl mcl_fp_mulUnitPre2L + .align 16, 0x90 + .type mcl_fp_mulUnitPre2L,@function +mcl_fp_mulUnitPre2L: # @mcl_fp_mulUnitPre2L +# BB#0: + movq %rdx, %r8 + movq %r8, %rax + mulq 8(%rsi) + movq %rdx, %rcx + movq %rax, %r9 + movq %r8, %rax + mulq (%rsi) + movq %rax, (%rdi) + addq %r9, %rdx + movq %rdx, 8(%rdi) + adcq $0, %rcx + movq %rcx, 16(%rdi) + retq +.Lfunc_end20: + .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L + + .globl mcl_fpDbl_mulPre2L + .align 16, 0x90 + .type mcl_fpDbl_mulPre2L,@function +mcl_fpDbl_mulPre2L: # @mcl_fpDbl_mulPre2L +# BB#0: + pushq %r14 + pushq %rbx + movq %rdx, %r10 + movq (%rsi), %r8 + movq 8(%rsi), %r11 + movq (%r10), %rcx + movq %r8, %rax + mulq %rcx + movq %rdx, %r9 + movq %rax, (%rdi) + movq %r11, %rax + mulq %rcx + movq %rdx, %r14 + movq %rax, %rsi + addq %r9, %rsi + adcq $0, %r14 + movq 8(%r10), %rbx + movq %r11, %rax + mulq %rbx + movq %rdx, %r9 + movq %rax, %rcx + movq %r8, %rax + mulq %rbx + addq %rsi, %rax + movq %rax, 8(%rdi) + adcq %r14, %rcx + sbbq %rax, %rax + andl $1, %eax + addq %rdx, %rcx + movq %rcx, 16(%rdi) + adcq %r9, %rax + movq %rax, 24(%rdi) + popq %rbx + popq %r14 + retq +.Lfunc_end21: + .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L + + .globl mcl_fpDbl_sqrPre2L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre2L,@function +mcl_fpDbl_sqrPre2L: # @mcl_fpDbl_sqrPre2L +# BB#0: + movq (%rsi), %rcx + movq 8(%rsi), %r8 + movq %rcx, %rax + mulq %rcx + movq %rdx, %rsi + movq %rax, (%rdi) + movq %r8, %rax + mulq %rcx + movq %rdx, %r9 + movq %rax, %r10 + addq %r10, %rsi + movq %r9, %rcx 
+ adcq $0, %rcx + movq %r8, %rax + mulq %r8 + addq %r10, %rsi + movq %rsi, 8(%rdi) + adcq %rcx, %rax + sbbq %rcx, %rcx + andl $1, %ecx + addq %r9, %rax + movq %rax, 16(%rdi) + adcq %rdx, %rcx + movq %rcx, 24(%rdi) + retq +.Lfunc_end22: + .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L + + .globl mcl_fp_mont2L + .align 16, 0x90 + .type mcl_fp_mont2L,@function +mcl_fp_mont2L: # @mcl_fp_mont2L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, -8(%rsp) # 8-byte Spill + movq (%rsi), %r8 + movq 8(%rsi), %r11 + movq (%rdx), %rsi + movq 8(%rdx), %r9 + movq %r11, %rax + mulq %rsi + movq %rdx, %r15 + movq %rax, %r10 + movq %r8, %rax + mulq %rsi + movq %rax, %r14 + movq %rdx, %r13 + addq %r10, %r13 + adcq $0, %r15 + movq -8(%rcx), %r10 + movq (%rcx), %rbp + movq %r14, %rsi + imulq %r10, %rsi + movq 8(%rcx), %rdi + movq %rsi, %rax + mulq %rdi + movq %rdx, %rcx + movq %rax, %r12 + movq %rsi, %rax + mulq %rbp + movq %rdx, %rbx + addq %r12, %rbx + adcq $0, %rcx + addq %r14, %rax + adcq %r13, %rbx + adcq %r15, %rcx + sbbq %r15, %r15 + andl $1, %r15d + movq %r9, %rax + mulq %r11 + movq %rdx, %r14 + movq %rax, %r11 + movq %r9, %rax + mulq %r8 + movq %rax, %r8 + movq %rdx, %rsi + addq %r11, %rsi + adcq $0, %r14 + addq %rbx, %r8 + adcq %rcx, %rsi + adcq %r15, %r14 + sbbq %rbx, %rbx + andl $1, %ebx + imulq %r8, %r10 + movq %r10, %rax + mulq %rdi + movq %rdx, %rcx + movq %rax, %r9 + movq %r10, %rax + mulq %rbp + addq %r9, %rdx + adcq $0, %rcx + addq %r8, %rax + adcq %rsi, %rdx + adcq %r14, %rcx + adcq $0, %rbx + movq %rdx, %rax + subq %rbp, %rax + movq %rcx, %rsi + sbbq %rdi, %rsi + sbbq $0, %rbx + andl $1, %ebx + cmovneq %rcx, %rsi + testb %bl, %bl + cmovneq %rdx, %rax + movq -8(%rsp), %rcx # 8-byte Reload + movq %rax, (%rcx) + movq %rsi, 8(%rcx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end23: + .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L + + .globl mcl_fp_montNF2L + .align 16, 0x90 + .type mcl_fp_montNF2L,@function +mcl_fp_montNF2L: # @mcl_fp_montNF2L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, -8(%rsp) # 8-byte Spill + movq (%rsi), %r8 + movq 8(%rsi), %r11 + movq (%rdx), %rbp + movq 8(%rdx), %r9 + movq %r8, %rax + mulq %rbp + movq %rax, %rsi + movq %rdx, %r14 + movq -8(%rcx), %r10 + movq (%rcx), %r15 + movq %rsi, %rbx + imulq %r10, %rbx + movq 8(%rcx), %rdi + movq %rbx, %rax + mulq %rdi + movq %rdx, -16(%rsp) # 8-byte Spill + movq %rax, %r13 + movq %rbx, %rax + mulq %r15 + movq %rdx, %r12 + movq %rax, %rbx + movq %r11, %rax + mulq %rbp + movq %rdx, %rcx + movq %rax, %rbp + addq %r14, %rbp + adcq $0, %rcx + addq %rsi, %rbx + adcq %r13, %rbp + adcq $0, %rcx + addq %r12, %rbp + adcq -16(%rsp), %rcx # 8-byte Folded Reload + movq %r9, %rax + mulq %r11 + movq %rdx, %rsi + movq %rax, %r11 + movq %r9, %rax + mulq %r8 + movq %rax, %r8 + movq %rdx, %rbx + addq %r11, %rbx + adcq $0, %rsi + addq %rbp, %r8 + adcq %rcx, %rbx + adcq $0, %rsi + imulq %r8, %r10 + movq %r10, %rax + mulq %rdi + movq %rdx, %rcx + movq %rax, %rbp + movq %r10, %rax + mulq %r15 + addq %r8, %rax + adcq %rbx, %rbp + adcq $0, %rsi + addq %rdx, %rbp + adcq %rcx, %rsi + movq %rbp, %rax + subq %r15, %rax + movq %rsi, %rcx + sbbq %rdi, %rcx + cmovsq %rbp, %rax + movq -8(%rsp), %rdx # 8-byte Reload + movq %rax, (%rdx) + cmovsq %rsi, %rcx + movq %rcx, 8(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end24: + .size mcl_fp_montNF2L, 
.Lfunc_end24-mcl_fp_montNF2L + + .globl mcl_fp_montRed2L + .align 16, 0x90 + .type mcl_fp_montRed2L,@function +mcl_fp_montRed2L: # @mcl_fp_montRed2L +# BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq -8(%rdx), %r9 + movq (%rdx), %r11 + movq (%rsi), %rbx + movq %rbx, %rcx + imulq %r9, %rcx + movq 8(%rdx), %r14 + movq %rcx, %rax + mulq %r14 + movq %rdx, %r8 + movq %rax, %r10 + movq %rcx, %rax + mulq %r11 + movq %rdx, %rcx + addq %r10, %rcx + adcq $0, %r8 + movq 24(%rsi), %r15 + addq %rbx, %rax + adcq 8(%rsi), %rcx + adcq 16(%rsi), %r8 + adcq $0, %r15 + sbbq %rbx, %rbx + andl $1, %ebx + imulq %rcx, %r9 + movq %r9, %rax + mulq %r14 + movq %rdx, %rsi + movq %rax, %r10 + movq %r9, %rax + mulq %r11 + addq %r10, %rdx + adcq $0, %rsi + addq %rcx, %rax + adcq %r8, %rdx + adcq %r15, %rsi + adcq $0, %rbx + movq %rdx, %rax + subq %r11, %rax + movq %rsi, %rcx + sbbq %r14, %rcx + sbbq $0, %rbx + andl $1, %ebx + cmovneq %rsi, %rcx + testb %bl, %bl + cmovneq %rdx, %rax + movq %rax, (%rdi) + movq %rcx, 8(%rdi) + popq %rbx + popq %r14 + popq %r15 + retq +.Lfunc_end25: + .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L + + .globl mcl_fp_addPre2L + .align 16, 0x90 + .type mcl_fp_addPre2L,@function +mcl_fp_addPre2L: # @mcl_fp_addPre2L +# BB#0: + movq (%rdx), %rax + movq 8(%rdx), %rcx + addq (%rsi), %rax + adcq 8(%rsi), %rcx + movq %rax, (%rdi) + movq %rcx, 8(%rdi) + sbbq %rax, %rax + andl $1, %eax + retq +.Lfunc_end26: + .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L + + .globl mcl_fp_subPre2L + .align 16, 0x90 + .type mcl_fp_subPre2L,@function +mcl_fp_subPre2L: # @mcl_fp_subPre2L +# BB#0: + movq (%rsi), %rcx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rcx + sbbq 8(%rdx), %rsi + movq %rcx, (%rdi) + movq %rsi, 8(%rdi) + sbbq $0, %rax + andl $1, %eax + retq +.Lfunc_end27: + .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L + + .globl mcl_fp_shr1_2L + .align 16, 0x90 + .type mcl_fp_shr1_2L,@function +mcl_fp_shr1_2L: # @mcl_fp_shr1_2L +# BB#0: + movq (%rsi), %rax + movq 8(%rsi), %rcx + shrdq $1, %rcx, %rax + movq %rax, (%rdi) + shrq %rcx + movq %rcx, 8(%rdi) + retq +.Lfunc_end28: + .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L + + .globl mcl_fp_add2L + .align 16, 0x90 + .type mcl_fp_add2L,@function +mcl_fp_add2L: # @mcl_fp_add2L +# BB#0: + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq $0, %rsi + testb $1, %sil + jne .LBB29_2 +# BB#1: # %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) +.LBB29_2: # %carry + retq +.Lfunc_end29: + .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L + + .globl mcl_fp_addNF2L + .align 16, 0x90 + .type mcl_fp_addNF2L,@function +mcl_fp_addNF2L: # @mcl_fp_addNF2L +# BB#0: + movq (%rdx), %rax + movq 8(%rdx), %r8 + addq (%rsi), %rax + adcq 8(%rsi), %r8 + movq %rax, %rsi + subq (%rcx), %rsi + movq %r8, %rdx + sbbq 8(%rcx), %rdx + testq %rdx, %rdx + cmovsq %rax, %rsi + movq %rsi, (%rdi) + cmovsq %r8, %rdx + movq %rdx, 8(%rdi) + retq +.Lfunc_end30: + .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L + + .globl mcl_fp_sub2L + .align 16, 0x90 + .type mcl_fp_sub2L,@function +mcl_fp_sub2L: # @mcl_fp_sub2L +# BB#0: + movq (%rsi), %rax + movq 8(%rsi), %r8 + xorl %esi, %esi + subq (%rdx), %rax + sbbq 8(%rdx), %r8 + movq %rax, (%rdi) + movq %r8, 8(%rdi) + sbbq $0, %rsi + testb $1, %sil + jne .LBB31_2 +# BB#1: # %nocarry + retq +.LBB31_2: # %carry + movq 8(%rcx), %rdx + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %r8, %rdx 
+ movq %rdx, 8(%rdi)
+ retq
+.Lfunc_end31:
+ .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L
+
+ .globl mcl_fp_subNF2L
+ .align 16, 0x90
+ .type mcl_fp_subNF2L,@function
+mcl_fp_subNF2L: # @mcl_fp_subNF2L
+# BB#0:
+ movq (%rsi), %r8
+ movq 8(%rsi), %rsi
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %rsi
+ movq %rsi, %rdx
+ sarq $63, %rdx
+ movq 8(%rcx), %rax
+ andq %rdx, %rax
+ andq (%rcx), %rdx
+ addq %r8, %rdx
+ movq %rdx, (%rdi)
+ adcq %rsi, %rax
+ movq %rax, 8(%rdi)
+ retq
+.Lfunc_end32:
+ .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L
+
+ .globl mcl_fpDbl_add2L
+ .align 16, 0x90
+ .type mcl_fpDbl_add2L,@function
+mcl_fpDbl_add2L: # @mcl_fpDbl_add2L
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ adcq %r8, %r9
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r10, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r10, %rdx
+ movq %rdx, 16(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 24(%rdi)
+ retq
+.Lfunc_end33:
+ .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L
+
+ .globl mcl_fpDbl_sub2L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub2L,@function
+mcl_fpDbl_sub2L: # @mcl_fpDbl_sub2L
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %r11
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %r11, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq %r8, %r9
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ cmovneq 8(%rcx), %rax
+ addq %r10, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ retq
+.Lfunc_end34:
+ .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L
+
+ .globl mcl_fp_mulUnitPre3L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre3L,@function
+mcl_fp_mulUnitPre3L: # @mcl_fp_mulUnitPre3L
+# BB#0:
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r9, %r10
+ movq %r10, 16(%rdi)
+ adcq $0, %r8
+ movq %r8, 24(%rdi)
+ retq
+.Lfunc_end35:
+ .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L
+
+ .globl mcl_fpDbl_mulPre3L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre3L,@function
+mcl_fpDbl_mulPre3L: # @mcl_fpDbl_mulPre3L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ movq (%r10), %rbx
+ movq %r8, %rax
+ mulq %rbx
+ movq %rdx, %rcx
+ movq 16(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r14
+ movq %rax, %rsi
+ movq %r9, %rax
+ mulq %rbx
+ movq %rdx, %r15
+ movq %rax, %rbx
+ addq %rcx, %rbx
+ adcq %rsi, %r15
+ adcq $0, %r14
+ movq 8(%r10), %rcx
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %r9, %rax
+ mulq %rcx
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %r8, %rax
+ mulq %rcx
+ addq %rbx, %rax
+ movq %rax, 8(%rdi)
+ adcq %r15, %rsi
+ adcq %r14, %rbp
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq %rdx, %rsi
+ adcq %r13, %rbp
+ adcq %r12, %r14
+ movq 16(%r10), %r15
+ movq %r11, %rax
+ mulq %r15
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq %r9, %rax
+ mulq %r15
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %r8, %rax
+ mulq %r15
+ addq %rsi, %rax
+ movq %rax, 16(%rdi)
+ adcq %rbp, %rcx
+ adcq %r14, %rbx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r10, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end36:
+ .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L
+
+ .globl mcl_fpDbl_sqrPre3L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre3L,@function
+mcl_fpDbl_sqrPre3L: # @mcl_fpDbl_sqrPre3L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %rbx
+ movq %rax, (%rdi)
+ movq %r10, %rax
+ mulq %rcx
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, %r12
+ addq %r12, %rbx
+ movq %r14, %r13
+ adcq %r11, %r13
+ movq %r8, %rcx
+ adcq $0, %rcx
+ movq %r10, %rax
+ mulq %rsi
+ movq %rdx, %r9
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq %rsi
+ movq %rax, %rsi
+ addq %r12, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r13, %rsi
+ adcq %r15, %rcx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq %r14, %rsi
+ adcq %rdx, %rcx
+ adcq %r9, %rbx
+ movq %r10, %rax
+ mulq %r10
+ addq %r11, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r15, %rcx
+ adcq %rbx, %rax
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq %r8, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r9, %rax
+ movq %rax, 32(%rdi)
+ adcq %rdx, %rsi
+ movq %rsi, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end37:
+ .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L
+
+ .globl mcl_fp_mont3L
+ .align 16, 0x90
+ .type mcl_fp_mont3L,@function
+mcl_fp_mont3L: # @mcl_fp_mont3L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r10
+ movq %r10, -56(%rsp) # 8-byte Spill
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq (%r10), %rdi
+ mulq %rdi
+ movq %rax, %rbp
+ movq %rdx, %r8
+ movq (%rsi), %rbx
+ movq %rbx, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r15
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rax, %r12
+ movq %rdx, %r11
+ addq %rsi, %r11
+ adcq %rbp, %r15
+ adcq $0, %r8
+ movq -8(%rcx), %r14
+ movq (%rcx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ imulq %r14, %rbp
+ movq 16(%rcx), %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rbx
+ movq %rbx, -8(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rdx, %rcx
+ movq %rax, %r13
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %rsi
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, %rbp
+ addq %r9, %rbp
+ adcq %r13, %rsi
+ adcq $0, %rcx
+ addq %r12, %rax
+ adcq %r11, %rbp
+ movq 8(%r10), %rbx
+ adcq %r15, %rsi
+ adcq %r8, %rcx
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq %rbx, %rax
+ movq -64(%rsp), %r10 # 8-byte Reload
+ mulq %r10
+ movq %rdx, %r15
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %rbx
+ addq %r11, %rbx
+ adcq %r9, %r12
+ adcq $0, %r15
+ addq %rbp, %r8
+ adcq %rsi, %rbx
+ adcq %rcx, %r12
+ adcq %rdi, %r15
+ sbbq %r11, %r11
+ andl $1, %r11d
+ movq %r8, %rcx
+ imulq %r14, %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ addq %rdi, %rbp
+ adcq %r9, %rsi
+ adcq $0, %r13
+ addq %r8, %rax
+ adcq %rbx, %rbp
+ adcq %r12, %rsi
+ adcq %r15, %r13
+ adcq $0, %r11
+ movq -56(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rcx
+ movq %rcx, %rax
+ mulq %r10
+ movq %rdx, %r8
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rcx
+ addq %rdi, %rcx
+ adcq %r10, %r15
+ adcq $0, %r8
+ addq %rbp, %r9
+ adcq %rsi, %rcx
+ adcq %r13, %r15
+ adcq %r11, %r8
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ imulq %r9, %r14
+ movq %r14, %rax
+ movq -16(%rsp), %r12 # 8-byte Reload
+ mulq %r12
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %r14, %rax
+ movq -8(%rsp), %r13 # 8-byte Reload
+ mulq %r13
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %r14, %rax
+ movq -24(%rsp), %rbp # 8-byte Reload
+ mulq %rbp
+ addq %r11, %rdx
+ adcq %r10, %rsi
+ adcq $0, %rbx
+ addq %r9, %rax
+ adcq %rcx, %rdx
+ adcq %r15, %rsi
+ adcq %r8, %rbx
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %rbp, %rax
+ movq %rsi, %rcx
+ sbbq %r13, %rcx
+ movq %rbx, %rbp
+ sbbq %r12, %rbp
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %rbx, %rbp
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq -48(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rsi, %rcx
+ movq %rcx, 8(%rdx)
+ movq %rbp, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end38:
+ .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L
+
+ .globl mcl_fp_montNF3L
+ .align 16, 0x90
+ .type mcl_fp_montNF3L,@function
+mcl_fp_montNF3L: # @mcl_fp_montNF3L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rdi, -32(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r10
+ movq %r10, -40(%rsp) # 8-byte Spill
+ movq (%rdx), %rbp
+ movq %r10, %rax
+ mulq %rbp
+ movq %rax, %r14
+ movq %rdx, %r15
+ movq (%rsi), %rbx
+ movq %rbx, -64(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulq %rbp
+ movq %rdx, %rdi
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rax, %r13
+ movq %rdx, %rbp
+ addq %rsi, %rbp
+ adcq %r14, %rdi
+ adcq $0, %r15
+ movq -8(%rcx), %r14
+ movq (%rcx), %r11
+ movq %r11, -48(%rsp) # 8-byte Spill
+ movq %r13, %rbx
+ imulq %r14, %rbx
+ movq 16(%rcx), %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rdx, %r8
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq %r11
+ addq %r13, %rax
+ adcq %rbp, %rcx
+ adcq %rdi, %r12
+ adcq $0, %r15
+ addq %rdx, %rcx
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rbp
+ adcq %r9, %r12
+ adcq %r8, %r15
+ movq %rbp, %rax
+ mulq %r10
+ movq %rdx, %rsi
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r9
+ movq %rbp, %rax
+ movq -64(%rsp), %r10 # 8-byte Reload
+ mulq %r10
+ movq %rax, %r13
+ movq %rdx, %rbp
+ addq %r9, %rbp
+ adcq %r8, %rbx
+ adcq $0, %rsi
+ addq %rcx, %r13
+ adcq %r12, %rbp
+ adcq %r15, %rbx
+ adcq $0, %rsi
+ movq %r13, %rcx
+ imulq %r14, %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r15
+ movq %rcx, %rax
+ movq -56(%rsp), %rdi # 8-byte Reload
+ mulq %rdi
+ movq %rdx, %r9
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq %r11
+ addq %r13, %rax
+ adcq %rbp, %r12
+ adcq %rbx, %r15
+ adcq $0, %rsi
+ addq %rdx, %r12
+ adcq %r9, %r15
+ adcq %r8, %rsi
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rbx
+ movq %rbx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq %r10
+ movq %rax, %r10
+ movq %rdx, %rbx
+ addq %r9, %rbx
+ adcq %r8, %rcx
+ adcq $0, %rbp
+ addq %r12, %r10
+ adcq %r15, %rbx
+ adcq %rsi, %rcx
+ adcq $0, %rbp
+ imulq %r10, %r14
+ movq %r14, %rax
+ movq -16(%rsp), %r15 # 8-byte Reload
+ mulq %r15
+ movq %rdx, %r8
+ movq %rax, %rsi
+ movq %r14, %rax
+ movq %rdi, %r11
+ mulq %r11
+ movq %rdx, %r9
+ movq %rax, %rdi
+ movq %r14, %rax
+ movq -48(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ addq %r10, %rax
+ adcq %rbx, %rdi
+ adcq %rcx, %rsi
+ adcq $0, %rbp
+ addq %rdx, %rdi
+ adcq %r9, %rsi
+ adcq %r8, %rbp
+ movq %rdi, %rax
+ subq %r14, %rax
+ movq %rsi, %rcx
+ sbbq %r11, %rcx
+ movq %rbp, %rbx
+ sbbq %r15, %rbx
+ movq %rbx, %rdx
+ sarq $63, %rdx
+ cmovsq %rdi, %rax
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovsq %rsi, %rcx
+ movq %rcx, 8(%rdx)
+ cmovsq %rbp, %rbx
+ movq %rbx, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end39:
+ .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L
+
+ .globl mcl_fp_montRed3L
+ .align 16, 0x90
+ .type mcl_fp_montRed3L,@function
+mcl_fp_montRed3L: # @mcl_fp_montRed3L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq -8(%rcx), %r9
+ movq (%rcx), %rdi
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ movq (%rsi), %r15
+ movq %r15, %rbx
+ imulq %r9, %rbx
+ movq 16(%rcx), %rbp
+ movq %rbp, -24(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rax, %r11
+ movq %rdx, %r8
+ movq 8(%rcx), %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rcx, %r12
+ movq %rdx, %r10
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdi, %rbx
+ movq %rdx, %rcx
+ addq %r14, %rcx
+ adcq %r11, %r10
+ adcq $0, %r8
+ movq 40(%rsi), %rdi
+ movq 32(%rsi), %r13
+ addq %r15, %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r10
+ adcq 24(%rsi), %r8
+ adcq $0, %r13
+ adcq $0, %rdi
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq %rcx, %rsi
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %r11
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq %r12
+ movq %rdx, %r14
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %rbx
+ addq %r12, %rbx
+ adcq %rbp, %r14
+ adcq $0, %r11
+ addq %rcx, %rax
+ adcq %r10, %rbx
+ adcq %r8, %r14
+ adcq %r13, %r11
+ adcq $0, %rdi
+ adcq $0, %r15
+ imulq %rbx, %r9
+ movq %r9, %rax
+ movq -24(%rsp), %r12 # 8-byte Reload
+ mulq %r12
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %r9, %rax
+ movq -32(%rsp), %r13 # 8-byte Reload
+ mulq %r13
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %r9, %rax
+ movq -16(%rsp), %rcx # 8-byte Reload
+ mulq %rcx
+ addq %r10, %rdx
+ adcq %r8, %rsi
+ adcq $0, %rbp
+ addq %rbx, %rax
+ adcq %r14, %rdx
+ adcq %r11, %rsi
+ adcq %rdi, %rbp
+ adcq $0, %r15
+ movq %rdx, %rax
+ subq %rcx, %rax
+ movq %rsi, %rdi
+ sbbq %r13, %rdi
+ movq %rbp, %rcx
+ sbbq %r12, %rcx
+ sbbq $0, %r15
+ andl $1, %r15d
+ cmovneq %rbp, %rcx
+ testb %r15b, %r15b
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rsi, %rdi
+ movq %rdi, 8(%rdx)
+ movq %rcx, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end40:
+ .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L
+
+ .globl mcl_fp_addPre3L
+ .align 16, 0x90
+ .type mcl_fp_addPre3L,@function
+mcl_fp_addPre3L: # @mcl_fp_addPre3L
+# BB#0:
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end41:
+ .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L
+
+ .globl mcl_fp_subPre3L
+ .align 16, 0x90
+ .type mcl_fp_subPre3L,@function
+mcl_fp_subPre3L: # @mcl_fp_subPre3L
+# BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r8
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end42:
+ .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L
+
+ .globl mcl_fp_shr1_3L
+ .align 16, 0x90
+ .type mcl_fp_shr1_3L,@function
+mcl_fp_shr1_3L: # @mcl_fp_shr1_3L
+# BB#0:
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rdx
+ shrdq $1, %rdx, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rdx
+ movq %rdx, 8(%rdi)
+ shrq %rax
+ movq %rax, 16(%rdi)
+ retq
+.Lfunc_end43:
+ .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L
+
+ .globl mcl_fp_add3L
+ .align 16, 0x90
+ .type mcl_fp_add3L,@function
+mcl_fp_add3L: # @mcl_fp_add3L
+# BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r8
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB44_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+.LBB44_2: # %carry
+ retq
+.Lfunc_end44:
+ .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L
+
+ .globl mcl_fp_addNF3L
+ .align 16, 0x90
+ .type mcl_fp_addNF3L,@function
+mcl_fp_addNF3L: # @mcl_fp_addNF3L
+# BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %r10
+ movq 8(%rdx), %r9
+ addq (%rsi), %r10
+ adcq 8(%rsi), %r9
+ adcq 16(%rsi), %r8
+ movq %r10, %rsi
+ subq (%rcx), %rsi
+ movq %r9, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r8, %rax
+ sbbq 16(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r10, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 16(%rdi)
+ retq
+.Lfunc_end45:
+ .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L
+
+ .globl mcl_fp_sub3L
+ .align 16, 0x90
+ .type mcl_fp_sub3L,@function
+mcl_fp_sub3L: # @mcl_fp_sub3L
+# BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB46_2
+# BB#1: # %nocarry
+ retq
+.LBB46_2: # %carry
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rsi
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r8, %rsi
+ movq %rsi, 16(%rdi)
+ retq
+.Lfunc_end46:
+ .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L
+
+ .globl mcl_fp_subNF3L
+ .align 16, 0x90
+ .type mcl_fp_subNF3L,@function
+mcl_fp_subNF3L: # @mcl_fp_subNF3L
+# BB#0:
+ movq 16(%rsi), %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r10
+ movq %r10, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rsi
+ shldq $1, %r10, %rsi
+ andq (%rcx), %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ andq 8(%rcx), %rdx
+ addq %r8, %rsi
+ movq %rsi, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ retq
+.Lfunc_end47:
+ .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L
+
+ .globl mcl_fpDbl_add3L
+ .align 16, 0x90
+ .type mcl_fpDbl_add3L,@function
+mcl_fpDbl_add3L: # @mcl_fpDbl_add3L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %r15
+ adcq %r11, %r9
+ adcq %r10, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r15, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ movq %r8, %rbx
+ sbbq 16(%rcx), %rbx
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r15, %rdx
+ movq %rdx, 24(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ cmovneq %r8, %rbx
+ movq %rbx, 40(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end48:
+ .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L
+
+ .globl mcl_fpDbl_sub3L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub3L,@function
+mcl_fpDbl_sub3L: # @mcl_fpDbl_sub3L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rax
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r14
+ movq %rbx, (%rdi)
+ movq %rax, 8(%rdi)
+ movq %r14, 16(%rdi)
+ sbbq %r15, %r11
+ sbbq %r12, %r9
+ sbbq %r10, %r8
+ movl $0, %eax
+ sbbq $0, %rax
+ andl $1, %eax
+ movq (%rcx), %rdx
+ cmoveq %rsi, %rdx
+ testb %al, %al
+ movq 16(%rcx), %rax
+ cmoveq %rsi, %rax
+ cmovneq 8(%rcx), %rsi
+ addq %r11, %rdx
+ movq %rdx, 24(%rdi)
+ adcq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end49:
+ .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L
+
+ .globl mcl_fp_mulUnitPre4L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre4L,@function
+mcl_fp_mulUnitPre4L: # @mcl_fp_mulUnitPre4L
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r14, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r11, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %r10
+ movq %r10, 24(%rdi)
+ adcq $0, %r8
+ movq %r8, 32(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end50:
+ .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L
+
+ .globl mcl_fpDbl_mulPre4L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre4L,@function
+mcl_fpDbl_mulPre4L: # @mcl_fpDbl_mulPre4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq (%rsi), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 8(%rsi), %r8
+ movq %r8, -64(%rsp) # 8-byte Spill
+ movq (%rdx), %rbx
+ movq %rdx, %rbp
+ mulq %rbx
+ movq %rdx, %r15
+ movq 16(%rsi), %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ movq 24(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r12
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq %rbx
+ movq %rdx, %r10
+ movq %rax, %r9
+ movq %r8, %rax
+ mulq %rbx
+ movq %rdx, %r13
+ movq %rax, %r8
+ addq %r15, %r8
+ adcq %r9, %r13
+ adcq %r14, %r10
+ adcq $0, %r12
+ movq %rbp, %r9
+ movq 8(%r9), %rbp
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq %rbp
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq -64(%rsp), %r14 # 8-byte Reload
+ movq %r14, %rax
+ mulq %rbp
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %rbx
+ movq -8(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ addq %r8, %rax
+ movq %rax, 8(%rdi)
+ adcq %r13, %rbx
+ adcq %r10, %rcx
+ adcq %r12, %r15
+ sbbq %r13, %r13
+ andl $1, %r13d
+ movq 16(%r9), %rbp
+ movq %r14, %rax
+ mulq %rbp
+ movq %rax, %r12
+ movq %rdx, %r8
+ addq -56(%rsp), %rbx # 8-byte Folded Reload
+ adcq -48(%rsp), %rcx # 8-byte Folded Reload
+ adcq -40(%rsp), %r15 # 8-byte Folded Reload
+ adcq -32(%rsp), %r13 # 8-byte Folded Reload
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, %r9
+ movq %rax, %r11
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, %r14
+ movq %rax, %r10
+ movq -8(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ addq %rbx, %rax
+ movq %rax, 16(%rdi)
+ adcq %r12, %rcx
+ adcq %r15, %r10
+ adcq %r13, %r11
+ sbbq %r13, %r13
+ andl $1, %r13d
+ addq %rdx, %rcx
+ adcq %r8, %r10
+ adcq %r14, %r11
+ adcq %r9, %r13
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rbx
+ movq %rbx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r15
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq (%rsi)
+ addq %rcx, %rax
+ movq %rax, 24(%rdi)
+ adcq %r10, %rbp
+ adcq %r11, %r12
+ adcq %r13, %r14
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rbp
+ movq %rbp, 32(%rdi)
+ adcq %r15, %r12
+ movq %r12, 40(%rdi)
+ adcq %r9, %r14
+ movq %r14, 48(%rdi)
+ adcq %r8, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end51:
+ .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L
+
+ .globl mcl_fpDbl_sqrPre4L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre4L,@function
+mcl_fpDbl_sqrPre4L: # @mcl_fpDbl_sqrPre4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rsi, %r10
+ movq 16(%r10), %r9
+ movq 24(%r10), %r11
+ movq (%r10), %r15
+ movq 8(%r10), %r8
+ movq %r15, %rax
+ mulq %r15
+ movq %rdx, %rbp
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %r8
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %r8
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ mulq %r15
+ movq %rdx, %rbx
+ movq %rax, %rcx
+ movq %r9, %rax
+ mulq %r15
+ movq %rdx, %rsi
+ movq %rsi, -16(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %r8, %rax
+ mulq %r8
+ movq %rdx, %r13
+ movq %rax, %r14
+ movq %r8, %rax
+ mulq %r15
+ addq %rax, %rbp
+ movq %rdx, %r8
+ adcq %r12, %r8
+ adcq %rsi, %rcx
+ adcq $0, %rbx
+ addq %rax, %rbp
+ movq %rbp, 8(%rdi)
+ adcq %r14, %r8
+ movq -40(%rsp), %rsi # 8-byte Reload
+ adcq %rsi, %rcx
+ adcq -32(%rsp), %rbx # 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq %rdx, %r8
+ adcq %r13, %rcx
+ movq -24(%rsp), %r15 # 8-byte Reload
+ adcq %r15, %rbx
+ adcq -8(%rsp), %rbp # 8-byte Folded Reload
+ movq %r11, %rax
+ mulq %r9
+ movq %rdx, %r14
+ movq %rax, %r11
+ movq %r9, %rax
+ mulq %r9
+ movq %rax, %r9
+ addq %r12, %r8
+ movq %r8, 16(%rdi)
+ adcq %rsi, %rcx
+ adcq %rbx, %r9
+ adcq %rbp, %r11
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -16(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r15, %r9
+ adcq %rdx, %r11
+ adcq %r14, %r12
+ movq 24(%r10), %rbp
+ movq %rbp, %rax
+ mulq 16(%r10)
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq 8(%r10)
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq (%r10)
+ movq %rdx, %r15
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rbp
+ addq %rcx, %rsi
+ movq %rsi, 24(%rdi)
+ adcq %r9, %rbx
+ adcq %r11, %r14
+ adcq %r12, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r15, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r13, %r14
+ movq %r14, 40(%rdi)
+ adcq %r8, %rax
+ movq %rax, 48(%rdi)
+ adcq %rdx, %rcx
+ movq %rcx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end52:
+ .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L
+
+ .globl mcl_fp_mont4L
+ .align 16, 0x90
+ .type mcl_fp_mont4L,@function
+mcl_fp_mont4L: # @mcl_fp_mont4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, %r9
+ movq %rdx, %rbp
+ movq 16(%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %r10
+ movq (%rsi), %rbx
+ movq %rbx, -72(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r14
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rax, %r11
+ movq %rdx, %r13
+ addq %rsi, %r13
+ adcq %r8, %r14
+ adcq %r9, %r10
+ adcq $0, %rbp
+ movq %rbp, -96(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq (%rcx), %r8
+ movq %r8, -32(%rsp) # 8-byte Spill
+ movq %r11, %rdi
+ imulq %rax, %rdi
+ movq 24(%rcx), %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rsi
+ movq %rsi, -16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rdx, %r9
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq %rsi
+ movq %rdx, %rbp
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, %rsi
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq %r8
+ movq %rdx, %rcx
+ addq %r15, %rcx
+ adcq %rbx, %rsi
+ adcq %r12, %rbp
+ adcq $0, %r9
+ addq %r11, %rax
+ adcq %r13, %rcx
+ adcq %r14, %rsi
+ adcq %r10, %rbp
+ adcq -96(%rsp), %r9 # 8-byte Folded Reload
+ sbbq %r13, %r13
+ andl $1, %r13d
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %rdi
+ addq %r15, %rdi
+ adcq %r14, %rbx
+ adcq %r11, %r10
+ adcq $0, %r12
+ addq %rcx, %r8
+ adcq %rsi, %rdi
+ adcq %rbp, %rbx
+ adcq %r9, %r10
+ adcq %r13, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq %r8, %rsi
+ imulq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rsi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ addq %rbp, %r11
+ adcq %r14, %r9
+ adcq -96(%rsp), %rcx # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %r8, %rax
+ adcq %rdi, %r11
+ adcq %rbx, %r9
+ adcq %r10, %rcx
+ adcq %r12, %r13
+ adcq $0, %r15
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rsi
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %rbp
+ addq %rdi, %rbp
+ adcq %rbx, %r14
+ adcq -96(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r12
+ addq %r11, %r8
+ adcq %r9, %rbp
+ adcq %rcx, %r14
+ adcq %r13, %r10
+ adcq %r15, %r12
+ sbbq %r13, %r13
+ movq %r8, %rsi
+ imulq -24(%rsp), %rsi # 8-byte Folded Reload
+ andl $1, %r13d
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ addq %r15, %rsi
+ adcq %r11, %rbx
+ adcq %r9, %rcx
+ adcq $0, %rdi
+ addq %r8, %rax
+ adcq %rbp, %rsi
+ adcq %r14, %rbx
+ adcq %r10, %rcx
+ adcq %r12, %rdi
+ adcq $0, %r13
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rbp
+ movq %rbp, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r15
+ movq %rbp, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rbp
+ addq %r12, %rbp
+ adcq %r15, %r11
+ adcq %r14, %r10
+ adcq $0, %r8
+ addq %rsi, %r9
+ adcq %rbx, %rbp
+ adcq %rcx, %r11
+ adcq %rdi, %r10
+ adcq %r13, %r8
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq -24(%rsp), %rcx # 8-byte Reload
+ imulq %r9, %rcx
+ movq %rcx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r15
+ movq %rcx, %rax
+ movq -40(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %rcx, %rax
+ movq -32(%rsp), %rcx # 8-byte Reload
+ mulq %rcx
+ addq %r12, %rdx
+ adcq %r15, %rdi
+ adcq -24(%rsp), %r13 # 8-byte Folded Reload
+ adcq $0, %rbx
+ addq %r9, %rax
+ adcq %rbp, %rdx
+ adcq %r11, %rdi
+ adcq %r10, %r13
+ adcq %r8, %rbx
+ adcq $0, %rsi
+ movq %rdx, %rax
+ subq %rcx, %rax
+ movq %rdi, %rcx
+ sbbq %r14, %rcx
+ movq %r13, %r8
+ sbbq -16(%rsp), %r8 # 8-byte Folded Reload
+ movq %rbx, %rbp
+ sbbq -8(%rsp), %rbp # 8-byte Folded Reload
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rbx, %rbp
+ testb %sil, %sil
+ cmovneq %rdx, %rax
+ movq -88(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rdi, %rcx
+ movq %rcx, 8(%rdx)
+ cmovneq %r13, %r8
+ movq %r8, 16(%rdx)
+ movq %rbp, 24(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end53:
+ .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L
+
+ .globl mcl_fp_montNF4L
+ .align 16, 0x90
+ .type mcl_fp_montNF4L,@function
+mcl_fp_montNF4L: # @mcl_fp_montNF4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, %r15
+ movq %rdx, %r12
+ movq 16(%rsi), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %r9
+ movq (%rsi), %rbp
+ movq %rbp, -40(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, %rbx
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, %r11
+ movq %rdx, %rdi
+ addq %rsi, %rdi
+ adcq %r8, %rbx
+ adcq %r15, %r9
+ adcq $0, %r12
+ movq -8(%rcx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq (%rcx), %r8
+ movq %r8, -64(%rsp) # 8-byte Spill
+ movq %r11, %rsi
+ imulq %rax, %rsi
+ movq 24(%rcx), %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbp
+ movq %rbp, -72(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rdx, %r15
+ movq %rax, %r13
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %r10
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, %rcx
+ movq %rsi, %rax
+ mulq %r8
+ addq %r11, %rax
+ adcq %rdi, %rcx
+ adcq %rbx, %rbp
+ adcq %r9, %r13
+ adcq $0, %r12
+ addq %rdx, %rcx
+ adcq %r14, %rbp
+ adcq %r10, %r13
+ adcq %r15, %r12
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rax, %rdi
+ movq %rdx, %r9
+ addq %r14, %r9
+ adcq %r11, %r8
+ adcq %r10, %rsi
+ adcq $0, %rbx
+ addq %rcx, %rdi
+ adcq %rbp, %r9
+ adcq %r13, %r8
+ adcq %r12, %rsi
+ adcq $0, %rbx
+ movq %rdi, %rcx
+ imulq -8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r13
+ movq %rcx, %rax
+ movq -80(%rsp), %r15 # 8-byte Reload
+ mulq %r15
+ movq %rdx, %r14
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ addq %rdi, %rax
+ adcq %r9, %rbp
+ adcq %r8, %r13
+ adcq %rsi, %r12
+ adcq $0, %rbx
+ addq %rdx, %rbp
+ adcq %r14, %r13
+ adcq %r11, %r12
+ adcq %r10, %rbx
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdi
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rdi
+ addq %r14, %rdi
+ adcq %r11, %rcx
+ adcq %r10, %r8
+ adcq $0, %rsi
+ addq %rbp, %r9
+ adcq %r13, %rdi
+ adcq %r12, %rcx
+ adcq %rbx, %r8
+ adcq $0, %rsi
+ movq %r9, %rbx
+ imulq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq %r15
+ movq %rdx, %r14
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ addq %r9, %rax
+ adcq %rdi, %rbp
+ adcq %rcx, %r13
+ adcq %r8, %r12
+ adcq $0, %rsi
+ addq %rdx, %rbp
+ adcq %r14, %r13
+ adcq %r11, %r12
+ adcq %r10, %rsi
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdi
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %rcx
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rdi
+ addq %r14, %rdi
+ adcq %r11, %r10
+ adcq %rcx, %r8
+ adcq $0, %rbx
+ addq %rbp, %r9
+ adcq %r13, %rdi
+ adcq %r12, %r10
+ adcq %rsi, %r8
+ adcq $0, %rbx
+ movq -8(%rsp), %rsi # 8-byte Reload
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ movq -56(%rsp), %r12 # 8-byte Reload
+ mulq %r12
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %rsi, %rax
+ movq -72(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rsi, %rax
+ movq -64(%rsp), %r11 # 8-byte Reload
+ mulq %r11
+ movq %rdx, %r15
+ movq %rax, %rcx
+ movq %rsi, %rax
+ movq -80(%rsp), %rsi # 8-byte Reload
+ mulq %rsi
+ addq %r9, %rcx
+ adcq %rdi, %rax
+ adcq %r10, %rbp
+ adcq %r8, %r13
+ adcq $0, %rbx
+ addq %r15, %rax
+ adcq %rdx, %rbp
+ adcq -16(%rsp), %r13 # 8-byte Folded Reload
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rax, %rcx
+ subq %r11, %rcx
+ movq %rbp, %rdx
+ sbbq %rsi, %rdx
+ movq %r13, %rdi
+ sbbq %r14, %rdi
+ movq %rbx, %rsi
+ sbbq %r12, %rsi
+ cmovsq %rax, %rcx
+ movq -88(%rsp), %rax # 8-byte Reload
+ movq %rcx, (%rax)
+ cmovsq %rbp, %rdx
+ movq %rdx, 8(%rax)
+ cmovsq %r13, %rdi
+ movq %rdi, 16(%rax)
+ cmovsq %rbx, %rsi
+ movq %rsi, 24(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end54:
+ .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L
+
+ .globl mcl_fp_montRed4L
+ .align 16, 0x90
+ .type mcl_fp_montRed4L,@function
+mcl_fp_montRed4L: # @mcl_fp_montRed4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq (%rcx), %rdi
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ movq (%rsi), %r12
+ movq %r12, %rbx
+ imulq %rax, %rbx
+ movq %rax, %r9
+ movq 24(%rcx), %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r11
+ movq %rdx, %r8
+ movq 16(%rcx), %rbp
+ movq %rbp, -32(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rbp, %r13
+ movq %rax, %r14
+ movq %rdx, %r10
+ movq 8(%rcx), %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rcx, %rbp
+ movq %rdx, %r15
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdx, %rbx
+ addq %rcx, %rbx
+ adcq %r14, %r15
+ adcq %r11, %r10
+ adcq $0, %r8
+ movq 56(%rsi), %rcx
+ movq 48(%rsi), %rdx
+ addq %r12, %rax
+ movq 40(%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %r10
+ adcq 32(%rsi), %r8
+ adcq $0, %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, %r12
+ adcq $0, %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq %rbx, %rsi
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq %r13
+ movq %rdx, %r14
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %rsi, %rax
+ movq -48(%rsp), %r13 # 8-byte Reload
+ mulq %r13
+ movq %rdx, %rsi
+ addq %rbp, %rsi
+ adcq %r9, %rcx
+ adcq -72(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %r11
+ addq %rbx, %rax
+ adcq %r15, %rsi
+ adcq %r10, %rcx
+ adcq %r8, %r14
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r12
+ movq %r12, -64(%rsp) # 8-byte Spill
+ movq -16(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ adcq $0, %rdi
+ movq %rsi, %rbx
+ imulq -40(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ movq -8(%rsp), %r12 # 8-byte Reload
+ mulq %r12
+ movq %rdx, %r8
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq %r13
+ movq %rdx, %rbx
+ addq %r9, %rbx
+ adcq -72(%rsp), %r15 # 8-byte Folded Reload
+ adcq -16(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r8
+ addq %rsi, %rax
+ adcq %rcx, %rbx
+ adcq %r14, %r15
+ adcq %r11, %r10
+ adcq -64(%rsp), %r8 # 8-byte Folded Reload
+ adcq $0, %rbp
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ adcq $0, %rdi
+ movq -40(%rsp), %rcx # 8-byte Reload
+ imulq %rbx, %rcx
+ movq %rcx, %rax
+ mulq %r12
+ movq %rdx, %r13
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ movq -32(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ movq %rdx, %r11
+ movq %rax, %r12
+ movq %rcx, %rax
+ movq %rcx, %r9
+ movq -24(%rsp), %rsi # 8-byte Reload
+ mulq %rsi
+ movq %rdx, %rbp
+ movq %rax, %rcx
+ movq %r9, %rax
+ movq -48(%rsp), %r9 # 8-byte Reload
+ mulq %r9
+ addq %rcx, %rdx
+ adcq %r12, %rbp
+ adcq -40(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbx, %rax
+ adcq %r15, %rdx
+ adcq %r10, %rbp
+ adcq %r8, %r11
+ adcq -16(%rsp), %r13 # 8-byte Folded Reload
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %r9, %rax
+ movq %rbp, %rcx
+ sbbq %rsi, %rcx
+ movq %r11, %rbx
+ sbbq %r14, %rbx
+ movq %r13, %rsi
+ sbbq -8(%rsp), %rsi # 8-byte Folded Reload
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %r13, %rsi
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq -56(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rbp, %rcx
+ movq %rcx, 8(%rdx)
+ cmovneq %r11, %rbx
+ movq %rbx, 16(%rdx)
+ movq %rsi, 24(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end55:
+ .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L
+
+ .globl mcl_fp_addPre4L
+ .align 16, 0x90
+ .type mcl_fp_addPre4L,@function
+mcl_fp_addPre4L: # @mcl_fp_addPre4L
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ adcq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end56:
+ .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L
+
+ .globl mcl_fp_subPre4L
+ .align 16, 0x90
+ .type mcl_fp_subPre4L,@function
+mcl_fp_subPre4L: # @mcl_fp_subPre4L
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end57:
+ .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L
+
+ .globl mcl_fp_shr1_4L
+ .align 16, 0x90
+ .type mcl_fp_shr1_4L,@function
+mcl_fp_shr1_4L: # @mcl_fp_shr1_4L
+# BB#0:
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrq %rax
+ movq %rax, 24(%rdi)
+ retq
+.Lfunc_end58:
+ .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L
+
+ .globl mcl_fp_add4L
+ .align 16, 0x90
+ .type mcl_fp_add4L,@function
+mcl_fp_add4L: # @mcl_fp_add4L
+# BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r9
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ adcq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r9
+ sbbq 24(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB59_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ movq %r8, 24(%rdi)
+.LBB59_2: # %carry
+ retq
+.Lfunc_end59:
+ .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L
+
+ .globl mcl_fp_addNF4L
+ .align 16, 0x90
+ .type mcl_fp_addNF4L,@function
+mcl_fp_addNF4L: # @mcl_fp_addNF4L
+# BB#0:
+ pushq %rbx
+ movq 24(%rdx), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %r11
+ movq 8(%rdx), %r10
+ addq (%rsi), %r11
+ adcq 8(%rsi), %r10
+ adcq 16(%rsi), %r9
+ adcq 24(%rsi), %r8
+ movq %r11, %rsi
+ subq (%rcx), %rsi
+ movq %r10, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r9, %rax
+ sbbq 16(%rcx), %rax
+ movq %r8, %rbx
+ sbbq 24(%rcx), %rbx
+ testq %rbx, %rbx
+ cmovsq %r11, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r10, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r9, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r8, %rbx
+ movq %rbx, 24(%rdi)
+ popq %rbx
+ retq
+.Lfunc_end60:
+ .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L
+
+ .globl mcl_fp_sub4L
+ .align 16, 0x90
+ .type mcl_fp_sub4L,@function
+mcl_fp_sub4L: # @mcl_fp_sub4L
+# BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r9
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r9, 16(%rdi)
+ sbbq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB61_2
+# BB#1: # %nocarry
+ retq
+.LBB61_2: # %carry
+ movq 24(%rcx), %r10
+ movq 8(%rcx), %rsi
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %r10
+ movq %r10, 24(%rdi)
+ retq
+.Lfunc_end61:
+ .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L
+
+ .globl mcl_fp_subNF4L
+ .align 16, 0x90
+ .type mcl_fp_subNF4L,@function
+mcl_fp_subNF4L: # @mcl_fp_subNF4L
+# BB#0:
+ pushq %rbx
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r8
+ movq (%rsi), %r9
+ movq 8(%rsi), %r10
+ subq (%rdx), %r9
+ sbbq 8(%rdx), %r10
+ sbbq 16(%rdx), %r8
+ sbbq 24(%rdx), %r11
+ movq %r11, %rdx
+ sarq $63, %rdx
+ movq 24(%rcx), %rsi
+ andq %rdx, %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ movq 8(%rcx), %rbx
+ andq %rdx, %rbx
+ andq (%rcx), %rdx
+ addq %r9, %rdx
+ movq %rdx, (%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r8, %rax
+ movq %rax, 16(%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ popq %rbx
+ retq
+.Lfunc_end62:
+ .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L
+
+ .globl mcl_fpDbl_add4L
+ .align 16, 0x90
+ .type mcl_fpDbl_add4L,@function
+mcl_fpDbl_add4L: # @mcl_fpDbl_add4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rdx), %r15
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %rsi
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r15, %rbp
+ movq %rbp, 24(%rdi)
+ adcq %r14, %rsi
+ adcq %r11, %r13
+ adcq %r10, %r12
+ adcq %r9, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rsi, %rdx
+ subq (%rcx), %rdx
+ movq %r13, %rbp
+ sbbq 8(%rcx), %rbp
+ movq %r12, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r8, %r9
+ sbbq 24(%rcx), %r9
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rsi, %rdx
+ movq %rdx, 32(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbp
+ movq %rbp, 40(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 48(%rdi)
+ cmovneq %r8, %r9
+ movq %r9, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end63:
+ .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L
+
+ .globl mcl_fpDbl_sub4L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub4L,@function
+mcl_fpDbl_sub4L: # @mcl_fpDbl_sub4L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq (%rsi), %rbx
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ movq %rbx, (%rdi)
+ movq 8(%rsi), %rbx
+ sbbq 8(%rdx), %rbx
+ movq %rbx, 8(%rdi)
+ movq 16(%rsi), %rbx
+ sbbq 16(%rdx), %rbx
+ movq %rbx, 16(%rdi)
+ movq 24(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 32(%rsi), %r12
+ sbbq %rdx, %r12
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %r15
+ sbbq %r11, %r15
+ sbbq %r10, %r14
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 24(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 8(%rcx), %rax
+ addq %r12, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r15, %rax
+ movq %rax, 40(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 48(%rdi)
+ adcq %r8, %rbx
+ movq %rbx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end64:
+ .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L
+
+ .globl mcl_fp_mulUnitPre5L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre5L,@function
+mcl_fp_mulUnitPre5L: # @mcl_fp_mulUnitPre5L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r12, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r14, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r11, %r15
+ movq %r15, 24(%rdi)
+ adcq %r9, %r10
+ movq %r10, 32(%rdi)
+ adcq $0, %r8
+ movq %r8, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end65:
+ .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L
+
+ .globl mcl_fpDbl_mulPre5L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre5L,@function
+mcl_fpDbl_mulPre5L: # @mcl_fpDbl_mulPre5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rsi, %r9
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%r9), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 8(%r9), %rbx
+ movq %rbx, -48(%rsp) # 8-byte Spill
+ movq (%rdx), %rbp
+ movq %rdx, %r8
+ mulq %rbp
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 16(%r9), %r13
+ movq 24(%r9), %r14
+ movq 32(%r9), %r15
+ movq %rax, (%rdi)
+ movq %r15, %rax
+ mulq %rbp
+ movq %rdx, %r10
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %r14, %rax
+ mulq %rbp
+ movq %rdx, %r12
+ movq %rax, %r11
+ movq %r13, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rdx, %rbp
+ movq %rax, %rdi
+ addq -32(%rsp), %rdi # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %r11, %rcx
+ adcq -40(%rsp), %r12 # 8-byte Folded Reload
+ adcq $0, %r10
+ movq 8(%r8), %r11
+ movq %r15, %rax
+ mulq %r11
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq %r14, %rax
+ mulq %r11
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %r13, %rax
+ mulq %r11
+ movq %rdx, %r8
+ movq %rax, %r13
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %r11
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %r11
+ addq %rdi, %rax
+ movq -8(%rsp), %rdi # 8-byte Reload
+ movq %rax, 8(%rdi)
+ adcq %rbp, %rbx
+ adcq %rcx, %r13
+ adcq %r12, %r15
+ adcq %r10, %rsi
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %rdx, %rbx
+ adcq %r14, %r13
+ adcq %r8, %r15
+ adcq -40(%rsp), %rsi # 8-byte Folded Reload
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq 32(%r9), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq -16(%rsp), %rdi # 8-byte Reload
+ movq 16(%rdi), %r12
+ mulq %r12
+ movq %rax, %r11
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq 24(%r9), %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ mulq %r12
+ movq %rax, %r10
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 16(%r9), %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ mulq %r12
+ movq %rax, %r8
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq (%r9), %r14
+ movq 8(%r9), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulq %r12
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %r14, %rax
+ mulq %r12
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ addq %rbx, %rax
+ movq -8(%rsp), %rbx # 8-byte Reload
+ movq %rax, 16(%rbx)
+ adcq %r13, %rbp
+ adcq %r15, %r8
+ adcq %rsi, %r10
+ adcq %rcx, %r11
+ sbbq %rcx, %rcx
+ movq 24(%rdi), %rsi
+ movq -40(%rsp), %rax # 8-byte Reload
+ mulq %rsi
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq -56(%rsp), %rax # 8-byte Reload
+ mulq %rsi
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %r14, %rax
+ mulq %rsi
+ movq %rdx, %r15
+ movq %rax, %rdi
+ movq -72(%rsp), %rax # 8-byte Reload
+ mulq %rsi
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq -80(%rsp), %rax # 8-byte Reload
+ mulq %rsi
+ andl $1, %ecx
+ addq -88(%rsp), %rbp # 8-byte Folded Reload
+ adcq -64(%rsp), %r8 # 8-byte Folded Reload
+ adcq -48(%rsp), %r10 # 8-byte Folded Reload
+ adcq -32(%rsp), %r11 # 8-byte Folded Reload
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ addq %rdi, %rbp
+ movq %rbp, 24(%rbx)
+ adcq %r12, %r8
+ adcq %rax, %r10
+ adcq %r14, %r11
+ adcq %r13, %rcx
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq %r15, %r8
+ adcq -56(%rsp), %r10 # 8-byte Folded Reload
+ adcq %rdx, %r11
+ adcq -72(%rsp), %rcx # 8-byte Folded Reload
+ adcq -40(%rsp), %rsi # 8-byte Folded Reload
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdi
+ movq %rdi, %rax
+ mulq 32(%r9)
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq 24(%r9)
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq 16(%r9)
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq 8(%r9)
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq (%r9)
+ addq %r8, %rax
+ movq -8(%rsp), %rdi # 8-byte Reload
+ movq %rax, 32(%rdi)
+ adcq %r10, %rbp
+ adcq %r11, %rbx
+ adcq %rcx, %r13
+ adcq %rsi, %r15
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r14, %r13
+ movq %r13, 56(%rdi)
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, 64(%rdi)
+ adcq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end66:
+ .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L
+
+ .globl mcl_fpDbl_sqrPre5L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre5L,@function
+mcl_fpDbl_sqrPre5L: # @mcl_fpDbl_sqrPre5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq 32(%rsi), %r11
+ movq (%rsi), %r13
+ movq 8(%rsi), %rbx
+ movq %r11, %rax
+ mulq %rbx
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rbp
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rcx
+ movq %rcx, %rax
+ mulq %rbx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ mulq %r13
+ movq %rdx, %r8
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %r13
+ movq %rdx, %r9
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq %r13
+ movq %rdx, %r10
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq %r13
+ movq %rdx, %rbx
+ movq %rax, %r14
+ movq %r13, %rax
+ mulq %r13
+ movq %rax, (%rdi)
+ addq %r14, %rdx
+ adcq %rbx, %r12
+ adcq %rbp, %r10
+ adcq -72(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r8
+ addq %r14, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r15, %r12
+ adcq -56(%rsp), %r10 # 8-byte Folded Reload
+ adcq -48(%rsp), %r9 # 8-byte Folded Reload
+ adcq -40(%rsp), %r8 # 8-byte Folded Reload
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq %rbx, %r12
+ adcq -64(%rsp), %r10 # 8-byte Folded Reload
+ adcq -32(%rsp), %r9 # 8-byte Folded Reload
+ adcq -24(%rsp), %r8 # 8-byte Folded Reload
+ adcq -16(%rsp), %rdi # 8-byte Folded Reload
+ movq %r11, %rax
+ mulq %rcx
+ movq %rax, %r11
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rbx
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rax, %r14
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq (%rsi), %rbp
+ movq %rbp, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rax, %r13
+ addq %r12, %rbp
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq %rbp, 16(%rax)
+ adcq %r10, %r15
+ adcq %r9, %r13
+ adcq %r8, %r14
+ adcq %rdi, %r11
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -56(%rsp), %r15 # 8-byte Folded Reload
+ adcq -48(%rsp), %r13 # 8-byte Folded Reload
+ adcq %rdx, %r14
+ adcq -24(%rsp), %r11 # 8-byte Folded Reload
+ adcq -16(%rsp), %r10 # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq -32(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rax, %rdi
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rbp
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rcx
+ movq %rcx, %rax
+ mulq %rbx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %rbp
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rax, %rbx
+ addq %r15, %rdi
+ movq -8(%rsp), %r15 # 8-byte Reload
+ movq %rdi, 24(%r15)
+ adcq %r13, %r8
+ adcq %r14, %r12
+ adcq %r11, %rbx
+ adcq %r10, %r9
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -40(%rsp), %r8 # 8-byte Folded Reload
+ adcq -24(%rsp), %r12 # 8-byte Folded Reload
+ adcq %rbp, %rbx
+ adcq %rdx, %r9
+ adcq -32(%rsp), %r10 # 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r14
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq -16(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ addq %r8, %rsi
+ movq %rsi, 32(%r15)
+ adcq %r12, %rdi
+ adcq %rbx, %rax
+ adcq %r9, %rbp
+ adcq %r10, %r11
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r13, %rdi
+ movq %rdi, 40(%r15)
+ adcq %r14, %rax
+ movq %rax, 48(%r15)
+ adcq %rdx, %rbp
+ movq %rbp, 56(%r15)
+ adcq -24(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 64(%r15)
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 72(%r15)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end67:
+ .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L
+
+ .globl mcl_fp_mont5L
+ .align 16, 0x90
+ .type mcl_fp_mont5L,@function
+mcl_fp_mont5L: # @mcl_fp_mont5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ pushq %rax
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %r14
+ movq 24(%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r9
+ movq %rdx, %r12
+ movq 16(%rsi), %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r10
+ movq %rdx, %rbp
+ movq (%rsi), %rbx
+ movq %rbx, -80(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r11
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rdx, %r15
+ addq %rsi, %r15
+ adcq %r10, %r11
+ adcq %r9, %rbp
+ movq %rbp, -96(%rsp) # 8-byte Spill
+ adcq %r8, %r12
+ movq %r12, -112(%rsp) # 8-byte Spill
+ adcq $0, %r14
+ movq %r14, -120(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ imulq %rdx, %rbp
+ movq (%rcx), %r9
+ movq %r9, -32(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq 24(%rcx), %rsi
+ movq %rsi, -8(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbx
+ movq %rbx, -16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rdx, %r14
+ movq %rax, %r13
+ movq %rbp, %rax
+ mulq %rsi
+ movq %rdx, %rdi
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %rsi
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq %r9
+ movq %rdx, %rbp
+ addq %r12, %rbp
+ adcq %r8, %rsi
+ adcq %r10, %rbx
+ adcq %r13, %rdi
+ adcq $0, %r14
+ addq -128(%rsp), %rax # 8-byte Folded Reload
+ adcq %r15, %rbp
+ adcq %r11, %rsi
+ adcq -96(%rsp), %rbx # 8-byte Folded Reload
+ adcq -112(%rsp), %rdi # 8-byte Folded Reload
+ adcq -120(%rsp), %r14 # 8-byte Folded Reload
+ sbbq %r9, %r9
+ andl $1, %r9d
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rcx
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r15
+ movq %rdx, %rcx
+ addq %r10, %rcx
+ adcq -120(%rsp), %r8 # 8-byte Folded Reload
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ adcq -96(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbp, %r15
+ adcq %rsi, %rcx
+ adcq %rbx, %r8
+ adcq %rdi, %r12
+ adcq %r14, %r11
+ adcq %r9, %r13
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %r15, %rsi
+ imulq -40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ addq %rdi, %rbx
+ adcq -128(%rsp), %r10 # 8-byte Folded Reload
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -112(%rsp), %rbp # 8-byte Folded Reload
+ adcq $0, %r14
+ addq %r15, %rax
+ adcq %rcx, %rbx
+ adcq %r8, %r10
+ adcq %r12, %r9
+ adcq %r11, %rbp
+ adcq %r13, %r14
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rcx
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %r15
+ addq %r8, %r15
+ adcq -128(%rsp), %rdi # 8-byte Folded Reload
+ adcq -120(%rsp), %rsi # 8-byte Folded Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbx, %r12
+ adcq %r10, %r15
+ adcq %r9, %rdi
+ adcq %rbp, %rsi
+ adcq %r14, %r11
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ imulq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ addq %r14, %rbp
+ adcq %r10, %rbx
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ adcq -112(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r8
+ addq %r12, %rax
+ adcq %r15, %rbp
+ adcq %rdi, %rbx
+ adcq %rsi, %rcx
+ adcq %r11, %r9
+ adcq %r13, %r8
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rsi
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r14
+ movq %rdx, %rsi
+ addq %r12, %rsi
+ adcq %r15, %rdi
+ adcq -120(%rsp), %r11 # 8-byte Folded Reload
+ adcq -112(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbp, %r14
+ adcq %rbx, %rsi
+ adcq %rcx, %rdi
+ adcq %r9, %r11
+ adcq %r8, %r10
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %r14, %rbp
+ imulq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ addq %r12, %rbp
+ adcq %r8, %rbx
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ adcq $0, %r9
+ addq %r14, %rax
+ adcq %rsi, %rbp
+ adcq %rdi, %rbx
+ adcq %r11, %rcx
+ adcq %r10, %r15
+ adcq %r13, %r9
+ movq -96(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rsi
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r8
+ addq %rdi, %r8
+ adcq -72(%rsp), %r12 # 8-byte Folded Reload
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ adcq -56(%rsp), %r13 # 8-byte Folded Reload
+ movq -48(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rbp, %r10
+ adcq %rbx, %r8
+ adcq %rcx, %r12
+ adcq %r15, %r11
+ adcq %r9, %r13
+ adcq %r14, %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ sbbq %rcx, %rcx
+ movq -40(%rsp), %rsi # 8-byte Reload
+ imulq %r10, %rsi
+ movq %rsi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ addq %r9, %rdx
+ adcq %r15, %rdi
+ adcq -56(%rsp), %rbp # 8-byte Folded Reload
+ adcq -40(%rsp), %rbx # 8-byte Folded Reload
+ adcq $0, %r14
+ andl $1, %ecx
+ addq %r10, %rax
+ adcq %r8, %rdx
+ adcq %r12, %rdi
+ adcq %r11, %rbp
+ adcq %r13, %rbx
+ adcq -48(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %rdx, %rax
+ subq -32(%rsp), %rax # 8-byte Folded Reload
+ movq %rdi, %r8
+ sbbq -24(%rsp), %r8 # 8-byte Folded Reload
+ movq %rbp, %r9
+ sbbq -16(%rsp), %r9 # 8-byte Folded Reload
+ movq %rbx, %r10
+ sbbq -8(%rsp), %r10 # 8-byte Folded Reload
+ movq %r14, %r11
+ sbbq (%rsp), %r11 # 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rbx, %r10
+ testb %cl, %cl
+ cmovneq %rdx, %rax
+ movq -104(%rsp), %rcx # 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %rdi, %r8
+ movq %r8, 8(%rcx)
+ cmovneq %rbp, %r9
+ movq %r9, 16(%rcx)
+ movq %r10, 24(%rcx)
+ cmovneq %r14, %r11
+ movq %r11, 32(%rcx)
+ addq $8, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end68:
+ .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L
+
+ .globl mcl_fp_montNF5L
+ .align 16, 0x90
+ .type mcl_fp_montNF5L,@function
+mcl_fp_montNF5L: # @mcl_fp_montNF5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq (%rdx), %rbx
+ mulq %rbx
+ movq %rax, %r15
+ movq %rdx, %r10
+ movq 24(%rsi), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rax, %r13
+ movq %rdx, %r14
+ movq 16(%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rax, %r8
+ movq %rdx, %r9
+ movq (%rsi), %rbp
+ movq %rbp, -80(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rdx, %r11
+ movq %rax, %rdi
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdx, %r12
+ addq %rdi, %r12
+ adcq %r8, %r11
+ adcq %r13, %r9
+ adcq %r15, %r14
+ adcq $0, %r10
+ movq -8(%rcx), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ imulq %rdx, %rsi
+ movq (%rcx), %r8
+ movq %r8, -96(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rdi
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbx
+ movq %rbx, -24(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rbp
+ movq %rbp, -72(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq %rsi, %rax
+ mulq %rdi
+ movq %rdx, -128(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %r15
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %r13
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq %r8
+ addq -112(%rsp), %rax # 8-byte Folded Reload
+ adcq %r12, %rbp
+ adcq %r11, %rbx
+ adcq %r9, %rdi
+ adcq %r14, %rcx
+ adcq $0, %r10
+ addq %rdx, %rbp
+ adcq %r13, %rbx
+ adcq %r15, %rdi
+ adcq -128(%rsp), %rcx # 8-byte Folded Reload
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rsi
+ movq %rsi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r8
+ movq %rsi, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r14
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %rsi
+ movq %rdx, %r12
+ addq %r14, %r12
+ adcq %r8, %r11
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbp, %rsi
+ adcq %rbx, %r12
+ adcq %rdi, %r11
+ adcq %rcx, %r9
+ adcq %r10, %r15
+ adcq $0, %r13
+ movq %rsi, %rdi
+ imulq -32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 
8-byte Spill + movq %rax, %rbp + movq %rdi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, -120(%rsp) # 8-byte Spill + movq %rax, %r14 + movq %rdi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r8 + movq %rdi, %rax + mulq -72(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r10 + movq %rdi, %rax + mulq -96(%rsp) # 8-byte Folded Reload + addq %rsi, %rax + adcq %r12, %r10 + adcq %r11, %r8 + adcq %r9, %r14 + adcq %r15, %rbp + adcq $0, %r13 + addq %rdx, %r10 + adcq %rbx, %r8 + adcq %rcx, %r14 + adcq -120(%rsp), %rbp # 8-byte Folded Reload + adcq -112(%rsp), %r13 # 8-byte Folded Reload + movq -40(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rsi + movq %rsi, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -56(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -64(%rsp) # 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %rbx + movq %rsi, %rax + mulq -88(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r12 + movq %rsi, %rax + mulq -80(%rsp) # 8-byte Folded Reload + movq %rax, %r11 + movq %rdx, %rsi + addq %r12, %rsi + adcq %rbx, %rcx + adcq -120(%rsp), %rdi # 8-byte Folded Reload + adcq -112(%rsp), %r9 # 8-byte Folded Reload + adcq $0, %r15 + addq %r10, %r11 + adcq %r8, %rsi + adcq %r14, %rcx + adcq %rbp, %rdi + adcq %r13, %r9 + adcq $0, %r15 + movq %r11, %rbx + imulq -32(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, -112(%rsp) # 8-byte Spill + movq %rax, %r13 + movq %rbx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, -120(%rsp) # 8-byte Spill + movq %rax, %r8 + movq %rbx, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, %r10 + movq %rbx, %rax + mulq -72(%rsp) # 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %rbp + movq %rbx, %rax + mulq -96(%rsp) # 8-byte Folded Reload + addq %r11, %rax + adcq %rsi, %rbp + adcq %rcx, %r10 + adcq %rdi, %r8 + adcq %r9, %r13 + adcq $0, %r15 + addq %rdx, %rbp + adcq %r12, %r10 + adcq %r14, %r8 + adcq -120(%rsp), %r13 # 8-byte Folded Reload + adcq -112(%rsp), %r15 # 8-byte Folded Reload + movq -40(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rsi + movq %rsi, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -56(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -64(%rsp) # 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %rbx + movq %rsi, %rax + mulq -88(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r12 + movq %rsi, %rax + mulq -80(%rsp) # 8-byte Folded Reload + movq %rax, %r14 + movq %rdx, %rsi + addq %r12, %rsi + adcq %rbx, %rcx + adcq -120(%rsp), %rdi # 8-byte Folded Reload + adcq -112(%rsp), %r9 # 8-byte Folded Reload + adcq $0, %r11 + addq %rbp, %r14 + adcq %r10, %rsi + adcq %r8, %rcx + adcq %r13, %rdi + adcq %r15, %r9 + adcq $0, %r11 + movq %r14, %rbx + imulq -32(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, -112(%rsp) # 8-byte Spill + movq %rax, %r13 + movq %rbx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, -120(%rsp) # 8-byte Spill + movq %rax, %r8 + movq %rbx, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %r10 + movq %rbx, %rax + mulq -72(%rsp) # 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %rbp 
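+# (Sketch with illustrative names, not identifiers from this file.)
+# Each unrolled block of mcl_fp_montNF5L is one step of word-serial
+# Montgomery multiplication:
+#   t = t + a*b[i];  m = t[0]*n' mod 2^64;  t = (t + m*p) >> 64
+# where n' (presumably -p^-1 mod 2^64, loaded from -8(%rcx)) is kept
+# spilled at -32(%rsp) and applied with imulq before each reduction pass.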
+ movq %rbx, %rax + mulq -96(%rsp) # 8-byte Folded Reload + addq %r14, %rax + adcq %rsi, %rbp + adcq %rcx, %r10 + adcq %rdi, %r8 + adcq %r9, %r13 + adcq $0, %r11 + addq %rdx, %rbp + adcq %r12, %r10 + adcq %r15, %r8 + adcq -120(%rsp), %r13 # 8-byte Folded Reload + adcq -112(%rsp), %r11 # 8-byte Folded Reload + movq -40(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rcx + movq %rcx, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, -40(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -56(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -48(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -64(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -56(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -88(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %rsi + movq %rcx, %rax + mulq -80(%rsp) # 8-byte Folded Reload + movq %rax, %r12 + movq %rdx, %rdi + addq %rsi, %rdi + adcq -56(%rsp), %r15 # 8-byte Folded Reload + adcq -48(%rsp), %r14 # 8-byte Folded Reload + adcq -40(%rsp), %r9 # 8-byte Folded Reload + adcq $0, %rbx + addq %rbp, %r12 + adcq %r10, %rdi + adcq %r8, %r15 + adcq %r13, %r14 + adcq %r11, %r9 + adcq $0, %rbx + movq -32(%rsp), %r8 # 8-byte Reload + imulq %r12, %r8 + movq %r8, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, -32(%rsp) # 8-byte Spill + movq %rax, %rcx + movq %r8, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rax, %rbp + movq %r8, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, %rsi + movq %r8, %rax + movq %r8, %r13 + movq -96(%rsp), %r10 # 8-byte Reload + mulq %r10 + movq %rdx, %r11 + movq %rax, %r8 + movq %r13, %rax + movq -72(%rsp), %r13 # 8-byte Reload + mulq %r13 + addq %r12, %r8 + adcq %rdi, %rax + adcq %r15, %rsi + adcq %r14, %rbp + adcq %r9, %rcx + adcq $0, %rbx + addq %r11, %rax + adcq %rdx, %rsi + adcq -48(%rsp), %rbp # 8-byte Folded Reload + adcq -40(%rsp), %rcx # 8-byte Folded Reload + adcq -32(%rsp), %rbx # 8-byte Folded Reload + movq %rax, %r11 + subq %r10, %r11 + movq %rsi, %r10 + sbbq %r13, %r10 + movq %rbp, %r8 + sbbq -24(%rsp), %r8 # 8-byte Folded Reload + movq %rcx, %r9 + sbbq -16(%rsp), %r9 # 8-byte Folded Reload + movq %rbx, %rdx + sbbq -8(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, %rdi + sarq $63, %rdi + cmovsq %rax, %r11 + movq -104(%rsp), %rax # 8-byte Reload + movq %r11, (%rax) + cmovsq %rsi, %r10 + movq %r10, 8(%rax) + cmovsq %rbp, %r8 + movq %r8, 16(%rax) + cmovsq %rcx, %r9 + movq %r9, 24(%rax) + cmovsq %rbx, %rdx + movq %rdx, 32(%rax) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end69: + .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L + + .globl mcl_fp_montRed5L + .align 16, 0x90 + .type mcl_fp_montRed5L,@function +mcl_fp_montRed5L: # @mcl_fp_montRed5L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rdi, -80(%rsp) # 8-byte Spill + movq -8(%rcx), %rax + movq %rax, -64(%rsp) # 8-byte Spill + movq (%rcx), %rdi + movq %rdi, -24(%rsp) # 8-byte Spill + movq (%rsi), %r9 + movq %r9, %rbp + imulq %rax, %rbp + movq 32(%rcx), %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rax, %r10 + movq %rdx, %r13 + movq 24(%rcx), %rdx + movq %rdx, -8(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rax, %r14 + movq %rdx, %r11 + movq 16(%rcx), %rdx + movq %rdx, -16(%rsp) # 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, -32(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 
%rdx + movq %rdx, %r15 + movq %rax, %r12 + movq %rbp, %rax + mulq %rcx + movq %rdx, %r8 + movq %rax, %rbx + movq %rbp, %rax + mulq %rdi + movq %rdx, %rcx + addq %rbx, %rcx + adcq %r12, %r8 + adcq %r14, %r15 + adcq %r10, %r11 + adcq $0, %r13 + addq %r9, %rax + movq 72(%rsi), %rax + movq 64(%rsi), %rdx + adcq 8(%rsi), %rcx + adcq 16(%rsi), %r8 + adcq 24(%rsi), %r15 + adcq 32(%rsi), %r11 + adcq 40(%rsi), %r13 + movq %r13, -88(%rsp) # 8-byte Spill + movq 56(%rsi), %rdi + movq 48(%rsi), %rsi + adcq $0, %rsi + movq %rsi, -96(%rsp) # 8-byte Spill + adcq $0, %rdi + movq %rdi, -72(%rsp) # 8-byte Spill + adcq $0, %rdx + movq %rdx, -56(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, -40(%rsp) # 8-byte Spill + sbbq %rdi, %rdi + andl $1, %edi + movq %rcx, %rsi + movq -64(%rsp), %r9 # 8-byte Reload + imulq %r9, %rsi + movq %rsi, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, -104(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %rbx + movq %rsi, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %rbp + movq %rsi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + addq %rbp, %rsi + adcq %rbx, %r13 + adcq -112(%rsp), %r12 # 8-byte Folded Reload + adcq -104(%rsp), %r14 # 8-byte Folded Reload + adcq $0, %r10 + addq %rcx, %rax + adcq %r8, %rsi + adcq %r15, %r13 + adcq %r11, %r12 + adcq -88(%rsp), %r14 # 8-byte Folded Reload + adcq -96(%rsp), %r10 # 8-byte Folded Reload + adcq $0, -72(%rsp) # 8-byte Folded Spill + adcq $0, -56(%rsp) # 8-byte Folded Spill + adcq $0, -40(%rsp) # 8-byte Folded Spill + adcq $0, %rdi + movq %rsi, %rcx + imulq %r9, %rcx + movq %rcx, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -88(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -96(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -104(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r8 + movq %rcx, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %rbp + addq %r8, %rbp + adcq -104(%rsp), %rbx # 8-byte Folded Reload + adcq -96(%rsp), %r15 # 8-byte Folded Reload + adcq -88(%rsp), %r11 # 8-byte Folded Reload + adcq $0, %r9 + addq %rsi, %rax + adcq %r13, %rbp + adcq %r12, %rbx + adcq %r14, %r15 + adcq %r10, %r11 + adcq -72(%rsp), %r9 # 8-byte Folded Reload + adcq $0, -56(%rsp) # 8-byte Folded Spill + adcq $0, -40(%rsp) # 8-byte Folded Spill + adcq $0, %rdi + movq %rbp, %rcx + imulq -64(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, %rax + movq -48(%rsp), %rsi # 8-byte Reload + mulq %rsi + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -96(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %r10 + movq %rcx, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %r8 + movq %rcx, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + addq %r8, %rcx + adcq %r10, %r13 + adcq -96(%rsp), %r12 # 8-byte Folded Reload + adcq -88(%rsp), %r14 # 8-byte Folded Reload + movq -72(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %rbp, %rax + adcq %rbx, %rcx + adcq %r15, %r13 + adcq %r11, 
%r12 + adcq %r9, %r14 + adcq -56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + adcq $0, -40(%rsp) # 8-byte Folded Spill + adcq $0, %rdi + movq -64(%rsp), %rbx # 8-byte Reload + imulq %rcx, %rbx + movq %rbx, %rax + mulq %rsi + movq %rdx, %rsi + movq %rax, -56(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -64(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r15 + movq %rbx, %rax + movq %rbx, %r10 + movq -32(%rsp), %r11 # 8-byte Reload + mulq %r11 + movq %rdx, %rbx + movq %rax, %r8 + movq %r10, %rax + movq -24(%rsp), %r10 # 8-byte Reload + mulq %r10 + addq %r8, %rdx + adcq %r15, %rbx + adcq -64(%rsp), %rbp # 8-byte Folded Reload + adcq -56(%rsp), %r9 # 8-byte Folded Reload + adcq $0, %rsi + addq %rcx, %rax + adcq %r13, %rdx + adcq %r12, %rbx + adcq %r14, %rbp + adcq -72(%rsp), %r9 # 8-byte Folded Reload + adcq -40(%rsp), %rsi # 8-byte Folded Reload + adcq $0, %rdi + movq %rdx, %rax + subq %r10, %rax + movq %rbx, %rcx + sbbq %r11, %rcx + movq %rbp, %r8 + sbbq -16(%rsp), %r8 # 8-byte Folded Reload + movq %r9, %r10 + sbbq -8(%rsp), %r10 # 8-byte Folded Reload + movq %rsi, %r11 + sbbq -48(%rsp), %r11 # 8-byte Folded Reload + sbbq $0, %rdi + andl $1, %edi + cmovneq %rsi, %r11 + testb %dil, %dil + cmovneq %rdx, %rax + movq -80(%rsp), %rdx # 8-byte Reload + movq %rax, (%rdx) + cmovneq %rbx, %rcx + movq %rcx, 8(%rdx) + cmovneq %rbp, %r8 + movq %r8, 16(%rdx) + cmovneq %r9, %r10 + movq %r10, 24(%rdx) + movq %r11, 32(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end70: + .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L + + .globl mcl_fp_addPre5L + .align 16, 0x90 + .type mcl_fp_addPre5L,@function +mcl_fp_addPre5L: # @mcl_fp_addPre5L +# BB#0: + movq 32(%rdx), %r8 + movq 24(%rdx), %r9 + movq 24(%rsi), %r11 + movq 32(%rsi), %r10 + movq 16(%rdx), %rcx + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rcx + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %rcx, 16(%rdi) + adcq %r9, %r11 + movq %r11, 24(%rdi) + adcq %r8, %r10 + movq %r10, 32(%rdi) + sbbq %rax, %rax + andl $1, %eax + retq +.Lfunc_end71: + .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L + + .globl mcl_fp_subPre5L + .align 16, 0x90 + .type mcl_fp_subPre5L,@function +mcl_fp_subPre5L: # @mcl_fp_subPre5L +# BB#0: + pushq %rbx + movq 32(%rsi), %r10 + movq 24(%rdx), %r8 + movq 32(%rdx), %r9 + movq 24(%rsi), %r11 + movq 16(%rsi), %rcx + movq (%rsi), %rbx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rbx + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %rcx + movq %rbx, (%rdi) + movq %rsi, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r8, %r11 + movq %r11, 24(%rdi) + sbbq %r9, %r10 + movq %r10, 32(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + retq +.Lfunc_end72: + .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L + + .globl mcl_fp_shr1_5L + .align 16, 0x90 + .type mcl_fp_shr1_5L,@function +mcl_fp_shr1_5L: # @mcl_fp_shr1_5L +# BB#0: + movq 32(%rsi), %r8 + movq 24(%rsi), %rcx + movq 16(%rsi), %rdx + movq (%rsi), %rax + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rax + movq %rax, (%rdi) + shrdq $1, %rdx, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rcx, %rdx + movq %rdx, 16(%rdi) + shrdq $1, %r8, %rcx + movq %rcx, 24(%rdi) + shrq %r8 + movq %r8, 32(%rdi) + retq +.Lfunc_end73: + .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L + + .globl mcl_fp_add5L + .align 16, 0x90 + .type mcl_fp_add5L,@function +mcl_fp_add5L: # 
@mcl_fp_add5L +# BB#0: + pushq %rbx + movq 32(%rdx), %r11 + movq 24(%rdx), %rbx + movq 24(%rsi), %r9 + movq 32(%rsi), %r8 + movq 16(%rdx), %r10 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r10 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + adcq %rbx, %r9 + movq %r9, 24(%rdi) + adcq %r11, %r8 + movq %r8, 32(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r10 + sbbq 24(%rcx), %r9 + sbbq 32(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne .LBB74_2 +# BB#1: # %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + movq %r9, 24(%rdi) + movq %r8, 32(%rdi) +.LBB74_2: # %carry + popq %rbx + retq +.Lfunc_end74: + .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L + + .globl mcl_fp_addNF5L + .align 16, 0x90 + .type mcl_fp_addNF5L,@function +mcl_fp_addNF5L: # @mcl_fp_addNF5L +# BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 32(%rdx), %r8 + movq 24(%rdx), %r9 + movq 16(%rdx), %r10 + movq (%rdx), %r14 + movq 8(%rdx), %r11 + addq (%rsi), %r14 + adcq 8(%rsi), %r11 + adcq 16(%rsi), %r10 + adcq 24(%rsi), %r9 + adcq 32(%rsi), %r8 + movq %r14, %rsi + subq (%rcx), %rsi + movq %r11, %rdx + sbbq 8(%rcx), %rdx + movq %r10, %rbx + sbbq 16(%rcx), %rbx + movq %r9, %r15 + sbbq 24(%rcx), %r15 + movq %r8, %rax + sbbq 32(%rcx), %rax + movq %rax, %rcx + sarq $63, %rcx + cmovsq %r14, %rsi + movq %rsi, (%rdi) + cmovsq %r11, %rdx + movq %rdx, 8(%rdi) + cmovsq %r10, %rbx + movq %rbx, 16(%rdi) + cmovsq %r9, %r15 + movq %r15, 24(%rdi) + cmovsq %r8, %rax + movq %rax, 32(%rdi) + popq %rbx + popq %r14 + popq %r15 + retq +.Lfunc_end75: + .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L + + .globl mcl_fp_sub5L + .align 16, 0x90 + .type mcl_fp_sub5L,@function +mcl_fp_sub5L: # @mcl_fp_sub5L +# BB#0: + pushq %r14 + pushq %rbx + movq 32(%rsi), %r8 + movq 24(%rdx), %r11 + movq 32(%rdx), %r14 + movq 24(%rsi), %r9 + movq 16(%rsi), %r10 + movq (%rsi), %rax + movq 8(%rsi), %rsi + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %r10 + movq %rax, (%rdi) + movq %rsi, 8(%rdi) + movq %r10, 16(%rdi) + sbbq %r11, %r9 + movq %r9, 24(%rdi) + sbbq %r14, %r8 + movq %r8, 32(%rdi) + sbbq $0, %rbx + testb $1, %bl + je .LBB76_2 +# BB#1: # %carry + movq 32(%rcx), %r11 + movq 24(%rcx), %r14 + movq 8(%rcx), %rdx + movq 16(%rcx), %rbx + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %rsi, %rdx + movq %rdx, 8(%rdi) + adcq %r10, %rbx + movq %rbx, 16(%rdi) + adcq %r9, %r14 + movq %r14, 24(%rdi) + adcq %r8, %r11 + movq %r11, 32(%rdi) +.LBB76_2: # %nocarry + popq %rbx + popq %r14 + retq +.Lfunc_end76: + .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L + + .globl mcl_fp_subNF5L + .align 16, 0x90 + .type mcl_fp_subNF5L,@function +mcl_fp_subNF5L: # @mcl_fp_subNF5L +# BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 32(%rsi), %r14 + movq 24(%rsi), %r8 + movq 16(%rsi), %r9 + movq (%rsi), %r10 + movq 8(%rsi), %r11 + subq (%rdx), %r10 + sbbq 8(%rdx), %r11 + sbbq 16(%rdx), %r9 + sbbq 24(%rdx), %r8 + sbbq 32(%rdx), %r14 + movq %r14, %rdx + sarq $63, %rdx + movq %rdx, %rsi + shldq $1, %r14, %rsi + movq 8(%rcx), %rbx + andq %rsi, %rbx + andq (%rcx), %rsi + movq 32(%rcx), %r15 + andq %rdx, %r15 + movq 24(%rcx), %rax + andq %rdx, %rax + rolq %rdx + andq 16(%rcx), %rdx + addq %r10, %rsi + movq %rsi, (%rdi) + adcq %r11, %rbx + movq %rbx, 8(%rdi) + adcq %r9, %rdx + movq %rdx, 16(%rdi) + adcq %r8, %rax + movq %rax, 24(%rdi) + adcq %r14, %r15 + movq %r15, 32(%rdi) + popq %rbx + popq %r14 + popq %r15 + retq 
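+# The 5-limb add/sub helpers differ only in how they finish: the Pre
+# variants return the raw carry/borrow word, mcl_fp_add5L and
+# mcl_fp_sub5L reduce mod p behind a branch on the carry flag, and the
+# NF variants (mcl_fp_addNF5L, mcl_fp_subNF5L) stay branch-free by
+# selecting with cmov or masking on the sign word instead.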
+.Lfunc_end77: + .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L + + .globl mcl_fpDbl_add5L + .align 16, 0x90 + .type mcl_fpDbl_add5L,@function +mcl_fpDbl_add5L: # @mcl_fpDbl_add5L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 72(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 64(%rdx), %r11 + movq 56(%rdx), %r14 + movq 48(%rdx), %r15 + movq 24(%rsi), %rbp + movq 32(%rsi), %r13 + movq 16(%rdx), %r12 + movq (%rdx), %rbx + movq 8(%rdx), %rax + addq (%rsi), %rbx + adcq 8(%rsi), %rax + adcq 16(%rsi), %r12 + adcq 24(%rdx), %rbp + adcq 32(%rdx), %r13 + movq 40(%rdx), %r9 + movq %rbx, (%rdi) + movq 72(%rsi), %r8 + movq %rax, 8(%rdi) + movq 64(%rsi), %r10 + movq %r12, 16(%rdi) + movq 56(%rsi), %r12 + movq %rbp, 24(%rdi) + movq 48(%rsi), %rbp + movq 40(%rsi), %rbx + movq %r13, 32(%rdi) + adcq %r9, %rbx + adcq %r15, %rbp + adcq %r14, %r12 + adcq %r11, %r10 + adcq -8(%rsp), %r8 # 8-byte Folded Reload + sbbq %rsi, %rsi + andl $1, %esi + movq %rbx, %rax + subq (%rcx), %rax + movq %rbp, %rdx + sbbq 8(%rcx), %rdx + movq %r12, %r9 + sbbq 16(%rcx), %r9 + movq %r10, %r11 + sbbq 24(%rcx), %r11 + movq %r8, %r14 + sbbq 32(%rcx), %r14 + sbbq $0, %rsi + andl $1, %esi + cmovneq %rbx, %rax + movq %rax, 40(%rdi) + testb %sil, %sil + cmovneq %rbp, %rdx + movq %rdx, 48(%rdi) + cmovneq %r12, %r9 + movq %r9, 56(%rdi) + cmovneq %r10, %r11 + movq %r11, 64(%rdi) + cmovneq %r8, %r14 + movq %r14, 72(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end78: + .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L + + .globl mcl_fpDbl_sub5L + .align 16, 0x90 + .type mcl_fpDbl_sub5L,@function +mcl_fpDbl_sub5L: # @mcl_fpDbl_sub5L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 72(%rdx), %r9 + movq 64(%rdx), %r10 + movq 56(%rdx), %r14 + movq 16(%rsi), %r8 + movq (%rsi), %r15 + movq 8(%rsi), %r11 + xorl %eax, %eax + subq (%rdx), %r15 + sbbq 8(%rdx), %r11 + sbbq 16(%rdx), %r8 + movq 24(%rsi), %r12 + sbbq 24(%rdx), %r12 + movq %r15, (%rdi) + movq 32(%rsi), %rbx + sbbq 32(%rdx), %rbx + movq %r11, 8(%rdi) + movq 48(%rdx), %r15 + movq 40(%rdx), %rdx + movq %r8, 16(%rdi) + movq 72(%rsi), %r8 + movq %r12, 24(%rdi) + movq 64(%rsi), %r11 + movq %rbx, 32(%rdi) + movq 40(%rsi), %rbp + sbbq %rdx, %rbp + movq 56(%rsi), %r12 + movq 48(%rsi), %r13 + sbbq %r15, %r13 + sbbq %r14, %r12 + sbbq %r10, %r11 + sbbq %r9, %r8 + movl $0, %edx + sbbq $0, %rdx + andl $1, %edx + movq (%rcx), %rsi + cmoveq %rax, %rsi + testb %dl, %dl + movq 16(%rcx), %rdx + cmoveq %rax, %rdx + movq 8(%rcx), %rbx + cmoveq %rax, %rbx + movq 32(%rcx), %r9 + cmoveq %rax, %r9 + cmovneq 24(%rcx), %rax + addq %rbp, %rsi + movq %rsi, 40(%rdi) + adcq %r13, %rbx + movq %rbx, 48(%rdi) + adcq %r12, %rdx + movq %rdx, 56(%rdi) + adcq %r11, %rax + movq %rax, 64(%rdi) + adcq %r8, %r9 + movq %r9, 72(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end79: + .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L + + .globl mcl_fp_mulUnitPre6L + .align 16, 0x90 + .type mcl_fp_mulUnitPre6L,@function +mcl_fp_mulUnitPre6L: # @mcl_fp_mulUnitPre6L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rcx, %rax + mulq 40(%rsi) + movq %rdx, %r9 + movq %rax, %r8 + movq %rcx, %rax + mulq 32(%rsi) + movq %rdx, %r10 + movq %rax, %r11 + movq %rcx, %rax + mulq 24(%rsi) + movq %rdx, %r15 + movq %rax, %r14 + movq %rcx, %rax + mulq 16(%rsi) + movq %rdx, %r13 + movq %rax, 
%r12 + movq %rcx, %rax + mulq 8(%rsi) + movq %rdx, %rbx + movq %rax, %rbp + movq %rcx, %rax + mulq (%rsi) + movq %rax, (%rdi) + addq %rbp, %rdx + movq %rdx, 8(%rdi) + adcq %r12, %rbx + movq %rbx, 16(%rdi) + adcq %r14, %r13 + movq %r13, 24(%rdi) + adcq %r11, %r15 + movq %r15, 32(%rdi) + adcq %r8, %r10 + movq %r10, 40(%rdi) + adcq $0, %r9 + movq %r9, 48(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end80: + .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L + + .globl mcl_fpDbl_mulPre6L + .align 16, 0x90 + .type mcl_fpDbl_mulPre6L,@function +mcl_fpDbl_mulPre6L: # @mcl_fpDbl_mulPre6L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, -16(%rsp) # 8-byte Spill + movq %rsi, %r8 + movq %rdi, -8(%rsp) # 8-byte Spill + movq (%r8), %rax + movq %rax, -24(%rsp) # 8-byte Spill + movq 8(%r8), %r13 + movq %r13, -72(%rsp) # 8-byte Spill + movq (%rdx), %rbx + mulq %rbx + movq %rdx, -32(%rsp) # 8-byte Spill + movq 16(%r8), %rbp + movq %rbp, -64(%rsp) # 8-byte Spill + movq 24(%r8), %rsi + movq %rsi, -48(%rsp) # 8-byte Spill + movq 32(%r8), %r10 + movq 40(%r8), %r11 + movq %rax, (%rdi) + movq %r11, %rax + mulq %rbx + movq %rdx, %rcx + movq %rax, -40(%rsp) # 8-byte Spill + movq %r10, %rax + mulq %rbx + movq %rdx, %r12 + movq %rax, %rdi + movq %rsi, %rax + mulq %rbx + movq %rdx, %r9 + movq %rax, %r14 + movq %rbp, %rax + mulq %rbx + movq %rdx, %rbp + movq %rax, %r15 + movq %r13, %rax + mulq %rbx + movq %rdx, %r13 + movq %rax, %rsi + addq -32(%rsp), %rsi # 8-byte Folded Reload + adcq %r15, %r13 + adcq %r14, %rbp + adcq %rdi, %r9 + adcq -40(%rsp), %r12 # 8-byte Folded Reload + movq %r12, %rdi + adcq $0, %rcx + movq %rcx, -56(%rsp) # 8-byte Spill + movq -16(%rsp), %r15 # 8-byte Reload + movq 8(%r15), %rcx + movq %r11, %rax + mulq %rcx + movq %rdx, -32(%rsp) # 8-byte Spill + movq %rax, %r11 + movq %r10, %rax + mulq %rcx + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rax, %r12 + movq -48(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, %r14 + movq -64(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, %rbx + movq -72(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, %r10 + movq -24(%rsp), %rax # 8-byte Reload + mulq %rcx + addq %rsi, %rax + movq -8(%rsp), %rcx # 8-byte Reload + movq %rax, 8(%rcx) + adcq %r13, %r10 + adcq %rbp, %rbx + adcq %r9, %r14 + adcq %rdi, %r12 + adcq -56(%rsp), %r11 # 8-byte Folded Reload + sbbq %rdi, %rdi + andl $1, %edi + addq %rdx, %r10 + adcq -72(%rsp), %rbx # 8-byte Folded Reload + adcq -64(%rsp), %r14 # 8-byte Folded Reload + adcq -48(%rsp), %r12 # 8-byte Folded Reload + adcq -40(%rsp), %r11 # 8-byte Folded Reload + movq %r11, -96(%rsp) # 8-byte Spill + adcq -32(%rsp), %rdi # 8-byte Folded Reload + movq 40(%r8), %rax + movq %rax, -24(%rsp) # 8-byte Spill + movq 16(%r15), %rcx + mulq %rcx + movq %rax, -40(%rsp) # 8-byte Spill + movq %rdx, -32(%rsp) # 8-byte Spill + movq 32(%r8), %rax + movq %rax, -48(%rsp) # 8-byte Spill + mulq %rcx + movq %rax, %r15 + movq %rdx, -88(%rsp) # 8-byte Spill + movq 24(%r8), %rax + movq %rax, -56(%rsp) # 8-byte Spill + mulq %rcx + movq %rax, %r11 + movq %rdx, -104(%rsp) # 8-byte Spill + movq 16(%r8), %rax + movq %rax, -64(%rsp) # 8-byte Spill + mulq %rcx + movq %rax, %rbp + movq %rdx, -112(%rsp) # 8-byte Spill + movq (%r8), %rsi + movq %rsi, -72(%rsp) # 8-byte Spill + movq 8(%r8), %rax + movq %rax, -80(%rsp) # 8-byte Spill + mulq %rcx + 
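+# mcl_fpDbl_mulPre6L forms the full 12-word product with no modular
+# reduction: for each multiplier word b[j] it runs a 6x1 mulq column,
+# folds it into the accumulator through an adcq carry chain, and
+# retires one low word of the result per pass.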
movq %rdx, %r13 + movq %rax, %r9 + movq %rsi, %rax + mulq %rcx + addq %r10, %rax + movq -8(%rsp), %r10 # 8-byte Reload + movq %rax, 16(%r10) + adcq %rbx, %r9 + adcq %r14, %rbp + adcq %r12, %r11 + adcq -96(%rsp), %r15 # 8-byte Folded Reload + movq -40(%rsp), %rax # 8-byte Reload + adcq %rdi, %rax + sbbq %rcx, %rcx + andl $1, %ecx + addq %rdx, %r9 + adcq %r13, %rbp + adcq -112(%rsp), %r11 # 8-byte Folded Reload + adcq -104(%rsp), %r15 # 8-byte Folded Reload + adcq -88(%rsp), %rax # 8-byte Folded Reload + movq %rax, -40(%rsp) # 8-byte Spill + adcq -32(%rsp), %rcx # 8-byte Folded Reload + movq -16(%rsp), %rdi # 8-byte Reload + movq 24(%rdi), %rbx + movq -24(%rsp), %rax # 8-byte Reload + mulq %rbx + movq %rdx, -88(%rsp) # 8-byte Spill + movq %rax, -32(%rsp) # 8-byte Spill + movq -48(%rsp), %rax # 8-byte Reload + mulq %rbx + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, -24(%rsp) # 8-byte Spill + movq -56(%rsp), %rax # 8-byte Reload + mulq %rbx + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, %r14 + movq -64(%rsp), %rax # 8-byte Reload + mulq %rbx + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, %r12 + movq -80(%rsp), %rax # 8-byte Reload + mulq %rbx + movq %rdx, %rsi + movq %rax, %r13 + movq -72(%rsp), %rax # 8-byte Reload + mulq %rbx + addq %r9, %rax + movq %rax, 24(%r10) + adcq %rbp, %r13 + adcq %r11, %r12 + adcq %r15, %r14 + movq -24(%rsp), %rbp # 8-byte Reload + adcq -40(%rsp), %rbp # 8-byte Folded Reload + movq -32(%rsp), %rax # 8-byte Reload + adcq %rcx, %rax + sbbq %r10, %r10 + andl $1, %r10d + addq %rdx, %r13 + adcq %rsi, %r12 + adcq -64(%rsp), %r14 # 8-byte Folded Reload + adcq -56(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, -24(%rsp) # 8-byte Spill + adcq -48(%rsp), %rax # 8-byte Folded Reload + movq %rax, -32(%rsp) # 8-byte Spill + adcq -88(%rsp), %r10 # 8-byte Folded Reload + movq 40(%r8), %rax + movq %rax, -72(%rsp) # 8-byte Spill + movq 32(%rdi), %rcx + movq 32(%r8), %rbx + movq %rbx, -112(%rsp) # 8-byte Spill + movq 24(%r8), %rsi + movq %rsi, -64(%rsp) # 8-byte Spill + movq 16(%r8), %rdi + movq %rdi, -104(%rsp) # 8-byte Spill + movq (%r8), %r15 + movq 8(%r8), %r9 + mulq %rcx + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rax, %r11 + movq %rbx, %rax + mulq %rcx + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, %r8 + movq %rsi, %rax + mulq %rcx + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, %rbx + movq %rdi, %rax + mulq %rcx + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, %rdi + movq %r9, %rax + mulq %rcx + movq %rdx, -88(%rsp) # 8-byte Spill + movq %rax, %rbp + movq %r15, %rax + mulq %rcx + movq %rdx, -96(%rsp) # 8-byte Spill + addq %r13, %rax + movq -8(%rsp), %r13 # 8-byte Reload + movq %rax, 32(%r13) + adcq %r12, %rbp + adcq %r14, %rdi + adcq -24(%rsp), %rbx # 8-byte Folded Reload + adcq -32(%rsp), %r8 # 8-byte Folded Reload + adcq %r10, %r11 + movq -16(%rsp), %rax # 8-byte Reload + movq 40(%rax), %rcx + sbbq %rsi, %rsi + movq -72(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -16(%rsp) # 8-byte Spill + movq %rax, -72(%rsp) # 8-byte Spill + movq -112(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -24(%rsp) # 8-byte Spill + movq %rax, %r14 + movq %r9, %rax + mulq %rcx + movq %rdx, -32(%rsp) # 8-byte Spill + movq %rax, %r10 + movq %r15, %rax + mulq %rcx + movq %rdx, %r12 + movq %rax, %r9 + movq -64(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, %r15 + movq -104(%rsp), %rax # 8-byte Reload + mulq %rcx + andl $1, %esi + addq -96(%rsp), %rbp # 8-byte Folded Reload + adcq -88(%rsp), %rdi # 8-byte Folded Reload + 
adcq -80(%rsp), %rbx # 8-byte Folded Reload + adcq -56(%rsp), %r8 # 8-byte Folded Reload + adcq -48(%rsp), %r11 # 8-byte Folded Reload + adcq -40(%rsp), %rsi # 8-byte Folded Reload + addq %r9, %rbp + movq %rbp, 40(%r13) + adcq %r10, %rdi + adcq %rax, %rbx + adcq %r15, %r8 + adcq %r14, %r11 + adcq -72(%rsp), %rsi # 8-byte Folded Reload + sbbq %rax, %rax + andl $1, %eax + addq %r12, %rdi + movq %rdi, 48(%r13) + adcq -32(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, 56(%r13) + adcq %rdx, %r8 + movq %r8, 64(%r13) + adcq -64(%rsp), %r11 # 8-byte Folded Reload + movq %r11, 72(%r13) + adcq -24(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 80(%r13) + adcq -16(%rsp), %rax # 8-byte Folded Reload + movq %rax, 88(%r13) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end81: + .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L + + .globl mcl_fpDbl_sqrPre6L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre6L,@function +mcl_fpDbl_sqrPre6L: # @mcl_fpDbl_sqrPre6L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, -8(%rsp) # 8-byte Spill + movq 16(%rsi), %r8 + movq %r8, -56(%rsp) # 8-byte Spill + movq 24(%rsi), %r10 + movq %r10, -40(%rsp) # 8-byte Spill + movq 32(%rsi), %r9 + movq %r9, -32(%rsp) # 8-byte Spill + movq 40(%rsi), %r11 + movq (%rsi), %rcx + movq 8(%rsi), %rbx + movq %rcx, %rax + mulq %rcx + movq %rdx, %rbp + movq %rax, (%rdi) + movq %r11, %rax + mulq %rcx + movq %rdx, %rdi + movq %rax, -16(%rsp) # 8-byte Spill + movq %r9, %rax + mulq %rcx + movq %rdx, %r14 + movq %rax, %r9 + movq %r10, %rax + mulq %rcx + movq %rdx, %r12 + movq %rax, %r10 + movq %r8, %rax + mulq %rcx + movq %rdx, %r13 + movq %rax, %r15 + movq %rbx, %rax + mulq %rcx + movq %rdx, -24(%rsp) # 8-byte Spill + movq %rax, %r8 + addq %r8, %rbp + adcq %rdx, %r15 + adcq %r10, %r13 + adcq %r9, %r12 + adcq -16(%rsp), %r14 # 8-byte Folded Reload + adcq $0, %rdi + movq %rdi, -48(%rsp) # 8-byte Spill + movq %r11, %rax + mulq %rbx + movq %rdx, -16(%rsp) # 8-byte Spill + movq %rax, %rcx + movq -32(%rsp), %rax # 8-byte Reload + mulq %rbx + movq %rdx, -32(%rsp) # 8-byte Spill + movq %rax, %r9 + movq -40(%rsp), %rax # 8-byte Reload + mulq %rbx + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rax, %r10 + movq -56(%rsp), %rax # 8-byte Reload + mulq %rbx + movq %rdx, %rdi + movq %rax, %r11 + movq %rbx, %rax + mulq %rbx + movq %rax, %rbx + addq %r8, %rbp + movq -8(%rsp), %rax # 8-byte Reload + movq %rbp, 8(%rax) + adcq %r15, %rbx + adcq %r13, %r11 + adcq %r12, %r10 + adcq %r14, %r9 + movq %rcx, %rax + adcq -48(%rsp), %rax # 8-byte Folded Reload + sbbq %rcx, %rcx + andl $1, %ecx + addq -24(%rsp), %rbx # 8-byte Folded Reload + adcq %rdx, %r11 + adcq %rdi, %r10 + adcq -40(%rsp), %r9 # 8-byte Folded Reload + adcq -32(%rsp), %rax # 8-byte Folded Reload + movq %rax, -72(%rsp) # 8-byte Spill + adcq -16(%rsp), %rcx # 8-byte Folded Reload + movq 40(%rsi), %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq 16(%rsi), %rdi + mulq %rdi + movq %rax, -80(%rsp) # 8-byte Spill + movq %rdx, -40(%rsp) # 8-byte Spill + movq 32(%rsi), %rax + movq %rax, -32(%rsp) # 8-byte Spill + mulq %rdi + movq %rax, %r12 + movq %rdx, -56(%rsp) # 8-byte Spill + movq 24(%rsi), %rbp + movq %rbp, %rax + mulq %rdi + movq %rax, %r14 + movq %r14, -96(%rsp) # 8-byte Spill + movq %rdx, -24(%rsp) # 8-byte Spill + movq (%rsi), %r15 + movq %r15, -48(%rsp) # 8-byte Spill + movq 8(%rsi), %rax + movq %rax, -64(%rsp) # 8-byte Spill + mulq %rdi + movq %rdx, -88(%rsp) # 8-byte Spill + movq %rax, %r8 + movq %r15, %rax + 
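+# mcl_fpDbl_sqrPre6L is mcl_fpDbl_mulPre6L specialized to one operand:
+# the same column-by-column mulq/adcq passes, with the diagonal words
+# produced by multiplying a limb against itself (mulq %rdi after
+# loading that limb into %rax).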
mulq %rdi + movq %rdx, -104(%rsp) # 8-byte Spill + movq %rax, %r15 + movq %rdi, %rax + mulq %rdi + movq %rax, %r13 + addq %rbx, %r15 + movq -8(%rsp), %rbx # 8-byte Reload + movq %r15, 16(%rbx) + adcq %r11, %r8 + adcq %r10, %r13 + adcq %r14, %r9 + adcq -72(%rsp), %r12 # 8-byte Folded Reload + movq -80(%rsp), %r14 # 8-byte Reload + adcq %rcx, %r14 + sbbq %rcx, %rcx + andl $1, %ecx + addq -104(%rsp), %r8 # 8-byte Folded Reload + adcq -88(%rsp), %r13 # 8-byte Folded Reload + adcq %rdx, %r9 + adcq -24(%rsp), %r12 # 8-byte Folded Reload + adcq -56(%rsp), %r14 # 8-byte Folded Reload + adcq -40(%rsp), %rcx # 8-byte Folded Reload + movq -16(%rsp), %rax # 8-byte Reload + mulq %rbp + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, -16(%rsp) # 8-byte Spill + movq -32(%rsp), %rax # 8-byte Reload + mulq %rbp + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, %r10 + movq -64(%rsp), %rax # 8-byte Reload + mulq %rbp + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, %r11 + movq -48(%rsp), %rax # 8-byte Reload + mulq %rbp + movq %rdx, -88(%rsp) # 8-byte Spill + movq %rax, %rdi + movq %rbp, %rax + mulq %rbp + movq %rax, %r15 + movq %rdx, -104(%rsp) # 8-byte Spill + addq %r8, %rdi + movq %rdi, 24(%rbx) + adcq %r13, %r11 + adcq -96(%rsp), %r9 # 8-byte Folded Reload + adcq %r12, %r15 + adcq %r14, %r10 + movq -16(%rsp), %r12 # 8-byte Reload + adcq %rcx, %r12 + sbbq %rcx, %rcx + movq (%rsi), %r8 + andl $1, %ecx + movq 8(%rsi), %rbx + movq 40(%rsi), %rdi + movq %rbx, %rax + mulq %rdi + movq %rdx, -32(%rsp) # 8-byte Spill + movq %rax, -48(%rsp) # 8-byte Spill + movq %r8, %rax + mulq %rdi + movq %rax, -64(%rsp) # 8-byte Spill + movq %rdx, -40(%rsp) # 8-byte Spill + movq 32(%rsi), %rbp + movq %rbx, %rax + mulq %rbp + movq %rdx, -96(%rsp) # 8-byte Spill + movq %rax, -120(%rsp) # 8-byte Spill + movq %r8, %rax + mulq %rbp + movq %rax, %r14 + movq %rdx, -112(%rsp) # 8-byte Spill + addq -88(%rsp), %r11 # 8-byte Folded Reload + adcq -80(%rsp), %r9 # 8-byte Folded Reload + adcq -24(%rsp), %r15 # 8-byte Folded Reload + adcq -104(%rsp), %r10 # 8-byte Folded Reload + adcq -72(%rsp), %r12 # 8-byte Folded Reload + movq %r12, -16(%rsp) # 8-byte Spill + adcq -56(%rsp), %rcx # 8-byte Folded Reload + movq 24(%rsi), %rbx + movq 16(%rsi), %r8 + movq %rbx, %rax + mulq %rdi + movq %rdx, -24(%rsp) # 8-byte Spill + movq %rax, -80(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq %rbp + movq %rdx, -104(%rsp) # 8-byte Spill + movq %rax, %rsi + movq %r8, %rax + mulq %rdi + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + movq %r8, %rax + mulq %rbp + movq %rdx, %rbx + movq %rax, %r13 + movq %rdi, %rax + mulq %rbp + movq %rdx, %r12 + movq %rax, %r8 + movq %rdi, %rax + mulq %rdi + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, %rdi + movq %rbp, %rax + mulq %rbp + addq %r14, %r11 + movq -8(%rsp), %r14 # 8-byte Reload + movq %r11, 32(%r14) + adcq -120(%rsp), %r9 # 8-byte Folded Reload + adcq %r15, %r13 + adcq %r10, %rsi + adcq -16(%rsp), %rax # 8-byte Folded Reload + adcq %r8, %rcx + sbbq %rbp, %rbp + andl $1, %ebp + addq -112(%rsp), %r9 # 8-byte Folded Reload + adcq -96(%rsp), %r13 # 8-byte Folded Reload + adcq %rbx, %rsi + adcq -104(%rsp), %rax # 8-byte Folded Reload + adcq %rdx, %rcx + adcq %r12, %rbp + addq -64(%rsp), %r9 # 8-byte Folded Reload + movq %r14, %rbx + movq %r9, 40(%rbx) + adcq -48(%rsp), %r13 # 8-byte Folded Reload + adcq -88(%rsp), %rsi # 8-byte Folded Reload + adcq -80(%rsp), %rax # 8-byte Folded Reload + adcq %r8, %rcx + adcq %rdi, %rbp + sbbq %rdx, %rdx + andl $1, %edx + addq -40(%rsp), %r13 # 8-byte 
Folded Reload + movq %r13, 48(%rbx) + adcq -32(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 56(%rbx) + adcq -72(%rsp), %rax # 8-byte Folded Reload + movq %rax, 64(%rbx) + adcq -24(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 72(%rbx) + adcq %r12, %rbp + movq %rbp, 80(%rbx) + adcq -56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 88(%rbx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end82: + .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L + + .globl mcl_fp_mont6L + .align 16, 0x90 + .type mcl_fp_mont6L,@function +mcl_fp_mont6L: # @mcl_fp_mont6L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $56, %rsp + movq %rdx, -8(%rsp) # 8-byte Spill + movq %rdi, -96(%rsp) # 8-byte Spill + movq 40(%rsi), %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq (%rdx), %rbx + mulq %rbx + movq %rax, %r8 + movq %rdx, %r14 + movq 32(%rsi), %rax + movq %rax, -24(%rsp) # 8-byte Spill + mulq %rbx + movq %rax, %r9 + movq %rdx, %r15 + movq 24(%rsi), %rax + movq %rax, -56(%rsp) # 8-byte Spill + movq 16(%rsi), %rbp + movq %rbp, -48(%rsp) # 8-byte Spill + movq (%rsi), %r12 + movq %r12, -32(%rsp) # 8-byte Spill + movq 8(%rsi), %rsi + movq %rsi, -40(%rsp) # 8-byte Spill + mulq %rbx + movq %rdx, %rdi + movq %rax, %r10 + movq %rbp, %rax + mulq %rbx + movq %rdx, %rbp + movq %rax, %r11 + movq %rsi, %rax + mulq %rbx + movq %rdx, %rsi + movq %rax, %r13 + movq %r12, %rax + mulq %rbx + movq %rax, -120(%rsp) # 8-byte Spill + addq %r13, %rdx + movq %rdx, -112(%rsp) # 8-byte Spill + adcq %r11, %rsi + movq %rsi, -104(%rsp) # 8-byte Spill + adcq %r10, %rbp + movq %rbp, -88(%rsp) # 8-byte Spill + adcq %r9, %rdi + movq %rdi, -80(%rsp) # 8-byte Spill + adcq %r8, %r15 + movq %r15, -72(%rsp) # 8-byte Spill + adcq $0, %r14 + movq %r14, -64(%rsp) # 8-byte Spill + movq -8(%rcx), %rdx + movq %rdx, (%rsp) # 8-byte Spill + movq %rax, %rdi + imulq %rdx, %rdi + movq (%rcx), %r9 + movq %r9, 8(%rsp) # 8-byte Spill + movq 40(%rcx), %rdx + movq %rdx, 48(%rsp) # 8-byte Spill + movq 32(%rcx), %rbp + movq %rbp, 32(%rsp) # 8-byte Spill + movq 24(%rcx), %rbx + movq %rbx, 40(%rsp) # 8-byte Spill + movq 16(%rcx), %rsi + movq %rsi, 16(%rsp) # 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, 24(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq %rdx + movq %rdx, %r11 + movq %rax, -128(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq %rbp + movq %rdx, %r13 + movq %rax, %r10 + movq %rdi, %rax + mulq %rbx + movq %rdx, %rbp + movq %rax, %r14 + movq %rdi, %rax + mulq %rsi + movq %rdx, %rbx + movq %rax, %r12 + movq %rdi, %rax + mulq %rcx + movq %rdx, %r8 + movq %rax, %r15 + movq %rdi, %rax + mulq %r9 + movq %rdx, %r9 + addq %r15, %r9 + adcq %r12, %r8 + adcq %r14, %rbx + adcq %r10, %rbp + adcq -128(%rsp), %r13 # 8-byte Folded Reload + adcq $0, %r11 + addq -120(%rsp), %rax # 8-byte Folded Reload + adcq -112(%rsp), %r9 # 8-byte Folded Reload + adcq -104(%rsp), %r8 # 8-byte Folded Reload + adcq -88(%rsp), %rbx # 8-byte Folded Reload + adcq -80(%rsp), %rbp # 8-byte Folded Reload + adcq -72(%rsp), %r13 # 8-byte Folded Reload + adcq -64(%rsp), %r11 # 8-byte Folded Reload + sbbq %rax, %rax + andl $1, %eax + movq %rax, -80(%rsp) # 8-byte Spill + movq -8(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rdi + movq %rdi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rdi, %rax 
+ mulq -56(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %rcx + movq %rdi, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r10 + movq %rdi, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rax, %r12 + movq %rdx, %rdi + addq %r10, %rdi + adcq %rcx, %rsi + adcq -112(%rsp), %r15 # 8-byte Folded Reload + adcq -104(%rsp), %r14 # 8-byte Folded Reload + movq -72(%rsp), %rcx # 8-byte Reload + adcq -88(%rsp), %rcx # 8-byte Folded Reload + movq -64(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %r9, %r12 + adcq %r8, %rdi + adcq %rbx, %rsi + adcq %rbp, %r15 + adcq %r13, %r14 + adcq %r11, %rcx + movq %rcx, -72(%rsp) # 8-byte Spill + adcq -80(%rsp), %rax # 8-byte Folded Reload + movq %rax, -64(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -88(%rsp) # 8-byte Spill + movq %r12, %rbx + imulq (%rsp), %rbx # 8-byte Folded Reload + movq %rbx, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq 32(%rsp) # 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r13 + movq %rbx, %rax + mulq 24(%rsp) # 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r11 + movq %rbx, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + addq %r11, %r9 + adcq %r13, %rbp + adcq -120(%rsp), %rcx # 8-byte Folded Reload + adcq -112(%rsp), %r10 # 8-byte Folded Reload + adcq -104(%rsp), %r8 # 8-byte Folded Reload + movq -80(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %r12, %rax + adcq %rdi, %r9 + adcq %rsi, %rbp + adcq %r15, %rcx + adcq %r14, %r10 + adcq -72(%rsp), %r8 # 8-byte Folded Reload + adcq -64(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + adcq $0, -88(%rsp) # 8-byte Folded Spill + movq -8(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rdi + movq %rdi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -56(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, %rbx + movq %rdi, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %r11 + movq %rdi, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r15 + movq %rdi, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rax, %r13 + movq %rdx, %rdi + addq %r15, %rdi + adcq %r11, %rsi + adcq %rbx, %r12 + adcq -112(%rsp), %r14 # 8-byte Folded Reload + movq -72(%rsp), %rdx # 8-byte Reload + adcq -104(%rsp), %rdx # 8-byte Folded Reload + movq -64(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %r9, %r13 + adcq %rbp, %rdi + adcq %rcx, %rsi + adcq %r10, %r12 + adcq %r8, %r14 + adcq -80(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + adcq -88(%rsp), %rax # 8-byte Folded Reload + movq %rax, -64(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -88(%rsp) # 8-byte Spill + movq %r13, %rbp + imulq (%rsp), %rbp # 8-byte Folded Reload + movq %rbp, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill 
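+# As in the 5-limb version, mcl_fp_mont6L interleaves multiply and
+# reduce: each of the six passes accumulates a[0..5]*b[i], multiplies
+# the low word by the constant spilled at (%rsp) (presumably
+# -p^-1 mod 2^64, loaded from -8(%rcx)), and adds that multiple of p
+# so the low word cancels; a final subq/sbbq chain with cmovne picks
+# either t or t-p.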
+ movq %rax, -104(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 32(%rsp) # 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r10 + movq %rbp, %rax + mulq 24(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r8 + movq %rbp, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + addq %r8, %r9 + adcq %r10, %rcx + adcq -120(%rsp), %rbx # 8-byte Folded Reload + adcq -112(%rsp), %r15 # 8-byte Folded Reload + adcq -104(%rsp), %r11 # 8-byte Folded Reload + movq -80(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %r13, %rax + adcq %rdi, %r9 + adcq %rsi, %rcx + adcq %r12, %rbx + adcq %r14, %r15 + adcq -72(%rsp), %r11 # 8-byte Folded Reload + adcq -64(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq -88(%rsp), %rbp # 8-byte Reload + adcq $0, %rbp + movq -8(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rdi + movq %rdi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -56(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, %r8 + movq %rdi, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r13 + movq %rdi, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rax, %r12 + movq %rdx, %rdi + addq %r13, %rdi + adcq %r8, %rsi + adcq -112(%rsp), %r10 # 8-byte Folded Reload + adcq -104(%rsp), %r14 # 8-byte Folded Reload + movq -72(%rsp), %rdx # 8-byte Reload + adcq -88(%rsp), %rdx # 8-byte Folded Reload + movq -64(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %r9, %r12 + adcq %rcx, %rdi + adcq %rbx, %rsi + adcq %r15, %r10 + adcq %r11, %r14 + adcq -80(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + adcq %rbp, %rax + movq %rax, -64(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -88(%rsp) # 8-byte Spill + movq %r12, %rbp + imulq (%rsp), %rbp # 8-byte Folded Reload + movq %rbp, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 32(%rsp) # 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r13 + movq %rbp, %rax + mulq 24(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r9 + movq %rbp, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, %r8 + addq %r9, %r8 + adcq %r13, %rcx + adcq -120(%rsp), %rbx # 8-byte Folded Reload + adcq -112(%rsp), %r15 # 8-byte Folded Reload + adcq -104(%rsp), %r11 # 8-byte Folded Reload + movq -80(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %r12, %rax + adcq %rdi, %r8 + adcq %rsi, %rcx + adcq %r10, %rbx + adcq %r14, %r15 + adcq -72(%rsp), %r11 # 8-byte Folded Reload + adcq -64(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + adcq $0, -88(%rsp) # 8-byte Folded Spill + movq -8(%rsp), %rax # 8-byte Reload + movq 32(%rax), 
%rsi + movq %rsi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -56(%rsp) # 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, -120(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r12 + movq %rsi, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, %r10 + movq %rsi, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rax, %r9 + movq %rdx, %r13 + addq %r10, %r13 + adcq %r12, %r14 + adcq -120(%rsp), %rdi # 8-byte Folded Reload + adcq -112(%rsp), %rbp # 8-byte Folded Reload + movq -72(%rsp), %rdx # 8-byte Reload + adcq -104(%rsp), %rdx # 8-byte Folded Reload + movq -64(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %r8, %r9 + adcq %rcx, %r13 + adcq %rbx, %r14 + adcq %r15, %rdi + adcq %r11, %rbp + adcq -80(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + adcq -88(%rsp), %rax # 8-byte Folded Reload + movq %rax, -64(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -88(%rsp) # 8-byte Spill + movq %r9, %rsi + imulq (%rsp), %rsi # 8-byte Folded Reload + movq %rsi, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 32(%rsp) # 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %rbx + movq %rsi, %rax + mulq 24(%rsp) # 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %rcx + movq %rsi, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, %r8 + addq %rcx, %r8 + adcq %rbx, %r12 + adcq -120(%rsp), %r15 # 8-byte Folded Reload + adcq -112(%rsp), %r11 # 8-byte Folded Reload + adcq -104(%rsp), %r10 # 8-byte Folded Reload + movq -80(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %r9, %rax + adcq %r13, %r8 + adcq %r14, %r12 + adcq %rdi, %r15 + adcq %rbp, %r11 + adcq -72(%rsp), %r10 # 8-byte Folded Reload + movq %r10, -72(%rsp) # 8-byte Spill + adcq -64(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq -88(%rsp), %rbx # 8-byte Reload + adcq $0, %rbx + movq -8(%rsp), %rax # 8-byte Reload + movq 40(%rax), %rcx + movq %rcx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, -16(%rsp) # 8-byte Spill + movq %rax, -24(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -56(%rsp) # 8-byte Folded Reload + movq %rdx, -8(%rsp) # 8-byte Spill + movq %rax, %r9 + movq %rcx, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, %rsi + movq %rcx, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, %rdi + movq %rcx, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rax, %r13 + movq %rdx, %rbp + addq %rdi, %rbp + adcq %rsi, %r14 + adcq %r9, %r10 + movq -8(%rsp), %rax # 8-byte Reload + adcq -24(%rsp), %rax # 8-byte Folded Reload + movq -16(%rsp), %rdx # 8-byte Reload + adcq -88(%rsp), %rdx # 8-byte Folded Reload + movq -64(%rsp), %rsi # 8-byte Reload + adcq $0, %rsi + addq %r8, %r13 + movq %r13, -40(%rsp) # 8-byte Spill + adcq 
%r12, %rbp + adcq %r15, %r14 + movq %r14, -24(%rsp) # 8-byte Spill + adcq %r11, %r10 + movq %r10, -32(%rsp) # 8-byte Spill + adcq -72(%rsp), %rax # 8-byte Folded Reload + movq %rax, -8(%rsp) # 8-byte Spill + adcq -80(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -16(%rsp) # 8-byte Spill + adcq %rbx, %rsi + movq %rsi, -64(%rsp) # 8-byte Spill + sbbq %rcx, %rcx + movq (%rsp), %r9 # 8-byte Reload + imulq %r13, %r9 + andl $1, %ecx + movq %r9, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, (%rsp) # 8-byte Spill + movq %r9, %rax + mulq 32(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, -48(%rsp) # 8-byte Spill + movq %r9, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, -56(%rsp) # 8-byte Spill + movq %r9, %rax + movq 8(%rsp), %r13 # 8-byte Reload + mulq %r13 + movq %rdx, %r15 + movq %rax, %r12 + movq %r9, %rax + movq 16(%rsp), %r14 # 8-byte Reload + mulq %r14 + movq %rdx, %rsi + movq %rax, %r11 + movq %r9, %rax + movq 24(%rsp), %r10 # 8-byte Reload + mulq %r10 + addq %r15, %rax + adcq %r11, %rdx + adcq -56(%rsp), %rsi # 8-byte Folded Reload + adcq -48(%rsp), %rdi # 8-byte Folded Reload + adcq (%rsp), %rbx # 8-byte Folded Reload + adcq $0, %r8 + addq -40(%rsp), %r12 # 8-byte Folded Reload + adcq %rbp, %rax + adcq -24(%rsp), %rdx # 8-byte Folded Reload + adcq -32(%rsp), %rsi # 8-byte Folded Reload + adcq -8(%rsp), %rdi # 8-byte Folded Reload + adcq -16(%rsp), %rbx # 8-byte Folded Reload + adcq -64(%rsp), %r8 # 8-byte Folded Reload + adcq $0, %rcx + movq %rax, %rbp + subq %r13, %rbp + movq %rdx, %r9 + sbbq %r10, %r9 + movq %rsi, %r10 + sbbq %r14, %r10 + movq %rdi, %r11 + sbbq 40(%rsp), %r11 # 8-byte Folded Reload + movq %rbx, %r14 + sbbq 32(%rsp), %r14 # 8-byte Folded Reload + movq %r8, %r15 + sbbq 48(%rsp), %r15 # 8-byte Folded Reload + sbbq $0, %rcx + andl $1, %ecx + cmovneq %rdi, %r11 + testb %cl, %cl + cmovneq %rax, %rbp + movq -96(%rsp), %rax # 8-byte Reload + movq %rbp, (%rax) + cmovneq %rdx, %r9 + movq %r9, 8(%rax) + cmovneq %rsi, %r10 + movq %r10, 16(%rax) + movq %r11, 24(%rax) + cmovneq %rbx, %r14 + movq %r14, 32(%rax) + cmovneq %r8, %r15 + movq %r15, 40(%rax) + addq $56, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end83: + .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L + + .globl mcl_fp_montNF6L + .align 16, 0x90 + .type mcl_fp_montNF6L,@function +mcl_fp_montNF6L: # @mcl_fp_montNF6L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $40, %rsp + movq %rdx, -8(%rsp) # 8-byte Spill + movq %rdi, -88(%rsp) # 8-byte Spill + movq 40(%rsi), %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq (%rdx), %rbx + mulq %rbx + movq %rax, 32(%rsp) # 8-byte Spill + movq %rdx, %r13 + movq 32(%rsi), %rax + movq %rax, -24(%rsp) # 8-byte Spill + mulq %rbx + movq %rax, %r10 + movq %rdx, %r9 + movq 24(%rsi), %rax + movq %rax, -72(%rsp) # 8-byte Spill + movq 16(%rsi), %rbp + movq %rbp, -64(%rsp) # 8-byte Spill + movq (%rsi), %rdi + movq %rdi, -48(%rsp) # 8-byte Spill + movq 8(%rsi), %rsi + movq %rsi, -56(%rsp) # 8-byte Spill + mulq %rbx + movq %rdx, %r11 + movq %rax, %r8 + movq %rbp, %rax + mulq %rbx + movq %rdx, %r14 + movq %rax, %r15 + movq %rsi, %rax + mulq %rbx + movq %rdx, %r12 + movq %rax, %rbp + movq %rdi, %rax + mulq %rbx + movq %rax, -104(%rsp) # 8-byte Spill + movq %rdx, %rbx + addq %rbp, %rbx + adcq %r15, %r12 + adcq %r8, %r14 + adcq %r10, %r11 + adcq 32(%rsp), %r9 # 8-byte Folded Reload + movq %r9, -96(%rsp) # 8-byte Spill + adcq $0, %r13 + movq 
%r13, -80(%rsp) # 8-byte Spill + movq -8(%rcx), %rdx + movq %rdx, (%rsp) # 8-byte Spill + movq %rax, %r9 + imulq %rdx, %r9 + movq (%rcx), %r8 + movq %r8, 8(%rsp) # 8-byte Spill + movq 40(%rcx), %rdx + movq %rdx, 32(%rsp) # 8-byte Spill + movq 32(%rcx), %rsi + movq %rsi, 24(%rsp) # 8-byte Spill + movq 24(%rcx), %rbp + movq %rbp, 16(%rsp) # 8-byte Spill + movq 16(%rcx), %rdi + movq %rdi, -40(%rsp) # 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, -32(%rsp) # 8-byte Spill + movq %r9, %rax + mulq %rdx + movq %rdx, -112(%rsp) # 8-byte Spill + movq %rax, %r10 + movq %r9, %rax + mulq %rsi + movq %rdx, -120(%rsp) # 8-byte Spill + movq %rax, %r15 + movq %r9, %rax + mulq %rbp + movq %rdx, -128(%rsp) # 8-byte Spill + movq %rax, %rsi + movq %r9, %rax + mulq %rdi + movq %rdx, %r13 + movq %rax, %rdi + movq %r9, %rax + mulq %rcx + movq %rdx, %rcx + movq %rax, %rbp + movq %r9, %rax + mulq %r8 + addq -104(%rsp), %rax # 8-byte Folded Reload + adcq %rbx, %rbp + adcq %r12, %rdi + adcq %r14, %rsi + adcq %r11, %r15 + adcq -96(%rsp), %r10 # 8-byte Folded Reload + movq -80(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %rdx, %rbp + adcq %rcx, %rdi + adcq %r13, %rsi + adcq -128(%rsp), %r15 # 8-byte Folded Reload + adcq -120(%rsp), %r10 # 8-byte Folded Reload + adcq -112(%rsp), %rax # 8-byte Folded Reload + movq %rax, -80(%rsp) # 8-byte Spill + movq -8(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rcx + movq %rcx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -96(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -104(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -72(%rsp) # 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -64(%rsp) # 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -56(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r8 + movq %rcx, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rax, %r13 + movq %rdx, %rcx + addq %r8, %rcx + adcq -120(%rsp), %rbx # 8-byte Folded Reload + adcq -112(%rsp), %r12 # 8-byte Folded Reload + adcq -104(%rsp), %r11 # 8-byte Folded Reload + adcq -96(%rsp), %r9 # 8-byte Folded Reload + adcq $0, %r14 + addq %rbp, %r13 + adcq %rdi, %rcx + adcq %rsi, %rbx + adcq %r15, %r12 + adcq %r10, %r11 + adcq -80(%rsp), %r9 # 8-byte Folded Reload + adcq $0, %r14 + movq %r13, %rsi + imulq (%rsp), %rsi # 8-byte Folded Reload + movq %rsi, %rax + mulq 32(%rsp) # 8-byte Folded Reload + movq %rdx, -96(%rsp) # 8-byte Spill + movq %rax, %rdi + movq %rsi, %rax + mulq 24(%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, %rbp + movq %rsi, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, -104(%rsp) # 8-byte Spill + movq %rax, %r8 + movq %rsi, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, -112(%rsp) # 8-byte Spill + movq %rax, %r10 + movq %rsi, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, -120(%rsp) # 8-byte Spill + movq %rax, %r15 + movq %rsi, %rax + mulq 8(%rsp) # 8-byte Folded Reload + addq %r13, %rax + adcq %rcx, %r15 + adcq %rbx, %r10 + adcq %r12, %r8 + adcq %r11, %rbp + adcq %r9, %rdi + adcq $0, %r14 + addq %rdx, %r15 + adcq -120(%rsp), %r10 # 8-byte Folded Reload + adcq -112(%rsp), %r8 # 8-byte Folded Reload + adcq -104(%rsp), %rbp # 8-byte Folded Reload + adcq -80(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, -80(%rsp) # 8-byte Spill + adcq -96(%rsp), %r14 # 8-byte Folded Reload + movq -8(%rsp), %rax # 8-byte Reload + 
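+# mcl_fp_montNF6L is the 6-limb analogue of mcl_fp_montNF5L: the NF
+# routines skip the explicit carry word that mcl_fp_mont6L tracks via
+# sbbq/andl $1 and correct the result with a sign test instead, which
+# presumably relies on the modulus leaving a spare top bit so the
+# intermediate sums never overflow.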
movq 16(%rax), %rsi + movq %rsi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, -96(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -104(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -72(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, -112(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -64(%rsp) # 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -56(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %rdi + movq %rsi, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rax, %r9 + movq %rdx, %rsi + addq %rdi, %rsi + adcq -120(%rsp), %rbx # 8-byte Folded Reload + adcq -112(%rsp), %r12 # 8-byte Folded Reload + adcq -104(%rsp), %rcx # 8-byte Folded Reload + adcq -96(%rsp), %r11 # 8-byte Folded Reload + adcq $0, %r13 + addq %r15, %r9 + adcq %r10, %rsi + adcq %r8, %rbx + adcq %rbp, %r12 + adcq -80(%rsp), %rcx # 8-byte Folded Reload + adcq %r14, %r11 + adcq $0, %r13 + movq %r9, %r8 + imulq (%rsp), %r8 # 8-byte Folded Reload + movq %r8, %rax + mulq 32(%rsp) # 8-byte Folded Reload + movq %rdx, -104(%rsp) # 8-byte Spill + movq %rax, -80(%rsp) # 8-byte Spill + movq %r8, %rax + mulq 24(%rsp) # 8-byte Folded Reload + movq %rdx, -112(%rsp) # 8-byte Spill + movq %rax, %r15 + movq %r8, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, -96(%rsp) # 8-byte Spill + movq %rax, %r10 + movq %r8, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, -120(%rsp) # 8-byte Spill + movq %rax, %r14 + movq %r8, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %rdi + movq %r8, %rax + mulq 8(%rsp) # 8-byte Folded Reload + addq %r9, %rax + adcq %rsi, %rdi + adcq %rbx, %r14 + adcq %r12, %r10 + adcq %rcx, %r15 + movq -80(%rsp), %rax # 8-byte Reload + adcq %r11, %rax + adcq $0, %r13 + addq %rdx, %rdi + adcq %rbp, %r14 + adcq -120(%rsp), %r10 # 8-byte Folded Reload + adcq -96(%rsp), %r15 # 8-byte Folded Reload + movq %r15, -96(%rsp) # 8-byte Spill + adcq -112(%rsp), %rax # 8-byte Folded Reload + movq %rax, -80(%rsp) # 8-byte Spill + adcq -104(%rsp), %r13 # 8-byte Folded Reload + movq -8(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rbp + movq %rbp, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -104(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq -72(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, -120(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq -64(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r12 + movq %rbp, %rax + mulq -56(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r9 + movq %rbp, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rax, %r8 + movq %rdx, %rbp + addq %r9, %rbp + adcq %r12, %rbx + adcq -120(%rsp), %rsi # 8-byte Folded Reload + adcq -112(%rsp), %rcx # 8-byte Folded Reload + adcq -104(%rsp), %r11 # 8-byte Folded Reload + adcq $0, %r15 + addq %rdi, %r8 + adcq %r14, %rbp + adcq %r10, %rbx + adcq -96(%rsp), %rsi # 8-byte Folded Reload + adcq -80(%rsp), %rcx # 8-byte Folded Reload + adcq %r13, %r11 + adcq $0, %r15 + movq %r8, %r14 + imulq (%rsp), %r14 # 8-byte Folded Reload + movq %r14, %rax + mulq 32(%rsp) # 8-byte Folded Reload + movq %rdx, -104(%rsp) # 8-byte Spill + movq %rax, %r9 + movq %r14, %rax + mulq 24(%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, %r13 + movq %r14, %rax + mulq 16(%rsp) 
# 8-byte Folded Reload + movq %rdx, -96(%rsp) # 8-byte Spill + movq %rax, %r10 + movq %r14, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, -112(%rsp) # 8-byte Spill + movq %rax, %r12 + movq %r14, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, -120(%rsp) # 8-byte Spill + movq %rax, %rdi + movq %r14, %rax + mulq 8(%rsp) # 8-byte Folded Reload + addq %r8, %rax + adcq %rbp, %rdi + adcq %rbx, %r12 + adcq %rsi, %r10 + adcq %rcx, %r13 + adcq %r11, %r9 + adcq $0, %r15 + addq %rdx, %rdi + adcq -120(%rsp), %r12 # 8-byte Folded Reload + adcq -112(%rsp), %r10 # 8-byte Folded Reload + adcq -96(%rsp), %r13 # 8-byte Folded Reload + movq %r13, -96(%rsp) # 8-byte Spill + adcq -80(%rsp), %r9 # 8-byte Folded Reload + movq %r9, -80(%rsp) # 8-byte Spill + adcq -104(%rsp), %r15 # 8-byte Folded Reload + movq -8(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rcx + movq %rcx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -104(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -72(%rsp) # 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -64(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, -128(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -56(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r13 + movq %rcx, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rax, %r11 + movq %rdx, %rbp + addq %r13, %rbp + adcq -128(%rsp), %rbx # 8-byte Folded Reload + adcq -120(%rsp), %rsi # 8-byte Folded Reload + adcq -112(%rsp), %r8 # 8-byte Folded Reload + adcq -104(%rsp), %r9 # 8-byte Folded Reload + adcq $0, %r14 + addq %rdi, %r11 + adcq %r12, %rbp + adcq %r10, %rbx + adcq -96(%rsp), %rsi # 8-byte Folded Reload + adcq -80(%rsp), %r8 # 8-byte Folded Reload + adcq %r15, %r9 + adcq $0, %r14 + movq %r11, %rcx + imulq (%rsp), %rcx # 8-byte Folded Reload + movq %rcx, %rax + mulq 32(%rsp) # 8-byte Folded Reload + movq %rdx, -112(%rsp) # 8-byte Spill + movq %rax, -80(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq 24(%rsp) # 8-byte Folded Reload + movq %rdx, -120(%rsp) # 8-byte Spill + movq %rax, %r12 + movq %rcx, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, -96(%rsp) # 8-byte Spill + movq %rax, %r10 + movq %rcx, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, -104(%rsp) # 8-byte Spill + movq %rax, %r15 + movq %rcx, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %rdi + movq %rcx, %rax + mulq 8(%rsp) # 8-byte Folded Reload + addq %r11, %rax + adcq %rbp, %rdi + adcq %rbx, %r15 + adcq %rsi, %r10 + adcq %r8, %r12 + movq -80(%rsp), %rcx # 8-byte Reload + adcq %r9, %rcx + adcq $0, %r14 + addq %rdx, %rdi + adcq %r13, %r15 + adcq -104(%rsp), %r10 # 8-byte Folded Reload + movq %r10, -104(%rsp) # 8-byte Spill + adcq -96(%rsp), %r12 # 8-byte Folded Reload + movq %r12, -96(%rsp) # 8-byte Spill + adcq -120(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -80(%rsp) # 8-byte Spill + adcq -112(%rsp), %r14 # 8-byte Folded Reload + movq -8(%rsp), %rax # 8-byte Reload + movq 40(%rax), %rcx + movq %rcx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, -8(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -16(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -72(%rsp) # 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, -24(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -64(%rsp) # 8-byte Folded 
Reload + movq %rdx, %r13 + movq %rax, %rbp + movq %rcx, %rax + mulq -56(%rsp) # 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, %rsi + movq %rcx, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rax, %r11 + movq %rdx, %r8 + addq %rsi, %r8 + adcq %rbp, %r10 + adcq -24(%rsp), %r13 # 8-byte Folded Reload + adcq -16(%rsp), %r12 # 8-byte Folded Reload + adcq -8(%rsp), %r9 # 8-byte Folded Reload + adcq $0, %rbx + addq %rdi, %r11 + adcq %r15, %r8 + adcq -104(%rsp), %r10 # 8-byte Folded Reload + adcq -96(%rsp), %r13 # 8-byte Folded Reload + adcq -80(%rsp), %r12 # 8-byte Folded Reload + adcq %r14, %r9 + movq %r9, -16(%rsp) # 8-byte Spill + adcq $0, %rbx + movq (%rsp), %r9 # 8-byte Reload + imulq %r11, %r9 + movq %r9, %rax + mulq 32(%rsp) # 8-byte Folded Reload + movq %rdx, (%rsp) # 8-byte Spill + movq %rax, %rsi + movq %r9, %rax + mulq 24(%rsp) # 8-byte Folded Reload + movq %rdx, -8(%rsp) # 8-byte Spill + movq %rax, %rdi + movq %r9, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, -24(%rsp) # 8-byte Spill + movq %rax, %rbp + movq %r9, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, %r14 + movq %r9, %rax + movq -40(%rsp), %r15 # 8-byte Reload + mulq %r15 + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, %rcx + movq %r9, %rax + movq -32(%rsp), %r9 # 8-byte Reload + mulq %r9 + addq %r11, %r14 + adcq %r8, %rax + adcq %r10, %rcx + adcq %r13, %rbp + adcq %r12, %rdi + adcq -16(%rsp), %rsi # 8-byte Folded Reload + adcq $0, %rbx + addq -48(%rsp), %rax # 8-byte Folded Reload + adcq %rdx, %rcx + adcq -56(%rsp), %rbp # 8-byte Folded Reload + adcq -24(%rsp), %rdi # 8-byte Folded Reload + adcq -8(%rsp), %rsi # 8-byte Folded Reload + adcq (%rsp), %rbx # 8-byte Folded Reload + movq %rax, %r14 + subq 8(%rsp), %r14 # 8-byte Folded Reload + movq %rcx, %r8 + sbbq %r9, %r8 + movq %rbp, %r9 + sbbq %r15, %r9 + movq %rdi, %r10 + sbbq 16(%rsp), %r10 # 8-byte Folded Reload + movq %rsi, %r11 + sbbq 24(%rsp), %r11 # 8-byte Folded Reload + movq %rbx, %r15 + sbbq 32(%rsp), %r15 # 8-byte Folded Reload + movq %r15, %rdx + sarq $63, %rdx + cmovsq %rax, %r14 + movq -88(%rsp), %rax # 8-byte Reload + movq %r14, (%rax) + cmovsq %rcx, %r8 + movq %r8, 8(%rax) + cmovsq %rbp, %r9 + movq %r9, 16(%rax) + cmovsq %rdi, %r10 + movq %r10, 24(%rax) + cmovsq %rsi, %r11 + movq %r11, 32(%rax) + cmovsq %rbx, %r15 + movq %r15, 40(%rax) + addq $40, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end84: + .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L + + .globl mcl_fp_montRed6L + .align 16, 0x90 + .type mcl_fp_montRed6L,@function +mcl_fp_montRed6L: # @mcl_fp_montRed6L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $16, %rsp + movq %rdx, %rcx + movq %rdi, -104(%rsp) # 8-byte Spill + movq -8(%rcx), %rax + movq %rax, -32(%rsp) # 8-byte Spill + movq (%rcx), %r11 + movq %r11, -24(%rsp) # 8-byte Spill + movq (%rsi), %r9 + movq %r9, %rbp + imulq %rax, %rbp + movq 40(%rcx), %rdx + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rax, %r12 + movq %rdx, -72(%rsp) # 8-byte Spill + movq 32(%rcx), %rdx + movq %rdx, 8(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rax, %r15 + movq %rdx, %r8 + movq 24(%rcx), %rdx + movq %rdx, (%rsp) # 8-byte Spill + movq 16(%rcx), %rdi + movq %rdi, -8(%rsp) # 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, -16(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rdx, %r10 + movq %rax, %r14 + movq %rbp, %rax + mulq %rdi + movq %rdx, 
%r13 + movq %rax, %rbx + movq %rbp, %rax + mulq %rcx + movq %rdx, %rcx + movq %rax, %rdi + movq %rbp, %rax + mulq %r11 + movq %rdx, %rbp + addq %rdi, %rbp + adcq %rbx, %rcx + adcq %r14, %r13 + adcq %r15, %r10 + adcq %r12, %r8 + movq -72(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %r9, %rax + adcq 8(%rsi), %rbp + adcq 16(%rsi), %rcx + adcq 24(%rsi), %r13 + adcq 32(%rsi), %r10 + adcq 40(%rsi), %r8 + movq %r8, -112(%rsp) # 8-byte Spill + adcq 48(%rsi), %rdx + movq %rdx, -72(%rsp) # 8-byte Spill + movq 88(%rsi), %rax + movq 80(%rsi), %rdx + movq 72(%rsi), %rdi + movq 64(%rsi), %rbx + movq 56(%rsi), %r15 + adcq $0, %r15 + adcq $0, %rbx + movq %rbx, -96(%rsp) # 8-byte Spill + adcq $0, %rdi + movq %rdi, -64(%rsp) # 8-byte Spill + adcq $0, %rdx + movq %rdx, -56(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, -48(%rsp) # 8-byte Spill + sbbq %r12, %r12 + andl $1, %r12d + movq %rbp, %rdi + imulq -32(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, -120(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, -88(%rsp) # 8-byte Spill + movq %rax, -128(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, %r8 + movq %rdi, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r9 + movq %rdi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r11 + movq %rdi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %rdi + addq %r11, %rdi + adcq %r9, %rsi + adcq %r8, %rbx + adcq -128(%rsp), %r14 # 8-byte Folded Reload + movq -88(%rsp), %r8 # 8-byte Reload + adcq -120(%rsp), %r8 # 8-byte Folded Reload + movq -80(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %rbp, %rax + adcq %rcx, %rdi + adcq %r13, %rsi + adcq %r10, %rbx + adcq -112(%rsp), %r14 # 8-byte Folded Reload + adcq -72(%rsp), %r8 # 8-byte Folded Reload + movq %r8, -88(%rsp) # 8-byte Spill + adcq %r15, %rdx + movq %rdx, -80(%rsp) # 8-byte Spill + adcq $0, -96(%rsp) # 8-byte Folded Spill + adcq $0, -64(%rsp) # 8-byte Folded Spill + adcq $0, -56(%rsp) # 8-byte Folded Spill + adcq $0, -48(%rsp) # 8-byte Folded Spill + adcq $0, %r12 + movq %rdi, %rcx + imulq -32(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -128(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %r8 + movq %rcx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r10 + movq %rcx, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + addq %r10, %r9 + adcq %r8, %rbp + adcq -128(%rsp), %r13 # 8-byte Folded Reload + adcq -120(%rsp), %r11 # 8-byte Folded Reload + adcq -112(%rsp), %r15 # 8-byte Folded Reload + movq -72(%rsp), %rcx # 8-byte Reload + adcq $0, %rcx + addq %rdi, %rax + adcq %rsi, %r9 + adcq %rbx, %rbp + adcq %r14, %r13 + adcq -88(%rsp), %r11 # 8-byte Folded Reload + adcq -80(%rsp), %r15 # 8-byte Folded Reload + adcq -96(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -72(%rsp) # 8-byte Spill + adcq $0, -64(%rsp) # 8-byte Folded Spill + adcq $0, -56(%rsp) # 8-byte Folded Spill + adcq $0, -48(%rsp) # 8-byte Folded Spill + adcq $0, 
%r12 + movq %r9, %rsi + imulq -32(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, -96(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, -88(%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r10 + movq %rsi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %rbx + movq %rsi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %rdi + addq %rbx, %rdi + adcq %r10, %rcx + adcq -120(%rsp), %r8 # 8-byte Folded Reload + adcq -112(%rsp), %r14 # 8-byte Folded Reload + movq -88(%rsp), %rsi # 8-byte Reload + adcq -96(%rsp), %rsi # 8-byte Folded Reload + movq -80(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %r9, %rax + adcq %rbp, %rdi + adcq %r13, %rcx + adcq %r11, %r8 + adcq %r15, %r14 + adcq -72(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -88(%rsp) # 8-byte Spill + adcq -64(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + adcq $0, -56(%rsp) # 8-byte Folded Spill + movq -48(%rsp), %rbp # 8-byte Reload + adcq $0, %rbp + adcq $0, %r12 + movq %rdi, %rsi + imulq -32(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, %rax + movq -40(%rsp), %r11 # 8-byte Reload + mulq %r11 + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, -48(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, -96(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %r15 + movq %rsi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, %rbx + movq %rsi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + addq %rbx, %rsi + adcq %r15, %r10 + adcq -112(%rsp), %r13 # 8-byte Folded Reload + adcq -96(%rsp), %r9 # 8-byte Folded Reload + movq -72(%rsp), %rbx # 8-byte Reload + adcq -48(%rsp), %rbx # 8-byte Folded Reload + movq -64(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %rdi, %rax + adcq %rcx, %rsi + adcq %r8, %r10 + adcq %r14, %r13 + adcq -88(%rsp), %r9 # 8-byte Folded Reload + adcq -80(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, -72(%rsp) # 8-byte Spill + adcq -56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -64(%rsp) # 8-byte Spill + adcq $0, %rbp + movq %rbp, -48(%rsp) # 8-byte Spill + adcq $0, %r12 + movq -32(%rsp), %r8 # 8-byte Reload + imulq %rsi, %r8 + movq %r8, %rax + mulq %r11 + movq %rdx, %rdi + movq %rax, -32(%rsp) # 8-byte Spill + movq %r8, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -56(%rsp) # 8-byte Spill + movq %r8, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, -80(%rsp) # 8-byte Spill + movq %r8, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r11 + movq %r8, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r14 + movq %r8, %rax + movq -24(%rsp), %r8 # 8-byte Reload + mulq %r8 + addq %r14, %rdx + adcq %r11, %rbp + adcq -80(%rsp), %rbx # 8-byte Folded Reload + adcq -56(%rsp), %rcx # 8-byte Folded Reload + adcq -32(%rsp), %r15 # 8-byte Folded Reload + adcq $0, %rdi + addq %rsi, %rax + adcq %r10, %rdx + adcq %r13, %rbp + adcq 
%r9, %rbx + adcq -72(%rsp), %rcx # 8-byte Folded Reload + adcq -64(%rsp), %r15 # 8-byte Folded Reload + adcq -48(%rsp), %rdi # 8-byte Folded Reload + adcq $0, %r12 + movq %rdx, %rax + subq %r8, %rax + movq %rbp, %rsi + sbbq -16(%rsp), %rsi # 8-byte Folded Reload + movq %rbx, %r9 + sbbq -8(%rsp), %r9 # 8-byte Folded Reload + movq %rcx, %r10 + sbbq (%rsp), %r10 # 8-byte Folded Reload + movq %r15, %r11 + sbbq 8(%rsp), %r11 # 8-byte Folded Reload + movq %rdi, %r14 + sbbq -40(%rsp), %r14 # 8-byte Folded Reload + sbbq $0, %r12 + andl $1, %r12d + cmovneq %rdi, %r14 + testb %r12b, %r12b + cmovneq %rdx, %rax + movq -104(%rsp), %rdx # 8-byte Reload + movq %rax, (%rdx) + cmovneq %rbp, %rsi + movq %rsi, 8(%rdx) + cmovneq %rbx, %r9 + movq %r9, 16(%rdx) + cmovneq %rcx, %r10 + movq %r10, 24(%rdx) + cmovneq %r15, %r11 + movq %r11, 32(%rdx) + movq %r14, 40(%rdx) + addq $16, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end85: + .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L + + .globl mcl_fp_addPre6L + .align 16, 0x90 + .type mcl_fp_addPre6L,@function +mcl_fp_addPre6L: # @mcl_fp_addPre6L +# BB#0: + pushq %r14 + pushq %rbx + movq 40(%rdx), %r8 + movq 40(%rsi), %r11 + movq 32(%rdx), %r9 + movq 24(%rdx), %r10 + movq 24(%rsi), %rax + movq 32(%rsi), %r14 + movq 16(%rdx), %rbx + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rbx + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %rbx, 16(%rdi) + adcq %r10, %rax + movq %rax, 24(%rdi) + adcq %r9, %r14 + movq %r14, 32(%rdi) + adcq %r8, %r11 + movq %r11, 40(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r14 + retq +.Lfunc_end86: + .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L + + .globl mcl_fp_subPre6L + .align 16, 0x90 + .type mcl_fp_subPre6L,@function +mcl_fp_subPre6L: # @mcl_fp_subPre6L +# BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 40(%rdx), %r8 + movq 40(%rsi), %r9 + movq 32(%rsi), %r10 + movq 24(%rsi), %r11 + movq 16(%rsi), %rcx + movq (%rsi), %rbx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rbx + sbbq 8(%rdx), %rsi + movq 24(%rdx), %r14 + movq 32(%rdx), %r15 + sbbq 16(%rdx), %rcx + movq %rbx, (%rdi) + movq %rsi, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r14, %r11 + movq %r11, 24(%rdi) + sbbq %r15, %r10 + movq %r10, 32(%rdi) + sbbq %r8, %r9 + movq %r9, 40(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + popq %r14 + popq %r15 + retq +.Lfunc_end87: + .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L + + .globl mcl_fp_shr1_6L + .align 16, 0x90 + .type mcl_fp_shr1_6L,@function +mcl_fp_shr1_6L: # @mcl_fp_shr1_6L +# BB#0: + movq 40(%rsi), %r8 + movq 32(%rsi), %r9 + movq 24(%rsi), %rdx + movq 16(%rsi), %rax + movq (%rsi), %rcx + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rcx + movq %rcx, (%rdi) + shrdq $1, %rax, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rdx, %rax + movq %rax, 16(%rdi) + shrdq $1, %r9, %rdx + movq %rdx, 24(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 32(%rdi) + shrq %r8 + movq %r8, 40(%rdi) + retq +.Lfunc_end88: + .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L + + .globl mcl_fp_add6L + .align 16, 0x90 + .type mcl_fp_add6L,@function +mcl_fp_add6L: # @mcl_fp_add6L +# BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 40(%rdx), %r14 + movq 40(%rsi), %r8 + movq 32(%rdx), %r15 + movq 24(%rdx), %rbx + movq 24(%rsi), %r10 + movq 32(%rsi), %r9 + movq 16(%rdx), %r11 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r11 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r11, 16(%rdi) + 
adcq %rbx, %r10 + movq %r10, 24(%rdi) + adcq %r15, %r9 + movq %r9, 32(%rdi) + adcq %r14, %r8 + movq %r8, 40(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r11 + sbbq 24(%rcx), %r10 + sbbq 32(%rcx), %r9 + sbbq 40(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne .LBB89_2 +# BB#1: # %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r11, 16(%rdi) + movq %r10, 24(%rdi) + movq %r9, 32(%rdi) + movq %r8, 40(%rdi) +.LBB89_2: # %carry + popq %rbx + popq %r14 + popq %r15 + retq +.Lfunc_end89: + .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L + + .globl mcl_fp_addNF6L + .align 16, 0x90 + .type mcl_fp_addNF6L,@function +mcl_fp_addNF6L: # @mcl_fp_addNF6L +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 40(%rdx), %r8 + movq 32(%rdx), %r9 + movq 24(%rdx), %r10 + movq 16(%rdx), %r11 + movq (%rdx), %r15 + movq 8(%rdx), %r14 + addq (%rsi), %r15 + adcq 8(%rsi), %r14 + adcq 16(%rsi), %r11 + adcq 24(%rsi), %r10 + adcq 32(%rsi), %r9 + adcq 40(%rsi), %r8 + movq %r15, %rsi + subq (%rcx), %rsi + movq %r14, %rbx + sbbq 8(%rcx), %rbx + movq %r11, %rdx + sbbq 16(%rcx), %rdx + movq %r10, %r13 + sbbq 24(%rcx), %r13 + movq %r9, %r12 + sbbq 32(%rcx), %r12 + movq %r8, %rax + sbbq 40(%rcx), %rax + movq %rax, %rcx + sarq $63, %rcx + cmovsq %r15, %rsi + movq %rsi, (%rdi) + cmovsq %r14, %rbx + movq %rbx, 8(%rdi) + cmovsq %r11, %rdx + movq %rdx, 16(%rdi) + cmovsq %r10, %r13 + movq %r13, 24(%rdi) + cmovsq %r9, %r12 + movq %r12, 32(%rdi) + cmovsq %r8, %rax + movq %rax, 40(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end90: + .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L + + .globl mcl_fp_sub6L + .align 16, 0x90 + .type mcl_fp_sub6L,@function +mcl_fp_sub6L: # @mcl_fp_sub6L +# BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 40(%rdx), %r14 + movq 40(%rsi), %r8 + movq 32(%rsi), %r9 + movq 24(%rsi), %r10 + movq 16(%rsi), %r11 + movq (%rsi), %rax + movq 8(%rsi), %rsi + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %rsi + movq 24(%rdx), %r15 + movq 32(%rdx), %r12 + sbbq 16(%rdx), %r11 + movq %rax, (%rdi) + movq %rsi, 8(%rdi) + movq %r11, 16(%rdi) + sbbq %r15, %r10 + movq %r10, 24(%rdi) + sbbq %r12, %r9 + movq %r9, 32(%rdi) + sbbq %r14, %r8 + movq %r8, 40(%rdi) + sbbq $0, %rbx + testb $1, %bl + je .LBB91_2 +# BB#1: # %carry + movq 40(%rcx), %r14 + movq 32(%rcx), %r15 + movq 24(%rcx), %r12 + movq 8(%rcx), %rbx + movq 16(%rcx), %rdx + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %rsi, %rbx + movq %rbx, 8(%rdi) + adcq %r11, %rdx + movq %rdx, 16(%rdi) + adcq %r10, %r12 + movq %r12, 24(%rdi) + adcq %r9, %r15 + movq %r15, 32(%rdi) + adcq %r8, %r14 + movq %r14, 40(%rdi) +.LBB91_2: # %nocarry + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq +.Lfunc_end91: + .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L + + .globl mcl_fp_subNF6L + .align 16, 0x90 + .type mcl_fp_subNF6L,@function +mcl_fp_subNF6L: # @mcl_fp_subNF6L +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 40(%rsi), %r15 + movq 32(%rsi), %r8 + movq 24(%rsi), %r9 + movq 16(%rsi), %r10 + movq (%rsi), %r11 + movq 8(%rsi), %r14 + subq (%rdx), %r11 + sbbq 8(%rdx), %r14 + sbbq 16(%rdx), %r10 + sbbq 24(%rdx), %r9 + sbbq 32(%rdx), %r8 + sbbq 40(%rdx), %r15 + movq %r15, %rdx + sarq $63, %rdx + movq %rdx, %rbx + addq %rbx, %rbx + movq %rdx, %rsi + adcq %rsi, %rsi + andq 8(%rcx), %rsi + movq %r15, %rax + shrq $63, %rax + orq %rbx, %rax + andq (%rcx), %rax + movq 40(%rcx), %r12 + andq %rdx, %r12 + movq 32(%rcx), 
%r13 + andq %rdx, %r13 + movq 24(%rcx), %rbx + andq %rdx, %rbx + andq 16(%rcx), %rdx + addq %r11, %rax + movq %rax, (%rdi) + adcq %r14, %rsi + movq %rsi, 8(%rdi) + adcq %r10, %rdx + movq %rdx, 16(%rdi) + adcq %r9, %rbx + movq %rbx, 24(%rdi) + adcq %r8, %r13 + movq %r13, 32(%rdi) + adcq %r15, %r12 + movq %r12, 40(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end92: + .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L + + .globl mcl_fpDbl_add6L + .align 16, 0x90 + .type mcl_fpDbl_add6L,@function +mcl_fpDbl_add6L: # @mcl_fpDbl_add6L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 88(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 80(%rdx), %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq 72(%rdx), %r14 + movq 64(%rdx), %r15 + movq 24(%rsi), %rbp + movq 32(%rsi), %r13 + movq 16(%rdx), %r12 + movq (%rdx), %rbx + movq 8(%rdx), %rax + addq (%rsi), %rbx + adcq 8(%rsi), %rax + adcq 16(%rsi), %r12 + adcq 24(%rdx), %rbp + adcq 32(%rdx), %r13 + movq 56(%rdx), %r11 + movq 48(%rdx), %r9 + movq 40(%rdx), %rdx + movq %rbx, (%rdi) + movq 88(%rsi), %r8 + movq %rax, 8(%rdi) + movq 80(%rsi), %r10 + movq %r12, 16(%rdi) + movq 72(%rsi), %r12 + movq %rbp, 24(%rdi) + movq 40(%rsi), %rax + adcq %rdx, %rax + movq 64(%rsi), %rdx + movq %r13, 32(%rdi) + movq 56(%rsi), %r13 + movq 48(%rsi), %rbp + adcq %r9, %rbp + movq %rax, 40(%rdi) + adcq %r11, %r13 + adcq %r15, %rdx + adcq %r14, %r12 + adcq -16(%rsp), %r10 # 8-byte Folded Reload + adcq -8(%rsp), %r8 # 8-byte Folded Reload + sbbq %rax, %rax + andl $1, %eax + movq %rbp, %rsi + subq (%rcx), %rsi + movq %r13, %rbx + sbbq 8(%rcx), %rbx + movq %rdx, %r9 + sbbq 16(%rcx), %r9 + movq %r12, %r11 + sbbq 24(%rcx), %r11 + movq %r10, %r14 + sbbq 32(%rcx), %r14 + movq %r8, %r15 + sbbq 40(%rcx), %r15 + sbbq $0, %rax + andl $1, %eax + cmovneq %rbp, %rsi + movq %rsi, 48(%rdi) + testb %al, %al + cmovneq %r13, %rbx + movq %rbx, 56(%rdi) + cmovneq %rdx, %r9 + movq %r9, 64(%rdi) + cmovneq %r12, %r11 + movq %r11, 72(%rdi) + cmovneq %r10, %r14 + movq %r14, 80(%rdi) + cmovneq %r8, %r15 + movq %r15, 88(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end93: + .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L + + .globl mcl_fpDbl_sub6L + .align 16, 0x90 + .type mcl_fpDbl_sub6L,@function +mcl_fpDbl_sub6L: # @mcl_fpDbl_sub6L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 88(%rdx), %r9 + movq 80(%rdx), %r10 + movq 72(%rdx), %r14 + movq 16(%rsi), %r8 + movq (%rsi), %r15 + movq 8(%rsi), %r11 + xorl %eax, %eax + subq (%rdx), %r15 + sbbq 8(%rdx), %r11 + sbbq 16(%rdx), %r8 + movq 24(%rsi), %rbx + sbbq 24(%rdx), %rbx + movq 32(%rsi), %r12 + sbbq 32(%rdx), %r12 + movq 64(%rdx), %r13 + movq %r15, (%rdi) + movq 56(%rdx), %rbp + movq %r11, 8(%rdi) + movq 48(%rdx), %r15 + movq 40(%rdx), %rdx + movq %r8, 16(%rdi) + movq 88(%rsi), %r8 + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %rdx, %rbx + movq 80(%rsi), %r11 + movq %r12, 32(%rdi) + movq 48(%rsi), %rdx + sbbq %r15, %rdx + movq 72(%rsi), %r15 + movq %rbx, 40(%rdi) + movq 64(%rsi), %r12 + movq 56(%rsi), %rsi + sbbq %rbp, %rsi + sbbq %r13, %r12 + sbbq %r14, %r15 + sbbq %r10, %r11 + sbbq %r9, %r8 + movl $0, %ebp + sbbq $0, %rbp + andl $1, %ebp + movq (%rcx), %r14 + cmoveq %rax, %r14 + testb %bpl, %bpl + movq 16(%rcx), %r9 + cmoveq %rax, %r9 + movq 8(%rcx), %rbp + cmoveq %rax, %rbp + movq 40(%rcx), %r10 + cmoveq %rax, %r10 + movq 32(%rcx), %rbx + cmoveq %rax, %rbx + cmovneq 
24(%rcx), %rax + addq %rdx, %r14 + movq %r14, 48(%rdi) + adcq %rsi, %rbp + movq %rbp, 56(%rdi) + adcq %r12, %r9 + movq %r9, 64(%rdi) + adcq %r15, %rax + movq %rax, 72(%rdi) + adcq %r11, %rbx + movq %rbx, 80(%rdi) + adcq %r8, %r10 + movq %r10, 88(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end94: + .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L + + .globl mcl_fp_mulUnitPre7L + .align 16, 0x90 + .type mcl_fp_mulUnitPre7L,@function +mcl_fp_mulUnitPre7L: # @mcl_fp_mulUnitPre7L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rcx, %rax + mulq 48(%rsi) + movq %rdx, %r10 + movq %rax, -8(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq 40(%rsi) + movq %rdx, %r11 + movq %rax, -16(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq 32(%rsi) + movq %rdx, %r15 + movq %rax, %r14 + movq %rcx, %rax + mulq 24(%rsi) + movq %rdx, %r13 + movq %rax, %r12 + movq %rcx, %rax + mulq 16(%rsi) + movq %rdx, %rbx + movq %rax, %rbp + movq %rcx, %rax + mulq 8(%rsi) + movq %rdx, %r8 + movq %rax, %r9 + movq %rcx, %rax + mulq (%rsi) + movq %rax, (%rdi) + addq %r9, %rdx + movq %rdx, 8(%rdi) + adcq %rbp, %r8 + movq %r8, 16(%rdi) + adcq %r12, %rbx + movq %rbx, 24(%rdi) + adcq %r14, %r13 + movq %r13, 32(%rdi) + adcq -16(%rsp), %r15 # 8-byte Folded Reload + movq %r15, 40(%rdi) + adcq -8(%rsp), %r11 # 8-byte Folded Reload + movq %r11, 48(%rdi) + adcq $0, %r10 + movq %r10, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end95: + .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L + + .globl mcl_fpDbl_mulPre7L + .align 16, 0x90 + .type mcl_fpDbl_mulPre7L,@function +mcl_fpDbl_mulPre7L: # @mcl_fpDbl_mulPre7L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $24, %rsp + movq %rdx, 8(%rsp) # 8-byte Spill + movq %rsi, %r9 + movq %rdi, 16(%rsp) # 8-byte Spill + movq (%r9), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 8(%r9), %r10 + movq %r10, -64(%rsp) # 8-byte Spill + movq (%rdx), %rsi + mulq %rsi + movq %rdx, -32(%rsp) # 8-byte Spill + movq 16(%r9), %r11 + movq %r11, -72(%rsp) # 8-byte Spill + movq 24(%r9), %rbx + movq %rbx, -56(%rsp) # 8-byte Spill + movq 32(%r9), %rbp + movq %rbp, -24(%rsp) # 8-byte Spill + movq 40(%r9), %rcx + movq %rcx, -16(%rsp) # 8-byte Spill + movq 48(%r9), %r14 + movq %rax, (%rdi) + movq %r14, %rax + mulq %rsi + movq %rdx, %rdi + movq %rax, (%rsp) # 8-byte Spill + movq %rcx, %rax + mulq %rsi + movq %rdx, %rcx + movq %rax, -40(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq %rsi + movq %rdx, %rbp + movq %rax, %r15 + movq %rbx, %rax + mulq %rsi + movq %rdx, %rbx + movq %rax, %r8 + movq %r11, %rax + mulq %rsi + movq %rdx, %r12 + movq %rax, %r13 + movq %r10, %rax + mulq %rsi + movq %rdx, %rsi + movq %rax, %r10 + addq -32(%rsp), %r10 # 8-byte Folded Reload + adcq %r13, %rsi + adcq %r8, %r12 + adcq %r15, %rbx + adcq -40(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, -48(%rsp) # 8-byte Spill + adcq (%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -40(%rsp) # 8-byte Spill + adcq $0, %rdi + movq %rdi, -32(%rsp) # 8-byte Spill + movq 8(%rsp), %r11 # 8-byte Reload + movq 8(%r11), %rcx + movq %r14, %rax + mulq %rcx + movq %rdx, %r14 + movq %rax, (%rsp) # 8-byte Spill + movq -16(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -16(%rsp) # 8-byte Spill + movq %rax, %r8 + movq -24(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -24(%rsp) # 8-byte Spill + movq %rax, %r13 + movq -56(%rsp), %rax # 8-byte 
Reload + mulq %rcx + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, %r15 + movq -72(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, %rbp + movq -64(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, %rdi + movq -8(%rsp), %rax # 8-byte Reload + mulq %rcx + addq %r10, %rax + movq 16(%rsp), %r10 # 8-byte Reload + movq %rax, 8(%r10) + adcq %rsi, %rdi + adcq %r12, %rbp + adcq %rbx, %r15 + adcq -48(%rsp), %r13 # 8-byte Folded Reload + movq %r8, %rcx + adcq -40(%rsp), %rcx # 8-byte Folded Reload + movq (%rsp), %rax # 8-byte Reload + adcq -32(%rsp), %rax # 8-byte Folded Reload + sbbq %r8, %r8 + andl $1, %r8d + addq %rdx, %rdi + adcq -64(%rsp), %rbp # 8-byte Folded Reload + adcq -72(%rsp), %r15 # 8-byte Folded Reload + adcq -56(%rsp), %r13 # 8-byte Folded Reload + adcq -24(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -88(%rsp) # 8-byte Spill + adcq -16(%rsp), %rax # 8-byte Folded Reload + movq %rax, (%rsp) # 8-byte Spill + adcq %r14, %r8 + movq 48(%r9), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 16(%r11), %rcx + mulq %rcx + movq %rax, -96(%rsp) # 8-byte Spill + movq %rdx, -40(%rsp) # 8-byte Spill + movq 40(%r9), %rax + movq %rax, -24(%rsp) # 8-byte Spill + mulq %rcx + movq %rax, -16(%rsp) # 8-byte Spill + movq %rdx, -72(%rsp) # 8-byte Spill + movq 32(%r9), %rax + movq %rax, -32(%rsp) # 8-byte Spill + mulq %rcx + movq %rax, %r12 + movq %rdx, -80(%rsp) # 8-byte Spill + movq 24(%r9), %rax + movq %rax, -48(%rsp) # 8-byte Spill + mulq %rcx + movq %rax, %r14 + movq %rdx, -104(%rsp) # 8-byte Spill + movq 16(%r9), %rax + movq %rax, -56(%rsp) # 8-byte Spill + mulq %rcx + movq %rax, %rbx + movq %rdx, -112(%rsp) # 8-byte Spill + movq (%r9), %rsi + movq 8(%r9), %rax + movq %rax, -64(%rsp) # 8-byte Spill + mulq %rcx + movq %rdx, -120(%rsp) # 8-byte Spill + movq %rax, %r11 + movq %rsi, %rax + mulq %rcx + addq %rdi, %rax + movq %rax, 16(%r10) + adcq %rbp, %r11 + adcq %r15, %rbx + adcq %r13, %r14 + adcq -88(%rsp), %r12 # 8-byte Folded Reload + movq -16(%rsp), %rdi # 8-byte Reload + adcq (%rsp), %rdi # 8-byte Folded Reload + movq -96(%rsp), %rax # 8-byte Reload + adcq %r8, %rax + sbbq %rcx, %rcx + andl $1, %ecx + addq %rdx, %r11 + adcq -120(%rsp), %rbx # 8-byte Folded Reload + adcq -112(%rsp), %r14 # 8-byte Folded Reload + adcq -104(%rsp), %r12 # 8-byte Folded Reload + adcq -80(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, -16(%rsp) # 8-byte Spill + adcq -72(%rsp), %rax # 8-byte Folded Reload + movq %rax, %rdi + adcq -40(%rsp), %rcx # 8-byte Folded Reload + movq 8(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rbp + movq -8(%rsp), %rax # 8-byte Reload + mulq %rbp + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rax, (%rsp) # 8-byte Spill + movq -24(%rsp), %rax # 8-byte Reload + mulq %rbp + movq %rdx, -24(%rsp) # 8-byte Spill + movq %rax, -8(%rsp) # 8-byte Spill + movq -32(%rsp), %rax # 8-byte Reload + mulq %rbp + movq %rdx, -32(%rsp) # 8-byte Spill + movq %rax, %r13 + movq -48(%rsp), %rax # 8-byte Reload + mulq %rbp + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, %r15 + movq -56(%rsp), %rax # 8-byte Reload + mulq %rbp + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, %r8 + movq -64(%rsp), %rax # 8-byte Reload + mulq %rbp + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, %r10 + movq %rsi, %rax + mulq %rbp + addq %r11, %rax + movq 16(%rsp), %rsi # 8-byte Reload + movq %rax, 24(%rsi) + adcq %rbx, %r10 + adcq %r14, %r8 + adcq %r12, %r15 + adcq -16(%rsp), %r13 # 8-byte Folded Reload + movq -8(%rsp), %rsi # 8-byte Reload + 
adcq %rdi, %rsi + movq (%rsp), %rax # 8-byte Reload + adcq %rcx, %rax + sbbq %rdi, %rdi + andl $1, %edi + addq %rdx, %r10 + adcq -64(%rsp), %r8 # 8-byte Folded Reload + adcq -56(%rsp), %r15 # 8-byte Folded Reload + adcq -48(%rsp), %r13 # 8-byte Folded Reload + adcq -32(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -8(%rsp) # 8-byte Spill + adcq -24(%rsp), %rax # 8-byte Folded Reload + movq %rax, (%rsp) # 8-byte Spill + adcq -40(%rsp), %rdi # 8-byte Folded Reload + movq 48(%r9), %rax + movq %rax, -32(%rsp) # 8-byte Spill + movq 8(%rsp), %rbx # 8-byte Reload + movq 32(%rbx), %rcx + mulq %rcx + movq %rax, -16(%rsp) # 8-byte Spill + movq %rdx, -56(%rsp) # 8-byte Spill + movq 40(%r9), %rax + movq %rax, -40(%rsp) # 8-byte Spill + mulq %rcx + movq %rax, -24(%rsp) # 8-byte Spill + movq %rdx, -96(%rsp) # 8-byte Spill + movq 32(%r9), %rax + movq %rax, -48(%rsp) # 8-byte Spill + mulq %rcx + movq %rax, %r12 + movq %rdx, -104(%rsp) # 8-byte Spill + movq 24(%r9), %rax + movq %rax, -64(%rsp) # 8-byte Spill + mulq %rcx + movq %rax, %rbp + movq %rdx, -112(%rsp) # 8-byte Spill + movq 16(%r9), %rax + movq %rax, -72(%rsp) # 8-byte Spill + mulq %rcx + movq %rax, %r14 + movq %rdx, -120(%rsp) # 8-byte Spill + movq (%r9), %rsi + movq %rsi, -80(%rsp) # 8-byte Spill + movq 8(%r9), %rax + movq %rax, -88(%rsp) # 8-byte Spill + mulq %rcx + movq %rdx, -128(%rsp) # 8-byte Spill + movq %rax, %r11 + movq %rsi, %rax + mulq %rcx + addq %r10, %rax + movq 16(%rsp), %rcx # 8-byte Reload + movq %rax, 32(%rcx) + adcq %r8, %r11 + adcq %r15, %r14 + adcq %r13, %rbp + adcq -8(%rsp), %r12 # 8-byte Folded Reload + movq -24(%rsp), %rcx # 8-byte Reload + adcq (%rsp), %rcx # 8-byte Folded Reload + movq -16(%rsp), %rax # 8-byte Reload + adcq %rdi, %rax + sbbq %r13, %r13 + andl $1, %r13d + addq %rdx, %r11 + adcq -128(%rsp), %r14 # 8-byte Folded Reload + adcq -120(%rsp), %rbp # 8-byte Folded Reload + adcq -112(%rsp), %r12 # 8-byte Folded Reload + adcq -104(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -24(%rsp) # 8-byte Spill + adcq -96(%rsp), %rax # 8-byte Folded Reload + movq %rax, -16(%rsp) # 8-byte Spill + adcq -56(%rsp), %r13 # 8-byte Folded Reload + movq 40(%rbx), %rcx + movq -32(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, (%rsp) # 8-byte Spill + movq %rax, %rdi + movq -40(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -8(%rsp) # 8-byte Spill + movq %rax, %r10 + movq -48(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -32(%rsp) # 8-byte Spill + movq %rax, %r15 + movq -64(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rax, %rbx + movq -72(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, %rsi + movq -88(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, %r8 + movq -80(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -96(%rsp) # 8-byte Spill + addq %r11, %rax + movq 16(%rsp), %rcx # 8-byte Reload + movq %rax, 40(%rcx) + adcq %r14, %r8 + adcq %rbp, %rsi + adcq %r12, %rbx + adcq -24(%rsp), %r15 # 8-byte Folded Reload + adcq -16(%rsp), %r10 # 8-byte Folded Reload + adcq %r13, %rdi + movq 8(%rsp), %rax # 8-byte Reload + movq 48(%rax), %r11 + sbbq %rcx, %rcx + movq %r11, %rax + mulq 48(%r9) + movq %rdx, 8(%rsp) # 8-byte Spill + movq %rax, -64(%rsp) # 8-byte Spill + movq %r11, %rax + mulq 40(%r9) + movq %rdx, -16(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + movq %r11, %rax + mulq 32(%r9) + movq %rdx, -24(%rsp) # 8-byte Spill + movq %rax, %r13 + movq %r11, %rax + mulq 24(%r9) + movq 
%rdx, -56(%rsp) # 8-byte Spill + movq %rax, %rbp + movq %r11, %rax + mulq 16(%r9) + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, %r14 + movq %r11, %rax + mulq 8(%r9) + movq %rdx, -104(%rsp) # 8-byte Spill + movq %rax, %r12 + movq %r11, %rax + mulq (%r9) + andl $1, %ecx + addq -96(%rsp), %r8 # 8-byte Folded Reload + adcq -72(%rsp), %rsi # 8-byte Folded Reload + adcq -48(%rsp), %rbx # 8-byte Folded Reload + adcq -40(%rsp), %r15 # 8-byte Folded Reload + adcq -32(%rsp), %r10 # 8-byte Folded Reload + adcq -8(%rsp), %rdi # 8-byte Folded Reload + adcq (%rsp), %rcx # 8-byte Folded Reload + addq %rax, %r8 + movq 16(%rsp), %r9 # 8-byte Reload + movq %r8, 48(%r9) + adcq %r12, %rsi + adcq %r14, %rbx + adcq %rbp, %r15 + adcq %r13, %r10 + adcq -88(%rsp), %rdi # 8-byte Folded Reload + adcq -64(%rsp), %rcx # 8-byte Folded Reload + sbbq %rax, %rax + andl $1, %eax + addq %rdx, %rsi + adcq -104(%rsp), %rbx # 8-byte Folded Reload + movq %r9, %rdx + movq %rsi, 56(%rdx) + movq %rbx, 64(%rdx) + adcq -80(%rsp), %r15 # 8-byte Folded Reload + movq %r15, 72(%rdx) + adcq -56(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 80(%rdx) + adcq -24(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 88(%rdx) + adcq -16(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 96(%rdx) + adcq 8(%rsp), %rax # 8-byte Folded Reload + movq %rax, 104(%rdx) + addq $24, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end96: + .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L + + .globl mcl_fpDbl_sqrPre7L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre7L,@function +mcl_fpDbl_sqrPre7L: # @mcl_fpDbl_sqrPre7L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $16, %rsp + movq %rdi, 8(%rsp) # 8-byte Spill + movq 16(%rsi), %r11 + movq %r11, -64(%rsp) # 8-byte Spill + movq 24(%rsi), %r14 + movq %r14, -48(%rsp) # 8-byte Spill + movq 32(%rsi), %r9 + movq %r9, -24(%rsp) # 8-byte Spill + movq 40(%rsi), %r10 + movq %r10, -16(%rsp) # 8-byte Spill + movq 48(%rsi), %r8 + movq (%rsi), %rbp + movq 8(%rsi), %rbx + movq %rbp, %rax + mulq %rbp + movq %rdx, %rcx + movq %rax, (%rdi) + movq %r8, %rax + mulq %rbp + movq %rdx, %r15 + movq %rax, (%rsp) # 8-byte Spill + movq %r10, %rax + mulq %rbp + movq %rdx, %rdi + movq %rax, -32(%rsp) # 8-byte Spill + movq %r9, %rax + mulq %rbp + movq %rdx, %r9 + movq %rax, %r10 + movq %r14, %rax + mulq %rbp + movq %rdx, %r13 + movq %rax, %r14 + movq %r11, %rax + mulq %rbp + movq %rdx, %r12 + movq %rax, %r11 + movq %rbx, %rax + mulq %rbp + movq %rdx, -8(%rsp) # 8-byte Spill + movq %rax, -56(%rsp) # 8-byte Spill + addq %rax, %rcx + adcq %rdx, %r11 + adcq %r14, %r12 + adcq %r10, %r13 + adcq -32(%rsp), %r9 # 8-byte Folded Reload + adcq (%rsp), %rdi # 8-byte Folded Reload + movq %rdi, -40(%rsp) # 8-byte Spill + adcq $0, %r15 + movq %r15, -32(%rsp) # 8-byte Spill + movq %r8, %rax + mulq %rbx + movq %rdx, (%rsp) # 8-byte Spill + movq %rax, %rdi + movq -16(%rsp), %rax # 8-byte Reload + mulq %rbx + movq %rdx, -16(%rsp) # 8-byte Spill + movq %rax, %r15 + movq -24(%rsp), %rax # 8-byte Reload + mulq %rbx + movq %rdx, -24(%rsp) # 8-byte Spill + movq %rax, %r10 + movq -48(%rsp), %rax # 8-byte Reload + mulq %rbx + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, %r14 + movq -64(%rsp), %rax # 8-byte Reload + mulq %rbx + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, %rbp + movq %rbx, %rax + mulq %rbx + movq %rax, %rbx + addq -56(%rsp), %rcx # 8-byte Folded Reload + movq 8(%rsp), %rax # 8-byte Reload + movq %rcx, 8(%rax) + adcq %r11, %rbx + adcq %r12, %rbp 
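+# NOTE (added annotation, not part of the generated output): mcl_fpDbl_sqrPre7L squares a 7-limb (448-bit) operand by schoolbook multiplication into a full 14-limb result; each mulq pass leaves a 64x64->128-bit partial product in rdx:rax, and adcq chains like the one continuing below fold those partial products, carry by carry, into the running accumulator limbs before the next column's mulq pass.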
+ adcq %r13, %r14 + adcq %r9, %r10 + adcq -40(%rsp), %r15 # 8-byte Folded Reload + adcq -32(%rsp), %rdi # 8-byte Folded Reload + sbbq %r8, %r8 + andl $1, %r8d + addq -8(%rsp), %rbx # 8-byte Folded Reload + adcq %rdx, %rbp + adcq -64(%rsp), %r14 # 8-byte Folded Reload + adcq -48(%rsp), %r10 # 8-byte Folded Reload + adcq -24(%rsp), %r15 # 8-byte Folded Reload + adcq -16(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, -72(%rsp) # 8-byte Spill + adcq (%rsp), %r8 # 8-byte Folded Reload + movq 48(%rsi), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 16(%rsi), %rdi + mulq %rdi + movq %rax, (%rsp) # 8-byte Spill + movq %rdx, -40(%rsp) # 8-byte Spill + movq 40(%rsi), %rax + movq %rax, -24(%rsp) # 8-byte Spill + mulq %rdi + movq %rax, -88(%rsp) # 8-byte Spill + movq %rdx, -56(%rsp) # 8-byte Spill + movq 32(%rsi), %rax + movq %rax, -32(%rsp) # 8-byte Spill + mulq %rdi + movq %rax, %r13 + movq %rdx, -64(%rsp) # 8-byte Spill + movq 24(%rsi), %rcx + movq %rcx, %rax + mulq %rdi + movq %rax, %r9 + movq %r9, -104(%rsp) # 8-byte Spill + movq %rdx, -16(%rsp) # 8-byte Spill + movq (%rsi), %r12 + movq %r12, -48(%rsp) # 8-byte Spill + movq 8(%rsi), %rax + movq %rax, -80(%rsp) # 8-byte Spill + mulq %rdi + movq %rdx, -96(%rsp) # 8-byte Spill + movq %rax, %r11 + movq %r12, %rax + mulq %rdi + movq %rdx, -112(%rsp) # 8-byte Spill + movq %rax, %r12 + movq %rdi, %rax + mulq %rdi + movq %rax, %rdi + addq %rbx, %r12 + movq 8(%rsp), %rax # 8-byte Reload + movq %r12, 16(%rax) + adcq %rbp, %r11 + adcq %r14, %rdi + adcq %r9, %r10 + adcq %r15, %r13 + movq -88(%rsp), %r14 # 8-byte Reload + adcq -72(%rsp), %r14 # 8-byte Folded Reload + movq (%rsp), %rax # 8-byte Reload + adcq %r8, %rax + sbbq %rbx, %rbx + andl $1, %ebx + addq -112(%rsp), %r11 # 8-byte Folded Reload + adcq -96(%rsp), %rdi # 8-byte Folded Reload + adcq %rdx, %r10 + adcq -16(%rsp), %r13 # 8-byte Folded Reload + adcq -64(%rsp), %r14 # 8-byte Folded Reload + adcq -56(%rsp), %rax # 8-byte Folded Reload + movq %rax, (%rsp) # 8-byte Spill + adcq -40(%rsp), %rbx # 8-byte Folded Reload + movq -8(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, %r8 + movq -24(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, -8(%rsp) # 8-byte Spill + movq -32(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, %r9 + movq -80(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, %r15 + movq -48(%rsp), %rax # 8-byte Reload + mulq %rcx + movq %rdx, -88(%rsp) # 8-byte Spill + movq %rax, %rbp + movq %rcx, %rax + mulq %rcx + movq %rax, %r12 + movq %rdx, -96(%rsp) # 8-byte Spill + addq %r11, %rbp + movq 8(%rsp), %rax # 8-byte Reload + movq %rbp, 24(%rax) + adcq %rdi, %r15 + adcq -104(%rsp), %r10 # 8-byte Folded Reload + adcq %r13, %r12 + movq %r9, %rcx + adcq %r14, %rcx + movq -8(%rsp), %rdi # 8-byte Reload + adcq (%rsp), %rdi # 8-byte Folded Reload + adcq %rbx, %r8 + sbbq %r14, %r14 + andl $1, %r14d + movq (%rsi), %r9 + movq 8(%rsi), %rbp + movq 40(%rsi), %r11 + movq %rbp, %rax + mulq %r11 + movq %rdx, -24(%rsp) # 8-byte Spill + movq %rax, -40(%rsp) # 8-byte Spill + movq %r9, %rax + mulq %r11 + movq %rax, -48(%rsp) # 8-byte Spill + movq %rdx, -32(%rsp) # 8-byte Spill + movq 32(%rsi), %rbx + movq %rbp, %rax + mulq %rbx + movq %rdx, (%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %r9, %rax + mulq %rbx + movq %rax, -120(%rsp) # 8-byte Spill + movq %rdx, -104(%rsp) # 8-byte Spill + addq -88(%rsp), %r15 # 8-byte Folded 
Reload + adcq -80(%rsp), %r10 # 8-byte Folded Reload + adcq -16(%rsp), %r12 # 8-byte Folded Reload + adcq -96(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -96(%rsp) # 8-byte Spill + adcq -72(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, -8(%rsp) # 8-byte Spill + adcq -64(%rsp), %r8 # 8-byte Folded Reload + adcq -56(%rsp), %r14 # 8-byte Folded Reload + movq 48(%rsi), %rax + movq %rax, -56(%rsp) # 8-byte Spill + mulq %rbx + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, %rcx + movq %r11, %rax + mulq %rbx + movq %rax, %rbp + movq %rdx, -16(%rsp) # 8-byte Spill + movq 24(%rsi), %rax + movq %rax, -72(%rsp) # 8-byte Spill + mulq %rbx + movq %rax, %rdi + movq %rdx, -88(%rsp) # 8-byte Spill + movq 16(%rsi), %rax + movq %rax, -80(%rsp) # 8-byte Spill + mulq %rbx + movq %rdx, -128(%rsp) # 8-byte Spill + movq %rax, %r9 + movq %rbx, %rax + mulq %rbx + movq %rax, %r13 + addq -120(%rsp), %r15 # 8-byte Folded Reload + movq 8(%rsp), %rax # 8-byte Reload + movq %r15, 32(%rax) + adcq -112(%rsp), %r10 # 8-byte Folded Reload + adcq %r12, %r9 + adcq -96(%rsp), %rdi # 8-byte Folded Reload + adcq -8(%rsp), %r13 # 8-byte Folded Reload + adcq %rbp, %r8 + adcq %r14, %rcx + sbbq %rbx, %rbx + andl $1, %ebx + addq -104(%rsp), %r10 # 8-byte Folded Reload + adcq (%rsp), %r9 # 8-byte Folded Reload + adcq -128(%rsp), %rdi # 8-byte Folded Reload + adcq -88(%rsp), %r13 # 8-byte Folded Reload + adcq %rdx, %r8 + adcq -16(%rsp), %rcx # 8-byte Folded Reload + adcq -64(%rsp), %rbx # 8-byte Folded Reload + movq -56(%rsp), %rax # 8-byte Reload + mulq %r11 + movq %rdx, -8(%rsp) # 8-byte Spill + movq %rax, (%rsp) # 8-byte Spill + movq -72(%rsp), %rax # 8-byte Reload + mulq %r11 + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, %r15 + movq -80(%rsp), %rax # 8-byte Reload + mulq %r11 + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, %r14 + movq %r11, %rax + mulq %r11 + movq %rax, %r12 + addq -48(%rsp), %r10 # 8-byte Folded Reload + movq 8(%rsp), %rax # 8-byte Reload + movq %r10, 40(%rax) + adcq -40(%rsp), %r9 # 8-byte Folded Reload + adcq %rdi, %r14 + adcq %r13, %r15 + adcq %rbp, %r8 + adcq %rcx, %r12 + movq (%rsp), %rax # 8-byte Reload + adcq %rbx, %rax + sbbq %r11, %r11 + andl $1, %r11d + addq -32(%rsp), %r9 # 8-byte Folded Reload + adcq -24(%rsp), %r14 # 8-byte Folded Reload + adcq -64(%rsp), %r15 # 8-byte Folded Reload + adcq -56(%rsp), %r8 # 8-byte Folded Reload + movq %r8, -32(%rsp) # 8-byte Spill + adcq -16(%rsp), %r12 # 8-byte Folded Reload + adcq %rdx, %rax + movq %rax, (%rsp) # 8-byte Spill + adcq -8(%rsp), %r11 # 8-byte Folded Reload + movq 48(%rsi), %rcx + movq %rcx, %rax + mulq 40(%rsi) + movq %rdx, -8(%rsp) # 8-byte Spill + movq %rax, %r8 + movq %rcx, %rax + mulq 32(%rsi) + movq %rdx, -16(%rsp) # 8-byte Spill + movq %rax, %rbx + movq %rcx, %rax + mulq 24(%rsi) + movq %rdx, -24(%rsp) # 8-byte Spill + movq %rax, %rbp + movq %rcx, %rax + mulq 16(%rsi) + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rax, %r10 + movq %rcx, %rax + mulq 8(%rsi) + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, %rdi + movq %rcx, %rax + mulq (%rsi) + movq %rdx, %r13 + movq %rax, %rsi + movq %rcx, %rax + mulq %rcx + addq %r9, %rsi + movq 8(%rsp), %r9 # 8-byte Reload + movq %rsi, 48(%r9) + adcq %r14, %rdi + adcq %r15, %r10 + adcq -32(%rsp), %rbp # 8-byte Folded Reload + adcq %r12, %rbx + adcq (%rsp), %r8 # 8-byte Folded Reload + adcq %r11, %rax + sbbq %rcx, %rcx + andl $1, %ecx + addq %r13, %rdi + adcq -48(%rsp), %r10 # 8-byte Folded Reload + movq %r9, %rsi + movq %rdi, 56(%rsi) + movq %r10, 64(%rsi) + adcq -40(%rsp), %rbp # 8-byte Folded 
Reload + movq %rbp, 72(%rsi) + adcq -24(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, 80(%rsi) + adcq -16(%rsp), %r8 # 8-byte Folded Reload + movq %r8, 88(%rsi) + adcq -8(%rsp), %rax # 8-byte Folded Reload + movq %rax, 96(%rsi) + adcq %rdx, %rcx + movq %rcx, 104(%rsi) + addq $16, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end97: + .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L + + .globl mcl_fp_mont7L + .align 16, 0x90 + .type mcl_fp_mont7L,@function +mcl_fp_mont7L: # @mcl_fp_mont7L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $96, %rsp + movq %rdx, 24(%rsp) # 8-byte Spill + movq %rdi, -96(%rsp) # 8-byte Spill + movq 48(%rsi), %rax + movq %rax, 16(%rsp) # 8-byte Spill + movq (%rdx), %rbx + mulq %rbx + movq %rax, 88(%rsp) # 8-byte Spill + movq %rdx, %r15 + movq 40(%rsi), %rax + movq %rax, 8(%rsp) # 8-byte Spill + mulq %rbx + movq %rax, 80(%rsp) # 8-byte Spill + movq %rdx, %r12 + movq 32(%rsi), %rax + movq %rax, -32(%rsp) # 8-byte Spill + movq 24(%rsi), %r9 + movq %r9, -24(%rsp) # 8-byte Spill + movq 16(%rsi), %r10 + movq %r10, -16(%rsp) # 8-byte Spill + movq (%rsi), %r13 + movq %r13, (%rsp) # 8-byte Spill + movq 8(%rsi), %rsi + movq %rsi, -8(%rsp) # 8-byte Spill + mulq %rbx + movq %rdx, %r14 + movq %rax, %r8 + movq %r9, %rax + mulq %rbx + movq %rdx, %rdi + movq %rax, %r9 + movq %r10, %rax + mulq %rbx + movq %rdx, %rbp + movq %rax, %r10 + movq %rsi, %rax + mulq %rbx + movq %rdx, %rsi + movq %rax, %r11 + movq %r13, %rax + mulq %rbx + movq %rax, -112(%rsp) # 8-byte Spill + addq %r11, %rdx + movq %rdx, -104(%rsp) # 8-byte Spill + adcq %r10, %rsi + movq %rsi, -88(%rsp) # 8-byte Spill + adcq %r9, %rbp + movq %rbp, -80(%rsp) # 8-byte Spill + adcq %r8, %rdi + movq %rdi, -72(%rsp) # 8-byte Spill + adcq 80(%rsp), %r14 # 8-byte Folded Reload + movq %r14, -64(%rsp) # 8-byte Spill + adcq 88(%rsp), %r12 # 8-byte Folded Reload + movq %r12, -48(%rsp) # 8-byte Spill + adcq $0, %r15 + movq %r15, -40(%rsp) # 8-byte Spill + movq -8(%rcx), %rdx + movq %rdx, 32(%rsp) # 8-byte Spill + movq %rax, %rdi + imulq %rdx, %rdi + movq (%rcx), %r12 + movq %r12, 40(%rsp) # 8-byte Spill + movq 48(%rcx), %rdx + movq %rdx, 64(%rsp) # 8-byte Spill + movq 40(%rcx), %r9 + movq %r9, 88(%rsp) # 8-byte Spill + movq 32(%rcx), %rbx + movq %rbx, 80(%rsp) # 8-byte Spill + movq 24(%rcx), %rsi + movq %rsi, 72(%rsp) # 8-byte Spill + movq 16(%rcx), %rbp + movq %rbp, 48(%rsp) # 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, 56(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq %rdx + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, -120(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq %r9 + movq %rdx, %r14 + movq %rax, -128(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq %rbx + movq %rdx, %r11 + movq %rax, %r15 + movq %rdi, %rax + mulq %rsi + movq %rdx, %rbx + movq %rax, %r10 + movq %rdi, %rax + mulq %rbp + movq %rdx, %r8 + movq %rax, %r13 + movq %rdi, %rax + mulq %rcx + movq %rdx, %rbp + movq %rax, %r9 + movq %rdi, %rax + mulq %r12 + movq %rdx, %r12 + addq %r9, %r12 + adcq %r13, %rbp + adcq %r10, %r8 + adcq %r15, %rbx + adcq -128(%rsp), %r11 # 8-byte Folded Reload + adcq -120(%rsp), %r14 # 8-byte Folded Reload + movq -56(%rsp), %rcx # 8-byte Reload + adcq $0, %rcx + addq -112(%rsp), %rax # 8-byte Folded Reload + adcq -104(%rsp), %r12 # 8-byte Folded Reload + adcq -88(%rsp), %rbp # 8-byte Folded Reload + adcq -80(%rsp), %r8 # 8-byte Folded Reload + adcq -72(%rsp), %rbx # 8-byte Folded Reload + adcq -64(%rsp), %r11 # 8-byte Folded Reload + 
adcq -48(%rsp), %r14 # 8-byte Folded Reload + adcq -40(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -56(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -72(%rsp) # 8-byte Spill + movq 24(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rdi + movq %rdi, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rax, -80(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, -104(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, -112(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, %r13 + movq %rdi, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %r10 + movq %rdi, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rax, -64(%rsp) # 8-byte Spill + movq %rdx, %rdi + addq %r10, %rdi + adcq %r13, %r15 + adcq -112(%rsp), %r9 # 8-byte Folded Reload + movq %rcx, %rdx + adcq -104(%rsp), %rdx # 8-byte Folded Reload + adcq -88(%rsp), %rsi # 8-byte Folded Reload + movq -48(%rsp), %rax # 8-byte Reload + adcq -80(%rsp), %rax # 8-byte Folded Reload + movq -40(%rsp), %rcx # 8-byte Reload + adcq $0, %rcx + movq -64(%rsp), %r10 # 8-byte Reload + addq %r12, %r10 + movq %r10, -64(%rsp) # 8-byte Spill + adcq %rbp, %rdi + adcq %r8, %r15 + adcq %rbx, %r9 + adcq %r11, %rdx + movq %rdx, -88(%rsp) # 8-byte Spill + adcq %r14, %rsi + movq %rsi, -80(%rsp) # 8-byte Spill + adcq -56(%rsp), %rax # 8-byte Folded Reload + movq %rax, -48(%rsp) # 8-byte Spill + adcq -72(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -40(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -56(%rsp) # 8-byte Spill + movq %r10, %rbp + imulq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, %rax + mulq 64(%rsp) # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 88(%rsp) # 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 80(%rsp) # 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 72(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %rbx + movq %rbp, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r10 + movq %rbp, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r14 + movq %rbp, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %r11 + addq %r14, %r11 + adcq %r10, %rsi + adcq %rbx, %rcx + adcq -120(%rsp), %r13 # 8-byte Folded Reload + adcq -112(%rsp), %r12 # 8-byte Folded Reload + adcq -104(%rsp), %r8 # 8-byte Folded Reload + movq -72(%rsp), %rbp # 8-byte Reload + adcq $0, %rbp + addq -64(%rsp), %rax # 8-byte Folded Reload + adcq %rdi, %r11 + adcq %r15, %rsi + adcq %r9, %rcx + adcq -88(%rsp), %r13 # 8-byte Folded Reload + adcq -80(%rsp), %r12 # 8-byte Folded Reload + adcq -48(%rsp), %r8 # 8-byte Folded Reload + movq %r8, -80(%rsp) # 8-byte Spill + adcq -40(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, -72(%rsp) # 8-byte Spill + movq -56(%rsp), %rbp # 8-byte Reload + adcq $0, %rbp + movq 24(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rdi + movq %rdi, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq 8(%rsp) # 
8-byte Folded Reload + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, -120(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %r9 + movq %rdi, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r14 + movq %rdi, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rax, %r10 + movq %rdx, %r8 + addq %r14, %r8 + adcq %r9, %rbx + adcq -120(%rsp), %r15 # 8-byte Folded Reload + movq -64(%rsp), %r9 # 8-byte Reload + adcq -112(%rsp), %r9 # 8-byte Folded Reload + movq -56(%rsp), %rdi # 8-byte Reload + adcq -104(%rsp), %rdi # 8-byte Folded Reload + movq -48(%rsp), %rdx # 8-byte Reload + adcq -88(%rsp), %rdx # 8-byte Folded Reload + movq -40(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %r11, %r10 + adcq %rsi, %r8 + adcq %rcx, %rbx + adcq %r13, %r15 + adcq %r12, %r9 + movq %r9, -64(%rsp) # 8-byte Spill + adcq -80(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, -56(%rsp) # 8-byte Spill + adcq -72(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -48(%rsp) # 8-byte Spill + adcq %rbp, %rax + movq %rax, -40(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -88(%rsp) # 8-byte Spill + movq %r10, %rbp + imulq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, %rax + mulq 64(%rsp) # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 88(%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 80(%rsp) # 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 72(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, %rcx + movq %rbp, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r13 + movq %rbp, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r12 + movq %rbp, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + addq %r12, %r14 + adcq %r13, %rsi + adcq %rcx, %rdi + adcq -120(%rsp), %r9 # 8-byte Folded Reload + adcq -112(%rsp), %r11 # 8-byte Folded Reload + movq -80(%rsp), %rdx # 8-byte Reload + adcq -104(%rsp), %rdx # 8-byte Folded Reload + movq -72(%rsp), %rcx # 8-byte Reload + adcq $0, %rcx + addq %r10, %rax + adcq %r8, %r14 + adcq %rbx, %rsi + adcq %r15, %rdi + adcq -64(%rsp), %r9 # 8-byte Folded Reload + adcq -56(%rsp), %r11 # 8-byte Folded Reload + adcq -48(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + adcq -40(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -72(%rsp) # 8-byte Spill + adcq $0, -88(%rsp) # 8-byte Folded Spill + movq 24(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rcx + movq %rcx, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rax, -64(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + 
movq %rdx, %rbx + movq %rax, %r8 + movq %rcx, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r12 + movq %rcx, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rax, %r10 + movq %rdx, %r13 + addq %r12, %r13 + adcq %r8, %rbp + adcq -120(%rsp), %rbx # 8-byte Folded Reload + adcq -112(%rsp), %r15 # 8-byte Folded Reload + movq -56(%rsp), %rdx # 8-byte Reload + adcq -104(%rsp), %rdx # 8-byte Folded Reload + movq -48(%rsp), %rcx # 8-byte Reload + adcq -64(%rsp), %rcx # 8-byte Folded Reload + movq -40(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %r14, %r10 + adcq %rsi, %r13 + adcq %rdi, %rbp + adcq %r9, %rbx + adcq %r11, %r15 + adcq -80(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -56(%rsp) # 8-byte Spill + adcq -72(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -48(%rsp) # 8-byte Spill + adcq -88(%rsp), %rax # 8-byte Folded Reload + movq %rax, -40(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -64(%rsp) # 8-byte Spill + movq %r10, %rsi + imulq 32(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, %rax + mulq 64(%rsp) # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 88(%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 80(%rsp) # 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 72(%rsp) # 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, -120(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r9 + movq %rsi, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r11 + movq %rsi, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + addq %r11, %r14 + adcq %r9, %r8 + adcq -120(%rsp), %rcx # 8-byte Folded Reload + adcq -112(%rsp), %rdi # 8-byte Folded Reload + adcq -104(%rsp), %r12 # 8-byte Folded Reload + movq -80(%rsp), %rsi # 8-byte Reload + adcq -88(%rsp), %rsi # 8-byte Folded Reload + movq -72(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %r10, %rax + adcq %r13, %r14 + adcq %rbp, %r8 + adcq %rbx, %rcx + adcq %r15, %rdi + adcq -56(%rsp), %r12 # 8-byte Folded Reload + adcq -48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -80(%rsp) # 8-byte Spill + adcq -40(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + movq -64(%rsp), %r11 # 8-byte Reload + adcq $0, %r11 + movq 24(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rbp + movq %rbp, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, -120(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r9 + movq %rbp, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %r15 + movq %rbp, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rax, %rsi + movq %rdx, %r10 + addq %r15, %r10 + adcq %r9, %r13 + adcq -120(%rsp), %rbx # 8-byte Folded Reload + movq -64(%rsp), %r15 # 8-byte Reload + adcq -112(%rsp), %r15 # 8-byte Folded Reload + movq -56(%rsp), %rbp # 8-byte 
Reload + adcq -104(%rsp), %rbp # 8-byte Folded Reload + movq -48(%rsp), %rdx # 8-byte Reload + adcq -88(%rsp), %rdx # 8-byte Folded Reload + movq -40(%rsp), %rax # 8-byte Reload + adcq $0, %rax + movq %rsi, %r9 + addq %r14, %r9 + adcq %r8, %r10 + adcq %rcx, %r13 + adcq %rdi, %rbx + adcq %r12, %r15 + movq %r15, -64(%rsp) # 8-byte Spill + adcq -80(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, -56(%rsp) # 8-byte Spill + adcq -72(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -48(%rsp) # 8-byte Spill + adcq %r11, %rax + movq %rax, -40(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -88(%rsp) # 8-byte Spill + movq %r9, %rsi + movq %r9, %r11 + imulq 32(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, %rax + mulq 64(%rsp) # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 88(%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 80(%rsp) # 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 72(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, %rdi + movq %rsi, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r12 + movq %rsi, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r15 + movq %rsi, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + addq %r15, %r14 + adcq %r12, %rcx + adcq %rdi, %rbp + adcq -120(%rsp), %r9 # 8-byte Folded Reload + adcq -112(%rsp), %r8 # 8-byte Folded Reload + movq -80(%rsp), %rsi # 8-byte Reload + adcq -104(%rsp), %rsi # 8-byte Folded Reload + movq -72(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %r11, %rax + adcq %r10, %r14 + adcq %r13, %rcx + adcq %rbx, %rbp + adcq -64(%rsp), %r9 # 8-byte Folded Reload + adcq -56(%rsp), %r8 # 8-byte Folded Reload + adcq -48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -80(%rsp) # 8-byte Spill + adcq -40(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + adcq $0, -88(%rsp) # 8-byte Folded Spill + movq 24(%rsp), %rax # 8-byte Reload + movq 40(%rax), %rdi + movq %rdi, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rax, -64(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %r10 + movq %rdi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r15 + movq %rdi, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r13 + movq %rdi, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rax, %rdi + movq %rdx, %r11 + addq %r13, %r11 + adcq %r15, %rsi + adcq %r10, %rbx + adcq -112(%rsp), %r12 # 8-byte Folded Reload + movq -56(%rsp), %r10 # 8-byte Reload + adcq -104(%rsp), %r10 # 8-byte Folded Reload + movq -48(%rsp), %rdx # 8-byte Reload + adcq -64(%rsp), %rdx # 8-byte Folded Reload + movq -40(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %r14, %rdi + adcq %rcx, %r11 + adcq %rbp, %rsi + adcq %r9, %rbx + adcq %r8, %r12 + adcq -80(%rsp), %r10 # 8-byte Folded Reload + movq %r10, -56(%rsp) # 8-byte Spill + adcq -72(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -48(%rsp) # 8-byte Spill + adcq -88(%rsp), %rax # 8-byte 
Folded Reload + movq %rax, -40(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -80(%rsp) # 8-byte Spill + movq %rdi, %rbp + imulq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, %rax + mulq 64(%rsp) # 8-byte Folded Reload + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 88(%rsp) # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 80(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 72(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, -120(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, %r8 + movq %rbp, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %r9 + movq %rbp, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + addq %r9, %r15 + adcq %r8, %r13 + adcq -120(%rsp), %r10 # 8-byte Folded Reload + adcq -112(%rsp), %rcx # 8-byte Folded Reload + adcq -104(%rsp), %r14 # 8-byte Folded Reload + movq -72(%rsp), %rdx # 8-byte Reload + adcq -88(%rsp), %rdx # 8-byte Folded Reload + movq -64(%rsp), %r8 # 8-byte Reload + adcq $0, %r8 + addq %rdi, %rax + adcq %r11, %r15 + adcq %rsi, %r13 + adcq %rbx, %r10 + adcq %r12, %rcx + adcq -56(%rsp), %r14 # 8-byte Folded Reload + movq %r14, -56(%rsp) # 8-byte Spill + adcq -48(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + adcq -40(%rsp), %r8 # 8-byte Folded Reload + movq %r8, -64(%rsp) # 8-byte Spill + movq -80(%rsp), %rsi # 8-byte Reload + adcq $0, %rsi + movq 24(%rsp), %rax # 8-byte Reload + movq 48(%rax), %rdi + movq %rdi, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, 16(%rsp) # 8-byte Spill + movq %rax, -40(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, 24(%rsp) # 8-byte Spill + movq %rax, 8(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -32(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r12 + movq %rdi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, %rbx + movq %rdi, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, %rbp + movq %rdi, %rax + mulq (%rsp) # 8-byte Folded Reload + addq %rbp, %rdx + movq %rdx, %rbp + adcq %rbx, %r9 + adcq %r12, %r14 + movq %r8, %rdi + adcq -32(%rsp), %rdi # 8-byte Folded Reload + adcq 8(%rsp), %r11 # 8-byte Folded Reload + movq 24(%rsp), %rbx # 8-byte Reload + adcq -40(%rsp), %rbx # 8-byte Folded Reload + movq 16(%rsp), %r8 # 8-byte Reload + adcq $0, %r8 + addq %r15, %rax + movq %rax, -32(%rsp) # 8-byte Spill + adcq %r13, %rbp + movq %rbp, 8(%rsp) # 8-byte Spill + adcq %r10, %r9 + movq %r9, (%rsp) # 8-byte Spill + adcq %rcx, %r14 + movq %r14, -8(%rsp) # 8-byte Spill + adcq -56(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, -16(%rsp) # 8-byte Spill + adcq -72(%rsp), %r11 # 8-byte Folded Reload + movq %r11, -24(%rsp) # 8-byte Spill + adcq -64(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, 24(%rsp) # 8-byte Spill + adcq %rsi, %r8 + movq %r8, 16(%rsp) # 8-byte Spill + sbbq %rcx, %rcx + movq 32(%rsp), %r10 # 8-byte Reload + imulq %rax, %r10 + andl $1, %ecx + movq %r10, %rax + mulq 64(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, 32(%rsp) # 8-byte Spill + movq %r10, %rax + mulq 88(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -40(%rsp) # 
8-byte Spill + movq %r10, %rax + mulq 80(%rsp) # 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, -48(%rsp) # 8-byte Spill + movq %r10, %rax + mulq 72(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, -56(%rsp) # 8-byte Spill + movq %r10, %rax + movq 48(%rsp), %r13 # 8-byte Reload + mulq %r13 + movq %rdx, %rbp + movq %rax, %r12 + movq %r10, %rax + movq 40(%rsp), %r15 # 8-byte Reload + mulq %r15 + movq %rdx, %r11 + movq %rax, %r8 + movq %r10, %rax + movq 56(%rsp), %r14 # 8-byte Reload + mulq %r14 + addq %r11, %rax + adcq %r12, %rdx + adcq -56(%rsp), %rbp # 8-byte Folded Reload + adcq -48(%rsp), %rsi # 8-byte Folded Reload + adcq -40(%rsp), %rdi # 8-byte Folded Reload + adcq 32(%rsp), %r9 # 8-byte Folded Reload + adcq $0, %rbx + addq -32(%rsp), %r8 # 8-byte Folded Reload + adcq 8(%rsp), %rax # 8-byte Folded Reload + adcq (%rsp), %rdx # 8-byte Folded Reload + adcq -8(%rsp), %rbp # 8-byte Folded Reload + adcq -16(%rsp), %rsi # 8-byte Folded Reload + adcq -24(%rsp), %rdi # 8-byte Folded Reload + adcq 24(%rsp), %r9 # 8-byte Folded Reload + adcq 16(%rsp), %rbx # 8-byte Folded Reload + adcq $0, %rcx + movq %rax, %r8 + subq %r15, %r8 + movq %rdx, %r10 + sbbq %r14, %r10 + movq %rbp, %r11 + sbbq %r13, %r11 + movq %rsi, %r14 + sbbq 72(%rsp), %r14 # 8-byte Folded Reload + movq %rdi, %r15 + sbbq 80(%rsp), %r15 # 8-byte Folded Reload + movq %r9, %r12 + sbbq 88(%rsp), %r12 # 8-byte Folded Reload + movq %rbx, %r13 + sbbq 64(%rsp), %r13 # 8-byte Folded Reload + sbbq $0, %rcx + andl $1, %ecx + cmovneq %rbx, %r13 + testb %cl, %cl + cmovneq %rax, %r8 + movq -96(%rsp), %rax # 8-byte Reload + movq %r8, (%rax) + cmovneq %rdx, %r10 + movq %r10, 8(%rax) + cmovneq %rbp, %r11 + movq %r11, 16(%rax) + cmovneq %rsi, %r14 + movq %r14, 24(%rax) + cmovneq %rdi, %r15 + movq %r15, 32(%rax) + cmovneq %r9, %r12 + movq %r12, 40(%rax) + movq %r13, 48(%rax) + addq $96, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end98: + .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L + + .globl mcl_fp_montNF7L + .align 16, 0x90 + .type mcl_fp_montNF7L,@function +mcl_fp_montNF7L: # @mcl_fp_montNF7L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $80, %rsp + movq %rdx, 16(%rsp) # 8-byte Spill + movq %rdi, -96(%rsp) # 8-byte Spill + movq 48(%rsi), %rax + movq %rax, 8(%rsp) # 8-byte Spill + movq (%rdx), %rbp + mulq %rbp + movq %rax, 72(%rsp) # 8-byte Spill + movq %rdx, %r9 + movq 40(%rsi), %rax + movq %rax, (%rsp) # 8-byte Spill + mulq %rbp + movq %rax, 64(%rsp) # 8-byte Spill + movq %rdx, %r11 + movq 32(%rsi), %rax + movq %rax, -48(%rsp) # 8-byte Spill + movq 24(%rsi), %r8 + movq %r8, -40(%rsp) # 8-byte Spill + movq 16(%rsi), %rbx + movq %rbx, -32(%rsp) # 8-byte Spill + movq (%rsi), %r10 + movq %r10, -16(%rsp) # 8-byte Spill + movq 8(%rsi), %rsi + movq %rsi, -24(%rsp) # 8-byte Spill + mulq %rbp + movq %rdx, %rdi + movq %rax, 56(%rsp) # 8-byte Spill + movq %r8, %rax + mulq %rbp + movq %rdx, %r14 + movq %rax, %r15 + movq %rbx, %rax + mulq %rbp + movq %rdx, %rbx + movq %rax, %r13 + movq %rsi, %rax + mulq %rbp + movq %rdx, %rsi + movq %rax, %r12 + movq %r10, %rax + mulq %rbp + movq %rdx, %r8 + addq %r12, %r8 + adcq %r13, %rsi + movq %rsi, -104(%rsp) # 8-byte Spill + adcq %r15, %rbx + movq %rbx, -88(%rsp) # 8-byte Spill + adcq 56(%rsp), %r14 # 8-byte Folded Reload + movq %r14, %r12 + adcq 64(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, -80(%rsp) # 8-byte Spill + adcq 72(%rsp), %r11 # 8-byte Folded Reload + movq %r11, -56(%rsp) # 8-byte Spill + 
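+ # NOTE (annotation): the Montgomery constant p' = -p^-1 mod 2^64 sits one word below the
+ # modulus and is loaded from -8(%rcx) below; q = t[0]*p' makes t + q*p divisible by
+ # 2^64, which is what lets each reduction round of montNF7L discard the low limb.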
adcq $0, %r9 + movq %r9, -64(%rsp) # 8-byte Spill + movq -8(%rcx), %rdx + movq %rdx, 24(%rsp) # 8-byte Spill + movq %rax, %r9 + movq %rax, %r14 + imulq %rdx, %r9 + movq (%rcx), %r11 + movq %r11, 32(%rsp) # 8-byte Spill + movq 48(%rcx), %rdx + movq %rdx, 72(%rsp) # 8-byte Spill + movq 40(%rcx), %r10 + movq %r10, 64(%rsp) # 8-byte Spill + movq 32(%rcx), %rbp + movq %rbp, 56(%rsp) # 8-byte Spill + movq 24(%rcx), %rbx + movq %rbx, 48(%rsp) # 8-byte Spill + movq 16(%rcx), %rdi + movq %rdi, 40(%rsp) # 8-byte Spill + movq 8(%rcx), %rsi + movq %rsi, -8(%rsp) # 8-byte Spill + movq %r9, %rax + mulq %rdx + movq %rdx, -112(%rsp) # 8-byte Spill + movq %rax, -72(%rsp) # 8-byte Spill + movq %r9, %rax + mulq %r10 + movq %rdx, -120(%rsp) # 8-byte Spill + movq %rax, %r15 + movq %r9, %rax + mulq %rbp + movq %rdx, -128(%rsp) # 8-byte Spill + movq %rax, %r13 + movq %r9, %rax + mulq %rbx + movq %rdx, %rbx + movq %rax, %rbp + movq %r9, %rax + mulq %rdi + movq %rdx, %rcx + movq %rax, %rdi + movq %r9, %rax + mulq %rsi + movq %rdx, %r10 + movq %rax, %rsi + movq %r9, %rax + mulq %r11 + addq %r14, %rax + adcq %r8, %rsi + adcq -104(%rsp), %rdi # 8-byte Folded Reload + adcq -88(%rsp), %rbp # 8-byte Folded Reload + adcq %r12, %r13 + adcq -80(%rsp), %r15 # 8-byte Folded Reload + movq -72(%rsp), %r8 # 8-byte Reload + adcq -56(%rsp), %r8 # 8-byte Folded Reload + movq -64(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %rdx, %rsi + adcq %r10, %rdi + adcq %rcx, %rbp + adcq %rbx, %r13 + adcq -128(%rsp), %r15 # 8-byte Folded Reload + adcq -120(%rsp), %r8 # 8-byte Folded Reload + movq %r8, -72(%rsp) # 8-byte Spill + adcq -112(%rsp), %rax # 8-byte Folded Reload + movq %rax, -64(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rbx + movq %rbx, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -128(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r14 + movq %rbx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rax, %r10 + movq %rdx, %r12 + addq %r14, %r12 + adcq -128(%rsp), %rcx # 8-byte Folded Reload + adcq -120(%rsp), %r9 # 8-byte Folded Reload + adcq -112(%rsp), %r8 # 8-byte Folded Reload + adcq -104(%rsp), %r11 # 8-byte Folded Reload + movq -80(%rsp), %rdx # 8-byte Reload + adcq -88(%rsp), %rdx # 8-byte Folded Reload + movq -56(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %rsi, %r10 + adcq %rdi, %r12 + adcq %rbp, %rcx + adcq %r13, %r9 + adcq %r15, %r8 + adcq -72(%rsp), %r11 # 8-byte Folded Reload + adcq -64(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, -56(%rsp) # 8-byte Spill + movq %r10, %rbx + imulq 24(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, %rax + mulq 72(%rsp) # 8-byte Folded Reload + movq %rdx, -104(%rsp) # 8-byte Spill + movq %rax, -64(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq 64(%rsp) # 8-byte Folded Reload + movq %rdx, -112(%rsp) # 8-byte Spill + movq %rax, -72(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq 56(%rsp) # 8-byte Folded Reload + 
movq %rdx, -120(%rsp) # 8-byte Spill + movq %rax, %r14 + movq %rbx, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, -128(%rsp) # 8-byte Spill + movq %rax, %rdi + movq %rbx, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %rbp + movq %rbx, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %rsi + movq %rbx, %rax + mulq 32(%rsp) # 8-byte Folded Reload + addq %r10, %rax + adcq %r12, %rsi + adcq %rcx, %rbp + adcq %r9, %rdi + adcq %r8, %r14 + movq -72(%rsp), %rcx # 8-byte Reload + adcq %r11, %rcx + movq -64(%rsp), %r8 # 8-byte Reload + adcq -80(%rsp), %r8 # 8-byte Folded Reload + movq -56(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %rdx, %rsi + adcq %r13, %rbp + adcq %r15, %rdi + movq %rdi, -88(%rsp) # 8-byte Spill + adcq -128(%rsp), %r14 # 8-byte Folded Reload + movq %r14, -80(%rsp) # 8-byte Spill + adcq -120(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -72(%rsp) # 8-byte Spill + adcq -112(%rsp), %r8 # 8-byte Folded Reload + movq %r8, -64(%rsp) # 8-byte Spill + adcq -104(%rsp), %rax # 8-byte Folded Reload + movq %rax, -56(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rcx + movq %rcx, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, -104(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r12 + movq %rcx, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r9 + movq %rcx, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %r11 + movq %rcx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rax, %r13 + movq %rdx, %rcx + addq %r11, %rcx + adcq %r9, %r15 + adcq %r12, %rbx + adcq -120(%rsp), %rdi # 8-byte Folded Reload + adcq -112(%rsp), %r14 # 8-byte Folded Reload + adcq -104(%rsp), %r10 # 8-byte Folded Reload + adcq $0, %r8 + addq %rsi, %r13 + adcq %rbp, %rcx + adcq -88(%rsp), %r15 # 8-byte Folded Reload + adcq -80(%rsp), %rbx # 8-byte Folded Reload + adcq -72(%rsp), %rdi # 8-byte Folded Reload + adcq -64(%rsp), %r14 # 8-byte Folded Reload + adcq -56(%rsp), %r10 # 8-byte Folded Reload + adcq $0, %r8 + movq %r13, %r9 + imulq 24(%rsp), %r9 # 8-byte Folded Reload + movq %r9, %rax + mulq 72(%rsp) # 8-byte Folded Reload + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, -64(%rsp) # 8-byte Spill + movq %r9, %rax + mulq 64(%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, -72(%rsp) # 8-byte Spill + movq %r9, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, -88(%rsp) # 8-byte Spill + movq %rax, %rbp + movq %r9, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, -104(%rsp) # 8-byte Spill + movq %rax, %r11 + movq %r9, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, -112(%rsp) # 8-byte Spill + movq %rax, %r12 + movq %r9, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, -120(%rsp) # 8-byte Spill + movq %rax, %rsi + movq %r9, %rax + mulq 32(%rsp) # 8-byte Folded Reload + addq %r13, %rax + adcq %rcx, %rsi + adcq %r15, %r12 + adcq %rbx, %r11 + adcq %rdi, %rbp + movq -72(%rsp), %rcx # 8-byte Reload + adcq %r14, %rcx + movq -64(%rsp), %rax # 8-byte Reload + adcq %r10, %rax + adcq $0, %r8 + addq %rdx, %rsi + adcq -120(%rsp), %r12 # 8-byte Folded Reload + adcq -112(%rsp), %r11 # 8-byte Folded Reload + 
adcq -104(%rsp), %rbp # 8-byte Folded Reload + adcq -88(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -72(%rsp) # 8-byte Spill + adcq -80(%rsp), %rax # 8-byte Folded Reload + movq %rax, -64(%rsp) # 8-byte Spill + adcq -56(%rsp), %r8 # 8-byte Folded Reload + movq 16(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rdi + movq %rdi, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, -80(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rdx, -88(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -112(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, %rbx + movq %rdi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r9 + movq %rdi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rax, %r14 + movq %rdx, %rdi + addq %r9, %rdi + adcq %rbx, %rcx + adcq -120(%rsp), %r10 # 8-byte Folded Reload + adcq -112(%rsp), %r13 # 8-byte Folded Reload + adcq -104(%rsp), %r15 # 8-byte Folded Reload + movq -88(%rsp), %rdx # 8-byte Reload + adcq -80(%rsp), %rdx # 8-byte Folded Reload + movq -56(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %rsi, %r14 + adcq %r12, %rdi + adcq %r11, %rcx + adcq %rbp, %r10 + adcq -72(%rsp), %r13 # 8-byte Folded Reload + adcq -64(%rsp), %r15 # 8-byte Folded Reload + adcq %r8, %rdx + movq %rdx, -88(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, -56(%rsp) # 8-byte Spill + movq %r14, %rsi + imulq 24(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, %rax + mulq 72(%rsp) # 8-byte Folded Reload + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, -72(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 64(%rsp) # 8-byte Folded Reload + movq %rdx, -104(%rsp) # 8-byte Spill + movq %rax, -80(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, -112(%rsp) # 8-byte Spill + movq %rax, %r12 + movq %rsi, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, -120(%rsp) # 8-byte Spill + movq %rax, %r8 + movq %rsi, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, %rbp + movq %rsi, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, %rbx + movq %rsi, %rax + mulq 32(%rsp) # 8-byte Folded Reload + addq %r14, %rax + adcq %rdi, %rbx + adcq %rcx, %rbp + adcq %r10, %r8 + adcq %r13, %r12 + movq -80(%rsp), %rsi # 8-byte Reload + adcq %r15, %rsi + movq -72(%rsp), %rcx # 8-byte Reload + adcq -88(%rsp), %rcx # 8-byte Folded Reload + movq -56(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %rdx, %rbx + adcq %r9, %rbp + adcq %r11, %r8 + adcq -120(%rsp), %r12 # 8-byte Folded Reload + adcq -112(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -80(%rsp) # 8-byte Spill + adcq -104(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -72(%rsp) # 8-byte Spill + adcq -64(%rsp), %rax # 8-byte Folded Reload + movq %rax, -56(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rsi + movq %rsi, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, -104(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rdx, -88(%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, -120(%rsp) # 8-byte 
Spill + movq %rsi, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -128(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r14 + movq %rsi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r15 + movq %rsi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rax, %r11 + movq %rdx, %r10 + addq %r15, %r10 + adcq %r14, %rdi + adcq -128(%rsp), %rcx # 8-byte Folded Reload + adcq -120(%rsp), %r9 # 8-byte Folded Reload + adcq -112(%rsp), %r13 # 8-byte Folded Reload + movq -88(%rsp), %rdx # 8-byte Reload + adcq -104(%rsp), %rdx # 8-byte Folded Reload + movq -64(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %rbx, %r11 + adcq %rbp, %r10 + adcq %r8, %rdi + adcq %r12, %rcx + adcq -80(%rsp), %r9 # 8-byte Folded Reload + adcq -72(%rsp), %r13 # 8-byte Folded Reload + adcq -56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -88(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, -64(%rsp) # 8-byte Spill + movq %r11, %rsi + imulq 24(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, %rax + mulq 72(%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, -56(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 64(%rsp) # 8-byte Folded Reload + movq %rdx, -104(%rsp) # 8-byte Spill + movq %rax, -72(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, -112(%rsp) # 8-byte Spill + movq %rax, %r14 + movq %rsi, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, -120(%rsp) # 8-byte Spill + movq %rax, %r12 + movq %rsi, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %rbp + movq %rsi, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %rbx + movq %rsi, %rax + mulq 32(%rsp) # 8-byte Folded Reload + addq %r11, %rax + adcq %r10, %rbx + adcq %rdi, %rbp + adcq %rcx, %r12 + adcq %r9, %r14 + movq -72(%rsp), %rdi # 8-byte Reload + adcq %r13, %rdi + movq -56(%rsp), %rcx # 8-byte Reload + adcq -88(%rsp), %rcx # 8-byte Folded Reload + movq -64(%rsp), %rax # 8-byte Reload + adcq $0, %rax + addq %rdx, %rbx + adcq %r8, %rbp + adcq %r15, %r12 + adcq -120(%rsp), %r14 # 8-byte Folded Reload + movq %r14, -88(%rsp) # 8-byte Spill + adcq -112(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, -72(%rsp) # 8-byte Spill + adcq -104(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -56(%rsp) # 8-byte Spill + adcq -80(%rsp), %rax # 8-byte Folded Reload + movq %rax, -64(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 40(%rax), %rcx + movq %rcx, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -104(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, -120(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, -128(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r15 + movq %rcx, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r14 + movq %rcx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rax, %r11 + movq %rdx, %r10 + addq %r14, %r10 + adcq %r15, %r8 + adcq -128(%rsp), %rdi # 8-byte Folded Reload + adcq -120(%rsp), %rsi # 8-byte Folded Reload + adcq -112(%rsp), %r13 # 8-byte Folded Reload + movq -80(%rsp), %rax # 8-byte Reload + adcq -104(%rsp), %rax # 
8-byte Folded Reload + adcq $0, %r9 + addq %rbx, %r11 + adcq %rbp, %r10 + adcq %r12, %r8 + adcq -88(%rsp), %rdi # 8-byte Folded Reload + adcq -72(%rsp), %rsi # 8-byte Folded Reload + adcq -56(%rsp), %r13 # 8-byte Folded Reload + adcq -64(%rsp), %rax # 8-byte Folded Reload + movq %rax, -80(%rsp) # 8-byte Spill + adcq $0, %r9 + movq %r11, %rbx + imulq 24(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, %rax + mulq 72(%rsp) # 8-byte Folded Reload + movq %rdx, -88(%rsp) # 8-byte Spill + movq %rax, -56(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq 64(%rsp) # 8-byte Folded Reload + movq %rdx, -104(%rsp) # 8-byte Spill + movq %rax, -64(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, -112(%rsp) # 8-byte Spill + movq %rax, %r12 + movq %rbx, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, %r15 + movq %rbx, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, -120(%rsp) # 8-byte Spill + movq %rax, %rbp + movq %rbx, %rax + mulq -8(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, %rcx + movq %rbx, %rax + mulq 32(%rsp) # 8-byte Folded Reload + addq %r11, %rax + adcq %r10, %rcx + adcq %r8, %rbp + adcq %rdi, %r15 + adcq %rsi, %r12 + movq -64(%rsp), %rsi # 8-byte Reload + adcq %r13, %rsi + movq -56(%rsp), %rax # 8-byte Reload + adcq -80(%rsp), %rax # 8-byte Folded Reload + adcq $0, %r9 + addq %rdx, %rcx + adcq %r14, %rbp + adcq -120(%rsp), %r15 # 8-byte Folded Reload + adcq -72(%rsp), %r12 # 8-byte Folded Reload + movq %r12, -72(%rsp) # 8-byte Spill + adcq -112(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -64(%rsp) # 8-byte Spill + adcq -104(%rsp), %rax # 8-byte Folded Reload + movq %rax, -56(%rsp) # 8-byte Spill + adcq -88(%rsp), %r9 # 8-byte Folded Reload + movq 16(%rsp), %rax # 8-byte Reload + movq 48(%rax), %rdi + movq %rdi, %rax + mulq 8(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -80(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rdx, 16(%rsp) # 8-byte Spill + movq %rax, (%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -48(%rsp) # 8-byte Folded Reload + movq %rdx, 8(%rsp) # 8-byte Spill + movq %rax, -48(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -40(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, -40(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq -32(%rsp) # 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, %rsi + movq %rdi, %rax + mulq -24(%rsp) # 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, %rbx + movq %rdi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rax, %r12 + movq %rdx, %r8 + addq %rbx, %r8 + adcq %rsi, %r10 + adcq -40(%rsp), %r11 # 8-byte Folded Reload + adcq -48(%rsp), %r13 # 8-byte Folded Reload + movq 8(%rsp), %rdx # 8-byte Reload + adcq (%rsp), %rdx # 8-byte Folded Reload + movq 16(%rsp), %rax # 8-byte Reload + adcq -80(%rsp), %rax # 8-byte Folded Reload + adcq $0, %r14 + addq %rcx, %r12 + adcq %rbp, %r8 + adcq %r15, %r10 + adcq -72(%rsp), %r11 # 8-byte Folded Reload + adcq -64(%rsp), %r13 # 8-byte Folded Reload + adcq -56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 8(%rsp) # 8-byte Spill + adcq %r9, %rax + movq %rax, 16(%rsp) # 8-byte Spill + adcq $0, %r14 + movq 24(%rsp), %rdi # 8-byte Reload + imulq %r12, %rdi + movq %rdi, %rax + mulq 72(%rsp) # 8-byte Folded Reload + movq %rdx, 24(%rsp) # 8-byte Spill + movq %rax, %r9 + movq %rdi, %rax + mulq 64(%rsp) # 8-byte Folded Reload + movq %rdx, (%rsp) # 8-byte Spill + movq %rax, %rbp + movq %rdi, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, 
-16(%rsp) # 8-byte Spill + movq %rax, %rsi + movq %rdi, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, -24(%rsp) # 8-byte Spill + movq %rax, %rcx + movq %rdi, %rax + mulq 32(%rsp) # 8-byte Folded Reload + movq %rdx, -32(%rsp) # 8-byte Spill + movq %rax, %r15 + movq %rdi, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rax, %rbx + movq %rdi, %rax + movq -8(%rsp), %rdi # 8-byte Reload + mulq %rdi + addq %r12, %r15 + adcq %r8, %rax + adcq %r10, %rbx + adcq %r11, %rcx + adcq %r13, %rsi + adcq 8(%rsp), %rbp # 8-byte Folded Reload + adcq 16(%rsp), %r9 # 8-byte Folded Reload + adcq $0, %r14 + addq -32(%rsp), %rax # 8-byte Folded Reload + adcq %rdx, %rbx + adcq -40(%rsp), %rcx # 8-byte Folded Reload + adcq -24(%rsp), %rsi # 8-byte Folded Reload + adcq -16(%rsp), %rbp # 8-byte Folded Reload + adcq (%rsp), %r9 # 8-byte Folded Reload + adcq 24(%rsp), %r14 # 8-byte Folded Reload + movq %rax, %r13 + subq 32(%rsp), %r13 # 8-byte Folded Reload + movq %rbx, %r12 + sbbq %rdi, %r12 + movq %rcx, %r8 + sbbq 40(%rsp), %r8 # 8-byte Folded Reload + movq %rsi, %r10 + sbbq 48(%rsp), %r10 # 8-byte Folded Reload + movq %rbp, %r11 + sbbq 56(%rsp), %r11 # 8-byte Folded Reload + movq %r9, %r15 + sbbq 64(%rsp), %r15 # 8-byte Folded Reload + movq %r14, %rdx + sbbq 72(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, %rdi + sarq $63, %rdi + cmovsq %rax, %r13 + movq -96(%rsp), %rax # 8-byte Reload + movq %r13, (%rax) + cmovsq %rbx, %r12 + movq %r12, 8(%rax) + cmovsq %rcx, %r8 + movq %r8, 16(%rax) + cmovsq %rsi, %r10 + movq %r10, 24(%rax) + cmovsq %rbp, %r11 + movq %r11, 32(%rax) + cmovsq %r9, %r15 + movq %r15, 40(%rax) + cmovsq %r14, %rdx + movq %rdx, 48(%rax) + addq $80, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end99: + .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L + + .globl mcl_fp_montRed7L + .align 16, 0x90 + .type mcl_fp_montRed7L,@function +mcl_fp_montRed7L: # @mcl_fp_montRed7L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $64, %rsp + movq %rdx, %rcx + movq %rdi, -104(%rsp) # 8-byte Spill + movq -8(%rcx), %rax + movq %rax, 8(%rsp) # 8-byte Spill + movq (%rcx), %rdx + movq %rdx, 32(%rsp) # 8-byte Spill + movq (%rsi), %rbp + movq %rbp, 24(%rsp) # 8-byte Spill + imulq %rax, %rbp + movq 48(%rcx), %rdx + movq %rdx, -16(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rax, -8(%rsp) # 8-byte Spill + movq %rdx, -48(%rsp) # 8-byte Spill + movq 40(%rcx), %rdx + movq %rdx, (%rsp) # 8-byte Spill + movq 32(%rcx), %r10 + movq %r10, 56(%rsp) # 8-byte Spill + movq 24(%rcx), %rdi + movq %rdi, 48(%rsp) # 8-byte Spill + movq 16(%rcx), %rbx + movq %rbx, 40(%rsp) # 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, 16(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rdx, %r13 + movq %rax, %r9 + movq %rbp, %rax + mulq %r10 + movq %rdx, %r15 + movq %rax, %r11 + movq %rbp, %rax + mulq %rdi + movq %rdx, %r10 + movq %rax, %r8 + movq %rbp, %rax + mulq %rbx + movq %rdx, %r14 + movq %rax, %rbx + movq %rbp, %rax + mulq %rcx + movq %rdx, %r12 + movq %rax, %rdi + movq %rbp, %rax + mulq 32(%rsp) # 8-byte Folded Reload + movq %rdx, %rbp + addq %rdi, %rbp + adcq %rbx, %r12 + adcq %r8, %r14 + adcq %r11, %r10 + adcq %r9, %r15 + adcq -8(%rsp), %r13 # 8-byte Folded Reload + movq -48(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq 24(%rsp), %rax # 8-byte Folded Reload + adcq 8(%rsi), %rbp + adcq 16(%rsi), %r12 + adcq 24(%rsi), %r14 + adcq 32(%rsi), %r10 + adcq 40(%rsi), %r15 + adcq 
48(%rsi), %r13 + movq %r13, -80(%rsp) # 8-byte Spill + adcq 56(%rsi), %rdx + movq %rdx, -48(%rsp) # 8-byte Spill + movq 104(%rsi), %r8 + movq 96(%rsi), %rdx + movq 88(%rsi), %rdi + movq 80(%rsi), %rbx + movq 72(%rsi), %rax + movq 64(%rsi), %rsi + adcq $0, %rsi + movq %rsi, -88(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, -96(%rsp) # 8-byte Spill + adcq $0, %rbx + movq %rbx, -40(%rsp) # 8-byte Spill + adcq $0, %rdi + movq %rdi, -32(%rsp) # 8-byte Spill + adcq $0, %rdx + movq %rdx, -24(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, -8(%rsp) # 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, 24(%rsp) # 8-byte Spill + movq %rbp, %rdi + imulq 8(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, -120(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, -128(%rsp) # 8-byte Spill + movq %rdi, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %rbx + movq %rdi, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r11 + movq %rdi, %rax + mulq %rcx + movq %rdx, %r8 + movq %rax, %rcx + movq %rdi, %rax + mulq 32(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + addq %rcx, %r9 + adcq %r11, %r8 + adcq %rbx, %rsi + adcq -128(%rsp), %r13 # 8-byte Folded Reload + movq -72(%rsp), %rdi # 8-byte Reload + adcq -120(%rsp), %rdi # 8-byte Folded Reload + movq -64(%rsp), %rdx # 8-byte Reload + adcq -112(%rsp), %rdx # 8-byte Folded Reload + movq -56(%rsp), %rcx # 8-byte Reload + adcq $0, %rcx + addq %rbp, %rax + adcq %r12, %r9 + adcq %r14, %r8 + adcq %r10, %rsi + adcq %r15, %r13 + adcq -80(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, -72(%rsp) # 8-byte Spill + adcq -48(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -64(%rsp) # 8-byte Spill + adcq -88(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -56(%rsp) # 8-byte Spill + adcq $0, -96(%rsp) # 8-byte Folded Spill + adcq $0, -40(%rsp) # 8-byte Folded Spill + adcq $0, -32(%rsp) # 8-byte Folded Spill + adcq $0, -24(%rsp) # 8-byte Folded Spill + movq -8(%rsp), %rbx # 8-byte Reload + adcq $0, %rbx + adcq $0, 24(%rsp) # 8-byte Folded Spill + movq %r9, %rcx + imulq 8(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, -8(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, -88(%rsp) # 8-byte Spill + movq %rax, -120(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %r11 + movq %rcx, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r12 + movq %rcx, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r14 + movq %rcx, %rax + mulq 32(%rsp) # 8-byte Folded Reload + movq %rdx, %r10 + addq %r14, %r10 + adcq %r12, %rdi + adcq %r11, %rbp + adcq -120(%rsp), %r15 # 8-byte Folded Reload + movq -88(%rsp), %r11 # 8-byte Reload + adcq -112(%rsp), %r11 # 8-byte Folded Reload + movq -80(%rsp), %rdx # 8-byte Reload + adcq -8(%rsp), %rdx # 8-byte Folded Reload + movq -48(%rsp), %rcx # 8-byte Reload + adcq $0, %rcx + addq %r9, %rax + adcq %r8, %r10 + adcq %rsi, %rdi + 
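+ # NOTE (annotation): mcl_fp_montRed7L peels one limb per unrolled round: q = t[0]*p',
+ # add q*p, shift down one word, then absorb the next limb of the 14-word input;
+ # the carries ripple through the spilled words on the stack.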
adcq %r13, %rbp + adcq -72(%rsp), %r15 # 8-byte Folded Reload + adcq -64(%rsp), %r11 # 8-byte Folded Reload + movq %r11, -88(%rsp) # 8-byte Spill + adcq -56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -80(%rsp) # 8-byte Spill + adcq -96(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -48(%rsp) # 8-byte Spill + adcq $0, -40(%rsp) # 8-byte Folded Spill + adcq $0, -32(%rsp) # 8-byte Folded Spill + adcq $0, -24(%rsp) # 8-byte Folded Spill + adcq $0, %rbx + movq %rbx, -8(%rsp) # 8-byte Spill + adcq $0, 24(%rsp) # 8-byte Folded Spill + movq %r10, %rbx + imulq 8(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, %rax + movq -16(%rsp), %r12 # 8-byte Reload + mulq %r12 + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, -96(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, -112(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, -72(%rsp) # 8-byte Spill + movq %rax, -120(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r14 + movq %rbx, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r13 + movq %rbx, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r11 + movq %rbx, %rax + mulq 32(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + addq %r11, %r9 + adcq %r13, %rcx + adcq %r14, %rsi + adcq -120(%rsp), %r8 # 8-byte Folded Reload + movq -72(%rsp), %r11 # 8-byte Reload + adcq -112(%rsp), %r11 # 8-byte Folded Reload + movq -64(%rsp), %rbx # 8-byte Reload + adcq -96(%rsp), %rbx # 8-byte Folded Reload + movq -56(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %r10, %rax + adcq %rdi, %r9 + adcq %rbp, %rcx + adcq %r15, %rsi + adcq -88(%rsp), %r8 # 8-byte Folded Reload + adcq -80(%rsp), %r11 # 8-byte Folded Reload + movq %r11, -72(%rsp) # 8-byte Spill + adcq -48(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, -64(%rsp) # 8-byte Spill + adcq -40(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -56(%rsp) # 8-byte Spill + adcq $0, -32(%rsp) # 8-byte Folded Spill + adcq $0, -24(%rsp) # 8-byte Folded Spill + adcq $0, -8(%rsp) # 8-byte Folded Spill + adcq $0, 24(%rsp) # 8-byte Folded Spill + movq %r9, %rbp + imulq 8(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, %rax + mulq %r12 + movq %rdx, -40(%rsp) # 8-byte Spill + movq %rax, -80(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq (%rsp) # 8-byte Folded Reload + movq %rdx, -48(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -96(%rsp) # 8-byte Spill + movq %rbp, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %r14 + movq %rbp, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r11 + movq %rbp, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r12 + movq %rbp, %rax + movq 32(%rsp), %rbp # 8-byte Reload + mulq %rbp + movq %rdx, %r10 + addq %r12, %r10 + adcq %r11, %rbx + adcq %r14, %rdi + adcq -96(%rsp), %r13 # 8-byte Folded Reload + adcq -88(%rsp), %r15 # 8-byte Folded Reload + movq -48(%rsp), %r11 # 8-byte Reload + adcq -80(%rsp), %r11 # 8-byte Folded Reload + movq -40(%rsp), %rdx # 8-byte Reload + adcq $0, %rdx + addq %r9, %rax + adcq %rcx, %r10 + adcq %rsi, %rbx + adcq %r8, %rdi + adcq -72(%rsp), %r13 # 8-byte Folded Reload + adcq -64(%rsp), %r15 # 8-byte Folded Reload + adcq -56(%rsp), %r11 # 8-byte Folded Reload + movq %r11, -48(%rsp) # 8-byte Spill + adcq -32(%rsp), 
%rdx # 8-byte Folded Reload + movq %rdx, -40(%rsp) # 8-byte Spill + adcq $0, -24(%rsp) # 8-byte Folded Spill + adcq $0, -8(%rsp) # 8-byte Folded Spill + adcq $0, 24(%rsp) # 8-byte Folded Spill + movq %r10, %rsi + imulq 8(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, -32(%rsp) # 8-byte Spill + movq %rax, -72(%rsp) # 8-byte Spill + movq %rsi, %rax + movq (%rsp), %r8 # 8-byte Reload + mulq %r8 + movq %rdx, -56(%rsp) # 8-byte Spill + movq %rax, -80(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, -64(%rsp) # 8-byte Spill + movq %rax, -88(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, -96(%rsp) # 8-byte Spill + movq %rsi, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, %r11 + movq %rsi, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, %rcx + movq %rsi, %rax + mulq %rbp + movq %rdx, %rbp + addq %rcx, %rbp + adcq %r11, %r14 + adcq -96(%rsp), %r9 # 8-byte Folded Reload + adcq -88(%rsp), %r12 # 8-byte Folded Reload + movq -64(%rsp), %rsi # 8-byte Reload + adcq -80(%rsp), %rsi # 8-byte Folded Reload + movq -56(%rsp), %rdx # 8-byte Reload + adcq -72(%rsp), %rdx # 8-byte Folded Reload + movq -32(%rsp), %rcx # 8-byte Reload + adcq $0, %rcx + addq %r10, %rax + adcq %rbx, %rbp + adcq %rdi, %r14 + adcq %r13, %r9 + adcq %r15, %r12 + adcq -48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, -64(%rsp) # 8-byte Spill + adcq -40(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -56(%rsp) # 8-byte Spill + adcq -24(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -32(%rsp) # 8-byte Spill + adcq $0, -8(%rsp) # 8-byte Folded Spill + adcq $0, 24(%rsp) # 8-byte Folded Spill + movq 8(%rsp), %rcx # 8-byte Reload + imulq %rbp, %rcx + movq %rcx, %rax + mulq -16(%rsp) # 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, 8(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq %r8 + movq %rdx, %r13 + movq %rax, -24(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq 56(%rsp) # 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -40(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq 48(%rsp) # 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, -48(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq 40(%rsp) # 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r8 + movq %rcx, %rax + mulq 16(%rsp) # 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r11 + movq %rcx, %rax + mulq 32(%rsp) # 8-byte Folded Reload + addq %r11, %rdx + adcq %r8, %rbx + adcq -48(%rsp), %rdi # 8-byte Folded Reload + adcq -40(%rsp), %r10 # 8-byte Folded Reload + adcq -24(%rsp), %r15 # 8-byte Folded Reload + adcq 8(%rsp), %r13 # 8-byte Folded Reload + adcq $0, %rsi + addq %rbp, %rax + adcq %r14, %rdx + adcq %r9, %rbx + adcq %r12, %rdi + adcq -64(%rsp), %r10 # 8-byte Folded Reload + adcq -56(%rsp), %r15 # 8-byte Folded Reload + adcq -32(%rsp), %r13 # 8-byte Folded Reload + adcq -8(%rsp), %rsi # 8-byte Folded Reload + movq 24(%rsp), %rcx # 8-byte Reload + adcq $0, %rcx + movq %rdx, %rax + subq 32(%rsp), %rax # 8-byte Folded Reload + movq %rbx, %rbp + sbbq 16(%rsp), %rbp # 8-byte Folded Reload + movq %rdi, %r8 + sbbq 40(%rsp), %r8 # 8-byte Folded Reload + movq %r10, %r9 + sbbq 48(%rsp), %r9 # 8-byte Folded Reload + movq %r15, %r11 + sbbq 56(%rsp), %r11 # 8-byte Folded Reload + movq %r13, %r14 + sbbq (%rsp), %r14 # 8-byte Folded Reload + movq %rsi, %r12 + sbbq -16(%rsp), %r12 # 8-byte Folded Reload + sbbq $0, %rcx + andl $1, %ecx + cmovneq %rsi, %r12 + testb %cl, %cl 
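+ # %cl holds the borrow of the final t - p computed above: the cmovne chain keeps the
+ # unreduced limbs when the subtraction borrowed (t was already < p), and otherwise
+ # stores the subtracted, fully reduced values.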
+ cmovneq %rdx, %rax + movq -104(%rsp), %rcx # 8-byte Reload + movq %rax, (%rcx) + cmovneq %rbx, %rbp + movq %rbp, 8(%rcx) + cmovneq %rdi, %r8 + movq %r8, 16(%rcx) + cmovneq %r10, %r9 + movq %r9, 24(%rcx) + cmovneq %r15, %r11 + movq %r11, 32(%rcx) + cmovneq %r13, %r14 + movq %r14, 40(%rcx) + movq %r12, 48(%rcx) + addq $64, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end100: + .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L + + .globl mcl_fp_addPre7L + .align 16, 0x90 + .type mcl_fp_addPre7L,@function +mcl_fp_addPre7L: # @mcl_fp_addPre7L +# BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r8 + movq 48(%rsi), %r14 + movq 40(%rdx), %r9 + movq 40(%rsi), %r15 + movq 32(%rdx), %r10 + movq 24(%rdx), %r11 + movq 16(%rdx), %r12 + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + movq 24(%rsi), %rax + movq 32(%rsi), %rbx + adcq 16(%rsi), %r12 + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %r12, 16(%rdi) + adcq %r11, %rax + movq %rax, 24(%rdi) + adcq %r10, %rbx + movq %rbx, 32(%rdi) + adcq %r9, %r15 + movq %r15, 40(%rdi) + adcq %r8, %r14 + movq %r14, 48(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq +.Lfunc_end101: + .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L + + .globl mcl_fp_subPre7L + .align 16, 0x90 + .type mcl_fp_subPre7L,@function +mcl_fp_subPre7L: # @mcl_fp_subPre7L +# BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r8 + movq 48(%rsi), %r10 + movq 40(%rdx), %r9 + movq 40(%rsi), %r15 + movq 24(%rdx), %r11 + movq 32(%rdx), %r14 + movq (%rsi), %rbx + movq 8(%rsi), %r12 + xorl %eax, %eax + subq (%rdx), %rbx + sbbq 8(%rdx), %r12 + movq 16(%rsi), %rcx + sbbq 16(%rdx), %rcx + movq 32(%rsi), %rdx + movq 24(%rsi), %rsi + movq %rbx, (%rdi) + movq %r12, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r11, %rsi + movq %rsi, 24(%rdi) + sbbq %r14, %rdx + movq %rdx, 32(%rdi) + sbbq %r9, %r15 + movq %r15, 40(%rdi) + sbbq %r8, %r10 + movq %r10, 48(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq +.Lfunc_end102: + .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L + + .globl mcl_fp_shr1_7L + .align 16, 0x90 + .type mcl_fp_shr1_7L,@function +mcl_fp_shr1_7L: # @mcl_fp_shr1_7L +# BB#0: + movq 48(%rsi), %r8 + movq 40(%rsi), %r9 + movq 32(%rsi), %r10 + movq 24(%rsi), %rax + movq 16(%rsi), %rcx + movq (%rsi), %rdx + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rdx + movq %rdx, (%rdi) + shrdq $1, %rcx, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rax, %rcx + movq %rcx, 16(%rdi) + shrdq $1, %r10, %rax + movq %rax, 24(%rdi) + shrdq $1, %r9, %r10 + movq %r10, 32(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 40(%rdi) + shrq %r8 + movq %r8, 48(%rdi) + retq +.Lfunc_end103: + .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L + + .globl mcl_fp_add7L + .align 16, 0x90 + .type mcl_fp_add7L,@function +mcl_fp_add7L: # @mcl_fp_add7L +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r14 + movq 48(%rsi), %r8 + movq 40(%rdx), %r15 + movq 40(%rsi), %r9 + movq 32(%rdx), %r12 + movq 24(%rdx), %r13 + movq 16(%rdx), %r10 + movq (%rdx), %r11 + movq 8(%rdx), %rdx + addq (%rsi), %r11 + adcq 8(%rsi), %rdx + movq 24(%rsi), %rax + movq 32(%rsi), %rbx + adcq 16(%rsi), %r10 + movq %r11, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + adcq %r13, %rax + movq %rax, 24(%rdi) + adcq %r12, %rbx + movq %rbx, 32(%rdi) + adcq %r15, %r9 + movq %r9, 40(%rdi) + adcq %r14, %r8 + movq %r8, 48(%rdi) + 
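+ # mcl_fp_add7L stored the raw 7-limb sum above; it now records the carry, subtracts
+ # the modulus, and branches over the reduced store (the nocarry block) whenever a
+ # borrow or the earlier carry survives, leaving the unreduced sum in place.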
sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %r11 + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r10 + sbbq 24(%rcx), %rax + sbbq 32(%rcx), %rbx + sbbq 40(%rcx), %r9 + sbbq 48(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne .LBB104_2 +# BB#1: # %nocarry + movq %r11, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + movq %rax, 24(%rdi) + movq %rbx, 32(%rdi) + movq %r9, 40(%rdi) + movq %r8, 48(%rdi) +.LBB104_2: # %carry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end104: + .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L + + .globl mcl_fp_addNF7L + .align 16, 0x90 + .type mcl_fp_addNF7L,@function +mcl_fp_addNF7L: # @mcl_fp_addNF7L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r9 + movq 40(%rdx), %rbp + movq 32(%rdx), %r10 + movq 24(%rdx), %r11 + movq 16(%rdx), %r14 + movq (%rdx), %r12 + movq 8(%rdx), %r15 + addq (%rsi), %r12 + adcq 8(%rsi), %r15 + adcq 16(%rsi), %r14 + adcq 24(%rsi), %r11 + adcq 32(%rsi), %r10 + adcq 40(%rsi), %rbp + movq %rbp, -8(%rsp) # 8-byte Spill + adcq 48(%rsi), %r9 + movq %r12, %rsi + subq (%rcx), %rsi + movq %r15, %rdx + sbbq 8(%rcx), %rdx + movq %r14, %rax + sbbq 16(%rcx), %rax + movq %r11, %rbx + sbbq 24(%rcx), %rbx + movq %r10, %r13 + sbbq 32(%rcx), %r13 + sbbq 40(%rcx), %rbp + movq %r9, %r8 + sbbq 48(%rcx), %r8 + movq %r8, %rcx + sarq $63, %rcx + cmovsq %r12, %rsi + movq %rsi, (%rdi) + cmovsq %r15, %rdx + movq %rdx, 8(%rdi) + cmovsq %r14, %rax + movq %rax, 16(%rdi) + cmovsq %r11, %rbx + movq %rbx, 24(%rdi) + cmovsq %r10, %r13 + movq %r13, 32(%rdi) + cmovsq -8(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 40(%rdi) + cmovsq %r9, %r8 + movq %r8, 48(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end105: + .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L + + .globl mcl_fp_sub7L + .align 16, 0x90 + .type mcl_fp_sub7L,@function +mcl_fp_sub7L: # @mcl_fp_sub7L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r14 + movq 48(%rsi), %r8 + movq 40(%rdx), %r15 + movq 40(%rsi), %r9 + movq 32(%rdx), %r12 + movq (%rsi), %rax + movq 8(%rsi), %r11 + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %r11 + movq 16(%rsi), %r13 + sbbq 16(%rdx), %r13 + movq 32(%rsi), %r10 + movq 24(%rsi), %rsi + sbbq 24(%rdx), %rsi + movq %rax, (%rdi) + movq %r11, 8(%rdi) + movq %r13, 16(%rdi) + movq %rsi, 24(%rdi) + sbbq %r12, %r10 + movq %r10, 32(%rdi) + sbbq %r15, %r9 + movq %r9, 40(%rdi) + sbbq %r14, %r8 + movq %r8, 48(%rdi) + sbbq $0, %rbx + testb $1, %bl + je .LBB106_2 +# BB#1: # %carry + movq 48(%rcx), %r14 + movq 40(%rcx), %r15 + movq 32(%rcx), %r12 + movq 24(%rcx), %rbx + movq 8(%rcx), %rdx + movq 16(%rcx), %rbp + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %r11, %rdx + movq %rdx, 8(%rdi) + adcq %r13, %rbp + movq %rbp, 16(%rdi) + adcq %rsi, %rbx + movq %rbx, 24(%rdi) + adcq %r10, %r12 + movq %r12, 32(%rdi) + adcq %r9, %r15 + movq %r15, 40(%rdi) + adcq %r8, %r14 + movq %r14, 48(%rdi) +.LBB106_2: # %nocarry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end106: + .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L + + .globl mcl_fp_subNF7L + .align 16, 0x90 + .type mcl_fp_subNF7L,@function +mcl_fp_subNF7L: # @mcl_fp_subNF7L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 48(%rsi), %r12 + movq 40(%rsi), %rax + movq 32(%rsi), %r9 + movq 24(%rsi), %r10 + movq 16(%rsi), %r11 + movq (%rsi), %r14 + movq 8(%rsi), 
%r15 + subq (%rdx), %r14 + sbbq 8(%rdx), %r15 + sbbq 16(%rdx), %r11 + sbbq 24(%rdx), %r10 + sbbq 32(%rdx), %r9 + sbbq 40(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + sbbq 48(%rdx), %r12 + movq %r12, %rax + sarq $63, %rax + movq %rax, %rsi + shldq $1, %r12, %rsi + andq (%r8), %rsi + movq 48(%r8), %r13 + andq %rax, %r13 + movq 40(%r8), %rbx + andq %rax, %rbx + movq 32(%r8), %rdx + andq %rax, %rdx + movq 24(%r8), %rbp + andq %rax, %rbp + movq 16(%r8), %rcx + andq %rax, %rcx + andq 8(%r8), %rax + addq %r14, %rsi + adcq %r15, %rax + movq %rsi, (%rdi) + movq %rax, 8(%rdi) + adcq %r11, %rcx + movq %rcx, 16(%rdi) + adcq %r10, %rbp + movq %rbp, 24(%rdi) + adcq %r9, %rdx + movq %rdx, 32(%rdi) + adcq -8(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, 40(%rdi) + adcq %r12, %r13 + movq %r13, 48(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end107: + .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L + + .globl mcl_fpDbl_add7L + .align 16, 0x90 + .type mcl_fpDbl_add7L,@function +mcl_fpDbl_add7L: # @mcl_fpDbl_add7L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 104(%rdx), %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq 96(%rdx), %rax + movq %rax, -24(%rsp) # 8-byte Spill + movq 88(%rdx), %r11 + movq 80(%rdx), %r14 + movq 24(%rsi), %r15 + movq 32(%rsi), %r12 + movq 16(%rdx), %r9 + movq (%rdx), %rax + movq 8(%rdx), %rbx + addq (%rsi), %rax + adcq 8(%rsi), %rbx + adcq 16(%rsi), %r9 + adcq 24(%rdx), %r15 + adcq 32(%rdx), %r12 + movq 72(%rdx), %r13 + movq 64(%rdx), %rbp + movq %rax, (%rdi) + movq 56(%rdx), %r10 + movq %rbx, 8(%rdi) + movq 48(%rdx), %rcx + movq 40(%rdx), %rdx + movq %r9, 16(%rdi) + movq 104(%rsi), %r9 + movq %r15, 24(%rdi) + movq 40(%rsi), %rbx + adcq %rdx, %rbx + movq 96(%rsi), %r15 + movq %r12, 32(%rdi) + movq 48(%rsi), %rdx + adcq %rcx, %rdx + movq 88(%rsi), %rax + movq %rbx, 40(%rdi) + movq 56(%rsi), %rcx + adcq %r10, %rcx + movq 80(%rsi), %r12 + movq %rdx, 48(%rdi) + movq 72(%rsi), %rdx + movq 64(%rsi), %rsi + adcq %rbp, %rsi + adcq %r13, %rdx + adcq %r14, %r12 + adcq %r11, %rax + movq %rax, -8(%rsp) # 8-byte Spill + adcq -24(%rsp), %r15 # 8-byte Folded Reload + movq %r15, -24(%rsp) # 8-byte Spill + adcq -16(%rsp), %r9 # 8-byte Folded Reload + sbbq %rbp, %rbp + andl $1, %ebp + movq %rcx, %rbx + subq (%r8), %rbx + movq %rsi, %r10 + sbbq 8(%r8), %r10 + movq %rdx, %r11 + sbbq 16(%r8), %r11 + movq %r12, %r14 + sbbq 24(%r8), %r14 + movq -8(%rsp), %r13 # 8-byte Reload + sbbq 32(%r8), %r13 + sbbq 40(%r8), %r15 + movq %r9, %rax + sbbq 48(%r8), %rax + sbbq $0, %rbp + andl $1, %ebp + cmovneq %rcx, %rbx + movq %rbx, 56(%rdi) + testb %bpl, %bpl + cmovneq %rsi, %r10 + movq %r10, 64(%rdi) + cmovneq %rdx, %r11 + movq %r11, 72(%rdi) + cmovneq %r12, %r14 + movq %r14, 80(%rdi) + cmovneq -8(%rsp), %r13 # 8-byte Folded Reload + movq %r13, 88(%rdi) + cmovneq -24(%rsp), %r15 # 8-byte Folded Reload + movq %r15, 96(%rdi) + cmovneq %r9, %rax + movq %rax, 104(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end108: + .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L + + .globl mcl_fpDbl_sub7L + .align 16, 0x90 + .type mcl_fpDbl_sub7L,@function +mcl_fpDbl_sub7L: # @mcl_fpDbl_sub7L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 104(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 96(%rdx), %r10 + movq 88(%rdx), %r14 + movq 16(%rsi), %rax + movq (%rsi), %r15 + movq 8(%rsi), %r11 + 
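+ # mcl_fpDbl_sub7L: the borrow chain below runs through all 14 words; the final borrow
+ # then selects 0 or the modulus limb-by-limb (the cmove chain) and adds it back into
+ # the upper half, which is the part that must stay reduced mod p.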
xorl %ecx, %ecx + subq (%rdx), %r15 + sbbq 8(%rdx), %r11 + sbbq 16(%rdx), %rax + movq 24(%rsi), %rbx + sbbq 24(%rdx), %rbx + movq 32(%rsi), %r12 + sbbq 32(%rdx), %r12 + movq 80(%rdx), %r13 + movq 72(%rdx), %rbp + movq %r15, (%rdi) + movq 64(%rdx), %r9 + movq %r11, 8(%rdi) + movq 56(%rdx), %r15 + movq %rax, 16(%rdi) + movq 48(%rdx), %r11 + movq 40(%rdx), %rdx + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %rdx, %rbx + movq 104(%rsi), %rax + movq %r12, 32(%rdi) + movq 48(%rsi), %r12 + sbbq %r11, %r12 + movq 96(%rsi), %r11 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rdx + sbbq %r15, %rdx + movq 88(%rsi), %r15 + movq %r12, 48(%rdi) + movq 64(%rsi), %rbx + sbbq %r9, %rbx + movq 80(%rsi), %r12 + movq 72(%rsi), %r9 + sbbq %rbp, %r9 + sbbq %r13, %r12 + sbbq %r14, %r15 + sbbq %r10, %r11 + sbbq -8(%rsp), %rax # 8-byte Folded Reload + movq %rax, -8(%rsp) # 8-byte Spill + movl $0, %ebp + sbbq $0, %rbp + andl $1, %ebp + movq (%r8), %r10 + cmoveq %rcx, %r10 + testb %bpl, %bpl + movq 16(%r8), %rbp + cmoveq %rcx, %rbp + movq 8(%r8), %rsi + cmoveq %rcx, %rsi + movq 48(%r8), %r14 + cmoveq %rcx, %r14 + movq 40(%r8), %r13 + cmoveq %rcx, %r13 + movq 32(%r8), %rax + cmoveq %rcx, %rax + cmovneq 24(%r8), %rcx + addq %rdx, %r10 + adcq %rbx, %rsi + movq %r10, 56(%rdi) + movq %rsi, 64(%rdi) + adcq %r9, %rbp + movq %rbp, 72(%rdi) + adcq %r12, %rcx + movq %rcx, 80(%rdi) + adcq %r15, %rax + movq %rax, 88(%rdi) + adcq %r11, %r13 + movq %r13, 96(%rdi) + adcq -8(%rsp), %r14 # 8-byte Folded Reload + movq %r14, 104(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end109: + .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L + + .align 16, 0x90 + .type .LmulPv512x64,@function +.LmulPv512x64: # @mulPv512x64 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rcx, %rax + mulq (%rsi) + movq %rdx, -24(%rsp) # 8-byte Spill + movq %rax, (%rdi) + movq %rcx, %rax + mulq 56(%rsi) + movq %rdx, %r10 + movq %rax, -8(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq 48(%rsi) + movq %rdx, %r11 + movq %rax, -16(%rsp) # 8-byte Spill + movq %rcx, %rax + mulq 40(%rsi) + movq %rdx, %r12 + movq %rax, %r15 + movq %rcx, %rax + mulq 32(%rsi) + movq %rdx, %rbx + movq %rax, %r13 + movq %rcx, %rax + mulq 24(%rsi) + movq %rdx, %rbp + movq %rax, %r8 + movq %rcx, %rax + mulq 16(%rsi) + movq %rdx, %r9 + movq %rax, %r14 + movq %rcx, %rax + mulq 8(%rsi) + addq -24(%rsp), %rax # 8-byte Folded Reload + movq %rax, 8(%rdi) + adcq %r14, %rdx + movq %rdx, 16(%rdi) + adcq %r8, %r9 + movq %r9, 24(%rdi) + adcq %r13, %rbp + movq %rbp, 32(%rdi) + adcq %r15, %rbx + movq %rbx, 40(%rdi) + adcq -16(%rsp), %r12 # 8-byte Folded Reload + movq %r12, 48(%rdi) + adcq -8(%rsp), %r11 # 8-byte Folded Reload + movq %r11, 56(%rdi) + adcq $0, %r10 + movq %r10, 64(%rdi) + movq %rdi, %rax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end110: + .size .LmulPv512x64, .Lfunc_end110-.LmulPv512x64 + + .globl mcl_fp_mulUnitPre8L + .align 16, 0x90 + .type mcl_fp_mulUnitPre8L,@function +mcl_fp_mulUnitPre8L: # @mcl_fp_mulUnitPre8L +# BB#0: + pushq %rbx + subq $80, %rsp + movq %rdi, %rbx + leaq 8(%rsp), %rdi + callq .LmulPv512x64 + movq 72(%rsp), %r8 + movq 64(%rsp), %r9 + movq 56(%rsp), %r10 + movq 48(%rsp), %r11 + movq 40(%rsp), %rdi + movq 32(%rsp), %rax + movq 24(%rsp), %rcx + movq 8(%rsp), %rdx + movq 16(%rsp), %rsi + movq %rdx, (%rbx) + movq %rsi, 8(%rbx) + movq %rcx, 16(%rbx) + movq %rax, 24(%rbx) + movq %rdi, 32(%rbx) + movq %r11, 40(%rbx) + movq 
%r10, 48(%rbx) + movq %r9, 56(%rbx) + movq %r8, 64(%rbx) + addq $80, %rsp + popq %rbx + retq +.Lfunc_end111: + .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L + + .globl mcl_fpDbl_mulPre8L + .align 16, 0x90 + .type mcl_fpDbl_mulPre8L,@function +mcl_fpDbl_mulPre8L: # @mcl_fpDbl_mulPre8L +# BB#0: + pushq %rbp + movq %rsp, %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $200, %rsp + movq %rdx, %rbx + movq %rsi, %r15 + movq %rdi, %r14 + callq mcl_fpDbl_mulPre4L@PLT + leaq 64(%r14), %rdi + leaq 32(%r15), %rsi + leaq 32(%rbx), %rdx + callq mcl_fpDbl_mulPre4L@PLT + movq 56(%rbx), %r10 + movq 48(%rbx), %rcx + movq (%rbx), %rdx + movq 8(%rbx), %rsi + addq 32(%rbx), %rdx + adcq 40(%rbx), %rsi + adcq 16(%rbx), %rcx + adcq 24(%rbx), %r10 + pushfq + popq %r8 + xorl %r9d, %r9d + movq 56(%r15), %rdi + movq 48(%r15), %r13 + movq (%r15), %r12 + movq 8(%r15), %rbx + addq 32(%r15), %r12 + adcq 40(%r15), %rbx + adcq 16(%r15), %r13 + adcq 24(%r15), %rdi + movl $0, %eax + cmovbq %r10, %rax + movq %rax, -176(%rbp) # 8-byte Spill + movl $0, %eax + cmovbq %rcx, %rax + movq %rax, -184(%rbp) # 8-byte Spill + movl $0, %eax + cmovbq %rsi, %rax + movq %rax, -192(%rbp) # 8-byte Spill + movl $0, %eax + cmovbq %rdx, %rax + movq %rax, -200(%rbp) # 8-byte Spill + sbbq %r15, %r15 + movq %r12, -136(%rbp) + movq %rbx, -128(%rbp) + movq %r13, -120(%rbp) + movq %rdi, -112(%rbp) + movq %rdx, -168(%rbp) + movq %rsi, -160(%rbp) + movq %rcx, -152(%rbp) + movq %r10, -144(%rbp) + pushq %r8 + popfq + cmovaeq %r9, %rdi + movq %rdi, -216(%rbp) # 8-byte Spill + cmovaeq %r9, %r13 + cmovaeq %r9, %rbx + cmovaeq %r9, %r12 + sbbq %rax, %rax + movq %rax, -208(%rbp) # 8-byte Spill + leaq -104(%rbp), %rdi + leaq -136(%rbp), %rsi + leaq -168(%rbp), %rdx + callq mcl_fpDbl_mulPre4L@PLT + addq -200(%rbp), %r12 # 8-byte Folded Reload + adcq -192(%rbp), %rbx # 8-byte Folded Reload + adcq -184(%rbp), %r13 # 8-byte Folded Reload + movq -216(%rbp), %r10 # 8-byte Reload + adcq -176(%rbp), %r10 # 8-byte Folded Reload + sbbq %rax, %rax + andl $1, %eax + movq -208(%rbp), %rdx # 8-byte Reload + andl %edx, %r15d + andl $1, %r15d + addq -72(%rbp), %r12 + adcq -64(%rbp), %rbx + adcq -56(%rbp), %r13 + adcq -48(%rbp), %r10 + adcq %rax, %r15 + movq -80(%rbp), %rax + movq -88(%rbp), %rcx + movq -104(%rbp), %rsi + movq -96(%rbp), %rdx + subq (%r14), %rsi + sbbq 8(%r14), %rdx + sbbq 16(%r14), %rcx + sbbq 24(%r14), %rax + movq 32(%r14), %rdi + movq %rdi, -184(%rbp) # 8-byte Spill + movq 40(%r14), %r8 + movq %r8, -176(%rbp) # 8-byte Spill + sbbq %rdi, %r12 + sbbq %r8, %rbx + movq 48(%r14), %rdi + movq %rdi, -192(%rbp) # 8-byte Spill + sbbq %rdi, %r13 + movq 56(%r14), %rdi + movq %rdi, -200(%rbp) # 8-byte Spill + sbbq %rdi, %r10 + sbbq $0, %r15 + movq 64(%r14), %r11 + subq %r11, %rsi + movq 72(%r14), %rdi + movq %rdi, -208(%rbp) # 8-byte Spill + sbbq %rdi, %rdx + movq 80(%r14), %rdi + movq %rdi, -216(%rbp) # 8-byte Spill + sbbq %rdi, %rcx + movq 88(%r14), %rdi + movq %rdi, -224(%rbp) # 8-byte Spill + sbbq %rdi, %rax + movq 96(%r14), %rdi + movq %rdi, -232(%rbp) # 8-byte Spill + sbbq %rdi, %r12 + movq 104(%r14), %rdi + sbbq %rdi, %rbx + movq 112(%r14), %r8 + sbbq %r8, %r13 + movq 120(%r14), %r9 + sbbq %r9, %r10 + sbbq $0, %r15 + addq -184(%rbp), %rsi # 8-byte Folded Reload + adcq -176(%rbp), %rdx # 8-byte Folded Reload + movq %rsi, 32(%r14) + adcq -192(%rbp), %rcx # 8-byte Folded Reload + movq %rdx, 40(%r14) + adcq -200(%rbp), %rax # 8-byte Folded Reload + movq %rcx, 48(%r14) + adcq %r11, %r12 + movq %rax, 56(%r14) + movq %r12, 
64(%r14) + adcq -208(%rbp), %rbx # 8-byte Folded Reload + movq %rbx, 72(%r14) + adcq -216(%rbp), %r13 # 8-byte Folded Reload + movq %r13, 80(%r14) + adcq -224(%rbp), %r10 # 8-byte Folded Reload + movq %r10, 88(%r14) + adcq -232(%rbp), %r15 # 8-byte Folded Reload + movq %r15, 96(%r14) + adcq $0, %rdi + movq %rdi, 104(%r14) + adcq $0, %r8 + movq %r8, 112(%r14) + adcq $0, %r9 + movq %r9, 120(%r14) + addq $200, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end112: + .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L + + .globl mcl_fpDbl_sqrPre8L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre8L,@function +mcl_fpDbl_sqrPre8L: # @mcl_fpDbl_sqrPre8L +# BB#0: + pushq %rbp + movq %rsp, %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $200, %rsp + movq %rsi, %r14 + movq %rdi, %rbx + movq %r14, %rdx + callq mcl_fpDbl_mulPre4L@PLT + leaq 64(%rbx), %rdi + leaq 32(%r14), %rsi + movq %rsi, %rdx + callq mcl_fpDbl_mulPre4L@PLT + movq (%r14), %r12 + movq 8(%r14), %r15 + addq 32(%r14), %r12 + adcq 40(%r14), %r15 + pushfq + popq %rax + movq %r12, -136(%rbp) + movq %r12, -168(%rbp) + addq %r12, %r12 + movq %r15, -128(%rbp) + movq %r15, -160(%rbp) + adcq %r15, %r15 + pushfq + popq %rcx + movq 56(%r14), %r13 + movq 48(%r14), %rdx + pushq %rax + popfq + adcq 16(%r14), %rdx + adcq 24(%r14), %r13 + pushfq + popq %r8 + pushfq + popq %rsi + pushfq + popq %rdi + sbbq %rax, %rax + movq %rax, -184(%rbp) # 8-byte Spill + xorl %eax, %eax + pushq %rdi + popfq + cmovaeq %rax, %r15 + movq %r15, -176(%rbp) # 8-byte Spill + cmovaeq %rax, %r12 + movq %rdx, -120(%rbp) + movq %rdx, -152(%rbp) + movq %rdx, %r15 + pushq %rcx + popfq + adcq %r15, %r15 + movq %r13, %r14 + movq %r13, -112(%rbp) + movq %r13, -144(%rbp) + adcq %r13, %r13 + pushq %rsi + popfq + cmovaeq %rax, %r13 + cmovaeq %rax, %r15 + shrq $63, %r14 + pushq %r8 + popfq + cmovaeq %rax, %r14 + leaq -104(%rbp), %rdi + leaq -136(%rbp), %rsi + leaq -168(%rbp), %rdx + callq mcl_fpDbl_mulPre4L@PLT + movq -184(%rbp), %rax # 8-byte Reload + andl $1, %eax + addq -72(%rbp), %r12 + movq -176(%rbp), %r8 # 8-byte Reload + adcq -64(%rbp), %r8 + adcq -56(%rbp), %r15 + adcq -48(%rbp), %r13 + adcq %r14, %rax + movq %rax, %rdi + movq -80(%rbp), %rax + movq -88(%rbp), %rcx + movq -104(%rbp), %rsi + movq -96(%rbp), %rdx + subq (%rbx), %rsi + sbbq 8(%rbx), %rdx + sbbq 16(%rbx), %rcx + sbbq 24(%rbx), %rax + movq 32(%rbx), %r10 + movq %r10, -184(%rbp) # 8-byte Spill + movq 40(%rbx), %r9 + movq %r9, -176(%rbp) # 8-byte Spill + sbbq %r10, %r12 + sbbq %r9, %r8 + movq %r8, %r10 + movq 48(%rbx), %r8 + movq %r8, -192(%rbp) # 8-byte Spill + sbbq %r8, %r15 + movq 56(%rbx), %r8 + movq %r8, -200(%rbp) # 8-byte Spill + sbbq %r8, %r13 + sbbq $0, %rdi + movq 64(%rbx), %r11 + subq %r11, %rsi + movq 72(%rbx), %r8 + movq %r8, -208(%rbp) # 8-byte Spill + sbbq %r8, %rdx + movq 80(%rbx), %r8 + movq %r8, -216(%rbp) # 8-byte Spill + sbbq %r8, %rcx + movq 88(%rbx), %r8 + movq %r8, -224(%rbp) # 8-byte Spill + sbbq %r8, %rax + movq 96(%rbx), %r8 + movq %r8, -232(%rbp) # 8-byte Spill + sbbq %r8, %r12 + movq 104(%rbx), %r14 + sbbq %r14, %r10 + movq 112(%rbx), %r8 + sbbq %r8, %r15 + movq 120(%rbx), %r9 + sbbq %r9, %r13 + sbbq $0, %rdi + addq -184(%rbp), %rsi # 8-byte Folded Reload + adcq -176(%rbp), %rdx # 8-byte Folded Reload + movq %rsi, 32(%rbx) + adcq -192(%rbp), %rcx # 8-byte Folded Reload + movq %rdx, 40(%rbx) + adcq -200(%rbp), %rax # 8-byte Folded Reload + movq %rcx, 48(%rbx) + adcq %r11, %r12 + movq %rax, 56(%rbx) + movq %r12, 64(%rbx) 
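+ # (annotation) the adcq chain below folds the remaining middle-product limbs, with their carries, into bytes 72..120 of the 1024-bit square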
+ adcq -208(%rbp), %r10 # 8-byte Folded Reload + movq %r10, 72(%rbx) + adcq -216(%rbp), %r15 # 8-byte Folded Reload + movq %r15, 80(%rbx) + adcq -224(%rbp), %r13 # 8-byte Folded Reload + movq %r13, 88(%rbx) + adcq -232(%rbp), %rdi # 8-byte Folded Reload + movq %rdi, 96(%rbx) + adcq $0, %r14 + movq %r14, 104(%rbx) + adcq $0, %r8 + movq %r8, 112(%rbx) + adcq $0, %r9 + movq %r9, 120(%rbx) + addq $200, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end113: + .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L + + .globl mcl_fp_mont8L + .align 16, 0x90 + .type mcl_fp_mont8L,@function +mcl_fp_mont8L: # @mcl_fp_mont8L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $1256, %rsp # imm = 0x4E8 + movq %rcx, %r13 + movq %r13, 40(%rsp) # 8-byte Spill + movq %rdx, 16(%rsp) # 8-byte Spill + movq %rsi, 24(%rsp) # 8-byte Spill + movq %rdi, (%rsp) # 8-byte Spill + movq -8(%r13), %rbx + movq %rbx, 32(%rsp) # 8-byte Spill + movq (%rdx), %rdx + leaq 1184(%rsp), %rdi + callq .LmulPv512x64 + movq 1184(%rsp), %r15 + movq 1192(%rsp), %r14 + movq %r15, %rdx + imulq %rbx, %rdx + movq 1248(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + movq 1240(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 1232(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 1224(%rsp), %rax + movq %rax, 72(%rsp) # 8-byte Spill + movq 1216(%rsp), %r12 + movq 1208(%rsp), %rbx + movq 1200(%rsp), %rbp + leaq 1112(%rsp), %rdi + movq %r13, %rsi + callq .LmulPv512x64 + addq 1112(%rsp), %r15 + adcq 1120(%rsp), %r14 + adcq 1128(%rsp), %rbp + movq %rbp, 8(%rsp) # 8-byte Spill + adcq 1136(%rsp), %rbx + movq %rbx, 48(%rsp) # 8-byte Spill + adcq 1144(%rsp), %r12 + movq %r12, 64(%rsp) # 8-byte Spill + movq 72(%rsp), %r13 # 8-byte Reload + adcq 1152(%rsp), %r13 + movq 88(%rsp), %rbx # 8-byte Reload + adcq 1160(%rsp), %rbx + movq 80(%rsp), %rbp # 8-byte Reload + adcq 1168(%rsp), %rbp + movq 96(%rsp), %rax # 8-byte Reload + adcq 1176(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + sbbq %r15, %r15 + movq 16(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rdx + leaq 1040(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + andl $1, %r15d + addq 1040(%rsp), %r14 + movq 8(%rsp), %rax # 8-byte Reload + adcq 1048(%rsp), %rax + movq %rax, 8(%rsp) # 8-byte Spill + movq 48(%rsp), %rax # 8-byte Reload + adcq 1056(%rsp), %rax + movq %rax, %r12 + movq 64(%rsp), %rax # 8-byte Reload + adcq 1064(%rsp), %rax + movq %rax, 64(%rsp) # 8-byte Spill + adcq 1072(%rsp), %r13 + movq %r13, 72(%rsp) # 8-byte Spill + adcq 1080(%rsp), %rbx + movq %rbx, 88(%rsp) # 8-byte Spill + adcq 1088(%rsp), %rbp + movq 96(%rsp), %rax # 8-byte Reload + adcq 1096(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + adcq 1104(%rsp), %r15 + movq %r15, 56(%rsp) # 8-byte Spill + sbbq %r15, %r15 + movq %r14, %rdx + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 968(%rsp), %rdi + movq 40(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + andl $1, %r15d + addq 968(%rsp), %r14 + movq 8(%rsp), %r13 # 8-byte Reload + adcq 976(%rsp), %r13 + adcq 984(%rsp), %r12 + movq %r12, 48(%rsp) # 8-byte Spill + movq 64(%rsp), %r14 # 8-byte Reload + adcq 992(%rsp), %r14 + movq 72(%rsp), %rbx # 8-byte Reload + adcq 1000(%rsp), %rbx + movq 88(%rsp), %rax # 8-byte Reload + adcq 1008(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + adcq 1016(%rsp), %rbp + movq %rbp, %r12 + movq 96(%rsp), %rbp # 8-byte Reload + adcq 1024(%rsp), %rbp + movq 56(%rsp), %rax # 8-byte Reload + adcq 1032(%rsp), 
%rax + movq %rax, 56(%rsp) # 8-byte Spill + adcq $0, %r15 + movq 16(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rdx + leaq 896(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq %r13, %rcx + addq 896(%rsp), %rcx + movq 48(%rsp), %r13 # 8-byte Reload + adcq 904(%rsp), %r13 + adcq 912(%rsp), %r14 + adcq 920(%rsp), %rbx + movq %rbx, 72(%rsp) # 8-byte Spill + movq 88(%rsp), %rax # 8-byte Reload + adcq 928(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + adcq 936(%rsp), %r12 + movq %r12, 80(%rsp) # 8-byte Spill + adcq 944(%rsp), %rbp + movq %rbp, 96(%rsp) # 8-byte Spill + movq 56(%rsp), %r12 # 8-byte Reload + adcq 952(%rsp), %r12 + adcq 960(%rsp), %r15 + sbbq %rbx, %rbx + movq %rcx, %rdx + movq %rcx, %rbp + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 824(%rsp), %rdi + movq 40(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + andl $1, %ebx + addq 824(%rsp), %rbp + adcq 832(%rsp), %r13 + movq %r13, 48(%rsp) # 8-byte Spill + adcq 840(%rsp), %r14 + movq %r14, 64(%rsp) # 8-byte Spill + movq 72(%rsp), %r13 # 8-byte Reload + adcq 848(%rsp), %r13 + movq 88(%rsp), %rbp # 8-byte Reload + adcq 856(%rsp), %rbp + movq 80(%rsp), %r14 # 8-byte Reload + adcq 864(%rsp), %r14 + movq 96(%rsp), %rax # 8-byte Reload + adcq 872(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + adcq 880(%rsp), %r12 + adcq 888(%rsp), %r15 + adcq $0, %rbx + movq 16(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rdx + leaq 752(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 48(%rsp), %rax # 8-byte Reload + addq 752(%rsp), %rax + movq 64(%rsp), %rcx # 8-byte Reload + adcq 760(%rsp), %rcx + movq %rcx, 64(%rsp) # 8-byte Spill + adcq 768(%rsp), %r13 + movq %r13, 72(%rsp) # 8-byte Spill + adcq 776(%rsp), %rbp + movq %rbp, 88(%rsp) # 8-byte Spill + adcq 784(%rsp), %r14 + movq %r14, 80(%rsp) # 8-byte Spill + movq 96(%rsp), %rbp # 8-byte Reload + adcq 792(%rsp), %rbp + adcq 800(%rsp), %r12 + adcq 808(%rsp), %r15 + adcq 816(%rsp), %rbx + movq %rbx, 48(%rsp) # 8-byte Spill + sbbq %r13, %r13 + movq %rax, %rdx + movq %rax, %rbx + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 680(%rsp), %rdi + movq 40(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq %r13, %rax + andl $1, %eax + addq 680(%rsp), %rbx + movq 64(%rsp), %r14 # 8-byte Reload + adcq 688(%rsp), %r14 + movq 72(%rsp), %rcx # 8-byte Reload + adcq 696(%rsp), %rcx + movq %rcx, 72(%rsp) # 8-byte Spill + movq 88(%rsp), %r13 # 8-byte Reload + adcq 704(%rsp), %r13 + movq 80(%rsp), %rbx # 8-byte Reload + adcq 712(%rsp), %rbx + adcq 720(%rsp), %rbp + movq %rbp, 96(%rsp) # 8-byte Spill + movq %r12, %rbp + adcq 728(%rsp), %rbp + adcq 736(%rsp), %r15 + movq 48(%rsp), %r12 # 8-byte Reload + adcq 744(%rsp), %r12 + adcq $0, %rax + movq %rax, 64(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rdx + leaq 608(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq %r14, %rax + addq 608(%rsp), %rax + movq 72(%rsp), %r14 # 8-byte Reload + adcq 616(%rsp), %r14 + adcq 624(%rsp), %r13 + movq %r13, 88(%rsp) # 8-byte Spill + adcq 632(%rsp), %rbx + movq %rbx, %r13 + movq 96(%rsp), %rcx # 8-byte Reload + adcq 640(%rsp), %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + adcq 648(%rsp), %rbp + movq %rbp, 56(%rsp) # 8-byte Spill + adcq 656(%rsp), %r15 + adcq 664(%rsp), %r12 + movq %r12, 48(%rsp) # 8-byte Spill + movq 64(%rsp), %rcx # 8-byte Reload + adcq 672(%rsp), %rcx + movq %rcx, 64(%rsp) # 8-byte Spill + sbbq %rbp, %rbp + movq %rax, %rdx + movq %rax, %rbx + imulq 32(%rsp), %rdx # 8-byte 
Folded Reload + leaq 536(%rsp), %rdi + movq 40(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq %rbp, %rax + andl $1, %eax + addq 536(%rsp), %rbx + adcq 544(%rsp), %r14 + movq %r14, 72(%rsp) # 8-byte Spill + movq 88(%rsp), %rbx # 8-byte Reload + adcq 552(%rsp), %rbx + adcq 560(%rsp), %r13 + movq 96(%rsp), %rbp # 8-byte Reload + adcq 568(%rsp), %rbp + movq 56(%rsp), %r12 # 8-byte Reload + adcq 576(%rsp), %r12 + adcq 584(%rsp), %r15 + movq 48(%rsp), %rcx # 8-byte Reload + adcq 592(%rsp), %rcx + movq %rcx, 48(%rsp) # 8-byte Spill + movq 64(%rsp), %r14 # 8-byte Reload + adcq 600(%rsp), %r14 + adcq $0, %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 40(%rax), %rdx + leaq 464(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 72(%rsp), %rax # 8-byte Reload + addq 464(%rsp), %rax + adcq 472(%rsp), %rbx + adcq 480(%rsp), %r13 + movq %r13, 80(%rsp) # 8-byte Spill + adcq 488(%rsp), %rbp + movq %rbp, 96(%rsp) # 8-byte Spill + adcq 496(%rsp), %r12 + adcq 504(%rsp), %r15 + movq %r15, 72(%rsp) # 8-byte Spill + movq 48(%rsp), %r15 # 8-byte Reload + adcq 512(%rsp), %r15 + adcq 520(%rsp), %r14 + movq %r14, 64(%rsp) # 8-byte Spill + movq 88(%rsp), %r14 # 8-byte Reload + adcq 528(%rsp), %r14 + sbbq %r13, %r13 + movq %rax, %rdx + movq %rax, %rbp + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 392(%rsp), %rdi + movq 40(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq %r13, %rax + andl $1, %eax + addq 392(%rsp), %rbp + adcq 400(%rsp), %rbx + movq %rbx, 88(%rsp) # 8-byte Spill + movq 80(%rsp), %rbp # 8-byte Reload + adcq 408(%rsp), %rbp + movq 96(%rsp), %rbx # 8-byte Reload + adcq 416(%rsp), %rbx + adcq 424(%rsp), %r12 + movq 72(%rsp), %r13 # 8-byte Reload + adcq 432(%rsp), %r13 + adcq 440(%rsp), %r15 + movq %r15, 48(%rsp) # 8-byte Spill + movq 64(%rsp), %r15 # 8-byte Reload + adcq 448(%rsp), %r15 + adcq 456(%rsp), %r14 + adcq $0, %rax + movq %rax, 72(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 48(%rax), %rdx + leaq 320(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 88(%rsp), %rax # 8-byte Reload + addq 320(%rsp), %rax + adcq 328(%rsp), %rbp + movq %rbp, 80(%rsp) # 8-byte Spill + adcq 336(%rsp), %rbx + movq %rbx, 96(%rsp) # 8-byte Spill + movq %r12, %rbp + adcq 344(%rsp), %rbp + adcq 352(%rsp), %r13 + movq 48(%rsp), %r12 # 8-byte Reload + adcq 360(%rsp), %r12 + adcq 368(%rsp), %r15 + movq %r15, 64(%rsp) # 8-byte Spill + adcq 376(%rsp), %r14 + movq %r14, 88(%rsp) # 8-byte Spill + movq 72(%rsp), %rcx # 8-byte Reload + adcq 384(%rsp), %rcx + movq %rcx, 72(%rsp) # 8-byte Spill + sbbq %r15, %r15 + movq %rax, %rdx + movq %rax, %rbx + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 248(%rsp), %rdi + movq 40(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + andl $1, %r15d + addq 248(%rsp), %rbx + movq 80(%rsp), %rax # 8-byte Reload + adcq 256(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 96(%rsp), %r14 # 8-byte Reload + adcq 264(%rsp), %r14 + adcq 272(%rsp), %rbp + movq %rbp, 56(%rsp) # 8-byte Spill + movq %r13, %rbx + adcq 280(%rsp), %rbx + movq %r12, %rbp + adcq 288(%rsp), %rbp + movq 64(%rsp), %r13 # 8-byte Reload + adcq 296(%rsp), %r13 + movq 88(%rsp), %rax # 8-byte Reload + adcq 304(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 72(%rsp), %r12 # 8-byte Reload + adcq 312(%rsp), %r12 + adcq $0, %r15 + movq 16(%rsp), %rax # 8-byte Reload + movq 56(%rax), %rdx + leaq 176(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 
80(%rsp), %rax # 8-byte Reload + addq 176(%rsp), %rax + adcq 184(%rsp), %r14 + movq %r14, 96(%rsp) # 8-byte Spill + movq 56(%rsp), %rcx # 8-byte Reload + adcq 192(%rsp), %rcx + movq %rcx, 56(%rsp) # 8-byte Spill + adcq 200(%rsp), %rbx + movq %rbx, 72(%rsp) # 8-byte Spill + adcq 208(%rsp), %rbp + adcq 216(%rsp), %r13 + movq %r13, 64(%rsp) # 8-byte Spill + movq 88(%rsp), %r14 # 8-byte Reload + adcq 224(%rsp), %r14 + adcq 232(%rsp), %r12 + adcq 240(%rsp), %r15 + sbbq %rbx, %rbx + movq 32(%rsp), %rdx # 8-byte Reload + imulq %rax, %rdx + movq %rax, %r13 + leaq 104(%rsp), %rdi + movq 40(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + andl $1, %ebx + addq 104(%rsp), %r13 + movq 96(%rsp), %rcx # 8-byte Reload + adcq 112(%rsp), %rcx + movq 56(%rsp), %rdx # 8-byte Reload + adcq 120(%rsp), %rdx + movq 72(%rsp), %rsi # 8-byte Reload + adcq 128(%rsp), %rsi + movq %rbp, %rdi + adcq 136(%rsp), %rdi + movq %rdi, 48(%rsp) # 8-byte Spill + movq 64(%rsp), %r8 # 8-byte Reload + adcq 144(%rsp), %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq %r14, %r9 + adcq 152(%rsp), %r9 + movq %r9, 88(%rsp) # 8-byte Spill + adcq 160(%rsp), %r12 + adcq 168(%rsp), %r15 + adcq $0, %rbx + movq %rcx, %rax + movq %rcx, %r11 + movq 40(%rsp), %rbp # 8-byte Reload + subq (%rbp), %rax + movq %rdx, %rcx + movq %rdx, %r14 + sbbq 8(%rbp), %rcx + movq %rsi, %rdx + movq %rsi, %r13 + sbbq 16(%rbp), %rdx + movq %rdi, %rsi + sbbq 24(%rbp), %rsi + movq %r8, %rdi + sbbq 32(%rbp), %rdi + movq %r9, %r10 + sbbq 40(%rbp), %r10 + movq %r12, %r8 + sbbq 48(%rbp), %r8 + movq %r15, %r9 + sbbq 56(%rbp), %r9 + sbbq $0, %rbx + andl $1, %ebx + cmovneq %r15, %r9 + testb %bl, %bl + cmovneq %r11, %rax + movq (%rsp), %rbx # 8-byte Reload + movq %rax, (%rbx) + cmovneq %r14, %rcx + movq %rcx, 8(%rbx) + cmovneq %r13, %rdx + movq %rdx, 16(%rbx) + cmovneq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 24(%rbx) + cmovneq 64(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rbx) + cmovneq 88(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 40(%rbx) + cmovneq %r12, %r8 + movq %r8, 48(%rbx) + movq %r9, 56(%rbx) + addq $1256, %rsp # imm = 0x4E8 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end114: + .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L + + .globl mcl_fp_montNF8L + .align 16, 0x90 + .type mcl_fp_montNF8L,@function +mcl_fp_montNF8L: # @mcl_fp_montNF8L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $1240, %rsp # imm = 0x4D8 + movq %rcx, 56(%rsp) # 8-byte Spill + movq %rdx, 16(%rsp) # 8-byte Spill + movq %rsi, 24(%rsp) # 8-byte Spill + movq %rdi, (%rsp) # 8-byte Spill + movq -8(%rcx), %rbx + movq %rbx, 32(%rsp) # 8-byte Spill + movq (%rdx), %rdx + leaq 1168(%rsp), %rdi + callq .LmulPv512x64 + movq 1168(%rsp), %r15 + movq 1176(%rsp), %r12 + movq %r15, %rdx + imulq %rbx, %rdx + movq 1232(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 1224(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 1216(%rsp), %r13 + movq 1208(%rsp), %rax + movq %rax, 72(%rsp) # 8-byte Spill + movq 1200(%rsp), %r14 + movq 1192(%rsp), %rbp + movq 1184(%rsp), %rbx + leaq 1096(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 1096(%rsp), %r15 + adcq 1104(%rsp), %r12 + movq %r12, 64(%rsp) # 8-byte Spill + adcq 1112(%rsp), %rbx + adcq 1120(%rsp), %rbp + adcq 1128(%rsp), %r14 + movq %r14, %r12 + movq 72(%rsp), %r14 # 8-byte Reload + adcq 1136(%rsp), %r14 + adcq 1144(%rsp), %r13 + movq 80(%rsp), %rax # 8-byte Reload + adcq 1152(%rsp), %rax + movq %rax, 80(%rsp) 
# 8-byte Spill + movq 48(%rsp), %rax # 8-byte Reload + adcq 1160(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rdx + leaq 1024(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 1088(%rsp), %r15 + movq 64(%rsp), %rax # 8-byte Reload + addq 1024(%rsp), %rax + adcq 1032(%rsp), %rbx + movq %rbx, 8(%rsp) # 8-byte Spill + movq %rbp, %rbx + adcq 1040(%rsp), %rbx + adcq 1048(%rsp), %r12 + adcq 1056(%rsp), %r14 + movq %r14, 72(%rsp) # 8-byte Spill + movq %r13, %rbp + adcq 1064(%rsp), %rbp + movq 80(%rsp), %rcx # 8-byte Reload + adcq 1072(%rsp), %rcx + movq %rcx, 80(%rsp) # 8-byte Spill + movq 48(%rsp), %r14 # 8-byte Reload + adcq 1080(%rsp), %r14 + adcq $0, %r15 + movq %rax, %rdx + movq %rax, %r13 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 952(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 952(%rsp), %r13 + movq 8(%rsp), %rax # 8-byte Reload + adcq 960(%rsp), %rax + movq %rax, 8(%rsp) # 8-byte Spill + adcq 968(%rsp), %rbx + movq %rbx, 64(%rsp) # 8-byte Spill + movq %r12, %rbx + adcq 976(%rsp), %rbx + movq 72(%rsp), %r12 # 8-byte Reload + adcq 984(%rsp), %r12 + adcq 992(%rsp), %rbp + movq %rbp, 40(%rsp) # 8-byte Spill + movq 80(%rsp), %r13 # 8-byte Reload + adcq 1000(%rsp), %r13 + movq %r14, %rbp + adcq 1008(%rsp), %rbp + adcq 1016(%rsp), %r15 + movq 16(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rdx + leaq 880(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 944(%rsp), %r14 + movq 8(%rsp), %rax # 8-byte Reload + addq 880(%rsp), %rax + movq 64(%rsp), %rcx # 8-byte Reload + adcq 888(%rsp), %rcx + movq %rcx, 64(%rsp) # 8-byte Spill + adcq 896(%rsp), %rbx + adcq 904(%rsp), %r12 + movq %r12, 72(%rsp) # 8-byte Spill + movq 40(%rsp), %rcx # 8-byte Reload + adcq 912(%rsp), %rcx + movq %rcx, 40(%rsp) # 8-byte Spill + adcq 920(%rsp), %r13 + movq %r13, 80(%rsp) # 8-byte Spill + adcq 928(%rsp), %rbp + movq %rbp, 48(%rsp) # 8-byte Spill + adcq 936(%rsp), %r15 + adcq $0, %r14 + movq %rax, %rdx + movq %rax, %rbp + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 808(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 808(%rsp), %rbp + movq 64(%rsp), %r13 # 8-byte Reload + adcq 816(%rsp), %r13 + movq %rbx, %r12 + adcq 824(%rsp), %r12 + movq 72(%rsp), %rbx # 8-byte Reload + adcq 832(%rsp), %rbx + movq 40(%rsp), %rbp # 8-byte Reload + adcq 840(%rsp), %rbp + movq 80(%rsp), %rax # 8-byte Reload + adcq 848(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 48(%rsp), %rax # 8-byte Reload + adcq 856(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + adcq 864(%rsp), %r15 + adcq 872(%rsp), %r14 + movq 16(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rdx + leaq 736(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 800(%rsp), %rax + movq %r13, %rcx + addq 736(%rsp), %rcx + adcq 744(%rsp), %r12 + movq %r12, 40(%rsp) # 8-byte Spill + adcq 752(%rsp), %rbx + movq %rbx, 72(%rsp) # 8-byte Spill + adcq 760(%rsp), %rbp + movq %rbp, %r13 + movq 80(%rsp), %rbp # 8-byte Reload + adcq 768(%rsp), %rbp + movq 48(%rsp), %rbx # 8-byte Reload + adcq 776(%rsp), %rbx + adcq 784(%rsp), %r15 + adcq 792(%rsp), %r14 + adcq $0, %rax + movq %rax, 64(%rsp) # 8-byte Spill + movq %rcx, %rdx + movq %rcx, %r12 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 664(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 664(%rsp), %r12 + movq 40(%rsp), %rax # 8-byte Reload + adcq 672(%rsp), %rax + movq %rax, 
40(%rsp) # 8-byte Spill + movq 72(%rsp), %rax # 8-byte Reload + adcq 680(%rsp), %rax + movq %rax, 72(%rsp) # 8-byte Spill + adcq 688(%rsp), %r13 + adcq 696(%rsp), %rbp + movq %rbp, 80(%rsp) # 8-byte Spill + adcq 704(%rsp), %rbx + adcq 712(%rsp), %r15 + adcq 720(%rsp), %r14 + movq 64(%rsp), %r12 # 8-byte Reload + adcq 728(%rsp), %r12 + movq 16(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rdx + leaq 592(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 656(%rsp), %rcx + movq 40(%rsp), %rax # 8-byte Reload + addq 592(%rsp), %rax + movq 72(%rsp), %rbp # 8-byte Reload + adcq 600(%rsp), %rbp + adcq 608(%rsp), %r13 + movq %r13, 40(%rsp) # 8-byte Spill + movq 80(%rsp), %r13 # 8-byte Reload + adcq 616(%rsp), %r13 + adcq 624(%rsp), %rbx + adcq 632(%rsp), %r15 + adcq 640(%rsp), %r14 + adcq 648(%rsp), %r12 + movq %r12, 64(%rsp) # 8-byte Spill + adcq $0, %rcx + movq %rcx, 80(%rsp) # 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 520(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 520(%rsp), %r12 + adcq 528(%rsp), %rbp + movq %rbp, 72(%rsp) # 8-byte Spill + movq 40(%rsp), %r12 # 8-byte Reload + adcq 536(%rsp), %r12 + movq %r13, %rbp + adcq 544(%rsp), %rbp + adcq 552(%rsp), %rbx + adcq 560(%rsp), %r15 + adcq 568(%rsp), %r14 + movq 64(%rsp), %r13 # 8-byte Reload + adcq 576(%rsp), %r13 + movq 80(%rsp), %rax # 8-byte Reload + adcq 584(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 40(%rax), %rdx + leaq 448(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 512(%rsp), %rcx + movq 72(%rsp), %rax # 8-byte Reload + addq 448(%rsp), %rax + adcq 456(%rsp), %r12 + movq %r12, 40(%rsp) # 8-byte Spill + adcq 464(%rsp), %rbp + adcq 472(%rsp), %rbx + adcq 480(%rsp), %r15 + adcq 488(%rsp), %r14 + adcq 496(%rsp), %r13 + movq %r13, 64(%rsp) # 8-byte Spill + movq 80(%rsp), %r13 # 8-byte Reload + adcq 504(%rsp), %r13 + adcq $0, %rcx + movq %rcx, 72(%rsp) # 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 376(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 376(%rsp), %r12 + movq 40(%rsp), %rax # 8-byte Reload + adcq 384(%rsp), %rax + movq %rax, 40(%rsp) # 8-byte Spill + adcq 392(%rsp), %rbp + adcq 400(%rsp), %rbx + adcq 408(%rsp), %r15 + adcq 416(%rsp), %r14 + movq 64(%rsp), %r12 # 8-byte Reload + adcq 424(%rsp), %r12 + adcq 432(%rsp), %r13 + movq 72(%rsp), %rax # 8-byte Reload + adcq 440(%rsp), %rax + movq %rax, 72(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 48(%rax), %rdx + leaq 304(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 368(%rsp), %rcx + movq 40(%rsp), %rax # 8-byte Reload + addq 304(%rsp), %rax + adcq 312(%rsp), %rbp + movq %rbp, 80(%rsp) # 8-byte Spill + adcq 320(%rsp), %rbx + adcq 328(%rsp), %r15 + adcq 336(%rsp), %r14 + adcq 344(%rsp), %r12 + movq %r12, 64(%rsp) # 8-byte Spill + adcq 352(%rsp), %r13 + movq 72(%rsp), %rbp # 8-byte Reload + adcq 360(%rsp), %rbp + adcq $0, %rcx + movq %rcx, 48(%rsp) # 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 232(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 232(%rsp), %r12 + movq 80(%rsp), %rax # 8-byte Reload + adcq 240(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + adcq 248(%rsp), %rbx + adcq 256(%rsp), %r15 + adcq 264(%rsp), %r14 + movq 64(%rsp), %r12 # 
8-byte Reload + adcq 272(%rsp), %r12 + adcq 280(%rsp), %r13 + adcq 288(%rsp), %rbp + movq %rbp, 72(%rsp) # 8-byte Spill + movq 48(%rsp), %rbp # 8-byte Reload + adcq 296(%rsp), %rbp + movq 16(%rsp), %rax # 8-byte Reload + movq 56(%rax), %rdx + leaq 160(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + movq 224(%rsp), %rcx + movq 80(%rsp), %rax # 8-byte Reload + addq 160(%rsp), %rax + adcq 168(%rsp), %rbx + movq %rbx, 48(%rsp) # 8-byte Spill + adcq 176(%rsp), %r15 + adcq 184(%rsp), %r14 + adcq 192(%rsp), %r12 + movq %r12, 64(%rsp) # 8-byte Spill + adcq 200(%rsp), %r13 + movq 72(%rsp), %rbx # 8-byte Reload + adcq 208(%rsp), %rbx + adcq 216(%rsp), %rbp + movq %rbp, %r12 + adcq $0, %rcx + movq %rcx, 80(%rsp) # 8-byte Spill + movq 32(%rsp), %rdx # 8-byte Reload + imulq %rax, %rdx + movq %rax, %rbp + leaq 88(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 88(%rsp), %rbp + movq 48(%rsp), %r11 # 8-byte Reload + adcq 96(%rsp), %r11 + adcq 104(%rsp), %r15 + adcq 112(%rsp), %r14 + movq 64(%rsp), %rsi # 8-byte Reload + adcq 120(%rsp), %rsi + movq %rsi, 64(%rsp) # 8-byte Spill + adcq 128(%rsp), %r13 + adcq 136(%rsp), %rbx + movq %rbx, 72(%rsp) # 8-byte Spill + adcq 144(%rsp), %r12 + movq 80(%rsp), %r8 # 8-byte Reload + adcq 152(%rsp), %r8 + movq %r11, %rax + movq 56(%rsp), %rbp # 8-byte Reload + subq (%rbp), %rax + movq %r15, %rcx + sbbq 8(%rbp), %rcx + movq %r14, %rdx + sbbq 16(%rbp), %rdx + sbbq 24(%rbp), %rsi + movq %r13, %rdi + sbbq 32(%rbp), %rdi + movq %rbx, %r9 + sbbq 40(%rbp), %r9 + movq %r12, %r10 + sbbq 48(%rbp), %r10 + movq %rbp, %rbx + movq %r8, %rbp + sbbq 56(%rbx), %rbp + testq %rbp, %rbp + cmovsq %r11, %rax + movq (%rsp), %rbx # 8-byte Reload + movq %rax, (%rbx) + cmovsq %r15, %rcx + movq %rcx, 8(%rbx) + cmovsq %r14, %rdx + movq %rdx, 16(%rbx) + cmovsq 64(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 24(%rbx) + cmovsq %r13, %rdi + movq %rdi, 32(%rbx) + cmovsq 72(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 40(%rbx) + cmovsq %r12, %r10 + movq %r10, 48(%rbx) + cmovsq %r8, %rbp + movq %rbp, 56(%rbx) + addq $1240, %rsp # imm = 0x4D8 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end115: + .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L + + .globl mcl_fp_montRed8L + .align 16, 0x90 + .type mcl_fp_montRed8L,@function +mcl_fp_montRed8L: # @mcl_fp_montRed8L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $776, %rsp # imm = 0x308 + movq %rdx, %rax + movq %rax, 112(%rsp) # 8-byte Spill + movq %rdi, 72(%rsp) # 8-byte Spill + movq -8(%rax), %rcx + movq %rcx, 128(%rsp) # 8-byte Spill + movq (%rsi), %r15 + movq 8(%rsi), %rdx + movq %rdx, 184(%rsp) # 8-byte Spill + movq %r15, %rdx + imulq %rcx, %rdx + movq 120(%rsi), %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + movq 112(%rsi), %rcx + movq %rcx, 136(%rsp) # 8-byte Spill + movq 104(%rsi), %rcx + movq %rcx, 120(%rsp) # 8-byte Spill + movq 96(%rsi), %rcx + movq %rcx, 168(%rsp) # 8-byte Spill + movq 88(%rsi), %rcx + movq %rcx, 176(%rsp) # 8-byte Spill + movq 80(%rsi), %rcx + movq %rcx, 160(%rsp) # 8-byte Spill + movq 72(%rsi), %rcx + movq %rcx, 192(%rsp) # 8-byte Spill + movq 64(%rsi), %r13 + movq 56(%rsi), %rcx + movq %rcx, 144(%rsp) # 8-byte Spill + movq 48(%rsi), %r14 + movq 40(%rsi), %rcx + movq %rcx, 152(%rsp) # 8-byte Spill + movq 32(%rsi), %r12 + movq 24(%rsi), %rbx + movq 16(%rsi), %rbp + movq %rax, %rcx + movq (%rcx), %rax + movq %rax, 16(%rsp) # 8-byte Spill + movq 56(%rcx), %rax + movq %rax, 64(%rsp) # 
8-byte Spill + movq 48(%rcx), %rax + movq %rax, 56(%rsp) # 8-byte Spill + movq 40(%rcx), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 32(%rcx), %rax + movq %rax, 40(%rsp) # 8-byte Spill + movq 24(%rcx), %rax + movq %rax, 32(%rsp) # 8-byte Spill + movq 16(%rcx), %rax + movq %rax, 24(%rsp) # 8-byte Spill + movq 8(%rcx), %rax + movq %rax, 8(%rsp) # 8-byte Spill + movq %rcx, %rsi + leaq 704(%rsp), %rdi + callq .LmulPv512x64 + addq 704(%rsp), %r15 + movq 184(%rsp), %rcx # 8-byte Reload + adcq 712(%rsp), %rcx + adcq 720(%rsp), %rbp + movq %rbp, 80(%rsp) # 8-byte Spill + adcq 728(%rsp), %rbx + movq %rbx, 88(%rsp) # 8-byte Spill + adcq 736(%rsp), %r12 + movq %r12, 104(%rsp) # 8-byte Spill + movq 152(%rsp), %rax # 8-byte Reload + adcq 744(%rsp), %rax + movq %rax, 152(%rsp) # 8-byte Spill + adcq 752(%rsp), %r14 + movq %r14, %r12 + movq 144(%rsp), %rax # 8-byte Reload + adcq 760(%rsp), %rax + movq %rax, 144(%rsp) # 8-byte Spill + adcq 768(%rsp), %r13 + movq %r13, 184(%rsp) # 8-byte Spill + adcq $0, 192(%rsp) # 8-byte Folded Spill + movq 160(%rsp), %r15 # 8-byte Reload + adcq $0, %r15 + adcq $0, 176(%rsp) # 8-byte Folded Spill + adcq $0, 168(%rsp) # 8-byte Folded Spill + adcq $0, 120(%rsp) # 8-byte Folded Spill + movq 136(%rsp), %r13 # 8-byte Reload + adcq $0, %r13 + movq 96(%rsp), %r14 # 8-byte Reload + adcq $0, %r14 + sbbq %rbx, %rbx + movq %rcx, %rbp + movq %rbp, %rdx + imulq 128(%rsp), %rdx # 8-byte Folded Reload + leaq 632(%rsp), %rdi + movq 112(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + andl $1, %ebx + movq %rbx, %rax + addq 632(%rsp), %rbp + movq 80(%rsp), %rsi # 8-byte Reload + adcq 640(%rsp), %rsi + movq 88(%rsp), %rcx # 8-byte Reload + adcq 648(%rsp), %rcx + movq %rcx, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %rcx # 8-byte Reload + adcq 656(%rsp), %rcx + movq %rcx, 104(%rsp) # 8-byte Spill + movq 152(%rsp), %rcx # 8-byte Reload + adcq 664(%rsp), %rcx + movq %rcx, 152(%rsp) # 8-byte Spill + adcq 672(%rsp), %r12 + movq 144(%rsp), %rcx # 8-byte Reload + adcq 680(%rsp), %rcx + movq %rcx, 144(%rsp) # 8-byte Spill + movq 184(%rsp), %rcx # 8-byte Reload + adcq 688(%rsp), %rcx + movq %rcx, 184(%rsp) # 8-byte Spill + movq 192(%rsp), %rcx # 8-byte Reload + adcq 696(%rsp), %rcx + movq %rcx, 192(%rsp) # 8-byte Spill + adcq $0, %r15 + movq %r15, 160(%rsp) # 8-byte Spill + movq 176(%rsp), %rbx # 8-byte Reload + adcq $0, %rbx + movq 168(%rsp), %r15 # 8-byte Reload + adcq $0, %r15 + adcq $0, 120(%rsp) # 8-byte Folded Spill + adcq $0, %r13 + movq %r13, 136(%rsp) # 8-byte Spill + adcq $0, %r14 + movq %r14, 96(%rsp) # 8-byte Spill + movq %rax, %rbp + adcq $0, %rbp + movq %rsi, %rdx + movq %rsi, %r14 + imulq 128(%rsp), %rdx # 8-byte Folded Reload + leaq 560(%rsp), %rdi + movq 112(%rsp), %r13 # 8-byte Reload + movq %r13, %rsi + callq .LmulPv512x64 + addq 560(%rsp), %r14 + movq 88(%rsp), %rcx # 8-byte Reload + adcq 568(%rsp), %rcx + movq 104(%rsp), %rax # 8-byte Reload + adcq 576(%rsp), %rax + movq %rax, 104(%rsp) # 8-byte Spill + movq 152(%rsp), %rax # 8-byte Reload + adcq 584(%rsp), %rax + movq %rax, 152(%rsp) # 8-byte Spill + adcq 592(%rsp), %r12 + movq %r12, 88(%rsp) # 8-byte Spill + movq 144(%rsp), %r14 # 8-byte Reload + adcq 600(%rsp), %r14 + movq 184(%rsp), %rax # 8-byte Reload + adcq 608(%rsp), %rax + movq %rax, 184(%rsp) # 8-byte Spill + movq 192(%rsp), %rax # 8-byte Reload + adcq 616(%rsp), %rax + movq %rax, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %rax # 8-byte Reload + adcq 624(%rsp), %rax + movq %rax, 160(%rsp) # 8-byte Spill + adcq $0, %rbx + movq %rbx, 176(%rsp) # 8-byte Spill + 
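# (annotation) carry propagation into the spilled high limbs before the next Montgomery reduction pass; each pass folds in m*p for m = limb * n' and retires one limb +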
adcq $0, %r15 + movq %r15, 168(%rsp) # 8-byte Spill + movq 120(%rsp), %rbx # 8-byte Reload + adcq $0, %rbx + movq 136(%rsp), %r15 # 8-byte Reload + adcq $0, %r15 + adcq $0, 96(%rsp) # 8-byte Folded Spill + adcq $0, %rbp + movq %rbp, 80(%rsp) # 8-byte Spill + movq %rcx, %rbp + movq %rbp, %rdx + movq 128(%rsp), %r12 # 8-byte Reload + imulq %r12, %rdx + leaq 488(%rsp), %rdi + movq %r13, %rsi + callq .LmulPv512x64 + addq 488(%rsp), %rbp + movq 104(%rsp), %rax # 8-byte Reload + adcq 496(%rsp), %rax + movq 152(%rsp), %rbp # 8-byte Reload + adcq 504(%rsp), %rbp + movq 88(%rsp), %rcx # 8-byte Reload + adcq 512(%rsp), %rcx + movq %rcx, 88(%rsp) # 8-byte Spill + adcq 520(%rsp), %r14 + movq 184(%rsp), %rcx # 8-byte Reload + adcq 528(%rsp), %rcx + movq %rcx, 184(%rsp) # 8-byte Spill + movq 192(%rsp), %rcx # 8-byte Reload + adcq 536(%rsp), %rcx + movq %rcx, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %r13 # 8-byte Reload + adcq 544(%rsp), %r13 + movq 176(%rsp), %rcx # 8-byte Reload + adcq 552(%rsp), %rcx + movq %rcx, 176(%rsp) # 8-byte Spill + adcq $0, 168(%rsp) # 8-byte Folded Spill + adcq $0, %rbx + movq %rbx, 120(%rsp) # 8-byte Spill + movq %r15, %rbx + adcq $0, %rbx + adcq $0, 96(%rsp) # 8-byte Folded Spill + adcq $0, 80(%rsp) # 8-byte Folded Spill + movq %rax, %rdx + movq %rax, %r15 + imulq %r12, %rdx + leaq 416(%rsp), %rdi + movq 112(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 416(%rsp), %r15 + adcq 424(%rsp), %rbp + movq %rbp, %rax + movq 88(%rsp), %rcx # 8-byte Reload + adcq 432(%rsp), %rcx + movq %rcx, 88(%rsp) # 8-byte Spill + movq %r14, %r12 + adcq 440(%rsp), %r12 + movq 184(%rsp), %r14 # 8-byte Reload + adcq 448(%rsp), %r14 + movq 192(%rsp), %rbp # 8-byte Reload + adcq 456(%rsp), %rbp + adcq 464(%rsp), %r13 + movq 176(%rsp), %rcx # 8-byte Reload + adcq 472(%rsp), %rcx + movq %rcx, 176(%rsp) # 8-byte Spill + movq 168(%rsp), %rcx # 8-byte Reload + adcq 480(%rsp), %rcx + movq %rcx, 168(%rsp) # 8-byte Spill + adcq $0, 120(%rsp) # 8-byte Folded Spill + adcq $0, %rbx + movq %rbx, 136(%rsp) # 8-byte Spill + movq 96(%rsp), %r15 # 8-byte Reload + adcq $0, %r15 + adcq $0, 80(%rsp) # 8-byte Folded Spill + movq %rax, %rbx + movq %rbx, %rdx + imulq 128(%rsp), %rdx # 8-byte Folded Reload + leaq 344(%rsp), %rdi + movq 112(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 344(%rsp), %rbx + movq 88(%rsp), %rax # 8-byte Reload + adcq 352(%rsp), %rax + adcq 360(%rsp), %r12 + movq %r12, 144(%rsp) # 8-byte Spill + adcq 368(%rsp), %r14 + movq %r14, 184(%rsp) # 8-byte Spill + adcq 376(%rsp), %rbp + movq %rbp, 192(%rsp) # 8-byte Spill + adcq 384(%rsp), %r13 + movq %r13, 160(%rsp) # 8-byte Spill + movq 176(%rsp), %r13 # 8-byte Reload + adcq 392(%rsp), %r13 + movq 168(%rsp), %r12 # 8-byte Reload + adcq 400(%rsp), %r12 + movq 120(%rsp), %r14 # 8-byte Reload + adcq 408(%rsp), %r14 + movq 136(%rsp), %rbp # 8-byte Reload + adcq $0, %rbp + movq %r15, %rbx + adcq $0, %rbx + adcq $0, 80(%rsp) # 8-byte Folded Spill + movq %rax, %rdx + movq %rax, %r15 + imulq 128(%rsp), %rdx # 8-byte Folded Reload + leaq 272(%rsp), %rdi + movq 112(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 272(%rsp), %r15 + movq 144(%rsp), %rcx # 8-byte Reload + adcq 280(%rsp), %rcx + movq 184(%rsp), %rax # 8-byte Reload + adcq 288(%rsp), %rax + movq %rax, 184(%rsp) # 8-byte Spill + movq 192(%rsp), %rax # 8-byte Reload + adcq 296(%rsp), %rax + movq %rax, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %rax # 8-byte Reload + adcq 304(%rsp), %rax + movq %rax, 160(%rsp) # 8-byte Spill + adcq 312(%rsp), %r13 + movq %r13, 176(%rsp) # 
8-byte Spill + adcq 320(%rsp), %r12 + movq %r12, 168(%rsp) # 8-byte Spill + adcq 328(%rsp), %r14 + movq %r14, %r13 + adcq 336(%rsp), %rbp + movq %rbp, %r12 + adcq $0, %rbx + movq %rbx, %r14 + movq 80(%rsp), %r15 # 8-byte Reload + adcq $0, %r15 + movq 128(%rsp), %rdx # 8-byte Reload + movq %rcx, %rbx + imulq %rbx, %rdx + leaq 200(%rsp), %rdi + movq 112(%rsp), %rsi # 8-byte Reload + callq .LmulPv512x64 + addq 200(%rsp), %rbx + movq 184(%rsp), %rax # 8-byte Reload + adcq 208(%rsp), %rax + movq %rax, 184(%rsp) # 8-byte Spill + movq 192(%rsp), %r8 # 8-byte Reload + adcq 216(%rsp), %r8 + movq %r8, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %rdx # 8-byte Reload + adcq 224(%rsp), %rdx + movq 176(%rsp), %rsi # 8-byte Reload + adcq 232(%rsp), %rsi + movq 168(%rsp), %rdi # 8-byte Reload + adcq 240(%rsp), %rdi + movq %r13, %rbp + adcq 248(%rsp), %rbp + movq %r12, %rbx + adcq 256(%rsp), %rbx + movq %rbx, 136(%rsp) # 8-byte Spill + movq %r14, %r9 + adcq 264(%rsp), %r9 + adcq $0, %r15 + movq %r15, %r10 + subq 16(%rsp), %rax # 8-byte Folded Reload + movq %r8, %rcx + sbbq 8(%rsp), %rcx # 8-byte Folded Reload + movq %rdx, %r13 + sbbq 24(%rsp), %r13 # 8-byte Folded Reload + movq %rsi, %r12 + sbbq 32(%rsp), %r12 # 8-byte Folded Reload + movq %rdi, %r14 + sbbq 40(%rsp), %r14 # 8-byte Folded Reload + movq %rbp, %r11 + sbbq 48(%rsp), %r11 # 8-byte Folded Reload + movq %rbx, %r8 + sbbq 56(%rsp), %r8 # 8-byte Folded Reload + movq %r9, %r15 + sbbq 64(%rsp), %r9 # 8-byte Folded Reload + sbbq $0, %r10 + andl $1, %r10d + cmovneq %r15, %r9 + testb %r10b, %r10b + cmovneq 184(%rsp), %rax # 8-byte Folded Reload + movq 72(%rsp), %rbx # 8-byte Reload + movq %rax, (%rbx) + cmovneq 192(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 8(%rbx) + cmovneq %rdx, %r13 + movq %r13, 16(%rbx) + cmovneq %rsi, %r12 + movq %r12, 24(%rbx) + cmovneq %rdi, %r14 + movq %r14, 32(%rbx) + cmovneq %rbp, %r11 + movq %r11, 40(%rbx) + cmovneq 136(%rsp), %r8 # 8-byte Folded Reload + movq %r8, 48(%rbx) + movq %r9, 56(%rbx) + addq $776, %rsp # imm = 0x308 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end116: + .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L + + .globl mcl_fp_addPre8L + .align 16, 0x90 + .type mcl_fp_addPre8L,@function +mcl_fp_addPre8L: # @mcl_fp_addPre8L +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r8 + movq 56(%rsi), %r15 + movq 48(%rdx), %r9 + movq 48(%rsi), %r12 + movq 40(%rdx), %r10 + movq 32(%rdx), %r11 + movq 24(%rdx), %r14 + movq 16(%rdx), %rbx + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rbx + movq 40(%rsi), %r13 + movq 24(%rsi), %rax + movq 32(%rsi), %rsi + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %rbx, 16(%rdi) + adcq %r14, %rax + movq %rax, 24(%rdi) + adcq %r11, %rsi + movq %rsi, 32(%rdi) + adcq %r10, %r13 + movq %r13, 40(%rdi) + adcq %r9, %r12 + movq %r12, 48(%rdi) + adcq %r8, %r15 + movq %r15, 56(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end117: + .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L + + .globl mcl_fp_subPre8L + .align 16, 0x90 + .type mcl_fp_subPre8L,@function +mcl_fp_subPre8L: # @mcl_fp_subPre8L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r8 + movq 56(%rsi), %r15 + movq 48(%rdx), %r9 + movq 40(%rdx), %r10 + movq 24(%rdx), %r11 + movq 32(%rdx), %r14 + movq (%rsi), %rbx + movq 8(%rsi), %r12 + xorl %eax, %eax + subq (%rdx), 
%rbx + sbbq 8(%rdx), %r12 + movq 16(%rsi), %rcx + sbbq 16(%rdx), %rcx + movq 48(%rsi), %r13 + movq 40(%rsi), %rdx + movq 32(%rsi), %rbp + movq 24(%rsi), %rsi + movq %rbx, (%rdi) + movq %r12, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r11, %rsi + movq %rsi, 24(%rdi) + sbbq %r14, %rbp + movq %rbp, 32(%rdi) + sbbq %r10, %rdx + movq %rdx, 40(%rdi) + sbbq %r9, %r13 + movq %r13, 48(%rdi) + sbbq %r8, %r15 + movq %r15, 56(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end118: + .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L + + .globl mcl_fp_shr1_8L + .align 16, 0x90 + .type mcl_fp_shr1_8L,@function +mcl_fp_shr1_8L: # @mcl_fp_shr1_8L +# BB#0: + movq 56(%rsi), %r8 + movq 48(%rsi), %r9 + movq 40(%rsi), %r10 + movq 32(%rsi), %r11 + movq 24(%rsi), %rcx + movq 16(%rsi), %rdx + movq (%rsi), %rax + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rax + movq %rax, (%rdi) + shrdq $1, %rdx, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rcx, %rdx + movq %rdx, 16(%rdi) + shrdq $1, %r11, %rcx + movq %rcx, 24(%rdi) + shrdq $1, %r10, %r11 + movq %r11, 32(%rdi) + shrdq $1, %r9, %r10 + movq %r10, 40(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 48(%rdi) + shrq %r8 + movq %r8, 56(%rdi) + retq +.Lfunc_end119: + .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L + + .globl mcl_fp_add8L + .align 16, 0x90 + .type mcl_fp_add8L,@function +mcl_fp_add8L: # @mcl_fp_add8L +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r15 + movq 56(%rsi), %r8 + movq 48(%rdx), %r12 + movq 48(%rsi), %r9 + movq 40(%rsi), %r13 + movq 24(%rsi), %r11 + movq 32(%rsi), %r10 + movq (%rdx), %r14 + movq 8(%rdx), %rbx + addq (%rsi), %r14 + adcq 8(%rsi), %rbx + movq 16(%rdx), %rax + adcq 16(%rsi), %rax + adcq 24(%rdx), %r11 + movq 40(%rdx), %rsi + adcq 32(%rdx), %r10 + movq %r14, (%rdi) + movq %rbx, 8(%rdi) + movq %rax, 16(%rdi) + movq %r11, 24(%rdi) + movq %r10, 32(%rdi) + adcq %r13, %rsi + movq %rsi, 40(%rdi) + adcq %r12, %r9 + movq %r9, 48(%rdi) + adcq %r15, %r8 + movq %r8, 56(%rdi) + sbbq %rdx, %rdx + andl $1, %edx + subq (%rcx), %r14 + sbbq 8(%rcx), %rbx + sbbq 16(%rcx), %rax + sbbq 24(%rcx), %r11 + sbbq 32(%rcx), %r10 + sbbq 40(%rcx), %rsi + sbbq 48(%rcx), %r9 + sbbq 56(%rcx), %r8 + sbbq $0, %rdx + testb $1, %dl + jne .LBB120_2 +# BB#1: # %nocarry + movq %r14, (%rdi) + movq %rbx, 8(%rdi) + movq %rax, 16(%rdi) + movq %r11, 24(%rdi) + movq %r10, 32(%rdi) + movq %rsi, 40(%rdi) + movq %r9, 48(%rdi) + movq %r8, 56(%rdi) +.LBB120_2: # %carry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end120: + .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L + + .globl mcl_fp_addNF8L + .align 16, 0x90 + .type mcl_fp_addNF8L,@function +mcl_fp_addNF8L: # @mcl_fp_addNF8L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r8 + movq 48(%rdx), %rbp + movq 40(%rdx), %rbx + movq 32(%rdx), %rax + movq 24(%rdx), %r11 + movq 16(%rdx), %r15 + movq (%rdx), %r13 + movq 8(%rdx), %r12 + addq (%rsi), %r13 + adcq 8(%rsi), %r12 + adcq 16(%rsi), %r15 + adcq 24(%rsi), %r11 + adcq 32(%rsi), %rax + movq %rax, -24(%rsp) # 8-byte Spill + movq %rax, %r10 + adcq 40(%rsi), %rbx + movq %rbx, -16(%rsp) # 8-byte Spill + movq %rbx, %r9 + adcq 48(%rsi), %rbp + movq %rbp, -8(%rsp) # 8-byte Spill + movq %rbp, %rax + adcq 56(%rsi), %r8 + movq %r13, %rsi + subq (%rcx), %rsi + movq %r12, %rdx + sbbq 8(%rcx), %rdx + movq %r15, %rbx + sbbq 16(%rcx), %rbx + movq %r11, %r14 + sbbq 24(%rcx), %r14 + movq %r10, %rbp + sbbq 32(%rcx), 
%rbp + movq %r9, %r10 + sbbq 40(%rcx), %r10 + movq %rax, %r9 + sbbq 48(%rcx), %r9 + movq %r8, %rax + sbbq 56(%rcx), %rax + testq %rax, %rax + cmovsq %r13, %rsi + movq %rsi, (%rdi) + cmovsq %r12, %rdx + movq %rdx, 8(%rdi) + cmovsq %r15, %rbx + movq %rbx, 16(%rdi) + cmovsq %r11, %r14 + movq %r14, 24(%rdi) + cmovsq -24(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 32(%rdi) + cmovsq -16(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 40(%rdi) + cmovsq -8(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 48(%rdi) + cmovsq %r8, %rax + movq %rax, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end121: + .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L + + .globl mcl_fp_sub8L + .align 16, 0x90 + .type mcl_fp_sub8L,@function +mcl_fp_sub8L: # @mcl_fp_sub8L +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r12 + movq 56(%rsi), %r8 + movq 48(%rdx), %r13 + movq (%rsi), %rax + movq 8(%rsi), %r10 + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %r10 + movq 16(%rsi), %r11 + sbbq 16(%rdx), %r11 + movq 24(%rsi), %r15 + sbbq 24(%rdx), %r15 + movq 32(%rsi), %r14 + sbbq 32(%rdx), %r14 + movq 48(%rsi), %r9 + movq 40(%rsi), %rsi + sbbq 40(%rdx), %rsi + movq %rax, (%rdi) + movq %r10, 8(%rdi) + movq %r11, 16(%rdi) + movq %r15, 24(%rdi) + movq %r14, 32(%rdi) + movq %rsi, 40(%rdi) + sbbq %r13, %r9 + movq %r9, 48(%rdi) + sbbq %r12, %r8 + movq %r8, 56(%rdi) + sbbq $0, %rbx + testb $1, %bl + je .LBB122_2 +# BB#1: # %carry + addq (%rcx), %rax + movq %rax, (%rdi) + movq 8(%rcx), %rax + adcq %r10, %rax + movq %rax, 8(%rdi) + movq 16(%rcx), %rax + adcq %r11, %rax + movq %rax, 16(%rdi) + movq 24(%rcx), %rax + adcq %r15, %rax + movq %rax, 24(%rdi) + movq 32(%rcx), %rax + adcq %r14, %rax + movq %rax, 32(%rdi) + movq 40(%rcx), %rax + adcq %rsi, %rax + movq %rax, 40(%rdi) + movq 48(%rcx), %rax + adcq %r9, %rax + movq %rax, 48(%rdi) + movq 56(%rcx), %rax + adcq %r8, %rax + movq %rax, 56(%rdi) +.LBB122_2: # %nocarry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end122: + .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L + + .globl mcl_fp_subNF8L + .align 16, 0x90 + .type mcl_fp_subNF8L,@function +mcl_fp_subNF8L: # @mcl_fp_subNF8L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq %rdi, %r9 + movq 56(%rsi), %r14 + movq 48(%rsi), %rax + movq 40(%rsi), %rcx + movq 32(%rsi), %rdi + movq 24(%rsi), %r11 + movq 16(%rsi), %r15 + movq (%rsi), %r13 + movq 8(%rsi), %r12 + subq (%rdx), %r13 + sbbq 8(%rdx), %r12 + sbbq 16(%rdx), %r15 + sbbq 24(%rdx), %r11 + sbbq 32(%rdx), %rdi + movq %rdi, -24(%rsp) # 8-byte Spill + sbbq 40(%rdx), %rcx + movq %rcx, -16(%rsp) # 8-byte Spill + sbbq 48(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + sbbq 56(%rdx), %r14 + movq %r14, %rsi + sarq $63, %rsi + movq 56(%r8), %r10 + andq %rsi, %r10 + movq 48(%r8), %rbx + andq %rsi, %rbx + movq 40(%r8), %rdi + andq %rsi, %rdi + movq 32(%r8), %rbp + andq %rsi, %rbp + movq 24(%r8), %rdx + andq %rsi, %rdx + movq 16(%r8), %rcx + andq %rsi, %rcx + movq 8(%r8), %rax + andq %rsi, %rax + andq (%r8), %rsi + addq %r13, %rsi + adcq %r12, %rax + movq %rsi, (%r9) + adcq %r15, %rcx + movq %rax, 8(%r9) + movq %rcx, 16(%r9) + adcq %r11, %rdx + movq %rdx, 24(%r9) + adcq -24(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 32(%r9) + adcq -16(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 40(%r9) + adcq -8(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, 48(%r9) + adcq %r14, %r10 + movq %r10, 56(%r9) + popq 
%rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end123: + .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L + + .globl mcl_fpDbl_add8L + .align 16, 0x90 + .type mcl_fpDbl_add8L,@function +mcl_fpDbl_add8L: # @mcl_fpDbl_add8L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 120(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 112(%rdx), %rax + movq %rax, -24(%rsp) # 8-byte Spill + movq 104(%rdx), %rax + movq %rax, -32(%rsp) # 8-byte Spill + movq 96(%rdx), %r14 + movq 24(%rsi), %r15 + movq 32(%rsi), %r11 + movq 16(%rdx), %r12 + movq (%rdx), %rbx + movq 8(%rdx), %rax + addq (%rsi), %rbx + adcq 8(%rsi), %rax + adcq 16(%rsi), %r12 + adcq 24(%rdx), %r15 + adcq 32(%rdx), %r11 + movq 88(%rdx), %rbp + movq 80(%rdx), %r13 + movq %rbx, (%rdi) + movq 72(%rdx), %r10 + movq %rax, 8(%rdi) + movq 64(%rdx), %r9 + movq %r12, 16(%rdi) + movq 40(%rdx), %r12 + movq %r15, 24(%rdi) + movq 40(%rsi), %rbx + adcq %r12, %rbx + movq 56(%rdx), %r15 + movq 48(%rdx), %r12 + movq %r11, 32(%rdi) + movq 48(%rsi), %rdx + adcq %r12, %rdx + movq 120(%rsi), %r12 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rax + adcq %r15, %rax + movq 112(%rsi), %rcx + movq %rdx, 48(%rdi) + movq 64(%rsi), %rbx + adcq %r9, %rbx + movq 104(%rsi), %rdx + movq %rax, 56(%rdi) + movq 72(%rsi), %r9 + adcq %r10, %r9 + movq 80(%rsi), %r11 + adcq %r13, %r11 + movq 96(%rsi), %rax + movq 88(%rsi), %r15 + adcq %rbp, %r15 + adcq %r14, %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq %rdx, %rax + adcq -32(%rsp), %rax # 8-byte Folded Reload + movq %rax, -32(%rsp) # 8-byte Spill + adcq -24(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -24(%rsp) # 8-byte Spill + adcq -8(%rsp), %r12 # 8-byte Folded Reload + movq %r12, -8(%rsp) # 8-byte Spill + sbbq %rbp, %rbp + andl $1, %ebp + movq %rbx, %rsi + subq (%r8), %rsi + movq %r9, %rdx + sbbq 8(%r8), %rdx + movq %r11, %r10 + sbbq 16(%r8), %r10 + movq %r15, %r14 + sbbq 24(%r8), %r14 + movq -16(%rsp), %r13 # 8-byte Reload + sbbq 32(%r8), %r13 + movq %rax, %r12 + sbbq 40(%r8), %r12 + movq %rcx, %rax + sbbq 48(%r8), %rax + movq -8(%rsp), %rcx # 8-byte Reload + sbbq 56(%r8), %rcx + sbbq $0, %rbp + andl $1, %ebp + cmovneq %rbx, %rsi + movq %rsi, 64(%rdi) + testb %bpl, %bpl + cmovneq %r9, %rdx + movq %rdx, 72(%rdi) + cmovneq %r11, %r10 + movq %r10, 80(%rdi) + cmovneq %r15, %r14 + movq %r14, 88(%rdi) + cmovneq -16(%rsp), %r13 # 8-byte Folded Reload + movq %r13, 96(%rdi) + cmovneq -32(%rsp), %r12 # 8-byte Folded Reload + movq %r12, 104(%rdi) + cmovneq -24(%rsp), %rax # 8-byte Folded Reload + movq %rax, 112(%rdi) + cmovneq -8(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 120(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end124: + .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L + + .globl mcl_fpDbl_sub8L + .align 16, 0x90 + .type mcl_fpDbl_sub8L,@function +mcl_fpDbl_sub8L: # @mcl_fpDbl_sub8L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r15 + movq 120(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 112(%rdx), %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq 104(%rdx), %rax + movq %rax, -24(%rsp) # 8-byte Spill + movq 16(%rsi), %r9 + movq (%rsi), %r12 + movq 8(%rsi), %r14 + xorl %r8d, %r8d + subq (%rdx), %r12 + sbbq 8(%rdx), %r14 + sbbq 16(%rdx), %r9 + movq 24(%rsi), %rbx + sbbq 24(%rdx), %rbx + movq 32(%rsi), %r13 + sbbq 32(%rdx), %r13 + movq 96(%rdx), %rbp + movq 88(%rdx), %r11 + movq %r12, (%rdi) + movq 80(%rdx), 
%r12 + movq %r14, 8(%rdi) + movq 72(%rdx), %r10 + movq %r9, 16(%rdi) + movq 40(%rdx), %r9 + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %r9, %rbx + movq 48(%rdx), %r9 + movq %r13, 32(%rdi) + movq 48(%rsi), %r14 + sbbq %r9, %r14 + movq 64(%rdx), %r13 + movq 56(%rdx), %r9 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rdx + sbbq %r9, %rdx + movq 120(%rsi), %rcx + movq %r14, 48(%rdi) + movq 64(%rsi), %rbx + sbbq %r13, %rbx + movq 112(%rsi), %rax + movq %rdx, 56(%rdi) + movq 72(%rsi), %r9 + sbbq %r10, %r9 + movq 80(%rsi), %r13 + sbbq %r12, %r13 + movq 88(%rsi), %r12 + sbbq %r11, %r12 + movq 104(%rsi), %rdx + movq 96(%rsi), %r14 + sbbq %rbp, %r14 + sbbq -24(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -24(%rsp) # 8-byte Spill + sbbq -16(%rsp), %rax # 8-byte Folded Reload + movq %rax, -16(%rsp) # 8-byte Spill + sbbq -8(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -8(%rsp) # 8-byte Spill + movl $0, %ebp + sbbq $0, %rbp + andl $1, %ebp + movq (%r15), %r11 + cmoveq %r8, %r11 + testb %bpl, %bpl + movq 16(%r15), %rbp + cmoveq %r8, %rbp + movq 8(%r15), %rsi + cmoveq %r8, %rsi + movq 56(%r15), %r10 + cmoveq %r8, %r10 + movq 48(%r15), %rdx + cmoveq %r8, %rdx + movq 40(%r15), %rcx + cmoveq %r8, %rcx + movq 32(%r15), %rax + cmoveq %r8, %rax + cmovneq 24(%r15), %r8 + addq %rbx, %r11 + adcq %r9, %rsi + movq %r11, 64(%rdi) + adcq %r13, %rbp + movq %rsi, 72(%rdi) + movq %rbp, 80(%rdi) + adcq %r12, %r8 + movq %r8, 88(%rdi) + adcq %r14, %rax + movq %rax, 96(%rdi) + adcq -24(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 104(%rdi) + adcq -16(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 112(%rdi) + adcq -8(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 120(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end125: + .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L + + .align 16, 0x90 + .type .LmulPv576x64,@function +.LmulPv576x64: # @mulPv576x64 +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rbx + movq %rbx, %rax + mulq (%rsi) + movq %rdx, -32(%rsp) # 8-byte Spill + movq %rax, (%rdi) + movq %rbx, %rax + mulq 64(%rsi) + movq %rdx, %r10 + movq %rax, -8(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq 56(%rsi) + movq %rdx, %r14 + movq %rax, -16(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq 48(%rsi) + movq %rdx, %r12 + movq %rax, -24(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq 40(%rsi) + movq %rdx, %rcx + movq %rax, -40(%rsp) # 8-byte Spill + movq %rbx, %rax + mulq 32(%rsi) + movq %rdx, %rbp + movq %rax, %r8 + movq %rbx, %rax + mulq 24(%rsi) + movq %rdx, %r9 + movq %rax, %r11 + movq %rbx, %rax + mulq 16(%rsi) + movq %rdx, %r15 + movq %rax, %r13 + movq %rbx, %rax + mulq 8(%rsi) + addq -32(%rsp), %rax # 8-byte Folded Reload + movq %rax, 8(%rdi) + adcq %r13, %rdx + movq %rdx, 16(%rdi) + adcq %r11, %r15 + movq %r15, 24(%rdi) + adcq %r8, %r9 + movq %r9, 32(%rdi) + adcq -40(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 40(%rdi) + adcq -24(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 48(%rdi) + adcq -16(%rsp), %r12 # 8-byte Folded Reload + movq %r12, 56(%rdi) + adcq -8(%rsp), %r14 # 8-byte Folded Reload + movq %r14, 64(%rdi) + adcq $0, %r10 + movq %r10, 72(%rdi) + movq %rdi, %rax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end126: + .size .LmulPv576x64, .Lfunc_end126-.LmulPv576x64 + + .globl mcl_fp_mulUnitPre9L + .align 16, 0x90 + .type mcl_fp_mulUnitPre9L,@function +mcl_fp_mulUnitPre9L: # @mcl_fp_mulUnitPre9L +# BB#0: + pushq %r14 + pushq %rbx + subq $88, %rsp + movq 
%rdi, %rbx + leaq 8(%rsp), %rdi + callq .LmulPv576x64 + movq 80(%rsp), %r8 + movq 72(%rsp), %r9 + movq 64(%rsp), %r10 + movq 56(%rsp), %r11 + movq 48(%rsp), %r14 + movq 40(%rsp), %rax + movq 32(%rsp), %rcx + movq 24(%rsp), %rdx + movq 8(%rsp), %rsi + movq 16(%rsp), %rdi + movq %rsi, (%rbx) + movq %rdi, 8(%rbx) + movq %rdx, 16(%rbx) + movq %rcx, 24(%rbx) + movq %rax, 32(%rbx) + movq %r14, 40(%rbx) + movq %r11, 48(%rbx) + movq %r10, 56(%rbx) + movq %r9, 64(%rbx) + movq %r8, 72(%rbx) + addq $88, %rsp + popq %rbx + popq %r14 + retq +.Lfunc_end127: + .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L + + .globl mcl_fpDbl_mulPre9L + .align 16, 0x90 + .type mcl_fpDbl_mulPre9L,@function +mcl_fpDbl_mulPre9L: # @mcl_fpDbl_mulPre9L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $808, %rsp # imm = 0x328 + movq %rdx, %rax + movq %rax, 64(%rsp) # 8-byte Spill + movq %rsi, 72(%rsp) # 8-byte Spill + movq %rdi, %r12 + movq %r12, 80(%rsp) # 8-byte Spill + movq (%rax), %rdx + movq %rax, %rbx + leaq 728(%rsp), %rdi + movq %rsi, %rbp + callq .LmulPv576x64 + movq 800(%rsp), %r13 + movq 792(%rsp), %rax + movq %rax, 56(%rsp) # 8-byte Spill + movq 784(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 776(%rsp), %rax + movq %rax, 40(%rsp) # 8-byte Spill + movq 768(%rsp), %rax + movq %rax, 32(%rsp) # 8-byte Spill + movq 760(%rsp), %rax + movq %rax, 24(%rsp) # 8-byte Spill + movq 752(%rsp), %rax + movq %rax, 16(%rsp) # 8-byte Spill + movq 744(%rsp), %rax + movq %rax, 8(%rsp) # 8-byte Spill + movq 728(%rsp), %rax + movq 736(%rsp), %r14 + movq %rax, (%r12) + movq 8(%rbx), %rdx + leaq 648(%rsp), %rdi + movq %rbp, %rsi + callq .LmulPv576x64 + movq 720(%rsp), %r8 + movq 712(%rsp), %rcx + movq 704(%rsp), %rdx + movq 696(%rsp), %rsi + movq 688(%rsp), %rdi + movq 680(%rsp), %rbp + addq 648(%rsp), %r14 + movq 672(%rsp), %rax + movq 656(%rsp), %rbx + movq 664(%rsp), %r15 + movq %r14, 8(%r12) + adcq 8(%rsp), %rbx # 8-byte Folded Reload + adcq 16(%rsp), %r15 # 8-byte Folded Reload + adcq 24(%rsp), %rax # 8-byte Folded Reload + movq %rax, %r14 + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 16(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 24(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 32(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 40(%rsp) # 8-byte Spill + adcq %r13, %rcx + movq %rcx, 48(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 56(%rsp) # 8-byte Spill + movq 64(%rsp), %r13 # 8-byte Reload + movq 16(%r13), %rdx + leaq 568(%rsp), %rdi + movq 72(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 640(%rsp), %r8 + movq 632(%rsp), %r9 + movq 624(%rsp), %r10 + movq 616(%rsp), %rdi + movq 608(%rsp), %rbp + movq 600(%rsp), %rcx + addq 568(%rsp), %rbx + movq 592(%rsp), %rdx + movq 576(%rsp), %r12 + movq 584(%rsp), %rsi + movq 80(%rsp), %rax # 8-byte Reload + movq %rbx, 16(%rax) + adcq %r15, %r12 + adcq %r14, %rsi + movq %rsi, (%rsp) # 8-byte Spill + adcq 16(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 8(%rsp) # 8-byte Spill + adcq 24(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 48(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 
56(%rsp) # 8-byte Spill + movq 24(%r13), %rdx + leaq 488(%rsp), %rdi + movq 72(%rsp), %r15 # 8-byte Reload + movq %r15, %rsi + callq .LmulPv576x64 + movq 560(%rsp), %r8 + movq 552(%rsp), %rcx + movq 544(%rsp), %rdx + movq 536(%rsp), %rsi + movq 528(%rsp), %rdi + movq 520(%rsp), %rbp + addq 488(%rsp), %r12 + movq 512(%rsp), %rax + movq 496(%rsp), %rbx + movq 504(%rsp), %r13 + movq 80(%rsp), %r14 # 8-byte Reload + movq %r12, 24(%r14) + adcq (%rsp), %rbx # 8-byte Folded Reload + adcq 8(%rsp), %r13 # 8-byte Folded Reload + adcq 16(%rsp), %rax # 8-byte Folded Reload + movq %rax, 8(%rsp) # 8-byte Spill + adcq 24(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 48(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 56(%rsp) # 8-byte Spill + movq 64(%rsp), %r12 # 8-byte Reload + movq 32(%r12), %rdx + leaq 408(%rsp), %rdi + movq %r15, %rsi + callq .LmulPv576x64 + movq 480(%rsp), %r8 + movq 472(%rsp), %r9 + movq 464(%rsp), %rdx + movq 456(%rsp), %rsi + movq 448(%rsp), %rdi + movq 440(%rsp), %rbp + addq 408(%rsp), %rbx + movq 432(%rsp), %rax + movq 416(%rsp), %r15 + movq 424(%rsp), %rcx + movq %rbx, 32(%r14) + adcq %r13, %r15 + adcq 8(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, (%rsp) # 8-byte Spill + adcq 16(%rsp), %rax # 8-byte Folded Reload + movq %rax, 8(%rsp) # 8-byte Spill + adcq 24(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 48(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 56(%rsp) # 8-byte Spill + movq %r12, %r14 + movq 40(%r14), %rdx + leaq 328(%rsp), %rdi + movq 72(%rsp), %r13 # 8-byte Reload + movq %r13, %rsi + callq .LmulPv576x64 + movq 400(%rsp), %r8 + movq 392(%rsp), %r9 + movq 384(%rsp), %rsi + movq 376(%rsp), %rdi + movq 368(%rsp), %rbx + movq 360(%rsp), %rbp + addq 328(%rsp), %r15 + movq 352(%rsp), %rcx + movq 336(%rsp), %r12 + movq 344(%rsp), %rdx + movq 80(%rsp), %rax # 8-byte Reload + movq %r15, 40(%rax) + adcq (%rsp), %r12 # 8-byte Folded Reload + adcq 8(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, (%rsp) # 8-byte Spill + adcq 16(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 8(%rsp) # 8-byte Spill + adcq 24(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 48(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 56(%rsp) # 8-byte Spill + movq 48(%r14), %rdx + leaq 248(%rsp), %rdi + movq %r13, %rsi + movq %r13, %r15 + callq .LmulPv576x64 + movq 320(%rsp), %r8 + movq 312(%rsp), %r9 + movq 304(%rsp), %rsi + movq 296(%rsp), %rdi + movq 288(%rsp), %rbx + movq 280(%rsp), %rbp + addq 248(%rsp), %r12 + movq 272(%rsp), %rcx + movq 256(%rsp), %r13 + movq 264(%rsp), %rdx + movq 80(%rsp), %rax # 8-byte Reload + movq %r12, 
48(%rax) + adcq (%rsp), %r13 # 8-byte Folded Reload + adcq 8(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, (%rsp) # 8-byte Spill + adcq 16(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 8(%rsp) # 8-byte Spill + adcq 24(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 48(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 56(%rsp) # 8-byte Spill + movq 56(%r14), %rdx + leaq 168(%rsp), %rdi + movq %r15, %rsi + callq .LmulPv576x64 + movq 240(%rsp), %rcx + movq 232(%rsp), %rdx + movq 224(%rsp), %rsi + movq 216(%rsp), %rdi + movq 208(%rsp), %rbx + addq 168(%rsp), %r13 + movq 200(%rsp), %r12 + movq 192(%rsp), %rbp + movq 176(%rsp), %r14 + movq 184(%rsp), %r15 + movq 80(%rsp), %rax # 8-byte Reload + movq %r13, 56(%rax) + adcq (%rsp), %r14 # 8-byte Folded Reload + adcq 8(%rsp), %r15 # 8-byte Folded Reload + adcq 16(%rsp), %rbp # 8-byte Folded Reload + adcq 24(%rsp), %r12 # 8-byte Folded Reload + adcq 32(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, %r13 + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 48(%rsp) # 8-byte Spill + adcq $0, %rcx + movq %rcx, 56(%rsp) # 8-byte Spill + movq 64(%rsp), %rax # 8-byte Reload + movq 64(%rax), %rdx + leaq 88(%rsp), %rdi + movq 72(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 88(%rsp), %r14 + adcq 96(%rsp), %r15 + movq 160(%rsp), %r8 + adcq 104(%rsp), %rbp + movq 152(%rsp), %r9 + movq 144(%rsp), %rdx + movq 136(%rsp), %rsi + movq 128(%rsp), %rdi + movq 120(%rsp), %rbx + movq 112(%rsp), %rax + movq 80(%rsp), %rcx # 8-byte Reload + movq %r14, 64(%rcx) + movq %r15, 72(%rcx) + adcq %r12, %rax + movq %rbp, 80(%rcx) + movq %rax, 88(%rcx) + adcq %r13, %rbx + movq %rbx, 96(%rcx) + adcq 32(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 104(%rcx) + adcq 40(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 112(%rcx) + adcq 48(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 120(%rcx) + adcq 56(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 128(%rcx) + adcq $0, %r8 + movq %r8, 136(%rcx) + addq $808, %rsp # imm = 0x328 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end128: + .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L + + .globl mcl_fpDbl_sqrPre9L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre9L,@function +mcl_fpDbl_sqrPre9L: # @mcl_fpDbl_sqrPre9L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $808, %rsp # imm = 0x328 + movq %rsi, %r15 + movq %r15, 80(%rsp) # 8-byte Spill + movq %rdi, %r14 + movq %r14, 72(%rsp) # 8-byte Spill + movq (%r15), %rdx + leaq 728(%rsp), %rdi + callq .LmulPv576x64 + movq 800(%rsp), %rax + movq %rax, 64(%rsp) # 8-byte Spill + movq 792(%rsp), %rax + movq %rax, 56(%rsp) # 8-byte Spill + movq 784(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 776(%rsp), %rax + movq %rax, 40(%rsp) # 8-byte Spill + movq 768(%rsp), %rax + movq %rax, 32(%rsp) # 8-byte Spill + movq 760(%rsp), %rax + movq %rax, 24(%rsp) # 8-byte Spill + movq 752(%rsp), %rax + movq %rax, 16(%rsp) # 8-byte Spill + movq 744(%rsp), %rax + movq %rax, 8(%rsp) # 8-byte Spill + movq 728(%rsp), 
%rax + movq 736(%rsp), %r12 + movq %rax, (%r14) + movq 8(%r15), %rdx + leaq 648(%rsp), %rdi + movq %r15, %rsi + callq .LmulPv576x64 + movq 720(%rsp), %r8 + movq 712(%rsp), %rcx + movq 704(%rsp), %rdx + movq 696(%rsp), %rsi + movq 688(%rsp), %rdi + movq 680(%rsp), %rbp + addq 648(%rsp), %r12 + movq 672(%rsp), %rax + movq 656(%rsp), %rbx + movq 664(%rsp), %r13 + movq %r12, 8(%r14) + adcq 8(%rsp), %rbx # 8-byte Folded Reload + adcq 16(%rsp), %r13 # 8-byte Folded Reload + adcq 24(%rsp), %rax # 8-byte Folded Reload + movq %rax, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 48(%rsp) # 8-byte Spill + adcq 64(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 56(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq 16(%r15), %rdx + leaq 568(%rsp), %rdi + movq %r15, %rsi + callq .LmulPv576x64 + movq 640(%rsp), %r8 + movq 632(%rsp), %rcx + movq 624(%rsp), %rdx + movq 616(%rsp), %rsi + movq 608(%rsp), %rdi + movq 600(%rsp), %rbp + addq 568(%rsp), %rbx + movq 592(%rsp), %rax + movq 576(%rsp), %r14 + movq 584(%rsp), %r12 + movq 72(%rsp), %r15 # 8-byte Reload + movq %rbx, 16(%r15) + adcq %r13, %r14 + adcq 16(%rsp), %r12 # 8-byte Folded Reload + adcq 24(%rsp), %rax # 8-byte Folded Reload + movq %rax, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 48(%rsp) # 8-byte Spill + adcq 64(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 56(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq 80(%rsp), %rsi # 8-byte Reload + movq 24(%rsi), %rdx + leaq 488(%rsp), %rdi + callq .LmulPv576x64 + movq 560(%rsp), %r8 + movq 552(%rsp), %rcx + movq 544(%rsp), %rdx + movq 536(%rsp), %rsi + movq 528(%rsp), %rdi + movq 520(%rsp), %rbp + addq 488(%rsp), %r14 + movq 512(%rsp), %rax + movq 496(%rsp), %rbx + movq 504(%rsp), %r13 + movq %r14, 24(%r15) + adcq %r12, %rbx + adcq 16(%rsp), %r13 # 8-byte Folded Reload + adcq 24(%rsp), %rax # 8-byte Folded Reload + movq %rax, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 48(%rsp) # 8-byte Spill + adcq 64(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 56(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq 80(%rsp), %rsi # 8-byte Reload + movq 32(%rsi), %rdx + leaq 408(%rsp), %rdi + callq .LmulPv576x64 + movq 480(%rsp), %r8 + movq 472(%rsp), %rcx + movq 464(%rsp), %rdx + movq 456(%rsp), %rsi + movq 448(%rsp), %rdi + movq 440(%rsp), %rbp + addq 408(%rsp), %rbx + movq 432(%rsp), %rax + movq 416(%rsp), %r14 + movq 424(%rsp), %r12 + movq %rbx, 32(%r15) + adcq %r13, %r14 + adcq 16(%rsp), %r12 # 8-byte Folded Reload + adcq 24(%rsp), %rax # 8-byte Folded Reload + movq %rax, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%rsp) # 8-byte Spill + adcq 
40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 48(%rsp) # 8-byte Spill + adcq 64(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 56(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq 80(%rsp), %rsi # 8-byte Reload + movq 40(%rsi), %rdx + leaq 328(%rsp), %rdi + callq .LmulPv576x64 + movq 400(%rsp), %r8 + movq 392(%rsp), %rcx + movq 384(%rsp), %rdx + movq 376(%rsp), %rsi + movq 368(%rsp), %rdi + movq 360(%rsp), %rbp + addq 328(%rsp), %r14 + movq 352(%rsp), %rax + movq 336(%rsp), %rbx + movq 344(%rsp), %r13 + movq %r14, 40(%r15) + adcq %r12, %rbx + adcq 16(%rsp), %r13 # 8-byte Folded Reload + adcq 24(%rsp), %rax # 8-byte Folded Reload + movq %rax, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 48(%rsp) # 8-byte Spill + adcq 64(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 56(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq 80(%rsp), %rsi # 8-byte Reload + movq 48(%rsi), %rdx + leaq 248(%rsp), %rdi + callq .LmulPv576x64 + movq 320(%rsp), %r8 + movq 312(%rsp), %rcx + movq 304(%rsp), %rdx + movq 296(%rsp), %rsi + movq 288(%rsp), %rdi + movq 280(%rsp), %rbp + addq 248(%rsp), %rbx + movq 272(%rsp), %rax + movq 256(%rsp), %r12 + movq 264(%rsp), %r14 + movq %rbx, 48(%r15) + adcq %r13, %r12 + adcq 16(%rsp), %r14 # 8-byte Folded Reload + adcq 24(%rsp), %rax # 8-byte Folded Reload + movq %rax, 16(%rsp) # 8-byte Spill + adcq 32(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%rsp) # 8-byte Spill + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rsp) # 8-byte Spill + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 48(%rsp) # 8-byte Spill + adcq 64(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 56(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq 80(%rsp), %rsi # 8-byte Reload + movq 56(%rsi), %rdx + leaq 168(%rsp), %rdi + callq .LmulPv576x64 + movq 240(%rsp), %r8 + movq 232(%rsp), %rdx + movq 224(%rsp), %rsi + movq 216(%rsp), %rdi + movq 208(%rsp), %rbx + movq 200(%rsp), %rcx + addq 168(%rsp), %r12 + movq 192(%rsp), %r15 + movq 176(%rsp), %r13 + movq 184(%rsp), %rbp + movq 72(%rsp), %rax # 8-byte Reload + movq %r12, 56(%rax) + adcq %r14, %r13 + adcq 16(%rsp), %rbp # 8-byte Folded Reload + adcq 24(%rsp), %r15 # 8-byte Folded Reload + adcq 32(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, %r12 + adcq 40(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, %r14 + adcq 48(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 40(%rsp) # 8-byte Spill + adcq 56(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 48(%rsp) # 8-byte Spill + adcq 64(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 56(%rsp) # 8-byte Spill + adcq $0, %r8 + movq %r8, 64(%rsp) # 8-byte Spill + movq 80(%rsp), %rsi # 8-byte Reload + movq 64(%rsi), %rdx + leaq 88(%rsp), %rdi + callq .LmulPv576x64 + addq 88(%rsp), %r13 + adcq 96(%rsp), %rbp + movq 160(%rsp), %r8 + adcq 104(%rsp), %r15 + movq 152(%rsp), %r9 + movq 144(%rsp), %rdx + movq 136(%rsp), %rsi + movq 128(%rsp), %rdi + movq 120(%rsp), %rbx + movq 112(%rsp), %rax + 
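+ # The loads above pick up the last .LmulPv576x64 row; the stores below fold it, with carry, into limbs 8..17 of the 18-limb (1152-bit) square. + # Note that sqrPre appears to reuse the generic 576x64 row multiply on identical operands rather than a dedicated squaring kernel, trading the symmetry saving for simpler generated code.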
movq 72(%rsp), %rcx # 8-byte Reload + movq %r13, 64(%rcx) + movq %rbp, 72(%rcx) + adcq %r12, %rax + movq %r15, 80(%rcx) + movq %rax, 88(%rcx) + adcq %r14, %rbx + movq %rbx, 96(%rcx) + adcq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 104(%rcx) + adcq 48(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 112(%rcx) + adcq 56(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 120(%rcx) + adcq 64(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 128(%rcx) + adcq $0, %r8 + movq %r8, 136(%rcx) + addq $808, %rsp # imm = 0x328 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end129: + .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L + + .globl mcl_fp_mont9L + .align 16, 0x90 + .type mcl_fp_mont9L,@function +mcl_fp_mont9L: # @mcl_fp_mont9L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $1560, %rsp # imm = 0x618 + movq %rcx, 56(%rsp) # 8-byte Spill + movq %rdx, 32(%rsp) # 8-byte Spill + movq %rsi, 24(%rsp) # 8-byte Spill + movq %rdi, (%rsp) # 8-byte Spill + movq -8(%rcx), %rbx + movq %rbx, 16(%rsp) # 8-byte Spill + movq (%rdx), %rdx + leaq 1480(%rsp), %rdi + callq .LmulPv576x64 + movq 1480(%rsp), %r14 + movq 1488(%rsp), %r15 + movq %r14, %rdx + imulq %rbx, %rdx + movq 1552(%rsp), %rax + movq %rax, 112(%rsp) # 8-byte Spill + movq 1544(%rsp), %rax + movq %rax, 104(%rsp) # 8-byte Spill + movq 1536(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 1528(%rsp), %r12 + movq 1520(%rsp), %r13 + movq 1512(%rsp), %rbx + movq 1504(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 1496(%rsp), %rbp + leaq 1400(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 1400(%rsp), %r14 + adcq 1408(%rsp), %r15 + adcq 1416(%rsp), %rbp + movq %rbp, 96(%rsp) # 8-byte Spill + movq 80(%rsp), %rax # 8-byte Reload + adcq 1424(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + adcq 1432(%rsp), %rbx + movq %rbx, 40(%rsp) # 8-byte Spill + adcq 1440(%rsp), %r13 + movq %r13, 64(%rsp) # 8-byte Spill + adcq 1448(%rsp), %r12 + movq %r12, 48(%rsp) # 8-byte Spill + movq 88(%rsp), %rbx # 8-byte Reload + adcq 1456(%rsp), %rbx + movq 104(%rsp), %r14 # 8-byte Reload + adcq 1464(%rsp), %r14 + movq 112(%rsp), %r13 # 8-byte Reload + adcq 1472(%rsp), %r13 + sbbq %rbp, %rbp + movq 32(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rdx + leaq 1320(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %ebp + addq 1320(%rsp), %r15 + movq 96(%rsp), %rax # 8-byte Reload + adcq 1328(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + movq 80(%rsp), %rax # 8-byte Reload + adcq 1336(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 40(%rsp), %r12 # 8-byte Reload + adcq 1344(%rsp), %r12 + movq 64(%rsp), %rax # 8-byte Reload + adcq 1352(%rsp), %rax + movq %rax, 64(%rsp) # 8-byte Spill + movq 48(%rsp), %rax # 8-byte Reload + adcq 1360(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + adcq 1368(%rsp), %rbx + adcq 1376(%rsp), %r14 + movq %r14, 104(%rsp) # 8-byte Spill + adcq 1384(%rsp), %r13 + movq %r13, 112(%rsp) # 8-byte Spill + adcq 1392(%rsp), %rbp + sbbq %r14, %r14 + movq %r15, %rdx + imulq 16(%rsp), %rdx # 8-byte Folded Reload + leaq 1240(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq %r14, %rax + andl $1, %eax + addq 1240(%rsp), %r15 + movq 96(%rsp), %rcx # 8-byte Reload + adcq 1248(%rsp), %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + movq 80(%rsp), %r14 # 8-byte Reload + adcq 1256(%rsp), %r14 + adcq 1264(%rsp), %r12 + movq %r12, 40(%rsp) # 8-byte Spill + movq 
64(%rsp), %r12 # 8-byte Reload + adcq 1272(%rsp), %r12 + movq 48(%rsp), %r13 # 8-byte Reload + adcq 1280(%rsp), %r13 + adcq 1288(%rsp), %rbx + movq %rbx, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %r15 # 8-byte Reload + adcq 1296(%rsp), %r15 + movq 112(%rsp), %rbx # 8-byte Reload + adcq 1304(%rsp), %rbx + adcq 1312(%rsp), %rbp + adcq $0, %rax + movq %rax, 72(%rsp) # 8-byte Spill + movq 32(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rdx + leaq 1160(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 96(%rsp), %rax # 8-byte Reload + addq 1160(%rsp), %rax + adcq 1168(%rsp), %r14 + movq %r14, 80(%rsp) # 8-byte Spill + movq 40(%rsp), %r14 # 8-byte Reload + adcq 1176(%rsp), %r14 + adcq 1184(%rsp), %r12 + movq %r12, 64(%rsp) # 8-byte Spill + movq %r13, %r12 + adcq 1192(%rsp), %r12 + movq 88(%rsp), %rcx # 8-byte Reload + adcq 1200(%rsp), %rcx + movq %rcx, 88(%rsp) # 8-byte Spill + adcq 1208(%rsp), %r15 + movq %r15, %r13 + adcq 1216(%rsp), %rbx + movq %rbx, 112(%rsp) # 8-byte Spill + adcq 1224(%rsp), %rbp + movq 72(%rsp), %rcx # 8-byte Reload + adcq 1232(%rsp), %rcx + movq %rcx, 72(%rsp) # 8-byte Spill + sbbq %r15, %r15 + movq %rax, %rdx + movq %rax, %rbx + imulq 16(%rsp), %rdx # 8-byte Folded Reload + leaq 1080(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq %r15, %rax + andl $1, %eax + addq 1080(%rsp), %rbx + movq 80(%rsp), %rcx # 8-byte Reload + adcq 1088(%rsp), %rcx + movq %rcx, 80(%rsp) # 8-byte Spill + movq %r14, %r15 + adcq 1096(%rsp), %r15 + movq 64(%rsp), %r14 # 8-byte Reload + adcq 1104(%rsp), %r14 + movq %r12, %rbx + adcq 1112(%rsp), %rbx + movq 88(%rsp), %rcx # 8-byte Reload + adcq 1120(%rsp), %rcx + movq %rcx, 88(%rsp) # 8-byte Spill + adcq 1128(%rsp), %r13 + movq %r13, 104(%rsp) # 8-byte Spill + movq 112(%rsp), %r13 # 8-byte Reload + adcq 1136(%rsp), %r13 + adcq 1144(%rsp), %rbp + movq 72(%rsp), %r12 # 8-byte Reload + adcq 1152(%rsp), %r12 + adcq $0, %rax + movq %rax, 96(%rsp) # 8-byte Spill + movq 32(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rdx + leaq 1000(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 80(%rsp), %rax # 8-byte Reload + addq 1000(%rsp), %rax + adcq 1008(%rsp), %r15 + movq %r15, 40(%rsp) # 8-byte Spill + adcq 1016(%rsp), %r14 + movq %r14, %r15 + adcq 1024(%rsp), %rbx + movq %rbx, 48(%rsp) # 8-byte Spill + movq 88(%rsp), %r14 # 8-byte Reload + adcq 1032(%rsp), %r14 + movq 104(%rsp), %rcx # 8-byte Reload + adcq 1040(%rsp), %rcx + movq %rcx, 104(%rsp) # 8-byte Spill + adcq 1048(%rsp), %r13 + movq %r13, 112(%rsp) # 8-byte Spill + adcq 1056(%rsp), %rbp + adcq 1064(%rsp), %r12 + movq 96(%rsp), %rcx # 8-byte Reload + adcq 1072(%rsp), %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + sbbq %rbx, %rbx + movq %rax, %rdx + movq %rax, %r13 + imulq 16(%rsp), %rdx # 8-byte Folded Reload + leaq 920(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %ebx + movq %rbx, %rax + addq 920(%rsp), %r13 + movq 40(%rsp), %rcx # 8-byte Reload + adcq 928(%rsp), %rcx + movq %rcx, 40(%rsp) # 8-byte Spill + adcq 936(%rsp), %r15 + movq %r15, 64(%rsp) # 8-byte Spill + movq 48(%rsp), %r15 # 8-byte Reload + adcq 944(%rsp), %r15 + movq %r14, %r13 + adcq 952(%rsp), %r13 + movq 104(%rsp), %r14 # 8-byte Reload + adcq 960(%rsp), %r14 + movq 112(%rsp), %rbx # 8-byte Reload + adcq 968(%rsp), %rbx + adcq 976(%rsp), %rbp + adcq 984(%rsp), %r12 + movq 96(%rsp), %rcx # 8-byte Reload + adcq 992(%rsp), %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, 80(%rsp) # 8-byte Spill + 
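+ # Rounds for multiplier words 4..8 below repeat the CIOS pattern of rounds 0..3: .LmulPv576x64 forms the 576x64 partial product, the adcq chain folds it in, imulq 16(%rsp) derives m = t[0] * (-p^-1 mod 2^64) from the constant cached in the prologue, and adding m*p cancels the low limb so the running state shifts down one word. + # The sbbq x, x / andl $1 idiom materializes the top carry as a 0/1 value for the next round.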
movq 32(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rdx + leaq 840(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 40(%rsp), %rax # 8-byte Reload + addq 840(%rsp), %rax + movq 64(%rsp), %rcx # 8-byte Reload + adcq 848(%rsp), %rcx + movq %rcx, 64(%rsp) # 8-byte Spill + adcq 856(%rsp), %r15 + adcq 864(%rsp), %r13 + movq %r13, 88(%rsp) # 8-byte Spill + adcq 872(%rsp), %r14 + movq %r14, 104(%rsp) # 8-byte Spill + adcq 880(%rsp), %rbx + movq %rbx, 112(%rsp) # 8-byte Spill + adcq 888(%rsp), %rbp + adcq 896(%rsp), %r12 + movq 96(%rsp), %r13 # 8-byte Reload + adcq 904(%rsp), %r13 + movq 80(%rsp), %rcx # 8-byte Reload + adcq 912(%rsp), %rcx + movq %rcx, 80(%rsp) # 8-byte Spill + sbbq %rbx, %rbx + movq %rax, %rdx + movq %rax, %r14 + imulq 16(%rsp), %rdx # 8-byte Folded Reload + leaq 760(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %ebx + movq %rbx, %rax + addq 760(%rsp), %r14 + movq 64(%rsp), %rcx # 8-byte Reload + adcq 768(%rsp), %rcx + movq %rcx, 64(%rsp) # 8-byte Spill + adcq 776(%rsp), %r15 + movq 88(%rsp), %r14 # 8-byte Reload + adcq 784(%rsp), %r14 + movq 104(%rsp), %rcx # 8-byte Reload + adcq 792(%rsp), %rcx + movq %rcx, 104(%rsp) # 8-byte Spill + movq 112(%rsp), %rcx # 8-byte Reload + adcq 800(%rsp), %rcx + movq %rcx, 112(%rsp) # 8-byte Spill + adcq 808(%rsp), %rbp + movq %r12, %rbx + adcq 816(%rsp), %rbx + movq %r13, %r12 + adcq 824(%rsp), %r12 + movq 80(%rsp), %r13 # 8-byte Reload + adcq 832(%rsp), %r13 + adcq $0, %rax + movq %rax, 40(%rsp) # 8-byte Spill + movq 32(%rsp), %rax # 8-byte Reload + movq 40(%rax), %rdx + leaq 680(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 64(%rsp), %rax # 8-byte Reload + addq 680(%rsp), %rax + adcq 688(%rsp), %r15 + movq %r15, 48(%rsp) # 8-byte Spill + adcq 696(%rsp), %r14 + movq %r14, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %rcx # 8-byte Reload + adcq 704(%rsp), %rcx + movq %rcx, 104(%rsp) # 8-byte Spill + movq 112(%rsp), %r15 # 8-byte Reload + adcq 712(%rsp), %r15 + adcq 720(%rsp), %rbp + adcq 728(%rsp), %rbx + movq %rbx, 72(%rsp) # 8-byte Spill + adcq 736(%rsp), %r12 + movq %r12, 96(%rsp) # 8-byte Spill + adcq 744(%rsp), %r13 + movq %r13, 80(%rsp) # 8-byte Spill + movq 40(%rsp), %r13 # 8-byte Reload + adcq 752(%rsp), %r13 + sbbq %r14, %r14 + movq %rax, %rdx + movq %rax, %rbx + imulq 16(%rsp), %rdx # 8-byte Folded Reload + leaq 600(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %r14d + addq 600(%rsp), %rbx + movq 48(%rsp), %rax # 8-byte Reload + adcq 608(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 88(%rsp), %rax # 8-byte Reload + adcq 616(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %rbx # 8-byte Reload + adcq 624(%rsp), %rbx + adcq 632(%rsp), %r15 + movq %r15, 112(%rsp) # 8-byte Spill + adcq 640(%rsp), %rbp + movq 72(%rsp), %r12 # 8-byte Reload + adcq 648(%rsp), %r12 + movq 96(%rsp), %rax # 8-byte Reload + adcq 656(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + movq 80(%rsp), %r15 # 8-byte Reload + adcq 664(%rsp), %r15 + adcq 672(%rsp), %r13 + adcq $0, %r14 + movq %r14, 64(%rsp) # 8-byte Spill + movq 32(%rsp), %rax # 8-byte Reload + movq 48(%rax), %rdx + leaq 520(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 48(%rsp), %rax # 8-byte Reload + addq 520(%rsp), %rax + movq 88(%rsp), %r14 # 8-byte Reload + adcq 528(%rsp), %r14 + adcq 536(%rsp), %rbx + movq %rbx, 104(%rsp) # 8-byte Spill + movq 112(%rsp), %rcx # 8-byte Reload + adcq 544(%rsp), %rcx + 
movq %rcx, 112(%rsp) # 8-byte Spill + adcq 552(%rsp), %rbp + adcq 560(%rsp), %r12 + movq %r12, 72(%rsp) # 8-byte Spill + movq 96(%rsp), %r12 # 8-byte Reload + adcq 568(%rsp), %r12 + adcq 576(%rsp), %r15 + movq %r15, 80(%rsp) # 8-byte Spill + adcq 584(%rsp), %r13 + movq %r13, 40(%rsp) # 8-byte Spill + movq 64(%rsp), %r15 # 8-byte Reload + adcq 592(%rsp), %r15 + sbbq %rbx, %rbx + movq %rax, %rdx + movq %rax, %r13 + imulq 16(%rsp), %rdx # 8-byte Folded Reload + leaq 440(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %ebx + movq %rbx, %rax + addq 440(%rsp), %r13 + adcq 448(%rsp), %r14 + movq %r14, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %r14 # 8-byte Reload + adcq 456(%rsp), %r14 + movq 112(%rsp), %rbx # 8-byte Reload + adcq 464(%rsp), %rbx + adcq 472(%rsp), %rbp + movq %rbp, 8(%rsp) # 8-byte Spill + movq 72(%rsp), %rcx # 8-byte Reload + adcq 480(%rsp), %rcx + movq %rcx, 72(%rsp) # 8-byte Spill + adcq 488(%rsp), %r12 + movq %r12, 96(%rsp) # 8-byte Spill + movq 80(%rsp), %rbp # 8-byte Reload + adcq 496(%rsp), %rbp + movq 40(%rsp), %r12 # 8-byte Reload + adcq 504(%rsp), %r12 + adcq 512(%rsp), %r15 + movq %r15, %r13 + adcq $0, %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 32(%rsp), %rax # 8-byte Reload + movq 56(%rax), %rdx + leaq 360(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 88(%rsp), %rax # 8-byte Reload + addq 360(%rsp), %rax + adcq 368(%rsp), %r14 + adcq 376(%rsp), %rbx + movq %rbx, 112(%rsp) # 8-byte Spill + movq 8(%rsp), %rcx # 8-byte Reload + adcq 384(%rsp), %rcx + movq %rcx, 8(%rsp) # 8-byte Spill + movq 72(%rsp), %rbx # 8-byte Reload + adcq 392(%rsp), %rbx + movq 96(%rsp), %r15 # 8-byte Reload + adcq 400(%rsp), %r15 + adcq 408(%rsp), %rbp + movq %rbp, 80(%rsp) # 8-byte Spill + adcq 416(%rsp), %r12 + movq %r12, %rbp + adcq 424(%rsp), %r13 + movq %r13, 64(%rsp) # 8-byte Spill + movq 48(%rsp), %rcx # 8-byte Reload + adcq 432(%rsp), %rcx + movq %rcx, 48(%rsp) # 8-byte Spill + sbbq %r13, %r13 + movq %rax, %rdx + movq %rax, %r12 + imulq 16(%rsp), %rdx # 8-byte Folded Reload + leaq 280(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %r13d + addq 280(%rsp), %r12 + adcq 288(%rsp), %r14 + movq %r14, 104(%rsp) # 8-byte Spill + movq 112(%rsp), %rax # 8-byte Reload + adcq 296(%rsp), %rax + movq %rax, 112(%rsp) # 8-byte Spill + movq 8(%rsp), %r14 # 8-byte Reload + adcq 304(%rsp), %r14 + adcq 312(%rsp), %rbx + movq %rbx, 72(%rsp) # 8-byte Spill + adcq 320(%rsp), %r15 + movq %r15, 96(%rsp) # 8-byte Spill + movq 80(%rsp), %rbx # 8-byte Reload + adcq 328(%rsp), %rbx + adcq 336(%rsp), %rbp + movq %rbp, 40(%rsp) # 8-byte Spill + movq 64(%rsp), %r12 # 8-byte Reload + adcq 344(%rsp), %r12 + movq 48(%rsp), %rbp # 8-byte Reload + adcq 352(%rsp), %rbp + adcq $0, %r13 + movq 32(%rsp), %rax # 8-byte Reload + movq 64(%rax), %rdx + leaq 200(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 104(%rsp), %rax # 8-byte Reload + addq 200(%rsp), %rax + movq 112(%rsp), %r15 # 8-byte Reload + adcq 208(%rsp), %r15 + adcq 216(%rsp), %r14 + movq %r14, 8(%rsp) # 8-byte Spill + movq 72(%rsp), %r14 # 8-byte Reload + adcq 224(%rsp), %r14 + movq 96(%rsp), %rcx # 8-byte Reload + adcq 232(%rsp), %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + adcq 240(%rsp), %rbx + movq %rbx, 80(%rsp) # 8-byte Spill + movq 40(%rsp), %rcx # 8-byte Reload + adcq 248(%rsp), %rcx + movq %rcx, 40(%rsp) # 8-byte Spill + adcq 256(%rsp), %r12 + movq %r12, 64(%rsp) # 8-byte Spill + adcq 264(%rsp), %rbp + movq %rbp, 48(%rsp) 
# 8-byte Spill + adcq 272(%rsp), %r13 + sbbq %rbx, %rbx + movq 16(%rsp), %rdx # 8-byte Reload + imulq %rax, %rdx + movq %rax, %r12 + leaq 120(%rsp), %rdi + movq 56(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %ebx + addq 120(%rsp), %r12 + adcq 128(%rsp), %r15 + movq 8(%rsp), %rbp # 8-byte Reload + adcq 136(%rsp), %rbp + movq %r14, %rcx + adcq 144(%rsp), %rcx + movq %rcx, 72(%rsp) # 8-byte Spill + movq 96(%rsp), %r8 # 8-byte Reload + adcq 152(%rsp), %r8 + movq %r8, 96(%rsp) # 8-byte Spill + movq 80(%rsp), %r9 # 8-byte Reload + adcq 160(%rsp), %r9 + movq %r9, 80(%rsp) # 8-byte Spill + movq 40(%rsp), %r10 # 8-byte Reload + adcq 168(%rsp), %r10 + movq %r10, 40(%rsp) # 8-byte Spill + movq 64(%rsp), %rdi # 8-byte Reload + adcq 176(%rsp), %rdi + movq %rdi, 64(%rsp) # 8-byte Spill + movq 48(%rsp), %r14 # 8-byte Reload + adcq 184(%rsp), %r14 + adcq 192(%rsp), %r13 + adcq $0, %rbx + movq %r15, %rsi + movq %r15, %r12 + movq 56(%rsp), %rdx # 8-byte Reload + subq (%rdx), %rsi + movq %rbp, %rax + movq %rbp, %r15 + sbbq 8(%rdx), %rax + movq %rcx, %rbp + sbbq 16(%rdx), %rbp + movq %r8, %rcx + sbbq 24(%rdx), %rcx + movq %r9, %r8 + sbbq 32(%rdx), %r8 + movq %r10, %r11 + sbbq 40(%rdx), %r11 + movq %rdi, %r10 + sbbq 48(%rdx), %r10 + movq %r14, %rdi + sbbq 56(%rdx), %rdi + movq %r13, %r9 + sbbq 64(%rdx), %r9 + sbbq $0, %rbx + andl $1, %ebx + cmovneq %r13, %r9 + testb %bl, %bl + cmovneq %r12, %rsi + movq (%rsp), %rbx # 8-byte Reload + movq %rsi, (%rbx) + cmovneq %r15, %rax + movq %rax, 8(%rbx) + cmovneq 72(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 16(%rbx) + cmovneq 96(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 24(%rbx) + cmovneq 80(%rsp), %r8 # 8-byte Folded Reload + movq %r8, 32(%rbx) + cmovneq 40(%rsp), %r11 # 8-byte Folded Reload + movq %r11, 40(%rbx) + cmovneq 64(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 48(%rbx) + cmovneq %r14, %rdi + movq %rdi, 56(%rbx) + movq %r9, 64(%rbx) + addq $1560, %rsp # imm = 0x618 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end130: + .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L + + .globl mcl_fp_montNF9L + .align 16, 0x90 + .type mcl_fp_montNF9L,@function +mcl_fp_montNF9L: # @mcl_fp_montNF9L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $1560, %rsp # imm = 0x618 + movq %rcx, 64(%rsp) # 8-byte Spill + movq %rdx, 16(%rsp) # 8-byte Spill + movq %rsi, 24(%rsp) # 8-byte Spill + movq %rdi, (%rsp) # 8-byte Spill + movq -8(%rcx), %rbx + movq %rbx, 32(%rsp) # 8-byte Spill + movq (%rdx), %rdx + leaq 1480(%rsp), %rdi + callq .LmulPv576x64 + movq 1480(%rsp), %r12 + movq 1488(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq %r12, %rdx + imulq %rbx, %rdx + movq 1552(%rsp), %rax + movq %rax, 112(%rsp) # 8-byte Spill + movq 1544(%rsp), %r13 + movq 1536(%rsp), %rax + movq %rax, 72(%rsp) # 8-byte Spill + movq 1528(%rsp), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 1520(%rsp), %r14 + movq 1512(%rsp), %r15 + movq 1504(%rsp), %rbx + movq 1496(%rsp), %rbp + leaq 1400(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 1400(%rsp), %r12 + movq 88(%rsp), %rax # 8-byte Reload + adcq 1408(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + adcq 1416(%rsp), %rbp + movq %rbp, 8(%rsp) # 8-byte Spill + adcq 1424(%rsp), %rbx + movq %rbx, 104(%rsp) # 8-byte Spill + adcq 1432(%rsp), %r15 + movq %r15, 56(%rsp) # 8-byte Spill + adcq 1440(%rsp), %r14 + movq %r14, 40(%rsp) # 8-byte Spill + movq 48(%rsp), %rbx # 8-byte Reload + adcq 1448(%rsp), %rbx + 
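+ # mcl_fp_montNF9L runs the same word-by-word Montgomery recurrence as mcl_fp_mont9L but tracks no explicit carry bit: the intermediate sums appear to stay narrow enough that the epilogue can simply compute t - p and use the sign of the final borrow (sarq $63 / cmovsq) to select between t and t - p.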
movq 72(%rsp), %r12 # 8-byte Reload + adcq 1456(%rsp), %r12 + adcq 1464(%rsp), %r13 + movq %r13, 96(%rsp) # 8-byte Spill + movq 112(%rsp), %rbp # 8-byte Reload + adcq 1472(%rsp), %rbp + movq 16(%rsp), %rax # 8-byte Reload + movq 8(%rax), %rdx + leaq 1320(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 1392(%rsp), %rax + movq 88(%rsp), %rcx # 8-byte Reload + addq 1320(%rsp), %rcx + movq 8(%rsp), %r15 # 8-byte Reload + adcq 1328(%rsp), %r15 + movq 104(%rsp), %r14 # 8-byte Reload + adcq 1336(%rsp), %r14 + movq 56(%rsp), %rdx # 8-byte Reload + adcq 1344(%rsp), %rdx + movq %rdx, 56(%rsp) # 8-byte Spill + movq 40(%rsp), %r13 # 8-byte Reload + adcq 1352(%rsp), %r13 + adcq 1360(%rsp), %rbx + movq %rbx, 48(%rsp) # 8-byte Spill + adcq 1368(%rsp), %r12 + movq %r12, 72(%rsp) # 8-byte Spill + movq 96(%rsp), %rdx # 8-byte Reload + adcq 1376(%rsp), %rdx + movq %rdx, 96(%rsp) # 8-byte Spill + adcq 1384(%rsp), %rbp + movq %rbp, 112(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, %rbp + movq %rcx, %rdx + movq %rcx, %rbx + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 1240(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 1240(%rsp), %rbx + adcq 1248(%rsp), %r15 + movq %r15, 8(%rsp) # 8-byte Spill + adcq 1256(%rsp), %r14 + movq %r14, 104(%rsp) # 8-byte Spill + movq 56(%rsp), %r12 # 8-byte Reload + adcq 1264(%rsp), %r12 + adcq 1272(%rsp), %r13 + movq %r13, %r14 + movq 48(%rsp), %r13 # 8-byte Reload + adcq 1280(%rsp), %r13 + movq 72(%rsp), %rbx # 8-byte Reload + adcq 1288(%rsp), %rbx + movq 96(%rsp), %r15 # 8-byte Reload + adcq 1296(%rsp), %r15 + movq 112(%rsp), %rax # 8-byte Reload + adcq 1304(%rsp), %rax + movq %rax, 112(%rsp) # 8-byte Spill + adcq 1312(%rsp), %rbp + movq %rbp, 80(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 16(%rax), %rdx + leaq 1160(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 1232(%rsp), %rax + movq 8(%rsp), %rcx # 8-byte Reload + addq 1160(%rsp), %rcx + movq 104(%rsp), %rbp # 8-byte Reload + adcq 1168(%rsp), %rbp + adcq 1176(%rsp), %r12 + movq %r12, 56(%rsp) # 8-byte Spill + adcq 1184(%rsp), %r14 + adcq 1192(%rsp), %r13 + movq %r13, %r12 + adcq 1200(%rsp), %rbx + movq %rbx, 72(%rsp) # 8-byte Spill + adcq 1208(%rsp), %r15 + movq %r15, 96(%rsp) # 8-byte Spill + movq 112(%rsp), %rbx # 8-byte Reload + adcq 1216(%rsp), %rbx + movq 80(%rsp), %rdx # 8-byte Reload + adcq 1224(%rsp), %rdx + movq %rdx, 80(%rsp) # 8-byte Spill + movq %rax, %r15 + adcq $0, %r15 + movq %rcx, %rdx + movq %rcx, %r13 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 1080(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 1080(%rsp), %r13 + adcq 1088(%rsp), %rbp + movq %rbp, 104(%rsp) # 8-byte Spill + movq 56(%rsp), %r13 # 8-byte Reload + adcq 1096(%rsp), %r13 + adcq 1104(%rsp), %r14 + adcq 1112(%rsp), %r12 + movq %r12, 48(%rsp) # 8-byte Spill + movq 72(%rsp), %r12 # 8-byte Reload + adcq 1120(%rsp), %r12 + movq 96(%rsp), %rbp # 8-byte Reload + adcq 1128(%rsp), %rbp + adcq 1136(%rsp), %rbx + movq %rbx, 112(%rsp) # 8-byte Spill + movq 80(%rsp), %rbx # 8-byte Reload + adcq 1144(%rsp), %rbx + adcq 1152(%rsp), %r15 + movq 16(%rsp), %rax # 8-byte Reload + movq 24(%rax), %rdx + leaq 1000(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 1072(%rsp), %rax + movq 104(%rsp), %rcx # 8-byte Reload + addq 1000(%rsp), %rcx + adcq 1008(%rsp), %r13 + movq %r13, 56(%rsp) # 8-byte Spill + adcq 1016(%rsp), %r14 + movq %r14, 40(%rsp) # 8-byte Spill + movq 
48(%rsp), %r14 # 8-byte Reload + adcq 1024(%rsp), %r14 + adcq 1032(%rsp), %r12 + adcq 1040(%rsp), %rbp + movq %rbp, 96(%rsp) # 8-byte Spill + movq 112(%rsp), %r13 # 8-byte Reload + adcq 1048(%rsp), %r13 + adcq 1056(%rsp), %rbx + movq %rbx, 80(%rsp) # 8-byte Spill + adcq 1064(%rsp), %r15 + movq %r15, 88(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, 104(%rsp) # 8-byte Spill + movq %rcx, %rdx + movq %rcx, %rbx + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 920(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 920(%rsp), %rbx + movq 56(%rsp), %rax # 8-byte Reload + adcq 928(%rsp), %rax + movq %rax, 56(%rsp) # 8-byte Spill + movq 40(%rsp), %rbp # 8-byte Reload + adcq 936(%rsp), %rbp + movq %r14, %rbx + adcq 944(%rsp), %rbx + adcq 952(%rsp), %r12 + movq 96(%rsp), %rax # 8-byte Reload + adcq 960(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + adcq 968(%rsp), %r13 + movq %r13, %r15 + movq 80(%rsp), %r13 # 8-byte Reload + adcq 976(%rsp), %r13 + movq 88(%rsp), %r14 # 8-byte Reload + adcq 984(%rsp), %r14 + movq 104(%rsp), %rax # 8-byte Reload + adcq 992(%rsp), %rax + movq %rax, 104(%rsp) # 8-byte Spill + movq 16(%rsp), %rax # 8-byte Reload + movq 32(%rax), %rdx + leaq 840(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 912(%rsp), %rax + movq 56(%rsp), %rcx # 8-byte Reload + addq 840(%rsp), %rcx + adcq 848(%rsp), %rbp + movq %rbp, 40(%rsp) # 8-byte Spill + adcq 856(%rsp), %rbx + movq %rbx, 48(%rsp) # 8-byte Spill + adcq 864(%rsp), %r12 + movq 96(%rsp), %rbp # 8-byte Reload + adcq 872(%rsp), %rbp + adcq 880(%rsp), %r15 + movq %r15, 112(%rsp) # 8-byte Spill + adcq 888(%rsp), %r13 + adcq 896(%rsp), %r14 + movq %r14, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %rdx # 8-byte Reload + adcq 904(%rsp), %rdx + movq %rdx, 104(%rsp) # 8-byte Spill + adcq $0, %rax + movq %rax, %r14 + movq %rcx, %rdx + movq %rcx, %rbx + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 760(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 760(%rsp), %rbx + movq 40(%rsp), %rax # 8-byte Reload + adcq 768(%rsp), %rax + movq %rax, 40(%rsp) # 8-byte Spill + movq 48(%rsp), %r15 # 8-byte Reload + adcq 776(%rsp), %r15 + adcq 784(%rsp), %r12 + movq %r12, 72(%rsp) # 8-byte Spill + movq %rbp, %rbx + adcq 792(%rsp), %rbx + movq 112(%rsp), %rbp # 8-byte Reload + adcq 800(%rsp), %rbp + adcq 808(%rsp), %r13 + movq 88(%rsp), %rax # 8-byte Reload + adcq 816(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %r12 # 8-byte Reload + adcq 824(%rsp), %r12 + adcq 832(%rsp), %r14 + movq 16(%rsp), %rax # 8-byte Reload + movq 40(%rax), %rdx + leaq 680(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 752(%rsp), %rcx + movq 40(%rsp), %rax # 8-byte Reload + addq 680(%rsp), %rax + adcq 688(%rsp), %r15 + movq %r15, 48(%rsp) # 8-byte Spill + movq 72(%rsp), %rdx # 8-byte Reload + adcq 696(%rsp), %rdx + movq %rdx, 72(%rsp) # 8-byte Spill + adcq 704(%rsp), %rbx + movq %rbx, 96(%rsp) # 8-byte Spill + adcq 712(%rsp), %rbp + movq %rbp, 112(%rsp) # 8-byte Spill + adcq 720(%rsp), %r13 + movq %r13, %r15 + movq 88(%rsp), %rbx # 8-byte Reload + adcq 728(%rsp), %rbx + adcq 736(%rsp), %r12 + movq %r12, 104(%rsp) # 8-byte Spill + adcq 744(%rsp), %r14 + movq %r14, 40(%rsp) # 8-byte Spill + adcq $0, %rcx + movq %rcx, 56(%rsp) # 8-byte Spill + movq %rax, %rdx + movq %rax, %r13 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 600(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 600(%rsp), %r13 + movq 
48(%rsp), %r13 # 8-byte Reload + adcq 608(%rsp), %r13 + movq 72(%rsp), %r12 # 8-byte Reload + adcq 616(%rsp), %r12 + movq 96(%rsp), %rbp # 8-byte Reload + adcq 624(%rsp), %rbp + movq 112(%rsp), %rax # 8-byte Reload + adcq 632(%rsp), %rax + movq %rax, 112(%rsp) # 8-byte Spill + adcq 640(%rsp), %r15 + movq %r15, 80(%rsp) # 8-byte Spill + adcq 648(%rsp), %rbx + movq %rbx, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %r14 # 8-byte Reload + adcq 656(%rsp), %r14 + movq 40(%rsp), %rbx # 8-byte Reload + adcq 664(%rsp), %rbx + movq 56(%rsp), %r15 # 8-byte Reload + adcq 672(%rsp), %r15 + movq 16(%rsp), %rax # 8-byte Reload + movq 48(%rax), %rdx + leaq 520(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 592(%rsp), %rcx + movq %r13, %rax + addq 520(%rsp), %rax + adcq 528(%rsp), %r12 + movq %r12, 72(%rsp) # 8-byte Spill + movq %rbp, %r12 + adcq 536(%rsp), %r12 + movq 112(%rsp), %rbp # 8-byte Reload + adcq 544(%rsp), %rbp + movq 80(%rsp), %rdx # 8-byte Reload + adcq 552(%rsp), %rdx + movq %rdx, 80(%rsp) # 8-byte Spill + movq 88(%rsp), %rdx # 8-byte Reload + adcq 560(%rsp), %rdx + movq %rdx, 88(%rsp) # 8-byte Spill + adcq 568(%rsp), %r14 + movq %r14, 104(%rsp) # 8-byte Spill + adcq 576(%rsp), %rbx + movq %rbx, 40(%rsp) # 8-byte Spill + adcq 584(%rsp), %r15 + movq %r15, 56(%rsp) # 8-byte Spill + adcq $0, %rcx + movq %rcx, %r13 + movq %rax, %rdx + movq %rax, %r14 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 440(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 440(%rsp), %r14 + movq 72(%rsp), %rax # 8-byte Reload + adcq 448(%rsp), %rax + movq %rax, 72(%rsp) # 8-byte Spill + adcq 456(%rsp), %r12 + adcq 464(%rsp), %rbp + movq %rbp, 112(%rsp) # 8-byte Spill + movq 80(%rsp), %r14 # 8-byte Reload + adcq 472(%rsp), %r14 + movq 88(%rsp), %r15 # 8-byte Reload + adcq 480(%rsp), %r15 + movq 104(%rsp), %rbp # 8-byte Reload + adcq 488(%rsp), %rbp + movq 40(%rsp), %rbx # 8-byte Reload + adcq 496(%rsp), %rbx + movq 56(%rsp), %rax # 8-byte Reload + adcq 504(%rsp), %rax + movq %rax, 56(%rsp) # 8-byte Spill + adcq 512(%rsp), %r13 + movq 16(%rsp), %rax # 8-byte Reload + movq 56(%rax), %rdx + leaq 360(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 432(%rsp), %rcx + movq 72(%rsp), %rax # 8-byte Reload + addq 360(%rsp), %rax + adcq 368(%rsp), %r12 + movq %r12, 96(%rsp) # 8-byte Spill + movq 112(%rsp), %rdx # 8-byte Reload + adcq 376(%rsp), %rdx + movq %rdx, 112(%rsp) # 8-byte Spill + adcq 384(%rsp), %r14 + movq %r14, 80(%rsp) # 8-byte Spill + adcq 392(%rsp), %r15 + movq %r15, 88(%rsp) # 8-byte Spill + adcq 400(%rsp), %rbp + movq %rbp, 104(%rsp) # 8-byte Spill + adcq 408(%rsp), %rbx + movq %rbx, 40(%rsp) # 8-byte Spill + movq 56(%rsp), %r14 # 8-byte Reload + adcq 416(%rsp), %r14 + adcq 424(%rsp), %r13 + movq %r13, %r15 + adcq $0, %rcx + movq %rcx, 72(%rsp) # 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 32(%rsp), %rdx # 8-byte Folded Reload + leaq 280(%rsp), %rdi + movq 64(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 280(%rsp), %r12 + movq 96(%rsp), %rax # 8-byte Reload + adcq 288(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + movq 112(%rsp), %rbp # 8-byte Reload + adcq 296(%rsp), %rbp + movq 80(%rsp), %rax # 8-byte Reload + adcq 304(%rsp), %rax + movq %rax, 80(%rsp) # 8-byte Spill + movq 88(%rsp), %r13 # 8-byte Reload + adcq 312(%rsp), %r13 + movq 104(%rsp), %r12 # 8-byte Reload + adcq 320(%rsp), %r12 + movq 40(%rsp), %rbx # 8-byte Reload + adcq 328(%rsp), %rbx + adcq 336(%rsp), %r14 + movq %r14, 56(%rsp) 
# 8-byte Spill + adcq 344(%rsp), %r15 + movq %r15, 48(%rsp) # 8-byte Spill + movq 72(%rsp), %r14 # 8-byte Reload + adcq 352(%rsp), %r14 + movq 16(%rsp), %rax # 8-byte Reload + movq 64(%rax), %rdx + leaq 200(%rsp), %rdi + movq 24(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + movq 272(%rsp), %rcx + movq 96(%rsp), %rax # 8-byte Reload + addq 200(%rsp), %rax + adcq 208(%rsp), %rbp + movq %rbp, 112(%rsp) # 8-byte Spill + movq 80(%rsp), %rbp # 8-byte Reload + adcq 216(%rsp), %rbp + adcq 224(%rsp), %r13 + movq %r13, 88(%rsp) # 8-byte Spill + adcq 232(%rsp), %r12 + movq %r12, 104(%rsp) # 8-byte Spill + adcq 240(%rsp), %rbx + movq %rbx, 40(%rsp) # 8-byte Spill + movq 56(%rsp), %r15 # 8-byte Reload + adcq 248(%rsp), %r15 + movq 48(%rsp), %r12 # 8-byte Reload + adcq 256(%rsp), %r12 + adcq 264(%rsp), %r14 + adcq $0, %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + movq 32(%rsp), %rdx # 8-byte Reload + imulq %rax, %rdx + movq %rax, %rbx + leaq 120(%rsp), %rdi + movq 64(%rsp), %r13 # 8-byte Reload + movq %r13, %rsi + callq .LmulPv576x64 + addq 120(%rsp), %rbx + movq 112(%rsp), %rcx # 8-byte Reload + adcq 128(%rsp), %rcx + movq %rbp, %rdx + adcq 136(%rsp), %rdx + movq 88(%rsp), %rsi # 8-byte Reload + adcq 144(%rsp), %rsi + movq %rsi, 88(%rsp) # 8-byte Spill + movq 104(%rsp), %rdi # 8-byte Reload + adcq 152(%rsp), %rdi + movq %rdi, 104(%rsp) # 8-byte Spill + movq 40(%rsp), %rbx # 8-byte Reload + adcq 160(%rsp), %rbx + movq %rbx, 40(%rsp) # 8-byte Spill + movq %r15, %r8 + adcq 168(%rsp), %r8 + movq %r8, 56(%rsp) # 8-byte Spill + movq %r12, %r15 + adcq 176(%rsp), %r15 + adcq 184(%rsp), %r14 + movq 96(%rsp), %r9 # 8-byte Reload + adcq 192(%rsp), %r9 + movq %rcx, %rax + movq %rcx, %r11 + movq %r13, %rbp + subq (%rbp), %rax + movq %rdx, %rcx + movq %rdx, %r12 + sbbq 8(%rbp), %rcx + movq %rsi, %rdx + sbbq 16(%rbp), %rdx + movq %rdi, %rsi + sbbq 24(%rbp), %rsi + movq %rbx, %rdi + sbbq 32(%rbp), %rdi + movq %r8, %r10 + sbbq 40(%rbp), %r10 + movq %r15, %r13 + sbbq 48(%rbp), %r13 + movq %r14, %r8 + sbbq 56(%rbp), %r8 + movq %rbp, %rbx + movq %r9, %rbp + sbbq 64(%rbx), %rbp + movq %rbp, %rbx + sarq $63, %rbx + cmovsq %r11, %rax + movq (%rsp), %rbx # 8-byte Reload + movq %rax, (%rbx) + cmovsq %r12, %rcx + movq %rcx, 8(%rbx) + cmovsq 88(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 16(%rbx) + cmovsq 104(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 24(%rbx) + cmovsq 40(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 32(%rbx) + cmovsq 56(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 40(%rbx) + cmovsq %r15, %r13 + movq %r13, 48(%rbx) + cmovsq %r14, %r8 + movq %r8, 56(%rbx) + cmovsq %r9, %rbp + movq %rbp, 64(%rbx) + addq $1560, %rsp # imm = 0x618 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end131: + .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L + + .globl mcl_fp_montRed9L + .align 16, 0x90 + .type mcl_fp_montRed9L,@function +mcl_fp_montRed9L: # @mcl_fp_montRed9L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $936, %rsp # imm = 0x3A8 + movq %rdx, %rax + movq %rax, 128(%rsp) # 8-byte Spill + movq %rdi, 80(%rsp) # 8-byte Spill + movq -8(%rax), %rcx + movq %rcx, 120(%rsp) # 8-byte Spill + movq (%rsi), %r14 + movq 8(%rsi), %rdx + movq %rdx, 192(%rsp) # 8-byte Spill + movq %r14, %rdx + imulq %rcx, %rdx + movq 136(%rsi), %rcx + movq %rcx, 112(%rsp) # 8-byte Spill + movq 128(%rsi), %rcx + movq %rcx, 152(%rsp) # 8-byte Spill + movq 120(%rsi), %rcx + movq %rcx, 104(%rsp) # 8-byte Spill + movq 112(%rsi), %rcx + movq %rcx, 
144(%rsp) # 8-byte Spill + movq 104(%rsi), %rcx + movq %rcx, 184(%rsp) # 8-byte Spill + movq 96(%rsi), %rcx + movq %rcx, 208(%rsp) # 8-byte Spill + movq 88(%rsi), %rcx + movq %rcx, 200(%rsp) # 8-byte Spill + movq 80(%rsi), %rcx + movq %rcx, 160(%rsp) # 8-byte Spill + movq 72(%rsi), %r12 + movq 64(%rsi), %rcx + movq %rcx, 176(%rsp) # 8-byte Spill + movq 56(%rsi), %rcx + movq %rcx, 168(%rsp) # 8-byte Spill + movq 48(%rsi), %rcx + movq %rcx, 136(%rsp) # 8-byte Spill + movq 40(%rsi), %rbp + movq 32(%rsi), %rbx + movq 24(%rsi), %r13 + movq 16(%rsi), %r15 + movq %rax, %rcx + movq (%rcx), %rax + movq %rax, 16(%rsp) # 8-byte Spill + movq 64(%rcx), %rax + movq %rax, 72(%rsp) # 8-byte Spill + movq 56(%rcx), %rax + movq %rax, 64(%rsp) # 8-byte Spill + movq 48(%rcx), %rax + movq %rax, 56(%rsp) # 8-byte Spill + movq 40(%rcx), %rax + movq %rax, 48(%rsp) # 8-byte Spill + movq 32(%rcx), %rax + movq %rax, 40(%rsp) # 8-byte Spill + movq 24(%rcx), %rax + movq %rax, 32(%rsp) # 8-byte Spill + movq 16(%rcx), %rax + movq %rax, 24(%rsp) # 8-byte Spill + movq 8(%rcx), %rax + movq %rax, 8(%rsp) # 8-byte Spill + movq %rcx, %rsi + leaq 856(%rsp), %rdi + callq .LmulPv576x64 + addq 856(%rsp), %r14 + movq 192(%rsp), %rcx # 8-byte Reload + adcq 864(%rsp), %rcx + adcq 872(%rsp), %r15 + adcq 880(%rsp), %r13 + adcq 888(%rsp), %rbx + movq %rbx, 88(%rsp) # 8-byte Spill + adcq 896(%rsp), %rbp + movq %rbp, 96(%rsp) # 8-byte Spill + movq 136(%rsp), %rax # 8-byte Reload + adcq 904(%rsp), %rax + movq %rax, 136(%rsp) # 8-byte Spill + movq 168(%rsp), %rax # 8-byte Reload + adcq 912(%rsp), %rax + movq %rax, 168(%rsp) # 8-byte Spill + movq 176(%rsp), %rax # 8-byte Reload + adcq 920(%rsp), %rax + movq %rax, 176(%rsp) # 8-byte Spill + adcq 928(%rsp), %r12 + movq %r12, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %rbp # 8-byte Reload + adcq $0, %rbp + adcq $0, 200(%rsp) # 8-byte Folded Spill + adcq $0, 208(%rsp) # 8-byte Folded Spill + adcq $0, 184(%rsp) # 8-byte Folded Spill + adcq $0, 144(%rsp) # 8-byte Folded Spill + adcq $0, 104(%rsp) # 8-byte Folded Spill + adcq $0, 152(%rsp) # 8-byte Folded Spill + movq 112(%rsp), %r14 # 8-byte Reload + adcq $0, %r14 + sbbq %r12, %r12 + movq %rcx, %rdx + movq %rcx, %rbx + imulq 120(%rsp), %rdx # 8-byte Folded Reload + leaq 776(%rsp), %rdi + movq 128(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + andl $1, %r12d + addq 776(%rsp), %rbx + adcq 784(%rsp), %r15 + adcq 792(%rsp), %r13 + movq %r13, (%rsp) # 8-byte Spill + movq 88(%rsp), %rax # 8-byte Reload + adcq 800(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 96(%rsp), %rax # 8-byte Reload + adcq 808(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + movq 136(%rsp), %rax # 8-byte Reload + adcq 816(%rsp), %rax + movq %rax, 136(%rsp) # 8-byte Spill + movq 168(%rsp), %rax # 8-byte Reload + adcq 824(%rsp), %rax + movq %rax, 168(%rsp) # 8-byte Spill + movq 176(%rsp), %rax # 8-byte Reload + adcq 832(%rsp), %rax + movq %rax, 176(%rsp) # 8-byte Spill + movq 192(%rsp), %rax # 8-byte Reload + adcq 840(%rsp), %rax + movq %rax, 192(%rsp) # 8-byte Spill + adcq 848(%rsp), %rbp + movq %rbp, 160(%rsp) # 8-byte Spill + movq 200(%rsp), %r13 # 8-byte Reload + adcq $0, %r13 + adcq $0, 208(%rsp) # 8-byte Folded Spill + adcq $0, 184(%rsp) # 8-byte Folded Spill + adcq $0, 144(%rsp) # 8-byte Folded Spill + adcq $0, 104(%rsp) # 8-byte Folded Spill + movq 152(%rsp), %rbx # 8-byte Reload + adcq $0, %rbx + adcq $0, %r14 + movq %r14, 112(%rsp) # 8-byte Spill + adcq $0, %r12 + movq %r15, %rdx + imulq 120(%rsp), %rdx # 8-byte Folded Reload + leaq 696(%rsp), %rdi + movq 
128(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 696(%rsp), %r15 + movq (%rsp), %rcx # 8-byte Reload + adcq 704(%rsp), %rcx + movq 88(%rsp), %rax # 8-byte Reload + adcq 712(%rsp), %rax + movq %rax, 88(%rsp) # 8-byte Spill + movq 96(%rsp), %rax # 8-byte Reload + adcq 720(%rsp), %rax + movq %rax, 96(%rsp) # 8-byte Spill + movq 136(%rsp), %rbp # 8-byte Reload + adcq 728(%rsp), %rbp + movq 168(%rsp), %r14 # 8-byte Reload + adcq 736(%rsp), %r14 + movq 176(%rsp), %r15 # 8-byte Reload + adcq 744(%rsp), %r15 + movq 192(%rsp), %rax # 8-byte Reload + adcq 752(%rsp), %rax + movq %rax, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %rax # 8-byte Reload + adcq 760(%rsp), %rax + movq %rax, 160(%rsp) # 8-byte Spill + adcq 768(%rsp), %r13 + movq %r13, 200(%rsp) # 8-byte Spill + adcq $0, 208(%rsp) # 8-byte Folded Spill + movq 184(%rsp), %r13 # 8-byte Reload + adcq $0, %r13 + adcq $0, 144(%rsp) # 8-byte Folded Spill + adcq $0, 104(%rsp) # 8-byte Folded Spill + adcq $0, %rbx + movq %rbx, 152(%rsp) # 8-byte Spill + adcq $0, 112(%rsp) # 8-byte Folded Spill + adcq $0, %r12 + movq %rcx, %rbx + movq %rbx, %rdx + imulq 120(%rsp), %rdx # 8-byte Folded Reload + leaq 616(%rsp), %rdi + movq 128(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 616(%rsp), %rbx + movq 88(%rsp), %rax # 8-byte Reload + adcq 624(%rsp), %rax + movq 96(%rsp), %rcx # 8-byte Reload + adcq 632(%rsp), %rcx + movq %rcx, 96(%rsp) # 8-byte Spill + adcq 640(%rsp), %rbp + movq %rbp, 136(%rsp) # 8-byte Spill + adcq 648(%rsp), %r14 + movq %r14, 168(%rsp) # 8-byte Spill + adcq 656(%rsp), %r15 + movq 192(%rsp), %r14 # 8-byte Reload + adcq 664(%rsp), %r14 + movq 160(%rsp), %rbp # 8-byte Reload + adcq 672(%rsp), %rbp + movq 200(%rsp), %rcx # 8-byte Reload + adcq 680(%rsp), %rcx + movq %rcx, 200(%rsp) # 8-byte Spill + movq 208(%rsp), %rcx # 8-byte Reload + adcq 688(%rsp), %rcx + movq %rcx, 208(%rsp) # 8-byte Spill + adcq $0, %r13 + movq %r13, 184(%rsp) # 8-byte Spill + adcq $0, 144(%rsp) # 8-byte Folded Spill + adcq $0, 104(%rsp) # 8-byte Folded Spill + adcq $0, 152(%rsp) # 8-byte Folded Spill + adcq $0, 112(%rsp) # 8-byte Folded Spill + adcq $0, %r12 + movq %rax, %rbx + movq %rbx, %rdx + imulq 120(%rsp), %rdx # 8-byte Folded Reload + leaq 536(%rsp), %rdi + movq 128(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 536(%rsp), %rbx + movq 96(%rsp), %rax # 8-byte Reload + adcq 544(%rsp), %rax + movq 136(%rsp), %rcx # 8-byte Reload + adcq 552(%rsp), %rcx + movq %rcx, 136(%rsp) # 8-byte Spill + movq 168(%rsp), %rcx # 8-byte Reload + adcq 560(%rsp), %rcx + movq %rcx, 168(%rsp) # 8-byte Spill + adcq 568(%rsp), %r15 + movq %r15, 176(%rsp) # 8-byte Spill + adcq 576(%rsp), %r14 + movq %r14, 192(%rsp) # 8-byte Spill + adcq 584(%rsp), %rbp + movq %rbp, 160(%rsp) # 8-byte Spill + movq 200(%rsp), %r13 # 8-byte Reload + adcq 592(%rsp), %r13 + movq 208(%rsp), %r15 # 8-byte Reload + adcq 600(%rsp), %r15 + movq 184(%rsp), %rbp # 8-byte Reload + adcq 608(%rsp), %rbp + movq 144(%rsp), %rbx # 8-byte Reload + adcq $0, %rbx + adcq $0, 104(%rsp) # 8-byte Folded Spill + adcq $0, 152(%rsp) # 8-byte Folded Spill + adcq $0, 112(%rsp) # 8-byte Folded Spill + adcq $0, %r12 + movq %rax, %rdx + movq %rax, %r14 + imulq 120(%rsp), %rdx # 8-byte Folded Reload + leaq 456(%rsp), %rdi + movq 128(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 456(%rsp), %r14 + movq 136(%rsp), %rax # 8-byte Reload + adcq 464(%rsp), %rax + movq 168(%rsp), %rcx # 8-byte Reload + adcq 472(%rsp), %rcx + movq %rcx, 168(%rsp) # 8-byte Spill + movq 176(%rsp), %rcx # 8-byte Reload + adcq 
480(%rsp), %rcx + movq %rcx, 176(%rsp) # 8-byte Spill + movq 192(%rsp), %rcx # 8-byte Reload + adcq 488(%rsp), %rcx + movq %rcx, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %rcx # 8-byte Reload + adcq 496(%rsp), %rcx + movq %rcx, 160(%rsp) # 8-byte Spill + adcq 504(%rsp), %r13 + movq %r13, 200(%rsp) # 8-byte Spill + adcq 512(%rsp), %r15 + movq %r15, 208(%rsp) # 8-byte Spill + adcq 520(%rsp), %rbp + movq %rbp, 184(%rsp) # 8-byte Spill + adcq 528(%rsp), %rbx + movq %rbx, 144(%rsp) # 8-byte Spill + movq 104(%rsp), %r14 # 8-byte Reload + adcq $0, %r14 + movq 152(%rsp), %r13 # 8-byte Reload + adcq $0, %r13 + movq 112(%rsp), %rbx # 8-byte Reload + adcq $0, %rbx + adcq $0, %r12 + movq %rax, %rdx + movq %rax, %r15 + imulq 120(%rsp), %rdx # 8-byte Folded Reload + leaq 376(%rsp), %rdi + movq 128(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 376(%rsp), %r15 + movq 168(%rsp), %rax # 8-byte Reload + adcq 384(%rsp), %rax + movq 176(%rsp), %rcx # 8-byte Reload + adcq 392(%rsp), %rcx + movq %rcx, 176(%rsp) # 8-byte Spill + movq 192(%rsp), %rcx # 8-byte Reload + adcq 400(%rsp), %rcx + movq %rcx, 192(%rsp) # 8-byte Spill + movq 160(%rsp), %rbp # 8-byte Reload + adcq 408(%rsp), %rbp + movq 200(%rsp), %rcx # 8-byte Reload + adcq 416(%rsp), %rcx + movq %rcx, 200(%rsp) # 8-byte Spill + movq 208(%rsp), %rcx # 8-byte Reload + adcq 424(%rsp), %rcx + movq %rcx, 208(%rsp) # 8-byte Spill + movq 184(%rsp), %rcx # 8-byte Reload + adcq 432(%rsp), %rcx + movq %rcx, 184(%rsp) # 8-byte Spill + movq 144(%rsp), %r15 # 8-byte Reload + adcq 440(%rsp), %r15 + adcq 448(%rsp), %r14 + movq %r14, 104(%rsp) # 8-byte Spill + adcq $0, %r13 + movq %r13, %r14 + adcq $0, %rbx + movq %rbx, 112(%rsp) # 8-byte Spill + adcq $0, %r12 + movq %rax, %rbx + movq %rbx, %rdx + imulq 120(%rsp), %rdx # 8-byte Folded Reload + leaq 296(%rsp), %rdi + movq 128(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 296(%rsp), %rbx + movq 176(%rsp), %rax # 8-byte Reload + adcq 304(%rsp), %rax + movq 192(%rsp), %r13 # 8-byte Reload + adcq 312(%rsp), %r13 + adcq 320(%rsp), %rbp + movq 200(%rsp), %rcx # 8-byte Reload + adcq 328(%rsp), %rcx + movq %rcx, 200(%rsp) # 8-byte Spill + movq 208(%rsp), %rcx # 8-byte Reload + adcq 336(%rsp), %rcx + movq %rcx, 208(%rsp) # 8-byte Spill + movq 184(%rsp), %rcx # 8-byte Reload + adcq 344(%rsp), %rcx + movq %rcx, 184(%rsp) # 8-byte Spill + adcq 352(%rsp), %r15 + movq %r15, 144(%rsp) # 8-byte Spill + movq 104(%rsp), %r15 # 8-byte Reload + adcq 360(%rsp), %r15 + adcq 368(%rsp), %r14 + movq %r14, 152(%rsp) # 8-byte Spill + movq 112(%rsp), %r14 # 8-byte Reload + adcq $0, %r14 + adcq $0, %r12 + movq 120(%rsp), %rdx # 8-byte Reload + imulq %rax, %rdx + movq %rax, %rbx + leaq 216(%rsp), %rdi + movq 128(%rsp), %rsi # 8-byte Reload + callq .LmulPv576x64 + addq 216(%rsp), %rbx + movq %r13, %rsi + adcq 224(%rsp), %rsi + movq %rsi, 192(%rsp) # 8-byte Spill + adcq 232(%rsp), %rbp + movq %rbp, 160(%rsp) # 8-byte Spill + movq 200(%rsp), %r9 # 8-byte Reload + adcq 240(%rsp), %r9 + movq %r9, 200(%rsp) # 8-byte Spill + movq 208(%rsp), %r8 # 8-byte Reload + adcq 248(%rsp), %r8 + movq %r8, 208(%rsp) # 8-byte Spill + movq 184(%rsp), %rbx # 8-byte Reload + adcq 256(%rsp), %rbx + movq 144(%rsp), %rax # 8-byte Reload + adcq 264(%rsp), %rax + movq %r15, %rcx + adcq 272(%rsp), %rcx + movq 152(%rsp), %rdx # 8-byte Reload + adcq 280(%rsp), %rdx + movq %rdx, 152(%rsp) # 8-byte Spill + adcq 288(%rsp), %r14 + movq %r14, %r11 + adcq $0, %r12 + subq 16(%rsp), %rsi # 8-byte Folded Reload + movq %rbp, %rdi + sbbq 8(%rsp), %rdi # 8-byte Folded 
Reload + movq %r9, %rbp + sbbq 24(%rsp), %rbp # 8-byte Folded Reload + movq %r8, %r13 + sbbq 32(%rsp), %r13 # 8-byte Folded Reload + movq %rbx, %r15 + sbbq 40(%rsp), %r15 # 8-byte Folded Reload + movq %rax, %r14 + sbbq 48(%rsp), %r14 # 8-byte Folded Reload + movq %rcx, %r10 + sbbq 56(%rsp), %r10 # 8-byte Folded Reload + movq %rdx, %r8 + sbbq 64(%rsp), %r8 # 8-byte Folded Reload + movq %r11, %r9 + sbbq 72(%rsp), %r9 # 8-byte Folded Reload + sbbq $0, %r12 + andl $1, %r12d + cmovneq %r11, %r9 + testb %r12b, %r12b + cmovneq 192(%rsp), %rsi # 8-byte Folded Reload + movq 80(%rsp), %rdx # 8-byte Reload + movq %rsi, (%rdx) + cmovneq 160(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 8(%rdx) + cmovneq 200(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 16(%rdx) + cmovneq 208(%rsp), %r13 # 8-byte Folded Reload + movq %r13, 24(%rdx) + cmovneq %rbx, %r15 + movq %r15, 32(%rdx) + cmovneq %rax, %r14 + movq %r14, 40(%rdx) + cmovneq %rcx, %r10 + movq %r10, 48(%rdx) + cmovneq 152(%rsp), %r8 # 8-byte Folded Reload + movq %r8, 56(%rdx) + movq %r9, 64(%rdx) + addq $936, %rsp # imm = 0x3A8 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end132: + .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L + + .globl mcl_fp_addPre9L + .align 16, 0x90 + .type mcl_fp_addPre9L,@function +mcl_fp_addPre9L: # @mcl_fp_addPre9L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 64(%rdx), %r8 + movq 64(%rsi), %r15 + movq 56(%rsi), %r9 + movq 48(%rsi), %r10 + movq 40(%rsi), %r11 + movq 24(%rsi), %r12 + movq 32(%rsi), %r14 + movq (%rdx), %rbx + movq 8(%rdx), %rcx + addq (%rsi), %rbx + adcq 8(%rsi), %rcx + movq 16(%rdx), %rax + adcq 16(%rsi), %rax + adcq 24(%rdx), %r12 + movq 56(%rdx), %r13 + movq 48(%rdx), %rsi + movq 40(%rdx), %rbp + movq 32(%rdx), %rdx + movq %rbx, (%rdi) + movq %rcx, 8(%rdi) + movq %rax, 16(%rdi) + movq %r12, 24(%rdi) + adcq %r14, %rdx + movq %rdx, 32(%rdi) + adcq %r11, %rbp + movq %rbp, 40(%rdi) + adcq %r10, %rsi + movq %rsi, 48(%rdi) + adcq %r9, %r13 + movq %r13, 56(%rdi) + adcq %r8, %r15 + movq %r15, 64(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end133: + .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L + + .globl mcl_fp_subPre9L + .align 16, 0x90 + .type mcl_fp_subPre9L,@function +mcl_fp_subPre9L: # @mcl_fp_subPre9L +# BB#0: + movq 32(%rdx), %r8 + movq (%rsi), %rcx + xorl %eax, %eax + subq (%rdx), %rcx + movq %rcx, (%rdi) + movq 8(%rsi), %rcx + sbbq 8(%rdx), %rcx + movq %rcx, 8(%rdi) + movq 16(%rsi), %rcx + sbbq 16(%rdx), %rcx + movq %rcx, 16(%rdi) + movq 24(%rsi), %rcx + sbbq 24(%rdx), %rcx + movq %rcx, 24(%rdi) + movq 32(%rsi), %rcx + sbbq %r8, %rcx + movq 40(%rdx), %r8 + movq %rcx, 32(%rdi) + movq 40(%rsi), %rcx + sbbq %r8, %rcx + movq 48(%rdx), %r8 + movq %rcx, 40(%rdi) + movq 48(%rsi), %rcx + sbbq %r8, %rcx + movq 56(%rdx), %r8 + movq %rcx, 48(%rdi) + movq 56(%rsi), %rcx + sbbq %r8, %rcx + movq %rcx, 56(%rdi) + movq 64(%rdx), %rcx + movq 64(%rsi), %rdx + sbbq %rcx, %rdx + movq %rdx, 64(%rdi) + sbbq $0, %rax + andl $1, %eax + retq +.Lfunc_end134: + .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L + + .globl mcl_fp_shr1_9L + .align 16, 0x90 + .type mcl_fp_shr1_9L,@function +mcl_fp_shr1_9L: # @mcl_fp_shr1_9L +# BB#0: + pushq %rbx + movq 64(%rsi), %r8 + movq 56(%rsi), %r9 + movq 48(%rsi), %r10 + movq 40(%rsi), %r11 + movq 32(%rsi), %rcx + movq 24(%rsi), %rdx + movq 16(%rsi), %rax + movq (%rsi), %rbx + movq 8(%rsi), %rsi + shrdq $1, 
%rsi, %rbx + movq %rbx, (%rdi) + shrdq $1, %rax, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rdx, %rax + movq %rax, 16(%rdi) + shrdq $1, %rcx, %rdx + movq %rdx, 24(%rdi) + shrdq $1, %r11, %rcx + movq %rcx, 32(%rdi) + shrdq $1, %r10, %r11 + movq %r11, 40(%rdi) + shrdq $1, %r9, %r10 + movq %r10, 48(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 56(%rdi) + shrq %r8 + movq %r8, 64(%rdi) + popq %rbx + retq +.Lfunc_end135: + .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L + + .globl mcl_fp_add9L + .align 16, 0x90 + .type mcl_fp_add9L,@function +mcl_fp_add9L: # @mcl_fp_add9L +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 64(%rdx), %r12 + movq 64(%rsi), %r8 + movq 56(%rsi), %r13 + movq 48(%rsi), %r9 + movq 40(%rsi), %r10 + movq 24(%rsi), %r14 + movq 32(%rsi), %r11 + movq (%rdx), %rbx + movq 8(%rdx), %r15 + addq (%rsi), %rbx + adcq 8(%rsi), %r15 + movq 16(%rdx), %rax + adcq 16(%rsi), %rax + adcq 24(%rdx), %r14 + adcq 32(%rdx), %r11 + adcq 40(%rdx), %r10 + movq 56(%rdx), %rsi + adcq 48(%rdx), %r9 + movq %rbx, (%rdi) + movq %r15, 8(%rdi) + movq %rax, 16(%rdi) + movq %r14, 24(%rdi) + movq %r11, 32(%rdi) + movq %r10, 40(%rdi) + movq %r9, 48(%rdi) + adcq %r13, %rsi + movq %rsi, 56(%rdi) + adcq %r12, %r8 + movq %r8, 64(%rdi) + sbbq %rdx, %rdx + andl $1, %edx + subq (%rcx), %rbx + sbbq 8(%rcx), %r15 + sbbq 16(%rcx), %rax + sbbq 24(%rcx), %r14 + sbbq 32(%rcx), %r11 + sbbq 40(%rcx), %r10 + sbbq 48(%rcx), %r9 + sbbq 56(%rcx), %rsi + sbbq 64(%rcx), %r8 + sbbq $0, %rdx + testb $1, %dl + jne .LBB136_2 +# BB#1: # %nocarry + movq %rbx, (%rdi) + movq %r15, 8(%rdi) + movq %rax, 16(%rdi) + movq %r14, 24(%rdi) + movq %r11, 32(%rdi) + movq %r10, 40(%rdi) + movq %r9, 48(%rdi) + movq %rsi, 56(%rdi) + movq %r8, 64(%rdi) +.LBB136_2: # %carry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end136: + .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L + + .globl mcl_fp_addNF9L + .align 16, 0x90 + .type mcl_fp_addNF9L,@function +mcl_fp_addNF9L: # @mcl_fp_addNF9L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, %r8 + movq 64(%rdx), %r10 + movq 56(%rdx), %r11 + movq 48(%rdx), %r9 + movq 40(%rdx), %rax + movq 32(%rdx), %rdi + movq 24(%rdx), %rbp + movq 16(%rdx), %r15 + movq (%rdx), %rbx + movq 8(%rdx), %r13 + addq (%rsi), %rbx + adcq 8(%rsi), %r13 + adcq 16(%rsi), %r15 + adcq 24(%rsi), %rbp + movq %rbp, -40(%rsp) # 8-byte Spill + adcq 32(%rsi), %rdi + movq %rdi, -16(%rsp) # 8-byte Spill + adcq 40(%rsi), %rax + movq %rax, -8(%rsp) # 8-byte Spill + adcq 48(%rsi), %r9 + movq %r9, -32(%rsp) # 8-byte Spill + movq %r9, %rdi + adcq 56(%rsi), %r11 + movq %r11, -24(%rsp) # 8-byte Spill + movq %r11, %rax + adcq 64(%rsi), %r10 + movq %r10, %r9 + movq %rbx, %rsi + subq (%rcx), %rsi + movq %r13, %rdx + sbbq 8(%rcx), %rdx + movq %r15, %r12 + sbbq 16(%rcx), %r12 + sbbq 24(%rcx), %rbp + movq -16(%rsp), %r14 # 8-byte Reload + sbbq 32(%rcx), %r14 + movq -8(%rsp), %r11 # 8-byte Reload + sbbq 40(%rcx), %r11 + movq %rdi, %r10 + sbbq 48(%rcx), %r10 + movq %rax, %rdi + sbbq 56(%rcx), %rdi + movq %r9, %rax + sbbq 64(%rcx), %rax + movq %rax, %rcx + sarq $63, %rcx + cmovsq %rbx, %rsi + movq %rsi, (%r8) + cmovsq %r13, %rdx + movq %rdx, 8(%r8) + cmovsq %r15, %r12 + movq %r12, 16(%r8) + cmovsq -40(%rsp), %rbp # 8-byte Folded Reload + movq %rbp, 24(%r8) + cmovsq -16(%rsp), %r14 # 8-byte Folded Reload + movq %r14, 32(%r8) + cmovsq -8(%rsp), %r11 # 8-byte Folded Reload + movq %r11, 40(%r8) + cmovsq -32(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 48(%r8) + 
cmovsq -24(%rsp), %rdi # 8-byte Folded Reload + movq %rdi, 56(%r8) + cmovsq %r9, %rax + movq %rax, 64(%r8) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end137: + .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L + + .globl mcl_fp_sub9L + .align 16, 0x90 + .type mcl_fp_sub9L,@function +mcl_fp_sub9L: # @mcl_fp_sub9L +# BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 64(%rdx), %r13 + movq (%rsi), %rax + movq 8(%rsi), %r9 + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %r9 + movq 16(%rsi), %r10 + sbbq 16(%rdx), %r10 + movq 24(%rsi), %r11 + sbbq 24(%rdx), %r11 + movq 32(%rsi), %r12 + sbbq 32(%rdx), %r12 + movq 40(%rsi), %r14 + sbbq 40(%rdx), %r14 + movq 48(%rsi), %r15 + sbbq 48(%rdx), %r15 + movq 64(%rsi), %r8 + movq 56(%rsi), %rsi + sbbq 56(%rdx), %rsi + movq %rax, (%rdi) + movq %r9, 8(%rdi) + movq %r10, 16(%rdi) + movq %r11, 24(%rdi) + movq %r12, 32(%rdi) + movq %r14, 40(%rdi) + movq %r15, 48(%rdi) + movq %rsi, 56(%rdi) + sbbq %r13, %r8 + movq %r8, 64(%rdi) + sbbq $0, %rbx + testb $1, %bl + je .LBB138_2 +# BB#1: # %carry + addq (%rcx), %rax + movq %rax, (%rdi) + movq 8(%rcx), %rax + adcq %r9, %rax + movq %rax, 8(%rdi) + movq 16(%rcx), %rax + adcq %r10, %rax + movq %rax, 16(%rdi) + movq 24(%rcx), %rax + adcq %r11, %rax + movq %rax, 24(%rdi) + movq 32(%rcx), %rax + adcq %r12, %rax + movq %rax, 32(%rdi) + movq 40(%rcx), %rax + adcq %r14, %rax + movq %rax, 40(%rdi) + movq 48(%rcx), %rax + adcq %r15, %rax + movq %rax, 48(%rdi) + movq 56(%rcx), %rax + adcq %rsi, %rax + movq %rax, 56(%rdi) + movq 64(%rcx), %rax + adcq %r8, %rax + movq %rax, 64(%rdi) +.LBB138_2: # %nocarry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq +.Lfunc_end138: + .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L + + .globl mcl_fp_subNF9L + .align 16, 0x90 + .type mcl_fp_subNF9L,@function +mcl_fp_subNF9L: # @mcl_fp_subNF9L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq %rdi, %r11 + movq 64(%rsi), %r14 + movq 56(%rsi), %rax + movq 48(%rsi), %rcx + movq 40(%rsi), %rdi + movq 32(%rsi), %rbp + movq 24(%rsi), %rbx + movq 16(%rsi), %r15 + movq (%rsi), %r12 + movq 8(%rsi), %r13 + subq (%rdx), %r12 + sbbq 8(%rdx), %r13 + sbbq 16(%rdx), %r15 + sbbq 24(%rdx), %rbx + movq %rbx, -40(%rsp) # 8-byte Spill + sbbq 32(%rdx), %rbp + movq %rbp, -32(%rsp) # 8-byte Spill + sbbq 40(%rdx), %rdi + movq %rdi, -24(%rsp) # 8-byte Spill + sbbq 48(%rdx), %rcx + movq %rcx, -16(%rsp) # 8-byte Spill + sbbq 56(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + sbbq 64(%rdx), %r14 + movq %r14, %rax + sarq $63, %rax + movq %rax, %rcx + shldq $1, %r14, %rcx + movq 24(%r8), %rbp + andq %rcx, %rbp + movq 8(%r8), %rdi + andq %rcx, %rdi + andq (%r8), %rcx + movq 64(%r8), %rbx + andq %rax, %rbx + movq 56(%r8), %r10 + andq %rax, %r10 + rolq %rax + movq 48(%r8), %r9 + andq %rax, %r9 + movq 40(%r8), %rsi + andq %rax, %rsi + movq 32(%r8), %rdx + andq %rax, %rdx + andq 16(%r8), %rax + addq %r12, %rcx + adcq %r13, %rdi + movq %rcx, (%r11) + adcq %r15, %rax + movq %rdi, 8(%r11) + adcq -40(%rsp), %rbp # 8-byte Folded Reload + movq %rax, 16(%r11) + movq %rbp, 24(%r11) + adcq -32(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, 32(%r11) + adcq -24(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 40(%r11) + adcq -16(%rsp), %r9 # 8-byte Folded Reload + movq %r9, 48(%r11) + adcq -8(%rsp), %r10 # 8-byte Folded Reload + movq %r10, 56(%r11) + adcq %r14, %rbx + movq %rbx, 64(%r11) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + 
popq %r15 + popq %rbp + retq +.Lfunc_end139: + .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L + + .globl mcl_fpDbl_add9L + .align 16, 0x90 + .type mcl_fpDbl_add9L,@function +mcl_fpDbl_add9L: # @mcl_fpDbl_add9L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r15 + movq 136(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 128(%rdx), %rax + movq %rax, -40(%rsp) # 8-byte Spill + movq 120(%rdx), %r10 + movq 112(%rdx), %r11 + movq 24(%rsi), %rcx + movq 32(%rsi), %r14 + movq 16(%rdx), %rbp + movq (%rdx), %rax + movq 8(%rdx), %rbx + addq (%rsi), %rax + adcq 8(%rsi), %rbx + adcq 16(%rsi), %rbp + adcq 24(%rdx), %rcx + adcq 32(%rdx), %r14 + movq 104(%rdx), %r9 + movq 96(%rdx), %r13 + movq %rax, (%rdi) + movq 88(%rdx), %r8 + movq %rbx, 8(%rdi) + movq 80(%rdx), %r12 + movq %rbp, 16(%rdi) + movq 40(%rdx), %rax + movq %rcx, 24(%rdi) + movq 40(%rsi), %rbp + adcq %rax, %rbp + movq 48(%rdx), %rcx + movq %r14, 32(%rdi) + movq 48(%rsi), %rax + adcq %rcx, %rax + movq 56(%rdx), %r14 + movq %rbp, 40(%rdi) + movq 56(%rsi), %rbp + adcq %r14, %rbp + movq 72(%rdx), %rcx + movq 64(%rdx), %rdx + movq %rax, 48(%rdi) + movq 64(%rsi), %rax + adcq %rdx, %rax + movq 136(%rsi), %rbx + movq %rbp, 56(%rdi) + movq 72(%rsi), %rbp + adcq %rcx, %rbp + movq 128(%rsi), %rcx + movq %rax, 64(%rdi) + movq 80(%rsi), %rdx + adcq %r12, %rdx + movq 88(%rsi), %r12 + adcq %r8, %r12 + movq 96(%rsi), %r14 + adcq %r13, %r14 + movq %r14, -48(%rsp) # 8-byte Spill + movq 104(%rsi), %rax + adcq %r9, %rax + movq %rax, -32(%rsp) # 8-byte Spill + movq 120(%rsi), %rax + movq 112(%rsi), %rsi + adcq %r11, %rsi + movq %rsi, -24(%rsp) # 8-byte Spill + adcq %r10, %rax + movq %rax, -16(%rsp) # 8-byte Spill + adcq -40(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -40(%rsp) # 8-byte Spill + adcq -8(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, -8(%rsp) # 8-byte Spill + sbbq %r9, %r9 + andl $1, %r9d + movq %rbp, %r10 + subq (%r15), %r10 + movq %rdx, %r11 + sbbq 8(%r15), %r11 + movq %r12, %rbx + sbbq 16(%r15), %rbx + sbbq 24(%r15), %r14 + movq -32(%rsp), %r13 # 8-byte Reload + sbbq 32(%r15), %r13 + movq -24(%rsp), %rsi # 8-byte Reload + sbbq 40(%r15), %rsi + movq -16(%rsp), %rax # 8-byte Reload + sbbq 48(%r15), %rax + sbbq 56(%r15), %rcx + movq -8(%rsp), %r8 # 8-byte Reload + sbbq 64(%r15), %r8 + sbbq $0, %r9 + andl $1, %r9d + cmovneq %rbp, %r10 + movq %r10, 72(%rdi) + testb %r9b, %r9b + cmovneq %rdx, %r11 + movq %r11, 80(%rdi) + cmovneq %r12, %rbx + movq %rbx, 88(%rdi) + cmovneq -48(%rsp), %r14 # 8-byte Folded Reload + movq %r14, 96(%rdi) + cmovneq -32(%rsp), %r13 # 8-byte Folded Reload + movq %r13, 104(%rdi) + cmovneq -24(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 112(%rdi) + cmovneq -16(%rsp), %rax # 8-byte Folded Reload + movq %rax, 120(%rdi) + cmovneq -40(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 128(%rdi) + cmovneq -8(%rsp), %r8 # 8-byte Folded Reload + movq %r8, 136(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end140: + .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L + + .globl mcl_fpDbl_sub9L + .align 16, 0x90 + .type mcl_fpDbl_sub9L,@function +mcl_fpDbl_sub9L: # @mcl_fpDbl_sub9L +# BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r14 + movq 136(%rdx), %rax + movq %rax, -8(%rsp) # 8-byte Spill + movq 128(%rdx), %rax + movq %rax, -16(%rsp) # 8-byte Spill + movq 120(%rdx), %rax + movq %rax, -24(%rsp) # 8-byte Spill + movq 16(%rsi), %r11 + movq (%rsi), %r12 + movq 8(%rsi), %r13 
+ xorl %r9d, %r9d + subq (%rdx), %r12 + sbbq 8(%rdx), %r13 + sbbq 16(%rdx), %r11 + movq 24(%rsi), %rbx + sbbq 24(%rdx), %rbx + movq 32(%rsi), %rbp + sbbq 32(%rdx), %rbp + movq 112(%rdx), %r10 + movq 104(%rdx), %rcx + movq %r12, (%rdi) + movq 96(%rdx), %rax + movq %r13, 8(%rdi) + movq 88(%rdx), %r13 + movq %r11, 16(%rdi) + movq 40(%rdx), %r11 + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %r11, %rbx + movq 48(%rdx), %r11 + movq %rbp, 32(%rdi) + movq 48(%rsi), %rbp + sbbq %r11, %rbp + movq 56(%rdx), %r11 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rbx + sbbq %r11, %rbx + movq 64(%rdx), %r11 + movq %rbp, 48(%rdi) + movq 64(%rsi), %rbp + sbbq %r11, %rbp + movq 80(%rdx), %r8 + movq 72(%rdx), %r11 + movq %rbx, 56(%rdi) + movq 72(%rsi), %r15 + sbbq %r11, %r15 + movq 136(%rsi), %rdx + movq %rbp, 64(%rdi) + movq 80(%rsi), %rbp + sbbq %r8, %rbp + movq 88(%rsi), %r12 + sbbq %r13, %r12 + movq 96(%rsi), %r13 + sbbq %rax, %r13 + movq 104(%rsi), %rax + sbbq %rcx, %rax + movq %rax, -40(%rsp) # 8-byte Spill + movq 112(%rsi), %rax + sbbq %r10, %rax + movq %rax, -32(%rsp) # 8-byte Spill + movq 128(%rsi), %rax + movq 120(%rsi), %rcx + sbbq -24(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, -24(%rsp) # 8-byte Spill + sbbq -16(%rsp), %rax # 8-byte Folded Reload + movq %rax, -16(%rsp) # 8-byte Spill + sbbq -8(%rsp), %rdx # 8-byte Folded Reload + movq %rdx, -8(%rsp) # 8-byte Spill + movl $0, %r8d + sbbq $0, %r8 + andl $1, %r8d + movq (%r14), %r10 + cmoveq %r9, %r10 + testb %r8b, %r8b + movq 16(%r14), %r8 + cmoveq %r9, %r8 + movq 8(%r14), %rdx + cmoveq %r9, %rdx + movq 64(%r14), %rbx + cmoveq %r9, %rbx + movq 56(%r14), %r11 + cmoveq %r9, %r11 + movq 48(%r14), %rsi + cmoveq %r9, %rsi + movq 40(%r14), %rcx + cmoveq %r9, %rcx + movq 32(%r14), %rax + cmoveq %r9, %rax + cmovneq 24(%r14), %r9 + addq %r15, %r10 + adcq %rbp, %rdx + movq %r10, 72(%rdi) + adcq %r12, %r8 + movq %rdx, 80(%rdi) + adcq %r13, %r9 + movq %r8, 88(%rdi) + movq %r9, 96(%rdi) + adcq -40(%rsp), %rax # 8-byte Folded Reload + movq %rax, 104(%rdi) + adcq -32(%rsp), %rcx # 8-byte Folded Reload + movq %rcx, 112(%rdi) + adcq -24(%rsp), %rsi # 8-byte Folded Reload + movq %rsi, 120(%rdi) + adcq -16(%rsp), %r11 # 8-byte Folded Reload + movq %r11, 128(%rdi) + adcq -8(%rsp), %rbx # 8-byte Folded Reload + movq %rbx, 136(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq +.Lfunc_end141: + .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L + + + .section ".note.GNU-stack","",@progbits diff --git a/vendor/github.com/byzantine-lab/mcl/src/asm/x86-64mac.bmi2.s b/vendor/github.com/byzantine-lab/mcl/src/asm/x86-64mac.bmi2.s new file mode 100644 index 000000000..849c66649 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/asm/x86-64mac.bmi2.s @@ -0,0 +1,13830 @@ + .section __TEXT,__text,regular,pure_instructions + .macosx_version_min 10, 12 + .globl _makeNIST_P192Lbmi2 + .p2align 4, 0x90 +_makeNIST_P192Lbmi2: ## @makeNIST_P192Lbmi2 +## BB#0: + movq $-1, %rax + movq $-2, %rdx + movq $-1, %rcx + retq + + .globl _mcl_fpDbl_mod_NIST_P192Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_mod_NIST_P192Lbmi2: ## @mcl_fpDbl_mod_NIST_P192Lbmi2 +## BB#0: + pushq %r14 + pushq %rbx + movq 16(%rsi), %r10 + movq 24(%rsi), %r8 + movq 40(%rsi), %r9 + movq 8(%rsi), %rax + addq %r9, %rax + adcq $0, %r10 + sbbq %rcx, %rcx + andl $1, %ecx + movq 32(%rsi), %r11 + movq (%rsi), %r14 + addq %r8, %r14 + adcq %r11, %rax + adcq %r9, %r10 + adcq $0, %rcx + addq %r9, %r14 + adcq %r8, %rax + adcq %r11, %r10 + adcq $0, %rcx + addq %rcx, %r14 + adcq %rax, %rcx + adcq $0, 
%r10 + sbbq %rax, %rax + andl $1, %eax + movq %r14, %rsi + addq $1, %rsi + movq %rcx, %rdx + adcq $1, %rdx + movq %r10, %rbx + adcq $0, %rbx + adcq $-1, %rax + andl $1, %eax + cmovneq %r14, %rsi + movq %rsi, (%rdi) + testb %al, %al + cmovneq %rcx, %rdx + movq %rdx, 8(%rdi) + cmovneq %r10, %rbx + movq %rbx, 16(%rdi) + popq %rbx + popq %r14 + retq + + .globl _mcl_fp_sqr_NIST_P192Lbmi2 + .p2align 4, 0x90 +_mcl_fp_sqr_NIST_P192Lbmi2: ## @mcl_fp_sqr_NIST_P192Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, -8(%rsp) ## 8-byte Spill + movq 16(%rsi), %r8 + movq (%rsi), %rcx + movq 8(%rsi), %rsi + movq %r8, %rdx + mulxq %rsi, %r14, %rbx + movq %rbx, -16(%rsp) ## 8-byte Spill + movq %rsi, %rdx + mulxq %rsi, %r13, %r15 + mulxq %rcx, %r12, %rsi + addq %rsi, %r13 + adcq %r14, %r15 + adcq $0, %rbx + movq %rcx, %rdx + mulxq %rcx, %r9, %rax + addq %r12, %rax + movq %r8, %rdx + mulxq %rcx, %rbp, %r11 + adcq %rbp, %rsi + movq %r11, %r10 + adcq $0, %r10 + addq %r12, %rax + adcq %r13, %rsi + adcq %r15, %r10 + adcq $0, %rbx + mulxq %r8, %rcx, %rdi + addq %r14, %r11 + adcq -16(%rsp), %rcx ## 8-byte Folded Reload + adcq $0, %rdi + addq %rbp, %rsi + adcq %r10, %r11 + adcq %rbx, %rcx + adcq $0, %rdi + addq %rdi, %rax + adcq $0, %rsi + sbbq %rdx, %rdx + andl $1, %edx + addq %r11, %r9 + adcq %rcx, %rax + adcq %rdi, %rsi + adcq $0, %rdx + addq %rdi, %r9 + adcq %r11, %rax + adcq %rcx, %rsi + adcq $0, %rdx + addq %rdx, %r9 + adcq %rax, %rdx + adcq $0, %rsi + sbbq %rax, %rax + andl $1, %eax + movq %r9, %rcx + addq $1, %rcx + movq %rdx, %rdi + adcq $1, %rdi + movq %rsi, %rbp + adcq $0, %rbp + adcq $-1, %rax + andl $1, %eax + cmovneq %r9, %rcx + movq -8(%rsp), %rbx ## 8-byte Reload + movq %rcx, (%rbx) + testb %al, %al + cmovneq %rdx, %rdi + movq %rdi, 8(%rbx) + cmovneq %rsi, %rbp + movq %rbp, 16(%rbx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mulNIST_P192Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mulNIST_P192Lbmi2: ## @mcl_fp_mulNIST_P192Lbmi2 +## BB#0: + pushq %r14 + pushq %rbx + subq $56, %rsp + movq %rdi, %r14 + leaq 8(%rsp), %rdi + callq _mcl_fpDbl_mulPre3Lbmi2 + movq 24(%rsp), %r9 + movq 32(%rsp), %r8 + movq 48(%rsp), %rdi + movq 16(%rsp), %rbx + addq %rdi, %rbx + adcq $0, %r9 + sbbq %rcx, %rcx + andl $1, %ecx + movq 40(%rsp), %rsi + movq 8(%rsp), %rdx + addq %r8, %rdx + adcq %rsi, %rbx + adcq %rdi, %r9 + adcq $0, %rcx + addq %rdi, %rdx + adcq %r8, %rbx + adcq %rsi, %r9 + adcq $0, %rcx + addq %rcx, %rdx + adcq %rbx, %rcx + adcq $0, %r9 + sbbq %rsi, %rsi + andl $1, %esi + movq %rdx, %rdi + addq $1, %rdi + movq %rcx, %rbx + adcq $1, %rbx + movq %r9, %rax + adcq $0, %rax + adcq $-1, %rsi + andl $1, %esi + cmovneq %rdx, %rdi + movq %rdi, (%r14) + testb %sil, %sil + cmovneq %rcx, %rbx + movq %rbx, 8(%r14) + cmovneq %r9, %rax + movq %rax, 16(%r14) + addq $56, %rsp + popq %rbx + popq %r14 + retq + + .globl _mcl_fpDbl_mod_NIST_P521Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_mod_NIST_P521Lbmi2: ## @mcl_fpDbl_mod_NIST_P521Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 120(%rsi), %r9 + movq 128(%rsi), %r14 + movq %r14, %r8 + shldq $55, %r9, %r8 + movq 112(%rsi), %r10 + shldq $55, %r10, %r9 + movq 104(%rsi), %r11 + shldq $55, %r11, %r10 + movq 96(%rsi), %r15 + shldq $55, %r15, %r11 + movq 88(%rsi), %r12 + shldq $55, %r12, %r15 + movq 80(%rsi), %rcx + shldq $55, %rcx, %r12 + movq 64(%rsi), %rbx + movq 72(%rsi), %rax + shldq $55, %rax, %rcx + shrq $9, %r14 + shldq $55, %rbx, %rax + ## kill: %EBX %EBX %RBX 
%RBX + andl $511, %ebx ## imm = 0x1FF + addq (%rsi), %rax + adcq 8(%rsi), %rcx + adcq 16(%rsi), %r12 + adcq 24(%rsi), %r15 + adcq 32(%rsi), %r11 + adcq 40(%rsi), %r10 + adcq 48(%rsi), %r9 + adcq 56(%rsi), %r8 + adcq %r14, %rbx + movl %ebx, %esi + shrl $9, %esi + andl $1, %esi + addq %rax, %rsi + adcq $0, %rcx + adcq $0, %r12 + adcq $0, %r15 + adcq $0, %r11 + adcq $0, %r10 + adcq $0, %r9 + adcq $0, %r8 + adcq $0, %rbx + movq %rsi, %rax + andq %r12, %rax + andq %r15, %rax + andq %r11, %rax + andq %r10, %rax + andq %r9, %rax + andq %r8, %rax + movq %rbx, %rdx + orq $-512, %rdx ## imm = 0xFE00 + andq %rax, %rdx + andq %rcx, %rdx + cmpq $-1, %rdx + je LBB4_1 +## BB#3: ## %nonzero + movq %rsi, (%rdi) + movq %rcx, 8(%rdi) + movq %r12, 16(%rdi) + movq %r15, 24(%rdi) + movq %r11, 32(%rdi) + movq %r10, 40(%rdi) + movq %r9, 48(%rdi) + movq %r8, 56(%rdi) + andl $511, %ebx ## imm = 0x1FF + movq %rbx, 64(%rdi) + jmp LBB4_2 +LBB4_1: ## %zero + movq $0, 64(%rdi) + movq $0, 56(%rdi) + movq $0, 48(%rdi) + movq $0, 40(%rdi) + movq $0, 32(%rdi) + movq $0, 24(%rdi) + movq $0, 16(%rdi) + movq $0, 8(%rdi) + movq $0, (%rdi) +LBB4_2: ## %zero + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_mulUnitPre1Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mulUnitPre1Lbmi2: ## @mcl_fp_mulUnitPre1Lbmi2 +## BB#0: + mulxq (%rsi), %rcx, %rax + movq %rcx, (%rdi) + movq %rax, 8(%rdi) + retq + + .globl _mcl_fpDbl_mulPre1Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_mulPre1Lbmi2: ## @mcl_fpDbl_mulPre1Lbmi2 +## BB#0: + movq (%rdx), %rdx + mulxq (%rsi), %rcx, %rax + movq %rcx, (%rdi) + movq %rax, 8(%rdi) + retq + + .globl _mcl_fpDbl_sqrPre1Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre1Lbmi2: ## @mcl_fpDbl_sqrPre1Lbmi2 +## BB#0: + movq (%rsi), %rdx + mulxq %rdx, %rcx, %rax + movq %rcx, (%rdi) + movq %rax, 8(%rdi) + retq + + .globl _mcl_fp_mont1Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mont1Lbmi2: ## @mcl_fp_mont1Lbmi2 +## BB#0: + movq %rdx, %rax + movq (%rsi), %rdx + mulxq (%rax), %rsi, %r8 + movq -8(%rcx), %rdx + imulq %rsi, %rdx + movq (%rcx), %rcx + mulxq %rcx, %rdx, %rax + addq %rsi, %rdx + adcq %r8, %rax + sbbq %rdx, %rdx + andl $1, %edx + movq %rax, %rsi + subq %rcx, %rsi + sbbq $0, %rdx + testb $1, %dl + cmovneq %rax, %rsi + movq %rsi, (%rdi) + retq + + .globl _mcl_fp_montNF1Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montNF1Lbmi2: ## @mcl_fp_montNF1Lbmi2 +## BB#0: + movq %rdx, %rax + movq (%rsi), %rdx + mulxq (%rax), %rsi, %r8 + movq -8(%rcx), %rdx + imulq %rsi, %rdx + movq (%rcx), %rcx + mulxq %rcx, %rdx, %rax + addq %rsi, %rdx + adcq %r8, %rax + movq %rax, %rdx + subq %rcx, %rdx + cmovsq %rax, %rdx + movq %rdx, (%rdi) + retq + + .globl _mcl_fp_montRed1Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montRed1Lbmi2: ## @mcl_fp_montRed1Lbmi2 +## BB#0: + movq (%rsi), %rcx + movq -8(%rdx), %rax + imulq %rcx, %rax + movq (%rdx), %r8 + movq %rax, %rdx + mulxq %r8, %rax, %rdx + addq %rcx, %rax + adcq 8(%rsi), %rdx + sbbq %rax, %rax + andl $1, %eax + movq %rdx, %rcx + subq %r8, %rcx + sbbq $0, %rax + testb $1, %al + cmovneq %rdx, %rcx + movq %rcx, (%rdi) + retq + + .globl _mcl_fp_addPre1Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addPre1Lbmi2: ## @mcl_fp_addPre1Lbmi2 +## BB#0: + movq (%rdx), %rax + addq (%rsi), %rax + movq %rax, (%rdi) + sbbq %rax, %rax + andl $1, %eax + retq + + .globl _mcl_fp_subPre1Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subPre1Lbmi2: ## @mcl_fp_subPre1Lbmi2 +## BB#0: + movq (%rsi), %rcx + xorl %eax, %eax + subq (%rdx), %rcx + movq %rcx, (%rdi) + sbbq $0, %rax + andl $1, %eax + retq + + .globl _mcl_fp_shr1_1Lbmi2 + .p2align 4, 0x90 +_mcl_fp_shr1_1Lbmi2: ## 
@mcl_fp_shr1_1Lbmi2 +## BB#0: + movq (%rsi), %rax + shrq %rax + movq %rax, (%rdi) + retq + + .globl _mcl_fp_add1Lbmi2 + .p2align 4, 0x90 +_mcl_fp_add1Lbmi2: ## @mcl_fp_add1Lbmi2 +## BB#0: + movq (%rdx), %rax + addq (%rsi), %rax + movq %rax, (%rdi) + sbbq %rdx, %rdx + andl $1, %edx + subq (%rcx), %rax + sbbq $0, %rdx + testb $1, %dl + jne LBB14_2 +## BB#1: ## %nocarry + movq %rax, (%rdi) +LBB14_2: ## %carry + retq + + .globl _mcl_fp_addNF1Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addNF1Lbmi2: ## @mcl_fp_addNF1Lbmi2 +## BB#0: + movq (%rdx), %rax + addq (%rsi), %rax + movq %rax, %rdx + subq (%rcx), %rdx + cmovsq %rax, %rdx + movq %rdx, (%rdi) + retq + + .globl _mcl_fp_sub1Lbmi2 + .p2align 4, 0x90 +_mcl_fp_sub1Lbmi2: ## @mcl_fp_sub1Lbmi2 +## BB#0: + movq (%rsi), %rax + xorl %esi, %esi + subq (%rdx), %rax + movq %rax, (%rdi) + sbbq $0, %rsi + testb $1, %sil + jne LBB16_2 +## BB#1: ## %nocarry + retq +LBB16_2: ## %carry + addq (%rcx), %rax + movq %rax, (%rdi) + retq + + .globl _mcl_fp_subNF1Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subNF1Lbmi2: ## @mcl_fp_subNF1Lbmi2 +## BB#0: + movq (%rsi), %rax + subq (%rdx), %rax + movq %rax, %rdx + sarq $63, %rdx + andq (%rcx), %rdx + addq %rax, %rdx + movq %rdx, (%rdi) + retq + + .globl _mcl_fpDbl_add1Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_add1Lbmi2: ## @mcl_fpDbl_add1Lbmi2 +## BB#0: + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + movq %rax, (%rdi) + sbbq %rax, %rax + andl $1, %eax + movq %rdx, %rsi + subq (%rcx), %rsi + sbbq $0, %rax + testb $1, %al + cmovneq %rdx, %rsi + movq %rsi, 8(%rdi) + retq + + .globl _mcl_fpDbl_sub1Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sub1Lbmi2: ## @mcl_fpDbl_sub1Lbmi2 +## BB#0: + movq (%rsi), %rax + movq 8(%rsi), %r8 + xorl %esi, %esi + subq (%rdx), %rax + sbbq 8(%rdx), %r8 + movq %rax, (%rdi) + movl $0, %eax + sbbq $0, %rax + testb $1, %al + cmovneq (%rcx), %rsi + addq %r8, %rsi + movq %rsi, 8(%rdi) + retq + + .globl _mcl_fp_mulUnitPre2Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mulUnitPre2Lbmi2: ## @mcl_fp_mulUnitPre2Lbmi2 +## BB#0: + mulxq 8(%rsi), %rax, %rcx + mulxq (%rsi), %rdx, %rsi + movq %rdx, (%rdi) + addq %rax, %rsi + movq %rsi, 8(%rdi) + adcq $0, %rcx + movq %rcx, 16(%rdi) + retq + + .globl _mcl_fpDbl_mulPre2Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_mulPre2Lbmi2: ## @mcl_fpDbl_mulPre2Lbmi2 +## BB#0: + movq %rdx, %r10 + movq (%rsi), %r11 + movq 8(%rsi), %r8 + movq (%r10), %rsi + movq %r11, %rdx + mulxq %rsi, %rdx, %r9 + movq %rdx, (%rdi) + movq %r8, %rdx + mulxq %rsi, %rsi, %rax + addq %r9, %rsi + adcq $0, %rax + movq 8(%r10), %rcx + movq %r11, %rdx + mulxq %rcx, %rdx, %r9 + addq %rsi, %rdx + movq %rdx, 8(%rdi) + movq %r8, %rdx + mulxq %rcx, %rdx, %rcx + adcq %rax, %rdx + sbbq %rax, %rax + andl $1, %eax + addq %r9, %rdx + movq %rdx, 16(%rdi) + adcq %rcx, %rax + movq %rax, 24(%rdi) + retq + + .globl _mcl_fpDbl_sqrPre2Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre2Lbmi2: ## @mcl_fpDbl_sqrPre2Lbmi2 +## BB#0: + movq (%rsi), %rax + movq 8(%rsi), %rcx + movq %rax, %rdx + mulxq %rax, %rdx, %rsi + movq %rdx, (%rdi) + movq %rcx, %rdx + mulxq %rax, %rdx, %r8 + addq %rdx, %rsi + movq %r8, %rax + adcq $0, %rax + addq %rdx, %rsi + movq %rsi, 8(%rdi) + movq %rcx, %rdx + mulxq %rcx, %rdx, %rcx + adcq %rax, %rdx + sbbq %rax, %rax + andl $1, %eax + addq %r8, %rdx + movq %rdx, 16(%rdi) + adcq %rcx, %rax + movq %rax, 24(%rdi) + retq + + .globl _mcl_fp_mont2Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mont2Lbmi2: ## @mcl_fp_mont2Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq (%rsi), %r8 + movq 
8(%rsi), %r9 + movq (%rdx), %rax + movq 8(%rdx), %r11 + movq %r9, %rdx + mulxq %rax, %r10, %r13 + movq %r8, %rdx + mulxq %rax, %r14, %rsi + addq %r10, %rsi + adcq $0, %r13 + movq -8(%rcx), %rbp + movq (%rcx), %r10 + movq %r14, %rdx + imulq %rbp, %rdx + movq 8(%rcx), %r15 + mulxq %r15, %r12, %rcx + mulxq %r10, %rdx, %rbx + addq %r12, %rbx + adcq $0, %rcx + addq %r14, %rdx + adcq %rsi, %rbx + adcq %r13, %rcx + sbbq %rsi, %rsi + andl $1, %esi + movq %r11, %rdx + mulxq %r9, %r9, %r14 + movq %r11, %rdx + mulxq %r8, %r8, %rax + addq %r9, %rax + adcq $0, %r14 + addq %rbx, %r8 + adcq %rcx, %rax + adcq %rsi, %r14 + sbbq %rsi, %rsi + andl $1, %esi + imulq %r8, %rbp + movq %rbp, %rdx + mulxq %r15, %rcx, %rbx + mulxq %r10, %rdx, %rbp + addq %rcx, %rbp + adcq $0, %rbx + addq %r8, %rdx + adcq %rax, %rbp + adcq %r14, %rbx + adcq $0, %rsi + movq %rbp, %rax + subq %r10, %rax + movq %rbx, %rcx + sbbq %r15, %rcx + sbbq $0, %rsi + andl $1, %esi + cmovneq %rbx, %rcx + testb %sil, %sil + cmovneq %rbp, %rax + movq %rax, (%rdi) + movq %rcx, 8(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF2Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montNF2Lbmi2: ## @mcl_fp_montNF2Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq (%rsi), %r8 + movq 8(%rsi), %r9 + movq (%rdx), %rax + movq 8(%rdx), %r11 + movq %r9, %rdx + mulxq %rax, %r10, %rsi + movq %r8, %rdx + mulxq %rax, %r15, %r13 + addq %r10, %r13 + adcq $0, %rsi + movq -8(%rcx), %rbp + movq (%rcx), %r10 + movq %r15, %rdx + imulq %rbp, %rdx + movq 8(%rcx), %r14 + mulxq %r10, %rcx, %r12 + addq %r15, %rcx + mulxq %r14, %rbx, %rcx + adcq %r13, %rbx + adcq $0, %rsi + addq %r12, %rbx + adcq %rcx, %rsi + movq %r11, %rdx + mulxq %r9, %r9, %rcx + movq %r11, %rdx + mulxq %r8, %r8, %rax + addq %r9, %rax + adcq $0, %rcx + addq %rbx, %r8 + adcq %rsi, %rax + adcq $0, %rcx + imulq %r8, %rbp + movq %rbp, %rdx + mulxq %r14, %rbx, %rsi + mulxq %r10, %rbp, %rdx + addq %r8, %rbp + adcq %rax, %rbx + adcq $0, %rcx + addq %rdx, %rbx + adcq %rsi, %rcx + movq %rbx, %rax + subq %r10, %rax + movq %rcx, %rdx + sbbq %r14, %rdx + cmovsq %rbx, %rax + movq %rax, (%rdi) + cmovsq %rcx, %rdx + movq %rdx, 8(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed2Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montRed2Lbmi2: ## @mcl_fp_montRed2Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq -8(%rdx), %r15 + movq (%rdx), %r8 + movq (%rsi), %r10 + movq %r10, %rcx + imulq %r15, %rcx + movq 8(%rdx), %r9 + movq %rcx, %rdx + mulxq %r9, %r11, %r14 + mulxq %r8, %rcx, %rax + addq %r11, %rax + adcq $0, %r14 + movq 24(%rsi), %r11 + addq %r10, %rcx + adcq 8(%rsi), %rax + adcq 16(%rsi), %r14 + adcq $0, %r11 + sbbq %rcx, %rcx + andl $1, %ecx + imulq %rax, %r15 + movq %r15, %rdx + mulxq %r9, %r10, %rbx + mulxq %r8, %rsi, %rdx + addq %r10, %rdx + adcq $0, %rbx + addq %rax, %rsi + adcq %r14, %rdx + adcq %r11, %rbx + adcq $0, %rcx + movq %rdx, %rax + subq %r8, %rax + movq %rbx, %rsi + sbbq %r9, %rsi + sbbq $0, %rcx + andl $1, %ecx + cmovneq %rbx, %rsi + testb %cl, %cl + cmovneq %rdx, %rax + movq %rax, (%rdi) + movq %rsi, 8(%rdi) + popq %rbx + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_addPre2Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addPre2Lbmi2: ## @mcl_fp_addPre2Lbmi2 +## BB#0: + movq (%rdx), %rax + movq 8(%rdx), %rcx + addq (%rsi), %rax + adcq 8(%rsi), %rcx + movq %rax, (%rdi) + movq %rcx, 8(%rdi) + sbbq %rax, %rax + andl $1, %eax + retq + + .globl _mcl_fp_subPre2Lbmi2 + 
.p2align 4, 0x90 +_mcl_fp_subPre2Lbmi2: ## @mcl_fp_subPre2Lbmi2 +## BB#0: + movq (%rsi), %rcx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rcx + sbbq 8(%rdx), %rsi + movq %rcx, (%rdi) + movq %rsi, 8(%rdi) + sbbq $0, %rax + andl $1, %eax + retq + + .globl _mcl_fp_shr1_2Lbmi2 + .p2align 4, 0x90 +_mcl_fp_shr1_2Lbmi2: ## @mcl_fp_shr1_2Lbmi2 +## BB#0: + movq (%rsi), %rax + movq 8(%rsi), %rcx + shrdq $1, %rcx, %rax + movq %rax, (%rdi) + shrq %rcx + movq %rcx, 8(%rdi) + retq + + .globl _mcl_fp_add2Lbmi2 + .p2align 4, 0x90 +_mcl_fp_add2Lbmi2: ## @mcl_fp_add2Lbmi2 +## BB#0: + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq $0, %rsi + testb $1, %sil + jne LBB29_2 +## BB#1: ## %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) +LBB29_2: ## %carry + retq + + .globl _mcl_fp_addNF2Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addNF2Lbmi2: ## @mcl_fp_addNF2Lbmi2 +## BB#0: + movq (%rdx), %rax + movq 8(%rdx), %r8 + addq (%rsi), %rax + adcq 8(%rsi), %r8 + movq %rax, %rsi + subq (%rcx), %rsi + movq %r8, %rdx + sbbq 8(%rcx), %rdx + testq %rdx, %rdx + cmovsq %rax, %rsi + movq %rsi, (%rdi) + cmovsq %r8, %rdx + movq %rdx, 8(%rdi) + retq + + .globl _mcl_fp_sub2Lbmi2 + .p2align 4, 0x90 +_mcl_fp_sub2Lbmi2: ## @mcl_fp_sub2Lbmi2 +## BB#0: + movq (%rsi), %rax + movq 8(%rsi), %r8 + xorl %esi, %esi + subq (%rdx), %rax + sbbq 8(%rdx), %r8 + movq %rax, (%rdi) + movq %r8, 8(%rdi) + sbbq $0, %rsi + testb $1, %sil + jne LBB31_2 +## BB#1: ## %nocarry + retq +LBB31_2: ## %carry + movq 8(%rcx), %rdx + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %r8, %rdx + movq %rdx, 8(%rdi) + retq + + .globl _mcl_fp_subNF2Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subNF2Lbmi2: ## @mcl_fp_subNF2Lbmi2 +## BB#0: + movq (%rsi), %r8 + movq 8(%rsi), %rsi + subq (%rdx), %r8 + sbbq 8(%rdx), %rsi + movq %rsi, %rdx + sarq $63, %rdx + movq 8(%rcx), %rax + andq %rdx, %rax + andq (%rcx), %rdx + addq %r8, %rdx + movq %rdx, (%rdi) + adcq %rsi, %rax + movq %rax, 8(%rdi) + retq + + .globl _mcl_fpDbl_add2Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_add2Lbmi2: ## @mcl_fpDbl_add2Lbmi2 +## BB#0: + movq 24(%rdx), %r8 + movq 24(%rsi), %r9 + movq 16(%rdx), %r10 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r10 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + adcq %r8, %r9 + sbbq %rax, %rax + andl $1, %eax + movq %r10, %rdx + subq (%rcx), %rdx + movq %r9, %rsi + sbbq 8(%rcx), %rsi + sbbq $0, %rax + andl $1, %eax + cmovneq %r10, %rdx + movq %rdx, 16(%rdi) + testb %al, %al + cmovneq %r9, %rsi + movq %rsi, 24(%rdi) + retq + + .globl _mcl_fpDbl_sub2Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sub2Lbmi2: ## @mcl_fpDbl_sub2Lbmi2 +## BB#0: + movq 24(%rdx), %r8 + movq 24(%rsi), %r9 + movq 16(%rsi), %r10 + movq (%rsi), %r11 + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %r11 + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %r10 + movq %r11, (%rdi) + movq %rsi, 8(%rdi) + sbbq %r8, %r9 + movl $0, %edx + sbbq $0, %rdx + andl $1, %edx + movq (%rcx), %rsi + cmoveq %rax, %rsi + testb %dl, %dl + cmovneq 8(%rcx), %rax + addq %r10, %rsi + movq %rsi, 16(%rdi) + adcq %r9, %rax + movq %rax, 24(%rdi) + retq + + .globl _mcl_fp_mulUnitPre3Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mulUnitPre3Lbmi2: ## @mcl_fp_mulUnitPre3Lbmi2 +## BB#0: + mulxq 16(%rsi), %r8, %rcx + mulxq 8(%rsi), %r9, %rax + mulxq (%rsi), %rdx, %rsi + movq %rdx, (%rdi) + addq %r9, %rsi + movq %rsi, 8(%rdi) + adcq %r8, %rax + movq %rax, 16(%rdi) + adcq $0, %rcx + 
movq %rcx, 24(%rdi) + retq + + .globl _mcl_fpDbl_mulPre3Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_mulPre3Lbmi2: ## @mcl_fpDbl_mulPre3Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq %rdx, %r9 + movq (%rsi), %r10 + movq 8(%rsi), %r8 + movq (%r9), %rax + movq %r10, %rdx + mulxq %rax, %rdx, %r14 + movq 16(%rsi), %r11 + movq %rdx, (%rdi) + movq %r11, %rdx + mulxq %rax, %rsi, %rbx + movq %r8, %rdx + mulxq %rax, %rax, %rcx + addq %r14, %rax + adcq %rsi, %rcx + adcq $0, %rbx + movq 8(%r9), %rsi + movq %r10, %rdx + mulxq %rsi, %rdx, %r14 + addq %rax, %rdx + movq %rdx, 8(%rdi) + movq %r11, %rdx + mulxq %rsi, %rax, %r15 + movq %r8, %rdx + mulxq %rsi, %rsi, %rdx + adcq %rcx, %rsi + adcq %rbx, %rax + sbbq %rcx, %rcx + andl $1, %ecx + addq %r14, %rsi + adcq %rdx, %rax + adcq %r15, %rcx + movq 16(%r9), %rbx + movq %r10, %rdx + mulxq %rbx, %rdx, %r9 + addq %rsi, %rdx + movq %rdx, 16(%rdi) + movq %r11, %rdx + mulxq %rbx, %rsi, %r10 + movq %r8, %rdx + mulxq %rbx, %rbx, %rdx + adcq %rax, %rbx + adcq %rcx, %rsi + sbbq %rax, %rax + andl $1, %eax + addq %r9, %rbx + movq %rbx, 24(%rdi) + adcq %rdx, %rsi + movq %rsi, 32(%rdi) + adcq %r10, %rax + movq %rax, 40(%rdi) + popq %rbx + popq %r14 + popq %r15 + retq + + .globl _mcl_fpDbl_sqrPre3Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre3Lbmi2: ## @mcl_fpDbl_sqrPre3Lbmi2 +## BB#0: + pushq %r14 + pushq %rbx + movq 16(%rsi), %r10 + movq (%rsi), %rcx + movq 8(%rsi), %rsi + movq %rcx, %rdx + mulxq %rcx, %rdx, %rax + movq %rdx, (%rdi) + movq %r10, %rdx + mulxq %rcx, %r11, %r8 + movq %rsi, %rdx + mulxq %rcx, %rdx, %r14 + addq %rdx, %rax + movq %r14, %rbx + adcq %r11, %rbx + movq %r8, %rcx + adcq $0, %rcx + addq %rdx, %rax + movq %rax, 8(%rdi) + movq %r10, %rdx + mulxq %rsi, %rax, %r9 + movq %rsi, %rdx + mulxq %rsi, %rsi, %rdx + adcq %rbx, %rsi + adcq %rax, %rcx + sbbq %rbx, %rbx + andl $1, %ebx + addq %r14, %rsi + adcq %rdx, %rcx + adcq %r9, %rbx + addq %r11, %rsi + movq %rsi, 16(%rdi) + movq %r10, %rdx + mulxq %r10, %rsi, %rdx + adcq %rax, %rcx + adcq %rbx, %rsi + sbbq %rax, %rax + andl $1, %eax + addq %r8, %rcx + movq %rcx, 24(%rdi) + adcq %r9, %rsi + movq %rsi, 32(%rdi) + adcq %rdx, %rax + movq %rax, 40(%rdi) + popq %rbx + popq %r14 + retq + + .globl _mcl_fp_mont3Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mont3Lbmi2: ## @mcl_fp_mont3Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %r14 + movq %rdi, -8(%rsp) ## 8-byte Spill + movq 16(%rsi), %r12 + movq (%r14), %rax + movq %r14, -16(%rsp) ## 8-byte Spill + movq %r12, %rdx + movq %r12, -24(%rsp) ## 8-byte Spill + mulxq %rax, %r11, %rbp + movq (%rsi), %r15 + movq 8(%rsi), %rdx + movq %rdx, -48(%rsp) ## 8-byte Spill + mulxq %rax, %rbx, %r8 + movq %r15, %rdx + movq %r15, -32(%rsp) ## 8-byte Spill + mulxq %rax, %r9, %rdi + addq %rbx, %rdi + adcq %r11, %r8 + adcq $0, %rbp + movq -8(%rcx), %r13 + movq %r9, %rdx + imulq %r13, %rdx + movq 8(%rcx), %rax + movq %rax, -56(%rsp) ## 8-byte Spill + mulxq %rax, %r11, %r10 + movq (%rcx), %rax + movq %rax, -64(%rsp) ## 8-byte Spill + mulxq %rax, %rsi, %rbx + addq %r11, %rbx + movq 16(%rcx), %rax + movq %rax, -40(%rsp) ## 8-byte Spill + mulxq %rax, %rcx, %rax + adcq %r10, %rcx + adcq $0, %rax + addq %r9, %rsi + adcq %rdi, %rbx + movq 8(%r14), %rdx + adcq %r8, %rcx + adcq %rbp, %rax + sbbq %r9, %r9 + andl $1, %r9d + mulxq %r12, %r11, %rdi + movq -48(%rsp), %r12 ## 8-byte Reload + mulxq %r12, %r10, %rsi + mulxq %r15, %r8, %rbp + addq %r10, %rbp + adcq %r11, %rsi + adcq $0, %rdi + addq %rbx, %r8 + adcq %rcx, %rbp + adcq %rax, %rsi + adcq 
%r9, %rdi + sbbq %r11, %r11 + andl $1, %r11d + movq %r8, %rdx + imulq %r13, %rdx + movq -40(%rsp), %r14 ## 8-byte Reload + mulxq %r14, %r9, %rcx + mulxq -56(%rsp), %r10, %rax ## 8-byte Folded Reload + mulxq -64(%rsp), %rdx, %rbx ## 8-byte Folded Reload + addq %r10, %rbx + adcq %r9, %rax + adcq $0, %rcx + addq %r8, %rdx + adcq %rbp, %rbx + adcq %rsi, %rax + adcq %rdi, %rcx + adcq $0, %r11 + movq -16(%rsp), %rdx ## 8-byte Reload + movq 16(%rdx), %rdx + mulxq -24(%rsp), %r9, %rsi ## 8-byte Folded Reload + mulxq %r12, %r10, %r15 + mulxq -32(%rsp), %r8, %rdi ## 8-byte Folded Reload + addq %r10, %rdi + adcq %r9, %r15 + adcq $0, %rsi + addq %rbx, %r8 + adcq %rax, %rdi + adcq %rcx, %r15 + adcq %r11, %rsi + sbbq %rbx, %rbx + andl $1, %ebx + imulq %r8, %r13 + movq %r13, %rdx + mulxq %r14, %r9, %rbp + movq %r14, %r12 + movq -56(%rsp), %r14 ## 8-byte Reload + mulxq %r14, %r10, %rax + movq -64(%rsp), %rcx ## 8-byte Reload + mulxq %rcx, %r11, %rdx + addq %r10, %rdx + adcq %r9, %rax + adcq $0, %rbp + addq %r8, %r11 + adcq %rdi, %rdx + adcq %r15, %rax + adcq %rsi, %rbp + adcq $0, %rbx + movq %rdx, %rsi + subq %rcx, %rsi + movq %rax, %rdi + sbbq %r14, %rdi + movq %rbp, %rcx + sbbq %r12, %rcx + sbbq $0, %rbx + andl $1, %ebx + cmovneq %rbp, %rcx + testb %bl, %bl + cmovneq %rdx, %rsi + movq -8(%rsp), %rdx ## 8-byte Reload + movq %rsi, (%rdx) + cmovneq %rax, %rdi + movq %rdi, 8(%rdx) + movq %rcx, 16(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF3Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montNF3Lbmi2: ## @mcl_fp_montNF3Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq %rdx, %r10 + movq %rdi, -8(%rsp) ## 8-byte Spill + movq (%rsi), %rcx + movq 8(%rsi), %rdi + movq %rdi, -32(%rsp) ## 8-byte Spill + movq (%r10), %rax + movq %r10, -16(%rsp) ## 8-byte Spill + movq %rdi, %rdx + mulxq %rax, %rbx, %r14 + movq %rcx, %rdx + movq %rcx, -24(%rsp) ## 8-byte Spill + mulxq %rax, %r15, %r12 + movq 16(%rsi), %r11 + addq %rbx, %r12 + movq %r11, %rdx + mulxq %rax, %rsi, %rbx + adcq %r14, %rsi + adcq $0, %rbx + movq -8(%r8), %r9 + movq (%r8), %r14 + movq %r15, %rdx + imulq %r9, %rdx + mulxq %r14, %rbp, %r13 + addq %r15, %rbp + movq 8(%r8), %r15 + mulxq %r15, %rdi, %rbp + adcq %r12, %rdi + movq 16(%r8), %r12 + mulxq %r12, %rax, %r8 + adcq %rsi, %rax + adcq $0, %rbx + addq %r13, %rdi + movq 8(%r10), %rdx + adcq %rbp, %rax + adcq %r8, %rbx + movq -32(%rsp), %r10 ## 8-byte Reload + mulxq %r10, %rsi, %r8 + mulxq %rcx, %r13, %rbp + addq %rsi, %rbp + mulxq %r11, %rcx, %rsi + adcq %r8, %rcx + adcq $0, %rsi + addq %rdi, %r13 + adcq %rax, %rbp + adcq %rbx, %rcx + adcq $0, %rsi + movq %r13, %rdx + imulq %r9, %rdx + mulxq %r14, %rdi, %rbx + addq %r13, %rdi + mulxq %r15, %rax, %rdi + adcq %rbp, %rax + mulxq %r12, %rbp, %rdx + adcq %rcx, %rbp + adcq $0, %rsi + addq %rbx, %rax + adcq %rdi, %rbp + adcq %rdx, %rsi + movq -16(%rsp), %rcx ## 8-byte Reload + movq 16(%rcx), %rdx + mulxq %r10, %rbx, %r8 + mulxq -24(%rsp), %r10, %rdi ## 8-byte Folded Reload + addq %rbx, %rdi + mulxq %r11, %rcx, %rbx + adcq %r8, %rcx + adcq $0, %rbx + addq %rax, %r10 + adcq %rbp, %rdi + adcq %rsi, %rcx + adcq $0, %rbx + imulq %r10, %r9 + movq %r9, %rdx + mulxq %r14, %rdx, %r8 + addq %r10, %rdx + movq %r9, %rdx + mulxq %r12, %rbp, %rsi + mulxq %r15, %rax, %rdx + adcq %rdi, %rax + adcq %rcx, %rbp + adcq $0, %rbx + addq %r8, %rax + adcq %rdx, %rbp + adcq %rsi, %rbx + movq %rax, %rcx + subq %r14, %rcx + movq %rbp, %rdx + sbbq %r15, %rdx + movq %rbx, 
%rsi + sbbq %r12, %rsi + movq %rsi, %rdi + sarq $63, %rdi + cmovsq %rax, %rcx + movq -8(%rsp), %rax ## 8-byte Reload + movq %rcx, (%rax) + cmovsq %rbp, %rdx + movq %rdx, 8(%rax) + cmovsq %rbx, %rsi + movq %rsi, 16(%rax) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed3Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montRed3Lbmi2: ## @mcl_fp_montRed3Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rdi, -8(%rsp) ## 8-byte Spill + movq -8(%rcx), %r15 + movq (%rcx), %r9 + movq (%rsi), %rbx + movq %rbx, %rdx + imulq %r15, %rdx + movq 16(%rcx), %rax + mulxq %rax, %r14, %r11 + movq %rax, %rbp + movq %rbp, -16(%rsp) ## 8-byte Spill + movq 8(%rcx), %r10 + mulxq %r10, %rax, %r13 + mulxq %r9, %rdx, %rcx + addq %rax, %rcx + adcq %r14, %r13 + adcq $0, %r11 + movq 40(%rsi), %r14 + movq 32(%rsi), %r12 + addq %rbx, %rdx + adcq 8(%rsi), %rcx + adcq 16(%rsi), %r13 + adcq 24(%rsi), %r11 + adcq $0, %r12 + adcq $0, %r14 + sbbq %rsi, %rsi + andl $1, %esi + movq %rcx, %rdx + imulq %r15, %rdx + mulxq %rbp, %rbp, %rdi + mulxq %r10, %r8, %rbx + mulxq %r9, %rdx, %rax + addq %r8, %rax + adcq %rbp, %rbx + adcq $0, %rdi + addq %rcx, %rdx + adcq %r13, %rax + adcq %r11, %rbx + adcq %r12, %rdi + adcq $0, %r14 + adcq $0, %rsi + imulq %rax, %r15 + movq %r15, %rdx + movq -16(%rsp), %r13 ## 8-byte Reload + mulxq %r13, %r8, %rcx + movq %r15, %rdx + mulxq %r10, %r11, %r12 + mulxq %r9, %r15, %rdx + addq %r11, %rdx + adcq %r8, %r12 + adcq $0, %rcx + addq %rax, %r15 + adcq %rbx, %rdx + adcq %rdi, %r12 + adcq %r14, %rcx + adcq $0, %rsi + movq %rdx, %rax + subq %r9, %rax + movq %r12, %rdi + sbbq %r10, %rdi + movq %rcx, %rbp + sbbq %r13, %rbp + sbbq $0, %rsi + andl $1, %esi + cmovneq %rcx, %rbp + testb %sil, %sil + cmovneq %rdx, %rax + movq -8(%rsp), %rcx ## 8-byte Reload + movq %rax, (%rcx) + cmovneq %r12, %rdi + movq %rdi, 8(%rcx) + movq %rbp, 16(%rcx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_addPre3Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addPre3Lbmi2: ## @mcl_fp_addPre3Lbmi2 +## BB#0: + movq 16(%rdx), %rax + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rax + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %rax, 16(%rdi) + sbbq %rax, %rax + andl $1, %eax + retq + + .globl _mcl_fp_subPre3Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subPre3Lbmi2: ## @mcl_fp_subPre3Lbmi2 +## BB#0: + movq 16(%rsi), %r8 + movq (%rsi), %rcx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rcx + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %r8 + movq %rcx, (%rdi) + movq %rsi, 8(%rdi) + movq %r8, 16(%rdi) + sbbq $0, %rax + andl $1, %eax + retq + + .globl _mcl_fp_shr1_3Lbmi2 + .p2align 4, 0x90 +_mcl_fp_shr1_3Lbmi2: ## @mcl_fp_shr1_3Lbmi2 +## BB#0: + movq 16(%rsi), %rax + movq (%rsi), %rcx + movq 8(%rsi), %rdx + shrdq $1, %rdx, %rcx + movq %rcx, (%rdi) + shrdq $1, %rax, %rdx + movq %rdx, 8(%rdi) + shrq %rax + movq %rax, 16(%rdi) + retq + + .globl _mcl_fp_add3Lbmi2 + .p2align 4, 0x90 +_mcl_fp_add3Lbmi2: ## @mcl_fp_add3Lbmi2 +## BB#0: + movq 16(%rdx), %r8 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r8 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r8, 16(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne LBB44_2 +## BB#1: ## %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r8, 16(%rdi) +LBB44_2: ## %carry + 
retq + + .globl _mcl_fp_addNF3Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addNF3Lbmi2: ## @mcl_fp_addNF3Lbmi2 +## BB#0: + movq 16(%rdx), %r8 + movq (%rdx), %r10 + movq 8(%rdx), %r9 + addq (%rsi), %r10 + adcq 8(%rsi), %r9 + adcq 16(%rsi), %r8 + movq %r10, %rsi + subq (%rcx), %rsi + movq %r9, %rdx + sbbq 8(%rcx), %rdx + movq %r8, %rax + sbbq 16(%rcx), %rax + movq %rax, %rcx + sarq $63, %rcx + cmovsq %r10, %rsi + movq %rsi, (%rdi) + cmovsq %r9, %rdx + movq %rdx, 8(%rdi) + cmovsq %r8, %rax + movq %rax, 16(%rdi) + retq + + .globl _mcl_fp_sub3Lbmi2 + .p2align 4, 0x90 +_mcl_fp_sub3Lbmi2: ## @mcl_fp_sub3Lbmi2 +## BB#0: + movq 16(%rsi), %r8 + movq (%rsi), %rax + movq 8(%rsi), %r9 + xorl %esi, %esi + subq (%rdx), %rax + sbbq 8(%rdx), %r9 + sbbq 16(%rdx), %r8 + movq %rax, (%rdi) + movq %r9, 8(%rdi) + movq %r8, 16(%rdi) + sbbq $0, %rsi + testb $1, %sil + jne LBB46_2 +## BB#1: ## %nocarry + retq +LBB46_2: ## %carry + movq 8(%rcx), %rdx + movq 16(%rcx), %rsi + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %r9, %rdx + movq %rdx, 8(%rdi) + adcq %r8, %rsi + movq %rsi, 16(%rdi) + retq + + .globl _mcl_fp_subNF3Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subNF3Lbmi2: ## @mcl_fp_subNF3Lbmi2 +## BB#0: + movq 16(%rsi), %r10 + movq (%rsi), %r8 + movq 8(%rsi), %r9 + subq (%rdx), %r8 + sbbq 8(%rdx), %r9 + sbbq 16(%rdx), %r10 + movq %r10, %rdx + sarq $63, %rdx + movq %rdx, %rsi + shldq $1, %r10, %rsi + andq (%rcx), %rsi + movq 16(%rcx), %rax + andq %rdx, %rax + andq 8(%rcx), %rdx + addq %r8, %rsi + movq %rsi, (%rdi) + adcq %r9, %rdx + movq %rdx, 8(%rdi) + adcq %r10, %rax + movq %rax, 16(%rdi) + retq + + .globl _mcl_fpDbl_add3Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_add3Lbmi2: ## @mcl_fpDbl_add3Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 40(%rdx), %r10 + movq 40(%rsi), %r8 + movq 32(%rdx), %r11 + movq 24(%rdx), %r14 + movq 24(%rsi), %r15 + movq 32(%rsi), %r9 + movq 16(%rdx), %rbx + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rbx + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %rbx, 16(%rdi) + adcq %r14, %r15 + adcq %r11, %r9 + adcq %r10, %r8 + sbbq %rax, %rax + andl $1, %eax + movq %r15, %rdx + subq (%rcx), %rdx + movq %r9, %rsi + sbbq 8(%rcx), %rsi + movq %r8, %rbx + sbbq 16(%rcx), %rbx + sbbq $0, %rax + andl $1, %eax + cmovneq %r15, %rdx + movq %rdx, 24(%rdi) + testb %al, %al + cmovneq %r9, %rsi + movq %rsi, 32(%rdi) + cmovneq %r8, %rbx + movq %rbx, 40(%rdi) + popq %rbx + popq %r14 + popq %r15 + retq + + .globl _mcl_fpDbl_sub3Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sub3Lbmi2: ## @mcl_fpDbl_sub3Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 40(%rdx), %r10 + movq 40(%rsi), %r8 + movq 32(%rsi), %r9 + movq 24(%rsi), %r11 + movq 16(%rsi), %r14 + movq (%rsi), %rbx + movq 8(%rsi), %rax + xorl %esi, %esi + subq (%rdx), %rbx + sbbq 8(%rdx), %rax + movq 24(%rdx), %r15 + movq 32(%rdx), %r12 + sbbq 16(%rdx), %r14 + movq %rbx, (%rdi) + movq %rax, 8(%rdi) + movq %r14, 16(%rdi) + sbbq %r15, %r11 + sbbq %r12, %r9 + sbbq %r10, %r8 + movl $0, %eax + sbbq $0, %rax + andl $1, %eax + movq (%rcx), %rdx + cmoveq %rsi, %rdx + testb %al, %al + movq 16(%rcx), %rax + cmoveq %rsi, %rax + cmovneq 8(%rcx), %rsi + addq %r11, %rdx + movq %rdx, 24(%rdi) + adcq %r9, %rsi + movq %rsi, 32(%rdi) + adcq %r8, %rax + movq %rax, 40(%rdi) + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_mulUnitPre4Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mulUnitPre4Lbmi2: ## @mcl_fp_mulUnitPre4Lbmi2 +## BB#0: + mulxq 24(%rsi), %r8, %r11 + mulxq 16(%rsi), %r9, %rax + mulxq 8(%rsi), 
%r10, %rcx + mulxq (%rsi), %rdx, %rsi + movq %rdx, (%rdi) + addq %r10, %rsi + movq %rsi, 8(%rdi) + adcq %r9, %rcx + movq %rcx, 16(%rdi) + adcq %r8, %rax + movq %rax, 24(%rdi) + adcq $0, %r11 + movq %r11, 32(%rdi) + retq + + .globl _mcl_fpDbl_mulPre4Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_mulPre4Lbmi2: ## @mcl_fpDbl_mulPre4Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq (%rsi), %r14 + movq 8(%rsi), %r10 + movq (%rdx), %rcx + movq %rdx, %rbp + movq %r14, %rdx + mulxq %rcx, %rdx, %r15 + movq 24(%rsi), %r11 + movq 16(%rsi), %r9 + movq %rdx, (%rdi) + movq %r10, %rdx + mulxq %rcx, %rbx, %r12 + addq %r15, %rbx + movq %r9, %rdx + mulxq %rcx, %r13, %r15 + adcq %r12, %r13 + movq %r11, %rdx + mulxq %rcx, %rcx, %r12 + adcq %r15, %rcx + adcq $0, %r12 + movq 8(%rbp), %rax + movq %r14, %rdx + mulxq %rax, %r8, %rdx + movq %rdx, -8(%rsp) ## 8-byte Spill + addq %rbx, %r8 + movq %r10, %rdx + mulxq %rax, %r15, %rdx + movq %rdx, -16(%rsp) ## 8-byte Spill + adcq %r13, %r15 + movq %r9, %rdx + mulxq %rax, %rbx, %r13 + adcq %rcx, %rbx + movq %r11, %rdx + mulxq %rax, %rcx, %rax + adcq %r12, %rcx + sbbq %r12, %r12 + andl $1, %r12d + addq -8(%rsp), %r15 ## 8-byte Folded Reload + adcq -16(%rsp), %rbx ## 8-byte Folded Reload + adcq %r13, %rcx + movq %r8, 8(%rdi) + adcq %rax, %r12 + movq %rbp, %r13 + movq 16(%r13), %rax + movq %r14, %rdx + mulxq %rax, %rdx, %r8 + addq %r15, %rdx + movq %rdx, 16(%rdi) + movq %r10, %rdx + mulxq %rax, %rbp, %r10 + adcq %rbx, %rbp + movq %r11, %rdx + mulxq %rax, %r14, %r11 + movq %r9, %rdx + mulxq %rax, %r15, %rdx + adcq %rcx, %r15 + adcq %r12, %r14 + sbbq %rcx, %rcx + andl $1, %ecx + addq %r8, %rbp + adcq %r10, %r15 + adcq %rdx, %r14 + adcq %r11, %rcx + movq 24(%r13), %rdx + mulxq 24(%rsi), %rbx, %r8 + mulxq (%rsi), %rax, %r9 + addq %rbp, %rax + movq %rax, 24(%rdi) + mulxq 16(%rsi), %rbp, %rax + mulxq 8(%rsi), %rsi, %rdx + adcq %r15, %rsi + adcq %r14, %rbp + adcq %rcx, %rbx + sbbq %rcx, %rcx + andl $1, %ecx + addq %r9, %rsi + movq %rsi, 32(%rdi) + adcq %rdx, %rbp + movq %rbp, 40(%rdi) + adcq %rax, %rbx + movq %rbx, 48(%rdi) + adcq %r8, %rcx + movq %rcx, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sqrPre4Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre4Lbmi2: ## @mcl_fpDbl_sqrPre4Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 24(%rsi), %r8 + movq 16(%rsi), %r9 + movq (%rsi), %rcx + movq 8(%rsi), %rax + movq %rcx, %rdx + mulxq %rcx, %rdx, %r11 + movq %rdx, (%rdi) + movq %r9, %rdx + mulxq %rcx, %rbp, %r10 + movq %rbp, -16(%rsp) ## 8-byte Spill + movq %r10, -8(%rsp) ## 8-byte Spill + movq %rax, %rdx + mulxq %rcx, %r12, %r15 + addq %r12, %r11 + movq %r15, %rbx + adcq %rbp, %rbx + movq %r8, %rdx + mulxq %rcx, %rcx, %r13 + adcq %r10, %rcx + adcq $0, %r13 + addq %r12, %r11 + movq %rax, %rdx + mulxq %rax, %rbp, %r12 + adcq %rbx, %rbp + movq %r8, %rdx + mulxq %rax, %r10, %rbx + movq %r9, %rdx + mulxq %rax, %r14, %rdx + adcq %r14, %rcx + adcq %r13, %r10 + sbbq %rax, %rax + andl $1, %eax + addq %r15, %rbp + adcq %r12, %rcx + adcq %rdx, %r10 + movq %rdx, %r12 + adcq %rbx, %rax + movq %r11, 8(%rdi) + addq -16(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 16(%rdi) + movq %r8, %rdx + mulxq %r9, %r11, %r8 + movq %r9, %rdx + mulxq %r9, %r15, %rdx + adcq %r14, %rcx + adcq %r10, %r15 + adcq %rax, %r11 + sbbq %rax, %rax + andl $1, %eax + addq -8(%rsp), %rcx ## 8-byte Folded Reload + adcq %r12, %r15 + adcq %rdx, %r11 + adcq %r8, %rax + movq 
24(%rsi), %rdx + mulxq 16(%rsi), %rbx, %r8 + mulxq 8(%rsi), %rbp, %r9 + mulxq (%rsi), %rsi, %r10 + addq %rcx, %rsi + movq %rsi, 24(%rdi) + adcq %r15, %rbp + adcq %r11, %rbx + mulxq %rdx, %rdx, %rcx + adcq %rax, %rdx + sbbq %rax, %rax + andl $1, %eax + addq %r10, %rbp + movq %rbp, 32(%rdi) + adcq %r9, %rbx + movq %rbx, 40(%rdi) + adcq %r8, %rdx + movq %rdx, 48(%rdi) + adcq %rcx, %rax + movq %rax, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mont4Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mont4Lbmi2: ## @mcl_fp_mont4Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %r13 + movq %rdi, -8(%rsp) ## 8-byte Spill + movq 24(%rsi), %rdi + movq %rdi, -32(%rsp) ## 8-byte Spill + movq (%r13), %rax + movq %r13, -16(%rsp) ## 8-byte Spill + movq %rdi, %rdx + mulxq %rax, %rdi, %r11 + movq 16(%rsi), %rdx + movq %rdx, -40(%rsp) ## 8-byte Spill + mulxq %rax, %rbx, %r10 + movq (%rsi), %rbp + movq %rbp, -48(%rsp) ## 8-byte Spill + movq 8(%rsi), %rdx + movq %rdx, -56(%rsp) ## 8-byte Spill + mulxq %rax, %rsi, %r12 + movq %rbp, %rdx + mulxq %rax, %r14, %r8 + addq %rsi, %r8 + adcq %rbx, %r12 + adcq %rdi, %r10 + adcq $0, %r11 + movq -8(%rcx), %rax + movq %rax, -88(%rsp) ## 8-byte Spill + movq %r14, %rdx + imulq %rax, %rdx + movq 24(%rcx), %rax + movq %rax, -64(%rsp) ## 8-byte Spill + mulxq %rax, %r15, %rax + movq 16(%rcx), %rsi + movq %rsi, -80(%rsp) ## 8-byte Spill + mulxq %rsi, %r9, %rsi + movq (%rcx), %rbp + movq %rbp, -24(%rsp) ## 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, -72(%rsp) ## 8-byte Spill + mulxq %rcx, %rdi, %rcx + mulxq %rbp, %rdx, %rbx + addq %rdi, %rbx + adcq %r9, %rcx + adcq %r15, %rsi + adcq $0, %rax + addq %r14, %rdx + adcq %r8, %rbx + adcq %r12, %rcx + adcq %r10, %rsi + adcq %r11, %rax + sbbq %rdi, %rdi + andl $1, %edi + movq 8(%r13), %rdx + mulxq -32(%rsp), %r12, %r10 ## 8-byte Folded Reload + mulxq -40(%rsp), %r15, %r11 ## 8-byte Folded Reload + mulxq -56(%rsp), %r14, %rbp ## 8-byte Folded Reload + mulxq -48(%rsp), %r8, %r9 ## 8-byte Folded Reload + addq %r14, %r9 + adcq %r15, %rbp + adcq %r12, %r11 + adcq $0, %r10 + addq %rbx, %r8 + adcq %rcx, %r9 + adcq %rsi, %rbp + adcq %rax, %r11 + adcq %rdi, %r10 + sbbq %rbx, %rbx + andl $1, %ebx + movq %r8, %rdx + imulq -88(%rsp), %rdx ## 8-byte Folded Reload + mulxq -64(%rsp), %r14, %rcx ## 8-byte Folded Reload + mulxq -80(%rsp), %r15, %rsi ## 8-byte Folded Reload + mulxq -72(%rsp), %r12, %rax ## 8-byte Folded Reload + movq -24(%rsp), %r13 ## 8-byte Reload + mulxq %r13, %rdx, %rdi + addq %r12, %rdi + adcq %r15, %rax + adcq %r14, %rsi + adcq $0, %rcx + addq %r8, %rdx + adcq %r9, %rdi + adcq %rbp, %rax + adcq %r11, %rsi + adcq %r10, %rcx + adcq $0, %rbx + movq -16(%rsp), %rdx ## 8-byte Reload + movq 16(%rdx), %rdx + mulxq -32(%rsp), %r14, %r11 ## 8-byte Folded Reload + mulxq -40(%rsp), %r15, %rbp ## 8-byte Folded Reload + mulxq -56(%rsp), %r12, %r8 ## 8-byte Folded Reload + mulxq -48(%rsp), %r9, %r10 ## 8-byte Folded Reload + addq %r12, %r10 + adcq %r15, %r8 + adcq %r14, %rbp + adcq $0, %r11 + addq %rdi, %r9 + adcq %rax, %r10 + adcq %rsi, %r8 + adcq %rcx, %rbp + adcq %rbx, %r11 + sbbq %rax, %rax + movq %r9, %rdx + imulq -88(%rsp), %rdx ## 8-byte Folded Reload + mulxq -72(%rsp), %rcx, %rsi ## 8-byte Folded Reload + mulxq %r13, %r14, %rdi + addq %rcx, %rdi + mulxq -80(%rsp), %rcx, %r15 ## 8-byte Folded Reload + adcq %rsi, %rcx + movq -64(%rsp), %r13 ## 8-byte Reload + mulxq %r13, %rbx, %rsi + adcq %r15, %rbx + adcq $0, %rsi + andl $1, %eax + 
addq %r9, %r14 + adcq %r10, %rdi + adcq %r8, %rcx + adcq %rbp, %rbx + adcq %r11, %rsi + adcq $0, %rax + movq -16(%rsp), %rdx ## 8-byte Reload + movq 24(%rdx), %rdx + mulxq -32(%rsp), %r11, %r8 ## 8-byte Folded Reload + mulxq -40(%rsp), %r15, %r9 ## 8-byte Folded Reload + mulxq -56(%rsp), %r12, %r14 ## 8-byte Folded Reload + mulxq -48(%rsp), %r10, %rbp ## 8-byte Folded Reload + addq %r12, %rbp + adcq %r15, %r14 + adcq %r11, %r9 + adcq $0, %r8 + addq %rdi, %r10 + adcq %rcx, %rbp + adcq %rbx, %r14 + adcq %rsi, %r9 + adcq %rax, %r8 + sbbq %rax, %rax + andl $1, %eax + movq -88(%rsp), %rdx ## 8-byte Reload + imulq %r10, %rdx + mulxq %r13, %rcx, %rdi + movq %rcx, -88(%rsp) ## 8-byte Spill + mulxq -80(%rsp), %r15, %rsi ## 8-byte Folded Reload + movq -72(%rsp), %rbx ## 8-byte Reload + mulxq %rbx, %r12, %rcx + movq -24(%rsp), %r11 ## 8-byte Reload + mulxq %r11, %rdx, %r13 + addq %r12, %r13 + adcq %r15, %rcx + adcq -88(%rsp), %rsi ## 8-byte Folded Reload + adcq $0, %rdi + addq %r10, %rdx + adcq %rbp, %r13 + adcq %r14, %rcx + adcq %r9, %rsi + adcq %r8, %rdi + adcq $0, %rax + movq %r13, %rdx + subq %r11, %rdx + movq %rcx, %rbp + sbbq %rbx, %rbp + movq %rsi, %r8 + sbbq -80(%rsp), %r8 ## 8-byte Folded Reload + movq %rdi, %rbx + sbbq -64(%rsp), %rbx ## 8-byte Folded Reload + sbbq $0, %rax + andl $1, %eax + cmovneq %rdi, %rbx + testb %al, %al + cmovneq %r13, %rdx + movq -8(%rsp), %rax ## 8-byte Reload + movq %rdx, (%rax) + cmovneq %rcx, %rbp + movq %rbp, 8(%rax) + cmovneq %rsi, %r8 + movq %r8, 16(%rax) + movq %rbx, 24(%rax) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF4Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montNF4Lbmi2: ## @mcl_fp_montNF4Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, -8(%rsp) ## 8-byte Spill + movq (%rsi), %rdi + movq %rdi, -56(%rsp) ## 8-byte Spill + movq 8(%rsi), %rbp + movq %rbp, -64(%rsp) ## 8-byte Spill + movq (%rdx), %rax + movq %rdx, %r15 + movq %r15, -24(%rsp) ## 8-byte Spill + movq %rbp, %rdx + mulxq %rax, %rbp, %r9 + movq %rdi, %rdx + mulxq %rax, %r12, %rbx + movq 16(%rsi), %rdx + movq %rdx, -40(%rsp) ## 8-byte Spill + addq %rbp, %rbx + mulxq %rax, %r14, %rbp + adcq %r9, %r14 + movq 24(%rsi), %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + mulxq %rax, %r8, %rdi + adcq %rbp, %r8 + adcq $0, %rdi + movq -8(%rcx), %r13 + movq (%rcx), %rax + movq %rax, -48(%rsp) ## 8-byte Spill + movq %r12, %rdx + imulq %r13, %rdx + mulxq %rax, %rax, %r11 + addq %r12, %rax + movq 8(%rcx), %rax + movq %rax, -16(%rsp) ## 8-byte Spill + mulxq %rax, %rbp, %r10 + adcq %rbx, %rbp + movq 16(%rcx), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + mulxq %rax, %rsi, %rbx + adcq %r14, %rsi + movq 24(%rcx), %rax + movq %rax, -72(%rsp) ## 8-byte Spill + mulxq %rax, %rcx, %rdx + adcq %r8, %rcx + adcq $0, %rdi + addq %r11, %rbp + adcq %r10, %rsi + adcq %rbx, %rcx + adcq %rdx, %rdi + movq 8(%r15), %rdx + movq -64(%rsp), %r12 ## 8-byte Reload + mulxq %r12, %rbx, %r9 + movq -56(%rsp), %r15 ## 8-byte Reload + mulxq %r15, %r10, %r11 + addq %rbx, %r11 + mulxq -40(%rsp), %rax, %r8 ## 8-byte Folded Reload + adcq %r9, %rax + mulxq -80(%rsp), %r9, %rbx ## 8-byte Folded Reload + adcq %r8, %r9 + adcq $0, %rbx + addq %rbp, %r10 + adcq %rsi, %r11 + adcq %rcx, %rax + adcq %rdi, %r9 + adcq $0, %rbx + movq %r10, %rdx + imulq %r13, %rdx + movq -48(%rsp), %r14 ## 8-byte Reload + mulxq %r14, %rcx, %r8 + addq %r10, %rcx + mulxq -16(%rsp), %r10, %rdi ## 8-byte Folded Reload + adcq %r11, %r10 + mulxq -32(%rsp), %rcx, %rsi ## 
8-byte Folded Reload + adcq %rax, %rcx + mulxq -72(%rsp), %rax, %rdx ## 8-byte Folded Reload + adcq %r9, %rax + adcq $0, %rbx + addq %r8, %r10 + adcq %rdi, %rcx + adcq %rsi, %rax + adcq %rdx, %rbx + movq -24(%rsp), %rdx ## 8-byte Reload + movq 16(%rdx), %rdx + mulxq %r12, %rsi, %r8 + mulxq %r15, %r11, %rbp + addq %rsi, %rbp + movq -40(%rsp), %r12 ## 8-byte Reload + mulxq %r12, %rdi, %r9 + adcq %r8, %rdi + mulxq -80(%rsp), %r8, %rsi ## 8-byte Folded Reload + adcq %r9, %r8 + adcq $0, %rsi + addq %r10, %r11 + adcq %rcx, %rbp + adcq %rax, %rdi + adcq %rbx, %r8 + adcq $0, %rsi + movq %r11, %rdx + imulq %r13, %rdx + mulxq %r14, %rax, %r10 + addq %r11, %rax + movq -16(%rsp), %r14 ## 8-byte Reload + mulxq %r14, %r9, %rbx + adcq %rbp, %r9 + movq -32(%rsp), %r15 ## 8-byte Reload + mulxq %r15, %rax, %rbp + adcq %rdi, %rax + mulxq -72(%rsp), %rcx, %rdx ## 8-byte Folded Reload + adcq %r8, %rcx + adcq $0, %rsi + addq %r10, %r9 + adcq %rbx, %rax + adcq %rbp, %rcx + adcq %rdx, %rsi + movq -24(%rsp), %rdx ## 8-byte Reload + movq 24(%rdx), %rdx + mulxq -64(%rsp), %rbx, %r8 ## 8-byte Folded Reload + mulxq -56(%rsp), %r11, %rbp ## 8-byte Folded Reload + addq %rbx, %rbp + mulxq %r12, %rdi, %r10 + adcq %r8, %rdi + mulxq -80(%rsp), %r8, %rbx ## 8-byte Folded Reload + adcq %r10, %r8 + adcq $0, %rbx + addq %r9, %r11 + adcq %rax, %rbp + adcq %rcx, %rdi + adcq %rsi, %r8 + adcq $0, %rbx + imulq %r11, %r13 + movq %r13, %rdx + movq -48(%rsp), %r12 ## 8-byte Reload + mulxq %r12, %rcx, %r9 + addq %r11, %rcx + mulxq %r14, %r11, %r10 + adcq %rbp, %r11 + movq %r15, %rsi + mulxq %rsi, %rax, %rcx + adcq %rdi, %rax + movq -72(%rsp), %rbp ## 8-byte Reload + mulxq %rbp, %r15, %rdx + adcq %r8, %r15 + adcq $0, %rbx + addq %r9, %r11 + adcq %r10, %rax + adcq %rcx, %r15 + adcq %rdx, %rbx + movq %r11, %rcx + subq %r12, %rcx + movq %rax, %rdx + sbbq %r14, %rdx + movq %r15, %rdi + sbbq %rsi, %rdi + movq %rbx, %rsi + sbbq %rbp, %rsi + cmovsq %r11, %rcx + movq -8(%rsp), %rbp ## 8-byte Reload + movq %rcx, (%rbp) + cmovsq %rax, %rdx + movq %rdx, 8(%rbp) + cmovsq %r15, %rdi + movq %rdi, 16(%rbp) + cmovsq %rbx, %rsi + movq %rsi, 24(%rbp) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed4Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montRed4Lbmi2: ## @mcl_fp_montRed4Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rdi, -8(%rsp) ## 8-byte Spill + movq -8(%rcx), %r13 + movq (%rcx), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + movq (%rsi), %r10 + movq %r10, %rdx + imulq %r13, %rdx + movq 24(%rcx), %rdi + mulxq %rdi, %r9, %r15 + movq %rdi, %r14 + movq %r14, -40(%rsp) ## 8-byte Spill + movq 16(%rcx), %rdi + movq %rdi, -48(%rsp) ## 8-byte Spill + mulxq %rdi, %rdi, %rbx + movq 8(%rcx), %rcx + movq %rcx, -56(%rsp) ## 8-byte Spill + mulxq %rcx, %rcx, %r8 + mulxq %rax, %rdx, %rbp + addq %rcx, %rbp + adcq %rdi, %r8 + adcq %r9, %rbx + adcq $0, %r15 + movq 56(%rsi), %r11 + movq 48(%rsi), %rcx + addq %r10, %rdx + movq 40(%rsi), %r12 + adcq 8(%rsi), %rbp + adcq 16(%rsi), %r8 + adcq 24(%rsi), %rbx + adcq 32(%rsi), %r15 + adcq $0, %r12 + adcq $0, %rcx + movq %rcx, -64(%rsp) ## 8-byte Spill + adcq $0, %r11 + sbbq %rsi, %rsi + andl $1, %esi + movq %rbp, %rdx + imulq %r13, %rdx + mulxq %r14, %rax, %r9 + movq %rax, -72(%rsp) ## 8-byte Spill + mulxq -48(%rsp), %r14, %rdi ## 8-byte Folded Reload + mulxq -56(%rsp), %r10, %rcx ## 8-byte Folded Reload + mulxq -32(%rsp), %rdx, %rax ## 8-byte Folded Reload + addq %r10, %rax + adcq %r14, %rcx + adcq 
-72(%rsp), %rdi ## 8-byte Folded Reload + adcq $0, %r9 + addq %rbp, %rdx + adcq %r8, %rax + adcq %rbx, %rcx + adcq %r15, %rdi + adcq %r12, %r9 + adcq $0, -64(%rsp) ## 8-byte Folded Spill + adcq $0, %r11 + movq %r11, -72(%rsp) ## 8-byte Spill + adcq $0, %rsi + movq %rax, %rdx + imulq %r13, %rdx + movq -40(%rsp), %r15 ## 8-byte Reload + mulxq %r15, %rbp, %r8 + movq %rbp, -16(%rsp) ## 8-byte Spill + movq -48(%rsp), %r11 ## 8-byte Reload + mulxq %r11, %rbx, %r10 + movq %rbx, -24(%rsp) ## 8-byte Spill + mulxq -56(%rsp), %r12, %rbp ## 8-byte Folded Reload + movq -32(%rsp), %r14 ## 8-byte Reload + mulxq %r14, %rdx, %rbx + addq %r12, %rbx + adcq -24(%rsp), %rbp ## 8-byte Folded Reload + adcq -16(%rsp), %r10 ## 8-byte Folded Reload + adcq $0, %r8 + addq %rax, %rdx + adcq %rcx, %rbx + adcq %rdi, %rbp + adcq %r9, %r10 + adcq -64(%rsp), %r8 ## 8-byte Folded Reload + adcq $0, -72(%rsp) ## 8-byte Folded Spill + adcq $0, %rsi + imulq %rbx, %r13 + movq %r13, %rdx + mulxq %r15, %rax, %rdi + movq %rax, -64(%rsp) ## 8-byte Spill + movq %r13, %rdx + mulxq %r11, %r9, %rax + movq -56(%rsp), %r11 ## 8-byte Reload + mulxq %r11, %r12, %rcx + mulxq %r14, %r15, %r13 + addq %r12, %r13 + adcq %r9, %rcx + adcq -64(%rsp), %rax ## 8-byte Folded Reload + adcq $0, %rdi + addq %rbx, %r15 + adcq %rbp, %r13 + adcq %r10, %rcx + adcq %r8, %rax + adcq -72(%rsp), %rdi ## 8-byte Folded Reload + adcq $0, %rsi + movq %r13, %rdx + subq %r14, %rdx + movq %rcx, %rbp + sbbq %r11, %rbp + movq %rax, %r8 + sbbq -48(%rsp), %r8 ## 8-byte Folded Reload + movq %rdi, %rbx + sbbq -40(%rsp), %rbx ## 8-byte Folded Reload + sbbq $0, %rsi + andl $1, %esi + cmovneq %rdi, %rbx + testb %sil, %sil + cmovneq %r13, %rdx + movq -8(%rsp), %rsi ## 8-byte Reload + movq %rdx, (%rsi) + cmovneq %rcx, %rbp + movq %rbp, 8(%rsi) + cmovneq %rax, %r8 + movq %r8, 16(%rsi) + movq %rbx, 24(%rsi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_addPre4Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addPre4Lbmi2: ## @mcl_fp_addPre4Lbmi2 +## BB#0: + movq 24(%rdx), %r8 + movq 24(%rsi), %r9 + movq 16(%rdx), %rax + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rax + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %rax, 16(%rdi) + adcq %r8, %r9 + movq %r9, 24(%rdi) + sbbq %rax, %rax + andl $1, %eax + retq + + .globl _mcl_fp_subPre4Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subPre4Lbmi2: ## @mcl_fp_subPre4Lbmi2 +## BB#0: + movq 24(%rdx), %r8 + movq 24(%rsi), %r9 + movq 16(%rsi), %r10 + movq (%rsi), %rcx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rcx + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %r10 + movq %rcx, (%rdi) + movq %rsi, 8(%rdi) + movq %r10, 16(%rdi) + sbbq %r8, %r9 + movq %r9, 24(%rdi) + sbbq $0, %rax + andl $1, %eax + retq + + .globl _mcl_fp_shr1_4Lbmi2 + .p2align 4, 0x90 +_mcl_fp_shr1_4Lbmi2: ## @mcl_fp_shr1_4Lbmi2 +## BB#0: + movq 24(%rsi), %rax + movq 16(%rsi), %rcx + movq (%rsi), %rdx + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rdx + movq %rdx, (%rdi) + shrdq $1, %rcx, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rax, %rcx + movq %rcx, 16(%rdi) + shrq %rax + movq %rax, 24(%rdi) + retq + + .globl _mcl_fp_add4Lbmi2 + .p2align 4, 0x90 +_mcl_fp_add4Lbmi2: ## @mcl_fp_add4Lbmi2 +## BB#0: + movq 24(%rdx), %r10 + movq 24(%rsi), %r8 + movq 16(%rdx), %r9 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r9 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r9, 16(%rdi) + adcq %r10, %r8 + movq %r8, 24(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), 
%rax + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r9 + sbbq 24(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne LBB59_2 +## BB#1: ## %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r9, 16(%rdi) + movq %r8, 24(%rdi) +LBB59_2: ## %carry + retq + + .globl _mcl_fp_addNF4Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addNF4Lbmi2: ## @mcl_fp_addNF4Lbmi2 +## BB#0: + pushq %rbx + movq 24(%rdx), %r8 + movq 16(%rdx), %r9 + movq (%rdx), %r11 + movq 8(%rdx), %r10 + addq (%rsi), %r11 + adcq 8(%rsi), %r10 + adcq 16(%rsi), %r9 + adcq 24(%rsi), %r8 + movq %r11, %rsi + subq (%rcx), %rsi + movq %r10, %rdx + sbbq 8(%rcx), %rdx + movq %r9, %rax + sbbq 16(%rcx), %rax + movq %r8, %rbx + sbbq 24(%rcx), %rbx + testq %rbx, %rbx + cmovsq %r11, %rsi + movq %rsi, (%rdi) + cmovsq %r10, %rdx + movq %rdx, 8(%rdi) + cmovsq %r9, %rax + movq %rax, 16(%rdi) + cmovsq %r8, %rbx + movq %rbx, 24(%rdi) + popq %rbx + retq + + .globl _mcl_fp_sub4Lbmi2 + .p2align 4, 0x90 +_mcl_fp_sub4Lbmi2: ## @mcl_fp_sub4Lbmi2 +## BB#0: + movq 24(%rdx), %r10 + movq 24(%rsi), %r8 + movq 16(%rsi), %r9 + movq (%rsi), %rax + movq 8(%rsi), %r11 + xorl %esi, %esi + subq (%rdx), %rax + sbbq 8(%rdx), %r11 + sbbq 16(%rdx), %r9 + movq %rax, (%rdi) + movq %r11, 8(%rdi) + movq %r9, 16(%rdi) + sbbq %r10, %r8 + movq %r8, 24(%rdi) + sbbq $0, %rsi + testb $1, %sil + jne LBB61_2 +## BB#1: ## %nocarry + retq +LBB61_2: ## %carry + movq 24(%rcx), %r10 + movq 8(%rcx), %rsi + movq 16(%rcx), %rdx + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %r11, %rsi + movq %rsi, 8(%rdi) + adcq %r9, %rdx + movq %rdx, 16(%rdi) + adcq %r8, %r10 + movq %r10, 24(%rdi) + retq + + .globl _mcl_fp_subNF4Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subNF4Lbmi2: ## @mcl_fp_subNF4Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movdqu (%rdx), %xmm0 + movdqu 16(%rdx), %xmm1 + pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] + movd %xmm2, %r8 + movdqu (%rsi), %xmm2 + movdqu 16(%rsi), %xmm3 + pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1] + movd %xmm4, %r15 + movd %xmm1, %r9 + movd %xmm3, %r11 + pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] + movd %xmm1, %r10 + pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1] + movd %xmm1, %r14 + movd %xmm0, %rdx + movd %xmm2, %r12 + subq %rdx, %r12 + sbbq %r10, %r14 + sbbq %r9, %r11 + sbbq %r8, %r15 + movq %r15, %rdx + sarq $63, %rdx + movq 24(%rcx), %rsi + andq %rdx, %rsi + movq 16(%rcx), %rax + andq %rdx, %rax + movq 8(%rcx), %rbx + andq %rdx, %rbx + andq (%rcx), %rdx + addq %r12, %rdx + movq %rdx, (%rdi) + adcq %r14, %rbx + movq %rbx, 8(%rdi) + adcq %r11, %rax + movq %rax, 16(%rdi) + adcq %r15, %rsi + movq %rsi, 24(%rdi) + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fpDbl_add4Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_add4Lbmi2: ## @mcl_fpDbl_add4Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r9 + movq 56(%rsi), %r8 + movq 48(%rdx), %r10 + movq 48(%rsi), %r12 + movq 40(%rdx), %r11 + movq 32(%rdx), %r14 + movq 24(%rdx), %r15 + movq 16(%rdx), %rbx + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rbx + movq 40(%rsi), %r13 + movq 24(%rsi), %rbp + movq 32(%rsi), %rsi + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %rbx, 16(%rdi) + adcq %r15, %rbp + movq %rbp, 24(%rdi) + adcq %r14, %rsi + adcq %r11, %r13 + adcq %r10, %r12 + adcq %r9, %r8 + sbbq %rax, %rax + andl $1, %eax + movq %rsi, %rdx + subq (%rcx), %rdx + movq %r13, %rbp + sbbq 8(%rcx), %rbp + movq %r12, %rbx + sbbq 16(%rcx), %rbx + movq %r8, %r9 + sbbq 24(%rcx), %r9 + 
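+ ## Borrow handling: %rdx/%rbp/%rbx/%r9 hold the trial subtraction (sum - p);
+ ## the sbbq $0 below folds its borrow into %rax so the cmovne chain keeps the
+ ## unreduced sum whenever (sum - p) underflowed.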
sbbq $0, %rax + andl $1, %eax + cmovneq %rsi, %rdx + movq %rdx, 32(%rdi) + testb %al, %al + cmovneq %r13, %rbp + movq %rbp, 40(%rdi) + cmovneq %r12, %rbx + movq %rbx, 48(%rdi) + cmovneq %r8, %r9 + movq %r9, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sub4Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sub4Lbmi2: ## @mcl_fpDbl_sub4Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r9 + movq 56(%rsi), %r8 + movq 48(%rdx), %r10 + movq 24(%rdx), %r11 + movq (%rsi), %rbx + xorl %eax, %eax + subq (%rdx), %rbx + movq %rbx, (%rdi) + movq 8(%rsi), %rbx + sbbq 8(%rdx), %rbx + movq %rbx, 8(%rdi) + movq 16(%rsi), %rbx + sbbq 16(%rdx), %rbx + movq %rbx, 16(%rdi) + movq 24(%rsi), %rbx + sbbq %r11, %rbx + movq 40(%rdx), %r11 + movq 32(%rdx), %rdx + movq %rbx, 24(%rdi) + movq 32(%rsi), %r12 + sbbq %rdx, %r12 + movq 48(%rsi), %r14 + movq 40(%rsi), %r15 + sbbq %r11, %r15 + sbbq %r10, %r14 + sbbq %r9, %r8 + movl $0, %edx + sbbq $0, %rdx + andl $1, %edx + movq (%rcx), %rsi + cmoveq %rax, %rsi + testb %dl, %dl + movq 16(%rcx), %rdx + cmoveq %rax, %rdx + movq 24(%rcx), %rbx + cmoveq %rax, %rbx + cmovneq 8(%rcx), %rax + addq %r12, %rsi + movq %rsi, 32(%rdi) + adcq %r15, %rax + movq %rax, 40(%rdi) + adcq %r14, %rdx + movq %rdx, 48(%rdi) + adcq %r8, %rbx + movq %rbx, 56(%rdi) + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_mulUnitPre5Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mulUnitPre5Lbmi2: ## @mcl_fp_mulUnitPre5Lbmi2 +## BB#0: + pushq %r14 + pushq %rbx + mulxq 32(%rsi), %r8, %r11 + mulxq 24(%rsi), %r9, %rax + mulxq 16(%rsi), %r10, %rcx + mulxq 8(%rsi), %r14, %rbx + mulxq (%rsi), %rdx, %rsi + movq %rdx, (%rdi) + addq %r14, %rsi + movq %rsi, 8(%rdi) + adcq %r10, %rbx + movq %rbx, 16(%rdi) + adcq %r9, %rcx + movq %rcx, 24(%rdi) + adcq %r8, %rax + movq %rax, 32(%rdi) + adcq $0, %r11 + movq %r11, 40(%rdi) + popq %rbx + popq %r14 + retq + + .globl _mcl_fpDbl_mulPre5Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_mulPre5Lbmi2: ## @mcl_fpDbl_mulPre5Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, -24(%rsp) ## 8-byte Spill + movq %rdi, -40(%rsp) ## 8-byte Spill + movq (%rsi), %r11 + movq 8(%rsi), %r10 + movq (%rdx), %rcx + movq %r10, %rdx + mulxq %rcx, %rax, %r14 + movq %r11, %rdx + mulxq %rcx, %rdx, %rbx + movq %rdx, -56(%rsp) ## 8-byte Spill + movq 24(%rsi), %rbp + movq %rbp, -48(%rsp) ## 8-byte Spill + movq 16(%rsi), %r15 + addq %rax, %rbx + movq %r15, %rdx + mulxq %rcx, %rax, %r13 + adcq %r14, %rax + movq %rbp, %rdx + mulxq %rcx, %r8, %r12 + adcq %r13, %r8 + movq 32(%rsi), %r14 + movq %r14, %rdx + mulxq %rcx, %r9, %r13 + adcq %r12, %r9 + movq -56(%rsp), %rcx ## 8-byte Reload + movq %rcx, (%rdi) + adcq $0, %r13 + movq -24(%rsp), %rdi ## 8-byte Reload + movq 8(%rdi), %rbp + movq %r11, %rdx + mulxq %rbp, %r12, %r11 + addq %rbx, %r12 + movq %r10, %rdx + mulxq %rbp, %rbx, %rcx + movq %rcx, -56(%rsp) ## 8-byte Spill + adcq %rax, %rbx + movq %r15, %rdx + mulxq %rbp, %rcx, %r10 + adcq %r8, %rcx + movq -48(%rsp), %rdx ## 8-byte Reload + mulxq %rbp, %rax, %r8 + adcq %r9, %rax + movq %r14, %rdx + mulxq %rbp, %r15, %rdx + adcq %r13, %r15 + sbbq %r14, %r14 + andl $1, %r14d + addq %r11, %rbx + movq -40(%rsp), %rbp ## 8-byte Reload + movq %r12, 8(%rbp) + adcq -56(%rsp), %rcx ## 8-byte Folded Reload + adcq %r10, %rax + adcq %r8, %r15 + adcq %rdx, %r14 + movq (%rsi), %rdx + movq %rdx, -56(%rsp) ## 8-byte Spill + movq 8(%rsi), %r8 + movq %r8, -48(%rsp) ## 8-byte Spill + movq 
16(%rdi), %rbp + mulxq %rbp, %r12, %rdx + movq %rdx, -8(%rsp) ## 8-byte Spill + addq %rbx, %r12 + movq %r8, %rdx + mulxq %rbp, %rbx, %rdx + movq %rdx, -16(%rsp) ## 8-byte Spill + adcq %rcx, %rbx + movq 16(%rsi), %r11 + movq %r11, %rdx + mulxq %rbp, %rcx, %rdx + movq %rdx, -32(%rsp) ## 8-byte Spill + adcq %rax, %rcx + movq 24(%rsi), %r13 + movq %r13, %rdx + mulxq %rbp, %r9, %r10 + adcq %r15, %r9 + movq 32(%rsi), %r15 + movq %r15, %rdx + mulxq %rbp, %r8, %rdx + adcq %r14, %r8 + sbbq %r14, %r14 + andl $1, %r14d + addq -8(%rsp), %rbx ## 8-byte Folded Reload + adcq -16(%rsp), %rcx ## 8-byte Folded Reload + adcq -32(%rsp), %r9 ## 8-byte Folded Reload + adcq %r10, %r8 + adcq %rdx, %r14 + movq -40(%rsp), %r10 ## 8-byte Reload + movq %r12, 16(%r10) + movq %rdi, %rbp + movq 24(%rbp), %rax + movq -56(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %r12, %rdi + addq %rbx, %r12 + movq -48(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %rbx, %rdx + movq %rdx, -48(%rsp) ## 8-byte Spill + adcq %rcx, %rbx + movq %r11, %rdx + mulxq %rax, %rcx, %r11 + adcq %r9, %rcx + movq %r13, %rdx + mulxq %rax, %r13, %r9 + adcq %r8, %r13 + movq %r15, %rdx + mulxq %rax, %r8, %rdx + adcq %r14, %r8 + sbbq %r14, %r14 + andl $1, %r14d + addq %rdi, %rbx + movq %r12, 24(%r10) + movq %r10, %rdi + adcq -48(%rsp), %rcx ## 8-byte Folded Reload + adcq %r11, %r13 + adcq %r9, %r8 + adcq %rdx, %r14 + movq 32(%rbp), %rdx + mulxq 8(%rsi), %rax, %r9 + mulxq (%rsi), %rbp, %r10 + addq %rbx, %rbp + adcq %rcx, %rax + mulxq 16(%rsi), %rbx, %r11 + adcq %r13, %rbx + movq %rbp, 32(%rdi) + mulxq 32(%rsi), %rcx, %r15 + mulxq 24(%rsi), %rsi, %rdx + adcq %r8, %rsi + adcq %r14, %rcx + sbbq %rbp, %rbp + andl $1, %ebp + addq %r10, %rax + movq %rax, 40(%rdi) + adcq %r9, %rbx + movq %rbx, 48(%rdi) + adcq %r11, %rsi + movq %rsi, 56(%rdi) + adcq %rdx, %rcx + movq %rcx, 64(%rdi) + adcq %r15, %rbp + movq %rbp, 72(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sqrPre5Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre5Lbmi2: ## @mcl_fpDbl_sqrPre5Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 16(%rsi), %r11 + movq (%rsi), %rax + movq 8(%rsi), %rcx + movq %r11, %rdx + mulxq %rax, %rbx, %r15 + movq 32(%rsi), %r9 + movq 24(%rsi), %r13 + movq %rcx, %rdx + mulxq %rax, %r12, %rbp + movq %rbp, -16(%rsp) ## 8-byte Spill + movq %rax, %rdx + mulxq %rax, %rdx, %r14 + movq %rdx, -24(%rsp) ## 8-byte Spill + addq %r12, %r14 + adcq %rbp, %rbx + movq %r13, %rdx + mulxq %rax, %r8, %r10 + adcq %r15, %r8 + movq %r9, %rdx + movq %r9, -8(%rsp) ## 8-byte Spill + mulxq %rax, %rbp, %r15 + adcq %r10, %rbp + movq -24(%rsp), %rax ## 8-byte Reload + movq %rax, (%rdi) + adcq $0, %r15 + addq %r12, %r14 + movq %rcx, %rdx + mulxq %rcx, %rax, %rdx + movq %rdx, -24(%rsp) ## 8-byte Spill + adcq %rbx, %rax + movq %r11, %rdx + mulxq %rcx, %rbx, %r10 + adcq %r8, %rbx + movq %r13, %rdx + mulxq %rcx, %r13, %r8 + adcq %rbp, %r13 + movq %r9, %rdx + mulxq %rcx, %r12, %rcx + adcq %r15, %r12 + sbbq %r15, %r15 + andl $1, %r15d + addq -16(%rsp), %rax ## 8-byte Folded Reload + movq %r14, 8(%rdi) + adcq -24(%rsp), %rbx ## 8-byte Folded Reload + adcq %r10, %r13 + adcq %r8, %r12 + adcq %rcx, %r15 + movq (%rsi), %r9 + movq 8(%rsi), %r10 + movq %r9, %rdx + mulxq %r11, %rbp, %rcx + movq %rcx, -16(%rsp) ## 8-byte Spill + addq %rax, %rbp + movq %r10, %rdx + mulxq %r11, %rax, %r8 + adcq %rbx, %rax + movq %r11, %rdx + mulxq %r11, %r14, %rcx + movq %rcx, -24(%rsp) ## 8-byte Spill + adcq %r13, %r14 + movq 24(%rsi), %rcx + 
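+ ## a[3] (24(%rsi)) is staged through %rdx because mulx takes one multiplicand
+ ## implicitly from %rdx.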
movq %rcx, %rdx + mulxq %r11, %rbx, %r13 + adcq %r12, %rbx + movq -8(%rsp), %rdx ## 8-byte Reload + mulxq %r11, %r12, %rdx + adcq %r15, %r12 + sbbq %r15, %r15 + andl $1, %r15d + addq -16(%rsp), %rax ## 8-byte Folded Reload + adcq %r8, %r14 + movq %rbp, 16(%rdi) + adcq -24(%rsp), %rbx ## 8-byte Folded Reload + adcq %r13, %r12 + adcq %rdx, %r15 + movq %r10, %rdx + mulxq %rcx, %r10, %rdx + movq %rdx, -8(%rsp) ## 8-byte Spill + movq %r9, %rdx + mulxq %rcx, %r13, %rdx + movq %rdx, -16(%rsp) ## 8-byte Spill + addq %rax, %r13 + movq 16(%rsi), %r8 + movq 32(%rsi), %rax + adcq %r14, %r10 + movq %r8, %rdx + mulxq %rcx, %r9, %r14 + adcq %rbx, %r9 + movq %rcx, %rdx + mulxq %rcx, %r11, %rbp + adcq %r12, %r11 + movq %rax, %rdx + mulxq %rcx, %r12, %rdx + adcq %r15, %r12 + sbbq %rbx, %rbx + andl $1, %ebx + addq -16(%rsp), %r10 ## 8-byte Folded Reload + movq %r13, 24(%rdi) + adcq -8(%rsp), %r9 ## 8-byte Folded Reload + adcq %r14, %r11 + adcq %rbp, %r12 + adcq %rdx, %rbx + movq %rax, %rdx + mulxq 24(%rsi), %rbp, %r14 + mulxq (%rsi), %rdx, %r15 + addq %r10, %rdx + movq %rdx, 32(%rdi) + movq %rax, %rdx + mulxq 8(%rsi), %rsi, %r10 + adcq %r9, %rsi + movq %r8, %rdx + mulxq %rax, %rcx, %r8 + adcq %r11, %rcx + adcq %r12, %rbp + movq %rax, %rdx + mulxq %rax, %rdx, %rax + adcq %rbx, %rdx + sbbq %rbx, %rbx + andl $1, %ebx + addq %r15, %rsi + movq %rsi, 40(%rdi) + adcq %r10, %rcx + movq %rcx, 48(%rdi) + adcq %r8, %rbp + movq %rbp, 56(%rdi) + adcq %r14, %rdx + movq %rdx, 64(%rdi) + adcq %rax, %rbx + movq %rbx, 72(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mont5Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mont5Lbmi2: ## @mcl_fp_mont5Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rdi, -8(%rsp) ## 8-byte Spill + movq 32(%rsi), %rdi + movq %rdi, -104(%rsp) ## 8-byte Spill + movq (%rdx), %rax + movq %rdi, %rdx + mulxq %rax, %r10, %rbx + movq 24(%rsi), %rdx + movq %rdx, -24(%rsp) ## 8-byte Spill + mulxq %rax, %r12, %r14 + movq 16(%rsi), %rdx + movq %rdx, -32(%rsp) ## 8-byte Spill + mulxq %rax, %r13, %r11 + movq (%rsi), %rbp + movq %rbp, -40(%rsp) ## 8-byte Spill + movq 8(%rsi), %rdx + movq %rdx, -48(%rsp) ## 8-byte Spill + mulxq %rax, %rdi, %r9 + movq %rbp, %rdx + mulxq %rax, %r15, %r8 + addq %rdi, %r8 + adcq %r13, %r9 + adcq %r12, %r11 + adcq %r10, %r14 + adcq $0, %rbx + movq %rbx, -112(%rsp) ## 8-byte Spill + movq -8(%rcx), %rax + movq %rax, -16(%rsp) ## 8-byte Spill + movq %r15, %rdx + imulq %rax, %rdx + movq 32(%rcx), %rax + movq %rax, -56(%rsp) ## 8-byte Spill + mulxq %rax, %rax, %r12 + movq %rax, -120(%rsp) ## 8-byte Spill + movq 24(%rcx), %rax + movq %rax, -64(%rsp) ## 8-byte Spill + mulxq %rax, %r13, %r10 + movq 8(%rcx), %rax + movq %rax, -72(%rsp) ## 8-byte Spill + mulxq %rax, %rdi, %rbp + movq (%rcx), %rax + movq %rax, -80(%rsp) ## 8-byte Spill + mulxq %rax, %rsi, %rbx + addq %rdi, %rbx + movq 16(%rcx), %rax + movq %rax, -88(%rsp) ## 8-byte Spill + mulxq %rax, %rdi, %rcx + adcq %rbp, %rdi + adcq %r13, %rcx + adcq -120(%rsp), %r10 ## 8-byte Folded Reload + adcq $0, %r12 + addq %r15, %rsi + adcq %r8, %rbx + adcq %r9, %rdi + adcq %r11, %rcx + adcq %r14, %r10 + adcq -112(%rsp), %r12 ## 8-byte Folded Reload + sbbq %rbp, %rbp + andl $1, %ebp + movq -96(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdx + mulxq -104(%rsp), %rax, %r14 ## 8-byte Folded Reload + movq %rax, -112(%rsp) ## 8-byte Spill + mulxq -24(%rsp), %rax, %r15 ## 8-byte Folded Reload + movq %rax, -120(%rsp) ## 
8-byte Spill + mulxq -32(%rsp), %r13, %r9 ## 8-byte Folded Reload + mulxq -48(%rsp), %r8, %rsi ## 8-byte Folded Reload + mulxq -40(%rsp), %r11, %rax ## 8-byte Folded Reload + addq %r8, %rax + adcq %r13, %rsi + adcq -120(%rsp), %r9 ## 8-byte Folded Reload + adcq -112(%rsp), %r15 ## 8-byte Folded Reload + adcq $0, %r14 + addq %rbx, %r11 + adcq %rdi, %rax + adcq %rcx, %rsi + adcq %r10, %r9 + adcq %r12, %r15 + adcq %rbp, %r14 + sbbq %r12, %r12 + andl $1, %r12d + movq %r11, %rdx + imulq -16(%rsp), %rdx ## 8-byte Folded Reload + mulxq -56(%rsp), %rcx, %r10 ## 8-byte Folded Reload + movq %rcx, -112(%rsp) ## 8-byte Spill + mulxq -64(%rsp), %rcx, %rdi ## 8-byte Folded Reload + movq %rcx, -120(%rsp) ## 8-byte Spill + mulxq -88(%rsp), %r13, %rcx ## 8-byte Folded Reload + mulxq -72(%rsp), %r8, %rbx ## 8-byte Folded Reload + mulxq -80(%rsp), %rdx, %rbp ## 8-byte Folded Reload + addq %r8, %rbp + adcq %r13, %rbx + adcq -120(%rsp), %rcx ## 8-byte Folded Reload + adcq -112(%rsp), %rdi ## 8-byte Folded Reload + adcq $0, %r10 + addq %r11, %rdx + adcq %rax, %rbp + adcq %rsi, %rbx + adcq %r9, %rcx + adcq %r15, %rdi + adcq %r14, %r10 + adcq $0, %r12 + movq -96(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rdx + mulxq -104(%rsp), %rax, %r15 ## 8-byte Folded Reload + movq %rax, -112(%rsp) ## 8-byte Spill + mulxq -24(%rsp), %rax, %r11 ## 8-byte Folded Reload + movq %rax, -120(%rsp) ## 8-byte Spill + mulxq -32(%rsp), %r13, %r9 ## 8-byte Folded Reload + mulxq -48(%rsp), %rsi, %r8 ## 8-byte Folded Reload + mulxq -40(%rsp), %r14, %rax ## 8-byte Folded Reload + addq %rsi, %rax + adcq %r13, %r8 + adcq -120(%rsp), %r9 ## 8-byte Folded Reload + adcq -112(%rsp), %r11 ## 8-byte Folded Reload + adcq $0, %r15 + addq %rbp, %r14 + adcq %rbx, %rax + adcq %rcx, %r8 + adcq %rdi, %r9 + adcq %r10, %r11 + adcq %r12, %r15 + sbbq %r13, %r13 + andl $1, %r13d + movq %r14, %rdx + imulq -16(%rsp), %rdx ## 8-byte Folded Reload + mulxq -56(%rsp), %rcx, %r12 ## 8-byte Folded Reload + movq %rcx, -112(%rsp) ## 8-byte Spill + mulxq -64(%rsp), %rcx, %r10 ## 8-byte Folded Reload + movq %rcx, -120(%rsp) ## 8-byte Spill + mulxq -88(%rsp), %rdi, %rsi ## 8-byte Folded Reload + mulxq -72(%rsp), %rcx, %rbx ## 8-byte Folded Reload + mulxq -80(%rsp), %rdx, %rbp ## 8-byte Folded Reload + addq %rcx, %rbp + adcq %rdi, %rbx + adcq -120(%rsp), %rsi ## 8-byte Folded Reload + adcq -112(%rsp), %r10 ## 8-byte Folded Reload + adcq $0, %r12 + addq %r14, %rdx + adcq %rax, %rbp + adcq %r8, %rbx + adcq %r9, %rsi + adcq %r11, %r10 + adcq %r15, %r12 + adcq $0, %r13 + movq -96(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rdx + mulxq -104(%rsp), %rcx, %rax ## 8-byte Folded Reload + movq %rcx, -120(%rsp) ## 8-byte Spill + movq %rax, -112(%rsp) ## 8-byte Spill + mulxq -24(%rsp), %r11, %r14 ## 8-byte Folded Reload + mulxq -32(%rsp), %r8, %r9 ## 8-byte Folded Reload + mulxq -48(%rsp), %rax, %rdi ## 8-byte Folded Reload + mulxq -40(%rsp), %r15, %rcx ## 8-byte Folded Reload + addq %rax, %rcx + adcq %r8, %rdi + adcq %r11, %r9 + adcq -120(%rsp), %r14 ## 8-byte Folded Reload + movq -112(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %rbp, %r15 + adcq %rbx, %rcx + adcq %rsi, %rdi + adcq %r10, %r9 + adcq %r12, %r14 + adcq %r13, %rax + movq %rax, -112(%rsp) ## 8-byte Spill + sbbq %r12, %r12 + andl $1, %r12d + movq %r15, %rdx + imulq -16(%rsp), %rdx ## 8-byte Folded Reload + mulxq -56(%rsp), %rax, %rbp ## 8-byte Folded Reload + movq %rax, -120(%rsp) ## 8-byte Spill + mulxq -64(%rsp), %r13, %r10 ## 8-byte Folded Reload + mulxq -88(%rsp), %rbx, %r8 ## 8-byte Folded Reload + mulxq 
-72(%rsp), %rsi, %r11 ## 8-byte Folded Reload + mulxq -80(%rsp), %rdx, %rax ## 8-byte Folded Reload + addq %rsi, %rax + adcq %rbx, %r11 + adcq %r13, %r8 + adcq -120(%rsp), %r10 ## 8-byte Folded Reload + adcq $0, %rbp + addq %r15, %rdx + adcq %rcx, %rax + adcq %rdi, %r11 + adcq %r9, %r8 + adcq %r14, %r10 + adcq -112(%rsp), %rbp ## 8-byte Folded Reload + adcq $0, %r12 + movq -96(%rsp), %rcx ## 8-byte Reload + movq 32(%rcx), %rdx + mulxq -104(%rsp), %rcx, %r14 ## 8-byte Folded Reload + movq %rcx, -96(%rsp) ## 8-byte Spill + mulxq -24(%rsp), %rcx, %rbx ## 8-byte Folded Reload + movq %rcx, -104(%rsp) ## 8-byte Spill + mulxq -32(%rsp), %rsi, %r15 ## 8-byte Folded Reload + mulxq -48(%rsp), %rcx, %r9 ## 8-byte Folded Reload + mulxq -40(%rsp), %r13, %rdi ## 8-byte Folded Reload + addq %rcx, %rdi + adcq %rsi, %r9 + adcq -104(%rsp), %r15 ## 8-byte Folded Reload + adcq -96(%rsp), %rbx ## 8-byte Folded Reload + adcq $0, %r14 + addq %rax, %r13 + adcq %r11, %rdi + adcq %r8, %r9 + adcq %r10, %r15 + adcq %rbp, %rbx + adcq %r12, %r14 + sbbq %rax, %rax + movq -16(%rsp), %rdx ## 8-byte Reload + imulq %r13, %rdx + mulxq -80(%rsp), %r10, %rcx ## 8-byte Folded Reload + mulxq -72(%rsp), %r8, %rsi ## 8-byte Folded Reload + addq %rcx, %r8 + mulxq -88(%rsp), %rbp, %r11 ## 8-byte Folded Reload + adcq %rsi, %rbp + mulxq -64(%rsp), %rcx, %r12 ## 8-byte Folded Reload + adcq %r11, %rcx + mulxq -56(%rsp), %rsi, %r11 ## 8-byte Folded Reload + adcq %r12, %rsi + adcq $0, %r11 + andl $1, %eax + addq %r13, %r10 + adcq %rdi, %r8 + adcq %r9, %rbp + adcq %r15, %rcx + adcq %rbx, %rsi + adcq %r14, %r11 + adcq $0, %rax + movq %r8, %rdi + subq -80(%rsp), %rdi ## 8-byte Folded Reload + movq %rbp, %rbx + sbbq -72(%rsp), %rbx ## 8-byte Folded Reload + movq %rcx, %r9 + sbbq -88(%rsp), %r9 ## 8-byte Folded Reload + movq %rsi, %rdx + sbbq -64(%rsp), %rdx ## 8-byte Folded Reload + movq %r11, %r10 + sbbq -56(%rsp), %r10 ## 8-byte Folded Reload + sbbq $0, %rax + andl $1, %eax + cmovneq %rsi, %rdx + testb %al, %al + cmovneq %r8, %rdi + movq -8(%rsp), %rax ## 8-byte Reload + movq %rdi, (%rax) + cmovneq %rbp, %rbx + movq %rbx, 8(%rax) + cmovneq %rcx, %r9 + movq %r9, 16(%rax) + movq %rdx, 24(%rax) + cmovneq %r11, %r10 + movq %r10, 32(%rax) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF5Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montNF5Lbmi2: ## @mcl_fp_montNF5Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rdi, -8(%rsp) ## 8-byte Spill + movq (%rsi), %r13 + movq 8(%rsi), %rbp + movq %rbp, -104(%rsp) ## 8-byte Spill + movq (%rdx), %rax + movq %rbp, %rdx + mulxq %rax, %rbp, %r9 + movq %r13, %rdx + movq %r13, -24(%rsp) ## 8-byte Spill + mulxq %rax, %r8, %r10 + movq 16(%rsi), %rdx + movq %rdx, -64(%rsp) ## 8-byte Spill + addq %rbp, %r10 + mulxq %rax, %rbp, %rbx + adcq %r9, %rbp + movq 24(%rsi), %rdx + movq %rdx, -72(%rsp) ## 8-byte Spill + mulxq %rax, %r15, %r9 + adcq %rbx, %r15 + movq 32(%rsi), %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + mulxq %rax, %rax, %r11 + adcq %r9, %rax + adcq $0, %r11 + movq -8(%rcx), %rsi + movq %rsi, -32(%rsp) ## 8-byte Spill + movq %r8, %rdx + imulq %rsi, %rdx + movq (%rcx), %rsi + movq %rsi, -48(%rsp) ## 8-byte Spill + mulxq %rsi, %rbx, %r14 + addq %r8, %rbx + movq 8(%rcx), %rsi + movq %rsi, -40(%rsp) ## 8-byte Spill + mulxq %rsi, %rbx, %r12 + adcq %r10, %rbx + movq 16(%rcx), %rsi + movq %rsi, -16(%rsp) ## 8-byte Spill + mulxq %rsi, %r10, %rdi + adcq %rbp, %r10 + movq 24(%rcx), 
%rsi + movq %rsi, -88(%rsp) ## 8-byte Spill + mulxq %rsi, %r9, %rbp + adcq %r15, %r9 + movq 32(%rcx), %rcx + movq %rcx, -56(%rsp) ## 8-byte Spill + mulxq %rcx, %r8, %rcx + adcq %rax, %r8 + adcq $0, %r11 + addq %r14, %rbx + adcq %r12, %r10 + adcq %rdi, %r9 + adcq %rbp, %r8 + adcq %rcx, %r11 + movq -96(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdx + mulxq -104(%rsp), %rcx, %rsi ## 8-byte Folded Reload + mulxq %r13, %r14, %rax + addq %rcx, %rax + mulxq -64(%rsp), %rcx, %rdi ## 8-byte Folded Reload + adcq %rsi, %rcx + mulxq -72(%rsp), %rsi, %r15 ## 8-byte Folded Reload + adcq %rdi, %rsi + mulxq -80(%rsp), %rdi, %rbp ## 8-byte Folded Reload + adcq %r15, %rdi + adcq $0, %rbp + addq %rbx, %r14 + adcq %r10, %rax + adcq %r9, %rcx + adcq %r8, %rsi + adcq %r11, %rdi + adcq $0, %rbp + movq %r14, %rdx + movq -32(%rsp), %r12 ## 8-byte Reload + imulq %r12, %rdx + mulxq -48(%rsp), %rbx, %r15 ## 8-byte Folded Reload + addq %r14, %rbx + movq -40(%rsp), %r13 ## 8-byte Reload + mulxq %r13, %r8, %rbx + adcq %rax, %r8 + mulxq -16(%rsp), %r9, %rax ## 8-byte Folded Reload + adcq %rcx, %r9 + mulxq -88(%rsp), %r10, %rcx ## 8-byte Folded Reload + adcq %rsi, %r10 + mulxq -56(%rsp), %r11, %rdx ## 8-byte Folded Reload + adcq %rdi, %r11 + adcq $0, %rbp + addq %r15, %r8 + adcq %rbx, %r9 + adcq %rax, %r10 + adcq %rcx, %r11 + adcq %rdx, %rbp + movq -96(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rdx + mulxq -104(%rsp), %rcx, %rax ## 8-byte Folded Reload + mulxq -24(%rsp), %r14, %rsi ## 8-byte Folded Reload + addq %rcx, %rsi + mulxq -64(%rsp), %rbx, %rcx ## 8-byte Folded Reload + adcq %rax, %rbx + mulxq -72(%rsp), %rdi, %r15 ## 8-byte Folded Reload + adcq %rcx, %rdi + mulxq -80(%rsp), %rcx, %rax ## 8-byte Folded Reload + adcq %r15, %rcx + adcq $0, %rax + addq %r8, %r14 + adcq %r9, %rsi + adcq %r10, %rbx + adcq %r11, %rdi + adcq %rbp, %rcx + adcq $0, %rax + movq %r14, %rdx + imulq %r12, %rdx + movq -48(%rsp), %r12 ## 8-byte Reload + mulxq %r12, %rbp, %r15 + addq %r14, %rbp + mulxq %r13, %r8, %rbp + adcq %rsi, %r8 + movq -16(%rsp), %r13 ## 8-byte Reload + mulxq %r13, %r9, %rsi + adcq %rbx, %r9 + mulxq -88(%rsp), %r10, %rbx ## 8-byte Folded Reload + adcq %rdi, %r10 + mulxq -56(%rsp), %r11, %rdx ## 8-byte Folded Reload + adcq %rcx, %r11 + adcq $0, %rax + addq %r15, %r8 + adcq %rbp, %r9 + adcq %rsi, %r10 + adcq %rbx, %r11 + adcq %rdx, %rax + movq -96(%rsp), %rcx ## 8-byte Reload + movq 24(%rcx), %rdx + mulxq -104(%rsp), %rdi, %rsi ## 8-byte Folded Reload + mulxq -24(%rsp), %r14, %rcx ## 8-byte Folded Reload + addq %rdi, %rcx + mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload + adcq %rsi, %rbx + mulxq -72(%rsp), %rsi, %r15 ## 8-byte Folded Reload + adcq %rdi, %rsi + mulxq -80(%rsp), %rdi, %rbp ## 8-byte Folded Reload + adcq %r15, %rdi + adcq $0, %rbp + addq %r8, %r14 + adcq %r9, %rcx + adcq %r10, %rbx + adcq %r11, %rsi + adcq %rax, %rdi + adcq $0, %rbp + movq %r14, %rdx + imulq -32(%rsp), %rdx ## 8-byte Folded Reload + mulxq %r12, %rax, %r11 + addq %r14, %rax + mulxq -40(%rsp), %r8, %r14 ## 8-byte Folded Reload + adcq %rcx, %r8 + mulxq %r13, %r9, %rax + adcq %rbx, %r9 + movq -88(%rsp), %r12 ## 8-byte Reload + mulxq %r12, %r10, %rbx + adcq %rsi, %r10 + mulxq -56(%rsp), %rcx, %rdx ## 8-byte Folded Reload + adcq %rdi, %rcx + adcq $0, %rbp + addq %r11, %r8 + adcq %r14, %r9 + adcq %rax, %r10 + adcq %rbx, %rcx + adcq %rdx, %rbp + movq -96(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rdx + mulxq -104(%rsp), %rdi, %rbx ## 8-byte Folded Reload + mulxq -24(%rsp), %r14, %rsi ## 8-byte Folded Reload + addq %rdi, %rsi + mulxq 
-64(%rsp), %rdi, %rax ## 8-byte Folded Reload + adcq %rbx, %rdi + mulxq -72(%rsp), %rbx, %r15 ## 8-byte Folded Reload + adcq %rax, %rbx + mulxq -80(%rsp), %r11, %rax ## 8-byte Folded Reload + adcq %r15, %r11 + adcq $0, %rax + addq %r8, %r14 + adcq %r9, %rsi + adcq %r10, %rdi + adcq %rcx, %rbx + adcq %rbp, %r11 + adcq $0, %rax + movq -32(%rsp), %rdx ## 8-byte Reload + imulq %r14, %rdx + movq -48(%rsp), %r10 ## 8-byte Reload + mulxq %r10, %rcx, %rbp + movq %rbp, -96(%rsp) ## 8-byte Spill + addq %r14, %rcx + movq -40(%rsp), %r9 ## 8-byte Reload + mulxq %r9, %r14, %rcx + movq %rcx, -104(%rsp) ## 8-byte Spill + adcq %rsi, %r14 + movq %r13, %r8 + mulxq %r8, %r15, %r13 + adcq %rdi, %r15 + mulxq %r12, %rbp, %rcx + adcq %rbx, %rbp + movq -56(%rsp), %rbx ## 8-byte Reload + mulxq %rbx, %r12, %rdx + adcq %r11, %r12 + adcq $0, %rax + addq -96(%rsp), %r14 ## 8-byte Folded Reload + adcq -104(%rsp), %r15 ## 8-byte Folded Reload + adcq %r13, %rbp + adcq %rcx, %r12 + adcq %rdx, %rax + movq %r14, %rcx + subq %r10, %rcx + movq %r15, %rsi + sbbq %r9, %rsi + movq %rbp, %rdi + sbbq %r8, %rdi + movq %r12, %r8 + sbbq -88(%rsp), %r8 ## 8-byte Folded Reload + movq %rax, %rdx + sbbq %rbx, %rdx + movq %rdx, %rbx + sarq $63, %rbx + cmovsq %r14, %rcx + movq -8(%rsp), %rbx ## 8-byte Reload + movq %rcx, (%rbx) + cmovsq %r15, %rsi + movq %rsi, 8(%rbx) + cmovsq %rbp, %rdi + movq %rdi, 16(%rbx) + cmovsq %r12, %r8 + movq %r8, 24(%rbx) + cmovsq %rax, %rdx + movq %rdx, 32(%rbx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed5Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montRed5Lbmi2: ## @mcl_fp_montRed5Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rdi, -8(%rsp) ## 8-byte Spill + movq -8(%rcx), %rax + movq %rax, -104(%rsp) ## 8-byte Spill + movq (%rsi), %r15 + movq %r15, %rdx + imulq %rax, %rdx + movq 32(%rcx), %rax + movq %rax, -72(%rsp) ## 8-byte Spill + mulxq %rax, %r8, %r14 + movq 24(%rcx), %r12 + mulxq %r12, %r10, %r13 + movq %r12, -56(%rsp) ## 8-byte Spill + movq 16(%rcx), %r9 + mulxq %r9, %rdi, %rbp + movq %r9, -64(%rsp) ## 8-byte Spill + movq (%rcx), %rbx + movq %rbx, -40(%rsp) ## 8-byte Spill + movq 8(%rcx), %rax + movq %rax, -80(%rsp) ## 8-byte Spill + mulxq %rax, %rax, %r11 + mulxq %rbx, %rdx, %rcx + addq %rax, %rcx + adcq %rdi, %r11 + adcq %r10, %rbp + adcq %r8, %r13 + adcq $0, %r14 + addq %r15, %rdx + movq 72(%rsi), %rax + movq 64(%rsi), %rdx + adcq 8(%rsi), %rcx + adcq 16(%rsi), %r11 + adcq 24(%rsi), %rbp + adcq 32(%rsi), %r13 + adcq 40(%rsi), %r14 + movq %r14, -112(%rsp) ## 8-byte Spill + movq 56(%rsi), %rdi + movq 48(%rsi), %rsi + adcq $0, %rsi + movq %rsi, -32(%rsp) ## 8-byte Spill + adcq $0, %rdi + movq %rdi, -88(%rsp) ## 8-byte Spill + adcq $0, %rdx + movq %rdx, -96(%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, -48(%rsp) ## 8-byte Spill + sbbq %rsi, %rsi + andl $1, %esi + movq %rcx, %rdx + movq -104(%rsp), %r14 ## 8-byte Reload + imulq %r14, %rdx + mulxq -72(%rsp), %rax, %r15 ## 8-byte Folded Reload + movq %rax, -16(%rsp) ## 8-byte Spill + mulxq %r12, %rax, %r10 + movq %rax, -24(%rsp) ## 8-byte Spill + mulxq %r9, %rbx, %r8 + movq -80(%rsp), %r12 ## 8-byte Reload + mulxq %r12, %r9, %rdi + mulxq -40(%rsp), %rdx, %rax ## 8-byte Folded Reload + addq %r9, %rax + adcq %rbx, %rdi + adcq -24(%rsp), %r8 ## 8-byte Folded Reload + adcq -16(%rsp), %r10 ## 8-byte Folded Reload + adcq $0, %r15 + addq %rcx, %rdx + adcq %r11, %rax + adcq %rbp, %rdi + adcq %r13, %r8 + adcq -112(%rsp), %r10 ## 8-byte 
Folded Reload + adcq -32(%rsp), %r15 ## 8-byte Folded Reload + adcq $0, -88(%rsp) ## 8-byte Folded Spill + adcq $0, -96(%rsp) ## 8-byte Folded Spill + adcq $0, -48(%rsp) ## 8-byte Folded Spill + adcq $0, %rsi + movq %rax, %rdx + imulq %r14, %rdx + mulxq -72(%rsp), %rcx, %r13 ## 8-byte Folded Reload + movq %rcx, -112(%rsp) ## 8-byte Spill + mulxq -56(%rsp), %rcx, %r14 ## 8-byte Folded Reload + movq %rcx, -32(%rsp) ## 8-byte Spill + mulxq -64(%rsp), %r11, %rbx ## 8-byte Folded Reload + mulxq %r12, %r9, %rbp + mulxq -40(%rsp), %rdx, %rcx ## 8-byte Folded Reload + addq %r9, %rcx + adcq %r11, %rbp + adcq -32(%rsp), %rbx ## 8-byte Folded Reload + adcq -112(%rsp), %r14 ## 8-byte Folded Reload + adcq $0, %r13 + addq %rax, %rdx + adcq %rdi, %rcx + adcq %r8, %rbp + adcq %r10, %rbx + adcq %r15, %r14 + adcq -88(%rsp), %r13 ## 8-byte Folded Reload + adcq $0, -96(%rsp) ## 8-byte Folded Spill + adcq $0, -48(%rsp) ## 8-byte Folded Spill + adcq $0, %rsi + movq %rcx, %rdx + imulq -104(%rsp), %rdx ## 8-byte Folded Reload + movq -72(%rsp), %r9 ## 8-byte Reload + mulxq %r9, %rax, %r12 + movq %rax, -88(%rsp) ## 8-byte Spill + mulxq -56(%rsp), %rax, %r10 ## 8-byte Folded Reload + movq %rax, -112(%rsp) ## 8-byte Spill + mulxq -64(%rsp), %r8, %r11 ## 8-byte Folded Reload + mulxq -80(%rsp), %rdi, %r15 ## 8-byte Folded Reload + mulxq -40(%rsp), %rdx, %rax ## 8-byte Folded Reload + addq %rdi, %rax + adcq %r8, %r15 + adcq -112(%rsp), %r11 ## 8-byte Folded Reload + adcq -88(%rsp), %r10 ## 8-byte Folded Reload + adcq $0, %r12 + addq %rcx, %rdx + adcq %rbp, %rax + adcq %rbx, %r15 + adcq %r14, %r11 + adcq %r13, %r10 + adcq -96(%rsp), %r12 ## 8-byte Folded Reload + adcq $0, -48(%rsp) ## 8-byte Folded Spill + adcq $0, %rsi + movq -104(%rsp), %rdx ## 8-byte Reload + imulq %rax, %rdx + mulxq %r9, %rdi, %rcx + movq %rdi, -96(%rsp) ## 8-byte Spill + mulxq -56(%rsp), %rbp, %rdi ## 8-byte Folded Reload + movq %rbp, -104(%rsp) ## 8-byte Spill + mulxq -64(%rsp), %r13, %rbp ## 8-byte Folded Reload + movq -40(%rsp), %r14 ## 8-byte Reload + mulxq %r14, %r8, %r9 + mulxq -80(%rsp), %rbx, %rdx ## 8-byte Folded Reload + addq %r9, %rbx + adcq %r13, %rdx + adcq -104(%rsp), %rbp ## 8-byte Folded Reload + adcq -96(%rsp), %rdi ## 8-byte Folded Reload + adcq $0, %rcx + addq %rax, %r8 + adcq %r15, %rbx + adcq %r11, %rdx + adcq %r10, %rbp + adcq %r12, %rdi + adcq -48(%rsp), %rcx ## 8-byte Folded Reload + adcq $0, %rsi + movq %rbx, %rax + subq %r14, %rax + movq %rdx, %r8 + sbbq -80(%rsp), %r8 ## 8-byte Folded Reload + movq %rbp, %r9 + sbbq -64(%rsp), %r9 ## 8-byte Folded Reload + movq %rdi, %r10 + sbbq -56(%rsp), %r10 ## 8-byte Folded Reload + movq %rcx, %r11 + sbbq -72(%rsp), %r11 ## 8-byte Folded Reload + sbbq $0, %rsi + andl $1, %esi + cmovneq %rcx, %r11 + testb %sil, %sil + cmovneq %rbx, %rax + movq -8(%rsp), %rcx ## 8-byte Reload + movq %rax, (%rcx) + cmovneq %rdx, %r8 + movq %r8, 8(%rcx) + cmovneq %rbp, %r9 + movq %r9, 16(%rcx) + cmovneq %rdi, %r10 + movq %r10, 24(%rcx) + movq %r11, 32(%rcx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_addPre5Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addPre5Lbmi2: ## @mcl_fp_addPre5Lbmi2 +## BB#0: + movq 32(%rdx), %r8 + movq 24(%rdx), %r9 + movq 24(%rsi), %r11 + movq 32(%rsi), %r10 + movq 16(%rdx), %rcx + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rcx + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %rcx, 16(%rdi) + adcq %r9, %r11 + movq %r11, 24(%rdi) + adcq %r8, %r10 + movq %r10, 32(%rdi) + sbbq %rax, %rax + 
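+ ## The sbbq %rax, %rax above smears the carry flag across %rax; the andl below
+ ## reduces that mask to the 0/1 carry-out returned by addPre.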
andl $1, %eax + retq + + .globl _mcl_fp_subPre5Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subPre5Lbmi2: ## @mcl_fp_subPre5Lbmi2 +## BB#0: + pushq %rbx + movq 32(%rsi), %r10 + movq 24(%rdx), %r8 + movq 32(%rdx), %r9 + movq 24(%rsi), %r11 + movq 16(%rsi), %rcx + movq (%rsi), %rbx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rbx + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %rcx + movq %rbx, (%rdi) + movq %rsi, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r8, %r11 + movq %r11, 24(%rdi) + sbbq %r9, %r10 + movq %r10, 32(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + retq + + .globl _mcl_fp_shr1_5Lbmi2 + .p2align 4, 0x90 +_mcl_fp_shr1_5Lbmi2: ## @mcl_fp_shr1_5Lbmi2 +## BB#0: + movq 32(%rsi), %r8 + movq 24(%rsi), %rcx + movq 16(%rsi), %rdx + movq (%rsi), %rax + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rax + movq %rax, (%rdi) + shrdq $1, %rdx, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rcx, %rdx + movq %rdx, 16(%rdi) + shrdq $1, %r8, %rcx + movq %rcx, 24(%rdi) + shrq %r8 + movq %r8, 32(%rdi) + retq + + .globl _mcl_fp_add5Lbmi2 + .p2align 4, 0x90 +_mcl_fp_add5Lbmi2: ## @mcl_fp_add5Lbmi2 +## BB#0: + pushq %rbx + movq 32(%rdx), %r11 + movq 24(%rdx), %rbx + movq 24(%rsi), %r9 + movq 32(%rsi), %r8 + movq 16(%rdx), %r10 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r10 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + adcq %rbx, %r9 + movq %r9, 24(%rdi) + adcq %r11, %r8 + movq %r8, 32(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r10 + sbbq 24(%rcx), %r9 + sbbq 32(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne LBB74_2 +## BB#1: ## %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + movq %r9, 24(%rdi) + movq %r8, 32(%rdi) +LBB74_2: ## %carry + popq %rbx + retq + + .globl _mcl_fp_addNF5Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addNF5Lbmi2: ## @mcl_fp_addNF5Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 32(%rdx), %r8 + movq 24(%rdx), %r9 + movq 16(%rdx), %r10 + movq (%rdx), %r14 + movq 8(%rdx), %r11 + addq (%rsi), %r14 + adcq 8(%rsi), %r11 + adcq 16(%rsi), %r10 + adcq 24(%rsi), %r9 + adcq 32(%rsi), %r8 + movq %r14, %rsi + subq (%rcx), %rsi + movq %r11, %rdx + sbbq 8(%rcx), %rdx + movq %r10, %rbx + sbbq 16(%rcx), %rbx + movq %r9, %r15 + sbbq 24(%rcx), %r15 + movq %r8, %rax + sbbq 32(%rcx), %rax + movq %rax, %rcx + sarq $63, %rcx + cmovsq %r14, %rsi + movq %rsi, (%rdi) + cmovsq %r11, %rdx + movq %rdx, 8(%rdi) + cmovsq %r10, %rbx + movq %rbx, 16(%rdi) + cmovsq %r9, %r15 + movq %r15, 24(%rdi) + cmovsq %r8, %rax + movq %rax, 32(%rdi) + popq %rbx + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_sub5Lbmi2 + .p2align 4, 0x90 +_mcl_fp_sub5Lbmi2: ## @mcl_fp_sub5Lbmi2 +## BB#0: + pushq %r14 + pushq %rbx + movq 32(%rsi), %r8 + movq 24(%rdx), %r11 + movq 32(%rdx), %r14 + movq 24(%rsi), %r9 + movq 16(%rsi), %r10 + movq (%rsi), %rax + movq 8(%rsi), %rsi + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %r10 + movq %rax, (%rdi) + movq %rsi, 8(%rdi) + movq %r10, 16(%rdi) + sbbq %r11, %r9 + movq %r9, 24(%rdi) + sbbq %r14, %r8 + movq %r8, 32(%rdi) + sbbq $0, %rbx + testb $1, %bl + je LBB76_2 +## BB#1: ## %carry + movq 32(%rcx), %r11 + movq 24(%rcx), %r14 + movq 8(%rcx), %rdx + movq 16(%rcx), %rbx + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %rsi, %rdx + movq %rdx, 8(%rdi) + adcq %r10, %rbx + movq %rbx, 16(%rdi) + adcq %r9, %r14 + movq %r14, 24(%rdi) + adcq %r8, %r11 + movq %r11, 32(%rdi) +LBB76_2: ## %nocarry + popq %rbx + popq %r14 + retq + + .globl 
_mcl_fp_subNF5Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subNF5Lbmi2: ## @mcl_fp_subNF5Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 32(%rsi), %r12 + movdqu (%rdx), %xmm0 + movdqu 16(%rdx), %xmm1 + pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] + movd %xmm2, %r9 + movdqu (%rsi), %xmm2 + movdqu 16(%rsi), %xmm3 + pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1] + movd %xmm4, %r8 + movd %xmm1, %r10 + movd %xmm3, %r14 + pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] + movd %xmm1, %r11 + pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1] + movd %xmm1, %r15 + movd %xmm0, %rsi + movd %xmm2, %r13 + subq %rsi, %r13 + sbbq %r11, %r15 + sbbq %r10, %r14 + sbbq %r9, %r8 + sbbq 32(%rdx), %r12 + movq %r12, %rdx + sarq $63, %rdx + movq %rdx, %rsi + shldq $1, %r12, %rsi + movq 8(%rcx), %rax + andq %rsi, %rax + andq (%rcx), %rsi + movq 32(%rcx), %r9 + andq %rdx, %r9 + rorxq $63, %rdx, %rbx + andq 24(%rcx), %rdx + andq 16(%rcx), %rbx + addq %r13, %rsi + movq %rsi, (%rdi) + adcq %r15, %rax + movq %rax, 8(%rdi) + adcq %r14, %rbx + movq %rbx, 16(%rdi) + adcq %r8, %rdx + movq %rdx, 24(%rdi) + adcq %r12, %r9 + movq %r9, 32(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fpDbl_add5Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_add5Lbmi2: ## @mcl_fpDbl_add5Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 72(%rdx), %rax + movq %rax, -8(%rsp) ## 8-byte Spill + movq 64(%rdx), %r11 + movq 56(%rdx), %r14 + movq 48(%rdx), %r15 + movq 24(%rsi), %rbp + movq 32(%rsi), %r13 + movq 16(%rdx), %r12 + movq (%rdx), %rbx + movq 8(%rdx), %rax + addq (%rsi), %rbx + adcq 8(%rsi), %rax + adcq 16(%rsi), %r12 + adcq 24(%rdx), %rbp + adcq 32(%rdx), %r13 + movq 40(%rdx), %r9 + movq %rbx, (%rdi) + movq 72(%rsi), %r8 + movq %rax, 8(%rdi) + movq 64(%rsi), %r10 + movq %r12, 16(%rdi) + movq 56(%rsi), %r12 + movq %rbp, 24(%rdi) + movq 48(%rsi), %rbp + movq 40(%rsi), %rbx + movq %r13, 32(%rdi) + adcq %r9, %rbx + adcq %r15, %rbp + adcq %r14, %r12 + adcq %r11, %r10 + adcq -8(%rsp), %r8 ## 8-byte Folded Reload + sbbq %rsi, %rsi + andl $1, %esi + movq %rbx, %rax + subq (%rcx), %rax + movq %rbp, %rdx + sbbq 8(%rcx), %rdx + movq %r12, %r9 + sbbq 16(%rcx), %r9 + movq %r10, %r11 + sbbq 24(%rcx), %r11 + movq %r8, %r14 + sbbq 32(%rcx), %r14 + sbbq $0, %rsi + andl $1, %esi + cmovneq %rbx, %rax + movq %rax, 40(%rdi) + testb %sil, %sil + cmovneq %rbp, %rdx + movq %rdx, 48(%rdi) + cmovneq %r12, %r9 + movq %r9, 56(%rdi) + cmovneq %r10, %r11 + movq %r11, 64(%rdi) + cmovneq %r8, %r14 + movq %r14, 72(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sub5Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sub5Lbmi2: ## @mcl_fpDbl_sub5Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 72(%rdx), %r9 + movq 64(%rdx), %r10 + movq 56(%rdx), %r14 + movq 16(%rsi), %r8 + movq (%rsi), %r15 + movq 8(%rsi), %r11 + xorl %eax, %eax + subq (%rdx), %r15 + sbbq 8(%rdx), %r11 + sbbq 16(%rdx), %r8 + movq 24(%rsi), %r12 + sbbq 24(%rdx), %r12 + movq %r15, (%rdi) + movq 32(%rsi), %rbx + sbbq 32(%rdx), %rbx + movq %r11, 8(%rdi) + movq 48(%rdx), %r15 + movq 40(%rdx), %rdx + movq %r8, 16(%rdi) + movq 72(%rsi), %r8 + movq %r12, 24(%rdi) + movq 64(%rsi), %r11 + movq %rbx, 32(%rdi) + movq 40(%rsi), %rbp + sbbq %rdx, %rbp + movq 56(%rsi), %r12 + movq 48(%rsi), %r13 + sbbq %r15, %r13 + sbbq %r14, %r12 + sbbq %r10, %r11 + sbbq %r9, %r8 + movl $0, %edx + sbbq $0, %rdx + andl $1, %edx + 
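+ ## %rdx now holds the borrow bit of the high-half subtraction; each modulus
+ ## word loaded below is zeroed via cmove when no borrow occurred, so the
+ ## correction step adds either p or 0 to the high limbs.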
movq (%rcx), %rsi + cmoveq %rax, %rsi + testb %dl, %dl + movq 16(%rcx), %rdx + cmoveq %rax, %rdx + movq 8(%rcx), %rbx + cmoveq %rax, %rbx + movq 32(%rcx), %r9 + cmoveq %rax, %r9 + cmovneq 24(%rcx), %rax + addq %rbp, %rsi + movq %rsi, 40(%rdi) + adcq %r13, %rbx + movq %rbx, 48(%rdi) + adcq %r12, %rdx + movq %rdx, 56(%rdi) + adcq %r11, %rax + movq %rax, 64(%rdi) + adcq %r8, %r9 + movq %r9, 72(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mulUnitPre6Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mulUnitPre6Lbmi2: ## @mcl_fp_mulUnitPre6Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + mulxq 40(%rsi), %r8, %r11 + mulxq 32(%rsi), %r9, %r12 + mulxq 24(%rsi), %r10, %rcx + mulxq 16(%rsi), %r14, %rbx + mulxq 8(%rsi), %r15, %rax + mulxq (%rsi), %rdx, %rsi + movq %rdx, (%rdi) + addq %r15, %rsi + movq %rsi, 8(%rdi) + adcq %r14, %rax + movq %rax, 16(%rdi) + adcq %r10, %rbx + movq %rbx, 24(%rdi) + adcq %r9, %rcx + movq %rcx, 32(%rdi) + adcq %r8, %r12 + movq %r12, 40(%rdi) + adcq $0, %r11 + movq %r11, 48(%rdi) + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fpDbl_mulPre6Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_mulPre6Lbmi2: ## @mcl_fpDbl_mulPre6Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %r11 + movq %rdi, -48(%rsp) ## 8-byte Spill + movq (%rsi), %r15 + movq 8(%rsi), %rcx + movq %rcx, -80(%rsp) ## 8-byte Spill + movq (%r11), %rax + movq %r11, -56(%rsp) ## 8-byte Spill + movq %rcx, %rdx + mulxq %rax, %rcx, %r14 + movq %r15, %rdx + mulxq %rax, %rdx, %rbp + movq %rdx, -72(%rsp) ## 8-byte Spill + movq 24(%rsi), %rbx + movq %rbx, -88(%rsp) ## 8-byte Spill + movq 16(%rsi), %rdx + movq %rdx, -64(%rsp) ## 8-byte Spill + addq %rcx, %rbp + mulxq %rax, %rcx, %r12 + adcq %r14, %rcx + movq %rbx, %rdx + mulxq %rax, %rbx, %r14 + adcq %r12, %rbx + movq 32(%rsi), %r12 + movq %r12, %rdx + mulxq %rax, %r8, %r13 + adcq %r14, %r8 + movq 40(%rsi), %r14 + movq %r14, %rdx + mulxq %rax, %r9, %r10 + adcq %r13, %r9 + movq -72(%rsp), %rax ## 8-byte Reload + movq %rax, (%rdi) + adcq $0, %r10 + movq 8(%r11), %rdi + movq %r15, %rdx + mulxq %rdi, %r13, %rax + movq %rax, -72(%rsp) ## 8-byte Spill + addq %rbp, %r13 + movq -80(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %rbp, %rax + movq %rax, -80(%rsp) ## 8-byte Spill + adcq %rcx, %rbp + movq -64(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %rax, %r11 + adcq %rbx, %rax + movq -88(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %rbx, %rcx + movq %rcx, -88(%rsp) ## 8-byte Spill + adcq %r8, %rbx + movq %r12, %rdx + mulxq %rdi, %rcx, %r8 + adcq %r9, %rcx + movq %r14, %rdx + mulxq %rdi, %r12, %rdx + adcq %r10, %r12 + sbbq %r15, %r15 + andl $1, %r15d + addq -72(%rsp), %rbp ## 8-byte Folded Reload + adcq -80(%rsp), %rax ## 8-byte Folded Reload + adcq %r11, %rbx + movq -48(%rsp), %rdi ## 8-byte Reload + movq %r13, 8(%rdi) + adcq -88(%rsp), %rcx ## 8-byte Folded Reload + adcq %r8, %r12 + adcq %rdx, %r15 + movq (%rsi), %rdx + movq %rdx, -88(%rsp) ## 8-byte Spill + movq 8(%rsi), %r8 + movq %r8, -80(%rsp) ## 8-byte Spill + movq -56(%rsp), %r14 ## 8-byte Reload + movq 16(%r14), %rdi + mulxq %rdi, %r13, %rdx + movq %rdx, -8(%rsp) ## 8-byte Spill + addq %rbp, %r13 + movq %r8, %rdx + mulxq %rdi, %r8, %rdx + movq %rdx, -16(%rsp) ## 8-byte Spill + adcq %rax, %r8 + movq 16(%rsi), %rdx + movq %rdx, -64(%rsp) ## 8-byte Spill + mulxq %rdi, %r11, %rax + movq %rax, -24(%rsp) ## 8-byte Spill + adcq %rbx, %r11 + movq 24(%rsi), %rdx + movq %rdx, -72(%rsp) ## 8-byte Spill + mulxq 
%rdi, %rax, %rbx + adcq %rcx, %rax + movq 32(%rsi), %rdx + movq %rdx, -32(%rsp) ## 8-byte Spill + mulxq %rdi, %r10, %rcx + adcq %r12, %r10 + movq 40(%rsi), %rdx + movq %rdx, -40(%rsp) ## 8-byte Spill + mulxq %rdi, %r9, %rdx + adcq %r15, %r9 + sbbq %rbp, %rbp + andl $1, %ebp + addq -8(%rsp), %r8 ## 8-byte Folded Reload + adcq -16(%rsp), %r11 ## 8-byte Folded Reload + adcq -24(%rsp), %rax ## 8-byte Folded Reload + adcq %rbx, %r10 + adcq %rcx, %r9 + adcq %rdx, %rbp + movq -48(%rsp), %rcx ## 8-byte Reload + movq %r13, 16(%rcx) + movq 24(%r14), %rdi + movq -88(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %r12, %rcx + movq %rcx, -88(%rsp) ## 8-byte Spill + addq %r8, %r12 + movq -80(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %rbx, %rcx + movq %rcx, -80(%rsp) ## 8-byte Spill + adcq %r11, %rbx + movq -64(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %rcx, %r11 + adcq %rax, %rcx + movq -72(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %r14, %rax + movq %rax, -64(%rsp) ## 8-byte Spill + adcq %r10, %r14 + movq -32(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %r8, %rax + adcq %r9, %r8 + movq -40(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %r13, %rdx + adcq %rbp, %r13 + sbbq %r15, %r15 + andl $1, %r15d + addq -88(%rsp), %rbx ## 8-byte Folded Reload + adcq -80(%rsp), %rcx ## 8-byte Folded Reload + adcq %r11, %r14 + movq -48(%rsp), %rdi ## 8-byte Reload + movq %r12, 24(%rdi) + adcq -64(%rsp), %r8 ## 8-byte Folded Reload + adcq %rax, %r13 + adcq %rdx, %r15 + movq (%rsi), %rdx + movq %rdx, -88(%rsp) ## 8-byte Spill + movq 8(%rsi), %rbp + movq %rbp, -80(%rsp) ## 8-byte Spill + movq -56(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rdi + mulxq %rdi, %r12, %rax + movq %rax, -64(%rsp) ## 8-byte Spill + addq %rbx, %r12 + movq %rbp, %rdx + mulxq %rdi, %rbx, %rax + movq %rax, -72(%rsp) ## 8-byte Spill + adcq %rcx, %rbx + movq 16(%rsi), %r11 + movq %r11, %rdx + mulxq %rdi, %rax, %rcx + movq %rcx, -32(%rsp) ## 8-byte Spill + adcq %r14, %rax + movq 24(%rsi), %r14 + movq %r14, %rdx + mulxq %rdi, %rbp, %rcx + movq %rcx, -40(%rsp) ## 8-byte Spill + adcq %r8, %rbp + movq 32(%rsi), %r8 + movq %r8, %rdx + mulxq %rdi, %rcx, %r10 + adcq %r13, %rcx + movq 40(%rsi), %r13 + movq %r13, %rdx + mulxq %rdi, %r9, %rdx + adcq %r15, %r9 + sbbq %rsi, %rsi + andl $1, %esi + addq -64(%rsp), %rbx ## 8-byte Folded Reload + adcq -72(%rsp), %rax ## 8-byte Folded Reload + adcq -32(%rsp), %rbp ## 8-byte Folded Reload + adcq -40(%rsp), %rcx ## 8-byte Folded Reload + adcq %r10, %r9 + adcq %rdx, %rsi + movq -48(%rsp), %r10 ## 8-byte Reload + movq %r12, 32(%r10) + movq -56(%rsp), %rdx ## 8-byte Reload + movq 40(%rdx), %rdi + movq -88(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %r15, %rdx + movq %rdx, -56(%rsp) ## 8-byte Spill + addq %rbx, %r15 + movq -80(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %rbx, %r12 + adcq %rax, %rbx + movq %r11, %rdx + mulxq %rdi, %rax, %r11 + adcq %rbp, %rax + movq %r14, %rdx + mulxq %rdi, %rbp, %r14 + adcq %rcx, %rbp + movq %r8, %rdx + mulxq %rdi, %rcx, %r8 + adcq %r9, %rcx + movq %r13, %rdx + mulxq %rdi, %rdi, %r9 + adcq %rsi, %rdi + sbbq %rsi, %rsi + andl $1, %esi + addq -56(%rsp), %rbx ## 8-byte Folded Reload + movq %r15, 40(%r10) + movq %rbx, 48(%r10) + adcq %r12, %rax + movq %rax, 56(%r10) + adcq %r11, %rbp + movq %rbp, 64(%r10) + adcq %r14, %rcx + movq %rcx, 72(%r10) + adcq %r8, %rdi + movq %rdi, 80(%r10) + adcq %r9, %rsi + movq %rsi, 88(%r10) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sqrPre6Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre6Lbmi2: ## 
@mcl_fpDbl_sqrPre6Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, %r9 + movq 16(%rsi), %rdx + movq %rdx, -64(%rsp) ## 8-byte Spill + movq (%rsi), %rcx + movq 8(%rsi), %rax + mulxq %rcx, %r10, %r8 + movq 24(%rsi), %rbp + movq %rbp, -48(%rsp) ## 8-byte Spill + movq %rax, %rdx + mulxq %rcx, %r11, %rbx + movq %rbx, -40(%rsp) ## 8-byte Spill + movq %rcx, %rdx + mulxq %rcx, %rdx, %r14 + movq %rdx, -56(%rsp) ## 8-byte Spill + addq %r11, %r14 + adcq %rbx, %r10 + movq %rbp, %rdx + mulxq %rcx, %r15, %rbp + adcq %r8, %r15 + movq 32(%rsi), %rbx + movq %rbx, %rdx + mulxq %rcx, %r8, %r13 + adcq %rbp, %r8 + movq 40(%rsi), %rdi + movq %rdi, %rdx + mulxq %rcx, %rcx, %r12 + adcq %r13, %rcx + movq %r9, -24(%rsp) ## 8-byte Spill + movq -56(%rsp), %rdx ## 8-byte Reload + movq %rdx, (%r9) + adcq $0, %r12 + addq %r11, %r14 + movq %rax, %rdx + mulxq %rax, %rbp, %rdx + movq %rdx, -56(%rsp) ## 8-byte Spill + adcq %r10, %rbp + movq -64(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %r13, %r10 + adcq %r15, %r13 + movq -48(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %r15, %rdx + movq %rdx, -64(%rsp) ## 8-byte Spill + adcq %r8, %r15 + movq %rbx, %rdx + mulxq %rax, %rbx, %r8 + adcq %rcx, %rbx + movq %rdi, %rdx + mulxq %rax, %r11, %rax + adcq %r12, %r11 + sbbq %r12, %r12 + andl $1, %r12d + addq -40(%rsp), %rbp ## 8-byte Folded Reload + adcq -56(%rsp), %r13 ## 8-byte Folded Reload + movq %r14, 8(%r9) + adcq %r10, %r15 + adcq -64(%rsp), %rbx ## 8-byte Folded Reload + adcq %r8, %r11 + adcq %rax, %r12 + movq (%rsi), %rdx + movq %rdx, -48(%rsp) ## 8-byte Spill + movq 8(%rsi), %rdi + movq %rdi, -64(%rsp) ## 8-byte Spill + movq 16(%rsi), %rcx + mulxq %rcx, %rax, %rdx + movq %rdx, -32(%rsp) ## 8-byte Spill + addq %rbp, %rax + movq %rax, -40(%rsp) ## 8-byte Spill + movq %rdi, %rdx + mulxq %rcx, %rbp, %rax + movq %rax, -8(%rsp) ## 8-byte Spill + adcq %r13, %rbp + movq %rcx, %rdx + mulxq %rcx, %r13, %rax + movq %rax, -16(%rsp) ## 8-byte Spill + adcq %r15, %r13 + movq 24(%rsi), %rax + movq %rax, %rdx + mulxq %rcx, %r8, %rdi + movq %rdi, -56(%rsp) ## 8-byte Spill + adcq %r8, %rbx + movq 32(%rsi), %r10 + movq %r10, %rdx + mulxq %rcx, %r14, %r15 + adcq %r11, %r14 + movq 40(%rsi), %r11 + movq %r11, %rdx + mulxq %rcx, %r9, %rdx + adcq %r12, %r9 + sbbq %rcx, %rcx + andl $1, %ecx + addq -32(%rsp), %rbp ## 8-byte Folded Reload + adcq -8(%rsp), %r13 ## 8-byte Folded Reload + adcq -16(%rsp), %rbx ## 8-byte Folded Reload + adcq %rdi, %r14 + adcq %r15, %r9 + adcq %rdx, %rcx + movq -48(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %rdi, %rdx + movq %rdx, -48(%rsp) ## 8-byte Spill + addq %rbp, %rdi + movq -64(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %r15, %rbp + adcq %r13, %r15 + adcq %r8, %rbx + movq %rax, %rdx + mulxq %rax, %r8, %rdx + movq %rdx, -64(%rsp) ## 8-byte Spill + adcq %r14, %r8 + movq %r10, %rdx + mulxq %rax, %r12, %r10 + adcq %r9, %r12 + movq %r11, %rdx + mulxq %rax, %r13, %rax + adcq %rcx, %r13 + sbbq %r9, %r9 + andl $1, %r9d + addq -48(%rsp), %r15 ## 8-byte Folded Reload + adcq %rbp, %rbx + movq -24(%rsp), %rdx ## 8-byte Reload + movq -40(%rsp), %rbp ## 8-byte Reload + movq %rbp, 16(%rdx) + movq %rdi, 24(%rdx) + adcq -56(%rsp), %r8 ## 8-byte Folded Reload + adcq -64(%rsp), %r12 ## 8-byte Folded Reload + adcq %r10, %r13 + adcq %rax, %r9 + movq (%rsi), %rcx + movq 8(%rsi), %rdi + movq %rdi, -64(%rsp) ## 8-byte Spill + movq 32(%rsi), %rax + movq %rcx, %rdx + mulxq %rax, %rdx, %rbp + movq %rbp, -56(%rsp) ## 8-byte Spill + addq %r15, %rdx + movq %rdx, -40(%rsp) ## 8-byte Spill + movq 
%rdi, %rdx + mulxq %rax, %r15, %rdx + movq %rdx, -32(%rsp) ## 8-byte Spill + adcq %rbx, %r15 + movq 16(%rsi), %r10 + movq %r10, %rdx + mulxq %rax, %r14, %rbx + adcq %r8, %r14 + movq 24(%rsi), %r8 + movq %r8, %rdx + mulxq %rax, %rbp, %rdi + adcq %r12, %rbp + movq %rax, %rdx + mulxq %rax, %r11, %r12 + adcq %r13, %r11 + movq 40(%rsi), %rsi + movq %rsi, %rdx + mulxq %rax, %r13, %rdx + movq %rdx, -48(%rsp) ## 8-byte Spill + adcq %r13, %r9 + sbbq %rax, %rax + andl $1, %eax + addq -56(%rsp), %r15 ## 8-byte Folded Reload + adcq -32(%rsp), %r14 ## 8-byte Folded Reload + adcq %rbx, %rbp + adcq %rdi, %r11 + adcq %r12, %r9 + adcq %rdx, %rax + movq %rcx, %rdx + mulxq %rsi, %r12, %rcx + addq %r15, %r12 + movq -64(%rsp), %rdx ## 8-byte Reload + mulxq %rsi, %rdi, %r15 + adcq %r14, %rdi + movq %r10, %rdx + mulxq %rsi, %rbx, %r10 + adcq %rbp, %rbx + movq %r8, %rdx + mulxq %rsi, %rbp, %r8 + adcq %r11, %rbp + adcq %r13, %r9 + movq %rsi, %rdx + mulxq %rsi, %rsi, %r11 + adcq %rax, %rsi + sbbq %rax, %rax + andl $1, %eax + addq %rcx, %rdi + movq -24(%rsp), %rdx ## 8-byte Reload + movq -40(%rsp), %rcx ## 8-byte Reload + movq %rcx, 32(%rdx) + movq %r12, 40(%rdx) + movq %rdi, 48(%rdx) + adcq %r15, %rbx + movq %rbx, 56(%rdx) + adcq %r10, %rbp + movq %rbp, 64(%rdx) + adcq %r8, %r9 + movq %r9, 72(%rdx) + adcq -48(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 80(%rdx) + adcq %r11, %rax + movq %rax, 88(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mont6Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mont6Lbmi2: ## @mcl_fp_mont6Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $32, %rsp + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rdi, 24(%rsp) ## 8-byte Spill + movq 40(%rsi), %rdi + movq %rdi, -96(%rsp) ## 8-byte Spill + movq (%rdx), %rax + movq %rdi, %rdx + mulxq %rax, %r11, %rbx + movq 32(%rsi), %rdx + movq %rdx, (%rsp) ## 8-byte Spill + mulxq %rax, %r14, %r12 + movq 24(%rsi), %rdx + movq %rdx, -8(%rsp) ## 8-byte Spill + mulxq %rax, %r15, %r13 + movq 16(%rsi), %rdx + movq %rdx, -16(%rsp) ## 8-byte Spill + mulxq %rax, %r8, %r10 + movq (%rsi), %rbp + movq %rbp, -24(%rsp) ## 8-byte Spill + movq 8(%rsi), %rdx + movq %rdx, -32(%rsp) ## 8-byte Spill + mulxq %rax, %rdi, %r9 + movq %rbp, %rdx + mulxq %rax, %rdx, %rbp + movq %rdx, -128(%rsp) ## 8-byte Spill + addq %rdi, %rbp + adcq %r8, %r9 + adcq %r15, %r10 + adcq %r14, %r13 + adcq %r11, %r12 + adcq $0, %rbx + movq %rbx, -120(%rsp) ## 8-byte Spill + movq -8(%rcx), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + imulq %rax, %rdx + movq 40(%rcx), %rax + movq %rax, -40(%rsp) ## 8-byte Spill + mulxq %rax, %rax, %r15 + movq %rax, -112(%rsp) ## 8-byte Spill + movq 16(%rcx), %rax + movq %rax, -48(%rsp) ## 8-byte Spill + mulxq %rax, %r8, %rax + movq 8(%rcx), %rsi + movq %rsi, -56(%rsp) ## 8-byte Spill + mulxq %rsi, %rbx, %r11 + movq (%rcx), %rsi + movq %rsi, -64(%rsp) ## 8-byte Spill + mulxq %rsi, %rsi, %r14 + addq %rbx, %r14 + adcq %r8, %r11 + movq 24(%rcx), %rdi + movq %rdi, -72(%rsp) ## 8-byte Spill + mulxq %rdi, %rdi, %r8 + adcq %rax, %rdi + movq 32(%rcx), %rax + movq %rax, -80(%rsp) ## 8-byte Spill + mulxq %rax, %rbx, %rax + adcq %r8, %rbx + adcq -112(%rsp), %rax ## 8-byte Folded Reload + adcq $0, %r15 + addq -128(%rsp), %rsi ## 8-byte Folded Reload + adcq %rbp, %r14 + adcq %r9, %r11 + adcq %r10, %rdi + adcq %r13, %rbx + adcq %r12, %rax + adcq -120(%rsp), %r15 ## 8-byte Folded Reload + sbbq %r10, %r10 + andl $1, %r10d + movq -88(%rsp), %rcx ## 8-byte Reload + movq 8(%rcx), %rdx + mulxq 
-96(%rsp), %rsi, %rcx ## 8-byte Folded Reload + movq %rsi, -112(%rsp) ## 8-byte Spill + movq %rcx, -120(%rsp) ## 8-byte Spill + mulxq (%rsp), %rcx, %r13 ## 8-byte Folded Reload + movq %rcx, -104(%rsp) ## 8-byte Spill + mulxq -8(%rsp), %r12, %rcx ## 8-byte Folded Reload + movq %rcx, -128(%rsp) ## 8-byte Spill + mulxq -32(%rsp), %rbp, %rcx ## 8-byte Folded Reload + mulxq -24(%rsp), %rsi, %r9 ## 8-byte Folded Reload + addq %rbp, %r9 + mulxq -16(%rsp), %rbp, %r8 ## 8-byte Folded Reload + adcq %rcx, %rbp + adcq %r12, %r8 + movq -128(%rsp), %rdx ## 8-byte Reload + adcq -104(%rsp), %rdx ## 8-byte Folded Reload + adcq -112(%rsp), %r13 ## 8-byte Folded Reload + movq -120(%rsp), %rcx ## 8-byte Reload + adcq $0, %rcx + addq %r14, %rsi + adcq %r11, %r9 + adcq %rdi, %rbp + adcq %rbx, %r8 + adcq %rax, %rdx + movq %rdx, -128(%rsp) ## 8-byte Spill + adcq %r15, %r13 + adcq %r10, %rcx + movq %rcx, -120(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rsi, %rbx + movq %rbx, %rdx + imulq 8(%rsp), %rdx ## 8-byte Folded Reload + mulxq -40(%rsp), %rax, %r12 ## 8-byte Folded Reload + movq %rax, -104(%rsp) ## 8-byte Spill + mulxq -80(%rsp), %r14, %r11 ## 8-byte Folded Reload + mulxq -56(%rsp), %rcx, %rax ## 8-byte Folded Reload + mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload + addq %rcx, %rsi + mulxq -48(%rsp), %rcx, %r10 ## 8-byte Folded Reload + adcq %rax, %rcx + mulxq -72(%rsp), %rax, %r15 ## 8-byte Folded Reload + adcq %r10, %rax + adcq %r14, %r15 + adcq -104(%rsp), %r11 ## 8-byte Folded Reload + adcq $0, %r12 + addq %rbx, %rdi + adcq %r9, %rsi + adcq %rbp, %rcx + adcq %r8, %rax + adcq -128(%rsp), %r15 ## 8-byte Folded Reload + adcq %r13, %r11 + adcq -120(%rsp), %r12 ## 8-byte Folded Reload + movq -112(%rsp), %r10 ## 8-byte Reload + adcq $0, %r10 + movq -88(%rsp), %rdx ## 8-byte Reload + movq 16(%rdx), %rdx + mulxq -96(%rsp), %rbp, %rdi ## 8-byte Folded Reload + movq %rbp, -112(%rsp) ## 8-byte Spill + movq %rdi, -120(%rsp) ## 8-byte Spill + mulxq (%rsp), %rdi, %rbp ## 8-byte Folded Reload + movq %rdi, -104(%rsp) ## 8-byte Spill + mulxq -8(%rsp), %rdi, %r13 ## 8-byte Folded Reload + movq %rdi, 16(%rsp) ## 8-byte Spill + mulxq -32(%rsp), %rdi, %r14 ## 8-byte Folded Reload + mulxq -24(%rsp), %rbx, %r9 ## 8-byte Folded Reload + movq %rbx, -128(%rsp) ## 8-byte Spill + addq %rdi, %r9 + mulxq -16(%rsp), %rbx, %r8 ## 8-byte Folded Reload + adcq %r14, %rbx + adcq 16(%rsp), %r8 ## 8-byte Folded Reload + adcq -104(%rsp), %r13 ## 8-byte Folded Reload + adcq -112(%rsp), %rbp ## 8-byte Folded Reload + movq -120(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + movq -128(%rsp), %rdi ## 8-byte Reload + addq %rsi, %rdi + movq %rdi, -128(%rsp) ## 8-byte Spill + adcq %rcx, %r9 + adcq %rax, %rbx + adcq %r15, %r8 + adcq %r11, %r13 + adcq %r12, %rbp + adcq %r10, %rdx + movq %rdx, -120(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rdi, %rdx + imulq 8(%rsp), %rdx ## 8-byte Folded Reload + mulxq -40(%rsp), %rax, %r11 ## 8-byte Folded Reload + movq %rax, -104(%rsp) ## 8-byte Spill + mulxq -80(%rsp), %r15, %r12 ## 8-byte Folded Reload + mulxq -56(%rsp), %rax, %rcx ## 8-byte Folded Reload + mulxq -64(%rsp), %rdi, %r14 ## 8-byte Folded Reload + addq %rax, %r14 + mulxq -48(%rsp), %rax, %r10 ## 8-byte Folded Reload + adcq %rcx, %rax + mulxq -72(%rsp), %rsi, %rcx ## 8-byte Folded Reload + adcq %r10, %rsi + adcq %r15, %rcx + adcq -104(%rsp), %r12 ## 8-byte Folded Reload + adcq $0, %r11 + addq -128(%rsp), %rdi ## 8-byte Folded 
Reload + adcq %r9, %r14 + adcq %rbx, %rax + adcq %r8, %rsi + adcq %r13, %rcx + adcq %rbp, %r12 + adcq -120(%rsp), %r11 ## 8-byte Folded Reload + adcq $0, -112(%rsp) ## 8-byte Folded Spill + movq -88(%rsp), %rdx ## 8-byte Reload + movq 24(%rdx), %rdx + mulxq -96(%rsp), %rbp, %rdi ## 8-byte Folded Reload + movq %rbp, -128(%rsp) ## 8-byte Spill + movq %rdi, -120(%rsp) ## 8-byte Spill + mulxq (%rsp), %rdi, %r15 ## 8-byte Folded Reload + movq %rdi, -104(%rsp) ## 8-byte Spill + mulxq -8(%rsp), %r10, %rbp ## 8-byte Folded Reload + mulxq -32(%rsp), %rbx, %r9 ## 8-byte Folded Reload + mulxq -24(%rsp), %r13, %rdi ## 8-byte Folded Reload + addq %rbx, %rdi + mulxq -16(%rsp), %rbx, %r8 ## 8-byte Folded Reload + adcq %r9, %rbx + adcq %r10, %r8 + adcq -104(%rsp), %rbp ## 8-byte Folded Reload + adcq -128(%rsp), %r15 ## 8-byte Folded Reload + movq -120(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r14, %r13 + adcq %rax, %rdi + adcq %rsi, %rbx + adcq %rcx, %r8 + adcq %r12, %rbp + adcq %r11, %r15 + adcq -112(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -128(%rsp) ## 8-byte Spill + movq %r13, %rdx + imulq 8(%rsp), %rdx ## 8-byte Folded Reload + mulxq -40(%rsp), %rax, %r10 ## 8-byte Folded Reload + movq %rax, -112(%rsp) ## 8-byte Spill + mulxq -80(%rsp), %rax, %r12 ## 8-byte Folded Reload + movq %rax, -104(%rsp) ## 8-byte Spill + mulxq -56(%rsp), %rax, %r11 ## 8-byte Folded Reload + mulxq -64(%rsp), %rcx, %rsi ## 8-byte Folded Reload + addq %rax, %rsi + mulxq -48(%rsp), %r14, %r9 ## 8-byte Folded Reload + adcq %r11, %r14 + mulxq -72(%rsp), %rax, %r11 ## 8-byte Folded Reload + adcq %r9, %rax + adcq -104(%rsp), %r11 ## 8-byte Folded Reload + adcq -112(%rsp), %r12 ## 8-byte Folded Reload + adcq $0, %r10 + addq %r13, %rcx + adcq %rdi, %rsi + adcq %rbx, %r14 + adcq %r8, %rax + adcq %rbp, %r11 + adcq %r15, %r12 + adcq -120(%rsp), %r10 ## 8-byte Folded Reload + movq -128(%rsp), %r15 ## 8-byte Reload + adcq $0, %r15 + movq -88(%rsp), %rcx ## 8-byte Reload + movq 32(%rcx), %rdx + mulxq -96(%rsp), %rdi, %rcx ## 8-byte Folded Reload + movq %rdi, -112(%rsp) ## 8-byte Spill + movq %rcx, -120(%rsp) ## 8-byte Spill + mulxq (%rsp), %rdi, %rcx ## 8-byte Folded Reload + movq %rdi, 16(%rsp) ## 8-byte Spill + movq %rcx, -128(%rsp) ## 8-byte Spill + mulxq -8(%rsp), %r13, %rbp ## 8-byte Folded Reload + mulxq -32(%rsp), %rdi, %rcx ## 8-byte Folded Reload + mulxq -24(%rsp), %rbx, %r8 ## 8-byte Folded Reload + movq %rbx, -104(%rsp) ## 8-byte Spill + addq %rdi, %r8 + mulxq -16(%rsp), %rbx, %r9 ## 8-byte Folded Reload + adcq %rcx, %rbx + adcq %r13, %r9 + adcq 16(%rsp), %rbp ## 8-byte Folded Reload + movq -128(%rsp), %rdx ## 8-byte Reload + adcq -112(%rsp), %rdx ## 8-byte Folded Reload + movq -120(%rsp), %rcx ## 8-byte Reload + adcq $0, %rcx + movq -104(%rsp), %rdi ## 8-byte Reload + addq %rsi, %rdi + movq %rdi, -104(%rsp) ## 8-byte Spill + adcq %r14, %r8 + adcq %rax, %rbx + adcq %r11, %r9 + adcq %r12, %rbp + adcq %r10, %rdx + movq %rdx, -128(%rsp) ## 8-byte Spill + adcq %r15, %rcx + movq %rcx, -120(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, %r13 + movq %rdi, %rdx + imulq 8(%rsp), %rdx ## 8-byte Folded Reload + mulxq -40(%rsp), %r14, %rax ## 8-byte Folded Reload + movq %rax, -112(%rsp) ## 8-byte Spill + mulxq -80(%rsp), %r12, %r15 ## 8-byte Folded Reload + mulxq -56(%rsp), %rcx, %rax ## 8-byte Folded Reload + mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload + addq %rcx, %rsi + mulxq -48(%rsp), %r11, %r10 ## 8-byte Folded 
Reload + adcq %rax, %r11 + mulxq -72(%rsp), %rax, %rcx ## 8-byte Folded Reload + adcq %r10, %rax + adcq %r12, %rcx + adcq %r14, %r15 + movq -112(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq -104(%rsp), %rdi ## 8-byte Folded Reload + adcq %r8, %rsi + adcq %rbx, %r11 + adcq %r9, %rax + adcq %rbp, %rcx + adcq -128(%rsp), %r15 ## 8-byte Folded Reload + adcq -120(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + adcq $0, %r13 + movq %r13, -120(%rsp) ## 8-byte Spill + movq -88(%rsp), %rdx ## 8-byte Reload + movq 40(%rdx), %rdx + mulxq -96(%rsp), %rbp, %rdi ## 8-byte Folded Reload + movq %rbp, -128(%rsp) ## 8-byte Spill + movq %rdi, -88(%rsp) ## 8-byte Spill + mulxq (%rsp), %rbx, %rdi ## 8-byte Folded Reload + movq %rdi, -96(%rsp) ## 8-byte Spill + mulxq -8(%rsp), %r10, %rbp ## 8-byte Folded Reload + mulxq -16(%rsp), %r8, %r12 ## 8-byte Folded Reload + mulxq -32(%rsp), %rdi, %r14 ## 8-byte Folded Reload + mulxq -24(%rsp), %r13, %r9 ## 8-byte Folded Reload + addq %rdi, %r9 + adcq %r8, %r14 + adcq %r10, %r12 + adcq %rbx, %rbp + movq -96(%rsp), %rdi ## 8-byte Reload + adcq -128(%rsp), %rdi ## 8-byte Folded Reload + movq -88(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %rsi, %r13 + adcq %r11, %r9 + adcq %rax, %r14 + adcq %rcx, %r12 + adcq %r15, %rbp + adcq -112(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, -96(%rsp) ## 8-byte Spill + adcq -120(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -88(%rsp) ## 8-byte Spill + sbbq %rcx, %rcx + movq 8(%rsp), %rdx ## 8-byte Reload + imulq %r13, %rdx + mulxq -64(%rsp), %r8, %rax ## 8-byte Folded Reload + mulxq -56(%rsp), %r10, %rdi ## 8-byte Folded Reload + addq %rax, %r10 + mulxq -48(%rsp), %rsi, %rax ## 8-byte Folded Reload + adcq %rdi, %rsi + mulxq -72(%rsp), %rbx, %r11 ## 8-byte Folded Reload + adcq %rax, %rbx + mulxq -80(%rsp), %rdi, %r15 ## 8-byte Folded Reload + adcq %r11, %rdi + mulxq -40(%rsp), %rax, %r11 ## 8-byte Folded Reload + adcq %r15, %rax + adcq $0, %r11 + andl $1, %ecx + addq %r13, %r8 + adcq %r9, %r10 + adcq %r14, %rsi + adcq %r12, %rbx + adcq %rbp, %rdi + adcq -96(%rsp), %rax ## 8-byte Folded Reload + adcq -88(%rsp), %r11 ## 8-byte Folded Reload + adcq $0, %rcx + movq %r10, %rbp + subq -64(%rsp), %rbp ## 8-byte Folded Reload + movq %rsi, %rdx + sbbq -56(%rsp), %rdx ## 8-byte Folded Reload + movq %rbx, %r8 + sbbq -48(%rsp), %r8 ## 8-byte Folded Reload + movq %rdi, %r9 + sbbq -72(%rsp), %r9 ## 8-byte Folded Reload + movq %rax, %r14 + sbbq -80(%rsp), %r14 ## 8-byte Folded Reload + movq %r11, %r15 + sbbq -40(%rsp), %r15 ## 8-byte Folded Reload + sbbq $0, %rcx + andl $1, %ecx + cmovneq %rdi, %r9 + testb %cl, %cl + cmovneq %r10, %rbp + movq 24(%rsp), %rcx ## 8-byte Reload + movq %rbp, (%rcx) + cmovneq %rsi, %rdx + movq %rdx, 8(%rcx) + cmovneq %rbx, %r8 + movq %r8, 16(%rcx) + movq %r9, 24(%rcx) + cmovneq %rax, %r14 + movq %r14, 32(%rcx) + cmovneq %r11, %r15 + movq %r15, 40(%rcx) + addq $32, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF6Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montNF6Lbmi2: ## @mcl_fp_montNF6Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rdi, -8(%rsp) ## 8-byte Spill + movq (%rsi), %rax + movq %rax, -112(%rsp) ## 8-byte Spill + movq 8(%rsi), %rdi + movq %rdi, -128(%rsp) ## 8-byte Spill + movq (%rdx), %rbp + movq %rdi, %rdx + mulxq %rbp, %rdi, %rbx + movq %rax, %rdx + mulxq %rbp, %r9, %r14 + movq 16(%rsi), %rdx + movq %rdx, -56(%rsp) ## 
8-byte Spill + addq %rdi, %r14 + mulxq %rbp, %rdi, %r8 + adcq %rbx, %rdi + movq 24(%rsi), %rdx + movq %rdx, -64(%rsp) ## 8-byte Spill + mulxq %rbp, %rbx, %r10 + adcq %r8, %rbx + movq 32(%rsi), %rdx + movq %rdx, -72(%rsp) ## 8-byte Spill + mulxq %rbp, %r8, %r11 + adcq %r10, %r8 + movq 40(%rsi), %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + mulxq %rbp, %rsi, %r15 + adcq %r11, %rsi + adcq $0, %r15 + movq -8(%rcx), %rax + movq %rax, -104(%rsp) ## 8-byte Spill + movq %r9, %rdx + imulq %rax, %rdx + movq (%rcx), %rax + movq %rax, -96(%rsp) ## 8-byte Spill + mulxq %rax, %rbp, %rax + movq %rax, -88(%rsp) ## 8-byte Spill + addq %r9, %rbp + movq 8(%rcx), %rax + movq %rax, -16(%rsp) ## 8-byte Spill + mulxq %rax, %r12, %r9 + adcq %r14, %r12 + movq 16(%rcx), %rax + movq %rax, -24(%rsp) ## 8-byte Spill + mulxq %rax, %r14, %rax + adcq %rdi, %r14 + movq 24(%rcx), %rdi + movq %rdi, -32(%rsp) ## 8-byte Spill + mulxq %rdi, %r13, %rdi + adcq %rbx, %r13 + movq 32(%rcx), %rbp + movq %rbp, -40(%rsp) ## 8-byte Spill + mulxq %rbp, %r11, %rbx + adcq %r8, %r11 + movq 40(%rcx), %rcx + movq %rcx, -48(%rsp) ## 8-byte Spill + mulxq %rcx, %r10, %rcx + adcq %rsi, %r10 + adcq $0, %r15 + addq -88(%rsp), %r12 ## 8-byte Folded Reload + adcq %r9, %r14 + adcq %rax, %r13 + adcq %rdi, %r11 + adcq %rbx, %r10 + adcq %rcx, %r15 + movq -120(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdx + mulxq -128(%rsp), %rcx, %rsi ## 8-byte Folded Reload + mulxq -112(%rsp), %rbx, %rax ## 8-byte Folded Reload + addq %rcx, %rax + mulxq -56(%rsp), %rcx, %rdi ## 8-byte Folded Reload + adcq %rsi, %rcx + mulxq -64(%rsp), %rsi, %r8 ## 8-byte Folded Reload + adcq %rdi, %rsi + mulxq -72(%rsp), %rdi, %rbp ## 8-byte Folded Reload + movq %rbp, -88(%rsp) ## 8-byte Spill + adcq %r8, %rdi + mulxq -80(%rsp), %r8, %r9 ## 8-byte Folded Reload + adcq -88(%rsp), %r8 ## 8-byte Folded Reload + adcq $0, %r9 + addq %r12, %rbx + adcq %r14, %rax + adcq %r13, %rcx + adcq %r11, %rsi + adcq %r10, %rdi + adcq %r15, %r8 + adcq $0, %r9 + movq %rbx, %rdx + imulq -104(%rsp), %rdx ## 8-byte Folded Reload + mulxq -96(%rsp), %rbp, %r13 ## 8-byte Folded Reload + addq %rbx, %rbp + mulxq -16(%rsp), %r11, %rbx ## 8-byte Folded Reload + adcq %rax, %r11 + mulxq -24(%rsp), %r14, %rax ## 8-byte Folded Reload + adcq %rcx, %r14 + mulxq -32(%rsp), %r10, %rcx ## 8-byte Folded Reload + adcq %rsi, %r10 + mulxq -40(%rsp), %r15, %rsi ## 8-byte Folded Reload + adcq %rdi, %r15 + mulxq -48(%rsp), %r12, %rdx ## 8-byte Folded Reload + adcq %r8, %r12 + adcq $0, %r9 + addq %r13, %r11 + adcq %rbx, %r14 + adcq %rax, %r10 + adcq %rcx, %r15 + adcq %rsi, %r12 + adcq %rdx, %r9 + movq -120(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rdx + mulxq -128(%rsp), %rcx, %rax ## 8-byte Folded Reload + mulxq -112(%rsp), %r13, %rdi ## 8-byte Folded Reload + addq %rcx, %rdi + mulxq -56(%rsp), %rbx, %rcx ## 8-byte Folded Reload + adcq %rax, %rbx + mulxq -64(%rsp), %rsi, %rbp ## 8-byte Folded Reload + adcq %rcx, %rsi + mulxq -72(%rsp), %rax, %rcx ## 8-byte Folded Reload + movq %rcx, -88(%rsp) ## 8-byte Spill + adcq %rbp, %rax + mulxq -80(%rsp), %r8, %rcx ## 8-byte Folded Reload + adcq -88(%rsp), %r8 ## 8-byte Folded Reload + adcq $0, %rcx + addq %r11, %r13 + adcq %r14, %rdi + adcq %r10, %rbx + adcq %r15, %rsi + adcq %r12, %rax + adcq %r9, %r8 + adcq $0, %rcx + movq %r13, %rdx + imulq -104(%rsp), %rdx ## 8-byte Folded Reload + mulxq -96(%rsp), %rbp, %r12 ## 8-byte Folded Reload + addq %r13, %rbp + mulxq -16(%rsp), %r11, %rbp ## 8-byte Folded Reload + adcq %rdi, %r11 + mulxq -24(%rsp), %r9, %rdi ## 8-byte Folded Reload + 
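+## Montgomery (NF) reduction step: %rdx holds m = t0*N' from the imulq above; the surrounding mulxq/adcq chain folds m*N into the running sum limb by limb so the low limb cancels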
adcq %rbx, %r9 + mulxq -32(%rsp), %r10, %rbx ## 8-byte Folded Reload + adcq %rsi, %r10 + mulxq -40(%rsp), %r14, %rsi ## 8-byte Folded Reload + adcq %rax, %r14 + mulxq -48(%rsp), %r15, %rax ## 8-byte Folded Reload + adcq %r8, %r15 + adcq $0, %rcx + addq %r12, %r11 + adcq %rbp, %r9 + adcq %rdi, %r10 + adcq %rbx, %r14 + adcq %rsi, %r15 + adcq %rax, %rcx + movq -120(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rdx + mulxq -128(%rsp), %rsi, %rax ## 8-byte Folded Reload + mulxq -112(%rsp), %r13, %rbx ## 8-byte Folded Reload + addq %rsi, %rbx + mulxq -56(%rsp), %rdi, %rbp ## 8-byte Folded Reload + adcq %rax, %rdi + mulxq -64(%rsp), %rsi, %r8 ## 8-byte Folded Reload + adcq %rbp, %rsi + mulxq -72(%rsp), %rax, %rbp ## 8-byte Folded Reload + adcq %r8, %rax + mulxq -80(%rsp), %r8, %r12 ## 8-byte Folded Reload + adcq %rbp, %r8 + adcq $0, %r12 + addq %r11, %r13 + adcq %r9, %rbx + adcq %r10, %rdi + adcq %r14, %rsi + adcq %r15, %rax + adcq %rcx, %r8 + adcq $0, %r12 + movq %r13, %rdx + imulq -104(%rsp), %rdx ## 8-byte Folded Reload + mulxq -96(%rsp), %rbp, %rcx ## 8-byte Folded Reload + addq %r13, %rbp + mulxq -16(%rsp), %r11, %rbp ## 8-byte Folded Reload + adcq %rbx, %r11 + mulxq -24(%rsp), %r9, %rbx ## 8-byte Folded Reload + adcq %rdi, %r9 + mulxq -32(%rsp), %r10, %rdi ## 8-byte Folded Reload + adcq %rsi, %r10 + mulxq -40(%rsp), %r14, %rsi ## 8-byte Folded Reload + adcq %rax, %r14 + mulxq -48(%rsp), %r15, %rax ## 8-byte Folded Reload + adcq %r8, %r15 + adcq $0, %r12 + addq %rcx, %r11 + adcq %rbp, %r9 + adcq %rbx, %r10 + adcq %rdi, %r14 + adcq %rsi, %r15 + adcq %rax, %r12 + movq -120(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rdx + mulxq -128(%rsp), %rsi, %rcx ## 8-byte Folded Reload + mulxq -112(%rsp), %r13, %rax ## 8-byte Folded Reload + addq %rsi, %rax + mulxq -56(%rsp), %rbx, %rsi ## 8-byte Folded Reload + adcq %rcx, %rbx + mulxq -64(%rsp), %rdi, %rcx ## 8-byte Folded Reload + adcq %rsi, %rdi + mulxq -72(%rsp), %rsi, %rbp ## 8-byte Folded Reload + adcq %rcx, %rsi + mulxq -80(%rsp), %r8, %rcx ## 8-byte Folded Reload + adcq %rbp, %r8 + adcq $0, %rcx + addq %r11, %r13 + adcq %r9, %rax + adcq %r10, %rbx + adcq %r14, %rdi + adcq %r15, %rsi + adcq %r12, %r8 + adcq $0, %rcx + movq %r13, %rdx + imulq -104(%rsp), %rdx ## 8-byte Folded Reload + mulxq -96(%rsp), %rbp, %r9 ## 8-byte Folded Reload + addq %r13, %rbp + mulxq -16(%rsp), %r13, %rbp ## 8-byte Folded Reload + adcq %rax, %r13 + mulxq -24(%rsp), %r11, %rax ## 8-byte Folded Reload + adcq %rbx, %r11 + mulxq -32(%rsp), %r10, %rbx ## 8-byte Folded Reload + adcq %rdi, %r10 + mulxq -40(%rsp), %r14, %rdi ## 8-byte Folded Reload + adcq %rsi, %r14 + mulxq -48(%rsp), %rsi, %rdx ## 8-byte Folded Reload + adcq %r8, %rsi + adcq $0, %rcx + addq %r9, %r13 + adcq %rbp, %r11 + adcq %rax, %r10 + adcq %rbx, %r14 + adcq %rdi, %rsi + adcq %rdx, %rcx + movq -120(%rsp), %rax ## 8-byte Reload + movq 40(%rax), %rdx + mulxq -128(%rsp), %rdi, %rax ## 8-byte Folded Reload + mulxq -112(%rsp), %r8, %rbx ## 8-byte Folded Reload + addq %rdi, %rbx + mulxq -56(%rsp), %rdi, %rbp ## 8-byte Folded Reload + adcq %rax, %rdi + mulxq -64(%rsp), %r15, %rax ## 8-byte Folded Reload + adcq %rbp, %r15 + mulxq -72(%rsp), %r12, %rbp ## 8-byte Folded Reload + adcq %rax, %r12 + mulxq -80(%rsp), %r9, %rax ## 8-byte Folded Reload + adcq %rbp, %r9 + adcq $0, %rax + addq %r13, %r8 + adcq %r11, %rbx + adcq %r10, %rdi + adcq %r14, %r15 + adcq %rsi, %r12 + adcq %rcx, %r9 + adcq $0, %rax + movq -104(%rsp), %rdx ## 8-byte Reload + imulq %r8, %rdx + mulxq -96(%rsp), %rcx, %rsi ## 8-byte Folded Reload + movq 
%rsi, -104(%rsp) ## 8-byte Spill + addq %r8, %rcx + movq -16(%rsp), %r11 ## 8-byte Reload + mulxq %r11, %r8, %rcx + movq %rcx, -112(%rsp) ## 8-byte Spill + adcq %rbx, %r8 + movq -24(%rsp), %r10 ## 8-byte Reload + mulxq %r10, %rsi, %rcx + movq %rcx, -120(%rsp) ## 8-byte Spill + adcq %rdi, %rsi + movq -32(%rsp), %r13 ## 8-byte Reload + mulxq %r13, %rdi, %rcx + movq %rcx, -128(%rsp) ## 8-byte Spill + adcq %r15, %rdi + movq -40(%rsp), %rcx ## 8-byte Reload + mulxq %rcx, %r15, %rbx + adcq %r12, %r15 + movq -48(%rsp), %r14 ## 8-byte Reload + mulxq %r14, %r12, %rbp + adcq %r9, %r12 + adcq $0, %rax + addq -104(%rsp), %r8 ## 8-byte Folded Reload + adcq -112(%rsp), %rsi ## 8-byte Folded Reload + adcq -120(%rsp), %rdi ## 8-byte Folded Reload + adcq -128(%rsp), %r15 ## 8-byte Folded Reload + adcq %rbx, %r12 + adcq %rbp, %rax + movq %r8, %rbp + subq -96(%rsp), %rbp ## 8-byte Folded Reload + movq %rsi, %rbx + sbbq %r11, %rbx + movq %rdi, %r11 + sbbq %r10, %r11 + movq %r15, %r10 + sbbq %r13, %r10 + movq %r12, %r9 + sbbq %rcx, %r9 + movq %rax, %rcx + sbbq %r14, %rcx + movq %rcx, %rdx + sarq $63, %rdx + cmovsq %r8, %rbp + movq -8(%rsp), %rdx ## 8-byte Reload + movq %rbp, (%rdx) + cmovsq %rsi, %rbx + movq %rbx, 8(%rdx) + cmovsq %rdi, %r11 + movq %r11, 16(%rdx) + cmovsq %r15, %r10 + movq %r10, 24(%rdx) + cmovsq %r12, %r9 + movq %r9, 32(%rdx) + cmovsq %rax, %rcx + movq %rcx, 40(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed6Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montRed6Lbmi2: ## @mcl_fp_montRed6Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + pushq %rax + movq %rdx, %rcx + movq %rdi, (%rsp) ## 8-byte Spill + movq -8(%rcx), %rax + movq %rax, -8(%rsp) ## 8-byte Spill + movq (%rsi), %r9 + movq %r9, %rdx + imulq %rax, %rdx + movq 40(%rcx), %rax + movq %rax, -72(%rsp) ## 8-byte Spill + mulxq %rax, %r12, %r13 + movq 32(%rcx), %rax + movq %rax, -16(%rsp) ## 8-byte Spill + mulxq %rax, %r10, %r8 + movq 24(%rcx), %rax + movq %rax, -48(%rsp) ## 8-byte Spill + mulxq %rax, %r14, %r15 + movq 16(%rcx), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + mulxq %rax, %rbp, %r11 + movq (%rcx), %rdi + movq %rdi, -40(%rsp) ## 8-byte Spill + movq 8(%rcx), %rax + movq %rax, -64(%rsp) ## 8-byte Spill + mulxq %rax, %rax, %rbx + mulxq %rdi, %rdx, %rcx + addq %rax, %rcx + adcq %rbp, %rbx + adcq %r14, %r11 + adcq %r10, %r15 + adcq %r12, %r8 + adcq $0, %r13 + addq %r9, %rdx + adcq 8(%rsi), %rcx + adcq 16(%rsi), %rbx + adcq 24(%rsi), %r11 + adcq 32(%rsi), %r15 + adcq 40(%rsi), %r8 + movq %r8, -112(%rsp) ## 8-byte Spill + adcq 48(%rsi), %r13 + movq %r13, -104(%rsp) ## 8-byte Spill + movq 88(%rsi), %r8 + movq 80(%rsi), %rdx + movq 72(%rsi), %rdi + movq 64(%rsi), %rax + movq 56(%rsi), %r14 + adcq $0, %r14 + adcq $0, %rax + movq %rax, -88(%rsp) ## 8-byte Spill + adcq $0, %rdi + movq %rdi, -96(%rsp) ## 8-byte Spill + adcq $0, %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, -24(%rsp) ## 8-byte Spill + sbbq %r12, %r12 + andl $1, %r12d + movq %rcx, %rdx + imulq -8(%rsp), %rdx ## 8-byte Folded Reload + mulxq -72(%rsp), %rsi, %rax ## 8-byte Folded Reload + movq %rsi, -120(%rsp) ## 8-byte Spill + movq %rax, -128(%rsp) ## 8-byte Spill + mulxq -16(%rsp), %rax, %r13 ## 8-byte Folded Reload + movq %rax, -56(%rsp) ## 8-byte Spill + mulxq -48(%rsp), %rbp, %r10 ## 8-byte Folded Reload + mulxq -32(%rsp), %r9, %r8 ## 8-byte Folded Reload + mulxq -64(%rsp), %rsi, %rdi ## 8-byte Folded Reload + mulxq -40(%rsp), %rdx, %rax ## 8-byte 
Folded Reload + addq %rsi, %rax + adcq %r9, %rdi + adcq %rbp, %r8 + adcq -56(%rsp), %r10 ## 8-byte Folded Reload + adcq -120(%rsp), %r13 ## 8-byte Folded Reload + movq -128(%rsp), %rsi ## 8-byte Reload + adcq $0, %rsi + addq %rcx, %rdx + adcq %rbx, %rax + adcq %r11, %rdi + adcq %r15, %r8 + adcq -112(%rsp), %r10 ## 8-byte Folded Reload + adcq -104(%rsp), %r13 ## 8-byte Folded Reload + adcq %r14, %rsi + movq %rsi, -128(%rsp) ## 8-byte Spill + adcq $0, -88(%rsp) ## 8-byte Folded Spill + adcq $0, -96(%rsp) ## 8-byte Folded Spill + adcq $0, -80(%rsp) ## 8-byte Folded Spill + adcq $0, -24(%rsp) ## 8-byte Folded Spill + adcq $0, %r12 + movq %rax, %rdx + imulq -8(%rsp), %rdx ## 8-byte Folded Reload + mulxq -72(%rsp), %rsi, %rcx ## 8-byte Folded Reload + movq %rsi, -112(%rsp) ## 8-byte Spill + movq %rcx, -104(%rsp) ## 8-byte Spill + movq -16(%rsp), %rbx ## 8-byte Reload + mulxq %rbx, %rcx, %r14 + movq %rcx, -120(%rsp) ## 8-byte Spill + mulxq -48(%rsp), %rcx, %r15 ## 8-byte Folded Reload + movq %rcx, -56(%rsp) ## 8-byte Spill + mulxq -32(%rsp), %r11, %rbp ## 8-byte Folded Reload + mulxq -64(%rsp), %rsi, %r9 ## 8-byte Folded Reload + mulxq -40(%rsp), %rdx, %rcx ## 8-byte Folded Reload + addq %rsi, %rcx + adcq %r11, %r9 + adcq -56(%rsp), %rbp ## 8-byte Folded Reload + adcq -120(%rsp), %r15 ## 8-byte Folded Reload + adcq -112(%rsp), %r14 ## 8-byte Folded Reload + movq -104(%rsp), %rsi ## 8-byte Reload + adcq $0, %rsi + addq %rax, %rdx + adcq %rdi, %rcx + adcq %r8, %r9 + adcq %r10, %rbp + adcq %r13, %r15 + adcq -128(%rsp), %r14 ## 8-byte Folded Reload + adcq -88(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -104(%rsp) ## 8-byte Spill + adcq $0, -96(%rsp) ## 8-byte Folded Spill + adcq $0, -80(%rsp) ## 8-byte Folded Spill + adcq $0, -24(%rsp) ## 8-byte Folded Spill + adcq $0, %r12 + movq %rcx, %rdx + movq -8(%rsp), %r13 ## 8-byte Reload + imulq %r13, %rdx + mulxq -72(%rsp), %rsi, %rax ## 8-byte Folded Reload + movq %rsi, -112(%rsp) ## 8-byte Spill + movq %rax, -128(%rsp) ## 8-byte Spill + mulxq %rbx, %rsi, %rax + movq %rsi, -120(%rsp) ## 8-byte Spill + movq %rax, -88(%rsp) ## 8-byte Spill + movq -48(%rsp), %r11 ## 8-byte Reload + mulxq %r11, %rax, %rbx + movq %rax, -56(%rsp) ## 8-byte Spill + mulxq -32(%rsp), %r10, %r8 ## 8-byte Folded Reload + mulxq -64(%rsp), %rsi, %rdi ## 8-byte Folded Reload + mulxq -40(%rsp), %rdx, %rax ## 8-byte Folded Reload + addq %rsi, %rax + adcq %r10, %rdi + adcq -56(%rsp), %r8 ## 8-byte Folded Reload + adcq -120(%rsp), %rbx ## 8-byte Folded Reload + movq -88(%rsp), %r10 ## 8-byte Reload + adcq -112(%rsp), %r10 ## 8-byte Folded Reload + movq -128(%rsp), %rsi ## 8-byte Reload + adcq $0, %rsi + addq %rcx, %rdx + adcq %r9, %rax + adcq %rbp, %rdi + adcq %r15, %r8 + adcq %r14, %rbx + adcq -104(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, -88(%rsp) ## 8-byte Spill + adcq -96(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -128(%rsp) ## 8-byte Spill + adcq $0, -80(%rsp) ## 8-byte Folded Spill + adcq $0, -24(%rsp) ## 8-byte Folded Spill + adcq $0, %r12 + movq %rax, %rdx + imulq %r13, %rdx + mulxq -72(%rsp), %rsi, %rcx ## 8-byte Folded Reload + movq %rsi, -104(%rsp) ## 8-byte Spill + movq %rcx, -96(%rsp) ## 8-byte Spill + mulxq -16(%rsp), %rsi, %rcx ## 8-byte Folded Reload + movq %rsi, -112(%rsp) ## 8-byte Spill + mulxq %r11, %rsi, %r13 + movq %rsi, -120(%rsp) ## 8-byte Spill + movq -32(%rsp), %r10 ## 8-byte Reload + mulxq %r10, %r15, %r14 + mulxq -64(%rsp), %rsi, %r9 ## 8-byte Folded Reload + movq -40(%rsp), %r11 ## 8-byte Reload + mulxq %r11, %rdx, %rbp + addq %rsi, %rbp + 
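+## montRed: one word-serial Montgomery reduction round; m = t0*N' (imulq above), and m*N is accumulated so the lowest limb of the remainder clears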
adcq %r15, %r9 + adcq -120(%rsp), %r14 ## 8-byte Folded Reload + adcq -112(%rsp), %r13 ## 8-byte Folded Reload + adcq -104(%rsp), %rcx ## 8-byte Folded Reload + movq -96(%rsp), %rsi ## 8-byte Reload + adcq $0, %rsi + addq %rax, %rdx + adcq %rdi, %rbp + adcq %r8, %r9 + adcq %rbx, %r14 + adcq -88(%rsp), %r13 ## 8-byte Folded Reload + adcq -128(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -128(%rsp) ## 8-byte Spill + adcq -80(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -96(%rsp) ## 8-byte Spill + adcq $0, -24(%rsp) ## 8-byte Folded Spill + adcq $0, %r12 + movq -8(%rsp), %rdx ## 8-byte Reload + imulq %rbp, %rdx + mulxq -72(%rsp), %rax, %rsi ## 8-byte Folded Reload + movq %rax, -80(%rsp) ## 8-byte Spill + mulxq %r10, %rax, %r15 + mulxq %r11, %r10, %rdi + mulxq -64(%rsp), %rbx, %r8 ## 8-byte Folded Reload + addq %rdi, %rbx + adcq %rax, %r8 + mulxq -48(%rsp), %rax, %rdi ## 8-byte Folded Reload + adcq %r15, %rax + movq -16(%rsp), %r15 ## 8-byte Reload + mulxq %r15, %rdx, %r11 + adcq %rdi, %rdx + adcq -80(%rsp), %r11 ## 8-byte Folded Reload + adcq $0, %rsi + addq %rbp, %r10 + adcq %r9, %rbx + adcq %r14, %r8 + adcq %r13, %rax + adcq -128(%rsp), %rdx ## 8-byte Folded Reload + adcq -96(%rsp), %r11 ## 8-byte Folded Reload + adcq -24(%rsp), %rsi ## 8-byte Folded Reload + adcq $0, %r12 + movq %rbx, %rcx + subq -40(%rsp), %rcx ## 8-byte Folded Reload + movq %r8, %rdi + sbbq -64(%rsp), %rdi ## 8-byte Folded Reload + movq %rax, %rbp + sbbq -32(%rsp), %rbp ## 8-byte Folded Reload + movq %rdx, %r9 + sbbq -48(%rsp), %r9 ## 8-byte Folded Reload + movq %r11, %r10 + sbbq %r15, %r10 + movq %rsi, %r15 + sbbq -72(%rsp), %r15 ## 8-byte Folded Reload + sbbq $0, %r12 + andl $1, %r12d + cmovneq %rsi, %r15 + testb %r12b, %r12b + cmovneq %rbx, %rcx + movq (%rsp), %rsi ## 8-byte Reload + movq %rcx, (%rsi) + cmovneq %r8, %rdi + movq %rdi, 8(%rsi) + cmovneq %rax, %rbp + movq %rbp, 16(%rsi) + cmovneq %rdx, %r9 + movq %r9, 24(%rsi) + cmovneq %r11, %r10 + movq %r10, 32(%rsi) + movq %r15, 40(%rsi) + addq $8, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_addPre6Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addPre6Lbmi2: ## @mcl_fp_addPre6Lbmi2 +## BB#0: + pushq %r14 + pushq %rbx + movq 40(%rdx), %r8 + movq 40(%rsi), %r11 + movq 32(%rdx), %r9 + movq 24(%rdx), %r10 + movq 24(%rsi), %rax + movq 32(%rsi), %r14 + movq 16(%rdx), %rbx + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rbx + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %rbx, 16(%rdi) + adcq %r10, %rax + movq %rax, 24(%rdi) + adcq %r9, %r14 + movq %r14, 32(%rdi) + adcq %r8, %r11 + movq %r11, 40(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r14 + retq + + .globl _mcl_fp_subPre6Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subPre6Lbmi2: ## @mcl_fp_subPre6Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 40(%rdx), %r8 + movq 40(%rsi), %r9 + movq 32(%rsi), %r10 + movq 24(%rsi), %r11 + movq 16(%rsi), %rcx + movq (%rsi), %rbx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rbx + sbbq 8(%rdx), %rsi + movq 24(%rdx), %r14 + movq 32(%rdx), %r15 + sbbq 16(%rdx), %rcx + movq %rbx, (%rdi) + movq %rsi, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r14, %r11 + movq %r11, 24(%rdi) + sbbq %r15, %r10 + movq %r10, 32(%rdi) + sbbq %r8, %r9 + movq %r9, 40(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_shr1_6Lbmi2 + .p2align 4, 0x90 +_mcl_fp_shr1_6Lbmi2: ## @mcl_fp_shr1_6Lbmi2 +## BB#0: + movq 40(%rsi), %r8 + movq 32(%rsi), 
%r9 + movq 24(%rsi), %rdx + movq 16(%rsi), %rax + movq (%rsi), %rcx + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rcx + movq %rcx, (%rdi) + shrdq $1, %rax, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rdx, %rax + movq %rax, 16(%rdi) + shrdq $1, %r9, %rdx + movq %rdx, 24(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 32(%rdi) + shrq %r8 + movq %r8, 40(%rdi) + retq + + .globl _mcl_fp_add6Lbmi2 + .p2align 4, 0x90 +_mcl_fp_add6Lbmi2: ## @mcl_fp_add6Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 40(%rdx), %r14 + movq 40(%rsi), %r8 + movq 32(%rdx), %r15 + movq 24(%rdx), %rbx + movq 24(%rsi), %r10 + movq 32(%rsi), %r9 + movq 16(%rdx), %r11 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r11 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r11, 16(%rdi) + adcq %rbx, %r10 + movq %r10, 24(%rdi) + adcq %r15, %r9 + movq %r9, 32(%rdi) + adcq %r14, %r8 + movq %r8, 40(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r11 + sbbq 24(%rcx), %r10 + sbbq 32(%rcx), %r9 + sbbq 40(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne LBB89_2 +## BB#1: ## %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r11, 16(%rdi) + movq %r10, 24(%rdi) + movq %r9, 32(%rdi) + movq %r8, 40(%rdi) +LBB89_2: ## %carry + popq %rbx + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_addNF6Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addNF6Lbmi2: ## @mcl_fp_addNF6Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 40(%rdx), %r8 + movq 32(%rdx), %r9 + movq 24(%rdx), %r10 + movq 16(%rdx), %r11 + movq (%rdx), %r15 + movq 8(%rdx), %r14 + addq (%rsi), %r15 + adcq 8(%rsi), %r14 + adcq 16(%rsi), %r11 + adcq 24(%rsi), %r10 + adcq 32(%rsi), %r9 + adcq 40(%rsi), %r8 + movq %r15, %rsi + subq (%rcx), %rsi + movq %r14, %rbx + sbbq 8(%rcx), %rbx + movq %r11, %rdx + sbbq 16(%rcx), %rdx + movq %r10, %r13 + sbbq 24(%rcx), %r13 + movq %r9, %r12 + sbbq 32(%rcx), %r12 + movq %r8, %rax + sbbq 40(%rcx), %rax + movq %rax, %rcx + sarq $63, %rcx + cmovsq %r15, %rsi + movq %rsi, (%rdi) + cmovsq %r14, %rbx + movq %rbx, 8(%rdi) + cmovsq %r11, %rdx + movq %rdx, 16(%rdi) + cmovsq %r10, %r13 + movq %r13, 24(%rdi) + cmovsq %r9, %r12 + movq %r12, 32(%rdi) + cmovsq %r8, %rax + movq %rax, 40(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_sub6Lbmi2 + .p2align 4, 0x90 +_mcl_fp_sub6Lbmi2: ## @mcl_fp_sub6Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 40(%rdx), %r14 + movq 40(%rsi), %r8 + movq 32(%rsi), %r9 + movq 24(%rsi), %r10 + movq 16(%rsi), %r11 + movq (%rsi), %rax + movq 8(%rsi), %rsi + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %rsi + movq 24(%rdx), %r15 + movq 32(%rdx), %r12 + sbbq 16(%rdx), %r11 + movq %rax, (%rdi) + movq %rsi, 8(%rdi) + movq %r11, 16(%rdi) + sbbq %r15, %r10 + movq %r10, 24(%rdi) + sbbq %r12, %r9 + movq %r9, 32(%rdi) + sbbq %r14, %r8 + movq %r8, 40(%rdi) + sbbq $0, %rbx + testb $1, %bl + je LBB91_2 +## BB#1: ## %carry + movq 40(%rcx), %r14 + movq 32(%rcx), %r15 + movq 24(%rcx), %r12 + movq 8(%rcx), %rbx + movq 16(%rcx), %rdx + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %rsi, %rbx + movq %rbx, 8(%rdi) + adcq %r11, %rdx + movq %rdx, 16(%rdi) + adcq %r10, %r12 + movq %r12, 24(%rdi) + adcq %r9, %r15 + movq %r15, 32(%rdi) + adcq %r8, %r14 + movq %r14, 40(%rdi) +LBB91_2: ## %nocarry + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_subNF6Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subNF6Lbmi2: ## @mcl_fp_subNF6Lbmi2 +## BB#0: + pushq 
%rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movdqu (%rdx), %xmm0 + movdqu 16(%rdx), %xmm1 + movdqu 32(%rdx), %xmm2 + pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1] + movd %xmm3, %r10 + movdqu (%rsi), %xmm3 + movdqu 16(%rsi), %xmm4 + movdqu 32(%rsi), %xmm5 + pshufd $78, %xmm5, %xmm6 ## xmm6 = xmm5[2,3,0,1] + movd %xmm6, %rax + movd %xmm2, %r11 + movd %xmm5, %r8 + pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] + movd %xmm2, %r14 + pshufd $78, %xmm4, %xmm2 ## xmm2 = xmm4[2,3,0,1] + movd %xmm2, %r9 + movd %xmm1, %r15 + movd %xmm4, %r12 + pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] + movd %xmm1, %r13 + pshufd $78, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,0,1] + movd %xmm1, %rbp + movd %xmm0, %rdx + movd %xmm3, %rbx + subq %rdx, %rbx + sbbq %r13, %rbp + sbbq %r15, %r12 + sbbq %r14, %r9 + sbbq %r11, %r8 + sbbq %r10, %rax + movq %rax, %rdx + sarq $63, %rdx + movq %rdx, %rsi + shldq $1, %rax, %rsi + andq (%rcx), %rsi + movq 40(%rcx), %r10 + andq %rdx, %r10 + movq 32(%rcx), %r11 + andq %rdx, %r11 + movq 24(%rcx), %r14 + andq %rdx, %r14 + rorxq $63, %rdx, %r15 + andq 16(%rcx), %rdx + andq 8(%rcx), %r15 + addq %rbx, %rsi + movq %rsi, (%rdi) + adcq %rbp, %r15 + movq %r15, 8(%rdi) + adcq %r12, %rdx + movq %rdx, 16(%rdi) + adcq %r9, %r14 + movq %r14, 24(%rdi) + adcq %r8, %r11 + movq %r11, 32(%rdi) + adcq %rax, %r10 + movq %r10, 40(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_add6Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_add6Lbmi2: ## @mcl_fpDbl_add6Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 88(%rdx), %rax + movq %rax, -8(%rsp) ## 8-byte Spill + movq 80(%rdx), %rax + movq %rax, -16(%rsp) ## 8-byte Spill + movq 72(%rdx), %r14 + movq 64(%rdx), %r15 + movq 24(%rsi), %rbp + movq 32(%rsi), %r13 + movq 16(%rdx), %r12 + movq (%rdx), %rbx + movq 8(%rdx), %rax + addq (%rsi), %rbx + adcq 8(%rsi), %rax + adcq 16(%rsi), %r12 + adcq 24(%rdx), %rbp + adcq 32(%rdx), %r13 + movq 56(%rdx), %r11 + movq 48(%rdx), %r9 + movq 40(%rdx), %rdx + movq %rbx, (%rdi) + movq 88(%rsi), %r8 + movq %rax, 8(%rdi) + movq 80(%rsi), %r10 + movq %r12, 16(%rdi) + movq 72(%rsi), %r12 + movq %rbp, 24(%rdi) + movq 40(%rsi), %rax + adcq %rdx, %rax + movq 64(%rsi), %rdx + movq %r13, 32(%rdi) + movq 56(%rsi), %r13 + movq 48(%rsi), %rbp + adcq %r9, %rbp + movq %rax, 40(%rdi) + adcq %r11, %r13 + adcq %r15, %rdx + adcq %r14, %r12 + adcq -16(%rsp), %r10 ## 8-byte Folded Reload + adcq -8(%rsp), %r8 ## 8-byte Folded Reload + sbbq %rax, %rax + andl $1, %eax + movq %rbp, %rsi + subq (%rcx), %rsi + movq %r13, %rbx + sbbq 8(%rcx), %rbx + movq %rdx, %r9 + sbbq 16(%rcx), %r9 + movq %r12, %r11 + sbbq 24(%rcx), %r11 + movq %r10, %r14 + sbbq 32(%rcx), %r14 + movq %r8, %r15 + sbbq 40(%rcx), %r15 + sbbq $0, %rax + andl $1, %eax + cmovneq %rbp, %rsi + movq %rsi, 48(%rdi) + testb %al, %al + cmovneq %r13, %rbx + movq %rbx, 56(%rdi) + cmovneq %rdx, %r9 + movq %r9, 64(%rdi) + cmovneq %r12, %r11 + movq %r11, 72(%rdi) + cmovneq %r10, %r14 + movq %r14, 80(%rdi) + cmovneq %r8, %r15 + movq %r15, 88(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sub6Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sub6Lbmi2: ## @mcl_fpDbl_sub6Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 88(%rdx), %r9 + movq 80(%rdx), %r10 + movq 72(%rdx), %r14 + movq 16(%rsi), %r8 + movq (%rsi), %r15 + movq 8(%rsi), %r11 + xorl %eax, %eax + subq (%rdx), %r15 + sbbq 
8(%rdx), %r11 + sbbq 16(%rdx), %r8 + movq 24(%rsi), %rbx + sbbq 24(%rdx), %rbx + movq 32(%rsi), %r12 + sbbq 32(%rdx), %r12 + movq 64(%rdx), %r13 + movq %r15, (%rdi) + movq 56(%rdx), %rbp + movq %r11, 8(%rdi) + movq 48(%rdx), %r15 + movq 40(%rdx), %rdx + movq %r8, 16(%rdi) + movq 88(%rsi), %r8 + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %rdx, %rbx + movq 80(%rsi), %r11 + movq %r12, 32(%rdi) + movq 48(%rsi), %rdx + sbbq %r15, %rdx + movq 72(%rsi), %r15 + movq %rbx, 40(%rdi) + movq 64(%rsi), %r12 + movq 56(%rsi), %rsi + sbbq %rbp, %rsi + sbbq %r13, %r12 + sbbq %r14, %r15 + sbbq %r10, %r11 + sbbq %r9, %r8 + movl $0, %ebp + sbbq $0, %rbp + andl $1, %ebp + movq (%rcx), %r14 + cmoveq %rax, %r14 + testb %bpl, %bpl + movq 16(%rcx), %r9 + cmoveq %rax, %r9 + movq 8(%rcx), %rbp + cmoveq %rax, %rbp + movq 40(%rcx), %r10 + cmoveq %rax, %r10 + movq 32(%rcx), %rbx + cmoveq %rax, %rbx + cmovneq 24(%rcx), %rax + addq %rdx, %r14 + movq %r14, 48(%rdi) + adcq %rsi, %rbp + movq %rbp, 56(%rdi) + adcq %r12, %r9 + movq %r9, 64(%rdi) + adcq %r15, %rax + movq %rax, 72(%rdi) + adcq %r11, %rbx + movq %rbx, 80(%rdi) + adcq %r8, %r10 + movq %r10, 88(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mulUnitPre7Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mulUnitPre7Lbmi2: ## @mcl_fp_mulUnitPre7Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + mulxq 48(%rsi), %r8, %r11 + mulxq 40(%rsi), %r9, %r13 + mulxq 32(%rsi), %r10, %rcx + mulxq 8(%rsi), %r12, %r14 + mulxq (%rsi), %r15, %rbx + addq %r12, %rbx + mulxq 24(%rsi), %r12, %rax + mulxq 16(%rsi), %rdx, %rsi + movq %r15, (%rdi) + movq %rbx, 8(%rdi) + adcq %r14, %rdx + movq %rdx, 16(%rdi) + adcq %r12, %rsi + movq %rsi, 24(%rdi) + adcq %r10, %rax + movq %rax, 32(%rdi) + adcq %r9, %rcx + movq %rcx, 40(%rdi) + adcq %r8, %r13 + movq %r13, 48(%rdi) + adcq $0, %r11 + movq %r11, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fpDbl_mulPre7Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_mulPre7Lbmi2: ## @mcl_fpDbl_mulPre7Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %r14 + movq %rsi, %r8 + movq %rdi, %r13 + movq %r13, -48(%rsp) ## 8-byte Spill + movq (%r8), %rcx + movq %rcx, -72(%rsp) ## 8-byte Spill + movq 8(%r8), %rax + movq %rax, -104(%rsp) ## 8-byte Spill + movq (%r14), %rsi + movq %r14, -64(%rsp) ## 8-byte Spill + movq %rax, %rdx + mulxq %rsi, %rbp, %rax + movq %rcx, %rdx + mulxq %rsi, %rdx, %rcx + movq %rdx, -56(%rsp) ## 8-byte Spill + movq 24(%r8), %rdi + movq %rdi, -88(%rsp) ## 8-byte Spill + movq 16(%r8), %rdx + movq %rdx, -96(%rsp) ## 8-byte Spill + addq %rbp, %rcx + mulxq %rsi, %rbx, %rbp + adcq %rax, %rbx + movq %rdi, %rdx + mulxq %rsi, %r12, %rax + adcq %rbp, %r12 + movq 32(%r8), %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + mulxq %rsi, %r9, %rbp + adcq %rax, %r9 + movq 40(%r8), %rdi + movq %rdi, %rdx + mulxq %rsi, %r10, %rax + adcq %rbp, %r10 + movq 48(%r8), %r15 + movq %r15, %rdx + mulxq %rsi, %rsi, %r11 + adcq %rax, %rsi + movq -56(%rsp), %rax ## 8-byte Reload + movq %rax, (%r13) + adcq $0, %r11 + movq 8(%r14), %r13 + movq -72(%rsp), %rdx ## 8-byte Reload + mulxq %r13, %r14, %rax + movq %rax, -72(%rsp) ## 8-byte Spill + addq %rcx, %r14 + movq -104(%rsp), %rdx ## 8-byte Reload + mulxq %r13, %rcx, %rax + movq %rax, -104(%rsp) ## 8-byte Spill + adcq %rbx, %rcx + movq -96(%rsp), %rdx ## 8-byte Reload + mulxq %r13, %rbx, %rax + movq %rax, -96(%rsp) ## 8-byte Spill + adcq %r12, %rbx + movq -88(%rsp), 
%rdx ## 8-byte Reload + mulxq %r13, %rbp, %rax + movq %rax, -88(%rsp) ## 8-byte Spill + adcq %r9, %rbp + movq -80(%rsp), %rdx ## 8-byte Reload + mulxq %r13, %rax, %r9 + adcq %r10, %rax + movq %rdi, %rdx + mulxq %r13, %r10, %rdi + adcq %rsi, %r10 + movq %r15, %rdx + mulxq %r13, %r13, %rdx + adcq %r11, %r13 + sbbq %r12, %r12 + andl $1, %r12d + addq -72(%rsp), %rcx ## 8-byte Folded Reload + adcq -104(%rsp), %rbx ## 8-byte Folded Reload + adcq -96(%rsp), %rbp ## 8-byte Folded Reload + adcq -88(%rsp), %rax ## 8-byte Folded Reload + adcq %r9, %r10 + movq -48(%rsp), %rsi ## 8-byte Reload + movq %r14, 8(%rsi) + adcq %rdi, %r13 + adcq %rdx, %r12 + movq (%r8), %rsi + movq %rsi, -88(%rsp) ## 8-byte Spill + movq 8(%r8), %r11 + movq %r11, -104(%rsp) ## 8-byte Spill + movq -64(%rsp), %rdx ## 8-byte Reload + movq 16(%rdx), %rdi + movq %rsi, %rdx + mulxq %rdi, %r9, %rdx + movq %rdx, -8(%rsp) ## 8-byte Spill + addq %rcx, %r9 + movq %r11, %rdx + mulxq %rdi, %r14, %rcx + movq %rcx, -16(%rsp) ## 8-byte Spill + adcq %rbx, %r14 + movq 16(%r8), %rdx + movq %rdx, -96(%rsp) ## 8-byte Spill + mulxq %rdi, %rsi, %rcx + movq %rcx, -24(%rsp) ## 8-byte Spill + adcq %rbp, %rsi + movq 24(%r8), %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + mulxq %rdi, %rbp, %rcx + movq %rcx, -32(%rsp) ## 8-byte Spill + adcq %rax, %rbp + movq 32(%r8), %rdx + movq %rdx, -72(%rsp) ## 8-byte Spill + mulxq %rdi, %r11, %rax + movq %rax, -40(%rsp) ## 8-byte Spill + adcq %r10, %r11 + movq 40(%r8), %rdx + movq %rdx, -56(%rsp) ## 8-byte Spill + mulxq %rdi, %r15, %rax + adcq %r13, %r15 + movq 48(%r8), %r13 + movq %r13, %rdx + mulxq %rdi, %rcx, %rdx + adcq %r12, %rcx + sbbq %rbx, %rbx + andl $1, %ebx + addq -8(%rsp), %r14 ## 8-byte Folded Reload + adcq -16(%rsp), %rsi ## 8-byte Folded Reload + adcq -24(%rsp), %rbp ## 8-byte Folded Reload + adcq -32(%rsp), %r11 ## 8-byte Folded Reload + adcq -40(%rsp), %r15 ## 8-byte Folded Reload + adcq %rax, %rcx + adcq %rdx, %rbx + movq -48(%rsp), %rax ## 8-byte Reload + movq %r9, 16(%rax) + movq -64(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rdi + movq -88(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %r9, %rax + movq %rax, -88(%rsp) ## 8-byte Spill + addq %r14, %r9 + movq -104(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %rax, %rdx + movq %rdx, -104(%rsp) ## 8-byte Spill + adcq %rsi, %rax + movq -96(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %r14, %rdx + movq %rdx, -96(%rsp) ## 8-byte Spill + adcq %rbp, %r14 + movq -80(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %r10, %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + adcq %r11, %r10 + movq -72(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %rbp, %rsi + adcq %r15, %rbp + movq -56(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %r11, %r15 + adcq %rcx, %r11 + movq %r13, %rdx + mulxq %rdi, %r13, %rcx + adcq %rbx, %r13 + sbbq %r12, %r12 + andl $1, %r12d + addq -88(%rsp), %rax ## 8-byte Folded Reload + adcq -104(%rsp), %r14 ## 8-byte Folded Reload + adcq -96(%rsp), %r10 ## 8-byte Folded Reload + adcq -80(%rsp), %rbp ## 8-byte Folded Reload + adcq %rsi, %r11 + movq -48(%rsp), %rdi ## 8-byte Reload + movq %r9, 24(%rdi) + adcq %r15, %r13 + adcq %rcx, %r12 + movq (%r8), %rdx + movq %rdx, -88(%rsp) ## 8-byte Spill + movq 8(%r8), %rbx + movq %rbx, -104(%rsp) ## 8-byte Spill + movq -64(%rsp), %rcx ## 8-byte Reload + movq 32(%rcx), %rcx + mulxq %rcx, %rsi, %rdx + movq %rdx, -16(%rsp) ## 8-byte Spill + addq %rax, %rsi + movq %rbx, %rdx + mulxq %rcx, %r9, %rax + movq %rax, -24(%rsp) ## 8-byte Spill + adcq %r14, %r9 + movq 16(%r8), %rdx + movq %rdx, -96(%rsp) ## 8-byte Spill + mulxq %rcx, %rax, 
%rdx + movq %rdx, -32(%rsp) ## 8-byte Spill + adcq %r10, %rax + movq 24(%r8), %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + mulxq %rcx, %r15, %rdx + movq %rdx, -40(%rsp) ## 8-byte Spill + adcq %rbp, %r15 + movq 32(%r8), %rdx + movq %rdx, -72(%rsp) ## 8-byte Spill + mulxq %rcx, %r10, %rbp + adcq %r11, %r10 + movq 40(%r8), %rdx + movq %rdx, -56(%rsp) ## 8-byte Spill + mulxq %rcx, %r11, %rbx + adcq %r13, %r11 + movq 48(%r8), %rdx + movq %rdx, -8(%rsp) ## 8-byte Spill + mulxq %rcx, %r14, %rcx + adcq %r12, %r14 + sbbq %r12, %r12 + andl $1, %r12d + addq -16(%rsp), %r9 ## 8-byte Folded Reload + adcq -24(%rsp), %rax ## 8-byte Folded Reload + adcq -32(%rsp), %r15 ## 8-byte Folded Reload + adcq -40(%rsp), %r10 ## 8-byte Folded Reload + adcq %rbp, %r11 + adcq %rbx, %r14 + adcq %rcx, %r12 + movq %rsi, 32(%rdi) + movq -64(%rsp), %rsi ## 8-byte Reload + movq 40(%rsi), %rdi + movq -88(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %r13, %rcx + movq %rcx, -88(%rsp) ## 8-byte Spill + addq %r9, %r13 + movq -104(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %rcx, %rdx + movq %rdx, -104(%rsp) ## 8-byte Spill + adcq %rax, %rcx + movq -96(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %rax, %rdx + movq %rdx, -96(%rsp) ## 8-byte Spill + adcq %r15, %rax + movq -80(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %rbx, %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + adcq %r10, %rbx + movq -72(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %rbp, %r15 + adcq %r11, %rbp + movq -56(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %r9, %r11 + adcq %r14, %r9 + movq -8(%rsp), %rdx ## 8-byte Reload + mulxq %rdi, %r10, %rdx + adcq %r12, %r10 + sbbq %rdi, %rdi + andl $1, %edi + addq -88(%rsp), %rcx ## 8-byte Folded Reload + adcq -104(%rsp), %rax ## 8-byte Folded Reload + adcq -96(%rsp), %rbx ## 8-byte Folded Reload + adcq -80(%rsp), %rbp ## 8-byte Folded Reload + adcq %r15, %r9 + movq -48(%rsp), %r14 ## 8-byte Reload + movq %r13, 40(%r14) + adcq %r11, %r10 + adcq %rdx, %rdi + movq 48(%rsi), %rdx + mulxq (%r8), %r11, %rsi + movq %rsi, -64(%rsp) ## 8-byte Spill + addq %rcx, %r11 + mulxq 8(%r8), %rsi, %r15 + adcq %rax, %rsi + mulxq 16(%r8), %rcx, %rax + movq %rax, -104(%rsp) ## 8-byte Spill + adcq %rbx, %rcx + mulxq 24(%r8), %rbx, %r12 + adcq %rbp, %rbx + mulxq 32(%r8), %rbp, %r13 + adcq %r9, %rbp + mulxq 40(%r8), %rax, %r9 + adcq %r10, %rax + mulxq 48(%r8), %rdx, %r8 + adcq %rdi, %rdx + sbbq %r10, %r10 + andl $1, %r10d + addq -64(%rsp), %rsi ## 8-byte Folded Reload + adcq %r15, %rcx + movq %r11, 48(%r14) + movq %rsi, 56(%r14) + movq %rcx, 64(%r14) + adcq -104(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, 72(%r14) + adcq %r12, %rbp + movq %rbp, 80(%r14) + adcq %r13, %rax + movq %rax, 88(%r14) + adcq %r9, %rdx + movq %rdx, 96(%r14) + adcq %r8, %r10 + movq %r10, 104(%r14) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sqrPre7Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre7Lbmi2: ## @mcl_fpDbl_sqrPre7Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, -40(%rsp) ## 8-byte Spill + movq 16(%rsi), %rdx + movq %rdx, -88(%rsp) ## 8-byte Spill + movq (%rsi), %rcx + movq 8(%rsi), %rax + mulxq %rcx, %r8, %r10 + movq 24(%rsi), %rbx + movq %rbx, -96(%rsp) ## 8-byte Spill + movq %rax, %rdx + mulxq %rcx, %r12, %rbp + movq %rbp, -48(%rsp) ## 8-byte Spill + movq %rcx, %rdx + mulxq %rcx, %rdx, %rdi + movq %rdx, -80(%rsp) ## 8-byte Spill + addq %r12, %rdi + adcq %rbp, %r8 + movq %rbx, %rdx + mulxq %rcx, %rbp, %r9 + adcq %r10, %rbp + movq 32(%rsi), %rdx + movq %rdx, 
-72(%rsp) ## 8-byte Spill + mulxq %rcx, %r11, %r14 + adcq %r9, %r11 + movq 40(%rsi), %rdx + movq %rdx, -56(%rsp) ## 8-byte Spill + mulxq %rcx, %r10, %r15 + adcq %r14, %r10 + movq 48(%rsi), %r14 + movq %r14, %rdx + mulxq %rcx, %rcx, %r13 + adcq %r15, %rcx + movq -40(%rsp), %rdx ## 8-byte Reload + movq -80(%rsp), %rbx ## 8-byte Reload + movq %rbx, (%rdx) + adcq $0, %r13 + addq %r12, %rdi + movq %rax, %rdx + mulxq %rax, %r12, %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + adcq %r8, %r12 + movq -88(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %r8, %rdx + movq %rdx, -88(%rsp) ## 8-byte Spill + adcq %rbp, %r8 + movq -96(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %r9, %rbp + adcq %r11, %r9 + movq -72(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %r15, %rdx + movq %rdx, -96(%rsp) ## 8-byte Spill + adcq %r10, %r15 + movq -56(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %r11, %rbx + adcq %rcx, %r11 + movq %r14, %rdx + mulxq %rax, %r14, %rax + adcq %r13, %r14 + sbbq %r13, %r13 + andl $1, %r13d + addq -48(%rsp), %r12 ## 8-byte Folded Reload + adcq -80(%rsp), %r8 ## 8-byte Folded Reload + adcq -88(%rsp), %r9 ## 8-byte Folded Reload + adcq %rbp, %r15 + movq -40(%rsp), %rcx ## 8-byte Reload + movq %rdi, 8(%rcx) + adcq -96(%rsp), %r11 ## 8-byte Folded Reload + adcq %rbx, %r14 + adcq %rax, %r13 + movq (%rsi), %rdx + movq %rdx, -96(%rsp) ## 8-byte Spill + movq 8(%rsi), %rcx + movq %rcx, -88(%rsp) ## 8-byte Spill + movq 16(%rsi), %rbx + mulxq %rbx, %rax, %rdx + movq %rdx, -64(%rsp) ## 8-byte Spill + addq %r12, %rax + movq %rax, -48(%rsp) ## 8-byte Spill + movq %rcx, %rdx + mulxq %rbx, %r10, %rax + movq %rax, -16(%rsp) ## 8-byte Spill + adcq %r8, %r10 + movq %rbx, %rdx + mulxq %rbx, %r12, %rax + movq %rax, -24(%rsp) ## 8-byte Spill + adcq %r9, %r12 + movq 24(%rsi), %rax + movq %rax, %rdx + mulxq %rbx, %r8, %rdi + movq %rdi, -56(%rsp) ## 8-byte Spill + adcq %r8, %r15 + movq 32(%rsi), %rdx + movq %rdx, -72(%rsp) ## 8-byte Spill + mulxq %rbx, %rcx, %rdx + movq %rdx, -32(%rsp) ## 8-byte Spill + adcq %r11, %rcx + movq 40(%rsi), %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + mulxq %rbx, %rbp, %r11 + adcq %r14, %rbp + movq 48(%rsi), %r14 + movq %r14, %rdx + mulxq %rbx, %r9, %rdx + adcq %r13, %r9 + sbbq %rbx, %rbx + andl $1, %ebx + addq -64(%rsp), %r10 ## 8-byte Folded Reload + adcq -16(%rsp), %r12 ## 8-byte Folded Reload + adcq -24(%rsp), %r15 ## 8-byte Folded Reload + adcq %rdi, %rcx + adcq -32(%rsp), %rbp ## 8-byte Folded Reload + adcq %r11, %r9 + adcq %rdx, %rbx + movq -96(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %rdi, %rdx + movq %rdx, -96(%rsp) ## 8-byte Spill + addq %r10, %rdi + movq -88(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %r11, %rdx + movq %rdx, -88(%rsp) ## 8-byte Spill + adcq %r12, %r11 + adcq %r8, %r15 + movq %rax, %rdx + mulxq %rax, %r8, %rdx + movq %rdx, -64(%rsp) ## 8-byte Spill + adcq %rcx, %r8 + movq -72(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %r13, %rcx + movq %rcx, -72(%rsp) ## 8-byte Spill + adcq %rbp, %r13 + movq -80(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %r12, %rbp + adcq %r9, %r12 + movq %r14, %rdx + mulxq %rax, %rcx, %rax + adcq %rbx, %rcx + sbbq %r10, %r10 + andl $1, %r10d + addq -96(%rsp), %r11 ## 8-byte Folded Reload + adcq -88(%rsp), %r15 ## 8-byte Folded Reload + adcq -56(%rsp), %r8 ## 8-byte Folded Reload + adcq -64(%rsp), %r13 ## 8-byte Folded Reload + movq -40(%rsp), %rdx ## 8-byte Reload + movq -48(%rsp), %rbx ## 8-byte Reload + movq %rbx, 16(%rdx) + movq %rdi, 24(%rdx) + adcq -72(%rsp), %r12 ## 8-byte Folded Reload + adcq %rbp, %rcx + adcq %rax, %r10 + movq (%rsi), %rdx + movq 
%rdx, -96(%rsp) ## 8-byte Spill + movq 8(%rsi), %rdi + movq %rdi, -88(%rsp) ## 8-byte Spill + movq 32(%rsi), %rbx + mulxq %rbx, %rax, %rdx + movq %rdx, -24(%rsp) ## 8-byte Spill + addq %r11, %rax + movq %rax, -48(%rsp) ## 8-byte Spill + movq %rdi, %rdx + mulxq %rbx, %r9, %rax + movq %rax, -32(%rsp) ## 8-byte Spill + adcq %r15, %r9 + movq 16(%rsi), %rdx + movq %rdx, -72(%rsp) ## 8-byte Spill + mulxq %rbx, %r15, %rax + movq %rax, -8(%rsp) ## 8-byte Spill + adcq %r8, %r15 + movq 24(%rsi), %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + mulxq %rbx, %r8, %rbp + adcq %r13, %r8 + movq %rbx, %rdx + mulxq %rbx, %r13, %r14 + adcq %r12, %r13 + movq 40(%rsi), %rax + movq %rax, %rdx + mulxq %rbx, %rdx, %rdi + movq %rdx, -16(%rsp) ## 8-byte Spill + movq %rdi, -56(%rsp) ## 8-byte Spill + adcq %rdx, %rcx + movq 48(%rsi), %rdx + movq %rdx, -64(%rsp) ## 8-byte Spill + mulxq %rbx, %r11, %rdx + adcq %r10, %r11 + sbbq %r12, %r12 + andl $1, %r12d + addq -24(%rsp), %r9 ## 8-byte Folded Reload + adcq -32(%rsp), %r15 ## 8-byte Folded Reload + adcq -8(%rsp), %r8 ## 8-byte Folded Reload + adcq %rbp, %r13 + adcq %r14, %rcx + adcq %rdi, %r11 + adcq %rdx, %r12 + movq -96(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %r14, %rdi + addq %r9, %r14 + movq -88(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %rbx, %rdx + movq %rdx, -88(%rsp) ## 8-byte Spill + adcq %r15, %rbx + movq -72(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %rbp, %rdx + movq %rdx, -96(%rsp) ## 8-byte Spill + adcq %r8, %rbp + movq -80(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %r10, %r15 + adcq %r13, %r10 + adcq -16(%rsp), %rcx ## 8-byte Folded Reload + movq %rax, %rdx + mulxq %rax, %r9, %r13 + adcq %r11, %r9 + movq -64(%rsp), %rdx ## 8-byte Reload + mulxq %rax, %rax, %r11 + adcq %r12, %rax + sbbq %r8, %r8 + andl $1, %r8d + addq %rdi, %rbx + adcq -88(%rsp), %rbp ## 8-byte Folded Reload + adcq -96(%rsp), %r10 ## 8-byte Folded Reload + adcq %r15, %rcx + movq -40(%rsp), %rdi ## 8-byte Reload + movq -48(%rsp), %rdx ## 8-byte Reload + movq %rdx, 32(%rdi) + movq %r14, 40(%rdi) + adcq -56(%rsp), %r9 ## 8-byte Folded Reload + adcq %r13, %rax + adcq %r11, %r8 + movq 48(%rsi), %rdx + mulxq (%rsi), %r12, %r11 + addq %rbx, %r12 + mulxq 8(%rsi), %rbx, %r14 + adcq %rbp, %rbx + mulxq 16(%rsi), %rbp, %r15 + adcq %r10, %rbp + mulxq 24(%rsi), %rdi, %r10 + adcq %rcx, %rdi + mulxq 32(%rsi), %rcx, %r13 + adcq %r9, %rcx + mulxq 40(%rsi), %rsi, %r9 + adcq %rax, %rsi + mulxq %rdx, %rdx, %rax + adcq %r8, %rdx + sbbq %r8, %r8 + andl $1, %r8d + addq %r11, %rbx + adcq %r14, %rbp + movq -40(%rsp), %r11 ## 8-byte Reload + movq %r12, 48(%r11) + movq %rbx, 56(%r11) + movq %rbp, 64(%r11) + adcq %r15, %rdi + movq %rdi, 72(%r11) + adcq %r10, %rcx + movq %rcx, 80(%r11) + adcq %r13, %rsi + movq %rsi, 88(%r11) + adcq %r9, %rdx + movq %rdx, 96(%r11) + adcq %rax, %r8 + movq %r8, 104(%r11) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mont7Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mont7Lbmi2: ## @mcl_fp_mont7Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $56, %rsp + movq %rdx, -56(%rsp) ## 8-byte Spill + movq %rdi, 48(%rsp) ## 8-byte Spill + movq 48(%rsi), %rdi + movq %rdi, -64(%rsp) ## 8-byte Spill + movq (%rdx), %rax + movq %rdi, %rdx + mulxq %rax, %rdx, %r13 + movq %rdx, -40(%rsp) ## 8-byte Spill + movq 40(%rsi), %rdx + movq %rdx, -72(%rsp) ## 8-byte Spill + mulxq %rax, %rdx, %r8 + movq %rdx, -48(%rsp) ## 8-byte Spill + movq 32(%rsi), %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + mulxq %rax, %r10, %rdi + 
movq 24(%rsi), %rdx + movq %rdx, -32(%rsp) ## 8-byte Spill + mulxq %rax, %r14, %rbp + movq 16(%rsi), %rdx + movq %rdx, 32(%rsp) ## 8-byte Spill + mulxq %rax, %r12, %r15 + movq (%rsi), %rbx + movq %rbx, 24(%rsp) ## 8-byte Spill + movq 8(%rsi), %rdx + movq %rdx, 16(%rsp) ## 8-byte Spill + mulxq %rax, %rsi, %r11 + movq %rbx, %rdx + mulxq %rax, %rdx, %r9 + movq %rdx, -96(%rsp) ## 8-byte Spill + addq %rsi, %r9 + adcq %r12, %r11 + adcq %r14, %r15 + adcq %r10, %rbp + movq %rbp, -112(%rsp) ## 8-byte Spill + adcq -48(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, -104(%rsp) ## 8-byte Spill + adcq -40(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, -128(%rsp) ## 8-byte Spill + adcq $0, %r13 + movq %r13, -120(%rsp) ## 8-byte Spill + movq -8(%rcx), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + imulq %rax, %rdx + movq 32(%rcx), %rax + movq %rax, -40(%rsp) ## 8-byte Spill + mulxq %rax, %rbx, %r13 + movq 16(%rcx), %rsi + movq %rsi, -48(%rsp) ## 8-byte Spill + mulxq %rsi, %r14, %rbp + movq 8(%rcx), %rsi + movq %rsi, 8(%rsp) ## 8-byte Spill + mulxq %rsi, %rsi, %rax + movq (%rcx), %rdi + movq %rdi, (%rsp) ## 8-byte Spill + mulxq %rdi, %r8, %r12 + addq %rsi, %r12 + adcq %r14, %rax + movq %rax, %rdi + movq 24(%rcx), %rsi + movq %rsi, -8(%rsp) ## 8-byte Spill + mulxq %rsi, %r10, %r14 + adcq %rbp, %r10 + adcq %rbx, %r14 + movq 40(%rcx), %rsi + movq %rsi, -16(%rsp) ## 8-byte Spill + mulxq %rsi, %rbp, %rsi + adcq %r13, %rbp + movq 48(%rcx), %rax + movq %rax, -24(%rsp) ## 8-byte Spill + mulxq %rax, %rax, %rbx + adcq %rsi, %rax + adcq $0, %rbx + addq -96(%rsp), %r8 ## 8-byte Folded Reload + adcq %r9, %r12 + adcq %r11, %rdi + movq %rdi, -96(%rsp) ## 8-byte Spill + adcq %r15, %r10 + adcq -112(%rsp), %r14 ## 8-byte Folded Reload + adcq -104(%rsp), %rbp ## 8-byte Folded Reload + adcq -128(%rsp), %rax ## 8-byte Folded Reload + adcq -120(%rsp), %rbx ## 8-byte Folded Reload + sbbq %rsi, %rsi + andl $1, %esi + movq -56(%rsp), %rcx ## 8-byte Reload + movq 8(%rcx), %rdx + mulxq -64(%rsp), %rdi, %rcx ## 8-byte Folded Reload + movq %rdi, -104(%rsp) ## 8-byte Spill + movq %rcx, -120(%rsp) ## 8-byte Spill + mulxq -72(%rsp), %rdi, %rcx ## 8-byte Folded Reload + movq %rdi, -88(%rsp) ## 8-byte Spill + movq %rcx, -128(%rsp) ## 8-byte Spill + mulxq 16(%rsp), %r9, %r8 ## 8-byte Folded Reload + mulxq 24(%rsp), %rdi, %r11 ## 8-byte Folded Reload + movq %rdi, -112(%rsp) ## 8-byte Spill + addq %r9, %r11 + mulxq 32(%rsp), %rcx, %r9 ## 8-byte Folded Reload + adcq %r8, %rcx + movq %rcx, %rdi + mulxq -32(%rsp), %r13, %rcx ## 8-byte Folded Reload + adcq %r9, %r13 + mulxq -80(%rsp), %r8, %r15 ## 8-byte Folded Reload + adcq %rcx, %r8 + adcq -88(%rsp), %r15 ## 8-byte Folded Reload + movq -128(%rsp), %rdx ## 8-byte Reload + adcq -104(%rsp), %rdx ## 8-byte Folded Reload + movq -120(%rsp), %rcx ## 8-byte Reload + adcq $0, %rcx + movq -112(%rsp), %r9 ## 8-byte Reload + addq %r12, %r9 + movq %r9, -112(%rsp) ## 8-byte Spill + movq %r11, %r12 + adcq -96(%rsp), %r12 ## 8-byte Folded Reload + adcq %r10, %rdi + movq %rdi, -88(%rsp) ## 8-byte Spill + adcq %r14, %r13 + adcq %rbp, %r8 + adcq %rax, %r15 + adcq %rbx, %rdx + movq %rdx, -128(%rsp) ## 8-byte Spill + adcq %rsi, %rcx + movq %rcx, -120(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -96(%rsp) ## 8-byte Spill + movq %r9, %rdx + imulq 40(%rsp), %rdx ## 8-byte Folded Reload + mulxq -24(%rsp), %r10, %rax ## 8-byte Folded Reload + movq %rax, -104(%rsp) ## 8-byte Spill + mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload + mulxq 8(%rsp), %rdi, %rbx ## 8-byte Folded Reload + mulxq 
(%rsp), %r14, %r9 ## 8-byte Folded Reload + addq %rdi, %r9 + mulxq -48(%rsp), %rbp, %r11 ## 8-byte Folded Reload + adcq %rbx, %rbp + adcq %rcx, %r11 + mulxq -40(%rsp), %rbx, %rsi ## 8-byte Folded Reload + adcq %rax, %rbx + mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded Reload + adcq %rsi, %rax + adcq %r10, %rcx + movq -104(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq -112(%rsp), %r14 ## 8-byte Folded Reload + adcq %r12, %r9 + adcq -88(%rsp), %rbp ## 8-byte Folded Reload + adcq %r13, %r11 + adcq %r8, %rbx + adcq %r15, %rax + adcq -128(%rsp), %rcx ## 8-byte Folded Reload + adcq -120(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + adcq $0, -96(%rsp) ## 8-byte Folded Spill + movq -56(%rsp), %rdx ## 8-byte Reload + movq 16(%rdx), %rdx + mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload + movq %rdi, -112(%rsp) ## 8-byte Spill + movq %rsi, -120(%rsp) ## 8-byte Spill + mulxq -72(%rsp), %rdi, %rsi ## 8-byte Folded Reload + movq %rdi, -88(%rsp) ## 8-byte Spill + movq %rsi, -128(%rsp) ## 8-byte Spill + mulxq 32(%rsp), %rdi, %r10 ## 8-byte Folded Reload + mulxq 16(%rsp), %rsi, %r13 ## 8-byte Folded Reload + mulxq 24(%rsp), %r8, %r15 ## 8-byte Folded Reload + addq %rsi, %r15 + adcq %rdi, %r13 + mulxq -32(%rsp), %r12, %rsi ## 8-byte Folded Reload + adcq %r10, %r12 + mulxq -80(%rsp), %r10, %r14 ## 8-byte Folded Reload + adcq %rsi, %r10 + adcq -88(%rsp), %r14 ## 8-byte Folded Reload + movq -128(%rsp), %rsi ## 8-byte Reload + adcq -112(%rsp), %rsi ## 8-byte Folded Reload + movq -120(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r9, %r8 + movq %r8, -112(%rsp) ## 8-byte Spill + adcq %rbp, %r15 + adcq %r11, %r13 + adcq %rbx, %r12 + adcq %rax, %r10 + adcq %rcx, %r14 + adcq -104(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -128(%rsp) ## 8-byte Spill + adcq -96(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, %rbx + movq %r8, %rdx + imulq 40(%rsp), %rdx ## 8-byte Folded Reload + mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload + movq %rcx, -96(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload + mulxq 8(%rsp), %rbp, %rsi ## 8-byte Folded Reload + mulxq (%rsp), %r11, %r8 ## 8-byte Folded Reload + addq %rbp, %r8 + mulxq -48(%rsp), %rbp, %r9 ## 8-byte Folded Reload + adcq %rsi, %rbp + adcq %rcx, %r9 + mulxq -40(%rsp), %rsi, %rdi ## 8-byte Folded Reload + adcq %rax, %rsi + mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded Reload + adcq %rdi, %rax + adcq -96(%rsp), %rcx ## 8-byte Folded Reload + movq -104(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq -112(%rsp), %r11 ## 8-byte Folded Reload + adcq %r15, %r8 + adcq %r13, %rbp + adcq %r12, %r9 + adcq %r10, %rsi + adcq %r14, %rax + adcq -128(%rsp), %rcx ## 8-byte Folded Reload + adcq -120(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + adcq $0, %rbx + movq %rbx, -128(%rsp) ## 8-byte Spill + movq -56(%rsp), %rdx ## 8-byte Reload + movq 24(%rdx), %rdx + mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload + movq %rbx, -96(%rsp) ## 8-byte Spill + movq %rdi, -120(%rsp) ## 8-byte Spill + mulxq -72(%rsp), %rdi, %r13 ## 8-byte Folded Reload + movq %rdi, -88(%rsp) ## 8-byte Spill + mulxq 32(%rsp), %r10, %r11 ## 8-byte Folded Reload + mulxq 16(%rsp), %rdi, %r15 ## 8-byte Folded Reload + mulxq 24(%rsp), %rbx, %r12 ## 8-byte Folded Reload + movq %rbx, -112(%rsp) ## 8-byte Spill + addq %rdi, %r12 + adcq %r10, %r15 + mulxq -32(%rsp), %rbx, %rdi ## 8-byte Folded Reload 
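+## mont7L: multiply-accumulate phase for the next word of b (loaded via movq 24(%rdx), %rdx above), spreading partial products across the 7 limbs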
+ adcq %r11, %rbx + mulxq -80(%rsp), %r10, %r14 ## 8-byte Folded Reload + adcq %rdi, %r10 + adcq -88(%rsp), %r14 ## 8-byte Folded Reload + adcq -96(%rsp), %r13 ## 8-byte Folded Reload + movq -120(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + movq -112(%rsp), %rdi ## 8-byte Reload + addq %r8, %rdi + movq %rdi, -112(%rsp) ## 8-byte Spill + adcq %rbp, %r12 + adcq %r9, %r15 + adcq %rsi, %rbx + adcq %rax, %r10 + adcq %rcx, %r14 + adcq -104(%rsp), %r13 ## 8-byte Folded Reload + adcq -128(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rdi, %rdx + imulq 40(%rsp), %rdx ## 8-byte Folded Reload + mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload + movq %rcx, -96(%rsp) ## 8-byte Spill + movq %rax, -128(%rsp) ## 8-byte Spill + mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload + mulxq 8(%rsp), %rbp, %rsi ## 8-byte Folded Reload + mulxq (%rsp), %r11, %r8 ## 8-byte Folded Reload + addq %rbp, %r8 + mulxq -48(%rsp), %rbp, %r9 ## 8-byte Folded Reload + adcq %rsi, %rbp + adcq %rcx, %r9 + mulxq -40(%rsp), %rsi, %rdi ## 8-byte Folded Reload + adcq %rax, %rsi + mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded Reload + adcq %rdi, %rax + adcq -96(%rsp), %rcx ## 8-byte Folded Reload + movq -128(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq -112(%rsp), %r11 ## 8-byte Folded Reload + adcq %r12, %r8 + adcq %r15, %rbp + adcq %rbx, %r9 + adcq %r10, %rsi + adcq %r14, %rax + adcq %r13, %rcx + adcq -120(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + adcq $0, -104(%rsp) ## 8-byte Folded Spill + movq -56(%rsp), %rdx ## 8-byte Reload + movq 32(%rdx), %rdx + mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload + movq %rbx, -112(%rsp) ## 8-byte Spill + movq %rdi, -120(%rsp) ## 8-byte Spill + mulxq -72(%rsp), %rdi, %r11 ## 8-byte Folded Reload + movq %rdi, -96(%rsp) ## 8-byte Spill + mulxq 32(%rsp), %r10, %r13 ## 8-byte Folded Reload + mulxq 16(%rsp), %rdi, %r15 ## 8-byte Folded Reload + mulxq 24(%rsp), %rbx, %r12 ## 8-byte Folded Reload + addq %rdi, %r12 + adcq %r10, %r15 + mulxq -32(%rsp), %r10, %rdi ## 8-byte Folded Reload + adcq %r13, %r10 + mulxq -80(%rsp), %r13, %r14 ## 8-byte Folded Reload + adcq %rdi, %r13 + adcq -96(%rsp), %r14 ## 8-byte Folded Reload + adcq -112(%rsp), %r11 ## 8-byte Folded Reload + movq -120(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r8, %rbx + movq %rbx, -96(%rsp) ## 8-byte Spill + adcq %rbp, %r12 + adcq %r9, %r15 + adcq %rsi, %r10 + adcq %rax, %r13 + adcq %rcx, %r14 + adcq -128(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, -88(%rsp) ## 8-byte Spill + adcq -104(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rbx, %rdx + imulq 40(%rsp), %rdx ## 8-byte Folded Reload + mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload + movq %rcx, -128(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload + mulxq 8(%rsp), %rbp, %rsi ## 8-byte Folded Reload + mulxq (%rsp), %r9, %r11 ## 8-byte Folded Reload + addq %rbp, %r11 + mulxq -48(%rsp), %rbp, %r8 ## 8-byte Folded Reload + adcq %rsi, %rbp + adcq %rcx, %r8 + mulxq -40(%rsp), %rsi, %rdi ## 8-byte Folded Reload + adcq %rax, %rsi + mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded Reload + adcq %rdi, %rax + adcq -128(%rsp), %rcx ## 8-byte Folded Reload + movq -104(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq -96(%rsp), %r9 ## 8-byte Folded 
Reload + adcq %r12, %r11 + adcq %r15, %rbp + adcq %r10, %r8 + adcq %r13, %rsi + adcq %r14, %rax + adcq -88(%rsp), %rcx ## 8-byte Folded Reload + adcq -120(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + adcq $0, -112(%rsp) ## 8-byte Folded Spill + movq -56(%rsp), %rdx ## 8-byte Reload + movq 40(%rdx), %rdx + mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload + movq %rbx, -96(%rsp) ## 8-byte Spill + movq %rdi, -120(%rsp) ## 8-byte Spill + mulxq -72(%rsp), %rbx, %rdi ## 8-byte Folded Reload + movq %rbx, -88(%rsp) ## 8-byte Spill + movq %rdi, -128(%rsp) ## 8-byte Spill + mulxq 32(%rsp), %rbx, %r10 ## 8-byte Folded Reload + mulxq 16(%rsp), %rdi, %r13 ## 8-byte Folded Reload + mulxq 24(%rsp), %r9, %r12 ## 8-byte Folded Reload + addq %rdi, %r12 + adcq %rbx, %r13 + mulxq -32(%rsp), %r15, %rdi ## 8-byte Folded Reload + adcq %r10, %r15 + mulxq -80(%rsp), %r10, %r14 ## 8-byte Folded Reload + adcq %rdi, %r10 + adcq -88(%rsp), %r14 ## 8-byte Folded Reload + movq -128(%rsp), %rdi ## 8-byte Reload + adcq -96(%rsp), %rdi ## 8-byte Folded Reload + movq -120(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r11, %r9 + movq %r9, -96(%rsp) ## 8-byte Spill + adcq %rbp, %r12 + adcq %r8, %r13 + adcq %rsi, %r15 + adcq %rax, %r10 + adcq %rcx, %r14 + adcq -104(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, -128(%rsp) ## 8-byte Spill + adcq -112(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -112(%rsp) ## 8-byte Spill + movq %r9, %rdx + imulq 40(%rsp), %rdx ## 8-byte Folded Reload + mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload + movq %rcx, -88(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload + mulxq 8(%rsp), %rdi, %rsi ## 8-byte Folded Reload + mulxq (%rsp), %r11, %rbx ## 8-byte Folded Reload + addq %rdi, %rbx + mulxq -48(%rsp), %r8, %r9 ## 8-byte Folded Reload + adcq %rsi, %r8 + adcq %rcx, %r9 + mulxq -40(%rsp), %rdi, %rbp ## 8-byte Folded Reload + adcq %rax, %rdi + mulxq -16(%rsp), %rcx, %rsi ## 8-byte Folded Reload + adcq %rbp, %rcx + adcq -88(%rsp), %rsi ## 8-byte Folded Reload + movq -104(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq -96(%rsp), %r11 ## 8-byte Folded Reload + adcq %r12, %rbx + adcq %r13, %r8 + adcq %r15, %r9 + adcq %r10, %rdi + adcq %r14, %rcx + adcq -128(%rsp), %rsi ## 8-byte Folded Reload + adcq -120(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -104(%rsp) ## 8-byte Spill + movq -112(%rsp), %r12 ## 8-byte Reload + adcq $0, %r12 + movq -56(%rsp), %rax ## 8-byte Reload + movq 48(%rax), %rdx + mulxq -64(%rsp), %rbp, %rax ## 8-byte Folded Reload + movq %rbp, -120(%rsp) ## 8-byte Spill + movq %rax, -56(%rsp) ## 8-byte Spill + mulxq -72(%rsp), %rbp, %rax ## 8-byte Folded Reload + movq %rbp, -128(%rsp) ## 8-byte Spill + movq %rax, -64(%rsp) ## 8-byte Spill + mulxq -80(%rsp), %rbp, %rax ## 8-byte Folded Reload + movq %rbp, -112(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + mulxq -32(%rsp), %r13, %rbp ## 8-byte Folded Reload + mulxq 32(%rsp), %r14, %r15 ## 8-byte Folded Reload + mulxq 16(%rsp), %rax, %r11 ## 8-byte Folded Reload + mulxq 24(%rsp), %rdx, %r10 ## 8-byte Folded Reload + movq %rdx, -80(%rsp) ## 8-byte Spill + addq %rax, %r10 + adcq %r14, %r11 + adcq %r13, %r15 + adcq -112(%rsp), %rbp ## 8-byte Folded Reload + movq -72(%rsp), %r14 ## 8-byte Reload + adcq -128(%rsp), %r14 ## 8-byte Folded Reload + movq -64(%rsp), %rdx ## 8-byte Reload + adcq -120(%rsp), %rdx ## 8-byte Folded Reload + 
movq -56(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + movq -80(%rsp), %r13 ## 8-byte Reload + addq %rbx, %r13 + movq %r13, -80(%rsp) ## 8-byte Spill + adcq %r8, %r10 + adcq %r9, %r11 + adcq %rdi, %r15 + adcq %rcx, %rbp + movq %rbp, -32(%rsp) ## 8-byte Spill + adcq %rsi, %r14 + movq %r14, -72(%rsp) ## 8-byte Spill + adcq -104(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -64(%rsp) ## 8-byte Spill + adcq %r12, %rax + movq %rax, -56(%rsp) ## 8-byte Spill + sbbq %rdi, %rdi + movq 40(%rsp), %rdx ## 8-byte Reload + imulq %r13, %rdx + mulxq -8(%rsp), %rbp, %rsi ## 8-byte Folded Reload + mulxq 8(%rsp), %rcx, %rbx ## 8-byte Folded Reload + mulxq (%rsp), %r13, %rax ## 8-byte Folded Reload + addq %rcx, %rax + mulxq -48(%rsp), %rcx, %r9 ## 8-byte Folded Reload + adcq %rbx, %rcx + adcq %rbp, %r9 + mulxq -40(%rsp), %rbp, %rbx ## 8-byte Folded Reload + adcq %rsi, %rbp + mulxq -16(%rsp), %rsi, %r14 ## 8-byte Folded Reload + adcq %rbx, %rsi + mulxq -24(%rsp), %rdx, %rbx ## 8-byte Folded Reload + adcq %r14, %rdx + adcq $0, %rbx + andl $1, %edi + addq -80(%rsp), %r13 ## 8-byte Folded Reload + adcq %r10, %rax + adcq %r11, %rcx + adcq %r15, %r9 + adcq -32(%rsp), %rbp ## 8-byte Folded Reload + adcq -72(%rsp), %rsi ## 8-byte Folded Reload + adcq -64(%rsp), %rdx ## 8-byte Folded Reload + adcq -56(%rsp), %rbx ## 8-byte Folded Reload + adcq $0, %rdi + movq %rax, %r8 + subq (%rsp), %r8 ## 8-byte Folded Reload + movq %rcx, %r10 + sbbq 8(%rsp), %r10 ## 8-byte Folded Reload + movq %r9, %r11 + sbbq -48(%rsp), %r11 ## 8-byte Folded Reload + movq %rbp, %r14 + sbbq -8(%rsp), %r14 ## 8-byte Folded Reload + movq %rsi, %r15 + sbbq -40(%rsp), %r15 ## 8-byte Folded Reload + movq %rdx, %r12 + sbbq -16(%rsp), %r12 ## 8-byte Folded Reload + movq %rbx, %r13 + sbbq -24(%rsp), %r13 ## 8-byte Folded Reload + sbbq $0, %rdi + andl $1, %edi + cmovneq %rbx, %r13 + testb %dil, %dil + cmovneq %rax, %r8 + movq 48(%rsp), %rax ## 8-byte Reload + movq %r8, (%rax) + cmovneq %rcx, %r10 + movq %r10, 8(%rax) + cmovneq %r9, %r11 + movq %r11, 16(%rax) + cmovneq %rbp, %r14 + movq %r14, 24(%rax) + cmovneq %rsi, %r15 + movq %r15, 32(%rax) + cmovneq %rdx, %r12 + movq %r12, 40(%rax) + movq %r13, 48(%rax) + addq $56, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF7Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montNF7Lbmi2: ## @mcl_fp_montNF7Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $40, %rsp + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rdi, 32(%rsp) ## 8-byte Spill + movq (%rsi), %rax + movq %rax, -112(%rsp) ## 8-byte Spill + movq 8(%rsi), %rdi + movq %rdi, -96(%rsp) ## 8-byte Spill + movq (%rdx), %rbp + movq %rdi, %rdx + mulxq %rbp, %rdi, %rbx + movq %rax, %rdx + mulxq %rbp, %r8, %r14 + movq 16(%rsi), %rdx + movq %rdx, -104(%rsp) ## 8-byte Spill + addq %rdi, %r14 + mulxq %rbp, %r15, %rax + adcq %rbx, %r15 + movq 24(%rsi), %rdx + movq %rdx, -24(%rsp) ## 8-byte Spill + mulxq %rbp, %rbx, %rdi + adcq %rax, %rbx + movq 32(%rsi), %rdx + movq %rdx, -32(%rsp) ## 8-byte Spill + mulxq %rbp, %r11, %rax + adcq %rdi, %r11 + movq 40(%rsi), %rdx + movq %rdx, -40(%rsp) ## 8-byte Spill + mulxq %rbp, %r9, %rdi + adcq %rax, %r9 + movq 48(%rsi), %rdx + movq %rdx, -48(%rsp) ## 8-byte Spill + mulxq %rbp, %r10, %rbp + adcq %rdi, %r10 + adcq $0, %rbp + movq -8(%rcx), %rax + movq %rax, -80(%rsp) ## 8-byte Spill + movq %r8, %rdx + imulq %rax, %rdx + movq (%rcx), %rax + movq %rax, -64(%rsp) ## 8-byte Spill + mulxq %rax, %rax, %rsi + movq %rsi, -128(%rsp) ## 8-byte 
Spill + addq %r8, %rax + movq 8(%rcx), %rax + movq %rax, -72(%rsp) ## 8-byte Spill + mulxq %rax, %r8, %rsi + movq %rsi, -120(%rsp) ## 8-byte Spill + adcq %r14, %r8 + movq 16(%rcx), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + mulxq %rax, %rsi, %r13 + adcq %r15, %rsi + movq 24(%rcx), %rax + movq %rax, (%rsp) ## 8-byte Spill + mulxq %rax, %r12, %rax + adcq %rbx, %r12 + movq 32(%rcx), %rdi + movq %rdi, -8(%rsp) ## 8-byte Spill + mulxq %rdi, %r15, %rbx + adcq %r11, %r15 + movq 40(%rcx), %rdi + movq %rdi, -16(%rsp) ## 8-byte Spill + mulxq %rdi, %r14, %rdi + adcq %r9, %r14 + movq 48(%rcx), %rcx + movq %rcx, -56(%rsp) ## 8-byte Spill + mulxq %rcx, %r11, %rcx + adcq %r10, %r11 + adcq $0, %rbp + addq -128(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, -128(%rsp) ## 8-byte Spill + adcq -120(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -120(%rsp) ## 8-byte Spill + adcq %r13, %r12 + adcq %rax, %r15 + adcq %rbx, %r14 + adcq %rdi, %r11 + adcq %rcx, %rbp + movq -88(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdx + mulxq -96(%rsp), %rcx, %rsi ## 8-byte Folded Reload + mulxq -112(%rsp), %r13, %rax ## 8-byte Folded Reload + addq %rcx, %rax + mulxq -104(%rsp), %rcx, %rdi ## 8-byte Folded Reload + adcq %rsi, %rcx + mulxq -24(%rsp), %rsi, %r8 ## 8-byte Folded Reload + adcq %rdi, %rsi + mulxq -32(%rsp), %rdi, %r9 ## 8-byte Folded Reload + adcq %r8, %rdi + mulxq -40(%rsp), %r8, %rbx ## 8-byte Folded Reload + adcq %r9, %r8 + mulxq -48(%rsp), %r9, %r10 ## 8-byte Folded Reload + adcq %rbx, %r9 + adcq $0, %r10 + addq -128(%rsp), %r13 ## 8-byte Folded Reload + adcq -120(%rsp), %rax ## 8-byte Folded Reload + adcq %r12, %rcx + adcq %r15, %rsi + adcq %r14, %rdi + adcq %r11, %r8 + adcq %rbp, %r9 + adcq $0, %r10 + movq %r13, %rdx + imulq -80(%rsp), %rdx ## 8-byte Folded Reload + mulxq -64(%rsp), %rbp, %rbx ## 8-byte Folded Reload + movq %rbx, -128(%rsp) ## 8-byte Spill + addq %r13, %rbp + mulxq -72(%rsp), %rbp, %r14 ## 8-byte Folded Reload + adcq %rax, %rbp + mulxq 8(%rsp), %rax, %r11 ## 8-byte Folded Reload + adcq %rcx, %rax + mulxq (%rsp), %r12, %rcx ## 8-byte Folded Reload + adcq %rsi, %r12 + mulxq -8(%rsp), %r15, %rbx ## 8-byte Folded Reload + adcq %rdi, %r15 + mulxq -16(%rsp), %r13, %rdi ## 8-byte Folded Reload + adcq %r8, %r13 + mulxq -56(%rsp), %rsi, %rdx ## 8-byte Folded Reload + adcq %r9, %rsi + adcq $0, %r10 + addq -128(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, -128(%rsp) ## 8-byte Spill + adcq %r14, %rax + movq %rax, -120(%rsp) ## 8-byte Spill + adcq %r11, %r12 + adcq %rcx, %r15 + adcq %rbx, %r13 + adcq %rdi, %rsi + adcq %rdx, %r10 + movq -88(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rdx + mulxq -96(%rsp), %rcx, %rax ## 8-byte Folded Reload + mulxq -112(%rsp), %r14, %rdi ## 8-byte Folded Reload + addq %rcx, %rdi + mulxq -104(%rsp), %rbp, %rcx ## 8-byte Folded Reload + adcq %rax, %rbp + mulxq -24(%rsp), %rbx, %r8 ## 8-byte Folded Reload + adcq %rcx, %rbx + mulxq -32(%rsp), %rax, %r9 ## 8-byte Folded Reload + adcq %r8, %rax + mulxq -40(%rsp), %r8, %rcx ## 8-byte Folded Reload + movq %rcx, 16(%rsp) ## 8-byte Spill + adcq %r9, %r8 + mulxq -48(%rsp), %r9, %r11 ## 8-byte Folded Reload + adcq 16(%rsp), %r9 ## 8-byte Folded Reload + adcq $0, %r11 + addq -128(%rsp), %r14 ## 8-byte Folded Reload + adcq -120(%rsp), %rdi ## 8-byte Folded Reload + adcq %r12, %rbp + adcq %r15, %rbx + adcq %r13, %rax + adcq %rsi, %r8 + adcq %r10, %r9 + adcq $0, %r11 + movq %r14, %rdx + imulq -80(%rsp), %rdx ## 8-byte Folded Reload + mulxq -64(%rsp), %rsi, %rcx ## 8-byte Folded Reload + movq %rcx, -128(%rsp) ## 8-byte Spill + 
addq %r14, %rsi + mulxq -72(%rsp), %rsi, %r13 ## 8-byte Folded Reload + adcq %rdi, %rsi + mulxq 8(%rsp), %rdi, %r15 ## 8-byte Folded Reload + adcq %rbp, %rdi + mulxq (%rsp), %rcx, %rbp ## 8-byte Folded Reload + adcq %rbx, %rcx + mulxq -8(%rsp), %r14, %rbx ## 8-byte Folded Reload + adcq %rax, %r14 + mulxq -16(%rsp), %r12, %rax ## 8-byte Folded Reload + adcq %r8, %r12 + mulxq -56(%rsp), %r10, %rdx ## 8-byte Folded Reload + adcq %r9, %r10 + adcq $0, %r11 + addq -128(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -128(%rsp) ## 8-byte Spill + adcq %r13, %rdi + movq %rdi, -120(%rsp) ## 8-byte Spill + adcq %r15, %rcx + adcq %rbp, %r14 + adcq %rbx, %r12 + adcq %rax, %r10 + adcq %rdx, %r11 + movq -88(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rdx + mulxq -96(%rsp), %rsi, %rax ## 8-byte Folded Reload + mulxq -112(%rsp), %r15, %rbp ## 8-byte Folded Reload + addq %rsi, %rbp + mulxq -104(%rsp), %rbx, %rdi ## 8-byte Folded Reload + adcq %rax, %rbx + mulxq -24(%rsp), %rsi, %rax ## 8-byte Folded Reload + adcq %rdi, %rsi + mulxq -32(%rsp), %rdi, %r9 ## 8-byte Folded Reload + adcq %rax, %rdi + mulxq -40(%rsp), %r8, %rax ## 8-byte Folded Reload + adcq %r9, %r8 + mulxq -48(%rsp), %r9, %r13 ## 8-byte Folded Reload + adcq %rax, %r9 + adcq $0, %r13 + addq -128(%rsp), %r15 ## 8-byte Folded Reload + adcq -120(%rsp), %rbp ## 8-byte Folded Reload + adcq %rcx, %rbx + adcq %r14, %rsi + adcq %r12, %rdi + adcq %r10, %r8 + adcq %r11, %r9 + adcq $0, %r13 + movq %r15, %rdx + imulq -80(%rsp), %rdx ## 8-byte Folded Reload + mulxq -64(%rsp), %rcx, %rax ## 8-byte Folded Reload + movq %rax, -128(%rsp) ## 8-byte Spill + addq %r15, %rcx + mulxq -72(%rsp), %rcx, %r11 ## 8-byte Folded Reload + adcq %rbp, %rcx + mulxq 8(%rsp), %rbp, %r10 ## 8-byte Folded Reload + adcq %rbx, %rbp + mulxq (%rsp), %rax, %rbx ## 8-byte Folded Reload + adcq %rsi, %rax + mulxq -8(%rsp), %r14, %rsi ## 8-byte Folded Reload + adcq %rdi, %r14 + mulxq -16(%rsp), %r15, %rdi ## 8-byte Folded Reload + adcq %r8, %r15 + mulxq -56(%rsp), %r12, %rdx ## 8-byte Folded Reload + adcq %r9, %r12 + adcq $0, %r13 + addq -128(%rsp), %rcx ## 8-byte Folded Reload + adcq %r11, %rbp + movq %rbp, -128(%rsp) ## 8-byte Spill + adcq %r10, %rax + movq %rax, -120(%rsp) ## 8-byte Spill + adcq %rbx, %r14 + adcq %rsi, %r15 + adcq %rdi, %r12 + adcq %rdx, %r13 + movq -88(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rdx + mulxq -96(%rsp), %rsi, %rdi ## 8-byte Folded Reload + mulxq -112(%rsp), %r11, %r8 ## 8-byte Folded Reload + addq %rsi, %r8 + mulxq -104(%rsp), %rbx, %rsi ## 8-byte Folded Reload + adcq %rdi, %rbx + mulxq -24(%rsp), %rbp, %rdi ## 8-byte Folded Reload + adcq %rsi, %rbp + mulxq -32(%rsp), %rsi, %r9 ## 8-byte Folded Reload + adcq %rdi, %rsi + mulxq -40(%rsp), %rdi, %rax ## 8-byte Folded Reload + adcq %r9, %rdi + mulxq -48(%rsp), %r9, %r10 ## 8-byte Folded Reload + adcq %rax, %r9 + adcq $0, %r10 + addq %rcx, %r11 + adcq -128(%rsp), %r8 ## 8-byte Folded Reload + adcq -120(%rsp), %rbx ## 8-byte Folded Reload + adcq %r14, %rbp + adcq %r15, %rsi + adcq %r12, %rdi + adcq %r13, %r9 + adcq $0, %r10 + movq %r11, %rdx + imulq -80(%rsp), %rdx ## 8-byte Folded Reload + mulxq -64(%rsp), %rcx, %rax ## 8-byte Folded Reload + movq %rax, -128(%rsp) ## 8-byte Spill + addq %r11, %rcx + mulxq -72(%rsp), %rcx, %r13 ## 8-byte Folded Reload + adcq %r8, %rcx + mulxq 8(%rsp), %rax, %r8 ## 8-byte Folded Reload + adcq %rbx, %rax + mulxq (%rsp), %rbx, %r11 ## 8-byte Folded Reload + adcq %rbp, %rbx + mulxq -8(%rsp), %r14, %rbp ## 8-byte Folded Reload + adcq %rsi, %r14 + mulxq -16(%rsp), %r15, %rsi 
## 8-byte Folded Reload + adcq %rdi, %r15 + mulxq -56(%rsp), %r12, %rdx ## 8-byte Folded Reload + adcq %r9, %r12 + adcq $0, %r10 + addq -128(%rsp), %rcx ## 8-byte Folded Reload + adcq %r13, %rax + movq %rax, -128(%rsp) ## 8-byte Spill + adcq %r8, %rbx + movq %rbx, -120(%rsp) ## 8-byte Spill + adcq %r11, %r14 + adcq %rbp, %r15 + adcq %rsi, %r12 + adcq %rdx, %r10 + movq -88(%rsp), %rax ## 8-byte Reload + movq 40(%rax), %rdx + mulxq -96(%rsp), %rsi, %rax ## 8-byte Folded Reload + mulxq -112(%rsp), %r11, %rbp ## 8-byte Folded Reload + addq %rsi, %rbp + mulxq -104(%rsp), %rbx, %rdi ## 8-byte Folded Reload + adcq %rax, %rbx + mulxq -24(%rsp), %rsi, %rax ## 8-byte Folded Reload + adcq %rdi, %rsi + mulxq -32(%rsp), %rdi, %r9 ## 8-byte Folded Reload + adcq %rax, %rdi + mulxq -40(%rsp), %r8, %rax ## 8-byte Folded Reload + adcq %r9, %r8 + mulxq -48(%rsp), %r9, %r13 ## 8-byte Folded Reload + adcq %rax, %r9 + adcq $0, %r13 + addq %rcx, %r11 + adcq -128(%rsp), %rbp ## 8-byte Folded Reload + adcq -120(%rsp), %rbx ## 8-byte Folded Reload + adcq %r14, %rsi + adcq %r15, %rdi + adcq %r12, %r8 + adcq %r10, %r9 + adcq $0, %r13 + movq %r11, %rdx + imulq -80(%rsp), %rdx ## 8-byte Folded Reload + mulxq -64(%rsp), %rcx, %rax ## 8-byte Folded Reload + movq %rax, -120(%rsp) ## 8-byte Spill + addq %r11, %rcx + mulxq -72(%rsp), %rcx, %rax ## 8-byte Folded Reload + movq %rax, 16(%rsp) ## 8-byte Spill + adcq %rbp, %rcx + mulxq 8(%rsp), %rax, %rbp ## 8-byte Folded Reload + movq %rbp, 24(%rsp) ## 8-byte Spill + adcq %rbx, %rax + movq %rax, -128(%rsp) ## 8-byte Spill + mulxq (%rsp), %r14, %rbp ## 8-byte Folded Reload + adcq %rsi, %r14 + mulxq -8(%rsp), %r11, %r12 ## 8-byte Folded Reload + adcq %rdi, %r11 + mulxq -16(%rsp), %r10, %rbx ## 8-byte Folded Reload + adcq %r8, %r10 + mulxq -56(%rsp), %rdi, %rax ## 8-byte Folded Reload + adcq %r9, %rdi + adcq $0, %r13 + addq -120(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -120(%rsp) ## 8-byte Spill + movq -128(%rsp), %rcx ## 8-byte Reload + adcq 16(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -128(%rsp) ## 8-byte Spill + adcq 24(%rsp), %r14 ## 8-byte Folded Reload + adcq %rbp, %r11 + adcq %r12, %r10 + adcq %rbx, %rdi + adcq %rax, %r13 + movq -88(%rsp), %rax ## 8-byte Reload + movq 48(%rax), %rdx + mulxq -96(%rsp), %rbp, %r9 ## 8-byte Folded Reload + mulxq -112(%rsp), %r8, %rax ## 8-byte Folded Reload + addq %rbp, %rax + mulxq -104(%rsp), %rbx, %rcx ## 8-byte Folded Reload + adcq %r9, %rbx + mulxq -24(%rsp), %rbp, %r9 ## 8-byte Folded Reload + adcq %rcx, %rbp + mulxq -32(%rsp), %rcx, %r12 ## 8-byte Folded Reload + adcq %r9, %rcx + mulxq -40(%rsp), %r15, %rsi ## 8-byte Folded Reload + movq %rsi, -112(%rsp) ## 8-byte Spill + adcq %r12, %r15 + mulxq -48(%rsp), %r12, %r9 ## 8-byte Folded Reload + adcq -112(%rsp), %r12 ## 8-byte Folded Reload + adcq $0, %r9 + addq -120(%rsp), %r8 ## 8-byte Folded Reload + adcq -128(%rsp), %rax ## 8-byte Folded Reload + adcq %r14, %rbx + adcq %r11, %rbp + adcq %r10, %rcx + adcq %rdi, %r15 + adcq %r13, %r12 + adcq $0, %r9 + movq -80(%rsp), %rdx ## 8-byte Reload + imulq %r8, %rdx + mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload + movq %rsi, -80(%rsp) ## 8-byte Spill + addq %r8, %rdi + mulxq -72(%rsp), %r8, %rsi ## 8-byte Folded Reload + movq %rsi, -112(%rsp) ## 8-byte Spill + adcq %rax, %r8 + movq 8(%rsp), %r11 ## 8-byte Reload + mulxq %r11, %rsi, %rax + movq %rax, -88(%rsp) ## 8-byte Spill + adcq %rbx, %rsi + movq (%rsp), %r14 ## 8-byte Reload + mulxq %r14, %rdi, %rax + movq %rax, -96(%rsp) ## 8-byte Spill + adcq %rbp, %rdi + movq -8(%rsp), 
%rbp ## 8-byte Reload + mulxq %rbp, %rax, %rbx + movq %rbx, -104(%rsp) ## 8-byte Spill + adcq %rcx, %rax + movq -16(%rsp), %rbx ## 8-byte Reload + mulxq %rbx, %rcx, %r13 + adcq %r15, %rcx + mulxq -56(%rsp), %rdx, %r15 ## 8-byte Folded Reload + adcq %r12, %rdx + adcq $0, %r9 + addq -80(%rsp), %r8 ## 8-byte Folded Reload + adcq -112(%rsp), %rsi ## 8-byte Folded Reload + adcq -88(%rsp), %rdi ## 8-byte Folded Reload + adcq -96(%rsp), %rax ## 8-byte Folded Reload + adcq -104(%rsp), %rcx ## 8-byte Folded Reload + adcq %r13, %rdx + adcq %r15, %r9 + movq %r8, %r13 + subq -64(%rsp), %r13 ## 8-byte Folded Reload + movq %rsi, %r12 + sbbq -72(%rsp), %r12 ## 8-byte Folded Reload + movq %rdi, %r10 + sbbq %r11, %r10 + movq %rax, %r11 + sbbq %r14, %r11 + movq %rcx, %r14 + sbbq %rbp, %r14 + movq %rdx, %r15 + sbbq %rbx, %r15 + movq %r9, %rbp + sbbq -56(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, %rbx + sarq $63, %rbx + cmovsq %r8, %r13 + movq 32(%rsp), %rbx ## 8-byte Reload + movq %r13, (%rbx) + cmovsq %rsi, %r12 + movq %r12, 8(%rbx) + cmovsq %rdi, %r10 + movq %r10, 16(%rbx) + cmovsq %rax, %r11 + movq %r11, 24(%rbx) + cmovsq %rcx, %r14 + movq %r14, 32(%rbx) + cmovsq %rdx, %r15 + movq %r15, 40(%rbx) + cmovsq %r9, %rbp + movq %rbp, 48(%rbx) + addq $40, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed7Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montRed7Lbmi2: ## @mcl_fp_montRed7Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $56, %rsp + movq %rdx, %rcx + movq %rdi, 48(%rsp) ## 8-byte Spill + movq -8(%rcx), %rax + movq %rax, -72(%rsp) ## 8-byte Spill + movq (%rsi), %r13 + movq %r13, %rdx + imulq %rax, %rdx + movq 48(%rcx), %rax + movq %rax, -16(%rsp) ## 8-byte Spill + mulxq %rax, %rdi, %rax + movq %rdi, -64(%rsp) ## 8-byte Spill + movq %rax, -120(%rsp) ## 8-byte Spill + movq 40(%rcx), %rax + movq %rax, -24(%rsp) ## 8-byte Spill + mulxq %rax, %r10, %rax + movq %rax, -128(%rsp) ## 8-byte Spill + movq 32(%rcx), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + mulxq %rax, %r14, %r8 + movq 24(%rcx), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + mulxq %rax, %r12, %r15 + movq 16(%rcx), %rax + movq %rax, -40(%rsp) ## 8-byte Spill + mulxq %rax, %rbp, %rbx + movq (%rcx), %rdi + movq %rdi, -48(%rsp) ## 8-byte Spill + movq 8(%rcx), %rax + movq %rax, -8(%rsp) ## 8-byte Spill + mulxq %rax, %rax, %r11 + mulxq %rdi, %rdx, %r9 + addq %rax, %r9 + adcq %rbp, %r11 + adcq %r12, %rbx + adcq %r14, %r15 + adcq %r10, %r8 + movq -128(%rsp), %rcx ## 8-byte Reload + adcq -64(%rsp), %rcx ## 8-byte Folded Reload + movq -120(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %r13, %rdx + adcq 8(%rsi), %r9 + adcq 16(%rsi), %r11 + adcq 24(%rsi), %rbx + adcq 32(%rsi), %r15 + adcq 40(%rsi), %r8 + movq %r8, -112(%rsp) ## 8-byte Spill + adcq 48(%rsi), %rcx + movq %rcx, -128(%rsp) ## 8-byte Spill + adcq 56(%rsi), %rax + movq %rax, -120(%rsp) ## 8-byte Spill + movq 104(%rsi), %r8 + movq 96(%rsi), %rdx + movq 88(%rsi), %rdi + movq 80(%rsi), %rbp + movq 72(%rsi), %rax + movq 64(%rsi), %rcx + adcq $0, %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, -88(%rsp) ## 8-byte Spill + adcq $0, %rbp + movq %rbp, -56(%rsp) ## 8-byte Spill + adcq $0, %rdi + movq %rdi, -80(%rsp) ## 8-byte Spill + adcq $0, %rdx + movq %rdx, 24(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, -64(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, (%rsp) ## 8-byte Spill + movq %r9, %rdx + imulq -72(%rsp), %rdx ## 8-byte Folded Reload + 
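## Montgomery reduction step: %rdx = m = (lowest pending limb) * p', where p' = -p^-1 mod 2^64 was loaded from -8(%rcx) and spilled to -72(%rsp); the mulxq/adcq chain below folds m*p into the accumulator so its low limb cancels and the running value shifts down one limb. +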
movq -16(%rsp), %r13 ## 8-byte Reload + mulxq %r13, %rcx, %rax + movq %rcx, 32(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload + movq %rcx, 40(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + mulxq -32(%rsp), %r14, %r12 ## 8-byte Folded Reload + mulxq 16(%rsp), %r8, %rax ## 8-byte Folded Reload + mulxq -40(%rsp), %rsi, %r10 ## 8-byte Folded Reload + mulxq -8(%rsp), %rcx, %rdi ## 8-byte Folded Reload + mulxq -48(%rsp), %rdx, %rbp ## 8-byte Folded Reload + addq %rcx, %rbp + adcq %rsi, %rdi + adcq %r8, %r10 + adcq %r14, %rax + movq %rax, %rcx + adcq 40(%rsp), %r12 ## 8-byte Folded Reload + movq -104(%rsp), %rsi ## 8-byte Reload + adcq 32(%rsp), %rsi ## 8-byte Folded Reload + movq -96(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %r9, %rdx + adcq %r11, %rbp + adcq %rbx, %rdi + adcq %r15, %r10 + adcq -112(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -112(%rsp) ## 8-byte Spill + adcq -128(%rsp), %r12 ## 8-byte Folded Reload + adcq -120(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -104(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -96(%rsp) ## 8-byte Spill + adcq $0, -88(%rsp) ## 8-byte Folded Spill + adcq $0, -56(%rsp) ## 8-byte Folded Spill + adcq $0, -80(%rsp) ## 8-byte Folded Spill + adcq $0, 24(%rsp) ## 8-byte Folded Spill + adcq $0, -64(%rsp) ## 8-byte Folded Spill + adcq $0, (%rsp) ## 8-byte Folded Spill + movq %rbp, %rdx + imulq -72(%rsp), %rdx ## 8-byte Folded Reload + mulxq %r13, %rcx, %rax + movq %rcx, 8(%rsp) ## 8-byte Spill + movq %rax, -120(%rsp) ## 8-byte Spill + movq -24(%rsp), %r15 ## 8-byte Reload + mulxq %r15, %rcx, %rax + movq %rcx, 32(%rsp) ## 8-byte Spill + movq %rax, -128(%rsp) ## 8-byte Spill + mulxq -32(%rsp), %r11, %r13 ## 8-byte Folded Reload + mulxq 16(%rsp), %r9, %r14 ## 8-byte Folded Reload + mulxq -40(%rsp), %rsi, %r8 ## 8-byte Folded Reload + mulxq -8(%rsp), %rax, %rbx ## 8-byte Folded Reload + mulxq -48(%rsp), %rdx, %rcx ## 8-byte Folded Reload + addq %rax, %rcx + adcq %rsi, %rbx + adcq %r9, %r8 + adcq %r11, %r14 + adcq 32(%rsp), %r13 ## 8-byte Folded Reload + movq -128(%rsp), %rsi ## 8-byte Reload + adcq 8(%rsp), %rsi ## 8-byte Folded Reload + movq -120(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %rbp, %rdx + adcq %rdi, %rcx + adcq %r10, %rbx + adcq -112(%rsp), %r8 ## 8-byte Folded Reload + adcq %r12, %r14 + adcq -104(%rsp), %r13 ## 8-byte Folded Reload + adcq -96(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -128(%rsp) ## 8-byte Spill + adcq -88(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -120(%rsp) ## 8-byte Spill + adcq $0, -56(%rsp) ## 8-byte Folded Spill + adcq $0, -80(%rsp) ## 8-byte Folded Spill + adcq $0, 24(%rsp) ## 8-byte Folded Spill + adcq $0, -64(%rsp) ## 8-byte Folded Spill + adcq $0, (%rsp) ## 8-byte Folded Spill + movq %rcx, %rdx + imulq -72(%rsp), %rdx ## 8-byte Folded Reload + mulxq -16(%rsp), %rsi, %rax ## 8-byte Folded Reload + movq %rsi, -88(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + mulxq %r15, %rsi, %rax + movq %rsi, -112(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + movq -32(%rsp), %r15 ## 8-byte Reload + mulxq %r15, %rax, %r12 + movq %rax, 8(%rsp) ## 8-byte Spill + mulxq 16(%rsp), %r9, %rbp ## 8-byte Folded Reload + mulxq -40(%rsp), %rdi, %r10 ## 8-byte Folded Reload + mulxq -8(%rsp), %rsi, %r11 ## 8-byte Folded Reload + mulxq -48(%rsp), %rdx, %rax ## 8-byte Folded Reload + addq %rsi, %rax + adcq %rdi, %r11 + adcq %r9, %r10 + adcq 8(%rsp), %rbp ## 8-byte 
Folded Reload + adcq -112(%rsp), %r12 ## 8-byte Folded Reload + movq -104(%rsp), %rdi ## 8-byte Reload + adcq -88(%rsp), %rdi ## 8-byte Folded Reload + movq -96(%rsp), %rsi ## 8-byte Reload + adcq $0, %rsi + addq %rcx, %rdx + adcq %rbx, %rax + adcq %r8, %r11 + adcq %r14, %r10 + adcq %r13, %rbp + adcq -128(%rsp), %r12 ## 8-byte Folded Reload + adcq -120(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, -104(%rsp) ## 8-byte Spill + adcq -56(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -96(%rsp) ## 8-byte Spill + adcq $0, -80(%rsp) ## 8-byte Folded Spill + adcq $0, 24(%rsp) ## 8-byte Folded Spill + adcq $0, -64(%rsp) ## 8-byte Folded Spill + adcq $0, (%rsp) ## 8-byte Folded Spill + movq %rax, %rdx + imulq -72(%rsp), %rdx ## 8-byte Folded Reload + mulxq -16(%rsp), %rsi, %rcx ## 8-byte Folded Reload + movq %rsi, -128(%rsp) ## 8-byte Spill + movq %rcx, -56(%rsp) ## 8-byte Spill + mulxq -24(%rsp), %rsi, %rcx ## 8-byte Folded Reload + movq %rsi, -88(%rsp) ## 8-byte Spill + movq %rcx, -120(%rsp) ## 8-byte Spill + mulxq %r15, %rcx, %r13 + movq %rcx, -112(%rsp) ## 8-byte Spill + movq 16(%rsp), %r15 ## 8-byte Reload + mulxq %r15, %r9, %r14 + mulxq -40(%rsp), %rdi, %rbx ## 8-byte Folded Reload + mulxq -8(%rsp), %rsi, %r8 ## 8-byte Folded Reload + mulxq -48(%rsp), %rdx, %rcx ## 8-byte Folded Reload + addq %rsi, %rcx + adcq %rdi, %r8 + adcq %r9, %rbx + adcq -112(%rsp), %r14 ## 8-byte Folded Reload + adcq -88(%rsp), %r13 ## 8-byte Folded Reload + movq -120(%rsp), %rdi ## 8-byte Reload + adcq -128(%rsp), %rdi ## 8-byte Folded Reload + movq -56(%rsp), %rsi ## 8-byte Reload + adcq $0, %rsi + addq %rax, %rdx + adcq %r11, %rcx + adcq %r10, %r8 + adcq %rbp, %rbx + adcq %r12, %r14 + adcq -104(%rsp), %r13 ## 8-byte Folded Reload + adcq -96(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, -120(%rsp) ## 8-byte Spill + adcq -80(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -56(%rsp) ## 8-byte Spill + adcq $0, 24(%rsp) ## 8-byte Folded Spill + adcq $0, -64(%rsp) ## 8-byte Folded Spill + adcq $0, (%rsp) ## 8-byte Folded Spill + movq %rcx, %rdx + imulq -72(%rsp), %rdx ## 8-byte Folded Reload + mulxq -16(%rsp), %rsi, %rax ## 8-byte Folded Reload + movq %rsi, -96(%rsp) ## 8-byte Spill + movq %rax, -80(%rsp) ## 8-byte Spill + mulxq -24(%rsp), %rsi, %rax ## 8-byte Folded Reload + movq %rsi, -104(%rsp) ## 8-byte Spill + movq %rax, -128(%rsp) ## 8-byte Spill + mulxq -32(%rsp), %rax, %r12 ## 8-byte Folded Reload + movq %rax, -88(%rsp) ## 8-byte Spill + movq %r15, %r11 + mulxq %r11, %rax, %r15 + movq %rax, -112(%rsp) ## 8-byte Spill + mulxq -40(%rsp), %rdi, %rbp ## 8-byte Folded Reload + movq -8(%rsp), %r9 ## 8-byte Reload + mulxq %r9, %rax, %r10 + mulxq -48(%rsp), %rdx, %rsi ## 8-byte Folded Reload + addq %rax, %rsi + adcq %rdi, %r10 + adcq -112(%rsp), %rbp ## 8-byte Folded Reload + adcq -88(%rsp), %r15 ## 8-byte Folded Reload + adcq -104(%rsp), %r12 ## 8-byte Folded Reload + movq -128(%rsp), %rdi ## 8-byte Reload + adcq -96(%rsp), %rdi ## 8-byte Folded Reload + movq -80(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %rcx, %rdx + adcq %r8, %rsi + adcq %rbx, %r10 + adcq %r14, %rbp + adcq %r13, %r15 + adcq -120(%rsp), %r12 ## 8-byte Folded Reload + adcq -56(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, -128(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -80(%rsp) ## 8-byte Spill + adcq $0, -64(%rsp) ## 8-byte Folded Spill + adcq $0, (%rsp) ## 8-byte Folded Spill + movq -72(%rsp), %rdx ## 8-byte Reload + imulq %rsi, %rdx + mulxq %r11, %rcx, %rax + movq %rax, -72(%rsp) ## 8-byte 
Spill + mulxq %r9, %rbx, %rdi + mulxq -48(%rsp), %r11, %r14 ## 8-byte Folded Reload + addq %rbx, %r14 + mulxq -40(%rsp), %rbx, %r13 ## 8-byte Folded Reload + adcq %rdi, %rbx + adcq %rcx, %r13 + mulxq -32(%rsp), %r8, %rdi ## 8-byte Folded Reload + adcq -72(%rsp), %r8 ## 8-byte Folded Reload + mulxq -24(%rsp), %rcx, %r9 ## 8-byte Folded Reload + adcq %rdi, %rcx + mulxq -16(%rsp), %rdx, %rdi ## 8-byte Folded Reload + adcq %r9, %rdx + adcq $0, %rdi + addq %rsi, %r11 + adcq %r10, %r14 + adcq %rbp, %rbx + adcq %r15, %r13 + adcq %r12, %r8 + adcq -128(%rsp), %rcx ## 8-byte Folded Reload + adcq -80(%rsp), %rdx ## 8-byte Folded Reload + adcq -64(%rsp), %rdi ## 8-byte Folded Reload + movq (%rsp), %rax ## 8-byte Reload + adcq $0, %rax + movq %r14, %rsi + subq -48(%rsp), %rsi ## 8-byte Folded Reload + movq %rbx, %rbp + sbbq -8(%rsp), %rbp ## 8-byte Folded Reload + movq %r13, %r9 + sbbq -40(%rsp), %r9 ## 8-byte Folded Reload + movq %r8, %r10 + sbbq 16(%rsp), %r10 ## 8-byte Folded Reload + movq %rcx, %r11 + sbbq -32(%rsp), %r11 ## 8-byte Folded Reload + movq %rdx, %r15 + sbbq -24(%rsp), %r15 ## 8-byte Folded Reload + movq %rdi, %r12 + sbbq -16(%rsp), %r12 ## 8-byte Folded Reload + sbbq $0, %rax + andl $1, %eax + cmovneq %rdi, %r12 + testb %al, %al + cmovneq %r14, %rsi + movq 48(%rsp), %rdi ## 8-byte Reload + movq %rsi, (%rdi) + cmovneq %rbx, %rbp + movq %rbp, 8(%rdi) + cmovneq %r13, %r9 + movq %r9, 16(%rdi) + cmovneq %r8, %r10 + movq %r10, 24(%rdi) + cmovneq %rcx, %r11 + movq %r11, 32(%rdi) + cmovneq %rdx, %r15 + movq %r15, 40(%rdi) + movq %r12, 48(%rdi) + addq $56, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_addPre7Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addPre7Lbmi2: ## @mcl_fp_addPre7Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r8 + movq 48(%rsi), %r14 + movq 40(%rdx), %r9 + movq 40(%rsi), %r15 + movq 32(%rdx), %r10 + movq 24(%rdx), %r11 + movq 16(%rdx), %r12 + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + movq 24(%rsi), %rax + movq 32(%rsi), %rbx + adcq 16(%rsi), %r12 + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %r12, 16(%rdi) + adcq %r11, %rax + movq %rax, 24(%rdi) + adcq %r10, %rbx + movq %rbx, 32(%rdi) + adcq %r9, %r15 + movq %r15, 40(%rdi) + adcq %r8, %r14 + movq %r14, 48(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_subPre7Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subPre7Lbmi2: ## @mcl_fp_subPre7Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r8 + movq 48(%rsi), %r10 + movq 40(%rdx), %r9 + movq 40(%rsi), %r15 + movq 24(%rdx), %r11 + movq 32(%rdx), %r14 + movq (%rsi), %rbx + movq 8(%rsi), %r12 + xorl %eax, %eax + subq (%rdx), %rbx + sbbq 8(%rdx), %r12 + movq 16(%rsi), %rcx + sbbq 16(%rdx), %rcx + movq 32(%rsi), %rdx + movq 24(%rsi), %rsi + movq %rbx, (%rdi) + movq %r12, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r11, %rsi + movq %rsi, 24(%rdi) + sbbq %r14, %rdx + movq %rdx, 32(%rdi) + sbbq %r9, %r15 + movq %r15, 40(%rdi) + sbbq %r8, %r10 + movq %r10, 48(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_shr1_7Lbmi2 + .p2align 4, 0x90 +_mcl_fp_shr1_7Lbmi2: ## @mcl_fp_shr1_7Lbmi2 +## BB#0: + movq 48(%rsi), %r8 + movq 40(%rsi), %r9 + movq 32(%rsi), %r10 + movq 24(%rsi), %rax + movq 16(%rsi), %rcx + movq (%rsi), %rdx + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rdx + movq %rdx, (%rdi) + shrdq $1, %rcx, %rsi + movq %rsi, 
8(%rdi) + shrdq $1, %rax, %rcx + movq %rcx, 16(%rdi) + shrdq $1, %r10, %rax + movq %rax, 24(%rdi) + shrdq $1, %r9, %r10 + movq %r10, 32(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 40(%rdi) + shrq %r8 + movq %r8, 48(%rdi) + retq + + .globl _mcl_fp_add7Lbmi2 + .p2align 4, 0x90 +_mcl_fp_add7Lbmi2: ## @mcl_fp_add7Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r14 + movq 48(%rsi), %r8 + movq 40(%rdx), %r15 + movq 40(%rsi), %r9 + movq 32(%rdx), %r12 + movq 24(%rdx), %r13 + movq 16(%rdx), %r10 + movq (%rdx), %r11 + movq 8(%rdx), %rdx + addq (%rsi), %r11 + adcq 8(%rsi), %rdx + movq 24(%rsi), %rax + movq 32(%rsi), %rbx + adcq 16(%rsi), %r10 + movq %r11, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + adcq %r13, %rax + movq %rax, 24(%rdi) + adcq %r12, %rbx + movq %rbx, 32(%rdi) + adcq %r15, %r9 + movq %r9, 40(%rdi) + adcq %r14, %r8 + movq %r8, 48(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %r11 + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r10 + sbbq 24(%rcx), %rax + sbbq 32(%rcx), %rbx + sbbq 40(%rcx), %r9 + sbbq 48(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne LBB104_2 +## BB#1: ## %nocarry + movq %r11, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + movq %rax, 24(%rdi) + movq %rbx, 32(%rdi) + movq %r9, 40(%rdi) + movq %r8, 48(%rdi) +LBB104_2: ## %carry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_addNF7Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addNF7Lbmi2: ## @mcl_fp_addNF7Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r9 + movq 40(%rdx), %rbp + movq 32(%rdx), %r10 + movq 24(%rdx), %r11 + movq 16(%rdx), %r14 + movq (%rdx), %r12 + movq 8(%rdx), %r15 + addq (%rsi), %r12 + adcq 8(%rsi), %r15 + adcq 16(%rsi), %r14 + adcq 24(%rsi), %r11 + adcq 32(%rsi), %r10 + adcq 40(%rsi), %rbp + movq %rbp, -8(%rsp) ## 8-byte Spill + adcq 48(%rsi), %r9 + movq %r12, %rsi + subq (%rcx), %rsi + movq %r15, %rdx + sbbq 8(%rcx), %rdx + movq %r14, %rax + sbbq 16(%rcx), %rax + movq %r11, %rbx + sbbq 24(%rcx), %rbx + movq %r10, %r13 + sbbq 32(%rcx), %r13 + sbbq 40(%rcx), %rbp + movq %r9, %r8 + sbbq 48(%rcx), %r8 + movq %r8, %rcx + sarq $63, %rcx + cmovsq %r12, %rsi + movq %rsi, (%rdi) + cmovsq %r15, %rdx + movq %rdx, 8(%rdi) + cmovsq %r14, %rax + movq %rax, 16(%rdi) + cmovsq %r11, %rbx + movq %rbx, 24(%rdi) + cmovsq %r10, %r13 + movq %r13, 32(%rdi) + cmovsq -8(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 40(%rdi) + cmovsq %r9, %r8 + movq %r8, 48(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_sub7Lbmi2 + .p2align 4, 0x90 +_mcl_fp_sub7Lbmi2: ## @mcl_fp_sub7Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r14 + movq 48(%rsi), %r8 + movq 40(%rdx), %r15 + movq 40(%rsi), %r9 + movq 32(%rdx), %r12 + movq (%rsi), %rax + movq 8(%rsi), %r11 + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %r11 + movq 16(%rsi), %r13 + sbbq 16(%rdx), %r13 + movq 32(%rsi), %r10 + movq 24(%rsi), %rsi + sbbq 24(%rdx), %rsi + movq %rax, (%rdi) + movq %r11, 8(%rdi) + movq %r13, 16(%rdi) + movq %rsi, 24(%rdi) + sbbq %r12, %r10 + movq %r10, 32(%rdi) + sbbq %r15, %r9 + movq %r9, 40(%rdi) + sbbq %r14, %r8 + movq %r8, 48(%rdi) + sbbq $0, %rbx + testb $1, %bl + je LBB106_2 +## BB#1: ## %carry + movq 48(%rcx), %r14 + movq 40(%rcx), %r15 + movq 32(%rcx), %r12 + movq 24(%rcx), %rbx + movq 8(%rcx), %rdx + movq 16(%rcx), %rbp + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %r11, %rdx + 
movq %rdx, 8(%rdi) + adcq %r13, %rbp + movq %rbp, 16(%rdi) + adcq %rsi, %rbx + movq %rbx, 24(%rdi) + adcq %r10, %r12 + movq %r12, 32(%rdi) + adcq %r9, %r15 + movq %r15, 40(%rdi) + adcq %r8, %r14 + movq %r14, 48(%rdi) +LBB106_2: ## %nocarry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_subNF7Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subNF7Lbmi2: ## @mcl_fp_subNF7Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 48(%rsi), %r11 + movdqu (%rdx), %xmm0 + movdqu 16(%rdx), %xmm1 + movdqu 32(%rdx), %xmm2 + pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1] + movd %xmm3, %r14 + movdqu (%rsi), %xmm3 + movdqu 16(%rsi), %xmm4 + movdqu 32(%rsi), %xmm5 + pshufd $78, %xmm5, %xmm6 ## xmm6 = xmm5[2,3,0,1] + movd %xmm6, %rcx + movd %xmm2, %r15 + movd %xmm5, %r9 + pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] + movd %xmm2, %r12 + pshufd $78, %xmm4, %xmm2 ## xmm2 = xmm4[2,3,0,1] + movd %xmm2, %r10 + movd %xmm1, %r13 + pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] + movd %xmm1, %rax + pshufd $78, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,0,1] + movd %xmm0, %rbx + movd %xmm3, %rsi + subq %rbx, %rsi + movd %xmm1, %rbx + sbbq %rax, %rbx + movd %xmm4, %rbp + sbbq %r13, %rbp + sbbq %r12, %r10 + sbbq %r15, %r9 + sbbq %r14, %rcx + movq %rcx, -8(%rsp) ## 8-byte Spill + sbbq 48(%rdx), %r11 + movq %r11, %rax + sarq $63, %rax + movq %rax, %rdx + shldq $1, %r11, %rdx + andq (%r8), %rdx + movq 48(%r8), %r14 + andq %rax, %r14 + movq 40(%r8), %r15 + andq %rax, %r15 + movq 32(%r8), %r12 + andq %rax, %r12 + movq 24(%r8), %r13 + andq %rax, %r13 + movq 16(%r8), %rcx + andq %rax, %rcx + andq 8(%r8), %rax + addq %rsi, %rdx + adcq %rbx, %rax + movq %rdx, (%rdi) + movq %rax, 8(%rdi) + adcq %rbp, %rcx + movq %rcx, 16(%rdi) + adcq %r10, %r13 + movq %r13, 24(%rdi) + adcq %r9, %r12 + movq %r12, 32(%rdi) + adcq -8(%rsp), %r15 ## 8-byte Folded Reload + movq %r15, 40(%rdi) + adcq %r11, %r14 + movq %r14, 48(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_add7Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_add7Lbmi2: ## @mcl_fpDbl_add7Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 104(%rdx), %rax + movq %rax, -8(%rsp) ## 8-byte Spill + movq 96(%rdx), %rax + movq %rax, -24(%rsp) ## 8-byte Spill + movq 88(%rdx), %r11 + movq 80(%rdx), %r14 + movq 24(%rsi), %r15 + movq 32(%rsi), %r12 + movq 16(%rdx), %r9 + movq (%rdx), %rax + movq 8(%rdx), %rbx + addq (%rsi), %rax + adcq 8(%rsi), %rbx + adcq 16(%rsi), %r9 + adcq 24(%rdx), %r15 + adcq 32(%rdx), %r12 + movq 72(%rdx), %r13 + movq 64(%rdx), %rbp + movq %rax, (%rdi) + movq 56(%rdx), %r10 + movq %rbx, 8(%rdi) + movq 48(%rdx), %rcx + movq 40(%rdx), %rdx + movq %r9, 16(%rdi) + movq 104(%rsi), %r9 + movq %r15, 24(%rdi) + movq 40(%rsi), %rbx + adcq %rdx, %rbx + movq 96(%rsi), %r15 + movq %r12, 32(%rdi) + movq 48(%rsi), %rdx + adcq %rcx, %rdx + movq 88(%rsi), %rax + movq %rbx, 40(%rdi) + movq 56(%rsi), %rcx + adcq %r10, %rcx + movq 80(%rsi), %r12 + movq %rdx, 48(%rdi) + movq 72(%rsi), %rdx + movq 64(%rsi), %rsi + adcq %rbp, %rsi + adcq %r13, %rdx + adcq %r14, %r12 + adcq %r11, %rax + movq %rax, -16(%rsp) ## 8-byte Spill + adcq -24(%rsp), %r15 ## 8-byte Folded Reload + movq %r15, -24(%rsp) ## 8-byte Spill + adcq -8(%rsp), %r9 ## 8-byte Folded Reload + sbbq %rbp, %rbp + andl $1, %ebp + movq %rcx, %rbx + subq (%r8), %rbx + movq %rsi, %r10 + sbbq 8(%r8), %r10 + movq %rdx, %r11 + sbbq 
16(%r8), %r11 + movq %r12, %r14 + sbbq 24(%r8), %r14 + movq -16(%rsp), %r13 ## 8-byte Reload + sbbq 32(%r8), %r13 + sbbq 40(%r8), %r15 + movq %r9, %rax + sbbq 48(%r8), %rax + sbbq $0, %rbp + andl $1, %ebp + cmovneq %rcx, %rbx + movq %rbx, 56(%rdi) + testb %bpl, %bpl + cmovneq %rsi, %r10 + movq %r10, 64(%rdi) + cmovneq %rdx, %r11 + movq %r11, 72(%rdi) + cmovneq %r12, %r14 + movq %r14, 80(%rdi) + cmovneq -16(%rsp), %r13 ## 8-byte Folded Reload + movq %r13, 88(%rdi) + cmovneq -24(%rsp), %r15 ## 8-byte Folded Reload + movq %r15, 96(%rdi) + cmovneq %r9, %rax + movq %rax, 104(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sub7Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sub7Lbmi2: ## @mcl_fpDbl_sub7Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 104(%rdx), %rax + movq %rax, -8(%rsp) ## 8-byte Spill + movq 96(%rdx), %r10 + movq 88(%rdx), %r14 + movq 16(%rsi), %rax + movq (%rsi), %r15 + movq 8(%rsi), %r11 + xorl %ecx, %ecx + subq (%rdx), %r15 + sbbq 8(%rdx), %r11 + sbbq 16(%rdx), %rax + movq 24(%rsi), %rbx + sbbq 24(%rdx), %rbx + movq 32(%rsi), %r12 + sbbq 32(%rdx), %r12 + movq 80(%rdx), %r13 + movq 72(%rdx), %rbp + movq %r15, (%rdi) + movq 64(%rdx), %r9 + movq %r11, 8(%rdi) + movq 56(%rdx), %r15 + movq %rax, 16(%rdi) + movq 48(%rdx), %r11 + movq 40(%rdx), %rdx + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %rdx, %rbx + movq 104(%rsi), %rax + movq %r12, 32(%rdi) + movq 48(%rsi), %r12 + sbbq %r11, %r12 + movq 96(%rsi), %r11 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rdx + sbbq %r15, %rdx + movq 88(%rsi), %r15 + movq %r12, 48(%rdi) + movq 64(%rsi), %rbx + sbbq %r9, %rbx + movq 80(%rsi), %r12 + movq 72(%rsi), %r9 + sbbq %rbp, %r9 + sbbq %r13, %r12 + sbbq %r14, %r15 + sbbq %r10, %r11 + sbbq -8(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -8(%rsp) ## 8-byte Spill + movl $0, %ebp + sbbq $0, %rbp + andl $1, %ebp + movq (%r8), %r10 + cmoveq %rcx, %r10 + testb %bpl, %bpl + movq 16(%r8), %rbp + cmoveq %rcx, %rbp + movq 8(%r8), %rsi + cmoveq %rcx, %rsi + movq 48(%r8), %r14 + cmoveq %rcx, %r14 + movq 40(%r8), %r13 + cmoveq %rcx, %r13 + movq 32(%r8), %rax + cmoveq %rcx, %rax + cmovneq 24(%r8), %rcx + addq %rdx, %r10 + adcq %rbx, %rsi + movq %r10, 56(%rdi) + movq %rsi, 64(%rdi) + adcq %r9, %rbp + movq %rbp, 72(%rdi) + adcq %r12, %rcx + movq %rcx, 80(%rdi) + adcq %r15, %rax + movq %rax, 88(%rdi) + adcq %r11, %r13 + movq %r13, 96(%rdi) + adcq -8(%rsp), %r14 ## 8-byte Folded Reload + movq %r14, 104(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .p2align 4, 0x90 +l_mulPv512x64: ## @mulPv512x64 +## BB#0: + mulxq (%rsi), %rcx, %rax + movq %rcx, (%rdi) + mulxq 8(%rsi), %rcx, %r8 + addq %rax, %rcx + movq %rcx, 8(%rdi) + mulxq 16(%rsi), %rcx, %r9 + adcq %r8, %rcx + movq %rcx, 16(%rdi) + mulxq 24(%rsi), %rax, %rcx + adcq %r9, %rax + movq %rax, 24(%rdi) + mulxq 32(%rsi), %rax, %r8 + adcq %rcx, %rax + movq %rax, 32(%rdi) + mulxq 40(%rsi), %rcx, %r9 + adcq %r8, %rcx + movq %rcx, 40(%rdi) + mulxq 48(%rsi), %rax, %rcx + adcq %r9, %rax + movq %rax, 48(%rdi) + mulxq 56(%rsi), %rax, %rdx + adcq %rcx, %rax + movq %rax, 56(%rdi) + adcq $0, %rdx + movq %rdx, 64(%rdi) + movq %rdi, %rax + retq + + .globl _mcl_fp_mulUnitPre8Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mulUnitPre8Lbmi2: ## @mcl_fp_mulUnitPre8Lbmi2 +## BB#0: + pushq %rbx + subq $80, %rsp + movq %rdi, %rbx + leaq 8(%rsp), %rdi + callq l_mulPv512x64 + movq 72(%rsp), %r8 + movq 64(%rsp), %r9 + movq 56(%rsp), %r10 + 
movq 48(%rsp), %r11 + movq 40(%rsp), %rdi + movq 32(%rsp), %rax + movq 24(%rsp), %rcx + movq 8(%rsp), %rdx + movq 16(%rsp), %rsi + movq %rdx, (%rbx) + movq %rsi, 8(%rbx) + movq %rcx, 16(%rbx) + movq %rax, 24(%rbx) + movq %rdi, 32(%rbx) + movq %r11, 40(%rbx) + movq %r10, 48(%rbx) + movq %r9, 56(%rbx) + movq %r8, 64(%rbx) + addq $80, %rsp + popq %rbx + retq + + .globl _mcl_fpDbl_mulPre8Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_mulPre8Lbmi2: ## @mcl_fpDbl_mulPre8Lbmi2 +## BB#0: + pushq %rbp + movq %rsp, %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $200, %rsp + movq %rdx, %r15 + movq %rsi, %rbx + movq %rdi, %r14 + callq _mcl_fpDbl_mulPre4Lbmi2 + leaq 64(%r14), %rdi + leaq 32(%rbx), %rsi + leaq 32(%r15), %rdx + callq _mcl_fpDbl_mulPre4Lbmi2 + movq 56(%rbx), %r10 + movq 48(%rbx), %rdx + movq (%rbx), %rsi + movq 8(%rbx), %rdi + addq 32(%rbx), %rsi + adcq 40(%rbx), %rdi + adcq 16(%rbx), %rdx + adcq 24(%rbx), %r10 + pushfq + popq %r8 + xorl %r9d, %r9d + movq 56(%r15), %rcx + movq 48(%r15), %r13 + movq (%r15), %r12 + movq 8(%r15), %rbx + addq 32(%r15), %r12 + adcq 40(%r15), %rbx + adcq 16(%r15), %r13 + adcq 24(%r15), %rcx + movl $0, %eax + cmovbq %r10, %rax + movq %rax, -88(%rbp) ## 8-byte Spill + movl $0, %eax + cmovbq %rdx, %rax + movq %rax, -80(%rbp) ## 8-byte Spill + movl $0, %eax + cmovbq %rdi, %rax + movq %rax, -72(%rbp) ## 8-byte Spill + movl $0, %eax + cmovbq %rsi, %rax + movq %rax, -64(%rbp) ## 8-byte Spill + sbbq %r15, %r15 + movq %rsi, -168(%rbp) + movq %rdi, -160(%rbp) + movq %rdx, -152(%rbp) + movq %r10, -144(%rbp) + movq %r12, -136(%rbp) + movq %rbx, -128(%rbp) + movq %r13, -120(%rbp) + movq %rcx, -112(%rbp) + pushq %r8 + popfq + cmovaeq %r9, %rcx + movq %rcx, -48(%rbp) ## 8-byte Spill + cmovaeq %r9, %r13 + cmovaeq %r9, %rbx + cmovaeq %r9, %r12 + sbbq %rax, %rax + movq %rax, -56(%rbp) ## 8-byte Spill + leaq -232(%rbp), %rdi + leaq -168(%rbp), %rsi + leaq -136(%rbp), %rdx + callq _mcl_fpDbl_mulPre4Lbmi2 + addq -64(%rbp), %r12 ## 8-byte Folded Reload + adcq -72(%rbp), %rbx ## 8-byte Folded Reload + adcq -80(%rbp), %r13 ## 8-byte Folded Reload + movq -48(%rbp), %r10 ## 8-byte Reload + adcq -88(%rbp), %r10 ## 8-byte Folded Reload + sbbq %rax, %rax + andl $1, %eax + movq -56(%rbp), %rdx ## 8-byte Reload + andl %edx, %r15d + andl $1, %r15d + addq -200(%rbp), %r12 + adcq -192(%rbp), %rbx + adcq -184(%rbp), %r13 + adcq -176(%rbp), %r10 + adcq %rax, %r15 + movq -208(%rbp), %rax + movq -216(%rbp), %rcx + movq -232(%rbp), %rsi + movq -224(%rbp), %rdx + subq (%r14), %rsi + sbbq 8(%r14), %rdx + sbbq 16(%r14), %rcx + sbbq 24(%r14), %rax + movq 32(%r14), %rdi + movq %rdi, -80(%rbp) ## 8-byte Spill + movq 40(%r14), %r8 + movq %r8, -88(%rbp) ## 8-byte Spill + sbbq %rdi, %r12 + sbbq %r8, %rbx + movq 48(%r14), %rdi + movq %rdi, -72(%rbp) ## 8-byte Spill + sbbq %rdi, %r13 + movq 56(%r14), %rdi + movq %rdi, -64(%rbp) ## 8-byte Spill + sbbq %rdi, %r10 + sbbq $0, %r15 + movq 64(%r14), %r11 + subq %r11, %rsi + movq 72(%r14), %rdi + movq %rdi, -56(%rbp) ## 8-byte Spill + sbbq %rdi, %rdx + movq 80(%r14), %rdi + movq %rdi, -48(%rbp) ## 8-byte Spill + sbbq %rdi, %rcx + movq 88(%r14), %rdi + movq %rdi, -104(%rbp) ## 8-byte Spill + sbbq %rdi, %rax + movq 96(%r14), %rdi + movq %rdi, -96(%rbp) ## 8-byte Spill + sbbq %rdi, %r12 + movq 104(%r14), %rdi + sbbq %rdi, %rbx + movq 112(%r14), %r8 + sbbq %r8, %r13 + movq 120(%r14), %r9 + sbbq %r9, %r10 + sbbq $0, %r15 + addq -80(%rbp), %rsi ## 8-byte Folded Reload + adcq -88(%rbp), %rdx ## 8-byte Folded Reload + movq %rsi, 32(%r14) + adcq -72(%rbp), 
%rcx ## 8-byte Folded Reload + movq %rdx, 40(%r14) + adcq -64(%rbp), %rax ## 8-byte Folded Reload + movq %rcx, 48(%r14) + adcq %r11, %r12 + movq %rax, 56(%r14) + movq %r12, 64(%r14) + adcq -56(%rbp), %rbx ## 8-byte Folded Reload + movq %rbx, 72(%r14) + adcq -48(%rbp), %r13 ## 8-byte Folded Reload + movq %r13, 80(%r14) + adcq -104(%rbp), %r10 ## 8-byte Folded Reload + movq %r10, 88(%r14) + adcq -96(%rbp), %r15 ## 8-byte Folded Reload + movq %r15, 96(%r14) + adcq $0, %rdi + movq %rdi, 104(%r14) + adcq $0, %r8 + movq %r8, 112(%r14) + adcq $0, %r9 + movq %r9, 120(%r14) + addq $200, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sqrPre8Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre8Lbmi2: ## @mcl_fpDbl_sqrPre8Lbmi2 +## BB#0: + pushq %rbp + movq %rsp, %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $200, %rsp + movq %rsi, %rbx + movq %rdi, %r14 + movq %rbx, %rdx + callq _mcl_fpDbl_mulPre4Lbmi2 + leaq 64(%r14), %rdi + leaq 32(%rbx), %rsi + movq %rsi, %rdx + callq _mcl_fpDbl_mulPre4Lbmi2 + movq 56(%rbx), %r15 + movq 48(%rbx), %rax + movq (%rbx), %rcx + movq 8(%rbx), %rdx + addq 32(%rbx), %rcx + adcq 40(%rbx), %rdx + adcq 16(%rbx), %rax + adcq 24(%rbx), %r15 + pushfq + popq %r8 + pushfq + popq %r9 + pushfq + popq %r10 + pushfq + popq %rdi + pushfq + popq %rbx + sbbq %rsi, %rsi + movq %rsi, -56(%rbp) ## 8-byte Spill + leaq (%rcx,%rcx), %rsi + xorl %r11d, %r11d + pushq %rbx + popfq + cmovaeq %r11, %rsi + movq %rsi, -48(%rbp) ## 8-byte Spill + movq %rdx, %r13 + shldq $1, %rcx, %r13 + pushq %rdi + popfq + cmovaeq %r11, %r13 + movq %rax, %r12 + shldq $1, %rdx, %r12 + pushq %r10 + popfq + cmovaeq %r11, %r12 + movq %r15, %rbx + movq %rcx, -168(%rbp) + movq %rdx, -160(%rbp) + movq %rax, -152(%rbp) + movq %r15, -144(%rbp) + movq %rcx, -136(%rbp) + movq %rdx, -128(%rbp) + movq %rax, -120(%rbp) + movq %r15, -112(%rbp) + shldq $1, %rax, %r15 + pushq %r9 + popfq + cmovaeq %r11, %r15 + shrq $63, %rbx + pushq %r8 + popfq + cmovaeq %r11, %rbx + leaq -232(%rbp), %rdi + leaq -168(%rbp), %rsi + leaq -136(%rbp), %rdx + callq _mcl_fpDbl_mulPre4Lbmi2 + movq -56(%rbp), %rax ## 8-byte Reload + andl $1, %eax + movq -48(%rbp), %r10 ## 8-byte Reload + addq -200(%rbp), %r10 + adcq -192(%rbp), %r13 + adcq -184(%rbp), %r12 + adcq -176(%rbp), %r15 + adcq %rbx, %rax + movq %rax, %rbx + movq -208(%rbp), %rax + movq -216(%rbp), %rcx + movq -232(%rbp), %rsi + movq -224(%rbp), %rdx + subq (%r14), %rsi + sbbq 8(%r14), %rdx + sbbq 16(%r14), %rcx + sbbq 24(%r14), %rax + movq 32(%r14), %r9 + movq %r9, -56(%rbp) ## 8-byte Spill + movq 40(%r14), %r8 + movq %r8, -48(%rbp) ## 8-byte Spill + sbbq %r9, %r10 + sbbq %r8, %r13 + movq 48(%r14), %rdi + movq %rdi, -104(%rbp) ## 8-byte Spill + sbbq %rdi, %r12 + movq 56(%r14), %rdi + movq %rdi, -96(%rbp) ## 8-byte Spill + sbbq %rdi, %r15 + sbbq $0, %rbx + movq 64(%r14), %r11 + subq %r11, %rsi + movq 72(%r14), %rdi + movq %rdi, -88(%rbp) ## 8-byte Spill + sbbq %rdi, %rdx + movq 80(%r14), %rdi + movq %rdi, -80(%rbp) ## 8-byte Spill + sbbq %rdi, %rcx + movq 88(%r14), %rdi + movq %rdi, -72(%rbp) ## 8-byte Spill + sbbq %rdi, %rax + movq 96(%r14), %rdi + movq %rdi, -64(%rbp) ## 8-byte Spill + sbbq %rdi, %r10 + movq 104(%r14), %rdi + sbbq %rdi, %r13 + movq 112(%r14), %r8 + sbbq %r8, %r12 + movq 120(%r14), %r9 + sbbq %r9, %r15 + sbbq $0, %rbx + addq -56(%rbp), %rsi ## 8-byte Folded Reload + adcq -48(%rbp), %rdx ## 8-byte Folded Reload + movq %rsi, 32(%r14) + adcq -104(%rbp), %rcx ## 8-byte Folded Reload + movq %rdx, 40(%r14) 
+ adcq -96(%rbp), %rax ## 8-byte Folded Reload + movq %rcx, 48(%r14) + adcq %r11, %r10 + movq %rax, 56(%r14) + movq %r10, 64(%r14) + adcq -88(%rbp), %r13 ## 8-byte Folded Reload + movq %r13, 72(%r14) + adcq -80(%rbp), %r12 ## 8-byte Folded Reload + movq %r12, 80(%r14) + adcq -72(%rbp), %r15 ## 8-byte Folded Reload + movq %r15, 88(%r14) + movq %rbx, %rax + adcq -64(%rbp), %rax ## 8-byte Folded Reload + movq %rax, 96(%r14) + adcq $0, %rdi + movq %rdi, 104(%r14) + adcq $0, %r8 + movq %r8, 112(%r14) + adcq $0, %r9 + movq %r9, 120(%r14) + addq $200, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mont8Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mont8Lbmi2: ## @mcl_fp_mont8Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $1256, %rsp ## imm = 0x4E8 + movq %rcx, %r13 + movq %rdx, 64(%rsp) ## 8-byte Spill + movq %rsi, 72(%rsp) ## 8-byte Spill + movq %rdi, 96(%rsp) ## 8-byte Spill + movq -8(%r13), %rbx + movq %rbx, 80(%rsp) ## 8-byte Spill + movq %r13, 56(%rsp) ## 8-byte Spill + movq (%rdx), %rdx + leaq 1184(%rsp), %rdi + callq l_mulPv512x64 + movq 1184(%rsp), %r15 + movq 1192(%rsp), %r14 + movq %r15, %rdx + imulq %rbx, %rdx + movq 1248(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + movq 1240(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + movq 1232(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 1224(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq 1216(%rsp), %r12 + movq 1208(%rsp), %rbx + movq 1200(%rsp), %rbp + leaq 1112(%rsp), %rdi + movq %r13, %rsi + callq l_mulPv512x64 + addq 1112(%rsp), %r15 + adcq 1120(%rsp), %r14 + adcq 1128(%rsp), %rbp + movq %rbp, 88(%rsp) ## 8-byte Spill + adcq 1136(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + adcq 1144(%rsp), %r12 + movq %r12, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %r13 ## 8-byte Reload + adcq 1152(%rsp), %r13 + movq (%rsp), %rbx ## 8-byte Reload + adcq 1160(%rsp), %rbx + movq 40(%rsp), %rbp ## 8-byte Reload + adcq 1168(%rsp), %rbp + movq 24(%rsp), %rax ## 8-byte Reload + adcq 1176(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + sbbq %r15, %r15 + movq 64(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdx + leaq 1040(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + andl $1, %r15d + addq 1040(%rsp), %r14 + movq 88(%rsp), %rax ## 8-byte Reload + adcq 1048(%rsp), %rax + movq %rax, 88(%rsp) ## 8-byte Spill + movq 32(%rsp), %rax ## 8-byte Reload + adcq 1056(%rsp), %rax + movq %rax, %r12 + movq 8(%rsp), %rax ## 8-byte Reload + adcq 1064(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + adcq 1072(%rsp), %r13 + movq %r13, 16(%rsp) ## 8-byte Spill + adcq 1080(%rsp), %rbx + movq %rbx, (%rsp) ## 8-byte Spill + adcq 1088(%rsp), %rbp + movq 24(%rsp), %rax ## 8-byte Reload + adcq 1096(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 1104(%rsp), %r15 + movq %r15, 48(%rsp) ## 8-byte Spill + sbbq %r15, %r15 + movq %r14, %rdx + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 968(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + andl $1, %r15d + addq 968(%rsp), %r14 + movq 88(%rsp), %r13 ## 8-byte Reload + adcq 976(%rsp), %r13 + adcq 984(%rsp), %r12 + movq %r12, 32(%rsp) ## 8-byte Spill + movq 8(%rsp), %r14 ## 8-byte Reload + adcq 992(%rsp), %r14 + movq 16(%rsp), %rbx ## 8-byte Reload + adcq 1000(%rsp), %rbx + movq (%rsp), %rax ## 8-byte Reload + adcq 1008(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + adcq 1016(%rsp), %rbp + movq %rbp, %r12 + movq 24(%rsp), %rbp ## 8-byte 
Reload + adcq 1024(%rsp), %rbp + movq 48(%rsp), %rax ## 8-byte Reload + adcq 1032(%rsp), %rax + movq %rax, 48(%rsp) ## 8-byte Spill + adcq $0, %r15 + movq 64(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rdx + leaq 896(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq %r13, %rcx + addq 896(%rsp), %rcx + movq 32(%rsp), %r13 ## 8-byte Reload + adcq 904(%rsp), %r13 + adcq 912(%rsp), %r14 + adcq 920(%rsp), %rbx + movq %rbx, 16(%rsp) ## 8-byte Spill + movq (%rsp), %rax ## 8-byte Reload + adcq 928(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + adcq 936(%rsp), %r12 + movq %r12, 40(%rsp) ## 8-byte Spill + adcq 944(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq 48(%rsp), %r12 ## 8-byte Reload + adcq 952(%rsp), %r12 + adcq 960(%rsp), %r15 + sbbq %rbx, %rbx + movq %rcx, %rdx + movq %rcx, %rbp + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 824(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + andl $1, %ebx + addq 824(%rsp), %rbp + adcq 832(%rsp), %r13 + movq %r13, 32(%rsp) ## 8-byte Spill + adcq 840(%rsp), %r14 + movq %r14, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %r13 ## 8-byte Reload + adcq 848(%rsp), %r13 + movq (%rsp), %rbp ## 8-byte Reload + adcq 856(%rsp), %rbp + movq 40(%rsp), %r14 ## 8-byte Reload + adcq 864(%rsp), %r14 + movq 24(%rsp), %rax ## 8-byte Reload + adcq 872(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 880(%rsp), %r12 + adcq 888(%rsp), %r15 + adcq $0, %rbx + movq 64(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rdx + leaq 752(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 32(%rsp), %rax ## 8-byte Reload + addq 752(%rsp), %rax + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 760(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + adcq 768(%rsp), %r13 + movq %r13, 16(%rsp) ## 8-byte Spill + adcq 776(%rsp), %rbp + movq %rbp, (%rsp) ## 8-byte Spill + adcq 784(%rsp), %r14 + movq %r14, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 792(%rsp), %rbp + adcq 800(%rsp), %r12 + adcq 808(%rsp), %r15 + adcq 816(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + sbbq %r13, %r13 + movq %rax, %rdx + movq %rax, %rbx + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 680(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq %r13, %rax + andl $1, %eax + addq 680(%rsp), %rbx + movq 8(%rsp), %r14 ## 8-byte Reload + adcq 688(%rsp), %r14 + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 696(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + movq (%rsp), %r13 ## 8-byte Reload + adcq 704(%rsp), %r13 + movq 40(%rsp), %rbx ## 8-byte Reload + adcq 712(%rsp), %rbx + adcq 720(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq %r12, %rbp + adcq 728(%rsp), %rbp + adcq 736(%rsp), %r15 + movq 32(%rsp), %r12 ## 8-byte Reload + adcq 744(%rsp), %r12 + adcq $0, %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 64(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rdx + leaq 608(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq %r14, %rax + addq 608(%rsp), %rax + movq 16(%rsp), %r14 ## 8-byte Reload + adcq 616(%rsp), %r14 + adcq 624(%rsp), %r13 + movq %r13, (%rsp) ## 8-byte Spill + adcq 632(%rsp), %rbx + movq %rbx, %r13 + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 640(%rsp), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 648(%rsp), %rbp + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 656(%rsp), %r15 + adcq 664(%rsp), %r12 + movq %r12, 32(%rsp) ## 8-byte Spill + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 672(%rsp), %rcx + movq 
%rcx, 8(%rsp) ## 8-byte Spill + sbbq %rbp, %rbp + movq %rax, %rdx + movq %rax, %rbx + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 536(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq %rbp, %rax + andl $1, %eax + addq 536(%rsp), %rbx + adcq 544(%rsp), %r14 + movq %r14, 16(%rsp) ## 8-byte Spill + movq (%rsp), %rbx ## 8-byte Reload + adcq 552(%rsp), %rbx + adcq 560(%rsp), %r13 + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 568(%rsp), %rbp + movq 48(%rsp), %r12 ## 8-byte Reload + adcq 576(%rsp), %r12 + adcq 584(%rsp), %r15 + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 592(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + movq 8(%rsp), %r14 ## 8-byte Reload + adcq 600(%rsp), %r14 + adcq $0, %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 64(%rsp), %rax ## 8-byte Reload + movq 40(%rax), %rdx + leaq 464(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 16(%rsp), %rax ## 8-byte Reload + addq 464(%rsp), %rax + adcq 472(%rsp), %rbx + adcq 480(%rsp), %r13 + movq %r13, 40(%rsp) ## 8-byte Spill + adcq 488(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + adcq 496(%rsp), %r12 + adcq 504(%rsp), %r15 + movq %r15, 16(%rsp) ## 8-byte Spill + movq 32(%rsp), %r15 ## 8-byte Reload + adcq 512(%rsp), %r15 + adcq 520(%rsp), %r14 + movq %r14, 8(%rsp) ## 8-byte Spill + movq (%rsp), %r14 ## 8-byte Reload + adcq 528(%rsp), %r14 + sbbq %r13, %r13 + movq %rax, %rdx + movq %rax, %rbp + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 392(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq %r13, %rax + andl $1, %eax + addq 392(%rsp), %rbp + adcq 400(%rsp), %rbx + movq %rbx, (%rsp) ## 8-byte Spill + movq 40(%rsp), %rbp ## 8-byte Reload + adcq 408(%rsp), %rbp + movq 24(%rsp), %rbx ## 8-byte Reload + adcq 416(%rsp), %rbx + adcq 424(%rsp), %r12 + movq 16(%rsp), %r13 ## 8-byte Reload + adcq 432(%rsp), %r13 + adcq 440(%rsp), %r15 + movq %r15, 32(%rsp) ## 8-byte Spill + movq 8(%rsp), %r15 ## 8-byte Reload + adcq 448(%rsp), %r15 + adcq 456(%rsp), %r14 + adcq $0, %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq 64(%rsp), %rax ## 8-byte Reload + movq 48(%rax), %rdx + leaq 320(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq (%rsp), %rax ## 8-byte Reload + addq 320(%rsp), %rax + adcq 328(%rsp), %rbp + movq %rbp, 40(%rsp) ## 8-byte Spill + adcq 336(%rsp), %rbx + movq %rbx, 24(%rsp) ## 8-byte Spill + movq %r12, %rbp + adcq 344(%rsp), %rbp + adcq 352(%rsp), %r13 + movq 32(%rsp), %r12 ## 8-byte Reload + adcq 360(%rsp), %r12 + adcq 368(%rsp), %r15 + movq %r15, 8(%rsp) ## 8-byte Spill + adcq 376(%rsp), %r14 + movq %r14, (%rsp) ## 8-byte Spill + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 384(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + sbbq %r15, %r15 + movq %rax, %rdx + movq %rax, %rbx + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 248(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + andl $1, %r15d + addq 248(%rsp), %rbx + movq 40(%rsp), %rax ## 8-byte Reload + adcq 256(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %r14 ## 8-byte Reload + adcq 264(%rsp), %r14 + adcq 272(%rsp), %rbp + movq %rbp, 48(%rsp) ## 8-byte Spill + movq %r13, %rbx + adcq 280(%rsp), %rbx + movq %r12, %rbp + adcq 288(%rsp), %rbp + movq 8(%rsp), %r13 ## 8-byte Reload + adcq 296(%rsp), %r13 + movq (%rsp), %rax ## 8-byte Reload + adcq 304(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 16(%rsp), %r12 ## 8-byte Reload + adcq 312(%rsp), %r12 + adcq $0, %r15 + movq 
64(%rsp), %rax ## 8-byte Reload + movq 56(%rax), %rdx + leaq 176(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 40(%rsp), %rax ## 8-byte Reload + addq 176(%rsp), %rax + adcq 184(%rsp), %r14 + movq %r14, 24(%rsp) ## 8-byte Spill + movq 48(%rsp), %rcx ## 8-byte Reload + adcq 192(%rsp), %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + adcq 200(%rsp), %rbx + movq %rbx, 16(%rsp) ## 8-byte Spill + adcq 208(%rsp), %rbp + adcq 216(%rsp), %r13 + movq %r13, 8(%rsp) ## 8-byte Spill + movq (%rsp), %r14 ## 8-byte Reload + adcq 224(%rsp), %r14 + adcq 232(%rsp), %r12 + adcq 240(%rsp), %r15 + sbbq %rbx, %rbx + movq 80(%rsp), %rdx ## 8-byte Reload + imulq %rax, %rdx + movq %rax, %r13 + leaq 104(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + andl $1, %ebx + addq 104(%rsp), %r13 + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 112(%rsp), %rcx + movq 48(%rsp), %rdx ## 8-byte Reload + adcq 120(%rsp), %rdx + movq 16(%rsp), %rsi ## 8-byte Reload + adcq 128(%rsp), %rsi + movq %rbp, %rdi + adcq 136(%rsp), %rdi + movq %rdi, 32(%rsp) ## 8-byte Spill + movq 8(%rsp), %r8 ## 8-byte Reload + adcq 144(%rsp), %r8 + movq %r8, 8(%rsp) ## 8-byte Spill + movq %r14, %r9 + adcq 152(%rsp), %r9 + movq %r9, (%rsp) ## 8-byte Spill + adcq 160(%rsp), %r12 + adcq 168(%rsp), %r15 + adcq $0, %rbx + movq %rcx, %rax + movq %rcx, %r11 + movq 56(%rsp), %rbp ## 8-byte Reload + subq (%rbp), %rax + movq %rdx, %rcx + movq %rdx, %r14 + sbbq 8(%rbp), %rcx + movq %rsi, %rdx + movq %rsi, %r13 + sbbq 16(%rbp), %rdx + movq %rdi, %rsi + sbbq 24(%rbp), %rsi + movq %r8, %rdi + sbbq 32(%rbp), %rdi + movq %r9, %r10 + sbbq 40(%rbp), %r10 + movq %r12, %r8 + sbbq 48(%rbp), %r8 + movq %r15, %r9 + sbbq 56(%rbp), %r9 + sbbq $0, %rbx + andl $1, %ebx + cmovneq %r15, %r9 + testb %bl, %bl + cmovneq %r11, %rax + movq 96(%rsp), %rbx ## 8-byte Reload + movq %rax, (%rbx) + cmovneq %r14, %rcx + movq %rcx, 8(%rbx) + cmovneq %r13, %rdx + movq %rdx, 16(%rbx) + cmovneq 32(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 24(%rbx) + cmovneq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 32(%rbx) + cmovneq (%rsp), %r10 ## 8-byte Folded Reload + movq %r10, 40(%rbx) + cmovneq %r12, %r8 + movq %r8, 48(%rbx) + movq %r9, 56(%rbx) + addq $1256, %rsp ## imm = 0x4E8 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF8Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montNF8Lbmi2: ## @mcl_fp_montNF8Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $1240, %rsp ## imm = 0x4D8 + movq %rcx, 40(%rsp) ## 8-byte Spill + movq %rdx, 48(%rsp) ## 8-byte Spill + movq %rsi, 56(%rsp) ## 8-byte Spill + movq %rdi, 80(%rsp) ## 8-byte Spill + movq -8(%rcx), %rbx + movq %rbx, 64(%rsp) ## 8-byte Spill + movq (%rdx), %rdx + leaq 1168(%rsp), %rdi + callq l_mulPv512x64 + movq 1168(%rsp), %r15 + movq 1176(%rsp), %r12 + movq %r15, %rdx + imulq %rbx, %rdx + movq 1232(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 1224(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 1216(%rsp), %r13 + movq 1208(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 1200(%rsp), %r14 + movq 1192(%rsp), %rbp + movq 1184(%rsp), %rbx + leaq 1096(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 1096(%rsp), %r15 + adcq 1104(%rsp), %r12 + movq %r12, 16(%rsp) ## 8-byte Spill + adcq 1112(%rsp), %rbx + adcq 1120(%rsp), %rbp + adcq 1128(%rsp), %r14 + movq %r14, %r12 + movq 8(%rsp), %r14 ## 8-byte Reload + adcq 1136(%rsp), %r14 + adcq 1144(%rsp), 
%r13 + movq (%rsp), %rax ## 8-byte Reload + adcq 1152(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 32(%rsp), %rax ## 8-byte Reload + adcq 1160(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 48(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdx + leaq 1024(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 1088(%rsp), %r15 + movq 16(%rsp), %rax ## 8-byte Reload + addq 1024(%rsp), %rax + adcq 1032(%rsp), %rbx + movq %rbx, 72(%rsp) ## 8-byte Spill + movq %rbp, %rbx + adcq 1040(%rsp), %rbx + adcq 1048(%rsp), %r12 + adcq 1056(%rsp), %r14 + movq %r14, 8(%rsp) ## 8-byte Spill + movq %r13, %rbp + adcq 1064(%rsp), %rbp + movq (%rsp), %rcx ## 8-byte Reload + adcq 1072(%rsp), %rcx + movq %rcx, (%rsp) ## 8-byte Spill + movq 32(%rsp), %r14 ## 8-byte Reload + adcq 1080(%rsp), %r14 + adcq $0, %r15 + movq %rax, %rdx + movq %rax, %r13 + imulq 64(%rsp), %rdx ## 8-byte Folded Reload + leaq 952(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 952(%rsp), %r13 + movq 72(%rsp), %rax ## 8-byte Reload + adcq 960(%rsp), %rax + movq %rax, 72(%rsp) ## 8-byte Spill + adcq 968(%rsp), %rbx + movq %rbx, 16(%rsp) ## 8-byte Spill + movq %r12, %rbx + adcq 976(%rsp), %rbx + movq 8(%rsp), %r12 ## 8-byte Reload + adcq 984(%rsp), %r12 + adcq 992(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq (%rsp), %r13 ## 8-byte Reload + adcq 1000(%rsp), %r13 + movq %r14, %rbp + adcq 1008(%rsp), %rbp + adcq 1016(%rsp), %r15 + movq 48(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rdx + leaq 880(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 944(%rsp), %r14 + movq 72(%rsp), %rax ## 8-byte Reload + addq 880(%rsp), %rax + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 888(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + adcq 896(%rsp), %rbx + adcq 904(%rsp), %r12 + movq %r12, 8(%rsp) ## 8-byte Spill + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 912(%rsp), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 920(%rsp), %r13 + movq %r13, (%rsp) ## 8-byte Spill + adcq 928(%rsp), %rbp + movq %rbp, 32(%rsp) ## 8-byte Spill + adcq 936(%rsp), %r15 + adcq $0, %r14 + movq %rax, %rdx + movq %rax, %rbp + imulq 64(%rsp), %rdx ## 8-byte Folded Reload + leaq 808(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 808(%rsp), %rbp + movq 16(%rsp), %r13 ## 8-byte Reload + adcq 816(%rsp), %r13 + movq %rbx, %r12 + adcq 824(%rsp), %r12 + movq 8(%rsp), %rbx ## 8-byte Reload + adcq 832(%rsp), %rbx + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 840(%rsp), %rbp + movq (%rsp), %rax ## 8-byte Reload + adcq 848(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 32(%rsp), %rax ## 8-byte Reload + adcq 856(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + adcq 864(%rsp), %r15 + adcq 872(%rsp), %r14 + movq 48(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rdx + leaq 736(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 800(%rsp), %rax + movq %r13, %rcx + addq 736(%rsp), %rcx + adcq 744(%rsp), %r12 + movq %r12, 24(%rsp) ## 8-byte Spill + adcq 752(%rsp), %rbx + movq %rbx, 8(%rsp) ## 8-byte Spill + adcq 760(%rsp), %rbp + movq %rbp, %r13 + movq (%rsp), %rbp ## 8-byte Reload + adcq 768(%rsp), %rbp + movq 32(%rsp), %rbx ## 8-byte Reload + adcq 776(%rsp), %rbx + adcq 784(%rsp), %r15 + adcq 792(%rsp), %r14 + adcq $0, %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq %rcx, %rdx + movq %rcx, %r12 + imulq 64(%rsp), %rdx ## 8-byte Folded Reload + leaq 664(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + 
callq l_mulPv512x64 + addq 664(%rsp), %r12 + movq 24(%rsp), %rax ## 8-byte Reload + adcq 672(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + movq 8(%rsp), %rax ## 8-byte Reload + adcq 680(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + adcq 688(%rsp), %r13 + adcq 696(%rsp), %rbp + movq %rbp, (%rsp) ## 8-byte Spill + adcq 704(%rsp), %rbx + adcq 712(%rsp), %r15 + adcq 720(%rsp), %r14 + movq 16(%rsp), %r12 ## 8-byte Reload + adcq 728(%rsp), %r12 + movq 48(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rdx + leaq 592(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 656(%rsp), %rcx + movq 24(%rsp), %rax ## 8-byte Reload + addq 592(%rsp), %rax + movq 8(%rsp), %rbp ## 8-byte Reload + adcq 600(%rsp), %rbp + adcq 608(%rsp), %r13 + movq %r13, 24(%rsp) ## 8-byte Spill + movq (%rsp), %r13 ## 8-byte Reload + adcq 616(%rsp), %r13 + adcq 624(%rsp), %rbx + adcq 632(%rsp), %r15 + adcq 640(%rsp), %r14 + adcq 648(%rsp), %r12 + movq %r12, 16(%rsp) ## 8-byte Spill + adcq $0, %rcx + movq %rcx, (%rsp) ## 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 64(%rsp), %rdx ## 8-byte Folded Reload + leaq 520(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 520(%rsp), %r12 + adcq 528(%rsp), %rbp + movq %rbp, 8(%rsp) ## 8-byte Spill + movq 24(%rsp), %r12 ## 8-byte Reload + adcq 536(%rsp), %r12 + movq %r13, %rbp + adcq 544(%rsp), %rbp + adcq 552(%rsp), %rbx + adcq 560(%rsp), %r15 + adcq 568(%rsp), %r14 + movq 16(%rsp), %r13 ## 8-byte Reload + adcq 576(%rsp), %r13 + movq (%rsp), %rax ## 8-byte Reload + adcq 584(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 48(%rsp), %rax ## 8-byte Reload + movq 40(%rax), %rdx + leaq 448(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 512(%rsp), %rcx + movq 8(%rsp), %rax ## 8-byte Reload + addq 448(%rsp), %rax + adcq 456(%rsp), %r12 + movq %r12, 24(%rsp) ## 8-byte Spill + adcq 464(%rsp), %rbp + adcq 472(%rsp), %rbx + adcq 480(%rsp), %r15 + adcq 488(%rsp), %r14 + adcq 496(%rsp), %r13 + movq %r13, 16(%rsp) ## 8-byte Spill + movq (%rsp), %r13 ## 8-byte Reload + adcq 504(%rsp), %r13 + adcq $0, %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 64(%rsp), %rdx ## 8-byte Folded Reload + leaq 376(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 376(%rsp), %r12 + movq 24(%rsp), %rax ## 8-byte Reload + adcq 384(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 392(%rsp), %rbp + adcq 400(%rsp), %rbx + adcq 408(%rsp), %r15 + adcq 416(%rsp), %r14 + movq 16(%rsp), %r12 ## 8-byte Reload + adcq 424(%rsp), %r12 + adcq 432(%rsp), %r13 + movq 8(%rsp), %rax ## 8-byte Reload + adcq 440(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 48(%rsp), %rax ## 8-byte Reload + movq 48(%rax), %rdx + leaq 304(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 368(%rsp), %rcx + movq 24(%rsp), %rax ## 8-byte Reload + addq 304(%rsp), %rax + adcq 312(%rsp), %rbp + movq %rbp, (%rsp) ## 8-byte Spill + adcq 320(%rsp), %rbx + adcq 328(%rsp), %r15 + adcq 336(%rsp), %r14 + adcq 344(%rsp), %r12 + movq %r12, 16(%rsp) ## 8-byte Spill + adcq 352(%rsp), %r13 + movq 8(%rsp), %rbp ## 8-byte Reload + adcq 360(%rsp), %rbp + adcq $0, %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 64(%rsp), %rdx ## 8-byte Folded Reload + leaq 232(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 232(%rsp), %r12 + movq (%rsp), %rax ## 8-byte Reload + adcq 240(%rsp), 
%rax + movq %rax, (%rsp) ## 8-byte Spill + adcq 248(%rsp), %rbx + adcq 256(%rsp), %r15 + adcq 264(%rsp), %r14 + movq 16(%rsp), %r12 ## 8-byte Reload + adcq 272(%rsp), %r12 + adcq 280(%rsp), %r13 + adcq 288(%rsp), %rbp + movq %rbp, 8(%rsp) ## 8-byte Spill + movq 32(%rsp), %rbp ## 8-byte Reload + adcq 296(%rsp), %rbp + movq 48(%rsp), %rax ## 8-byte Reload + movq 56(%rax), %rdx + leaq 160(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 224(%rsp), %rcx + movq (%rsp), %rax ## 8-byte Reload + addq 160(%rsp), %rax + adcq 168(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + adcq 176(%rsp), %r15 + adcq 184(%rsp), %r14 + adcq 192(%rsp), %r12 + movq %r12, 16(%rsp) ## 8-byte Spill + adcq 200(%rsp), %r13 + movq 8(%rsp), %rbx ## 8-byte Reload + adcq 208(%rsp), %rbx + adcq 216(%rsp), %rbp + movq %rbp, %r12 + adcq $0, %rcx + movq %rcx, (%rsp) ## 8-byte Spill + movq 64(%rsp), %rdx ## 8-byte Reload + imulq %rax, %rdx + movq %rax, %rbp + leaq 88(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 88(%rsp), %rbp + movq 32(%rsp), %r11 ## 8-byte Reload + adcq 96(%rsp), %r11 + adcq 104(%rsp), %r15 + adcq 112(%rsp), %r14 + movq 16(%rsp), %rsi ## 8-byte Reload + adcq 120(%rsp), %rsi + movq %rsi, 16(%rsp) ## 8-byte Spill + adcq 128(%rsp), %r13 + adcq 136(%rsp), %rbx + movq %rbx, 8(%rsp) ## 8-byte Spill + adcq 144(%rsp), %r12 + movq (%rsp), %r8 ## 8-byte Reload + adcq 152(%rsp), %r8 + movq %r11, %rax + movq 40(%rsp), %rbp ## 8-byte Reload + subq (%rbp), %rax + movq %r15, %rcx + sbbq 8(%rbp), %rcx + movq %r14, %rdx + sbbq 16(%rbp), %rdx + sbbq 24(%rbp), %rsi + movq %r13, %rdi + sbbq 32(%rbp), %rdi + movq %rbx, %r9 + sbbq 40(%rbp), %r9 + movq %r12, %r10 + sbbq 48(%rbp), %r10 + movq %rbp, %rbx + movq %r8, %rbp + sbbq 56(%rbx), %rbp + testq %rbp, %rbp + cmovsq %r11, %rax + movq 80(%rsp), %rbx ## 8-byte Reload + movq %rax, (%rbx) + cmovsq %r15, %rcx + movq %rcx, 8(%rbx) + cmovsq %r14, %rdx + movq %rdx, 16(%rbx) + cmovsq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 24(%rbx) + cmovsq %r13, %rdi + movq %rdi, 32(%rbx) + cmovsq 8(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 40(%rbx) + cmovsq %r12, %r10 + movq %r10, 48(%rbx) + cmovsq %r8, %rbp + movq %rbp, 56(%rbx) + addq $1240, %rsp ## imm = 0x4D8 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed8Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montRed8Lbmi2: ## @mcl_fp_montRed8Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $776, %rsp ## imm = 0x308 + movq %rdx, %rax + movq %rdi, 192(%rsp) ## 8-byte Spill + movq -8(%rax), %rcx + movq %rcx, 104(%rsp) ## 8-byte Spill + movq (%rsi), %r15 + movq 8(%rsi), %rdx + movq %rdx, 8(%rsp) ## 8-byte Spill + movq %r15, %rdx + imulq %rcx, %rdx + movq 120(%rsi), %rcx + movq %rcx, 112(%rsp) ## 8-byte Spill + movq 112(%rsi), %rcx + movq %rcx, 56(%rsp) ## 8-byte Spill + movq 104(%rsi), %rcx + movq %rcx, 96(%rsp) ## 8-byte Spill + movq 96(%rsi), %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + movq 88(%rsi), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + movq 80(%rsi), %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + movq 72(%rsi), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + movq 64(%rsi), %r13 + movq 56(%rsi), %rcx + movq %rcx, 64(%rsp) ## 8-byte Spill + movq 48(%rsi), %r14 + movq 40(%rsi), %rcx + movq %rcx, 72(%rsp) ## 8-byte Spill + movq 32(%rsi), %r12 + movq 24(%rsi), %rbx + movq 16(%rsi), %rbp + movq %rax, %rcx + movq (%rcx), %rax + movq %rax, 136(%rsp) ## 8-byte Spill + movq 56(%rcx), 
%rax + movq %rax, 184(%rsp) ## 8-byte Spill + movq 48(%rcx), %rax + movq %rax, 176(%rsp) ## 8-byte Spill + movq 40(%rcx), %rax + movq %rax, 168(%rsp) ## 8-byte Spill + movq 32(%rcx), %rax + movq %rax, 160(%rsp) ## 8-byte Spill + movq 24(%rcx), %rax + movq %rax, 152(%rsp) ## 8-byte Spill + movq 16(%rcx), %rax + movq %rax, 144(%rsp) ## 8-byte Spill + movq 8(%rcx), %rax + movq %rax, 128(%rsp) ## 8-byte Spill + movq %rcx, %rsi + movq %rsi, 88(%rsp) ## 8-byte Spill + leaq 704(%rsp), %rdi + callq l_mulPv512x64 + addq 704(%rsp), %r15 + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 712(%rsp), %rcx + adcq 720(%rsp), %rbp + movq %rbp, 80(%rsp) ## 8-byte Spill + adcq 728(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + adcq 736(%rsp), %r12 + movq %r12, 120(%rsp) ## 8-byte Spill + movq 72(%rsp), %rax ## 8-byte Reload + adcq 744(%rsp), %rax + movq %rax, 72(%rsp) ## 8-byte Spill + adcq 752(%rsp), %r14 + movq %r14, %r12 + movq 64(%rsp), %rax ## 8-byte Reload + adcq 760(%rsp), %rax + movq %rax, 64(%rsp) ## 8-byte Spill + adcq 768(%rsp), %r13 + movq %r13, 8(%rsp) ## 8-byte Spill + adcq $0, 16(%rsp) ## 8-byte Folded Spill + movq 40(%rsp), %r15 ## 8-byte Reload + adcq $0, %r15 + adcq $0, 24(%rsp) ## 8-byte Folded Spill + adcq $0, 48(%rsp) ## 8-byte Folded Spill + adcq $0, 96(%rsp) ## 8-byte Folded Spill + movq 56(%rsp), %r13 ## 8-byte Reload + adcq $0, %r13 + movq 112(%rsp), %r14 ## 8-byte Reload + adcq $0, %r14 + sbbq %rbx, %rbx + movq %rcx, %rbp + movq %rbp, %rdx + imulq 104(%rsp), %rdx ## 8-byte Folded Reload + leaq 632(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + andl $1, %ebx + movq %rbx, %rax + addq 632(%rsp), %rbp + movq 80(%rsp), %rsi ## 8-byte Reload + adcq 640(%rsp), %rsi + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 648(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + movq 120(%rsp), %rcx ## 8-byte Reload + adcq 656(%rsp), %rcx + movq %rcx, 120(%rsp) ## 8-byte Spill + movq 72(%rsp), %rcx ## 8-byte Reload + adcq 664(%rsp), %rcx + movq %rcx, 72(%rsp) ## 8-byte Spill + adcq 672(%rsp), %r12 + movq 64(%rsp), %rcx ## 8-byte Reload + adcq 680(%rsp), %rcx + movq %rcx, 64(%rsp) ## 8-byte Spill + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 688(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 696(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + adcq $0, %r15 + movq %r15, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rbx ## 8-byte Reload + adcq $0, %rbx + movq 48(%rsp), %r15 ## 8-byte Reload + adcq $0, %r15 + adcq $0, 96(%rsp) ## 8-byte Folded Spill + adcq $0, %r13 + movq %r13, 56(%rsp) ## 8-byte Spill + adcq $0, %r14 + movq %r14, 112(%rsp) ## 8-byte Spill + movq %rax, %rbp + adcq $0, %rbp + movq %rsi, %rdx + movq %rsi, %r14 + imulq 104(%rsp), %rdx ## 8-byte Folded Reload + leaq 560(%rsp), %rdi + movq 88(%rsp), %r13 ## 8-byte Reload + movq %r13, %rsi + callq l_mulPv512x64 + addq 560(%rsp), %r14 + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 568(%rsp), %rcx + movq 120(%rsp), %rax ## 8-byte Reload + adcq 576(%rsp), %rax + movq %rax, 120(%rsp) ## 8-byte Spill + movq 72(%rsp), %rax ## 8-byte Reload + adcq 584(%rsp), %rax + movq %rax, 72(%rsp) ## 8-byte Spill + adcq 592(%rsp), %r12 + movq %r12, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %r14 ## 8-byte Reload + adcq 600(%rsp), %r14 + movq 8(%rsp), %rax ## 8-byte Reload + adcq 608(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %rax ## 8-byte Reload + adcq 616(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq 40(%rsp), %rax ## 8-byte Reload + adcq 624(%rsp), %rax + 
movq %rax, 40(%rsp) ## 8-byte Spill + adcq $0, %rbx + movq %rbx, 24(%rsp) ## 8-byte Spill + adcq $0, %r15 + movq %r15, 48(%rsp) ## 8-byte Spill + movq 96(%rsp), %rbx ## 8-byte Reload + adcq $0, %rbx + movq 56(%rsp), %r15 ## 8-byte Reload + adcq $0, %r15 + adcq $0, 112(%rsp) ## 8-byte Folded Spill + adcq $0, %rbp + movq %rbp, 80(%rsp) ## 8-byte Spill + movq %rcx, %rbp + movq %rbp, %rdx + movq 104(%rsp), %r12 ## 8-byte Reload + imulq %r12, %rdx + leaq 488(%rsp), %rdi + movq %r13, %rsi + callq l_mulPv512x64 + addq 488(%rsp), %rbp + movq 120(%rsp), %rax ## 8-byte Reload + adcq 496(%rsp), %rax + movq 72(%rsp), %rbp ## 8-byte Reload + adcq 504(%rsp), %rbp + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 512(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + adcq 520(%rsp), %r14 + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 528(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 536(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + movq 40(%rsp), %r13 ## 8-byte Reload + adcq 544(%rsp), %r13 + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 552(%rsp), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq $0, 48(%rsp) ## 8-byte Folded Spill + adcq $0, %rbx + movq %rbx, 96(%rsp) ## 8-byte Spill + movq %r15, %rbx + adcq $0, %rbx + adcq $0, 112(%rsp) ## 8-byte Folded Spill + adcq $0, 80(%rsp) ## 8-byte Folded Spill + movq %rax, %rdx + movq %rax, %r15 + imulq %r12, %rdx + leaq 416(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 416(%rsp), %r15 + adcq 424(%rsp), %rbp + movq %rbp, %rax + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 432(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + movq %r14, %r12 + adcq 440(%rsp), %r12 + movq 8(%rsp), %r14 ## 8-byte Reload + adcq 448(%rsp), %r14 + movq 16(%rsp), %rbp ## 8-byte Reload + adcq 456(%rsp), %rbp + adcq 464(%rsp), %r13 + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 472(%rsp), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + movq 48(%rsp), %rcx ## 8-byte Reload + adcq 480(%rsp), %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + adcq $0, 96(%rsp) ## 8-byte Folded Spill + adcq $0, %rbx + movq %rbx, 56(%rsp) ## 8-byte Spill + movq 112(%rsp), %r15 ## 8-byte Reload + adcq $0, %r15 + adcq $0, 80(%rsp) ## 8-byte Folded Spill + movq %rax, %rbx + movq %rbx, %rdx + imulq 104(%rsp), %rdx ## 8-byte Folded Reload + leaq 344(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 344(%rsp), %rbx + movq 32(%rsp), %rax ## 8-byte Reload + adcq 352(%rsp), %rax + adcq 360(%rsp), %r12 + movq %r12, 64(%rsp) ## 8-byte Spill + adcq 368(%rsp), %r14 + movq %r14, 8(%rsp) ## 8-byte Spill + adcq 376(%rsp), %rbp + movq %rbp, 16(%rsp) ## 8-byte Spill + adcq 384(%rsp), %r13 + movq %r13, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %r13 ## 8-byte Reload + adcq 392(%rsp), %r13 + movq 48(%rsp), %r12 ## 8-byte Reload + adcq 400(%rsp), %r12 + movq 96(%rsp), %r14 ## 8-byte Reload + adcq 408(%rsp), %r14 + movq 56(%rsp), %rbp ## 8-byte Reload + adcq $0, %rbp + movq %r15, %rbx + adcq $0, %rbx + adcq $0, 80(%rsp) ## 8-byte Folded Spill + movq %rax, %rdx + movq %rax, %r15 + imulq 104(%rsp), %rdx ## 8-byte Folded Reload + leaq 272(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 272(%rsp), %r15 + movq 64(%rsp), %rcx ## 8-byte Reload + adcq 280(%rsp), %rcx + movq 8(%rsp), %rax ## 8-byte Reload + adcq 288(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %rax ## 8-byte Reload + adcq 296(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq 40(%rsp), %rax ## 8-byte Reload + 
adcq 304(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 312(%rsp), %r13 + movq %r13, 24(%rsp) ## 8-byte Spill + adcq 320(%rsp), %r12 + movq %r12, 48(%rsp) ## 8-byte Spill + adcq 328(%rsp), %r14 + movq %r14, %r13 + adcq 336(%rsp), %rbp + movq %rbp, %r12 + adcq $0, %rbx + movq %rbx, %r14 + movq 80(%rsp), %r15 ## 8-byte Reload + adcq $0, %r15 + movq 104(%rsp), %rdx ## 8-byte Reload + movq %rcx, %rbx + imulq %rbx, %rdx + leaq 200(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 200(%rsp), %rbx + movq 8(%rsp), %rax ## 8-byte Reload + adcq 208(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %r8 ## 8-byte Reload + adcq 216(%rsp), %r8 + movq %r8, 16(%rsp) ## 8-byte Spill + movq 40(%rsp), %rdx ## 8-byte Reload + adcq 224(%rsp), %rdx + movq 24(%rsp), %rsi ## 8-byte Reload + adcq 232(%rsp), %rsi + movq 48(%rsp), %rdi ## 8-byte Reload + adcq 240(%rsp), %rdi + movq %r13, %rbp + adcq 248(%rsp), %rbp + movq %r12, %rbx + adcq 256(%rsp), %rbx + movq %rbx, 56(%rsp) ## 8-byte Spill + movq %r14, %r9 + adcq 264(%rsp), %r9 + adcq $0, %r15 + movq %r15, %r10 + subq 136(%rsp), %rax ## 8-byte Folded Reload + movq %r8, %rcx + sbbq 128(%rsp), %rcx ## 8-byte Folded Reload + movq %rdx, %r13 + sbbq 144(%rsp), %r13 ## 8-byte Folded Reload + movq %rsi, %r12 + sbbq 152(%rsp), %r12 ## 8-byte Folded Reload + movq %rdi, %r14 + sbbq 160(%rsp), %r14 ## 8-byte Folded Reload + movq %rbp, %r11 + sbbq 168(%rsp), %r11 ## 8-byte Folded Reload + movq %rbx, %r8 + sbbq 176(%rsp), %r8 ## 8-byte Folded Reload + movq %r9, %r15 + sbbq 184(%rsp), %r9 ## 8-byte Folded Reload + sbbq $0, %r10 + andl $1, %r10d + cmovneq %r15, %r9 + testb %r10b, %r10b + cmovneq 8(%rsp), %rax ## 8-byte Folded Reload + movq 192(%rsp), %rbx ## 8-byte Reload + movq %rax, (%rbx) + cmovneq 16(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 8(%rbx) + cmovneq %rdx, %r13 + movq %r13, 16(%rbx) + cmovneq %rsi, %r12 + movq %r12, 24(%rbx) + cmovneq %rdi, %r14 + movq %r14, 32(%rbx) + cmovneq %rbp, %r11 + movq %r11, 40(%rbx) + cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, 48(%rbx) + movq %r9, 56(%rbx) + addq $776, %rsp ## imm = 0x308 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_addPre8Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addPre8Lbmi2: ## @mcl_fp_addPre8Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r8 + movq 56(%rsi), %r15 + movq 48(%rdx), %r9 + movq 48(%rsi), %r12 + movq 40(%rdx), %r10 + movq 32(%rdx), %r11 + movq 24(%rdx), %r14 + movq 16(%rdx), %rbx + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rbx + movq 40(%rsi), %r13 + movq 24(%rsi), %rax + movq 32(%rsi), %rsi + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %rbx, 16(%rdi) + adcq %r14, %rax + movq %rax, 24(%rdi) + adcq %r11, %rsi + movq %rsi, 32(%rdi) + adcq %r10, %r13 + movq %r13, 40(%rdi) + adcq %r9, %r12 + movq %r12, 48(%rdi) + adcq %r8, %r15 + movq %r15, 56(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_subPre8Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subPre8Lbmi2: ## @mcl_fp_subPre8Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r8 + movq 56(%rsi), %r15 + movq 48(%rdx), %r9 + movq 40(%rdx), %r10 + movq 24(%rdx), %r11 + movq 32(%rdx), %r14 + movq (%rsi), %rbx + movq 8(%rsi), %r12 + xorl %eax, %eax + subq (%rdx), %rbx + sbbq 8(%rdx), %r12 + movq 16(%rsi), %rcx + 
sbbq 16(%rdx), %rcx + movq 48(%rsi), %r13 + movq 40(%rsi), %rdx + movq 32(%rsi), %rbp + movq 24(%rsi), %rsi + movq %rbx, (%rdi) + movq %r12, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r11, %rsi + movq %rsi, 24(%rdi) + sbbq %r14, %rbp + movq %rbp, 32(%rdi) + sbbq %r10, %rdx + movq %rdx, 40(%rdi) + sbbq %r9, %r13 + movq %r13, 48(%rdi) + sbbq %r8, %r15 + movq %r15, 56(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_shr1_8Lbmi2 + .p2align 4, 0x90 +_mcl_fp_shr1_8Lbmi2: ## @mcl_fp_shr1_8Lbmi2 +## BB#0: + movq 56(%rsi), %r8 + movq 48(%rsi), %r9 + movq 40(%rsi), %r10 + movq 32(%rsi), %r11 + movq 24(%rsi), %rcx + movq 16(%rsi), %rdx + movq (%rsi), %rax + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rax + movq %rax, (%rdi) + shrdq $1, %rdx, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rcx, %rdx + movq %rdx, 16(%rdi) + shrdq $1, %r11, %rcx + movq %rcx, 24(%rdi) + shrdq $1, %r10, %r11 + movq %r11, 32(%rdi) + shrdq $1, %r9, %r10 + movq %r10, 40(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 48(%rdi) + shrq %r8 + movq %r8, 56(%rdi) + retq + + .globl _mcl_fp_add8Lbmi2 + .p2align 4, 0x90 +_mcl_fp_add8Lbmi2: ## @mcl_fp_add8Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r15 + movq 56(%rsi), %r8 + movq 48(%rdx), %r12 + movq 48(%rsi), %r9 + movq 40(%rsi), %r13 + movq 24(%rsi), %r11 + movq 32(%rsi), %r10 + movq (%rdx), %r14 + movq 8(%rdx), %rbx + addq (%rsi), %r14 + adcq 8(%rsi), %rbx + movq 16(%rdx), %rax + adcq 16(%rsi), %rax + adcq 24(%rdx), %r11 + movq 40(%rdx), %rsi + adcq 32(%rdx), %r10 + movq %r14, (%rdi) + movq %rbx, 8(%rdi) + movq %rax, 16(%rdi) + movq %r11, 24(%rdi) + movq %r10, 32(%rdi) + adcq %r13, %rsi + movq %rsi, 40(%rdi) + adcq %r12, %r9 + movq %r9, 48(%rdi) + adcq %r15, %r8 + movq %r8, 56(%rdi) + sbbq %rdx, %rdx + andl $1, %edx + subq (%rcx), %r14 + sbbq 8(%rcx), %rbx + sbbq 16(%rcx), %rax + sbbq 24(%rcx), %r11 + sbbq 32(%rcx), %r10 + sbbq 40(%rcx), %rsi + sbbq 48(%rcx), %r9 + sbbq 56(%rcx), %r8 + sbbq $0, %rdx + testb $1, %dl + jne LBB120_2 +## BB#1: ## %nocarry + movq %r14, (%rdi) + movq %rbx, 8(%rdi) + movq %rax, 16(%rdi) + movq %r11, 24(%rdi) + movq %r10, 32(%rdi) + movq %rsi, 40(%rdi) + movq %r9, 48(%rdi) + movq %r8, 56(%rdi) +LBB120_2: ## %carry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_addNF8Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addNF8Lbmi2: ## @mcl_fp_addNF8Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r8 + movq 48(%rdx), %rbp + movq 40(%rdx), %rbx + movq 32(%rdx), %rax + movq 24(%rdx), %r11 + movq 16(%rdx), %r15 + movq (%rdx), %r13 + movq 8(%rdx), %r12 + addq (%rsi), %r13 + adcq 8(%rsi), %r12 + adcq 16(%rsi), %r15 + adcq 24(%rsi), %r11 + adcq 32(%rsi), %rax + movq %rax, %r10 + movq %r10, -24(%rsp) ## 8-byte Spill + adcq 40(%rsi), %rbx + movq %rbx, %r9 + movq %r9, -16(%rsp) ## 8-byte Spill + adcq 48(%rsi), %rbp + movq %rbp, %rax + movq %rax, -8(%rsp) ## 8-byte Spill + adcq 56(%rsi), %r8 + movq %r13, %rsi + subq (%rcx), %rsi + movq %r12, %rdx + sbbq 8(%rcx), %rdx + movq %r15, %rbx + sbbq 16(%rcx), %rbx + movq %r11, %r14 + sbbq 24(%rcx), %r14 + movq %r10, %rbp + sbbq 32(%rcx), %rbp + movq %r9, %r10 + sbbq 40(%rcx), %r10 + movq %rax, %r9 + sbbq 48(%rcx), %r9 + movq %r8, %rax + sbbq 56(%rcx), %rax + testq %rax, %rax + cmovsq %r13, %rsi + movq %rsi, (%rdi) + cmovsq %r12, %rdx + movq %rdx, 8(%rdi) + cmovsq %r15, %rbx + movq %rbx, 16(%rdi) + cmovsq %r11, %r14 + movq %r14, 
24(%rdi) + cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 32(%rdi) + cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, 40(%rdi) + cmovsq -8(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 48(%rdi) + cmovsq %r8, %rax + movq %rax, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_sub8Lbmi2 + .p2align 4, 0x90 +_mcl_fp_sub8Lbmi2: ## @mcl_fp_sub8Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r12 + movq 56(%rsi), %r8 + movq 48(%rdx), %r13 + movq (%rsi), %rax + movq 8(%rsi), %r10 + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %r10 + movq 16(%rsi), %r11 + sbbq 16(%rdx), %r11 + movq 24(%rsi), %r15 + sbbq 24(%rdx), %r15 + movq 32(%rsi), %r14 + sbbq 32(%rdx), %r14 + movq 48(%rsi), %r9 + movq 40(%rsi), %rsi + sbbq 40(%rdx), %rsi + movq %rax, (%rdi) + movq %r10, 8(%rdi) + movq %r11, 16(%rdi) + movq %r15, 24(%rdi) + movq %r14, 32(%rdi) + movq %rsi, 40(%rdi) + sbbq %r13, %r9 + movq %r9, 48(%rdi) + sbbq %r12, %r8 + movq %r8, 56(%rdi) + sbbq $0, %rbx + testb $1, %bl + je LBB122_2 +## BB#1: ## %carry + addq (%rcx), %rax + movq %rax, (%rdi) + movq 8(%rcx), %rax + adcq %r10, %rax + movq %rax, 8(%rdi) + movq 16(%rcx), %rax + adcq %r11, %rax + movq %rax, 16(%rdi) + movq 24(%rcx), %rax + adcq %r15, %rax + movq %rax, 24(%rdi) + movq 32(%rcx), %rax + adcq %r14, %rax + movq %rax, 32(%rdi) + movq 40(%rcx), %rax + adcq %rsi, %rax + movq %rax, 40(%rdi) + movq 48(%rcx), %rax + adcq %r9, %rax + movq %rax, 48(%rdi) + movq 56(%rcx), %rax + adcq %r8, %rax + movq %rax, 56(%rdi) +LBB122_2: ## %nocarry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_subNF8Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subNF8Lbmi2: ## @mcl_fp_subNF8Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq %rdi, %r9 + movdqu (%rdx), %xmm0 + movdqu 16(%rdx), %xmm1 + movdqu 32(%rdx), %xmm2 + movdqu 48(%rdx), %xmm3 + pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1] + movd %xmm4, %r12 + movdqu (%rsi), %xmm4 + movdqu 16(%rsi), %xmm5 + movdqu 32(%rsi), %xmm8 + movdqu 48(%rsi), %xmm7 + pshufd $78, %xmm7, %xmm6 ## xmm6 = xmm7[2,3,0,1] + movd %xmm6, %rcx + movd %xmm3, %r13 + movd %xmm7, %rdi + pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1] + movd %xmm3, %rbp + pshufd $78, %xmm8, %xmm3 ## xmm3 = xmm8[2,3,0,1] + movd %xmm3, %rdx + movd %xmm2, %rsi + pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] + movd %xmm2, %r11 + pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1] + movd %xmm1, %r15 + pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] + movd %xmm1, %rbx + pshufd $78, %xmm4, %xmm1 ## xmm1 = xmm4[2,3,0,1] + movd %xmm0, %rax + movd %xmm4, %r14 + subq %rax, %r14 + movd %xmm1, %r10 + sbbq %rbx, %r10 + movd %xmm5, %rbx + sbbq %r15, %rbx + movd %xmm2, %r15 + sbbq %r11, %r15 + movd %xmm8, %r11 + sbbq %rsi, %r11 + sbbq %rbp, %rdx + movq %rdx, -24(%rsp) ## 8-byte Spill + sbbq %r13, %rdi + movq %rdi, -16(%rsp) ## 8-byte Spill + sbbq %r12, %rcx + movq %rcx, -8(%rsp) ## 8-byte Spill + movq %rcx, %rbp + sarq $63, %rbp + movq 56(%r8), %r12 + andq %rbp, %r12 + movq 48(%r8), %r13 + andq %rbp, %r13 + movq 40(%r8), %rdi + andq %rbp, %rdi + movq 32(%r8), %rsi + andq %rbp, %rsi + movq 24(%r8), %rdx + andq %rbp, %rdx + movq 16(%r8), %rcx + andq %rbp, %rcx + movq 8(%r8), %rax + andq %rbp, %rax + andq (%r8), %rbp + addq %r14, %rbp + adcq %r10, %rax + movq %rbp, (%r9) + adcq %rbx, %rcx + movq %rax, 8(%r9) + movq %rcx, 16(%r9) + adcq %r15, %rdx + movq %rdx, 
24(%r9) + adcq %r11, %rsi + movq %rsi, 32(%r9) + adcq -24(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 40(%r9) + adcq -16(%rsp), %r13 ## 8-byte Folded Reload + movq %r13, 48(%r9) + adcq -8(%rsp), %r12 ## 8-byte Folded Reload + movq %r12, 56(%r9) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_add8Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_add8Lbmi2: ## @mcl_fpDbl_add8Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 120(%rdx), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + movq 112(%rdx), %rax + movq %rax, -16(%rsp) ## 8-byte Spill + movq 104(%rdx), %rax + movq %rax, -24(%rsp) ## 8-byte Spill + movq 96(%rdx), %r14 + movq 24(%rsi), %r15 + movq 32(%rsi), %r11 + movq 16(%rdx), %r12 + movq (%rdx), %rbx + movq 8(%rdx), %rax + addq (%rsi), %rbx + adcq 8(%rsi), %rax + adcq 16(%rsi), %r12 + adcq 24(%rdx), %r15 + adcq 32(%rdx), %r11 + movq 88(%rdx), %rbp + movq 80(%rdx), %r13 + movq %rbx, (%rdi) + movq 72(%rdx), %r10 + movq %rax, 8(%rdi) + movq 64(%rdx), %r9 + movq %r12, 16(%rdi) + movq 40(%rdx), %r12 + movq %r15, 24(%rdi) + movq 40(%rsi), %rbx + adcq %r12, %rbx + movq 56(%rdx), %r15 + movq 48(%rdx), %r12 + movq %r11, 32(%rdi) + movq 48(%rsi), %rdx + adcq %r12, %rdx + movq 120(%rsi), %r12 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rax + adcq %r15, %rax + movq 112(%rsi), %rcx + movq %rdx, 48(%rdi) + movq 64(%rsi), %rbx + adcq %r9, %rbx + movq 104(%rsi), %rdx + movq %rax, 56(%rdi) + movq 72(%rsi), %r9 + adcq %r10, %r9 + movq 80(%rsi), %r11 + adcq %r13, %r11 + movq 96(%rsi), %rax + movq 88(%rsi), %r15 + adcq %rbp, %r15 + adcq %r14, %rax + movq %rax, -8(%rsp) ## 8-byte Spill + movq %rdx, %rax + adcq -24(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -24(%rsp) ## 8-byte Spill + adcq -16(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -16(%rsp) ## 8-byte Spill + adcq -32(%rsp), %r12 ## 8-byte Folded Reload + movq %r12, -32(%rsp) ## 8-byte Spill + sbbq %rbp, %rbp + andl $1, %ebp + movq %rbx, %rsi + subq (%r8), %rsi + movq %r9, %rdx + sbbq 8(%r8), %rdx + movq %r11, %r10 + sbbq 16(%r8), %r10 + movq %r15, %r14 + sbbq 24(%r8), %r14 + movq -8(%rsp), %r13 ## 8-byte Reload + sbbq 32(%r8), %r13 + movq %rax, %r12 + sbbq 40(%r8), %r12 + movq %rcx, %rax + sbbq 48(%r8), %rax + movq -32(%rsp), %rcx ## 8-byte Reload + sbbq 56(%r8), %rcx + sbbq $0, %rbp + andl $1, %ebp + cmovneq %rbx, %rsi + movq %rsi, 64(%rdi) + testb %bpl, %bpl + cmovneq %r9, %rdx + movq %rdx, 72(%rdi) + cmovneq %r11, %r10 + movq %r10, 80(%rdi) + cmovneq %r15, %r14 + movq %r14, 88(%rdi) + cmovneq -8(%rsp), %r13 ## 8-byte Folded Reload + movq %r13, 96(%rdi) + cmovneq -24(%rsp), %r12 ## 8-byte Folded Reload + movq %r12, 104(%rdi) + cmovneq -16(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 112(%rdi) + cmovneq -32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 120(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sub8Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sub8Lbmi2: ## @mcl_fpDbl_sub8Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r15 + movq 120(%rdx), %rax + movq %rax, -8(%rsp) ## 8-byte Spill + movq 112(%rdx), %rax + movq %rax, -16(%rsp) ## 8-byte Spill + movq 104(%rdx), %rax + movq %rax, -24(%rsp) ## 8-byte Spill + movq 16(%rsi), %r9 + movq (%rsi), %r12 + movq 8(%rsi), %r14 + xorl %r8d, %r8d + subq (%rdx), %r12 + sbbq 8(%rdx), %r14 + sbbq 16(%rdx), %r9 + movq 24(%rsi), %rbx + sbbq 24(%rdx), %rbx + movq 32(%rsi), %r13 + 
sbbq 32(%rdx), %r13 + movq 96(%rdx), %rbp + movq 88(%rdx), %r11 + movq %r12, (%rdi) + movq 80(%rdx), %r12 + movq %r14, 8(%rdi) + movq 72(%rdx), %r10 + movq %r9, 16(%rdi) + movq 40(%rdx), %r9 + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %r9, %rbx + movq 48(%rdx), %r9 + movq %r13, 32(%rdi) + movq 48(%rsi), %r14 + sbbq %r9, %r14 + movq 64(%rdx), %r13 + movq 56(%rdx), %r9 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rdx + sbbq %r9, %rdx + movq 120(%rsi), %rcx + movq %r14, 48(%rdi) + movq 64(%rsi), %rbx + sbbq %r13, %rbx + movq 112(%rsi), %rax + movq %rdx, 56(%rdi) + movq 72(%rsi), %r9 + sbbq %r10, %r9 + movq 80(%rsi), %r13 + sbbq %r12, %r13 + movq 88(%rsi), %r12 + sbbq %r11, %r12 + movq 104(%rsi), %rdx + movq 96(%rsi), %r14 + sbbq %rbp, %r14 + sbbq -24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -24(%rsp) ## 8-byte Spill + sbbq -16(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -16(%rsp) ## 8-byte Spill + sbbq -8(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -8(%rsp) ## 8-byte Spill + movl $0, %ebp + sbbq $0, %rbp + andl $1, %ebp + movq (%r15), %r11 + cmoveq %r8, %r11 + testb %bpl, %bpl + movq 16(%r15), %rbp + cmoveq %r8, %rbp + movq 8(%r15), %rsi + cmoveq %r8, %rsi + movq 56(%r15), %r10 + cmoveq %r8, %r10 + movq 48(%r15), %rdx + cmoveq %r8, %rdx + movq 40(%r15), %rcx + cmoveq %r8, %rcx + movq 32(%r15), %rax + cmoveq %r8, %rax + cmovneq 24(%r15), %r8 + addq %rbx, %r11 + adcq %r9, %rsi + movq %r11, 64(%rdi) + adcq %r13, %rbp + movq %rsi, 72(%rdi) + movq %rbp, 80(%rdi) + adcq %r12, %r8 + movq %r8, 88(%rdi) + adcq %r14, %rax + movq %rax, 96(%rdi) + adcq -24(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 104(%rdi) + adcq -16(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 112(%rdi) + adcq -8(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, 120(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .p2align 4, 0x90 +l_mulPv576x64: ## @mulPv576x64 +## BB#0: + mulxq (%rsi), %rcx, %rax + movq %rcx, (%rdi) + mulxq 8(%rsi), %rcx, %r8 + addq %rax, %rcx + movq %rcx, 8(%rdi) + mulxq 16(%rsi), %rcx, %r9 + adcq %r8, %rcx + movq %rcx, 16(%rdi) + mulxq 24(%rsi), %rax, %rcx + adcq %r9, %rax + movq %rax, 24(%rdi) + mulxq 32(%rsi), %rax, %r8 + adcq %rcx, %rax + movq %rax, 32(%rdi) + mulxq 40(%rsi), %rcx, %r9 + adcq %r8, %rcx + movq %rcx, 40(%rdi) + mulxq 48(%rsi), %rax, %rcx + adcq %r9, %rax + movq %rax, 48(%rdi) + mulxq 56(%rsi), %rax, %r8 + adcq %rcx, %rax + movq %rax, 56(%rdi) + mulxq 64(%rsi), %rax, %rcx + adcq %r8, %rax + movq %rax, 64(%rdi) + adcq $0, %rcx + movq %rcx, 72(%rdi) + movq %rdi, %rax + retq + + .globl _mcl_fp_mulUnitPre9Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mulUnitPre9Lbmi2: ## @mcl_fp_mulUnitPre9Lbmi2 +## BB#0: + pushq %r14 + pushq %rbx + subq $88, %rsp + movq %rdi, %rbx + leaq 8(%rsp), %rdi + callq l_mulPv576x64 + movq 80(%rsp), %r8 + movq 72(%rsp), %r9 + movq 64(%rsp), %r10 + movq 56(%rsp), %r11 + movq 48(%rsp), %r14 + movq 40(%rsp), %rax + movq 32(%rsp), %rcx + movq 24(%rsp), %rdx + movq 8(%rsp), %rsi + movq 16(%rsp), %rdi + movq %rsi, (%rbx) + movq %rdi, 8(%rbx) + movq %rdx, 16(%rbx) + movq %rcx, 24(%rbx) + movq %rax, 32(%rbx) + movq %r14, 40(%rbx) + movq %r11, 48(%rbx) + movq %r10, 56(%rbx) + movq %r9, 64(%rbx) + movq %r8, 72(%rbx) + addq $88, %rsp + popq %rbx + popq %r14 + retq + + .globl _mcl_fpDbl_mulPre9Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_mulPre9Lbmi2: ## @mcl_fpDbl_mulPre9Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $808, %rsp ## imm = 0x328 + movq %rdx, %rax + movq %rdi, %r12 + movq 
(%rax), %rdx + movq %rax, %rbx + movq %rbx, 80(%rsp) ## 8-byte Spill + leaq 728(%rsp), %rdi + movq %rsi, %rbp + movq %rbp, 72(%rsp) ## 8-byte Spill + callq l_mulPv576x64 + movq 800(%rsp), %r13 + movq 792(%rsp), %rax + movq %rax, 48(%rsp) ## 8-byte Spill + movq 784(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq 776(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 768(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 760(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + movq 752(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 744(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + movq 728(%rsp), %rax + movq 736(%rsp), %r14 + movq %rax, (%r12) + movq %r12, 64(%rsp) ## 8-byte Spill + movq 8(%rbx), %rdx + leaq 648(%rsp), %rdi + movq %rbp, %rsi + callq l_mulPv576x64 + movq 720(%rsp), %r8 + movq 712(%rsp), %rcx + movq 704(%rsp), %rdx + movq 696(%rsp), %rsi + movq 688(%rsp), %rdi + movq 680(%rsp), %rbp + addq 648(%rsp), %r14 + movq 672(%rsp), %rax + movq 656(%rsp), %rbx + movq 664(%rsp), %r15 + movq %r14, 8(%r12) + adcq 24(%rsp), %rbx ## 8-byte Folded Reload + adcq 32(%rsp), %r15 ## 8-byte Folded Reload + adcq 40(%rsp), %rax ## 8-byte Folded Reload + movq %rax, %r14 + adcq (%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 24(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 32(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 40(%rsp) ## 8-byte Spill + adcq 48(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, (%rsp) ## 8-byte Spill + adcq %r13, %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 16(%rsp) ## 8-byte Spill + movq 80(%rsp), %r13 ## 8-byte Reload + movq 16(%r13), %rdx + leaq 568(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 640(%rsp), %r8 + movq 632(%rsp), %r9 + movq 624(%rsp), %r10 + movq 616(%rsp), %rdi + movq 608(%rsp), %rbp + movq 600(%rsp), %rcx + addq 568(%rsp), %rbx + movq 592(%rsp), %rdx + movq 576(%rsp), %r12 + movq 584(%rsp), %rsi + movq 64(%rsp), %rax ## 8-byte Reload + movq %rbx, 16(%rax) + adcq %r15, %r12 + adcq %r14, %rsi + movq %rsi, 48(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 56(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 40(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 32(%rsp) ## 8-byte Spill + adcq (%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 40(%rsp) ## 8-byte Spill + adcq 8(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, (%rsp) ## 8-byte Spill + adcq 16(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 8(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 16(%rsp) ## 8-byte Spill + movq 24(%r13), %rdx + leaq 488(%rsp), %rdi + movq 72(%rsp), %r15 ## 8-byte Reload + movq %r15, %rsi + callq l_mulPv576x64 + movq 560(%rsp), %r8 + movq 552(%rsp), %rcx + movq 544(%rsp), %rdx + movq 536(%rsp), %rsi + movq 528(%rsp), %rdi + movq 520(%rsp), %rbp + addq 488(%rsp), %r12 + movq 512(%rsp), %rax + movq 496(%rsp), %rbx + movq 504(%rsp), %r13 + movq 64(%rsp), %r14 ## 8-byte Reload + movq %r12, 24(%r14) + adcq 48(%rsp), %rbx ## 8-byte Folded Reload + adcq 56(%rsp), %r13 ## 8-byte Folded Reload + adcq 24(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 32(%rsp) ## 8-byte Spill + adcq 40(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 40(%rsp) ## 8-byte Spill + adcq (%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, (%rsp) ## 8-byte Spill + adcq 
8(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 8(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 16(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 48(%rsp) ## 8-byte Spill + movq 80(%rsp), %r12 ## 8-byte Reload + movq 32(%r12), %rdx + leaq 408(%rsp), %rdi + movq %r15, %rsi + callq l_mulPv576x64 + movq 480(%rsp), %r8 + movq 472(%rsp), %r9 + movq 464(%rsp), %rdx + movq 456(%rsp), %rsi + movq 448(%rsp), %rdi + movq 440(%rsp), %rbp + addq 408(%rsp), %rbx + movq 432(%rsp), %rax + movq 416(%rsp), %r15 + movq 424(%rsp), %rcx + movq %rbx, 32(%r14) + adcq %r13, %r15 + adcq 24(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 56(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 40(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 32(%rsp) ## 8-byte Spill + adcq (%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 40(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, (%rsp) ## 8-byte Spill + adcq 16(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 8(%rsp) ## 8-byte Spill + adcq 48(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 16(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 48(%rsp) ## 8-byte Spill + movq %r12, %r14 + movq 40(%r14), %rdx + leaq 328(%rsp), %rdi + movq 72(%rsp), %r13 ## 8-byte Reload + movq %r13, %rsi + callq l_mulPv576x64 + movq 400(%rsp), %r8 + movq 392(%rsp), %r9 + movq 384(%rsp), %rsi + movq 376(%rsp), %rdi + movq 368(%rsp), %rbx + movq 360(%rsp), %rbp + addq 328(%rsp), %r15 + movq 352(%rsp), %rcx + movq 336(%rsp), %r12 + movq 344(%rsp), %rdx + movq 64(%rsp), %rax ## 8-byte Reload + movq %r15, 40(%rax) + adcq 56(%rsp), %r12 ## 8-byte Folded Reload + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 56(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 40(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 32(%rsp) ## 8-byte Spill + adcq (%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, 40(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, (%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 48(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 16(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 48(%rsp) ## 8-byte Spill + movq 48(%r14), %rdx + leaq 248(%rsp), %rdi + movq %r13, %rsi + movq %r13, %r15 + callq l_mulPv576x64 + movq 320(%rsp), %r8 + movq 312(%rsp), %r9 + movq 304(%rsp), %rsi + movq 296(%rsp), %rdi + movq 288(%rsp), %rbx + movq 280(%rsp), %rbp + addq 248(%rsp), %r12 + movq 272(%rsp), %rcx + movq 256(%rsp), %r13 + movq 264(%rsp), %rdx + movq 64(%rsp), %rax ## 8-byte Reload + movq %r12, 48(%rax) + adcq 56(%rsp), %r13 ## 8-byte Folded Reload + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 56(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 40(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 32(%rsp) ## 8-byte Spill + adcq (%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, 40(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, (%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 48(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 16(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 48(%rsp) ## 8-byte Spill + movq 56(%r14), %rdx + leaq 168(%rsp), %rdi + movq %r15, %rsi + callq l_mulPv576x64 + movq 240(%rsp), %rcx + movq 232(%rsp), %rdx + movq 
224(%rsp), %rsi + movq 216(%rsp), %rdi + movq 208(%rsp), %rbx + addq 168(%rsp), %r13 + movq 200(%rsp), %r12 + movq 192(%rsp), %rbp + movq 176(%rsp), %r14 + movq 184(%rsp), %r15 + movq 64(%rsp), %rax ## 8-byte Reload + movq %r13, 56(%rax) + adcq 56(%rsp), %r14 ## 8-byte Folded Reload + adcq 24(%rsp), %r15 ## 8-byte Folded Reload + adcq 32(%rsp), %rbp ## 8-byte Folded Reload + adcq 40(%rsp), %r12 ## 8-byte Folded Reload + adcq (%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, %r13 + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, (%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 48(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq $0, %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + movq 80(%rsp), %rax ## 8-byte Reload + movq 64(%rax), %rdx + leaq 88(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 88(%rsp), %r14 + adcq 96(%rsp), %r15 + movq 160(%rsp), %r8 + adcq 104(%rsp), %rbp + movq 152(%rsp), %r9 + movq 144(%rsp), %rdx + movq 136(%rsp), %rsi + movq 128(%rsp), %rdi + movq 120(%rsp), %rbx + movq 112(%rsp), %rax + movq 64(%rsp), %rcx ## 8-byte Reload + movq %r14, 64(%rcx) + movq %r15, 72(%rcx) + adcq %r12, %rax + movq %rbp, 80(%rcx) + movq %rax, 88(%rcx) + adcq %r13, %rbx + movq %rbx, 96(%rcx) + adcq (%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 104(%rcx) + adcq 8(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 112(%rcx) + adcq 16(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 120(%rcx) + adcq 48(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 128(%rcx) + adcq $0, %r8 + movq %r8, 136(%rcx) + addq $808, %rsp ## imm = 0x328 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sqrPre9Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre9Lbmi2: ## @mcl_fpDbl_sqrPre9Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $808, %rsp ## imm = 0x328 + movq %rsi, %r15 + movq %rdi, %r14 + movq (%r15), %rdx + leaq 728(%rsp), %rdi + callq l_mulPv576x64 + movq 800(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 792(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + movq 784(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq 776(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 768(%rsp), %rax + movq %rax, 56(%rsp) ## 8-byte Spill + movq 760(%rsp), %rax + movq %rax, 48(%rsp) ## 8-byte Spill + movq 752(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + movq 744(%rsp), %rax + movq %rax, 80(%rsp) ## 8-byte Spill + movq 728(%rsp), %rax + movq 736(%rsp), %r12 + movq %rax, (%r14) + movq %r14, 72(%rsp) ## 8-byte Spill + movq 8(%r15), %rdx + leaq 648(%rsp), %rdi + movq %r15, %rsi + callq l_mulPv576x64 + movq 720(%rsp), %r8 + movq 712(%rsp), %rcx + movq 704(%rsp), %rdx + movq 696(%rsp), %rsi + movq 688(%rsp), %rdi + movq 680(%rsp), %rbp + addq 648(%rsp), %r12 + movq 672(%rsp), %rax + movq 656(%rsp), %rbx + movq 664(%rsp), %r13 + movq %r12, 8(%r14) + adcq 80(%rsp), %rbx ## 8-byte Folded Reload + adcq 40(%rsp), %r13 ## 8-byte Folded Reload + adcq 48(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 56(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 56(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 
8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 32(%rsp) ## 8-byte Spill + movq %r15, 64(%rsp) ## 8-byte Spill + movq 16(%r15), %rdx + leaq 568(%rsp), %rdi + movq %r15, %rsi + callq l_mulPv576x64 + movq 640(%rsp), %r8 + movq 632(%rsp), %rcx + movq 624(%rsp), %rdx + movq 616(%rsp), %rsi + movq 608(%rsp), %rdi + movq 600(%rsp), %rbp + addq 568(%rsp), %rbx + movq 592(%rsp), %rax + movq 576(%rsp), %r14 + movq 584(%rsp), %r12 + movq 72(%rsp), %r15 ## 8-byte Reload + movq %rbx, 16(%r15) + adcq %r13, %r14 + adcq 40(%rsp), %r12 ## 8-byte Folded Reload + adcq 48(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 56(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 56(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %rsi ## 8-byte Reload + movq 24(%rsi), %rdx + leaq 488(%rsp), %rdi + callq l_mulPv576x64 + movq 560(%rsp), %r8 + movq 552(%rsp), %rcx + movq 544(%rsp), %rdx + movq 536(%rsp), %rsi + movq 528(%rsp), %rdi + movq 520(%rsp), %rbp + addq 488(%rsp), %r14 + movq 512(%rsp), %rax + movq 496(%rsp), %rbx + movq 504(%rsp), %r13 + movq %r14, 24(%r15) + adcq %r12, %rbx + adcq 40(%rsp), %r13 ## 8-byte Folded Reload + adcq 48(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 56(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 56(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %rsi ## 8-byte Reload + movq 32(%rsi), %rdx + leaq 408(%rsp), %rdi + callq l_mulPv576x64 + movq 480(%rsp), %r8 + movq 472(%rsp), %rcx + movq 464(%rsp), %rdx + movq 456(%rsp), %rsi + movq 448(%rsp), %rdi + movq 440(%rsp), %rbp + addq 408(%rsp), %rbx + movq 432(%rsp), %rax + movq 416(%rsp), %r14 + movq 424(%rsp), %r12 + movq %rbx, 32(%r15) + adcq %r13, %r14 + adcq 40(%rsp), %r12 ## 8-byte Folded Reload + adcq 48(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 56(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 56(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %rsi ## 8-byte Reload + movq 40(%rsi), %rdx + leaq 328(%rsp), %rdi + callq l_mulPv576x64 + movq 400(%rsp), %r8 + movq 392(%rsp), %rcx + movq 384(%rsp), %rdx + movq 376(%rsp), %rsi + movq 368(%rsp), %rdi + movq 360(%rsp), %rbp + addq 328(%rsp), %r14 + movq 352(%rsp), %rax + movq 336(%rsp), %rbx + movq 344(%rsp), %r13 + movq %r14, 40(%r15) + adcq %r12, %rbx + adcq 40(%rsp), %r13 ## 8-byte Folded Reload + adcq 48(%rsp), %rax ## 8-byte Folded 
Reload + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 56(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 56(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %rsi ## 8-byte Reload + movq 48(%rsi), %rdx + leaq 248(%rsp), %rdi + callq l_mulPv576x64 + movq 320(%rsp), %r8 + movq 312(%rsp), %rcx + movq 304(%rsp), %rdx + movq 296(%rsp), %rsi + movq 288(%rsp), %rdi + movq 280(%rsp), %rbp + addq 248(%rsp), %rbx + movq 272(%rsp), %rax + movq 256(%rsp), %r12 + movq 264(%rsp), %r14 + movq %rbx, 48(%r15) + adcq %r13, %r12 + adcq 40(%rsp), %r14 ## 8-byte Folded Reload + adcq 48(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 56(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 56(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %rsi ## 8-byte Reload + movq 56(%rsi), %rdx + leaq 168(%rsp), %rdi + callq l_mulPv576x64 + movq 240(%rsp), %r8 + movq 232(%rsp), %rdx + movq 224(%rsp), %rsi + movq 216(%rsp), %rdi + movq 208(%rsp), %rbx + movq 200(%rsp), %rcx + addq 168(%rsp), %r12 + movq 192(%rsp), %r15 + movq 176(%rsp), %r13 + movq 184(%rsp), %rbp + movq 72(%rsp), %rax ## 8-byte Reload + movq %r12, 56(%rax) + adcq %r14, %r13 + adcq 40(%rsp), %rbp ## 8-byte Folded Reload + adcq 48(%rsp), %r15 ## 8-byte Folded Reload + adcq 56(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, %r12 + adcq 8(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, %r14 + adcq 16(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 8(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 16(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 24(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %rsi ## 8-byte Reload + movq 64(%rsi), %rdx + leaq 88(%rsp), %rdi + callq l_mulPv576x64 + addq 88(%rsp), %r13 + adcq 96(%rsp), %rbp + movq 160(%rsp), %r8 + adcq 104(%rsp), %r15 + movq 152(%rsp), %r9 + movq 144(%rsp), %rdx + movq 136(%rsp), %rsi + movq 128(%rsp), %rdi + movq 120(%rsp), %rbx + movq 112(%rsp), %rax + movq 72(%rsp), %rcx ## 8-byte Reload + movq %r13, 64(%rcx) + movq %rbp, 72(%rcx) + adcq %r12, %rax + movq %r15, 80(%rcx) + movq %rax, 88(%rcx) + adcq %r14, %rbx + movq %rbx, 96(%rcx) + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 104(%rcx) + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 112(%rcx) + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 120(%rcx) + adcq 32(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 128(%rcx) + adcq $0, %r8 + movq %r8, 136(%rcx) + addq $808, %rsp ## imm = 0x328 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mont9Lbmi2 + .p2align 4, 0x90 +_mcl_fp_mont9Lbmi2: ## @mcl_fp_mont9Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq 
$1560, %rsp ## imm = 0x618 + movq %rcx, 72(%rsp) ## 8-byte Spill + movq %rdx, 96(%rsp) ## 8-byte Spill + movq %rsi, 88(%rsp) ## 8-byte Spill + movq %rdi, 112(%rsp) ## 8-byte Spill + movq -8(%rcx), %rbx + movq %rbx, 80(%rsp) ## 8-byte Spill + movq (%rdx), %rdx + leaq 1480(%rsp), %rdi + callq l_mulPv576x64 + movq 1480(%rsp), %r14 + movq 1488(%rsp), %r15 + movq %r14, %rdx + imulq %rbx, %rdx + movq 1552(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + movq 1544(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + movq 1536(%rsp), %rax + movq %rax, 56(%rsp) ## 8-byte Spill + movq 1528(%rsp), %r12 + movq 1520(%rsp), %r13 + movq 1512(%rsp), %rbx + movq 1504(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 1496(%rsp), %rbp + leaq 1400(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 1400(%rsp), %r14 + adcq 1408(%rsp), %r15 + adcq 1416(%rsp), %rbp + movq %rbp, 8(%rsp) ## 8-byte Spill + movq (%rsp), %rax ## 8-byte Reload + adcq 1424(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + adcq 1432(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + adcq 1440(%rsp), %r13 + movq %r13, 16(%rsp) ## 8-byte Spill + adcq 1448(%rsp), %r12 + movq %r12, 48(%rsp) ## 8-byte Spill + movq 56(%rsp), %rbx ## 8-byte Reload + adcq 1456(%rsp), %rbx + movq 40(%rsp), %r14 ## 8-byte Reload + adcq 1464(%rsp), %r14 + movq 24(%rsp), %r13 ## 8-byte Reload + adcq 1472(%rsp), %r13 + sbbq %rbp, %rbp + movq 96(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdx + leaq 1320(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %ebp + addq 1320(%rsp), %r15 + movq 8(%rsp), %rax ## 8-byte Reload + adcq 1328(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq (%rsp), %rax ## 8-byte Reload + adcq 1336(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 32(%rsp), %r12 ## 8-byte Reload + adcq 1344(%rsp), %r12 + movq 16(%rsp), %rax ## 8-byte Reload + adcq 1352(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq 48(%rsp), %rax ## 8-byte Reload + adcq 1360(%rsp), %rax + movq %rax, 48(%rsp) ## 8-byte Spill + adcq 1368(%rsp), %rbx + adcq 1376(%rsp), %r14 + movq %r14, 40(%rsp) ## 8-byte Spill + adcq 1384(%rsp), %r13 + movq %r13, 24(%rsp) ## 8-byte Spill + adcq 1392(%rsp), %rbp + sbbq %r14, %r14 + movq %r15, %rdx + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 1240(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq %r14, %rax + andl $1, %eax + addq 1240(%rsp), %r15 + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 1248(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq (%rsp), %r14 ## 8-byte Reload + adcq 1256(%rsp), %r14 + adcq 1264(%rsp), %r12 + movq %r12, 32(%rsp) ## 8-byte Spill + movq 16(%rsp), %r12 ## 8-byte Reload + adcq 1272(%rsp), %r12 + movq 48(%rsp), %r13 ## 8-byte Reload + adcq 1280(%rsp), %r13 + adcq 1288(%rsp), %rbx + movq %rbx, 56(%rsp) ## 8-byte Spill + movq 40(%rsp), %r15 ## 8-byte Reload + adcq 1296(%rsp), %r15 + movq 24(%rsp), %rbx ## 8-byte Reload + adcq 1304(%rsp), %rbx + adcq 1312(%rsp), %rbp + adcq $0, %rax + movq %rax, 64(%rsp) ## 8-byte Spill + movq 96(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rdx + leaq 1160(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 8(%rsp), %rax ## 8-byte Reload + addq 1160(%rsp), %rax + adcq 1168(%rsp), %r14 + movq %r14, (%rsp) ## 8-byte Spill + movq 32(%rsp), %r14 ## 8-byte Reload + adcq 1176(%rsp), %r14 + adcq 1184(%rsp), %r12 + movq %r12, 16(%rsp) ## 8-byte Spill + movq %r13, %r12 + adcq 1192(%rsp), %r12 + movq 56(%rsp), %rcx ## 8-byte Reload + 
adcq 1200(%rsp), %rcx + movq %rcx, 56(%rsp) ## 8-byte Spill + adcq 1208(%rsp), %r15 + movq %r15, %r13 + adcq 1216(%rsp), %rbx + movq %rbx, 24(%rsp) ## 8-byte Spill + adcq 1224(%rsp), %rbp + movq 64(%rsp), %rcx ## 8-byte Reload + adcq 1232(%rsp), %rcx + movq %rcx, 64(%rsp) ## 8-byte Spill + sbbq %r15, %r15 + movq %rax, %rdx + movq %rax, %rbx + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 1080(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq %r15, %rax + andl $1, %eax + addq 1080(%rsp), %rbx + movq (%rsp), %rcx ## 8-byte Reload + adcq 1088(%rsp), %rcx + movq %rcx, (%rsp) ## 8-byte Spill + movq %r14, %r15 + adcq 1096(%rsp), %r15 + movq 16(%rsp), %r14 ## 8-byte Reload + adcq 1104(%rsp), %r14 + movq %r12, %rbx + adcq 1112(%rsp), %rbx + movq 56(%rsp), %rcx ## 8-byte Reload + adcq 1120(%rsp), %rcx + movq %rcx, 56(%rsp) ## 8-byte Spill + adcq 1128(%rsp), %r13 + movq %r13, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %r13 ## 8-byte Reload + adcq 1136(%rsp), %r13 + adcq 1144(%rsp), %rbp + movq 64(%rsp), %r12 ## 8-byte Reload + adcq 1152(%rsp), %r12 + adcq $0, %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 96(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rdx + leaq 1000(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq (%rsp), %rax ## 8-byte Reload + addq 1000(%rsp), %rax + adcq 1008(%rsp), %r15 + movq %r15, 32(%rsp) ## 8-byte Spill + adcq 1016(%rsp), %r14 + movq %r14, %r15 + adcq 1024(%rsp), %rbx + movq %rbx, 48(%rsp) ## 8-byte Spill + movq 56(%rsp), %r14 ## 8-byte Reload + adcq 1032(%rsp), %r14 + movq 40(%rsp), %rcx ## 8-byte Reload + adcq 1040(%rsp), %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + adcq 1048(%rsp), %r13 + movq %r13, 24(%rsp) ## 8-byte Spill + adcq 1056(%rsp), %rbp + adcq 1064(%rsp), %r12 + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 1072(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + sbbq %rbx, %rbx + movq %rax, %rdx + movq %rax, %r13 + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 920(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %ebx + movq %rbx, %rax + addq 920(%rsp), %r13 + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 928(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + adcq 936(%rsp), %r15 + movq %r15, 16(%rsp) ## 8-byte Spill + movq 48(%rsp), %r15 ## 8-byte Reload + adcq 944(%rsp), %r15 + movq %r14, %r13 + adcq 952(%rsp), %r13 + movq 40(%rsp), %r14 ## 8-byte Reload + adcq 960(%rsp), %r14 + movq 24(%rsp), %rbx ## 8-byte Reload + adcq 968(%rsp), %rbx + adcq 976(%rsp), %rbp + adcq 984(%rsp), %r12 + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 992(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 96(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rdx + leaq 840(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 32(%rsp), %rax ## 8-byte Reload + addq 840(%rsp), %rax + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 848(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + adcq 856(%rsp), %r15 + adcq 864(%rsp), %r13 + movq %r13, 56(%rsp) ## 8-byte Spill + adcq 872(%rsp), %r14 + movq %r14, 40(%rsp) ## 8-byte Spill + adcq 880(%rsp), %rbx + movq %rbx, 24(%rsp) ## 8-byte Spill + adcq 888(%rsp), %rbp + adcq 896(%rsp), %r12 + movq 8(%rsp), %r13 ## 8-byte Reload + adcq 904(%rsp), %r13 + movq (%rsp), %rcx ## 8-byte Reload + adcq 912(%rsp), %rcx + movq %rcx, (%rsp) ## 8-byte Spill + sbbq %rbx, %rbx + movq %rax, %rdx + movq %rax, %r14 + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 760(%rsp), %rdi 
+ movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %ebx + movq %rbx, %rax + addq 760(%rsp), %r14 + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 768(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + adcq 776(%rsp), %r15 + movq 56(%rsp), %r14 ## 8-byte Reload + adcq 784(%rsp), %r14 + movq 40(%rsp), %rcx ## 8-byte Reload + adcq 792(%rsp), %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 800(%rsp), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 808(%rsp), %rbp + movq %r12, %rbx + adcq 816(%rsp), %rbx + movq %r13, %r12 + adcq 824(%rsp), %r12 + movq (%rsp), %r13 ## 8-byte Reload + adcq 832(%rsp), %r13 + adcq $0, %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 96(%rsp), %rax ## 8-byte Reload + movq 40(%rax), %rdx + leaq 680(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 16(%rsp), %rax ## 8-byte Reload + addq 680(%rsp), %rax + adcq 688(%rsp), %r15 + movq %r15, 48(%rsp) ## 8-byte Spill + adcq 696(%rsp), %r14 + movq %r14, 56(%rsp) ## 8-byte Spill + movq 40(%rsp), %rcx ## 8-byte Reload + adcq 704(%rsp), %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %r15 ## 8-byte Reload + adcq 712(%rsp), %r15 + adcq 720(%rsp), %rbp + adcq 728(%rsp), %rbx + movq %rbx, 64(%rsp) ## 8-byte Spill + adcq 736(%rsp), %r12 + movq %r12, 8(%rsp) ## 8-byte Spill + adcq 744(%rsp), %r13 + movq %r13, (%rsp) ## 8-byte Spill + movq 32(%rsp), %r13 ## 8-byte Reload + adcq 752(%rsp), %r13 + sbbq %r14, %r14 + movq %rax, %rdx + movq %rax, %rbx + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 600(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %r14d + addq 600(%rsp), %rbx + movq 48(%rsp), %rax ## 8-byte Reload + adcq 608(%rsp), %rax + movq %rax, 48(%rsp) ## 8-byte Spill + movq 56(%rsp), %rax ## 8-byte Reload + adcq 616(%rsp), %rax + movq %rax, 56(%rsp) ## 8-byte Spill + movq 40(%rsp), %rbx ## 8-byte Reload + adcq 624(%rsp), %rbx + adcq 632(%rsp), %r15 + movq %r15, 24(%rsp) ## 8-byte Spill + adcq 640(%rsp), %rbp + movq 64(%rsp), %r12 ## 8-byte Reload + adcq 648(%rsp), %r12 + movq 8(%rsp), %rax ## 8-byte Reload + adcq 656(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq (%rsp), %r15 ## 8-byte Reload + adcq 664(%rsp), %r15 + adcq 672(%rsp), %r13 + adcq $0, %r14 + movq %r14, 16(%rsp) ## 8-byte Spill + movq 96(%rsp), %rax ## 8-byte Reload + movq 48(%rax), %rdx + leaq 520(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 48(%rsp), %rax ## 8-byte Reload + addq 520(%rsp), %rax + movq 56(%rsp), %r14 ## 8-byte Reload + adcq 528(%rsp), %r14 + adcq 536(%rsp), %rbx + movq %rbx, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 544(%rsp), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 552(%rsp), %rbp + adcq 560(%rsp), %r12 + movq %r12, 64(%rsp) ## 8-byte Spill + movq 8(%rsp), %r12 ## 8-byte Reload + adcq 568(%rsp), %r12 + adcq 576(%rsp), %r15 + movq %r15, (%rsp) ## 8-byte Spill + adcq 584(%rsp), %r13 + movq %r13, 32(%rsp) ## 8-byte Spill + movq 16(%rsp), %r15 ## 8-byte Reload + adcq 592(%rsp), %r15 + sbbq %rbx, %rbx + movq %rax, %rdx + movq %rax, %r13 + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 440(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %ebx + movq %rbx, %rax + addq 440(%rsp), %r13 + adcq 448(%rsp), %r14 + movq %r14, 56(%rsp) ## 8-byte Spill + movq 40(%rsp), %r14 ## 8-byte Reload + adcq 456(%rsp), %r14 + movq 24(%rsp), %rbx ## 8-byte Reload + adcq 464(%rsp), %rbx + adcq 472(%rsp), %rbp + 
movq %rbp, 104(%rsp) ## 8-byte Spill + movq 64(%rsp), %rcx ## 8-byte Reload + adcq 480(%rsp), %rcx + movq %rcx, 64(%rsp) ## 8-byte Spill + adcq 488(%rsp), %r12 + movq %r12, 8(%rsp) ## 8-byte Spill + movq (%rsp), %rbp ## 8-byte Reload + adcq 496(%rsp), %rbp + movq 32(%rsp), %r12 ## 8-byte Reload + adcq 504(%rsp), %r12 + adcq 512(%rsp), %r15 + movq %r15, %r13 + adcq $0, %rax + movq %rax, 48(%rsp) ## 8-byte Spill + movq 96(%rsp), %rax ## 8-byte Reload + movq 56(%rax), %rdx + leaq 360(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 56(%rsp), %rax ## 8-byte Reload + addq 360(%rsp), %rax + adcq 368(%rsp), %r14 + adcq 376(%rsp), %rbx + movq %rbx, 24(%rsp) ## 8-byte Spill + movq 104(%rsp), %rcx ## 8-byte Reload + adcq 384(%rsp), %rcx + movq %rcx, 104(%rsp) ## 8-byte Spill + movq 64(%rsp), %rbx ## 8-byte Reload + adcq 392(%rsp), %rbx + movq 8(%rsp), %r15 ## 8-byte Reload + adcq 400(%rsp), %r15 + adcq 408(%rsp), %rbp + movq %rbp, (%rsp) ## 8-byte Spill + adcq 416(%rsp), %r12 + movq %r12, %rbp + adcq 424(%rsp), %r13 + movq %r13, 16(%rsp) ## 8-byte Spill + movq 48(%rsp), %rcx ## 8-byte Reload + adcq 432(%rsp), %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + sbbq %r13, %r13 + movq %rax, %rdx + movq %rax, %r12 + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 280(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %r13d + addq 280(%rsp), %r12 + adcq 288(%rsp), %r14 + movq %r14, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rax ## 8-byte Reload + adcq 296(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + movq 104(%rsp), %r14 ## 8-byte Reload + adcq 304(%rsp), %r14 + adcq 312(%rsp), %rbx + movq %rbx, 64(%rsp) ## 8-byte Spill + adcq 320(%rsp), %r15 + movq %r15, 8(%rsp) ## 8-byte Spill + movq (%rsp), %rbx ## 8-byte Reload + adcq 328(%rsp), %rbx + adcq 336(%rsp), %rbp + movq %rbp, 32(%rsp) ## 8-byte Spill + movq 16(%rsp), %r12 ## 8-byte Reload + adcq 344(%rsp), %r12 + movq 48(%rsp), %rbp ## 8-byte Reload + adcq 352(%rsp), %rbp + adcq $0, %r13 + movq 96(%rsp), %rax ## 8-byte Reload + movq 64(%rax), %rdx + leaq 200(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 40(%rsp), %rax ## 8-byte Reload + addq 200(%rsp), %rax + movq 24(%rsp), %r15 ## 8-byte Reload + adcq 208(%rsp), %r15 + adcq 216(%rsp), %r14 + movq %r14, 104(%rsp) ## 8-byte Spill + movq 64(%rsp), %r14 ## 8-byte Reload + adcq 224(%rsp), %r14 + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 232(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + adcq 240(%rsp), %rbx + movq %rbx, (%rsp) ## 8-byte Spill + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 248(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + adcq 256(%rsp), %r12 + movq %r12, 16(%rsp) ## 8-byte Spill + adcq 264(%rsp), %rbp + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 272(%rsp), %r13 + sbbq %rbx, %rbx + movq 80(%rsp), %rdx ## 8-byte Reload + imulq %rax, %rdx + movq %rax, %r12 + leaq 120(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %ebx + addq 120(%rsp), %r12 + adcq 128(%rsp), %r15 + movq 104(%rsp), %rbp ## 8-byte Reload + adcq 136(%rsp), %rbp + movq %r14, %rcx + adcq 144(%rsp), %rcx + movq %rcx, 64(%rsp) ## 8-byte Spill + movq 8(%rsp), %r8 ## 8-byte Reload + adcq 152(%rsp), %r8 + movq %r8, 8(%rsp) ## 8-byte Spill + movq (%rsp), %r9 ## 8-byte Reload + adcq 160(%rsp), %r9 + movq %r9, (%rsp) ## 8-byte Spill + movq 32(%rsp), %r10 ## 8-byte Reload + adcq 168(%rsp), %r10 + movq %r10, 32(%rsp) ## 8-byte Spill + movq 16(%rsp), %rdi ## 8-byte Reload + adcq 176(%rsp), %rdi + 
movq %rdi, 16(%rsp) ## 8-byte Spill + movq 48(%rsp), %r14 ## 8-byte Reload + adcq 184(%rsp), %r14 + adcq 192(%rsp), %r13 + adcq $0, %rbx + movq %r15, %rsi + movq %r15, %r12 + movq 72(%rsp), %rdx ## 8-byte Reload + subq (%rdx), %rsi + movq %rbp, %rax + movq %rbp, %r15 + sbbq 8(%rdx), %rax + movq %rcx, %rbp + sbbq 16(%rdx), %rbp + movq %r8, %rcx + sbbq 24(%rdx), %rcx + movq %r9, %r8 + sbbq 32(%rdx), %r8 + movq %r10, %r11 + sbbq 40(%rdx), %r11 + movq %rdi, %r10 + sbbq 48(%rdx), %r10 + movq %r14, %rdi + sbbq 56(%rdx), %rdi + movq %r13, %r9 + sbbq 64(%rdx), %r9 + sbbq $0, %rbx + andl $1, %ebx + cmovneq %r13, %r9 + testb %bl, %bl + cmovneq %r12, %rsi + movq 112(%rsp), %rbx ## 8-byte Reload + movq %rsi, (%rbx) + cmovneq %r15, %rax + movq %rax, 8(%rbx) + cmovneq 64(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 16(%rbx) + cmovneq 8(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rbx) + cmovneq (%rsp), %r8 ## 8-byte Folded Reload + movq %r8, 32(%rbx) + cmovneq 32(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, 40(%rbx) + cmovneq 16(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, 48(%rbx) + cmovneq %r14, %rdi + movq %rdi, 56(%rbx) + movq %r9, 64(%rbx) + addq $1560, %rsp ## imm = 0x618 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF9Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montNF9Lbmi2: ## @mcl_fp_montNF9Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $1560, %rsp ## imm = 0x618 + movq %rcx, 72(%rsp) ## 8-byte Spill + movq %rdx, 80(%rsp) ## 8-byte Spill + movq %rsi, 88(%rsp) ## 8-byte Spill + movq %rdi, 112(%rsp) ## 8-byte Spill + movq -8(%rcx), %rbx + movq %rbx, 96(%rsp) ## 8-byte Spill + movq (%rdx), %rdx + leaq 1480(%rsp), %rdi + callq l_mulPv576x64 + movq 1480(%rsp), %r12 + movq 1488(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq %r12, %rdx + imulq %rbx, %rdx + movq 1552(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + movq 1544(%rsp), %r13 + movq 1536(%rsp), %rax + movq %rax, 48(%rsp) ## 8-byte Spill + movq 1528(%rsp), %rax + movq %rax, 64(%rsp) ## 8-byte Spill + movq 1520(%rsp), %r14 + movq 1512(%rsp), %r15 + movq 1504(%rsp), %rbx + movq 1496(%rsp), %rbp + leaq 1400(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 1400(%rsp), %r12 + movq 16(%rsp), %rax ## 8-byte Reload + adcq 1408(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + adcq 1416(%rsp), %rbp + movq %rbp, 104(%rsp) ## 8-byte Spill + adcq 1424(%rsp), %rbx + movq %rbx, (%rsp) ## 8-byte Spill + adcq 1432(%rsp), %r15 + movq %r15, 8(%rsp) ## 8-byte Spill + adcq 1440(%rsp), %r14 + movq %r14, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %rbx ## 8-byte Reload + adcq 1448(%rsp), %rbx + movq 48(%rsp), %r12 ## 8-byte Reload + adcq 1456(%rsp), %r12 + adcq 1464(%rsp), %r13 + movq %r13, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 1472(%rsp), %rbp + movq 80(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdx + leaq 1320(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 1392(%rsp), %rax + movq 16(%rsp), %rcx ## 8-byte Reload + addq 1320(%rsp), %rcx + movq 104(%rsp), %r15 ## 8-byte Reload + adcq 1328(%rsp), %r15 + movq (%rsp), %r14 ## 8-byte Reload + adcq 1336(%rsp), %r14 + movq 8(%rsp), %rdx ## 8-byte Reload + adcq 1344(%rsp), %rdx + movq %rdx, 8(%rsp) ## 8-byte Spill + movq 32(%rsp), %r13 ## 8-byte Reload + adcq 1352(%rsp), %r13 + adcq 1360(%rsp), %rbx + movq %rbx, 64(%rsp) ## 8-byte Spill + adcq 1368(%rsp), %r12 + movq %r12, 48(%rsp) ## 8-byte 
Spill + movq 40(%rsp), %rdx ## 8-byte Reload + adcq 1376(%rsp), %rdx + movq %rdx, 40(%rsp) ## 8-byte Spill + adcq 1384(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, %rbp + movq %rcx, %rdx + movq %rcx, %rbx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 1240(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 1240(%rsp), %rbx + adcq 1248(%rsp), %r15 + movq %r15, 104(%rsp) ## 8-byte Spill + adcq 1256(%rsp), %r14 + movq %r14, (%rsp) ## 8-byte Spill + movq 8(%rsp), %r12 ## 8-byte Reload + adcq 1264(%rsp), %r12 + adcq 1272(%rsp), %r13 + movq %r13, %r14 + movq 64(%rsp), %r13 ## 8-byte Reload + adcq 1280(%rsp), %r13 + movq 48(%rsp), %rbx ## 8-byte Reload + adcq 1288(%rsp), %rbx + movq 40(%rsp), %r15 ## 8-byte Reload + adcq 1296(%rsp), %r15 + movq 24(%rsp), %rax ## 8-byte Reload + adcq 1304(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 1312(%rsp), %rbp + movq %rbp, 56(%rsp) ## 8-byte Spill + movq 80(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rdx + leaq 1160(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 1232(%rsp), %rax + movq 104(%rsp), %rcx ## 8-byte Reload + addq 1160(%rsp), %rcx + movq (%rsp), %rbp ## 8-byte Reload + adcq 1168(%rsp), %rbp + adcq 1176(%rsp), %r12 + movq %r12, 8(%rsp) ## 8-byte Spill + adcq 1184(%rsp), %r14 + adcq 1192(%rsp), %r13 + movq %r13, %r12 + adcq 1200(%rsp), %rbx + movq %rbx, 48(%rsp) ## 8-byte Spill + adcq 1208(%rsp), %r15 + movq %r15, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rbx ## 8-byte Reload + adcq 1216(%rsp), %rbx + movq 56(%rsp), %rdx ## 8-byte Reload + adcq 1224(%rsp), %rdx + movq %rdx, 56(%rsp) ## 8-byte Spill + movq %rax, %r15 + adcq $0, %r15 + movq %rcx, %rdx + movq %rcx, %r13 + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 1080(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 1080(%rsp), %r13 + adcq 1088(%rsp), %rbp + movq %rbp, (%rsp) ## 8-byte Spill + movq 8(%rsp), %r13 ## 8-byte Reload + adcq 1096(%rsp), %r13 + adcq 1104(%rsp), %r14 + adcq 1112(%rsp), %r12 + movq %r12, 64(%rsp) ## 8-byte Spill + movq 48(%rsp), %r12 ## 8-byte Reload + adcq 1120(%rsp), %r12 + movq 40(%rsp), %rbp ## 8-byte Reload + adcq 1128(%rsp), %rbp + adcq 1136(%rsp), %rbx + movq %rbx, 24(%rsp) ## 8-byte Spill + movq 56(%rsp), %rbx ## 8-byte Reload + adcq 1144(%rsp), %rbx + adcq 1152(%rsp), %r15 + movq 80(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rdx + leaq 1000(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 1072(%rsp), %rax + movq (%rsp), %rcx ## 8-byte Reload + addq 1000(%rsp), %rcx + adcq 1008(%rsp), %r13 + movq %r13, 8(%rsp) ## 8-byte Spill + adcq 1016(%rsp), %r14 + movq %r14, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %r14 ## 8-byte Reload + adcq 1024(%rsp), %r14 + adcq 1032(%rsp), %r12 + adcq 1040(%rsp), %rbp + movq %rbp, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %r13 ## 8-byte Reload + adcq 1048(%rsp), %r13 + adcq 1056(%rsp), %rbx + movq %rbx, 56(%rsp) ## 8-byte Spill + adcq 1064(%rsp), %r15 + movq %r15, 16(%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, (%rsp) ## 8-byte Spill + movq %rcx, %rdx + movq %rcx, %rbx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 920(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 920(%rsp), %rbx + movq 8(%rsp), %rax ## 8-byte Reload + adcq 928(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 32(%rsp), %rbp ## 8-byte Reload + adcq 936(%rsp), %rbp + movq %r14, %rbx + adcq 944(%rsp), %rbx + adcq 952(%rsp), %r12 + 
movq 40(%rsp), %rax ## 8-byte Reload + adcq 960(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 968(%rsp), %r13 + movq %r13, %r15 + movq 56(%rsp), %r13 ## 8-byte Reload + adcq 976(%rsp), %r13 + movq 16(%rsp), %r14 ## 8-byte Reload + adcq 984(%rsp), %r14 + movq (%rsp), %rax ## 8-byte Reload + adcq 992(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 80(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rdx + leaq 840(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 912(%rsp), %rax + movq 8(%rsp), %rcx ## 8-byte Reload + addq 840(%rsp), %rcx + adcq 848(%rsp), %rbp + movq %rbp, 32(%rsp) ## 8-byte Spill + adcq 856(%rsp), %rbx + movq %rbx, 64(%rsp) ## 8-byte Spill + adcq 864(%rsp), %r12 + movq 40(%rsp), %rbp ## 8-byte Reload + adcq 872(%rsp), %rbp + adcq 880(%rsp), %r15 + movq %r15, 24(%rsp) ## 8-byte Spill + adcq 888(%rsp), %r13 + adcq 896(%rsp), %r14 + movq %r14, 16(%rsp) ## 8-byte Spill + movq (%rsp), %rdx ## 8-byte Reload + adcq 904(%rsp), %rdx + movq %rdx, (%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, %r14 + movq %rcx, %rdx + movq %rcx, %rbx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 760(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 760(%rsp), %rbx + movq 32(%rsp), %rax ## 8-byte Reload + adcq 768(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %r15 ## 8-byte Reload + adcq 776(%rsp), %r15 + adcq 784(%rsp), %r12 + movq %r12, 48(%rsp) ## 8-byte Spill + movq %rbp, %rbx + adcq 792(%rsp), %rbx + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 800(%rsp), %rbp + adcq 808(%rsp), %r13 + movq 16(%rsp), %rax ## 8-byte Reload + adcq 816(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq (%rsp), %r12 ## 8-byte Reload + adcq 824(%rsp), %r12 + adcq 832(%rsp), %r14 + movq 80(%rsp), %rax ## 8-byte Reload + movq 40(%rax), %rdx + leaq 680(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 752(%rsp), %rcx + movq 32(%rsp), %rax ## 8-byte Reload + addq 680(%rsp), %rax + adcq 688(%rsp), %r15 + movq %r15, 64(%rsp) ## 8-byte Spill + movq 48(%rsp), %rdx ## 8-byte Reload + adcq 696(%rsp), %rdx + movq %rdx, 48(%rsp) ## 8-byte Spill + adcq 704(%rsp), %rbx + movq %rbx, 40(%rsp) ## 8-byte Spill + adcq 712(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + adcq 720(%rsp), %r13 + movq %r13, %r15 + movq 16(%rsp), %rbx ## 8-byte Reload + adcq 728(%rsp), %rbx + adcq 736(%rsp), %r12 + movq %r12, (%rsp) ## 8-byte Spill + adcq 744(%rsp), %r14 + movq %r14, 32(%rsp) ## 8-byte Spill + adcq $0, %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq %rax, %rdx + movq %rax, %r13 + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 600(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 600(%rsp), %r13 + movq 64(%rsp), %r13 ## 8-byte Reload + adcq 608(%rsp), %r13 + movq 48(%rsp), %r12 ## 8-byte Reload + adcq 616(%rsp), %r12 + movq 40(%rsp), %rbp ## 8-byte Reload + adcq 624(%rsp), %rbp + movq 24(%rsp), %rax ## 8-byte Reload + adcq 632(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 640(%rsp), %r15 + movq %r15, 56(%rsp) ## 8-byte Spill + adcq 648(%rsp), %rbx + movq %rbx, 16(%rsp) ## 8-byte Spill + movq (%rsp), %r14 ## 8-byte Reload + adcq 656(%rsp), %r14 + movq 32(%rsp), %rbx ## 8-byte Reload + adcq 664(%rsp), %rbx + movq 8(%rsp), %r15 ## 8-byte Reload + adcq 672(%rsp), %r15 + movq 80(%rsp), %rax ## 8-byte Reload + movq 48(%rax), %rdx + leaq 520(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 592(%rsp), %rcx + movq %r13, %rax + addq 
520(%rsp), %rax + adcq 528(%rsp), %r12 + movq %r12, 48(%rsp) ## 8-byte Spill + movq %rbp, %r12 + adcq 536(%rsp), %r12 + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 544(%rsp), %rbp + movq 56(%rsp), %rdx ## 8-byte Reload + adcq 552(%rsp), %rdx + movq %rdx, 56(%rsp) ## 8-byte Spill + movq 16(%rsp), %rdx ## 8-byte Reload + adcq 560(%rsp), %rdx + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq 568(%rsp), %r14 + movq %r14, (%rsp) ## 8-byte Spill + adcq 576(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + adcq 584(%rsp), %r15 + movq %r15, 8(%rsp) ## 8-byte Spill + adcq $0, %rcx + movq %rcx, %r13 + movq %rax, %rdx + movq %rax, %r14 + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 440(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 440(%rsp), %r14 + movq 48(%rsp), %rax ## 8-byte Reload + adcq 448(%rsp), %rax + movq %rax, 48(%rsp) ## 8-byte Spill + adcq 456(%rsp), %r12 + adcq 464(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq 56(%rsp), %r14 ## 8-byte Reload + adcq 472(%rsp), %r14 + movq 16(%rsp), %r15 ## 8-byte Reload + adcq 480(%rsp), %r15 + movq (%rsp), %rbp ## 8-byte Reload + adcq 488(%rsp), %rbp + movq 32(%rsp), %rbx ## 8-byte Reload + adcq 496(%rsp), %rbx + movq 8(%rsp), %rax ## 8-byte Reload + adcq 504(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + adcq 512(%rsp), %r13 + movq 80(%rsp), %rax ## 8-byte Reload + movq 56(%rax), %rdx + leaq 360(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 432(%rsp), %rcx + movq 48(%rsp), %rax ## 8-byte Reload + addq 360(%rsp), %rax + adcq 368(%rsp), %r12 + movq %r12, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rdx ## 8-byte Reload + adcq 376(%rsp), %rdx + movq %rdx, 24(%rsp) ## 8-byte Spill + adcq 384(%rsp), %r14 + movq %r14, 56(%rsp) ## 8-byte Spill + adcq 392(%rsp), %r15 + movq %r15, 16(%rsp) ## 8-byte Spill + adcq 400(%rsp), %rbp + movq %rbp, (%rsp) ## 8-byte Spill + adcq 408(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + movq 8(%rsp), %r14 ## 8-byte Reload + adcq 416(%rsp), %r14 + adcq 424(%rsp), %r13 + movq %r13, %r15 + adcq $0, %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 280(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 280(%rsp), %r12 + movq 40(%rsp), %rax ## 8-byte Reload + adcq 288(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 296(%rsp), %rbp + movq 56(%rsp), %rax ## 8-byte Reload + adcq 304(%rsp), %rax + movq %rax, 56(%rsp) ## 8-byte Spill + movq 16(%rsp), %r13 ## 8-byte Reload + adcq 312(%rsp), %r13 + movq (%rsp), %r12 ## 8-byte Reload + adcq 320(%rsp), %r12 + movq 32(%rsp), %rbx ## 8-byte Reload + adcq 328(%rsp), %rbx + adcq 336(%rsp), %r14 + movq %r14, 8(%rsp) ## 8-byte Spill + adcq 344(%rsp), %r15 + movq %r15, 64(%rsp) ## 8-byte Spill + movq 48(%rsp), %r14 ## 8-byte Reload + adcq 352(%rsp), %r14 + movq 80(%rsp), %rax ## 8-byte Reload + movq 64(%rax), %rdx + leaq 200(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 272(%rsp), %rcx + movq 40(%rsp), %rax ## 8-byte Reload + addq 200(%rsp), %rax + adcq 208(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq 56(%rsp), %rbp ## 8-byte Reload + adcq 216(%rsp), %rbp + adcq 224(%rsp), %r13 + movq %r13, 16(%rsp) ## 8-byte Spill + adcq 232(%rsp), %r12 + movq %r12, (%rsp) ## 8-byte Spill + adcq 240(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + movq 8(%rsp), %r15 ## 8-byte Reload + adcq 248(%rsp), %r15 + movq 64(%rsp), 
%r12 ## 8-byte Reload + adcq 256(%rsp), %r12 + adcq 264(%rsp), %r14 + adcq $0, %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + movq 96(%rsp), %rdx ## 8-byte Reload + imulq %rax, %rdx + movq %rax, %rbx + leaq 120(%rsp), %rdi + movq 72(%rsp), %r13 ## 8-byte Reload + movq %r13, %rsi + callq l_mulPv576x64 + addq 120(%rsp), %rbx + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 128(%rsp), %rcx + movq %rbp, %rdx + adcq 136(%rsp), %rdx + movq 16(%rsp), %rsi ## 8-byte Reload + adcq 144(%rsp), %rsi + movq %rsi, 16(%rsp) ## 8-byte Spill + movq (%rsp), %rdi ## 8-byte Reload + adcq 152(%rsp), %rdi + movq %rdi, (%rsp) ## 8-byte Spill + movq 32(%rsp), %rbx ## 8-byte Reload + adcq 160(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + movq %r15, %r8 + adcq 168(%rsp), %r8 + movq %r8, 8(%rsp) ## 8-byte Spill + movq %r12, %r15 + adcq 176(%rsp), %r15 + adcq 184(%rsp), %r14 + movq 40(%rsp), %r9 ## 8-byte Reload + adcq 192(%rsp), %r9 + movq %rcx, %rax + movq %rcx, %r11 + movq %r13, %rbp + subq (%rbp), %rax + movq %rdx, %rcx + movq %rdx, %r12 + sbbq 8(%rbp), %rcx + movq %rsi, %rdx + sbbq 16(%rbp), %rdx + movq %rdi, %rsi + sbbq 24(%rbp), %rsi + movq %rbx, %rdi + sbbq 32(%rbp), %rdi + movq %r8, %r10 + sbbq 40(%rbp), %r10 + movq %r15, %r13 + sbbq 48(%rbp), %r13 + movq %r14, %r8 + sbbq 56(%rbp), %r8 + movq %rbp, %rbx + movq %r9, %rbp + sbbq 64(%rbx), %rbp + movq %rbp, %rbx + sarq $63, %rbx + cmovsq %r11, %rax + movq 112(%rsp), %rbx ## 8-byte Reload + movq %rax, (%rbx) + cmovsq %r12, %rcx + movq %rcx, 8(%rbx) + cmovsq 16(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rbx) + cmovsq (%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 24(%rbx) + cmovsq 32(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 32(%rbx) + cmovsq 8(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, 40(%rbx) + cmovsq %r15, %r13 + movq %r13, 48(%rbx) + cmovsq %r14, %r8 + movq %r8, 56(%rbx) + cmovsq %r9, %rbp + movq %rbp, 64(%rbx) + addq $1560, %rsp ## imm = 0x618 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed9Lbmi2 + .p2align 4, 0x90 +_mcl_fp_montRed9Lbmi2: ## @mcl_fp_montRed9Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $936, %rsp ## imm = 0x3A8 + movq %rdx, %rax + movq %rdi, 208(%rsp) ## 8-byte Spill + movq -8(%rax), %rcx + movq %rcx, 96(%rsp) ## 8-byte Spill + movq (%rsi), %r14 + movq 8(%rsi), %rdx + movq %rdx, (%rsp) ## 8-byte Spill + movq %r14, %rdx + imulq %rcx, %rdx + movq 136(%rsi), %rcx + movq %rcx, 88(%rsp) ## 8-byte Spill + movq 128(%rsi), %rcx + movq %rcx, 56(%rsp) ## 8-byte Spill + movq 120(%rsi), %rcx + movq %rcx, 80(%rsp) ## 8-byte Spill + movq 112(%rsi), %rcx + movq %rcx, 72(%rsp) ## 8-byte Spill + movq 104(%rsi), %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + movq 96(%rsi), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + movq 88(%rsi), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq 80(%rsi), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + movq 72(%rsi), %r12 + movq 64(%rsi), %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + movq 56(%rsi), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + movq 48(%rsi), %rcx + movq %rcx, 64(%rsp) ## 8-byte Spill + movq 40(%rsi), %rbp + movq 32(%rsi), %rbx + movq 24(%rsi), %r13 + movq 16(%rsi), %r15 + movq %rax, %rcx + movq (%rcx), %rax + movq %rax, 144(%rsp) ## 8-byte Spill + movq 64(%rcx), %rax + movq %rax, 200(%rsp) ## 8-byte Spill + movq 56(%rcx), %rax + movq %rax, 192(%rsp) ## 8-byte Spill + movq 48(%rcx), %rax + movq %rax, 184(%rsp) ## 8-byte Spill + movq 40(%rcx), %rax + movq %rax, 176(%rsp) ## 
8-byte Spill + movq 32(%rcx), %rax + movq %rax, 168(%rsp) ## 8-byte Spill + movq 24(%rcx), %rax + movq %rax, 160(%rsp) ## 8-byte Spill + movq 16(%rcx), %rax + movq %rax, 152(%rsp) ## 8-byte Spill + movq 8(%rcx), %rax + movq %rax, 136(%rsp) ## 8-byte Spill + movq %rcx, %rsi + movq %rsi, 104(%rsp) ## 8-byte Spill + leaq 856(%rsp), %rdi + callq l_mulPv576x64 + addq 856(%rsp), %r14 + movq (%rsp), %rcx ## 8-byte Reload + adcq 864(%rsp), %rcx + adcq 872(%rsp), %r15 + adcq 880(%rsp), %r13 + adcq 888(%rsp), %rbx + movq %rbx, 120(%rsp) ## 8-byte Spill + adcq 896(%rsp), %rbp + movq %rbp, 112(%rsp) ## 8-byte Spill + movq 64(%rsp), %rax ## 8-byte Reload + adcq 904(%rsp), %rax + movq %rax, 64(%rsp) ## 8-byte Spill + movq 32(%rsp), %rax ## 8-byte Reload + adcq 912(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 40(%rsp), %rax ## 8-byte Reload + adcq 920(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 928(%rsp), %r12 + movq %r12, (%rsp) ## 8-byte Spill + movq 24(%rsp), %rbp ## 8-byte Reload + adcq $0, %rbp + adcq $0, 8(%rsp) ## 8-byte Folded Spill + adcq $0, 16(%rsp) ## 8-byte Folded Spill + adcq $0, 48(%rsp) ## 8-byte Folded Spill + adcq $0, 72(%rsp) ## 8-byte Folded Spill + adcq $0, 80(%rsp) ## 8-byte Folded Spill + adcq $0, 56(%rsp) ## 8-byte Folded Spill + movq 88(%rsp), %r14 ## 8-byte Reload + adcq $0, %r14 + sbbq %r12, %r12 + movq %rcx, %rdx + movq %rcx, %rbx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 776(%rsp), %rdi + movq 104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %r12d + addq 776(%rsp), %rbx + adcq 784(%rsp), %r15 + adcq 792(%rsp), %r13 + movq %r13, 128(%rsp) ## 8-byte Spill + movq 120(%rsp), %rax ## 8-byte Reload + adcq 800(%rsp), %rax + movq %rax, 120(%rsp) ## 8-byte Spill + movq 112(%rsp), %rax ## 8-byte Reload + adcq 808(%rsp), %rax + movq %rax, 112(%rsp) ## 8-byte Spill + movq 64(%rsp), %rax ## 8-byte Reload + adcq 816(%rsp), %rax + movq %rax, 64(%rsp) ## 8-byte Spill + movq 32(%rsp), %rax ## 8-byte Reload + adcq 824(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 40(%rsp), %rax ## 8-byte Reload + adcq 832(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + movq (%rsp), %rax ## 8-byte Reload + adcq 840(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + adcq 848(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq 8(%rsp), %r13 ## 8-byte Reload + adcq $0, %r13 + adcq $0, 16(%rsp) ## 8-byte Folded Spill + adcq $0, 48(%rsp) ## 8-byte Folded Spill + adcq $0, 72(%rsp) ## 8-byte Folded Spill + adcq $0, 80(%rsp) ## 8-byte Folded Spill + movq 56(%rsp), %rbx ## 8-byte Reload + adcq $0, %rbx + adcq $0, %r14 + movq %r14, 88(%rsp) ## 8-byte Spill + adcq $0, %r12 + movq %r15, %rdx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 696(%rsp), %rdi + movq 104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 696(%rsp), %r15 + movq 128(%rsp), %rcx ## 8-byte Reload + adcq 704(%rsp), %rcx + movq 120(%rsp), %rax ## 8-byte Reload + adcq 712(%rsp), %rax + movq %rax, 120(%rsp) ## 8-byte Spill + movq 112(%rsp), %rax ## 8-byte Reload + adcq 720(%rsp), %rax + movq %rax, 112(%rsp) ## 8-byte Spill + movq 64(%rsp), %rbp ## 8-byte Reload + adcq 728(%rsp), %rbp + movq 32(%rsp), %r14 ## 8-byte Reload + adcq 736(%rsp), %r14 + movq 40(%rsp), %r15 ## 8-byte Reload + adcq 744(%rsp), %r15 + movq (%rsp), %rax ## 8-byte Reload + adcq 752(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 24(%rsp), %rax ## 8-byte Reload + adcq 760(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 768(%rsp), %r13 + movq %r13, 8(%rsp) ## 8-byte Spill + adcq $0, 
16(%rsp) ## 8-byte Folded Spill + movq 48(%rsp), %r13 ## 8-byte Reload + adcq $0, %r13 + adcq $0, 72(%rsp) ## 8-byte Folded Spill + adcq $0, 80(%rsp) ## 8-byte Folded Spill + adcq $0, %rbx + movq %rbx, 56(%rsp) ## 8-byte Spill + adcq $0, 88(%rsp) ## 8-byte Folded Spill + adcq $0, %r12 + movq %rcx, %rbx + movq %rbx, %rdx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 616(%rsp), %rdi + movq 104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 616(%rsp), %rbx + movq 120(%rsp), %rax ## 8-byte Reload + adcq 624(%rsp), %rax + movq 112(%rsp), %rcx ## 8-byte Reload + adcq 632(%rsp), %rcx + movq %rcx, 112(%rsp) ## 8-byte Spill + adcq 640(%rsp), %rbp + movq %rbp, 64(%rsp) ## 8-byte Spill + adcq 648(%rsp), %r14 + movq %r14, 32(%rsp) ## 8-byte Spill + adcq 656(%rsp), %r15 + movq (%rsp), %r14 ## 8-byte Reload + adcq 664(%rsp), %r14 + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 672(%rsp), %rbp + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 680(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 688(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + adcq $0, %r13 + movq %r13, 48(%rsp) ## 8-byte Spill + adcq $0, 72(%rsp) ## 8-byte Folded Spill + adcq $0, 80(%rsp) ## 8-byte Folded Spill + adcq $0, 56(%rsp) ## 8-byte Folded Spill + adcq $0, 88(%rsp) ## 8-byte Folded Spill + adcq $0, %r12 + movq %rax, %rbx + movq %rbx, %rdx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 536(%rsp), %rdi + movq 104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 536(%rsp), %rbx + movq 112(%rsp), %rax ## 8-byte Reload + adcq 544(%rsp), %rax + movq 64(%rsp), %rcx ## 8-byte Reload + adcq 552(%rsp), %rcx + movq %rcx, 64(%rsp) ## 8-byte Spill + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 560(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + adcq 568(%rsp), %r15 + movq %r15, 40(%rsp) ## 8-byte Spill + adcq 576(%rsp), %r14 + movq %r14, (%rsp) ## 8-byte Spill + adcq 584(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq 8(%rsp), %r13 ## 8-byte Reload + adcq 592(%rsp), %r13 + movq 16(%rsp), %r15 ## 8-byte Reload + adcq 600(%rsp), %r15 + movq 48(%rsp), %rbp ## 8-byte Reload + adcq 608(%rsp), %rbp + movq 72(%rsp), %rbx ## 8-byte Reload + adcq $0, %rbx + adcq $0, 80(%rsp) ## 8-byte Folded Spill + adcq $0, 56(%rsp) ## 8-byte Folded Spill + adcq $0, 88(%rsp) ## 8-byte Folded Spill + adcq $0, %r12 + movq %rax, %rdx + movq %rax, %r14 + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 456(%rsp), %rdi + movq 104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 456(%rsp), %r14 + movq 64(%rsp), %rax ## 8-byte Reload + adcq 464(%rsp), %rax + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 472(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + movq 40(%rsp), %rcx ## 8-byte Reload + adcq 480(%rsp), %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + movq (%rsp), %rcx ## 8-byte Reload + adcq 488(%rsp), %rcx + movq %rcx, (%rsp) ## 8-byte Spill + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 496(%rsp), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 504(%rsp), %r13 + movq %r13, 8(%rsp) ## 8-byte Spill + adcq 512(%rsp), %r15 + movq %r15, 16(%rsp) ## 8-byte Spill + adcq 520(%rsp), %rbp + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 528(%rsp), %rbx + movq %rbx, 72(%rsp) ## 8-byte Spill + movq 80(%rsp), %r14 ## 8-byte Reload + adcq $0, %r14 + movq 56(%rsp), %r13 ## 8-byte Reload + adcq $0, %r13 + movq 88(%rsp), %rbx ## 8-byte Reload + adcq $0, %rbx + adcq $0, %r12 + movq %rax, %rdx + movq %rax, %r15 + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 376(%rsp), %rdi + movq 
104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 376(%rsp), %r15 + movq 32(%rsp), %rax ## 8-byte Reload + adcq 384(%rsp), %rax + movq 40(%rsp), %rcx ## 8-byte Reload + adcq 392(%rsp), %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + movq (%rsp), %rcx ## 8-byte Reload + adcq 400(%rsp), %rcx + movq %rcx, (%rsp) ## 8-byte Spill + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 408(%rsp), %rbp + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 416(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 424(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + movq 48(%rsp), %rcx ## 8-byte Reload + adcq 432(%rsp), %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + movq 72(%rsp), %r15 ## 8-byte Reload + adcq 440(%rsp), %r15 + adcq 448(%rsp), %r14 + movq %r14, 80(%rsp) ## 8-byte Spill + adcq $0, %r13 + movq %r13, %r14 + adcq $0, %rbx + movq %rbx, 88(%rsp) ## 8-byte Spill + adcq $0, %r12 + movq %rax, %rbx + movq %rbx, %rdx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 296(%rsp), %rdi + movq 104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 296(%rsp), %rbx + movq 40(%rsp), %rax ## 8-byte Reload + adcq 304(%rsp), %rax + movq (%rsp), %r13 ## 8-byte Reload + adcq 312(%rsp), %r13 + adcq 320(%rsp), %rbp + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 328(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 336(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + movq 48(%rsp), %rcx ## 8-byte Reload + adcq 344(%rsp), %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + adcq 352(%rsp), %r15 + movq %r15, 72(%rsp) ## 8-byte Spill + movq 80(%rsp), %r15 ## 8-byte Reload + adcq 360(%rsp), %r15 + adcq 368(%rsp), %r14 + movq %r14, 56(%rsp) ## 8-byte Spill + movq 88(%rsp), %r14 ## 8-byte Reload + adcq $0, %r14 + adcq $0, %r12 + movq 96(%rsp), %rdx ## 8-byte Reload + imulq %rax, %rdx + movq %rax, %rbx + leaq 216(%rsp), %rdi + movq 104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 216(%rsp), %rbx + movq %r13, %rsi + adcq 224(%rsp), %rsi + movq %rsi, (%rsp) ## 8-byte Spill + adcq 232(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq 8(%rsp), %r9 ## 8-byte Reload + adcq 240(%rsp), %r9 + movq %r9, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %r8 ## 8-byte Reload + adcq 248(%rsp), %r8 + movq %r8, 16(%rsp) ## 8-byte Spill + movq 48(%rsp), %rbx ## 8-byte Reload + adcq 256(%rsp), %rbx + movq 72(%rsp), %rax ## 8-byte Reload + adcq 264(%rsp), %rax + movq %r15, %rcx + adcq 272(%rsp), %rcx + movq 56(%rsp), %rdx ## 8-byte Reload + adcq 280(%rsp), %rdx + movq %rdx, 56(%rsp) ## 8-byte Spill + adcq 288(%rsp), %r14 + movq %r14, %r11 + adcq $0, %r12 + subq 144(%rsp), %rsi ## 8-byte Folded Reload + movq %rbp, %rdi + sbbq 136(%rsp), %rdi ## 8-byte Folded Reload + movq %r9, %rbp + sbbq 152(%rsp), %rbp ## 8-byte Folded Reload + movq %r8, %r13 + sbbq 160(%rsp), %r13 ## 8-byte Folded Reload + movq %rbx, %r15 + sbbq 168(%rsp), %r15 ## 8-byte Folded Reload + movq %rax, %r14 + sbbq 176(%rsp), %r14 ## 8-byte Folded Reload + movq %rcx, %r10 + sbbq 184(%rsp), %r10 ## 8-byte Folded Reload + movq %rdx, %r8 + sbbq 192(%rsp), %r8 ## 8-byte Folded Reload + movq %r11, %r9 + sbbq 200(%rsp), %r9 ## 8-byte Folded Reload + sbbq $0, %r12 + andl $1, %r12d + cmovneq %r11, %r9 + testb %r12b, %r12b + cmovneq (%rsp), %rsi ## 8-byte Folded Reload + movq 208(%rsp), %rdx ## 8-byte Reload + movq %rsi, (%rdx) + cmovneq 24(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 8(%rdx) + cmovneq 8(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 16(%rdx) + cmovneq 16(%rsp), %r13 
## 8-byte Folded Reload + movq %r13, 24(%rdx) + cmovneq %rbx, %r15 + movq %r15, 32(%rdx) + cmovneq %rax, %r14 + movq %r14, 40(%rdx) + cmovneq %rcx, %r10 + movq %r10, 48(%rdx) + cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, 56(%rdx) + movq %r9, 64(%rdx) + addq $936, %rsp ## imm = 0x3A8 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_addPre9Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addPre9Lbmi2: ## @mcl_fp_addPre9Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 64(%rdx), %r8 + movq 64(%rsi), %r15 + movq 56(%rsi), %r9 + movq 48(%rsi), %r10 + movq 40(%rsi), %r11 + movq 24(%rsi), %r12 + movq 32(%rsi), %r14 + movq (%rdx), %rbx + movq 8(%rdx), %rcx + addq (%rsi), %rbx + adcq 8(%rsi), %rcx + movq 16(%rdx), %rax + adcq 16(%rsi), %rax + adcq 24(%rdx), %r12 + movq 56(%rdx), %r13 + movq 48(%rdx), %rsi + movq 40(%rdx), %rbp + movq 32(%rdx), %rdx + movq %rbx, (%rdi) + movq %rcx, 8(%rdi) + movq %rax, 16(%rdi) + movq %r12, 24(%rdi) + adcq %r14, %rdx + movq %rdx, 32(%rdi) + adcq %r11, %rbp + movq %rbp, 40(%rdi) + adcq %r10, %rsi + movq %rsi, 48(%rdi) + adcq %r9, %r13 + movq %r13, 56(%rdi) + adcq %r8, %r15 + movq %r15, 64(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_subPre9Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subPre9Lbmi2: ## @mcl_fp_subPre9Lbmi2 +## BB#0: + movq 32(%rdx), %r8 + movq (%rsi), %rcx + xorl %eax, %eax + subq (%rdx), %rcx + movq %rcx, (%rdi) + movq 8(%rsi), %rcx + sbbq 8(%rdx), %rcx + movq %rcx, 8(%rdi) + movq 16(%rsi), %rcx + sbbq 16(%rdx), %rcx + movq %rcx, 16(%rdi) + movq 24(%rsi), %rcx + sbbq 24(%rdx), %rcx + movq %rcx, 24(%rdi) + movq 32(%rsi), %rcx + sbbq %r8, %rcx + movq 40(%rdx), %r8 + movq %rcx, 32(%rdi) + movq 40(%rsi), %rcx + sbbq %r8, %rcx + movq 48(%rdx), %r8 + movq %rcx, 40(%rdi) + movq 48(%rsi), %rcx + sbbq %r8, %rcx + movq 56(%rdx), %r8 + movq %rcx, 48(%rdi) + movq 56(%rsi), %rcx + sbbq %r8, %rcx + movq %rcx, 56(%rdi) + movq 64(%rdx), %rcx + movq 64(%rsi), %rdx + sbbq %rcx, %rdx + movq %rdx, 64(%rdi) + sbbq $0, %rax + andl $1, %eax + retq + + .globl _mcl_fp_shr1_9Lbmi2 + .p2align 4, 0x90 +_mcl_fp_shr1_9Lbmi2: ## @mcl_fp_shr1_9Lbmi2 +## BB#0: + pushq %rbx + movq 64(%rsi), %r8 + movq 56(%rsi), %r9 + movq 48(%rsi), %r10 + movq 40(%rsi), %r11 + movq 32(%rsi), %rcx + movq 24(%rsi), %rdx + movq 16(%rsi), %rax + movq (%rsi), %rbx + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rbx + movq %rbx, (%rdi) + shrdq $1, %rax, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rdx, %rax + movq %rax, 16(%rdi) + shrdq $1, %rcx, %rdx + movq %rdx, 24(%rdi) + shrdq $1, %r11, %rcx + movq %rcx, 32(%rdi) + shrdq $1, %r10, %r11 + movq %r11, 40(%rdi) + shrdq $1, %r9, %r10 + movq %r10, 48(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 56(%rdi) + shrq %r8 + movq %r8, 64(%rdi) + popq %rbx + retq + + .globl _mcl_fp_add9Lbmi2 + .p2align 4, 0x90 +_mcl_fp_add9Lbmi2: ## @mcl_fp_add9Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 64(%rdx), %r12 + movq 64(%rsi), %r8 + movq 56(%rsi), %r13 + movq 48(%rsi), %r9 + movq 40(%rsi), %r10 + movq 24(%rsi), %r14 + movq 32(%rsi), %r11 + movq (%rdx), %rbx + movq 8(%rdx), %r15 + addq (%rsi), %rbx + adcq 8(%rsi), %r15 + movq 16(%rdx), %rax + adcq 16(%rsi), %rax + adcq 24(%rdx), %r14 + adcq 32(%rdx), %r11 + adcq 40(%rdx), %r10 + movq 56(%rdx), %rsi + adcq 48(%rdx), %r9 + movq %rbx, (%rdi) + movq %r15, 8(%rdi) + movq %rax, 16(%rdi) + movq %r14, 24(%rdi) + movq %r11, 32(%rdi) + 
movq %r10, 40(%rdi) + movq %r9, 48(%rdi) + adcq %r13, %rsi + movq %rsi, 56(%rdi) + adcq %r12, %r8 + movq %r8, 64(%rdi) + sbbq %rdx, %rdx + andl $1, %edx + subq (%rcx), %rbx + sbbq 8(%rcx), %r15 + sbbq 16(%rcx), %rax + sbbq 24(%rcx), %r14 + sbbq 32(%rcx), %r11 + sbbq 40(%rcx), %r10 + sbbq 48(%rcx), %r9 + sbbq 56(%rcx), %rsi + sbbq 64(%rcx), %r8 + sbbq $0, %rdx + testb $1, %dl + jne LBB136_2 +## BB#1: ## %nocarry + movq %rbx, (%rdi) + movq %r15, 8(%rdi) + movq %rax, 16(%rdi) + movq %r14, 24(%rdi) + movq %r11, 32(%rdi) + movq %r10, 40(%rdi) + movq %r9, 48(%rdi) + movq %rsi, 56(%rdi) + movq %r8, 64(%rdi) +LBB136_2: ## %carry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_addNF9Lbmi2 + .p2align 4, 0x90 +_mcl_fp_addNF9Lbmi2: ## @mcl_fp_addNF9Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, %r8 + movq 64(%rdx), %r10 + movq 56(%rdx), %r11 + movq 48(%rdx), %r9 + movq 40(%rdx), %rax + movq 32(%rdx), %rdi + movq 24(%rdx), %rbp + movq 16(%rdx), %r15 + movq (%rdx), %rbx + movq 8(%rdx), %r13 + addq (%rsi), %rbx + adcq 8(%rsi), %r13 + adcq 16(%rsi), %r15 + adcq 24(%rsi), %rbp + movq %rbp, -24(%rsp) ## 8-byte Spill + adcq 32(%rsi), %rdi + movq %rdi, -40(%rsp) ## 8-byte Spill + adcq 40(%rsi), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + adcq 48(%rsi), %r9 + movq %r9, %rdi + movq %rdi, -16(%rsp) ## 8-byte Spill + adcq 56(%rsi), %r11 + movq %r11, %rax + movq %rax, -8(%rsp) ## 8-byte Spill + adcq 64(%rsi), %r10 + movq %r10, %r9 + movq %rbx, %rsi + subq (%rcx), %rsi + movq %r13, %rdx + sbbq 8(%rcx), %rdx + movq %r15, %r12 + sbbq 16(%rcx), %r12 + sbbq 24(%rcx), %rbp + movq -40(%rsp), %r14 ## 8-byte Reload + sbbq 32(%rcx), %r14 + movq -32(%rsp), %r11 ## 8-byte Reload + sbbq 40(%rcx), %r11 + movq %rdi, %r10 + sbbq 48(%rcx), %r10 + movq %rax, %rdi + sbbq 56(%rcx), %rdi + movq %r9, %rax + sbbq 64(%rcx), %rax + movq %rax, %rcx + sarq $63, %rcx + cmovsq %rbx, %rsi + movq %rsi, (%r8) + cmovsq %r13, %rdx + movq %rdx, 8(%r8) + cmovsq %r15, %r12 + movq %r12, 16(%r8) + cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 24(%r8) + cmovsq -40(%rsp), %r14 ## 8-byte Folded Reload + movq %r14, 32(%r8) + cmovsq -32(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, 40(%r8) + cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, 48(%r8) + cmovsq -8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 56(%r8) + cmovsq %r9, %rax + movq %rax, 64(%r8) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_sub9Lbmi2 + .p2align 4, 0x90 +_mcl_fp_sub9Lbmi2: ## @mcl_fp_sub9Lbmi2 +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 64(%rdx), %r13 + movq (%rsi), %rax + movq 8(%rsi), %r9 + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %r9 + movq 16(%rsi), %r10 + sbbq 16(%rdx), %r10 + movq 24(%rsi), %r11 + sbbq 24(%rdx), %r11 + movq 32(%rsi), %r12 + sbbq 32(%rdx), %r12 + movq 40(%rsi), %r14 + sbbq 40(%rdx), %r14 + movq 48(%rsi), %r15 + sbbq 48(%rdx), %r15 + movq 64(%rsi), %r8 + movq 56(%rsi), %rsi + sbbq 56(%rdx), %rsi + movq %rax, (%rdi) + movq %r9, 8(%rdi) + movq %r10, 16(%rdi) + movq %r11, 24(%rdi) + movq %r12, 32(%rdi) + movq %r14, 40(%rdi) + movq %r15, 48(%rdi) + movq %rsi, 56(%rdi) + sbbq %r13, %r8 + movq %r8, 64(%rdi) + sbbq $0, %rbx + testb $1, %bl + je LBB138_2 +## BB#1: ## %carry + addq (%rcx), %rax + movq %rax, (%rdi) + movq 8(%rcx), %rax + adcq %r9, %rax + movq %rax, 8(%rdi) + movq 16(%rcx), %rax + adcq %r10, %rax + movq %rax, 16(%rdi) + movq 
24(%rcx), %rax + adcq %r11, %rax + movq %rax, 24(%rdi) + movq 32(%rcx), %rax + adcq %r12, %rax + movq %rax, 32(%rdi) + movq 40(%rcx), %rax + adcq %r14, %rax + movq %rax, 40(%rdi) + movq 48(%rcx), %rax + adcq %r15, %rax + movq %rax, 48(%rdi) + movq 56(%rcx), %rax + adcq %rsi, %rax + movq %rax, 56(%rdi) + movq 64(%rcx), %rax + adcq %r8, %rax + movq %rax, 64(%rdi) +LBB138_2: ## %nocarry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_subNF9Lbmi2 + .p2align 4, 0x90 +_mcl_fp_subNF9Lbmi2: ## @mcl_fp_subNF9Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r10 + movq %rdi, %rbx + movq 64(%rsi), %r11 + movdqu (%rdx), %xmm1 + movdqu 16(%rdx), %xmm2 + movdqu 32(%rdx), %xmm3 + movdqu 48(%rdx), %xmm4 + pshufd $78, %xmm4, %xmm0 ## xmm0 = xmm4[2,3,0,1] + movd %xmm0, %r8 + movdqu (%rsi), %xmm5 + movdqu 16(%rsi), %xmm6 + movdqu 32(%rsi), %xmm7 + movdqu 48(%rsi), %xmm8 + pshufd $78, %xmm8, %xmm0 ## xmm0 = xmm8[2,3,0,1] + movd %xmm0, %rax + movd %xmm4, %r9 + pshufd $78, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,0,1] + movd %xmm0, %rdi + pshufd $78, %xmm7, %xmm0 ## xmm0 = xmm7[2,3,0,1] + movd %xmm3, %rcx + pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1] + movd %xmm3, %rbp + pshufd $78, %xmm6, %xmm3 ## xmm3 = xmm6[2,3,0,1] + movd %xmm2, %r13 + pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] + movd %xmm2, %r12 + pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1] + movd %xmm1, %rsi + movd %xmm5, %r15 + subq %rsi, %r15 + movd %xmm2, %r14 + sbbq %r12, %r14 + movd %xmm6, %r12 + sbbq %r13, %r12 + movd %xmm3, %r13 + sbbq %rbp, %r13 + movd %xmm7, %rsi + sbbq %rcx, %rsi + movq %rsi, -16(%rsp) ## 8-byte Spill + movd %xmm0, %rcx + sbbq %rdi, %rcx + movq %rcx, -24(%rsp) ## 8-byte Spill + movd %xmm8, %rcx + sbbq %r9, %rcx + movq %rcx, -32(%rsp) ## 8-byte Spill + sbbq %r8, %rax + movq %rax, -8(%rsp) ## 8-byte Spill + sbbq 64(%rdx), %r11 + movq %r11, -40(%rsp) ## 8-byte Spill + movq %r11, %rdx + sarq $63, %rdx + movq %rdx, %rbp + shldq $1, %r11, %rbp + movq 24(%r10), %r9 + andq %rbp, %r9 + movq 8(%r10), %rdi + andq %rbp, %rdi + andq (%r10), %rbp + movq 64(%r10), %r11 + andq %rdx, %r11 + rorxq $63, %rdx, %rax + andq 56(%r10), %rdx + movq 48(%r10), %r8 + andq %rax, %r8 + movq 40(%r10), %rsi + andq %rax, %rsi + movq 32(%r10), %rcx + andq %rax, %rcx + andq 16(%r10), %rax + addq %r15, %rbp + adcq %r14, %rdi + movq %rbp, (%rbx) + adcq %r12, %rax + movq %rdi, 8(%rbx) + adcq %r13, %r9 + movq %rax, 16(%rbx) + movq %r9, 24(%rbx) + adcq -16(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 32(%rbx) + adcq -24(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 40(%rbx) + adcq -32(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, 48(%rbx) + adcq -8(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 56(%rbx) + adcq -40(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, 64(%rbx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_add9Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_add9Lbmi2: ## @mcl_fpDbl_add9Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r15 + movq 136(%rdx), %rax + movq %rax, -48(%rsp) ## 8-byte Spill + movq 128(%rdx), %rax + movq %rax, -40(%rsp) ## 8-byte Spill + movq 120(%rdx), %r10 + movq 112(%rdx), %r11 + movq 24(%rsi), %rcx + movq 32(%rsi), %r14 + movq 16(%rdx), %rbp + movq (%rdx), %rax + movq 8(%rdx), %rbx + addq (%rsi), %rax + adcq 8(%rsi), %rbx + adcq 16(%rsi), %rbp + adcq 24(%rdx), %rcx + adcq 32(%rdx), %r14 + movq 104(%rdx), %r9 + movq 
96(%rdx), %r13 + movq %rax, (%rdi) + movq 88(%rdx), %r8 + movq %rbx, 8(%rdi) + movq 80(%rdx), %r12 + movq %rbp, 16(%rdi) + movq 40(%rdx), %rax + movq %rcx, 24(%rdi) + movq 40(%rsi), %rbp + adcq %rax, %rbp + movq 48(%rdx), %rcx + movq %r14, 32(%rdi) + movq 48(%rsi), %rax + adcq %rcx, %rax + movq 56(%rdx), %r14 + movq %rbp, 40(%rdi) + movq 56(%rsi), %rbp + adcq %r14, %rbp + movq 72(%rdx), %rcx + movq 64(%rdx), %rdx + movq %rax, 48(%rdi) + movq 64(%rsi), %rax + adcq %rdx, %rax + movq 136(%rsi), %rbx + movq %rbp, 56(%rdi) + movq 72(%rsi), %rbp + adcq %rcx, %rbp + movq 128(%rsi), %rcx + movq %rax, 64(%rdi) + movq 80(%rsi), %rdx + adcq %r12, %rdx + movq 88(%rsi), %r12 + adcq %r8, %r12 + movq 96(%rsi), %r14 + adcq %r13, %r14 + movq %r14, -8(%rsp) ## 8-byte Spill + movq 104(%rsi), %rax + adcq %r9, %rax + movq %rax, -32(%rsp) ## 8-byte Spill + movq 120(%rsi), %rax + movq 112(%rsi), %rsi + adcq %r11, %rsi + movq %rsi, -24(%rsp) ## 8-byte Spill + adcq %r10, %rax + movq %rax, -16(%rsp) ## 8-byte Spill + adcq -40(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -40(%rsp) ## 8-byte Spill + adcq -48(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, -48(%rsp) ## 8-byte Spill + sbbq %r9, %r9 + andl $1, %r9d + movq %rbp, %r10 + subq (%r15), %r10 + movq %rdx, %r11 + sbbq 8(%r15), %r11 + movq %r12, %rbx + sbbq 16(%r15), %rbx + sbbq 24(%r15), %r14 + movq -32(%rsp), %r13 ## 8-byte Reload + sbbq 32(%r15), %r13 + movq -24(%rsp), %rsi ## 8-byte Reload + sbbq 40(%r15), %rsi + movq -16(%rsp), %rax ## 8-byte Reload + sbbq 48(%r15), %rax + sbbq 56(%r15), %rcx + movq -48(%rsp), %r8 ## 8-byte Reload + sbbq 64(%r15), %r8 + sbbq $0, %r9 + andl $1, %r9d + cmovneq %rbp, %r10 + movq %r10, 72(%rdi) + testb %r9b, %r9b + cmovneq %rdx, %r11 + movq %r11, 80(%rdi) + cmovneq %r12, %rbx + movq %rbx, 88(%rdi) + cmovneq -8(%rsp), %r14 ## 8-byte Folded Reload + movq %r14, 96(%rdi) + cmovneq -32(%rsp), %r13 ## 8-byte Folded Reload + movq %r13, 104(%rdi) + cmovneq -24(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 112(%rdi) + cmovneq -16(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 120(%rdi) + cmovneq -40(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 128(%rdi) + cmovneq -48(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, 136(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sub9Lbmi2 + .p2align 4, 0x90 +_mcl_fpDbl_sub9Lbmi2: ## @mcl_fpDbl_sub9Lbmi2 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r14 + movq 136(%rdx), %rax + movq %rax, -24(%rsp) ## 8-byte Spill + movq 128(%rdx), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + movq 120(%rdx), %rax + movq %rax, -40(%rsp) ## 8-byte Spill + movq 16(%rsi), %r11 + movq (%rsi), %r12 + movq 8(%rsi), %r13 + xorl %r9d, %r9d + subq (%rdx), %r12 + sbbq 8(%rdx), %r13 + sbbq 16(%rdx), %r11 + movq 24(%rsi), %rbx + sbbq 24(%rdx), %rbx + movq 32(%rsi), %rbp + sbbq 32(%rdx), %rbp + movq 112(%rdx), %r10 + movq 104(%rdx), %rcx + movq %r12, (%rdi) + movq 96(%rdx), %rax + movq %r13, 8(%rdi) + movq 88(%rdx), %r13 + movq %r11, 16(%rdi) + movq 40(%rdx), %r11 + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %r11, %rbx + movq 48(%rdx), %r11 + movq %rbp, 32(%rdi) + movq 48(%rsi), %rbp + sbbq %r11, %rbp + movq 56(%rdx), %r11 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rbx + sbbq %r11, %rbx + movq 64(%rdx), %r11 + movq %rbp, 48(%rdi) + movq 64(%rsi), %rbp + sbbq %r11, %rbp + movq 80(%rdx), %r8 + movq 72(%rdx), %r11 + movq %rbx, 56(%rdi) + movq 72(%rsi), %r15 + sbbq %r11, %r15 + movq 136(%rsi), %rdx + movq 
%rbp, 64(%rdi)
+	movq	80(%rsi), %rbp
+	sbbq	%r8, %rbp
+	movq	88(%rsi), %r12
+	sbbq	%r13, %r12
+	movq	96(%rsi), %r13
+	sbbq	%rax, %r13
+	movq	104(%rsi), %rax
+	sbbq	%rcx, %rax
+	movq	%rax, -16(%rsp) ## 8-byte Spill
+	movq	112(%rsi), %rax
+	sbbq	%r10, %rax
+	movq	%rax, -8(%rsp) ## 8-byte Spill
+	movq	128(%rsi), %rax
+	movq	120(%rsi), %rcx
+	sbbq	-40(%rsp), %rcx ## 8-byte Folded Reload
+	movq	%rcx, -40(%rsp) ## 8-byte Spill
+	sbbq	-32(%rsp), %rax ## 8-byte Folded Reload
+	movq	%rax, -32(%rsp) ## 8-byte Spill
+	sbbq	-24(%rsp), %rdx ## 8-byte Folded Reload
+	movq	%rdx, -24(%rsp) ## 8-byte Spill
+	movl	$0, %r8d
+	sbbq	$0, %r8
+	andl	$1, %r8d
+	movq	(%r14), %r10
+	cmoveq	%r9, %r10
+	testb	%r8b, %r8b
+	movq	16(%r14), %r8
+	cmoveq	%r9, %r8
+	movq	8(%r14), %rdx
+	cmoveq	%r9, %rdx
+	movq	64(%r14), %rbx
+	cmoveq	%r9, %rbx
+	movq	56(%r14), %r11
+	cmoveq	%r9, %r11
+	movq	48(%r14), %rsi
+	cmoveq	%r9, %rsi
+	movq	40(%r14), %rcx
+	cmoveq	%r9, %rcx
+	movq	32(%r14), %rax
+	cmoveq	%r9, %rax
+	cmovneq	24(%r14), %r9
+	addq	%r15, %r10
+	adcq	%rbp, %rdx
+	movq	%r10, 72(%rdi)
+	adcq	%r12, %r8
+	movq	%rdx, 80(%rdi)
+	adcq	%r13, %r9
+	movq	%r8, 88(%rdi)
+	movq	%r9, 96(%rdi)
+	adcq	-16(%rsp), %rax ## 8-byte Folded Reload
+	movq	%rax, 104(%rdi)
+	adcq	-8(%rsp), %rcx ## 8-byte Folded Reload
+	movq	%rcx, 112(%rdi)
+	adcq	-40(%rsp), %rsi ## 8-byte Folded Reload
+	movq	%rsi, 120(%rdi)
+	adcq	-32(%rsp), %r11 ## 8-byte Folded Reload
+	movq	%r11, 128(%rdi)
+	adcq	-24(%rsp), %rbx ## 8-byte Folded Reload
+	movq	%rbx, 136(%rdi)
+	popq	%rbx
+	popq	%r12
+	popq	%r13
+	popq	%r14
+	popq	%r15
+	popq	%rbp
+	retq
+
+
+.subsections_via_symbols
diff --git a/vendor/github.com/byzantine-lab/mcl/src/asm/x86-64mac.s b/vendor/github.com/byzantine-lab/mcl/src/asm/x86-64mac.s
new file mode 100644
index 000000000..0dc7014a3
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/src/asm/x86-64mac.s
@@ -0,0 +1,16313 @@
+	.section	__TEXT,__text,regular,pure_instructions
+	.macosx_version_min 10, 12
+	.globl	_makeNIST_P192L
+	.p2align	4, 0x90
+_makeNIST_P192L: ## @makeNIST_P192L
+## BB#0:
+	movq	$-1, %rax
+	movq	$-2, %rdx
+	movq	$-1, %rcx
+	retq
+
+	.globl	_mcl_fpDbl_mod_NIST_P192L
+	.p2align	4, 0x90
+_mcl_fpDbl_mod_NIST_P192L: ## @mcl_fpDbl_mod_NIST_P192L
+## BB#0:
+	pushq	%r14
+	pushq	%rbx
+	movq	16(%rsi), %r10
+	movq	24(%rsi), %r8
+	movq	40(%rsi), %r9
+	movq	8(%rsi), %rax
+	addq	%r9, %rax
+	adcq	$0, %r10
+	sbbq	%rcx, %rcx
+	andl	$1, %ecx
+	movq	32(%rsi), %r11
+	movq	(%rsi), %r14
+	addq	%r8, %r14
+	adcq	%r11, %rax
+	adcq	%r9, %r10
+	adcq	$0, %rcx
+	addq	%r9, %r14
+	adcq	%r8, %rax
+	adcq	%r11, %r10
+	adcq	$0, %rcx
+	addq	%rcx, %r14
+	adcq	%rax, %rcx
+	adcq	$0, %r10
+	sbbq	%rax, %rax
+	andl	$1, %eax
+	movq	%r14, %rsi
+	addq	$1, %rsi
+	movq	%rcx, %rdx
+	adcq	$1, %rdx
+	movq	%r10, %rbx
+	adcq	$0, %rbx
+	adcq	$-1, %rax
+	andl	$1, %eax
+	cmovneq	%r14, %rsi
+	movq	%rsi, (%rdi)
+	testb	%al, %al
+	cmovneq	%rcx, %rdx
+	movq	%rdx, 8(%rdi)
+	cmovneq	%r10, %rbx
+	movq	%rbx, 16(%rdi)
+	popq	%rbx
+	popq	%r14
+	retq
+
+	.globl	_mcl_fp_sqr_NIST_P192L
+	.p2align	4, 0x90
+_mcl_fp_sqr_NIST_P192L: ## @mcl_fp_sqr_NIST_P192L
+## BB#0:
+	pushq	%rbp
+	pushq	%r15
+	pushq	%r14
+	pushq	%r13
+	pushq	%r12
+	pushq	%rbx
+	movq	%rdi, -8(%rsp) ## 8-byte Spill
+	movq	16(%rsi), %r11
+	movq	(%rsi), %rbx
+	movq	8(%rsi), %rcx
+	movq	%r11, %rax
+	mulq	%rcx
+	movq	%rdx, %rdi
+	movq	%rax, %r14
+	movq	%rcx, %rax
+	mulq	%rcx
+	movq	%rdx, %r15
+	movq	%rax, %r12
+	movq	%rcx, %rax
+	mulq	%rbx
+	movq	%rax, %r13
+	movq	%rdx, %rcx
+	addq	%rcx, %r12
+	adcq	%r14, %r15
+	movq	%rdi, %r10
+	adcq	$0, %r10
+	movq	%r11, %rax
+	mulq	%rbx
+	movq	%rdx, %r9
+	movq	%rax, %rbp
+	movq	%rbx, %rax
+	mulq	%rbx
+	movq	%rax, %r8
+	movq	%rdx, %rsi
+	addq	%r13, %rsi
+	adcq	%rbp, %rcx
+	movq	%r9, %rbx
+	adcq	$0, %rbx
+	addq	%r13, %rsi
+	adcq	%r12, %rcx
+	adcq	%r15, %rbx
+	adcq	$0, %r10
+	movq	%r11, %rax
+	mulq	%r11
+	addq	%r14, %r9
+	adcq	%rdi, %rax
+	adcq	$0, %rdx
+	addq	%rbp, %rcx
+	adcq	%rbx, %r9
+	adcq	%r10, %rax
+	adcq	$0, %rdx
+	addq	%rdx, %rsi
+	adcq	$0, %rcx
+	sbbq	%rbp, %rbp
+	andl	$1, %ebp
+	addq	%r9, %r8
+	adcq	%rax, %rsi
+	adcq	%rdx, %rcx
+	adcq	$0, %rbp
+	addq	%rdx, %r8
+	adcq	%r9, %rsi
+	adcq	%rax, %rcx
+	adcq	$0, %rbp
+	addq	%rbp, %r8
+	adcq	%rsi, %rbp
+	adcq	$0, %rcx
+	sbbq	%rax, %rax
+	andl	$1, %eax
+	movq	%r8, %rdx
+	addq	$1, %rdx
+	movq	%rbp, %rsi
+	adcq	$1, %rsi
+	movq	%rcx, %rdi
+	adcq	$0, %rdi
+	adcq	$-1, %rax
+	andl	$1, %eax
+	cmovneq	%r8, %rdx
+	movq	-8(%rsp), %rbx ## 8-byte Reload
+	movq	%rdx, (%rbx)
+	testb	%al, %al
+	cmovneq	%rbp, %rsi
+	movq	%rsi, 8(%rbx)
+	cmovneq	%rcx, %rdi
+	movq	%rdi, 16(%rbx)
+	popq	%rbx
+	popq	%r12
+	popq	%r13
+	popq	%r14
+	popq	%r15
+	popq	%rbp
+	retq
+
+	.globl	_mcl_fp_mulNIST_P192L
+	.p2align	4, 0x90
+_mcl_fp_mulNIST_P192L: ## @mcl_fp_mulNIST_P192L
+## BB#0:
+	pushq	%r14
+	pushq	%rbx
+	subq	$56, %rsp
+	movq	%rdi, %r14
+	leaq	8(%rsp), %rdi
+	callq	_mcl_fpDbl_mulPre3L
+	movq	24(%rsp), %r9
+	movq	32(%rsp), %r8
+	movq	48(%rsp), %rdi
+	movq	16(%rsp), %rbx
+	addq	%rdi, %rbx
+	adcq	$0, %r9
+	sbbq	%rcx, %rcx
+	andl	$1, %ecx
+	movq	40(%rsp), %rsi
+	movq	8(%rsp), %rdx
+	addq	%r8, %rdx
+	adcq	%rsi, %rbx
+	adcq	%rdi, %r9
+	adcq	$0, %rcx
+	addq	%rdi, %rdx
+	adcq	%r8, %rbx
+	adcq	%rsi, %r9
+	adcq	$0, %rcx
+	addq	%rcx, %rdx
+	adcq	%rbx, %rcx
+	adcq	$0, %r9
+	sbbq	%rsi, %rsi
+	andl	$1, %esi
+	movq	%rdx, %rdi
+	addq	$1, %rdi
+	movq	%rcx, %rbx
+	adcq	$1, %rbx
+	movq	%r9, %rax
+	adcq	$0, %rax
+	adcq	$-1, %rsi
+	andl	$1, %esi
+	cmovneq	%rdx, %rdi
+	movq	%rdi, (%r14)
+	testb	%sil, %sil
+	cmovneq	%rcx, %rbx
+	movq	%rbx, 8(%r14)
+	cmovneq	%r9, %rax
+	movq	%rax, 16(%r14)
+	addq	$56, %rsp
+	popq	%rbx
+	popq	%r14
+	retq
+
+	.globl	_mcl_fpDbl_mod_NIST_P521L
+	.p2align	4, 0x90
+_mcl_fpDbl_mod_NIST_P521L: ## @mcl_fpDbl_mod_NIST_P521L
+## BB#0:
+	pushq	%r15
+	pushq	%r14
+	pushq	%r12
+	pushq	%rbx
+	movq	120(%rsi), %r9
+	movq	128(%rsi), %r14
+	movq	%r14, %r8
+	shldq	$55, %r9, %r8
+	movq	112(%rsi), %r10
+	shldq	$55, %r10, %r9
+	movq	104(%rsi), %r11
+	shldq	$55, %r11, %r10
+	movq	96(%rsi), %r15
+	shldq	$55, %r15, %r11
+	movq	88(%rsi), %r12
+	shldq	$55, %r12, %r15
+	movq	80(%rsi), %rcx
+	shldq	$55, %rcx, %r12
+	movq	64(%rsi), %rbx
+	movq	72(%rsi), %rax
+	shldq	$55, %rax, %rcx
+	shrq	$9, %r14
+	shldq	$55, %rbx, %rax
+	## kill: %EBX %EBX %RBX %RBX
+	andl	$511, %ebx ## imm = 0x1FF
+	addq	(%rsi), %rax
+	adcq	8(%rsi), %rcx
+	adcq	16(%rsi), %r12
+	adcq	24(%rsi), %r15
+	adcq	32(%rsi), %r11
+	adcq	40(%rsi), %r10
+	adcq	48(%rsi), %r9
+	adcq	56(%rsi), %r8
+	adcq	%r14, %rbx
+	movl	%ebx, %esi
+	shrl	$9, %esi
+	andl	$1, %esi
+	addq	%rax, %rsi
+	adcq	$0, %rcx
+	adcq	$0, %r12
+	adcq	$0, %r15
+	adcq	$0, %r11
+	adcq	$0, %r10
+	adcq	$0, %r9
+	adcq	$0, %r8
+	adcq	$0, %rbx
+	movq	%rsi, %rax
+	andq	%r12, %rax
+	andq	%r15, %rax
+	andq	%r11, %rax
+	andq	%r10, %rax
+	andq	%r9, %rax
+	andq	%r8, %rax
+	movq	%rbx, %rdx
+	orq	$-512, %rdx ## imm = 0xFE00
+	andq	%rax, %rdx
+	andq	%rcx, %rdx
+	cmpq	$-1, %rdx
+	je	LBB4_1
+## BB#3: ## %nonzero
+	movq	%rsi, (%rdi)
+	movq	%rcx, 8(%rdi)
+	movq	%r12, 16(%rdi)
+	movq	%r15, 24(%rdi)
+	movq	%r11, 32(%rdi)
+	movq	%r10, 40(%rdi)
+	movq	%r9, 48(%rdi)
+	movq	%r8, 56(%rdi)
andl $511, %ebx ## imm = 0x1FF + movq %rbx, 64(%rdi) + jmp LBB4_2 +LBB4_1: ## %zero + movq $0, 64(%rdi) + movq $0, 56(%rdi) + movq $0, 48(%rdi) + movq $0, 40(%rdi) + movq $0, 32(%rdi) + movq $0, 24(%rdi) + movq $0, 16(%rdi) + movq $0, 8(%rdi) + movq $0, (%rdi) +LBB4_2: ## %zero + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_mulUnitPre1L + .p2align 4, 0x90 +_mcl_fp_mulUnitPre1L: ## @mcl_fp_mulUnitPre1L +## BB#0: + movq %rdx, %rax + mulq (%rsi) + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + retq + + .globl _mcl_fpDbl_mulPre1L + .p2align 4, 0x90 +_mcl_fpDbl_mulPre1L: ## @mcl_fpDbl_mulPre1L +## BB#0: + movq (%rdx), %rax + mulq (%rsi) + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + retq + + .globl _mcl_fpDbl_sqrPre1L + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre1L: ## @mcl_fpDbl_sqrPre1L +## BB#0: + movq (%rsi), %rax + mulq %rax + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + retq + + .globl _mcl_fp_mont1L + .p2align 4, 0x90 +_mcl_fp_mont1L: ## @mcl_fp_mont1L +## BB#0: + movq (%rsi), %rax + mulq (%rdx) + movq %rax, %rsi + movq %rdx, %r8 + movq -8(%rcx), %rax + imulq %rsi, %rax + movq (%rcx), %rcx + mulq %rcx + addq %rsi, %rax + adcq %r8, %rdx + sbbq %rax, %rax + andl $1, %eax + movq %rdx, %rsi + subq %rcx, %rsi + sbbq $0, %rax + testb $1, %al + cmovneq %rdx, %rsi + movq %rsi, (%rdi) + retq + + .globl _mcl_fp_montNF1L + .p2align 4, 0x90 +_mcl_fp_montNF1L: ## @mcl_fp_montNF1L +## BB#0: + movq (%rsi), %rax + mulq (%rdx) + movq %rax, %rsi + movq %rdx, %r8 + movq -8(%rcx), %rax + imulq %rsi, %rax + movq (%rcx), %rcx + mulq %rcx + addq %rsi, %rax + adcq %r8, %rdx + movq %rdx, %rax + subq %rcx, %rax + cmovsq %rdx, %rax + movq %rax, (%rdi) + retq + + .globl _mcl_fp_montRed1L + .p2align 4, 0x90 +_mcl_fp_montRed1L: ## @mcl_fp_montRed1L +## BB#0: + movq (%rsi), %rcx + movq -8(%rdx), %rax + imulq %rcx, %rax + movq (%rdx), %r8 + mulq %r8 + addq %rcx, %rax + adcq 8(%rsi), %rdx + sbbq %rax, %rax + andl $1, %eax + movq %rdx, %rcx + subq %r8, %rcx + sbbq $0, %rax + testb $1, %al + cmovneq %rdx, %rcx + movq %rcx, (%rdi) + retq + + .globl _mcl_fp_addPre1L + .p2align 4, 0x90 +_mcl_fp_addPre1L: ## @mcl_fp_addPre1L +## BB#0: + movq (%rdx), %rax + addq (%rsi), %rax + movq %rax, (%rdi) + sbbq %rax, %rax + andl $1, %eax + retq + + .globl _mcl_fp_subPre1L + .p2align 4, 0x90 +_mcl_fp_subPre1L: ## @mcl_fp_subPre1L +## BB#0: + movq (%rsi), %rcx + xorl %eax, %eax + subq (%rdx), %rcx + movq %rcx, (%rdi) + sbbq $0, %rax + andl $1, %eax + retq + + .globl _mcl_fp_shr1_1L + .p2align 4, 0x90 +_mcl_fp_shr1_1L: ## @mcl_fp_shr1_1L +## BB#0: + movq (%rsi), %rax + shrq %rax + movq %rax, (%rdi) + retq + + .globl _mcl_fp_add1L + .p2align 4, 0x90 +_mcl_fp_add1L: ## @mcl_fp_add1L +## BB#0: + movq (%rdx), %rax + addq (%rsi), %rax + movq %rax, (%rdi) + sbbq %rdx, %rdx + andl $1, %edx + subq (%rcx), %rax + sbbq $0, %rdx + testb $1, %dl + jne LBB14_2 +## BB#1: ## %nocarry + movq %rax, (%rdi) +LBB14_2: ## %carry + retq + + .globl _mcl_fp_addNF1L + .p2align 4, 0x90 +_mcl_fp_addNF1L: ## @mcl_fp_addNF1L +## BB#0: + movq (%rdx), %rax + addq (%rsi), %rax + movq %rax, %rdx + subq (%rcx), %rdx + cmovsq %rax, %rdx + movq %rdx, (%rdi) + retq + + .globl _mcl_fp_sub1L + .p2align 4, 0x90 +_mcl_fp_sub1L: ## @mcl_fp_sub1L +## BB#0: + movq (%rsi), %rax + xorl %esi, %esi + subq (%rdx), %rax + movq %rax, (%rdi) + sbbq $0, %rsi + testb $1, %sil + jne LBB16_2 +## BB#1: ## %nocarry + retq +LBB16_2: ## %carry + addq (%rcx), %rax + movq %rax, (%rdi) + retq + + .globl _mcl_fp_subNF1L + .p2align 4, 0x90 +_mcl_fp_subNF1L: ## @mcl_fp_subNF1L +## BB#0: + movq 
(%rsi), %rax + subq (%rdx), %rax + movq %rax, %rdx + sarq $63, %rdx + andq (%rcx), %rdx + addq %rax, %rdx + movq %rdx, (%rdi) + retq + + .globl _mcl_fpDbl_add1L + .p2align 4, 0x90 +_mcl_fpDbl_add1L: ## @mcl_fpDbl_add1L +## BB#0: + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + movq %rax, (%rdi) + sbbq %rax, %rax + andl $1, %eax + movq %rdx, %rsi + subq (%rcx), %rsi + sbbq $0, %rax + testb $1, %al + cmovneq %rdx, %rsi + movq %rsi, 8(%rdi) + retq + + .globl _mcl_fpDbl_sub1L + .p2align 4, 0x90 +_mcl_fpDbl_sub1L: ## @mcl_fpDbl_sub1L +## BB#0: + movq (%rsi), %rax + movq 8(%rsi), %r8 + xorl %esi, %esi + subq (%rdx), %rax + sbbq 8(%rdx), %r8 + movq %rax, (%rdi) + movl $0, %eax + sbbq $0, %rax + testb $1, %al + cmovneq (%rcx), %rsi + addq %r8, %rsi + movq %rsi, 8(%rdi) + retq + + .globl _mcl_fp_mulUnitPre2L + .p2align 4, 0x90 +_mcl_fp_mulUnitPre2L: ## @mcl_fp_mulUnitPre2L +## BB#0: + movq %rdx, %r8 + movq %r8, %rax + mulq 8(%rsi) + movq %rdx, %rcx + movq %rax, %r9 + movq %r8, %rax + mulq (%rsi) + movq %rax, (%rdi) + addq %r9, %rdx + movq %rdx, 8(%rdi) + adcq $0, %rcx + movq %rcx, 16(%rdi) + retq + + .globl _mcl_fpDbl_mulPre2L + .p2align 4, 0x90 +_mcl_fpDbl_mulPre2L: ## @mcl_fpDbl_mulPre2L +## BB#0: + pushq %r14 + pushq %rbx + movq %rdx, %r10 + movq (%rsi), %r8 + movq 8(%rsi), %r11 + movq (%r10), %rcx + movq %r8, %rax + mulq %rcx + movq %rdx, %r9 + movq %rax, (%rdi) + movq %r11, %rax + mulq %rcx + movq %rdx, %r14 + movq %rax, %rsi + addq %r9, %rsi + adcq $0, %r14 + movq 8(%r10), %rbx + movq %r11, %rax + mulq %rbx + movq %rdx, %r9 + movq %rax, %rcx + movq %r8, %rax + mulq %rbx + addq %rsi, %rax + movq %rax, 8(%rdi) + adcq %r14, %rcx + sbbq %rax, %rax + andl $1, %eax + addq %rdx, %rcx + movq %rcx, 16(%rdi) + adcq %r9, %rax + movq %rax, 24(%rdi) + popq %rbx + popq %r14 + retq + + .globl _mcl_fpDbl_sqrPre2L + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre2L: ## @mcl_fpDbl_sqrPre2L +## BB#0: + movq (%rsi), %rcx + movq 8(%rsi), %r8 + movq %rcx, %rax + mulq %rcx + movq %rdx, %rsi + movq %rax, (%rdi) + movq %r8, %rax + mulq %rcx + movq %rdx, %r9 + movq %rax, %r10 + addq %r10, %rsi + movq %r9, %rcx + adcq $0, %rcx + movq %r8, %rax + mulq %r8 + addq %r10, %rsi + movq %rsi, 8(%rdi) + adcq %rcx, %rax + sbbq %rcx, %rcx + andl $1, %ecx + addq %r9, %rax + movq %rax, 16(%rdi) + adcq %rdx, %rcx + movq %rcx, 24(%rdi) + retq + + .globl _mcl_fp_mont2L + .p2align 4, 0x90 +_mcl_fp_mont2L: ## @mcl_fp_mont2L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, -8(%rsp) ## 8-byte Spill + movq (%rsi), %r8 + movq 8(%rsi), %r11 + movq (%rdx), %rsi + movq 8(%rdx), %r9 + movq %r11, %rax + mulq %rsi + movq %rdx, %r15 + movq %rax, %r10 + movq %r8, %rax + mulq %rsi + movq %rax, %r14 + movq %rdx, %r13 + addq %r10, %r13 + adcq $0, %r15 + movq -8(%rcx), %r10 + movq (%rcx), %rbp + movq %r14, %rsi + imulq %r10, %rsi + movq 8(%rcx), %rdi + movq %rsi, %rax + mulq %rdi + movq %rdx, %rcx + movq %rax, %r12 + movq %rsi, %rax + mulq %rbp + movq %rdx, %rbx + addq %r12, %rbx + adcq $0, %rcx + addq %r14, %rax + adcq %r13, %rbx + adcq %r15, %rcx + sbbq %r15, %r15 + andl $1, %r15d + movq %r9, %rax + mulq %r11 + movq %rdx, %r14 + movq %rax, %r11 + movq %r9, %rax + mulq %r8 + movq %rax, %r8 + movq %rdx, %rsi + addq %r11, %rsi + adcq $0, %r14 + addq %rbx, %r8 + adcq %rcx, %rsi + adcq %r15, %r14 + sbbq %rbx, %rbx + andl $1, %ebx + imulq %r8, %r10 + movq %r10, %rax + mulq %rdi + movq %rdx, %rcx + movq %rax, %r9 + movq %r10, %rax + mulq %rbp + addq %r9, %rdx + adcq $0, %rcx + addq 
%r8, %rax + adcq %rsi, %rdx + adcq %r14, %rcx + adcq $0, %rbx + movq %rdx, %rax + subq %rbp, %rax + movq %rcx, %rsi + sbbq %rdi, %rsi + sbbq $0, %rbx + andl $1, %ebx + cmovneq %rcx, %rsi + testb %bl, %bl + cmovneq %rdx, %rax + movq -8(%rsp), %rcx ## 8-byte Reload + movq %rax, (%rcx) + movq %rsi, 8(%rcx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF2L + .p2align 4, 0x90 +_mcl_fp_montNF2L: ## @mcl_fp_montNF2L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, -8(%rsp) ## 8-byte Spill + movq (%rsi), %r8 + movq 8(%rsi), %r11 + movq (%rdx), %rbp + movq 8(%rdx), %r9 + movq %r8, %rax + mulq %rbp + movq %rax, %rsi + movq %rdx, %r14 + movq -8(%rcx), %r10 + movq (%rcx), %r15 + movq %rsi, %rbx + imulq %r10, %rbx + movq 8(%rcx), %rdi + movq %rbx, %rax + mulq %rdi + movq %rdx, -16(%rsp) ## 8-byte Spill + movq %rax, %r13 + movq %rbx, %rax + mulq %r15 + movq %rdx, %r12 + movq %rax, %rbx + movq %r11, %rax + mulq %rbp + movq %rdx, %rcx + movq %rax, %rbp + addq %r14, %rbp + adcq $0, %rcx + addq %rsi, %rbx + adcq %r13, %rbp + adcq $0, %rcx + addq %r12, %rbp + adcq -16(%rsp), %rcx ## 8-byte Folded Reload + movq %r9, %rax + mulq %r11 + movq %rdx, %rsi + movq %rax, %r11 + movq %r9, %rax + mulq %r8 + movq %rax, %r8 + movq %rdx, %rbx + addq %r11, %rbx + adcq $0, %rsi + addq %rbp, %r8 + adcq %rcx, %rbx + adcq $0, %rsi + imulq %r8, %r10 + movq %r10, %rax + mulq %rdi + movq %rdx, %rcx + movq %rax, %rbp + movq %r10, %rax + mulq %r15 + addq %r8, %rax + adcq %rbx, %rbp + adcq $0, %rsi + addq %rdx, %rbp + adcq %rcx, %rsi + movq %rbp, %rax + subq %r15, %rax + movq %rsi, %rcx + sbbq %rdi, %rcx + cmovsq %rbp, %rax + movq -8(%rsp), %rdx ## 8-byte Reload + movq %rax, (%rdx) + cmovsq %rsi, %rcx + movq %rcx, 8(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed2L + .p2align 4, 0x90 +_mcl_fp_montRed2L: ## @mcl_fp_montRed2L +## BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq -8(%rdx), %r9 + movq (%rdx), %r11 + movq (%rsi), %rbx + movq %rbx, %rcx + imulq %r9, %rcx + movq 8(%rdx), %r14 + movq %rcx, %rax + mulq %r14 + movq %rdx, %r8 + movq %rax, %r10 + movq %rcx, %rax + mulq %r11 + movq %rdx, %rcx + addq %r10, %rcx + adcq $0, %r8 + movq 24(%rsi), %r15 + addq %rbx, %rax + adcq 8(%rsi), %rcx + adcq 16(%rsi), %r8 + adcq $0, %r15 + sbbq %rbx, %rbx + andl $1, %ebx + imulq %rcx, %r9 + movq %r9, %rax + mulq %r14 + movq %rdx, %rsi + movq %rax, %r10 + movq %r9, %rax + mulq %r11 + addq %r10, %rdx + adcq $0, %rsi + addq %rcx, %rax + adcq %r8, %rdx + adcq %r15, %rsi + adcq $0, %rbx + movq %rdx, %rax + subq %r11, %rax + movq %rsi, %rcx + sbbq %r14, %rcx + sbbq $0, %rbx + andl $1, %ebx + cmovneq %rsi, %rcx + testb %bl, %bl + cmovneq %rdx, %rax + movq %rax, (%rdi) + movq %rcx, 8(%rdi) + popq %rbx + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_addPre2L + .p2align 4, 0x90 +_mcl_fp_addPre2L: ## @mcl_fp_addPre2L +## BB#0: + movq (%rdx), %rax + movq 8(%rdx), %rcx + addq (%rsi), %rax + adcq 8(%rsi), %rcx + movq %rax, (%rdi) + movq %rcx, 8(%rdi) + sbbq %rax, %rax + andl $1, %eax + retq + + .globl _mcl_fp_subPre2L + .p2align 4, 0x90 +_mcl_fp_subPre2L: ## @mcl_fp_subPre2L +## BB#0: + movq (%rsi), %rcx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rcx + sbbq 8(%rdx), %rsi + movq %rcx, (%rdi) + movq %rsi, 8(%rdi) + sbbq $0, %rax + andl $1, %eax + retq + + .globl _mcl_fp_shr1_2L + .p2align 4, 0x90 +_mcl_fp_shr1_2L: ## @mcl_fp_shr1_2L +## BB#0: + movq (%rsi), %rax + movq 
8(%rsi), %rcx + shrdq $1, %rcx, %rax + movq %rax, (%rdi) + shrq %rcx + movq %rcx, 8(%rdi) + retq + + .globl _mcl_fp_add2L + .p2align 4, 0x90 +_mcl_fp_add2L: ## @mcl_fp_add2L +## BB#0: + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq $0, %rsi + testb $1, %sil + jne LBB29_2 +## BB#1: ## %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) +LBB29_2: ## %carry + retq + + .globl _mcl_fp_addNF2L + .p2align 4, 0x90 +_mcl_fp_addNF2L: ## @mcl_fp_addNF2L +## BB#0: + movq (%rdx), %rax + movq 8(%rdx), %r8 + addq (%rsi), %rax + adcq 8(%rsi), %r8 + movq %rax, %rsi + subq (%rcx), %rsi + movq %r8, %rdx + sbbq 8(%rcx), %rdx + testq %rdx, %rdx + cmovsq %rax, %rsi + movq %rsi, (%rdi) + cmovsq %r8, %rdx + movq %rdx, 8(%rdi) + retq + + .globl _mcl_fp_sub2L + .p2align 4, 0x90 +_mcl_fp_sub2L: ## @mcl_fp_sub2L +## BB#0: + movq (%rsi), %rax + movq 8(%rsi), %r8 + xorl %esi, %esi + subq (%rdx), %rax + sbbq 8(%rdx), %r8 + movq %rax, (%rdi) + movq %r8, 8(%rdi) + sbbq $0, %rsi + testb $1, %sil + jne LBB31_2 +## BB#1: ## %nocarry + retq +LBB31_2: ## %carry + movq 8(%rcx), %rdx + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %r8, %rdx + movq %rdx, 8(%rdi) + retq + + .globl _mcl_fp_subNF2L + .p2align 4, 0x90 +_mcl_fp_subNF2L: ## @mcl_fp_subNF2L +## BB#0: + movq (%rsi), %r8 + movq 8(%rsi), %rsi + subq (%rdx), %r8 + sbbq 8(%rdx), %rsi + movq %rsi, %rdx + sarq $63, %rdx + movq 8(%rcx), %rax + andq %rdx, %rax + andq (%rcx), %rdx + addq %r8, %rdx + movq %rdx, (%rdi) + adcq %rsi, %rax + movq %rax, 8(%rdi) + retq + + .globl _mcl_fpDbl_add2L + .p2align 4, 0x90 +_mcl_fpDbl_add2L: ## @mcl_fpDbl_add2L +## BB#0: + movq 24(%rdx), %r8 + movq 24(%rsi), %r9 + movq 16(%rdx), %r10 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r10 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + adcq %r8, %r9 + sbbq %rax, %rax + andl $1, %eax + movq %r10, %rdx + subq (%rcx), %rdx + movq %r9, %rsi + sbbq 8(%rcx), %rsi + sbbq $0, %rax + andl $1, %eax + cmovneq %r10, %rdx + movq %rdx, 16(%rdi) + testb %al, %al + cmovneq %r9, %rsi + movq %rsi, 24(%rdi) + retq + + .globl _mcl_fpDbl_sub2L + .p2align 4, 0x90 +_mcl_fpDbl_sub2L: ## @mcl_fpDbl_sub2L +## BB#0: + movq 24(%rdx), %r8 + movq 24(%rsi), %r9 + movq 16(%rsi), %r10 + movq (%rsi), %r11 + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %r11 + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %r10 + movq %r11, (%rdi) + movq %rsi, 8(%rdi) + sbbq %r8, %r9 + movl $0, %edx + sbbq $0, %rdx + andl $1, %edx + movq (%rcx), %rsi + cmoveq %rax, %rsi + testb %dl, %dl + cmovneq 8(%rcx), %rax + addq %r10, %rsi + movq %rsi, 16(%rdi) + adcq %r9, %rax + movq %rax, 24(%rdi) + retq + + .globl _mcl_fp_mulUnitPre3L + .p2align 4, 0x90 +_mcl_fp_mulUnitPre3L: ## @mcl_fp_mulUnitPre3L +## BB#0: + movq %rdx, %rcx + movq %rcx, %rax + mulq 16(%rsi) + movq %rdx, %r8 + movq %rax, %r9 + movq %rcx, %rax + mulq 8(%rsi) + movq %rdx, %r10 + movq %rax, %r11 + movq %rcx, %rax + mulq (%rsi) + movq %rax, (%rdi) + addq %r11, %rdx + movq %rdx, 8(%rdi) + adcq %r9, %r10 + movq %r10, 16(%rdi) + adcq $0, %r8 + movq %r8, 24(%rdi) + retq + + .globl _mcl_fpDbl_mulPre3L + .p2align 4, 0x90 +_mcl_fpDbl_mulPre3L: ## @mcl_fpDbl_mulPre3L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %r10 + movq (%rsi), %r8 + movq 8(%rsi), %r9 + movq (%r10), %rbx + movq %r8, %rax + mulq %rbx + movq %rdx, %rcx + movq 16(%rsi), %r11 + movq %rax, 
(%rdi) + movq %r11, %rax + mulq %rbx + movq %rdx, %r14 + movq %rax, %rsi + movq %r9, %rax + mulq %rbx + movq %rdx, %r15 + movq %rax, %rbx + addq %rcx, %rbx + adcq %rsi, %r15 + adcq $0, %r14 + movq 8(%r10), %rcx + movq %r11, %rax + mulq %rcx + movq %rdx, %r12 + movq %rax, %rbp + movq %r9, %rax + mulq %rcx + movq %rdx, %r13 + movq %rax, %rsi + movq %r8, %rax + mulq %rcx + addq %rbx, %rax + movq %rax, 8(%rdi) + adcq %r15, %rsi + adcq %r14, %rbp + sbbq %r14, %r14 + andl $1, %r14d + addq %rdx, %rsi + adcq %r13, %rbp + adcq %r12, %r14 + movq 16(%r10), %r15 + movq %r11, %rax + mulq %r15 + movq %rdx, %r10 + movq %rax, %rbx + movq %r9, %rax + mulq %r15 + movq %rdx, %r9 + movq %rax, %rcx + movq %r8, %rax + mulq %r15 + addq %rsi, %rax + movq %rax, 16(%rdi) + adcq %rbp, %rcx + adcq %r14, %rbx + sbbq %rax, %rax + andl $1, %eax + addq %rdx, %rcx + movq %rcx, 24(%rdi) + adcq %r9, %rbx + movq %rbx, 32(%rdi) + adcq %r10, %rax + movq %rax, 40(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sqrPre3L + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre3L: ## @mcl_fpDbl_sqrPre3L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 16(%rsi), %r10 + movq (%rsi), %rcx + movq 8(%rsi), %rsi + movq %rcx, %rax + mulq %rcx + movq %rdx, %rbx + movq %rax, (%rdi) + movq %r10, %rax + mulq %rcx + movq %rdx, %r8 + movq %rax, %r11 + movq %rsi, %rax + mulq %rcx + movq %rdx, %r14 + movq %rax, %r12 + addq %r12, %rbx + movq %r14, %r13 + adcq %r11, %r13 + movq %r8, %rcx + adcq $0, %rcx + movq %r10, %rax + mulq %rsi + movq %rdx, %r9 + movq %rax, %r15 + movq %rsi, %rax + mulq %rsi + movq %rax, %rsi + addq %r12, %rbx + movq %rbx, 8(%rdi) + adcq %r13, %rsi + adcq %r15, %rcx + sbbq %rbx, %rbx + andl $1, %ebx + addq %r14, %rsi + adcq %rdx, %rcx + adcq %r9, %rbx + movq %r10, %rax + mulq %r10 + addq %r11, %rsi + movq %rsi, 16(%rdi) + adcq %r15, %rcx + adcq %rbx, %rax + sbbq %rsi, %rsi + andl $1, %esi + addq %r8, %rcx + movq %rcx, 24(%rdi) + adcq %r9, %rax + movq %rax, 32(%rdi) + adcq %rdx, %rsi + movq %rsi, 40(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_mont3L + .p2align 4, 0x90 +_mcl_fp_mont3L: ## @mcl_fp_mont3L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, -8(%rsp) ## 8-byte Spill + movq 16(%rsi), %r10 + movq (%rdx), %rdi + movq %rdx, %r11 + movq %r11, -16(%rsp) ## 8-byte Spill + movq %r10, %rax + movq %r10, -24(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %rbx + movq %rdx, %r15 + movq (%rsi), %rbp + movq %rbp, -64(%rsp) ## 8-byte Spill + movq 8(%rsi), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + mulq %rdi + movq %rdx, %r12 + movq %rax, %rsi + movq %rbp, %rax + mulq %rdi + movq %rax, %r8 + movq %rdx, %r13 + addq %rsi, %r13 + adcq %rbx, %r12 + adcq $0, %r15 + movq -8(%rcx), %r14 + movq %r8, %rbp + imulq %r14, %rbp + movq 16(%rcx), %rdx + movq %rdx, -56(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rax, %r9 + movq %rdx, %rbx + movq (%rcx), %rdi + movq %rdi, -40(%rsp) ## 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, -48(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %rcx + movq %rdx, %rsi + movq %rax, %rcx + movq %rbp, %rax + mulq %rdi + movq %rdx, %rbp + addq %rcx, %rbp + adcq %r9, %rsi + adcq $0, %rbx + addq %r8, %rax + adcq %r13, %rbp + movq 8(%r11), %rcx + adcq %r12, %rsi + adcq %r15, %rbx + sbbq %rdi, %rdi + andl $1, %edi + movq %rcx, %rax + mulq %r10 + movq %rdx, %r15 + movq %rax, %r8 + movq %rcx, %rax + movq -32(%rsp), %r10 ## 8-byte Reload 
+ mulq %r10 + movq %rdx, %r12 + movq %rax, %r9 + movq %rcx, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rax, %r13 + movq %rdx, %rcx + addq %r9, %rcx + adcq %r8, %r12 + adcq $0, %r15 + addq %rbp, %r13 + adcq %rsi, %rcx + adcq %rbx, %r12 + adcq %rdi, %r15 + sbbq %r11, %r11 + andl $1, %r11d + movq %r13, %rdi + imulq %r14, %rdi + movq %rdi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r8 + movq %rdi, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r9 + movq %rdi, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + addq %r9, %rbp + adcq %r8, %rsi + adcq $0, %rbx + addq %r13, %rax + adcq %rcx, %rbp + adcq %r12, %rsi + adcq %r15, %rbx + adcq $0, %r11 + movq -16(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rcx + movq %rcx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r15 + movq %rcx, %rax + mulq %r10 + movq %rdx, %r10 + movq %rax, %rdi + movq %rcx, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rax, %r9 + movq %rdx, %rcx + addq %rdi, %rcx + adcq %r15, %r10 + adcq $0, %r8 + addq %rbp, %r9 + adcq %rsi, %rcx + adcq %rbx, %r10 + adcq %r11, %r8 + sbbq %rdi, %rdi + andl $1, %edi + imulq %r9, %r14 + movq %r14, %rax + movq -56(%rsp), %r15 ## 8-byte Reload + mulq %r15 + movq %rdx, %rbx + movq %rax, %r11 + movq %r14, %rax + movq -48(%rsp), %r12 ## 8-byte Reload + mulq %r12 + movq %rdx, %rsi + movq %rax, %r13 + movq %r14, %rax + movq -40(%rsp), %rbp ## 8-byte Reload + mulq %rbp + addq %r13, %rdx + adcq %r11, %rsi + adcq $0, %rbx + addq %r9, %rax + adcq %rcx, %rdx + adcq %r10, %rsi + adcq %r8, %rbx + adcq $0, %rdi + movq %rdx, %rax + subq %rbp, %rax + movq %rsi, %rcx + sbbq %r12, %rcx + movq %rbx, %rbp + sbbq %r15, %rbp + sbbq $0, %rdi + andl $1, %edi + cmovneq %rbx, %rbp + testb %dil, %dil + cmovneq %rdx, %rax + movq -8(%rsp), %rdx ## 8-byte Reload + movq %rax, (%rdx) + cmovneq %rsi, %rcx + movq %rcx, 8(%rdx) + movq %rbp, 16(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF3L + .p2align 4, 0x90 +_mcl_fp_montNF3L: ## @mcl_fp_montNF3L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %r10 + movq %rdi, -8(%rsp) ## 8-byte Spill + movq 16(%rsi), %r11 + movq (%r10), %rbp + movq %r10, -16(%rsp) ## 8-byte Spill + movq %r11, %rax + movq %r11, -24(%rsp) ## 8-byte Spill + mulq %rbp + movq %rax, %r14 + movq %rdx, %r15 + movq (%rsi), %rbx + movq %rbx, -48(%rsp) ## 8-byte Spill + movq 8(%rsi), %rax + movq %rax, -64(%rsp) ## 8-byte Spill + mulq %rbp + movq %rdx, %rdi + movq %rax, %r8 + movq %rbx, %rax + mulq %rbp + movq %rax, %r13 + movq %rdx, %rbp + addq %r8, %rbp + adcq %r14, %rdi + adcq $0, %r15 + movq -8(%rcx), %r14 + movq %r13, %rbx + imulq %r14, %rbx + movq 16(%rcx), %rdx + movq %rdx, -56(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rdx + movq %rax, %r12 + movq %rdx, %r8 + movq (%rcx), %rsi + movq %rsi, -32(%rsp) ## 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, -40(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rcx + movq %rdx, %r9 + movq %rax, %rcx + movq %rbx, %rax + mulq %rsi + addq %r13, %rax + adcq %rbp, %rcx + adcq %rdi, %r12 + adcq $0, %r15 + addq %rdx, %rcx + movq 8(%r10), %rbp + adcq %r9, %r12 + adcq %r8, %r15 + movq %rbp, %rax + mulq %r11 + movq %rdx, %rsi + movq %rax, %r8 + movq %rbp, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r9 + movq %rbp, %rax + movq -48(%rsp), %r10 ## 8-byte Reload + mulq %r10 + movq %rax, %r13 + movq %rdx, 
%rbp + addq %r9, %rbp + adcq %r8, %rbx + adcq $0, %rsi + addq %rcx, %r13 + adcq %r12, %rbp + adcq %r15, %rbx + adcq $0, %rsi + movq %r13, %rcx + imulq %r14, %rcx + movq %rcx, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r15 + movq %rcx, %rax + movq -40(%rsp), %rdi ## 8-byte Reload + mulq %rdi + movq %rdx, %r9 + movq %rax, %r12 + movq %rcx, %rax + movq -32(%rsp), %r11 ## 8-byte Reload + mulq %r11 + addq %r13, %rax + adcq %rbp, %r12 + adcq %rbx, %r15 + adcq $0, %rsi + addq %rdx, %r12 + adcq %r9, %r15 + adcq %r8, %rsi + movq -16(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rbx + movq %rbx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r8 + movq %rbx, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r9 + movq %rbx, %rax + mulq %r10 + movq %rax, %r10 + movq %rdx, %rbx + addq %r9, %rbx + adcq %r8, %rcx + adcq $0, %rbp + addq %r12, %r10 + adcq %r15, %rbx + adcq %rsi, %rcx + adcq $0, %rbp + imulq %r10, %r14 + movq %r14, %rax + movq -56(%rsp), %r15 ## 8-byte Reload + mulq %r15 + movq %rdx, %r8 + movq %rax, %rsi + movq %r14, %rax + movq %rdi, %r12 + mulq %r12 + movq %rdx, %r9 + movq %rax, %rdi + movq %r14, %rax + mulq %r11 + addq %r10, %rax + adcq %rbx, %rdi + adcq %rcx, %rsi + adcq $0, %rbp + addq %rdx, %rdi + adcq %r9, %rsi + adcq %r8, %rbp + movq %rdi, %rax + subq %r11, %rax + movq %rsi, %rcx + sbbq %r12, %rcx + movq %rbp, %rbx + sbbq %r15, %rbx + movq %rbx, %rdx + sarq $63, %rdx + cmovsq %rdi, %rax + movq -8(%rsp), %rdx ## 8-byte Reload + movq %rax, (%rdx) + cmovsq %rsi, %rcx + movq %rcx, 8(%rdx) + cmovsq %rbp, %rbx + movq %rbx, 16(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed3L + .p2align 4, 0x90 +_mcl_fp_montRed3L: ## @mcl_fp_montRed3L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rdi, -8(%rsp) ## 8-byte Spill + movq -8(%rcx), %r9 + movq (%rcx), %rdi + movq (%rsi), %r15 + movq %r15, %rbx + imulq %r9, %rbx + movq 16(%rcx), %rbp + movq %rbx, %rax + mulq %rbp + movq %rbp, -24(%rsp) ## 8-byte Spill + movq %rax, %r11 + movq %rdx, %r8 + movq 8(%rcx), %rcx + movq %rbx, %rax + mulq %rcx + movq %rcx, %r12 + movq %r12, -32(%rsp) ## 8-byte Spill + movq %rdx, %r10 + movq %rax, %r14 + movq %rbx, %rax + mulq %rdi + movq %rdi, %rbx + movq %rbx, -16(%rsp) ## 8-byte Spill + movq %rdx, %rcx + addq %r14, %rcx + adcq %r11, %r10 + adcq $0, %r8 + movq 40(%rsi), %rdi + movq 32(%rsi), %r13 + addq %r15, %rax + adcq 8(%rsi), %rcx + adcq 16(%rsi), %r10 + adcq 24(%rsi), %r8 + adcq $0, %r13 + adcq $0, %rdi + sbbq %r15, %r15 + andl $1, %r15d + movq %rcx, %rsi + imulq %r9, %rsi + movq %rsi, %rax + mulq %rbp + movq %rdx, %r11 + movq %rax, %rbp + movq %rsi, %rax + mulq %r12 + movq %rdx, %r14 + movq %rax, %r12 + movq %rsi, %rax + mulq %rbx + movq %rdx, %rbx + addq %r12, %rbx + adcq %rbp, %r14 + adcq $0, %r11 + addq %rcx, %rax + adcq %r10, %rbx + adcq %r8, %r14 + adcq %r13, %r11 + adcq $0, %rdi + adcq $0, %r15 + imulq %rbx, %r9 + movq %r9, %rax + movq -24(%rsp), %r12 ## 8-byte Reload + mulq %r12 + movq %rdx, %rbp + movq %rax, %r8 + movq %r9, %rax + movq -32(%rsp), %r13 ## 8-byte Reload + mulq %r13 + movq %rdx, %rsi + movq %rax, %r10 + movq %r9, %rax + movq -16(%rsp), %rcx ## 8-byte Reload + mulq %rcx + addq %r10, %rdx + adcq %r8, %rsi + adcq $0, %rbp + addq %rbx, %rax + adcq %r14, %rdx + adcq %r11, %rsi + adcq %rdi, %rbp + adcq $0, %r15 + movq %rdx, %rax + subq %rcx, %rax + movq %rsi, %rdi + sbbq %r13, 
%rdi + movq %rbp, %rcx + sbbq %r12, %rcx + sbbq $0, %r15 + andl $1, %r15d + cmovneq %rbp, %rcx + testb %r15b, %r15b + cmovneq %rdx, %rax + movq -8(%rsp), %rdx ## 8-byte Reload + movq %rax, (%rdx) + cmovneq %rsi, %rdi + movq %rdi, 8(%rdx) + movq %rcx, 16(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_addPre3L + .p2align 4, 0x90 +_mcl_fp_addPre3L: ## @mcl_fp_addPre3L +## BB#0: + movq 16(%rdx), %rax + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rax + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %rax, 16(%rdi) + sbbq %rax, %rax + andl $1, %eax + retq + + .globl _mcl_fp_subPre3L + .p2align 4, 0x90 +_mcl_fp_subPre3L: ## @mcl_fp_subPre3L +## BB#0: + movq 16(%rsi), %r8 + movq (%rsi), %rcx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rcx + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %r8 + movq %rcx, (%rdi) + movq %rsi, 8(%rdi) + movq %r8, 16(%rdi) + sbbq $0, %rax + andl $1, %eax + retq + + .globl _mcl_fp_shr1_3L + .p2align 4, 0x90 +_mcl_fp_shr1_3L: ## @mcl_fp_shr1_3L +## BB#0: + movq 16(%rsi), %rax + movq (%rsi), %rcx + movq 8(%rsi), %rdx + shrdq $1, %rdx, %rcx + movq %rcx, (%rdi) + shrdq $1, %rax, %rdx + movq %rdx, 8(%rdi) + shrq %rax + movq %rax, 16(%rdi) + retq + + .globl _mcl_fp_add3L + .p2align 4, 0x90 +_mcl_fp_add3L: ## @mcl_fp_add3L +## BB#0: + movq 16(%rdx), %r8 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r8 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r8, 16(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne LBB44_2 +## BB#1: ## %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r8, 16(%rdi) +LBB44_2: ## %carry + retq + + .globl _mcl_fp_addNF3L + .p2align 4, 0x90 +_mcl_fp_addNF3L: ## @mcl_fp_addNF3L +## BB#0: + movq 16(%rdx), %r8 + movq (%rdx), %r10 + movq 8(%rdx), %r9 + addq (%rsi), %r10 + adcq 8(%rsi), %r9 + adcq 16(%rsi), %r8 + movq %r10, %rsi + subq (%rcx), %rsi + movq %r9, %rdx + sbbq 8(%rcx), %rdx + movq %r8, %rax + sbbq 16(%rcx), %rax + movq %rax, %rcx + sarq $63, %rcx + cmovsq %r10, %rsi + movq %rsi, (%rdi) + cmovsq %r9, %rdx + movq %rdx, 8(%rdi) + cmovsq %r8, %rax + movq %rax, 16(%rdi) + retq + + .globl _mcl_fp_sub3L + .p2align 4, 0x90 +_mcl_fp_sub3L: ## @mcl_fp_sub3L +## BB#0: + movq 16(%rsi), %r8 + movq (%rsi), %rax + movq 8(%rsi), %r9 + xorl %esi, %esi + subq (%rdx), %rax + sbbq 8(%rdx), %r9 + sbbq 16(%rdx), %r8 + movq %rax, (%rdi) + movq %r9, 8(%rdi) + movq %r8, 16(%rdi) + sbbq $0, %rsi + testb $1, %sil + jne LBB46_2 +## BB#1: ## %nocarry + retq +LBB46_2: ## %carry + movq 8(%rcx), %rdx + movq 16(%rcx), %rsi + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %r9, %rdx + movq %rdx, 8(%rdi) + adcq %r8, %rsi + movq %rsi, 16(%rdi) + retq + + .globl _mcl_fp_subNF3L + .p2align 4, 0x90 +_mcl_fp_subNF3L: ## @mcl_fp_subNF3L +## BB#0: + movq 16(%rsi), %r10 + movq (%rsi), %r8 + movq 8(%rsi), %r9 + subq (%rdx), %r8 + sbbq 8(%rdx), %r9 + sbbq 16(%rdx), %r10 + movq %r10, %rdx + sarq $63, %rdx + movq %rdx, %rsi + shldq $1, %r10, %rsi + andq (%rcx), %rsi + movq 16(%rcx), %rax + andq %rdx, %rax + andq 8(%rcx), %rdx + addq %r8, %rsi + movq %rsi, (%rdi) + adcq %r9, %rdx + movq %rdx, 8(%rdi) + adcq %r10, %rax + movq %rax, 16(%rdi) + retq + + .globl _mcl_fpDbl_add3L + .p2align 4, 0x90 +_mcl_fpDbl_add3L: ## @mcl_fpDbl_add3L +## BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 40(%rdx), %r10 + movq 40(%rsi), %r8 + movq 32(%rdx), %r11 + 
movq 24(%rdx), %r14 + movq 24(%rsi), %r15 + movq 32(%rsi), %r9 + movq 16(%rdx), %rbx + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rbx + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %rbx, 16(%rdi) + adcq %r14, %r15 + adcq %r11, %r9 + adcq %r10, %r8 + sbbq %rax, %rax + andl $1, %eax + movq %r15, %rdx + subq (%rcx), %rdx + movq %r9, %rsi + sbbq 8(%rcx), %rsi + movq %r8, %rbx + sbbq 16(%rcx), %rbx + sbbq $0, %rax + andl $1, %eax + cmovneq %r15, %rdx + movq %rdx, 24(%rdi) + testb %al, %al + cmovneq %r9, %rsi + movq %rsi, 32(%rdi) + cmovneq %r8, %rbx + movq %rbx, 40(%rdi) + popq %rbx + popq %r14 + popq %r15 + retq + + .globl _mcl_fpDbl_sub3L + .p2align 4, 0x90 +_mcl_fpDbl_sub3L: ## @mcl_fpDbl_sub3L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 40(%rdx), %r10 + movq 40(%rsi), %r8 + movq 32(%rsi), %r9 + movq 24(%rsi), %r11 + movq 16(%rsi), %r14 + movq (%rsi), %rbx + movq 8(%rsi), %rax + xorl %esi, %esi + subq (%rdx), %rbx + sbbq 8(%rdx), %rax + movq 24(%rdx), %r15 + movq 32(%rdx), %r12 + sbbq 16(%rdx), %r14 + movq %rbx, (%rdi) + movq %rax, 8(%rdi) + movq %r14, 16(%rdi) + sbbq %r15, %r11 + sbbq %r12, %r9 + sbbq %r10, %r8 + movl $0, %eax + sbbq $0, %rax + andl $1, %eax + movq (%rcx), %rdx + cmoveq %rsi, %rdx + testb %al, %al + movq 16(%rcx), %rax + cmoveq %rsi, %rax + cmovneq 8(%rcx), %rsi + addq %r11, %rdx + movq %rdx, 24(%rdi) + adcq %r9, %rsi + movq %rsi, 32(%rdi) + adcq %r8, %rax + movq %rax, 40(%rdi) + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_mulUnitPre4L + .p2align 4, 0x90 +_mcl_fp_mulUnitPre4L: ## @mcl_fp_mulUnitPre4L +## BB#0: + pushq %r14 + pushq %rbx + movq %rdx, %rcx + movq %rcx, %rax + mulq 24(%rsi) + movq %rdx, %r8 + movq %rax, %r9 + movq %rcx, %rax + mulq 16(%rsi) + movq %rdx, %r10 + movq %rax, %r11 + movq %rcx, %rax + mulq 8(%rsi) + movq %rdx, %rbx + movq %rax, %r14 + movq %rcx, %rax + mulq (%rsi) + movq %rax, (%rdi) + addq %r14, %rdx + movq %rdx, 8(%rdi) + adcq %r11, %rbx + movq %rbx, 16(%rdi) + adcq %r9, %r10 + movq %r10, 24(%rdi) + adcq $0, %r8 + movq %r8, 32(%rdi) + popq %rbx + popq %r14 + retq + + .globl _mcl_fpDbl_mulPre4L + .p2align 4, 0x90 +_mcl_fpDbl_mulPre4L: ## @mcl_fpDbl_mulPre4L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq (%rsi), %rax + movq %rax, -64(%rsp) ## 8-byte Spill + movq 8(%rsi), %r8 + movq %r8, -56(%rsp) ## 8-byte Spill + movq (%rdx), %rbx + movq %rdx, %rbp + mulq %rbx + movq %rdx, %r15 + movq 16(%rsi), %rcx + movq 24(%rsi), %r11 + movq %rax, (%rdi) + movq %r11, %rax + mulq %rbx + movq %rdx, %r12 + movq %rax, %r14 + movq %rcx, %rax + movq %rcx, -16(%rsp) ## 8-byte Spill + mulq %rbx + movq %rdx, %r10 + movq %rax, %r9 + movq %r8, %rax + mulq %rbx + movq %rdx, %r13 + movq %rax, %r8 + addq %r15, %r8 + adcq %r9, %r13 + adcq %r14, %r10 + adcq $0, %r12 + movq %rbp, %r9 + movq %r9, -8(%rsp) ## 8-byte Spill + movq 8(%r9), %rbp + movq %r11, %rax + mulq %rbp + movq %rdx, -24(%rsp) ## 8-byte Spill + movq %rax, %r15 + movq %rcx, %rax + mulq %rbp + movq %rdx, -32(%rsp) ## 8-byte Spill + movq %rax, %rcx + movq -56(%rsp), %r14 ## 8-byte Reload + movq %r14, %rax + mulq %rbp + movq %rdx, -40(%rsp) ## 8-byte Spill + movq %rax, %rbx + movq -64(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq %rdx, -48(%rsp) ## 8-byte Spill + addq %r8, %rax + movq %rax, 8(%rdi) + adcq %r13, %rbx + adcq %r10, %rcx + adcq %r12, %r15 + sbbq %r13, %r13 + movq 16(%r9), %rbp + movq %r14, %rax + mulq %rbp + movq %rax, %r12 + movq %rdx, %r14 + andl 
$1, %r13d + addq -48(%rsp), %rbx ## 8-byte Folded Reload + adcq -40(%rsp), %rcx ## 8-byte Folded Reload + adcq -32(%rsp), %r15 ## 8-byte Folded Reload + adcq -24(%rsp), %r13 ## 8-byte Folded Reload + movq %r11, %rax + mulq %rbp + movq %rdx, %r8 + movq %rax, %r11 + movq -16(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq %rdx, %r9 + movq %rax, %r10 + movq -64(%rsp), %rax ## 8-byte Reload + mulq %rbp + addq %rbx, %rax + movq %rax, 16(%rdi) + adcq %r12, %rcx + adcq %r15, %r10 + adcq %r13, %r11 + sbbq %r13, %r13 + andl $1, %r13d + addq %rdx, %rcx + adcq %r14, %r10 + adcq %r9, %r11 + adcq %r8, %r13 + movq -8(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rbx + movq %rbx, %rax + mulq 24(%rsi) + movq %rdx, %r8 + movq %rax, %r14 + movq %rbx, %rax + mulq 16(%rsi) + movq %rdx, %r9 + movq %rax, %r12 + movq %rbx, %rax + mulq 8(%rsi) + movq %rdx, %r15 + movq %rax, %rbp + movq %rbx, %rax + mulq (%rsi) + addq %rcx, %rax + movq %rax, 24(%rdi) + adcq %r10, %rbp + adcq %r11, %r12 + adcq %r13, %r14 + sbbq %rax, %rax + andl $1, %eax + addq %rdx, %rbp + movq %rbp, 32(%rdi) + adcq %r15, %r12 + movq %r12, 40(%rdi) + adcq %r9, %r14 + movq %r14, 48(%rdi) + adcq %r8, %rax + movq %rax, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sqrPre4L + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre4L: ## @mcl_fpDbl_sqrPre4L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rsi, %r10 + movq 16(%r10), %r9 + movq 24(%r10), %r11 + movq (%r10), %r15 + movq 8(%r10), %r8 + movq %r15, %rax + mulq %r15 + movq %rdx, %rbp + movq %rax, (%rdi) + movq %r11, %rax + mulq %r8 + movq %rdx, -8(%rsp) ## 8-byte Spill + movq %rax, -32(%rsp) ## 8-byte Spill + movq %r9, %rax + mulq %r8 + movq %rdx, -24(%rsp) ## 8-byte Spill + movq %rax, -40(%rsp) ## 8-byte Spill + movq %r11, %rax + mulq %r15 + movq %rdx, %rbx + movq %rax, %rcx + movq %r9, %rax + mulq %r15 + movq %rdx, %rsi + movq %rsi, -16(%rsp) ## 8-byte Spill + movq %rax, %r12 + movq %r8, %rax + mulq %r8 + movq %rdx, %r13 + movq %rax, %r14 + movq %r8, %rax + mulq %r15 + addq %rax, %rbp + movq %rdx, %r8 + adcq %r12, %r8 + adcq %rsi, %rcx + adcq $0, %rbx + addq %rax, %rbp + movq %rbp, 8(%rdi) + adcq %r14, %r8 + movq -40(%rsp), %rsi ## 8-byte Reload + adcq %rsi, %rcx + adcq -32(%rsp), %rbx ## 8-byte Folded Reload + sbbq %rbp, %rbp + andl $1, %ebp + addq %rdx, %r8 + adcq %r13, %rcx + movq -24(%rsp), %r15 ## 8-byte Reload + adcq %r15, %rbx + adcq -8(%rsp), %rbp ## 8-byte Folded Reload + movq %r11, %rax + mulq %r9 + movq %rdx, %r14 + movq %rax, %r11 + movq %r9, %rax + mulq %r9 + movq %rax, %r9 + addq %r12, %r8 + movq %r8, 16(%rdi) + adcq %rsi, %rcx + adcq %rbx, %r9 + adcq %rbp, %r11 + sbbq %r12, %r12 + andl $1, %r12d + addq -16(%rsp), %rcx ## 8-byte Folded Reload + adcq %r15, %r9 + adcq %rdx, %r11 + adcq %r14, %r12 + movq 24(%r10), %rbp + movq %rbp, %rax + mulq 16(%r10) + movq %rdx, %r8 + movq %rax, %r14 + movq %rbp, %rax + mulq 8(%r10) + movq %rdx, %r13 + movq %rax, %rbx + movq %rbp, %rax + mulq (%r10) + movq %rdx, %r15 + movq %rax, %rsi + movq %rbp, %rax + mulq %rbp + addq %rcx, %rsi + movq %rsi, 24(%rdi) + adcq %r9, %rbx + adcq %r11, %r14 + adcq %r12, %rax + sbbq %rcx, %rcx + andl $1, %ecx + addq %r15, %rbx + movq %rbx, 32(%rdi) + adcq %r13, %r14 + movq %r14, 40(%rdi) + adcq %r8, %rax + movq %rax, 48(%rdi) + adcq %rdx, %rcx + movq %rcx, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mont4L + .p2align 4, 0x90 +_mcl_fp_mont4L: ## @mcl_fp_mont4L 
+## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rdi, -8(%rsp) ## 8-byte Spill + movq 24(%rsi), %rax + movq %rax, -40(%rsp) ## 8-byte Spill + movq (%rdx), %rbp + mulq %rbp + movq %rax, %r9 + movq %rdx, %r8 + movq 16(%rsi), %rax + movq %rax, -48(%rsp) ## 8-byte Spill + mulq %rbp + movq %rax, %rbx + movq %rdx, %r11 + movq (%rsi), %rdi + movq %rdi, -56(%rsp) ## 8-byte Spill + movq 8(%rsi), %rax + movq %rax, -64(%rsp) ## 8-byte Spill + mulq %rbp + movq %rdx, %r12 + movq %rax, %rsi + movq %rdi, %rax + mulq %rbp + movq %rax, %r13 + movq %rdx, %r15 + addq %rsi, %r15 + adcq %rbx, %r12 + adcq %r9, %r11 + adcq $0, %r8 + movq -8(%rcx), %rax + movq %rax, -88(%rsp) ## 8-byte Spill + movq %r13, %rsi + imulq %rax, %rsi + movq 24(%rcx), %rdx + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq %rdx + movq %rax, %r10 + movq %rdx, %r9 + movq 16(%rcx), %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq %rdx + movq %rax, %r14 + movq %rdx, %rbx + movq (%rcx), %rbp + movq %rbp, -24(%rsp) ## 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, -32(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq %rcx + movq %rdx, %rdi + movq %rax, %rcx + movq %rsi, %rax + mulq %rbp + movq %rdx, %rsi + addq %rcx, %rsi + adcq %r14, %rdi + adcq %r10, %rbx + adcq $0, %r9 + addq %r13, %rax + adcq %r15, %rsi + adcq %r12, %rdi + adcq %r11, %rbx + adcq %r8, %r9 + sbbq %r15, %r15 + andl $1, %r15d + movq -96(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rbp + movq %rbp, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %r10 + movq %rbp, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %r11 + movq %rbp, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r14 + movq %rbp, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rax, %r8 + movq %rdx, %rbp + addq %r14, %rbp + adcq %r11, %rcx + adcq %r10, %r13 + adcq $0, %r12 + addq %rsi, %r8 + adcq %rdi, %rbp + adcq %rbx, %rcx + adcq %r9, %r13 + adcq %r15, %r12 + sbbq %r15, %r15 + andl $1, %r15d + movq %r8, %rsi + imulq -88(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, %r10 + movq %rsi, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r11 + movq %rsi, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r14 + movq %rsi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + addq %r14, %rsi + adcq %r11, %rdi + adcq %r10, %rbx + adcq $0, %r9 + addq %r8, %rax + adcq %rbp, %rsi + adcq %rcx, %rdi + adcq %r13, %rbx + adcq %r12, %r9 + adcq $0, %r15 + movq -96(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rbp + movq %rbp, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %r10 + movq %rbp, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %r11 + movq %rbp, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r14 + movq %rbp, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rax, %rbp + movq %rdx, %r8 + addq %r14, %r8 + adcq %r11, %rcx + adcq %r10, %r13 + adcq $0, %r12 + addq %rsi, %rbp + adcq %rdi, %r8 + adcq %rbx, %rcx + adcq %r9, %r13 + adcq %r15, %r12 + sbbq %r14, %r14 + movq %rbp, %rsi + imulq -88(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, -16(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -80(%rsp) ## 8-byte 
Folded Reload + movq %rdx, %rbx + movq %rax, %r10 + movq %rsi, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, %r15 + movq %rsi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + andl $1, %r14d + addq %r15, %r11 + adcq %r10, %r9 + adcq -16(%rsp), %rbx ## 8-byte Folded Reload + adcq $0, %rdi + addq %rbp, %rax + adcq %r8, %r11 + adcq %rcx, %r9 + adcq %r13, %rbx + adcq %r12, %rdi + adcq $0, %r14 + movq -96(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rcx + movq %rcx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %r15 + movq %rcx, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r13 + movq %rcx, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rax, %r10 + movq %rdx, %rbp + addq %r13, %rbp + adcq %r15, %rsi + adcq -96(%rsp), %r12 ## 8-byte Folded Reload + adcq $0, %r8 + addq %r11, %r10 + adcq %r9, %rbp + adcq %rbx, %rsi + adcq %rdi, %r12 + adcq %r14, %r8 + sbbq %rdi, %rdi + andl $1, %edi + movq -88(%rsp), %rcx ## 8-byte Reload + imulq %r10, %rcx + movq %rcx, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %rbx + movq %rcx, %rax + movq %rcx, %r9 + movq -32(%rsp), %r11 ## 8-byte Reload + mulq %r11 + movq %rdx, %rcx + movq %rax, %r14 + movq %r9, %rax + movq -24(%rsp), %r9 ## 8-byte Reload + mulq %r9 + addq %r14, %rdx + adcq %rbx, %rcx + adcq -88(%rsp), %r15 ## 8-byte Folded Reload + adcq $0, %r13 + addq %r10, %rax + adcq %rbp, %rdx + adcq %rsi, %rcx + adcq %r12, %r15 + adcq %r8, %r13 + adcq $0, %rdi + movq %rdx, %rax + subq %r9, %rax + movq %rcx, %rsi + sbbq %r11, %rsi + movq %r15, %rbp + sbbq -80(%rsp), %rbp ## 8-byte Folded Reload + movq %r13, %rbx + sbbq -72(%rsp), %rbx ## 8-byte Folded Reload + sbbq $0, %rdi + andl $1, %edi + cmovneq %r13, %rbx + testb %dil, %dil + cmovneq %rdx, %rax + movq -8(%rsp), %rdx ## 8-byte Reload + movq %rax, (%rdx) + cmovneq %rcx, %rsi + movq %rsi, 8(%rdx) + cmovneq %r15, %rbp + movq %rbp, 16(%rdx) + movq %rbx, 24(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF4L + .p2align 4, 0x90 +_mcl_fp_montNF4L: ## @mcl_fp_montNF4L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %r15 + movq %rdi, -8(%rsp) ## 8-byte Spill + movq 24(%rsi), %rax + movq %rax, -88(%rsp) ## 8-byte Spill + movq (%r15), %rdi + movq %r15, -24(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r8 + movq %rdx, %r12 + movq 16(%rsi), %rax + movq %rax, -96(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r14 + movq %rdx, %r10 + movq (%rsi), %rbp + movq %rbp, -56(%rsp) ## 8-byte Spill + movq 8(%rsi), %rax + movq %rax, -64(%rsp) ## 8-byte Spill + mulq %rdi + movq %rdx, %rbx + movq %rax, %rsi + movq %rbp, %rax + mulq %rdi + movq %rax, %r11 + movq %rdx, %r9 + addq %rsi, %r9 + adcq %r14, %rbx + adcq %r8, %r10 + adcq $0, %r12 + movq -8(%rcx), %rax + movq %rax, -80(%rsp) ## 8-byte Spill + movq %r11, %rsi + imulq %rax, %rsi + movq 24(%rcx), %rdx + movq %rdx, -40(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq %rdx + movq %rax, %r13 + movq %rdx, -16(%rsp) ## 8-byte Spill + movq 16(%rcx), %rdx + movq %rdx, -48(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq %rdx + movq %rax, %r8 + movq %rdx, %r14 + movq (%rcx), %rdi + movq %rdi, -72(%rsp) ## 
8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, -32(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq %rcx + movq %rdx, %rcx + movq %rax, %rbp + movq %rsi, %rax + mulq %rdi + addq %r11, %rax + adcq %r9, %rbp + adcq %rbx, %r8 + adcq %r10, %r13 + adcq $0, %r12 + addq %rdx, %rbp + adcq %rcx, %r8 + adcq %r14, %r13 + adcq -16(%rsp), %r12 ## 8-byte Folded Reload + movq 8(%r15), %rdi + movq %rdi, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %rsi + movq %rdi, %rax + mulq -96(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, %r11 + movq %rdi, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r14 + movq %rdi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rax, %rdi + movq %rdx, %r9 + addq %r14, %r9 + adcq %r11, %rcx + adcq %rsi, %r10 + adcq $0, %rbx + addq %rbp, %rdi + adcq %r8, %r9 + adcq %r13, %rcx + adcq %r12, %r10 + adcq $0, %rbx + movq %rdi, %rsi + imulq -80(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r12 + movq %rsi, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, %r13 + movq %rsi, %rax + movq -32(%rsp), %r15 ## 8-byte Reload + mulq %r15 + movq %rdx, %r14 + movq %rax, %rbp + movq %rsi, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + addq %rdi, %rax + adcq %r9, %rbp + adcq %rcx, %r13 + adcq %r10, %r12 + adcq $0, %rbx + addq %rdx, %rbp + adcq %r14, %r13 + adcq %r11, %r12 + adcq %r8, %rbx + movq -24(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rdi + movq %rdi, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r10 + movq %rdi, %rax + mulq -96(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r11 + movq %rdi, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r14 + movq %rdi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rax, %r9 + movq %rdx, %rdi + addq %r14, %rdi + adcq %r11, %rcx + adcq %r10, %r8 + adcq $0, %rsi + addq %rbp, %r9 + adcq %r13, %rdi + adcq %r12, %rcx + adcq %rbx, %r8 + adcq $0, %rsi + movq %r9, %rbx + imulq -80(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, %r12 + movq %rbx, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, %r13 + movq %rbx, %rax + mulq %r15 + movq %rdx, %r14 + movq %rax, %rbp + movq %rbx, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + addq %r9, %rax + adcq %rdi, %rbp + adcq %rcx, %r13 + adcq %r8, %r12 + adcq $0, %rsi + addq %rdx, %rbp + adcq %r14, %r13 + adcq %r11, %r12 + adcq %r10, %rsi + movq -24(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rdi + movq %rdi, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %rcx + movq %rdi, %rax + mulq -96(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r11 + movq %rdi, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, %r14 + movq %rdi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rax, %r9 + movq %rdx, %rdi + addq %r14, %rdi + adcq %r11, %r10 + adcq %rcx, %r8 + adcq $0, %rbx + addq %rbp, %r9 + adcq %r13, %rdi + adcq %r12, %r10 + adcq %rsi, %r8 + adcq $0, %rbx + movq -80(%rsp), %rcx ## 8-byte Reload + imulq %r9, %rcx + movq %rcx, %rax + movq -40(%rsp), %r12 ## 8-byte Reload + mulq %r12 + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, %r13 + movq %rcx, %rax + movq -48(%rsp), %r11 ## 8-byte Reload + mulq %r11 + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, %rbp + movq %rcx, %rax + movq 
%rcx, %r15 + movq -72(%rsp), %rsi ## 8-byte Reload + mulq %rsi + movq %rdx, %r14 + movq %rax, %rcx + movq %r15, %rax + movq -32(%rsp), %r15 ## 8-byte Reload + mulq %r15 + addq %r9, %rcx + adcq %rdi, %rax + adcq %r10, %rbp + adcq %r8, %r13 + adcq $0, %rbx + addq %r14, %rax + adcq %rdx, %rbp + adcq -96(%rsp), %r13 ## 8-byte Folded Reload + adcq -88(%rsp), %rbx ## 8-byte Folded Reload + movq %rax, %rcx + subq %rsi, %rcx + movq %rbp, %rdx + sbbq %r15, %rdx + movq %r13, %rdi + sbbq %r11, %rdi + movq %rbx, %rsi + sbbq %r12, %rsi + cmovsq %rax, %rcx + movq -8(%rsp), %rax ## 8-byte Reload + movq %rcx, (%rax) + cmovsq %rbp, %rdx + movq %rdx, 8(%rax) + cmovsq %r13, %rdi + movq %rdi, 16(%rax) + cmovsq %rbx, %rsi + movq %rsi, 24(%rax) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed4L + .p2align 4, 0x90 +_mcl_fp_montRed4L: ## @mcl_fp_montRed4L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rdi, -8(%rsp) ## 8-byte Spill + movq -8(%rcx), %rax + movq (%rcx), %rdi + movq %rdi, -32(%rsp) ## 8-byte Spill + movq (%rsi), %r12 + movq %r12, %rbx + imulq %rax, %rbx + movq %rax, %r9 + movq %r9, -64(%rsp) ## 8-byte Spill + movq 24(%rcx), %rdx + movq %rdx, -40(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rdx + movq %rax, %r11 + movq %rdx, %r8 + movq 16(%rcx), %rbp + movq %rbx, %rax + mulq %rbp + movq %rbp, %r13 + movq %r13, -24(%rsp) ## 8-byte Spill + movq %rax, %r14 + movq %rdx, %r10 + movq 8(%rcx), %rcx + movq %rbx, %rax + mulq %rcx + movq %rcx, %rbp + movq %rbp, -16(%rsp) ## 8-byte Spill + movq %rdx, %r15 + movq %rax, %rcx + movq %rbx, %rax + mulq %rdi + movq %rdx, %rbx + addq %rcx, %rbx + adcq %r14, %r15 + adcq %r11, %r10 + adcq $0, %r8 + movq 56(%rsi), %rcx + movq 48(%rsi), %rdx + addq %r12, %rax + movq 40(%rsi), %rax + adcq 8(%rsi), %rbx + adcq 16(%rsi), %r15 + adcq 24(%rsi), %r10 + adcq 32(%rsi), %r8 + adcq $0, %rax + movq %rax, -48(%rsp) ## 8-byte Spill + adcq $0, %rdx + movq %rdx, %r12 + adcq $0, %rcx + movq %rcx, -72(%rsp) ## 8-byte Spill + sbbq %rdi, %rdi + andl $1, %edi + movq %rbx, %rsi + imulq %r9, %rsi + movq %rsi, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -56(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq %r13 + movq %rdx, %r14 + movq %rax, %r9 + movq %rsi, %rax + mulq %rbp + movq %rdx, %rcx + movq %rax, %rbp + movq %rsi, %rax + movq -32(%rsp), %r13 ## 8-byte Reload + mulq %r13 + movq %rdx, %rsi + addq %rbp, %rsi + adcq %r9, %rcx + adcq -56(%rsp), %r14 ## 8-byte Folded Reload + adcq $0, %r11 + addq %rbx, %rax + adcq %r15, %rsi + adcq %r10, %rcx + adcq %r8, %r14 + adcq -48(%rsp), %r11 ## 8-byte Folded Reload + adcq $0, %r12 + movq %r12, -48(%rsp) ## 8-byte Spill + movq -72(%rsp), %rbp ## 8-byte Reload + adcq $0, %rbp + adcq $0, %rdi + movq %rsi, %rbx + imulq -64(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, %rax + movq -40(%rsp), %r12 ## 8-byte Reload + mulq %r12 + movq %rdx, %r8 + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, -56(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %r9 + movq %rbx, %rax + mulq %r13 + movq %rdx, %rbx + addq %r9, %rbx + adcq -56(%rsp), %r15 ## 8-byte Folded Reload + adcq -72(%rsp), %r10 ## 8-byte Folded Reload + adcq $0, %r8 + addq %rsi, %rax + adcq %rcx, %rbx + adcq %r14, %r15 + adcq %r11, %r10 + adcq -48(%rsp), %r8 ## 8-byte Folded Reload + adcq $0, %rbp + movq %rbp, 
-72(%rsp) ## 8-byte Spill + adcq $0, %rdi + movq -64(%rsp), %rcx ## 8-byte Reload + imulq %rbx, %rcx + movq %rcx, %rax + mulq %r12 + movq %rdx, %r13 + movq %rax, -64(%rsp) ## 8-byte Spill + movq %rcx, %rax + movq -24(%rsp), %r14 ## 8-byte Reload + mulq %r14 + movq %rdx, %r11 + movq %rax, %r12 + movq %rcx, %rax + movq %rcx, %r9 + movq -16(%rsp), %rsi ## 8-byte Reload + mulq %rsi + movq %rdx, %rbp + movq %rax, %rcx + movq %r9, %rax + movq -32(%rsp), %r9 ## 8-byte Reload + mulq %r9 + addq %rcx, %rdx + adcq %r12, %rbp + adcq -64(%rsp), %r11 ## 8-byte Folded Reload + adcq $0, %r13 + addq %rbx, %rax + adcq %r15, %rdx + adcq %r10, %rbp + adcq %r8, %r11 + adcq -72(%rsp), %r13 ## 8-byte Folded Reload + adcq $0, %rdi + movq %rdx, %rax + subq %r9, %rax + movq %rbp, %rcx + sbbq %rsi, %rcx + movq %r11, %rbx + sbbq %r14, %rbx + movq %r13, %rsi + sbbq -40(%rsp), %rsi ## 8-byte Folded Reload + sbbq $0, %rdi + andl $1, %edi + cmovneq %r13, %rsi + testb %dil, %dil + cmovneq %rdx, %rax + movq -8(%rsp), %rdx ## 8-byte Reload + movq %rax, (%rdx) + cmovneq %rbp, %rcx + movq %rcx, 8(%rdx) + cmovneq %r11, %rbx + movq %rbx, 16(%rdx) + movq %rsi, 24(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_addPre4L + .p2align 4, 0x90 +_mcl_fp_addPre4L: ## @mcl_fp_addPre4L +## BB#0: + movq 24(%rdx), %r8 + movq 24(%rsi), %r9 + movq 16(%rdx), %rax + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rax + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %rax, 16(%rdi) + adcq %r8, %r9 + movq %r9, 24(%rdi) + sbbq %rax, %rax + andl $1, %eax + retq + + .globl _mcl_fp_subPre4L + .p2align 4, 0x90 +_mcl_fp_subPre4L: ## @mcl_fp_subPre4L +## BB#0: + movq 24(%rdx), %r8 + movq 24(%rsi), %r9 + movq 16(%rsi), %r10 + movq (%rsi), %rcx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rcx + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %r10 + movq %rcx, (%rdi) + movq %rsi, 8(%rdi) + movq %r10, 16(%rdi) + sbbq %r8, %r9 + movq %r9, 24(%rdi) + sbbq $0, %rax + andl $1, %eax + retq + + .globl _mcl_fp_shr1_4L + .p2align 4, 0x90 +_mcl_fp_shr1_4L: ## @mcl_fp_shr1_4L +## BB#0: + movq 24(%rsi), %rax + movq 16(%rsi), %rcx + movq (%rsi), %rdx + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rdx + movq %rdx, (%rdi) + shrdq $1, %rcx, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rax, %rcx + movq %rcx, 16(%rdi) + shrq %rax + movq %rax, 24(%rdi) + retq + + .globl _mcl_fp_add4L + .p2align 4, 0x90 +_mcl_fp_add4L: ## @mcl_fp_add4L +## BB#0: + movq 24(%rdx), %r10 + movq 24(%rsi), %r8 + movq 16(%rdx), %r9 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r9 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r9, 16(%rdi) + adcq %r10, %r8 + movq %r8, 24(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r9 + sbbq 24(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne LBB59_2 +## BB#1: ## %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r9, 16(%rdi) + movq %r8, 24(%rdi) +LBB59_2: ## %carry + retq + + .globl _mcl_fp_addNF4L + .p2align 4, 0x90 +_mcl_fp_addNF4L: ## @mcl_fp_addNF4L +## BB#0: + pushq %rbx + movq 24(%rdx), %r8 + movq 16(%rdx), %r9 + movq (%rdx), %r11 + movq 8(%rdx), %r10 + addq (%rsi), %r11 + adcq 8(%rsi), %r10 + adcq 16(%rsi), %r9 + adcq 24(%rsi), %r8 + movq %r11, %rsi + subq (%rcx), %rsi + movq %r10, %rdx + sbbq 8(%rcx), %rdx + movq %r9, %rax + sbbq 16(%rcx), %rax + movq %r8, %rbx + sbbq 24(%rcx), %rbx + testq %rbx, %rbx + cmovsq %r11, %rsi + movq %rsi, (%rdi) + cmovsq %r10, 
%rdx + movq %rdx, 8(%rdi) + cmovsq %r9, %rax + movq %rax, 16(%rdi) + cmovsq %r8, %rbx + movq %rbx, 24(%rdi) + popq %rbx + retq + + .globl _mcl_fp_sub4L + .p2align 4, 0x90 +_mcl_fp_sub4L: ## @mcl_fp_sub4L +## BB#0: + movq 24(%rdx), %r10 + movq 24(%rsi), %r8 + movq 16(%rsi), %r9 + movq (%rsi), %rax + movq 8(%rsi), %r11 + xorl %esi, %esi + subq (%rdx), %rax + sbbq 8(%rdx), %r11 + sbbq 16(%rdx), %r9 + movq %rax, (%rdi) + movq %r11, 8(%rdi) + movq %r9, 16(%rdi) + sbbq %r10, %r8 + movq %r8, 24(%rdi) + sbbq $0, %rsi + testb $1, %sil + jne LBB61_2 +## BB#1: ## %nocarry + retq +LBB61_2: ## %carry + movq 24(%rcx), %r10 + movq 8(%rcx), %rsi + movq 16(%rcx), %rdx + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %r11, %rsi + movq %rsi, 8(%rdi) + adcq %r9, %rdx + movq %rdx, 16(%rdi) + adcq %r8, %r10 + movq %r10, 24(%rdi) + retq + + .globl _mcl_fp_subNF4L + .p2align 4, 0x90 +_mcl_fp_subNF4L: ## @mcl_fp_subNF4L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movdqu (%rdx), %xmm0 + movdqu 16(%rdx), %xmm1 + pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] + movd %xmm2, %r8 + movdqu (%rsi), %xmm2 + movdqu 16(%rsi), %xmm3 + pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1] + movd %xmm4, %r15 + movd %xmm1, %r9 + movd %xmm3, %r11 + pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] + movd %xmm1, %r10 + pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1] + movd %xmm1, %r14 + movd %xmm0, %rdx + movd %xmm2, %r12 + subq %rdx, %r12 + sbbq %r10, %r14 + sbbq %r9, %r11 + sbbq %r8, %r15 + movq %r15, %rdx + sarq $63, %rdx + movq 24(%rcx), %rsi + andq %rdx, %rsi + movq 16(%rcx), %rax + andq %rdx, %rax + movq 8(%rcx), %rbx + andq %rdx, %rbx + andq (%rcx), %rdx + addq %r12, %rdx + movq %rdx, (%rdi) + adcq %r14, %rbx + movq %rbx, 8(%rdi) + adcq %r11, %rax + movq %rax, 16(%rdi) + adcq %r15, %rsi + movq %rsi, 24(%rdi) + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fpDbl_add4L + .p2align 4, 0x90 +_mcl_fpDbl_add4L: ## @mcl_fpDbl_add4L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r9 + movq 56(%rsi), %r8 + movq 48(%rdx), %r10 + movq 48(%rsi), %r12 + movq 40(%rdx), %r11 + movq 32(%rdx), %r14 + movq 24(%rdx), %r15 + movq 16(%rdx), %rbx + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rbx + movq 40(%rsi), %r13 + movq 24(%rsi), %rbp + movq 32(%rsi), %rsi + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %rbx, 16(%rdi) + adcq %r15, %rbp + movq %rbp, 24(%rdi) + adcq %r14, %rsi + adcq %r11, %r13 + adcq %r10, %r12 + adcq %r9, %r8 + sbbq %rax, %rax + andl $1, %eax + movq %rsi, %rdx + subq (%rcx), %rdx + movq %r13, %rbp + sbbq 8(%rcx), %rbp + movq %r12, %rbx + sbbq 16(%rcx), %rbx + movq %r8, %r9 + sbbq 24(%rcx), %r9 + sbbq $0, %rax + andl $1, %eax + cmovneq %rsi, %rdx + movq %rdx, 32(%rdi) + testb %al, %al + cmovneq %r13, %rbp + movq %rbp, 40(%rdi) + cmovneq %r12, %rbx + movq %rbx, 48(%rdi) + cmovneq %r8, %r9 + movq %r9, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sub4L + .p2align 4, 0x90 +_mcl_fpDbl_sub4L: ## @mcl_fpDbl_sub4L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r9 + movq 56(%rsi), %r8 + movq 48(%rdx), %r10 + movq 24(%rdx), %r11 + movq (%rsi), %rbx + xorl %eax, %eax + subq (%rdx), %rbx + movq %rbx, (%rdi) + movq 8(%rsi), %rbx + sbbq 8(%rdx), %rbx + movq %rbx, 8(%rdi) + movq 16(%rsi), %rbx + sbbq 16(%rdx), %rbx + movq %rbx, 16(%rdi) + movq 24(%rsi), %rbx + sbbq %r11, %rbx + movq 40(%rdx), %r11 
+ movq 32(%rdx), %rdx + movq %rbx, 24(%rdi) + movq 32(%rsi), %r12 + sbbq %rdx, %r12 + movq 48(%rsi), %r14 + movq 40(%rsi), %r15 + sbbq %r11, %r15 + sbbq %r10, %r14 + sbbq %r9, %r8 + movl $0, %edx + sbbq $0, %rdx + andl $1, %edx + movq (%rcx), %rsi + cmoveq %rax, %rsi + testb %dl, %dl + movq 16(%rcx), %rdx + cmoveq %rax, %rdx + movq 24(%rcx), %rbx + cmoveq %rax, %rbx + cmovneq 8(%rcx), %rax + addq %r12, %rsi + movq %rsi, 32(%rdi) + adcq %r15, %rax + movq %rax, 40(%rdi) + adcq %r14, %rdx + movq %rdx, 48(%rdi) + adcq %r8, %rbx + movq %rbx, 56(%rdi) + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_mulUnitPre5L + .p2align 4, 0x90 +_mcl_fp_mulUnitPre5L: ## @mcl_fp_mulUnitPre5L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rcx, %rax + mulq 32(%rsi) + movq %rdx, %r8 + movq %rax, %r9 + movq %rcx, %rax + mulq 24(%rsi) + movq %rdx, %r10 + movq %rax, %r11 + movq %rcx, %rax + mulq 16(%rsi) + movq %rdx, %r15 + movq %rax, %r14 + movq %rcx, %rax + mulq 8(%rsi) + movq %rdx, %rbx + movq %rax, %r12 + movq %rcx, %rax + mulq (%rsi) + movq %rax, (%rdi) + addq %r12, %rdx + movq %rdx, 8(%rdi) + adcq %r14, %rbx + movq %rbx, 16(%rdi) + adcq %r11, %r15 + movq %r15, 24(%rdi) + adcq %r9, %r10 + movq %r10, 32(%rdi) + adcq $0, %r8 + movq %r8, 40(%rdi) + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fpDbl_mulPre5L + .p2align 4, 0x90 +_mcl_fpDbl_mulPre5L: ## @mcl_fpDbl_mulPre5L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rsi, %r9 + movq %rdi, -48(%rsp) ## 8-byte Spill + movq (%r9), %rax + movq %rax, -80(%rsp) ## 8-byte Spill + movq (%rdx), %rbp + mulq %rbp + movq %rdx, -88(%rsp) ## 8-byte Spill + movq 16(%r9), %r13 + movq 24(%r9), %r15 + movq 32(%r9), %rbx + movq %rax, (%rdi) + movq %rbx, %rax + mulq %rbp + movq %rdx, %r11 + movq %rax, %r10 + movq %r15, %rax + mulq %rbp + movq %rdx, %r14 + movq %rax, %rdi + movq %r13, %rax + mulq %rbp + movq %rax, %rsi + movq %rdx, %rcx + movq 8(%r9), %r8 + movq %r8, %rax + mulq %rbp + movq %rdx, %rbp + movq %rax, %r12 + addq -88(%rsp), %r12 ## 8-byte Folded Reload + adcq %rsi, %rbp + adcq %rdi, %rcx + adcq %r10, %r14 + adcq $0, %r11 + movq -72(%rsp), %r10 ## 8-byte Reload + movq 8(%r10), %rdi + movq %rbx, %rax + mulq %rdi + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, %rsi + movq %r15, %rax + mulq %rdi + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, %r15 + movq %r13, %rax + mulq %rdi + movq %rdx, -56(%rsp) ## 8-byte Spill + movq %rax, %r13 + movq %r8, %rax + mulq %rdi + movq %rdx, %r8 + movq %rax, %rbx + movq -80(%rsp), %rax ## 8-byte Reload + mulq %rdi + addq %r12, %rax + movq -48(%rsp), %rdi ## 8-byte Reload + movq %rax, 8(%rdi) + adcq %rbp, %rbx + adcq %rcx, %r13 + adcq %r14, %r15 + adcq %r11, %rsi + sbbq %rcx, %rcx + andl $1, %ecx + addq %rdx, %rbx + adcq %r8, %r13 + adcq -56(%rsp), %r15 ## 8-byte Folded Reload + adcq -96(%rsp), %rsi ## 8-byte Folded Reload + adcq -88(%rsp), %rcx ## 8-byte Folded Reload + movq 32(%r9), %rax + movq %rax, -96(%rsp) ## 8-byte Spill + movq 16(%r10), %r12 + mulq %r12 + movq %rax, %r11 + movq %rdx, -80(%rsp) ## 8-byte Spill + movq 24(%r9), %rax + movq %rax, -64(%rsp) ## 8-byte Spill + mulq %r12 + movq %rax, %r10 + movq %rdx, -88(%rsp) ## 8-byte Spill + movq 16(%r9), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + mulq %r12 + movq %rax, %r8 + movq %rdx, -56(%rsp) ## 8-byte Spill + movq 8(%r9), %rdi + movq %rdi, %rax + mulq %r12 + movq %rax, %rbp + movq %rdx, -16(%rsp) ## 
8-byte Spill + movq (%r9), %r14 + movq %r14, %rax + mulq %r12 + movq %rdx, -40(%rsp) ## 8-byte Spill + addq %rbx, %rax + movq -48(%rsp), %rbx ## 8-byte Reload + movq %rax, 16(%rbx) + adcq %r13, %rbp + adcq %r15, %r8 + adcq %rsi, %r10 + adcq %rcx, %r11 + sbbq %rsi, %rsi + movq -72(%rsp), %r12 ## 8-byte Reload + movq 24(%r12), %rcx + movq -96(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, -24(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rcx + movq %rdx, -8(%rsp) ## 8-byte Spill + movq %rax, %r15 + movq %r14, %rax + mulq %rcx + movq %rdx, %r13 + movq %rax, %rdi + movq -64(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -64(%rsp) ## 8-byte Spill + movq %rax, %r14 + movq -32(%rsp), %rax ## 8-byte Reload + mulq %rcx + andl $1, %esi + addq -40(%rsp), %rbp ## 8-byte Folded Reload + adcq -16(%rsp), %r8 ## 8-byte Folded Reload + adcq -56(%rsp), %r10 ## 8-byte Folded Reload + adcq -88(%rsp), %r11 ## 8-byte Folded Reload + adcq -80(%rsp), %rsi ## 8-byte Folded Reload + addq %rdi, %rbp + movq %rbp, 24(%rbx) + adcq %r15, %r8 + adcq %rax, %r10 + adcq %r14, %r11 + adcq -24(%rsp), %rsi ## 8-byte Folded Reload + sbbq %rcx, %rcx + andl $1, %ecx + addq %r13, %r8 + adcq -8(%rsp), %r10 ## 8-byte Folded Reload + adcq %rdx, %r11 + adcq -64(%rsp), %rsi ## 8-byte Folded Reload + adcq -96(%rsp), %rcx ## 8-byte Folded Reload + movq 32(%r12), %rdi + movq %rdi, %rax + mulq 32(%r9) + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rax, %r15 + movq %rdi, %rax + mulq 24(%r9) + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rax, %r13 + movq %rdi, %rax + mulq 16(%r9) + movq %rdx, %r14 + movq %rax, %rbx + movq %rdi, %rax + mulq 8(%r9) + movq %rdx, %r12 + movq %rax, %rbp + movq %rdi, %rax + mulq (%r9) + addq %r8, %rax + movq -48(%rsp), %rdi ## 8-byte Reload + movq %rax, 32(%rdi) + adcq %r10, %rbp + adcq %r11, %rbx + adcq %rsi, %r13 + adcq %rcx, %r15 + sbbq %rax, %rax + andl $1, %eax + addq %rdx, %rbp + movq %rbp, 40(%rdi) + adcq %r12, %rbx + movq %rbx, 48(%rdi) + adcq %r14, %r13 + movq %r13, 56(%rdi) + adcq -80(%rsp), %r15 ## 8-byte Folded Reload + movq %r15, 64(%rdi) + adcq -72(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 72(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sqrPre5L + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre5L: ## @mcl_fpDbl_sqrPre5L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 32(%rsi), %r11 + movq (%rsi), %rbp + movq 8(%rsi), %r13 + movq %r11, %rax + mulq %r13 + movq %rax, -56(%rsp) ## 8-byte Spill + movq %rdx, -40(%rsp) ## 8-byte Spill + movq 24(%rsi), %rbx + movq %rbx, %rax + mulq %r13 + movq %rax, -64(%rsp) ## 8-byte Spill + movq %rdx, -72(%rsp) ## 8-byte Spill + movq 16(%rsi), %rcx + movq %rcx, %rax + mulq %r13 + movq %rdx, -48(%rsp) ## 8-byte Spill + movq %rax, -32(%rsp) ## 8-byte Spill + movq %r11, %rax + mulq %rbp + movq %rdx, %r8 + movq %rax, -16(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rbp + movq %rdx, %r9 + movq %rax, %r15 + movq %rcx, %rax + mulq %rbp + movq %rdx, %r10 + movq %rax, %r12 + movq %r13, %rax + mulq %r13 + movq %rdx, -8(%rsp) ## 8-byte Spill + movq %rax, %r14 + movq %r13, %rax + mulq %rbp + movq %rdx, %r13 + movq %rax, %rbx + movq %rbp, %rax + mulq %rbp + movq %rdi, -24(%rsp) ## 8-byte Spill + movq %rax, (%rdi) + addq %rbx, %rdx + adcq %r13, %r12 + adcq %r15, %r10 + adcq -16(%rsp), %r9 ## 8-byte Folded Reload + adcq $0, %r8 + addq %rbx, %rdx + movq %rdx, 8(%rdi) + adcq %r14, %r12 + adcq -32(%rsp), %r10 ## 8-byte Folded 
Reload + adcq -64(%rsp), %r9 ## 8-byte Folded Reload + adcq -56(%rsp), %r8 ## 8-byte Folded Reload + sbbq %rbp, %rbp + andl $1, %ebp + addq %r13, %r12 + adcq -8(%rsp), %r10 ## 8-byte Folded Reload + adcq -48(%rsp), %r9 ## 8-byte Folded Reload + adcq -72(%rsp), %r8 ## 8-byte Folded Reload + adcq -40(%rsp), %rbp ## 8-byte Folded Reload + movq %r11, %rax + mulq %rcx + movq %rax, %r11 + movq %rdx, -40(%rsp) ## 8-byte Spill + movq 24(%rsi), %rbx + movq %rbx, %rax + mulq %rcx + movq %rax, %r14 + movq %rdx, -72(%rsp) ## 8-byte Spill + movq 8(%rsi), %rax + movq %rax, -56(%rsp) ## 8-byte Spill + mulq %rcx + movq %rax, %r15 + movq %rdx, -64(%rsp) ## 8-byte Spill + movq (%rsi), %rax + movq %rax, -48(%rsp) ## 8-byte Spill + mulq %rcx + movq %rdx, -32(%rsp) ## 8-byte Spill + movq %rax, %rdi + movq %rcx, %rax + mulq %rcx + movq %rax, %r13 + addq %r12, %rdi + movq -24(%rsp), %r12 ## 8-byte Reload + movq %rdi, 16(%r12) + adcq %r10, %r15 + adcq %r9, %r13 + adcq %r8, %r14 + adcq %rbp, %r11 + sbbq %rdi, %rdi + andl $1, %edi + addq -32(%rsp), %r15 ## 8-byte Folded Reload + adcq -64(%rsp), %r13 ## 8-byte Folded Reload + adcq %rdx, %r14 + adcq -72(%rsp), %r11 ## 8-byte Folded Reload + adcq -40(%rsp), %rdi ## 8-byte Folded Reload + movq -56(%rsp), %rax ## 8-byte Reload + mulq %rbx + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rax, %r8 + movq -48(%rsp), %rax ## 8-byte Reload + mulq %rbx + movq %rax, %rbp + movq %rdx, -56(%rsp) ## 8-byte Spill + movq 32(%rsi), %rcx + movq %rcx, %rax + mulq %rbx + movq %rax, %r9 + movq %rdx, -48(%rsp) ## 8-byte Spill + movq 16(%rsi), %rax + movq %rax, -40(%rsp) ## 8-byte Spill + mulq %rbx + movq %rdx, -64(%rsp) ## 8-byte Spill + movq %rax, %r10 + movq %rbx, %rax + mulq %rbx + movq %rax, %rbx + addq %r15, %rbp + movq %rbp, 24(%r12) + adcq %r13, %r8 + adcq %r14, %r10 + adcq %r11, %rbx + adcq %rdi, %r9 + sbbq %r12, %r12 + andl $1, %r12d + addq -56(%rsp), %r8 ## 8-byte Folded Reload + adcq -72(%rsp), %r10 ## 8-byte Folded Reload + adcq -64(%rsp), %rbx ## 8-byte Folded Reload + adcq %rdx, %r9 + adcq -48(%rsp), %r12 ## 8-byte Folded Reload + movq %rcx, %rax + mulq 24(%rsi) + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rax, %rbp + movq %rcx, %rax + mulq 8(%rsi) + movq %rdx, %r14 + movq %rax, %rdi + movq %rcx, %rax + mulq (%rsi) + movq %rdx, %r13 + movq %rax, %rsi + movq %rcx, %rax + mulq %rcx + movq %rdx, %r15 + movq %rax, %r11 + movq -40(%rsp), %rax ## 8-byte Reload + mulq %rcx + addq %r8, %rsi + movq -24(%rsp), %r8 ## 8-byte Reload + movq %rsi, 32(%r8) + adcq %r10, %rdi + adcq %rbx, %rax + adcq %r9, %rbp + adcq %r12, %r11 + sbbq %rcx, %rcx + andl $1, %ecx + addq %r13, %rdi + movq %r8, %rsi + movq %rdi, 40(%rsi) + adcq %r14, %rax + movq %rax, 48(%rsi) + adcq %rdx, %rbp + movq %rbp, 56(%rsi) + adcq -72(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, 64(%rsi) + adcq %r15, %rcx + movq %rcx, 72(%rsi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mont5L + .p2align 4, 0x90 +_mcl_fp_mont5L: ## @mcl_fp_mont5L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + pushq %rax + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rdi, (%rsp) ## 8-byte Spill + movq 32(%rsi), %rax + movq %rax, -104(%rsp) ## 8-byte Spill + movq (%rdx), %rdi + mulq %rdi + movq %rax, %r8 + movq %rdx, %r15 + movq 24(%rsi), %rax + movq %rax, -80(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r10 + movq %rdx, %rbx + movq 16(%rsi), %rax + movq %rax, -88(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r11 + movq %rdx, %r14 + movq (%rsi), %rbp 
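+## _mcl_fp_mont5L (this routine) is a word-serial, CIOS-style Montgomery
+## multiply: for each multiplier limb b_i, t += a*b_i, then q = t0 * rp with
+## rp = -p^{-1} mod 2^64 loaded from -8(%rcx), t = (t + q*p) >> 64, and a
+## final conditional subtract of p. The mulPre5L/sqrPre5L routines above are
+## plain schoolbook double-width products with no reduction; as leaf functions
+## they spill temporaries into the 128-byte red zone below %rsp.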
+ movq %rbp, -24(%rsp) ## 8-byte Spill + movq 8(%rsi), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + mulq %rdi + movq %rdx, %r12 + movq %rax, %rsi + movq %rbp, %rax + mulq %rdi + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rdx, %r9 + addq %rsi, %r9 + adcq %r11, %r12 + adcq %r10, %r14 + adcq %r8, %rbx + movq %rbx, -120(%rsp) ## 8-byte Spill + adcq $0, %r15 + movq %r15, -112(%rsp) ## 8-byte Spill + movq -8(%rcx), %rdx + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rax, %rbp + imulq %rdx, %rbp + movq 32(%rcx), %rdx + movq %rdx, -40(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rax, -8(%rsp) ## 8-byte Spill + movq %rdx, %r8 + movq 24(%rcx), %rdx + movq %rdx, -48(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rax, %r13 + movq %rdx, %rsi + movq 16(%rcx), %rdx + movq %rdx, -56(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rax, %r11 + movq %rdx, %rbx + movq (%rcx), %rdi + movq %rdi, -16(%rsp) ## 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, -64(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %rcx + movq %rdx, %r10 + movq %rax, %r15 + movq %rbp, %rax + mulq %rdi + movq %rdx, %rcx + addq %r15, %rcx + adcq %r11, %r10 + adcq %r13, %rbx + adcq -8(%rsp), %rsi ## 8-byte Folded Reload + adcq $0, %r8 + addq -128(%rsp), %rax ## 8-byte Folded Reload + adcq %r9, %rcx + adcq %r12, %r10 + adcq %r14, %rbx + adcq -120(%rsp), %rsi ## 8-byte Folded Reload + adcq -112(%rsp), %r8 ## 8-byte Folded Reload + sbbq %r15, %r15 + andl $1, %r15d + movq -96(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdi + movq %rdi, %rax + mulq -104(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, %r12 + movq %rdi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rax, %rdi + movq %rdx, %r11 + addq %r12, %r11 + adcq -128(%rsp), %r9 ## 8-byte Folded Reload + adcq -120(%rsp), %rbp ## 8-byte Folded Reload + adcq -112(%rsp), %r14 ## 8-byte Folded Reload + adcq $0, %r13 + addq %rcx, %rdi + adcq %r10, %r11 + adcq %rbx, %r9 + adcq %rsi, %rbp + adcq %r8, %r14 + adcq %r15, %r13 + sbbq %rax, %rax + andl $1, %eax + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rdi, %rbx + imulq -72(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r15 + movq %rbx, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r12 + movq %rbx, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + addq %r12, %rbx + adcq %r15, %rcx + adcq -128(%rsp), %rsi ## 8-byte Folded Reload + adcq -120(%rsp), %r8 ## 8-byte Folded Reload + adcq $0, %r10 + addq %rdi, %rax + adcq %r11, %rbx + adcq %r9, %rcx + adcq %rbp, %rsi + adcq %r14, %r8 + adcq %r13, %r10 + adcq $0, -112(%rsp) ## 8-byte Folded Spill + movq -96(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rbp + movq %rbp, %rax + mulq -104(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, 
%r9 + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, %r14 + movq %rbp, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r12 + movq %rbp, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rax, %r15 + movq %rdx, %rbp + addq %r12, %rbp + adcq %r14, %rdi + adcq -128(%rsp), %r11 ## 8-byte Folded Reload + adcq -120(%rsp), %r9 ## 8-byte Folded Reload + adcq $0, %r13 + addq %rbx, %r15 + adcq %rcx, %rbp + adcq %rsi, %rdi + adcq %r8, %r11 + adcq %r10, %r9 + adcq -112(%rsp), %r13 ## 8-byte Folded Reload + sbbq %rax, %rax + andl $1, %eax + movq %rax, -112(%rsp) ## 8-byte Spill + movq %r15, %rsi + imulq -72(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, -8(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r8 + movq %rsi, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + addq %r8, %r12 + adcq -8(%rsp), %rbx ## 8-byte Folded Reload + adcq -128(%rsp), %rcx ## 8-byte Folded Reload + adcq -120(%rsp), %r14 ## 8-byte Folded Reload + adcq $0, %r10 + addq %r15, %rax + adcq %rbp, %r12 + adcq %rdi, %rbx + adcq %r11, %rcx + adcq %r9, %r14 + adcq %r13, %r10 + adcq $0, -112(%rsp) ## 8-byte Folded Spill + movq -96(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rsi + movq %rsi, %rax + mulq -104(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r15 + movq %rsi, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r13 + movq %rsi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rax, %r11 + movq %rdx, %rsi + addq %r13, %rsi + adcq %r15, %rdi + adcq -128(%rsp), %rbp ## 8-byte Folded Reload + adcq -120(%rsp), %r9 ## 8-byte Folded Reload + adcq $0, %r8 + addq %r12, %r11 + adcq %rbx, %rsi + adcq %rcx, %rdi + adcq %r14, %rbp + adcq %r10, %r9 + adcq -112(%rsp), %r8 ## 8-byte Folded Reload + sbbq %rax, %rax + andl $1, %eax + movq %rax, -112(%rsp) ## 8-byte Spill + movq %r11, %rbx + imulq -72(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %r14 + movq %rbx, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r12 + movq %rbx, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + addq %r12, %rbx + adcq %r14, %rcx + adcq -128(%rsp), %r15 ## 8-byte Folded Reload + adcq -120(%rsp), %r10 ## 8-byte Folded Reload + adcq $0, %r13 + addq %r11, %rax + adcq %rsi, %rbx + adcq %rdi, %rcx + adcq %rbp, %r15 + adcq %r9, %r10 + adcq %r8, %r13 + movq -112(%rsp), %r8 ## 8-byte Reload + adcq $0, %r8 + movq -96(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rsi + movq %rsi, %rax + mulq -104(%rsp) ## 8-byte Folded Reload + movq %rdx, 
-96(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %rdi + movq %rsi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rax, %r14 + movq %rdx, %rbp + addq %rdi, %rbp + adcq -88(%rsp), %r12 ## 8-byte Folded Reload + adcq -80(%rsp), %r11 ## 8-byte Folded Reload + adcq -104(%rsp), %r9 ## 8-byte Folded Reload + movq -96(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %rbx, %r14 + adcq %rcx, %rbp + adcq %r15, %r12 + adcq %r10, %r11 + adcq %r13, %r9 + adcq %r8, %rax + movq %rax, -96(%rsp) ## 8-byte Spill + sbbq %rcx, %rcx + movq -72(%rsp), %rdi ## 8-byte Reload + imulq %r14, %rdi + movq %rdi, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r13 + movq %rdi, %rax + movq %rdi, %r15 + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r10 + movq %r15, %rax + movq -16(%rsp), %r15 ## 8-byte Reload + mulq %r15 + addq %r10, %rdx + adcq %r13, %rdi + adcq -104(%rsp), %rsi ## 8-byte Folded Reload + adcq -72(%rsp), %rbx ## 8-byte Folded Reload + adcq $0, %r8 + andl $1, %ecx + addq %r14, %rax + adcq %rbp, %rdx + adcq %r12, %rdi + adcq %r11, %rsi + adcq %r9, %rbx + adcq -96(%rsp), %r8 ## 8-byte Folded Reload + adcq $0, %rcx + movq %rdx, %rax + subq %r15, %rax + movq %rdi, %rbp + sbbq -64(%rsp), %rbp ## 8-byte Folded Reload + movq %rsi, %r9 + sbbq -56(%rsp), %r9 ## 8-byte Folded Reload + movq %rbx, %r10 + sbbq -48(%rsp), %r10 ## 8-byte Folded Reload + movq %r8, %r11 + sbbq -40(%rsp), %r11 ## 8-byte Folded Reload + sbbq $0, %rcx + andl $1, %ecx + cmovneq %rbx, %r10 + testb %cl, %cl + cmovneq %rdx, %rax + movq (%rsp), %rcx ## 8-byte Reload + movq %rax, (%rcx) + cmovneq %rdi, %rbp + movq %rbp, 8(%rcx) + cmovneq %rsi, %r9 + movq %r9, 16(%rcx) + movq %r10, 24(%rcx) + cmovneq %r8, %r11 + movq %r11, 32(%rcx) + addq $8, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF5L + .p2align 4, 0x90 +_mcl_fp_montNF5L: ## @mcl_fp_montNF5L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rdi, -8(%rsp) ## 8-byte Spill + movq 32(%rsi), %rax + movq %rax, -112(%rsp) ## 8-byte Spill + movq (%rdx), %rbp + mulq %rbp + movq %rax, %r8 + movq %rdx, %r13 + movq 24(%rsi), %rax + movq %rax, -96(%rsp) ## 8-byte Spill + mulq %rbp + movq %rax, %r10 + movq %rdx, %r11 + movq 16(%rsi), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + mulq %rbp + movq %rax, %r15 + movq %rdx, %r9 + movq (%rsi), %rdi + movq %rdi, -48(%rsp) ## 8-byte Spill + movq 8(%rsi), %rax + movq %rax, -56(%rsp) ## 8-byte Spill + mulq %rbp + movq %rdx, %r12 + movq %rax, %rbx + movq %rdi, %rax + mulq %rbp + movq %rax, %r14 + movq %rdx, %rbp + addq %rbx, %rbp + adcq %r15, %r12 + adcq %r10, %r9 + adcq %r8, %r11 + adcq $0, %r13 + movq -8(%rcx), %rax + movq %rax, -88(%rsp) ## 8-byte Spill + movq %r14, %rsi + imulq %rax, %rsi + movq 32(%rcx), %rdx + movq %rdx, -64(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq %rdx + movq %rax, %r10 
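+## _mcl_fp_montNF5L (this routine) runs the same CIOS recurrence for moduli
+## with the top bit clear: intermediate sums cannot overflow five limbs, so
+## the tail picks between t and t-p with a sign test (sarq $63 / cmovsq)
+## instead of carrying a borrow flag. _mcl_fp_montRed5L further below is the
+## standalone Montgomery reduction of a 10-limb product, reusing the
+## q = t0 * rp step once per limb.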
+ movq %rdx, -120(%rsp) ## 8-byte Spill + movq 24(%rcx), %rdx + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq %rdx + movq %rax, %rbx + movq %rdx, -128(%rsp) ## 8-byte Spill + movq 16(%rcx), %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq %rdx + movq %rax, %r8 + movq %rdx, -16(%rsp) ## 8-byte Spill + movq (%rcx), %rdi + movq %rdi, -40(%rsp) ## 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, -24(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq %rcx + movq %rdx, %r15 + movq %rax, %rcx + movq %rsi, %rax + mulq %rdi + addq %r14, %rax + adcq %rbp, %rcx + adcq %r12, %r8 + adcq %r9, %rbx + adcq %r11, %r10 + adcq $0, %r13 + addq %rdx, %rcx + adcq %r15, %r8 + adcq -16(%rsp), %rbx ## 8-byte Folded Reload + adcq -128(%rsp), %r10 ## 8-byte Folded Reload + adcq -120(%rsp), %r13 ## 8-byte Folded Reload + movq -104(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rsi + movq %rsi, %rax + mulq -112(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -96(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, %rdi + movq %rsi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, %r14 + movq %rsi, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rax, %rsi + movq %rdx, %r15 + addq %r14, %r15 + adcq %rdi, %r11 + adcq -128(%rsp), %r9 ## 8-byte Folded Reload + adcq -120(%rsp), %rbp ## 8-byte Folded Reload + adcq $0, %r12 + addq %rcx, %rsi + adcq %r8, %r15 + adcq %rbx, %r11 + adcq %r10, %r9 + adcq %r13, %rbp + adcq $0, %r12 + movq %rsi, %rdi + imulq -88(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, %r13 + movq %rdi, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, %r8 + movq %rdi, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r14 + movq %rdi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r10 + movq %rdi, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + addq %rsi, %rax + adcq %r15, %r10 + adcq %r11, %r14 + adcq %r9, %r8 + adcq %rbp, %r13 + adcq $0, %r12 + addq %rdx, %r10 + adcq %rbx, %r14 + adcq %rcx, %r8 + adcq -128(%rsp), %r13 ## 8-byte Folded Reload + adcq -120(%rsp), %r12 ## 8-byte Folded Reload + movq -104(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rsi + movq %rsi, %rax + mulq -112(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -96(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %rbx + movq %rsi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %rbp + movq %rsi, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rax, %r11 + movq %rdx, %rsi + addq %rbp, %rsi + adcq %rbx, %rcx + adcq -128(%rsp), %rdi ## 8-byte Folded Reload + adcq -120(%rsp), %r9 ## 8-byte Folded Reload + adcq $0, %r15 + addq %r10, %r11 + adcq %r14, %rsi + adcq %r8, %rcx + adcq %r13, %rdi + adcq %r12, %r9 + adcq $0, %r15 + movq %r11, %rbx + imulq -88(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, %r13 + movq %rbx, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, 
-128(%rsp) ## 8-byte Spill + movq %rax, %r8 + movq %rbx, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, %r10 + movq %rbx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %rbp + movq %rbx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + addq %r11, %rax + adcq %rsi, %rbp + adcq %rcx, %r10 + adcq %rdi, %r8 + adcq %r9, %r13 + adcq $0, %r15 + addq %rdx, %rbp + adcq %r12, %r10 + adcq %r14, %r8 + adcq -128(%rsp), %r13 ## 8-byte Folded Reload + adcq -120(%rsp), %r15 ## 8-byte Folded Reload + movq -104(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rsi + movq %rsi, %rax + mulq -112(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -96(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %rbx + movq %rsi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r12 + movq %rsi, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rax, %r14 + movq %rdx, %rsi + addq %r12, %rsi + adcq %rbx, %rcx + adcq -128(%rsp), %rdi ## 8-byte Folded Reload + adcq -120(%rsp), %r9 ## 8-byte Folded Reload + adcq $0, %r11 + addq %rbp, %r14 + adcq %r10, %rsi + adcq %r8, %rcx + adcq %r13, %rdi + adcq %r15, %r9 + adcq $0, %r11 + movq %r14, %rbx + imulq -88(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, %r13 + movq %rbx, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, %r8 + movq %rbx, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %r10 + movq %rbx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %rbp + movq %rbx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + addq %r14, %rax + adcq %rsi, %rbp + adcq %rcx, %r10 + adcq %rdi, %r8 + adcq %r9, %r13 + adcq $0, %r11 + addq %rdx, %rbp + adcq %r12, %r10 + adcq %r15, %r8 + adcq -128(%rsp), %r13 ## 8-byte Folded Reload + adcq -120(%rsp), %r11 ## 8-byte Folded Reload + movq -104(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rcx + movq %rcx, %rax + mulq -112(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -96(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %rsi + movq %rcx, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rax, %r12 + movq %rdx, %rdi + addq %rsi, %rdi + adcq -96(%rsp), %r15 ## 8-byte Folded Reload + adcq -112(%rsp), %r14 ## 8-byte Folded Reload + adcq -104(%rsp), %r9 ## 8-byte Folded Reload + adcq $0, %rbx + addq %rbp, %r12 + adcq %r10, %rdi + adcq %r8, %r15 + adcq %r13, %r14 + adcq %r11, %r9 + adcq $0, %rbx + movq -88(%rsp), %r8 ## 8-byte Reload + imulq %r12, %r8 + movq %r8, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, %rcx + movq %r8, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, %rbp + movq %r8, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, %rsi + movq %r8, %rax + movq %r8, %r13 + movq -40(%rsp), %r10 ## 8-byte Reload + mulq %r10 + 
movq %rdx, %r11 + movq %rax, %r8 + movq %r13, %rax + movq -24(%rsp), %r13 ## 8-byte Reload + mulq %r13 + addq %r12, %r8 + adcq %rdi, %rax + adcq %r15, %rsi + adcq %r14, %rbp + adcq %r9, %rcx + adcq $0, %rbx + addq %r11, %rax + adcq %rdx, %rsi + adcq -112(%rsp), %rbp ## 8-byte Folded Reload + adcq -104(%rsp), %rcx ## 8-byte Folded Reload + adcq -88(%rsp), %rbx ## 8-byte Folded Reload + movq %rax, %r11 + subq %r10, %r11 + movq %rsi, %r10 + sbbq %r13, %r10 + movq %rbp, %r8 + sbbq -80(%rsp), %r8 ## 8-byte Folded Reload + movq %rcx, %r9 + sbbq -72(%rsp), %r9 ## 8-byte Folded Reload + movq %rbx, %rdx + sbbq -64(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, %rdi + sarq $63, %rdi + cmovsq %rax, %r11 + movq -8(%rsp), %rax ## 8-byte Reload + movq %r11, (%rax) + cmovsq %rsi, %r10 + movq %r10, 8(%rax) + cmovsq %rbp, %r8 + movq %r8, 16(%rax) + cmovsq %rcx, %r9 + movq %r9, 24(%rax) + cmovsq %rbx, %rdx + movq %rdx, 32(%rax) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed5L + .p2align 4, 0x90 +_mcl_fp_montRed5L: ## @mcl_fp_montRed5L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rdi, -8(%rsp) ## 8-byte Spill + movq -8(%rcx), %rax + movq %rax, -104(%rsp) ## 8-byte Spill + movq (%rsi), %r9 + movq %r9, %rdi + imulq %rax, %rdi + movq 32(%rcx), %rdx + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rdx + movq %rax, %r8 + movq %rdx, %r13 + movq 24(%rcx), %rdx + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rdx + movq %rax, %r11 + movq %rdx, %r10 + movq 16(%rcx), %rdx + movq %rdx, -56(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rdx + movq %rax, %r14 + movq %rdx, %r15 + movq (%rcx), %rbp + movq %rbp, -40(%rsp) ## 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, -72(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rcx + movq %rdx, %r12 + movq %rax, %rbx + movq %rdi, %rax + mulq %rbp + movq %rdx, %rcx + addq %rbx, %rcx + adcq %r14, %r12 + adcq %r11, %r15 + adcq %r8, %r10 + adcq $0, %r13 + addq %r9, %rax + movq 72(%rsi), %rax + movq 64(%rsi), %rdx + adcq 8(%rsi), %rcx + adcq 16(%rsi), %r12 + adcq 24(%rsi), %r15 + adcq 32(%rsi), %r10 + adcq 40(%rsi), %r13 + movq %r13, -112(%rsp) ## 8-byte Spill + movq 56(%rsi), %rdi + movq 48(%rsi), %rsi + adcq $0, %rsi + movq %rsi, -24(%rsp) ## 8-byte Spill + adcq $0, %rdi + movq %rdi, -64(%rsp) ## 8-byte Spill + adcq $0, %rdx + movq %rdx, -96(%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, -48(%rsp) ## 8-byte Spill + sbbq %r8, %r8 + andl $1, %r8d + movq %rcx, %rsi + movq -104(%rsp), %r9 ## 8-byte Reload + imulq %r9, %rsi + movq %rsi, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -32(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -16(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %rdi + movq %rsi, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %rbp + movq %rsi, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + addq %rbp, %rsi + adcq %rdi, %rbx + adcq -16(%rsp), %r13 ## 8-byte Folded Reload + adcq -32(%rsp), %r14 ## 8-byte Folded Reload + adcq $0, %r11 + addq %rcx, %rax + adcq %r12, %rsi + adcq %r15, %rbx + adcq %r10, %r13 + adcq -112(%rsp), %r14 ## 8-byte Folded Reload + adcq -24(%rsp), %r11 ## 8-byte Folded Reload + adcq $0, -64(%rsp) ## 8-byte Folded Spill + adcq $0, -96(%rsp) ## 8-byte Folded Spill + adcq $0, 
-48(%rsp) ## 8-byte Folded Spill + adcq $0, %r8 + movq %rsi, %rcx + imulq %r9, %rcx + movq %rcx, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, -24(%rsp) ## 8-byte Spill + movq %rcx, %rax + movq -56(%rsp), %r9 ## 8-byte Reload + mulq %r9 + movq %rdx, %r15 + movq %rax, -32(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %rdi + movq %rcx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + addq %rdi, %rcx + adcq -32(%rsp), %r12 ## 8-byte Folded Reload + adcq -24(%rsp), %r15 ## 8-byte Folded Reload + adcq -112(%rsp), %r10 ## 8-byte Folded Reload + adcq $0, %rbp + addq %rsi, %rax + adcq %rbx, %rcx + adcq %r13, %r12 + adcq %r14, %r15 + adcq %r11, %r10 + adcq -64(%rsp), %rbp ## 8-byte Folded Reload + adcq $0, -96(%rsp) ## 8-byte Folded Spill + adcq $0, -48(%rsp) ## 8-byte Folded Spill + adcq $0, %r8 + movq %rcx, %rsi + imulq -104(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -64(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq %r9 + movq %rdx, %r13 + movq %rax, %rbx + movq %rsi, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, %rdi + movq %rsi, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + addq %rdi, %rsi + adcq %rbx, %r9 + adcq -112(%rsp), %r13 ## 8-byte Folded Reload + adcq -64(%rsp), %r14 ## 8-byte Folded Reload + adcq $0, %r11 + addq %rcx, %rax + adcq %r12, %rsi + adcq %r15, %r9 + adcq %r10, %r13 + adcq %rbp, %r14 + adcq -96(%rsp), %r11 ## 8-byte Folded Reload + adcq $0, -48(%rsp) ## 8-byte Folded Spill + adcq $0, %r8 + movq -104(%rsp), %rdi ## 8-byte Reload + imulq %rsi, %rdi + movq %rdi, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r15 + movq %rdi, %rax + movq %rdi, %r10 + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r12 + movq %r10, %rax + movq -40(%rsp), %r10 ## 8-byte Reload + mulq %r10 + addq %r12, %rdx + adcq %r15, %rdi + adcq -104(%rsp), %rbx ## 8-byte Folded Reload + adcq -96(%rsp), %rcx ## 8-byte Folded Reload + adcq $0, %rbp + addq %rsi, %rax + adcq %r9, %rdx + adcq %r13, %rdi + adcq %r14, %rbx + adcq %r11, %rcx + adcq -48(%rsp), %rbp ## 8-byte Folded Reload + adcq $0, %r8 + movq %rdx, %rax + subq %r10, %rax + movq %rdi, %rsi + sbbq -72(%rsp), %rsi ## 8-byte Folded Reload + movq %rbx, %r9 + sbbq -56(%rsp), %r9 ## 8-byte Folded Reload + movq %rcx, %r10 + sbbq -88(%rsp), %r10 ## 8-byte Folded Reload + movq %rbp, %r11 + sbbq -80(%rsp), %r11 ## 8-byte Folded Reload + sbbq $0, %r8 + andl $1, %r8d + cmovneq %rbp, %r11 + testb %r8b, %r8b + cmovneq %rdx, %rax + movq -8(%rsp), %rdx ## 8-byte Reload + movq %rax, (%rdx) + cmovneq %rdi, %rsi + movq %rsi, 8(%rdx) + cmovneq %rbx, %r9 + movq %r9, 16(%rdx) + cmovneq %rcx, %r10 + movq %r10, 24(%rdx) + movq %r11, 32(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_addPre5L + .p2align 4, 0x90 +_mcl_fp_addPre5L: ## @mcl_fp_addPre5L +## BB#0: + movq 
32(%rdx), %r8 + movq 24(%rdx), %r9 + movq 24(%rsi), %r11 + movq 32(%rsi), %r10 + movq 16(%rdx), %rcx + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rcx + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %rcx, 16(%rdi) + adcq %r9, %r11 + movq %r11, 24(%rdi) + adcq %r8, %r10 + movq %r10, 32(%rdi) + sbbq %rax, %rax + andl $1, %eax + retq + + .globl _mcl_fp_subPre5L + .p2align 4, 0x90 +_mcl_fp_subPre5L: ## @mcl_fp_subPre5L +## BB#0: + pushq %rbx + movq 32(%rsi), %r10 + movq 24(%rdx), %r8 + movq 32(%rdx), %r9 + movq 24(%rsi), %r11 + movq 16(%rsi), %rcx + movq (%rsi), %rbx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rbx + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %rcx + movq %rbx, (%rdi) + movq %rsi, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r8, %r11 + movq %r11, 24(%rdi) + sbbq %r9, %r10 + movq %r10, 32(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + retq + + .globl _mcl_fp_shr1_5L + .p2align 4, 0x90 +_mcl_fp_shr1_5L: ## @mcl_fp_shr1_5L +## BB#0: + movq 32(%rsi), %r8 + movq 24(%rsi), %rcx + movq 16(%rsi), %rdx + movq (%rsi), %rax + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rax + movq %rax, (%rdi) + shrdq $1, %rdx, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rcx, %rdx + movq %rdx, 16(%rdi) + shrdq $1, %r8, %rcx + movq %rcx, 24(%rdi) + shrq %r8 + movq %r8, 32(%rdi) + retq + + .globl _mcl_fp_add5L + .p2align 4, 0x90 +_mcl_fp_add5L: ## @mcl_fp_add5L +## BB#0: + pushq %rbx + movq 32(%rdx), %r11 + movq 24(%rdx), %rbx + movq 24(%rsi), %r9 + movq 32(%rsi), %r8 + movq 16(%rdx), %r10 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r10 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + adcq %rbx, %r9 + movq %r9, 24(%rdi) + adcq %r11, %r8 + movq %r8, 32(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r10 + sbbq 24(%rcx), %r9 + sbbq 32(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne LBB74_2 +## BB#1: ## %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + movq %r9, 24(%rdi) + movq %r8, 32(%rdi) +LBB74_2: ## %carry + popq %rbx + retq + + .globl _mcl_fp_addNF5L + .p2align 4, 0x90 +_mcl_fp_addNF5L: ## @mcl_fp_addNF5L +## BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 32(%rdx), %r8 + movq 24(%rdx), %r9 + movq 16(%rdx), %r10 + movq (%rdx), %r14 + movq 8(%rdx), %r11 + addq (%rsi), %r14 + adcq 8(%rsi), %r11 + adcq 16(%rsi), %r10 + adcq 24(%rsi), %r9 + adcq 32(%rsi), %r8 + movq %r14, %rsi + subq (%rcx), %rsi + movq %r11, %rdx + sbbq 8(%rcx), %rdx + movq %r10, %rbx + sbbq 16(%rcx), %rbx + movq %r9, %r15 + sbbq 24(%rcx), %r15 + movq %r8, %rax + sbbq 32(%rcx), %rax + movq %rax, %rcx + sarq $63, %rcx + cmovsq %r14, %rsi + movq %rsi, (%rdi) + cmovsq %r11, %rdx + movq %rdx, 8(%rdi) + cmovsq %r10, %rbx + movq %rbx, 16(%rdi) + cmovsq %r9, %r15 + movq %r15, 24(%rdi) + cmovsq %r8, %rax + movq %rax, 32(%rdi) + popq %rbx + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_sub5L + .p2align 4, 0x90 +_mcl_fp_sub5L: ## @mcl_fp_sub5L +## BB#0: + pushq %r14 + pushq %rbx + movq 32(%rsi), %r8 + movq 24(%rdx), %r11 + movq 32(%rdx), %r14 + movq 24(%rsi), %r9 + movq 16(%rsi), %r10 + movq (%rsi), %rax + movq 8(%rsi), %rsi + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %rsi + sbbq 16(%rdx), %r10 + movq %rax, (%rdi) + movq %rsi, 8(%rdi) + movq %r10, 16(%rdi) + sbbq %r11, %r9 + movq %r9, 24(%rdi) + sbbq %r14, %r8 + movq %r8, 32(%rdi) + sbbq $0, %rbx + testb $1, %bl + je LBB76_2 +## BB#1: ## %carry + movq 32(%rcx), %r11 + movq 24(%rcx), %r14 + movq 
8(%rcx), %rdx + movq 16(%rcx), %rbx + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %rsi, %rdx + movq %rdx, 8(%rdi) + adcq %r10, %rbx + movq %rbx, 16(%rdi) + adcq %r9, %r14 + movq %r14, 24(%rdi) + adcq %r8, %r11 + movq %r11, 32(%rdi) +LBB76_2: ## %nocarry + popq %rbx + popq %r14 + retq + + .globl _mcl_fp_subNF5L + .p2align 4, 0x90 +_mcl_fp_subNF5L: ## @mcl_fp_subNF5L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 32(%rsi), %r13 + movdqu (%rdx), %xmm0 + movdqu 16(%rdx), %xmm1 + pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] + movd %xmm2, %r10 + movdqu (%rsi), %xmm2 + movdqu 16(%rsi), %xmm3 + pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1] + movd %xmm4, %r8 + movd %xmm1, %r11 + movd %xmm3, %r9 + pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] + movd %xmm1, %r14 + pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1] + movd %xmm1, %r15 + movd %xmm0, %rbx + movd %xmm2, %r12 + subq %rbx, %r12 + sbbq %r14, %r15 + sbbq %r11, %r9 + sbbq %r10, %r8 + sbbq 32(%rdx), %r13 + movq %r13, %rdx + sarq $63, %rdx + movq %rdx, %rbx + shldq $1, %r13, %rbx + movq 8(%rcx), %rsi + andq %rbx, %rsi + andq (%rcx), %rbx + movq 32(%rcx), %r10 + andq %rdx, %r10 + movq 24(%rcx), %rax + andq %rdx, %rax + rolq %rdx + andq 16(%rcx), %rdx + addq %r12, %rbx + movq %rbx, (%rdi) + adcq %r15, %rsi + movq %rsi, 8(%rdi) + adcq %r9, %rdx + movq %rdx, 16(%rdi) + adcq %r8, %rax + movq %rax, 24(%rdi) + adcq %r13, %r10 + movq %r10, 32(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fpDbl_add5L + .p2align 4, 0x90 +_mcl_fpDbl_add5L: ## @mcl_fpDbl_add5L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 72(%rdx), %rax + movq %rax, -8(%rsp) ## 8-byte Spill + movq 64(%rdx), %r11 + movq 56(%rdx), %r14 + movq 48(%rdx), %r15 + movq 24(%rsi), %rbp + movq 32(%rsi), %r13 + movq 16(%rdx), %r12 + movq (%rdx), %rbx + movq 8(%rdx), %rax + addq (%rsi), %rbx + adcq 8(%rsi), %rax + adcq 16(%rsi), %r12 + adcq 24(%rdx), %rbp + adcq 32(%rdx), %r13 + movq 40(%rdx), %r9 + movq %rbx, (%rdi) + movq 72(%rsi), %r8 + movq %rax, 8(%rdi) + movq 64(%rsi), %r10 + movq %r12, 16(%rdi) + movq 56(%rsi), %r12 + movq %rbp, 24(%rdi) + movq 48(%rsi), %rbp + movq 40(%rsi), %rbx + movq %r13, 32(%rdi) + adcq %r9, %rbx + adcq %r15, %rbp + adcq %r14, %r12 + adcq %r11, %r10 + adcq -8(%rsp), %r8 ## 8-byte Folded Reload + sbbq %rsi, %rsi + andl $1, %esi + movq %rbx, %rax + subq (%rcx), %rax + movq %rbp, %rdx + sbbq 8(%rcx), %rdx + movq %r12, %r9 + sbbq 16(%rcx), %r9 + movq %r10, %r11 + sbbq 24(%rcx), %r11 + movq %r8, %r14 + sbbq 32(%rcx), %r14 + sbbq $0, %rsi + andl $1, %esi + cmovneq %rbx, %rax + movq %rax, 40(%rdi) + testb %sil, %sil + cmovneq %rbp, %rdx + movq %rdx, 48(%rdi) + cmovneq %r12, %r9 + movq %r9, 56(%rdi) + cmovneq %r10, %r11 + movq %r11, 64(%rdi) + cmovneq %r8, %r14 + movq %r14, 72(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sub5L + .p2align 4, 0x90 +_mcl_fpDbl_sub5L: ## @mcl_fpDbl_sub5L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 72(%rdx), %r9 + movq 64(%rdx), %r10 + movq 56(%rdx), %r14 + movq 16(%rsi), %r8 + movq (%rsi), %r15 + movq 8(%rsi), %r11 + xorl %eax, %eax + subq (%rdx), %r15 + sbbq 8(%rdx), %r11 + sbbq 16(%rdx), %r8 + movq 24(%rsi), %r12 + sbbq 24(%rdx), %r12 + movq %r15, (%rdi) + movq 32(%rsi), %rbx + sbbq 32(%rdx), %rbx + movq %r11, 8(%rdi) + movq 48(%rdx), %r15 + movq 40(%rdx), %rdx + movq %r8, 16(%rdi) + movq 72(%rsi), %r8 + 
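+## 5-limb single-width group above: _mcl_fp_add5L / _mcl_fp_sub5L branch to
+## their %carry / %nocarry blocks (LBB74_2 / LBB76_2) so the modulus
+## correction runs only when the trial step overflows; the addNF/subNF
+## variants stay branchless via a sign mask; _mcl_fp_shr1_5L is a 320-bit
+## right shift by one built from shrdq. _mcl_fpDbl_add5L / _mcl_fpDbl_sub5L
+## (continuing below) again reduce only the upper five limbs.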
movq %r12, 24(%rdi) + movq 64(%rsi), %r11 + movq %rbx, 32(%rdi) + movq 40(%rsi), %rbp + sbbq %rdx, %rbp + movq 56(%rsi), %r12 + movq 48(%rsi), %r13 + sbbq %r15, %r13 + sbbq %r14, %r12 + sbbq %r10, %r11 + sbbq %r9, %r8 + movl $0, %edx + sbbq $0, %rdx + andl $1, %edx + movq (%rcx), %rsi + cmoveq %rax, %rsi + testb %dl, %dl + movq 16(%rcx), %rdx + cmoveq %rax, %rdx + movq 8(%rcx), %rbx + cmoveq %rax, %rbx + movq 32(%rcx), %r9 + cmoveq %rax, %r9 + cmovneq 24(%rcx), %rax + addq %rbp, %rsi + movq %rsi, 40(%rdi) + adcq %r13, %rbx + movq %rbx, 48(%rdi) + adcq %r12, %rdx + movq %rdx, 56(%rdi) + adcq %r11, %rax + movq %rax, 64(%rdi) + adcq %r8, %r9 + movq %r9, 72(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mulUnitPre6L + .p2align 4, 0x90 +_mcl_fp_mulUnitPre6L: ## @mcl_fp_mulUnitPre6L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rcx, %rax + mulq 40(%rsi) + movq %rdx, %r9 + movq %rax, %r8 + movq %rcx, %rax + mulq 32(%rsi) + movq %rdx, %r10 + movq %rax, %r11 + movq %rcx, %rax + mulq 24(%rsi) + movq %rdx, %r15 + movq %rax, %r14 + movq %rcx, %rax + mulq 16(%rsi) + movq %rdx, %r13 + movq %rax, %r12 + movq %rcx, %rax + mulq 8(%rsi) + movq %rdx, %rbx + movq %rax, %rbp + movq %rcx, %rax + mulq (%rsi) + movq %rax, (%rdi) + addq %rbp, %rdx + movq %rdx, 8(%rdi) + adcq %r12, %rbx + movq %rbx, 16(%rdi) + adcq %r14, %r13 + movq %r13, 24(%rdi) + adcq %r11, %r15 + movq %r15, 32(%rdi) + adcq %r8, %r10 + movq %r10, 40(%rdi) + adcq $0, %r9 + movq %r9, 48(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_mulPre6L + .p2align 4, 0x90 +_mcl_fpDbl_mulPre6L: ## @mcl_fpDbl_mulPre6L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, -64(%rsp) ## 8-byte Spill + movq %rsi, %r12 + movq %rdi, -16(%rsp) ## 8-byte Spill + movq (%r12), %rax + movq %rax, -72(%rsp) ## 8-byte Spill + movq (%rdx), %rsi + mulq %rsi + movq %rdx, -88(%rsp) ## 8-byte Spill + movq 24(%r12), %rbp + movq %rbp, -104(%rsp) ## 8-byte Spill + movq 32(%r12), %rbx + movq 40(%r12), %r11 + movq %rax, (%rdi) + movq %r11, %rax + mulq %rsi + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rsi + movq %rdx, %rcx + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %rsi + movq %rax, %r9 + movq %rdx, %rdi + movq 16(%r12), %r8 + movq %r8, %rax + mulq %rsi + movq %rax, %r14 + movq %rdx, %rbp + movq 8(%r12), %r10 + movq %r10, %rax + mulq %rsi + movq %rdx, %r15 + movq %rax, %r13 + addq -88(%rsp), %r13 ## 8-byte Folded Reload + adcq %r14, %r15 + adcq %r9, %rbp + adcq -112(%rsp), %rdi ## 8-byte Folded Reload + adcq -96(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -112(%rsp) ## 8-byte Spill + movq -120(%rsp), %rsi ## 8-byte Reload + adcq $0, %rsi + movq -64(%rsp), %r9 ## 8-byte Reload + movq 8(%r9), %rcx + movq %r11, %rax + mulq %rcx + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rcx + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, %r11 + movq -104(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, %r14 + movq %r8, %rax + mulq %rcx + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rax, %r8 + movq %r10, %rax + mulq %rcx + movq %rdx, %r10 + movq %rax, %rbx + movq -72(%rsp), %rax ## 8-byte Reload + mulq %rcx + addq %r13, %rax + movq -16(%rsp), %r13 ## 8-byte Reload + movq %rax, 8(%r13) 
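+## _mcl_fp_mulUnitPre6L above: 6-limb x 1-word product with a 7-limb result,
+## built from a chained mulq/adcq pass. _mcl_fpDbl_mulPre6L (this routine) is
+## the schoolbook 6x6 multiply: one column pass per multiplier limb, partial
+## sums carried in registers and red-zone spills, yielding the full 12-limb
+## product.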
+ adcq %r15, %rbx + adcq %rbp, %r8 + adcq %rdi, %r14 + adcq -112(%rsp), %r11 ## 8-byte Folded Reload + movq -120(%rsp), %rax ## 8-byte Reload + adcq %rsi, %rax + sbbq %rsi, %rsi + andl $1, %esi + addq %rdx, %rbx + adcq %r10, %r8 + adcq -80(%rsp), %r14 ## 8-byte Folded Reload + adcq -104(%rsp), %r11 ## 8-byte Folded Reload + adcq -96(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -120(%rsp) ## 8-byte Spill + adcq -88(%rsp), %rsi ## 8-byte Folded Reload + movq 40(%r12), %rax + movq %rax, -88(%rsp) ## 8-byte Spill + movq 16(%r9), %rcx + mulq %rcx + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rdx, -112(%rsp) ## 8-byte Spill + movq 32(%r12), %rax + movq %rax, -96(%rsp) ## 8-byte Spill + mulq %rcx + movq %rax, %r10 + movq %rdx, -40(%rsp) ## 8-byte Spill + movq 24(%r12), %rax + movq %rax, -104(%rsp) ## 8-byte Spill + mulq %rcx + movq %rax, %r9 + movq %rdx, -48(%rsp) ## 8-byte Spill + movq 16(%r12), %rax + movq %rax, -80(%rsp) ## 8-byte Spill + mulq %rcx + movq %rax, %rbp + movq %rdx, -56(%rsp) ## 8-byte Spill + movq 8(%r12), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + mulq %rcx + movq %rax, %rdi + movq %rdx, %r15 + movq (%r12), %rax + movq %rax, -24(%rsp) ## 8-byte Spill + mulq %rcx + addq %rbx, %rax + movq %rax, 16(%r13) + adcq %r8, %rdi + adcq %r14, %rbp + adcq %r11, %r9 + adcq -120(%rsp), %r10 ## 8-byte Folded Reload + movq -72(%rsp), %rax ## 8-byte Reload + adcq %rsi, %rax + sbbq %rcx, %rcx + andl $1, %ecx + addq %rdx, %rdi + adcq %r15, %rbp + adcq -56(%rsp), %r9 ## 8-byte Folded Reload + adcq -48(%rsp), %r10 ## 8-byte Folded Reload + adcq -40(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -72(%rsp) ## 8-byte Spill + adcq -112(%rsp), %rcx ## 8-byte Folded Reload + movq -64(%rsp), %rbx ## 8-byte Reload + movq 24(%rbx), %rsi + movq -88(%rsp), %rax ## 8-byte Reload + mulq %rsi + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, -120(%rsp) ## 8-byte Spill + movq -96(%rsp), %rax ## 8-byte Reload + mulq %rsi + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, %r14 + movq -104(%rsp), %rax ## 8-byte Reload + mulq %rsi + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, %r15 + movq -80(%rsp), %rax ## 8-byte Reload + mulq %rsi + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, %r13 + movq -32(%rsp), %rax ## 8-byte Reload + mulq %rsi + movq %rdx, %r8 + movq %rax, %r11 + movq -24(%rsp), %rax ## 8-byte Reload + mulq %rsi + addq %rdi, %rax + movq -16(%rsp), %rsi ## 8-byte Reload + movq %rax, 24(%rsi) + adcq %rbp, %r11 + adcq %r9, %r13 + adcq %r10, %r15 + adcq -72(%rsp), %r14 ## 8-byte Folded Reload + movq -120(%rsp), %rax ## 8-byte Reload + adcq %rcx, %rax + sbbq %rcx, %rcx + andl $1, %ecx + addq %rdx, %r11 + adcq %r8, %r13 + adcq -112(%rsp), %r15 ## 8-byte Folded Reload + adcq -104(%rsp), %r14 ## 8-byte Folded Reload + adcq -96(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -120(%rsp) ## 8-byte Spill + adcq -88(%rsp), %rcx ## 8-byte Folded Reload + movq 40(%r12), %rax + movq %rax, -80(%rsp) ## 8-byte Spill + movq 32(%rbx), %rdi + mulq %rdi + movq %rax, %r9 + movq %rdx, -72(%rsp) ## 8-byte Spill + movq 32(%r12), %rax + movq %rax, -56(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r10 + movq %rdx, -88(%rsp) ## 8-byte Spill + movq 24(%r12), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r8 + movq %rdx, -96(%rsp) ## 8-byte Spill + movq 16(%r12), %rax + movq %rax, -40(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %rbx + movq %rdx, -104(%rsp) ## 8-byte Spill + movq (%r12), %rbp + movq 8(%r12), %rax + movq %rax, -8(%rsp) ## 8-byte Spill + mulq %rdi + movq %rdx, -112(%rsp) ## 
8-byte Spill + movq %rax, %r12 + movq %rbp, %rax + mulq %rdi + movq %rdx, -48(%rsp) ## 8-byte Spill + addq %r11, %rax + movq %rax, 32(%rsi) + adcq %r13, %r12 + adcq %r15, %rbx + adcq %r14, %r8 + adcq -120(%rsp), %r10 ## 8-byte Folded Reload + adcq %rcx, %r9 + movq -64(%rsp), %rax ## 8-byte Reload + movq 40(%rax), %rcx + sbbq %rsi, %rsi + movq -80(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -64(%rsp) ## 8-byte Spill + movq %rax, -24(%rsp) ## 8-byte Spill + movq -56(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, %r15 + movq -8(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rax, %r11 + movq %rbp, %rax + mulq %rcx + movq %rdx, %rbp + movq %rax, %rdi + movq -32(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, %r13 + movq %rax, %r14 + movq -40(%rsp), %rax ## 8-byte Reload + mulq %rcx + andl $1, %esi + addq -48(%rsp), %r12 ## 8-byte Folded Reload + adcq -112(%rsp), %rbx ## 8-byte Folded Reload + adcq -104(%rsp), %r8 ## 8-byte Folded Reload + adcq -96(%rsp), %r10 ## 8-byte Folded Reload + adcq -88(%rsp), %r9 ## 8-byte Folded Reload + adcq -72(%rsp), %rsi ## 8-byte Folded Reload + addq %rdi, %r12 + movq -16(%rsp), %rcx ## 8-byte Reload + movq %r12, 40(%rcx) + adcq %r11, %rbx + adcq %rax, %r8 + adcq %r14, %r10 + adcq %r15, %r9 + adcq -24(%rsp), %rsi ## 8-byte Folded Reload + sbbq %rax, %rax + andl $1, %eax + addq %rbp, %rbx + movq %rbx, 48(%rcx) + adcq -80(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, 56(%rcx) + adcq %rdx, %r10 + movq %r10, 64(%rcx) + adcq %r13, %r9 + movq %r9, 72(%rcx) + adcq -120(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 80(%rcx) + adcq -64(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 88(%rcx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sqrPre6L + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre6L: ## @mcl_fpDbl_sqrPre6L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, -48(%rsp) ## 8-byte Spill + movq 16(%rsi), %r8 + movq %r8, -120(%rsp) ## 8-byte Spill + movq 24(%rsi), %r11 + movq %r11, -112(%rsp) ## 8-byte Spill + movq 32(%rsi), %r12 + movq 40(%rsi), %r9 + movq (%rsi), %rcx + movq %rcx, %rax + mulq %rcx + movq %rdx, %rbp + movq %rax, (%rdi) + movq %r9, %rax + mulq %rcx + movq %rdx, %rbx + movq %rax, -128(%rsp) ## 8-byte Spill + movq %r12, %rax + mulq %rcx + movq %rdx, %r10 + movq %rax, %r13 + movq %r11, %rax + mulq %rcx + movq %rdx, %rdi + movq %rax, %r15 + movq %r8, %rax + mulq %rcx + movq %rax, %r11 + movq %rdx, %r14 + movq 8(%rsi), %r8 + movq %r8, %rax + mulq %rcx + movq %rdx, -56(%rsp) ## 8-byte Spill + movq %rax, %rcx + addq %rcx, %rbp + adcq %rdx, %r11 + adcq %r15, %r14 + adcq %r13, %rdi + adcq -128(%rsp), %r10 ## 8-byte Folded Reload + adcq $0, %rbx + movq %rbx, -72(%rsp) ## 8-byte Spill + movq %r9, %rax + mulq %r8 + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, %r13 + movq %r12, %rax + mulq %r8 + movq %rdx, -64(%rsp) ## 8-byte Spill + movq %rax, %r9 + movq -112(%rsp), %rax ## 8-byte Reload + mulq %r8 + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, %r15 + movq -120(%rsp), %rax ## 8-byte Reload + mulq %r8 + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, %r12 + movq %r8, %rax + mulq %r8 + movq %rax, %rbx + addq %rcx, %rbp + movq -48(%rsp), %rax ## 8-byte Reload + movq %rbp, 8(%rax) + adcq %r11, %rbx + adcq %r14, %r12 + adcq %rdi, %r15 + adcq %r10, %r9 + movq %r13, %rax + adcq -72(%rsp), %rax ## 8-byte Folded Reload + sbbq %r13, %r13 + andl $1, %r13d + 
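+## _mcl_fpDbl_sqrPre6L (this routine) squares a 6-limb value into the full
+## 12-limb result: same column structure as _mcl_fpDbl_mulPre6L with both
+## operands the source, the diagonal a_i*a_i terms coming from mulq of a limb
+## with itself; cross products are largely recomputed per column rather than
+## computed once and doubled.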
addq -56(%rsp), %rbx ## 8-byte Folded Reload + adcq %rdx, %r12 + adcq -120(%rsp), %r15 ## 8-byte Folded Reload + adcq -112(%rsp), %r9 ## 8-byte Folded Reload + adcq -64(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -88(%rsp) ## 8-byte Spill + adcq -128(%rsp), %r13 ## 8-byte Folded Reload + movq 40(%rsi), %rax + movq %rax, -56(%rsp) ## 8-byte Spill + movq 16(%rsi), %rdi + mulq %rdi + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rdx, -112(%rsp) ## 8-byte Spill + movq 32(%rsi), %rax + movq %rax, -64(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r11 + movq %rdx, -80(%rsp) ## 8-byte Spill + movq 24(%rsi), %rbp + movq %rbp, %rax + mulq %rdi + movq %rax, %r8 + movq %r8, -24(%rsp) ## 8-byte Spill + movq %rdx, -128(%rsp) ## 8-byte Spill + movq 8(%rsi), %rax + movq %rax, -120(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r10 + movq %rdx, -32(%rsp) ## 8-byte Spill + movq (%rsi), %rax + movq %rax, -72(%rsp) ## 8-byte Spill + mulq %rdi + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, %r14 + movq %rdi, %rax + mulq %rdi + movq %rax, %rcx + addq %rbx, %r14 + movq -48(%rsp), %rax ## 8-byte Reload + movq %r14, 16(%rax) + adcq %r12, %r10 + adcq %r15, %rcx + adcq %r8, %r9 + adcq -88(%rsp), %r11 ## 8-byte Folded Reload + movq -96(%rsp), %r8 ## 8-byte Reload + adcq %r13, %r8 + sbbq %rdi, %rdi + andl $1, %edi + addq -104(%rsp), %r10 ## 8-byte Folded Reload + adcq -32(%rsp), %rcx ## 8-byte Folded Reload + adcq %rdx, %r9 + adcq -128(%rsp), %r11 ## 8-byte Folded Reload + adcq -80(%rsp), %r8 ## 8-byte Folded Reload + adcq -112(%rsp), %rdi ## 8-byte Folded Reload + movq -56(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rax, %r12 + movq -64(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, %r15 + movq -120(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, %r14 + movq -72(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq %rdx, -40(%rsp) ## 8-byte Spill + movq %rax, %rbx + movq %rbp, %rax + mulq %rbp + movq %rax, %r13 + movq %rdx, -104(%rsp) ## 8-byte Spill + addq %r10, %rbx + movq -48(%rsp), %rax ## 8-byte Reload + movq %rbx, 24(%rax) + adcq %rcx, %r14 + adcq -24(%rsp), %r9 ## 8-byte Folded Reload + adcq %r11, %r13 + adcq %r8, %r15 + adcq %rdi, %r12 + sbbq %rcx, %rcx + movq 8(%rsi), %rbp + movq 40(%rsi), %rbx + movq %rbp, %rax + mulq %rbx + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rdx, -56(%rsp) ## 8-byte Spill + movq (%rsi), %rdi + movq %rdi, %rax + mulq %rbx + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rdx, -64(%rsp) ## 8-byte Spill + movq 32(%rsi), %r10 + movq %rbp, %rax + mulq %r10 + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -32(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %r10 + movq %rax, -8(%rsp) ## 8-byte Spill + movq %rdx, -24(%rsp) ## 8-byte Spill + andl $1, %ecx + addq -40(%rsp), %r14 ## 8-byte Folded Reload + adcq -96(%rsp), %r9 ## 8-byte Folded Reload + adcq -128(%rsp), %r13 ## 8-byte Folded Reload + adcq -104(%rsp), %r15 ## 8-byte Folded Reload + adcq -88(%rsp), %r12 ## 8-byte Folded Reload + adcq -80(%rsp), %rcx ## 8-byte Folded Reload + movq 24(%rsi), %rdi + movq %rdi, %rax + mulq %rbx + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %r10 + movq %rax, %rbp + movq %rdx, -40(%rsp) ## 8-byte Spill + movq 16(%rsi), %rsi + movq %rsi, %rax + mulq %rbx + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq %r10 + movq %rdx, %r11 + movq %rax, %rsi + movq 
%rbx, %rax + mulq %r10 + movq %rdx, -16(%rsp) ## 8-byte Spill + movq %rax, %rdi + movq %rbx, %rax + mulq %rbx + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, %rbx + movq %r10, %rax + mulq %r10 + movq %rdx, %r8 + addq -8(%rsp), %r14 ## 8-byte Folded Reload + movq -48(%rsp), %rdx ## 8-byte Reload + movq %r14, 32(%rdx) + adcq -32(%rsp), %r9 ## 8-byte Folded Reload + adcq %r13, %rsi + adcq %r15, %rbp + adcq %r12, %rax + adcq %rdi, %rcx + sbbq %r10, %r10 + andl $1, %r10d + addq -24(%rsp), %r9 ## 8-byte Folded Reload + adcq -120(%rsp), %rsi ## 8-byte Folded Reload + adcq %r11, %rbp + adcq -40(%rsp), %rax ## 8-byte Folded Reload + adcq %r8, %rcx + movq -16(%rsp), %r8 ## 8-byte Reload + adcq %r8, %r10 + addq -72(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 40(%rdx) + adcq -112(%rsp), %rsi ## 8-byte Folded Reload + adcq -104(%rsp), %rbp ## 8-byte Folded Reload + adcq -96(%rsp), %rax ## 8-byte Folded Reload + adcq %rdi, %rcx + adcq %rbx, %r10 + sbbq %rdi, %rdi + andl $1, %edi + addq -64(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 48(%rdx) + adcq -56(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 56(%rdx) + adcq -80(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 64(%rdx) + adcq -128(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 72(%rdx) + adcq %r8, %r10 + movq %r10, 80(%rdx) + adcq -88(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 88(%rdx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mont6L + .p2align 4, 0x90 +_mcl_fp_mont6L: ## @mcl_fp_mont6L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $48, %rsp + movq %rdx, -56(%rsp) ## 8-byte Spill + movq %rdi, 40(%rsp) ## 8-byte Spill + movq 40(%rsi), %rax + movq %rax, -64(%rsp) ## 8-byte Spill + movq (%rdx), %rdi + mulq %rdi + movq %rax, %r10 + movq %rdx, %r11 + movq 32(%rsi), %rax + movq %rax, -72(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r14 + movq %rdx, %r15 + movq 24(%rsi), %rax + movq %rax, -80(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r8 + movq %rdx, %rbx + movq 16(%rsi), %rax + movq %rax, -40(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r9 + movq %rdx, %r12 + movq (%rsi), %rbp + movq %rbp, 32(%rsp) ## 8-byte Spill + movq 8(%rsi), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + mulq %rdi + movq %rdx, %r13 + movq %rax, %rsi + movq %rbp, %rax + mulq %rdi + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rdx, %rdi + addq %rsi, %rdi + adcq %r9, %r13 + adcq %r8, %r12 + adcq %r14, %rbx + movq %rbx, -88(%rsp) ## 8-byte Spill + adcq %r10, %r15 + movq %r15, -120(%rsp) ## 8-byte Spill + adcq $0, %r11 + movq %r11, -112(%rsp) ## 8-byte Spill + movq -8(%rcx), %rdx + movq %rdx, -32(%rsp) ## 8-byte Spill + movq %rax, %rbx + imulq %rdx, %rbx + movq 40(%rcx), %rdx + movq %rdx, 16(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rdx + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rdx, -128(%rsp) ## 8-byte Spill + movq 32(%rcx), %rdx + movq %rdx, 8(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rdx + movq %rax, %r9 + movq %rdx, %r14 + movq 24(%rcx), %rdx + movq %rdx, (%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rdx + movq %rax, %r8 + movq %rdx, %r15 + movq 16(%rcx), %rdx + movq %rdx, -8(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rdx + movq %rax, %r10 + movq %rdx, %r11 + movq (%rcx), %rsi + movq %rsi, -24(%rsp) ## 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, -16(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rcx + movq %rdx, %rbp + movq %rax, %rcx + movq %rbx, %rax + mulq %rsi + movq %rdx, %rbx + addq %rcx, %rbx + adcq %r10, %rbp + 
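+## _mcl_fp_mont6L (this routine): the 6-limb Montgomery multiply, same
+## recurrence as _mcl_fp_mont5L; here the working set outgrows the registers
+## plus the red zone, so a 48-byte frame is opened (subq $48, %rsp) and used
+## for spills alongside the negative %rsp offsets.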
adcq %r8, %r11 + adcq %r9, %r15 + adcq -104(%rsp), %r14 ## 8-byte Folded Reload + movq -128(%rsp), %rcx ## 8-byte Reload + adcq $0, %rcx + addq -96(%rsp), %rax ## 8-byte Folded Reload + adcq %rdi, %rbx + adcq %r13, %rbp + adcq %r12, %r11 + adcq -88(%rsp), %r15 ## 8-byte Folded Reload + adcq -120(%rsp), %r14 ## 8-byte Folded Reload + adcq -112(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -128(%rsp) ## 8-byte Spill + sbbq %rsi, %rsi + andl $1, %esi + movq -56(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdi + movq %rdi, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %r9 + movq %rdi, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r10 + movq %rdi, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rax, %r12 + movq %rdx, %rdi + addq %r10, %rdi + adcq %r9, %rcx + adcq -104(%rsp), %r13 ## 8-byte Folded Reload + adcq -96(%rsp), %r8 ## 8-byte Folded Reload + movq -120(%rsp), %rdx ## 8-byte Reload + adcq -88(%rsp), %rdx ## 8-byte Folded Reload + movq -112(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %rbx, %r12 + adcq %rbp, %rdi + adcq %r11, %rcx + adcq %r15, %r13 + adcq %r14, %r8 + adcq -128(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + adcq %rsi, %rax + movq %rax, -112(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -88(%rsp) ## 8-byte Spill + movq %r12, %rbx + imulq -32(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -48(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r10 + movq %rbx, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r11 + movq %rbx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + addq %r11, %r9 + adcq %r10, %rbp + adcq -48(%rsp), %rsi ## 8-byte Folded Reload + adcq -104(%rsp), %r15 ## 8-byte Folded Reload + adcq -96(%rsp), %r14 ## 8-byte Folded Reload + movq -128(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r12, %rax + adcq %rdi, %r9 + adcq %rcx, %rbp + adcq %r13, %rsi + adcq %r8, %r15 + adcq -120(%rsp), %r14 ## 8-byte Folded Reload + adcq -112(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + adcq $0, -88(%rsp) ## 8-byte Folded Spill + movq -56(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rcx + movq %rcx, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, %rdi + movq %rcx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %r8 + movq %rcx, %rax + mulq 24(%rsp) ## 8-byte Folded 
Reload + movq %rdx, %rbx + movq %rax, %r10 + movq %rcx, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rax, %r13 + movq %rdx, %rcx + addq %r10, %rcx + adcq %r8, %rbx + adcq %rdi, %r12 + adcq -104(%rsp), %r11 ## 8-byte Folded Reload + movq -120(%rsp), %rdx ## 8-byte Reload + adcq -96(%rsp), %rdx ## 8-byte Folded Reload + movq -112(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %r9, %r13 + adcq %rbp, %rcx + adcq %rsi, %rbx + adcq %r15, %r12 + adcq %r14, %r11 + adcq -128(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + adcq -88(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -112(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -88(%rsp) ## 8-byte Spill + movq %r13, %rdi + imulq -32(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -48(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r15 + movq %rdi, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r10 + movq %rdi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + addq %r10, %r8 + adcq %r15, %rbp + adcq -48(%rsp), %rsi ## 8-byte Folded Reload + adcq -104(%rsp), %r9 ## 8-byte Folded Reload + adcq -96(%rsp), %r14 ## 8-byte Folded Reload + movq -128(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r13, %rax + adcq %rcx, %r8 + adcq %rbx, %rbp + adcq %r12, %rsi + adcq %r11, %r9 + adcq -120(%rsp), %r14 ## 8-byte Folded Reload + adcq -112(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + adcq $0, -88(%rsp) ## 8-byte Folded Spill + movq -56(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rcx + movq %rcx, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, %rdi + movq %rcx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %r10 + movq %rcx, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r12 + movq %rcx, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rax, %r13 + movq %rdx, %rcx + addq %r12, %rcx + adcq %r10, %rbx + adcq %rdi, %r15 + adcq -104(%rsp), %r11 ## 8-byte Folded Reload + movq -120(%rsp), %rdx ## 8-byte Reload + adcq -96(%rsp), %rdx ## 8-byte Folded Reload + movq -112(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %r8, %r13 + adcq %rbp, %rcx + adcq %rsi, %rbx + adcq %r9, %r15 + adcq %r14, %r11 + adcq -128(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + adcq -88(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -112(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -88(%rsp) ## 8-byte Spill + movq %r13, %rsi + imulq -32(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rsi, %rax + 
mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r10 + movq %rsi, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, %r8 + movq %rsi, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %r9 + movq %rsi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + addq %r9, %rsi + adcq %r8, %r12 + adcq %r10, %r14 + adcq -104(%rsp), %rdi ## 8-byte Folded Reload + adcq -96(%rsp), %rbp ## 8-byte Folded Reload + movq -128(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r13, %rax + adcq %rcx, %rsi + adcq %rbx, %r12 + adcq %r15, %r14 + adcq %r11, %rdi + adcq -120(%rsp), %rbp ## 8-byte Folded Reload + adcq -112(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + adcq $0, -88(%rsp) ## 8-byte Folded Spill + movq -56(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rcx + movq %rcx, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, -48(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, %r11 + movq %rcx, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %r9 + movq %rcx, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rax, %r8 + movq %rdx, %r13 + addq %r9, %r13 + adcq %r11, %r15 + adcq -48(%rsp), %r10 ## 8-byte Folded Reload + adcq -104(%rsp), %rbx ## 8-byte Folded Reload + movq -120(%rsp), %rcx ## 8-byte Reload + adcq -96(%rsp), %rcx ## 8-byte Folded Reload + movq -112(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %rsi, %r8 + adcq %r12, %r13 + adcq %r14, %r15 + adcq %rdi, %r10 + adcq %rbp, %rbx + adcq -128(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -120(%rsp) ## 8-byte Spill + adcq -88(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -112(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -88(%rsp) ## 8-byte Spill + movq %r8, %rcx + imulq -32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -48(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %rdi + movq %rcx, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r12 + movq %rcx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + addq %r12, %r14 + adcq %rdi, %rbp + adcq -48(%rsp), %rsi ## 8-byte Folded Reload + adcq -104(%rsp), %r11 ## 8-byte Folded Reload + adcq -96(%rsp), %r9 ## 8-byte Folded Reload + movq -128(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r8, %rax + adcq %r13, %r14 + adcq %r15, %rbp + adcq %r10, %rsi + adcq %rbx, %r11 + adcq -120(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, -120(%rsp) ## 8-byte Spill + adcq -112(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq -88(%rsp), %rdi ## 8-byte Reload + adcq $0, %rdi + movq -56(%rsp), %rax ## 8-byte Reload + movq 40(%rax), %rcx + movq %rcx, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, -56(%rsp) ## 
8-byte Spill + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -72(%rsp) ## 8-byte Folded Reload + movq %rdx, -64(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %rbx + movq %rcx, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, %r9 + movq %rcx, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rax, %r15 + movq %rdx, %r8 + addq %r9, %r8 + adcq %rbx, %r10 + adcq -80(%rsp), %r13 ## 8-byte Folded Reload + adcq -72(%rsp), %r12 ## 8-byte Folded Reload + movq -64(%rsp), %rax ## 8-byte Reload + adcq -112(%rsp), %rax ## 8-byte Folded Reload + movq -56(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r14, %r15 + adcq %rbp, %r8 + adcq %rsi, %r10 + adcq %r11, %r13 + adcq -120(%rsp), %r12 ## 8-byte Folded Reload + movq %r12, -72(%rsp) ## 8-byte Spill + adcq -128(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -64(%rsp) ## 8-byte Spill + adcq %rdi, %rdx + movq %rdx, -56(%rsp) ## 8-byte Spill + sbbq %rcx, %rcx + movq -32(%rsp), %rdi ## 8-byte Reload + imulq %r15, %rdi + movq %rdi, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, -32(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, -40(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, %r9 + movq %rdi, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r11 + movq %rdi, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + andl $1, %ecx + addq %r14, %rax + adcq %r11, %rdx + adcq -40(%rsp), %rbx ## 8-byte Folded Reload + adcq -80(%rsp), %rsi ## 8-byte Folded Reload + adcq -32(%rsp), %r12 ## 8-byte Folded Reload + adcq $0, %rbp + addq %r15, %r9 + adcq %r8, %rax + adcq %r10, %rdx + adcq %r13, %rbx + adcq -72(%rsp), %rsi ## 8-byte Folded Reload + adcq -64(%rsp), %r12 ## 8-byte Folded Reload + adcq -56(%rsp), %rbp ## 8-byte Folded Reload + adcq $0, %rcx + movq %rax, %r8 + subq -24(%rsp), %r8 ## 8-byte Folded Reload + movq %rdx, %r9 + sbbq -16(%rsp), %r9 ## 8-byte Folded Reload + movq %rbx, %r10 + sbbq -8(%rsp), %r10 ## 8-byte Folded Reload + movq %rsi, %r11 + sbbq (%rsp), %r11 ## 8-byte Folded Reload + movq %r12, %r14 + sbbq 8(%rsp), %r14 ## 8-byte Folded Reload + movq %rbp, %r15 + sbbq 16(%rsp), %r15 ## 8-byte Folded Reload + sbbq $0, %rcx + andl $1, %ecx + cmovneq %rsi, %r11 + testb %cl, %cl + cmovneq %rax, %r8 + movq 40(%rsp), %rax ## 8-byte Reload + movq %r8, (%rax) + cmovneq %rdx, %r9 + movq %r9, 8(%rax) + cmovneq %rbx, %r10 + movq %r10, 16(%rax) + movq %r11, 24(%rax) + cmovneq %r12, %r14 + movq %r14, 32(%rax) + cmovneq %rbp, %r15 + movq %r15, 40(%rax) + addq $48, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF6L + .p2align 4, 0x90 +_mcl_fp_montNF6L: ## @mcl_fp_montNF6L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $40, %rsp + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rdi, 32(%rsp) ## 8-byte Spill + movq 40(%rsi), %rax + movq %rax, -80(%rsp) ## 8-byte Spill + movq (%rdx), %rdi + mulq %rdi + movq %rax, -64(%rsp) ## 8-byte Spill + movq %rdx, %r12 + movq 32(%rsi), %rax + movq %rax, -88(%rsp) ## 8-byte 
Spill + mulq %rdi + movq %rax, %r14 + movq %rdx, %r10 + movq 24(%rsi), %rax + movq %rax, -56(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r15 + movq %rdx, %r9 + movq 16(%rsi), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r11 + movq %rdx, %r8 + movq (%rsi), %rbx + movq %rbx, 8(%rsp) ## 8-byte Spill + movq 8(%rsi), %rax + movq %rax, (%rsp) ## 8-byte Spill + mulq %rdi + movq %rdx, %rbp + movq %rax, %rsi + movq %rbx, %rax + mulq %rdi + movq %rax, %r13 + movq %rdx, %rdi + addq %rsi, %rdi + adcq %r11, %rbp + adcq %r15, %r8 + adcq %r14, %r9 + adcq -64(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, -128(%rsp) ## 8-byte Spill + adcq $0, %r12 + movq %r12, -112(%rsp) ## 8-byte Spill + movq -8(%rcx), %rax + movq %rax, -48(%rsp) ## 8-byte Spill + movq %r13, %rbx + imulq %rax, %rbx + movq 40(%rcx), %rdx + movq %rdx, -64(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rdx + movq %rax, %r14 + movq %rdx, -120(%rsp) ## 8-byte Spill + movq 32(%rcx), %rdx + movq %rdx, -16(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rdx + movq %rax, %r15 + movq %rdx, -96(%rsp) ## 8-byte Spill + movq 24(%rcx), %rdx + movq %rdx, -24(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rdx + movq %rax, %r12 + movq %rdx, -104(%rsp) ## 8-byte Spill + movq 16(%rcx), %rdx + movq %rdx, -40(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rdx + movq %rax, %r10 + movq %rdx, -8(%rsp) ## 8-byte Spill + movq (%rcx), %rsi + movq %rsi, -32(%rsp) ## 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq %rcx + movq %rdx, %r11 + movq %rax, %rcx + movq %rbx, %rax + mulq %rsi + addq %r13, %rax + adcq %rdi, %rcx + adcq %rbp, %r10 + adcq %r8, %r12 + adcq %r9, %r15 + adcq -128(%rsp), %r14 ## 8-byte Folded Reload + movq -112(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %rdx, %rcx + adcq %r11, %r10 + adcq -8(%rsp), %r12 ## 8-byte Folded Reload + adcq -104(%rsp), %r15 ## 8-byte Folded Reload + adcq -96(%rsp), %r14 ## 8-byte Folded Reload + movq %r14, -128(%rsp) ## 8-byte Spill + adcq -120(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -112(%rsp) ## 8-byte Spill + movq -72(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdi + movq %rdi, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r14 + movq %rdi, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r11 + movq %rdi, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rax, %rdi + movq %rdx, %rbp + addq %r11, %rbp + adcq %r14, %rbx + adcq -104(%rsp), %rsi ## 8-byte Folded Reload + adcq -96(%rsp), %r13 ## 8-byte Folded Reload + adcq -120(%rsp), %r9 ## 8-byte Folded Reload + adcq $0, %r8 + addq %rcx, %rdi + adcq %r10, %rbp + adcq %r12, %rbx + adcq %r15, %rsi + adcq -128(%rsp), %r13 ## 8-byte Folded Reload + adcq -112(%rsp), %r9 ## 8-byte Folded Reload + adcq $0, %r8 + movq %rdi, %r11 + imulq -48(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -112(%rsp) ## 8-byte Spill + movq %r11, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, %r15 + movq %r11, %rax + mulq -24(%rsp) ## 8-byte Folded 
Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, %rcx + movq %r11, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, %r10 + movq %r11, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %r14 + movq %r11, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + addq %rdi, %rax + adcq %rbp, %r14 + adcq %rbx, %r10 + adcq %rsi, %rcx + adcq %r13, %r15 + movq -112(%rsp), %rax ## 8-byte Reload + adcq %r9, %rax + adcq $0, %r8 + addq %rdx, %r14 + adcq %r12, %r10 + adcq -104(%rsp), %rcx ## 8-byte Folded Reload + adcq -120(%rsp), %r15 ## 8-byte Folded Reload + movq %r15, -120(%rsp) ## 8-byte Spill + adcq -96(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -112(%rsp) ## 8-byte Spill + adcq -128(%rsp), %r8 ## 8-byte Folded Reload + movq -72(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rdi + movq %rdi, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, -8(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r9 + movq %rdi, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rax, %rbp + movq %rdx, %rbx + addq %r9, %rbx + adcq -8(%rsp), %rsi ## 8-byte Folded Reload + adcq -104(%rsp), %r12 ## 8-byte Folded Reload + adcq -96(%rsp), %r11 ## 8-byte Folded Reload + adcq -128(%rsp), %r15 ## 8-byte Folded Reload + adcq $0, %r13 + addq %r14, %rbp + adcq %r10, %rbx + adcq %rcx, %rsi + adcq -120(%rsp), %r12 ## 8-byte Folded Reload + adcq -112(%rsp), %r11 ## 8-byte Folded Reload + adcq %r8, %r15 + adcq $0, %r13 + movq %rbp, %rcx + imulq -48(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, %r9 + movq %rcx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, %r10 + movq %rcx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, %r14 + movq %rcx, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %rdi + movq %rcx, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + addq %rbp, %rax + adcq %rbx, %rdi + adcq %rsi, %r14 + adcq %r12, %r10 + adcq %r11, %r9 + movq -112(%rsp), %rax ## 8-byte Reload + adcq %r15, %rax + adcq $0, %r13 + addq %rdx, %rdi + adcq %r8, %r14 + adcq -104(%rsp), %r10 ## 8-byte Folded Reload + adcq -96(%rsp), %r9 ## 8-byte Folded Reload + adcq -128(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -112(%rsp) ## 8-byte Spill + adcq -120(%rsp), %r13 ## 8-byte Folded Reload + movq -72(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rbp + movq %rbp, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, 
-104(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r12 + movq %rbp, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rax, %r8 + movq %rdx, %rbp + addq %r12, %rbp + adcq -104(%rsp), %rbx ## 8-byte Folded Reload + adcq -96(%rsp), %rsi ## 8-byte Folded Reload + adcq -128(%rsp), %rcx ## 8-byte Folded Reload + adcq -120(%rsp), %r11 ## 8-byte Folded Reload + adcq $0, %r15 + addq %rdi, %r8 + adcq %r14, %rbp + adcq %r10, %rbx + adcq %r9, %rsi + adcq -112(%rsp), %rcx ## 8-byte Folded Reload + adcq %r13, %r11 + adcq $0, %r15 + movq %r8, %r14 + imulq -48(%rsp), %r14 ## 8-byte Folded Reload + movq %r14, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, %r9 + movq %r14, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, %r13 + movq %r14, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, %r10 + movq %r14, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, %r12 + movq %r14, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, %rdi + movq %r14, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + addq %r8, %rax + adcq %rbp, %rdi + adcq %rbx, %r12 + adcq %rsi, %r10 + adcq %rcx, %r13 + adcq %r11, %r9 + adcq $0, %r15 + addq %rdx, %rdi + adcq -104(%rsp), %r12 ## 8-byte Folded Reload + adcq -96(%rsp), %r10 ## 8-byte Folded Reload + adcq -120(%rsp), %r13 ## 8-byte Folded Reload + movq %r13, -120(%rsp) ## 8-byte Spill + adcq -112(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, -112(%rsp) ## 8-byte Spill + adcq -128(%rsp), %r15 ## 8-byte Folded Reload + movq -72(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rcx + movq %rcx, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, -8(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r13 + movq %rcx, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rax, %r11 + movq %rdx, %rbp + addq %r13, %rbp + adcq -8(%rsp), %rbx ## 8-byte Folded Reload + adcq -104(%rsp), %rsi ## 8-byte Folded Reload + adcq -96(%rsp), %r8 ## 8-byte Folded Reload + adcq -128(%rsp), %r9 ## 8-byte Folded Reload + adcq $0, %r14 + addq %rdi, %r11 + adcq %r12, %rbp + adcq %r10, %rbx + adcq -120(%rsp), %rsi ## 8-byte Folded Reload + adcq -112(%rsp), %r8 ## 8-byte Folded Reload + adcq %r15, %r9 + adcq $0, %r14 + movq %r11, %rcx + imulq -48(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, %r12 + movq %rcx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, %r10 + movq %rcx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, %r15 + movq %rcx, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %rdi + movq %rcx, %rax + mulq -32(%rsp) ## 
8-byte Folded Reload + addq %r11, %rax + adcq %rbp, %rdi + adcq %rbx, %r15 + adcq %rsi, %r10 + adcq %r8, %r12 + movq -112(%rsp), %rcx ## 8-byte Reload + adcq %r9, %rcx + adcq $0, %r14 + addq %rdx, %rdi + adcq %r13, %r15 + adcq -128(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, -128(%rsp) ## 8-byte Spill + adcq -120(%rsp), %r12 ## 8-byte Folded Reload + movq %r12, -120(%rsp) ## 8-byte Spill + adcq -104(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -112(%rsp) ## 8-byte Spill + adcq -96(%rsp), %r14 ## 8-byte Folded Reload + movq -72(%rsp), %rax ## 8-byte Reload + movq 40(%rax), %rcx + movq %rcx, %rax + mulq -80(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -88(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %rbp + movq %rcx, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, %rsi + movq %rcx, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rax, %r9 + movq %rdx, %r8 + addq %rsi, %r8 + adcq %rbp, %r10 + adcq -88(%rsp), %r13 ## 8-byte Folded Reload + adcq -80(%rsp), %r12 ## 8-byte Folded Reload + adcq -72(%rsp), %r11 ## 8-byte Folded Reload + adcq $0, %rbx + addq %rdi, %r9 + adcq %r15, %r8 + adcq -128(%rsp), %r10 ## 8-byte Folded Reload + adcq -120(%rsp), %r13 ## 8-byte Folded Reload + adcq -112(%rsp), %r12 ## 8-byte Folded Reload + adcq %r14, %r11 + adcq $0, %rbx + movq -48(%rsp), %rcx ## 8-byte Reload + imulq %r9, %rcx + movq %rcx, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, -48(%rsp) ## 8-byte Spill + movq %rax, %rsi + movq %rcx, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rax, %rdi + movq %rcx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rax, %rbp + movq %rcx, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, %r14 + movq %rcx, %rax + movq %rcx, %r15 + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, -56(%rsp) ## 8-byte Spill + movq %rax, %rcx + movq %r15, %rax + movq 24(%rsp), %r15 ## 8-byte Reload + mulq %r15 + addq %r9, %r14 + adcq %r8, %rax + adcq %r10, %rcx + adcq %r13, %rbp + adcq %r12, %rdi + adcq %r11, %rsi + adcq $0, %rbx + addq -88(%rsp), %rax ## 8-byte Folded Reload + adcq %rdx, %rcx + adcq -56(%rsp), %rbp ## 8-byte Folded Reload + adcq -80(%rsp), %rdi ## 8-byte Folded Reload + adcq -72(%rsp), %rsi ## 8-byte Folded Reload + adcq -48(%rsp), %rbx ## 8-byte Folded Reload + movq %rax, %r14 + subq -32(%rsp), %r14 ## 8-byte Folded Reload + movq %rcx, %r8 + sbbq %r15, %r8 + movq %rbp, %r9 + sbbq -40(%rsp), %r9 ## 8-byte Folded Reload + movq %rdi, %r10 + sbbq -24(%rsp), %r10 ## 8-byte Folded Reload + movq %rsi, %r11 + sbbq -16(%rsp), %r11 ## 8-byte Folded Reload + movq %rbx, %r15 + sbbq -64(%rsp), %r15 ## 8-byte Folded Reload + movq %r15, %rdx + sarq $63, %rdx + cmovsq %rax, %r14 + movq 32(%rsp), %rax ## 8-byte Reload + movq %r14, (%rax) + cmovsq %rcx, %r8 + movq %r8, 8(%rax) + cmovsq %rbp, %r9 + movq %r9, 16(%rax) + cmovsq %rdi, %r10 + movq %r10, 24(%rax) + cmovsq %rsi, %r11 + movq %r11, 32(%rax) + cmovsq %rbx, %r15 + movq %r15, 40(%rax) + addq $40, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed6L + .p2align 4, 0x90 
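+## The routine below is an unrolled 6-limb (384-bit) Montgomery reduction.
+## in: %rdi = z (6-limb result), %rsi = t (12-limb input), %rdx = p (modulus),
+## with p' = -p^{-1} mod 2^64 read from p[-1] (the movq -8(%rbp) load below).
+## Sketch of what the unrolled code computes:
+##   for i in 0..5 { m = t[i]*p' mod 2^64; t += (m*p) << (64*i) }
+##   t >>= 384; subtract p once if needed (the trailing sbbq/cmovneq sequence).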
+_mcl_fp_montRed6L: ## @mcl_fp_montRed6L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $24, %rsp + movq %rdx, %rbp + movq %rdi, 16(%rsp) ## 8-byte Spill + movq -8(%rbp), %rax + movq %rax, -80(%rsp) ## 8-byte Spill + movq (%rsi), %r10 + movq %r10, %rdi + imulq %rax, %rdi + movq 40(%rbp), %rcx + movq %rcx, -24(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rcx + movq %rax, %r14 + movq %rdx, -128(%rsp) ## 8-byte Spill + movq 32(%rbp), %rcx + movq %rcx, -40(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rcx + movq %rax, %r15 + movq %rdx, %r9 + movq 24(%rbp), %rcx + movq %rcx, -48(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rcx + movq %rax, %r12 + movq %rdx, %r11 + movq 16(%rbp), %rcx + movq %rcx, -56(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rcx + movq %rax, %rcx + movq %rdx, %r13 + movq (%rbp), %rbx + movq 8(%rbp), %rdx + movq %rdx, -16(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rdx + movq %rdx, %r8 + movq %rax, %rbp + movq %rdi, %rax + mulq %rbx + movq %rbx, %rdi + movq %rdi, -8(%rsp) ## 8-byte Spill + movq %rdx, %rbx + addq %rbp, %rbx + adcq %rcx, %r8 + adcq %r12, %r13 + adcq %r15, %r11 + adcq %r14, %r9 + movq -128(%rsp), %rcx ## 8-byte Reload + adcq $0, %rcx + addq %r10, %rax + adcq 8(%rsi), %rbx + adcq 16(%rsi), %r8 + adcq 24(%rsi), %r13 + adcq 32(%rsi), %r11 + adcq 40(%rsi), %r9 + movq %r9, -120(%rsp) ## 8-byte Spill + adcq 48(%rsi), %rcx + movq %rcx, -128(%rsp) ## 8-byte Spill + movq 88(%rsi), %rax + movq 80(%rsi), %rcx + movq 72(%rsi), %rdx + movq 64(%rsi), %rbp + movq 56(%rsi), %rsi + adcq $0, %rsi + movq %rsi, -104(%rsp) ## 8-byte Spill + adcq $0, %rbp + movq %rbp, -72(%rsp) ## 8-byte Spill + adcq $0, %rdx + movq %rdx, -96(%rsp) ## 8-byte Spill + adcq $0, %rcx + movq %rcx, -64(%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, -88(%rsp) ## 8-byte Spill + sbbq %r14, %r14 + andl $1, %r14d + movq %rbx, %rsi + imulq -80(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, -32(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, (%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, 8(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r10 + movq %rsi, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r9 + movq %rsi, %rax + mulq %rdi + movq %rdx, %rdi + addq %r9, %rdi + adcq %r10, %rbp + adcq 8(%rsp), %rcx ## 8-byte Folded Reload + adcq (%rsp), %r12 ## 8-byte Folded Reload + adcq -32(%rsp), %r15 ## 8-byte Folded Reload + movq -112(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %rbx, %rax + adcq %r8, %rdi + adcq %r13, %rbp + adcq %r11, %rcx + adcq -120(%rsp), %r12 ## 8-byte Folded Reload + adcq -128(%rsp), %r15 ## 8-byte Folded Reload + adcq -104(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + adcq $0, -72(%rsp) ## 8-byte Folded Spill + adcq $0, -96(%rsp) ## 8-byte Folded Spill + adcq $0, -64(%rsp) ## 8-byte Folded Spill + adcq $0, -88(%rsp) ## 8-byte Folded Spill + adcq $0, %r14 + movq %rdi, %rbx + imulq -80(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, 
-32(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, (%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r9 + movq %rbx, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r11 + movq %rbx, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + addq %r11, %r10 + adcq %r9, %r8 + adcq (%rsp), %rsi ## 8-byte Folded Reload + adcq -32(%rsp), %r13 ## 8-byte Folded Reload + movq -120(%rsp), %rbx ## 8-byte Reload + adcq -104(%rsp), %rbx ## 8-byte Folded Reload + movq -128(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %rdi, %rax + adcq %rbp, %r10 + adcq %rcx, %r8 + adcq %r12, %rsi + adcq %r15, %r13 + adcq -112(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, -120(%rsp) ## 8-byte Spill + adcq -72(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + adcq $0, -96(%rsp) ## 8-byte Folded Spill + adcq $0, -64(%rsp) ## 8-byte Folded Spill + adcq $0, -88(%rsp) ## 8-byte Folded Spill + adcq $0, %r14 + movq %r10, %rcx + imulq -80(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, %rax + movq -24(%rsp), %rbp ## 8-byte Reload + mulq %rbp + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -32(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %rbx + movq %rcx, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r9 + movq %rcx, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + addq %r9, %rcx + adcq %rbx, %rdi + adcq -32(%rsp), %r12 ## 8-byte Folded Reload + adcq -104(%rsp), %r15 ## 8-byte Folded Reload + adcq -72(%rsp), %r11 ## 8-byte Folded Reload + movq -112(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r10, %rax + adcq %r8, %rcx + adcq %rsi, %rdi + adcq %r13, %r12 + adcq -120(%rsp), %r15 ## 8-byte Folded Reload + adcq -128(%rsp), %r11 ## 8-byte Folded Reload + adcq -96(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + adcq $0, -64(%rsp) ## 8-byte Folded Spill + movq -88(%rsp), %r8 ## 8-byte Reload + adcq $0, %r8 + adcq $0, %r14 + movq %rcx, %rsi + imulq -80(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, %rax + mulq %rbp + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %rbp + movq %rsi, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, %r10 + movq %rsi, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + addq %r10, %rbx + adcq %rbp, %r9 + adcq -104(%rsp), %r13 ## 8-byte Folded Reload + movq -120(%rsp), %rbp ## 8-byte Reload + adcq -72(%rsp), %rbp ## 8-byte Folded Reload + movq -128(%rsp), %rsi ## 8-byte Reload + adcq -88(%rsp), %rsi ## 8-byte Folded Reload + movq -96(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %rcx, %rax + adcq %rdi, %rbx + adcq %r12, %r9 + adcq %r15, %r13 + adcq %r11, %rbp + movq %rbp, -120(%rsp) ## 8-byte 
Spill + adcq -112(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -128(%rsp) ## 8-byte Spill + adcq -64(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -96(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, -88(%rsp) ## 8-byte Spill + adcq $0, %r14 + movq -80(%rsp), %r8 ## 8-byte Reload + imulq %rbx, %r8 + movq %r8, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, -80(%rsp) ## 8-byte Spill + movq %r8, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -64(%rsp) ## 8-byte Spill + movq %r8, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, -112(%rsp) ## 8-byte Spill + movq %r8, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r10 + movq %r8, %rax + movq -16(%rsp), %r12 ## 8-byte Reload + mulq %r12 + movq %rdx, %rcx + movq %rax, %r15 + movq %r8, %rax + movq -8(%rsp), %r8 ## 8-byte Reload + mulq %r8 + addq %r15, %rdx + adcq %r10, %rcx + adcq -112(%rsp), %rsi ## 8-byte Folded Reload + adcq -64(%rsp), %rdi ## 8-byte Folded Reload + adcq -80(%rsp), %r11 ## 8-byte Folded Reload + adcq $0, %rbp + addq %rbx, %rax + adcq %r9, %rdx + adcq %r13, %rcx + adcq -120(%rsp), %rsi ## 8-byte Folded Reload + adcq -128(%rsp), %rdi ## 8-byte Folded Reload + adcq -96(%rsp), %r11 ## 8-byte Folded Reload + adcq -88(%rsp), %rbp ## 8-byte Folded Reload + adcq $0, %r14 + movq %rdx, %rax + subq %r8, %rax + movq %rcx, %rbx + sbbq %r12, %rbx + movq %rsi, %r8 + sbbq -56(%rsp), %r8 ## 8-byte Folded Reload + movq %rdi, %r9 + sbbq -48(%rsp), %r9 ## 8-byte Folded Reload + movq %r11, %r10 + sbbq -40(%rsp), %r10 ## 8-byte Folded Reload + movq %rbp, %r15 + sbbq -24(%rsp), %r15 ## 8-byte Folded Reload + sbbq $0, %r14 + andl $1, %r14d + cmovneq %rbp, %r15 + testb %r14b, %r14b + cmovneq %rdx, %rax + movq 16(%rsp), %rdx ## 8-byte Reload + movq %rax, (%rdx) + cmovneq %rcx, %rbx + movq %rbx, 8(%rdx) + cmovneq %rsi, %r8 + movq %r8, 16(%rdx) + cmovneq %rdi, %r9 + movq %r9, 24(%rdx) + cmovneq %r11, %r10 + movq %r10, 32(%rdx) + movq %r15, 40(%rdx) + addq $24, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_addPre6L + .p2align 4, 0x90 +_mcl_fp_addPre6L: ## @mcl_fp_addPre6L +## BB#0: + pushq %r14 + pushq %rbx + movq 40(%rdx), %r8 + movq 40(%rsi), %r11 + movq 32(%rdx), %r9 + movq 24(%rdx), %r10 + movq 24(%rsi), %rax + movq 32(%rsi), %r14 + movq 16(%rdx), %rbx + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rbx + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %rbx, 16(%rdi) + adcq %r10, %rax + movq %rax, 24(%rdi) + adcq %r9, %r14 + movq %r14, 32(%rdi) + adcq %r8, %r11 + movq %r11, 40(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r14 + retq + + .globl _mcl_fp_subPre6L + .p2align 4, 0x90 +_mcl_fp_subPre6L: ## @mcl_fp_subPre6L +## BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 40(%rdx), %r8 + movq 40(%rsi), %r9 + movq 32(%rsi), %r10 + movq 24(%rsi), %r11 + movq 16(%rsi), %rcx + movq (%rsi), %rbx + movq 8(%rsi), %rsi + xorl %eax, %eax + subq (%rdx), %rbx + sbbq 8(%rdx), %rsi + movq 24(%rdx), %r14 + movq 32(%rdx), %r15 + sbbq 16(%rdx), %rcx + movq %rbx, (%rdi) + movq %rsi, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r14, %r11 + movq %r11, 24(%rdi) + sbbq %r15, %r10 + movq %r10, 32(%rdi) + sbbq %r8, %r9 + movq %r9, 40(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_shr1_6L + .p2align 4, 0x90 +_mcl_fp_shr1_6L: ## @mcl_fp_shr1_6L +## BB#0: + movq 40(%rsi), %r8 + movq 
32(%rsi), %r9 + movq 24(%rsi), %rdx + movq 16(%rsi), %rax + movq (%rsi), %rcx + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rcx + movq %rcx, (%rdi) + shrdq $1, %rax, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rdx, %rax + movq %rax, 16(%rdi) + shrdq $1, %r9, %rdx + movq %rdx, 24(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 32(%rdi) + shrq %r8 + movq %r8, 40(%rdi) + retq + + .globl _mcl_fp_add6L + .p2align 4, 0x90 +_mcl_fp_add6L: ## @mcl_fp_add6L +## BB#0: + pushq %r15 + pushq %r14 + pushq %rbx + movq 40(%rdx), %r14 + movq 40(%rsi), %r8 + movq 32(%rdx), %r15 + movq 24(%rdx), %rbx + movq 24(%rsi), %r10 + movq 32(%rsi), %r9 + movq 16(%rdx), %r11 + movq (%rdx), %rax + movq 8(%rdx), %rdx + addq (%rsi), %rax + adcq 8(%rsi), %rdx + adcq 16(%rsi), %r11 + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r11, 16(%rdi) + adcq %rbx, %r10 + movq %r10, 24(%rdi) + adcq %r15, %r9 + movq %r9, 32(%rdi) + adcq %r14, %r8 + movq %r8, 40(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %rax + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r11 + sbbq 24(%rcx), %r10 + sbbq 32(%rcx), %r9 + sbbq 40(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne LBB89_2 +## BB#1: ## %nocarry + movq %rax, (%rdi) + movq %rdx, 8(%rdi) + movq %r11, 16(%rdi) + movq %r10, 24(%rdi) + movq %r9, 32(%rdi) + movq %r8, 40(%rdi) +LBB89_2: ## %carry + popq %rbx + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_addNF6L + .p2align 4, 0x90 +_mcl_fp_addNF6L: ## @mcl_fp_addNF6L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 40(%rdx), %r8 + movq 32(%rdx), %r9 + movq 24(%rdx), %r10 + movq 16(%rdx), %r11 + movq (%rdx), %r15 + movq 8(%rdx), %r14 + addq (%rsi), %r15 + adcq 8(%rsi), %r14 + adcq 16(%rsi), %r11 + adcq 24(%rsi), %r10 + adcq 32(%rsi), %r9 + adcq 40(%rsi), %r8 + movq %r15, %rsi + subq (%rcx), %rsi + movq %r14, %rbx + sbbq 8(%rcx), %rbx + movq %r11, %rdx + sbbq 16(%rcx), %rdx + movq %r10, %r13 + sbbq 24(%rcx), %r13 + movq %r9, %r12 + sbbq 32(%rcx), %r12 + movq %r8, %rax + sbbq 40(%rcx), %rax + movq %rax, %rcx + sarq $63, %rcx + cmovsq %r15, %rsi + movq %rsi, (%rdi) + cmovsq %r14, %rbx + movq %rbx, 8(%rdi) + cmovsq %r11, %rdx + movq %rdx, 16(%rdi) + cmovsq %r10, %r13 + movq %r13, 24(%rdi) + cmovsq %r9, %r12 + movq %r12, 32(%rdi) + cmovsq %r8, %rax + movq %rax, 40(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_sub6L + .p2align 4, 0x90 +_mcl_fp_sub6L: ## @mcl_fp_sub6L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 40(%rdx), %r14 + movq 40(%rsi), %r8 + movq 32(%rsi), %r9 + movq 24(%rsi), %r10 + movq 16(%rsi), %r11 + movq (%rsi), %rax + movq 8(%rsi), %rsi + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %rsi + movq 24(%rdx), %r15 + movq 32(%rdx), %r12 + sbbq 16(%rdx), %r11 + movq %rax, (%rdi) + movq %rsi, 8(%rdi) + movq %r11, 16(%rdi) + sbbq %r15, %r10 + movq %r10, 24(%rdi) + sbbq %r12, %r9 + movq %r9, 32(%rdi) + sbbq %r14, %r8 + movq %r8, 40(%rdi) + sbbq $0, %rbx + testb $1, %bl + je LBB91_2 +## BB#1: ## %carry + movq 40(%rcx), %r14 + movq 32(%rcx), %r15 + movq 24(%rcx), %r12 + movq 8(%rcx), %rbx + movq 16(%rcx), %rdx + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %rsi, %rbx + movq %rbx, 8(%rdi) + adcq %r11, %rdx + movq %rdx, 16(%rdi) + adcq %r10, %r12 + movq %r12, 24(%rdi) + adcq %r9, %r15 + movq %r15, 32(%rdi) + adcq %r8, %r14 + movq %r14, 40(%rdi) +LBB91_2: ## %nocarry + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_subNF6L + .p2align 4, 0x90 +_mcl_fp_subNF6L: ## @mcl_fp_subNF6L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq 
%r13 + pushq %r12 + pushq %rbx + movdqu (%rdx), %xmm0 + movdqu 16(%rdx), %xmm1 + movdqu 32(%rdx), %xmm2 + pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1] + movd %xmm3, %r11 + movdqu (%rsi), %xmm3 + movdqu 16(%rsi), %xmm4 + movdqu 32(%rsi), %xmm5 + pshufd $78, %xmm5, %xmm6 ## xmm6 = xmm5[2,3,0,1] + movd %xmm6, %rax + movd %xmm2, %r14 + movd %xmm5, %r8 + pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] + movd %xmm2, %r15 + pshufd $78, %xmm4, %xmm2 ## xmm2 = xmm4[2,3,0,1] + movd %xmm2, %r9 + movd %xmm1, %r12 + movd %xmm4, %r10 + pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] + movd %xmm1, %rbx + pshufd $78, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,0,1] + movd %xmm1, %r13 + movd %xmm0, %rsi + movd %xmm3, %rbp + subq %rsi, %rbp + sbbq %rbx, %r13 + sbbq %r12, %r10 + sbbq %r15, %r9 + sbbq %r14, %r8 + sbbq %r11, %rax + movq %rax, %rsi + sarq $63, %rsi + movq %rsi, %rbx + shldq $1, %rax, %rbx + andq (%rcx), %rbx + movq 40(%rcx), %r11 + andq %rsi, %r11 + movq 32(%rcx), %r14 + andq %rsi, %r14 + movq 24(%rcx), %r15 + andq %rsi, %r15 + movq 16(%rcx), %rdx + andq %rsi, %rdx + rolq %rsi + andq 8(%rcx), %rsi + addq %rbp, %rbx + movq %rbx, (%rdi) + adcq %r13, %rsi + movq %rsi, 8(%rdi) + adcq %r10, %rdx + movq %rdx, 16(%rdi) + adcq %r9, %r15 + movq %r15, 24(%rdi) + adcq %r8, %r14 + movq %r14, 32(%rdi) + adcq %rax, %r11 + movq %r11, 40(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_add6L + .p2align 4, 0x90 +_mcl_fpDbl_add6L: ## @mcl_fpDbl_add6L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 88(%rdx), %rax + movq %rax, -8(%rsp) ## 8-byte Spill + movq 80(%rdx), %rax + movq %rax, -16(%rsp) ## 8-byte Spill + movq 72(%rdx), %r14 + movq 64(%rdx), %r15 + movq 24(%rsi), %rbp + movq 32(%rsi), %r13 + movq 16(%rdx), %r12 + movq (%rdx), %rbx + movq 8(%rdx), %rax + addq (%rsi), %rbx + adcq 8(%rsi), %rax + adcq 16(%rsi), %r12 + adcq 24(%rdx), %rbp + adcq 32(%rdx), %r13 + movq 56(%rdx), %r11 + movq 48(%rdx), %r9 + movq 40(%rdx), %rdx + movq %rbx, (%rdi) + movq 88(%rsi), %r8 + movq %rax, 8(%rdi) + movq 80(%rsi), %r10 + movq %r12, 16(%rdi) + movq 72(%rsi), %r12 + movq %rbp, 24(%rdi) + movq 40(%rsi), %rax + adcq %rdx, %rax + movq 64(%rsi), %rdx + movq %r13, 32(%rdi) + movq 56(%rsi), %r13 + movq 48(%rsi), %rbp + adcq %r9, %rbp + movq %rax, 40(%rdi) + adcq %r11, %r13 + adcq %r15, %rdx + adcq %r14, %r12 + adcq -16(%rsp), %r10 ## 8-byte Folded Reload + adcq -8(%rsp), %r8 ## 8-byte Folded Reload + sbbq %rax, %rax + andl $1, %eax + movq %rbp, %rsi + subq (%rcx), %rsi + movq %r13, %rbx + sbbq 8(%rcx), %rbx + movq %rdx, %r9 + sbbq 16(%rcx), %r9 + movq %r12, %r11 + sbbq 24(%rcx), %r11 + movq %r10, %r14 + sbbq 32(%rcx), %r14 + movq %r8, %r15 + sbbq 40(%rcx), %r15 + sbbq $0, %rax + andl $1, %eax + cmovneq %rbp, %rsi + movq %rsi, 48(%rdi) + testb %al, %al + cmovneq %r13, %rbx + movq %rbx, 56(%rdi) + cmovneq %rdx, %r9 + movq %r9, 64(%rdi) + cmovneq %r12, %r11 + movq %r11, 72(%rdi) + cmovneq %r10, %r14 + movq %r14, 80(%rdi) + cmovneq %r8, %r15 + movq %r15, 88(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sub6L + .p2align 4, 0x90 +_mcl_fpDbl_sub6L: ## @mcl_fpDbl_sub6L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 88(%rdx), %r9 + movq 80(%rdx), %r10 + movq 72(%rdx), %r14 + movq 16(%rsi), %r8 + movq (%rsi), %r15 + movq 8(%rsi), %r11 + xorl %eax, %eax + subq (%rdx), %r15 + sbbq 8(%rdx), %r11 + sbbq 16(%rdx), %r8 + movq 24(%rsi), %rbx + 
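+## the borrow chain continues through limbs 3 and 4 of the low half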
sbbq 24(%rdx), %rbx + movq 32(%rsi), %r12 + sbbq 32(%rdx), %r12 + movq 64(%rdx), %r13 + movq %r15, (%rdi) + movq 56(%rdx), %rbp + movq %r11, 8(%rdi) + movq 48(%rdx), %r15 + movq 40(%rdx), %rdx + movq %r8, 16(%rdi) + movq 88(%rsi), %r8 + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %rdx, %rbx + movq 80(%rsi), %r11 + movq %r12, 32(%rdi) + movq 48(%rsi), %rdx + sbbq %r15, %rdx + movq 72(%rsi), %r15 + movq %rbx, 40(%rdi) + movq 64(%rsi), %r12 + movq 56(%rsi), %rsi + sbbq %rbp, %rsi + sbbq %r13, %r12 + sbbq %r14, %r15 + sbbq %r10, %r11 + sbbq %r9, %r8 + movl $0, %ebp + sbbq $0, %rbp + andl $1, %ebp + movq (%rcx), %r14 + cmoveq %rax, %r14 + testb %bpl, %bpl + movq 16(%rcx), %r9 + cmoveq %rax, %r9 + movq 8(%rcx), %rbp + cmoveq %rax, %rbp + movq 40(%rcx), %r10 + cmoveq %rax, %r10 + movq 32(%rcx), %rbx + cmoveq %rax, %rbx + cmovneq 24(%rcx), %rax + addq %rdx, %r14 + movq %r14, 48(%rdi) + adcq %rsi, %rbp + movq %rbp, 56(%rdi) + adcq %r12, %r9 + movq %r9, 64(%rdi) + adcq %r15, %rax + movq %rax, 72(%rdi) + adcq %r11, %rbx + movq %rbx, 80(%rdi) + adcq %r8, %r10 + movq %r10, 88(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mulUnitPre7L + .p2align 4, 0x90 +_mcl_fp_mulUnitPre7L: ## @mcl_fp_mulUnitPre7L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rcx, %rax + mulq 48(%rsi) + movq %rdx, %r10 + movq %rax, -8(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq 40(%rsi) + movq %rdx, %r11 + movq %rax, -16(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq 32(%rsi) + movq %rdx, %r15 + movq %rax, %r14 + movq %rcx, %rax + mulq 24(%rsi) + movq %rdx, %r13 + movq %rax, %r12 + movq %rcx, %rax + mulq 16(%rsi) + movq %rdx, %rbx + movq %rax, %rbp + movq %rcx, %rax + mulq 8(%rsi) + movq %rdx, %r8 + movq %rax, %r9 + movq %rcx, %rax + mulq (%rsi) + movq %rax, (%rdi) + addq %r9, %rdx + movq %rdx, 8(%rdi) + adcq %rbp, %r8 + movq %r8, 16(%rdi) + adcq %r12, %rbx + movq %rbx, 24(%rdi) + adcq %r14, %r13 + movq %r13, 32(%rdi) + adcq -16(%rsp), %r15 ## 8-byte Folded Reload + movq %r15, 40(%rdi) + adcq -8(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, 48(%rdi) + adcq $0, %r10 + movq %r10, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_mulPre7L + .p2align 4, 0x90 +_mcl_fpDbl_mulPre7L: ## @mcl_fpDbl_mulPre7L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $16, %rsp + movq %rdx, -56(%rsp) ## 8-byte Spill + movq %rsi, %r9 + movq %rdi, -8(%rsp) ## 8-byte Spill + movq (%r9), %rax + movq %rax, -64(%rsp) ## 8-byte Spill + movq (%rdx), %rsi + mulq %rsi + movq %rdx, -120(%rsp) ## 8-byte Spill + movq 32(%r9), %rbp + movq %rbp, -88(%rsp) ## 8-byte Spill + movq 40(%r9), %rcx + movq %rcx, -128(%rsp) ## 8-byte Spill + movq 48(%r9), %r14 + movq %rax, (%rdi) + movq %r14, %rax + mulq %rsi + movq %rdx, %rdi + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq %rsi + movq %rdx, %rcx + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %rsi + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rdx, %rbp + movq 24(%r9), %r8 + movq %r8, %rax + mulq %rsi + movq %rax, %r15 + movq %rdx, %rbx + movq 16(%r9), %rax + movq %rax, -112(%rsp) ## 8-byte Spill + mulq %rsi + movq %rax, %r13 + movq %rdx, %r12 + movq 8(%r9), %r11 + movq %r11, %rax + mulq %rsi + movq %rdx, %rsi + movq %rax, %r10 + addq -120(%rsp), %r10 ## 8-byte Folded Reload + adcq %r13, %rsi + adcq %r15, %r12 + adcq -104(%rsp), %rbx ## 8-byte Folded Reload 
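+## combine the low/high words of the seven x[i]*y[0] products into column 0 of the result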
+ adcq -96(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, -72(%rsp) ## 8-byte Spill + adcq -80(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -104(%rsp) ## 8-byte Spill + adcq $0, %rdi + movq %rdi, -96(%rsp) ## 8-byte Spill + movq -56(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rcx + movq %r14, %rax + mulq %rcx + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -80(%rsp) ## 8-byte Spill + movq -128(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, %r13 + movq -88(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, %r15 + movq %r8, %rax + mulq %rcx + movq %rdx, %r8 + movq %rax, %r14 + movq -112(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, %rbp + movq %r11, %rax + mulq %rcx + movq %rdx, %r11 + movq %rax, %rdi + movq -64(%rsp), %rax ## 8-byte Reload + mulq %rcx + addq %r10, %rax + movq -8(%rsp), %r10 ## 8-byte Reload + movq %rax, 8(%r10) + adcq %rsi, %rdi + adcq %r12, %rbp + adcq %rbx, %r14 + adcq -72(%rsp), %r15 ## 8-byte Folded Reload + adcq -104(%rsp), %r13 ## 8-byte Folded Reload + movq -80(%rsp), %rax ## 8-byte Reload + adcq -96(%rsp), %rax ## 8-byte Folded Reload + sbbq %rsi, %rsi + andl $1, %esi + addq %rdx, %rdi + adcq %r11, %rbp + adcq -112(%rsp), %r14 ## 8-byte Folded Reload + adcq %r8, %r15 + adcq -88(%rsp), %r13 ## 8-byte Folded Reload + adcq -128(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -80(%rsp) ## 8-byte Spill + adcq -120(%rsp), %rsi ## 8-byte Folded Reload + movq 48(%r9), %rdx + movq %rdx, -64(%rsp) ## 8-byte Spill + movq -56(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rcx + movq %rdx, %rax + mulq %rcx + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rdx, -104(%rsp) ## 8-byte Spill + movq 40(%r9), %rax + movq %rax, -88(%rsp) ## 8-byte Spill + mulq %rcx + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rdx, -32(%rsp) ## 8-byte Spill + movq 32(%r9), %rax + movq %rax, -96(%rsp) ## 8-byte Spill + mulq %rcx + movq %rax, %r12 + movq %rdx, -40(%rsp) ## 8-byte Spill + movq 24(%r9), %rax + movq %rax, -72(%rsp) ## 8-byte Spill + mulq %rcx + movq %rax, %rbx + movq %rdx, -48(%rsp) ## 8-byte Spill + movq 16(%r9), %rax + movq %rax, -112(%rsp) ## 8-byte Spill + mulq %rcx + movq %rax, %r8 + movq %rdx, 8(%rsp) ## 8-byte Spill + movq 8(%r9), %rax + movq %rax, -24(%rsp) ## 8-byte Spill + mulq %rcx + movq %rax, %r11 + movq %rdx, (%rsp) ## 8-byte Spill + movq (%r9), %rax + movq %rax, -16(%rsp) ## 8-byte Spill + mulq %rcx + addq %rdi, %rax + movq %rax, 16(%r10) + adcq %rbp, %r11 + adcq %r14, %r8 + adcq %r15, %rbx + adcq %r13, %r12 + movq -128(%rsp), %rdi ## 8-byte Reload + adcq -80(%rsp), %rdi ## 8-byte Folded Reload + movq -120(%rsp), %rax ## 8-byte Reload + adcq %rsi, %rax + sbbq %rcx, %rcx + andl $1, %ecx + addq %rdx, %r11 + adcq (%rsp), %r8 ## 8-byte Folded Reload + adcq 8(%rsp), %rbx ## 8-byte Folded Reload + adcq -48(%rsp), %r12 ## 8-byte Folded Reload + adcq -40(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, -128(%rsp) ## 8-byte Spill + adcq -32(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -120(%rsp) ## 8-byte Spill + adcq -104(%rsp), %rcx ## 8-byte Folded Reload + movq -56(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rbp + movq -64(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, -80(%rsp) ## 8-byte Spill + movq -88(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, -64(%rsp) ## 8-byte Spill + movq -96(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq 
%rdx, -96(%rsp) ## 8-byte Spill + movq %rax, %r13 + movq -72(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rax, %r15 + movq -112(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, %rdi + movq -24(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq %rdx, %r14 + movq %rax, %r10 + movq -16(%rsp), %rax ## 8-byte Reload + mulq %rbp + addq %r11, %rax + movq -8(%rsp), %rsi ## 8-byte Reload + movq %rax, 24(%rsi) + adcq %r8, %r10 + adcq %rbx, %rdi + adcq %r12, %r15 + adcq -128(%rsp), %r13 ## 8-byte Folded Reload + movq -64(%rsp), %rbp ## 8-byte Reload + adcq -120(%rsp), %rbp ## 8-byte Folded Reload + movq -80(%rsp), %rax ## 8-byte Reload + adcq %rcx, %rax + sbbq %rsi, %rsi + andl $1, %esi + addq %rdx, %r10 + adcq %r14, %rdi + adcq -112(%rsp), %r15 ## 8-byte Folded Reload + adcq -72(%rsp), %r13 ## 8-byte Folded Reload + adcq -96(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, -64(%rsp) ## 8-byte Spill + adcq -88(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -80(%rsp) ## 8-byte Spill + adcq -104(%rsp), %rsi ## 8-byte Folded Reload + movq 48(%r9), %rax + movq %rax, -88(%rsp) ## 8-byte Spill + movq -56(%rsp), %rbx ## 8-byte Reload + movq 32(%rbx), %rcx + mulq %rcx + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rdx, -72(%rsp) ## 8-byte Spill + movq 40(%r9), %rax + movq %rax, -96(%rsp) ## 8-byte Spill + mulq %rcx + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rdx, -40(%rsp) ## 8-byte Spill + movq 32(%r9), %rax + movq %rax, -104(%rsp) ## 8-byte Spill + mulq %rcx + movq %rax, %r12 + movq %rdx, -48(%rsp) ## 8-byte Spill + movq 24(%r9), %rax + movq %rax, -112(%rsp) ## 8-byte Spill + mulq %rcx + movq %rax, %rbp + movq %rdx, 8(%rsp) ## 8-byte Spill + movq 16(%r9), %rax + movq %rax, -16(%rsp) ## 8-byte Spill + mulq %rcx + movq %rax, %r14 + movq %rdx, (%rsp) ## 8-byte Spill + movq 8(%r9), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + mulq %rcx + movq %rax, %r11 + movq %rdx, %r8 + movq (%r9), %rax + movq %rax, -24(%rsp) ## 8-byte Spill + mulq %rcx + addq %r10, %rax + movq -8(%rsp), %rcx ## 8-byte Reload + movq %rax, 32(%rcx) + adcq %rdi, %r11 + adcq %r15, %r14 + adcq %r13, %rbp + adcq -64(%rsp), %r12 ## 8-byte Folded Reload + movq -128(%rsp), %rcx ## 8-byte Reload + adcq -80(%rsp), %rcx ## 8-byte Folded Reload + movq -120(%rsp), %rax ## 8-byte Reload + adcq %rsi, %rax + sbbq %r13, %r13 + andl $1, %r13d + addq %rdx, %r11 + adcq %r8, %r14 + adcq (%rsp), %rbp ## 8-byte Folded Reload + adcq 8(%rsp), %r12 ## 8-byte Folded Reload + adcq -48(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -128(%rsp) ## 8-byte Spill + adcq -40(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -120(%rsp) ## 8-byte Spill + adcq -72(%rsp), %r13 ## 8-byte Folded Reload + movq 40(%rbx), %rcx + movq -88(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rax, %rdi + movq -96(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -64(%rsp) ## 8-byte Spill + movq %rax, %r10 + movq -104(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, %r15 + movq -112(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, %rbx + movq -16(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, %rsi + movq -32(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -16(%rsp) ## 8-byte Spill + movq %rax, %r8 + movq -24(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -40(%rsp) ## 8-byte Spill + addq %r11, %rax + movq 
-8(%rsp), %rcx ## 8-byte Reload + movq %rax, 40(%rcx) + adcq %r14, %r8 + adcq %rbp, %rsi + adcq %r12, %rbx + adcq -128(%rsp), %r15 ## 8-byte Folded Reload + adcq -120(%rsp), %r10 ## 8-byte Folded Reload + adcq %r13, %rdi + movq -56(%rsp), %rax ## 8-byte Reload + movq 48(%rax), %r11 + sbbq %rcx, %rcx + movq %r11, %rax + mulq 48(%r9) + movq %rdx, -56(%rsp) ## 8-byte Spill + movq %rax, -112(%rsp) ## 8-byte Spill + movq %r11, %rax + mulq 40(%r9) + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -32(%rsp) ## 8-byte Spill + movq %r11, %rax + mulq 32(%r9) + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, %r13 + movq %r11, %rax + mulq 24(%r9) + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rax, %rbp + movq %r11, %rax + mulq 16(%r9) + movq %rdx, -24(%rsp) ## 8-byte Spill + movq %rax, %r14 + movq %r11, %rax + mulq 8(%r9) + movq %rdx, -48(%rsp) ## 8-byte Spill + movq %rax, %r12 + movq %r11, %rax + mulq (%r9) + andl $1, %ecx + addq -40(%rsp), %r8 ## 8-byte Folded Reload + adcq -16(%rsp), %rsi ## 8-byte Folded Reload + adcq -104(%rsp), %rbx ## 8-byte Folded Reload + adcq -96(%rsp), %r15 ## 8-byte Folded Reload + adcq -88(%rsp), %r10 ## 8-byte Folded Reload + adcq -64(%rsp), %rdi ## 8-byte Folded Reload + adcq -80(%rsp), %rcx ## 8-byte Folded Reload + addq %rax, %r8 + movq -8(%rsp), %r9 ## 8-byte Reload + movq %r8, 48(%r9) + adcq %r12, %rsi + adcq %r14, %rbx + adcq %rbp, %r15 + adcq %r13, %r10 + adcq -32(%rsp), %rdi ## 8-byte Folded Reload + adcq -112(%rsp), %rcx ## 8-byte Folded Reload + sbbq %rax, %rax + andl $1, %eax + addq %rdx, %rsi + adcq -48(%rsp), %rbx ## 8-byte Folded Reload + movq %r9, %rdx + movq %rsi, 56(%rdx) + movq %rbx, 64(%rdx) + adcq -24(%rsp), %r15 ## 8-byte Folded Reload + movq %r15, 72(%rdx) + adcq -72(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, 80(%rdx) + adcq -128(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 88(%rdx) + adcq -120(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 96(%rdx) + adcq -56(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 104(%rdx) + addq $16, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sqrPre7L + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre7L: ## @mcl_fpDbl_sqrPre7L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $16, %rsp + movq %rsi, %r9 + movq %rdi, -24(%rsp) ## 8-byte Spill + movq 24(%r9), %r10 + movq %r10, -128(%rsp) ## 8-byte Spill + movq 32(%r9), %r14 + movq %r14, -88(%rsp) ## 8-byte Spill + movq 40(%r9), %rsi + movq %rsi, -80(%rsp) ## 8-byte Spill + movq 48(%r9), %rbp + movq %rbp, -120(%rsp) ## 8-byte Spill + movq (%r9), %rbx + movq %rbx, %rax + mulq %rbx + movq %rdx, %rcx + movq %rax, (%rdi) + movq %rbp, %rax + mulq %rbx + movq %rdx, %r11 + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq %rbx + movq %rdx, %r8 + movq %rax, -104(%rsp) ## 8-byte Spill + movq %r14, %rax + mulq %rbx + movq %rdx, %r13 + movq %rax, %rsi + movq %r10, %rax + mulq %rbx + movq %rax, %r14 + movq %rdx, %rdi + movq 16(%r9), %r15 + movq %r15, %rax + mulq %rbx + movq %rax, %r10 + movq %rdx, %r12 + movq 8(%r9), %rbp + movq %rbp, %rax + mulq %rbx + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + addq %rax, %rcx + adcq %rdx, %r10 + adcq %r14, %r12 + adcq %rsi, %rdi + adcq -104(%rsp), %r13 ## 8-byte Folded Reload + adcq -96(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, -104(%rsp) ## 8-byte Spill + adcq $0, %r11 + movq %r11, -96(%rsp) ## 8-byte Spill + movq -120(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq %rdx, -120(%rsp) ## 
8-byte Spill + movq %rax, %r8 + movq -80(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rax, %rsi + movq -88(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, %r11 + movq -128(%rsp), %rax ## 8-byte Reload + mulq %rbp + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, %r14 + movq %r15, %rax + mulq %rbp + movq %rdx, %r15 + movq %rax, %rbx + movq %rbp, %rax + mulq %rbp + movq %rax, %rbp + addq -72(%rsp), %rcx ## 8-byte Folded Reload + movq -24(%rsp), %rax ## 8-byte Reload + movq %rcx, 8(%rax) + adcq %r10, %rbp + adcq %r12, %rbx + adcq %rdi, %r14 + adcq %r13, %r11 + movq %rsi, %rax + adcq -104(%rsp), %rax ## 8-byte Folded Reload + adcq -96(%rsp), %r8 ## 8-byte Folded Reload + sbbq %rsi, %rsi + andl $1, %esi + addq -112(%rsp), %rbp ## 8-byte Folded Reload + adcq %rdx, %rbx + adcq %r15, %r14 + adcq -128(%rsp), %r11 ## 8-byte Folded Reload + adcq -88(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -48(%rsp) ## 8-byte Spill + adcq -80(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, -40(%rsp) ## 8-byte Spill + adcq -120(%rsp), %rsi ## 8-byte Folded Reload + movq 48(%r9), %rax + movq %rax, -112(%rsp) ## 8-byte Spill + movq 16(%r9), %rdi + mulq %rdi + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rdx, -96(%rsp) ## 8-byte Spill + movq 40(%r9), %rax + movq %rax, -80(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, -56(%rsp) ## 8-byte Spill + movq %rdx, -128(%rsp) ## 8-byte Spill + movq 32(%r9), %rax + movq %rax, -88(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r13 + movq %rdx, -32(%rsp) ## 8-byte Spill + movq 24(%r9), %rcx + movq %rcx, %rax + mulq %rdi + movq %rax, %r10 + movq %r10, -8(%rsp) ## 8-byte Spill + movq %rdx, %r12 + movq %r12, -72(%rsp) ## 8-byte Spill + movq 8(%r9), %rax + movq %rax, (%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r15 + movq %rdx, -64(%rsp) ## 8-byte Spill + movq (%r9), %rax + movq %rax, -104(%rsp) ## 8-byte Spill + mulq %rdi + movq %rdx, -16(%rsp) ## 8-byte Spill + movq %rax, %r8 + movq %rdi, %rax + mulq %rdi + movq %rax, %rdi + addq %rbp, %r8 + movq -24(%rsp), %rax ## 8-byte Reload + movq %r8, 16(%rax) + adcq %rbx, %r15 + adcq %r14, %rdi + adcq %r10, %r11 + adcq -48(%rsp), %r13 ## 8-byte Folded Reload + movq -56(%rsp), %r10 ## 8-byte Reload + adcq -40(%rsp), %r10 ## 8-byte Folded Reload + movq -120(%rsp), %rax ## 8-byte Reload + adcq %rsi, %rax + sbbq %rbp, %rbp + andl $1, %ebp + addq -16(%rsp), %r15 ## 8-byte Folded Reload + adcq -64(%rsp), %rdi ## 8-byte Folded Reload + adcq %rdx, %r11 + adcq %r12, %r13 + adcq -32(%rsp), %r10 ## 8-byte Folded Reload + adcq -128(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -120(%rsp) ## 8-byte Spill + adcq -96(%rsp), %rbp ## 8-byte Folded Reload + movq -112(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, %r14 + movq -80(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -32(%rsp) ## 8-byte Spill + movq %rax, -112(%rsp) ## 8-byte Spill + movq -88(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -40(%rsp) ## 8-byte Spill + movq %rax, %r8 + movq (%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -48(%rsp) ## 8-byte Spill + movq %rax, %r12 + movq -104(%rsp), %rax ## 8-byte Reload + mulq %rcx + movq %rdx, -64(%rsp) ## 8-byte Spill + movq %rax, %rbx + movq %rcx, %rax + mulq %rcx + movq %rdx, -56(%rsp) ## 8-byte Spill + addq %r15, %rbx + movq -24(%rsp), %rcx ## 8-byte Reload + movq %rbx, 24(%rcx) + adcq %rdi, %r12 + adcq -8(%rsp), %r11 ## 8-byte Folded Reload + adcq %r13, %rax + movq %rax, %r15 + 
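+## limb 3 of the square was just stored; the adc chain keeps folding this column's cross products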
movq %r8, %rsi + adcq %r10, %rsi + movq -112(%rsp), %rbx ## 8-byte Reload + adcq -120(%rsp), %rbx ## 8-byte Folded Reload + adcq %rbp, %r14 + sbbq %r8, %r8 + movq 8(%r9), %rcx + movq 40(%r9), %r13 + movq %rcx, %rax + mulq %r13 + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rdx, -80(%rsp) ## 8-byte Spill + movq (%r9), %rbp + movq %rbp, %rax + mulq %r13 + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rdx, -88(%rsp) ## 8-byte Spill + movq 32(%r9), %rdi + movq %rcx, %rax + mulq %rdi + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -8(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %rdi + movq %rax, %rbp + movq %rdx, (%rsp) ## 8-byte Spill + andl $1, %r8d + addq -64(%rsp), %r12 ## 8-byte Folded Reload + adcq -48(%rsp), %r11 ## 8-byte Folded Reload + adcq -72(%rsp), %r15 ## 8-byte Folded Reload + movq %r15, -64(%rsp) ## 8-byte Spill + adcq -56(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -56(%rsp) ## 8-byte Spill + adcq -40(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, -112(%rsp) ## 8-byte Spill + adcq -32(%rsp), %r14 ## 8-byte Folded Reload + adcq -128(%rsp), %r8 ## 8-byte Folded Reload + movq 48(%r9), %rax + movq %rax, -128(%rsp) ## 8-byte Spill + mulq %rdi + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rax, %rcx + movq %r13, %rax + mulq %rdi + movq %rax, %rsi + movq %rsi, -48(%rsp) ## 8-byte Spill + movq %rdx, %rbx + movq 24(%r9), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r15 + movq %rdx, -16(%rsp) ## 8-byte Spill + movq 16(%r9), %rax + movq %rax, -40(%rsp) ## 8-byte Spill + mulq %rdi + movq %rdx, 8(%rsp) ## 8-byte Spill + movq %rax, %r10 + movq %rdi, %rax + mulq %rdi + movq %rax, %rdi + addq %rbp, %r12 + movq -24(%rsp), %rbp ## 8-byte Reload + movq %r12, 32(%rbp) + adcq -8(%rsp), %r11 ## 8-byte Folded Reload + adcq -64(%rsp), %r10 ## 8-byte Folded Reload + adcq -56(%rsp), %r15 ## 8-byte Folded Reload + adcq -112(%rsp), %rdi ## 8-byte Folded Reload + adcq %rsi, %r14 + adcq %r8, %rcx + sbbq %rsi, %rsi + andl $1, %esi + addq (%rsp), %r11 ## 8-byte Folded Reload + adcq -120(%rsp), %r10 ## 8-byte Folded Reload + adcq 8(%rsp), %r15 ## 8-byte Folded Reload + adcq -16(%rsp), %rdi ## 8-byte Folded Reload + adcq %rdx, %r14 + adcq %rbx, %rcx + adcq -72(%rsp), %rsi ## 8-byte Folded Reload + movq -128(%rsp), %rax ## 8-byte Reload + mulq %r13 + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, -120(%rsp) ## 8-byte Spill + movq -32(%rsp), %rax ## 8-byte Reload + mulq %r13 + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, %r8 + movq -40(%rsp), %rax ## 8-byte Reload + mulq %r13 + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rax, %r12 + movq %r13, %rax + mulq %r13 + movq %rax, %r13 + addq -104(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, 40(%rbp) + adcq -96(%rsp), %r10 ## 8-byte Folded Reload + adcq %r15, %r12 + adcq %rdi, %r8 + movq %r14, %rax + adcq -48(%rsp), %rax ## 8-byte Folded Reload + adcq %rcx, %r13 + movq -120(%rsp), %rcx ## 8-byte Reload + adcq %rsi, %rcx + sbbq %r14, %r14 + andl $1, %r14d + addq -88(%rsp), %r10 ## 8-byte Folded Reload + adcq -80(%rsp), %r12 ## 8-byte Folded Reload + adcq -72(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, -104(%rsp) ## 8-byte Spill + adcq -128(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -96(%rsp) ## 8-byte Spill + adcq %rbx, %r13 + adcq %rdx, %rcx + movq %rcx, -120(%rsp) ## 8-byte Spill + adcq -112(%rsp), %r14 ## 8-byte Folded Reload + movq 48(%r9), %rcx + movq %rcx, %rax + mulq 40(%r9) + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, %r8 + movq %rcx, %rax + mulq 32(%r9) + movq %rdx, -80(%rsp) ## 8-byte Spill + 
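+## Partial products against the top source limbs 40(%r9) and 48(%r9) feed the high half of the double-width result.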
movq %rax, %rbx + movq %rcx, %rax + mulq 24(%r9) + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, %rbp + movq %rcx, %rax + mulq 16(%r9) + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, %r11 + movq %rcx, %rax + mulq 8(%r9) + movq %rdx, %r15 + movq %rax, %rdi + movq %rcx, %rax + mulq (%r9) + movq %rdx, %r9 + movq %rax, %rsi + movq %rcx, %rax + mulq %rcx + addq %r10, %rsi + movq -24(%rsp), %r10 ## 8-byte Reload + movq %rsi, 48(%r10) + adcq %r12, %rdi + adcq -104(%rsp), %r11 ## 8-byte Folded Reload + adcq -96(%rsp), %rbp ## 8-byte Folded Reload + adcq %r13, %rbx + adcq -120(%rsp), %r8 ## 8-byte Folded Reload + adcq %r14, %rax + sbbq %rcx, %rcx + andl $1, %ecx + addq %r9, %rdi + adcq %r15, %r11 + movq %r10, %rsi + movq %rdi, 56(%rsi) + movq %r11, 64(%rsi) + adcq -128(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 72(%rsi) + adcq -88(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, 80(%rsi) + adcq -80(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, 88(%rsi) + adcq -112(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 96(%rsi) + adcq %rdx, %rcx + movq %rcx, 104(%rsi) + addq $16, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mont7L + .p2align 4, 0x90 +_mcl_fp_mont7L: ## @mcl_fp_mont7L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $88, %rsp + movq %rdx, -16(%rsp) ## 8-byte Spill + movq %rdi, 80(%rsp) ## 8-byte Spill + movq 48(%rsi), %rax + movq %rax, -24(%rsp) ## 8-byte Spill + movq (%rdx), %rdi + mulq %rdi + movq %rax, 8(%rsp) ## 8-byte Spill + movq %rdx, %r12 + movq 40(%rsi), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, (%rsp) ## 8-byte Spill + movq %rdx, %r8 + movq 32(%rsi), %rax + movq %rax, -40(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, -8(%rsp) ## 8-byte Spill + movq %rdx, %r9 + movq 24(%rsi), %rax + movq %rax, -48(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r14 + movq %rdx, %r11 + movq 16(%rsi), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + mulq %rdi + movq %rax, %r15 + movq %rdx, %rbx + movq (%rsi), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq 8(%rsi), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + mulq %rdi + movq %rdx, %r13 + movq %rax, %rsi + movq %rbp, %rax + mulq %rdi + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rdx, %r10 + addq %rsi, %r10 + adcq %r15, %r13 + adcq %r14, %rbx + movq %rbx, -72(%rsp) ## 8-byte Spill + adcq -8(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, -56(%rsp) ## 8-byte Spill + adcq (%rsp), %r9 ## 8-byte Folded Reload + movq %r9, -112(%rsp) ## 8-byte Spill + adcq 8(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, -104(%rsp) ## 8-byte Spill + adcq $0, %r12 + movq %r12, -96(%rsp) ## 8-byte Spill + movq -8(%rcx), %rdx + movq %rdx, 40(%rsp) ## 8-byte Spill + movq %rax, %rdi + imulq %rdx, %rdi + movq 48(%rcx), %rdx + movq %rdx, 8(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rdx + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rdx, -120(%rsp) ## 8-byte Spill + movq 40(%rcx), %rdx + movq %rdx, (%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rdx + movq %rax, -64(%rsp) ## 8-byte Spill + movq %rdx, -128(%rsp) ## 8-byte Spill + movq 32(%rcx), %rdx + movq %rdx, -8(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rdx + movq %rax, %r14 + movq %rdx, %r9 + movq 24(%rcx), %rdx + movq %rdx, 64(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rdx + movq %rax, %r8 + movq %rdx, %rbx + movq 16(%rcx), %rdx + movq %rdx, 56(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rdx + movq %rax, %r15 + movq %rdx, %rbp + movq (%rcx), %rsi + movq %rsi, 
48(%rsp) ## 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, 72(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq %rcx + movq %rdx, %rcx + movq %rax, %r12 + movq %rdi, %rax + mulq %rsi + movq %rdx, %r11 + addq %r12, %r11 + adcq %r15, %rcx + adcq %r8, %rbp + adcq %r14, %rbx + adcq -64(%rsp), %r9 ## 8-byte Folded Reload + movq -128(%rsp), %rdx ## 8-byte Reload + adcq -88(%rsp), %rdx ## 8-byte Folded Reload + movq -120(%rsp), %rdi ## 8-byte Reload + adcq $0, %rdi + addq -80(%rsp), %rax ## 8-byte Folded Reload + adcq %r10, %r11 + adcq %r13, %rcx + adcq -72(%rsp), %rbp ## 8-byte Folded Reload + adcq -56(%rsp), %rbx ## 8-byte Folded Reload + adcq -112(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, -56(%rsp) ## 8-byte Spill + adcq -104(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + adcq -96(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, -120(%rsp) ## 8-byte Spill + sbbq %rsi, %rsi + andl $1, %esi + movq -16(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdi + movq %rdi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, -64(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %r12 + movq %rdi, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %r9 + movq %rdi, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rax, %r8 + movq %rdx, %r14 + addq %r9, %r14 + adcq %r12, %r13 + adcq -64(%rsp), %r15 ## 8-byte Folded Reload + adcq -88(%rsp), %r10 ## 8-byte Folded Reload + movq -112(%rsp), %rdi ## 8-byte Reload + adcq -80(%rsp), %rdi ## 8-byte Folded Reload + movq -104(%rsp), %rdx ## 8-byte Reload + adcq -72(%rsp), %rdx ## 8-byte Folded Reload + movq -96(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %r11, %r8 + adcq %rcx, %r14 + adcq %rbp, %r13 + adcq %rbx, %r15 + adcq -56(%rsp), %r10 ## 8-byte Folded Reload + adcq -128(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, -112(%rsp) ## 8-byte Spill + adcq -120(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + adcq %rsi, %rax + movq %rax, -96(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -56(%rsp) ## 8-byte Spill + movq %r8, %rcx + imulq 40(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq 64(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %rbx + movq %rcx, %rax + mulq 56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, %rdi + movq %rcx, %rax + mulq 72(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %rbp + movq %rcx, %rax + mulq 48(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + addq %rbp, %rcx + adcq %rdi, %rsi + adcq %rbx, %r9 + adcq -88(%rsp), %r12 ## 8-byte Folded Reload + adcq -80(%rsp), %r11 ## 8-byte Folded Reload + movq 
-128(%rsp), %rdi ## 8-byte Reload + adcq -72(%rsp), %rdi ## 8-byte Folded Reload + movq -120(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r8, %rax + adcq %r14, %rcx + adcq %r13, %rsi + adcq %r15, %r9 + adcq %r10, %r12 + adcq -112(%rsp), %r11 ## 8-byte Folded Reload + adcq -104(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, -128(%rsp) ## 8-byte Spill + adcq -96(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + adcq $0, -56(%rsp) ## 8-byte Folded Spill + movq -16(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rbx + movq %rbx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, -64(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r8 + movq %rbx, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r15 + movq %rbx, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rax, %r14 + movq %rdx, %r10 + addq %r15, %r10 + adcq %r8, %rdi + adcq -64(%rsp), %rbp ## 8-byte Folded Reload + adcq -88(%rsp), %r13 ## 8-byte Folded Reload + movq -112(%rsp), %rbx ## 8-byte Reload + adcq -80(%rsp), %rbx ## 8-byte Folded Reload + movq -104(%rsp), %rdx ## 8-byte Reload + adcq -72(%rsp), %rdx ## 8-byte Folded Reload + movq -96(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %rcx, %r14 + adcq %rsi, %r10 + adcq %r9, %rdi + adcq %r12, %rbp + adcq %r11, %r13 + adcq -128(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, -112(%rsp) ## 8-byte Spill + adcq -120(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + adcq -56(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -96(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -56(%rsp) ## 8-byte Spill + movq %r14, %rbx + imulq 40(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq 64(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, -64(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq 56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r15 + movq %rbx, %rax + mulq 72(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r12 + movq %rbx, %rax + mulq 48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + addq %r12, %r11 + adcq %r15, %r8 + adcq -64(%rsp), %rsi ## 8-byte Folded Reload + adcq -88(%rsp), %rcx ## 8-byte Folded Reload + adcq -80(%rsp), %r9 ## 8-byte Folded Reload + movq -128(%rsp), %rbx ## 8-byte Reload + adcq -72(%rsp), %rbx ## 8-byte Folded Reload + movq -120(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r14, %rax + adcq %r10, %r11 + adcq %rdi, %r8 + adcq %rbp, %rsi + adcq %r13, %rcx + adcq -112(%rsp), %r9 ## 8-byte Folded Reload + adcq -104(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, -128(%rsp) ## 8-byte Spill + adcq -96(%rsp), %rdx ## 8-byte 
Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + adcq $0, -56(%rsp) ## 8-byte Folded Spill + movq -16(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rbx + movq %rbx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, -64(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r14 + movq %rbx, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r15 + movq %rbx, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rax, %r10 + movq %rdx, %r13 + addq %r15, %r13 + adcq %r14, %rdi + adcq -64(%rsp), %rbp ## 8-byte Folded Reload + adcq -88(%rsp), %r12 ## 8-byte Folded Reload + movq -112(%rsp), %rbx ## 8-byte Reload + adcq -80(%rsp), %rbx ## 8-byte Folded Reload + movq -104(%rsp), %rdx ## 8-byte Reload + adcq -72(%rsp), %rdx ## 8-byte Folded Reload + movq -96(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %r11, %r10 + adcq %r8, %r13 + adcq %rsi, %rdi + adcq %rcx, %rbp + adcq %r9, %r12 + adcq -128(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, -112(%rsp) ## 8-byte Spill + adcq -120(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + adcq -56(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -96(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -56(%rsp) ## 8-byte Spill + movq %r10, %rbx + imulq 40(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq 64(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, -64(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq 56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r14 + movq %rbx, %rax + mulq 72(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r15 + movq %rbx, %rax + mulq 48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + addq %r15, %r11 + adcq %r14, %r8 + adcq -64(%rsp), %rsi ## 8-byte Folded Reload + adcq -88(%rsp), %rcx ## 8-byte Folded Reload + adcq -80(%rsp), %r9 ## 8-byte Folded Reload + movq -128(%rsp), %rbx ## 8-byte Reload + adcq -72(%rsp), %rbx ## 8-byte Folded Reload + movq -120(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r10, %rax + adcq %r13, %r11 + adcq %rdi, %r8 + adcq %rbp, %rsi + adcq %r12, %rcx + adcq -112(%rsp), %r9 ## 8-byte Folded Reload + adcq -104(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, -128(%rsp) ## 8-byte Spill + adcq -96(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + adcq $0, -56(%rsp) ## 8-byte Folded Spill + movq -16(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rbx + movq %rbx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte 
Spill + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -64(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r13 + movq %rbx, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r14 + movq %rbx, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rax, %r10 + movq %rdx, %r12 + addq %r14, %r12 + adcq %r13, %rdi + adcq -64(%rsp), %rbp ## 8-byte Folded Reload + adcq -88(%rsp), %r15 ## 8-byte Folded Reload + movq -112(%rsp), %rbx ## 8-byte Reload + adcq -80(%rsp), %rbx ## 8-byte Folded Reload + movq -104(%rsp), %rdx ## 8-byte Reload + adcq -72(%rsp), %rdx ## 8-byte Folded Reload + movq -96(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %r11, %r10 + adcq %r8, %r12 + adcq %rsi, %rdi + adcq %rcx, %rbp + adcq %r9, %r15 + adcq -128(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, -112(%rsp) ## 8-byte Spill + adcq -120(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + adcq -56(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -96(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -120(%rsp) ## 8-byte Spill + movq %r10, %rcx + imulq 40(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, -56(%rsp) ## 8-byte Spill + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq 64(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -64(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq 56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r14 + movq %rcx, %rax + mulq 72(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r8 + movq %rcx, %rax + mulq 48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + addq %r8, %r11 + adcq %r14, %rbx + adcq -64(%rsp), %rsi ## 8-byte Folded Reload + adcq -88(%rsp), %r9 ## 8-byte Folded Reload + adcq -80(%rsp), %r13 ## 8-byte Folded Reload + movq -56(%rsp), %rdx ## 8-byte Reload + adcq -72(%rsp), %rdx ## 8-byte Folded Reload + movq -128(%rsp), %rcx ## 8-byte Reload + adcq $0, %rcx + addq %r10, %rax + adcq %r12, %r11 + adcq %rdi, %rbx + adcq %rbp, %rsi + adcq %r15, %r9 + adcq -112(%rsp), %r13 ## 8-byte Folded Reload + adcq -104(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -56(%rsp) ## 8-byte Spill + adcq -96(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -128(%rsp) ## 8-byte Spill + movq -120(%rsp), %r15 ## 8-byte Reload + adcq $0, %r15 + movq -16(%rsp), %rax ## 8-byte Reload + movq 40(%rax), %rcx + movq %rcx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -64(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + 
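+## Sixth word of the unrolled Montgomery multiply: accumulate the a[j]*b[5] partial products, then fold in m*N with m = t0*n' (the n' constant loaded from -8(%rcx) is spilled at 40(%rsp)); the same interleaved reduction step repeats for every word of b.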
movq %rdx, %rbp + movq %rax, %r12 + movq %rcx, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r14 + movq %rcx, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rax, %r10 + movq %rdx, %r8 + addq %r14, %r8 + adcq %r12, %rdi + adcq -64(%rsp), %rbp ## 8-byte Folded Reload + movq -120(%rsp), %r14 ## 8-byte Reload + adcq -88(%rsp), %r14 ## 8-byte Folded Reload + movq -112(%rsp), %rdx ## 8-byte Reload + adcq -80(%rsp), %rdx ## 8-byte Folded Reload + movq -104(%rsp), %rcx ## 8-byte Reload + adcq -72(%rsp), %rcx ## 8-byte Folded Reload + movq -96(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %r11, %r10 + adcq %rbx, %r8 + adcq %rsi, %rdi + adcq %r9, %rbp + adcq %r13, %r14 + movq %r14, -120(%rsp) ## 8-byte Spill + adcq -56(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + adcq -128(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -104(%rsp) ## 8-byte Spill + adcq %r15, %rax + movq %rax, -96(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -56(%rsp) ## 8-byte Spill + movq %r10, %rcx + imulq 40(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq 64(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, -64(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq 56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %r13 + movq %rcx, %rax + mulq 72(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r9 + movq %rcx, %rax + mulq 48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + addq %r9, %r11 + adcq %r13, %rbx + adcq -64(%rsp), %r15 ## 8-byte Folded Reload + adcq -88(%rsp), %r12 ## 8-byte Folded Reload + adcq -80(%rsp), %rsi ## 8-byte Folded Reload + adcq -72(%rsp), %r14 ## 8-byte Folded Reload + movq -128(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r10, %rax + adcq %r8, %r11 + adcq %rdi, %rbx + adcq %rbp, %r15 + adcq -120(%rsp), %r12 ## 8-byte Folded Reload + adcq -112(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -112(%rsp) ## 8-byte Spill + adcq -104(%rsp), %r14 ## 8-byte Folded Reload + movq %r14, -104(%rsp) ## 8-byte Spill + adcq -96(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq -56(%rsp), %r8 ## 8-byte Reload + adcq $0, %r8 + movq -16(%rsp), %rax ## 8-byte Reload + movq 48(%rax), %rcx + movq %rcx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -16(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, -24(%rsp) ## 8-byte Spill + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -40(%rsp) ## 8-byte Folded Reload + movq %rdx, -32(%rsp) ## 8-byte Spill + movq %rax, -40(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -48(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %rdi + movq %rcx, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, %rbp + movq %rcx, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rax, %rsi + movq %rdx, %r10 + addq %rbp, %r10 + adcq %rdi, %r14 + adcq -48(%rsp), %r13 ## 8-byte Folded Reload + adcq -40(%rsp), %r9 
## 8-byte Folded Reload + movq -32(%rsp), %rcx ## 8-byte Reload + adcq -120(%rsp), %rcx ## 8-byte Folded Reload + movq -24(%rsp), %rax ## 8-byte Reload + adcq -96(%rsp), %rax ## 8-byte Folded Reload + movq -16(%rsp), %rdi ## 8-byte Reload + adcq $0, %rdi + addq %r11, %rsi + movq %rsi, -48(%rsp) ## 8-byte Spill + adcq %rbx, %r10 + adcq %r15, %r14 + adcq %r12, %r13 + adcq -112(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, -40(%rsp) ## 8-byte Spill + adcq -104(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -32(%rsp) ## 8-byte Spill + adcq -128(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -24(%rsp) ## 8-byte Spill + adcq %r8, %rdi + movq %rdi, -16(%rsp) ## 8-byte Spill + sbbq %rcx, %rcx + movq 40(%rsp), %r8 ## 8-byte Reload + imulq %rsi, %r8 + movq %r8, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, 40(%rsp) ## 8-byte Spill + movq %r8, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, 32(%rsp) ## 8-byte Spill + movq %r8, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, 24(%rsp) ## 8-byte Spill + movq %r8, %rax + mulq 64(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, 16(%rsp) ## 8-byte Spill + movq %r8, %rax + mulq 56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r11 + movq %r8, %rax + movq %r8, %r12 + mulq 48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, %r8 + movq %r12, %rax + movq 72(%rsp), %r12 ## 8-byte Reload + mulq %r12 + andl $1, %ecx + addq %r15, %rax + adcq %r11, %rdx + adcq 16(%rsp), %rbp ## 8-byte Folded Reload + adcq 24(%rsp), %rbx ## 8-byte Folded Reload + adcq 32(%rsp), %rsi ## 8-byte Folded Reload + adcq 40(%rsp), %r9 ## 8-byte Folded Reload + adcq $0, %rdi + addq -48(%rsp), %r8 ## 8-byte Folded Reload + adcq %r10, %rax + adcq %r14, %rdx + adcq %r13, %rbp + adcq -40(%rsp), %rbx ## 8-byte Folded Reload + adcq -32(%rsp), %rsi ## 8-byte Folded Reload + adcq -24(%rsp), %r9 ## 8-byte Folded Reload + adcq -16(%rsp), %rdi ## 8-byte Folded Reload + adcq $0, %rcx + movq %rax, %r8 + subq 48(%rsp), %r8 ## 8-byte Folded Reload + movq %rdx, %r10 + sbbq %r12, %r10 + movq %rbp, %r11 + sbbq 56(%rsp), %r11 ## 8-byte Folded Reload + movq %rbx, %r14 + sbbq 64(%rsp), %r14 ## 8-byte Folded Reload + movq %rsi, %r15 + sbbq -8(%rsp), %r15 ## 8-byte Folded Reload + movq %r9, %r12 + sbbq (%rsp), %r12 ## 8-byte Folded Reload + movq %rdi, %r13 + sbbq 8(%rsp), %r13 ## 8-byte Folded Reload + sbbq $0, %rcx + andl $1, %ecx + cmovneq %rdi, %r13 + testb %cl, %cl + cmovneq %rax, %r8 + movq 80(%rsp), %rax ## 8-byte Reload + movq %r8, (%rax) + cmovneq %rdx, %r10 + movq %r10, 8(%rax) + cmovneq %rbp, %r11 + movq %r11, 16(%rax) + cmovneq %rbx, %r14 + movq %r14, 24(%rax) + cmovneq %rsi, %r15 + movq %r15, 32(%rax) + cmovneq %r9, %r12 + movq %r12, 40(%rax) + movq %r13, 48(%rax) + addq $88, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF7L + .p2align 4, 0x90 +_mcl_fp_montNF7L: ## @mcl_fp_montNF7L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $80, %rsp + movq %rdx, -40(%rsp) ## 8-byte Spill + movq %rdi, 72(%rsp) ## 8-byte Spill + movq 48(%rsi), %rax + movq %rax, -48(%rsp) ## 8-byte Spill + movq (%rdx), %rbx + mulq %rbx + movq %rax, -16(%rsp) ## 8-byte Spill + movq %rdx, %r12 + movq 40(%rsi), %rax + movq %rax, -56(%rsp) ## 8-byte Spill + mulq %rbx + movq %rax, -24(%rsp) ## 8-byte Spill + movq %rdx, %r8 + movq 32(%rsi), %rax + movq %rax, -64(%rsp) ## 8-byte Spill + mulq %rbx + movq %rax, 
-32(%rsp) ## 8-byte Spill + movq %rdx, %rbp + movq 24(%rsi), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + mulq %rbx + movq %rax, %r10 + movq %rdx, %r15 + movq 16(%rsi), %rax + movq %rax, (%rsp) ## 8-byte Spill + mulq %rbx + movq %rax, %r9 + movq %rdx, %r14 + movq (%rsi), %rdi + movq %rdi, -8(%rsp) ## 8-byte Spill + movq 8(%rsi), %rax + movq %rax, 64(%rsp) ## 8-byte Spill + mulq %rbx + movq %rdx, %r13 + movq %rax, %r11 + movq %rdi, %rax + mulq %rbx + movq %rdx, %rsi + addq %r11, %rsi + adcq %r9, %r13 + adcq %r10, %r14 + adcq -32(%rsp), %r15 ## 8-byte Folded Reload + adcq -24(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, -128(%rsp) ## 8-byte Spill + adcq -16(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, -120(%rsp) ## 8-byte Spill + adcq $0, %r12 + movq %r12, -104(%rsp) ## 8-byte Spill + movq -8(%rcx), %rdx + movq %rdx, 16(%rsp) ## 8-byte Spill + movq %rax, %r10 + movq %rax, %r8 + imulq %rdx, %r10 + movq 48(%rcx), %rdx + movq %rdx, 32(%rsp) ## 8-byte Spill + movq %r10, %rax + mulq %rdx + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rdx, -96(%rsp) ## 8-byte Spill + movq 40(%rcx), %rdx + movq %rdx, -16(%rsp) ## 8-byte Spill + movq %r10, %rax + mulq %rdx + movq %rax, %r11 + movq %rdx, -72(%rsp) ## 8-byte Spill + movq 32(%rcx), %rdx + movq %rdx, -24(%rsp) ## 8-byte Spill + movq %r10, %rax + mulq %rdx + movq %rax, %rbp + movq %rdx, -80(%rsp) ## 8-byte Spill + movq 24(%rcx), %rdx + movq %rdx, -32(%rsp) ## 8-byte Spill + movq %r10, %rax + mulq %rdx + movq %rax, %r12 + movq %rdx, -88(%rsp) ## 8-byte Spill + movq 16(%rcx), %rdx + movq %rdx, 56(%rsp) ## 8-byte Spill + movq %r10, %rax + mulq %rdx + movq %rax, %rbx + movq %rdx, 24(%rsp) ## 8-byte Spill + movq (%rcx), %rdi + movq %rdi, 40(%rsp) ## 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + movq %r10, %rax + mulq %rcx + movq %rdx, %r9 + movq %rax, %rcx + movq %r10, %rax + mulq %rdi + addq %r8, %rax + adcq %rsi, %rcx + adcq %r13, %rbx + adcq %r14, %r12 + adcq %r15, %rbp + adcq -128(%rsp), %r11 ## 8-byte Folded Reload + movq -112(%rsp), %rdi ## 8-byte Reload + adcq -120(%rsp), %rdi ## 8-byte Folded Reload + movq -104(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %rdx, %rcx + adcq %r9, %rbx + adcq 24(%rsp), %r12 ## 8-byte Folded Reload + adcq -88(%rsp), %rbp ## 8-byte Folded Reload + adcq -80(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, -120(%rsp) ## 8-byte Spill + adcq -72(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, -112(%rsp) ## 8-byte Spill + adcq -96(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -104(%rsp) ## 8-byte Spill + movq -40(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rsi + movq %rsi, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, 24(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, %rdi + movq %rsi, %rax + mulq 64(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r11 + movq %rsi, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rax, %r10 + movq %rdx, %r15 + addq %r11, %r15 + adcq %rdi, %r8 + adcq 24(%rsp), %r9 ## 8-byte Folded Reload + adcq -88(%rsp), %r13 ## 8-byte Folded Reload + adcq -80(%rsp), %r14 ## 
8-byte Folded Reload + movq -128(%rsp), %rdx ## 8-byte Reload + adcq -72(%rsp), %rdx ## 8-byte Folded Reload + movq -96(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %rcx, %r10 + adcq %rbx, %r15 + adcq %r12, %r8 + adcq %rbp, %r9 + adcq -120(%rsp), %r13 ## 8-byte Folded Reload + adcq -112(%rsp), %r14 ## 8-byte Folded Reload + adcq -104(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, -96(%rsp) ## 8-byte Spill + movq %r10, %rsi + imulq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rax, -120(%rsp) ## 8-byte Spill + movq %rsi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rax, %rbx + movq %rsi, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, %r11 + movq %rsi, %rax + mulq 56(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r12 + movq %rsi, %rax + mulq 48(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %rbp + movq %rsi, %rax + mulq 40(%rsp) ## 8-byte Folded Reload + addq %r10, %rax + adcq %r15, %rbp + adcq %r8, %r12 + adcq %r9, %r11 + adcq %r13, %rbx + movq -120(%rsp), %r8 ## 8-byte Reload + adcq %r14, %r8 + movq -112(%rsp), %rsi ## 8-byte Reload + adcq -128(%rsp), %rsi ## 8-byte Folded Reload + movq -96(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %rdx, %rbp + adcq %rdi, %r12 + adcq %rcx, %r11 + adcq -88(%rsp), %rbx ## 8-byte Folded Reload + adcq -80(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, -120(%rsp) ## 8-byte Spill + adcq -72(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -112(%rsp) ## 8-byte Spill + adcq -104(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -96(%rsp) ## 8-byte Spill + movq -40(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rdi + movq %rdi, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, -104(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, 24(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r14 + movq %rdi, %rax + mulq 64(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r13 + movq %rdi, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rax, %r10 + movq %rdx, %r15 + addq %r13, %r15 + adcq %r14, %rcx + adcq 24(%rsp), %r8 ## 8-byte Folded Reload + adcq -88(%rsp), %rsi ## 8-byte Folded Reload + adcq -80(%rsp), %r9 ## 8-byte Folded Reload + movq -128(%rsp), %rdx ## 8-byte Reload + adcq -72(%rsp), %rdx ## 8-byte Folded Reload + movq -104(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %rbp, %r10 + adcq %r12, %r15 + adcq %r11, %rcx + adcq %rbx, %r8 + adcq -120(%rsp), %rsi ## 8-byte Folded Reload + adcq -112(%rsp), %r9 ## 8-byte Folded Reload + adcq -96(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, -104(%rsp) ## 8-byte Spill + movq %r10, %rdi + imulq 16(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rdx, 
-72(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, %r11 + movq %rdi, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, %r12 + movq %rdi, %rax + mulq 56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %rbp + movq %rdi, %rax + mulq 48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, %rbx + movq %rdi, %rax + mulq 40(%rsp) ## 8-byte Folded Reload + addq %r10, %rax + adcq %r15, %rbx + adcq %rcx, %rbp + adcq %r8, %r12 + adcq %rsi, %r11 + movq -112(%rsp), %rcx ## 8-byte Reload + adcq %r9, %rcx + movq -96(%rsp), %rsi ## 8-byte Reload + adcq -128(%rsp), %rsi ## 8-byte Folded Reload + movq -104(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %rdx, %rbx + adcq %r14, %rbp + adcq %r13, %r12 + adcq -120(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, -120(%rsp) ## 8-byte Spill + adcq -88(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -112(%rsp) ## 8-byte Spill + adcq -80(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -96(%rsp) ## 8-byte Spill + adcq -72(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -104(%rsp) ## 8-byte Spill + movq -40(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rdi + movq %rdi, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r13 + movq %rdi, %rax + mulq 64(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r14 + movq %rdi, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rax, %r10 + movq %rdx, %rdi + addq %r14, %rdi + adcq %r13, %r8 + adcq -88(%rsp), %rcx ## 8-byte Folded Reload + adcq -80(%rsp), %rsi ## 8-byte Folded Reload + adcq -72(%rsp), %r15 ## 8-byte Folded Reload + adcq -128(%rsp), %r11 ## 8-byte Folded Reload + adcq $0, %r9 + addq %rbx, %r10 + adcq %rbp, %rdi + adcq %r12, %r8 + adcq -120(%rsp), %rcx ## 8-byte Folded Reload + adcq -112(%rsp), %rsi ## 8-byte Folded Reload + adcq -96(%rsp), %r15 ## 8-byte Folded Reload + adcq -104(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, -112(%rsp) ## 8-byte Spill + adcq $0, %r9 + movq %r10, %rbp + imulq 16(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rax, %r14 + movq %rbp, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rax, %r12 + movq %rbp, %rax + mulq 56(%rsp) ## 8-byte Folded Reload + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, %r11 + movq %rbp, %rax + mulq 48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %rbx + movq %rbp, %rax + mulq 40(%rsp) ## 8-byte Folded 
Reload + addq %r10, %rax + adcq %rdi, %rbx + adcq %r8, %r11 + adcq %rcx, %r12 + adcq %rsi, %r14 + movq -104(%rsp), %rcx ## 8-byte Reload + adcq %r15, %rcx + movq -96(%rsp), %rax ## 8-byte Reload + adcq -112(%rsp), %rax ## 8-byte Folded Reload + adcq $0, %r9 + addq %rdx, %rbx + adcq %r13, %r11 + adcq -88(%rsp), %r12 ## 8-byte Folded Reload + adcq -80(%rsp), %r14 ## 8-byte Folded Reload + movq %r14, -112(%rsp) ## 8-byte Spill + adcq -72(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -104(%rsp) ## 8-byte Spill + adcq -128(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -96(%rsp) ## 8-byte Spill + adcq -120(%rsp), %r9 ## 8-byte Folded Reload + movq -40(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rdi + movq %rdi, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %rbp + movq %rdi, %rax + mulq 64(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r14 + movq %rdi, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rax, %rdi + movq %rdx, %r13 + addq %r14, %r13 + adcq %rbp, %r8 + adcq -88(%rsp), %rcx ## 8-byte Folded Reload + adcq -80(%rsp), %rsi ## 8-byte Folded Reload + adcq -72(%rsp), %r10 ## 8-byte Folded Reload + movq -120(%rsp), %rax ## 8-byte Reload + adcq -128(%rsp), %rax ## 8-byte Folded Reload + adcq $0, %r15 + addq %rbx, %rdi + adcq %r11, %r13 + adcq %r12, %r8 + adcq -112(%rsp), %rcx ## 8-byte Folded Reload + adcq -104(%rsp), %rsi ## 8-byte Folded Reload + adcq -96(%rsp), %r10 ## 8-byte Folded Reload + adcq %r9, %rax + movq %rax, -120(%rsp) ## 8-byte Spill + adcq $0, %r15 + movq %rdi, %rbp + imulq 16(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rax, %r9 + movq %rbp, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, %r12 + movq %rbp, %rax + mulq 56(%rsp) ## 8-byte Folded Reload + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, %r14 + movq %rbp, %rax + mulq 48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, %rbx + movq %rbp, %rax + mulq 40(%rsp) ## 8-byte Folded Reload + addq %rdi, %rax + adcq %r13, %rbx + adcq %r8, %r14 + adcq %rcx, %r12 + adcq %rsi, %r9 + movq -112(%rsp), %rcx ## 8-byte Reload + adcq %r10, %rcx + movq -104(%rsp), %rax ## 8-byte Reload + adcq -120(%rsp), %rax ## 8-byte Folded Reload + adcq $0, %r15 + addq %rdx, %rbx + adcq %r11, %r14 + adcq -88(%rsp), %r12 ## 8-byte Folded Reload + adcq -128(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, -128(%rsp) ## 8-byte Spill + adcq -80(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -112(%rsp) ## 8-byte Spill + adcq -72(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -104(%rsp) ## 8-byte Spill + adcq -96(%rsp), %r15 ## 8-byte Folded Reload + movq -40(%rsp), %rax ## 8-byte Reload + movq 
40(%rax), %rbp + movq %rbp, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -80(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, 24(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %rcx + movq %rbp, %rax + mulq 64(%rsp) ## 8-byte Folded Reload + movq %rdx, %r8 + movq %rax, %r9 + movq %rbp, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rax, %r11 + movq %rdx, %r10 + addq %r9, %r10 + adcq %rcx, %r8 + adcq 24(%rsp), %rdi ## 8-byte Folded Reload + adcq -88(%rsp), %rsi ## 8-byte Folded Reload + adcq -80(%rsp), %r13 ## 8-byte Folded Reload + movq -120(%rsp), %rcx ## 8-byte Reload + adcq -72(%rsp), %rcx ## 8-byte Folded Reload + movq -96(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %rbx, %r11 + adcq %r14, %r10 + adcq %r12, %r8 + adcq -128(%rsp), %rdi ## 8-byte Folded Reload + adcq -112(%rsp), %rsi ## 8-byte Folded Reload + adcq -104(%rsp), %r13 ## 8-byte Folded Reload + adcq %r15, %rcx + movq %rcx, -120(%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, -96(%rsp) ## 8-byte Spill + movq %r11, %rbx + imulq 16(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, -72(%rsp) ## 8-byte Spill + movq %rax, -112(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rax, %r9 + movq %rbx, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, %r15 + movq %rbx, %rax + mulq 56(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, %rbp + movq %rbx, %rax + mulq 48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %rcx + movq %rbx, %rax + mulq 40(%rsp) ## 8-byte Folded Reload + addq %r11, %rax + adcq %r10, %rcx + adcq %r8, %rbp + adcq %rdi, %r15 + adcq %rsi, %r9 + movq -112(%rsp), %rbx ## 8-byte Reload + adcq %r13, %rbx + movq -104(%rsp), %rsi ## 8-byte Reload + adcq -120(%rsp), %rsi ## 8-byte Folded Reload + movq -96(%rsp), %rax ## 8-byte Reload + adcq $0, %rax + addq %rdx, %rcx + adcq %r12, %rbp + adcq %r14, %r15 + adcq -88(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, -120(%rsp) ## 8-byte Spill + adcq -80(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, -112(%rsp) ## 8-byte Spill + adcq -72(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -104(%rsp) ## 8-byte Spill + adcq -128(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -96(%rsp) ## 8-byte Spill + movq -40(%rsp), %rax ## 8-byte Reload + movq 48(%rax), %rdi + movq %rdi, %rax + mulq -48(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -56(%rsp) ## 8-byte Folded Reload + movq %rdx, -40(%rsp) ## 8-byte Spill + movq %rax, -56(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -64(%rsp) ## 8-byte Folded Reload + movq %rdx, -48(%rsp) ## 8-byte Spill + movq %rax, -64(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %r9 + movq %rdi, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 
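+## Last multiplier word b[6] of mcl_fp_montNF7L; this NF variant selects the reduced result at the end with sarq/cmovsq on the trial subtraction's sign, instead of the borrow test (sbbq/testb/cmovneq) used by mcl_fp_mont7L above.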
+ movq %rax, %rbx + movq %rdi, %rax + mulq 64(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, %rsi + movq %rdi, %rax + mulq -8(%rsp) ## 8-byte Folded Reload + movq %rax, %r12 + movq %rdx, %r8 + addq %rsi, %r8 + adcq %rbx, %r10 + adcq %r9, %r11 + adcq -64(%rsp), %r13 ## 8-byte Folded Reload + movq -48(%rsp), %rdx ## 8-byte Reload + adcq -56(%rsp), %rdx ## 8-byte Folded Reload + movq -40(%rsp), %rax ## 8-byte Reload + adcq -128(%rsp), %rax ## 8-byte Folded Reload + adcq $0, %r14 + addq %rcx, %r12 + adcq %rbp, %r8 + adcq %r15, %r10 + adcq -120(%rsp), %r11 ## 8-byte Folded Reload + adcq -112(%rsp), %r13 ## 8-byte Folded Reload + adcq -104(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -48(%rsp) ## 8-byte Spill + adcq -96(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -40(%rsp) ## 8-byte Spill + adcq $0, %r14 + movq 16(%rsp), %rdi ## 8-byte Reload + imulq %r12, %rdi + movq %rdi, %rax + mulq 32(%rsp) ## 8-byte Folded Reload + movq %rdx, 16(%rsp) ## 8-byte Spill + movq %rax, %r9 + movq %rdi, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, -56(%rsp) ## 8-byte Spill + movq %rax, %rbp + movq %rdi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, -64(%rsp) ## 8-byte Spill + movq %rax, %rsi + movq %rdi, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, 8(%rsp) ## 8-byte Spill + movq %rax, %rcx + movq %rdi, %rax + mulq 40(%rsp) ## 8-byte Folded Reload + movq %rdx, (%rsp) ## 8-byte Spill + movq %rax, %r15 + movq %rdi, %rax + mulq 56(%rsp) ## 8-byte Folded Reload + movq %rdx, -8(%rsp) ## 8-byte Spill + movq %rax, %rbx + movq %rdi, %rax + mulq 48(%rsp) ## 8-byte Folded Reload + addq %r12, %r15 + adcq %r8, %rax + adcq %r10, %rbx + adcq %r11, %rcx + adcq %r13, %rsi + adcq -48(%rsp), %rbp ## 8-byte Folded Reload + adcq -40(%rsp), %r9 ## 8-byte Folded Reload + adcq $0, %r14 + addq (%rsp), %rax ## 8-byte Folded Reload + adcq %rdx, %rbx + adcq -8(%rsp), %rcx ## 8-byte Folded Reload + adcq 8(%rsp), %rsi ## 8-byte Folded Reload + adcq -64(%rsp), %rbp ## 8-byte Folded Reload + adcq -56(%rsp), %r9 ## 8-byte Folded Reload + adcq 16(%rsp), %r14 ## 8-byte Folded Reload + movq %rax, %r13 + subq 40(%rsp), %r13 ## 8-byte Folded Reload + movq %rbx, %r12 + sbbq 48(%rsp), %r12 ## 8-byte Folded Reload + movq %rcx, %r8 + sbbq 56(%rsp), %r8 ## 8-byte Folded Reload + movq %rsi, %r10 + sbbq -32(%rsp), %r10 ## 8-byte Folded Reload + movq %rbp, %r11 + sbbq -24(%rsp), %r11 ## 8-byte Folded Reload + movq %r9, %r15 + sbbq -16(%rsp), %r15 ## 8-byte Folded Reload + movq %r14, %rdx + sbbq 32(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, %rdi + sarq $63, %rdi + cmovsq %rax, %r13 + movq 72(%rsp), %rax ## 8-byte Reload + movq %r13, (%rax) + cmovsq %rbx, %r12 + movq %r12, 8(%rax) + cmovsq %rcx, %r8 + movq %r8, 16(%rax) + cmovsq %rsi, %r10 + movq %r10, 24(%rax) + cmovsq %rbp, %r11 + movq %r11, 32(%rax) + cmovsq %r9, %r15 + movq %r15, 40(%rax) + cmovsq %r14, %rdx + movq %rdx, 48(%rax) + addq $80, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed7L + .p2align 4, 0x90 +_mcl_fp_montRed7L: ## @mcl_fp_montRed7L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $72, %rsp + movq %rdx, %rcx + movq %rdi, 64(%rsp) ## 8-byte Spill + movq -8(%rcx), %rax + movq %rax, -56(%rsp) ## 8-byte Spill + movq (%rsi), %rbp + movq %rbp, -48(%rsp) ## 8-byte Spill + imulq %rax, %rbp + movq 48(%rcx), %rdx + movq %rdx, (%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rax, -104(%rsp) ## 8-byte Spill + movq 
%rdx, -128(%rsp) ## 8-byte Spill + movq 40(%rcx), %rdx + movq %rdx, 8(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rdx, %r15 + movq 32(%rcx), %rdx + movq %rdx, 16(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rax, %r14 + movq %rdx, %r11 + movq 24(%rcx), %rdx + movq %rdx, -32(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rax, %r13 + movq %rdx, %r10 + movq 16(%rcx), %rdx + movq %rdx, -16(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %rdx + movq %rax, %r9 + movq %rdx, %r12 + movq (%rcx), %rdi + movq %rdi, 24(%rsp) ## 8-byte Spill + movq 8(%rcx), %rcx + movq %rcx, -24(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %rcx + movq %rdx, %rcx + movq %rax, %rbx + movq %rbp, %rax + mulq %rdi + movq %rdx, %r8 + addq %rbx, %r8 + adcq %r9, %rcx + adcq %r13, %r12 + adcq %r14, %r10 + adcq -72(%rsp), %r11 ## 8-byte Folded Reload + adcq -104(%rsp), %r15 ## 8-byte Folded Reload + movq -128(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq -48(%rsp), %rax ## 8-byte Folded Reload + adcq 8(%rsi), %r8 + adcq 16(%rsi), %rcx + adcq 24(%rsi), %r12 + adcq 32(%rsi), %r10 + movq %r10, 40(%rsp) ## 8-byte Spill + adcq 40(%rsi), %r11 + movq %r11, -40(%rsp) ## 8-byte Spill + adcq 48(%rsi), %r15 + movq %r15, -96(%rsp) ## 8-byte Spill + adcq 56(%rsi), %rdx + movq %rdx, -128(%rsp) ## 8-byte Spill + movq 104(%rsi), %rax + movq 96(%rsi), %rdx + movq 88(%rsi), %rdi + movq 80(%rsi), %rbp + movq 72(%rsi), %rbx + movq 64(%rsi), %r9 + adcq $0, %r9 + adcq $0, %rbx + movq %rbx, -8(%rsp) ## 8-byte Spill + adcq $0, %rbp + movq %rbp, -80(%rsp) ## 8-byte Spill + adcq $0, %rdi + movq %rdi, -64(%rsp) ## 8-byte Spill + adcq $0, %rdx + movq %rdx, -72(%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, -104(%rsp) ## 8-byte Spill + sbbq %rax, %rax + andl $1, %eax + movq %rax, -48(%rsp) ## 8-byte Spill + movq %r8, %rdi + imulq -56(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, 32(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, 48(%rsp) ## 8-byte Spill + movq %rdi, %rax + movq 16(%rsp), %r13 ## 8-byte Reload + mulq %r13 + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, 56(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r10 + movq %rdi, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %rsi + movq %rdi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, %r15 + movq %rdi, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r14 + addq %r15, %r14 + adcq %rsi, %r11 + adcq %r10, %rbp + adcq 56(%rsp), %rbx ## 8-byte Folded Reload + movq -88(%rsp), %rdi ## 8-byte Reload + adcq 48(%rsp), %rdi ## 8-byte Folded Reload + movq -120(%rsp), %rsi ## 8-byte Reload + adcq 32(%rsp), %rsi ## 8-byte Folded Reload + movq -112(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r8, %rax + adcq %rcx, %r14 + adcq %r12, %r11 + adcq 40(%rsp), %rbp ## 8-byte Folded Reload + adcq -40(%rsp), %rbx ## 8-byte Folded Reload + adcq -96(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, -88(%rsp) ## 8-byte Spill + adcq -128(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, -120(%rsp) ## 8-byte Spill + adcq %r9, %rdx + movq %rdx, -112(%rsp) ## 8-byte Spill + adcq $0, -8(%rsp) ## 8-byte Folded Spill + adcq $0, -80(%rsp) ## 8-byte Folded Spill + adcq $0, -64(%rsp) ## 8-byte Folded Spill + adcq $0, 
-72(%rsp) ## 8-byte Folded Spill + adcq $0, -104(%rsp) ## 8-byte Folded Spill + adcq $0, -48(%rsp) ## 8-byte Folded Spill + movq %r14, %rcx + imulq -56(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, 40(%rsp) ## 8-byte Spill + movq %rcx, %rax + movq 8(%rsp), %r15 ## 8-byte Reload + mulq %r15 + movq %rdx, -96(%rsp) ## 8-byte Spill + movq %rax, 32(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq %r13 + movq %rdx, -40(%rsp) ## 8-byte Spill + movq %rax, 48(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r8 + movq %rcx, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + movq %rax, %r12 + movq %rcx, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, %r13 + movq %rcx, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + addq %r13, %r10 + adcq %r12, %r9 + adcq %r8, %rdi + adcq 48(%rsp), %rsi ## 8-byte Folded Reload + movq -40(%rsp), %r8 ## 8-byte Reload + adcq 32(%rsp), %r8 ## 8-byte Folded Reload + movq -96(%rsp), %rdx ## 8-byte Reload + adcq 40(%rsp), %rdx ## 8-byte Folded Reload + movq -128(%rsp), %rcx ## 8-byte Reload + adcq $0, %rcx + addq %r14, %rax + adcq %r11, %r10 + adcq %rbp, %r9 + adcq %rbx, %rdi + adcq -88(%rsp), %rsi ## 8-byte Folded Reload + adcq -120(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, -40(%rsp) ## 8-byte Spill + adcq -112(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -96(%rsp) ## 8-byte Spill + adcq -8(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -128(%rsp) ## 8-byte Spill + adcq $0, -80(%rsp) ## 8-byte Folded Spill + adcq $0, -64(%rsp) ## 8-byte Folded Spill + adcq $0, -72(%rsp) ## 8-byte Folded Spill + adcq $0, -104(%rsp) ## 8-byte Folded Spill + adcq $0, -48(%rsp) ## 8-byte Folded Spill + movq %r10, %rbp + imulq -56(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, %rax + movq (%rsp), %r8 ## 8-byte Reload + mulq %r8 + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, -8(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq %r15 + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, 40(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, -88(%rsp) ## 8-byte Spill + movq %rax, 32(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %r13 + movq %rbp, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, %r14 + movq %rbp, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, %r15 + movq %rbp, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + addq %r15, %r11 + adcq %r14, %rbx + adcq %r13, %rcx + adcq 32(%rsp), %r12 ## 8-byte Folded Reload + movq -88(%rsp), %r14 ## 8-byte Reload + adcq 40(%rsp), %r14 ## 8-byte Folded Reload + movq -120(%rsp), %rbp ## 8-byte Reload + adcq -8(%rsp), %rbp ## 8-byte Folded Reload + movq -112(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r10, %rax + adcq %r9, %r11 + adcq %rdi, %rbx + adcq %rsi, %rcx + adcq -40(%rsp), %r12 ## 8-byte Folded Reload + adcq -96(%rsp), %r14 ## 8-byte Folded Reload + movq %r14, -88(%rsp) ## 8-byte Spill + adcq -128(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, -120(%rsp) ## 8-byte Spill + adcq -80(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + adcq $0, -64(%rsp) ## 8-byte Folded Spill + adcq $0, -72(%rsp) ## 8-byte Folded Spill + adcq $0, -104(%rsp) ## 8-byte Folded Spill + adcq $0, -48(%rsp) ## 8-byte Folded Spill 
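+## Next limb of the Montgomery reduction: a sketch of the word-by-word REDC
+## step this generated code implements, with N the 7-limb modulus and
+## n' = -N^-1 mod 2^64 cached at -56(%rsp):
+##   m = t[0] * n' mod 2^64
+##   t = (t + m*N) >> 64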
+ movq %r11, %rdi + imulq -56(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, %rax + mulq %r8 + movq %rdx, -80(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, -128(%rsp) ## 8-byte Spill + movq %rax, -40(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, -8(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %r13 + movq %rax, %r14 + movq %rdi, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r10 + movq %rdi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r8 + movq %rdi, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + addq %r8, %r9 + adcq %r10, %rbp + adcq %r14, %rsi + adcq -8(%rsp), %r13 ## 8-byte Folded Reload + adcq -40(%rsp), %r15 ## 8-byte Folded Reload + movq -128(%rsp), %rdi ## 8-byte Reload + adcq -96(%rsp), %rdi ## 8-byte Folded Reload + movq -80(%rsp), %rdx ## 8-byte Reload + adcq $0, %rdx + addq %r11, %rax + adcq %rbx, %r9 + adcq %rcx, %rbp + adcq %r12, %rsi + adcq -88(%rsp), %r13 ## 8-byte Folded Reload + adcq -120(%rsp), %r15 ## 8-byte Folded Reload + adcq -112(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, -128(%rsp) ## 8-byte Spill + adcq -64(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -80(%rsp) ## 8-byte Spill + adcq $0, -72(%rsp) ## 8-byte Folded Spill + movq -104(%rsp), %r14 ## 8-byte Reload + adcq $0, %r14 + adcq $0, -48(%rsp) ## 8-byte Folded Spill + movq %r9, %rdi + imulq -56(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, -64(%rsp) ## 8-byte Spill + movq %rax, -104(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + movq %rax, -88(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, -120(%rsp) ## 8-byte Spill + movq %rax, -96(%rsp) ## 8-byte Spill + movq %rdi, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %r12 + movq %rax, %rbx + movq %rdi, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %r11 + movq %rax, %rcx + movq %rdi, %rax + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %r10 + movq %rax, %r8 + movq %rdi, %rax + mulq 24(%rsp) ## 8-byte Folded Reload + movq %rdx, %rdi + addq %r8, %rdi + adcq %rcx, %r10 + adcq %rbx, %r11 + adcq -96(%rsp), %r12 ## 8-byte Folded Reload + movq -120(%rsp), %rbx ## 8-byte Reload + adcq -88(%rsp), %rbx ## 8-byte Folded Reload + movq -112(%rsp), %rdx ## 8-byte Reload + adcq -104(%rsp), %rdx ## 8-byte Folded Reload + movq -64(%rsp), %rcx ## 8-byte Reload + adcq $0, %rcx + addq %r9, %rax + adcq %rbp, %rdi + adcq %rsi, %r10 + adcq %r13, %r11 + adcq %r15, %r12 + adcq -128(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, -120(%rsp) ## 8-byte Spill + adcq -80(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -112(%rsp) ## 8-byte Spill + adcq -72(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -64(%rsp) ## 8-byte Spill + adcq $0, %r14 + movq %r14, -104(%rsp) ## 8-byte Spill + adcq $0, -48(%rsp) ## 8-byte Folded Spill + movq -56(%rsp), %rbp ## 8-byte Reload + imulq %rdi, %rbp + movq %rbp, %rax + mulq (%rsp) ## 8-byte Folded Reload + movq %rdx, %rcx + movq %rax, -56(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq 8(%rsp) ## 8-byte Folded Reload + movq %rdx, %r9 + movq %rax, -72(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq 16(%rsp) ## 8-byte Folded Reload + movq %rdx, %r15 + movq %rax, 
-80(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq -32(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbx + movq %rax, -128(%rsp) ## 8-byte Spill + movq %rbp, %rax + mulq -16(%rsp) ## 8-byte Folded Reload + movq %rdx, %rsi + movq %rax, %r13 + movq %rbp, %rax + movq %rbp, %r14 + mulq -24(%rsp) ## 8-byte Folded Reload + movq %rdx, %rbp + movq %rax, %r8 + movq %r14, %rax + movq 24(%rsp), %r14 ## 8-byte Reload + mulq %r14 + addq %r8, %rdx + adcq %r13, %rbp + adcq -128(%rsp), %rsi ## 8-byte Folded Reload + adcq -80(%rsp), %rbx ## 8-byte Folded Reload + adcq -72(%rsp), %r15 ## 8-byte Folded Reload + adcq -56(%rsp), %r9 ## 8-byte Folded Reload + adcq $0, %rcx + addq %rdi, %rax + adcq %r10, %rdx + adcq %r11, %rbp + adcq %r12, %rsi + adcq -120(%rsp), %rbx ## 8-byte Folded Reload + adcq -112(%rsp), %r15 ## 8-byte Folded Reload + adcq -64(%rsp), %r9 ## 8-byte Folded Reload + adcq -104(%rsp), %rcx ## 8-byte Folded Reload + movq -48(%rsp), %rdi ## 8-byte Reload + adcq $0, %rdi + movq %rdx, %rax + subq %r14, %rax + movq %rbp, %r13 + sbbq -24(%rsp), %r13 ## 8-byte Folded Reload + movq %rsi, %r8 + sbbq -16(%rsp), %r8 ## 8-byte Folded Reload + movq %rbx, %r10 + sbbq -32(%rsp), %r10 ## 8-byte Folded Reload + movq %r15, %r11 + sbbq 16(%rsp), %r11 ## 8-byte Folded Reload + movq %r9, %r14 + sbbq 8(%rsp), %r14 ## 8-byte Folded Reload + movq %rcx, %r12 + sbbq (%rsp), %r12 ## 8-byte Folded Reload + sbbq $0, %rdi + andl $1, %edi + cmovneq %rcx, %r12 + testb %dil, %dil + cmovneq %rdx, %rax + movq 64(%rsp), %rcx ## 8-byte Reload + movq %rax, (%rcx) + cmovneq %rbp, %r13 + movq %r13, 8(%rcx) + cmovneq %rsi, %r8 + movq %r8, 16(%rcx) + cmovneq %rbx, %r10 + movq %r10, 24(%rcx) + cmovneq %r15, %r11 + movq %r11, 32(%rcx) + cmovneq %r9, %r14 + movq %r14, 40(%rcx) + movq %r12, 48(%rcx) + addq $72, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_addPre7L + .p2align 4, 0x90 +_mcl_fp_addPre7L: ## @mcl_fp_addPre7L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r8 + movq 48(%rsi), %r14 + movq 40(%rdx), %r9 + movq 40(%rsi), %r15 + movq 32(%rdx), %r10 + movq 24(%rdx), %r11 + movq 16(%rdx), %r12 + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + movq 24(%rsi), %rax + movq 32(%rsi), %rbx + adcq 16(%rsi), %r12 + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %r12, 16(%rdi) + adcq %r11, %rax + movq %rax, 24(%rdi) + adcq %r10, %rbx + movq %rbx, 32(%rdi) + adcq %r9, %r15 + movq %r15, 40(%rdi) + adcq %r8, %r14 + movq %r14, 48(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_subPre7L + .p2align 4, 0x90 +_mcl_fp_subPre7L: ## @mcl_fp_subPre7L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r8 + movq 48(%rsi), %r10 + movq 40(%rdx), %r9 + movq 40(%rsi), %r15 + movq 24(%rdx), %r11 + movq 32(%rdx), %r14 + movq (%rsi), %rbx + movq 8(%rsi), %r12 + xorl %eax, %eax + subq (%rdx), %rbx + sbbq 8(%rdx), %r12 + movq 16(%rsi), %rcx + sbbq 16(%rdx), %rcx + movq 32(%rsi), %rdx + movq 24(%rsi), %rsi + movq %rbx, (%rdi) + movq %r12, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r11, %rsi + movq %rsi, 24(%rdi) + sbbq %r14, %rdx + movq %rdx, 32(%rdi) + sbbq %r9, %r15 + movq %r15, 40(%rdi) + sbbq %r8, %r10 + movq %r10, 48(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_shr1_7L + .p2align 4, 0x90 +_mcl_fp_shr1_7L: ## @mcl_fp_shr1_7L +## BB#0: + movq 48(%rsi), %r8 + movq 40(%rsi), %r9 + movq 
32(%rsi), %r10 + movq 24(%rsi), %rax + movq 16(%rsi), %rcx + movq (%rsi), %rdx + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rdx + movq %rdx, (%rdi) + shrdq $1, %rcx, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rax, %rcx + movq %rcx, 16(%rdi) + shrdq $1, %r10, %rax + movq %rax, 24(%rdi) + shrdq $1, %r9, %r10 + movq %r10, 32(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 40(%rdi) + shrq %r8 + movq %r8, 48(%rdi) + retq + + .globl _mcl_fp_add7L + .p2align 4, 0x90 +_mcl_fp_add7L: ## @mcl_fp_add7L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r14 + movq 48(%rsi), %r8 + movq 40(%rdx), %r15 + movq 40(%rsi), %r9 + movq 32(%rdx), %r12 + movq 24(%rdx), %r13 + movq 16(%rdx), %r10 + movq (%rdx), %r11 + movq 8(%rdx), %rdx + addq (%rsi), %r11 + adcq 8(%rsi), %rdx + movq 24(%rsi), %rax + movq 32(%rsi), %rbx + adcq 16(%rsi), %r10 + movq %r11, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + adcq %r13, %rax + movq %rax, 24(%rdi) + adcq %r12, %rbx + movq %rbx, 32(%rdi) + adcq %r15, %r9 + movq %r9, 40(%rdi) + adcq %r14, %r8 + movq %r8, 48(%rdi) + sbbq %rsi, %rsi + andl $1, %esi + subq (%rcx), %r11 + sbbq 8(%rcx), %rdx + sbbq 16(%rcx), %r10 + sbbq 24(%rcx), %rax + sbbq 32(%rcx), %rbx + sbbq 40(%rcx), %r9 + sbbq 48(%rcx), %r8 + sbbq $0, %rsi + testb $1, %sil + jne LBB104_2 +## BB#1: ## %nocarry + movq %r11, (%rdi) + movq %rdx, 8(%rdi) + movq %r10, 16(%rdi) + movq %rax, 24(%rdi) + movq %rbx, 32(%rdi) + movq %r9, 40(%rdi) + movq %r8, 48(%rdi) +LBB104_2: ## %carry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_addNF7L + .p2align 4, 0x90 +_mcl_fp_addNF7L: ## @mcl_fp_addNF7L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r9 + movq 40(%rdx), %rbp + movq 32(%rdx), %r10 + movq 24(%rdx), %r11 + movq 16(%rdx), %r14 + movq (%rdx), %r12 + movq 8(%rdx), %r15 + addq (%rsi), %r12 + adcq 8(%rsi), %r15 + adcq 16(%rsi), %r14 + adcq 24(%rsi), %r11 + adcq 32(%rsi), %r10 + adcq 40(%rsi), %rbp + movq %rbp, -8(%rsp) ## 8-byte Spill + adcq 48(%rsi), %r9 + movq %r12, %rsi + subq (%rcx), %rsi + movq %r15, %rdx + sbbq 8(%rcx), %rdx + movq %r14, %rax + sbbq 16(%rcx), %rax + movq %r11, %rbx + sbbq 24(%rcx), %rbx + movq %r10, %r13 + sbbq 32(%rcx), %r13 + sbbq 40(%rcx), %rbp + movq %r9, %r8 + sbbq 48(%rcx), %r8 + movq %r8, %rcx + sarq $63, %rcx + cmovsq %r12, %rsi + movq %rsi, (%rdi) + cmovsq %r15, %rdx + movq %rdx, 8(%rdi) + cmovsq %r14, %rax + movq %rax, 16(%rdi) + cmovsq %r11, %rbx + movq %rbx, 24(%rdi) + cmovsq %r10, %r13 + movq %r13, 32(%rdi) + cmovsq -8(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 40(%rdi) + cmovsq %r9, %r8 + movq %r8, 48(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_sub7L + .p2align 4, 0x90 +_mcl_fp_sub7L: ## @mcl_fp_sub7L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 48(%rdx), %r14 + movq 48(%rsi), %r8 + movq 40(%rdx), %r15 + movq 40(%rsi), %r9 + movq 32(%rdx), %r12 + movq (%rsi), %rax + movq 8(%rsi), %r11 + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %r11 + movq 16(%rsi), %r13 + sbbq 16(%rdx), %r13 + movq 32(%rsi), %r10 + movq 24(%rsi), %rsi + sbbq 24(%rdx), %rsi + movq %rax, (%rdi) + movq %r11, 8(%rdi) + movq %r13, 16(%rdi) + movq %rsi, 24(%rdi) + sbbq %r12, %r10 + movq %r10, 32(%rdi) + sbbq %r15, %r9 + movq %r9, 40(%rdi) + sbbq %r14, %r8 + movq %r8, 48(%rdi) + sbbq $0, %rbx + testb $1, %bl + je LBB106_2 +## BB#1: ## %carry + movq 48(%rcx), %r14 + movq 40(%rcx), %r15 + 
movq 32(%rcx), %r12 + movq 24(%rcx), %rbx + movq 8(%rcx), %rdx + movq 16(%rcx), %rbp + addq (%rcx), %rax + movq %rax, (%rdi) + adcq %r11, %rdx + movq %rdx, 8(%rdi) + adcq %r13, %rbp + movq %rbp, 16(%rdi) + adcq %rsi, %rbx + movq %rbx, 24(%rdi) + adcq %r10, %r12 + movq %r12, 32(%rdi) + adcq %r9, %r15 + movq %r15, 40(%rdi) + adcq %r8, %r14 + movq %r14, 48(%rdi) +LBB106_2: ## %nocarry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_subNF7L + .p2align 4, 0x90 +_mcl_fp_subNF7L: ## @mcl_fp_subNF7L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 48(%rsi), %r11 + movdqu (%rdx), %xmm0 + movdqu 16(%rdx), %xmm1 + movdqu 32(%rdx), %xmm2 + pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1] + movd %xmm3, %r14 + movdqu (%rsi), %xmm3 + movdqu 16(%rsi), %xmm4 + movdqu 32(%rsi), %xmm5 + pshufd $78, %xmm5, %xmm6 ## xmm6 = xmm5[2,3,0,1] + movd %xmm6, %rcx + movd %xmm2, %r15 + movd %xmm5, %r9 + pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] + movd %xmm2, %r12 + pshufd $78, %xmm4, %xmm2 ## xmm2 = xmm4[2,3,0,1] + movd %xmm2, %r10 + movd %xmm1, %r13 + pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] + movd %xmm1, %rax + pshufd $78, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,0,1] + movd %xmm0, %rbx + movd %xmm3, %rsi + subq %rbx, %rsi + movd %xmm1, %rbx + sbbq %rax, %rbx + movd %xmm4, %rbp + sbbq %r13, %rbp + sbbq %r12, %r10 + sbbq %r15, %r9 + sbbq %r14, %rcx + movq %rcx, -8(%rsp) ## 8-byte Spill + sbbq 48(%rdx), %r11 + movq %r11, %rax + sarq $63, %rax + movq %rax, %rdx + shldq $1, %r11, %rdx + andq (%r8), %rdx + movq 48(%r8), %r14 + andq %rax, %r14 + movq 40(%r8), %r15 + andq %rax, %r15 + movq 32(%r8), %r12 + andq %rax, %r12 + movq 24(%r8), %r13 + andq %rax, %r13 + movq 16(%r8), %rcx + andq %rax, %rcx + andq 8(%r8), %rax + addq %rsi, %rdx + adcq %rbx, %rax + movq %rdx, (%rdi) + movq %rax, 8(%rdi) + adcq %rbp, %rcx + movq %rcx, 16(%rdi) + adcq %r10, %r13 + movq %r13, 24(%rdi) + adcq %r9, %r12 + movq %r12, 32(%rdi) + adcq -8(%rsp), %r15 ## 8-byte Folded Reload + movq %r15, 40(%rdi) + adcq %r11, %r14 + movq %r14, 48(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_add7L + .p2align 4, 0x90 +_mcl_fpDbl_add7L: ## @mcl_fpDbl_add7L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 104(%rdx), %rax + movq %rax, -8(%rsp) ## 8-byte Spill + movq 96(%rdx), %rax + movq %rax, -24(%rsp) ## 8-byte Spill + movq 88(%rdx), %r11 + movq 80(%rdx), %r14 + movq 24(%rsi), %r15 + movq 32(%rsi), %r12 + movq 16(%rdx), %r9 + movq (%rdx), %rax + movq 8(%rdx), %rbx + addq (%rsi), %rax + adcq 8(%rsi), %rbx + adcq 16(%rsi), %r9 + adcq 24(%rdx), %r15 + adcq 32(%rdx), %r12 + movq 72(%rdx), %r13 + movq 64(%rdx), %rbp + movq %rax, (%rdi) + movq 56(%rdx), %r10 + movq %rbx, 8(%rdi) + movq 48(%rdx), %rcx + movq 40(%rdx), %rdx + movq %r9, 16(%rdi) + movq 104(%rsi), %r9 + movq %r15, 24(%rdi) + movq 40(%rsi), %rbx + adcq %rdx, %rbx + movq 96(%rsi), %r15 + movq %r12, 32(%rdi) + movq 48(%rsi), %rdx + adcq %rcx, %rdx + movq 88(%rsi), %rax + movq %rbx, 40(%rdi) + movq 56(%rsi), %rcx + adcq %r10, %rcx + movq 80(%rsi), %r12 + movq %rdx, 48(%rdi) + movq 72(%rsi), %rdx + movq 64(%rsi), %rsi + adcq %rbp, %rsi + adcq %r13, %rdx + adcq %r14, %r12 + adcq %r11, %rax + movq %rax, -16(%rsp) ## 8-byte Spill + adcq -24(%rsp), %r15 ## 8-byte Folded Reload + movq %r15, -24(%rsp) ## 8-byte Spill + adcq -8(%rsp), %r9 ## 8-byte Folded Reload + sbbq %rbp, %rbp 
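+## Editor's note (descriptive comment, not part of the generated code): at this
+## point _mcl_fpDbl_add7L has stored the low seven limbs of the 14-limb sum and
+## holds the high limbs, plus the carry captured by the sbbq above, in registers.
+## The tail below reduces the high half modulo p: it speculatively subtracts the
+## seven limbs of p, folds the borrow into the carry bit, and uses cmovneq to
+## keep either the subtracted or the original limbs before storing them.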
+ andl $1, %ebp + movq %rcx, %rbx + subq (%r8), %rbx + movq %rsi, %r10 + sbbq 8(%r8), %r10 + movq %rdx, %r11 + sbbq 16(%r8), %r11 + movq %r12, %r14 + sbbq 24(%r8), %r14 + movq -16(%rsp), %r13 ## 8-byte Reload + sbbq 32(%r8), %r13 + sbbq 40(%r8), %r15 + movq %r9, %rax + sbbq 48(%r8), %rax + sbbq $0, %rbp + andl $1, %ebp + cmovneq %rcx, %rbx + movq %rbx, 56(%rdi) + testb %bpl, %bpl + cmovneq %rsi, %r10 + movq %r10, 64(%rdi) + cmovneq %rdx, %r11 + movq %r11, 72(%rdi) + cmovneq %r12, %r14 + movq %r14, 80(%rdi) + cmovneq -16(%rsp), %r13 ## 8-byte Folded Reload + movq %r13, 88(%rdi) + cmovneq -24(%rsp), %r15 ## 8-byte Folded Reload + movq %r15, 96(%rdi) + cmovneq %r9, %rax + movq %rax, 104(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sub7L + .p2align 4, 0x90 +_mcl_fpDbl_sub7L: ## @mcl_fpDbl_sub7L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 104(%rdx), %rax + movq %rax, -8(%rsp) ## 8-byte Spill + movq 96(%rdx), %r10 + movq 88(%rdx), %r14 + movq 16(%rsi), %rax + movq (%rsi), %r15 + movq 8(%rsi), %r11 + xorl %ecx, %ecx + subq (%rdx), %r15 + sbbq 8(%rdx), %r11 + sbbq 16(%rdx), %rax + movq 24(%rsi), %rbx + sbbq 24(%rdx), %rbx + movq 32(%rsi), %r12 + sbbq 32(%rdx), %r12 + movq 80(%rdx), %r13 + movq 72(%rdx), %rbp + movq %r15, (%rdi) + movq 64(%rdx), %r9 + movq %r11, 8(%rdi) + movq 56(%rdx), %r15 + movq %rax, 16(%rdi) + movq 48(%rdx), %r11 + movq 40(%rdx), %rdx + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %rdx, %rbx + movq 104(%rsi), %rax + movq %r12, 32(%rdi) + movq 48(%rsi), %r12 + sbbq %r11, %r12 + movq 96(%rsi), %r11 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rdx + sbbq %r15, %rdx + movq 88(%rsi), %r15 + movq %r12, 48(%rdi) + movq 64(%rsi), %rbx + sbbq %r9, %rbx + movq 80(%rsi), %r12 + movq 72(%rsi), %r9 + sbbq %rbp, %r9 + sbbq %r13, %r12 + sbbq %r14, %r15 + sbbq %r10, %r11 + sbbq -8(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -8(%rsp) ## 8-byte Spill + movl $0, %ebp + sbbq $0, %rbp + andl $1, %ebp + movq (%r8), %r10 + cmoveq %rcx, %r10 + testb %bpl, %bpl + movq 16(%r8), %rbp + cmoveq %rcx, %rbp + movq 8(%r8), %rsi + cmoveq %rcx, %rsi + movq 48(%r8), %r14 + cmoveq %rcx, %r14 + movq 40(%r8), %r13 + cmoveq %rcx, %r13 + movq 32(%r8), %rax + cmoveq %rcx, %rax + cmovneq 24(%r8), %rcx + addq %rdx, %r10 + adcq %rbx, %rsi + movq %r10, 56(%rdi) + movq %rsi, 64(%rdi) + adcq %r9, %rbp + movq %rbp, 72(%rdi) + adcq %r12, %rcx + movq %rcx, 80(%rdi) + adcq %r15, %rax + movq %rax, 88(%rdi) + adcq %r11, %r13 + movq %r13, 96(%rdi) + adcq -8(%rsp), %r14 ## 8-byte Folded Reload + movq %r14, 104(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .p2align 4, 0x90 +l_mulPv512x64: ## @mulPv512x64 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rcx + movq %rcx, %rax + mulq (%rsi) + movq %rdx, -24(%rsp) ## 8-byte Spill + movq %rax, (%rdi) + movq %rcx, %rax + mulq 56(%rsi) + movq %rdx, %r10 + movq %rax, -8(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq 48(%rsi) + movq %rdx, %r11 + movq %rax, -16(%rsp) ## 8-byte Spill + movq %rcx, %rax + mulq 40(%rsi) + movq %rdx, %r12 + movq %rax, %r15 + movq %rcx, %rax + mulq 32(%rsi) + movq %rdx, %rbx + movq %rax, %r13 + movq %rcx, %rax + mulq 24(%rsi) + movq %rdx, %rbp + movq %rax, %r8 + movq %rcx, %rax + mulq 16(%rsi) + movq %rdx, %r9 + movq %rax, %r14 + movq %rcx, %rax + mulq 8(%rsi) + addq -24(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 8(%rdi) + adcq 
%r14, %rdx + movq %rdx, 16(%rdi) + adcq %r8, %r9 + movq %r9, 24(%rdi) + adcq %r13, %rbp + movq %rbp, 32(%rdi) + adcq %r15, %rbx + movq %rbx, 40(%rdi) + adcq -16(%rsp), %r12 ## 8-byte Folded Reload + movq %r12, 48(%rdi) + adcq -8(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, 56(%rdi) + adcq $0, %r10 + movq %r10, 64(%rdi) + movq %rdi, %rax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mulUnitPre8L + .p2align 4, 0x90 +_mcl_fp_mulUnitPre8L: ## @mcl_fp_mulUnitPre8L +## BB#0: + pushq %rbx + subq $80, %rsp + movq %rdi, %rbx + leaq 8(%rsp), %rdi + callq l_mulPv512x64 + movq 72(%rsp), %r8 + movq 64(%rsp), %r9 + movq 56(%rsp), %r10 + movq 48(%rsp), %r11 + movq 40(%rsp), %rdi + movq 32(%rsp), %rax + movq 24(%rsp), %rcx + movq 8(%rsp), %rdx + movq 16(%rsp), %rsi + movq %rdx, (%rbx) + movq %rsi, 8(%rbx) + movq %rcx, 16(%rbx) + movq %rax, 24(%rbx) + movq %rdi, 32(%rbx) + movq %r11, 40(%rbx) + movq %r10, 48(%rbx) + movq %r9, 56(%rbx) + movq %r8, 64(%rbx) + addq $80, %rsp + popq %rbx + retq + + .globl _mcl_fpDbl_mulPre8L + .p2align 4, 0x90 +_mcl_fpDbl_mulPre8L: ## @mcl_fpDbl_mulPre8L +## BB#0: + pushq %rbp + movq %rsp, %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $200, %rsp + movq %rdx, %r15 + movq %rsi, %rbx + movq %rdi, %r14 + callq _mcl_fpDbl_mulPre4L + leaq 64(%r14), %rdi + leaq 32(%rbx), %rsi + leaq 32(%r15), %rdx + callq _mcl_fpDbl_mulPre4L + movq 56(%rbx), %r10 + movq 48(%rbx), %rdx + movq (%rbx), %rsi + movq 8(%rbx), %rdi + addq 32(%rbx), %rsi + adcq 40(%rbx), %rdi + adcq 16(%rbx), %rdx + adcq 24(%rbx), %r10 + pushfq + popq %r8 + xorl %r9d, %r9d + movq 56(%r15), %rcx + movq 48(%r15), %r13 + movq (%r15), %r12 + movq 8(%r15), %rbx + addq 32(%r15), %r12 + adcq 40(%r15), %rbx + adcq 16(%r15), %r13 + adcq 24(%r15), %rcx + movl $0, %eax + cmovbq %r10, %rax + movq %rax, -88(%rbp) ## 8-byte Spill + movl $0, %eax + cmovbq %rdx, %rax + movq %rax, -80(%rbp) ## 8-byte Spill + movl $0, %eax + cmovbq %rdi, %rax + movq %rax, -72(%rbp) ## 8-byte Spill + movl $0, %eax + cmovbq %rsi, %rax + movq %rax, -64(%rbp) ## 8-byte Spill + sbbq %r15, %r15 + movq %rsi, -168(%rbp) + movq %rdi, -160(%rbp) + movq %rdx, -152(%rbp) + movq %r10, -144(%rbp) + movq %r12, -136(%rbp) + movq %rbx, -128(%rbp) + movq %r13, -120(%rbp) + movq %rcx, -112(%rbp) + pushq %r8 + popfq + cmovaeq %r9, %rcx + movq %rcx, -48(%rbp) ## 8-byte Spill + cmovaeq %r9, %r13 + cmovaeq %r9, %rbx + cmovaeq %r9, %r12 + sbbq %rax, %rax + movq %rax, -56(%rbp) ## 8-byte Spill + leaq -232(%rbp), %rdi + leaq -168(%rbp), %rsi + leaq -136(%rbp), %rdx + callq _mcl_fpDbl_mulPre4L + addq -64(%rbp), %r12 ## 8-byte Folded Reload + adcq -72(%rbp), %rbx ## 8-byte Folded Reload + adcq -80(%rbp), %r13 ## 8-byte Folded Reload + movq -48(%rbp), %r10 ## 8-byte Reload + adcq -88(%rbp), %r10 ## 8-byte Folded Reload + sbbq %rax, %rax + andl $1, %eax + movq -56(%rbp), %rdx ## 8-byte Reload + andl %edx, %r15d + andl $1, %r15d + addq -200(%rbp), %r12 + adcq -192(%rbp), %rbx + adcq -184(%rbp), %r13 + adcq -176(%rbp), %r10 + adcq %rax, %r15 + movq -208(%rbp), %rax + movq -216(%rbp), %rcx + movq -232(%rbp), %rsi + movq -224(%rbp), %rdx + subq (%r14), %rsi + sbbq 8(%r14), %rdx + sbbq 16(%r14), %rcx + sbbq 24(%r14), %rax + movq 32(%r14), %rdi + movq %rdi, -80(%rbp) ## 8-byte Spill + movq 40(%r14), %r8 + movq %r8, -88(%rbp) ## 8-byte Spill + sbbq %rdi, %r12 + sbbq %r8, %rbx + movq 48(%r14), %rdi + movq %rdi, -72(%rbp) ## 8-byte Spill + sbbq %rdi, %r13 + movq 56(%r14), %rdi + movq %rdi, -64(%rbp) ## 8-byte 
Spill + sbbq %rdi, %r10 + sbbq $0, %r15 + movq 64(%r14), %r11 + subq %r11, %rsi + movq 72(%r14), %rdi + movq %rdi, -56(%rbp) ## 8-byte Spill + sbbq %rdi, %rdx + movq 80(%r14), %rdi + movq %rdi, -48(%rbp) ## 8-byte Spill + sbbq %rdi, %rcx + movq 88(%r14), %rdi + movq %rdi, -104(%rbp) ## 8-byte Spill + sbbq %rdi, %rax + movq 96(%r14), %rdi + movq %rdi, -96(%rbp) ## 8-byte Spill + sbbq %rdi, %r12 + movq 104(%r14), %rdi + sbbq %rdi, %rbx + movq 112(%r14), %r8 + sbbq %r8, %r13 + movq 120(%r14), %r9 + sbbq %r9, %r10 + sbbq $0, %r15 + addq -80(%rbp), %rsi ## 8-byte Folded Reload + adcq -88(%rbp), %rdx ## 8-byte Folded Reload + movq %rsi, 32(%r14) + adcq -72(%rbp), %rcx ## 8-byte Folded Reload + movq %rdx, 40(%r14) + adcq -64(%rbp), %rax ## 8-byte Folded Reload + movq %rcx, 48(%r14) + adcq %r11, %r12 + movq %rax, 56(%r14) + movq %r12, 64(%r14) + adcq -56(%rbp), %rbx ## 8-byte Folded Reload + movq %rbx, 72(%r14) + adcq -48(%rbp), %r13 ## 8-byte Folded Reload + movq %r13, 80(%r14) + adcq -104(%rbp), %r10 ## 8-byte Folded Reload + movq %r10, 88(%r14) + adcq -96(%rbp), %r15 ## 8-byte Folded Reload + movq %r15, 96(%r14) + adcq $0, %rdi + movq %rdi, 104(%r14) + adcq $0, %r8 + movq %r8, 112(%r14) + adcq $0, %r9 + movq %r9, 120(%r14) + addq $200, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sqrPre8L + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre8L: ## @mcl_fpDbl_sqrPre8L +## BB#0: + pushq %rbp + movq %rsp, %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $200, %rsp + movq %rsi, %rbx + movq %rdi, %r14 + movq %rbx, %rdx + callq _mcl_fpDbl_mulPre4L + leaq 64(%r14), %rdi + leaq 32(%rbx), %rsi + movq %rsi, %rdx + callq _mcl_fpDbl_mulPre4L + movq 56(%rbx), %r15 + movq 48(%rbx), %rax + movq (%rbx), %rcx + movq 8(%rbx), %rdx + addq 32(%rbx), %rcx + adcq 40(%rbx), %rdx + adcq 16(%rbx), %rax + adcq 24(%rbx), %r15 + pushfq + popq %r8 + pushfq + popq %r9 + pushfq + popq %r10 + pushfq + popq %rdi + pushfq + popq %rbx + sbbq %rsi, %rsi + movq %rsi, -56(%rbp) ## 8-byte Spill + leaq (%rcx,%rcx), %rsi + xorl %r11d, %r11d + pushq %rbx + popfq + cmovaeq %r11, %rsi + movq %rsi, -48(%rbp) ## 8-byte Spill + movq %rdx, %r13 + shldq $1, %rcx, %r13 + pushq %rdi + popfq + cmovaeq %r11, %r13 + movq %rax, %r12 + shldq $1, %rdx, %r12 + pushq %r10 + popfq + cmovaeq %r11, %r12 + movq %r15, %rbx + movq %rcx, -168(%rbp) + movq %rdx, -160(%rbp) + movq %rax, -152(%rbp) + movq %r15, -144(%rbp) + movq %rcx, -136(%rbp) + movq %rdx, -128(%rbp) + movq %rax, -120(%rbp) + movq %r15, -112(%rbp) + shldq $1, %rax, %r15 + pushq %r9 + popfq + cmovaeq %r11, %r15 + shrq $63, %rbx + pushq %r8 + popfq + cmovaeq %r11, %rbx + leaq -232(%rbp), %rdi + leaq -168(%rbp), %rsi + leaq -136(%rbp), %rdx + callq _mcl_fpDbl_mulPre4L + movq -56(%rbp), %rax ## 8-byte Reload + andl $1, %eax + movq -48(%rbp), %r10 ## 8-byte Reload + addq -200(%rbp), %r10 + adcq -192(%rbp), %r13 + adcq -184(%rbp), %r12 + adcq -176(%rbp), %r15 + adcq %rbx, %rax + movq %rax, %rbx + movq -208(%rbp), %rax + movq -216(%rbp), %rcx + movq -232(%rbp), %rsi + movq -224(%rbp), %rdx + subq (%r14), %rsi + sbbq 8(%r14), %rdx + sbbq 16(%r14), %rcx + sbbq 24(%r14), %rax + movq 32(%r14), %r9 + movq %r9, -56(%rbp) ## 8-byte Spill + movq 40(%r14), %r8 + movq %r8, -48(%rbp) ## 8-byte Spill + sbbq %r9, %r10 + sbbq %r8, %r13 + movq 48(%r14), %rdi + movq %rdi, -104(%rbp) ## 8-byte Spill + sbbq %rdi, %r12 + movq 56(%r14), %rdi + movq %rdi, -96(%rbp) ## 8-byte Spill + sbbq %rdi, %r15 + sbbq $0, %rbx + movq 64(%r14), %r11 + subq %r11, 
%rsi + movq 72(%r14), %rdi + movq %rdi, -88(%rbp) ## 8-byte Spill + sbbq %rdi, %rdx + movq 80(%r14), %rdi + movq %rdi, -80(%rbp) ## 8-byte Spill + sbbq %rdi, %rcx + movq 88(%r14), %rdi + movq %rdi, -72(%rbp) ## 8-byte Spill + sbbq %rdi, %rax + movq 96(%r14), %rdi + movq %rdi, -64(%rbp) ## 8-byte Spill + sbbq %rdi, %r10 + movq 104(%r14), %rdi + sbbq %rdi, %r13 + movq 112(%r14), %r8 + sbbq %r8, %r12 + movq 120(%r14), %r9 + sbbq %r9, %r15 + sbbq $0, %rbx + addq -56(%rbp), %rsi ## 8-byte Folded Reload + adcq -48(%rbp), %rdx ## 8-byte Folded Reload + movq %rsi, 32(%r14) + adcq -104(%rbp), %rcx ## 8-byte Folded Reload + movq %rdx, 40(%r14) + adcq -96(%rbp), %rax ## 8-byte Folded Reload + movq %rcx, 48(%r14) + adcq %r11, %r10 + movq %rax, 56(%r14) + movq %r10, 64(%r14) + adcq -88(%rbp), %r13 ## 8-byte Folded Reload + movq %r13, 72(%r14) + adcq -80(%rbp), %r12 ## 8-byte Folded Reload + movq %r12, 80(%r14) + adcq -72(%rbp), %r15 ## 8-byte Folded Reload + movq %r15, 88(%r14) + movq %rbx, %rax + adcq -64(%rbp), %rax ## 8-byte Folded Reload + movq %rax, 96(%r14) + adcq $0, %rdi + movq %rdi, 104(%r14) + adcq $0, %r8 + movq %r8, 112(%r14) + adcq $0, %r9 + movq %r9, 120(%r14) + addq $200, %rsp + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mont8L + .p2align 4, 0x90 +_mcl_fp_mont8L: ## @mcl_fp_mont8L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $1256, %rsp ## imm = 0x4E8 + movq %rcx, %r13 + movq %rdx, 64(%rsp) ## 8-byte Spill + movq %rsi, 72(%rsp) ## 8-byte Spill + movq %rdi, 96(%rsp) ## 8-byte Spill + movq -8(%r13), %rbx + movq %rbx, 80(%rsp) ## 8-byte Spill + movq %r13, 56(%rsp) ## 8-byte Spill + movq (%rdx), %rdx + leaq 1184(%rsp), %rdi + callq l_mulPv512x64 + movq 1184(%rsp), %r15 + movq 1192(%rsp), %r14 + movq %r15, %rdx + imulq %rbx, %rdx + movq 1248(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + movq 1240(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + movq 1232(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 1224(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq 1216(%rsp), %r12 + movq 1208(%rsp), %rbx + movq 1200(%rsp), %rbp + leaq 1112(%rsp), %rdi + movq %r13, %rsi + callq l_mulPv512x64 + addq 1112(%rsp), %r15 + adcq 1120(%rsp), %r14 + adcq 1128(%rsp), %rbp + movq %rbp, 88(%rsp) ## 8-byte Spill + adcq 1136(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + adcq 1144(%rsp), %r12 + movq %r12, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %r13 ## 8-byte Reload + adcq 1152(%rsp), %r13 + movq (%rsp), %rbx ## 8-byte Reload + adcq 1160(%rsp), %rbx + movq 40(%rsp), %rbp ## 8-byte Reload + adcq 1168(%rsp), %rbp + movq 24(%rsp), %rax ## 8-byte Reload + adcq 1176(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + sbbq %r15, %r15 + movq 64(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdx + leaq 1040(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + andl $1, %r15d + addq 1040(%rsp), %r14 + movq 88(%rsp), %rax ## 8-byte Reload + adcq 1048(%rsp), %rax + movq %rax, 88(%rsp) ## 8-byte Spill + movq 32(%rsp), %rax ## 8-byte Reload + adcq 1056(%rsp), %rax + movq %rax, %r12 + movq 8(%rsp), %rax ## 8-byte Reload + adcq 1064(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + adcq 1072(%rsp), %r13 + movq %r13, 16(%rsp) ## 8-byte Spill + adcq 1080(%rsp), %rbx + movq %rbx, (%rsp) ## 8-byte Spill + adcq 1088(%rsp), %rbp + movq 24(%rsp), %rax ## 8-byte Reload + adcq 1096(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 1104(%rsp), %r15 + movq %r15, 48(%rsp) ## 8-byte 
Spill + sbbq %r15, %r15 + movq %r14, %rdx + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 968(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + andl $1, %r15d + addq 968(%rsp), %r14 + movq 88(%rsp), %r13 ## 8-byte Reload + adcq 976(%rsp), %r13 + adcq 984(%rsp), %r12 + movq %r12, 32(%rsp) ## 8-byte Spill + movq 8(%rsp), %r14 ## 8-byte Reload + adcq 992(%rsp), %r14 + movq 16(%rsp), %rbx ## 8-byte Reload + adcq 1000(%rsp), %rbx + movq (%rsp), %rax ## 8-byte Reload + adcq 1008(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + adcq 1016(%rsp), %rbp + movq %rbp, %r12 + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 1024(%rsp), %rbp + movq 48(%rsp), %rax ## 8-byte Reload + adcq 1032(%rsp), %rax + movq %rax, 48(%rsp) ## 8-byte Spill + adcq $0, %r15 + movq 64(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rdx + leaq 896(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq %r13, %rcx + addq 896(%rsp), %rcx + movq 32(%rsp), %r13 ## 8-byte Reload + adcq 904(%rsp), %r13 + adcq 912(%rsp), %r14 + adcq 920(%rsp), %rbx + movq %rbx, 16(%rsp) ## 8-byte Spill + movq (%rsp), %rax ## 8-byte Reload + adcq 928(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + adcq 936(%rsp), %r12 + movq %r12, 40(%rsp) ## 8-byte Spill + adcq 944(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq 48(%rsp), %r12 ## 8-byte Reload + adcq 952(%rsp), %r12 + adcq 960(%rsp), %r15 + sbbq %rbx, %rbx + movq %rcx, %rdx + movq %rcx, %rbp + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 824(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + andl $1, %ebx + addq 824(%rsp), %rbp + adcq 832(%rsp), %r13 + movq %r13, 32(%rsp) ## 8-byte Spill + adcq 840(%rsp), %r14 + movq %r14, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %r13 ## 8-byte Reload + adcq 848(%rsp), %r13 + movq (%rsp), %rbp ## 8-byte Reload + adcq 856(%rsp), %rbp + movq 40(%rsp), %r14 ## 8-byte Reload + adcq 864(%rsp), %r14 + movq 24(%rsp), %rax ## 8-byte Reload + adcq 872(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 880(%rsp), %r12 + adcq 888(%rsp), %r15 + adcq $0, %rbx + movq 64(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rdx + leaq 752(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 32(%rsp), %rax ## 8-byte Reload + addq 752(%rsp), %rax + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 760(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + adcq 768(%rsp), %r13 + movq %r13, 16(%rsp) ## 8-byte Spill + adcq 776(%rsp), %rbp + movq %rbp, (%rsp) ## 8-byte Spill + adcq 784(%rsp), %r14 + movq %r14, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 792(%rsp), %rbp + adcq 800(%rsp), %r12 + adcq 808(%rsp), %r15 + adcq 816(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + sbbq %r13, %r13 + movq %rax, %rdx + movq %rax, %rbx + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 680(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq %r13, %rax + andl $1, %eax + addq 680(%rsp), %rbx + movq 8(%rsp), %r14 ## 8-byte Reload + adcq 688(%rsp), %r14 + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 696(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + movq (%rsp), %r13 ## 8-byte Reload + adcq 704(%rsp), %r13 + movq 40(%rsp), %rbx ## 8-byte Reload + adcq 712(%rsp), %rbx + adcq 720(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq %r12, %rbp + adcq 728(%rsp), %rbp + adcq 736(%rsp), %r15 + movq 32(%rsp), %r12 ## 8-byte Reload + adcq 744(%rsp), %r12 + adcq $0, %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 64(%rsp), %rax ## 
8-byte Reload + movq 32(%rax), %rdx + leaq 608(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq %r14, %rax + addq 608(%rsp), %rax + movq 16(%rsp), %r14 ## 8-byte Reload + adcq 616(%rsp), %r14 + adcq 624(%rsp), %r13 + movq %r13, (%rsp) ## 8-byte Spill + adcq 632(%rsp), %rbx + movq %rbx, %r13 + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 640(%rsp), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 648(%rsp), %rbp + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 656(%rsp), %r15 + adcq 664(%rsp), %r12 + movq %r12, 32(%rsp) ## 8-byte Spill + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 672(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + sbbq %rbp, %rbp + movq %rax, %rdx + movq %rax, %rbx + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 536(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq %rbp, %rax + andl $1, %eax + addq 536(%rsp), %rbx + adcq 544(%rsp), %r14 + movq %r14, 16(%rsp) ## 8-byte Spill + movq (%rsp), %rbx ## 8-byte Reload + adcq 552(%rsp), %rbx + adcq 560(%rsp), %r13 + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 568(%rsp), %rbp + movq 48(%rsp), %r12 ## 8-byte Reload + adcq 576(%rsp), %r12 + adcq 584(%rsp), %r15 + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 592(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + movq 8(%rsp), %r14 ## 8-byte Reload + adcq 600(%rsp), %r14 + adcq $0, %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 64(%rsp), %rax ## 8-byte Reload + movq 40(%rax), %rdx + leaq 464(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 16(%rsp), %rax ## 8-byte Reload + addq 464(%rsp), %rax + adcq 472(%rsp), %rbx + adcq 480(%rsp), %r13 + movq %r13, 40(%rsp) ## 8-byte Spill + adcq 488(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + adcq 496(%rsp), %r12 + adcq 504(%rsp), %r15 + movq %r15, 16(%rsp) ## 8-byte Spill + movq 32(%rsp), %r15 ## 8-byte Reload + adcq 512(%rsp), %r15 + adcq 520(%rsp), %r14 + movq %r14, 8(%rsp) ## 8-byte Spill + movq (%rsp), %r14 ## 8-byte Reload + adcq 528(%rsp), %r14 + sbbq %r13, %r13 + movq %rax, %rdx + movq %rax, %rbp + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 392(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq %r13, %rax + andl $1, %eax + addq 392(%rsp), %rbp + adcq 400(%rsp), %rbx + movq %rbx, (%rsp) ## 8-byte Spill + movq 40(%rsp), %rbp ## 8-byte Reload + adcq 408(%rsp), %rbp + movq 24(%rsp), %rbx ## 8-byte Reload + adcq 416(%rsp), %rbx + adcq 424(%rsp), %r12 + movq 16(%rsp), %r13 ## 8-byte Reload + adcq 432(%rsp), %r13 + adcq 440(%rsp), %r15 + movq %r15, 32(%rsp) ## 8-byte Spill + movq 8(%rsp), %r15 ## 8-byte Reload + adcq 448(%rsp), %r15 + adcq 456(%rsp), %r14 + adcq $0, %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq 64(%rsp), %rax ## 8-byte Reload + movq 48(%rax), %rdx + leaq 320(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq (%rsp), %rax ## 8-byte Reload + addq 320(%rsp), %rax + adcq 328(%rsp), %rbp + movq %rbp, 40(%rsp) ## 8-byte Spill + adcq 336(%rsp), %rbx + movq %rbx, 24(%rsp) ## 8-byte Spill + movq %r12, %rbp + adcq 344(%rsp), %rbp + adcq 352(%rsp), %r13 + movq 32(%rsp), %r12 ## 8-byte Reload + adcq 360(%rsp), %r12 + adcq 368(%rsp), %r15 + movq %r15, 8(%rsp) ## 8-byte Spill + adcq 376(%rsp), %r14 + movq %r14, (%rsp) ## 8-byte Spill + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 384(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + sbbq %r15, %r15 + movq %rax, %rdx + movq %rax, %rbx + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 248(%rsp), %rdi + movq 56(%rsp), 
%rsi ## 8-byte Reload + callq l_mulPv512x64 + andl $1, %r15d + addq 248(%rsp), %rbx + movq 40(%rsp), %rax ## 8-byte Reload + adcq 256(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %r14 ## 8-byte Reload + adcq 264(%rsp), %r14 + adcq 272(%rsp), %rbp + movq %rbp, 48(%rsp) ## 8-byte Spill + movq %r13, %rbx + adcq 280(%rsp), %rbx + movq %r12, %rbp + adcq 288(%rsp), %rbp + movq 8(%rsp), %r13 ## 8-byte Reload + adcq 296(%rsp), %r13 + movq (%rsp), %rax ## 8-byte Reload + adcq 304(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 16(%rsp), %r12 ## 8-byte Reload + adcq 312(%rsp), %r12 + adcq $0, %r15 + movq 64(%rsp), %rax ## 8-byte Reload + movq 56(%rax), %rdx + leaq 176(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 40(%rsp), %rax ## 8-byte Reload + addq 176(%rsp), %rax + adcq 184(%rsp), %r14 + movq %r14, 24(%rsp) ## 8-byte Spill + movq 48(%rsp), %rcx ## 8-byte Reload + adcq 192(%rsp), %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + adcq 200(%rsp), %rbx + movq %rbx, 16(%rsp) ## 8-byte Spill + adcq 208(%rsp), %rbp + adcq 216(%rsp), %r13 + movq %r13, 8(%rsp) ## 8-byte Spill + movq (%rsp), %r14 ## 8-byte Reload + adcq 224(%rsp), %r14 + adcq 232(%rsp), %r12 + adcq 240(%rsp), %r15 + sbbq %rbx, %rbx + movq 80(%rsp), %rdx ## 8-byte Reload + imulq %rax, %rdx + movq %rax, %r13 + leaq 104(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + andl $1, %ebx + addq 104(%rsp), %r13 + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 112(%rsp), %rcx + movq 48(%rsp), %rdx ## 8-byte Reload + adcq 120(%rsp), %rdx + movq 16(%rsp), %rsi ## 8-byte Reload + adcq 128(%rsp), %rsi + movq %rbp, %rdi + adcq 136(%rsp), %rdi + movq %rdi, 32(%rsp) ## 8-byte Spill + movq 8(%rsp), %r8 ## 8-byte Reload + adcq 144(%rsp), %r8 + movq %r8, 8(%rsp) ## 8-byte Spill + movq %r14, %r9 + adcq 152(%rsp), %r9 + movq %r9, (%rsp) ## 8-byte Spill + adcq 160(%rsp), %r12 + adcq 168(%rsp), %r15 + adcq $0, %rbx + movq %rcx, %rax + movq %rcx, %r11 + movq 56(%rsp), %rbp ## 8-byte Reload + subq (%rbp), %rax + movq %rdx, %rcx + movq %rdx, %r14 + sbbq 8(%rbp), %rcx + movq %rsi, %rdx + movq %rsi, %r13 + sbbq 16(%rbp), %rdx + movq %rdi, %rsi + sbbq 24(%rbp), %rsi + movq %r8, %rdi + sbbq 32(%rbp), %rdi + movq %r9, %r10 + sbbq 40(%rbp), %r10 + movq %r12, %r8 + sbbq 48(%rbp), %r8 + movq %r15, %r9 + sbbq 56(%rbp), %r9 + sbbq $0, %rbx + andl $1, %ebx + cmovneq %r15, %r9 + testb %bl, %bl + cmovneq %r11, %rax + movq 96(%rsp), %rbx ## 8-byte Reload + movq %rax, (%rbx) + cmovneq %r14, %rcx + movq %rcx, 8(%rbx) + cmovneq %r13, %rdx + movq %rdx, 16(%rbx) + cmovneq 32(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 24(%rbx) + cmovneq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 32(%rbx) + cmovneq (%rsp), %r10 ## 8-byte Folded Reload + movq %r10, 40(%rbx) + cmovneq %r12, %r8 + movq %r8, 48(%rbx) + movq %r9, 56(%rbx) + addq $1256, %rsp ## imm = 0x4E8 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF8L + .p2align 4, 0x90 +_mcl_fp_montNF8L: ## @mcl_fp_montNF8L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $1240, %rsp ## imm = 0x4D8 + movq %rcx, 40(%rsp) ## 8-byte Spill + movq %rdx, 48(%rsp) ## 8-byte Spill + movq %rsi, 56(%rsp) ## 8-byte Spill + movq %rdi, 80(%rsp) ## 8-byte Spill + movq -8(%rcx), %rbx + movq %rbx, 64(%rsp) ## 8-byte Spill + movq (%rdx), %rdx + leaq 1168(%rsp), %rdi + callq l_mulPv512x64 + movq 1168(%rsp), %r15 + movq 1176(%rsp), %r12 + movq %r15, %rdx + imulq %rbx, %rdx + 
movq 1232(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 1224(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 1216(%rsp), %r13 + movq 1208(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 1200(%rsp), %r14 + movq 1192(%rsp), %rbp + movq 1184(%rsp), %rbx + leaq 1096(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 1096(%rsp), %r15 + adcq 1104(%rsp), %r12 + movq %r12, 16(%rsp) ## 8-byte Spill + adcq 1112(%rsp), %rbx + adcq 1120(%rsp), %rbp + adcq 1128(%rsp), %r14 + movq %r14, %r12 + movq 8(%rsp), %r14 ## 8-byte Reload + adcq 1136(%rsp), %r14 + adcq 1144(%rsp), %r13 + movq (%rsp), %rax ## 8-byte Reload + adcq 1152(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 32(%rsp), %rax ## 8-byte Reload + adcq 1160(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 48(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdx + leaq 1024(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 1088(%rsp), %r15 + movq 16(%rsp), %rax ## 8-byte Reload + addq 1024(%rsp), %rax + adcq 1032(%rsp), %rbx + movq %rbx, 72(%rsp) ## 8-byte Spill + movq %rbp, %rbx + adcq 1040(%rsp), %rbx + adcq 1048(%rsp), %r12 + adcq 1056(%rsp), %r14 + movq %r14, 8(%rsp) ## 8-byte Spill + movq %r13, %rbp + adcq 1064(%rsp), %rbp + movq (%rsp), %rcx ## 8-byte Reload + adcq 1072(%rsp), %rcx + movq %rcx, (%rsp) ## 8-byte Spill + movq 32(%rsp), %r14 ## 8-byte Reload + adcq 1080(%rsp), %r14 + adcq $0, %r15 + movq %rax, %rdx + movq %rax, %r13 + imulq 64(%rsp), %rdx ## 8-byte Folded Reload + leaq 952(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 952(%rsp), %r13 + movq 72(%rsp), %rax ## 8-byte Reload + adcq 960(%rsp), %rax + movq %rax, 72(%rsp) ## 8-byte Spill + adcq 968(%rsp), %rbx + movq %rbx, 16(%rsp) ## 8-byte Spill + movq %r12, %rbx + adcq 976(%rsp), %rbx + movq 8(%rsp), %r12 ## 8-byte Reload + adcq 984(%rsp), %r12 + adcq 992(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq (%rsp), %r13 ## 8-byte Reload + adcq 1000(%rsp), %r13 + movq %r14, %rbp + adcq 1008(%rsp), %rbp + adcq 1016(%rsp), %r15 + movq 48(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rdx + leaq 880(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 944(%rsp), %r14 + movq 72(%rsp), %rax ## 8-byte Reload + addq 880(%rsp), %rax + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 888(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + adcq 896(%rsp), %rbx + adcq 904(%rsp), %r12 + movq %r12, 8(%rsp) ## 8-byte Spill + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 912(%rsp), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 920(%rsp), %r13 + movq %r13, (%rsp) ## 8-byte Spill + adcq 928(%rsp), %rbp + movq %rbp, 32(%rsp) ## 8-byte Spill + adcq 936(%rsp), %r15 + adcq $0, %r14 + movq %rax, %rdx + movq %rax, %rbp + imulq 64(%rsp), %rdx ## 8-byte Folded Reload + leaq 808(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 808(%rsp), %rbp + movq 16(%rsp), %r13 ## 8-byte Reload + adcq 816(%rsp), %r13 + movq %rbx, %r12 + adcq 824(%rsp), %r12 + movq 8(%rsp), %rbx ## 8-byte Reload + adcq 832(%rsp), %rbx + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 840(%rsp), %rbp + movq (%rsp), %rax ## 8-byte Reload + adcq 848(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 32(%rsp), %rax ## 8-byte Reload + adcq 856(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + adcq 864(%rsp), %r15 + adcq 872(%rsp), %r14 + movq 48(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rdx + leaq 736(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload 
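+## Editor's note (descriptive comment, not part of the generated code): this is
+## the interleaved multiply/reduce loop of _mcl_fp_montNF8L. Reading mcl's naming
+## convention, "NF" appears to mark the variant usable when the modulus leaves
+## headroom in its top word, so the per-pass carry masking visible in
+## _mcl_fp_mont8L above (the sbbq/andl $1 pairs) can be dropped, and the final
+## result is selected with sign-based cmovsq instead of a carry test.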
+ callq l_mulPv512x64 + movq 800(%rsp), %rax + movq %r13, %rcx + addq 736(%rsp), %rcx + adcq 744(%rsp), %r12 + movq %r12, 24(%rsp) ## 8-byte Spill + adcq 752(%rsp), %rbx + movq %rbx, 8(%rsp) ## 8-byte Spill + adcq 760(%rsp), %rbp + movq %rbp, %r13 + movq (%rsp), %rbp ## 8-byte Reload + adcq 768(%rsp), %rbp + movq 32(%rsp), %rbx ## 8-byte Reload + adcq 776(%rsp), %rbx + adcq 784(%rsp), %r15 + adcq 792(%rsp), %r14 + adcq $0, %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq %rcx, %rdx + movq %rcx, %r12 + imulq 64(%rsp), %rdx ## 8-byte Folded Reload + leaq 664(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 664(%rsp), %r12 + movq 24(%rsp), %rax ## 8-byte Reload + adcq 672(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + movq 8(%rsp), %rax ## 8-byte Reload + adcq 680(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + adcq 688(%rsp), %r13 + adcq 696(%rsp), %rbp + movq %rbp, (%rsp) ## 8-byte Spill + adcq 704(%rsp), %rbx + adcq 712(%rsp), %r15 + adcq 720(%rsp), %r14 + movq 16(%rsp), %r12 ## 8-byte Reload + adcq 728(%rsp), %r12 + movq 48(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rdx + leaq 592(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 656(%rsp), %rcx + movq 24(%rsp), %rax ## 8-byte Reload + addq 592(%rsp), %rax + movq 8(%rsp), %rbp ## 8-byte Reload + adcq 600(%rsp), %rbp + adcq 608(%rsp), %r13 + movq %r13, 24(%rsp) ## 8-byte Spill + movq (%rsp), %r13 ## 8-byte Reload + adcq 616(%rsp), %r13 + adcq 624(%rsp), %rbx + adcq 632(%rsp), %r15 + adcq 640(%rsp), %r14 + adcq 648(%rsp), %r12 + movq %r12, 16(%rsp) ## 8-byte Spill + adcq $0, %rcx + movq %rcx, (%rsp) ## 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 64(%rsp), %rdx ## 8-byte Folded Reload + leaq 520(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 520(%rsp), %r12 + adcq 528(%rsp), %rbp + movq %rbp, 8(%rsp) ## 8-byte Spill + movq 24(%rsp), %r12 ## 8-byte Reload + adcq 536(%rsp), %r12 + movq %r13, %rbp + adcq 544(%rsp), %rbp + adcq 552(%rsp), %rbx + adcq 560(%rsp), %r15 + adcq 568(%rsp), %r14 + movq 16(%rsp), %r13 ## 8-byte Reload + adcq 576(%rsp), %r13 + movq (%rsp), %rax ## 8-byte Reload + adcq 584(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 48(%rsp), %rax ## 8-byte Reload + movq 40(%rax), %rdx + leaq 448(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 512(%rsp), %rcx + movq 8(%rsp), %rax ## 8-byte Reload + addq 448(%rsp), %rax + adcq 456(%rsp), %r12 + movq %r12, 24(%rsp) ## 8-byte Spill + adcq 464(%rsp), %rbp + adcq 472(%rsp), %rbx + adcq 480(%rsp), %r15 + adcq 488(%rsp), %r14 + adcq 496(%rsp), %r13 + movq %r13, 16(%rsp) ## 8-byte Spill + movq (%rsp), %r13 ## 8-byte Reload + adcq 504(%rsp), %r13 + adcq $0, %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 64(%rsp), %rdx ## 8-byte Folded Reload + leaq 376(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 376(%rsp), %r12 + movq 24(%rsp), %rax ## 8-byte Reload + adcq 384(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 392(%rsp), %rbp + adcq 400(%rsp), %rbx + adcq 408(%rsp), %r15 + adcq 416(%rsp), %r14 + movq 16(%rsp), %r12 ## 8-byte Reload + adcq 424(%rsp), %r12 + adcq 432(%rsp), %r13 + movq 8(%rsp), %rax ## 8-byte Reload + adcq 440(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 48(%rsp), %rax ## 8-byte Reload + movq 48(%rax), %rdx + leaq 304(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 368(%rsp), %rcx + movq 24(%rsp), 
%rax ## 8-byte Reload + addq 304(%rsp), %rax + adcq 312(%rsp), %rbp + movq %rbp, (%rsp) ## 8-byte Spill + adcq 320(%rsp), %rbx + adcq 328(%rsp), %r15 + adcq 336(%rsp), %r14 + adcq 344(%rsp), %r12 + movq %r12, 16(%rsp) ## 8-byte Spill + adcq 352(%rsp), %r13 + movq 8(%rsp), %rbp ## 8-byte Reload + adcq 360(%rsp), %rbp + adcq $0, %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 64(%rsp), %rdx ## 8-byte Folded Reload + leaq 232(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 232(%rsp), %r12 + movq (%rsp), %rax ## 8-byte Reload + adcq 240(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + adcq 248(%rsp), %rbx + adcq 256(%rsp), %r15 + adcq 264(%rsp), %r14 + movq 16(%rsp), %r12 ## 8-byte Reload + adcq 272(%rsp), %r12 + adcq 280(%rsp), %r13 + adcq 288(%rsp), %rbp + movq %rbp, 8(%rsp) ## 8-byte Spill + movq 32(%rsp), %rbp ## 8-byte Reload + adcq 296(%rsp), %rbp + movq 48(%rsp), %rax ## 8-byte Reload + movq 56(%rax), %rdx + leaq 160(%rsp), %rdi + movq 56(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + movq 224(%rsp), %rcx + movq (%rsp), %rax ## 8-byte Reload + addq 160(%rsp), %rax + adcq 168(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + adcq 176(%rsp), %r15 + adcq 184(%rsp), %r14 + adcq 192(%rsp), %r12 + movq %r12, 16(%rsp) ## 8-byte Spill + adcq 200(%rsp), %r13 + movq 8(%rsp), %rbx ## 8-byte Reload + adcq 208(%rsp), %rbx + adcq 216(%rsp), %rbp + movq %rbp, %r12 + adcq $0, %rcx + movq %rcx, (%rsp) ## 8-byte Spill + movq 64(%rsp), %rdx ## 8-byte Reload + imulq %rax, %rdx + movq %rax, %rbp + leaq 88(%rsp), %rdi + movq 40(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 88(%rsp), %rbp + movq 32(%rsp), %r11 ## 8-byte Reload + adcq 96(%rsp), %r11 + adcq 104(%rsp), %r15 + adcq 112(%rsp), %r14 + movq 16(%rsp), %rsi ## 8-byte Reload + adcq 120(%rsp), %rsi + movq %rsi, 16(%rsp) ## 8-byte Spill + adcq 128(%rsp), %r13 + adcq 136(%rsp), %rbx + movq %rbx, 8(%rsp) ## 8-byte Spill + adcq 144(%rsp), %r12 + movq (%rsp), %r8 ## 8-byte Reload + adcq 152(%rsp), %r8 + movq %r11, %rax + movq 40(%rsp), %rbp ## 8-byte Reload + subq (%rbp), %rax + movq %r15, %rcx + sbbq 8(%rbp), %rcx + movq %r14, %rdx + sbbq 16(%rbp), %rdx + sbbq 24(%rbp), %rsi + movq %r13, %rdi + sbbq 32(%rbp), %rdi + movq %rbx, %r9 + sbbq 40(%rbp), %r9 + movq %r12, %r10 + sbbq 48(%rbp), %r10 + movq %rbp, %rbx + movq %r8, %rbp + sbbq 56(%rbx), %rbp + testq %rbp, %rbp + cmovsq %r11, %rax + movq 80(%rsp), %rbx ## 8-byte Reload + movq %rax, (%rbx) + cmovsq %r15, %rcx + movq %rcx, 8(%rbx) + cmovsq %r14, %rdx + movq %rdx, 16(%rbx) + cmovsq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 24(%rbx) + cmovsq %r13, %rdi + movq %rdi, 32(%rbx) + cmovsq 8(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 40(%rbx) + cmovsq %r12, %r10 + movq %r10, 48(%rbx) + cmovsq %r8, %rbp + movq %rbp, 56(%rbx) + addq $1240, %rsp ## imm = 0x4D8 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed8L + .p2align 4, 0x90 +_mcl_fp_montRed8L: ## @mcl_fp_montRed8L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $776, %rsp ## imm = 0x308 + movq %rdx, %rax + movq %rdi, 192(%rsp) ## 8-byte Spill + movq -8(%rax), %rcx + movq %rcx, 104(%rsp) ## 8-byte Spill + movq (%rsi), %r15 + movq 8(%rsi), %rdx + movq %rdx, 8(%rsp) ## 8-byte Spill + movq %r15, %rdx + imulq %rcx, %rdx + movq 120(%rsi), %rcx + movq %rcx, 112(%rsp) ## 8-byte Spill + movq 112(%rsi), %rcx + movq %rcx, 56(%rsp) ## 8-byte Spill + movq 104(%rsi), 
%rcx + movq %rcx, 96(%rsp) ## 8-byte Spill + movq 96(%rsi), %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + movq 88(%rsi), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + movq 80(%rsi), %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + movq 72(%rsi), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + movq 64(%rsi), %r13 + movq 56(%rsi), %rcx + movq %rcx, 64(%rsp) ## 8-byte Spill + movq 48(%rsi), %r14 + movq 40(%rsi), %rcx + movq %rcx, 72(%rsp) ## 8-byte Spill + movq 32(%rsi), %r12 + movq 24(%rsi), %rbx + movq 16(%rsi), %rbp + movq %rax, %rcx + movq (%rcx), %rax + movq %rax, 136(%rsp) ## 8-byte Spill + movq 56(%rcx), %rax + movq %rax, 184(%rsp) ## 8-byte Spill + movq 48(%rcx), %rax + movq %rax, 176(%rsp) ## 8-byte Spill + movq 40(%rcx), %rax + movq %rax, 168(%rsp) ## 8-byte Spill + movq 32(%rcx), %rax + movq %rax, 160(%rsp) ## 8-byte Spill + movq 24(%rcx), %rax + movq %rax, 152(%rsp) ## 8-byte Spill + movq 16(%rcx), %rax + movq %rax, 144(%rsp) ## 8-byte Spill + movq 8(%rcx), %rax + movq %rax, 128(%rsp) ## 8-byte Spill + movq %rcx, %rsi + movq %rsi, 88(%rsp) ## 8-byte Spill + leaq 704(%rsp), %rdi + callq l_mulPv512x64 + addq 704(%rsp), %r15 + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 712(%rsp), %rcx + adcq 720(%rsp), %rbp + movq %rbp, 80(%rsp) ## 8-byte Spill + adcq 728(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + adcq 736(%rsp), %r12 + movq %r12, 120(%rsp) ## 8-byte Spill + movq 72(%rsp), %rax ## 8-byte Reload + adcq 744(%rsp), %rax + movq %rax, 72(%rsp) ## 8-byte Spill + adcq 752(%rsp), %r14 + movq %r14, %r12 + movq 64(%rsp), %rax ## 8-byte Reload + adcq 760(%rsp), %rax + movq %rax, 64(%rsp) ## 8-byte Spill + adcq 768(%rsp), %r13 + movq %r13, 8(%rsp) ## 8-byte Spill + adcq $0, 16(%rsp) ## 8-byte Folded Spill + movq 40(%rsp), %r15 ## 8-byte Reload + adcq $0, %r15 + adcq $0, 24(%rsp) ## 8-byte Folded Spill + adcq $0, 48(%rsp) ## 8-byte Folded Spill + adcq $0, 96(%rsp) ## 8-byte Folded Spill + movq 56(%rsp), %r13 ## 8-byte Reload + adcq $0, %r13 + movq 112(%rsp), %r14 ## 8-byte Reload + adcq $0, %r14 + sbbq %rbx, %rbx + movq %rcx, %rbp + movq %rbp, %rdx + imulq 104(%rsp), %rdx ## 8-byte Folded Reload + leaq 632(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + andl $1, %ebx + movq %rbx, %rax + addq 632(%rsp), %rbp + movq 80(%rsp), %rsi ## 8-byte Reload + adcq 640(%rsp), %rsi + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 648(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + movq 120(%rsp), %rcx ## 8-byte Reload + adcq 656(%rsp), %rcx + movq %rcx, 120(%rsp) ## 8-byte Spill + movq 72(%rsp), %rcx ## 8-byte Reload + adcq 664(%rsp), %rcx + movq %rcx, 72(%rsp) ## 8-byte Spill + adcq 672(%rsp), %r12 + movq 64(%rsp), %rcx ## 8-byte Reload + adcq 680(%rsp), %rcx + movq %rcx, 64(%rsp) ## 8-byte Spill + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 688(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 696(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + adcq $0, %r15 + movq %r15, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rbx ## 8-byte Reload + adcq $0, %rbx + movq 48(%rsp), %r15 ## 8-byte Reload + adcq $0, %r15 + adcq $0, 96(%rsp) ## 8-byte Folded Spill + adcq $0, %r13 + movq %r13, 56(%rsp) ## 8-byte Spill + adcq $0, %r14 + movq %r14, 112(%rsp) ## 8-byte Spill + movq %rax, %rbp + adcq $0, %rbp + movq %rsi, %rdx + movq %rsi, %r14 + imulq 104(%rsp), %rdx ## 8-byte Folded Reload + leaq 560(%rsp), %rdi + movq 88(%rsp), %r13 ## 8-byte Reload + movq %r13, %rsi + callq l_mulPv512x64 + addq 560(%rsp), %r14 + movq 32(%rsp), %rcx ## 8-byte Reload + 
adcq 568(%rsp), %rcx + movq 120(%rsp), %rax ## 8-byte Reload + adcq 576(%rsp), %rax + movq %rax, 120(%rsp) ## 8-byte Spill + movq 72(%rsp), %rax ## 8-byte Reload + adcq 584(%rsp), %rax + movq %rax, 72(%rsp) ## 8-byte Spill + adcq 592(%rsp), %r12 + movq %r12, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %r14 ## 8-byte Reload + adcq 600(%rsp), %r14 + movq 8(%rsp), %rax ## 8-byte Reload + adcq 608(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %rax ## 8-byte Reload + adcq 616(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq 40(%rsp), %rax ## 8-byte Reload + adcq 624(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + adcq $0, %rbx + movq %rbx, 24(%rsp) ## 8-byte Spill + adcq $0, %r15 + movq %r15, 48(%rsp) ## 8-byte Spill + movq 96(%rsp), %rbx ## 8-byte Reload + adcq $0, %rbx + movq 56(%rsp), %r15 ## 8-byte Reload + adcq $0, %r15 + adcq $0, 112(%rsp) ## 8-byte Folded Spill + adcq $0, %rbp + movq %rbp, 80(%rsp) ## 8-byte Spill + movq %rcx, %rbp + movq %rbp, %rdx + movq 104(%rsp), %r12 ## 8-byte Reload + imulq %r12, %rdx + leaq 488(%rsp), %rdi + movq %r13, %rsi + callq l_mulPv512x64 + addq 488(%rsp), %rbp + movq 120(%rsp), %rax ## 8-byte Reload + adcq 496(%rsp), %rax + movq 72(%rsp), %rbp ## 8-byte Reload + adcq 504(%rsp), %rbp + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 512(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + adcq 520(%rsp), %r14 + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 528(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 536(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + movq 40(%rsp), %r13 ## 8-byte Reload + adcq 544(%rsp), %r13 + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 552(%rsp), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq $0, 48(%rsp) ## 8-byte Folded Spill + adcq $0, %rbx + movq %rbx, 96(%rsp) ## 8-byte Spill + movq %r15, %rbx + adcq $0, %rbx + adcq $0, 112(%rsp) ## 8-byte Folded Spill + adcq $0, 80(%rsp) ## 8-byte Folded Spill + movq %rax, %rdx + movq %rax, %r15 + imulq %r12, %rdx + leaq 416(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 416(%rsp), %r15 + adcq 424(%rsp), %rbp + movq %rbp, %rax + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 432(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + movq %r14, %r12 + adcq 440(%rsp), %r12 + movq 8(%rsp), %r14 ## 8-byte Reload + adcq 448(%rsp), %r14 + movq 16(%rsp), %rbp ## 8-byte Reload + adcq 456(%rsp), %rbp + adcq 464(%rsp), %r13 + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 472(%rsp), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + movq 48(%rsp), %rcx ## 8-byte Reload + adcq 480(%rsp), %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + adcq $0, 96(%rsp) ## 8-byte Folded Spill + adcq $0, %rbx + movq %rbx, 56(%rsp) ## 8-byte Spill + movq 112(%rsp), %r15 ## 8-byte Reload + adcq $0, %r15 + adcq $0, 80(%rsp) ## 8-byte Folded Spill + movq %rax, %rbx + movq %rbx, %rdx + imulq 104(%rsp), %rdx ## 8-byte Folded Reload + leaq 344(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 344(%rsp), %rbx + movq 32(%rsp), %rax ## 8-byte Reload + adcq 352(%rsp), %rax + adcq 360(%rsp), %r12 + movq %r12, 64(%rsp) ## 8-byte Spill + adcq 368(%rsp), %r14 + movq %r14, 8(%rsp) ## 8-byte Spill + adcq 376(%rsp), %rbp + movq %rbp, 16(%rsp) ## 8-byte Spill + adcq 384(%rsp), %r13 + movq %r13, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %r13 ## 8-byte Reload + adcq 392(%rsp), %r13 + movq 48(%rsp), %r12 ## 8-byte Reload + adcq 400(%rsp), %r12 + movq 96(%rsp), %r14 ## 8-byte Reload + adcq 408(%rsp), %r14 + movq 56(%rsp), 
%rbp ## 8-byte Reload + adcq $0, %rbp + movq %r15, %rbx + adcq $0, %rbx + adcq $0, 80(%rsp) ## 8-byte Folded Spill + movq %rax, %rdx + movq %rax, %r15 + imulq 104(%rsp), %rdx ## 8-byte Folded Reload + leaq 272(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 272(%rsp), %r15 + movq 64(%rsp), %rcx ## 8-byte Reload + adcq 280(%rsp), %rcx + movq 8(%rsp), %rax ## 8-byte Reload + adcq 288(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %rax ## 8-byte Reload + adcq 296(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq 40(%rsp), %rax ## 8-byte Reload + adcq 304(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 312(%rsp), %r13 + movq %r13, 24(%rsp) ## 8-byte Spill + adcq 320(%rsp), %r12 + movq %r12, 48(%rsp) ## 8-byte Spill + adcq 328(%rsp), %r14 + movq %r14, %r13 + adcq 336(%rsp), %rbp + movq %rbp, %r12 + adcq $0, %rbx + movq %rbx, %r14 + movq 80(%rsp), %r15 ## 8-byte Reload + adcq $0, %r15 + movq 104(%rsp), %rdx ## 8-byte Reload + movq %rcx, %rbx + imulq %rbx, %rdx + leaq 200(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv512x64 + addq 200(%rsp), %rbx + movq 8(%rsp), %rax ## 8-byte Reload + adcq 208(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %r8 ## 8-byte Reload + adcq 216(%rsp), %r8 + movq %r8, 16(%rsp) ## 8-byte Spill + movq 40(%rsp), %rdx ## 8-byte Reload + adcq 224(%rsp), %rdx + movq 24(%rsp), %rsi ## 8-byte Reload + adcq 232(%rsp), %rsi + movq 48(%rsp), %rdi ## 8-byte Reload + adcq 240(%rsp), %rdi + movq %r13, %rbp + adcq 248(%rsp), %rbp + movq %r12, %rbx + adcq 256(%rsp), %rbx + movq %rbx, 56(%rsp) ## 8-byte Spill + movq %r14, %r9 + adcq 264(%rsp), %r9 + adcq $0, %r15 + movq %r15, %r10 + subq 136(%rsp), %rax ## 8-byte Folded Reload + movq %r8, %rcx + sbbq 128(%rsp), %rcx ## 8-byte Folded Reload + movq %rdx, %r13 + sbbq 144(%rsp), %r13 ## 8-byte Folded Reload + movq %rsi, %r12 + sbbq 152(%rsp), %r12 ## 8-byte Folded Reload + movq %rdi, %r14 + sbbq 160(%rsp), %r14 ## 8-byte Folded Reload + movq %rbp, %r11 + sbbq 168(%rsp), %r11 ## 8-byte Folded Reload + movq %rbx, %r8 + sbbq 176(%rsp), %r8 ## 8-byte Folded Reload + movq %r9, %r15 + sbbq 184(%rsp), %r9 ## 8-byte Folded Reload + sbbq $0, %r10 + andl $1, %r10d + cmovneq %r15, %r9 + testb %r10b, %r10b + cmovneq 8(%rsp), %rax ## 8-byte Folded Reload + movq 192(%rsp), %rbx ## 8-byte Reload + movq %rax, (%rbx) + cmovneq 16(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 8(%rbx) + cmovneq %rdx, %r13 + movq %r13, 16(%rbx) + cmovneq %rsi, %r12 + movq %r12, 24(%rbx) + cmovneq %rdi, %r14 + movq %r14, 32(%rbx) + cmovneq %rbp, %r11 + movq %r11, 40(%rbx) + cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, 48(%rbx) + movq %r9, 56(%rbx) + addq $776, %rsp ## imm = 0x308 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_addPre8L + .p2align 4, 0x90 +_mcl_fp_addPre8L: ## @mcl_fp_addPre8L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r8 + movq 56(%rsi), %r15 + movq 48(%rdx), %r9 + movq 48(%rsi), %r12 + movq 40(%rdx), %r10 + movq 32(%rdx), %r11 + movq 24(%rdx), %r14 + movq 16(%rdx), %rbx + movq (%rdx), %rcx + movq 8(%rdx), %rdx + addq (%rsi), %rcx + adcq 8(%rsi), %rdx + adcq 16(%rsi), %rbx + movq 40(%rsi), %r13 + movq 24(%rsi), %rax + movq 32(%rsi), %rsi + movq %rcx, (%rdi) + movq %rdx, 8(%rdi) + movq %rbx, 16(%rdi) + adcq %r14, %rax + movq %rax, 24(%rdi) + adcq %r11, %rsi + movq %rsi, 32(%rdi) + adcq %r10, %r13 + movq %r13, 40(%rdi) + adcq %r9, %r12 + movq 
%r12, 48(%rdi) + adcq %r8, %r15 + movq %r15, 56(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_subPre8L + .p2align 4, 0x90 +_mcl_fp_subPre8L: ## @mcl_fp_subPre8L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r8 + movq 56(%rsi), %r15 + movq 48(%rdx), %r9 + movq 40(%rdx), %r10 + movq 24(%rdx), %r11 + movq 32(%rdx), %r14 + movq (%rsi), %rbx + movq 8(%rsi), %r12 + xorl %eax, %eax + subq (%rdx), %rbx + sbbq 8(%rdx), %r12 + movq 16(%rsi), %rcx + sbbq 16(%rdx), %rcx + movq 48(%rsi), %r13 + movq 40(%rsi), %rdx + movq 32(%rsi), %rbp + movq 24(%rsi), %rsi + movq %rbx, (%rdi) + movq %r12, 8(%rdi) + movq %rcx, 16(%rdi) + sbbq %r11, %rsi + movq %rsi, 24(%rdi) + sbbq %r14, %rbp + movq %rbp, 32(%rdi) + sbbq %r10, %rdx + movq %rdx, 40(%rdi) + sbbq %r9, %r13 + movq %r13, 48(%rdi) + sbbq %r8, %r15 + movq %r15, 56(%rdi) + sbbq $0, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_shr1_8L + .p2align 4, 0x90 +_mcl_fp_shr1_8L: ## @mcl_fp_shr1_8L +## BB#0: + movq 56(%rsi), %r8 + movq 48(%rsi), %r9 + movq 40(%rsi), %r10 + movq 32(%rsi), %r11 + movq 24(%rsi), %rcx + movq 16(%rsi), %rdx + movq (%rsi), %rax + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rax + movq %rax, (%rdi) + shrdq $1, %rdx, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rcx, %rdx + movq %rdx, 16(%rdi) + shrdq $1, %r11, %rcx + movq %rcx, 24(%rdi) + shrdq $1, %r10, %r11 + movq %r11, 32(%rdi) + shrdq $1, %r9, %r10 + movq %r10, 40(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 48(%rdi) + shrq %r8 + movq %r8, 56(%rdi) + retq + + .globl _mcl_fp_add8L + .p2align 4, 0x90 +_mcl_fp_add8L: ## @mcl_fp_add8L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r15 + movq 56(%rsi), %r8 + movq 48(%rdx), %r12 + movq 48(%rsi), %r9 + movq 40(%rsi), %r13 + movq 24(%rsi), %r11 + movq 32(%rsi), %r10 + movq (%rdx), %r14 + movq 8(%rdx), %rbx + addq (%rsi), %r14 + adcq 8(%rsi), %rbx + movq 16(%rdx), %rax + adcq 16(%rsi), %rax + adcq 24(%rdx), %r11 + movq 40(%rdx), %rsi + adcq 32(%rdx), %r10 + movq %r14, (%rdi) + movq %rbx, 8(%rdi) + movq %rax, 16(%rdi) + movq %r11, 24(%rdi) + movq %r10, 32(%rdi) + adcq %r13, %rsi + movq %rsi, 40(%rdi) + adcq %r12, %r9 + movq %r9, 48(%rdi) + adcq %r15, %r8 + movq %r8, 56(%rdi) + sbbq %rdx, %rdx + andl $1, %edx + subq (%rcx), %r14 + sbbq 8(%rcx), %rbx + sbbq 16(%rcx), %rax + sbbq 24(%rcx), %r11 + sbbq 32(%rcx), %r10 + sbbq 40(%rcx), %rsi + sbbq 48(%rcx), %r9 + sbbq 56(%rcx), %r8 + sbbq $0, %rdx + testb $1, %dl + jne LBB120_2 +## BB#1: ## %nocarry + movq %r14, (%rdi) + movq %rbx, 8(%rdi) + movq %rax, 16(%rdi) + movq %r11, 24(%rdi) + movq %r10, 32(%rdi) + movq %rsi, 40(%rdi) + movq %r9, 48(%rdi) + movq %r8, 56(%rdi) +LBB120_2: ## %carry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_addNF8L + .p2align 4, 0x90 +_mcl_fp_addNF8L: ## @mcl_fp_addNF8L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r8 + movq 48(%rdx), %rbp + movq 40(%rdx), %rbx + movq 32(%rdx), %rax + movq 24(%rdx), %r11 + movq 16(%rdx), %r15 + movq (%rdx), %r13 + movq 8(%rdx), %r12 + addq (%rsi), %r13 + adcq 8(%rsi), %r12 + adcq 16(%rsi), %r15 + adcq 24(%rsi), %r11 + adcq 32(%rsi), %rax + movq %rax, %r10 + movq %r10, -24(%rsp) ## 8-byte Spill + adcq 40(%rsi), %rbx + movq %rbx, %r9 + movq %r9, -16(%rsp) ## 8-byte Spill + adcq 48(%rsi), %rbp + movq %rbp, %rax + 
movq %rax, -8(%rsp) ## 8-byte Spill + adcq 56(%rsi), %r8 + movq %r13, %rsi + subq (%rcx), %rsi + movq %r12, %rdx + sbbq 8(%rcx), %rdx + movq %r15, %rbx + sbbq 16(%rcx), %rbx + movq %r11, %r14 + sbbq 24(%rcx), %r14 + movq %r10, %rbp + sbbq 32(%rcx), %rbp + movq %r9, %r10 + sbbq 40(%rcx), %r10 + movq %rax, %r9 + sbbq 48(%rcx), %r9 + movq %r8, %rax + sbbq 56(%rcx), %rax + testq %rax, %rax + cmovsq %r13, %rsi + movq %rsi, (%rdi) + cmovsq %r12, %rdx + movq %rdx, 8(%rdi) + cmovsq %r15, %rbx + movq %rbx, 16(%rdi) + cmovsq %r11, %r14 + movq %r14, 24(%rdi) + cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 32(%rdi) + cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, 40(%rdi) + cmovsq -8(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 48(%rdi) + cmovsq %r8, %rax + movq %rax, 56(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_sub8L + .p2align 4, 0x90 +_mcl_fp_sub8L: ## @mcl_fp_sub8L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 56(%rdx), %r12 + movq 56(%rsi), %r8 + movq 48(%rdx), %r13 + movq (%rsi), %rax + movq 8(%rsi), %r10 + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %r10 + movq 16(%rsi), %r11 + sbbq 16(%rdx), %r11 + movq 24(%rsi), %r15 + sbbq 24(%rdx), %r15 + movq 32(%rsi), %r14 + sbbq 32(%rdx), %r14 + movq 48(%rsi), %r9 + movq 40(%rsi), %rsi + sbbq 40(%rdx), %rsi + movq %rax, (%rdi) + movq %r10, 8(%rdi) + movq %r11, 16(%rdi) + movq %r15, 24(%rdi) + movq %r14, 32(%rdi) + movq %rsi, 40(%rdi) + sbbq %r13, %r9 + movq %r9, 48(%rdi) + sbbq %r12, %r8 + movq %r8, 56(%rdi) + sbbq $0, %rbx + testb $1, %bl + je LBB122_2 +## BB#1: ## %carry + addq (%rcx), %rax + movq %rax, (%rdi) + movq 8(%rcx), %rax + adcq %r10, %rax + movq %rax, 8(%rdi) + movq 16(%rcx), %rax + adcq %r11, %rax + movq %rax, 16(%rdi) + movq 24(%rcx), %rax + adcq %r15, %rax + movq %rax, 24(%rdi) + movq 32(%rcx), %rax + adcq %r14, %rax + movq %rax, 32(%rdi) + movq 40(%rcx), %rax + adcq %rsi, %rax + movq %rax, 40(%rdi) + movq 48(%rcx), %rax + adcq %r9, %rax + movq %rax, 48(%rdi) + movq 56(%rcx), %rax + adcq %r8, %rax + movq %rax, 56(%rdi) +LBB122_2: ## %nocarry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_subNF8L + .p2align 4, 0x90 +_mcl_fp_subNF8L: ## @mcl_fp_subNF8L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq %rdi, %r9 + movdqu (%rdx), %xmm0 + movdqu 16(%rdx), %xmm1 + movdqu 32(%rdx), %xmm2 + movdqu 48(%rdx), %xmm3 + pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1] + movd %xmm4, %r12 + movdqu (%rsi), %xmm4 + movdqu 16(%rsi), %xmm5 + movdqu 32(%rsi), %xmm8 + movdqu 48(%rsi), %xmm7 + pshufd $78, %xmm7, %xmm6 ## xmm6 = xmm7[2,3,0,1] + movd %xmm6, %rcx + movd %xmm3, %r13 + movd %xmm7, %rdi + pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1] + movd %xmm3, %rbp + pshufd $78, %xmm8, %xmm3 ## xmm3 = xmm8[2,3,0,1] + movd %xmm3, %rdx + movd %xmm2, %rsi + pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] + movd %xmm2, %r11 + pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1] + movd %xmm1, %r15 + pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] + movd %xmm1, %rbx + pshufd $78, %xmm4, %xmm1 ## xmm1 = xmm4[2,3,0,1] + movd %xmm0, %rax + movd %xmm4, %r14 + subq %rax, %r14 + movd %xmm1, %r10 + sbbq %rbx, %r10 + movd %xmm5, %rbx + sbbq %r15, %rbx + movd %xmm2, %r15 + sbbq %r11, %r15 + movd %xmm8, %r11 + sbbq %rsi, %r11 + sbbq %rbp, %rdx + movq %rdx, -24(%rsp) ## 8-byte Spill + sbbq %r13, %rdi + movq %rdi, -16(%rsp) ## 8-byte Spill + sbbq 
%r12, %rcx + movq %rcx, -8(%rsp) ## 8-byte Spill + movq %rcx, %rbp + sarq $63, %rbp + movq 56(%r8), %r12 + andq %rbp, %r12 + movq 48(%r8), %r13 + andq %rbp, %r13 + movq 40(%r8), %rdi + andq %rbp, %rdi + movq 32(%r8), %rsi + andq %rbp, %rsi + movq 24(%r8), %rdx + andq %rbp, %rdx + movq 16(%r8), %rcx + andq %rbp, %rcx + movq 8(%r8), %rax + andq %rbp, %rax + andq (%r8), %rbp + addq %r14, %rbp + adcq %r10, %rax + movq %rbp, (%r9) + adcq %rbx, %rcx + movq %rax, 8(%r9) + movq %rcx, 16(%r9) + adcq %r15, %rdx + movq %rdx, 24(%r9) + adcq %r11, %rsi + movq %rsi, 32(%r9) + adcq -24(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 40(%r9) + adcq -16(%rsp), %r13 ## 8-byte Folded Reload + movq %r13, 48(%r9) + adcq -8(%rsp), %r12 ## 8-byte Folded Reload + movq %r12, 56(%r9) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_add8L + .p2align 4, 0x90 +_mcl_fpDbl_add8L: ## @mcl_fpDbl_add8L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r8 + movq 120(%rdx), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + movq 112(%rdx), %rax + movq %rax, -16(%rsp) ## 8-byte Spill + movq 104(%rdx), %rax + movq %rax, -24(%rsp) ## 8-byte Spill + movq 96(%rdx), %r14 + movq 24(%rsi), %r15 + movq 32(%rsi), %r11 + movq 16(%rdx), %r12 + movq (%rdx), %rbx + movq 8(%rdx), %rax + addq (%rsi), %rbx + adcq 8(%rsi), %rax + adcq 16(%rsi), %r12 + adcq 24(%rdx), %r15 + adcq 32(%rdx), %r11 + movq 88(%rdx), %rbp + movq 80(%rdx), %r13 + movq %rbx, (%rdi) + movq 72(%rdx), %r10 + movq %rax, 8(%rdi) + movq 64(%rdx), %r9 + movq %r12, 16(%rdi) + movq 40(%rdx), %r12 + movq %r15, 24(%rdi) + movq 40(%rsi), %rbx + adcq %r12, %rbx + movq 56(%rdx), %r15 + movq 48(%rdx), %r12 + movq %r11, 32(%rdi) + movq 48(%rsi), %rdx + adcq %r12, %rdx + movq 120(%rsi), %r12 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rax + adcq %r15, %rax + movq 112(%rsi), %rcx + movq %rdx, 48(%rdi) + movq 64(%rsi), %rbx + adcq %r9, %rbx + movq 104(%rsi), %rdx + movq %rax, 56(%rdi) + movq 72(%rsi), %r9 + adcq %r10, %r9 + movq 80(%rsi), %r11 + adcq %r13, %r11 + movq 96(%rsi), %rax + movq 88(%rsi), %r15 + adcq %rbp, %r15 + adcq %r14, %rax + movq %rax, -8(%rsp) ## 8-byte Spill + movq %rdx, %rax + adcq -24(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -24(%rsp) ## 8-byte Spill + adcq -16(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -16(%rsp) ## 8-byte Spill + adcq -32(%rsp), %r12 ## 8-byte Folded Reload + movq %r12, -32(%rsp) ## 8-byte Spill + sbbq %rbp, %rbp + andl $1, %ebp + movq %rbx, %rsi + subq (%r8), %rsi + movq %r9, %rdx + sbbq 8(%r8), %rdx + movq %r11, %r10 + sbbq 16(%r8), %r10 + movq %r15, %r14 + sbbq 24(%r8), %r14 + movq -8(%rsp), %r13 ## 8-byte Reload + sbbq 32(%r8), %r13 + movq %rax, %r12 + sbbq 40(%r8), %r12 + movq %rcx, %rax + sbbq 48(%r8), %rax + movq -32(%rsp), %rcx ## 8-byte Reload + sbbq 56(%r8), %rcx + sbbq $0, %rbp + andl $1, %ebp + cmovneq %rbx, %rsi + movq %rsi, 64(%rdi) + testb %bpl, %bpl + cmovneq %r9, %rdx + movq %rdx, 72(%rdi) + cmovneq %r11, %r10 + movq %r10, 80(%rdi) + cmovneq %r15, %r14 + movq %r14, 88(%rdi) + cmovneq -8(%rsp), %r13 ## 8-byte Folded Reload + movq %r13, 96(%rdi) + cmovneq -24(%rsp), %r12 ## 8-byte Folded Reload + movq %r12, 104(%rdi) + cmovneq -16(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 112(%rdi) + cmovneq -32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 120(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sub8L + .p2align 4, 0x90 +_mcl_fpDbl_sub8L: ## @mcl_fpDbl_sub8L +## 
BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r15 + movq 120(%rdx), %rax + movq %rax, -8(%rsp) ## 8-byte Spill + movq 112(%rdx), %rax + movq %rax, -16(%rsp) ## 8-byte Spill + movq 104(%rdx), %rax + movq %rax, -24(%rsp) ## 8-byte Spill + movq 16(%rsi), %r9 + movq (%rsi), %r12 + movq 8(%rsi), %r14 + xorl %r8d, %r8d + subq (%rdx), %r12 + sbbq 8(%rdx), %r14 + sbbq 16(%rdx), %r9 + movq 24(%rsi), %rbx + sbbq 24(%rdx), %rbx + movq 32(%rsi), %r13 + sbbq 32(%rdx), %r13 + movq 96(%rdx), %rbp + movq 88(%rdx), %r11 + movq %r12, (%rdi) + movq 80(%rdx), %r12 + movq %r14, 8(%rdi) + movq 72(%rdx), %r10 + movq %r9, 16(%rdi) + movq 40(%rdx), %r9 + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %r9, %rbx + movq 48(%rdx), %r9 + movq %r13, 32(%rdi) + movq 48(%rsi), %r14 + sbbq %r9, %r14 + movq 64(%rdx), %r13 + movq 56(%rdx), %r9 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rdx + sbbq %r9, %rdx + movq 120(%rsi), %rcx + movq %r14, 48(%rdi) + movq 64(%rsi), %rbx + sbbq %r13, %rbx + movq 112(%rsi), %rax + movq %rdx, 56(%rdi) + movq 72(%rsi), %r9 + sbbq %r10, %r9 + movq 80(%rsi), %r13 + sbbq %r12, %r13 + movq 88(%rsi), %r12 + sbbq %r11, %r12 + movq 104(%rsi), %rdx + movq 96(%rsi), %r14 + sbbq %rbp, %r14 + sbbq -24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -24(%rsp) ## 8-byte Spill + sbbq -16(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -16(%rsp) ## 8-byte Spill + sbbq -8(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -8(%rsp) ## 8-byte Spill + movl $0, %ebp + sbbq $0, %rbp + andl $1, %ebp + movq (%r15), %r11 + cmoveq %r8, %r11 + testb %bpl, %bpl + movq 16(%r15), %rbp + cmoveq %r8, %rbp + movq 8(%r15), %rsi + cmoveq %r8, %rsi + movq 56(%r15), %r10 + cmoveq %r8, %r10 + movq 48(%r15), %rdx + cmoveq %r8, %rdx + movq 40(%r15), %rcx + cmoveq %r8, %rcx + movq 32(%r15), %rax + cmoveq %r8, %rax + cmovneq 24(%r15), %r8 + addq %rbx, %r11 + adcq %r9, %rsi + movq %r11, 64(%rdi) + adcq %r13, %rbp + movq %rsi, 72(%rdi) + movq %rbp, 80(%rdi) + adcq %r12, %r8 + movq %r8, 88(%rdi) + adcq %r14, %rax + movq %rax, 96(%rdi) + adcq -24(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 104(%rdi) + adcq -16(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 112(%rdi) + adcq -8(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, 120(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .p2align 4, 0x90 +l_mulPv576x64: ## @mulPv576x64 +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdx, %rbx + movq %rbx, %rax + mulq (%rsi) + movq %rdx, -32(%rsp) ## 8-byte Spill + movq %rax, (%rdi) + movq %rbx, %rax + mulq 64(%rsi) + movq %rdx, %r10 + movq %rax, -8(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq 56(%rsi) + movq %rdx, %r14 + movq %rax, -16(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq 48(%rsi) + movq %rdx, %r12 + movq %rax, -24(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq 40(%rsi) + movq %rdx, %rcx + movq %rax, -40(%rsp) ## 8-byte Spill + movq %rbx, %rax + mulq 32(%rsi) + movq %rdx, %rbp + movq %rax, %r8 + movq %rbx, %rax + mulq 24(%rsi) + movq %rdx, %r9 + movq %rax, %r11 + movq %rbx, %rax + mulq 16(%rsi) + movq %rdx, %r15 + movq %rax, %r13 + movq %rbx, %rax + mulq 8(%rsi) + addq -32(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 8(%rdi) + adcq %r13, %rdx + movq %rdx, 16(%rdi) + adcq %r11, %r15 + movq %r15, 24(%rdi) + adcq %r8, %r9 + movq %r9, 32(%rdi) + adcq -40(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 40(%rdi) + adcq -24(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 48(%rdi) + adcq 
-16(%rsp), %r12 ## 8-byte Folded Reload + movq %r12, 56(%rdi) + adcq -8(%rsp), %r14 ## 8-byte Folded Reload + movq %r14, 64(%rdi) + adcq $0, %r10 + movq %r10, 72(%rdi) + movq %rdi, %rax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mulUnitPre9L + .p2align 4, 0x90 +_mcl_fp_mulUnitPre9L: ## @mcl_fp_mulUnitPre9L +## BB#0: + pushq %r14 + pushq %rbx + subq $88, %rsp + movq %rdi, %rbx + leaq 8(%rsp), %rdi + callq l_mulPv576x64 + movq 80(%rsp), %r8 + movq 72(%rsp), %r9 + movq 64(%rsp), %r10 + movq 56(%rsp), %r11 + movq 48(%rsp), %r14 + movq 40(%rsp), %rax + movq 32(%rsp), %rcx + movq 24(%rsp), %rdx + movq 8(%rsp), %rsi + movq 16(%rsp), %rdi + movq %rsi, (%rbx) + movq %rdi, 8(%rbx) + movq %rdx, 16(%rbx) + movq %rcx, 24(%rbx) + movq %rax, 32(%rbx) + movq %r14, 40(%rbx) + movq %r11, 48(%rbx) + movq %r10, 56(%rbx) + movq %r9, 64(%rbx) + movq %r8, 72(%rbx) + addq $88, %rsp + popq %rbx + popq %r14 + retq + + .globl _mcl_fpDbl_mulPre9L + .p2align 4, 0x90 +_mcl_fpDbl_mulPre9L: ## @mcl_fpDbl_mulPre9L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $808, %rsp ## imm = 0x328 + movq %rdx, %rax + movq %rdi, %r12 + movq (%rax), %rdx + movq %rax, %rbx + movq %rbx, 80(%rsp) ## 8-byte Spill + leaq 728(%rsp), %rdi + movq %rsi, %rbp + movq %rbp, 72(%rsp) ## 8-byte Spill + callq l_mulPv576x64 + movq 800(%rsp), %r13 + movq 792(%rsp), %rax + movq %rax, 48(%rsp) ## 8-byte Spill + movq 784(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq 776(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 768(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 760(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + movq 752(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 744(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + movq 728(%rsp), %rax + movq 736(%rsp), %r14 + movq %rax, (%r12) + movq %r12, 64(%rsp) ## 8-byte Spill + movq 8(%rbx), %rdx + leaq 648(%rsp), %rdi + movq %rbp, %rsi + callq l_mulPv576x64 + movq 720(%rsp), %r8 + movq 712(%rsp), %rcx + movq 704(%rsp), %rdx + movq 696(%rsp), %rsi + movq 688(%rsp), %rdi + movq 680(%rsp), %rbp + addq 648(%rsp), %r14 + movq 672(%rsp), %rax + movq 656(%rsp), %rbx + movq 664(%rsp), %r15 + movq %r14, 8(%r12) + adcq 24(%rsp), %rbx ## 8-byte Folded Reload + adcq 32(%rsp), %r15 ## 8-byte Folded Reload + adcq 40(%rsp), %rax ## 8-byte Folded Reload + movq %rax, %r14 + adcq (%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 24(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 32(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 40(%rsp) ## 8-byte Spill + adcq 48(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, (%rsp) ## 8-byte Spill + adcq %r13, %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 16(%rsp) ## 8-byte Spill + movq 80(%rsp), %r13 ## 8-byte Reload + movq 16(%r13), %rdx + leaq 568(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 640(%rsp), %r8 + movq 632(%rsp), %r9 + movq 624(%rsp), %r10 + movq 616(%rsp), %rdi + movq 608(%rsp), %rbp + movq 600(%rsp), %rcx + addq 568(%rsp), %rbx + movq 592(%rsp), %rdx + movq 576(%rsp), %r12 + movq 584(%rsp), %rsi + movq 64(%rsp), %rax ## 8-byte Reload + movq %rbx, 16(%rax) + adcq %r15, %r12 + adcq %r14, %rsi + movq %rsi, 48(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 56(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 
40(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 32(%rsp) ## 8-byte Spill + adcq (%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 40(%rsp) ## 8-byte Spill + adcq 8(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, (%rsp) ## 8-byte Spill + adcq 16(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 8(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 16(%rsp) ## 8-byte Spill + movq 24(%r13), %rdx + leaq 488(%rsp), %rdi + movq 72(%rsp), %r15 ## 8-byte Reload + movq %r15, %rsi + callq l_mulPv576x64 + movq 560(%rsp), %r8 + movq 552(%rsp), %rcx + movq 544(%rsp), %rdx + movq 536(%rsp), %rsi + movq 528(%rsp), %rdi + movq 520(%rsp), %rbp + addq 488(%rsp), %r12 + movq 512(%rsp), %rax + movq 496(%rsp), %rbx + movq 504(%rsp), %r13 + movq 64(%rsp), %r14 ## 8-byte Reload + movq %r12, 24(%r14) + adcq 48(%rsp), %rbx ## 8-byte Folded Reload + adcq 56(%rsp), %r13 ## 8-byte Folded Reload + adcq 24(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 32(%rsp) ## 8-byte Spill + adcq 40(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 40(%rsp) ## 8-byte Spill + adcq (%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, (%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 8(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 16(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 48(%rsp) ## 8-byte Spill + movq 80(%rsp), %r12 ## 8-byte Reload + movq 32(%r12), %rdx + leaq 408(%rsp), %rdi + movq %r15, %rsi + callq l_mulPv576x64 + movq 480(%rsp), %r8 + movq 472(%rsp), %r9 + movq 464(%rsp), %rdx + movq 456(%rsp), %rsi + movq 448(%rsp), %rdi + movq 440(%rsp), %rbp + addq 408(%rsp), %rbx + movq 432(%rsp), %rax + movq 416(%rsp), %r15 + movq 424(%rsp), %rcx + movq %rbx, 32(%r14) + adcq %r13, %r15 + adcq 24(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 56(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 40(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 32(%rsp) ## 8-byte Spill + adcq (%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 40(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, (%rsp) ## 8-byte Spill + adcq 16(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 8(%rsp) ## 8-byte Spill + adcq 48(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 16(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 48(%rsp) ## 8-byte Spill + movq %r12, %r14 + movq 40(%r14), %rdx + leaq 328(%rsp), %rdi + movq 72(%rsp), %r13 ## 8-byte Reload + movq %r13, %rsi + callq l_mulPv576x64 + movq 400(%rsp), %r8 + movq 392(%rsp), %r9 + movq 384(%rsp), %rsi + movq 376(%rsp), %rdi + movq 368(%rsp), %rbx + movq 360(%rsp), %rbp + addq 328(%rsp), %r15 + movq 352(%rsp), %rcx + movq 336(%rsp), %r12 + movq 344(%rsp), %rdx + movq 64(%rsp), %rax ## 8-byte Reload + movq %r15, 40(%rax) + adcq 56(%rsp), %r12 ## 8-byte Folded Reload + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 56(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 40(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 32(%rsp) ## 8-byte Spill + adcq (%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, 40(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, (%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 48(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 16(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 48(%rsp) ## 
8-byte Spill + movq 48(%r14), %rdx + leaq 248(%rsp), %rdi + movq %r13, %rsi + movq %r13, %r15 + callq l_mulPv576x64 + movq 320(%rsp), %r8 + movq 312(%rsp), %r9 + movq 304(%rsp), %rsi + movq 296(%rsp), %rdi + movq 288(%rsp), %rbx + movq 280(%rsp), %rbp + addq 248(%rsp), %r12 + movq 272(%rsp), %rcx + movq 256(%rsp), %r13 + movq 264(%rsp), %rdx + movq 64(%rsp), %rax ## 8-byte Reload + movq %r12, 48(%rax) + adcq 56(%rsp), %r13 ## 8-byte Folded Reload + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 56(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 40(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 32(%rsp) ## 8-byte Spill + adcq (%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, 40(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, (%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 48(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 16(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 48(%rsp) ## 8-byte Spill + movq 56(%r14), %rdx + leaq 168(%rsp), %rdi + movq %r15, %rsi + callq l_mulPv576x64 + movq 240(%rsp), %rcx + movq 232(%rsp), %rdx + movq 224(%rsp), %rsi + movq 216(%rsp), %rdi + movq 208(%rsp), %rbx + addq 168(%rsp), %r13 + movq 200(%rsp), %r12 + movq 192(%rsp), %rbp + movq 176(%rsp), %r14 + movq 184(%rsp), %r15 + movq 64(%rsp), %rax ## 8-byte Reload + movq %r13, 56(%rax) + adcq 56(%rsp), %r14 ## 8-byte Folded Reload + adcq 24(%rsp), %r15 ## 8-byte Folded Reload + adcq 32(%rsp), %rbp ## 8-byte Folded Reload + adcq 40(%rsp), %r12 ## 8-byte Folded Reload + adcq (%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, %r13 + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, (%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 48(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq $0, %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + movq 80(%rsp), %rax ## 8-byte Reload + movq 64(%rax), %rdx + leaq 88(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 88(%rsp), %r14 + adcq 96(%rsp), %r15 + movq 160(%rsp), %r8 + adcq 104(%rsp), %rbp + movq 152(%rsp), %r9 + movq 144(%rsp), %rdx + movq 136(%rsp), %rsi + movq 128(%rsp), %rdi + movq 120(%rsp), %rbx + movq 112(%rsp), %rax + movq 64(%rsp), %rcx ## 8-byte Reload + movq %r14, 64(%rcx) + movq %r15, 72(%rcx) + adcq %r12, %rax + movq %rbp, 80(%rcx) + movq %rax, 88(%rcx) + adcq %r13, %rbx + movq %rbx, 96(%rcx) + adcq (%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 104(%rcx) + adcq 8(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 112(%rcx) + adcq 16(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 120(%rcx) + adcq 48(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 128(%rcx) + adcq $0, %r8 + movq %r8, 136(%rcx) + addq $808, %rsp ## imm = 0x328 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sqrPre9L + .p2align 4, 0x90 +_mcl_fpDbl_sqrPre9L: ## @mcl_fpDbl_sqrPre9L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $808, %rsp ## imm = 0x328 + movq %rsi, %r15 + movq %rdi, %r14 + movq (%r15), %rdx + leaq 728(%rsp), %rdi + callq l_mulPv576x64 + movq 800(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 792(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + movq 784(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq 776(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 
768(%rsp), %rax + movq %rax, 56(%rsp) ## 8-byte Spill + movq 760(%rsp), %rax + movq %rax, 48(%rsp) ## 8-byte Spill + movq 752(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + movq 744(%rsp), %rax + movq %rax, 80(%rsp) ## 8-byte Spill + movq 728(%rsp), %rax + movq 736(%rsp), %r12 + movq %rax, (%r14) + movq %r14, 72(%rsp) ## 8-byte Spill + movq 8(%r15), %rdx + leaq 648(%rsp), %rdi + movq %r15, %rsi + callq l_mulPv576x64 + movq 720(%rsp), %r8 + movq 712(%rsp), %rcx + movq 704(%rsp), %rdx + movq 696(%rsp), %rsi + movq 688(%rsp), %rdi + movq 680(%rsp), %rbp + addq 648(%rsp), %r12 + movq 672(%rsp), %rax + movq 656(%rsp), %rbx + movq 664(%rsp), %r13 + movq %r12, 8(%r14) + adcq 80(%rsp), %rbx ## 8-byte Folded Reload + adcq 40(%rsp), %r13 ## 8-byte Folded Reload + adcq 48(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 56(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 56(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 32(%rsp) ## 8-byte Spill + movq %r15, 64(%rsp) ## 8-byte Spill + movq 16(%r15), %rdx + leaq 568(%rsp), %rdi + movq %r15, %rsi + callq l_mulPv576x64 + movq 640(%rsp), %r8 + movq 632(%rsp), %rcx + movq 624(%rsp), %rdx + movq 616(%rsp), %rsi + movq 608(%rsp), %rdi + movq 600(%rsp), %rbp + addq 568(%rsp), %rbx + movq 592(%rsp), %rax + movq 576(%rsp), %r14 + movq 584(%rsp), %r12 + movq 72(%rsp), %r15 ## 8-byte Reload + movq %rbx, 16(%r15) + adcq %r13, %r14 + adcq 40(%rsp), %r12 ## 8-byte Folded Reload + adcq 48(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 56(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 56(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %rsi ## 8-byte Reload + movq 24(%rsi), %rdx + leaq 488(%rsp), %rdi + callq l_mulPv576x64 + movq 560(%rsp), %r8 + movq 552(%rsp), %rcx + movq 544(%rsp), %rdx + movq 536(%rsp), %rsi + movq 528(%rsp), %rdi + movq 520(%rsp), %rbp + addq 488(%rsp), %r14 + movq 512(%rsp), %rax + movq 496(%rsp), %rbx + movq 504(%rsp), %r13 + movq %r14, 24(%r15) + adcq %r12, %rbx + adcq 40(%rsp), %r13 ## 8-byte Folded Reload + adcq 48(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 56(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 56(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %rsi ## 8-byte Reload + movq 32(%rsi), %rdx + leaq 408(%rsp), %rdi + callq l_mulPv576x64 + movq 480(%rsp), %r8 + movq 472(%rsp), %rcx + movq 464(%rsp), %rdx + movq 456(%rsp), %rsi + movq 448(%rsp), %rdi + movq 
440(%rsp), %rbp + addq 408(%rsp), %rbx + movq 432(%rsp), %rax + movq 416(%rsp), %r14 + movq 424(%rsp), %r12 + movq %rbx, 32(%r15) + adcq %r13, %r14 + adcq 40(%rsp), %r12 ## 8-byte Folded Reload + adcq 48(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 56(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 56(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %rsi ## 8-byte Reload + movq 40(%rsi), %rdx + leaq 328(%rsp), %rdi + callq l_mulPv576x64 + movq 400(%rsp), %r8 + movq 392(%rsp), %rcx + movq 384(%rsp), %rdx + movq 376(%rsp), %rsi + movq 368(%rsp), %rdi + movq 360(%rsp), %rbp + addq 328(%rsp), %r14 + movq 352(%rsp), %rax + movq 336(%rsp), %rbx + movq 344(%rsp), %r13 + movq %r14, 40(%r15) + adcq %r12, %rbx + adcq 40(%rsp), %r13 ## 8-byte Folded Reload + adcq 48(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 56(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 56(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %rsi ## 8-byte Reload + movq 48(%rsi), %rdx + leaq 248(%rsp), %rdi + callq l_mulPv576x64 + movq 320(%rsp), %r8 + movq 312(%rsp), %rcx + movq 304(%rsp), %rdx + movq 296(%rsp), %rsi + movq 288(%rsp), %rdi + movq 280(%rsp), %rbp + addq 248(%rsp), %rbx + movq 272(%rsp), %rax + movq 256(%rsp), %r12 + movq 264(%rsp), %r14 + movq %rbx, 48(%r15) + adcq %r13, %r12 + adcq 40(%rsp), %r14 ## 8-byte Folded Reload + adcq 48(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 56(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 56(%rsp) ## 8-byte Spill + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 8(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %rsi ## 8-byte Reload + movq 56(%rsi), %rdx + leaq 168(%rsp), %rdi + callq l_mulPv576x64 + movq 240(%rsp), %r8 + movq 232(%rsp), %rdx + movq 224(%rsp), %rsi + movq 216(%rsp), %rdi + movq 208(%rsp), %rbx + movq 200(%rsp), %rcx + addq 168(%rsp), %r12 + movq 192(%rsp), %r15 + movq 176(%rsp), %r13 + movq 184(%rsp), %rbp + movq 72(%rsp), %rax ## 8-byte Reload + movq %r12, 56(%rax) + adcq %r14, %r13 + adcq 40(%rsp), %rbp ## 8-byte Folded Reload + adcq 48(%rsp), %r15 ## 8-byte Folded Reload + adcq 56(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, %r12 + adcq 8(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, %r14 + adcq 16(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 8(%rsp) ## 8-byte Spill + adcq 24(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 16(%rsp) ## 8-byte Spill + adcq 32(%rsp), %rdx ## 8-byte Folded Reload + movq 
%rdx, 24(%rsp) ## 8-byte Spill + adcq $0, %r8 + movq %r8, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %rsi ## 8-byte Reload + movq 64(%rsi), %rdx + leaq 88(%rsp), %rdi + callq l_mulPv576x64 + addq 88(%rsp), %r13 + adcq 96(%rsp), %rbp + movq 160(%rsp), %r8 + adcq 104(%rsp), %r15 + movq 152(%rsp), %r9 + movq 144(%rsp), %rdx + movq 136(%rsp), %rsi + movq 128(%rsp), %rdi + movq 120(%rsp), %rbx + movq 112(%rsp), %rax + movq 72(%rsp), %rcx ## 8-byte Reload + movq %r13, 64(%rcx) + movq %rbp, 72(%rcx) + adcq %r12, %rax + movq %r15, 80(%rcx) + movq %rax, 88(%rcx) + adcq %r14, %rbx + movq %rbx, 96(%rcx) + adcq 8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 104(%rcx) + adcq 16(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 112(%rcx) + adcq 24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 120(%rcx) + adcq 32(%rsp), %r9 ## 8-byte Folded Reload + movq %r9, 128(%rcx) + adcq $0, %r8 + movq %r8, 136(%rcx) + addq $808, %rsp ## imm = 0x328 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_mont9L + .p2align 4, 0x90 +_mcl_fp_mont9L: ## @mcl_fp_mont9L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $1560, %rsp ## imm = 0x618 + movq %rcx, 72(%rsp) ## 8-byte Spill + movq %rdx, 96(%rsp) ## 8-byte Spill + movq %rsi, 88(%rsp) ## 8-byte Spill + movq %rdi, 112(%rsp) ## 8-byte Spill + movq -8(%rcx), %rbx + movq %rbx, 80(%rsp) ## 8-byte Spill + movq (%rdx), %rdx + leaq 1480(%rsp), %rdi + callq l_mulPv576x64 + movq 1480(%rsp), %r14 + movq 1488(%rsp), %r15 + movq %r14, %rdx + imulq %rbx, %rdx + movq 1552(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + movq 1544(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + movq 1536(%rsp), %rax + movq %rax, 56(%rsp) ## 8-byte Spill + movq 1528(%rsp), %r12 + movq 1520(%rsp), %r13 + movq 1512(%rsp), %rbx + movq 1504(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 1496(%rsp), %rbp + leaq 1400(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 1400(%rsp), %r14 + adcq 1408(%rsp), %r15 + adcq 1416(%rsp), %rbp + movq %rbp, 8(%rsp) ## 8-byte Spill + movq (%rsp), %rax ## 8-byte Reload + adcq 1424(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + adcq 1432(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + adcq 1440(%rsp), %r13 + movq %r13, 16(%rsp) ## 8-byte Spill + adcq 1448(%rsp), %r12 + movq %r12, 48(%rsp) ## 8-byte Spill + movq 56(%rsp), %rbx ## 8-byte Reload + adcq 1456(%rsp), %rbx + movq 40(%rsp), %r14 ## 8-byte Reload + adcq 1464(%rsp), %r14 + movq 24(%rsp), %r13 ## 8-byte Reload + adcq 1472(%rsp), %r13 + sbbq %rbp, %rbp + movq 96(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdx + leaq 1320(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %ebp + addq 1320(%rsp), %r15 + movq 8(%rsp), %rax ## 8-byte Reload + adcq 1328(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq (%rsp), %rax ## 8-byte Reload + adcq 1336(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 32(%rsp), %r12 ## 8-byte Reload + adcq 1344(%rsp), %r12 + movq 16(%rsp), %rax ## 8-byte Reload + adcq 1352(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq 48(%rsp), %rax ## 8-byte Reload + adcq 1360(%rsp), %rax + movq %rax, 48(%rsp) ## 8-byte Spill + adcq 1368(%rsp), %rbx + adcq 1376(%rsp), %r14 + movq %r14, 40(%rsp) ## 8-byte Spill + adcq 1384(%rsp), %r13 + movq %r13, 24(%rsp) ## 8-byte Spill + adcq 1392(%rsp), %rbp + sbbq %r14, %r14 + movq %r15, %rdx + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 1240(%rsp), %rdi + movq 72(%rsp), 
%rsi ## 8-byte Reload + callq l_mulPv576x64 + movq %r14, %rax + andl $1, %eax + addq 1240(%rsp), %r15 + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 1248(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq (%rsp), %r14 ## 8-byte Reload + adcq 1256(%rsp), %r14 + adcq 1264(%rsp), %r12 + movq %r12, 32(%rsp) ## 8-byte Spill + movq 16(%rsp), %r12 ## 8-byte Reload + adcq 1272(%rsp), %r12 + movq 48(%rsp), %r13 ## 8-byte Reload + adcq 1280(%rsp), %r13 + adcq 1288(%rsp), %rbx + movq %rbx, 56(%rsp) ## 8-byte Spill + movq 40(%rsp), %r15 ## 8-byte Reload + adcq 1296(%rsp), %r15 + movq 24(%rsp), %rbx ## 8-byte Reload + adcq 1304(%rsp), %rbx + adcq 1312(%rsp), %rbp + adcq $0, %rax + movq %rax, 64(%rsp) ## 8-byte Spill + movq 96(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rdx + leaq 1160(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 8(%rsp), %rax ## 8-byte Reload + addq 1160(%rsp), %rax + adcq 1168(%rsp), %r14 + movq %r14, (%rsp) ## 8-byte Spill + movq 32(%rsp), %r14 ## 8-byte Reload + adcq 1176(%rsp), %r14 + adcq 1184(%rsp), %r12 + movq %r12, 16(%rsp) ## 8-byte Spill + movq %r13, %r12 + adcq 1192(%rsp), %r12 + movq 56(%rsp), %rcx ## 8-byte Reload + adcq 1200(%rsp), %rcx + movq %rcx, 56(%rsp) ## 8-byte Spill + adcq 1208(%rsp), %r15 + movq %r15, %r13 + adcq 1216(%rsp), %rbx + movq %rbx, 24(%rsp) ## 8-byte Spill + adcq 1224(%rsp), %rbp + movq 64(%rsp), %rcx ## 8-byte Reload + adcq 1232(%rsp), %rcx + movq %rcx, 64(%rsp) ## 8-byte Spill + sbbq %r15, %r15 + movq %rax, %rdx + movq %rax, %rbx + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 1080(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq %r15, %rax + andl $1, %eax + addq 1080(%rsp), %rbx + movq (%rsp), %rcx ## 8-byte Reload + adcq 1088(%rsp), %rcx + movq %rcx, (%rsp) ## 8-byte Spill + movq %r14, %r15 + adcq 1096(%rsp), %r15 + movq 16(%rsp), %r14 ## 8-byte Reload + adcq 1104(%rsp), %r14 + movq %r12, %rbx + adcq 1112(%rsp), %rbx + movq 56(%rsp), %rcx ## 8-byte Reload + adcq 1120(%rsp), %rcx + movq %rcx, 56(%rsp) ## 8-byte Spill + adcq 1128(%rsp), %r13 + movq %r13, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %r13 ## 8-byte Reload + adcq 1136(%rsp), %r13 + adcq 1144(%rsp), %rbp + movq 64(%rsp), %r12 ## 8-byte Reload + adcq 1152(%rsp), %r12 + adcq $0, %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 96(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rdx + leaq 1000(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq (%rsp), %rax ## 8-byte Reload + addq 1000(%rsp), %rax + adcq 1008(%rsp), %r15 + movq %r15, 32(%rsp) ## 8-byte Spill + adcq 1016(%rsp), %r14 + movq %r14, %r15 + adcq 1024(%rsp), %rbx + movq %rbx, 48(%rsp) ## 8-byte Spill + movq 56(%rsp), %r14 ## 8-byte Reload + adcq 1032(%rsp), %r14 + movq 40(%rsp), %rcx ## 8-byte Reload + adcq 1040(%rsp), %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + adcq 1048(%rsp), %r13 + movq %r13, 24(%rsp) ## 8-byte Spill + adcq 1056(%rsp), %rbp + adcq 1064(%rsp), %r12 + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 1072(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + sbbq %rbx, %rbx + movq %rax, %rdx + movq %rax, %r13 + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 920(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %ebx + movq %rbx, %rax + addq 920(%rsp), %r13 + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 928(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + adcq 936(%rsp), %r15 + movq %r15, 16(%rsp) ## 8-byte Spill + movq 48(%rsp), %r15 ## 8-byte Reload + adcq 944(%rsp), %r15 + movq 
%r14, %r13 + adcq 952(%rsp), %r13 + movq 40(%rsp), %r14 ## 8-byte Reload + adcq 960(%rsp), %r14 + movq 24(%rsp), %rbx ## 8-byte Reload + adcq 968(%rsp), %rbx + adcq 976(%rsp), %rbp + adcq 984(%rsp), %r12 + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 992(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 96(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rdx + leaq 840(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 32(%rsp), %rax ## 8-byte Reload + addq 840(%rsp), %rax + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 848(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + adcq 856(%rsp), %r15 + adcq 864(%rsp), %r13 + movq %r13, 56(%rsp) ## 8-byte Spill + adcq 872(%rsp), %r14 + movq %r14, 40(%rsp) ## 8-byte Spill + adcq 880(%rsp), %rbx + movq %rbx, 24(%rsp) ## 8-byte Spill + adcq 888(%rsp), %rbp + adcq 896(%rsp), %r12 + movq 8(%rsp), %r13 ## 8-byte Reload + adcq 904(%rsp), %r13 + movq (%rsp), %rcx ## 8-byte Reload + adcq 912(%rsp), %rcx + movq %rcx, (%rsp) ## 8-byte Spill + sbbq %rbx, %rbx + movq %rax, %rdx + movq %rax, %r14 + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 760(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %ebx + movq %rbx, %rax + addq 760(%rsp), %r14 + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 768(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + adcq 776(%rsp), %r15 + movq 56(%rsp), %r14 ## 8-byte Reload + adcq 784(%rsp), %r14 + movq 40(%rsp), %rcx ## 8-byte Reload + adcq 792(%rsp), %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 800(%rsp), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 808(%rsp), %rbp + movq %r12, %rbx + adcq 816(%rsp), %rbx + movq %r13, %r12 + adcq 824(%rsp), %r12 + movq (%rsp), %r13 ## 8-byte Reload + adcq 832(%rsp), %r13 + adcq $0, %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 96(%rsp), %rax ## 8-byte Reload + movq 40(%rax), %rdx + leaq 680(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 16(%rsp), %rax ## 8-byte Reload + addq 680(%rsp), %rax + adcq 688(%rsp), %r15 + movq %r15, 48(%rsp) ## 8-byte Spill + adcq 696(%rsp), %r14 + movq %r14, 56(%rsp) ## 8-byte Spill + movq 40(%rsp), %rcx ## 8-byte Reload + adcq 704(%rsp), %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %r15 ## 8-byte Reload + adcq 712(%rsp), %r15 + adcq 720(%rsp), %rbp + adcq 728(%rsp), %rbx + movq %rbx, 64(%rsp) ## 8-byte Spill + adcq 736(%rsp), %r12 + movq %r12, 8(%rsp) ## 8-byte Spill + adcq 744(%rsp), %r13 + movq %r13, (%rsp) ## 8-byte Spill + movq 32(%rsp), %r13 ## 8-byte Reload + adcq 752(%rsp), %r13 + sbbq %r14, %r14 + movq %rax, %rdx + movq %rax, %rbx + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 600(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %r14d + addq 600(%rsp), %rbx + movq 48(%rsp), %rax ## 8-byte Reload + adcq 608(%rsp), %rax + movq %rax, 48(%rsp) ## 8-byte Spill + movq 56(%rsp), %rax ## 8-byte Reload + adcq 616(%rsp), %rax + movq %rax, 56(%rsp) ## 8-byte Spill + movq 40(%rsp), %rbx ## 8-byte Reload + adcq 624(%rsp), %rbx + adcq 632(%rsp), %r15 + movq %r15, 24(%rsp) ## 8-byte Spill + adcq 640(%rsp), %rbp + movq 64(%rsp), %r12 ## 8-byte Reload + adcq 648(%rsp), %r12 + movq 8(%rsp), %rax ## 8-byte Reload + adcq 656(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq (%rsp), %r15 ## 8-byte Reload + adcq 664(%rsp), %r15 + adcq 672(%rsp), %r13 + adcq $0, %r14 + movq %r14, 16(%rsp) ## 8-byte Spill + movq 
96(%rsp), %rax ## 8-byte Reload + movq 48(%rax), %rdx + leaq 520(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 48(%rsp), %rax ## 8-byte Reload + addq 520(%rsp), %rax + movq 56(%rsp), %r14 ## 8-byte Reload + adcq 528(%rsp), %r14 + adcq 536(%rsp), %rbx + movq %rbx, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 544(%rsp), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 552(%rsp), %rbp + adcq 560(%rsp), %r12 + movq %r12, 64(%rsp) ## 8-byte Spill + movq 8(%rsp), %r12 ## 8-byte Reload + adcq 568(%rsp), %r12 + adcq 576(%rsp), %r15 + movq %r15, (%rsp) ## 8-byte Spill + adcq 584(%rsp), %r13 + movq %r13, 32(%rsp) ## 8-byte Spill + movq 16(%rsp), %r15 ## 8-byte Reload + adcq 592(%rsp), %r15 + sbbq %rbx, %rbx + movq %rax, %rdx + movq %rax, %r13 + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 440(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %ebx + movq %rbx, %rax + addq 440(%rsp), %r13 + adcq 448(%rsp), %r14 + movq %r14, 56(%rsp) ## 8-byte Spill + movq 40(%rsp), %r14 ## 8-byte Reload + adcq 456(%rsp), %r14 + movq 24(%rsp), %rbx ## 8-byte Reload + adcq 464(%rsp), %rbx + adcq 472(%rsp), %rbp + movq %rbp, 104(%rsp) ## 8-byte Spill + movq 64(%rsp), %rcx ## 8-byte Reload + adcq 480(%rsp), %rcx + movq %rcx, 64(%rsp) ## 8-byte Spill + adcq 488(%rsp), %r12 + movq %r12, 8(%rsp) ## 8-byte Spill + movq (%rsp), %rbp ## 8-byte Reload + adcq 496(%rsp), %rbp + movq 32(%rsp), %r12 ## 8-byte Reload + adcq 504(%rsp), %r12 + adcq 512(%rsp), %r15 + movq %r15, %r13 + adcq $0, %rax + movq %rax, 48(%rsp) ## 8-byte Spill + movq 96(%rsp), %rax ## 8-byte Reload + movq 56(%rax), %rdx + leaq 360(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 56(%rsp), %rax ## 8-byte Reload + addq 360(%rsp), %rax + adcq 368(%rsp), %r14 + adcq 376(%rsp), %rbx + movq %rbx, 24(%rsp) ## 8-byte Spill + movq 104(%rsp), %rcx ## 8-byte Reload + adcq 384(%rsp), %rcx + movq %rcx, 104(%rsp) ## 8-byte Spill + movq 64(%rsp), %rbx ## 8-byte Reload + adcq 392(%rsp), %rbx + movq 8(%rsp), %r15 ## 8-byte Reload + adcq 400(%rsp), %r15 + adcq 408(%rsp), %rbp + movq %rbp, (%rsp) ## 8-byte Spill + adcq 416(%rsp), %r12 + movq %r12, %rbp + adcq 424(%rsp), %r13 + movq %r13, 16(%rsp) ## 8-byte Spill + movq 48(%rsp), %rcx ## 8-byte Reload + adcq 432(%rsp), %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + sbbq %r13, %r13 + movq %rax, %rdx + movq %rax, %r12 + imulq 80(%rsp), %rdx ## 8-byte Folded Reload + leaq 280(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %r13d + addq 280(%rsp), %r12 + adcq 288(%rsp), %r14 + movq %r14, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rax ## 8-byte Reload + adcq 296(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + movq 104(%rsp), %r14 ## 8-byte Reload + adcq 304(%rsp), %r14 + adcq 312(%rsp), %rbx + movq %rbx, 64(%rsp) ## 8-byte Spill + adcq 320(%rsp), %r15 + movq %r15, 8(%rsp) ## 8-byte Spill + movq (%rsp), %rbx ## 8-byte Reload + adcq 328(%rsp), %rbx + adcq 336(%rsp), %rbp + movq %rbp, 32(%rsp) ## 8-byte Spill + movq 16(%rsp), %r12 ## 8-byte Reload + adcq 344(%rsp), %r12 + movq 48(%rsp), %rbp ## 8-byte Reload + adcq 352(%rsp), %rbp + adcq $0, %r13 + movq 96(%rsp), %rax ## 8-byte Reload + movq 64(%rax), %rdx + leaq 200(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 40(%rsp), %rax ## 8-byte Reload + addq 200(%rsp), %rax + movq 24(%rsp), %r15 ## 8-byte Reload + adcq 208(%rsp), %r15 + adcq 216(%rsp), %r14 + movq %r14, 104(%rsp) ## 8-byte Spill 
+ movq 64(%rsp), %r14 ## 8-byte Reload + adcq 224(%rsp), %r14 + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 232(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + adcq 240(%rsp), %rbx + movq %rbx, (%rsp) ## 8-byte Spill + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 248(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + adcq 256(%rsp), %r12 + movq %r12, 16(%rsp) ## 8-byte Spill + adcq 264(%rsp), %rbp + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 272(%rsp), %r13 + sbbq %rbx, %rbx + movq 80(%rsp), %rdx ## 8-byte Reload + imulq %rax, %rdx + movq %rax, %r12 + leaq 120(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %ebx + addq 120(%rsp), %r12 + adcq 128(%rsp), %r15 + movq 104(%rsp), %rbp ## 8-byte Reload + adcq 136(%rsp), %rbp + movq %r14, %rcx + adcq 144(%rsp), %rcx + movq %rcx, 64(%rsp) ## 8-byte Spill + movq 8(%rsp), %r8 ## 8-byte Reload + adcq 152(%rsp), %r8 + movq %r8, 8(%rsp) ## 8-byte Spill + movq (%rsp), %r9 ## 8-byte Reload + adcq 160(%rsp), %r9 + movq %r9, (%rsp) ## 8-byte Spill + movq 32(%rsp), %r10 ## 8-byte Reload + adcq 168(%rsp), %r10 + movq %r10, 32(%rsp) ## 8-byte Spill + movq 16(%rsp), %rdi ## 8-byte Reload + adcq 176(%rsp), %rdi + movq %rdi, 16(%rsp) ## 8-byte Spill + movq 48(%rsp), %r14 ## 8-byte Reload + adcq 184(%rsp), %r14 + adcq 192(%rsp), %r13 + adcq $0, %rbx + movq %r15, %rsi + movq %r15, %r12 + movq 72(%rsp), %rdx ## 8-byte Reload + subq (%rdx), %rsi + movq %rbp, %rax + movq %rbp, %r15 + sbbq 8(%rdx), %rax + movq %rcx, %rbp + sbbq 16(%rdx), %rbp + movq %r8, %rcx + sbbq 24(%rdx), %rcx + movq %r9, %r8 + sbbq 32(%rdx), %r8 + movq %r10, %r11 + sbbq 40(%rdx), %r11 + movq %rdi, %r10 + sbbq 48(%rdx), %r10 + movq %r14, %rdi + sbbq 56(%rdx), %rdi + movq %r13, %r9 + sbbq 64(%rdx), %r9 + sbbq $0, %rbx + andl $1, %ebx + cmovneq %r13, %r9 + testb %bl, %bl + cmovneq %r12, %rsi + movq 112(%rsp), %rbx ## 8-byte Reload + movq %rsi, (%rbx) + cmovneq %r15, %rax + movq %rax, 8(%rbx) + cmovneq 64(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 16(%rbx) + cmovneq 8(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 24(%rbx) + cmovneq (%rsp), %r8 ## 8-byte Folded Reload + movq %r8, 32(%rbx) + cmovneq 32(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, 40(%rbx) + cmovneq 16(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, 48(%rbx) + cmovneq %r14, %rdi + movq %rdi, 56(%rbx) + movq %r9, 64(%rbx) + addq $1560, %rsp ## imm = 0x618 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montNF9L + .p2align 4, 0x90 +_mcl_fp_montNF9L: ## @mcl_fp_montNF9L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $1560, %rsp ## imm = 0x618 + movq %rcx, 72(%rsp) ## 8-byte Spill + movq %rdx, 80(%rsp) ## 8-byte Spill + movq %rsi, 88(%rsp) ## 8-byte Spill + movq %rdi, 112(%rsp) ## 8-byte Spill + movq -8(%rcx), %rbx + movq %rbx, 96(%rsp) ## 8-byte Spill + movq (%rdx), %rdx + leaq 1480(%rsp), %rdi + callq l_mulPv576x64 + movq 1480(%rsp), %r12 + movq 1488(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq %r12, %rdx + imulq %rbx, %rdx + movq 1552(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + movq 1544(%rsp), %r13 + movq 1536(%rsp), %rax + movq %rax, 48(%rsp) ## 8-byte Spill + movq 1528(%rsp), %rax + movq %rax, 64(%rsp) ## 8-byte Spill + movq 1520(%rsp), %r14 + movq 1512(%rsp), %r15 + movq 1504(%rsp), %rbx + movq 1496(%rsp), %rbp + leaq 1400(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 1400(%rsp), %r12 + movq 16(%rsp), %rax ## 8-byte Reload + adcq 
1408(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + adcq 1416(%rsp), %rbp + movq %rbp, 104(%rsp) ## 8-byte Spill + adcq 1424(%rsp), %rbx + movq %rbx, (%rsp) ## 8-byte Spill + adcq 1432(%rsp), %r15 + movq %r15, 8(%rsp) ## 8-byte Spill + adcq 1440(%rsp), %r14 + movq %r14, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %rbx ## 8-byte Reload + adcq 1448(%rsp), %rbx + movq 48(%rsp), %r12 ## 8-byte Reload + adcq 1456(%rsp), %r12 + adcq 1464(%rsp), %r13 + movq %r13, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 1472(%rsp), %rbp + movq 80(%rsp), %rax ## 8-byte Reload + movq 8(%rax), %rdx + leaq 1320(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 1392(%rsp), %rax + movq 16(%rsp), %rcx ## 8-byte Reload + addq 1320(%rsp), %rcx + movq 104(%rsp), %r15 ## 8-byte Reload + adcq 1328(%rsp), %r15 + movq (%rsp), %r14 ## 8-byte Reload + adcq 1336(%rsp), %r14 + movq 8(%rsp), %rdx ## 8-byte Reload + adcq 1344(%rsp), %rdx + movq %rdx, 8(%rsp) ## 8-byte Spill + movq 32(%rsp), %r13 ## 8-byte Reload + adcq 1352(%rsp), %r13 + adcq 1360(%rsp), %rbx + movq %rbx, 64(%rsp) ## 8-byte Spill + adcq 1368(%rsp), %r12 + movq %r12, 48(%rsp) ## 8-byte Spill + movq 40(%rsp), %rdx ## 8-byte Reload + adcq 1376(%rsp), %rdx + movq %rdx, 40(%rsp) ## 8-byte Spill + adcq 1384(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, %rbp + movq %rcx, %rdx + movq %rcx, %rbx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 1240(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 1240(%rsp), %rbx + adcq 1248(%rsp), %r15 + movq %r15, 104(%rsp) ## 8-byte Spill + adcq 1256(%rsp), %r14 + movq %r14, (%rsp) ## 8-byte Spill + movq 8(%rsp), %r12 ## 8-byte Reload + adcq 1264(%rsp), %r12 + adcq 1272(%rsp), %r13 + movq %r13, %r14 + movq 64(%rsp), %r13 ## 8-byte Reload + adcq 1280(%rsp), %r13 + movq 48(%rsp), %rbx ## 8-byte Reload + adcq 1288(%rsp), %rbx + movq 40(%rsp), %r15 ## 8-byte Reload + adcq 1296(%rsp), %r15 + movq 24(%rsp), %rax ## 8-byte Reload + adcq 1304(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 1312(%rsp), %rbp + movq %rbp, 56(%rsp) ## 8-byte Spill + movq 80(%rsp), %rax ## 8-byte Reload + movq 16(%rax), %rdx + leaq 1160(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 1232(%rsp), %rax + movq 104(%rsp), %rcx ## 8-byte Reload + addq 1160(%rsp), %rcx + movq (%rsp), %rbp ## 8-byte Reload + adcq 1168(%rsp), %rbp + adcq 1176(%rsp), %r12 + movq %r12, 8(%rsp) ## 8-byte Spill + adcq 1184(%rsp), %r14 + adcq 1192(%rsp), %r13 + movq %r13, %r12 + adcq 1200(%rsp), %rbx + movq %rbx, 48(%rsp) ## 8-byte Spill + adcq 1208(%rsp), %r15 + movq %r15, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rbx ## 8-byte Reload + adcq 1216(%rsp), %rbx + movq 56(%rsp), %rdx ## 8-byte Reload + adcq 1224(%rsp), %rdx + movq %rdx, 56(%rsp) ## 8-byte Spill + movq %rax, %r15 + adcq $0, %r15 + movq %rcx, %rdx + movq %rcx, %r13 + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 1080(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 1080(%rsp), %r13 + adcq 1088(%rsp), %rbp + movq %rbp, (%rsp) ## 8-byte Spill + movq 8(%rsp), %r13 ## 8-byte Reload + adcq 1096(%rsp), %r13 + adcq 1104(%rsp), %r14 + adcq 1112(%rsp), %r12 + movq %r12, 64(%rsp) ## 8-byte Spill + movq 48(%rsp), %r12 ## 8-byte Reload + adcq 1120(%rsp), %r12 + movq 40(%rsp), %rbp ## 8-byte Reload + adcq 1128(%rsp), %rbp + adcq 1136(%rsp), %rbx + movq %rbx, 24(%rsp) ## 8-byte Spill + movq 56(%rsp), %rbx ## 8-byte Reload + adcq 1144(%rsp), 
%rbx + adcq 1152(%rsp), %r15 + movq 80(%rsp), %rax ## 8-byte Reload + movq 24(%rax), %rdx + leaq 1000(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 1072(%rsp), %rax + movq (%rsp), %rcx ## 8-byte Reload + addq 1000(%rsp), %rcx + adcq 1008(%rsp), %r13 + movq %r13, 8(%rsp) ## 8-byte Spill + adcq 1016(%rsp), %r14 + movq %r14, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %r14 ## 8-byte Reload + adcq 1024(%rsp), %r14 + adcq 1032(%rsp), %r12 + adcq 1040(%rsp), %rbp + movq %rbp, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %r13 ## 8-byte Reload + adcq 1048(%rsp), %r13 + adcq 1056(%rsp), %rbx + movq %rbx, 56(%rsp) ## 8-byte Spill + adcq 1064(%rsp), %r15 + movq %r15, 16(%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, (%rsp) ## 8-byte Spill + movq %rcx, %rdx + movq %rcx, %rbx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 920(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 920(%rsp), %rbx + movq 8(%rsp), %rax ## 8-byte Reload + adcq 928(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + movq 32(%rsp), %rbp ## 8-byte Reload + adcq 936(%rsp), %rbp + movq %r14, %rbx + adcq 944(%rsp), %rbx + adcq 952(%rsp), %r12 + movq 40(%rsp), %rax ## 8-byte Reload + adcq 960(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 968(%rsp), %r13 + movq %r13, %r15 + movq 56(%rsp), %r13 ## 8-byte Reload + adcq 976(%rsp), %r13 + movq 16(%rsp), %r14 ## 8-byte Reload + adcq 984(%rsp), %r14 + movq (%rsp), %rax ## 8-byte Reload + adcq 992(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 80(%rsp), %rax ## 8-byte Reload + movq 32(%rax), %rdx + leaq 840(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 912(%rsp), %rax + movq 8(%rsp), %rcx ## 8-byte Reload + addq 840(%rsp), %rcx + adcq 848(%rsp), %rbp + movq %rbp, 32(%rsp) ## 8-byte Spill + adcq 856(%rsp), %rbx + movq %rbx, 64(%rsp) ## 8-byte Spill + adcq 864(%rsp), %r12 + movq 40(%rsp), %rbp ## 8-byte Reload + adcq 872(%rsp), %rbp + adcq 880(%rsp), %r15 + movq %r15, 24(%rsp) ## 8-byte Spill + adcq 888(%rsp), %r13 + adcq 896(%rsp), %r14 + movq %r14, 16(%rsp) ## 8-byte Spill + movq (%rsp), %rdx ## 8-byte Reload + adcq 904(%rsp), %rdx + movq %rdx, (%rsp) ## 8-byte Spill + adcq $0, %rax + movq %rax, %r14 + movq %rcx, %rdx + movq %rcx, %rbx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 760(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 760(%rsp), %rbx + movq 32(%rsp), %rax ## 8-byte Reload + adcq 768(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 64(%rsp), %r15 ## 8-byte Reload + adcq 776(%rsp), %r15 + adcq 784(%rsp), %r12 + movq %r12, 48(%rsp) ## 8-byte Spill + movq %rbp, %rbx + adcq 792(%rsp), %rbx + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 800(%rsp), %rbp + adcq 808(%rsp), %r13 + movq 16(%rsp), %rax ## 8-byte Reload + adcq 816(%rsp), %rax + movq %rax, 16(%rsp) ## 8-byte Spill + movq (%rsp), %r12 ## 8-byte Reload + adcq 824(%rsp), %r12 + adcq 832(%rsp), %r14 + movq 80(%rsp), %rax ## 8-byte Reload + movq 40(%rax), %rdx + leaq 680(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 752(%rsp), %rcx + movq 32(%rsp), %rax ## 8-byte Reload + addq 680(%rsp), %rax + adcq 688(%rsp), %r15 + movq %r15, 64(%rsp) ## 8-byte Spill + movq 48(%rsp), %rdx ## 8-byte Reload + adcq 696(%rsp), %rdx + movq %rdx, 48(%rsp) ## 8-byte Spill + adcq 704(%rsp), %rbx + movq %rbx, 40(%rsp) ## 8-byte Spill + adcq 712(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + adcq 720(%rsp), %r13 + movq %r13, %r15 + movq 16(%rsp), %rbx 
## 8-byte Reload + adcq 728(%rsp), %rbx + adcq 736(%rsp), %r12 + movq %r12, (%rsp) ## 8-byte Spill + adcq 744(%rsp), %r14 + movq %r14, 32(%rsp) ## 8-byte Spill + adcq $0, %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq %rax, %rdx + movq %rax, %r13 + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 600(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 600(%rsp), %r13 + movq 64(%rsp), %r13 ## 8-byte Reload + adcq 608(%rsp), %r13 + movq 48(%rsp), %r12 ## 8-byte Reload + adcq 616(%rsp), %r12 + movq 40(%rsp), %rbp ## 8-byte Reload + adcq 624(%rsp), %rbp + movq 24(%rsp), %rax ## 8-byte Reload + adcq 632(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 640(%rsp), %r15 + movq %r15, 56(%rsp) ## 8-byte Spill + adcq 648(%rsp), %rbx + movq %rbx, 16(%rsp) ## 8-byte Spill + movq (%rsp), %r14 ## 8-byte Reload + adcq 656(%rsp), %r14 + movq 32(%rsp), %rbx ## 8-byte Reload + adcq 664(%rsp), %rbx + movq 8(%rsp), %r15 ## 8-byte Reload + adcq 672(%rsp), %r15 + movq 80(%rsp), %rax ## 8-byte Reload + movq 48(%rax), %rdx + leaq 520(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 592(%rsp), %rcx + movq %r13, %rax + addq 520(%rsp), %rax + adcq 528(%rsp), %r12 + movq %r12, 48(%rsp) ## 8-byte Spill + movq %rbp, %r12 + adcq 536(%rsp), %r12 + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 544(%rsp), %rbp + movq 56(%rsp), %rdx ## 8-byte Reload + adcq 552(%rsp), %rdx + movq %rdx, 56(%rsp) ## 8-byte Spill + movq 16(%rsp), %rdx ## 8-byte Reload + adcq 560(%rsp), %rdx + movq %rdx, 16(%rsp) ## 8-byte Spill + adcq 568(%rsp), %r14 + movq %r14, (%rsp) ## 8-byte Spill + adcq 576(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + adcq 584(%rsp), %r15 + movq %r15, 8(%rsp) ## 8-byte Spill + adcq $0, %rcx + movq %rcx, %r13 + movq %rax, %rdx + movq %rax, %r14 + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 440(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 440(%rsp), %r14 + movq 48(%rsp), %rax ## 8-byte Reload + adcq 448(%rsp), %rax + movq %rax, 48(%rsp) ## 8-byte Spill + adcq 456(%rsp), %r12 + adcq 464(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq 56(%rsp), %r14 ## 8-byte Reload + adcq 472(%rsp), %r14 + movq 16(%rsp), %r15 ## 8-byte Reload + adcq 480(%rsp), %r15 + movq (%rsp), %rbp ## 8-byte Reload + adcq 488(%rsp), %rbp + movq 32(%rsp), %rbx ## 8-byte Reload + adcq 496(%rsp), %rbx + movq 8(%rsp), %rax ## 8-byte Reload + adcq 504(%rsp), %rax + movq %rax, 8(%rsp) ## 8-byte Spill + adcq 512(%rsp), %r13 + movq 80(%rsp), %rax ## 8-byte Reload + movq 56(%rax), %rdx + leaq 360(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 432(%rsp), %rcx + movq 48(%rsp), %rax ## 8-byte Reload + addq 360(%rsp), %rax + adcq 368(%rsp), %r12 + movq %r12, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rdx ## 8-byte Reload + adcq 376(%rsp), %rdx + movq %rdx, 24(%rsp) ## 8-byte Spill + adcq 384(%rsp), %r14 + movq %r14, 56(%rsp) ## 8-byte Spill + adcq 392(%rsp), %r15 + movq %r15, 16(%rsp) ## 8-byte Spill + adcq 400(%rsp), %rbp + movq %rbp, (%rsp) ## 8-byte Spill + adcq 408(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + movq 8(%rsp), %r14 ## 8-byte Reload + adcq 416(%rsp), %r14 + adcq 424(%rsp), %r13 + movq %r13, %r15 + adcq $0, %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + movq %rax, %rdx + movq %rax, %r12 + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 280(%rsp), %rdi + movq 72(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 280(%rsp), %r12 + movq 40(%rsp), %rax ## 8-byte Reload + adcq 
288(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 296(%rsp), %rbp + movq 56(%rsp), %rax ## 8-byte Reload + adcq 304(%rsp), %rax + movq %rax, 56(%rsp) ## 8-byte Spill + movq 16(%rsp), %r13 ## 8-byte Reload + adcq 312(%rsp), %r13 + movq (%rsp), %r12 ## 8-byte Reload + adcq 320(%rsp), %r12 + movq 32(%rsp), %rbx ## 8-byte Reload + adcq 328(%rsp), %rbx + adcq 336(%rsp), %r14 + movq %r14, 8(%rsp) ## 8-byte Spill + adcq 344(%rsp), %r15 + movq %r15, 64(%rsp) ## 8-byte Spill + movq 48(%rsp), %r14 ## 8-byte Reload + adcq 352(%rsp), %r14 + movq 80(%rsp), %rax ## 8-byte Reload + movq 64(%rax), %rdx + leaq 200(%rsp), %rdi + movq 88(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + movq 272(%rsp), %rcx + movq 40(%rsp), %rax ## 8-byte Reload + addq 200(%rsp), %rax + adcq 208(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq 56(%rsp), %rbp ## 8-byte Reload + adcq 216(%rsp), %rbp + adcq 224(%rsp), %r13 + movq %r13, 16(%rsp) ## 8-byte Spill + adcq 232(%rsp), %r12 + movq %r12, (%rsp) ## 8-byte Spill + adcq 240(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + movq 8(%rsp), %r15 ## 8-byte Reload + adcq 248(%rsp), %r15 + movq 64(%rsp), %r12 ## 8-byte Reload + adcq 256(%rsp), %r12 + adcq 264(%rsp), %r14 + adcq $0, %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + movq 96(%rsp), %rdx ## 8-byte Reload + imulq %rax, %rdx + movq %rax, %rbx + leaq 120(%rsp), %rdi + movq 72(%rsp), %r13 ## 8-byte Reload + movq %r13, %rsi + callq l_mulPv576x64 + addq 120(%rsp), %rbx + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 128(%rsp), %rcx + movq %rbp, %rdx + adcq 136(%rsp), %rdx + movq 16(%rsp), %rsi ## 8-byte Reload + adcq 144(%rsp), %rsi + movq %rsi, 16(%rsp) ## 8-byte Spill + movq (%rsp), %rdi ## 8-byte Reload + adcq 152(%rsp), %rdi + movq %rdi, (%rsp) ## 8-byte Spill + movq 32(%rsp), %rbx ## 8-byte Reload + adcq 160(%rsp), %rbx + movq %rbx, 32(%rsp) ## 8-byte Spill + movq %r15, %r8 + adcq 168(%rsp), %r8 + movq %r8, 8(%rsp) ## 8-byte Spill + movq %r12, %r15 + adcq 176(%rsp), %r15 + adcq 184(%rsp), %r14 + movq 40(%rsp), %r9 ## 8-byte Reload + adcq 192(%rsp), %r9 + movq %rcx, %rax + movq %rcx, %r11 + movq %r13, %rbp + subq (%rbp), %rax + movq %rdx, %rcx + movq %rdx, %r12 + sbbq 8(%rbp), %rcx + movq %rsi, %rdx + sbbq 16(%rbp), %rdx + movq %rdi, %rsi + sbbq 24(%rbp), %rsi + movq %rbx, %rdi + sbbq 32(%rbp), %rdi + movq %r8, %r10 + sbbq 40(%rbp), %r10 + movq %r15, %r13 + sbbq 48(%rbp), %r13 + movq %r14, %r8 + sbbq 56(%rbp), %r8 + movq %rbp, %rbx + movq %r9, %rbp + sbbq 64(%rbx), %rbp + movq %rbp, %rbx + sarq $63, %rbx + cmovsq %r11, %rax + movq 112(%rsp), %rbx ## 8-byte Reload + movq %rax, (%rbx) + cmovsq %r12, %rcx + movq %rcx, 8(%rbx) + cmovsq 16(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 16(%rbx) + cmovsq (%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 24(%rbx) + cmovsq 32(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 32(%rbx) + cmovsq 8(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, 40(%rbx) + cmovsq %r15, %r13 + movq %r13, 48(%rbx) + cmovsq %r14, %r8 + movq %r8, 56(%rbx) + cmovsq %r9, %rbp + movq %rbp, 64(%rbx) + addq $1560, %rsp ## imm = 0x618 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_montRed9L + .p2align 4, 0x90 +_mcl_fp_montRed9L: ## @mcl_fp_montRed9L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + subq $936, %rsp ## imm = 0x3A8 + movq %rdx, %rax + movq %rdi, 208(%rsp) ## 8-byte Spill + movq -8(%rax), %rcx + movq %rcx, 96(%rsp) ## 8-byte Spill + movq (%rsi), 
%r14 + movq 8(%rsi), %rdx + movq %rdx, (%rsp) ## 8-byte Spill + movq %r14, %rdx + imulq %rcx, %rdx + movq 136(%rsi), %rcx + movq %rcx, 88(%rsp) ## 8-byte Spill + movq 128(%rsi), %rcx + movq %rcx, 56(%rsp) ## 8-byte Spill + movq 120(%rsi), %rcx + movq %rcx, 80(%rsp) ## 8-byte Spill + movq 112(%rsi), %rcx + movq %rcx, 72(%rsp) ## 8-byte Spill + movq 104(%rsi), %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + movq 96(%rsi), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + movq 88(%rsi), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq 80(%rsi), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + movq 72(%rsi), %r12 + movq 64(%rsi), %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + movq 56(%rsi), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + movq 48(%rsi), %rcx + movq %rcx, 64(%rsp) ## 8-byte Spill + movq 40(%rsi), %rbp + movq 32(%rsi), %rbx + movq 24(%rsi), %r13 + movq 16(%rsi), %r15 + movq %rax, %rcx + movq (%rcx), %rax + movq %rax, 144(%rsp) ## 8-byte Spill + movq 64(%rcx), %rax + movq %rax, 200(%rsp) ## 8-byte Spill + movq 56(%rcx), %rax + movq %rax, 192(%rsp) ## 8-byte Spill + movq 48(%rcx), %rax + movq %rax, 184(%rsp) ## 8-byte Spill + movq 40(%rcx), %rax + movq %rax, 176(%rsp) ## 8-byte Spill + movq 32(%rcx), %rax + movq %rax, 168(%rsp) ## 8-byte Spill + movq 24(%rcx), %rax + movq %rax, 160(%rsp) ## 8-byte Spill + movq 16(%rcx), %rax + movq %rax, 152(%rsp) ## 8-byte Spill + movq 8(%rcx), %rax + movq %rax, 136(%rsp) ## 8-byte Spill + movq %rcx, %rsi + movq %rsi, 104(%rsp) ## 8-byte Spill + leaq 856(%rsp), %rdi + callq l_mulPv576x64 + addq 856(%rsp), %r14 + movq (%rsp), %rcx ## 8-byte Reload + adcq 864(%rsp), %rcx + adcq 872(%rsp), %r15 + adcq 880(%rsp), %r13 + adcq 888(%rsp), %rbx + movq %rbx, 120(%rsp) ## 8-byte Spill + adcq 896(%rsp), %rbp + movq %rbp, 112(%rsp) ## 8-byte Spill + movq 64(%rsp), %rax ## 8-byte Reload + adcq 904(%rsp), %rax + movq %rax, 64(%rsp) ## 8-byte Spill + movq 32(%rsp), %rax ## 8-byte Reload + adcq 912(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 40(%rsp), %rax ## 8-byte Reload + adcq 920(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + adcq 928(%rsp), %r12 + movq %r12, (%rsp) ## 8-byte Spill + movq 24(%rsp), %rbp ## 8-byte Reload + adcq $0, %rbp + adcq $0, 8(%rsp) ## 8-byte Folded Spill + adcq $0, 16(%rsp) ## 8-byte Folded Spill + adcq $0, 48(%rsp) ## 8-byte Folded Spill + adcq $0, 72(%rsp) ## 8-byte Folded Spill + adcq $0, 80(%rsp) ## 8-byte Folded Spill + adcq $0, 56(%rsp) ## 8-byte Folded Spill + movq 88(%rsp), %r14 ## 8-byte Reload + adcq $0, %r14 + sbbq %r12, %r12 + movq %rcx, %rdx + movq %rcx, %rbx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 776(%rsp), %rdi + movq 104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + andl $1, %r12d + addq 776(%rsp), %rbx + adcq 784(%rsp), %r15 + adcq 792(%rsp), %r13 + movq %r13, 128(%rsp) ## 8-byte Spill + movq 120(%rsp), %rax ## 8-byte Reload + adcq 800(%rsp), %rax + movq %rax, 120(%rsp) ## 8-byte Spill + movq 112(%rsp), %rax ## 8-byte Reload + adcq 808(%rsp), %rax + movq %rax, 112(%rsp) ## 8-byte Spill + movq 64(%rsp), %rax ## 8-byte Reload + adcq 816(%rsp), %rax + movq %rax, 64(%rsp) ## 8-byte Spill + movq 32(%rsp), %rax ## 8-byte Reload + adcq 824(%rsp), %rax + movq %rax, 32(%rsp) ## 8-byte Spill + movq 40(%rsp), %rax ## 8-byte Reload + adcq 832(%rsp), %rax + movq %rax, 40(%rsp) ## 8-byte Spill + movq (%rsp), %rax ## 8-byte Reload + adcq 840(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + adcq 848(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq 8(%rsp), %r13 ## 8-byte Reload + adcq $0, %r13 + adcq $0, 
16(%rsp) ## 8-byte Folded Spill + adcq $0, 48(%rsp) ## 8-byte Folded Spill + adcq $0, 72(%rsp) ## 8-byte Folded Spill + adcq $0, 80(%rsp) ## 8-byte Folded Spill + movq 56(%rsp), %rbx ## 8-byte Reload + adcq $0, %rbx + adcq $0, %r14 + movq %r14, 88(%rsp) ## 8-byte Spill + adcq $0, %r12 + movq %r15, %rdx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 696(%rsp), %rdi + movq 104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 696(%rsp), %r15 + movq 128(%rsp), %rcx ## 8-byte Reload + adcq 704(%rsp), %rcx + movq 120(%rsp), %rax ## 8-byte Reload + adcq 712(%rsp), %rax + movq %rax, 120(%rsp) ## 8-byte Spill + movq 112(%rsp), %rax ## 8-byte Reload + adcq 720(%rsp), %rax + movq %rax, 112(%rsp) ## 8-byte Spill + movq 64(%rsp), %rbp ## 8-byte Reload + adcq 728(%rsp), %rbp + movq 32(%rsp), %r14 ## 8-byte Reload + adcq 736(%rsp), %r14 + movq 40(%rsp), %r15 ## 8-byte Reload + adcq 744(%rsp), %r15 + movq (%rsp), %rax ## 8-byte Reload + adcq 752(%rsp), %rax + movq %rax, (%rsp) ## 8-byte Spill + movq 24(%rsp), %rax ## 8-byte Reload + adcq 760(%rsp), %rax + movq %rax, 24(%rsp) ## 8-byte Spill + adcq 768(%rsp), %r13 + movq %r13, 8(%rsp) ## 8-byte Spill + adcq $0, 16(%rsp) ## 8-byte Folded Spill + movq 48(%rsp), %r13 ## 8-byte Reload + adcq $0, %r13 + adcq $0, 72(%rsp) ## 8-byte Folded Spill + adcq $0, 80(%rsp) ## 8-byte Folded Spill + adcq $0, %rbx + movq %rbx, 56(%rsp) ## 8-byte Spill + adcq $0, 88(%rsp) ## 8-byte Folded Spill + adcq $0, %r12 + movq %rcx, %rbx + movq %rbx, %rdx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 616(%rsp), %rdi + movq 104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 616(%rsp), %rbx + movq 120(%rsp), %rax ## 8-byte Reload + adcq 624(%rsp), %rax + movq 112(%rsp), %rcx ## 8-byte Reload + adcq 632(%rsp), %rcx + movq %rcx, 112(%rsp) ## 8-byte Spill + adcq 640(%rsp), %rbp + movq %rbp, 64(%rsp) ## 8-byte Spill + adcq 648(%rsp), %r14 + movq %r14, 32(%rsp) ## 8-byte Spill + adcq 656(%rsp), %r15 + movq (%rsp), %r14 ## 8-byte Reload + adcq 664(%rsp), %r14 + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 672(%rsp), %rbp + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 680(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 688(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + adcq $0, %r13 + movq %r13, 48(%rsp) ## 8-byte Spill + adcq $0, 72(%rsp) ## 8-byte Folded Spill + adcq $0, 80(%rsp) ## 8-byte Folded Spill + adcq $0, 56(%rsp) ## 8-byte Folded Spill + adcq $0, 88(%rsp) ## 8-byte Folded Spill + adcq $0, %r12 + movq %rax, %rbx + movq %rbx, %rdx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 536(%rsp), %rdi + movq 104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 536(%rsp), %rbx + movq 112(%rsp), %rax ## 8-byte Reload + adcq 544(%rsp), %rax + movq 64(%rsp), %rcx ## 8-byte Reload + adcq 552(%rsp), %rcx + movq %rcx, 64(%rsp) ## 8-byte Spill + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 560(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + adcq 568(%rsp), %r15 + movq %r15, 40(%rsp) ## 8-byte Spill + adcq 576(%rsp), %r14 + movq %r14, (%rsp) ## 8-byte Spill + adcq 584(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq 8(%rsp), %r13 ## 8-byte Reload + adcq 592(%rsp), %r13 + movq 16(%rsp), %r15 ## 8-byte Reload + adcq 600(%rsp), %r15 + movq 48(%rsp), %rbp ## 8-byte Reload + adcq 608(%rsp), %rbp + movq 72(%rsp), %rbx ## 8-byte Reload + adcq $0, %rbx + adcq $0, 80(%rsp) ## 8-byte Folded Spill + adcq $0, 56(%rsp) ## 8-byte Folded Spill + adcq $0, 88(%rsp) ## 8-byte Folded Spill + adcq $0, %r12 + movq 
%rax, %rdx + movq %rax, %r14 + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 456(%rsp), %rdi + movq 104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 456(%rsp), %r14 + movq 64(%rsp), %rax ## 8-byte Reload + adcq 464(%rsp), %rax + movq 32(%rsp), %rcx ## 8-byte Reload + adcq 472(%rsp), %rcx + movq %rcx, 32(%rsp) ## 8-byte Spill + movq 40(%rsp), %rcx ## 8-byte Reload + adcq 480(%rsp), %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + movq (%rsp), %rcx ## 8-byte Reload + adcq 488(%rsp), %rcx + movq %rcx, (%rsp) ## 8-byte Spill + movq 24(%rsp), %rcx ## 8-byte Reload + adcq 496(%rsp), %rcx + movq %rcx, 24(%rsp) ## 8-byte Spill + adcq 504(%rsp), %r13 + movq %r13, 8(%rsp) ## 8-byte Spill + adcq 512(%rsp), %r15 + movq %r15, 16(%rsp) ## 8-byte Spill + adcq 520(%rsp), %rbp + movq %rbp, 48(%rsp) ## 8-byte Spill + adcq 528(%rsp), %rbx + movq %rbx, 72(%rsp) ## 8-byte Spill + movq 80(%rsp), %r14 ## 8-byte Reload + adcq $0, %r14 + movq 56(%rsp), %r13 ## 8-byte Reload + adcq $0, %r13 + movq 88(%rsp), %rbx ## 8-byte Reload + adcq $0, %rbx + adcq $0, %r12 + movq %rax, %rdx + movq %rax, %r15 + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 376(%rsp), %rdi + movq 104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 376(%rsp), %r15 + movq 32(%rsp), %rax ## 8-byte Reload + adcq 384(%rsp), %rax + movq 40(%rsp), %rcx ## 8-byte Reload + adcq 392(%rsp), %rcx + movq %rcx, 40(%rsp) ## 8-byte Spill + movq (%rsp), %rcx ## 8-byte Reload + adcq 400(%rsp), %rcx + movq %rcx, (%rsp) ## 8-byte Spill + movq 24(%rsp), %rbp ## 8-byte Reload + adcq 408(%rsp), %rbp + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 416(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 424(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + movq 48(%rsp), %rcx ## 8-byte Reload + adcq 432(%rsp), %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + movq 72(%rsp), %r15 ## 8-byte Reload + adcq 440(%rsp), %r15 + adcq 448(%rsp), %r14 + movq %r14, 80(%rsp) ## 8-byte Spill + adcq $0, %r13 + movq %r13, %r14 + adcq $0, %rbx + movq %rbx, 88(%rsp) ## 8-byte Spill + adcq $0, %r12 + movq %rax, %rbx + movq %rbx, %rdx + imulq 96(%rsp), %rdx ## 8-byte Folded Reload + leaq 296(%rsp), %rdi + movq 104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 296(%rsp), %rbx + movq 40(%rsp), %rax ## 8-byte Reload + adcq 304(%rsp), %rax + movq (%rsp), %r13 ## 8-byte Reload + adcq 312(%rsp), %r13 + adcq 320(%rsp), %rbp + movq 8(%rsp), %rcx ## 8-byte Reload + adcq 328(%rsp), %rcx + movq %rcx, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %rcx ## 8-byte Reload + adcq 336(%rsp), %rcx + movq %rcx, 16(%rsp) ## 8-byte Spill + movq 48(%rsp), %rcx ## 8-byte Reload + adcq 344(%rsp), %rcx + movq %rcx, 48(%rsp) ## 8-byte Spill + adcq 352(%rsp), %r15 + movq %r15, 72(%rsp) ## 8-byte Spill + movq 80(%rsp), %r15 ## 8-byte Reload + adcq 360(%rsp), %r15 + adcq 368(%rsp), %r14 + movq %r14, 56(%rsp) ## 8-byte Spill + movq 88(%rsp), %r14 ## 8-byte Reload + adcq $0, %r14 + adcq $0, %r12 + movq 96(%rsp), %rdx ## 8-byte Reload + imulq %rax, %rdx + movq %rax, %rbx + leaq 216(%rsp), %rdi + movq 104(%rsp), %rsi ## 8-byte Reload + callq l_mulPv576x64 + addq 216(%rsp), %rbx + movq %r13, %rsi + adcq 224(%rsp), %rsi + movq %rsi, (%rsp) ## 8-byte Spill + adcq 232(%rsp), %rbp + movq %rbp, 24(%rsp) ## 8-byte Spill + movq 8(%rsp), %r9 ## 8-byte Reload + adcq 240(%rsp), %r9 + movq %r9, 8(%rsp) ## 8-byte Spill + movq 16(%rsp), %r8 ## 8-byte Reload + adcq 248(%rsp), %r8 + movq %r8, 16(%rsp) ## 8-byte Spill + movq 48(%rsp), %rbx ## 8-byte Reload + adcq 
256(%rsp), %rbx + movq 72(%rsp), %rax ## 8-byte Reload + adcq 264(%rsp), %rax + movq %r15, %rcx + adcq 272(%rsp), %rcx + movq 56(%rsp), %rdx ## 8-byte Reload + adcq 280(%rsp), %rdx + movq %rdx, 56(%rsp) ## 8-byte Spill + adcq 288(%rsp), %r14 + movq %r14, %r11 + adcq $0, %r12 + subq 144(%rsp), %rsi ## 8-byte Folded Reload + movq %rbp, %rdi + sbbq 136(%rsp), %rdi ## 8-byte Folded Reload + movq %r9, %rbp + sbbq 152(%rsp), %rbp ## 8-byte Folded Reload + movq %r8, %r13 + sbbq 160(%rsp), %r13 ## 8-byte Folded Reload + movq %rbx, %r15 + sbbq 168(%rsp), %r15 ## 8-byte Folded Reload + movq %rax, %r14 + sbbq 176(%rsp), %r14 ## 8-byte Folded Reload + movq %rcx, %r10 + sbbq 184(%rsp), %r10 ## 8-byte Folded Reload + movq %rdx, %r8 + sbbq 192(%rsp), %r8 ## 8-byte Folded Reload + movq %r11, %r9 + sbbq 200(%rsp), %r9 ## 8-byte Folded Reload + sbbq $0, %r12 + andl $1, %r12d + cmovneq %r11, %r9 + testb %r12b, %r12b + cmovneq (%rsp), %rsi ## 8-byte Folded Reload + movq 208(%rsp), %rdx ## 8-byte Reload + movq %rsi, (%rdx) + cmovneq 24(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 8(%rdx) + cmovneq 8(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 16(%rdx) + cmovneq 16(%rsp), %r13 ## 8-byte Folded Reload + movq %r13, 24(%rdx) + cmovneq %rbx, %r15 + movq %r15, 32(%rdx) + cmovneq %rax, %r14 + movq %r14, 40(%rdx) + cmovneq %rcx, %r10 + movq %r10, 48(%rdx) + cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, 56(%rdx) + movq %r9, 64(%rdx) + addq $936, %rsp ## imm = 0x3A8 + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_addPre9L + .p2align 4, 0x90 +_mcl_fp_addPre9L: ## @mcl_fp_addPre9L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 64(%rdx), %r8 + movq 64(%rsi), %r15 + movq 56(%rsi), %r9 + movq 48(%rsi), %r10 + movq 40(%rsi), %r11 + movq 24(%rsi), %r12 + movq 32(%rsi), %r14 + movq (%rdx), %rbx + movq 8(%rdx), %rcx + addq (%rsi), %rbx + adcq 8(%rsi), %rcx + movq 16(%rdx), %rax + adcq 16(%rsi), %rax + adcq 24(%rdx), %r12 + movq 56(%rdx), %r13 + movq 48(%rdx), %rsi + movq 40(%rdx), %rbp + movq 32(%rdx), %rdx + movq %rbx, (%rdi) + movq %rcx, 8(%rdi) + movq %rax, 16(%rdi) + movq %r12, 24(%rdi) + adcq %r14, %rdx + movq %rdx, 32(%rdi) + adcq %r11, %rbp + movq %rbp, 40(%rdi) + adcq %r10, %rsi + movq %rsi, 48(%rdi) + adcq %r9, %r13 + movq %r13, 56(%rdi) + adcq %r8, %r15 + movq %r15, 64(%rdi) + sbbq %rax, %rax + andl $1, %eax + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_subPre9L + .p2align 4, 0x90 +_mcl_fp_subPre9L: ## @mcl_fp_subPre9L +## BB#0: + movq 32(%rdx), %r8 + movq (%rsi), %rcx + xorl %eax, %eax + subq (%rdx), %rcx + movq %rcx, (%rdi) + movq 8(%rsi), %rcx + sbbq 8(%rdx), %rcx + movq %rcx, 8(%rdi) + movq 16(%rsi), %rcx + sbbq 16(%rdx), %rcx + movq %rcx, 16(%rdi) + movq 24(%rsi), %rcx + sbbq 24(%rdx), %rcx + movq %rcx, 24(%rdi) + movq 32(%rsi), %rcx + sbbq %r8, %rcx + movq 40(%rdx), %r8 + movq %rcx, 32(%rdi) + movq 40(%rsi), %rcx + sbbq %r8, %rcx + movq 48(%rdx), %r8 + movq %rcx, 40(%rdi) + movq 48(%rsi), %rcx + sbbq %r8, %rcx + movq 56(%rdx), %r8 + movq %rcx, 48(%rdi) + movq 56(%rsi), %rcx + sbbq %r8, %rcx + movq %rcx, 56(%rdi) + movq 64(%rdx), %rcx + movq 64(%rsi), %rdx + sbbq %rcx, %rdx + movq %rdx, 64(%rdi) + sbbq $0, %rax + andl $1, %eax + retq + + .globl _mcl_fp_shr1_9L + .p2align 4, 0x90 +_mcl_fp_shr1_9L: ## @mcl_fp_shr1_9L +## BB#0: + pushq %rbx + movq 64(%rsi), %r8 + movq 56(%rsi), %r9 + movq 48(%rsi), %r10 + movq 40(%rsi), %r11 + movq 32(%rsi), %rcx + 
movq 24(%rsi), %rdx + movq 16(%rsi), %rax + movq (%rsi), %rbx + movq 8(%rsi), %rsi + shrdq $1, %rsi, %rbx + movq %rbx, (%rdi) + shrdq $1, %rax, %rsi + movq %rsi, 8(%rdi) + shrdq $1, %rdx, %rax + movq %rax, 16(%rdi) + shrdq $1, %rcx, %rdx + movq %rdx, 24(%rdi) + shrdq $1, %r11, %rcx + movq %rcx, 32(%rdi) + shrdq $1, %r10, %r11 + movq %r11, 40(%rdi) + shrdq $1, %r9, %r10 + movq %r10, 48(%rdi) + shrdq $1, %r8, %r9 + movq %r9, 56(%rdi) + shrq %r8 + movq %r8, 64(%rdi) + popq %rbx + retq + + .globl _mcl_fp_add9L + .p2align 4, 0x90 +_mcl_fp_add9L: ## @mcl_fp_add9L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 64(%rdx), %r12 + movq 64(%rsi), %r8 + movq 56(%rsi), %r13 + movq 48(%rsi), %r9 + movq 40(%rsi), %r10 + movq 24(%rsi), %r14 + movq 32(%rsi), %r11 + movq (%rdx), %rbx + movq 8(%rdx), %r15 + addq (%rsi), %rbx + adcq 8(%rsi), %r15 + movq 16(%rdx), %rax + adcq 16(%rsi), %rax + adcq 24(%rdx), %r14 + adcq 32(%rdx), %r11 + adcq 40(%rdx), %r10 + movq 56(%rdx), %rsi + adcq 48(%rdx), %r9 + movq %rbx, (%rdi) + movq %r15, 8(%rdi) + movq %rax, 16(%rdi) + movq %r14, 24(%rdi) + movq %r11, 32(%rdi) + movq %r10, 40(%rdi) + movq %r9, 48(%rdi) + adcq %r13, %rsi + movq %rsi, 56(%rdi) + adcq %r12, %r8 + movq %r8, 64(%rdi) + sbbq %rdx, %rdx + andl $1, %edx + subq (%rcx), %rbx + sbbq 8(%rcx), %r15 + sbbq 16(%rcx), %rax + sbbq 24(%rcx), %r14 + sbbq 32(%rcx), %r11 + sbbq 40(%rcx), %r10 + sbbq 48(%rcx), %r9 + sbbq 56(%rcx), %rsi + sbbq 64(%rcx), %r8 + sbbq $0, %rdx + testb $1, %dl + jne LBB136_2 +## BB#1: ## %nocarry + movq %rbx, (%rdi) + movq %r15, 8(%rdi) + movq %rax, 16(%rdi) + movq %r14, 24(%rdi) + movq %r11, 32(%rdi) + movq %r10, 40(%rdi) + movq %r9, 48(%rdi) + movq %rsi, 56(%rdi) + movq %r8, 64(%rdi) +LBB136_2: ## %carry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_addNF9L + .p2align 4, 0x90 +_mcl_fp_addNF9L: ## @mcl_fp_addNF9L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rdi, %r8 + movq 64(%rdx), %r10 + movq 56(%rdx), %r11 + movq 48(%rdx), %r9 + movq 40(%rdx), %rax + movq 32(%rdx), %rdi + movq 24(%rdx), %rbp + movq 16(%rdx), %r15 + movq (%rdx), %rbx + movq 8(%rdx), %r13 + addq (%rsi), %rbx + adcq 8(%rsi), %r13 + adcq 16(%rsi), %r15 + adcq 24(%rsi), %rbp + movq %rbp, -24(%rsp) ## 8-byte Spill + adcq 32(%rsi), %rdi + movq %rdi, -40(%rsp) ## 8-byte Spill + adcq 40(%rsi), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + adcq 48(%rsi), %r9 + movq %r9, %rdi + movq %rdi, -16(%rsp) ## 8-byte Spill + adcq 56(%rsi), %r11 + movq %r11, %rax + movq %rax, -8(%rsp) ## 8-byte Spill + adcq 64(%rsi), %r10 + movq %r10, %r9 + movq %rbx, %rsi + subq (%rcx), %rsi + movq %r13, %rdx + sbbq 8(%rcx), %rdx + movq %r15, %r12 + sbbq 16(%rcx), %r12 + sbbq 24(%rcx), %rbp + movq -40(%rsp), %r14 ## 8-byte Reload + sbbq 32(%rcx), %r14 + movq -32(%rsp), %r11 ## 8-byte Reload + sbbq 40(%rcx), %r11 + movq %rdi, %r10 + sbbq 48(%rcx), %r10 + movq %rax, %rdi + sbbq 56(%rcx), %rdi + movq %r9, %rax + sbbq 64(%rcx), %rax + movq %rax, %rcx + sarq $63, %rcx + cmovsq %rbx, %rsi + movq %rsi, (%r8) + cmovsq %r13, %rdx + movq %rdx, 8(%r8) + cmovsq %r15, %r12 + movq %r12, 16(%r8) + cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload + movq %rbp, 24(%r8) + cmovsq -40(%rsp), %r14 ## 8-byte Folded Reload + movq %r14, 32(%r8) + cmovsq -32(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, 40(%r8) + cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, 48(%r8) + cmovsq -8(%rsp), %rdi ## 8-byte Folded Reload + movq %rdi, 56(%r8) + cmovsq %r9, 
%rax + movq %rax, 64(%r8) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fp_sub9L + .p2align 4, 0x90 +_mcl_fp_sub9L: ## @mcl_fp_sub9L +## BB#0: + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq 64(%rdx), %r13 + movq (%rsi), %rax + movq 8(%rsi), %r9 + xorl %ebx, %ebx + subq (%rdx), %rax + sbbq 8(%rdx), %r9 + movq 16(%rsi), %r10 + sbbq 16(%rdx), %r10 + movq 24(%rsi), %r11 + sbbq 24(%rdx), %r11 + movq 32(%rsi), %r12 + sbbq 32(%rdx), %r12 + movq 40(%rsi), %r14 + sbbq 40(%rdx), %r14 + movq 48(%rsi), %r15 + sbbq 48(%rdx), %r15 + movq 64(%rsi), %r8 + movq 56(%rsi), %rsi + sbbq 56(%rdx), %rsi + movq %rax, (%rdi) + movq %r9, 8(%rdi) + movq %r10, 16(%rdi) + movq %r11, 24(%rdi) + movq %r12, 32(%rdi) + movq %r14, 40(%rdi) + movq %r15, 48(%rdi) + movq %rsi, 56(%rdi) + sbbq %r13, %r8 + movq %r8, 64(%rdi) + sbbq $0, %rbx + testb $1, %bl + je LBB138_2 +## BB#1: ## %carry + addq (%rcx), %rax + movq %rax, (%rdi) + movq 8(%rcx), %rax + adcq %r9, %rax + movq %rax, 8(%rdi) + movq 16(%rcx), %rax + adcq %r10, %rax + movq %rax, 16(%rdi) + movq 24(%rcx), %rax + adcq %r11, %rax + movq %rax, 24(%rdi) + movq 32(%rcx), %rax + adcq %r12, %rax + movq %rax, 32(%rdi) + movq 40(%rcx), %rax + adcq %r14, %rax + movq %rax, 40(%rdi) + movq 48(%rcx), %rax + adcq %r15, %rax + movq %rax, 48(%rdi) + movq 56(%rcx), %rax + adcq %rsi, %rax + movq %rax, 56(%rdi) + movq 64(%rcx), %rax + adcq %r8, %rax + movq %rax, 64(%rdi) +LBB138_2: ## %nocarry + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + retq + + .globl _mcl_fp_subNF9L + .p2align 4, 0x90 +_mcl_fp_subNF9L: ## @mcl_fp_subNF9L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r11 + movq %rdi, %rbx + movq 64(%rsi), %rax + movq %rax, -40(%rsp) ## 8-byte Spill + movdqu (%rdx), %xmm1 + movdqu 16(%rdx), %xmm2 + movdqu 32(%rdx), %xmm3 + movdqu 48(%rdx), %xmm4 + pshufd $78, %xmm4, %xmm0 ## xmm0 = xmm4[2,3,0,1] + movd %xmm0, %r12 + movdqu (%rsi), %xmm5 + movdqu 16(%rsi), %xmm6 + movdqu 32(%rsi), %xmm7 + movdqu 48(%rsi), %xmm8 + pshufd $78, %xmm8, %xmm0 ## xmm0 = xmm8[2,3,0,1] + movd %xmm0, %rax + movd %xmm4, %r10 + pshufd $78, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,0,1] + movd %xmm0, %r9 + pshufd $78, %xmm7, %xmm0 ## xmm0 = xmm7[2,3,0,1] + movd %xmm3, %r8 + pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1] + movd %xmm3, %rcx + pshufd $78, %xmm6, %xmm3 ## xmm3 = xmm6[2,3,0,1] + movd %xmm2, %rbp + pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] + movd %xmm2, %rsi + pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1] + movd %xmm1, %rdi + movd %xmm5, %r15 + subq %rdi, %r15 + movd %xmm2, %r14 + sbbq %rsi, %r14 + movd %xmm6, %r13 + sbbq %rbp, %r13 + movd %xmm3, %rbp + sbbq %rcx, %rbp + movd %xmm7, %rcx + sbbq %r8, %rcx + movq %rcx, -16(%rsp) ## 8-byte Spill + movd %xmm0, %rcx + sbbq %r9, %rcx + movq %rcx, -24(%rsp) ## 8-byte Spill + movd %xmm8, %rcx + sbbq %r10, %rcx + movq %rcx, -32(%rsp) ## 8-byte Spill + sbbq %r12, %rax + movq %rax, -8(%rsp) ## 8-byte Spill + movq -40(%rsp), %rsi ## 8-byte Reload + sbbq 64(%rdx), %rsi + movq %rsi, -40(%rsp) ## 8-byte Spill + movq %rsi, %rax + sarq $63, %rax + movq %rax, %rcx + shldq $1, %rsi, %rcx + movq 24(%r11), %r9 + andq %rcx, %r9 + movq 8(%r11), %rdi + andq %rcx, %rdi + andq (%r11), %rcx + movq 64(%r11), %r12 + andq %rax, %r12 + movq 56(%r11), %r10 + andq %rax, %r10 + rolq %rax + movq 48(%r11), %r8 + andq %rax, %r8 + movq 40(%r11), %rsi + andq %rax, %rsi + movq 32(%r11), %rdx + andq %rax, %rdx + andq 16(%r11), %rax + addq %r15, %rcx 
+ adcq %r14, %rdi + movq %rcx, (%rbx) + adcq %r13, %rax + movq %rdi, 8(%rbx) + adcq %rbp, %r9 + movq %rax, 16(%rbx) + movq %r9, 24(%rbx) + adcq -16(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, 32(%rbx) + adcq -24(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 40(%rbx) + adcq -32(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, 48(%rbx) + adcq -8(%rsp), %r10 ## 8-byte Folded Reload + movq %r10, 56(%rbx) + adcq -40(%rsp), %r12 ## 8-byte Folded Reload + movq %r12, 64(%rbx) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_add9L + .p2align 4, 0x90 +_mcl_fpDbl_add9L: ## @mcl_fpDbl_add9L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r15 + movq 136(%rdx), %rax + movq %rax, -48(%rsp) ## 8-byte Spill + movq 128(%rdx), %rax + movq %rax, -40(%rsp) ## 8-byte Spill + movq 120(%rdx), %r10 + movq 112(%rdx), %r11 + movq 24(%rsi), %rcx + movq 32(%rsi), %r14 + movq 16(%rdx), %rbp + movq (%rdx), %rax + movq 8(%rdx), %rbx + addq (%rsi), %rax + adcq 8(%rsi), %rbx + adcq 16(%rsi), %rbp + adcq 24(%rdx), %rcx + adcq 32(%rdx), %r14 + movq 104(%rdx), %r9 + movq 96(%rdx), %r13 + movq %rax, (%rdi) + movq 88(%rdx), %r8 + movq %rbx, 8(%rdi) + movq 80(%rdx), %r12 + movq %rbp, 16(%rdi) + movq 40(%rdx), %rax + movq %rcx, 24(%rdi) + movq 40(%rsi), %rbp + adcq %rax, %rbp + movq 48(%rdx), %rcx + movq %r14, 32(%rdi) + movq 48(%rsi), %rax + adcq %rcx, %rax + movq 56(%rdx), %r14 + movq %rbp, 40(%rdi) + movq 56(%rsi), %rbp + adcq %r14, %rbp + movq 72(%rdx), %rcx + movq 64(%rdx), %rdx + movq %rax, 48(%rdi) + movq 64(%rsi), %rax + adcq %rdx, %rax + movq 136(%rsi), %rbx + movq %rbp, 56(%rdi) + movq 72(%rsi), %rbp + adcq %rcx, %rbp + movq 128(%rsi), %rcx + movq %rax, 64(%rdi) + movq 80(%rsi), %rdx + adcq %r12, %rdx + movq 88(%rsi), %r12 + adcq %r8, %r12 + movq 96(%rsi), %r14 + adcq %r13, %r14 + movq %r14, -8(%rsp) ## 8-byte Spill + movq 104(%rsi), %rax + adcq %r9, %rax + movq %rax, -32(%rsp) ## 8-byte Spill + movq 120(%rsi), %rax + movq 112(%rsi), %rsi + adcq %r11, %rsi + movq %rsi, -24(%rsp) ## 8-byte Spill + adcq %r10, %rax + movq %rax, -16(%rsp) ## 8-byte Spill + adcq -40(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -40(%rsp) ## 8-byte Spill + adcq -48(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, -48(%rsp) ## 8-byte Spill + sbbq %r9, %r9 + andl $1, %r9d + movq %rbp, %r10 + subq (%r15), %r10 + movq %rdx, %r11 + sbbq 8(%r15), %r11 + movq %r12, %rbx + sbbq 16(%r15), %rbx + sbbq 24(%r15), %r14 + movq -32(%rsp), %r13 ## 8-byte Reload + sbbq 32(%r15), %r13 + movq -24(%rsp), %rsi ## 8-byte Reload + sbbq 40(%r15), %rsi + movq -16(%rsp), %rax ## 8-byte Reload + sbbq 48(%r15), %rax + sbbq 56(%r15), %rcx + movq -48(%rsp), %r8 ## 8-byte Reload + sbbq 64(%r15), %r8 + sbbq $0, %r9 + andl $1, %r9d + cmovneq %rbp, %r10 + movq %r10, 72(%rdi) + testb %r9b, %r9b + cmovneq %rdx, %r11 + movq %r11, 80(%rdi) + cmovneq %r12, %rbx + movq %rbx, 88(%rdi) + cmovneq -8(%rsp), %r14 ## 8-byte Folded Reload + movq %r14, 96(%rdi) + cmovneq -32(%rsp), %r13 ## 8-byte Folded Reload + movq %r13, 104(%rdi) + cmovneq -24(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 112(%rdi) + cmovneq -16(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 120(%rdi) + cmovneq -40(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 128(%rdi) + cmovneq -48(%rsp), %r8 ## 8-byte Folded Reload + movq %r8, 136(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + .globl _mcl_fpDbl_sub9L + .p2align 4, 0x90 +_mcl_fpDbl_sub9L: ## 
@mcl_fpDbl_sub9L +## BB#0: + pushq %rbp + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %rbx + movq %rcx, %r14 + movq 136(%rdx), %rax + movq %rax, -24(%rsp) ## 8-byte Spill + movq 128(%rdx), %rax + movq %rax, -32(%rsp) ## 8-byte Spill + movq 120(%rdx), %rax + movq %rax, -40(%rsp) ## 8-byte Spill + movq 16(%rsi), %r11 + movq (%rsi), %r12 + movq 8(%rsi), %r13 + xorl %r9d, %r9d + subq (%rdx), %r12 + sbbq 8(%rdx), %r13 + sbbq 16(%rdx), %r11 + movq 24(%rsi), %rbx + sbbq 24(%rdx), %rbx + movq 32(%rsi), %rbp + sbbq 32(%rdx), %rbp + movq 112(%rdx), %r10 + movq 104(%rdx), %rcx + movq %r12, (%rdi) + movq 96(%rdx), %rax + movq %r13, 8(%rdi) + movq 88(%rdx), %r13 + movq %r11, 16(%rdi) + movq 40(%rdx), %r11 + movq %rbx, 24(%rdi) + movq 40(%rsi), %rbx + sbbq %r11, %rbx + movq 48(%rdx), %r11 + movq %rbp, 32(%rdi) + movq 48(%rsi), %rbp + sbbq %r11, %rbp + movq 56(%rdx), %r11 + movq %rbx, 40(%rdi) + movq 56(%rsi), %rbx + sbbq %r11, %rbx + movq 64(%rdx), %r11 + movq %rbp, 48(%rdi) + movq 64(%rsi), %rbp + sbbq %r11, %rbp + movq 80(%rdx), %r8 + movq 72(%rdx), %r11 + movq %rbx, 56(%rdi) + movq 72(%rsi), %r15 + sbbq %r11, %r15 + movq 136(%rsi), %rdx + movq %rbp, 64(%rdi) + movq 80(%rsi), %rbp + sbbq %r8, %rbp + movq 88(%rsi), %r12 + sbbq %r13, %r12 + movq 96(%rsi), %r13 + sbbq %rax, %r13 + movq 104(%rsi), %rax + sbbq %rcx, %rax + movq %rax, -16(%rsp) ## 8-byte Spill + movq 112(%rsi), %rax + sbbq %r10, %rax + movq %rax, -8(%rsp) ## 8-byte Spill + movq 128(%rsi), %rax + movq 120(%rsi), %rcx + sbbq -40(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, -40(%rsp) ## 8-byte Spill + sbbq -32(%rsp), %rax ## 8-byte Folded Reload + movq %rax, -32(%rsp) ## 8-byte Spill + sbbq -24(%rsp), %rdx ## 8-byte Folded Reload + movq %rdx, -24(%rsp) ## 8-byte Spill + movl $0, %r8d + sbbq $0, %r8 + andl $1, %r8d + movq (%r14), %r10 + cmoveq %r9, %r10 + testb %r8b, %r8b + movq 16(%r14), %r8 + cmoveq %r9, %r8 + movq 8(%r14), %rdx + cmoveq %r9, %rdx + movq 64(%r14), %rbx + cmoveq %r9, %rbx + movq 56(%r14), %r11 + cmoveq %r9, %r11 + movq 48(%r14), %rsi + cmoveq %r9, %rsi + movq 40(%r14), %rcx + cmoveq %r9, %rcx + movq 32(%r14), %rax + cmoveq %r9, %rax + cmovneq 24(%r14), %r9 + addq %r15, %r10 + adcq %rbp, %rdx + movq %r10, 72(%rdi) + adcq %r12, %r8 + movq %rdx, 80(%rdi) + adcq %r13, %r9 + movq %r8, 88(%rdi) + movq %r9, 96(%rdi) + adcq -16(%rsp), %rax ## 8-byte Folded Reload + movq %rax, 104(%rdi) + adcq -8(%rsp), %rcx ## 8-byte Folded Reload + movq %rcx, 112(%rdi) + adcq -40(%rsp), %rsi ## 8-byte Folded Reload + movq %rsi, 120(%rdi) + adcq -32(%rsp), %r11 ## 8-byte Folded Reload + movq %r11, 128(%rdi) + adcq -24(%rsp), %rbx ## 8-byte Folded Reload + movq %rbx, 136(%rdi) + popq %rbx + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq %rbp + retq + + +.subsections_via_symbols diff --git a/vendor/github.com/byzantine-lab/mcl/src/asm/x86.bmi2.s b/vendor/github.com/byzantine-lab/mcl/src/asm/x86.bmi2.s new file mode 100644 index 000000000..77729c530 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/asm/x86.bmi2.s @@ -0,0 +1,71547 @@ + .text + .file "" + .globl makeNIST_P192Lbmi2 + .align 16, 0x90 + .type makeNIST_P192Lbmi2,@function +makeNIST_P192Lbmi2: # @makeNIST_P192Lbmi2 +# BB#0: + movl 4(%esp), %eax + movl $-1, 20(%eax) + movl $-1, 16(%eax) + movl $-1, 12(%eax) + movl $-2, 8(%eax) + movl $-1, 4(%eax) + movl $-1, (%eax) + retl $4 +.Lfunc_end0: + .size makeNIST_P192Lbmi2, .Lfunc_end0-makeNIST_P192Lbmi2 + + .globl mcl_fpDbl_mod_NIST_P192Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mod_NIST_P192Lbmi2,@function 
+mcl_fpDbl_mod_NIST_P192Lbmi2: # @mcl_fpDbl_mod_NIST_P192Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 56(%esp), %eax + movl 32(%eax), %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 28(%eax), %edi + movl %edi, 16(%esp) # 4-byte Spill + xorl %edx, %edx + movl (%eax), %ebx + addl %ecx, %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 4(%eax), %ecx + adcl %edi, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 8(%eax), %ebp + adcl %esi, %ebp + movl 36(%eax), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 12(%eax), %esi + adcl %ecx, %esi + movl 40(%eax), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + movl 16(%eax), %ecx + adcl %ebx, %ecx + movl 44(%eax), %edi + movl %edi, (%esp) # 4-byte Spill + movl 20(%eax), %eax + adcl %edi, %eax + adcl $0, %edx + sbbl %edi, %edi + andl $1, %edi + addl %ebx, 24(%esp) # 4-byte Folded Spill + movl (%esp), %ebx # 4-byte Reload + adcl %ebx, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ebp # 4-byte Folded Reload + adcl 16(%esp), %esi # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + adcl $0, %edx + adcl $0, %edi + addl 4(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %esi + adcl $0, %ecx + adcl $0, %eax + adcl $0, %edx + adcl $0, %edi + addl %edx, 24(%esp) # 4-byte Folded Spill + adcl %edi, 28(%esp) # 4-byte Folded Spill + adcl %ebp, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl %esi, %edi + adcl $0, %ecx + adcl $0, %eax + sbbl %ebx, %ebx + andl $1, %ebx + movl 24(%esp), %esi # 4-byte Reload + addl $1, %esi + movl 28(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $1, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %edi, %edx + adcl $0, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl %ecx, %edx + adcl $0, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, %edx + adcl $0, %edx + adcl $-1, %ebx + andl $1, %ebx + jne .LBB1_2 +# BB#1: + movl %edx, %eax +.LBB1_2: + testb %bl, %bl + movl 24(%esp), %edx # 4-byte Reload + jne .LBB1_4 +# BB#3: + movl %esi, %edx +.LBB1_4: + movl 52(%esp), %esi + movl %edx, (%esi) + movl 20(%esp), %edx # 4-byte Reload + movl 28(%esp), %ebx # 4-byte Reload + jne .LBB1_6 +# BB#5: + movl %ebp, %ebx +.LBB1_6: + movl %ebx, 4(%esi) + jne .LBB1_8 +# BB#7: + movl 8(%esp), %edx # 4-byte Reload +.LBB1_8: + movl %edx, 8(%esi) + jne .LBB1_10 +# BB#9: + movl 12(%esp), %edi # 4-byte Reload +.LBB1_10: + movl %edi, 12(%esi) + jne .LBB1_12 +# BB#11: + movl 16(%esp), %ecx # 4-byte Reload +.LBB1_12: + movl %ecx, 16(%esi) + movl %eax, 20(%esi) + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end1: + .size mcl_fpDbl_mod_NIST_P192Lbmi2, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192Lbmi2 + + .globl mcl_fp_sqr_NIST_P192Lbmi2 + .align 16, 0x90 + .type mcl_fp_sqr_NIST_P192Lbmi2,@function +mcl_fp_sqr_NIST_P192Lbmi2: # @mcl_fp_sqr_NIST_P192Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $92, %esp + calll .L2$pb +.L2$pb: + popl %ebx +.Ltmp0: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L2$pb), %ebx + movl 116(%esp), %eax + movl %eax, 4(%esp) + leal 44(%esp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_sqrPre6Lbmi2@PLT + xorl %edi, %edi + movl 76(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 68(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 72(%esp), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 44(%esp), %esi + addl %eax, %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax + adcl %edx, %eax + movl %eax, 40(%esp) # 
4-byte Spill + movl 52(%esp), %ebp + adcl %ecx, %ebp + movl 80(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 56(%esp), %esi + adcl %eax, %esi + movl 84(%esp), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 60(%esp), %ecx + adcl %ebx, %ecx + movl 88(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 64(%esp), %edx + adcl %eax, %edx + adcl $0, %edi + sbbl %eax, %eax + andl $1, %eax + addl %ebx, 36(%esp) # 4-byte Folded Spill + movl 12(%esp), %ebx # 4-byte Reload + adcl %ebx, 40(%esp) # 4-byte Folded Spill + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + adcl 20(%esp), %edx # 4-byte Folded Reload + adcl $0, %edi + adcl $0, %eax + addl 16(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %esi + adcl $0, %ecx + adcl $0, %edx + adcl $0, %edi + adcl $0, %eax + addl %edi, 36(%esp) # 4-byte Folded Spill + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl %ebp, %edi + adcl %esi, %eax + adcl $0, %ecx + adcl $0, %edx + sbbl %ebx, %ebx + andl $1, %ebx + movl 36(%esp), %esi # 4-byte Reload + addl $1, %esi + movl 40(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl %edi, %ebp + adcl $1, %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl $0, %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl %edx, %ebp + adcl $0, %ebp + adcl $-1, %ebx + andl $1, %ebx + jne .LBB2_2 +# BB#1: + movl %ebp, %edx +.LBB2_2: + testb %bl, %bl + movl 36(%esp), %ebx # 4-byte Reload + jne .LBB2_4 +# BB#3: + movl %esi, %ebx +.LBB2_4: + movl 112(%esp), %esi + movl %ebx, (%esi) + movl 40(%esp), %ebx # 4-byte Reload + jne .LBB2_6 +# BB#5: + movl 20(%esp), %ebx # 4-byte Reload +.LBB2_6: + movl %ebx, 4(%esi) + jne .LBB2_8 +# BB#7: + movl 24(%esp), %edi # 4-byte Reload +.LBB2_8: + movl %edi, 8(%esi) + jne .LBB2_10 +# BB#9: + movl 28(%esp), %eax # 4-byte Reload +.LBB2_10: + movl %eax, 12(%esi) + jne .LBB2_12 +# BB#11: + movl 32(%esp), %ecx # 4-byte Reload +.LBB2_12: + movl %ecx, 16(%esi) + movl %edx, 20(%esi) + addl $92, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end2: + .size mcl_fp_sqr_NIST_P192Lbmi2, .Lfunc_end2-mcl_fp_sqr_NIST_P192Lbmi2 + + .globl mcl_fp_mulNIST_P192Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulNIST_P192Lbmi2,@function +mcl_fp_mulNIST_P192Lbmi2: # @mcl_fp_mulNIST_P192Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $92, %esp + calll .L3$pb +.L3$pb: + popl %ebx +.Ltmp1: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L3$pb), %ebx + movl 120(%esp), %eax + movl %eax, 8(%esp) + movl 116(%esp), %eax + movl %eax, 4(%esp) + leal 44(%esp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre6Lbmi2@PLT + xorl %edi, %edi + movl 76(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 68(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 72(%esp), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 44(%esp), %esi + addl %eax, %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax + adcl %edx, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 52(%esp), %ebp + adcl %ecx, %ebp + movl 80(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 56(%esp), %esi + adcl %eax, %esi + movl 84(%esp), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 60(%esp), %ecx + adcl %ebx, %ecx + movl 88(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 64(%esp), %edx + adcl %eax, %edx + adcl $0, %edi + sbbl %eax, %eax + andl $1, %eax + addl %ebx, 36(%esp) # 4-byte Folded Spill + movl 
12(%esp), %ebx # 4-byte Reload + adcl %ebx, 40(%esp) # 4-byte Folded Spill + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + adcl 20(%esp), %edx # 4-byte Folded Reload + adcl $0, %edi + adcl $0, %eax + addl 16(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %esi + adcl $0, %ecx + adcl $0, %edx + adcl $0, %edi + adcl $0, %eax + addl %edi, 36(%esp) # 4-byte Folded Spill + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl %ebp, %edi + adcl %esi, %eax + adcl $0, %ecx + adcl $0, %edx + sbbl %ebx, %ebx + andl $1, %ebx + movl 36(%esp), %esi # 4-byte Reload + addl $1, %esi + movl 40(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl %edi, %ebp + adcl $1, %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl $0, %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl %edx, %ebp + adcl $0, %ebp + adcl $-1, %ebx + andl $1, %ebx + jne .LBB3_2 +# BB#1: + movl %ebp, %edx +.LBB3_2: + testb %bl, %bl + movl 36(%esp), %ebx # 4-byte Reload + jne .LBB3_4 +# BB#3: + movl %esi, %ebx +.LBB3_4: + movl 112(%esp), %esi + movl %ebx, (%esi) + movl 40(%esp), %ebx # 4-byte Reload + jne .LBB3_6 +# BB#5: + movl 20(%esp), %ebx # 4-byte Reload +.LBB3_6: + movl %ebx, 4(%esi) + jne .LBB3_8 +# BB#7: + movl 24(%esp), %edi # 4-byte Reload +.LBB3_8: + movl %edi, 8(%esi) + jne .LBB3_10 +# BB#9: + movl 28(%esp), %eax # 4-byte Reload +.LBB3_10: + movl %eax, 12(%esi) + jne .LBB3_12 +# BB#11: + movl 32(%esp), %ecx # 4-byte Reload +.LBB3_12: + movl %ecx, 16(%esi) + movl %edx, 20(%esi) + addl $92, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end3: + .size mcl_fp_mulNIST_P192Lbmi2, .Lfunc_end3-mcl_fp_mulNIST_P192Lbmi2 + + .globl mcl_fpDbl_mod_NIST_P521Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mod_NIST_P521Lbmi2,@function +mcl_fpDbl_mod_NIST_P521Lbmi2: # @mcl_fpDbl_mod_NIST_P521Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 84(%esp), %ecx + movl 124(%ecx), %edx + movl 128(%ecx), %esi + movl %esi, %eax + shldl $23, %edx, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 120(%ecx), %eax + shldl $23, %eax, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 116(%ecx), %edx + shldl $23, %edx, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 112(%ecx), %eax + shldl $23, %eax, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 108(%ecx), %edx + shldl $23, %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 104(%ecx), %eax + shldl $23, %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 100(%ecx), %edx + shldl $23, %edx, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 96(%ecx), %eax + shldl $23, %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 92(%ecx), %edx + shldl $23, %edx, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 88(%ecx), %eax + shldl $23, %eax, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 84(%ecx), %edi + shldl $23, %edi, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 80(%ecx), %edx + shldl $23, %edx, %edi + movl 76(%ecx), %eax + shldl $23, %eax, %edx + movl 72(%ecx), %ebx + shldl $23, %ebx, %eax + movl 68(%ecx), %ebp + shldl $23, %ebp, %ebx + shrl $9, %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 64(%ecx), %esi + shldl $23, %esi, %ebp + andl $511, %esi # imm = 0x1FF + addl (%ecx), %ebp + adcl 4(%ecx), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + adcl 8(%ecx), %eax + adcl 12(%ecx), %edx + adcl 16(%ecx), %edi + movl 28(%esp), %ebx # 4-byte Reload + adcl 
20(%ecx), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 32(%esp), %ebx # 4-byte Reload + adcl 24(%ecx), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ebx # 4-byte Reload + adcl 28(%ecx), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 40(%esp), %ebx # 4-byte Reload + adcl 32(%ecx), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 44(%esp), %ebx # 4-byte Reload + adcl 36(%ecx), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 48(%esp), %ebx # 4-byte Reload + adcl 40(%ecx), %ebx + movl %ebx, 48(%esp) # 4-byte Spill + movl 24(%esp), %ebx # 4-byte Reload + adcl 44(%ecx), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 52(%esp), %ebx # 4-byte Reload + adcl 48(%ecx), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 20(%esp), %ebx # 4-byte Reload + adcl 52(%ecx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 56(%esp), %ebx # 4-byte Reload + adcl 56(%ecx), %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 12(%esp), %ebx # 4-byte Reload + adcl 60(%ecx), %ebx + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + movl %esi, %ecx + shrl $9, %ecx + andl $1, %ecx + addl %ebp, %ecx + adcl $0, 16(%esp) # 4-byte Folded Spill + adcl $0, %eax + movl %eax, (%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 8(%esp) # 4-byte Spill + movl %edi, %esi + adcl $0, 28(%esp) # 4-byte Folded Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + adcl $0, 48(%esp) # 4-byte Folded Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + adcl $0, 20(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl %ebx, %ebp + adcl $0, %ebp + movl 12(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + movl %ecx, %edi + andl %eax, %edi + andl %edx, %edi + andl %esi, %edi + andl 28(%esp), %edi # 4-byte Folded Reload + andl 32(%esp), %edi # 4-byte Folded Reload + andl 36(%esp), %edi # 4-byte Folded Reload + andl 40(%esp), %edi # 4-byte Folded Reload + andl 44(%esp), %edi # 4-byte Folded Reload + andl 48(%esp), %edi # 4-byte Folded Reload + andl 24(%esp), %edi # 4-byte Folded Reload + andl 52(%esp), %edi # 4-byte Folded Reload + movl 20(%esp), %esi # 4-byte Reload + andl %esi, %edi + andl 56(%esp), %edi # 4-byte Folded Reload + movl %ebx, %edx + movl 16(%esp), %ebx # 4-byte Reload + andl %ebp, %edi + movl %ebp, %eax + movl %edx, %ebp + orl $-512, %ebp # imm = 0xFFFFFFFFFFFFFE00 + andl %edi, %ebp + andl %ebx, %ebp + cmpl $-1, %ebp + movl 80(%esp), %edi + je .LBB4_1 +# BB#3: # %nonzero + movl %ecx, (%edi) + movl %ebx, 4(%edi) + movl (%esp), %ecx # 4-byte Reload + movl %ecx, 8(%edi) + movl 4(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%edi) + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%edi) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 20(%edi) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%edi) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%edi) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%edi) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%edi) + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%edi) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%edi) + movl 52(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%edi) + movl %esi, 52(%edi) + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%edi) + movl %eax, 60(%edi) + andl $511, %edx # imm = 0x1FF + movl %edx, 64(%edi) + jmp .LBB4_2 +.LBB4_1: # %zero + xorl %eax, 
%eax + movl $17, %ecx + rep;stosl +.LBB4_2: # %zero + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end4: + .size mcl_fpDbl_mod_NIST_P521Lbmi2, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521Lbmi2 + + .globl mcl_fp_mulUnitPre1Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre1Lbmi2,@function +mcl_fp_mulUnitPre1Lbmi2: # @mcl_fp_mulUnitPre1Lbmi2 +# BB#0: + movl 8(%esp), %eax + movl (%eax), %edx + mulxl 12(%esp), %ecx, %eax + movl 4(%esp), %edx + movl %ecx, (%edx) + movl %eax, 4(%edx) + retl +.Lfunc_end5: + .size mcl_fp_mulUnitPre1Lbmi2, .Lfunc_end5-mcl_fp_mulUnitPre1Lbmi2 + + .globl mcl_fpDbl_mulPre1Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre1Lbmi2,@function +mcl_fpDbl_mulPre1Lbmi2: # @mcl_fpDbl_mulPre1Lbmi2 +# BB#0: + movl 12(%esp), %eax + movl (%eax), %edx + movl 8(%esp), %eax + mulxl (%eax), %ecx, %eax + movl 4(%esp), %edx + movl %ecx, (%edx) + movl %eax, 4(%edx) + retl +.Lfunc_end6: + .size mcl_fpDbl_mulPre1Lbmi2, .Lfunc_end6-mcl_fpDbl_mulPre1Lbmi2 + + .globl mcl_fpDbl_sqrPre1Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre1Lbmi2,@function +mcl_fpDbl_sqrPre1Lbmi2: # @mcl_fpDbl_sqrPre1Lbmi2 +# BB#0: + movl 8(%esp), %eax + movl (%eax), %edx + mulxl %edx, %ecx, %eax + movl 4(%esp), %edx + movl %ecx, (%edx) + movl %eax, 4(%edx) + retl +.Lfunc_end7: + .size mcl_fpDbl_sqrPre1Lbmi2, .Lfunc_end7-mcl_fpDbl_sqrPre1Lbmi2 + + .globl mcl_fp_mont1Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont1Lbmi2,@function +mcl_fp_mont1Lbmi2: # @mcl_fp_mont1Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %eax + movl (%eax), %edx + movl 20(%esp), %eax + mulxl (%eax), %esi, %ecx + movl 24(%esp), %eax + movl -4(%eax), %edx + imull %esi, %edx + movl (%eax), %edi + mulxl %edi, %edx, %eax + addl %esi, %edx + adcl %ecx, %eax + sbbl %edx, %edx + andl $1, %edx + movl %eax, %ecx + subl %edi, %ecx + sbbl $0, %edx + testb $1, %dl + jne .LBB8_2 +# BB#1: + movl %ecx, %eax +.LBB8_2: + movl 12(%esp), %ecx + movl %eax, (%ecx) + popl %esi + popl %edi + retl +.Lfunc_end8: + .size mcl_fp_mont1Lbmi2, .Lfunc_end8-mcl_fp_mont1Lbmi2 + + .globl mcl_fp_montNF1Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF1Lbmi2,@function +mcl_fp_montNF1Lbmi2: # @mcl_fp_montNF1Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %eax + movl (%eax), %edx + movl 20(%esp), %eax + mulxl (%eax), %esi, %ecx + movl 24(%esp), %eax + movl -4(%eax), %edx + imull %esi, %edx + movl (%eax), %edi + mulxl %edi, %edx, %eax + addl %esi, %edx + adcl %ecx, %eax + movl %eax, %ecx + subl %edi, %ecx + js .LBB9_2 +# BB#1: + movl %ecx, %eax +.LBB9_2: + movl 12(%esp), %ecx + movl %eax, (%ecx) + popl %esi + popl %edi + retl +.Lfunc_end9: + .size mcl_fp_montNF1Lbmi2, .Lfunc_end9-mcl_fp_montNF1Lbmi2 + + .globl mcl_fp_montRed1Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed1Lbmi2,@function +mcl_fp_montRed1Lbmi2: # @mcl_fp_montRed1Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %ecx + movl (%ecx), %esi + movl 20(%esp), %eax + movl -4(%eax), %edx + imull %esi, %edx + movl (%eax), %edi + mulxl %edi, %edx, %eax + addl %esi, %edx + adcl 4(%ecx), %eax + sbbl %edx, %edx + andl $1, %edx + movl %eax, %ecx + subl %edi, %ecx + sbbl $0, %edx + testb $1, %dl + jne .LBB10_2 +# BB#1: + movl %ecx, %eax +.LBB10_2: + movl 12(%esp), %ecx + movl %eax, (%ecx) + popl %esi + popl %edi + retl +.Lfunc_end10: + .size mcl_fp_montRed1Lbmi2, .Lfunc_end10-mcl_fp_montRed1Lbmi2 + + .globl mcl_fp_addPre1Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre1Lbmi2,@function +mcl_fp_addPre1Lbmi2: # @mcl_fp_addPre1Lbmi2 +# BB#0: + movl 12(%esp), %eax + movl (%eax), %eax + movl 4(%esp), 
%ecx + movl 8(%esp), %edx + addl (%edx), %eax + movl %eax, (%ecx) + sbbl %eax, %eax + andl $1, %eax + retl +.Lfunc_end11: + .size mcl_fp_addPre1Lbmi2, .Lfunc_end11-mcl_fp_addPre1Lbmi2 + + .globl mcl_fp_subPre1Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre1Lbmi2,@function +mcl_fp_subPre1Lbmi2: # @mcl_fp_subPre1Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + xorl %eax, %eax + movl 8(%esp), %edx + movl 16(%esp), %esi + subl (%esi), %ecx + movl %ecx, (%edx) + sbbl $0, %eax + andl $1, %eax + popl %esi + retl +.Lfunc_end12: + .size mcl_fp_subPre1Lbmi2, .Lfunc_end12-mcl_fp_subPre1Lbmi2 + + .globl mcl_fp_shr1_1Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_1Lbmi2,@function +mcl_fp_shr1_1Lbmi2: # @mcl_fp_shr1_1Lbmi2 +# BB#0: + movl 8(%esp), %eax + movl (%eax), %eax + shrl %eax + movl 4(%esp), %ecx + movl %eax, (%ecx) + retl +.Lfunc_end13: + .size mcl_fp_shr1_1Lbmi2, .Lfunc_end13-mcl_fp_shr1_1Lbmi2 + + .globl mcl_fp_add1Lbmi2 + .align 16, 0x90 + .type mcl_fp_add1Lbmi2,@function +mcl_fp_add1Lbmi2: # @mcl_fp_add1Lbmi2 +# BB#0: + pushl %esi + movl 16(%esp), %eax + movl (%eax), %eax + movl 8(%esp), %ecx + movl 12(%esp), %edx + addl (%edx), %eax + movl %eax, (%ecx) + sbbl %edx, %edx + andl $1, %edx + movl 20(%esp), %esi + subl (%esi), %eax + sbbl $0, %edx + testb $1, %dl + jne .LBB14_2 +# BB#1: # %nocarry + movl %eax, (%ecx) +.LBB14_2: # %carry + popl %esi + retl +.Lfunc_end14: + .size mcl_fp_add1Lbmi2, .Lfunc_end14-mcl_fp_add1Lbmi2 + + .globl mcl_fp_addNF1Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF1Lbmi2,@function +mcl_fp_addNF1Lbmi2: # @mcl_fp_addNF1Lbmi2 +# BB#0: + movl 12(%esp), %eax + movl (%eax), %eax + movl 8(%esp), %ecx + addl (%ecx), %eax + movl 16(%esp), %edx + movl %eax, %ecx + subl (%edx), %ecx + js .LBB15_2 +# BB#1: + movl %ecx, %eax +.LBB15_2: + movl 4(%esp), %ecx + movl %eax, (%ecx) + retl +.Lfunc_end15: + .size mcl_fp_addNF1Lbmi2, .Lfunc_end15-mcl_fp_addNF1Lbmi2 + + .globl mcl_fp_sub1Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub1Lbmi2,@function +mcl_fp_sub1Lbmi2: # @mcl_fp_sub1Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %eax + xorl %edx, %edx + movl 8(%esp), %ecx + movl 16(%esp), %esi + subl (%esi), %eax + movl %eax, (%ecx) + sbbl $0, %edx + testb $1, %dl + jne .LBB16_2 +# BB#1: # %nocarry + popl %esi + retl +.LBB16_2: # %carry + movl 20(%esp), %edx + addl (%edx), %eax + movl %eax, (%ecx) + popl %esi + retl +.Lfunc_end16: + .size mcl_fp_sub1Lbmi2, .Lfunc_end16-mcl_fp_sub1Lbmi2 + + .globl mcl_fp_subNF1Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF1Lbmi2,@function +mcl_fp_subNF1Lbmi2: # @mcl_fp_subNF1Lbmi2 +# BB#0: + movl 8(%esp), %eax + movl (%eax), %eax + movl 12(%esp), %ecx + subl (%ecx), %eax + movl %eax, %ecx + sarl $31, %ecx + movl 16(%esp), %edx + andl (%edx), %ecx + addl %eax, %ecx + movl 4(%esp), %eax + movl %ecx, (%eax) + retl +.Lfunc_end17: + .size mcl_fp_subNF1Lbmi2, .Lfunc_end17-mcl_fp_subNF1Lbmi2 + + .globl mcl_fpDbl_add1Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add1Lbmi2,@function +mcl_fpDbl_add1Lbmi2: # @mcl_fpDbl_add1Lbmi2 +# BB#0: + pushl %ebx + pushl %esi + movl 20(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %eax + movl 16(%esp), %esi + addl (%esi), %edx + movl 12(%esp), %ecx + adcl 4(%esi), %eax + movl %edx, (%ecx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 24(%esp), %esi + movl %eax, %edx + subl (%esi), %edx + sbbl $0, %ebx + testb $1, %bl + jne .LBB18_2 +# BB#1: + movl %edx, %eax +.LBB18_2: + movl %eax, 4(%ecx) + popl %esi + popl %ebx + retl +.Lfunc_end18: + .size mcl_fpDbl_add1Lbmi2, .Lfunc_end18-mcl_fpDbl_add1Lbmi2 + + .globl 
mcl_fpDbl_sub1Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub1Lbmi2,@function +mcl_fpDbl_sub1Lbmi2: # @mcl_fpDbl_sub1Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %eax + xorl %ecx, %ecx + movl 16(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %eax + movl 8(%esp), %edx + movl %esi, (%edx) + sbbl $0, %ecx + andl $1, %ecx + je .LBB19_2 +# BB#1: + movl 20(%esp), %ecx + movl (%ecx), %ecx +.LBB19_2: + addl %eax, %ecx + movl %ecx, 4(%edx) + popl %esi + retl +.Lfunc_end19: + .size mcl_fpDbl_sub1Lbmi2, .Lfunc_end19-mcl_fpDbl_sub1Lbmi2 + + .globl mcl_fp_mulUnitPre2Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre2Lbmi2,@function +mcl_fp_mulUnitPre2Lbmi2: # @mcl_fp_mulUnitPre2Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 20(%esp), %edx + movl 16(%esp), %eax + mulxl 4(%eax), %ecx, %esi + mulxl (%eax), %eax, %edx + movl 12(%esp), %edi + movl %eax, (%edi) + addl %ecx, %edx + movl %edx, 4(%edi) + adcl $0, %esi + movl %esi, 8(%edi) + popl %esi + popl %edi + retl +.Lfunc_end20: + .size mcl_fp_mulUnitPre2Lbmi2, .Lfunc_end20-mcl_fp_mulUnitPre2Lbmi2 + + .globl mcl_fpDbl_mulPre2Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre2Lbmi2,@function +mcl_fpDbl_mulPre2Lbmi2: # @mcl_fpDbl_mulPre2Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %eax + movl 4(%ecx), %ecx + movl 28(%esp), %esi + movl (%esi), %edi + movl %ecx, %edx + mulxl %edi, %ebx, %ebp + movl %eax, %edx + mulxl %edi, %edx, %edi + addl %ebx, %edi + movl 20(%esp), %ebx + movl %edx, (%ebx) + adcl $0, %ebp + movl 4(%esi), %esi + movl %eax, %edx + mulxl %esi, %eax, %ebx + addl %edi, %eax + movl %ecx, %edx + mulxl %esi, %edx, %ecx + adcl %ebp, %edx + sbbl %esi, %esi + andl $1, %esi + addl %ebx, %edx + movl 20(%esp), %edi + movl %eax, 4(%edi) + movl %edx, 8(%edi) + adcl %ecx, %esi + movl %esi, 12(%edi) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end21: + .size mcl_fpDbl_mulPre2Lbmi2, .Lfunc_end21-mcl_fpDbl_mulPre2Lbmi2 + + .globl mcl_fpDbl_sqrPre2Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre2Lbmi2,@function +mcl_fpDbl_sqrPre2Lbmi2: # @mcl_fpDbl_sqrPre2Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 20(%esp), %ecx + movl (%ecx), %eax + movl 4(%ecx), %ecx + movl 16(%esp), %esi + movl %eax, %edx + mulxl %eax, %edx, %edi + movl %edx, (%esi) + movl %ecx, %edx + mulxl %eax, %edx, %eax + addl %edx, %edi + movl %eax, %ebx + adcl $0, %ebx + addl %edx, %edi + movl %ecx, %edx + mulxl %ecx, %edx, %ecx + adcl %ebx, %edx + sbbl %ebx, %ebx + andl $1, %ebx + addl %eax, %edx + movl %edi, 4(%esi) + movl %edx, 8(%esi) + adcl %ecx, %ebx + movl %ebx, 12(%esi) + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end22: + .size mcl_fpDbl_sqrPre2Lbmi2, .Lfunc_end22-mcl_fpDbl_sqrPre2Lbmi2 + + .globl mcl_fp_mont2Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont2Lbmi2,@function +mcl_fp_mont2Lbmi2: # @mcl_fp_mont2Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $24, %esp + movl 48(%esp), %eax + movl (%eax), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 52(%esp), %eax + movl (%eax), %eax + mulxl %eax, %ecx, %esi + movl %edi, %edx + mulxl %eax, %edx, %edi + movl %edx, (%esp) # 4-byte Spill + addl %ecx, %edi + adcl $0, %esi + movl 56(%esp), %eax + movl -4(%eax), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + imull %ecx, %edx + movl (%eax), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 4(%eax), %eax + movl %eax, 20(%esp) # 4-byte Spill + mulxl %eax, %ebp, %ecx + mulxl %ebx, %edx, %eax + 
addl %ebp, %eax + adcl $0, %ecx + addl (%esp), %edx # 4-byte Folded Reload + adcl %edi, %eax + adcl %esi, %ecx + movl 52(%esp), %edx + movl 4(%edx), %edx + sbbl %ebx, %ebx + andl $1, %ebx + mulxl 4(%esp), %esi, %ebp # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + mulxl 8(%esp), %edi, %esi # 4-byte Folded Reload + addl 4(%esp), %esi # 4-byte Folded Reload + adcl $0, %ebp + addl %eax, %edi + adcl %ecx, %esi + adcl %ebx, %ebp + sbbl %ecx, %ecx + movl 12(%esp), %edx # 4-byte Reload + imull %edi, %edx + movl %edx, %eax + mulxl 16(%esp), %ebx, %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, %edx + mulxl 20(%esp), %edx, %eax # 4-byte Folded Reload + addl 12(%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + andl $1, %ecx + addl %edi, %ebx + adcl %esi, %edx + adcl %ebp, %eax + adcl $0, %ecx + movl %edx, %ebp + subl 16(%esp), %ebp # 4-byte Folded Reload + movl %eax, %esi + sbbl 20(%esp), %esi # 4-byte Folded Reload + sbbl $0, %ecx + andl $1, %ecx + jne .LBB23_2 +# BB#1: + movl %ebp, %edx +.LBB23_2: + movl 44(%esp), %edi + movl %edx, (%edi) + testb %cl, %cl + jne .LBB23_4 +# BB#3: + movl %esi, %eax +.LBB23_4: + movl %eax, 4(%edi) + addl $24, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end23: + .size mcl_fp_mont2Lbmi2, .Lfunc_end23-mcl_fp_mont2Lbmi2 + + .globl mcl_fp_montNF2Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF2Lbmi2,@function +mcl_fp_montNF2Lbmi2: # @mcl_fp_montNF2Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 44(%esp), %eax + movl (%eax), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 48(%esp), %eax + movl (%eax), %eax + mulxl %eax, %edi, %ebp + movl %ecx, %edx + mulxl %eax, %ecx, %esi + addl %edi, %esi + adcl $0, %ebp + movl 52(%esp), %eax + movl -4(%eax), %ebx + movl %ecx, %edx + imull %ebx, %edx + movl (%eax), %eax + movl %eax, 16(%esp) # 4-byte Spill + mulxl %eax, %edi, %eax + movl %eax, (%esp) # 4-byte Spill + addl %ecx, %edi + movl 52(%esp), %eax + movl 4(%eax), %eax + movl %eax, 12(%esp) # 4-byte Spill + mulxl %eax, %edi, %edx + adcl %esi, %edi + adcl $0, %ebp + addl (%esp), %edi # 4-byte Folded Reload + adcl %edx, %ebp + movl 48(%esp), %eax + movl 4(%eax), %edx + mulxl 4(%esp), %eax, %esi # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + mulxl 8(%esp), %eax, %ecx # 4-byte Folded Reload + addl 4(%esp), %ecx # 4-byte Folded Reload + adcl $0, %esi + addl %edi, %eax + adcl %ebp, %ecx + adcl $0, %esi + imull %eax, %ebx + movl %ebx, %edx + movl 16(%esp), %ebp # 4-byte Reload + mulxl %ebp, %edx, %edi + addl %eax, %edx + movl %ebx, %edx + movl 12(%esp), %ebx # 4-byte Reload + mulxl %ebx, %eax, %edx + adcl %ecx, %eax + adcl $0, %esi + addl %edi, %eax + adcl %edx, %esi + movl %eax, %edx + subl %ebp, %edx + movl %esi, %ecx + sbbl %ebx, %ecx + testl %ecx, %ecx + js .LBB24_2 +# BB#1: + movl %edx, %eax +.LBB24_2: + movl 40(%esp), %edx + movl %eax, (%edx) + js .LBB24_4 +# BB#3: + movl %ecx, %esi +.LBB24_4: + movl %esi, 4(%edx) + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end24: + .size mcl_fp_montNF2Lbmi2, .Lfunc_end24-mcl_fp_montNF2Lbmi2 + + .globl mcl_fp_montRed2Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed2Lbmi2,@function +mcl_fp_montRed2Lbmi2: # @mcl_fp_montRed2Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $16, %esp + movl 44(%esp), %esi + movl -4(%esi), %ecx + movl (%esi), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 40(%esp), %eax + movl (%eax), 
%ebx + movl %ebx, %edx + imull %ecx, %edx + movl 4(%esi), %eax + movl %eax, 12(%esp) # 4-byte Spill + mulxl %eax, %ebp, %esi + mulxl %edi, %edx, %eax + addl %ebp, %eax + adcl $0, %esi + addl %ebx, %edx + movl 40(%esp), %edi + movl 12(%edi), %edx + adcl 4(%edi), %eax + adcl 8(%edi), %esi + adcl $0, %edx + movl %edx, 4(%esp) # 4-byte Spill + sbbl %ebx, %ebx + imull %eax, %ecx + movl %ecx, %edx + mulxl 8(%esp), %edi, %edx # 4-byte Folded Reload + movl %edx, (%esp) # 4-byte Spill + movl %ecx, %edx + mulxl 12(%esp), %edx, %ebp # 4-byte Folded Reload + addl (%esp), %edx # 4-byte Folded Reload + adcl $0, %ebp + andl $1, %ebx + addl %eax, %edi + adcl %esi, %edx + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl $0, %ebx + movl %edx, %edi + subl 8(%esp), %edi # 4-byte Folded Reload + movl %ebp, %ecx + sbbl 12(%esp), %ecx # 4-byte Folded Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB25_2 +# BB#1: + movl %edi, %edx +.LBB25_2: + movl 36(%esp), %esi + movl %edx, (%esi) + testb %bl, %bl + jne .LBB25_4 +# BB#3: + movl %ecx, %ebp +.LBB25_4: + movl %ebp, 4(%esi) + addl $16, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end25: + .size mcl_fp_montRed2Lbmi2, .Lfunc_end25-mcl_fp_montRed2Lbmi2 + + .globl mcl_fp_addPre2Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre2Lbmi2,@function +mcl_fp_addPre2Lbmi2: # @mcl_fp_addPre2Lbmi2 +# BB#0: + pushl %esi + movl 16(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %eax + movl 12(%esp), %edx + addl (%edx), %ecx + movl 8(%esp), %esi + adcl 4(%edx), %eax + movl %ecx, (%esi) + movl %eax, 4(%esi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + retl +.Lfunc_end26: + .size mcl_fp_addPre2Lbmi2, .Lfunc_end26-mcl_fp_addPre2Lbmi2 + + .globl mcl_fp_subPre2Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre2Lbmi2,@function +mcl_fp_subPre2Lbmi2: # @mcl_fp_subPre2Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + xorl %eax, %eax + movl 16(%esp), %esi + subl (%esi), %ecx + sbbl 4(%esi), %edx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl %edx, 4(%esi) + sbbl $0, %eax + andl $1, %eax + popl %esi + retl +.Lfunc_end27: + .size mcl_fp_subPre2Lbmi2, .Lfunc_end27-mcl_fp_subPre2Lbmi2 + + .globl mcl_fp_shr1_2Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_2Lbmi2,@function +mcl_fp_shr1_2Lbmi2: # @mcl_fp_shr1_2Lbmi2 +# BB#0: + movl 8(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %eax + shrdl $1, %eax, %ecx + movl 4(%esp), %edx + movl %ecx, (%edx) + shrl %eax + movl %eax, 4(%edx) + retl +.Lfunc_end28: + .size mcl_fp_shr1_2Lbmi2, .Lfunc_end28-mcl_fp_shr1_2Lbmi2 + + .globl mcl_fp_add2Lbmi2 + .align 16, 0x90 + .type mcl_fp_add2Lbmi2,@function +mcl_fp_add2Lbmi2: # @mcl_fp_add2Lbmi2 +# BB#0: + pushl %ebx + pushl %esi + movl 20(%esp), %ecx + movl (%ecx), %eax + movl 4(%ecx), %ecx + movl 16(%esp), %esi + addl (%esi), %eax + movl 12(%esp), %edx + adcl 4(%esi), %ecx + movl %eax, (%edx) + movl %ecx, 4(%edx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 24(%esp), %esi + subl (%esi), %eax + sbbl 4(%esi), %ecx + sbbl $0, %ebx + testb $1, %bl + jne .LBB29_2 +# BB#1: # %nocarry + movl %eax, (%edx) + movl %ecx, 4(%edx) +.LBB29_2: # %carry + popl %esi + popl %ebx + retl +.Lfunc_end29: + .size mcl_fp_add2Lbmi2, .Lfunc_end29-mcl_fp_add2Lbmi2 + + .globl mcl_fp_addNF2Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF2Lbmi2,@function +mcl_fp_addNF2Lbmi2: # @mcl_fp_addNF2Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 20(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %eax + movl 16(%esp), %edx + addl (%edx), %ecx + adcl 4(%edx), %eax + movl 24(%esp), %edi + movl %ecx, %esi 
+ subl (%edi), %esi + movl %eax, %edx + sbbl 4(%edi), %edx + testl %edx, %edx + js .LBB30_2 +# BB#1: + movl %esi, %ecx +.LBB30_2: + movl 12(%esp), %esi + movl %ecx, (%esi) + js .LBB30_4 +# BB#3: + movl %edx, %eax +.LBB30_4: + movl %eax, 4(%esi) + popl %esi + popl %edi + retl +.Lfunc_end30: + .size mcl_fp_addNF2Lbmi2, .Lfunc_end30-mcl_fp_addNF2Lbmi2 + + .globl mcl_fp_sub2Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub2Lbmi2,@function +mcl_fp_sub2Lbmi2: # @mcl_fp_sub2Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 20(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %eax + xorl %ebx, %ebx + movl 24(%esp), %edx + subl (%edx), %ecx + sbbl 4(%edx), %eax + movl 16(%esp), %edx + movl %ecx, (%edx) + movl %eax, 4(%edx) + sbbl $0, %ebx + testb $1, %bl + je .LBB31_2 +# BB#1: # %carry + movl 28(%esp), %esi + movl 4(%esi), %edi + addl (%esi), %ecx + movl %ecx, (%edx) + adcl %eax, %edi + movl %edi, 4(%edx) +.LBB31_2: # %nocarry + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end31: + .size mcl_fp_sub2Lbmi2, .Lfunc_end31-mcl_fp_sub2Lbmi2 + + .globl mcl_fp_subNF2Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF2Lbmi2,@function +mcl_fp_subNF2Lbmi2: # @mcl_fp_subNF2Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %eax + movl 20(%esp), %edx + subl (%edx), %ecx + sbbl 4(%edx), %eax + movl %eax, %edx + sarl $31, %edx + movl 24(%esp), %esi + movl 4(%esi), %edi + andl %edx, %edi + andl (%esi), %edx + addl %ecx, %edx + movl 12(%esp), %ecx + movl %edx, (%ecx) + adcl %eax, %edi + movl %edi, 4(%ecx) + popl %esi + popl %edi + retl +.Lfunc_end32: + .size mcl_fp_subNF2Lbmi2, .Lfunc_end32-mcl_fp_subNF2Lbmi2 + + .globl mcl_fpDbl_add2Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add2Lbmi2,@function +mcl_fpDbl_add2Lbmi2: # @mcl_fpDbl_add2Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 28(%esp), %edx + movl 12(%edx), %esi + movl 24(%esp), %edi + movl 12(%edi), %eax + movl 8(%edx), %ecx + movl (%edx), %ebx + movl 4(%edx), %ebp + addl (%edi), %ebx + adcl 4(%edi), %ebp + movl 20(%esp), %edx + adcl 8(%edi), %ecx + movl %ebx, (%edx) + movl %ebp, 4(%edx) + adcl %esi, %eax + sbbl %ebx, %ebx + andl $1, %ebx + movl 32(%esp), %ebp + movl %ecx, %esi + subl (%ebp), %esi + movl %eax, %edi + sbbl 4(%ebp), %edi + sbbl $0, %ebx + andl $1, %ebx + jne .LBB33_2 +# BB#1: + movl %edi, %eax +.LBB33_2: + testb %bl, %bl + jne .LBB33_4 +# BB#3: + movl %esi, %ecx +.LBB33_4: + movl %ecx, 8(%edx) + movl %eax, 12(%edx) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end33: + .size mcl_fpDbl_add2Lbmi2, .Lfunc_end33-mcl_fpDbl_add2Lbmi2 + + .globl mcl_fpDbl_sub2Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub2Lbmi2,@function +mcl_fpDbl_sub2Lbmi2: # @mcl_fpDbl_sub2Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %ebx, %ebx + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %eax + sbbl 8(%edx), %eax + movl 12(%edx), %ebp + movl 12(%ecx), %edx + movl 20(%esp), %ecx + movl %esi, (%ecx) + movl %edi, 4(%ecx) + sbbl %ebp, %edx + movl 32(%esp), %edi + movl (%edi), %esi + sbbl $0, %ebx + andl $1, %ebx + jne .LBB34_1 +# BB#2: + xorl %edi, %edi + jmp .LBB34_3 +.LBB34_1: + movl 4(%edi), %edi +.LBB34_3: + testb %bl, %bl + jne .LBB34_5 +# BB#4: + xorl %esi, %esi +.LBB34_5: + addl %eax, %esi + movl %esi, 8(%ecx) + adcl %edx, %edi + movl %edi, 12(%ecx) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end34: + .size mcl_fpDbl_sub2Lbmi2, 
.Lfunc_end34-mcl_fpDbl_sub2Lbmi2 + + .globl mcl_fp_mulUnitPre3Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre3Lbmi2,@function +mcl_fp_mulUnitPre3Lbmi2: # @mcl_fp_mulUnitPre3Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %edx + movl 20(%esp), %eax + mulxl 4(%eax), %esi, %ecx + mulxl (%eax), %edi, %ebx + addl %esi, %ebx + mulxl 8(%eax), %eax, %edx + movl 16(%esp), %esi + movl %edi, (%esi) + movl %ebx, 4(%esi) + adcl %ecx, %eax + movl %eax, 8(%esi) + adcl $0, %edx + movl %edx, 12(%esi) + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end35: + .size mcl_fp_mulUnitPre3Lbmi2, .Lfunc_end35-mcl_fp_mulUnitPre3Lbmi2 + + .globl mcl_fpDbl_mulPre3Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre3Lbmi2,@function +mcl_fpDbl_mulPre3Lbmi2: # @mcl_fpDbl_mulPre3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $16, %esp + movl 40(%esp), %ecx + movl (%ecx), %eax + movl 4(%ecx), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 44(%esp), %esi + movl (%esi), %edi + mulxl %edi, %ebx, %ebp + movl %eax, %edx + movl %eax, %esi + mulxl %edi, %edx, %eax + movl %edx, 4(%esp) # 4-byte Spill + addl %ebx, %eax + movl 8(%ecx), %edx + movl %edx, 8(%esp) # 4-byte Spill + mulxl %edi, %ebx, %edi + adcl %ebp, %ebx + movl 36(%esp), %ecx + movl 4(%esp), %edx # 4-byte Reload + movl %edx, (%ecx) + adcl $0, %edi + movl 44(%esp), %ecx + movl 4(%ecx), %ebp + movl %esi, %edx + mulxl %ebp, %ecx, %edx + movl %edx, 4(%esp) # 4-byte Spill + addl %eax, %ecx + movl 12(%esp), %edx # 4-byte Reload + mulxl %ebp, %eax, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %ebx, %eax + movl 8(%esp), %edx # 4-byte Reload + mulxl %ebp, %ebx, %edx + adcl %edi, %ebx + sbbl %edi, %edi + andl $1, %edi + addl 4(%esp), %eax # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + adcl %edx, %edi + movl 36(%esp), %edx + movl %ecx, 4(%edx) + movl 44(%esp), %ecx + movl 8(%ecx), %ecx + movl %esi, %edx + mulxl %ecx, %ebp, %edx + movl %edx, 4(%esp) # 4-byte Spill + addl %eax, %ebp + movl 12(%esp), %edx # 4-byte Reload + mulxl %ecx, %eax, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl %ebx, %eax + movl 8(%esp), %edx # 4-byte Reload + mulxl %ecx, %edx, %ecx + adcl %edi, %edx + sbbl %esi, %esi + andl $1, %esi + addl 4(%esp), %eax # 4-byte Folded Reload + adcl 12(%esp), %edx # 4-byte Folded Reload + movl 36(%esp), %edi + movl %ebp, 8(%edi) + movl %eax, 12(%edi) + movl %edx, 16(%edi) + adcl %ecx, %esi + movl %esi, 20(%edi) + addl $16, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end36: + .size mcl_fpDbl_mulPre3Lbmi2, .Lfunc_end36-mcl_fpDbl_mulPre3Lbmi2 + + .globl mcl_fpDbl_sqrPre3Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre3Lbmi2,@function +mcl_fpDbl_sqrPre3Lbmi2: # @mcl_fpDbl_sqrPre3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 44(%esp), %edx + movl 8(%edx), %edi + movl %edi, (%esp) # 4-byte Spill + movl (%edx), %ecx + movl 4(%edx), %esi + movl 40(%esp), %eax + movl %ecx, %edx + mulxl %ecx, %edx, %ebx + movl %edx, (%eax) + movl %esi, %edx + mulxl %ecx, %ebp, %eax + movl %eax, 8(%esp) # 4-byte Spill + addl %ebp, %ebx + movl %edi, %edx + mulxl %ecx, %edx, %ecx + movl %edx, 12(%esp) # 4-byte Spill + movl %ecx, 16(%esp) # 4-byte Spill + movl %eax, %edi + adcl %edx, %edi + adcl $0, %ecx + addl %ebp, %ebx + movl %esi, %edx + mulxl %esi, %ebp, %eax + movl %eax, 4(%esp) # 4-byte Spill + adcl %edi, %ebp + movl (%esp), %eax # 4-byte Reload + movl %eax, %edx + mulxl %esi, %edx, %esi + adcl %edx, %ecx + sbbl %edi, %edi + andl $1, %edi + addl 
8(%esp), %ebp # 4-byte Folded Reload + adcl 4(%esp), %ecx # 4-byte Folded Reload + adcl %esi, %edi + addl 12(%esp), %ebp # 4-byte Folded Reload + adcl %edx, %ecx + movl %eax, %edx + mulxl %eax, %edx, %eax + adcl %edi, %edx + sbbl %edi, %edi + andl $1, %edi + addl 16(%esp), %ecx # 4-byte Folded Reload + adcl %esi, %edx + movl 40(%esp), %esi + movl %ebx, 4(%esi) + movl %ebp, 8(%esi) + movl %ecx, 12(%esi) + movl %edx, 16(%esi) + adcl %eax, %edi + movl %edi, 20(%esi) + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end37: + .size mcl_fpDbl_sqrPre3Lbmi2, .Lfunc_end37-mcl_fpDbl_sqrPre3Lbmi2 + + .globl mcl_fp_mont3Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont3Lbmi2,@function +mcl_fp_mont3Lbmi2: # @mcl_fp_mont3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $44, %esp + movl 68(%esp), %eax + movl 8(%eax), %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 72(%esp), %ecx + movl (%ecx), %ecx + mulxl %ecx, %edx, %edi + movl %edx, 40(%esp) # 4-byte Spill + movl (%eax), %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 8(%esp) # 4-byte Spill + mulxl %ecx, %eax, %ebp + movl %esi, %edx + mulxl %ecx, %edx, %ebx + movl %edx, 4(%esp) # 4-byte Spill + addl %eax, %ebx + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 24(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 76(%esp), %esi + movl -4(%esi), %eax + movl %eax, 20(%esp) # 4-byte Spill + imull %eax, %edx + movl (%esi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 4(%esi), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %edi + movl %edi, (%esp) # 4-byte Spill + mulxl %eax, %ebp, %edi + addl %ecx, %edi + movl 8(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill + mulxl %eax, %ecx, %esi + adcl (%esp), %ecx # 4-byte Folded Reload + adcl $0, %esi + addl 4(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %edi + adcl 24(%esp), %ecx # 4-byte Folded Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 72(%esp), %eax + movl 4(%eax), %edx + mulxl 16(%esp), %ebx, %eax # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + mulxl 8(%esp), %ebx, %eax # 4-byte Folded Reload + movl %ebx, (%esp) # 4-byte Spill + mulxl 12(%esp), %ebx, %ebp # 4-byte Folded Reload + addl (%esp), %ebp # 4-byte Folded Reload + movl %eax, %edx + adcl 4(%esp), %edx # 4-byte Folded Reload + movl 28(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %edi, %ebx + adcl %ecx, %ebp + adcl %esi, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl %ebx, %eax + movl %eax, %edx + imull 20(%esp), %edx # 4-byte Folded Reload + mulxl 40(%esp), %ecx, %esi # 4-byte Folded Reload + movl %esi, (%esp) # 4-byte Spill + mulxl 36(%esp), %esi, %ebx # 4-byte Folded Reload + addl %ecx, %ebx + mulxl 32(%esp), %ecx, %edi # 4-byte Folded Reload + adcl (%esp), %ecx # 4-byte Folded Reload + adcl $0, %edi + movl 24(%esp), %edx # 4-byte Reload + andl $1, %edx + addl %eax, %esi + adcl %ebp, %ebx + adcl 4(%esp), %ecx # 4-byte Folded Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 72(%esp), %edx + movl 8(%edx), %edx + mulxl 16(%esp), %esi, %eax # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + mulxl 8(%esp), %eax, %ebp # 4-byte Folded 
Reload + movl %eax, 8(%esp) # 4-byte Spill + mulxl 12(%esp), %eax, %esi # 4-byte Folded Reload + addl 8(%esp), %esi # 4-byte Folded Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + movl 28(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %ebx, %eax + movl %eax, 16(%esp) # 4-byte Spill + adcl %ecx, %esi + adcl %edi, %ebp + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + sbbl %ebx, %ebx + movl 20(%esp), %edx # 4-byte Reload + imull %eax, %edx + mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + movl %edx, %eax + mulxl 40(%esp), %edi, %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + addl 20(%esp), %edi # 4-byte Folded Reload + movl %eax, %edx + mulxl 32(%esp), %edx, %eax # 4-byte Folded Reload + adcl 24(%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + andl $1, %ebx + addl 16(%esp), %ecx # 4-byte Folded Reload + adcl %esi, %edi + adcl %ebp, %edx + adcl 28(%esp), %eax # 4-byte Folded Reload + adcl $0, %ebx + movl %edi, %ebp + subl 36(%esp), %ebp # 4-byte Folded Reload + movl %edx, %esi + sbbl 40(%esp), %esi # 4-byte Folded Reload + movl %eax, %ecx + sbbl 32(%esp), %ecx # 4-byte Folded Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB38_2 +# BB#1: + movl %ebp, %edi +.LBB38_2: + movl 64(%esp), %ebp + movl %edi, (%ebp) + testb %bl, %bl + jne .LBB38_4 +# BB#3: + movl %esi, %edx +.LBB38_4: + movl %edx, 4(%ebp) + jne .LBB38_6 +# BB#5: + movl %ecx, %eax +.LBB38_6: + movl %eax, 8(%ebp) + addl $44, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end38: + .size mcl_fp_mont3Lbmi2, .Lfunc_end38-mcl_fp_mont3Lbmi2 + + .globl mcl_fp_montNF3Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF3Lbmi2,@function +mcl_fp_montNF3Lbmi2: # @mcl_fp_montNF3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $36, %esp + movl 60(%esp), %eax + movl (%eax), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 64(%esp), %ecx + movl (%ecx), %ecx + mulxl %ecx, %esi, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %edi, %edx + mulxl %ecx, %edi, %ebp + addl %esi, %ebp + movl 8(%eax), %edx + movl %edx, 8(%esp) # 4-byte Spill + mulxl %ecx, %eax, %ebx + adcl 32(%esp), %eax # 4-byte Folded Reload + adcl $0, %ebx + movl 68(%esp), %esi + movl -4(%esi), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl %edi, %edx + imull %ecx, %edx + movl (%esi), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + mulxl %ecx, %esi, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + addl %edi, %esi + movl 68(%esp), %esi + movl 4(%esi), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + mulxl %ecx, %edi, %ecx + adcl %ebp, %edi + movl 8(%esi), %esi + movl %esi, 24(%esp) # 4-byte Spill + mulxl %esi, %ebp, %edx + adcl %eax, %ebp + adcl $0, %ebx + addl 4(%esp), %edi # 4-byte Folded Reload + adcl %ecx, %ebp + adcl %edx, %ebx + movl 64(%esp), %eax + movl 4(%eax), %edx + mulxl 12(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + mulxl 16(%esp), %esi, %ecx # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + addl %eax, %ecx + mulxl 8(%esp), %esi, %eax # 4-byte Folded Reload + adcl (%esp), %esi # 4-byte Folded Reload + adcl $0, %eax + movl 4(%esp), %edx # 4-byte Reload + addl %edi, %edx + adcl %ebp, %ecx + adcl %ebx, %esi + adcl $0, %eax + movl %edx, %ebp + imull 20(%esp), %edx # 4-byte Folded Reload + mulxl 32(%esp), %ebx, %edi # 4-byte Folded Reload + addl %ebp, %ebx + mulxl 28(%esp), %ebp, %ebx # 4-byte Folded Reload + adcl %ecx, %ebp + mulxl 24(%esp), %ecx, 
%edx # 4-byte Folded Reload + adcl %esi, %ecx + adcl $0, %eax + addl %edi, %ebp + adcl %ebx, %ecx + adcl %edx, %eax + movl 64(%esp), %edx + movl 8(%edx), %edx + mulxl 12(%esp), %esi, %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + mulxl 16(%esp), %ebx, %edi # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + addl %esi, %edi + mulxl 8(%esp), %ebx, %esi # 4-byte Folded Reload + adcl 12(%esp), %ebx # 4-byte Folded Reload + adcl $0, %esi + addl %ebp, 16(%esp) # 4-byte Folded Spill + adcl %ecx, %edi + adcl %eax, %ebx + adcl $0, %esi + movl 20(%esp), %edx # 4-byte Reload + movl 16(%esp), %ecx # 4-byte Reload + imull %ecx, %edx + mulxl 32(%esp), %eax, %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + addl %ecx, %eax + movl %edx, %eax + mulxl 28(%esp), %ecx, %ebp # 4-byte Folded Reload + adcl %edi, %ecx + mulxl 24(%esp), %eax, %edx # 4-byte Folded Reload + adcl %ebx, %eax + adcl $0, %esi + addl 20(%esp), %ecx # 4-byte Folded Reload + adcl %ebp, %eax + adcl %edx, %esi + movl %ecx, %ebp + subl 32(%esp), %ebp # 4-byte Folded Reload + movl %eax, %edi + sbbl 28(%esp), %edi # 4-byte Folded Reload + movl %esi, %edx + sbbl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, %ebx + sarl $31, %ebx + testl %ebx, %ebx + js .LBB39_2 +# BB#1: + movl %ebp, %ecx +.LBB39_2: + movl 56(%esp), %ebx + movl %ecx, (%ebx) + js .LBB39_4 +# BB#3: + movl %edi, %eax +.LBB39_4: + movl %eax, 4(%ebx) + js .LBB39_6 +# BB#5: + movl %edx, %esi +.LBB39_6: + movl %esi, 8(%ebx) + addl $36, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end39: + .size mcl_fp_montNF3Lbmi2, .Lfunc_end39-mcl_fp_montNF3Lbmi2 + + .globl mcl_fp_montRed3Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed3Lbmi2,@function +mcl_fp_montRed3Lbmi2: # @mcl_fp_montRed3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $36, %esp + movl 64(%esp), %ecx + movl -4(%ecx), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl (%ecx), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax + movl (%eax), %ebx + movl %ebx, %edx + imull %edi, %edx + movl 8(%ecx), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 4(%ecx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + mulxl %edi, %edi, %eax + movl %edi, 16(%esp) # 4-byte Spill + mulxl %ecx, %ebp, %edi + mulxl %esi, %edx, %ecx + addl %ebp, %ecx + adcl 16(%esp), %edi # 4-byte Folded Reload + adcl $0, %eax + addl %ebx, %edx + movl 60(%esp), %edx + adcl 4(%edx), %ecx + adcl 8(%edx), %edi + adcl 12(%edx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 20(%edx), %eax + movl 16(%edx), %edx + adcl $0, %edx + movl %edx, 8(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 16(%esp) # 4-byte Spill + sbbl %ebx, %ebx + andl $1, %ebx + movl %ecx, %edx + imull 20(%esp), %edx # 4-byte Folded Reload + mulxl 28(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, (%esp) # 4-byte Spill + mulxl 24(%esp), %ebp, %eax # 4-byte Folded Reload + movl %ebp, 4(%esp) # 4-byte Spill + addl %esi, %eax + mulxl 32(%esp), %esi, %ebp # 4-byte Folded Reload + adcl (%esp), %esi # 4-byte Folded Reload + adcl $0, %ebp + addl %ecx, 4(%esp) # 4-byte Folded Spill + adcl %edi, %eax + adcl 12(%esp), %esi # 4-byte Folded Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl $0, 16(%esp) # 4-byte Folded Spill + adcl $0, %ebx + movl 20(%esp), %edx # 4-byte Reload + imull %eax, %edx + mulxl 24(%esp), %ecx, %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + movl %ecx, 20(%esp) # 4-byte Spill + movl %edx, %ecx + mulxl 28(%esp), %edi, %edx # 4-byte Folded Reload + movl %edx, 
12(%esp) # 4-byte Spill + addl 8(%esp), %edi # 4-byte Folded Reload + movl %ecx, %edx + mulxl 32(%esp), %ecx, %edx # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + adcl $0, %edx + addl %eax, 20(%esp) # 4-byte Folded Spill + adcl %esi, %edi + adcl %ebp, %ecx + adcl 16(%esp), %edx # 4-byte Folded Reload + adcl $0, %ebx + movl %edi, %ebp + subl 24(%esp), %ebp # 4-byte Folded Reload + movl %ecx, %esi + sbbl 28(%esp), %esi # 4-byte Folded Reload + movl %edx, %eax + sbbl 32(%esp), %eax # 4-byte Folded Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB40_2 +# BB#1: + movl %ebp, %edi +.LBB40_2: + movl 56(%esp), %ebp + movl %edi, (%ebp) + testb %bl, %bl + jne .LBB40_4 +# BB#3: + movl %esi, %ecx +.LBB40_4: + movl %ecx, 4(%ebp) + jne .LBB40_6 +# BB#5: + movl %eax, %edx +.LBB40_6: + movl %edx, 8(%ebp) + addl $36, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end40: + .size mcl_fp_montRed3Lbmi2, .Lfunc_end40-mcl_fp_montRed3Lbmi2 + + .globl mcl_fp_addPre3Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre3Lbmi2,@function +mcl_fp_addPre3Lbmi2: # @mcl_fp_addPre3Lbmi2 +# BB#0: + pushl %esi + movl 16(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 12(%esp), %esi + addl (%esi), %ecx + adcl 4(%esi), %edx + movl 8(%eax), %eax + adcl 8(%esi), %eax + movl 8(%esp), %esi + movl %ecx, (%esi) + movl %edx, 4(%esi) + movl %eax, 8(%esi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + retl +.Lfunc_end41: + .size mcl_fp_addPre3Lbmi2, .Lfunc_end41-mcl_fp_addPre3Lbmi2 + + .globl mcl_fp_subPre3Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre3Lbmi2,@function +mcl_fp_subPre3Lbmi2: # @mcl_fp_subPre3Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %esi + xorl %eax, %eax + movl 20(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %esi + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl 12(%esp), %edi + movl %edx, (%edi) + movl %esi, 4(%edi) + movl %ecx, 8(%edi) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + retl +.Lfunc_end42: + .size mcl_fp_subPre3Lbmi2, .Lfunc_end42-mcl_fp_subPre3Lbmi2 + + .globl mcl_fp_shr1_3Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_3Lbmi2,@function +mcl_fp_shr1_3Lbmi2: # @mcl_fp_shr1_3Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl 8(%eax), %ecx + movl (%eax), %edx + movl 4(%eax), %eax + shrdl $1, %eax, %edx + movl 8(%esp), %esi + movl %edx, (%esi) + shrdl $1, %ecx, %eax + movl %eax, 4(%esi) + shrl %ecx + movl %ecx, 8(%esi) + popl %esi + retl +.Lfunc_end43: + .size mcl_fp_shr1_3Lbmi2, .Lfunc_end43-mcl_fp_shr1_3Lbmi2 + + .globl mcl_fp_add3Lbmi2 + .align 16, 0x90 + .type mcl_fp_add3Lbmi2,@function +mcl_fp_add3Lbmi2: # @mcl_fp_add3Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %ecx + movl 20(%esp), %esi + addl (%esi), %eax + adcl 4(%esi), %ecx + movl 8(%edx), %edx + adcl 8(%esi), %edx + movl 16(%esp), %esi + movl %eax, (%esi) + movl %ecx, 4(%esi) + movl %edx, 8(%esi) + sbbl %ebx, %ebx + andl $1, %ebx + movl 28(%esp), %edi + subl (%edi), %eax + sbbl 4(%edi), %ecx + sbbl 8(%edi), %edx + sbbl $0, %ebx + testb $1, %bl + jne .LBB44_2 +# BB#1: # %nocarry + movl %eax, (%esi) + movl %ecx, 4(%esi) + movl %edx, 8(%esi) +.LBB44_2: # %carry + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end44: + .size mcl_fp_add3Lbmi2, .Lfunc_end44-mcl_fp_add3Lbmi2 + + .globl mcl_fp_addNF3Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF3Lbmi2,@function +mcl_fp_addNF3Lbmi2: # @mcl_fp_addNF3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 
28(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %ecx + movl 24(%esp), %esi + addl (%esi), %edx + adcl 4(%esi), %ecx + movl 8(%eax), %eax + adcl 8(%esi), %eax + movl 32(%esp), %ebp + movl %edx, %ebx + subl (%ebp), %ebx + movl %ecx, %edi + sbbl 4(%ebp), %edi + movl %eax, %esi + sbbl 8(%ebp), %esi + movl %esi, %ebp + sarl $31, %ebp + testl %ebp, %ebp + js .LBB45_2 +# BB#1: + movl %ebx, %edx +.LBB45_2: + movl 20(%esp), %ebx + movl %edx, (%ebx) + js .LBB45_4 +# BB#3: + movl %edi, %ecx +.LBB45_4: + movl %ecx, 4(%ebx) + js .LBB45_6 +# BB#5: + movl %esi, %eax +.LBB45_6: + movl %eax, 8(%ebx) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end45: + .size mcl_fp_addNF3Lbmi2, .Lfunc_end45-mcl_fp_addNF3Lbmi2 + + .globl mcl_fp_sub3Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub3Lbmi2,@function +mcl_fp_sub3Lbmi2: # @mcl_fp_sub3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %edx + movl (%edx), %ecx + movl 4(%edx), %eax + xorl %ebx, %ebx + movl 28(%esp), %esi + subl (%esi), %ecx + sbbl 4(%esi), %eax + movl 8(%edx), %edx + sbbl 8(%esi), %edx + movl 20(%esp), %esi + movl %ecx, (%esi) + movl %eax, 4(%esi) + movl %edx, 8(%esi) + sbbl $0, %ebx + testb $1, %bl + je .LBB46_2 +# BB#1: # %carry + movl 32(%esp), %edi + movl 4(%edi), %ebx + movl 8(%edi), %ebp + addl (%edi), %ecx + movl %ecx, (%esi) + adcl %eax, %ebx + movl %ebx, 4(%esi) + adcl %edx, %ebp + movl %ebp, 8(%esi) +.LBB46_2: # %nocarry + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end46: + .size mcl_fp_sub3Lbmi2, .Lfunc_end46-mcl_fp_sub3Lbmi2 + + .globl mcl_fp_subNF3Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF3Lbmi2,@function +mcl_fp_subNF3Lbmi2: # @mcl_fp_subNF3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 28(%esp), %esi + subl (%esi), %ecx + sbbl 4(%esi), %edx + movl 8(%eax), %eax + sbbl 8(%esi), %eax + movl %eax, %esi + sarl $31, %esi + movl %esi, %edi + shldl $1, %eax, %edi + movl 32(%esp), %ebx + andl (%ebx), %edi + movl 8(%ebx), %ebp + andl %esi, %ebp + andl 4(%ebx), %esi + addl %ecx, %edi + adcl %edx, %esi + movl 20(%esp), %ecx + movl %edi, (%ecx) + movl %esi, 4(%ecx) + adcl %eax, %ebp + movl %ebp, 8(%ecx) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end47: + .size mcl_fp_subNF3Lbmi2, .Lfunc_end47-mcl_fp_subNF3Lbmi2 + + .globl mcl_fpDbl_add3Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add3Lbmi2,@function +mcl_fpDbl_add3Lbmi2: # @mcl_fpDbl_add3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + pushl %eax + movl 32(%esp), %esi + movl 20(%esi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 16(%esi), %edi + movl 12(%esi), %ebx + movl (%esi), %edx + movl 28(%esp), %eax + addl (%eax), %edx + movl 24(%esp), %ecx + movl %edx, (%ecx) + movl 8(%esi), %edx + movl 4(%esi), %esi + adcl 4(%eax), %esi + adcl 8(%eax), %edx + movl %esi, 4(%ecx) + movl 20(%eax), %ebp + movl %edx, 8(%ecx) + movl 12(%eax), %esi + movl 16(%eax), %edx + adcl %ebx, %esi + adcl %edi, %edx + adcl (%esp), %ebp # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + movl 36(%esp), %ecx + movl %esi, %ebx + subl (%ecx), %ebx + movl %edx, %edi + sbbl 4(%ecx), %edi + movl %edi, (%esp) # 4-byte Spill + movl %ebp, %ecx + movl 36(%esp), %edi + sbbl 8(%edi), %ecx + sbbl $0, %eax + andl $1, %eax + jne .LBB48_2 +# BB#1: + movl %ecx, %ebp +.LBB48_2: + testb %al, %al + jne .LBB48_4 +# BB#3: + movl %ebx, %esi +.LBB48_4: + movl 24(%esp), %eax + movl %esi, 12(%eax) + jne .LBB48_6 +# BB#5: + movl (%esp), %edx # 
4-byte Reload +.LBB48_6: + movl %edx, 16(%eax) + movl %ebp, 20(%eax) + addl $4, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end48: + .size mcl_fpDbl_add3Lbmi2, .Lfunc_end48-mcl_fpDbl_add3Lbmi2 + + .globl mcl_fpDbl_sub3Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub3Lbmi2,@function +mcl_fpDbl_sub3Lbmi2: # @mcl_fpDbl_sub3Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %esi + movl 28(%esp), %ebx + subl (%ebx), %edx + sbbl 4(%ebx), %esi + movl 8(%ecx), %ebp + sbbl 8(%ebx), %ebp + movl 20(%esp), %eax + movl %edx, (%eax) + movl 12(%ecx), %edi + sbbl 12(%ebx), %edi + movl %esi, 4(%eax) + movl 16(%ecx), %esi + sbbl 16(%ebx), %esi + movl 20(%ebx), %ebx + movl 20(%ecx), %edx + movl %ebp, 8(%eax) + sbbl %ebx, %edx + movl $0, %ecx + sbbl $0, %ecx + andl $1, %ecx + movl 32(%esp), %ebp + jne .LBB49_1 +# BB#2: + xorl %ebx, %ebx + jmp .LBB49_3 +.LBB49_1: + movl 8(%ebp), %ebx +.LBB49_3: + testb %cl, %cl + movl $0, %eax + jne .LBB49_4 +# BB#5: + xorl %ecx, %ecx + jmp .LBB49_6 +.LBB49_4: + movl (%ebp), %ecx + movl 4(%ebp), %eax +.LBB49_6: + addl %edi, %ecx + adcl %esi, %eax + movl 20(%esp), %esi + movl %ecx, 12(%esi) + movl %eax, 16(%esi) + adcl %edx, %ebx + movl %ebx, 20(%esi) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end49: + .size mcl_fpDbl_sub3Lbmi2, .Lfunc_end49-mcl_fpDbl_sub3Lbmi2 + + .globl mcl_fp_mulUnitPre4Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre4Lbmi2,@function +mcl_fp_mulUnitPre4Lbmi2: # @mcl_fp_mulUnitPre4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 28(%esp), %edx + movl 24(%esp), %eax + mulxl 4(%eax), %esi, %ecx + mulxl (%eax), %edi, %ebx + addl %esi, %ebx + mulxl 8(%eax), %ebp, %esi + adcl %ecx, %ebp + mulxl 12(%eax), %eax, %ecx + movl 20(%esp), %edx + movl %edi, (%edx) + movl %ebx, 4(%edx) + movl %ebp, 8(%edx) + adcl %esi, %eax + movl %eax, 12(%edx) + adcl $0, %ecx + movl %ecx, 16(%edx) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end50: + .size mcl_fp_mulUnitPre4Lbmi2, .Lfunc_end50-mcl_fp_mulUnitPre4Lbmi2 + + .globl mcl_fpDbl_mulPre4Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre4Lbmi2,@function +mcl_fpDbl_mulPre4Lbmi2: # @mcl_fpDbl_mulPre4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $28, %esp + movl 52(%esp), %eax + movl (%eax), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 56(%esp), %ecx + movl (%ecx), %ebp + mulxl %ebp, %esi, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl %ebx, %edx + movl %ebx, %ecx + mulxl %ebp, %edx, %ebx + movl %edx, 8(%esp) # 4-byte Spill + addl %esi, %ebx + movl 8(%eax), %edx + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, %esi + mulxl %ebp, %eax, %edi + adcl 16(%esp), %eax # 4-byte Folded Reload + movl 12(%esi), %edx + movl %edx, 16(%esp) # 4-byte Spill + mulxl %ebp, %ebp, %esi + adcl %edi, %ebp + movl 48(%esp), %edx + movl 8(%esp), %edi # 4-byte Reload + movl %edi, (%edx) + adcl $0, %esi + movl 56(%esp), %edx + movl 4(%edx), %edi + movl %ecx, %edx + mulxl %edi, %ecx, %edx + movl %edx, 8(%esp) # 4-byte Spill + addl %ebx, %ecx + movl 24(%esp), %edx # 4-byte Reload + mulxl %edi, %ebx, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl %eax, %ebx + movl 20(%esp), %edx # 4-byte Reload + mulxl %edi, %eax, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %ebp, %eax + movl 16(%esp), %edx # 4-byte Reload + mulxl %edi, %edi, %edx + adcl %esi, %edi + sbbl %ebp, %ebp + andl $1, %ebp + addl 8(%esp), %ebx # 4-byte 
Folded Reload + adcl 4(%esp), %eax # 4-byte Folded Reload + adcl (%esp), %edi # 4-byte Folded Reload + adcl %edx, %ebp + movl 48(%esp), %edx + movl %ecx, 4(%edx) + movl 56(%esp), %ecx + movl 8(%ecx), %ecx + movl 12(%esp), %edx # 4-byte Reload + mulxl %ecx, %edx, %esi + movl %esi, 8(%esp) # 4-byte Spill + addl %ebx, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 24(%esp), %edx # 4-byte Reload + mulxl %ecx, %ebx, %edx + movl %edx, 24(%esp) # 4-byte Spill + adcl %eax, %ebx + movl 20(%esp), %edx # 4-byte Reload + mulxl %ecx, %esi, %eax + movl %eax, 20(%esp) # 4-byte Spill + adcl %edi, %esi + movl 16(%esp), %edx # 4-byte Reload + mulxl %ecx, %edi, %eax + adcl %ebp, %edi + sbbl %ebp, %ebp + andl $1, %ebp + addl 8(%esp), %ebx # 4-byte Folded Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + adcl %eax, %ebp + movl 48(%esp), %eax + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + movl 56(%esp), %eax + movl 12(%eax), %edx + movl 52(%esp), %eax + mulxl (%eax), %ecx, %eax + movl %eax, 20(%esp) # 4-byte Spill + addl %ebx, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 52(%esp), %ebx + mulxl 4(%ebx), %ecx, %eax + movl %eax, 16(%esp) # 4-byte Spill + adcl %esi, %ecx + mulxl 8(%ebx), %eax, %esi + adcl %edi, %eax + mulxl 12(%ebx), %edi, %edx + adcl %ebp, %edi + sbbl %ebp, %ebp + andl $1, %ebp + addl 20(%esp), %ecx # 4-byte Folded Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + adcl %esi, %edi + movl 48(%esp), %esi + movl 24(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%esi) + movl %ecx, 16(%esi) + movl %eax, 20(%esi) + movl %edi, 24(%esi) + adcl %edx, %ebp + movl %ebp, 28(%esi) + addl $28, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end51: + .size mcl_fpDbl_mulPre4Lbmi2, .Lfunc_end51-mcl_fpDbl_mulPre4Lbmi2 + + .globl mcl_fpDbl_sqrPre4Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre4Lbmi2,@function +mcl_fpDbl_sqrPre4Lbmi2: # @mcl_fpDbl_sqrPre4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $36, %esp + movl 60(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %edi + movl 56(%esp), %ebx + movl %esi, %edx + mulxl %esi, %eax, %ebp + movl %eax, (%ebx) + movl %edi, %edx + mulxl %esi, %edx, %ecx + movl %edx, 28(%esp) # 4-byte Spill + movl %ecx, 16(%esp) # 4-byte Spill + movl %ebp, %eax + addl %edx, %eax + movl 60(%esp), %edx + movl 8(%edx), %edx + movl %edx, (%esp) # 4-byte Spill + mulxl %esi, %edx, %ebx + movl %edx, 20(%esp) # 4-byte Spill + movl %ebx, 24(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl %edx, %ebp + movl 60(%esp), %ecx + movl 12(%ecx), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %esi, %esi, %ecx + adcl %ebx, %esi + adcl $0, %ecx + addl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + movl %edi, %edx + mulxl %edi, %ebx, %eax + movl %eax, 8(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl 32(%esp), %edx # 4-byte Reload + mulxl %edi, %ebp, %eax + movl %eax, 4(%esp) # 4-byte Spill + movl (%esp), %edx # 4-byte Reload + mulxl %edi, %edi, %eax + movl %eax, 12(%esp) # 4-byte Spill + adcl %edi, %esi + adcl %ecx, %ebp + sbbl %ecx, %ecx + andl $1, %ecx + addl 16(%esp), %ebx # 4-byte Folded Reload + adcl 8(%esp), %esi # 4-byte Folded Reload + adcl %eax, %ebp + adcl 4(%esp), %ecx # 4-byte Folded Reload + addl 20(%esp), %ebx # 4-byte Folded Reload + adcl %edi, %esi + mulxl %edx, %edi, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl %edx, %eax + adcl %ebp, %edi + movl 32(%esp), %edx # 4-byte Reload + mulxl %eax, %ebp, %edx + adcl %ecx, %ebp + sbbl %eax, %eax + andl $1, 
%eax + addl 24(%esp), %esi # 4-byte Folded Reload + adcl 12(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %ebp # 4-byte Folded Reload + adcl %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%eax) + movl %ebx, 8(%eax) + movl 60(%esp), %eax + movl 12(%eax), %edx + mulxl (%eax), %ebx, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + addl %esi, %ebx + mulxl 4(%eax), %esi, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + adcl %edi, %esi + mulxl 8(%eax), %ecx, %edi + adcl %ebp, %ecx + mulxl %edx, %ebp, %edx + adcl 24(%esp), %ebp # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 32(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + adcl %edi, %ebp + movl 56(%esp), %edi + movl %ebx, 12(%edi) + movl %esi, 16(%edi) + movl %ecx, 20(%edi) + movl %ebp, 24(%edi) + adcl %edx, %eax + movl %eax, 28(%edi) + addl $36, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end52: + .size mcl_fpDbl_sqrPre4Lbmi2, .Lfunc_end52-mcl_fpDbl_sqrPre4Lbmi2 + + .globl mcl_fp_mont4Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont4Lbmi2,@function +mcl_fp_mont4Lbmi2: # @mcl_fp_mont4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $64, %esp + movl 88(%esp), %eax + movl 12(%eax), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 92(%esp), %ecx + movl (%ecx), %ecx + movl 8(%eax), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl (%eax), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 4(%eax), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + mulxl %ecx, %eax, %ebp + movl %eax, 60(%esp) # 4-byte Spill + movl %esi, %edx + mulxl %ecx, %edx, %eax + movl %edx, 56(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl %ecx, %ebx, %esi + movl %edi, %edx + mulxl %ecx, %edx, %ecx + movl %edx, 8(%esp) # 4-byte Spill + addl %ebx, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 96(%esp), %ebx + movl -4(%ebx), %eax + movl %eax, 44(%esp) # 4-byte Spill + imull %eax, %edx + movl (%ebx), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 4(%ebx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + mulxl %ecx, %esi, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + mulxl %eax, %ecx, %eax + addl %esi, %eax + movl %eax, %ebp + movl 8(%ebx), %eax + movl %eax, 52(%esp) # 4-byte Spill + mulxl %eax, %edi, %eax + movl %eax, 4(%esp) # 4-byte Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl 12(%ebx), %eax + movl %eax, 48(%esp) # 4-byte Spill + mulxl %eax, %esi, %ebx + adcl 4(%esp), %esi # 4-byte Folded Reload + adcl $0, %ebx + addl 8(%esp), %ecx # 4-byte Folded Reload + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %ebx # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl 4(%eax), %edx + mulxl 40(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + mulxl 28(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + mulxl 32(%esp), %ecx, %ebp # 4-byte Folded Reload + addl (%esp), %ebp # 4-byte Folded 
Reload + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + movl 20(%esp), %edx # 4-byte Reload + adcl 4(%esp), %edx # 4-byte Folded Reload + movl 24(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl %edi, %ebp + adcl %esi, 12(%esp) # 4-byte Folded Spill + adcl %ebx, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, %edx + imull 44(%esp), %edx # 4-byte Folded Reload + mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 56(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + addl %ecx, %esi + mulxl 52(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl %eax, %ebx + mulxl 48(%esp), %edi, %eax # 4-byte Folded Reload + adcl %ecx, %edi + adcl $0, %eax + movl 16(%esp), %ecx # 4-byte Reload + andl $1, %ecx + movl 4(%esp), %edx # 4-byte Reload + addl 8(%esp), %edx # 4-byte Folded Reload + adcl %ebp, %esi + movl %esi, 8(%esp) # 4-byte Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 92(%esp), %edx + movl 8(%edx), %edx + mulxl 40(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + mulxl 28(%esp), %esi, %eax # 4-byte Folded Reload + mulxl 32(%esp), %ebp, %ecx # 4-byte Folded Reload + addl %esi, %ecx + movl %ecx, %esi + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + movl 20(%esp), %eax # 4-byte Reload + adcl 12(%esp), %eax # 4-byte Folded Reload + movl 24(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 8(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %esi + movl %esi, 12(%esp) # 4-byte Spill + adcl %edi, %ecx + movl %ecx, %edi + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl %ebp, %edx + imull 44(%esp), %edx # 4-byte Folded Reload + mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 56(%esp), %esi, %ebx # 4-byte Folded Reload + movl %esi, 8(%esp) # 4-byte Spill + addl %ecx, %ebx + mulxl 52(%esp), %ecx, %esi # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + adcl %eax, %ecx + mulxl 48(%esp), %eax, %esi # 4-byte Folded Reload + adcl 4(%esp), %eax # 4-byte Folded Reload + adcl $0, %esi + movl 16(%esp), %edx # 4-byte Reload + andl $1, %edx + addl %ebp, 8(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + adcl %edi, %ecx + adcl 20(%esp), %eax # 4-byte Folded Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 92(%esp), %edx + movl 12(%edx), %edx + mulxl 28(%esp), %ebp, %edi # 4-byte Folded Reload + movl %ebp, 28(%esp) # 4-byte Spill + movl %edi, 24(%esp) # 4-byte Spill + mulxl 32(%esp), %edi, %ebp # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + addl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 28(%esp) # 4-byte Spill + mulxl 40(%esp), %ebp, %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + mulxl 36(%esp), %edi, %edx # 4-byte Folded 
Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + adcl %ebp, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 32(%esp), %ebp # 4-byte Reload + addl %ebx, %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl %ecx, 28(%esp) # 4-byte Folded Spill + adcl %eax, %edi + adcl %esi, 36(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + sbbl %ecx, %ecx + movl 44(%esp), %edx # 4-byte Reload + imull %ebp, %edx + mulxl 56(%esp), %eax, %esi # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl %edx, %ebx + mulxl 60(%esp), %ebp, %eax # 4-byte Folded Reload + addl %esi, %ebp + mulxl 52(%esp), %esi, %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl %eax, %esi + movl %ebx, %edx + mulxl 48(%esp), %edx, %eax # 4-byte Folded Reload + adcl 24(%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + andl $1, %ecx + movl 44(%esp), %ebx # 4-byte Reload + addl 32(%esp), %ebx # 4-byte Folded Reload + adcl 28(%esp), %ebp # 4-byte Folded Reload + adcl %edi, %esi + adcl 36(%esp), %edx # 4-byte Folded Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + adcl $0, %ecx + movl %ebp, %edi + subl 56(%esp), %edi # 4-byte Folded Reload + movl %esi, %ebx + sbbl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + movl %edx, %ebx + sbbl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + movl %eax, %ebx + sbbl 48(%esp), %ebx # 4-byte Folded Reload + sbbl $0, %ecx + andl $1, %ecx + jne .LBB53_2 +# BB#1: + movl %edi, %ebp +.LBB53_2: + movl 84(%esp), %edi + movl %ebp, (%edi) + testb %cl, %cl + jne .LBB53_4 +# BB#3: + movl 56(%esp), %esi # 4-byte Reload +.LBB53_4: + movl %esi, 4(%edi) + jne .LBB53_6 +# BB#5: + movl 60(%esp), %edx # 4-byte Reload +.LBB53_6: + movl %edx, 8(%edi) + jne .LBB53_8 +# BB#7: + movl %ebx, %eax +.LBB53_8: + movl %eax, 12(%edi) + addl $64, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end53: + .size mcl_fp_mont4Lbmi2, .Lfunc_end53-mcl_fp_mont4Lbmi2 + + .globl mcl_fp_montNF4Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF4Lbmi2,@function +mcl_fp_montNF4Lbmi2: # @mcl_fp_montNF4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $52, %esp + movl 76(%esp), %esi + movl (%esi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 4(%esi), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 80(%esp), %ecx + movl (%ecx), %ecx + mulxl %ecx, %edi, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, %edx + mulxl %ecx, %ebp, %eax + movl %ebp, 40(%esp) # 4-byte Spill + addl %edi, %eax + movl 8(%esi), %edx + movl %edx, 16(%esp) # 4-byte Spill + mulxl %ecx, %ebx, %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl 12(%esi), %edx + movl %edx, 20(%esp) # 4-byte Spill + mulxl %ecx, %esi, %edi + adcl 44(%esp), %esi # 4-byte Folded Reload + adcl $0, %edi + movl 84(%esp), %ecx + movl -4(%ecx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl %ebp, %edx + imull %ecx, %edx + movl 84(%esp), %ecx + movl (%ecx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %ebp + movl %ebp, 8(%esp) # 4-byte Spill + addl 40(%esp), %ecx # 4-byte Folded Reload + movl 84(%esp), %ecx + movl 4(%ecx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl %eax, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl 8(%eax), %eax + movl %eax, 36(%esp) # 4-byte Spill + mulxl %eax, %ecx, %eax + adcl %ebx, 
%ecx + movl %ecx, %ebp + movl 84(%esp), %ecx + movl 12(%ecx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + mulxl %ecx, %ebx, %edx + adcl %esi, %ebx + adcl $0, %edi + movl 8(%esp), %ecx # 4-byte Reload + addl %ecx, 12(%esp) # 4-byte Folded Spill + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 4(%esp) # 4-byte Spill + adcl %eax, %ebx + adcl %edx, %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl 4(%eax), %edx + mulxl 24(%esp), %esi, %edi # 4-byte Folded Reload + mulxl 28(%esp), %ecx, %eax # 4-byte Folded Reload + addl %esi, %eax + mulxl 16(%esp), %ebp, %esi # 4-byte Folded Reload + movl %esi, (%esp) # 4-byte Spill + adcl %edi, %ebp + mulxl 20(%esp), %edi, %esi # 4-byte Folded Reload + adcl (%esp), %edi # 4-byte Folded Reload + adcl $0, %esi + addl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + adcl 4(%esp), %eax # 4-byte Folded Reload + adcl %ebx, %ebp + adcl 8(%esp), %edi # 4-byte Folded Reload + adcl $0, %esi + movl %ecx, %edx + imull 48(%esp), %edx # 4-byte Folded Reload + mulxl 44(%esp), %ebx, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + addl (%esp), %ebx # 4-byte Folded Reload + mulxl 40(%esp), %ebx, %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl %eax, %ebx + movl %ebx, %eax + mulxl 36(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl %ebp, %ebx + movl %ebx, %ebp + mulxl 32(%esp), %ebx, %edx # 4-byte Folded Reload + adcl %edi, %ebx + adcl $0, %esi + addl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 4(%esp) # 4-byte Spill + adcl %ecx, %ebx + movl %ebx, 8(%esp) # 4-byte Spill + adcl %edx, %esi + movl 80(%esp), %ecx + movl 8(%ecx), %edx + mulxl 24(%esp), %ecx, %ebx # 4-byte Folded Reload + mulxl 28(%esp), %eax, %ebp # 4-byte Folded Reload + addl %ecx, %ebp + mulxl 16(%esp), %edi, %ecx # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + adcl %ebx, %edi + mulxl 20(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + adcl $0, %ecx + addl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) # 4-byte Spill + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl 8(%esp), %edi # 4-byte Folded Reload + adcl %esi, %ebx + adcl $0, %ecx + movl %eax, %edx + imull 48(%esp), %edx # 4-byte Folded Reload + mulxl 44(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + addl (%esp), %eax # 4-byte Folded Reload + mulxl 40(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + adcl %ebp, %eax + mulxl 36(%esp), %ebp, %esi # 4-byte Folded Reload + movl %esi, (%esp) # 4-byte Spill + adcl %edi, %ebp + mulxl 32(%esp), %esi, %edx # 4-byte Folded Reload + adcl %ebx, %esi + adcl $0, %ecx + addl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + adcl (%esp), %esi # 4-byte Folded Reload + adcl %edx, %ecx + movl 80(%esp), %eax + movl 12(%eax), %edx + mulxl 24(%esp), %ebx, %ebp # 4-byte Folded Reload + mulxl 28(%esp), %edi, %eax # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + addl %ebx, %eax + mulxl 16(%esp), %edi, %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + adcl %ebp, %edi + mulxl 20(%esp), %ebp, %ebx # 4-byte Folded Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl $0, %ebx + movl 28(%esp), %edx # 4-byte Reload + addl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 
12(%esp), %eax # 4-byte Folded Reload + adcl %esi, %edi + adcl %ecx, %ebp + adcl $0, %ebx + movl 48(%esp), %edx # 4-byte Reload + imull 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + mulxl 44(%esp), %ecx, %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + addl 28(%esp), %ecx # 4-byte Folded Reload + mulxl 40(%esp), %esi, %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl %eax, %esi + movl 48(%esp), %edx # 4-byte Reload + mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + adcl %edi, %ecx + movl 48(%esp), %edx # 4-byte Reload + mulxl 32(%esp), %eax, %edx # 4-byte Folded Reload + adcl %ebp, %eax + adcl $0, %ebx + addl 24(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + adcl %edx, %ebx + movl %esi, %edi + subl 44(%esp), %edi # 4-byte Folded Reload + movl %ecx, %ebp + sbbl 40(%esp), %ebp # 4-byte Folded Reload + movl %eax, %edx + sbbl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %ebx, %edx + sbbl 32(%esp), %edx # 4-byte Folded Reload + testl %edx, %edx + js .LBB54_2 +# BB#1: + movl %edi, %esi +.LBB54_2: + movl 72(%esp), %edi + movl %esi, (%edi) + js .LBB54_4 +# BB#3: + movl %ebp, %ecx +.LBB54_4: + movl %ecx, 4(%edi) + js .LBB54_6 +# BB#5: + movl 48(%esp), %eax # 4-byte Reload +.LBB54_6: + movl %eax, 8(%edi) + js .LBB54_8 +# BB#7: + movl %edx, %ebx +.LBB54_8: + movl %ebx, 12(%edi) + addl $52, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end54: + .size mcl_fp_montNF4Lbmi2, .Lfunc_end54-mcl_fp_montNF4Lbmi2 + + .globl mcl_fp_montRed4Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed4Lbmi2,@function +mcl_fp_montRed4Lbmi2: # @mcl_fp_montRed4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $52, %esp + movl 80(%esp), %ecx + movl -4(%ecx), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl (%ecx), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 76(%esp), %ebp + movl (%ebp), %edx + movl %edx, 36(%esp) # 4-byte Spill + imull %eax, %edx + movl 12(%ecx), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 8(%ecx), %ebx + movl %ebx, 48(%esp) # 4-byte Spill + movl 4(%ecx), %eax + movl %eax, 32(%esp) # 4-byte Spill + mulxl %esi, %esi, %ecx + movl %esi, 16(%esp) # 4-byte Spill + movl %ecx, 24(%esp) # 4-byte Spill + mulxl %ebx, %esi, %ecx + movl %esi, 12(%esp) # 4-byte Spill + movl %ecx, 20(%esp) # 4-byte Spill + mulxl %eax, %ebx, %ecx + mulxl %edi, %edx, %esi + addl %ebx, %esi + movl %ecx, %edi + adcl 12(%esp), %edi # 4-byte Folded Reload + movl 20(%esp), %ebx # 4-byte Reload + adcl 16(%esp), %ebx # 4-byte Folded Reload + movl 24(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 36(%esp), %edx # 4-byte Folded Reload + adcl 4(%ebp), %esi + adcl 8(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + adcl 12(%ebp), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + adcl 16(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 28(%ebp), %ecx + movl 24(%ebp), %edx + movl 20(%ebp), %edi + adcl $0, %edi + movl %edi, 8(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 16(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + sbbl %ebx, %ebx + andl $1, %ebx + movl %esi, %edx + imull 40(%esp), %edx # 4-byte Folded Reload + mulxl %eax, %ebp, %edi + mulxl 44(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + addl %ebp, %eax + mulxl 48(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + adcl %edi, 
%ebp + movl 28(%esp), %ecx # 4-byte Reload + mulxl %ecx, %edi, %edx + adcl (%esp), %edi # 4-byte Folded Reload + adcl $0, %edx + addl %esi, 4(%esp) # 4-byte Folded Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + adcl 20(%esp), %ebp # 4-byte Folded Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl $0, 16(%esp) # 4-byte Folded Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + adcl $0, %ebx + movl %eax, %edx + imull 40(%esp), %edx # 4-byte Folded Reload + mulxl %ecx, %esi, %ecx + movl %esi, 20(%esp) # 4-byte Spill + movl %ecx, 24(%esp) # 4-byte Spill + mulxl 32(%esp), %esi, %ecx # 4-byte Folded Reload + movl %esi, (%esp) # 4-byte Spill + movl %ecx, 4(%esp) # 4-byte Spill + mulxl 44(%esp), %esi, %ecx # 4-byte Folded Reload + movl %esi, 8(%esp) # 4-byte Spill + addl (%esp), %ecx # 4-byte Folded Reload + mulxl 48(%esp), %esi, %edx # 4-byte Folded Reload + adcl 4(%esp), %esi # 4-byte Folded Reload + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + movl 24(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %eax, 8(%esp) # 4-byte Folded Spill + adcl %ebp, %ecx + adcl %edi, %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 20(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + adcl $0, %ebx + movl 40(%esp), %edx # 4-byte Reload + imull %ecx, %edx + mulxl 44(%esp), %esi, %eax # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + mulxl 32(%esp), %ebp, %esi # 4-byte Folded Reload + addl %eax, %ebp + movl %edx, %eax + mulxl 48(%esp), %edi, %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl %esi, %edi + movl %eax, %edx + mulxl 28(%esp), %edx, %esi # 4-byte Folded Reload + adcl 16(%esp), %edx # 4-byte Folded Reload + adcl $0, %esi + addl %ecx, 40(%esp) # 4-byte Folded Spill + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + adcl 24(%esp), %edx # 4-byte Folded Reload + adcl 36(%esp), %esi # 4-byte Folded Reload + adcl $0, %ebx + movl %ebp, %ecx + subl 44(%esp), %ecx # 4-byte Folded Reload + movl %edi, %eax + sbbl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl %edx, %eax + sbbl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl %esi, %eax + sbbl 28(%esp), %eax # 4-byte Folded Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB55_2 +# BB#1: + movl %ecx, %ebp +.LBB55_2: + movl 72(%esp), %ecx + movl %ebp, (%ecx) + testb %bl, %bl + jne .LBB55_4 +# BB#3: + movl 44(%esp), %edi # 4-byte Reload +.LBB55_4: + movl %edi, 4(%ecx) + jne .LBB55_6 +# BB#5: + movl 48(%esp), %edx # 4-byte Reload +.LBB55_6: + movl %edx, 8(%ecx) + jne .LBB55_8 +# BB#7: + movl %eax, %esi +.LBB55_8: + movl %esi, 12(%ecx) + addl $52, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end55: + .size mcl_fp_montRed4Lbmi2, .Lfunc_end55-mcl_fp_montRed4Lbmi2 + + .globl mcl_fp_addPre4Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre4Lbmi2,@function +mcl_fp_addPre4Lbmi2: # @mcl_fp_addPre4Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 20(%esp), %esi + addl (%esi), %ecx + adcl 4(%esi), %edx + movl 12(%eax), %edi + movl 8(%eax), %eax + adcl 8(%esi), %eax + movl 12(%esi), %esi + movl 16(%esp), %ebx + movl %ecx, (%ebx) + movl %edx, 4(%ebx) + movl %eax, 8(%ebx) + adcl 
%edi, %esi + movl %esi, 12(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end56: + .size mcl_fp_addPre4Lbmi2, .Lfunc_end56-mcl_fp_addPre4Lbmi2 + + .globl mcl_fp_subPre4Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre4Lbmi2,@function +mcl_fp_subPre4Lbmi2: # @mcl_fp_subPre4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %esi + xorl %eax, %eax + movl 28(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %esi + movl 8(%ecx), %ebx + sbbl 8(%edi), %ebx + movl 12(%edi), %edi + movl 12(%ecx), %ecx + movl 20(%esp), %ebp + movl %edx, (%ebp) + movl %esi, 4(%ebp) + movl %ebx, 8(%ebp) + sbbl %edi, %ecx + movl %ecx, 12(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end57: + .size mcl_fp_subPre4Lbmi2, .Lfunc_end57-mcl_fp_subPre4Lbmi2 + + .globl mcl_fp_shr1_4Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_4Lbmi2,@function +mcl_fp_shr1_4Lbmi2: # @mcl_fp_shr1_4Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %eax + movl 12(%eax), %ecx + movl 8(%eax), %edx + movl (%eax), %esi + movl 4(%eax), %eax + shrdl $1, %eax, %esi + movl 12(%esp), %edi + movl %esi, (%edi) + shrdl $1, %edx, %eax + movl %eax, 4(%edi) + shrdl $1, %ecx, %edx + movl %edx, 8(%edi) + shrl %ecx + movl %ecx, 12(%edi) + popl %esi + popl %edi + retl +.Lfunc_end58: + .size mcl_fp_shr1_4Lbmi2, .Lfunc_end58-mcl_fp_shr1_4Lbmi2 + + .globl mcl_fp_add4Lbmi2 + .align 16, 0x90 + .type mcl_fp_add4Lbmi2,@function +mcl_fp_add4Lbmi2: # @mcl_fp_add4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 28(%esp), %edi + movl (%edi), %eax + movl 4(%edi), %ecx + movl 24(%esp), %esi + addl (%esi), %eax + adcl 4(%esi), %ecx + movl 8(%edi), %edx + adcl 8(%esi), %edx + movl 12(%esi), %esi + adcl 12(%edi), %esi + movl 20(%esp), %edi + movl %eax, (%edi) + movl %ecx, 4(%edi) + movl %edx, 8(%edi) + movl %esi, 12(%edi) + sbbl %ebx, %ebx + andl $1, %ebx + movl 32(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %ecx + sbbl 8(%ebp), %edx + sbbl 12(%ebp), %esi + sbbl $0, %ebx + testb $1, %bl + jne .LBB59_2 +# BB#1: # %nocarry + movl %eax, (%edi) + movl %ecx, 4(%edi) + movl %edx, 8(%edi) + movl %esi, 12(%edi) +.LBB59_2: # %carry + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end59: + .size mcl_fp_add4Lbmi2, .Lfunc_end59-mcl_fp_add4Lbmi2 + + .globl mcl_fp_addNF4Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF4Lbmi2,@function +mcl_fp_addNF4Lbmi2: # @mcl_fp_addNF4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $8, %esp + movl 36(%esp), %edx + movl (%edx), %esi + movl 4(%edx), %ecx + movl 32(%esp), %edi + addl (%edi), %esi + adcl 4(%edi), %ecx + movl 12(%edx), %ebp + movl 8(%edx), %edx + adcl 8(%edi), %edx + adcl 12(%edi), %ebp + movl 40(%esp), %eax + movl %esi, %ebx + subl (%eax), %ebx + movl %ecx, %edi + sbbl 4(%eax), %edi + movl %edi, (%esp) # 4-byte Spill + movl %edx, %edi + movl 40(%esp), %eax + sbbl 8(%eax), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %ebp, %edi + movl 40(%esp), %eax + sbbl 12(%eax), %edi + testl %edi, %edi + js .LBB60_2 +# BB#1: + movl %ebx, %esi +.LBB60_2: + movl 28(%esp), %ebx + movl %esi, (%ebx) + js .LBB60_4 +# BB#3: + movl (%esp), %ecx # 4-byte Reload +.LBB60_4: + movl %ecx, 4(%ebx) + js .LBB60_6 +# BB#5: + movl 4(%esp), %edx # 4-byte Reload +.LBB60_6: + movl %edx, 8(%ebx) + js .LBB60_8 +# BB#7: + movl %edi, %ebp +.LBB60_8: + movl %ebp, 12(%ebx) + addl $8, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl 
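+# mcl_fp_addNF4Lbmi2 follows the same pattern as the 2- and 3-limb variants
+# above: limb-wise add, trial subtraction of the modulus, then a sign-based
+# select of either the raw sum or the reduced difference.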
+.Lfunc_end60: + .size mcl_fp_addNF4Lbmi2, .Lfunc_end60-mcl_fp_addNF4Lbmi2 + + .globl mcl_fp_sub4Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub4Lbmi2,@function +mcl_fp_sub4Lbmi2: # @mcl_fp_sub4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 28(%esp), %edi + subl (%edi), %eax + sbbl 4(%edi), %ecx + movl 8(%esi), %edx + sbbl 8(%edi), %edx + movl 12(%esi), %esi + sbbl 12(%edi), %esi + movl 20(%esp), %edi + movl %eax, (%edi) + movl %ecx, 4(%edi) + movl %edx, 8(%edi) + movl %esi, 12(%edi) + sbbl $0, %ebx + testb $1, %bl + je .LBB61_2 +# BB#1: # %carry + movl 32(%esp), %ebx + addl (%ebx), %eax + movl 8(%ebx), %ebp + adcl 4(%ebx), %ecx + movl 12(%ebx), %ebx + movl %eax, (%edi) + movl %ecx, 4(%edi) + adcl %edx, %ebp + movl %ebp, 8(%edi) + adcl %esi, %ebx + movl %ebx, 12(%edi) +.LBB61_2: # %nocarry + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end61: + .size mcl_fp_sub4Lbmi2, .Lfunc_end61-mcl_fp_sub4Lbmi2 + + .globl mcl_fp_subNF4Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF4Lbmi2,@function +mcl_fp_subNF4Lbmi2: # @mcl_fp_subNF4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $8, %esp + movl 32(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %ecx + movl 36(%esp), %esi + subl (%esi), %edx + movl %edx, (%esp) # 4-byte Spill + sbbl 4(%esi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 12(%eax), %edi + movl 8(%eax), %edx + sbbl 8(%esi), %edx + sbbl 12(%esi), %edi + movl %edi, %esi + sarl $31, %esi + movl 40(%esp), %eax + movl 12(%eax), %ebp + andl %esi, %ebp + movl 8(%eax), %ecx + andl %esi, %ecx + movl 40(%esp), %eax + movl 4(%eax), %eax + andl %esi, %eax + movl 40(%esp), %ebx + andl (%ebx), %esi + addl (%esp), %esi # 4-byte Folded Reload + adcl 4(%esp), %eax # 4-byte Folded Reload + movl 28(%esp), %ebx + movl %esi, (%ebx) + adcl %edx, %ecx + movl %eax, 4(%ebx) + movl %ecx, 8(%ebx) + adcl %edi, %ebp + movl %ebp, 12(%ebx) + addl $8, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end62: + .size mcl_fp_subNF4Lbmi2, .Lfunc_end62-mcl_fp_subNF4Lbmi2 + + .globl mcl_fpDbl_add4Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add4Lbmi2,@function +mcl_fpDbl_add4Lbmi2: # @mcl_fpDbl_add4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $12, %esp + movl 40(%esp), %eax + movl (%eax), %edi + movl 4(%eax), %edx + movl 36(%esp), %esi + addl (%esi), %edi + adcl 4(%esi), %edx + movl 8(%eax), %ebx + adcl 8(%esi), %ebx + movl 12(%esi), %ebp + movl 32(%esp), %ecx + movl %edi, (%ecx) + movl 16(%esi), %edi + adcl 12(%eax), %ebp + adcl 16(%eax), %edi + movl %edx, 4(%ecx) + movl 28(%eax), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %ebx, 8(%ecx) + movl 24(%eax), %ebx + movl 20(%eax), %eax + movl %ebp, 12(%ecx) + movl 20(%esi), %edx + adcl %eax, %edx + movl 28(%esi), %ecx + movl 24(%esi), %ebp + adcl %ebx, %ebp + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + sbbl %ebx, %ebx + andl $1, %ebx + movl 44(%esp), %eax + movl %edi, %esi + subl (%eax), %esi + movl %esi, (%esp) # 4-byte Spill + movl %edx, %esi + sbbl 4(%eax), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl %ebp, %esi + sbbl 8(%eax), %esi + sbbl 12(%eax), %ecx + sbbl $0, %ebx + andl $1, %ebx + jne .LBB63_2 +# BB#1: + movl %esi, %ebp +.LBB63_2: + testb %bl, %bl + jne .LBB63_4 +# BB#3: + movl (%esp), %edi # 4-byte Reload +.LBB63_4: + movl 32(%esp), %eax + movl %edi, 16(%eax) + jne .LBB63_6 +# BB#5: + movl 4(%esp), %edx # 4-byte Reload +.LBB63_6: + movl %edx, 
20(%eax) + movl %ebp, 24(%eax) + movl 8(%esp), %edx # 4-byte Reload + jne .LBB63_8 +# BB#7: + movl %ecx, %edx +.LBB63_8: + movl %edx, 28(%eax) + addl $12, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end63: + .size mcl_fpDbl_add4Lbmi2, .Lfunc_end63-mcl_fpDbl_add4Lbmi2 + + .globl mcl_fpDbl_sub4Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub4Lbmi2,@function +mcl_fpDbl_sub4Lbmi2: # @mcl_fpDbl_sub4Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + pushl %eax + movl 28(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 32(%esp), %ebp + subl (%ebp), %edx + sbbl 4(%ebp), %esi + movl 8(%eax), %ebx + sbbl 8(%ebp), %ebx + movl 24(%esp), %ecx + movl %edx, (%ecx) + movl 12(%eax), %edx + sbbl 12(%ebp), %edx + movl %esi, 4(%ecx) + movl 16(%eax), %edi + sbbl 16(%ebp), %edi + movl %ebx, 8(%ecx) + movl 20(%ebp), %esi + movl %edx, 12(%ecx) + movl 20(%eax), %ebx + sbbl %esi, %ebx + movl 24(%ebp), %edx + movl 24(%eax), %esi + sbbl %edx, %esi + movl 28(%ebp), %edx + movl 28(%eax), %eax + sbbl %edx, %eax + movl %eax, (%esp) # 4-byte Spill + movl $0, %edx + sbbl $0, %edx + andl $1, %edx + movl 36(%esp), %ecx + movl (%ecx), %eax + jne .LBB64_1 +# BB#2: + xorl %ebp, %ebp + jmp .LBB64_3 +.LBB64_1: + movl 4(%ecx), %ebp +.LBB64_3: + testb %dl, %dl + jne .LBB64_5 +# BB#4: + movl $0, %eax +.LBB64_5: + jne .LBB64_6 +# BB#7: + movl $0, %edx + jmp .LBB64_8 +.LBB64_6: + movl 12(%ecx), %edx +.LBB64_8: + jne .LBB64_9 +# BB#10: + xorl %ecx, %ecx + jmp .LBB64_11 +.LBB64_9: + movl 8(%ecx), %ecx +.LBB64_11: + addl %edi, %eax + adcl %ebx, %ebp + movl 24(%esp), %edi + movl %eax, 16(%edi) + adcl %esi, %ecx + movl %ebp, 20(%edi) + movl %ecx, 24(%edi) + adcl (%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%edi) + addl $4, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end64: + .size mcl_fpDbl_sub4Lbmi2, .Lfunc_end64-mcl_fpDbl_sub4Lbmi2 + + .globl mcl_fp_mulUnitPre5Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre5Lbmi2,@function +mcl_fp_mulUnitPre5Lbmi2: # @mcl_fp_mulUnitPre5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $8, %esp + movl 36(%esp), %edx + movl 32(%esp), %ecx + mulxl 4(%ecx), %esi, %eax + mulxl (%ecx), %edi, %ebx + movl %edi, 4(%esp) # 4-byte Spill + addl %esi, %ebx + mulxl 8(%ecx), %ebp, %esi + adcl %eax, %ebp + mulxl 12(%ecx), %eax, %edi + movl %edi, (%esp) # 4-byte Spill + adcl %esi, %eax + mulxl 16(%ecx), %ecx, %edx + movl 28(%esp), %esi + movl 4(%esp), %edi # 4-byte Reload + movl %edi, (%esi) + movl %ebx, 4(%esi) + movl %ebp, 8(%esi) + movl %eax, 12(%esi) + adcl (%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esi) + adcl $0, %edx + movl %edx, 20(%esi) + addl $8, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end65: + .size mcl_fp_mulUnitPre5Lbmi2, .Lfunc_end65-mcl_fp_mulUnitPre5Lbmi2 + + .globl mcl_fpDbl_mulPre5Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre5Lbmi2,@function +mcl_fpDbl_mulPre5Lbmi2: # @mcl_fpDbl_mulPre5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $44, %esp + movl 68(%esp), %eax + movl (%eax), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, %ecx + movl 72(%esp), %eax + movl (%eax), %ebp + mulxl %ebp, %esi, %edi + movl %ebx, %edx + mulxl %ebp, %edx, %eax + movl %edx, 20(%esp) # 4-byte Spill + addl %esi, %eax + movl 8(%ecx), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %ebp, %esi, %ebx + adcl %edi, %esi + movl 12(%ecx), %edx + movl %edx, 28(%esp) # 4-byte Spill + mulxl %ebp, %edi, 
%ecx + adcl %ebx, %edi + movl 68(%esp), %edx + movl 16(%edx), %edx + movl %edx, 24(%esp) # 4-byte Spill + mulxl %ebp, %ebp, %edx + adcl %ecx, %ebp + movl 64(%esp), %ecx + movl 20(%esp), %ebx # 4-byte Reload + movl %ebx, (%ecx) + adcl $0, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 72(%esp), %ecx + movl 4(%ecx), %ebx + movl 36(%esp), %edx # 4-byte Reload + mulxl %ebx, %ecx, %edx + movl %edx, 20(%esp) # 4-byte Spill + addl %eax, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + mulxl %ebx, %ecx, %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl %esi, %ecx + movl 32(%esp), %edx # 4-byte Reload + mulxl %ebx, %esi, %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl %edi, %esi + movl 28(%esp), %edx # 4-byte Reload + mulxl %ebx, %edi, %eax + movl %eax, 28(%esp) # 4-byte Spill + adcl %ebp, %edi + movl 24(%esp), %edx # 4-byte Reload + mulxl %ebx, %eax, %edx + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebx + sbbl %eax, %eax + andl $1, %eax + addl 20(%esp), %ecx # 4-byte Folded Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + adcl 32(%esp), %edi # 4-byte Folded Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + adcl %edx, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax + movl 36(%esp), %edx # 4-byte Reload + movl %edx, 4(%eax) + movl 68(%esp), %ebx + movl (%ebx), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 72(%esp), %eax + movl 8(%eax), %eax + mulxl %eax, %edx, %ebp + movl %ebp, 12(%esp) # 4-byte Spill + addl %ecx, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 4(%ebx), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %eax, %edx, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + adcl %esi, %edx + movl %edx, %ebp + movl 8(%ebx), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl %edi, %ecx + movl 12(%ebx), %edx + movl %edx, 28(%esp) # 4-byte Spill + mulxl %eax, %esi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl 16(%ebx), %edx + movl %edx, 24(%esp) # 4-byte Spill + mulxl %eax, %edi, %edx + adcl 16(%esp), %edi # 4-byte Folded Reload + sbbl %ebx, %ebx + andl $1, %ebx + addl 12(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esp) # 4-byte Spill + adcl 8(%esp), %ecx # 4-byte Folded Reload + adcl 4(%esp), %esi # 4-byte Folded Reload + adcl (%esp), %edi # 4-byte Folded Reload + adcl %edx, %ebx + movl 64(%esp), %eax + movl 20(%esp), %edx # 4-byte Reload + movl %edx, 8(%eax) + movl 72(%esp), %eax + movl 12(%eax), %eax + movl 40(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %ebp + movl %ebp, 20(%esp) # 4-byte Spill + addl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + mulxl %eax, %ebp, %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl %ecx, %ebp + movl 32(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl %esi, %ecx + movl 28(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %esi + movl %esi, 28(%esp) # 4-byte Spill + adcl %edi, %edx + movl %edx, %esi + movl 24(%esp), %edx # 4-byte Reload + mulxl %eax, %edi, %edx + adcl %ebx, %edi + sbbl %eax, %eax + andl $1, %eax + addl 20(%esp), %ebp # 4-byte Folded Reload + adcl 36(%esp), %ecx # 4-byte Folded Reload + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edi # 4-byte Folded Reload + adcl %edx, %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 64(%esp), %eax + movl 40(%esp), %edx # 4-byte 
Reload + movl %edx, 12(%eax) + movl 72(%esp), %eax + movl 16(%eax), %edx + movl 68(%esp), %eax + mulxl (%eax), %esi, %ebx + movl %ebx, 36(%esp) # 4-byte Spill + addl %ebp, %esi + movl %esi, 40(%esp) # 4-byte Spill + mulxl 4(%eax), %ebx, %esi + movl %esi, 28(%esp) # 4-byte Spill + adcl %ecx, %ebx + mulxl 8(%eax), %esi, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + mulxl 12(%eax), %ecx, %ebp + adcl %edi, %ecx + mulxl 16(%eax), %edi, %eax + movl %eax, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 36(%esp), %ebx # 4-byte Folded Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + adcl %ebp, %edi + movl 64(%esp), %ebp + movl 40(%esp), %edx # 4-byte Reload + movl %edx, 16(%ebp) + movl %ebx, 20(%ebp) + movl %esi, 24(%ebp) + movl %ecx, 28(%ebp) + movl %edi, 32(%ebp) + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ebp) + addl $44, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end66: + .size mcl_fpDbl_mulPre5Lbmi2, .Lfunc_end66-mcl_fpDbl_mulPre5Lbmi2 + + .globl mcl_fpDbl_sqrPre5Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre5Lbmi2,@function +mcl_fpDbl_sqrPre5Lbmi2: # @mcl_fpDbl_sqrPre5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 64(%esp), %ecx + movl (%ecx), %edi + movl 4(%ecx), %esi + movl %esi, %edx + mulxl %edi, %ebp, %ebx + movl %ebp, 24(%esp) # 4-byte Spill + movl %ebx, 28(%esp) # 4-byte Spill + movl %edi, %edx + mulxl %edi, %edx, %eax + movl %edx, 16(%esp) # 4-byte Spill + addl %ebp, %eax + movl 8(%ecx), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %edi, %ebp, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl %ebx, %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 12(%ecx), %edx + movl %edx, 20(%esp) # 4-byte Spill + mulxl %edi, %ecx, %ebx + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl 64(%esp), %edx + movl 16(%edx), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %edi, %edi, %edx + adcl %ebx, %edi + movl 16(%esp), %ebx # 4-byte Reload + movl 60(%esp), %ebp + movl %ebx, (%ebp) + adcl $0, %edx + movl %edx, 8(%esp) # 4-byte Spill + addl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %edx + mulxl %esi, %ebx, %eax + movl %eax, 16(%esp) # 4-byte Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + mulxl %esi, %ebp, %eax + movl %eax, 12(%esp) # 4-byte Spill + adcl %ecx, %ebp + movl 20(%esp), %edx # 4-byte Reload + mulxl %esi, %ecx, %eax + movl %eax, 20(%esp) # 4-byte Spill + adcl %edi, %ecx + movl 32(%esp), %edx # 4-byte Reload + mulxl %esi, %edi, %edx + adcl 8(%esp), %edi # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 28(%esp), %ebx # 4-byte Folded Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + adcl %edx, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 60(%esp), %eax + movl 24(%esp), %edx # 4-byte Reload + movl %edx, 4(%eax) + movl 64(%esp), %eax + movl (%eax), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + mulxl %esi, %edx, %eax + movl %eax, 16(%esp) # 4-byte Spill + addl %ebx, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 64(%esp), %eax + movl 4(%eax), %edx + movl %edx, 20(%esp) # 4-byte Spill + mulxl %esi, %ebx, %eax + movl %eax, 8(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl %esi, %edx + mulxl %esi, %ebp, %edx + movl %edx, 
4(%esp) # 4-byte Spill + movl %esi, %eax + adcl %ecx, %ebp + movl 64(%esp), %ecx + movl 12(%ecx), %esi + movl %esi, %edx + mulxl %eax, %eax, %ecx + movl %ecx, (%esp) # 4-byte Spill + adcl %edi, %eax + movl 32(%esp), %edx # 4-byte Reload + mulxl 36(%esp), %ecx, %edx # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + sbbl %edi, %edi + andl $1, %edi + addl 16(%esp), %ebx # 4-byte Folded Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl (%esp), %ecx # 4-byte Folded Reload + adcl %edx, %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 24(%esp), %edx # 4-byte Reload + mulxl %esi, %edx, %edi + movl %edi, 24(%esp) # 4-byte Spill + addl %ebx, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 20(%esp), %edx # 4-byte Reload + mulxl %esi, %edx, %edi + movl %edi, 20(%esp) # 4-byte Spill + adcl %ebp, %edx + movl %edx, %edi + movl 60(%esp), %eax + movl 28(%esp), %edx # 4-byte Reload + movl %edx, 8(%eax) + movl 64(%esp), %eax + movl 8(%eax), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %esi, %ebx, %edx + movl %edx, 28(%esp) # 4-byte Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl %esi, %edx + mulxl %esi, %ebp, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl %ecx, %ebp + movl 16(%eax), %ecx + movl %ecx, %edx + mulxl %esi, %esi, %edx + adcl 16(%esp), %esi # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 24(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl 28(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %esi # 4-byte Folded Reload + adcl %edx, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 60(%esp), %edx + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 12(%edx) + movl %ecx, %edx + movl 64(%esp), %eax + mulxl (%eax), %edx, %eax + movl %eax, 28(%esp) # 4-byte Spill + addl %edi, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %ecx, %edx + movl 64(%esp), %eax + mulxl 4(%eax), %edi, %edx + movl %edx, 24(%esp) # 4-byte Spill + adcl %ebx, %edi + movl 36(%esp), %edx # 4-byte Reload + mulxl %ecx, %ebx, %edx + movl %edx, 16(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl %ecx, %edx + mulxl 12(%eax), %ebp, %eax + movl %eax, 12(%esp) # 4-byte Spill + adcl %esi, %ebp + movl %ecx, %edx + mulxl %ecx, %edx, %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 28(%esp), %edi # 4-byte Folded Reload + adcl 24(%esp), %ebx # 4-byte Folded Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %edx # 4-byte Folded Reload + movl 60(%esp), %esi + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%esi) + movl %edi, 20(%esi) + movl %ebx, 24(%esi) + movl %ebp, 28(%esi) + movl %edx, 32(%esi) + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esi) + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end67: + .size mcl_fpDbl_sqrPre5Lbmi2, .Lfunc_end67-mcl_fpDbl_sqrPre5Lbmi2 + + .globl mcl_fp_mont5Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont5Lbmi2,@function +mcl_fp_mont5Lbmi2: # @mcl_fp_mont5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $84, %esp + movl 108(%esp), %eax + movl 16(%eax), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 112(%esp), %ecx + movl (%ecx), %ecx + movl 12(%eax), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 8(%eax), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl (%eax), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 4(%eax), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + mulxl 
%ecx, %edx, %eax + movl %edx, 80(%esp) # 4-byte Spill + movl %esi, %edx + mulxl %ecx, %edx, %esi + movl %edx, 76(%esp) # 4-byte Spill + movl %edi, %edx + mulxl %ecx, %edx, %edi + movl %edx, 72(%esp) # 4-byte Spill + movl %ebp, %edx + mulxl %ecx, %edx, %ebp + movl %edx, 68(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl %ecx, %edx, %ecx + movl %edx, 16(%esp) # 4-byte Spill + addl 68(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 72(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 24(%esp) # 4-byte Spill + adcl 76(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 80(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 116(%esp), %ebp + movl -4(%ebp), %eax + movl %eax, 60(%esp) # 4-byte Spill + imull %eax, %edx + movl (%ebp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 4(%ebp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %ebx + mulxl %eax, %esi, %edi + movl %esi, 12(%esp) # 4-byte Spill + addl %ecx, %edi + movl 8(%ebp), %eax + movl %eax, 72(%esp) # 4-byte Spill + mulxl %eax, %esi, %ecx + adcl %ebx, %esi + movl 12(%ebp), %eax + movl %eax, 68(%esp) # 4-byte Spill + mulxl %eax, %eax, %ebx + movl %ebx, 8(%esp) # 4-byte Spill + adcl %ecx, %eax + movl %eax, %ecx + movl 16(%ebp), %eax + movl %eax, 64(%esp) # 4-byte Spill + mulxl %eax, %ebx, %eax + adcl 8(%esp), %ebx # 4-byte Folded Reload + adcl $0, %eax + movl 12(%esp), %edx # 4-byte Reload + addl 16(%esp), %edx # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + sbbl %edi, %edi + andl $1, %edi + movl 112(%esp), %edx + movl 4(%edx), %edx + mulxl 48(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + mulxl 40(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + mulxl 44(%esp), %esi, %ebp # 4-byte Folded Reload + addl %eax, %ebp + mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + movl 32(%esp), %eax # 4-byte Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 8(%esp) # 4-byte Spill + adcl 16(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + movl 24(%esp), %ebp # 4-byte Reload + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 24(%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + adcl %edi, %edx + movl %edx, 36(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl %esi, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 76(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, %edi + mulxl 72(%esp), %esi, %ebx # 
4-byte Folded Reload + adcl %eax, %esi + mulxl 68(%esp), %ecx, %ebp # 4-byte Folded Reload + adcl %ebx, %ecx + mulxl 64(%esp), %edx, %eax # 4-byte Folded Reload + adcl %ebp, %edx + movl %edx, %ebx + adcl $0, %eax + movl 28(%esp), %edx # 4-byte Reload + andl $1, %edx + movl 12(%esp), %ebp # 4-byte Reload + addl 8(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %esi + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 112(%esp), %edx + movl 8(%edx), %edx + mulxl 48(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + mulxl 40(%esp), %edi, %ebx # 4-byte Folded Reload + mulxl 44(%esp), %eax, %ecx # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + addl %edi, %ecx + mulxl 56(%esp), %eax, %edx # 4-byte Folded Reload + adcl %ebx, %eax + movl %eax, %edi + adcl (%esp), %edx # 4-byte Folded Reload + movl %edx, %eax + movl 32(%esp), %ebx # 4-byte Reload + adcl 4(%esp), %ebx # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 24(%esp), %ebp # 4-byte Reload + addl 20(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 24(%esp) # 4-byte Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl %esi, %edi + movl %edi, 4(%esp) # 4-byte Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + adcl 8(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 32(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl %ebp, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 76(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + addl %ecx, %esi + mulxl 72(%esp), %ecx, %ebp # 4-byte Folded Reload + adcl %eax, %ecx + mulxl 68(%esp), %eax, %edi # 4-byte Folded Reload + adcl %ebp, %eax + mulxl 64(%esp), %ebx, %ebp # 4-byte Folded Reload + adcl %edi, %ebx + adcl $0, %ebp + movl 28(%esp), %edx # 4-byte Reload + andl $1, %edx + movl 12(%esp), %edi # 4-byte Reload + addl 24(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 112(%esp), %edx + movl 12(%edx), %edx + mulxl 48(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + mulxl 40(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + mulxl 44(%esp), %esi, %edi # 4-byte Folded Reload + addl %eax, %edi + mulxl 56(%esp), %eax, %edx # 4-byte Folded Reload + adcl 24(%esp), %eax # 
4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + adcl %ecx, %edx + movl %edx, %ecx + movl 32(%esp), %eax # 4-byte Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 8(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + movl 24(%esp), %edi # 4-byte Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl %esi, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 76(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, %ebp + mulxl 72(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, %eax + mulxl 68(%esp), %ebx, %edi # 4-byte Folded Reload + adcl %ecx, %ebx + mulxl 64(%esp), %ecx, %esi # 4-byte Folded Reload + adcl %edi, %ecx + adcl $0, %esi + movl 28(%esp), %edx # 4-byte Reload + andl $1, %edx + movl 12(%esp), %edi # 4-byte Reload + addl 8(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + adcl 16(%esp), %ebx # 4-byte Folded Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + adcl 36(%esp), %esi # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 112(%esp), %edx + movl 16(%edx), %edx + mulxl 40(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + mulxl 44(%esp), %eax, %edi # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + addl %ebp, %edi + mulxl 48(%esp), %ebp, %eax # 4-byte Folded Reload + movl %ebp, 36(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + mulxl 52(%esp), %ebp, %eax # 4-byte Folded Reload + movl %ebp, 32(%esp) # 4-byte Spill + mulxl 56(%esp), %ebp, %edx # 4-byte Folded Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 52(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebp + movl 48(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 44(%esp), %eax # 4-byte Reload + addl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + adcl %ebx, 52(%esp) # 4-byte Folded Spill + adcl %ecx, 56(%esp) # 4-byte Folded Spill + adcl %esi, %ebp + movl %ebp, 36(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + sbbl %ebx, %ebx + movl 60(%esp), %edx # 4-byte Reload + imull %eax, %edx + mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + mulxl 80(%esp), %ebp, %ecx # 4-byte Folded Reload + addl %eax, %ebp + mulxl 72(%esp), %edi, %eax # 4-byte Folded Reload + adcl %ecx, %edi + movl %edx, %ecx + mulxl 68(%esp), %esi, %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl %eax, %esi + movl %ecx, %edx + mulxl 64(%esp), %edx, %ecx # 4-byte Folded Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + adcl $0, %ecx + andl $1, %ebx + movl 60(%esp), %eax # 
4-byte Reload + addl 44(%esp), %eax # 4-byte Folded Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl 48(%esp), %ecx # 4-byte Folded Reload + adcl $0, %ebx + subl 76(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 76(%esp) # 4-byte Spill + movl %edi, %eax + sbbl 80(%esp), %eax # 4-byte Folded Reload + movl %esi, %ebp + sbbl 72(%esp), %ebp # 4-byte Folded Reload + sbbl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl %ecx, %edx + sbbl 64(%esp), %edx # 4-byte Folded Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB68_2 +# BB#1: + movl %eax, %edi +.LBB68_2: + testb %bl, %bl + movl 44(%esp), %ebx # 4-byte Reload + jne .LBB68_4 +# BB#3: + movl 76(%esp), %ebx # 4-byte Reload +.LBB68_4: + movl 104(%esp), %eax + movl %ebx, (%eax) + movl %edi, 4(%eax) + jne .LBB68_6 +# BB#5: + movl %ebp, %esi +.LBB68_6: + movl %esi, 8(%eax) + movl 60(%esp), %esi # 4-byte Reload + jne .LBB68_8 +# BB#7: + movl 80(%esp), %esi # 4-byte Reload +.LBB68_8: + movl %esi, 12(%eax) + jne .LBB68_10 +# BB#9: + movl %edx, %ecx +.LBB68_10: + movl %ecx, 16(%eax) + addl $84, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end68: + .size mcl_fp_mont5Lbmi2, .Lfunc_end68-mcl_fp_mont5Lbmi2 + + .globl mcl_fp_montNF5Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF5Lbmi2,@function +mcl_fp_montNF5Lbmi2: # @mcl_fp_montNF5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $68, %esp + movl 92(%esp), %edi + movl (%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 4(%edi), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 96(%esp), %ecx + movl (%ecx), %ebx + mulxl %ebx, %ecx, %esi + movl %eax, %edx + mulxl %ebx, %edx, %eax + movl %edx, 60(%esp) # 4-byte Spill + addl %ecx, %eax + movl %eax, %ecx + movl 8(%edi), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %ebx, %eax, %ebp + adcl %esi, %eax + movl %eax, %esi + movl 12(%edi), %edx + movl %edx, 28(%esp) # 4-byte Spill + mulxl %ebx, %eax, %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 16(%edi), %edx + movl %edx, 24(%esp) # 4-byte Spill + mulxl %ebx, %edx, %eax + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 100(%esp), %ebx + movl -4(%ebx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + movl %edi, %edx + imull %eax, %edx + movl (%ebx), %eax + movl %eax, 64(%esp) # 4-byte Spill + mulxl %eax, %ebp, %eax + movl %eax, 12(%esp) # 4-byte Spill + addl %edi, %ebp + movl 4(%ebx), %eax + movl %eax, 60(%esp) # 4-byte Spill + mulxl %eax, %eax, %edi + movl %edi, 8(%esp) # 4-byte Spill + adcl %ecx, %eax + movl %eax, %edi + movl 8(%ebx), %eax + movl %eax, 56(%esp) # 4-byte Spill + mulxl %eax, %eax, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + adcl %esi, %eax + movl %eax, %esi + movl 12(%ebx), %eax + movl %eax, 52(%esp) # 4-byte Spill + mulxl %eax, %ecx, %ebp + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl 16(%ebx), %eax + movl %eax, 48(%esp) # 4-byte Spill + mulxl %eax, %ebx, %edx + adcl 16(%esp), %ebx # 4-byte Folded Reload + movl 20(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte 
Spill + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl %ebp, %ebx + adcl %edx, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl 4(%eax), %edx + mulxl 36(%esp), %ecx, %esi # 4-byte Folded Reload + mulxl 40(%esp), %edi, %eax # 4-byte Folded Reload + addl %ecx, %eax + mulxl 32(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + adcl %esi, %ebp + mulxl 28(%esp), %esi, %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl (%esp), %esi # 4-byte Folded Reload + mulxl 24(%esp), %edx, %ecx # 4-byte Folded Reload + adcl 4(%esp), %edx # 4-byte Folded Reload + adcl $0, %ecx + addl 16(%esp), %edi # 4-byte Folded Reload + adcl 12(%esp), %eax # 4-byte Folded Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %esi + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl %edi, %edx + imull 44(%esp), %edx # 4-byte Folded Reload + mulxl 64(%esp), %ebx, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + addl %edi, %ebx + mulxl 60(%esp), %edi, %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl %eax, %edi + mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + adcl %ebp, %ecx + mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload + adcl %esi, %eax + mulxl 48(%esp), %ebx, %edx # 4-byte Folded Reload + adcl 16(%esp), %ebx # 4-byte Folded Reload + movl 20(%esp), %esi # 4-byte Reload + adcl $0, %esi + addl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl %ebx, 8(%esp) # 4-byte Spill + adcl %edx, %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl 8(%eax), %edx + mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 40(%esp), %ebp, %esi # 4-byte Folded Reload + addl %ecx, %esi + mulxl 32(%esp), %edi, %ecx # 4-byte Folded Reload + adcl %eax, %edi + mulxl 28(%esp), %ebx, %eax # 4-byte Folded Reload + movl %eax, (%esp) # 4-byte Spill + adcl %ecx, %ebx + mulxl 24(%esp), %ecx, %eax # 4-byte Folded Reload + adcl (%esp), %ecx # 4-byte Folded Reload + adcl $0, %eax + addl 16(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %esi # 4-byte Folded Reload + adcl 4(%esp), %edi # 4-byte Folded Reload + adcl 8(%esp), %ebx # 4-byte Folded Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl %ebp, %eax + movl %eax, %edx + imull 44(%esp), %edx # 4-byte Folded Reload + mulxl 64(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + addl %eax, %ebp + mulxl 60(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl %esi, %ebp + movl %ebp, %esi + mulxl 56(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + adcl %edi, %ebp + movl %ebp, %eax + mulxl 52(%esp), %ebp, %edi # 4-byte Folded Reload + adcl %ebx, %ebp + movl %ebp, %ebx + mulxl 48(%esp), %ebp, %edx # 4-byte Folded Reload + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl 20(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 4(%esp), %ebx # 4-byte Folded 
Reload + movl %ebx, 4(%esp) # 4-byte Spill + adcl %edi, %ebp + movl %ebp, 8(%esp) # 4-byte Spill + adcl %edx, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl 12(%eax), %edx + mulxl 36(%esp), %ecx, %esi # 4-byte Folded Reload + mulxl 40(%esp), %ebx, %ebp # 4-byte Folded Reload + addl %ecx, %ebp + mulxl 32(%esp), %ecx, %edi # 4-byte Folded Reload + adcl %esi, %ecx + mulxl 28(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, (%esp) # 4-byte Spill + adcl %edi, %esi + mulxl 24(%esp), %edi, %eax # 4-byte Folded Reload + adcl (%esp), %edi # 4-byte Folded Reload + adcl $0, %eax + addl 16(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %ebp # 4-byte Folded Reload + adcl 4(%esp), %ecx # 4-byte Folded Reload + adcl 8(%esp), %esi # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + adcl $0, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl %ebx, %edx + imull 44(%esp), %edx # 4-byte Folded Reload + mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + addl 16(%esp), %ebx # 4-byte Folded Reload + mulxl 60(%esp), %ebx, %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl %ebx, 16(%esp) # 4-byte Spill + mulxl 56(%esp), %eax, %ebx # 4-byte Folded Reload + adcl %ecx, %eax + mulxl 52(%esp), %ecx, %ebp # 4-byte Folded Reload + movl %ebp, 4(%esp) # 4-byte Spill + adcl %esi, %ecx + movl %ecx, %esi + mulxl 48(%esp), %ecx, %edx # 4-byte Folded Reload + adcl %edi, %ecx + movl 20(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl 12(%esp), %edi # 4-byte Reload + addl %edi, 16(%esp) # 4-byte Folded Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl %ebx, %esi + movl %esi, 8(%esp) # 4-byte Spill + adcl 4(%esp), %ecx # 4-byte Folded Reload + adcl %edx, %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl 16(%eax), %edx + mulxl 36(%esp), %eax, %ebp # 4-byte Folded Reload + mulxl 40(%esp), %edi, %ebx # 4-byte Folded Reload + addl %eax, %ebx + mulxl 32(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + adcl %ebp, %eax + mulxl 28(%esp), %ebp, %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + mulxl 24(%esp), %edx, %esi # 4-byte Folded Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + adcl $0, %esi + addl 16(%esp), %edi # 4-byte Folded Reload + adcl 12(%esp), %ebx # 4-byte Folded Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + adcl %ecx, %ebp + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl $0, %esi + movl 44(%esp), %edx # 4-byte Reload + imull %edi, %edx + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + addl %edi, %ecx + mulxl 60(%esp), %edi, %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + adcl %ebx, %edi + movl %edx, %eax + mulxl 56(%esp), %ebx, %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + mulxl 52(%esp), %ecx, %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl %ebp, %ecx + movl %eax, %edx + mulxl 48(%esp), %ebp, %edx # 4-byte Folded Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + adcl $0, %esi + addl 44(%esp), %edi # 4-byte Folded Reload + adcl 36(%esp), %ebx # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl %edx, 
%esi + movl %edi, %eax + subl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %eax + sbbl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + sbbl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl %ebp, %eax + sbbl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl %esi, %edx + sbbl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + sarl $31, %edx + testl %edx, %edx + js .LBB69_2 +# BB#1: + movl 40(%esp), %edi # 4-byte Reload +.LBB69_2: + movl 88(%esp), %edx + movl %edi, (%edx) + js .LBB69_4 +# BB#3: + movl 44(%esp), %ebx # 4-byte Reload +.LBB69_4: + movl %ebx, 4(%edx) + js .LBB69_6 +# BB#5: + movl 56(%esp), %ecx # 4-byte Reload +.LBB69_6: + movl %ecx, 8(%edx) + js .LBB69_8 +# BB#7: + movl 60(%esp), %ebp # 4-byte Reload +.LBB69_8: + movl %ebp, 12(%edx) + js .LBB69_10 +# BB#9: + movl 64(%esp), %esi # 4-byte Reload +.LBB69_10: + movl %esi, 16(%edx) + addl $68, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end69: + .size mcl_fp_montNF5Lbmi2, .Lfunc_end69-mcl_fp_montNF5Lbmi2 + + .globl mcl_fp_montRed5Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed5Lbmi2,@function +mcl_fp_montRed5Lbmi2: # @mcl_fp_montRed5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $68, %esp + movl 96(%esp), %eax + movl -4(%eax), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl (%eax), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 92(%esp), %ecx + movl (%ecx), %edx + movl %edx, 44(%esp) # 4-byte Spill + imull %esi, %edx + movl 16(%eax), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 12(%eax), %ebx + movl %ebx, 64(%esp) # 4-byte Spill + movl 8(%eax), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 4(%eax), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + mulxl %esi, %esi, %eax + movl %esi, 36(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + mulxl %ebx, %esi, %eax + movl %esi, 24(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + mulxl %ebp, %ebp, %eax + mulxl %ecx, %esi, %ecx + mulxl %edi, %edx, %ebx + addl %esi, %ebx + adcl %ebp, %ecx + adcl 24(%esp), %eax # 4-byte Folded Reload + movl 28(%esp), %edi # 4-byte Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + movl 32(%esp), %esi # 4-byte Reload + adcl $0, %esi + addl 44(%esp), %edx # 4-byte Folded Reload + movl 92(%esp), %ebp + adcl 4(%ebp), %ebx + adcl 8(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + adcl 12(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill + adcl 16(%ebp), %edi + movl %edi, 28(%esp) # 4-byte Spill + adcl 20(%ebp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 36(%ebp), %edx + movl 32(%ebp), %esi + movl 28(%ebp), %edi + movl 24(%ebp), %eax + adcl $0, %eax + movl %eax, 8(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 24(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 36(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 44(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, %esi + movl %ebx, %edx + imull 48(%esp), %edx # 4-byte Folded Reload + mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + mulxl 56(%esp), %ebp, %eax # 4-byte Folded Reload + mulxl 52(%esp), %edi, %ecx # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + addl %ebp, %ecx + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, %ebp + mulxl 64(%esp), %eax, %edi # 4-byte Folded Reload + movl %edi, (%esp) # 4-byte Spill + adcl 20(%esp), %eax # 4-byte 
Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + mulxl %eax, %edi, %edx + adcl (%esp), %edi # 4-byte Folded Reload + adcl $0, %edx + addl %ebx, 4(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esp) # 4-byte Spill + movl 28(%esp), %ebx # 4-byte Reload + adcl %ebx, 20(%esp) # 4-byte Folded Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %ecx, %edx + imull 48(%esp), %edx # 4-byte Folded Reload + mulxl %eax, %edi, %eax + movl %edi, 4(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + mulxl 56(%esp), %edi, %eax # 4-byte Folded Reload + movl %eax, (%esp) # 4-byte Spill + mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + addl %edi, %ebp + mulxl 60(%esp), %ebx, %eax # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + mulxl 64(%esp), %edi, %edx # 4-byte Folded Reload + adcl %eax, %edi + adcl 4(%esp), %edx # 4-byte Folded Reload + movl %edx, %eax + movl 32(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %ecx, 8(%esp) # 4-byte Folded Spill + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl 12(%esp), %edi # 4-byte Folded Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 16(%esp) # 4-byte Spill + movl %ebp, %edx + imull 48(%esp), %edx # 4-byte Folded Reload + mulxl 40(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + mulxl 52(%esp), %eax, %esi # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + addl %ecx, %esi + mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + mulxl 64(%esp), %edx, %ecx # 4-byte Folded Reload + adcl %eax, %edx + movl %edx, %eax + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl 28(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %ebp, 12(%esp) # 4-byte Folded Spill + adcl %ebx, %esi + adcl %edi, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 32(%esp) # 4-byte Spill + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 16(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + movl 48(%esp), %edx # 4-byte Reload + imull %esi, %edx + mulxl 52(%esp), %eax, %ecx # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + mulxl 56(%esp), %edi, %eax # 4-byte Folded Reload + addl %ecx, %edi + movl %edx, %ebp + mulxl 60(%esp), %ecx, %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl %eax, %ecx + movl %ebp, %edx + mulxl 64(%esp), %eax, %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %ebp, %edx + mulxl 40(%esp), %ebp, %edx # 4-byte Folded 
Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl $0, %edx + addl %esi, 48(%esp) # 4-byte Folded Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 48(%esp) # 4-byte Spill + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %ebx, %esi + adcl $0, %esi + movl %edi, %ebx + subl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 52(%esp) # 4-byte Spill + movl %ecx, %ebx + sbbl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + sbbl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebx + sbbl 64(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 64(%esp) # 4-byte Spill + movl %edx, %ebp + sbbl 40(%esp), %ebp # 4-byte Folded Reload + sbbl $0, %esi + andl $1, %esi + jne .LBB70_2 +# BB#1: + movl 56(%esp), %ecx # 4-byte Reload +.LBB70_2: + movl %esi, %eax + testb %al, %al + jne .LBB70_4 +# BB#3: + movl 52(%esp), %edi # 4-byte Reload +.LBB70_4: + movl 88(%esp), %esi + movl %edi, (%esi) + movl %ecx, 4(%esi) + movl 48(%esp), %eax # 4-byte Reload + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB70_6 +# BB#5: + movl %ebx, %ecx +.LBB70_6: + movl %ecx, 8(%esi) + jne .LBB70_8 +# BB#7: + movl 64(%esp), %eax # 4-byte Reload +.LBB70_8: + movl %eax, 12(%esi) + jne .LBB70_10 +# BB#9: + movl %ebp, %edx +.LBB70_10: + movl %edx, 16(%esi) + addl $68, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end70: + .size mcl_fp_montRed5Lbmi2, .Lfunc_end70-mcl_fp_montRed5Lbmi2 + + .globl mcl_fp_addPre5Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre5Lbmi2,@function +mcl_fp_addPre5Lbmi2: # @mcl_fp_addPre5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 28(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 24(%esp), %esi + addl (%esi), %ecx + adcl 4(%esi), %edx + movl 8(%eax), %edi + adcl 8(%esi), %edi + movl 12(%esi), %ebx + movl 16(%esi), %esi + adcl 12(%eax), %ebx + movl 16(%eax), %eax + movl 20(%esp), %ebp + movl %ecx, (%ebp) + movl %edx, 4(%ebp) + movl %edi, 8(%ebp) + movl %ebx, 12(%ebp) + adcl %esi, %eax + movl %eax, 16(%ebp) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end71: + .size mcl_fp_addPre5Lbmi2, .Lfunc_end71-mcl_fp_addPre5Lbmi2 + + .globl mcl_fp_subPre5Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre5Lbmi2,@function +mcl_fp_subPre5Lbmi2: # @mcl_fp_subPre5Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %ecx + movl (%ecx), %edx + xorl %eax, %eax + movl 20(%esp), %esi + subl (%esi), %edx + movl 12(%esp), %edi + movl %edx, (%edi) + movl 4(%ecx), %edx + sbbl 4(%esi), %edx + movl %edx, 4(%edi) + movl 8(%ecx), %edx + sbbl 8(%esi), %edx + movl %edx, 8(%edi) + movl 12(%ecx), %edx + sbbl 12(%esi), %edx + movl %edx, 12(%edi) + movl 16(%esi), %edx + movl 16(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 16(%edi) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + retl +.Lfunc_end72: + .size mcl_fp_subPre5Lbmi2, .Lfunc_end72-mcl_fp_subPre5Lbmi2 + + .globl mcl_fp_shr1_5Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_5Lbmi2,@function +mcl_fp_shr1_5Lbmi2: # @mcl_fp_shr1_5Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 20(%esp), %eax + movl 16(%eax), %ecx + movl 12(%eax), %edx + movl 8(%eax), %esi + movl (%eax), %edi + movl 4(%eax), %eax + shrdl $1, %eax, %edi + movl 16(%esp), %ebx + movl %edi, (%ebx) + shrdl $1, %esi, %eax + movl %eax, 4(%ebx) + shrdl $1, %edx, %esi + movl %esi, 8(%ebx) + shrdl 
$1, %ecx, %edx + movl %edx, 12(%ebx) + shrl %ecx + movl %ecx, 16(%ebx) + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end73: + .size mcl_fp_shr1_5Lbmi2, .Lfunc_end73-mcl_fp_shr1_5Lbmi2 + + .globl mcl_fp_add5Lbmi2 + .align 16, 0x90 + .type mcl_fp_add5Lbmi2,@function +mcl_fp_add5Lbmi2: # @mcl_fp_add5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 28(%esp), %ebx + movl (%ebx), %eax + movl 4(%ebx), %ecx + movl 24(%esp), %edi + addl (%edi), %eax + adcl 4(%edi), %ecx + movl 8(%ebx), %edx + adcl 8(%edi), %edx + movl 12(%edi), %esi + movl 16(%edi), %edi + adcl 12(%ebx), %esi + adcl 16(%ebx), %edi + movl 20(%esp), %ebx + movl %eax, (%ebx) + movl %ecx, 4(%ebx) + movl %edx, 8(%ebx) + movl %esi, 12(%ebx) + movl %edi, 16(%ebx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 32(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %ecx + sbbl 8(%ebp), %edx + sbbl 12(%ebp), %esi + sbbl 16(%ebp), %edi + sbbl $0, %ebx + testb $1, %bl + jne .LBB74_2 +# BB#1: # %nocarry + movl 20(%esp), %ebx + movl %eax, (%ebx) + movl %ecx, 4(%ebx) + movl %edx, 8(%ebx) + movl %esi, 12(%ebx) + movl %edi, 16(%ebx) +.LBB74_2: # %carry + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end74: + .size mcl_fp_add5Lbmi2, .Lfunc_end74-mcl_fp_add5Lbmi2 + + .globl mcl_fp_addNF5Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF5Lbmi2,@function +mcl_fp_addNF5Lbmi2: # @mcl_fp_addNF5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 48(%esp), %esi + movl (%esi), %ebx + movl 4(%esi), %eax + movl 44(%esp), %edi + addl (%edi), %ebx + adcl 4(%edi), %eax + movl 16(%esi), %ecx + movl 12(%esi), %edx + movl 8(%esi), %ebp + adcl 8(%edi), %ebp + adcl 12(%edi), %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl 16(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 52(%esp), %edi + movl %ebx, %esi + subl (%edi), %esi + movl %esi, (%esp) # 4-byte Spill + movl %eax, %esi + sbbl 4(%edi), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl %ebp, %esi + sbbl 8(%edi), %esi + sbbl 12(%edi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %ecx, %edx + sbbl 16(%edi), %edx + movl %edx, %edi + sarl $31, %edi + testl %edi, %edi + js .LBB75_2 +# BB#1: + movl (%esp), %ebx # 4-byte Reload +.LBB75_2: + movl 40(%esp), %edi + movl %ebx, (%edi) + js .LBB75_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB75_4: + movl %eax, 4(%edi) + movl 12(%esp), %ecx # 4-byte Reload + js .LBB75_6 +# BB#5: + movl %esi, %ebp +.LBB75_6: + movl %ebp, 8(%edi) + movl 16(%esp), %eax # 4-byte Reload + js .LBB75_8 +# BB#7: + movl 8(%esp), %ecx # 4-byte Reload +.LBB75_8: + movl %ecx, 12(%edi) + js .LBB75_10 +# BB#9: + movl %edx, %eax +.LBB75_10: + movl %eax, 16(%edi) + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end75: + .size mcl_fp_addNF5Lbmi2, .Lfunc_end75-mcl_fp_addNF5Lbmi2 + + .globl mcl_fp_sub5Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub5Lbmi2,@function +mcl_fp_sub5Lbmi2: # @mcl_fp_sub5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %edi + movl (%edi), %eax + movl 4(%edi), %ecx + xorl %ebx, %ebx + movl 28(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %ecx + movl 8(%edi), %edx + sbbl 8(%ebp), %edx + movl 12(%edi), %esi + sbbl 12(%ebp), %esi + movl 16(%edi), %edi + sbbl 16(%ebp), %edi + movl 20(%esp), %ebp + movl %eax, (%ebp) + movl %ecx, 4(%ebp) + movl %edx, 8(%ebp) + movl %esi, 12(%ebp) + movl %edi, 16(%ebp) + sbbl $0, %ebx + testb $1, %bl + je .LBB76_2 +# BB#1: # %carry + movl 32(%esp), %ebx + addl (%ebx), %eax + movl %eax, (%ebp) + adcl 4(%ebx), %ecx + 
movl %ecx, 4(%ebp) + adcl 8(%ebx), %edx + movl %edx, 8(%ebp) + movl 12(%ebx), %eax + adcl %esi, %eax + movl %eax, 12(%ebp) + movl 16(%ebx), %eax + adcl %edi, %eax + movl %eax, 16(%ebp) +.LBB76_2: # %nocarry + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end76: + .size mcl_fp_sub5Lbmi2, .Lfunc_end76-mcl_fp_sub5Lbmi2 + + .globl mcl_fp_subNF5Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF5Lbmi2,@function +mcl_fp_subNF5Lbmi2: # @mcl_fp_subNF5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $16, %esp + movl 40(%esp), %edi + movl (%edi), %ecx + movl 4(%edi), %eax + movl 44(%esp), %ebx + subl (%ebx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + sbbl 4(%ebx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 16(%edi), %esi + movl 12(%edi), %eax + movl 8(%edi), %ecx + sbbl 8(%ebx), %ecx + movl %ecx, (%esp) # 4-byte Spill + sbbl 12(%ebx), %eax + movl %eax, 8(%esp) # 4-byte Spill + sbbl 16(%ebx), %esi + movl %esi, %ebx + sarl $31, %ebx + movl %ebx, %ebp + shldl $1, %esi, %ebp + movl 48(%esp), %edi + movl 4(%edi), %ecx + andl %ebp, %ecx + andl (%edi), %ebp + movl 16(%edi), %edx + andl %ebx, %edx + rorxl $31, %ebx, %eax + andl 12(%edi), %ebx + andl 8(%edi), %eax + addl 4(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl 36(%esp), %edi + movl %ebp, (%edi) + adcl (%esp), %eax # 4-byte Folded Reload + movl %ecx, 4(%edi) + adcl 8(%esp), %ebx # 4-byte Folded Reload + movl %eax, 8(%edi) + movl %ebx, 12(%edi) + adcl %esi, %edx + movl %edx, 16(%edi) + addl $16, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end77: + .size mcl_fp_subNF5Lbmi2, .Lfunc_end77-mcl_fp_subNF5Lbmi2 + + .globl mcl_fpDbl_add5Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add5Lbmi2,@function +mcl_fpDbl_add5Lbmi2: # @mcl_fpDbl_add5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $28, %esp + movl 56(%esp), %edx + movl 52(%esp), %ecx + movl 12(%ecx), %ebx + movl 16(%ecx), %ebp + movl 8(%edx), %esi + movl (%edx), %edi + addl (%ecx), %edi + movl 48(%esp), %eax + movl %edi, (%eax) + movl 4(%edx), %edi + adcl 4(%ecx), %edi + adcl 8(%ecx), %esi + adcl 12(%edx), %ebx + adcl 16(%edx), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl %edi, 4(%eax) + movl 28(%edx), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl %esi, 8(%eax) + movl 20(%edx), %esi + movl %ebx, 12(%eax) + movl 20(%ecx), %ebp + adcl %esi, %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 24(%edx), %esi + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 16(%eax) + movl 24(%ecx), %ebx + adcl %esi, %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 28(%ecx), %edi + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + movl 32(%edx), %eax + movl 32(%ecx), %esi + adcl %eax, %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 36(%edx), %eax + movl 36(%ecx), %edx + adcl %eax, %edx + sbbl %eax, %eax + andl $1, %eax + movl %ebp, %ecx + movl 60(%esp), %ebp + subl (%ebp), %ecx + movl %ecx, (%esp) # 4-byte Spill + sbbl 4(%ebp), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + sbbl 8(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl %esi, %ebx + movl %edx, %esi + sbbl 12(%ebp), %ebx + sbbl 16(%ebp), %edx + sbbl $0, %eax + andl $1, %eax + jne .LBB78_2 +# BB#1: + movl %edx, %esi +.LBB78_2: + testb %al, %al + movl 12(%esp), %ebp # 4-byte Reload + jne .LBB78_4 +# BB#3: + movl (%esp), %ebp # 4-byte Reload +.LBB78_4: + movl 48(%esp), %eax + movl %ebp, 20(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl 20(%esp), %edx # 4-byte Reload + movl 16(%esp), %edi # 4-byte Reload 
+ jne .LBB78_6 +# BB#5: + movl 4(%esp), %edi # 4-byte Reload +.LBB78_6: + movl %edi, 24(%eax) + jne .LBB78_8 +# BB#7: + movl 8(%esp), %edx # 4-byte Reload +.LBB78_8: + movl %edx, 28(%eax) + jne .LBB78_10 +# BB#9: + movl %ebx, %ecx +.LBB78_10: + movl %ecx, 32(%eax) + movl %esi, 36(%eax) + addl $28, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end78: + .size mcl_fpDbl_add5Lbmi2, .Lfunc_end78-mcl_fpDbl_add5Lbmi2 + + .globl mcl_fpDbl_sub5Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub5Lbmi2,@function +mcl_fpDbl_sub5Lbmi2: # @mcl_fpDbl_sub5Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $16, %esp + movl 40(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %edi + movl 44(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%eax), %ebx + sbbl 8(%edx), %ebx + movl 36(%esp), %ecx + movl %esi, (%ecx) + movl 12(%eax), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ecx) + movl 16(%eax), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ecx) + movl 20(%edx), %ebx + movl %esi, 12(%ecx) + movl 20(%eax), %esi + sbbl %ebx, %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 24(%edx), %esi + movl %edi, 16(%ecx) + movl 24(%eax), %ebp + sbbl %esi, %ebp + movl 28(%edx), %esi + movl 28(%eax), %edi + sbbl %esi, %edi + movl %edi, (%esp) # 4-byte Spill + movl 32(%edx), %esi + movl 32(%eax), %edi + sbbl %esi, %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 36(%edx), %edx + movl 36(%eax), %eax + sbbl %edx, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl $0, %edx + sbbl $0, %edx + andl $1, %edx + movl 48(%esp), %ebx + jne .LBB79_1 +# BB#2: + xorl %eax, %eax + jmp .LBB79_3 +.LBB79_1: + movl 16(%ebx), %eax +.LBB79_3: + testb %dl, %dl + jne .LBB79_4 +# BB#5: + movl $0, %edx + movl $0, %esi + jmp .LBB79_6 +.LBB79_4: + movl (%ebx), %esi + movl 4(%ebx), %edx +.LBB79_6: + jne .LBB79_7 +# BB#8: + movl $0, %edi + jmp .LBB79_9 +.LBB79_7: + movl 12(%ebx), %edi +.LBB79_9: + jne .LBB79_10 +# BB#11: + xorl %ebx, %ebx + jmp .LBB79_12 +.LBB79_10: + movl 8(%ebx), %ebx +.LBB79_12: + addl 4(%esp), %esi # 4-byte Folded Reload + adcl %ebp, %edx + movl %esi, 20(%ecx) + adcl (%esp), %ebx # 4-byte Folded Reload + movl %edx, 24(%ecx) + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %ebx, 28(%ecx) + movl %edi, 32(%ecx) + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ecx) + addl $16, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end79: + .size mcl_fpDbl_sub5Lbmi2, .Lfunc_end79-mcl_fpDbl_sub5Lbmi2 + + .globl mcl_fp_mulUnitPre6Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre6Lbmi2,@function +mcl_fp_mulUnitPre6Lbmi2: # @mcl_fp_mulUnitPre6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $12, %esp + movl 40(%esp), %edx + movl 36(%esp), %esi + mulxl 4(%esi), %ecx, %edi + mulxl (%esi), %eax, %ebx + movl %eax, 8(%esp) # 4-byte Spill + addl %ecx, %ebx + movl %ebx, 4(%esp) # 4-byte Spill + mulxl 8(%esi), %ebp, %eax + adcl %edi, %ebp + mulxl 12(%esi), %ecx, %edi + adcl %eax, %ecx + mulxl 16(%esi), %eax, %ebx + movl %ebx, (%esp) # 4-byte Spill + adcl %edi, %eax + mulxl 20(%esi), %edx, %esi + movl 32(%esp), %edi + movl 8(%esp), %ebx # 4-byte Reload + movl %ebx, (%edi) + movl 4(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%edi) + movl %ebp, 8(%edi) + movl %ecx, 12(%edi) + movl %eax, 16(%edi) + adcl (%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%edi) + adcl $0, %esi + movl %esi, 24(%edi) + addl $12, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end80: + .size mcl_fp_mulUnitPre6Lbmi2, .Lfunc_end80-mcl_fp_mulUnitPre6Lbmi2 + + .globl 
mcl_fpDbl_mulPre6Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre6Lbmi2,@function +mcl_fpDbl_mulPre6Lbmi2: # @mcl_fpDbl_mulPre6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 80(%esp), %ebp + movl (%ebp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 4(%ebp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl (%eax), %eax + mulxl %eax, %esi, %edi + movl %ecx, %edx + mulxl %eax, %edx, %ecx + movl %edx, 28(%esp) # 4-byte Spill + addl %esi, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 8(%ebp), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %eax, %esi, %ebx + adcl %edi, %esi + movl 12(%ebp), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %ebp, %ecx + mulxl %eax, %edi, %ebp + adcl %ebx, %edi + movl 16(%ecx), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %eax, %ebx, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl %ecx, %edx + movl 20(%edx), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %eax, %eax, %ecx + adcl 20(%esp), %eax # 4-byte Folded Reload + movl 76(%esp), %edx + movl 28(%esp), %ebp # 4-byte Reload + movl %ebp, (%edx) + adcl $0, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 84(%esp), %edx + movl 4(%edx), %ebp + movl 52(%esp), %edx # 4-byte Reload + mulxl %ebp, %edx, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + addl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + mulxl %ebp, %ecx, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl %esi, %ecx + movl 44(%esp), %edx # 4-byte Reload + mulxl %ebp, %esi, %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl %edi, %esi + movl 40(%esp), %edx # 4-byte Reload + mulxl %ebp, %edi, %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl %ebx, %edi + movl 36(%esp), %edx # 4-byte Reload + mulxl %ebp, %ebx, %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl %eax, %ebx + movl 32(%esp), %edx # 4-byte Reload + mulxl %ebp, %eax, %edx + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebp + sbbl %eax, %eax + andl $1, %eax + addl 28(%esp), %ecx # 4-byte Folded Reload + adcl 48(%esp), %esi # 4-byte Folded Reload + adcl 44(%esp), %edi # 4-byte Folded Reload + adcl 40(%esp), %ebx # 4-byte Folded Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 28(%esp) # 4-byte Spill + adcl %edx, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl 52(%esp), %edx # 4-byte Reload + movl %edx, 4(%eax) + movl 80(%esp), %eax + movl (%eax), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl 8(%eax), %eax + mulxl %eax, %edx, %ebp + movl %ebp, 16(%esp) # 4-byte Spill + addl %ecx, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 80(%esp), %ebp + movl 4(%ebp), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl %esi, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 8(%ebp), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edx + movl %edx, 8(%esp) # 4-byte Spill + adcl %edi, %ecx + movl %ecx, %esi + movl 12(%ebp), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %eax, %edi, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + adcl %ebx, %edi + movl 16(%ebp), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %eax, %ebx, %ecx + movl %ecx, (%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl 20(%ebp), %edx + movl %edx, 28(%esp) # 4-byte Spill + mulxl %eax, %ebp, %edx + adcl 20(%esp), %ebp # 4-byte Folded Reload + sbbl %ecx, %ecx + andl $1, %ecx + movl 16(%esp), %eax # 4-byte Reload + addl %eax, 
52(%esp) # 4-byte Folded Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + adcl 4(%esp), %ebx # 4-byte Folded Reload + adcl (%esp), %ebp # 4-byte Folded Reload + adcl %edx, %ecx + movl 76(%esp), %eax + movl 24(%esp), %edx # 4-byte Reload + movl %edx, 8(%eax) + movl 84(%esp), %eax + movl 12(%eax), %eax + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %esi + movl %esi, 24(%esp) # 4-byte Spill + addl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %esi + movl %esi, 44(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + mulxl %eax, %esi, %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl %edi, %esi + movl 36(%esp), %edx # 4-byte Reload + mulxl %eax, %edi, %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl %ebx, %edi + movl 32(%esp), %edx # 4-byte Reload + mulxl %eax, %ebx, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl 28(%esp), %edx # 4-byte Reload + mulxl %eax, %ebp, %edx + adcl %ecx, %ebp + sbbl %eax, %eax + andl $1, %eax + movl 24(%esp), %ecx # 4-byte Reload + addl %ecx, 52(%esp) # 4-byte Folded Spill + adcl 44(%esp), %esi # 4-byte Folded Reload + adcl 40(%esp), %edi # 4-byte Folded Reload + adcl 36(%esp), %ebx # 4-byte Folded Reload + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 80(%esp), %ecx + movl (%ecx), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl 16(%eax), %eax + mulxl %eax, %edx, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + addl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl 80(%esp), %ecx + movl 4(%ecx), %edx + movl %edx, 52(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edx + movl %edx, 16(%esp) # 4-byte Spill + adcl %esi, %ecx + movl 80(%esp), %edx + movl 8(%edx), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %eax, %esi, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl %edi, %esi + movl %esi, %edi + movl 80(%esp), %esi + movl %esi, %edx + movl 12(%edx), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %eax, %esi, %edx + movl %edx, 8(%esp) # 4-byte Spill + adcl %ebx, %esi + movl 80(%esp), %edx + movl 16(%edx), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %eax, %ebx, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl 80(%esp), %edx + movl 20(%edx), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %eax, %ebp, %edx + adcl 24(%esp), %ebp # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 20(%esp), %ecx # 4-byte Folded Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + adcl 8(%esp), %ebx # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl 28(%esp), %edx # 4-byte Reload + movl %edx, 16(%eax) + movl 84(%esp), %eax + movl 20(%eax), %eax + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %edi + movl %edi, 28(%esp) # 4-byte Spill + addl %ecx, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %edi + movl 40(%esp), %edx # 4-byte Reload + mulxl 
%eax, %ecx, %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl %esi, %ecx + movl 44(%esp), %edx # 4-byte Reload + mulxl %eax, %esi, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl %ebx, %esi + movl 36(%esp), %edx # 4-byte Reload + mulxl %eax, %ebx, %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl 32(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + sbbl %ebp, %ebp + andl $1, %ebp + addl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 52(%esp), %ecx # 4-byte Folded Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + movl 76(%esp), %eax + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 20(%eax) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 24(%eax) + movl %ecx, 28(%eax) + movl %esi, 32(%eax) + movl %ebx, 36(%eax) + movl %edx, 40(%eax) + adcl 44(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%eax) + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end81: + .size mcl_fpDbl_mulPre6Lbmi2, .Lfunc_end81-mcl_fpDbl_mulPre6Lbmi2 + + .globl mcl_fpDbl_sqrPre6Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre6Lbmi2,@function +mcl_fpDbl_sqrPre6Lbmi2: # @mcl_fpDbl_sqrPre6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 84(%esp), %ebp + movl (%ebp), %ecx + movl 4(%ebp), %eax + movl %eax, %edx + mulxl %ecx, %edi, %esi + movl %edi, 36(%esp) # 4-byte Spill + movl %esi, 52(%esp) # 4-byte Spill + movl %ecx, %edx + mulxl %ecx, %ebx, %edx + movl %ebx, 28(%esp) # 4-byte Spill + addl %edi, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 8(%ebp), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %ecx, %edi, %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl %esi, %edi + movl 12(%ebp), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %ecx, %ebx, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl 16(%ebp), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %ecx, %edx, %esi + movl %esi, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl 20(%ebp), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %ecx, %ebp, %edx + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl 80(%esp), %ecx + movl 28(%esp), %esi # 4-byte Reload + movl %esi, (%ecx) + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + addl %ecx, 56(%esp) # 4-byte Folded Spill + movl %eax, %edx + mulxl %eax, %esi, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl %edi, %esi + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl %ebx, %ecx + movl 44(%esp), %edx # 4-byte Reload + mulxl %eax, %ebx, %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl 40(%esp), %edx # 4-byte Reload + mulxl %eax, %edi, %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl %ebp, %edi + movl 32(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %eax + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, %ebp + sbbl %edx, %edx + andl $1, %edx + addl 52(%esp), %esi # 4-byte Folded Reload + adcl 36(%esp), %ecx # 4-byte Folded Reload + adcl 48(%esp), %ebx # 4-byte Folded Reload + adcl 44(%esp), %edi # 4-byte Folded Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 32(%esp) # 4-byte Spill + adcl %eax, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 
80(%esp), %eax + movl 56(%esp), %edx # 4-byte Reload + movl %edx, 4(%eax) + movl 84(%esp), %eax + movl (%eax), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 8(%eax), %ebp + mulxl %ebp, %edx, %eax + movl %eax, 20(%esp) # 4-byte Spill + addl %esi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl 4(%eax), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %ebp, %edx, %esi + movl %esi, 16(%esp) # 4-byte Spill + adcl %ecx, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %ebp, %edx + mulxl %ebp, %ecx, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %eax, %esi + movl 12(%esi), %eax + movl %eax, %edx + mulxl %ebp, %ebx, %edx + movl %ebx, 28(%esp) # 4-byte Spill + movl %edx, 52(%esp) # 4-byte Spill + adcl %ebx, %edi + movl 16(%esi), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %ebp, %ebx, %edx + movl %edx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl 20(%esi), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %ebp, %esi, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + sbbl %ebp, %ebp + andl $1, %ebp + movl 20(%esp), %edx # 4-byte Reload + addl %edx, 56(%esp) # 4-byte Folded Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + adcl 52(%esp), %ebx # 4-byte Folded Reload + adcl 8(%esp), %esi # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + addl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %eax, %edx + mulxl %eax, %ecx, %edx + movl %edx, 28(%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl %esi, %ecx + movl %ecx, %esi + movl 32(%esp), %edx # 4-byte Reload + mulxl %eax, %ebx, %edx + adcl %ebp, %ebx + sbbl %ecx, %ecx + andl $1, %ecx + movl 20(%esp), %eax # 4-byte Reload + addl %eax, 48(%esp) # 4-byte Folded Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl 56(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 52(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + adcl %edx, %ecx + movl 80(%esp), %eax + movl 44(%esp), %edx # 4-byte Reload + movl %edx, 8(%eax) + movl 24(%esp), %edx # 4-byte Reload + movl %edx, 12(%eax) + movl 84(%esp), %esi + movl (%esi), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 16(%esi), %ebp + mulxl %ebp, %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + addl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl 4(%esi), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %ebp, %eax, %edx + movl %edx, 16(%esp) # 4-byte Spill + adcl %edi, %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 8(%esi), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %ebp, %eax, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 12(%esi), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %ebp, %edi, %eax + movl %eax, 4(%esp) # 4-byte Spill + adcl 52(%esp), 
%edi # 4-byte Folded Reload + movl %ebp, %edx + mulxl %ebp, %eax, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %ebx, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 20(%esi), %ebx + movl %ebx, %edx + mulxl %ebp, %edx, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl %edx, 28(%esp) # 4-byte Spill + adcl %edx, %ecx + sbbl %ebp, %ebp + andl $1, %ebp + movl 8(%esp), %esi # 4-byte Reload + addl 24(%esp), %esi # 4-byte Folded Reload + movl 16(%esp), %edx # 4-byte Reload + adcl %edx, 56(%esp) # 4-byte Folded Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl 4(%esp), %edx # 4-byte Reload + adcl %edx, 52(%esp) # 4-byte Folded Spill + adcl (%esp), %ecx # 4-byte Folded Reload + adcl %eax, %ebp + movl 44(%esp), %edx # 4-byte Reload + mulxl %ebx, %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + addl %esi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + mulxl %ebx, %edx, %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, %eax + movl 36(%esp), %edx # 4-byte Reload + mulxl %ebx, %esi, %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl %edi, %esi + movl 32(%esp), %edx # 4-byte Reload + mulxl %ebx, %edi, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ebx, %edx + mulxl %ebx, %ebx, %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl %ebp, %ebx + sbbl %ebp, %ebp + andl $1, %ebp + addl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl 80(%esp), %eax + movl 48(%esp), %edx # 4-byte Reload + movl %edx, 16(%eax) + movl 44(%esp), %edx # 4-byte Reload + movl %edx, 20(%eax) + movl 52(%esp), %edx # 4-byte Reload + movl %edx, 24(%eax) + movl %esi, 28(%eax) + movl %edi, 32(%eax) + movl %ecx, 36(%eax) + movl %ebx, 40(%eax) + adcl 56(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%eax) + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end82: + .size mcl_fpDbl_sqrPre6Lbmi2, .Lfunc_end82-mcl_fpDbl_sqrPre6Lbmi2 + + .globl mcl_fp_mont6Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont6Lbmi2,@function +mcl_fp_mont6Lbmi2: # @mcl_fp_mont6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $100, %esp + movl 124(%esp), %eax + movl 20(%eax), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 128(%esp), %ecx + movl (%ecx), %ecx + mulxl %ecx, %edx, %ebp + movl %edx, 96(%esp) # 4-byte Spill + movl 16(%eax), %edx + movl %edx, 72(%esp) # 4-byte Spill + mulxl %ecx, %edx, %edi + movl %edx, 92(%esp) # 4-byte Spill + movl 12(%eax), %edx + movl %edx, 68(%esp) # 4-byte Spill + mulxl %ecx, %edx, %esi + movl %edx, 88(%esp) # 4-byte Spill + movl (%eax), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %ecx, %eax, %edx + movl %eax, 80(%esp) # 4-byte Spill + movl %edx, 84(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl %ecx, %ebx, %edx + movl %ebx, 16(%esp) # 4-byte Spill + addl 80(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl 8(%eax), %edx + movl %edx, 56(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %eax + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + adcl 92(%esp), %esi # 4-byte Folded 
Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 96(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 132(%esp), %edi + movl -4(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %ebx, %edx + imull %eax, %edx + movl (%edi), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 4(%edi), %esi + movl %esi, 96(%esp) # 4-byte Spill + mulxl %esi, %esi, %ebp + mulxl %eax, %ecx, %eax + movl %ecx, 12(%esp) # 4-byte Spill + addl %esi, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 8(%edi), %eax + movl %eax, 88(%esp) # 4-byte Spill + mulxl %eax, %ecx, %esi + adcl %ebp, %ecx + movl 12(%edi), %eax + movl %eax, 84(%esp) # 4-byte Spill + mulxl %eax, %ebx, %eax + adcl %esi, %ebx + movl 16(%edi), %esi + movl %esi, 80(%esp) # 4-byte Spill + mulxl %esi, %esi, %ebp + adcl %eax, %esi + movl 20(%edi), %eax + movl %eax, 76(%esp) # 4-byte Spill + mulxl %eax, %edi, %eax + adcl %ebp, %edi + adcl $0, %eax + movl 12(%esp), %edx # 4-byte Reload + addl 16(%esp), %edx # 4-byte Folded Reload + movl 44(%esp), %edx # 4-byte Reload + adcl %edx, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 128(%esp), %edx + movl 4(%edx), %edx + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload + mulxl 52(%esp), %ebx, %ebp # 4-byte Folded Reload + addl %eax, %ebp + movl %ebp, 28(%esp) # 4-byte Spill + mulxl 56(%esp), %esi, %ebp # 4-byte Folded Reload + adcl %ecx, %esi + mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %ebp, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + movl 40(%esp), %eax # 4-byte Reload + adcl 4(%esp), %eax # 4-byte Folded Reload + movl 44(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + movl 20(%esp), %ebp # 4-byte Reload + adcl %ebp, 28(%esp) # 4-byte Folded Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl %edi, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl %ebx, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, %edi + mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, %eax + mulxl 84(%esp), %esi, %ebx # 4-byte Folded Reload + adcl %ecx, %esi + mulxl 80(%esp), %ecx, %ebp # 4-byte Folded Reload + adcl %ebx, %ecx + movl %ecx, %ebx + mulxl 76(%esp), 
%ecx, %edx # 4-byte Folded Reload + adcl %ebp, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, %ecx + movl 36(%esp), %edx # 4-byte Reload + andl $1, %edx + movl 12(%esp), %ebp # 4-byte Reload + addl 4(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 16(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 128(%esp), %edx + movl 8(%edx), %edx + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + mulxl 48(%esp), %ebx, %edi # 4-byte Folded Reload + mulxl 52(%esp), %eax, %esi # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + addl %ebx, %esi + mulxl 56(%esp), %ecx, %ebp # 4-byte Folded Reload + adcl %edi, %ecx + mulxl 68(%esp), %edi, %eax # 4-byte Folded Reload + adcl %ebp, %edi + adcl (%esp), %eax # 4-byte Folded Reload + movl 40(%esp), %ebp # 4-byte Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + movl 44(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 4(%esp), %ebx # 4-byte Reload + addl 28(%esp), %ebx # 4-byte Folded Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 40(%esp) # 4-byte Spill + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 32(%esp) # 4-byte Spill + movl %ebx, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 92(%esp), %ebp, %esi # 4-byte Folded Reload + movl %ebp, 36(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 28(%esp) # 4-byte Spill + mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, 24(%esp) # 4-byte Spill + mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, %ecx + mulxl 80(%esp), %esi, %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl %eax, %esi + mulxl 76(%esp), %ebp, %eax # 4-byte Folded Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl $0, %eax + movl 32(%esp), %edx # 4-byte Reload + andl $1, %edx + addl %ebx, 36(%esp) # 4-byte Folded Spill + movl 20(%esp), %ebx # 4-byte Reload + adcl %ebx, 28(%esp) # 4-byte Folded Spill + movl 16(%esp), %ebx # 4-byte Reload + adcl %ebx, 24(%esp) # 4-byte Folded Spill + adcl %edi, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 128(%esp), %edx + movl 12(%edx), %edx + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + 
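+# Descriptive note: this is one word-round of CIOS Montgomery multiplication
+# in mcl_fp_mont6Lbmi2. Each round multiplies the six cached limbs of a
+# (spilled at 52/48/56/68/72/64(%esp)) by one 32-bit word of the multiplier
+# loaded via 128(%esp), using BMI2 mulxl, which writes a lo/hi product pair
+# without modifying EFLAGS, so the adcl carry chain can be interleaved
+# freely between the multiplies.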
movl %eax, 40(%esp) # 4-byte Spill + mulxl 72(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + mulxl 48(%esp), %eax, %ebx # 4-byte Folded Reload + mulxl 52(%esp), %ecx, %edi # 4-byte Folded Reload + addl %eax, %edi + movl %edi, 44(%esp) # 4-byte Spill + mulxl 56(%esp), %eax, %edi # 4-byte Folded Reload + adcl %ebx, %eax + movl %eax, 20(%esp) # 4-byte Spill + mulxl 68(%esp), %ebx, %eax # 4-byte Folded Reload + adcl %edi, %ebx + adcl %esi, %eax + movl %eax, %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 4(%esp), %eax # 4-byte Folded Reload + movl 40(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 44(%esp) # 4-byte Spill + movl 16(%esp), %edi # 4-byte Reload + adcl %edi, 20(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + adcl %ebp, %esi + movl %esi, 28(%esp) # 4-byte Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, %ebx + mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, %edi + mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, %ecx + mulxl 80(%esp), %esi, %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + adcl %eax, %esi + movl %esi, %ebp + mulxl 76(%esp), %edx, %eax # 4-byte Folded Reload + adcl 12(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + adcl $0, %eax + movl 32(%esp), %edx # 4-byte Reload + andl $1, %edx + movl 16(%esp), %esi # 4-byte Reload + addl 4(%esp), %esi # 4-byte Folded Reload + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + movl 8(%esp), %esi # 4-byte Reload + adcl 36(%esp), %esi # 4-byte Folded Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 128(%esp), %edx + movl 16(%edx), %edx + mulxl 64(%esp), %eax, %ebp # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + mulxl 48(%esp), %ebx, %eax # 4-byte Folded Reload + mulxl 52(%esp), %ecx, %edi # 4-byte Folded Reload + addl %ebx, %edi + movl %edi, 28(%esp) # 4-byte Spill + mulxl 56(%esp), %edi, %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + adcl %eax, %edi + movl %edi, %ebx + mulxl 68(%esp), %edx, %eax # 4-byte Folded Reload + adcl 4(%esp), %edx # 4-byte Folded Reload + movl %edx, %edi + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %ebp, %edx + adcl $0, %edx + addl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 
8(%esp) # 4-byte Spill + movl 20(%esp), %ebp # 4-byte Reload + adcl %ebp, 28(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + movl 4(%esp), %ebx # 4-byte Reload + adcl %esi, %ebx + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 36(%esp) # 4-byte Spill + mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, 32(%esp) # 4-byte Spill + mulxl 84(%esp), %edi, %eax # 4-byte Folded Reload + adcl %ecx, %edi + mulxl 80(%esp), %esi, %ebp # 4-byte Folded Reload + adcl %eax, %esi + mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %ebp, %ecx + adcl $0, %eax + movl %eax, %ebp + movl 40(%esp), %edx # 4-byte Reload + andl $1, %edx + movl 12(%esp), %eax # 4-byte Reload + addl 8(%esp), %eax # 4-byte Folded Reload + movl 28(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl %ebx, %esi + movl %esi, 20(%esp) # 4-byte Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 128(%esp), %edx + movl 20(%edx), %edx + mulxl 48(%esp), %eax, %esi # 4-byte Folded Reload + mulxl 52(%esp), %ecx, %ebp # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + addl %eax, %ebp + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + mulxl 56(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl %esi, %ebx + mulxl 72(%esp), %esi, %eax # 4-byte Folded Reload + movl %esi, 72(%esp) # 4-byte Spill + mulxl 68(%esp), %esi, %edx # 4-byte Folded Reload + adcl %ecx, %esi + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %edx, %ecx + adcl 48(%esp), %eax # 4-byte Folded Reload + movl 64(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 52(%esp), %edi # 4-byte Reload + addl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + adcl 32(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 68(%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 48(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + imull %edi, %edx + mulxl 92(%esp), %eax, %edi # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload + addl %edi, %ecx + mulxl 88(%esp), %edi, %ebx # 4-byte Folded Reload + adcl %esi, %edi + movl %edx, %esi + mulxl 84(%esp), %ebp, %eax # 4-byte Folded Reload + 
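+# Descriptive note: the Montgomery fold. Just above, imull 60(%esp) formed
+# m = t[0] * n' (n' = -p^-1 mod 2^32, read from -4 of the modulus struct and
+# cached at 60(%esp)); the mulxl chain over the modulus limbs cached at
+# 92/96/88/84/80/76(%esp) adds m*p so the low limb of the accumulator
+# cancels and the running total shifts down one 32-bit limb.
+# A rough C-like sketch of the round (editorial, not in the original patch):
+#   uint32_t m = (uint32_t)(t[0] * nprime);  // nprime = -p^-1 mod 2^32
+#   t += (uint64_t)m * p;                    // low limb becomes zero
+#   t >>= 32;                                // drop the zeroed limb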
adcl %ebx, %ebp + movl %esi, %edx + mulxl 80(%esp), %ebx, %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl %eax, %ebx + movl %esi, %edx + mulxl 76(%esp), %esi, %edx # 4-byte Folded Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + adcl $0, %edx + andl $1, 72(%esp) # 4-byte Folded Spill + movl 60(%esp), %eax # 4-byte Reload + addl 52(%esp), %eax # 4-byte Folded Reload + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 68(%esp) # 4-byte Spill + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + adcl 44(%esp), %esi # 4-byte Folded Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + subl 92(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 72(%esp) # 4-byte Spill + movl %edi, %ecx + sbbl 96(%esp), %ecx # 4-byte Folded Reload + movl %ebp, %edi + sbbl 88(%esp), %edi # 4-byte Folded Reload + movl %edi, 92(%esp) # 4-byte Spill + movl %esi, %edi + sbbl 84(%esp), %ebx # 4-byte Folded Reload + sbbl 80(%esp), %esi # 4-byte Folded Reload + movl %esi, 96(%esp) # 4-byte Spill + movl %edx, %esi + sbbl 76(%esp), %esi # 4-byte Folded Reload + sbbl $0, %eax + andl $1, %eax + jne .LBB83_2 +# BB#1: + movl %ecx, 68(%esp) # 4-byte Spill +.LBB83_2: + testb %al, %al + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB83_4 +# BB#3: + movl 72(%esp), %ecx # 4-byte Reload +.LBB83_4: + movl 120(%esp), %eax + movl %ecx, (%eax) + movl 68(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%eax) + jne .LBB83_6 +# BB#5: + movl 92(%esp), %ebp # 4-byte Reload +.LBB83_6: + movl %ebp, 8(%eax) + movl 60(%esp), %ecx # 4-byte Reload + jne .LBB83_8 +# BB#7: + movl %ebx, %ecx +.LBB83_8: + movl %ecx, 12(%eax) + jne .LBB83_10 +# BB#9: + movl 96(%esp), %edi # 4-byte Reload +.LBB83_10: + movl %edi, 16(%eax) + jne .LBB83_12 +# BB#11: + movl %esi, %edx +.LBB83_12: + movl %edx, 20(%eax) + addl $100, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end83: + .size mcl_fp_mont6Lbmi2, .Lfunc_end83-mcl_fp_mont6Lbmi2 + + .globl mcl_fp_montNF6Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF6Lbmi2,@function +mcl_fp_montNF6Lbmi2: # @mcl_fp_montNF6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $84, %esp + movl 108(%esp), %ebx + movl (%ebx), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 4(%ebx), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl (%eax), %eax + mulxl %eax, %ecx, %esi + movl %edi, %edx + mulxl %eax, %edx, %ebp + movl %edx, 76(%esp) # 4-byte Spill + addl %ecx, %ebp + movl 8(%ebx), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edi + adcl %esi, %ecx + movl %ecx, %esi + movl 12(%ebx), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edx + movl %edx, 80(%esp) # 4-byte Spill + adcl %edi, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 16(%ebx), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edi + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + movl 20(%ebx), %edx + movl %edx, 32(%esp) # 4-byte Spill + mulxl %eax, %ecx, %eax + adcl %edi, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 116(%esp), %ebx + movl -4(%ebx), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + movl %edi, %edx + imull %eax, %edx + movl (%ebx), %eax + movl %eax, 80(%esp) # 4-byte Spill + mulxl %eax, %ecx, %eax + 
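+# Descriptive note: mcl_fp_montNF6Lbmi2 follows the same word-by-word
+# Montgomery flow, but the NF variant tracks no extra top carry word (note
+# the absence of the sbbl/andl $1 masks used in mcl_fp_mont6Lbmi2): m*p[0]
+# is added so the low limb cancels exactly (the addl %edi, %ecx just below
+# only produces the carry), and the final result is fixed up by a signed
+# select in the epilogue instead of a carry mask.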
movl %eax, 16(%esp) # 4-byte Spill + addl %edi, %ecx + movl 4(%ebx), %eax + movl %eax, 76(%esp) # 4-byte Spill + mulxl %eax, %edi, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + adcl %ebp, %edi + movl 8(%ebx), %eax + movl %eax, 72(%esp) # 4-byte Spill + mulxl %eax, %eax, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + adcl %esi, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 12(%ebx), %eax + movl %eax, 68(%esp) # 4-byte Spill + mulxl %eax, %esi, %eax + movl %eax, 4(%esp) # 4-byte Spill + adcl 64(%esp), %esi # 4-byte Folded Reload + movl 16(%ebx), %eax + movl %eax, 64(%esp) # 4-byte Spill + mulxl %eax, %ecx, %eax + movl %eax, (%esp) # 4-byte Spill + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl 20(%ebx), %eax + movl %eax, 60(%esp) # 4-byte Spill + mulxl %eax, %ebp, %eax + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl 28(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + movl 12(%esp), %edi # 4-byte Reload + adcl %edi, 24(%esp) # 4-byte Folded Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl (%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl 4(%eax), %edx + mulxl 48(%esp), %ecx, %esi # 4-byte Folded Reload + mulxl 52(%esp), %ebp, %eax # 4-byte Folded Reload + addl %ecx, %eax + movl %eax, 4(%esp) # 4-byte Spill + mulxl 44(%esp), %ecx, %edi # 4-byte Folded Reload + adcl %esi, %ecx + movl %ecx, %esi + mulxl 40(%esp), %eax, %ebx # 4-byte Folded Reload + adcl %edi, %eax + mulxl 36(%esp), %ecx, %edi # 4-byte Folded Reload + movl %edi, (%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %ecx, %edi + mulxl 32(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + adcl $0, %ecx + movl %ecx, %edx + addl 20(%esp), %ebp # 4-byte Folded Reload + movl 4(%esp), %ecx # 4-byte Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %ebp, %esi + movl %esi, %edx + imull 56(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ebp, %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + addl %esi, %ebp + mulxl 76(%esp), %ebp, %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl %ecx, %ebp + movl %ebp, %esi + mulxl 72(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %ecx + mulxl 68(%esp), %ebp, %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + adcl %eax, %ebp + movl %ebp, %eax + mulxl 64(%esp), %ebp, %edi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + adcl 20(%esp), %ebp # 4-byte Folded Reload + mulxl 60(%esp), %edi, %edx # 4-byte Folded Reload + adcl %ebx, %edi + movl %edi, %ebx + movl 28(%esp), %edi # 4-byte Reload + adcl $0, %edi + addl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 
8(%esp) # 4-byte Spill + adcl 4(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + adcl %edx, %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl 8(%eax), %edx + mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload + mulxl 52(%esp), %esi, %edi # 4-byte Folded Reload + movl %esi, (%esp) # 4-byte Spill + addl %eax, %edi + mulxl 44(%esp), %eax, %esi # 4-byte Folded Reload + adcl %ecx, %eax + movl %eax, %ecx + mulxl 40(%esp), %eax, %ebx # 4-byte Folded Reload + adcl %esi, %eax + movl %eax, 24(%esp) # 4-byte Spill + mulxl 36(%esp), %eax, %ebp # 4-byte Folded Reload + adcl %ebx, %eax + movl %eax, %esi + mulxl 32(%esp), %ebx, %eax # 4-byte Folded Reload + adcl %ebp, %ebx + adcl $0, %eax + movl %eax, %edx + movl (%esp), %ebp # 4-byte Reload + addl 20(%esp), %ebp # 4-byte Folded Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + adcl 4(%esp), %esi # 4-byte Folded Reload + movl %esi, (%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %ebp, %edx + movl %ebp, %eax + imull 56(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + addl %eax, %ebp + mulxl 76(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl %edi, %ebp + movl %ebp, %edi + mulxl 72(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %ecx + mulxl 68(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %eax + mulxl 64(%esp), %ebp, %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl (%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + mulxl 60(%esp), %ebp, %edx # 4-byte Folded Reload + adcl %ebx, %ebp + movl 28(%esp), %esi # 4-byte Reload + adcl $0, %esi + addl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + movl 4(%esp), %eax # 4-byte Reload + adcl %eax, 20(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 4(%esp) # 4-byte Spill + adcl %edx, %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl 12(%eax), %edx + mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload + mulxl 52(%esp), %ebp, %esi # 4-byte Folded Reload + addl %eax, %esi + mulxl 44(%esp), %eax, %edi # 4-byte Folded Reload + adcl %ecx, %eax + mulxl 40(%esp), %ecx, %ebx # 4-byte Folded Reload + adcl %edi, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + mulxl 36(%esp), %ecx, %edi # 4-byte Folded Reload + movl %edi, (%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %ecx, %edi + mulxl 32(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + adcl $0, %ecx + addl 16(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %esi # 4-byte Folded Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 
4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + adcl $0, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl %ebp, %ecx + movl %ecx, %edx + imull 56(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + addl %ecx, %ebp + mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl %esi, %ecx + mulxl 72(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl (%esp), %ebp # 4-byte Folded Reload + mulxl 68(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, %eax + mulxl 64(%esp), %esi, %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + mulxl 60(%esp), %esi, %edx # 4-byte Folded Reload + adcl %ebx, %esi + movl 28(%esp), %edi # 4-byte Reload + adcl $0, %edi + addl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + movl 4(%esp), %eax # 4-byte Reload + adcl %eax, 20(%esp) # 4-byte Folded Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + adcl %edx, %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl 16(%eax), %edx + mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload + mulxl 52(%esp), %esi, %edi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + addl %eax, %edi + mulxl 44(%esp), %eax, %esi # 4-byte Folded Reload + adcl %ecx, %eax + mulxl 40(%esp), %ecx, %ebx # 4-byte Folded Reload + adcl %esi, %ecx + mulxl 36(%esp), %esi, %ebp # 4-byte Folded Reload + adcl %ebx, %esi + mulxl 32(%esp), %ebx, %edx # 4-byte Folded Reload + adcl %ebp, %ebx + adcl $0, %edx + movl 24(%esp), %ebp # 4-byte Reload + addl 16(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + adcl 4(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %ebp, %edx + imull 56(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + addl 24(%esp), %ebp # 4-byte Folded Reload + mulxl 76(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + adcl %edi, %ebp + mulxl 72(%esp), %edi, %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + mulxl 68(%esp), %eax, %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + adcl %ecx, %eax + mulxl 64(%esp), %ecx, %edi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + adcl %esi, %ecx + movl %ecx, %edi + mulxl 60(%esp), %ecx, %edx # 4-byte Folded Reload + adcl %ebx, %ecx + movl 28(%esp), %esi # 4-byte Reload + adcl $0, %esi + addl 20(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + movl 16(%esp), %ebx # 4-byte Reload + adcl %ebx, 24(%esp) # 4-byte Folded Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl 4(%esp), %ecx 
# 4-byte Folded Reload + adcl %edx, %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl 20(%eax), %edx + mulxl 48(%esp), %ebx, %eax # 4-byte Folded Reload + mulxl 52(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + addl %ebx, %esi + mulxl 44(%esp), %ebx, %ebp # 4-byte Folded Reload + adcl %eax, %ebx + mulxl 40(%esp), %eax, %edi # 4-byte Folded Reload + movl %edi, 48(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, %ebp + mulxl 36(%esp), %eax, %edi # 4-byte Folded Reload + movl %edi, 44(%esp) # 4-byte Spill + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + mulxl 32(%esp), %edx, %eax # 4-byte Folded Reload + adcl 44(%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + movl 52(%esp), %edi # 4-byte Reload + addl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl 16(%esp), %ebx # 4-byte Folded Reload + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 40(%esp) # 4-byte Spill + adcl %ecx, 48(%esp) # 4-byte Folded Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl $0, %eax + movl 56(%esp), %edx # 4-byte Reload + movl 52(%esp), %ebp # 4-byte Reload + imull %ebp, %edx + mulxl 80(%esp), %ecx, %edi # 4-byte Folded Reload + movl %edi, 56(%esp) # 4-byte Spill + addl %ebp, %ecx + mulxl 76(%esp), %ebp, %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl %esi, %ebp + mulxl 72(%esp), %ecx, %esi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %edx, %ebx + mulxl 68(%esp), %esi, %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %ebx, %edx + mulxl 64(%esp), %edi, %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %ebx, %edx + mulxl 60(%esp), %ebx, %edx # 4-byte Folded Reload + adcl 44(%esp), %ebx # 4-byte Folded Reload + adcl $0, %eax + addl 56(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 56(%esp) # 4-byte Spill + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 44(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + adcl 40(%esp), %ebx # 4-byte Folded Reload + adcl %edx, %eax + movl %ebp, %edx + subl 80(%esp), %edx # 4-byte Folded Reload + sbbl 76(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + movl %esi, %ebp + movl %ebx, %esi + sbbl 72(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 76(%esp) # 4-byte Spill + sbbl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 80(%esp) # 4-byte Spill + movl %esi, %ebx + sbbl 64(%esp), %ebx # 4-byte Folded Reload + movl %eax, %edi + sbbl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, %ebp + sarl $31, %ebp + testl %ebp, %ebp + js .LBB84_2 +# BB#1: + movl %edx, 56(%esp) # 4-byte Spill +.LBB84_2: + movl 104(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, (%ebp) + movl 44(%esp), %ecx # 4-byte Reload + js .LBB84_4 +# BB#3: + movl 40(%esp), %ecx # 4-byte Reload +.LBB84_4: + movl %ecx, 4(%ebp) + movl 52(%esp), %ecx # 4-byte Reload + movl 48(%esp), %edx # 4-byte Reload + js .LBB84_6 +# BB#5: + movl 76(%esp), %edx # 4-byte Reload +.LBB84_6: + movl %edx, 8(%ebp) + js .LBB84_8 +# BB#7: + movl 80(%esp), %ecx # 4-byte Reload +.LBB84_8: + movl %ecx, 12(%ebp) + js .LBB84_10 +# BB#9: + movl %ebx, %esi 
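+# Descriptive note: result selection for montNF. The subl/sbbl chain above
+# computed t - p; sarl $31 of the top borrow word turned the borrow into a
+# sign, and each js skips the overwrite when the subtraction borrowed
+# (keeping t), otherwise the reduced t - p limb is stored.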
+.LBB84_10: + movl %esi, 16(%ebp) + js .LBB84_12 +# BB#11: + movl %edi, %eax +.LBB84_12: + movl %eax, 20(%ebp) + addl $84, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end84: + .size mcl_fp_montNF6Lbmi2, .Lfunc_end84-mcl_fp_montNF6Lbmi2 + + .globl mcl_fp_montRed6Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed6Lbmi2,@function +mcl_fp_montRed6Lbmi2: # @mcl_fp_montRed6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $88, %esp + movl 116(%esp), %edi + movl -4(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl (%edi), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 112(%esp), %ecx + movl (%ecx), %edx + movl %edx, 84(%esp) # 4-byte Spill + imull %eax, %edx + movl 20(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + mulxl %eax, %ebx, %eax + movl %ebx, 56(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl 16(%edi), %eax + movl %eax, 80(%esp) # 4-byte Spill + mulxl %eax, %ecx, %eax + movl %ecx, 44(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl 12(%edi), %eax + movl %eax, 76(%esp) # 4-byte Spill + mulxl %eax, %ecx, %eax + movl %ecx, 28(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl 4(%edi), %eax + movl %eax, 72(%esp) # 4-byte Spill + mulxl %eax, %ebx, %eax + movl %eax, 24(%esp) # 4-byte Spill + mulxl %esi, %ecx, %eax + movl %ecx, 48(%esp) # 4-byte Spill + addl %ebx, %eax + movl %eax, %ebp + movl 8(%edi), %esi + movl %esi, 64(%esp) # 4-byte Spill + mulxl %esi, %eax, %edx + adcl 24(%esp), %eax # 4-byte Folded Reload + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, %ebx + movl 32(%esp), %edi # 4-byte Reload + adcl 44(%esp), %edi # 4-byte Folded Reload + movl 36(%esp), %esi # 4-byte Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + movl 40(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 48(%esp), %ecx # 4-byte Reload + addl 84(%esp), %ecx # 4-byte Folded Reload + movl 112(%esp), %ecx + adcl 4(%ecx), %ebp + adcl 8(%ecx), %eax + movl %eax, 16(%esp) # 4-byte Spill + adcl 12(%ecx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + adcl 16(%ecx), %edi + movl %edi, 32(%esp) # 4-byte Spill + adcl 20(%ecx), %esi + movl %esi, 36(%esp) # 4-byte Spill + adcl 24(%ecx), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl 40(%ecx), %esi + movl 36(%ecx), %edi + movl 32(%ecx), %ebx + movl 28(%ecx), %eax + adcl $0, %eax + movl %eax, 12(%esp) # 4-byte Spill + adcl $0, %ebx + movl %ebx, 28(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 44(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + sbbl %ecx, %ecx + andl $1, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %ebp, %ebx + movl %ebx, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 68(%esp), %esi, %ebp # 4-byte Folded Reload + movl %esi, 8(%esp) # 4-byte Spill + addl %ecx, %ebp + mulxl 64(%esp), %edi, %ecx # 4-byte Folded Reload + adcl %eax, %edi + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + mulxl 80(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + movl 52(%esp), %esi # 4-byte Reload + mulxl %esi, %edx, %eax + adcl (%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + addl %ebx, 8(%esp) # 4-byte Folded Spill + adcl 16(%esp), %ebp # 4-byte Folded 
Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + adcl %edi, 24(%esp) # 4-byte Folded Spill + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + adcl $0, 48(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ebp, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl %esi, %ecx, %eax + movl %ecx, 12(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + mulxl 64(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + mulxl 72(%esp), %edi, %eax # 4-byte Folded Reload + mulxl 68(%esp), %ecx, %ebx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + addl %edi, %ebx + adcl %esi, %eax + movl %eax, %esi + mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload + adcl 4(%esp), %ecx # 4-byte Folded Reload + mulxl 80(%esp), %edi, %edx # 4-byte Folded Reload + adcl %eax, %edi + movl %edi, %eax + adcl 12(%esp), %edx # 4-byte Folded Reload + movl %edx, %edi + movl 40(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %ebp, 8(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + adcl $0, 48(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ebx, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + mulxl 64(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 68(%esp), %edi, %ebp # 4-byte Folded Reload + addl %ecx, %ebp + adcl %esi, %eax + movl %eax, 32(%esp) # 4-byte Spill + mulxl 76(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, %esi + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + adcl 4(%esp), %ecx # 4-byte Folded Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %ebx, %edi + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl 20(%esp), %edi # 4-byte Reload + adcl %edi, 32(%esp) # 4-byte Folded Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl $0, 48(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ebp, %edx + imull 60(%esp), %edx # 4-byte Folded Reload + mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload 
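+# Descriptive note: mcl_fp_montRed6Lbmi2 repeats this fold once per limb of
+# the 12-limb double-width input at 112(%esp): m = t[0] * n' (imull
+# 60(%esp)), add m*p (limbs cached at 68/72/64/76/80/52(%esp)), shift the
+# accumulator down 32 bits; the high input words ride along as adcl $0
+# spills. A rough sketch (editorial, not in the original patch):
+#   for (i = 0; i < 6; i++) { m = t[0]*nprime; t += m*p; t >>= 32; }
+#   if (t >= p) t -= p;  // the jne/.LBB85_* blocks below do this select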
+ movl %ecx, 16(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + mulxl 64(%esp), %edi, %esi # 4-byte Folded Reload + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + mulxl 68(%esp), %ebx, %ecx # 4-byte Folded Reload + addl 40(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl %edi, %eax + movl %eax, %edi + mulxl 76(%esp), %eax, %ecx # 4-byte Folded Reload + adcl %esi, %eax + movl %eax, %esi + mulxl 80(%esp), %edx, %eax # 4-byte Folded Reload + adcl %ecx, %edx + movl %edx, %ecx + adcl 16(%esp), %eax # 4-byte Folded Reload + movl 44(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %ebp, %ebx + movl 40(%esp), %ebx # 4-byte Reload + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl 60(%esp), %edx # 4-byte Reload + imull %ebx, %edx + mulxl 68(%esp), %eax, %ecx # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + mulxl 72(%esp), %eax, %edi # 4-byte Folded Reload + addl %ecx, %eax + mulxl 64(%esp), %ebp, %ecx # 4-byte Folded Reload + adcl %edi, %ebp + movl %edx, %edi + mulxl 76(%esp), %esi, %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl %ecx, %esi + movl %edi, %edx + mulxl 80(%esp), %ebx, %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %edi, %edx + mulxl 52(%esp), %ecx, %edi # 4-byte Folded Reload + adcl 36(%esp), %ecx # 4-byte Folded Reload + adcl $0, %edi + movl 60(%esp), %edx # 4-byte Reload + addl 40(%esp), %edx # 4-byte Folded Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 60(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 56(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl $0, %edx + subl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl %ebp, %eax + sbbl 72(%esp), %eax # 4-byte Folded Reload + movl %esi, %ebp + sbbl 64(%esp), %ebp # 4-byte Folded Reload + sbbl 76(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 76(%esp) # 4-byte Spill + movl %ecx, %ebx + sbbl 80(%esp), %ebx # 4-byte Folded Reload + sbbl 52(%esp), %edi # 4-byte Folded Reload + sbbl $0, %edx + andl $1, %edx + movl %edx, 84(%esp) # 4-byte Spill + jne .LBB85_2 +# BB#1: + movl %eax, 60(%esp) # 4-byte Spill +.LBB85_2: + movl 84(%esp), %eax # 4-byte Reload + testb %al, %al + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB85_4 +# BB#3: + movl 68(%esp), %ecx # 4-byte Reload +.LBB85_4: + movl 108(%esp), %eax + movl %ecx, (%eax) + movl 60(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%eax) + movl 40(%esp), %ecx # 4-byte Reload + jne .LBB85_6 +# BB#5: + movl %ebp, %esi +.LBB85_6: + movl %esi, 8(%eax) + jne .LBB85_8 +# BB#7: + movl 76(%esp), %ecx # 4-byte 
Reload +.LBB85_8: + movl %ecx, 12(%eax) + movl 48(%esp), %ecx # 4-byte Reload + jne .LBB85_10 +# BB#9: + movl %ebx, %ecx +.LBB85_10: + movl %ecx, 16(%eax) + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB85_12 +# BB#11: + movl %edi, %ecx +.LBB85_12: + movl %ecx, 20(%eax) + addl $88, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end85: + .size mcl_fp_montRed6Lbmi2, .Lfunc_end85-mcl_fp_montRed6Lbmi2 + + .globl mcl_fp_addPre6Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre6Lbmi2,@function +mcl_fp_addPre6Lbmi2: # @mcl_fp_addPre6Lbmi2 +# BB#0: + pushl %esi + movl 16(%esp), %eax + movl (%eax), %ecx + movl 12(%esp), %edx + addl (%edx), %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 4(%eax), %ecx + adcl 4(%edx), %ecx + movl %ecx, 4(%esi) + movl 8(%eax), %ecx + adcl 8(%edx), %ecx + movl %ecx, 8(%esi) + movl 12(%edx), %ecx + adcl 12(%eax), %ecx + movl %ecx, 12(%esi) + movl 16(%edx), %ecx + adcl 16(%eax), %ecx + movl %ecx, 16(%esi) + movl 20(%eax), %eax + movl 20(%edx), %ecx + adcl %eax, %ecx + movl %ecx, 20(%esi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + retl +.Lfunc_end86: + .size mcl_fp_addPre6Lbmi2, .Lfunc_end86-mcl_fp_addPre6Lbmi2 + + .globl mcl_fp_subPre6Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre6Lbmi2,@function +mcl_fp_subPre6Lbmi2: # @mcl_fp_subPre6Lbmi2 +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %ecx + movl (%ecx), %edx + xorl %eax, %eax + movl 20(%esp), %esi + subl (%esi), %edx + movl 12(%esp), %edi + movl %edx, (%edi) + movl 4(%ecx), %edx + sbbl 4(%esi), %edx + movl %edx, 4(%edi) + movl 8(%ecx), %edx + sbbl 8(%esi), %edx + movl %edx, 8(%edi) + movl 12(%ecx), %edx + sbbl 12(%esi), %edx + movl %edx, 12(%edi) + movl 16(%ecx), %edx + sbbl 16(%esi), %edx + movl %edx, 16(%edi) + movl 20(%esi), %edx + movl 20(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 20(%edi) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + retl +.Lfunc_end87: + .size mcl_fp_subPre6Lbmi2, .Lfunc_end87-mcl_fp_subPre6Lbmi2 + + .globl mcl_fp_shr1_6Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_6Lbmi2,@function +mcl_fp_shr1_6Lbmi2: # @mcl_fp_shr1_6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl 20(%eax), %ecx + movl 16(%eax), %edx + movl 12(%eax), %esi + movl 8(%eax), %edi + movl (%eax), %ebx + movl 4(%eax), %eax + shrdl $1, %eax, %ebx + movl 20(%esp), %ebp + movl %ebx, (%ebp) + shrdl $1, %edi, %eax + movl %eax, 4(%ebp) + shrdl $1, %esi, %edi + movl %edi, 8(%ebp) + shrdl $1, %edx, %esi + movl %esi, 12(%ebp) + shrdl $1, %ecx, %edx + movl %edx, 16(%ebp) + shrl %ecx + movl %ecx, 20(%ebp) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end88: + .size mcl_fp_shr1_6Lbmi2, .Lfunc_end88-mcl_fp_shr1_6Lbmi2 + + .globl mcl_fp_add6Lbmi2 + .align 16, 0x90 + .type mcl_fp_add6Lbmi2,@function +mcl_fp_add6Lbmi2: # @mcl_fp_add6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $12, %esp + movl 40(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %ebp + movl 36(%esp), %ebx + addl (%ebx), %edx + adcl 4(%ebx), %ebp + movl 8(%eax), %ecx + adcl 8(%ebx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl %ecx, %esi + movl 12(%ebx), %ecx + movl 16(%ebx), %edi + adcl 12(%eax), %ecx + adcl 16(%eax), %edi + movl 20(%ebx), %ebx + adcl 20(%eax), %ebx + movl 32(%esp), %eax + movl %edx, (%eax) + movl %ebp, 4(%eax) + movl %esi, 8(%eax) + movl %ecx, 12(%eax) + movl %edi, 16(%eax) + movl %ebx, 20(%eax) + sbbl %eax, %eax + andl $1, %eax + movl 44(%esp), %esi + subl (%esi), %edx + movl %edx, (%esp) # 4-byte Spill + movl 8(%esp), %edx # 4-byte 
Reload + movl 44(%esp), %esi + sbbl 4(%esi), %ebp + movl %ebp, 4(%esp) # 4-byte Spill + movl %ecx, %ebp + sbbl 8(%esi), %edx + sbbl 12(%esi), %ebp + sbbl 16(%esi), %edi + sbbl 20(%esi), %ebx + sbbl $0, %eax + testb $1, %al + jne .LBB89_2 +# BB#1: # %nocarry + movl (%esp), %eax # 4-byte Reload + movl 32(%esp), %ecx + movl %eax, (%ecx) + movl 4(%esp), %eax # 4-byte Reload + movl %eax, 4(%ecx) + movl %edx, 8(%ecx) + movl %ebp, 12(%ecx) + movl %edi, 16(%ecx) + movl %ebx, 20(%ecx) +.LBB89_2: # %carry + addl $12, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end89: + .size mcl_fp_add6Lbmi2, .Lfunc_end89-mcl_fp_add6Lbmi2 + + .globl mcl_fp_addNF6Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF6Lbmi2,@function +mcl_fp_addNF6Lbmi2: # @mcl_fp_addNF6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 68(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %ecx + movl 64(%esp), %ebp + addl (%ebp), %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %edx, %ebx + adcl 4(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 20(%eax), %edx + movl 16(%eax), %esi + movl 12(%eax), %edi + movl 8(%eax), %eax + adcl 8(%ebp), %eax + movl %eax, 24(%esp) # 4-byte Spill + adcl 12(%ebp), %edi + movl %edi, 28(%esp) # 4-byte Spill + adcl 16(%ebp), %esi + movl %esi, 32(%esp) # 4-byte Spill + adcl 20(%ebp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ebx, %ebp + movl 72(%esp), %ebx + subl (%ebx), %ebp + movl %ebp, (%esp) # 4-byte Spill + movl %ecx, %ebp + movl 72(%esp), %ecx + sbbl 4(%ecx), %ebp + movl %ebp, 4(%esp) # 4-byte Spill + sbbl 8(%ecx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + sbbl 12(%ecx), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl %esi, %edi + sbbl 16(%ecx), %edi + movl %edx, %esi + sbbl 20(%ecx), %esi + movl %esi, %ebx + sarl $31, %ebx + testl %ebx, %ebx + js .LBB90_2 +# BB#1: + movl (%esp), %eax # 4-byte Reload +.LBB90_2: + movl 60(%esp), %ebx + movl %eax, (%ebx) + movl 20(%esp), %ecx # 4-byte Reload + js .LBB90_4 +# BB#3: + movl 4(%esp), %ecx # 4-byte Reload +.LBB90_4: + movl %ecx, 4(%ebx) + movl 36(%esp), %eax # 4-byte Reload + movl 28(%esp), %edx # 4-byte Reload + movl 24(%esp), %ecx # 4-byte Reload + js .LBB90_6 +# BB#5: + movl 8(%esp), %ecx # 4-byte Reload +.LBB90_6: + movl %ecx, 8(%ebx) + movl 32(%esp), %ecx # 4-byte Reload + js .LBB90_8 +# BB#7: + movl 12(%esp), %edx # 4-byte Reload +.LBB90_8: + movl %edx, 12(%ebx) + js .LBB90_10 +# BB#9: + movl %edi, %ecx +.LBB90_10: + movl %ecx, 16(%ebx) + js .LBB90_12 +# BB#11: + movl %esi, %eax +.LBB90_12: + movl %eax, 20(%ebx) + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end90: + .size mcl_fp_addNF6Lbmi2, .Lfunc_end90-mcl_fp_addNF6Lbmi2 + + .globl mcl_fp_sub6Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub6Lbmi2,@function +mcl_fp_sub6Lbmi2: # @mcl_fp_sub6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $16, %esp + movl 40(%esp), %ebx + movl (%ebx), %esi + movl 4(%ebx), %edi + movl 44(%esp), %ecx + subl (%ecx), %esi + sbbl 4(%ecx), %edi + movl %edi, (%esp) # 4-byte Spill + movl 8(%ebx), %eax + sbbl 8(%ecx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 12(%ebx), %eax + sbbl 12(%ecx), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 16(%ebx), %ebp + sbbl 16(%ecx), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 20(%ebx), %edx + sbbl 20(%ecx), %edx + movl $0, %ecx + sbbl $0, %ecx + testb $1, %cl + movl 36(%esp), %ebx + movl %esi, (%ebx) + movl %edi, 4(%ebx) + movl 12(%esp), %edi # 4-byte Reload + movl %edi, 
8(%ebx) + movl %eax, 12(%ebx) + movl %ebp, 16(%ebx) + movl %edx, 20(%ebx) + je .LBB91_2 +# BB#1: # %carry + movl 48(%esp), %ecx + addl (%ecx), %esi + movl %esi, (%ebx) + movl (%esp), %eax # 4-byte Reload + adcl 4(%ecx), %eax + adcl 8(%ecx), %edi + movl %eax, 4(%ebx) + movl 12(%ecx), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %edi, 8(%ebx) + movl %eax, 12(%ebx) + movl 16(%ecx), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%ebx) + movl 20(%ecx), %eax + adcl %edx, %eax + movl %eax, 20(%ebx) +.LBB91_2: # %nocarry + addl $16, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end91: + .size mcl_fp_sub6Lbmi2, .Lfunc_end91-mcl_fp_sub6Lbmi2 + + .globl mcl_fp_subNF6Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF6Lbmi2,@function +mcl_fp_subNF6Lbmi2: # @mcl_fp_subNF6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $24, %esp + movl 48(%esp), %ebx + movl 20(%ebx), %esi + movl (%ebx), %ecx + movl 4(%ebx), %eax + movl 52(%esp), %ebp + subl (%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 4(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 16(%ebx), %eax + movl 12(%ebx), %ecx + movl 8(%ebx), %edx + sbbl 8(%ebp), %edx + movl %edx, 4(%esp) # 4-byte Spill + sbbl 12(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + sbbl 16(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl %esi, %edx + sbbl 20(%ebp), %edx + movl %edx, (%esp) # 4-byte Spill + movl %edx, %ebp + sarl $31, %ebp + movl %ebp, %ecx + addl %ecx, %ecx + movl %ebp, %eax + adcl %eax, %eax + shrl $31, %edx + orl %ecx, %edx + movl 56(%esp), %ebx + andl 4(%ebx), %eax + andl (%ebx), %edx + movl 20(%ebx), %edi + andl %ebp, %edi + movl 16(%ebx), %esi + andl %ebp, %esi + movl 12(%ebx), %ecx + andl %ebp, %ecx + andl 8(%ebx), %ebp + addl 8(%esp), %edx # 4-byte Folded Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl 44(%esp), %ebx + movl %edx, (%ebx) + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %eax, 4(%ebx) + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ebp, 8(%ebx) + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %ecx, 12(%ebx) + movl %esi, 16(%ebx) + adcl (%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%ebx) + addl $24, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end92: + .size mcl_fp_subNF6Lbmi2, .Lfunc_end92-mcl_fp_subNF6Lbmi2 + + .globl mcl_fpDbl_add6Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add6Lbmi2,@function +mcl_fpDbl_add6Lbmi2: # @mcl_fpDbl_add6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $36, %esp + movl 64(%esp), %edx + movl 60(%esp), %ecx + movl 12(%ecx), %esi + movl 16(%ecx), %eax + movl 8(%edx), %edi + movl (%edx), %ebx + addl (%ecx), %ebx + movl 56(%esp), %ebp + movl %ebx, (%ebp) + movl 4(%edx), %ebx + adcl 4(%ecx), %ebx + adcl 8(%ecx), %edi + adcl 12(%edx), %esi + adcl 16(%edx), %eax + movl %ebx, 4(%ebp) + movl %edx, %ebx + movl 32(%ebx), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %edi, 8(%ebp) + movl 20(%ebx), %edi + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + adcl %edi, %esi + movl 24(%ebx), %edi + movl %eax, 16(%ebp) + movl 24(%ecx), %edx + adcl %edi, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 28(%ebx), %edi + movl %esi, 20(%ebp) + movl 28(%ecx), %eax + adcl %edi, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 32(%ecx), %ebp + adcl 32(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 24(%esp) # 4-byte Spill + movl 36(%ebx), %esi + movl %ebx, %edi + movl 36(%ecx), %ebx + adcl %esi, %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 40(%edi), %esi + movl 40(%ecx), 
%edi + adcl %esi, %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 64(%esp), %esi + movl 44(%esi), %esi + movl 44(%ecx), %ecx + adcl %esi, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + sbbl %ecx, %ecx + andl $1, %ecx + movl 68(%esp), %esi + subl (%esi), %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 68(%esp), %edx + sbbl 4(%edx), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 8(%edx), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl %ebx, %ebp + sbbl 12(%edx), %ebp + movl %edi, %ebx + movl 12(%esp), %edi # 4-byte Reload + sbbl 16(%edx), %ebx + movl %edi, %eax + sbbl 20(%edx), %eax + sbbl $0, %ecx + andl $1, %ecx + jne .LBB93_2 +# BB#1: + movl %eax, %edi +.LBB93_2: + testb %cl, %cl + movl 20(%esp), %ecx # 4-byte Reload + movl 16(%esp), %edx # 4-byte Reload + jne .LBB93_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload + movl 4(%esp), %ecx # 4-byte Reload +.LBB93_4: + movl 56(%esp), %eax + movl %ecx, 24(%eax) + movl %edx, 28(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl 24(%esp), %edx # 4-byte Reload + jne .LBB93_6 +# BB#5: + movl 8(%esp), %edx # 4-byte Reload +.LBB93_6: + movl %edx, 32(%eax) + movl 28(%esp), %edx # 4-byte Reload + jne .LBB93_8 +# BB#7: + movl %ebp, %edx +.LBB93_8: + movl %edx, 36(%eax) + jne .LBB93_10 +# BB#9: + movl %ebx, %ecx +.LBB93_10: + movl %ecx, 40(%eax) + movl %edi, 44(%eax) + addl $36, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end93: + .size mcl_fpDbl_add6Lbmi2, .Lfunc_end93-mcl_fpDbl_add6Lbmi2 + + .globl mcl_fpDbl_sub6Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub6Lbmi2,@function +mcl_fpDbl_sub6Lbmi2: # @mcl_fpDbl_sub6Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $24, %esp + movl 48(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %edi + movl 52(%esp), %esi + subl (%esi), %eax + sbbl 4(%esi), %edi + movl 8(%edx), %ebx + sbbl 8(%esi), %ebx + movl 44(%esp), %ecx + movl %eax, (%ecx) + movl 12(%edx), %eax + sbbl 12(%esi), %eax + movl %edi, 4(%ecx) + movl 16(%edx), %edi + sbbl 16(%esi), %edi + movl %ebx, 8(%ecx) + movl 20(%esi), %ebx + movl %eax, 12(%ecx) + movl 20(%edx), %eax + sbbl %ebx, %eax + movl 24(%esi), %ebx + movl %edi, 16(%ecx) + movl 24(%edx), %edi + sbbl %ebx, %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 28(%esi), %edi + movl %eax, 20(%ecx) + movl 28(%edx), %eax + sbbl %edi, %eax + movl %eax, (%esp) # 4-byte Spill + movl 32(%esi), %edi + movl 32(%edx), %eax + sbbl %edi, %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 36(%esi), %edi + movl 36(%edx), %eax + sbbl %edi, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 40(%esi), %edi + movl 40(%edx), %eax + sbbl %edi, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 44(%esi), %esi + movl 44(%edx), %eax + sbbl %esi, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl $0, %ebx + sbbl $0, %ebx + andl $1, %ebx + movl 56(%esp), %eax + jne .LBB94_1 +# BB#2: + xorl %edx, %edx + jmp .LBB94_3 +.LBB94_1: + movl 20(%eax), %edx +.LBB94_3: + testb %bl, %bl + jne .LBB94_4 +# BB#5: + movl $0, %esi + movl $0, %edi + jmp .LBB94_6 +.LBB94_4: + movl (%eax), %edi + movl 4(%eax), %esi +.LBB94_6: + jne .LBB94_7 +# BB#8: + movl $0, %ebx + jmp .LBB94_9 +.LBB94_7: + movl 16(%eax), %ebx +.LBB94_9: + jne .LBB94_10 +# BB#11: + movl $0, %ebp + jmp .LBB94_12 +.LBB94_10: + movl 12(%eax), %ebp +.LBB94_12: + jne .LBB94_13 +# BB#14: + xorl %eax, %eax + jmp .LBB94_15 +.LBB94_13: + movl 8(%eax), %eax +.LBB94_15: + addl 8(%esp), %edi # 4-byte Folded Reload + adcl (%esp), %esi # 4-byte Folded Reload + movl %edi, 24(%ecx) + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %esi, 28(%ecx) + adcl 
12(%esp), %ebp # 4-byte Folded Reload + movl %eax, 32(%ecx) + adcl 16(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 36(%ecx) + movl %ebx, 40(%ecx) + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%ecx) + addl $24, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end94: + .size mcl_fpDbl_sub6Lbmi2, .Lfunc_end94-mcl_fpDbl_sub6Lbmi2 + + .globl mcl_fp_mulUnitPre7Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre7Lbmi2,@function +mcl_fp_mulUnitPre7Lbmi2: # @mcl_fp_mulUnitPre7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $16, %esp + movl 44(%esp), %edx + movl 40(%esp), %edi + mulxl 4(%edi), %ecx, %esi + mulxl (%edi), %ebx, %eax + movl %ebx, 12(%esp) # 4-byte Spill + addl %ecx, %eax + movl %eax, 8(%esp) # 4-byte Spill + mulxl 8(%edi), %ecx, %eax + adcl %esi, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + mulxl 12(%edi), %ebx, %ecx + adcl %eax, %ebx + mulxl 16(%edi), %esi, %ebp + adcl %ecx, %esi + mulxl 20(%edi), %ecx, %eax + movl %eax, (%esp) # 4-byte Spill + adcl %ebp, %ecx + mulxl 24(%edi), %edx, %edi + movl 36(%esp), %eax + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, (%eax) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%eax) + movl 4(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl %ebx, 12(%eax) + movl %esi, 16(%eax) + movl %ecx, 20(%eax) + adcl (%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%eax) + adcl $0, %edi + movl %edi, 28(%eax) + addl $16, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end95: + .size mcl_fp_mulUnitPre7Lbmi2, .Lfunc_end95-mcl_fp_mulUnitPre7Lbmi2 + + .globl mcl_fpDbl_mulPre7Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre7Lbmi2,@function +mcl_fpDbl_mulPre7Lbmi2: # @mcl_fpDbl_mulPre7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 96(%esp), %eax + movl (%eax), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, %edi + movl 100(%esp), %eax + movl (%eax), %ebp + mulxl %ebp, %ecx, %eax + movl %esi, %edx + mulxl %ebp, %edx, %esi + movl %edx, 40(%esp) # 4-byte Spill + addl %ecx, %esi + movl 8(%edi), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %edi, %ebx + mulxl %ebp, %edi, %ecx + adcl %eax, %edi + movl 12(%ebx), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %ebx, %eax + mulxl %ebp, %ebx, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl %ecx, %ebx + movl 16(%eax), %edx + movl %edx, 52(%esp) # 4-byte Spill + mulxl %ebp, %ecx, %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl 20(%eax), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %ebp, %edx, %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl 24(%eax), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %ebp, %eax, %edx + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, (%eax) + adcl $0, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl 4(%eax), %eax + movl 68(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %ebp + movl %ebp, 40(%esp) # 4-byte Spill + addl %esi, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + mulxl %eax, %esi, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl %edi, %esi + movl 60(%esp), %edx # 4-byte Reload + mulxl %eax, %edi, %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl %ebx, %edi + movl 56(%esp), 
%edx # 4-byte Reload + mulxl %eax, %ebx, %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl %ecx, %ebx + movl 52(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 64(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + adcl 28(%esp), %ecx # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 40(%esp), %esi # 4-byte Folded Reload + adcl 32(%esp), %edi # 4-byte Folded Reload + adcl 60(%esp), %ebx # 4-byte Folded Reload + adcl 56(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl %ebp, 64(%esp) # 4-byte Folded Spill + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl 68(%esp), %edx # 4-byte Reload + movl %edx, 4(%eax) + movl 96(%esp), %ecx + movl (%ecx), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl 8(%eax), %eax + mulxl %eax, %edx, %ebp + movl %ebp, 20(%esp) # 4-byte Spill + addl %esi, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 4(%ecx), %edx + movl %edx, 52(%esp) # 4-byte Spill + mulxl %eax, %edx, %esi + movl %esi, 16(%esp) # 4-byte Spill + adcl %edi, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 8(%ecx), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %eax, %esi, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl %ebx, %esi + movl %esi, %edi + movl 12(%ecx), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %eax, %edx, %esi + movl %esi, 8(%esp) # 4-byte Spill + adcl 60(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl 16(%ecx), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %eax, %ebx, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl 20(%ecx), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %eax, %ebp, %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl 24(%ecx), %edx + movl %edx, 28(%esp) # 4-byte Spill + mulxl %eax, %ecx, %edx + adcl 24(%esp), %ecx # 4-byte Folded Reload + sbbl %esi, %esi + andl $1, %esi + movl 20(%esp), %eax # 4-byte Reload + addl %eax, 68(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 60(%esp) # 4-byte Folded Spill + adcl 8(%esp), %ebx # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl 64(%esp), %ecx # 4-byte Folded Reload + adcl %edx, %esi + movl 92(%esp), %eax + movl 32(%esp), %edx # 4-byte Reload + movl %edx, 8(%eax) + movl 100(%esp), %eax + movl 12(%eax), %eax + movl 56(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %edi + movl %edi, 32(%esp) # 4-byte Spill + addl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %edi, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %edi + movl %edi, 60(%esp) # 4-byte Spill + adcl %ebx, 
%edx + movl %edx, 68(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + mulxl %eax, %ebx, %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl 36(%esp), %edx # 4-byte Reload + mulxl %eax, %ebp, %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl %ecx, %ebp + movl 28(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %eax + adcl %esi, %ecx + movl %ecx, %edx + sbbl %ecx, %ecx + andl $1, %ecx + movl 32(%esp), %esi # 4-byte Reload + addl %esi, 64(%esp) # 4-byte Folded Spill + movl 24(%esp), %edi # 4-byte Reload + adcl 52(%esp), %edi # 4-byte Folded Reload + movl 48(%esp), %esi # 4-byte Reload + adcl %esi, 68(%esp) # 4-byte Folded Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + adcl 44(%esp), %ebp # 4-byte Folded Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl %eax, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 96(%esp), %ecx + movl (%ecx), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl 16(%eax), %esi + mulxl %esi, %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + addl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + movl 4(%ecx), %edx + movl %edx, 64(%esp) # 4-byte Spill + mulxl %esi, %eax, %edx + movl %edx, 24(%esp) # 4-byte Spill + adcl %edi, %eax + movl %eax, %edi + movl 8(%ecx), %edx + movl %edx, 56(%esp) # 4-byte Spill + mulxl %esi, %eax, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + movl 12(%ecx), %edx + movl %edx, 52(%esp) # 4-byte Spill + mulxl %esi, %eax, %edx + movl %edx, 16(%esp) # 4-byte Spill + adcl %ebx, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 16(%ecx), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %esi, %ebx, %edx + movl %edx, 8(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl 20(%ecx), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %esi, %edx, %eax + movl %eax, 4(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, %eax + movl 24(%ecx), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %esi, %ebp, %ecx + movl %ecx, (%esp) # 4-byte Spill + adcl 32(%esp), %ebp # 4-byte Folded Reload + sbbl %ecx, %ecx + andl $1, %ecx + movl %edi, %esi + addl 28(%esp), %esi # 4-byte Folded Reload + movl 12(%esp), %edi # 4-byte Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + movl 20(%esp), %edx # 4-byte Reload + adcl %edx, 68(%esp) # 4-byte Folded Spill + adcl 16(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl (%esp), %ecx # 4-byte Folded Reload + movl %ecx, 32(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl 36(%esp), %edx # 4-byte Reload + movl %edx, 16(%eax) + movl 100(%esp), %eax + movl 20(%eax), %eax + movl 60(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 36(%esp) # 4-byte Spill + addl %esi, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + mulxl %eax, %esi, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl %edi, %esi + movl 56(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + mulxl %eax, %ebx, %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl 48(%esp), %edx # 
4-byte Reload + mulxl %eax, %edx, %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, %edi + movl 44(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl %ebp, %edx + movl %edx, %ebp + movl 40(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, %eax + sbbl %edx, %edx + andl $1, %edx + addl 36(%esp), %esi # 4-byte Folded Reload + movl 20(%esp), %ecx # 4-byte Reload + adcl 64(%esp), %ecx # 4-byte Folded Reload + adcl 56(%esp), %ebx # 4-byte Folded Reload + adcl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 52(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 40(%esp) # 4-byte Spill + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl 60(%esp), %edx # 4-byte Reload + movl %edx, 20(%eax) + movl 100(%esp), %eax + movl 24(%eax), %edx + movl 96(%esp), %eax + mulxl (%eax), %ebp, %edi + movl %edi, 60(%esp) # 4-byte Spill + addl %esi, %ebp + movl %ebp, 64(%esp) # 4-byte Spill + mulxl 4(%eax), %esi, %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl %ecx, %esi + movl %esi, %ebp + mulxl 8(%eax), %ecx, %esi + movl %esi, 44(%esp) # 4-byte Spill + adcl %ebx, %ecx + movl %ecx, 68(%esp) # 4-byte Spill + mulxl 12(%eax), %ebx, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + mulxl 16(%eax), %edi, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + mulxl 20(%eax), %esi, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + adcl 48(%esp), %esi # 4-byte Folded Reload + mulxl 24(%eax), %edx, %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 56(%esp), %edx # 4-byte Folded Reload + sbbl %ecx, %ecx + andl $1, %ecx + addl 60(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 68(%esp) # 4-byte Folded Spill + adcl 44(%esp), %ebx # 4-byte Folded Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + adcl 32(%esp), %esi # 4-byte Folded Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl 92(%esp), %eax + movl 64(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 60(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 68(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl %ebx, 36(%eax) + movl %edi, 40(%eax) + movl %esi, 44(%eax) + movl %edx, 48(%eax) + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%eax) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end96: + .size mcl_fpDbl_mulPre7Lbmi2, .Lfunc_end96-mcl_fpDbl_mulPre7Lbmi2 + + .globl mcl_fpDbl_sqrPre7Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre7Lbmi2,@function +mcl_fpDbl_sqrPre7Lbmi2: # @mcl_fpDbl_sqrPre7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $80, %esp + movl 104(%esp), %ecx + movl (%ecx), %ebx + movl 4(%ecx), %eax + movl %eax, %edx + mulxl %ebx, %esi, %edi + movl %esi, 56(%esp) # 4-byte Spill + movl %edi, 76(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl %ebx, %ebp, %edx + movl %ebp, 44(%esp) # 4-byte Spill + addl %esi, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 8(%ecx), %edx + movl %edx, 68(%esp) # 4-byte Spill + mulxl %ebx, %edx, %esi + adcl %edi, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 12(%ecx), %edx + movl %edx, 64(%esp) # 4-byte Spill + mulxl %ebx, %edi, 
%edx + movl %edx, 52(%esp) # 4-byte Spill + adcl %esi, %edi + movl 16(%ecx), %edx + movl %edx, 60(%esp) # 4-byte Spill + mulxl %ebx, %esi, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 52(%esp), %esi # 4-byte Folded Reload + movl 20(%ecx), %edx + movl %edx, 52(%esp) # 4-byte Spill + mulxl %ebx, %edx, %ebp + movl %ebp, 36(%esp) # 4-byte Spill + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl 24(%ecx), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %ebx, %ecx, %ebx + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 32(%esp) # 4-byte Spill + movl 100(%esp), %ecx + movl 44(%esp), %edx # 4-byte Reload + movl %edx, (%ecx) + adcl $0, %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + addl %edx, 72(%esp) # 4-byte Folded Spill + movl %eax, %edx + mulxl %eax, %ebx, %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl 68(%esp), %edx # 4-byte Reload + mulxl %eax, %ebp, %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl %edi, %ebp + movl 64(%esp), %edx # 4-byte Reload + mulxl %eax, %edi, %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl %esi, %edi + movl 60(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, %esi + movl 52(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %ecx, %eax + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %edx + sbbl %ecx, %ecx + andl $1, %ecx + addl 76(%esp), %ebx # 4-byte Folded Reload + adcl 56(%esp), %ebp # 4-byte Folded Reload + adcl 44(%esp), %edi # 4-byte Folded Reload + adcl 64(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl %esi, 68(%esp) # 4-byte Folded Spill + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl %eax, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl 72(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%eax) + movl 104(%esp), %esi + movl (%esi), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 8(%esi), %ecx + mulxl %ecx, %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + addl %ebx, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 4(%esi), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %esi, %ebx + mulxl %ecx, %eax, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %ecx, %edx + mulxl %ecx, %eax, %edx + movl %edx, 16(%esp) # 4-byte Spill + adcl %edi, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 12(%ebx), %eax + movl %eax, %edx + mulxl %ecx, %edi, %edx + movl %edi, 32(%esp) # 4-byte Spill + movl %edx, 72(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl %edi, %edx + movl %edx, %esi + movl 16(%ebx), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %ecx, %edx, %edi + movl %edi, 76(%esp) # 4-byte Spill + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, %edi + movl 20(%ebx), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %ebx, %ebp + mulxl %ecx, %ebx, %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl 24(%ebp), %edx + movl %edx, 36(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + sbbl %ebp, %ebp + andl $1, %ebp + movl 24(%esp), %edx # 4-byte 
Reload + addl %edx, 64(%esp) # 4-byte Folded Spill + movl 20(%esp), %edx # 4-byte Reload + adcl %edx, 60(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + adcl 72(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 76(%esp), %ebx # 4-byte Folded Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl 52(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %edi + movl %edi, 52(%esp) # 4-byte Spill + addl 64(%esp), %edx # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %edi + movl %edi, 48(%esp) # 4-byte Spill + adcl 60(%esp), %edx # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + movl %eax, %edx + mulxl %eax, %edx, %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %esi + movl %esi, 44(%esp) # 4-byte Spill + adcl %ebx, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + mulxl %eax, %edx, %esi + movl %esi, 32(%esp) # 4-byte Spill + adcl %ecx, %edx + movl %edx, %esi + movl 36(%esp), %edx # 4-byte Reload + mulxl %eax, %edi, %eax + adcl %ebp, %edi + movl %edi, %edx + sbbl %ecx, %ecx + andl $1, %ecx + movl 52(%esp), %edi # 4-byte Reload + addl %edi, 68(%esp) # 4-byte Folded Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 48(%esp), %edi # 4-byte Folded Reload + movl 28(%esp), %ebx # 4-byte Reload + adcl 72(%esp), %ebx # 4-byte Folded Reload + movl 24(%esp), %ebp # 4-byte Reload + adcl 60(%esp), %ebp # 4-byte Folded Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + adcl %eax, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + movl 64(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 104(%esp), %ecx + movl (%ecx), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 16(%ecx), %eax + mulxl %eax, %edx, %esi + movl %esi, 24(%esp) # 4-byte Spill + addl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl 4(%ecx), %edx + movl %edx, 52(%esp) # 4-byte Spill + mulxl %eax, %edx, %esi + movl %esi, 20(%esp) # 4-byte Spill + adcl %edi, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 8(%ecx), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %eax, %edx, %esi + movl %esi, 16(%esp) # 4-byte Spill + adcl %ebx, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 12(%ecx), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %ecx, %esi + mulxl %eax, %ecx, %edx + movl %edx, 8(%esp) # 4-byte Spill + adcl %ebp, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl %eax, %edx + mulxl %eax, %ecx, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + movl 20(%esi), %ecx + movl %ecx, %edx + mulxl %eax, %edx, %ebp + movl %edx, 32(%esp) # 4-byte Spill + movl %ebp, 36(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl %edx, %edi + movl 24(%esi), %edx + movl %edx, 40(%esp) # 4-byte Spill + mulxl %eax, %esi, %eax + movl %eax, (%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + sbbl %ebx, %ebx + andl $1, %ebx + movl 12(%esp), %eax # 4-byte Reload + addl 24(%esp), %eax # 4-byte Folded Reload + movl 
20(%esp), %edx # 4-byte Reload + adcl %edx, 72(%esp) # 4-byte Folded Spill + movl 16(%esp), %edx # 4-byte Reload + adcl %edx, 64(%esp) # 4-byte Folded Spill + movl 8(%esp), %edx # 4-byte Reload + adcl %edx, 68(%esp) # 4-byte Folded Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 76(%esp) # 4-byte Spill + adcl %ebp, %esi + adcl (%esp), %ebx # 4-byte Folded Reload + movl 56(%esp), %edx # 4-byte Reload + mulxl %ecx, %edx, %edi + movl %edi, 28(%esp) # 4-byte Spill + addl %eax, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + mulxl %ecx, %ebp, %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 72(%esp), %ebp # 4-byte Folded Reload + movl 48(%esp), %edx # 4-byte Reload + mulxl %ecx, %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + mulxl %ecx, %edi, %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 68(%esp), %edi # 4-byte Folded Reload + movl 32(%esp), %eax # 4-byte Reload + adcl %eax, 76(%esp) # 4-byte Folded Spill + movl %ecx, %edx + mulxl %ecx, %edx, %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl %esi, %edx + movl %edx, %eax + movl 40(%esp), %edx # 4-byte Reload + mulxl %ecx, %ecx, %edx + adcl %ebx, %ecx + movl %ecx, %ebx + sbbl %ecx, %ecx + andl $1, %ecx + addl 28(%esp), %ebp # 4-byte Folded Reload + movl 52(%esp), %esi # 4-byte Reload + adcl %esi, 72(%esp) # 4-byte Folded Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl 76(%esp), %esi # 4-byte Reload + adcl 64(%esp), %esi # 4-byte Folded Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 48(%esp) # 4-byte Spill + adcl %edx, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl 60(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%eax) + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, 20(%eax) + movl 104(%esp), %eax + movl 24(%eax), %edx + mulxl (%eax), %ecx, %ebx + movl %ebx, 64(%esp) # 4-byte Spill + addl %ebp, %ecx + movl %ecx, 68(%esp) # 4-byte Spill + mulxl 4(%eax), %ecx, %ebx + movl %ebx, 60(%esp) # 4-byte Spill + adcl 72(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + mulxl 8(%eax), %ecx, %ebx + movl %ebx, 72(%esp) # 4-byte Spill + adcl %edi, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + mulxl 12(%eax), %ebx, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + adcl %esi, %ebx + mulxl 16(%eax), %edi, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 44(%esp), %edi # 4-byte Folded Reload + mulxl 20(%eax), %esi, %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 48(%esp), %esi # 4-byte Folded Reload + mulxl %edx, %edx, %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 52(%esp), %edx # 4-byte Folded Reload + sbbl %ecx, %ecx + andl $1, %ecx + addl 64(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl %eax, 76(%esp) # 4-byte Folded Spill + adcl 72(%esp), %ebx # 4-byte Folded Reload + adcl 40(%esp), %edi # 4-byte Folded Reload + adcl 36(%esp), %esi # 4-byte Folded Reload + adcl 44(%esp), %edx # 4-byte Folded Reload + movl 100(%esp), %eax + movl 68(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 64(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 76(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl %ebx, 36(%eax) + movl %edi, 40(%eax) + movl %esi, 44(%eax) + movl %edx, 48(%eax) + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%eax) + addl $80, %esp + popl %esi + popl 
%edi + popl %ebx + popl %ebp + retl +.Lfunc_end97: + .size mcl_fpDbl_sqrPre7Lbmi2, .Lfunc_end97-mcl_fpDbl_sqrPre7Lbmi2 + + .globl mcl_fp_mont7Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont7Lbmi2,@function +mcl_fp_mont7Lbmi2: # @mcl_fp_mont7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $116, %esp + movl 140(%esp), %eax + movl 24(%eax), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 144(%esp), %ecx + movl (%ecx), %ecx + mulxl %ecx, %edx, %esi + movl %edx, 112(%esp) # 4-byte Spill + movl 20(%eax), %edx + movl %edx, 84(%esp) # 4-byte Spill + mulxl %ecx, %edi, %edx + movl %edi, 108(%esp) # 4-byte Spill + movl %edx, 52(%esp) # 4-byte Spill + movl 16(%eax), %edx + movl %edx, 80(%esp) # 4-byte Spill + mulxl %ecx, %edx, %ebx + movl %edx, 104(%esp) # 4-byte Spill + movl 8(%eax), %edx + movl %edx, 68(%esp) # 4-byte Spill + mulxl %ecx, %edi, %edx + movl %edi, 96(%esp) # 4-byte Spill + movl %edx, 100(%esp) # 4-byte Spill + movl (%eax), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 56(%esp) # 4-byte Spill + mulxl %ecx, %edi, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %ebp, %edx + mulxl %ecx, %ebp, %edx + addl %edi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl %edx, 48(%esp) # 4-byte Folded Spill + movl 12(%eax), %edx + movl %edx, 64(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %eax + adcl 100(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + adcl 108(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 148(%esp), %ebx + movl -4(%ebx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %ebp, %edx + imull %eax, %edx + movl (%ebx), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 4(%ebx), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + mulxl %ecx, %esi, %ecx + mulxl %edi, %edi, %eax + movl %edi, 8(%esp) # 4-byte Spill + addl %esi, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 8(%ebx), %esi + movl %esi, 104(%esp) # 4-byte Spill + mulxl %esi, %eax, %esi + adcl %ecx, %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 12(%ebx), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + mulxl %ecx, %eax, %ecx + adcl %esi, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 16(%ebx), %esi + movl %esi, 96(%esp) # 4-byte Spill + mulxl %esi, %eax, %esi + adcl %ecx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 20(%ebx), %eax + movl %eax, 92(%esp) # 4-byte Spill + mulxl %eax, %eax, %edi + adcl %esi, %eax + movl %eax, %ecx + movl 24(%ebx), %eax + movl %eax, 88(%esp) # 4-byte Spill + mulxl %eax, %edx, %eax + adcl %edi, %edx + adcl $0, %eax + addl %ebp, 8(%esp) # 4-byte Folded Spill + movl 44(%esp), %esi # 4-byte Reload + adcl %esi, 40(%esp) # 4-byte Folded Spill + movl 48(%esp), %esi # 4-byte Reload + adcl %esi, 32(%esp) # 4-byte Folded Spill + movl 12(%esp), %esi # 4-byte Reload + adcl %esi, 28(%esp) # 4-byte Folded Spill + movl 16(%esp), %esi # 4-byte Reload + adcl %esi, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, (%esp) # 4-byte Spill + movl 144(%esp), %edx + movl 4(%edx), %edx + mulxl 72(%esp), 
%ecx, %eax # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + mulxl 84(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + mulxl 56(%esp), %ebx, %esi # 4-byte Folded Reload + mulxl 60(%esp), %eax, %edi # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + addl %ebx, %edi + movl %edi, 52(%esp) # 4-byte Spill + mulxl 68(%esp), %edi, %eax # 4-byte Folded Reload + adcl %esi, %edi + movl %edi, 48(%esp) # 4-byte Spill + mulxl 64(%esp), %ebp, %ebx # 4-byte Folded Reload + adcl %eax, %ebp + mulxl 80(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ebx, %esi + adcl %ecx, %eax + movl %eax, %ecx + movl 44(%esp), %ebx # 4-byte Reload + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl 20(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 36(%esp), %eax # 4-byte Reload + addl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 48(%esp) # 4-byte Spill + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 8(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl %edx, %ebx + sbbl %ecx, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl %eax, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 32(%esp) # 4-byte Spill + mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, %edi + mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, 28(%esp) # 4-byte Spill + mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload + adcl %eax, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %esi, %ecx + movl %ecx, %esi + mulxl 88(%esp), %ebp, %ecx # 4-byte Folded Reload + adcl %eax, %ebp + adcl $0, %ecx + movl %ecx, %edx + movl 40(%esp), %eax # 4-byte Reload + andl $1, %eax + movl 8(%esp), %ecx # 4-byte Reload + addl 36(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %ecx # 4-byte Reload + adcl %ecx, 32(%esp) # 4-byte Folded Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + movl 20(%esp), %ecx # 4-byte Reload + adcl %ecx, 28(%esp) # 4-byte Folded Spill + movl 12(%esp), %ecx # 4-byte Reload + adcl %ecx, 24(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 44(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 144(%esp), %eax + movl 8(%eax), %edx + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + movl %eax, 52(%esp) # 4-byte Spill + mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 60(%esp), %ebx, %edi # 4-byte Folded 
Reload + movl %ebx, 44(%esp) # 4-byte Spill + addl %ecx, %edi + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload + adcl 12(%esp), %ebx # 4-byte Folded Reload + mulxl 80(%esp), %edx, %ecx # 4-byte Folded Reload + adcl %eax, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl %esi, %ecx + movl %ecx, %esi + movl 48(%esp), %edx # 4-byte Reload + adcl 16(%esp), %edx # 4-byte Folded Reload + movl 52(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 44(%esp), %ecx # 4-byte Reload + addl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 44(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + movl 28(%esp), %edi # 4-byte Reload + adcl %edi, 36(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 12(%esp), %ebx # 4-byte Reload + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl %ebp, %esi + movl %esi, 12(%esp) # 4-byte Spill + adcl 4(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 32(%esp) # 4-byte Spill + mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, 28(%esp) # 4-byte Spill + mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, %edi + mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload + adcl %eax, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %esi, %ecx + movl %ecx, %esi + mulxl 88(%esp), %ebp, %ecx # 4-byte Folded Reload + adcl %eax, %ebp + adcl $0, %ecx + movl %ecx, %edx + movl 40(%esp), %eax # 4-byte Reload + andl $1, %eax + movl 8(%esp), %ecx # 4-byte Reload + addl 44(%esp), %ecx # 4-byte Folded Reload + movl 16(%esp), %ecx # 4-byte Reload + adcl %ecx, 32(%esp) # 4-byte Folded Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl %ecx, 28(%esp) # 4-byte Folded Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl %ebx, 20(%esp) # 4-byte Folded Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 48(%esp), %ebp # 4-byte Folded Reload + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 144(%esp), %eax + movl 12(%eax), %edx + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + movl %eax, (%esp) # 4-byte Spill + mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload + movl %ebx, 36(%esp) # 4-byte Spill + addl %ecx, %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + mulxl 80(%esp), %edx, %ecx # 4-byte Folded Reload + adcl %eax, %edx + movl %edx, (%esp) # 4-byte Spill + adcl 
%esi, %ecx + movl %ecx, %esi + movl 44(%esp), %edx # 4-byte Reload + adcl 4(%esp), %edx # 4-byte Folded Reload + movl 48(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 36(%esp), %ecx # 4-byte Reload + addl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + movl 16(%esp), %edi # 4-byte Reload + adcl %edi, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl (%esp), %ebx # 4-byte Reload + adcl 12(%esp), %ebx # 4-byte Folded Reload + adcl %ebp, %esi + movl %esi, 12(%esp) # 4-byte Spill + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 32(%esp) # 4-byte Spill + mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, 28(%esp) # 4-byte Spill + mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, %edi + mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload + adcl %eax, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %esi, %ecx + movl %ecx, %esi + mulxl 88(%esp), %ebp, %ecx # 4-byte Folded Reload + adcl %eax, %ebp + adcl $0, %ecx + movl %ecx, %edx + movl 40(%esp), %eax # 4-byte Reload + andl $1, %eax + movl 8(%esp), %ecx # 4-byte Reload + addl 36(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %ecx # 4-byte Reload + adcl %ecx, 32(%esp) # 4-byte Folded Spill + movl 24(%esp), %ecx # 4-byte Reload + adcl %ecx, 28(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl %ebx, 20(%esp) # 4-byte Folded Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 44(%esp), %ebp # 4-byte Folded Reload + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 144(%esp), %eax + movl 16(%eax), %edx + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + movl %eax, (%esp) # 4-byte Spill + mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload + movl %ebx, 36(%esp) # 4-byte Spill + addl %ecx, %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + mulxl 80(%esp), %edx, %ecx # 4-byte Folded Reload + adcl %eax, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %esi, %ecx + movl %ecx, %esi + movl 44(%esp), %edx # 4-byte Reload + adcl 4(%esp), %edx # 4-byte Folded Reload + movl 48(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 36(%esp), %ecx # 4-byte Reload + addl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte 
Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + movl 16(%esp), %edi # 4-byte Reload + adcl %edi, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl (%esp), %ebx # 4-byte Reload + adcl 12(%esp), %ebx # 4-byte Folded Reload + adcl %ebp, %esi + movl %esi, 12(%esp) # 4-byte Spill + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 32(%esp) # 4-byte Spill + mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, 28(%esp) # 4-byte Spill + mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, %edi + mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload + adcl %eax, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %esi, %ecx + movl %ecx, %esi + mulxl 88(%esp), %edx, %ecx # 4-byte Folded Reload + adcl %eax, %edx + movl %edx, %ebp + adcl $0, %ecx + movl %ecx, %edx + movl 40(%esp), %eax # 4-byte Reload + andl $1, %eax + movl 8(%esp), %ecx # 4-byte Reload + addl 36(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %ecx # 4-byte Reload + adcl %ecx, 32(%esp) # 4-byte Folded Spill + movl 24(%esp), %ecx # 4-byte Reload + adcl %ecx, 28(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl %ebx, 20(%esp) # 4-byte Folded Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 44(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 144(%esp), %eax + movl 20(%eax), %edx + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + mulxl 68(%esp), %eax, %ebp # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload + movl %ebx, 36(%esp) # 4-byte Spill + addl %ecx, %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %ebp, %ecx + movl %ecx, %ebp + mulxl 80(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl %eax, %ebx + adcl %esi, %ecx + movl %ecx, %esi + movl 48(%esp), %edx # 4-byte Reload + adcl (%esp), %edx # 4-byte Folded Reload + movl 4(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 36(%esp), %ecx # 4-byte Reload + addl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + movl 16(%esp), %edi # 4-byte Reload + adcl %edi, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + adcl 
8(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebp + sbbl %eax, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 40(%esp) # 4-byte Spill + mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload + adcl %eax, %esi + movl %esi, 32(%esp) # 4-byte Spill + mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, %edi + mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload + adcl %eax, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + mulxl 92(%esp), %eax, %ebx # 4-byte Folded Reload + adcl %esi, %eax + movl %eax, %esi + mulxl 88(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %ebx, %ecx + movl %ecx, %ebx + adcl $0, %eax + movl %eax, %ecx + movl 44(%esp), %edx # 4-byte Reload + andl $1, %edx + movl 8(%esp), %eax # 4-byte Reload + addl 36(%esp), %eax # 4-byte Folded Reload + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 24(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + adcl %ebp, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 144(%esp), %edx + movl 24(%edx), %edx + mulxl 56(%esp), %ebx, %esi # 4-byte Folded Reload + mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + addl %ebx, %eax + movl %eax, 56(%esp) # 4-byte Spill + mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + mulxl 68(%esp), %ebp, %edi # 4-byte Folded Reload + adcl %esi, %ebp + mulxl 64(%esp), %eax, %esi # 4-byte Folded Reload + adcl %edi, %eax + movl %eax, 68(%esp) # 4-byte Spill + mulxl 84(%esp), %edi, %eax # 4-byte Folded Reload + movl %edi, 84(%esp) # 4-byte Spill + mulxl 80(%esp), %ebx, %edx # 4-byte Folded Reload + adcl %esi, %ebx + adcl 84(%esp), %edx # 4-byte Folded Reload + movl %edx, %esi + adcl %ecx, %eax + movl %eax, %ecx + movl 72(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 60(%esp), %edi # 4-byte Reload + addl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + adcl 52(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 52(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl %eax, 68(%esp) # 4-byte Folded Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 36(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + imull %edi, %edx + mulxl 108(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 76(%esp) # 4-byte Spill + mulxl 112(%esp), %ecx, %esi # 4-byte Folded Reload + 
addl %eax, %ecx + movl %ecx, 80(%esp) # 4-byte Spill + mulxl 104(%esp), %eax, %edi # 4-byte Folded Reload + adcl %esi, %eax + movl %eax, 84(%esp) # 4-byte Spill + mulxl 100(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %edi, %ecx + movl %edx, %edi + mulxl 96(%esp), %ebx, %ebp # 4-byte Folded Reload + adcl %eax, %ebx + mulxl 92(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ebp, %esi + movl %edi, %edx + mulxl 88(%esp), %edi, %ebp # 4-byte Folded Reload + adcl %eax, %edi + adcl $0, %ebp + andl $1, 64(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + addl 60(%esp), %eax # 4-byte Folded Reload + movl 80(%esp), %edx # 4-byte Reload + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 76(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 68(%esp) # 4-byte Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + adcl 48(%esp), %edi # 4-byte Folded Reload + adcl 72(%esp), %ebp # 4-byte Folded Reload + movl 64(%esp), %eax # 4-byte Reload + adcl $0, %eax + subl 108(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + sbbl 112(%esp), %edx # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + sbbl 104(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 72(%esp) # 4-byte Spill + sbbl 100(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 100(%esp) # 4-byte Spill + movl %esi, %ebx + sbbl 96(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 104(%esp) # 4-byte Spill + movl %edi, %ebx + sbbl 92(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 108(%esp) # 4-byte Spill + movl %ebp, %ebx + sbbl 88(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 112(%esp) # 4-byte Spill + sbbl $0, %eax + andl $1, %eax + movl %eax, %ecx + jne .LBB98_2 +# BB#1: + movl 60(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill +.LBB98_2: + movl 136(%esp), %ebx + movl 80(%esp), %edx # 4-byte Reload + movl %edx, (%ebx) + movl %ebx, %edx + testb %cl, %cl + movl 84(%esp), %ebx # 4-byte Reload + jne .LBB98_4 +# BB#3: + movl 64(%esp), %ebx # 4-byte Reload +.LBB98_4: + movl %ebx, 4(%edx) + movl 76(%esp), %ecx # 4-byte Reload + jne .LBB98_6 +# BB#5: + movl 72(%esp), %ecx # 4-byte Reload +.LBB98_6: + movl %ecx, 8(%edx) + movl 68(%esp), %eax # 4-byte Reload + jne .LBB98_8 +# BB#7: + movl 100(%esp), %eax # 4-byte Reload +.LBB98_8: + movl %eax, 12(%edx) + jne .LBB98_10 +# BB#9: + movl 104(%esp), %esi # 4-byte Reload +.LBB98_10: + movl %esi, 16(%edx) + jne .LBB98_12 +# BB#11: + movl 108(%esp), %edi # 4-byte Reload +.LBB98_12: + movl %edi, 20(%edx) + jne .LBB98_14 +# BB#13: + movl 112(%esp), %ebp # 4-byte Reload +.LBB98_14: + movl %ebp, 24(%edx) + addl $116, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end98: + .size mcl_fp_mont7Lbmi2, .Lfunc_end98-mcl_fp_mont7Lbmi2 + + .globl mcl_fp_montNF7Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF7Lbmi2,@function +mcl_fp_montNF7Lbmi2: # @mcl_fp_montNF7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $104, %esp + movl 128(%esp), %eax + movl (%eax), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 132(%esp), %ecx + movl (%ecx), %ebp + mulxl %ebp, %ecx, %esi + movl %edi, %edx + mulxl %ebp, %edi, %edx + movl %edi, 96(%esp) # 4-byte Spill + addl %ecx, %edx + movl %edx, 92(%esp) # 4-byte Spill 
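
The mcl_fp_mont7Lbmi2 kernel that ends above and the mcl_fp_montNF7Lbmi2 kernel that begins here are LLVM-generated, fully unrolled Montgomery multiplication routines for a 7-limb (7 x 32-bit, 224-bit) modulus: each of the seven rounds multiplies one word of the multiplier into the accumulator via BMI2 mulxl, derives a reduction factor with the imull against the constant loaded from -4 off the modulus pointer (which appears to be the precomputed -p^-1 mod 2^32), folds that multiple of p back in, and drops the now-zero low limb; the NF variant differs mainly in how the final reduction and result selection are carried out. As a rough reference for the word-by-word (CIOS-style) loop these unrolled bodies correspond to, the following Go sketch operates on generic 32-bit limbs. It is a minimal illustration under those assumptions, not the vendored code: montMul and negInv32 are names local to this sketch, and the generated assembly's operand scheduling, spills, and carry handling differ.

    // Minimal CIOS-style Montgomery multiplication sketch over
    // little-endian 32-bit limbs. Illustrative only; not the mcl API.
    package main

    import (
        "fmt"
        "math/bits"
    )

    // negInv32 returns -p0^-1 mod 2^32 for odd p0, by Hensel lifting:
    // x = p0 satisfies x*p0 == 1 (mod 8), and each step doubles the
    // number of correct low bits (3 -> 6 -> 12 -> 24 -> 48).
    func negInv32(p0 uint32) uint32 {
        x := p0
        for i := 0; i < 4; i++ {
            x *= 2 - p0*x
        }
        return -x
    }

    // montMul returns a*b*R^-1 mod p, where R = 2^(32*len(p)) and p is odd.
    func montMul(a, b, p []uint32, n0 uint32) []uint32 {
        n := len(p)
        t := make([]uint32, n+2)
        for i := 0; i < n; i++ {
            var c, cy uint32
            // t += a * b[i]
            for j := 0; j < n; j++ {
                hi, lo := bits.Mul32(a[j], b[i])
                lo, c1 := bits.Add32(lo, t[j], 0)
                lo, c2 := bits.Add32(lo, c, 0)
                t[j], c = lo, hi+c1+c2 // a*b+t+c <= 2^64-1, so no overflow
            }
            t[n], cy = bits.Add32(t[n], c, 0)
            t[n+1] = cy
            // pick m so that t + m*p == 0 (mod 2^32), then shift t right one limb
            m := t[0] * n0
            hi, lo := bits.Mul32(m, p[0])
            _, c0 := bits.Add32(lo, t[0], 0) // low limb becomes zero
            c = hi + c0
            for j := 1; j < n; j++ {
                hi, lo := bits.Mul32(m, p[j])
                lo, c1 := bits.Add32(lo, t[j], 0)
                lo, c2 := bits.Add32(lo, c, 0)
                t[j-1], c = lo, hi+c1+c2
            }
            t[n-1], cy = bits.Add32(t[n], c, 0)
            t[n] = t[n+1] + cy
        }
        // conditional final subtraction: result is t-p if t >= p, else t
        r := make([]uint32, n)
        var borrow uint32
        for j := 0; j < n; j++ {
            r[j], borrow = bits.Sub32(t[j], p[j], borrow)
        }
        if t[n] == 0 && borrow != 0 {
            copy(r, t[:n]) // t < p: keep t
        }
        return r
    }

    func main() {
        // one-limb toy check with p = 2^32 - 5: montMul(xR, yR) == (x*y)*R mod p
        p := []uint32{0xfffffffb}
        n0 := negInv32(p[0])
        R := uint64(1) << 32
        x, y := uint64(3), uint64(5)
        xR := uint32(x * R % uint64(p[0]))
        yR := uint32(y * R % uint64(p[0]))
        got := montMul([]uint32{xR}, []uint32{yR}, p, n0)
        fmt.Println(got[0] == uint32(x*y*R%uint64(p[0]))) // true
    }

As the toy check in main illustrates, operands stay in the Montgomery domain (x*R mod p) across calls, so chained multiplications need no per-step conversion; the mcl_fp_montRed kernels seen earlier in this file look like the corresponding reduction-only step used when mapping a double-width value back into that domain.
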
+ movl 8(%eax), %edx + movl %edx, 60(%esp) # 4-byte Spill + mulxl %ebp, %ecx, %edi + adcl %esi, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 12(%eax), %edx + movl %edx, 56(%esp) # 4-byte Spill + mulxl %ebp, %ecx, %ebx + adcl %edi, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 16(%eax), %edx + movl %edx, 52(%esp) # 4-byte Spill + mulxl %ebp, %edx, %ecx + adcl %ebx, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 20(%eax), %edx + movl %edx, 48(%esp) # 4-byte Spill + mulxl %ebp, %edx, %esi + adcl %ecx, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 24(%eax), %edx + movl %edx, 44(%esp) # 4-byte Spill + mulxl %ebp, %ebp, %eax + adcl %esi, %ebp + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 136(%esp), %edi + movl -4(%edi), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + movl %esi, %edx + imull %eax, %edx + movl (%edi), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %eax + movl %eax, 28(%esp) # 4-byte Spill + addl %esi, %ecx + movl 4(%edi), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + mulxl %ecx, %eax, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + movl 8(%edi), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + mulxl %ecx, %eax, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + movl 12(%edi), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + mulxl %ecx, %esi, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + adcl 84(%esp), %esi # 4-byte Folded Reload + movl 16(%edi), %eax + movl %eax, 84(%esp) # 4-byte Spill + mulxl %eax, %eax, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + movl 20(%edi), %eax + movl %eax, 80(%esp) # 4-byte Spill + mulxl %eax, %eax, %ebx + movl %ebx, 8(%esp) # 4-byte Spill + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebx + movl 24(%edi), %eax + movl %eax, 76(%esp) # 4-byte Spill + mulxl %eax, %edx, %eax + adcl %ebp, %edx + movl %edx, %edi + movl 40(%esp), %edx # 4-byte Reload + adcl $0, %edx + movl 28(%esp), %ebp # 4-byte Reload + addl %ebp, 36(%esp) # 4-byte Folded Spill + movl 24(%esp), %ebp # 4-byte Reload + adcl %ebp, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl 4(%eax), %edx + mulxl 64(%esp), %ecx, %esi # 4-byte Folded Reload + mulxl 68(%esp), %edi, %eax # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + addl %ecx, %eax + movl %eax, 4(%esp) # 4-byte Spill + mulxl 60(%esp), %eax, %edi # 4-byte Folded Reload + adcl %esi, %eax + movl %eax, %ecx + mulxl 56(%esp), %esi, %ebx # 4-byte Folded Reload + adcl %edi, %esi + mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload + adcl %ebx, %eax + movl %eax, 28(%esp) # 4-byte Spill + mulxl 48(%esp), %eax, %edi # 4-byte Folded Reload + adcl %ebp, %eax + movl %eax, %ebx + mulxl 44(%esp), %ebp, %eax # 4-byte Folded Reload + adcl %edi, %ebp + adcl $0, %eax + movl %eax, %edx + movl 8(%esp), %eax # 4-byte Reload + addl 36(%esp), %eax # 4-byte Folded Reload + movl 4(%esp), %edi # 4-byte Reload + adcl 32(%esp), %edi # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte 
Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl 16(%esp), %ecx # 4-byte Reload + adcl %ecx, 28(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, %edx + movl %eax, %ebx + imull 72(%esp), %edx # 4-byte Folded Reload + mulxl 100(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + addl %ebx, %eax + mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl %edi, %eax + movl %eax, 32(%esp) # 4-byte Spill + mulxl 92(%esp), %edi, %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl %esi, %eax + movl %eax, %esi + mulxl 84(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + mulxl 80(%esp), %eax, %ebx # 4-byte Folded Reload + movl %ebx, 8(%esp) # 4-byte Spill + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + mulxl 76(%esp), %eax, %edx # 4-byte Folded Reload + adcl %ebp, %eax + movl 40(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + movl 36(%esp), %ebp # 4-byte Reload + addl %ebp, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + movl 12(%esp), %ecx # 4-byte Reload + adcl %ecx, 28(%esp) # 4-byte Folded Spill + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl %edx, %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl 8(%eax), %edx + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 68(%esp), %esi, %edi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + addl %ecx, %edi + mulxl 60(%esp), %ecx, %ebx # 4-byte Folded Reload + adcl %eax, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + mulxl 56(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ebx, %esi + mulxl 52(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl %eax, %ebx + mulxl 48(%esp), %eax, %ebp # 4-byte Folded Reload + movl %ebp, 4(%esp) # 4-byte Spill + adcl %ecx, %eax + movl %eax, %ebp + mulxl 44(%esp), %edx, %eax # 4-byte Folded Reload + adcl 4(%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + movl 36(%esp), %ecx # 4-byte Reload + addl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl 8(%esp), %ecx # 4-byte Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + adcl 16(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 24(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %ebx # 4-byte Reload + movl %ebx, %edx + imull 72(%esp), %edx # 4-byte Folded Reload + mulxl 100(%esp), %eax, %ebp # 4-byte Folded Reload + movl %ebp, 28(%esp) # 4-byte Spill + addl %ebx, %eax + mulxl 96(%esp), %ebp, %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + adcl %edi, %ebp + mulxl 92(%esp), 
%ebx, %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl %ecx, %ebx + mulxl 88(%esp), %edi, %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl %esi, %edi + mulxl 84(%esp), %esi, %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + mulxl 80(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, (%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + mulxl 76(%esp), %edx, %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, %eax + movl 40(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 36(%esp) # 4-byte Spill + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 28(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl 12(%eax), %edx + mulxl 64(%esp), %esi, %eax # 4-byte Folded Reload + mulxl 68(%esp), %edi, %ecx # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + addl %esi, %ecx + mulxl 60(%esp), %esi, %edi # 4-byte Folded Reload + adcl %eax, %esi + mulxl 56(%esp), %eax, %ebx # 4-byte Folded Reload + adcl %edi, %eax + movl %eax, %edi + mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload + adcl %ebx, %eax + movl %eax, 24(%esp) # 4-byte Spill + mulxl 48(%esp), %eax, %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, %ebx + mulxl 44(%esp), %ebp, %eax # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl $0, %eax + movl 32(%esp), %edx # 4-byte Reload + addl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + movl 12(%esp), %edx # 4-byte Reload + adcl %edx, 24(%esp) # 4-byte Folded Spill + adcl 8(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + adcl $0, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + movl %edi, %edx + imull 72(%esp), %edx # 4-byte Folded Reload + mulxl 100(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + addl %edi, %eax + mulxl 96(%esp), %edi, %ecx # 4-byte Folded Reload + movl %ecx, 32(%esp) # 4-byte Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + mulxl 92(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl %esi, %eax + movl %eax, %esi + mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + mulxl 84(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + mulxl 80(%esp), %eax, %ebx # 4-byte Folded Reload + movl %ebx, (%esp) # 4-byte Spill + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + mulxl 76(%esp), %eax, %edx # 4-byte Folded 
Reload + adcl %ebp, %eax + movl 36(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + addl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + movl 12(%esp), %esi # 4-byte Reload + adcl %esi, 28(%esp) # 4-byte Folded Spill + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + movl 4(%esp), %ecx # 4-byte Reload + adcl %ecx, 24(%esp) # 4-byte Folded Spill + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl %edx, %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl 16(%eax), %edx + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 68(%esp), %esi, %edi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + addl %ecx, %edi + mulxl 60(%esp), %ecx, %ebx # 4-byte Folded Reload + adcl %eax, %ecx + mulxl 56(%esp), %eax, %esi # 4-byte Folded Reload + adcl %ebx, %eax + movl %eax, %ebx + mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload + adcl %esi, %eax + movl %eax, 40(%esp) # 4-byte Spill + mulxl 48(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, %esi + mulxl 44(%esp), %ebp, %eax # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl $0, %eax + movl %eax, %edx + movl 32(%esp), %eax # 4-byte Reload + addl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 28(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 32(%esp), %ebx # 4-byte Reload + movl %ebx, %edx + imull 72(%esp), %edx # 4-byte Folded Reload + mulxl 100(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + addl %ebx, %eax + mulxl 96(%esp), %ebx, %ecx # 4-byte Folded Reload + movl %ecx, 32(%esp) # 4-byte Spill + adcl %edi, %ebx + mulxl 92(%esp), %edi, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + mulxl 84(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + mulxl 80(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + mulxl 76(%esp), %eax, %edx # 4-byte Folded Reload + adcl %ebp, %eax + movl 36(%esp), %esi # 4-byte Reload + adcl $0, %esi + addl 20(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + movl 8(%esp), %ecx # 4-byte Reload + adcl %ecx, 28(%esp) # 4-byte Folded Spill + movl 4(%esp), %ecx # 4-byte Reload + adcl %ecx, 24(%esp) # 4-byte Folded Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl %edx, %esi + movl 
%esi, 36(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl 20(%eax), %edx + mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 68(%esp), %edi, %esi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + addl %ecx, %esi + mulxl 60(%esp), %ebp, %ecx # 4-byte Folded Reload + adcl %eax, %ebp + mulxl 56(%esp), %eax, %edi # 4-byte Folded Reload + adcl %ecx, %eax + movl %eax, 40(%esp) # 4-byte Spill + mulxl 52(%esp), %ecx, %ebx # 4-byte Folded Reload + adcl %edi, %ecx + mulxl 48(%esp), %eax, %edi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + adcl %ebx, %eax + movl %eax, %edi + mulxl 44(%esp), %ebx, %eax # 4-byte Folded Reload + adcl 4(%esp), %ebx # 4-byte Folded Reload + adcl $0, %eax + movl %eax, %edx + movl 32(%esp), %eax # 4-byte Reload + addl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl 40(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, (%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, %edx + imull 72(%esp), %edx # 4-byte Folded Reload + mulxl 100(%esp), %eax, %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + addl %ecx, %eax + mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + mulxl 92(%esp), %esi, %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl %ebp, %esi + mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + mulxl 84(%esp), %eax, %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl 20(%esp), %eax # 4-byte Folded Reload + mulxl 80(%esp), %ebp, %edi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + adcl (%esp), %ebp # 4-byte Folded Reload + movl %ebp, 40(%esp) # 4-byte Spill + mulxl 76(%esp), %edi, %edx # 4-byte Folded Reload + adcl %ebx, %edi + movl %edi, %ebx + movl 36(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl 28(%esp), %ebp # 4-byte Reload + addl %ebp, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 4(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + adcl %edx, %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl 24(%eax), %edx + mulxl 64(%esp), %edi, %ebx # 4-byte Folded Reload + mulxl 68(%esp), %eax, %ebp # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + addl %edi, %ebp + mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %ebx, %ecx + movl %ecx, 68(%esp) # 4-byte Spill + mulxl 56(%esp), %ebx, %ecx # 4-byte Folded Reload + adcl %eax, %ebx + mulxl 52(%esp), %esi, %edi # 4-byte Folded Reload + adcl %ecx, %esi + mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + adcl %edi, %eax + movl 
%eax, %ecx + mulxl 44(%esp), %edx, %eax # 4-byte Folded Reload + adcl 60(%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + movl 64(%esp), %edi # 4-byte Reload + addl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 64(%esp) # 4-byte Spill + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl 68(%esp), %edi # 4-byte Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + movl 64(%esp), %eax # 4-byte Reload + imull %eax, %edx + mulxl 100(%esp), %esi, %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + addl %eax, %esi + mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload + movl %ecx, 64(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl %edx, %ecx + mulxl 92(%esp), %eax, %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl %edi, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %ecx, %edx + mulxl 88(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + adcl %ebx, %ebp + movl %ecx, %edx + mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + adcl 48(%esp), %esi # 4-byte Folded Reload + movl %ecx, %edx + mulxl 80(%esp), %edi, %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %ecx, %edx + mulxl 76(%esp), %ebx, %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl 68(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl 36(%esp), %edx # 4-byte Reload + addl 56(%esp), %edx # 4-byte Folded Reload + movl 72(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + adcl 44(%esp), %ebp # 4-byte Folded Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 56(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + movl %edx, %ecx + subl 100(%esp), %ecx # 4-byte Folded Reload + sbbl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl %ebp, %eax + sbbl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + sbbl 88(%esp), %esi # 4-byte Folded Reload + movl %esi, 96(%esp) # 4-byte Spill + sbbl 84(%esp), %edi # 4-byte Folded Reload + movl %edi, 100(%esp) # 4-byte Spill + sbbl 80(%esp), %ebx # 4-byte Folded Reload + movl 68(%esp), %edi # 4-byte Reload + sbbl 76(%esp), %edi # 4-byte Folded Reload + movl %edi, %eax + sarl $31, %eax + testl %eax, %eax + js .LBB99_2 +# BB#1: + movl %ecx, %edx +.LBB99_2: + movl 124(%esp), %esi + movl %edx, (%esi) + movl 72(%esp), %eax # 4-byte Reload + js .LBB99_4 +# BB#3: + movl 52(%esp), %eax # 4-byte Reload +.LBB99_4: + movl %eax, 4(%esi) + movl 68(%esp), %eax # 4-byte Reload + movl 64(%esp), %ecx # 4-byte Reload + movl 60(%esp), %edx # 4-byte Reload + js .LBB99_6 +# BB#5: + movl 92(%esp), %ebp # 4-byte Reload +.LBB99_6: + movl %ebp, 8(%esi) + movl %esi, %ebp + movl 56(%esp), %esi # 4-byte Reload + js .LBB99_8 +# BB#7: + movl 
96(%esp), %esi # 4-byte Reload +.LBB99_8: + movl %esi, 12(%ebp) + js .LBB99_10 +# BB#9: + movl 100(%esp), %edx # 4-byte Reload +.LBB99_10: + movl %edx, 16(%ebp) + js .LBB99_12 +# BB#11: + movl %ebx, %ecx +.LBB99_12: + movl %ecx, 20(%ebp) + js .LBB99_14 +# BB#13: + movl %edi, %eax +.LBB99_14: + movl %eax, 24(%ebp) + addl $104, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end99: + .size mcl_fp_montNF7Lbmi2, .Lfunc_end99-mcl_fp_montNF7Lbmi2 + + .globl mcl_fp_montRed7Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed7Lbmi2,@function +mcl_fp_montRed7Lbmi2: # @mcl_fp_montRed7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $108, %esp + movl 136(%esp), %edi + movl -4(%edi), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl (%edi), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl (%eax), %edx + movl %edx, 72(%esp) # 4-byte Spill + imull %ecx, %edx + movl 24(%edi), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + mulxl %ecx, %ebx, %ecx + movl %ebx, 68(%esp) # 4-byte Spill + movl %ecx, 44(%esp) # 4-byte Spill + movl 20(%edi), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + mulxl %ecx, %ebx, %ecx + movl %ebx, 60(%esp) # 4-byte Spill + movl %ecx, 40(%esp) # 4-byte Spill + movl 16(%edi), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + mulxl %ecx, %ebx, %ecx + movl %ebx, 56(%esp) # 4-byte Spill + movl %ecx, 36(%esp) # 4-byte Spill + movl 4(%edi), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + mulxl %ecx, %ecx, %ebp + mulxl %esi, %ebx, %esi + movl %ebx, 64(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 8(%edi), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + mulxl %ecx, %esi, %ecx + adcl %ebp, %esi + movl %esi, %ebp + movl 12(%edi), %esi + movl %esi, 84(%esp) # 4-byte Spill + mulxl %esi, %esi, %edx + adcl %ecx, %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, %edi + movl 36(%esp), %esi # 4-byte Reload + adcl 60(%esp), %esi # 4-byte Folded Reload + movl 40(%esp), %edx # 4-byte Reload + adcl 68(%esp), %edx # 4-byte Folded Reload + movl 44(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl 64(%esp), %ebx # 4-byte Reload + addl 72(%esp), %ebx # 4-byte Folded Reload + movl 28(%esp), %ebx # 4-byte Reload + adcl 4(%eax), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + adcl 8(%eax), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 12(%eax), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 16(%eax), %edi + movl %edi, 16(%esp) # 4-byte Spill + adcl 20(%eax), %esi + movl %esi, 36(%esp) # 4-byte Spill + adcl 24(%eax), %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl 28(%eax), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%eax), %ecx + movl 48(%eax), %edx + movl 44(%eax), %esi + movl 40(%eax), %edi + movl 36(%eax), %ebp + movl 32(%eax), %eax + adcl $0, %eax + movl %eax, 12(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 24(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 56(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 72(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %ebx, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 100(%esp), %eax, %ebx # 4-byte Folded Reload + movl %eax, 4(%esp) # 4-byte Spill + mulxl 88(%esp), %ebp, %eax # 4-byte Folded Reload + movl %eax, (%esp) # 4-byte Spill + mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload + mulxl 92(%esp), %edi, %esi # 
4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + addl %ecx, %esi + movl %esi, 32(%esp) # 4-byte Spill + adcl %ebp, %eax + movl %eax, 48(%esp) # 4-byte Spill + mulxl 84(%esp), %esi, %ebp # 4-byte Folded Reload + adcl (%esp), %esi # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + mulxl 104(%esp), %ecx, %edi # 4-byte Folded Reload + adcl %ebx, %ecx + mulxl 80(%esp), %ebx, %edx # 4-byte Folded Reload + adcl %edi, %ebx + adcl $0, %edx + movl 8(%esp), %eax # 4-byte Reload + addl 28(%esp), %eax # 4-byte Folded Reload + movl 32(%esp), %edi # 4-byte Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 36(%esp) # 4-byte Spill + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + adcl $0, 60(%esp) # 4-byte Folded Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + adcl $0, 72(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %edi, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + movl %eax, 52(%esp) # 4-byte Spill + mulxl 84(%esp), %ebx, %ebp # 4-byte Folded Reload + mulxl 96(%esp), %eax, %esi # 4-byte Folded Reload + mulxl 92(%esp), %ecx, %edi # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + addl %eax, %edi + movl %edi, 8(%esp) # 4-byte Spill + mulxl 88(%esp), %edi, %eax # 4-byte Folded Reload + adcl %esi, %edi + adcl %ebx, %eax + movl %eax, %ebx + mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ebp, %esi + mulxl 104(%esp), %edx, %ecx # 4-byte Folded Reload + adcl %eax, %edx + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 12(%esp), %ebp # 4-byte Reload + addl 32(%esp), %ebp # 4-byte Folded Reload + movl 8(%esp), %ebp # 4-byte Reload + adcl 48(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 36(%esp) # 4-byte Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 44(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + adcl $0, 60(%esp) # 4-byte Folded Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + adcl $0, 72(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ebp, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + mulxl 84(%esp), %edi, %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + mulxl 96(%esp), %eax, %ebx # 4-byte Folded Reload + mulxl 92(%esp), %esi, %ecx # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + addl %eax, %ecx + movl %ecx, 
16(%esp) # 4-byte Spill + mulxl 88(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ebx, %esi + movl %esi, %ebx + adcl %edi, %eax + movl %eax, %edi + mulxl 100(%esp), %esi, %ecx # 4-byte Folded Reload + adcl 12(%esp), %esi # 4-byte Folded Reload + mulxl 104(%esp), %edx, %eax # 4-byte Folded Reload + adcl %ecx, %edx + movl %edx, %ecx + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, %edx + movl 48(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %ebp, 20(%esp) # 4-byte Folded Spill + movl 16(%esp), %ebp # 4-byte Reload + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 36(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, 60(%esp) # 4-byte Folded Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + adcl $0, 72(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ebp, %edx + movl %ebp, %edi + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + mulxl 84(%esp), %eax, %ebx # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload + mulxl 92(%esp), %esi, %ebp # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + addl %eax, %ebp + mulxl 88(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ecx, %esi + movl %esi, 44(%esp) # 4-byte Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, %ecx + mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload + adcl %ebx, %esi + mulxl 104(%esp), %ebx, %edx # 4-byte Folded Reload + adcl %eax, %ebx + adcl 24(%esp), %edx # 4-byte Folded Reload + movl 56(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %edi, 20(%esp) # 4-byte Folded Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 40(%esp), %edi # 4-byte Reload + adcl %edi, 44(%esp) # 4-byte Folded Spill + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 28(%esp) # 4-byte Spill + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + adcl $0, 72(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ebp, %edx + imull 76(%esp), %edx # 4-byte Folded Reload + mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + movl %eax, 60(%esp) # 4-byte Spill + mulxl 84(%esp), %ebx, %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + mulxl 96(%esp), %ecx, %edi # 4-byte Folded Reload + mulxl 92(%esp), %esi, %eax # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + addl %ecx, %eax + movl %eax, 52(%esp) # 4-byte Spill + mulxl 88(%esp), %ecx, %eax # 4-byte Folded Reload + adcl %edi, %ecx + movl %ecx, %edi + adcl %ebx, %eax + movl %eax, %ebx + mulxl 100(%esp), %esi, %ecx # 4-byte Folded Reload + adcl 48(%esp), %esi # 4-byte Folded Reload + mulxl 104(%esp), %edx, %eax # 4-byte 
Folded Reload + adcl %ecx, %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, %edx + movl 60(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl %ebp, 20(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl %esi, 48(%esp) # 4-byte Folded Spill + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl 64(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + adcl $0, 72(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl 76(%esp), %edx # 4-byte Reload + imull %eax, %edx + mulxl 92(%esp), %eax, %ecx # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + mulxl 96(%esp), %eax, %esi # 4-byte Folded Reload + addl %ecx, %eax + movl %eax, 56(%esp) # 4-byte Spill + mulxl 88(%esp), %eax, %edi # 4-byte Folded Reload + adcl %esi, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %edx, %esi + mulxl 84(%esp), %ebp, %eax # 4-byte Folded Reload + adcl %edi, %ebp + mulxl 100(%esp), %ecx, %edi # 4-byte Folded Reload + adcl %eax, %ecx + mulxl 104(%esp), %ebx, %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + adcl %edi, %ebx + mulxl 80(%esp), %edi, %eax # 4-byte Folded Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + adcl $0, %eax + movl 64(%esp), %edx # 4-byte Reload + addl 52(%esp), %edx # 4-byte Folded Reload + movl 56(%esp), %edx # 4-byte Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl 76(%esp), %esi # 4-byte Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 64(%esp) # 4-byte Spill + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + adcl 60(%esp), %edi # 4-byte Folded Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl 68(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %edx, %ebp + subl 92(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 68(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + sbbl 96(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 72(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + sbbl 88(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 88(%esp) # 4-byte Spill + movl %edx, %ebp + sbbl 84(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 92(%esp) # 4-byte Spill + movl %ebx, %ecx + sbbl 100(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 96(%esp) # 4-byte Spill + movl %edi, %ecx + sbbl 104(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 104(%esp) # 4-byte Spill + movl %eax, %edx + movl %eax, %ecx + sbbl 80(%esp), %edx # 4-byte Folded Reload + movl %edx, 100(%esp) # 4-byte Spill + sbbl $0, %esi + andl $1, %esi + jne .LBB100_2 +# BB#1: + movl 68(%esp), %ebp # 4-byte Reload +.LBB100_2: + movl 128(%esp), %edx + movl %ebp, (%edx) + movl %esi, %eax + testb %al, %al + movl 76(%esp), %ebp # 4-byte Reload + jne .LBB100_4 +# BB#3: + movl 72(%esp), %ebp # 4-byte Reload +.LBB100_4: + movl %ebp, 4(%edx) + movl %ecx, %eax + movl 64(%esp), %ecx # 4-byte Reload + jne .LBB100_6 +# BB#5: + movl 88(%esp), %ecx # 4-byte Reload +.LBB100_6: + movl %ecx, 8(%edx) + movl 56(%esp), 
%ecx # 4-byte Reload
+ jne .LBB100_8
+# BB#7:
+ movl 92(%esp), %ecx # 4-byte Reload
+.LBB100_8:
+ movl %ecx, 12(%edx)
+ jne .LBB100_10
+# BB#9:
+ movl 96(%esp), %ebx # 4-byte Reload
+.LBB100_10:
+ movl %ebx, 16(%edx)
+ jne .LBB100_12
+# BB#11:
+ movl 104(%esp), %edi # 4-byte Reload
+.LBB100_12:
+ movl %edi, 20(%edx)
+ jne .LBB100_14
+# BB#13:
+ movl 100(%esp), %eax # 4-byte Reload
+.LBB100_14:
+ movl %eax, 24(%edx)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end100:
+ .size mcl_fp_montRed7Lbmi2, .Lfunc_end100-mcl_fp_montRed7Lbmi2
+
+ .globl mcl_fp_addPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre7Lbmi2,@function
+mcl_fp_addPre7Lbmi2: # @mcl_fp_addPre7Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl 12(%esi), %ecx
+ movl %edx, 4(%ebx)
+ movl 16(%esi), %edx
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edx
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %ecx, 12(%ebx)
+ movl 20(%esi), %ecx
+ adcl %edi, %ecx
+ movl %edx, 16(%ebx)
+ movl %ecx, 20(%ebx)
+ movl 24(%eax), %eax
+ movl 24(%esi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 24(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end101:
+ .size mcl_fp_addPre7Lbmi2, .Lfunc_end101-mcl_fp_addPre7Lbmi2
+
+ .globl mcl_fp_subPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre7Lbmi2,@function
+mcl_fp_subPre7Lbmi2: # @mcl_fp_subPre7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 12(%ecx), %edx
+ sbbl 12(%edi), %edx
+ movl %esi, 4(%ebp)
+ movl 16(%ecx), %esi
+ sbbl 16(%edi), %esi
+ movl %ebx, 8(%ebp)
+ movl 20(%edi), %ebx
+ movl %edx, 12(%ebp)
+ movl 20(%ecx), %edx
+ sbbl %ebx, %edx
+ movl %esi, 16(%ebp)
+ movl %edx, 20(%ebp)
+ movl 24(%edi), %edx
+ movl 24(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 24(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end102:
+ .size mcl_fp_subPre7Lbmi2, .Lfunc_end102-mcl_fp_subPre7Lbmi2
+
+ .globl mcl_fp_shr1_7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_7Lbmi2,@function
+mcl_fp_shr1_7Lbmi2: # @mcl_fp_shr1_7Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 20(%esi)
+ shrl %eax
+ movl %eax, 24(%esi)
+ popl %esi
+ retl
+.Lfunc_end103:
+ .size mcl_fp_shr1_7Lbmi2, .Lfunc_end103-mcl_fp_shr1_7Lbmi2
+
+ .globl mcl_fp_add7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add7Lbmi2,@function
+mcl_fp_add7Lbmi2: # @mcl_fp_add7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %ebp
+ movl (%ebp), %eax
+ movl 4(%ebp), %edi
+ movl 44(%esp), %ecx
+ addl (%ecx), %eax
+ adcl 4(%ecx), %edi
+ movl 8(%ebp), %esi
+ adcl 8(%ecx), %esi
+ movl 12(%ecx), %edx
+ movl 16(%ecx), %ebx
+ adcl
12(%ebp), %edx + movl %edx, 16(%esp) # 4-byte Spill + adcl 16(%ebp), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl %ebp, %ebx + movl 20(%ecx), %ebp + adcl 20(%ebx), %ebp + movl 24(%ecx), %edx + adcl 24(%ebx), %edx + movl 40(%esp), %ecx + movl %eax, (%ecx) + movl %edi, 4(%ecx) + movl %esi, 8(%ecx) + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%ecx) + movl 12(%esp), %ebx # 4-byte Reload + movl %ebx, 16(%ecx) + movl %ebp, 20(%ecx) + movl %edx, 24(%ecx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 52(%esp), %ecx + subl (%ecx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 12(%esp), %ecx # 4-byte Reload + movl 52(%esp), %eax + sbbl 4(%eax), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %eax, %edi + sbbl 8(%edi), %esi + movl %esi, (%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + sbbl 12(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + sbbl 16(%edi), %ecx + movl %ecx, %esi + sbbl 20(%edi), %ebp + sbbl 24(%edi), %edx + sbbl $0, %ebx + testb $1, %bl + jne .LBB104_2 +# BB#1: # %nocarry + movl 8(%esp), %ecx # 4-byte Reload + movl 40(%esp), %eax + movl %eax, %ebx + movl %ecx, (%ebx) + movl 4(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%ebx) + movl (%esp), %eax # 4-byte Reload + movl %eax, 8(%ebx) + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 12(%ebx) + movl %esi, 16(%ebx) + movl %ebp, 20(%ebx) + movl %edx, 24(%ebx) +.LBB104_2: # %carry + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end104: + .size mcl_fp_add7Lbmi2, .Lfunc_end104-mcl_fp_add7Lbmi2 + + .globl mcl_fp_addNF7Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF7Lbmi2,@function +mcl_fp_addNF7Lbmi2: # @mcl_fp_addNF7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $52, %esp + movl 80(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 76(%esp), %esi + addl (%esi), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + adcl 4(%esi), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 20(%eax), %ebx + movl 16(%eax), %edi + movl 12(%eax), %ebp + movl 8(%eax), %ecx + adcl 8(%esi), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + adcl 12(%esi), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl 16(%esi), %edi + movl %edi, 36(%esp) # 4-byte Spill + adcl 20(%esi), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 24(%esi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl 44(%esp), %esi # 4-byte Reload + subl (%eax), %esi + movl %esi, (%esp) # 4-byte Spill + sbbl 4(%eax), %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 8(%eax), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 12(%eax), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + sbbl 16(%eax), %edi + movl %edi, 20(%esp) # 4-byte Spill + sbbl 20(%eax), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + sbbl 24(%eax), %edi + movl %edi, %ecx + sarl $31, %ecx + testl %ecx, %ecx + js .LBB105_2 +# BB#1: + movl (%esp), %esi # 4-byte Reload +.LBB105_2: + movl 72(%esp), %ecx + movl %esi, (%ecx) + movl 28(%esp), %eax # 4-byte Reload + js .LBB105_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB105_4: + movl %eax, 4(%ecx) + movl 48(%esp), %ebp # 4-byte Reload + movl 40(%esp), %ecx # 4-byte Reload + movl 36(%esp), %edx # 4-byte Reload + movl 32(%esp), %esi # 4-byte Reload + movl 24(%esp), %ebx # 4-byte Reload + js .LBB105_6 +# BB#5: + movl 8(%esp), %ebx # 4-byte Reload +.LBB105_6: + movl 72(%esp), %eax + movl %ebx, 8(%eax) + movl %eax, %ebx + js .LBB105_8 +# 
BB#7: + movl 16(%esp), %esi # 4-byte Reload +.LBB105_8: + movl %esi, 12(%ebx) + js .LBB105_10 +# BB#9: + movl 20(%esp), %edx # 4-byte Reload +.LBB105_10: + movl %edx, 16(%ebx) + js .LBB105_12 +# BB#11: + movl 12(%esp), %ecx # 4-byte Reload +.LBB105_12: + movl %ecx, 20(%ebx) + js .LBB105_14 +# BB#13: + movl %edi, %ebp +.LBB105_14: + movl %ebp, 24(%ebx) + addl $52, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end105: + .size mcl_fp_addNF7Lbmi2, .Lfunc_end105-mcl_fp_addNF7Lbmi2 + + .globl mcl_fp_sub7Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub7Lbmi2,@function +mcl_fp_sub7Lbmi2: # @mcl_fp_sub7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $24, %esp + movl 48(%esp), %edi + movl (%edi), %eax + movl 4(%edi), %ecx + xorl %ebx, %ebx + movl 52(%esp), %esi + subl (%esi), %eax + movl %eax, 16(%esp) # 4-byte Spill + sbbl 4(%esi), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 8(%edi), %edx + sbbl 8(%esi), %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 12(%edi), %ecx + sbbl 12(%esi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 16(%edi), %eax + sbbl 16(%esi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 20(%edi), %ebp + sbbl 20(%esi), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 24(%edi), %edi + sbbl 24(%esi), %edi + sbbl $0, %ebx + testb $1, %bl + movl 44(%esp), %ebx + movl 16(%esp), %esi # 4-byte Reload + movl %esi, (%ebx) + movl 20(%esp), %esi # 4-byte Reload + movl %esi, 4(%ebx) + movl %edx, 8(%ebx) + movl %ecx, 12(%ebx) + movl %eax, 16(%ebx) + movl %ebp, 20(%ebx) + movl %edi, 24(%ebx) + je .LBB106_2 +# BB#1: # %carry + movl 56(%esp), %ebp + movl 16(%esp), %ecx # 4-byte Reload + addl (%ebp), %ecx + movl %ecx, (%ebx) + movl 20(%esp), %edx # 4-byte Reload + adcl 4(%ebp), %edx + movl %edx, 4(%ebx) + movl 4(%esp), %ecx # 4-byte Reload + adcl 8(%ebp), %ecx + movl 12(%ebp), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebx) + movl 16(%ebp), %ecx + adcl (%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl %ecx, 16(%ebx) + movl 20(%ebp), %eax + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 24(%ebp), %eax + adcl %edi, %eax + movl %eax, 24(%ebx) +.LBB106_2: # %nocarry + addl $24, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end106: + .size mcl_fp_sub7Lbmi2, .Lfunc_end106-mcl_fp_sub7Lbmi2 + + .globl mcl_fp_subNF7Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF7Lbmi2,@function +mcl_fp_subNF7Lbmi2: # @mcl_fp_subNF7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 56(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %edx + movl 60(%esp), %ecx + subl (%ecx), %esi + movl %esi, 20(%esp) # 4-byte Spill + sbbl 4(%ecx), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 24(%eax), %edx + movl 20(%eax), %esi + movl 16(%eax), %edi + movl 12(%eax), %ebx + movl 8(%eax), %eax + sbbl 8(%ecx), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 12(%ecx), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + sbbl 16(%ecx), %edi + movl %edi, 16(%esp) # 4-byte Spill + sbbl 20(%ecx), %esi + movl %esi, 28(%esp) # 4-byte Spill + sbbl 24(%ecx), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %edx, %ecx + sarl $31, %ecx + movl %ecx, %eax + shldl $1, %edx, %eax + movl 64(%esp), %edx + andl (%edx), %eax + movl 24(%edx), %esi + andl %ecx, %esi + movl %esi, (%esp) # 4-byte Spill + movl 20(%edx), %ebx + andl %ecx, %ebx + movl 16(%edx), %edi + andl %ecx, %edi + movl 12(%edx), %esi + andl %ecx, %esi + movl 64(%esp), %edx + movl 8(%edx), %edx + andl %ecx, %edx + movl 64(%esp), %ebp + 
andl 4(%ebp), %ecx + addl 20(%esp), %eax # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %ebp + movl %eax, (%ebp) + adcl 4(%esp), %edx # 4-byte Folded Reload + movl %ebp, %eax + movl %ecx, 4(%eax) + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %edx, 8(%eax) + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %esi, 12(%eax) + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %edi, 16(%eax) + movl %ebx, 20(%eax) + movl (%esp), %ecx # 4-byte Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%eax) + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end107: + .size mcl_fp_subNF7Lbmi2, .Lfunc_end107-mcl_fp_subNF7Lbmi2 + + .globl mcl_fpDbl_add7Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add7Lbmi2,@function +mcl_fpDbl_add7Lbmi2: # @mcl_fpDbl_add7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $44, %esp + movl 72(%esp), %esi + movl 68(%esp), %edx + movl 12(%edx), %edi + movl 16(%edx), %ecx + movl 8(%esi), %eax + movl (%esi), %ebx + addl (%edx), %ebx + movl 64(%esp), %ebp + movl %ebx, (%ebp) + movl 4(%esi), %ebx + adcl 4(%edx), %ebx + adcl 8(%edx), %eax + adcl 12(%esi), %edi + adcl 16(%esi), %ecx + movl %ebx, 4(%ebp) + movl %esi, %ebx + movl 36(%ebx), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl %eax, 8(%ebp) + movl 20(%ebx), %eax + movl %edi, 12(%ebp) + movl 20(%edx), %edi + adcl %eax, %edi + movl 24(%ebx), %eax + movl %ecx, 16(%ebp) + movl 24(%edx), %ecx + adcl %eax, %ecx + movl 28(%ebx), %eax + movl %edi, 20(%ebp) + movl 28(%edx), %edi + adcl %eax, %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 32(%ebx), %eax + movl %ecx, 24(%ebp) + movl 32(%edx), %ecx + adcl %eax, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%edx), %esi + adcl 36(%esp), %esi # 4-byte Folded Reload + movl 40(%ebx), %ecx + movl 40(%edx), %eax + adcl %ecx, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%ebx), %ebp + movl 44(%edx), %ecx + adcl %ebp, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 48(%ebx), %ebp + movl %ebx, %eax + movl 48(%edx), %ebx + adcl %ebp, %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 52(%eax), %eax + movl 52(%edx), %ebp + adcl %eax, %ebp + movl %ebp, 32(%esp) # 4-byte Spill + sbbl %edx, %edx + andl $1, %edx + movl 76(%esp), %eax + subl (%eax), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + sbbl 4(%eax), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %esi, %eax + movl 76(%esp), %edi + sbbl 8(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + sbbl 12(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + sbbl 16(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + sbbl 20(%edi), %ebx + sbbl 24(%edi), %ebp + sbbl $0, %edx + andl $1, %edx + jne .LBB108_2 +# BB#1: + movl %ebp, 32(%esp) # 4-byte Spill +.LBB108_2: + testb %dl, %dl + movl 20(%esp), %ecx # 4-byte Reload + jne .LBB108_4 +# BB#3: + movl (%esp), %esi # 4-byte Reload + movl 4(%esp), %eax # 4-byte Reload + movl %eax, 40(%esp) # 4-byte Spill + movl 8(%esp), %ecx # 4-byte Reload +.LBB108_4: + movl 64(%esp), %eax + movl %ecx, 28(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl %esi, 36(%eax) + movl 24(%esp), %edx # 4-byte Reload + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB108_6 +# BB#5: + movl 12(%esp), %ecx # 4-byte Reload +.LBB108_6: + movl %ecx, 40(%eax) + movl 28(%esp), %ecx # 4-byte Reload + jne .LBB108_8 +# BB#7: + movl 16(%esp), %edx # 4-byte Reload +.LBB108_8: + movl %edx, 44(%eax) + jne .LBB108_10 +# BB#9: + movl %ebx, %ecx 
+.LBB108_10: + movl %ecx, 48(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + addl $44, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end108: + .size mcl_fpDbl_add7Lbmi2, .Lfunc_end108-mcl_fpDbl_add7Lbmi2 + + .globl mcl_fpDbl_sub7Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub7Lbmi2,@function +mcl_fpDbl_sub7Lbmi2: # @mcl_fpDbl_sub7Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 56(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %edx + movl 60(%esp), %edi + subl (%edi), %eax + sbbl 4(%edi), %edx + movl 8(%esi), %ebx + sbbl 8(%edi), %ebx + movl 52(%esp), %ecx + movl %eax, (%ecx) + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %edx, 4(%ecx) + movl 16(%esi), %edx + sbbl 16(%edi), %edx + movl %ebx, 8(%ecx) + movl 20(%edi), %ebx + movl %eax, 12(%ecx) + movl 20(%esi), %eax + sbbl %ebx, %eax + movl 24(%edi), %ebx + movl %edx, 16(%ecx) + movl 24(%esi), %edx + sbbl %ebx, %edx + movl 28(%edi), %ebx + movl %eax, 20(%ecx) + movl 28(%esi), %eax + sbbl %ebx, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 32(%edi), %eax + movl %edx, 24(%ecx) + movl 32(%esi), %edx + sbbl %eax, %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 36(%edi), %eax + movl 36(%esi), %edx + sbbl %eax, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 40(%edi), %eax + movl 40(%esi), %edx + sbbl %eax, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 44(%edi), %eax + movl 44(%esi), %edx + sbbl %eax, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 48(%edi), %eax + movl 48(%esi), %edx + sbbl %eax, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 52(%edi), %eax + movl 52(%esi), %edx + sbbl %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 64(%esp), %esi + jne .LBB109_1 +# BB#2: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB109_3 +.LBB109_1: + movl 24(%esi), %edx + movl %edx, (%esp) # 4-byte Spill +.LBB109_3: + testb %al, %al + jne .LBB109_4 +# BB#5: + movl $0, %edi + movl $0, %eax + jmp .LBB109_6 +.LBB109_4: + movl (%esi), %eax + movl 4(%esi), %edi +.LBB109_6: + jne .LBB109_7 +# BB#8: + movl $0, %ebx + jmp .LBB109_9 +.LBB109_7: + movl 20(%esi), %ebx +.LBB109_9: + jne .LBB109_10 +# BB#11: + movl $0, %ebp + jmp .LBB109_12 +.LBB109_10: + movl 16(%esi), %ebp +.LBB109_12: + jne .LBB109_13 +# BB#14: + movl $0, %edx + jmp .LBB109_15 +.LBB109_13: + movl 12(%esi), %edx +.LBB109_15: + jne .LBB109_16 +# BB#17: + xorl %esi, %esi + jmp .LBB109_18 +.LBB109_16: + movl 8(%esi), %esi +.LBB109_18: + addl 12(%esp), %eax # 4-byte Folded Reload + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %eax, 28(%ecx) + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %edi, 32(%ecx) + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %esi, 36(%ecx) + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl %edx, 40(%ecx) + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 44(%ecx) + movl %ebx, 48(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%ecx) + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end109: + .size mcl_fpDbl_sub7Lbmi2, .Lfunc_end109-mcl_fpDbl_sub7Lbmi2 + + .align 16, 0x90 + .type .LmulPv256x32,@function +.LmulPv256x32: # @mulPv256x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl %edx, %eax + movl 40(%esp), %edx + mulxl 4(%eax), %edi, %esi + mulxl (%eax), %ebp, %ebx + movl %ebp, 16(%esp) # 4-byte Spill + addl %edi, %ebx + movl %ebx, 12(%esp) # 4-byte Spill + mulxl 8(%eax), %edi, %ebx + movl 
%ebx, 4(%esp) # 4-byte Spill + adcl %esi, %edi + movl %edi, 8(%esp) # 4-byte Spill + movl %edx, %ebp + mulxl 12(%eax), %ebx, %esi + adcl 4(%esp), %ebx # 4-byte Folded Reload + mulxl 16(%eax), %edi, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl %esi, %edi + movl %ebp, %edx + mulxl 20(%eax), %esi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl 4(%esp), %esi # 4-byte Folded Reload + movl %ebp, %edx + mulxl 24(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl %ebx, 12(%ecx) + movl %edi, 16(%ecx) + movl %esi, 20(%ecx) + movl %edx, 24(%ecx) + movl 40(%esp), %edx + mulxl 28(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%ecx) + adcl $0, %edx + movl %edx, 32(%ecx) + movl %ecx, %eax + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end110: + .size .LmulPv256x32, .Lfunc_end110-.LmulPv256x32 + + .globl mcl_fp_mulUnitPre8Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre8Lbmi2,@function +mcl_fp_mulUnitPre8Lbmi2: # @mcl_fp_mulUnitPre8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + calll .L111$pb +.L111$pb: + popl %ebx +.Ltmp2: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.L111$pb), %ebx + movl 88(%esp), %eax + movl %eax, (%esp) + leal 24(%esp), %ecx + movl 84(%esp), %edx + calll .LmulPv256x32 + movl 56(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 52(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 48(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 44(%esp), %esi + movl 40(%esp), %edi + movl 36(%esp), %ebx + movl 32(%esp), %ebp + movl 24(%esp), %edx + movl 28(%esp), %ecx + movl 80(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %ebp, 8(%eax) + movl %ebx, 12(%eax) + movl %edi, 16(%eax) + movl %esi, 20(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end111: + .size mcl_fp_mulUnitPre8Lbmi2, .Lfunc_end111-mcl_fp_mulUnitPre8Lbmi2 + + .globl mcl_fpDbl_mulPre8Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre8Lbmi2,@function +mcl_fpDbl_mulPre8Lbmi2: # @mcl_fpDbl_mulPre8Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $156, %esp + calll .L112$pb +.L112$pb: + popl %ebx +.Ltmp3: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp3-.L112$pb), %ebx + movl %ebx, -96(%ebp) # 4-byte Spill + movl 16(%ebp), %esi + movl %esi, 8(%esp) + movl 12(%ebp), %edi + movl %edi, 4(%esp) + movl 8(%ebp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre4Lbmi2@PLT + leal 16(%esi), %eax + movl %eax, 8(%esp) + leal 16(%edi), %eax + movl %eax, 4(%esp) + movl 8(%ebp), %eax + leal 32(%eax), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre4Lbmi2@PLT + movl 24(%edi), %esi + movl (%edi), %ebx + movl 4(%edi), %eax + addl 16(%edi), %ebx + movl %ebx, -120(%ebp) # 4-byte Spill + adcl 20(%edi), %eax + movl %eax, -100(%ebp) # 4-byte Spill + adcl 8(%edi), %esi + movl %esi, -108(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -80(%ebp) # 4-byte Spill + movl 16(%ebp), %edi + movl (%edi), %eax + movl 4(%edi), %ecx + addl 16(%edi), %eax + adcl 20(%edi), %ecx + movl %ecx, -124(%ebp) # 4-byte Spill + movl 24(%edi), %edx + adcl 8(%edi), %edx 
+ movl 28(%edi), %ecx + adcl 12(%edi), %ecx + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -128(%ebp) # 4-byte Spill + jb .LBB112_2 +# BB#1: + xorl %esi, %esi + xorl %ebx, %ebx +.LBB112_2: + movl %ebx, -112(%ebp) # 4-byte Spill + movl %esi, -104(%ebp) # 4-byte Spill + movl 12(%ebp), %esi + movl 28(%esi), %edi + movl -80(%ebp), %ebx # 4-byte Reload + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + adcl 12(%esi), %edi + movl %edi, -116(%ebp) # 4-byte Spill + movl %ecx, -84(%ebp) # 4-byte Spill + movl %edx, %edi + movl -124(%ebp), %ebx # 4-byte Reload + movl %ebx, -80(%ebp) # 4-byte Spill + movl %eax, -92(%ebp) # 4-byte Spill + jb .LBB112_4 +# BB#3: + movl $0, -84(%ebp) # 4-byte Folded Spill + movl $0, %edi + movl $0, -80(%ebp) # 4-byte Folded Spill + movl $0, -92(%ebp) # 4-byte Folded Spill +.LBB112_4: + movl %edi, -88(%ebp) # 4-byte Spill + movl -120(%ebp), %esi # 4-byte Reload + movl %esi, -60(%ebp) + movl -100(%ebp), %edi # 4-byte Reload + movl %edi, -56(%ebp) + movl -108(%ebp), %esi # 4-byte Reload + movl %esi, -52(%ebp) + movl %eax, -76(%ebp) + movl %ebx, -72(%ebp) + movl %edx, -68(%ebp) + movl %ecx, -64(%ebp) + sbbl %edx, %edx + movl -116(%ebp), %esi # 4-byte Reload + movl %esi, -48(%ebp) + movl -128(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB112_6 +# BB#5: + movl $0, %esi + movl $0, %edi +.LBB112_6: + sbbl %eax, %eax + leal -76(%ebp), %ecx + movl %ecx, 8(%esp) + leal -60(%ebp), %ecx + movl %ecx, 4(%esp) + leal -44(%ebp), %ecx + movl %ecx, (%esp) + andl %eax, %edx + movl %edi, %eax + movl -92(%ebp), %edi # 4-byte Reload + addl -112(%ebp), %edi # 4-byte Folded Reload + adcl %eax, -80(%ebp) # 4-byte Folded Spill + movl -104(%ebp), %eax # 4-byte Reload + adcl %eax, -88(%ebp) # 4-byte Folded Spill + adcl %esi, -84(%ebp) # 4-byte Folded Spill + sbbl %esi, %esi + andl $1, %esi + andl $1, %edx + movl %edx, -92(%ebp) # 4-byte Spill + movl -96(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre4Lbmi2@PLT + addl -28(%ebp), %edi + movl -80(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -80(%ebp) # 4-byte Spill + movl -88(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -88(%ebp) # 4-byte Spill + movl -84(%ebp), %eax # 4-byte Reload + adcl -16(%ebp), %eax + movl %eax, -84(%ebp) # 4-byte Spill + adcl %esi, -92(%ebp) # 4-byte Folded Spill + movl -44(%ebp), %eax + movl 8(%ebp), %esi + subl (%esi), %eax + movl %eax, -116(%ebp) # 4-byte Spill + movl -40(%ebp), %ebx + sbbl 4(%esi), %ebx + movl -36(%ebp), %eax + sbbl 8(%esi), %eax + movl %eax, -96(%ebp) # 4-byte Spill + movl -32(%ebp), %edx + sbbl 12(%esi), %edx + movl 16(%esi), %eax + movl %eax, -100(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl 20(%esi), %eax + movl %eax, -112(%ebp) # 4-byte Spill + sbbl %eax, -80(%ebp) # 4-byte Folded Spill + movl 24(%esi), %eax + movl %eax, -104(%ebp) # 4-byte Spill + sbbl %eax, -88(%ebp) # 4-byte Folded Spill + movl 28(%esi), %eax + movl %eax, -108(%ebp) # 4-byte Spill + sbbl %eax, -84(%ebp) # 4-byte Folded Spill + sbbl $0, -92(%ebp) # 4-byte Folded Spill + movl 32(%esi), %ecx + movl %ecx, -132(%ebp) # 4-byte Spill + movl -116(%ebp), %eax # 4-byte Reload + subl %ecx, %eax + movl 36(%esi), %ecx + movl %ecx, -136(%ebp) # 4-byte Spill + sbbl %ecx, %ebx + movl 40(%esi), %ecx + movl %ecx, -128(%ebp) # 4-byte Spill + sbbl %ecx, -96(%ebp) # 4-byte Folded Spill + movl 44(%esi), %ecx + movl %ecx, -140(%ebp) # 4-byte Spill + sbbl %ecx, %edx + movl 48(%esi), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + sbbl %ecx, 
%edi + movl 52(%esi), %ecx + movl %ecx, -116(%ebp) # 4-byte Spill + sbbl %ecx, -80(%ebp) # 4-byte Folded Spill + movl 56(%esi), %ecx + movl %ecx, -120(%ebp) # 4-byte Spill + sbbl %ecx, -88(%ebp) # 4-byte Folded Spill + movl 60(%esi), %ecx + movl %ecx, -124(%ebp) # 4-byte Spill + sbbl %ecx, -84(%ebp) # 4-byte Folded Spill + sbbl $0, -92(%ebp) # 4-byte Folded Spill + addl -100(%ebp), %eax # 4-byte Folded Reload + adcl -112(%ebp), %ebx # 4-byte Folded Reload + movl %eax, 16(%esi) + movl -96(%ebp), %eax # 4-byte Reload + adcl -104(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 20(%esi) + adcl -108(%ebp), %edx # 4-byte Folded Reload + movl %eax, 24(%esi) + adcl -132(%ebp), %edi # 4-byte Folded Reload + movl %edx, 28(%esi) + movl -80(%ebp), %eax # 4-byte Reload + adcl -136(%ebp), %eax # 4-byte Folded Reload + movl %edi, 32(%esi) + movl -88(%ebp), %ecx # 4-byte Reload + adcl -128(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 36(%esi) + movl -84(%ebp), %eax # 4-byte Reload + adcl -140(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 40(%esi) + movl -92(%ebp), %ecx # 4-byte Reload + adcl -144(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 44(%esi) + movl %ecx, 48(%esi) + movl -116(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 52(%esi) + movl -120(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 56(%esi) + movl -124(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 60(%esi) + addl $156, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end112: + .size mcl_fpDbl_mulPre8Lbmi2, .Lfunc_end112-mcl_fpDbl_mulPre8Lbmi2 + + .globl mcl_fpDbl_sqrPre8Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre8Lbmi2,@function +mcl_fpDbl_sqrPre8Lbmi2: # @mcl_fpDbl_sqrPre8Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $156, %esp + calll .L113$pb +.L113$pb: + popl %ebx +.Ltmp4: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp4-.L113$pb), %ebx + movl %ebx, -96(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + movl %edi, 8(%esp) + movl %edi, 4(%esp) + movl 8(%ebp), %esi + movl %esi, (%esp) + calll mcl_fpDbl_mulPre4Lbmi2@PLT + leal 16(%edi), %eax + movl %eax, 8(%esp) + movl %eax, 4(%esp) + leal 32(%esi), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre4Lbmi2@PLT + movl (%edi), %esi + movl 4(%edi), %ecx + addl 16(%edi), %esi + movl %esi, -108(%ebp) # 4-byte Spill + adcl 20(%edi), %ecx + seto %al + lahf + movl %eax, %edx + addl %esi, %esi + movl %esi, -84(%ebp) # 4-byte Spill + movl %ecx, %esi + adcl %esi, %esi + movl %esi, -80(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %esi + popl %eax + movl %esi, -88(%ebp) # 4-byte Spill + movl 24(%edi), %esi + pushl %eax + movl %edx, %eax + addb $127, %al + sahf + popl %eax + adcl 8(%edi), %esi + movl 28(%edi), %edx + adcl 12(%edi), %edx + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -100(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -104(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %ebx + sbbl %edi, %edi + movl %edi, -92(%ebp) # 4-byte Spill + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB113_2 +# BB#1: + movl $0, -80(%ebp) # 4-byte Folded Spill + movl $0, -84(%ebp) # 4-byte Folded Spill +.LBB113_2: + movl %esi, %ebx + movl -88(%ebp), %edi # 4-byte Reload + movl %edi, %eax + addb $127, %al + sahf + adcl %ebx, %ebx + movl %edx, %edi + adcl %edi, %edi + movl -104(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB113_4 +# BB#3: + xorl %edi, %edi + xorl %ebx, %ebx +.LBB113_4: + movl 
%ebx, -88(%ebp) # 4-byte Spill + movl -108(%ebp), %eax # 4-byte Reload + movl %eax, -60(%ebp) + movl %ecx, -56(%ebp) + movl %esi, -52(%ebp) + movl %edx, -48(%ebp) + movl %eax, -76(%ebp) + movl %ecx, -72(%ebp) + movl %esi, -68(%ebp) + movl %edx, -64(%ebp) + movl -100(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB113_5 +# BB#6: + movl $0, -100(%ebp) # 4-byte Folded Spill + jmp .LBB113_7 +.LBB113_5: + shrl $31, %edx + movl %edx, -100(%ebp) # 4-byte Spill +.LBB113_7: + leal -76(%ebp), %eax + movl %eax, 8(%esp) + leal -60(%ebp), %eax + movl %eax, 4(%esp) + leal -44(%ebp), %eax + movl %eax, (%esp) + movl -92(%ebp), %esi # 4-byte Reload + andl $1, %esi + movl -96(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre4Lbmi2@PLT + movl -84(%ebp), %eax # 4-byte Reload + addl -28(%ebp), %eax + movl %eax, -84(%ebp) # 4-byte Spill + movl -80(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -80(%ebp) # 4-byte Spill + movl -88(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -88(%ebp) # 4-byte Spill + adcl -16(%ebp), %edi + movl %edi, -92(%ebp) # 4-byte Spill + adcl -100(%ebp), %esi # 4-byte Folded Reload + movl -44(%ebp), %eax + movl 8(%ebp), %edi + subl (%edi), %eax + movl %eax, -116(%ebp) # 4-byte Spill + movl -40(%ebp), %ebx + sbbl 4(%edi), %ebx + movl -36(%ebp), %eax + sbbl 8(%edi), %eax + movl %eax, -96(%ebp) # 4-byte Spill + movl -32(%ebp), %edx + sbbl 12(%edi), %edx + movl 16(%edi), %eax + movl %eax, -100(%ebp) # 4-byte Spill + sbbl %eax, -84(%ebp) # 4-byte Folded Spill + movl 20(%edi), %eax + movl %eax, -112(%ebp) # 4-byte Spill + sbbl %eax, -80(%ebp) # 4-byte Folded Spill + movl 24(%edi), %eax + movl %eax, -104(%ebp) # 4-byte Spill + sbbl %eax, -88(%ebp) # 4-byte Folded Spill + movl 28(%edi), %eax + movl %eax, -108(%ebp) # 4-byte Spill + sbbl %eax, -92(%ebp) # 4-byte Folded Spill + sbbl $0, %esi + movl 32(%edi), %ecx + movl %ecx, -132(%ebp) # 4-byte Spill + movl -116(%ebp), %eax # 4-byte Reload + subl %ecx, %eax + movl 36(%edi), %ecx + movl %ecx, -136(%ebp) # 4-byte Spill + sbbl %ecx, %ebx + movl 40(%edi), %ecx + movl %ecx, -128(%ebp) # 4-byte Spill + sbbl %ecx, -96(%ebp) # 4-byte Folded Spill + movl 44(%edi), %ecx + movl %ecx, -140(%ebp) # 4-byte Spill + sbbl %ecx, %edx + movl 48(%edi), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + sbbl %ecx, -84(%ebp) # 4-byte Folded Spill + movl 52(%edi), %ecx + movl %ecx, -116(%ebp) # 4-byte Spill + sbbl %ecx, -80(%ebp) # 4-byte Folded Spill + movl 56(%edi), %ecx + movl %ecx, -120(%ebp) # 4-byte Spill + sbbl %ecx, -88(%ebp) # 4-byte Folded Spill + movl 60(%edi), %ecx + movl %ecx, -124(%ebp) # 4-byte Spill + sbbl %ecx, -92(%ebp) # 4-byte Folded Spill + sbbl $0, %esi + addl -100(%ebp), %eax # 4-byte Folded Reload + adcl -112(%ebp), %ebx # 4-byte Folded Reload + movl %eax, 16(%edi) + movl -96(%ebp), %eax # 4-byte Reload + adcl -104(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 20(%edi) + adcl -108(%ebp), %edx # 4-byte Folded Reload + movl %eax, 24(%edi) + movl -84(%ebp), %eax # 4-byte Reload + adcl -132(%ebp), %eax # 4-byte Folded Reload + movl %edx, 28(%edi) + movl -80(%ebp), %ecx # 4-byte Reload + adcl -136(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 32(%edi) + movl -88(%ebp), %eax # 4-byte Reload + adcl -128(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 36(%edi) + movl -92(%ebp), %ecx # 4-byte Reload + adcl -140(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 40(%edi) + adcl -144(%ebp), %esi # 4-byte Folded Reload + movl %ecx, 44(%edi) + movl %esi, 48(%edi) + movl -116(%ebp), %eax # 4-byte 
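+# (sqrPre8 applies one Karatsuba level over mcl_fpDbl_mulPre4Lbmi2: lo^2 and hi^2 are
+# written straight into the destination, (lo+hi)^2 is formed in the stack buffer, and
+# both squares are subtracted back out above; the lines below only propagate the final
+# carry into the top three result words.)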
Reload + adcl $0, %eax + movl %eax, 52(%edi) + movl -120(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 56(%edi) + movl -124(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 60(%edi) + addl $156, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end113: + .size mcl_fpDbl_sqrPre8Lbmi2, .Lfunc_end113-mcl_fpDbl_sqrPre8Lbmi2 + + .globl mcl_fp_mont8Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont8Lbmi2,@function +mcl_fp_mont8Lbmi2: # @mcl_fp_mont8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $700, %esp # imm = 0x2BC + calll .L114$pb +.L114$pb: + popl %ebx +.Ltmp5: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.L114$pb), %ebx + movl 732(%esp), %eax + movl -4(%eax), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 664(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 664(%esp), %ebp + movl 668(%esp), %edi + movl %ebp, %eax + imull %esi, %eax + movl 696(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 692(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 688(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 684(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 680(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 676(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 672(%esp), %esi + movl %eax, (%esp) + leal 624(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 624(%esp), %ebp + adcl 628(%esp), %edi + adcl 632(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 656(%esp), %ebp + sbbl %eax, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 584(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 60(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 584(%esp), %edi + adcl 588(%esp), %esi + movl 40(%esp), %ecx # 4-byte Reload + adcl 592(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 596(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 600(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 604(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 608(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 612(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + adcl 616(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %edi, %eax + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 732(%esp), %eax + movl %eax, %edx + calll .LmulPv256x32 + andl $1, %ebp + addl 544(%esp), %edi + adcl 548(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 
564(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 568(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %ebp + movl 728(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + addl 504(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 524(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 536(%esp), %ebp + sbbl %edi, %edi + movl %esi, %eax + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 464(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + movl %edi, %eax + andl $1, %eax + addl 464(%esp), %esi + movl 40(%esp), %ecx # 4-byte Reload + adcl 468(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 472(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 480(%esp), %edi + movl 52(%esp), %ecx # 4-byte Reload + adcl 484(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 488(%esp), %esi + movl 60(%esp), %ecx # 4-byte Reload + adcl 492(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 496(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 724(%esp), %eax + movl %eax, %edx + calll .LmulPv256x32 + movl 40(%esp), %ecx # 4-byte Reload + addl 424(%esp), %ecx + movl 28(%esp), %ebp # 4-byte Reload + adcl 428(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 436(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 444(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 448(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 384(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + movl 40(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 384(%esp), %esi + adcl 388(%esp), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 392(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 396(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 400(%esp), %ecx + movl %ecx, 52(%esp) 
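+# mont8 main loop (word-serial Montgomery multiplication): each round computes a*b[i]
+# via .LmulPv256x32 (a at 724(%esp), the next word of b from 728(%esp)), accumulates,
+# then adds q*p with q = t[0]*n0 mod 2^32 (n0 spilled at 24(%esp), p at 732(%esp)) so
+# that t[0] cancels and the accumulator window shifts down one word.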
# 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 404(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 408(%esp), %edi + movl 32(%esp), %ecx # 4-byte Reload + adcl 412(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 416(%esp), %ebp + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 28(%esp), %ecx # 4-byte Reload + addl 344(%esp), %ecx + adcl 348(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 352(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 364(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 372(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + movl %ebp, %eax + andl $1, %eax + addl 304(%esp), %edi + movl 36(%esp), %ecx # 4-byte Reload + adcl 308(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 312(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 316(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + adcl 320(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 324(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 328(%esp), %esi + movl 44(%esp), %ecx # 4-byte Reload + adcl 332(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 336(%esp), %edi + adcl $0, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 36(%esp), %ecx # 4-byte Reload + addl 264(%esp), %ecx + movl 48(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 272(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 276(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 284(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 292(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + movl %edi, %eax + andl $1, %eax + addl 224(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 228(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 232(%esp), %esi + adcl 236(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 240(%esp), %edi + movl 32(%esp), %ecx # 4-byte Reload + adcl 244(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte 
Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 248(%esp), %ebp + movl 40(%esp), %ecx # 4-byte Reload + adcl 252(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 256(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 48(%esp), %ecx # 4-byte Reload + addl 184(%esp), %ecx + adcl 188(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 196(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 204(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %ebp # 4-byte Reload + adcl 212(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + movl %edi, %ecx + andl $1, %ecx + addl 144(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 152(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + adcl 160(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 172(%esp), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + adcl 176(%esp), %ebp + adcl $0, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 104(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 52(%esp), %ecx # 4-byte Reload + addl 104(%esp), %ecx + adcl 108(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 116(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 120(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %edi # 4-byte Reload + adcl 128(%esp), %edi + adcl 132(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + sbbl %esi, %esi + movl 24(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %ebp + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + andl $1, %esi + addl 64(%esp), %ebp + movl 32(%esp), %ebx # 4-byte Reload + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 72(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 76(%esp), %ebx + movl 44(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx + movl %edx, 40(%esp) 
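+# After the eighth round t fits in nine words; the tail below subtracts p with a borrow
+# chain into spill slots and branches on the final borrow to store, word by word, either
+# t or t - p.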
# 4-byte Spill + adcl 88(%esp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl $0, %esi + movl %eax, %edx + movl 732(%esp), %ebp + subl (%ebp), %edx + movl %ecx, %eax + sbbl 4(%ebp), %eax + movl %ebx, %ecx + sbbl 8(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + sbbl 12(%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + sbbl 16(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + sbbl 20(%ebp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + sbbl 24(%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + sbbl $0, %esi + andl $1, %esi + movl %esi, %ecx + jne .LBB114_2 +# BB#1: + movl %edx, %ebp +.LBB114_2: + movl 720(%esp), %edx + movl %ebp, (%edx) + testb %cl, %cl + movl 60(%esp), %ebp # 4-byte Reload + jne .LBB114_4 +# BB#3: + movl %eax, %ebp +.LBB114_4: + movl %ebp, 4(%edx) + jne .LBB114_6 +# BB#5: + movl 12(%esp), %ebx # 4-byte Reload +.LBB114_6: + movl %ebx, 8(%edx) + movl 28(%esp), %eax # 4-byte Reload + jne .LBB114_8 +# BB#7: + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%esp) # 4-byte Spill +.LBB114_8: + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%edx) + movl 40(%esp), %edi # 4-byte Reload + jne .LBB114_10 +# BB#9: + movl 20(%esp), %edi # 4-byte Reload +.LBB114_10: + movl %edi, 16(%edx) + jne .LBB114_12 +# BB#11: + movl 24(%esp), %eax # 4-byte Reload +.LBB114_12: + movl %eax, 20(%edx) + movl 36(%esp), %eax # 4-byte Reload + jne .LBB114_14 +# BB#13: + movl 32(%esp), %eax # 4-byte Reload +.LBB114_14: + movl %eax, 24(%edx) + movl 48(%esp), %eax # 4-byte Reload + jne .LBB114_16 +# BB#15: + movl 52(%esp), %eax # 4-byte Reload +.LBB114_16: + movl %eax, 28(%edx) + addl $700, %esp # imm = 0x2BC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end114: + .size mcl_fp_mont8Lbmi2, .Lfunc_end114-mcl_fp_mont8Lbmi2 + + .globl mcl_fp_montNF8Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF8Lbmi2,@function +mcl_fp_montNF8Lbmi2: # @mcl_fp_montNF8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $700, %esp # imm = 0x2BC + calll .L115$pb +.L115$pb: + popl %ebx +.Ltmp6: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp6-.L115$pb), %ebx + movl 732(%esp), %eax + movl -4(%eax), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 664(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 664(%esp), %ebp + movl 668(%esp), %edi + movl %ebp, %eax + imull %esi, %eax + movl 696(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 692(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 688(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 684(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 680(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 676(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 672(%esp), %esi + movl %eax, (%esp) + leal 624(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 624(%esp), %ebp + adcl 628(%esp), %edi + adcl 632(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + adcl 640(%esp), %ebp + movl 40(%esp), %eax # 
4-byte Reload + adcl 644(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 584(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 616(%esp), %ecx + addl 584(%esp), %edi + adcl 588(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 596(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 604(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl %edi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 732(%esp), %eax + movl %eax, %edx + calll .LmulPv256x32 + addl 544(%esp), %edi + adcl 548(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 564(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 572(%esp), %edi + movl 52(%esp), %ebp # 4-byte Reload + adcl 576(%esp), %ebp + movl 728(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 536(%esp), %ecx + addl 504(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 528(%esp), %edi + adcl 532(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 464(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 464(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + adcl 472(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 484(%esp), %esi + adcl 488(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + adcl 496(%esp), %edi + movl 
728(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 456(%esp), %eax + movl 44(%esp), %edx # 4-byte Reload + addl 424(%esp), %edx + adcl 428(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 432(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 436(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 440(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 448(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 452(%esp), %edi + movl %edi, %ebp + movl %eax, %edi + adcl $0, %edi + movl %edx, %eax + movl %edx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 384(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 384(%esp), %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 396(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 412(%esp), %ebp + adcl 416(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 376(%esp), %edx + movl 36(%esp), %ecx # 4-byte Reload + addl 344(%esp), %ecx + movl 40(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 352(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 360(%esp), %esi + movl 52(%esp), %edi # 4-byte Reload + adcl 364(%esp), %edi + adcl 368(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 304(%esp), %ebp + movl 40(%esp), %ebp # 4-byte Reload + adcl 308(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 320(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + adcl 324(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 328(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 336(%esp), %edi + movl 728(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 724(%esp), %eax + movl %eax, %edx + calll .LmulPv256x32 + movl 296(%esp), %edx + movl %ebp, %ecx + addl 264(%esp), %ecx + movl 48(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax 
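+# montNF8 follows the same word-serial recurrence but never materialises a separate
+# carry bit: the ninth word is carried in a register each round, and the final t - p
+# selection at the end keys off the sign of the top word (js) instead of a saved borrow.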
# 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 280(%esp), %ebp + adcl 284(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 292(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl %edx, %edi + adcl $0, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 224(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 236(%esp), %esi + adcl 240(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 256(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 216(%esp), %ebp + movl 48(%esp), %ecx # 4-byte Reload + addl 184(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 192(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 196(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 144(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 156(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 160(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 176(%esp), %ebp + movl 728(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 104(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 136(%esp), %edi + movl 60(%esp), %ecx # 4-byte Reload + addl 104(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 116(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 120(%esp), 
%eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 128(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 132(%esp), %ebp + adcl $0, %edi + movl 28(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 64(%esp), %esi + movl 32(%esp), %esi # 4-byte Reload + movl 56(%esp), %eax # 4-byte Reload + movl 44(%esp), %ebx # 4-byte Reload + adcl 68(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 72(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 76(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + adcl 80(%esp), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl 92(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + adcl 96(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl %eax, %edx + movl 732(%esp), %eax + subl (%eax), %edx + sbbl 4(%eax), %ecx + sbbl 8(%eax), %esi + sbbl 12(%eax), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 36(%esp), %ebx # 4-byte Reload + sbbl 16(%eax), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 40(%esp), %ebx # 4-byte Reload + sbbl 20(%eax), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + sbbl 24(%eax), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + sbbl 28(%eax), %edi + movl %edi, 28(%esp) # 4-byte Spill + testl %edi, %edi + js .LBB115_2 +# BB#1: + movl %edx, 56(%esp) # 4-byte Spill +.LBB115_2: + movl 720(%esp), %edx + movl 56(%esp), %eax # 4-byte Reload + movl %eax, (%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB115_4 +# BB#3: + movl %ecx, %eax +.LBB115_4: + movl %eax, 4(%edx) + js .LBB115_6 +# BB#5: + movl %esi, 32(%esp) # 4-byte Spill +.LBB115_6: + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 8(%edx) + movl 36(%esp), %edi # 4-byte Reload + movl 40(%esp), %ebp # 4-byte Reload + movl 60(%esp), %eax # 4-byte Reload + movl 48(%esp), %ecx # 4-byte Reload + js .LBB115_8 +# BB#7: + movl 12(%esp), %esi # 4-byte Reload + movl %esi, 44(%esp) # 4-byte Spill +.LBB115_8: + movl 44(%esp), %esi # 4-byte Reload + movl %esi, 12(%edx) + js .LBB115_10 +# BB#9: + movl 16(%esp), %edi # 4-byte Reload +.LBB115_10: + movl %edi, 16(%edx) + js .LBB115_12 +# BB#11: + movl 20(%esp), %ebp # 4-byte Reload +.LBB115_12: + movl %ebp, 20(%edx) + js .LBB115_14 +# BB#13: + movl 24(%esp), %eax # 4-byte Reload +.LBB115_14: + movl %eax, 24(%edx) + js .LBB115_16 +# BB#15: + movl 28(%esp), %ecx # 4-byte Reload +.LBB115_16: + movl %ecx, 28(%edx) + addl $700, %esp # imm = 0x2BC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end115: + .size mcl_fp_montNF8Lbmi2, .Lfunc_end115-mcl_fp_montNF8Lbmi2 + + .globl mcl_fp_montRed8Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed8Lbmi2,@function +mcl_fp_montRed8Lbmi2: # @mcl_fp_montRed8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $428, %esp # imm = 0x1AC + calll .L116$pb +.L116$pb: + popl %ebx +.Ltmp7: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp7-.L116$pb), %ebx + movl 456(%esp), %edx + movl -4(%edx), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 452(%esp), %eax + movl (%eax), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 4(%eax), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl %esi, %ecx + imull %edi, %ecx + movl 60(%eax), %edi + movl 
%edi, 84(%esp) # 4-byte Spill + movl 56(%eax), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 52(%eax), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 48(%eax), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 44(%eax), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 40(%eax), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 36(%eax), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 32(%eax), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 28(%eax), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 24(%eax), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 20(%eax), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 16(%eax), %ebp + movl 12(%eax), %edi + movl 8(%eax), %esi + movl (%edx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%edx), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 24(%edx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 20(%edx), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 16(%edx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 12(%edx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 8(%edx), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 4(%edx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl %ecx, (%esp) + leal 392(%esp), %ecx + calll .LmulPv256x32 + movl 56(%esp), %eax # 4-byte Reload + addl 392(%esp), %eax + movl 64(%esp), %ecx # 4-byte Reload + adcl 396(%esp), %ecx + adcl 400(%esp), %esi + movl %esi, 16(%esp) # 4-byte Spill + adcl 404(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 408(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + movl 76(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 100(%esp) # 4-byte Folded Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + sbbl %eax, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %ecx, %edi + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + movl 64(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 352(%esp), %edi + movl 16(%esp), %edx # 4-byte Reload + adcl 356(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + adcl 360(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 364(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 368(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 372(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 376(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 380(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 384(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %esi + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, %eax + movl %eax, 
64(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 312(%esp), %edi + movl 52(%esp), %edi # 4-byte Reload + adcl 316(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl 80(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 272(%esp), %edi + movl 56(%esp), %ecx # 4-byte Reload + adcl 276(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 100(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl 84(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 64(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 232(%esp), %ebp + movl 68(%esp), %ecx # 4-byte Reload + adcl 236(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 252(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 88(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl $0, %esi + 
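+# montRed8 reduction step, repeated once per limb: q = t[i]*n0 mod 2^32 (n0 spilled at
+# 60(%esp)), add q*p via .LmulPv256x32 so the current low word cancels, then shift the
+# 16-word accumulator down by one; a final pass conditionally subtracts p.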
movl %ecx, %edi + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 192(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 192(%esp), %edi + movl 72(%esp), %ecx # 4-byte Reload + adcl 196(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 204(%esp), %edi + adcl 208(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 212(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 64(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 152(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 152(%esp), %esi + movl 96(%esp), %ecx # 4-byte Reload + adcl 156(%esp), %ecx + adcl 160(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 168(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 172(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 180(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + movl 60(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 112(%esp), %esi + movl 92(%esp), %ecx # 4-byte Reload + adcl 116(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 120(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 128(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl %edi, %ebx + movl 100(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl %eax, %esi + adcl 136(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %ecx, %edx + subl 24(%esp), %edx # 4-byte Folded Reload + movl 108(%esp), %eax # 4-byte Reload + sbbl 20(%esp), %eax # 4-byte Folded Reload + movl 104(%esp), %ebp # 4-byte Reload + sbbl 28(%esp), %ebp # 4-byte Folded Reload + sbbl 32(%esp), %ebx # 4-byte Folded Reload + sbbl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 68(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + sbbl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 72(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + sbbl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 92(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + sbbl 48(%esp), %esi # 4-byte Folded Reload + movl %esi, 96(%esp) # 4-byte Spill + sbbl $0, %edi + andl $1, %edi + jne 
.LBB116_2 +# BB#1: + movl %edx, %ecx +.LBB116_2: + movl 448(%esp), %edx + movl %ecx, (%edx) + movl %edi, %ecx + testb %cl, %cl + jne .LBB116_4 +# BB#3: + movl %eax, 108(%esp) # 4-byte Spill +.LBB116_4: + movl 108(%esp), %eax # 4-byte Reload + movl %eax, 4(%edx) + movl 104(%esp), %eax # 4-byte Reload + jne .LBB116_6 +# BB#5: + movl %ebp, %eax +.LBB116_6: + movl %eax, 8(%edx) + movl 84(%esp), %eax # 4-byte Reload + movl 76(%esp), %ebp # 4-byte Reload + jne .LBB116_8 +# BB#7: + movl %ebx, %ebp +.LBB116_8: + movl %ebp, 12(%edx) + movl 100(%esp), %ebx # 4-byte Reload + jne .LBB116_10 +# BB#9: + movl 68(%esp), %ebx # 4-byte Reload +.LBB116_10: + movl %ebx, 16(%edx) + movl 80(%esp), %edi # 4-byte Reload + jne .LBB116_12 +# BB#11: + movl 72(%esp), %edi # 4-byte Reload +.LBB116_12: + movl %edi, 20(%edx) + movl 88(%esp), %esi # 4-byte Reload + jne .LBB116_14 +# BB#13: + movl 92(%esp), %esi # 4-byte Reload +.LBB116_14: + movl %esi, 24(%edx) + jne .LBB116_16 +# BB#15: + movl 96(%esp), %eax # 4-byte Reload +.LBB116_16: + movl %eax, 28(%edx) + addl $428, %esp # imm = 0x1AC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end116: + .size mcl_fp_montRed8Lbmi2, .Lfunc_end116-mcl_fp_montRed8Lbmi2 + + .globl mcl_fp_addPre8Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre8Lbmi2,@function +mcl_fp_addPre8Lbmi2: # @mcl_fp_addPre8Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 20(%esp), %esi + addl (%esi), %ecx + adcl 4(%esi), %edx + movl 8(%eax), %edi + adcl 8(%esi), %edi + movl 16(%esp), %ebx + movl %ecx, (%ebx) + movl 12(%esi), %ecx + movl %edx, 4(%ebx) + movl 16(%esi), %edx + adcl 12(%eax), %ecx + adcl 16(%eax), %edx + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %ecx, 12(%ebx) + movl 20(%esi), %ecx + adcl %edi, %ecx + movl 24(%eax), %edi + movl %edx, 16(%ebx) + movl 24(%esi), %edx + adcl %edi, %edx + movl %ecx, 20(%ebx) + movl %edx, 24(%ebx) + movl 28(%eax), %eax + movl 28(%esi), %ecx + adcl %eax, %ecx + movl %ecx, 28(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end117: + .size mcl_fp_addPre8Lbmi2, .Lfunc_end117-mcl_fp_addPre8Lbmi2 + + .globl mcl_fp_subPre8Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre8Lbmi2,@function +mcl_fp_subPre8Lbmi2: # @mcl_fp_subPre8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %esi + xorl %eax, %eax + movl 28(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %esi + movl 8(%ecx), %ebx + sbbl 8(%edi), %ebx + movl 20(%esp), %ebp + movl %edx, (%ebp) + movl 12(%ecx), %edx + sbbl 12(%edi), %edx + movl %esi, 4(%ebp) + movl 16(%ecx), %esi + sbbl 16(%edi), %esi + movl %ebx, 8(%ebp) + movl 20(%edi), %ebx + movl %edx, 12(%ebp) + movl 20(%ecx), %edx + sbbl %ebx, %edx + movl 24(%edi), %ebx + movl %esi, 16(%ebp) + movl 24(%ecx), %esi + sbbl %ebx, %esi + movl %edx, 20(%ebp) + movl %esi, 24(%ebp) + movl 28(%edi), %edx + movl 28(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 28(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end118: + .size mcl_fp_subPre8Lbmi2, .Lfunc_end118-mcl_fp_subPre8Lbmi2 + + .globl mcl_fp_shr1_8Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_8Lbmi2,@function +mcl_fp_shr1_8Lbmi2: # @mcl_fp_shr1_8Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + shrdl $1, %edx, %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 8(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 4(%esi) + movl 
12(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 8(%esi) + movl 16(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 12(%esi) + movl 20(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 16(%esi) + movl 24(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 20(%esi) + movl 28(%eax), %eax + shrdl $1, %eax, %ecx + movl %ecx, 24(%esi) + shrl %eax + movl %eax, 28(%esi) + popl %esi + retl +.Lfunc_end119: + .size mcl_fp_shr1_8Lbmi2, .Lfunc_end119-mcl_fp_shr1_8Lbmi2 + + .globl mcl_fp_add8Lbmi2 + .align 16, 0x90 + .type mcl_fp_add8Lbmi2,@function +mcl_fp_add8Lbmi2: # @mcl_fp_add8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 48(%esp), %edi + movl (%edi), %ecx + movl 4(%edi), %eax + movl 44(%esp), %edx + addl (%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl 4(%edx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 8(%edi), %eax + adcl 8(%edx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 12(%edx), %esi + movl 16(%edx), %eax + adcl 12(%edi), %esi + adcl 16(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 20(%edx), %ecx + adcl 20(%edi), %ecx + movl 24(%edx), %ebx + adcl 24(%edi), %ebx + movl 28(%edx), %edi + movl 48(%esp), %edx + adcl 28(%edx), %edi + movl 40(%esp), %edx + movl %ebp, (%edx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%edx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%edx) + movl %esi, 12(%edx) + movl %eax, 16(%edx) + movl %ecx, 20(%edx) + movl %ebx, 24(%edx) + movl %edi, 28(%edx) + sbbl %eax, %eax + andl $1, %eax + movl 52(%esp), %edx + movl 8(%esp), %ebp # 4-byte Reload + subl (%edx), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 16(%esp), %ebp # 4-byte Reload + movl 52(%esp), %edx + sbbl 4(%edx), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 12(%esp), %ebp # 4-byte Reload + movl 52(%esp), %edx + sbbl 8(%edx), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 52(%esp), %ebp + sbbl 12(%ebp), %esi + movl %esi, (%esp) # 4-byte Spill + movl 4(%esp), %edx # 4-byte Reload + sbbl 16(%ebp), %edx + movl %edx, %esi + sbbl 20(%ebp), %ecx + sbbl 24(%ebp), %ebx + sbbl 28(%ebp), %edi + sbbl $0, %eax + testb $1, %al + jne .LBB120_2 +# BB#1: # %nocarry + movl 8(%esp), %edx # 4-byte Reload + movl 40(%esp), %ebp + movl %edx, (%ebp) + movl 16(%esp), %edx # 4-byte Reload + movl %edx, 4(%ebp) + movl 12(%esp), %edx # 4-byte Reload + movl %edx, 8(%ebp) + movl (%esp), %eax # 4-byte Reload + movl %eax, 12(%ebp) + movl %esi, 16(%ebp) + movl %ecx, 20(%ebp) + movl %ebx, 24(%ebp) + movl %edi, 28(%ebp) +.LBB120_2: # %carry + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end120: + .size mcl_fp_add8Lbmi2, .Lfunc_end120-mcl_fp_add8Lbmi2 + + .globl mcl_fp_addNF8Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF8Lbmi2,@function +mcl_fp_addNF8Lbmi2: # @mcl_fp_addNF8Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 84(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edi + movl 80(%esp), %ebx + addl (%ebx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + adcl 4(%ebx), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 28(%eax), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 20(%eax), %ebp + movl 16(%eax), %esi + movl 12(%eax), %edx + movl 8(%eax), %ecx + adcl 8(%ebx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + adcl 12(%ebx), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl 16(%ebx), %esi + movl %esi, 40(%esp) # 4-byte Spill + adcl 20(%ebx), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 
4-byte Reload
+	adcl	24(%ebx), %eax
+	movl	%eax, 52(%esp) # 4-byte Spill
+	movl	48(%esp), %eax # 4-byte Reload
+	adcl	28(%ebx), %eax
+	movl	%eax, 48(%esp) # 4-byte Spill
+	movl	88(%esp), %ebx
+	movl	24(%esp), %ebp # 4-byte Reload
+	movl	%ebp, %eax
+	subl	(%ebx), %eax
+	movl	%eax, (%esp) # 4-byte Spill
+	sbbl	4(%ebx), %edi
+	movl	%edi, 4(%esp) # 4-byte Spill
+	movl	%ebp, %eax
+	sbbl	8(%ebx), %ecx
+	movl	%ecx, 8(%esp) # 4-byte Spill
+	sbbl	12(%ebx), %edx
+	movl	%edx, 12(%esp) # 4-byte Spill
+	sbbl	16(%ebx), %esi
+	movl	%esi, 16(%esp) # 4-byte Spill
+	movl	44(%esp), %esi # 4-byte Reload
+	sbbl	20(%ebx), %esi
+	movl	%esi, 20(%esp) # 4-byte Spill
+	movl	52(%esp), %ebp # 4-byte Reload
+	sbbl	24(%ebx), %ebp
+	movl	48(%esp), %esi # 4-byte Reload
+	sbbl	28(%ebx), %esi
+	testl	%esi, %esi
+	js	.LBB121_2
+# BB#1:
+	movl	(%esp), %eax # 4-byte Reload
+.LBB121_2:
+	movl	76(%esp), %ebx
+	movl	%eax, (%ebx)
+	movl	32(%esp), %eax # 4-byte Reload
+	js	.LBB121_4
+# BB#3:
+	movl	4(%esp), %eax # 4-byte Reload
+.LBB121_4:
+	movl	%eax, 4(%ebx)
+	movl	40(%esp), %edx # 4-byte Reload
+	movl	28(%esp), %edi # 4-byte Reload
+	js	.LBB121_6
+# BB#5:
+	movl	8(%esp), %edi # 4-byte Reload
+.LBB121_6:
+	movl	%edi, 8(%ebx)
+	movl	44(%esp), %ecx # 4-byte Reload
+	movl	36(%esp), %eax # 4-byte Reload
+	js	.LBB121_8
+# BB#7:
+	movl	12(%esp), %eax # 4-byte Reload
+.LBB121_8:
+	movl	%eax, 12(%ebx)
+	movl	48(%esp), %edi # 4-byte Reload
+	movl	52(%esp), %eax # 4-byte Reload
+	js	.LBB121_10
+# BB#9:
+	movl	16(%esp), %edx # 4-byte Reload
+.LBB121_10:
+	movl	%edx, 16(%ebx)
+	js	.LBB121_12
+# BB#11:
+	movl	20(%esp), %ecx # 4-byte Reload
+.LBB121_12:
+	movl	%ecx, 20(%ebx)
+	js	.LBB121_14
+# BB#13:
+	movl	%ebp, %eax
+.LBB121_14:
+	movl	%eax, 24(%ebx)
+	js	.LBB121_16
+# BB#15:
+	movl	%esi, %edi
+.LBB121_16:
+	movl	%edi, 28(%ebx)
+	addl	$56, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end121:
+	.size	mcl_fp_addNF8Lbmi2, .Lfunc_end121-mcl_fp_addNF8Lbmi2
+
+	.globl	mcl_fp_sub8Lbmi2
+	.align	16, 0x90
+	.type	mcl_fp_sub8Lbmi2,@function
+mcl_fp_sub8Lbmi2: # @mcl_fp_sub8Lbmi2
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$28, %esp
+	movl	52(%esp), %esi
+	movl	(%esi), %ecx
+	movl	4(%esi), %eax
+	xorl	%ebx, %ebx
+	movl	56(%esp), %ebp
+	subl	(%ebp), %ecx
+	movl	%ecx, 16(%esp) # 4-byte Spill
+	sbbl	4(%ebp), %eax
+	movl	%eax, 20(%esp) # 4-byte Spill
+	movl	8(%esi), %edx
+	sbbl	8(%ebp), %edx
+	movl	%edx, 8(%esp) # 4-byte Spill
+	movl	12(%esi), %eax
+	sbbl	12(%ebp), %eax
+	movl	%eax, 24(%esp) # 4-byte Spill
+	movl	16(%esi), %ecx
+	sbbl	16(%ebp), %ecx
+	movl	%ecx, 12(%esp) # 4-byte Spill
+	movl	20(%esi), %eax
+	sbbl	20(%ebp), %eax
+	movl	%eax, 4(%esp) # 4-byte Spill
+	movl	24(%esi), %edi
+	sbbl	24(%ebp), %edi
+	movl	28(%esi), %esi
+	sbbl	28(%ebp), %esi
+	sbbl	$0, %ebx
+	testb	$1, %bl
+	movl	48(%esp), %ebx
+	movl	16(%esp), %ebp # 4-byte Reload
+	movl	%ebp, (%ebx)
+	movl	20(%esp), %ebp # 4-byte Reload
+	movl	%ebp, 4(%ebx)
+	movl	%edx, 8(%ebx)
+	movl	24(%esp), %edx # 4-byte Reload
+	movl	%edx, 12(%ebx)
+	movl	%ecx, 16(%ebx)
+	movl	%eax, 20(%ebx)
+	movl	%edi, 24(%ebx)
+	movl	%esi, 28(%ebx)
+	je	.LBB122_2
+# BB#1: # %carry
+	movl	%esi, (%esp) # 4-byte Spill
+	movl	60(%esp), %esi
+	movl	16(%esp), %ecx # 4-byte Reload
+	addl	(%esi), %ecx
+	movl	%ecx, (%ebx)
+	movl	20(%esp), %edx # 4-byte Reload
+	adcl	4(%esi), %edx
+	movl	%edx, 4(%ebx)
+	movl	8(%esp), %ebp # 4-byte Reload
+	adcl	8(%esi), %ebp
+	movl	12(%esi), %eax
+	adcl	24(%esp), %eax # 4-byte Folded Reload
+	movl	%ebp, 8(%ebx)
+	movl	16(%esi), %ecx
+	adcl	12(%esp), %ecx # 4-byte Folded Reload
+	movl	%eax, 12(%ebx)
+	movl	20(%esi), %eax
+	adcl	4(%esp), %eax # 4-byte Folded Reload
+	movl	%ecx, 16(%ebx)
+	movl	%eax, 20(%ebx)
+	movl	24(%esi), %eax
+	adcl	%edi, %eax
+	movl	%eax, 24(%ebx)
+	movl	28(%esi), %eax
+	adcl	(%esp), %eax # 4-byte Folded Reload
+	movl	%eax, 28(%ebx)
+.LBB122_2: # %nocarry
+	addl	$28, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end122:
+	.size	mcl_fp_sub8Lbmi2, .Lfunc_end122-mcl_fp_sub8Lbmi2
+
+	.globl	mcl_fp_subNF8Lbmi2
+	.align	16, 0x90
+	.type	mcl_fp_subNF8Lbmi2,@function
+mcl_fp_subNF8Lbmi2: # @mcl_fp_subNF8Lbmi2
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$40, %esp
+	movl	64(%esp), %eax
+	movl	(%eax), %esi
+	movl	4(%eax), %edx
+	movl	68(%esp), %ecx
+	subl	(%ecx), %esi
+	movl	%esi, 24(%esp) # 4-byte Spill
+	sbbl	4(%ecx), %edx
+	movl	%edx, 28(%esp) # 4-byte Spill
+	movl	28(%eax), %edx
+	movl	24(%eax), %esi
+	movl	20(%eax), %edi
+	movl	16(%eax), %ebx
+	movl	12(%eax), %ebp
+	movl	8(%eax), %eax
+	sbbl	8(%ecx), %eax
+	movl	%eax, 8(%esp) # 4-byte Spill
+	sbbl	12(%ecx), %ebp
+	movl	%ebp, 12(%esp) # 4-byte Spill
+	sbbl	16(%ecx), %ebx
+	movl	%ebx, 16(%esp) # 4-byte Spill
+	sbbl	20(%ecx), %edi
+	movl	%edi, 20(%esp) # 4-byte Spill
+	sbbl	24(%ecx), %esi
+	movl	%esi, 32(%esp) # 4-byte Spill
+	movl	%edx, %edi
+	sbbl	28(%ecx), %edi
+	movl	%edi, 36(%esp) # 4-byte Spill
+	sarl	$31, %edi
+	movl	72(%esp), %ebp
+	movl	28(%ebp), %eax
+	andl	%edi, %eax
+	movl	%eax, 4(%esp) # 4-byte Spill
+	movl	24(%ebp), %eax
+	andl	%edi, %eax
+	movl	%eax, (%esp) # 4-byte Spill
+	movl	20(%ebp), %ebx
+	andl	%edi, %ebx
+	movl	16(%ebp), %esi
+	andl	%edi, %esi
+	movl	12(%ebp), %edx
+	andl	%edi, %edx
+	movl	8(%ebp), %ecx
+	andl	%edi, %ecx
+	movl	4(%ebp), %eax
+	andl	%edi, %eax
+	andl	(%ebp), %edi
+	addl	24(%esp), %edi # 4-byte Folded Reload
+	adcl	28(%esp), %eax # 4-byte Folded Reload
+	movl	60(%esp), %ebp
+	movl	%edi, (%ebp)
+	adcl	8(%esp), %ecx # 4-byte Folded Reload
+	movl	%eax, 4(%ebp)
+	adcl	12(%esp), %edx # 4-byte Folded Reload
+	movl	%ecx, 8(%ebp)
+	adcl	16(%esp), %esi # 4-byte Folded Reload
+	movl	%edx, 12(%ebp)
+	adcl	20(%esp), %ebx # 4-byte Folded Reload
+	movl	%esi, 16(%ebp)
+	movl	(%esp), %eax # 4-byte Reload
+	adcl	32(%esp), %eax # 4-byte Folded Reload
+	movl	%ebx, 20(%ebp)
+	movl	%eax, 24(%ebp)
+	movl	4(%esp), %eax # 4-byte Reload
+	adcl	36(%esp), %eax # 4-byte Folded Reload
+	movl	%eax, 28(%ebp)
+	addl	$40, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end123:
+	.size	mcl_fp_subNF8Lbmi2, .Lfunc_end123-mcl_fp_subNF8Lbmi2
+
+	.globl	mcl_fpDbl_add8Lbmi2
+	.align	16, 0x90
+	.type	mcl_fpDbl_add8Lbmi2,@function
+mcl_fpDbl_add8Lbmi2: # @mcl_fpDbl_add8Lbmi2
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$56, %esp
+	movl	84(%esp), %ecx
+	movl	(%ecx), %esi
+	movl	4(%ecx), %edx
+	movl	80(%esp), %ebp
+	addl	(%ebp), %esi
+	adcl	4(%ebp), %edx
+	movl	8(%ecx), %edi
+	adcl	8(%ebp), %edi
+	movl	12(%ebp), %ebx
+	movl	76(%esp), %eax
+	movl	%esi, (%eax)
+	movl	16(%ebp), %esi
+	adcl	12(%ecx), %ebx
+	adcl	16(%ecx), %esi
+	movl	%edx, 4(%eax)
+	movl	40(%ecx), %edx
+	movl	%edx, 52(%esp) # 4-byte Spill
+	movl	%edi, 8(%eax)
+	movl	20(%ecx), %edx
+	movl	%ebx, 12(%eax)
+	movl	20(%ebp), %edi
+	adcl	%edx, %edi
+	movl	24(%ecx), %edx
+	movl	%esi, 16(%eax)
+	movl	24(%ebp), %esi
+	adcl	%edx, %esi
+	movl	28(%ecx), %edx
+	movl	%edi, 20(%eax)
+	movl	28(%ebp), %ebx
+	adcl	%edx, %ebx
+	movl	32(%ecx), %edx
+	movl	%esi, 24(%eax)
+	movl	32(%ebp), %esi
+	adcl	%edx, %esi
+	movl	%esi, 44(%esp) # 4-byte Spill
+	movl	36(%ecx), %edx
+	movl	%ebx, 28(%eax)
+	movl	36(%ebp), %ebx
+	adcl	%edx, %ebx
+	movl	%ebx, 28(%esp) # 4-byte Spill
+	movl	40(%ebp), %eax
+	adcl	52(%esp), %eax # 4-byte Folded Reload
+	movl	%eax, 48(%esp) # 4-byte Spill
+	movl	44(%ecx), %edx
+	movl	44(%ebp), %edi
+	adcl	%edx, %edi
+	movl	%edi, 32(%esp) # 4-byte Spill
+	movl	48(%ecx), %edx
+	movl	48(%ebp), %eax
+	adcl	%edx, %eax
+	movl	%eax, 24(%esp) # 4-byte Spill
+	movl	52(%ecx), %edx
+	movl	52(%ebp), %esi
+	adcl	%edx, %esi
+	movl	%esi, 36(%esp) # 4-byte Spill
+	movl	56(%ecx), %edx
+	movl	56(%ebp), %eax
+	adcl	%edx, %eax
+	movl	%eax, 52(%esp) # 4-byte Spill
+	movl	60(%ecx), %ecx
+	movl	60(%ebp), %ebp
+	adcl	%ecx, %ebp
+	movl	%ebp, 40(%esp) # 4-byte Spill
+	sbbl	%ecx, %ecx
+	andl	$1, %ecx
+	movl	44(%esp), %eax # 4-byte Reload
+	movl	88(%esp), %edx
+	subl	(%edx), %eax
+	movl	%eax, (%esp) # 4-byte Spill
+	movl	88(%esp), %eax
+	sbbl	4(%eax), %ebx
+	movl	%eax, %edx
+	movl	%ebx, 4(%esp) # 4-byte Spill
+	movl	48(%esp), %eax # 4-byte Reload
+	movl	%edx, %ebx
+	sbbl	8(%ebx), %eax
+	movl	%eax, 8(%esp) # 4-byte Spill
+	movl	%edi, %eax
+	movl	24(%esp), %edi # 4-byte Reload
+	sbbl	12(%ebx), %eax
+	movl	%eax, 12(%esp) # 4-byte Spill
+	movl	%edi, %eax
+	sbbl	16(%ebx), %eax
+	sbbl	20(%ebx), %esi
+	movl	%esi, 16(%esp) # 4-byte Spill
+	movl	52(%esp), %edx # 4-byte Reload
+	sbbl	24(%ebx), %edx
+	movl	%edx, 20(%esp) # 4-byte Spill
+	sbbl	28(%ebx), %ebp
+	sbbl	$0, %ecx
+	andl	$1, %ecx
+	jne	.LBB124_2
+# BB#1:
+	movl	%eax, %edi
+.LBB124_2:
+	testb	%cl, %cl
+	movl	44(%esp), %ecx # 4-byte Reload
+	jne	.LBB124_4
+# BB#3:
+	movl	(%esp), %ecx # 4-byte Reload
+.LBB124_4:
+	movl	76(%esp), %eax
+	movl	%ecx, 32(%eax)
+	movl	40(%esp), %ecx # 4-byte Reload
+	movl	32(%esp), %edx # 4-byte Reload
+	movl	48(%esp), %esi # 4-byte Reload
+	movl	28(%esp), %ebx # 4-byte Reload
+	jne	.LBB124_6
+# BB#5:
+	movl	4(%esp), %ebx # 4-byte Reload
+.LBB124_6:
+	movl	%ebx, 36(%eax)
+	jne	.LBB124_8
+# BB#7:
+	movl	8(%esp), %esi # 4-byte Reload
+.LBB124_8:
+	movl	%esi, 40(%eax)
+	movl	36(%esp), %esi # 4-byte Reload
+	jne	.LBB124_10
+# BB#9:
+	movl	12(%esp), %edx # 4-byte Reload
+.LBB124_10:
+	movl	%edx, 44(%eax)
+	movl	%edi, 48(%eax)
+	movl	52(%esp), %edx # 4-byte Reload
+	jne	.LBB124_12
+# BB#11:
+	movl	16(%esp), %esi # 4-byte Reload
+.LBB124_12:
+	movl	%esi, 52(%eax)
+	jne	.LBB124_14
+# BB#13:
+	movl	20(%esp), %edx # 4-byte Reload
+.LBB124_14:
+	movl	%edx, 56(%eax)
+	jne	.LBB124_16
+# BB#15:
+	movl	%ebp, %ecx
+.LBB124_16:
+	movl	%ecx, 60(%eax)
+	addl	$56, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end124:
+	.size	mcl_fpDbl_add8Lbmi2, .Lfunc_end124-mcl_fpDbl_add8Lbmi2
+
+	.globl	mcl_fpDbl_sub8Lbmi2
+	.align	16, 0x90
+	.type	mcl_fpDbl_sub8Lbmi2,@function
+mcl_fpDbl_sub8Lbmi2: # @mcl_fpDbl_sub8Lbmi2
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$40, %esp
+	movl	64(%esp), %edi
+	movl	(%edi), %eax
+	movl	4(%edi), %edx
+	movl	68(%esp), %ebx
+	subl	(%ebx), %eax
+	sbbl	4(%ebx), %edx
+	movl	8(%edi), %esi
+	sbbl	8(%ebx), %esi
+	movl	60(%esp), %ecx
+	movl	%eax, (%ecx)
+	movl	12(%edi), %eax
+	sbbl	12(%ebx), %eax
+	movl	%edx, 4(%ecx)
+	movl	16(%edi), %edx
+	sbbl	16(%ebx), %edx
+	movl	%esi, 8(%ecx)
+	movl	20(%ebx), %esi
+	movl	%eax, 12(%ecx)
+	movl	20(%edi), %eax
+	sbbl	%esi, %eax
+	movl	24(%ebx), %esi
+	movl	%edx, 16(%ecx)
+	movl	24(%edi), %edx
+	sbbl	%esi, %edx
+	movl	28(%ebx), %esi
+	movl	%eax, 20(%ecx)
+	movl	28(%edi), %eax
+	sbbl	%esi, %eax
+	movl	32(%ebx), %esi
+	movl	%edx, 24(%ecx)
+	movl	32(%edi), %edx
+	sbbl	%esi, %edx
+	movl	%edx, 16(%esp) # 4-byte Spill
+	movl	36(%ebx), %edx
+	movl	%eax, 28(%ecx)
+	movl	36(%edi), %eax
+	sbbl	%edx, %eax
+	movl	%eax, 8(%esp) # 4-byte Spill
+	movl	40(%ebx), %eax
+	movl	40(%edi), %edx
+	sbbl	%eax, %edx
+	movl	%edx, 12(%esp) # 4-byte Spill
+	movl	44(%ebx), %eax
+	movl	44(%edi), %edx
+	sbbl	%eax, %edx
+	movl	%edx, 20(%esp) # 4-byte Spill
+	movl	48(%ebx), %eax
+	movl	48(%edi), %edx
+	sbbl	%eax, %edx
+	movl	%edx, 24(%esp) # 4-byte Spill
+	movl	52(%ebx), %eax
+	movl	52(%edi), %edx
+	sbbl	%eax, %edx
+	movl	%edx, 28(%esp) # 4-byte Spill
+	movl	56(%ebx), %eax
+	movl	56(%edi), %edx
+	sbbl	%eax, %edx
+	movl	%edx, 32(%esp) # 4-byte Spill
+	movl	60(%ebx), %eax
+	movl	60(%edi), %edx
+	sbbl	%eax, %edx
+	movl	%edx, 36(%esp) # 4-byte Spill
+	movl	$0, %eax
+	sbbl	$0, %eax
+	andl	$1, %eax
+	movl	72(%esp), %ebx
+	jne	.LBB125_1
+# BB#2:
+	movl	$0, 4(%esp) # 4-byte Folded Spill
+	jmp	.LBB125_3
+.LBB125_1:
+	movl	28(%ebx), %edx
+	movl	%edx, 4(%esp) # 4-byte Spill
+.LBB125_3:
+	testb	%al, %al
+	jne	.LBB125_4
+# BB#5:
+	movl	$0, %ebp
+	movl	$0, %eax
+	jmp	.LBB125_6
+.LBB125_4:
+	movl	(%ebx), %eax
+	movl	4(%ebx), %ebp
+.LBB125_6:
+	jne	.LBB125_7
+# BB#8:
+	movl	$0, (%esp) # 4-byte Folded Spill
+	jmp	.LBB125_9
+.LBB125_7:
+	movl	24(%ebx), %edx
+	movl	%edx, (%esp) # 4-byte Spill
+.LBB125_9:
+	jne	.LBB125_10
+# BB#11:
+	movl	$0, %edx
+	jmp	.LBB125_12
+.LBB125_10:
+	movl	20(%ebx), %edx
+.LBB125_12:
+	jne	.LBB125_13
+# BB#14:
+	movl	$0, %esi
+	jmp	.LBB125_15
+.LBB125_13:
+	movl	16(%ebx), %esi
+.LBB125_15:
+	jne	.LBB125_16
+# BB#17:
+	movl	$0, %edi
+	jmp	.LBB125_18
+.LBB125_16:
+	movl	12(%ebx), %edi
+.LBB125_18:
+	jne	.LBB125_19
+# BB#20:
+	xorl	%ebx, %ebx
+	jmp	.LBB125_21
+.LBB125_19:
+	movl	8(%ebx), %ebx
+.LBB125_21:
+	addl	16(%esp), %eax # 4-byte Folded Reload
+	adcl	8(%esp), %ebp # 4-byte Folded Reload
+	movl	%eax, 32(%ecx)
+	adcl	12(%esp), %ebx # 4-byte Folded Reload
+	movl	%ebp, 36(%ecx)
+	adcl	20(%esp), %edi # 4-byte Folded Reload
+	movl	%ebx, 40(%ecx)
+	adcl	24(%esp), %esi # 4-byte Folded Reload
+	movl	%edi, 44(%ecx)
+	adcl	28(%esp), %edx # 4-byte Folded Reload
+	movl	%esi, 48(%ecx)
+	movl	(%esp), %eax # 4-byte Reload
+	adcl	32(%esp), %eax # 4-byte Folded Reload
+	movl	%edx, 52(%ecx)
+	movl	%eax, 56(%ecx)
+	movl	4(%esp), %eax # 4-byte Reload
+	adcl	36(%esp), %eax # 4-byte Folded Reload
+	movl	%eax, 60(%ecx)
+	addl	$40, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end125:
+	.size	mcl_fpDbl_sub8Lbmi2, .Lfunc_end125-mcl_fpDbl_sub8Lbmi2
+
+	.align	16, 0x90
+	.type	.LmulPv288x32,@function
+.LmulPv288x32: # @mulPv288x32
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$24, %esp
+	movl	%edx, %eax
+	movl	44(%esp), %edx
+	mulxl	4(%eax), %edi, %esi
+	mulxl	(%eax), %ebp, %ebx
+	movl	%ebp, 20(%esp) # 4-byte Spill
+	addl	%edi, %ebx
+	movl	%ebx, 16(%esp) # 4-byte Spill
+	mulxl	8(%eax), %edi, %ebx
+	adcl	%esi, %edi
+	movl	%edi, 12(%esp) # 4-byte Spill
+	mulxl	12(%eax), %esi, %edi
+	adcl	%ebx, %esi
+	movl	%esi, 8(%esp) # 4-byte Spill
+	movl	%edx, %ebp
+	mulxl	16(%eax), %ebx, %esi
+	adcl	%edi, %ebx
+	mulxl	20(%eax), %edi, %edx
+	movl	%edx, 4(%esp) # 4-byte Spill
+	adcl	%esi, %edi
+	movl	%ebp, %edx
+	mulxl	24(%eax), %esi, %edx
+	movl	%edx, (%esp) # 4-byte Spill
+	adcl	4(%esp), %esi # 4-byte Folded Reload
+	movl	%ebp, %edx
+	mulxl	28(%eax), %edx, %ebp
+	movl	%ebp, 4(%esp) # 4-byte Spill
+	adcl	(%esp), %edx # 4-byte Folded Reload
+	movl	20(%esp), %ebp # 4-byte Reload
+	movl	%ebp, (%ecx)
+	movl	16(%esp), %ebp # 4-byte Reload
+	movl	%ebp, 4(%ecx)
+	movl	12(%esp), %ebp # 4-byte Reload
+	movl	%ebp, 8(%ecx)
+	movl	8(%esp), %ebp # 4-byte Reload
+	movl	%ebp, 12(%ecx)
+	movl	%ebx, 16(%ecx)
+	movl	%edi, 20(%ecx)
+	movl	%esi, 24(%ecx)
+	movl	%edx, 28(%ecx)
+	movl	44(%esp), %edx
+	mulxl	32(%eax), %eax, %edx
+	adcl	4(%esp), %eax # 4-byte Folded Reload
+	movl	%eax, 32(%ecx)
+	adcl	$0, %edx
+	movl	%edx, 36(%ecx)
+	movl	%ecx, %eax
+	addl	$24, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end126:
+	.size	.LmulPv288x32, .Lfunc_end126-.LmulPv288x32
+
+	.globl	mcl_fp_mulUnitPre9Lbmi2
+	.align	16, 0x90
+	.type	mcl_fp_mulUnitPre9Lbmi2,@function
+mcl_fp_mulUnitPre9Lbmi2: # @mcl_fp_mulUnitPre9Lbmi2
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$76, %esp
+	calll	.L127$pb
+.L127$pb:
+	popl	%ebx
+.Ltmp8:
+	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp8-.L127$pb), %ebx
+	movl	104(%esp), %eax
+	movl	%eax, (%esp)
+	leal	32(%esp), %ecx
+	movl	100(%esp), %edx
+	calll	.LmulPv288x32
+	movl	68(%esp), %eax
+	movl	%eax, 28(%esp) # 4-byte Spill
+	movl	64(%esp), %eax
+	movl	%eax, 24(%esp) # 4-byte Spill
+	movl	60(%esp), %eax
+	movl	%eax, 20(%esp) # 4-byte Spill
+	movl	56(%esp), %eax
+	movl	%eax, 16(%esp) # 4-byte Spill
+	movl	52(%esp), %edi
+	movl	48(%esp), %ebx
+	movl	44(%esp), %ebp
+	movl	40(%esp), %esi
+	movl	32(%esp), %edx
+	movl	36(%esp), %ecx
+	movl	96(%esp), %eax
+	movl	%edx, (%eax)
+	movl	%ecx, 4(%eax)
+	movl	%esi, 8(%eax)
+	movl	%ebp, 12(%eax)
+	movl	%ebx, 16(%eax)
+	movl	%edi, 20(%eax)
+	movl	16(%esp), %ecx # 4-byte Reload
+	movl	%ecx, 24(%eax)
+	movl	20(%esp), %ecx # 4-byte Reload
+	movl	%ecx, 28(%eax)
+	movl	24(%esp), %ecx # 4-byte Reload
+	movl	%ecx, 32(%eax)
+	movl	28(%esp), %ecx # 4-byte Reload
+	movl	%ecx, 36(%eax)
+	addl	$76, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end127:
+	.size	mcl_fp_mulUnitPre9Lbmi2, .Lfunc_end127-mcl_fp_mulUnitPre9Lbmi2
+
+	.globl	mcl_fpDbl_mulPre9Lbmi2
+	.align	16, 0x90
+	.type	mcl_fpDbl_mulPre9Lbmi2,@function
+mcl_fpDbl_mulPre9Lbmi2: # @mcl_fpDbl_mulPre9Lbmi2
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$428, %esp # imm = 0x1AC
+	calll	.L128$pb
+.L128$pb:
+	popl	%esi
+.Ltmp9:
+	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp9-.L128$pb), %esi
+	movl	%esi, 60(%esp) # 4-byte Spill
+	movl	456(%esp), %eax
+	movl	(%eax), %eax
+	movl	%eax, (%esp)
+	leal	384(%esp), %ecx
+	movl	452(%esp), %edx
+	movl	%edx, %ebp
+	movl	%esi, %ebx
+	calll	.LmulPv288x32
+	movl	420(%esp), %eax
+	movl	%eax, 52(%esp) # 4-byte Spill
+	movl	416(%esp), %eax
+	movl	%eax, 44(%esp) # 4-byte Spill
+	movl	412(%esp), %eax
+	movl	%eax, 36(%esp) # 4-byte Spill
+	movl	408(%esp), %eax
+	movl	%eax, 24(%esp) # 4-byte Spill
+	movl	404(%esp), %eax
+	movl	%eax, 12(%esp) # 4-byte Spill
+	movl	400(%esp), %eax
+	movl	%eax, 20(%esp) # 4-byte Spill
+	movl	396(%esp), %eax
+	movl	%eax, 16(%esp) # 4-byte Spill
+	movl	392(%esp), %eax
+	movl	%eax, 40(%esp) # 4-byte Spill
+	movl	384(%esp), %eax
+	movl	388(%esp), %edi
+	movl	448(%esp), %ecx
+	movl	%eax, (%ecx)
+	movl	456(%esp), %eax
+	movl	4(%eax), %eax
+	movl	%eax, (%esp)
+	leal	344(%esp), %ecx
+	movl	%ebp, %edx
+	movl	%esi, %ebx
+	calll	.LmulPv288x32
+	addl	344(%esp), %edi
+	movl	%edi, 8(%esp) # 4-byte Spill
+	movl	380(%esp), %eax
+	movl	%eax, 28(%esp) # 4-byte Spill
+	movl	376(%esp), %eax
+	movl	%eax, 56(%esp) # 4-byte Spill
+	movl	372(%esp), %eax
+	movl	%eax, 48(%esp) # 4-byte Spill
+	movl	368(%esp), %eax
+	movl	%eax, 32(%esp) # 4-byte Spill
+	movl	364(%esp), %ebx
+	movl	360(%esp), %edi
+	movl	356(%esp), %esi
+	movl	348(%esp), %ecx
+	movl	352(%esp), %edx
+	movl	448(%esp), %eax
+	movl	8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 8(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 456(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 452(%esp), %edx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 40(%esp), %eax # 4-byte Reload + addl 304(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 340(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 336(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 324(%esp), %edi + movl 320(%esp), %ebp + movl 316(%esp), %esi + movl 308(%esp), %ecx + movl 312(%esp), %edx + movl 448(%esp), %eax + movl 40(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + movl 456(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 452(%esp), %edx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 40(%esp), %eax # 4-byte Reload + addl 264(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 300(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 296(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 292(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 288(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 284(%esp), %ebx + movl 280(%esp), %edi + movl 276(%esp), %esi + movl 268(%esp), %ecx + movl 272(%esp), %edx + movl 448(%esp), %eax + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 
4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 456(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 452(%esp), %edx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + addl 224(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 248(%esp), %ebx + movl 244(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 240(%esp), %edi + movl 236(%esp), %ebp + movl 228(%esp), %ecx + movl 232(%esp), %edx + movl 448(%esp), %eax + movl 44(%esp), %esi # 4-byte Reload + movl %esi, 16(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 44(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esp) # 4-byte Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + movl 456(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 452(%esp), %edx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 44(%esp), %eax # 4-byte Reload + addl 184(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 220(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 204(%esp), %edi + movl 200(%esp), %ebx + movl 196(%esp), %esi + movl 188(%esp), %ecx + movl 192(%esp), %edx + movl 448(%esp), %eax + movl 44(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 8(%esp) # 4-byte Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl %eax, 20(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 456(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 452(%esp), %edx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + addl 144(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 44(%esp) # 
4-byte Spill + movl 168(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 164(%esp), %ebx + movl 160(%esp), %edi + movl 156(%esp), %esi + movl 148(%esp), %ecx + movl 152(%esp), %edx + movl 448(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 52(%esp) # 4-byte Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 456(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 104(%esp), %ecx + movl 452(%esp), %edx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 12(%esp), %esi # 4-byte Reload + addl 104(%esp), %esi + movl 140(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 136(%esp), %ebp + movl 132(%esp), %edi + movl 128(%esp), %ebx + movl 124(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 120(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 116(%esp), %edx + movl 108(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 112(%esp), %ecx + movl 448(%esp), %eax + movl %esi, 28(%eax) + movl 12(%esp), %esi # 4-byte Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + movl 40(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 44(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 56(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 456(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 452(%esp), %edx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl %esi, %ebp + addl 64(%esp), %ebp + movl 24(%esp), %edx # 4-byte Reload + adcl 68(%esp), %edx + movl 52(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 88(%esp), %edi + movl 84(%esp), %ebx + movl 80(%esp), %esi + movl 76(%esp), %eax + movl 448(%esp), %ecx + movl %ebp, 32(%ecx) + movl %edx, 36(%ecx) + adcl 28(%esp), %eax # 4-byte Folded Reload + movl 52(%esp), %edx # 4-byte Reload + movl %edx, 40(%ecx) + adcl 48(%esp), %esi # 4-byte Folded Reload + movl %eax, 44(%ecx) + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %esi, 48(%ecx) + adcl 44(%esp), %edi # 4-byte Folded Reload + movl %ebx, 52(%ecx) + movl 24(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %edi, 56(%ecx) + movl %eax, 60(%ecx) + movl 
36(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%ecx) + movl 60(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 68(%ecx) + addl $428, %esp # imm = 0x1AC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end128: + .size mcl_fpDbl_mulPre9Lbmi2, .Lfunc_end128-mcl_fpDbl_mulPre9Lbmi2 + + .globl mcl_fpDbl_sqrPre9Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre9Lbmi2,@function +mcl_fpDbl_sqrPre9Lbmi2: # @mcl_fpDbl_sqrPre9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $428, %esp # imm = 0x1AC + calll .L129$pb +.L129$pb: + popl %ebx +.Ltmp10: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp10-.L129$pb), %ebx + movl %ebx, 60(%esp) # 4-byte Spill + movl 452(%esp), %edx + movl (%edx), %eax + movl %eax, (%esp) + leal 384(%esp), %ecx + movl %edx, %esi + movl %ebx, %edi + calll .LmulPv288x32 + movl 420(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 416(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 404(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 400(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 396(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 392(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl 388(%esp), %ebp + movl 448(%esp), %ecx + movl %eax, (%ecx) + movl 4(%esi), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl %esi, %edx + movl %edi, %ebx + calll .LmulPv288x32 + addl 344(%esp), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 380(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 376(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 364(%esp), %ebx + movl 360(%esp), %edi + movl 356(%esp), %esi + movl 348(%esp), %ecx + movl 352(%esp), %edx + movl 448(%esp), %eax + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 4(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 452(%esp), %edx + movl 8(%edx), %eax + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 40(%esp), %eax # 4-byte Reload + addl 304(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 340(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 336(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 324(%esp), %edi + movl 320(%esp), %ebp + movl 316(%esp), %esi + movl 308(%esp), %ecx + movl 312(%esp), %edx + movl 448(%esp), %eax + movl 40(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%eax) + adcl 
16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + movl 452(%esp), %edx + movl 12(%edx), %eax + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 40(%esp), %eax # 4-byte Reload + addl 264(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 300(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 296(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 292(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 288(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 284(%esp), %ebx + movl 280(%esp), %edi + movl 276(%esp), %esi + movl 268(%esp), %ecx + movl 272(%esp), %edx + movl 448(%esp), %eax + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 20(%esp) # 4-byte Folded Spill + movl 452(%esp), %edx + movl 16(%edx), %eax + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + addl 224(%esp), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 248(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 244(%esp), %edi + movl 240(%esp), %ebp + movl 236(%esp), %esi + movl 228(%esp), %ecx + movl 232(%esp), %edx + movl 448(%esp), %eax + movl 12(%esp), %ebx # 4-byte Reload + movl %ebx, 16(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 
4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + movl 452(%esp), %edx + movl 20(%edx), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 40(%esp), %eax # 4-byte Reload + addl 184(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 220(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 204(%esp), %ebx + movl 200(%esp), %edi + movl 196(%esp), %esi + movl 188(%esp), %ecx + movl 192(%esp), %edx + movl 448(%esp), %eax + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 8(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 452(%esp), %edx + movl 24(%edx), %eax + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + addl 144(%esp), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 164(%esp), %edi + movl 160(%esp), %ebp + movl 156(%esp), %esi + movl 148(%esp), %ecx + movl 152(%esp), %edx + movl 448(%esp), %eax + movl 24(%esp), %ebx # 4-byte Reload + movl %ebx, 24(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 452(%esp), %edx + movl 28(%edx), %eax + movl %eax, (%esp) + leal 104(%esp), %ecx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl 4(%esp), %esi # 4-byte Reload + addl 104(%esp), %esi + movl 140(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 136(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 132(%esp), %ebp + movl 128(%esp), %ebx + movl 124(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 
120(%esp), %edi + movl 116(%esp), %edx + movl 108(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 112(%esp), %ecx + movl 448(%esp), %eax + movl %esi, 28(%eax) + movl 48(%esp), %esi # 4-byte Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + adcl 52(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 452(%esp), %edx + movl 32(%edx), %eax + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 60(%esp), %ebx # 4-byte Reload + calll .LmulPv288x32 + movl %esi, %ebp + addl 64(%esp), %ebp + movl 20(%esp), %edx # 4-byte Reload + adcl 68(%esp), %edx + movl 48(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 88(%esp), %edi + movl 84(%esp), %ebx + movl 80(%esp), %esi + movl 76(%esp), %eax + movl 448(%esp), %ecx + movl %ebp, 32(%ecx) + movl %edx, 36(%ecx) + adcl 24(%esp), %eax # 4-byte Folded Reload + movl 48(%esp), %edx # 4-byte Reload + movl %edx, 40(%ecx) + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %eax, 44(%ecx) + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %esi, 48(%ecx) + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %ebx, 52(%ecx) + movl 28(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %edi, 56(%ecx) + movl %eax, 60(%ecx) + movl 40(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%ecx) + movl 60(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 68(%ecx) + addl $428, %esp # imm = 0x1AC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end129: + .size mcl_fpDbl_sqrPre9Lbmi2, .Lfunc_end129-mcl_fpDbl_sqrPre9Lbmi2 + + .globl mcl_fp_mont9Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont9Lbmi2,@function +mcl_fp_mont9Lbmi2: # @mcl_fp_mont9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $796, %esp # imm = 0x31C + calll .L130$pb +.L130$pb: + popl %ebx +.Ltmp11: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp11-.L130$pb), %ebx + movl 828(%esp), %eax + movl -4(%eax), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 752(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 752(%esp), %ebp + movl 756(%esp), %esi + movl %ebp, %eax + imull %edi, %eax + movl 788(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 784(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 780(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 776(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 772(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 768(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 764(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 760(%esp), %edi + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 712(%esp), %ebp + adcl 
716(%esp), %esi + adcl 720(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 748(%esp), %ebp + sbbl %eax, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 672(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 64(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 672(%esp), %esi + adcl 676(%esp), %edi + movl 44(%esp), %ecx # 4-byte Reload + adcl 680(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 684(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 688(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 692(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 696(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 704(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + adcl 708(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 828(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + andl $1, %ebp + addl 632(%esp), %esi + adcl 636(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 660(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ebp + movl 824(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 592(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + addl 592(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 616(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 624(%esp), 
%esi + adcl 628(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %edi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + andl $1, %ebp + addl 552(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 580(%esp), %edi + adcl 584(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, %ebp + movl 824(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 512(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 44(%esp), %ecx # 4-byte Reload + addl 512(%esp), %ecx + movl 32(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 524(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 536(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 544(%esp), %edi + adcl 548(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + movl 40(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 472(%esp), %ebp + movl 32(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 480(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 484(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 488(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 492(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 496(%esp), %ebp + movl 64(%esp), %esi # 4-byte Reload + adcl 500(%esp), %esi + adcl 504(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 508(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 432(%esp), %ecx + movl 820(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + movl 32(%esp), %ecx # 4-byte Reload + addl 432(%esp), %ecx + movl 36(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte 
Reload + adcl 444(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 452(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + adcl 456(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 392(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + movl %esi, %eax + andl $1, %eax + addl 392(%esp), %ebp + movl 36(%esp), %esi # 4-byte Reload + adcl 396(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 400(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 404(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 408(%esp), %ebp + movl 68(%esp), %ecx # 4-byte Reload + adcl 412(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 416(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 420(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 424(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 428(%esp), %edi + adcl $0, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + addl 352(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 364(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 372(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 384(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + movl %edi, %eax + andl $1, %eax + addl 312(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 316(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 320(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 324(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 328(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 332(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 336(%esp), %esi + movl 44(%esp), %edi # 4-byte Reload + adcl 340(%esp), %edi + movl 40(%esp), %ecx # 4-byte Reload + adcl 344(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 348(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %ebp + movl 824(%esp), %eax + 
movl 24(%eax), %eax + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 48(%esp), %ecx # 4-byte Reload + addl 272(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 292(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl 296(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 308(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + movl %edi, %ecx + andl $1, %ecx + addl 232(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 240(%esp), %esi + movl 68(%esp), %edi # 4-byte Reload + adcl 244(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 260(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 192(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 192(%esp), %ecx + adcl 196(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + adcl 200(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 212(%esp), %esi + adcl 216(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 152(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + andl $1, %ebp + addl 152(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 164(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 172(%esp), %esi + movl %esi, 
44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 180(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 56(%esp), %ecx # 4-byte Reload + addl 112(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 120(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + movl 52(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 128(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 132(%esp), %ebp + adcl 136(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 28(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 72(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + andl $1, %edi + addl 72(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %edx, %esi + movl 44(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 92(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl $0, %edi + movl 828(%esp), %ebx + subl (%ebx), %eax + movl %ecx, %edx + sbbl 4(%ebx), %edx + movl %esi, %ecx + sbbl 8(%ebx), %ecx + movl 44(%esp), %esi # 4-byte Reload + sbbl 12(%ebx), %esi + movl %esi, 16(%esp) # 4-byte Spill + sbbl 16(%ebx), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + sbbl 20(%ebx), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + sbbl 24(%ebx), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + sbbl 28(%ebx), %esi + movl 60(%esp), %ebp # 4-byte Reload + sbbl 32(%ebx), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + sbbl $0, %edi + andl $1, %edi + movl %edi, %ebx + jne .LBB130_2 +# BB#1: + movl %esi, 32(%esp) # 4-byte Spill +.LBB130_2: + testb %bl, %bl + movl 68(%esp), %esi # 4-byte Reload + jne .LBB130_4 +# BB#3: + movl %eax, %esi +.LBB130_4: + movl 816(%esp), %ebp + movl %esi, (%ebp) + movl 64(%esp), %eax # 4-byte Reload + jne .LBB130_6 +# BB#5: + movl %edx, %eax +.LBB130_6: + movl %eax, 4(%ebp) + movl 52(%esp), %eax # 4-byte Reload + jne 
.LBB130_8 +# BB#7: + movl %ecx, %eax +.LBB130_8: + movl %eax, 8(%ebp) + movl 44(%esp), %eax # 4-byte Reload + jne .LBB130_10 +# BB#9: + movl 16(%esp), %eax # 4-byte Reload +.LBB130_10: + movl %eax, 12(%ebp) + jne .LBB130_12 +# BB#11: + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 40(%esp) # 4-byte Spill +.LBB130_12: + movl 40(%esp), %eax # 4-byte Reload + movl %eax, 16(%ebp) + movl 36(%esp), %eax # 4-byte Reload + jne .LBB130_14 +# BB#13: + movl 24(%esp), %eax # 4-byte Reload +.LBB130_14: + movl %eax, 20(%ebp) + movl 48(%esp), %eax # 4-byte Reload + jne .LBB130_16 +# BB#15: + movl 28(%esp), %eax # 4-byte Reload +.LBB130_16: + movl %eax, 24(%ebp) + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 28(%ebp) + movl 60(%esp), %eax # 4-byte Reload + jne .LBB130_18 +# BB#17: + movl 56(%esp), %eax # 4-byte Reload +.LBB130_18: + movl %eax, 32(%ebp) + addl $796, %esp # imm = 0x31C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end130: + .size mcl_fp_mont9Lbmi2, .Lfunc_end130-mcl_fp_mont9Lbmi2 + + .globl mcl_fp_montNF9Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF9Lbmi2,@function +mcl_fp_montNF9Lbmi2: # @mcl_fp_montNF9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $796, %esp # imm = 0x31C + calll .L131$pb +.L131$pb: + popl %ebx +.Ltmp12: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp12-.L131$pb), %ebx + movl 828(%esp), %eax + movl -4(%eax), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 752(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 752(%esp), %esi + movl 756(%esp), %ebp + movl %esi, %eax + imull %edi, %eax + movl 788(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 784(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 780(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 776(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 772(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 768(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 764(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 760(%esp), %edi + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 712(%esp), %esi + adcl 716(%esp), %ebp + adcl 720(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 740(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 672(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 708(%esp), %eax + addl 672(%esp), %ebp + adcl 676(%esp), %edi + movl 32(%esp), %ecx # 4-byte Reload + adcl 680(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 684(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 688(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 692(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 696(%esp), %esi + movl 60(%esp), %ecx # 4-byte Reload + adcl 
700(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 704(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %ebp, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 632(%esp), %ebp + adcl 636(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 656(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 664(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 592(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 628(%esp), %eax + addl 592(%esp), %edi + movl 32(%esp), %ecx # 4-byte Reload + adcl 596(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 600(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 604(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 608(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 612(%esp), %esi + movl 60(%esp), %ecx # 4-byte Reload + adcl 616(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 620(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 624(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %edi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 828(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + addl 552(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 572(%esp), %esi + movl 60(%esp), %edi # 4-byte Reload + adcl 576(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 588(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 512(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 548(%esp), %eax + movl 32(%esp), %edx # 4-byte Reload + addl 512(%esp), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 516(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 520(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 524(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 528(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + adcl 532(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + 
movl 68(%esp), %ebp # 4-byte Reload + adcl 536(%esp), %ebp + movl 64(%esp), %edi # 4-byte Reload + adcl 540(%esp), %edi + movl 44(%esp), %esi # 4-byte Reload + adcl 544(%esp), %esi + adcl $0, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl %edx, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + movl 32(%esp), %eax # 4-byte Reload + addl 472(%esp), %eax + movl 40(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 496(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + adcl 500(%esp), %edi + movl %edi, %ebp + adcl 504(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 432(%esp), %ecx + movl 820(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + movl 468(%esp), %eax + movl 40(%esp), %ecx # 4-byte Reload + addl 432(%esp), %ecx + movl 48(%esp), %esi # 4-byte Reload + adcl 436(%esp), %esi + movl 56(%esp), %edi # 4-byte Reload + adcl 440(%esp), %edi + movl 36(%esp), %edx # 4-byte Reload + adcl 444(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 448(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 452(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 456(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 460(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 464(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 392(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 392(%esp), %ebp + adcl 396(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl 400(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 412(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 424(%esp), %edi + movl 40(%esp), %esi # 4-byte Reload + adcl 428(%esp), %esi + movl 824(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 388(%esp), %eax + movl 48(%esp), %ecx # 4-byte Reload + addl 352(%esp), %ecx + movl 56(%esp), %edx # 4-byte Reload + adcl 356(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 360(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 364(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill 
+ adcl 368(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 372(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 376(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 380(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 384(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 312(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 324(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 340(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 348(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 308(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + addl 272(%esp), %ecx + movl 36(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 280(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 292(%esp), %ebp + adcl 296(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 232(%esp), %edi + movl 36(%esp), %esi # 4-byte Reload + adcl 236(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %ebp, %edi + adcl 252(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 192(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 228(%esp), %ebp + movl %esi, %ecx + addl 192(%esp), %ecx + movl 60(%esp), %esi # 4-byte Reload + adcl 196(%esp), %esi + 
movl 68(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 208(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 152(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 152(%esp), %edi + adcl 156(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 160(%esp), %edi + movl 64(%esp), %esi # 4-byte Reload + adcl 164(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 188(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 148(%esp), %ebp + movl 60(%esp), %ecx # 4-byte Reload + addl 112(%esp), %ecx + adcl 116(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + adcl 120(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 128(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %esi # 4-byte Reload + adcl 132(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, %ebp + movl 28(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 72(%esp), %ecx + movl 828(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + addl 72(%esp), %edi + movl 44(%esp), %edi # 4-byte Reload + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebx # 4-byte Reload + adcl 80(%esp), %ebx + movl %ebx, 64(%esp) # 4-byte Spill + adcl 84(%esp), %edi + movl 52(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl 92(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl 108(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl %eax, %edx + movl 828(%esp), %eax + subl (%eax), %edx + sbbl 4(%eax), %ebx + movl %edi, 
%ecx + sbbl 8(%eax), %ecx + movl 52(%esp), %esi # 4-byte Reload + sbbl 12(%eax), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 40(%esp), %esi # 4-byte Reload + sbbl 16(%eax), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + sbbl 20(%eax), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + sbbl 24(%eax), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + sbbl 28(%eax), %esi + movl %esi, 32(%esp) # 4-byte Spill + sbbl 32(%eax), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + sarl $31, %ebp + testl %ebp, %ebp + movl 68(%esp), %eax # 4-byte Reload + js .LBB131_2 +# BB#1: + movl %edx, %eax +.LBB131_2: + movl 816(%esp), %edx + movl %eax, (%edx) + movl 64(%esp), %esi # 4-byte Reload + js .LBB131_4 +# BB#3: + movl %ebx, %esi +.LBB131_4: + movl %esi, 4(%edx) + movl 52(%esp), %ebp # 4-byte Reload + movl 40(%esp), %eax # 4-byte Reload + js .LBB131_6 +# BB#5: + movl %ecx, %edi +.LBB131_6: + movl %edi, 8(%edx) + js .LBB131_8 +# BB#7: + movl 16(%esp), %ebp # 4-byte Reload +.LBB131_8: + movl %ebp, 12(%edx) + js .LBB131_10 +# BB#9: + movl 20(%esp), %eax # 4-byte Reload +.LBB131_10: + movl %eax, 16(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB131_12 +# BB#11: + movl 24(%esp), %eax # 4-byte Reload +.LBB131_12: + movl %eax, 20(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB131_14 +# BB#13: + movl 28(%esp), %eax # 4-byte Reload +.LBB131_14: + movl %eax, 24(%edx) + movl 36(%esp), %eax # 4-byte Reload + js .LBB131_16 +# BB#15: + movl 32(%esp), %eax # 4-byte Reload +.LBB131_16: + movl %eax, 28(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB131_18 +# BB#17: + movl 44(%esp), %eax # 4-byte Reload +.LBB131_18: + movl %eax, 32(%edx) + addl $796, %esp # imm = 0x31C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end131: + .size mcl_fp_montNF9Lbmi2, .Lfunc_end131-mcl_fp_montNF9Lbmi2 + + .globl mcl_fp_montRed9Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed9Lbmi2,@function +mcl_fp_montRed9Lbmi2: # @mcl_fp_montRed9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $492, %esp # imm = 0x1EC + calll .L132$pb +.L132$pb: + popl %ebx +.Ltmp13: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp13-.L132$pb), %ebx + movl 520(%esp), %edx + movl -4(%edx), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 516(%esp), %eax + movl (%eax), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 4(%eax), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl %esi, %ecx + imull %edi, %ecx + movl 68(%eax), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 64(%eax), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 60(%eax), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 56(%eax), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 52(%eax), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 48(%eax), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 44(%eax), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 40(%eax), %edi + movl %edi, 124(%esp) # 4-byte Spill + movl 36(%eax), %edi + movl %edi, 120(%esp) # 4-byte Spill + movl 32(%eax), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 28(%eax), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 24(%eax), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 20(%eax), %ebp + movl 16(%eax), %edi + movl 12(%eax), %esi + movl 8(%eax), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl (%edx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 32(%edx), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 28(%edx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%edx), %eax + movl %eax, 
40(%esp) # 4-byte Spill + movl 20(%edx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 16(%edx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 12(%edx), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 8(%edx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 4(%edx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, (%esp) + leal 448(%esp), %ecx + calll .LmulPv288x32 + movl 76(%esp), %eax # 4-byte Reload + addl 448(%esp), %eax + movl 52(%esp), %ecx # 4-byte Reload + adcl 452(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 460(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 464(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + adcl 468(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + movl 96(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 108(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + sbbl %eax, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %ecx, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 408(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + movl 76(%esp), %eax # 4-byte Reload + andl $1, %eax + movl 52(%esp), %ecx # 4-byte Reload + addl 408(%esp), %ecx + movl 56(%esp), %edx # 4-byte Reload + adcl 412(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + adcl 416(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 420(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 424(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 428(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 432(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 436(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 440(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + adcl $0, %esi + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 108(%esp) # 4-byte Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, %ebp + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + addl 368(%esp), %edi + movl 60(%esp), %ecx # 4-byte Reload + adcl 372(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 
4-byte Reload + adcl 384(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 404(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl 108(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + addl 328(%esp), %ebp + movl 64(%esp), %ecx # 4-byte Reload + adcl 332(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 364(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl 100(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 88(%esp) # 4-byte Folded Spill + movl 80(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 288(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + movl 64(%esp), %eax # 4-byte Reload + addl 288(%esp), %eax + movl 68(%esp), %ecx # 4-byte Reload + adcl 292(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 112(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %esi 
+ movl %esi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 520(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + addl 248(%esp), %esi + movl 84(%esp), %ecx # 4-byte Reload + adcl 252(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl 264(%esp), %ebp + movl 124(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %edi, %esi + adcl $0, %esi + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %edi + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + addl 208(%esp), %edi + movl 92(%esp), %ecx # 4-byte Reload + adcl 212(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 220(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 88(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl 76(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 520(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + addl 168(%esp), %ebp + movl 104(%esp), %ecx # 4-byte Reload + adcl 172(%esp), %ecx + movl 120(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %ebp # 4-byte Reload + adcl 180(%esp), %ebp + movl 96(%esp), %esi # 4-byte Reload + adcl 184(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 
520(%esp), %edx + calll .LmulPv288x32 + addl 128(%esp), %edi + movl 120(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl %eax, %edi + adcl 136(%esp), %ebp + movl %ebp, 124(%esp) # 4-byte Spill + adcl 140(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 144(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl %eax, %ebx + movl 112(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 164(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + subl 20(%esp), %edi # 4-byte Folded Reload + movl 124(%esp), %eax # 4-byte Reload + sbbl 16(%esp), %eax # 4-byte Folded Reload + sbbl 24(%esp), %esi # 4-byte Folded Reload + sbbl 28(%esp), %ecx # 4-byte Folded Reload + sbbl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 72(%esp) # 4-byte Spill + movl 112(%esp), %ebx # 4-byte Reload + sbbl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 76(%esp) # 4-byte Spill + movl 100(%esp), %ebx # 4-byte Reload + sbbl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 84(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 92(%esp) # 4-byte Spill + movl %edx, %ebx + movl %ebp, %edx + sbbl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 104(%esp) # 4-byte Spill + sbbl $0, %edx + andl $1, %edx + jne .LBB132_2 +# BB#1: + movl %ecx, 116(%esp) # 4-byte Spill +.LBB132_2: + testb %dl, %dl + movl 120(%esp), %ecx # 4-byte Reload + jne .LBB132_4 +# BB#3: + movl %edi, %ecx +.LBB132_4: + movl 512(%esp), %edi + movl %ecx, (%edi) + movl 88(%esp), %ecx # 4-byte Reload + jne .LBB132_6 +# BB#5: + movl %eax, 124(%esp) # 4-byte Spill +.LBB132_6: + movl 124(%esp), %eax # 4-byte Reload + movl %eax, 4(%edi) + movl 96(%esp), %eax # 4-byte Reload + jne .LBB132_8 +# BB#7: + movl %esi, %eax +.LBB132_8: + movl %eax, 8(%edi) + movl 116(%esp), %eax # 4-byte Reload + movl %eax, 12(%edi) + movl 80(%esp), %eax # 4-byte Reload + movl 108(%esp), %ebp # 4-byte Reload + jne .LBB132_10 +# BB#9: + movl 72(%esp), %ebp # 4-byte Reload +.LBB132_10: + movl %ebp, 16(%edi) + movl 112(%esp), %ebx # 4-byte Reload + jne .LBB132_12 +# BB#11: + movl 76(%esp), %ebx # 4-byte Reload +.LBB132_12: + movl %ebx, 20(%edi) + movl 100(%esp), %esi # 4-byte Reload + jne .LBB132_14 +# BB#13: + movl 84(%esp), %esi # 4-byte Reload +.LBB132_14: + movl %esi, 24(%edi) + jne .LBB132_16 +# BB#15: + movl 92(%esp), %ecx # 4-byte Reload +.LBB132_16: + movl %ecx, 28(%edi) + jne .LBB132_18 +# BB#17: + movl 104(%esp), %eax # 4-byte Reload +.LBB132_18: + movl %eax, 32(%edi) + addl $492, %esp # imm = 0x1EC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end132: + .size mcl_fp_montRed9Lbmi2, .Lfunc_end132-mcl_fp_montRed9Lbmi2 + + .globl mcl_fp_addPre9Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre9Lbmi2,@function +mcl_fp_addPre9Lbmi2: # @mcl_fp_addPre9Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 16(%esp), %ebx + 
movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl %esi, 24(%ebx) + movl %edx, 28(%ebx) + movl 32(%eax), %eax + movl 32(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 32(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end133: + .size mcl_fp_addPre9Lbmi2, .Lfunc_end133-mcl_fp_addPre9Lbmi2 + + .globl mcl_fp_subPre9Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre9Lbmi2,@function +mcl_fp_subPre9Lbmi2: # @mcl_fp_subPre9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl %edi, 24(%ebp) + movl %esi, 28(%ebp) + movl 32(%edx), %edx + movl 32(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 32(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end134: + .size mcl_fp_subPre9Lbmi2, .Lfunc_end134-mcl_fp_subPre9Lbmi2 + + .globl mcl_fp_shr1_9Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_9Lbmi2,@function +mcl_fp_shr1_9Lbmi2: # @mcl_fp_shr1_9Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + shrdl $1, %edx, %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 8(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 4(%esi) + movl 12(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 8(%esi) + movl 16(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 12(%esi) + movl 20(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 16(%esi) + movl 24(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 20(%esi) + movl 28(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 24(%esi) + movl 32(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 28(%esi) + shrl %eax + movl %eax, 32(%esi) + popl %esi + retl +.Lfunc_end135: + .size mcl_fp_shr1_9Lbmi2, .Lfunc_end135-mcl_fp_shr1_9Lbmi2 + + .globl mcl_fp_add9Lbmi2 + .align 16, 0x90 + .type mcl_fp_add9Lbmi2,@function +mcl_fp_add9Lbmi2: # @mcl_fp_add9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 48(%esp), %edi + movl (%edi), %ecx + movl 4(%edi), %eax + movl 44(%esp), %ebx + addl (%ebx), %ecx + movl %ecx, %ebp + adcl 4(%ebx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 8(%edi), %eax + adcl 8(%ebx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 12(%ebx), %ecx + movl 16(%ebx), %eax + adcl 12(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + adcl 16(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 20(%ebx), %esi + adcl 20(%edi), %esi + movl 24(%ebx), %edx + adcl 24(%edi), %edx + movl 28(%ebx), %ecx + adcl 28(%edi), %ecx + movl 32(%ebx), %eax + adcl 32(%edi), %eax + movl 40(%esp), %edi + movl %ebp, (%edi) + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%edi) + 
movl 12(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%edi) + movl 8(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%edi) + movl 4(%esp), %ebx # 4-byte Reload + movl %ebx, 16(%edi) + movl %esi, 20(%edi) + movl %edx, 24(%edi) + movl %ecx, 28(%edi) + movl %eax, 32(%edi) + sbbl %ebx, %ebx + andl $1, %ebx + movl 52(%esp), %edi + subl (%edi), %ebp + movl %ebp, (%esp) # 4-byte Spill + movl 16(%esp), %ebp # 4-byte Reload + sbbl 4(%edi), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 12(%esp), %ebp # 4-byte Reload + sbbl 8(%edi), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 8(%esp), %ebp # 4-byte Reload + sbbl 12(%edi), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 4(%esp), %ebp # 4-byte Reload + sbbl 16(%edi), %ebp + sbbl 20(%edi), %esi + sbbl 24(%edi), %edx + sbbl 28(%edi), %ecx + sbbl 32(%edi), %eax + sbbl $0, %ebx + testb $1, %bl + jne .LBB136_2 +# BB#1: # %nocarry + movl (%esp), %edi # 4-byte Reload + movl 40(%esp), %ebx + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 12(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 8(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl %ebp, 16(%ebx) + movl %esi, 20(%ebx) + movl %edx, 24(%ebx) + movl %ecx, 28(%ebx) + movl %eax, 32(%ebx) +.LBB136_2: # %carry + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end136: + .size mcl_fp_add9Lbmi2, .Lfunc_end136-mcl_fp_add9Lbmi2 + + .globl mcl_fp_addNF9Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF9Lbmi2,@function +mcl_fp_addNF9Lbmi2: # @mcl_fp_addNF9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 100(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edi + movl 96(%esp), %esi + addl (%esi), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 4(%esi), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 32(%eax), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 28(%eax), %ebp + movl 24(%eax), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 20(%eax), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 16(%eax), %ebx + movl 12(%eax), %edx + movl 8(%eax), %ecx + adcl 8(%esi), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + adcl 12(%esi), %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 16(%esi), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 20(%esi), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 24(%esi), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 28(%esi), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 32(%esi), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 104(%esp), %esi + movl 36(%esp), %eax # 4-byte Reload + movl %eax, %ebp + subl (%esi), %ebp + movl %ebp, (%esp) # 4-byte Spill + sbbl 4(%esi), %edi + movl %edi, 4(%esp) # 4-byte Spill + sbbl 8(%esi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 12(%esi), %edx + movl %edx, 12(%esp) # 4-byte Spill + sbbl 16(%esi), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 64(%esp), %ebx # 4-byte Reload + sbbl 20(%esi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + sbbl 24(%esi), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + sbbl 28(%esi), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + movl %ecx, %edx + movl %ecx, %ebp + sbbl 32(%esi), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %edx, %esi + sarl $31, %esi + testl %esi, %esi + js .LBB137_2 +# BB#1: + movl (%esp), %eax # 4-byte Reload +.LBB137_2: + movl 92(%esp), %ecx + movl %eax, 
(%ecx) + movl 44(%esp), %eax # 4-byte Reload + js .LBB137_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB137_4: + movl %eax, 4(%ecx) + movl 68(%esp), %esi # 4-byte Reload + movl 64(%esp), %edi # 4-byte Reload + movl 52(%esp), %ebx # 4-byte Reload + movl 48(%esp), %edx # 4-byte Reload + movl 40(%esp), %eax # 4-byte Reload + js .LBB137_6 +# BB#5: + movl 8(%esp), %eax # 4-byte Reload +.LBB137_6: + movl %eax, 8(%ecx) + movl %ebp, %eax + js .LBB137_8 +# BB#7: + movl 12(%esp), %edx # 4-byte Reload +.LBB137_8: + movl %edx, 12(%ecx) + movl 56(%esp), %edx # 4-byte Reload + js .LBB137_10 +# BB#9: + movl 16(%esp), %ebx # 4-byte Reload +.LBB137_10: + movl %ebx, 16(%ecx) + js .LBB137_12 +# BB#11: + movl 20(%esp), %edi # 4-byte Reload +.LBB137_12: + movl %edi, 20(%ecx) + js .LBB137_14 +# BB#13: + movl 24(%esp), %esi # 4-byte Reload +.LBB137_14: + movl %esi, 24(%ecx) + js .LBB137_16 +# BB#15: + movl 28(%esp), %edx # 4-byte Reload +.LBB137_16: + movl %edx, 28(%ecx) + js .LBB137_18 +# BB#17: + movl 32(%esp), %eax # 4-byte Reload +.LBB137_18: + movl %eax, 32(%ecx) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end137: + .size mcl_fp_addNF9Lbmi2, .Lfunc_end137-mcl_fp_addNF9Lbmi2 + + .globl mcl_fp_sub9Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub9Lbmi2,@function +mcl_fp_sub9Lbmi2: # @mcl_fp_sub9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $28, %esp + movl 52(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 56(%esp), %edi + subl (%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 16(%esi), %edx + sbbl 16(%edi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 20(%esi), %ecx + sbbl 20(%edi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 28(%esi), %ebp + sbbl 28(%edi), %ebp + movl 32(%esi), %esi + sbbl 32(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 48(%esp), %ebx + movl 12(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl %edx, 16(%ebx) + movl %ecx, 20(%ebx) + movl %eax, 24(%ebx) + movl %ebp, 28(%ebx) + movl %esi, 32(%ebx) + je .LBB138_2 +# BB#1: # %carry + movl %esi, %edi + movl 60(%esp), %esi + movl 12(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 20(%esp), %ecx # 4-byte Reload + adcl 8(%esi), %ecx + movl 12(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebx) + movl 16(%esi), %ecx + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl (%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl %ecx, 24(%ebx) + movl 28(%esi), %eax + adcl %ebp, %eax + movl %eax, 28(%ebx) + movl 32(%esi), %eax + adcl %edi, %eax + movl %eax, 32(%ebx) +.LBB138_2: # %nocarry + addl $28, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end138: + .size mcl_fp_sub9Lbmi2, .Lfunc_end138-mcl_fp_sub9Lbmi2 + + .globl mcl_fp_subNF9Lbmi2 + .align 16, 0x90 + .type 
mcl_fp_subNF9Lbmi2,@function +mcl_fp_subNF9Lbmi2: # @mcl_fp_subNF9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $48, %esp + movl 72(%esp), %edx + movl (%edx), %ecx + movl 4(%edx), %eax + movl 76(%esp), %esi + subl (%esi), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + sbbl 4(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 32(%edx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 28(%edx), %ebp + movl 24(%edx), %edi + movl 20(%edx), %ebx + movl 16(%edx), %ecx + movl 12(%edx), %eax + movl 8(%edx), %edx + sbbl 8(%esi), %edx + movl %edx, 12(%esp) # 4-byte Spill + sbbl 12(%esi), %eax + movl %eax, 16(%esp) # 4-byte Spill + sbbl 16(%esi), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + sbbl 20(%esi), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + sbbl 24(%esi), %edi + movl %edi, 28(%esp) # 4-byte Spill + sbbl 28(%esi), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + sbbl 32(%esi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %eax, %ecx + sarl $31, %ecx + movl %ecx, %edi + shldl $1, %eax, %edi + movl 80(%esp), %ebp + movl 12(%ebp), %eax + andl %edi, %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 4(%ebp), %ebx + andl %edi, %ebx + andl (%ebp), %edi + movl 32(%ebp), %eax + andl %ecx, %eax + movl %eax, 8(%esp) # 4-byte Spill + rorxl $31, %ecx, %eax + andl 28(%ebp), %ecx + movl 24(%ebp), %edx + andl %eax, %edx + movl %edx, (%esp) # 4-byte Spill + movl 20(%ebp), %esi + andl %eax, %esi + movl 16(%ebp), %edx + andl %eax, %edx + andl 8(%ebp), %eax + addl 36(%esp), %edi # 4-byte Folded Reload + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl 68(%esp), %ebp + movl %edi, (%ebp) + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %ebx, 4(%ebp) + movl 4(%esp), %edi # 4-byte Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %eax, 8(%ebp) + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edi, 12(%ebp) + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %edx, 16(%ebp) + movl (%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %esi, 20(%ebp) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %eax, 24(%ebp) + movl %ecx, 28(%ebp) + movl 8(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%ebp) + addl $48, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end139: + .size mcl_fp_subNF9Lbmi2, .Lfunc_end139-mcl_fp_subNF9Lbmi2 + + .globl mcl_fpDbl_add9Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add9Lbmi2,@function +mcl_fpDbl_add9Lbmi2: # @mcl_fpDbl_add9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $68, %esp + movl 96(%esp), %edx + movl 92(%esp), %edi + movl 12(%edi), %esi + movl 16(%edi), %ecx + movl 8(%edx), %ebx + movl (%edx), %ebp + addl (%edi), %ebp + movl 88(%esp), %eax + movl %ebp, (%eax) + movl 4(%edx), %ebp + adcl 4(%edi), %ebp + adcl 8(%edi), %ebx + adcl 12(%edx), %esi + adcl 16(%edx), %ecx + movl %ebp, 4(%eax) + movl 44(%edx), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl %ebx, 8(%eax) + movl 20(%edx), %ebx + movl %esi, 12(%eax) + movl 20(%edi), %esi + adcl %ebx, %esi + movl 24(%edx), %ebx + movl %ecx, 16(%eax) + movl 24(%edi), %ecx + adcl %ebx, %ecx + movl 28(%edx), %ebx + movl %esi, 20(%eax) + movl 28(%edi), %esi + adcl %ebx, %esi + movl 32(%edx), %ebx + movl %ecx, 24(%eax) + movl 32(%edi), %ecx + adcl %ebx, %ecx + movl 36(%edx), %ebp + movl %esi, 28(%eax) + movl 36(%edi), %esi + adcl %ebp, %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 40(%edx), %esi + movl %ecx, 32(%eax) + movl 40(%edi), %eax + adcl %esi, %eax + 
movl %eax, 52(%esp) # 4-byte Spill + movl 44(%edi), %eax + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl 48(%edi), %ebx + adcl %ecx, %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 52(%edx), %eax + movl 52(%edi), %ecx + adcl %eax, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 56(%edx), %esi + movl 56(%edi), %eax + adcl %esi, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%edx), %ebp + movl 60(%edi), %esi + adcl %ebp, %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 64(%edx), %eax + movl 64(%edi), %ebp + adcl %eax, %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 68(%edx), %edx + movl 68(%edi), %eax + adcl %edx, %eax + movl %eax, 32(%esp) # 4-byte Spill + sbbl %edx, %edx + andl $1, %edx + movl 100(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + subl (%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 4(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + sbbl 8(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + sbbl 12(%edi), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + sbbl 16(%edi), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + sbbl 20(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + sbbl 24(%edi), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl %ebp, %eax + movl 32(%esp), %ebp # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl %ebp, %ebx + sbbl 32(%edi), %ebx + sbbl $0, %edx + andl $1, %edx + jne .LBB140_2 +# BB#1: + movl %ebx, %ebp +.LBB140_2: + testb %dl, %dl + movl 60(%esp), %edx # 4-byte Reload + movl 40(%esp), %ecx # 4-byte Reload + movl 36(%esp), %esi # 4-byte Reload + movl 56(%esp), %edi # 4-byte Reload + movl 52(%esp), %ebx # 4-byte Reload + jne .LBB140_4 +# BB#3: + movl (%esp), %ecx # 4-byte Reload + movl 4(%esp), %esi # 4-byte Reload + movl 8(%esp), %edi # 4-byte Reload + movl 12(%esp), %ebx # 4-byte Reload + movl 16(%esp), %edx # 4-byte Reload +.LBB140_4: + movl 88(%esp), %eax + movl %edx, 36(%eax) + movl %ebx, 40(%eax) + movl %edi, 44(%eax) + movl %esi, 48(%eax) + movl %ecx, 52(%eax) + movl 44(%esp), %edx # 4-byte Reload + movl 64(%esp), %ecx # 4-byte Reload + jne .LBB140_6 +# BB#5: + movl 20(%esp), %ecx # 4-byte Reload +.LBB140_6: + movl %ecx, 56(%eax) + movl 48(%esp), %ecx # 4-byte Reload + jne .LBB140_8 +# BB#7: + movl 24(%esp), %edx # 4-byte Reload +.LBB140_8: + movl %edx, 60(%eax) + jne .LBB140_10 +# BB#9: + movl 28(%esp), %ecx # 4-byte Reload +.LBB140_10: + movl %ecx, 64(%eax) + movl %ebp, 68(%eax) + addl $68, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end140: + .size mcl_fpDbl_add9Lbmi2, .Lfunc_end140-mcl_fpDbl_add9Lbmi2 + + .globl mcl_fpDbl_sub9Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub9Lbmi2,@function +mcl_fpDbl_sub9Lbmi2: # @mcl_fpDbl_sub9Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $52, %esp + movl 76(%esp), %ebx + movl (%ebx), %eax + movl 4(%ebx), %edx + movl 80(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %edx + movl 8(%ebx), %esi + sbbl 8(%ebp), %esi + movl 72(%esp), %ecx + movl %eax, (%ecx) + movl 12(%ebx), %eax + sbbl 12(%ebp), %eax + movl %edx, 4(%ecx) + movl 16(%ebx), %edx + sbbl 16(%ebp), %edx + movl %esi, 8(%ecx) + movl 20(%ebp), %esi + movl %eax, 12(%ecx) + movl 20(%ebx), %eax + sbbl %esi, %eax + movl 24(%ebp), %esi + movl %edx, 16(%ecx) + movl 24(%ebx), %edx + sbbl %esi, %edx + movl 28(%ebp), %esi + movl %eax, 20(%ecx) + movl 28(%ebx), %eax + sbbl %esi, %eax + movl 32(%ebp), %esi 
+ movl %edx, 24(%ecx) + movl 32(%ebx), %edx + sbbl %esi, %edx + movl 36(%ebp), %esi + movl %eax, 28(%ecx) + movl 36(%ebx), %eax + sbbl %esi, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%ebp), %eax + movl %edx, 32(%ecx) + movl 40(%ebx), %edx + sbbl %eax, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 44(%ebp), %eax + movl 44(%ebx), %edx + sbbl %eax, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 48(%ebp), %eax + movl 48(%ebx), %edx + sbbl %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 52(%ebp), %eax + movl 52(%ebx), %edx + sbbl %eax, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 56(%ebp), %eax + movl 56(%ebx), %edx + sbbl %eax, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 60(%ebp), %eax + movl 60(%ebx), %edx + sbbl %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 64(%ebp), %eax + movl 64(%ebx), %edx + sbbl %eax, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 68(%ebp), %eax + movl 68(%ebx), %edx + sbbl %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 84(%esp), %ebp + jne .LBB141_1 +# BB#2: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB141_3 +.LBB141_1: + movl 32(%ebp), %edx + movl %edx, 12(%esp) # 4-byte Spill +.LBB141_3: + testb %al, %al + jne .LBB141_4 +# BB#5: + movl $0, 4(%esp) # 4-byte Folded Spill + movl $0, %esi + jmp .LBB141_6 +.LBB141_4: + movl (%ebp), %esi + movl 4(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB141_6: + jne .LBB141_7 +# BB#8: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB141_9 +.LBB141_7: + movl 28(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB141_9: + jne .LBB141_10 +# BB#11: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB141_12 +.LBB141_10: + movl 24(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB141_12: + jne .LBB141_13 +# BB#14: + movl $0, %edi + jmp .LBB141_15 +.LBB141_13: + movl 20(%ebp), %edi +.LBB141_15: + jne .LBB141_16 +# BB#17: + movl $0, %ebx + jmp .LBB141_18 +.LBB141_16: + movl 16(%ebp), %ebx +.LBB141_18: + jne .LBB141_19 +# BB#20: + movl %ebp, %eax + movl $0, %ebp + jmp .LBB141_21 +.LBB141_19: + movl %ebp, %eax + movl 12(%eax), %ebp +.LBB141_21: + jne .LBB141_22 +# BB#23: + xorl %eax, %eax + jmp .LBB141_24 +.LBB141_22: + movl 8(%eax), %eax +.LBB141_24: + addl 24(%esp), %esi # 4-byte Folded Reload + movl 4(%esp), %edx # 4-byte Reload + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %esi, 36(%ecx) + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %edx, 40(%ecx) + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl %eax, 44(%ecx) + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 48(%ecx) + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %ebx, 52(%ecx) + movl (%esp), %edx # 4-byte Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edi, 56(%ecx) + movl 8(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %edx, 60(%ecx) + movl %eax, 64(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%ecx) + addl $52, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end141: + .size mcl_fpDbl_sub9Lbmi2, .Lfunc_end141-mcl_fpDbl_sub9Lbmi2 + + .align 16, 0x90 + .type .LmulPv320x32,@function +.LmulPv320x32: # @mulPv320x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $28, %esp + movl %edx, %eax + movl 48(%esp), %edx + mulxl 4(%eax), %edi, %esi + mulxl (%eax), %ebp, %ebx + movl %ebp, 24(%esp) # 4-byte Spill + addl %edi, %ebx + movl %ebx, 20(%esp) # 4-byte Spill + mulxl 8(%eax), %edi, %ebx + adcl %esi, %edi + movl 
%edi, 16(%esp) # 4-byte Spill + mulxl 12(%eax), %esi, %edi + adcl %ebx, %esi + movl %esi, 12(%esp) # 4-byte Spill + mulxl 16(%eax), %esi, %ebx + movl %ebx, 4(%esp) # 4-byte Spill + adcl %edi, %esi + movl %esi, 8(%esp) # 4-byte Spill + movl %edx, %ebp + mulxl 20(%eax), %ebx, %esi + adcl 4(%esp), %ebx # 4-byte Folded Reload + mulxl 24(%eax), %edi, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl %esi, %edi + movl %ebp, %edx + mulxl 28(%eax), %esi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl 4(%esp), %esi # 4-byte Folded Reload + movl %ebp, %edx + mulxl 32(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%ecx) + movl %ebx, 20(%ecx) + movl %edi, 24(%ecx) + movl %esi, 28(%ecx) + movl %edx, 32(%ecx) + movl 48(%esp), %edx + mulxl 36(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ecx) + adcl $0, %edx + movl %edx, 40(%ecx) + movl %ecx, %eax + addl $28, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end142: + .size .LmulPv320x32, .Lfunc_end142-.LmulPv320x32 + + .globl mcl_fp_mulUnitPre10Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre10Lbmi2,@function +mcl_fp_mulUnitPre10Lbmi2: # @mcl_fp_mulUnitPre10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $76, %esp + calll .L143$pb +.L143$pb: + popl %ebx +.Ltmp14: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp14-.L143$pb), %ebx + movl 104(%esp), %eax + movl %eax, (%esp) + leal 32(%esp), %ecx + movl 100(%esp), %edx + calll .LmulPv320x32 + movl 72(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 68(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 64(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 60(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 56(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 52(%esp), %ebx + movl 48(%esp), %ebp + movl 44(%esp), %edi + movl 40(%esp), %esi + movl 32(%esp), %edx + movl 36(%esp), %ecx + movl 96(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebp, 16(%eax) + movl %ebx, 20(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + addl $76, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end143: + .size mcl_fp_mulUnitPre10Lbmi2, .Lfunc_end143-mcl_fp_mulUnitPre10Lbmi2 + + .globl mcl_fpDbl_mulPre10Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre10Lbmi2,@function +mcl_fpDbl_mulPre10Lbmi2: # @mcl_fpDbl_mulPre10Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $188, %esp + calll .L144$pb +.L144$pb: + popl %ebx +.Ltmp15: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp15-.L144$pb), %ebx + movl %ebx, -128(%ebp) # 4-byte Spill + movl 16(%ebp), %edi + movl %edi, 8(%esp) + movl 12(%ebp), %esi + movl %esi, 4(%esp) + movl 8(%ebp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre5Lbmi2@PLT + leal 20(%edi), %eax + movl %eax, 8(%esp) + leal 20(%esi), %eax + movl %eax, 4(%esp) + movl 8(%ebp), %eax + leal 40(%eax), %eax + movl %eax, (%esp) + calll 
mcl_fpDbl_mulPre5Lbmi2@PLT + movl 28(%esi), %edi + movl (%esi), %ebx + movl 4(%esi), %eax + addl 20(%esi), %ebx + movl %ebx, -148(%ebp) # 4-byte Spill + adcl 24(%esi), %eax + movl %eax, -132(%ebp) # 4-byte Spill + adcl 8(%esi), %edi + movl %edi, -140(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -96(%ebp) # 4-byte Spill + movl 16(%ebp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + addl 20(%esi), %eax + movl %eax, -152(%ebp) # 4-byte Spill + adcl 24(%esi), %ecx + movl %ecx, -120(%ebp) # 4-byte Spill + movl 28(%esi), %eax + adcl 8(%esi), %eax + movl %eax, -160(%ebp) # 4-byte Spill + movl 32(%esi), %eax + adcl 12(%esi), %eax + movl 36(%esi), %ecx + adcl 16(%esi), %ecx + pushl %eax + seto %al + lahf + movl %eax, %esi + popl %eax + movl %esi, -156(%ebp) # 4-byte Spill + movl %ebx, -124(%ebp) # 4-byte Spill + jb .LBB144_2 +# BB#1: + xorl %edi, %edi + movl $0, -124(%ebp) # 4-byte Folded Spill +.LBB144_2: + movl %edi, -136(%ebp) # 4-byte Spill + movl 12(%ebp), %esi + movl %esi, %ebx + movl 36(%ebx), %esi + movl 32(%ebx), %edi + movl -96(%ebp), %edx # 4-byte Reload + pushl %eax + movl %edx, %eax + addb $127, %al + sahf + popl %eax + adcl 12(%ebx), %edi + movl %edi, -116(%ebp) # 4-byte Spill + adcl 16(%ebx), %esi + movl %esi, -144(%ebp) # 4-byte Spill + movl %ecx, -112(%ebp) # 4-byte Spill + movl %eax, -104(%ebp) # 4-byte Spill + movl -160(%ebp), %edx # 4-byte Reload + movl %edx, -108(%ebp) # 4-byte Spill + movl -120(%ebp), %esi # 4-byte Reload + movl %esi, -96(%ebp) # 4-byte Spill + movl -152(%ebp), %ebx # 4-byte Reload + movl %ebx, -100(%ebp) # 4-byte Spill + jb .LBB144_4 +# BB#3: + movl $0, -112(%ebp) # 4-byte Folded Spill + movl $0, -104(%ebp) # 4-byte Folded Spill + movl $0, -108(%ebp) # 4-byte Folded Spill + movl $0, -96(%ebp) # 4-byte Folded Spill + movl $0, -100(%ebp) # 4-byte Folded Spill +.LBB144_4: + movl -148(%ebp), %esi # 4-byte Reload + movl %esi, -72(%ebp) + movl -132(%ebp), %edi # 4-byte Reload + movl %edi, -68(%ebp) + movl -140(%ebp), %esi # 4-byte Reload + movl %esi, -64(%ebp) + movl %ebx, -92(%ebp) + movl -120(%ebp), %esi # 4-byte Reload + movl %esi, -88(%ebp) + movl %edx, -84(%ebp) + movl %eax, -80(%ebp) + movl %ecx, -76(%ebp) + sbbl %edx, %edx + movl -116(%ebp), %eax # 4-byte Reload + movl %eax, -60(%ebp) + movl -144(%ebp), %ebx # 4-byte Reload + movl %ebx, -56(%ebp) + movl -156(%ebp), %ecx # 4-byte Reload + pushl %eax + movl %ecx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB144_6 +# BB#5: + movl $0, %ebx + movl $0, %eax + movl $0, %edi +.LBB144_6: + movl %eax, -116(%ebp) # 4-byte Spill + sbbl %eax, %eax + leal -92(%ebp), %ecx + movl %ecx, 8(%esp) + leal -72(%ebp), %ecx + movl %ecx, 4(%esp) + leal -52(%ebp), %ecx + movl %ecx, (%esp) + andl %eax, %edx + movl -124(%ebp), %eax # 4-byte Reload + addl %eax, -100(%ebp) # 4-byte Folded Spill + adcl %edi, -96(%ebp) # 4-byte Folded Spill + movl -108(%ebp), %esi # 4-byte Reload + adcl -136(%ebp), %esi # 4-byte Folded Reload + movl -116(%ebp), %eax # 4-byte Reload + adcl %eax, -104(%ebp) # 4-byte Folded Spill + movl -112(%ebp), %edi # 4-byte Reload + adcl %ebx, %edi + sbbl %eax, %eax + andl $1, %eax + movl %eax, -120(%ebp) # 4-byte Spill + andl $1, %edx + movl %edx, -116(%ebp) # 4-byte Spill + movl -128(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre5Lbmi2@PLT + movl -100(%ebp), %eax # 4-byte Reload + addl -32(%ebp), %eax + movl %eax, -100(%ebp) # 4-byte Spill + movl -96(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -96(%ebp) # 4-byte Spill + adcl -24(%ebp), %esi + movl %esi, 
-108(%ebp) # 4-byte Spill + movl -104(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -104(%ebp) # 4-byte Spill + adcl -16(%ebp), %edi + movl %edi, -112(%ebp) # 4-byte Spill + movl -120(%ebp), %eax # 4-byte Reload + adcl %eax, -116(%ebp) # 4-byte Folded Spill + movl -52(%ebp), %ecx + movl 8(%ebp), %esi + subl (%esi), %ecx + movl -48(%ebp), %ebx + sbbl 4(%esi), %ebx + movl -44(%ebp), %eax + sbbl 8(%esi), %eax + movl %eax, -120(%ebp) # 4-byte Spill + movl -40(%ebp), %edx + sbbl 12(%esi), %edx + movl -36(%ebp), %edi + sbbl 16(%esi), %edi + movl 20(%esi), %eax + movl %eax, -124(%ebp) # 4-byte Spill + sbbl %eax, -100(%ebp) # 4-byte Folded Spill + movl 24(%esi), %eax + movl %eax, -128(%ebp) # 4-byte Spill + sbbl %eax, -96(%ebp) # 4-byte Folded Spill + movl 28(%esi), %eax + movl %eax, -132(%ebp) # 4-byte Spill + sbbl %eax, -108(%ebp) # 4-byte Folded Spill + movl 32(%esi), %eax + movl %eax, -136(%ebp) # 4-byte Spill + sbbl %eax, -104(%ebp) # 4-byte Folded Spill + movl 36(%esi), %eax + movl %eax, -140(%ebp) # 4-byte Spill + sbbl %eax, -112(%ebp) # 4-byte Folded Spill + sbbl $0, -116(%ebp) # 4-byte Folded Spill + movl 40(%esi), %eax + movl %eax, -160(%ebp) # 4-byte Spill + subl %eax, %ecx + movl 44(%esi), %eax + movl %eax, -164(%ebp) # 4-byte Spill + sbbl %eax, %ebx + movl 48(%esi), %eax + movl %eax, -168(%ebp) # 4-byte Spill + sbbl %eax, -120(%ebp) # 4-byte Folded Spill + movl 52(%esi), %eax + movl %eax, -172(%ebp) # 4-byte Spill + sbbl %eax, %edx + movl 56(%esi), %eax + movl %eax, -176(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl 60(%esi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + sbbl %eax, -100(%ebp) # 4-byte Folded Spill + movl 64(%esi), %eax + movl %eax, -144(%ebp) # 4-byte Spill + sbbl %eax, -96(%ebp) # 4-byte Folded Spill + movl 68(%esi), %eax + movl %eax, -148(%ebp) # 4-byte Spill + sbbl %eax, -108(%ebp) # 4-byte Folded Spill + movl 72(%esi), %eax + movl %eax, -152(%ebp) # 4-byte Spill + sbbl %eax, -104(%ebp) # 4-byte Folded Spill + movl 76(%esi), %eax + movl %eax, -156(%ebp) # 4-byte Spill + sbbl %eax, -112(%ebp) # 4-byte Folded Spill + sbbl $0, -116(%ebp) # 4-byte Folded Spill + addl -124(%ebp), %ecx # 4-byte Folded Reload + adcl -128(%ebp), %ebx # 4-byte Folded Reload + movl %ecx, 20(%esi) + movl -120(%ebp), %eax # 4-byte Reload + adcl -132(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 24(%esi) + adcl -136(%ebp), %edx # 4-byte Folded Reload + movl %eax, 28(%esi) + adcl -140(%ebp), %edi # 4-byte Folded Reload + movl %edx, 32(%esi) + movl -100(%ebp), %eax # 4-byte Reload + adcl -160(%ebp), %eax # 4-byte Folded Reload + movl %edi, 36(%esi) + movl -96(%ebp), %ecx # 4-byte Reload + adcl -164(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 40(%esi) + movl -108(%ebp), %eax # 4-byte Reload + adcl -168(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 44(%esi) + movl -104(%ebp), %ecx # 4-byte Reload + adcl -172(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 48(%esi) + movl -112(%ebp), %edx # 4-byte Reload + adcl -176(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 52(%esi) + movl -116(%ebp), %eax # 4-byte Reload + adcl -180(%ebp), %eax # 4-byte Folded Reload + movl %edx, 56(%esi) + movl %eax, 60(%esi) + movl -144(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 64(%esi) + movl -148(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 68(%esi) + movl -152(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 72(%esi) + movl -156(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 76(%esi) + addl $188, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + 
retl +.Lfunc_end144: + .size mcl_fpDbl_mulPre10Lbmi2, .Lfunc_end144-mcl_fpDbl_mulPre10Lbmi2 + + .globl mcl_fpDbl_sqrPre10Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre10Lbmi2,@function +mcl_fpDbl_sqrPre10Lbmi2: # @mcl_fpDbl_sqrPre10Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $188, %esp + calll .L145$pb +.L145$pb: + popl %ebx +.Ltmp16: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp16-.L145$pb), %ebx + movl %ebx, -120(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + movl %edi, 8(%esp) + movl %edi, 4(%esp) + movl 8(%ebp), %esi + movl %esi, (%esp) + calll mcl_fpDbl_mulPre5Lbmi2@PLT + leal 20(%edi), %eax + movl %eax, 8(%esp) + movl %eax, 4(%esp) + leal 40(%esi), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre5Lbmi2@PLT + movl 36(%edi), %eax + movl 32(%edi), %ebx + movl 28(%edi), %esi + movl (%edi), %ecx + movl 4(%edi), %edx + addl 20(%edi), %ecx + adcl 24(%edi), %edx + adcl 8(%edi), %esi + adcl 12(%edi), %ebx + movl %ebx, -124(%ebp) # 4-byte Spill + adcl 16(%edi), %eax + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -128(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -108(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -104(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -100(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -96(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + sbbl %ebx, %ebx + movl %ebx, -116(%ebp) # 4-byte Spill + pushl %eax + movl %edi, %eax + addb $127, %al + sahf + popl %eax + jb .LBB145_1 +# BB#2: + movl $0, -112(%ebp) # 4-byte Folded Spill + jmp .LBB145_3 +.LBB145_1: + leal (%ecx,%ecx), %edi + movl %edi, -112(%ebp) # 4-byte Spill +.LBB145_3: + movl -96(%ebp), %edi # 4-byte Reload + pushl %eax + movl %edi, %eax + addb $127, %al + sahf + popl %eax + movl -124(%ebp), %edi # 4-byte Reload + jb .LBB145_4 +# BB#5: + movl $0, -96(%ebp) # 4-byte Folded Spill + jmp .LBB145_6 +.LBB145_4: + movl %edx, %ebx + shldl $1, %ecx, %ebx + movl %ebx, -96(%ebp) # 4-byte Spill +.LBB145_6: + movl -100(%ebp), %ebx # 4-byte Reload + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB145_7 +# BB#8: + movl $0, -100(%ebp) # 4-byte Folded Spill + jmp .LBB145_9 +.LBB145_7: + movl %esi, %ebx + shldl $1, %edx, %ebx + movl %ebx, -100(%ebp) # 4-byte Spill +.LBB145_9: + movl -104(%ebp), %ebx # 4-byte Reload + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB145_10 +# BB#11: + movl $0, -104(%ebp) # 4-byte Folded Spill + jmp .LBB145_12 +.LBB145_10: + movl %edi, %ebx + shldl $1, %esi, %ebx + movl %ebx, -104(%ebp) # 4-byte Spill +.LBB145_12: + movl -108(%ebp), %ebx # 4-byte Reload + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB145_13 +# BB#14: + movl $0, -108(%ebp) # 4-byte Folded Spill + jmp .LBB145_15 +.LBB145_13: + movl %eax, %ebx + shldl $1, %edi, %ebx + movl %ebx, -108(%ebp) # 4-byte Spill +.LBB145_15: + movl %ecx, -72(%ebp) + movl %edx, -68(%ebp) + movl %esi, -64(%ebp) + movl %edi, -60(%ebp) + movl %eax, -56(%ebp) + movl %ecx, -92(%ebp) + movl %edx, -88(%ebp) + movl %esi, -84(%ebp) + movl %edi, -80(%ebp) + movl %eax, -76(%ebp) + movl -128(%ebp), %ecx # 4-byte Reload + pushl %eax + movl %ecx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB145_16 +# BB#17: + movl $0, -124(%ebp) # 4-byte Folded Spill + jmp .LBB145_18 +.LBB145_16: + shrl $31, %eax 
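+# The conditional leal/shldl $1 chain above forms 2*(a_lo+a_hi) limb by
+# limb, the correction term needed when a_lo+a_hi carried out of 160
+# bits; shrl $31 keeps the bit shifted out of the top limb, spilled
+# below as the next word of that doubled value.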
+ movl %eax, -124(%ebp) # 4-byte Spill +.LBB145_18: + leal -52(%ebp), %eax + movl %eax, (%esp) + leal -72(%ebp), %eax + movl %eax, 4(%esp) + leal -92(%ebp), %eax + movl %eax, 8(%esp) + movl -116(%ebp), %esi # 4-byte Reload + andl $1, %esi + movl -120(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre5Lbmi2@PLT + movl -112(%ebp), %edi # 4-byte Reload + addl -32(%ebp), %edi + movl -96(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -96(%ebp) # 4-byte Spill + movl -100(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -100(%ebp) # 4-byte Spill + movl -104(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -104(%ebp) # 4-byte Spill + movl -108(%ebp), %eax # 4-byte Reload + adcl -16(%ebp), %eax + movl %eax, -108(%ebp) # 4-byte Spill + adcl -124(%ebp), %esi # 4-byte Folded Reload + movl -52(%ebp), %edx + movl 8(%ebp), %eax + subl (%eax), %edx + movl -48(%ebp), %ebx + sbbl 4(%eax), %ebx + movl -44(%ebp), %ecx + sbbl 8(%eax), %ecx + movl %ecx, -116(%ebp) # 4-byte Spill + movl -40(%ebp), %ecx + sbbl 12(%eax), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + movl -36(%ebp), %ecx + sbbl 16(%eax), %ecx + movl %ecx, -120(%ebp) # 4-byte Spill + movl 20(%eax), %ecx + movl %ecx, -124(%ebp) # 4-byte Spill + sbbl %ecx, %edi + movl %edi, -112(%ebp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, -128(%ebp) # 4-byte Spill + sbbl %ecx, -96(%ebp) # 4-byte Folded Spill + movl 28(%eax), %ecx + movl %ecx, -132(%ebp) # 4-byte Spill + sbbl %ecx, -100(%ebp) # 4-byte Folded Spill + movl 32(%eax), %ecx + movl %ecx, -136(%ebp) # 4-byte Spill + sbbl %ecx, -104(%ebp) # 4-byte Folded Spill + movl 36(%eax), %ecx + movl %ecx, -140(%ebp) # 4-byte Spill + sbbl %ecx, -108(%ebp) # 4-byte Folded Spill + sbbl $0, %esi + movl 40(%eax), %ecx + movl %ecx, -160(%ebp) # 4-byte Spill + subl %ecx, %edx + movl 44(%eax), %ecx + movl %ecx, -164(%ebp) # 4-byte Spill + sbbl %ecx, %ebx + movl 48(%eax), %ecx + movl %ecx, -168(%ebp) # 4-byte Spill + sbbl %ecx, -116(%ebp) # 4-byte Folded Spill + movl 52(%eax), %ecx + movl %ecx, -172(%ebp) # 4-byte Spill + movl -144(%ebp), %edi # 4-byte Reload + sbbl %ecx, %edi + movl 56(%eax), %ecx + movl %ecx, -176(%ebp) # 4-byte Spill + sbbl %ecx, -120(%ebp) # 4-byte Folded Spill + movl 60(%eax), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + sbbl %ecx, -112(%ebp) # 4-byte Folded Spill + movl 64(%eax), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + sbbl %ecx, -96(%ebp) # 4-byte Folded Spill + movl 68(%eax), %ecx + movl %ecx, -148(%ebp) # 4-byte Spill + sbbl %ecx, -100(%ebp) # 4-byte Folded Spill + movl 72(%eax), %ecx + movl %ecx, -152(%ebp) # 4-byte Spill + sbbl %ecx, -104(%ebp) # 4-byte Folded Spill + movl 76(%eax), %ecx + movl %ecx, -156(%ebp) # 4-byte Spill + sbbl %ecx, -108(%ebp) # 4-byte Folded Spill + sbbl $0, %esi + addl -124(%ebp), %edx # 4-byte Folded Reload + adcl -128(%ebp), %ebx # 4-byte Folded Reload + movl %edx, 20(%eax) + movl -116(%ebp), %ecx # 4-byte Reload + adcl -132(%ebp), %ecx # 4-byte Folded Reload + movl %ebx, 24(%eax) + adcl -136(%ebp), %edi # 4-byte Folded Reload + movl %ecx, 28(%eax) + movl -120(%ebp), %edx # 4-byte Reload + adcl -140(%ebp), %edx # 4-byte Folded Reload + movl %edi, 32(%eax) + movl -112(%ebp), %ecx # 4-byte Reload + adcl -160(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 36(%eax) + movl -96(%ebp), %edx # 4-byte Reload + adcl -164(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 40(%eax) + movl -100(%ebp), %ecx # 4-byte Reload + adcl -168(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 44(%eax) + movl -104(%ebp), %edx # 4-byte 
Reload + adcl -172(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 48(%eax) + movl -108(%ebp), %ecx # 4-byte Reload + adcl -176(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 52(%eax) + adcl -180(%ebp), %esi # 4-byte Folded Reload + movl %ecx, 56(%eax) + movl %esi, 60(%eax) + movl -144(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 64(%eax) + movl -148(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 68(%eax) + movl -152(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 72(%eax) + movl -156(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 76(%eax) + addl $188, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end145: + .size mcl_fpDbl_sqrPre10Lbmi2, .Lfunc_end145-mcl_fpDbl_sqrPre10Lbmi2 + + .globl mcl_fp_mont10Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont10Lbmi2,@function +mcl_fp_mont10Lbmi2: # @mcl_fp_mont10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1036, %esp # imm = 0x40C + calll .L146$pb +.L146$pb: + popl %ebx +.Ltmp17: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp17-.L146$pb), %ebx + movl 1068(%esp), %eax + movl -4(%eax), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 992(%esp), %edi + movl 996(%esp), %ebp + movl %edi, %eax + imull %esi, %eax + movl 1032(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1028(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1024(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1020(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1016(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1012(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1008(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1004(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1000(%esp), %esi + movl %eax, (%esp) + leal 944(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + addl 944(%esp), %edi + adcl 948(%esp), %ebp + adcl 952(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 1064(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 896(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + andl $1, %edi + addl 896(%esp), %ebp + adcl 900(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax 
# 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 936(%esp), %edi + sbbl %eax, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %ebp, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 848(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + movl 64(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 848(%esp), %ebp + adcl 852(%esp), %esi + movl 44(%esp), %ecx # 4-byte Reload + adcl 856(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 860(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 864(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 868(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 872(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 876(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 880(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 884(%esp), %ebp + adcl 888(%esp), %edi + adcl $0, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 800(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + addl 800(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 832(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + adcl 836(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %esi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 752(%esp), %ecx + movl 1068(%esp), %eax + movl %eax, %edx + calll .LmulPv320x32 + andl $1, %edi + movl %edi, %ecx + addl 752(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 768(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 780(%esp), %esi + movl 76(%esp), %edi # 4-byte Reload + adcl 784(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 
792(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 44(%esp), %ecx # 4-byte Reload + addl 704(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 716(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 728(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + adcl 732(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 736(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 744(%esp), %edi + sbbl %esi, %esi + movl %ecx, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 656(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %esi + movl %esi, %ecx + movl 44(%esp), %eax # 4-byte Reload + addl 656(%esp), %eax + movl 40(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 676(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 688(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 696(%esp), %edi + adcl $0, %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 608(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 40(%esp), %ecx # 4-byte Reload + addl 608(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 624(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 636(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 640(%esp), %esi + adcl 644(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 648(%esp), %edi + sbbl %ebp, %ebp + movl %ecx, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %ebp + movl %ebp, %ecx + movl 40(%esp), %eax # 4-byte Reload + addl 560(%esp), %eax + movl 36(%esp), 
%eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 572(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 592(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 600(%esp), %edi + adcl $0, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 512(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 36(%esp), %ecx # 4-byte Reload + addl 512(%esp), %ecx + movl 48(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl %ebp, %esi + adcl 520(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 548(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 464(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %ebp + movl %ebp, %eax + addl 464(%esp), %edi + movl 48(%esp), %ecx # 4-byte Reload + adcl 468(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 472(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 480(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 484(%esp), %esi + movl 60(%esp), %ecx # 4-byte Reload + adcl 488(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 492(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 496(%esp), %ebp + movl 44(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 504(%esp), %edi + adcl $0, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 1060(%esp), %eax + movl %eax, %edx + calll .LmulPv320x32 + movl 48(%esp), %ecx # 4-byte Reload + addl 416(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl 
%eax, 72(%esp) # 4-byte Spill + adcl 432(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 444(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 452(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %edi + movl %edi, %ecx + addl 368(%esp), %esi + movl 56(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 380(%esp), %esi + movl 76(%esp), %ebp # 4-byte Reload + adcl 384(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 400(%esp), %edi + movl 40(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 320(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 56(%esp), %ecx # 4-byte Reload + addl 320(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 328(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + adcl 332(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 348(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 360(%esp), %ebp + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %edi + movl %edi, %ecx + addl 272(%esp), %esi + movl 68(%esp), %edi # 4-byte Reload + adcl 276(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 288(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill 
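+# mcl_fp_mont10Lbmi2 repeats the word-serial Montgomery (CIOS-style)
+# round: t += y[i]*x via .LmulPv320x32 on the vector at 1060(%esp),
+# m = t[0]*n' via the imull with n' spilled at 32(%esp), then t += m*n
+# via .LmulPv320x32 on the modulus at 1068(%esp); the adcl chain around
+# this point folds the 11-word product back into the spill slots while
+# sbbl/andl $1 tracks the extra carry word.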
+ movl 44(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 312(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl $0, %ebp + movl 1064(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl %edi, %ecx + addl 224(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 236(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 240(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 264(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 176(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %ebp + addl 176(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %edi, %esi + adcl 192(%esp), %esi + movl 52(%esp), %edi # 4-byte Reload + adcl 196(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %ebp + movl 1064(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 128(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 140(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + adcl 144(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 68(%esp) # 
4-byte Spill + adcl 168(%esp), %ebp + sbbl %esi, %esi + movl 32(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 80(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %esi + addl 80(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + movl 64(%esp), %ebx # 4-byte Reload + adcl 84(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 88(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 92(%esp), %ebx + movl 52(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %edx, %edi + movl 36(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 112(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 120(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + adcl $0, %esi + movl 1068(%esp), %edx + subl (%edx), %eax + sbbl 4(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl %ebx, %ecx + sbbl 8(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + sbbl 12(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + sbbl 16(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + sbbl 20(%edx), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + sbbl 24(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + sbbl 28(%edx), %ecx + movl 68(%esp), %edi # 4-byte Reload + sbbl 32(%edx), %edi + movl %edi, 64(%esp) # 4-byte Spill + sbbl 36(%edx), %ebp + movl %ebp, %edx + sbbl $0, %esi + andl $1, %esi + jne .LBB146_2 +# BB#1: + movl %ecx, 48(%esp) # 4-byte Spill +.LBB146_2: + movl %esi, %ecx + testb %cl, %cl + movl 76(%esp), %esi # 4-byte Reload + jne .LBB146_4 +# BB#3: + movl %eax, %esi +.LBB146_4: + movl 1056(%esp), %eax + movl %esi, (%eax) + movl 60(%esp), %edi # 4-byte Reload + jne .LBB146_6 +# BB#5: + movl 16(%esp), %edi # 4-byte Reload +.LBB146_6: + movl %edi, 4(%eax) + jne .LBB146_8 +# BB#7: + movl 20(%esp), %ebx # 4-byte Reload +.LBB146_8: + movl %ebx, 8(%eax) + movl 52(%esp), %ebp # 4-byte Reload + movl 44(%esp), %ecx # 4-byte Reload + jne .LBB146_10 +# BB#9: + movl 24(%esp), %ebp # 4-byte Reload +.LBB146_10: + movl %ebp, 12(%eax) + jne .LBB146_12 +# BB#11: + movl 28(%esp), %ecx # 4-byte Reload +.LBB146_12: + movl %ecx, 16(%eax) + movl 40(%esp), %ecx # 4-byte Reload + jne .LBB146_14 +# BB#13: + movl 32(%esp), %ecx # 4-byte Reload +.LBB146_14: + movl %ecx, 20(%eax) + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB146_16 +# BB#15: + movl 56(%esp), %ecx # 4-byte Reload +.LBB146_16: + movl %ecx, 24(%eax) + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 68(%esp), %ecx # 4-byte Reload + jne .LBB146_18 +# BB#17: + movl 64(%esp), %ecx # 4-byte Reload +.LBB146_18: + movl %ecx, 32(%eax) + movl 72(%esp), %ecx # 4-byte Reload + jne .LBB146_20 +# BB#19: + movl %edx, %ecx +.LBB146_20: + movl %ecx, 36(%eax) + addl $1036, %esp # imm = 0x40C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end146: + .size mcl_fp_mont10Lbmi2, .Lfunc_end146-mcl_fp_mont10Lbmi2 + + .globl mcl_fp_montNF10Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF10Lbmi2,@function 
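+# montNF is the Montgomery variant that tracks no extra carry word:
+# where mcl_fp_mont10Lbmi2 above keeps a carry via sbbl/andl $1 and
+# selects the result from it, the final reduction below subtracts the
+# modulus once and picks t or t-n from the sign bit (sarl $31 / js).
+# Reference sketch of the round both functions implement (C-like
+# pseudocode, 32-bit words, hypothetical names):
+#   for (i = 0; i < 10; i++) {
+#     t += (wide)y[i] * x;            // first .LmulPv320x32
+#     m  = (uint32_t)t * nprime;      // imull; n' = -p^-1 mod 2^32
+#     t  = (t + (wide)m * p) >> 32;   // second .LmulPv320x32 + adcl chain
+#   }
+#   if (t >= p) t -= p;               // one conditional subtraction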
+mcl_fp_montNF10Lbmi2: # @mcl_fp_montNF10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1020, %esp # imm = 0x3FC + calll .L147$pb +.L147$pb: + popl %ebx +.Ltmp18: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp18-.L147$pb), %ebx + movl 1052(%esp), %eax + movl -4(%eax), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 976(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 976(%esp), %edi + movl 980(%esp), %esi + movl %edi, %eax + imull %ebp, %eax + movl 1016(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1012(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1008(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1004(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1000(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 996(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 992(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 988(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 984(%esp), %ebp + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 928(%esp), %edi + adcl 932(%esp), %esi + adcl 936(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 952(%esp), %edi + movl 36(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 880(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 920(%esp), %ecx + addl 880(%esp), %esi + adcl 884(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + adcl 900(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl %esi, %eax + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 832(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 832(%esp), %esi + adcl 836(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %edi # 4-byte Reload + adcl 848(%esp), %edi + movl 40(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + 
adcl 856(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 872(%esp), %esi + movl 1048(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 824(%esp), %ecx + addl 784(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 796(%esp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 808(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 820(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl %ebp, %eax + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 736(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 736(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 760(%esp), %edi + movl 56(%esp), %ebp # 4-byte Reload + adcl 764(%esp), %ebp + movl 60(%esp), %esi # 4-byte Reload + adcl 768(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 688(%esp), %ecx + movl 1044(%esp), %eax + movl %eax, %edx + calll .LmulPv320x32 + movl 728(%esp), %eax + movl 28(%esp), %edx # 4-byte Reload + addl 688(%esp), %edx + movl 44(%esp), %ecx # 4-byte Reload + adcl 692(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 24(%esp), %ecx # 4-byte Reload + adcl 696(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 704(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 708(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + adcl 712(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + adcl 716(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 720(%esp), %ebp + movl 32(%esp), %esi # 4-byte Reload + adcl 724(%esp), %esi + adcl $0, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %edi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 640(%esp), %ecx + movl 1052(%esp), %eax 
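+# m = t[0]*n' was just formed by the imull with n' spilled at 20(%esp);
+# %eax now holds the modulus pointer (1052(%esp)) and is moved into
+# %edx below for the t += m*n half of the round. The addl/adcl chain
+# after that call drops the low word, which the choice of n' forces
+# to zero.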
+ movl %eax, %edx + calll .LmulPv320x32 + addl 640(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 656(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 672(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 676(%esp), %esi + movl %esi, %ebp + movl 28(%esp), %esi # 4-byte Reload + adcl 680(%esp), %esi + movl 1048(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 592(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 632(%esp), %edx + movl 44(%esp), %ecx # 4-byte Reload + addl 592(%esp), %ecx + movl 24(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 604(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 624(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl 628(%esp), %esi + movl %esi, 28(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 544(%esp), %esi + movl 24(%esp), %edi # 4-byte Reload + adcl 548(%esp), %edi + movl 40(%esp), %esi # 4-byte Reload + adcl 552(%esp), %esi + movl 36(%esp), %ebp # 4-byte Reload + adcl 556(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 536(%esp), %edx + addl 496(%esp), %edi + adcl 500(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + adcl 504(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), 
%eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %esi # 4-byte Reload + adcl 528(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl %edi, %eax + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 448(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 448(%esp), %edi + movl 40(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 456(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 464(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 480(%esp), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %esi # 4-byte Reload + adcl 488(%esp), %esi + movl 1048(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 400(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 440(%esp), %eax + movl 40(%esp), %ecx # 4-byte Reload + addl 400(%esp), %ecx + adcl 404(%esp), %ebp + movl 48(%esp), %edx # 4-byte Reload + adcl 408(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 412(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 416(%esp), %edi + movl 52(%esp), %edx # 4-byte Reload + adcl 420(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + adcl 424(%esp), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 28(%esp), %edx # 4-byte Reload + adcl 428(%esp), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 432(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 436(%esp), %esi + movl %esi, 24(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 352(%esp), %esi + adcl 356(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 360(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %edi, %esi + adcl 368(%esp), %esi + movl 52(%esp), %edi # 4-byte Reload + adcl 372(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 1044(%esp), %eax + movl %eax, %edx + calll .LmulPv320x32 + movl 344(%esp), %edx + movl 36(%esp), %ecx # 4-byte Reload + addl 
304(%esp), %ecx + adcl 308(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 316(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 320(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + adcl 324(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 332(%esp), %esi + movl 24(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 256(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 256(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 272(%esp), %edi + adcl 276(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + adcl 284(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 24(%esp), %esi # 4-byte Reload + adcl 288(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 248(%esp), %edx + movl 48(%esp), %ecx # 4-byte Reload + addl 208(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 220(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 224(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 236(%esp), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 160(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 160(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl %ebp, %edi + adcl 176(%esp), %edi + movl 28(%esp), %esi # 4-byte Reload + adcl 180(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 
40(%esp), %ebp # 4-byte Reload + adcl 192(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 152(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + addl 112(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 120(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 124(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + adcl 128(%esp), %esi + movl %esi, %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + adcl 140(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 144(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %ebp + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 64(%esp), %ebp + movl %edi, %ebp + movl 60(%esp), %eax # 4-byte Reload + movl 32(%esp), %ebx # 4-byte Reload + adcl 68(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 72(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 76(%esp), %ebx + adcl 80(%esp), %ebp + movl 44(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 24(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl 96(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, %edx + movl 1052(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %ecx + movl %ebx, %eax + sbbl 8(%edi), %eax + movl %ebp, %esi + sbbl 12(%edi), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 16(%edi), %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 24(%esp), %esi # 4-byte Reload + sbbl 20(%edi), %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 40(%esp), %esi # 4-byte Reload + sbbl 24(%edi), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + sbbl 28(%edi), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + sbbl 32(%edi), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + sbbl 36(%edi), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl %esi, %edi + sarl $31, %edi + testl %edi, %edi + movl 60(%esp), %edi # 4-byte Reload + js .LBB147_2 +# BB#1: + movl %edx, %edi +.LBB147_2: + movl 1040(%esp), %edx + movl %edi, (%edx) + movl 52(%esp), %edi # 4-byte Reload + js .LBB147_4 +# BB#3: + movl %ecx, %edi +.LBB147_4: + movl %edi, 4(%edx) + js .LBB147_6 +# BB#5: + movl %eax, %ebx +.LBB147_6: + movl %ebx, 8(%edx) + js .LBB147_8 +# BB#7: + movl 4(%esp), %ebp # 4-byte Reload +.LBB147_8: + movl %ebp, 12(%edx) + movl 44(%esp), %esi # 4-byte Reload + 
movl 24(%esp), %eax # 4-byte Reload + js .LBB147_10 +# BB#9: + movl 8(%esp), %esi # 4-byte Reload +.LBB147_10: + movl %esi, 16(%edx) + js .LBB147_12 +# BB#11: + movl 12(%esp), %eax # 4-byte Reload +.LBB147_12: + movl %eax, 20(%edx) + movl 40(%esp), %eax # 4-byte Reload + js .LBB147_14 +# BB#13: + movl 16(%esp), %eax # 4-byte Reload +.LBB147_14: + movl %eax, 24(%edx) + movl 36(%esp), %eax # 4-byte Reload + js .LBB147_16 +# BB#15: + movl 20(%esp), %eax # 4-byte Reload +.LBB147_16: + movl %eax, 28(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB147_18 +# BB#17: + movl 28(%esp), %eax # 4-byte Reload +.LBB147_18: + movl %eax, 32(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB147_20 +# BB#19: + movl 32(%esp), %eax # 4-byte Reload +.LBB147_20: + movl %eax, 36(%edx) + addl $1020, %esp # imm = 0x3FC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end147: + .size mcl_fp_montNF10Lbmi2, .Lfunc_end147-mcl_fp_montNF10Lbmi2 + + .globl mcl_fp_montRed10Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed10Lbmi2,@function +mcl_fp_montRed10Lbmi2: # @mcl_fp_montRed10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $604, %esp # imm = 0x25C + calll .L148$pb +.L148$pb: + popl %eax +.Ltmp19: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp19-.L148$pb), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 632(%esp), %edx + movl -4(%edx), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 628(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 68(%esp) # 4-byte Spill + imull %esi, %ebx + movl 76(%ecx), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 72(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 60(%ecx), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 56(%ecx), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 48(%ecx), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 44(%ecx), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 40(%ecx), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 36(%ecx), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 28(%ecx), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 24(%ecx), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 20(%ecx), %ebp + movl 16(%ecx), %edi + movl 12(%ecx), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 8(%ecx), %esi + movl (%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 560(%esp), %ecx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + movl 56(%esp), %eax # 4-byte Reload + addl 560(%esp), %eax + movl 68(%esp), %ecx # 4-byte Reload + adcl 564(%esp), %ecx + adcl 568(%esp), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 576(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + adcl 
580(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 108(%esp) # 4-byte Folded Spill + movl 76(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 84(%esp) # 4-byte Folded Spill + sbbl %eax, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 512(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + movl 68(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 512(%esp), %esi + movl 4(%esp), %edx # 4-byte Reload + adcl 516(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + adcl 520(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 524(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 528(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 532(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 536(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 540(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 544(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 548(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 552(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 88(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %edi + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 464(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 464(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + adcl 468(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 492(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 
496(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %edi + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ecx, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + movl 52(%esp), %eax # 4-byte Reload + addl 416(%esp), %eax + movl 48(%esp), %ecx # 4-byte Reload + adcl 420(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 440(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 120(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + movl 60(%esp), %edi # 4-byte Reload + imull %edi, %eax + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 632(%esp), %eax + movl %eax, %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 368(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + adcl 372(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 404(%esp), %ebp + movl 120(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 84(%esp) # 4-byte Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + 
imull %edi, %eax + movl %eax, (%esp) + leal 320(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 320(%esp), %esi + movl 72(%esp), %ecx # 4-byte Reload + adcl 324(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %edi # 4-byte Reload + adcl 344(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 352(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 360(%esp), %esi + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 272(%esp), %ebp + movl 96(%esp), %ecx # 4-byte Reload + adcl 276(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl 288(%esp), %ebp + adcl 292(%esp), %edi + movl %edi, 124(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 296(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 308(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 312(%esp), %esi + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ecx, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + movl 96(%esp), %eax # 4-byte Reload + addl 224(%esp), %eax + movl 100(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl 92(%esp), %ecx # 4-byte Reload + adcl 232(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 236(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 240(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + adcl 244(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 248(%esp), %ebp + movl 120(%esp), %ecx # 4-byte Reload + adcl 252(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 256(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 260(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 264(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded 
Spill + movl 68(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %eax, %edi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 176(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 176(%esp), %edi + movl 92(%esp), %ecx # 4-byte Reload + adcl 180(%esp), %ecx + movl 112(%esp), %edi # 4-byte Reload + adcl 184(%esp), %edi + movl 124(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 196(%esp), %ebp + movl 120(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 128(%esp), %esi + movl %edi, %eax + adcl 132(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %eax, %edi + movl 124(%esp), %ecx # 4-byte Reload + adcl 136(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 140(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + adcl 144(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl %ebp, %edx + movl 120(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 76(%esp), %ebx # 4-byte Reload + adcl 164(%esp), %ebx + movl %ebx, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + subl 12(%esp), %edi # 4-byte Folded Reload + sbbl 8(%esp), %ecx # 4-byte Folded Reload + sbbl 16(%esp), %esi # 4-byte Folded Reload + sbbl 20(%esp), %edx # 4-byte Folded Reload + movl 120(%esp), %eax # 4-byte Reload + sbbl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + sbbl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl %ebp, %eax + sbbl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 100(%esp) # 4-byte Spill + movl 84(%esp), %ebx # 4-byte Reload + sbbl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 96(%esp) # 4-byte Spill + sbbl $0, %eax + andl $1, %eax + jne .LBB148_2 +# BB#1: + movl %edx, 80(%esp) # 4-byte Spill +.LBB148_2: + testb %al, %al + movl 112(%esp), %edx # 4-byte Reload + jne .LBB148_4 +# BB#3: + movl %edi, %edx 
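+# NOTE (annotation, not compiler output): %eax above holds the borrow from
+# subtracting the modulus, i.e. the conditional final subtraction of
+# Montgomery reduction. The testb/jne ladder around this point selects,
+# limb by limb, the reduced value t - p when no borrow occurred and the
+# unreduced t otherwise, storing the chosen limbs to the result at 624(%esp).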
+.LBB148_4: + movl 624(%esp), %edi + movl %edx, (%edi) + movl 108(%esp), %edx # 4-byte Reload + jne .LBB148_6 +# BB#5: + movl %ecx, 124(%esp) # 4-byte Spill +.LBB148_6: + movl 124(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%edi) + movl 116(%esp), %ecx # 4-byte Reload + jne .LBB148_8 +# BB#7: + movl %esi, %ecx +.LBB148_8: + movl %ecx, 8(%edi) + movl 80(%esp), %eax # 4-byte Reload + movl %eax, 12(%edi) + movl 76(%esp), %ecx # 4-byte Reload + movl 120(%esp), %eax # 4-byte Reload + jne .LBB148_10 +# BB#9: + movl 64(%esp), %eax # 4-byte Reload +.LBB148_10: + movl %eax, 16(%edi) + movl 84(%esp), %eax # 4-byte Reload + movl 104(%esp), %ebp # 4-byte Reload + jne .LBB148_12 +# BB#11: + movl 68(%esp), %ebp # 4-byte Reload +.LBB148_12: + movl %ebp, 20(%edi) + movl 88(%esp), %ebx # 4-byte Reload + jne .LBB148_14 +# BB#13: + movl 72(%esp), %ebx # 4-byte Reload +.LBB148_14: + movl %ebx, 24(%edi) + jne .LBB148_16 +# BB#15: + movl 92(%esp), %edx # 4-byte Reload +.LBB148_16: + movl %edx, 28(%edi) + jne .LBB148_18 +# BB#17: + movl 100(%esp), %ecx # 4-byte Reload +.LBB148_18: + movl %ecx, 32(%edi) + jne .LBB148_20 +# BB#19: + movl 96(%esp), %eax # 4-byte Reload +.LBB148_20: + movl %eax, 36(%edi) + addl $604, %esp # imm = 0x25C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end148: + .size mcl_fp_montRed10Lbmi2, .Lfunc_end148-mcl_fp_montRed10Lbmi2 + + .globl mcl_fp_addPre10Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre10Lbmi2,@function +mcl_fp_addPre10Lbmi2: # @mcl_fp_addPre10Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ebx) + movl 32(%ecx), %esi + adcl %edi, %esi + movl %edx, 28(%ebx) + movl %esi, 32(%ebx) + movl 36(%eax), %eax + movl 36(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 36(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end149: + .size mcl_fp_addPre10Lbmi2, .Lfunc_end149-mcl_fp_addPre10Lbmi2 + + .globl mcl_fp_subPre10Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre10Lbmi2,@function +mcl_fp_subPre10Lbmi2: # @mcl_fp_subPre10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ebp) + movl 32(%ecx), %edi + sbbl %ebx, %edi + movl %esi, 28(%ebp) + movl %edi, 32(%ebp) + movl 36(%edx), %edx + movl 36(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 36(%ebp) + sbbl $0, %eax + andl 
$1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end150: + .size mcl_fp_subPre10Lbmi2, .Lfunc_end150-mcl_fp_subPre10Lbmi2 + + .globl mcl_fp_shr1_10Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_10Lbmi2,@function +mcl_fp_shr1_10Lbmi2: # @mcl_fp_shr1_10Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + shrdl $1, %edx, %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 8(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 4(%esi) + movl 12(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 8(%esi) + movl 16(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 12(%esi) + movl 20(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 16(%esi) + movl 24(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 20(%esi) + movl 28(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 24(%esi) + movl 32(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 28(%esi) + movl 36(%eax), %eax + shrdl $1, %eax, %ecx + movl %ecx, 32(%esi) + shrl %eax + movl %eax, 36(%esi) + popl %esi + retl +.Lfunc_end151: + .size mcl_fp_shr1_10Lbmi2, .Lfunc_end151-mcl_fp_shr1_10Lbmi2 + + .globl mcl_fp_add10Lbmi2 + .align 16, 0x90 + .type mcl_fp_add10Lbmi2,@function +mcl_fp_add10Lbmi2: # @mcl_fp_add10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $24, %esp + movl 52(%esp), %edi + movl (%edi), %ecx + movl 4(%edi), %eax + movl 48(%esp), %ebx + addl (%ebx), %ecx + movl %ecx, (%esp) # 4-byte Spill + adcl 4(%ebx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 8(%edi), %eax + adcl 8(%ebx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 12(%ebx), %ecx + movl 16(%ebx), %eax + adcl 12(%edi), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + adcl 16(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 20(%ebx), %eax + adcl 20(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 24(%ebx), %esi + adcl 24(%edi), %esi + movl 28(%ebx), %ebp + adcl 28(%edi), %ebp + movl 32(%ebx), %edx + adcl 32(%edi), %edx + movl 36(%ebx), %ecx + adcl 36(%edi), %ecx + movl 44(%esp), %edi + movl (%esp), %ebx # 4-byte Reload + movl %ebx, (%edi) + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 4(%edi) + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 8(%edi) + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 12(%edi) + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 16(%edi) + movl 4(%esp), %eax # 4-byte Reload + movl %eax, 20(%edi) + movl %esi, 24(%edi) + movl %ebp, 28(%edi) + movl %edx, 32(%edi) + movl %ecx, 36(%edi) + sbbl %eax, %eax + andl $1, %eax + movl 56(%esp), %edi + subl (%edi), %ebx + movl %ebx, (%esp) # 4-byte Spill + movl 20(%esp), %ebx # 4-byte Reload + sbbl 4(%edi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 16(%esp), %ebx # 4-byte Reload + sbbl 8(%edi), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 12(%esp), %ebx # 4-byte Reload + sbbl 12(%edi), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 8(%esp), %ebx # 4-byte Reload + sbbl 16(%edi), %ebx + movl %ebx, 8(%esp) # 4-byte Spill + movl 4(%esp), %ebx # 4-byte Reload + sbbl 20(%edi), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + sbbl 24(%edi), %esi + sbbl 28(%edi), %ebp + sbbl 32(%edi), %edx + sbbl 36(%edi), %ecx + sbbl $0, %eax + testb $1, %al + jne .LBB152_2 +# BB#1: # %nocarry + movl (%esp), %edi # 4-byte Reload + movl 44(%esp), %ebx + movl %edi, (%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 12(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 8(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 4(%esp), %eax # 4-byte Reload + movl 
%eax, 20(%ebx) + movl %esi, 24(%ebx) + movl %ebp, 28(%ebx) + movl %edx, 32(%ebx) + movl %ecx, 36(%ebx) +.LBB152_2: # %carry + addl $24, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end152: + .size mcl_fp_add10Lbmi2, .Lfunc_end152-mcl_fp_add10Lbmi2 + + .globl mcl_fp_addNF10Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF10Lbmi2,@function +mcl_fp_addNF10Lbmi2: # @mcl_fp_addNF10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 100(%esp), %ecx + movl (%ecx), %eax + movl 4(%ecx), %esi + movl 96(%esp), %edx + addl (%edx), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 4(%edx), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 36(%ecx), %edi + movl 32(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 28(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 24(%ecx), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 20(%ecx), %ebp + movl 16(%ecx), %ebx + movl 12(%ecx), %eax + movl 8(%ecx), %esi + adcl 8(%edx), %esi + adcl 12(%edx), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 16(%edx), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + adcl 20(%edx), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 24(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 28(%edx), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 32(%edx), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl %esi, %ecx + adcl 36(%edx), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 104(%esp), %edi + movl 52(%esp), %edx # 4-byte Reload + subl (%edi), %edx + movl 56(%esp), %esi # 4-byte Reload + sbbl 4(%edi), %esi + movl %esi, (%esp) # 4-byte Spill + movl %ecx, %esi + sbbl 8(%edi), %esi + movl %esi, 4(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + sbbl 16(%edi), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + sbbl 20(%edi), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + movl %esi, %eax + movl %esi, %ebp + sbbl 24(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + movl %esi, %eax + movl %esi, %ebx + sbbl 32(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + movl %eax, %esi + sbbl 36(%edi), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl %esi, %edi + movl 52(%esp), %esi # 4-byte Reload + sarl $31, %edi + testl %edi, %edi + js .LBB153_2 +# BB#1: + movl %edx, %esi +.LBB153_2: + movl 92(%esp), %edx + movl %esi, (%edx) + movl 56(%esp), %esi # 4-byte Reload + js .LBB153_4 +# BB#3: + movl (%esp), %esi # 4-byte Reload +.LBB153_4: + movl %esi, 4(%edx) + movl %ebp, %edi + movl 40(%esp), %esi # 4-byte Reload + js .LBB153_6 +# BB#5: + movl 4(%esp), %ecx # 4-byte Reload +.LBB153_6: + movl %ecx, 8(%edx) + movl %ebx, %ecx + movl 44(%esp), %ebp # 4-byte Reload + js .LBB153_8 +# BB#7: + movl 8(%esp), %esi # 4-byte Reload +.LBB153_8: + movl %esi, 12(%edx) + movl 68(%esp), %esi # 4-byte Reload + movl 48(%esp), %ebx # 4-byte Reload + js .LBB153_10 +# BB#9: + movl 12(%esp), %ebp # 4-byte Reload +.LBB153_10: + movl %ebp, 16(%edx) + js .LBB153_12 +# BB#11: + movl 16(%esp), %ebx # 4-byte Reload +.LBB153_12: + movl %ebx, 20(%edx) + js .LBB153_14 +# BB#13: + movl 20(%esp), %edi # 4-byte Reload +.LBB153_14: + movl %edi, 24(%edx) + js .LBB153_16 +# BB#15: + movl 24(%esp), %esi # 4-byte Reload +.LBB153_16: + movl %esi, 28(%edx) + js .LBB153_18 +# BB#17: 
+ movl 28(%esp), %ecx # 4-byte Reload +.LBB153_18: + movl %ecx, 32(%edx) + js .LBB153_20 +# BB#19: + movl 32(%esp), %eax # 4-byte Reload +.LBB153_20: + movl %eax, 36(%edx) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end153: + .size mcl_fp_addNF10Lbmi2, .Lfunc_end153-mcl_fp_addNF10Lbmi2 + + .globl mcl_fp_sub10Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub10Lbmi2,@function +mcl_fp_sub10Lbmi2: # @mcl_fp_sub10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 56(%esp), %esi + movl (%esi), %ecx + movl 4(%esi), %eax + xorl %ebx, %ebx + movl 60(%esp), %edi + subl (%edi), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 20(%esi), %edx + sbbl 20(%edi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 24(%esi), %ecx + sbbl 24(%edi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 32(%esi), %ebp + sbbl 32(%edi), %ebp + movl 36(%esi), %esi + sbbl 36(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 52(%esp), %ebx + movl 12(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl %edx, 20(%ebx) + movl %ecx, 24(%ebx) + movl %eax, 28(%ebx) + movl %ebp, 32(%ebx) + movl %esi, 36(%ebx) + je .LBB154_2 +# BB#1: # %carry + movl %esi, %edi + movl 64(%esp), %esi + movl 12(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 24(%esp), %ecx # 4-byte Reload + adcl 8(%esi), %ecx + movl 12(%esi), %eax + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebx) + movl 16(%esi), %ecx + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl %eax, 28(%ebx) + movl 32(%esi), %eax + adcl %ebp, %eax + movl %eax, 32(%ebx) + movl 36(%esi), %eax + adcl %edi, %eax + movl %eax, 36(%ebx) +.LBB154_2: # %nocarry + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end154: + .size mcl_fp_sub10Lbmi2, .Lfunc_end154-mcl_fp_sub10Lbmi2 + + .globl mcl_fp_subNF10Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF10Lbmi2,@function +mcl_fp_subNF10Lbmi2: # @mcl_fp_subNF10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 80(%esp), %eax + movl 36(%eax), %esi + movl (%eax), %edi + movl 4(%eax), %edx + movl 84(%esp), %ecx + subl (%ecx), %edi + movl %edi, 36(%esp) # 4-byte Spill + sbbl 4(%ecx), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 32(%eax), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 28(%eax), %edi + movl 24(%eax), %ebx + movl 20(%eax), %ebp + movl 16(%eax), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 12(%eax), %edx + movl 8(%eax), %eax + sbbl 8(%ecx), %eax + movl %eax, 16(%esp) # 4-byte Spill + sbbl 12(%ecx), 
%edx + movl %edx, 24(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 16(%ecx), %eax + movl %eax, 52(%esp) # 4-byte Spill + sbbl 20(%ecx), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + sbbl 24(%ecx), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + sbbl 28(%ecx), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + sbbl 32(%ecx), %eax + movl %eax, 48(%esp) # 4-byte Spill + sbbl 36(%ecx), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl %esi, %eax + sarl $31, %eax + movl %eax, %edx + addl %edx, %edx + movl %eax, %ecx + adcl %ecx, %ecx + movl %esi, %ebx + shrl $31, %ebx + orl %edx, %ebx + movl 88(%esp), %edi + movl 20(%edi), %edx + andl %ecx, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 12(%edi), %edx + andl %ecx, %edx + movl %edx, 4(%esp) # 4-byte Spill + andl 4(%edi), %ecx + movl 16(%edi), %edx + andl %ebx, %edx + movl %edx, (%esp) # 4-byte Spill + movl 8(%edi), %edx + andl %ebx, %edx + andl (%edi), %ebx + movl 36(%edi), %esi + andl %eax, %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 32(%edi), %ebp + andl %eax, %ebp + movl 28(%edi), %esi + andl %eax, %esi + andl 24(%edi), %eax + addl 36(%esp), %ebx # 4-byte Folded Reload + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl 76(%esp), %edi + movl %ebx, (%edi) + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %ecx, 4(%edi) + movl 4(%esp), %ecx # 4-byte Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %edx, 8(%edi) + movl (%esp), %edx # 4-byte Reload + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %ecx, 12(%edi) + movl 12(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %edx, 16(%edi) + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ecx, 20(%edi) + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %eax, 24(%edi) + adcl 48(%esp), %ebp # 4-byte Folded Reload + movl %esi, 28(%edi) + movl %ebp, 32(%edi) + movl 8(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%edi) + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end155: + .size mcl_fp_subNF10Lbmi2, .Lfunc_end155-mcl_fp_subNF10Lbmi2 + + .globl mcl_fpDbl_add10Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add10Lbmi2,@function +mcl_fpDbl_add10Lbmi2: # @mcl_fpDbl_add10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 100(%esp), %edx + movl 96(%esp), %edi + movl 12(%edi), %esi + movl 16(%edi), %ecx + movl 8(%edx), %ebx + movl (%edx), %ebp + addl (%edi), %ebp + movl 92(%esp), %eax + movl %ebp, (%eax) + movl 4(%edx), %ebp + adcl 4(%edi), %ebp + adcl 8(%edi), %ebx + adcl 12(%edx), %esi + adcl 16(%edx), %ecx + movl %ebp, 4(%eax) + movl 48(%edx), %ebp + movl %ebx, 8(%eax) + movl 20(%edx), %ebx + movl %esi, 12(%eax) + movl 20(%edi), %esi + adcl %ebx, %esi + movl 24(%edx), %ebx + movl %ecx, 16(%eax) + movl 24(%edi), %ecx + adcl %ebx, %ecx + movl 28(%edx), %ebx + movl %esi, 20(%eax) + movl 28(%edi), %esi + adcl %ebx, %esi + movl 32(%edx), %ebx + movl %ecx, 24(%eax) + movl 32(%edi), %ecx + adcl %ebx, %ecx + movl 36(%edx), %ebx + movl %esi, 28(%eax) + movl 36(%edi), %esi + adcl %ebx, %esi + movl 40(%edx), %ebx + movl %ecx, 32(%eax) + movl 40(%edi), %ecx + adcl %ebx, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 44(%edx), %ebx + movl %esi, 36(%eax) + movl 44(%edi), %eax + adcl %ebx, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 48(%edi), %eax + adcl %ebp, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 52(%edx), %eax + movl 52(%edi), %ecx + adcl %eax, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 
56(%edx), %eax + movl 56(%edi), %ecx + adcl %eax, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 60(%edx), %eax + movl 60(%edi), %ecx + adcl %eax, %ecx + movl 64(%edx), %esi + movl 64(%edi), %eax + adcl %esi, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 68(%edx), %ebx + movl 68(%edi), %esi + adcl %ebx, %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 72(%edx), %ebx + movl 72(%edi), %ebp + adcl %ebx, %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 76(%edx), %edx + movl 76(%edi), %edi + adcl %edx, %edi + movl %edi, 36(%esp) # 4-byte Spill + sbbl %edx, %edx + andl $1, %edx + movl 104(%esp), %ebx + movl 64(%esp), %edi # 4-byte Reload + subl (%ebx), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + sbbl 4(%ebx), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + sbbl 8(%ebx), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + sbbl 12(%ebx), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + sbbl 16(%ebx), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %ecx, %edi + sbbl 20(%ebx), %edi + movl %edi, (%esp) # 4-byte Spill + sbbl 24(%ebx), %eax + movl %eax, 24(%esp) # 4-byte Spill + sbbl 28(%ebx), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl %ebp, %eax + movl 36(%esp), %ebp # 4-byte Reload + sbbl 32(%ebx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl %ebp, %edi + sbbl 36(%ebx), %edi + sbbl $0, %edx + andl $1, %edx + jne .LBB156_2 +# BB#1: + movl %edi, %ebp +.LBB156_2: + testb %dl, %dl + movl 64(%esp), %edx # 4-byte Reload + movl 60(%esp), %esi # 4-byte Reload + movl 56(%esp), %edi # 4-byte Reload + movl 52(%esp), %ebx # 4-byte Reload + jne .LBB156_4 +# BB#3: + movl (%esp), %ecx # 4-byte Reload + movl 4(%esp), %esi # 4-byte Reload + movl 8(%esp), %edi # 4-byte Reload + movl 12(%esp), %ebx # 4-byte Reload + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 20(%esp), %edx # 4-byte Reload +.LBB156_4: + movl 92(%esp), %eax + movl %edx, 40(%eax) + movl 68(%esp), %edx # 4-byte Reload + movl %edx, 44(%eax) + movl %ebx, 48(%eax) + movl %edi, 52(%eax) + movl %esi, 56(%eax) + movl %ecx, 60(%eax) + movl 44(%esp), %edx # 4-byte Reload + movl 40(%esp), %ecx # 4-byte Reload + jne .LBB156_6 +# BB#5: + movl 24(%esp), %ecx # 4-byte Reload +.LBB156_6: + movl %ecx, 64(%eax) + movl 48(%esp), %ecx # 4-byte Reload + jne .LBB156_8 +# BB#7: + movl 28(%esp), %edx # 4-byte Reload +.LBB156_8: + movl %edx, 68(%eax) + jne .LBB156_10 +# BB#9: + movl 32(%esp), %ecx # 4-byte Reload +.LBB156_10: + movl %ecx, 72(%eax) + movl %ebp, 76(%eax) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end156: + .size mcl_fpDbl_add10Lbmi2, .Lfunc_end156-mcl_fpDbl_add10Lbmi2 + + .globl mcl_fpDbl_sub10Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub10Lbmi2,@function +mcl_fpDbl_sub10Lbmi2: # @mcl_fpDbl_sub10Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 84(%esp), %ebp + movl (%ebp), %edx + movl 4(%ebp), %esi + movl 88(%esp), %eax + subl (%eax), %edx + sbbl 4(%eax), %esi + movl 8(%ebp), %edi + sbbl 8(%eax), %edi + movl 80(%esp), %ecx + movl %edx, (%ecx) + movl 12(%ebp), %edx + sbbl 12(%eax), %edx + movl %esi, 4(%ecx) + movl 16(%ebp), %esi + sbbl 16(%eax), %esi + movl %edi, 8(%ecx) + movl 20(%eax), %edi + movl %edx, 12(%ecx) + movl 20(%ebp), %edx + sbbl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ecx) + movl 24(%ebp), %esi + sbbl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ecx) + movl 28(%ebp), %edx + 
sbbl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ecx) + movl 32(%ebp), %esi + sbbl %edi, %esi + movl 36(%eax), %edi + movl %edx, 28(%ecx) + movl 36(%ebp), %edx + sbbl %edi, %edx + movl 40(%eax), %edi + movl %esi, 32(%ecx) + movl 40(%ebp), %esi + sbbl %edi, %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 44(%eax), %esi + movl %edx, 36(%ecx) + movl 44(%ebp), %edx + sbbl %esi, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 48(%eax), %edx + movl 48(%ebp), %esi + sbbl %edx, %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 52(%eax), %edx + movl 52(%ebp), %esi + sbbl %edx, %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 56(%eax), %edx + movl 56(%ebp), %esi + sbbl %edx, %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 60(%eax), %edx + movl 60(%ebp), %esi + sbbl %edx, %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 64(%eax), %edx + movl 64(%ebp), %esi + sbbl %edx, %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 68(%eax), %edx + movl 68(%ebp), %esi + sbbl %edx, %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 72(%eax), %edx + movl 72(%ebp), %esi + sbbl %edx, %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 76(%eax), %eax + movl 76(%ebp), %edx + sbbl %eax, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 92(%esp), %esi + jne .LBB157_1 +# BB#2: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB157_3 +.LBB157_1: + movl 36(%esi), %edx + movl %edx, 16(%esp) # 4-byte Spill +.LBB157_3: + testb %al, %al + jne .LBB157_4 +# BB#5: + movl $0, 8(%esp) # 4-byte Folded Spill + movl $0, %ebx + jmp .LBB157_6 +.LBB157_4: + movl (%esi), %ebx + movl 4(%esi), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB157_6: + jne .LBB157_7 +# BB#8: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB157_9 +.LBB157_7: + movl 32(%esi), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB157_9: + jne .LBB157_10 +# BB#11: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB157_12 +.LBB157_10: + movl 28(%esi), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB157_12: + jne .LBB157_13 +# BB#14: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB157_15 +.LBB157_13: + movl 24(%esi), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB157_15: + jne .LBB157_16 +# BB#17: + movl $0, %ebp + jmp .LBB157_18 +.LBB157_16: + movl 20(%esi), %ebp +.LBB157_18: + jne .LBB157_19 +# BB#20: + movl $0, %eax + jmp .LBB157_21 +.LBB157_19: + movl 16(%esi), %eax +.LBB157_21: + jne .LBB157_22 +# BB#23: + movl $0, %edx + jmp .LBB157_24 +.LBB157_22: + movl 12(%esi), %edx +.LBB157_24: + jne .LBB157_25 +# BB#26: + xorl %esi, %esi + jmp .LBB157_27 +.LBB157_25: + movl 8(%esi), %esi +.LBB157_27: + addl 28(%esp), %ebx # 4-byte Folded Reload + movl 8(%esp), %edi # 4-byte Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %ebx, 40(%ecx) + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %edi, 44(%ecx) + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %esi, 48(%ecx) + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %edx, 52(%ecx) + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %eax, 56(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %ebp, 60(%ecx) + movl 4(%esp), %edx # 4-byte Reload + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %eax, 64(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %edx, 68(%ecx) + movl %eax, 72(%ecx) + movl 16(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%ecx) + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end157: + 
.size mcl_fpDbl_sub10Lbmi2, .Lfunc_end157-mcl_fpDbl_sub10Lbmi2 + + .align 16, 0x90 + .type .LmulPv352x32,@function +.LmulPv352x32: # @mulPv352x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl %edx, %eax + movl 52(%esp), %edx + mulxl 4(%eax), %ebx, %esi + mulxl (%eax), %edi, %ebp + movl %edi, 28(%esp) # 4-byte Spill + addl %ebx, %ebp + movl %ebp, 24(%esp) # 4-byte Spill + mulxl 8(%eax), %edi, %ebx + adcl %esi, %edi + movl %edi, 20(%esp) # 4-byte Spill + mulxl 12(%eax), %esi, %edi + adcl %ebx, %esi + movl %esi, 16(%esp) # 4-byte Spill + mulxl 16(%eax), %esi, %ebx + adcl %edi, %esi + movl %esi, 12(%esp) # 4-byte Spill + mulxl 20(%eax), %esi, %edi + adcl %ebx, %esi + movl %esi, 8(%esp) # 4-byte Spill + mulxl 24(%eax), %ebx, %esi + adcl %edi, %ebx + mulxl 28(%eax), %edi, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl %esi, %edi + mulxl 32(%eax), %esi, %ebp + movl %ebp, (%esp) # 4-byte Spill + adcl 4(%esp), %esi # 4-byte Folded Reload + mulxl 36(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 28(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%ecx) + movl %ebx, 24(%ecx) + movl %edi, 28(%ecx) + movl %esi, 32(%ecx) + movl %edx, 36(%ecx) + movl 52(%esp), %edx + mulxl 40(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%ecx) + adcl $0, %edx + movl %edx, 44(%ecx) + movl %ecx, %eax + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end158: + .size .LmulPv352x32, .Lfunc_end158-.LmulPv352x32 + + .globl mcl_fp_mulUnitPre11Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre11Lbmi2,@function +mcl_fp_mulUnitPre11Lbmi2: # @mcl_fp_mulUnitPre11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $92, %esp + calll .L159$pb +.L159$pb: + popl %ebx +.Ltmp20: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp20-.L159$pb), %ebx + movl 120(%esp), %eax + movl %eax, (%esp) + leal 40(%esp), %ecx + movl 116(%esp), %edx + calll .LmulPv352x32 + movl 84(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 72(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 68(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 64(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 60(%esp), %ebp + movl 56(%esp), %ebx + movl 52(%esp), %edi + movl 48(%esp), %esi + movl 40(%esp), %edx + movl 44(%esp), %ecx + movl 112(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + addl $92, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end159: + .size mcl_fp_mulUnitPre11Lbmi2, .Lfunc_end159-mcl_fp_mulUnitPre11Lbmi2 + + .globl mcl_fpDbl_mulPre11Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre11Lbmi2,@function 
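+# NOTE (annotation, not compiler output): the function below computes the
+# full 2x11-limb product by operand scanning: one .LmulPv352x32 call per
+# 32-bit word of the second operand, with each 12-word partial product
+# folded into the running result through adcl chains.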
+mcl_fpDbl_mulPre11Lbmi2: # @mcl_fpDbl_mulPre11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $620, %esp # imm = 0x26C + calll .L160$pb +.L160$pb: + popl %eax +.Ltmp21: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp21-.L160$pb), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %eax, %ebx + movl 648(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 644(%esp), %edx + movl %edx, %ebp + movl %ebx, %edi + calll .LmulPv352x32 + movl 612(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 588(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 584(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 580(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 576(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 568(%esp), %eax + movl 572(%esp), %esi + movl 640(%esp), %ecx + movl %eax, (%ecx) + movl 648(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 520(%esp), %ecx + movl %ebp, %edx + movl %edi, %ebx + calll .LmulPv352x32 + addl 520(%esp), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 564(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 560(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 540(%esp), %ebx + movl 536(%esp), %edi + movl 532(%esp), %esi + movl 524(%esp), %ecx + movl 528(%esp), %edx + movl 640(%esp), %eax + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%eax) + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 56(%esp), %eax # 4-byte Reload + addl 472(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 516(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 512(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 508(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 504(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 40(%esp) # 
4-byte Spill + movl 492(%esp), %ebp + movl 488(%esp), %edi + movl 484(%esp), %esi + movl 476(%esp), %ecx + movl 480(%esp), %edx + movl 640(%esp), %eax + movl 56(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%eax) + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 44(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 56(%esp), %eax # 4-byte Reload + addl 424(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 468(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 464(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 460(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 456(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 452(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 448(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 444(%esp), %ebx + movl 440(%esp), %edi + movl 436(%esp), %esi + movl 428(%esp), %ecx + movl 432(%esp), %edx + movl 640(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 44(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 376(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 416(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 404(%esp), %eax + movl %eax, 
44(%esp) # 4-byte Spill + movl 400(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 396(%esp), %ebp + movl 392(%esp), %edi + movl 388(%esp), %esi + movl 380(%esp), %ecx + movl 384(%esp), %edx + movl 640(%esp), %eax + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 16(%eax) + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 56(%esp), %eax # 4-byte Reload + addl 328(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 360(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 356(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 352(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 348(%esp), %ebx + movl 344(%esp), %edi + movl 340(%esp), %esi + movl 332(%esp), %ecx + movl 336(%esp), %edx + movl 640(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 280(%esp), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 316(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 312(%esp), %eax + 
movl %eax, 60(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 304(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 300(%esp), %ebp + movl 296(%esp), %edi + movl 292(%esp), %esi + movl 284(%esp), %ecx + movl 288(%esp), %edx + movl 640(%esp), %eax + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 24(%eax) + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 56(%esp), %eax # 4-byte Reload + addl 232(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 272(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 252(%esp), %ebx + movl 248(%esp), %edi + movl 244(%esp), %esi + movl 236(%esp), %ecx + movl 240(%esp), %edx + movl 640(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 184(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 228(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 224(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 
220(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 204(%esp), %ebp + movl 200(%esp), %edi + movl 196(%esp), %esi + movl 188(%esp), %ecx + movl 192(%esp), %edx + movl 640(%esp), %eax + movl 32(%esp), %ebx # 4-byte Reload + movl %ebx, 32(%eax) + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 44(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + movl 648(%esp), %edi + movl 36(%edi), %eax + movl %eax, (%esp) + leal 136(%esp), %ecx + movl 644(%esp), %eax + movl %eax, %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 56(%esp), %eax # 4-byte Reload + addl 136(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 164(%esp), %ebp + movl 160(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 156(%esp), %edi + movl 152(%esp), %esi + movl 148(%esp), %edx + movl 140(%esp), %ecx + movl 144(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 640(%esp), %eax + movl 56(%esp), %ebx # 4-byte Reload + movl %ebx, 36(%eax) + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 60(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 88(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 52(%esp), %eax # 4-byte Reload + addl 88(%esp), %eax + movl %eax, 52(%esp) 
# 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 92(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 128(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 120(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 116(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 112(%esp), %edi + movl 108(%esp), %esi + movl 104(%esp), %edx + movl 100(%esp), %ecx + movl 640(%esp), %eax + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 40(%eax) + movl %ebp, 44(%eax) + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 48(%eax) + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %ecx, 52(%eax) + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %edx, 56(%eax) + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %esi, 60(%eax) + movl 44(%esp), %ecx # 4-byte Reload + adcl 72(%esp), %ecx # 4-byte Folded Reload + movl %edi, 64(%eax) + movl 48(%esp), %edx # 4-byte Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %ecx, 68(%eax) + movl 68(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %edx, 72(%eax) + movl %ecx, 76(%eax) + movl 76(%esp), %ecx # 4-byte Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 80(%eax) + movl 84(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 84(%eax) + addl $620, %esp # imm = 0x26C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end160: + .size mcl_fpDbl_mulPre11Lbmi2, .Lfunc_end160-mcl_fpDbl_mulPre11Lbmi2 + + .globl mcl_fpDbl_sqrPre11Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre11Lbmi2,@function +mcl_fpDbl_sqrPre11Lbmi2: # @mcl_fpDbl_sqrPre11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $620, %esp # imm = 0x26C + calll .L161$pb +.L161$pb: + popl %ebx +.Ltmp22: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp22-.L161$pb), %ebx + movl %ebx, 84(%esp) # 4-byte Spill + movl 644(%esp), %edx + movl (%edx), %eax + movl %eax, (%esp) + leal 568(%esp), %ecx + movl %edx, %esi + movl %ebx, %edi + calll .LmulPv352x32 + movl 612(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 588(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 584(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 580(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 576(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 568(%esp), %eax + movl 572(%esp), %ebp + movl 640(%esp), %ecx + movl %eax, (%ecx) + movl %esi, %edx + movl 4(%edx), %eax + movl %eax, (%esp) + leal 520(%esp), %ecx + movl %edi, %ebx + calll .LmulPv352x32 + addl 520(%esp), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 564(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 560(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 540(%esp), %ebx + movl 536(%esp), %edi + movl 532(%esp), %esi + movl 524(%esp), %ecx + movl 528(%esp), %edx + movl 
640(%esp), %eax + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%eax) + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 8(%edx), %eax + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 60(%esp), %eax # 4-byte Reload + addl 472(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 516(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 512(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 508(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 504(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 492(%esp), %ebp + movl 488(%esp), %edi + movl 484(%esp), %esi + movl 476(%esp), %ecx + movl 480(%esp), %edx + movl 640(%esp), %eax + movl 60(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 12(%edx), %eax + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 48(%esp), %eax # 4-byte Reload + addl 424(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 468(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 464(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 460(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 456(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 452(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 448(%esp), %ebx + movl 444(%esp), %edi + movl 440(%esp), %esi + movl 436(%esp), %edx + movl 
428(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 432(%esp), %ecx + movl 640(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 80(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 16(%edx), %eax + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 80(%esp), %eax # 4-byte Reload + addl 376(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 416(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 404(%esp), %ebx + movl 400(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 396(%esp), %edi + movl 392(%esp), %esi + movl 388(%esp), %edx + movl 380(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 384(%esp), %ecx + movl 640(%esp), %eax + movl 80(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 48(%esp), %ebp # 4-byte Reload + adcl 20(%esp), %ebp # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 80(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 20(%edx), %eax + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 328(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 360(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 356(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 352(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 348(%esp), %ebp + movl 
344(%esp), %edi + movl 340(%esp), %esi + movl 332(%esp), %ecx + movl 336(%esp), %edx + movl 640(%esp), %eax + movl 48(%esp), %ebx # 4-byte Reload + movl %ebx, 20(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl 80(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 24(%edx), %eax + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 48(%esp), %eax # 4-byte Reload + addl 280(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 316(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 304(%esp), %ebx + movl 300(%esp), %edi + movl 296(%esp), %esi + movl 292(%esp), %edx + movl 284(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 288(%esp), %ecx + movl 640(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 80(%esp) # 4-byte Folded Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 8(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 28(%edx), %eax + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 80(%esp), %eax # 4-byte Reload + addl 232(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 272(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 260(%esp), %ebx + movl 256(%esp), %eax + movl %eax, 40(%esp) # 
4-byte Spill + movl 252(%esp), %edi + movl 248(%esp), %esi + movl 244(%esp), %edx + movl 236(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 240(%esp), %ecx + movl 640(%esp), %eax + movl 80(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 32(%edx), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 184(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 228(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 224(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 220(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 204(%esp), %ebp + movl 200(%esp), %edi + movl 196(%esp), %esi + movl 188(%esp), %ecx + movl 192(%esp), %edx + movl 640(%esp), %eax + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 32(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 36(%edx), %eax + movl %eax, (%esp) + leal 136(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 52(%esp), %eax # 4-byte Reload + addl 136(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 164(%esp), %ebp + movl 160(%esp), %eax + movl %eax, 
40(%esp) # 4-byte Spill + movl 156(%esp), %edi + movl 152(%esp), %esi + movl 148(%esp), %edx + movl 140(%esp), %ecx + movl 144(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 640(%esp), %eax + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 36(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 64(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 40(%edx), %eax + movl %eax, (%esp) + leal 88(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 52(%esp), %eax # 4-byte Reload + addl 88(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 92(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 128(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 120(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 116(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 112(%esp), %edi + movl 108(%esp), %esi + movl 104(%esp), %edx + movl 100(%esp), %ecx + movl 640(%esp), %eax + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 40(%eax) + movl %ebp, 44(%eax) + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 48(%eax) + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %ecx, 52(%eax) + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %edx, 56(%eax) + adcl 64(%esp), %edi # 4-byte Folded Reload + movl %esi, 60(%eax) + movl 44(%esp), %ecx # 4-byte Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %edi, 64(%eax) + movl 48(%esp), %edx # 4-byte Reload + adcl 60(%esp), %edx # 4-byte Folded Reload + movl %ecx, 68(%eax) + movl 72(%esp), %ecx # 4-byte Reload + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %edx, 72(%eax) + movl %ecx, 76(%eax) + movl 80(%esp), %ecx # 4-byte Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 80(%eax) + movl 84(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 84(%eax) + addl $620, %esp # imm = 0x26C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end161: + .size mcl_fpDbl_sqrPre11Lbmi2, .Lfunc_end161-mcl_fpDbl_sqrPre11Lbmi2 + + .globl mcl_fp_mont11Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont11Lbmi2,@function +mcl_fp_mont11Lbmi2: # @mcl_fp_mont11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1132, %esp # imm = 0x46C + calll .L162$pb +.L162$pb: + popl %ebx +.Ltmp23: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp23-.L162$pb), %ebx + movl 1164(%esp), %eax + movl -4(%eax), %ebp + movl %ebp, 
20(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1080(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 1080(%esp), %edi + movl 1084(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %edi, %eax + imull %ebp, %eax + movl 1124(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1120(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1116(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1112(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1108(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 1104(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 1100(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 1096(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1092(%esp), %esi + movl 1088(%esp), %ebp + movl %eax, (%esp) + leal 1032(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 1032(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1040(%esp), %ebp + adcl 1044(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 1076(%esp), %esi + sbbl %edi, %edi + movl 1160(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 984(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + andl $1, %edi + movl 56(%esp), %ecx # 4-byte Reload + addl 984(%esp), %ecx + adcl 988(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1024(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + adcl 1028(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 936(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + andl $1, %edi + movl %edi, %ecx + addl 936(%esp), %esi + adcl 940(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 952(%esp), 
%eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 964(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 980(%esp), %esi + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + addl 888(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 912(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 928(%esp), %esi + movl %esi, %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ebp, %eax + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 840(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + movl %esi, %eax + andl $1, %eax + addl 840(%esp), %ebp + movl 40(%esp), %ecx # 4-byte Reload + adcl 844(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 848(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 24(%esp), %ecx # 4-byte Reload + adcl 852(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 856(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 860(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 864(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 868(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 872(%esp), %ebp + movl 68(%esp), %esi # 4-byte Reload + adcl 876(%esp), %esi + adcl 880(%esp), %edi + movl 52(%esp), %ecx # 4-byte Reload + adcl 884(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 792(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 40(%esp), %ecx # 4-byte Reload + addl 792(%esp), %ecx + movl 36(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 
32(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 820(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + adcl 824(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + adcl 828(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 836(%esp), %esi + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 744(%esp), %ecx + movl 1164(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + andl $1, %edi + movl %edi, %ecx + addl 744(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 768(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 776(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 788(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 1156(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + movl 36(%esp), %ecx # 4-byte Reload + addl 696(%esp), %ecx + movl 24(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 716(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 724(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 728(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 648(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + andl $1, %edi + movl %edi, %ecx + addl 648(%esp), %ebp + movl 24(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %ebp # 4-byte Reload + adcl 656(%esp), %ebp + movl 32(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax 
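+# --- editorial annotation, not part of the generated output ---
+# mcl_fp_mont11Lbmi2 performs word-serial Montgomery multiplication on
+# 11x32-bit (352-bit) operands. Each round calls .LmulPv352x32 with the
+# next 32-bit word of the multiplier, then multiplies the running low
+# word by the precomputed -N^-1 mod 2^32 (the -4(%eax) value spilled at
+# 20(%esp) in the prologue) and adds q*N via a second .LmulPv352x32
+# call so the low word cancels and the accumulator shifts down a word.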
+ movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 676(%esp), %edi + adcl 680(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %esi # 4-byte Reload + adcl 692(%esp), %esi + adcl $0, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 600(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 24(%esp), %ecx # 4-byte Reload + addl 600(%esp), %ecx + adcl 604(%esp), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + adcl 608(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 624(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 640(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %edi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + andl $1, %esi + movl %esi, %eax + addl 552(%esp), %edi + movl 28(%esp), %ecx # 4-byte Reload + adcl 556(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl %ebp, %edi + adcl 560(%esp), %edi + movl 44(%esp), %ecx # 4-byte Reload + adcl 564(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 568(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 572(%esp), %esi + movl 68(%esp), %ecx # 4-byte Reload + adcl 576(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 580(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 584(%esp), %ebp + movl 48(%esp), %ecx # 4-byte Reload + adcl 588(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 592(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 596(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 28(%esp), %ecx # 4-byte Reload + addl 504(%esp), %ecx + adcl 508(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl 
%eax, 60(%esp) # 4-byte Spill + adcl 520(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 532(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 536(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 456(%esp), %ecx + movl 1164(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + andl $1, %ebp + movl %ebp, %eax + addl 456(%esp), %edi + movl 32(%esp), %ecx # 4-byte Reload + adcl 460(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 464(%esp), %ebp + movl 60(%esp), %ecx # 4-byte Reload + adcl 468(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 472(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 480(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 484(%esp), %edi + adcl 488(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 492(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 496(%esp), %esi + movl 24(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 408(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 32(%esp), %ecx # 4-byte Reload + addl 408(%esp), %ecx + adcl 412(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 428(%esp), %ebp + adcl 432(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 444(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 360(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + andl $1, %edi + movl %edi, %ecx + addl 360(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 368(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 
64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 380(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 384(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 392(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 44(%esp), %ecx # 4-byte Reload + addl 312(%esp), %ecx + adcl 316(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 332(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 340(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %edi # 4-byte Reload + adcl 348(%esp), %edi + movl 28(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + andl $1, %ebp + movl %ebp, %ecx + addl 264(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 276(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 284(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 300(%esp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 28(%esp), %edi # 4-byte Reload + adcl 304(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 216(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 216(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 224(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + 
adcl 232(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + adcl 252(%esp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + movl %esi, %ecx + andl $1, %ecx + addl 168(%esp), %ebp + movl 64(%esp), %esi # 4-byte Reload + adcl 172(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 184(%esp), %ebp + movl 48(%esp), %edi # 4-byte Reload + adcl 188(%esp), %edi + movl 40(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl %esi, %ecx + addl 120(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 128(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 132(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 136(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + adcl 156(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %esi, %esi + movl 20(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 72(%esp), %ecx + movl 1164(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + andl $1, %esi + addl 72(%esp), %edi + movl 48(%esp), %edi # 4-byte Reload + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx + movl 
%ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl 88(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 24(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 28(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 28(%esp) # 4-byte Spill + adcl 108(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 112(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl $0, %esi + movl 1164(%esp), %ebp + subl (%ebp), %eax + movl %ecx, %edx + sbbl 4(%ebp), %edx + movl 52(%esp), %ecx # 4-byte Reload + sbbl 8(%ebp), %ecx + sbbl 12(%ebp), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + sbbl 16(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + sbbl 20(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 24(%esp), %edi # 4-byte Reload + sbbl 24(%ebp), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 28(%esp), %ebx # 4-byte Reload + sbbl 28(%ebp), %ebx + movl 32(%esp), %edi # 4-byte Reload + sbbl 32(%ebp), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + sbbl 36(%ebp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + sbbl 40(%ebp), %edi + movl %edi, %ebp + sbbl $0, %esi + andl $1, %esi + jne .LBB162_2 +# BB#1: + movl %ebx, 28(%esp) # 4-byte Spill +.LBB162_2: + movl %esi, %ebx + testb %bl, %bl + movl 68(%esp), %ebx # 4-byte Reload + jne .LBB162_4 +# BB#3: + movl %eax, %ebx +.LBB162_4: + movl 1152(%esp), %eax + movl %ebx, (%eax) + movl 56(%esp), %edi # 4-byte Reload + jne .LBB162_6 +# BB#5: + movl %edx, %edi +.LBB162_6: + movl %edi, 4(%eax) + movl 52(%esp), %edx # 4-byte Reload + jne .LBB162_8 +# BB#7: + movl %ecx, %edx +.LBB162_8: + movl %edx, 8(%eax) + jne .LBB162_10 +# BB#9: + movl 4(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%esp) # 4-byte Spill +.LBB162_10: + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 40(%esp), %ecx # 4-byte Reload + jne .LBB162_12 +# BB#11: + movl 8(%esp), %ecx # 4-byte Reload +.LBB162_12: + movl %ecx, 16(%eax) + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB162_14 +# BB#13: + movl 12(%esp), %ecx # 4-byte Reload +.LBB162_14: + movl %ecx, 20(%eax) + movl 24(%esp), %ecx # 4-byte Reload + jne .LBB162_16 +# BB#15: + movl 16(%esp), %ecx # 4-byte Reload +.LBB162_16: + movl %ecx, 24(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 32(%esp), %ecx # 4-byte Reload + jne .LBB162_18 +# BB#17: + movl 20(%esp), %ecx # 4-byte Reload +.LBB162_18: + movl %ecx, 32(%eax) + movl 44(%esp), %ecx # 4-byte Reload + jne .LBB162_20 +# BB#19: + movl 60(%esp), %ecx # 4-byte Reload +.LBB162_20: + movl %ecx, 36(%eax) + movl 64(%esp), %ecx # 4-byte Reload + jne .LBB162_22 +# BB#21: + movl %ebp, %ecx +.LBB162_22: + movl %ecx, 40(%eax) + addl $1132, %esp # imm = 0x46C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end162: + .size mcl_fp_mont11Lbmi2, .Lfunc_end162-mcl_fp_mont11Lbmi2 + + .globl mcl_fp_montNF11Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF11Lbmi2,@function +mcl_fp_montNF11Lbmi2: # @mcl_fp_montNF11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + 
pushl %edi + pushl %esi + subl $1132, %esp # imm = 0x46C + calll .L163$pb +.L163$pb: + popl %ebx +.Ltmp24: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp24-.L163$pb), %ebx + movl 1164(%esp), %eax + movl -4(%eax), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1080(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 1080(%esp), %ebp + movl 1084(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 1124(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1120(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1116(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1112(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1108(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1104(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 1100(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 1096(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1092(%esp), %esi + movl 1088(%esp), %edi + movl %eax, (%esp) + leal 1032(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 1032(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1040(%esp), %edi + adcl 1044(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 1048(%esp), %ebp + movl 28(%esp), %esi # 4-byte Reload + adcl 1052(%esp), %esi + movl 32(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 984(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 1028(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + addl 984(%esp), %ecx + adcl 988(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 996(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + adcl 1000(%esp), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 1004(%esp), %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 936(%esp), %ecx + movl 1164(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + addl 936(%esp), %ebp + adcl 940(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill 
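+# --- editorial annotation, not part of the generated output ---
+# mcl_fp_montNF11Lbmi2 is the "NF" Montgomery variant (presumably "no
+# final subtraction", per mcl's naming): the rounds mirror
+# mcl_fp_mont11Lbmi2, but the top word of each .LmulPv352x32 result is
+# carried forward directly (adcl $0 into a register, e.g. 1028(%esp)
+# above) instead of materializing a borrow with sbbl/andl $1.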
+ movl 28(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + adcl 956(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 960(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 980(%esp), %ebp + movl 1160(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 932(%esp), %eax + addl 888(%esp), %edi + movl 44(%esp), %ecx # 4-byte Reload + adcl 892(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 896(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 900(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 904(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + adcl 908(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + adcl 912(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 916(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 920(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 924(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 928(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %edi, %eax + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 840(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 840(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 860(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 872(%esp), %edi + movl 68(%esp), %esi # 4-byte Reload + adcl 876(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 884(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 792(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 836(%esp), %eax + movl 44(%esp), %edx # 4-byte Reload + addl 792(%esp), %edx + movl 40(%esp), %ecx # 4-byte Reload + adcl 796(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 800(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 804(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 808(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 812(%esp), %ecx + movl %ecx, 52(%esp) # 
4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 816(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 820(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + adcl 824(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 828(%esp), %ebp + movl 48(%esp), %ecx # 4-byte Reload + adcl 832(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 744(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 744(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 764(%esp), %esi + movl 56(%esp), %edi # 4-byte Reload + adcl 768(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 780(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 784(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 740(%esp), %edx + movl 40(%esp), %eax # 4-byte Reload + addl 696(%esp), %eax + movl 28(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 704(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 708(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 712(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl 716(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 720(%esp), %edi + movl 68(%esp), %ecx # 4-byte Reload + adcl 724(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 728(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 732(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 736(%esp), %esi + adcl $0, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, %ebp + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 648(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 648(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + adcl 656(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 672(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + 
adcl 684(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 688(%esp), %esi + movl %esi, %edi + movl 40(%esp), %esi # 4-byte Reload + adcl 692(%esp), %esi + movl 1160(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 600(%esp), %ecx + movl 1156(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + movl 644(%esp), %eax + movl 28(%esp), %ecx # 4-byte Reload + addl 600(%esp), %ecx + adcl 604(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 608(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 612(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 616(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 620(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 624(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 628(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 632(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 636(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + adcl 640(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 552(%esp), %esi + movl 32(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 560(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 576(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 596(%esp), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 1160(%esp), %ecx + movl %ecx, %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 548(%esp), %edx + movl 32(%esp), %eax # 4-byte Reload + addl 504(%esp), %eax + adcl 508(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 512(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 516(%esp), %ebp + movl 64(%esp), %ecx # 4-byte Reload + adcl 520(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 524(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 528(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 532(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 536(%esp), %edi + movl 40(%esp), %ecx # 4-byte Reload + adcl 540(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 544(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 
32(%esp) # 4-byte Spill + movl %eax, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 456(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 456(%esp), %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 468(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 480(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 488(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %edi # 4-byte Reload + adcl 496(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 408(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 452(%esp), %edx + movl 36(%esp), %ecx # 4-byte Reload + addl 408(%esp), %ecx + movl 52(%esp), %ebp # 4-byte Reload + adcl 412(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 428(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 444(%esp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + adcl 448(%esp), %edi + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 360(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 360(%esp), %esi + adcl 364(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 372(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 392(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + adcl 400(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 356(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + addl 
312(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 320(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 332(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 340(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 264(%esp), %esi + movl 56(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 276(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %edi, %esi + adcl 284(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 292(%esp), %edi + movl 28(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 216(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 260(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + addl 216(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 224(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 232(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 240(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 28(%esp), %ebp # 4-byte Reload + adcl 244(%esp), %ebp + movl 32(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 168(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 176(%esp), %esi + movl 60(%esp), %edi # 
4-byte Reload + adcl 180(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 196(%esp), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + adcl 204(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 164(%esp), %edx + movl 64(%esp), %ecx # 4-byte Reload + addl 120(%esp), %ecx + adcl 124(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + adcl 128(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 136(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 152(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 156(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 72(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 72(%esp), %edi + movl 48(%esp), %edi # 4-byte Reload + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 84(%esp), %edi + adcl 88(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 28(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl 108(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 112(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, %edx + movl 1164(%esp), %ebx + subl (%ebx), %edx + movl %ecx, %esi + sbbl 4(%ebx), %esi + movl %edi, %ecx + sbbl 8(%ebx), %ecx + movl 44(%esp), %eax # 4-byte Reload + sbbl 12(%ebx), %eax + movl 40(%esp), %ebp # 4-byte Reload + sbbl 16(%ebx), %ebp + movl %ebp, 4(%esp) # 4-byte Spill + movl 28(%esp), %ebp # 4-byte Reload + sbbl 20(%ebx), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + sbbl 24(%ebx), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + sbbl 28(%ebx), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 
52(%esp), %ebp # 4-byte Reload + sbbl 32(%ebx), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + sbbl 36(%ebx), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + sbbl 40(%ebx), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl %ebp, %ebx + sarl $31, %ebx + testl %ebx, %ebx + movl 68(%esp), %ebx # 4-byte Reload + js .LBB163_2 +# BB#1: + movl %edx, %ebx +.LBB163_2: + movl 1152(%esp), %edx + movl %ebx, (%edx) + movl 60(%esp), %ebp # 4-byte Reload + js .LBB163_4 +# BB#3: + movl %esi, %ebp +.LBB163_4: + movl %ebp, 4(%edx) + js .LBB163_6 +# BB#5: + movl %ecx, %edi +.LBB163_6: + movl %edi, 8(%edx) + movl 44(%esp), %ecx # 4-byte Reload + js .LBB163_8 +# BB#7: + movl %eax, %ecx +.LBB163_8: + movl %ecx, 12(%edx) + movl 40(%esp), %eax # 4-byte Reload + js .LBB163_10 +# BB#9: + movl 4(%esp), %eax # 4-byte Reload +.LBB163_10: + movl %eax, 16(%edx) + movl 28(%esp), %eax # 4-byte Reload + js .LBB163_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB163_12: + movl %eax, 20(%edx) + movl 32(%esp), %eax # 4-byte Reload + js .LBB163_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB163_14: + movl %eax, 24(%edx) + movl 36(%esp), %eax # 4-byte Reload + js .LBB163_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB163_16: + movl %eax, 28(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB163_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB163_18: + movl %eax, 32(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB163_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB163_20: + movl %eax, 36(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB163_22 +# BB#21: + movl 48(%esp), %eax # 4-byte Reload +.LBB163_22: + movl %eax, 40(%edx) + addl $1132, %esp # imm = 0x46C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end163: + .size mcl_fp_montNF11Lbmi2, .Lfunc_end163-mcl_fp_montNF11Lbmi2 + + .globl mcl_fp_montRed11Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed11Lbmi2,@function +mcl_fp_montRed11Lbmi2: # @mcl_fp_montRed11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $668, %esp # imm = 0x29C + calll .L164$pb +.L164$pb: + popl %eax +.Ltmp25: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp25-.L164$pb), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 696(%esp), %edx + movl -4(%edx), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 692(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 60(%esp) # 4-byte Spill + movl 4(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + imull %esi, %ebx + movl 84(%ecx), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 80(%ecx), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 76(%ecx), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 68(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 60(%ecx), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 48(%ecx), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 44(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 40(%ecx), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 32(%ecx), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 28(%ecx), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 24(%ecx), %ebp + movl 20(%ecx), %edi + movl 16(%ecx), %esi + movl 12(%ecx), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 8(%ecx), %eax 
+ movl %eax, 56(%esp) # 4-byte Spill + movl (%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 616(%esp), %ecx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 60(%esp), %eax # 4-byte Reload + addl 616(%esp), %eax + movl 64(%esp), %ecx # 4-byte Reload + adcl 620(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 632(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl 636(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + adcl 640(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + andl $1, %edi + movl %edi, %ecx + addl 568(%esp), %esi + movl 56(%esp), %edx # 4-byte Reload + adcl 572(%esp), %edx + movl 48(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 600(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + 
adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 520(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 520(%esp), %ebp + movl 48(%esp), %ecx # 4-byte Reload + adcl 524(%esp), %ecx + movl 52(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 548(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl 124(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl 120(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 472(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 124(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 120(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 
$0, %edi + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + movl 80(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 56(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 424(%esp), %ebp + movl 60(%esp), %ecx # 4-byte Reload + adcl 428(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl 464(%esp), %ebp + movl 124(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 104(%esp) # 4-byte Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %ecx, %esi + movl %esi, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 376(%esp), %esi + movl 64(%esp), %ecx # 4-byte Reload + adcl 380(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %esi # 4-byte Reload + adcl 404(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 412(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 328(%esp), %edi + movl 76(%esp), %ecx # 4-byte Reload + adcl 332(%esp), %ecx + movl 
88(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 344(%esp), %edi + movl 132(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 352(%esp), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 356(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 96(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 696(%esp), %eax + movl %eax, %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 280(%esp), %ebp + movl 88(%esp), %ebp # 4-byte Reload + adcl 284(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 292(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 132(%esp), %edi # 4-byte Reload + adcl 296(%esp), %edi + movl 128(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl 304(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl %ebp, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 232(%esp), %ebp + movl 84(%esp), %ebp # 4-byte Reload + adcl 236(%esp), %ebp + movl 100(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 244(%esp), %edi + movl %edi, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 
272(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 276(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + movl 80(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl 56(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %ebp, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 184(%esp), %ebp + movl 100(%esp), %ecx # 4-byte Reload + adcl 188(%esp), %ecx + movl 132(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, %ebp + movl 68(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 136(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 136(%esp), %esi + movl 132(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl %eax, %edi + movl 128(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 128(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 148(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 152(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 80(%esp), %ebx # 4-byte Reload + adcl 180(%esp), %ebx + movl %ebx, 80(%esp) # 4-byte Spill + adcl $0, %ebp + subl 12(%esp), %edi # 4-byte Folded Reload + sbbl 4(%esp), %edx # 4-byte Folded Reload + sbbl 8(%esp), %ecx # 4-byte Folded Reload + sbbl 16(%esp), %esi # 4-byte Folded Reload + movl 124(%esp), %eax # 4-byte Reload + sbbl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + sbbl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload 
+ sbbl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + sbbl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + sbbl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl %ebx, %eax + movl %ebp, %ebx + sbbl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + sbbl $0, %ebx + andl $1, %ebx + jne .LBB164_2 +# BB#1: + movl %esi, 112(%esp) # 4-byte Spill +.LBB164_2: + testb %bl, %bl + movl 132(%esp), %esi # 4-byte Reload + jne .LBB164_4 +# BB#3: + movl %edi, %esi +.LBB164_4: + movl 688(%esp), %edi + movl %esi, (%edi) + movl 104(%esp), %esi # 4-byte Reload + jne .LBB164_6 +# BB#5: + movl %edx, 128(%esp) # 4-byte Spill +.LBB164_6: + movl 128(%esp), %edx # 4-byte Reload + movl %edx, 4(%edi) + movl 116(%esp), %edx # 4-byte Reload + jne .LBB164_8 +# BB#7: + movl %ecx, %edx +.LBB164_8: + movl %edx, 8(%edi) + movl 112(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%edi) + movl 92(%esp), %edx # 4-byte Reload + movl 124(%esp), %ecx # 4-byte Reload + jne .LBB164_10 +# BB#9: + movl 64(%esp), %ecx # 4-byte Reload +.LBB164_10: + movl %ecx, 16(%edi) + movl 96(%esp), %ecx # 4-byte Reload + movl 120(%esp), %eax # 4-byte Reload + jne .LBB164_12 +# BB#11: + movl 68(%esp), %eax # 4-byte Reload +.LBB164_12: + movl %eax, 20(%edi) + movl 80(%esp), %eax # 4-byte Reload + movl 108(%esp), %ebp # 4-byte Reload + jne .LBB164_14 +# BB#13: + movl 72(%esp), %ebp # 4-byte Reload +.LBB164_14: + movl %ebp, 24(%edi) + jne .LBB164_16 +# BB#15: + movl 76(%esp), %esi # 4-byte Reload +.LBB164_16: + movl %esi, 28(%edi) + jne .LBB164_18 +# BB#17: + movl 84(%esp), %edx # 4-byte Reload +.LBB164_18: + movl %edx, 32(%edi) + jne .LBB164_20 +# BB#19: + movl 88(%esp), %ecx # 4-byte Reload +.LBB164_20: + movl %ecx, 36(%edi) + jne .LBB164_22 +# BB#21: + movl 100(%esp), %eax # 4-byte Reload +.LBB164_22: + movl %eax, 40(%edi) + addl $668, %esp # imm = 0x29C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end164: + .size mcl_fp_montRed11Lbmi2, .Lfunc_end164-mcl_fp_montRed11Lbmi2 + + .globl mcl_fp_addPre11Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre11Lbmi2,@function +mcl_fp_addPre11Lbmi2: # @mcl_fp_addPre11Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ebx) + movl 32(%ecx), %esi + adcl %edi, %esi + movl 36(%eax), %edi + movl %edx, 28(%ebx) + movl 36(%ecx), %edx + adcl %edi, %edx + movl %esi, 32(%ebx) + movl %edx, 36(%ebx) + movl 40(%eax), %eax + movl 40(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 40(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end165: + .size mcl_fp_addPre11Lbmi2, .Lfunc_end165-mcl_fp_addPre11Lbmi2 + + .globl mcl_fp_subPre11Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre11Lbmi2,@function +mcl_fp_subPre11Lbmi2: # @mcl_fp_subPre11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx 
+ pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ebp) + movl 32(%ecx), %edi + sbbl %ebx, %edi + movl 36(%edx), %ebx + movl %esi, 28(%ebp) + movl 36(%ecx), %esi + sbbl %ebx, %esi + movl %edi, 32(%ebp) + movl %esi, 36(%ebp) + movl 40(%edx), %edx + movl 40(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 40(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end166: + .size mcl_fp_subPre11Lbmi2, .Lfunc_end166-mcl_fp_subPre11Lbmi2 + + .globl mcl_fp_shr1_11Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_11Lbmi2,@function +mcl_fp_shr1_11Lbmi2: # @mcl_fp_shr1_11Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + shrdl $1, %edx, %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 8(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 4(%esi) + movl 12(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 8(%esi) + movl 16(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 12(%esi) + movl 20(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 16(%esi) + movl 24(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 20(%esi) + movl 28(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 24(%esi) + movl 32(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 28(%esi) + movl 36(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 32(%esi) + movl 40(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 36(%esi) + shrl %eax + movl %eax, 40(%esi) + popl %esi + retl +.Lfunc_end167: + .size mcl_fp_shr1_11Lbmi2, .Lfunc_end167-mcl_fp_shr1_11Lbmi2 + + .globl mcl_fp_add11Lbmi2 + .align 16, 0x90 + .type mcl_fp_add11Lbmi2,@function +mcl_fp_add11Lbmi2: # @mcl_fp_add11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 60(%esp), %edi + movl (%edi), %ecx + movl 4(%edi), %eax + movl 56(%esp), %esi + addl (%esi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl 4(%esi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 8(%edi), %eax + adcl 8(%esi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 12(%esi), %eax + movl 16(%esi), %ecx + adcl 12(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + adcl 16(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 20(%esi), %eax + adcl 20(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 24(%esi), %eax + adcl 24(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 28(%esi), %ebx + adcl 28(%edi), %ebx + movl %ebx, (%esp) # 4-byte Spill + movl 32(%esi), %ecx + adcl 32(%edi), %ecx + movl 36(%esi), %eax + adcl 36(%edi), %eax + movl 40(%esi), %edx + adcl 40(%edi), %edx + movl 52(%esp), %esi + movl %ebp, (%esi) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 4(%esi) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 8(%esi) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%esi) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%esi) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%esi) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%esi) + movl 
%ebx, 28(%esi) + movl %ecx, 32(%esi) + movl %eax, 36(%esi) + movl %edx, 40(%esi) + sbbl %ebx, %ebx + andl $1, %ebx + movl 64(%esp), %ebp + movl 4(%esp), %edi # 4-byte Reload + subl (%ebp), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl 28(%esp), %edi # 4-byte Reload + sbbl 4(%ebp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 24(%esp), %edi # 4-byte Reload + sbbl 8(%ebp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 20(%esp), %edi # 4-byte Reload + sbbl 12(%ebp), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 16(%esp), %edi # 4-byte Reload + sbbl 16(%ebp), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 12(%esp), %edi # 4-byte Reload + sbbl 20(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 8(%esp), %edi # 4-byte Reload + sbbl 24(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl (%esp), %edi # 4-byte Reload + sbbl 28(%ebp), %edi + movl %edi, (%esp) # 4-byte Spill + sbbl 32(%ebp), %ecx + sbbl 36(%ebp), %eax + sbbl 40(%ebp), %edx + movl %edx, %edi + sbbl $0, %ebx + testb $1, %bl + jne .LBB168_2 +# BB#1: # %nocarry + movl 4(%esp), %ebx # 4-byte Reload + movl %ebx, (%esi) + movl 28(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%esi) + movl 24(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%esi) + movl 20(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%esi) + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 16(%esi) + movl 12(%esp), %ebx # 4-byte Reload + movl %ebx, 20(%esi) + movl 8(%esp), %ebx # 4-byte Reload + movl %ebx, 24(%esi) + movl (%esp), %edx # 4-byte Reload + movl %edx, 28(%esi) + movl %ecx, 32(%esi) + movl %eax, 36(%esi) + movl %edi, 40(%esi) +.LBB168_2: # %carry + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end168: + .size mcl_fp_add11Lbmi2, .Lfunc_end168-mcl_fp_add11Lbmi2 + + .globl mcl_fp_addNF11Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF11Lbmi2,@function +mcl_fp_addNF11Lbmi2: # @mcl_fp_addNF11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $80, %esp + movl 108(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %ecx + movl 104(%esp), %esi + addl (%esi), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 4(%esi), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%edx), %ebx + movl 36(%edx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 32(%edx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 28(%edx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 24(%edx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 20(%edx), %ebp + movl 16(%edx), %edi + movl 12(%edx), %eax + movl 8(%edx), %ecx + adcl 8(%esi), %ecx + adcl 12(%esi), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 16(%esi), %edi + movl %edi, 48(%esp) # 4-byte Spill + adcl 20(%esi), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 24(%esi), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 28(%esi), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 32(%esi), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 36(%esi), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl %ecx, %edx + adcl 40(%esi), %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 112(%esp), %ebx + movl 52(%esp), %esi # 4-byte Reload + subl (%ebx), %esi + movl 60(%esp), %ecx # 4-byte Reload + sbbl 4(%ebx), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl %edx, %ecx + sbbl 8(%ebx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + sbbl 12(%ebx), %eax + movl %eax, 8(%esp) # 4-byte Spill + sbbl 16(%ebx), %edi + movl %edi, 12(%esp) # 4-byte Spill + sbbl 
20(%ebx), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + movl %eax, %ecx + sbbl 24(%ebx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 28(%ebx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + sbbl 32(%ebx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + movl %edi, %ecx + movl %edi, %ebp + sbbl 36(%ebx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, %edi + sbbl 40(%ebx), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl %edi, %ebx + movl 52(%esp), %edi # 4-byte Reload + sarl $31, %ebx + testl %ebx, %ebx + js .LBB169_2 +# BB#1: + movl %esi, %edi +.LBB169_2: + movl 100(%esp), %esi + movl %edi, (%esi) + movl 60(%esp), %edi # 4-byte Reload + js .LBB169_4 +# BB#3: + movl (%esp), %edi # 4-byte Reload +.LBB169_4: + movl %edi, 4(%esi) + movl %eax, %edi + js .LBB169_6 +# BB#5: + movl 4(%esp), %edx # 4-byte Reload +.LBB169_6: + movl %edx, 8(%esi) + movl %ebp, %ecx + movl 72(%esp), %edx # 4-byte Reload + movl 40(%esp), %eax # 4-byte Reload + js .LBB169_8 +# BB#7: + movl 8(%esp), %eax # 4-byte Reload +.LBB169_8: + movl %eax, 12(%esi) + movl 76(%esp), %eax # 4-byte Reload + movl 44(%esp), %ebp # 4-byte Reload + js .LBB169_10 +# BB#9: + movl 12(%esp), %ebx # 4-byte Reload + movl %ebx, 48(%esp) # 4-byte Spill +.LBB169_10: + movl 48(%esp), %ebx # 4-byte Reload + movl %ebx, 16(%esi) + js .LBB169_12 +# BB#11: + movl 16(%esp), %ebp # 4-byte Reload +.LBB169_12: + movl %ebp, 20(%esi) + js .LBB169_14 +# BB#13: + movl 20(%esp), %edi # 4-byte Reload +.LBB169_14: + movl %edi, 24(%esi) + js .LBB169_16 +# BB#15: + movl 24(%esp), %eax # 4-byte Reload +.LBB169_16: + movl %eax, 28(%esi) + js .LBB169_18 +# BB#17: + movl 28(%esp), %edx # 4-byte Reload +.LBB169_18: + movl %edx, 32(%esi) + js .LBB169_20 +# BB#19: + movl 32(%esp), %ecx # 4-byte Reload +.LBB169_20: + movl %ecx, 36(%esi) + movl 56(%esp), %eax # 4-byte Reload + js .LBB169_22 +# BB#21: + movl 36(%esp), %eax # 4-byte Reload +.LBB169_22: + movl %eax, 40(%esi) + addl $80, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end169: + .size mcl_fp_addNF11Lbmi2, .Lfunc_end169-mcl_fp_addNF11Lbmi2 + + .globl mcl_fp_sub11Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub11Lbmi2,@function +mcl_fp_sub11Lbmi2: # @mcl_fp_sub11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 64(%esp), %ebp + movl (%ebp), %ecx + movl 4(%ebp), %eax + movl 68(%esp), %edi + subl (%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 8(%ebp), %eax + sbbl 8(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 12(%ebp), %ebx + sbbl 12(%edi), %ebx + movl 16(%ebp), %eax + sbbl 16(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 20(%ebp), %eax + sbbl 20(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 24(%ebp), %eax + sbbl 24(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 28(%ebp), %edx + sbbl 28(%edi), %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 32(%ebp), %ecx + sbbl 32(%edi), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 36(%ebp), %eax + sbbl 36(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 40(%ebp), %eax + sbbl 40(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl %ebx, %ebp + movl 16(%esp), %esi # 4-byte Reload + movl $0, %ebx + sbbl $0, %ebx + testb $1, %bl + movl 60(%esp), %ebx + movl %esi, (%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) 
+ movl 32(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl %ebp, 12(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl %edx, 28(%ebx) + movl %ecx, 32(%ebx) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%ebx) + movl %ecx, %edi + movl %eax, 40(%ebx) + je .LBB170_2 +# BB#1: # %carry + movl 72(%esp), %eax + addl (%eax), %esi + movl %esi, (%ebx) + movl 28(%esp), %edx # 4-byte Reload + movl %eax, %esi + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 32(%esp), %ecx # 4-byte Reload + adcl 8(%esi), %ecx + movl 12(%esi), %eax + adcl %ebp, %eax + movl %ecx, 8(%ebx) + movl 16(%esi), %ecx + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl (%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl %ecx, 32(%ebx) + movl 36(%esi), %eax + adcl %edi, %eax + movl %eax, 36(%ebx) + movl 40(%esi), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%ebx) +.LBB170_2: # %nocarry + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end170: + .size mcl_fp_sub11Lbmi2, .Lfunc_end170-mcl_fp_sub11Lbmi2 + + .globl mcl_fp_subNF11Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF11Lbmi2,@function +mcl_fp_subNF11Lbmi2: # @mcl_fp_subNF11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $64, %esp + movl 88(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %ecx + movl 92(%esp), %edi + subl (%edi), %edx + movl %edx, 40(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%eax), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 36(%eax), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 32(%eax), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 28(%eax), %ebx + movl 24(%eax), %ebp + movl 20(%eax), %esi + movl 16(%eax), %edx + movl 12(%eax), %ecx + movl 8(%eax), %eax + sbbl 8(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + sbbl 12(%edi), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 24(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 28(%esp) # 4-byte Spill + sbbl 24(%edi), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + sbbl 28(%edi), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl %eax, %ecx + sarl $31, %ecx + movl %ecx, %edx + shldl $1, %eax, %edx + movl 96(%esp), %ebx + movl 4(%ebx), %eax + andl %edx, %eax + movl %eax, 48(%esp) # 4-byte Spill + andl (%ebx), %edx + movl 40(%ebx), %eax + andl %ecx, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 36(%ebx), %eax + andl %ecx, %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 32(%ebx), %eax + andl %ecx, %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 28(%ebx), %eax + andl %ecx, %eax + movl %eax, (%esp) # 4-byte Spill + movl 24(%ebx), %ebp + andl %ecx, %ebp + rorxl $31, %ecx, %eax + andl 20(%ebx), %ecx + movl 16(%ebx), %edi + andl %eax, %edi + movl 12(%ebx), %esi + andl %eax, %esi + andl 8(%ebx), %eax + 
addl 40(%esp), %edx # 4-byte Folded Reload + movl 48(%esp), %ebx # 4-byte Reload + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 48(%esp) # 4-byte Spill + movl 84(%esp), %ebx + movl %edx, (%ebx) + adcl 16(%esp), %eax # 4-byte Folded Reload + movl 48(%esp), %edx # 4-byte Reload + movl %edx, 4(%ebx) + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %eax, 8(%ebx) + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %esi, 12(%ebx) + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %edi, 16(%ebx) + adcl 32(%esp), %ebp # 4-byte Folded Reload + movl %ecx, 20(%ebx) + movl (%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %ebp, 24(%ebx) + movl 4(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 8(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl %eax, 36(%ebx) + movl 12(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%ebx) + addl $64, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end171: + .size mcl_fp_subNF11Lbmi2, .Lfunc_end171-mcl_fp_subNF11Lbmi2 + + .globl mcl_fpDbl_add11Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add11Lbmi2,@function +mcl_fpDbl_add11Lbmi2: # @mcl_fpDbl_add11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $80, %esp + movl 108(%esp), %ecx + movl 104(%esp), %edi + movl 12(%edi), %esi + movl 16(%edi), %edx + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%edi), %ebp + movl 100(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%edi), %ebp + adcl 8(%edi), %ebx + adcl 12(%ecx), %esi + adcl 16(%ecx), %edx + movl %ebp, 4(%eax) + movl 52(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %esi, 12(%eax) + movl 20(%edi), %esi + adcl %ebx, %esi + movl 24(%ecx), %ebx + movl %edx, 16(%eax) + movl 24(%edi), %edx + adcl %ebx, %edx + movl 28(%ecx), %ebx + movl %esi, 20(%eax) + movl 28(%edi), %esi + adcl %ebx, %esi + movl 32(%ecx), %ebx + movl %edx, 24(%eax) + movl 32(%edi), %edx + adcl %ebx, %edx + movl 36(%ecx), %ebx + movl %esi, 28(%eax) + movl 36(%edi), %esi + adcl %ebx, %esi + movl 40(%ecx), %ebx + movl %edx, 32(%eax) + movl 40(%edi), %edx + adcl %ebx, %edx + movl 44(%ecx), %ebx + movl %esi, 36(%eax) + movl 44(%edi), %esi + adcl %ebx, %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 48(%ecx), %esi + movl %edx, 40(%eax) + movl 48(%edi), %eax + adcl %esi, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 52(%edi), %eax + adcl %ebp, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 56(%ecx), %edx + movl 56(%edi), %eax + adcl %edx, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%ecx), %edx + movl 60(%edi), %eax + adcl %edx, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%ecx), %edx + movl 64(%edi), %eax + adcl %edx, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%ecx), %eax + movl 68(%edi), %edx + adcl %eax, %edx + movl 72(%ecx), %esi + movl 72(%edi), %eax + adcl %esi, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 76(%ecx), %ebx + movl 76(%edi), %esi + adcl %ebx, %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 80(%ecx), %ebp + movl 80(%edi), %ebx + adcl %ebp, %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 84(%ecx), %ecx + movl 84(%edi), %edi + adcl %ecx, %edi + movl %edi, 40(%esp) # 4-byte Spill + sbbl %ecx, %ecx + andl $1, %ecx + movl 112(%esp), %ebp + movl 68(%esp), %edi # 4-byte Reload + subl (%ebp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + sbbl 4(%ebp), %edi + movl %edi, 20(%esp) 
# 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + sbbl 8(%ebp), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + sbbl 12(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + sbbl 16(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + sbbl 20(%ebp), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %edx, %edi + sbbl 24(%ebp), %edi + movl %edi, (%esp) # 4-byte Spill + sbbl 28(%ebp), %eax + movl %eax, 28(%esp) # 4-byte Spill + sbbl 32(%ebp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl %ebx, %eax + movl 40(%esp), %ebx # 4-byte Reload + sbbl 36(%ebp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl %ebx, %edi + sbbl 40(%ebp), %edi + sbbl $0, %ecx + andl $1, %ecx + jne .LBB172_2 +# BB#1: + movl %edi, %ebx +.LBB172_2: + testb %cl, %cl + movl 68(%esp), %ecx # 4-byte Reload + movl 64(%esp), %esi # 4-byte Reload + movl 60(%esp), %edi # 4-byte Reload + movl 56(%esp), %ebp # 4-byte Reload + jne .LBB172_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload + movl 4(%esp), %esi # 4-byte Reload + movl 8(%esp), %edi # 4-byte Reload + movl 12(%esp), %ebp # 4-byte Reload + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 24(%esp), %ecx # 4-byte Reload +.LBB172_4: + movl 100(%esp), %eax + movl %ecx, 44(%eax) + movl 72(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 76(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl %ebp, 56(%eax) + movl %edi, 60(%eax) + movl %esi, 64(%eax) + movl %edx, 68(%eax) + movl 52(%esp), %ecx # 4-byte Reload + movl 44(%esp), %edx # 4-byte Reload + jne .LBB172_6 +# BB#5: + movl 28(%esp), %edx # 4-byte Reload +.LBB172_6: + movl %edx, 72(%eax) + movl 48(%esp), %edx # 4-byte Reload + jne .LBB172_8 +# BB#7: + movl 32(%esp), %edx # 4-byte Reload +.LBB172_8: + movl %edx, 76(%eax) + jne .LBB172_10 +# BB#9: + movl 36(%esp), %ecx # 4-byte Reload +.LBB172_10: + movl %ecx, 80(%eax) + movl %ebx, 84(%eax) + addl $80, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end172: + .size mcl_fpDbl_add11Lbmi2, .Lfunc_end172-mcl_fpDbl_add11Lbmi2 + + .globl mcl_fpDbl_sub11Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub11Lbmi2,@function +mcl_fpDbl_sub11Lbmi2: # @mcl_fpDbl_sub11Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 96(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %esi + movl 100(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %esi + movl 8(%edx), %edi + sbbl 8(%ebp), %edi + movl 92(%esp), %ecx + movl %eax, (%ecx) + movl 12(%edx), %eax + sbbl 12(%ebp), %eax + movl %esi, 4(%ecx) + movl 16(%edx), %esi + sbbl 16(%ebp), %esi + movl %edi, 8(%ecx) + movl 20(%ebp), %edi + movl %eax, 12(%ecx) + movl 20(%edx), %eax + sbbl %edi, %eax + movl 24(%ebp), %edi + movl %esi, 16(%ecx) + movl 24(%edx), %esi + sbbl %edi, %esi + movl 28(%ebp), %edi + movl %eax, 20(%ecx) + movl 28(%edx), %eax + sbbl %edi, %eax + movl 32(%ebp), %edi + movl %esi, 24(%ecx) + movl 32(%edx), %esi + sbbl %edi, %esi + movl 36(%ebp), %edi + movl %eax, 28(%ecx) + movl 36(%edx), %eax + sbbl %edi, %eax + movl 40(%ebp), %edi + movl %esi, 32(%ecx) + movl 40(%edx), %esi + sbbl %edi, %esi + movl 44(%ebp), %edi + movl %eax, 36(%ecx) + movl 44(%edx), %eax + sbbl %edi, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%ebp), %eax + movl %esi, 40(%ecx) + movl 48(%edx), %esi + sbbl %eax, %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 52(%ebp), %eax + movl 52(%edx), 
%esi + sbbl %eax, %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 56(%ebp), %eax + movl 56(%edx), %esi + sbbl %eax, %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 60(%ebp), %eax + movl 60(%edx), %esi + sbbl %eax, %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 64(%ebp), %eax + movl 64(%edx), %esi + sbbl %eax, %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 68(%ebp), %eax + movl 68(%edx), %esi + sbbl %eax, %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 72(%ebp), %eax + movl 72(%edx), %esi + sbbl %eax, %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 76(%ebp), %eax + movl 76(%edx), %esi + sbbl %eax, %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 80(%ebp), %eax + movl 80(%edx), %esi + sbbl %eax, %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 84(%ebp), %eax + movl 84(%edx), %edx + sbbl %eax, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 104(%esp), %ebp + jne .LBB173_1 +# BB#2: + movl $0, 28(%esp) # 4-byte Folded Spill + jmp .LBB173_3 +.LBB173_1: + movl 40(%ebp), %edx + movl %edx, 28(%esp) # 4-byte Spill +.LBB173_3: + testb %al, %al + jne .LBB173_4 +# BB#5: + movl $0, 16(%esp) # 4-byte Folded Spill + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB173_6 +.LBB173_4: + movl (%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 4(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB173_6: + jne .LBB173_7 +# BB#8: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB173_9 +.LBB173_7: + movl 36(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB173_9: + jne .LBB173_10 +# BB#11: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB173_12 +.LBB173_10: + movl 32(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB173_12: + jne .LBB173_13 +# BB#14: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB173_15 +.LBB173_13: + movl 28(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB173_15: + jne .LBB173_16 +# BB#17: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB173_18 +.LBB173_16: + movl 24(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB173_18: + jne .LBB173_19 +# BB#20: + movl $0, %edx + jmp .LBB173_21 +.LBB173_19: + movl 20(%ebp), %edx +.LBB173_21: + jne .LBB173_22 +# BB#23: + movl $0, %edi + jmp .LBB173_24 +.LBB173_22: + movl 16(%ebp), %edi +.LBB173_24: + jne .LBB173_25 +# BB#26: + movl $0, %ebx + jmp .LBB173_27 +.LBB173_25: + movl 12(%ebp), %ebx +.LBB173_27: + jne .LBB173_28 +# BB#29: + xorl %ebp, %ebp + jmp .LBB173_30 +.LBB173_28: + movl 8(%ebp), %ebp +.LBB173_30: + movl 8(%esp), %esi # 4-byte Reload + addl 36(%esp), %esi # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %esi, 44(%ecx) + adcl 32(%esp), %ebp # 4-byte Folded Reload + movl %eax, 48(%ecx) + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 52(%ecx) + adcl 44(%esp), %edi # 4-byte Folded Reload + movl %ebx, 56(%ecx) + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edi, 60(%ecx) + movl (%esp), %esi # 4-byte Reload + adcl 52(%esp), %esi # 4-byte Folded Reload + movl %edx, 64(%ecx) + movl 4(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %esi, 68(%ecx) + movl 12(%esp), %edx # 4-byte Reload + adcl 60(%esp), %edx # 4-byte Folded Reload + movl %eax, 72(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %edx, 76(%ecx) + movl %eax, 80(%ecx) + movl 28(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%ecx) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl 
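+# mcl_fpDbl_add11Lbmi2 / mcl_fpDbl_sub11Lbmi2 (above): double-width (22-limb)
+# add and sub. The adcl (resp. sbbl) chain produces the raw 22-limb result;
+# the low 11 limbs are stored directly, and only the high 11 limbs are then
+# corrected modulo p: add11 conditionally subtracts p when the trial subl/sbbl
+# chain does not borrow, while sub11 conditionally adds p back when the final
+# sbbl leaves a borrow (the jne .LBB173_* blocks select p[i] or 0). Equivalent
+# C sketch of sub11, with subb32/addc32 as hypothetical borrow/carry helpers:
+#   uint32_t b = 0, c = 0;
+#   for (i = 0; i < 22; i++) z[i] = subb32(x[i], y[i], &b);
+#   uint32_t m = b ? 0xffffffff : 0;            /* mask from final borrow */
+#   for (i = 0; i < 11; i++) z[11+i] = addc32(z[11+i], p[i] & m, &c);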
+.Lfunc_end173: + .size mcl_fpDbl_sub11Lbmi2, .Lfunc_end173-mcl_fpDbl_sub11Lbmi2 + + .align 16, 0x90 + .type .LmulPv384x32,@function +.LmulPv384x32: # @mulPv384x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $36, %esp + movl %edx, %eax + movl 56(%esp), %edx + mulxl 4(%eax), %ebx, %edi + mulxl (%eax), %esi, %ebp + movl %esi, 32(%esp) # 4-byte Spill + addl %ebx, %ebp + movl %ebp, 28(%esp) # 4-byte Spill + mulxl 8(%eax), %ebx, %esi + adcl %edi, %ebx + movl %ebx, 24(%esp) # 4-byte Spill + mulxl 12(%eax), %edi, %ebx + adcl %esi, %edi + movl %edi, 20(%esp) # 4-byte Spill + mulxl 16(%eax), %esi, %edi + adcl %ebx, %esi + movl %esi, 16(%esp) # 4-byte Spill + mulxl 20(%eax), %esi, %ebx + adcl %edi, %esi + movl %esi, 12(%esp) # 4-byte Spill + mulxl 24(%eax), %esi, %edi + adcl %ebx, %esi + movl %esi, 8(%esp) # 4-byte Spill + mulxl 28(%eax), %ebx, %esi + adcl %edi, %ebx + mulxl 32(%eax), %edi, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl %esi, %edi + mulxl 36(%eax), %esi, %ebp + movl %ebp, (%esp) # 4-byte Spill + adcl 4(%esp), %esi # 4-byte Folded Reload + mulxl 40(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 32(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 28(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%ecx) + movl %ebx, 28(%ecx) + movl %edi, 32(%ecx) + movl %esi, 36(%ecx) + movl %edx, 40(%ecx) + movl 56(%esp), %edx + mulxl 44(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%ecx) + adcl $0, %edx + movl %edx, 48(%ecx) + movl %ecx, %eax + addl $36, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end174: + .size .LmulPv384x32, .Lfunc_end174-.LmulPv384x32 + + .globl mcl_fp_mulUnitPre12Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre12Lbmi2,@function +mcl_fp_mulUnitPre12Lbmi2: # @mcl_fp_mulUnitPre12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $92, %esp + calll .L175$pb +.L175$pb: + popl %ebx +.Ltmp26: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp26-.L175$pb), %ebx + movl 120(%esp), %eax + movl %eax, (%esp) + leal 40(%esp), %ecx + movl 116(%esp), %edx + calll .LmulPv384x32 + movl 88(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 72(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 68(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 60(%esp), %ebp + movl 56(%esp), %ebx + movl 52(%esp), %edi + movl 48(%esp), %esi + movl 40(%esp), %edx + movl 44(%esp), %ecx + movl 112(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl 
%ecx, 48(%eax) + addl $92, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end175: + .size mcl_fp_mulUnitPre12Lbmi2, .Lfunc_end175-mcl_fp_mulUnitPre12Lbmi2 + + .globl mcl_fpDbl_mulPre12Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre12Lbmi2,@function +mcl_fpDbl_mulPre12Lbmi2: # @mcl_fpDbl_mulPre12Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $220, %esp + calll .L176$pb +.L176$pb: + popl %ebx +.Ltmp27: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp27-.L176$pb), %ebx + movl %ebx, -164(%ebp) # 4-byte Spill + movl 16(%ebp), %esi + movl %esi, 8(%esp) + movl 12(%ebp), %edi + movl %edi, 4(%esp) + movl 8(%ebp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre6Lbmi2@PLT + leal 24(%esi), %eax + movl %eax, 8(%esp) + leal 24(%edi), %eax + movl %eax, 4(%esp) + movl 8(%ebp), %eax + leal 48(%eax), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre6Lbmi2@PLT + movl 40(%edi), %ebx + movl 36(%edi), %eax + movl 32(%edi), %edx + movl (%edi), %esi + movl 4(%edi), %ecx + addl 24(%edi), %esi + adcl 28(%edi), %ecx + movl %ecx, -172(%ebp) # 4-byte Spill + adcl 8(%edi), %edx + movl %edx, -188(%ebp) # 4-byte Spill + adcl 12(%edi), %eax + movl %eax, -168(%ebp) # 4-byte Spill + adcl 16(%edi), %ebx + movl %ebx, -180(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -112(%ebp) # 4-byte Spill + movl 16(%ebp), %edi + movl (%edi), %eax + addl 24(%edi), %eax + movl %eax, -136(%ebp) # 4-byte Spill + movl 4(%edi), %eax + adcl 28(%edi), %eax + movl %eax, -140(%ebp) # 4-byte Spill + movl 32(%edi), %eax + adcl 8(%edi), %eax + movl %eax, -144(%ebp) # 4-byte Spill + movl 36(%edi), %eax + adcl 12(%edi), %eax + movl %eax, -148(%ebp) # 4-byte Spill + movl 40(%edi), %ecx + adcl 16(%edi), %ecx + movl 44(%edi), %eax + adcl 20(%edi), %eax + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -184(%ebp) # 4-byte Spill + movl %ebx, %edi + movl %edx, -156(%ebp) # 4-byte Spill + movl %esi, -160(%ebp) # 4-byte Spill + movl %esi, %edx + jb .LBB176_2 +# BB#1: + xorl %edi, %edi + movl $0, -156(%ebp) # 4-byte Folded Spill + movl $0, -160(%ebp) # 4-byte Folded Spill +.LBB176_2: + movl %edi, -176(%ebp) # 4-byte Spill + movl 12(%ebp), %esi + movl 44(%esi), %edi + movl -112(%ebp), %ebx # 4-byte Reload + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + adcl 20(%esi), %edi + movl %edi, -132(%ebp) # 4-byte Spill + movl %eax, -124(%ebp) # 4-byte Spill + movl %ecx, -112(%ebp) # 4-byte Spill + movl -148(%ebp), %esi # 4-byte Reload + movl %esi, -116(%ebp) # 4-byte Spill + movl -144(%ebp), %esi # 4-byte Reload + movl %esi, -120(%ebp) # 4-byte Spill + movl -140(%ebp), %esi # 4-byte Reload + movl %esi, -128(%ebp) # 4-byte Spill + movl -136(%ebp), %esi # 4-byte Reload + movl %esi, -152(%ebp) # 4-byte Spill + jb .LBB176_4 +# BB#3: + movl $0, -124(%ebp) # 4-byte Folded Spill + movl $0, -112(%ebp) # 4-byte Folded Spill + movl $0, -116(%ebp) # 4-byte Folded Spill + movl $0, -120(%ebp) # 4-byte Folded Spill + movl $0, -128(%ebp) # 4-byte Folded Spill + movl $0, -152(%ebp) # 4-byte Folded Spill +.LBB176_4: + movl %edx, -84(%ebp) + movl -172(%ebp), %esi # 4-byte Reload + movl %esi, -80(%ebp) + movl -188(%ebp), %edx # 4-byte Reload + movl %edx, -76(%ebp) + movl -168(%ebp), %edi # 4-byte Reload + movl %edi, -72(%ebp) + movl -180(%ebp), %edx # 4-byte Reload + movl %edx, -68(%ebp) + movl -136(%ebp), %edx # 4-byte Reload + movl %edx, -108(%ebp) + movl -140(%ebp), %edx # 4-byte Reload + movl %edx, -104(%ebp) + movl -144(%ebp), %edx # 4-byte Reload + movl %edx, 
-100(%ebp) + movl -148(%ebp), %edx # 4-byte Reload + movl %edx, -96(%ebp) + movl %ecx, -92(%ebp) + movl %eax, -88(%ebp) + movl %edi, %ebx + sbbl %edx, %edx + movl -132(%ebp), %eax # 4-byte Reload + movl %eax, -64(%ebp) + movl -184(%ebp), %ecx # 4-byte Reload + pushl %eax + movl %ecx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB176_6 +# BB#5: + movl $0, %eax + movl $0, %ebx + movl $0, %esi +.LBB176_6: + movl %eax, -132(%ebp) # 4-byte Spill + sbbl %eax, %eax + leal -108(%ebp), %ecx + movl %ecx, 8(%esp) + leal -84(%ebp), %ecx + movl %ecx, 4(%esp) + leal -60(%ebp), %ecx + movl %ecx, (%esp) + andl %eax, %edx + movl -152(%ebp), %edi # 4-byte Reload + addl -160(%ebp), %edi # 4-byte Folded Reload + adcl %esi, -128(%ebp) # 4-byte Folded Spill + movl -156(%ebp), %eax # 4-byte Reload + adcl %eax, -120(%ebp) # 4-byte Folded Spill + adcl %ebx, -116(%ebp) # 4-byte Folded Spill + movl -176(%ebp), %eax # 4-byte Reload + adcl %eax, -112(%ebp) # 4-byte Folded Spill + movl -132(%ebp), %eax # 4-byte Reload + adcl %eax, -124(%ebp) # 4-byte Folded Spill + sbbl %esi, %esi + andl $1, %esi + andl $1, %edx + movl %edx, -132(%ebp) # 4-byte Spill + movl -164(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre6Lbmi2@PLT + addl -36(%ebp), %edi + movl -128(%ebp), %eax # 4-byte Reload + adcl -32(%ebp), %eax + movl %eax, -128(%ebp) # 4-byte Spill + movl -120(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -120(%ebp) # 4-byte Spill + movl -116(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -116(%ebp) # 4-byte Spill + movl -112(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -112(%ebp) # 4-byte Spill + movl -124(%ebp), %eax # 4-byte Reload + adcl -16(%ebp), %eax + movl %eax, -124(%ebp) # 4-byte Spill + adcl %esi, -132(%ebp) # 4-byte Folded Spill + movl -60(%ebp), %ecx + movl 8(%ebp), %eax + subl (%eax), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + movl -56(%ebp), %esi + sbbl 4(%eax), %esi + movl -52(%ebp), %ecx + sbbl 8(%eax), %ecx + movl %ecx, -136(%ebp) # 4-byte Spill + movl -48(%ebp), %edx + sbbl 12(%eax), %edx + movl -44(%ebp), %ebx + sbbl 16(%eax), %ebx + movl -40(%ebp), %ecx + sbbl 20(%eax), %ecx + movl %ecx, -140(%ebp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, -148(%ebp) # 4-byte Spill + sbbl %ecx, %edi + movl 28(%eax), %ecx + movl %ecx, -152(%ebp) # 4-byte Spill + sbbl %ecx, -128(%ebp) # 4-byte Folded Spill + movl 32(%eax), %ecx + movl %ecx, -156(%ebp) # 4-byte Spill + sbbl %ecx, -120(%ebp) # 4-byte Folded Spill + movl 36(%eax), %ecx + movl %ecx, -160(%ebp) # 4-byte Spill + sbbl %ecx, -116(%ebp) # 4-byte Folded Spill + movl 40(%eax), %ecx + movl %ecx, -164(%ebp) # 4-byte Spill + sbbl %ecx, -112(%ebp) # 4-byte Folded Spill + movl 44(%eax), %ecx + movl %ecx, -168(%ebp) # 4-byte Spill + sbbl %ecx, -124(%ebp) # 4-byte Folded Spill + sbbl $0, -132(%ebp) # 4-byte Folded Spill + movl 48(%eax), %ecx + movl %ecx, -192(%ebp) # 4-byte Spill + subl %ecx, -144(%ebp) # 4-byte Folded Spill + movl 52(%eax), %ecx + movl %ecx, -196(%ebp) # 4-byte Spill + sbbl %ecx, %esi + movl 56(%eax), %ecx + movl %ecx, -200(%ebp) # 4-byte Spill + sbbl %ecx, -136(%ebp) # 4-byte Folded Spill + movl 60(%eax), %ecx + movl %ecx, -204(%ebp) # 4-byte Spill + sbbl %ecx, %edx + movl 64(%eax), %ecx + movl %ecx, -208(%ebp) # 4-byte Spill + sbbl %ecx, %ebx + movl 68(%eax), %ecx + movl %ecx, -212(%ebp) # 4-byte Spill + sbbl %ecx, -140(%ebp) # 4-byte Folded Spill + movl 72(%eax), %ecx + movl %ecx, -216(%ebp) # 4-byte Spill + sbbl %ecx, %edi + movl 76(%eax), %ecx + movl %ecx, -172(%ebp) # 4-byte 
Spill + sbbl %ecx, -128(%ebp) # 4-byte Folded Spill + movl 80(%eax), %ecx + movl %ecx, -176(%ebp) # 4-byte Spill + sbbl %ecx, -120(%ebp) # 4-byte Folded Spill + movl 84(%eax), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + sbbl %ecx, -116(%ebp) # 4-byte Folded Spill + movl 88(%eax), %ecx + movl %ecx, -184(%ebp) # 4-byte Spill + sbbl %ecx, -112(%ebp) # 4-byte Folded Spill + movl 92(%eax), %ecx + movl %ecx, -188(%ebp) # 4-byte Spill + sbbl %ecx, -124(%ebp) # 4-byte Folded Spill + sbbl $0, -132(%ebp) # 4-byte Folded Spill + movl -144(%ebp), %ecx # 4-byte Reload + addl -148(%ebp), %ecx # 4-byte Folded Reload + adcl -152(%ebp), %esi # 4-byte Folded Reload + movl %ecx, 24(%eax) + movl -136(%ebp), %ecx # 4-byte Reload + adcl -156(%ebp), %ecx # 4-byte Folded Reload + movl %esi, 28(%eax) + adcl -160(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 32(%eax) + adcl -164(%ebp), %ebx # 4-byte Folded Reload + movl %edx, 36(%eax) + movl -140(%ebp), %ecx # 4-byte Reload + adcl -168(%ebp), %ecx # 4-byte Folded Reload + movl %ebx, 40(%eax) + adcl -192(%ebp), %edi # 4-byte Folded Reload + movl %ecx, 44(%eax) + movl -128(%ebp), %ecx # 4-byte Reload + adcl -196(%ebp), %ecx # 4-byte Folded Reload + movl %edi, 48(%eax) + movl -120(%ebp), %edx # 4-byte Reload + adcl -200(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 52(%eax) + movl -116(%ebp), %ecx # 4-byte Reload + adcl -204(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 56(%eax) + movl -112(%ebp), %edx # 4-byte Reload + adcl -208(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 60(%eax) + movl -124(%ebp), %ecx # 4-byte Reload + adcl -212(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 64(%eax) + movl -132(%ebp), %edx # 4-byte Reload + adcl -216(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 68(%eax) + movl %edx, 72(%eax) + movl -172(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 76(%eax) + movl -176(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 80(%eax) + movl -180(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 84(%eax) + movl -184(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 88(%eax) + movl -188(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 92(%eax) + addl $220, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end176: + .size mcl_fpDbl_mulPre12Lbmi2, .Lfunc_end176-mcl_fpDbl_mulPre12Lbmi2 + + .globl mcl_fpDbl_sqrPre12Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre12Lbmi2,@function +mcl_fpDbl_sqrPre12Lbmi2: # @mcl_fpDbl_sqrPre12Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $220, %esp + calll .L177$pb +.L177$pb: + popl %ebx +.Ltmp28: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp28-.L177$pb), %ebx + movl %ebx, -152(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + movl %edi, 8(%esp) + movl %edi, 4(%esp) + movl 8(%ebp), %esi + movl %esi, (%esp) + calll mcl_fpDbl_mulPre6Lbmi2@PLT + leal 24(%edi), %eax + movl %eax, 8(%esp) + movl %eax, 4(%esp) + leal 48(%esi), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre6Lbmi2@PLT + movl 44(%edi), %eax + movl %eax, -136(%ebp) # 4-byte Spill + movl 40(%edi), %edx + movl 36(%edi), %eax + movl (%edi), %ebx + movl 4(%edi), %esi + addl 24(%edi), %ebx + adcl 28(%edi), %esi + movl 32(%edi), %ecx + adcl 8(%edi), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + adcl 12(%edi), %eax + movl %eax, -140(%ebp) # 4-byte Spill + adcl 16(%edi), %edx + movl %edx, %ecx + movl -136(%ebp), %eax # 4-byte Reload + adcl 20(%edi), %eax + movl %eax, -136(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %edx + movl %edx, -156(%ebp) # 4-byte Spill + 
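+# mcl_fpDbl_mulPre12Lbmi2 (above) and mcl_fpDbl_sqrPre12Lbmi2 (here) apply one
+# level of Karatsuba over mcl_fpDbl_mulPre6Lbmi2@PLT: with 6-limb halves
+# a = aL + aH*B^6, b = bL + bH*B^6, the three PLT calls compute t0 = aL*bL,
+# t2 = aH*bH and t1 = (aL+aH)*(bL+bH), combined as
+#   result = t0 + (t1 - t0 - t2)*B^6 + t2*B^12.
+# The seto %al / lahf ... addb $127, %al / sahf pairs save and restore the
+# carry flag of the half-sums across the calls, and the jb .LBB17?_* blocks
+# fold the carry-outs of aL+aH and bL+bH into the middle product via masked
+# copies. Hedged sketch (hypothetical helper names):
+#   mulPre6(t0, aL, bL); mulPre6(t2, aH, bH); mulPre6(t1, aLpH, bLpH);
+#   sub_inplace(t1, t0); sub_inplace(t1, t2);
+#   add_shifted(z, t1, 6);                      /* add at limb offset 6 */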
pushl %eax + seto %al + lahf + movl %eax, %edx + popl %eax + movl %edx, -124(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -120(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %edx + sbbl %edi, %edi + movl %edi, -148(%ebp) # 4-byte Spill + movl %ebx, %edi + addl %edi, %edi + movl %edi, -112(%ebp) # 4-byte Spill + movl %esi, %edi + movl %esi, %eax + adcl %edi, %edi + movl %edi, -132(%ebp) # 4-byte Spill + pushl %eax + movl %edx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB177_2 +# BB#1: + movl $0, -132(%ebp) # 4-byte Folded Spill + movl $0, -112(%ebp) # 4-byte Folded Spill +.LBB177_2: + movl -144(%ebp), %esi # 4-byte Reload + addl %esi, %esi + movl -140(%ebp), %edx # 4-byte Reload + adcl %edx, %edx + movl %edx, -116(%ebp) # 4-byte Spill + movl -120(%ebp), %edx # 4-byte Reload + pushl %eax + movl %edx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB177_3 +# BB#4: + movl $0, -116(%ebp) # 4-byte Folded Spill + movl $0, -120(%ebp) # 4-byte Folded Spill + jmp .LBB177_5 +.LBB177_3: + movl %eax, %edx + shrl $31, %edx + orl %esi, %edx + movl %edx, -120(%ebp) # 4-byte Spill +.LBB177_5: + movl -136(%ebp), %edx # 4-byte Reload + movl %ecx, %esi + addl %esi, %esi + adcl %edx, %edx + movl -124(%ebp), %edi # 4-byte Reload + pushl %eax + movl %edi, %eax + addb $127, %al + sahf + popl %eax + jb .LBB177_6 +# BB#7: + xorl %edx, %edx + movl $0, -128(%ebp) # 4-byte Folded Spill + movl -140(%ebp), %edi # 4-byte Reload + jmp .LBB177_8 +.LBB177_6: + movl %ecx, -124(%ebp) # 4-byte Spill + movl -140(%ebp), %edi # 4-byte Reload + movl %edi, %ecx + shrl $31, %ecx + orl %esi, %ecx + movl %ecx, -128(%ebp) # 4-byte Spill + movl -124(%ebp), %ecx # 4-byte Reload +.LBB177_8: + movl %edx, -124(%ebp) # 4-byte Spill + movl %ebx, -84(%ebp) + movl %eax, -80(%ebp) + movl -144(%ebp), %esi # 4-byte Reload + movl %esi, -76(%ebp) + movl %edi, -72(%ebp) + movl %ecx, -68(%ebp) + movl -136(%ebp), %edx # 4-byte Reload + movl %edx, -64(%ebp) + movl %ebx, -108(%ebp) + movl %eax, -104(%ebp) + movl %esi, -100(%ebp) + movl %edi, -96(%ebp) + movl %ecx, -92(%ebp) + movl %edx, -88(%ebp) + movl -156(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB177_9 +# BB#10: + movl $0, -136(%ebp) # 4-byte Folded Spill + jmp .LBB177_11 +.LBB177_9: + shrl $31, %edx + movl %edx, -136(%ebp) # 4-byte Spill +.LBB177_11: + leal -108(%ebp), %eax + movl %eax, 8(%esp) + leal -84(%ebp), %eax + movl %eax, 4(%esp) + leal -60(%ebp), %eax + movl %eax, (%esp) + movl -148(%ebp), %esi # 4-byte Reload + andl $1, %esi + movl -152(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre6Lbmi2@PLT + movl -112(%ebp), %eax # 4-byte Reload + addl -36(%ebp), %eax + movl %eax, -112(%ebp) # 4-byte Spill + movl -132(%ebp), %edi # 4-byte Reload + adcl -32(%ebp), %edi + movl -120(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -120(%ebp) # 4-byte Spill + movl -116(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -116(%ebp) # 4-byte Spill + movl -128(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -128(%ebp) # 4-byte Spill + movl -124(%ebp), %eax # 4-byte Reload + adcl -16(%ebp), %eax + movl %eax, -124(%ebp) # 4-byte Spill + adcl -136(%ebp), %esi # 4-byte Folded Reload + movl -60(%ebp), %edx + movl 8(%ebp), %eax + subl (%eax), %edx + movl -56(%ebp), %ebx + sbbl 4(%eax), %ebx + movl -52(%ebp), %ecx + sbbl 8(%eax), %ecx + movl %ecx, -136(%ebp) # 4-byte Spill + movl -48(%ebp), %ecx + sbbl 12(%eax), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + movl -44(%ebp), %ecx + sbbl 16(%eax), 
%ecx + movl %ecx, -172(%ebp) # 4-byte Spill + movl -40(%ebp), %ecx + sbbl 20(%eax), %ecx + movl %ecx, -140(%ebp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, -148(%ebp) # 4-byte Spill + sbbl %ecx, -112(%ebp) # 4-byte Folded Spill + movl 28(%eax), %ecx + movl %ecx, -152(%ebp) # 4-byte Spill + sbbl %ecx, %edi + movl %edi, -132(%ebp) # 4-byte Spill + movl 32(%eax), %ecx + movl %ecx, -156(%ebp) # 4-byte Spill + sbbl %ecx, -120(%ebp) # 4-byte Folded Spill + movl 36(%eax), %ecx + movl %ecx, -160(%ebp) # 4-byte Spill + sbbl %ecx, -116(%ebp) # 4-byte Folded Spill + movl 40(%eax), %ecx + movl %ecx, -164(%ebp) # 4-byte Spill + sbbl %ecx, -128(%ebp) # 4-byte Folded Spill + movl 44(%eax), %ecx + movl %ecx, -168(%ebp) # 4-byte Spill + sbbl %ecx, -124(%ebp) # 4-byte Folded Spill + sbbl $0, %esi + movl 48(%eax), %ecx + movl %ecx, -192(%ebp) # 4-byte Spill + subl %ecx, %edx + movl 52(%eax), %ecx + movl %ecx, -196(%ebp) # 4-byte Spill + sbbl %ecx, %ebx + movl 56(%eax), %ecx + movl %ecx, -200(%ebp) # 4-byte Spill + sbbl %ecx, -136(%ebp) # 4-byte Folded Spill + movl 60(%eax), %ecx + movl %ecx, -204(%ebp) # 4-byte Spill + sbbl %ecx, -144(%ebp) # 4-byte Folded Spill + movl 64(%eax), %ecx + movl %ecx, -208(%ebp) # 4-byte Spill + movl -172(%ebp), %edi # 4-byte Reload + sbbl %ecx, %edi + movl 68(%eax), %ecx + movl %ecx, -212(%ebp) # 4-byte Spill + sbbl %ecx, -140(%ebp) # 4-byte Folded Spill + movl 72(%eax), %ecx + movl %ecx, -216(%ebp) # 4-byte Spill + sbbl %ecx, -112(%ebp) # 4-byte Folded Spill + movl 76(%eax), %ecx + movl %ecx, -172(%ebp) # 4-byte Spill + sbbl %ecx, -132(%ebp) # 4-byte Folded Spill + movl 80(%eax), %ecx + movl %ecx, -176(%ebp) # 4-byte Spill + sbbl %ecx, -120(%ebp) # 4-byte Folded Spill + movl 84(%eax), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + sbbl %ecx, -116(%ebp) # 4-byte Folded Spill + movl 88(%eax), %ecx + movl %ecx, -184(%ebp) # 4-byte Spill + sbbl %ecx, -128(%ebp) # 4-byte Folded Spill + movl 92(%eax), %ecx + movl %ecx, -188(%ebp) # 4-byte Spill + sbbl %ecx, -124(%ebp) # 4-byte Folded Spill + sbbl $0, %esi + addl -148(%ebp), %edx # 4-byte Folded Reload + adcl -152(%ebp), %ebx # 4-byte Folded Reload + movl %edx, 24(%eax) + movl -136(%ebp), %ecx # 4-byte Reload + adcl -156(%ebp), %ecx # 4-byte Folded Reload + movl %ebx, 28(%eax) + movl -144(%ebp), %edx # 4-byte Reload + adcl -160(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 32(%eax) + adcl -164(%ebp), %edi # 4-byte Folded Reload + movl %edx, 36(%eax) + movl -140(%ebp), %edx # 4-byte Reload + adcl -168(%ebp), %edx # 4-byte Folded Reload + movl %edi, 40(%eax) + movl -112(%ebp), %ecx # 4-byte Reload + adcl -192(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 44(%eax) + movl -132(%ebp), %edi # 4-byte Reload + adcl -196(%ebp), %edi # 4-byte Folded Reload + movl %ecx, 48(%eax) + movl -120(%ebp), %edx # 4-byte Reload + adcl -200(%ebp), %edx # 4-byte Folded Reload + movl %edi, 52(%eax) + movl -116(%ebp), %ecx # 4-byte Reload + adcl -204(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 56(%eax) + movl -128(%ebp), %edx # 4-byte Reload + adcl -208(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 60(%eax) + movl -124(%ebp), %ecx # 4-byte Reload + adcl -212(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 64(%eax) + adcl -216(%ebp), %esi # 4-byte Folded Reload + movl %ecx, 68(%eax) + movl %esi, 72(%eax) + movl -172(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 76(%eax) + movl -176(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 80(%eax) + movl -180(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 84(%eax) + movl 
-184(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 88(%eax) + movl -188(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 92(%eax) + addl $220, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end177: + .size mcl_fpDbl_sqrPre12Lbmi2, .Lfunc_end177-mcl_fpDbl_sqrPre12Lbmi2 + + .globl mcl_fp_mont12Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont12Lbmi2,@function +mcl_fp_mont12Lbmi2: # @mcl_fp_mont12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1436, %esp # imm = 0x59C + calll .L178$pb +.L178$pb: + popl %ebx +.Ltmp29: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp29-.L178$pb), %ebx + movl 1468(%esp), %eax + movl -4(%eax), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1384(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 1384(%esp), %ebp + movl 1388(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 1432(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 1428(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 1424(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 1420(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1416(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1412(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1408(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1404(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1400(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1396(%esp), %edi + movl 1392(%esp), %esi + movl %eax, (%esp) + leal 1328(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + addl 1328(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1336(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + adcl 1340(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1360(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 1372(%esp), %esi + movl 92(%esp), %ebp # 4-byte Reload + adcl 1376(%esp), %ebp + sbbl %edi, %edi + movl 1464(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1272(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + andl $1, %edi + movl 84(%esp), %ecx # 4-byte Reload + addl 1272(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + 
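+# mcl_fp_mont12Lbmi2: word-serial Montgomery multiplication (CIOS). Each of
+# the 12 iterations makes two .LmulPv384x32 calls into a fresh 56-byte stack
+# buffer (leal 1384(%esp) stepping down to leal 96(%esp)): the first adds
+# a*b[i] into the accumulator, then m = acc[0] * n' (the imull against
+# -4(modulus ptr), i.e. the cached -p^-1 mod 2^32) and the second call adds
+# p*m, which zeroes acc[0] so the following adcl chain is effectively the
+# >>32 shift. Hedged C sketch of one round (hypothetical names):
+#   acc += (wide_t)a * b[i];                /* first .LmulPv384x32 */
+#   uint32_t m = (uint32_t)acc0 * nprime;
+#   acc += (wide_t)p * m; acc >>= 32;       /* second call, limbs relabeled */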
movl 60(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1312(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 1316(%esp), %ebp + adcl 1320(%esp), %edi + sbbl %eax, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1216(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + movl 84(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1216(%esp), %esi + movl 72(%esp), %ecx # 4-byte Reload + adcl 1220(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 1224(%esp), %esi + movl 56(%esp), %ecx # 4-byte Reload + adcl 1228(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1232(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1236(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1240(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1244(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1248(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1252(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1256(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 1260(%esp), %ebp + adcl 1264(%esp), %edi + adcl $0, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1160(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 1160(%esp), %ecx + adcl 1164(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1188(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 1200(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + adcl 1204(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1208(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1104(%esp), %ecx + movl 1468(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + andl $1, %ebp + movl %ebp, %ecx + addl 1104(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 48(%esp) # 4-byte 
Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 1140(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1148(%esp), %edi + movl 84(%esp), %ebp # 4-byte Reload + adcl 1152(%esp), %ebp + adcl $0, %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1048(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 68(%esp), %ecx # 4-byte Reload + addl 1048(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1080(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1088(%esp), %edi + adcl 1092(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %ebp + movl %ebp, %eax + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %esi + movl %esi, %eax + addl 992(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + adcl 996(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1000(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 1004(%esp), %ebp + movl 52(%esp), %ecx # 4-byte Reload + adcl 1008(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1012(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1016(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1020(%esp), %esi + movl 80(%esp), %ecx # 4-byte Reload + adcl 1024(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1028(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1032(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1036(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1040(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl %eax, %edi + adcl $0, %edi + movl 1464(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 936(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 56(%esp), %ecx # 
4-byte Reload + addl 936(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 944(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 948(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 960(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 980(%esp), %esi + adcl 984(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 880(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %edi + movl %edi, %ecx + movl 56(%esp), %eax # 4-byte Reload + addl 880(%esp), %eax + movl 48(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 892(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 912(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 924(%esp), %esi + movl %esi, %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 824(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 48(%esp), %ecx # 4-byte Reload + addl 824(%esp), %ecx + movl 44(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 840(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 852(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 864(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + 
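+# The recurring "sbbl %r, %r / andl $1, %r" pairs (and the matching
+# "adcl $0, %r" later) materialize the carry flag as a 0/1 word so it
+# survives the next .LmulPv384x32 call: sbbl %r, %r turns CF into 0 or
+# 0xffffffff, and andl $1 reduces that to the single carry bit that is
+# re-added on the following pass.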
movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %edi + movl %edi, %eax + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 768(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %ebp + movl %ebp, %ecx + addl 768(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 780(%esp), %ebp + adcl 784(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 800(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 808(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 1460(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + movl 44(%esp), %eax # 4-byte Reload + addl 712(%esp), %eax + movl 52(%esp), %ecx # 4-byte Reload + adcl 716(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 720(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 724(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 728(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 732(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 736(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 740(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 744(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 748(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 752(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 756(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 760(%esp), %edi + sbbl %ebp, %ebp + movl %eax, %esi + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 656(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %ebp + movl %ebp, %eax + addl 656(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + adcl 660(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 664(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 668(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 672(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 676(%esp), %ebp + movl 92(%esp), %ecx # 4-byte Reload + adcl 680(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 684(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 84(%esp), 
%ecx # 4-byte Reload + adcl 688(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 692(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 696(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 704(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl %eax, %edi + adcl $0, %edi + movl 1464(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 600(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 52(%esp), %ecx # 4-byte Reload + addl 600(%esp), %ecx + adcl 604(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 616(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 620(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 636(%esp), %esi + movl 56(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 648(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + movl 44(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 544(%esp), %edi + movl 60(%esp), %ecx # 4-byte Reload + adcl 548(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 552(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 556(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 560(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 564(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 568(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 572(%esp), %edi + movl 72(%esp), %ecx # 4-byte Reload + adcl 576(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 580(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 584(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 588(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 592(%esp), %ebp + adcl $0, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 488(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 488(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + 
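+# Between rounds the 12-word accumulator lives in the 44(%esp)..92(%esp)
+# spill slots plus two or three live registers; the adcl blocks after each
+# .LmulPv384x32 call fold the 13-word partial product into those slots, and
+# which limbs stay resident in %esi/%edi/%ebp rotates from round to round,
+# which is why the reload/spill pairing differs slightly in each block.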
movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 512(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 524(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 532(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 536(%esp), %ebp + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 432(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %edi + movl %edi, %ecx + addl 432(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 440(%esp), %edi + movl 80(%esp), %esi # 4-byte Reload + adcl 444(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 480(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 76(%esp), %ecx # 4-byte Reload + addl 376(%esp), %ecx + adcl 380(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + adcl 384(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 392(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 416(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 320(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + movl 76(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 320(%esp), 
%esi + movl 88(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 332(%esp), %esi + adcl 336(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 344(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 360(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 88(%esp), %ecx # 4-byte Reload + addl 264(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 272(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 284(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 288(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 296(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + movl 88(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 208(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 224(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 232(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 236(%esp), %edi + adcl 240(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 248(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 
256(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 152(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 80(%esp), %ecx # 4-byte Reload + addl 152(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 164(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 176(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 188(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 196(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + sbbl %esi, %esi + movl 40(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 96(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %esi + addl 96(%esp), %edi + movl 84(%esp), %ebx # 4-byte Reload + movl 92(%esp), %eax # 4-byte Reload + movl 72(%esp), %ecx # 4-byte Reload + adcl 100(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl %edx, %edi + adcl 108(%esp), %ebx + adcl 112(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 120(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 124(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 128(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 132(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 136(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl 140(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl %edx, %ebp + adcl $0, %esi + movl 1468(%esp), %edx + subl (%edx), %eax + sbbl 4(%edx), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl %ebx, %edi + sbbl 8(%edx), %edi + movl %edi, 20(%esp) # 4-byte Spill + sbbl 12(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + sbbl 16(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + sbbl 20(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + sbbl 24(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + sbbl 28(%edx), %ecx + movl 44(%esp), %edi # 4-byte Reload + sbbl 32(%edx), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + sbbl 36(%edx), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + sbbl 40(%edx), %edi + movl %edi, 
84(%esp) # 4-byte Spill + sbbl 44(%edx), %ebp + movl %ebp, %edx + sbbl $0, %esi + andl $1, %esi + jne .LBB178_2 +# BB#1: + movl %ecx, 52(%esp) # 4-byte Spill +.LBB178_2: + movl %esi, %ecx + testb %cl, %cl + movl 92(%esp), %ecx # 4-byte Reload + jne .LBB178_4 +# BB#3: + movl %eax, %ecx +.LBB178_4: + movl 1456(%esp), %eax + movl %ecx, (%eax) + movl 68(%esp), %edi # 4-byte Reload + jne .LBB178_6 +# BB#5: + movl 16(%esp), %edi # 4-byte Reload +.LBB178_6: + movl %edi, 4(%eax) + movl 64(%esp), %ebp # 4-byte Reload + jne .LBB178_8 +# BB#7: + movl 20(%esp), %ebx # 4-byte Reload +.LBB178_8: + movl %ebx, 8(%eax) + jne .LBB178_10 +# BB#9: + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 72(%esp) # 4-byte Spill +.LBB178_10: + movl 72(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + jne .LBB178_12 +# BB#11: + movl 28(%esp), %ebp # 4-byte Reload +.LBB178_12: + movl %ebp, 16(%eax) + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB178_14 +# BB#13: + movl 32(%esp), %ecx # 4-byte Reload +.LBB178_14: + movl %ecx, 20(%eax) + movl 48(%esp), %ecx # 4-byte Reload + jne .LBB178_16 +# BB#15: + movl 36(%esp), %ecx # 4-byte Reload +.LBB178_16: + movl %ecx, 24(%eax) + movl 52(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 44(%esp), %ecx # 4-byte Reload + jne .LBB178_18 +# BB#17: + movl 40(%esp), %ecx # 4-byte Reload +.LBB178_18: + movl %ecx, 32(%eax) + movl 60(%esp), %ecx # 4-byte Reload + jne .LBB178_20 +# BB#19: + movl 80(%esp), %ecx # 4-byte Reload +.LBB178_20: + movl %ecx, 36(%eax) + movl 76(%esp), %ecx # 4-byte Reload + jne .LBB178_22 +# BB#21: + movl 84(%esp), %ecx # 4-byte Reload +.LBB178_22: + movl %ecx, 40(%eax) + movl 88(%esp), %ecx # 4-byte Reload + jne .LBB178_24 +# BB#23: + movl %edx, %ecx +.LBB178_24: + movl %ecx, 44(%eax) + addl $1436, %esp # imm = 0x59C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end178: + .size mcl_fp_mont12Lbmi2, .Lfunc_end178-mcl_fp_mont12Lbmi2 + + .globl mcl_fp_montNF12Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF12Lbmi2,@function +mcl_fp_montNF12Lbmi2: # @mcl_fp_montNF12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1420, %esp # imm = 0x58C + calll .L179$pb +.L179$pb: + popl %ebx +.Ltmp30: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp30-.L179$pb), %ebx + movl 1452(%esp), %eax + movl -4(%eax), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1368(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 1368(%esp), %ebp + movl 1372(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 1416(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1412(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1408(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1404(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1400(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1396(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1392(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 1388(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1384(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1380(%esp), %edi + movl 1376(%esp), %esi + movl %eax, (%esp) + leal 1312(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 1312(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1320(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + adcl 1324(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte 
Reload + adcl 1328(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 1344(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 1356(%esp), %esi + movl 76(%esp), %ebp # 4-byte Reload + adcl 1360(%esp), %ebp + movl 1448(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1256(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 1304(%esp), %eax + movl 56(%esp), %edx # 4-byte Reload + addl 1256(%esp), %edx + movl 68(%esp), %ecx # 4-byte Reload + adcl 1260(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1264(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1268(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1272(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 1276(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1280(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 1284(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1288(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 1296(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 1300(%esp), %ebp + adcl $0, %eax + movl %eax, %edi + movl %edx, %esi + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1200(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 1200(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 1204(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 1208(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 1212(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1216(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1244(%esp), %ebp + adcl 1248(%esp), %edi + movl 1448(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1144(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 1192(%esp), %eax + movl 68(%esp), %edx # 4-byte Reload + addl 1144(%esp), %edx + adcl 1148(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1152(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte 
Reload + adcl 1156(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 1160(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 1164(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1168(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1172(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1176(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1180(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 1184(%esp), %ebp + adcl 1188(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1088(%esp), %ecx + movl 1452(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + addl 1088(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl %esi, %edi + adcl 1104(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 1124(%esp), %esi + adcl 1128(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 1136(%esp), %ebp + movl 1448(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1032(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 1080(%esp), %eax + movl 52(%esp), %edx # 4-byte Reload + addl 1032(%esp), %edx + movl 40(%esp), %ecx # 4-byte Reload + adcl 1036(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1040(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 1044(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1048(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1052(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1056(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1060(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 1064(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1068(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1072(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 1076(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %edi + movl %edx, %esi + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 976(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 976(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte 
Reload + adcl 984(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 1004(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 1012(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1024(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 920(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 968(%esp), %eax + movl 40(%esp), %edx # 4-byte Reload + addl 920(%esp), %edx + movl 36(%esp), %ecx # 4-byte Reload + adcl 924(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 928(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 932(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 936(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 940(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 944(%esp), %esi + movl 60(%esp), %ecx # 4-byte Reload + adcl 948(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 952(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 956(%esp), %edi + movl 56(%esp), %ecx # 4-byte Reload + adcl 960(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 964(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 864(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 864(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 884(%esp), %ebp + adcl 888(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 900(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 912(%esp), %edi + movl 1448(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 808(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 856(%esp), %edx + movl 36(%esp), %ecx # 4-byte Reload + addl 808(%esp), %ecx + movl 
32(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 824(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + adcl 828(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 832(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 852(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 752(%esp), %ecx + movl 1452(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + addl 752(%esp), %esi + movl 32(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 760(%esp), %edi + movl 44(%esp), %esi # 4-byte Reload + adcl 764(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 776(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 792(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1448(%esp), %ecx + movl %ecx, %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 1444(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + movl 744(%esp), %ecx + movl 32(%esp), %eax # 4-byte Reload + addl 696(%esp), %eax + adcl 700(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + adcl 704(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 708(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 712(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 716(%esp), %esi + movl 76(%esp), %edx # 4-byte Reload + adcl 720(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 724(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 728(%esp), %edi + adcl 732(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 736(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 740(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl %eax, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 640(%esp), %ecx + movl 1452(%esp), %edx + calll 
.LmulPv384x32 + addl 640(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 648(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 660(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 668(%esp), %esi + adcl 672(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 676(%esp), %edi + movl 40(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 584(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 632(%esp), %edx + movl 48(%esp), %ecx # 4-byte Reload + addl 584(%esp), %ecx + adcl 588(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 596(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 608(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 616(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 620(%esp), %edi + movl 36(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 528(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 528(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 540(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 564(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 568(%esp), %edi + movl 32(%esp), %esi # 4-byte Reload + adcl 572(%esp), %esi + movl 48(%esp), %ebp # 4-byte Reload + adcl 576(%esp), %ebp + movl 1448(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 520(%esp), %edx + movl 44(%esp), %ecx # 
4-byte Reload + addl 472(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 508(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + adcl 512(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + adcl 516(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 416(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 432(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 440(%esp), %ebp + movl 52(%esp), %esi # 4-byte Reload + adcl 444(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 360(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 408(%esp), %edx + movl 72(%esp), %ecx # 4-byte Reload + addl 360(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 372(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 380(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + adcl 384(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + 
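+# NOTE (annotation, not in the generated source): the repeating round shape in
+# mcl_fp_montNF12Lbmi2 matches operand-scanning Montgomery multiplication for a
+# twelve-limb (384-bit) element: each round feeds one 32-bit limb of the second
+# operand to .LmulPv384x32 and accumulates with adcl, then the imull against
+# 28(%esp), presumably the cached -p^(-1) mod 2^32, forms the quotient digit q
+# so that the follow-up q*p call clears the accumulator's low limb.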
imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 304(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 312(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 320(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 328(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 40(%esp), %esi # 4-byte Reload + adcl 332(%esp), %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 296(%esp), %edx + movl 64(%esp), %ecx # 4-byte Reload + addl 248(%esp), %ecx + adcl 252(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 260(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl %esi, %ebp + adcl 272(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 192(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 192(%esp), %esi + adcl 196(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 200(%esp), %edi + movl 68(%esp), %esi # 4-byte Reload + adcl 204(%esp), %esi + movl 56(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 216(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + adcl 224(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 
1448(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 136(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 184(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + addl 136(%esp), %ecx + adcl 140(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + adcl 144(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 160(%esp), %edi + adcl 164(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 168(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 80(%esp), %ecx + movl 1452(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + addl 80(%esp), %esi + movl 56(%esp), %esi # 4-byte Reload + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 88(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 92(%esp), %esi + movl 52(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl 104(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl 112(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 120(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 124(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 128(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, %edx + movl 1452(%esp), %ebp + subl (%ebp), %edx + movl %ecx, %eax + sbbl 4(%ebp), %eax + movl %esi, %ebx + sbbl 8(%ebp), %ebx + movl 52(%esp), %ecx # 4-byte Reload + sbbl 12(%ebp), %ecx + movl 40(%esp), %edi # 4-byte Reload + sbbl 16(%ebp), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + sbbl 20(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + sbbl 24(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + sbbl 28(%ebp), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + sbbl 32(%ebp), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + sbbl 36(%ebp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + sbbl 40(%ebp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + sbbl 44(%ebp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl %edi, %ebp + sarl $31, %ebp + testl %ebp, %ebp + movl 76(%esp), %ebp # 4-byte Reload + js .LBB179_2 +# BB#1: + movl %edx, %ebp +.LBB179_2: + movl 
1440(%esp), %edx + movl %ebp, (%edx) + movl 68(%esp), %edi # 4-byte Reload + js .LBB179_4 +# BB#3: + movl %eax, %edi +.LBB179_4: + movl %edi, 4(%edx) + js .LBB179_6 +# BB#5: + movl %ebx, %esi +.LBB179_6: + movl %esi, 8(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB179_8 +# BB#7: + movl %ecx, %eax +.LBB179_8: + movl %eax, 12(%edx) + movl 40(%esp), %eax # 4-byte Reload + js .LBB179_10 +# BB#9: + movl 4(%esp), %eax # 4-byte Reload +.LBB179_10: + movl %eax, 16(%edx) + movl 36(%esp), %eax # 4-byte Reload + js .LBB179_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB179_12: + movl %eax, 20(%edx) + movl 32(%esp), %eax # 4-byte Reload + js .LBB179_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB179_14: + movl %eax, 24(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB179_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB179_16: + movl %eax, 28(%edx) + movl 44(%esp), %eax # 4-byte Reload + js .LBB179_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB179_18: + movl %eax, 32(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB179_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB179_20: + movl %eax, 36(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB179_22 +# BB#21: + movl 28(%esp), %eax # 4-byte Reload +.LBB179_22: + movl %eax, 40(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB179_24 +# BB#23: + movl 56(%esp), %eax # 4-byte Reload +.LBB179_24: + movl %eax, 44(%edx) + addl $1420, %esp # imm = 0x58C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end179: + .size mcl_fp_montNF12Lbmi2, .Lfunc_end179-mcl_fp_montNF12Lbmi2 + + .globl mcl_fp_montRed12Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed12Lbmi2,@function +mcl_fp_montRed12Lbmi2: # @mcl_fp_montRed12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $828, %esp # imm = 0x33C + calll .L180$pb +.L180$pb: + popl %eax +.Ltmp31: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp31-.L180$pb), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 856(%esp), %edx + movl -4(%edx), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 852(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 88(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 100(%esp) # 4-byte Spill + imull %esi, %ebx + movl 92(%ecx), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 88(%ecx), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 84(%ecx), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 80(%ecx), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 76(%ecx), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 72(%ecx), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 68(%ecx), %edi + movl %edi, 140(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 60(%ecx), %esi + movl %esi, 148(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl %esi, 156(%esp) # 4-byte Spill + movl 48(%ecx), %edi + movl %edi, 152(%esp) # 4-byte Spill + movl 44(%ecx), %edi + movl %edi, 132(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 32(%ecx), %edi + movl 28(%ecx), %esi + movl 24(%ecx), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 20(%ecx), %ebp + movl 16(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 12(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 8(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl (%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%edx), %ecx 
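+# NOTE (annotation, not in the generated source): mcl_fp_montRed12Lbmi2 begins
+# by spilling the twelve modulus limbs and the per-modulus constant loaded from
+# -4(%edx), which appears to be -p^(-1) mod 2^32; the word-by-word reduction
+# rounds that follow reload it (84(%esp)) to pick the multiple of p that
+# eliminates one low limb of the wide input per round.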
+ movl %ecx, 56(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 776(%esp), %ecx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + movl 88(%esp), %eax # 4-byte Reload + addl 776(%esp), %eax + movl 100(%esp), %ecx # 4-byte Reload + adcl 780(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 796(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 804(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + adcl 808(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + movl 136(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + sbbl %ebp, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 720(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + andl $1, %ebp + movl %ebp, %ecx + addl 720(%esp), %esi + movl 76(%esp), %ebp # 4-byte Reload + adcl 724(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + adcl 752(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 
156(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl $0, %edi + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl %ebp, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 664(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 664(%esp), %ebp + movl 64(%esp), %ecx # 4-byte Reload + adcl 668(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 692(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl 696(%esp), %ebp + movl 132(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl 712(%esp), %edi + movl %edi, 136(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + movl 144(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 608(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 608(%esp), %esi + movl 68(%esp), %esi # 4-byte Reload + adcl 612(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 636(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl 
%eax, 148(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 144(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl 108(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 552(%esp), %esi + movl 72(%esp), %ecx # 4-byte Reload + adcl 556(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 120(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, %esi + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 496(%esp), %edi + movl 80(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %ebp # 4-byte Reload + adcl 528(%esp), %ebp + movl 136(%esp), %edi # 4-byte Reload + adcl 532(%esp), %edi + movl 148(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 
4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 104(%esp) # 4-byte Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 440(%esp), %esi + movl 88(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl 468(%esp), %ebp + movl %ebp, 156(%esp) # 4-byte Spill + adcl 472(%esp), %edi + movl %edi, 136(%esp) # 4-byte Spill + movl 148(%esp), %esi # 4-byte Reload + adcl 476(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 384(%esp), %ecx + movl 856(%esp), %eax + movl %eax, %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 384(%esp), %edi + movl 100(%esp), %ecx # 4-byte Reload + adcl 388(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %ebp # 4-byte Reload + adcl 400(%esp), %ebp + movl 152(%esp), %edi # 4-byte Reload + adcl 404(%esp), %edi + movl 156(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + adcl 416(%esp), %esi + movl %esi, 148(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 140(%esp), %esi # 4-byte Reload + adcl 424(%esp), %esi + movl 124(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + movl 100(%esp), %eax # 4-byte 
Reload + addl 328(%esp), %eax + movl 96(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl 112(%esp), %ecx # 4-byte Reload + adcl 336(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + adcl 340(%esp), %ebp + movl %ebp, 132(%esp) # 4-byte Spill + adcl 344(%esp), %edi + movl %edi, 152(%esp) # 4-byte Spill + movl 156(%esp), %ecx # 4-byte Reload + adcl 348(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 136(%esp), %ecx # 4-byte Reload + adcl 352(%esp), %ecx + movl %ecx, 136(%esp) # 4-byte Spill + movl 148(%esp), %ecx # 4-byte Reload + adcl 356(%esp), %ecx + movl %ecx, 148(%esp) # 4-byte Spill + movl 116(%esp), %ebp # 4-byte Reload + adcl 360(%esp), %ebp + adcl 364(%esp), %esi + movl %esi, 140(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 368(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 144(%esp), %ecx # 4-byte Reload + adcl 372(%esp), %ecx + movl %ecx, 144(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 376(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + movl 76(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %eax, %esi + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 272(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl 132(%esp), %ecx # 4-byte Reload + adcl 280(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 284(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 156(%esp), %ecx # 4-byte Reload + adcl 288(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 136(%esp), %ecx # 4-byte Reload + adcl 292(%esp), %ecx + movl %ecx, 136(%esp) # 4-byte Spill + movl 148(%esp), %ecx # 4-byte Reload + adcl 296(%esp), %ecx + movl %ecx, 148(%esp) # 4-byte Spill + movl %ebp, %esi + adcl 300(%esp), %esi + movl 140(%esp), %ecx # 4-byte Reload + adcl 304(%esp), %ecx + movl %ecx, 140(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 308(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 144(%esp), %ecx # 4-byte Reload + adcl 312(%esp), %ecx + movl %ecx, 144(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 316(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 320(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, %ebp + movl %eax, %edi + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 216(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 216(%esp), %edi + movl 132(%esp), %ecx # 4-byte Reload + adcl 220(%esp), %ecx + movl 152(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl 240(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 124(%esp), %esi # 4-byte Reload + adcl 248(%esp), %esi + 
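+# NOTE (annotation, not in the generated source): each reduction round above
+# computes q via imull against the cached inverse, calls .LmulPv384x32 to form
+# q*p, and folds it in with addl/adcl so the current low limb vanishes; the
+# trailing "adcl $0" chains propagate the carry through the spilled high limbs
+# before the next round.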
movl 144(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl 84(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 160(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 160(%esp), %edi + movl 152(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl %eax, %edi + movl 156(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 136(%esp), %edx # 4-byte Reload + adcl 172(%esp), %edx + movl %edx, 136(%esp) # 4-byte Spill + movl 148(%esp), %ebx # 4-byte Reload + adcl 176(%esp), %ebx + movl %ebx, 148(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 180(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl 188(%esp), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, %ebp + subl 24(%esp), %edi # 4-byte Folded Reload + movl 156(%esp), %esi # 4-byte Reload + sbbl 16(%esp), %esi # 4-byte Folded Reload + sbbl 20(%esp), %edx # 4-byte Folded Reload + sbbl 28(%esp), %ebx # 4-byte Folded Reload + sbbl 32(%esp), %ecx # 4-byte Folded Reload + movl 140(%esp), %eax # 4-byte Reload + sbbl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + sbbl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + sbbl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + sbbl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + sbbl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 132(%esp) # 4-byte Spill + sbbl $0, %ebp + andl $1, %ebp + jne .LBB180_2 +# BB#1: + movl %ebx, 148(%esp) # 4-byte Spill +.LBB180_2: + movl %ebp, %ebx + testb %bl, %bl + movl 152(%esp), %ebx # 4-byte Reload + jne .LBB180_4 +# BB#3: + movl %edi, %ebx +.LBB180_4: + movl 848(%esp), %edi + movl %ebx, (%edi) + movl 144(%esp), %ebx # 4-byte Reload + jne .LBB180_6 +# BB#5: + movl %esi, 156(%esp) # 4-byte Spill +.LBB180_6: + movl 156(%esp), %esi # 4-byte Reload + movl %esi, 4(%edi) + movl 136(%esp), %esi # 4-byte Reload + jne .LBB180_8 +# BB#7: + movl %edx, %esi +.LBB180_8: + movl %esi, 
8(%edi) + movl 148(%esp), %edx # 4-byte Reload + movl %edx, 12(%edi) + movl 128(%esp), %esi # 4-byte Reload + movl 116(%esp), %edx # 4-byte Reload + jne .LBB180_10 +# BB#9: + movl %ecx, %edx +.LBB180_10: + movl %edx, 16(%edi) + movl 120(%esp), %edx # 4-byte Reload + movl 140(%esp), %ecx # 4-byte Reload + jne .LBB180_12 +# BB#11: + movl 84(%esp), %ecx # 4-byte Reload +.LBB180_12: + movl %ecx, 20(%edi) + movl 108(%esp), %ecx # 4-byte Reload + movl 124(%esp), %eax # 4-byte Reload + jne .LBB180_14 +# BB#13: + movl 88(%esp), %eax # 4-byte Reload +.LBB180_14: + movl %eax, 24(%edi) + movl 104(%esp), %eax # 4-byte Reload + jne .LBB180_16 +# BB#15: + movl 92(%esp), %ebx # 4-byte Reload +.LBB180_16: + movl %ebx, 28(%edi) + jne .LBB180_18 +# BB#17: + movl 96(%esp), %esi # 4-byte Reload +.LBB180_18: + movl %esi, 32(%edi) + jne .LBB180_20 +# BB#19: + movl 100(%esp), %edx # 4-byte Reload +.LBB180_20: + movl %edx, 36(%edi) + jne .LBB180_22 +# BB#21: + movl 112(%esp), %ecx # 4-byte Reload +.LBB180_22: + movl %ecx, 40(%edi) + jne .LBB180_24 +# BB#23: + movl 132(%esp), %eax # 4-byte Reload +.LBB180_24: + movl %eax, 44(%edi) + addl $828, %esp # imm = 0x33C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end180: + .size mcl_fp_montRed12Lbmi2, .Lfunc_end180-mcl_fp_montRed12Lbmi2 + + .globl mcl_fp_addPre12Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre12Lbmi2,@function +mcl_fp_addPre12Lbmi2: # @mcl_fp_addPre12Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ebx) + movl 32(%ecx), %esi + adcl %edi, %esi + movl 36(%eax), %edi + movl %edx, 28(%ebx) + movl 36(%ecx), %edx + adcl %edi, %edx + movl 40(%eax), %edi + movl %esi, 32(%ebx) + movl 40(%ecx), %esi + adcl %edi, %esi + movl %edx, 36(%ebx) + movl %esi, 40(%ebx) + movl 44(%eax), %eax + movl 44(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 44(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end181: + .size mcl_fp_addPre12Lbmi2, .Lfunc_end181-mcl_fp_addPre12Lbmi2 + + .globl mcl_fp_subPre12Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre12Lbmi2,@function +mcl_fp_subPre12Lbmi2: # @mcl_fp_subPre12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ebp) + movl 32(%ecx), %edi + sbbl %ebx, %edi + movl 36(%edx), %ebx + movl %esi, 28(%ebp) + movl 
36(%ecx), %esi + sbbl %ebx, %esi + movl 40(%edx), %ebx + movl %edi, 32(%ebp) + movl 40(%ecx), %edi + sbbl %ebx, %edi + movl %esi, 36(%ebp) + movl %edi, 40(%ebp) + movl 44(%edx), %edx + movl 44(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 44(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end182: + .size mcl_fp_subPre12Lbmi2, .Lfunc_end182-mcl_fp_subPre12Lbmi2 + + .globl mcl_fp_shr1_12Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_12Lbmi2,@function +mcl_fp_shr1_12Lbmi2: # @mcl_fp_shr1_12Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 40(%ecx) + shrl %eax + movl %eax, 44(%ecx) + popl %esi + retl +.Lfunc_end183: + .size mcl_fp_shr1_12Lbmi2, .Lfunc_end183-mcl_fp_shr1_12Lbmi2 + + .globl mcl_fp_add12Lbmi2 + .align 16, 0x90 + .type mcl_fp_add12Lbmi2,@function +mcl_fp_add12Lbmi2: # @mcl_fp_add12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $36, %esp + movl 64(%esp), %ebx + movl (%ebx), %edx + movl 4(%ebx), %ecx + movl 60(%esp), %eax + addl (%eax), %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl 4(%eax), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 8(%ebx), %ecx + adcl 8(%eax), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 12(%eax), %edx + movl 16(%eax), %ecx + adcl 12(%ebx), %edx + movl %edx, 24(%esp) # 4-byte Spill + adcl 16(%ebx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 20(%eax), %ecx + adcl 20(%ebx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 24(%eax), %ecx + adcl 24(%ebx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 28(%eax), %ecx + adcl 28(%ebx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 32(%eax), %ebp + adcl 32(%ebx), %ebp + movl %ebp, (%esp) # 4-byte Spill + movl 36(%eax), %edi + adcl 36(%ebx), %edi + movl 40(%eax), %esi + adcl 40(%ebx), %esi + movl 44(%eax), %edx + adcl 44(%ebx), %edx + movl 56(%esp), %ebx + movl 4(%esp), %eax # 4-byte Reload + movl %eax, (%ebx) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%ebx) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%ebx) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%ebx) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%ebx) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 20(%ebx) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%ebx) + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%ebx) + movl %ebp, 32(%ebx) + movl %edi, 36(%ebx) + movl %esi, 40(%ebx) + movl %edx, 44(%ebx) + sbbl %ecx, %ecx + andl $1, %ecx + movl 68(%esp), %ebp + subl (%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + sbbl 4(%ebp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + sbbl 8(%ebp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + sbbl 12(%ebp), %eax + movl %eax, 
24(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + sbbl 16(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + sbbl 20(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + sbbl 24(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 8(%esp), %eax # 4-byte Reload + sbbl 28(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl (%esp), %eax # 4-byte Reload + sbbl 32(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 36(%ebp), %edi + sbbl 40(%ebp), %esi + sbbl 44(%ebp), %edx + sbbl $0, %ecx + testb $1, %cl + jne .LBB184_2 +# BB#1: # %nocarry + movl 4(%esp), %eax # 4-byte Reload + movl %eax, (%ebx) + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 4(%ebx) + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 8(%ebx) + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 12(%ebx) + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 16(%ebx) + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 20(%ebx) + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 24(%ebx) + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 28(%ebx) + movl (%esp), %eax # 4-byte Reload + movl %eax, 32(%ebx) + movl %edi, 36(%ebx) + movl %esi, 40(%ebx) + movl %edx, 44(%ebx) +.LBB184_2: # %carry + addl $36, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end184: + .size mcl_fp_add12Lbmi2, .Lfunc_end184-mcl_fp_add12Lbmi2 + + .globl mcl_fp_addNF12Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF12Lbmi2,@function +mcl_fp_addNF12Lbmi2: # @mcl_fp_addNF12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $88, %esp + movl 116(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + movl 112(%esp), %edx + addl (%edx), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 4(%edx), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 44(%esi), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 40(%esi), %ebp + movl 36(%esi), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 32(%esi), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 28(%esi), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 24(%esi), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 20(%esi), %ebx + movl 16(%esi), %edi + movl 12(%esi), %ecx + movl 8(%esi), %eax + adcl 8(%edx), %eax + adcl 12(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 16(%edx), %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 20(%edx), %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 24(%edx), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 28(%edx), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 32(%edx), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 36(%edx), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl %eax, %esi + adcl 40(%edx), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 44(%edx), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 120(%esp), %ebp + movl 60(%esp), %edx # 4-byte Reload + subl (%ebp), %edx + movl 64(%esp), %eax # 4-byte Reload + sbbl 4(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill + movl %esi, %eax + sbbl 8(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 12(%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 16(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + sbbl 20(%ebp), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + movl %eax, %ecx + sbbl 24(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte 
Reload + sbbl 28(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 32(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + sbbl 36(%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, %edi + sbbl 40(%ebp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + sbbl 44(%ebp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl %edi, %ebp + movl 60(%esp), %edi # 4-byte Reload + sarl $31, %ebp + testl %ebp, %ebp + js .LBB185_2 +# BB#1: + movl %edx, %edi +.LBB185_2: + movl 108(%esp), %edx + movl %edi, (%edx) + movl 64(%esp), %edi # 4-byte Reload + js .LBB185_4 +# BB#3: + movl (%esp), %edi # 4-byte Reload +.LBB185_4: + movl %edi, 4(%edx) + movl %eax, %ebp + js .LBB185_6 +# BB#5: + movl 4(%esp), %esi # 4-byte Reload +.LBB185_6: + movl %esi, 8(%edx) + movl %ecx, %esi + movl 52(%esp), %eax # 4-byte Reload + movl 48(%esp), %ecx # 4-byte Reload + js .LBB185_8 +# BB#7: + movl 8(%esp), %ecx # 4-byte Reload +.LBB185_8: + movl %ecx, 12(%edx) + movl 76(%esp), %ebx # 4-byte Reload + movl 84(%esp), %edi # 4-byte Reload + js .LBB185_10 +# BB#9: + movl 12(%esp), %eax # 4-byte Reload +.LBB185_10: + movl %eax, 16(%edx) + movl 80(%esp), %ecx # 4-byte Reload + js .LBB185_12 +# BB#11: + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 56(%esp) # 4-byte Spill +.LBB185_12: + movl 56(%esp), %eax # 4-byte Reload + movl %eax, 20(%edx) + js .LBB185_14 +# BB#13: + movl 20(%esp), %ebp # 4-byte Reload +.LBB185_14: + movl %ebp, 24(%edx) + js .LBB185_16 +# BB#15: + movl 24(%esp), %edi # 4-byte Reload +.LBB185_16: + movl %edi, 28(%edx) + js .LBB185_18 +# BB#17: + movl 28(%esp), %ebx # 4-byte Reload +.LBB185_18: + movl %ebx, 32(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB185_20 +# BB#19: + movl 32(%esp), %eax # 4-byte Reload +.LBB185_20: + movl %eax, 36(%edx) + js .LBB185_22 +# BB#21: + movl 36(%esp), %esi # 4-byte Reload +.LBB185_22: + movl %esi, 40(%edx) + js .LBB185_24 +# BB#23: + movl 40(%esp), %ecx # 4-byte Reload +.LBB185_24: + movl %ecx, 44(%edx) + addl $88, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end185: + .size mcl_fp_addNF12Lbmi2, .Lfunc_end185-mcl_fp_addNF12Lbmi2 + + .globl mcl_fp_sub12Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub12Lbmi2,@function +mcl_fp_sub12Lbmi2: # @mcl_fp_sub12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 64(%esp), %esi + movl (%esi), %ecx + movl 4(%esi), %eax + xorl %ebx, %ebx + movl 68(%esp), %edi + subl (%edi), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 28(%esi), %edx + sbbl 28(%edi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 32(%esi), %ecx + sbbl 32(%edi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 36(%esi), %eax + sbbl 36(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 40(%esi), %ebp + sbbl 40(%edi), %ebp + movl 44(%esi), %esi + sbbl 44(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 60(%esp), %ebx + movl 12(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 24(%esp), %edi # 4-byte 
Reload + movl %edi, 4(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl %edx, 28(%ebx) + movl %ecx, 32(%ebx) + movl %eax, 36(%ebx) + movl %ebp, 40(%ebx) + movl %esi, 44(%ebx) + je .LBB186_2 +# BB#1: # %carry + movl %esi, %edi + movl 72(%esp), %esi + movl 12(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 24(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 32(%esp), %ecx # 4-byte Reload + adcl 8(%esi), %ecx + movl 12(%esi), %eax + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebx) + movl 16(%esi), %ecx + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl %eax, 36(%ebx) + movl 40(%esi), %eax + adcl %ebp, %eax + movl %eax, 40(%ebx) + movl 44(%esi), %eax + adcl %edi, %eax + movl %eax, 44(%ebx) +.LBB186_2: # %nocarry + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end186: + .size mcl_fp_sub12Lbmi2, .Lfunc_end186-mcl_fp_sub12Lbmi2 + + .globl mcl_fp_subNF12Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF12Lbmi2,@function +mcl_fp_subNF12Lbmi2: # @mcl_fp_subNF12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 96(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %eax + movl 100(%esp), %edi + subl (%edi), %edx + movl %edx, 48(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%ecx), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %ebx + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 32(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 36(%esp) # 4-byte Spill + sbbl 24(%edi), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + sbbl 28(%edi), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl %ecx, %eax + sarl $31, %eax + movl %eax, %edx + addl %edx, %edx + movl %eax, %edi + adcl %edi, %edi + movl %eax, %ebp + adcl %ebp, %ebp + movl %eax, %esi + adcl %esi, %esi + shrl $31, %ecx + orl %edx, %ecx + movl 104(%esp), %edx + andl 12(%edx), %esi + movl %esi, 8(%esp) # 4-byte Spill + andl 8(%edx), %ebp + andl 4(%edx), %edi + andl (%edx), %ecx + movl 44(%edx), 
%esi + andl %eax, %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 40(%edx), %esi + andl %eax, %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 36(%edx), %esi + andl %eax, %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 32(%edx), %esi + andl %eax, %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 28(%edx), %esi + andl %eax, %esi + movl %esi, (%esp) # 4-byte Spill + movl 24(%edx), %ebx + andl %eax, %ebx + movl 20(%edx), %esi + andl %eax, %esi + andl 16(%edx), %eax + addl 48(%esp), %ecx # 4-byte Folded Reload + adcl 52(%esp), %edi # 4-byte Folded Reload + movl 92(%esp), %edx + movl %ecx, (%edx) + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl %edi, 4(%edx) + movl 8(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ebp, 8(%edx) + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ecx, 12(%edx) + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %eax, 16(%edx) + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %esi, 20(%edx) + movl (%esp), %ecx # 4-byte Reload + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ebx, 24(%edx) + movl 4(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %ecx, 28(%edx) + movl 12(%esp), %ecx # 4-byte Reload + adcl 64(%esp), %ecx # 4-byte Folded Reload + movl %eax, 32(%edx) + movl 16(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %ecx, 36(%edx) + movl %eax, 40(%edx) + movl 20(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%edx) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end187: + .size mcl_fp_subNF12Lbmi2, .Lfunc_end187-mcl_fp_subNF12Lbmi2 + + .globl mcl_fpDbl_add12Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add12Lbmi2,@function +mcl_fpDbl_add12Lbmi2: # @mcl_fpDbl_add12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $88, %esp + movl 116(%esp), %ecx + movl 112(%esp), %edi + movl 12(%edi), %esi + movl 16(%edi), %edx + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%edi), %ebp + movl 108(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%edi), %ebp + adcl 8(%edi), %ebx + adcl 12(%ecx), %esi + adcl 16(%ecx), %edx + movl %ebp, 4(%eax) + movl 56(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %esi, 12(%eax) + movl 20(%edi), %esi + adcl %ebx, %esi + movl 24(%ecx), %ebx + movl %edx, 16(%eax) + movl 24(%edi), %edx + adcl %ebx, %edx + movl 28(%ecx), %ebx + movl %esi, 20(%eax) + movl 28(%edi), %esi + adcl %ebx, %esi + movl 32(%ecx), %ebx + movl %edx, 24(%eax) + movl 32(%edi), %edx + adcl %ebx, %edx + movl 36(%ecx), %ebx + movl %esi, 28(%eax) + movl 36(%edi), %esi + adcl %ebx, %esi + movl 40(%ecx), %ebx + movl %edx, 32(%eax) + movl 40(%edi), %edx + adcl %ebx, %edx + movl 44(%ecx), %ebx + movl %esi, 36(%eax) + movl 44(%edi), %esi + adcl %ebx, %esi + movl 48(%ecx), %ebx + movl %edx, 40(%eax) + movl 48(%edi), %edx + adcl %ebx, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 52(%ecx), %ebx + movl %esi, 44(%eax) + movl 52(%edi), %eax + adcl %ebx, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 56(%edi), %eax + adcl %ebp, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%ecx), %eax + movl 60(%edi), %edx + adcl %eax, %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl 64(%edi), %edx + adcl %eax, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 68(%ecx), %eax + movl 68(%edi), %edx + adcl %eax, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 72(%ecx), %eax + movl 72(%edi), %edx + adcl %eax, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 
76(%ecx), %eax + movl 76(%edi), %edx + adcl %eax, %edx + movl 80(%ecx), %esi + movl 80(%edi), %eax + adcl %esi, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 84(%ecx), %ebx + movl 84(%edi), %esi + adcl %ebx, %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 88(%ecx), %ebp + movl 88(%edi), %ebx + adcl %ebp, %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 92(%ecx), %ecx + movl 92(%edi), %edi + adcl %ecx, %edi + movl %edi, 44(%esp) # 4-byte Spill + sbbl %ecx, %ecx + andl $1, %ecx + movl 120(%esp), %ebp + movl 72(%esp), %edi # 4-byte Reload + subl (%ebp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + sbbl 4(%ebp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + sbbl 8(%ebp), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + sbbl 12(%ebp), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + sbbl 16(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + sbbl 20(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + sbbl 24(%ebp), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %edx, %edi + sbbl 28(%ebp), %edi + movl %edi, (%esp) # 4-byte Spill + sbbl 32(%ebp), %eax + movl %eax, 32(%esp) # 4-byte Spill + sbbl 36(%ebp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl %ebx, %eax + movl 44(%esp), %ebx # 4-byte Reload + sbbl 40(%ebp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %edi + sbbl 44(%ebp), %edi + sbbl $0, %ecx + andl $1, %ecx + jne .LBB188_2 +# BB#1: + movl %edi, %ebx +.LBB188_2: + testb %cl, %cl + movl 72(%esp), %ecx # 4-byte Reload + movl 68(%esp), %esi # 4-byte Reload + movl 64(%esp), %edi # 4-byte Reload + movl 60(%esp), %ebp # 4-byte Reload + jne .LBB188_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload + movl 4(%esp), %esi # 4-byte Reload + movl 8(%esp), %edi # 4-byte Reload + movl 12(%esp), %ebp # 4-byte Reload + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload +.LBB188_4: + movl 108(%esp), %eax + movl %ecx, 48(%eax) + movl 76(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl 80(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + movl 84(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + movl %ebp, 64(%eax) + movl %edi, 68(%eax) + movl %esi, 72(%eax) + movl %edx, 76(%eax) + movl 56(%esp), %ecx # 4-byte Reload + movl 48(%esp), %edx # 4-byte Reload + jne .LBB188_6 +# BB#5: + movl 32(%esp), %edx # 4-byte Reload +.LBB188_6: + movl %edx, 80(%eax) + movl 52(%esp), %edx # 4-byte Reload + jne .LBB188_8 +# BB#7: + movl 36(%esp), %edx # 4-byte Reload +.LBB188_8: + movl %edx, 84(%eax) + jne .LBB188_10 +# BB#9: + movl 40(%esp), %ecx # 4-byte Reload +.LBB188_10: + movl %ecx, 88(%eax) + movl %ebx, 92(%eax) + addl $88, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end188: + .size mcl_fpDbl_add12Lbmi2, .Lfunc_end188-mcl_fpDbl_add12Lbmi2 + + .globl mcl_fpDbl_sub12Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub12Lbmi2,@function +mcl_fpDbl_sub12Lbmi2: # @mcl_fpDbl_sub12Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $76, %esp + movl 100(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %edx + movl 104(%esp), %ebx + subl (%ebx), %eax + sbbl 4(%ebx), %edx + movl 8(%esi), %edi + sbbl 8(%ebx), %edi + movl 96(%esp), %ecx + movl %eax, (%ecx) 
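+ # NOTE (editorial, not in the generated source): mcl_fpDbl_sub12Lbmi2 subtracts two
+ # 24-limb double-width values; the low 12 limbs are stored as they are produced while
+ # the borrow propagates through the sbbl chain into the high half handled below.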
+ movl 12(%esi), %eax + sbbl 12(%ebx), %eax + movl %edx, 4(%ecx) + movl 16(%esi), %edx + sbbl 16(%ebx), %edx + movl %edi, 8(%ecx) + movl 20(%ebx), %edi + movl %eax, 12(%ecx) + movl 20(%esi), %eax + sbbl %edi, %eax + movl 24(%ebx), %edi + movl %edx, 16(%ecx) + movl 24(%esi), %edx + sbbl %edi, %edx + movl 28(%ebx), %edi + movl %eax, 20(%ecx) + movl 28(%esi), %eax + sbbl %edi, %eax + movl 32(%ebx), %edi + movl %edx, 24(%ecx) + movl 32(%esi), %edx + sbbl %edi, %edx + movl 36(%ebx), %edi + movl %eax, 28(%ecx) + movl 36(%esi), %eax + sbbl %edi, %eax + movl 40(%ebx), %edi + movl %edx, 32(%ecx) + movl 40(%esi), %edx + sbbl %edi, %edx + movl 44(%ebx), %edi + movl %eax, 36(%ecx) + movl 44(%esi), %eax + sbbl %edi, %eax + movl 48(%ebx), %edi + movl %edx, 40(%ecx) + movl 48(%esi), %edx + sbbl %edi, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 52(%ebx), %edx + movl %eax, 44(%ecx) + movl 52(%esi), %eax + sbbl %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 56(%ebx), %eax + movl 56(%esi), %edx + sbbl %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 60(%ebx), %eax + movl 60(%esi), %edx + sbbl %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 64(%ebx), %eax + movl 64(%esi), %edx + sbbl %eax, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 68(%ebx), %eax + movl 68(%esi), %edx + sbbl %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 72(%ebx), %eax + movl 72(%esi), %edx + sbbl %eax, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 76(%ebx), %eax + movl 76(%esi), %edx + sbbl %eax, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 80(%ebx), %eax + movl 80(%esi), %edx + sbbl %eax, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 84(%ebx), %eax + movl 84(%esi), %edx + sbbl %eax, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 88(%ebx), %eax + movl 88(%esi), %edx + sbbl %eax, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 92(%ebx), %eax + movl 92(%esi), %edx + sbbl %eax, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 108(%esp), %ebp + jne .LBB189_1 +# BB#2: + movl $0, 36(%esp) # 4-byte Folded Spill + jmp .LBB189_3 +.LBB189_1: + movl 44(%ebp), %edx + movl %edx, 36(%esp) # 4-byte Spill +.LBB189_3: + testb %al, %al + jne .LBB189_4 +# BB#5: + movl $0, 12(%esp) # 4-byte Folded Spill + movl $0, %esi + jmp .LBB189_6 +.LBB189_4: + movl (%ebp), %esi + movl 4(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB189_6: + jne .LBB189_7 +# BB#8: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB189_9 +.LBB189_7: + movl 40(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB189_9: + jne .LBB189_10 +# BB#11: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB189_12 +.LBB189_10: + movl 36(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB189_12: + jne .LBB189_13 +# BB#14: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB189_15 +.LBB189_13: + movl 32(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB189_15: + jne .LBB189_16 +# BB#17: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB189_18 +.LBB189_16: + movl 28(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB189_18: + jne .LBB189_19 +# BB#20: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB189_21 +.LBB189_19: + movl 24(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB189_21: + jne .LBB189_22 +# BB#23: + movl $0, %ebx + jmp .LBB189_24 +.LBB189_22: + movl 20(%ebp), %ebx +.LBB189_24: + jne .LBB189_25 +# BB#26: + movl $0, %eax + jmp .LBB189_27 +.LBB189_25: + movl 16(%ebp), %eax +.LBB189_27: + jne .LBB189_28 +# BB#29: + movl %ebp, %edx + movl $0, %ebp + jmp .LBB189_30 +.LBB189_28: 
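+ # NOTE (editorial): the jne/.LBB189_* ladder above and below selects either zero or a
+ # modulus limb (read via 108(%esp)) so that the trailing adcl chain adds the modulus
+ # back exactly when the high-half subtraction borrowed.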
+ movl %ebp, %edx + movl 12(%edx), %ebp +.LBB189_30: + jne .LBB189_31 +# BB#32: + xorl %edx, %edx + jmp .LBB189_33 +.LBB189_31: + movl 8(%edx), %edx +.LBB189_33: + addl 32(%esp), %esi # 4-byte Folded Reload + movl 12(%esp), %edi # 4-byte Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %esi, 48(%ecx) + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edi, 52(%ecx) + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %edx, 56(%ecx) + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %ebp, 60(%ecx) + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %eax, 64(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %ebx, 68(%ecx) + movl 4(%esp), %edx # 4-byte Reload + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %eax, 72(%ecx) + movl 8(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %edx, 76(%ecx) + movl 16(%esp), %edx # 4-byte Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %eax, 80(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %edx, 84(%ecx) + movl %eax, 88(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%ecx) + addl $76, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end189: + .size mcl_fpDbl_sub12Lbmi2, .Lfunc_end189-mcl_fpDbl_sub12Lbmi2 + + .align 16, 0x90 + .type .LmulPv416x32,@function +.LmulPv416x32: # @mulPv416x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $44, %esp + movl %edx, %eax + movl 64(%esp), %ebx + movl %ebx, %edx + mulxl 4(%eax), %esi, %ebp + movl %ebx, %edx + mulxl (%eax), %edi, %edx + movl %edi, 40(%esp) # 4-byte Spill + addl %esi, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 8(%eax), %edx, %esi + adcl %ebp, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 12(%eax), %edx, %edi + adcl %esi, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 16(%eax), %edx, %esi + adcl %edi, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 20(%eax), %edx, %edi + adcl %esi, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 24(%eax), %edx, %esi + adcl %edi, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 28(%eax), %edx, %edi + adcl %esi, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 32(%eax), %edx, %esi + adcl %edi, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 36(%eax), %edi, %ebp + adcl %esi, %edi + movl %ebx, %edx + mulxl 40(%eax), %esi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %ebp, %esi + movl %ebx, %edx + mulxl 44(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 36(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 32(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 28(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%ecx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%ecx) + movl %edi, 36(%ecx) + movl %esi, 40(%ecx) + movl %edx, 44(%ecx) + movl %ebx, %edx + mulxl 48(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%ecx) + adcl $0, %edx + movl %edx, 52(%ecx) + movl %ecx, %eax + addl $44, %esp 
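+ # NOTE (editorial): .LmulPv416x32 is the shared primitive for the 13-limb routines that
+ # follow: it multiplies the 13-limb operand addressed by %edx by the 32-bit scalar at
+ # 64(%esp) using BMI2 mulxl, storing a 14-limb product through %ecx (returned in %eax).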
+ popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end190: + .size .LmulPv416x32, .Lfunc_end190-.LmulPv416x32 + + .globl mcl_fp_mulUnitPre13Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre13Lbmi2,@function +mcl_fp_mulUnitPre13Lbmi2: # @mcl_fp_mulUnitPre13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $108, %esp + calll .L191$pb +.L191$pb: + popl %ebx +.Ltmp32: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp32-.L191$pb), %ebx + movl 136(%esp), %eax + movl %eax, (%esp) + leal 48(%esp), %ecx + movl 132(%esp), %edx + calll .LmulPv416x32 + movl 100(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 88(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 72(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 68(%esp), %ebp + movl 64(%esp), %ebx + movl 60(%esp), %edi + movl 56(%esp), %esi + movl 48(%esp), %edx + movl 52(%esp), %ecx + movl 128(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + addl $108, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end191: + .size mcl_fp_mulUnitPre13Lbmi2, .Lfunc_end191-mcl_fp_mulUnitPre13Lbmi2 + + .globl mcl_fpDbl_mulPre13Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre13Lbmi2,@function +mcl_fpDbl_mulPre13Lbmi2: # @mcl_fpDbl_mulPre13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $844, %esp # imm = 0x34C + calll .L192$pb +.L192$pb: + popl %edi +.Ltmp33: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp33-.L192$pb), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 872(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl 868(%esp), %edx + movl %edx, %esi + movl %edi, %ebx + calll .LmulPv416x32 + movl 836(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 828(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 804(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 800(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 796(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 792(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 784(%esp), %eax + movl 788(%esp), %ebp + movl 864(%esp), %ecx + movl %eax, (%ecx) + movl 872(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 728(%esp), %ecx + movl %esi, %edx + movl %edi, %ebx + calll .LmulPv416x32 + addl 728(%esp), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 780(%esp), 
%eax + movl %eax, 44(%esp) # 4-byte Spill + movl 776(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 772(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 764(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 760(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 748(%esp), %edi + movl 744(%esp), %esi + movl 740(%esp), %edx + movl 732(%esp), %eax + movl 736(%esp), %ecx + movl 864(%esp), %ebp + movl 24(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 672(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 60(%esp), %eax # 4-byte Reload + addl 672(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 724(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 720(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 716(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 712(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 708(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 704(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 700(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 696(%esp), %ebx + movl 692(%esp), %edi + movl 688(%esp), %esi + movl 684(%esp), %edx + movl 676(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 680(%esp), %ecx + movl 864(%esp), %eax + movl 60(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 24(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + 
adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 616(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 616(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 660(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 656(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 652(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 648(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 644(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 640(%esp), %ebx + movl 636(%esp), %edi + movl 632(%esp), %esi + movl 628(%esp), %edx + movl 620(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 624(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 560(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 588(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 584(%esp), %ebx + movl 580(%esp), %edi + movl 576(%esp), %esi + movl 572(%esp), %edx + movl 564(%esp), %eax + movl %eax, 
104(%esp) # 4-byte Spill + movl 568(%esp), %ecx + movl 864(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 504(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 532(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 528(%esp), %ebx + movl 524(%esp), %edi + movl 520(%esp), %esi + movl 516(%esp), %edx + movl 508(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 512(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 
448(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 448(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 492(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 472(%esp), %ebp + movl 468(%esp), %edi + movl 464(%esp), %esi + movl 460(%esp), %edx + movl 452(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 456(%esp), %ecx + movl 864(%esp), %eax + movl 56(%esp), %ebx # 4-byte Reload + movl %ebx, 24(%eax) + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 68(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 392(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 392(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 444(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 440(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 436(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 432(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 428(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 424(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 416(%esp), %ebx + movl 412(%esp), %edi + movl 408(%esp), %esi + movl 404(%esp), %edx + movl 396(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 400(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 60(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 
96(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 336(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 336(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 380(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 376(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 360(%esp), %ebp + movl 356(%esp), %edi + movl 352(%esp), %esi + movl 348(%esp), %edx + movl 340(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 344(%esp), %ecx + movl 864(%esp), %eax + movl 60(%esp), %ebx # 4-byte Reload + movl %ebx, 32(%eax) + movl 24(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 68(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 280(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 316(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 304(%esp), %ebx + movl 300(%esp), %edi + 
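+ # NOTE (editorial): mcl_fpDbl_mulPre13Lbmi2 is schoolbook multiplication: each round
+ # feeds one 32-bit limb of the multiplier to .LmulPv416x32, folds the 14-limb partial
+ # product into the spilled accumulator with adcl, and emits one limb of the 26-limb result.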
movl 296(%esp), %esi + movl 292(%esp), %edx + movl 284(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 288(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 224(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 272(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 248(%esp), %ebx + movl 244(%esp), %edi + movl 240(%esp), %esi + movl 236(%esp), %edx + movl 228(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 232(%esp), %ecx + movl 864(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 60(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded 
Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 872(%esp), %edi + movl 44(%edi), %eax + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 868(%esp), %eax + movl %eax, %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 60(%esp), %esi # 4-byte Reload + addl 168(%esp), %esi + movl 220(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 204(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 200(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 196(%esp), %ebp + movl 192(%esp), %ebx + movl 188(%esp), %edi + movl 184(%esp), %edx + movl 180(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 176(%esp), %ecx + movl 864(%esp), %eax + movl %esi, 44(%eax) + movl 68(%esp), %esi # 4-byte Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 48(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + adcl 104(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 112(%esp), %esi + movl %esi, %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 120(%esp), %edi + movl 164(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 156(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 152(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 148(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 144(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 140(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 136(%esp), %ebx + movl 132(%esp), %esi + movl 128(%esp), %edx + movl 124(%esp), %ecx + movl 864(%esp), %eax + movl %ebp, 48(%eax) + movl 68(%esp), %ebp # 4-byte Reload + movl %ebp, 52(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %edi, 56(%eax) + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %ecx, 60(%eax) + adcl 64(%esp), %esi # 4-byte Folded Reload + movl %edx, 64(%eax) + adcl 104(%esp), %ebx # 4-byte Folded Reload + movl %esi, 68(%eax) + movl 44(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ebx, 
72(%eax) + movl 60(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %ecx, 76(%eax) + movl 76(%esp), %ecx # 4-byte Reload + adcl 88(%esp), %ecx # 4-byte Folded Reload + movl %edx, 80(%eax) + movl 84(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %ecx, 84(%eax) + movl 92(%esp), %ecx # 4-byte Reload + adcl 100(%esp), %ecx # 4-byte Folded Reload + movl %edx, 88(%eax) + movl %ecx, 92(%eax) + movl 96(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 96(%eax) + movl 108(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 100(%eax) + addl $844, %esp # imm = 0x34C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end192: + .size mcl_fpDbl_mulPre13Lbmi2, .Lfunc_end192-mcl_fpDbl_mulPre13Lbmi2 + + .globl mcl_fpDbl_sqrPre13Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre13Lbmi2,@function +mcl_fpDbl_sqrPre13Lbmi2: # @mcl_fpDbl_sqrPre13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $844, %esp # imm = 0x34C + calll .L193$pb +.L193$pb: + popl %ebx +.Ltmp34: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp34-.L193$pb), %ebx + movl %ebx, 108(%esp) # 4-byte Spill + movl 868(%esp), %edx + movl (%edx), %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl %edx, %edi + movl %ebx, %esi + calll .LmulPv416x32 + movl 836(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 828(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 804(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 800(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 796(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 792(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 784(%esp), %eax + movl 788(%esp), %ebp + movl 864(%esp), %ecx + movl %eax, (%ecx) + movl %edi, %edx + movl 4(%edx), %eax + movl %eax, (%esp) + leal 728(%esp), %ecx + movl %esi, %ebx + calll .LmulPv416x32 + addl 728(%esp), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 780(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 776(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 772(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 764(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 760(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 748(%esp), %edi + movl 744(%esp), %esi + movl 740(%esp), %edx + movl 732(%esp), %eax + movl 736(%esp), %ecx + movl 864(%esp), %ebp + movl 24(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), 
%eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 8(%edx), %eax + movl %eax, (%esp) + leal 672(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 60(%esp), %eax # 4-byte Reload + addl 672(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 724(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 720(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 716(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 712(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 708(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 704(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 700(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 696(%esp), %ebx + movl 692(%esp), %edi + movl 688(%esp), %esi + movl 684(%esp), %edx + movl 676(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 680(%esp), %ecx + movl 864(%esp), %eax + movl 60(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 24(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 12(%edx), %eax + movl %eax, (%esp) + leal 616(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 616(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 660(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 656(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 652(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 648(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 
644(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 640(%esp), %ebx + movl 636(%esp), %edi + movl 632(%esp), %esi + movl 628(%esp), %edx + movl 620(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 624(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 16(%edx), %eax + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 560(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 588(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 584(%esp), %ebx + movl 580(%esp), %edi + movl 576(%esp), %esi + movl 572(%esp), %edx + movl 564(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 568(%esp), %ecx + movl 864(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill 
+ adcl $0, 32(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 20(%edx), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 504(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 532(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 528(%esp), %ebx + movl 524(%esp), %edi + movl 520(%esp), %esi + movl 516(%esp), %edx + movl 508(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 512(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 24(%edx), %eax + movl %eax, (%esp) + leal 448(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 448(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 492(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 472(%esp), %ebp + movl 468(%esp), %edi + movl 464(%esp), %esi + movl 460(%esp), %edx + movl 452(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 456(%esp), %ecx + movl 864(%esp), %eax + movl 56(%esp), %ebx # 4-byte Reload + movl %ebx, 24(%eax) + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 68(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 
20(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 28(%edx), %eax + movl %eax, (%esp) + leal 392(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 392(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 444(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 440(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 436(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 432(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 428(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 424(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 416(%esp), %ebx + movl 412(%esp), %edi + movl 408(%esp), %esi + movl 404(%esp), %edx + movl 396(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 400(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 60(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 32(%edx), %eax + movl %eax, (%esp) + leal 336(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 336(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 380(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 376(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 360(%esp), %ebp + 
movl 356(%esp), %edi + movl 352(%esp), %esi + movl 348(%esp), %edx + movl 340(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 344(%esp), %ecx + movl 864(%esp), %eax + movl 60(%esp), %ebx # 4-byte Reload + movl %ebx, 32(%eax) + movl 24(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 68(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 36(%edx), %eax + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 280(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 316(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 304(%esp), %ebx + movl 300(%esp), %edi + movl 296(%esp), %esi + movl 292(%esp), %edx + movl 284(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 288(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte 
Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 40(%edx), %eax + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 224(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 272(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 248(%esp), %ebx + movl 244(%esp), %edi + movl 240(%esp), %esi + movl 236(%esp), %edx + movl 228(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 232(%esp), %ecx + movl 864(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 60(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 44(%edx), %eax + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 60(%esp), %esi # 4-byte Reload + addl 168(%esp), %esi + movl 220(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 204(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 200(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 196(%esp), %ebp + movl 192(%esp), %ebx + movl 188(%esp), %edi + movl 184(%esp), %edx + movl 180(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 176(%esp), %ecx + movl 864(%esp), %eax + movl %esi, 44(%eax) + movl 68(%esp), %esi # 4-byte Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 48(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 
4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + adcl 104(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 48(%edx), %eax + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 112(%esp), %esi + movl %esi, %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 120(%esp), %edi + movl 164(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 156(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 152(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 148(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 144(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 140(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 136(%esp), %ebx + movl 132(%esp), %esi + movl 128(%esp), %edx + movl 124(%esp), %ecx + movl 864(%esp), %eax + movl %ebp, 48(%eax) + movl 68(%esp), %ebp # 4-byte Reload + movl %ebp, 52(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %edi, 56(%eax) + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %ecx, 60(%eax) + adcl 64(%esp), %esi # 4-byte Folded Reload + movl %edx, 64(%eax) + adcl 104(%esp), %ebx # 4-byte Folded Reload + movl %esi, 68(%eax) + movl 44(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ebx, 72(%eax) + movl 60(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %ecx, 76(%eax) + movl 76(%esp), %ecx # 4-byte Reload + adcl 88(%esp), %ecx # 4-byte Folded Reload + movl %edx, 80(%eax) + movl 84(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %ecx, 84(%eax) + movl 92(%esp), %ecx # 4-byte Reload + adcl 100(%esp), %ecx # 4-byte Folded Reload + movl %edx, 88(%eax) + movl %ecx, 92(%eax) + movl 96(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 96(%eax) + movl 108(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 100(%eax) + addl $844, %esp # imm = 0x34C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end193: + .size mcl_fpDbl_sqrPre13Lbmi2, .Lfunc_end193-mcl_fpDbl_sqrPre13Lbmi2 + + .globl mcl_fp_mont13Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont13Lbmi2,@function +mcl_fp_mont13Lbmi2: # @mcl_fp_mont13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1548, %esp # imm = 0x60C + calll .L194$pb +.L194$pb: + popl %ebx +.Ltmp35: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp35-.L194$pb), %ebx + movl 1580(%esp), %eax + movl -4(%eax), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1488(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1488(%esp), %esi + movl 1492(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %esi, %eax + imull %edi, %eax 
+ movl 1540(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 1536(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1532(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1528(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1524(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1520(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1516(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1512(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 1508(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1504(%esp), %edi + movl 1500(%esp), %ebp + movl 1496(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 1432(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 1432(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 1436(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1440(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1444(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + adcl 1448(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 1452(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1456(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1460(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1464(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1472(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1476(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1480(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1484(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + sbbl %esi, %esi + movl 1576(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1376(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + andl $1, %esi + movl 76(%esp), %ecx # 4-byte Reload + addl 1376(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1380(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1384(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1388(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1400(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 1404(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 1408(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1412(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 1420(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 1428(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1320(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %esi + movl %esi, %eax + movl 76(%esp), %ecx # 4-byte Reload + addl 1320(%esp), %ecx + movl 
84(%esp), %ecx # 4-byte Reload + adcl 1324(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1328(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1332(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1336(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 1340(%esp), %esi + movl 36(%esp), %ecx # 4-byte Reload + adcl 1344(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 1348(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1352(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1356(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1360(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 1364(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 1368(%esp), %ebp + movl 64(%esp), %edi # 4-byte Reload + adcl 1372(%esp), %edi + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1264(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 84(%esp), %ecx # 4-byte Reload + addl 1264(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 1280(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 1300(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1308(%esp), %ebp + adcl 1312(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %ecx, %edi + movl %edi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl 1580(%esp), %eax + movl %eax, %edx + calll .LmulPv416x32 + movl 84(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1208(%esp), %edi + movl 60(%esp), %ecx # 4-byte Reload + adcl 1212(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1216(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1220(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 1224(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1228(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1232(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1236(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1240(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte 
Spill + adcl 1244(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 1248(%esp), %edi + adcl 1252(%esp), %ebp + movl %ebp, %esi + movl 64(%esp), %ecx # 4-byte Reload + adcl 1256(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 1260(%esp), %ebp + adcl $0, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1152(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 1152(%esp), %ecx + movl 40(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1188(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl 1192(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1200(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1204(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1096(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %ebp + movl %ebp, %eax + addl 1096(%esp), %esi + movl 40(%esp), %ecx # 4-byte Reload + adcl 1100(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1104(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 1108(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1112(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1116(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1120(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1124(%esp), %ebp + movl 68(%esp), %edi # 4-byte Reload + adcl 1128(%esp), %edi + movl 72(%esp), %esi # 4-byte Reload + adcl 1132(%esp), %esi + movl 80(%esp), %ecx # 4-byte Reload + adcl 1136(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1140(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1144(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1148(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1040(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 40(%esp), %ecx # 4-byte Reload + addl 1040(%esp), %ecx + movl 48(%esp), %eax # 4-byte Reload + adcl 1044(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 
4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1064(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 1068(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + adcl 1072(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1080(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 1084(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 1088(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %ebp + movl %ebp, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 984(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %edi + addl 984(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + adcl 996(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1028(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %edi + movl 1576(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 48(%esp), %ecx # 4-byte Reload + addl 928(%esp), %ecx + movl 32(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 936(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 944(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 76(%esp) # 
4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 980(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 872(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %edi + addl 872(%esp), %ebp + movl 32(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 884(%esp), %ebp + adcl 888(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 904(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 816(%esp), %ecx + movl 1572(%esp), %eax + movl %eax, %edx + calll .LmulPv416x32 + movl 32(%esp), %ecx # 4-byte Reload + addl 816(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 824(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 844(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 848(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 856(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %esi + movl %esi, %eax + movl 32(%esp), %ecx # 4-byte Reload + addl 760(%esp), %ecx + movl 36(%esp), %ecx # 4-byte Reload + adcl 764(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 768(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + 
movl 56(%esp), %ecx # 4-byte Reload + adcl 772(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 776(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 780(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 784(%esp), %esi + movl 80(%esp), %ecx # 4-byte Reload + adcl 788(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 792(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 796(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 800(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 804(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 808(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 812(%esp), %edi + adcl $0, %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 36(%esp), %eax # 4-byte Reload + addl 704(%esp), %eax + movl 44(%esp), %ecx # 4-byte Reload + adcl 708(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 712(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 716(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 720(%esp), %ebp + adcl 724(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 728(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 732(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 736(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 740(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 744(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 748(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 752(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 756(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %eax, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 648(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + movl %edi, %eax + andl $1, %eax + addl 648(%esp), %esi + movl 44(%esp), %ecx # 4-byte Reload + adcl 652(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 656(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 660(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 664(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 668(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 672(%esp), %edi + movl 64(%esp), %esi # 4-byte Reload + adcl 676(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 680(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 684(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 688(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 692(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 
4-byte Reload + adcl 696(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 592(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 44(%esp), %ecx # 4-byte Reload + addl 592(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 600(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 612(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + adcl 616(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 620(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 536(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + movl 44(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 536(%esp), %esi + movl 56(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 544(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 560(%esp), %esi + adcl 564(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 572(%esp), %edi + movl 48(%esp), %ebp # 4-byte Reload + adcl 576(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 480(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 56(%esp), %ecx # 4-byte Reload + addl 480(%esp), %ecx + movl 52(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 
80(%esp) # 4-byte Spill + adcl 500(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 512(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + adcl 516(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + movl %edi, %ecx + andl $1, %ecx + addl 424(%esp), %esi + movl 52(%esp), %ebp # 4-byte Reload + adcl 428(%esp), %ebp + movl 68(%esp), %edi # 4-byte Reload + adcl 432(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 444(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + addl 368(%esp), %ebp + adcl 372(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 376(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 384(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 392(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl %eax, %eax 
+ movl %eax, 52(%esp) # 4-byte Spill + movl %ebp, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + movl 52(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 312(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 320(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 328(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 336(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 340(%esp), %edi + movl 48(%esp), %esi # 4-byte Reload + adcl 344(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 256(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 68(%esp), %ecx # 4-byte Reload + addl 256(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 268(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 280(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + adcl 284(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 288(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 200(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %ebp + movl %ebp, %ecx + addl 200(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 208(%esp), %ebp + movl 64(%esp), %esi # 4-byte Reload + adcl 212(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl 
%eax, 48(%esp) # 4-byte Spill + adcl 232(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + adcl 236(%esp), %edi + movl 36(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 144(%esp), %ecx + adcl 148(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl 152(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 156(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 176(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 28(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 88(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %edi + addl 88(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + movl 84(%esp), %esi # 4-byte Reload + adcl 92(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 96(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 100(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + adcl 104(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ebx # 4-byte Reload + adcl 112(%esp), %ebx + movl %ebx, 48(%esp) # 4-byte Spill + movl 40(%esp), %ebx # 4-byte Reload + adcl 116(%esp), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 32(%esp), %ebx # 4-byte Reload + adcl 120(%esp), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ebx # 4-byte Reload + adcl 124(%esp), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ebx # 4-byte Reload + adcl 128(%esp), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ebx # 4-byte Reload + adcl 132(%esp), %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebx # 4-byte Reload + adcl 136(%esp), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 68(%esp), %ebx # 4-byte Reload + adcl 140(%esp), %ebx + movl %ebx, 68(%esp) # 4-byte Spill + adcl $0, %edi + movl 1580(%esp), %ebx + subl (%ebx), %eax + sbbl 4(%ebx), %ecx + sbbl 8(%ebx), %ebp + sbbl 12(%ebx), %esi + movl %esi, 4(%esp) # 4-byte Spill + sbbl 16(%ebx), %edx + 
movl %edx, 8(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + sbbl 20(%ebx), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + sbbl 24(%ebx), %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + sbbl 28(%ebx), %edx + movl 36(%esp), %esi # 4-byte Reload + sbbl 32(%ebx), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 36(%ebx), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + sbbl 40(%ebx), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + sbbl 44(%ebx), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + sbbl 48(%ebx), %esi + movl %esi, %ebx + sbbl $0, %edi + andl $1, %edi + jne .LBB194_2 +# BB#1: + movl %edx, 32(%esp) # 4-byte Spill +.LBB194_2: + movl %edi, %edx + testb %dl, %dl + movl 80(%esp), %edx # 4-byte Reload + jne .LBB194_4 +# BB#3: + movl %eax, %edx +.LBB194_4: + movl 1568(%esp), %eax + movl %edx, (%eax) + movl 64(%esp), %esi # 4-byte Reload + jne .LBB194_6 +# BB#5: + movl %ecx, %esi +.LBB194_6: + movl %esi, 4(%eax) + jne .LBB194_8 +# BB#7: + movl %ebp, 76(%esp) # 4-byte Spill +.LBB194_8: + movl 76(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + movl 60(%esp), %ebp # 4-byte Reload + jne .LBB194_10 +# BB#9: + movl 4(%esp), %ecx # 4-byte Reload + movl %ecx, 84(%esp) # 4-byte Spill +.LBB194_10: + movl 84(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + jne .LBB194_12 +# BB#11: + movl 8(%esp), %ebp # 4-byte Reload +.LBB194_12: + movl %ebp, 16(%eax) + movl 48(%esp), %ecx # 4-byte Reload + jne .LBB194_14 +# BB#13: + movl 12(%esp), %ecx # 4-byte Reload +.LBB194_14: + movl %ecx, 20(%eax) + movl 40(%esp), %ecx # 4-byte Reload + jne .LBB194_16 +# BB#15: + movl 16(%esp), %ecx # 4-byte Reload +.LBB194_16: + movl %ecx, 24(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB194_18 +# BB#17: + movl 20(%esp), %ecx # 4-byte Reload +.LBB194_18: + movl %ecx, 32(%eax) + movl 44(%esp), %ecx # 4-byte Reload + jne .LBB194_20 +# BB#19: + movl 24(%esp), %ecx # 4-byte Reload +.LBB194_20: + movl %ecx, 36(%eax) + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB194_22 +# BB#21: + movl 28(%esp), %ecx # 4-byte Reload +.LBB194_22: + movl %ecx, 40(%eax) + movl 52(%esp), %ecx # 4-byte Reload + jne .LBB194_24 +# BB#23: + movl 72(%esp), %ecx # 4-byte Reload +.LBB194_24: + movl %ecx, 44(%eax) + movl 68(%esp), %ecx # 4-byte Reload + jne .LBB194_26 +# BB#25: + movl %ebx, %ecx +.LBB194_26: + movl %ecx, 48(%eax) + addl $1548, %esp # imm = 0x60C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end194: + .size mcl_fp_mont13Lbmi2, .Lfunc_end194-mcl_fp_mont13Lbmi2 + + .globl mcl_fp_montNF13Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF13Lbmi2,@function +mcl_fp_montNF13Lbmi2: # @mcl_fp_montNF13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1548, %esp # imm = 0x60C + calll .L195$pb +.L195$pb: + popl %ebx +.Ltmp36: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp36-.L195$pb), %ebx + movl 1580(%esp), %eax + movl -4(%eax), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1488(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1488(%esp), %edi + movl 1492(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl %edi, %eax + imull %esi, %eax + movl 1540(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 1536(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte 
Spill + movl 1532(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1528(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1524(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1520(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1516(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1512(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1508(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1504(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1500(%esp), %esi + movl 1496(%esp), %ebp + movl %eax, (%esp) + leal 1432(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 1432(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 1436(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 1440(%esp), %ebp + adcl 1444(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1448(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1452(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1456(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1460(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1464(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 1472(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 1476(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1480(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 1484(%esp), %edi + movl 1576(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1376(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1428(%esp), %ecx + movl 80(%esp), %edx # 4-byte Reload + addl 1376(%esp), %edx + adcl 1380(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 1384(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1400(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1404(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1408(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1412(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1424(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1320(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 1320(%esp), %esi + adcl 1324(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 1328(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl 
%eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 1340(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 1360(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1264(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1316(%esp), %eax + addl 1264(%esp), %ebp + movl 64(%esp), %ecx # 4-byte Reload + adcl 1268(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1272(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1276(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 1280(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 1284(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + adcl 1288(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1296(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 1300(%esp), %edi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1304(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1308(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1312(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %ebp, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 1208(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 1212(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1216(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 1228(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1244(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 1248(%esp), %esi + movl 84(%esp), %edi # 4-byte Reload + adcl 1252(%esp), %edi + movl 80(%esp), %ebp # 4-byte Reload + adcl 1256(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1260(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1152(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1204(%esp), 
%eax + movl 64(%esp), %edx # 4-byte Reload + addl 1152(%esp), %edx + movl 40(%esp), %ecx # 4-byte Reload + adcl 1156(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1160(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1164(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1168(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1172(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1176(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1180(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1184(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 1188(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + adcl 1192(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + adcl 1196(%esp), %ebp + movl 68(%esp), %ecx # 4-byte Reload + adcl 1200(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1096(%esp), %ecx + movl 1580(%esp), %eax + movl %eax, %edx + calll .LmulPv416x32 + addl 1096(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 1116(%esp), %esi + movl 56(%esp), %edi # 4-byte Reload + adcl 1120(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1140(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 1148(%esp), %ebp + movl 1576(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1040(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1092(%esp), %eax + movl 40(%esp), %edx # 4-byte Reload + addl 1040(%esp), %edx + movl 48(%esp), %ecx # 4-byte Reload + adcl 1044(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1048(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1052(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + adcl 1056(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl 1060(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1064(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1068(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1072(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1076(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 
80(%esp), %ecx # 4-byte Reload + adcl 1080(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1084(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 1088(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl %eax, %esi + adcl $0, %esi + movl %edx, %edi + movl %edi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 984(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 984(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 996(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 1008(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1036(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 980(%esp), %eax + movl 48(%esp), %ecx # 4-byte Reload + addl 928(%esp), %ecx + movl 36(%esp), %edx # 4-byte Reload + adcl 932(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl 936(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 940(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 944(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl 948(%esp), %ebp + movl 72(%esp), %edx # 4-byte Reload + adcl 952(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 956(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 960(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 964(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 968(%esp), %esi + movl 64(%esp), %edx # 4-byte Reload + adcl 972(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 976(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl %ecx, %edi + movl %edi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 872(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 872(%esp), %edi + movl 36(%esp), %edi # 4-byte Reload + adcl 876(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 892(%esp), %ebp + movl 72(%esp), %eax # 4-byte 
Reload + adcl 896(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 912(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 816(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 868(%esp), %edx + addl 816(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 832(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 836(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 860(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %edi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 760(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 780(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 784(%esp), %esi + movl 84(%esp), %edi # 4-byte Reload + adcl 788(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 804(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 756(%esp), %eax + movl 44(%esp), %ecx # 4-byte Reload + addl 704(%esp), %ecx + movl 52(%esp), %edx # 4-byte Reload + adcl 708(%esp), 
%edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 712(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 716(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 720(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + adcl 724(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + adcl 728(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 732(%esp), %esi + movl 68(%esp), %edx # 4-byte Reload + adcl 736(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 740(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl 744(%esp), %ebp + movl 48(%esp), %edx # 4-byte Reload + adcl 748(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 752(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 648(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 648(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 676(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 688(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 696(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 592(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 644(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + addl 592(%esp), %ecx + movl 56(%esp), %esi # 4-byte Reload + adcl 596(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 624(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 636(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 
4-byte Reload + adcl 640(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 536(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 536(%esp), %edi + adcl 540(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 556(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 564(%esp), %esi + adcl 568(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 572(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 480(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 532(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + addl 480(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 496(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 504(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 508(%esp), %edi + adcl 512(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 528(%esp), %ebp + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 424(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %edi, %esi + adcl 452(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + 
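+# [editorial annotation] Each repetition of this pattern is one round of montNF:
+# .LmulPv416x32 with the x pointer (1572(%esp)) accumulates x[i]*y, imull 32(%esp)
+# forms q from the low limb (32(%esp) appears to hold n', loaded from -4 off the
+# modulus pointer in the prologue), and the call with 1580(%esp) folds in q*m.
+# As pseudocode (a sketch of the visible structure, not part of the source):
+#   for (i = 0; i < 13; i++) {       // one round per 32-bit word of x
+#     t += x[i] * y;                 // 13-limb by 1-word product (.LmulPv416x32)
+#     q  = (uint32_t)(t0 * nprime);  // t0 = low limb of t
+#     t  = (t + q * m) >> 32;        // second .LmulPv416x32, then shift one limb
+#   }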
movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 460(%esp), %edi + movl 36(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 472(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 420(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + addl 368(%esp), %ecx + movl 72(%esp), %ebp # 4-byte Reload + adcl 372(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 392(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 400(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 404(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 312(%esp), %esi + adcl 316(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 320(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 332(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 348(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 256(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 308(%esp), %edx + movl 72(%esp), %ecx # 4-byte Reload + addl 256(%esp), %ecx + adcl 260(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 272(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 
4-byte Reload + adcl 276(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 288(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 200(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 200(%esp), %esi + adcl 204(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 212(%esp), %ebp + movl 68(%esp), %esi # 4-byte Reload + adcl 216(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 228(%esp), %edi + movl 36(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 196(%esp), %edx + movl 76(%esp), %ecx # 4-byte Reload + addl 144(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 152(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl 156(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 164(%esp), %ebp + adcl 168(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 88(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 
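+# [editorial annotation] Final round of montNF: the q*m product at 88(%esp) is folded
+# in below, t - m is formed with subl/sbbl, and sarl $31 / js on the top limb picks the
+# result; montNF uses this signed select where mont13L above used the borrow bit.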
+ addl 88(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + movl 68(%esp), %edi # 4-byte Reload + adcl 92(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 96(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 100(%esp), %edi + movl 64(%esp), %ebx # 4-byte Reload + adcl 104(%esp), %ebx + movl %ebx, 64(%esp) # 4-byte Spill + adcl 108(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl %ebp, %esi + movl 48(%esp), %edx # 4-byte Reload + adcl 112(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 120(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 124(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 128(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 132(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 136(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 140(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, %edx + movl 1580(%esp), %eax + subl (%eax), %edx + movl %ecx, %ebp + sbbl 4(%eax), %ebp + movl %edi, %ecx + sbbl 8(%eax), %ecx + sbbl 12(%eax), %ebx + sbbl 16(%eax), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + sbbl 20(%eax), %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + sbbl 24(%eax), %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 28(%eax), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + sbbl 32(%eax), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + sbbl 36(%eax), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + sbbl 40(%eax), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + sbbl 44(%eax), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + sbbl 48(%eax), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl %esi, %eax + sarl $31, %eax + testl %eax, %eax + movl 84(%esp), %eax # 4-byte Reload + js .LBB195_2 +# BB#1: + movl %edx, %eax +.LBB195_2: + movl 1568(%esp), %edx + movl %eax, (%edx) + movl 80(%esp), %esi # 4-byte Reload + js .LBB195_4 +# BB#3: + movl %ebp, %esi +.LBB195_4: + movl %esi, 4(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB195_6 +# BB#5: + movl %ecx, %edi +.LBB195_6: + movl %edi, 8(%edx) + js .LBB195_8 +# BB#7: + movl %ebx, %eax +.LBB195_8: + movl %eax, 12(%edx) + movl 40(%esp), %eax # 4-byte Reload + js .LBB195_10 +# BB#9: + movl 4(%esp), %eax # 4-byte Reload +.LBB195_10: + movl %eax, 16(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB195_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB195_12: + movl %eax, 20(%edx) + movl 36(%esp), %eax # 4-byte Reload + js .LBB195_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB195_14: + movl %eax, 24(%edx) + movl 44(%esp), %eax # 4-byte Reload + js .LBB195_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB195_16: + movl %eax, 28(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB195_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB195_18: + movl %eax, 32(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB195_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB195_20: + movl %eax, 36(%edx) + movl 
60(%esp), %eax # 4-byte Reload + js .LBB195_22 +# BB#21: + movl 28(%esp), %eax # 4-byte Reload +.LBB195_22: + movl %eax, 40(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB195_24 +# BB#23: + movl 32(%esp), %eax # 4-byte Reload +.LBB195_24: + movl %eax, 44(%edx) + movl 76(%esp), %eax # 4-byte Reload + js .LBB195_26 +# BB#25: + movl 68(%esp), %eax # 4-byte Reload +.LBB195_26: + movl %eax, 48(%edx) + addl $1548, %esp # imm = 0x60C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end195: + .size mcl_fp_montNF13Lbmi2, .Lfunc_end195-mcl_fp_montNF13Lbmi2 + + .globl mcl_fp_montRed13Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed13Lbmi2,@function +mcl_fp_montRed13Lbmi2: # @mcl_fp_montRed13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $892, %esp # imm = 0x37C + calll .L196$pb +.L196$pb: + popl %eax +.Ltmp37: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp37-.L196$pb), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 920(%esp), %edx + movl -4(%edx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 916(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 76(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 80(%esp) # 4-byte Spill + imull %eax, %ebx + movl 100(%ecx), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 96(%ecx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%ecx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 88(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%ecx), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 80(%ecx), %esi + movl %esi, 136(%esp) # 4-byte Spill + movl 76(%ecx), %esi + movl %esi, 144(%esp) # 4-byte Spill + movl 72(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 68(%ecx), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 64(%ecx), %esi + movl %esi, 148(%esp) # 4-byte Spill + movl 60(%ecx), %esi + movl %esi, 152(%esp) # 4-byte Spill + movl 56(%ecx), %esi + movl %esi, 140(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl %esi, 156(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 44(%ecx), %ebp + movl %ebp, 124(%esp) # 4-byte Spill + movl 40(%ecx), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 36(%ecx), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %edi + movl 20(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 16(%ecx), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 12(%ecx), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 8(%ecx), %esi + movl (%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 832(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 76(%esp), %eax # 4-byte Reload + addl 832(%esp), %eax + movl 80(%esp), %ecx # 4-byte Reload + adcl 836(%esp), %ecx + adcl 840(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + 
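+# [editorial annotation] mcl_fp_montRed13Lbmi2 reduces a 26-limb input (pointer at
+# 916(%esp)) modulo the 13-limb modulus (pointer at 920(%esp)): the round begun above
+# repeats 13 times -- multiply the current low limb by n' (72(%esp), read from -4 off
+# the modulus pointer), add q*m via .LmulPv416x32, and slide the window up one limb.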
movl 56(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 856(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + adcl 860(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %edi + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 776(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + andl $1, %esi + addl 776(%esp), %edi + movl 80(%esp), %ecx # 4-byte Reload + adcl 780(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + movl 132(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, %edi + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 
4-byte Folded Spill + adcl $0, %esi + movl %esi, 80(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 720(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 720(%esp), %esi + movl 56(%esp), %esi # 4-byte Reload + adcl 724(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 132(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl 100(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 664(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 664(%esp), %esi + movl 60(%esp), %ecx # 4-byte Reload + adcl 668(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 
4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 96(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 608(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 608(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 612(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + movl 144(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 92(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 552(%esp), %esi + movl 68(%esp), %ecx # 4-byte Reload + adcl 556(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + 
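+# [editorial annotation] Between calls, the adcl chains differ only in which stack
+# slots hold the sliding 13-limb window; the trailing "adcl $0, ..." sequences fold
+# the carry into the input's not-yet-reduced upper limbs.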
movl %eax, 148(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 144(%esp) # 4-byte Spill + movl 136(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 112(%esp) # 4-byte Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 496(%esp), %edi + movl 76(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 152(%esp), %ebp # 4-byte Reload + adcl 532(%esp), %ebp + movl 148(%esp), %edi # 4-byte Reload + adcl 536(%esp), %edi + movl 128(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 136(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 440(%esp), %esi + movl 88(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl 472(%esp), %ebp + movl %ebp, 152(%esp) # 4-byte Spill + adcl 476(%esp), %edi + movl %edi, 148(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 
144(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 384(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 384(%esp), %esi + movl 104(%esp), %ecx # 4-byte Reload + adcl 388(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %ebp # 4-byte Reload + adcl 404(%esp), %ebp + movl 140(%esp), %edi # 4-byte Reload + adcl 408(%esp), %edi + movl 152(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 128(%esp), %esi # 4-byte Reload + adcl 420(%esp), %esi + movl 132(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 920(%esp), %eax + movl %eax, %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 328(%esp), %eax + movl 108(%esp), %ecx # 4-byte Reload + adcl 332(%esp), %ecx + movl 124(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 344(%esp), %ebp + movl %ebp, 156(%esp) # 4-byte Spill + adcl 348(%esp), %edi + movl %edi, 140(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl 360(%esp), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) 
# 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl %ecx, %edi + movl %edi, %eax + movl 72(%esp), %esi # 4-byte Reload + imull %esi, %eax + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 272(%esp), %edi + movl 124(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl 120(%esp), %edi # 4-byte Reload + adcl 280(%esp), %edi + movl 156(%esp), %ecx # 4-byte Reload + adcl 284(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 140(%esp), %ecx # 4-byte Reload + adcl 288(%esp), %ecx + movl %ecx, 140(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 292(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 148(%esp), %ecx # 4-byte Reload + adcl 296(%esp), %ecx + movl %ecx, 148(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 300(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 304(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 144(%esp), %ecx # 4-byte Reload + adcl 308(%esp), %ecx + movl %ecx, 144(%esp) # 4-byte Spill + movl 136(%esp), %ecx # 4-byte Reload + adcl 312(%esp), %ecx + movl %ecx, 136(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 316(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 320(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 324(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl %eax, %ebp + imull %esi, %eax + movl %eax, (%esp) + leal 216(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 216(%esp), %ebp + movl %edi, %ecx + adcl 220(%esp), %ecx + movl 156(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %ebp # 4-byte Reload + adcl 228(%esp), %ebp + movl 152(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %edi # 4-byte Reload + adcl 244(%esp), %edi + movl 144(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 160(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 160(%esp), %esi + movl 156(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl 168(%esp), %ebp + movl %ebp, 140(%esp) # 4-byte Spill + movl %ebp, %ebx + movl 152(%esp), 
%ecx # 4-byte Reload + adcl 172(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 148(%esp), %ebp # 4-byte Reload + adcl 176(%esp), %ebp + movl %ebp, 148(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl %eax, %edx + movl %edi, %eax + adcl 184(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl 156(%esp), %edi # 4-byte Reload + subl 12(%esp), %edi # 4-byte Folded Reload + sbbl 4(%esp), %ebx # 4-byte Folded Reload + sbbl 8(%esp), %ecx # 4-byte Folded Reload + sbbl 16(%esp), %ebp # 4-byte Folded Reload + sbbl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl 132(%esp), %edx # 4-byte Reload + sbbl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl 144(%esp), %edx # 4-byte Reload + sbbl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl 136(%esp), %edx # 4-byte Reload + sbbl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl 116(%esp), %edx # 4-byte Reload + sbbl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl 112(%esp), %edx # 4-byte Reload + sbbl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 104(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + sbbl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 108(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + sbbl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 120(%esp) # 4-byte Spill + movl %eax, %edx + movl %esi, %eax + sbbl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 124(%esp) # 4-byte Spill + sbbl $0, %eax + andl $1, %eax + jne .LBB196_2 +# BB#1: + movl %ebp, 148(%esp) # 4-byte Spill +.LBB196_2: + testb %al, %al + movl 156(%esp), %ebp # 4-byte Reload + jne .LBB196_4 +# BB#3: + movl %edi, %ebp +.LBB196_4: + movl 912(%esp), %edi + movl %ebp, (%edi) + movl 140(%esp), %ebp # 4-byte Reload + jne .LBB196_6 +# BB#5: + movl %ebx, %ebp +.LBB196_6: + movl %ebp, 4(%edi) + movl 152(%esp), %ebx # 4-byte Reload + jne .LBB196_8 +# BB#7: + movl %ecx, %ebx +.LBB196_8: + movl %ebx, 8(%edi) + movl 148(%esp), %esi # 4-byte Reload + movl %esi, 12(%edi) + movl 116(%esp), %ebx # 4-byte Reload + movl 128(%esp), %esi # 4-byte Reload + jne .LBB196_10 +# BB#9: + movl 72(%esp), %esi # 4-byte Reload +.LBB196_10: + movl %esi, 16(%edi) + movl 112(%esp), %esi # 4-byte Reload + movl 132(%esp), %edx # 4-byte Reload + jne .LBB196_12 +# BB#11: + movl 76(%esp), %edx # 4-byte Reload +.LBB196_12: + movl %edx, 20(%edi) + movl 96(%esp), %edx # 4-byte Reload + movl 144(%esp), %ecx # 4-byte Reload + jne .LBB196_14 +# BB#13: + movl 80(%esp), %ecx # 4-byte Reload +.LBB196_14: + movl %ecx, 24(%edi) + movl 100(%esp), %ecx # 4-byte Reload + movl 136(%esp), %eax # 4-byte 
Reload + jne .LBB196_16 +# BB#15: + movl 84(%esp), %eax # 4-byte Reload +.LBB196_16: + movl %eax, 28(%edi) + movl 92(%esp), %eax # 4-byte Reload + jne .LBB196_18 +# BB#17: + movl 88(%esp), %ebx # 4-byte Reload +.LBB196_18: + movl %ebx, 32(%edi) + jne .LBB196_20 +# BB#19: + movl 104(%esp), %esi # 4-byte Reload +.LBB196_20: + movl %esi, 36(%edi) + jne .LBB196_22 +# BB#21: + movl 108(%esp), %edx # 4-byte Reload +.LBB196_22: + movl %edx, 40(%edi) + jne .LBB196_24 +# BB#23: + movl 120(%esp), %ecx # 4-byte Reload +.LBB196_24: + movl %ecx, 44(%edi) + jne .LBB196_26 +# BB#25: + movl 124(%esp), %eax # 4-byte Reload +.LBB196_26: + movl %eax, 48(%edi) + addl $892, %esp # imm = 0x37C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end196: + .size mcl_fp_montRed13Lbmi2, .Lfunc_end196-mcl_fp_montRed13Lbmi2 + + .globl mcl_fp_addPre13Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre13Lbmi2,@function +mcl_fp_addPre13Lbmi2: # @mcl_fp_addPre13Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ebx) + movl 32(%ecx), %esi + adcl %edi, %esi + movl 36(%eax), %edi + movl %edx, 28(%ebx) + movl 36(%ecx), %edx + adcl %edi, %edx + movl 40(%eax), %edi + movl %esi, 32(%ebx) + movl 40(%ecx), %esi + adcl %edi, %esi + movl 44(%eax), %edi + movl %edx, 36(%ebx) + movl 44(%ecx), %edx + adcl %edi, %edx + movl %esi, 40(%ebx) + movl %edx, 44(%ebx) + movl 48(%eax), %eax + movl 48(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 48(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end197: + .size mcl_fp_addPre13Lbmi2, .Lfunc_end197-mcl_fp_addPre13Lbmi2 + + .globl mcl_fp_subPre13Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre13Lbmi2,@function +mcl_fp_subPre13Lbmi2: # @mcl_fp_subPre13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ebp) + movl 32(%ecx), %edi + sbbl %ebx, %edi + movl 36(%edx), %ebx + movl %esi, 28(%ebp) + movl 36(%ecx), %esi + sbbl %ebx, %esi + movl 40(%edx), %ebx + movl %edi, 32(%ebp) + movl 40(%ecx), %edi + sbbl %ebx, %edi + movl 44(%edx), %ebx + movl %esi, 36(%ebp) + movl 44(%ecx), %esi + sbbl %ebx, %esi + movl %edi, 40(%ebp) + movl %esi, 44(%ebp) + movl 48(%edx), %edx + movl 48(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 48(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl 
%ebx + popl %ebp + retl +.Lfunc_end198: + .size mcl_fp_subPre13Lbmi2, .Lfunc_end198-mcl_fp_subPre13Lbmi2 + + .globl mcl_fp_shr1_13Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_13Lbmi2,@function +mcl_fp_shr1_13Lbmi2: # @mcl_fp_shr1_13Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 40(%ecx) + movl 48(%eax), %eax + shrdl $1, %eax, %esi + movl %esi, 44(%ecx) + shrl %eax + movl %eax, 48(%ecx) + popl %esi + retl +.Lfunc_end199: + .size mcl_fp_shr1_13Lbmi2, .Lfunc_end199-mcl_fp_shr1_13Lbmi2 + + .globl mcl_fp_add13Lbmi2 + .align 16, 0x90 + .type mcl_fp_add13Lbmi2,@function +mcl_fp_add13Lbmi2: # @mcl_fp_add13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 68(%esp), %ebp + movl (%ebp), %ecx + movl 4(%ebp), %eax + movl 64(%esp), %ebx + addl (%ebx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + adcl 4(%ebx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 8(%ebp), %eax + adcl 8(%ebx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 12(%ebx), %ecx + movl 16(%ebx), %eax + adcl 12(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + adcl 16(%ebp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 20(%ebx), %eax + adcl 20(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 24(%ebx), %eax + adcl 24(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 28(%ebx), %eax + adcl 28(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 32(%ebx), %eax + adcl 32(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 36(%ebx), %ecx + adcl 36(%ebp), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 40(%ebx), %edi + adcl 40(%ebp), %edi + movl 44(%ebx), %edx + adcl 44(%ebp), %edx + movl 48(%ebx), %esi + adcl 48(%ebp), %esi + movl 60(%esp), %ebp + movl 4(%esp), %ebx # 4-byte Reload + movl %ebx, (%ebp) + movl 36(%esp), %eax # 4-byte Reload + movl %eax, 4(%ebp) + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 8(%ebp) + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 12(%ebp) + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 16(%ebp) + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 20(%ebp) + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 24(%ebp) + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 28(%ebp) + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 32(%ebp) + movl %ecx, 36(%ebp) + movl %edi, 40(%ebp) + movl %edx, 44(%ebp) + movl %esi, 48(%ebp) + sbbl %eax, %eax + andl $1, %eax + movl 72(%esp), %ecx + subl (%ecx), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + movl 36(%esp), %ebx # 4-byte Reload + sbbl 4(%ecx), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 32(%esp), %ebx # 4-byte Reload + sbbl 8(%ecx), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + movl 28(%esp), %ebx # 4-byte Reload + sbbl 12(%ecx), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 24(%esp), %ebx # 4-byte Reload + sbbl 16(%ecx), %ebx + movl %ebx, 24(%esp) 
# 4-byte Spill + movl 20(%esp), %ebx # 4-byte Reload + sbbl 20(%ecx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 16(%esp), %ebx # 4-byte Reload + sbbl 24(%ecx), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 12(%esp), %ebx # 4-byte Reload + sbbl 28(%ecx), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 8(%esp), %ebx # 4-byte Reload + sbbl 32(%ecx), %ebx + movl %ebx, 8(%esp) # 4-byte Spill + movl (%esp), %ebx # 4-byte Reload + sbbl 36(%ecx), %ebx + sbbl 40(%ecx), %edi + sbbl 44(%ecx), %edx + sbbl 48(%ecx), %esi + sbbl $0, %eax + testb $1, %al + jne .LBB200_2 +# BB#1: # %nocarry + movl 4(%esp), %eax # 4-byte Reload + movl %eax, (%ebp) + movl 36(%esp), %eax # 4-byte Reload + movl %eax, 4(%ebp) + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 8(%ebp) + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 12(%ebp) + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 16(%ebp) + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 20(%ebp) + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 24(%ebp) + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 28(%ebp) + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 32(%ebp) + movl %ebx, 36(%ebp) + movl %edi, 40(%ebp) + movl %edx, 44(%ebp) + movl %esi, 48(%ebp) +.LBB200_2: # %carry + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end200: + .size mcl_fp_add13Lbmi2, .Lfunc_end200-mcl_fp_add13Lbmi2 + + .globl mcl_fp_addNF13Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF13Lbmi2,@function +mcl_fp_addNF13Lbmi2: # @mcl_fp_addNF13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $100, %esp + movl 128(%esp), %esi + movl (%esi), %ecx + movl 4(%esi), %eax + movl 124(%esp), %edx + addl (%edx), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 4(%edx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 48(%esi), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 44(%esi), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 40(%esi), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 36(%esi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 32(%esi), %ebp + movl 28(%esi), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 24(%esi), %eax + movl 20(%esi), %ebx + movl 16(%esi), %edi + movl 12(%esi), %ecx + movl 8(%esi), %esi + adcl 8(%edx), %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl 12(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 16(%edx), %edi + movl %edi, 56(%esp) # 4-byte Spill + adcl 20(%edx), %ebx + movl %ebx, 60(%esp) # 4-byte Spill + adcl 24(%edx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 28(%edx), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 32(%edx), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 36(%edx), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 40(%edx), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%edx), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 48(%edx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 132(%esp), %edx + movl 64(%esp), %eax # 4-byte Reload + subl (%edx), %eax + movl 68(%esp), %ebp # 4-byte Reload + sbbl 4(%edx), %ebp + movl %ebp, (%esp) # 4-byte Spill + sbbl 8(%edx), %esi + movl %esi, 4(%esp) # 4-byte Spill + sbbl 12(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 16(%edx), %edi + movl %edi, 12(%esp) # 4-byte Spill + sbbl 20(%edx), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + sbbl 24(%edx), %ebp + movl %ebp, 20(%esp) # 
4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + sbbl 28(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + sbbl 32(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + movl %esi, %ecx + movl %esi, %ebp + sbbl 36(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + movl %esi, %ecx + movl %esi, %edi + sbbl 40(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 44(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 48(%edx), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + sarl $31, %ebx + testl %ebx, %ebx + movl 64(%esp), %edx # 4-byte Reload + js .LBB201_2 +# BB#1: + movl %eax, %edx +.LBB201_2: + movl 120(%esp), %esi + movl %edx, (%esi) + movl 68(%esp), %edx # 4-byte Reload + js .LBB201_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload +.LBB201_4: + movl %edx, 4(%esi) + movl %edi, %edx + movl 52(%esp), %ebx # 4-byte Reload + movl 48(%esp), %eax # 4-byte Reload + js .LBB201_6 +# BB#5: + movl 4(%esp), %eax # 4-byte Reload +.LBB201_6: + movl %eax, 8(%esi) + movl %ebp, %edi + movl 60(%esp), %eax # 4-byte Reload + js .LBB201_8 +# BB#7: + movl 8(%esp), %ebx # 4-byte Reload +.LBB201_8: + movl %ebx, 12(%esi) + movl 96(%esp), %ebp # 4-byte Reload + movl 56(%esp), %ecx # 4-byte Reload + js .LBB201_10 +# BB#9: + movl 12(%esp), %ecx # 4-byte Reload +.LBB201_10: + movl %ecx, 16(%esi) + movl 92(%esp), %ecx # 4-byte Reload + js .LBB201_12 +# BB#11: + movl 16(%esp), %eax # 4-byte Reload +.LBB201_12: + movl %eax, 20(%esi) + movl 72(%esp), %ebx # 4-byte Reload + js .LBB201_14 +# BB#13: + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 76(%esp) # 4-byte Spill +.LBB201_14: + movl 76(%esp), %eax # 4-byte Reload + movl %eax, 24(%esi) + js .LBB201_16 +# BB#15: + movl 24(%esp), %ebp # 4-byte Reload +.LBB201_16: + movl %ebp, 28(%esi) + js .LBB201_18 +# BB#17: + movl 28(%esp), %ebx # 4-byte Reload +.LBB201_18: + movl %ebx, 32(%esi) + js .LBB201_20 +# BB#19: + movl 32(%esp), %edi # 4-byte Reload +.LBB201_20: + movl %edi, 36(%esi) + js .LBB201_22 +# BB#21: + movl 36(%esp), %edx # 4-byte Reload +.LBB201_22: + movl %edx, 40(%esi) + js .LBB201_24 +# BB#23: + movl 40(%esp), %ecx # 4-byte Reload +.LBB201_24: + movl %ecx, 44(%esi) + movl 88(%esp), %eax # 4-byte Reload + js .LBB201_26 +# BB#25: + movl 44(%esp), %eax # 4-byte Reload +.LBB201_26: + movl %eax, 48(%esi) + addl $100, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end201: + .size mcl_fp_addNF13Lbmi2, .Lfunc_end201-mcl_fp_addNF13Lbmi2 + + .globl mcl_fp_sub13Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub13Lbmi2,@function +mcl_fp_sub13Lbmi2: # @mcl_fp_sub13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $44, %esp + movl 68(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 72(%esp), %edi + subl (%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 32(%esi), %edx + sbbl 
32(%edi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 36(%esi), %ecx + sbbl 36(%edi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 40(%esi), %eax + sbbl 40(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 44(%esi), %ebp + sbbl 44(%edi), %ebp + movl 48(%esi), %esi + sbbl 48(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 64(%esp), %ebx + movl 12(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 28(%ebx) + movl %edx, 32(%ebx) + movl %ecx, 36(%ebx) + movl %eax, 40(%ebx) + movl %ebp, 44(%ebx) + movl %esi, 48(%ebx) + je .LBB202_2 +# BB#1: # %carry + movl %esi, %edi + movl 76(%esp), %esi + movl 12(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 28(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 36(%esp), %ecx # 4-byte Reload + adcl 8(%esi), %ecx + movl 12(%esi), %eax + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebx) + movl 16(%esi), %ecx + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 40(%esi), %ecx + adcl (%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl %ecx, 40(%ebx) + movl 44(%esi), %eax + adcl %ebp, %eax + movl %eax, 44(%ebx) + movl 48(%esi), %eax + adcl %edi, %eax + movl %eax, 48(%ebx) +.LBB202_2: # %nocarry + addl $44, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end202: + .size mcl_fp_sub13Lbmi2, .Lfunc_end202-mcl_fp_sub13Lbmi2 + + .globl mcl_fp_subNF13Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF13Lbmi2,@function +mcl_fp_subNF13Lbmi2: # @mcl_fp_subNF13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $80, %esp + movl 104(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %eax + movl 108(%esp), %edi + subl (%edi), %edx + movl %edx, 48(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 32(%ecx), %ebp + movl 28(%ecx), %ebx + movl 24(%ecx), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 32(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 24(%edi), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + sbbl 28(%edi), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + sbbl 32(%edi), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 
64(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + sbbl 48(%edi), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %edx, %esi + sarl $31, %esi + movl %esi, %ecx + shldl $1, %edx, %ecx + movl 112(%esp), %edi + movl 4(%edi), %eax + andl %ecx, %eax + movl %eax, 56(%esp) # 4-byte Spill + andl (%edi), %ecx + movl 48(%edi), %eax + andl %esi, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 44(%edi), %eax + andl %esi, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 40(%edi), %eax + andl %esi, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 36(%edi), %eax + andl %esi, %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 32(%edi), %eax + andl %esi, %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 28(%edi), %eax + andl %esi, %eax + movl %eax, (%esp) # 4-byte Spill + movl 24(%edi), %ebp + andl %esi, %ebp + movl 20(%edi), %ebx + andl %esi, %ebx + movl 16(%edi), %edx + andl %esi, %edx + rorxl $31, %esi, %eax + andl 12(%edi), %esi + andl 8(%edi), %eax + addl 48(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %edi # 4-byte Reload + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 56(%esp) # 4-byte Spill + movl 100(%esp), %edi + movl %ecx, (%edi) + adcl 24(%esp), %eax # 4-byte Folded Reload + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%edi) + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %eax, 8(%edi) + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %esi, 12(%edi) + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %edx, 16(%edi) + adcl 76(%esp), %ebp # 4-byte Folded Reload + movl %ebx, 20(%edi) + movl (%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %ebp, 24(%edi) + movl 4(%esp), %ecx # 4-byte Reload + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%edi) + movl 8(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%edi) + movl 12(%esp), %ecx # 4-byte Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%edi) + movl 16(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%edi) + movl %eax, 44(%edi) + movl 20(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%edi) + addl $80, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end203: + .size mcl_fp_subNF13Lbmi2, .Lfunc_end203-mcl_fp_subNF13Lbmi2 + + .globl mcl_fpDbl_add13Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add13Lbmi2,@function +mcl_fpDbl_add13Lbmi2: # @mcl_fpDbl_add13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $96, %esp + movl 124(%esp), %ecx + movl 120(%esp), %esi + movl 12(%esi), %edi + movl 16(%esi), %edx + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%esi), %ebp + movl 116(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%esi), %ebp + adcl 8(%esi), %ebx + adcl 12(%ecx), %edi + adcl 16(%ecx), %edx + movl %ebp, 4(%eax) + movl 60(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %edi, 12(%eax) + movl 20(%esi), %edi + adcl %ebx, %edi + movl 24(%ecx), %ebx + movl %edx, 16(%eax) + movl 24(%esi), %edx + adcl %ebx, %edx + movl 28(%ecx), %ebx + movl %edi, 20(%eax) + movl 28(%esi), %edi + adcl %ebx, %edi + movl 32(%ecx), %ebx + movl %edx, 24(%eax) + movl 32(%esi), %edx + adcl %ebx, %edx + movl 36(%ecx), %ebx + movl %edi, 
28(%eax) + movl 36(%esi), %edi + adcl %ebx, %edi + movl 40(%ecx), %ebx + movl %edx, 32(%eax) + movl 40(%esi), %edx + adcl %ebx, %edx + movl 44(%ecx), %ebx + movl %edi, 36(%eax) + movl 44(%esi), %edi + adcl %ebx, %edi + movl 48(%ecx), %ebx + movl %edx, 40(%eax) + movl 48(%esi), %edx + adcl %ebx, %edx + movl 52(%ecx), %ebx + movl %edi, 44(%eax) + movl 52(%esi), %edi + adcl %ebx, %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 56(%ecx), %edi + movl %edx, 48(%eax) + movl 56(%esi), %eax + adcl %edi, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esi), %eax + adcl %ebp, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 64(%ecx), %edx + movl 64(%esi), %eax + adcl %edx, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 68(%ecx), %edx + movl 68(%esi), %eax + adcl %edx, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 72(%ecx), %edx + movl 72(%esi), %eax + adcl %edx, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%ecx), %edx + movl 76(%esi), %eax + adcl %edx, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%ecx), %edx + movl 80(%esi), %eax + adcl %edx, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%ecx), %edx + movl 84(%esi), %eax + adcl %edx, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%ecx), %edx + movl 88(%esi), %edi + adcl %edx, %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 92(%ecx), %edx + movl 92(%esi), %eax + adcl %edx, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 96(%ecx), %edx + movl 96(%esi), %ebx + adcl %edx, %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 100(%ecx), %ecx + movl 100(%esi), %esi + adcl %ecx, %esi + sbbl %edx, %edx + andl $1, %edx + movl 128(%esp), %ebp + movl 76(%esp), %ecx # 4-byte Reload + subl (%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 4(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 8(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 12(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 16(%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + sbbl 20(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + sbbl 24(%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + sbbl 32(%ebp), %ecx + movl %ecx, (%esp) # 4-byte Spill + sbbl 36(%ebp), %edi + movl %edi, 36(%esp) # 4-byte Spill + sbbl 40(%ebp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %eax + movl %esi, %ebx + sbbl 44(%ebp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ebx, %ecx + sbbl 48(%ebp), %ecx + sbbl $0, %edx + andl $1, %edx + jne .LBB204_2 +# BB#1: + movl %ecx, %ebx +.LBB204_2: + testb %dl, %dl + movl 76(%esp), %ecx # 4-byte Reload + movl 72(%esp), %edx # 4-byte Reload + movl 68(%esp), %esi # 4-byte Reload + movl 64(%esp), %edi # 4-byte Reload + movl 60(%esp), %ebp # 4-byte Reload + jne .LBB204_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload + movl 4(%esp), %esi # 4-byte Reload + movl 8(%esp), %edi # 4-byte Reload + movl 12(%esp), %ebp # 4-byte Reload + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload 
+.LBB204_4: + movl 116(%esp), %eax + movl %ecx, 52(%eax) + movl 80(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + movl 84(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + movl 88(%esp), %ecx # 4-byte Reload + movl %ecx, 64(%eax) + movl 92(%esp), %ecx # 4-byte Reload + movl %ecx, 68(%eax) + movl %ebp, 72(%eax) + movl %edi, 76(%eax) + movl %esi, 80(%eax) + movl %edx, 84(%eax) + movl 56(%esp), %ecx # 4-byte Reload + movl 52(%esp), %edx # 4-byte Reload + movl 48(%esp), %esi # 4-byte Reload + jne .LBB204_6 +# BB#5: + movl 36(%esp), %esi # 4-byte Reload +.LBB204_6: + movl %esi, 88(%eax) + jne .LBB204_8 +# BB#7: + movl 40(%esp), %edx # 4-byte Reload +.LBB204_8: + movl %edx, 92(%eax) + jne .LBB204_10 +# BB#9: + movl 44(%esp), %ecx # 4-byte Reload +.LBB204_10: + movl %ecx, 96(%eax) + movl %ebx, 100(%eax) + addl $96, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end204: + .size mcl_fpDbl_add13Lbmi2, .Lfunc_end204-mcl_fpDbl_add13Lbmi2 + + .globl mcl_fpDbl_sub13Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub13Lbmi2,@function +mcl_fpDbl_sub13Lbmi2: # @mcl_fpDbl_sub13Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $84, %esp + movl 108(%esp), %edi + movl (%edi), %eax + movl 4(%edi), %edx + movl 112(%esp), %ebx + subl (%ebx), %eax + sbbl 4(%ebx), %edx + movl 8(%edi), %esi + sbbl 8(%ebx), %esi + movl 104(%esp), %ecx + movl %eax, (%ecx) + movl 12(%edi), %eax + sbbl 12(%ebx), %eax + movl %edx, 4(%ecx) + movl 16(%edi), %edx + sbbl 16(%ebx), %edx + movl %esi, 8(%ecx) + movl 20(%ebx), %esi + movl %eax, 12(%ecx) + movl 20(%edi), %eax + sbbl %esi, %eax + movl 24(%ebx), %esi + movl %edx, 16(%ecx) + movl 24(%edi), %edx + sbbl %esi, %edx + movl 28(%ebx), %esi + movl %eax, 20(%ecx) + movl 28(%edi), %eax + sbbl %esi, %eax + movl 32(%ebx), %esi + movl %edx, 24(%ecx) + movl 32(%edi), %edx + sbbl %esi, %edx + movl 36(%ebx), %esi + movl %eax, 28(%ecx) + movl 36(%edi), %eax + sbbl %esi, %eax + movl 40(%ebx), %esi + movl %edx, 32(%ecx) + movl 40(%edi), %edx + sbbl %esi, %edx + movl 44(%ebx), %esi + movl %eax, 36(%ecx) + movl 44(%edi), %eax + sbbl %esi, %eax + movl 48(%ebx), %esi + movl %edx, 40(%ecx) + movl 48(%edi), %edx + sbbl %esi, %edx + movl 52(%ebx), %esi + movl %eax, 44(%ecx) + movl 52(%edi), %eax + sbbl %esi, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 56(%ebx), %eax + movl %edx, 48(%ecx) + movl 56(%edi), %edx + sbbl %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 60(%ebx), %eax + movl 60(%edi), %edx + sbbl %eax, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 64(%ebx), %eax + movl 64(%edi), %edx + sbbl %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 68(%ebx), %eax + movl 68(%edi), %edx + sbbl %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 72(%ebx), %eax + movl 72(%edi), %edx + sbbl %eax, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 76(%ebx), %eax + movl 76(%edi), %edx + sbbl %eax, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 80(%ebx), %eax + movl 80(%edi), %edx + sbbl %eax, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 84(%ebx), %eax + movl 84(%edi), %edx + sbbl %eax, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 88(%ebx), %eax + movl 88(%edi), %edx + sbbl %eax, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 92(%ebx), %eax + movl 92(%edi), %edx + sbbl %eax, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 96(%ebx), %eax + movl 96(%edi), %edx + sbbl %eax, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 100(%ebx), %eax + movl 100(%edi), %edx + sbbl %eax, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl $0, %eax + sbbl 
$0, %eax + andl $1, %eax + movl 116(%esp), %edi + jne .LBB205_1 +# BB#2: + movl $0, 44(%esp) # 4-byte Folded Spill + jmp .LBB205_3 +.LBB205_1: + movl 48(%edi), %edx + movl %edx, 44(%esp) # 4-byte Spill +.LBB205_3: + testb %al, %al + jne .LBB205_4 +# BB#5: + movl $0, 16(%esp) # 4-byte Folded Spill + movl $0, %ebx + jmp .LBB205_6 +.LBB205_4: + movl (%edi), %ebx + movl 4(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB205_6: + jne .LBB205_7 +# BB#8: + movl $0, 24(%esp) # 4-byte Folded Spill + jmp .LBB205_9 +.LBB205_7: + movl 44(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill +.LBB205_9: + jne .LBB205_10 +# BB#11: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB205_12 +.LBB205_10: + movl 40(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB205_12: + jne .LBB205_13 +# BB#14: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB205_15 +.LBB205_13: + movl 36(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB205_15: + jne .LBB205_16 +# BB#17: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB205_18 +.LBB205_16: + movl 32(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB205_18: + jne .LBB205_19 +# BB#20: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB205_21 +.LBB205_19: + movl 28(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB205_21: + jne .LBB205_22 +# BB#23: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB205_24 +.LBB205_22: + movl 24(%edi), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB205_24: + jne .LBB205_25 +# BB#26: + movl $0, %eax + jmp .LBB205_27 +.LBB205_25: + movl 20(%edi), %eax +.LBB205_27: + jne .LBB205_28 +# BB#29: + movl $0, %edx + jmp .LBB205_30 +.LBB205_28: + movl 16(%edi), %edx +.LBB205_30: + jne .LBB205_31 +# BB#32: + movl $0, %esi + jmp .LBB205_33 +.LBB205_31: + movl 12(%edi), %esi +.LBB205_33: + jne .LBB205_34 +# BB#35: + xorl %edi, %edi + jmp .LBB205_36 +.LBB205_34: + movl 8(%edi), %edi +.LBB205_36: + addl 36(%esp), %ebx # 4-byte Folded Reload + movl 16(%esp), %ebp # 4-byte Reload + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebx, 52(%ecx) + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %ebp, 56(%ecx) + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %edi, 60(%ecx) + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %esi, 64(%ecx) + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %edx, 68(%ecx) + movl (%esp), %edx # 4-byte Reload + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %eax, 72(%ecx) + movl 4(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %edx, 76(%ecx) + movl 8(%esp), %edx # 4-byte Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %eax, 80(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %edx, 84(%ecx) + movl 20(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %eax, 88(%ecx) + movl 24(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %edx, 92(%ecx) + movl %eax, 96(%ecx) + movl 44(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%ecx) + addl $84, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end205: + .size mcl_fpDbl_sub13Lbmi2, .Lfunc_end205-mcl_fpDbl_sub13Lbmi2 + + .align 16, 0x90 + .type .LmulPv448x32,@function +.LmulPv448x32: # @mulPv448x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $48, %esp + movl %edx, %eax + movl 68(%esp), %ebx + movl %ebx, %edx + mulxl 4(%eax), %edi, %esi + movl %ebx, %edx + mulxl (%eax), %ebp, %edx + movl %ebp, 44(%esp) # 4-byte Spill + addl %edi, %edx + 
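+# annotation (inferred from this routine's body): .LmulPv448x32 multiplies the
+# 14-limb (448-bit) operand addressed by %eax by the 32-bit word in %ebx, using
+# mulxl for each partial product and adcl to chain the high halves, and stores
+# the 15-word (480-bit) result through %ecx.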
movl %edx, 40(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 8(%eax), %edx, %edi + adcl %esi, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 12(%eax), %edx, %esi + adcl %edi, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 16(%eax), %edx, %edi + adcl %esi, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 20(%eax), %edx, %esi + adcl %edi, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 24(%eax), %edx, %edi + adcl %esi, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 28(%eax), %edx, %esi + adcl %edi, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 32(%eax), %edx, %edi + adcl %esi, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 36(%eax), %edx, %esi + adcl %edi, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %ebx, %edx + mulxl 40(%eax), %edi, %ebp + adcl %esi, %edi + movl %ebx, %edx + mulxl 44(%eax), %esi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %ebp, %esi + movl %ebx, %edx + mulxl 48(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 44(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 36(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 32(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl 28(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%ecx) + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%ecx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%ecx) + movl %edi, 40(%ecx) + movl %esi, 44(%ecx) + movl %edx, 48(%ecx) + movl %ebx, %edx + mulxl 52(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%ecx) + adcl $0, %edx + movl %edx, 56(%ecx) + movl %ecx, %eax + addl $48, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end206: + .size .LmulPv448x32, .Lfunc_end206-.LmulPv448x32 + + .globl mcl_fp_mulUnitPre14Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre14Lbmi2,@function +mcl_fp_mulUnitPre14Lbmi2: # @mcl_fp_mulUnitPre14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $108, %esp + calll .L207$pb +.L207$pb: + popl %ebx +.Ltmp38: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp38-.L207$pb), %ebx + movl 136(%esp), %eax + movl %eax, (%esp) + leal 48(%esp), %ecx + movl 132(%esp), %edx + calll .LmulPv448x32 + movl 104(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 88(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 72(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 68(%esp), %ebp + movl 64(%esp), %ebx + movl 60(%esp), %edi + movl 56(%esp), %esi + movl 48(%esp), %edx + movl 52(%esp), %ecx + movl 128(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + 
movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + addl $108, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end207: + .size mcl_fp_mulUnitPre14Lbmi2, .Lfunc_end207-mcl_fp_mulUnitPre14Lbmi2 + + .globl mcl_fpDbl_mulPre14Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre14Lbmi2,@function +mcl_fpDbl_mulPre14Lbmi2: # @mcl_fpDbl_mulPre14Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $268, %esp # imm = 0x10C + calll .L208$pb +.L208$pb: + popl %ebx +.Ltmp39: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp39-.L208$pb), %ebx + movl %ebx, -192(%ebp) # 4-byte Spill + movl 16(%ebp), %esi + movl %esi, 8(%esp) + movl 12(%ebp), %edi + movl %edi, 4(%esp) + movl 8(%ebp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre7Lbmi2@PLT + leal 28(%esi), %eax + movl %eax, 8(%esp) + leal 28(%edi), %eax + movl %eax, 4(%esp) + movl 8(%ebp), %eax + leal 56(%eax), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre7Lbmi2@PLT + movl 44(%edi), %eax + movl %eax, -156(%ebp) # 4-byte Spill + movl 40(%edi), %eax + movl 36(%edi), %edx + movl (%edi), %edi + movl 12(%ebp), %ecx + movl 4(%ecx), %ecx + movl 12(%ebp), %ebx + addl 28(%ebx), %edi + movl %edi, -180(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + adcl 32(%edi), %ecx + movl %ecx, -200(%ebp) # 4-byte Spill + adcl 8(%edi), %edx + movl %edx, -212(%ebp) # 4-byte Spill + adcl 12(%edi), %eax + movl %eax, -196(%ebp) # 4-byte Spill + movl -156(%ebp), %eax # 4-byte Reload + adcl 16(%edi), %eax + movl %eax, -156(%ebp) # 4-byte Spill + movl %eax, %ebx + seto %al + lahf + movl %eax, %eax + movl %eax, -128(%ebp) # 4-byte Spill + movl (%esi), %eax + addl 28(%esi), %eax + movl %eax, -216(%ebp) # 4-byte Spill + movl 4(%esi), %eax + adcl 32(%esi), %eax + movl %eax, -164(%ebp) # 4-byte Spill + movl 36(%esi), %eax + adcl 8(%esi), %eax + movl %eax, -168(%ebp) # 4-byte Spill + movl 40(%esi), %eax + adcl 12(%esi), %eax + movl %eax, -172(%ebp) # 4-byte Spill + movl 44(%esi), %eax + adcl 16(%esi), %eax + movl %eax, -176(%ebp) # 4-byte Spill + movl 48(%esi), %ecx + adcl 20(%esi), %ecx + movl 52(%esi), %eax + adcl 24(%esi), %eax + pushl %eax + seto %al + lahf + movl %eax, %esi + popl %eax + movl %esi, -220(%ebp) # 4-byte Spill + movl %ebx, %esi + movl %edx, -184(%ebp) # 4-byte Spill + movl -180(%ebp), %edx # 4-byte Reload + movl %edx, -188(%ebp) # 4-byte Spill + jb .LBB208_2 +# BB#1: + xorl %esi, %esi + movl $0, -184(%ebp) # 4-byte Folded Spill + movl $0, -188(%ebp) # 4-byte Folded Spill +.LBB208_2: + movl %esi, -204(%ebp) # 4-byte Spill + movl 52(%edi), %esi + movl 48(%edi), %ebx + movl -128(%ebp), %edx # 4-byte Reload + pushl %eax + movl %edx, %eax + addb $127, %al + sahf + popl %eax + adcl 20(%edi), %ebx + movl %ebx, -160(%ebp) # 4-byte Spill + adcl 24(%edi), %esi + movl %esi, -208(%ebp) # 4-byte Spill + movl %eax, -148(%ebp) # 4-byte Spill + movl %ecx, -152(%ebp) # 4-byte Spill + movl -176(%ebp), %esi # 4-byte Reload + movl %esi, -128(%ebp) # 4-byte Spill + movl -172(%ebp), %esi # 4-byte Reload + movl %esi, -132(%ebp) # 4-byte Spill + movl -168(%ebp), %esi # 4-byte Reload + movl %esi, -136(%ebp) # 4-byte Spill + movl -164(%ebp), %esi # 4-byte Reload + movl %esi, -140(%ebp) # 4-byte Spill + movl -216(%ebp), %ebx # 4-byte Reload + movl %ebx, 
-144(%ebp) # 4-byte Spill + jb .LBB208_4 +# BB#3: + movl $0, -148(%ebp) # 4-byte Folded Spill + movl $0, -152(%ebp) # 4-byte Folded Spill + movl $0, -128(%ebp) # 4-byte Folded Spill + movl $0, -132(%ebp) # 4-byte Folded Spill + movl $0, -136(%ebp) # 4-byte Folded Spill + movl $0, -140(%ebp) # 4-byte Folded Spill + movl $0, -144(%ebp) # 4-byte Folded Spill +.LBB208_4: + movl -180(%ebp), %edx # 4-byte Reload + movl %edx, -96(%ebp) + movl -200(%ebp), %esi # 4-byte Reload + movl %esi, -92(%ebp) + movl -212(%ebp), %edx # 4-byte Reload + movl %edx, -88(%ebp) + movl -196(%ebp), %edi # 4-byte Reload + movl %edi, -84(%ebp) + movl -156(%ebp), %edx # 4-byte Reload + movl %edx, -80(%ebp) + movl %ebx, -124(%ebp) + movl -164(%ebp), %edx # 4-byte Reload + movl %edx, -120(%ebp) + movl -168(%ebp), %edx # 4-byte Reload + movl %edx, -116(%ebp) + movl -172(%ebp), %edx # 4-byte Reload + movl %edx, -112(%ebp) + movl -176(%ebp), %edx # 4-byte Reload + movl %edx, -108(%ebp) + movl %ecx, -104(%ebp) + movl %edi, %ebx + movl %esi, %edi + movl %eax, -100(%ebp) + sbbl %edx, %edx + movl -160(%ebp), %eax # 4-byte Reload + movl %eax, -76(%ebp) + movl -208(%ebp), %esi # 4-byte Reload + movl %esi, -72(%ebp) + movl -220(%ebp), %ecx # 4-byte Reload + pushl %eax + movl %ecx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB208_6 +# BB#5: + movl $0, %esi + movl $0, %eax + movl $0, %ebx + movl $0, %edi +.LBB208_6: + movl %eax, -160(%ebp) # 4-byte Spill + sbbl %eax, %eax + leal -124(%ebp), %ecx + movl %ecx, 8(%esp) + leal -96(%ebp), %ecx + movl %ecx, 4(%esp) + leal -68(%ebp), %ecx + movl %ecx, (%esp) + andl %eax, %edx + movl -188(%ebp), %eax # 4-byte Reload + addl %eax, -144(%ebp) # 4-byte Folded Spill + adcl %edi, -140(%ebp) # 4-byte Folded Spill + movl -184(%ebp), %eax # 4-byte Reload + adcl %eax, -136(%ebp) # 4-byte Folded Spill + adcl %ebx, -132(%ebp) # 4-byte Folded Spill + movl -204(%ebp), %eax # 4-byte Reload + adcl %eax, -128(%ebp) # 4-byte Folded Spill + movl -152(%ebp), %edi # 4-byte Reload + adcl -160(%ebp), %edi # 4-byte Folded Reload + adcl %esi, -148(%ebp) # 4-byte Folded Spill + sbbl %esi, %esi + andl $1, %esi + andl $1, %edx + movl %edx, -156(%ebp) # 4-byte Spill + movl -192(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre7Lbmi2@PLT + movl -144(%ebp), %eax # 4-byte Reload + addl -40(%ebp), %eax + movl %eax, -144(%ebp) # 4-byte Spill + movl -140(%ebp), %eax # 4-byte Reload + adcl -36(%ebp), %eax + movl %eax, -140(%ebp) # 4-byte Spill + movl -136(%ebp), %eax # 4-byte Reload + adcl -32(%ebp), %eax + movl %eax, -136(%ebp) # 4-byte Spill + movl -132(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -132(%ebp) # 4-byte Spill + movl -128(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -128(%ebp) # 4-byte Spill + adcl -20(%ebp), %edi + movl -148(%ebp), %eax # 4-byte Reload + adcl -16(%ebp), %eax + movl %eax, -148(%ebp) # 4-byte Spill + adcl %esi, -156(%ebp) # 4-byte Folded Spill + movl -68(%ebp), %eax + movl 8(%ebp), %esi + subl (%esi), %eax + movl %eax, -172(%ebp) # 4-byte Spill + movl -64(%ebp), %ecx + sbbl 4(%esi), %ecx + movl -60(%ebp), %eax + sbbl 8(%esi), %eax + movl %eax, -160(%ebp) # 4-byte Spill + movl -56(%ebp), %edx + sbbl 12(%esi), %edx + movl -52(%ebp), %ebx + sbbl 16(%esi), %ebx + movl -48(%ebp), %eax + sbbl 20(%esi), %eax + movl %eax, -164(%ebp) # 4-byte Spill + movl -44(%ebp), %eax + sbbl 24(%esi), %eax + movl %eax, -168(%ebp) # 4-byte Spill + movl 28(%esi), %eax + movl %eax, -176(%ebp) # 4-byte Spill + sbbl %eax, -144(%ebp) # 4-byte Folded Spill + movl 32(%esi), %eax + 
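+# annotation: one-level Karatsuba combine for mcl_fpDbl_mulPre14Lbmi2 -- after
+# three mcl_fpDbl_mulPre7Lbmi2 calls (aL*bL, aH*bH, (aL+aH)*(bL+bH)), the sbbl
+# chain here subtracts the low and high half products from the middle product
+# before it is folded back into the 28-limb result.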
movl %eax, -180(%ebp) # 4-byte Spill + sbbl %eax, -140(%ebp) # 4-byte Folded Spill + movl 36(%esi), %eax + movl %eax, -184(%ebp) # 4-byte Spill + sbbl %eax, -136(%ebp) # 4-byte Folded Spill + movl 40(%esi), %eax + movl %eax, -188(%ebp) # 4-byte Spill + sbbl %eax, -132(%ebp) # 4-byte Folded Spill + movl 44(%esi), %eax + movl %eax, -192(%ebp) # 4-byte Spill + sbbl %eax, -128(%ebp) # 4-byte Folded Spill + movl 48(%esi), %eax + movl %eax, -196(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl %edi, -152(%ebp) # 4-byte Spill + movl 52(%esi), %eax + movl %eax, -200(%ebp) # 4-byte Spill + movl -148(%ebp), %edi # 4-byte Reload + sbbl %eax, %edi + sbbl $0, -156(%ebp) # 4-byte Folded Spill + movl 56(%esi), %eax + movl %eax, -228(%ebp) # 4-byte Spill + subl %eax, -172(%ebp) # 4-byte Folded Spill + movl 60(%esi), %eax + movl %eax, -232(%ebp) # 4-byte Spill + sbbl %eax, %ecx + movl 64(%esi), %eax + movl %eax, -236(%ebp) # 4-byte Spill + sbbl %eax, -160(%ebp) # 4-byte Folded Spill + movl 68(%esi), %eax + movl %eax, -240(%ebp) # 4-byte Spill + sbbl %eax, %edx + movl 72(%esi), %eax + movl %eax, -244(%ebp) # 4-byte Spill + sbbl %eax, %ebx + movl 76(%esi), %eax + movl %eax, -248(%ebp) # 4-byte Spill + sbbl %eax, -164(%ebp) # 4-byte Folded Spill + movl 80(%esi), %eax + movl %eax, -252(%ebp) # 4-byte Spill + sbbl %eax, -168(%ebp) # 4-byte Folded Spill + movl 84(%esi), %eax + movl %eax, -256(%ebp) # 4-byte Spill + sbbl %eax, -144(%ebp) # 4-byte Folded Spill + movl 88(%esi), %eax + movl %eax, -208(%ebp) # 4-byte Spill + sbbl %eax, -140(%ebp) # 4-byte Folded Spill + movl 92(%esi), %eax + movl %eax, -212(%ebp) # 4-byte Spill + sbbl %eax, -136(%ebp) # 4-byte Folded Spill + movl 96(%esi), %eax + movl %eax, -216(%ebp) # 4-byte Spill + sbbl %eax, -132(%ebp) # 4-byte Folded Spill + movl 100(%esi), %eax + movl %eax, -220(%ebp) # 4-byte Spill + sbbl %eax, -128(%ebp) # 4-byte Folded Spill + movl 104(%esi), %eax + movl %eax, -224(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + movl 108(%esi), %eax + movl %eax, -204(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl %edi, -148(%ebp) # 4-byte Spill + movl -156(%ebp), %edi # 4-byte Reload + sbbl $0, %edi + movl -172(%ebp), %eax # 4-byte Reload + addl -176(%ebp), %eax # 4-byte Folded Reload + adcl -180(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 28(%esi) + movl -160(%ebp), %eax # 4-byte Reload + adcl -184(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 32(%esi) + adcl -188(%ebp), %edx # 4-byte Folded Reload + movl %eax, 36(%esi) + adcl -192(%ebp), %ebx # 4-byte Folded Reload + movl %edx, 40(%esi) + movl -164(%ebp), %eax # 4-byte Reload + adcl -196(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 44(%esi) + movl -168(%ebp), %ecx # 4-byte Reload + adcl -200(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 48(%esi) + movl -144(%ebp), %eax # 4-byte Reload + adcl -228(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 52(%esi) + movl -140(%ebp), %ecx # 4-byte Reload + adcl -232(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 56(%esi) + movl -136(%ebp), %eax # 4-byte Reload + adcl -236(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 60(%esi) + movl -132(%ebp), %ecx # 4-byte Reload + adcl -240(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 64(%esi) + movl -128(%ebp), %eax # 4-byte Reload + adcl -244(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 68(%esi) + movl -152(%ebp), %ecx # 4-byte Reload + adcl -248(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 72(%esi) + movl -148(%ebp), %eax # 4-byte Reload + adcl -252(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 76(%esi) + 
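+# annotation: the adcl chain here writes the recombined middle limbs back to
+# out[7..21] (byte offsets 28..84), and the trailing 'adcl $0' run propagates
+# the final carry through the top limbs out[22..27].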
adcl -256(%ebp), %edi # 4-byte Folded Reload + movl %eax, 80(%esi) + movl %edi, 84(%esi) + movl -208(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 88(%esi) + movl -212(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 92(%esi) + movl -216(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 96(%esi) + movl -220(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 100(%esi) + movl -224(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 104(%esi) + movl -204(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 108(%esi) + addl $268, %esp # imm = 0x10C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end208: + .size mcl_fpDbl_mulPre14Lbmi2, .Lfunc_end208-mcl_fpDbl_mulPre14Lbmi2 + + .globl mcl_fpDbl_sqrPre14Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre14Lbmi2,@function +mcl_fpDbl_sqrPre14Lbmi2: # @mcl_fpDbl_sqrPre14Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $268, %esp # imm = 0x10C + calll .L209$pb +.L209$pb: + popl %ebx +.Ltmp40: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp40-.L209$pb), %ebx + movl %ebx, -172(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + movl %edi, 8(%esp) + movl %edi, 4(%esp) + movl 8(%ebp), %esi + movl %esi, (%esp) + calll mcl_fpDbl_mulPre7Lbmi2@PLT + leal 28(%edi), %eax + movl %eax, 8(%esp) + movl %eax, 4(%esp) + leal 56(%esi), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre7Lbmi2@PLT + movl 48(%edi), %eax + movl 44(%edi), %ecx + movl 36(%edi), %edx + movl (%edi), %esi + movl 4(%edi), %ebx + addl 28(%edi), %esi + adcl 32(%edi), %ebx + movl %ebx, -164(%ebp) # 4-byte Spill + adcl 8(%edi), %edx + movl %edx, -160(%ebp) # 4-byte Spill + movl 40(%edi), %edx + adcl 12(%edi), %edx + adcl 16(%edi), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + adcl 20(%edi), %eax + movl %eax, -176(%ebp) # 4-byte Spill + movl 52(%edi), %ecx + adcl 24(%edi), %ecx + seto %al + lahf + movl %eax, %eax + movl %eax, -184(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -152(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -148(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -144(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -140(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -136(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %edi + seto %al + lahf + movl %eax, %eax + sbbl %ebx, %ebx + movl %ebx, -128(%ebp) # 4-byte Spill + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_1 +# BB#2: + movl %esi, -168(%ebp) # 4-byte Spill + movl $0, -132(%ebp) # 4-byte Folded Spill + jmp .LBB209_3 +.LBB209_1: + leal (%esi,%esi), %eax + movl %esi, -168(%ebp) # 4-byte Spill + movl %eax, -132(%ebp) # 4-byte Spill +.LBB209_3: + movl %edi, %eax + addb $127, %al + sahf + movl -180(%ebp), %ebx # 4-byte Reload + jb .LBB209_4 +# BB#5: + movl $0, -156(%ebp) # 4-byte Folded Spill + jmp .LBB209_6 +.LBB209_4: + movl -164(%ebp), %eax # 4-byte Reload + movl -168(%ebp), %esi # 4-byte Reload + shldl $1, %esi, %eax + movl %eax, -156(%ebp) # 4-byte Spill +.LBB209_6: + movl -176(%ebp), %edi # 4-byte Reload + movl -136(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_7 +# BB#8: + movl $0, -136(%ebp) # 4-byte Folded Spill + jmp .LBB209_9 +.LBB209_7: + movl -160(%ebp), %eax # 4-byte Reload + movl -164(%ebp), %esi # 4-byte Reload + shldl $1, %esi, %eax + movl %eax, -136(%ebp) # 4-byte Spill +.LBB209_9: + movl %ebx, %esi + movl -140(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + 
jb .LBB209_10 +# BB#11: + movl $0, -140(%ebp) # 4-byte Folded Spill + jmp .LBB209_12 +.LBB209_10: + movl %edx, %eax + movl -160(%ebp), %ebx # 4-byte Reload + shldl $1, %ebx, %eax + movl %eax, -140(%ebp) # 4-byte Spill +.LBB209_12: + movl -144(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_13 +# BB#14: + movl $0, -144(%ebp) # 4-byte Folded Spill + jmp .LBB209_15 +.LBB209_13: + movl %esi, %eax + shldl $1, %edx, %eax + movl %eax, -144(%ebp) # 4-byte Spill +.LBB209_15: + movl -148(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_16 +# BB#17: + movl $0, -148(%ebp) # 4-byte Folded Spill + jmp .LBB209_18 +.LBB209_16: + movl %edi, %eax + shldl $1, %esi, %eax + movl %eax, -148(%ebp) # 4-byte Spill +.LBB209_18: + movl -152(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_19 +# BB#20: + movl $0, -152(%ebp) # 4-byte Folded Spill + jmp .LBB209_21 +.LBB209_19: + movl %ecx, %eax + shldl $1, %edi, %eax + movl %eax, -152(%ebp) # 4-byte Spill +.LBB209_21: + movl -168(%ebp), %eax # 4-byte Reload + movl %eax, -96(%ebp) + movl %eax, -124(%ebp) + movl -164(%ebp), %eax # 4-byte Reload + movl %eax, -92(%ebp) + movl %eax, -120(%ebp) + movl -160(%ebp), %eax # 4-byte Reload + movl %eax, -88(%ebp) + movl %eax, -116(%ebp) + movl %edx, -84(%ebp) + movl %edx, -112(%ebp) + movl %esi, -80(%ebp) + movl %esi, -108(%ebp) + movl %edi, -76(%ebp) + movl %edi, -104(%ebp) + movl %ecx, -72(%ebp) + movl %ecx, -100(%ebp) + movl -184(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_22 +# BB#23: + xorl %edi, %edi + jmp .LBB209_24 +.LBB209_22: + shrl $31, %ecx + movl %ecx, %edi +.LBB209_24: + leal -68(%ebp), %eax + movl %eax, (%esp) + leal -96(%ebp), %eax + movl %eax, 4(%esp) + leal -124(%ebp), %eax + movl %eax, 8(%esp) + movl -128(%ebp), %esi # 4-byte Reload + andl $1, %esi + movl -172(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre7Lbmi2@PLT + movl -132(%ebp), %eax # 4-byte Reload + addl -40(%ebp), %eax + movl %eax, -132(%ebp) # 4-byte Spill + movl -156(%ebp), %eax # 4-byte Reload + adcl -36(%ebp), %eax + movl -136(%ebp), %ecx # 4-byte Reload + adcl -32(%ebp), %ecx + movl %ecx, -136(%ebp) # 4-byte Spill + movl -140(%ebp), %ecx # 4-byte Reload + adcl -28(%ebp), %ecx + movl %ecx, -140(%ebp) # 4-byte Spill + movl -144(%ebp), %ecx # 4-byte Reload + adcl -24(%ebp), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + movl -148(%ebp), %ecx # 4-byte Reload + adcl -20(%ebp), %ecx + movl %ecx, -148(%ebp) # 4-byte Spill + movl -152(%ebp), %ecx # 4-byte Reload + adcl -16(%ebp), %ecx + movl %ecx, -152(%ebp) # 4-byte Spill + adcl %edi, %esi + movl %esi, -128(%ebp) # 4-byte Spill + movl -68(%ebp), %ecx + movl 8(%ebp), %esi + subl (%esi), %ecx + movl %ecx, -204(%ebp) # 4-byte Spill + movl -64(%ebp), %edi + sbbl 4(%esi), %edi + movl -60(%ebp), %edx + sbbl 8(%esi), %edx + movl %edx, -160(%ebp) # 4-byte Spill + movl -56(%ebp), %edx + sbbl 12(%esi), %edx + movl %edx, -168(%ebp) # 4-byte Spill + movl -52(%ebp), %ebx + sbbl 16(%esi), %ebx + movl -48(%ebp), %ecx + sbbl 20(%esi), %ecx + movl %ecx, -172(%ebp) # 4-byte Spill + movl -44(%ebp), %edx + sbbl 24(%esi), %edx + movl %edx, -164(%ebp) # 4-byte Spill + movl 28(%esi), %edx + movl %edx, -176(%ebp) # 4-byte Spill + sbbl %edx, -132(%ebp) # 4-byte Folded Spill + movl 32(%esi), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + sbbl %ecx, %eax + movl %eax, -156(%ebp) # 4-byte Spill + movl 36(%esi), %eax + movl %eax, -184(%ebp) # 4-byte Spill + sbbl %eax, -136(%ebp) # 4-byte 
Folded Spill + movl 40(%esi), %eax + movl %eax, -188(%ebp) # 4-byte Spill + sbbl %eax, -140(%ebp) # 4-byte Folded Spill + movl 44(%esi), %eax + movl %eax, -192(%ebp) # 4-byte Spill + sbbl %eax, -144(%ebp) # 4-byte Folded Spill + movl 48(%esi), %eax + movl %eax, -196(%ebp) # 4-byte Spill + sbbl %eax, -148(%ebp) # 4-byte Folded Spill + movl 52(%esi), %eax + movl %eax, -200(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + movl -128(%ebp), %ecx # 4-byte Reload + sbbl $0, %ecx + movl 56(%esi), %eax + movl %eax, -228(%ebp) # 4-byte Spill + movl -204(%ebp), %edx # 4-byte Reload + subl %eax, %edx + movl 60(%esi), %eax + movl %eax, -232(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl 64(%esi), %eax + movl %eax, -236(%ebp) # 4-byte Spill + sbbl %eax, -160(%ebp) # 4-byte Folded Spill + movl 68(%esi), %eax + movl %eax, -240(%ebp) # 4-byte Spill + sbbl %eax, -168(%ebp) # 4-byte Folded Spill + movl 72(%esi), %eax + movl %eax, -244(%ebp) # 4-byte Spill + sbbl %eax, %ebx + movl 76(%esi), %eax + movl %eax, -248(%ebp) # 4-byte Spill + sbbl %eax, -172(%ebp) # 4-byte Folded Spill + movl 80(%esi), %eax + movl %eax, -252(%ebp) # 4-byte Spill + sbbl %eax, -164(%ebp) # 4-byte Folded Spill + movl 84(%esi), %eax + movl %eax, -256(%ebp) # 4-byte Spill + sbbl %eax, -132(%ebp) # 4-byte Folded Spill + movl 88(%esi), %eax + movl %eax, -204(%ebp) # 4-byte Spill + sbbl %eax, -156(%ebp) # 4-byte Folded Spill + movl 92(%esi), %eax + movl %eax, -208(%ebp) # 4-byte Spill + sbbl %eax, -136(%ebp) # 4-byte Folded Spill + movl 96(%esi), %eax + movl %eax, -212(%ebp) # 4-byte Spill + sbbl %eax, -140(%ebp) # 4-byte Folded Spill + movl 100(%esi), %eax + movl %eax, -216(%ebp) # 4-byte Spill + sbbl %eax, -144(%ebp) # 4-byte Folded Spill + movl 104(%esi), %eax + movl %eax, -220(%ebp) # 4-byte Spill + sbbl %eax, -148(%ebp) # 4-byte Folded Spill + movl 108(%esi), %eax + movl %eax, -224(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + sbbl $0, %ecx + movl %ecx, -128(%ebp) # 4-byte Spill + movl %edx, %eax + addl -176(%ebp), %eax # 4-byte Folded Reload + adcl -180(%ebp), %edi # 4-byte Folded Reload + movl %eax, 28(%esi) + movl -160(%ebp), %eax # 4-byte Reload + adcl -184(%ebp), %eax # 4-byte Folded Reload + movl %edi, 32(%esi) + movl -168(%ebp), %ecx # 4-byte Reload + adcl -188(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 36(%esi) + adcl -192(%ebp), %ebx # 4-byte Folded Reload + movl %ecx, 40(%esi) + movl -172(%ebp), %eax # 4-byte Reload + adcl -196(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 44(%esi) + movl -164(%ebp), %ecx # 4-byte Reload + adcl -200(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 48(%esi) + movl -132(%ebp), %eax # 4-byte Reload + adcl -228(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 52(%esi) + movl -156(%ebp), %edx # 4-byte Reload + adcl -232(%ebp), %edx # 4-byte Folded Reload + movl %eax, 56(%esi) + movl -136(%ebp), %ecx # 4-byte Reload + adcl -236(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 60(%esi) + movl -140(%ebp), %eax # 4-byte Reload + adcl -240(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 64(%esi) + movl -144(%ebp), %ecx # 4-byte Reload + adcl -244(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 68(%esi) + movl -148(%ebp), %eax # 4-byte Reload + adcl -248(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 72(%esi) + movl -152(%ebp), %ecx # 4-byte Reload + adcl -252(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 76(%esi) + movl -128(%ebp), %eax # 4-byte Reload + adcl -256(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 80(%esi) + movl %eax, 84(%esi) + movl 
-204(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 88(%esi) + movl -208(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 92(%esi) + movl -212(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 96(%esi) + movl -216(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 100(%esi) + movl -220(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 104(%esi) + movl -224(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 108(%esi) + addl $268, %esp # imm = 0x10C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end209: + .size mcl_fpDbl_sqrPre14Lbmi2, .Lfunc_end209-mcl_fpDbl_sqrPre14Lbmi2 + + .globl mcl_fp_mont14Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont14Lbmi2,@function +mcl_fp_mont14Lbmi2: # @mcl_fp_mont14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1900, %esp # imm = 0x76C + calll .L210$pb +.L210$pb: + popl %ebx +.Ltmp41: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp41-.L210$pb), %ebx + movl 1932(%esp), %eax + movl -4(%eax), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1840(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 1840(%esp), %edi + movl 1844(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl %edi, %eax + imull %esi, %eax + movl 1896(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 1892(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 1888(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 1884(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 1880(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 1876(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1872(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1868(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1864(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1860(%esp), %esi + movl 1856(%esp), %ebp + movl 1852(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1848(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 1776(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + addl 1776(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1780(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1784(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1792(%esp), %ebp + adcl 1796(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1804(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1808(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1816(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1820(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1824(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1828(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1832(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 1928(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1712(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + andl $1, %edi + 
movl %edi, %edx + movl 100(%esp), %ecx # 4-byte Reload + addl 1712(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 1716(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1720(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1724(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + adcl 1728(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 1732(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 1736(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1740(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 1744(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 1748(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1752(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1756(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1760(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1764(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1768(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 100(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1648(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + movl 100(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1648(%esp), %ebp + movl 84(%esp), %ecx # 4-byte Reload + adcl 1652(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1656(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1660(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1664(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 1668(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1672(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + adcl 1676(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 1680(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1684(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1688(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1692(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1696(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1700(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 1704(%esp), %esi + adcl $0, %eax + movl %eax, %edi + movl 1928(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1584(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 84(%esp), %ecx # 4-byte Reload + addl 1584(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 1588(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1592(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1596(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1600(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1604(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload 
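+# Annotation (inferred from the code, not in the upstream source):
+# mcl_fp_mont14Lbmi2 is word-serial Montgomery multiplication over
+# 14 x 32-bit limbs. Each round loads one limb of the operand at
+# 1928(%esp), calls .LmulPv448x32 to accumulate limb * vector (vector at
+# 1924(%esp)), forms m = t[0] * n0' (n0' spilled at 48(%esp), read from
+# -4(p); presumably -p^-1 mod 2^32, the usual Montgomery constant), then
+# adds m*p via a second .LmulPv448x32 with p at 1932(%esp); the low word
+# cancels, effecting the one-word Montgomery shift.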
+ adcl 1608(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 1612(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1616(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1620(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1624(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1628(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1632(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1636(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + adcl 1640(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %edi + movl %edi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1520(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %esi + movl %esi, %ecx + addl 1520(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1528(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1532(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1540(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 1544(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1548(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1552(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 1564(%esp), %ebp + movl 108(%esp), %esi # 4-byte Reload + adcl 1568(%esp), %esi + movl 92(%esp), %edi # 4-byte Reload + adcl 1572(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1576(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1456(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 1456(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 1460(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1464(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1472(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1476(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1480(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1484(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1488(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1492(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 1496(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + adcl 1500(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl 1504(%esp), %edi + movl %edi, 92(%esp) # 4-byte 
Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1508(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1512(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %esi + movl %esi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1392(%esp), %ecx + movl 1932(%esp), %eax + movl %eax, %edx + calll .LmulPv448x32 + andl $1, %edi + movl %edi, %eax + addl 1392(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1396(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1400(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1404(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1408(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1412(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1416(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 1420(%esp), %esi + movl 88(%esp), %ebp # 4-byte Reload + adcl 1424(%esp), %ebp + movl 96(%esp), %edi # 4-byte Reload + adcl 1428(%esp), %edi + movl 104(%esp), %ecx # 4-byte Reload + adcl 1432(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1436(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1440(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1444(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1448(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1328(%esp), %ecx + movl 1924(%esp), %eax + movl %eax, %edx + calll .LmulPv448x32 + movl 76(%esp), %ecx # 4-byte Reload + addl 1328(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1352(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 1356(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + adcl 1360(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1376(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1380(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 1384(%esp), %edi + sbbl %esi, %esi + movl %ecx, %ebp + movl %ebp, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1264(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %esi + movl %esi, %ecx + addl 1264(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + 
adcl 1268(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 1284(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1312(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 1316(%esp), %esi + adcl 1320(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1200(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 68(%esp), %eax # 4-byte Reload + addl 1200(%esp), %eax + movl 60(%esp), %ecx # 4-byte Reload + adcl 1204(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1208(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 1212(%esp), %edi + adcl 1216(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1220(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1224(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1228(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1232(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1236(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1240(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1244(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl 1248(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1252(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1256(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %eax, %ebp + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1136(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %esi + movl %esi, %ecx + addl 1136(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1144(%esp), %ebp + adcl 1148(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte 
Reload + adcl 1164(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 1172(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 1180(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1188(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1072(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 60(%esp), %eax # 4-byte Reload + addl 1072(%esp), %eax + adcl 1076(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1080(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1084(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1088(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1092(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1096(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1100(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 1104(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 1108(%esp), %ebp + adcl 1112(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1116(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1120(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1124(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1128(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %eax, %edi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1008(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %esi + movl %esi, %ecx + addl 1008(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 1020(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 1036(%esp), %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1044(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 1052(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill 
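+# Annotation: the recurring "sbbl %r, %r ... andl $1, %r" idiom around each
+# helper call saves the carry flag as a 0/1 word, since calll .LmulPv448x32
+# clobbers EFLAGS; the saved bit is folded back into the running sum with
+# adcl / adcl $0 once the next partial product has been added.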
+ movl 68(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 944(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 52(%esp), %eax # 4-byte Reload + addl 944(%esp), %eax + movl 56(%esp), %ecx # 4-byte Reload + adcl 948(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 952(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 956(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 960(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + adcl 964(%esp), %esi + adcl 968(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 972(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 976(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 980(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl 984(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 988(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 992(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 996(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1000(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %eax, %edi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 880(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %ebp + addl 880(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 896(%esp), %edi + adcl 900(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 924(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 936(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 816(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 56(%esp), %ecx # 4-byte Reload + addl 816(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 824(%esp), %ebp + adcl 828(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte 
Reload + adcl 832(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 856(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 872(%esp), %esi + sbbl %eax, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 752(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + movl 56(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 752(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 756(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 760(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 764(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 768(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 772(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 776(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 780(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 784(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 788(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 792(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 796(%esp), %ebp + movl 68(%esp), %edi # 4-byte Reload + adcl 800(%esp), %edi + movl 60(%esp), %ecx # 4-byte Reload + adcl 804(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 808(%esp), %esi + adcl $0, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 688(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 64(%esp), %ecx # 4-byte Reload + addl 688(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + 
adcl 724(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 728(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + adcl 732(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 740(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 624(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + movl %edi, %ecx + andl $1, %ecx + addl 624(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 636(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 648(%esp), %esi + movl 100(%esp), %edi # 4-byte Reload + adcl 652(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 80(%esp), %ecx # 4-byte Reload + addl 560(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 568(%esp), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 580(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + adcl 584(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 592(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 
496(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %edi + movl %edi, %ecx + addl 496(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 508(%esp), %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 520(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 528(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 540(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 432(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 88(%esp), %ecx # 4-byte Reload + addl 432(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 440(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + adcl 444(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 452(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 472(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %esi + movl %esi, %ecx + addl 368(%esp), %ebp + movl 96(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 376(%esp), %esi + adcl 380(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 392(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 
396(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 96(%esp), %ecx # 4-byte Reload + addl 304(%esp), %ecx + adcl 308(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + adcl 312(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 324(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 328(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 240(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + movl 96(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 240(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 248(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 252(%esp), %edi + movl 100(%esp), %ebp # 4-byte Reload + adcl 256(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 264(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 268(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 80(%esp) 
# 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 176(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 104(%esp), %ecx # 4-byte Reload + addl 176(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 184(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + adcl 188(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 192(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 200(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl 48(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %ebp + addl 112(%esp), %esi + movl 100(%esp), %esi # 4-byte Reload + movl 108(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 120(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + adcl 124(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + adcl 128(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 132(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl %ecx, %ebx + movl 76(%esp), %ecx # 4-byte Reload + adcl 136(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 140(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 144(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 148(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 152(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 156(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 160(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 164(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 168(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl $0, %ebp + movl 1932(%esp), %ecx + subl (%ecx), %eax + sbbl 4(%ecx), %edx + sbbl 8(%ecx), %esi + sbbl 12(%ecx), %edi + movl %edi, 16(%esp) # 4-byte Spill + sbbl 16(%ecx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + sbbl 20(%ecx), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + sbbl 24(%ecx), %edi + movl %edi, 
28(%esp) # 4-byte Spill + movl 60(%esp), %ebx # 4-byte Reload + sbbl 28(%ecx), %ebx + movl 52(%esp), %edi # 4-byte Reload + sbbl 32(%ecx), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + sbbl 36(%ecx), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + sbbl 40(%ecx), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + sbbl 44(%ecx), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + sbbl 48(%ecx), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + sbbl 52(%ecx), %edi + movl %ebp, %ecx + movl %edi, 104(%esp) # 4-byte Spill + sbbl $0, %ecx + andl $1, %ecx + jne .LBB210_2 +# BB#1: + movl %ebx, 60(%esp) # 4-byte Spill +.LBB210_2: + testb %cl, %cl + movl 108(%esp), %ebx # 4-byte Reload + jne .LBB210_4 +# BB#3: + movl %eax, %ebx +.LBB210_4: + movl 1920(%esp), %eax + movl %ebx, (%eax) + movl 92(%esp), %edi # 4-byte Reload + movl 72(%esp), %ecx # 4-byte Reload + jne .LBB210_6 +# BB#5: + movl %edx, %edi +.LBB210_6: + movl %edi, 4(%eax) + jne .LBB210_8 +# BB#7: + movl %esi, 100(%esp) # 4-byte Spill +.LBB210_8: + movl 100(%esp), %edx # 4-byte Reload + movl %edx, 8(%eax) + jne .LBB210_10 +# BB#9: + movl 16(%esp), %edx # 4-byte Reload + movl %edx, 84(%esp) # 4-byte Spill +.LBB210_10: + movl 84(%esp), %edx # 4-byte Reload + movl %edx, 12(%eax) + jne .LBB210_12 +# BB#11: + movl 20(%esp), %ecx # 4-byte Reload +.LBB210_12: + movl %ecx, 16(%eax) + movl 76(%esp), %ecx # 4-byte Reload + jne .LBB210_14 +# BB#13: + movl 24(%esp), %ecx # 4-byte Reload +.LBB210_14: + movl %ecx, 20(%eax) + movl 68(%esp), %ecx # 4-byte Reload + jne .LBB210_16 +# BB#15: + movl 28(%esp), %ecx # 4-byte Reload +.LBB210_16: + movl %ecx, 24(%eax) + movl 60(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 52(%esp), %ecx # 4-byte Reload + jne .LBB210_18 +# BB#17: + movl 32(%esp), %ecx # 4-byte Reload +.LBB210_18: + movl %ecx, 32(%eax) + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB210_20 +# BB#19: + movl 36(%esp), %ecx # 4-byte Reload +.LBB210_20: + movl %ecx, 36(%eax) + movl 64(%esp), %ecx # 4-byte Reload + jne .LBB210_22 +# BB#21: + movl 40(%esp), %ecx # 4-byte Reload +.LBB210_22: + movl %ecx, 40(%eax) + movl 80(%esp), %ecx # 4-byte Reload + jne .LBB210_24 +# BB#23: + movl 44(%esp), %ecx # 4-byte Reload +.LBB210_24: + movl %ecx, 44(%eax) + movl 88(%esp), %ecx # 4-byte Reload + jne .LBB210_26 +# BB#25: + movl 48(%esp), %ecx # 4-byte Reload +.LBB210_26: + movl %ecx, 48(%eax) + movl 96(%esp), %ecx # 4-byte Reload + jne .LBB210_28 +# BB#27: + movl 104(%esp), %ecx # 4-byte Reload +.LBB210_28: + movl %ecx, 52(%eax) + addl $1900, %esp # imm = 0x76C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end210: + .size mcl_fp_mont14Lbmi2, .Lfunc_end210-mcl_fp_mont14Lbmi2 + + .globl mcl_fp_montNF14Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF14Lbmi2,@function +mcl_fp_montNF14Lbmi2: # @mcl_fp_montNF14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1884, %esp # imm = 0x75C + calll .L211$pb +.L211$pb: + popl %ebx +.Ltmp42: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp42-.L211$pb), %ebx + movl 1916(%esp), %eax + movl -4(%eax), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1824(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 1824(%esp), %edi + movl 1828(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %edi, %eax + imull %esi, %eax + movl 1880(%esp), %ecx + movl %ecx, 
92(%esp) # 4-byte Spill + movl 1876(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 1872(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1868(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 1864(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1860(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1856(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1852(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1848(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1844(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1840(%esp), %esi + movl 1836(%esp), %ebp + movl 1832(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 1760(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 1760(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 1764(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1768(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1772(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + adcl 1776(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 1780(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1784(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1792(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1804(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 1808(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 1816(%esp), %ebp + movl 1912(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1696(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 1752(%esp), %eax + movl 68(%esp), %edx # 4-byte Reload + addl 1696(%esp), %edx + movl 88(%esp), %ecx # 4-byte Reload + adcl 1700(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1704(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 1708(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1712(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1716(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1720(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1724(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1728(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1732(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1736(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 1740(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 1744(%esp), %edi + adcl 1748(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %ebp + movl %edx, %eax + movl %edx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1632(%esp), %ecx + movl 1916(%esp), %eax + movl %eax, %edx 
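+# Annotation (hedged): mcl_fp_montNF14Lbmi2 repeats the word-serial
+# Montgomery pattern of mcl_fp_mont14Lbmi2 above but without the sbbl/andl
+# carry-save bookkeeping; the NF suffix presumably marks mcl's non-fullbit
+# path (modulus top bit clear), so each round's sum fits the 15-word output
+# of .LmulPv448x32, whose top word (e.g. 1752(%esp)) is carried forward
+# directly instead of through a saved carry bit.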
+ calll .LmulPv448x32 + addl 1632(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1636(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1640(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1644(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1648(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1652(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1656(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1660(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 1664(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 1668(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1672(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1676(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1680(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 1684(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1688(%esp), %ebp + movl 1912(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1568(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 1624(%esp), %eax + movl 88(%esp), %edx # 4-byte Reload + addl 1568(%esp), %edx + movl 72(%esp), %ecx # 4-byte Reload + adcl 1572(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1576(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1580(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1584(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1588(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1592(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 1596(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1600(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1604(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1608(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 1612(%esp), %edi + movl 92(%esp), %ecx # 4-byte Reload + adcl 1616(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1620(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %ebp + movl %edx, %esi + movl %esi, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1504(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 1504(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 1508(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1512(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1516(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1520(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1528(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1532(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 
4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1540(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 1544(%esp), %esi + adcl 1548(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1552(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1560(%esp), %ebp + movl 1912(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1440(%esp), %ecx + movl 1908(%esp), %eax + movl %eax, %edx + calll .LmulPv448x32 + movl 1496(%esp), %eax + movl 72(%esp), %edx # 4-byte Reload + addl 1440(%esp), %edx + movl 68(%esp), %ecx # 4-byte Reload + adcl 1444(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1448(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1452(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1456(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1460(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 1464(%esp), %edi + movl 52(%esp), %ecx # 4-byte Reload + adcl 1468(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1472(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 1476(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1480(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1484(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1488(%esp), %esi + adcl 1492(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %ebp + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1376(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 1376(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1380(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1384(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 1400(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1404(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1408(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1412(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1424(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 1432(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1312(%esp), %ecx + movl 1908(%esp), %edx + 
calll .LmulPv448x32 + movl 1368(%esp), %eax + movl 68(%esp), %edx # 4-byte Reload + addl 1312(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + adcl 1316(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1320(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1324(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 1328(%esp), %edi + movl 56(%esp), %ecx # 4-byte Reload + adcl 1332(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1336(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 1340(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1344(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1348(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1352(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1356(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + adcl 1360(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1364(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1248(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 1248(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1260(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 1264(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1272(%esp), %ebp + adcl 1276(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 1284(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 1300(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1184(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 1240(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + addl 1184(%esp), %ecx + movl 40(%esp), %eax # 4-byte Reload + adcl 1188(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1200(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1204(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1208(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 
1212(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1216(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1232(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1120(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 1120(%esp), %esi + movl 40(%esp), %ebp # 4-byte Reload + adcl 1124(%esp), %ebp + movl 44(%esp), %edi # 4-byte Reload + adcl 1128(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1148(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 1156(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1056(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 1112(%esp), %eax + movl %ebp, %ecx + addl 1056(%esp), %ecx + adcl 1060(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 1064(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 1068(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 1072(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 1076(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 1080(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 1084(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + adcl 1088(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 1092(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 1096(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 1100(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 1104(%esp), %ebp + movl 60(%esp), %edx # 4-byte Reload + adcl 1108(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %esi + movl %ecx, %edi + movl %edi, %eax + imull 
36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 992(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 1008(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1040(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 1044(%esp), %ebp + adcl 1048(%esp), %esi + movl 1912(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 984(%esp), %eax + movl 44(%esp), %ecx # 4-byte Reload + addl 928(%esp), %ecx + movl 48(%esp), %edx # 4-byte Reload + adcl 932(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 936(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl 940(%esp), %edi + movl 84(%esp), %edx # 4-byte Reload + adcl 944(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 948(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 952(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 956(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 960(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 964(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 968(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 972(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 976(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + adcl 980(%esp), %esi + movl %esi, %ebp + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 864(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 864(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 876(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 884(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + 
movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 916(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 920(%esp), %ebp + movl 1912(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 800(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 856(%esp), %edx + movl 48(%esp), %ecx # 4-byte Reload + addl 800(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 808(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 816(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 828(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 852(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 736(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 736(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 764(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 768(%esp), %ebp + movl 72(%esp), %esi # 4-byte Reload + adcl 772(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 780(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 672(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 728(%esp), %edx + movl 56(%esp), %ecx # 4-byte 
Reload + addl 672(%esp), %ecx + movl 52(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 700(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + adcl 704(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 712(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 608(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 608(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 616(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 624(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 644(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 600(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + addl 544(%esp), %ecx + adcl 548(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 556(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 568(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 576(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + 
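+# Carry propagation continues limb by limb: each reload/adcl/spill triple
+# folds one stack word of the m*p partial product into the running
+# accumulator before the next multiplier word is processed.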
movl 60(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 480(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 480(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 488(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 496(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 504(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 532(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 472(%esp), %edx + movl 84(%esp), %ecx # 4-byte Reload + addl 416(%esp), %ecx + adcl 420(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 424(%esp), %edi + adcl 428(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 464(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 84(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 352(%esp), %esi + movl 
76(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 360(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 364(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 388(%esp), %edi + movl 40(%esp), %ebp # 4-byte Reload + adcl 392(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 288(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 344(%esp), %edx + movl 76(%esp), %ecx # 4-byte Reload + addl 288(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 296(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 320(%esp), %edi + adcl 324(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 328(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 224(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 232(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 256(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 260(%esp), %edi + adcl 
264(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 160(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 216(%esp), %edx + movl 80(%esp), %ecx # 4-byte Reload + addl 160(%esp), %ecx + adcl 164(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 168(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 192(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 96(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 96(%esp), %esi + movl 64(%esp), %esi # 4-byte Reload + movl 92(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 104(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl %ebp, %ebx + adcl 108(%esp), %esi + adcl 112(%esp), %edi + movl 68(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 120(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 124(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 128(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 132(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 136(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 140(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 148(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 152(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, %edx + movl 1916(%esp), %ebp + subl (%ebp), %edx + sbbl 4(%ebp), %ebx + movl %esi, %eax + sbbl 8(%ebp), %eax + movl %edi, %ecx + sbbl 12(%ebp), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 
68(%esp), %ecx # 4-byte Reload + sbbl 16(%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + sbbl 20(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + sbbl 24(%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + sbbl 32(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + sbbl 36(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + sbbl 40(%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 44(%ebp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 48(%ebp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 52(%ebp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + sarl $31, %ecx + testl %ecx, %ecx + movl 92(%esp), %ebp # 4-byte Reload + js .LBB211_2 +# BB#1: + movl %edx, %ebp +.LBB211_2: + movl 1904(%esp), %edx + movl %ebp, (%edx) + movl 88(%esp), %ebp # 4-byte Reload + js .LBB211_4 +# BB#3: + movl %ebx, %ebp +.LBB211_4: + movl %ebp, 4(%edx) + js .LBB211_6 +# BB#5: + movl %eax, %esi +.LBB211_6: + movl %esi, 8(%edx) + js .LBB211_8 +# BB#7: + movl 4(%esp), %edi # 4-byte Reload +.LBB211_8: + movl %edi, 12(%edx) + movl 68(%esp), %eax # 4-byte Reload + js .LBB211_10 +# BB#9: + movl 8(%esp), %eax # 4-byte Reload +.LBB211_10: + movl %eax, 16(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB211_12 +# BB#11: + movl 12(%esp), %eax # 4-byte Reload +.LBB211_12: + movl %eax, 20(%edx) + movl 44(%esp), %eax # 4-byte Reload + js .LBB211_14 +# BB#13: + movl 16(%esp), %eax # 4-byte Reload +.LBB211_14: + movl %eax, 24(%edx) + movl 40(%esp), %eax # 4-byte Reload + js .LBB211_16 +# BB#15: + movl 20(%esp), %eax # 4-byte Reload +.LBB211_16: + movl %eax, 28(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB211_18 +# BB#17: + movl 24(%esp), %eax # 4-byte Reload +.LBB211_18: + movl %eax, 32(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB211_20 +# BB#19: + movl 28(%esp), %eax # 4-byte Reload +.LBB211_20: + movl %eax, 36(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB211_22 +# BB#21: + movl 32(%esp), %eax # 4-byte Reload +.LBB211_22: + movl %eax, 40(%edx) + movl 84(%esp), %eax # 4-byte Reload + js .LBB211_24 +# BB#23: + movl 36(%esp), %eax # 4-byte Reload +.LBB211_24: + movl %eax, 44(%edx) + movl 76(%esp), %eax # 4-byte Reload + js .LBB211_26 +# BB#25: + movl 64(%esp), %eax # 4-byte Reload +.LBB211_26: + movl %eax, 48(%edx) + movl 80(%esp), %eax # 4-byte Reload + js .LBB211_28 +# BB#27: + movl 72(%esp), %eax # 4-byte Reload +.LBB211_28: + movl %eax, 52(%edx) + addl $1884, %esp # imm = 0x75C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end211: + .size mcl_fp_montNF14Lbmi2, .Lfunc_end211-mcl_fp_montNF14Lbmi2 + + .globl mcl_fp_montRed14Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed14Lbmi2,@function +mcl_fp_montRed14Lbmi2: # @mcl_fp_montRed14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1068, %esp # imm = 0x42C + calll .L212$pb +.L212$pb: + popl %eax +.Ltmp43: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp43-.L212$pb), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1096(%esp), %edx + movl -4(%edx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1092(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 92(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 96(%esp) # 4-byte Spill + 
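+# Montgomery reduction setup: %eax appears to hold -p^-1 mod 2^32 (loaded
+# from -4 off the modulus pointer) and %ebx the lowest input word, so the
+# imull below forms the factor m with in + m*p == 0 (mod 2^32).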
imull %eax, %ebx + movl 108(%ecx), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 104(%ecx), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 100(%ecx), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 96(%ecx), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 92(%ecx), %esi + movl %esi, 140(%esp) # 4-byte Spill + movl 88(%ecx), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 84(%ecx), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 80(%ecx), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 76(%ecx), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl %esi, 136(%esp) # 4-byte Spill + movl 68(%ecx), %esi + movl %esi, 168(%esp) # 4-byte Spill + movl 64(%ecx), %esi + movl %esi, 164(%esp) # 4-byte Spill + movl 60(%ecx), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 52(%ecx), %edi + movl %edi, 144(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 44(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 40(%ecx), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 36(%ecx), %ebp + movl 32(%ecx), %edi + movl 28(%ecx), %esi + movl 24(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 20(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 16(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 12(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 8(%ecx), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl (%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 52(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 1008(%esp), %ecx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + movl 92(%esp), %eax # 4-byte Reload + addl 1008(%esp), %eax + movl 96(%esp), %ecx # 4-byte Reload + adcl 1012(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1036(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 1040(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + adcl 1044(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl 1052(%esp), %ebp + movl 132(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill 
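+# The m*p product is folded into the low words of the 28-word input; the
+# untouched high words only absorb the carry, hence the run of
+# "adcl $0, ...(%esp)" folds that follows.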
+ movl 172(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 164(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + sbbl %edi, %edi + movl %ecx, %esi + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 944(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + movl %edi, %ecx + andl $1, %ecx + addl 944(%esp), %esi + movl 84(%esp), %edx # 4-byte Reload + adcl 948(%esp), %edx + movl 64(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 976(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 984(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 160(%esp), %esi # 4-byte Reload + adcl 1000(%esp), %esi + adcl $0, 164(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %ebp + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 880(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 880(%esp), %ebp + movl 64(%esp), %ecx # 4-byte Reload + adcl 884(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 908(%esp), %edi 
+ movl %edi, 92(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %edi # 4-byte Reload + adcl 920(%esp), %edi + movl 144(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + adcl 932(%esp), %esi + movl %esi, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 936(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + movl 152(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 816(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 816(%esp), %esi + movl 68(%esp), %ecx # 4-byte Reload + adcl 820(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 852(%esp), %edi + movl %edi, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 152(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + movl 128(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + movl 104(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 752(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 752(%esp), %ebp + movl 72(%esp), %ecx # 4-byte Reload + adcl 756(%esp), %ecx + movl 76(%esp), %eax 
# 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + movl 156(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 128(%esp) # 4-byte Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 104(%esp) # 4-byte Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 688(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 688(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 692(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 156(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + movl 140(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 128(%esp) # 4-byte Folded 
Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + movl 108(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 624(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 624(%esp), %ebp + movl 80(%esp), %ecx # 4-byte Reload + adcl 628(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %ebp # 4-byte Reload + adcl 664(%esp), %ebp + movl 168(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 140(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 560(%esp), %esi + movl 96(%esp), %ecx # 4-byte Reload + adcl 564(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + adcl 596(%esp), %ebp + movl %ebp, 164(%esp) # 4-byte Spill + movl 168(%esp), %edi # 4-byte Reload + adcl 600(%esp), %edi + movl 136(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte 
Reload + adcl 616(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + movl 120(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 1096(%esp), %eax + movl %eax, %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 496(%esp), %ebp + movl 92(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl 116(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %ebp # 4-byte Reload + adcl 516(%esp), %ebp + movl 172(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + adcl 532(%esp), %edi + movl %edi, 168(%esp) # 4-byte Spill + movl 136(%esp), %edi # 4-byte Reload + adcl 536(%esp), %edi + movl 148(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 120(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 432(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 432(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl 112(%esp), %ecx # 4-byte Reload + adcl 440(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + adcl 448(%esp), %ebp + movl %ebp, 144(%esp) # 4-byte Spill + movl 172(%esp), %ecx # 4-byte Reload + adcl 452(%esp), %ecx + movl %ecx, 172(%esp) # 4-byte Spill + movl 160(%esp), %ebp # 4-byte Reload + adcl 456(%esp), %ebp + movl 164(%esp), %ecx # 4-byte Reload + adcl 460(%esp), %ecx + movl %ecx, 164(%esp) # 4-byte Spill + movl 168(%esp), %ecx # 4-byte Reload + adcl 464(%esp), %ecx + movl %ecx, 168(%esp) # 4-byte Spill + adcl 468(%esp), %edi + movl %edi, 136(%esp) # 4-byte Spill + movl 148(%esp), %ecx # 4-byte Reload + adcl 472(%esp), %ecx + movl %ecx, 148(%esp) # 4-byte Spill + movl 156(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 480(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 484(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 140(%esp), %ecx # 
4-byte Reload + adcl 488(%esp), %ecx + movl %ecx, 140(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %eax, %esi + movl 88(%esp), %edi # 4-byte Reload + imull %edi, %eax + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 368(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl 132(%esp), %ecx # 4-byte Reload + adcl 376(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 144(%esp), %ecx # 4-byte Reload + adcl 380(%esp), %ecx + movl %ecx, 144(%esp) # 4-byte Spill + movl 172(%esp), %esi # 4-byte Reload + adcl 384(%esp), %esi + adcl 388(%esp), %ebp + movl %ebp, 160(%esp) # 4-byte Spill + movl 164(%esp), %ecx # 4-byte Reload + adcl 392(%esp), %ecx + movl %ecx, 164(%esp) # 4-byte Spill + movl 168(%esp), %ecx # 4-byte Reload + adcl 396(%esp), %ecx + movl %ecx, 168(%esp) # 4-byte Spill + movl 136(%esp), %ecx # 4-byte Reload + adcl 400(%esp), %ecx + movl %ecx, 136(%esp) # 4-byte Spill + movl 148(%esp), %ecx # 4-byte Reload + adcl 404(%esp), %ecx + movl %ecx, 148(%esp) # 4-byte Spill + movl 156(%esp), %ecx # 4-byte Reload + adcl 408(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 412(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 416(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 140(%esp), %ecx # 4-byte Reload + adcl 420(%esp), %ecx + movl %ecx, 140(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 424(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %eax, %ebp + imull %edi, %eax + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 304(%esp), %ebp + movl 132(%esp), %edi # 4-byte Reload + adcl 308(%esp), %edi + movl 144(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl %esi, %ebp + adcl 316(%esp), %ebp + movl 160(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 136(%esp), %esi # 4-byte Reload + adcl 332(%esp), %esi + movl 148(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + 
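+# Next reduction iteration: %edi carries the current low limb; multiplying it
+# by n' (spilled at 88(%esp)) yields the new m, after which m*p is added via
+# .LmulPv448x32 and the window slides down by one 32-bit word.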
movl %edi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 240(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 240(%esp), %edi + movl 144(%esp), %ecx # 4-byte Reload + adcl 244(%esp), %ecx + adcl 248(%esp), %ebp + movl %ebp, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + adcl 264(%esp), %esi + movl %esi, 136(%esp) # 4-byte Spill + movl 148(%esp), %edi # 4-byte Reload + adcl 268(%esp), %edi + movl 156(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 124(%esp), %ebp # 4-byte Reload + adcl 280(%esp), %ebp + movl 140(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl 88(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 176(%esp), %ecx + movl 1096(%esp), %edx + movl 100(%esp), %ebx # 4-byte Reload + calll .LmulPv448x32 + addl 176(%esp), %esi + movl 172(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %ebx # 4-byte Reload + adcl 188(%esp), %ebx + movl %ebx, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 136(%esp), %edx # 4-byte Reload + adcl 196(%esp), %edx + movl %edx, 136(%esp) # 4-byte Spill + movl %edi, %eax + adcl 200(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl 212(%esp), %ebp + movl %ebp, 124(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 232(%esp), %ecx + movl 84(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl 172(%esp), %edi # 4-byte Reload + subl 16(%esp), %edi # 4-byte Folded Reload + movl 160(%esp), %ebp # 4-byte Reload + sbbl 8(%esp), %ebp # 4-byte Folded Reload + sbbl 12(%esp), %ebx # 4-byte Folded Reload + movl 168(%esp), %eax # 4-byte Reload + sbbl 20(%esp), %eax # 4-byte Folded Reload + sbbl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl 148(%esp), %edx # 4-byte Reload + sbbl 28(%esp), %edx # 4-byte Folded Reload + 
movl %edx, 84(%esp) # 4-byte Spill + movl 156(%esp), %edx # 4-byte Reload + sbbl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl 152(%esp), %edx # 4-byte Reload + sbbl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 92(%esp) # 4-byte Spill + movl 124(%esp), %edx # 4-byte Reload + sbbl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 96(%esp) # 4-byte Spill + movl 140(%esp), %edx # 4-byte Reload + sbbl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 100(%esp) # 4-byte Spill + movl 128(%esp), %edx # 4-byte Reload + sbbl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 112(%esp) # 4-byte Spill + movl 120(%esp), %edx # 4-byte Reload + sbbl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 116(%esp) # 4-byte Spill + movl 108(%esp), %edx # 4-byte Reload + sbbl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, 132(%esp) # 4-byte Spill + movl %ecx, %edx + sbbl 60(%esp), %edx # 4-byte Folded Reload + movl %edx, 144(%esp) # 4-byte Spill + sbbl $0, %esi + andl $1, %esi + jne .LBB212_2 +# BB#1: + movl %eax, 168(%esp) # 4-byte Spill +.LBB212_2: + movl %esi, %edx + testb %dl, %dl + movl 172(%esp), %eax # 4-byte Reload + jne .LBB212_4 +# BB#3: + movl %edi, %eax +.LBB212_4: + movl 1088(%esp), %edi + movl %eax, (%edi) + movl %ecx, 104(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + movl 160(%esp), %ecx # 4-byte Reload + jne .LBB212_6 +# BB#5: + movl %ebp, %ecx +.LBB212_6: + movl %ecx, 4(%edi) + movl 108(%esp), %ecx # 4-byte Reload + movl 164(%esp), %ebp # 4-byte Reload + jne .LBB212_8 +# BB#7: + movl %ebx, %ebp +.LBB212_8: + movl %ebp, 8(%edi) + movl 168(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%edi) + movl 124(%esp), %ebp # 4-byte Reload + movl 136(%esp), %ebx # 4-byte Reload + jne .LBB212_10 +# BB#9: + movl 80(%esp), %ebx # 4-byte Reload +.LBB212_10: + movl %ebx, 16(%edi) + movl 140(%esp), %ebx # 4-byte Reload + movl 148(%esp), %esi # 4-byte Reload + jne .LBB212_12 +# BB#11: + movl 84(%esp), %esi # 4-byte Reload +.LBB212_12: + movl %esi, 20(%edi) + movl 128(%esp), %esi # 4-byte Reload + jne .LBB212_14 +# BB#13: + movl 88(%esp), %eax # 4-byte Reload +.LBB212_14: + movl %eax, 24(%edi) + movl 120(%esp), %edx # 4-byte Reload + jne .LBB212_16 +# BB#15: + movl 92(%esp), %eax # 4-byte Reload + movl %eax, 152(%esp) # 4-byte Spill +.LBB212_16: + movl 152(%esp), %eax # 4-byte Reload + movl %eax, 28(%edi) + jne .LBB212_18 +# BB#17: + movl 96(%esp), %ebp # 4-byte Reload +.LBB212_18: + movl %ebp, 32(%edi) + jne .LBB212_20 +# BB#19: + movl 100(%esp), %ebx # 4-byte Reload +.LBB212_20: + movl %ebx, 36(%edi) + jne .LBB212_22 +# BB#21: + movl 112(%esp), %esi # 4-byte Reload +.LBB212_22: + movl %esi, 40(%edi) + jne .LBB212_24 +# BB#23: + movl 116(%esp), %edx # 4-byte Reload +.LBB212_24: + movl %edx, 44(%edi) + jne .LBB212_26 +# BB#25: + movl 132(%esp), %ecx # 4-byte Reload +.LBB212_26: + movl %ecx, 48(%edi) + movl 104(%esp), %eax # 4-byte Reload + jne .LBB212_28 +# BB#27: + movl 144(%esp), %eax # 4-byte Reload +.LBB212_28: + movl %eax, 52(%edi) + addl $1068, %esp # imm = 0x42C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end212: + .size mcl_fp_montRed14Lbmi2, .Lfunc_end212-mcl_fp_montRed14Lbmi2 + + .globl mcl_fp_addPre14Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre14Lbmi2,@function +mcl_fp_addPre14Lbmi2: # @mcl_fp_addPre14Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + 
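+# mcl_fp_addPre14Lbmi2 streams through all 14 limbs with one addl/adcl chain,
+# interleaving loads and stores through the destination pointer fetched here;
+# the closing sbbl/andl pair materializes the carry-out as a 0/1 return value.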
movl 16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ebx) + movl 32(%ecx), %esi + adcl %edi, %esi + movl 36(%eax), %edi + movl %edx, 28(%ebx) + movl 36(%ecx), %edx + adcl %edi, %edx + movl 40(%eax), %edi + movl %esi, 32(%ebx) + movl 40(%ecx), %esi + adcl %edi, %esi + movl 44(%eax), %edi + movl %edx, 36(%ebx) + movl 44(%ecx), %edx + adcl %edi, %edx + movl 48(%eax), %edi + movl %esi, 40(%ebx) + movl 48(%ecx), %esi + adcl %edi, %esi + movl %edx, 44(%ebx) + movl %esi, 48(%ebx) + movl 52(%eax), %eax + movl 52(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 52(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end213: + .size mcl_fp_addPre14Lbmi2, .Lfunc_end213-mcl_fp_addPre14Lbmi2 + + .globl mcl_fp_subPre14Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre14Lbmi2,@function +mcl_fp_subPre14Lbmi2: # @mcl_fp_subPre14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ebp) + movl 32(%ecx), %edi + sbbl %ebx, %edi + movl 36(%edx), %ebx + movl %esi, 28(%ebp) + movl 36(%ecx), %esi + sbbl %ebx, %esi + movl 40(%edx), %ebx + movl %edi, 32(%ebp) + movl 40(%ecx), %edi + sbbl %ebx, %edi + movl 44(%edx), %ebx + movl %esi, 36(%ebp) + movl 44(%ecx), %esi + sbbl %ebx, %esi + movl 48(%edx), %ebx + movl %edi, 40(%ebp) + movl 48(%ecx), %edi + sbbl %ebx, %edi + movl %esi, 44(%ebp) + movl %edi, 48(%ebp) + movl 52(%edx), %edx + movl 52(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 52(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end214: + .size mcl_fp_subPre14Lbmi2, .Lfunc_end214-mcl_fp_subPre14Lbmi2 + + .globl mcl_fp_shr1_14Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_14Lbmi2,@function +mcl_fp_shr1_14Lbmi2: # @mcl_fp_shr1_14Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %esi 
+ shrdl $1, %esi, %edx + movl %edx, 40(%ecx) + movl 48(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 44(%ecx) + movl 52(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 48(%ecx) + shrl %eax + movl %eax, 52(%ecx) + popl %esi + retl +.Lfunc_end215: + .size mcl_fp_shr1_14Lbmi2, .Lfunc_end215-mcl_fp_shr1_14Lbmi2 + + .globl mcl_fp_add14Lbmi2 + .align 16, 0x90 + .type mcl_fp_add14Lbmi2,@function +mcl_fp_add14Lbmi2: # @mcl_fp_add14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $44, %esp + movl 72(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %ecx + movl 68(%esp), %ebp + addl (%ebp), %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl 4(%ebp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 8(%eax), %ecx + adcl 8(%ebp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 12(%ebp), %edx + movl 16(%ebp), %ecx + adcl 12(%eax), %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl 16(%eax), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 20(%ebp), %ecx + adcl 20(%eax), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 24(%ebp), %ecx + adcl 24(%eax), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 28(%ebp), %ecx + adcl 28(%eax), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 32(%ebp), %ecx + adcl 32(%eax), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 36(%ebp), %ecx + adcl 36(%eax), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 40(%ebp), %edx + adcl 40(%eax), %edx + movl %edx, (%esp) # 4-byte Spill + movl 44(%ebp), %ebx + adcl 44(%eax), %ebx + movl 48(%ebp), %esi + adcl 48(%eax), %esi + movl 52(%ebp), %edi + adcl 52(%eax), %edi + movl 64(%esp), %eax + movl 4(%esp), %ebp # 4-byte Reload + movl %ebp, (%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 20(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl %edx, 40(%eax) + movl %ebx, 44(%eax) + movl %esi, 48(%eax) + movl %edi, 52(%eax) + sbbl %ecx, %ecx + andl $1, %ecx + movl 76(%esp), %edx + subl (%edx), %ebp + movl %ebp, 4(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + sbbl 4(%edx), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + sbbl 8(%edx), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + sbbl 12(%edx), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 28(%esp), %ebp # 4-byte Reload + sbbl 16(%edx), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 24(%esp), %ebp # 4-byte Reload + sbbl 20(%edx), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 20(%esp), %ebp # 4-byte Reload + sbbl 24(%edx), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 16(%esp), %ebp # 4-byte Reload + sbbl 28(%edx), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 12(%esp), %ebp # 4-byte Reload + sbbl 32(%edx), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 8(%esp), %ebp # 4-byte Reload + sbbl 36(%edx), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl (%esp), %ebp # 4-byte Reload + sbbl 40(%edx), %ebp + sbbl 44(%edx), %ebx + sbbl 48(%edx), %esi + sbbl 52(%edx), %edi + sbbl $0, %ecx + testb $1, %cl + jne .LBB216_2 +# BB#1: # %nocarry + movl 4(%esp), %ecx # 4-byte Reload + movl %ecx, (%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 
4(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 20(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl %ebp, 40(%eax) + movl %ebx, 44(%eax) + movl %esi, 48(%eax) + movl %edi, 52(%eax) +.LBB216_2: # %carry + addl $44, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end216: + .size mcl_fp_add14Lbmi2, .Lfunc_end216-mcl_fp_add14Lbmi2 + + .globl mcl_fp_addNF14Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF14Lbmi2,@function +mcl_fp_addNF14Lbmi2: # @mcl_fp_addNF14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $112, %esp + movl 140(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 136(%esp), %ecx + addl (%ecx), %edx + movl %edx, 72(%esp) # 4-byte Spill + adcl 4(%ecx), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 52(%eax), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 48(%eax), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 44(%eax), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 40(%eax), %ebp + movl 36(%eax), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 32(%eax), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 28(%eax), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 24(%eax), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 20(%eax), %ebx + movl 16(%eax), %edi + movl 12(%eax), %esi + movl 8(%eax), %edx + adcl 8(%ecx), %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl 12(%ecx), %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 16(%ecx), %edi + movl %edi, 64(%esp) # 4-byte Spill + adcl 20(%ecx), %ebx + movl %ebx, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 24(%ecx), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 28(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 32(%ecx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 36(%ecx), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 40(%ecx), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 44(%ecx), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 48(%ecx), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 52(%ecx), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 144(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + subl (%ecx), %eax + movl %eax, (%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + sbbl 4(%ecx), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 8(%ecx), %edx + movl %edx, 8(%esp) # 4-byte Spill + sbbl 12(%ecx), %esi + movl %esi, 12(%esp) # 4-byte Spill + sbbl 16(%ecx), %edi + movl %edi, 16(%esp) # 4-byte Spill + sbbl 20(%ecx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + movl %edx, %eax + sbbl 24(%ecx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 28(%ecx), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + sbbl 32(%ecx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + sbbl 36(%ecx), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + 
sbbl 40(%ecx), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + movl %eax, %esi + movl %eax, %ebp + sbbl 44(%ecx), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + movl %eax, %esi + sbbl 48(%ecx), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + movl %eax, %edi + sbbl 52(%ecx), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl %edi, %ecx + sarl $31, %ecx + testl %ecx, %ecx + movl 72(%esp), %ecx # 4-byte Reload + js .LBB217_2 +# BB#1: + movl (%esp), %ecx # 4-byte Reload +.LBB217_2: + movl 132(%esp), %edi + movl %ecx, (%edi) + movl 76(%esp), %eax # 4-byte Reload + js .LBB217_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB217_4: + movl %eax, 4(%edi) + movl %edx, %ecx + movl 64(%esp), %eax # 4-byte Reload + movl 56(%esp), %edx # 4-byte Reload + js .LBB217_6 +# BB#5: + movl 8(%esp), %edx # 4-byte Reload +.LBB217_6: + movl %edx, 8(%edi) + movl %ebp, %edx + movl 104(%esp), %ebx # 4-byte Reload + movl 60(%esp), %ebp # 4-byte Reload + js .LBB217_8 +# BB#7: + movl 12(%esp), %ebp # 4-byte Reload +.LBB217_8: + movl %ebp, 12(%edi) + movl 100(%esp), %ebp # 4-byte Reload + js .LBB217_10 +# BB#9: + movl 16(%esp), %eax # 4-byte Reload +.LBB217_10: + movl %eax, 16(%edi) + movl 80(%esp), %esi # 4-byte Reload + js .LBB217_12 +# BB#11: + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 68(%esp) # 4-byte Spill +.LBB217_12: + movl 68(%esp), %eax # 4-byte Reload + movl %eax, 20(%edi) + js .LBB217_14 +# BB#13: + movl 24(%esp), %ecx # 4-byte Reload +.LBB217_14: + movl %ecx, 24(%edi) + js .LBB217_16 +# BB#15: + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 108(%esp) # 4-byte Spill +.LBB217_16: + movl 108(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%edi) + js .LBB217_18 +# BB#17: + movl 32(%esp), %ebp # 4-byte Reload +.LBB217_18: + movl %ebp, 32(%edi) + js .LBB217_20 +# BB#19: + movl 36(%esp), %ebx # 4-byte Reload +.LBB217_20: + movl %ebx, 36(%edi) + js .LBB217_22 +# BB#21: + movl 40(%esp), %esi # 4-byte Reload +.LBB217_22: + movl %esi, 40(%edi) + movl 96(%esp), %eax # 4-byte Reload + js .LBB217_24 +# BB#23: + movl 44(%esp), %edx # 4-byte Reload +.LBB217_24: + movl %edx, 44(%edi) + movl 92(%esp), %ecx # 4-byte Reload + js .LBB217_26 +# BB#25: + movl 48(%esp), %eax # 4-byte Reload +.LBB217_26: + movl %eax, 48(%edi) + js .LBB217_28 +# BB#27: + movl 52(%esp), %ecx # 4-byte Reload +.LBB217_28: + movl %ecx, 52(%edi) + addl $112, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end217: + .size mcl_fp_addNF14Lbmi2, .Lfunc_end217-mcl_fp_addNF14Lbmi2 + + .globl mcl_fp_sub14Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub14Lbmi2,@function +mcl_fp_sub14Lbmi2: # @mcl_fp_sub14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $52, %esp + movl 76(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 80(%esp), %edi + subl (%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 32(%esi), %eax + sbbl 32(%edi), %eax + movl %eax, 20(%esp) 
# 4-byte Spill + movl 36(%esi), %edx + sbbl 36(%edi), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 40(%esi), %ecx + sbbl 40(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 44(%esi), %eax + sbbl 44(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 48(%esi), %ebp + sbbl 48(%edi), %ebp + movl 52(%esi), %esi + sbbl 52(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 72(%esp), %ebx + movl 44(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 28(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 32(%ebx) + movl %edx, 36(%ebx) + movl %ecx, 40(%ebx) + movl %eax, 44(%ebx) + movl %ebp, 48(%ebx) + movl %esi, 52(%ebx) + je .LBB218_2 +# BB#1: # %carry + movl %esi, (%esp) # 4-byte Spill + movl 84(%esp), %esi + movl 44(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 36(%esp), %edi # 4-byte Reload + adcl 8(%esi), %edi + movl 12(%esi), %eax + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %edi, 8(%ebx) + movl 16(%esi), %ecx + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 40(%esi), %ecx + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl 44(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%ebx) + movl %eax, 44(%ebx) + movl 48(%esi), %eax + adcl %ebp, %eax + movl %eax, 48(%ebx) + movl 52(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%ebx) +.LBB218_2: # %nocarry + addl $52, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end218: + .size mcl_fp_sub14Lbmi2, .Lfunc_end218-mcl_fp_sub14Lbmi2 + + .globl mcl_fp_subNF14Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF14Lbmi2,@function +mcl_fp_subNF14Lbmi2: # @mcl_fp_subNF14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $88, %esp + movl 112(%esp), %ecx + movl 52(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl (%ecx), %edx + movl 4(%ecx), %eax + movl 116(%esp), %edi + subl (%edi), %edx + movl %edx, 56(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%ecx), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 32(%ecx), %ebp + movl 28(%ecx), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 24(%ecx), %ebx + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 40(%esp) # 
4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 44(%esp) # 4-byte Spill + sbbl 24(%edi), %ebx + movl %ebx, 48(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 28(%edi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + sbbl 32(%edi), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 48(%edi), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + sbbl 52(%edi), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %eax, %esi + sarl $31, %esi + movl %esi, %ecx + addl %ecx, %ecx + movl %esi, %ebp + adcl %ebp, %ebp + shrl $31, %eax + orl %ecx, %eax + movl 120(%esp), %edi + andl 4(%edi), %ebp + andl (%edi), %eax + movl 52(%edi), %ecx + andl %esi, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 48(%edi), %ecx + andl %esi, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 44(%edi), %ecx + andl %esi, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 40(%edi), %ecx + andl %esi, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 36(%edi), %ecx + andl %esi, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 32(%edi), %ecx + andl %esi, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 28(%edi), %ecx + andl %esi, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 24(%edi), %ecx + andl %esi, %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 20(%edi), %ebx + andl %esi, %ebx + movl 16(%edi), %edx + andl %esi, %edx + movl 12(%edi), %ecx + andl %esi, %ecx + andl 8(%edi), %esi + addl 56(%esp), %eax # 4-byte Folded Reload + adcl 60(%esp), %ebp # 4-byte Folded Reload + movl 108(%esp), %edi + movl %eax, (%edi) + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %ebp, 4(%edi) + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %esi, 8(%edi) + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %ecx, 12(%edi) + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %edx, 16(%edi) + movl (%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %ebx, 20(%edi) + movl 4(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %eax, 24(%edi) + movl 8(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %ecx, 28(%edi) + movl 12(%esp), %ecx # 4-byte Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %eax, 32(%edi) + movl 16(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %ecx, 36(%edi) + movl 20(%esp), %ecx # 4-byte Reload + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %eax, 40(%edi) + movl 24(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %ecx, 44(%edi) + movl %eax, 48(%edi) + movl 28(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%edi) + addl $88, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end219: + .size mcl_fp_subNF14Lbmi2, .Lfunc_end219-mcl_fp_subNF14Lbmi2 + + .globl mcl_fpDbl_add14Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add14Lbmi2,@function +mcl_fpDbl_add14Lbmi2: # @mcl_fpDbl_add14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $100, %esp + movl 128(%esp), %ecx + movl 124(%esp), %esi + movl 12(%esi), %edi + movl 16(%esi), %edx + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%esi), %ebp + movl 120(%esp), %eax + movl %ebp, (%eax) + movl 
4(%ecx), %ebp + adcl 4(%esi), %ebp + adcl 8(%esi), %ebx + adcl 12(%ecx), %edi + adcl 16(%ecx), %edx + movl %ebp, 4(%eax) + movl 64(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %edi, 12(%eax) + movl 20(%esi), %edi + adcl %ebx, %edi + movl 24(%ecx), %ebx + movl %edx, 16(%eax) + movl 24(%esi), %edx + adcl %ebx, %edx + movl 28(%ecx), %ebx + movl %edi, 20(%eax) + movl 28(%esi), %edi + adcl %ebx, %edi + movl 32(%ecx), %ebx + movl %edx, 24(%eax) + movl 32(%esi), %edx + adcl %ebx, %edx + movl 36(%ecx), %ebx + movl %edi, 28(%eax) + movl 36(%esi), %edi + adcl %ebx, %edi + movl 40(%ecx), %ebx + movl %edx, 32(%eax) + movl 40(%esi), %edx + adcl %ebx, %edx + movl 44(%ecx), %ebx + movl %edi, 36(%eax) + movl 44(%esi), %edi + adcl %ebx, %edi + movl 48(%ecx), %ebx + movl %edx, 40(%eax) + movl 48(%esi), %edx + adcl %ebx, %edx + movl 52(%ecx), %ebx + movl %edi, 44(%eax) + movl 52(%esi), %edi + adcl %ebx, %edi + movl 56(%ecx), %ebx + movl %edx, 48(%eax) + movl 56(%esi), %edx + adcl %ebx, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 60(%ecx), %edx + movl %edi, 52(%eax) + movl 60(%esi), %eax + adcl %edx, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 64(%esi), %eax + adcl %ebp, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%ecx), %edx + movl 68(%esi), %eax + adcl %edx, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%ecx), %edx + movl 72(%esi), %eax + adcl %edx, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 76(%ecx), %edx + movl 76(%esi), %eax + adcl %edx, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%ecx), %edx + movl 80(%esi), %eax + adcl %edx, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%ecx), %edx + movl 84(%esi), %eax + adcl %edx, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 88(%ecx), %edx + movl 88(%esi), %eax + adcl %edx, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 92(%ecx), %edx + movl 92(%esi), %eax + adcl %edx, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 96(%ecx), %edx + movl 96(%esi), %eax + adcl %edx, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 100(%ecx), %edx + movl 100(%esi), %edi + adcl %edx, %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 104(%ecx), %edx + movl 104(%esi), %ebx + adcl %edx, %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 108(%ecx), %ecx + movl 108(%esi), %esi + adcl %ecx, %esi + sbbl %edx, %edx + andl $1, %edx + movl 132(%esp), %ebp + movl 72(%esp), %ecx # 4-byte Reload + subl (%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 4(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 8(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 12(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 16(%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 20(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + sbbl 24(%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + sbbl 32(%ebp), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + sbbl 36(%ebp), %ecx + sbbl 40(%ebp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl 44(%ebp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl %ebx, %eax + movl %esi, %ebx + sbbl 48(%ebp), %eax + movl %eax, 44(%esp) # 4-byte Spill + sbbl 52(%ebp), %esi + sbbl $0, %edx + andl $1, %edx + jne 
.LBB220_2 +# BB#1: + movl %esi, %ebx +.LBB220_2: + testb %dl, %dl + movl 72(%esp), %eax # 4-byte Reload + movl 68(%esp), %edx # 4-byte Reload + movl 64(%esp), %edi # 4-byte Reload + movl 60(%esp), %ebp # 4-byte Reload + jne .LBB220_4 +# BB#3: + movl %ecx, %edx + movl (%esp), %edi # 4-byte Reload + movl 4(%esp), %ebp # 4-byte Reload + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload +.LBB220_4: + movl 120(%esp), %esi + movl %eax, 56(%esi) + movl 76(%esp), %eax # 4-byte Reload + movl %eax, 60(%esi) + movl 80(%esp), %eax # 4-byte Reload + movl %eax, 64(%esi) + movl 84(%esp), %eax # 4-byte Reload + movl %eax, 68(%esi) + movl 88(%esp), %eax # 4-byte Reload + movl %eax, 72(%esi) + movl 92(%esp), %eax # 4-byte Reload + movl %eax, 76(%esi) + movl 96(%esp), %eax # 4-byte Reload + movl %eax, 80(%esi) + movl %ebp, 84(%esi) + movl %edi, 88(%esi) + movl %edx, 92(%esi) + movl 52(%esp), %edx # 4-byte Reload + movl 48(%esp), %eax # 4-byte Reload + jne .LBB220_6 +# BB#5: + movl 36(%esp), %eax # 4-byte Reload +.LBB220_6: + movl %eax, 96(%esi) + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB220_8 +# BB#7: + movl 40(%esp), %edx # 4-byte Reload +.LBB220_8: + movl %edx, 100(%esi) + jne .LBB220_10 +# BB#9: + movl 44(%esp), %ecx # 4-byte Reload +.LBB220_10: + movl %ecx, 104(%esi) + movl %ebx, 108(%esi) + addl $100, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end220: + .size mcl_fpDbl_add14Lbmi2, .Lfunc_end220-mcl_fpDbl_add14Lbmi2 + + .globl mcl_fpDbl_sub14Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub14Lbmi2,@function +mcl_fpDbl_sub14Lbmi2: # @mcl_fpDbl_sub14Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $96, %esp + movl 120(%esp), %ebx + movl (%ebx), %eax + movl 4(%ebx), %edx + movl 124(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %edx + movl 8(%ebx), %esi + sbbl 8(%ebp), %esi + movl 116(%esp), %ecx + movl %eax, (%ecx) + movl 12(%ebx), %eax + sbbl 12(%ebp), %eax + movl %edx, 4(%ecx) + movl 16(%ebx), %edx + sbbl 16(%ebp), %edx + movl %esi, 8(%ecx) + movl 20(%ebp), %esi + movl %eax, 12(%ecx) + movl 20(%ebx), %eax + sbbl %esi, %eax + movl 24(%ebp), %esi + movl %edx, 16(%ecx) + movl 24(%ebx), %edx + sbbl %esi, %edx + movl 28(%ebp), %esi + movl %eax, 20(%ecx) + movl 28(%ebx), %eax + sbbl %esi, %eax + movl 32(%ebp), %esi + movl %edx, 24(%ecx) + movl 32(%ebx), %edx + sbbl %esi, %edx + movl 36(%ebp), %esi + movl %eax, 28(%ecx) + movl 36(%ebx), %eax + sbbl %esi, %eax + movl 40(%ebp), %esi + movl %edx, 32(%ecx) + movl 40(%ebx), %edx + sbbl %esi, %edx + movl 44(%ebp), %esi + movl %eax, 36(%ecx) + movl 44(%ebx), %eax + sbbl %esi, %eax + movl 48(%ebp), %esi + movl %edx, 40(%ecx) + movl 48(%ebx), %edx + sbbl %esi, %edx + movl 52(%ebp), %esi + movl %eax, 44(%ecx) + movl 52(%ebx), %eax + sbbl %esi, %eax + movl 56(%ebp), %esi + movl %edx, 48(%ecx) + movl 56(%ebx), %edx + sbbl %esi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 60(%ebp), %edx + movl %eax, 52(%ecx) + movl 60(%ebx), %eax + sbbl %edx, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 64(%ebp), %eax + movl 64(%ebx), %edx + sbbl %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 
68(%ebp), %eax + movl 68(%ebx), %edx + sbbl %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 72(%ebp), %eax + movl 72(%ebx), %edx + sbbl %eax, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 76(%ebp), %eax + movl 76(%ebx), %edx + sbbl %eax, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 80(%ebp), %eax + movl 80(%ebx), %edx + sbbl %eax, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 84(%ebp), %eax + movl 84(%ebx), %edx + sbbl %eax, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 88(%ebp), %eax + movl 88(%ebx), %edx + sbbl %eax, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 92(%ebp), %eax + movl 92(%ebx), %edx + sbbl %eax, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 96(%ebp), %eax + movl 96(%ebx), %edx + sbbl %eax, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 100(%ebp), %eax + movl 100(%ebx), %edx + sbbl %eax, %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 104(%ebp), %eax + movl 104(%ebx), %edx + sbbl %eax, %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 108(%ebp), %eax + movl 108(%ebx), %edx + sbbl %eax, %edx + movl %edx, 92(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 128(%esp), %ebp + jne .LBB221_1 +# BB#2: + movl $0, 56(%esp) # 4-byte Folded Spill + jmp .LBB221_3 +.LBB221_1: + movl 52(%ebp), %edx + movl %edx, 56(%esp) # 4-byte Spill +.LBB221_3: + testb %al, %al + jne .LBB221_4 +# BB#5: + movl $0, 24(%esp) # 4-byte Folded Spill + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB221_6 +.LBB221_4: + movl (%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 4(%ebp), %eax + movl %eax, 24(%esp) # 4-byte Spill +.LBB221_6: + jne .LBB221_7 +# BB#8: + movl $0, 32(%esp) # 4-byte Folded Spill + jmp .LBB221_9 +.LBB221_7: + movl 48(%ebp), %eax + movl %eax, 32(%esp) # 4-byte Spill +.LBB221_9: + jne .LBB221_10 +# BB#11: + movl $0, 28(%esp) # 4-byte Folded Spill + jmp .LBB221_12 +.LBB221_10: + movl 44(%ebp), %eax + movl %eax, 28(%esp) # 4-byte Spill +.LBB221_12: + jne .LBB221_13 +# BB#14: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB221_15 +.LBB221_13: + movl 40(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB221_15: + jne .LBB221_16 +# BB#17: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB221_18 +.LBB221_16: + movl 36(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB221_18: + jne .LBB221_19 +# BB#20: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB221_21 +.LBB221_19: + movl 32(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB221_21: + jne .LBB221_22 +# BB#23: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB221_24 +.LBB221_22: + movl 28(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB221_24: + jne .LBB221_25 +# BB#26: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB221_27 +.LBB221_25: + movl 24(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB221_27: + jne .LBB221_28 +# BB#29: + movl $0, %esi + jmp .LBB221_30 +.LBB221_28: + movl 20(%ebp), %esi +.LBB221_30: + jne .LBB221_31 +# BB#32: + movl $0, %edi + jmp .LBB221_33 +.LBB221_31: + movl 16(%ebp), %edi +.LBB221_33: + jne .LBB221_34 +# BB#35: + movl $0, %ebx + jmp .LBB221_36 +.LBB221_34: + movl 12(%ebp), %ebx +.LBB221_36: + jne .LBB221_37 +# BB#38: + xorl %ebp, %ebp + jmp .LBB221_39 +.LBB221_37: + movl 8(%ebp), %ebp +.LBB221_39: + movl 20(%esp), %edx # 4-byte Reload + addl 44(%esp), %edx # 4-byte Folded Reload + movl 24(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %edx, 56(%ecx) + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %eax, 60(%ecx) + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 64(%ecx) + adcl 
52(%esp), %edi # 4-byte Folded Reload + movl %ebx, 68(%ecx) + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %edi, 72(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %esi, 76(%ecx) + movl 4(%esp), %edx # 4-byte Reload + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %eax, 80(%ecx) + movl 8(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %edx, 84(%ecx) + movl 12(%esp), %edx # 4-byte Reload + adcl 76(%esp), %edx # 4-byte Folded Reload + movl %eax, 88(%ecx) + movl 16(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %edx, 92(%ecx) + movl 28(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx # 4-byte Folded Reload + movl %eax, 96(%ecx) + movl 32(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %edx, 100(%ecx) + movl %eax, 104(%ecx) + movl 56(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%ecx) + addl $96, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end221: + .size mcl_fpDbl_sub14Lbmi2, .Lfunc_end221-mcl_fpDbl_sub14Lbmi2 + + .align 16, 0x90 + .type .LmulPv480x32,@function +.LmulPv480x32: # @mulPv480x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $52, %esp + movl %edx, %eax + movl 72(%esp), %edi + movl %edi, %edx + mulxl 4(%eax), %ebx, %esi + movl %edi, %edx + mulxl (%eax), %ebp, %edx + movl %ebp, 48(%esp) # 4-byte Spill + addl %ebx, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 8(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 12(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 16(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 20(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 24(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 28(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 32(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 36(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 40(%eax), %edx, %ebp + adcl %esi, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 44(%eax), %ebx, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl %ebp, %ebx + movl %edi, %edx + mulxl 48(%eax), %esi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl 4(%esp), %esi # 4-byte Folded Reload + movl %edi, %edx + mulxl 52(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 44(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 36(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl 32(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%ecx) + movl 28(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%ecx) + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%ecx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%ecx) + movl %ebx, 44(%ecx) + movl %esi, 48(%ecx) + movl %edx, 52(%ecx) + movl %edi, %edx + mulxl 56(%eax), 
%eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%ecx) + adcl $0, %edx + movl %edx, 60(%ecx) + movl %ecx, %eax + addl $52, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end222: + .size .LmulPv480x32, .Lfunc_end222-.LmulPv480x32 + + .globl mcl_fp_mulUnitPre15Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre15Lbmi2,@function +mcl_fp_mulUnitPre15Lbmi2: # @mcl_fp_mulUnitPre15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $124, %esp + calll .L223$pb +.L223$pb: + popl %ebx +.Ltmp44: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp44-.L223$pb), %ebx + movl 152(%esp), %eax + movl %eax, (%esp) + leal 56(%esp), %ecx + movl 148(%esp), %edx + calll .LmulPv480x32 + movl 116(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 108(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 104(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 88(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 76(%esp), %ebp + movl 72(%esp), %ebx + movl 68(%esp), %edi + movl 64(%esp), %esi + movl 56(%esp), %edx + movl 60(%esp), %ecx + movl 144(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + movl 52(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + addl $124, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end223: + .size mcl_fp_mulUnitPre15Lbmi2, .Lfunc_end223-mcl_fp_mulUnitPre15Lbmi2 + + .globl mcl_fpDbl_mulPre15Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre15Lbmi2,@function +mcl_fpDbl_mulPre15Lbmi2: # @mcl_fpDbl_mulPre15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1084, %esp # imm = 0x43C + calll .L224$pb +.L224$pb: + popl %esi +.Ltmp45: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp45-.L224$pb), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 1112(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1016(%esp), %ecx + movl 1108(%esp), %edi + movl %edi, %edx + movl %esi, %ebx + calll .LmulPv480x32 + movl 1076(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1072(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1068(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1060(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1056(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1052(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1044(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1040(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1036(%esp), %eax + 
movl %eax, 28(%esp) # 4-byte Spill + movl 1032(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1028(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 1024(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1016(%esp), %eax + movl 1020(%esp), %ebp + movl 1104(%esp), %ecx + movl %eax, (%ecx) + movl 1112(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 952(%esp), %ecx + movl %edi, %edx + movl %esi, %ebx + calll .LmulPv480x32 + addl 952(%esp), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 1012(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1008(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1004(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1000(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 996(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 992(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 988(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 984(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 980(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 976(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 972(%esp), %edi + movl 968(%esp), %esi + movl 964(%esp), %edx + movl 956(%esp), %eax + movl 960(%esp), %ecx + movl 1104(%esp), %ebp + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 72(%esp), %eax # 4-byte Reload + addl 888(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 948(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 944(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 940(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 936(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 932(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 928(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 924(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 920(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 916(%esp), %ebx + movl 912(%esp), %edi + movl 
908(%esp), %esi + movl 904(%esp), %edx + movl 900(%esp), %ecx + movl 892(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 896(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 72(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 112(%esp) # 4-byte Folded Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 68(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 824(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 824(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 884(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 880(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 876(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 872(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 868(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 864(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 860(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 856(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 852(%esp), %ebx + movl 848(%esp), %edi + movl 844(%esp), %esi + movl 840(%esp), %edx + movl 836(%esp), %ecx + movl 828(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 60(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 
80(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 760(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 804(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 800(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 796(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 792(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 788(%esp), %ebx + movl 784(%esp), %edi + movl 780(%esp), %esi + movl 776(%esp), %edx + movl 772(%esp), %ecx + movl 764(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 60(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 696(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 748(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + 
movl 744(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 740(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 736(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 732(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 724(%esp), %ebx + movl 720(%esp), %edi + movl 716(%esp), %esi + movl 712(%esp), %edx + movl 708(%esp), %ecx + movl 700(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 704(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 632(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 692(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 688(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 684(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 680(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 676(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 672(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 660(%esp), %ebx + movl 656(%esp), %edi + movl 652(%esp), %esi + movl 648(%esp), %edx + movl 644(%esp), %ecx + movl 636(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 640(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 64(%esp), %edi # 
4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 568(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 628(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 624(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 620(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 616(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 596(%esp), %ebx + movl 592(%esp), %edi + movl 588(%esp), %esi + movl 584(%esp), %edx + movl 580(%esp), %ecx + movl 572(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 576(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) 
+ leal 504(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 504(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 564(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 560(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 532(%esp), %ebx + movl 528(%esp), %edi + movl 524(%esp), %esi + movl 520(%esp), %edx + movl 516(%esp), %ecx + movl 508(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 512(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 440(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 492(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 472(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 468(%esp), %ebx + movl 464(%esp), %edi + movl 460(%esp), %esi + movl 456(%esp), %edx + movl 452(%esp), %ecx + movl 444(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 448(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 
36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 376(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 436(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 432(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 428(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 424(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 416(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 404(%esp), %ebx + movl 400(%esp), %edi + movl 396(%esp), %esi + movl 392(%esp), %edx + movl 388(%esp), %ecx + movl 380(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload 
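+# Note (editorial): the "# 4-byte Spill" / "# 4-byte Reload" annotations are emitted by
+# LLVM's register allocator and mark stack slots used to spill and reload live values;
+# "Folded Reload" means the reload was folded into the arithmetic instruction's memory
+# operand (e.g. adcl mem, reg) instead of going through a separate movl.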
+ movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 312(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 360(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 356(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 352(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 348(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 344(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 340(%esp), %ebx + movl 336(%esp), %edi + movl 332(%esp), %esi + movl 328(%esp), %edx + movl 324(%esp), %ecx + movl 316(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 44(%eax) + movl 36(%esp), %eax # 4-byte Reload + adcl %eax, 108(%esp) # 4-byte Folded Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 48(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 1108(%esp), %eax + movl %eax, %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 108(%esp), %eax # 4-byte Reload + addl 248(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 304(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 300(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 296(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 292(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 288(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 284(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 280(%esp), %eax + 
movl %eax, 60(%esp) # 4-byte Spill + movl 276(%esp), %ebx + movl 272(%esp), %edi + movl 268(%esp), %edx + movl 264(%esp), %ecx + movl 260(%esp), %eax + movl 252(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 256(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + movl 1104(%esp), %ebp + movl %esi, 48(%ebp) + movl 112(%esp), %esi # 4-byte Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 112(%esp) # 4-byte Spill + movl 20(%esp), %esi # 4-byte Reload + adcl %esi, 36(%esp) # 4-byte Folded Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 32(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 100(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 184(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 244(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 240(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 236(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 232(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 228(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 224(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 220(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 212(%esp), %ebx + movl 208(%esp), %edx + movl 204(%esp), %ecx + movl 200(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 196(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 188(%esp), %eax + movl 192(%esp), %esi + movl 112(%esp), %ebp # 4-byte Reload + movl 1104(%esp), %edi + movl %ebp, 52(%edi) + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl %esi, %ebp + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl 72(%esp), %esi # 4-byte Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + movl 24(%esp), %edi # 4-byte Reload + adcl %edi, 40(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 
76(%esp) # 4-byte Spill + movl 52(%esp), %ebx # 4-byte Reload + adcl %ebx, 64(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 120(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 124(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl 128(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 164(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 156(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 152(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 148(%esp), %ebp + movl 144(%esp), %edi + movl 140(%esp), %esi + movl 136(%esp), %edx + movl 132(%esp), %ecx + movl 1104(%esp), %eax + movl 112(%esp), %ebx # 4-byte Reload + movl %ebx, 56(%eax) + movl 32(%esp), %ebx # 4-byte Reload + movl %ebx, 60(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl 72(%esp), %ebx # 4-byte Reload + movl %ebx, 64(%eax) + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %ecx, 68(%eax) + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %edx, 72(%eax) + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %esi, 76(%eax) + adcl 76(%esp), %ebp # 4-byte Folded Reload + movl %edi, 80(%eax) + movl 44(%esp), %edx # 4-byte Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %ebp, 84(%eax) + movl 52(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %edx, 88(%eax) + movl 68(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx # 4-byte Folded Reload + movl %ecx, 92(%eax) + movl 80(%esp), %ecx # 4-byte Reload + adcl 104(%esp), %ecx # 4-byte Folded Reload + movl %edx, 96(%eax) + movl 88(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx # 4-byte Folded Reload + movl %ecx, 100(%eax) + movl 96(%esp), %ecx # 4-byte Reload + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %edx, 104(%eax) + movl %ecx, 108(%eax) + movl 100(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 112(%eax) + movl 116(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 116(%eax) + addl $1084, %esp # imm = 0x43C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end224: + .size mcl_fpDbl_mulPre15Lbmi2, .Lfunc_end224-mcl_fpDbl_mulPre15Lbmi2 + + .globl mcl_fpDbl_sqrPre15Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre15Lbmi2,@function +mcl_fpDbl_sqrPre15Lbmi2: # @mcl_fpDbl_sqrPre15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1084, %esp # imm = 0x43C + calll .L225$pb +.L225$pb: + popl %ebx +.Ltmp46: + addl 
$_GLOBAL_OFFSET_TABLE_+(.Ltmp46-.L225$pb), %ebx + movl %ebx, 116(%esp) # 4-byte Spill + movl 1108(%esp), %edx + movl (%edx), %eax + movl %eax, (%esp) + leal 1016(%esp), %ecx + movl %edx, %edi + movl %ebx, %esi + calll .LmulPv480x32 + movl 1076(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1072(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1068(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1060(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1056(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1052(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1044(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1040(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1036(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1032(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1028(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 1024(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1016(%esp), %eax + movl 1020(%esp), %ebp + movl 1104(%esp), %ecx + movl %eax, (%ecx) + movl %edi, %edx + movl 4(%edx), %eax + movl %eax, (%esp) + leal 952(%esp), %ecx + movl %esi, %ebx + calll .LmulPv480x32 + addl 952(%esp), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 1012(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1008(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1004(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1000(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 996(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 992(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 988(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 984(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 980(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 976(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 972(%esp), %edi + movl 968(%esp), %esi + movl 964(%esp), %edx + movl 956(%esp), %eax + movl 960(%esp), %ecx + movl 1104(%esp), %ebp + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl 
$0, 56(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 8(%edx), %eax + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 72(%esp), %eax # 4-byte Reload + addl 888(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 948(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 944(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 940(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 936(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 932(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 928(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 924(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 920(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 916(%esp), %ebx + movl 912(%esp), %edi + movl 908(%esp), %esi + movl 904(%esp), %edx + movl 900(%esp), %ecx + movl 892(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 896(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 72(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 112(%esp) # 4-byte Folded Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 68(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 12(%edx), %eax + movl %eax, (%esp) + leal 824(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 824(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 884(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 880(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 876(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 872(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 868(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 864(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 860(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 856(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 852(%esp), %ebx + movl 848(%esp), %edi + movl 844(%esp), %esi + movl 840(%esp), %edx + movl 836(%esp), %ecx + movl 828(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 
12(%eax) + movl 60(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 16(%edx), %eax + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 760(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 804(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 800(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 796(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 792(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 788(%esp), %ebx + movl 784(%esp), %edi + movl 780(%esp), %esi + movl 776(%esp), %edx + movl 772(%esp), %ecx + movl 764(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 60(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl 
%eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 20(%edx), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 696(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 748(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 744(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 740(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 736(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 732(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 724(%esp), %ebx + movl 720(%esp), %edi + movl 716(%esp), %esi + movl 712(%esp), %edx + movl 708(%esp), %ecx + movl 700(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 704(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 24(%edx), %eax + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 632(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 692(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 688(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 684(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 680(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 676(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 672(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 660(%esp), %ebx + movl 656(%esp), %edi + movl 652(%esp), %esi + 
movl 648(%esp), %edx + movl 644(%esp), %ecx + movl 636(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 640(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 64(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 28(%edx), %eax + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 568(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 628(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 624(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 620(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 616(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 596(%esp), %ebx + movl 592(%esp), %edi + movl 588(%esp), %esi + movl 584(%esp), %edx + movl 580(%esp), %ecx + movl 572(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 576(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl 
%eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 32(%edx), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 504(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 564(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 560(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 532(%esp), %ebx + movl 528(%esp), %edi + movl 524(%esp), %esi + movl 520(%esp), %edx + movl 516(%esp), %ecx + movl 508(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 512(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 36(%edx), %eax + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 440(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 492(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 
488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 472(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 468(%esp), %ebx + movl 464(%esp), %edi + movl 460(%esp), %esi + movl 456(%esp), %edx + movl 452(%esp), %ecx + movl 444(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 448(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 40(%edx), %eax + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 376(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 436(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 432(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 428(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 424(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 416(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 404(%esp), %ebx + movl 400(%esp), %edi + movl 396(%esp), %esi + movl 392(%esp), %edx + movl 388(%esp), %ecx + movl 380(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 
32(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 44(%edx), %eax + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 312(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 360(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 356(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 352(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 348(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 344(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 340(%esp), %ebx + movl 336(%esp), %edi + movl 332(%esp), %esi + movl 328(%esp), %edx + movl 324(%esp), %ecx + movl 316(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 44(%eax) + movl 36(%esp), %eax # 4-byte Reload + adcl %eax, 108(%esp) # 4-byte Folded Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 48(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 48(%edx), %eax + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload 
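+# Note (editorial, hedged): mcl_fpDbl_sqrPre15Lbmi2 appears to reuse the mulPre loop
+# structure with both source operands taken from the same buffer at 1108(%esp), i.e.
+# the double-width square is computed as a word-by-word self-multiplication through
+# .LmulPv480x32 rather than a dedicated squaring kernel; only the operand-pointer
+# setup differs from mcl_fpDbl_mulPre15Lbmi2 above.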
+ calll .LmulPv480x32 + movl 108(%esp), %eax # 4-byte Reload + addl 248(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 304(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 300(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 296(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 292(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 288(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 284(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 280(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 276(%esp), %ebx + movl 272(%esp), %edi + movl 268(%esp), %edx + movl 264(%esp), %ecx + movl 260(%esp), %eax + movl 252(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 256(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + movl 1104(%esp), %ebp + movl %esi, 48(%ebp) + movl 112(%esp), %esi # 4-byte Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 112(%esp) # 4-byte Spill + movl 20(%esp), %esi # 4-byte Reload + adcl %esi, 36(%esp) # 4-byte Folded Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 32(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 100(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 52(%edx), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 184(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 244(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 240(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 236(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 232(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 228(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 224(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 220(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 212(%esp), %ebx + movl 208(%esp), %edx + movl 204(%esp), %ecx + movl 200(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 196(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 188(%esp), %eax + movl 192(%esp), %esi + movl 112(%esp), %ebp # 4-byte Reload + movl 1104(%esp), %edi + movl %ebp, 52(%edi) + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl %esi, %ebp + adcl 12(%esp), 
%ebp # 4-byte Folded Reload + movl 72(%esp), %esi # 4-byte Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + movl 24(%esp), %edi # 4-byte Reload + adcl %edi, 40(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 52(%esp), %ebx # 4-byte Reload + adcl %ebx, 64(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 56(%edx), %eax + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 120(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 124(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl 128(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 164(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 156(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 152(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 148(%esp), %ebp + movl 144(%esp), %edi + movl 140(%esp), %esi + movl 136(%esp), %edx + movl 132(%esp), %ecx + movl 1104(%esp), %eax + movl 112(%esp), %ebx # 4-byte Reload + movl %ebx, 56(%eax) + movl 32(%esp), %ebx # 4-byte Reload + movl %ebx, 60(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl 72(%esp), %ebx # 4-byte Reload + movl %ebx, 64(%eax) + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %ecx, 68(%eax) + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %edx, 72(%eax) + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %esi, 76(%eax) + adcl 76(%esp), %ebp # 4-byte Folded Reload + movl %edi, 80(%eax) + movl 44(%esp), %edx # 4-byte Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %ebp, 84(%eax) + movl 52(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %edx, 88(%eax) + movl 68(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx # 4-byte Folded Reload + movl %ecx, 92(%eax) + movl 80(%esp), %ecx # 4-byte Reload + adcl 104(%esp), %ecx # 4-byte Folded Reload + movl %edx, 96(%eax) + movl 88(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx # 4-byte Folded Reload + movl %ecx, 100(%eax) + movl 96(%esp), %ecx # 4-byte Reload + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %edx, 104(%eax) + movl %ecx, 108(%eax) + movl 100(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 112(%eax) + movl 116(%esp), %ecx # 4-byte Reload + adcl $0, %ecx 
+ movl %ecx, 116(%eax) + addl $1084, %esp # imm = 0x43C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end225: + .size mcl_fpDbl_sqrPre15Lbmi2, .Lfunc_end225-mcl_fpDbl_sqrPre15Lbmi2 + + .globl mcl_fp_mont15Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont15Lbmi2,@function +mcl_fp_mont15Lbmi2: # @mcl_fp_mont15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2044, %esp # imm = 0x7FC + calll .L226$pb +.L226$pb: + popl %ebx +.Ltmp47: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp47-.L226$pb), %ebx + movl 2076(%esp), %eax + movl -4(%eax), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1976(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 1976(%esp), %ebp + movl 1980(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 2036(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 2032(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 2028(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 2024(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2020(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2016(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2012(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 2008(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 2004(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 2000(%esp), %edi + movl 1996(%esp), %esi + movl 1992(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 1988(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 1984(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 1912(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + addl 1912(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 1916(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1920(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1924(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1928(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1932(%esp), %esi + adcl 1936(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 1940(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1944(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1948(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1952(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1956(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1960(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1964(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1968(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 1972(%esp), %ebp + sbbl %eax, %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1848(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 116(%esp), %eax # 4-byte Reload + andl $1, %eax + movl 88(%esp), %edx # 4-byte Reload + addl 1848(%esp), %edx + movl 96(%esp), %ecx # 4-byte Reload + adcl 1852(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1856(%esp), %ecx + movl %ecx, 
84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1860(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1864(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 1868(%esp), %edi + movl 56(%esp), %ecx # 4-byte Reload + adcl 1872(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1876(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1880(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1884(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1888(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1892(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1896(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1900(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 1904(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + adcl 1908(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %edx, %eax + movl %edx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1784(%esp), %ecx + movl 2076(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + andl $1, %ebp + movl %ebp, %ecx + addl 1784(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1792(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 1804(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1808(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 1812(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 1816(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1820(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1824(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1828(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1832(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 1836(%esp), %esi + movl 108(%esp), %ebp # 4-byte Reload + adcl 1840(%esp), %ebp + movl 116(%esp), %eax # 4-byte Reload + adcl 1844(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1720(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 96(%esp), %ecx # 4-byte Reload + addl 1720(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1724(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1728(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1732(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1736(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1740(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1744(%esp), %edi + movl 
%edi, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1748(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1752(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 1756(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1760(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1764(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 1768(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + adcl 1772(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1776(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1780(%esp), %esi + sbbl %ebp, %ebp + movl %ecx, %eax + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1656(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %ebp + movl %ebp, %ecx + movl 96(%esp), %eax # 4-byte Reload + addl 1656(%esp), %eax + movl 84(%esp), %eax # 4-byte Reload + adcl 1660(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1664(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1668(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1672(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1676(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1680(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1684(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 1688(%esp), %ebp + adcl 1692(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1696(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1700(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1704(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1708(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 1712(%esp), %edi + adcl 1716(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1592(%esp), %ecx + movl 2068(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + movl 84(%esp), %ecx # 4-byte Reload + addl 1592(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 1596(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1600(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1604(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1608(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1612(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1616(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1620(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1624(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 1628(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 1632(%esp), %eax + movl %eax, 
112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1636(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1640(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1644(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1648(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1652(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %ebp + movl %ebp, %eax + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1528(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %edi + movl %edi, %ecx + addl 1528(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 1532(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1540(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 1544(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1548(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1552(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1564(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 112(%esp), %edi # 4-byte Reload + adcl 1568(%esp), %edi + movl 104(%esp), %esi # 4-byte Reload + adcl 1572(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 1576(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1580(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1584(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1588(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1464(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 92(%esp), %ecx # 4-byte Reload + addl 1464(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1472(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1476(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1480(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 1484(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 1488(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1492(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1496(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 1500(%esp), %edi + movl %edi, 112(%esp) # 4-byte Spill + adcl 1504(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1508(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 1512(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1516(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1520(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), 
%eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1400(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + movl 92(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1400(%esp), %edi + movl 80(%esp), %ecx # 4-byte Reload + adcl 1404(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1408(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1412(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1416(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 1420(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1424(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1428(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 1432(%esp), %edi + movl 112(%esp), %ecx # 4-byte Reload + adcl 1436(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1440(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1444(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 1448(%esp), %esi + movl %esi, %ebp + movl 88(%esp), %esi # 4-byte Reload + adcl 1452(%esp), %esi + movl 96(%esp), %ecx # 4-byte Reload + adcl 1456(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1460(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1336(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 80(%esp), %ecx # 4-byte Reload + addl 1336(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1360(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1364(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1376(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1380(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + adcl 1384(%esp), %esi + movl %esi, %ebp + movl 96(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 1392(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1272(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + movl 80(%esp), %eax # 4-byte Reload + 
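+# Montgomery reduction step (CIOS): the .LmulPv480x32 call above received
+# q = t[0] * p' (p' = -p^-1 mod 2^32, kept at 52(%esp)) and wrote q*p into
+# the stack buffer; the addl/adcl chain below folds q*p into the
+# accumulator, cancelling the low limb and shifting the result one word.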
andl $1, %eax + addl 1272(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 1276(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1280(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1284(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1288(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1296(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1300(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1304(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1308(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1312(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1316(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + adcl 1320(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1324(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl 1328(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1332(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %edi + movl 2072(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 64(%esp), %ecx # 4-byte Reload + addl 1208(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1212(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1216(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 1232(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 1244(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 1248(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1260(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1264(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1268(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1144(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %edi + movl %edi, %ecx + movl 64(%esp), %eax # 4-byte Reload + addl 1144(%esp), %eax + movl 56(%esp), %eax # 4-byte Reload + adcl 1148(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 
60(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 1156(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1168(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1180(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1188(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 1196(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 1200(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1204(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1080(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 56(%esp), %ecx # 4-byte Reload + addl 1080(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1088(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 1092(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 1128(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1016(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %edi + movl %edi, %ecx + addl 1016(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1028(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 1032(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 
112(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 1044(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 1060(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 952(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 952(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 964(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 976(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 992(%esp), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + movl %ebp, %eax + andl $1, %eax + addl 888(%esp), %esi + movl 68(%esp), %ecx # 4-byte Reload + adcl 892(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 896(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 900(%esp), %edi + movl 100(%esp), %ecx # 4-byte Reload + adcl 904(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 908(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 912(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 916(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 920(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 924(%esp), %ebp + movl 96(%esp), %ecx # 4-byte Reload + adcl 928(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + 
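+# %eax holds the carry out of the preceding multiply-accumulate pass,
+# captured via sbbl/andl $1; it is folded back in as the top word by the
+# "adcl $0, %eax" that terminates this reduction chain.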
movl 84(%esp), %ecx # 4-byte Reload + adcl 932(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 936(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 940(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 944(%esp), %esi + movl 56(%esp), %ecx # 4-byte Reload + adcl 948(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 824(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 68(%esp), %ecx # 4-byte Reload + addl 824(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 832(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 856(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 864(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 872(%esp), %edi + adcl 876(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + movl 68(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 760(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 776(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 800(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 804(%esp), %ebp + adcl 808(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 816(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + 
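+# End of this round: spill the final carry word, then start the next outer
+# iteration by loading the next 32-bit word of the second operand
+# (40(%eax), with the operand pointer at 2072(%esp)) and computing the
+# next partial product via .LmulPv480x32.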
movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 696(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 708(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 736(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 748(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 752(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %edi + movl %edi, %ecx + addl 632(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 656(%esp), %edi + movl 88(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 672(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 688(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 76(%esp), %ecx # 4-byte Reload + addl 568(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 
100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 588(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 596(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 604(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %edi + movl %edi, %ecx + addl 504(%esp), %esi + movl 100(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 516(%esp), %edi + movl 108(%esp), %esi # 4-byte Reload + adcl 520(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 532(%esp), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 560(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 100(%esp), %ecx # 4-byte Reload + addl 440(%esp), %ecx + movl 112(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 448(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + adcl 452(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 460(%esp), %edi + movl 96(%esp), %eax # 4-byte Reload + adcl 464(%esp), 
%eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 492(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %ebp + movl %ebp, %ecx + addl 376(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 388(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 396(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 404(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 416(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 112(%esp), %ecx # 4-byte Reload + addl 312(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 320(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 336(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 348(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 352(%esp), %ebp + movl 
60(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %edi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %esi + movl %esi, %ecx + addl 248(%esp), %edi + movl 104(%esp), %esi # 4-byte Reload + adcl 252(%esp), %esi + movl 108(%esp), %edi # 4-byte Reload + adcl 256(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 288(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl %esi, %ecx + movl 96(%esp), %esi # 4-byte Reload + addl 184(%esp), %ecx + adcl 188(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 196(%esp), %ebp + adcl 200(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + 
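+# Final round: after this last reduction the accumulated value t is
+# compared against the modulus (the subl/sbbl chain below) and t or t-p is
+# selected limb-by-limb before being stored through the result pointer at
+# 2064(%esp).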
movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + movl 104(%esp), %ebx # 4-byte Reload + andl $1, %ebx + addl 120(%esp), %edi + movl %ebp, %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 128(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + adcl 132(%esp), %edi + adcl 136(%esp), %esi + movl 84(%esp), %edx # 4-byte Reload + adcl 140(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 148(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 152(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 156(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 160(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 164(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 168(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 172(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 176(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 112(%esp), %edx # 4-byte Reload + adcl 180(%esp), %edx + movl %edx, 112(%esp) # 4-byte Spill + adcl $0, %ebx + movl %ebx, 104(%esp) # 4-byte Spill + movl %eax, %edx + movl 2076(%esp), %ebp + subl (%ebp), %edx + sbbl 4(%ebp), %ecx + movl %edi, %eax + sbbl 8(%ebp), %eax + movl %esi, %ebx + sbbl 12(%ebp), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 84(%esp), %ebx # 4-byte Reload + sbbl 16(%ebp), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 92(%esp), %ebx # 4-byte Reload + sbbl 20(%ebp), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 80(%esp), %ebx # 4-byte Reload + sbbl 24(%ebp), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 64(%esp), %ebx # 4-byte Reload + sbbl 28(%ebp), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + movl 56(%esp), %ebx # 4-byte Reload + sbbl 32(%ebp), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 60(%esp), %ebx # 4-byte Reload + sbbl 36(%ebp), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 68(%esp), %ebx # 4-byte Reload + sbbl 40(%ebp), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 72(%esp), %ebx # 4-byte Reload + sbbl 44(%ebp), %ebx + movl %ebx, 48(%esp) # 4-byte Spill + movl 76(%esp), %ebx # 4-byte Reload + sbbl 48(%ebp), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 100(%esp), %ebx # 4-byte Reload + sbbl 52(%ebp), %ebx + movl %ebx, 88(%esp) # 4-byte Spill + movl 112(%esp), %ebx # 4-byte Reload + sbbl 56(%ebp), %ebx + movl %ebx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ebx # 4-byte Reload + movl 108(%esp), %ebp # 4-byte Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB226_2 +# BB#1: + movl %edx, %ebp +.LBB226_2: + movl 2064(%esp), %edx + movl %ebp, (%edx) + testb %bl, %bl + movl 116(%esp), %ebp # 4-byte Reload + jne .LBB226_4 +# BB#3: + movl %ecx, %ebp +.LBB226_4: + movl %ebp, 4(%edx) + jne .LBB226_6 +# BB#5: + movl %eax, %edi +.LBB226_6: + movl %edi, 8(%edx) + jne .LBB226_8 +# BB#7: + movl 
16(%esp), %esi # 4-byte Reload +.LBB226_8: + movl %esi, 12(%edx) + movl 84(%esp), %eax # 4-byte Reload + jne .LBB226_10 +# BB#9: + movl 20(%esp), %eax # 4-byte Reload +.LBB226_10: + movl %eax, 16(%edx) + movl 92(%esp), %eax # 4-byte Reload + jne .LBB226_12 +# BB#11: + movl 24(%esp), %eax # 4-byte Reload +.LBB226_12: + movl %eax, 20(%edx) + movl 80(%esp), %eax # 4-byte Reload + jne .LBB226_14 +# BB#13: + movl 28(%esp), %eax # 4-byte Reload +.LBB226_14: + movl %eax, 24(%edx) + movl 64(%esp), %eax # 4-byte Reload + jne .LBB226_16 +# BB#15: + movl 32(%esp), %eax # 4-byte Reload +.LBB226_16: + movl %eax, 28(%edx) + movl 56(%esp), %eax # 4-byte Reload + jne .LBB226_18 +# BB#17: + movl 36(%esp), %eax # 4-byte Reload +.LBB226_18: + movl %eax, 32(%edx) + movl 60(%esp), %eax # 4-byte Reload + jne .LBB226_20 +# BB#19: + movl 40(%esp), %eax # 4-byte Reload +.LBB226_20: + movl %eax, 36(%edx) + movl 68(%esp), %eax # 4-byte Reload + jne .LBB226_22 +# BB#21: + movl 44(%esp), %eax # 4-byte Reload +.LBB226_22: + movl %eax, 40(%edx) + movl 72(%esp), %eax # 4-byte Reload + jne .LBB226_24 +# BB#23: + movl 48(%esp), %eax # 4-byte Reload +.LBB226_24: + movl %eax, 44(%edx) + movl 76(%esp), %eax # 4-byte Reload + jne .LBB226_26 +# BB#25: + movl 52(%esp), %eax # 4-byte Reload +.LBB226_26: + movl %eax, 48(%edx) + movl 100(%esp), %eax # 4-byte Reload + jne .LBB226_28 +# BB#27: + movl 88(%esp), %eax # 4-byte Reload +.LBB226_28: + movl %eax, 52(%edx) + movl 112(%esp), %eax # 4-byte Reload + jne .LBB226_30 +# BB#29: + movl 96(%esp), %eax # 4-byte Reload +.LBB226_30: + movl %eax, 56(%edx) + addl $2044, %esp # imm = 0x7FC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end226: + .size mcl_fp_mont15Lbmi2, .Lfunc_end226-mcl_fp_mont15Lbmi2 + + .globl mcl_fp_montNF15Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF15Lbmi2,@function +mcl_fp_montNF15Lbmi2: # @mcl_fp_montNF15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2028, %esp # imm = 0x7EC + calll .L227$pb +.L227$pb: + popl %ebx +.Ltmp48: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp48-.L227$pb), %ebx + movl 2060(%esp), %eax + movl -4(%eax), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1960(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1960(%esp), %ebp + movl 1964(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 2020(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2016(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2012(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2008(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 2004(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2000(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1996(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1992(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1988(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1984(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1980(%esp), %esi + movl 1976(%esp), %edi + movl 1972(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 1968(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 1896(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 1896(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 1900(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1904(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload 
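+# Note: mcl_fp_montNF15Lbmi2 omits the per-round carry capture
+# (sbbl/andl $1) used by mcl_fp_mont15Lbmi2 above; the NF variant is
+# understood to apply when the modulus leaves the top bit free, so no
+# extra carry word is needed and the final fixup differs.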
+ adcl 1908(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1912(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl 1916(%esp), %esi + movl %esi, %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 1920(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1924(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1928(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1932(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1936(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1940(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 1944(%esp), %ebp + movl 76(%esp), %esi # 4-byte Reload + adcl 1948(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 1952(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1956(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1832(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1892(%esp), %eax + movl 92(%esp), %edx # 4-byte Reload + addl 1832(%esp), %edx + movl 68(%esp), %ecx # 4-byte Reload + adcl 1836(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1840(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1844(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 1848(%esp), %edi + movl 48(%esp), %ecx # 4-byte Reload + adcl 1852(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1856(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1860(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1864(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1868(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1872(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 1876(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl 1880(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 1884(%esp), %ebp + movl 100(%esp), %ecx # 4-byte Reload + adcl 1888(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1768(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 1768(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 1772(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1776(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1780(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1784(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1792(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 
1804(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1808(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1816(%esp), %eax + movl %eax, %esi + adcl 1820(%esp), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 1824(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 1828(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1704(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1764(%esp), %eax + movl 68(%esp), %edx # 4-byte Reload + addl 1704(%esp), %edx + movl 88(%esp), %ecx # 4-byte Reload + adcl 1708(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1712(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1716(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1720(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1724(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1728(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1732(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 1736(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1740(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 1744(%esp), %edi + adcl 1748(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1752(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl 1756(%esp), %ebp + movl 92(%esp), %ecx # 4-byte Reload + adcl 1760(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1640(%esp), %ecx + movl 2060(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + addl 1640(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1644(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1648(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1652(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1656(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1660(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1664(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1668(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1672(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1676(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1680(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1684(%esp), %eax + movl %eax, %esi + movl 96(%esp), %edi # 4-byte Reload + adcl 1688(%esp), %edi + adcl 1692(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1696(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 1700(%esp), %ebp + movl 2056(%esp), %eax + movl 
12(%eax), %eax + movl %eax, (%esp) + leal 1576(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1636(%esp), %eax + movl 88(%esp), %edx # 4-byte Reload + addl 1576(%esp), %edx + movl 72(%esp), %ecx # 4-byte Reload + adcl 1580(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1584(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1588(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1592(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1596(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1600(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1604(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1608(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1612(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 1616(%esp), %esi + adcl 1620(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1624(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1628(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1632(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1512(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 1512(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 1516(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1520(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1528(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 1532(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1540(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1544(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 1548(%esp), %ebp + adcl 1552(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1564(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1568(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1572(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1448(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1508(%esp), %eax + movl 72(%esp), %edx # 4-byte Reload + addl 1448(%esp), %edx + movl 40(%esp), %ecx # 4-byte Reload + adcl 1452(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1456(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1460(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + adcl 1464(%esp), 
%edi + movl %edi, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1468(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 1472(%esp), %edi + movl 84(%esp), %ecx # 4-byte Reload + adcl 1476(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 1480(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl 1484(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1488(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1492(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1496(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1500(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 1504(%esp), %ebp + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1384(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 1384(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1400(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1404(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1408(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1412(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1432(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1436(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1440(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1444(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1320(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1380(%esp), %edx + movl 40(%esp), %ecx # 4-byte Reload + addl 1320(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 1324(%esp), %ebp + movl 44(%esp), %edi # 4-byte Reload + adcl 1328(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 1352(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 96(%esp) # 
4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1360(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 1368(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1376(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %ecx, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1256(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + movl 40(%esp), %eax # 4-byte Reload + addl 1256(%esp), %eax + adcl 1260(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + adcl 1264(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 1272(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 1296(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1304(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 1312(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1192(%esp), %ecx + movl 2052(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + movl 1252(%esp), %eax + movl 48(%esp), %edx # 4-byte Reload + addl 1192(%esp), %edx + movl 44(%esp), %ecx # 4-byte Reload + adcl 1196(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1200(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 1204(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1208(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 1212(%esp), %ebp + movl 80(%esp), %ecx # 4-byte Reload + adcl 1216(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1220(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1224(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl 1228(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1232(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1236(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1240(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + adcl 1244(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 1248(%esp), %esi + adcl $0, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 36(%esp), %eax 
# 4-byte Folded Reload + movl %eax, (%esp) + leal 1128(%esp), %ecx + movl 2060(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + addl 1128(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 1140(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1148(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 1168(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1184(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 1188(%esp), %esi + movl 2056(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1064(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1124(%esp), %eax + movl 44(%esp), %edx # 4-byte Reload + addl 1064(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + adcl 1068(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 1072(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1076(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1080(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 1084(%esp), %edi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1088(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1092(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1096(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl 1100(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1104(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1108(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1112(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1116(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 1120(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %esi + movl %edx, %ebp + movl %ebp, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1000(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 1000(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 1012(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1020(%esp), %edi + movl %edi, 80(%esp) # 
4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 1028(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1044(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 1060(%esp), %esi + movl 2056(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 936(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 996(%esp), %eax + movl 52(%esp), %edx # 4-byte Reload + addl 936(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + adcl 940(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 944(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 948(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 952(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 956(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 960(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 964(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 968(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 972(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 976(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 980(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 984(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 988(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 992(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %esi + movl %edx, %edi + movl %edi, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 872(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 872(%esp), %edi + movl 56(%esp), %ebp # 4-byte Reload + adcl 876(%esp), %ebp + movl 60(%esp), %edi # 4-byte Reload + adcl 880(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 
916(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 932(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 808(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 868(%esp), %eax + movl %ebp, %ecx + addl 808(%esp), %ecx + adcl 812(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 816(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 820(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 824(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 828(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 832(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 836(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 840(%esp), %edi + movl 88(%esp), %esi # 4-byte Reload + adcl 844(%esp), %esi + movl 72(%esp), %edx # 4-byte Reload + adcl 848(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 852(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 856(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 860(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 864(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 744(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 744(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 768(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 776(%esp), %edi + adcl 780(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 792(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 680(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 740(%esp), 
%eax + movl 60(%esp), %ecx # 4-byte Reload + addl 680(%esp), %ecx + movl 84(%esp), %edx # 4-byte Reload + adcl 684(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 688(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 692(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 696(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + adcl 700(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 704(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + adcl 708(%esp), %edi + movl 88(%esp), %edx # 4-byte Reload + adcl 712(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 716(%esp), %ebp + movl 64(%esp), %edx # 4-byte Reload + adcl 720(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl 724(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 728(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 732(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 736(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 616(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 616(%esp), %esi + movl 84(%esp), %esi # 4-byte Reload + adcl 620(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 644(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 648(%esp), %edi + adcl 652(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 656(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 612(%esp), %edx + movl %esi, %ecx + addl 552(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 572(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 580(%esp), 
%edi + movl %edi, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 588(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 84(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 488(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 488(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl %esi, %ebp + adcl 508(%esp), %ebp + movl 68(%esp), %edi # 4-byte Reload + adcl 512(%esp), %edi + movl 88(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 528(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 484(%esp), %edx + movl 80(%esp), %ecx # 4-byte Reload + addl 424(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 440(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + adcl 444(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %esi, %edi + adcl 460(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 
60(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 360(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 360(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + adcl 368(%esp), %esi + movl 100(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 376(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 396(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 400(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 296(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 356(%esp), %edx + movl 76(%esp), %ecx # 4-byte Reload + addl 296(%esp), %ecx + adcl 300(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 308(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 332(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 336(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 232(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 
100(%esp), %ebp # 4-byte Reload + adcl 240(%esp), %ebp + movl 92(%esp), %esi # 4-byte Reload + adcl 244(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 272(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 276(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 228(%esp), %edx + movl 96(%esp), %ecx # 4-byte Reload + addl 168(%esp), %ecx + adcl 172(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + adcl 176(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 188(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 208(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 212(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 104(%esp), %ecx + movl 2060(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + addl 104(%esp), %edi + movl 68(%esp), %edi # 4-byte Reload + movl 100(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 112(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl %ecx, %ebx + adcl 116(%esp), %edi + movl 88(%esp), %ecx # 4-byte Reload + adcl 120(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + adcl 124(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 128(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 132(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 
136(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 140(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 144(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + adcl 148(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 152(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 156(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 160(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 164(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl %eax, %edx + movl 2060(%esp), %ecx + subl (%ecx), %edx + movl %ebx, %ebp + sbbl 4(%ecx), %ebp + movl %edi, %ebx + sbbl 8(%ecx), %ebx + movl 88(%esp), %eax # 4-byte Reload + sbbl 12(%ecx), %eax + sbbl 16(%ecx), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + sbbl 20(%ecx), %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + sbbl 24(%ecx), %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + sbbl 28(%ecx), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + sbbl 32(%ecx), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 36(%ecx), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + sbbl 40(%ecx), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + sbbl 44(%ecx), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + sbbl 48(%ecx), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + sbbl 52(%ecx), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + sbbl 56(%ecx), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl %esi, %ecx + sarl $31, %ecx + testl %ecx, %ecx + movl 100(%esp), %ecx # 4-byte Reload + js .LBB227_2 +# BB#1: + movl %edx, %ecx +.LBB227_2: + movl 2048(%esp), %edx + movl %ecx, (%edx) + movl 92(%esp), %esi # 4-byte Reload + js .LBB227_4 +# BB#3: + movl %ebp, %esi +.LBB227_4: + movl %esi, 4(%edx) + movl 88(%esp), %ecx # 4-byte Reload + js .LBB227_6 +# BB#5: + movl %ebx, %edi +.LBB227_6: + movl %edi, 8(%edx) + js .LBB227_8 +# BB#7: + movl %eax, %ecx +.LBB227_8: + movl %ecx, 12(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB227_10 +# BB#9: + movl 4(%esp), %eax # 4-byte Reload +.LBB227_10: + movl %eax, 16(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB227_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB227_12: + movl %eax, 20(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB227_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB227_14: + movl %eax, 24(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB227_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB227_16: + movl %eax, 28(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB227_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB227_18: + movl %eax, 32(%edx) + movl 44(%esp), %eax # 4-byte Reload + js .LBB227_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB227_20: + movl %eax, 36(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB227_22 +# BB#21: + movl 28(%esp), %eax # 4-byte Reload +.LBB227_22: + movl %eax, 40(%edx) + movl 84(%esp), %eax # 4-byte Reload + js .LBB227_24 +# BB#23: + movl 32(%esp), %eax # 4-byte Reload +.LBB227_24: + movl %eax, 44(%edx) + movl 80(%esp), %eax # 4-byte Reload + 
js .LBB227_26 +# BB#25: + movl 36(%esp), %eax # 4-byte Reload +.LBB227_26: + movl %eax, 48(%edx) + movl 76(%esp), %eax # 4-byte Reload + js .LBB227_28 +# BB#27: + movl 40(%esp), %eax # 4-byte Reload +.LBB227_28: + movl %eax, 52(%edx) + movl 96(%esp), %eax # 4-byte Reload + js .LBB227_30 +# BB#29: + movl 68(%esp), %eax # 4-byte Reload +.LBB227_30: + movl %eax, 56(%edx) + addl $2028, %esp # imm = 0x7EC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end227: + .size mcl_fp_montNF15Lbmi2, .Lfunc_end227-mcl_fp_montNF15Lbmi2 + + .globl mcl_fp_montRed15Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed15Lbmi2,@function +mcl_fp_montRed15Lbmi2: # @mcl_fp_montRed15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1148, %esp # imm = 0x47C + calll .L228$pb +.L228$pb: + popl %eax +.Ltmp49: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp49-.L228$pb), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1176(%esp), %edx + movl -4(%edx), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 1172(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 80(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 84(%esp) # 4-byte Spill + imull %esi, %ebx + movl 116(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%ecx), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%ecx), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 104(%ecx), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 100(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 96(%ecx), %esi + movl %esi, 152(%esp) # 4-byte Spill + movl 92(%ecx), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 88(%ecx), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 84(%ecx), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 80(%ecx), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 76(%ecx), %esi + movl %esi, 168(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl %esi, 164(%esp) # 4-byte Spill + movl 68(%ecx), %esi + movl %esi, 176(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 60(%ecx), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 56(%ecx), %esi + movl %esi, 156(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl %esi, 140(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 44(%ecx), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 36(%ecx), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 20(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 16(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 12(%ecx), %edi + movl 8(%ecx), %esi + movl (%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 56(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 
4(%edx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 1080(%esp), %ecx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 80(%esp), %eax # 4-byte Reload + addl 1080(%esp), %eax + movl 84(%esp), %ecx # 4-byte Reload + adcl 1084(%esp), %ecx + adcl 1088(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + adcl 1092(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1108(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + adcl $0, 180(%esp) # 4-byte Folded Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 164(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + movl 148(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1016(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + andl $1, %ebp + movl %ebp, %ecx + addl 1016(%esp), %esi + movl 84(%esp), %edx # 4-byte Reload + adcl 1020(%esp), %edx + movl 64(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1044(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl 1060(%esp), %ebp + movl 140(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + 
movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 164(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + movl 144(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, %esi + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 148(%esp) # 4-byte Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 952(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 952(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 956(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 992(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %ebp # 4-byte Reload + adcl 1004(%esp), %ebp + movl 180(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + adcl $0, 164(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 144(%esp) # 4-byte Spill + movl 160(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 888(%esp), %esi + movl 68(%esp), %esi # 4-byte Reload + adcl 892(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 
72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl 936(%esp), %ebp + movl %ebp, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 160(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + movl 132(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 104(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 824(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 824(%esp), %esi + movl 72(%esp), %ecx # 4-byte Reload + adcl 828(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 
160(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 132(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 104(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 760(%esp), %esi + movl 76(%esp), %esi # 4-byte Reload + adcl 764(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + movl 152(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 132(%esp) # 4-byte Folded Spill + movl 128(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 696(%esp), %esi + movl 80(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill 
+ movl 172(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 152(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 128(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + movl 108(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 632(%esp), %edi + movl 96(%esp), %ecx # 4-byte Reload + adcl 636(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %ebp # 4-byte Reload + adcl 672(%esp), %ebp + movl 164(%esp), %edi # 4-byte Reload + adcl 676(%esp), %edi + movl 168(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 568(%esp), %esi + movl 100(%esp), %ecx # 4-byte Reload + adcl 572(%esp), %ecx + movl 112(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl 
%eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + adcl 604(%esp), %ebp + movl %ebp, 176(%esp) # 4-byte Spill + adcl 608(%esp), %edi + movl %edi, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %edi # 4-byte Reload + adcl 616(%esp), %edi + movl 160(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 1176(%esp), %eax + movl %eax, %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 504(%esp), %esi + movl 112(%esp), %ecx # 4-byte Reload + adcl 508(%esp), %ecx + movl 124(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %esi # 4-byte Reload + adcl 524(%esp), %esi + movl 172(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + adcl 548(%esp), %edi + movl %edi, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ecx, %edi + movl %edi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 440(%esp), %edi + movl 124(%esp), %ecx # 
4-byte Reload + adcl 444(%esp), %ecx + movl 120(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %edi # 4-byte Reload + adcl 452(%esp), %edi + adcl 456(%esp), %esi + movl %esi, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %esi # 4-byte Reload + adcl 464(%esp), %esi + movl 176(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 376(%esp), %ebp + movl 120(%esp), %ebp # 4-byte Reload + adcl 380(%esp), %ebp + adcl 384(%esp), %edi + movl %edi, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %edi # 4-byte Reload + adcl 392(%esp), %edi + adcl 396(%esp), %esi + movl %esi, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %esi # 4-byte Reload + adcl 412(%esp), %esi + movl 160(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ebp, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 312(%esp), %ebp + movl 140(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl 156(%esp), 
%ecx # 4-byte Reload + adcl 320(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + adcl 324(%esp), %edi + movl %edi, 172(%esp) # 4-byte Spill + movl 180(%esp), %ecx # 4-byte Reload + adcl 328(%esp), %ecx + movl %ecx, 180(%esp) # 4-byte Spill + movl 176(%esp), %ecx # 4-byte Reload + adcl 332(%esp), %ecx + movl %ecx, 176(%esp) # 4-byte Spill + movl 164(%esp), %ecx # 4-byte Reload + adcl 336(%esp), %ecx + movl %ecx, 164(%esp) # 4-byte Spill + movl 168(%esp), %ecx # 4-byte Reload + adcl 340(%esp), %ecx + movl %ecx, 168(%esp) # 4-byte Spill + adcl 344(%esp), %esi + movl %esi, 144(%esp) # 4-byte Spill + movl 160(%esp), %ecx # 4-byte Reload + adcl 348(%esp), %ecx + movl %ecx, 160(%esp) # 4-byte Spill + movl 148(%esp), %ebp # 4-byte Reload + adcl 352(%esp), %ebp + movl 136(%esp), %ecx # 4-byte Reload + adcl 356(%esp), %ecx + movl %ecx, 136(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 360(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 364(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 368(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 372(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + movl 84(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %eax, %edi + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 248(%esp), %edi + movl 156(%esp), %ecx # 4-byte Reload + adcl 252(%esp), %ecx + movl 172(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + adcl 284(%esp), %ebp + movl %ebp, 148(%esp) # 4-byte Spill + movl 136(%esp), %edi # 4-byte Reload + adcl 288(%esp), %edi + movl 152(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 308(%esp), %ebp + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 184(%esp), %esi + movl 172(%esp), %edx # 4-byte Reload + adcl 188(%esp), %edx + movl %edx, 172(%esp) # 4-byte Spill + movl 180(%esp), %ecx # 4-byte Reload + adcl 192(%esp), %ecx + movl %ecx, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + 
movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %esi # 4-byte Reload + adcl 204(%esp), %esi + movl %esi, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl 220(%esp), %edi + movl %edi, 136(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 240(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + movl %edx, %eax + subl 16(%esp), %edx # 4-byte Folded Reload + sbbl 4(%esp), %ecx # 4-byte Folded Reload + movl 176(%esp), %eax # 4-byte Reload + sbbl 8(%esp), %eax # 4-byte Folded Reload + movl 164(%esp), %ebp # 4-byte Reload + sbbl 12(%esp), %ebp # 4-byte Folded Reload + sbbl 20(%esp), %esi # 4-byte Folded Reload + movl 144(%esp), %edi # 4-byte Reload + sbbl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 84(%esp) # 4-byte Spill + movl 160(%esp), %edi # 4-byte Reload + sbbl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 88(%esp) # 4-byte Spill + movl 148(%esp), %edi # 4-byte Reload + sbbl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 92(%esp) # 4-byte Spill + movl 136(%esp), %edi # 4-byte Reload + sbbl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 96(%esp) # 4-byte Spill + movl 152(%esp), %edi # 4-byte Reload + sbbl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 100(%esp) # 4-byte Spill + movl 132(%esp), %edi # 4-byte Reload + sbbl 44(%esp), %edi # 4-byte Folded Reload + movl %edi, 112(%esp) # 4-byte Spill + movl 128(%esp), %edi # 4-byte Reload + sbbl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 120(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + sbbl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 124(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + sbbl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 140(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + sbbl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 156(%esp) # 4-byte Spill + sbbl $0, %ebx + andl $1, %ebx + movl %ebx, %edi + jne .LBB228_2 +# BB#1: + movl %edx, 172(%esp) # 4-byte Spill +.LBB228_2: + movl 1168(%esp), %edx + movl 172(%esp), %ebx # 4-byte Reload + movl %ebx, (%edx) + movl %edi, %ebx + testb %bl, %bl + jne .LBB228_4 +# BB#3: + movl %ecx, 180(%esp) # 4-byte Spill +.LBB228_4: + movl 180(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%edx) + movl 176(%esp), %ecx # 4-byte Reload + jne .LBB228_6 +# BB#5: + movl %eax, %ecx +.LBB228_6: + movl %ecx, 8(%edx) + movl 164(%esp), %eax # 4-byte Reload + jne .LBB228_8 +# BB#7: + movl %ebp, %eax +.LBB228_8: + movl %eax, 12(%edx) + movl 108(%esp), %ecx # 4-byte Reload + movl 148(%esp), %eax # 4-byte Reload + movl 168(%esp), %ebp # 4-byte Reload + jne .LBB228_10 +# BB#9: + movl %esi, %ebp +.LBB228_10: + movl %ebp, 16(%edx) + 
movl 152(%esp), %ebp # 4-byte Reload + movl 144(%esp), %ebx # 4-byte Reload + jne .LBB228_12 +# BB#11: + movl 84(%esp), %ebx # 4-byte Reload +.LBB228_12: + movl %ebx, 20(%edx) + movl 132(%esp), %ebx # 4-byte Reload + movl 160(%esp), %edi # 4-byte Reload + jne .LBB228_14 +# BB#13: + movl 88(%esp), %edi # 4-byte Reload +.LBB228_14: + movl %edi, 24(%edx) + movl 128(%esp), %edi # 4-byte Reload + jne .LBB228_16 +# BB#15: + movl 92(%esp), %eax # 4-byte Reload +.LBB228_16: + movl %eax, 28(%edx) + movl 116(%esp), %esi # 4-byte Reload + jne .LBB228_18 +# BB#17: + movl 96(%esp), %eax # 4-byte Reload + movl %eax, 136(%esp) # 4-byte Spill +.LBB228_18: + movl 136(%esp), %eax # 4-byte Reload + movl %eax, 32(%edx) + jne .LBB228_20 +# BB#19: + movl 100(%esp), %ebp # 4-byte Reload +.LBB228_20: + movl %ebp, 36(%edx) + movl 104(%esp), %eax # 4-byte Reload + jne .LBB228_22 +# BB#21: + movl 112(%esp), %ebx # 4-byte Reload +.LBB228_22: + movl %ebx, 40(%edx) + jne .LBB228_24 +# BB#23: + movl 120(%esp), %edi # 4-byte Reload +.LBB228_24: + movl %edi, 44(%edx) + jne .LBB228_26 +# BB#25: + movl 124(%esp), %esi # 4-byte Reload +.LBB228_26: + movl %esi, 48(%edx) + jne .LBB228_28 +# BB#27: + movl 140(%esp), %eax # 4-byte Reload +.LBB228_28: + movl %eax, 52(%edx) + jne .LBB228_30 +# BB#29: + movl 156(%esp), %ecx # 4-byte Reload +.LBB228_30: + movl %ecx, 56(%edx) + addl $1148, %esp # imm = 0x47C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end228: + .size mcl_fp_montRed15Lbmi2, .Lfunc_end228-mcl_fp_montRed15Lbmi2 + + .globl mcl_fp_addPre15Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre15Lbmi2,@function +mcl_fp_addPre15Lbmi2: # @mcl_fp_addPre15Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %ebx + adcl 8(%ecx), %ebx + movl 16(%esp), %edi + movl %edx, (%edi) + movl 12(%ecx), %edx + movl %esi, 4(%edi) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %ebx, 8(%edi) + movl 20(%eax), %ebx + movl %edx, 12(%edi) + movl 20(%ecx), %edx + adcl %ebx, %edx + movl 24(%eax), %ebx + movl %esi, 16(%edi) + movl 24(%ecx), %esi + adcl %ebx, %esi + movl 28(%eax), %ebx + movl %edx, 20(%edi) + movl 28(%ecx), %edx + adcl %ebx, %edx + movl 32(%eax), %ebx + movl %esi, 24(%edi) + movl 32(%ecx), %esi + adcl %ebx, %esi + movl 36(%eax), %ebx + movl %edx, 28(%edi) + movl 36(%ecx), %edx + adcl %ebx, %edx + movl 40(%eax), %ebx + movl %esi, 32(%edi) + movl 40(%ecx), %esi + adcl %ebx, %esi + movl 44(%eax), %ebx + movl %edx, 36(%edi) + movl 44(%ecx), %edx + adcl %ebx, %edx + movl 48(%eax), %ebx + movl %esi, 40(%edi) + movl 48(%ecx), %esi + adcl %ebx, %esi + movl 52(%eax), %ebx + movl %edx, 44(%edi) + movl 52(%ecx), %edx + adcl %ebx, %edx + movl %esi, 48(%edi) + movl %edx, 52(%edi) + movl 56(%eax), %eax + movl 56(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 56(%edi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end229: + .size mcl_fp_addPre15Lbmi2, .Lfunc_end229-mcl_fp_addPre15Lbmi2 + + .globl mcl_fp_subPre15Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre15Lbmi2,@function +mcl_fp_subPre15Lbmi2: # @mcl_fp_subPre15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebp + sbbl 8(%edx), %ebp + movl 20(%esp), %ebx + movl %esi, (%ebx) + movl 12(%ecx), %esi + sbbl 12(%edx), 
%esi + movl %edi, 4(%ebx) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebp, 8(%ebx) + movl 20(%edx), %ebp + movl %esi, 12(%ebx) + movl 20(%ecx), %esi + sbbl %ebp, %esi + movl 24(%edx), %ebp + movl %edi, 16(%ebx) + movl 24(%ecx), %edi + sbbl %ebp, %edi + movl 28(%edx), %ebp + movl %esi, 20(%ebx) + movl 28(%ecx), %esi + sbbl %ebp, %esi + movl 32(%edx), %ebp + movl %edi, 24(%ebx) + movl 32(%ecx), %edi + sbbl %ebp, %edi + movl 36(%edx), %ebp + movl %esi, 28(%ebx) + movl 36(%ecx), %esi + sbbl %ebp, %esi + movl 40(%edx), %ebp + movl %edi, 32(%ebx) + movl 40(%ecx), %edi + sbbl %ebp, %edi + movl 44(%edx), %ebp + movl %esi, 36(%ebx) + movl 44(%ecx), %esi + sbbl %ebp, %esi + movl 48(%edx), %ebp + movl %edi, 40(%ebx) + movl 48(%ecx), %edi + sbbl %ebp, %edi + movl 52(%edx), %ebp + movl %esi, 44(%ebx) + movl 52(%ecx), %esi + sbbl %ebp, %esi + movl %edi, 48(%ebx) + movl %esi, 52(%ebx) + movl 56(%edx), %edx + movl 56(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 56(%ebx) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end230: + .size mcl_fp_subPre15Lbmi2, .Lfunc_end230-mcl_fp_subPre15Lbmi2 + + .globl mcl_fp_shr1_15Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_15Lbmi2,@function +mcl_fp_shr1_15Lbmi2: # @mcl_fp_shr1_15Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 40(%ecx) + movl 48(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 44(%ecx) + movl 52(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 48(%ecx) + movl 56(%eax), %eax + shrdl $1, %eax, %esi + movl %esi, 52(%ecx) + shrl %eax + movl %eax, 56(%ecx) + popl %esi + retl +.Lfunc_end231: + .size mcl_fp_shr1_15Lbmi2, .Lfunc_end231-mcl_fp_shr1_15Lbmi2 + + .globl mcl_fp_add15Lbmi2 + .align 16, 0x90 + .type mcl_fp_add15Lbmi2,@function +mcl_fp_add15Lbmi2: # @mcl_fp_add15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $48, %esp + movl 76(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 72(%esp), %eax + addl (%eax), %esi + movl %esi, 4(%esp) # 4-byte Spill + adcl 4(%eax), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 8(%ecx), %edx + adcl 8(%eax), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 12(%eax), %esi + movl 16(%eax), %edx + adcl 12(%ecx), %esi + movl %esi, 36(%esp) # 4-byte Spill + adcl 16(%ecx), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 20(%eax), %edx + adcl 20(%ecx), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 24(%eax), %edx + adcl 24(%ecx), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 28(%eax), %edx + adcl 28(%ecx), %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 32(%eax), %edx + adcl 32(%ecx), %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 36(%eax), %edx + adcl 36(%ecx), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 40(%eax), %edx + adcl 40(%ecx), %edx + movl %edx, 8(%esp) # 4-byte Spill + 
movl 44(%eax), %ebx + adcl 44(%ecx), %ebx + movl %ebx, (%esp) # 4-byte Spill + movl 48(%eax), %ebp + adcl 48(%ecx), %ebp + movl 52(%eax), %edi + adcl 52(%ecx), %edi + movl 56(%eax), %edx + adcl 56(%ecx), %edx + movl 68(%esp), %ecx + movl 4(%esp), %eax # 4-byte Reload + movl %eax, (%ecx) + movl 44(%esp), %esi # 4-byte Reload + movl %esi, 4(%ecx) + movl 40(%esp), %esi # 4-byte Reload + movl %esi, 8(%ecx) + movl 36(%esp), %esi # 4-byte Reload + movl %esi, 12(%ecx) + movl 32(%esp), %esi # 4-byte Reload + movl %esi, 16(%ecx) + movl 28(%esp), %esi # 4-byte Reload + movl %esi, 20(%ecx) + movl 24(%esp), %esi # 4-byte Reload + movl %esi, 24(%ecx) + movl 20(%esp), %esi # 4-byte Reload + movl %esi, 28(%ecx) + movl 16(%esp), %esi # 4-byte Reload + movl %esi, 32(%ecx) + movl 12(%esp), %esi # 4-byte Reload + movl %esi, 36(%ecx) + movl 8(%esp), %esi # 4-byte Reload + movl %esi, 40(%ecx) + movl %ebx, 44(%ecx) + movl %ebp, 48(%ecx) + movl %edi, 52(%ecx) + movl %edx, 56(%ecx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 80(%esp), %esi + subl (%esi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + sbbl 4(%esi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %edx, %eax + movl 40(%esp), %edx # 4-byte Reload + sbbl 8(%esi), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + sbbl 12(%esi), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + sbbl 16(%esi), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 28(%esp), %edx # 4-byte Reload + sbbl 20(%esi), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 24(%esp), %edx # 4-byte Reload + sbbl 24(%esi), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 20(%esp), %edx # 4-byte Reload + sbbl 28(%esi), %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 16(%esp), %edx # 4-byte Reload + sbbl 32(%esi), %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 12(%esp), %edx # 4-byte Reload + sbbl 36(%esi), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 8(%esp), %edx # 4-byte Reload + sbbl 40(%esi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl (%esp), %edx # 4-byte Reload + sbbl 44(%esi), %edx + movl %edx, (%esp) # 4-byte Spill + sbbl 48(%esi), %ebp + sbbl 52(%esi), %edi + sbbl 56(%esi), %eax + sbbl $0, %ebx + testb $1, %bl + jne .LBB232_2 +# BB#1: # %nocarry + movl 4(%esp), %edx # 4-byte Reload + movl %edx, (%ecx) + movl 44(%esp), %edx # 4-byte Reload + movl %edx, 4(%ecx) + movl 40(%esp), %edx # 4-byte Reload + movl %edx, 8(%ecx) + movl 36(%esp), %edx # 4-byte Reload + movl %edx, 12(%ecx) + movl 32(%esp), %edx # 4-byte Reload + movl %edx, 16(%ecx) + movl 28(%esp), %edx # 4-byte Reload + movl %edx, 20(%ecx) + movl 24(%esp), %edx # 4-byte Reload + movl %edx, 24(%ecx) + movl 20(%esp), %edx # 4-byte Reload + movl %edx, 28(%ecx) + movl 16(%esp), %edx # 4-byte Reload + movl %edx, 32(%ecx) + movl 12(%esp), %edx # 4-byte Reload + movl %edx, 36(%ecx) + movl 8(%esp), %edx # 4-byte Reload + movl %edx, 40(%ecx) + movl (%esp), %edx # 4-byte Reload + movl %edx, 44(%ecx) + movl %ebp, 48(%ecx) + movl %edi, 52(%ecx) + movl %eax, 56(%ecx) +.LBB232_2: # %carry + addl $48, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end232: + .size mcl_fp_add15Lbmi2, .Lfunc_end232-mcl_fp_add15Lbmi2 + + .globl mcl_fp_addNF15Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF15Lbmi2,@function +mcl_fp_addNF15Lbmi2: # @mcl_fp_addNF15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $120, %esp + movl 148(%esp), %ecx + movl (%ecx), %eax + movl 4(%ecx), %edx + movl 144(%esp), %esi + addl (%esi), 
%eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 4(%esi), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 52(%ecx), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 44(%ecx), %ebp + movl 40(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 28(%ecx), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 24(%ecx), %eax + movl 20(%ecx), %ebx + movl 16(%ecx), %edi + movl 12(%ecx), %edx + movl 8(%ecx), %ecx + adcl 8(%esi), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 12(%esi), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl 16(%esi), %edi + movl %edi, 68(%esp) # 4-byte Spill + adcl 20(%esi), %ebx + movl %ebx, 72(%esp) # 4-byte Spill + adcl 24(%esi), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 28(%esi), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 32(%esi), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esi), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 40(%esi), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 44(%esi), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 48(%esi), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 52(%esi), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 56(%esi), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 152(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + subl (%esi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + sbbl 4(%esi), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 8(%esi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 12(%esi), %edx + movl %edx, 12(%esp) # 4-byte Spill + sbbl 16(%esi), %edi + movl %edi, 16(%esp) # 4-byte Spill + sbbl 20(%esi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 24(%esi), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + sbbl 28(%esi), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + sbbl 32(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 36(%esi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + sbbl 40(%esi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + movl %edx, %eax + sbbl 44(%esi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + movl %eax, %edi + sbbl 48(%esi), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + movl %ecx, %edi + movl %ecx, %ebx + sbbl 52(%esi), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + movl %ecx, %edi + sbbl 56(%esi), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl %edi, %esi + sarl $31, %esi + testl %esi, %esi + movl 80(%esp), %esi # 4-byte Reload + js .LBB233_2 +# BB#1: + movl (%esp), %esi # 4-byte Reload +.LBB233_2: + movl 140(%esp), %edi + movl %esi, (%edi) + movl 84(%esp), %ecx # 4-byte Reload + js .LBB233_4 +# BB#3: + movl 4(%esp), %ecx # 4-byte Reload +.LBB233_4: + movl %ecx, 4(%edi) + movl 104(%esp), %ecx # 4-byte Reload + movl 72(%esp), %esi # 4-byte Reload + js .LBB233_6 +# BB#5: + movl 
8(%esp), %eax # 4-byte Reload + movl %eax, 76(%esp) # 4-byte Spill +.LBB233_6: + movl 76(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%edi) + movl 64(%esp), %eax # 4-byte Reload + js .LBB233_8 +# BB#7: + movl 12(%esp), %eax # 4-byte Reload +.LBB233_8: + movl %eax, 12(%edi) + movl %ebx, %ebp + movl %edx, %eax + movl 68(%esp), %edx # 4-byte Reload + js .LBB233_10 +# BB#9: + movl 16(%esp), %edx # 4-byte Reload +.LBB233_10: + movl %edx, 16(%edi) + movl 112(%esp), %edx # 4-byte Reload + movl 108(%esp), %ebx # 4-byte Reload + js .LBB233_12 +# BB#11: + movl 20(%esp), %esi # 4-byte Reload +.LBB233_12: + movl %esi, 20(%edi) + js .LBB233_14 +# BB#13: + movl 24(%esp), %esi # 4-byte Reload + movl %esi, 88(%esp) # 4-byte Spill +.LBB233_14: + movl 88(%esp), %esi # 4-byte Reload + movl %esi, 24(%edi) + js .LBB233_16 +# BB#15: + movl 28(%esp), %ecx # 4-byte Reload +.LBB233_16: + movl %ecx, 28(%edi) + js .LBB233_18 +# BB#17: + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 116(%esp) # 4-byte Spill +.LBB233_18: + movl 116(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%edi) + js .LBB233_20 +# BB#19: + movl 36(%esp), %ebx # 4-byte Reload +.LBB233_20: + movl %ebx, 36(%edi) + js .LBB233_22 +# BB#21: + movl 40(%esp), %edx # 4-byte Reload +.LBB233_22: + movl %edx, 40(%edi) + js .LBB233_24 +# BB#23: + movl 44(%esp), %eax # 4-byte Reload +.LBB233_24: + movl %eax, 44(%edi) + movl 96(%esp), %eax # 4-byte Reload + js .LBB233_26 +# BB#25: + movl 48(%esp), %eax # 4-byte Reload +.LBB233_26: + movl %eax, 48(%edi) + js .LBB233_28 +# BB#27: + movl 52(%esp), %ebp # 4-byte Reload +.LBB233_28: + movl %ebp, 52(%edi) + movl 100(%esp), %eax # 4-byte Reload + js .LBB233_30 +# BB#29: + movl 56(%esp), %eax # 4-byte Reload +.LBB233_30: + movl %eax, 56(%edi) + addl $120, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end233: + .size mcl_fp_addNF15Lbmi2, .Lfunc_end233-mcl_fp_addNF15Lbmi2 + + .globl mcl_fp_sub15Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub15Lbmi2,@function +mcl_fp_sub15Lbmi2: # @mcl_fp_sub15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 80(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 84(%esp), %edi + subl (%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esi), %eax + sbbl 32(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 36(%esi), %eax + sbbl 36(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 40(%esi), %edx + sbbl 40(%edi), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 44(%esi), %ecx + sbbl 44(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 48(%esi), %eax + sbbl 48(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 52(%esi), %ebp + sbbl 52(%edi), %ebp + movl 56(%esi), %esi + sbbl 56(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 76(%esp), %ebx + movl 48(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 52(%esp), %edi # 4-byte 
Reload + movl %edi, 12(%ebx) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 28(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 32(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 36(%ebx) + movl %edx, 40(%ebx) + movl %ecx, 44(%ebx) + movl %eax, 48(%ebx) + movl %ebp, 52(%ebx) + movl %esi, 56(%ebx) + je .LBB234_2 +# BB#1: # %carry + movl %esi, (%esp) # 4-byte Spill + movl 88(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 40(%esp), %edi # 4-byte Reload + adcl 8(%esi), %edi + movl 12(%esi), %eax + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %edi, 8(%ebx) + movl 16(%esi), %ecx + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 40(%esi), %ecx + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl 44(%esi), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%ebx) + movl 48(%esi), %ecx + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %eax, 44(%ebx) + movl %ecx, 48(%ebx) + movl 52(%esi), %eax + adcl %ebp, %eax + movl %eax, 52(%ebx) + movl 56(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%ebx) +.LBB234_2: # %nocarry + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end234: + .size mcl_fp_sub15Lbmi2, .Lfunc_end234-mcl_fp_sub15Lbmi2 + + .globl mcl_fp_subNF15Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF15Lbmi2,@function +mcl_fp_subNF15Lbmi2: # @mcl_fp_subNF15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $96, %esp + movl 120(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 124(%esp), %edi + subl (%edi), %esi + movl %esi, 60(%esp) # 4-byte Spill + sbbl 4(%edi), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 52(%ecx), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 48(%ecx), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 32(%ecx), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %ebx + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 40(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 48(%esp) # 4-byte Spill + sbbl 24(%edi), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + sbbl 28(%edi), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 32(%edi), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), 
%eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 48(%edi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 52(%edi), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + sbbl 56(%edi), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %eax, %ebp + sarl $31, %ebp + movl %ebp, %edi + shldl $1, %eax, %edi + movl 128(%esp), %edx + andl (%edx), %edi + movl 56(%edx), %eax + andl %ebp, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%edx), %eax + andl %ebp, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 48(%edx), %eax + andl %ebp, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 44(%edx), %eax + andl %ebp, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 40(%edx), %eax + andl %ebp, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 36(%edx), %eax + andl %ebp, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 32(%edx), %eax + andl %ebp, %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 28(%edx), %eax + andl %ebp, %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 24(%edx), %eax + andl %ebp, %eax + movl %eax, (%esp) # 4-byte Spill + movl 20(%edx), %ebx + andl %ebp, %ebx + movl 16(%edx), %esi + andl %ebp, %esi + movl 12(%edx), %ecx + andl %ebp, %ecx + movl 8(%edx), %eax + andl %ebp, %eax + andl 4(%edx), %ebp + addl 60(%esp), %edi # 4-byte Folded Reload + adcl 64(%esp), %ebp # 4-byte Folded Reload + movl 116(%esp), %edx + movl %edi, (%edx) + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ebp, 4(%edx) + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %eax, 8(%edx) + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %ecx, 12(%edx) + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %esi, 16(%edx) + movl (%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ebx, 20(%edx) + movl 4(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%edx) + movl 8(%esp), %ecx # 4-byte Reload + adcl 92(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%edx) + movl 12(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%edx) + movl 16(%esp), %ecx # 4-byte Reload + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%edx) + movl 20(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%edx) + movl 24(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %eax, 44(%edx) + movl 28(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %ecx, 48(%edx) + movl %eax, 52(%edx) + movl 44(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%edx) + addl $96, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end235: + .size mcl_fp_subNF15Lbmi2, .Lfunc_end235-mcl_fp_subNF15Lbmi2 + + .globl mcl_fpDbl_add15Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add15Lbmi2,@function +mcl_fpDbl_add15Lbmi2: # @mcl_fpDbl_add15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $108, %esp + movl 136(%esp), %ecx + movl 132(%esp), %edx + movl 12(%edx), %edi + movl 16(%edx), %esi + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%edx), %ebp + movl 128(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%edx), %ebp + adcl 8(%edx), %ebx + adcl 12(%ecx), %edi + adcl 16(%ecx), %esi + movl %ebp, 4(%eax) + movl 68(%ecx), %ebp + movl %ebx, 8(%eax) + 
movl 20(%ecx), %ebx + movl %edi, 12(%eax) + movl 20(%edx), %edi + adcl %ebx, %edi + movl 24(%ecx), %ebx + movl %esi, 16(%eax) + movl 24(%edx), %esi + adcl %ebx, %esi + movl 28(%ecx), %ebx + movl %edi, 20(%eax) + movl 28(%edx), %edi + adcl %ebx, %edi + movl 32(%ecx), %ebx + movl %esi, 24(%eax) + movl 32(%edx), %esi + adcl %ebx, %esi + movl 36(%ecx), %ebx + movl %edi, 28(%eax) + movl 36(%edx), %edi + adcl %ebx, %edi + movl 40(%ecx), %ebx + movl %esi, 32(%eax) + movl 40(%edx), %esi + adcl %ebx, %esi + movl 44(%ecx), %ebx + movl %edi, 36(%eax) + movl 44(%edx), %edi + adcl %ebx, %edi + movl 48(%ecx), %ebx + movl %esi, 40(%eax) + movl 48(%edx), %esi + adcl %ebx, %esi + movl 52(%ecx), %ebx + movl %edi, 44(%eax) + movl 52(%edx), %edi + adcl %ebx, %edi + movl 56(%ecx), %ebx + movl %esi, 48(%eax) + movl 56(%edx), %esi + adcl %ebx, %esi + movl 60(%ecx), %ebx + movl %edi, 52(%eax) + movl 60(%edx), %edi + adcl %ebx, %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 64(%ecx), %edi + movl %esi, 56(%eax) + movl 64(%edx), %eax + adcl %edi, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%edx), %eax + adcl %ebp, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl 72(%edx), %eax + adcl %esi, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 76(%ecx), %esi + movl 76(%edx), %eax + adcl %esi, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%ecx), %esi + movl 80(%edx), %eax + adcl %esi, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%ecx), %esi + movl 84(%edx), %eax + adcl %esi, %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%ecx), %esi + movl 88(%edx), %eax + adcl %esi, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 92(%ecx), %esi + movl 92(%edx), %eax + adcl %esi, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 96(%ecx), %esi + movl 96(%edx), %eax + adcl %esi, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%ecx), %esi + movl 100(%edx), %eax + adcl %esi, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%ecx), %eax + movl 104(%edx), %esi + adcl %eax, %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 108(%ecx), %edi + movl 108(%edx), %eax + adcl %edi, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 112(%ecx), %ebx + movl 112(%edx), %edi + adcl %ebx, %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 116(%ecx), %ecx + movl 116(%edx), %edx + adcl %ecx, %edx + sbbl %ebx, %ebx + andl $1, %ebx + movl 140(%esp), %ebp + movl 76(%esp), %ecx # 4-byte Reload + subl (%ebp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 4(%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 8(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 12(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 16(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + sbbl 20(%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + sbbl 24(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + sbbl 32(%ebp), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + sbbl 36(%ebp), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + sbbl 40(%ebp), %ecx + sbbl 44(%ebp), %esi + movl %esi, 40(%esp) # 4-byte Spill + sbbl 48(%ebp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %edi, %eax + movl %edx, %edi 
+ sbbl 52(%ebp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl %edi, %esi + sbbl 56(%ebp), %esi + sbbl $0, %ebx + andl $1, %ebx + jne .LBB236_2 +# BB#1: + movl %esi, %edi +.LBB236_2: + testb %bl, %bl + movl 76(%esp), %eax # 4-byte Reload + movl 72(%esp), %esi # 4-byte Reload + movl 68(%esp), %ebx # 4-byte Reload + movl 64(%esp), %ebp # 4-byte Reload + jne .LBB236_4 +# BB#3: + movl %ecx, %esi + movl (%esp), %ebx # 4-byte Reload + movl 4(%esp), %ebp # 4-byte Reload + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload +.LBB236_4: + movl 128(%esp), %edx + movl %eax, 60(%edx) + movl 80(%esp), %eax # 4-byte Reload + movl %eax, 64(%edx) + movl 84(%esp), %eax # 4-byte Reload + movl %eax, 68(%edx) + movl 88(%esp), %eax # 4-byte Reload + movl %eax, 72(%edx) + movl 92(%esp), %eax # 4-byte Reload + movl %eax, 76(%edx) + movl 96(%esp), %eax # 4-byte Reload + movl %eax, 80(%edx) + movl 100(%esp), %eax # 4-byte Reload + movl %eax, 84(%edx) + movl 104(%esp), %eax # 4-byte Reload + movl %eax, 88(%edx) + movl %ebp, 92(%edx) + movl %ebx, 96(%edx) + movl %esi, 100(%edx) + movl 52(%esp), %eax # 4-byte Reload + jne .LBB236_6 +# BB#5: + movl 40(%esp), %eax # 4-byte Reload +.LBB236_6: + movl %eax, 104(%edx) + movl 60(%esp), %ecx # 4-byte Reload + movl 56(%esp), %eax # 4-byte Reload + jne .LBB236_8 +# BB#7: + movl 44(%esp), %eax # 4-byte Reload +.LBB236_8: + movl %eax, 108(%edx) + jne .LBB236_10 +# BB#9: + movl 48(%esp), %ecx # 4-byte Reload +.LBB236_10: + movl %ecx, 112(%edx) + movl %edi, 116(%edx) + addl $108, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end236: + .size mcl_fpDbl_add15Lbmi2, .Lfunc_end236-mcl_fpDbl_add15Lbmi2 + + .globl mcl_fpDbl_sub15Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub15Lbmi2,@function +mcl_fpDbl_sub15Lbmi2: # @mcl_fpDbl_sub15Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $100, %esp + movl 124(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 128(%esp), %ebp + subl (%ebp), %edx + sbbl 4(%ebp), %esi + movl 8(%eax), %edi + sbbl 8(%ebp), %edi + movl 120(%esp), %ecx + movl %edx, (%ecx) + movl 12(%eax), %edx + sbbl 12(%ebp), %edx + movl %esi, 4(%ecx) + movl 16(%eax), %esi + sbbl 16(%ebp), %esi + movl %edi, 8(%ecx) + movl 20(%ebp), %edi + movl %edx, 12(%ecx) + movl 20(%eax), %edx + sbbl %edi, %edx + movl 24(%ebp), %edi + movl %esi, 16(%ecx) + movl 24(%eax), %esi + sbbl %edi, %esi + movl 28(%ebp), %edi + movl %edx, 20(%ecx) + movl 28(%eax), %edx + sbbl %edi, %edx + movl 32(%ebp), %edi + movl %esi, 24(%ecx) + movl 32(%eax), %esi + sbbl %edi, %esi + movl 36(%ebp), %edi + movl %edx, 28(%ecx) + movl 36(%eax), %edx + sbbl %edi, %edx + movl 40(%ebp), %edi + movl %esi, 32(%ecx) + movl 40(%eax), %esi + sbbl %edi, %esi + movl 44(%ebp), %edi + movl %edx, 36(%ecx) + movl 44(%eax), %edx + sbbl %edi, %edx + movl 48(%ebp), %edi + movl %esi, 40(%ecx) + movl 48(%eax), %esi + sbbl %edi, %esi + movl 52(%ebp), %edi + movl %edx, 44(%ecx) + movl 52(%eax), %edx + sbbl %edi, %edx + movl 56(%ebp), %edi + movl %esi, 48(%ecx) + movl 56(%eax), %esi + sbbl 
%edi, %esi + movl 60(%ebp), %edi + movl %edx, 52(%ecx) + movl 60(%eax), %edx + sbbl %edi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 64(%ebp), %edx + movl %esi, 56(%ecx) + movl 64(%eax), %esi + sbbl %edx, %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 68(%ebp), %edx + movl 68(%eax), %esi + sbbl %edx, %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 72(%ebp), %edx + movl 72(%eax), %esi + sbbl %edx, %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 76(%ebp), %edx + movl 76(%eax), %esi + sbbl %edx, %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 80(%ebp), %edx + movl 80(%eax), %esi + sbbl %edx, %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 84(%ebp), %edx + movl 84(%eax), %esi + sbbl %edx, %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 88(%ebp), %edx + movl 88(%eax), %esi + sbbl %edx, %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 92(%ebp), %edx + movl 92(%eax), %esi + sbbl %edx, %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 96(%ebp), %edx + movl 96(%eax), %esi + sbbl %edx, %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 100(%ebp), %edx + movl 100(%eax), %esi + sbbl %edx, %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 104(%ebp), %edx + movl 104(%eax), %esi + sbbl %edx, %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 108(%ebp), %edx + movl 108(%eax), %esi + sbbl %edx, %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 112(%ebp), %edx + movl 112(%eax), %esi + sbbl %edx, %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 116(%ebp), %edx + movl 116(%eax), %eax + sbbl %edx, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 132(%esp), %esi + jne .LBB237_1 +# BB#2: + movl $0, 60(%esp) # 4-byte Folded Spill + jmp .LBB237_3 +.LBB237_1: + movl 56(%esi), %edx + movl %edx, 60(%esp) # 4-byte Spill +.LBB237_3: + testb %al, %al + jne .LBB237_4 +# BB#5: + movl $0, 24(%esp) # 4-byte Folded Spill + movl $0, %ebx + jmp .LBB237_6 +.LBB237_4: + movl (%esi), %ebx + movl 4(%esi), %eax + movl %eax, 24(%esp) # 4-byte Spill +.LBB237_6: + jne .LBB237_7 +# BB#8: + movl $0, 32(%esp) # 4-byte Folded Spill + jmp .LBB237_9 +.LBB237_7: + movl 52(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill +.LBB237_9: + jne .LBB237_10 +# BB#11: + movl $0, 28(%esp) # 4-byte Folded Spill + jmp .LBB237_12 +.LBB237_10: + movl 48(%esi), %eax + movl %eax, 28(%esp) # 4-byte Spill +.LBB237_12: + jne .LBB237_13 +# BB#14: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB237_15 +.LBB237_13: + movl 44(%esi), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB237_15: + jne .LBB237_16 +# BB#17: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB237_18 +.LBB237_16: + movl 40(%esi), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB237_18: + jne .LBB237_19 +# BB#20: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB237_21 +.LBB237_19: + movl 36(%esi), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB237_21: + jne .LBB237_22 +# BB#23: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB237_24 +.LBB237_22: + movl 32(%esi), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB237_24: + jne .LBB237_25 +# BB#26: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB237_27 +.LBB237_25: + movl 28(%esi), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB237_27: + jne .LBB237_28 +# BB#29: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB237_30 +.LBB237_28: + movl 24(%esi), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB237_30: + jne .LBB237_31 +# BB#32: + movl $0, %edx + jmp .LBB237_33 +.LBB237_31: + movl 20(%esi), %edx +.LBB237_33: + jne .LBB237_34 +# BB#35: + movl $0, %ebp + jmp .LBB237_36 +.LBB237_34: + movl 
16(%esi), %ebp +.LBB237_36: + jne .LBB237_37 +# BB#38: + movl $0, %eax + jmp .LBB237_39 +.LBB237_37: + movl 12(%esi), %eax +.LBB237_39: + jne .LBB237_40 +# BB#41: + xorl %esi, %esi + jmp .LBB237_42 +.LBB237_40: + movl 8(%esi), %esi +.LBB237_42: + addl 44(%esp), %ebx # 4-byte Folded Reload + movl 24(%esp), %edi # 4-byte Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %ebx, 60(%ecx) + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %edi, 64(%ecx) + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %esi, 68(%ecx) + adcl 52(%esp), %ebp # 4-byte Folded Reload + movl %eax, 72(%ecx) + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %ebp, 76(%ecx) + movl (%esp), %esi # 4-byte Reload + adcl 64(%esp), %esi # 4-byte Folded Reload + movl %edx, 80(%ecx) + movl 4(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %esi, 84(%ecx) + movl 8(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %eax, 88(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %edx, 92(%ecx) + movl 16(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %eax, 96(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %edx, 100(%ecx) + movl 28(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %eax, 104(%ecx) + movl 32(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %edx, 108(%ecx) + movl %eax, 112(%ecx) + movl 60(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%ecx) + addl $100, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end237: + .size mcl_fpDbl_sub15Lbmi2, .Lfunc_end237-mcl_fpDbl_sub15Lbmi2 + + .align 16, 0x90 + .type .LmulPv512x32,@function +.LmulPv512x32: # @mulPv512x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl %edx, %eax + movl 76(%esp), %edi + movl %edi, %edx + mulxl 4(%eax), %ebx, %esi + movl %edi, %edx + mulxl (%eax), %ebp, %edx + movl %ebp, 52(%esp) # 4-byte Spill + addl %ebx, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 8(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 12(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 16(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 20(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 24(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 28(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 32(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 36(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 40(%eax), %edx, %ebx + adcl %esi, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 44(%eax), %edx, %esi + adcl %ebx, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %edi, %edx + mulxl 48(%eax), %ebx, %ebp + adcl %esi, %ebx + movl %edi, %edx + mulxl 52(%eax), %esi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %ebp, %esi + movl %edi, %edx + mulxl 56(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 48(%esp), 
%ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 44(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl 36(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%ecx) + movl 32(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%ecx) + movl 28(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%ecx) + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%ecx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 44(%ecx) + movl %ebx, 48(%ecx) + movl %esi, 52(%ecx) + movl %edx, 56(%ecx) + movl %edi, %edx + mulxl 60(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%ecx) + adcl $0, %edx + movl %edx, 64(%ecx) + movl %ecx, %eax + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end238: + .size .LmulPv512x32, .Lfunc_end238-.LmulPv512x32 + + .globl mcl_fp_mulUnitPre16Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre16Lbmi2,@function +mcl_fp_mulUnitPre16Lbmi2: # @mcl_fp_mulUnitPre16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $124, %esp + calll .L239$pb +.L239$pb: + popl %ebx +.Ltmp50: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp50-.L239$pb), %ebx + movl 152(%esp), %eax + movl %eax, (%esp) + leal 56(%esp), %ecx + movl 148(%esp), %edx + calll .LmulPv512x32 + movl 120(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 116(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 108(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 104(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 88(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 76(%esp), %ebp + movl 72(%esp), %ebx + movl 68(%esp), %edi + movl 64(%esp), %esi + movl 56(%esp), %edx + movl 60(%esp), %ecx + movl 144(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + movl 52(%esp), %ecx # 4-byte Reload + movl %ecx, 64(%eax) + addl $124, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end239: + .size mcl_fp_mulUnitPre16Lbmi2, .Lfunc_end239-mcl_fp_mulUnitPre16Lbmi2 + + .globl mcl_fpDbl_mulPre16Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre16Lbmi2,@function +mcl_fpDbl_mulPre16Lbmi2: # @mcl_fpDbl_mulPre16Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $300, %esp # imm = 0x12C + calll .L240$pb +.L240$pb: + popl %ebx +.Ltmp51: + addl 
$_GLOBAL_OFFSET_TABLE_+(.Ltmp51-.L240$pb), %ebx + movl %ebx, -224(%ebp) # 4-byte Spill + movl 16(%ebp), %edi + movl %edi, 8(%esp) + movl 12(%ebp), %esi + movl %esi, 4(%esp) + movl 8(%ebp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre8Lbmi2@PLT + leal 32(%edi), %eax + movl %eax, 8(%esp) + leal 32(%esi), %eax + movl %eax, 4(%esp) + movl 8(%ebp), %eax + leal 64(%eax), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre8Lbmi2@PLT + movl 52(%esi), %ebx + movl 48(%esi), %eax + movl 44(%esi), %ecx + movl 40(%esi), %edx + movl %edx, -176(%ebp) # 4-byte Spill + movl (%esi), %edi + movl 4(%esi), %edx + addl 32(%esi), %edi + movl %edi, -184(%ebp) # 4-byte Spill + movl %esi, %edi + adcl 36(%edi), %edx + movl %edx, -236(%ebp) # 4-byte Spill + movl -176(%ebp), %edx # 4-byte Reload + adcl 8(%edi), %edx + movl %edx, -176(%ebp) # 4-byte Spill + adcl 12(%edi), %ecx + movl %ecx, -232(%ebp) # 4-byte Spill + adcl 16(%edi), %eax + movl %eax, -212(%ebp) # 4-byte Spill + adcl 20(%edi), %ebx + movl %ebx, -228(%ebp) # 4-byte Spill + movl 56(%edi), %eax + adcl 24(%edi), %eax + movl %eax, -248(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %ecx + popl %eax + movl %ecx, -144(%ebp) # 4-byte Spill + movl 16(%ebp), %esi + movl (%esi), %ecx + addl 32(%esi), %ecx + movl %ecx, -188(%ebp) # 4-byte Spill + movl 4(%esi), %ecx + adcl 36(%esi), %ecx + movl %ecx, -192(%ebp) # 4-byte Spill + movl 40(%esi), %ecx + adcl 8(%esi), %ecx + movl %ecx, -196(%ebp) # 4-byte Spill + movl 44(%esi), %ecx + adcl 12(%esi), %ecx + movl %ecx, -200(%ebp) # 4-byte Spill + movl 48(%esi), %ecx + adcl 16(%esi), %ecx + movl %ecx, -204(%ebp) # 4-byte Spill + movl 52(%esi), %ecx + adcl 20(%esi), %ecx + movl %ecx, -208(%ebp) # 4-byte Spill + movl 56(%esi), %edx + adcl 24(%esi), %edx + movl 60(%esi), %ecx + adcl 28(%esi), %ecx + pushl %eax + seto %al + lahf + movl %eax, %ebx + popl %eax + movl %ebx, -252(%ebp) # 4-byte Spill + movl -212(%ebp), %ebx # 4-byte Reload + movl -176(%ebp), %esi # 4-byte Reload + movl %esi, -216(%ebp) # 4-byte Spill + movl -184(%ebp), %esi # 4-byte Reload + movl %esi, -220(%ebp) # 4-byte Spill + jb .LBB240_2 +# BB#1: + xorl %eax, %eax + xorl %ebx, %ebx + movl $0, -216(%ebp) # 4-byte Folded Spill + movl $0, -220(%ebp) # 4-byte Folded Spill +.LBB240_2: + movl %ebx, -244(%ebp) # 4-byte Spill + movl %eax, -240(%ebp) # 4-byte Spill + movl 60(%edi), %eax + movl -144(%ebp), %ebx # 4-byte Reload + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + adcl 28(%edi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + movl %ecx, -172(%ebp) # 4-byte Spill + movl %edx, -144(%ebp) # 4-byte Spill + movl -208(%ebp), %eax # 4-byte Reload + movl %eax, -148(%ebp) # 4-byte Spill + movl -204(%ebp), %eax # 4-byte Reload + movl %eax, -152(%ebp) # 4-byte Spill + movl -200(%ebp), %eax # 4-byte Reload + movl %eax, -156(%ebp) # 4-byte Spill + movl -196(%ebp), %eax # 4-byte Reload + movl %eax, -160(%ebp) # 4-byte Spill + movl -192(%ebp), %eax # 4-byte Reload + movl %eax, -164(%ebp) # 4-byte Spill + movl -188(%ebp), %eax # 4-byte Reload + movl %eax, -168(%ebp) # 4-byte Spill + jb .LBB240_4 +# BB#3: + movl $0, -172(%ebp) # 4-byte Folded Spill + movl $0, -144(%ebp) # 4-byte Folded Spill + movl $0, -148(%ebp) # 4-byte Folded Spill + movl $0, -152(%ebp) # 4-byte Folded Spill + movl $0, -156(%ebp) # 4-byte Folded Spill + movl $0, -160(%ebp) # 4-byte Folded Spill + movl $0, -164(%ebp) # 4-byte Folded Spill + movl $0, -168(%ebp) # 4-byte Folded Spill +.LBB240_4: + movl -184(%ebp), %eax # 4-byte Reload + movl %eax, -108(%ebp) + movl 
-236(%ebp), %eax # 4-byte Reload + movl %eax, -104(%ebp) + movl -176(%ebp), %edi # 4-byte Reload + movl %edi, -100(%ebp) + movl -232(%ebp), %edi # 4-byte Reload + movl %edi, -96(%ebp) + movl -212(%ebp), %esi # 4-byte Reload + movl %esi, -92(%ebp) + movl -228(%ebp), %esi # 4-byte Reload + movl %esi, -88(%ebp) + movl -248(%ebp), %ebx # 4-byte Reload + movl %ebx, -84(%ebp) + movl -188(%ebp), %ebx # 4-byte Reload + movl %ebx, -140(%ebp) + movl -192(%ebp), %ebx # 4-byte Reload + movl %ebx, -136(%ebp) + movl -196(%ebp), %ebx # 4-byte Reload + movl %ebx, -132(%ebp) + movl -200(%ebp), %ebx # 4-byte Reload + movl %ebx, -128(%ebp) + movl -204(%ebp), %ebx # 4-byte Reload + movl %ebx, -124(%ebp) + movl -208(%ebp), %ebx # 4-byte Reload + movl %ebx, -120(%ebp) + movl %esi, %ebx + movl %edi, %esi + movl %eax, %edi + movl %edx, -116(%ebp) + movl %ecx, -112(%ebp) + sbbl %edx, %edx + movl -180(%ebp), %eax # 4-byte Reload + movl %eax, -80(%ebp) + movl -252(%ebp), %ecx # 4-byte Reload + pushl %eax + movl %ecx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB240_6 +# BB#5: + movl $0, %eax + movl $0, %ebx + movl $0, %esi + movl $0, %edi +.LBB240_6: + movl %eax, -180(%ebp) # 4-byte Spill + sbbl %eax, %eax + leal -140(%ebp), %ecx + movl %ecx, 8(%esp) + leal -108(%ebp), %ecx + movl %ecx, 4(%esp) + leal -76(%ebp), %ecx + movl %ecx, (%esp) + andl %eax, %edx + movl -220(%ebp), %eax # 4-byte Reload + addl %eax, -168(%ebp) # 4-byte Folded Spill + adcl %edi, -164(%ebp) # 4-byte Folded Spill + movl -216(%ebp), %eax # 4-byte Reload + adcl %eax, -160(%ebp) # 4-byte Folded Spill + adcl %esi, -156(%ebp) # 4-byte Folded Spill + movl -244(%ebp), %eax # 4-byte Reload + adcl %eax, -152(%ebp) # 4-byte Folded Spill + adcl %ebx, -148(%ebp) # 4-byte Folded Spill + movl -144(%ebp), %eax # 4-byte Reload + adcl -240(%ebp), %eax # 4-byte Folded Reload + movl %eax, -144(%ebp) # 4-byte Spill + movl -172(%ebp), %edi # 4-byte Reload + adcl -180(%ebp), %edi # 4-byte Folded Reload + sbbl %esi, %esi + andl $1, %esi + andl $1, %edx + movl %edx, -176(%ebp) # 4-byte Spill + movl -224(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre8Lbmi2@PLT + movl -168(%ebp), %eax # 4-byte Reload + addl -44(%ebp), %eax + movl %eax, -168(%ebp) # 4-byte Spill + movl -164(%ebp), %eax # 4-byte Reload + adcl -40(%ebp), %eax + movl %eax, -164(%ebp) # 4-byte Spill + movl -160(%ebp), %eax # 4-byte Reload + adcl -36(%ebp), %eax + movl %eax, -160(%ebp) # 4-byte Spill + movl -156(%ebp), %eax # 4-byte Reload + adcl -32(%ebp), %eax + movl %eax, -156(%ebp) # 4-byte Spill + movl -152(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -152(%ebp) # 4-byte Spill + movl -148(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -148(%ebp) # 4-byte Spill + movl -144(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -144(%ebp) # 4-byte Spill + adcl -16(%ebp), %edi + movl %edi, -172(%ebp) # 4-byte Spill + adcl %esi, -176(%ebp) # 4-byte Folded Spill + movl -76(%ebp), %eax + movl 8(%ebp), %esi + subl (%esi), %eax + movl %eax, -196(%ebp) # 4-byte Spill + movl -72(%ebp), %ecx + sbbl 4(%esi), %ecx + movl -68(%ebp), %eax + sbbl 8(%esi), %eax + movl %eax, -192(%ebp) # 4-byte Spill + movl -64(%ebp), %edx + sbbl 12(%esi), %edx + movl -60(%ebp), %ebx + sbbl 16(%esi), %ebx + movl -56(%ebp), %eax + sbbl 20(%esi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + movl -52(%ebp), %eax + sbbl 24(%esi), %eax + movl %eax, -184(%ebp) # 4-byte Spill + movl -48(%ebp), %eax + sbbl 28(%esi), %eax + movl %eax, -188(%ebp) # 4-byte Spill + movl 32(%esi), %eax + movl 
%eax, -200(%ebp) # 4-byte Spill + sbbl %eax, -168(%ebp) # 4-byte Folded Spill + movl 36(%esi), %eax + movl %eax, -204(%ebp) # 4-byte Spill + sbbl %eax, -164(%ebp) # 4-byte Folded Spill + movl 40(%esi), %eax + movl %eax, -208(%ebp) # 4-byte Spill + sbbl %eax, -160(%ebp) # 4-byte Folded Spill + movl 44(%esi), %eax + movl %eax, -212(%ebp) # 4-byte Spill + sbbl %eax, -156(%ebp) # 4-byte Folded Spill + movl 48(%esi), %eax + movl %eax, -216(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + movl 52(%esi), %eax + movl %eax, -220(%ebp) # 4-byte Spill + sbbl %eax, -148(%ebp) # 4-byte Folded Spill + movl 56(%esi), %eax + movl %eax, -224(%ebp) # 4-byte Spill + movl -144(%ebp), %edi # 4-byte Reload + sbbl %eax, %edi + movl 60(%esi), %eax + movl %eax, -228(%ebp) # 4-byte Spill + sbbl %eax, -172(%ebp) # 4-byte Folded Spill + sbbl $0, -176(%ebp) # 4-byte Folded Spill + movl 64(%esi), %eax + movl %eax, -260(%ebp) # 4-byte Spill + subl %eax, -196(%ebp) # 4-byte Folded Spill + movl 68(%esi), %eax + movl %eax, -264(%ebp) # 4-byte Spill + sbbl %eax, %ecx + movl 72(%esi), %eax + movl %eax, -268(%ebp) # 4-byte Spill + sbbl %eax, -192(%ebp) # 4-byte Folded Spill + movl 76(%esi), %eax + movl %eax, -272(%ebp) # 4-byte Spill + sbbl %eax, %edx + movl 80(%esi), %eax + movl %eax, -276(%ebp) # 4-byte Spill + sbbl %eax, %ebx + movl 84(%esi), %eax + movl %eax, -280(%ebp) # 4-byte Spill + sbbl %eax, -180(%ebp) # 4-byte Folded Spill + movl 88(%esi), %eax + movl %eax, -284(%ebp) # 4-byte Spill + sbbl %eax, -184(%ebp) # 4-byte Folded Spill + movl 92(%esi), %eax + movl %eax, -288(%ebp) # 4-byte Spill + sbbl %eax, -188(%ebp) # 4-byte Folded Spill + movl 96(%esi), %eax + movl %eax, -292(%ebp) # 4-byte Spill + sbbl %eax, -168(%ebp) # 4-byte Folded Spill + movl 100(%esi), %eax + movl %eax, -236(%ebp) # 4-byte Spill + sbbl %eax, -164(%ebp) # 4-byte Folded Spill + movl 104(%esi), %eax + movl %eax, -240(%ebp) # 4-byte Spill + sbbl %eax, -160(%ebp) # 4-byte Folded Spill + movl 108(%esi), %eax + movl %eax, -244(%ebp) # 4-byte Spill + sbbl %eax, -156(%ebp) # 4-byte Folded Spill + movl 112(%esi), %eax + movl %eax, -248(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + movl 116(%esi), %eax + movl %eax, -252(%ebp) # 4-byte Spill + sbbl %eax, -148(%ebp) # 4-byte Folded Spill + movl 120(%esi), %eax + movl %eax, -232(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl %edi, -144(%ebp) # 4-byte Spill + movl 124(%esi), %eax + movl %eax, -256(%ebp) # 4-byte Spill + sbbl %eax, -172(%ebp) # 4-byte Folded Spill + movl -176(%ebp), %edi # 4-byte Reload + sbbl $0, %edi + movl -196(%ebp), %eax # 4-byte Reload + addl -200(%ebp), %eax # 4-byte Folded Reload + adcl -204(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 32(%esi) + movl -192(%ebp), %eax # 4-byte Reload + adcl -208(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 36(%esi) + adcl -212(%ebp), %edx # 4-byte Folded Reload + movl %eax, 40(%esi) + adcl -216(%ebp), %ebx # 4-byte Folded Reload + movl %edx, 44(%esi) + movl -180(%ebp), %eax # 4-byte Reload + adcl -220(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 48(%esi) + movl -184(%ebp), %ecx # 4-byte Reload + adcl -224(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 52(%esi) + movl -188(%ebp), %edx # 4-byte Reload + adcl -228(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 56(%esi) + movl -168(%ebp), %eax # 4-byte Reload + adcl -260(%ebp), %eax # 4-byte Folded Reload + movl %edx, 60(%esi) + movl -164(%ebp), %ecx # 4-byte Reload + adcl -264(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 64(%esi) + movl -160(%ebp), 
%eax # 4-byte Reload + adcl -268(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 68(%esi) + movl -156(%ebp), %ecx # 4-byte Reload + adcl -272(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 72(%esi) + movl -152(%ebp), %eax # 4-byte Reload + adcl -276(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 76(%esi) + movl -148(%ebp), %ecx # 4-byte Reload + adcl -280(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 80(%esi) + movl -144(%ebp), %eax # 4-byte Reload + adcl -284(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 84(%esi) + movl -172(%ebp), %ecx # 4-byte Reload + adcl -288(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 88(%esi) + adcl -292(%ebp), %edi # 4-byte Folded Reload + movl %ecx, 92(%esi) + movl %edi, 96(%esi) + movl -236(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 100(%esi) + movl -240(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 104(%esi) + movl -244(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 108(%esi) + movl -248(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 112(%esi) + movl -252(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 116(%esi) + movl -232(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 120(%esi) + movl -256(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 124(%esi) + addl $300, %esp # imm = 0x12C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end240: + .size mcl_fpDbl_mulPre16Lbmi2, .Lfunc_end240-mcl_fpDbl_mulPre16Lbmi2 + + .globl mcl_fpDbl_sqrPre16Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre16Lbmi2,@function +mcl_fpDbl_sqrPre16Lbmi2: # @mcl_fpDbl_sqrPre16Lbmi2 +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $300, %esp # imm = 0x12C + calll .L241$pb +.L241$pb: + popl %ebx +.Ltmp52: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp52-.L241$pb), %ebx + movl %ebx, -184(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + movl %edi, 8(%esp) + movl %edi, 4(%esp) + movl 8(%ebp), %esi + movl %esi, (%esp) + calll mcl_fpDbl_mulPre8Lbmi2@PLT + leal 32(%edi), %eax + movl %eax, 8(%esp) + movl %eax, 4(%esp) + leal 64(%esi), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre8Lbmi2@PLT + movl 52(%edi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + movl 48(%edi), %eax + movl 44(%edi), %ebx + movl 40(%edi), %esi + movl (%edi), %ecx + movl 4(%edi), %edx + addl 32(%edi), %ecx + movl %ecx, -192(%ebp) # 4-byte Spill + adcl 36(%edi), %edx + movl %edx, -196(%ebp) # 4-byte Spill + adcl 8(%edi), %esi + movl %esi, -188(%ebp) # 4-byte Spill + adcl 12(%edi), %ebx + adcl 16(%edi), %eax + movl %eax, -208(%ebp) # 4-byte Spill + movl -180(%ebp), %eax # 4-byte Reload + adcl 20(%edi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -168(%ebp) # 4-byte Spill + addl %ecx, %ecx + movl %ecx, -164(%ebp) # 4-byte Spill + adcl %edx, %edx + movl %edx, -160(%ebp) # 4-byte Spill + adcl %esi, %esi + movl %esi, -156(%ebp) # 4-byte Spill + movl %ebx, %edx + movl %ebx, %esi + adcl %edx, %edx + movl %edx, -152(%ebp) # 4-byte Spill + movl -208(%ebp), %eax # 4-byte Reload + movl %eax, %edx + movl %eax, %ebx + adcl %edx, %edx + movl %edx, -148(%ebp) # 4-byte Spill + movl -180(%ebp), %edx # 4-byte Reload + adcl %edx, %edx + movl %edx, -144(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -172(%ebp) # 4-byte Spill + movl 56(%edi), %edx + movl -168(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + adcl 24(%edi), %edx + movl 60(%edi), %ecx + adcl 28(%edi), %ecx + seto %al + lahf + movl %eax, %eax + movl %eax, -200(%ebp) # 4-byte 
Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -204(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %edi + sbbl %eax, %eax + movl %eax, -168(%ebp) # 4-byte Spill + movl %edi, %eax + addb $127, %al + sahf + jb .LBB241_2 +# BB#1: + movl $0, -144(%ebp) # 4-byte Folded Spill + movl $0, -148(%ebp) # 4-byte Folded Spill + movl $0, -152(%ebp) # 4-byte Folded Spill + movl $0, -156(%ebp) # 4-byte Folded Spill + movl $0, -160(%ebp) # 4-byte Folded Spill + movl $0, -164(%ebp) # 4-byte Folded Spill +.LBB241_2: + movl %edx, %eax + movl -172(%ebp), %edi # 4-byte Reload + pushl %eax + movl %edi, %eax + addb $127, %al + sahf + popl %eax + adcl %eax, %eax + movl %ecx, %edi + adcl %edi, %edi + movl %edi, -176(%ebp) # 4-byte Spill + movl -204(%ebp), %edi # 4-byte Reload + pushl %eax + movl %edi, %eax + addb $127, %al + sahf + popl %eax + jb .LBB241_4 +# BB#3: + movl $0, -176(%ebp) # 4-byte Folded Spill + xorl %eax, %eax +.LBB241_4: + movl %eax, -172(%ebp) # 4-byte Spill + movl -192(%ebp), %eax # 4-byte Reload + movl %eax, -108(%ebp) + movl %eax, -140(%ebp) + movl -196(%ebp), %eax # 4-byte Reload + movl %eax, -104(%ebp) + movl %eax, -136(%ebp) + movl -188(%ebp), %eax # 4-byte Reload + movl %eax, -100(%ebp) + movl %eax, -132(%ebp) + movl %esi, -96(%ebp) + movl %esi, -128(%ebp) + movl %ebx, -92(%ebp) + movl %ebx, -124(%ebp) + movl -180(%ebp), %eax # 4-byte Reload + movl %eax, -88(%ebp) + movl %eax, -120(%ebp) + movl %edx, -84(%ebp) + movl %edx, -116(%ebp) + movl %ecx, -80(%ebp) + movl %ecx, -112(%ebp) + movl -200(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB241_5 +# BB#6: + xorl %edi, %edi + jmp .LBB241_7 +.LBB241_5: + shrl $31, %ecx + movl %ecx, %edi +.LBB241_7: + leal -140(%ebp), %eax + movl %eax, 8(%esp) + leal -108(%ebp), %eax + movl %eax, 4(%esp) + leal -76(%ebp), %eax + movl %eax, (%esp) + movl -168(%ebp), %esi # 4-byte Reload + andl $1, %esi + movl -184(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre8Lbmi2@PLT + movl -164(%ebp), %eax # 4-byte Reload + addl -44(%ebp), %eax + movl %eax, -164(%ebp) # 4-byte Spill + movl -160(%ebp), %eax # 4-byte Reload + adcl -40(%ebp), %eax + movl %eax, -160(%ebp) # 4-byte Spill + movl -156(%ebp), %eax # 4-byte Reload + adcl -36(%ebp), %eax + movl %eax, -156(%ebp) # 4-byte Spill + movl -152(%ebp), %eax # 4-byte Reload + adcl -32(%ebp), %eax + movl %eax, -152(%ebp) # 4-byte Spill + movl -148(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -148(%ebp) # 4-byte Spill + movl -144(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -144(%ebp) # 4-byte Spill + movl -172(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -172(%ebp) # 4-byte Spill + movl -176(%ebp), %eax # 4-byte Reload + adcl -16(%ebp), %eax + adcl %edi, %esi + movl %esi, -168(%ebp) # 4-byte Spill + movl -76(%ebp), %ecx + movl 8(%ebp), %esi + subl (%esi), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + movl -72(%ebp), %edi + sbbl 4(%esi), %edi + movl -68(%ebp), %edx + sbbl 8(%esi), %edx + movl %edx, -184(%ebp) # 4-byte Spill + movl -64(%ebp), %edx + sbbl 12(%esi), %edx + movl %edx, -192(%ebp) # 4-byte Spill + movl -60(%ebp), %ebx + sbbl 16(%esi), %ebx + movl %eax, %ecx + movl -56(%ebp), %eax + sbbl 20(%esi), %eax + movl %eax, -196(%ebp) # 4-byte Spill + movl -52(%ebp), %edx + sbbl 24(%esi), %edx + movl %edx, -188(%ebp) # 4-byte Spill + movl -48(%ebp), %edx + sbbl 28(%esi), %edx + movl 32(%esi), %eax + movl %eax, -200(%ebp) # 4-byte Spill + sbbl %eax, -164(%ebp) # 4-byte Folded Spill + movl 36(%esi), %eax + movl %eax, 
-204(%ebp) # 4-byte Spill + sbbl %eax, -160(%ebp) # 4-byte Folded Spill + movl 40(%esi), %eax + movl %eax, -208(%ebp) # 4-byte Spill + sbbl %eax, -156(%ebp) # 4-byte Folded Spill + movl 44(%esi), %eax + movl %eax, -212(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + movl 48(%esi), %eax + movl %eax, -216(%ebp) # 4-byte Spill + sbbl %eax, -148(%ebp) # 4-byte Folded Spill + movl 52(%esi), %eax + movl %eax, -220(%ebp) # 4-byte Spill + sbbl %eax, -144(%ebp) # 4-byte Folded Spill + movl 56(%esi), %eax + movl %eax, -224(%ebp) # 4-byte Spill + sbbl %eax, -172(%ebp) # 4-byte Folded Spill + movl 60(%esi), %eax + movl %eax, -228(%ebp) # 4-byte Spill + sbbl %eax, %ecx + movl %ecx, -176(%ebp) # 4-byte Spill + movl -168(%ebp), %eax # 4-byte Reload + sbbl $0, %eax + movl 64(%esi), %ecx + movl %ecx, -260(%ebp) # 4-byte Spill + subl %ecx, -180(%ebp) # 4-byte Folded Spill + movl 68(%esi), %ecx + movl %ecx, -264(%ebp) # 4-byte Spill + sbbl %ecx, %edi + movl 72(%esi), %ecx + movl %ecx, -268(%ebp) # 4-byte Spill + sbbl %ecx, -184(%ebp) # 4-byte Folded Spill + movl 76(%esi), %ecx + movl %ecx, -272(%ebp) # 4-byte Spill + sbbl %ecx, -192(%ebp) # 4-byte Folded Spill + movl 80(%esi), %ecx + movl %ecx, -276(%ebp) # 4-byte Spill + sbbl %ecx, %ebx + movl 84(%esi), %ecx + movl %ecx, -280(%ebp) # 4-byte Spill + sbbl %ecx, -196(%ebp) # 4-byte Folded Spill + movl 88(%esi), %ecx + movl %ecx, -284(%ebp) # 4-byte Spill + sbbl %ecx, -188(%ebp) # 4-byte Folded Spill + movl 92(%esi), %ecx + movl %ecx, -288(%ebp) # 4-byte Spill + sbbl %ecx, %edx + movl 96(%esi), %ecx + movl %ecx, -292(%ebp) # 4-byte Spill + sbbl %ecx, -164(%ebp) # 4-byte Folded Spill + movl 100(%esi), %ecx + movl %ecx, -232(%ebp) # 4-byte Spill + sbbl %ecx, -160(%ebp) # 4-byte Folded Spill + movl 104(%esi), %ecx + movl %ecx, -236(%ebp) # 4-byte Spill + sbbl %ecx, -156(%ebp) # 4-byte Folded Spill + movl 108(%esi), %ecx + movl %ecx, -240(%ebp) # 4-byte Spill + sbbl %ecx, -152(%ebp) # 4-byte Folded Spill + movl 112(%esi), %ecx + movl %ecx, -244(%ebp) # 4-byte Spill + sbbl %ecx, -148(%ebp) # 4-byte Folded Spill + movl 116(%esi), %ecx + movl %ecx, -248(%ebp) # 4-byte Spill + sbbl %ecx, -144(%ebp) # 4-byte Folded Spill + movl 120(%esi), %ecx + movl %ecx, -252(%ebp) # 4-byte Spill + sbbl %ecx, -172(%ebp) # 4-byte Folded Spill + movl 124(%esi), %ecx + movl %ecx, -256(%ebp) # 4-byte Spill + sbbl %ecx, -176(%ebp) # 4-byte Folded Spill + sbbl $0, %eax + movl %eax, -168(%ebp) # 4-byte Spill + movl -180(%ebp), %eax # 4-byte Reload + addl -200(%ebp), %eax # 4-byte Folded Reload + adcl -204(%ebp), %edi # 4-byte Folded Reload + movl %eax, 32(%esi) + movl -184(%ebp), %eax # 4-byte Reload + adcl -208(%ebp), %eax # 4-byte Folded Reload + movl %edi, 36(%esi) + movl -192(%ebp), %ecx # 4-byte Reload + adcl -212(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 40(%esi) + adcl -216(%ebp), %ebx # 4-byte Folded Reload + movl %ecx, 44(%esi) + movl -196(%ebp), %ecx # 4-byte Reload + adcl -220(%ebp), %ecx # 4-byte Folded Reload + movl %ebx, 48(%esi) + movl -188(%ebp), %eax # 4-byte Reload + adcl -224(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 52(%esi) + movl %edx, %ecx + adcl -228(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 56(%esi) + movl -164(%ebp), %eax # 4-byte Reload + adcl -260(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 60(%esi) + movl -160(%ebp), %ecx # 4-byte Reload + adcl -264(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 64(%esi) + movl -156(%ebp), %eax # 4-byte Reload + adcl -268(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 68(%esi) + movl 
-152(%ebp), %ecx # 4-byte Reload + adcl -272(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 72(%esi) + movl -148(%ebp), %eax # 4-byte Reload + adcl -276(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 76(%esi) + movl -144(%ebp), %ecx # 4-byte Reload + adcl -280(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 80(%esi) + movl -172(%ebp), %eax # 4-byte Reload + adcl -284(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 84(%esi) + movl -176(%ebp), %ecx # 4-byte Reload + adcl -288(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 88(%esi) + movl -168(%ebp), %eax # 4-byte Reload + adcl -292(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 92(%esi) + movl %eax, 96(%esi) + movl -232(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 100(%esi) + movl -236(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 104(%esi) + movl -240(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 108(%esi) + movl -244(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 112(%esi) + movl -248(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 116(%esi) + movl -252(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 120(%esi) + movl -256(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 124(%esi) + addl $300, %esp # imm = 0x12C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end241: + .size mcl_fpDbl_sqrPre16Lbmi2, .Lfunc_end241-mcl_fpDbl_sqrPre16Lbmi2 + + .globl mcl_fp_mont16Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont16Lbmi2,@function +mcl_fp_mont16Lbmi2: # @mcl_fp_mont16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2428, %esp # imm = 0x97C + calll .L242$pb +.L242$pb: + popl %ebx +.Ltmp53: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp53-.L242$pb), %ebx + movl 2460(%esp), %eax + movl -4(%eax), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 2360(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 2360(%esp), %ebp + movl 2364(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 2424(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 2420(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 2416(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 2412(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2408(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 2404(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2400(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2396(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 2392(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 2388(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2384(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 2380(%esp), %edi + movl 2376(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2372(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2368(%esp), %esi + movl %eax, (%esp) + leal 2288(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + addl 2288(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 2292(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 2296(%esp), %esi + movl %esi, %ebp + movl 100(%esp), %eax # 4-byte Reload + adcl 2300(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2304(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2308(%esp), %edi + movl %edi, %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 2312(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 
2316(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2320(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2328(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2332(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2336(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2340(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2344(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 2348(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2352(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 2456(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 2216(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + andl $1, %edi + movl 112(%esp), %ecx # 4-byte Reload + addl 2216(%esp), %ecx + adcl 2220(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2224(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2228(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2232(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 2236(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 2240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2244(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2248(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2252(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2256(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2260(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2268(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 2272(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2276(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 2280(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2144(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %edi + addl 2144(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 2148(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2152(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2156(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2160(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 2164(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 2168(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 2172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2176(%esp), %eax + movl %eax, 
68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2180(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2184(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2188(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2192(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2196(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 2200(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2204(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 2208(%esp), %esi + adcl $0, %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 2072(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 112(%esp), %ecx # 4-byte Reload + addl 2072(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2076(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2080(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2084(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2088(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 2092(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2096(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2100(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2104(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 2108(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 2112(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 2116(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 2120(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 2124(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2128(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 2132(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2136(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2000(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %esi + movl %esi, %eax + movl 112(%esp), %ecx # 4-byte Reload + addl 2000(%esp), %ecx + movl 100(%esp), %ecx # 4-byte Reload + adcl 2004(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 2008(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2012(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 2016(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2020(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2024(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 2028(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 2032(%esp), %ecx + movl 
%ecx, 76(%esp) # 4-byte Spill + adcl 2036(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 2040(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 2044(%esp), %edi + movl 116(%esp), %ecx # 4-byte Reload + adcl 2048(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl 2052(%esp), %ebp + movl 124(%esp), %esi # 4-byte Reload + adcl 2056(%esp), %esi + movl 108(%esp), %ecx # 4-byte Reload + adcl 2060(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 2064(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1928(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 100(%esp), %ecx # 4-byte Reload + addl 1928(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 1932(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1936(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1940(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1944(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1948(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1952(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1956(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1960(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1964(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1968(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 1972(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 1976(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + adcl 1980(%esp), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1984(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1988(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1992(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1856(%esp), %ecx + movl 2460(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + andl $1, %ebp + movl %ebp, %eax + addl 1856(%esp), %esi + movl 88(%esp), %ecx # 4-byte Reload + adcl 1860(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1864(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1868(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1872(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1876(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1880(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1884(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1888(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 1892(%esp), %esi + adcl 1896(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 116(%esp), %ebp # 4-byte 
Reload + adcl 1900(%esp), %ebp + movl 120(%esp), %ecx # 4-byte Reload + adcl 1904(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1908(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1912(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1916(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1920(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1784(%esp), %ecx + movl 2452(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + movl 88(%esp), %ecx # 4-byte Reload + addl 1784(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1792(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 1804(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + adcl 1808(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1816(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1820(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 1824(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1828(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1832(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1836(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1840(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1844(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1848(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1712(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %esi + movl %esi, %ecx + addl 1712(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1716(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1720(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1724(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1728(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1732(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1736(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1740(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 1744(%esp), %edi + movl 96(%esp), %eax # 4-byte Reload + adcl 1748(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1752(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1756(%esp), %eax + movl %eax, 120(%esp) # 4-byte 
Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1760(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 1764(%esp), %ebp + movl 104(%esp), %esi # 4-byte Reload + adcl 1768(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 1772(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1776(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1640(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 80(%esp), %ecx # 4-byte Reload + addl 1640(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 1644(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1648(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1652(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1656(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1660(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1664(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1668(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1672(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1676(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1680(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1684(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 1688(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + adcl 1692(%esp), %esi + movl %esi, %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 1696(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1700(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1704(%esp), %esi + sbbl %eax, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1568(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl 80(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1568(%esp), %ebp + movl 60(%esp), %ecx # 4-byte Reload + adcl 1572(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1576(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1580(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1584(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 1588(%esp), %ebp + movl 84(%esp), %ecx # 4-byte Reload + adcl 1592(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1596(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1600(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1604(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 1608(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1612(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1616(%esp), %ecx + movl %ecx, 
108(%esp) # 4-byte Spill + adcl 1620(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1624(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1628(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl 1632(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1496(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 1496(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1500(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 1504(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 1508(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1512(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 1516(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1520(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1528(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1532(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1540(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1544(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1548(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1552(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1424(%esp), %ecx + movl 2460(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + andl $1, %ebp + movl 60(%esp), %eax # 4-byte Reload + addl 1424(%esp), %eax + movl 72(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1432(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1436(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1440(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1444(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1448(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1452(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1456(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1460(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1464(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 1472(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 1476(%esp), %eax + movl %eax, 
112(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 1480(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1484(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1488(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %ebp + movl 2456(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1352(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 1352(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1360(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1376(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1380(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1384(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1396(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1400(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 1404(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1408(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 1412(%esp), %esi + adcl 1416(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1280(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %ebp + movl %ebp, %eax + addl 1280(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 1284(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 1288(%esp), %ebp + movl 76(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1296(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1300(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1304(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1308(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 1312(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1316(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1320(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1324(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1328(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1332(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1336(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte 
Spill + adcl 1340(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1344(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %edi + movl 2456(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl 2452(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + movl 64(%esp), %ecx # 4-byte Reload + addl 1208(%esp), %ecx + adcl 1212(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1244(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1248(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1260(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 1264(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1272(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1136(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %edi + movl %edi, %ecx + addl 1136(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1148(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %edi # 4-byte Reload + adcl 1164(%esp), %edi + movl 124(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 1188(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 1192(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), 
%eax # 4-byte Reload + adcl 1200(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 1064(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 68(%esp), %ecx # 4-byte Reload + addl 1064(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1080(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 1088(%esp), %edi + movl %edi, 120(%esp) # 4-byte Spill + movl 124(%esp), %esi # 4-byte Reload + adcl 1092(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1116(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl %edi, %eax + andl $1, %eax + addl 992(%esp), %ebp + movl 76(%esp), %ecx # 4-byte Reload + adcl 996(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1000(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1004(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 1008(%esp), %edi + movl 116(%esp), %ebp # 4-byte Reload + adcl 1012(%esp), %ebp + movl 120(%esp), %ecx # 4-byte Reload + adcl 1016(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + adcl 1020(%esp), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1024(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1028(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 1032(%esp), %esi + movl 100(%esp), %ecx # 4-byte Reload + adcl 1036(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1040(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1044(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1048(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1052(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1056(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte 
Spill + movl 2456(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 920(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 76(%esp), %ecx # 4-byte Reload + addl 920(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 932(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + adcl 936(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 956(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 968(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %esi + movl %esi, %eax + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 848(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %edi + movl %edi, %ecx + addl 848(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 856(%esp), %edi + movl 96(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %esi # 4-byte Reload + adcl 868(%esp), %esi + movl 124(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 896(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 776(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 84(%esp), 
%ecx # 4-byte Reload + addl 776(%esp), %ecx + adcl 780(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 784(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 792(%esp), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 800(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl 84(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 704(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 712(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl %ebp, %esi + adcl 728(%esp), %esi + movl 104(%esp), %ebp # 4-byte Reload + adcl 732(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 752(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 92(%esp), %ecx # 4-byte Reload + addl 632(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), 
%eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 652(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl 656(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 664(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 676(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 680(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl 92(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 560(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %esi # 4-byte Reload + adcl 576(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 592(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 608(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 612(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 488(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 96(%esp), %ecx # 4-byte Reload + addl 488(%esp), %ecx + movl 116(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 500(%esp), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 
108(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 508(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 516(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 520(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 536(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl 96(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 416(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + adcl 432(%esp), %edi + adcl 436(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 440(%esp), %esi + movl 100(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 448(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 116(%esp), %ecx # 4-byte Reload + addl 344(%esp), %ecx + movl 120(%esp), %ebp # 4-byte Reload + adcl 348(%esp), %ebp + movl 124(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 356(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 360(%esp), %edi + adcl 364(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl 
%eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 116(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl 116(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 272(%esp), %esi + adcl 276(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 288(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 296(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 308(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 60(%eax), %eax + movl %eax, (%esp) + leal 200(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 120(%esp), %ecx # 4-byte Reload + addl 200(%esp), %ecx + movl 124(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 212(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 220(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 232(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 64(%esp), 
%eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 244(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl 56(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 2460(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + andl $1, %ebp + addl 128(%esp), %esi + movl 104(%esp), %ebx # 4-byte Reload + movl 124(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 136(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 140(%esp), %ebx + movl %ebx, 104(%esp) # 4-byte Spill + movl 112(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 112(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 148(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 152(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 156(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 160(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 164(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 168(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl 172(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 176(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 180(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 184(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 188(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 116(%esp), %edx # 4-byte Reload + adcl 192(%esp), %edx + movl %edx, 116(%esp) # 4-byte Spill + adcl $0, %ebp + movl %eax, %edx + movl 2460(%esp), %edi + subl (%edi), %edx + movl %ecx, %eax + sbbl 4(%edi), %eax + movl %ebx, %ecx + sbbl 8(%edi), %ecx + movl 112(%esp), %ebx # 4-byte Reload + sbbl 12(%edi), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 100(%esp), %ebx # 4-byte Reload + sbbl 16(%edi), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 20(%edi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + sbbl 24(%edi), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + sbbl 28(%edi), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + sbbl 32(%edi), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + sbbl 36(%edi), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + sbbl 40(%edi), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + sbbl 44(%edi), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + sbbl 
48(%edi), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + sbbl 52(%edi), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + sbbl 56(%edi), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + sbbl 60(%edi), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 124(%esp), %edi # 4-byte Reload + sbbl $0, %ebp + andl $1, %ebp + movl %ebp, %ebx + jne .LBB242_2 +# BB#1: + movl %edx, %edi +.LBB242_2: + movl 2448(%esp), %edx + movl %edi, (%edx) + testb %bl, %bl + movl 108(%esp), %edi # 4-byte Reload + jne .LBB242_4 +# BB#3: + movl %eax, %edi +.LBB242_4: + movl %edi, 4(%edx) + jne .LBB242_6 +# BB#5: + movl %ecx, 104(%esp) # 4-byte Spill +.LBB242_6: + movl 104(%esp), %eax # 4-byte Reload + movl %eax, 8(%edx) + jne .LBB242_8 +# BB#7: + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 112(%esp) # 4-byte Spill +.LBB242_8: + movl 112(%esp), %eax # 4-byte Reload + movl %eax, 12(%edx) + movl 100(%esp), %eax # 4-byte Reload + jne .LBB242_10 +# BB#9: + movl 16(%esp), %eax # 4-byte Reload +.LBB242_10: + movl %eax, 16(%edx) + movl 88(%esp), %eax # 4-byte Reload + jne .LBB242_12 +# BB#11: + movl 20(%esp), %eax # 4-byte Reload +.LBB242_12: + movl %eax, 20(%edx) + jne .LBB242_14 +# BB#13: + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill +.LBB242_14: + movl 80(%esp), %eax # 4-byte Reload + movl %eax, 24(%edx) + movl 72(%esp), %eax # 4-byte Reload + jne .LBB242_16 +# BB#15: + movl 28(%esp), %eax # 4-byte Reload +.LBB242_16: + movl %eax, 28(%edx) + movl 64(%esp), %eax # 4-byte Reload + jne .LBB242_18 +# BB#17: + movl 32(%esp), %eax # 4-byte Reload +.LBB242_18: + movl %eax, 32(%edx) + movl 60(%esp), %eax # 4-byte Reload + jne .LBB242_20 +# BB#19: + movl 36(%esp), %eax # 4-byte Reload +.LBB242_20: + movl %eax, 36(%edx) + movl 68(%esp), %eax # 4-byte Reload + jne .LBB242_22 +# BB#21: + movl 40(%esp), %eax # 4-byte Reload +.LBB242_22: + movl %eax, 40(%edx) + movl 76(%esp), %eax # 4-byte Reload + jne .LBB242_24 +# BB#23: + movl 44(%esp), %eax # 4-byte Reload +.LBB242_24: + movl %eax, 44(%edx) + movl 84(%esp), %eax # 4-byte Reload + jne .LBB242_26 +# BB#25: + movl 48(%esp), %eax # 4-byte Reload +.LBB242_26: + movl %eax, 48(%edx) + movl 92(%esp), %eax # 4-byte Reload + jne .LBB242_28 +# BB#27: + movl 52(%esp), %eax # 4-byte Reload +.LBB242_28: + movl %eax, 52(%edx) + movl 96(%esp), %eax # 4-byte Reload + jne .LBB242_30 +# BB#29: + movl 56(%esp), %eax # 4-byte Reload +.LBB242_30: + movl %eax, 56(%edx) + movl 116(%esp), %eax # 4-byte Reload + jne .LBB242_32 +# BB#31: + movl 120(%esp), %eax # 4-byte Reload +.LBB242_32: + movl %eax, 60(%edx) + addl $2428, %esp # imm = 0x97C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end242: + .size mcl_fp_mont16Lbmi2, .Lfunc_end242-mcl_fp_mont16Lbmi2 + + .globl mcl_fp_montNF16Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF16Lbmi2,@function +mcl_fp_montNF16Lbmi2: # @mcl_fp_montNF16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2412, %esp # imm = 0x96C + calll .L243$pb +.L243$pb: + popl %ebx +.Ltmp54: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp54-.L243$pb), %ebx + movl 2444(%esp), %eax + movl -4(%eax), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 2344(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 2344(%esp), %edi + movl 2348(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl %edi, %eax + imull %esi, %eax + movl 2408(%esp), %ecx + movl %ecx, 
100(%esp) # 4-byte Spill + movl 2404(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 2400(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 2396(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2392(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2388(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2384(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 2380(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 2376(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 2372(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 2368(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 2364(%esp), %ebp + movl 2360(%esp), %esi + movl 2356(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2352(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 2272(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 2272(%esp), %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 2276(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2280(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2284(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 2288(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 2292(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 2296(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2300(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 2304(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 2308(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 2312(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2316(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2320(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2324(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 2328(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 2332(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2336(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 2200(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 2264(%esp), %edx + movl 108(%esp), %ecx # 4-byte Reload + addl 2200(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 2204(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2208(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2212(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 2216(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2220(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2224(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 2228(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 2232(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 2236(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 2240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2244(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte 
Reload + adcl 2248(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 2252(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2256(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 2260(%esp), %esi + adcl $0, %edx + movl %edx, 108(%esp) # 4-byte Spill + movl %ecx, %edi + movl %edi, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2128(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 2128(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 2132(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2136(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2140(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2144(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2148(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2152(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 2156(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 2160(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 2164(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2168(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2172(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2176(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2180(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2184(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 2188(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 2192(%esp), %esi + movl 2440(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 2056(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 2120(%esp), %eax + movl 84(%esp), %edx # 4-byte Reload + addl 2056(%esp), %edx + movl 76(%esp), %ecx # 4-byte Reload + adcl 2060(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2064(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 2068(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2072(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 2076(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 2080(%esp), %edi + movl %edi, %ebp + movl 52(%esp), %ecx # 4-byte Reload + adcl 2084(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 2088(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2092(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 2096(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 2100(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 2104(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 2108(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 2112(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl 2116(%esp), 
%esi + movl %esi, 108(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1984(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1984(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 1988(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1992(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1996(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2000(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 2004(%esp), %edi + adcl 2008(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 2012(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 2016(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2020(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2024(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2028(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2032(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 2036(%esp), %ebp + movl 100(%esp), %esi # 4-byte Reload + adcl 2040(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 2044(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2048(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1912(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1976(%esp), %eax + movl 76(%esp), %edx # 4-byte Reload + addl 1912(%esp), %edx + movl 80(%esp), %ecx # 4-byte Reload + adcl 1916(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1920(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1924(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 1928(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1932(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1936(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1940(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1944(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 1948(%esp), %edi + movl 96(%esp), %ecx # 4-byte Reload + adcl 1952(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1956(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1960(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + adcl 1964(%esp), %esi + movl 108(%esp), %ecx # 4-byte Reload + adcl 1968(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1972(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1840(%esp), %ecx + movl 2444(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + addl 1840(%esp), %ebp + movl 80(%esp), %eax # 4-byte 
Reload + adcl 1844(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1848(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1852(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1856(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1860(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1864(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1868(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1872(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1876(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 1880(%esp), %edi + movl 92(%esp), %ebp # 4-byte Reload + adcl 1884(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 1888(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1892(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1896(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1900(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 1904(%esp), %esi + movl 2440(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1768(%esp), %ecx + movl 2436(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + movl 1832(%esp), %edx + movl 80(%esp), %ecx # 4-byte Reload + addl 1768(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 1772(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1776(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1780(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1784(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1792(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1804(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + adcl 1808(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1816(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1820(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1824(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1828(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1696(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1696(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 1700(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1704(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1708(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), 
%eax # 4-byte Reload + adcl 1712(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1716(%esp), %ebp + movl 56(%esp), %edi # 4-byte Reload + adcl 1720(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 1724(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1728(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1732(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1736(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 1740(%esp), %esi + movl 100(%esp), %eax # 4-byte Reload + adcl 1744(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1748(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1752(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1756(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1760(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1624(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1688(%esp), %edx + movl 68(%esp), %ecx # 4-byte Reload + addl 1624(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 1628(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1632(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1636(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 1640(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 1644(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 1648(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 1652(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1656(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1660(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1664(%esp), %esi + movl %esi, %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1668(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1672(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1676(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1680(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1684(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1552(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1552(%esp), %esi + movl 64(%esp), %esi # 4-byte Reload + adcl 1556(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1564(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1568(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1572(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1576(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1580(%esp), %eax + movl %eax, 
88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1584(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1588(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1592(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1596(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 1600(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 1604(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1608(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1612(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 1616(%esp), %edi + movl 2440(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1480(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1544(%esp), %eax + addl 1480(%esp), %esi + movl 60(%esp), %edx # 4-byte Reload + adcl 1484(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 1488(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 1492(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 1496(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 1500(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 1504(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 1508(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 1512(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 104(%esp), %edx # 4-byte Reload + adcl 1516(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 1520(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + adcl 1524(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 1528(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 1532(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 1536(%esp), %ebp + adcl 1540(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl %eax, %edi + adcl $0, %edi + movl %esi, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1408(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1408(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 1412(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 1416(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1432(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1436(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1440(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1444(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1448(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), 
%eax # 4-byte Reload + adcl 1452(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1456(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1460(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1464(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 1468(%esp), %ebp + adcl 1472(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1336(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1400(%esp), %eax + movl 60(%esp), %ecx # 4-byte Reload + addl 1336(%esp), %ecx + adcl 1340(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 1344(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 1348(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 1352(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 1356(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 1360(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 1364(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 104(%esp), %edx # 4-byte Reload + adcl 1368(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 1372(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 108(%esp), %edx # 4-byte Reload + adcl 1376(%esp), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 1380(%esp), %edi + movl 76(%esp), %esi # 4-byte Reload + adcl 1384(%esp), %esi + movl 80(%esp), %edx # 4-byte Reload + adcl 1388(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + adcl 1392(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 1396(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1264(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1264(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1308(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + adcl 1312(%esp), %esi + movl %esi, %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1320(%esp), %eax + movl %eax, 
68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1324(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1328(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 1192(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1256(%esp), %eax + movl 48(%esp), %ecx # 4-byte Reload + addl 1192(%esp), %ecx + movl 52(%esp), %edx # 4-byte Reload + adcl 1196(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 1200(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 1204(%esp), %esi + movl 88(%esp), %edx # 4-byte Reload + adcl 1208(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 1212(%esp), %edi + movl 92(%esp), %edx # 4-byte Reload + adcl 1216(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 104(%esp), %edx # 4-byte Reload + adcl 1220(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 1224(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 108(%esp), %edx # 4-byte Reload + adcl 1228(%esp), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 1232(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + adcl 1236(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 1240(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 1244(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 1248(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 1252(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1120(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1120(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1132(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1140(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 1144(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 1148(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 1176(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 36(%eax), 
%eax + movl %eax, (%esp) + leal 1048(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1112(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + addl 1048(%esp), %ecx + movl 56(%esp), %esi # 4-byte Reload + adcl 1052(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 1068(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 1076(%esp), %ebp + movl 108(%esp), %eax # 4-byte Reload + adcl 1080(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1088(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1100(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 976(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 976(%esp), %edi + adcl 980(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 1000(%esp), %edi + adcl 1004(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 1008(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 1016(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 904(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 968(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + addl 904(%esp), %eax + movl 72(%esp), %edx # 4-byte Reload + adcl 908(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 912(%esp), %edx + movl %edx, 
88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 916(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 920(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + adcl 924(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 928(%esp), %edi + adcl 932(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 936(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + adcl 940(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 944(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 948(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 952(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 956(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 960(%esp), %ebp + movl 52(%esp), %edx # 4-byte Reload + adcl 964(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl %eax, %esi + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 832(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 832(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 856(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 872(%esp), %esi + movl 68(%esp), %edi # 4-byte Reload + adcl 876(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 888(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 892(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 824(%esp), %edx + movl 72(%esp), %ecx # 4-byte Reload + addl 760(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 
84(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 796(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 800(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 808(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 816(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 688(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 688(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 716(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 732(%esp), %ebp + adcl 736(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 616(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 680(%esp), %edx + movl 88(%esp), %ecx # 4-byte Reload + addl 616(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 624(%esp), %edi + movl 104(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 640(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 656(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 
4-byte Reload + adcl 660(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 672(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 88(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 544(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 552(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 560(%esp), %edi + movl 108(%esp), %esi # 4-byte Reload + adcl 564(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 600(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 536(%esp), %edx + movl 96(%esp), %ecx # 4-byte Reload + addl 472(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 484(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + adcl 488(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 496(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl 
%eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 96(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 400(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 400(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 412(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 420(%esp), %edi + adcl 424(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 444(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 392(%esp), %edx + movl 92(%esp), %ecx # 4-byte Reload + addl 328(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 336(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 344(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 352(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 368(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 92(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 256(%esp), %ecx + movl 2444(%esp), %edx + calll 
.LmulPv512x32 + addl 256(%esp), %ebp + movl 104(%esp), %edi # 4-byte Reload + adcl 260(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 268(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 280(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 284(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 60(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 248(%esp), %edx + movl %edi, %ecx + addl 184(%esp), %ecx + movl 100(%esp), %edi # 4-byte Reload + adcl 188(%esp), %edi + adcl 192(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 196(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 208(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 112(%esp), %esi + movl %edi, %eax + adcl 116(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + adcl 120(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + adcl 124(%esp), %ebp + movl 76(%esp), %ecx # 4-byte Reload + adcl 128(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl %ecx, %ebx + movl 
80(%esp), %ecx # 4-byte Reload + adcl 132(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 136(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 140(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 144(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 148(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 152(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 156(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 160(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 164(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 168(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 172(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 176(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl %eax, %edx + movl 2444(%esp), %esi + subl (%esi), %edx + sbbl 4(%esi), %edi + movl %ebp, %ecx + sbbl 8(%esi), %ecx + movl %ebx, %eax + sbbl 12(%esi), %eax + movl 80(%esp), %ebx # 4-byte Reload + sbbl 16(%esi), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + movl 68(%esp), %ebx # 4-byte Reload + sbbl 20(%esi), %ebx + movl %ebx, 8(%esp) # 4-byte Spill + movl 64(%esp), %ebx # 4-byte Reload + sbbl 24(%esi), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 60(%esp), %ebx # 4-byte Reload + sbbl 28(%esi), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 48(%esp), %ebx # 4-byte Reload + sbbl 32(%esi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 52(%esp), %ebx # 4-byte Reload + sbbl 36(%esi), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %ebx # 4-byte Reload + sbbl 40(%esi), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 72(%esp), %ebx # 4-byte Reload + sbbl 44(%esi), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 48(%esi), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 96(%esp), %ebx # 4-byte Reload + sbbl 52(%esi), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 92(%esp), %ebx # 4-byte Reload + sbbl 56(%esi), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 104(%esp), %ebx # 4-byte Reload + sbbl 60(%esi), %ebx + movl %ebx, 84(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + testl %ebx, %ebx + js .LBB243_2 +# BB#1: + movl %edx, %esi +.LBB243_2: + movl 2432(%esp), %edx + movl %esi, (%edx) + movl 108(%esp), %esi # 4-byte Reload + js .LBB243_4 +# BB#3: + movl %edi, %esi +.LBB243_4: + movl %esi, 4(%edx) + js .LBB243_6 +# BB#5: + movl %ecx, %ebp +.LBB243_6: + movl %ebp, 8(%edx) + movl 76(%esp), %ecx # 4-byte Reload + js .LBB243_8 +# BB#7: + movl %eax, %ecx +.LBB243_8: + movl %ecx, 12(%edx) + movl 80(%esp), %eax # 4-byte Reload + js .LBB243_10 +# BB#9: + movl 4(%esp), %eax # 4-byte Reload +.LBB243_10: + movl %eax, 16(%edx) + movl 68(%esp), %eax # 4-byte Reload + js .LBB243_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB243_12: + movl %eax, 20(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB243_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB243_14: + movl %eax, 24(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB243_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB243_16: + movl %eax, 28(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB243_18 
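+# Tail of mcl_fp_montNF16Lbmi2: the sbbl chain above computed t - p of the
+# Montgomery product into stack spills; each js-guarded move below selects,
+# per 4-byte limb, the canonical result (t when the subtraction went
+# negative, otherwise t - p) and stores it through the result pointer
+# loaded from 2432(%esp) into %edx.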
+# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB243_18: + movl %eax, 32(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB243_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB243_20: + movl %eax, 36(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB243_22 +# BB#21: + movl 28(%esp), %eax # 4-byte Reload +.LBB243_22: + movl %eax, 40(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB243_24 +# BB#23: + movl 32(%esp), %eax # 4-byte Reload +.LBB243_24: + movl %eax, 44(%edx) + movl 88(%esp), %eax # 4-byte Reload + js .LBB243_26 +# BB#25: + movl 36(%esp), %eax # 4-byte Reload +.LBB243_26: + movl %eax, 48(%edx) + movl 96(%esp), %eax # 4-byte Reload + js .LBB243_28 +# BB#27: + movl 40(%esp), %eax # 4-byte Reload +.LBB243_28: + movl %eax, 52(%edx) + movl 92(%esp), %eax # 4-byte Reload + js .LBB243_30 +# BB#29: + movl 44(%esp), %eax # 4-byte Reload +.LBB243_30: + movl %eax, 56(%edx) + movl 104(%esp), %eax # 4-byte Reload + js .LBB243_32 +# BB#31: + movl 84(%esp), %eax # 4-byte Reload +.LBB243_32: + movl %eax, 60(%edx) + addl $2412, %esp # imm = 0x96C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end243: + .size mcl_fp_montNF16Lbmi2, .Lfunc_end243-mcl_fp_montNF16Lbmi2 + + .globl mcl_fp_montRed16Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed16Lbmi2,@function +mcl_fp_montRed16Lbmi2: # @mcl_fp_montRed16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1356, %esp # imm = 0x54C + calll .L244$pb +.L244$pb: + popl %eax +.Ltmp55: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp55-.L244$pb), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1384(%esp), %edx + movl -4(%edx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1380(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 112(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 120(%esp) # 4-byte Spill + imull %eax, %ebx + movl 124(%ecx), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%ecx), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 112(%ecx), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 108(%ecx), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 104(%ecx), %esi + movl %esi, 152(%esp) # 4-byte Spill + movl 100(%ecx), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 96(%ecx), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 92(%ecx), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 88(%ecx), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 84(%ecx), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 80(%ecx), %edi + movl %edi, 148(%esp) # 4-byte Spill + movl 76(%ecx), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl %esi, 192(%esp) # 4-byte Spill + movl 68(%ecx), %edi + movl %edi, 204(%esp) # 4-byte Spill + movl 64(%ecx), %esi + movl %esi, 200(%esp) # 4-byte Spill + movl 60(%ecx), %edi + movl %edi, 180(%esp) # 4-byte Spill + movl 56(%ecx), %edi + movl %edi, 164(%esp) # 4-byte Spill + movl 52(%ecx), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 44(%ecx), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 40(%ecx), %ebp + movl 36(%ecx), %edi + movl 32(%ecx), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 28(%ecx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 24(%ecx), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 20(%ecx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 16(%ecx), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 12(%ecx), %esi + movl 8(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl (%edx), %ecx + movl %ecx, 
24(%esp) # 4-byte Spill + movl 60(%edx), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 56(%edx), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 52(%edx), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 1288(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + movl 112(%esp), %eax # 4-byte Reload + addl 1288(%esp), %eax + movl 120(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1300(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1312(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1320(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1324(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + adcl 1328(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + adcl $0, 204(%esp) # 4-byte Folded Spill + adcl $0, 192(%esp) # 4-byte Folded Spill + movl 196(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + movl 132(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + sbbl %eax, %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1216(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + movl 112(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 1216(%esp), %esi + movl 
76(%esp), %edx # 4-byte Reload + adcl 1220(%esp), %edx + movl 80(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1244(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1248(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %esi # 4-byte Reload + adcl 1260(%esp), %esi + movl 160(%esp), %eax # 4-byte Reload + adcl 1264(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + adcl $0, 192(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 196(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + movl 156(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 132(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1144(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 1144(%esp), %ebp + movl 80(%esp), %ecx # 4-byte Reload + adcl 1148(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl 1184(%esp), %esi + movl %esi, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 1188(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 
164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1200(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1204(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1208(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + adcl $0, 196(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + movl 168(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 156(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + movl 128(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1072(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 1072(%esp), %esi + movl 84(%esp), %ecx # 4-byte Reload + adcl 1076(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 1080(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1088(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 168(%esp) # 4-byte Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 128(%esp) # 4-byte Spill + movl 124(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + 
movl %eax, (%esp) + leal 1000(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 1000(%esp), %edi + movl 88(%esp), %ecx # 4-byte Reload + adcl 1004(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1044(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + movl 188(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + movl 172(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 124(%esp) # 4-byte Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 928(%esp), %esi + movl 92(%esp), %ecx # 4-byte Reload + adcl 932(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 936(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 200(%esp) # 
4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 188(%esp) # 4-byte Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 172(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + movl 144(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + movl 100(%esp), %ebp # 4-byte Reload + imull %ebp, %eax + movl %eax, (%esp) + leal 856(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 856(%esp), %edi + movl 96(%esp), %ecx # 4-byte Reload + adcl 860(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + movl 176(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 144(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull %ebp, %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 784(%esp), %esi + movl 104(%esp), %ecx # 4-byte Reload + adcl 788(%esp), %ecx + movl 
108(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %ebp # 4-byte Reload + adcl 828(%esp), %ebp + movl 196(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 176(%esp) # 4-byte Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + movl 156(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 712(%esp), %edi + movl 108(%esp), %ecx # 4-byte Reload + adcl 716(%esp), %ecx + movl 120(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + adcl 752(%esp), %ebp + movl %ebp, 192(%esp) # 4-byte Spill + movl 196(%esp), %edi # 4-byte Reload + adcl 756(%esp), %edi + movl 148(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl 
%eax, 176(%esp) # 4-byte Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 156(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 640(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 640(%esp), %esi + movl 120(%esp), %ecx # 4-byte Reload + adcl 644(%esp), %ecx + movl 140(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %esi # 4-byte Reload + adcl 668(%esp), %esi + movl 204(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + adcl 680(%esp), %edi + movl %edi, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 152(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 1384(%esp), %eax + movl %eax, %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 568(%esp), %ebp + movl 140(%esp), %ecx # 4-byte Reload + adcl 572(%esp), %ecx + movl 136(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %ebp # 4-byte Reload + adcl 588(%esp), %ebp + adcl 592(%esp), %esi + movl %esi, 200(%esp) # 4-byte Spill + movl 204(%esp), %esi # 4-byte Reload + adcl 596(%esp), %esi + movl 192(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 
184(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + adcl 632(%esp), %edi + movl %edi, 152(%esp) # 4-byte Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 496(%esp), %edi + movl 136(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl 160(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %edi # 4-byte Reload + adcl 508(%esp), %edi + adcl 512(%esp), %ebp + movl %ebp, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + adcl 520(%esp), %esi + movl %esi, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %ebp # 4-byte Reload + adcl 532(%esp), %ebp + movl 184(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 424(%esp), %esi + movl 160(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + adcl 432(%esp), %edi + movl %edi, 164(%esp) # 4-byte Spill + movl 180(%esp), %ecx # 4-byte Reload + adcl 436(%esp), %ecx + movl %ecx, 180(%esp) # 4-byte Spill + movl 200(%esp), %ecx # 4-byte Reload + adcl 440(%esp), %ecx + movl %ecx, 200(%esp) # 4-byte Spill + movl 204(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl %ecx, 204(%esp) # 4-byte Spill + movl 192(%esp), %ecx # 4-byte Reload + adcl 448(%esp), %ecx + movl %ecx, 192(%esp) # 4-byte Spill + movl 196(%esp), %ecx # 4-byte Reload + adcl 452(%esp), %ecx + movl %ecx, 196(%esp) # 4-byte Spill + adcl 456(%esp), %ebp + 
movl 184(%esp), %ecx # 4-byte Reload + adcl 460(%esp), %ecx + movl %ecx, 184(%esp) # 4-byte Spill + movl 188(%esp), %ecx # 4-byte Reload + adcl 464(%esp), %ecx + movl %ecx, 188(%esp) # 4-byte Spill + movl 168(%esp), %ecx # 4-byte Reload + adcl 468(%esp), %ecx + movl %ecx, 168(%esp) # 4-byte Spill + movl 176(%esp), %ecx # 4-byte Reload + adcl 472(%esp), %ecx + movl %ecx, 176(%esp) # 4-byte Spill + movl 172(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 172(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 480(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 156(%esp), %ecx # 4-byte Reload + adcl 484(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 144(%esp), %ecx # 4-byte Reload + adcl 488(%esp), %ecx + movl %ecx, 144(%esp) # 4-byte Spill + movl 132(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %eax, %esi + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 352(%esp), %esi + movl 164(%esp), %esi # 4-byte Reload + adcl 356(%esp), %esi + movl 180(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + adcl 380(%esp), %ebp + movl 184(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + adcl 416(%esp), %edi + movl %edi, 132(%esp) # 4-byte Spill + movl 128(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 280(%esp), %esi + movl 180(%esp), %ecx # 4-byte Reload + adcl 284(%esp), %ecx + movl 200(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + adcl 304(%esp), %ebp + movl %ebp, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 
4-byte Reload + adcl 308(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %esi # 4-byte Reload + adcl 316(%esp), %esi + movl 176(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 344(%esp), %edi + movl %edi, 128(%esp) # 4-byte Spill + movl 124(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 112(%esp) # 4-byte Folded Spill + movl 100(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %ebp + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 208(%esp), %ebp + movl 200(%esp), %edx # 4-byte Reload + adcl 212(%esp), %edx + movl %edx, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %ecx # 4-byte Reload + adcl 220(%esp), %ecx + movl %ecx, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %ebp # 4-byte Reload + adcl 228(%esp), %ebp + movl %ebp, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl %eax, %ebx + movl 188(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + adcl 240(%esp), %esi + movl %esi, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl 272(%esp), %edi + movl %edi, 124(%esp) # 4-byte Spill + movl 112(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %edx, %eax + subl 24(%esp), %edx # 4-byte Folded Reload + movl 204(%esp), %esi # 4-byte Reload + sbbl 12(%esp), %esi # 4-byte Folded Reload + sbbl 16(%esp), %ecx # 4-byte Folded Reload + movl 196(%esp), %eax # 4-byte Reload + sbbl 20(%esp), %eax # 4-byte Folded Reload + sbbl 28(%esp), %ebp # 4-byte Folded Reload + sbbl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 100(%esp) # 4-byte Spill + movl 188(%esp), %ebx # 4-byte Reload + sbbl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 104(%esp) # 4-byte Spill + movl 168(%esp), %ebx # 4-byte Reload + sbbl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 108(%esp) # 4-byte Spill + movl 176(%esp), %ebx # 4-byte Reload + sbbl 44(%esp), %ebx # 4-byte Folded Reload + 
movl %ebx, 112(%esp) # 4-byte Spill + movl 172(%esp), %ebx # 4-byte Reload + sbbl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 116(%esp) # 4-byte Spill + movl 152(%esp), %ebx # 4-byte Reload + sbbl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 120(%esp) # 4-byte Spill + movl 156(%esp), %ebx # 4-byte Reload + sbbl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 136(%esp) # 4-byte Spill + movl 144(%esp), %ebx # 4-byte Reload + sbbl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 140(%esp) # 4-byte Spill + movl 132(%esp), %ebx # 4-byte Reload + sbbl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 160(%esp) # 4-byte Spill + movl 128(%esp), %ebx # 4-byte Reload + sbbl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 164(%esp) # 4-byte Spill + movl 124(%esp), %ebx # 4-byte Reload + sbbl 72(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 180(%esp) # 4-byte Spill + sbbl $0, %edi + andl $1, %edi + movl %edi, %ebx + jne .LBB244_2 +# BB#1: + movl %edx, 200(%esp) # 4-byte Spill +.LBB244_2: + movl 1376(%esp), %edx + movl 200(%esp), %edi # 4-byte Reload + movl %edi, (%edx) + testb %bl, %bl + jne .LBB244_4 +# BB#3: + movl %esi, 204(%esp) # 4-byte Spill +.LBB244_4: + movl 204(%esp), %esi # 4-byte Reload + movl %esi, 4(%edx) + movl 192(%esp), %esi # 4-byte Reload + jne .LBB244_6 +# BB#5: + movl %ecx, %esi +.LBB244_6: + movl %esi, 8(%edx) + movl 196(%esp), %ecx # 4-byte Reload + jne .LBB244_8 +# BB#7: + movl %eax, %ecx +.LBB244_8: + movl %ecx, 12(%edx) + movl 128(%esp), %esi # 4-byte Reload + movl 148(%esp), %eax # 4-byte Reload + jne .LBB244_10 +# BB#9: + movl %ebp, %eax +.LBB244_10: + movl %eax, 16(%edx) + movl 124(%esp), %ecx # 4-byte Reload + movl 176(%esp), %eax # 4-byte Reload + movl 184(%esp), %ebp # 4-byte Reload + jne .LBB244_12 +# BB#11: + movl 100(%esp), %ebp # 4-byte Reload +.LBB244_12: + movl %ebp, 20(%edx) + movl 152(%esp), %ebp # 4-byte Reload + movl 188(%esp), %ebx # 4-byte Reload + jne .LBB244_14 +# BB#13: + movl 104(%esp), %ebx # 4-byte Reload +.LBB244_14: + movl %ebx, 24(%edx) + movl 156(%esp), %ebx # 4-byte Reload + movl 168(%esp), %edi # 4-byte Reload + jne .LBB244_16 +# BB#15: + movl 108(%esp), %edi # 4-byte Reload +.LBB244_16: + movl %edi, 28(%edx) + movl 144(%esp), %edi # 4-byte Reload + jne .LBB244_18 +# BB#17: + movl 112(%esp), %eax # 4-byte Reload +.LBB244_18: + movl %eax, 32(%edx) + jne .LBB244_20 +# BB#19: + movl 116(%esp), %eax # 4-byte Reload + movl %eax, 172(%esp) # 4-byte Spill +.LBB244_20: + movl 172(%esp), %eax # 4-byte Reload + movl %eax, 36(%edx) + jne .LBB244_22 +# BB#21: + movl 120(%esp), %ebp # 4-byte Reload +.LBB244_22: + movl %ebp, 40(%edx) + movl 132(%esp), %eax # 4-byte Reload + jne .LBB244_24 +# BB#23: + movl 136(%esp), %ebx # 4-byte Reload +.LBB244_24: + movl %ebx, 44(%edx) + jne .LBB244_26 +# BB#25: + movl 140(%esp), %edi # 4-byte Reload +.LBB244_26: + movl %edi, 48(%edx) + jne .LBB244_28 +# BB#27: + movl 160(%esp), %eax # 4-byte Reload +.LBB244_28: + movl %eax, 52(%edx) + jne .LBB244_30 +# BB#29: + movl 164(%esp), %esi # 4-byte Reload +.LBB244_30: + movl %esi, 56(%edx) + jne .LBB244_32 +# BB#31: + movl 180(%esp), %ecx # 4-byte Reload +.LBB244_32: + movl %ecx, 60(%edx) + addl $1356, %esp # imm = 0x54C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end244: + .size mcl_fp_montRed16Lbmi2, .Lfunc_end244-mcl_fp_montRed16Lbmi2 + + .globl mcl_fp_addPre16Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre16Lbmi2,@function +mcl_fp_addPre16Lbmi2: # @mcl_fp_addPre16Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax 
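+# 16-limb (512-bit) addition with no modular reduction: z = x + y, computed
+# limb by limb with addl/adcl; the trailing sbbl/andl materializes the
+# carry-out as 0 or 1 in %eax as the return value.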
+ movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %ebx + adcl 8(%ecx), %ebx + movl 16(%esp), %edi + movl %edx, (%edi) + movl 12(%ecx), %edx + movl %esi, 4(%edi) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %ebx, 8(%edi) + movl 20(%eax), %ebx + movl %edx, 12(%edi) + movl 20(%ecx), %edx + adcl %ebx, %edx + movl 24(%eax), %ebx + movl %esi, 16(%edi) + movl 24(%ecx), %esi + adcl %ebx, %esi + movl 28(%eax), %ebx + movl %edx, 20(%edi) + movl 28(%ecx), %edx + adcl %ebx, %edx + movl 32(%eax), %ebx + movl %esi, 24(%edi) + movl 32(%ecx), %esi + adcl %ebx, %esi + movl 36(%eax), %ebx + movl %edx, 28(%edi) + movl 36(%ecx), %edx + adcl %ebx, %edx + movl 40(%eax), %ebx + movl %esi, 32(%edi) + movl 40(%ecx), %esi + adcl %ebx, %esi + movl 44(%eax), %ebx + movl %edx, 36(%edi) + movl 44(%ecx), %edx + adcl %ebx, %edx + movl 48(%eax), %ebx + movl %esi, 40(%edi) + movl 48(%ecx), %esi + adcl %ebx, %esi + movl 52(%eax), %ebx + movl %edx, 44(%edi) + movl 52(%ecx), %edx + adcl %ebx, %edx + movl 56(%eax), %ebx + movl %esi, 48(%edi) + movl 56(%ecx), %esi + adcl %ebx, %esi + movl %edx, 52(%edi) + movl %esi, 56(%edi) + movl 60(%eax), %eax + movl 60(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 60(%edi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end245: + .size mcl_fp_addPre16Lbmi2, .Lfunc_end245-mcl_fp_addPre16Lbmi2 + + .globl mcl_fp_subPre16Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre16Lbmi2,@function +mcl_fp_subPre16Lbmi2: # @mcl_fp_subPre16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebp + sbbl 8(%edx), %ebp + movl 20(%esp), %ebx + movl %esi, (%ebx) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebx) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebp, 8(%ebx) + movl 20(%edx), %ebp + movl %esi, 12(%ebx) + movl 20(%ecx), %esi + sbbl %ebp, %esi + movl 24(%edx), %ebp + movl %edi, 16(%ebx) + movl 24(%ecx), %edi + sbbl %ebp, %edi + movl 28(%edx), %ebp + movl %esi, 20(%ebx) + movl 28(%ecx), %esi + sbbl %ebp, %esi + movl 32(%edx), %ebp + movl %edi, 24(%ebx) + movl 32(%ecx), %edi + sbbl %ebp, %edi + movl 36(%edx), %ebp + movl %esi, 28(%ebx) + movl 36(%ecx), %esi + sbbl %ebp, %esi + movl 40(%edx), %ebp + movl %edi, 32(%ebx) + movl 40(%ecx), %edi + sbbl %ebp, %edi + movl 44(%edx), %ebp + movl %esi, 36(%ebx) + movl 44(%ecx), %esi + sbbl %ebp, %esi + movl 48(%edx), %ebp + movl %edi, 40(%ebx) + movl 48(%ecx), %edi + sbbl %ebp, %edi + movl 52(%edx), %ebp + movl %esi, 44(%ebx) + movl 52(%ecx), %esi + sbbl %ebp, %esi + movl 56(%edx), %ebp + movl %edi, 48(%ebx) + movl 56(%ecx), %edi + sbbl %ebp, %edi + movl %esi, 52(%ebx) + movl %edi, 56(%ebx) + movl 60(%edx), %edx + movl 60(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 60(%ebx) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end246: + .size mcl_fp_subPre16Lbmi2, .Lfunc_end246-mcl_fp_subPre16Lbmi2 + + .globl mcl_fp_shr1_16Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_16Lbmi2,@function +mcl_fp_shr1_16Lbmi2: # @mcl_fp_shr1_16Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 
16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 40(%ecx) + movl 48(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 44(%ecx) + movl 52(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 48(%ecx) + movl 56(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 52(%ecx) + movl 60(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 56(%ecx) + shrl %eax + movl %eax, 60(%ecx) + popl %esi + retl +.Lfunc_end247: + .size mcl_fp_shr1_16Lbmi2, .Lfunc_end247-mcl_fp_shr1_16Lbmi2 + + .globl mcl_fp_add16Lbmi2 + .align 16, 0x90 + .type mcl_fp_add16Lbmi2,@function +mcl_fp_add16Lbmi2: # @mcl_fp_add16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 84(%esp), %edx + movl (%edx), %esi + movl 4(%edx), %ebp + movl 80(%esp), %ecx + addl (%ecx), %esi + movl %esi, %ebx + adcl 4(%ecx), %ebp + movl 8(%edx), %eax + adcl 8(%ecx), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 12(%ecx), %esi + movl 16(%ecx), %edi + adcl 12(%edx), %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl 16(%edx), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 20(%ecx), %eax + adcl 20(%edx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%ecx), %eax + adcl 24(%edx), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%ecx), %eax + adcl 28(%edx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%ecx), %eax + adcl 32(%edx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%ecx), %eax + adcl 36(%edx), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 40(%ecx), %eax + adcl 40(%edx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 44(%ecx), %eax + adcl 44(%edx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 48(%ecx), %eax + adcl 48(%edx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 52(%ecx), %eax + adcl 52(%edx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 56(%ecx), %esi + adcl 56(%edx), %esi + movl 60(%ecx), %ecx + adcl 60(%edx), %ecx + movl 76(%esp), %edx + movl %ebx, (%edx) + movl %ebx, %eax + movl %ebp, 4(%edx) + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%edx) + movl 48(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%edx) + movl %edi, 16(%edx) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 20(%edx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 24(%edx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 28(%edx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 32(%edx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 36(%edx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 40(%edx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 44(%edx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 48(%edx) + movl 8(%esp), %edi # 4-byte Reload + movl %edi, 52(%edx) + movl %esi, 56(%edx) + movl %ecx, 60(%edx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 88(%esp), %edi + subl (%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 4(%edi), %ebp + movl %ebp, (%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 8(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + sbbl 12(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + sbbl 16(%edi), 
%eax + movl %eax, 12(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + sbbl 20(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + sbbl 24(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + sbbl 44(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + sbbl 48(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 8(%esp), %eax # 4-byte Reload + sbbl 52(%edi), %eax + movl %eax, %ebp + sbbl 56(%edi), %esi + sbbl 60(%edi), %ecx + sbbl $0, %ebx + testb $1, %bl + jne .LBB248_2 +# BB#1: # %nocarry + movl 4(%esp), %edi # 4-byte Reload + movl %edi, (%edx) + movl (%esp), %edi # 4-byte Reload + movl %edi, 4(%edx) + movl 52(%esp), %edi # 4-byte Reload + movl %edi, 8(%edx) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 12(%edx) + movl 12(%esp), %edi # 4-byte Reload + movl %edi, 16(%edx) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 20(%edx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 24(%edx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 28(%edx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 32(%edx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 36(%edx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 40(%edx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 44(%edx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 48(%edx) + movl %ebp, 52(%edx) + movl %esi, 56(%edx) + movl %ecx, 60(%edx) +.LBB248_2: # %carry + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end248: + .size mcl_fp_add16Lbmi2, .Lfunc_end248-mcl_fp_add16Lbmi2 + + .globl mcl_fp_addNF16Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF16Lbmi2,@function +mcl_fp_addNF16Lbmi2: # @mcl_fp_addNF16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $124, %esp + movl 152(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %ecx + movl 148(%esp), %esi + addl (%esi), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 4(%esi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 60(%edx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 56(%edx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 52(%edx), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 48(%edx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 44(%edx), %edi + movl 40(%edx), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 36(%edx), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 32(%edx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 28(%edx), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 24(%edx), %eax + movl 20(%edx), %ebp + movl 16(%edx), %ebx + movl 12(%edx), %ecx + movl 8(%edx), %edx + adcl 8(%esi), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl 12(%esi), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 16(%esi), %ebx + movl %ebx, 68(%esp) # 4-byte Spill + adcl 20(%esi), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + adcl 24(%esi), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 28(%esi), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 32(%esi), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 120(%esp), %eax # 
4-byte Reload + adcl 36(%esi), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esi), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 44(%esi), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 48(%esi), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 52(%esi), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 56(%esi), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 60(%esi), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 156(%esp), %edi + movl 80(%esp), %esi # 4-byte Reload + subl (%edi), %esi + movl 84(%esp), %eax # 4-byte Reload + sbbl 4(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 8(%edi), %edx + movl %edx, 4(%esp) # 4-byte Spill + sbbl 12(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 16(%edi), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + sbbl 20(%edi), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + sbbl 24(%edi), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + movl %eax, %ecx + sbbl 44(%edi), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + sbbl 48(%edi), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 52(%edi), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + movl %ecx, %ebx + sbbl 56(%edi), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 112(%esp), %ebx # 4-byte Reload + sbbl 60(%edi), %ebx + movl 80(%esp), %edi # 4-byte Reload + movl %ebx, 56(%esp) # 4-byte Spill + testl %ebx, %ebx + js .LBB249_2 +# BB#1: + movl %esi, %edi +.LBB249_2: + movl 144(%esp), %ebx + movl %edi, (%ebx) + movl 84(%esp), %edx # 4-byte Reload + js .LBB249_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload +.LBB249_4: + movl %edx, 4(%ebx) + movl 68(%esp), %edx # 4-byte Reload + movl 60(%esp), %eax # 4-byte Reload + js .LBB249_6 +# BB#5: + movl 4(%esp), %eax # 4-byte Reload +.LBB249_6: + movl %eax, 8(%ebx) + movl 100(%esp), %eax # 4-byte Reload + movl 88(%esp), %ecx # 4-byte Reload + movl 64(%esp), %esi # 4-byte Reload + js .LBB249_8 +# BB#7: + movl 8(%esp), %esi # 4-byte Reload +.LBB249_8: + movl %esi, 12(%ebx) + movl 108(%esp), %esi # 4-byte Reload + js .LBB249_10 +# BB#9: + movl 12(%esp), %edx # 4-byte Reload +.LBB249_10: + movl %edx, 16(%ebx) + movl 112(%esp), %edi # 4-byte Reload + movl 104(%esp), %ebp # 4-byte Reload + js .LBB249_12 +# BB#11: + movl 16(%esp), %edx # 4-byte Reload + movl %edx, 72(%esp) # 4-byte Spill +.LBB249_12: + movl 72(%esp), %edx # 4-byte Reload + movl %edx, 20(%ebx) + js .LBB249_14 +# BB#13: + movl 20(%esp), %ecx # 4-byte Reload +.LBB249_14: + movl %ecx, 24(%ebx) + js .LBB249_16 +# BB#15: + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 116(%esp) # 4-byte Spill +.LBB249_16: + movl 116(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%ebx) + js .LBB249_18 +# BB#17: + movl 28(%esp), %eax # 4-byte Reload +.LBB249_18: + movl %eax, 32(%ebx) + movl 96(%esp), %ecx # 4-byte Reload + js .LBB249_20 +# BB#19: + movl 32(%esp), 
%eax # 4-byte Reload + movl %eax, 120(%esp) # 4-byte Spill +.LBB249_20: + movl 120(%esp), %eax # 4-byte Reload + movl %eax, 36(%ebx) + js .LBB249_22 +# BB#21: + movl 36(%esp), %ebp # 4-byte Reload +.LBB249_22: + movl %ebp, 40(%ebx) + movl 76(%esp), %eax # 4-byte Reload + js .LBB249_24 +# BB#23: + movl 40(%esp), %eax # 4-byte Reload +.LBB249_24: + movl %eax, 44(%ebx) + movl 92(%esp), %eax # 4-byte Reload + js .LBB249_26 +# BB#25: + movl 44(%esp), %esi # 4-byte Reload +.LBB249_26: + movl %esi, 48(%ebx) + js .LBB249_28 +# BB#27: + movl 48(%esp), %eax # 4-byte Reload +.LBB249_28: + movl %eax, 52(%ebx) + js .LBB249_30 +# BB#29: + movl 52(%esp), %ecx # 4-byte Reload +.LBB249_30: + movl %ecx, 56(%ebx) + js .LBB249_32 +# BB#31: + movl 56(%esp), %edi # 4-byte Reload +.LBB249_32: + movl %edi, 60(%ebx) + addl $124, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end249: + .size mcl_fp_addNF16Lbmi2, .Lfunc_end249-mcl_fp_addNF16Lbmi2 + + .globl mcl_fp_sub16Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub16Lbmi2,@function +mcl_fp_sub16Lbmi2: # @mcl_fp_sub16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 84(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 88(%esp), %edi + subl (%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 32(%esi), %eax + sbbl 32(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 36(%esi), %eax + sbbl 36(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esi), %eax + sbbl 40(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 44(%esi), %edx + sbbl 44(%edi), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 48(%esi), %ecx + sbbl 48(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 52(%esi), %eax + sbbl 52(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 56(%esi), %ebp + sbbl 56(%edi), %ebp + movl 60(%esi), %esi + sbbl 60(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 80(%esp), %ebx + movl 52(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 56(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 28(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 32(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 36(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 40(%ebx) + movl %edx, 44(%ebx) + movl %ecx, 48(%ebx) + movl %eax, 52(%ebx) + movl %ebp, 56(%ebx) + movl %esi, 60(%ebx) + je .LBB250_2 +# BB#1: # %carry + movl %esi, (%esp) # 4-byte Spill + movl 92(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 44(%esp), %edi # 4-byte Reload + adcl 8(%esi), 
%edi + movl 12(%esi), %eax + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %edi, 8(%ebx) + movl 16(%esi), %ecx + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 40(%esi), %ecx + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl 44(%esi), %eax + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%ebx) + movl 48(%esi), %ecx + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %eax, 44(%ebx) + movl 52(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %ecx, 48(%ebx) + movl %eax, 52(%ebx) + movl 56(%esi), %eax + adcl %ebp, %eax + movl %eax, 56(%ebx) + movl 60(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%ebx) +.LBB250_2: # %nocarry + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end250: + .size mcl_fp_sub16Lbmi2, .Lfunc_end250-mcl_fp_sub16Lbmi2 + + .globl mcl_fp_subNF16Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF16Lbmi2,@function +mcl_fp_subNF16Lbmi2: # @mcl_fp_subNF16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $104, %esp + movl 128(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 132(%esp), %edi + subl (%edi), %esi + movl %esi, 64(%esp) # 4-byte Spill + sbbl 4(%edi), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 60(%ecx), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 56(%ecx), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 52(%ecx), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 48(%ecx), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 36(%ecx), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %ebx + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 44(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 48(%esp) # 4-byte Spill + sbbl 24(%edi), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + sbbl 28(%edi), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + sbbl 36(%edi), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 48(%edi), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 52(%edi), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + sbbl 56(%edi), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + sbbl 60(%edi), %eax + movl %eax, 80(%esp) # 4-byte Spill + sarl $31, %eax + movl 136(%esp), %esi + movl 60(%esi), %ecx + andl %eax, %ecx + 
movl %ecx, 60(%esp) # 4-byte Spill + movl 56(%esi), %ecx + andl %eax, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%esi), %ecx + andl %eax, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 48(%esi), %ecx + andl %eax, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 44(%esi), %ecx + andl %eax, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 40(%esi), %ecx + andl %eax, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 36(%esi), %ecx + andl %eax, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 32(%esi), %ecx + andl %eax, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 28(%esi), %ecx + andl %eax, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 24(%esi), %ecx + andl %eax, %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 20(%esi), %ebp + andl %eax, %ebp + movl 16(%esi), %ebx + andl %eax, %ebx + movl 12(%esi), %edi + andl %eax, %edi + movl 8(%esi), %edx + andl %eax, %edx + movl 4(%esi), %ecx + andl %eax, %ecx + andl (%esi), %eax + addl 64(%esp), %eax # 4-byte Folded Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl 124(%esp), %esi + movl %eax, (%esi) + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %ecx, 4(%esi) + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edx, 8(%esi) + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %edi, 12(%esi) + adcl 48(%esp), %ebp # 4-byte Folded Reload + movl %ebx, 16(%esi) + movl (%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %ebp, 20(%esi) + movl 4(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %eax, 24(%esi) + movl 8(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %ecx, 28(%esi) + movl 12(%esp), %ecx # 4-byte Reload + adcl 100(%esp), %ecx # 4-byte Folded Reload + movl %eax, 32(%esi) + movl 16(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %ecx, 36(%esi) + movl 20(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %eax, 40(%esi) + movl 24(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %ecx, 44(%esi) + movl 28(%esp), %ecx # 4-byte Reload + adcl 92(%esp), %ecx # 4-byte Folded Reload + movl %eax, 48(%esi) + movl 36(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %ecx, 52(%esi) + movl %eax, 56(%esi) + movl 60(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esi) + addl $104, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end251: + .size mcl_fp_subNF16Lbmi2, .Lfunc_end251-mcl_fp_subNF16Lbmi2 + + .globl mcl_fpDbl_add16Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add16Lbmi2,@function +mcl_fpDbl_add16Lbmi2: # @mcl_fpDbl_add16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $116, %esp + movl 144(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 140(%esp), %ebx + addl (%ebx), %esi + adcl 4(%ebx), %edx + movl 8(%ecx), %edi + adcl 8(%ebx), %edi + movl 12(%ebx), %ebp + movl 136(%esp), %eax + movl %esi, (%eax) + movl 16(%ebx), %esi + adcl 12(%ecx), %ebp + adcl 16(%ecx), %esi + movl %edx, 4(%eax) + movl 72(%ecx), %edx + movl %edx, 112(%esp) # 4-byte Spill + movl %edi, 8(%eax) + movl 20(%ecx), %edx + movl %ebp, 12(%eax) + movl 20(%ebx), %edi + adcl %edx, %edi + movl 24(%ecx), %edx + movl %esi, 16(%eax) + movl 24(%ebx), %esi + adcl %edx, %esi + movl 28(%ecx), %edx + movl %edi, 20(%eax) + movl 28(%ebx), %edi + adcl %edx, %edi + movl 32(%ecx), %edx + movl %esi, 24(%eax) + movl 32(%ebx), %esi + adcl %edx, %esi + movl 36(%ecx), %edx + movl %edi, 
28(%eax) + movl 36(%ebx), %edi + adcl %edx, %edi + movl 40(%ecx), %edx + movl %esi, 32(%eax) + movl 40(%ebx), %esi + adcl %edx, %esi + movl 44(%ecx), %edx + movl %edi, 36(%eax) + movl 44(%ebx), %edi + adcl %edx, %edi + movl 48(%ecx), %edx + movl %esi, 40(%eax) + movl 48(%ebx), %esi + adcl %edx, %esi + movl 52(%ecx), %edx + movl %edi, 44(%eax) + movl 52(%ebx), %edi + adcl %edx, %edi + movl 56(%ecx), %edx + movl %esi, 48(%eax) + movl 56(%ebx), %esi + adcl %edx, %esi + movl 60(%ecx), %edx + movl %edi, 52(%eax) + movl 60(%ebx), %ebp + adcl %edx, %ebp + movl 64(%ecx), %edx + movl %esi, 56(%eax) + movl 64(%ebx), %esi + adcl %edx, %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 68(%ecx), %edx + movl %ebp, 60(%eax) + movl 68(%ebx), %eax + adcl %edx, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 72(%ebx), %eax + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%ecx), %ebp + movl 76(%ebx), %eax + adcl %ebp, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%ecx), %ebp + movl 80(%ebx), %eax + adcl %ebp, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 84(%ecx), %ebp + movl 84(%ebx), %eax + adcl %ebp, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 88(%ecx), %ebp + movl 88(%ebx), %eax + adcl %ebp, %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%ecx), %ebp + movl 92(%ebx), %eax + adcl %ebp, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%ecx), %ebp + movl 96(%ebx), %eax + adcl %ebp, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 100(%ecx), %ebp + movl 100(%ebx), %edx + adcl %ebp, %edx + movl %edx, 112(%esp) # 4-byte Spill + movl 104(%ecx), %ebp + movl 104(%ebx), %edx + adcl %ebp, %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 108(%ecx), %ebp + movl 108(%ebx), %edx + adcl %ebp, %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 112(%ecx), %edx + movl 112(%ebx), %ebp + adcl %edx, %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 116(%ecx), %edx + movl 116(%ebx), %esi + adcl %edx, %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 120(%ecx), %edx + movl 120(%ebx), %edi + adcl %edx, %edi + movl 124(%ecx), %ecx + movl 124(%ebx), %esi + adcl %ecx, %esi + sbbl %ecx, %ecx + andl $1, %ecx + movl 148(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + subl (%edx), %ebx + movl %ebx, (%esp) # 4-byte Spill + movl 76(%esp), %ebx # 4-byte Reload + sbbl 4(%edx), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + movl 80(%esp), %ebx # 4-byte Reload + sbbl 8(%edx), %ebx + movl %ebx, 8(%esp) # 4-byte Spill + movl 84(%esp), %ebx # 4-byte Reload + sbbl 12(%edx), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 104(%esp), %ebx # 4-byte Reload + sbbl 16(%edx), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 20(%edx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 108(%esp), %ebx # 4-byte Reload + sbbl 24(%edx), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 92(%esp), %ebx # 4-byte Reload + sbbl 28(%edx), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl %eax, %ebx + sbbl 32(%edx), %ebx + movl 112(%esp), %eax # 4-byte Reload + sbbl 36(%edx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + sbbl 40(%edx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + sbbl 44(%edx), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl 48(%edx), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + movl %eax, %ebp + sbbl 52(%edx), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl %edi, %ebp + sbbl 56(%edx), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl %esi, 
%ebp + sbbl 60(%edx), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + sbbl $0, %ecx + andl $1, %ecx + jne .LBB252_2 +# BB#1: + movl %ebx, 64(%esp) # 4-byte Spill +.LBB252_2: + testb %cl, %cl + movl 72(%esp), %ecx # 4-byte Reload + jne .LBB252_4 +# BB#3: + movl (%esp), %ecx # 4-byte Reload +.LBB252_4: + movl 136(%esp), %ebx + movl %ecx, 64(%ebx) + movl %esi, %ebp + movl %edi, 72(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + movl 92(%esp), %ecx # 4-byte Reload + movl 88(%esp), %edx # 4-byte Reload + movl 76(%esp), %esi # 4-byte Reload + jne .LBB252_6 +# BB#5: + movl 4(%esp), %esi # 4-byte Reload +.LBB252_6: + movl %esi, 68(%ebx) + movl 84(%esp), %esi # 4-byte Reload + movl 80(%esp), %eax # 4-byte Reload + jne .LBB252_8 +# BB#7: + movl 8(%esp), %eax # 4-byte Reload +.LBB252_8: + movl %eax, 72(%ebx) + movl 60(%esp), %eax # 4-byte Reload + jne .LBB252_10 +# BB#9: + movl 12(%esp), %esi # 4-byte Reload +.LBB252_10: + movl %esi, 76(%ebx) + jne .LBB252_12 +# BB#11: + movl 16(%esp), %esi # 4-byte Reload + movl %esi, 104(%esp) # 4-byte Spill +.LBB252_12: + movl 104(%esp), %esi # 4-byte Reload + movl %esi, 80(%ebx) + jne .LBB252_14 +# BB#13: + movl 20(%esp), %edx # 4-byte Reload +.LBB252_14: + movl %edx, 84(%ebx) + jne .LBB252_16 +# BB#15: + movl 24(%esp), %edx # 4-byte Reload + movl %edx, 108(%esp) # 4-byte Spill +.LBB252_16: + movl 108(%esp), %edx # 4-byte Reload + movl %edx, 88(%ebx) + jne .LBB252_18 +# BB#17: + movl 28(%esp), %ecx # 4-byte Reload +.LBB252_18: + movl %ecx, 92(%ebx) + movl 64(%esp), %ecx # 4-byte Reload + movl %ecx, 96(%ebx) + jne .LBB252_20 +# BB#19: + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 112(%esp) # 4-byte Spill +.LBB252_20: + movl 112(%esp), %ecx # 4-byte Reload + movl %ecx, 100(%ebx) + jne .LBB252_22 +# BB#21: + movl 36(%esp), %edi # 4-byte Reload +.LBB252_22: + movl %edi, 104(%ebx) + movl 100(%esp), %ecx # 4-byte Reload + jne .LBB252_24 +# BB#23: + movl 40(%esp), %ecx # 4-byte Reload +.LBB252_24: + movl %ecx, 108(%ebx) + movl 72(%esp), %ecx # 4-byte Reload + jne .LBB252_26 +# BB#25: + movl 44(%esp), %eax # 4-byte Reload +.LBB252_26: + movl %eax, 112(%ebx) + movl 68(%esp), %eax # 4-byte Reload + jne .LBB252_28 +# BB#27: + movl 48(%esp), %eax # 4-byte Reload +.LBB252_28: + movl %eax, 116(%ebx) + jne .LBB252_30 +# BB#29: + movl 52(%esp), %ecx # 4-byte Reload +.LBB252_30: + movl %ecx, 120(%ebx) + jne .LBB252_32 +# BB#31: + movl 56(%esp), %ebp # 4-byte Reload +.LBB252_32: + movl %ebp, 124(%ebx) + addl $116, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end252: + .size mcl_fpDbl_add16Lbmi2, .Lfunc_end252-mcl_fpDbl_add16Lbmi2 + + .globl mcl_fpDbl_sub16Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub16Lbmi2,@function +mcl_fpDbl_sub16Lbmi2: # @mcl_fpDbl_sub16Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $108, %esp + movl 132(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %edi + movl 136(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%eax), %ebx + sbbl 8(%edx), %ebx + movl 128(%esp), %ecx + movl %esi, (%ecx) + movl 12(%eax), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ecx) + movl 16(%eax), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ecx) + movl 20(%edx), %ebx + movl %esi, 12(%ecx) + movl 20(%eax), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ecx) + movl 24(%eax), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ecx) + movl 28(%eax), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ecx) + movl 32(%eax), %edi + sbbl %ebx, %edi + movl 36(%edx), %ebx + movl %esi, 
28(%ecx) + movl 36(%eax), %esi + sbbl %ebx, %esi + movl 40(%edx), %ebx + movl %edi, 32(%ecx) + movl 40(%eax), %edi + sbbl %ebx, %edi + movl 44(%edx), %ebx + movl %esi, 36(%ecx) + movl 44(%eax), %esi + sbbl %ebx, %esi + movl 48(%edx), %ebx + movl %edi, 40(%ecx) + movl 48(%eax), %edi + sbbl %ebx, %edi + movl 52(%edx), %ebx + movl %esi, 44(%ecx) + movl 52(%eax), %esi + sbbl %ebx, %esi + movl 56(%edx), %ebx + movl %edi, 48(%ecx) + movl 56(%eax), %edi + sbbl %ebx, %edi + movl 60(%edx), %ebx + movl %esi, 52(%ecx) + movl 60(%eax), %esi + sbbl %ebx, %esi + movl 64(%edx), %ebx + movl %edi, 56(%ecx) + movl 64(%eax), %edi + sbbl %ebx, %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 68(%edx), %edi + movl %esi, 60(%ecx) + movl 68(%eax), %esi + sbbl %edi, %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 72(%edx), %esi + movl 72(%eax), %edi + sbbl %esi, %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 76(%edx), %esi + movl 76(%eax), %edi + sbbl %esi, %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 80(%edx), %esi + movl 80(%eax), %edi + sbbl %esi, %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 84(%edx), %esi + movl 84(%eax), %edi + sbbl %esi, %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 88(%edx), %esi + movl 88(%eax), %edi + sbbl %esi, %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 92(%edx), %esi + movl 92(%eax), %edi + sbbl %esi, %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 96(%edx), %esi + movl 96(%eax), %edi + sbbl %esi, %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 100(%edx), %esi + movl 100(%eax), %edi + sbbl %esi, %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 104(%edx), %esi + movl 104(%eax), %edi + sbbl %esi, %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 108(%edx), %esi + movl 108(%eax), %edi + sbbl %esi, %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 112(%edx), %esi + movl 112(%eax), %edi + sbbl %esi, %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 116(%edx), %esi + movl 116(%eax), %edi + sbbl %esi, %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 120(%edx), %esi + movl 120(%eax), %edi + sbbl %esi, %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 124(%edx), %edx + movl 124(%eax), %eax + sbbl %edx, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 140(%esp), %ebx + jne .LBB253_1 +# BB#2: + movl $0, 68(%esp) # 4-byte Folded Spill + jmp .LBB253_3 +.LBB253_1: + movl 60(%ebx), %edx + movl %edx, 68(%esp) # 4-byte Spill +.LBB253_3: + testb %al, %al + jne .LBB253_4 +# BB#5: + movl $0, 24(%esp) # 4-byte Folded Spill + movl $0, %ebp + jmp .LBB253_6 +.LBB253_4: + movl (%ebx), %ebp + movl 4(%ebx), %eax + movl %eax, 24(%esp) # 4-byte Spill +.LBB253_6: + jne .LBB253_7 +# BB#8: + movl $0, 36(%esp) # 4-byte Folded Spill + jmp .LBB253_9 +.LBB253_7: + movl 56(%ebx), %eax + movl %eax, 36(%esp) # 4-byte Spill +.LBB253_9: + jne .LBB253_10 +# BB#11: + movl $0, 32(%esp) # 4-byte Folded Spill + jmp .LBB253_12 +.LBB253_10: + movl 52(%ebx), %eax + movl %eax, 32(%esp) # 4-byte Spill +.LBB253_12: + jne .LBB253_13 +# BB#14: + movl $0, 28(%esp) # 4-byte Folded Spill + jmp .LBB253_15 +.LBB253_13: + movl 48(%ebx), %eax + movl %eax, 28(%esp) # 4-byte Spill +.LBB253_15: + jne .LBB253_16 +# BB#17: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB253_18 +.LBB253_16: + movl 44(%ebx), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB253_18: + jne .LBB253_19 +# BB#20: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB253_21 +.LBB253_19: + movl 40(%ebx), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB253_21: + jne .LBB253_22 +# BB#23: + movl $0, 12(%esp) # 
4-byte Folded Spill + jmp .LBB253_24 +.LBB253_22: + movl 36(%ebx), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB253_24: + jne .LBB253_25 +# BB#26: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB253_27 +.LBB253_25: + movl 32(%ebx), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB253_27: + jne .LBB253_28 +# BB#29: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB253_30 +.LBB253_28: + movl 28(%ebx), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB253_30: + jne .LBB253_31 +# BB#32: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB253_33 +.LBB253_31: + movl 24(%ebx), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB253_33: + jne .LBB253_34 +# BB#35: + movl $0, %esi + jmp .LBB253_36 +.LBB253_34: + movl 20(%ebx), %esi +.LBB253_36: + jne .LBB253_37 +# BB#38: + movl $0, %edx + jmp .LBB253_39 +.LBB253_37: + movl 16(%ebx), %edx +.LBB253_39: + jne .LBB253_40 +# BB#41: + movl $0, %edi + jmp .LBB253_42 +.LBB253_40: + movl 12(%ebx), %edi +.LBB253_42: + jne .LBB253_43 +# BB#44: + xorl %ebx, %ebx + jmp .LBB253_45 +.LBB253_43: + movl 8(%ebx), %ebx +.LBB253_45: + addl 48(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %eax + movl 24(%esp), %ebp # 4-byte Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %eax, 64(%ecx) + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 68(%ecx) + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %ebx, 72(%ecx) + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edi, 76(%ecx) + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %edx, 80(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %esi, 84(%ecx) + movl 4(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %eax, 88(%ecx) + movl 8(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %edx, 92(%ecx) + movl 12(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %eax, 96(%ecx) + movl 16(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %edx, 100(%ecx) + movl 20(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %eax, 104(%ecx) + movl 28(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %edx, 108(%ecx) + movl 32(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx # 4-byte Folded Reload + movl %eax, 112(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %edx, 116(%ecx) + movl %eax, 120(%ecx) + movl 68(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 124(%ecx) + addl $108, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end253: + .size mcl_fpDbl_sub16Lbmi2, .Lfunc_end253-mcl_fpDbl_sub16Lbmi2 + + .align 16, 0x90 + .type .LmulPv544x32,@function +.LmulPv544x32: # @mulPv544x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl %edx, %eax + movl 80(%esp), %esi + movl %esi, %edx + mulxl 4(%eax), %edi, %ebx + movl %esi, %edx + mulxl (%eax), %ebp, %edx + movl %ebp, 56(%esp) # 4-byte Spill + addl %edi, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 8(%eax), %edx, %edi + adcl %ebx, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 12(%eax), %edx, %ebx + adcl %edi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 16(%eax), %edx, %edi + adcl %ebx, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 20(%eax), %edx, %ebx + adcl %edi, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 24(%eax), 
%edx, %edi + adcl %ebx, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 28(%eax), %edx, %ebx + adcl %edi, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 32(%eax), %edx, %edi + adcl %ebx, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 36(%eax), %edx, %ebx + adcl %edi, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 40(%eax), %edx, %edi + adcl %ebx, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 44(%eax), %edx, %ebx + adcl %edi, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 48(%eax), %edx, %edi + adcl %ebx, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %esi, %edx + mulxl 52(%eax), %ebx, %ebp + adcl %edi, %ebx + movl %esi, %edx + mulxl 56(%eax), %edi, %edx + movl %edx, (%esp) # 4-byte Spill + adcl %ebp, %edi + movl %esi, %edx + mulxl 60(%eax), %edx, %ebp + movl %ebp, 4(%esp) # 4-byte Spill + adcl (%esp), %edx # 4-byte Folded Reload + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, (%ecx) + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ecx) + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%ecx) + movl 44(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%ecx) + movl 40(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%ecx) + movl 36(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%ecx) + movl 32(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%ecx) + movl 28(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%ecx) + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%ecx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%ecx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%ecx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 44(%ecx) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 48(%ecx) + movl %ebx, 52(%ecx) + movl %edi, 56(%ecx) + movl %edx, 60(%ecx) + movl %esi, %edx + mulxl 64(%eax), %eax, %edx + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%ecx) + adcl $0, %edx + movl %edx, 68(%ecx) + movl %ecx, %eax + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end254: + .size .LmulPv544x32, .Lfunc_end254-.LmulPv544x32 + + .globl mcl_fp_mulUnitPre17Lbmi2 + .align 16, 0x90 + .type mcl_fp_mulUnitPre17Lbmi2,@function +mcl_fp_mulUnitPre17Lbmi2: # @mcl_fp_mulUnitPre17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $140, %esp + calll .L255$pb +.L255$pb: + popl %ebx +.Ltmp56: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp56-.L255$pb), %ebx + movl 168(%esp), %eax + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 164(%esp), %edx + calll .LmulPv544x32 + movl 132(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 128(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 120(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 116(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 108(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 104(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 88(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 84(%esp), %ebp + movl 80(%esp), %ebx + movl 76(%esp), %edi + movl 72(%esp), %esi + movl 64(%esp), %edx + movl 68(%esp), %ecx + movl 160(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 
20(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + movl 52(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, 64(%eax) + movl 60(%esp), %ecx # 4-byte Reload + movl %ecx, 68(%eax) + addl $140, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end255: + .size mcl_fp_mulUnitPre17Lbmi2, .Lfunc_end255-mcl_fp_mulUnitPre17Lbmi2 + + .globl mcl_fpDbl_mulPre17Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_mulPre17Lbmi2,@function +mcl_fpDbl_mulPre17Lbmi2: # @mcl_fpDbl_mulPre17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1356, %esp # imm = 0x54C + calll .L256$pb +.L256$pb: + popl %edi +.Ltmp57: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp57-.L256$pb), %edi + movl %edi, 124(%esp) # 4-byte Spill + movl 1384(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1280(%esp), %ecx + movl 1380(%esp), %edx + movl %edx, %esi + movl %edi, %ebx + calll .LmulPv544x32 + movl 1348(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1344(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1340(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1336(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1332(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1328(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1320(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1316(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1312(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1308(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1304(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1300(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1296(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 1292(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 1288(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 1280(%esp), %eax + movl 1284(%esp), %ebp + movl 1376(%esp), %ecx + movl %eax, (%ecx) + movl 1384(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl %esi, %edx + movl %edi, %ebx + calll .LmulPv544x32 + addl 1208(%esp), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 1276(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1272(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1268(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1260(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1256(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 1252(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1248(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1244(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1236(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1232(%esp), %edi + movl 1228(%esp), %esi + movl 1224(%esp), %edx + movl 1220(%esp), %ecx + movl 1212(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill 
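+# mcl_fpDbl_mulPre17Lbmi2 computes the full 1088-bit product of two 17-limb
+# (544-bit) operands by schoolbook multiplication: each unrolled step calls
+# .LmulPv544x32 to form the 18-limb partial product of the first operand and
+# one 32-bit limb of the second, folds it into the running accumulator with
+# adcl chains, and stores one finished result limb per step.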
+ movl 1216(%esp), %eax + movl 1376(%esp), %ebp + movl 8(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + movl 12(%esp), %ebp # 4-byte Reload + adcl %ebp, 120(%esp) # 4-byte Folded Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 64(%esp) # 4-byte Folded Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1136(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 1136(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1204(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1200(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1196(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1192(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1188(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1184(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1180(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1176(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1168(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1164(%esp), %ebx + movl 1160(%esp), %edi + movl 1156(%esp), %esi + movl 1152(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1148(%esp), %edx + movl 1140(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1144(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 80(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 72(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte 
Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1064(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1064(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 1132(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1128(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1124(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1120(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1116(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1112(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1108(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1100(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1096(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1092(%esp), %ebx + movl 1088(%esp), %edi + movl 1084(%esp), %esi + movl 1080(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1076(%esp), %edx + movl 1068(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1072(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 
88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 992(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1060(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1056(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1052(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1044(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1040(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1036(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1032(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1028(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1024(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1020(%esp), %ebx + movl 1016(%esp), %edi + movl 1012(%esp), %esi + movl 1008(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1004(%esp), %edx + movl 996(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1000(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 88(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 920(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 920(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 988(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 984(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 980(%esp), %eax + movl %eax, 88(%esp) # 4-byte 
Spill + movl 976(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 972(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 968(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 964(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 960(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 956(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 952(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 948(%esp), %ebx + movl 944(%esp), %edi + movl 940(%esp), %esi + movl 936(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 932(%esp), %edx + movl 924(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 928(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 4(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 848(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 848(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 916(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 912(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 908(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 904(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 900(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 896(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 892(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 888(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 884(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 880(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 876(%esp), %ebx + movl 872(%esp), %edi + movl 868(%esp), %esi + movl 864(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 860(%esp), %edx + movl 852(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 856(%esp), %ecx + movl 1376(%esp), %eax + movl 
120(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 776(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 776(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 844(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 840(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 836(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 828(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 804(%esp), %ebx + movl 800(%esp), %edi + movl 796(%esp), %esi + movl 792(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 788(%esp), %edx + movl 780(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 784(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 
4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 704(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 772(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 764(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 760(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 748(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 744(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 740(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 736(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 732(%esp), %ebx + movl 728(%esp), %edi + movl 724(%esp), %esi + movl 720(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 716(%esp), %edx + movl 708(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 712(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 
4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 632(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 700(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 696(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 692(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 688(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 684(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 680(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 676(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 672(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 660(%esp), %ebx + movl 656(%esp), %edi + movl 652(%esp), %esi + movl 648(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 644(%esp), %edx + movl 636(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 640(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 560(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 628(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 624(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 620(%esp), %eax + movl %eax, 
92(%esp) # 4-byte Spill + movl 616(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 588(%esp), %ebx + movl 584(%esp), %edi + movl 580(%esp), %esi + movl 576(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 572(%esp), %edx + movl 564(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 568(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 48(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 488(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 488(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 532(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 528(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 524(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 520(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 516(%esp), %ebx + movl 512(%esp), %edi + movl 508(%esp), %esi + movl 504(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 500(%esp), %edx + movl 492(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 496(%esp), %ecx + movl 1376(%esp), %eax + movl 48(%esp), %ebp # 
4-byte Reload + movl %ebp, 44(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 416(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 472(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 468(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 464(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 460(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 456(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 452(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 448(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 444(%esp), %ebx + movl 440(%esp), %edi + movl 436(%esp), %esi + movl 432(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 428(%esp), %edx + movl 420(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 424(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 48(%eax) + movl 48(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + 
movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 344(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 404(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 400(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 396(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 392(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 388(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 380(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 376(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 372(%esp), %ebx + movl 368(%esp), %edi + movl 364(%esp), %esi + movl 360(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 356(%esp), %edx + movl 348(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 352(%esp), %ecx + movl 1376(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 52(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 
88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 1380(%esp), %eax + movl %eax, %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 272(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 340(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 336(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 320(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 316(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 304(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 300(%esp), %ebx + movl 296(%esp), %edi + movl 292(%esp), %edx + movl 288(%esp), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 284(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl 280(%esp), %ecx + movl 120(%esp), %esi # 4-byte Reload + movl 1376(%esp), %ebp + movl %esi, 56(%ebp) + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebp + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 28(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 112(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1384(%esp), %ecx + movl %ecx, %eax + movl 60(%eax), %eax + movl %eax, (%esp) + leal 200(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 200(%esp), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 
44(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 248(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 244(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 240(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 236(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 232(%esp), %edi + movl 228(%esp), %esi + movl 224(%esp), %edx + movl 220(%esp), %ecx + movl 216(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl 204(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 208(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 12(%esp), %ebp # 4-byte Reload + movl 1376(%esp), %ebx + movl %ebp, 60(%ebx) + movl 120(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 120(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 76(%esp), %ebp # 4-byte Folded Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 56(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 32(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 64(%eax), %eax + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 128(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 132(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 196(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 192(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 188(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 184(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 164(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 156(%esp), %ebx + movl 152(%esp), %edi + movl 148(%esp), 
%esi + movl 144(%esp), %edx + movl 140(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 64(%eax) + movl 64(%esp), %ebp # 4-byte Reload + movl %ebp, 68(%eax) + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl 76(%esp), %ebp # 4-byte Reload + movl %ebp, 72(%eax) + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %ecx, 76(%eax) + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %edx, 80(%eax) + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %esi, 84(%eax) + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %edi, 88(%eax) + movl 20(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %ebx, 92(%eax) + movl 32(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %edx, 96(%eax) + movl 48(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %ecx, 100(%eax) + movl 68(%esp), %ecx # 4-byte Reload + adcl 96(%esp), %ecx # 4-byte Folded Reload + movl %edx, 104(%eax) + movl 84(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx # 4-byte Folded Reload + movl %ecx, 108(%eax) + movl 92(%esp), %ecx # 4-byte Reload + adcl 116(%esp), %ecx # 4-byte Folded Reload + movl %edx, 112(%eax) + movl 100(%esp), %edx # 4-byte Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %ecx, 116(%eax) + movl 108(%esp), %ecx # 4-byte Reload + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %edx, 120(%eax) + movl %ecx, 124(%eax) + movl 112(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 128(%eax) + movl 124(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 132(%eax) + addl $1356, %esp # imm = 0x54C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end256: + .size mcl_fpDbl_mulPre17Lbmi2, .Lfunc_end256-mcl_fpDbl_mulPre17Lbmi2 + + .globl mcl_fpDbl_sqrPre17Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sqrPre17Lbmi2,@function +mcl_fpDbl_sqrPre17Lbmi2: # @mcl_fpDbl_sqrPre17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1356, %esp # imm = 0x54C + calll .L257$pb +.L257$pb: + popl %ebx +.Ltmp58: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp58-.L257$pb), %ebx + movl %ebx, 124(%esp) # 4-byte Spill + movl 1380(%esp), %edx + movl (%edx), %eax + movl %eax, (%esp) + leal 1280(%esp), %ecx + movl %edx, %edi + movl %ebx, %esi + calll .LmulPv544x32 + movl 1348(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1344(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1340(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1336(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1332(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1328(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1320(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1316(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1312(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1308(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1304(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1300(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1296(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 1292(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 1288(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 1280(%esp), %eax + movl 1284(%esp), %ebp + movl 1376(%esp), %ecx + movl %eax, (%ecx) + movl %edi, %edx + movl 4(%edx), %eax + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl %esi, %ebx + calll .LmulPv544x32 + addl 1208(%esp), %ebp + 
movl %ebp, 8(%esp) # 4-byte Spill + movl 1276(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1272(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1268(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1260(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1256(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 1252(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1248(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1244(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1236(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1232(%esp), %edi + movl 1228(%esp), %esi + movl 1224(%esp), %edx + movl 1220(%esp), %ecx + movl 1212(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1216(%esp), %eax + movl 1376(%esp), %ebp + movl 8(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + movl 12(%esp), %ebp # 4-byte Reload + adcl %ebp, 120(%esp) # 4-byte Folded Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 64(%esp) # 4-byte Folded Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 8(%edx), %eax + movl %eax, (%esp) + leal 1136(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 1136(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1204(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1200(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1196(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1192(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1188(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1184(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1180(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1176(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1168(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1164(%esp), %ebx + movl 1160(%esp), %edi + movl 1156(%esp), %esi + movl 1152(%esp), %eax + movl %eax, 32(%esp) # 4-byte 
Spill + movl 1148(%esp), %edx + movl 1140(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1144(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 80(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 72(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 12(%edx), %eax + movl %eax, (%esp) + leal 1064(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1064(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 1132(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1128(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1124(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1120(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1116(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1112(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1108(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1100(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1096(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1092(%esp), %ebx + movl 1088(%esp), %edi + movl 1084(%esp), %esi + movl 1080(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1076(%esp), %edx + movl 1068(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1072(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + 
movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 16(%edx), %eax + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 992(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1060(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1056(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1052(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1044(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1040(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1036(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1032(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1028(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1024(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1020(%esp), %ebx + movl 1016(%esp), %edi + movl 1012(%esp), %esi + movl 1008(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1004(%esp), %edx + movl 996(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1000(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 
84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 20(%edx), %eax + movl %eax, (%esp) + leal 920(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 920(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 988(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 984(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 980(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 976(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 972(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 968(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 964(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 960(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 956(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 952(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 948(%esp), %ebx + movl 944(%esp), %edi + movl 940(%esp), %esi + movl 936(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 932(%esp), %edx + movl 924(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 928(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 24(%edx), %eax + movl %eax, (%esp) + leal 848(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 848(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 916(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 912(%esp), 
%eax + movl %eax, 108(%esp) # 4-byte Spill + movl 908(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 904(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 900(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 896(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 892(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 888(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 884(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 880(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 876(%esp), %ebx + movl 872(%esp), %edi + movl 868(%esp), %esi + movl 864(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 860(%esp), %edx + movl 852(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 856(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 28(%edx), %eax + movl %eax, (%esp) + leal 776(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 776(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 844(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 840(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 836(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 828(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 804(%esp), %ebx + movl 800(%esp), %edi + movl 796(%esp), %esi + movl 792(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 788(%esp), %edx + movl 780(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 784(%esp), 
%ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 32(%edx), %eax + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 704(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 772(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 764(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 760(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 748(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 744(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 740(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 736(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 732(%esp), %ebx + movl 728(%esp), %edi + movl 724(%esp), %esi + movl 720(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 716(%esp), %edx + movl 708(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 712(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), 
%eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 36(%edx), %eax + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 632(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 700(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 696(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 692(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 688(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 684(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 680(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 676(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 672(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 660(%esp), %ebx + movl 656(%esp), %edi + movl 652(%esp), %esi + movl 648(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 644(%esp), %edx + movl 636(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 640(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 
96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 40(%edx), %eax + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 560(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 628(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 624(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 620(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 616(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 588(%esp), %ebx + movl 584(%esp), %edi + movl 580(%esp), %esi + movl 576(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 572(%esp), %edx + movl 564(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 568(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 48(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 44(%edx), %eax + movl %eax, (%esp) + leal 488(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 488(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 88(%esp) # 
4-byte Spill + movl 540(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 532(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 528(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 524(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 520(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 516(%esp), %ebx + movl 512(%esp), %edi + movl 508(%esp), %esi + movl 504(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 500(%esp), %edx + movl 492(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 496(%esp), %ecx + movl 1376(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 44(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 48(%edx), %eax + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 416(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 472(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 468(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 464(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 460(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 456(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 452(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 448(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 444(%esp), %ebx + movl 440(%esp), %edi + movl 436(%esp), %esi + movl 432(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 428(%esp), %edx + movl 420(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 424(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 48(%eax) + movl 48(%esp), %ebp # 
4-byte Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 52(%edx), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 344(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 404(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 400(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 396(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 392(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 388(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 380(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 376(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 372(%esp), %ebx + movl 368(%esp), %edi + movl 364(%esp), %esi + movl 360(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 356(%esp), %edx + movl 348(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 352(%esp), %ecx + movl 1376(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 52(%eax) + movl 4(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl 
%eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 56(%edx), %eax + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 272(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 340(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 336(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 320(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 316(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 304(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 300(%esp), %ebx + movl 296(%esp), %edi + movl 292(%esp), %edx + movl 288(%esp), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 284(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl 280(%esp), %ecx + movl 120(%esp), %esi # 4-byte Reload + movl 1376(%esp), %ebp + movl %esi, 56(%ebp) + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebp + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 28(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 
100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 60(%edx), %eax + movl %eax, (%esp) + leal 200(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 200(%esp), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 248(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 244(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 240(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 236(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 232(%esp), %edi + movl 228(%esp), %esi + movl 224(%esp), %edx + movl 220(%esp), %ecx + movl 216(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl 204(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 208(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 12(%esp), %ebp # 4-byte Reload + movl 1376(%esp), %ebx + movl %ebp, 60(%ebx) + movl 120(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 120(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 76(%esp), %ebp # 4-byte Folded Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 56(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 32(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 64(%edx), %eax + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 128(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 132(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 196(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 192(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 188(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 
184(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 164(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 156(%esp), %ebx + movl 152(%esp), %edi + movl 148(%esp), %esi + movl 144(%esp), %edx + movl 140(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 64(%eax) + movl 68(%esp), %ebp # 4-byte Reload + movl %ebp, 68(%eax) + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl 76(%esp), %ebp # 4-byte Reload + movl %ebp, 72(%eax) + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %ecx, 76(%eax) + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %edx, 80(%eax) + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %esi, 84(%eax) + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %edi, 88(%eax) + movl 20(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %ebx, 92(%eax) + movl 32(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %edx, 96(%eax) + movl 48(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %ecx, 100(%eax) + movl 64(%esp), %ecx # 4-byte Reload + adcl 96(%esp), %ecx # 4-byte Folded Reload + movl %edx, 104(%eax) + movl 84(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx # 4-byte Folded Reload + movl %ecx, 108(%eax) + movl 92(%esp), %ecx # 4-byte Reload + adcl 112(%esp), %ecx # 4-byte Folded Reload + movl %edx, 112(%eax) + movl 100(%esp), %edx # 4-byte Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %ecx, 116(%eax) + movl 108(%esp), %ecx # 4-byte Reload + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %edx, 120(%eax) + movl %ecx, 124(%eax) + movl 116(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 128(%eax) + movl 124(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 132(%eax) + addl $1356, %esp # imm = 0x54C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end257: + .size mcl_fpDbl_sqrPre17Lbmi2, .Lfunc_end257-mcl_fpDbl_sqrPre17Lbmi2 + + .globl mcl_fp_mont17Lbmi2 + .align 16, 0x90 + .type mcl_fp_mont17Lbmi2,@function +mcl_fp_mont17Lbmi2: # @mcl_fp_mont17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2588, %esp # imm = 0xA1C + calll .L258$pb +.L258$pb: + popl %ebx +.Ltmp59: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp59-.L258$pb), %ebx + movl 2620(%esp), %eax + movl -4(%eax), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 2512(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 2512(%esp), %ebp + movl 2516(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 2580(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 2576(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 2572(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 2568(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 2564(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 2560(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2556(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2552(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2548(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 2544(%esp), %ecx + movl %ecx, 68(%esp) # 
4-byte Spill + movl 2540(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2536(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 2532(%esp), %edi + movl 2528(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2524(%esp), %esi + movl 2520(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 2440(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + addl 2440(%esp), %ebp + movl 120(%esp), %eax # 4-byte Reload + adcl 2444(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2448(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 2452(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2456(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2460(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 2464(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2468(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2472(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2476(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2480(%esp), %eax + movl %eax, %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 2484(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2488(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2492(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 2496(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2500(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 2504(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 2508(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl 2616(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 2368(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + andl $1, %ebp + movl 120(%esp), %ecx # 4-byte Reload + addl 2368(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 2372(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 2376(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2380(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2384(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2388(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 2392(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 2396(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2400(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 2404(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 2408(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 2412(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2416(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 2420(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2424(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 2428(%esp), %eax + movl %eax, 128(%esp) # 
4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 2432(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 2436(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 116(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2296(%esp), %ecx + movl 2620(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + movl 116(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 2296(%esp), %ebp + movl 100(%esp), %ecx # 4-byte Reload + adcl 2300(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 2304(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 2308(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2312(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 2316(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 2320(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 2324(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2328(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2332(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 2336(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 2340(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 2344(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 2348(%esp), %esi + movl 124(%esp), %ecx # 4-byte Reload + adcl 2352(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 2356(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 2360(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl 2364(%esp), %ebp + adcl $0, %eax + movl %eax, %edi + movl 2616(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 2224(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 100(%esp), %ecx # 4-byte Reload + addl 2224(%esp), %ecx + movl 112(%esp), %eax # 4-byte Reload + adcl 2228(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2232(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2236(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2240(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2244(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2248(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2252(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2256(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2260(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2268(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 2272(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2276(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), 
%eax # 4-byte Reload + adcl 2280(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 2284(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 2288(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + adcl 2292(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %edi + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2152(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %ecx + addl 2152(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 2156(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2160(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2164(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2168(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2172(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2176(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2180(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2184(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2188(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2192(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2196(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 2200(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %ebp # 4-byte Reload + adcl 2204(%esp), %ebp + movl 128(%esp), %edi # 4-byte Reload + adcl 2208(%esp), %edi + movl 132(%esp), %esi # 4-byte Reload + adcl 2212(%esp), %esi + movl 120(%esp), %eax # 4-byte Reload + adcl 2216(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2220(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 2080(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 112(%esp), %ecx # 4-byte Reload + addl 2080(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 2084(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2088(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2092(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2096(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2100(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2104(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2108(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2112(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2116(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2120(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 2124(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 2128(%esp), %ebp + movl %ebp, 124(%esp) # 4-byte Spill + 
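+# The block above is one round of the word-serial Montgomery multiplication
+# this function implements: .LmulPv544x32 multiplies the 17-limb (544-bit)
+# operand by a single 32-bit word of the other operand, the adcl chain folds
+# the 18-word partial product into the spilled accumulator, and the imull
+# against 60(%esp) (the word loaded from -4 off the modulus pointer at entry,
+# conventionally -p^{-1} mod 2^32) forms the quotient for the reduction call
+# against the modulus. The recurring sbbl %reg,%reg / andl $1 pair saves the
+# carry flag as a 0/1 word so it survives the next helper call.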
adcl 2132(%esp), %edi + movl %edi, 128(%esp) # 4-byte Spill + adcl 2136(%esp), %esi + movl %esi, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 2140(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2144(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 2148(%esp), %esi + sbbl %ebp, %ebp + movl %ecx, %edi + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2008(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl %ebp, %eax + andl $1, %eax + addl 2008(%esp), %edi + movl 88(%esp), %ecx # 4-byte Reload + adcl 2012(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2016(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 2020(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 2024(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 2028(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2032(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2036(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 2040(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 2044(%esp), %edi + movl 104(%esp), %ecx # 4-byte Reload + adcl 2048(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 2052(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 2056(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 2060(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 2064(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 2068(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 116(%esp), %ebp # 4-byte Reload + adcl 2072(%esp), %ebp + adcl 2076(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1936(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 88(%esp), %ecx # 4-byte Reload + addl 1936(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 1940(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1944(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1948(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1952(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 1956(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 1960(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1964(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1968(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1972(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1976(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1980(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 1984(%esp), %eax 
+ movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 1988(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1992(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 1996(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2000(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 2004(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %edi + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1864(%esp), %ecx + movl 2620(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + andl $1, %ebp + movl %ebp, %ecx + addl 1864(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 1868(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1872(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1876(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 1880(%esp), %edi + adcl 1884(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1888(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1892(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + adcl 1896(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 1900(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1904(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1908(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 1912(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %ebp # 4-byte Reload + adcl 1916(%esp), %ebp + movl 120(%esp), %eax # 4-byte Reload + adcl 1920(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1924(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1928(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1932(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1792(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 80(%esp), %ecx # 4-byte Reload + addl 1792(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1804(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1808(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1816(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1820(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1824(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1828(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1832(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 
4-byte Reload + adcl 1836(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl 1840(%esp), %ebp + movl %ebp, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1844(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 1848(%esp), %edi + movl 100(%esp), %ebp # 4-byte Reload + adcl 1852(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 1856(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1860(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1720(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %eax + movl 80(%esp), %ecx # 4-byte Reload + addl 1720(%esp), %ecx + movl 92(%esp), %ecx # 4-byte Reload + adcl 1724(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1728(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1732(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1736(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1740(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1744(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1748(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 1752(%esp), %esi + movl 108(%esp), %ecx # 4-byte Reload + adcl 1756(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1760(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 1764(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 1768(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 1772(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + adcl 1776(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + adcl 1780(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1784(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 1788(%esp), %ebp + adcl $0, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1648(%esp), %ecx + movl 2612(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + movl 92(%esp), %eax # 4-byte Reload + addl 1648(%esp), %eax + movl 76(%esp), %edi # 4-byte Reload + adcl 1652(%esp), %edi + movl 68(%esp), %ecx # 4-byte Reload + adcl 1656(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1660(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1664(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1668(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1672(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl 1676(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1680(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1684(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 1688(%esp), %ecx + movl %ecx, 
128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 1692(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 1696(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1700(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1704(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1708(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + adcl 1712(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1716(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %eax, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1576(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %ecx + addl 1576(%esp), %ebp + adcl 1580(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1584(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 1588(%esp), %ebp + movl 72(%esp), %edi # 4-byte Reload + adcl 1592(%esp), %edi + movl 84(%esp), %esi # 4-byte Reload + adcl 1596(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 1600(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1604(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1608(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1612(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 1616(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 1620(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1624(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1628(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1632(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1636(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1640(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1644(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1504(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 76(%esp), %ecx # 4-byte Reload + addl 1504(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 1508(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1512(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + adcl 1516(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl 1520(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1528(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1532(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %edi # 4-byte Reload + adcl 1540(%esp), %edi + movl 132(%esp), %eax # 4-byte Reload + adcl 1544(%esp), %eax + movl %eax, 
132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1548(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1552(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1564(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 1568(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1572(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1432(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl 76(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1432(%esp), %ebp + movl 68(%esp), %ecx # 4-byte Reload + adcl 1436(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1440(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1444(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1448(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1452(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1456(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 1460(%esp), %ebp + movl 124(%esp), %ecx # 4-byte Reload + adcl 1464(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + adcl 1468(%esp), %edi + movl %edi, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 1472(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %edi # 4-byte Reload + adcl 1476(%esp), %edi + movl 116(%esp), %ecx # 4-byte Reload + adcl 1480(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1484(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1488(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1492(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + adcl 1496(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1500(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 1360(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 68(%esp), %ecx # 4-byte Reload + addl 1360(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1376(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1380(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1384(%esp), %ebp + movl 124(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload 
+ adcl 1396(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 1400(%esp), %edi + movl %edi, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1404(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 1408(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 1412(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1288(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl 68(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1288(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1296(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1300(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1304(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1308(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 1312(%esp), %ebp + movl 124(%esp), %ecx # 4-byte Reload + adcl 1316(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 1320(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 1324(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 1328(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1332(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + adcl 1336(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 112(%esp), %edi # 4-byte Reload + adcl 1340(%esp), %edi + movl 88(%esp), %esi # 4-byte Reload + adcl 1344(%esp), %esi + movl 80(%esp), %ecx # 4-byte Reload + adcl 1348(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1352(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1356(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 1216(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 64(%esp), %ecx # 4-byte Reload + addl 1216(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1236(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 124(%esp), %ebp # 4-byte Reload + adcl 1240(%esp), %ebp + movl 128(%esp), %eax # 4-byte Reload + adcl 1244(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 1248(%esp), %eax + movl %eax, 
132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1260(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 1264(%esp), %edi + movl %edi, 112(%esp) # 4-byte Spill + adcl 1268(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1144(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl %edi, %eax + andl $1, %eax + addl 1144(%esp), %esi + movl 72(%esp), %ecx # 4-byte Reload + adcl 1148(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1152(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1156(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 1160(%esp), %edi + movl 108(%esp), %ecx # 4-byte Reload + adcl 1164(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 1168(%esp), %ebp + movl %ebp, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 1172(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 1176(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %esi # 4-byte Reload + adcl 1180(%esp), %esi + movl 116(%esp), %ecx # 4-byte Reload + adcl 1184(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1188(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1192(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1196(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1200(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1204(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1208(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1212(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 1072(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 1072(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 1080(%esp), %ebp + adcl 1084(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1088(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 132(%esp) # 4-byte 
Spill + adcl 1104(%esp), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 1112(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1000(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl 72(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1000(%esp), %edi + movl 84(%esp), %ecx # 4-byte Reload + adcl 1004(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 1008(%esp), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1012(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1016(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1020(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 1024(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ebp # 4-byte Reload + adcl 1028(%esp), %ebp + movl 120(%esp), %ecx # 4-byte Reload + adcl 1032(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 1036(%esp), %edi + adcl 1040(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1044(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1048(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1052(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 1056(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1060(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1064(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1068(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 84(%esp), %ecx # 4-byte Reload + addl 928(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 936(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl 952(%esp), %ebp + movl %ebp, 132(%esp) # 4-byte Spill + 
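+# Identical multiply/accumulate/reduce rounds continue for the remaining
+# 32-bit words, loaded from successive offsets of the operand at 2616(%esp).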
movl 120(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 960(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 980(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 984(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 856(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %ecx + addl 856(%esp), %ebp + movl 96(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %ebp # 4-byte Reload + adcl 888(%esp), %ebp + movl 100(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 896(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 912(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2616(%esp), %ecx + movl %ecx, %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl 2612(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + movl 96(%esp), %ecx # 4-byte Reload + addl 784(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 
132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 812(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 820(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 828(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %ecx + addl 712(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %esi # 4-byte Reload + adcl 728(%esp), %esi + movl 132(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl 736(%esp), %ebp + movl 116(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 756(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 640(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 104(%esp), %ecx # 4-byte Reload + addl 640(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 652(%esp), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 660(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 
116(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 680(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 696(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl 104(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 568(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 600(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 608(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 616(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 624(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 108(%esp), %ecx # 4-byte Reload + addl 496(%esp), %ecx + movl 124(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + 
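+# After the last round the accumulator is < 2p; the sbbl compare chain and
+# the .LBB258_* blocks below select between t and t - p, completing the
+# reduction with a single conditional subtraction of the modulus.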
movl 116(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 524(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 528(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 540(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 544(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 108(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl 108(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 424(%esp), %esi + movl 124(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %esi # 4-byte Reload + adcl 440(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 456(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 472(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 480(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 60(%eax), %eax + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 124(%esp), %ecx # 4-byte Reload + addl 352(%esp), %ecx + movl 128(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 364(%esp), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 
116(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 372(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 404(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %ecx + addl 280(%esp), %ebp + movl 128(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %esi # 4-byte Reload + adcl 288(%esp), %esi + movl 120(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %ebp # 4-byte Reload + adcl 296(%esp), %ebp + adcl 300(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 308(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 64(%eax), %eax + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 128(%esp), %ecx # 4-byte Reload + addl 208(%esp), %ecx + adcl 212(%esp), %esi + movl %esi, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 220(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 224(%esp), %ebp + movl 
112(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 232(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 60(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 136(%esp), %ecx + movl 2620(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + andl $1, %edi + addl 136(%esp), %esi + movl 116(%esp), %edx # 4-byte Reload + movl 132(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 148(%esp), %edx + movl %edx, 116(%esp) # 4-byte Spill + adcl 152(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl 80(%esp), %ecx # 4-byte Reload + adcl 164(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 168(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 172(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 176(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 180(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 184(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 188(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 192(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 196(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 200(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 204(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + adcl $0, %edi + movl 132(%esp), %ecx # 4-byte Reload + movl 2620(%esp), %ebx + subl (%ebx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + sbbl 4(%ebx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + sbbl 8(%ebx), %edx + movl %edx, 20(%esp) # 4-byte Spill + sbbl 12(%ebx), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + movl %eax, %edx + sbbl 16(%ebx), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + sbbl 
20(%ebx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + sbbl 24(%ebx), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl 28(%ebx), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + sbbl 32(%ebx), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + sbbl 36(%ebx), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + sbbl 40(%ebx), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + sbbl 44(%ebx), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + sbbl 48(%ebx), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + sbbl 52(%ebx), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + sbbl 56(%ebx), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl %ebx, %ebp + movl 108(%esp), %ebx # 4-byte Reload + sbbl 60(%ebp), %ebx + movl 124(%esp), %esi # 4-byte Reload + sbbl 64(%ebp), %esi + movl %esi, %ebp + sbbl $0, %edi + andl $1, %edi + jne .LBB258_2 +# BB#1: + movl %ebx, 108(%esp) # 4-byte Spill +.LBB258_2: + movl %edi, %ebx + testb %bl, %bl + movl 132(%esp), %ebx # 4-byte Reload + jne .LBB258_4 +# BB#3: + movl 12(%esp), %ebx # 4-byte Reload +.LBB258_4: + movl 2608(%esp), %eax + movl %ebx, (%eax) + movl 120(%esp), %ebx # 4-byte Reload + jne .LBB258_6 +# BB#5: + movl 16(%esp), %ebx # 4-byte Reload +.LBB258_6: + movl %ebx, 4(%eax) + jne .LBB258_8 +# BB#7: + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 116(%esp) # 4-byte Spill +.LBB258_8: + movl 116(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + jne .LBB258_10 +# BB#9: + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 100(%esp) # 4-byte Spill +.LBB258_10: + movl 100(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 112(%esp), %esi # 4-byte Reload + jne .LBB258_12 +# BB#11: + movl 28(%esp), %esi # 4-byte Reload +.LBB258_12: + movl %esi, 16(%eax) + movl 80(%esp), %ecx # 4-byte Reload + jne .LBB258_14 +# BB#13: + movl 32(%esp), %edx # 4-byte Reload +.LBB258_14: + movl %edx, 20(%eax) + jne .LBB258_16 +# BB#15: + movl 36(%esp), %ecx # 4-byte Reload +.LBB258_16: + movl %ecx, 24(%eax) + movl 92(%esp), %ecx # 4-byte Reload + jne .LBB258_18 +# BB#17: + movl 40(%esp), %ecx # 4-byte Reload +.LBB258_18: + movl %ecx, 28(%eax) + movl 76(%esp), %ecx # 4-byte Reload + jne .LBB258_20 +# BB#19: + movl 44(%esp), %ecx # 4-byte Reload +.LBB258_20: + movl %ecx, 32(%eax) + movl 68(%esp), %ecx # 4-byte Reload + jne .LBB258_22 +# BB#21: + movl 48(%esp), %ecx # 4-byte Reload +.LBB258_22: + movl %ecx, 36(%eax) + movl 64(%esp), %ecx # 4-byte Reload + jne .LBB258_24 +# BB#23: + movl 52(%esp), %ecx # 4-byte Reload +.LBB258_24: + movl %ecx, 40(%eax) + movl 72(%esp), %ecx # 4-byte Reload + jne .LBB258_26 +# BB#25: + movl 56(%esp), %ecx # 4-byte Reload +.LBB258_26: + movl %ecx, 44(%eax) + movl 84(%esp), %ecx # 4-byte Reload + jne .LBB258_28 +# BB#27: + movl 60(%esp), %ecx # 4-byte Reload +.LBB258_28: + movl %ecx, 48(%eax) + movl 96(%esp), %ecx # 4-byte Reload + jne .LBB258_30 +# BB#29: + movl 88(%esp), %ecx # 4-byte Reload +.LBB258_30: + movl %ecx, 52(%eax) + movl 104(%esp), %ecx # 4-byte Reload + jne .LBB258_32 +# BB#31: + movl 128(%esp), %ecx # 4-byte Reload +.LBB258_32: + movl %ecx, 56(%eax) + movl 108(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + movl 124(%esp), %ecx # 4-byte Reload + jne .LBB258_34 +# BB#33: + movl %ebp, %ecx +.LBB258_34: + movl %ecx, 64(%eax) + addl $2588, %esp # imm = 0xA1C + popl %esi + popl %edi + 
popl %ebx + popl %ebp + retl +.Lfunc_end258: + .size mcl_fp_mont17Lbmi2, .Lfunc_end258-mcl_fp_mont17Lbmi2 + + .globl mcl_fp_montNF17Lbmi2 + .align 16, 0x90 + .type mcl_fp_montNF17Lbmi2,@function +mcl_fp_montNF17Lbmi2: # @mcl_fp_montNF17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2572, %esp # imm = 0xA0C + calll .L259$pb +.L259$pb: + popl %ebx +.Ltmp60: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp60-.L259$pb), %ebx + movl 2604(%esp), %eax + movl -4(%eax), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 2496(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 2496(%esp), %edi + movl 2500(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %edi, %eax + imull %esi, %eax + movl 2564(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 2560(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2556(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 2552(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2548(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2544(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 2540(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2536(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 2532(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 2528(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 2524(%esp), %ebp + movl 2520(%esp), %esi + movl 2516(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2512(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2508(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 2504(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 2424(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 2424(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 2428(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2432(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2436(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2440(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2444(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 2448(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 2452(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 2456(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 2460(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2464(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2468(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 2472(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 2476(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 2480(%esp), %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 2484(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2488(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2492(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 2352(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 2420(%esp), %ecx + movl 112(%esp), %edx # 4-byte Reload + addl 2352(%esp), %edx + movl 92(%esp), %eax # 
4-byte Reload + adcl 2356(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2360(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2364(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2368(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2372(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2376(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 2380(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 2384(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2388(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2392(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 2396(%esp), %esi + movl %esi, %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 2400(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2404(%esp), %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 2408(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2412(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2416(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2280(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 2280(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 2284(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2288(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2292(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2296(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2300(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2304(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 2308(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 2312(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 2316(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 2320(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 2324(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2328(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2332(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 2336(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 2340(%esp), %ebp + movl 116(%esp), %edi # 4-byte Reload + adcl 2344(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 2348(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 2208(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 2276(%esp), %eax + movl 92(%esp), %edx # 4-byte Reload + addl 2208(%esp), %edx + movl 104(%esp), %ecx # 4-byte Reload + adcl 2212(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 2216(%esp), %ecx + movl %ecx, 
76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 2220(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2224(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 2228(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 2232(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 2236(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 2240(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2244(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2248(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 2252(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 2256(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 2260(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 2264(%esp), %ebp + adcl 2268(%esp), %edi + movl %edi, %esi + movl 112(%esp), %ecx # 4-byte Reload + adcl 2272(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2136(%esp), %ecx + movl 2604(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + addl 2136(%esp), %edi + movl 104(%esp), %eax # 4-byte Reload + adcl 2140(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2144(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2148(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2152(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2156(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 2160(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 2164(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2168(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2172(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2176(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2180(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2184(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + adcl 2188(%esp), %edi + adcl 2192(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + adcl 2196(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 2200(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 2204(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 2064(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 2132(%esp), %eax + movl 104(%esp), %edx # 4-byte Reload + addl 2064(%esp), %edx + movl 76(%esp), %ecx # 4-byte Reload + adcl 2068(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 2072(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2076(%esp), %ecx + movl %ecx, 80(%esp) 
# 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 2080(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 2084(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 2088(%esp), %ebp + movl 68(%esp), %ecx # 4-byte Reload + adcl 2092(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2096(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2100(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 2104(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 2108(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl 2112(%esp), %edi + movl 100(%esp), %ecx # 4-byte Reload + adcl 2116(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 2120(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + adcl 2124(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 2128(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1992(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1992(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 1996(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2000(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2004(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2008(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 2012(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 2016(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2020(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2024(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2028(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2032(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2036(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 2040(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 2044(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 2048(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 2052(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 2056(%esp), %edi + movl 104(%esp), %eax # 4-byte Reload + adcl 2060(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1920(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1988(%esp), %eax + movl 76(%esp), %edx # 4-byte Reload + addl 1920(%esp), %edx + movl 84(%esp), %ecx # 4-byte Reload + adcl 1924(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1928(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1932(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1936(%esp), %ebp + movl 56(%esp), %ecx # 
4-byte Reload + adcl 1940(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1944(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1948(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1952(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1956(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1960(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1964(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 1968(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1972(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 1976(%esp), %esi + adcl 1980(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1984(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %edi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1848(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1848(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1852(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1856(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1860(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1864(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1868(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1872(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1876(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1880(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1884(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1888(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1892(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1896(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1900(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 1904(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 1908(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 1912(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1916(%esp), %eax + movl %eax, %edi + movl 2600(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1776(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1844(%esp), %eax + movl 84(%esp), %edx # 4-byte Reload + addl 1776(%esp), %edx + movl 80(%esp), %ecx # 4-byte Reload + adcl 1780(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1784(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1788(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1792(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1796(%esp), %ecx + movl 
%ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1800(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1804(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1808(%esp), %esi + movl 96(%esp), %ecx # 4-byte Reload + adcl 1812(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1816(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1820(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1824(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1828(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + adcl 1832(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1836(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 1840(%esp), %edi + adcl $0, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1704(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1704(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1708(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1712(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1716(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1720(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1724(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1728(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1732(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1736(%esp), %esi + movl %esi, %ebp + movl 96(%esp), %esi # 4-byte Reload + adcl 1740(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 1744(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1748(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1752(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1756(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1760(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1764(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1768(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1772(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1632(%esp), %ecx + movl 2596(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + movl 1700(%esp), %eax + movl 80(%esp), %edx # 4-byte Reload + addl 1632(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + adcl 1636(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1640(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1644(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1648(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1652(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1656(%esp), %ecx + 
movl %ecx, 64(%esp) # 4-byte Spill + adcl 1660(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + adcl 1664(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1668(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 1672(%esp), %esi + movl 116(%esp), %ecx # 4-byte Reload + adcl 1676(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1680(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1684(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1688(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 1692(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1696(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1560(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1560(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 1564(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1568(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1572(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1576(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 1580(%esp), %edi + movl 64(%esp), %ebp # 4-byte Reload + adcl 1584(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 1588(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1592(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1596(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1600(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1604(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 1608(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1612(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1616(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1620(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1624(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1628(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1488(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1556(%esp), %eax + movl 60(%esp), %ecx # 4-byte Reload + addl 1488(%esp), %ecx + movl 52(%esp), %edx # 4-byte Reload + adcl 1492(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 1496(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 1500(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 1504(%esp), %edi + adcl 1508(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 1512(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 1516(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 108(%esp), %edx # 4-byte Reload + adcl 1520(%esp), %edx + movl %edx, 
108(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 1524(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 116(%esp), %edx # 4-byte Reload + adcl 1528(%esp), %edx + movl %edx, 116(%esp) # 4-byte Spill + adcl 1532(%esp), %esi + movl %esi, %ebp + movl 92(%esp), %edx # 4-byte Reload + adcl 1536(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 104(%esp), %edx # 4-byte Reload + adcl 1540(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 1544(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 1548(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 1552(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1416(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1416(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 1428(%esp), %esi + adcl 1432(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 1436(%esp), %edi + movl 88(%esp), %eax # 4-byte Reload + adcl 1440(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1444(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1448(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1452(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1456(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 1460(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1464(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1472(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1476(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1480(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 1484(%esp), %ebp + movl 2600(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 1344(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1412(%esp), %eax + movl 52(%esp), %edx # 4-byte Reload + addl 1344(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + adcl 1348(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 1352(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1356(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 1360(%esp), %edi + movl 88(%esp), %ecx # 4-byte Reload + adcl 1364(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1368(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1372(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1376(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1380(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 
4-byte Reload + adcl 1384(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1388(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1392(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 1396(%esp), %esi + movl 84(%esp), %ecx # 4-byte Reload + adcl 1400(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1404(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 1408(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1272(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1272(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1288(%esp), %edi + movl 88(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 1304(%esp), %ebp + movl 116(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1312(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1320(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1324(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1328(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 1200(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1268(%esp), %eax + movl 56(%esp), %ecx # 4-byte Reload + addl 1200(%esp), %ecx + movl 68(%esp), %edx # 4-byte Reload + adcl 1204(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 1208(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + adcl 1212(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 1216(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 1220(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 1224(%esp), %esi + adcl 1228(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 1232(%esp), %edi + movl 112(%esp), %edx # 4-byte Reload + adcl 1236(%esp), %edx + movl %edx, 112(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 1240(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 104(%esp), %edx # 4-byte Reload + adcl 1244(%esp), %edx + movl %edx, 104(%esp) # 4-byte 
Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 1248(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 1252(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 1256(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 1260(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 1264(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1128(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1128(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1148(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 1152(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 1160(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 1172(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 1180(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 1188(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 1056(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1124(%esp), %edx + movl 68(%esp), %eax # 4-byte Reload + addl 1056(%esp), %eax + movl 72(%esp), %ecx # 4-byte Reload + adcl 1060(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1064(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1068(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1072(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1076(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1080(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1084(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1088(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1092(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1096(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1100(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 1104(%esp), 
%esi + movl %esi, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1108(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 1112(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 1116(%esp), %edi + movl 56(%esp), %ecx # 4-byte Reload + adcl 1120(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 984(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 984(%esp), %esi + movl 72(%esp), %esi # 4-byte Reload + adcl 988(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 996(%esp), %ebp + movl 96(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1044(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 912(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 980(%esp), %eax + addl 912(%esp), %esi + movl 64(%esp), %edx # 4-byte Reload + adcl 916(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl 920(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 924(%esp), %edi + movl 108(%esp), %edx # 4-byte Reload + adcl 928(%esp), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 932(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 116(%esp), %edx # 4-byte Reload + adcl 936(%esp), %edx + movl %edx, 116(%esp) # 4-byte Spill + movl 112(%esp), %edx # 4-byte Reload + adcl 940(%esp), %edx + movl %edx, 112(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 944(%esp), %ebp + movl 104(%esp), %edx # 4-byte Reload + adcl 948(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 952(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 956(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 960(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 964(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte 
Reload + adcl 968(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 972(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 976(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %esi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 840(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 840(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 852(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 864(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %ebp, %esi + adcl 872(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 888(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 768(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 836(%esp), %edx + movl 64(%esp), %ecx # 4-byte Reload + addl 768(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 788(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 112(%esp), %edi # 4-byte Reload + adcl 792(%esp), %edi + adcl 796(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 812(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 828(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 
832(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 696(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 716(%esp), %esi + adcl 720(%esp), %edi + movl %edi, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 732(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 756(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 624(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 692(%esp), %edx + movl 88(%esp), %ecx # 4-byte Reload + addl 624(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 636(%esp), %ebp + adcl 640(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 656(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 660(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 88(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, 
%esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 552(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 564(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 576(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 588(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 600(%esp), %ebp + movl 56(%esp), %edi # 4-byte Reload + adcl 604(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 480(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 548(%esp), %edx + movl 96(%esp), %ecx # 4-byte Reload + addl 480(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 500(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 524(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 528(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 96(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 408(%esp), %ecx + movl 2604(%esp), %edx + calll 
.LmulPv544x32 + addl 408(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 420(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 432(%esp), %ebp + movl 76(%esp), %edi # 4-byte Reload + adcl 436(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 60(%eax), %eax + movl %eax, (%esp) + leal 336(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 404(%esp), %edx + movl 108(%esp), %ecx # 4-byte Reload + addl 336(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 344(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 356(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + adcl 360(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 364(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 372(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 108(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 264(%esp), %esi + movl 100(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill 
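+# The pattern above repeats once per 32-bit limb of the multiplier:
+# .LmulPv544x32 forms a 544-bit-by-32-bit partial product, the adcl chain
+# folds it into the running accumulator, and the imull against 48(%esp)
+# (which appears to cache -p^{-1} mod 2^32, loaded from -4 off the modulus
+# pointer in the prologue) derives the Montgomery quotient for each
+# reduction call.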
+ movl 116(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 280(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 292(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 300(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 308(%esp), %edi + movl 68(%esp), %ebp # 4-byte Reload + adcl 312(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 64(%eax), %eax + movl %eax, (%esp) + leal 192(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 260(%esp), %edx + movl 100(%esp), %ecx # 4-byte Reload + addl 192(%esp), %ecx + movl 116(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 204(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 232(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + adcl 236(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 120(%esp), %esi + movl 92(%esp), %esi # 4-byte Reload + movl 116(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl 128(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + 
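+# After this final reduction round, the subl/sbbl chain below computes t - p
+# across all 17 limbs; the montNF variant then uses the sign of the last
+# borrow (the sarl/js sequence) to select t or t - p, instead of the
+# materialized carry (sbbl/andl $1) used by mcl_fp_mont17Lbmi2 above.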
adcl 132(%esp), %esi + movl 104(%esp), %edx # 4-byte Reload + adcl 136(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 140(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 148(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 152(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 156(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 160(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 164(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 168(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 172(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 176(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 180(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 108(%esp), %edx # 4-byte Reload + adcl 184(%esp), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 188(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl %eax, %edx + movl 2604(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %ebp + movl %esi, %ebx + sbbl 8(%edi), %ebx + movl 104(%esp), %ecx # 4-byte Reload + sbbl 12(%edi), %ecx + movl 76(%esp), %eax # 4-byte Reload + sbbl 16(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + sbbl 20(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + sbbl 24(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + sbbl 44(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + sbbl 48(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + sbbl 52(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + sbbl 56(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 60(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + sbbl 64(%edi), %eax + movl %eax, 92(%esp) # 4-byte Spill + sarl $31, %eax + testl %eax, %eax + movl 116(%esp), %edi # 4-byte Reload + js .LBB259_2 +# BB#1: + movl %edx, %edi +.LBB259_2: + movl 2592(%esp), %edx + movl %edi, (%edx) + movl 112(%esp), %edi # 4-byte Reload + js .LBB259_4 +# BB#3: + movl %ebp, %edi +.LBB259_4: + movl %edi, 4(%edx) + js .LBB259_6 +# BB#5: + movl %ebx, %esi +.LBB259_6: + movl %esi, 8(%edx) + movl 104(%esp), %esi # 4-byte Reload + js .LBB259_8 +# BB#7: + movl %ecx, %esi +.LBB259_8: + movl %esi, 12(%edx) + movl 76(%esp), %ecx # 4-byte Reload + js .LBB259_10 +# BB#9: + movl 4(%esp), %ecx # 4-byte Reload +.LBB259_10: + movl %ecx, 16(%edx) + movl 84(%esp), %eax # 4-byte Reload + js .LBB259_12 +# BB#11: + movl 8(%esp), %eax # 4-byte 
Reload +.LBB259_12: + movl %eax, 20(%edx) + movl 80(%esp), %eax # 4-byte Reload + js .LBB259_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB259_14: + movl %eax, 24(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB259_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB259_16: + movl %eax, 28(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB259_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB259_18: + movl %eax, 32(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB259_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB259_20: + movl %eax, 36(%edx) + movl 68(%esp), %eax # 4-byte Reload + js .LBB259_22 +# BB#21: + movl 28(%esp), %eax # 4-byte Reload +.LBB259_22: + movl %eax, 40(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB259_24 +# BB#23: + movl 32(%esp), %eax # 4-byte Reload +.LBB259_24: + movl %eax, 44(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB259_26 +# BB#25: + movl 36(%esp), %eax # 4-byte Reload +.LBB259_26: + movl %eax, 48(%edx) + movl 88(%esp), %eax # 4-byte Reload + js .LBB259_28 +# BB#27: + movl 40(%esp), %eax # 4-byte Reload +.LBB259_28: + movl %eax, 52(%edx) + movl 96(%esp), %eax # 4-byte Reload + js .LBB259_30 +# BB#29: + movl 44(%esp), %eax # 4-byte Reload +.LBB259_30: + movl %eax, 56(%edx) + movl 108(%esp), %eax # 4-byte Reload + js .LBB259_32 +# BB#31: + movl 48(%esp), %eax # 4-byte Reload +.LBB259_32: + movl %eax, 60(%edx) + movl 100(%esp), %eax # 4-byte Reload + js .LBB259_34 +# BB#33: + movl 92(%esp), %eax # 4-byte Reload +.LBB259_34: + movl %eax, 64(%edx) + addl $2572, %esp # imm = 0xA0C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end259: + .size mcl_fp_montNF17Lbmi2, .Lfunc_end259-mcl_fp_montNF17Lbmi2 + + .globl mcl_fp_montRed17Lbmi2 + .align 16, 0x90 + .type mcl_fp_montRed17Lbmi2,@function +mcl_fp_montRed17Lbmi2: # @mcl_fp_montRed17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1436, %esp # imm = 0x59C + calll .L260$pb +.L260$pb: + popl %eax +.Ltmp61: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp61-.L260$pb), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1464(%esp), %edx + movl -4(%edx), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 1460(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 76(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 80(%esp) # 4-byte Spill + imull %esi, %ebx + movl 132(%ecx), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 128(%ecx), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 124(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%ecx), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%ecx), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 112(%ecx), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 108(%ecx), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 104(%ecx), %esi + movl %esi, 156(%esp) # 4-byte Spill + movl 100(%ecx), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 96(%ecx), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 92(%ecx), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 88(%ecx), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 84(%ecx), %esi + movl %esi, 180(%esp) # 4-byte Spill + movl 80(%ecx), %edi + movl %edi, 196(%esp) # 4-byte Spill + movl 76(%ecx), %esi + movl %esi, 192(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl %esi, 204(%esp) # 4-byte Spill + movl 68(%ecx), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 64(%ecx), %ebp + movl %ebp, 176(%esp) # 4-byte Spill + movl 60(%ecx), %ebp + movl %ebp, 164(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 
52(%ecx), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 44(%ecx), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 28(%ecx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 24(%ecx), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 20(%ecx), %ebp + movl 16(%ecx), %esi + movl 12(%ecx), %edi + movl 8(%ecx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl (%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 64(%edx), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%edx), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 1360(%esp), %ecx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 76(%esp), %eax # 4-byte Reload + addl 1360(%esp), %eax + movl 80(%esp), %ecx # 4-byte Reload + adcl 1364(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 1372(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl 1376(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + adcl 1380(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1384(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1400(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1404(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1408(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1412(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + adcl $0, 204(%esp) # 4-byte Folded Spill + adcl $0, 192(%esp) # 4-byte Folded Spill + adcl $0, 196(%esp) # 4-byte Folded Spill + adcl $0, 180(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, 188(%esp) # 
4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + movl 128(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1288(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + andl $1, %edi + movl %edi, %ecx + addl 1288(%esp), %esi + movl 100(%esp), %edx # 4-byte Reload + adcl 1292(%esp), %edx + movl 72(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1312(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1320(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1324(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1328(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %edi # 4-byte Reload + adcl 1336(%esp), %edi + movl 148(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + adcl $0, 192(%esp) # 4-byte Folded Spill + adcl $0, 196(%esp) # 4-byte Folded Spill + adcl $0, 180(%esp) # 4-byte Folded Spill + movl 184(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, %esi + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 128(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1216(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1216(%esp), %ebp + movl 72(%esp), %ecx # 4-byte Reload + adcl 1220(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + 
adcl 1228(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1244(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1248(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl 1260(%esp), %edi + movl %edi, 136(%esp) # 4-byte Spill + movl 148(%esp), %edi # 4-byte Reload + adcl 1264(%esp), %edi + movl 164(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + adcl $0, 196(%esp) # 4-byte Folded Spill + adcl $0, 180(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 184(%esp) # 4-byte Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + movl 144(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1144(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1144(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1148(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + adcl 1188(%esp), %edi + movl %edi, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte 
Reload + adcl 1200(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1204(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1208(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 1212(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + adcl $0, 180(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + movl 188(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 144(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1072(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1072(%esp), %esi + movl 80(%esp), %esi # 4-byte Reload + adcl 1076(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 1080(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1088(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 188(%esp) # 4-byte Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + movl 172(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + movl 152(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 96(%esp), %eax # 4-byte Folded 
Reload + movl %eax, (%esp) + leal 1000(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1000(%esp), %esi + movl 84(%esp), %ecx # 4-byte Reload + adcl 1004(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1044(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 172(%esp) # 4-byte Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 152(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 928(%esp), %esi + movl 88(%esp), %esi # 4-byte Reload + adcl 932(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 936(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 176(%esp) # 
4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + movl 160(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 856(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 856(%esp), %esi + movl 92(%esp), %ecx # 4-byte Reload + adcl 860(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + adcl 924(%esp), %ebp + movl %ebp, 168(%esp) # 4-byte Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 160(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + movl 124(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + movl 96(%esp), 
%ebp # 4-byte Reload + imull %ebp, %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 784(%esp), %edi + movl 108(%esp), %ecx # 4-byte Reload + adcl 788(%esp), %ecx + movl 112(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 124(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull %ebp, %eax + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 712(%esp), %esi + movl 112(%esp), %ecx # 4-byte Reload + adcl 716(%esp), %ecx + movl 120(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %ebp 
# 4-byte Reload + adcl 760(%esp), %ebp + movl 184(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + adcl 780(%esp), %edi + movl %edi, 156(%esp) # 4-byte Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 640(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 640(%esp), %esi + movl 120(%esp), %ecx # 4-byte Reload + adcl 644(%esp), %ecx + movl 140(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %edi # 4-byte Reload + adcl 672(%esp), %edi + movl 192(%esp), %esi # 4-byte Reload + adcl 676(%esp), %esi + movl 196(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + adcl 684(%esp), %ebp + movl %ebp, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 1464(%esp), %eax + movl %eax, %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 568(%esp), %ebp + movl 140(%esp), %ecx # 4-byte Reload + adcl 572(%esp), %ecx + movl 136(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 
176(%esp), %ebp # 4-byte Reload + adcl 588(%esp), %ebp + movl 200(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + adcl 596(%esp), %edi + movl %edi, 204(%esp) # 4-byte Spill + adcl 600(%esp), %esi + movl %esi, 192(%esp) # 4-byte Spill + movl 196(%esp), %esi # 4-byte Reload + adcl 604(%esp), %esi + movl 180(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 496(%esp), %edi + movl 136(%esp), %edi # 4-byte Reload + adcl 500(%esp), %edi + movl 148(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + adcl 512(%esp), %ebp + movl %ebp, 176(%esp) # 4-byte Spill + movl 200(%esp), %ebp # 4-byte Reload + adcl 516(%esp), %ebp + movl 204(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + adcl 528(%esp), %esi + movl %esi, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 100(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %edi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), 
%ebx # 4-byte Reload + calll .LmulPv544x32 + addl 424(%esp), %edi + movl 148(%esp), %ecx # 4-byte Reload + adcl 428(%esp), %ecx + movl 164(%esp), %edi # 4-byte Reload + adcl 432(%esp), %edi + movl 176(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + adcl 440(%esp), %ebp + movl 204(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 100(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 352(%esp), %esi + movl %edi, %ecx + adcl 356(%esp), %ecx + movl 176(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + adcl 364(%esp), %ebp + movl %ebp, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %edi # 4-byte Reload + adcl 384(%esp), %edi + movl 188(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax 
+ movl %eax, 132(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 100(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 280(%esp), %ebp + movl 176(%esp), %ecx # 4-byte Reload + adcl 284(%esp), %ecx + movl 200(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + adcl 308(%esp), %edi + movl %edi, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, %esi + movl 96(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %ebp + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 208(%esp), %ebp + movl 200(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %edx # 4-byte Reload + adcl 216(%esp), %edx + movl %edx, 204(%esp) # 4-byte Spill + movl 192(%esp), %ecx # 4-byte Reload + adcl 220(%esp), %ecx + movl %ecx, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl %eax, %ebp + movl 184(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + 
movl 144(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %ebx # 4-byte Reload + adcl 264(%esp), %ebx + movl %ebx, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 276(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 200(%esp), %edi # 4-byte Reload + subl 16(%esp), %edi # 4-byte Folded Reload + sbbl 4(%esp), %edx # 4-byte Folded Reload + sbbl 8(%esp), %ecx # 4-byte Folded Reload + movl 196(%esp), %eax # 4-byte Reload + sbbl 12(%esp), %eax # 4-byte Folded Reload + sbbl 20(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 88(%esp) # 4-byte Spill + movl 184(%esp), %esi # 4-byte Reload + sbbl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 92(%esp) # 4-byte Spill + movl 188(%esp), %esi # 4-byte Reload + sbbl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 96(%esp) # 4-byte Spill + movl 168(%esp), %esi # 4-byte Reload + sbbl 32(%esp), %esi # 4-byte Folded Reload + movl 172(%esp), %ebp # 4-byte Reload + sbbl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 104(%esp) # 4-byte Spill + movl 156(%esp), %ebp # 4-byte Reload + sbbl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 108(%esp) # 4-byte Spill + movl 160(%esp), %ebp # 4-byte Reload + sbbl 44(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 112(%esp) # 4-byte Spill + movl 152(%esp), %ebp # 4-byte Reload + sbbl 48(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 120(%esp) # 4-byte Spill + movl 144(%esp), %ebp # 4-byte Reload + sbbl 52(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 136(%esp) # 4-byte Spill + sbbl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 140(%esp) # 4-byte Spill + movl 132(%esp), %ebx # 4-byte Reload + sbbl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 148(%esp) # 4-byte Spill + movl 124(%esp), %ebx # 4-byte Reload + sbbl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 164(%esp) # 4-byte Spill + movl 116(%esp), %ebx # 4-byte Reload + sbbl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 176(%esp) # 4-byte Spill + movl 100(%esp), %ebx # 4-byte Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB260_2 +# BB#1: + movl %esi, 168(%esp) # 4-byte Spill +.LBB260_2: + testb %bl, %bl + movl 200(%esp), %esi # 4-byte Reload + jne .LBB260_4 +# BB#3: + movl %edi, %esi +.LBB260_4: + movl 1456(%esp), %edi + movl %esi, (%edi) + movl 156(%esp), %esi # 4-byte Reload + movl 204(%esp), %ebx # 4-byte Reload + jne .LBB260_6 +# BB#5: + movl %edx, %ebx +.LBB260_6: + movl %ebx, 4(%edi) + movl 144(%esp), %ebx # 4-byte Reload + movl 192(%esp), %edx # 4-byte Reload + jne .LBB260_8 +# BB#7: + movl %ecx, %edx +.LBB260_8: + movl %edx, 8(%edi) + movl 132(%esp), %edx # 4-byte Reload + movl 196(%esp), %ecx # 4-byte Reload + jne .LBB260_10 +# BB#9: + movl %eax, %ecx +.LBB260_10: + movl %ecx, 12(%edi) + movl 124(%esp), %ecx # 4-byte Reload + movl 180(%esp), %eax # 4-byte Reload + jne .LBB260_12 +# BB#11: + movl 88(%esp), %eax # 4-byte Reload +.LBB260_12: + movl %eax, 16(%edi) + movl 188(%esp), %eax # 4-byte Reload + movl 184(%esp), %ebp # 4-byte Reload + jne .LBB260_14 +# BB#13: + movl 92(%esp), %ebp # 4-byte Reload +.LBB260_14: + movl %ebp, 20(%edi) + movl 152(%esp), %ebp # 4-byte Reload + jne .LBB260_16 +# BB#15: + movl 96(%esp), %eax # 4-byte Reload +.LBB260_16: + movl %eax, 24(%edi) + movl 168(%esp), %eax # 4-byte Reload + movl %eax, 
28(%edi) + jne .LBB260_18 +# BB#17: + movl 104(%esp), %eax # 4-byte Reload + movl %eax, 172(%esp) # 4-byte Spill +.LBB260_18: + movl 172(%esp), %eax # 4-byte Reload + movl %eax, 32(%edi) + jne .LBB260_20 +# BB#19: + movl 108(%esp), %esi # 4-byte Reload +.LBB260_20: + movl %esi, 36(%edi) + jne .LBB260_22 +# BB#21: + movl 112(%esp), %eax # 4-byte Reload + movl %eax, 160(%esp) # 4-byte Spill +.LBB260_22: + movl 160(%esp), %esi # 4-byte Reload + movl %esi, 40(%edi) + movl 128(%esp), %eax # 4-byte Reload + jne .LBB260_24 +# BB#23: + movl 120(%esp), %ebp # 4-byte Reload +.LBB260_24: + movl %ebp, 44(%edi) + jne .LBB260_26 +# BB#25: + movl 136(%esp), %ebx # 4-byte Reload +.LBB260_26: + movl %ebx, 48(%edi) + jne .LBB260_28 +# BB#27: + movl 140(%esp), %eax # 4-byte Reload +.LBB260_28: + movl %eax, 52(%edi) + jne .LBB260_30 +# BB#29: + movl 148(%esp), %edx # 4-byte Reload +.LBB260_30: + movl %edx, 56(%edi) + movl 116(%esp), %eax # 4-byte Reload + jne .LBB260_32 +# BB#31: + movl 164(%esp), %ecx # 4-byte Reload +.LBB260_32: + movl %ecx, 60(%edi) + jne .LBB260_34 +# BB#33: + movl 176(%esp), %eax # 4-byte Reload +.LBB260_34: + movl %eax, 64(%edi) + addl $1436, %esp # imm = 0x59C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end260: + .size mcl_fp_montRed17Lbmi2, .Lfunc_end260-mcl_fp_montRed17Lbmi2 + + .globl mcl_fp_addPre17Lbmi2 + .align 16, 0x90 + .type mcl_fp_addPre17Lbmi2,@function +mcl_fp_addPre17Lbmi2: # @mcl_fp_addPre17Lbmi2 +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %ebx + adcl 8(%ecx), %ebx + movl 16(%esp), %edi + movl %edx, (%edi) + movl 12(%ecx), %edx + movl %esi, 4(%edi) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %ebx, 8(%edi) + movl 20(%eax), %ebx + movl %edx, 12(%edi) + movl 20(%ecx), %edx + adcl %ebx, %edx + movl 24(%eax), %ebx + movl %esi, 16(%edi) + movl 24(%ecx), %esi + adcl %ebx, %esi + movl 28(%eax), %ebx + movl %edx, 20(%edi) + movl 28(%ecx), %edx + adcl %ebx, %edx + movl 32(%eax), %ebx + movl %esi, 24(%edi) + movl 32(%ecx), %esi + adcl %ebx, %esi + movl 36(%eax), %ebx + movl %edx, 28(%edi) + movl 36(%ecx), %edx + adcl %ebx, %edx + movl 40(%eax), %ebx + movl %esi, 32(%edi) + movl 40(%ecx), %esi + adcl %ebx, %esi + movl 44(%eax), %ebx + movl %edx, 36(%edi) + movl 44(%ecx), %edx + adcl %ebx, %edx + movl 48(%eax), %ebx + movl %esi, 40(%edi) + movl 48(%ecx), %esi + adcl %ebx, %esi + movl 52(%eax), %ebx + movl %edx, 44(%edi) + movl 52(%ecx), %edx + adcl %ebx, %edx + movl 56(%eax), %ebx + movl %esi, 48(%edi) + movl 56(%ecx), %esi + adcl %ebx, %esi + movl 60(%eax), %ebx + movl %edx, 52(%edi) + movl 60(%ecx), %edx + adcl %ebx, %edx + movl %esi, 56(%edi) + movl %edx, 60(%edi) + movl 64(%eax), %eax + movl 64(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 64(%edi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end261: + .size mcl_fp_addPre17Lbmi2, .Lfunc_end261-mcl_fp_addPre17Lbmi2 + + .globl mcl_fp_subPre17Lbmi2 + .align 16, 0x90 + .type mcl_fp_subPre17Lbmi2,@function +mcl_fp_subPre17Lbmi2: # @mcl_fp_subPre17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebp + sbbl 8(%edx), %ebp + movl 20(%esp), %ebx + movl %esi, (%ebx) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebx) + movl 
16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebp, 8(%ebx) + movl 20(%edx), %ebp + movl %esi, 12(%ebx) + movl 20(%ecx), %esi + sbbl %ebp, %esi + movl 24(%edx), %ebp + movl %edi, 16(%ebx) + movl 24(%ecx), %edi + sbbl %ebp, %edi + movl 28(%edx), %ebp + movl %esi, 20(%ebx) + movl 28(%ecx), %esi + sbbl %ebp, %esi + movl 32(%edx), %ebp + movl %edi, 24(%ebx) + movl 32(%ecx), %edi + sbbl %ebp, %edi + movl 36(%edx), %ebp + movl %esi, 28(%ebx) + movl 36(%ecx), %esi + sbbl %ebp, %esi + movl 40(%edx), %ebp + movl %edi, 32(%ebx) + movl 40(%ecx), %edi + sbbl %ebp, %edi + movl 44(%edx), %ebp + movl %esi, 36(%ebx) + movl 44(%ecx), %esi + sbbl %ebp, %esi + movl 48(%edx), %ebp + movl %edi, 40(%ebx) + movl 48(%ecx), %edi + sbbl %ebp, %edi + movl 52(%edx), %ebp + movl %esi, 44(%ebx) + movl 52(%ecx), %esi + sbbl %ebp, %esi + movl 56(%edx), %ebp + movl %edi, 48(%ebx) + movl 56(%ecx), %edi + sbbl %ebp, %edi + movl 60(%edx), %ebp + movl %esi, 52(%ebx) + movl 60(%ecx), %esi + sbbl %ebp, %esi + movl %edi, 56(%ebx) + movl %esi, 60(%ebx) + movl 64(%edx), %edx + movl 64(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 64(%ebx) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end262: + .size mcl_fp_subPre17Lbmi2, .Lfunc_end262-mcl_fp_subPre17Lbmi2 + + .globl mcl_fp_shr1_17Lbmi2 + .align 16, 0x90 + .type mcl_fp_shr1_17Lbmi2,@function +mcl_fp_shr1_17Lbmi2: # @mcl_fp_shr1_17Lbmi2 +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 40(%ecx) + movl 48(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 44(%ecx) + movl 52(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 48(%ecx) + movl 56(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 52(%ecx) + movl 60(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 56(%ecx) + movl 64(%eax), %eax + shrdl $1, %eax, %esi + movl %esi, 60(%ecx) + shrl %eax + movl %eax, 64(%ecx) + popl %esi + retl +.Lfunc_end263: + .size mcl_fp_shr1_17Lbmi2, .Lfunc_end263-mcl_fp_shr1_17Lbmi2 + + .globl mcl_fp_add17Lbmi2 + .align 16, 0x90 + .type mcl_fp_add17Lbmi2,@function +mcl_fp_add17Lbmi2: # @mcl_fp_add17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 88(%esp), %esi + movl (%esi), %ecx + movl 4(%esi), %eax + movl 84(%esp), %edx + addl (%edx), %ecx + movl %ecx, %ebx + adcl 4(%edx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 8(%esi), %eax + adcl 8(%edx), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl 16(%edx), %edi + adcl 12(%esi), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 16(%esi), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl 20(%edx), %eax + adcl 20(%esi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 24(%edx), %eax + adcl 24(%esi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 28(%edx), %eax + adcl 28(%esi), %eax + movl %eax, 40(%esp) # 4-byte 
Spill + movl 32(%edx), %eax + adcl 32(%esi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 36(%edx), %eax + adcl 36(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 40(%edx), %eax + adcl 40(%esi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%edx), %eax + adcl 44(%esi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 48(%edx), %eax + adcl 48(%esi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 52(%edx), %eax + adcl 52(%esi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 56(%edx), %eax + adcl 56(%esi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 60(%edx), %ebp + adcl 60(%esi), %ebp + movl 64(%edx), %edx + adcl 64(%esi), %edx + movl 80(%esp), %esi + movl %ebx, (%esi) + movl %ebx, %eax + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%esi) + movl 56(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%esi) + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%esi) + movl %edi, 16(%esi) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 20(%esi) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 24(%esi) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 28(%esi) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 32(%esi) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 36(%esi) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 40(%esi) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 44(%esi) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 48(%esi) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 52(%esi) + movl 12(%esp), %edi # 4-byte Reload + movl %edi, 56(%esi) + movl %ebp, 60(%esi) + movl %edx, 64(%esi) + sbbl %ebx, %ebx + andl $1, %ebx + movl 92(%esp), %edi + subl (%edi), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + sbbl 8(%edi), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 12(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 4(%esp), %eax # 4-byte Reload + sbbl 16(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + sbbl 20(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + sbbl 24(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + sbbl 44(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + sbbl 48(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + sbbl 52(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + sbbl 56(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + sbbl 60(%edi), %ebp + sbbl 64(%edi), %edx + sbbl $0, %ebx + testb $1, %bl + jne .LBB264_2 +# BB#1: # %nocarry + movl (%esp), %edi # 4-byte Reload + movl %edi, (%esi) + movl 8(%esp), %edi # 4-byte Reload + movl %edi, 4(%esi) + movl 56(%esp), %edi # 4-byte Reload + movl %edi, 8(%esi) + movl 52(%esp), %edi # 4-byte Reload + movl %edi, 12(%esi) + movl 4(%esp), %edi # 4-byte Reload + movl %edi, 16(%esi) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 20(%esi) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 24(%esi) + movl 40(%esp), %edi # 4-byte Reload + 
movl %edi, 28(%esi) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 32(%esi) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 36(%esi) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 40(%esi) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 44(%esi) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 48(%esi) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%esi) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%esi) + movl %ebp, 60(%esi) + movl %edx, 64(%esi) +.LBB264_2: # %carry + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end264: + .size mcl_fp_add17Lbmi2, .Lfunc_end264-mcl_fp_add17Lbmi2 + + .globl mcl_fp_addNF17Lbmi2 + .align 16, 0x90 + .type mcl_fp_addNF17Lbmi2,@function +mcl_fp_addNF17Lbmi2: # @mcl_fp_addNF17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $132, %esp + movl 160(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 156(%esp), %esi + addl (%esi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 4(%esi), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 64(%eax), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 60(%eax), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 56(%eax), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 52(%eax), %ebp + movl 48(%eax), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 44(%eax), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 40(%eax), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 36(%eax), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 32(%eax), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 28(%eax), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 20(%eax), %ebx + movl 16(%eax), %edi + movl 12(%eax), %edx + movl 8(%eax), %ecx + adcl 8(%esi), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 12(%esi), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 16(%esi), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl 20(%esi), %ebx + movl %ebx, 76(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 24(%esi), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 28(%esi), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 32(%esi), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esi), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 40(%esi), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 44(%esi), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 48(%esi), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 52(%esi), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 56(%esi), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 60(%esi), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 64(%esi), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 164(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + subl (%esi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + sbbl 4(%esi), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 8(%esi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 12(%esi), %edx + sbbl 16(%esi), %edi + movl %edi, 12(%esp) # 4-byte Spill + sbbl 20(%esi), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 120(%esp), %ebx # 4-byte Reload + sbbl 24(%esi), %ebx + movl %ebx, 20(%esp) # 4-byte 
Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 28(%esi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + sbbl 32(%esi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 36(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + sbbl 40(%esi), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + sbbl 44(%esi), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + sbbl 48(%esi), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + movl %eax, %ecx + sbbl 52(%esi), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + movl %eax, %ecx + movl %eax, %ebp + sbbl 56(%esi), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + movl %eax, %ecx + sbbl 60(%esi), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + movl %eax, %ebx + sbbl 64(%esi), %ebx + movl %ebx, 60(%esp) # 4-byte Spill + movl %ebx, %esi + sarl $31, %esi + testl %esi, %esi + movl 84(%esp), %esi # 4-byte Reload + js .LBB265_2 +# BB#1: + movl (%esp), %esi # 4-byte Reload +.LBB265_2: + movl 152(%esp), %ebx + movl %esi, (%ebx) + movl 88(%esp), %eax # 4-byte Reload + js .LBB265_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB265_4: + movl %eax, 4(%ebx) + movl 108(%esp), %eax # 4-byte Reload + movl 76(%esp), %esi # 4-byte Reload + movl 64(%esp), %edi # 4-byte Reload + js .LBB265_6 +# BB#5: + movl 8(%esp), %edi # 4-byte Reload +.LBB265_6: + movl %edi, 8(%ebx) + movl 116(%esp), %edi # 4-byte Reload + movl 68(%esp), %ecx # 4-byte Reload + js .LBB265_8 +# BB#7: + movl %edx, %ecx +.LBB265_8: + movl %ecx, 12(%ebx) + movl 104(%esp), %ecx # 4-byte Reload + movl 72(%esp), %edx # 4-byte Reload + js .LBB265_10 +# BB#9: + movl 12(%esp), %edx # 4-byte Reload +.LBB265_10: + movl %edx, 16(%ebx) + movl %ebp, %edx + js .LBB265_12 +# BB#11: + movl 16(%esp), %esi # 4-byte Reload +.LBB265_12: + movl %esi, 20(%ebx) + movl 112(%esp), %ebp # 4-byte Reload + js .LBB265_14 +# BB#13: + movl 20(%esp), %esi # 4-byte Reload + movl %esi, 120(%esp) # 4-byte Spill +.LBB265_14: + movl 120(%esp), %esi # 4-byte Reload + movl %esi, 24(%ebx) + js .LBB265_16 +# BB#15: + movl 24(%esp), %ecx # 4-byte Reload +.LBB265_16: + movl %ecx, 28(%ebx) + js .LBB265_18 +# BB#17: + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 124(%esp) # 4-byte Spill +.LBB265_18: + movl 124(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%ebx) + js .LBB265_20 +# BB#19: + movl 32(%esp), %eax # 4-byte Reload +.LBB265_20: + movl %eax, 36(%ebx) + movl 100(%esp), %ecx # 4-byte Reload + js .LBB265_22 +# BB#21: + movl 36(%esp), %eax # 4-byte Reload + movl %eax, 128(%esp) # 4-byte Spill +.LBB265_22: + movl 128(%esp), %eax # 4-byte Reload + movl %eax, 40(%ebx) + js .LBB265_24 +# BB#23: + movl 40(%esp), %ebp # 4-byte Reload +.LBB265_24: + movl %ebp, 44(%ebx) + js .LBB265_26 +# BB#25: + movl 44(%esp), %edi # 4-byte Reload +.LBB265_26: + movl %edi, 48(%ebx) + movl 80(%esp), %eax # 4-byte Reload + js .LBB265_28 +# BB#27: + movl 48(%esp), %eax # 4-byte Reload +.LBB265_28: + movl %eax, 52(%ebx) + js .LBB265_30 +# BB#29: + movl 52(%esp), %edx # 4-byte Reload +.LBB265_30: + movl %edx, 56(%ebx) + movl 96(%esp), %eax # 4-byte Reload + js .LBB265_32 +# BB#31: + movl 56(%esp), %eax # 4-byte Reload +.LBB265_32: + movl %eax, 60(%ebx) + js .LBB265_34 +# BB#33: + movl 60(%esp), %ecx # 4-byte Reload +.LBB265_34: + movl %ecx, 64(%ebx) + addl $132, 
%esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end265: + .size mcl_fp_addNF17Lbmi2, .Lfunc_end265-mcl_fp_addNF17Lbmi2 + + .globl mcl_fp_sub17Lbmi2 + .align 16, 0x90 + .type mcl_fp_sub17Lbmi2,@function +mcl_fp_sub17Lbmi2: # @mcl_fp_sub17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $64, %esp + movl 88(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 92(%esp), %edi + subl (%edi), %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esi), %eax + sbbl 32(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esi), %eax + sbbl 36(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 40(%esi), %eax + sbbl 40(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 44(%esi), %eax + sbbl 44(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 48(%esi), %edx + sbbl 48(%edi), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 52(%esi), %ecx + sbbl 52(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 56(%esi), %eax + sbbl 56(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 60(%esi), %ebp + sbbl 60(%edi), %ebp + movl 64(%esi), %esi + sbbl 64(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 84(%esp), %ebx + movl 56(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 60(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 52(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 28(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 32(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 36(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 40(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 44(%ebx) + movl %edx, 48(%ebx) + movl %ecx, 52(%ebx) + movl %eax, 56(%ebx) + movl %ebp, 60(%ebx) + movl %esi, 64(%ebx) + je .LBB266_2 +# BB#1: # %carry + movl %esi, (%esp) # 4-byte Spill + movl 96(%esp), %esi + movl 56(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 48(%esp), %edi # 4-byte Reload + adcl 8(%esi), %edi + movl 12(%esi), %eax + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %edi, 8(%ebx) + movl 16(%esi), %ecx + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 40(%esi), %ecx + adcl 24(%esp), %ecx # 
4-byte Folded Reload + movl %eax, 36(%ebx) + movl 44(%esi), %eax + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%ebx) + movl 48(%esi), %ecx + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %eax, 44(%ebx) + movl 52(%esi), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ecx, 48(%ebx) + movl 56(%esi), %ecx + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %eax, 52(%ebx) + movl %ecx, 56(%ebx) + movl 60(%esi), %eax + adcl %ebp, %eax + movl %eax, 60(%ebx) + movl 64(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%ebx) +.LBB266_2: # %nocarry + addl $64, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end266: + .size mcl_fp_sub17Lbmi2, .Lfunc_end266-mcl_fp_sub17Lbmi2 + + .globl mcl_fp_subNF17Lbmi2 + .align 16, 0x90 + .type mcl_fp_subNF17Lbmi2,@function +mcl_fp_subNF17Lbmi2: # @mcl_fp_subNF17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $116, %esp + movl 140(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 144(%esp), %edi + subl (%edi), %esi + movl %esi, 72(%esp) # 4-byte Spill + sbbl 4(%edi), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 60(%ecx), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 56(%ecx), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 52(%ecx), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 48(%ecx), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 40(%ecx), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 36(%ecx), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %ebx + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 56(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 60(%esp) # 4-byte Spill + sbbl 24(%edi), %ebx + movl %ebx, 64(%esp) # 4-byte Spill + sbbl 28(%edi), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 36(%edi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + sbbl 40(%edi), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + sbbl 48(%edi), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + sbbl 52(%edi), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + sbbl 56(%edi), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + sbbl 60(%edi), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 64(%edi), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl %ecx, %eax + sarl $31, %eax + movl %eax, %edx + shldl $1, %ecx, %edx + movl 148(%esp), %ebx + movl 28(%ebx), %ecx + andl %edx, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 12(%ebx), %ecx + andl %edx, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 4(%ebx), %ecx + andl %edx, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + andl (%ebx), %edx + movl 64(%ebx), %edi + movl %eax, %ecx + andl %ecx, %edi + movl %edi, 44(%esp) # 4-byte Spill + rorxl $31, %ecx, %eax + andl 60(%ebx), %ecx + movl %ecx, 
40(%esp) # 4-byte Spill + movl 56(%ebx), %ecx + andl %eax, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%ebx), %ecx + andl %eax, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 48(%ebx), %ecx + andl %eax, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 44(%ebx), %ecx + andl %eax, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 40(%ebx), %ecx + andl %eax, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 36(%ebx), %ecx + andl %eax, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 32(%ebx), %ecx + andl %eax, %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 24(%ebx), %ebp + andl %eax, %ebp + movl 20(%ebx), %edi + andl %eax, %edi + movl 16(%ebx), %esi + andl %eax, %esi + andl 8(%ebx), %eax + addl 72(%esp), %edx # 4-byte Folded Reload + movl 8(%esp), %ecx # 4-byte Reload + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl 136(%esp), %ebx + movl %edx, (%ebx) + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %ecx, 4(%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %eax, 8(%ebx) + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %edx, 12(%ebx) + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %esi, 16(%ebx) + adcl 64(%esp), %ebp # 4-byte Folded Reload + movl %edi, 20(%ebx) + movl 32(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %ebp, 24(%ebx) + movl (%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 4(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 12(%esp), %ecx # 4-byte Reload + adcl 112(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl 20(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%ebx) + movl 24(%esp), %ecx # 4-byte Reload + adcl 96(%esp), %ecx # 4-byte Folded Reload + movl %eax, 44(%ebx) + movl 28(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %ecx, 48(%ebx) + movl 36(%esp), %ecx # 4-byte Reload + adcl 104(%esp), %ecx # 4-byte Folded Reload + movl %eax, 52(%ebx) + movl 40(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %ecx, 56(%ebx) + movl %eax, 60(%ebx) + movl 44(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%ebx) + addl $116, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end267: + .size mcl_fp_subNF17Lbmi2, .Lfunc_end267-mcl_fp_subNF17Lbmi2 + + .globl mcl_fpDbl_add17Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_add17Lbmi2,@function +mcl_fpDbl_add17Lbmi2: # @mcl_fpDbl_add17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $128, %esp + movl 156(%esp), %ecx + movl 152(%esp), %edx + movl 12(%edx), %edi + movl 16(%edx), %esi + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%edx), %ebp + movl 148(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%edx), %ebp + adcl 8(%edx), %ebx + adcl 12(%ecx), %edi + adcl 16(%ecx), %esi + movl %ebp, 4(%eax) + movl 76(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %edi, 12(%eax) + movl 20(%edx), %edi + adcl %ebx, %edi + movl 24(%ecx), %ebx + movl %esi, 16(%eax) + movl 24(%edx), %esi + adcl %ebx, %esi + movl 28(%ecx), %ebx + movl %edi, 20(%eax) + movl 28(%edx), %edi + adcl %ebx, %edi + movl 32(%ecx), %ebx + movl %esi, 24(%eax) + movl 32(%edx), %esi + adcl %ebx, %esi + movl 36(%ecx), %ebx + movl %edi, 28(%eax) + movl 36(%edx), %edi + adcl %ebx, %edi + movl 40(%ecx), %ebx + movl %esi, 32(%eax) + movl 40(%edx), %esi + adcl %ebx, %esi + 
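+# Note on this family: mcl_fp_sub17Lbmi2 above takes the borrow-propagating
+# path (subtract, then add p back once if the final sbbl borrowed), while
+# mcl_fp_subNF17Lbmi2 is branchless: it broadcasts the final borrow into an
+# all-ones/all-zero mask with sarl $31 and adds (p AND mask) back in one
+# pass; the rorxl it uses is a BMI2 instruction, consistent with the Lbmi2
+# suffix. mcl_fpDbl_add17Lbmi2, running here, adds two 34-limb double-width
+# values: the low 17 limbs are stored as-is and only the high half is
+# conditionally reduced by p. Schematically (hypothetical helper names,
+# 32-bit limbs assumed):
+#   uint32_t c = add_n(z, x, y, 34);          /* full 1088-bit sum        */
+#   uint32_t b = sub_n(t, z + 17, p, 17);     /* try (high half) - p      */
+#   if (b == c) copy_n(z + 17, t, 17);        /* keep the reduced half    */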
movl 44(%ecx), %ebx + movl %edi, 36(%eax) + movl 44(%edx), %edi + adcl %ebx, %edi + movl 48(%ecx), %ebx + movl %esi, 40(%eax) + movl 48(%edx), %esi + adcl %ebx, %esi + movl 52(%ecx), %ebx + movl %edi, 44(%eax) + movl 52(%edx), %edi + adcl %ebx, %edi + movl 56(%ecx), %ebx + movl %esi, 48(%eax) + movl 56(%edx), %esi + adcl %ebx, %esi + movl 60(%ecx), %ebx + movl %edi, 52(%eax) + movl 60(%edx), %edi + adcl %ebx, %edi + movl 64(%ecx), %ebx + movl %esi, 56(%eax) + movl 64(%edx), %esi + adcl %ebx, %esi + movl 68(%ecx), %ebx + movl %edi, 60(%eax) + movl 68(%edx), %edi + adcl %ebx, %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 72(%ecx), %edi + movl %esi, 64(%eax) + movl 72(%edx), %eax + adcl %edi, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 76(%edx), %eax + adcl %ebp, %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 80(%ecx), %esi + movl 80(%edx), %eax + adcl %esi, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 84(%ecx), %esi + movl 84(%edx), %eax + adcl %esi, %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 88(%ecx), %esi + movl 88(%edx), %eax + adcl %esi, %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%ecx), %esi + movl 92(%edx), %eax + adcl %esi, %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 96(%ecx), %esi + movl 96(%edx), %eax + adcl %esi, %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 100(%ecx), %esi + movl 100(%edx), %eax + adcl %esi, %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 104(%ecx), %esi + movl 104(%edx), %eax + adcl %esi, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 108(%ecx), %esi + movl 108(%edx), %eax + adcl %esi, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 112(%ecx), %esi + movl 112(%edx), %eax + adcl %esi, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 116(%ecx), %esi + movl 116(%edx), %eax + adcl %esi, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 120(%ecx), %edi + movl 120(%edx), %esi + adcl %edi, %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 124(%ecx), %ebx + movl 124(%edx), %edi + adcl %ebx, %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 128(%ecx), %ebx + movl 128(%edx), %ebp + adcl %ebx, %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 132(%ecx), %ecx + movl 132(%edx), %edx + adcl %ecx, %edx + sbbl %ecx, %ecx + andl $1, %ecx + movl 160(%esp), %ebx + movl 92(%esp), %eax # 4-byte Reload + subl (%ebx), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + sbbl 4(%ebx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + sbbl 8(%ebx), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 12(%ebx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 16(%ebx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + sbbl 20(%ebx), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + sbbl 24(%ebx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + sbbl 28(%ebx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + sbbl 32(%ebx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + sbbl 36(%ebx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + sbbl 40(%ebx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + sbbl 44(%ebx), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + sbbl 48(%ebx), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 52(%ebx), %esi + movl %esi, 
52(%esp) # 4-byte Spill + sbbl 56(%ebx), %edi + movl %edi, 56(%esp) # 4-byte Spill + sbbl 60(%ebx), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl %edx, %ebp + sbbl 64(%ebx), %ebp + sbbl $0, %ecx + andl $1, %ecx + jne .LBB268_2 +# BB#1: + movl %ebp, %edx +.LBB268_2: + testb %cl, %cl + movl 92(%esp), %eax # 4-byte Reload + movl 88(%esp), %esi # 4-byte Reload + movl 84(%esp), %edi # 4-byte Reload + movl 80(%esp), %ebx # 4-byte Reload + movl 76(%esp), %ebp # 4-byte Reload + jne .LBB268_4 +# BB#3: + movl (%esp), %esi # 4-byte Reload + movl 4(%esp), %edi # 4-byte Reload + movl 8(%esp), %ebx # 4-byte Reload + movl 12(%esp), %ebp # 4-byte Reload + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 124(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 120(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload +.LBB268_4: + movl 148(%esp), %ecx + movl %eax, 68(%ecx) + movl %ecx, %eax + movl 96(%esp), %ecx # 4-byte Reload + movl %ecx, 72(%eax) + movl 100(%esp), %ecx # 4-byte Reload + movl %ecx, 76(%eax) + movl 104(%esp), %ecx # 4-byte Reload + movl %ecx, 80(%eax) + movl 108(%esp), %ecx # 4-byte Reload + movl %ecx, 84(%eax) + movl 112(%esp), %ecx # 4-byte Reload + movl %ecx, 88(%eax) + movl 116(%esp), %ecx # 4-byte Reload + movl %ecx, 92(%eax) + movl 120(%esp), %ecx # 4-byte Reload + movl %ecx, 96(%eax) + movl 124(%esp), %ecx # 4-byte Reload + movl %ecx, 100(%eax) + movl %ebp, 104(%eax) + movl %ebx, 108(%eax) + movl %edi, 112(%eax) + movl %esi, 116(%eax) + movl 72(%esp), %ecx # 4-byte Reload + movl 64(%esp), %esi # 4-byte Reload + jne .LBB268_6 +# BB#5: + movl 52(%esp), %esi # 4-byte Reload +.LBB268_6: + movl %esi, 120(%eax) + movl 68(%esp), %esi # 4-byte Reload + jne .LBB268_8 +# BB#7: + movl 56(%esp), %esi # 4-byte Reload +.LBB268_8: + movl %esi, 124(%eax) + jne .LBB268_10 +# BB#9: + movl 60(%esp), %ecx # 4-byte Reload +.LBB268_10: + movl %ecx, 128(%eax) + movl %edx, 132(%eax) + addl $128, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end268: + .size mcl_fpDbl_add17Lbmi2, .Lfunc_end268-mcl_fpDbl_add17Lbmi2 + + .globl mcl_fpDbl_sub17Lbmi2 + .align 16, 0x90 + .type mcl_fpDbl_sub17Lbmi2,@function +mcl_fpDbl_sub17Lbmi2: # @mcl_fpDbl_sub17Lbmi2 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $116, %esp + movl 140(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %edi + movl 144(%esp), %esi + subl (%esi), %eax + sbbl 4(%esi), %edi + movl 8(%edx), %ebx + sbbl 8(%esi), %ebx + movl 136(%esp), %ecx + movl %eax, (%ecx) + movl 12(%edx), %eax + sbbl 12(%esi), %eax + movl %edi, 4(%ecx) + movl 16(%edx), %edi + sbbl 16(%esi), %edi + movl %ebx, 8(%ecx) + movl 20(%esi), %ebx + movl %eax, 12(%ecx) + movl 20(%edx), %eax + sbbl %ebx, %eax + movl 24(%esi), %ebx + movl %edi, 16(%ecx) + movl 24(%edx), %edi + sbbl %ebx, %edi + movl 28(%esi), %ebx + movl %eax, 20(%ecx) + movl 28(%edx), %eax + sbbl %ebx, %eax + movl 32(%esi), %ebx + movl %edi, 24(%ecx) + movl 32(%edx), %edi + sbbl %ebx, %edi + movl 36(%esi), %ebx + movl %eax, 28(%ecx) + movl 36(%edx), %eax + sbbl %ebx, %eax + movl 40(%esi), %ebx + movl %edi, 
32(%ecx) + movl 40(%edx), %edi + sbbl %ebx, %edi + movl 44(%esi), %ebx + movl %eax, 36(%ecx) + movl 44(%edx), %eax + sbbl %ebx, %eax + movl 48(%esi), %ebx + movl %edi, 40(%ecx) + movl 48(%edx), %edi + sbbl %ebx, %edi + movl 52(%esi), %ebx + movl %eax, 44(%ecx) + movl 52(%edx), %eax + sbbl %ebx, %eax + movl 56(%esi), %ebx + movl %edi, 48(%ecx) + movl 56(%edx), %edi + sbbl %ebx, %edi + movl 60(%esi), %ebx + movl %eax, 52(%ecx) + movl 60(%edx), %eax + sbbl %ebx, %eax + movl 64(%esi), %ebx + movl %edi, 56(%ecx) + movl 64(%edx), %edi + sbbl %ebx, %edi + movl 68(%esi), %ebx + movl %eax, 60(%ecx) + movl 68(%edx), %eax + sbbl %ebx, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 72(%esi), %eax + movl %edi, 64(%ecx) + movl 72(%edx), %edi + sbbl %eax, %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 76(%esi), %eax + movl 76(%edx), %edi + sbbl %eax, %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 80(%esi), %eax + movl 80(%edx), %edi + sbbl %eax, %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 84(%esi), %eax + movl 84(%edx), %edi + sbbl %eax, %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 88(%esi), %eax + movl 88(%edx), %edi + sbbl %eax, %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 92(%esi), %eax + movl 92(%edx), %edi + sbbl %eax, %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 96(%esi), %eax + movl 96(%edx), %edi + sbbl %eax, %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 100(%esi), %eax + movl 100(%edx), %edi + sbbl %eax, %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 104(%esi), %eax + movl 104(%edx), %edi + sbbl %eax, %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 108(%esi), %eax + movl 108(%edx), %edi + sbbl %eax, %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 112(%esi), %eax + movl 112(%edx), %edi + sbbl %eax, %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 116(%esi), %eax + movl 116(%edx), %edi + sbbl %eax, %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 120(%esi), %eax + movl 120(%edx), %edi + sbbl %eax, %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 124(%esi), %eax + movl 124(%edx), %edi + sbbl %eax, %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 128(%esi), %eax + movl 128(%edx), %edi + sbbl %eax, %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 132(%esi), %eax + movl 132(%edx), %edx + sbbl %eax, %edx + movl %edx, 112(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 148(%esp), %ebp + jne .LBB269_1 +# BB#2: + movl $0, 76(%esp) # 4-byte Folded Spill + jmp .LBB269_3 +.LBB269_1: + movl 64(%ebp), %edx + movl %edx, 76(%esp) # 4-byte Spill +.LBB269_3: + testb %al, %al + jne .LBB269_4 +# BB#5: + movl $0, 28(%esp) # 4-byte Folded Spill + movl $0, %esi + jmp .LBB269_6 +.LBB269_4: + movl (%ebp), %esi + movl 4(%ebp), %eax + movl %eax, 28(%esp) # 4-byte Spill +.LBB269_6: + jne .LBB269_7 +# BB#8: + movl $0, 40(%esp) # 4-byte Folded Spill + jmp .LBB269_9 +.LBB269_7: + movl 60(%ebp), %eax + movl %eax, 40(%esp) # 4-byte Spill +.LBB269_9: + jne .LBB269_10 +# BB#11: + movl $0, 36(%esp) # 4-byte Folded Spill + jmp .LBB269_12 +.LBB269_10: + movl 56(%ebp), %eax + movl %eax, 36(%esp) # 4-byte Spill +.LBB269_12: + jne .LBB269_13 +# BB#14: + movl $0, 32(%esp) # 4-byte Folded Spill + jmp .LBB269_15 +.LBB269_13: + movl 52(%ebp), %eax + movl %eax, 32(%esp) # 4-byte Spill +.LBB269_15: + jne .LBB269_16 +# BB#17: + movl $0, 24(%esp) # 4-byte Folded Spill + jmp .LBB269_18 +.LBB269_16: + movl 48(%ebp), %eax + movl %eax, 24(%esp) # 4-byte Spill +.LBB269_18: + jne .LBB269_19 +# BB#20: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB269_21 +.LBB269_19: + movl 44(%ebp), 
%eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB269_21: + jne .LBB269_22 +# BB#23: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB269_24 +.LBB269_22: + movl 40(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB269_24: + jne .LBB269_25 +# BB#26: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB269_27 +.LBB269_25: + movl 36(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB269_27: + jne .LBB269_28 +# BB#29: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB269_30 +.LBB269_28: + movl 32(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB269_30: + jne .LBB269_31 +# BB#32: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB269_33 +.LBB269_31: + movl 28(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB269_33: + jne .LBB269_34 +# BB#35: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB269_36 +.LBB269_34: + movl 24(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB269_36: + jne .LBB269_37 +# BB#38: + movl $0, %ebx + jmp .LBB269_39 +.LBB269_37: + movl 20(%ebp), %ebx +.LBB269_39: + jne .LBB269_40 +# BB#41: + movl $0, %edi + jmp .LBB269_42 +.LBB269_40: + movl 16(%ebp), %edi +.LBB269_42: + jne .LBB269_43 +# BB#44: + movl %ebp, %eax + movl $0, %ebp + jmp .LBB269_45 +.LBB269_43: + movl %ebp, %eax + movl 12(%eax), %ebp +.LBB269_45: + jne .LBB269_46 +# BB#47: + xorl %eax, %eax + jmp .LBB269_48 +.LBB269_46: + movl 8(%eax), %eax +.LBB269_48: + addl 52(%esp), %esi # 4-byte Folded Reload + movl 28(%esp), %edx # 4-byte Reload + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %esi, 68(%ecx) + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %edx, 72(%ecx) + adcl 56(%esp), %ebp # 4-byte Folded Reload + movl %eax, 76(%ecx) + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %ebp, 80(%ecx) + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %edi, 84(%ecx) + movl (%esp), %edx # 4-byte Reload + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %ebx, 88(%ecx) + movl 4(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %edx, 92(%ecx) + movl 8(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %eax, 96(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %edx, 100(%ecx) + movl 16(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %eax, 104(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %edx, 108(%ecx) + movl 24(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx # 4-byte Folded Reload + movl %eax, 112(%ecx) + movl 32(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %edx, 116(%ecx) + movl 36(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx # 4-byte Folded Reload + movl %eax, 120(%ecx) + movl 40(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %edx, 124(%ecx) + movl %eax, 128(%ecx) + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 132(%ecx) + addl $116, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end269: + .size mcl_fpDbl_sub17Lbmi2, .Lfunc_end269-mcl_fpDbl_sub17Lbmi2 + + + .section ".note.GNU-stack","",@progbits diff --git a/vendor/github.com/byzantine-lab/mcl/src/asm/x86.s b/vendor/github.com/byzantine-lab/mcl/src/asm/x86.s new file mode 100644 index 000000000..cdd988ad3 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/asm/x86.s @@ -0,0 +1,73785 @@ + .text + .file "" + .globl makeNIST_P192L + .align 16, 0x90 + .type makeNIST_P192L,@function +makeNIST_P192L: # 
@makeNIST_P192L +# BB#0: + movl 4(%esp), %eax + movl $-1, 20(%eax) + movl $-1, 16(%eax) + movl $-1, 12(%eax) + movl $-2, 8(%eax) + movl $-1, 4(%eax) + movl $-1, (%eax) + retl $4 +.Lfunc_end0: + .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L + + .globl mcl_fpDbl_mod_NIST_P192L + .align 16, 0x90 + .type mcl_fpDbl_mod_NIST_P192L,@function +mcl_fpDbl_mod_NIST_P192L: # @mcl_fpDbl_mod_NIST_P192L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 56(%esp), %eax + movl 32(%eax), %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 28(%eax), %edi + movl %edi, 16(%esp) # 4-byte Spill + xorl %edx, %edx + movl (%eax), %ebx + addl %ecx, %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 4(%eax), %ecx + adcl %edi, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 8(%eax), %ebp + adcl %esi, %ebp + movl 36(%eax), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 12(%eax), %esi + adcl %ecx, %esi + movl 40(%eax), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + movl 16(%eax), %ecx + adcl %ebx, %ecx + movl 44(%eax), %edi + movl %edi, (%esp) # 4-byte Spill + movl 20(%eax), %eax + adcl %edi, %eax + adcl $0, %edx + sbbl %edi, %edi + andl $1, %edi + addl %ebx, 24(%esp) # 4-byte Folded Spill + movl (%esp), %ebx # 4-byte Reload + adcl %ebx, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ebp # 4-byte Folded Reload + adcl 16(%esp), %esi # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + adcl $0, %edx + adcl $0, %edi + addl 4(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %esi + adcl $0, %ecx + adcl $0, %eax + adcl $0, %edx + adcl $0, %edi + addl %edx, 24(%esp) # 4-byte Folded Spill + adcl %edi, 28(%esp) # 4-byte Folded Spill + adcl %ebp, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl %esi, %edi + adcl $0, %ecx + adcl $0, %eax + sbbl %ebx, %ebx + andl $1, %ebx + movl 24(%esp), %esi # 4-byte Reload + addl $1, %esi + movl 28(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $1, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %edi, %edx + adcl $0, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl %ecx, %edx + adcl $0, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, %edx + adcl $0, %edx + adcl $-1, %ebx + andl $1, %ebx + jne .LBB1_2 +# BB#1: + movl %edx, %eax +.LBB1_2: + testb %bl, %bl + movl 24(%esp), %edx # 4-byte Reload + jne .LBB1_4 +# BB#3: + movl %esi, %edx +.LBB1_4: + movl 52(%esp), %esi + movl %edx, (%esi) + movl 20(%esp), %edx # 4-byte Reload + movl 28(%esp), %ebx # 4-byte Reload + jne .LBB1_6 +# BB#5: + movl %ebp, %ebx +.LBB1_6: + movl %ebx, 4(%esi) + jne .LBB1_8 +# BB#7: + movl 8(%esp), %edx # 4-byte Reload +.LBB1_8: + movl %edx, 8(%esi) + jne .LBB1_10 +# BB#9: + movl 12(%esp), %edi # 4-byte Reload +.LBB1_10: + movl %edi, 12(%esi) + jne .LBB1_12 +# BB#11: + movl 16(%esp), %ecx # 4-byte Reload +.LBB1_12: + movl %ecx, 16(%esi) + movl %eax, 20(%esi) + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end1: + .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L + + .globl mcl_fp_sqr_NIST_P192L + .align 16, 0x90 + .type mcl_fp_sqr_NIST_P192L,@function +mcl_fp_sqr_NIST_P192L: # @mcl_fp_sqr_NIST_P192L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $92, %esp + calll .L2$pb +.L2$pb: + popl %ebx +.Ltmp0: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L2$pb), %ebx + movl 116(%esp), %eax + movl %eax, 4(%esp) + leal 44(%esp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_sqrPre6L@PLT + xorl %edi, %edi + movl 76(%esp), 
%ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 68(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 72(%esp), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 44(%esp), %esi + addl %eax, %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax + adcl %edx, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 52(%esp), %ebp + adcl %ecx, %ebp + movl 80(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 56(%esp), %esi + adcl %eax, %esi + movl 84(%esp), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 60(%esp), %ecx + adcl %ebx, %ecx + movl 88(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 64(%esp), %edx + adcl %eax, %edx + adcl $0, %edi + sbbl %eax, %eax + andl $1, %eax + addl %ebx, 36(%esp) # 4-byte Folded Spill + movl 12(%esp), %ebx # 4-byte Reload + adcl %ebx, 40(%esp) # 4-byte Folded Spill + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + adcl 20(%esp), %edx # 4-byte Folded Reload + adcl $0, %edi + adcl $0, %eax + addl 16(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %esi + adcl $0, %ecx + adcl $0, %edx + adcl $0, %edi + adcl $0, %eax + addl %edi, 36(%esp) # 4-byte Folded Spill + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl %ebp, %edi + adcl %esi, %eax + adcl $0, %ecx + adcl $0, %edx + sbbl %ebx, %ebx + andl $1, %ebx + movl 36(%esp), %esi # 4-byte Reload + addl $1, %esi + movl 40(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl %edi, %ebp + adcl $1, %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl $0, %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl %edx, %ebp + adcl $0, %ebp + adcl $-1, %ebx + andl $1, %ebx + jne .LBB2_2 +# BB#1: + movl %ebp, %edx +.LBB2_2: + testb %bl, %bl + movl 36(%esp), %ebx # 4-byte Reload + jne .LBB2_4 +# BB#3: + movl %esi, %ebx +.LBB2_4: + movl 112(%esp), %esi + movl %ebx, (%esi) + movl 40(%esp), %ebx # 4-byte Reload + jne .LBB2_6 +# BB#5: + movl 20(%esp), %ebx # 4-byte Reload +.LBB2_6: + movl %ebx, 4(%esi) + jne .LBB2_8 +# BB#7: + movl 24(%esp), %edi # 4-byte Reload +.LBB2_8: + movl %edi, 8(%esi) + jne .LBB2_10 +# BB#9: + movl 28(%esp), %eax # 4-byte Reload +.LBB2_10: + movl %eax, 12(%esi) + jne .LBB2_12 +# BB#11: + movl 32(%esp), %ecx # 4-byte Reload +.LBB2_12: + movl %ecx, 16(%esi) + movl %edx, 20(%esi) + addl $92, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end2: + .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L + + .globl mcl_fp_mulNIST_P192L + .align 16, 0x90 + .type mcl_fp_mulNIST_P192L,@function +mcl_fp_mulNIST_P192L: # @mcl_fp_mulNIST_P192L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $92, %esp + calll .L3$pb +.L3$pb: + popl %ebx +.Ltmp1: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L3$pb), %ebx + movl 120(%esp), %eax + movl %eax, 8(%esp) + movl 116(%esp), %eax + movl %eax, 4(%esp) + leal 44(%esp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre6L@PLT + xorl %edi, %edi + movl 76(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 68(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 72(%esp), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 44(%esp), %esi + addl %eax, %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax + adcl %edx, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 52(%esp), %ebp + adcl %ecx, %ebp + movl 80(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 56(%esp), %esi + adcl %eax, %esi + movl 84(%esp), %ebx + 
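+# Note on the NIST P-192 helpers: mcl_fp_sqr_NIST_P192L and
+# mcl_fp_mulNIST_P192L first build the full 384-bit product with
+# mcl_fpDbl_sqrPre6L / mcl_fpDbl_mulPre6L and then reduce it inline using
+# the prime's special shape, p = 2^192 - 2^64 - 1, which gives
+# 2^192 == 2^64 + 1 (mod p). Schematically, for the low/high 192-bit
+# halves L and H of the product:
+#   t = L + H + (H << 64);     /* fold via 2^192 == 2^64 + 1 (mod p)     */
+#   t = fold(t);               /* re-fold the carries/top word same way  */
+#   t = t >= p ? t - p : t;    /* at most one final conditional subtract */
+# The add/adcl passes above are those folds; the addl $1 / adcl blocks and
+# the cmov-style selection below implement the final conditional subtract.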
movl %ebx, 16(%esp) # 4-byte Spill + movl 60(%esp), %ecx + adcl %ebx, %ecx + movl 88(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 64(%esp), %edx + adcl %eax, %edx + adcl $0, %edi + sbbl %eax, %eax + andl $1, %eax + addl %ebx, 36(%esp) # 4-byte Folded Spill + movl 12(%esp), %ebx # 4-byte Reload + adcl %ebx, 40(%esp) # 4-byte Folded Spill + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + adcl 20(%esp), %edx # 4-byte Folded Reload + adcl $0, %edi + adcl $0, %eax + addl 16(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %esi + adcl $0, %ecx + adcl $0, %edx + adcl $0, %edi + adcl $0, %eax + addl %edi, 36(%esp) # 4-byte Folded Spill + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl %ebp, %edi + adcl %esi, %eax + adcl $0, %ecx + adcl $0, %edx + sbbl %ebx, %ebx + andl $1, %ebx + movl 36(%esp), %esi # 4-byte Reload + addl $1, %esi + movl 40(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl %edi, %ebp + adcl $1, %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl $0, %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl %edx, %ebp + adcl $0, %ebp + adcl $-1, %ebx + andl $1, %ebx + jne .LBB3_2 +# BB#1: + movl %ebp, %edx +.LBB3_2: + testb %bl, %bl + movl 36(%esp), %ebx # 4-byte Reload + jne .LBB3_4 +# BB#3: + movl %esi, %ebx +.LBB3_4: + movl 112(%esp), %esi + movl %ebx, (%esi) + movl 40(%esp), %ebx # 4-byte Reload + jne .LBB3_6 +# BB#5: + movl 20(%esp), %ebx # 4-byte Reload +.LBB3_6: + movl %ebx, 4(%esi) + jne .LBB3_8 +# BB#7: + movl 24(%esp), %edi # 4-byte Reload +.LBB3_8: + movl %edi, 8(%esi) + jne .LBB3_10 +# BB#9: + movl 28(%esp), %eax # 4-byte Reload +.LBB3_10: + movl %eax, 12(%esi) + jne .LBB3_12 +# BB#11: + movl 32(%esp), %ecx # 4-byte Reload +.LBB3_12: + movl %ecx, 16(%esi) + movl %edx, 20(%esi) + addl $92, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end3: + .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L + + .globl mcl_fpDbl_mod_NIST_P521L + .align 16, 0x90 + .type mcl_fpDbl_mod_NIST_P521L,@function +mcl_fpDbl_mod_NIST_P521L: # @mcl_fpDbl_mod_NIST_P521L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 84(%esp), %ecx + movl 124(%ecx), %edx + movl 128(%ecx), %esi + movl %esi, %eax + shldl $23, %edx, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 120(%ecx), %eax + shldl $23, %eax, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 116(%ecx), %edx + shldl $23, %edx, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 112(%ecx), %eax + shldl $23, %eax, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 108(%ecx), %edx + shldl $23, %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 104(%ecx), %eax + shldl $23, %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 100(%ecx), %edx + shldl $23, %edx, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 96(%ecx), %eax + shldl $23, %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 92(%ecx), %edx + shldl $23, %edx, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 88(%ecx), %eax + shldl $23, %eax, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 84(%ecx), %edi + shldl $23, %edi, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 80(%ecx), %edx + shldl $23, %edx, %edi + movl 76(%ecx), %eax + shldl $23, %eax, %edx + movl 72(%ecx), %ebx + shldl $23, %ebx, %eax + movl 68(%ecx), %ebp + shldl $23, %ebp, %ebx + shrl $9, %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 64(%ecx), 
%esi + shldl $23, %esi, %ebp + andl $511, %esi # imm = 0x1FF + addl (%ecx), %ebp + adcl 4(%ecx), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + adcl 8(%ecx), %eax + adcl 12(%ecx), %edx + adcl 16(%ecx), %edi + movl 28(%esp), %ebx # 4-byte Reload + adcl 20(%ecx), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 32(%esp), %ebx # 4-byte Reload + adcl 24(%ecx), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ebx # 4-byte Reload + adcl 28(%ecx), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 40(%esp), %ebx # 4-byte Reload + adcl 32(%ecx), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 44(%esp), %ebx # 4-byte Reload + adcl 36(%ecx), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 48(%esp), %ebx # 4-byte Reload + adcl 40(%ecx), %ebx + movl %ebx, 48(%esp) # 4-byte Spill + movl 24(%esp), %ebx # 4-byte Reload + adcl 44(%ecx), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 52(%esp), %ebx # 4-byte Reload + adcl 48(%ecx), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 20(%esp), %ebx # 4-byte Reload + adcl 52(%ecx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 56(%esp), %ebx # 4-byte Reload + adcl 56(%ecx), %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 12(%esp), %ebx # 4-byte Reload + adcl 60(%ecx), %ebx + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + movl %esi, %ecx + shrl $9, %ecx + andl $1, %ecx + addl %ebp, %ecx + adcl $0, 16(%esp) # 4-byte Folded Spill + adcl $0, %eax + movl %eax, (%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 8(%esp) # 4-byte Spill + movl %edi, %esi + adcl $0, 28(%esp) # 4-byte Folded Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + adcl $0, 48(%esp) # 4-byte Folded Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + adcl $0, 20(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl %ebx, %ebp + adcl $0, %ebp + movl 12(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + movl %ecx, %edi + andl %eax, %edi + andl %edx, %edi + andl %esi, %edi + andl 28(%esp), %edi # 4-byte Folded Reload + andl 32(%esp), %edi # 4-byte Folded Reload + andl 36(%esp), %edi # 4-byte Folded Reload + andl 40(%esp), %edi # 4-byte Folded Reload + andl 44(%esp), %edi # 4-byte Folded Reload + andl 48(%esp), %edi # 4-byte Folded Reload + andl 24(%esp), %edi # 4-byte Folded Reload + andl 52(%esp), %edi # 4-byte Folded Reload + movl 20(%esp), %esi # 4-byte Reload + andl %esi, %edi + andl 56(%esp), %edi # 4-byte Folded Reload + movl %ebx, %edx + movl 16(%esp), %ebx # 4-byte Reload + andl %ebp, %edi + movl %ebp, %eax + movl %edx, %ebp + orl $-512, %ebp # imm = 0xFFFFFFFFFFFFFE00 + andl %edi, %ebp + andl %ebx, %ebp + cmpl $-1, %ebp + movl 80(%esp), %edi + je .LBB4_1 +# BB#3: # %nonzero + movl %ecx, (%edi) + movl %ebx, 4(%edi) + movl (%esp), %ecx # 4-byte Reload + movl %ecx, 8(%edi) + movl 4(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%edi) + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%edi) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 20(%edi) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%edi) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%edi) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%edi) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%edi) + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%edi) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%edi) + movl 52(%esp), 
%ecx # 4-byte Reload + movl %ecx, 48(%edi) + movl %esi, 52(%edi) + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%edi) + movl %eax, 60(%edi) + andl $511, %edx # imm = 0x1FF + movl %edx, 64(%edi) + jmp .LBB4_2 +.LBB4_1: # %zero + xorl %eax, %eax + movl $17, %ecx + rep;stosl +.LBB4_2: # %zero + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end4: + .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L + + .globl mcl_fp_mulUnitPre1L + .align 16, 0x90 + .type mcl_fp_mulUnitPre1L,@function +mcl_fp_mulUnitPre1L: # @mcl_fp_mulUnitPre1L +# BB#0: + movl 8(%esp), %eax + movl (%eax), %eax + mull 12(%esp) + movl 4(%esp), %ecx + movl %eax, (%ecx) + movl %edx, 4(%ecx) + retl +.Lfunc_end5: + .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L + + .globl mcl_fpDbl_mulPre1L + .align 16, 0x90 + .type mcl_fpDbl_mulPre1L,@function +mcl_fpDbl_mulPre1L: # @mcl_fpDbl_mulPre1L +# BB#0: + movl 12(%esp), %eax + movl (%eax), %eax + movl 8(%esp), %ecx + mull (%ecx) + movl 4(%esp), %ecx + movl %eax, (%ecx) + movl %edx, 4(%ecx) + retl +.Lfunc_end6: + .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L + + .globl mcl_fpDbl_sqrPre1L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre1L,@function +mcl_fpDbl_sqrPre1L: # @mcl_fpDbl_sqrPre1L +# BB#0: + movl 8(%esp), %eax + movl (%eax), %eax + mull %eax + movl 4(%esp), %ecx + movl %eax, (%ecx) + movl %edx, 4(%ecx) + retl +.Lfunc_end7: + .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L + + .globl mcl_fp_mont1L + .align 16, 0x90 + .type mcl_fp_mont1L,@function +mcl_fp_mont1L: # @mcl_fp_mont1L +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %eax + movl (%eax), %eax + movl 20(%esp), %ecx + mull (%ecx) + movl %eax, %ecx + movl %edx, %esi + movl 24(%esp), %edx + movl -4(%edx), %eax + imull %ecx, %eax + movl (%edx), %edi + mull %edi + addl %ecx, %eax + adcl %esi, %edx + sbbl %ecx, %ecx + andl $1, %ecx + movl %edx, %eax + subl %edi, %eax + sbbl $0, %ecx + testb $1, %cl + jne .LBB8_2 +# BB#1: + movl %eax, %edx +.LBB8_2: + movl 12(%esp), %eax + movl %edx, (%eax) + popl %esi + popl %edi + retl +.Lfunc_end8: + .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L + + .globl mcl_fp_montNF1L + .align 16, 0x90 + .type mcl_fp_montNF1L,@function +mcl_fp_montNF1L: # @mcl_fp_montNF1L +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %eax + movl (%eax), %eax + movl 20(%esp), %ecx + mull (%ecx) + movl %eax, %ecx + movl %edx, %esi + movl 24(%esp), %edx + movl -4(%edx), %eax + imull %ecx, %eax + movl (%edx), %edi + mull %edi + addl %ecx, %eax + adcl %esi, %edx + movl %edx, %eax + subl %edi, %eax + js .LBB9_2 +# BB#1: + movl %eax, %edx +.LBB9_2: + movl 12(%esp), %eax + movl %edx, (%eax) + popl %esi + popl %edi + retl +.Lfunc_end9: + .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L + + .globl mcl_fp_montRed1L + .align 16, 0x90 + .type mcl_fp_montRed1L,@function +mcl_fp_montRed1L: # @mcl_fp_montRed1L +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %ecx + movl (%ecx), %esi + movl 20(%esp), %edx + movl -4(%edx), %eax + imull %esi, %eax + movl (%edx), %edi + mull %edi + addl %esi, %eax + adcl 4(%ecx), %edx + sbbl %ecx, %ecx + andl $1, %ecx + movl %edx, %eax + subl %edi, %eax + sbbl $0, %ecx + testb $1, %cl + jne .LBB10_2 +# BB#1: + movl %eax, %edx +.LBB10_2: + movl 12(%esp), %eax + movl %edx, (%eax) + popl %esi + popl %edi + retl +.Lfunc_end10: + .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L + + .globl mcl_fp_addPre1L + .align 16, 0x90 + .type mcl_fp_addPre1L,@function +mcl_fp_addPre1L: # @mcl_fp_addPre1L +# BB#0: + movl 
12(%esp), %eax + movl (%eax), %eax + movl 4(%esp), %ecx + movl 8(%esp), %edx + addl (%edx), %eax + movl %eax, (%ecx) + sbbl %eax, %eax + andl $1, %eax + retl +.Lfunc_end11: + .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L + + .globl mcl_fp_subPre1L + .align 16, 0x90 + .type mcl_fp_subPre1L,@function +mcl_fp_subPre1L: # @mcl_fp_subPre1L +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + xorl %eax, %eax + movl 8(%esp), %edx + movl 16(%esp), %esi + subl (%esi), %ecx + movl %ecx, (%edx) + sbbl $0, %eax + andl $1, %eax + popl %esi + retl +.Lfunc_end12: + .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L + + .globl mcl_fp_shr1_1L + .align 16, 0x90 + .type mcl_fp_shr1_1L,@function +mcl_fp_shr1_1L: # @mcl_fp_shr1_1L +# BB#0: + movl 8(%esp), %eax + movl (%eax), %eax + shrl %eax + movl 4(%esp), %ecx + movl %eax, (%ecx) + retl +.Lfunc_end13: + .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L + + .globl mcl_fp_add1L + .align 16, 0x90 + .type mcl_fp_add1L,@function +mcl_fp_add1L: # @mcl_fp_add1L +# BB#0: + pushl %esi + movl 16(%esp), %eax + movl (%eax), %eax + movl 8(%esp), %ecx + movl 12(%esp), %edx + addl (%edx), %eax + movl %eax, (%ecx) + sbbl %edx, %edx + andl $1, %edx + movl 20(%esp), %esi + subl (%esi), %eax + sbbl $0, %edx + testb $1, %dl + jne .LBB14_2 +# BB#1: # %nocarry + movl %eax, (%ecx) +.LBB14_2: # %carry + popl %esi + retl +.Lfunc_end14: + .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L + + .globl mcl_fp_addNF1L + .align 16, 0x90 + .type mcl_fp_addNF1L,@function +mcl_fp_addNF1L: # @mcl_fp_addNF1L +# BB#0: + movl 12(%esp), %eax + movl (%eax), %eax + movl 8(%esp), %ecx + addl (%ecx), %eax + movl 16(%esp), %edx + movl %eax, %ecx + subl (%edx), %ecx + js .LBB15_2 +# BB#1: + movl %ecx, %eax +.LBB15_2: + movl 4(%esp), %ecx + movl %eax, (%ecx) + retl +.Lfunc_end15: + .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L + + .globl mcl_fp_sub1L + .align 16, 0x90 + .type mcl_fp_sub1L,@function +mcl_fp_sub1L: # @mcl_fp_sub1L +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %eax + xorl %edx, %edx + movl 8(%esp), %ecx + movl 16(%esp), %esi + subl (%esi), %eax + movl %eax, (%ecx) + sbbl $0, %edx + testb $1, %dl + jne .LBB16_2 +# BB#1: # %nocarry + popl %esi + retl +.LBB16_2: # %carry + movl 20(%esp), %edx + addl (%edx), %eax + movl %eax, (%ecx) + popl %esi + retl +.Lfunc_end16: + .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L + + .globl mcl_fp_subNF1L + .align 16, 0x90 + .type mcl_fp_subNF1L,@function +mcl_fp_subNF1L: # @mcl_fp_subNF1L +# BB#0: + movl 8(%esp), %eax + movl (%eax), %eax + movl 12(%esp), %ecx + subl (%ecx), %eax + movl %eax, %ecx + sarl $31, %ecx + movl 16(%esp), %edx + andl (%edx), %ecx + addl %eax, %ecx + movl 4(%esp), %eax + movl %ecx, (%eax) + retl +.Lfunc_end17: + .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L + + .globl mcl_fpDbl_add1L + .align 16, 0x90 + .type mcl_fpDbl_add1L,@function +mcl_fpDbl_add1L: # @mcl_fpDbl_add1L +# BB#0: + pushl %ebx + pushl %esi + movl 20(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %eax + movl 16(%esp), %esi + addl (%esi), %edx + movl 12(%esp), %ecx + adcl 4(%esi), %eax + movl %edx, (%ecx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 24(%esp), %esi + movl %eax, %edx + subl (%esi), %edx + sbbl $0, %ebx + testb $1, %bl + jne .LBB18_2 +# BB#1: + movl %edx, %eax +.LBB18_2: + movl %eax, 4(%ecx) + popl %esi + popl %ebx + retl +.Lfunc_end18: + .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L + + .globl mcl_fpDbl_sub1L + .align 16, 0x90 + .type mcl_fpDbl_sub1L,@function +mcl_fpDbl_sub1L: # @mcl_fpDbl_sub1L +# BB#0: + pushl %esi 
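+# Orientation note: from here on the file repeats the same small set of
+# field-arithmetic templates per limb count, as the 1L/2L suffixes indicate
+# (mulUnitPre, mulPre/sqrPre, mont/montNF/montRed, addPre/subPre, shr1,
+# add/addNF, sub/subNF, fpDbl_add/fpDbl_sub). The 1-limb Montgomery
+# routines above show the word-level REDC step in its simplest form; the
+# -4 slot read by imull appears to hold -p^{-1} mod 2^32 (mcl's usual
+# layout, assumed here rather than taken from this diff). With r = 2^32:
+#   q = (uint32_t)t * np;            /* np = -p^{-1} mod r (assumption)  */
+#   t = (t + (uint64_t)q * p) >> 32; /* low word cancels; divide by r    */
+#   if (t >= p) t -= p;              /* montNF selects on sign instead   */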
+ movl 12(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %eax + xorl %ecx, %ecx + movl 16(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %eax + movl 8(%esp), %edx + movl %esi, (%edx) + sbbl $0, %ecx + andl $1, %ecx + je .LBB19_2 +# BB#1: + movl 20(%esp), %ecx + movl (%ecx), %ecx +.LBB19_2: + addl %eax, %ecx + movl %ecx, 4(%edx) + popl %esi + retl +.Lfunc_end19: + .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L + + .globl mcl_fp_mulUnitPre2L + .align 16, 0x90 + .type mcl_fp_mulUnitPre2L,@function +mcl_fp_mulUnitPre2L: # @mcl_fp_mulUnitPre2L +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl 20(%esp), %ebx + movl %ecx, %eax + mull 4(%ebx) + movl %edx, %esi + movl %eax, %edi + movl %ecx, %eax + mull (%ebx) + movl 16(%esp), %ecx + movl %eax, (%ecx) + addl %edi, %edx + movl %edx, 4(%ecx) + adcl $0, %esi + movl %esi, 8(%ecx) + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end20: + .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L + + .globl mcl_fpDbl_mulPre2L + .align 16, 0x90 + .type mcl_fpDbl_mulPre2L,@function +mcl_fpDbl_mulPre2L: # @mcl_fpDbl_mulPre2L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $8, %esp + movl 32(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edi + movl 36(%esp), %ebx + movl (%ebx), %esi + movl %ecx, %eax + mull %esi + movl %edx, %ebp + movl 28(%esp), %edx + movl %eax, (%edx) + movl 4(%ebx), %ebx + movl %edi, %eax + mull %ebx + movl %edx, 4(%esp) # 4-byte Spill + movl %eax, (%esp) # 4-byte Spill + movl %ecx, %eax + mull %ebx + movl %edx, %ecx + movl %eax, %ebx + movl %edi, %eax + mull %esi + addl %ebp, %eax + adcl $0, %edx + addl %ebx, %eax + movl 28(%esp), %esi + movl %eax, 4(%esi) + adcl (%esp), %edx # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl %ecx, %edx + movl %edx, 8(%esi) + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esi) + addl $8, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end21: + .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L + + .globl mcl_fpDbl_sqrPre2L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre2L,@function +mcl_fpDbl_sqrPre2L: # @mcl_fpDbl_sqrPre2L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %esi + movl %esi, %eax + mull %esi + movl %edx, %edi + movl %eax, %ebx + movl %esi, %eax + mull %ecx + movl %edx, %esi + movl %eax, %ebp + movl %ecx, %eax + mull %ecx + movl 20(%esp), %ecx + movl %eax, (%ecx) + addl %ebp, %edx + movl %esi, %eax + adcl $0, %eax + addl %ebp, %edx + movl %edx, 4(%ecx) + adcl %ebx, %eax + sbbl %edx, %edx + andl $1, %edx + addl %esi, %eax + movl %eax, 8(%ecx) + adcl %edi, %edx + movl %edx, 12(%ecx) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end22: + .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L + + .globl mcl_fp_mont2L + .align 16, 0x90 + .type mcl_fp_mont2L,@function +mcl_fp_mont2L: # @mcl_fp_mont2L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 56(%esp), %ecx + movl (%ecx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 4(%ecx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 60(%esp), %ecx + movl (%ecx), %esi + mull %esi + movl %eax, 8(%esp) # 4-byte Spill + movl %edx, 4(%esp) # 4-byte Spill + movl 64(%esp), %edx + movl -4(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl %eax, %ebp + imull %ecx, %ebp + movl (%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 4(%edx), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl %ebp, %eax + mull %edx + movl 
%edx, %ebx + movl %eax, (%esp) # 4-byte Spill + movl %ebp, %eax + mull %ecx + movl %edx, %ebp + movl %eax, %edi + movl 16(%esp), %eax # 4-byte Reload + mull %esi + addl 4(%esp), %eax # 4-byte Folded Reload + adcl $0, %edx + addl (%esp), %ebp # 4-byte Folded Reload + adcl $0, %ebx + addl 8(%esp), %edi # 4-byte Folded Reload + adcl %eax, %ebp + adcl %edx, %ebx + movl 60(%esp), %eax + movl 4(%eax), %ecx + sbbl %eax, %eax + andl $1, %eax + movl %eax, 8(%esp) # 4-byte Spill + movl %ecx, %eax + mull 16(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, %eax + mull 12(%esp) # 4-byte Folded Reload + movl %eax, %ecx + movl %edx, %esi + addl 16(%esp), %esi # 4-byte Folded Reload + adcl $0, %edi + addl %ebp, %ecx + adcl %ebx, %esi + adcl 8(%esp), %edi # 4-byte Folded Reload + sbbl %ebx, %ebx + movl 28(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %eax, 28(%esp) # 4-byte Spill + andl $1, %ebx + mull 20(%esp) # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, %ebp + movl 28(%esp), %eax # 4-byte Reload + mull 24(%esp) # 4-byte Folded Reload + addl 16(%esp), %eax # 4-byte Folded Reload + adcl $0, %edx + addl %ecx, %ebp + adcl %esi, %eax + adcl %edi, %edx + adcl $0, %ebx + movl %eax, %esi + subl 20(%esp), %esi # 4-byte Folded Reload + movl %edx, %ecx + sbbl 24(%esp), %ecx # 4-byte Folded Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB23_2 +# BB#1: + movl %esi, %eax +.LBB23_2: + movl 52(%esp), %esi + movl %eax, (%esi) + testb %bl, %bl + jne .LBB23_4 +# BB#3: + movl %ecx, %edx +.LBB23_4: + movl %edx, 4(%esi) + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end23: + .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L + + .globl mcl_fp_montNF2L + .align 16, 0x90 + .type mcl_fp_montNF2L,@function +mcl_fp_montNF2L: # @mcl_fp_montNF2L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $36, %esp + movl 60(%esp), %ecx + movl (%ecx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 4(%ecx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 64(%esp), %ecx + movl (%ecx), %ebp + mull %ebp + movl %eax, %ebx + movl %edx, 8(%esp) # 4-byte Spill + movl 68(%esp), %eax + movl -4(%eax), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl %ebx, %edi + imull %ecx, %edi + movl (%eax), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 4(%eax), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %edi, %eax + mull %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, (%esp) # 4-byte Spill + movl %edi, %eax + mull %ecx + movl %edx, 4(%esp) # 4-byte Spill + movl %eax, %esi + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, %eax + mull %ebp + movl %edx, %edi + movl %eax, %ebp + addl 8(%esp), %ebp # 4-byte Folded Reload + adcl $0, %edi + addl %ebx, %esi + adcl (%esp), %ebp # 4-byte Folded Reload + adcl $0, %edi + addl 4(%esp), %ebp # 4-byte Folded Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + movl 64(%esp), %eax + movl 4(%eax), %ebx + movl %ebx, %eax + mull %ecx + movl %edx, %esi + movl %eax, 16(%esp) # 4-byte Spill + movl %ebx, %eax + mull 20(%esp) # 4-byte Folded Reload + movl %eax, %ebx + movl %edx, %ecx + addl 16(%esp), %ecx # 4-byte Folded Reload + adcl $0, %esi + addl %ebp, %ebx + adcl %edi, %ecx + adcl $0, %esi + movl 24(%esp), %eax # 4-byte Reload + imull %ebx, %eax + movl %eax, %edi + mull 32(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, %ebp + movl %edi, %eax + movl 28(%esp), %edi # 4-byte Reload + mull %edi + addl %ebx, %ebp + adcl %ecx, %eax + adcl $0, %esi + addl 
24(%esp), %eax # 4-byte Folded Reload + adcl %edx, %esi + movl %eax, %edx + subl 32(%esp), %edx # 4-byte Folded Reload + movl %esi, %ecx + sbbl %edi, %ecx + testl %ecx, %ecx + js .LBB24_2 +# BB#1: + movl %edx, %eax +.LBB24_2: + movl 56(%esp), %edx + movl %eax, (%edx) + js .LBB24_4 +# BB#3: + movl %ecx, %esi +.LBB24_4: + movl %esi, 4(%edx) + addl $36, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end24: + .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L + + .globl mcl_fp_montRed2L + .align 16, 0x90 + .type mcl_fp_montRed2L,@function +mcl_fp_montRed2L: # @mcl_fp_montRed2L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $16, %esp + movl 44(%esp), %eax + movl -4(%eax), %ecx + movl (%eax), %ebx + movl %ebx, 8(%esp) # 4-byte Spill + movl 40(%esp), %edx + movl (%edx), %ebp + movl %ebp, %edi + imull %ecx, %edi + movl 4(%eax), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl %edi, %eax + mull %edx + movl %edx, %esi + movl %eax, 4(%esp) # 4-byte Spill + movl %edi, %eax + mull %ebx + movl %edx, %edi + addl 4(%esp), %edi # 4-byte Folded Reload + adcl $0, %esi + addl %ebp, %eax + movl 40(%esp), %edx + movl 12(%edx), %eax + adcl 4(%edx), %edi + adcl 8(%edx), %esi + adcl $0, %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl %ebx, %ebx + imull %edi, %ecx + andl $1, %ebx + movl %ecx, %eax + mull 8(%esp) # 4-byte Folded Reload + movl %edx, (%esp) # 4-byte Spill + movl %eax, %ebp + movl %ecx, %eax + mull 12(%esp) # 4-byte Folded Reload + addl (%esp), %eax # 4-byte Folded Reload + adcl $0, %edx + addl %edi, %ebp + adcl %esi, %eax + adcl 4(%esp), %edx # 4-byte Folded Reload + adcl $0, %ebx + movl %eax, %esi + subl 8(%esp), %esi # 4-byte Folded Reload + movl %edx, %ecx + sbbl 12(%esp), %ecx # 4-byte Folded Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB25_2 +# BB#1: + movl %esi, %eax +.LBB25_2: + movl 36(%esp), %esi + movl %eax, (%esi) + testb %bl, %bl + jne .LBB25_4 +# BB#3: + movl %ecx, %edx +.LBB25_4: + movl %edx, 4(%esi) + addl $16, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end25: + .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L + + .globl mcl_fp_addPre2L + .align 16, 0x90 + .type mcl_fp_addPre2L,@function +mcl_fp_addPre2L: # @mcl_fp_addPre2L +# BB#0: + pushl %esi + movl 16(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %eax + movl 12(%esp), %edx + addl (%edx), %ecx + movl 8(%esp), %esi + adcl 4(%edx), %eax + movl %ecx, (%esi) + movl %eax, 4(%esi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + retl +.Lfunc_end26: + .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L + + .globl mcl_fp_subPre2L + .align 16, 0x90 + .type mcl_fp_subPre2L,@function +mcl_fp_subPre2L: # @mcl_fp_subPre2L +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + xorl %eax, %eax + movl 16(%esp), %esi + subl (%esi), %ecx + sbbl 4(%esi), %edx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl %edx, 4(%esi) + sbbl $0, %eax + andl $1, %eax + popl %esi + retl +.Lfunc_end27: + .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L + + .globl mcl_fp_shr1_2L + .align 16, 0x90 + .type mcl_fp_shr1_2L,@function +mcl_fp_shr1_2L: # @mcl_fp_shr1_2L +# BB#0: + movl 8(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %eax + shrdl $1, %eax, %ecx + movl 4(%esp), %edx + movl %ecx, (%edx) + shrl %eax + movl %eax, 4(%edx) + retl +.Lfunc_end28: + .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L + + .globl mcl_fp_add2L + .align 16, 0x90 + .type mcl_fp_add2L,@function +mcl_fp_add2L: # @mcl_fp_add2L +# BB#0: + pushl %ebx + pushl %esi + movl 20(%esp), %ecx + 
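+ # (annotation, not compiler output) mcl_fp_add2L computes z = x + y mod p for
+ # two 32-bit limbs: the raw sum is stored first, then overwritten by sum - p
+ # when that subtraction does not borrow, keeping the result fully reduced.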
movl (%ecx), %eax + movl 4(%ecx), %ecx + movl 16(%esp), %esi + addl (%esi), %eax + movl 12(%esp), %edx + adcl 4(%esi), %ecx + movl %eax, (%edx) + movl %ecx, 4(%edx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 24(%esp), %esi + subl (%esi), %eax + sbbl 4(%esi), %ecx + sbbl $0, %ebx + testb $1, %bl + jne .LBB29_2 +# BB#1: # %nocarry + movl %eax, (%edx) + movl %ecx, 4(%edx) +.LBB29_2: # %carry + popl %esi + popl %ebx + retl +.Lfunc_end29: + .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L + + .globl mcl_fp_addNF2L + .align 16, 0x90 + .type mcl_fp_addNF2L,@function +mcl_fp_addNF2L: # @mcl_fp_addNF2L +# BB#0: + pushl %edi + pushl %esi + movl 20(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %eax + movl 16(%esp), %edx + addl (%edx), %ecx + adcl 4(%edx), %eax + movl 24(%esp), %edi + movl %ecx, %esi + subl (%edi), %esi + movl %eax, %edx + sbbl 4(%edi), %edx + testl %edx, %edx + js .LBB30_2 +# BB#1: + movl %esi, %ecx +.LBB30_2: + movl 12(%esp), %esi + movl %ecx, (%esi) + js .LBB30_4 +# BB#3: + movl %edx, %eax +.LBB30_4: + movl %eax, 4(%esi) + popl %esi + popl %edi + retl +.Lfunc_end30: + .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L + + .globl mcl_fp_sub2L + .align 16, 0x90 + .type mcl_fp_sub2L,@function +mcl_fp_sub2L: # @mcl_fp_sub2L +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 20(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %eax + xorl %ebx, %ebx + movl 24(%esp), %edx + subl (%edx), %ecx + sbbl 4(%edx), %eax + movl 16(%esp), %edx + movl %ecx, (%edx) + movl %eax, 4(%edx) + sbbl $0, %ebx + testb $1, %bl + je .LBB31_2 +# BB#1: # %carry + movl 28(%esp), %esi + movl 4(%esi), %edi + addl (%esi), %ecx + movl %ecx, (%edx) + adcl %eax, %edi + movl %edi, 4(%edx) +.LBB31_2: # %nocarry + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end31: + .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L + + .globl mcl_fp_subNF2L + .align 16, 0x90 + .type mcl_fp_subNF2L,@function +mcl_fp_subNF2L: # @mcl_fp_subNF2L +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %eax + movl 20(%esp), %edx + subl (%edx), %ecx + sbbl 4(%edx), %eax + movl %eax, %edx + sarl $31, %edx + movl 24(%esp), %esi + movl 4(%esi), %edi + andl %edx, %edi + andl (%esi), %edx + addl %ecx, %edx + movl 12(%esp), %ecx + movl %edx, (%ecx) + adcl %eax, %edi + movl %edi, 4(%ecx) + popl %esi + popl %edi + retl +.Lfunc_end32: + .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L + + .globl mcl_fpDbl_add2L + .align 16, 0x90 + .type mcl_fpDbl_add2L,@function +mcl_fpDbl_add2L: # @mcl_fpDbl_add2L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 28(%esp), %edx + movl 12(%edx), %esi + movl 24(%esp), %edi + movl 12(%edi), %eax + movl 8(%edx), %ecx + movl (%edx), %ebx + movl 4(%edx), %ebp + addl (%edi), %ebx + adcl 4(%edi), %ebp + movl 20(%esp), %edx + adcl 8(%edi), %ecx + movl %ebx, (%edx) + movl %ebp, 4(%edx) + adcl %esi, %eax + sbbl %ebx, %ebx + andl $1, %ebx + movl 32(%esp), %ebp + movl %ecx, %esi + subl (%ebp), %esi + movl %eax, %edi + sbbl 4(%ebp), %edi + sbbl $0, %ebx + andl $1, %ebx + jne .LBB33_2 +# BB#1: + movl %edi, %eax +.LBB33_2: + testb %bl, %bl + jne .LBB33_4 +# BB#3: + movl %esi, %ecx +.LBB33_4: + movl %ecx, 8(%edx) + movl %eax, 12(%edx) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end33: + .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L + + .globl mcl_fpDbl_sub2L + .align 16, 0x90 + .type mcl_fpDbl_sub2L,@function +mcl_fpDbl_sub2L: # @mcl_fpDbl_sub2L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 
4(%ecx), %edi + xorl %ebx, %ebx + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %eax + sbbl 8(%edx), %eax + movl 12(%edx), %ebp + movl 12(%ecx), %edx + movl 20(%esp), %ecx + movl %esi, (%ecx) + movl %edi, 4(%ecx) + sbbl %ebp, %edx + movl 32(%esp), %edi + movl (%edi), %esi + sbbl $0, %ebx + andl $1, %ebx + jne .LBB34_1 +# BB#2: + xorl %edi, %edi + jmp .LBB34_3 +.LBB34_1: + movl 4(%edi), %edi +.LBB34_3: + testb %bl, %bl + jne .LBB34_5 +# BB#4: + xorl %esi, %esi +.LBB34_5: + addl %eax, %esi + movl %esi, 8(%ecx) + adcl %edx, %edi + movl %edi, 12(%ecx) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end34: + .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L + + .globl mcl_fp_mulUnitPre3L + .align 16, 0x90 + .type mcl_fp_mulUnitPre3L,@function +mcl_fp_mulUnitPre3L: # @mcl_fp_mulUnitPre3L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + pushl %eax + movl 32(%esp), %ecx + movl 28(%esp), %edi + movl %ecx, %eax + mull 8(%edi) + movl %edx, %esi + movl %eax, (%esp) # 4-byte Spill + movl %ecx, %eax + mull 4(%edi) + movl %edx, %ebx + movl %eax, %ebp + movl %ecx, %eax + mull (%edi) + movl 24(%esp), %ecx + movl %eax, (%ecx) + addl %ebp, %edx + movl %edx, 4(%ecx) + adcl (%esp), %ebx # 4-byte Folded Reload + movl %ebx, 8(%ecx) + adcl $0, %esi + movl %esi, 12(%ecx) + addl $4, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end35: + .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L + + .globl mcl_fpDbl_mulPre3L + .align 16, 0x90 + .type mcl_fpDbl_mulPre3L,@function +mcl_fpDbl_mulPre3L: # @mcl_fpDbl_mulPre3L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $28, %esp + movl 52(%esp), %ecx + movl (%ecx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 56(%esp), %edx + movl (%edx), %edi + mull %edi + movl %edx, 20(%esp) # 4-byte Spill + movl 48(%esp), %edx + movl %eax, (%edx) + movl 4(%ecx), %ebp + movl %ebp, (%esp) # 4-byte Spill + movl 8(%ecx), %esi + movl %esi, 8(%esp) # 4-byte Spill + movl %esi, %eax + mull %edi + movl %edx, %ecx + movl %eax, 16(%esp) # 4-byte Spill + movl %ebp, %eax + mull %edi + movl %edx, %edi + movl %eax, %ebx + addl 20(%esp), %ebx # 4-byte Folded Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + adcl $0, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 56(%esp), %eax + movl 4(%eax), %ecx + movl %esi, %eax + mull %ecx + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, %esi + movl %ebp, %eax + mull %ecx + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, %ebp + movl 24(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 4(%esp) # 4-byte Spill + addl %ebx, %eax + movl 48(%esp), %ecx + movl %eax, 4(%ecx) + adcl %edi, %ebp + adcl 12(%esp), %esi # 4-byte Folded Reload + movl 56(%esp), %eax + movl 8(%eax), %edi + sbbl %ecx, %ecx + movl (%esp), %eax # 4-byte Reload + mull %edi + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, (%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + mull %edi + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, %ebx + movl 8(%esp), %eax # 4-byte Reload + mull %edi + andl $1, %ecx + addl 4(%esp), %ebp # 4-byte Folded Reload + adcl 16(%esp), %esi # 4-byte Folded Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + addl %ebx, %ebp + movl 48(%esp), %edi + movl %ebp, 8(%edi) + adcl (%esp), %esi # 4-byte Folded Reload + adcl %eax, %ecx + sbbl %eax, %eax + andl $1, %eax + addl 24(%esp), %esi # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %esi, 12(%edi) + movl %ecx, 16(%edi) + adcl %edx, %eax + movl %eax, 20(%edi) + addl $28, 
%esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end36: + .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L + + .globl mcl_fpDbl_sqrPre3L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre3L,@function +mcl_fpDbl_sqrPre3L: # @mcl_fpDbl_sqrPre3L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 56(%esp), %eax + movl 8(%eax), %ebp + movl (%eax), %ecx + movl 4(%eax), %esi + movl %ebp, %eax + mull %esi + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %ebp, %eax + mull %ecx + movl %edx, 4(%esp) # 4-byte Spill + movl %eax, %ebx + movl %ebx, (%esp) # 4-byte Spill + movl %esi, %eax + mull %esi + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %esi, %eax + mull %ecx + movl %edx, %esi + movl %eax, %edi + movl %ecx, %eax + mull %ecx + movl %edx, %ecx + movl 52(%esp), %edx + movl %eax, (%edx) + movl %ebp, %eax + mull %ebp + movl %eax, 12(%esp) # 4-byte Spill + movl %edx, 24(%esp) # 4-byte Spill + addl %edi, %ecx + movl %esi, %ebp + adcl %ebx, %ebp + movl 4(%esp), %ebx # 4-byte Reload + movl %ebx, %eax + adcl $0, %eax + addl %edi, %ecx + movl 52(%esp), %edx + movl %ecx, 4(%edx) + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl 20(%esp), %edx # 4-byte Reload + adcl %edx, %eax + sbbl %ecx, %ecx + andl $1, %ecx + addl %esi, %ebp + adcl 16(%esp), %eax # 4-byte Folded Reload + movl 28(%esp), %edi # 4-byte Reload + adcl %edi, %ecx + addl (%esp), %ebp # 4-byte Folded Reload + movl 52(%esp), %esi + movl %ebp, 8(%esi) + adcl %edx, %eax + adcl 12(%esp), %ecx # 4-byte Folded Reload + sbbl %esi, %esi + andl $1, %esi + addl %ebx, %eax + adcl %edi, %ecx + movl 52(%esp), %edx + movl %eax, 12(%edx) + movl %ecx, 16(%edx) + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%edx) + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end37: + .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L + + .globl mcl_fp_mont3L + .align 16, 0x90 + .type mcl_fp_mont3L,@function +mcl_fp_mont3L: # @mcl_fp_mont3L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 80(%esp), %ecx + movl (%ecx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 84(%esp), %edx + movl (%edx), %edx + movl %edx, 12(%esp) # 4-byte Spill + mull %edx + movl %eax, 32(%esp) # 4-byte Spill + movl %edx, 16(%esp) # 4-byte Spill + movl 88(%esp), %esi + movl -4(%esi), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, %ebp + imull %edx, %ebp + movl (%esi), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 8(%esi), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 4(%esi), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 4(%ecx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 8(%ecx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl %ebp, %eax + mull %edx + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ebp, %eax + mull %ebx + movl %edx, %ebx + movl %eax, (%esp) # 4-byte Spill + movl %ebp, %eax + mull %edi + movl %edx, %ebp + movl %eax, 4(%esp) # 4-byte Spill + movl %ecx, %eax + movl 12(%esp), %ecx # 4-byte Reload + mull %ecx + movl %edx, %esi + movl %eax, %edi + movl 36(%esp), %eax # 4-byte Reload + mull %ecx + movl %eax, %ecx + addl 16(%esp), %ecx # 4-byte Folded Reload + adcl %edi, %edx + adcl $0, %esi + addl (%esp), %ebp # 4-byte Folded Reload + adcl 8(%esp), %ebx # 4-byte Folded Reload + movl 28(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 4(%esp), %edi # 4-byte Reload + addl 32(%esp), %edi # 4-byte Folded Reload + adcl %ecx, %ebp + adcl 
%edx, %ebx + adcl %esi, %eax + movl %eax, 28(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl 4(%eax), %ecx + movl %ecx, %eax + mull 20(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, %eax + mull 36(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 12(%esp) # 4-byte Spill + movl %ecx, %eax + mull 24(%esp) # 4-byte Folded Reload + movl %edx, %edi + addl 12(%esp), %edi # 4-byte Folded Reload + adcl 16(%esp), %esi # 4-byte Folded Reload + movl 32(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %eax, %edx + addl %ebp, %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl %ebx, %edi + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 32(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %edx, %esi + imull 52(%esp), %esi # 4-byte Folded Reload + andl $1, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl %esi, %eax + mull 48(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 8(%esp) # 4-byte Spill + movl %esi, %eax + mull 44(%esp) # 4-byte Folded Reload + movl %edx, %ebx + movl %eax, 4(%esp) # 4-byte Spill + movl %esi, %eax + mull 40(%esp) # 4-byte Folded Reload + movl %edx, %ecx + addl 4(%esp), %ecx # 4-byte Folded Reload + adcl 8(%esp), %ebx # 4-byte Folded Reload + adcl $0, %ebp + addl 12(%esp), %eax # 4-byte Folded Reload + adcl %edi, %ecx + adcl 16(%esp), %ebx # 4-byte Folded Reload + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax + movl 8(%eax), %esi + movl %esi, %eax + mull 20(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %esi, %eax + mull 36(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 16(%esp) # 4-byte Spill + movl %esi, %eax + mull 24(%esp) # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + movl %edx, %esi + addl 16(%esp), %esi # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + movl 32(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 36(%esp), %edx # 4-byte Reload + addl %ecx, %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl %ebx, %esi + adcl %ebp, %edi + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + sbbl %ecx, %ecx + movl 52(%esp), %ebp # 4-byte Reload + imull %edx, %ebp + movl %ebp, 52(%esp) # 4-byte Spill + andl $1, %ecx + movl %ebp, %eax + mull 40(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, %ebx + movl %ebp, %eax + mull 48(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 24(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + mull 44(%esp) # 4-byte Folded Reload + addl 28(%esp), %eax # 4-byte Folded Reload + adcl 24(%esp), %edx # 4-byte Folded Reload + adcl $0, %ebp + addl 36(%esp), %ebx # 4-byte Folded Reload + adcl %esi, %eax + adcl %edi, %edx + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl $0, %ecx + movl %eax, %ebx + subl 40(%esp), %ebx # 4-byte Folded Reload + movl %edx, %edi + sbbl 44(%esp), %edi # 4-byte Folded Reload + movl %ebp, %esi + sbbl 48(%esp), %esi # 4-byte Folded Reload + sbbl $0, %ecx + andl $1, %ecx + jne .LBB38_2 +# BB#1: + movl %ebx, %eax +.LBB38_2: + movl 76(%esp), %ebx + movl %eax, (%ebx) + testb %cl, %cl + jne .LBB38_4 +# BB#3: + movl %edi, %edx +.LBB38_4: + movl %edx, 4(%ebx) + jne .LBB38_6 +# BB#5: + movl %esi, %ebp +.LBB38_6: + movl %ebp, 8(%ebx) + addl $56, %esp 
+ popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end38: + .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L + + .globl mcl_fp_montNF3L + .align 16, 0x90 + .type mcl_fp_montNF3L,@function +mcl_fp_montNF3L: # @mcl_fp_montNF3L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 84(%esp), %ebp + movl (%ebp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 88(%esp), %ecx + movl (%ecx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + mull %ecx + movl %eax, 32(%esp) # 4-byte Spill + movl %edx, 36(%esp) # 4-byte Spill + movl 92(%esp), %esi + movl -4(%esi), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, %ecx + imull %edx, %ecx + movl (%esi), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 8(%esi), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 4(%esi), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 4(%ebp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 8(%ebp), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edx + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edi + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, (%esp) # 4-byte Spill + movl %ecx, %eax + mull %ebx + movl %edx, 4(%esp) # 4-byte Spill + movl %eax, %ebp + movl %esi, %eax + movl 20(%esp), %ecx # 4-byte Reload + mull %ecx + movl %edx, %edi + movl %eax, %ebx + movl 40(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, %ecx + movl %eax, %esi + addl 36(%esp), %esi # 4-byte Folded Reload + adcl %ebx, %ecx + adcl $0, %edi + addl 32(%esp), %ebp # 4-byte Folded Reload + adcl (%esp), %esi # 4-byte Folded Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + adcl $0, %edi + addl 4(%esp), %esi # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + movl 88(%esp), %eax + movl 4(%eax), %ebx + movl %ebx, %eax + mull 24(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 36(%esp) # 4-byte Spill + movl %ebx, %eax + mull 40(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ebx, %eax + mull 28(%esp) # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + movl %edx, %ebx + addl 16(%esp), %ebx # 4-byte Folded Reload + movl 32(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + adcl $0, %ebp + movl 20(%esp), %edx # 4-byte Reload + addl %esi, %edx + adcl %ecx, %ebx + adcl %edi, %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl $0, %ebp + movl %edx, %ecx + movl %edx, %edi + imull 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %eax + mull 52(%esp) # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ecx, %eax + mull 48(%esp) # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, %esi + movl %ecx, %eax + mull 44(%esp) # 4-byte Folded Reload + addl %edi, %eax + adcl %ebx, %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + adcl $0, %ebp + addl %edx, %esi + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl 88(%esp), %eax + movl 8(%eax), %edi + movl %edi, %eax + mull 24(%esp) # 4-byte Folded Reload + movl %edx, %ebx + movl %eax, 32(%esp) # 4-byte Spill + movl %edi, %eax + mull 40(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 40(%esp) # 4-byte Spill + movl %edi, %eax + mull 28(%esp) # 4-byte Folded Reload + movl %edx, %edi + addl 40(%esp), %edi # 4-byte Folded Reload + adcl 
32(%esp), %ecx # 4-byte Folded Reload + adcl $0, %ebx + addl %esi, %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + adcl %ebp, %ecx + adcl $0, %ebx + movl 56(%esp), %esi # 4-byte Reload + imull %eax, %esi + movl %esi, 56(%esp) # 4-byte Spill + movl %esi, %eax + mull 44(%esp) # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, %ebp + movl %esi, %eax + mull 52(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, %esi + movl 56(%esp), %eax # 4-byte Reload + mull 48(%esp) # 4-byte Folded Reload + addl 32(%esp), %ebp # 4-byte Folded Reload + adcl %edi, %eax + adcl %ecx, %esi + adcl $0, %ebx + addl 40(%esp), %eax # 4-byte Folded Reload + adcl %edx, %esi + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %eax, %edi + subl 44(%esp), %edi # 4-byte Folded Reload + movl %esi, %edx + sbbl 48(%esp), %edx # 4-byte Folded Reload + movl %ebx, %ecx + sbbl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + sarl $31, %ebp + testl %ebp, %ebp + js .LBB39_2 +# BB#1: + movl %edi, %eax +.LBB39_2: + movl 80(%esp), %edi + movl %eax, (%edi) + js .LBB39_4 +# BB#3: + movl %edx, %esi +.LBB39_4: + movl %esi, 4(%edi) + js .LBB39_6 +# BB#5: + movl %ecx, %ebx +.LBB39_6: + movl %ebx, 8(%edi) + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end39: + .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L + + .globl mcl_fp_montRed3L + .align 16, 0x90 + .type mcl_fp_montRed3L,@function +mcl_fp_montRed3L: # @mcl_fp_montRed3L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 68(%esp), %eax + movl -4(%eax), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl (%eax), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 64(%esp), %ebx + movl (%ebx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + imull %edx, %ecx + movl 8(%eax), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 4(%eax), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edx + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %ecx, %eax + mull %esi + movl %edx, %esi + movl %eax, %ebp + movl %ecx, %eax + mull %edi + movl %edx, %ecx + addl %ebp, %ecx + adcl 12(%esp), %esi # 4-byte Folded Reload + movl 20(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 16(%esp), %eax # 4-byte Folded Reload + adcl 4(%ebx), %ecx + adcl 8(%ebx), %esi + adcl 12(%ebx), %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 20(%ebx), %eax + movl 16(%ebx), %edx + adcl $0, %edx + movl %edx, 8(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 12(%esp) # 4-byte Spill + sbbl %ebx, %ebx + andl $1, %ebx + movl %ecx, %edi + imull 36(%esp), %edi # 4-byte Folded Reload + movl %edi, %eax + mull 32(%esp) # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %edi, %eax + mull 28(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, (%esp) # 4-byte Spill + movl %edi, %eax + mull 24(%esp) # 4-byte Folded Reload + movl %edx, %edi + addl (%esp), %edi # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %ecx, %eax + adcl %esi, %edi + adcl 20(%esp), %ebp # 4-byte Folded Reload + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl $0, 12(%esp) # 4-byte Folded Spill + adcl $0, %ebx + movl 36(%esp), %ecx # 4-byte Reload + imull %edi, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl %ecx, %eax + mull 32(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 
20(%esp) # 4-byte Spill + movl %ecx, %eax + mull 24(%esp) # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl %eax, %ecx + movl 36(%esp), %eax # 4-byte Reload + mull 28(%esp) # 4-byte Folded Reload + addl 8(%esp), %eax # 4-byte Folded Reload + adcl 20(%esp), %edx # 4-byte Folded Reload + adcl $0, %esi + addl %edi, %ecx + adcl %ebp, %eax + adcl 16(%esp), %edx # 4-byte Folded Reload + adcl 12(%esp), %esi # 4-byte Folded Reload + adcl $0, %ebx + movl %eax, %ebp + subl 24(%esp), %ebp # 4-byte Folded Reload + movl %edx, %edi + sbbl 28(%esp), %edi # 4-byte Folded Reload + movl %esi, %ecx + sbbl 32(%esp), %ecx # 4-byte Folded Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB40_2 +# BB#1: + movl %ebp, %eax +.LBB40_2: + movl 60(%esp), %ebp + movl %eax, (%ebp) + testb %bl, %bl + jne .LBB40_4 +# BB#3: + movl %edi, %edx +.LBB40_4: + movl %edx, 4(%ebp) + jne .LBB40_6 +# BB#5: + movl %ecx, %esi +.LBB40_6: + movl %esi, 8(%ebp) + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end40: + .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L + + .globl mcl_fp_addPre3L + .align 16, 0x90 + .type mcl_fp_addPre3L,@function +mcl_fp_addPre3L: # @mcl_fp_addPre3L +# BB#0: + pushl %esi + movl 16(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 12(%esp), %esi + addl (%esi), %ecx + adcl 4(%esi), %edx + movl 8(%eax), %eax + adcl 8(%esi), %eax + movl 8(%esp), %esi + movl %ecx, (%esi) + movl %edx, 4(%esi) + movl %eax, 8(%esi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + retl +.Lfunc_end41: + .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L + + .globl mcl_fp_subPre3L + .align 16, 0x90 + .type mcl_fp_subPre3L,@function +mcl_fp_subPre3L: # @mcl_fp_subPre3L +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %esi + xorl %eax, %eax + movl 20(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %esi + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl 12(%esp), %edi + movl %edx, (%edi) + movl %esi, 4(%edi) + movl %ecx, 8(%edi) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + retl +.Lfunc_end42: + .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L + + .globl mcl_fp_shr1_3L + .align 16, 0x90 + .type mcl_fp_shr1_3L,@function +mcl_fp_shr1_3L: # @mcl_fp_shr1_3L +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl 8(%eax), %ecx + movl (%eax), %edx + movl 4(%eax), %eax + shrdl $1, %eax, %edx + movl 8(%esp), %esi + movl %edx, (%esi) + shrdl $1, %ecx, %eax + movl %eax, 4(%esi) + shrl %ecx + movl %ecx, 8(%esi) + popl %esi + retl +.Lfunc_end43: + .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L + + .globl mcl_fp_add3L + .align 16, 0x90 + .type mcl_fp_add3L,@function +mcl_fp_add3L: # @mcl_fp_add3L +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %ecx + movl 20(%esp), %esi + addl (%esi), %eax + adcl 4(%esi), %ecx + movl 8(%edx), %edx + adcl 8(%esi), %edx + movl 16(%esp), %esi + movl %eax, (%esi) + movl %ecx, 4(%esi) + movl %edx, 8(%esi) + sbbl %ebx, %ebx + andl $1, %ebx + movl 28(%esp), %edi + subl (%edi), %eax + sbbl 4(%edi), %ecx + sbbl 8(%edi), %edx + sbbl $0, %ebx + testb $1, %bl + jne .LBB44_2 +# BB#1: # %nocarry + movl %eax, (%esi) + movl %ecx, 4(%esi) + movl %edx, 8(%esi) +.LBB44_2: # %carry + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end44: + .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L + + .globl mcl_fp_addNF3L + .align 16, 0x90 + .type mcl_fp_addNF3L,@function +mcl_fp_addNF3L: # @mcl_fp_addNF3L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 
28(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %ecx + movl 24(%esp), %esi + addl (%esi), %edx + adcl 4(%esi), %ecx + movl 8(%eax), %eax + adcl 8(%esi), %eax + movl 32(%esp), %ebp + movl %edx, %ebx + subl (%ebp), %ebx + movl %ecx, %edi + sbbl 4(%ebp), %edi + movl %eax, %esi + sbbl 8(%ebp), %esi + movl %esi, %ebp + sarl $31, %ebp + testl %ebp, %ebp + js .LBB45_2 +# BB#1: + movl %ebx, %edx +.LBB45_2: + movl 20(%esp), %ebx + movl %edx, (%ebx) + js .LBB45_4 +# BB#3: + movl %edi, %ecx +.LBB45_4: + movl %ecx, 4(%ebx) + js .LBB45_6 +# BB#5: + movl %esi, %eax +.LBB45_6: + movl %eax, 8(%ebx) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end45: + .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L + + .globl mcl_fp_sub3L + .align 16, 0x90 + .type mcl_fp_sub3L,@function +mcl_fp_sub3L: # @mcl_fp_sub3L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %edx + movl (%edx), %ecx + movl 4(%edx), %eax + xorl %ebx, %ebx + movl 28(%esp), %esi + subl (%esi), %ecx + sbbl 4(%esi), %eax + movl 8(%edx), %edx + sbbl 8(%esi), %edx + movl 20(%esp), %esi + movl %ecx, (%esi) + movl %eax, 4(%esi) + movl %edx, 8(%esi) + sbbl $0, %ebx + testb $1, %bl + je .LBB46_2 +# BB#1: # %carry + movl 32(%esp), %edi + movl 4(%edi), %ebx + movl 8(%edi), %ebp + addl (%edi), %ecx + movl %ecx, (%esi) + adcl %eax, %ebx + movl %ebx, 4(%esi) + adcl %edx, %ebp + movl %ebp, 8(%esi) +.LBB46_2: # %nocarry + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end46: + .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L + + .globl mcl_fp_subNF3L + .align 16, 0x90 + .type mcl_fp_subNF3L,@function +mcl_fp_subNF3L: # @mcl_fp_subNF3L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 28(%esp), %esi + subl (%esi), %ecx + sbbl 4(%esi), %edx + movl 8(%eax), %eax + sbbl 8(%esi), %eax + movl %eax, %esi + sarl $31, %esi + movl %esi, %edi + shldl $1, %eax, %edi + movl 32(%esp), %ebx + andl (%ebx), %edi + movl 8(%ebx), %ebp + andl %esi, %ebp + andl 4(%ebx), %esi + addl %ecx, %edi + adcl %edx, %esi + movl 20(%esp), %ecx + movl %edi, (%ecx) + movl %esi, 4(%ecx) + adcl %eax, %ebp + movl %ebp, 8(%ecx) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end47: + .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L + + .globl mcl_fpDbl_add3L + .align 16, 0x90 + .type mcl_fpDbl_add3L,@function +mcl_fpDbl_add3L: # @mcl_fpDbl_add3L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + pushl %eax + movl 32(%esp), %esi + movl 20(%esi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 16(%esi), %edi + movl 12(%esi), %ebx + movl (%esi), %edx + movl 28(%esp), %eax + addl (%eax), %edx + movl 24(%esp), %ecx + movl %edx, (%ecx) + movl 8(%esi), %edx + movl 4(%esi), %esi + adcl 4(%eax), %esi + adcl 8(%eax), %edx + movl %esi, 4(%ecx) + movl 20(%eax), %ebp + movl %edx, 8(%ecx) + movl 12(%eax), %esi + movl 16(%eax), %edx + adcl %ebx, %esi + adcl %edi, %edx + adcl (%esp), %ebp # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + movl 36(%esp), %ecx + movl %esi, %ebx + subl (%ecx), %ebx + movl %edx, %edi + sbbl 4(%ecx), %edi + movl %edi, (%esp) # 4-byte Spill + movl %ebp, %ecx + movl 36(%esp), %edi + sbbl 8(%edi), %ecx + sbbl $0, %eax + andl $1, %eax + jne .LBB48_2 +# BB#1: + movl %ecx, %ebp +.LBB48_2: + testb %al, %al + jne .LBB48_4 +# BB#3: + movl %ebx, %esi +.LBB48_4: + movl 24(%esp), %eax + movl %esi, 12(%eax) + jne .LBB48_6 +# BB#5: + movl (%esp), %edx # 4-byte Reload +.LBB48_6: + movl %edx, 16(%eax) + movl %ebp, 20(%eax) + addl 
$4, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end48: + .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L + + .globl mcl_fpDbl_sub3L + .align 16, 0x90 + .type mcl_fpDbl_sub3L,@function +mcl_fpDbl_sub3L: # @mcl_fpDbl_sub3L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %esi + movl 28(%esp), %ebx + subl (%ebx), %edx + sbbl 4(%ebx), %esi + movl 8(%ecx), %ebp + sbbl 8(%ebx), %ebp + movl 20(%esp), %eax + movl %edx, (%eax) + movl 12(%ecx), %edi + sbbl 12(%ebx), %edi + movl %esi, 4(%eax) + movl 16(%ecx), %esi + sbbl 16(%ebx), %esi + movl 20(%ebx), %ebx + movl 20(%ecx), %edx + movl %ebp, 8(%eax) + sbbl %ebx, %edx + movl $0, %ecx + sbbl $0, %ecx + andl $1, %ecx + movl 32(%esp), %ebp + jne .LBB49_1 +# BB#2: + xorl %ebx, %ebx + jmp .LBB49_3 +.LBB49_1: + movl 8(%ebp), %ebx +.LBB49_3: + testb %cl, %cl + movl $0, %eax + jne .LBB49_4 +# BB#5: + xorl %ecx, %ecx + jmp .LBB49_6 +.LBB49_4: + movl (%ebp), %ecx + movl 4(%ebp), %eax +.LBB49_6: + addl %edi, %ecx + adcl %esi, %eax + movl 20(%esp), %esi + movl %ecx, 12(%esi) + movl %eax, 16(%esi) + adcl %edx, %ebx + movl %ebx, 20(%esi) + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end49: + .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L + + .globl mcl_fp_mulUnitPre4L + .align 16, 0x90 + .type mcl_fp_mulUnitPre4L,@function +mcl_fp_mulUnitPre4L: # @mcl_fp_mulUnitPre4L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $12, %esp + movl 40(%esp), %ecx + movl 36(%esp), %ebp + movl %ecx, %eax + mull 12(%ebp) + movl %edx, %esi + movl %eax, 8(%esp) # 4-byte Spill + movl %ecx, %eax + mull 8(%ebp) + movl %edx, %ebx + movl %eax, 4(%esp) # 4-byte Spill + movl %ecx, %eax + mull 4(%ebp) + movl %edx, %edi + movl %eax, (%esp) # 4-byte Spill + movl %ecx, %eax + mull (%ebp) + movl 32(%esp), %ecx + movl %eax, (%ecx) + addl (%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%ecx) + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%ecx) + adcl 8(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%ecx) + adcl $0, %esi + movl %esi, 16(%ecx) + addl $12, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end50: + .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L + + .globl mcl_fpDbl_mulPre4L + .align 16, 0x90 + .type mcl_fpDbl_mulPre4L,@function +mcl_fpDbl_mulPre4L: # @mcl_fpDbl_mulPre4L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 80(%esp), %edi + movl (%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 84(%esp), %ecx + movl (%ecx), %esi + movl %ecx, %ebp + mull %esi + movl %edx, 12(%esp) # 4-byte Spill + movl 76(%esp), %ecx + movl %eax, (%ecx) + movl 4(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 8(%edi), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 12(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 4(%ebp), %ecx + movl %eax, %ebp + mull %ecx + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ebx, %eax + mull %ecx + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + movl %edi, %eax + mull %ecx + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 8(%esp) # 4-byte Spill + movl %eax, (%esp) # 4-byte Spill + movl %ebp, %eax + mull %esi + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, %ebp + movl %ebx, %eax + mull %esi + movl %edx, %ecx + movl %eax, %ebx + movl %edi, %eax + mull %esi 
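+ # (annotation, not compiler output) mcl_fpDbl_mulPre4L is a schoolbook
+ # 4x4-limb multiply: each limb of y scales all of x via mull, and the partial
+ # products are folded together with adcl chains. The "Pre" routines emit the
+ # full 8-limb (256-bit) product and leave modular reduction to mont/montRed.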
+ movl %edx, %edi + addl 12(%esp), %eax # 4-byte Folded Reload + adcl %ebx, %edi + adcl %ebp, %ecx + movl 52(%esp), %esi # 4-byte Reload + adcl $0, %esi + addl (%esp), %eax # 4-byte Folded Reload + movl 76(%esp), %edx + movl %eax, 4(%edx) + adcl 4(%esp), %edi # 4-byte Folded Reload + adcl 16(%esp), %ecx # 4-byte Folded Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 8(%esp), %edi # 4-byte Folded Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 52(%esp) # 4-byte Spill + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl 8(%eax), %esi + movl 20(%esp), %eax # 4-byte Reload + mull %esi + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + mull %esi + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, %ebx + movl 40(%esp), %eax # 4-byte Reload + mull %esi + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, %ebp + movl 48(%esp), %eax # 4-byte Reload + mull %esi + movl %edx, 28(%esp) # 4-byte Spill + addl %edi, %eax + movl 76(%esp), %edx + movl %eax, 8(%edx) + adcl %ecx, %ebp + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax + movl 12(%eax), %esi + sbbl %ecx, %ecx + movl %esi, %eax + movl 80(%esp), %edi + mull 12(%edi) + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %eax + mull 8(%edi) + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %esi, %eax + mull 4(%edi) + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, %edi + movl %esi, %eax + movl 80(%esp), %edx + mull (%edx) + movl %eax, %esi + andl $1, %ecx + addl 28(%esp), %ebp # 4-byte Folded Reload + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl 44(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + adcl 36(%esp), %ecx # 4-byte Folded Reload + addl %esi, %ebp + movl 76(%esp), %esi + movl %ebp, 12(%esi) + adcl %edi, %ebx + movl %eax, %edi + adcl 16(%esp), %edi # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl %edx, %ebx + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %esi, %edx + movl %ebx, 16(%edx) + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %edi, 20(%edx) + movl %ecx, 24(%edx) + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%edx) + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end51: + .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L + + .globl mcl_fpDbl_sqrPre4L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre4L,@function +mcl_fpDbl_sqrPre4L: # @mcl_fpDbl_sqrPre4L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 64(%esp), %ecx + movl 12(%ecx), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl (%ecx), %ebx + movl 4(%ecx), %esi + movl %ebp, %eax + mull %esi + movl %eax, 12(%esp) # 4-byte Spill + movl %edx, 36(%esp) # 4-byte Spill + movl 8(%ecx), %edi + movl %edi, %eax + mull %esi + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebp, %eax + mull %ebx + movl %edx, %ebp + movl %eax, %ecx + movl %edi, %eax + mull %ebx + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %esi, %eax + mull %esi + movl %edx, 8(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %esi, %eax + mull %ebx + movl %edx, (%esp) # 4-byte Spill + movl 
%eax, %esi + movl %ebx, %eax + mull %ebx + movl 60(%esp), %ebx + movl %eax, (%ebx) + addl %esi, %edx + movl (%esp), %eax # 4-byte Reload + movl %eax, %ebx + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + adcl $0, %ebp + addl %esi, %edx + movl 60(%esp), %esi + movl %edx, 4(%esi) + adcl 4(%esp), %ebx # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + adcl 12(%esp), %ebp # 4-byte Folded Reload + sbbl %esi, %esi + andl $1, %esi + addl %eax, %ebx + adcl 8(%esp), %ecx # 4-byte Folded Reload + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl 36(%esp), %esi # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + mull %edi + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %edi, %eax + mull %edi + movl %eax, %edi + addl 20(%esp), %ebx # 4-byte Folded Reload + movl 60(%esp), %eax + movl %ebx, 8(%eax) + adcl 24(%esp), %ecx # 4-byte Folded Reload + adcl %ebp, %edi + movl 36(%esp), %eax # 4-byte Reload + adcl %esi, %eax + sbbl %esi, %esi + andl $1, %esi + addl 28(%esp), %ecx # 4-byte Folded Reload + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl %edx, %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + movl 64(%esp), %esi + movl 12(%esi), %ebp + movl %ebp, %eax + mull 8(%esi) + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, %ebx + movl %ebp, %eax + mull 4(%esi) + movl %esi, %edi + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, %esi + movl %ebp, %eax + mull (%edi) + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, %edi + movl %ebp, %eax + mull %ebp + addl %ecx, %edi + movl 60(%esp), %ebp + movl %edi, 12(%ebp) + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl 36(%esp), %ebx # 4-byte Folded Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + sbbl %ecx, %ecx + andl $1, %ecx + addl 16(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %ebp, %edi + movl %esi, 16(%edi) + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ebx, 20(%edi) + movl %eax, 24(%edi) + adcl %edx, %ecx + movl %ecx, 28(%edi) + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end52: + .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L + + .globl mcl_fp_mont4L + .align 16, 0x90 + .type mcl_fp_mont4L,@function +mcl_fp_mont4L: # @mcl_fp_mont4L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $88, %esp + movl 112(%esp), %ecx + movl (%ecx), %eax + movl %ecx, %ebp + movl %eax, 60(%esp) # 4-byte Spill + movl 116(%esp), %edx + movl (%edx), %edx + movl %edx, 28(%esp) # 4-byte Spill + mull %edx + movl %eax, 48(%esp) # 4-byte Spill + movl %edx, 32(%esp) # 4-byte Spill + movl 120(%esp), %edi + movl -4(%edi), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, %ebx + imull %edx, %ebx + movl (%edi), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 12(%edi), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 8(%edi), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 4(%edi), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl %ebp, %edi + movl 4(%edi), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 12(%edi), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 8(%edi), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl %ebx, %eax + mull %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebx, %eax + mull %esi + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ebx, %eax + mull %ecx + movl 
%edx, 40(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ebx, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %ebp, %eax + movl 28(%esp), %esi # 4-byte Reload + mull %esi + movl %edx, %ecx + movl %eax, 4(%esp) # 4-byte Spill + movl %edi, %eax + mull %esi + movl %edx, %ebp + movl %eax, (%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + mull %esi + movl %edx, %ebx + movl %eax, %edi + addl 32(%esp), %edi # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 28(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + addl 8(%esp), %esi # 4-byte Folded Reload + movl 40(%esp), %edx # 4-byte Reload + adcl 16(%esp), %edx # 4-byte Folded Reload + movl 20(%esp), %ecx # 4-byte Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 44(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 12(%esp), %ebp # 4-byte Reload + addl 48(%esp), %ebp # 4-byte Folded Reload + adcl %edi, %esi + movl %esi, 36(%esp) # 4-byte Spill + adcl %ebx, %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebx + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + sbbl %ebp, %ebp + andl $1, %ebp + movl 116(%esp), %eax + movl 4(%eax), %esi + movl %esi, %eax + mull 52(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 32(%esp) # 4-byte Spill + movl %esi, %eax + mull 56(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %esi, %eax + mull 64(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %edx, %esi + addl 24(%esp), %esi # 4-byte Folded Reload + movl %ecx, %edx + adcl 28(%esp), %edx # 4-byte Folded Reload + movl 48(%esp), %ecx # 4-byte Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + adcl $0, %edi + addl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl %ebx, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl %ebp, %edi + movl %edi, 44(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %eax, %ecx + imull 80(%esp), %ecx # 4-byte Folded Reload + andl $1, %esi + movl %ecx, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ecx, %eax + mull 72(%esp) # 4-byte Folded Reload + movl %edx, %ebx + movl %eax, 20(%esp) # 4-byte Spill + movl %ecx, %eax + mull 68(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, %edi + addl 16(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %ebp # 4-byte Folded Reload + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl 36(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 28(%esp), %eax # 4-byte Folded Reload + adcl 40(%esp), %edi # 4-byte Folded Reload + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl 48(%esp), %ebx # 4-byte Folded Reload + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 116(%esp), %eax + movl 8(%eax), %esi + movl %esi, %eax + mull 52(%esp) # 4-byte Folded Reload 
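+ # (annotation, not compiler output) word-serial Montgomery step: for each
+ # multiplier limb y[i] the accumulator picks up x*y[i], then
+ # m = t[0] * (-p^-1 mod 2^32) (the value loaded from -4(p) at entry) is used
+ # to add m*p so the lowest limb cancels and the accumulator shifts down one
+ # limb; a final conditional subtraction of p keeps the result below p.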
+ movl %edx, %ecx + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %eax + mull 56(%esp) # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %esi, %eax + mull 64(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %esi, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + addl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + movl 44(%esp), %edx # 4-byte Reload + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %ecx, %eax + adcl $0, %eax + movl 40(%esp), %ecx # 4-byte Reload + addl %edi, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + adcl %ebp, 32(%esp) # 4-byte Folded Spill + adcl %ebx, %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %ecx, %esi + imull 80(%esp), %esi # 4-byte Folded Reload + andl $1, %eax + movl %eax, %ecx + movl %esi, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %eax + mull 72(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 20(%esp) # 4-byte Spill + movl %esi, %eax + mull 68(%esp) # 4-byte Folded Reload + movl %edx, %ebx + movl %eax, %edi + movl %esi, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, %esi + addl %edi, %esi + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 40(%esp), %eax # 4-byte Folded Reload + adcl 32(%esp), %esi # 4-byte Folded Reload + adcl 48(%esp), %ebx # 4-byte Folded Reload + adcl 44(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 116(%esp), %eax + movl 12(%eax), %ebp + movl %ebp, %eax + mull 52(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 52(%esp) # 4-byte Spill + movl %ebp, %eax + mull 56(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ebp, %eax + mull 64(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 32(%esp) # 4-byte Spill + movl %ebp, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl %edx, %ebp + addl 32(%esp), %ebp # 4-byte Folded Reload + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %edx # 4-byte Reload + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edi, %eax + adcl $0, %eax + movl 64(%esp), %edi # 4-byte Reload + addl %esi, %edi + movl %edi, 64(%esp) # 4-byte Spill + adcl %ebx, %ebp + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + sbbl %ebx, %ebx + movl 80(%esp), %esi # 4-byte Reload + imull %edi, %esi + movl %esi, 80(%esp) # 4-byte Spill + andl $1, %ebx + movl %esi, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 48(%esp) # 4-byte Spill + movl %esi, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, %ecx + movl %esi, %eax + mull 72(%esp) # 
4-byte Folded Reload + movl %edx, %esi + movl %eax, 40(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + mull 68(%esp) # 4-byte Folded Reload + addl 44(%esp), %eax # 4-byte Folded Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + adcl 48(%esp), %esi # 4-byte Folded Reload + adcl $0, %edi + addl 64(%esp), %ecx # 4-byte Folded Reload + adcl %ebp, %eax + adcl 60(%esp), %edx # 4-byte Folded Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + adcl 52(%esp), %edi # 4-byte Folded Reload + adcl $0, %ebx + movl %eax, %ebp + subl 84(%esp), %ebp # 4-byte Folded Reload + movl %edx, %ecx + sbbl 68(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 80(%esp) # 4-byte Spill + movl %esi, %ecx + sbbl 72(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 84(%esp) # 4-byte Spill + movl %edi, %ecx + sbbl 76(%esp), %ecx # 4-byte Folded Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB53_2 +# BB#1: + movl %ebp, %eax +.LBB53_2: + movl 108(%esp), %ebp + movl %eax, (%ebp) + testb %bl, %bl + jne .LBB53_4 +# BB#3: + movl 80(%esp), %edx # 4-byte Reload +.LBB53_4: + movl %edx, 4(%ebp) + jne .LBB53_6 +# BB#5: + movl 84(%esp), %esi # 4-byte Reload +.LBB53_6: + movl %esi, 8(%ebp) + jne .LBB53_8 +# BB#7: + movl %ecx, %edi +.LBB53_8: + movl %edi, 12(%ebp) + addl $88, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end53: + .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L + + .globl mcl_fp_montNF4L + .align 16, 0x90 + .type mcl_fp_montNF4L,@function +mcl_fp_montNF4L: # @mcl_fp_montNF4L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $84, %esp + movl 108(%esp), %ecx + movl (%ecx), %eax + movl %ecx, %ebp + movl %eax, 52(%esp) # 4-byte Spill + movl 112(%esp), %ecx + movl (%ecx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + mull %ecx + movl %eax, 40(%esp) # 4-byte Spill + movl %edx, 76(%esp) # 4-byte Spill + movl 116(%esp), %esi + movl -4(%esi), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, %ecx + imull %edx, %ecx + movl (%esi), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 12(%esi), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 8(%esi), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 4(%esi), %ebx + movl %ebx, 64(%esp) # 4-byte Spill + movl %ebp, %eax + movl 4(%eax), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 12(%eax), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 8(%eax), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edi + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, %eax + mull %ebx + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ecx, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, %edi + movl %ebp, %eax + movl 36(%esp), %ebx # 4-byte Reload + mull %ebx + movl %edx, %ecx + movl %eax, 4(%esp) # 4-byte Spill + movl %esi, %eax + mull %ebx + movl %ebx, %esi + movl %edx, %ebx + movl %eax, (%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + mull %esi + movl %edx, %esi + movl %eax, %ebp + addl 76(%esp), %ebp # 4-byte Folded Reload + adcl (%esp), %esi # 4-byte Folded Reload + adcl 4(%esp), %ebx # 4-byte Folded Reload + adcl $0, %ecx + addl 40(%esp), %edi # 4-byte Folded Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 16(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %ebx # 4-byte Folded Reload + adcl $0, %ecx + addl 12(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %esi # 4-byte Folded 
Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl 4(%eax), %edi + movl %edi, %eax + mull 44(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %edi, %eax + mull 48(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %edi, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 20(%esp) # 4-byte Spill + movl %edi, %eax + mull 52(%esp) # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + movl %edx, %edi + addl 20(%esp), %edi # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl 28(%esp), %edx # 4-byte Folded Reload + movl 76(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %ebp, 32(%esp) # 4-byte Folded Spill + adcl %esi, %edi + adcl %ebx, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + movl %esi, %ecx + imull 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %eax + mull 72(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + mull 68(%esp) # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, %ebp + movl %ecx, %eax + mull 64(%esp) # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, %ebx + movl %ecx, %eax + mull 80(%esp) # 4-byte Folded Reload + addl %esi, %eax + adcl %edi, %ebx + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl 40(%esp), %ecx # 4-byte Reload + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl 76(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %edx, %ebx + adcl 16(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 28(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl 8(%eax), %ecx + movl %ecx, %eax + mull 44(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 24(%esp) # 4-byte Spill + movl %ecx, %eax + mull 48(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %ecx, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, %edi + movl %ecx, %eax + mull 52(%esp) # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + movl %edx, %ecx + addl %edi, %ecx + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl 36(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + adcl $0, %esi + movl 32(%esp), %edx # 4-byte Reload + addl %ebx, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, %esi + movl %edx, %ebx + imull 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, %eax + mull 72(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 76(%esp) # 4-byte Spill + movl %ebx, %eax + mull 68(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %eax + mull 64(%esp) # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, %edi + movl %ebx, %eax + mull 80(%esp) # 4-byte 
Folded Reload + addl 32(%esp), %eax # 4-byte Folded Reload + adcl %ecx, %edi + movl 40(%esp), %ecx # 4-byte Reload + adcl %ebp, %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + adcl $0, %esi + addl %edx, %edi + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl 112(%esp), %eax + movl 12(%eax), %ecx + movl %ecx, %eax + mull 44(%esp) # 4-byte Folded Reload + movl %edx, %ebx + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + mull 48(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ecx, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull 52(%esp) # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl %edx, %ecx + addl 32(%esp), %ecx # 4-byte Folded Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 48(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + adcl $0, %ebx + movl 60(%esp), %edx # 4-byte Reload + addl %edi, %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl 40(%esp), %ecx # 4-byte Folded Reload + adcl 76(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 40(%esp) # 4-byte Spill + adcl %esi, %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %ebx + movl 56(%esp), %edi # 4-byte Reload + imull %edx, %edi + movl %edi, %eax + mull 72(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, %esi + movl %edi, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl %edi, %eax + movl %edi, %ebp + mull 68(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, %edi + movl %ebp, %eax + mull 64(%esp) # 4-byte Folded Reload + movl 44(%esp), %ebp # 4-byte Reload + addl 60(%esp), %ebp # 4-byte Folded Reload + adcl %ecx, %eax + adcl 40(%esp), %edi # 4-byte Folded Reload + adcl 48(%esp), %esi # 4-byte Folded Reload + adcl $0, %ebx + addl 56(%esp), %eax # 4-byte Folded Reload + adcl %edx, %edi + adcl 52(%esp), %esi # 4-byte Folded Reload + adcl 76(%esp), %ebx # 4-byte Folded Reload + movl %eax, %edx + subl 80(%esp), %edx # 4-byte Folded Reload + movl %edi, %ebp + sbbl 64(%esp), %ebp # 4-byte Folded Reload + movl %esi, %ecx + sbbl 68(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 80(%esp) # 4-byte Spill + movl %ebx, %ecx + sbbl 72(%esp), %ecx # 4-byte Folded Reload + testl %ecx, %ecx + js .LBB54_2 +# BB#1: + movl %edx, %eax +.LBB54_2: + movl 104(%esp), %edx + movl %eax, (%edx) + js .LBB54_4 +# BB#3: + movl %ebp, %edi +.LBB54_4: + movl %edi, 4(%edx) + js .LBB54_6 +# BB#5: + movl 80(%esp), %esi # 4-byte Reload +.LBB54_6: + movl %esi, 8(%edx) + js .LBB54_8 +# BB#7: + movl %ecx, %ebx +.LBB54_8: + movl %ebx, 12(%edx) + addl $84, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end54: + .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L + + .globl mcl_fp_montRed4L + .align 16, 0x90 + .type mcl_fp_montRed4L,@function +mcl_fp_montRed4L: # @mcl_fp_montRed4L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $64, %esp + movl 92(%esp), %eax + movl -4(%eax), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl (%eax), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 88(%esp), %ecx + movl (%ecx), %esi + movl %esi, 40(%esp) # 4-byte Spill + imull %edx, %esi + movl 12(%eax), %edx + movl %edx, 
44(%esp) # 4-byte Spill + movl 8(%eax), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 4(%eax), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl %esi, %eax + mull %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %esi, %eax + mull %ebx + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %eax + mull %ebp + movl %edx, %ebp + movl %eax, %ebx + movl %esi, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %edx, %edi + addl %ebx, %edi + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl 32(%esp), %esi # 4-byte Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 40(%esp), %eax # 4-byte Folded Reload + adcl 4(%ecx), %edi + adcl 8(%ecx), %ebp + adcl 12(%ecx), %esi + movl %esi, 32(%esp) # 4-byte Spill + adcl 16(%ecx), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 28(%ecx), %eax + movl 24(%ecx), %edx + movl 20(%ecx), %ecx + adcl $0, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, 8(%esp) # 4-byte Spill + movl %edi, %ebx + imull 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, %eax + mull 44(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %ebx, %eax + mull 52(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %ebx, %eax + mull 48(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, (%esp) # 4-byte Spill + movl %ebx, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %edx, %esi + addl (%esp), %esi # 4-byte Folded Reload + movl %ecx, %ebx + adcl 4(%esp), %ebx # 4-byte Folded Reload + movl 24(%esp), %ecx # 4-byte Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl 28(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %edi, %eax + adcl %ebp, %esi + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + movl 8(%esp), %edi # 4-byte Reload + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl $0, 20(%esp) # 4-byte Folded Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %esi, %ebp + imull 56(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %eax + mull 44(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ebp, %eax + mull 52(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, %ebx + movl %ebp, %eax + mull 48(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 8(%esp) # 4-byte Spill + movl %ebp, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %edx, %ebp + addl 8(%esp), %ebp # 4-byte Folded Reload + adcl %ebx, %ecx + movl %ecx, %ebx + movl 32(%esp), %ecx # 4-byte Reload + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %esi, %eax + adcl 12(%esp), %ebp # 4-byte Folded Reload + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 32(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl 56(%esp), %esi # 4-byte Reload + imull %ebp, %esi + movl %esi, 56(%esp) # 4-byte Spill + movl %esi, %eax + 
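# montRed round sketch (word-wise Montgomery reduction, n = 4 limbs, w = 2^32):
+# u = t[i] * nprime mod w, where nprime = -p^{-1} mod w is read from -4(p);
+# the u*p partial products accumulated below clear limb i of the running sum,
+# so after 4 rounds the top half is t/R mod p, fixed up by one subtract of p. +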
mull 44(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 28(%esp) # 4-byte Spill + movl %esi, %eax + mull 52(%esp) # 4-byte Folded Reload + movl %edx, %ebx + movl %eax, 20(%esp) # 4-byte Spill + movl %esi, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, %esi + movl 56(%esp), %eax # 4-byte Reload + mull 48(%esp) # 4-byte Folded Reload + addl 16(%esp), %eax # 4-byte Folded Reload + adcl 20(%esp), %edx # 4-byte Folded Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + adcl $0, %ecx + addl %ebp, %esi + adcl 24(%esp), %eax # 4-byte Folded Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + adcl 40(%esp), %ecx # 4-byte Folded Reload + adcl $0, %edi + movl %eax, %ebp + subl 60(%esp), %ebp # 4-byte Folded Reload + movl %edx, %esi + sbbl 48(%esp), %esi # 4-byte Folded Reload + sbbl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + movl %ecx, %ebx + sbbl 44(%esp), %ebx # 4-byte Folded Reload + sbbl $0, %edi + andl $1, %edi + jne .LBB55_2 +# BB#1: + movl %ebp, %eax +.LBB55_2: + movl 84(%esp), %ebp + movl %eax, (%ebp) + movl %edi, %eax + testb %al, %al + jne .LBB55_4 +# BB#3: + movl %esi, %edx +.LBB55_4: + movl %edx, 4(%ebp) + movl 56(%esp), %eax # 4-byte Reload + jne .LBB55_6 +# BB#5: + movl 60(%esp), %eax # 4-byte Reload +.LBB55_6: + movl %eax, 8(%ebp) + jne .LBB55_8 +# BB#7: + movl %ebx, %ecx +.LBB55_8: + movl %ecx, 12(%ebp) + addl $64, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end55: + .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L + + .globl mcl_fp_addPre4L + .align 16, 0x90 + .type mcl_fp_addPre4L,@function +mcl_fp_addPre4L: # @mcl_fp_addPre4L +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 20(%esp), %esi + addl (%esi), %ecx + adcl 4(%esi), %edx + movl 12(%eax), %edi + movl 8(%eax), %eax + adcl 8(%esi), %eax + movl 12(%esi), %esi + movl 16(%esp), %ebx + movl %ecx, (%ebx) + movl %edx, 4(%ebx) + movl %eax, 8(%ebx) + adcl %edi, %esi + movl %esi, 12(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end56: + .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L + + .globl mcl_fp_subPre4L + .align 16, 0x90 + .type mcl_fp_subPre4L,@function +mcl_fp_subPre4L: # @mcl_fp_subPre4L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %esi + xorl %eax, %eax + movl 28(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %esi + movl 8(%ecx), %ebx + sbbl 8(%edi), %ebx + movl 12(%edi), %edi + movl 12(%ecx), %ecx + movl 20(%esp), %ebp + movl %edx, (%ebp) + movl %esi, 4(%ebp) + movl %ebx, 8(%ebp) + sbbl %edi, %ecx + movl %ecx, 12(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end57: + .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L + + .globl mcl_fp_shr1_4L + .align 16, 0x90 + .type mcl_fp_shr1_4L,@function +mcl_fp_shr1_4L: # @mcl_fp_shr1_4L +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %eax + movl 12(%eax), %ecx + movl 8(%eax), %edx + movl (%eax), %esi + movl 4(%eax), %eax + shrdl $1, %eax, %esi + movl 12(%esp), %edi + movl %esi, (%edi) + shrdl $1, %edx, %eax + movl %eax, 4(%edi) + shrdl $1, %ecx, %edx + movl %edx, 8(%edi) + shrl %ecx + movl %ecx, 12(%edi) + popl %esi + popl %edi + retl +.Lfunc_end58: + .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L + + .globl mcl_fp_add4L + 
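# mcl_fp_add4L(z, x, y, p): 4-limb modular add. The raw sum is stored to z
+# first; p is then subtracted, and the "nocarry" store below overwrites z
+# with the reduced value only when that subtraction does not borrow. +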
.align 16, 0x90 + .type mcl_fp_add4L,@function +mcl_fp_add4L: # @mcl_fp_add4L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 28(%esp), %edi + movl (%edi), %eax + movl 4(%edi), %ecx + movl 24(%esp), %esi + addl (%esi), %eax + adcl 4(%esi), %ecx + movl 8(%edi), %edx + adcl 8(%esi), %edx + movl 12(%esi), %esi + adcl 12(%edi), %esi + movl 20(%esp), %edi + movl %eax, (%edi) + movl %ecx, 4(%edi) + movl %edx, 8(%edi) + movl %esi, 12(%edi) + sbbl %ebx, %ebx + andl $1, %ebx + movl 32(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %ecx + sbbl 8(%ebp), %edx + sbbl 12(%ebp), %esi + sbbl $0, %ebx + testb $1, %bl + jne .LBB59_2 +# BB#1: # %nocarry + movl %eax, (%edi) + movl %ecx, 4(%edi) + movl %edx, 8(%edi) + movl %esi, 12(%edi) +.LBB59_2: # %carry + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end59: + .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L + + .globl mcl_fp_addNF4L + .align 16, 0x90 + .type mcl_fp_addNF4L,@function +mcl_fp_addNF4L: # @mcl_fp_addNF4L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $8, %esp + movl 36(%esp), %edx + movl (%edx), %esi + movl 4(%edx), %ecx + movl 32(%esp), %edi + addl (%edi), %esi + adcl 4(%edi), %ecx + movl 12(%edx), %ebp + movl 8(%edx), %edx + adcl 8(%edi), %edx + adcl 12(%edi), %ebp + movl 40(%esp), %eax + movl %esi, %ebx + subl (%eax), %ebx + movl %ecx, %edi + sbbl 4(%eax), %edi + movl %edi, (%esp) # 4-byte Spill + movl %edx, %edi + movl 40(%esp), %eax + sbbl 8(%eax), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %ebp, %edi + movl 40(%esp), %eax + sbbl 12(%eax), %edi + testl %edi, %edi + js .LBB60_2 +# BB#1: + movl %ebx, %esi +.LBB60_2: + movl 28(%esp), %ebx + movl %esi, (%ebx) + js .LBB60_4 +# BB#3: + movl (%esp), %ecx # 4-byte Reload +.LBB60_4: + movl %ecx, 4(%ebx) + js .LBB60_6 +# BB#5: + movl 4(%esp), %edx # 4-byte Reload +.LBB60_6: + movl %edx, 8(%ebx) + js .LBB60_8 +# BB#7: + movl %edi, %ebp +.LBB60_8: + movl %ebp, 12(%ebx) + addl $8, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end60: + .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L + + .globl mcl_fp_sub4L + .align 16, 0x90 + .type mcl_fp_sub4L,@function +mcl_fp_sub4L: # @mcl_fp_sub4L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 28(%esp), %edi + subl (%edi), %eax + sbbl 4(%edi), %ecx + movl 8(%esi), %edx + sbbl 8(%edi), %edx + movl 12(%esi), %esi + sbbl 12(%edi), %esi + movl 20(%esp), %edi + movl %eax, (%edi) + movl %ecx, 4(%edi) + movl %edx, 8(%edi) + movl %esi, 12(%edi) + sbbl $0, %ebx + testb $1, %bl + je .LBB61_2 +# BB#1: # %carry + movl 32(%esp), %ebx + addl (%ebx), %eax + movl 8(%ebx), %ebp + adcl 4(%ebx), %ecx + movl 12(%ebx), %ebx + movl %eax, (%edi) + movl %ecx, 4(%edi) + adcl %edx, %ebp + movl %ebp, 8(%edi) + adcl %esi, %ebx + movl %ebx, 12(%edi) +.LBB61_2: # %nocarry + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end61: + .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L + + .globl mcl_fp_subNF4L + .align 16, 0x90 + .type mcl_fp_subNF4L,@function +mcl_fp_subNF4L: # @mcl_fp_subNF4L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $8, %esp + movl 32(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %ecx + movl 36(%esp), %esi + subl (%esi), %edx + movl %edx, (%esp) # 4-byte Spill + sbbl 4(%esi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 12(%eax), %edi + movl 8(%eax), %edx + sbbl 8(%esi), %edx + sbbl 12(%esi), %edi + movl %edi, %esi + sarl $31, %esi + movl 40(%esp), %eax + movl 
12(%eax), %ebp + andl %esi, %ebp + movl 8(%eax), %ecx + andl %esi, %ecx + movl 40(%esp), %eax + movl 4(%eax), %eax + andl %esi, %eax + movl 40(%esp), %ebx + andl (%ebx), %esi + addl (%esp), %esi # 4-byte Folded Reload + adcl 4(%esp), %eax # 4-byte Folded Reload + movl 28(%esp), %ebx + movl %esi, (%ebx) + adcl %edx, %ecx + movl %eax, 4(%ebx) + movl %ecx, 8(%ebx) + adcl %edi, %ebp + movl %ebp, 12(%ebx) + addl $8, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end62: + .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L + + .globl mcl_fpDbl_add4L + .align 16, 0x90 + .type mcl_fpDbl_add4L,@function +mcl_fpDbl_add4L: # @mcl_fpDbl_add4L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $12, %esp + movl 40(%esp), %eax + movl (%eax), %edi + movl 4(%eax), %edx + movl 36(%esp), %esi + addl (%esi), %edi + adcl 4(%esi), %edx + movl 8(%eax), %ebx + adcl 8(%esi), %ebx + movl 12(%esi), %ebp + movl 32(%esp), %ecx + movl %edi, (%ecx) + movl 16(%esi), %edi + adcl 12(%eax), %ebp + adcl 16(%eax), %edi + movl %edx, 4(%ecx) + movl 28(%eax), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %ebx, 8(%ecx) + movl 24(%eax), %ebx + movl 20(%eax), %eax + movl %ebp, 12(%ecx) + movl 20(%esi), %edx + adcl %eax, %edx + movl 28(%esi), %ecx + movl 24(%esi), %ebp + adcl %ebx, %ebp + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + sbbl %ebx, %ebx + andl $1, %ebx + movl 44(%esp), %eax + movl %edi, %esi + subl (%eax), %esi + movl %esi, (%esp) # 4-byte Spill + movl %edx, %esi + sbbl 4(%eax), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl %ebp, %esi + sbbl 8(%eax), %esi + sbbl 12(%eax), %ecx + sbbl $0, %ebx + andl $1, %ebx + jne .LBB63_2 +# BB#1: + movl %esi, %ebp +.LBB63_2: + testb %bl, %bl + jne .LBB63_4 +# BB#3: + movl (%esp), %edi # 4-byte Reload +.LBB63_4: + movl 32(%esp), %eax + movl %edi, 16(%eax) + jne .LBB63_6 +# BB#5: + movl 4(%esp), %edx # 4-byte Reload +.LBB63_6: + movl %edx, 20(%eax) + movl %ebp, 24(%eax) + movl 8(%esp), %edx # 4-byte Reload + jne .LBB63_8 +# BB#7: + movl %ecx, %edx +.LBB63_8: + movl %edx, 28(%eax) + addl $12, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end63: + .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L + + .globl mcl_fpDbl_sub4L + .align 16, 0x90 + .type mcl_fpDbl_sub4L,@function +mcl_fpDbl_sub4L: # @mcl_fpDbl_sub4L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + pushl %eax + movl 28(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 32(%esp), %ebp + subl (%ebp), %edx + sbbl 4(%ebp), %esi + movl 8(%eax), %ebx + sbbl 8(%ebp), %ebx + movl 24(%esp), %ecx + movl %edx, (%ecx) + movl 12(%eax), %edx + sbbl 12(%ebp), %edx + movl %esi, 4(%ecx) + movl 16(%eax), %edi + sbbl 16(%ebp), %edi + movl %ebx, 8(%ecx) + movl 20(%ebp), %esi + movl %edx, 12(%ecx) + movl 20(%eax), %ebx + sbbl %esi, %ebx + movl 24(%ebp), %edx + movl 24(%eax), %esi + sbbl %edx, %esi + movl 28(%ebp), %edx + movl 28(%eax), %eax + sbbl %edx, %eax + movl %eax, (%esp) # 4-byte Spill + movl $0, %edx + sbbl $0, %edx + andl $1, %edx + movl 36(%esp), %ecx + movl (%ecx), %eax + jne .LBB64_1 +# BB#2: + xorl %ebp, %ebp + jmp .LBB64_3 +.LBB64_1: + movl 4(%ecx), %ebp +.LBB64_3: + testb %dl, %dl + jne .LBB64_5 +# BB#4: + movl $0, %eax +.LBB64_5: + jne .LBB64_6 +# BB#7: + movl $0, %edx + jmp .LBB64_8 +.LBB64_6: + movl 12(%ecx), %edx +.LBB64_8: + jne .LBB64_9 +# BB#10: + xorl %ecx, %ecx + jmp .LBB64_11 +.LBB64_9: + movl 8(%ecx), %ecx +.LBB64_11: + addl %edi, %eax + adcl %ebx, %ebp + movl 24(%esp), %edi + movl %eax, 16(%edi) + adcl 
%esi, %ecx + movl %ebp, 20(%edi) + movl %ecx, 24(%edi) + adcl (%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%edi) + addl $4, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end64: + .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L + + .globl mcl_fp_mulUnitPre5L + .align 16, 0x90 + .type mcl_fp_mulUnitPre5L,@function +mcl_fp_mulUnitPre5L: # @mcl_fp_mulUnitPre5L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 48(%esp), %esi + movl 44(%esp), %ecx + movl %esi, %eax + mull 16(%ecx) + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %esi, %eax + mull 12(%ecx) + movl %edx, %ebx + movl %eax, 8(%esp) # 4-byte Spill + movl %esi, %eax + mull 8(%ecx) + movl %edx, %edi + movl %eax, 4(%esp) # 4-byte Spill + movl %esi, %eax + mull 4(%ecx) + movl %edx, %ebp + movl %eax, (%esp) # 4-byte Spill + movl %esi, %eax + mull (%ecx) + movl 40(%esp), %ecx + movl %eax, (%ecx) + addl (%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%ecx) + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%ecx) + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%ecx) + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%ecx) + movl 16(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 20(%ecx) + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end65: + .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L + + .globl mcl_fpDbl_mulPre5L + .align 16, 0x90 + .type mcl_fpDbl_mulPre5L,@function +mcl_fpDbl_mulPre5L: # @mcl_fpDbl_mulPre5L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $64, %esp + movl 88(%esp), %esi + movl (%esi), %ebp + movl 92(%esp), %eax + movl (%eax), %ebx + movl %eax, %edi + movl %ebp, %eax + mull %ebx + movl %edx, 36(%esp) # 4-byte Spill + movl 84(%esp), %edx + movl %eax, (%edx) + movl %esi, %eax + movl 4(%eax), %esi + movl 8(%eax), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 12(%eax), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 16(%eax), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 4(%edi), %edi + movl %esi, %eax + mull %edi + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ebp, %eax + mull %edi + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %eax + mull %ebx + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, %ebp + movl %ecx, %eax + mull %edi + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl (%esp), %esi # 4-byte Reload + movl %esi, %eax + mull %edi + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, %eax + mull %edi + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + mull %ebx + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %esi, %eax + mull %ebx + movl %edx, %esi + movl %eax, (%esp) # 4-byte Spill + movl %ecx, %eax + mull %ebx + movl %eax, %edi + movl %edx, %ebx + addl 36(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %edi # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + adcl 4(%esp), %esi # 4-byte Folded Reload + movl 60(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 24(%esp), %ebp # 4-byte Folded Reload + movl 84(%esp), %eax + movl %ebp, 4(%eax) + adcl 40(%esp), %edi # 4-byte Folded Reload + adcl 16(%esp), %ebx # 4-byte Folded Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + sbbl 
%ebp, %ebp + andl $1, %ebp + addl 52(%esp), %edi # 4-byte Folded Reload + adcl 56(%esp), %ebx # 4-byte Folded Reload + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + adcl 48(%esp), %ebp # 4-byte Folded Reload + movl 88(%esp), %eax + movl %eax, %esi + movl 16(%esi), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl 8(%eax), %ecx + movl %edx, %eax + mull %ecx + movl %eax, 56(%esp) # 4-byte Spill + movl %edx, 32(%esp) # 4-byte Spill + movl 12(%esi), %eax + movl %eax, 40(%esp) # 4-byte Spill + mull %ecx + movl %eax, 52(%esp) # 4-byte Spill + movl %edx, 20(%esp) # 4-byte Spill + movl %esi, %edx + movl 8(%edx), %eax + movl %eax, 36(%esp) # 4-byte Spill + mull %ecx + movl %eax, 48(%esp) # 4-byte Spill + movl %edx, 12(%esp) # 4-byte Spill + movl %esi, %eax + movl (%eax), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 4(%eax), %eax + movl %eax, 24(%esp) # 4-byte Spill + mull %ecx + movl %edx, 8(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %esi, %eax + mull %ecx + addl %edi, %eax + movl 84(%esp), %ecx + movl %eax, 8(%ecx) + movl 4(%esp), %ecx # 4-byte Reload + adcl %ebx, %ecx + movl 48(%esp), %edi # 4-byte Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + movl 52(%esp), %esi # 4-byte Reload + adcl 60(%esp), %esi # 4-byte Folded Reload + movl 56(%esp), %eax # 4-byte Reload + adcl %ebp, %eax + sbbl %ebx, %ebx + andl $1, %ebx + addl %edx, %ecx + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 48(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 52(%esp) # 4-byte Spill + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl 12(%eax), %ebp + movl 44(%esp), %eax # 4-byte Reload + mull %ebp + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + mull %ebp + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, %esi + movl 36(%esp), %eax # 4-byte Reload + mull %ebp + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, %ebx + movl 24(%esp), %eax # 4-byte Reload + mull %ebp + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, %edi + movl 28(%esp), %eax # 4-byte Reload + mull %ebp + movl %edx, 24(%esp) # 4-byte Spill + addl %ecx, %eax + movl 84(%esp), %edx + movl %eax, 12(%edx) + adcl 48(%esp), %edi # 4-byte Folded Reload + adcl 52(%esp), %ebx # 4-byte Folded Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 4(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 60(%esp) # 4-byte Folded Spill + movl 92(%esp), %eax + movl 16(%eax), %ebp + sbbl %ecx, %ecx + movl %ebp, %eax + movl 88(%esp), %esi + mull 16(%esi) + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ebp, %eax + mull 12(%esi) + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ebp, %eax + mull 8(%esi) + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %ebp, %eax + mull 4(%esi) + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ebp, %eax + mull (%esi) + movl %eax, %ebp + andl $1, %ecx + addl 24(%esp), %edi # 4-byte Folded Reload + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl 4(%esp), %esi # 4-byte Reload + adcl 36(%esp), %esi # 4-byte Folded Reload + movl 60(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded 
Reload + adcl 44(%esp), %ecx # 4-byte Folded Reload + addl %ebp, %edi + movl 84(%esp), %ebp + movl %edi, 16(%ebp) + adcl 8(%esp), %ebx # 4-byte Folded Reload + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %eax, %edi + adcl 16(%esp), %edi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl %edx, %ebx + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %ebp, %edx + movl %ebx, 20(%edx) + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %esi, 24(%edx) + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %edi, 28(%edx) + movl %ecx, 32(%edx) + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%edx) + addl $64, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end66: + .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L + + .globl mcl_fpDbl_sqrPre5L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre5L,@function +mcl_fpDbl_sqrPre5L: # @mcl_fpDbl_sqrPre5L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 84(%esp), %ebx + movl 16(%ebx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl (%ebx), %edi + movl 4(%ebx), %ecx + mull %ecx + movl %eax, 32(%esp) # 4-byte Spill + movl %edx, 40(%esp) # 4-byte Spill + movl 12(%ebx), %esi + movl %esi, %eax + mull %ecx + movl %eax, 24(%esp) # 4-byte Spill + movl %edx, 36(%esp) # 4-byte Spill + movl 8(%ebx), %ebx + movl %ebx, %eax + mull %ecx + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + mull %edi + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %esi, %eax + mull %edi + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %ebx, %eax + mull %edi + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, %ebp + movl %ecx, %eax + mull %ecx + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edi + movl %edx, %esi + movl %esi, (%esp) # 4-byte Spill + movl %eax, %ecx + movl %edi, %eax + mull %edi + movl 80(%esp), %edi + movl %eax, (%edi) + addl %ecx, %edx + adcl %esi, %ebp + movl 56(%esp), %esi # 4-byte Reload + adcl 4(%esp), %esi # 4-byte Folded Reload + movl 48(%esp), %edi # 4-byte Reload + adcl 12(%esp), %edi # 4-byte Folded Reload + movl 52(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %ecx, %edx + movl 80(%esp), %ecx + movl %edx, 4(%ecx) + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %esi, %ecx + adcl 20(%esp), %ecx # 4-byte Folded Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + sbbl %esi, %esi + andl $1, %esi + addl (%esp), %ebp # 4-byte Folded Reload + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 48(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + movl 44(%esp), %eax # 4-byte Reload + mull %ebx + movl %eax, 40(%esp) # 4-byte Spill + movl %edx, 20(%esp) # 4-byte Spill + movl 84(%esp), %ecx + movl 12(%ecx), %edi + movl %edi, %eax + mull %ebx + movl %eax, 36(%esp) # 4-byte Spill + movl %edx, 16(%esp) # 4-byte Spill + movl %ecx, %eax + movl (%eax), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 4(%eax), %eax + movl %eax, 28(%esp) # 4-byte Spill + mull %ebx + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull %ebx + movl %edx, 8(%esp) # 4-byte Spill + movl %eax, %ecx + movl %ebx, %eax + mull 
%ebx + movl %eax, 44(%esp) # 4-byte Spill + addl %ebp, %ecx + movl 80(%esp), %eax + movl %ecx, 8(%eax) + movl 32(%esp), %ebx # 4-byte Reload + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl 44(%esp), %ebp # 4-byte Reload + adcl 48(%esp), %ebp # 4-byte Folded Reload + movl 36(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl 40(%esp), %eax # 4-byte Reload + adcl %esi, %eax + sbbl %esi, %esi + andl $1, %esi + addl 8(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 32(%esp) # 4-byte Spill + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%esp) # 4-byte Spill + adcl %edx, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl 8(%eax), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 16(%eax), %ebx + movl %ebx, %eax + mull %edi + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edi + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, %esi + movl 28(%esp), %eax # 4-byte Reload + mull %edi + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, %ebp + movl 24(%esp), %eax # 4-byte Reload + mull %edi + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, %ecx + movl %edi, %eax + mull %edi + movl %eax, %edi + movl %edx, 24(%esp) # 4-byte Spill + addl 32(%esp), %ecx # 4-byte Folded Reload + movl 80(%esp), %eax + movl %ecx, 12(%eax) + adcl 44(%esp), %ebp # 4-byte Folded Reload + adcl 36(%esp), %esi # 4-byte Folded Reload + adcl 40(%esp), %edi # 4-byte Folded Reload + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + sbbl %ecx, %ecx + movl %ebx, %eax + movl 84(%esp), %edx + mull 12(%edx) + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebx, %eax + movl 84(%esp), %edx + mull 4(%edx) + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ebx, %eax + movl 84(%esp), %edx + mull (%edx) + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, (%esp) # 4-byte Spill + movl %ebx, %eax + mull %ebx + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + mull %ebx + movl %eax, 20(%esp) # 4-byte Spill + andl $1, %ecx + addl 16(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + adcl 48(%esp), %edi # 4-byte Folded Reload + movl 56(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + addl (%esp), %ebp # 4-byte Folded Reload + movl 80(%esp), %ebx + movl %ebp, 16(%ebx) + adcl 8(%esp), %esi # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %eax, %ebp + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl 4(%esp), %ecx # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 12(%esp), %esi # 4-byte Folded Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %esi, 20(%ebx) + adcl %edx, %ebp + movl %edi, 24(%ebx) + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ebp, 28(%ebx) + movl %ecx, 32(%ebx) + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ebx) + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end67: + .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L + + .globl mcl_fp_mont5L + .align 16, 0x90 + .type mcl_fp_mont5L,@function +mcl_fp_mont5L: # @mcl_fp_mont5L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $112, %esp + movl 
136(%esp), %ebx + movl (%ebx), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 140(%esp), %ecx + movl (%ecx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + mull %ecx + movl %eax, 36(%esp) # 4-byte Spill + movl %edx, 40(%esp) # 4-byte Spill + movl 144(%esp), %esi + movl -4(%esi), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl %eax, %ecx + imull %edx, %ecx + movl (%esi), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 16(%esi), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 12(%esi), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 8(%esi), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 4(%esi), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 4(%ebx), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 16(%ebx), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 12(%ebx), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 8(%ebx), %ebx + movl %ebx, 76(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ecx, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %ecx, %eax + mull %esi + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 16(%esp) # 4-byte Spill + movl %ebp, %eax + movl 32(%esp), %ecx # 4-byte Reload + mull %ecx + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %edi, %eax + mull %ecx + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %ebx, %eax + mull %ecx + movl %edx, %ebp + movl %eax, (%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, %ebx + movl %eax, %edi + addl 40(%esp), %edi # 4-byte Folded Reload + adcl (%esp), %ebx # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 32(%esp) # 4-byte Spill + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 60(%esp) # 4-byte Folded Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + addl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + movl 48(%esp), %edx # 4-byte Reload + adcl 24(%esp), %edx # 4-byte Folded Reload + movl 52(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 16(%esp), %ebp # 4-byte Reload + addl 36(%esp), %ebp # 4-byte Folded Reload + adcl %edi, 40(%esp) # 4-byte Folded Spill + adcl %ebx, %esi + movl %esi, 44(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 140(%esp), %eax + movl 4(%eax), %edi + movl %edi, %eax + mull 72(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %edi, %eax + mull 68(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %edi, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 24(%esp) # 4-byte Spill + movl %edi, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, %esi + 
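# mcl_fp_mont5L interleaves multiply and reduce, one limb of y at a time:
+# t += x*y[i]; u = t[0]*nprime mod 2^32; t += u*p; t >>= 32. A final
+# conditional subtract of p (the branches at the end) reduces below p. +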
movl %eax, %ebp + movl %edi, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %eax, %ebx + addl %ebp, %edx + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl 64(%esp), %edi # 4-byte Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + movl 60(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 36(%esp) # 4-byte Spill + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl 48(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 64(%esp) # 4-byte Spill + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %ebx, %ebp + imull 96(%esp), %ebp # 4-byte Folded Reload + andl $1, %eax + movl %eax, %ebx + movl %ebp, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %ebp, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 32(%esp) # 4-byte Spill + movl %ebp, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 28(%esp) # 4-byte Spill + movl %ebp, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 24(%esp) # 4-byte Spill + movl %ebp, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %ebp + addl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %ecx, %edx + adcl 56(%esp), %edx # 4-byte Folded Reload + movl 52(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 36(%esp), %eax # 4-byte Folded Reload + adcl 44(%esp), %ebp # 4-byte Folded Reload + adcl 48(%esp), %edi # 4-byte Folded Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 44(%esp) # 4-byte Spill + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl $0, %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 140(%esp), %eax + movl 8(%eax), %ebx + movl %ebx, %eax + mull 72(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %eax + mull 68(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ebx, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebx, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 48(%esp) # 4-byte Spill + movl %ebx, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %eax, %esi + addl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 64(%esp), %ebx # 4-byte Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl 60(%esp), %edx # 4-byte Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl 56(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %ebp, %esi + movl %esi, 40(%esp) # 4-byte Spill + adcl %edi, 48(%esp) # 4-byte Folded Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 44(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 
56(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %esi, %ebp + imull 96(%esp), %ebp # 4-byte Folded Reload + andl $1, %eax + movl %eax, %ebx + movl %ebp, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ebp, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 32(%esp) # 4-byte Spill + movl %ebp, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 28(%esp) # 4-byte Spill + movl %ebp, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 24(%esp) # 4-byte Spill + movl %ebp, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %ebp + addl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %edi, %edx + adcl 32(%esp), %edx # 4-byte Folded Reload + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %edi # 4-byte Reload + adcl $0, %edi + addl 40(%esp), %eax # 4-byte Folded Reload + adcl 48(%esp), %ebp # 4-byte Folded Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 32(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + adcl $0, %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 140(%esp), %eax + movl 12(%eax), %edi + movl %edi, %eax + mull 72(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %edi, %eax + mull 68(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %edi, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %edi, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 48(%esp) # 4-byte Spill + movl %edi, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %eax, %ebx + addl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 64(%esp), %edi # 4-byte Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl 60(%esp), %edx # 4-byte Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl 56(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %ebp, %ebx + movl %ebx, 40(%esp) # 4-byte Spill + adcl %esi, 48(%esp) # 4-byte Folded Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 44(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 64(%esp) # 4-byte Spill + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %ebx, %ebp + imull 96(%esp), %ebp # 4-byte Folded Reload + andl $1, %eax + movl %eax, %ebx + movl %ebp, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ebp, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 32(%esp) # 4-byte Spill + movl %ebp, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 28(%esp) # 4-byte Spill + movl %ebp, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 24(%esp) # 4-byte Spill + movl %ebp, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %ebp + addl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + adcl 32(%esp), %esi # 4-byte 
Folded Reload + movl %ecx, %edx + adcl 36(%esp), %edx # 4-byte Folded Reload + movl 52(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 40(%esp), %eax # 4-byte Folded Reload + adcl 48(%esp), %ebp # 4-byte Folded Reload + adcl 44(%esp), %edi # 4-byte Folded Reload + adcl 64(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + adcl 60(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl $0, %ebx + movl %ebx, 64(%esp) # 4-byte Spill + movl 140(%esp), %eax + movl 16(%eax), %ebx + movl %ebx, %eax + mull 72(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 56(%esp) # 4-byte Spill + movl %ebx, %eax + mull 68(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %ebx, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl %ebx, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + addl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl 76(%esp), %ebx # 4-byte Reload + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl 72(%esp), %edx # 4-byte Reload + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %esi, %eax + adcl $0, %eax + movl 84(%esp), %esi # 4-byte Reload + addl %ebp, %esi + movl %esi, 84(%esp) # 4-byte Spill + adcl %edi, 80(%esp) # 4-byte Folded Spill + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 76(%esp) # 4-byte Spill + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + sbbl %ebx, %ebx + movl 96(%esp), %ecx # 4-byte Reload + imull %esi, %ecx + movl %ecx, 96(%esp) # 4-byte Spill + andl $1, %ebx + movl %ecx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 60(%esp) # 4-byte Spill + movl %ecx, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 52(%esp) # 4-byte Spill + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %ecx, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, %ecx + movl 96(%esp), %eax # 4-byte Reload + mull 88(%esp) # 4-byte Folded Reload + addl 48(%esp), %eax # 4-byte Folded Reload + adcl %ecx, %edx + adcl 52(%esp), %esi # 4-byte Folded Reload + adcl 60(%esp), %ebp # 4-byte Folded Reload + adcl $0, %edi + movl 56(%esp), %ecx # 4-byte Reload + addl 84(%esp), %ecx # 4-byte Folded Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + adcl 68(%esp), %edx # 4-byte Folded Reload + adcl 76(%esp), %esi # 4-byte Folded Reload + adcl 72(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 96(%esp) # 4-byte Spill + adcl 64(%esp), %edi # 4-byte Folded Reload + adcl $0, %ebx + movl %eax, %ecx + subl 100(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 100(%esp) # 4-byte Spill + movl %edx, %ecx + sbbl 88(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 88(%esp) # 4-byte Spill + movl %esi, %ecx + sbbl 104(%esp), %ecx # 4-byte Folded Reload + sbbl 108(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 108(%esp) # 4-byte Spill + movl %edi, %ebp + sbbl 92(%esp), 
%ebp # 4-byte Folded Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB68_2 +# BB#1: + movl 88(%esp), %edx # 4-byte Reload +.LBB68_2: + testb %bl, %bl + jne .LBB68_4 +# BB#3: + movl 100(%esp), %eax # 4-byte Reload +.LBB68_4: + movl 132(%esp), %ebx + movl %eax, (%ebx) + movl %edx, 4(%ebx) + jne .LBB68_6 +# BB#5: + movl %ecx, %esi +.LBB68_6: + movl %esi, 8(%ebx) + movl 96(%esp), %eax # 4-byte Reload + jne .LBB68_8 +# BB#7: + movl 108(%esp), %eax # 4-byte Reload +.LBB68_8: + movl %eax, 12(%ebx) + jne .LBB68_10 +# BB#9: + movl %ebp, %edi +.LBB68_10: + movl %edi, 16(%ebx) + addl $112, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end68: + .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L + + .globl mcl_fp_montNF5L + .align 16, 0x90 + .type mcl_fp_montNF5L,@function +mcl_fp_montNF5L: # @mcl_fp_montNF5L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $104, %esp + movl 128(%esp), %ebx + movl (%ebx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 132(%esp), %ecx + movl (%ecx), %ecx + mull %ecx + movl %eax, 48(%esp) # 4-byte Spill + movl %edx, 68(%esp) # 4-byte Spill + movl 136(%esp), %esi + movl -4(%esi), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, %edi + imull %edx, %edi + movl (%esi), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 16(%esi), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 12(%esi), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 8(%esi), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 4(%esi), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 4(%ebx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 16(%ebx), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 12(%ebx), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 8(%ebx), %ebx + movl %ebx, 60(%esp) # 4-byte Spill + movl %edi, %eax + mull %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %edi, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %edi, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %edi, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %edi, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ebp, %eax + mull %ecx + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %esi, %eax + mull %ecx + movl %edx, %edi + movl %eax, (%esp) # 4-byte Spill + movl %ebx, %eax + mull %ecx + movl %edx, %ebp + movl %eax, %ebx + movl 76(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, %ecx + movl %eax, %esi + addl 68(%esp), %esi # 4-byte Folded Reload + adcl %ebx, %ecx + adcl (%esp), %ebp # 4-byte Folded Reload + adcl 4(%esp), %edi # 4-byte Folded Reload + movl 72(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 8(%esp), %edx # 4-byte Reload + addl 48(%esp), %edx # 4-byte Folded Reload + adcl 12(%esp), %esi # 4-byte Folded Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + adcl 28(%esp), %ebp # 4-byte Folded Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + adcl $0, %eax + addl 16(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl 40(%esp), %edi # 4-byte Folded Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl 4(%eax), %ebx + movl %ebx, %eax + mull 52(%esp) # 
4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl %ebx, %eax + mull 56(%esp) # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebx, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %ebx, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebx, %eax + mull 64(%esp) # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + addl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 36(%esp), %ebx # 4-byte Reload + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl 40(%esp), %edx # 4-byte Reload + adcl 44(%esp), %edx # 4-byte Folded Reload + movl 68(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %esi, 28(%esp) # 4-byte Folded Spill + adcl %ecx, 32(%esp) # 4-byte Folded Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl %ebp, %ecx + adcl %edi, %ebx + movl %ebx, 36(%esp) # 4-byte Spill + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 28(%esp), %esi # 4-byte Reload + movl %esi, %edi + imull 84(%esp), %edi # 4-byte Folded Reload + movl %edi, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %edi, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl %edi, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, %ebp + movl %edi, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, %ebx + movl %edi, %eax + mull 100(%esp) # 4-byte Folded Reload + addl %esi, %eax + adcl 32(%esp), %ebx # 4-byte Folded Reload + adcl %ecx, %ebp + movl 44(%esp), %ecx # 4-byte Reload + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl 48(%esp), %esi # 4-byte Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + movl 68(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %edx, %ebx + adcl 16(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 40(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 44(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl 8(%eax), %ecx + movl %ecx, %eax + mull 52(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 56(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ecx, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 20(%esp) # 4-byte Spill + movl %ecx, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, %eax + mull 64(%esp) # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + movl %edx, %ebp + addl 16(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + movl 36(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl 72(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 32(%esp), %edx # 4-byte Reload + addl 
%ebx, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + adcl 44(%esp), %edi # 4-byte Folded Reload + adcl 48(%esp), %esi # 4-byte Folded Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %edx, %ebx + imull 84(%esp), %ebx # 4-byte Folded Reload + movl %ebx, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %ebx, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl %ebx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, %ecx + movl %ebx, %eax + mull 100(%esp) # 4-byte Folded Reload + addl 32(%esp), %eax # 4-byte Folded Reload + adcl %ebp, %ecx + movl 40(%esp), %ebx # 4-byte Reload + adcl %edi, %ebx + movl 44(%esp), %edi # 4-byte Reload + adcl %esi, %edi + movl 48(%esp), %esi # 4-byte Reload + adcl 36(%esp), %esi # 4-byte Folded Reload + movl 72(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %edx, %ecx + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 44(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl 12(%eax), %edi + movl %edi, %eax + mull 52(%esp) # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %edi, %eax + mull 56(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %edi, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 24(%esp) # 4-byte Spill + movl %edi, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, %ebx + movl %eax, 20(%esp) # 4-byte Spill + movl %edi, %eax + mull 64(%esp) # 4-byte Folded Reload + movl %edx, %ebp + addl 20(%esp), %ebp # 4-byte Folded Reload + adcl 24(%esp), %ebx # 4-byte Folded Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + movl 68(%esp), %edi # 4-byte Reload + adcl $0, %edi + addl %ecx, %eax + movl %eax, 20(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + adcl 44(%esp), %ebx # 4-byte Folded Reload + adcl 48(%esp), %esi # 4-byte Folded Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 68(%esp) # 4-byte Spill + movl %eax, %ecx + imull 84(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 72(%esp) # 4-byte Spill + movl %ecx, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %ecx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, %edi + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + addl 20(%esp), %eax # 4-byte Folded Reload + movl %edi, %edx + adcl %ebp, %edx + 
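# same per-limb flow as mcl_fp_mont5L, but the NF variant carries no extra
+# overflow word; the final correction is the sign-tested selects (js) at
+# the end of the routine rather than a borrow-flag test. +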
movl 44(%esp), %edi # 4-byte Reload + adcl %ebx, %edi + movl 48(%esp), %ecx # 4-byte Reload + adcl %esi, %ecx + movl 72(%esp), %esi # 4-byte Reload + adcl 36(%esp), %esi # 4-byte Folded Reload + movl 68(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 44(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 72(%esp) # 4-byte Spill + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl 16(%eax), %ecx + movl %ecx, %eax + mull 52(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 52(%esp) # 4-byte Spill + movl %ecx, %eax + mull 56(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 56(%esp) # 4-byte Spill + movl %ecx, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %edx, %ebx + movl %eax, 60(%esp) # 4-byte Spill + movl %ecx, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 76(%esp) # 4-byte Spill + movl %ecx, %eax + mull 64(%esp) # 4-byte Folded Reload + addl 76(%esp), %edx # 4-byte Folded Reload + adcl 60(%esp), %ebp # 4-byte Folded Reload + adcl 56(%esp), %ebx # 4-byte Folded Reload + adcl 52(%esp), %esi # 4-byte Folded Reload + adcl $0, %edi + addl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 48(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 60(%esp) # 4-byte Spill + adcl 72(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + adcl 68(%esp), %esi # 4-byte Folded Reload + movl %esi, 68(%esp) # 4-byte Spill + adcl $0, %edi + movl 84(%esp), %ecx # 4-byte Reload + imull %eax, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %ecx, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, %ebp + movl %ecx, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, %esi + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, %ebx + movl %ecx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, %ecx + movl 84(%esp), %eax # 4-byte Reload + mull 88(%esp) # 4-byte Folded Reload + addl 40(%esp), %ebx # 4-byte Folded Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + adcl 60(%esp), %ecx # 4-byte Folded Reload + adcl 64(%esp), %esi # 4-byte Folded Reload + adcl 68(%esp), %ebp # 4-byte Folded Reload + adcl $0, %edi + addl 52(%esp), %eax # 4-byte Folded Reload + adcl %edx, %ecx + adcl 48(%esp), %esi # 4-byte Folded Reload + movl %esi, 84(%esp) # 4-byte Spill + adcl 72(%esp), %ebp # 4-byte Folded Reload + adcl 76(%esp), %edi # 4-byte Folded Reload + movl %eax, %ebx + subl 100(%esp), %ebx # 4-byte Folded Reload + movl %ecx, %edx + sbbl 88(%esp), %edx # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + sbbl 92(%esp), %esi # 4-byte Folded Reload + movl %esi, 92(%esp) # 4-byte Spill + movl %ebp, %edx + sbbl 96(%esp), %edx # 4-byte Folded Reload + movl %edx, 100(%esp) # 4-byte Spill + movl %edi, %edx + movl %edi, %esi + sbbl 80(%esp), %edi # 4-byte Folded Reload + movl %edi, %edx + sarl $31, %edx + testl %edx, %edx + js .LBB69_2 +# BB#1: + movl %ebx, %eax +.LBB69_2: + movl 124(%esp), %edx + movl %eax, (%edx) + js .LBB69_4 +# BB#3: + movl 88(%esp), %ecx # 
4-byte Reload +.LBB69_4: + movl %ecx, 4(%edx) + movl 84(%esp), %eax # 4-byte Reload + js .LBB69_6 +# BB#5: + movl 92(%esp), %eax # 4-byte Reload +.LBB69_6: + movl %eax, 8(%edx) + js .LBB69_8 +# BB#7: + movl 100(%esp), %ebp # 4-byte Reload +.LBB69_8: + movl %ebp, 12(%edx) + js .LBB69_10 +# BB#9: + movl %edi, %esi +.LBB69_10: + movl %esi, 16(%edx) + addl $104, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end69: + .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L + + .globl mcl_fp_montRed5L + .align 16, 0x90 + .type mcl_fp_montRed5L,@function +mcl_fp_montRed5L: # @mcl_fp_montRed5L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $88, %esp + movl 116(%esp), %eax + movl -4(%eax), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl (%eax), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 112(%esp), %esi + movl (%esi), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + imull %edx, %ecx + movl 16(%eax), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 12(%eax), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 8(%eax), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 4(%eax), %ebx + movl %ebx, 60(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edi + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 52(%esp) # 4-byte Spill + movl %ecx, %eax + mull %ebp + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ecx, %eax + mull %ebx + movl %edx, %ebp + movl %eax, %edi + movl %ecx, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, %ebx + addl %edi, %ebx + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 40(%esp), %edi # 4-byte Reload + adcl 52(%esp), %edi # 4-byte Folded Reload + movl 44(%esp), %edx # 4-byte Reload + adcl 56(%esp), %edx # 4-byte Folded Reload + movl 48(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 80(%esp), %eax # 4-byte Folded Reload + adcl 4(%esi), %ebx + adcl 8(%esi), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + adcl 12(%esi), %edi + movl %edi, 40(%esp) # 4-byte Spill + adcl 16(%esi), %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 20(%esi), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 36(%esi), %eax + movl 32(%esi), %ecx + movl 28(%esi), %edx + movl 24(%esi), %esi + adcl $0, %esi + movl %esi, 12(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl %ebx, %ecx + imull 76(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %eax + mull 72(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ecx, %eax + mull 68(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %ecx, %eax + mull 64(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, (%esp) # 4-byte Spill + movl %ecx, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, %esi + movl %ecx, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %eax, %edi + addl %esi, %edx + movl %edx, %ebp + movl 24(%esp), %ecx # 4-byte Reload + adcl (%esp), %ecx # 4-byte Folded Reload + movl 28(%esp), %eax # 4-byte Reload + adcl 4(%esp), %eax # 4-byte Folded Reload + movl 32(%esp), %esi # 4-byte Reload + adcl 8(%esp), %esi # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 
%ebx, %edi + adcl 16(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esp) # 4-byte Spill + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + adcl 48(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 12(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl $0, 20(%esp) # 4-byte Folded Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ebp, %esi + imull 76(%esp), %esi # 4-byte Folded Reload + movl %esi, %eax + mull 72(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %esi, %eax + mull 68(%esp) # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %esi, %eax + mull 64(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 8(%esp) # 4-byte Spill + movl %esi, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, %ebx + movl %esi, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, %ebp + addl %ebx, %ebp + adcl 8(%esp), %ecx # 4-byte Folded Reload + adcl 12(%esp), %edi # 4-byte Folded Reload + movl 44(%esp), %esi # 4-byte Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + movl 48(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 16(%esp), %eax # 4-byte Folded Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 44(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + movl 56(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ebp, %edi + imull 76(%esp), %edi # 4-byte Folded Reload + movl %edi, %eax + mull 72(%esp) # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %edi, %eax + mull 68(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %edi, %eax + mull 64(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %edi, %eax + mull 60(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 8(%esp) # 4-byte Spill + movl %edi, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + movl %edx, %ebx + addl 8(%esp), %ebx # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl 32(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl 36(%esp), %edi # 4-byte Reload + adcl 56(%esp), %edi # 4-byte Folded Reload + movl 40(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %ebp, 16(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 56(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl 76(%esp), %esi # 4-byte Reload + imull %ebx, %esi + 
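The surrounding mcl_fp_montRed5L is LLVM-generated word-serial Montgomery reduction over five 32-bit limbs: each round multiplies the lowest live limb by m' = -p^-1 mod 2^32 (loaded from -4 off the modulus pointer and applied with imull, as in the line just above), folds q*p into the accumulator through a mull/adcl chain, and retires one limb; the trailing sbbl/jne block then subtracts p at most once. A minimal C sketch of those semantics, assuming t < p * 2^160 on entry; mont_red and its parameter names are illustrative, not part of mcl's API:

#include <stdint.h>

#define N 5  /* limb count of the 5L routines */

/* Hypothetical reference: z = t / 2^(32*N) mod p, with mp = -1/p mod 2^32.
   Mirrors the round structure of the assembly, not its register schedule. */
static void mont_red(uint32_t z[N], uint32_t t[2 * N],
                     const uint32_t p[N], uint32_t mp)
{
    uint32_t top = 0;                      /* extra carry bit beyond t[2N-1] */
    for (int i = 0; i < N; i++) {
        uint32_t q = t[i] * mp;            /* quotient limb: the imull step */
        uint64_t carry = 0;
        for (int j = 0; j < N; j++) {      /* t += q*p << (32*i): mull/adcl */
            uint64_t cur = (uint64_t)t[i + j] + (uint64_t)q * p[j] + carry;
            t[i + j] = (uint32_t)cur;
            carry = cur >> 32;
        }
        for (int k = i + N; carry != 0; k++) {  /* ripple the carry upward */
            if (k == 2 * N) { top += (uint32_t)carry; break; }
            uint64_t cur = (uint64_t)t[k] + carry;
            t[k] = (uint32_t)cur;
            carry = cur >> 32;
        }
    }
    uint64_t borrow = 0;                   /* trial subtraction of p */
    uint32_t s[N];
    for (int j = 0; j < N; j++) {
        uint64_t cur = (uint64_t)t[N + j] - p[j] - borrow;
        s[j] = (uint32_t)cur;
        borrow = (cur >> 32) & 1;
    }
    for (int j = 0; j < N; j++)            /* keep t only if t < p already */
        z[j] = (borrow && !top) ? t[N + j] : s[j];
}

The extra top bit corresponds to the carry flag the assembly materializes with sbbl %eax, %eax / andl $1 and parks in a spill slot before the final select.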
movl %esi, 76(%esp) # 4-byte Spill + movl %esi, %eax + mull 72(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 52(%esp) # 4-byte Spill + movl %esi, %eax + mull 68(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 48(%esp) # 4-byte Spill + movl %esi, %eax + mull 64(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 44(%esp) # 4-byte Spill + movl %esi, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, %esi + movl 76(%esp), %eax # 4-byte Reload + mull 60(%esp) # 4-byte Folded Reload + addl 24(%esp), %eax # 4-byte Folded Reload + adcl 44(%esp), %edx # 4-byte Folded Reload + adcl 48(%esp), %ecx # 4-byte Folded Reload + adcl 52(%esp), %edi # 4-byte Folded Reload + adcl $0, %ebp + addl %ebx, %esi + adcl 28(%esp), %eax # 4-byte Folded Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 76(%esp) # 4-byte Spill + adcl 56(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 56(%esp) # 4-byte Spill + movl 80(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + movl %eax, %esi + subl 84(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + movl %edx, %esi + sbbl 60(%esp), %esi # 4-byte Folded Reload + sbbl 64(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + sbbl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 68(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + movl %edi, %ecx + sbbl 72(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 84(%esp) # 4-byte Spill + sbbl $0, %ebx + andl $1, %ebx + movl %ebx, 80(%esp) # 4-byte Spill + jne .LBB70_2 +# BB#1: + movl %esi, %edx +.LBB70_2: + movl 80(%esp), %ebx # 4-byte Reload + testb %bl, %bl + jne .LBB70_4 +# BB#3: + movl 48(%esp), %eax # 4-byte Reload +.LBB70_4: + movl 108(%esp), %ecx + movl %eax, (%ecx) + movl %edx, 4(%ecx) + movl 52(%esp), %eax # 4-byte Reload + jne .LBB70_6 +# BB#5: + movl %ebp, %eax +.LBB70_6: + movl %eax, 8(%ecx) + movl 76(%esp), %eax # 4-byte Reload + jne .LBB70_8 +# BB#7: + movl 68(%esp), %eax # 4-byte Reload +.LBB70_8: + movl %eax, 12(%ecx) + jne .LBB70_10 +# BB#9: + movl 84(%esp), %edi # 4-byte Reload +.LBB70_10: + movl %edi, 16(%ecx) + addl $88, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end70: + .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L + + .globl mcl_fp_addPre5L + .align 16, 0x90 + .type mcl_fp_addPre5L,@function +mcl_fp_addPre5L: # @mcl_fp_addPre5L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 28(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 24(%esp), %esi + addl (%esi), %ecx + adcl 4(%esi), %edx + movl 8(%eax), %edi + adcl 8(%esi), %edi + movl 12(%esi), %ebx + movl 16(%esi), %esi + adcl 12(%eax), %ebx + movl 16(%eax), %eax + movl 20(%esp), %ebp + movl %ecx, (%ebp) + movl %edx, 4(%ebp) + movl %edi, 8(%ebp) + movl %ebx, 12(%ebp) + adcl %esi, %eax + movl %eax, 16(%ebp) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end71: + .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L + + .globl mcl_fp_subPre5L + .align 16, 0x90 + .type mcl_fp_subPre5L,@function +mcl_fp_subPre5L: # @mcl_fp_subPre5L +# BB#0: + pushl %edi + pushl %esi + movl 16(%esp), %ecx + movl (%ecx), %edx + xorl %eax, %eax + movl 20(%esp), %esi + subl (%esi), %edx + movl 12(%esp), %edi + movl %edx, (%edi) + movl 4(%ecx), %edx + sbbl 4(%esi), %edx + movl %edx, 4(%edi) + movl 8(%ecx), %edx + sbbl 8(%esi), %edx + movl 
%edx, 8(%edi) + movl 12(%ecx), %edx + sbbl 12(%esi), %edx + movl %edx, 12(%edi) + movl 16(%esi), %edx + movl 16(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 16(%edi) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + retl +.Lfunc_end72: + .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L + + .globl mcl_fp_shr1_5L + .align 16, 0x90 + .type mcl_fp_shr1_5L,@function +mcl_fp_shr1_5L: # @mcl_fp_shr1_5L +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 20(%esp), %eax + movl 16(%eax), %ecx + movl 12(%eax), %edx + movl 8(%eax), %esi + movl (%eax), %edi + movl 4(%eax), %eax + shrdl $1, %eax, %edi + movl 16(%esp), %ebx + movl %edi, (%ebx) + shrdl $1, %esi, %eax + movl %eax, 4(%ebx) + shrdl $1, %edx, %esi + movl %esi, 8(%ebx) + shrdl $1, %ecx, %edx + movl %edx, 12(%ebx) + shrl %ecx + movl %ecx, 16(%ebx) + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end73: + .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L + + .globl mcl_fp_add5L + .align 16, 0x90 + .type mcl_fp_add5L,@function +mcl_fp_add5L: # @mcl_fp_add5L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 28(%esp), %ebx + movl (%ebx), %eax + movl 4(%ebx), %ecx + movl 24(%esp), %edi + addl (%edi), %eax + adcl 4(%edi), %ecx + movl 8(%ebx), %edx + adcl 8(%edi), %edx + movl 12(%edi), %esi + movl 16(%edi), %edi + adcl 12(%ebx), %esi + adcl 16(%ebx), %edi + movl 20(%esp), %ebx + movl %eax, (%ebx) + movl %ecx, 4(%ebx) + movl %edx, 8(%ebx) + movl %esi, 12(%ebx) + movl %edi, 16(%ebx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 32(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %ecx + sbbl 8(%ebp), %edx + sbbl 12(%ebp), %esi + sbbl 16(%ebp), %edi + sbbl $0, %ebx + testb $1, %bl + jne .LBB74_2 +# BB#1: # %nocarry + movl 20(%esp), %ebx + movl %eax, (%ebx) + movl %ecx, 4(%ebx) + movl %edx, 8(%ebx) + movl %esi, 12(%ebx) + movl %edi, 16(%ebx) +.LBB74_2: # %carry + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end74: + .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L + + .globl mcl_fp_addNF5L + .align 16, 0x90 + .type mcl_fp_addNF5L,@function +mcl_fp_addNF5L: # @mcl_fp_addNF5L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 48(%esp), %esi + movl (%esi), %ebx + movl 4(%esi), %eax + movl 44(%esp), %edi + addl (%edi), %ebx + adcl 4(%edi), %eax + movl 16(%esi), %ecx + movl 12(%esi), %edx + movl 8(%esi), %ebp + adcl 8(%edi), %ebp + adcl 12(%edi), %edx + movl %edx, 12(%esp) # 4-byte Spill + adcl 16(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 52(%esp), %edi + movl %ebx, %esi + subl (%edi), %esi + movl %esi, (%esp) # 4-byte Spill + movl %eax, %esi + sbbl 4(%edi), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl %ebp, %esi + sbbl 8(%edi), %esi + sbbl 12(%edi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %ecx, %edx + sbbl 16(%edi), %edx + movl %edx, %edi + sarl $31, %edi + testl %edi, %edi + js .LBB75_2 +# BB#1: + movl (%esp), %ebx # 4-byte Reload +.LBB75_2: + movl 40(%esp), %edi + movl %ebx, (%edi) + js .LBB75_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB75_4: + movl %eax, 4(%edi) + movl 12(%esp), %ecx # 4-byte Reload + js .LBB75_6 +# BB#5: + movl %esi, %ebp +.LBB75_6: + movl %ebp, 8(%edi) + movl 16(%esp), %eax # 4-byte Reload + js .LBB75_8 +# BB#7: + movl 8(%esp), %ecx # 4-byte Reload +.LBB75_8: + movl %ecx, 12(%edi) + js .LBB75_10 +# BB#9: + movl %edx, %eax +.LBB75_10: + movl %eax, 16(%edi) + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end75: + .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L + + .globl mcl_fp_sub5L + .align 16, 0x90 
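The mcl_fp_add5L and mcl_fp_addNF5L bodies just above both follow the same modular-addition pattern: one addl/adcl carry chain, a trial subl/sbbl chain against the modulus, then a select of either the raw sum or the reduced difference (a testb/jne branch in add5L, a sarl $31 sign-mask cascade in addNF5L). A minimal C sketch of that semantics for five 32-bit limbs; fp_add is an illustrative name:

#include <stdint.h>

#define N 5  /* five 32-bit limbs */

/* Hypothetical reference: z = x + y mod p, inputs assumed < p. */
static void fp_add(uint32_t z[N], const uint32_t x[N],
                   const uint32_t y[N], const uint32_t p[N])
{
    uint32_t s[N], d[N];
    uint64_t carry = 0, borrow = 0;
    for (int i = 0; i < N; i++) {          /* addl/adcl chain */
        uint64_t cur = (uint64_t)x[i] + y[i] + carry;
        s[i] = (uint32_t)cur;
        carry = cur >> 32;
    }
    for (int i = 0; i < N; i++) {          /* trial subl/sbbl chain */
        uint64_t cur = (uint64_t)s[i] - p[i] - borrow;
        d[i] = (uint32_t)cur;
        borrow = (cur >> 32) & 1;
    }
    /* sum overflowed, or trial subtraction stayed non-negative: reduce */
    int use_diff = (carry != 0) || (borrow == 0);
    for (int i = 0; i < N; i++)
        z[i] = use_diff ? d[i] : s[i];
}

The NF ("no final carry") variants appear to rely on the modulus leaving headroom in the top limb, so the sum cannot overflow 2^160 and the sign of the trial subtraction alone drives the select, which is what the sarl $31 mask expresses.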
+ .type mcl_fp_sub5L,@function +mcl_fp_sub5L: # @mcl_fp_sub5L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %edi + movl (%edi), %eax + movl 4(%edi), %ecx + xorl %ebx, %ebx + movl 28(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %ecx + movl 8(%edi), %edx + sbbl 8(%ebp), %edx + movl 12(%edi), %esi + sbbl 12(%ebp), %esi + movl 16(%edi), %edi + sbbl 16(%ebp), %edi + movl 20(%esp), %ebp + movl %eax, (%ebp) + movl %ecx, 4(%ebp) + movl %edx, 8(%ebp) + movl %esi, 12(%ebp) + movl %edi, 16(%ebp) + sbbl $0, %ebx + testb $1, %bl + je .LBB76_2 +# BB#1: # %carry + movl 32(%esp), %ebx + addl (%ebx), %eax + movl %eax, (%ebp) + adcl 4(%ebx), %ecx + movl %ecx, 4(%ebp) + adcl 8(%ebx), %edx + movl %edx, 8(%ebp) + movl 12(%ebx), %eax + adcl %esi, %eax + movl %eax, 12(%ebp) + movl 16(%ebx), %eax + adcl %edi, %eax + movl %eax, 16(%ebp) +.LBB76_2: # %nocarry + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end76: + .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L + + .globl mcl_fp_subNF5L + .align 16, 0x90 + .type mcl_fp_subNF5L,@function +mcl_fp_subNF5L: # @mcl_fp_subNF5L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $16, %esp + movl 40(%esp), %edi + movl (%edi), %ecx + movl 4(%edi), %eax + movl 44(%esp), %ebx + subl (%ebx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + sbbl 4(%ebx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 16(%edi), %esi + movl 12(%edi), %eax + movl 8(%edi), %ecx + sbbl 8(%ebx), %ecx + movl %ecx, (%esp) # 4-byte Spill + sbbl 12(%ebx), %eax + movl %eax, 8(%esp) # 4-byte Spill + sbbl 16(%ebx), %esi + movl %esi, %ebx + sarl $31, %ebx + movl %ebx, %ebp + shldl $1, %esi, %ebp + movl 48(%esp), %edi + movl 4(%edi), %ecx + andl %ebp, %ecx + andl (%edi), %ebp + movl 16(%edi), %edx + andl %ebx, %edx + movl 12(%edi), %eax + andl %ebx, %eax + roll %ebx + andl 8(%edi), %ebx + addl 4(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl 36(%esp), %edi + movl %ebp, (%edi) + adcl (%esp), %ebx # 4-byte Folded Reload + movl %ecx, 4(%edi) + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ebx, 8(%edi) + movl %eax, 12(%edi) + adcl %esi, %edx + movl %edx, 16(%edi) + addl $16, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end77: + .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L + + .globl mcl_fpDbl_add5L + .align 16, 0x90 + .type mcl_fpDbl_add5L,@function +mcl_fpDbl_add5L: # @mcl_fpDbl_add5L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $28, %esp + movl 56(%esp), %edx + movl 52(%esp), %ecx + movl 12(%ecx), %ebx + movl 16(%ecx), %ebp + movl 8(%edx), %esi + movl (%edx), %edi + addl (%ecx), %edi + movl 48(%esp), %eax + movl %edi, (%eax) + movl 4(%edx), %edi + adcl 4(%ecx), %edi + adcl 8(%ecx), %esi + adcl 12(%edx), %ebx + adcl 16(%edx), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl %edi, 4(%eax) + movl 28(%edx), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl %esi, 8(%eax) + movl 20(%edx), %esi + movl %ebx, 12(%eax) + movl 20(%ecx), %ebp + adcl %esi, %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 24(%edx), %esi + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 16(%eax) + movl 24(%ecx), %ebx + adcl %esi, %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 28(%ecx), %edi + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + movl 32(%edx), %eax + movl 32(%ecx), %esi + adcl %eax, %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 36(%edx), %eax + movl 36(%ecx), %edx + adcl %eax, %edx + sbbl %eax, %eax + andl $1, %eax + movl %ebp, %ecx + movl 
60(%esp), %ebp + subl (%ebp), %ecx + movl %ecx, (%esp) # 4-byte Spill + sbbl 4(%ebp), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + sbbl 8(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl %esi, %ebx + movl %edx, %esi + sbbl 12(%ebp), %ebx + sbbl 16(%ebp), %edx + sbbl $0, %eax + andl $1, %eax + jne .LBB78_2 +# BB#1: + movl %edx, %esi +.LBB78_2: + testb %al, %al + movl 12(%esp), %ebp # 4-byte Reload + jne .LBB78_4 +# BB#3: + movl (%esp), %ebp # 4-byte Reload +.LBB78_4: + movl 48(%esp), %eax + movl %ebp, 20(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl 20(%esp), %edx # 4-byte Reload + movl 16(%esp), %edi # 4-byte Reload + jne .LBB78_6 +# BB#5: + movl 4(%esp), %edi # 4-byte Reload +.LBB78_6: + movl %edi, 24(%eax) + jne .LBB78_8 +# BB#7: + movl 8(%esp), %edx # 4-byte Reload +.LBB78_8: + movl %edx, 28(%eax) + jne .LBB78_10 +# BB#9: + movl %ebx, %ecx +.LBB78_10: + movl %ecx, 32(%eax) + movl %esi, 36(%eax) + addl $28, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end78: + .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L + + .globl mcl_fpDbl_sub5L + .align 16, 0x90 + .type mcl_fpDbl_sub5L,@function +mcl_fpDbl_sub5L: # @mcl_fpDbl_sub5L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $16, %esp + movl 40(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %edi + movl 44(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%eax), %ebx + sbbl 8(%edx), %ebx + movl 36(%esp), %ecx + movl %esi, (%ecx) + movl 12(%eax), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ecx) + movl 16(%eax), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ecx) + movl 20(%edx), %ebx + movl %esi, 12(%ecx) + movl 20(%eax), %esi + sbbl %ebx, %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 24(%edx), %esi + movl %edi, 16(%ecx) + movl 24(%eax), %ebp + sbbl %esi, %ebp + movl 28(%edx), %esi + movl 28(%eax), %edi + sbbl %esi, %edi + movl %edi, (%esp) # 4-byte Spill + movl 32(%edx), %esi + movl 32(%eax), %edi + sbbl %esi, %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 36(%edx), %edx + movl 36(%eax), %eax + sbbl %edx, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl $0, %edx + sbbl $0, %edx + andl $1, %edx + movl 48(%esp), %ebx + jne .LBB79_1 +# BB#2: + xorl %eax, %eax + jmp .LBB79_3 +.LBB79_1: + movl 16(%ebx), %eax +.LBB79_3: + testb %dl, %dl + jne .LBB79_4 +# BB#5: + movl $0, %edx + movl $0, %esi + jmp .LBB79_6 +.LBB79_4: + movl (%ebx), %esi + movl 4(%ebx), %edx +.LBB79_6: + jne .LBB79_7 +# BB#8: + movl $0, %edi + jmp .LBB79_9 +.LBB79_7: + movl 12(%ebx), %edi +.LBB79_9: + jne .LBB79_10 +# BB#11: + xorl %ebx, %ebx + jmp .LBB79_12 +.LBB79_10: + movl 8(%ebx), %ebx +.LBB79_12: + addl 4(%esp), %esi # 4-byte Folded Reload + adcl %ebp, %edx + movl %esi, 20(%ecx) + adcl (%esp), %ebx # 4-byte Folded Reload + movl %edx, 24(%ecx) + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %ebx, 28(%ecx) + movl %edi, 32(%ecx) + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ecx) + addl $16, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end79: + .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L + + .globl mcl_fp_mulUnitPre6L + .align 16, 0x90 + .type mcl_fp_mulUnitPre6L,@function +mcl_fp_mulUnitPre6L: # @mcl_fp_mulUnitPre6L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $28, %esp + movl 56(%esp), %ebx + movl 52(%esp), %edi + movl %ebx, %eax + mull 20(%edi) + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %ebx, %eax + mull 16(%edi) + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %ebx, %eax + 
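mcl_fp_mulUnitPre6L, in progress here, multiplies a 6-limb operand by a single 32-bit word: six 32x32->64 mulls (the surrounding movl/mull pairs), with each product's high word folded into the next limb via adcl, yielding a 7-limb result. The same operation in C, with illustrative names:

#include <stdint.h>

#define N 6  /* six 32-bit limbs */

/* Hypothetical reference: z[0..N] = x * y for one 32-bit word y. */
static void mul_unit(uint32_t z[N + 1], const uint32_t x[N], uint32_t y)
{
    uint64_t carry = 0;
    for (int i = 0; i < N; i++) {
        uint64_t cur = (uint64_t)x[i] * y + carry;  /* one mull */
        z[i] = (uint32_t)cur;                       /* low word out */
        carry = cur >> 32;                          /* high word: adcl */
    }
    z[N] = (uint32_t)carry;                         /* seventh limb */
}

mcl_fpDbl_mulPre6L below is schoolbook multiplication assembled from six such rows, one per word of the second operand, and mcl_fpDbl_sqrPre6L specializes it by sharing the symmetric cross products.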
mull 12(%edi) + movl %edx, %esi + movl %eax, 8(%esp) # 4-byte Spill + movl %ebx, %eax + mull 8(%edi) + movl %edx, %ebp + movl %eax, 4(%esp) # 4-byte Spill + movl %ebx, %eax + mull 4(%edi) + movl %edx, %ecx + movl %eax, (%esp) # 4-byte Spill + movl %ebx, %eax + mull (%edi) + movl 48(%esp), %edi + movl %eax, (%edi) + addl (%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%edi) + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%edi) + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%edi) + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%edi) + movl 16(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%edi) + movl 24(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 24(%edi) + addl $28, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end80: + .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L + + .globl mcl_fpDbl_mulPre6L + .align 16, 0x90 + .type mcl_fpDbl_mulPre6L,@function +mcl_fpDbl_mulPre6L: # @mcl_fpDbl_mulPre6L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $84, %esp + movl 108(%esp), %esi + movl (%esi), %ebp + movl 112(%esp), %eax + movl (%eax), %edi + movl %ebp, %eax + mull %edi + movl %edx, 40(%esp) # 4-byte Spill + movl 104(%esp), %edx + movl %eax, (%edx) + movl 4(%esi), %ebx + movl 8(%esi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 12(%esi), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 16(%esi), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 20(%esi), %ecx + movl 112(%esp), %eax + movl 4(%eax), %esi + movl %ebx, %eax + mull %esi + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %ebp, %eax + mull %esi + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebx, %eax + mull %edi + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, %ebp + movl %ecx, %eax + mull %esi + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + mull %esi + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + mull %esi + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl 12(%esp), %ebx # 4-byte Reload + movl %ebx, %eax + mull %esi + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edi + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + mull %edi + movl %edx, %ecx + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + mull %edi + movl %edx, %esi + movl %eax, 80(%esp) # 4-byte Spill + movl %ebx, %eax + mull %edi + movl %eax, %ebx + movl %edx, %edi + addl 40(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl 80(%esp), %edi # 4-byte Folded Reload + adcl 76(%esp), %esi # 4-byte Folded Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl 64(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 32(%esp), %ebp # 4-byte Folded Reload + movl 104(%esp), %eax + movl %ebp, 4(%eax) + adcl 48(%esp), %ebx # 4-byte Folded Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %ecx, %eax + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %edx, %ecx + adcl 44(%esp), %ecx # 4-byte Folded Reload + sbbl %edx, %edx + andl $1, %edx + addl 60(%esp), %ebx # 4-byte Folded Reload + adcl 72(%esp), %edi # 4-byte Folded Reload + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 
12(%esp) # 4-byte Spill + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 64(%esp) # 4-byte Spill + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl 108(%esp), %ebp + movl 20(%ebp), %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl 8(%eax), %ecx + movl %edx, %eax + mull %ecx + movl %eax, 80(%esp) # 4-byte Spill + movl %edx, 60(%esp) # 4-byte Spill + movl 16(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill + mull %ecx + movl %eax, 68(%esp) # 4-byte Spill + movl %edx, 56(%esp) # 4-byte Spill + movl 12(%ebp), %eax + movl %eax, 36(%esp) # 4-byte Spill + mull %ecx + movl %eax, 76(%esp) # 4-byte Spill + movl %edx, 52(%esp) # 4-byte Spill + movl 8(%ebp), %eax + movl %eax, 32(%esp) # 4-byte Spill + mull %ecx + movl %eax, 72(%esp) # 4-byte Spill + movl %edx, 48(%esp) # 4-byte Spill + movl (%ebp), %esi + movl 4(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill + mull %ecx + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, %ebp + movl %esi, %eax + mull %ecx + movl %edx, 24(%esp) # 4-byte Spill + addl %ebx, %eax + movl 104(%esp), %ecx + movl %eax, 8(%ecx) + adcl %edi, %ebp + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 72(%esp) # 4-byte Folded Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 76(%esp) # 4-byte Folded Spill + movl 68(%esp), %ebx # 4-byte Reload + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl 28(%esp), %eax # 4-byte Reload + adcl %eax, 80(%esp) # 4-byte Folded Spill + sbbl %edi, %edi + movl 112(%esp), %eax + movl 12(%eax), %ecx + movl 20(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl 8(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %esi, %eax + mull %ecx + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + mull %ecx + movl %eax, (%esp) # 4-byte Spill + movl %edx, 32(%esp) # 4-byte Spill + andl $1, %edi + addl 24(%esp), %ebp # 4-byte Folded Reload + movl 72(%esp), %esi # 4-byte Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + movl 76(%esp), %edx # 4-byte Reload + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %ebx, %ecx + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl 80(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + adcl 60(%esp), %edi # 4-byte Folded Reload + addl 4(%esp), %ebp # 4-byte Folded Reload + movl 104(%esp), %ebx + movl %ebp, 12(%ebx) + movl %esi, %ebx + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl %edx, %esi + adcl (%esp), %esi # 4-byte Folded Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + adcl 68(%esp), %edi # 4-byte Folded Reload + sbbl %edx, %edx + andl $1, %edx + addl 16(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 72(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl 44(%esp), %edi # 4-byte Folded Reload + movl %edi, 44(%esp) # 4-byte Spill + adcl 
64(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl 108(%esp), %eax + movl %eax, %ecx + movl 20(%ecx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 16(%ecx), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 12(%ecx), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 8(%ecx), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl (%ecx), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 4(%ecx), %ebp + movl %ebp, 4(%esp) # 4-byte Spill + movl 112(%esp), %esi + movl 16(%esi), %ecx + mull %ecx + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, %esi + movl 56(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %edi, %eax + mull %ecx + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, %edi + movl %ebx, %eax + mull %ecx + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, %ebx + movl %ebp, %eax + mull %ecx + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, %ebp + movl 60(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 24(%esp) # 4-byte Spill + addl 72(%esp), %eax # 4-byte Folded Reload + movl 104(%esp), %ecx + movl %eax, 16(%ecx) + adcl 76(%esp), %ebp # 4-byte Folded Reload + adcl 68(%esp), %ebx # 4-byte Folded Reload + adcl 80(%esp), %edi # 4-byte Folded Reload + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 64(%esp) # 4-byte Folded Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 8(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl 20(%eax), %ecx + sbbl %esi, %esi + movl 20(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl 4(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + mull %ecx + movl %eax, 12(%esp) # 4-byte Spill + movl %edx, 56(%esp) # 4-byte Spill + andl $1, %esi + addl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + adcl 32(%esp), %edi # 4-byte Folded Reload + movl 64(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl 8(%esp), %ecx # 4-byte Reload + adcl 48(%esp), %ecx # 4-byte Folded Reload + adcl 52(%esp), %esi # 4-byte Folded Reload + addl 4(%esp), %ebp # 4-byte Folded Reload + movl 104(%esp), %edx + movl %ebp, 20(%edx) + adcl 20(%esp), %ebx # 4-byte Folded Reload + adcl 12(%esp), %edi # 4-byte Folded Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, %edx + movl %ecx, %ebp + adcl 40(%esp), %ebp # 4-byte Folded Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 60(%esp), %ebx # 4-byte Folded Reload + adcl 72(%esp), %edi # 4-byte Folded Reload + movl 104(%esp), %ecx + movl %ebx, 24(%ecx) + movl %edx, %ebx + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %edi, 28(%ecx) + movl %ebp, %edx + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %ebx, 32(%ecx) + adcl 76(%esp), %esi # 4-byte Folded Reload + movl %edx, 36(%ecx) + movl %esi, 40(%ecx) + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%ecx) + addl $84, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end81: + .size 
mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L + + .globl mcl_fpDbl_sqrPre6L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre6L,@function +mcl_fpDbl_sqrPre6L: # @mcl_fpDbl_sqrPre6L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $100, %esp + movl 124(%esp), %esi + movl 20(%esi), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl (%esi), %ebp + movl 4(%esi), %ebx + mull %ebx + movl %eax, 72(%esp) # 4-byte Spill + movl %edx, 84(%esp) # 4-byte Spill + movl 16(%esi), %ecx + movl %ecx, %eax + mull %ebx + movl %eax, 64(%esp) # 4-byte Spill + movl %edx, 80(%esp) # 4-byte Spill + movl 12(%esi), %edi + movl %edi, %eax + mull %ebx + movl %eax, 60(%esp) # 4-byte Spill + movl %edx, 76(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl 8(%eax), %esi + movl %esi, %eax + mull %ebx + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + mull %ebp + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + mull %ebp + movl %edx, 96(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %edi, %eax + mull %ebp + movl %edx, 92(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %esi, %eax + mull %ebp + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, %ecx + movl %ebx, %eax + mull %ebx + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ebx, %eax + mull %ebp + movl %edx, %esi + movl %esi, 32(%esp) # 4-byte Spill + movl %eax, %edi + movl %ebp, %eax + mull %ebp + movl 120(%esp), %ebx + movl %eax, (%ebx) + addl %edi, %edx + adcl %esi, %ecx + movl %ecx, %ebx + movl 88(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl 92(%esp), %esi # 4-byte Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + movl 96(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl 48(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + addl %edi, %edx + movl 120(%esp), %edi + movl %edx, 4(%edi) + adcl 36(%esp), %ebx # 4-byte Folded Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %esi, %edx + adcl 60(%esp), %edx # 4-byte Folded Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, %edi + adcl 72(%esp), %ebp # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + movl %ebx, %esi + addl 32(%esp), %esi # 4-byte Folded Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 88(%esp) # 4-byte Spill + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, 92(%esp) # 4-byte Spill + adcl 76(%esp), %edi # 4-byte Folded Reload + movl %edi, 96(%esp) # 4-byte Spill + adcl 80(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %edi + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 124(%esp), %ebx + movl 20(%ebx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 8(%ebx), %ebp + mull %ebp + movl %eax, 84(%esp) # 4-byte Spill + movl %edx, 60(%esp) # 4-byte Spill + movl 16(%ebx), %eax + movl %eax, 64(%esp) # 4-byte Spill + mull %ebp + movl %eax, 80(%esp) # 4-byte Spill + movl %edx, 52(%esp) # 4-byte Spill + movl 12(%ebx), %eax + movl %eax, 36(%esp) # 4-byte Spill + mull %ebp + movl %eax, 68(%esp) # 4-byte Spill + movl %edx, 76(%esp) # 4-byte Spill + movl (%ebx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 4(%ebx), %eax + movl %eax, 48(%esp) # 4-byte Spill + mull %ebp + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull %ebp + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, %ebx + movl %ebp, %eax + mull %ebp + movl %eax, %ebp + addl 
%esi, %ebx + movl 120(%esp), %eax + movl %ebx, 8(%eax) + movl 28(%esp), %ecx # 4-byte Reload + adcl 88(%esp), %ecx # 4-byte Folded Reload + adcl 92(%esp), %ebp # 4-byte Folded Reload + movl 96(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl 80(%esp), %ebx # 4-byte Reload + adcl %edi, %ebx + movl 84(%esp), %esi # 4-byte Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + sbbl %edi, %edi + andl $1, %edi + addl 32(%esp), %ecx # 4-byte Folded Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 96(%esp) # 4-byte Spill + adcl %edx, %eax + movl %eax, %ebp + adcl 76(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 80(%esp) # 4-byte Spill + adcl 52(%esp), %esi # 4-byte Folded Reload + movl %esi, 84(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + movl 36(%esp), %edi # 4-byte Reload + mull %edi + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 92(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + mull %edi + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 88(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + mull %edi + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, %ebx + movl 56(%esp), %eax # 4-byte Reload + mull %edi + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, %esi + movl %edi, %eax + mull %edi + movl %eax, %edi + movl %edx, 36(%esp) # 4-byte Spill + addl %ecx, %esi + movl 120(%esp), %eax + movl %esi, 12(%eax) + adcl 96(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 72(%esp) # 4-byte Spill + adcl 68(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 96(%esp) # 4-byte Spill + adcl 80(%esp), %edi # 4-byte Folded Reload + movl %edi, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 88(%esp) # 4-byte Folded Spill + movl 32(%esp), %eax # 4-byte Reload + adcl %eax, 92(%esp) # 4-byte Folded Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 124(%esp), %ecx + movl (%ecx), %ebx + movl 4(%ecx), %edi + movl 20(%ecx), %ebp + movl %edi, %eax + mull %ebp + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %ebx, %eax + mull %ebp + movl %eax, 48(%esp) # 4-byte Spill + movl %edx, 64(%esp) # 4-byte Spill + movl 16(%ecx), %esi + movl %edi, %eax + mull %esi + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %ebx, %eax + mull %esi + movl %eax, 12(%esp) # 4-byte Spill + movl %edx, 28(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + addl %eax, 72(%esp) # 4-byte Folded Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 96(%esp) # 4-byte Folded Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl 36(%esp), %eax # 4-byte Reload + adcl %eax, 88(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 92(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl 12(%eax), %edi + movl 8(%eax), %ebx + movl %edi, %eax + mull %ebp + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %edi, %eax + mull %esi + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %ebx, %eax + mull %ebp + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ebx, %eax + mull %esi + movl %edx, 8(%esp) # 4-byte Spill + movl %eax, %edi + movl %ebp, %eax + mull %esi + movl %edx, 80(%esp) # 4-byte 
Spill + movl %eax, 76(%esp) # 4-byte Spill + movl %ebp, %eax + mull %ebp + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %eax + mull %esi + movl %eax, %ebx + movl %edx, (%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + addl 12(%esp), %eax # 4-byte Folded Reload + movl 120(%esp), %ebp + movl %eax, 16(%ebp) + movl 96(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + adcl %ecx, %edi + movl 4(%esp), %ecx # 4-byte Reload + adcl 88(%esp), %ecx # 4-byte Folded Reload + adcl 92(%esp), %ebx # 4-byte Folded Reload + movl 84(%esp), %edx # 4-byte Reload + adcl 76(%esp), %edx # 4-byte Folded Reload + sbbl %esi, %esi + andl $1, %esi + addl 28(%esp), %eax # 4-byte Folded Reload + adcl 32(%esp), %edi # 4-byte Folded Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + adcl 16(%esp), %ebx # 4-byte Folded Reload + adcl (%esp), %edx # 4-byte Folded Reload + adcl 80(%esp), %esi # 4-byte Folded Reload + addl 48(%esp), %eax # 4-byte Folded Reload + movl 120(%esp), %ebp + movl %eax, 20(%ebp) + adcl 56(%esp), %edi # 4-byte Folded Reload + adcl 36(%esp), %ecx # 4-byte Folded Reload + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %edx, %eax + adcl 76(%esp), %eax # 4-byte Folded Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + sbbl %edx, %edx + andl $1, %edx + addl 64(%esp), %edi # 4-byte Folded Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl 120(%esp), %ebp + movl %edi, 24(%ebp) + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ecx, 28(%ebp) + movl %eax, %edi + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %ebx, 32(%ebp) + adcl 80(%esp), %esi # 4-byte Folded Reload + movl %edi, 36(%ebp) + movl %esi, 40(%ebp) + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%ebp) + addl $100, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end82: + .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L + + .globl mcl_fp_mont6L + .align 16, 0x90 + .type mcl_fp_mont6L,@function +mcl_fp_mont6L: # @mcl_fp_mont6L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $132, %esp + movl 156(%esp), %edi + movl (%edi), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 160(%esp), %ecx + movl (%ecx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + mull %ecx + movl %eax, 68(%esp) # 4-byte Spill + movl %edx, 64(%esp) # 4-byte Spill + movl 164(%esp), %edx + movl -4(%edx), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl %eax, %ebp + imull %ecx, %ebp + movl (%edx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 20(%edx), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 16(%edx), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 12(%edx), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 8(%edx), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 4(%edx), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 4(%edi), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl %edi, %eax + movl 20(%eax), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 16(%eax), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 12(%eax), %ebx + movl %ebx, 80(%esp) # 4-byte Spill + movl 8(%eax), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl %ebp, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ebp, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebp, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl 
%ebp, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebp, %eax + mull %esi + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ebp, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 20(%esp) # 4-byte Spill + movl %edi, %eax + movl 40(%esp), %ebp # 4-byte Reload + mull %ebp + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %ecx, %eax + mull %ebp + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, (%esp) # 4-byte Spill + movl %ebx, %eax + mull %ebp + movl %edx, %ebx + movl %eax, 8(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + mull %ebp + movl %ebp, %ecx + movl %edx, %ebp + movl %eax, 4(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + mull %ecx + movl %eax, %edi + addl 64(%esp), %edi # 4-byte Folded Reload + adcl 4(%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%esp) # 4-byte Spill + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + adcl (%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 72(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + addl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + movl 48(%esp), %edx # 4-byte Reload + adcl 28(%esp), %edx # 4-byte Folded Reload + movl 52(%esp), %ecx # 4-byte Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl 60(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl 20(%esp), %ebx # 4-byte Reload + addl 68(%esp), %ebx # 4-byte Folded Reload + adcl %edi, 40(%esp) # 4-byte Folded Spill + adcl 4(%esp), %esi # 4-byte Folded Reload + movl %esi, 44(%esp) # 4-byte Spill + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl 64(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + adcl 76(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 60(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl 4(%eax), %ecx + movl %ecx, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, %edi + movl %ecx, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, %ebx + movl %eax, 24(%esp) # 4-byte Spill + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, %esi + movl %ecx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + addl %esi, %edx + movl %edx, %esi + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl %edi, %ebx + movl %ebx, %edi + movl 76(%esp), %edx # 4-byte Reload + adcl 28(%esp), %edx # 4-byte Folded Reload + movl 72(%esp), %ecx # 4-byte Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl 68(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 64(%esp), %ebx # 4-byte Reload + addl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + adcl 44(%esp), %esi # 4-byte 
Folded Reload + movl %esi, 44(%esp) # 4-byte Spill + adcl 48(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 40(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 72(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %ebx, %esi + imull 112(%esp), %esi # 4-byte Folded Reload + andl $1, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl %esi, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %esi, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %esi, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %esi, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, %ecx + movl %esi, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %eax, %edi + movl %edx, %ebx + addl %ecx, %ebx + adcl 16(%esp), %ebp # 4-byte Folded Reload + movl 48(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl 52(%esp), %esi # 4-byte Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + movl 56(%esp), %edx # 4-byte Reload + adcl 28(%esp), %edx # 4-byte Folded Reload + movl 60(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 64(%esp), %edi # 4-byte Folded Reload + adcl 44(%esp), %ebx # 4-byte Folded Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + adcl 76(%esp), %esi # 4-byte Folded Reload + movl %esi, 52(%esp) # 4-byte Spill + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 160(%esp), %eax + movl 8(%eax), %ecx + movl %ecx, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 64(%esp) # 4-byte Spill + movl %ecx, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + mull 92(%esp) # 4-byte Folded Reload + addl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + movl 72(%esp), %edx # 4-byte Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + movl 68(%esp), %ecx # 4-byte Reload + adcl 64(%esp), %ecx # 4-byte Folded Reload + adcl $0, %edi + addl %ebx, %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl %ebp, 44(%esp) # 4-byte Folded Spill + movl 48(%esp), %ebx # 4-byte Reload + adcl %ebx, 40(%esp) # 4-byte Folded Spill + adcl 52(%esp), %esi # 4-byte Folded 
Reload + movl %esi, 76(%esp) # 4-byte Spill + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + sbbl %ecx, %ecx + movl %eax, %esi + imull 112(%esp), %esi # 4-byte Folded Reload + andl $1, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl %esi, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %esi, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %esi, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 20(%esp) # 4-byte Spill + movl %esi, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, %edi + movl %esi, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %eax, %esi + movl %edx, %ebx + addl %edi, %ebx + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl %ecx, %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl 56(%esp), %edi # 4-byte Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl 60(%esp), %edx # 4-byte Reload + adcl 48(%esp), %edx # 4-byte Folded Reload + movl 64(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 32(%esp), %esi # 4-byte Folded Reload + adcl 44(%esp), %ebx # 4-byte Folded Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 72(%esp), %edi # 4-byte Folded Reload + movl %edi, 56(%esp) # 4-byte Spill + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 64(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 160(%esp), %eax + movl 12(%eax), %edi + movl %edi, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 40(%esp) # 4-byte Spill + movl %edi, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %edi, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %edi, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %edi, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 52(%esp) # 4-byte Spill + movl %edi, %eax + mull 92(%esp) # 4-byte Folded Reload + addl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl 72(%esp), %edi # 4-byte Reload + adcl 32(%esp), %edi # 4-byte Folded Reload + movl 68(%esp), %edx # 4-byte Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + adcl $0, %esi + addl %ebx, %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl %ebp, 52(%esp) # 4-byte Folded Spill + movl 44(%esp), %ebx # 4-byte Reload + adcl %ebx, 48(%esp) # 4-byte Folded Spill + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 76(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 72(%esp) # 4-byte Spill + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + 
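mcl_fp_mont6L, which these hunks unroll, interleaves multiplication and reduction in the CIOS style: for each word y[i] it adds x*y[i] into a running accumulator, derives q from the low limb and m' = -p^-1 mod 2^32 (the recurring imull 112(%esp) against the value loaded from -4 off the modulus pointer), adds q*p, and shifts the accumulator down one limb; a single conditional subtraction at the end normalizes into [0, p). A compact C sketch under those assumptions; mont_mul and its names are illustrative:

#include <stdint.h>

#define N 6  /* six 32-bit limbs */

/* Hypothetical CIOS reference: z = x*y / 2^(32*N) mod p,
   mp = -1/p mod 2^32; inputs assumed < p. */
static void mont_mul(uint32_t z[N], const uint32_t x[N], const uint32_t y[N],
                     const uint32_t p[N], uint32_t mp)
{
    uint32_t t[N + 2] = {0};
    for (int i = 0; i < N; i++) {
        uint64_t carry = 0, cur;
        for (int j = 0; j < N; j++) {      /* t += x * y[i]: one mull row */
            cur = (uint64_t)t[j] + (uint64_t)x[j] * y[i] + carry;
            t[j] = (uint32_t)cur;
            carry = cur >> 32;
        }
        cur = (uint64_t)t[N] + carry;
        t[N] = (uint32_t)cur;
        t[N + 1] = (uint32_t)(cur >> 32);
        uint32_t q = t[0] * mp;            /* the imull 112(%esp) step */
        carry = ((uint64_t)t[0] + (uint64_t)p[0] * q) >> 32;  /* low -> 0 */
        for (int j = 1; j < N; j++) {      /* t = (t + q*p) >> 32 */
            cur = (uint64_t)t[j] + (uint64_t)p[j] * q + carry;
            t[j - 1] = (uint32_t)cur;
            carry = cur >> 32;
        }
        cur = (uint64_t)t[N] + carry;
        t[N - 1] = (uint32_t)cur;
        t[N] = t[N + 1] + (uint32_t)(cur >> 32);
        t[N + 1] = 0;
    }
    uint64_t borrow = 0;                   /* final conditional subtract */
    uint32_t d[N];
    for (int j = 0; j < N; j++) {
        uint64_t cur = (uint64_t)t[j] - p[j] - borrow;
        d[j] = (uint32_t)cur;
        borrow = (cur >> 32) & 1;
    }
    for (int j = 0; j < N; j++)
        z[j] = (t[N] || !borrow) ? d[j] : t[j];
}

mcl_fp_montNF6L further down follows the same round structure but, like the other NF variants, exploits top-limb headroom in p to replace the borrow-tracked select with a sign-based one.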
adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 44(%esp) # 4-byte Spill + sbbl %ecx, %ecx + movl %eax, %esi + imull 112(%esp), %esi # 4-byte Folded Reload + andl $1, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl %esi, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %esi, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %esi, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 20(%esp) # 4-byte Spill + movl %esi, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, %edi + movl %esi, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %eax, %esi + movl %edx, %ebx + addl %edi, %ebx + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl %ecx, %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl 56(%esp), %edi # 4-byte Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl 60(%esp), %edx # 4-byte Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + movl 64(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 32(%esp), %esi # 4-byte Folded Reload + adcl 52(%esp), %ebx # 4-byte Folded Reload + adcl 48(%esp), %ebp # 4-byte Folded Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl 72(%esp), %edi # 4-byte Folded Reload + movl %edi, 56(%esp) # 4-byte Spill + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 64(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 160(%esp), %eax + movl 16(%eax), %edi + movl %edi, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 36(%esp) # 4-byte Spill + movl %edi, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %edi, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %edi, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl %edi, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 48(%esp) # 4-byte Spill + movl %edi, %eax + mull 92(%esp) # 4-byte Folded Reload + addl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 44(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl 72(%esp), %edi # 4-byte Reload + adcl 32(%esp), %edi # 4-byte Folded Reload + movl 68(%esp), %edx # 4-byte Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + adcl $0, %esi + addl %ebx, %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl %ebp, 48(%esp) # 4-byte Folded Spill + movl 52(%esp), %ebx # 4-byte Reload + adcl %ebx, 44(%esp) # 4-byte Folded Spill + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 76(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 72(%esp) # 4-byte Spill + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + sbbl %ecx, %ecx + movl %eax, %esi + imull 112(%esp), %esi # 4-byte Folded Reload + andl $1, %ecx + movl %ecx, 40(%esp) # 4-byte 
Spill + movl %esi, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %esi, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %esi, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %esi, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, %edi + movl %esi, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %eax, %ecx + movl %edx, %ebx + addl %edi, %ebx + adcl 16(%esp), %ebp # 4-byte Folded Reload + movl 52(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl 56(%esp), %edi # 4-byte Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + movl 60(%esp), %esi # 4-byte Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + movl 64(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 32(%esp), %ecx # 4-byte Folded Reload + adcl 48(%esp), %ebx # 4-byte Folded Reload + adcl 44(%esp), %ebp # 4-byte Folded Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl 72(%esp), %edi # 4-byte Folded Reload + movl %edi, 56(%esp) # 4-byte Spill + adcl 68(%esp), %esi # 4-byte Folded Reload + movl %esi, 60(%esp) # 4-byte Spill + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 160(%esp), %eax + movl 20(%eax), %edi + movl %edi, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 76(%esp) # 4-byte Spill + movl %edi, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 72(%esp) # 4-byte Spill + movl %edi, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %edi, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 96(%esp) # 4-byte Spill + movl %eax, 80(%esp) # 4-byte Spill + movl %edi, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 48(%esp) # 4-byte Spill + movl %edi, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + addl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 92(%esp) # 4-byte Spill + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 80(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl 88(%esp), %edi # 4-byte Reload + adcl 72(%esp), %edi # 4-byte Folded Reload + movl 84(%esp), %edx # 4-byte Reload + adcl 76(%esp), %edx # 4-byte Folded Reload + movl %esi, %eax + adcl $0, %eax + movl 100(%esp), %esi # 4-byte Reload + addl %ebx, %esi + movl %esi, 100(%esp) # 4-byte Spill + adcl %ebp, 92(%esp) # 4-byte Folded Spill + movl 52(%esp), %ebx # 4-byte Reload + adcl %ebx, 80(%esp) # 4-byte Folded Spill + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 96(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 88(%esp) # 4-byte Spill + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl 112(%esp), %ecx # 4-byte Reload + imull %esi, %ecx + movl %ecx, 112(%esp) # 4-byte Spill + andl $1, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %ecx, %eax + mull 128(%esp) # 
4-byte Folded Reload + movl %edx, %edi + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 64(%esp) # 4-byte Spill + movl %ecx, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, %ebx + movl %eax, 60(%esp) # 4-byte Spill + movl %ecx, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, %esi + movl %ecx, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 52(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + mull 104(%esp) # 4-byte Folded Reload + addl 56(%esp), %eax # 4-byte Folded Reload + adcl 52(%esp), %edx # 4-byte Folded Reload + adcl 60(%esp), %ecx # 4-byte Folded Reload + adcl 64(%esp), %ebx # 4-byte Folded Reload + adcl 68(%esp), %ebp # 4-byte Folded Reload + adcl $0, %edi + addl 100(%esp), %esi # 4-byte Folded Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + adcl 96(%esp), %ecx # 4-byte Folded Reload + adcl 88(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 112(%esp) # 4-byte Spill + adcl 84(%esp), %ebp # 4-byte Folded Reload + adcl 76(%esp), %edi # 4-byte Folded Reload + movl %edi, 100(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %eax, %esi + subl 108(%esp), %esi # 4-byte Folded Reload + movl %esi, 108(%esp) # 4-byte Spill + movl %edx, %esi + sbbl 104(%esp), %esi # 4-byte Folded Reload + movl %esi, 104(%esp) # 4-byte Spill + movl %ecx, %esi + sbbl 116(%esp), %esi # 4-byte Folded Reload + movl %esi, 116(%esp) # 4-byte Spill + movl %ebx, %esi + movl %edi, %ebx + sbbl 120(%esp), %esi # 4-byte Folded Reload + movl %esi, 120(%esp) # 4-byte Spill + movl %ebp, %esi + movl %ebp, %edi + sbbl 124(%esp), %esi # 4-byte Folded Reload + movl %esi, 124(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + movl %ebp, %esi + sbbl 128(%esp), %esi # 4-byte Folded Reload + movl %esi, 128(%esp) # 4-byte Spill + sbbl $0, %ebx + andl $1, %ebx + jne .LBB83_2 +# BB#1: + movl 104(%esp), %edx # 4-byte Reload +.LBB83_2: + testb %bl, %bl + jne .LBB83_4 +# BB#3: + movl 108(%esp), %eax # 4-byte Reload +.LBB83_4: + movl 152(%esp), %ebx + movl %eax, (%ebx) + movl %edx, 4(%ebx) + jne .LBB83_6 +# BB#5: + movl 116(%esp), %ecx # 4-byte Reload +.LBB83_6: + movl %ecx, 8(%ebx) + movl 112(%esp), %eax # 4-byte Reload + jne .LBB83_8 +# BB#7: + movl 120(%esp), %eax # 4-byte Reload +.LBB83_8: + movl %eax, 12(%ebx) + jne .LBB83_10 +# BB#9: + movl 124(%esp), %edi # 4-byte Reload +.LBB83_10: + movl %edi, 16(%ebx) + jne .LBB83_12 +# BB#11: + movl 128(%esp), %ebp # 4-byte Reload +.LBB83_12: + movl %ebp, 20(%ebx) + addl $132, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end83: + .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L + + .globl mcl_fp_montNF6L + .align 16, 0x90 + .type mcl_fp_montNF6L,@function +mcl_fp_montNF6L: # @mcl_fp_montNF6L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $132, %esp + movl 156(%esp), %ebx + movl (%ebx), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 160(%esp), %ecx + movl (%ecx), %edi + mull %edi + movl %eax, 72(%esp) # 4-byte Spill + movl %edx, 64(%esp) # 4-byte Spill + movl 164(%esp), %esi + movl -4(%esi), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl %eax, %ecx + imull %edx, %ecx + movl (%esi), %edx + movl %edx, 128(%esp) # 4-byte Spill + movl 20(%esi), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 16(%esi), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 12(%esi), %eax + movl %eax, 120(%esp) # 
4-byte Spill + movl 8(%esi), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 4(%esi), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 4(%ebx), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 20(%ebx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 16(%ebx), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 12(%ebx), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 8(%ebx), %ebx + movl %ebx, 88(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 52(%esp) # 4-byte Spill + movl %ecx, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ecx, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %ecx, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + mull %edi + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %esi, %eax + mull %edi + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ebp, %eax + mull %edi + movl %edx, %ecx + movl %eax, 4(%esp) # 4-byte Spill + movl %ebx, %eax + mull %edi + movl %edx, %ebx + movl %eax, (%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + mull %edi + movl %edx, %ebp + movl %eax, %esi + addl 64(%esp), %esi # 4-byte Folded Reload + adcl (%esp), %ebp # 4-byte Folded Reload + adcl 4(%esp), %ebx # 4-byte Folded Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl 68(%esp), %edx # 4-byte Reload + adcl 12(%esp), %edx # 4-byte Folded Reload + movl 76(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 16(%esp), %edi # 4-byte Reload + addl 72(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ebp # 4-byte Folded Reload + adcl 36(%esp), %ebx # 4-byte Folded Reload + adcl 44(%esp), %ecx # 4-byte Folded Reload + adcl 52(%esp), %edx # 4-byte Folded Reload + adcl $0, %eax + addl 24(%esp), %esi # 4-byte Folded Reload + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl 40(%esp), %ebx # 4-byte Folded Reload + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 64(%esp) # 4-byte Spill + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl 4(%eax), %edi + movl %edi, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %edi, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %edi, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %edi, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %edi, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 44(%esp) # 4-byte Spill + movl %edi, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + addl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl 
48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %edi # 4-byte Reload + adcl 32(%esp), %edi # 4-byte Folded Reload + movl 60(%esp), %edx # 4-byte Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + movl 72(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %esi, 40(%esp) # 4-byte Folded Spill + adcl %ebp, 44(%esp) # 4-byte Folded Spill + adcl %ebx, 48(%esp) # 4-byte Folded Spill + adcl 64(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 56(%esp) # 4-byte Spill + adcl 76(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, %ebp + imull 96(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %ebp, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %ebp, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, %ebx + movl %ebp, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, %esi + movl %ebp, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, %edi + movl %ebp, %eax + mull 128(%esp) # 4-byte Folded Reload + addl %ecx, %eax + adcl 44(%esp), %edi # 4-byte Folded Reload + movl %esi, %ebp + adcl 48(%esp), %ebp # 4-byte Folded Reload + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl 64(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl 68(%esp), %esi # 4-byte Reload + adcl 60(%esp), %esi # 4-byte Folded Reload + movl 72(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %edx, %edi + movl %edi, 40(%esp) # 4-byte Spill + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 64(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 68(%esp) # 4-byte Spill + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl 8(%eax), %ecx + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ecx, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, %edi + movl %ecx, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, %ebx + movl %eax, %esi + movl %ecx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + movl %edx, %ebp + addl %esi, %ebp + adcl %edi, %ebx + movl %ebx, %edi + movl 48(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %esi # 4-byte Reload + adcl 32(%esp), %esi # 4-byte Folded Reload + movl 56(%esp), %edx # 4-byte Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + movl 76(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 
24(%esp), %ebx # 4-byte Reload + addl 40(%esp), %ebx # 4-byte Folded Reload + adcl 44(%esp), %ebp # 4-byte Folded Reload + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 44(%esp) # 4-byte Spill + adcl 64(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 68(%esp), %esi # 4-byte Folded Reload + movl %esi, 52(%esp) # 4-byte Spill + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %ebx, %edi + movl %ebx, %ecx + imull 96(%esp), %edi # 4-byte Folded Reload + movl %edi, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %edi, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %edi, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, %ebx + movl %edi, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 60(%esp) # 4-byte Spill + movl %edi, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, %esi + movl %edi, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + addl %ecx, %eax + adcl %ebp, %esi + movl 60(%esp), %edx # 4-byte Reload + adcl 44(%esp), %edx # 4-byte Folded Reload + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl 64(%esp), %edi # 4-byte Reload + adcl 52(%esp), %edi # 4-byte Folded Reload + movl 68(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl 76(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 64(%esp) # 4-byte Spill + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl 12(%eax), %ecx + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ecx, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, %esi + movl %ecx, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, %ebx + movl %ecx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + movl %edx, %ebp + addl %ebx, %ebp + adcl %esi, %edi + movl %edi, %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %ebx # 4-byte Reload + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl 56(%esp), %edx # 4-byte Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + movl 72(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 24(%esp), %edi # 4-byte Reload + addl 40(%esp), %edi # 4-byte Folded Reload + adcl 60(%esp), %ebp # 4-byte Folded Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 44(%esp) # 4-byte Spill + adcl 64(%esp), %ecx # 4-byte Folded Reload + movl 
%ecx, 48(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 52(%esp) # 4-byte Spill + adcl 76(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %edi, %esi + movl %edi, %ecx + imull 96(%esp), %esi # 4-byte Folded Reload + movl %esi, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %esi, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %esi, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, %edi + movl %esi, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 60(%esp) # 4-byte Spill + movl %esi, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, %ebx + movl %esi, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + addl %ecx, %eax + adcl %ebp, %ebx + movl 60(%esp), %edx # 4-byte Reload + adcl 44(%esp), %edx # 4-byte Folded Reload + adcl 48(%esp), %edi # 4-byte Folded Reload + movl 64(%esp), %esi # 4-byte Reload + adcl 52(%esp), %esi # 4-byte Folded Reload + movl 68(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl 72(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 44(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 64(%esp) # 4-byte Spill + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl 16(%eax), %ecx + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ecx, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, %edi + movl %ecx, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, %ebp + movl %ecx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + movl %edx, %ebx + addl %ebp, %ebx + adcl %edi, %esi + movl %esi, %edi + movl 48(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %ebp # 4-byte Reload + adcl 32(%esp), %ebp # 4-byte Folded Reload + movl 56(%esp), %edx # 4-byte Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + movl 76(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 24(%esp), %esi # 4-byte Reload + addl 40(%esp), %esi # 4-byte Folded Reload + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + adcl 44(%esp), %edi # 4-byte Folded Reload + adcl 64(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 68(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 52(%esp) # 4-byte Spill + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 
%esi, %ebx + movl %esi, %ecx + imull 96(%esp), %ebx # 4-byte Folded Reload + movl %ebx, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 72(%esp) # 4-byte Spill + movl %ebx, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, %esi + movl %ebx, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %ebx, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %ebx, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, %ebp + movl %ebx, %eax + mull 128(%esp) # 4-byte Folded Reload + addl %ecx, %eax + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl 64(%esp), %ebx # 4-byte Reload + adcl %edi, %ebx + movl 68(%esp), %edi # 4-byte Reload + adcl 48(%esp), %edi # 4-byte Folded Reload + adcl 52(%esp), %esi # 4-byte Folded Reload + movl 72(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl 76(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %edx, %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 68(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 56(%esp) # 4-byte Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 72(%esp) # 4-byte Spill + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl 20(%eax), %ecx + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %ecx, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 100(%esp) # 4-byte Spill + movl %eax, 80(%esp) # 4-byte Spill + movl %ecx, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, %edi + movl %ecx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %eax, %ebp + movl %edx, %ebx + addl %edi, %ebx + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, %edi + movl 88(%esp), %edx # 4-byte Reload + adcl 44(%esp), %edx # 4-byte Folded Reload + movl 84(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl 60(%esp), %esi # 4-byte Reload + adcl $0, %esi + addl 52(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 52(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + adcl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 80(%esp) # 4-byte Spill + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + adcl 72(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 84(%esp) # 4-byte Spill + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 96(%esp), %ebx # 4-byte Reload + imull %ebp, %ebx + movl %ebx, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 96(%esp) # 4-byte Spill + movl %eax, 104(%esp) # 4-byte Spill + movl %ebx, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 
92(%esp) # 4-byte Spill + movl %eax, %ebp + movl %ebx, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, %esi + movl %ebx, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, %edi + movl %ebx, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, %ecx + movl %ebx, %eax + mull 112(%esp) # 4-byte Folded Reload + addl 52(%esp), %edi # 4-byte Folded Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + adcl 80(%esp), %ecx # 4-byte Folded Reload + adcl 88(%esp), %esi # 4-byte Folded Reload + adcl 84(%esp), %ebp # 4-byte Folded Reload + movl 104(%esp), %ebx # 4-byte Reload + adcl 100(%esp), %ebx # 4-byte Folded Reload + movl 60(%esp), %edi # 4-byte Reload + adcl $0, %edi + addl 72(%esp), %eax # 4-byte Folded Reload + adcl %edx, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + adcl 68(%esp), %esi # 4-byte Folded Reload + movl %esi, 100(%esp) # 4-byte Spill + adcl 76(%esp), %ebp # 4-byte Folded Reload + adcl 92(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 104(%esp) # 4-byte Spill + adcl 96(%esp), %edi # 4-byte Folded Reload + movl %eax, %edx + subl 128(%esp), %edx # 4-byte Folded Reload + sbbl 112(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebx + sbbl 116(%esp), %esi # 4-byte Folded Reload + movl %esi, 116(%esp) # 4-byte Spill + movl %ebp, %ecx + sbbl 120(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 120(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + sbbl 124(%esp), %esi # 4-byte Folded Reload + movl %esi, 128(%esp) # 4-byte Spill + movl %edi, %ecx + movl %edi, %esi + movl %ecx, %edi + sbbl 108(%esp), %edi # 4-byte Folded Reload + movl %edi, %ecx + sarl $31, %ecx + testl %ecx, %ecx + js .LBB84_2 +# BB#1: + movl %edx, %eax +.LBB84_2: + movl 152(%esp), %ecx + movl %eax, (%ecx) + movl 88(%esp), %eax # 4-byte Reload + js .LBB84_4 +# BB#3: + movl %ebx, %eax +.LBB84_4: + movl %eax, 4(%ecx) + movl %ecx, %ebx + movl %esi, %eax + movl 104(%esp), %ecx # 4-byte Reload + movl 100(%esp), %edx # 4-byte Reload + js .LBB84_6 +# BB#5: + movl 116(%esp), %edx # 4-byte Reload +.LBB84_6: + movl %edx, 8(%ebx) + movl %ebx, %edx + js .LBB84_8 +# BB#7: + movl 120(%esp), %ebp # 4-byte Reload +.LBB84_8: + movl %ebp, 12(%edx) + js .LBB84_10 +# BB#9: + movl 128(%esp), %ecx # 4-byte Reload +.LBB84_10: + movl %ecx, 16(%edx) + js .LBB84_12 +# BB#11: + movl %edi, %eax +.LBB84_12: + movl %eax, 20(%edx) + addl $132, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end84: + .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L + + .globl mcl_fp_montRed6L + .align 16, 0x90 + .type mcl_fp_montRed6L,@function +mcl_fp_montRed6L: # @mcl_fp_montRed6L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $104, %esp + movl 132(%esp), %eax + movl -4(%eax), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl (%eax), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 128(%esp), %ebp + movl (%ebp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + imull %edx, %ecx + movl 20(%eax), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 16(%eax), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 12(%eax), %ebx + movl %ebx, 88(%esp) # 4-byte Spill + movl 8(%eax), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 4(%eax), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte 
Spill + movl %ecx, %eax + mull %ebx + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 60(%esp) # 4-byte Spill + movl %ecx, %eax + mull %esi + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edi + movl %edx, %esi + movl %eax, %edi + movl %ecx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %eax, %ebx + addl %edi, %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, %eax + movl 44(%esp), %ecx # 4-byte Reload + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl 48(%esp), %edi # 4-byte Reload + adcl 64(%esp), %edi # 4-byte Folded Reload + movl 52(%esp), %esi # 4-byte Reload + adcl 68(%esp), %esi # 4-byte Folded Reload + movl 56(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 72(%esp), %ebx # 4-byte Folded Reload + movl 36(%esp), %ebx # 4-byte Reload + adcl 4(%ebp), %ebx + adcl 8(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill + adcl 12(%ebp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + adcl 16(%ebp), %edi + movl %edi, 48(%esp) # 4-byte Spill + adcl 20(%ebp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl 24(%ebp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 44(%ebp), %eax + movl 40(%ebp), %edx + movl 36(%ebp), %esi + movl 32(%ebp), %edi + movl 28(%ebp), %ecx + adcl $0, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 20(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %ebx, %esi + imull 96(%esp), %esi # 4-byte Folded Reload + movl %esi, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %esi, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %esi, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, (%esp) # 4-byte Spill + movl %esi, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, %ebp + movl %esi, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, %edi + movl %esi, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %eax, %esi + addl %edi, %edx + movl %edx, 24(%esp) # 4-byte Spill + adcl %ebp, %ecx + movl %ecx, %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl (%esp), %eax # 4-byte Folded Reload + movl 32(%esp), %edi # 4-byte Reload + adcl 4(%esp), %edi # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl 8(%esp), %edx # 4-byte Folded Reload + movl 40(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl %ebx, %esi + movl 24(%esp), %esi # 4-byte Reload + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 44(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%esp) # 4-byte Spill + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 40(%esp) # 4-byte Spill + adcl $0, 20(%esp) # 4-byte Folded Spill + adcl $0, 60(%esp) # 4-byte Folded Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + adcl $0, 72(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %esi, %ebx + imull 96(%esp), %ebx # 4-byte 
Folded Reload + movl %ebx, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ebx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %ebx, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ebx, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 4(%esp) # 4-byte Spill + movl %ebx, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, %ecx + movl %ebx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, %ebp + addl %ecx, %ebp + adcl 4(%esp), %edi # 4-byte Folded Reload + adcl 8(%esp), %esi # 4-byte Folded Reload + movl 48(%esp), %ebx # 4-byte Reload + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl 52(%esp), %edx # 4-byte Reload + adcl 16(%esp), %edx # 4-byte Folded Reload + movl 56(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 24(%esp), %eax # 4-byte Folded Reload + adcl 44(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 48(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl $0, 60(%esp) # 4-byte Folded Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + adcl $0, 72(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ebp, %ecx + imull 96(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %ecx, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ecx, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, %ebx + movl %ecx, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %eax, %edi + addl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl %ebx, %esi + movl %esi, %ebx + movl 32(%esp), %ecx # 4-byte Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl 36(%esp), %eax # 4-byte Reload + adcl 12(%esp), %eax # 4-byte Folded Reload + movl 40(%esp), %esi # 4-byte Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + movl 44(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl %ebp, %edi + movl 28(%esp), %edi # 4-byte Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 32(%esp) # 4-byte Spill + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 60(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + adcl $0, 72(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %edi, %esi + 
imull 96(%esp), %esi # 4-byte Folded Reload + movl %esi, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %esi, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %esi, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %esi, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 8(%esp) # 4-byte Spill + movl %esi, %eax + mull 80(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, %ecx + movl %esi, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %eax, %ebx + addl %ecx, %edx + movl %edx, %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 8(%esp), %eax # 4-byte Folded Reload + adcl 12(%esp), %edi # 4-byte Folded Reload + movl 52(%esp), %ecx # 4-byte Reload + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %esi # 4-byte Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + movl 60(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 28(%esp), %ebx # 4-byte Folded Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 56(%esp) # 4-byte Spill + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl $0, 72(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl 96(%esp), %ebx # 4-byte Reload + imull %ebp, %ebx + movl %ebx, 96(%esp) # 4-byte Spill + movl %ebp, %esi + movl %ebx, %eax + mull 76(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl %ebx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %eax + mull 88(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 32(%esp) # 4-byte Spill + movl %ebx, %eax + mull 84(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 24(%esp) # 4-byte Spill + movl %ebx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, %ebx + movl %eax, 28(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + mull 80(%esp) # 4-byte Folded Reload + addl %ebx, %eax + adcl 24(%esp), %edx # 4-byte Folded Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + adcl 40(%esp), %edi # 4-byte Folded Reload + adcl 44(%esp), %ebp # 4-byte Folded Reload + movl 64(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + addl %esi, 28(%esp) # 4-byte Folded Spill + adcl 48(%esp), %eax # 4-byte Folded Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 96(%esp) # 4-byte Spill + adcl 60(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 60(%esp) # 4-byte Spill + adcl 72(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + movl %eax, %esi + subl 92(%esp), %esi # 4-byte Folded Reload + movl %esi, 72(%esp) # 4-byte Spill + movl %edx, %esi + sbbl 80(%esp), %esi # 4-byte Folded Reload + movl %esi, 80(%esp) # 4-byte Spill + sbbl 84(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 84(%esp) # 4-byte Spill + sbbl 88(%esp), %edi # 4-byte 
Folded Reload
+ movl %edi, 88(%esp) # 4-byte Spill
+ sbbl 100(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ movl %edi, %esi
+ sbbl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB85_2
+# BB#1:
+ movl 80(%esp), %edx # 4-byte Reload
+.LBB85_2:
+ testb %bl, %bl
+ jne .LBB85_4
+# BB#3:
+ movl 72(%esp), %eax # 4-byte Reload
+.LBB85_4:
+ movl 124(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %edx, 4(%ebx)
+ movl 52(%esp), %ecx # 4-byte Reload
+ jne .LBB85_6
+# BB#5:
+ movl 84(%esp), %ecx # 4-byte Reload
+.LBB85_6:
+ movl %ecx, 8(%ebx)
+ movl %edi, %ecx
+ movl 60(%esp), %edi # 4-byte Reload
+ movl 96(%esp), %esi # 4-byte Reload
+ jne .LBB85_8
+# BB#7:
+ movl 88(%esp), %esi # 4-byte Reload
+.LBB85_8:
+ movl %esi, 12(%ebx)
+ jne .LBB85_10
+# BB#9:
+ movl 92(%esp), %edi # 4-byte Reload
+.LBB85_10:
+ movl %edi, 16(%ebx)
+ jne .LBB85_12
+# BB#11:
+ movl 100(%esp), %ecx # 4-byte Reload
+.LBB85_12:
+ movl %ecx, 20(%ebx)
+ addl $104, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end85:
+ .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L
+
+ .globl mcl_fp_addPre6L
+ .align 16, 0x90
+ .type mcl_fp_addPre6L,@function
+mcl_fp_addPre6L: # @mcl_fp_addPre6L
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 12(%esp), %edx
+ addl (%edx), %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 4(%eax), %ecx
+ adcl 4(%edx), %ecx
+ movl %ecx, 4(%esi)
+ movl 8(%eax), %ecx
+ adcl 8(%edx), %ecx
+ movl %ecx, 8(%esi)
+ movl 12(%edx), %ecx
+ adcl 12(%eax), %ecx
+ movl %ecx, 12(%esi)
+ movl 16(%edx), %ecx
+ adcl 16(%eax), %ecx
+ movl %ecx, 16(%esi)
+ movl 20(%eax), %eax
+ movl 20(%edx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 20(%esi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end86:
+ .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L
+
+ .globl mcl_fp_subPre6L
+ .align 16, 0x90
+ .type mcl_fp_subPre6L,@function
+mcl_fp_subPre6L: # @mcl_fp_subPre6L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %edx
+ xorl %eax, %eax
+ movl 20(%esp), %esi
+ subl (%esi), %edx
+ movl 12(%esp), %edi
+ movl %edx, (%edi)
+ movl 4(%ecx), %edx
+ sbbl 4(%esi), %edx
+ movl %edx, 4(%edi)
+ movl 8(%ecx), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 8(%edi)
+ movl 12(%ecx), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%edi)
+ movl 16(%ecx), %edx
+ sbbl 16(%esi), %edx
+ movl %edx, 16(%edi)
+ movl 20(%esi), %edx
+ movl 20(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 20(%edi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end87:
+ .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L
+
+ .globl mcl_fp_shr1_6L
+ .align 16, 0x90
+ .type mcl_fp_shr1_6L,@function
+mcl_fp_shr1_6L: # @mcl_fp_shr1_6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl 20(%eax), %ecx
+ movl 16(%eax), %edx
+ movl 12(%eax), %esi
+ movl 8(%eax), %edi
+ movl (%eax), %ebx
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %ebx
+ movl 20(%esp), %ebp
+ movl %ebx, (%ebp)
+ shrdl $1, %edi, %eax
+ movl %eax, 4(%ebp)
+ shrdl $1, %esi, %edi
+ movl %edi, 8(%ebp)
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ebp)
+ shrdl $1, %ecx, %edx
+ movl %edx, 16(%ebp)
+ shrl %ecx
+ movl %ecx, 20(%ebp)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end88:
+ .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L
+
+ .globl mcl_fp_add6L
+ .align 16, 0x90
+ .type mcl_fp_add6L,@function
+mcl_fp_add6L: # @mcl_fp_add6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $12, %esp
+ movl 40(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ebp
+ movl 36(%esp), %ebx
+ addl (%ebx), %edx
+ adcl 4(%ebx), %ebp
+ movl 8(%eax), %ecx
+ adcl 8(%ebx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %edi
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edi
+ movl 20(%ebx), %ebx
+ adcl 20(%eax), %ebx
+ movl 32(%esp), %eax
+ movl %edx, (%eax)
+ movl %ebp, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %ecx, 12(%eax)
+ movl %edi, 16(%eax)
+ movl %ebx, 20(%eax)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 44(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 8(%esp), %edx # 4-byte Reload
+ movl 44(%esp), %esi
+ sbbl 4(%esi), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ sbbl 8(%esi), %edx
+ sbbl 12(%esi), %ebp
+ sbbl 16(%esi), %edi
+ sbbl 20(%esi), %ebx
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB89_2
+# BB#1: # %nocarry
+ movl (%esp), %eax # 4-byte Reload
+ movl 32(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ecx)
+ movl %edx, 8(%ecx)
+ movl %ebp, 12(%ecx)
+ movl %edi, 16(%ecx)
+ movl %ebx, 20(%ecx)
+.LBB89_2: # %carry
+ addl $12, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end89:
+ .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L
+
+ .globl mcl_fp_addNF6L
+ .align 16, 0x90
+ .type mcl_fp_addNF6L,@function
+mcl_fp_addNF6L: # @mcl_fp_addNF6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 68(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 64(%esp), %ebp
+ addl (%ebp), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ adcl 4(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ movl 16(%eax), %esi
+ movl 12(%eax), %edi
+ movl 8(%eax), %eax
+ adcl 8(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 12(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 16(%ebp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ movl 72(%esp), %ebx
+ subl (%ebx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ movl 72(%esp), %ecx
+ sbbl 4(%ecx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ sbbl 8(%ecx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 12(%ecx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl %esi, %edi
+ sbbl 16(%ecx), %edi
+ movl %edx, %esi
+ sbbl 20(%ecx), %esi
+ movl %esi, %ebx
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ js .LBB90_2
+# BB#1:
+ movl (%esp), %eax # 4-byte Reload
+.LBB90_2:
+ movl 60(%esp), %ebx
+ movl %eax, (%ebx)
+ movl 20(%esp), %ecx # 4-byte Reload
+ js .LBB90_4
+# BB#3:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB90_4:
+ movl %ecx, 4(%ebx)
+ movl 36(%esp), %eax # 4-byte Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ movl 24(%esp), %ecx # 4-byte Reload
+ js .LBB90_6
+# BB#5:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB90_6:
+ movl %ecx, 8(%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ js .LBB90_8
+# BB#7:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB90_8:
+ movl %edx, 12(%ebx)
+ js .LBB90_10
+# BB#9:
+ movl %edi, %ecx
+.LBB90_10:
+ movl %ecx, 16(%ebx)
+ js .LBB90_12
+# BB#11:
+ movl %esi, %eax
+.LBB90_12:
+ movl %eax, 20(%ebx)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end90:
+ .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L
+
+ .globl mcl_fp_sub6L
+ .align 16, 0x90
+ .type mcl_fp_sub6L,@function
+mcl_fp_sub6L: # @mcl_fp_sub6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 40(%esp), %ebx
+ movl (%ebx), %esi
+ movl 4(%ebx), %edi
+ movl 44(%esp), %ecx
+ subl (%ecx), %esi
+ sbbl 4(%ecx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl 8(%ebx), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ sbbl 12(%ecx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 16(%ebx), %ebp
+ sbbl 16(%ecx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 20(%ebx), %edx
+ sbbl 20(%ecx), %edx
+ movl $0, %ecx
+ sbbl $0, %ecx
+ testb $1, %cl
+ movl 36(%esp), %ebx
+ movl %esi, (%ebx)
+ movl %edi, 4(%ebx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl %eax, 12(%ebx)
+ movl %ebp, 16(%ebx)
+ movl %edx, 20(%ebx)
+ je .LBB91_2
+# BB#1: # %carry
+ movl 48(%esp), %ecx
+ addl (%ecx), %esi
+ movl %esi, (%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 4(%ecx), %eax
+ adcl 8(%ecx), %edi
+ movl %eax, 4(%ebx)
+ movl 12(%ecx), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl %eax, 12(%ebx)
+ movl 16(%ecx), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ebx)
+ movl 20(%ecx), %eax
+ adcl %edx, %eax
+ movl %eax, 20(%ebx)
+.LBB91_2: # %nocarry
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end91:
+ .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L
+
+ .globl mcl_fp_subNF6L
+ .align 16, 0x90
+ .type mcl_fp_subNF6L,@function
+mcl_fp_subNF6L: # @mcl_fp_subNF6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 48(%esp), %ebx
+ movl 20(%ebx), %esi
+ movl (%ebx), %ecx
+ movl 4(%ebx), %eax
+ movl 52(%esp), %ebp
+ subl (%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 4(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 16(%ebx), %eax
+ movl 12(%ebx), %ecx
+ movl 8(%ebx), %edx
+ sbbl 8(%ebp), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl 16(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %edx
+ sbbl 20(%ebp), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl %edx, %ebp
+ sarl $31, %ebp
+ movl %ebp, %ecx
+ addl %ecx, %ecx
+ movl %ebp, %eax
+ adcl %eax, %eax
+ shrl $31, %edx
+ orl %ecx, %edx
+ movl 56(%esp), %ebx
+ andl 4(%ebx), %eax
+ andl (%ebx), %edx
+ movl 20(%ebx), %edi
+ andl %ebp, %edi
+ movl 16(%ebx), %esi
+ andl %ebp, %esi
+ movl 12(%ebx), %ecx
+ andl %ebp, %ecx
+ andl 8(%ebx), %ebp
+ addl 8(%esp), %edx # 4-byte Folded Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 44(%esp), %ebx
+ movl %edx, (%ebx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 4(%ebx)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 8(%ebx)
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, 12(%ebx)
+ movl %esi, 16(%ebx)
+ adcl (%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%ebx)
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end92:
+ .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L
+
+ .globl mcl_fpDbl_add6L
+ .align 16, 0x90
+ .type mcl_fpDbl_add6L,@function
+mcl_fpDbl_add6L: # @mcl_fpDbl_add6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 64(%esp), %edx
+ movl 60(%esp), %ecx
+ movl 12(%ecx), %esi
+ movl 16(%ecx), %eax
+ movl 8(%edx), %edi
+ movl (%edx), %ebx
+ addl (%ecx), %ebx
+ movl 56(%esp), %ebp
+ movl %ebx, (%ebp)
+ movl 4(%edx), %ebx
+ adcl 4(%ecx), %ebx
+ adcl 8(%ecx), %edi
+ adcl 12(%edx), %esi
+ adcl 16(%edx), %eax
+ movl %ebx, 4(%ebp)
+ movl %edx, %ebx
+ movl 32(%ebx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edi,
8(%ebp) + movl 20(%ebx), %edi + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + adcl %edi, %esi + movl 24(%ebx), %edi + movl %eax, 16(%ebp) + movl 24(%ecx), %edx + adcl %edi, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 28(%ebx), %edi + movl %esi, 20(%ebp) + movl 28(%ecx), %eax + adcl %edi, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 32(%ecx), %ebp + adcl 32(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 24(%esp) # 4-byte Spill + movl 36(%ebx), %esi + movl %ebx, %edi + movl 36(%ecx), %ebx + adcl %esi, %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 40(%edi), %esi + movl 40(%ecx), %edi + adcl %esi, %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 64(%esp), %esi + movl 44(%esi), %esi + movl 44(%ecx), %ecx + adcl %esi, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + sbbl %ecx, %ecx + andl $1, %ecx + movl 68(%esp), %esi + subl (%esi), %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 68(%esp), %edx + sbbl 4(%edx), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 8(%edx), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl %ebx, %ebp + sbbl 12(%edx), %ebp + movl %edi, %ebx + movl 12(%esp), %edi # 4-byte Reload + sbbl 16(%edx), %ebx + movl %edi, %eax + sbbl 20(%edx), %eax + sbbl $0, %ecx + andl $1, %ecx + jne .LBB93_2 +# BB#1: + movl %eax, %edi +.LBB93_2: + testb %cl, %cl + movl 20(%esp), %ecx # 4-byte Reload + movl 16(%esp), %edx # 4-byte Reload + jne .LBB93_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload + movl 4(%esp), %ecx # 4-byte Reload +.LBB93_4: + movl 56(%esp), %eax + movl %ecx, 24(%eax) + movl %edx, 28(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl 24(%esp), %edx # 4-byte Reload + jne .LBB93_6 +# BB#5: + movl 8(%esp), %edx # 4-byte Reload +.LBB93_6: + movl %edx, 32(%eax) + movl 28(%esp), %edx # 4-byte Reload + jne .LBB93_8 +# BB#7: + movl %ebp, %edx +.LBB93_8: + movl %edx, 36(%eax) + jne .LBB93_10 +# BB#9: + movl %ebx, %ecx +.LBB93_10: + movl %ecx, 40(%eax) + movl %edi, 44(%eax) + addl $36, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end93: + .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L + + .globl mcl_fpDbl_sub6L + .align 16, 0x90 + .type mcl_fpDbl_sub6L,@function +mcl_fpDbl_sub6L: # @mcl_fpDbl_sub6L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $24, %esp + movl 48(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %edi + movl 52(%esp), %esi + subl (%esi), %eax + sbbl 4(%esi), %edi + movl 8(%edx), %ebx + sbbl 8(%esi), %ebx + movl 44(%esp), %ecx + movl %eax, (%ecx) + movl 12(%edx), %eax + sbbl 12(%esi), %eax + movl %edi, 4(%ecx) + movl 16(%edx), %edi + sbbl 16(%esi), %edi + movl %ebx, 8(%ecx) + movl 20(%esi), %ebx + movl %eax, 12(%ecx) + movl 20(%edx), %eax + sbbl %ebx, %eax + movl 24(%esi), %ebx + movl %edi, 16(%ecx) + movl 24(%edx), %edi + sbbl %ebx, %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 28(%esi), %edi + movl %eax, 20(%ecx) + movl 28(%edx), %eax + sbbl %edi, %eax + movl %eax, (%esp) # 4-byte Spill + movl 32(%esi), %edi + movl 32(%edx), %eax + sbbl %edi, %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 36(%esi), %edi + movl 36(%edx), %eax + sbbl %edi, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 40(%esi), %edi + movl 40(%edx), %eax + sbbl %edi, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 44(%esi), %esi + movl 44(%edx), %eax + sbbl %esi, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl $0, %ebx + sbbl $0, %ebx + andl $1, %ebx + movl 56(%esp), %eax + jne .LBB94_1 +# BB#2: + xorl %edx, %edx + jmp .LBB94_3 +.LBB94_1: + movl 20(%eax), %edx +.LBB94_3: + testb %bl, %bl + jne .LBB94_4 +# BB#5: + movl $0, %esi + movl $0, %edi + 
jmp .LBB94_6 +.LBB94_4: + movl (%eax), %edi + movl 4(%eax), %esi +.LBB94_6: + jne .LBB94_7 +# BB#8: + movl $0, %ebx + jmp .LBB94_9 +.LBB94_7: + movl 16(%eax), %ebx +.LBB94_9: + jne .LBB94_10 +# BB#11: + movl $0, %ebp + jmp .LBB94_12 +.LBB94_10: + movl 12(%eax), %ebp +.LBB94_12: + jne .LBB94_13 +# BB#14: + xorl %eax, %eax + jmp .LBB94_15 +.LBB94_13: + movl 8(%eax), %eax +.LBB94_15: + addl 8(%esp), %edi # 4-byte Folded Reload + adcl (%esp), %esi # 4-byte Folded Reload + movl %edi, 24(%ecx) + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %esi, 28(%ecx) + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl %eax, 32(%ecx) + adcl 16(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 36(%ecx) + movl %ebx, 40(%ecx) + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 44(%ecx) + addl $24, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end94: + .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L + + .globl mcl_fp_mulUnitPre7L + .align 16, 0x90 + .type mcl_fp_mulUnitPre7L,@function +mcl_fp_mulUnitPre7L: # @mcl_fp_mulUnitPre7L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $36, %esp + movl 64(%esp), %esi + movl 60(%esp), %ebx + movl %esi, %eax + mull 24(%ebx) + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %esi, %eax + mull 20(%ebx) + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %esi, %eax + mull 16(%ebx) + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %esi, %eax + mull 12(%ebx) + movl %edx, %ebp + movl %eax, 8(%esp) # 4-byte Spill + movl %esi, %eax + mull 8(%ebx) + movl %edx, %ecx + movl %eax, 4(%esp) # 4-byte Spill + movl %esi, %eax + mull 4(%ebx) + movl %edx, %edi + movl %eax, (%esp) # 4-byte Spill + movl %esi, %eax + mull (%ebx) + movl 56(%esp), %esi + movl %eax, (%esi) + addl (%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%esi) + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esi) + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esi) + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esi) + movl 16(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%esi) + movl 24(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%esi) + movl 32(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 28(%esi) + addl $36, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end95: + .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L + + .globl mcl_fpDbl_mulPre7L + .align 16, 0x90 + .type mcl_fpDbl_mulPre7L,@function +mcl_fpDbl_mulPre7L: # @mcl_fpDbl_mulPre7L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $100, %esp + movl 124(%esp), %ebx + movl (%ebx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 128(%esp), %ecx + movl (%ecx), %edi + movl %ecx, %ebp + mull %edi + movl %edx, 96(%esp) # 4-byte Spill + movl 120(%esp), %ecx + movl %eax, (%ecx) + movl 4(%ebx), %ecx + movl 8(%ebx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 12(%ebx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 16(%ebx), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 20(%ebx), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 24(%ebx), %ebx + movl %ebx, 80(%esp) # 4-byte Spill + movl 4(%ebp), %ebp + movl %ecx, %eax + mull %ebp + movl %edx, 92(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + mull %ebp + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 52(%esp) # 4-byte Spill + movl %ecx, %eax + 
mull %edi + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebx, %eax + mull %ebp + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 60(%esp) # 4-byte Spill + movl %esi, %eax + mull %ebp + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl 4(%esp), %ebx # 4-byte Reload + movl %ebx, %eax + mull %ebp + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl 8(%esp), %esi # 4-byte Reload + movl %esi, %eax + mull %ebp + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, %eax + mull %ebp + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + mull %edi + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + mull %edi + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %ebx, %eax + mull %edi + movl %edx, %ebx + movl %eax, 4(%esp) # 4-byte Spill + movl %esi, %eax + mull %edi + movl %edx, %ebp + movl %eax, 8(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edi + movl %edx, %ecx + movl 24(%esp), %esi # 4-byte Reload + addl 96(%esp), %esi # 4-byte Folded Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl 8(%esp), %ecx # 4-byte Folded Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %ebx, %edi + adcl 12(%esp), %edi # 4-byte Folded Reload + movl 76(%esp), %edx # 4-byte Reload + adcl 20(%esp), %edx # 4-byte Folded Reload + movl 80(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + addl 52(%esp), %esi # 4-byte Folded Reload + movl 120(%esp), %eax + movl %esi, 4(%eax) + movl 96(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl %ecx, %eax + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %ebp, %ecx + adcl 36(%esp), %ecx # 4-byte Folded Reload + adcl 40(%esp), %edi # 4-byte Folded Reload + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, %esi + adcl 60(%esp), %ebx # 4-byte Folded Reload + sbbl %edx, %edx + andl $1, %edx + movl 96(%esp), %ebp # 4-byte Reload + addl 84(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 96(%esp) # 4-byte Spill + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 44(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 68(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + adcl 72(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 80(%esp) # 4-byte Spill + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + movl 124(%esp), %esi + movl 24(%esi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 128(%esp), %eax + movl 8(%eax), %edi + movl %ecx, %eax + mull %edi + movl %eax, 84(%esp) # 4-byte Spill + movl %edx, 68(%esp) # 4-byte Spill + movl 20(%esi), %eax + movl %eax, 12(%esp) # 4-byte Spill + mull %edi + movl %eax, 92(%esp) # 4-byte Spill + movl %edx, 64(%esp) # 4-byte Spill + movl 16(%esi), %eax + movl %eax, 8(%esp) # 4-byte Spill + mull %edi + movl %eax, 88(%esp) # 4-byte Spill + movl %edx, 60(%esp) # 4-byte Spill + movl 12(%esi), %eax + movl %eax, 40(%esp) # 4-byte Spill + mull %edi + movl %eax, %ebp + movl %edx, 56(%esp) # 4-byte Spill + movl 8(%esi), %eax + movl %eax, 36(%esp) # 4-byte Spill + mull %edi + movl %eax, 72(%esp) # 4-byte Spill + movl %edx, 52(%esp) # 4-byte 
Spill + movl (%esi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 4(%esi), %eax + movl %eax, (%esp) # 4-byte Spill + mull %edi + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, %ebx + movl %ecx, %eax + mull %edi + movl %edx, 28(%esp) # 4-byte Spill + addl 96(%esp), %eax # 4-byte Folded Reload + movl 120(%esp), %edx + movl %eax, 8(%edx) + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl 72(%esp), %esi # 4-byte Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 88(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 92(%esp) # 4-byte Folded Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 84(%esp) # 4-byte Folded Spill + sbbl %edi, %edi + movl 128(%esp), %eax + movl 12(%eax), %ecx + movl 16(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 96(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl 8(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl (%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl 4(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + mull %ecx + movl %eax, (%esp) # 4-byte Spill + movl %edx, 36(%esp) # 4-byte Spill + andl $1, %edi + addl 28(%esp), %ebx # 4-byte Folded Reload + adcl 48(%esp), %esi # 4-byte Folded Reload + movl %esi, 72(%esp) # 4-byte Spill + movl %ebp, %esi + adcl 52(%esp), %esi # 4-byte Folded Reload + movl 88(%esp), %edx # 4-byte Reload + adcl 56(%esp), %edx # 4-byte Folded Reload + movl 92(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl 84(%esp), %ecx # 4-byte Reload + adcl 64(%esp), %ecx # 4-byte Folded Reload + adcl 68(%esp), %edi # 4-byte Folded Reload + addl 4(%esp), %ebx # 4-byte Folded Reload + movl 120(%esp), %ebp + movl %ebx, 12(%ebp) + movl 72(%esp), %ebp # 4-byte Reload + adcl 12(%esp), %ebp # 4-byte Folded Reload + adcl (%esp), %esi # 4-byte Folded Reload + movl %esi, %ebx + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, %esi + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, %edx + adcl 24(%esp), %ecx # 4-byte Folded Reload + adcl 44(%esp), %edi # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 20(%esp), %ebp # 4-byte Folded Reload + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 88(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 92(%esp) # 4-byte Spill + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 84(%esp) # 4-byte Spill + adcl 80(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%esp) # 4-byte Spill + movl 124(%esp), %ebx + movl 24(%ebx), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 128(%esp), %eax + movl 16(%eax), %ecx + movl %edx, %eax + mull %ecx + movl %eax, 80(%esp) # 4-byte Spill + movl %edx, 60(%esp) # 4-byte Spill + movl 20(%ebx), %eax + movl %eax, 56(%esp) # 4-byte Spill + mull %ecx + movl %eax, 76(%esp) # 4-byte Spill + movl %edx, 
32(%esp) # 4-byte Spill + movl 16(%ebx), %eax + movl %eax, 52(%esp) # 4-byte Spill + mull %ecx + movl %eax, 72(%esp) # 4-byte Spill + movl %edx, 16(%esp) # 4-byte Spill + movl 12(%ebx), %eax + movl %eax, 48(%esp) # 4-byte Spill + mull %ecx + movl %eax, 68(%esp) # 4-byte Spill + movl %edx, 12(%esp) # 4-byte Spill + movl 8(%ebx), %eax + movl %eax, 44(%esp) # 4-byte Spill + mull %ecx + movl %eax, 64(%esp) # 4-byte Spill + movl %edx, 8(%esp) # 4-byte Spill + movl (%ebx), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 4(%ebx), %eax + movl %eax, 36(%esp) # 4-byte Spill + mull %ecx + movl %edx, 4(%esp) # 4-byte Spill + movl %eax, %esi + movl %edi, %eax + mull %ecx + movl %edx, (%esp) # 4-byte Spill + addl %ebp, %eax + movl 120(%esp), %ecx + movl %eax, 16(%ecx) + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, %edi + movl 64(%esp), %esi # 4-byte Reload + adcl 88(%esp), %esi # 4-byte Folded Reload + movl 68(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx # 4-byte Folded Reload + movl 72(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl 76(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl 80(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + sbbl %ebx, %ebx + andl $1, %ebx + addl (%esp), %edi # 4-byte Folded Reload + adcl 4(%esp), %esi # 4-byte Folded Reload + movl %esi, 64(%esp) # 4-byte Spill + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 72(%esp) # 4-byte Spill + adcl 16(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 76(%esp) # 4-byte Spill + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 92(%esp) # 4-byte Spill + movl 128(%esp), %eax + movl 20(%eax), %esi + movl 96(%esp), %eax # 4-byte Reload + mull %esi + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, %ecx + movl 56(%esp), %eax # 4-byte Reload + mull %esi + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 96(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + mull %esi + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 88(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + mull %esi + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 84(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + mull %esi + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, %ebp + movl 36(%esp), %eax # 4-byte Reload + mull %esi + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, %ebx + movl 40(%esp), %eax # 4-byte Reload + mull %esi + movl %edx, 40(%esp) # 4-byte Spill + addl %edi, %eax + movl 120(%esp), %edx + movl %eax, 20(%edx) + adcl 64(%esp), %ebx # 4-byte Folded Reload + adcl 68(%esp), %ebp # 4-byte Folded Reload + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 84(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 88(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 96(%esp) # 4-byte Folded Spill + adcl 92(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 92(%esp) # 4-byte Spill + movl 128(%esp), %eax + movl 24(%eax), %ecx + sbbl %esi, %esi + movl %ecx, %eax + movl 124(%esp), %edi + mull 24(%edi) + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %ecx, %eax + mull 20(%edi) + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 16(%edi) + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ecx, %eax + mull 12(%edi) 
+ movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, %eax + mull 8(%edi) + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ecx, %eax + mull 4(%edi) + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %ecx, %eax + mull (%edi) + movl %eax, (%esp) # 4-byte Spill + movl %edx, 12(%esp) # 4-byte Spill + andl $1, %esi + addl 40(%esp), %ebx # 4-byte Folded Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 84(%esp), %edx # 4-byte Reload + adcl 44(%esp), %edx # 4-byte Folded Reload + movl 88(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl 96(%esp), %edi # 4-byte Reload + adcl 52(%esp), %edi # 4-byte Folded Reload + movl 92(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 92(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + addl (%esp), %ebx # 4-byte Folded Reload + movl 120(%esp), %ecx + movl %ebx, 24(%ecx) + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, %ebx + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, %edx + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 96(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 28(%esp), %edi # 4-byte Folded Reload + adcl 64(%esp), %esi # 4-byte Folded Reload + sbbl %eax, %eax + andl $1, %eax + addl 12(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 28(%ecx) + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %ebx, 32(%ecx) + movl 96(%esp), %ebx # 4-byte Reload + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %edx, 36(%ecx) + movl %edi, %edx + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %ebx, 40(%ecx) + adcl 76(%esp), %esi # 4-byte Folded Reload + movl %edx, 44(%ecx) + movl %esi, 48(%ecx) + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%ecx) + addl $100, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end96: + .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L + + .globl mcl_fpDbl_sqrPre7L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre7L,@function +mcl_fpDbl_sqrPre7L: # @mcl_fpDbl_sqrPre7L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $100, %esp + movl 124(%esp), %esi + movl 24(%esi), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl (%esi), %ebx + movl 4(%esi), %edi + mull %edi + movl %eax, 68(%esp) # 4-byte Spill + movl %edx, 80(%esp) # 4-byte Spill + movl 20(%esi), %eax + movl %eax, 92(%esp) # 4-byte Spill + mull %edi + movl %eax, 60(%esp) # 4-byte Spill + movl %edx, 76(%esp) # 4-byte Spill + movl 16(%esi), %ecx + movl %ecx, %eax + mull %edi + movl %eax, 52(%esp) # 4-byte Spill + movl %edx, 72(%esp) # 4-byte Spill + movl 12(%esi), %esi + movl %esi, %eax + mull %edi + movl %eax, 48(%esp) # 4-byte Spill + movl %edx, 64(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl 8(%eax), %ebp + movl %ebp, %eax + mull %edi + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + mull %ebx + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + mull %ebx + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull %ebx + movl %edx, 96(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %esi, %eax + mull %ebx + movl %edx, 92(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ebp, %eax + mull %ebx + movl %edx, %ebp + 
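+# Note: mcl_fpDbl_sqrPre7L is the squaring counterpart of the multiply
+# above: one operand serves as both factors, so each round multiplies
+# the limb vector by one of its own limbs (including the x[i]*x[i]
+# self-product via mull on the same register). The generated code keeps
+# the generic one-mull-per-partial-product schedule rather than
+# doubling the cross terms.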
movl %eax, %ecx + movl %edi, %eax + mull %edi + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %edi, %eax + mull %ebx + movl %edx, %esi + movl %esi, 20(%esp) # 4-byte Spill + movl %eax, %edi + movl %ebx, %eax + mull %ebx + movl 120(%esp), %ebx + movl %eax, (%ebx) + addl %edi, %edx + adcl %esi, %ecx + adcl 16(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %esi + movl 28(%esp), %eax # 4-byte Reload + adcl %eax, 92(%esp) # 4-byte Folded Spill + movl 32(%esp), %eax # 4-byte Reload + adcl %eax, 96(%esp) # 4-byte Folded Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 88(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl %edi, %edx + movl %edx, 4(%ebx) + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebx + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, %edi + movl 92(%esp), %edx # 4-byte Reload + adcl 48(%esp), %edx # 4-byte Folded Reload + movl 96(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + adcl 60(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %esi + adcl 68(%esp), %eax # 4-byte Folded Reload + sbbl %ebp, %ebp + andl $1, %ebp + addl 20(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 28(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edx, 92(%esp) # 4-byte Spill + adcl 64(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 96(%esp) # 4-byte Spill + adcl 72(%esp), %esi # 4-byte Folded Reload + movl %esi, 84(%esp) # 4-byte Spill + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl 80(%esp), %ebp # 4-byte Folded Reload + movl 124(%esp), %edi + movl 24(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 8(%edi), %esi + mull %esi + movl %eax, 80(%esp) # 4-byte Spill + movl %edx, 40(%esp) # 4-byte Spill + movl 20(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + mull %esi + movl %eax, 76(%esp) # 4-byte Spill + movl %edx, 36(%esp) # 4-byte Spill + movl 16(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + mull %esi + movl %eax, 72(%esp) # 4-byte Spill + movl %edx, 16(%esp) # 4-byte Spill + movl 12(%edi), %ebx + movl %ebx, %eax + mull %esi + movl %eax, 60(%esp) # 4-byte Spill + movl %edx, 64(%esp) # 4-byte Spill + movl (%edi), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 4(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + mull %esi + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + mull %esi + movl %edx, 8(%esp) # 4-byte Spill + movl %eax, %edi + movl %esi, %eax + mull %esi + movl %eax, %ecx + movl %edx, 4(%esp) # 4-byte Spill + addl 28(%esp), %edi # 4-byte Folded Reload + movl 120(%esp), %eax + movl %edi, 8(%eax) + movl 68(%esp), %edx # 4-byte Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + adcl 92(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl 72(%esp), %esi # 4-byte Reload + adcl 84(%esp), %esi # 4-byte Folded Reload + movl 76(%esp), %edi # 4-byte Reload + adcl 88(%esp), %edi # 4-byte Folded Reload + movl 80(%esp), %eax # 4-byte Reload + adcl %ebp, %eax + sbbl %ebp, %ebp + andl $1, %ebp + addl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl 12(%esp), %edx # 4-byte Reload + adcl %edx, 56(%esp) # 4-byte Folded Spill + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 96(%esp) # 4-byte Spill + adcl 64(%esp), %esi # 4-byte Folded 
Reload + movl %esi, 72(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 76(%esp) # 4-byte Spill + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl 52(%esp), %eax # 4-byte Reload + mull %ebx + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 92(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + mull %ebx + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 88(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + mull %ebx + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, %esi + movl 24(%esp), %eax # 4-byte Reload + mull %ebx + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, %ecx + movl 20(%esp), %eax # 4-byte Reload + mull %ebx + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, %edi + movl %ebx, %eax + mull %ebx + movl %eax, %ebx + movl %edx, 16(%esp) # 4-byte Spill + addl 68(%esp), %edi # 4-byte Folded Reload + movl 120(%esp), %eax + movl %edi, 12(%eax) + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 60(%esp), %ecx # 4-byte Folded Reload + adcl 72(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 96(%esp) # 4-byte Spill + adcl 76(%esp), %esi # 4-byte Folded Reload + movl %esi, 60(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 88(%esp) # 4-byte Folded Spill + adcl %ebp, 92(%esp) # 4-byte Folded Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %edi + movl 20(%eax), %ebx + movl %edi, %eax + mull %ebx + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl %esi, %eax + mull %ebx + movl %eax, 40(%esp) # 4-byte Spill + movl %edx, 48(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl 16(%eax), %ebp + movl %edi, %eax + mull %ebp + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %esi, %eax + mull %ebp + movl %eax, 4(%esp) # 4-byte Spill + movl %edx, 12(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + addl %eax, 56(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 96(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 60(%esp) # 4-byte Folded Spill + movl 28(%esp), %eax # 4-byte Reload + adcl %eax, 88(%esp) # 4-byte Folded Spill + movl 32(%esp), %eax # 4-byte Reload + adcl %eax, 92(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 124(%esp), %esi + movl 24(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill + mull %ebp + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 76(%esp) # 4-byte Spill + movl %ebx, %eax + mull %ebp + movl %eax, 64(%esp) # 4-byte Spill + movl %edx, 68(%esp) # 4-byte Spill + movl 12(%esi), %eax + movl %eax, 28(%esp) # 4-byte Spill + mull %ebp + movl %eax, 72(%esp) # 4-byte Spill + movl %edx, 20(%esp) # 4-byte Spill + movl 8(%esi), %eax + movl %eax, 24(%esp) # 4-byte Spill + mull %ebp + movl %edx, 16(%esp) # 4-byte Spill + movl %eax, %edi + movl %ebp, %eax + mull %ebp + movl %eax, %esi + movl %edx, (%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + addl 4(%esp), %ebp # 4-byte Folded Reload + movl 120(%esp), %eax + movl %ebp, 16(%eax) + movl %ecx, %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %edi, %ebp + adcl 96(%esp), %ebp # 4-byte Folded Reload + 
movl 72(%esp), %ecx # 4-byte Reload + adcl 60(%esp), %ecx # 4-byte Folded Reload + adcl 88(%esp), %esi # 4-byte Folded Reload + movl %esi, 88(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 64(%esp), %esi # 4-byte Folded Reload + movl 76(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx # 4-byte Folded Reload + sbbl %edi, %edi + andl $1, %edi + addl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl 80(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 80(%esp) # 4-byte Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 72(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 20(%esp), %ebp # 4-byte Folded Reload + adcl (%esp), %esi # 4-byte Folded Reload + movl %esi, 92(%esp) # 4-byte Spill + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl 32(%esp), %eax # 4-byte Reload + mull %ebx + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 88(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + mull %ebx + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 84(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + mull %ebx + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, %ecx + movl %ebx, %eax + mull %ebx + movl %eax, %esi + movl %edx, 32(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + addl 40(%esp), %eax # 4-byte Folded Reload + movl 120(%esp), %edx + movl %eax, 20(%edx) + movl 80(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %ecx, %edx + adcl 72(%esp), %edx # 4-byte Folded Reload + movl 84(%esp), %ebx # 4-byte Reload + adcl %ebp, %ebx + movl 92(%esp), %ecx # 4-byte Reload + adcl 64(%esp), %ecx # 4-byte Folded Reload + adcl 76(%esp), %esi # 4-byte Folded Reload + movl 88(%esp), %ebp # 4-byte Reload + adcl %edi, %ebp + sbbl %edi, %edi + andl $1, %edi + addl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 84(%esp) # 4-byte Spill + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 92(%esp) # 4-byte Spill + adcl 68(%esp), %esi # 4-byte Folded Reload + movl %esi, 64(%esp) # 4-byte Spill + adcl 32(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 88(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + movl 124(%esp), %esi + movl 24(%esi), %ecx + movl %ecx, %eax + mull 20(%esi) + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + mull 16(%esi) + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 96(%esp) # 4-byte Spill + movl %ecx, %eax + mull 12(%esi) + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, %ebx + movl %ecx, %eax + mull 8(%esi) + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, %ebp + movl %ecx, %eax + mull 4(%esi) + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, %edi + movl %ecx, %eax + mull (%esi) + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, %esi + movl %ecx, %eax + mull %ecx + movl %edx, 52(%esp) # 4-byte Spill + addl 80(%esp), %esi # 4-byte Folded Reload + movl 120(%esp), %edx + movl %esi, 24(%edx) + movl %edx, %esi + adcl 56(%esp), %edi # 4-byte Folded Reload + adcl 84(%esp), %ebp # 4-byte Folded Reload + adcl 92(%esp), %ebx # 4-byte Folded Reload + movl 64(%esp), %ecx # 4-byte Reload + adcl %ecx, 96(%esp) # 4-byte Folded Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + adcl 
60(%esp), %eax # 4-byte Folded Reload + sbbl %ecx, %ecx + andl $1, %ecx + addl 36(%esp), %edi # 4-byte Folded Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %edi, 28(%esi) + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 32(%esi) + movl 96(%esp), %edi # 4-byte Reload + adcl 68(%esp), %edi # 4-byte Folded Reload + movl %ebx, 36(%esi) + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %edi, 40(%esi) + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %edx, 44(%esi) + movl %eax, 48(%esi) + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esi) + addl $100, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end97: + .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L + + .globl mcl_fp_mont7L + .align 16, 0x90 + .type mcl_fp_mont7L,@function +mcl_fp_mont7L: # @mcl_fp_mont7L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $152, %esp + movl 176(%esp), %esi + movl (%esi), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 180(%esp), %edx + movl (%edx), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + mull %ecx + movl %eax, 80(%esp) # 4-byte Spill + movl %edx, 76(%esp) # 4-byte Spill + movl 184(%esp), %ecx + movl -4(%ecx), %edx + movl %edx, 132(%esp) # 4-byte Spill + movl %eax, %ebx + imull %edx, %ebx + movl (%ecx), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 24(%ecx), %edx + movl %edx, 120(%esp) # 4-byte Spill + movl 20(%ecx), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 16(%ecx), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 12(%ecx), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 8(%ecx), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 4(%ecx), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 4(%esi), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl %esi, %eax + movl 24(%eax), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 20(%eax), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 16(%eax), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 12(%eax), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 8(%eax), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %ebx, %eax + mull %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %ebx, %eax + mull 148(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %eax + mull 144(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ebx, %eax + mull 140(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebx, %eax + mull 136(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebx, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %ebx, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + movl 72(%esp), %ecx # 4-byte Reload + mull %ecx + movl %edx, %esi + movl %eax, 16(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %ebp, %eax + mull %ecx + movl %edx, %ebp + movl %eax, 4(%esp) # 4-byte Spill + movl %edi, %eax + mull %ecx + movl %edx, %edi + movl %eax, (%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, %ebx + movl %eax, 8(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + mull %ecx + addl 76(%esp), 
%eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl 8(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + adcl (%esp), %ebx # 4-byte Folded Reload + movl %ebx, (%esp) # 4-byte Spill + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 4(%esp) # 4-byte Spill + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 84(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + addl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 44(%esp), %ebx # 4-byte Reload + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl 48(%esp), %edi # 4-byte Reload + adcl 32(%esp), %edi # 4-byte Folded Reload + movl 52(%esp), %edx # 4-byte Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + movl 56(%esp), %ecx # 4-byte Reload + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl 60(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl 64(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl 28(%esp), %ebp # 4-byte Reload + addl 80(%esp), %ebp # 4-byte Folded Reload + movl 76(%esp), %ebp # 4-byte Reload + adcl %ebp, 88(%esp) # 4-byte Folded Spill + adcl 8(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + adcl (%esp), %edi # 4-byte Folded Reload + movl %edi, 48(%esp) # 4-byte Spill + adcl 4(%esp), %edx # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 72(%esp), %esi # 4-byte Folded Reload + movl %esi, 64(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl 4(%eax), %ecx + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ecx, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ecx, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 84(%esp) # 4-byte Spill + movl %ecx, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, %edi + movl %ecx, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, %ebx + addl %edi, %ebx + adcl 84(%esp), %esi # 4-byte Folded Reload + movl %esi, 84(%esp) # 4-byte Spill + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl 80(%esp), %esi # 4-byte Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + movl 76(%esp), %edx # 4-byte Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + movl 72(%esp), %ecx # 4-byte Reload + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl 68(%esp), %edi # 4-byte Reload + adcl $0, %edi + addl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + adcl 52(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 48(%esp) # 4-byte Spill + adcl 56(%esp), %esi # 4-byte Folded Reload + movl 
%esi, 80(%esp) # 4-byte Spill + adcl 60(%esp), %edx # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + adcl 64(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 72(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 68(%esp) # 4-byte Spill + sbbl %eax, %eax + movl 88(%esp), %ecx # 4-byte Reload + imull 132(%esp), %ecx # 4-byte Folded Reload + andl $1, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ecx, %eax + mull 148(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull 144(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 140(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ecx, %eax + mull 136(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, %ebx + movl %ecx, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %eax, %ebp + addl %ebx, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, %eax + movl 52(%esp), %ebx # 4-byte Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl 56(%esp), %edx # 4-byte Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + movl 60(%esp), %ecx # 4-byte Reload + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl 64(%esp), %edi # 4-byte Reload + adcl $0, %edi + addl 88(%esp), %ebp # 4-byte Folded Reload + movl 20(%esp), %ebp # 4-byte Reload + adcl 44(%esp), %ebp # 4-byte Folded Reload + adcl 84(%esp), %esi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%esp) # 4-byte Spill + adcl 80(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 52(%esp) # 4-byte Spill + adcl 76(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 72(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + adcl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 64(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 180(%esp), %eax + movl 8(%eax), %ebx + movl %ebx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %ebx, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ebx, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %ebx, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 44(%esp) # 4-byte Spill + movl %ebx, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, %edi + movl %ebx, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + addl %edi, %edx + movl %edx, %edi + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 44(%esp) # 4-byte Spill + adcl 48(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 84(%esp), %ebx # 
4-byte Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl 80(%esp), %edx # 4-byte Reload + adcl 68(%esp), %edx # 4-byte Folded Reload + movl 76(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 72(%esp), %esi # 4-byte Reload + addl %ebp, %esi + movl %esi, 72(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + adcl %edi, 44(%esp) # 4-byte Folded Spill + movl 52(%esp), %edi # 4-byte Reload + adcl %edi, 48(%esp) # 4-byte Folded Spill + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 88(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 84(%esp) # 4-byte Spill + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %esi, %ecx + imull 132(%esp), %ecx # 4-byte Folded Reload + andl $1, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 52(%esp) # 4-byte Spill + movl %ecx, %eax + mull 148(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull 144(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 140(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ecx, %eax + mull 136(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, %ebx + movl %ecx, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %eax, %ebp + addl %ebx, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, %eax + movl 56(%esp), %ebx # 4-byte Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl 60(%esp), %edx # 4-byte Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + movl 64(%esp), %edi # 4-byte Reload + adcl 52(%esp), %edi # 4-byte Folded Reload + movl 68(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 72(%esp), %ebp # 4-byte Folded Reload + movl 20(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 88(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + adcl 84(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl 80(%esp), %edi # 4-byte Folded Reload + movl %edi, 64(%esp) # 4-byte Spill + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 180(%esp), %eax + movl 12(%eax), %ebx + movl %ebx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebx, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ebx, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 52(%esp) # 4-byte Spill + movl %ebx, %eax + mull 112(%esp) # 4-byte 
Folded Reload + movl %edx, %esi + movl %eax, 48(%esp) # 4-byte Spill + movl %ebx, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, %edi + movl %ebx, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + addl %edi, %edx + movl %edx, %edi + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 52(%esp), %esi # 4-byte Folded Reload + movl %esi, 52(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 84(%esp), %ebx # 4-byte Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl 80(%esp), %edx # 4-byte Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + movl 76(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 72(%esp), %esi # 4-byte Reload + addl %ebp, %esi + movl %esi, 72(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl %edi, 48(%esp) # 4-byte Folded Spill + movl 56(%esp), %edi # 4-byte Reload + adcl %edi, 52(%esp) # 4-byte Folded Spill + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 88(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 84(%esp) # 4-byte Spill + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %esi, %ecx + imull 132(%esp), %ecx # 4-byte Folded Reload + andl $1, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + mull 148(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull 144(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 140(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ecx, %eax + mull 136(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, %ebx + movl %ecx, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %eax, %ebp + addl %ebx, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, %eax + movl 56(%esp), %ebx # 4-byte Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl 60(%esp), %edx # 4-byte Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + movl 64(%esp), %edi # 4-byte Reload + adcl 40(%esp), %edi # 4-byte Folded Reload + movl 68(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 72(%esp), %ebp # 4-byte Folded Reload + movl 20(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + adcl 48(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + adcl 88(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + adcl 84(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl 80(%esp), %edi # 4-byte Folded Reload + movl %edi, 64(%esp) # 4-byte Spill + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 180(%esp), %eax + movl 16(%eax), %ebx + movl 
%ebx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebx, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ebx, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 52(%esp) # 4-byte Spill + movl %ebx, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 48(%esp) # 4-byte Spill + movl %ebx, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, %edi + movl %ebx, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + addl %edi, %edx + movl %edx, %edi + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 52(%esp), %esi # 4-byte Folded Reload + movl %esi, 52(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 84(%esp), %ebx # 4-byte Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl 80(%esp), %edx # 4-byte Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + movl 76(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 72(%esp), %esi # 4-byte Reload + addl %ebp, %esi + movl %esi, 72(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl %edi, 48(%esp) # 4-byte Folded Spill + movl 56(%esp), %edi # 4-byte Reload + adcl %edi, 52(%esp) # 4-byte Folded Spill + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 88(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 84(%esp) # 4-byte Spill + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %esi, %ecx + imull 132(%esp), %ecx # 4-byte Folded Reload + andl $1, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ecx, %eax + mull 148(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull 144(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 140(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ecx, %eax + mull 136(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, %ebx + movl %ecx, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %eax, %ebp + addl %ebx, %edx + movl %edx, 20(%esp) # 4-byte Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, %eax + movl 56(%esp), %ebx # 4-byte Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl 60(%esp), %edx # 4-byte Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + movl 64(%esp), %edi # 4-byte Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + movl 68(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 72(%esp), %ebp # 4-byte Folded Reload + movl 20(%esp), %ebp # 4-byte Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + adcl 48(%esp), %esi 
# 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + adcl 88(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + adcl 84(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl 80(%esp), %edi # 4-byte Folded Reload + movl %edi, 64(%esp) # 4-byte Spill + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 180(%esp), %eax + movl 20(%eax), %ebx + movl %ebx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebx, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ebx, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 52(%esp) # 4-byte Spill + movl %ebx, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 48(%esp) # 4-byte Spill + movl %ebx, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, %edi + movl %ebx, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + addl %edi, %edx + movl %edx, %edi + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 52(%esp), %esi # 4-byte Folded Reload + movl %esi, 52(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 84(%esp), %ebx # 4-byte Reload + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl 80(%esp), %edx # 4-byte Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + movl 76(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 72(%esp), %esi # 4-byte Reload + addl %ebp, %esi + movl %esi, 72(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl %edi, 48(%esp) # 4-byte Folded Spill + movl 56(%esp), %edi # 4-byte Reload + adcl %edi, 52(%esp) # 4-byte Folded Spill + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 88(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 84(%esp) # 4-byte Spill + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %esi, %ecx + imull 132(%esp), %ecx # 4-byte Folded Reload + andl $1, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ecx, %eax + mull 148(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull 144(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 140(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ecx, %eax + mull 136(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, %ebp + movl %ecx, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %eax, %ebx + addl %ebp, %edx + movl %edx, 20(%esp) # 4-byte 
Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, %eax + movl 56(%esp), %ebp # 4-byte Reload + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl 60(%esp), %edx # 4-byte Reload + adcl 32(%esp), %edx # 4-byte Folded Reload + movl 64(%esp), %edi # 4-byte Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + movl 68(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 72(%esp), %ebx # 4-byte Folded Reload + movl 20(%esp), %ebx # 4-byte Reload + adcl 40(%esp), %ebx # 4-byte Folded Reload + adcl 48(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + adcl 88(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 56(%esp) # 4-byte Spill + adcl 84(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl 80(%esp), %edi # 4-byte Folded Reload + movl %edi, 64(%esp) # 4-byte Spill + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 180(%esp), %eax + movl 24(%eax), %ebp + movl %ebp, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 80(%esp) # 4-byte Spill + movl %ebp, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 76(%esp) # 4-byte Spill + movl %ebp, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 100(%esp) # 4-byte Spill + movl %eax, 96(%esp) # 4-byte Spill + movl %ebp, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 108(%esp) # 4-byte Spill + movl %eax, 92(%esp) # 4-byte Spill + movl %ebp, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 112(%esp) # 4-byte Spill + movl %ebp, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, %edi + movl %ebp, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + addl %edi, %edx + movl %edx, %edi + adcl 112(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 112(%esp) # 4-byte Spill + adcl 92(%esp), %esi # 4-byte Folded Reload + movl %esi, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 96(%esp), %ecx # 4-byte Folded Reload + movl 100(%esp), %ebp # 4-byte Reload + adcl 76(%esp), %ebp # 4-byte Folded Reload + movl 88(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl 84(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 116(%esp), %esi # 4-byte Reload + addl %ebx, %esi + movl %esi, 116(%esp) # 4-byte Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 92(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl %edi, 112(%esp) # 4-byte Folded Spill + movl 56(%esp), %edi # 4-byte Reload + adcl %edi, 104(%esp) # 4-byte Folded Spill + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 108(%esp) # 4-byte Spill + adcl 64(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 100(%esp) # 4-byte Spill + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + sbbl %eax, %eax + movl 132(%esp), %ecx # 4-byte Reload + imull %esi, %ecx + movl %ecx, 132(%esp) # 4-byte Spill + andl $1, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl %ecx, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, 76(%esp) # 4-byte Spill + movl %ecx, %eax + mull 148(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 72(%esp) # 4-byte Spill + 
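+# Note: mcl_fp_mont7L is a word-serial (CIOS-style) Montgomery
+# multiply. Each of the seven rounds folds in one multiplier limb,
+# then derives the reduction factor with the imull against the cached
+# -4(modulus) word (i.e. -p^{-1} mod 2^32) and adds m*p so the low
+# limb cancels. A rough C-level sketch, with illustrative names only
+# (x, y, p are 7-word little-endian arrays, t an 8-word accumulator,
+# p0inv the -4(modulus) word; multi-word ops in vector shorthand):
+#
+#   for (i = 0; i < 7; i++) {
+#     t += (uint64)y[i] * x;         // the seven mull/adcl chains
+#     m  = (uint32)(t[0] * p0inv);   // imull 132(%esp)
+#     t += (uint64)m * p;            // second mull chain per round
+#     t >>= 32;                      // exact shift: low word is now 0
+#   }
+#   if (t >= p) t -= p;              // the sbbl/jne selection ladder below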
movl %ecx, %eax + mull 144(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + mull 140(%esp) # 4-byte Folded Reload + movl %edx, %ebx + movl %eax, 60(%esp) # 4-byte Spill + movl %ecx, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %ecx, %eax + mull 136(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, %edi + movl 132(%esp), %eax # 4-byte Reload + mull 124(%esp) # 4-byte Folded Reload + addl 56(%esp), %eax # 4-byte Folded Reload + adcl %edi, %edx + adcl 60(%esp), %ecx # 4-byte Folded Reload + adcl 68(%esp), %ebx # 4-byte Folded Reload + adcl 72(%esp), %ebp # 4-byte Folded Reload + adcl 76(%esp), %esi # 4-byte Folded Reload + movl %esi, 132(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl 64(%esp), %esi # 4-byte Reload + addl 116(%esp), %esi # 4-byte Folded Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + adcl 112(%esp), %edx # 4-byte Folded Reload + adcl 104(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 104(%esp) # 4-byte Spill + adcl 108(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 116(%esp) # 4-byte Spill + adcl 100(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 112(%esp) # 4-byte Spill + movl 132(%esp), %esi # 4-byte Reload + adcl 88(%esp), %esi # 4-byte Folded Reload + movl %esi, 132(%esp) # 4-byte Spill + adcl 84(%esp), %edi # 4-byte Folded Reload + adcl $0, 96(%esp) # 4-byte Folded Spill + movl %eax, %esi + subl 128(%esp), %esi # 4-byte Folded Reload + movl %esi, 108(%esp) # 4-byte Spill + movl %edx, %esi + sbbl 124(%esp), %esi # 4-byte Folded Reload + movl %esi, 124(%esp) # 4-byte Spill + sbbl 136(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 128(%esp) # 4-byte Spill + sbbl 140(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 136(%esp) # 4-byte Spill + sbbl 144(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 140(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + movl %ecx, %ebx + movl %ecx, %ebp + sbbl 148(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 144(%esp) # 4-byte Spill + movl %edi, %ebx + sbbl 120(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 148(%esp) # 4-byte Spill + movl 96(%esp), %ebx # 4-byte Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB98_2 +# BB#1: + movl 108(%esp), %eax # 4-byte Reload +.LBB98_2: + movl 172(%esp), %esi + movl %eax, (%esi) + testb %bl, %bl + jne .LBB98_4 +# BB#3: + movl 124(%esp), %edx # 4-byte Reload +.LBB98_4: + movl %edx, 4(%esi) + movl 104(%esp), %ecx # 4-byte Reload + jne .LBB98_6 +# BB#5: + movl 128(%esp), %ecx # 4-byte Reload +.LBB98_6: + movl %ecx, 8(%esi) + movl 112(%esp), %ecx # 4-byte Reload + movl 116(%esp), %eax # 4-byte Reload + jne .LBB98_8 +# BB#7: + movl 136(%esp), %eax # 4-byte Reload +.LBB98_8: + movl %eax, 12(%esi) + jne .LBB98_10 +# BB#9: + movl 140(%esp), %ecx # 4-byte Reload +.LBB98_10: + movl %ecx, 16(%esi) + jne .LBB98_12 +# BB#11: + movl 144(%esp), %ebp # 4-byte Reload +.LBB98_12: + movl %ebp, 20(%esi) + jne .LBB98_14 +# BB#13: + movl 148(%esp), %edi # 4-byte Reload +.LBB98_14: + movl %edi, 24(%esi) + addl $152, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end98: + .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L + + .globl mcl_fp_montNF7L + .align 16, 0x90 + .type mcl_fp_montNF7L,@function +mcl_fp_montNF7L: # @mcl_fp_montNF7L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $152, %esp + movl 176(%esp), %ebp + movl (%ebp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 180(%esp), 
%ecx + movl (%ecx), %ecx + mull %ecx + movl %eax, 88(%esp) # 4-byte Spill + movl %edx, 84(%esp) # 4-byte Spill + movl 184(%esp), %esi + movl -4(%esi), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl %eax, %edi + imull %edx, %edi + movl (%esi), %edx + movl %edx, 148(%esp) # 4-byte Spill + movl 24(%esi), %edx + movl %edx, 124(%esp) # 4-byte Spill + movl 20(%esi), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 16(%esi), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 12(%esi), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 8(%esi), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 4(%esi), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 4(%ebp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 24(%ebp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 20(%ebp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 16(%ebp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 12(%ebp), %ebx + movl %ebx, 96(%esp) # 4-byte Spill + movl 8(%ebp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl %edi, %eax + mull %edx + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %edi, %eax + mull 144(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %edi, %eax + mull 140(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %edi, %eax + mull 136(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %edi, %eax + mull 132(%esp) # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %edi, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %edi, %eax + mull 148(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + mull %ecx + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %esi, %eax + mull %ecx + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %ebx, %eax + mull %ecx + movl %edx, %esi + movl %eax, %ebx + movl %ebp, %eax + mull %ecx + movl %edx, %ebp + movl %eax, (%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + mull %ecx + movl %eax, %edi + addl 84(%esp), %edi # 4-byte Folded Reload + adcl (%esp), %edx # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + adcl %ebx, %ebp + movl %esi, %edx + adcl 4(%esp), %edx # 4-byte Folded Reload + movl 72(%esp), %esi # 4-byte Reload + adcl 8(%esp), %esi # 4-byte Folded Reload + movl 76(%esp), %ecx # 4-byte Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl 80(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 16(%esp), %ebx # 4-byte Reload + addl 88(%esp), %ebx # 4-byte Folded Reload + movl %edi, %ebx + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl 84(%esp), %edi # 4-byte Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + adcl 32(%esp), %ebp # 4-byte Folded Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + adcl 48(%esp), %esi # 4-byte Folded Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + adcl $0, %eax + addl 28(%esp), %ebx # 4-byte Folded Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + adcl 44(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 40(%esp) # 4-byte Spill + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 
44(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 72(%esp) # 4-byte Spill + adcl 64(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 76(%esp) # 4-byte Spill + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl 4(%eax), %ecx + movl %ecx, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ecx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 52(%esp) # 4-byte Spill + movl %ecx, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + addl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + adcl 28(%esp), %ebp # 4-byte Folded Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl 64(%esp), %edx # 4-byte Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + movl 88(%esp), %ecx # 4-byte Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl 84(%esp), %esi # 4-byte Reload + adcl $0, %esi + addl %ebx, 48(%esp) # 4-byte Folded Spill + adcl %edi, 52(%esp) # 4-byte Folded Spill + movl %ebp, %edi + adcl 40(%esp), %edi # 4-byte Folded Reload + movl 44(%esp), %ebx # 4-byte Reload + adcl %ebx, 56(%esp) # 4-byte Folded Spill + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 76(%esp), %edx # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 88(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, %ecx + imull 108(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 80(%esp) # 4-byte Spill + movl %ecx, %eax + mull 144(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 76(%esp) # 4-byte Spill + movl %ecx, %eax + mull 140(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 72(%esp) # 4-byte Spill + movl %ecx, %eax + mull 136(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + mull 132(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, %esi + movl %ecx, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, %ebx + movl %ecx, %eax + mull 148(%esp) # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + addl %ebp, %eax + adcl 52(%esp), %ebx # 4-byte Folded Reload + adcl %edi, %esi + movl %esi, %edi + movl 68(%esp), %edx # 4-byte Reload + adcl 56(%esp), %edx # 4-byte Folded Reload + movl 72(%esp), %esi # 4-byte Reload + adcl 60(%esp), %esi # 4-byte Folded Reload + movl 76(%esp), %ebp # 4-byte Reload + adcl 64(%esp), %ebp # 4-byte Folded Reload + movl 80(%esp), %ecx # 4-byte Reload + adcl 
88(%esp), %ecx # 4-byte Folded Reload + movl 84(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 64(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 72(%esp) # 4-byte Spill + adcl 32(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 76(%esp) # 4-byte Spill + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 80(%esp) # 4-byte Spill + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl 8(%eax), %ebp + movl %ebp, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ebp, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, %edi + movl %ebp, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebp, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ebp, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 48(%esp) # 4-byte Spill + movl %ebp, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, %ebx + movl %ebp, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl %edx, %ebp + addl %ebx, %ebp + adcl 48(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebx + movl 52(%esp), %ecx # 4-byte Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %esi # 4-byte Reload + adcl %edi, %esi + movl 60(%esp), %edx # 4-byte Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + movl 88(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 44(%esp), %edi # 4-byte Reload + addl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 44(%esp) # 4-byte Spill + adcl 64(%esp), %ebp # 4-byte Folded Reload + movl 48(%esp), %edi # 4-byte Reload + adcl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 48(%esp) # 4-byte Spill + adcl 72(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl 80(%esp), %esi # 4-byte Folded Reload + movl %esi, 56(%esp) # 4-byte Spill + adcl 84(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + movl %edi, %ebx + imull 108(%esp), %ebx # 4-byte Folded Reload + movl %ebx, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 80(%esp) # 4-byte Spill + movl %ebx, %eax + mull 144(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 76(%esp) # 4-byte Spill + movl %ebx, %eax + mull 140(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 72(%esp) # 4-byte Spill + movl %ebx, %eax + mull 136(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %ebx, %eax + mull 132(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %ebx, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, %ecx + movl %ebx, %eax + mull 148(%esp) # 
4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + addl %edi, %eax + adcl %ebp, %ecx + movl %ecx, %esi + movl 64(%esp), %edx # 4-byte Reload + adcl 48(%esp), %edx # 4-byte Folded Reload + movl 68(%esp), %ebp # 4-byte Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl 72(%esp), %ebx # 4-byte Reload + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl 76(%esp), %edi # 4-byte Reload + adcl 56(%esp), %edi # 4-byte Folded Reload + movl 80(%esp), %ecx # 4-byte Reload + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl 88(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 68(%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 72(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 76(%esp) # 4-byte Spill + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 80(%esp) # 4-byte Spill + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl 12(%eax), %edi + movl %edi, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %edi, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %edi, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %edi, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %edi, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 24(%esp) # 4-byte Spill + movl %edi, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, %ebp + movl %edi, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl %edx, %ebx + addl %ebp, %ebx + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, %edi + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %ebp # 4-byte Reload + adcl 32(%esp), %ebp # 4-byte Folded Reload + movl 60(%esp), %edx # 4-byte Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + movl 84(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 44(%esp), %esi # 4-byte Reload + addl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 44(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + adcl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 72(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl 80(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 56(%esp) # 4-byte Spill + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + movl %edi, %ebp + imull 108(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 80(%esp) # 4-byte Spill + movl %ebp, %eax + mull 144(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 76(%esp) # 4-byte Spill + movl 
%ebp, %eax + mull 140(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 72(%esp) # 4-byte Spill + movl %ebp, %eax + mull 136(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %ebp, %eax + mull 132(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %ebp, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, %esi + movl %ebp, %eax + mull 148(%esp) # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + addl %edi, %eax + adcl %ebx, %esi + movl 64(%esp), %edx # 4-byte Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl 68(%esp), %ebp # 4-byte Reload + adcl 48(%esp), %ebp # 4-byte Folded Reload + movl 72(%esp), %ebx # 4-byte Reload + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl 76(%esp), %edi # 4-byte Reload + adcl 56(%esp), %edi # 4-byte Folded Reload + movl 80(%esp), %ecx # 4-byte Reload + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl 84(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 68(%esp) # 4-byte Spill + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 72(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 76(%esp) # 4-byte Spill + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 80(%esp) # 4-byte Spill + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl 16(%eax), %ebp + movl %ebp, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ebp, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebp, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ebp, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %ebp, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, %edi + movl %ebp, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, %ebx + movl %ebp, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %eax, 24(%esp) # 4-byte Spill + addl %ebx, %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl %edi, %esi + movl %esi, %edi + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl 56(%esp), %ebx # 4-byte Reload + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl 60(%esp), %edx # 4-byte Reload + adcl 36(%esp), %edx # 4-byte Folded Reload + movl 88(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 24(%esp), %ecx # 4-byte Reload + addl 40(%esp), %ecx # 4-byte Folded Reload + movl 44(%esp), %esi # 4-byte Reload + adcl 64(%esp), %esi # 4-byte Folded Reload + movl %esi, 44(%esp) # 4-byte Spill + adcl 68(%esp), %edi # 4-byte Folded Reload + movl 48(%esp), %esi # 4-byte Reload + adcl 72(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + adcl 76(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 52(%esp) # 4-byte Spill + adcl 80(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + adcl 84(%esp), 
%edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl %ecx, %esi + imull 108(%esp), %esi # 4-byte Folded Reload + movl %esi, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 80(%esp) # 4-byte Spill + movl %esi, %eax + mull 144(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 76(%esp) # 4-byte Spill + movl %esi, %eax + mull 140(%esp) # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, 72(%esp) # 4-byte Spill + movl %esi, %eax + mull 136(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, %ebp + movl %esi, %eax + mull 132(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %esi, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, %ebx + movl %esi, %eax + mull 148(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + addl %ecx, %eax + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl 68(%esp), %edx # 4-byte Reload + adcl %edi, %edx + adcl 48(%esp), %ebp # 4-byte Folded Reload + movl 72(%esp), %edi # 4-byte Reload + adcl 52(%esp), %edi # 4-byte Folded Reload + movl 76(%esp), %esi # 4-byte Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + movl 80(%esp), %ecx # 4-byte Reload + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl 88(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + adcl 32(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 48(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 72(%esp) # 4-byte Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + adcl 64(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 80(%esp) # 4-byte Spill + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl 20(%eax), %ebp + movl %ebp, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ebp, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, %edi + movl %ebp, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebp, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %ebp, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, 56(%esp) # 4-byte Spill + movl %ebp, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, %ebx + movl %ebp, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl %edx, %ebp + addl %ebx, %ebp + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, %ebx + movl 60(%esp), %esi # 4-byte Reload + adcl 32(%esp), %esi # 4-byte Folded Reload + movl 36(%esp), %edx # 4-byte Reload + adcl %edi, %edx + movl 64(%esp), %ecx # 4-byte Reload + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl 84(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl 52(%esp), %edi # 4-byte Reload + addl 44(%esp), %edi # 4-byte Folded Reload + movl %edi, 52(%esp) # 4-byte Spill + adcl 68(%esp), %ebp # 4-byte Folded Reload + 
movl 56(%esp), %edi # 4-byte Reload + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 56(%esp) # 4-byte Spill + adcl 72(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + adcl 76(%esp), %esi # 4-byte Folded Reload + movl %esi, 60(%esp) # 4-byte Spill + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %edx, %esi + adcl 88(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 64(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + movl %edi, %ebx + imull 108(%esp), %ebx # 4-byte Folded Reload + movl %ebx, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 88(%esp) # 4-byte Spill + movl %ebx, %eax + mull 144(%esp) # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 80(%esp) # 4-byte Spill + movl %ebx, %eax + mull 140(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 76(%esp) # 4-byte Spill + movl %ebx, %eax + mull 136(%esp) # 4-byte Folded Reload + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, 72(%esp) # 4-byte Spill + movl %ebx, %eax + mull 132(%esp) # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %ebx, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + movl %eax, %ecx + movl %ebx, %eax + mull 148(%esp) # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + addl %edi, %eax + adcl %ebp, %ecx + movl %ecx, %edx + movl 68(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl 72(%esp), %ebp # 4-byte Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl 76(%esp), %ebx # 4-byte Reload + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl 80(%esp), %edi # 4-byte Reload + adcl %esi, %edi + movl 88(%esp), %esi # 4-byte Reload + adcl 64(%esp), %esi # 4-byte Folded Reload + movl 84(%esp), %eax # 4-byte Reload + adcl $0, %eax + addl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 72(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 76(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 80(%esp) # 4-byte Spill + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 88(%esp) # 4-byte Spill + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl 24(%eax), %edi + movl %edi, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, 112(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %edi, %eax + mull 116(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 116(%esp) # 4-byte Spill + movl %edi, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 92(%esp) # 4-byte Spill + movl %eax, 52(%esp) # 4-byte Spill + movl %edi, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, 96(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %edi, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 100(%esp) # 4-byte Spill + movl %eax, %esi + movl %edi, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, %ebp + movl %edi, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %eax, %ebx + addl %ebp, %edx + movl %edx, 104(%esp) # 4-byte Spill + movl %ecx, %edi + adcl %esi, %edi + movl 100(%esp), %esi # 4-byte Reload + adcl 48(%esp), %esi # 4-byte Folded Reload 
+ movl 96(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl 92(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx # 4-byte Folded Reload + movl 64(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl 112(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + addl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 48(%esp) # 4-byte Spill + movl 68(%esp), %ebx # 4-byte Reload + adcl %ebx, 104(%esp) # 4-byte Folded Spill + adcl 72(%esp), %edi # 4-byte Folded Reload + movl %edi, 72(%esp) # 4-byte Spill + adcl 76(%esp), %esi # 4-byte Folded Reload + movl %esi, 100(%esp) # 4-byte Spill + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 96(%esp) # 4-byte Spill + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %edx, 92(%esp) # 4-byte Spill + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + movl 48(%esp), %ecx # 4-byte Reload + imull %ecx, %edi + movl %edi, %eax + mull 124(%esp) # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 76(%esp) # 4-byte Spill + movl %edi, %eax + mull 144(%esp) # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 108(%esp) # 4-byte Spill + movl %edi, %eax + mull 140(%esp) # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl %eax, 120(%esp) # 4-byte Spill + movl %edi, %eax + mull 136(%esp) # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 116(%esp) # 4-byte Spill + movl %edi, %eax + mull 148(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, %ebp + movl %edi, %eax + mull 132(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, %esi + movl %edi, %eax + mull 128(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + addl %ecx, %ebp + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, %edx + adcl 72(%esp), %esi # 4-byte Folded Reload + movl 116(%esp), %ecx # 4-byte Reload + adcl 100(%esp), %ecx # 4-byte Folded Reload + movl 120(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl 108(%esp), %ebx # 4-byte Reload + adcl 92(%esp), %ebx # 4-byte Folded Reload + movl 76(%esp), %ebp # 4-byte Reload + adcl 64(%esp), %ebp # 4-byte Folded Reload + movl 112(%esp), %edi # 4-byte Reload + adcl $0, %edi + addl 60(%esp), %edx # 4-byte Folded Reload + adcl 52(%esp), %esi # 4-byte Folded Reload + movl %esi, 104(%esp) # 4-byte Spill + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 116(%esp) # 4-byte Spill + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 120(%esp) # 4-byte Spill + adcl 80(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 108(%esp) # 4-byte Spill + adcl 84(%esp), %ebp # 4-byte Folded Reload + adcl 88(%esp), %edi # 4-byte Folded Reload + movl %edi, 112(%esp) # 4-byte Spill + movl %edx, %eax + subl 148(%esp), %eax # 4-byte Folded Reload + sbbl 128(%esp), %esi # 4-byte Folded Reload + movl %esi, 128(%esp) # 4-byte Spill + sbbl 132(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + movl %edx, %esi + sbbl 136(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 136(%esp) # 4-byte Spill + sbbl 140(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 148(%esp) # 4-byte Spill + movl %ebp, %ebx + movl %ebp, %ecx + movl %ebx, %ebp + sbbl 144(%esp), %ebp # 4-byte Folded Reload + movl %edi, %ebx + sbbl 124(%esp), %ebx # 4-byte Folded Reload + movl %ebx, %edi + sarl 
$31, %edi + testl %edi, %edi + js .LBB99_2 +# BB#1: + movl %eax, %esi +.LBB99_2: + movl 172(%esp), %edx + movl %esi, (%edx) + movl 104(%esp), %eax # 4-byte Reload + js .LBB99_4 +# BB#3: + movl 128(%esp), %eax # 4-byte Reload +.LBB99_4: + movl %eax, 4(%edx) + movl %ecx, %eax + movl 116(%esp), %ecx # 4-byte Reload + js .LBB99_6 +# BB#5: + movl 132(%esp), %ecx # 4-byte Reload +.LBB99_6: + movl %ecx, 8(%edx) + movl 108(%esp), %esi # 4-byte Reload + movl 120(%esp), %ecx # 4-byte Reload + js .LBB99_8 +# BB#7: + movl 136(%esp), %ecx # 4-byte Reload +.LBB99_8: + movl %ecx, 12(%edx) + js .LBB99_10 +# BB#9: + movl 148(%esp), %esi # 4-byte Reload +.LBB99_10: + movl %esi, 16(%edx) + js .LBB99_12 +# BB#11: + movl %ebp, %eax +.LBB99_12: + movl %eax, 20(%edx) + js .LBB99_14 +# BB#13: + movl %ebx, 112(%esp) # 4-byte Spill +.LBB99_14: + movl 112(%esp), %eax # 4-byte Reload + movl %eax, 24(%edx) + addl $152, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end99: + .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L + + .globl mcl_fp_montRed7L + .align 16, 0x90 + .type mcl_fp_montRed7L,@function +mcl_fp_montRed7L: # @mcl_fp_montRed7L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $124, %esp + movl 152(%esp), %eax + movl -4(%eax), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl (%eax), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 148(%esp), %ecx + movl (%ecx), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + imull %edx, %ecx + movl 24(%eax), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 20(%eax), %edx + movl %edx, 120(%esp) # 4-byte Spill + movl 16(%eax), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 12(%eax), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 8(%eax), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 4(%eax), %ebx + movl %ebx, 96(%esp) # 4-byte Spill + movl %ecx, %eax + mull %edi + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, %edi + movl %ecx, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 84(%esp) # 4-byte Spill + movl %ecx, %eax + mull %ebp + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 80(%esp) # 4-byte Spill + movl %ecx, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 76(%esp) # 4-byte Spill + movl %ecx, %eax + mull %esi + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + mull %ebx + movl %edx, %esi + movl %eax, %ebx + movl %ecx, %eax + mull 112(%esp) # 4-byte Folded Reload + addl %ebx, %edx + movl %edx, 72(%esp) # 4-byte Spill + adcl 68(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 76(%esp), %edx # 4-byte Folded Reload + adcl 80(%esp), %ebp # 4-byte Folded Reload + movl 56(%esp), %ebx # 4-byte Reload + adcl 84(%esp), %ebx # 4-byte Folded Reload + movl 60(%esp), %esi # 4-byte Reload + adcl %edi, %esi + movl 64(%esp), %edi # 4-byte Reload + adcl $0, %edi + addl 116(%esp), %eax # 4-byte Folded Reload + movl 148(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 4(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 8(%ecx), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 12(%ecx), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl 16(%ecx), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + adcl 20(%ecx), %ebx + movl %ebx, 56(%esp) # 4-byte Spill + adcl 24(%ecx), %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 28(%ecx), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl 48(%ecx), %edi + movl 
44(%ecx), %edx + movl 40(%ecx), %ebx + movl 36(%ecx), %ebp + movl 32(%ecx), %eax + adcl $0, %eax + movl %eax, 12(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 24(%esp) # 4-byte Spill + adcl $0, %ebx + movl %ebx, 68(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 76(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 80(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 84(%esp) # 4-byte Spill + sbbl %eax, %eax + andl $1, %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + imull 88(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ecx, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %ecx, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, (%esp) # 4-byte Spill + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, %ebp + movl %ecx, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, %ebx + movl %ecx, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %eax, %ecx + addl %ebx, %edx + movl %edx, 28(%esp) # 4-byte Spill + adcl %ebp, %edi + movl %esi, %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl 36(%esp), %esi # 4-byte Reload + adcl 4(%esp), %esi # 4-byte Folded Reload + movl 40(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl 44(%esp), %ebx # 4-byte Reload + adcl 16(%esp), %ebx # 4-byte Folded Reload + movl 48(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 72(%esp), %ecx # 4-byte Folded Reload + movl 28(%esp), %ecx # 4-byte Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 36(%esp) # 4-byte Spill + adcl 60(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 40(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + adcl 12(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl %ecx, %edi + imull 88(%esp), %edi # 4-byte Folded Reload + movl %edi, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %edi, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %edi, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %edi, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %edi, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, %ebx + movl %edi, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, %esi + movl %edi, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %eax, %edi + addl %esi, 
%edx + movl %edx, 32(%esp) # 4-byte Spill + adcl %ebx, %ebp + movl %ebp, %eax + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl 60(%esp), %ebx # 4-byte Reload + adcl 12(%esp), %ebx # 4-byte Folded Reload + movl 64(%esp), %esi # 4-byte Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + movl 72(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 28(%esp), %edi # 4-byte Folded Reload + movl 32(%esp), %edi # 4-byte Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%esp) # 4-byte Spill + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 56(%esp) # 4-byte Spill + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + adcl 48(%esp), %esi # 4-byte Folded Reload + movl %esi, 64(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl %edi, %esi + imull 88(%esp), %esi # 4-byte Folded Reload + movl %esi, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %esi, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %esi, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, 4(%esp) # 4-byte Spill + movl %esi, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, %ecx + movl %esi, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, %ebx + movl %eax, %ebp + movl %esi, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %eax, %edi + addl %ebp, %edx + movl %edx, 28(%esp) # 4-byte Spill + adcl %ecx, %ebx + movl %ebx, %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 4(%esp), %eax # 4-byte Folded Reload + movl 40(%esp), %ebx # 4-byte Reload + adcl 8(%esp), %ebx # 4-byte Folded Reload + movl 44(%esp), %ebp # 4-byte Reload + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl 48(%esp), %edx # 4-byte Reload + adcl 24(%esp), %edx # 4-byte Folded Reload + movl 52(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + addl 32(%esp), %edi # 4-byte Folded Reload + movl 28(%esp), %edi # 4-byte Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + adcl 64(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%esp) # 4-byte Spill + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl %edi, %ebp + imull 88(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %eax + mull 
92(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %ebp, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %ebp, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ebp, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %ebp, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, %ecx + movl %ebp, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, %ebx + movl %ebp, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %eax, %ebp + addl %ebx, %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl %ecx, %edi + movl %edi, %ecx + adcl 12(%esp), %esi # 4-byte Folded Reload + movl 60(%esp), %eax # 4-byte Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl 64(%esp), %ebx # 4-byte Reload + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl 68(%esp), %edi # 4-byte Reload + adcl 56(%esp), %edi # 4-byte Folded Reload + movl 72(%esp), %edx # 4-byte Reload + adcl $0, %edx + addl 28(%esp), %ebp # 4-byte Folded Reload + movl 32(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 32(%esp) # 4-byte Spill + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 36(%esp) # 4-byte Spill + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 40(%esp) # 4-byte Spill + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 68(%esp) # 4-byte Spill + adcl 76(%esp), %edx # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl %ebp, %edi + imull 88(%esp), %edi # 4-byte Folded Reload + movl %edi, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 28(%esp) # 4-byte Spill + movl %edi, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %edi, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 20(%esp) # 4-byte Spill + movl %edi, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, %ecx + movl %eax, 16(%esp) # 4-byte Spill + movl %edi, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, 48(%esp) # 4-byte Spill + movl %eax, 12(%esp) # 4-byte Spill + movl %edi, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, %esi + movl %eax, %ebx + movl %edi, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %eax, %ebp + addl %ebx, %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, %eax + movl 48(%esp), %edi # 4-byte Reload + adcl 16(%esp), %edi # 4-byte Folded Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %ebx # 4-byte Reload + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl 56(%esp), %edx # 4-byte Reload + adcl 28(%esp), %edx # 4-byte Folded Reload + movl 76(%esp), %esi # 4-byte Reload + adcl $0, %esi + addl 32(%esp), %ebp # 4-byte Folded Reload + movl 44(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%esp) # 4-byte Spill + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 
40(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 48(%esp) # 4-byte Spill + adcl 64(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 52(%esp) # 4-byte Spill + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 80(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 88(%esp), %ecx # 4-byte Reload + imull %ebp, %ecx + movl %ecx, %eax + mull 92(%esp) # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + mull 120(%esp) # 4-byte Folded Reload + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, 36(%esp) # 4-byte Spill + movl %ecx, %eax + mull 104(%esp) # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ecx, %eax + mull 108(%esp) # 4-byte Folded Reload + movl %edx, %ebp + movl %eax, 80(%esp) # 4-byte Spill + movl %ecx, %eax + mull 100(%esp) # 4-byte Folded Reload + movl %edx, %ebx + movl %eax, 24(%esp) # 4-byte Spill + movl %ecx, %eax + mull 112(%esp) # 4-byte Folded Reload + movl %edx, %edi + movl %eax, 28(%esp) # 4-byte Spill + movl %ecx, %eax + mull 96(%esp) # 4-byte Folded Reload + movl %edx, %esi + addl %edi, %eax + movl %eax, %edi + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl 80(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 80(%esp) # 4-byte Spill + adcl 32(%esp), %ebp # 4-byte Folded Reload + movl 88(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl 64(%esp), %edx # 4-byte Reload + adcl 68(%esp), %edx # 4-byte Folded Reload + movl 72(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl 28(%esp), %ebx # 4-byte Reload + addl 44(%esp), %ebx # 4-byte Folded Reload + adcl 40(%esp), %edi # 4-byte Folded Reload + adcl 48(%esp), %esi # 4-byte Folded Reload + movl %esi, 64(%esp) # 4-byte Spill + movl 80(%esp), %ebx # 4-byte Reload + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 80(%esp) # 4-byte Spill + adcl 52(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 68(%esp) # 4-byte Spill + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl 76(%esp), %edx # 4-byte Folded Reload + movl %edx, %eax + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 72(%esp) # 4-byte Spill + movl 116(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + movl %edi, %edx + movl %edx, %ecx + subl 112(%esp), %ecx # 4-byte Folded Reload + sbbl 96(%esp), %esi # 4-byte Folded Reload + movl %esi, 84(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + sbbl 100(%esp), %edi # 4-byte Folded Reload + movl %edi, 96(%esp) # 4-byte Spill + sbbl 108(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 100(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + sbbl 104(%esp), %esi # 4-byte Folded Reload + movl %esi, 108(%esp) # 4-byte Spill + movl %eax, %esi + movl %esi, %ebp + sbbl 120(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + movl %esi, %eax + sbbl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 120(%esp) # 4-byte Spill + sbbl $0, %ebx + andl $1, %ebx + movl %ebx, 116(%esp) # 4-byte Spill + jne .LBB100_2 +# BB#1: + movl %ecx, %edx +.LBB100_2: + movl 144(%esp), %edi + movl %edx, (%edi) + movl 116(%esp), %eax # 4-byte Reload + testb %al, %al + movl 64(%esp), %eax # 4-byte Reload + jne .LBB100_4 +# BB#3: + movl 
84(%esp), %eax # 4-byte Reload +.LBB100_4: + movl %eax, 4(%edi) + movl 80(%esp), %eax # 4-byte Reload + jne .LBB100_6 +# BB#5: + movl 96(%esp), %eax # 4-byte Reload +.LBB100_6: + movl %eax, 8(%edi) + movl 88(%esp), %eax # 4-byte Reload + movl 68(%esp), %ecx # 4-byte Reload + jne .LBB100_8 +# BB#7: + movl 100(%esp), %ecx # 4-byte Reload +.LBB100_8: + movl %ecx, 12(%edi) + jne .LBB100_10 +# BB#9: + movl 108(%esp), %eax # 4-byte Reload +.LBB100_10: + movl %eax, 16(%edi) + jne .LBB100_12 +# BB#11: + movl 112(%esp), %ebp # 4-byte Reload +.LBB100_12: + movl %ebp, 20(%edi) + jne .LBB100_14 +# BB#13: + movl 120(%esp), %esi # 4-byte Reload +.LBB100_14: + movl %esi, 24(%edi) + addl $124, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end100: + .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L + + .globl mcl_fp_addPre7L + .align 16, 0x90 + .type mcl_fp_addPre7L,@function +mcl_fp_addPre7L: # @mcl_fp_addPre7L +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 20(%esp), %esi + addl (%esi), %ecx + adcl 4(%esi), %edx + movl 8(%eax), %edi + adcl 8(%esi), %edi + movl 16(%esp), %ebx + movl %ecx, (%ebx) + movl 12(%esi), %ecx + movl %edx, 4(%ebx) + movl 16(%esi), %edx + adcl 12(%eax), %ecx + adcl 16(%eax), %edx + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %ecx, 12(%ebx) + movl 20(%esi), %ecx + adcl %edi, %ecx + movl %edx, 16(%ebx) + movl %ecx, 20(%ebx) + movl 24(%eax), %eax + movl 24(%esi), %ecx + adcl %eax, %ecx + movl %ecx, 24(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end101: + .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L + + .globl mcl_fp_subPre7L + .align 16, 0x90 + .type mcl_fp_subPre7L,@function +mcl_fp_subPre7L: # @mcl_fp_subPre7L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %esi + xorl %eax, %eax + movl 28(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %esi + movl 8(%ecx), %ebx + sbbl 8(%edi), %ebx + movl 20(%esp), %ebp + movl %edx, (%ebp) + movl 12(%ecx), %edx + sbbl 12(%edi), %edx + movl %esi, 4(%ebp) + movl 16(%ecx), %esi + sbbl 16(%edi), %esi + movl %ebx, 8(%ebp) + movl 20(%edi), %ebx + movl %edx, 12(%ebp) + movl 20(%ecx), %edx + sbbl %ebx, %edx + movl %esi, 16(%ebp) + movl %edx, 20(%ebp) + movl 24(%edi), %edx + movl 24(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 24(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end102: + .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L + + .globl mcl_fp_shr1_7L + .align 16, 0x90 + .type mcl_fp_shr1_7L,@function +mcl_fp_shr1_7L: # @mcl_fp_shr1_7L +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + shrdl $1, %edx, %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 8(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 4(%esi) + movl 12(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 8(%esi) + movl 16(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 12(%esi) + movl 20(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 16(%esi) + movl 24(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 20(%esi) + shrl %eax + movl %eax, 24(%esi) + popl %esi + retl +.Lfunc_end103: + .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L + + .globl mcl_fp_add7L + .align 16, 0x90 + .type mcl_fp_add7L,@function +mcl_fp_add7L: # @mcl_fp_add7L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 48(%esp), %ebp + movl (%ebp), %eax + movl 4(%ebp), %edi + 
movl 44(%esp), %ecx + addl (%ecx), %eax + adcl 4(%ecx), %edi + movl 8(%ebp), %esi + adcl 8(%ecx), %esi + movl 12(%ecx), %edx + movl 16(%ecx), %ebx + adcl 12(%ebp), %edx + movl %edx, 16(%esp) # 4-byte Spill + adcl 16(%ebp), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl %ebp, %ebx + movl 20(%ecx), %ebp + adcl 20(%ebx), %ebp + movl 24(%ecx), %edx + adcl 24(%ebx), %edx + movl 40(%esp), %ecx + movl %eax, (%ecx) + movl %edi, 4(%ecx) + movl %esi, 8(%ecx) + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%ecx) + movl 12(%esp), %ebx # 4-byte Reload + movl %ebx, 16(%ecx) + movl %ebp, 20(%ecx) + movl %edx, 24(%ecx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 52(%esp), %ecx + subl (%ecx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 12(%esp), %ecx # 4-byte Reload + movl 52(%esp), %eax + sbbl 4(%eax), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %eax, %edi + sbbl 8(%edi), %esi + movl %esi, (%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + sbbl 12(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + sbbl 16(%edi), %ecx + movl %ecx, %esi + sbbl 20(%edi), %ebp + sbbl 24(%edi), %edx + sbbl $0, %ebx + testb $1, %bl + jne .LBB104_2 +# BB#1: # %nocarry + movl 8(%esp), %ecx # 4-byte Reload + movl 40(%esp), %eax + movl %eax, %ebx + movl %ecx, (%ebx) + movl 4(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%ebx) + movl (%esp), %eax # 4-byte Reload + movl %eax, 8(%ebx) + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 12(%ebx) + movl %esi, 16(%ebx) + movl %ebp, 20(%ebx) + movl %edx, 24(%ebx) +.LBB104_2: # %carry + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end104: + .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L + + .globl mcl_fp_addNF7L + .align 16, 0x90 + .type mcl_fp_addNF7L,@function +mcl_fp_addNF7L: # @mcl_fp_addNF7L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $52, %esp + movl 80(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 76(%esp), %esi + addl (%esi), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + adcl 4(%esi), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 20(%eax), %ebx + movl 16(%eax), %edi + movl 12(%eax), %ebp + movl 8(%eax), %ecx + adcl 8(%esi), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + adcl 12(%esi), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl 16(%esi), %edi + movl %edi, 36(%esp) # 4-byte Spill + adcl 20(%esi), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 24(%esi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl 44(%esp), %esi # 4-byte Reload + subl (%eax), %esi + movl %esi, (%esp) # 4-byte Spill + sbbl 4(%eax), %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 8(%eax), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 12(%eax), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + sbbl 16(%eax), %edi + movl %edi, 20(%esp) # 4-byte Spill + sbbl 20(%eax), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + sbbl 24(%eax), %edi + movl %edi, %ecx + sarl $31, %ecx + testl %ecx, %ecx + js .LBB105_2 +# BB#1: + movl (%esp), %esi # 4-byte Reload +.LBB105_2: + movl 72(%esp), %ecx + movl %esi, (%ecx) + movl 28(%esp), %eax # 4-byte Reload + js .LBB105_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB105_4: + movl %eax, 4(%ecx) + movl 48(%esp), %ebp # 4-byte Reload + movl 40(%esp), %ecx # 4-byte Reload + movl 36(%esp), %edx # 4-byte Reload + movl 32(%esp), %esi # 4-byte Reload + movl 24(%esp), %ebx # 4-byte Reload + js .LBB105_6 +# BB#5: + 
movl 8(%esp), %ebx # 4-byte Reload +.LBB105_6: + movl 72(%esp), %eax + movl %ebx, 8(%eax) + movl %eax, %ebx + js .LBB105_8 +# BB#7: + movl 16(%esp), %esi # 4-byte Reload +.LBB105_8: + movl %esi, 12(%ebx) + js .LBB105_10 +# BB#9: + movl 20(%esp), %edx # 4-byte Reload +.LBB105_10: + movl %edx, 16(%ebx) + js .LBB105_12 +# BB#11: + movl 12(%esp), %ecx # 4-byte Reload +.LBB105_12: + movl %ecx, 20(%ebx) + js .LBB105_14 +# BB#13: + movl %edi, %ebp +.LBB105_14: + movl %ebp, 24(%ebx) + addl $52, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end105: + .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L + + .globl mcl_fp_sub7L + .align 16, 0x90 + .type mcl_fp_sub7L,@function +mcl_fp_sub7L: # @mcl_fp_sub7L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $24, %esp + movl 48(%esp), %edi + movl (%edi), %eax + movl 4(%edi), %ecx + xorl %ebx, %ebx + movl 52(%esp), %esi + subl (%esi), %eax + movl %eax, 16(%esp) # 4-byte Spill + sbbl 4(%esi), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 8(%edi), %edx + sbbl 8(%esi), %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 12(%edi), %ecx + sbbl 12(%esi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 16(%edi), %eax + sbbl 16(%esi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 20(%edi), %ebp + sbbl 20(%esi), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 24(%edi), %edi + sbbl 24(%esi), %edi + sbbl $0, %ebx + testb $1, %bl + movl 44(%esp), %ebx + movl 16(%esp), %esi # 4-byte Reload + movl %esi, (%ebx) + movl 20(%esp), %esi # 4-byte Reload + movl %esi, 4(%ebx) + movl %edx, 8(%ebx) + movl %ecx, 12(%ebx) + movl %eax, 16(%ebx) + movl %ebp, 20(%ebx) + movl %edi, 24(%ebx) + je .LBB106_2 +# BB#1: # %carry + movl 56(%esp), %ebp + movl 16(%esp), %ecx # 4-byte Reload + addl (%ebp), %ecx + movl %ecx, (%ebx) + movl 20(%esp), %edx # 4-byte Reload + adcl 4(%ebp), %edx + movl %edx, 4(%ebx) + movl 4(%esp), %ecx # 4-byte Reload + adcl 8(%ebp), %ecx + movl 12(%ebp), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebx) + movl 16(%ebp), %ecx + adcl (%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl %ecx, 16(%ebx) + movl 20(%ebp), %eax + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 24(%ebp), %eax + adcl %edi, %eax + movl %eax, 24(%ebx) +.LBB106_2: # %nocarry + addl $24, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end106: + .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L + + .globl mcl_fp_subNF7L + .align 16, 0x90 + .type mcl_fp_subNF7L,@function +mcl_fp_subNF7L: # @mcl_fp_subNF7L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 56(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %edx + movl 60(%esp), %ecx + subl (%ecx), %esi + movl %esi, 20(%esp) # 4-byte Spill + sbbl 4(%ecx), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 24(%eax), %edx + movl 20(%eax), %esi + movl 16(%eax), %edi + movl 12(%eax), %ebx + movl 8(%eax), %eax + sbbl 8(%ecx), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 12(%ecx), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + sbbl 16(%ecx), %edi + movl %edi, 16(%esp) # 4-byte Spill + sbbl 20(%ecx), %esi + movl %esi, 28(%esp) # 4-byte Spill + sbbl 24(%ecx), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl %edx, %ecx + sarl $31, %ecx + movl %ecx, %eax + shldl $1, %edx, %eax + movl 64(%esp), %edx + andl (%edx), %eax + movl 24(%edx), %esi + andl %ecx, %esi + movl %esi, (%esp) # 4-byte Spill + movl 20(%edx), %ebx + andl %ecx, %ebx + movl 16(%edx), %edi + andl %ecx, %edi + movl 12(%edx), %esi + andl %ecx, %esi + movl 
64(%esp), %edx + movl 8(%edx), %edx + andl %ecx, %edx + movl 64(%esp), %ebp + andl 4(%ebp), %ecx + addl 20(%esp), %eax # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 52(%esp), %ebp + movl %eax, (%ebp) + adcl 4(%esp), %edx # 4-byte Folded Reload + movl %ebp, %eax + movl %ecx, 4(%eax) + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %edx, 8(%eax) + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %esi, 12(%eax) + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %edi, 16(%eax) + movl %ebx, 20(%eax) + movl (%esp), %ecx # 4-byte Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%eax) + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end107: + .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L + + .globl mcl_fpDbl_add7L + .align 16, 0x90 + .type mcl_fpDbl_add7L,@function +mcl_fpDbl_add7L: # @mcl_fpDbl_add7L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $44, %esp + movl 72(%esp), %esi + movl 68(%esp), %edx + movl 12(%edx), %edi + movl 16(%edx), %ecx + movl 8(%esi), %eax + movl (%esi), %ebx + addl (%edx), %ebx + movl 64(%esp), %ebp + movl %ebx, (%ebp) + movl 4(%esi), %ebx + adcl 4(%edx), %ebx + adcl 8(%edx), %eax + adcl 12(%esi), %edi + adcl 16(%esi), %ecx + movl %ebx, 4(%ebp) + movl %esi, %ebx + movl 36(%ebx), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl %eax, 8(%ebp) + movl 20(%ebx), %eax + movl %edi, 12(%ebp) + movl 20(%edx), %edi + adcl %eax, %edi + movl 24(%ebx), %eax + movl %ecx, 16(%ebp) + movl 24(%edx), %ecx + adcl %eax, %ecx + movl 28(%ebx), %eax + movl %edi, 20(%ebp) + movl 28(%edx), %edi + adcl %eax, %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 32(%ebx), %eax + movl %ecx, 24(%ebp) + movl 32(%edx), %ecx + adcl %eax, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%edx), %esi + adcl 36(%esp), %esi # 4-byte Folded Reload + movl 40(%ebx), %ecx + movl 40(%edx), %eax + adcl %ecx, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%ebx), %ebp + movl 44(%edx), %ecx + adcl %ebp, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 48(%ebx), %ebp + movl %ebx, %eax + movl 48(%edx), %ebx + adcl %ebp, %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 52(%eax), %eax + movl 52(%edx), %ebp + adcl %eax, %ebp + movl %ebp, 32(%esp) # 4-byte Spill + sbbl %edx, %edx + andl $1, %edx + movl 76(%esp), %eax + subl (%eax), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + sbbl 4(%eax), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %esi, %eax + movl 76(%esp), %edi + sbbl 8(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + sbbl 12(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + sbbl 16(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + sbbl 20(%edi), %ebx + sbbl 24(%edi), %ebp + sbbl $0, %edx + andl $1, %edx + jne .LBB108_2 +# BB#1: + movl %ebp, 32(%esp) # 4-byte Spill +.LBB108_2: + testb %dl, %dl + movl 20(%esp), %ecx # 4-byte Reload + jne .LBB108_4 +# BB#3: + movl (%esp), %esi # 4-byte Reload + movl 4(%esp), %eax # 4-byte Reload + movl %eax, 40(%esp) # 4-byte Spill + movl 8(%esp), %ecx # 4-byte Reload +.LBB108_4: + movl 64(%esp), %eax + movl %ecx, 28(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl %esi, 36(%eax) + movl 24(%esp), %edx # 4-byte Reload + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB108_6 +# BB#5: + movl 12(%esp), %ecx # 4-byte Reload +.LBB108_6: + movl %ecx, 40(%eax) + movl 28(%esp), %ecx # 4-byte Reload + jne .LBB108_8 +# BB#7: + movl 16(%esp), %edx # 4-byte Reload +.LBB108_8: + movl %edx, 
44(%eax) + jne .LBB108_10 +# BB#9: + movl %ebx, %ecx +.LBB108_10: + movl %ecx, 48(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + addl $44, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end108: + .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L + + .globl mcl_fpDbl_sub7L + .align 16, 0x90 + .type mcl_fpDbl_sub7L,@function +mcl_fpDbl_sub7L: # @mcl_fpDbl_sub7L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 56(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %edx + movl 60(%esp), %edi + subl (%edi), %eax + sbbl 4(%edi), %edx + movl 8(%esi), %ebx + sbbl 8(%edi), %ebx + movl 52(%esp), %ecx + movl %eax, (%ecx) + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %edx, 4(%ecx) + movl 16(%esi), %edx + sbbl 16(%edi), %edx + movl %ebx, 8(%ecx) + movl 20(%edi), %ebx + movl %eax, 12(%ecx) + movl 20(%esi), %eax + sbbl %ebx, %eax + movl 24(%edi), %ebx + movl %edx, 16(%ecx) + movl 24(%esi), %edx + sbbl %ebx, %edx + movl 28(%edi), %ebx + movl %eax, 20(%ecx) + movl 28(%esi), %eax + sbbl %ebx, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 32(%edi), %eax + movl %edx, 24(%ecx) + movl 32(%esi), %edx + sbbl %eax, %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 36(%edi), %eax + movl 36(%esi), %edx + sbbl %eax, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 40(%edi), %eax + movl 40(%esi), %edx + sbbl %eax, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 44(%edi), %eax + movl 44(%esi), %edx + sbbl %eax, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 48(%edi), %eax + movl 48(%esi), %edx + sbbl %eax, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 52(%edi), %eax + movl 52(%esi), %edx + sbbl %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 64(%esp), %esi + jne .LBB109_1 +# BB#2: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB109_3 +.LBB109_1: + movl 24(%esi), %edx + movl %edx, (%esp) # 4-byte Spill +.LBB109_3: + testb %al, %al + jne .LBB109_4 +# BB#5: + movl $0, %edi + movl $0, %eax + jmp .LBB109_6 +.LBB109_4: + movl (%esi), %eax + movl 4(%esi), %edi +.LBB109_6: + jne .LBB109_7 +# BB#8: + movl $0, %ebx + jmp .LBB109_9 +.LBB109_7: + movl 20(%esi), %ebx +.LBB109_9: + jne .LBB109_10 +# BB#11: + movl $0, %ebp + jmp .LBB109_12 +.LBB109_10: + movl 16(%esi), %ebp +.LBB109_12: + jne .LBB109_13 +# BB#14: + movl $0, %edx + jmp .LBB109_15 +.LBB109_13: + movl 12(%esi), %edx +.LBB109_15: + jne .LBB109_16 +# BB#17: + xorl %esi, %esi + jmp .LBB109_18 +.LBB109_16: + movl 8(%esi), %esi +.LBB109_18: + addl 12(%esp), %eax # 4-byte Folded Reload + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %eax, 28(%ecx) + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %edi, 32(%ecx) + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %esi, 36(%ecx) + adcl 20(%esp), %ebp # 4-byte Folded Reload + movl %edx, 40(%ecx) + adcl 24(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 44(%ecx) + movl %ebx, 48(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%ecx) + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end109: + .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L + + .align 16, 0x90 + .type .LmulPv256x32,@function +.LmulPv256x32: # @mulPv256x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $48, %esp + movl %edx, %esi + movl 68(%esp), %ebx + movl %ebx, %eax + mull 28(%esi) + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %eax + mull 24(%esi) + movl %edx, 36(%esp) 
# 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebx, %eax + mull 20(%esi) + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebx, %eax + mull 16(%esi) + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ebx, %eax + mull 12(%esi) + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ebx, %eax + mull 8(%esi) + movl %edx, %ebp + movl %eax, 4(%esp) # 4-byte Spill + movl %ebx, %eax + mull 4(%esi) + movl %edx, %edi + movl %eax, (%esp) # 4-byte Spill + movl %ebx, %eax + mull (%esi) + movl %eax, (%ecx) + addl (%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%ecx) + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%ecx) + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%ecx) + movl 28(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%ecx) + movl 44(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 32(%ecx) + movl %ecx, %eax + addl $48, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end110: + .size .LmulPv256x32, .Lfunc_end110-.LmulPv256x32 + + .globl mcl_fp_mulUnitPre8L + .align 16, 0x90 + .type mcl_fp_mulUnitPre8L,@function +mcl_fp_mulUnitPre8L: # @mcl_fp_mulUnitPre8L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + calll .L111$pb +.L111$pb: + popl %ebx +.Ltmp2: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.L111$pb), %ebx + movl 88(%esp), %eax + movl %eax, (%esp) + leal 24(%esp), %ecx + movl 84(%esp), %edx + calll .LmulPv256x32 + movl 56(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 52(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 48(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 44(%esp), %esi + movl 40(%esp), %edi + movl 36(%esp), %ebx + movl 32(%esp), %ebp + movl 24(%esp), %edx + movl 28(%esp), %ecx + movl 80(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %ebp, 8(%eax) + movl %ebx, 12(%eax) + movl %edi, 16(%eax) + movl %esi, 20(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end111: + .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L + + .globl mcl_fpDbl_mulPre8L + .align 16, 0x90 + .type mcl_fpDbl_mulPre8L,@function +mcl_fpDbl_mulPre8L: # @mcl_fpDbl_mulPre8L +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $156, %esp + calll .L112$pb +.L112$pb: + popl %ebx +.Ltmp3: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp3-.L112$pb), %ebx + movl %ebx, -96(%ebp) # 4-byte Spill + movl 16(%ebp), %esi + movl %esi, 8(%esp) + movl 12(%ebp), %edi + movl %edi, 4(%esp) + movl 8(%ebp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre4L@PLT + leal 16(%esi), %eax + movl %eax, 8(%esp) + leal 16(%edi), %eax + movl %eax, 4(%esp) + movl 8(%ebp), %eax + leal 32(%eax), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre4L@PLT + movl 24(%edi), %esi + movl (%edi), %ebx + movl 4(%edi), %eax + addl 16(%edi), %ebx + movl %ebx, -120(%ebp) # 4-byte Spill + adcl 20(%edi), %eax + movl %eax, -100(%ebp) # 4-byte Spill + adcl 
8(%edi), %esi + movl %esi, -108(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -80(%ebp) # 4-byte Spill + movl 16(%ebp), %edi + movl (%edi), %eax + movl 4(%edi), %ecx + addl 16(%edi), %eax + adcl 20(%edi), %ecx + movl %ecx, -124(%ebp) # 4-byte Spill + movl 24(%edi), %edx + adcl 8(%edi), %edx + movl 28(%edi), %ecx + adcl 12(%edi), %ecx + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -128(%ebp) # 4-byte Spill + jb .LBB112_2 +# BB#1: + xorl %esi, %esi + xorl %ebx, %ebx +.LBB112_2: + movl %ebx, -112(%ebp) # 4-byte Spill + movl %esi, -104(%ebp) # 4-byte Spill + movl 12(%ebp), %esi + movl 28(%esi), %edi + movl -80(%ebp), %ebx # 4-byte Reload + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + adcl 12(%esi), %edi + movl %edi, -116(%ebp) # 4-byte Spill + movl %ecx, -84(%ebp) # 4-byte Spill + movl %edx, %edi + movl -124(%ebp), %ebx # 4-byte Reload + movl %ebx, -80(%ebp) # 4-byte Spill + movl %eax, -92(%ebp) # 4-byte Spill + jb .LBB112_4 +# BB#3: + movl $0, -84(%ebp) # 4-byte Folded Spill + movl $0, %edi + movl $0, -80(%ebp) # 4-byte Folded Spill + movl $0, -92(%ebp) # 4-byte Folded Spill +.LBB112_4: + movl %edi, -88(%ebp) # 4-byte Spill + movl -120(%ebp), %esi # 4-byte Reload + movl %esi, -60(%ebp) + movl -100(%ebp), %edi # 4-byte Reload + movl %edi, -56(%ebp) + movl -108(%ebp), %esi # 4-byte Reload + movl %esi, -52(%ebp) + movl %eax, -76(%ebp) + movl %ebx, -72(%ebp) + movl %edx, -68(%ebp) + movl %ecx, -64(%ebp) + sbbl %edx, %edx + movl -116(%ebp), %esi # 4-byte Reload + movl %esi, -48(%ebp) + movl -128(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB112_6 +# BB#5: + movl $0, %esi + movl $0, %edi +.LBB112_6: + sbbl %eax, %eax + leal -76(%ebp), %ecx + movl %ecx, 8(%esp) + leal -60(%ebp), %ecx + movl %ecx, 4(%esp) + leal -44(%ebp), %ecx + movl %ecx, (%esp) + andl %eax, %edx + movl %edi, %eax + movl -92(%ebp), %edi # 4-byte Reload + addl -112(%ebp), %edi # 4-byte Folded Reload + adcl %eax, -80(%ebp) # 4-byte Folded Spill + movl -104(%ebp), %eax # 4-byte Reload + adcl %eax, -88(%ebp) # 4-byte Folded Spill + adcl %esi, -84(%ebp) # 4-byte Folded Spill + sbbl %esi, %esi + andl $1, %esi + andl $1, %edx + movl %edx, -92(%ebp) # 4-byte Spill + movl -96(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre4L@PLT + addl -28(%ebp), %edi + movl -80(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -80(%ebp) # 4-byte Spill + movl -88(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -88(%ebp) # 4-byte Spill + movl -84(%ebp), %eax # 4-byte Reload + adcl -16(%ebp), %eax + movl %eax, -84(%ebp) # 4-byte Spill + adcl %esi, -92(%ebp) # 4-byte Folded Spill + movl -44(%ebp), %eax + movl 8(%ebp), %esi + subl (%esi), %eax + movl %eax, -116(%ebp) # 4-byte Spill + movl -40(%ebp), %ebx + sbbl 4(%esi), %ebx + movl -36(%ebp), %eax + sbbl 8(%esi), %eax + movl %eax, -96(%ebp) # 4-byte Spill + movl -32(%ebp), %edx + sbbl 12(%esi), %edx + movl 16(%esi), %eax + movl %eax, -100(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl 20(%esi), %eax + movl %eax, -112(%ebp) # 4-byte Spill + sbbl %eax, -80(%ebp) # 4-byte Folded Spill + movl 24(%esi), %eax + movl %eax, -104(%ebp) # 4-byte Spill + sbbl %eax, -88(%ebp) # 4-byte Folded Spill + movl 28(%esi), %eax + movl %eax, -108(%ebp) # 4-byte Spill + sbbl %eax, -84(%ebp) # 4-byte Folded Spill + sbbl $0, -92(%ebp) # 4-byte Folded Spill + movl 32(%esi), %ecx + movl %ecx, -132(%ebp) # 4-byte Spill + movl -116(%ebp), %eax # 4-byte Reload + subl %ecx, %eax + movl 36(%esi), %ecx + 
movl %ecx, -136(%ebp) # 4-byte Spill + sbbl %ecx, %ebx + movl 40(%esi), %ecx + movl %ecx, -128(%ebp) # 4-byte Spill + sbbl %ecx, -96(%ebp) # 4-byte Folded Spill + movl 44(%esi), %ecx + movl %ecx, -140(%ebp) # 4-byte Spill + sbbl %ecx, %edx + movl 48(%esi), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + sbbl %ecx, %edi + movl 52(%esi), %ecx + movl %ecx, -116(%ebp) # 4-byte Spill + sbbl %ecx, -80(%ebp) # 4-byte Folded Spill + movl 56(%esi), %ecx + movl %ecx, -120(%ebp) # 4-byte Spill + sbbl %ecx, -88(%ebp) # 4-byte Folded Spill + movl 60(%esi), %ecx + movl %ecx, -124(%ebp) # 4-byte Spill + sbbl %ecx, -84(%ebp) # 4-byte Folded Spill + sbbl $0, -92(%ebp) # 4-byte Folded Spill + addl -100(%ebp), %eax # 4-byte Folded Reload + adcl -112(%ebp), %ebx # 4-byte Folded Reload + movl %eax, 16(%esi) + movl -96(%ebp), %eax # 4-byte Reload + adcl -104(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 20(%esi) + adcl -108(%ebp), %edx # 4-byte Folded Reload + movl %eax, 24(%esi) + adcl -132(%ebp), %edi # 4-byte Folded Reload + movl %edx, 28(%esi) + movl -80(%ebp), %eax # 4-byte Reload + adcl -136(%ebp), %eax # 4-byte Folded Reload + movl %edi, 32(%esi) + movl -88(%ebp), %ecx # 4-byte Reload + adcl -128(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 36(%esi) + movl -84(%ebp), %eax # 4-byte Reload + adcl -140(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 40(%esi) + movl -92(%ebp), %ecx # 4-byte Reload + adcl -144(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 44(%esi) + movl %ecx, 48(%esi) + movl -116(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 52(%esi) + movl -120(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 56(%esi) + movl -124(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 60(%esi) + addl $156, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end112: + .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L + + .globl mcl_fpDbl_sqrPre8L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre8L,@function +mcl_fpDbl_sqrPre8L: # @mcl_fpDbl_sqrPre8L +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $156, %esp + calll .L113$pb +.L113$pb: + popl %ebx +.Ltmp4: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp4-.L113$pb), %ebx + movl %ebx, -96(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + movl %edi, 8(%esp) + movl %edi, 4(%esp) + movl 8(%ebp), %esi + movl %esi, (%esp) + calll mcl_fpDbl_mulPre4L@PLT + leal 16(%edi), %eax + movl %eax, 8(%esp) + movl %eax, 4(%esp) + leal 32(%esi), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre4L@PLT + movl (%edi), %esi + movl 4(%edi), %ecx + addl 16(%edi), %esi + movl %esi, -108(%ebp) # 4-byte Spill + adcl 20(%edi), %ecx + seto %al + lahf + movl %eax, %edx + addl %esi, %esi + movl %esi, -84(%ebp) # 4-byte Spill + movl %ecx, %esi + adcl %esi, %esi + movl %esi, -80(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %esi + popl %eax + movl %esi, -88(%ebp) # 4-byte Spill + movl 24(%edi), %esi + pushl %eax + movl %edx, %eax + addb $127, %al + sahf + popl %eax + adcl 8(%edi), %esi + movl 28(%edi), %edx + adcl 12(%edi), %edx + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -100(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -104(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %ebx + sbbl %edi, %edi + movl %edi, -92(%ebp) # 4-byte Spill + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB113_2 +# BB#1: + movl $0, -80(%ebp) # 4-byte Folded Spill + movl $0, -84(%ebp) # 4-byte Folded Spill +.LBB113_2: + movl %esi, %ebx + movl -88(%ebp), 
%edi # 4-byte Reload + movl %edi, %eax + addb $127, %al + sahf + adcl %ebx, %ebx + movl %edx, %edi + adcl %edi, %edi + movl -104(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB113_4 +# BB#3: + xorl %edi, %edi + xorl %ebx, %ebx +.LBB113_4: + movl %ebx, -88(%ebp) # 4-byte Spill + movl -108(%ebp), %eax # 4-byte Reload + movl %eax, -60(%ebp) + movl %ecx, -56(%ebp) + movl %esi, -52(%ebp) + movl %edx, -48(%ebp) + movl %eax, -76(%ebp) + movl %ecx, -72(%ebp) + movl %esi, -68(%ebp) + movl %edx, -64(%ebp) + movl -100(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB113_5 +# BB#6: + movl $0, -100(%ebp) # 4-byte Folded Spill + jmp .LBB113_7 +.LBB113_5: + shrl $31, %edx + movl %edx, -100(%ebp) # 4-byte Spill +.LBB113_7: + leal -76(%ebp), %eax + movl %eax, 8(%esp) + leal -60(%ebp), %eax + movl %eax, 4(%esp) + leal -44(%ebp), %eax + movl %eax, (%esp) + movl -92(%ebp), %esi # 4-byte Reload + andl $1, %esi + movl -96(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre4L@PLT + movl -84(%ebp), %eax # 4-byte Reload + addl -28(%ebp), %eax + movl %eax, -84(%ebp) # 4-byte Spill + movl -80(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -80(%ebp) # 4-byte Spill + movl -88(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -88(%ebp) # 4-byte Spill + adcl -16(%ebp), %edi + movl %edi, -92(%ebp) # 4-byte Spill + adcl -100(%ebp), %esi # 4-byte Folded Reload + movl -44(%ebp), %eax + movl 8(%ebp), %edi + subl (%edi), %eax + movl %eax, -116(%ebp) # 4-byte Spill + movl -40(%ebp), %ebx + sbbl 4(%edi), %ebx + movl -36(%ebp), %eax + sbbl 8(%edi), %eax + movl %eax, -96(%ebp) # 4-byte Spill + movl -32(%ebp), %edx + sbbl 12(%edi), %edx + movl 16(%edi), %eax + movl %eax, -100(%ebp) # 4-byte Spill + sbbl %eax, -84(%ebp) # 4-byte Folded Spill + movl 20(%edi), %eax + movl %eax, -112(%ebp) # 4-byte Spill + sbbl %eax, -80(%ebp) # 4-byte Folded Spill + movl 24(%edi), %eax + movl %eax, -104(%ebp) # 4-byte Spill + sbbl %eax, -88(%ebp) # 4-byte Folded Spill + movl 28(%edi), %eax + movl %eax, -108(%ebp) # 4-byte Spill + sbbl %eax, -92(%ebp) # 4-byte Folded Spill + sbbl $0, %esi + movl 32(%edi), %ecx + movl %ecx, -132(%ebp) # 4-byte Spill + movl -116(%ebp), %eax # 4-byte Reload + subl %ecx, %eax + movl 36(%edi), %ecx + movl %ecx, -136(%ebp) # 4-byte Spill + sbbl %ecx, %ebx + movl 40(%edi), %ecx + movl %ecx, -128(%ebp) # 4-byte Spill + sbbl %ecx, -96(%ebp) # 4-byte Folded Spill + movl 44(%edi), %ecx + movl %ecx, -140(%ebp) # 4-byte Spill + sbbl %ecx, %edx + movl 48(%edi), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + sbbl %ecx, -84(%ebp) # 4-byte Folded Spill + movl 52(%edi), %ecx + movl %ecx, -116(%ebp) # 4-byte Spill + sbbl %ecx, -80(%ebp) # 4-byte Folded Spill + movl 56(%edi), %ecx + movl %ecx, -120(%ebp) # 4-byte Spill + sbbl %ecx, -88(%ebp) # 4-byte Folded Spill + movl 60(%edi), %ecx + movl %ecx, -124(%ebp) # 4-byte Spill + sbbl %ecx, -92(%ebp) # 4-byte Folded Spill + sbbl $0, %esi + addl -100(%ebp), %eax # 4-byte Folded Reload + adcl -112(%ebp), %ebx # 4-byte Folded Reload + movl %eax, 16(%edi) + movl -96(%ebp), %eax # 4-byte Reload + adcl -104(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 20(%edi) + adcl -108(%ebp), %edx # 4-byte Folded Reload + movl %eax, 24(%edi) + movl -84(%ebp), %eax # 4-byte Reload + adcl -132(%ebp), %eax # 4-byte Folded Reload + movl %edx, 28(%edi) + movl -80(%ebp), %ecx # 4-byte Reload + adcl -136(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 32(%edi) + movl -88(%ebp), %eax # 4-byte Reload + adcl -128(%ebp), %eax # 
4-byte Folded Reload + movl %ecx, 36(%edi) + movl -92(%ebp), %ecx # 4-byte Reload + adcl -140(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 40(%edi) + adcl -144(%ebp), %esi # 4-byte Folded Reload + movl %ecx, 44(%edi) + movl %esi, 48(%edi) + movl -116(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 52(%edi) + movl -120(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 56(%edi) + movl -124(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 60(%edi) + addl $156, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end113: + .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L + + .globl mcl_fp_mont8L + .align 16, 0x90 + .type mcl_fp_mont8L,@function +mcl_fp_mont8L: # @mcl_fp_mont8L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $700, %esp # imm = 0x2BC + calll .L114$pb +.L114$pb: + popl %ebx +.Ltmp5: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.L114$pb), %ebx + movl 732(%esp), %eax + movl -4(%eax), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 664(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 664(%esp), %ebp + movl 668(%esp), %edi + movl %ebp, %eax + imull %esi, %eax + movl 696(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 692(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 688(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 684(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 680(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 676(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 672(%esp), %esi + movl %eax, (%esp) + leal 624(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 624(%esp), %ebp + adcl 628(%esp), %edi + adcl 632(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 656(%esp), %ebp + sbbl %eax, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 584(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 60(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 584(%esp), %edi + adcl 588(%esp), %esi + movl 40(%esp), %ecx # 4-byte Reload + adcl 592(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 596(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 600(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 604(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 608(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 612(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + adcl 616(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %edi, %eax + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 732(%esp), %eax + movl %eax, %edx + calll .LmulPv256x32 + andl $1, %ebp + addl 544(%esp), %edi + adcl 548(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 40(%esp) # 
4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 568(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %ebp + movl 728(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + addl 504(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 524(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 536(%esp), %ebp + sbbl %edi, %edi + movl %esi, %eax + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 464(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + movl %edi, %eax + andl $1, %eax + addl 464(%esp), %esi + movl 40(%esp), %ecx # 4-byte Reload + adcl 468(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 472(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 480(%esp), %edi + movl 52(%esp), %ecx # 4-byte Reload + adcl 484(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 488(%esp), %esi + movl 60(%esp), %ecx # 4-byte Reload + adcl 492(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 496(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 724(%esp), %eax + movl %eax, %edx + calll .LmulPv256x32 + movl 40(%esp), %ecx # 4-byte Reload + addl 424(%esp), %ecx + movl 28(%esp), %ebp # 4-byte Reload + adcl 428(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 436(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 444(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 448(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 384(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + movl 40(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 384(%esp), %esi + adcl 388(%esp), %ebp + movl %ebp, 28(%esp) # 
4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 392(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 396(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 400(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 404(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 408(%esp), %edi + movl 32(%esp), %ecx # 4-byte Reload + adcl 412(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 416(%esp), %ebp + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 28(%esp), %ecx # 4-byte Reload + addl 344(%esp), %ecx + adcl 348(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 352(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 364(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 372(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + movl %ebp, %eax + andl $1, %eax + addl 304(%esp), %edi + movl 36(%esp), %ecx # 4-byte Reload + adcl 308(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 312(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 316(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + adcl 320(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 324(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 328(%esp), %esi + movl 44(%esp), %ecx # 4-byte Reload + adcl 332(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 336(%esp), %edi + adcl $0, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 36(%esp), %ecx # 4-byte Reload + addl 264(%esp), %ecx + movl 48(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 272(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 276(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 284(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 292(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + movl %edi, %eax + andl $1, %eax + addl 224(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 228(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %esi 
# 4-byte Reload + adcl 232(%esp), %esi + adcl 236(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 240(%esp), %edi + movl 32(%esp), %ecx # 4-byte Reload + adcl 244(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 248(%esp), %ebp + movl 40(%esp), %ecx # 4-byte Reload + adcl 252(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 256(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 48(%esp), %ecx # 4-byte Reload + addl 184(%esp), %ecx + adcl 188(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 196(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 204(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %ebp # 4-byte Reload + adcl 212(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + movl %edi, %ecx + andl $1, %ecx + addl 144(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 152(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + adcl 160(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 172(%esp), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + adcl 176(%esp), %ebp + adcl $0, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 104(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 52(%esp), %ecx # 4-byte Reload + addl 104(%esp), %ecx + adcl 108(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 116(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 120(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %edi # 4-byte Reload + adcl 128(%esp), %edi + adcl 132(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + sbbl %esi, %esi + movl 24(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %ebp + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + andl $1, %esi + addl 64(%esp), %ebp + movl 32(%esp), %ebx # 4-byte Reload + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 
72(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 76(%esp), %ebx + movl 44(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl 88(%esp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl $0, %esi + movl %eax, %edx + movl 732(%esp), %ebp + subl (%ebp), %edx + movl %ecx, %eax + sbbl 4(%ebp), %eax + movl %ebx, %ecx + sbbl 8(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + sbbl 12(%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + sbbl 16(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + sbbl 20(%ebp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + sbbl 24(%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + sbbl $0, %esi + andl $1, %esi + movl %esi, %ecx + jne .LBB114_2 +# BB#1: + movl %edx, %ebp +.LBB114_2: + movl 720(%esp), %edx + movl %ebp, (%edx) + testb %cl, %cl + movl 60(%esp), %ebp # 4-byte Reload + jne .LBB114_4 +# BB#3: + movl %eax, %ebp +.LBB114_4: + movl %ebp, 4(%edx) + jne .LBB114_6 +# BB#5: + movl 12(%esp), %ebx # 4-byte Reload +.LBB114_6: + movl %ebx, 8(%edx) + movl 28(%esp), %eax # 4-byte Reload + jne .LBB114_8 +# BB#7: + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%esp) # 4-byte Spill +.LBB114_8: + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%edx) + movl 40(%esp), %edi # 4-byte Reload + jne .LBB114_10 +# BB#9: + movl 20(%esp), %edi # 4-byte Reload +.LBB114_10: + movl %edi, 16(%edx) + jne .LBB114_12 +# BB#11: + movl 24(%esp), %eax # 4-byte Reload +.LBB114_12: + movl %eax, 20(%edx) + movl 36(%esp), %eax # 4-byte Reload + jne .LBB114_14 +# BB#13: + movl 32(%esp), %eax # 4-byte Reload +.LBB114_14: + movl %eax, 24(%edx) + movl 48(%esp), %eax # 4-byte Reload + jne .LBB114_16 +# BB#15: + movl 52(%esp), %eax # 4-byte Reload +.LBB114_16: + movl %eax, 28(%edx) + addl $700, %esp # imm = 0x2BC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end114: + .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L + + .globl mcl_fp_montNF8L + .align 16, 0x90 + .type mcl_fp_montNF8L,@function +mcl_fp_montNF8L: # @mcl_fp_montNF8L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $700, %esp # imm = 0x2BC + calll .L115$pb +.L115$pb: + popl %ebx +.Ltmp6: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp6-.L115$pb), %ebx + movl 732(%esp), %eax + movl -4(%eax), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 664(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 664(%esp), %ebp + movl 668(%esp), %edi + movl %ebp, %eax + imull %esi, %eax + movl 696(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 692(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 688(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 684(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 680(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 676(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 672(%esp), %esi + movl %eax, (%esp) + leal 624(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 624(%esp), %ebp + 
adcl 628(%esp), %edi + adcl 632(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + adcl 640(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 584(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 616(%esp), %ecx + addl 584(%esp), %edi + adcl 588(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 596(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 604(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl %edi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 732(%esp), %eax + movl %eax, %edx + calll .LmulPv256x32 + addl 544(%esp), %edi + adcl 548(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 564(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 572(%esp), %edi + movl 52(%esp), %ebp # 4-byte Reload + adcl 576(%esp), %ebp + movl 728(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 536(%esp), %ecx + addl 504(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 528(%esp), %edi + adcl 532(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 464(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 464(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + adcl 472(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 484(%esp), %esi + adcl 
488(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + adcl 496(%esp), %edi + movl 728(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 456(%esp), %eax + movl 44(%esp), %edx # 4-byte Reload + addl 424(%esp), %edx + adcl 428(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 432(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 436(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 440(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 448(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 452(%esp), %edi + movl %edi, %ebp + movl %eax, %edi + adcl $0, %edi + movl %edx, %eax + movl %edx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 384(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 384(%esp), %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 396(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 412(%esp), %ebp + adcl 416(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 376(%esp), %edx + movl 36(%esp), %ecx # 4-byte Reload + addl 344(%esp), %ecx + movl 40(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 352(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 360(%esp), %esi + movl 52(%esp), %edi # 4-byte Reload + adcl 364(%esp), %edi + adcl 368(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 304(%esp), %ebp + movl 40(%esp), %ebp # 4-byte Reload + adcl 308(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 320(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + adcl 324(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 328(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 336(%esp), %edi + movl 728(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 724(%esp), %eax + 
movl %eax, %edx + calll .LmulPv256x32 + movl 296(%esp), %edx + movl %ebp, %ecx + addl 264(%esp), %ecx + movl 48(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 280(%esp), %ebp + adcl 284(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 292(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl %edx, %edi + adcl $0, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 224(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 236(%esp), %esi + adcl 240(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 256(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 216(%esp), %ebp + movl 48(%esp), %ecx # 4-byte Reload + addl 184(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 192(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 196(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 144(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 156(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 160(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 176(%esp), %ebp + movl 728(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 104(%esp), %ecx + movl 724(%esp), %edx + calll .LmulPv256x32 + movl 136(%esp), %edi + movl 60(%esp), %ecx # 4-byte Reload + addl 104(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax + movl %eax, 56(%esp) # 4-byte 
Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 116(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 120(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 128(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 132(%esp), %ebp + adcl $0, %edi + movl 28(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 732(%esp), %edx + calll .LmulPv256x32 + addl 64(%esp), %esi + movl 32(%esp), %esi # 4-byte Reload + movl 56(%esp), %eax # 4-byte Reload + movl 44(%esp), %ebx # 4-byte Reload + adcl 68(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 72(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 76(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + adcl 80(%esp), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl 92(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + adcl 96(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl %eax, %edx + movl 732(%esp), %eax + subl (%eax), %edx + sbbl 4(%eax), %ecx + sbbl 8(%eax), %esi + sbbl 12(%eax), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 36(%esp), %ebx # 4-byte Reload + sbbl 16(%eax), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 40(%esp), %ebx # 4-byte Reload + sbbl 20(%eax), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + sbbl 24(%eax), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + sbbl 28(%eax), %edi + movl %edi, 28(%esp) # 4-byte Spill + testl %edi, %edi + js .LBB115_2 +# BB#1: + movl %edx, 56(%esp) # 4-byte Spill +.LBB115_2: + movl 720(%esp), %edx + movl 56(%esp), %eax # 4-byte Reload + movl %eax, (%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB115_4 +# BB#3: + movl %ecx, %eax +.LBB115_4: + movl %eax, 4(%edx) + js .LBB115_6 +# BB#5: + movl %esi, 32(%esp) # 4-byte Spill +.LBB115_6: + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 8(%edx) + movl 36(%esp), %edi # 4-byte Reload + movl 40(%esp), %ebp # 4-byte Reload + movl 60(%esp), %eax # 4-byte Reload + movl 48(%esp), %ecx # 4-byte Reload + js .LBB115_8 +# BB#7: + movl 12(%esp), %esi # 4-byte Reload + movl %esi, 44(%esp) # 4-byte Spill +.LBB115_8: + movl 44(%esp), %esi # 4-byte Reload + movl %esi, 12(%edx) + js .LBB115_10 +# BB#9: + movl 16(%esp), %edi # 4-byte Reload +.LBB115_10: + movl %edi, 16(%edx) + js .LBB115_12 +# BB#11: + movl 20(%esp), %ebp # 4-byte Reload +.LBB115_12: + movl %ebp, 20(%edx) + js .LBB115_14 +# BB#13: + movl 24(%esp), %eax # 4-byte Reload +.LBB115_14: + movl %eax, 24(%edx) + js .LBB115_16 +# BB#15: + movl 28(%esp), %ecx # 4-byte Reload +.LBB115_16: + movl %ecx, 28(%edx) + addl $700, %esp # imm = 0x2BC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end115: + .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L + + .globl mcl_fp_montRed8L + .align 16, 0x90 + .type mcl_fp_montRed8L,@function +mcl_fp_montRed8L: # @mcl_fp_montRed8L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $428, %esp # imm = 0x1AC + calll .L116$pb +.L116$pb: + popl %ebx +.Ltmp7: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp7-.L116$pb), %ebx + movl 456(%esp), %edx + movl -4(%edx), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 
452(%esp), %eax + movl (%eax), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 4(%eax), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl %esi, %ecx + imull %edi, %ecx + movl 60(%eax), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 56(%eax), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 52(%eax), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 48(%eax), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 44(%eax), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 40(%eax), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 36(%eax), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 32(%eax), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 28(%eax), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 24(%eax), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 20(%eax), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 16(%eax), %ebp + movl 12(%eax), %edi + movl 8(%eax), %esi + movl (%edx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%edx), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 24(%edx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 20(%edx), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 16(%edx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 12(%edx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 8(%edx), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 4(%edx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl %ecx, (%esp) + leal 392(%esp), %ecx + calll .LmulPv256x32 + movl 56(%esp), %eax # 4-byte Reload + addl 392(%esp), %eax + movl 64(%esp), %ecx # 4-byte Reload + adcl 396(%esp), %ecx + adcl 400(%esp), %esi + movl %esi, 16(%esp) # 4-byte Spill + adcl 404(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 408(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + movl 76(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 100(%esp) # 4-byte Folded Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + sbbl %eax, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %ecx, %edi + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + movl 64(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 352(%esp), %edi + movl 16(%esp), %edx # 4-byte Reload + adcl 356(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + adcl 360(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 364(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 368(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 372(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 376(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 380(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 384(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %esi + adcl $0, 
100(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 312(%esp), %edi + movl 52(%esp), %edi # 4-byte Reload + adcl 316(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl 80(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 272(%esp), %edi + movl 56(%esp), %ecx # 4-byte Reload + adcl 276(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 100(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl 84(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 64(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 232(%esp), %ebp + movl 68(%esp), %ecx # 4-byte Reload + adcl 236(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 252(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 
$0, 80(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 88(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %ecx, %edi + movl %edi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 192(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 192(%esp), %edi + movl 72(%esp), %ecx # 4-byte Reload + adcl 196(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 204(%esp), %edi + adcl 208(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 212(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 64(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 152(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 152(%esp), %esi + movl 96(%esp), %ecx # 4-byte Reload + adcl 156(%esp), %ecx + adcl 160(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 168(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 172(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 180(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 64(%esp) # 4-byte Folded Spill + movl 60(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 456(%esp), %edx + calll .LmulPv256x32 + addl 112(%esp), %esi + movl 92(%esp), %ecx # 4-byte Reload + adcl 116(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 120(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 128(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl %edi, %ebx + movl 100(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl %eax, %esi + adcl 136(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %ecx, %edx + subl 24(%esp), %edx # 4-byte Folded Reload + movl 108(%esp), %eax # 4-byte Reload + sbbl 20(%esp), %eax # 4-byte Folded Reload + movl 104(%esp), %ebp # 4-byte Reload + sbbl 28(%esp), %ebp # 4-byte Folded Reload + sbbl 32(%esp), %ebx # 4-byte Folded Reload + sbbl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 68(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + sbbl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 72(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + sbbl 44(%esp), %esi # 4-byte Folded 
Reload + movl %esi, 92(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + sbbl 48(%esp), %esi # 4-byte Folded Reload + movl %esi, 96(%esp) # 4-byte Spill + sbbl $0, %edi + andl $1, %edi + jne .LBB116_2 +# BB#1: + movl %edx, %ecx +.LBB116_2: + movl 448(%esp), %edx + movl %ecx, (%edx) + movl %edi, %ecx + testb %cl, %cl + jne .LBB116_4 +# BB#3: + movl %eax, 108(%esp) # 4-byte Spill +.LBB116_4: + movl 108(%esp), %eax # 4-byte Reload + movl %eax, 4(%edx) + movl 104(%esp), %eax # 4-byte Reload + jne .LBB116_6 +# BB#5: + movl %ebp, %eax +.LBB116_6: + movl %eax, 8(%edx) + movl 84(%esp), %eax # 4-byte Reload + movl 76(%esp), %ebp # 4-byte Reload + jne .LBB116_8 +# BB#7: + movl %ebx, %ebp +.LBB116_8: + movl %ebp, 12(%edx) + movl 100(%esp), %ebx # 4-byte Reload + jne .LBB116_10 +# BB#9: + movl 68(%esp), %ebx # 4-byte Reload +.LBB116_10: + movl %ebx, 16(%edx) + movl 80(%esp), %edi # 4-byte Reload + jne .LBB116_12 +# BB#11: + movl 72(%esp), %edi # 4-byte Reload +.LBB116_12: + movl %edi, 20(%edx) + movl 88(%esp), %esi # 4-byte Reload + jne .LBB116_14 +# BB#13: + movl 92(%esp), %esi # 4-byte Reload +.LBB116_14: + movl %esi, 24(%edx) + jne .LBB116_16 +# BB#15: + movl 96(%esp), %eax # 4-byte Reload +.LBB116_16: + movl %eax, 28(%edx) + addl $428, %esp # imm = 0x1AC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end116: + .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L + + .globl mcl_fp_addPre8L + .align 16, 0x90 + .type mcl_fp_addPre8L,@function +mcl_fp_addPre8L: # @mcl_fp_addPre8L +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 20(%esp), %esi + addl (%esi), %ecx + adcl 4(%esi), %edx + movl 8(%eax), %edi + adcl 8(%esi), %edi + movl 16(%esp), %ebx + movl %ecx, (%ebx) + movl 12(%esi), %ecx + movl %edx, 4(%ebx) + movl 16(%esi), %edx + adcl 12(%eax), %ecx + adcl 16(%eax), %edx + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %ecx, 12(%ebx) + movl 20(%esi), %ecx + adcl %edi, %ecx + movl 24(%eax), %edi + movl %edx, 16(%ebx) + movl 24(%esi), %edx + adcl %edi, %edx + movl %ecx, 20(%ebx) + movl %edx, 24(%ebx) + movl 28(%eax), %eax + movl 28(%esi), %ecx + adcl %eax, %ecx + movl %ecx, 28(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end117: + .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L + + .globl mcl_fp_subPre8L + .align 16, 0x90 + .type mcl_fp_subPre8L,@function +mcl_fp_subPre8L: # @mcl_fp_subPre8L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %esi + xorl %eax, %eax + movl 28(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %esi + movl 8(%ecx), %ebx + sbbl 8(%edi), %ebx + movl 20(%esp), %ebp + movl %edx, (%ebp) + movl 12(%ecx), %edx + sbbl 12(%edi), %edx + movl %esi, 4(%ebp) + movl 16(%ecx), %esi + sbbl 16(%edi), %esi + movl %ebx, 8(%ebp) + movl 20(%edi), %ebx + movl %edx, 12(%ebp) + movl 20(%ecx), %edx + sbbl %ebx, %edx + movl 24(%edi), %ebx + movl %esi, 16(%ebp) + movl 24(%ecx), %esi + sbbl %ebx, %esi + movl %edx, 20(%ebp) + movl %esi, 24(%ebp) + movl 28(%edi), %edx + movl 28(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 28(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end118: + .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L + + .globl mcl_fp_shr1_8L + .align 16, 0x90 + .type mcl_fp_shr1_8L,@function +mcl_fp_shr1_8L: # @mcl_fp_shr1_8L +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + shrdl $1, 
%edx, %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 8(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 4(%esi) + movl 12(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 8(%esi) + movl 16(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 12(%esi) + movl 20(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 16(%esi) + movl 24(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 20(%esi) + movl 28(%eax), %eax + shrdl $1, %eax, %ecx + movl %ecx, 24(%esi) + shrl %eax + movl %eax, 28(%esi) + popl %esi + retl +.Lfunc_end119: + .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L + + .globl mcl_fp_add8L + .align 16, 0x90 + .type mcl_fp_add8L,@function +mcl_fp_add8L: # @mcl_fp_add8L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 48(%esp), %edi + movl (%edi), %ecx + movl 4(%edi), %eax + movl 44(%esp), %edx + addl (%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl 4(%edx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 8(%edi), %eax + adcl 8(%edx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 12(%edx), %esi + movl 16(%edx), %eax + adcl 12(%edi), %esi + adcl 16(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 20(%edx), %ecx + adcl 20(%edi), %ecx + movl 24(%edx), %ebx + adcl 24(%edi), %ebx + movl 28(%edx), %edi + movl 48(%esp), %edx + adcl 28(%edx), %edi + movl 40(%esp), %edx + movl %ebp, (%edx) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%edx) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%edx) + movl %esi, 12(%edx) + movl %eax, 16(%edx) + movl %ecx, 20(%edx) + movl %ebx, 24(%edx) + movl %edi, 28(%edx) + sbbl %eax, %eax + andl $1, %eax + movl 52(%esp), %edx + movl 8(%esp), %ebp # 4-byte Reload + subl (%edx), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 16(%esp), %ebp # 4-byte Reload + movl 52(%esp), %edx + sbbl 4(%edx), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 12(%esp), %ebp # 4-byte Reload + movl 52(%esp), %edx + sbbl 8(%edx), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 52(%esp), %ebp + sbbl 12(%ebp), %esi + movl %esi, (%esp) # 4-byte Spill + movl 4(%esp), %edx # 4-byte Reload + sbbl 16(%ebp), %edx + movl %edx, %esi + sbbl 20(%ebp), %ecx + sbbl 24(%ebp), %ebx + sbbl 28(%ebp), %edi + sbbl $0, %eax + testb $1, %al + jne .LBB120_2 +# BB#1: # %nocarry + movl 8(%esp), %edx # 4-byte Reload + movl 40(%esp), %ebp + movl %edx, (%ebp) + movl 16(%esp), %edx # 4-byte Reload + movl %edx, 4(%ebp) + movl 12(%esp), %edx # 4-byte Reload + movl %edx, 8(%ebp) + movl (%esp), %eax # 4-byte Reload + movl %eax, 12(%ebp) + movl %esi, 16(%ebp) + movl %ecx, 20(%ebp) + movl %ebx, 24(%ebp) + movl %edi, 28(%ebp) +.LBB120_2: # %carry + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end120: + .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L + + .globl mcl_fp_addNF8L + .align 16, 0x90 + .type mcl_fp_addNF8L,@function +mcl_fp_addNF8L: # @mcl_fp_addNF8L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 84(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edi + movl 80(%esp), %ebx + addl (%ebx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + adcl 4(%ebx), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 28(%eax), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 20(%eax), %ebp + movl 16(%eax), %esi + movl 12(%eax), %edx + movl 8(%eax), %ecx + adcl 8(%ebx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + adcl 12(%ebx), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl 16(%ebx), %esi + movl %esi, 40(%esp) # 4-byte Spill + adcl 
20(%ebx), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 24(%ebx), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 28(%ebx), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 88(%esp), %ebx + movl 24(%esp), %ebp # 4-byte Reload + movl %ebp, %eax + subl (%ebx), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 4(%ebx), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %ebp, %eax + sbbl 8(%ebx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 12(%ebx), %edx + movl %edx, 12(%esp) # 4-byte Spill + sbbl 16(%ebx), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 20(%ebx), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + sbbl 24(%ebx), %ebp + movl 48(%esp), %esi # 4-byte Reload + sbbl 28(%ebx), %esi + testl %esi, %esi + js .LBB121_2 +# BB#1: + movl (%esp), %eax # 4-byte Reload +.LBB121_2: + movl 76(%esp), %ebx + movl %eax, (%ebx) + movl 32(%esp), %eax # 4-byte Reload + js .LBB121_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB121_4: + movl %eax, 4(%ebx) + movl 40(%esp), %edx # 4-byte Reload + movl 28(%esp), %edi # 4-byte Reload + js .LBB121_6 +# BB#5: + movl 8(%esp), %edi # 4-byte Reload +.LBB121_6: + movl %edi, 8(%ebx) + movl 44(%esp), %ecx # 4-byte Reload + movl 36(%esp), %eax # 4-byte Reload + js .LBB121_8 +# BB#7: + movl 12(%esp), %eax # 4-byte Reload +.LBB121_8: + movl %eax, 12(%ebx) + movl 48(%esp), %edi # 4-byte Reload + movl 52(%esp), %eax # 4-byte Reload + js .LBB121_10 +# BB#9: + movl 16(%esp), %edx # 4-byte Reload +.LBB121_10: + movl %edx, 16(%ebx) + js .LBB121_12 +# BB#11: + movl 20(%esp), %ecx # 4-byte Reload +.LBB121_12: + movl %ecx, 20(%ebx) + js .LBB121_14 +# BB#13: + movl %ebp, %eax +.LBB121_14: + movl %eax, 24(%ebx) + js .LBB121_16 +# BB#15: + movl %esi, %edi +.LBB121_16: + movl %edi, 28(%ebx) + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end121: + .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L + + .globl mcl_fp_sub8L + .align 16, 0x90 + .type mcl_fp_sub8L,@function +mcl_fp_sub8L: # @mcl_fp_sub8L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $28, %esp + movl 52(%esp), %esi + movl (%esi), %ecx + movl 4(%esi), %eax + xorl %ebx, %ebx + movl 56(%esp), %ebp + subl (%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + sbbl 4(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 8(%esi), %edx + sbbl 8(%ebp), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%ebp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 16(%esi), %ecx + sbbl 16(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 24(%esi), %edi + sbbl 24(%ebp), %edi + movl 28(%esi), %esi + sbbl 28(%ebp), %esi + sbbl $0, %ebx + testb $1, %bl + movl 48(%esp), %ebx + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, (%ebx) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%ebx) + movl %edx, 8(%ebx) + movl 24(%esp), %edx # 4-byte Reload + movl %edx, 12(%ebx) + movl %ecx, 16(%ebx) + movl %eax, 20(%ebx) + movl %edi, 24(%ebx) + movl %esi, 28(%ebx) + je .LBB122_2 +# BB#1: # %carry + movl %esi, (%esp) # 4-byte Spill + movl 60(%esp), %esi + movl 16(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 20(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 8(%esp), %ebp # 4-byte Reload + adcl 8(%esi), %ebp + movl 12(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl 
%ebp, 8(%ebx) + movl 16(%esi), %ecx + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl %eax, 20(%ebx) + movl 24(%esi), %eax + adcl %edi, %eax + movl %eax, 24(%ebx) + movl 28(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%ebx) +.LBB122_2: # %nocarry + addl $28, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end122: + .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L + + .globl mcl_fp_subNF8L + .align 16, 0x90 + .type mcl_fp_subNF8L,@function +mcl_fp_subNF8L: # @mcl_fp_subNF8L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 64(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %edx + movl 68(%esp), %ecx + subl (%ecx), %esi + movl %esi, 24(%esp) # 4-byte Spill + sbbl 4(%ecx), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 28(%eax), %edx + movl 24(%eax), %esi + movl 20(%eax), %edi + movl 16(%eax), %ebx + movl 12(%eax), %ebp + movl 8(%eax), %eax + sbbl 8(%ecx), %eax + movl %eax, 8(%esp) # 4-byte Spill + sbbl 12(%ecx), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + sbbl 16(%ecx), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + sbbl 20(%ecx), %edi + movl %edi, 20(%esp) # 4-byte Spill + sbbl 24(%ecx), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl %edx, %edi + sbbl 28(%ecx), %edi + movl %edi, 36(%esp) # 4-byte Spill + sarl $31, %edi + movl 72(%esp), %ebp + movl 28(%ebp), %eax + andl %edi, %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 24(%ebp), %eax + andl %edi, %eax + movl %eax, (%esp) # 4-byte Spill + movl 20(%ebp), %ebx + andl %edi, %ebx + movl 16(%ebp), %esi + andl %edi, %esi + movl 12(%ebp), %edx + andl %edi, %edx + movl 8(%ebp), %ecx + andl %edi, %ecx + movl 4(%ebp), %eax + andl %edi, %eax + andl (%ebp), %edi + addl 24(%esp), %edi # 4-byte Folded Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl 60(%esp), %ebp + movl %edi, (%ebp) + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %eax, 4(%ebp) + adcl 12(%esp), %edx # 4-byte Folded Reload + movl %ecx, 8(%ebp) + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %edx, 12(%ebp) + adcl 20(%esp), %ebx # 4-byte Folded Reload + movl %esi, 16(%ebp) + movl (%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ebx, 20(%ebp) + movl %eax, 24(%ebp) + movl 4(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%ebp) + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end123: + .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L + + .globl mcl_fpDbl_add8L + .align 16, 0x90 + .type mcl_fpDbl_add8L,@function +mcl_fpDbl_add8L: # @mcl_fpDbl_add8L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 84(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 80(%esp), %ebp + addl (%ebp), %esi + adcl 4(%ebp), %edx + movl 8(%ecx), %edi + adcl 8(%ebp), %edi + movl 12(%ebp), %ebx + movl 76(%esp), %eax + movl %esi, (%eax) + movl 16(%ebp), %esi + adcl 12(%ecx), %ebx + adcl 16(%ecx), %esi + movl %edx, 4(%eax) + movl 40(%ecx), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %edi, 8(%eax) + movl 20(%ecx), %edx + movl %ebx, 12(%eax) + movl 20(%ebp), %edi + adcl %edx, %edi + movl 24(%ecx), %edx + movl %esi, 16(%eax) + movl 24(%ebp), %esi + adcl %edx, %esi + movl 28(%ecx), %edx + movl %edi, 20(%eax) + movl 28(%ebp), %ebx + adcl %edx, %ebx + movl 32(%ecx), %edx + movl %esi, 24(%eax) + movl 32(%ebp), %esi + adcl %edx, %esi + movl %esi, 44(%esp) # 4-byte Spill 
+ movl 36(%ecx), %edx
+ movl %ebx, 28(%eax)
+ movl 36(%ebp), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl 44(%ebp), %edi
+ adcl %edx, %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl 48(%ebp), %eax
+ adcl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl 52(%ebp), %esi
+ adcl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl 56(%ebp), %eax
+ adcl %edx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%ecx), %ecx
+ movl 60(%ebp), %ebp
+ adcl %ecx, %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ movl 88(%esp), %edx
+ subl (%edx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ sbbl 4(%eax), %ebx
+ movl %eax, %edx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ movl %edx, %ebx
+ sbbl 8(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ sbbl 16(%ebx), %eax
+ sbbl 20(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ sbbl 24(%ebx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ sbbl 28(%ebx), %ebp
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB124_2
+# BB#1:
+ movl %eax, %edi
+.LBB124_2:
+ testb %cl, %cl
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB124_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB124_4:
+ movl 76(%esp), %eax
+ movl %ecx, 32(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 32(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ movl 28(%esp), %ebx # 4-byte Reload
+ jne .LBB124_6
+# BB#5:
+ movl 4(%esp), %ebx # 4-byte Reload
+.LBB124_6:
+ movl %ebx, 36(%eax)
+ jne .LBB124_8
+# BB#7:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB124_8:
+ movl %esi, 40(%eax)
+ movl 36(%esp), %esi # 4-byte Reload
+ jne .LBB124_10
+# BB#9:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB124_10:
+ movl %edx, 44(%eax)
+ movl %edi, 48(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB124_12
+# BB#11:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB124_12:
+ movl %esi, 52(%eax)
+ jne .LBB124_14
+# BB#13:
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB124_14:
+ movl %edx, 56(%eax)
+ jne .LBB124_16
+# BB#15:
+ movl %ebp, %ecx
+.LBB124_16:
+ movl %ecx, 60(%eax)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end124:
+ .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L
+
+ .globl mcl_fpDbl_sub8L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub8L,@function
+mcl_fpDbl_sub8L: # @mcl_fpDbl_sub8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %edx
+ movl 68(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %edx
+ movl 8(%edi), %esi
+ sbbl 8(%ebx), %esi
+ movl 60(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%edi), %edx
+ sbbl 16(%ebx), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebx), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%edi), %eax
+ sbbl %esi, %eax
+ movl 24(%ebx), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%edi), %edx
+ sbbl %esi, %edx
+ movl 28(%ebx), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%edi), %eax
+ sbbl %esi, %eax
+ movl 32(%ebx), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%edi), %edx
+ sbbl %esi, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 36(%ebx), %edx
+ movl %eax, 28(%ecx)
+ movl 36(%edi), %eax
+ sbbl %edx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 40(%ebx), %eax
+ movl 40(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 44(%ebx), %eax
+ movl 44(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%ebx), %eax
+ movl 48(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 52(%ebx), %eax
+ movl 52(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl 56(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 72(%esp), %ebx
+ jne .LBB125_1
+# BB#2:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB125_3
+.LBB125_1:
+ movl 28(%ebx), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+.LBB125_3:
+ testb %al, %al
+ jne .LBB125_4
+# BB#5:
+ movl $0, %ebp
+ movl $0, %eax
+ jmp .LBB125_6
+.LBB125_4:
+ movl (%ebx), %eax
+ movl 4(%ebx), %ebp
+.LBB125_6:
+ jne .LBB125_7
+# BB#8:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB125_9
+.LBB125_7:
+ movl 24(%ebx), %edx
+ movl %edx, (%esp) # 4-byte Spill
+.LBB125_9:
+ jne .LBB125_10
+# BB#11:
+ movl $0, %edx
+ jmp .LBB125_12
+.LBB125_10:
+ movl 20(%ebx), %edx
+.LBB125_12:
+ jne .LBB125_13
+# BB#14:
+ movl $0, %esi
+ jmp .LBB125_15
+.LBB125_13:
+ movl 16(%ebx), %esi
+.LBB125_15:
+ jne .LBB125_16
+# BB#17:
+ movl $0, %edi
+ jmp .LBB125_18
+.LBB125_16:
+ movl 12(%ebx), %edi
+.LBB125_18:
+ jne .LBB125_19
+# BB#20:
+ xorl %ebx, %ebx
+ jmp .LBB125_21
+.LBB125_19:
+ movl 8(%ebx), %ebx
+.LBB125_21:
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 36(%ecx)
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 40(%ecx)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 44(%ecx)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 52(%ecx)
+ movl %eax, 56(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end125:
+ .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L
+
+ .align 16, 0x90
+ .type .LmulPv288x32,@function
+.LmulPv288x32: # @mulPv288x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl %edx, %esi
+ movl 76(%esp), %edi
+ movl %edi, %eax
+ mull 32(%esi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 28(%esi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 24(%esi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 20(%esi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 16(%esi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 12(%esi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 8(%esi)
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 4(%esi)
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull (%esi)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%ecx)
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 36(%ecx)
+ movl %ecx, %eax
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end126:
+ .size .LmulPv288x32, .Lfunc_end126-.LmulPv288x32
+
+ .globl mcl_fp_mulUnitPre9L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre9L,@function
+mcl_fp_mulUnitPre9L: # @mcl_fp_mulUnitPre9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ calll .L127$pb
+.L127$pb:
+ popl %ebx
+.Ltmp8:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp8-.L127$pb), %ebx
+ movl 104(%esp), %eax
+ movl %eax, (%esp)
+ leal 32(%esp), %ecx
+ movl 100(%esp), %edx
+ calll .LmulPv288x32
+ movl 68(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edi
+ movl 48(%esp), %ebx
+ movl 44(%esp), %ebp
+ movl 40(%esp), %esi
+ movl 32(%esp), %edx
+ movl 36(%esp), %ecx
+ movl 96(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %ebp, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %edi, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end127:
+ .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L
+
+ .globl mcl_fpDbl_mulPre9L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre9L,@function
+mcl_fpDbl_mulPre9L: # @mcl_fpDbl_mulPre9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L128$pb
+.L128$pb:
+ popl %esi
+.Ltmp9:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp9-.L128$pb), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 452(%esp), %edx
+ movl %edx, %ebp
+ movl %esi, %ebx
+ calll .LmulPv288x32
+ movl 420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl 388(%esp), %edi
+ movl 448(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 456(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl %ebp, %edx
+ movl %esi, %ebx
+ calll .LmulPv288x32
+ addl 344(%esp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
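+# mcl_fpDbl_mulPre9L builds the full 18-limb product by schoolbook
+# multiplication: one .LmulPv288x32 call per limb of the multiplier at
+# 456(%esp), each 10-word partial product accumulated into the result
+# at the next 4-byte offset (the pattern repeated below).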
+ movl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 364(%esp), %ebx
+ movl 360(%esp), %edi
+ movl 356(%esp), %esi
+ movl 348(%esp), %ecx
+ movl 352(%esp), %edx
+ movl 448(%esp), %eax
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 324(%esp), %edi
+ movl 320(%esp), %ebp
+ movl 316(%esp), %esi
+ movl 308(%esp), %ecx
+ movl 312(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 264(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 288(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 284(%esp), %ebx
+ movl 280(%esp), %edi
+ movl 276(%esp), %esi
+ movl 268(%esp), %ecx
+ movl 272(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 224(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 240(%esp), %edi
+ movl 236(%esp), %ebp
+ movl 228(%esp), %ecx
+ movl 232(%esp), %edx
+ movl 448(%esp), %eax
+ movl 44(%esp), %esi # 4-byte Reload
+ movl %esi, 16(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 204(%esp), %edi
+ movl 200(%esp), %ebx
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 448(%esp), %eax
+ movl 44(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 20(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 144(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 164(%esp), %ebx
+ movl 160(%esp), %edi
+ movl 156(%esp), %esi
+ movl 148(%esp), %ecx
+ movl 152(%esp), %edx
+ movl 448(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 12(%esp), %esi # 4-byte Reload
+ addl 104(%esp), %esi
+ movl 140(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 136(%esp), %ebp
+ movl 132(%esp), %edi
+ movl 128(%esp), %ebx
+ movl 124(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %edx
+ movl 108(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl 448(%esp), %eax
+ movl %esi, 28(%eax)
+ movl 12(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl %esi, %ebp
+ addl 64(%esp), %ebp
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 88(%esp), %edi
+ movl 84(%esp), %ebx
+ movl 80(%esp), %esi
+ movl 76(%esp), %eax
+ movl 448(%esp), %ecx
+ movl %ebp, 32(%ecx)
+ movl %edx, 36(%ecx)
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl %eax, 60(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%ecx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end128:
+ .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L
+
+ .globl mcl_fpDbl_sqrPre9L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre9L,@function
+mcl_fpDbl_sqrPre9L: # @mcl_fpDbl_sqrPre9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L129$pb
+.L129$pb:
+ popl %ebx
+.Ltmp10:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp10-.L129$pb), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl %edx, %esi
+ movl %ebx, %edi
+ calll .LmulPv288x32
+ movl 420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl 388(%esp), %ebp
+ movl 448(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%esi), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl %esi, %edx
+ movl %edi, %ebx
+ calll .LmulPv288x32
+ addl 344(%esp), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 364(%esp), %ebx
+ movl 360(%esp), %edi
+ movl 356(%esp), %esi
+ movl 348(%esp), %ecx
+ movl 352(%esp), %edx
+ movl 448(%esp), %eax
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
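+# mcl_fpDbl_sqrPre9L follows the same schoolbook pattern as
+# mcl_fpDbl_mulPre9L, but both operands are the input at 452(%esp):
+# each .LmulPv288x32 call multiplies the number by one of its own limbs.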
+ movl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 324(%esp), %edi
+ movl 320(%esp), %ebp
+ movl 316(%esp), %esi
+ movl 308(%esp), %ecx
+ movl 312(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 264(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 288(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 284(%esp), %ebx
+ movl 280(%esp), %edi
+ movl 276(%esp), %esi
+ movl 268(%esp), %ecx
+ movl 272(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 224(%esp), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 248(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 244(%esp), %edi
+ movl 240(%esp), %ebp
+ movl 236(%esp), %esi
+ movl 228(%esp), %ecx
+ movl 232(%esp), %edx
+ movl 448(%esp), %eax
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebx
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 144(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 164(%esp), %edi
+ movl 160(%esp), %ebp
+ movl 156(%esp), %esi
+ movl 148(%esp), %ecx
+ movl 152(%esp), %edx
+ movl 448(%esp), %eax
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 4(%esp), %esi # 4-byte Reload
+ addl 104(%esp), %esi
+ movl 140(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp
+ movl 128(%esp), %ebx
+ movl 124(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 120(%esp), %edi
+ movl 116(%esp), %edx
+ movl 108(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl 448(%esp), %eax
+ movl %esi, 28(%eax)
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl %esi, %ebp
+ addl 64(%esp), %ebp
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %edi
+ movl 84(%esp), %ebx
+ movl 80(%esp), %esi
+ movl 76(%esp), %eax
+ movl 448(%esp), %ecx
+ movl %ebp, 32(%ecx)
+ movl %edx, 36(%ecx)
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl %eax, 60(%ecx)
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%ecx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end129:
+ .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L
+
+ .globl mcl_fp_mont9L
+ .align 16, 0x90
+ .type mcl_fp_mont9L,@function
+mcl_fp_mont9L: # @mcl_fp_mont9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $796, %esp # imm = 0x31C
+ calll .L130$pb
+.L130$pb:
+ popl %ebx
+.Ltmp11:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp11-.L130$pb), %ebx
+ movl 828(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 752(%esp), %ebp
+ movl 756(%esp), %esi
+ movl %ebp, %eax
+ imull %edi, %eax
+ movl 788(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 780(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 776(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
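+# Montgomery reduction step: the imull above multiplies the low word of
+# the product by the inverse constant loaded from -4 off the modulus
+# pointer; the .LmulPv288x32 call that follows adds that multiple of the
+# modulus so the low word of the accumulator cancels.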
+ movl 772(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 768(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 764(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 760(%esp), %edi
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 712(%esp), %ebp
+ adcl 716(%esp), %esi
+ adcl 720(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 748(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 672(%esp), %esi
+ adcl 676(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 704(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 708(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 632(%esp), %esi
+ adcl 636(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 660(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 824(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ addl 592(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 616(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 624(%esp), %esi
+ adcl 628(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 552(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 580(%esp), %edi
+ adcl 584(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 824(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 512(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 524(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 536(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 544(%esp), %edi
+ adcl 548(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 472(%esp), %ebp
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 484(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 496(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 500(%esp), %esi
+ adcl 504(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 508(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 820(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 444(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 452(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 456(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %esi, %eax
+ andl $1, %eax
+ addl 392(%esp), %ebp
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 396(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 404(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 408(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 428(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ addl 352(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 364(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 372(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 384(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 312(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 316(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 328(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 332(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 336(%esp), %esi
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 340(%esp), %edi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 344(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 348(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl 824(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 272(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 292(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 296(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 308(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 232(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 240(%esp), %esi
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 244(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 260(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 192(%esp), %ecx
+ adcl 196(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 200(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 212(%esp), %esi
+ adcl 216(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 152(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 164(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 172(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 180(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ adcl 136(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %edi
+ addl 72(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 828(%esp), %ebx
+ subl (%ebx), %eax
+ movl %ecx, %edx
+ sbbl 4(%ebx), %edx
+ movl %esi, %ecx
+ sbbl 8(%ebx), %ecx
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 12(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 20(%ebx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 24(%ebx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ sbbl 28(%ebx), %esi
+ movl 60(%esp), %ebp # 4-byte Reload
+ sbbl 32(%ebx), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ sbbl $0, %edi
+ andl $1, %edi
+ movl %edi, %ebx
+ jne .LBB130_2
+# BB#1:
+ movl %esi, 32(%esp) # 4-byte Spill
+.LBB130_2:
+ testb %bl, %bl
+ movl 68(%esp), %esi # 4-byte Reload
+ jne .LBB130_4
+# BB#3:
+ movl %eax, %esi
+.LBB130_4:
+ movl 816(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB130_6
+# BB#5:
+ movl %edx, %eax
+.LBB130_6:
+ movl %eax, 4(%ebp)
+ movl 52(%esp), %eax # 4-byte Reload
+ jne .LBB130_8
+# BB#7:
+ movl %ecx, %eax
+.LBB130_8:
+ movl %eax, 8(%ebp)
+ movl 44(%esp), %eax # 4-byte Reload
+ jne .LBB130_10
+# BB#9:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB130_10:
+ movl %eax, 12(%ebp)
+ jne .LBB130_12
+# BB#11:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+.LBB130_12:
+ movl 40(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ jne .LBB130_14
+# BB#13:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB130_14:
+ movl %eax, 20(%ebp)
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB130_16
+# BB#15:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB130_16:
+ movl %eax, 24(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB130_18
+# BB#17:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB130_18:
+ movl %eax, 32(%ebp)
+ addl $796, %esp # imm = 0x31C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end130:
+ .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L
+
+ .globl mcl_fp_montNF9L
+ .align 16, 0x90
+ .type mcl_fp_montNF9L,@function
+mcl_fp_montNF9L: # @mcl_fp_montNF9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $796, %esp # imm = 0x31C
+ calll .L131$pb
+.L131$pb:
+ popl %ebx
+.Ltmp12:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp12-.L131$pb), %ebx
+ movl 828(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 752(%esp), %esi
+ movl 756(%esp), %ebp
+ movl %esi, %eax
+ imull %edi, %eax
+ movl 788(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 780(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 776(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 772(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 768(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 764(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 760(%esp), %edi
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 712(%esp), %esi
+ adcl 716(%esp), %ebp
+ adcl 720(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 740(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 708(%esp), %eax
+ addl 672(%esp), %ebp
+ adcl 676(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 696(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 632(%esp), %ebp
+ adcl 636(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 656(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 664(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 628(%esp), %eax
+ addl 592(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 600(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 604(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 608(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 612(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 616(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 620(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 624(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 552(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 572(%esp), %esi
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 576(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 588(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 548(%esp), %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ addl 512(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 516(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 520(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 524(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 528(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + adcl 532(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 536(%esp), %ebp + movl 64(%esp), %edi # 4-byte Reload + adcl 540(%esp), %edi + movl 44(%esp), %esi # 4-byte Reload + adcl 544(%esp), %esi + adcl $0, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl %edx, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + movl 32(%esp), %eax # 4-byte Reload + addl 472(%esp), %eax + movl 40(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 496(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + adcl 500(%esp), %edi + movl %edi, %ebp + adcl 504(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 432(%esp), %ecx + movl 820(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + movl 468(%esp), %eax + movl 40(%esp), %ecx # 4-byte Reload + addl 432(%esp), %ecx + movl 48(%esp), %esi # 4-byte Reload + adcl 436(%esp), %esi + movl 56(%esp), %edi # 4-byte Reload + adcl 440(%esp), %edi + movl 36(%esp), %edx # 4-byte Reload + adcl 444(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 448(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 452(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 456(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 460(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 464(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 392(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 392(%esp), %ebp + adcl 396(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl 400(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 412(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 424(%esp), %edi + movl 40(%esp), %esi # 4-byte Reload + adcl 428(%esp), %esi + movl 824(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 388(%esp), %eax + movl 48(%esp), %ecx # 4-byte 
Reload + addl 352(%esp), %ecx + movl 56(%esp), %edx # 4-byte Reload + adcl 356(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 360(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 364(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl 368(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 372(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 376(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 380(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 384(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 312(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 324(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 340(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 348(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 308(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + addl 272(%esp), %ecx + movl 36(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 280(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 292(%esp), %ebp + adcl 296(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 232(%esp), %edi + movl 36(%esp), %esi # 4-byte Reload + adcl 236(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %ebp, %edi + adcl 252(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte 
Reload + adcl 268(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 192(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 228(%esp), %ebp + movl %esi, %ecx + addl 192(%esp), %ecx + movl 60(%esp), %esi # 4-byte Reload + adcl 196(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 208(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 152(%esp), %ecx + movl 828(%esp), %edx + calll .LmulPv288x32 + addl 152(%esp), %edi + adcl 156(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 160(%esp), %edi + movl 64(%esp), %esi # 4-byte Reload + adcl 164(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 188(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 820(%esp), %edx + calll .LmulPv288x32 + movl 148(%esp), %ebp + movl 60(%esp), %ecx # 4-byte Reload + addl 112(%esp), %ecx + adcl 116(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + adcl 120(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 128(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %esi # 4-byte Reload + adcl 132(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, %ebp + movl 28(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 72(%esp), %ecx + movl 828(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + addl 72(%esp), %edi + movl 44(%esp), %edi # 4-byte Reload + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebx # 4-byte Reload + adcl 80(%esp), %ebx + movl %ebx, 64(%esp) # 4-byte Spill + adcl 84(%esp), %edi + movl 52(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl 92(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 56(%esp), %edx # 
4-byte Reload + adcl 100(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl 108(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl %eax, %edx + movl 828(%esp), %eax + subl (%eax), %edx + sbbl 4(%eax), %ebx + movl %edi, %ecx + sbbl 8(%eax), %ecx + movl 52(%esp), %esi # 4-byte Reload + sbbl 12(%eax), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 40(%esp), %esi # 4-byte Reload + sbbl 16(%eax), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + sbbl 20(%eax), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + sbbl 24(%eax), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + sbbl 28(%eax), %esi + movl %esi, 32(%esp) # 4-byte Spill + sbbl 32(%eax), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + sarl $31, %ebp + testl %ebp, %ebp + movl 68(%esp), %eax # 4-byte Reload + js .LBB131_2 +# BB#1: + movl %edx, %eax +.LBB131_2: + movl 816(%esp), %edx + movl %eax, (%edx) + movl 64(%esp), %esi # 4-byte Reload + js .LBB131_4 +# BB#3: + movl %ebx, %esi +.LBB131_4: + movl %esi, 4(%edx) + movl 52(%esp), %ebp # 4-byte Reload + movl 40(%esp), %eax # 4-byte Reload + js .LBB131_6 +# BB#5: + movl %ecx, %edi +.LBB131_6: + movl %edi, 8(%edx) + js .LBB131_8 +# BB#7: + movl 16(%esp), %ebp # 4-byte Reload +.LBB131_8: + movl %ebp, 12(%edx) + js .LBB131_10 +# BB#9: + movl 20(%esp), %eax # 4-byte Reload +.LBB131_10: + movl %eax, 16(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB131_12 +# BB#11: + movl 24(%esp), %eax # 4-byte Reload +.LBB131_12: + movl %eax, 20(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB131_14 +# BB#13: + movl 28(%esp), %eax # 4-byte Reload +.LBB131_14: + movl %eax, 24(%edx) + movl 36(%esp), %eax # 4-byte Reload + js .LBB131_16 +# BB#15: + movl 32(%esp), %eax # 4-byte Reload +.LBB131_16: + movl %eax, 28(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB131_18 +# BB#17: + movl 44(%esp), %eax # 4-byte Reload +.LBB131_18: + movl %eax, 32(%edx) + addl $796, %esp # imm = 0x31C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end131: + .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L + + .globl mcl_fp_montRed9L + .align 16, 0x90 + .type mcl_fp_montRed9L,@function +mcl_fp_montRed9L: # @mcl_fp_montRed9L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $492, %esp # imm = 0x1EC + calll .L132$pb +.L132$pb: + popl %ebx +.Ltmp13: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp13-.L132$pb), %ebx + movl 520(%esp), %edx + movl -4(%edx), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 516(%esp), %eax + movl (%eax), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 4(%eax), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl %esi, %ecx + imull %edi, %ecx + movl 68(%eax), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 64(%eax), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 60(%eax), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 56(%eax), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 52(%eax), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 48(%eax), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 44(%eax), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 40(%eax), %edi + movl %edi, 124(%esp) # 4-byte Spill + movl 36(%eax), %edi + movl %edi, 120(%esp) # 4-byte Spill + movl 32(%eax), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 28(%eax), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 24(%eax), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 20(%eax), %ebp + movl 
16(%eax), %edi + movl 12(%eax), %esi + movl 8(%eax), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl (%edx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 32(%edx), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 28(%edx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%edx), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 20(%edx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 16(%edx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 12(%edx), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 8(%edx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 4(%edx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl %ecx, (%esp) + leal 448(%esp), %ecx + calll .LmulPv288x32 + movl 76(%esp), %eax # 4-byte Reload + addl 448(%esp), %eax + movl 52(%esp), %ecx # 4-byte Reload + adcl 452(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 460(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 464(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + adcl 468(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + movl 96(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 108(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + sbbl %eax, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %ecx, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 408(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + movl 76(%esp), %eax # 4-byte Reload + andl $1, %eax + movl 52(%esp), %ecx # 4-byte Reload + addl 408(%esp), %ecx + movl 56(%esp), %edx # 4-byte Reload + adcl 412(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + adcl 416(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 420(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 424(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 428(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 432(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 436(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 440(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + adcl $0, %esi + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 108(%esp) # 4-byte Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, %ebp + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + 
addl 368(%esp), %edi + movl 60(%esp), %ecx # 4-byte Reload + adcl 372(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 404(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl 108(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + addl 328(%esp), %ebp + movl 64(%esp), %ecx # 4-byte Reload + adcl 332(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 364(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl 100(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 88(%esp) # 4-byte Folded Spill + movl 80(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 288(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + movl 64(%esp), %eax # 4-byte Reload + addl 288(%esp), %eax + movl 68(%esp), %ecx # 4-byte Reload + adcl 292(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl 
%eax, 108(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 112(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 520(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + addl 248(%esp), %esi + movl 84(%esp), %ecx # 4-byte Reload + adcl 252(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl 264(%esp), %ebp + movl 124(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %edi, %esi + adcl $0, %esi + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %edi + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + addl 208(%esp), %edi + movl 92(%esp), %ecx # 4-byte Reload + adcl 212(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 220(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 88(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl 76(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 520(%esp), %eax + movl %eax, %edx + calll .LmulPv288x32 + addl 168(%esp), %ebp + movl 104(%esp), %ecx # 4-byte Reload + adcl 172(%esp), %ecx + movl 120(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %ebp # 4-byte Reload + adcl 180(%esp), %ebp + movl 96(%esp), %esi # 4-byte Reload + adcl 184(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), 
%eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 520(%esp), %edx + calll .LmulPv288x32 + addl 128(%esp), %edi + movl 120(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl %eax, %edi + adcl 136(%esp), %ebp + movl %ebp, 124(%esp) # 4-byte Spill + adcl 140(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 144(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl %eax, %ebx + movl 112(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 164(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + subl 20(%esp), %edi # 4-byte Folded Reload + movl 124(%esp), %eax # 4-byte Reload + sbbl 16(%esp), %eax # 4-byte Folded Reload + sbbl 24(%esp), %esi # 4-byte Folded Reload + sbbl 28(%esp), %ecx # 4-byte Folded Reload + sbbl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 72(%esp) # 4-byte Spill + movl 112(%esp), %ebx # 4-byte Reload + sbbl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 76(%esp) # 4-byte Spill + movl 100(%esp), %ebx # 4-byte Reload + sbbl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 84(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 92(%esp) # 4-byte Spill + movl %edx, %ebx + movl %ebp, %edx + sbbl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 104(%esp) # 4-byte Spill + sbbl $0, %edx + andl $1, %edx + jne .LBB132_2 +# BB#1: + movl %ecx, 116(%esp) # 4-byte Spill +.LBB132_2: + testb %dl, %dl + movl 120(%esp), %ecx # 4-byte Reload + jne .LBB132_4 +# BB#3: + movl %edi, %ecx +.LBB132_4: + movl 512(%esp), %edi + movl %ecx, (%edi) + movl 88(%esp), %ecx # 4-byte Reload + jne .LBB132_6 +# BB#5: + movl %eax, 124(%esp) # 4-byte Spill +.LBB132_6: + movl 124(%esp), %eax # 4-byte Reload + movl %eax, 4(%edi) + movl 96(%esp), %eax # 4-byte Reload + jne .LBB132_8 +# BB#7: + movl %esi, %eax +.LBB132_8: + movl %eax, 8(%edi) + movl 116(%esp), %eax # 4-byte Reload + movl %eax, 12(%edi) + movl 80(%esp), %eax # 4-byte Reload + movl 108(%esp), %ebp # 4-byte Reload + jne .LBB132_10 +# BB#9: + movl 72(%esp), %ebp # 4-byte Reload +.LBB132_10: + movl %ebp, 16(%edi) + movl 112(%esp), %ebx # 4-byte Reload + jne .LBB132_12 +# BB#11: + movl 76(%esp), %ebx # 4-byte Reload +.LBB132_12: + movl %ebx, 20(%edi) + movl 100(%esp), %esi # 4-byte Reload + jne .LBB132_14 +# BB#13: + movl 84(%esp), %esi # 4-byte Reload +.LBB132_14: + movl %esi, 24(%edi) + jne .LBB132_16 +# BB#15: + movl 92(%esp), %ecx # 4-byte Reload +.LBB132_16: + movl %ecx, 28(%edi) + jne .LBB132_18 +# BB#17: + movl 104(%esp), %eax # 4-byte Reload +.LBB132_18: + movl %eax, 32(%edi) + addl $492, %esp # imm = 0x1EC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end132: + .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L + + .globl mcl_fp_addPre9L + .align 16, 0x90 + .type mcl_fp_addPre9L,@function 
+mcl_fp_addPre9L: # @mcl_fp_addPre9L +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl %esi, 24(%ebx) + movl %edx, 28(%ebx) + movl 32(%eax), %eax + movl 32(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 32(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end133: + .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L + + .globl mcl_fp_subPre9L + .align 16, 0x90 + .type mcl_fp_subPre9L,@function +mcl_fp_subPre9L: # @mcl_fp_subPre9L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl %edi, 24(%ebp) + movl %esi, 28(%ebp) + movl 32(%edx), %edx + movl 32(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 32(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end134: + .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L + + .globl mcl_fp_shr1_9L + .align 16, 0x90 + .type mcl_fp_shr1_9L,@function +mcl_fp_shr1_9L: # @mcl_fp_shr1_9L +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + shrdl $1, %edx, %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 8(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 4(%esi) + movl 12(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 8(%esi) + movl 16(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 12(%esi) + movl 20(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 16(%esi) + movl 24(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 20(%esi) + movl 28(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 24(%esi) + movl 32(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 28(%esi) + shrl %eax + movl %eax, 32(%esi) + popl %esi + retl +.Lfunc_end135: + .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L + + .globl mcl_fp_add9L + .align 16, 0x90 + .type mcl_fp_add9L,@function +mcl_fp_add9L: # @mcl_fp_add9L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $20, %esp + movl 48(%esp), %edi + movl (%edi), %ecx + movl 4(%edi), %eax + movl 44(%esp), %ebx + addl (%ebx), %ecx + movl %ecx, %ebp + adcl 4(%ebx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 8(%edi), %eax + adcl 8(%ebx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 12(%ebx), %ecx + movl 16(%ebx), %eax + adcl 12(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + adcl 16(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 20(%ebx), %esi + adcl 20(%edi), %esi + movl 24(%ebx), %edx + adcl 
24(%edi), %edx + movl 28(%ebx), %ecx + adcl 28(%edi), %ecx + movl 32(%ebx), %eax + adcl 32(%edi), %eax + movl 40(%esp), %edi + movl %ebp, (%edi) + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%edi) + movl 12(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%edi) + movl 8(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%edi) + movl 4(%esp), %ebx # 4-byte Reload + movl %ebx, 16(%edi) + movl %esi, 20(%edi) + movl %edx, 24(%edi) + movl %ecx, 28(%edi) + movl %eax, 32(%edi) + sbbl %ebx, %ebx + andl $1, %ebx + movl 52(%esp), %edi + subl (%edi), %ebp + movl %ebp, (%esp) # 4-byte Spill + movl 16(%esp), %ebp # 4-byte Reload + sbbl 4(%edi), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 12(%esp), %ebp # 4-byte Reload + sbbl 8(%edi), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 8(%esp), %ebp # 4-byte Reload + sbbl 12(%edi), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 4(%esp), %ebp # 4-byte Reload + sbbl 16(%edi), %ebp + sbbl 20(%edi), %esi + sbbl 24(%edi), %edx + sbbl 28(%edi), %ecx + sbbl 32(%edi), %eax + sbbl $0, %ebx + testb $1, %bl + jne .LBB136_2 +# BB#1: # %nocarry + movl (%esp), %edi # 4-byte Reload + movl 40(%esp), %ebx + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 12(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 8(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl %ebp, 16(%ebx) + movl %esi, 20(%ebx) + movl %edx, 24(%ebx) + movl %ecx, 28(%ebx) + movl %eax, 32(%ebx) +.LBB136_2: # %carry + addl $20, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end136: + .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L + + .globl mcl_fp_addNF9L + .align 16, 0x90 + .type mcl_fp_addNF9L,@function +mcl_fp_addNF9L: # @mcl_fp_addNF9L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 100(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edi + movl 96(%esp), %esi + addl (%esi), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 4(%esi), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 32(%eax), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 28(%eax), %ebp + movl 24(%eax), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 20(%eax), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 16(%eax), %ebx + movl 12(%eax), %edx + movl 8(%eax), %ecx + adcl 8(%esi), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + adcl 12(%esi), %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 16(%esi), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 20(%esi), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 24(%esi), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 28(%esi), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 32(%esi), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 104(%esp), %esi + movl 36(%esp), %eax # 4-byte Reload + movl %eax, %ebp + subl (%esi), %ebp + movl %ebp, (%esp) # 4-byte Spill + sbbl 4(%esi), %edi + movl %edi, 4(%esp) # 4-byte Spill + sbbl 8(%esi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 12(%esi), %edx + movl %edx, 12(%esp) # 4-byte Spill + sbbl 16(%esi), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 64(%esp), %ebx # 4-byte Reload + sbbl 20(%esi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + sbbl 24(%esi), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + sbbl 28(%esi), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + movl %ecx, %edx + movl %ecx, %ebp + sbbl 32(%esi), %edx + movl %edx, 
32(%esp) # 4-byte Spill + movl %edx, %esi + sarl $31, %esi + testl %esi, %esi + js .LBB137_2 +# BB#1: + movl (%esp), %eax # 4-byte Reload +.LBB137_2: + movl 92(%esp), %ecx + movl %eax, (%ecx) + movl 44(%esp), %eax # 4-byte Reload + js .LBB137_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB137_4: + movl %eax, 4(%ecx) + movl 68(%esp), %esi # 4-byte Reload + movl 64(%esp), %edi # 4-byte Reload + movl 52(%esp), %ebx # 4-byte Reload + movl 48(%esp), %edx # 4-byte Reload + movl 40(%esp), %eax # 4-byte Reload + js .LBB137_6 +# BB#5: + movl 8(%esp), %eax # 4-byte Reload +.LBB137_6: + movl %eax, 8(%ecx) + movl %ebp, %eax + js .LBB137_8 +# BB#7: + movl 12(%esp), %edx # 4-byte Reload +.LBB137_8: + movl %edx, 12(%ecx) + movl 56(%esp), %edx # 4-byte Reload + js .LBB137_10 +# BB#9: + movl 16(%esp), %ebx # 4-byte Reload +.LBB137_10: + movl %ebx, 16(%ecx) + js .LBB137_12 +# BB#11: + movl 20(%esp), %edi # 4-byte Reload +.LBB137_12: + movl %edi, 20(%ecx) + js .LBB137_14 +# BB#13: + movl 24(%esp), %esi # 4-byte Reload +.LBB137_14: + movl %esi, 24(%ecx) + js .LBB137_16 +# BB#15: + movl 28(%esp), %edx # 4-byte Reload +.LBB137_16: + movl %edx, 28(%ecx) + js .LBB137_18 +# BB#17: + movl 32(%esp), %eax # 4-byte Reload +.LBB137_18: + movl %eax, 32(%ecx) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end137: + .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L + + .globl mcl_fp_sub9L + .align 16, 0x90 + .type mcl_fp_sub9L,@function +mcl_fp_sub9L: # @mcl_fp_sub9L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $28, %esp + movl 52(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 56(%esp), %edi + subl (%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 16(%esi), %edx + sbbl 16(%edi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 20(%esi), %ecx + sbbl 20(%edi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 28(%esi), %ebp + sbbl 28(%edi), %ebp + movl 32(%esi), %esi + sbbl 32(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 48(%esp), %ebx + movl 12(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl %edx, 16(%ebx) + movl %ecx, 20(%ebx) + movl %eax, 24(%ebx) + movl %ebp, 28(%ebx) + movl %esi, 32(%ebx) + je .LBB138_2 +# BB#1: # %carry + movl %esi, %edi + movl 60(%esp), %esi + movl 12(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 20(%esp), %ecx # 4-byte Reload + adcl 8(%esi), %ecx + movl 12(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebx) + movl 16(%esi), %ecx + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl (%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl %ecx, 24(%ebx) + movl 28(%esi), %eax + adcl %ebp, %eax + movl %eax, 28(%ebx) + movl 32(%esi), %eax + adcl %edi, %eax + movl %eax, 32(%ebx) +.LBB138_2: # %nocarry + addl $28, %esp + popl %esi + popl %edi + popl %ebx + popl 
%ebp + retl +.Lfunc_end138: + .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L + + .globl mcl_fp_subNF9L + .align 16, 0x90 + .type mcl_fp_subNF9L,@function +mcl_fp_subNF9L: # @mcl_fp_subNF9L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $48, %esp + movl 72(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %eax + movl 76(%esp), %esi + subl (%esi), %edx + movl %edx, 32(%esp) # 4-byte Spill + sbbl 4(%esi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 28(%ecx), %edx + movl 24(%ecx), %edi + movl 20(%ecx), %ebx + movl 16(%ecx), %ebp + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + movl 76(%esp), %esi + sbbl 8(%esi), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 76(%esp), %ecx + sbbl 12(%ecx), %eax + movl %eax, 16(%esp) # 4-byte Spill + sbbl 16(%ecx), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + sbbl 20(%ecx), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + sbbl 24(%ecx), %edi + movl %edi, 28(%esp) # 4-byte Spill + sbbl 28(%ecx), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + sbbl 32(%ecx), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %edx, %ecx + sarl $31, %ecx + movl %ecx, %eax + shldl $1, %edx, %eax + movl 80(%esp), %ebp + movl 12(%ebp), %edx + andl %eax, %edx + movl %edx, (%esp) # 4-byte Spill + movl 4(%ebp), %edi + andl %eax, %edi + andl (%ebp), %eax + movl 32(%ebp), %edx + andl %ecx, %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 28(%ebp), %edx + andl %ecx, %edx + movl %edx, 4(%esp) # 4-byte Spill + roll %ecx + movl 24(%ebp), %ebx + andl %ecx, %ebx + movl 20(%ebp), %esi + andl %ecx, %esi + movl 16(%ebp), %edx + andl %ecx, %edx + andl 8(%ebp), %ecx + addl 32(%esp), %eax # 4-byte Folded Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + movl 68(%esp), %ebp + movl %eax, (%ebp) + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %edi, 4(%ebp) + movl (%esp), %eax # 4-byte Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebp) + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %eax, 12(%ebp) + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %edx, 16(%ebp) + adcl 28(%esp), %ebx # 4-byte Folded Reload + movl %esi, 20(%ebp) + movl 4(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %ebx, 24(%ebp) + movl %eax, 28(%ebp) + movl 8(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%ebp) + addl $48, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end139: + .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L + + .globl mcl_fpDbl_add9L + .align 16, 0x90 + .type mcl_fpDbl_add9L,@function +mcl_fpDbl_add9L: # @mcl_fpDbl_add9L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $68, %esp + movl 96(%esp), %edx + movl 92(%esp), %edi + movl 12(%edi), %esi + movl 16(%edi), %ecx + movl 8(%edx), %ebx + movl (%edx), %ebp + addl (%edi), %ebp + movl 88(%esp), %eax + movl %ebp, (%eax) + movl 4(%edx), %ebp + adcl 4(%edi), %ebp + adcl 8(%edi), %ebx + adcl 12(%edx), %esi + adcl 16(%edx), %ecx + movl %ebp, 4(%eax) + movl 44(%edx), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl %ebx, 8(%eax) + movl 20(%edx), %ebx + movl %esi, 12(%eax) + movl 20(%edi), %esi + adcl %ebx, %esi + movl 24(%edx), %ebx + movl %ecx, 16(%eax) + movl 24(%edi), %ecx + adcl %ebx, %ecx + movl 28(%edx), %ebx + movl %esi, 20(%eax) + movl 28(%edi), %esi + adcl %ebx, %esi + movl 32(%edx), %ebx + movl %ecx, 24(%eax) + movl 32(%edi), %ecx + adcl %ebx, %ecx + movl 36(%edx), %ebp + movl %esi, 28(%eax) + movl 36(%edi), %esi 
+ adcl %ebp, %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 40(%edx), %esi + movl %ecx, 32(%eax) + movl 40(%edi), %eax + adcl %esi, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%edi), %eax + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl 48(%edi), %ebx + adcl %ecx, %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 52(%edx), %eax + movl 52(%edi), %ecx + adcl %eax, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 56(%edx), %esi + movl 56(%edi), %eax + adcl %esi, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%edx), %ebp + movl 60(%edi), %esi + adcl %ebp, %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 64(%edx), %eax + movl 64(%edi), %ebp + adcl %eax, %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 68(%edx), %edx + movl 68(%edi), %eax + adcl %edx, %eax + movl %eax, 32(%esp) # 4-byte Spill + sbbl %edx, %edx + andl $1, %edx + movl 100(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + subl (%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 4(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + sbbl 8(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + sbbl 12(%edi), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + sbbl 16(%edi), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + sbbl 20(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + sbbl 24(%edi), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl %ebp, %eax + movl 32(%esp), %ebp # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl %ebp, %ebx + sbbl 32(%edi), %ebx + sbbl $0, %edx + andl $1, %edx + jne .LBB140_2 +# BB#1: + movl %ebx, %ebp +.LBB140_2: + testb %dl, %dl + movl 60(%esp), %edx # 4-byte Reload + movl 40(%esp), %ecx # 4-byte Reload + movl 36(%esp), %esi # 4-byte Reload + movl 56(%esp), %edi # 4-byte Reload + movl 52(%esp), %ebx # 4-byte Reload + jne .LBB140_4 +# BB#3: + movl (%esp), %ecx # 4-byte Reload + movl 4(%esp), %esi # 4-byte Reload + movl 8(%esp), %edi # 4-byte Reload + movl 12(%esp), %ebx # 4-byte Reload + movl 16(%esp), %edx # 4-byte Reload +.LBB140_4: + movl 88(%esp), %eax + movl %edx, 36(%eax) + movl %ebx, 40(%eax) + movl %edi, 44(%eax) + movl %esi, 48(%eax) + movl %ecx, 52(%eax) + movl 44(%esp), %edx # 4-byte Reload + movl 64(%esp), %ecx # 4-byte Reload + jne .LBB140_6 +# BB#5: + movl 20(%esp), %ecx # 4-byte Reload +.LBB140_6: + movl %ecx, 56(%eax) + movl 48(%esp), %ecx # 4-byte Reload + jne .LBB140_8 +# BB#7: + movl 24(%esp), %edx # 4-byte Reload +.LBB140_8: + movl %edx, 60(%eax) + jne .LBB140_10 +# BB#9: + movl 28(%esp), %ecx # 4-byte Reload +.LBB140_10: + movl %ecx, 64(%eax) + movl %ebp, 68(%eax) + addl $68, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end140: + .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L + + .globl mcl_fpDbl_sub9L + .align 16, 0x90 + .type mcl_fpDbl_sub9L,@function +mcl_fpDbl_sub9L: # @mcl_fpDbl_sub9L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $52, %esp + movl 76(%esp), %ebx + movl (%ebx), %eax + movl 4(%ebx), %edx + movl 80(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %edx + movl 8(%ebx), %esi + sbbl 8(%ebp), %esi + movl 72(%esp), %ecx + movl %eax, (%ecx) + movl 12(%ebx), %eax + sbbl 12(%ebp), %eax + movl %edx, 4(%ecx) + movl 16(%ebx), %edx + sbbl 16(%ebp), %edx + movl %esi, 8(%ecx) + movl 20(%ebp), %esi + movl %eax, 12(%ecx) + movl 20(%ebx), %eax + sbbl %esi, %eax + movl 24(%ebp), %esi + movl %edx, 16(%ecx) + movl 24(%ebx), %edx + sbbl 
%esi, %edx + movl 28(%ebp), %esi + movl %eax, 20(%ecx) + movl 28(%ebx), %eax + sbbl %esi, %eax + movl 32(%ebp), %esi + movl %edx, 24(%ecx) + movl 32(%ebx), %edx + sbbl %esi, %edx + movl 36(%ebp), %esi + movl %eax, 28(%ecx) + movl 36(%ebx), %eax + sbbl %esi, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%ebp), %eax + movl %edx, 32(%ecx) + movl 40(%ebx), %edx + sbbl %eax, %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 44(%ebp), %eax + movl 44(%ebx), %edx + sbbl %eax, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 48(%ebp), %eax + movl 48(%ebx), %edx + sbbl %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 52(%ebp), %eax + movl 52(%ebx), %edx + sbbl %eax, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 56(%ebp), %eax + movl 56(%ebx), %edx + sbbl %eax, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 60(%ebp), %eax + movl 60(%ebx), %edx + sbbl %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 64(%ebp), %eax + movl 64(%ebx), %edx + sbbl %eax, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 68(%ebp), %eax + movl 68(%ebx), %edx + sbbl %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 84(%esp), %ebp + jne .LBB141_1 +# BB#2: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB141_3 +.LBB141_1: + movl 32(%ebp), %edx + movl %edx, 12(%esp) # 4-byte Spill +.LBB141_3: + testb %al, %al + jne .LBB141_4 +# BB#5: + movl $0, 4(%esp) # 4-byte Folded Spill + movl $0, %esi + jmp .LBB141_6 +.LBB141_4: + movl (%ebp), %esi + movl 4(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB141_6: + jne .LBB141_7 +# BB#8: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB141_9 +.LBB141_7: + movl 28(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB141_9: + jne .LBB141_10 +# BB#11: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB141_12 +.LBB141_10: + movl 24(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB141_12: + jne .LBB141_13 +# BB#14: + movl $0, %edi + jmp .LBB141_15 +.LBB141_13: + movl 20(%ebp), %edi +.LBB141_15: + jne .LBB141_16 +# BB#17: + movl $0, %ebx + jmp .LBB141_18 +.LBB141_16: + movl 16(%ebp), %ebx +.LBB141_18: + jne .LBB141_19 +# BB#20: + movl %ebp, %eax + movl $0, %ebp + jmp .LBB141_21 +.LBB141_19: + movl %ebp, %eax + movl 12(%eax), %ebp +.LBB141_21: + jne .LBB141_22 +# BB#23: + xorl %eax, %eax + jmp .LBB141_24 +.LBB141_22: + movl 8(%eax), %eax +.LBB141_24: + addl 24(%esp), %esi # 4-byte Folded Reload + movl 4(%esp), %edx # 4-byte Reload + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %esi, 36(%ecx) + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %edx, 40(%ecx) + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl %eax, 44(%ecx) + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 48(%ecx) + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %ebx, 52(%ecx) + movl (%esp), %edx # 4-byte Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edi, 56(%ecx) + movl 8(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %edx, 60(%ecx) + movl %eax, 64(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%ecx) + addl $52, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end141: + .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L + + .align 16, 0x90 + .type .LmulPv320x32,@function +.LmulPv320x32: # @mulPv320x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $64, %esp + movl %edx, %esi + movl 84(%esp), %edi + movl %edi, %eax + mull 36(%esi) + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 56(%esp) 
# 4-byte Spill + movl %edi, %eax + mull 32(%esi) + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %edi, %eax + mull 28(%esi) + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %edi, %eax + mull 24(%esi) + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %edi, %eax + mull 20(%esi) + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %edi, %eax + mull 16(%esi) + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %edi, %eax + mull 12(%esi) + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %edi, %eax + mull 8(%esi) + movl %edx, %ebp + movl %eax, 4(%esp) # 4-byte Spill + movl %edi, %eax + mull 4(%esi) + movl %edx, %ebx + movl %eax, (%esp) # 4-byte Spill + movl %edi, %eax + mull (%esi) + movl %eax, (%ecx) + addl (%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%ecx) + adcl 4(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 8(%ecx) + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%ecx) + movl 28(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%ecx) + movl 44(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%ecx) + movl 52(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ecx) + movl 60(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 40(%ecx) + movl %ecx, %eax + addl $64, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end142: + .size .LmulPv320x32, .Lfunc_end142-.LmulPv320x32 + + .globl mcl_fp_mulUnitPre10L + .align 16, 0x90 + .type mcl_fp_mulUnitPre10L,@function +mcl_fp_mulUnitPre10L: # @mcl_fp_mulUnitPre10L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $76, %esp + calll .L143$pb +.L143$pb: + popl %ebx +.Ltmp14: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp14-.L143$pb), %ebx + movl 104(%esp), %eax + movl %eax, (%esp) + leal 32(%esp), %ecx + movl 100(%esp), %edx + calll .LmulPv320x32 + movl 72(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 68(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 64(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 60(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 56(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 52(%esp), %ebx + movl 48(%esp), %ebp + movl 44(%esp), %edi + movl 40(%esp), %esi + movl 32(%esp), %edx + movl 36(%esp), %ecx + movl 96(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebp, 16(%eax) + movl %ebx, 20(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + addl $76, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end143: + .size mcl_fp_mulUnitPre10L, .Lfunc_end143-mcl_fp_mulUnitPre10L + + .globl mcl_fpDbl_mulPre10L + .align 16, 0x90 + .type mcl_fpDbl_mulPre10L,@function +mcl_fpDbl_mulPre10L: # @mcl_fpDbl_mulPre10L +# BB#0: + pushl 
%ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $188, %esp + calll .L144$pb +.L144$pb: + popl %ebx +.Ltmp15: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp15-.L144$pb), %ebx + movl %ebx, -128(%ebp) # 4-byte Spill + movl 16(%ebp), %edi + movl %edi, 8(%esp) + movl 12(%ebp), %esi + movl %esi, 4(%esp) + movl 8(%ebp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre5L@PLT + leal 20(%edi), %eax + movl %eax, 8(%esp) + leal 20(%esi), %eax + movl %eax, 4(%esp) + movl 8(%ebp), %eax + leal 40(%eax), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre5L@PLT + movl 28(%esi), %edi + movl (%esi), %ebx + movl 4(%esi), %eax + addl 20(%esi), %ebx + movl %ebx, -148(%ebp) # 4-byte Spill + adcl 24(%esi), %eax + movl %eax, -132(%ebp) # 4-byte Spill + adcl 8(%esi), %edi + movl %edi, -140(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -96(%ebp) # 4-byte Spill + movl 16(%ebp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + addl 20(%esi), %eax + movl %eax, -152(%ebp) # 4-byte Spill + adcl 24(%esi), %ecx + movl %ecx, -120(%ebp) # 4-byte Spill + movl 28(%esi), %eax + adcl 8(%esi), %eax + movl %eax, -160(%ebp) # 4-byte Spill + movl 32(%esi), %eax + adcl 12(%esi), %eax + movl 36(%esi), %ecx + adcl 16(%esi), %ecx + pushl %eax + seto %al + lahf + movl %eax, %esi + popl %eax + movl %esi, -156(%ebp) # 4-byte Spill + movl %ebx, -124(%ebp) # 4-byte Spill + jb .LBB144_2 +# BB#1: + xorl %edi, %edi + movl $0, -124(%ebp) # 4-byte Folded Spill +.LBB144_2: + movl %edi, -136(%ebp) # 4-byte Spill + movl 12(%ebp), %esi + movl %esi, %ebx + movl 36(%ebx), %esi + movl 32(%ebx), %edi + movl -96(%ebp), %edx # 4-byte Reload + pushl %eax + movl %edx, %eax + addb $127, %al + sahf + popl %eax + adcl 12(%ebx), %edi + movl %edi, -116(%ebp) # 4-byte Spill + adcl 16(%ebx), %esi + movl %esi, -144(%ebp) # 4-byte Spill + movl %ecx, -112(%ebp) # 4-byte Spill + movl %eax, -104(%ebp) # 4-byte Spill + movl -160(%ebp), %edx # 4-byte Reload + movl %edx, -108(%ebp) # 4-byte Spill + movl -120(%ebp), %esi # 4-byte Reload + movl %esi, -96(%ebp) # 4-byte Spill + movl -152(%ebp), %ebx # 4-byte Reload + movl %ebx, -100(%ebp) # 4-byte Spill + jb .LBB144_4 +# BB#3: + movl $0, -112(%ebp) # 4-byte Folded Spill + movl $0, -104(%ebp) # 4-byte Folded Spill + movl $0, -108(%ebp) # 4-byte Folded Spill + movl $0, -96(%ebp) # 4-byte Folded Spill + movl $0, -100(%ebp) # 4-byte Folded Spill +.LBB144_4: + movl -148(%ebp), %esi # 4-byte Reload + movl %esi, -72(%ebp) + movl -132(%ebp), %edi # 4-byte Reload + movl %edi, -68(%ebp) + movl -140(%ebp), %esi # 4-byte Reload + movl %esi, -64(%ebp) + movl %ebx, -92(%ebp) + movl -120(%ebp), %esi # 4-byte Reload + movl %esi, -88(%ebp) + movl %edx, -84(%ebp) + movl %eax, -80(%ebp) + movl %ecx, -76(%ebp) + sbbl %edx, %edx + movl -116(%ebp), %eax # 4-byte Reload + movl %eax, -60(%ebp) + movl -144(%ebp), %ebx # 4-byte Reload + movl %ebx, -56(%ebp) + movl -156(%ebp), %ecx # 4-byte Reload + pushl %eax + movl %ecx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB144_6 +# BB#5: + movl $0, %ebx + movl $0, %eax + movl $0, %edi +.LBB144_6: + movl %eax, -116(%ebp) # 4-byte Spill + sbbl %eax, %eax + leal -92(%ebp), %ecx + movl %ecx, 8(%esp) + leal -72(%ebp), %ecx + movl %ecx, 4(%esp) + leal -52(%ebp), %ecx + movl %ecx, (%esp) + andl %eax, %edx + movl -124(%ebp), %eax # 4-byte Reload + addl %eax, -100(%ebp) # 4-byte Folded Spill + adcl %edi, -96(%ebp) # 4-byte Folded Spill + movl -108(%ebp), %esi # 4-byte Reload + adcl -136(%ebp), %esi # 4-byte Folded Reload + movl -116(%ebp), %eax # 4-byte Reload + adcl %eax, 
-104(%ebp) # 4-byte Folded Spill + movl -112(%ebp), %edi # 4-byte Reload + adcl %ebx, %edi + sbbl %eax, %eax + andl $1, %eax + movl %eax, -120(%ebp) # 4-byte Spill + andl $1, %edx + movl %edx, -116(%ebp) # 4-byte Spill + movl -128(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre5L@PLT + movl -100(%ebp), %eax # 4-byte Reload + addl -32(%ebp), %eax + movl %eax, -100(%ebp) # 4-byte Spill + movl -96(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -96(%ebp) # 4-byte Spill + adcl -24(%ebp), %esi + movl %esi, -108(%ebp) # 4-byte Spill + movl -104(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -104(%ebp) # 4-byte Spill + adcl -16(%ebp), %edi + movl %edi, -112(%ebp) # 4-byte Spill + movl -120(%ebp), %eax # 4-byte Reload + adcl %eax, -116(%ebp) # 4-byte Folded Spill + movl -52(%ebp), %ecx + movl 8(%ebp), %esi + subl (%esi), %ecx + movl -48(%ebp), %ebx + sbbl 4(%esi), %ebx + movl -44(%ebp), %eax + sbbl 8(%esi), %eax + movl %eax, -120(%ebp) # 4-byte Spill + movl -40(%ebp), %edx + sbbl 12(%esi), %edx + movl -36(%ebp), %edi + sbbl 16(%esi), %edi + movl 20(%esi), %eax + movl %eax, -124(%ebp) # 4-byte Spill + sbbl %eax, -100(%ebp) # 4-byte Folded Spill + movl 24(%esi), %eax + movl %eax, -128(%ebp) # 4-byte Spill + sbbl %eax, -96(%ebp) # 4-byte Folded Spill + movl 28(%esi), %eax + movl %eax, -132(%ebp) # 4-byte Spill + sbbl %eax, -108(%ebp) # 4-byte Folded Spill + movl 32(%esi), %eax + movl %eax, -136(%ebp) # 4-byte Spill + sbbl %eax, -104(%ebp) # 4-byte Folded Spill + movl 36(%esi), %eax + movl %eax, -140(%ebp) # 4-byte Spill + sbbl %eax, -112(%ebp) # 4-byte Folded Spill + sbbl $0, -116(%ebp) # 4-byte Folded Spill + movl 40(%esi), %eax + movl %eax, -160(%ebp) # 4-byte Spill + subl %eax, %ecx + movl 44(%esi), %eax + movl %eax, -164(%ebp) # 4-byte Spill + sbbl %eax, %ebx + movl 48(%esi), %eax + movl %eax, -168(%ebp) # 4-byte Spill + sbbl %eax, -120(%ebp) # 4-byte Folded Spill + movl 52(%esi), %eax + movl %eax, -172(%ebp) # 4-byte Spill + sbbl %eax, %edx + movl 56(%esi), %eax + movl %eax, -176(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl 60(%esi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + sbbl %eax, -100(%ebp) # 4-byte Folded Spill + movl 64(%esi), %eax + movl %eax, -144(%ebp) # 4-byte Spill + sbbl %eax, -96(%ebp) # 4-byte Folded Spill + movl 68(%esi), %eax + movl %eax, -148(%ebp) # 4-byte Spill + sbbl %eax, -108(%ebp) # 4-byte Folded Spill + movl 72(%esi), %eax + movl %eax, -152(%ebp) # 4-byte Spill + sbbl %eax, -104(%ebp) # 4-byte Folded Spill + movl 76(%esi), %eax + movl %eax, -156(%ebp) # 4-byte Spill + sbbl %eax, -112(%ebp) # 4-byte Folded Spill + sbbl $0, -116(%ebp) # 4-byte Folded Spill + addl -124(%ebp), %ecx # 4-byte Folded Reload + adcl -128(%ebp), %ebx # 4-byte Folded Reload + movl %ecx, 20(%esi) + movl -120(%ebp), %eax # 4-byte Reload + adcl -132(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 24(%esi) + adcl -136(%ebp), %edx # 4-byte Folded Reload + movl %eax, 28(%esi) + adcl -140(%ebp), %edi # 4-byte Folded Reload + movl %edx, 32(%esi) + movl -100(%ebp), %eax # 4-byte Reload + adcl -160(%ebp), %eax # 4-byte Folded Reload + movl %edi, 36(%esi) + movl -96(%ebp), %ecx # 4-byte Reload + adcl -164(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 40(%esi) + movl -108(%ebp), %eax # 4-byte Reload + adcl -168(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 44(%esi) + movl -104(%ebp), %ecx # 4-byte Reload + adcl -172(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 48(%esi) + movl -112(%ebp), %edx # 4-byte Reload + adcl -176(%ebp), %edx # 4-byte Folded Reload + 
movl %ecx, 52(%esi) + movl -116(%ebp), %eax # 4-byte Reload + adcl -180(%ebp), %eax # 4-byte Folded Reload + movl %edx, 56(%esi) + movl %eax, 60(%esi) + movl -144(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 64(%esi) + movl -148(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 68(%esi) + movl -152(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 72(%esi) + movl -156(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 76(%esi) + addl $188, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end144: + .size mcl_fpDbl_mulPre10L, .Lfunc_end144-mcl_fpDbl_mulPre10L + + .globl mcl_fpDbl_sqrPre10L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre10L,@function +mcl_fpDbl_sqrPre10L: # @mcl_fpDbl_sqrPre10L +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $188, %esp + calll .L145$pb +.L145$pb: + popl %ebx +.Ltmp16: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp16-.L145$pb), %ebx + movl %ebx, -120(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + movl %edi, 8(%esp) + movl %edi, 4(%esp) + movl 8(%ebp), %esi + movl %esi, (%esp) + calll mcl_fpDbl_mulPre5L@PLT + leal 20(%edi), %eax + movl %eax, 8(%esp) + movl %eax, 4(%esp) + leal 40(%esi), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre5L@PLT + movl 36(%edi), %eax + movl 32(%edi), %ebx + movl 28(%edi), %esi + movl (%edi), %ecx + movl 4(%edi), %edx + addl 20(%edi), %ecx + adcl 24(%edi), %edx + adcl 8(%edi), %esi + adcl 12(%edi), %ebx + movl %ebx, -124(%ebp) # 4-byte Spill + adcl 16(%edi), %eax + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -128(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -108(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -104(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -100(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -96(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + sbbl %ebx, %ebx + movl %ebx, -116(%ebp) # 4-byte Spill + pushl %eax + movl %edi, %eax + addb $127, %al + sahf + popl %eax + jb .LBB145_1 +# BB#2: + movl $0, -112(%ebp) # 4-byte Folded Spill + jmp .LBB145_3 +.LBB145_1: + leal (%ecx,%ecx), %edi + movl %edi, -112(%ebp) # 4-byte Spill +.LBB145_3: + movl -96(%ebp), %edi # 4-byte Reload + pushl %eax + movl %edi, %eax + addb $127, %al + sahf + popl %eax + movl -124(%ebp), %edi # 4-byte Reload + jb .LBB145_4 +# BB#5: + movl $0, -96(%ebp) # 4-byte Folded Spill + jmp .LBB145_6 +.LBB145_4: + movl %edx, %ebx + shldl $1, %ecx, %ebx + movl %ebx, -96(%ebp) # 4-byte Spill +.LBB145_6: + movl -100(%ebp), %ebx # 4-byte Reload + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB145_7 +# BB#8: + movl $0, -100(%ebp) # 4-byte Folded Spill + jmp .LBB145_9 +.LBB145_7: + movl %esi, %ebx + shldl $1, %edx, %ebx + movl %ebx, -100(%ebp) # 4-byte Spill +.LBB145_9: + movl -104(%ebp), %ebx # 4-byte Reload + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB145_10 +# BB#11: + movl $0, -104(%ebp) # 4-byte Folded Spill + jmp .LBB145_12 +.LBB145_10: + movl %edi, %ebx + shldl $1, %esi, %ebx + movl %ebx, -104(%ebp) # 4-byte Spill +.LBB145_12: + movl -108(%ebp), %ebx # 4-byte Reload + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB145_13 +# BB#14: + movl $0, -108(%ebp) # 4-byte Folded Spill + jmp .LBB145_15 +.LBB145_13: + movl %eax, %ebx + shldl $1, %edi, %ebx + movl 
%ebx, -108(%ebp) # 4-byte Spill +.LBB145_15: + movl %ecx, -72(%ebp) + movl %edx, -68(%ebp) + movl %esi, -64(%ebp) + movl %edi, -60(%ebp) + movl %eax, -56(%ebp) + movl %ecx, -92(%ebp) + movl %edx, -88(%ebp) + movl %esi, -84(%ebp) + movl %edi, -80(%ebp) + movl %eax, -76(%ebp) + movl -128(%ebp), %ecx # 4-byte Reload + pushl %eax + movl %ecx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB145_16 +# BB#17: + movl $0, -124(%ebp) # 4-byte Folded Spill + jmp .LBB145_18 +.LBB145_16: + shrl $31, %eax + movl %eax, -124(%ebp) # 4-byte Spill +.LBB145_18: + leal -52(%ebp), %eax + movl %eax, (%esp) + leal -72(%ebp), %eax + movl %eax, 4(%esp) + leal -92(%ebp), %eax + movl %eax, 8(%esp) + movl -116(%ebp), %esi # 4-byte Reload + andl $1, %esi + movl -120(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre5L@PLT + movl -112(%ebp), %edi # 4-byte Reload + addl -32(%ebp), %edi + movl -96(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -96(%ebp) # 4-byte Spill + movl -100(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -100(%ebp) # 4-byte Spill + movl -104(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -104(%ebp) # 4-byte Spill + movl -108(%ebp), %eax # 4-byte Reload + adcl -16(%ebp), %eax + movl %eax, -108(%ebp) # 4-byte Spill + adcl -124(%ebp), %esi # 4-byte Folded Reload + movl -52(%ebp), %edx + movl 8(%ebp), %eax + subl (%eax), %edx + movl -48(%ebp), %ebx + sbbl 4(%eax), %ebx + movl -44(%ebp), %ecx + sbbl 8(%eax), %ecx + movl %ecx, -116(%ebp) # 4-byte Spill + movl -40(%ebp), %ecx + sbbl 12(%eax), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + movl -36(%ebp), %ecx + sbbl 16(%eax), %ecx + movl %ecx, -120(%ebp) # 4-byte Spill + movl 20(%eax), %ecx + movl %ecx, -124(%ebp) # 4-byte Spill + sbbl %ecx, %edi + movl %edi, -112(%ebp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, -128(%ebp) # 4-byte Spill + sbbl %ecx, -96(%ebp) # 4-byte Folded Spill + movl 28(%eax), %ecx + movl %ecx, -132(%ebp) # 4-byte Spill + sbbl %ecx, -100(%ebp) # 4-byte Folded Spill + movl 32(%eax), %ecx + movl %ecx, -136(%ebp) # 4-byte Spill + sbbl %ecx, -104(%ebp) # 4-byte Folded Spill + movl 36(%eax), %ecx + movl %ecx, -140(%ebp) # 4-byte Spill + sbbl %ecx, -108(%ebp) # 4-byte Folded Spill + sbbl $0, %esi + movl 40(%eax), %ecx + movl %ecx, -160(%ebp) # 4-byte Spill + subl %ecx, %edx + movl 44(%eax), %ecx + movl %ecx, -164(%ebp) # 4-byte Spill + sbbl %ecx, %ebx + movl 48(%eax), %ecx + movl %ecx, -168(%ebp) # 4-byte Spill + sbbl %ecx, -116(%ebp) # 4-byte Folded Spill + movl 52(%eax), %ecx + movl %ecx, -172(%ebp) # 4-byte Spill + movl -144(%ebp), %edi # 4-byte Reload + sbbl %ecx, %edi + movl 56(%eax), %ecx + movl %ecx, -176(%ebp) # 4-byte Spill + sbbl %ecx, -120(%ebp) # 4-byte Folded Spill + movl 60(%eax), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + sbbl %ecx, -112(%ebp) # 4-byte Folded Spill + movl 64(%eax), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + sbbl %ecx, -96(%ebp) # 4-byte Folded Spill + movl 68(%eax), %ecx + movl %ecx, -148(%ebp) # 4-byte Spill + sbbl %ecx, -100(%ebp) # 4-byte Folded Spill + movl 72(%eax), %ecx + movl %ecx, -152(%ebp) # 4-byte Spill + sbbl %ecx, -104(%ebp) # 4-byte Folded Spill + movl 76(%eax), %ecx + movl %ecx, -156(%ebp) # 4-byte Spill + sbbl %ecx, -108(%ebp) # 4-byte Folded Spill + sbbl $0, %esi + addl -124(%ebp), %edx # 4-byte Folded Reload + adcl -128(%ebp), %ebx # 4-byte Folded Reload + movl %edx, 20(%eax) + movl -116(%ebp), %ecx # 4-byte Reload + adcl -132(%ebp), %ecx # 4-byte Folded Reload + movl %ebx, 24(%eax) + adcl -136(%ebp), %edi # 4-byte Folded 
Reload + movl %ecx, 28(%eax) + movl -120(%ebp), %edx # 4-byte Reload + adcl -140(%ebp), %edx # 4-byte Folded Reload + movl %edi, 32(%eax) + movl -112(%ebp), %ecx # 4-byte Reload + adcl -160(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 36(%eax) + movl -96(%ebp), %edx # 4-byte Reload + adcl -164(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 40(%eax) + movl -100(%ebp), %ecx # 4-byte Reload + adcl -168(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 44(%eax) + movl -104(%ebp), %edx # 4-byte Reload + adcl -172(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 48(%eax) + movl -108(%ebp), %ecx # 4-byte Reload + adcl -176(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 52(%eax) + adcl -180(%ebp), %esi # 4-byte Folded Reload + movl %ecx, 56(%eax) + movl %esi, 60(%eax) + movl -144(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 64(%eax) + movl -148(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 68(%eax) + movl -152(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 72(%eax) + movl -156(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 76(%eax) + addl $188, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end145: + .size mcl_fpDbl_sqrPre10L, .Lfunc_end145-mcl_fpDbl_sqrPre10L + + .globl mcl_fp_mont10L + .align 16, 0x90 + .type mcl_fp_mont10L,@function +mcl_fp_mont10L: # @mcl_fp_mont10L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1036, %esp # imm = 0x40C + calll .L146$pb +.L146$pb: + popl %ebx +.Ltmp17: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp17-.L146$pb), %ebx + movl 1068(%esp), %eax + movl -4(%eax), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 992(%esp), %edi + movl 996(%esp), %ebp + movl %edi, %eax + imull %esi, %eax + movl 1032(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1028(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1024(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1020(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1016(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1012(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1008(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1004(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1000(%esp), %esi + movl %eax, (%esp) + leal 944(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + addl 944(%esp), %edi + adcl 948(%esp), %ebp + adcl 952(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 1064(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 896(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + andl $1, %edi + addl 896(%esp), %ebp + adcl 900(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + 
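+# note (added annotation, not LLVM output): mcl_fp_mont10L is word-serial (CIOS) Montgomery +# multiplication. Each round adds x*y[i] via .LmulPv320x32, forms m = t[0]*n' mod 2^32 with +# the imull on 32(%esp) (n' is the Montgomery constant loaded from -4 off the modulus +# pointer), adds m*N via a second .LmulPv320x32, and drops one word; the borrow-selected +# (jne) conditional subtraction at the end reduces the result below N.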
adcl 904(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 936(%esp), %edi + sbbl %eax, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %ebp, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 848(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + movl 64(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 848(%esp), %ebp + adcl 852(%esp), %esi + movl 44(%esp), %ecx # 4-byte Reload + adcl 856(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 860(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 864(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 868(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 872(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 876(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 880(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 884(%esp), %ebp + adcl 888(%esp), %edi + adcl $0, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 800(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + addl 800(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 832(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + adcl 836(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %esi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 752(%esp), %ecx + movl 1068(%esp), %eax + movl %eax, %edx + calll .LmulPv320x32 + andl $1, %edi + movl %edi, %ecx + addl 752(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 768(%esp), 
%ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 780(%esp), %esi + movl 76(%esp), %edi # 4-byte Reload + adcl 784(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 44(%esp), %ecx # 4-byte Reload + addl 704(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 716(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 728(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + adcl 732(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 736(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 744(%esp), %edi + sbbl %esi, %esi + movl %ecx, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 656(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %esi + movl %esi, %ecx + movl 44(%esp), %eax # 4-byte Reload + addl 656(%esp), %eax + movl 40(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 676(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 688(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 696(%esp), %edi + adcl $0, %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 608(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 40(%esp), %ecx # 4-byte Reload + addl 608(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 624(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 636(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + 
movl 64(%esp), %esi # 4-byte Reload + adcl 640(%esp), %esi + adcl 644(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 648(%esp), %edi + sbbl %ebp, %ebp + movl %ecx, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %ebp + movl %ebp, %ecx + movl 40(%esp), %eax # 4-byte Reload + addl 560(%esp), %eax + movl 36(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 572(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 592(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 600(%esp), %edi + adcl $0, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 512(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 36(%esp), %ecx # 4-byte Reload + addl 512(%esp), %ecx + movl 48(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl %ebp, %esi + adcl 520(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 548(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 464(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %ebp + movl %ebp, %eax + addl 464(%esp), %edi + movl 48(%esp), %ecx # 4-byte Reload + adcl 468(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 472(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 480(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 484(%esp), %esi + movl 60(%esp), %ecx # 4-byte Reload + adcl 488(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 492(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 496(%esp), %ebp + movl 44(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 504(%esp), %edi + adcl $0, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 
1064(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 1060(%esp), %eax + movl %eax, %edx + calll .LmulPv320x32 + movl 48(%esp), %ecx # 4-byte Reload + addl 416(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 432(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 444(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 452(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %edi + movl %edi, %ecx + addl 368(%esp), %esi + movl 56(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 380(%esp), %esi + movl 76(%esp), %ebp # 4-byte Reload + adcl 384(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 400(%esp), %edi + movl 40(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 320(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 56(%esp), %ecx # 4-byte Reload + addl 320(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 328(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + adcl 332(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 348(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 360(%esp), %ebp + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %edi + movl %edi, %ecx + addl 272(%esp), %esi + movl 68(%esp), %edi # 4-byte Reload + 
adcl 276(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 288(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 312(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl $0, %ebp + movl 1064(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl %edi, %ecx + addl 224(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 236(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 240(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 264(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 176(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %ebp + addl 176(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %edi, %esi + adcl 192(%esp), %esi + movl 52(%esp), %edi # 4-byte Reload + adcl 196(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %ebp + movl 1064(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 1060(%esp), %edx + calll .LmulPv320x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 128(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 140(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + adcl 144(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 
44(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 168(%esp), %ebp + sbbl %esi, %esi + movl 32(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 80(%esp), %ecx + movl 1068(%esp), %edx + calll .LmulPv320x32 + andl $1, %esi + addl 80(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + movl 64(%esp), %ebx # 4-byte Reload + adcl 84(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 88(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 92(%esp), %ebx + movl 52(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %edx, %edi + movl 36(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 112(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 120(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + adcl $0, %esi + movl 1068(%esp), %edx + subl (%edx), %eax + sbbl 4(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl %ebx, %ecx + sbbl 8(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + sbbl 12(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + sbbl 16(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + sbbl 20(%edx), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + sbbl 24(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + sbbl 28(%edx), %ecx + movl 68(%esp), %edi # 4-byte Reload + sbbl 32(%edx), %edi + movl %edi, 64(%esp) # 4-byte Spill + sbbl 36(%edx), %ebp + movl %ebp, %edx + sbbl $0, %esi + andl $1, %esi + jne .LBB146_2 +# BB#1: + movl %ecx, 48(%esp) # 4-byte Spill +.LBB146_2: + movl %esi, %ecx + testb %cl, %cl + movl 76(%esp), %esi # 4-byte Reload + jne .LBB146_4 +# BB#3: + movl %eax, %esi +.LBB146_4: + movl 1056(%esp), %eax + movl %esi, (%eax) + movl 60(%esp), %edi # 4-byte Reload + jne .LBB146_6 +# BB#5: + movl 16(%esp), %edi # 4-byte Reload +.LBB146_6: + movl %edi, 4(%eax) + jne .LBB146_8 +# BB#7: + movl 20(%esp), %ebx # 4-byte Reload +.LBB146_8: + movl %ebx, 8(%eax) + movl 52(%esp), %ebp # 4-byte Reload + movl 44(%esp), %ecx # 4-byte Reload + jne .LBB146_10 +# BB#9: + movl 24(%esp), %ebp # 4-byte Reload +.LBB146_10: + movl %ebp, 12(%eax) + jne .LBB146_12 +# BB#11: + movl 28(%esp), %ecx # 4-byte Reload +.LBB146_12: + movl %ecx, 16(%eax) + movl 40(%esp), %ecx # 4-byte Reload + jne .LBB146_14 +# BB#13: + movl 32(%esp), %ecx # 4-byte Reload +.LBB146_14: + movl %ecx, 20(%eax) + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB146_16 +# BB#15: + movl 56(%esp), %ecx # 4-byte Reload +.LBB146_16: + movl %ecx, 24(%eax) + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 68(%esp), %ecx # 4-byte Reload + jne 
.LBB146_18 +# BB#17: + movl 64(%esp), %ecx # 4-byte Reload +.LBB146_18: + movl %ecx, 32(%eax) + movl 72(%esp), %ecx # 4-byte Reload + jne .LBB146_20 +# BB#19: + movl %edx, %ecx +.LBB146_20: + movl %ecx, 36(%eax) + addl $1036, %esp # imm = 0x40C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end146: + .size mcl_fp_mont10L, .Lfunc_end146-mcl_fp_mont10L + + .globl mcl_fp_montNF10L + .align 16, 0x90 + .type mcl_fp_montNF10L,@function +mcl_fp_montNF10L: # @mcl_fp_montNF10L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1020, %esp # imm = 0x3FC + calll .L147$pb +.L147$pb: + popl %ebx +.Ltmp18: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp18-.L147$pb), %ebx + movl 1052(%esp), %eax + movl -4(%eax), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 976(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 976(%esp), %edi + movl 980(%esp), %esi + movl %edi, %eax + imull %ebp, %eax + movl 1016(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1012(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1008(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1004(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1000(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 996(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 992(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 988(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 984(%esp), %ebp + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 928(%esp), %edi + adcl 932(%esp), %esi + adcl 936(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 952(%esp), %edi + movl 36(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 880(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 920(%esp), %ecx + addl 880(%esp), %esi + adcl 884(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + adcl 900(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl %esi, %eax + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 832(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 
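+# note (added annotation, not LLVM output): mcl_fp_montNF10L is the "NF" Montgomery variant. +# Compared with mcl_fp_mont10L it carries no explicit overflow bit between rounds (no +# sbbl/andl $1 bookkeeping), and its final correction picks the reduced value with a sign +# test (sarl $31 / js) instead of a borrow test.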
+ addl 832(%esp), %esi + adcl 836(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %edi # 4-byte Reload + adcl 848(%esp), %edi + movl 40(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 872(%esp), %esi + movl 1048(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 824(%esp), %ecx + addl 784(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 796(%esp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 808(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 820(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl %ebp, %eax + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 736(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 736(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 760(%esp), %edi + movl 56(%esp), %ebp # 4-byte Reload + adcl 764(%esp), %ebp + movl 60(%esp), %esi # 4-byte Reload + adcl 768(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 688(%esp), %ecx + movl 1044(%esp), %eax + movl %eax, %edx + calll .LmulPv320x32 + movl 728(%esp), %eax + movl 28(%esp), %edx # 4-byte Reload + addl 688(%esp), %edx + movl 44(%esp), %ecx # 4-byte Reload + adcl 692(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 24(%esp), %ecx # 4-byte Reload + adcl 696(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 704(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 708(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + 
adcl 712(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + adcl 716(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 720(%esp), %ebp + movl 32(%esp), %esi # 4-byte Reload + adcl 724(%esp), %esi + adcl $0, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %edi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 640(%esp), %ecx + movl 1052(%esp), %eax + movl %eax, %edx + calll .LmulPv320x32 + addl 640(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 656(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 672(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 676(%esp), %esi + movl %esi, %ebp + movl 28(%esp), %esi # 4-byte Reload + adcl 680(%esp), %esi + movl 1048(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 592(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 632(%esp), %edx + movl 44(%esp), %ecx # 4-byte Reload + addl 592(%esp), %ecx + movl 24(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 604(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 624(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl 628(%esp), %esi + movl %esi, 28(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 544(%esp), %esi + movl 24(%esp), %edi # 4-byte Reload + adcl 548(%esp), %edi + movl 40(%esp), %esi # 4-byte Reload + adcl 552(%esp), %esi + movl 36(%esp), %ebp # 4-byte Reload + adcl 556(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 
+ movl 536(%esp), %edx + addl 496(%esp), %edi + adcl 500(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + adcl 504(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %esi # 4-byte Reload + adcl 528(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 24(%esp) # 4-byte Spill + movl %edi, %eax + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 448(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 448(%esp), %edi + movl 40(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 456(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 464(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 480(%esp), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %esi # 4-byte Reload + adcl 488(%esp), %esi + movl 1048(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 400(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 440(%esp), %eax + movl 40(%esp), %ecx # 4-byte Reload + addl 400(%esp), %ecx + adcl 404(%esp), %ebp + movl 48(%esp), %edx # 4-byte Reload + adcl 408(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 412(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 416(%esp), %edi + movl 52(%esp), %edx # 4-byte Reload + adcl 420(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + adcl 424(%esp), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 28(%esp), %edx # 4-byte Reload + adcl 428(%esp), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 432(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl 436(%esp), %esi + movl %esi, 24(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 352(%esp), %esi + adcl 356(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 360(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %edi, %esi + adcl 368(%esp), %esi + movl 52(%esp), %edi # 4-byte Reload + adcl 372(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 
44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 1044(%esp), %eax + movl %eax, %edx + calll .LmulPv320x32 + movl 344(%esp), %edx + movl 36(%esp), %ecx # 4-byte Reload + addl 304(%esp), %ecx + adcl 308(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 316(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 320(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + adcl 324(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 332(%esp), %esi + movl 24(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 256(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 256(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 272(%esp), %edi + adcl 276(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + adcl 284(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 24(%esp), %esi # 4-byte Reload + adcl 288(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 248(%esp), %edx + movl 48(%esp), %ecx # 4-byte Reload + addl 208(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 220(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 224(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 236(%esp), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 160(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 160(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl 
%eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl %ebp, %edi + adcl 176(%esp), %edi + movl 28(%esp), %esi # 4-byte Reload + adcl 180(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 192(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 1044(%esp), %edx + calll .LmulPv320x32 + movl 152(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + addl 112(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 120(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 124(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + adcl 128(%esp), %esi + movl %esi, %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + adcl 140(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 144(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %ebp + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 1052(%esp), %edx + calll .LmulPv320x32 + addl 64(%esp), %ebp + movl %edi, %ebp + movl 60(%esp), %eax # 4-byte Reload + movl 32(%esp), %ebx # 4-byte Reload + adcl 68(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 72(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 76(%esp), %ebx + adcl 80(%esp), %ebp + movl 44(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 24(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl 96(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %eax, %edx + movl 1052(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %ecx + movl %ebx, %eax + sbbl 8(%edi), %eax + movl %ebp, %esi + sbbl 12(%edi), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 16(%edi), %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 24(%esp), %esi # 4-byte Reload + sbbl 20(%edi), %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 40(%esp), %esi # 4-byte Reload + sbbl 24(%edi), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + sbbl 28(%edi), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + sbbl 32(%edi), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + sbbl 36(%edi), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl %esi, %edi + sarl $31, %edi + testl %edi, %edi + movl 60(%esp), %edi # 
4-byte Reload + js .LBB147_2 +# BB#1: + movl %edx, %edi +.LBB147_2: + movl 1040(%esp), %edx + movl %edi, (%edx) + movl 52(%esp), %edi # 4-byte Reload + js .LBB147_4 +# BB#3: + movl %ecx, %edi +.LBB147_4: + movl %edi, 4(%edx) + js .LBB147_6 +# BB#5: + movl %eax, %ebx +.LBB147_6: + movl %ebx, 8(%edx) + js .LBB147_8 +# BB#7: + movl 4(%esp), %ebp # 4-byte Reload +.LBB147_8: + movl %ebp, 12(%edx) + movl 44(%esp), %esi # 4-byte Reload + movl 24(%esp), %eax # 4-byte Reload + js .LBB147_10 +# BB#9: + movl 8(%esp), %esi # 4-byte Reload +.LBB147_10: + movl %esi, 16(%edx) + js .LBB147_12 +# BB#11: + movl 12(%esp), %eax # 4-byte Reload +.LBB147_12: + movl %eax, 20(%edx) + movl 40(%esp), %eax # 4-byte Reload + js .LBB147_14 +# BB#13: + movl 16(%esp), %eax # 4-byte Reload +.LBB147_14: + movl %eax, 24(%edx) + movl 36(%esp), %eax # 4-byte Reload + js .LBB147_16 +# BB#15: + movl 20(%esp), %eax # 4-byte Reload +.LBB147_16: + movl %eax, 28(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB147_18 +# BB#17: + movl 28(%esp), %eax # 4-byte Reload +.LBB147_18: + movl %eax, 32(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB147_20 +# BB#19: + movl 32(%esp), %eax # 4-byte Reload +.LBB147_20: + movl %eax, 36(%edx) + addl $1020, %esp # imm = 0x3FC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end147: + .size mcl_fp_montNF10L, .Lfunc_end147-mcl_fp_montNF10L + + .globl mcl_fp_montRed10L + .align 16, 0x90 + .type mcl_fp_montRed10L,@function +mcl_fp_montRed10L: # @mcl_fp_montRed10L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $604, %esp # imm = 0x25C + calll .L148$pb +.L148$pb: + popl %eax +.Ltmp19: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp19-.L148$pb), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 632(%esp), %edx + movl -4(%edx), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 628(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 68(%esp) # 4-byte Spill + imull %esi, %ebx + movl 76(%ecx), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 72(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 60(%ecx), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 56(%ecx), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 48(%ecx), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 44(%ecx), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 40(%ecx), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 36(%ecx), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 28(%ecx), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 24(%ecx), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 20(%ecx), %ebp + movl 16(%ecx), %edi + movl 12(%ecx), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 8(%ecx), %esi + movl (%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 560(%esp), 
%ecx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + movl 56(%esp), %eax # 4-byte Reload + addl 560(%esp), %eax + movl 68(%esp), %ecx # 4-byte Reload + adcl 564(%esp), %ecx + adcl 568(%esp), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 576(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + adcl 580(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 108(%esp) # 4-byte Folded Spill + movl 76(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 84(%esp) # 4-byte Folded Spill + sbbl %eax, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 512(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + movl 68(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 512(%esp), %esi + movl 4(%esp), %edx # 4-byte Reload + adcl 516(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + adcl 520(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 524(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 528(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 532(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 536(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 540(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 544(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 548(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 552(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 88(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %edi + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 464(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 464(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + adcl 468(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 56(%esp) # 4-byte 
Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 492(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %edi + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ecx, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + movl 52(%esp), %eax # 4-byte Reload + addl 416(%esp), %eax + movl 48(%esp), %ecx # 4-byte Reload + adcl 420(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 440(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 120(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + movl 60(%esp), %edi # 4-byte Reload + imull %edi, %eax + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 632(%esp), %eax + movl %eax, %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 368(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + adcl 372(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 404(%esp), 
%ebp + movl 120(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 84(%esp) # 4-byte Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull %edi, %eax + movl %eax, (%esp) + leal 320(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 320(%esp), %esi + movl 72(%esp), %ecx # 4-byte Reload + adcl 324(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %edi # 4-byte Reload + adcl 344(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 352(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 360(%esp), %esi + adcl $0, 88(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 272(%esp), %ebp + movl 96(%esp), %ecx # 4-byte Reload + adcl 276(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl 288(%esp), %ebp + adcl 292(%esp), %edi + movl %edi, 124(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 296(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 308(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 312(%esp), %esi + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, 68(%esp) # 4-byte Folded Spill + movl %ecx, %eax + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + movl 96(%esp), %eax # 4-byte Reload + addl 224(%esp), %eax + movl 100(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl 92(%esp), %ecx # 4-byte Reload + adcl 232(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 236(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 240(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + adcl 244(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 248(%esp), %ebp + movl 120(%esp), %ecx # 4-byte 
Reload + adcl 252(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 256(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 260(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 264(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl 68(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %eax, %edi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 176(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 176(%esp), %edi + movl 92(%esp), %ecx # 4-byte Reload + adcl 180(%esp), %ecx + movl 112(%esp), %edi # 4-byte Reload + adcl 184(%esp), %edi + movl 124(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 196(%esp), %ebp + movl 120(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 632(%esp), %edx + movl 64(%esp), %ebx # 4-byte Reload + calll .LmulPv320x32 + addl 128(%esp), %esi + movl %edi, %eax + adcl 132(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %eax, %edi + movl 124(%esp), %ecx # 4-byte Reload + adcl 136(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 140(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + adcl 144(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl %ebp, %edx + movl 120(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 76(%esp), %ebx # 4-byte Reload + adcl 164(%esp), %ebx + movl %ebx, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + subl 12(%esp), %edi # 4-byte Folded Reload + sbbl 8(%esp), %ecx # 4-byte Folded Reload + sbbl 16(%esp), %esi # 4-byte Folded Reload + sbbl 20(%esp), %edx # 4-byte Folded Reload + movl 120(%esp), %eax # 4-byte Reload + sbbl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + sbbl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl %ebp, %eax + sbbl 
40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 100(%esp) # 4-byte Spill + movl 84(%esp), %ebx # 4-byte Reload + sbbl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 96(%esp) # 4-byte Spill + sbbl $0, %eax + andl $1, %eax + jne .LBB148_2 +# BB#1: + movl %edx, 80(%esp) # 4-byte Spill +.LBB148_2: + testb %al, %al + movl 112(%esp), %edx # 4-byte Reload + jne .LBB148_4 +# BB#3: + movl %edi, %edx +.LBB148_4: + movl 624(%esp), %edi + movl %edx, (%edi) + movl 108(%esp), %edx # 4-byte Reload + jne .LBB148_6 +# BB#5: + movl %ecx, 124(%esp) # 4-byte Spill +.LBB148_6: + movl 124(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%edi) + movl 116(%esp), %ecx # 4-byte Reload + jne .LBB148_8 +# BB#7: + movl %esi, %ecx +.LBB148_8: + movl %ecx, 8(%edi) + movl 80(%esp), %eax # 4-byte Reload + movl %eax, 12(%edi) + movl 76(%esp), %ecx # 4-byte Reload + movl 120(%esp), %eax # 4-byte Reload + jne .LBB148_10 +# BB#9: + movl 64(%esp), %eax # 4-byte Reload +.LBB148_10: + movl %eax, 16(%edi) + movl 84(%esp), %eax # 4-byte Reload + movl 104(%esp), %ebp # 4-byte Reload + jne .LBB148_12 +# BB#11: + movl 68(%esp), %ebp # 4-byte Reload +.LBB148_12: + movl %ebp, 20(%edi) + movl 88(%esp), %ebx # 4-byte Reload + jne .LBB148_14 +# BB#13: + movl 72(%esp), %ebx # 4-byte Reload +.LBB148_14: + movl %ebx, 24(%edi) + jne .LBB148_16 +# BB#15: + movl 92(%esp), %edx # 4-byte Reload +.LBB148_16: + movl %edx, 28(%edi) + jne .LBB148_18 +# BB#17: + movl 100(%esp), %ecx # 4-byte Reload +.LBB148_18: + movl %ecx, 32(%edi) + jne .LBB148_20 +# BB#19: + movl 96(%esp), %eax # 4-byte Reload +.LBB148_20: + movl %eax, 36(%edi) + addl $604, %esp # imm = 0x25C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end148: + .size mcl_fp_montRed10L, .Lfunc_end148-mcl_fp_montRed10L + + .globl mcl_fp_addPre10L + .align 16, 0x90 + .type mcl_fp_addPre10L,@function +mcl_fp_addPre10L: # @mcl_fp_addPre10L +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ebx) + movl 32(%ecx), %esi + adcl %edi, %esi + movl %edx, 28(%ebx) + movl %esi, 32(%ebx) + movl 36(%eax), %eax + movl 36(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 36(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end149: + .size mcl_fp_addPre10L, .Lfunc_end149-mcl_fp_addPre10L + + .globl mcl_fp_subPre10L + .align 16, 0x90 + .type mcl_fp_subPre10L,@function +mcl_fp_subPre10L: # @mcl_fp_subPre10L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 
24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ebp) + movl 32(%ecx), %edi + sbbl %ebx, %edi + movl %esi, 28(%ebp) + movl %edi, 32(%ebp) + movl 36(%edx), %edx + movl 36(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 36(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end150: + .size mcl_fp_subPre10L, .Lfunc_end150-mcl_fp_subPre10L + + .globl mcl_fp_shr1_10L + .align 16, 0x90 + .type mcl_fp_shr1_10L,@function +mcl_fp_shr1_10L: # @mcl_fp_shr1_10L +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + shrdl $1, %edx, %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 8(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 4(%esi) + movl 12(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 8(%esi) + movl 16(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 12(%esi) + movl 20(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 16(%esi) + movl 24(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 20(%esi) + movl 28(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 24(%esi) + movl 32(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 28(%esi) + movl 36(%eax), %eax + shrdl $1, %eax, %ecx + movl %ecx, 32(%esi) + shrl %eax + movl %eax, 36(%esi) + popl %esi + retl +.Lfunc_end151: + .size mcl_fp_shr1_10L, .Lfunc_end151-mcl_fp_shr1_10L + + .globl mcl_fp_add10L + .align 16, 0x90 + .type mcl_fp_add10L,@function +mcl_fp_add10L: # @mcl_fp_add10L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $24, %esp + movl 52(%esp), %edi + movl (%edi), %ecx + movl 4(%edi), %eax + movl 48(%esp), %ebx + addl (%ebx), %ecx + movl %ecx, (%esp) # 4-byte Spill + adcl 4(%ebx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 8(%edi), %eax + adcl 8(%ebx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 12(%ebx), %ecx + movl 16(%ebx), %eax + adcl 12(%edi), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + adcl 16(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 20(%ebx), %eax + adcl 20(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 24(%ebx), %esi + adcl 24(%edi), %esi + movl 28(%ebx), %ebp + adcl 28(%edi), %ebp + movl 32(%ebx), %edx + adcl 32(%edi), %edx + movl 36(%ebx), %ecx + adcl 36(%edi), %ecx + movl 44(%esp), %edi + movl (%esp), %ebx # 4-byte Reload + movl %ebx, (%edi) + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 4(%edi) + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 8(%edi) + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 12(%edi) + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 16(%edi) + movl 4(%esp), %eax # 4-byte Reload + movl %eax, 20(%edi) + movl %esi, 24(%edi) + movl %ebp, 28(%edi) + movl %edx, 32(%edi) + movl %ecx, 36(%edi) + sbbl %eax, %eax + andl $1, %eax + movl 56(%esp), %edi + subl (%edi), %ebx + movl %ebx, (%esp) # 4-byte Spill + movl 20(%esp), %ebx # 4-byte Reload + sbbl 4(%edi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 16(%esp), %ebx # 4-byte Reload + sbbl 8(%edi), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 12(%esp), %ebx # 4-byte Reload + sbbl 12(%edi), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 8(%esp), %ebx # 4-byte Reload + sbbl 16(%edi), %ebx + movl %ebx, 8(%esp) # 4-byte Spill + movl 4(%esp), %ebx # 4-byte Reload + sbbl 20(%edi), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + sbbl 24(%edi), %esi + sbbl 28(%edi), %ebp + sbbl 32(%edi), %edx + sbbl 36(%edi), %ecx + sbbl $0, %eax + testb $1, %al + jne .LBB152_2 +# BB#1: # %nocarry + movl (%esp), %edi # 4-byte Reload + movl 44(%esp), 
%ebx + movl %edi, (%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 12(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 8(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 4(%esp), %eax # 4-byte Reload + movl %eax, 20(%ebx) + movl %esi, 24(%ebx) + movl %ebp, 28(%ebx) + movl %edx, 32(%ebx) + movl %ecx, 36(%ebx) +.LBB152_2: # %carry + addl $24, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end152: + .size mcl_fp_add10L, .Lfunc_end152-mcl_fp_add10L + + .globl mcl_fp_addNF10L + .align 16, 0x90 + .type mcl_fp_addNF10L,@function +mcl_fp_addNF10L: # @mcl_fp_addNF10L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 100(%esp), %ecx + movl (%ecx), %eax + movl 4(%ecx), %esi + movl 96(%esp), %edx + addl (%edx), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 4(%edx), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 36(%ecx), %edi + movl 32(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 28(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 24(%ecx), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 20(%ecx), %ebp + movl 16(%ecx), %ebx + movl 12(%ecx), %eax + movl 8(%ecx), %esi + adcl 8(%edx), %esi + adcl 12(%edx), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 16(%edx), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + adcl 20(%edx), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 24(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 28(%edx), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 32(%edx), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl %esi, %ecx + adcl 36(%edx), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 104(%esp), %edi + movl 52(%esp), %edx # 4-byte Reload + subl (%edi), %edx + movl 56(%esp), %esi # 4-byte Reload + sbbl 4(%edi), %esi + movl %esi, (%esp) # 4-byte Spill + movl %ecx, %esi + sbbl 8(%edi), %esi + movl %esi, 4(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + sbbl 16(%edi), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + sbbl 20(%edi), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + movl %esi, %eax + movl %esi, %ebp + sbbl 24(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + movl %esi, %eax + movl %esi, %ebx + sbbl 32(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + movl %eax, %esi + sbbl 36(%edi), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl %esi, %edi + movl 52(%esp), %esi # 4-byte Reload + sarl $31, %edi + testl %edi, %edi + js .LBB153_2 +# BB#1: + movl %edx, %esi +.LBB153_2: + movl 92(%esp), %edx + movl %esi, (%edx) + movl 56(%esp), %esi # 4-byte Reload + js .LBB153_4 +# BB#3: + movl (%esp), %esi # 4-byte Reload +.LBB153_4: + movl %esi, 4(%edx) + movl %ebp, %edi + movl 40(%esp), %esi # 4-byte Reload + js .LBB153_6 +# BB#5: + movl 4(%esp), %ecx # 4-byte Reload +.LBB153_6: + movl %ecx, 8(%edx) + movl %ebx, %ecx + movl 44(%esp), %ebp # 4-byte Reload + js .LBB153_8 +# BB#7: + movl 8(%esp), %esi # 4-byte Reload +.LBB153_8: + movl %esi, 12(%edx) + movl 68(%esp), %esi # 4-byte Reload + movl 48(%esp), %ebx # 4-byte Reload + js .LBB153_10 +# BB#9: + movl 12(%esp), %ebp # 4-byte Reload +.LBB153_10: + movl %ebp, 16(%edx) + js .LBB153_12 +# BB#11: + movl 
16(%esp), %ebx # 4-byte Reload +.LBB153_12: + movl %ebx, 20(%edx) + js .LBB153_14 +# BB#13: + movl 20(%esp), %edi # 4-byte Reload +.LBB153_14: + movl %edi, 24(%edx) + js .LBB153_16 +# BB#15: + movl 24(%esp), %esi # 4-byte Reload +.LBB153_16: + movl %esi, 28(%edx) + js .LBB153_18 +# BB#17: + movl 28(%esp), %ecx # 4-byte Reload +.LBB153_18: + movl %ecx, 32(%edx) + js .LBB153_20 +# BB#19: + movl 32(%esp), %eax # 4-byte Reload +.LBB153_20: + movl %eax, 36(%edx) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end153: + .size mcl_fp_addNF10L, .Lfunc_end153-mcl_fp_addNF10L + + .globl mcl_fp_sub10L + .align 16, 0x90 + .type mcl_fp_sub10L,@function +mcl_fp_sub10L: # @mcl_fp_sub10L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 56(%esp), %esi + movl (%esi), %ecx + movl 4(%esi), %eax + xorl %ebx, %ebx + movl 60(%esp), %edi + subl (%edi), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 20(%esi), %edx + sbbl 20(%edi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 24(%esi), %ecx + sbbl 24(%edi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 32(%esi), %ebp + sbbl 32(%edi), %ebp + movl 36(%esi), %esi + sbbl 36(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 52(%esp), %ebx + movl 12(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl %edx, 20(%ebx) + movl %ecx, 24(%ebx) + movl %eax, 28(%ebx) + movl %ebp, 32(%ebx) + movl %esi, 36(%ebx) + je .LBB154_2 +# BB#1: # %carry + movl %esi, %edi + movl 64(%esp), %esi + movl 12(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 24(%esp), %ecx # 4-byte Reload + adcl 8(%esi), %ecx + movl 12(%esi), %eax + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebx) + movl 16(%esi), %ecx + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl %eax, 28(%ebx) + movl 32(%esi), %eax + adcl %ebp, %eax + movl %eax, 32(%ebx) + movl 36(%esi), %eax + adcl %edi, %eax + movl %eax, 36(%ebx) +.LBB154_2: # %nocarry + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end154: + .size mcl_fp_sub10L, .Lfunc_end154-mcl_fp_sub10L + + .globl mcl_fp_subNF10L + .align 16, 0x90 + .type mcl_fp_subNF10L,@function +mcl_fp_subNF10L: # @mcl_fp_subNF10L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 80(%esp), %eax + movl 36(%eax), %esi + movl (%eax), %edi + movl 4(%eax), %edx + movl 84(%esp), %ecx + subl (%ecx), %edi + movl %edi, 36(%esp) # 4-byte Spill + sbbl 4(%ecx), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 32(%eax), %edx + movl %edx, 48(%esp) # 4-byte Spill + 
movl 28(%eax), %edi + movl 24(%eax), %ebx + movl 20(%eax), %ebp + movl 16(%eax), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 12(%eax), %edx + movl 8(%eax), %eax + sbbl 8(%ecx), %eax + movl %eax, 16(%esp) # 4-byte Spill + sbbl 12(%ecx), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 16(%ecx), %eax + movl %eax, 52(%esp) # 4-byte Spill + sbbl 20(%ecx), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + sbbl 24(%ecx), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + sbbl 28(%ecx), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + sbbl 32(%ecx), %eax + movl %eax, 48(%esp) # 4-byte Spill + sbbl 36(%ecx), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl %esi, %eax + sarl $31, %eax + movl %eax, %edx + addl %edx, %edx + movl %eax, %ecx + adcl %ecx, %ecx + movl %esi, %ebx + shrl $31, %ebx + orl %edx, %ebx + movl 88(%esp), %edi + movl 20(%edi), %edx + andl %ecx, %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 12(%edi), %edx + andl %ecx, %edx + movl %edx, 4(%esp) # 4-byte Spill + andl 4(%edi), %ecx + movl 16(%edi), %edx + andl %ebx, %edx + movl %edx, (%esp) # 4-byte Spill + movl 8(%edi), %edx + andl %ebx, %edx + andl (%edi), %ebx + movl 36(%edi), %esi + andl %eax, %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 32(%edi), %ebp + andl %eax, %ebp + movl 28(%edi), %esi + andl %eax, %esi + andl 24(%edi), %eax + addl 36(%esp), %ebx # 4-byte Folded Reload + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl 76(%esp), %edi + movl %ebx, (%edi) + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %ecx, 4(%edi) + movl 4(%esp), %ecx # 4-byte Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %edx, 8(%edi) + movl (%esp), %edx # 4-byte Reload + adcl 52(%esp), %edx # 4-byte Folded Reload + movl %ecx, 12(%edi) + movl 12(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %edx, 16(%edi) + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ecx, 20(%edi) + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %eax, 24(%edi) + adcl 48(%esp), %ebp # 4-byte Folded Reload + movl %esi, 28(%edi) + movl %ebp, 32(%edi) + movl 8(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%edi) + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end155: + .size mcl_fp_subNF10L, .Lfunc_end155-mcl_fp_subNF10L + + .globl mcl_fpDbl_add10L + .align 16, 0x90 + .type mcl_fpDbl_add10L,@function +mcl_fpDbl_add10L: # @mcl_fpDbl_add10L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 100(%esp), %edx + movl 96(%esp), %edi + movl 12(%edi), %esi + movl 16(%edi), %ecx + movl 8(%edx), %ebx + movl (%edx), %ebp + addl (%edi), %ebp + movl 92(%esp), %eax + movl %ebp, (%eax) + movl 4(%edx), %ebp + adcl 4(%edi), %ebp + adcl 8(%edi), %ebx + adcl 12(%edx), %esi + adcl 16(%edx), %ecx + movl %ebp, 4(%eax) + movl 48(%edx), %ebp + movl %ebx, 8(%eax) + movl 20(%edx), %ebx + movl %esi, 12(%eax) + movl 20(%edi), %esi + adcl %ebx, %esi + movl 24(%edx), %ebx + movl %ecx, 16(%eax) + movl 24(%edi), %ecx + adcl %ebx, %ecx + movl 28(%edx), %ebx + movl %esi, 20(%eax) + movl 28(%edi), %esi + adcl %ebx, %esi + movl 32(%edx), %ebx + movl %ecx, 24(%eax) + movl 32(%edi), %ecx + adcl %ebx, %ecx + movl 36(%edx), %ebx + movl %esi, 28(%eax) + movl 36(%edi), %esi + adcl %ebx, %esi + movl 40(%edx), %ebx + movl %ecx, 32(%eax) + movl 40(%edi), %ecx + adcl %ebx, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 44(%edx), %ebx + movl %esi, 36(%eax) + movl 44(%edi), %eax + adcl %ebx, %eax + movl 
%eax, 68(%esp) # 4-byte Spill + movl 48(%edi), %eax + adcl %ebp, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 52(%edx), %eax + movl 52(%edi), %ecx + adcl %eax, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 56(%edx), %eax + movl 56(%edi), %ecx + adcl %eax, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 60(%edx), %eax + movl 60(%edi), %ecx + adcl %eax, %ecx + movl 64(%edx), %esi + movl 64(%edi), %eax + adcl %esi, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 68(%edx), %ebx + movl 68(%edi), %esi + adcl %ebx, %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 72(%edx), %ebx + movl 72(%edi), %ebp + adcl %ebx, %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 76(%edx), %edx + movl 76(%edi), %edi + adcl %edx, %edi + movl %edi, 36(%esp) # 4-byte Spill + sbbl %edx, %edx + andl $1, %edx + movl 104(%esp), %ebx + movl 64(%esp), %edi # 4-byte Reload + subl (%ebx), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + sbbl 4(%ebx), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + sbbl 8(%ebx), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + sbbl 12(%ebx), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + sbbl 16(%ebx), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %ecx, %edi + sbbl 20(%ebx), %edi + movl %edi, (%esp) # 4-byte Spill + sbbl 24(%ebx), %eax + movl %eax, 24(%esp) # 4-byte Spill + sbbl 28(%ebx), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl %ebp, %eax + movl 36(%esp), %ebp # 4-byte Reload + sbbl 32(%ebx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl %ebp, %edi + sbbl 36(%ebx), %edi + sbbl $0, %edx + andl $1, %edx + jne .LBB156_2 +# BB#1: + movl %edi, %ebp +.LBB156_2: + testb %dl, %dl + movl 64(%esp), %edx # 4-byte Reload + movl 60(%esp), %esi # 4-byte Reload + movl 56(%esp), %edi # 4-byte Reload + movl 52(%esp), %ebx # 4-byte Reload + jne .LBB156_4 +# BB#3: + movl (%esp), %ecx # 4-byte Reload + movl 4(%esp), %esi # 4-byte Reload + movl 8(%esp), %edi # 4-byte Reload + movl 12(%esp), %ebx # 4-byte Reload + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 20(%esp), %edx # 4-byte Reload +.LBB156_4: + movl 92(%esp), %eax + movl %edx, 40(%eax) + movl 68(%esp), %edx # 4-byte Reload + movl %edx, 44(%eax) + movl %ebx, 48(%eax) + movl %edi, 52(%eax) + movl %esi, 56(%eax) + movl %ecx, 60(%eax) + movl 44(%esp), %edx # 4-byte Reload + movl 40(%esp), %ecx # 4-byte Reload + jne .LBB156_6 +# BB#5: + movl 24(%esp), %ecx # 4-byte Reload +.LBB156_6: + movl %ecx, 64(%eax) + movl 48(%esp), %ecx # 4-byte Reload + jne .LBB156_8 +# BB#7: + movl 28(%esp), %edx # 4-byte Reload +.LBB156_8: + movl %edx, 68(%eax) + jne .LBB156_10 +# BB#9: + movl 32(%esp), %ecx # 4-byte Reload +.LBB156_10: + movl %ecx, 72(%eax) + movl %ebp, 76(%eax) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end156: + .size mcl_fpDbl_add10L, .Lfunc_end156-mcl_fpDbl_add10L + + .globl mcl_fpDbl_sub10L + .align 16, 0x90 + .type mcl_fpDbl_sub10L,@function +mcl_fpDbl_sub10L: # @mcl_fpDbl_sub10L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 84(%esp), %ebp + movl (%ebp), %edx + movl 4(%ebp), %esi + movl 88(%esp), %eax + subl (%eax), %edx + sbbl 4(%eax), %esi + movl 8(%ebp), %edi + sbbl 8(%eax), %edi + movl 80(%esp), %ecx + movl %edx, (%ecx) + movl 12(%ebp), %edx + sbbl 12(%eax), %edx + movl %esi, 4(%ecx) + movl 16(%ebp), %esi + sbbl 16(%eax), %esi + movl %edi, 8(%ecx) + movl 20(%eax), %edi + movl %edx, 12(%ecx) + 
movl 20(%ebp), %edx + sbbl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ecx) + movl 24(%ebp), %esi + sbbl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ecx) + movl 28(%ebp), %edx + sbbl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ecx) + movl 32(%ebp), %esi + sbbl %edi, %esi + movl 36(%eax), %edi + movl %edx, 28(%ecx) + movl 36(%ebp), %edx + sbbl %edi, %edx + movl 40(%eax), %edi + movl %esi, 32(%ecx) + movl 40(%ebp), %esi + sbbl %edi, %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 44(%eax), %esi + movl %edx, 36(%ecx) + movl 44(%ebp), %edx + sbbl %esi, %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 48(%eax), %edx + movl 48(%ebp), %esi + sbbl %edx, %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 52(%eax), %edx + movl 52(%ebp), %esi + sbbl %edx, %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 56(%eax), %edx + movl 56(%ebp), %esi + sbbl %edx, %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 60(%eax), %edx + movl 60(%ebp), %esi + sbbl %edx, %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 64(%eax), %edx + movl 64(%ebp), %esi + sbbl %edx, %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 68(%eax), %edx + movl 68(%ebp), %esi + sbbl %edx, %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 72(%eax), %edx + movl 72(%ebp), %esi + sbbl %edx, %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 76(%eax), %eax + movl 76(%ebp), %edx + sbbl %eax, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 92(%esp), %esi + jne .LBB157_1 +# BB#2: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB157_3 +.LBB157_1: + movl 36(%esi), %edx + movl %edx, 16(%esp) # 4-byte Spill +.LBB157_3: + testb %al, %al + jne .LBB157_4 +# BB#5: + movl $0, 8(%esp) # 4-byte Folded Spill + movl $0, %ebx + jmp .LBB157_6 +.LBB157_4: + movl (%esi), %ebx + movl 4(%esi), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB157_6: + jne .LBB157_7 +# BB#8: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB157_9 +.LBB157_7: + movl 32(%esi), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB157_9: + jne .LBB157_10 +# BB#11: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB157_12 +.LBB157_10: + movl 28(%esi), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB157_12: + jne .LBB157_13 +# BB#14: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB157_15 +.LBB157_13: + movl 24(%esi), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB157_15: + jne .LBB157_16 +# BB#17: + movl $0, %ebp + jmp .LBB157_18 +.LBB157_16: + movl 20(%esi), %ebp +.LBB157_18: + jne .LBB157_19 +# BB#20: + movl $0, %eax + jmp .LBB157_21 +.LBB157_19: + movl 16(%esi), %eax +.LBB157_21: + jne .LBB157_22 +# BB#23: + movl $0, %edx + jmp .LBB157_24 +.LBB157_22: + movl 12(%esi), %edx +.LBB157_24: + jne .LBB157_25 +# BB#26: + xorl %esi, %esi + jmp .LBB157_27 +.LBB157_25: + movl 8(%esi), %esi +.LBB157_27: + addl 28(%esp), %ebx # 4-byte Folded Reload + movl 8(%esp), %edi # 4-byte Reload + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %ebx, 40(%ecx) + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %edi, 44(%ecx) + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %esi, 48(%ecx) + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %edx, 52(%ecx) + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %eax, 56(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %ebp, 60(%ecx) + movl 4(%esp), %edx # 4-byte Reload + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %eax, 64(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %edx, 68(%ecx) + movl %eax, 72(%ecx) + movl 
16(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%ecx) + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end157: + .size mcl_fpDbl_sub10L, .Lfunc_end157-mcl_fpDbl_sub10L + + .align 16, 0x90 + .type .LmulPv352x32,@function +.LmulPv352x32: # @mulPv352x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl %edx, %ebx + movl 92(%esp), %edi + movl %edi, %eax + mull 40(%ebx) + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %edi, %eax + mull 36(%ebx) + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %edi, %eax + mull 32(%ebx) + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %edi, %eax + mull 28(%ebx) + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %edi, %eax + mull 24(%ebx) + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %edi, %eax + mull 20(%ebx) + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %edi, %eax + mull 16(%ebx) + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %edi, %eax + mull 12(%ebx) + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %edi, %eax + mull 8(%ebx) + movl %edx, %esi + movl %eax, 4(%esp) # 4-byte Spill + movl %edi, %eax + mull 4(%ebx) + movl %edx, %ebp + movl %eax, (%esp) # 4-byte Spill + movl %edi, %eax + mull (%ebx) + movl %eax, (%ecx) + addl (%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%ecx) + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%ecx) + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%ecx) + movl 28(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%ecx) + movl 44(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%ecx) + movl 52(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ecx) + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%ecx) + movl 68(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 44(%ecx) + movl %ecx, %eax + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end158: + .size .LmulPv352x32, .Lfunc_end158-.LmulPv352x32 + + .globl mcl_fp_mulUnitPre11L + .align 16, 0x90 + .type mcl_fp_mulUnitPre11L,@function +mcl_fp_mulUnitPre11L: # @mcl_fp_mulUnitPre11L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $92, %esp + calll .L159$pb +.L159$pb: + popl %ebx +.Ltmp20: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp20-.L159$pb), %ebx + movl 120(%esp), %eax + movl %eax, (%esp) + leal 40(%esp), %ecx + movl 116(%esp), %edx + calll .LmulPv352x32 + movl 84(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 72(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 68(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 64(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 60(%esp), %ebp + movl 56(%esp), %ebx + movl 52(%esp), %edi + movl 
48(%esp), %esi + movl 40(%esp), %edx + movl 44(%esp), %ecx + movl 112(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + addl $92, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end159: + .size mcl_fp_mulUnitPre11L, .Lfunc_end159-mcl_fp_mulUnitPre11L + + .globl mcl_fpDbl_mulPre11L + .align 16, 0x90 + .type mcl_fpDbl_mulPre11L,@function +mcl_fpDbl_mulPre11L: # @mcl_fpDbl_mulPre11L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $620, %esp # imm = 0x26C + calll .L160$pb +.L160$pb: + popl %eax +.Ltmp21: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp21-.L160$pb), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %eax, %ebx + movl 648(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 644(%esp), %edx + movl %edx, %ebp + movl %ebx, %edi + calll .LmulPv352x32 + movl 612(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 588(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 584(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 580(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 576(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 568(%esp), %eax + movl 572(%esp), %esi + movl 640(%esp), %ecx + movl %eax, (%ecx) + movl 648(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 520(%esp), %ecx + movl %ebp, %edx + movl %edi, %ebx + calll .LmulPv352x32 + addl 520(%esp), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 564(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 560(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 540(%esp), %ebx + movl 536(%esp), %edi + movl 532(%esp), %esi + movl 524(%esp), %ecx + movl 528(%esp), %edx + movl 640(%esp), %eax + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%eax) + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 
72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 56(%esp), %eax # 4-byte Reload + addl 472(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 516(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 512(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 508(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 504(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 492(%esp), %ebp + movl 488(%esp), %edi + movl 484(%esp), %esi + movl 476(%esp), %ecx + movl 480(%esp), %edx + movl 640(%esp), %eax + movl 56(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%eax) + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 44(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 56(%esp), %eax # 4-byte Reload + addl 424(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 468(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 464(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 460(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 456(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 452(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 448(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 444(%esp), %ebx + movl 440(%esp), %edi + movl 436(%esp), %esi + movl 428(%esp), %ecx + movl 432(%esp), %edx + movl 640(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 44(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 
4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 376(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 416(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 404(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 400(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 396(%esp), %ebp + movl 392(%esp), %edi + movl 388(%esp), %esi + movl 380(%esp), %ecx + movl 384(%esp), %edx + movl 640(%esp), %eax + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 16(%eax) + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 56(%esp), %eax # 4-byte Reload + addl 328(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 360(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 356(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 352(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 348(%esp), %ebx + movl 344(%esp), %edi + movl 340(%esp), %esi + movl 332(%esp), %ecx + movl 336(%esp), %edx + movl 640(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte 
Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 280(%esp), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 316(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 304(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 300(%esp), %ebp + movl 296(%esp), %edi + movl 292(%esp), %esi + movl 284(%esp), %ecx + movl 288(%esp), %edx + movl 640(%esp), %eax + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 24(%eax) + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 56(%esp), %eax # 4-byte Reload + addl 232(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 272(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 252(%esp), %ebx + movl 248(%esp), %edi + movl 244(%esp), %esi + movl 236(%esp), %ecx + movl 240(%esp), %edx + movl 640(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, %ebp + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded 
Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 184(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 228(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 224(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 220(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 204(%esp), %ebp + movl 200(%esp), %edi + movl 196(%esp), %esi + movl 188(%esp), %ecx + movl 192(%esp), %edx + movl 640(%esp), %eax + movl 32(%esp), %ebx # 4-byte Reload + movl %ebx, 32(%eax) + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 56(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 44(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + movl 648(%esp), %edi + movl 36(%edi), %eax + movl %eax, (%esp) + leal 136(%esp), %ecx + movl 644(%esp), %eax + movl %eax, %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 56(%esp), %eax # 4-byte Reload + addl 136(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 164(%esp), %ebp + movl 160(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 156(%esp), %edi + movl 152(%esp), %esi + movl 148(%esp), %edx + movl 140(%esp), %ecx + movl 144(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 640(%esp), %eax + movl 56(%esp), %ebx # 4-byte Reload + movl %ebx, 36(%eax) + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + movl 
48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 60(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 648(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 88(%esp), %ecx + movl 644(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 52(%esp), %eax # 4-byte Reload + addl 88(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 92(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 128(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 120(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 116(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 112(%esp), %edi + movl 108(%esp), %esi + movl 104(%esp), %edx + movl 100(%esp), %ecx + movl 640(%esp), %eax + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 40(%eax) + movl %ebp, 44(%eax) + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 48(%eax) + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %ecx, 52(%eax) + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %edx, 56(%eax) + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %esi, 60(%eax) + movl 44(%esp), %ecx # 4-byte Reload + adcl 72(%esp), %ecx # 4-byte Folded Reload + movl %edi, 64(%eax) + movl 48(%esp), %edx # 4-byte Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %ecx, 68(%eax) + movl 68(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %edx, 72(%eax) + movl %ecx, 76(%eax) + movl 76(%esp), %ecx # 4-byte Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 80(%eax) + movl 84(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 84(%eax) + addl $620, %esp # imm = 0x26C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end160: + .size mcl_fpDbl_mulPre11L, .Lfunc_end160-mcl_fpDbl_mulPre11L + + .globl mcl_fpDbl_sqrPre11L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre11L,@function +mcl_fpDbl_sqrPre11L: # @mcl_fpDbl_sqrPre11L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $620, %esp # imm = 0x26C + calll .L161$pb +.L161$pb: + popl %ebx +.Ltmp22: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp22-.L161$pb), %ebx + movl %ebx, 84(%esp) # 4-byte Spill + movl 644(%esp), %edx + movl (%edx), %eax + movl %eax, (%esp) + leal 568(%esp), %ecx + movl %edx, %esi + movl %ebx, %edi + calll .LmulPv352x32 + movl 612(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 588(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 584(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 580(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 576(%esp), %eax + movl 
%eax, 60(%esp) # 4-byte Spill + movl 568(%esp), %eax + movl 572(%esp), %ebp + movl 640(%esp), %ecx + movl %eax, (%ecx) + movl %esi, %edx + movl 4(%edx), %eax + movl %eax, (%esp) + leal 520(%esp), %ecx + movl %edi, %ebx + calll .LmulPv352x32 + addl 520(%esp), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 564(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 560(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 540(%esp), %ebx + movl 536(%esp), %edi + movl 532(%esp), %esi + movl 524(%esp), %ecx + movl 528(%esp), %edx + movl 640(%esp), %eax + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 4(%eax) + adcl 60(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 60(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 8(%edx), %eax + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 60(%esp), %eax # 4-byte Reload + addl 472(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 516(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 512(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 508(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 504(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 492(%esp), %ebp + movl 488(%esp), %edi + movl 484(%esp), %esi + movl 476(%esp), %ecx + movl 480(%esp), %edx + movl 640(%esp), %eax + movl 60(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 
4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 12(%edx), %eax + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 48(%esp), %eax # 4-byte Reload + addl 424(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 468(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 464(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 460(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 456(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 452(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 448(%esp), %ebx + movl 444(%esp), %edi + movl 440(%esp), %esi + movl 436(%esp), %edx + movl 428(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 432(%esp), %ecx + movl 640(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 80(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 16(%edx), %eax + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 80(%esp), %eax # 4-byte Reload + addl 376(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 416(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 404(%esp), %ebx + movl 400(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 396(%esp), %edi + movl 392(%esp), %esi + movl 388(%esp), %edx + movl 380(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 384(%esp), %ecx + movl 640(%esp), %eax + movl 80(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 48(%esp), %ebp # 4-byte Reload + adcl 20(%esp), %ebp # 4-byte Folded Reload + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 80(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 
64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 20(%edx), %eax + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 328(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 360(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 356(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 352(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 348(%esp), %ebp + movl 344(%esp), %edi + movl 340(%esp), %esi + movl 332(%esp), %ecx + movl 336(%esp), %edx + movl 640(%esp), %eax + movl 48(%esp), %ebx # 4-byte Reload + movl %ebx, 20(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 48(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + adcl 80(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, 24(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 24(%edx), %eax + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 48(%esp), %eax # 4-byte Reload + addl 280(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 316(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 304(%esp), %ebx + movl 300(%esp), %edi + movl 296(%esp), %esi + movl 292(%esp), %edx + movl 284(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 288(%esp), %ecx + movl 640(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 80(%esp) # 4-byte Folded Spill + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 48(%esp) # 4-byte Spill + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 8(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), 
%eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 28(%edx), %eax + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 80(%esp), %eax # 4-byte Reload + addl 232(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 272(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 260(%esp), %ebx + movl 256(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 252(%esp), %edi + movl 248(%esp), %esi + movl 244(%esp), %edx + movl 236(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 240(%esp), %ecx + movl 640(%esp), %eax + movl 80(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 32(%edx), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 184(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 228(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 224(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 220(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 204(%esp), %ebp + movl 200(%esp), %edi + movl 196(%esp), %esi + movl 188(%esp), %ecx + movl 192(%esp), %edx + movl 640(%esp), %eax + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 32(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + adcl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 12(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 
68(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, 28(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 36(%edx), %eax + movl %eax, (%esp) + leal 136(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 52(%esp), %eax # 4-byte Reload + addl 136(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 164(%esp), %ebp + movl 160(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 156(%esp), %edi + movl 152(%esp), %esi + movl 148(%esp), %edx + movl 140(%esp), %ecx + movl 144(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 640(%esp), %eax + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 36(%eax) + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 56(%esp) # 4-byte Spill + adcl 12(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 64(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 644(%esp), %edx + movl 40(%edx), %eax + movl %eax, (%esp) + leal 88(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 52(%esp), %eax # 4-byte Reload + addl 88(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 92(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 132(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 128(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 120(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 116(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 112(%esp), %edi + movl 108(%esp), %esi + movl 104(%esp), %edx + movl 100(%esp), %ecx + movl 640(%esp), %eax + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 40(%eax) + movl %ebp, 44(%eax) + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 48(%eax) + adcl 36(%esp), %edx # 4-byte Folded Reload + movl %ecx, 52(%eax) + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %edx, 56(%eax) + adcl 64(%esp), %edi # 4-byte Folded Reload + movl %esi, 60(%eax) + movl 44(%esp), %ecx # 4-byte Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %edi, 64(%eax) + movl 48(%esp), %edx # 4-byte Reload + adcl 60(%esp), %edx # 4-byte Folded Reload + movl %ecx, 68(%eax) + movl 72(%esp), %ecx # 4-byte Reload + adcl 76(%esp), %ecx # 4-byte Folded Reload + 
movl %edx, 72(%eax) + movl %ecx, 76(%eax) + movl 80(%esp), %ecx # 4-byte Reload + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 80(%eax) + movl 84(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 84(%eax) + addl $620, %esp # imm = 0x26C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end161: + .size mcl_fpDbl_sqrPre11L, .Lfunc_end161-mcl_fpDbl_sqrPre11L + + .globl mcl_fp_mont11L + .align 16, 0x90 + .type mcl_fp_mont11L,@function +mcl_fp_mont11L: # @mcl_fp_mont11L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1132, %esp # imm = 0x46C + calll .L162$pb +.L162$pb: + popl %ebx +.Ltmp23: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp23-.L162$pb), %ebx + movl 1164(%esp), %eax + movl -4(%eax), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1080(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 1080(%esp), %edi + movl 1084(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %edi, %eax + imull %ebp, %eax + movl 1124(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1120(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1116(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1112(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1108(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 1104(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 1100(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 1096(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1092(%esp), %esi + movl 1088(%esp), %ebp + movl %eax, (%esp) + leal 1032(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 1032(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1040(%esp), %ebp + adcl 1044(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 1076(%esp), %esi + sbbl %edi, %edi + movl 1160(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 984(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + andl $1, %edi + movl 56(%esp), %ecx # 4-byte Reload + addl 984(%esp), %ecx + adcl 988(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 
60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1024(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + adcl 1028(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 936(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + andl $1, %edi + movl %edi, %ecx + addl 936(%esp), %esi + adcl 940(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 964(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 980(%esp), %esi + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + addl 888(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 912(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 928(%esp), %esi + movl %esi, %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ebp, %eax + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 840(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + movl %esi, %eax + andl $1, %eax + addl 840(%esp), %ebp + movl 40(%esp), %ecx # 4-byte Reload + adcl 844(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 848(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 24(%esp), %ecx # 4-byte Reload + adcl 852(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 856(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 860(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 864(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 868(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 
4-byte Reload + adcl 872(%esp), %ebp + movl 68(%esp), %esi # 4-byte Reload + adcl 876(%esp), %esi + adcl 880(%esp), %edi + movl 52(%esp), %ecx # 4-byte Reload + adcl 884(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 792(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 40(%esp), %ecx # 4-byte Reload + addl 792(%esp), %ecx + movl 36(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 820(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + adcl 824(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + adcl 828(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 836(%esp), %esi + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 744(%esp), %ecx + movl 1164(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + andl $1, %edi + movl %edi, %ecx + addl 744(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 768(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 776(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 788(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 1156(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + movl 36(%esp), %ecx # 4-byte Reload + addl 696(%esp), %ecx + movl 24(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 716(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 724(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 
728(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 648(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + andl $1, %edi + movl %edi, %ecx + addl 648(%esp), %ebp + movl 24(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %ebp # 4-byte Reload + adcl 656(%esp), %ebp + movl 32(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 676(%esp), %edi + adcl 680(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %esi # 4-byte Reload + adcl 692(%esp), %esi + adcl $0, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 600(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 24(%esp), %ecx # 4-byte Reload + addl 600(%esp), %ecx + adcl 604(%esp), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + adcl 608(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 624(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 640(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %edi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + andl $1, %esi + movl %esi, %eax + addl 552(%esp), %edi + movl 28(%esp), %ecx # 4-byte Reload + adcl 556(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl %ebp, %edi + adcl 560(%esp), %edi + movl 44(%esp), %ecx # 4-byte Reload + adcl 564(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 568(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 572(%esp), %esi + movl 68(%esp), %ecx # 4-byte Reload + adcl 576(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 580(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 584(%esp), 
%ebp + movl 48(%esp), %ecx # 4-byte Reload + adcl 588(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 592(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 596(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 28(%esp), %ecx # 4-byte Reload + addl 504(%esp), %ecx + adcl 508(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 520(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 532(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 536(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 456(%esp), %ecx + movl 1164(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + andl $1, %ebp + movl %ebp, %eax + addl 456(%esp), %edi + movl 32(%esp), %ecx # 4-byte Reload + adcl 460(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 464(%esp), %ebp + movl 60(%esp), %ecx # 4-byte Reload + adcl 468(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 472(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 480(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 484(%esp), %edi + adcl 488(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 492(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 496(%esp), %esi + movl 24(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 408(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 32(%esp), %ecx # 4-byte Reload + addl 408(%esp), %ecx + adcl 412(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 428(%esp), %ebp + adcl 432(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + 
movl %eax, 40(%esp) # 4-byte Spill + adcl 444(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 360(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + andl $1, %edi + movl %edi, %ecx + addl 360(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 368(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 380(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 384(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 392(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 44(%esp), %ecx # 4-byte Reload + addl 312(%esp), %ecx + adcl 316(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 332(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 340(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %edi # 4-byte Reload + adcl 348(%esp), %edi + movl 28(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + andl $1, %ebp + movl %ebp, %ecx + addl 264(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 276(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 284(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 
296(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 300(%esp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 28(%esp), %edi # 4-byte Reload + adcl 304(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 216(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 216(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 224(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 232(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + adcl 252(%esp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 20(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + movl %esi, %ecx + andl $1, %ecx + addl 168(%esp), %ebp + movl 64(%esp), %esi # 4-byte Reload + adcl 172(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 184(%esp), %ebp + movl 48(%esp), %edi # 4-byte Reload + adcl 188(%esp), %edi + movl 40(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl %esi, %ecx + addl 120(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 128(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 132(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 136(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 28(%esp), 
%eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + adcl 156(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %esi, %esi + movl 20(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 72(%esp), %ecx + movl 1164(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + andl $1, %esi + addl 72(%esp), %edi + movl 48(%esp), %edi # 4-byte Reload + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl 88(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 24(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 28(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 28(%esp) # 4-byte Spill + adcl 108(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 112(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl $0, %esi + movl 1164(%esp), %ebp + subl (%ebp), %eax + movl %ecx, %edx + sbbl 4(%ebp), %edx + movl 52(%esp), %ecx # 4-byte Reload + sbbl 8(%ebp), %ecx + sbbl 12(%ebp), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + sbbl 16(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + sbbl 20(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 24(%esp), %edi # 4-byte Reload + sbbl 24(%ebp), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 28(%esp), %ebx # 4-byte Reload + sbbl 28(%ebp), %ebx + movl 32(%esp), %edi # 4-byte Reload + sbbl 32(%ebp), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + sbbl 36(%ebp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + sbbl 40(%ebp), %edi + movl %edi, %ebp + sbbl $0, %esi + andl $1, %esi + jne .LBB162_2 +# BB#1: + movl %ebx, 28(%esp) # 4-byte Spill +.LBB162_2: + movl %esi, %ebx + testb %bl, %bl + movl 68(%esp), %ebx # 4-byte Reload + jne .LBB162_4 +# BB#3: + movl %eax, %ebx +.LBB162_4: + movl 1152(%esp), %eax + movl %ebx, (%eax) + movl 56(%esp), %edi # 4-byte Reload + jne .LBB162_6 +# BB#5: + movl %edx, %edi +.LBB162_6: + movl %edi, 4(%eax) + movl 52(%esp), %edx # 4-byte Reload + jne .LBB162_8 +# BB#7: + movl %ecx, %edx +.LBB162_8: + movl %edx, 8(%eax) + jne .LBB162_10 +# BB#9: + movl 4(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%esp) # 4-byte Spill +.LBB162_10: + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 40(%esp), %ecx # 4-byte Reload + jne .LBB162_12 +# BB#11: + movl 8(%esp), %ecx # 4-byte Reload +.LBB162_12: + movl %ecx, 16(%eax) + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB162_14 +# BB#13: + movl 12(%esp), %ecx # 4-byte Reload +.LBB162_14: + movl %ecx, 20(%eax) + movl 24(%esp), %ecx # 4-byte Reload + jne .LBB162_16 +# BB#15: + movl 16(%esp), %ecx # 4-byte Reload +.LBB162_16: + movl %ecx, 24(%eax) + movl 
28(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 32(%esp), %ecx # 4-byte Reload + jne .LBB162_18 +# BB#17: + movl 20(%esp), %ecx # 4-byte Reload +.LBB162_18: + movl %ecx, 32(%eax) + movl 44(%esp), %ecx # 4-byte Reload + jne .LBB162_20 +# BB#19: + movl 60(%esp), %ecx # 4-byte Reload +.LBB162_20: + movl %ecx, 36(%eax) + movl 64(%esp), %ecx # 4-byte Reload + jne .LBB162_22 +# BB#21: + movl %ebp, %ecx +.LBB162_22: + movl %ecx, 40(%eax) + addl $1132, %esp # imm = 0x46C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end162: + .size mcl_fp_mont11L, .Lfunc_end162-mcl_fp_mont11L + + .globl mcl_fp_montNF11L + .align 16, 0x90 + .type mcl_fp_montNF11L,@function +mcl_fp_montNF11L: # @mcl_fp_montNF11L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1132, %esp # imm = 0x46C + calll .L163$pb +.L163$pb: + popl %ebx +.Ltmp24: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp24-.L163$pb), %ebx + movl 1164(%esp), %eax + movl -4(%eax), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1080(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 1080(%esp), %ebp + movl 1084(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 1124(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1120(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1116(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1112(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1108(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1104(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 1100(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 1096(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1092(%esp), %esi + movl 1088(%esp), %edi + movl %eax, (%esp) + leal 1032(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 1032(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1040(%esp), %edi + adcl 1044(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 1048(%esp), %ebp + movl 28(%esp), %esi # 4-byte Reload + adcl 1052(%esp), %esi + movl 32(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 984(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 1028(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + addl 984(%esp), %ecx + adcl 988(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 996(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + adcl 1000(%esp), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 1004(%esp), %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), 
%eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 936(%esp), %ecx + movl 1164(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + addl 936(%esp), %ebp + adcl 940(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + adcl 956(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 960(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 980(%esp), %ebp + movl 1160(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 932(%esp), %eax + addl 888(%esp), %edi + movl 44(%esp), %ecx # 4-byte Reload + adcl 892(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 896(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 900(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 904(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + adcl 908(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + adcl 912(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 916(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 920(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 924(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 928(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %edi, %eax + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 840(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 840(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 860(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 872(%esp), %edi + movl 68(%esp), %esi # 4-byte Reload + adcl 876(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 60(%esp) # 4-byte 
Spill + adcl 884(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 792(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 836(%esp), %eax + movl 44(%esp), %edx # 4-byte Reload + addl 792(%esp), %edx + movl 40(%esp), %ecx # 4-byte Reload + adcl 796(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 800(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 804(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 808(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 812(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 816(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 820(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + adcl 824(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 828(%esp), %ebp + movl 48(%esp), %ecx # 4-byte Reload + adcl 832(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 744(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 744(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 764(%esp), %esi + movl 56(%esp), %edi # 4-byte Reload + adcl 768(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 780(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 784(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 740(%esp), %edx + movl 40(%esp), %eax # 4-byte Reload + addl 696(%esp), %eax + movl 28(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 704(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 708(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 712(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl 716(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 720(%esp), %edi + movl 68(%esp), %ecx # 4-byte Reload + adcl 724(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 728(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 732(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 736(%esp), %esi + adcl $0, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl %eax, %ebp + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 648(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 
648(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + adcl 656(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 672(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 688(%esp), %esi + movl %esi, %edi + movl 40(%esp), %esi # 4-byte Reload + adcl 692(%esp), %esi + movl 1160(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 600(%esp), %ecx + movl 1156(%esp), %eax + movl %eax, %edx + calll .LmulPv352x32 + movl 644(%esp), %eax + movl 28(%esp), %ecx # 4-byte Reload + addl 600(%esp), %ecx + adcl 604(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 608(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 612(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 616(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 620(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 624(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 628(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 632(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + adcl 636(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + adcl 640(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl %eax, %ebp + adcl $0, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 552(%esp), %esi + movl 32(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 560(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 576(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 596(%esp), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 1160(%esp), %ecx + movl %ecx, %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 548(%esp), %edx + movl 32(%esp), %eax # 4-byte Reload + addl 504(%esp), %eax + adcl 508(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte 
Reload + adcl 512(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 516(%esp), %ebp + movl 64(%esp), %ecx # 4-byte Reload + adcl 520(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 524(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 528(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 532(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 536(%esp), %edi + movl 40(%esp), %ecx # 4-byte Reload + adcl 540(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload + adcl 544(%esp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl %eax, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 456(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 456(%esp), %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 468(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 480(%esp), %esi + movl 48(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 488(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %edi # 4-byte Reload + adcl 496(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 408(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 452(%esp), %edx + movl 36(%esp), %ecx # 4-byte Reload + addl 408(%esp), %ecx + movl 52(%esp), %ebp # 4-byte Reload + adcl 412(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 428(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 444(%esp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + adcl 448(%esp), %edi + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 360(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 360(%esp), %esi + adcl 364(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 372(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 
380(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 392(%esp), %ebp + movl 28(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + adcl 400(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 356(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + addl 312(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 320(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 332(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 340(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 264(%esp), %esi + movl 56(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 276(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %edi, %esi + adcl 284(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 292(%esp), %edi + movl 28(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 216(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 260(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + addl 216(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 224(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 232(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 240(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 
28(%esp), %ebp # 4-byte Reload + adcl 244(%esp), %ebp + movl 32(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 24(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 168(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 176(%esp), %esi + movl 60(%esp), %edi # 4-byte Reload + adcl 180(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 196(%esp), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + adcl 204(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1160(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 1156(%esp), %edx + calll .LmulPv352x32 + movl 164(%esp), %edx + movl 64(%esp), %ecx # 4-byte Reload + addl 120(%esp), %ecx + adcl 124(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + adcl 128(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 136(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 152(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 156(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 72(%esp), %ecx + movl 1164(%esp), %edx + calll .LmulPv352x32 + addl 72(%esp), %edi + movl 48(%esp), %edi # 4-byte Reload + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 84(%esp), %edi + adcl 88(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 28(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl 108(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), 
%edx # 4-byte Reload + adcl 112(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %eax, %edx + movl 1164(%esp), %ebx + subl (%ebx), %edx + movl %ecx, %esi + sbbl 4(%ebx), %esi + movl %edi, %ecx + sbbl 8(%ebx), %ecx + movl 44(%esp), %eax # 4-byte Reload + sbbl 12(%ebx), %eax + movl 40(%esp), %ebp # 4-byte Reload + sbbl 16(%ebx), %ebp + movl %ebp, 4(%esp) # 4-byte Spill + movl 28(%esp), %ebp # 4-byte Reload + sbbl 20(%ebx), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + sbbl 24(%ebx), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + sbbl 28(%ebx), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + sbbl 32(%ebx), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + sbbl 36(%ebx), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + sbbl 40(%ebx), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl %ebp, %ebx + sarl $31, %ebx + testl %ebx, %ebx + movl 68(%esp), %ebx # 4-byte Reload + js .LBB163_2 +# BB#1: + movl %edx, %ebx +.LBB163_2: + movl 1152(%esp), %edx + movl %ebx, (%edx) + movl 60(%esp), %ebp # 4-byte Reload + js .LBB163_4 +# BB#3: + movl %esi, %ebp +.LBB163_4: + movl %ebp, 4(%edx) + js .LBB163_6 +# BB#5: + movl %ecx, %edi +.LBB163_6: + movl %edi, 8(%edx) + movl 44(%esp), %ecx # 4-byte Reload + js .LBB163_8 +# BB#7: + movl %eax, %ecx +.LBB163_8: + movl %ecx, 12(%edx) + movl 40(%esp), %eax # 4-byte Reload + js .LBB163_10 +# BB#9: + movl 4(%esp), %eax # 4-byte Reload +.LBB163_10: + movl %eax, 16(%edx) + movl 28(%esp), %eax # 4-byte Reload + js .LBB163_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB163_12: + movl %eax, 20(%edx) + movl 32(%esp), %eax # 4-byte Reload + js .LBB163_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB163_14: + movl %eax, 24(%edx) + movl 36(%esp), %eax # 4-byte Reload + js .LBB163_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB163_16: + movl %eax, 28(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB163_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB163_18: + movl %eax, 32(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB163_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB163_20: + movl %eax, 36(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB163_22 +# BB#21: + movl 48(%esp), %eax # 4-byte Reload +.LBB163_22: + movl %eax, 40(%edx) + addl $1132, %esp # imm = 0x46C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end163: + .size mcl_fp_montNF11L, .Lfunc_end163-mcl_fp_montNF11L + + .globl mcl_fp_montRed11L + .align 16, 0x90 + .type mcl_fp_montRed11L,@function +mcl_fp_montRed11L: # @mcl_fp_montRed11L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $668, %esp # imm = 0x29C + calll .L164$pb +.L164$pb: + popl %eax +.Ltmp25: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp25-.L164$pb), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 696(%esp), %edx + movl -4(%edx), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 692(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 60(%esp) # 4-byte Spill + movl 4(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + imull %esi, %ebx + movl 84(%ecx), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 80(%ecx), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 76(%ecx), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 68(%ecx), %eax + movl %eax, 108(%esp) # 4-byte 
Spill + movl 64(%ecx), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 60(%ecx), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 48(%ecx), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 44(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 40(%ecx), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 32(%ecx), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 28(%ecx), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 24(%ecx), %ebp + movl 20(%ecx), %edi + movl 16(%ecx), %esi + movl 12(%ecx), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 8(%ecx), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl (%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 616(%esp), %ecx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + movl 60(%esp), %eax # 4-byte Reload + addl 616(%esp), %eax + movl 64(%esp), %ecx # 4-byte Reload + adcl 620(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 632(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl 636(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + adcl 640(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + andl $1, %edi + movl %edi, %ecx + addl 568(%esp), %esi + movl 56(%esp), %edx # 4-byte Reload + adcl 572(%esp), %edx + movl 48(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 
584(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 600(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 520(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 520(%esp), %ebp + movl 48(%esp), %ecx # 4-byte Reload + adcl 524(%esp), %ecx + movl 52(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 548(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl 124(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl 120(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 472(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + 
adcl 496(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 124(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 120(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + movl 104(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + movl 80(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 56(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 424(%esp), %ebp + movl 60(%esp), %ecx # 4-byte Reload + adcl 428(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl 464(%esp), %ebp + movl 124(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 104(%esp) # 4-byte Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %ecx, %esi + movl %esi, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 376(%esp), %esi + movl 64(%esp), %ecx # 4-byte Reload + adcl 380(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %esi # 4-byte Reload + adcl 404(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 412(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 
4-byte Reload + adcl 416(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 328(%esp), %edi + movl 76(%esp), %ecx # 4-byte Reload + adcl 332(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 344(%esp), %edi + movl 132(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 352(%esp), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 356(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 96(%esp) # 4-byte Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 696(%esp), %eax + movl %eax, %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 280(%esp), %ebp + movl 88(%esp), %ebp # 4-byte Reload + adcl 284(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 292(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 132(%esp), %edi # 4-byte Reload + adcl 296(%esp), %edi + movl 128(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl 304(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl %ebp, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 232(%esp), %ebp + movl 84(%esp), %ebp # 4-byte Reload + adcl 236(%esp), %ebp + movl 100(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax 
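+# NOTE (inferred, not from upstream): each repeated block above is one
+# word-by-word Montgomery-reduction round -- the current low limb is
+# multiplied by -p^{-1} mod 2^32 (spilled at 68(%esp)), q*p is formed by
+# .LmulPv352x32, and the adcl chain folds it in so the low limb cancels;
+# eleven such rounds consume all 11 limbs before the final conditional
+# subtract of the modulus.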
+ movl %eax, 100(%esp) # 4-byte Spill + adcl 244(%esp), %edi + movl %edi, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 276(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + movl 80(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl 56(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %ebp, %eax + imull 68(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 184(%esp), %ebp + movl 100(%esp), %ecx # 4-byte Reload + adcl 188(%esp), %ecx + movl 132(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, %ebp + movl 68(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 136(%esp), %ecx + movl 696(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + calll .LmulPv352x32 + addl 136(%esp), %esi + movl 132(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl %eax, %edi + movl 128(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 128(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 148(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 152(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 
176(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 80(%esp), %ebx # 4-byte Reload + adcl 180(%esp), %ebx + movl %ebx, 80(%esp) # 4-byte Spill + adcl $0, %ebp + subl 12(%esp), %edi # 4-byte Folded Reload + sbbl 4(%esp), %edx # 4-byte Folded Reload + sbbl 8(%esp), %ecx # 4-byte Folded Reload + sbbl 16(%esp), %esi # 4-byte Folded Reload + movl 124(%esp), %eax # 4-byte Reload + sbbl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + sbbl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + sbbl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + sbbl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl %ebx, %eax + movl %ebp, %ebx + sbbl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + sbbl $0, %ebx + andl $1, %ebx + jne .LBB164_2 +# BB#1: + movl %esi, 112(%esp) # 4-byte Spill +.LBB164_2: + testb %bl, %bl + movl 132(%esp), %esi # 4-byte Reload + jne .LBB164_4 +# BB#3: + movl %edi, %esi +.LBB164_4: + movl 688(%esp), %edi + movl %esi, (%edi) + movl 104(%esp), %esi # 4-byte Reload + jne .LBB164_6 +# BB#5: + movl %edx, 128(%esp) # 4-byte Spill +.LBB164_6: + movl 128(%esp), %edx # 4-byte Reload + movl %edx, 4(%edi) + movl 116(%esp), %edx # 4-byte Reload + jne .LBB164_8 +# BB#7: + movl %ecx, %edx +.LBB164_8: + movl %edx, 8(%edi) + movl 112(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%edi) + movl 92(%esp), %edx # 4-byte Reload + movl 124(%esp), %ecx # 4-byte Reload + jne .LBB164_10 +# BB#9: + movl 64(%esp), %ecx # 4-byte Reload +.LBB164_10: + movl %ecx, 16(%edi) + movl 96(%esp), %ecx # 4-byte Reload + movl 120(%esp), %eax # 4-byte Reload + jne .LBB164_12 +# BB#11: + movl 68(%esp), %eax # 4-byte Reload +.LBB164_12: + movl %eax, 20(%edi) + movl 80(%esp), %eax # 4-byte Reload + movl 108(%esp), %ebp # 4-byte Reload + jne .LBB164_14 +# BB#13: + movl 72(%esp), %ebp # 4-byte Reload +.LBB164_14: + movl %ebp, 24(%edi) + jne .LBB164_16 +# BB#15: + movl 76(%esp), %esi # 4-byte Reload +.LBB164_16: + movl %esi, 28(%edi) + jne .LBB164_18 +# BB#17: + movl 84(%esp), %edx # 4-byte Reload +.LBB164_18: + movl %edx, 32(%edi) + jne .LBB164_20 +# BB#19: + movl 88(%esp), %ecx # 4-byte Reload +.LBB164_20: + movl %ecx, 36(%edi) + jne .LBB164_22 +# BB#21: + movl 100(%esp), %eax # 4-byte Reload +.LBB164_22: + movl %eax, 40(%edi) + addl $668, %esp # imm = 0x29C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end164: + .size mcl_fp_montRed11L, .Lfunc_end164-mcl_fp_montRed11L + + .globl mcl_fp_addPre11L + .align 16, 0x90 + .type mcl_fp_addPre11L,@function +mcl_fp_addPre11L: # @mcl_fp_addPre11L +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + 
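+# NOTE (inferred): mcl_fp_addPre11L is a plain 11-limb (352-bit) add with
+# no modular reduction -- one rippled adcl per limb; the closing
+# sbbl/andl $1 turns the final carry flag into the 0/1 return value.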
adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ebx) + movl 32(%ecx), %esi + adcl %edi, %esi + movl 36(%eax), %edi + movl %edx, 28(%ebx) + movl 36(%ecx), %edx + adcl %edi, %edx + movl %esi, 32(%ebx) + movl %edx, 36(%ebx) + movl 40(%eax), %eax + movl 40(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 40(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end165: + .size mcl_fp_addPre11L, .Lfunc_end165-mcl_fp_addPre11L + + .globl mcl_fp_subPre11L + .align 16, 0x90 + .type mcl_fp_subPre11L,@function +mcl_fp_subPre11L: # @mcl_fp_subPre11L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ebp) + movl 32(%ecx), %edi + sbbl %ebx, %edi + movl 36(%edx), %ebx + movl %esi, 28(%ebp) + movl 36(%ecx), %esi + sbbl %ebx, %esi + movl %edi, 32(%ebp) + movl %esi, 36(%ebp) + movl 40(%edx), %edx + movl 40(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 40(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end166: + .size mcl_fp_subPre11L, .Lfunc_end166-mcl_fp_subPre11L + + .globl mcl_fp_shr1_11L + .align 16, 0x90 + .type mcl_fp_shr1_11L,@function +mcl_fp_shr1_11L: # @mcl_fp_shr1_11L +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + shrdl $1, %edx, %ecx + movl 8(%esp), %esi + movl %ecx, (%esi) + movl 8(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 4(%esi) + movl 12(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 8(%esi) + movl 16(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 12(%esi) + movl 20(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 16(%esi) + movl 24(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 20(%esi) + movl 28(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 24(%esi) + movl 32(%eax), %ecx + shrdl $1, %ecx, %edx + movl %edx, 28(%esi) + movl 36(%eax), %edx + shrdl $1, %edx, %ecx + movl %ecx, 32(%esi) + movl 40(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 36(%esi) + shrl %eax + movl %eax, 40(%esi) + popl %esi + retl +.Lfunc_end167: + .size mcl_fp_shr1_11L, .Lfunc_end167-mcl_fp_shr1_11L + + .globl mcl_fp_add11L + .align 16, 0x90 + .type mcl_fp_add11L,@function +mcl_fp_add11L: # @mcl_fp_add11L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $32, %esp + movl 60(%esp), %edi + movl (%edi), %ecx + movl 4(%edi), %eax + movl 56(%esp), %esi + addl (%esi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl %ecx, %ebp + adcl 4(%esi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 8(%edi), %eax + adcl 8(%esi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 12(%esi), %eax + movl 16(%esi), %ecx + adcl 12(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + adcl 16(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 20(%esi), %eax + adcl 20(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 24(%esi), %eax + adcl 24(%edi), %eax + 
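+# NOTE (inferred): mcl_fp_add11L stores the raw 11-limb sum, then trial-
+# subtracts the modulus; the reduced limbs overwrite the sum only on the
+# %nocarry path, i.e. when the trial subtraction does not underflow.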
movl %eax, 8(%esp) # 4-byte Spill + movl 28(%esi), %ebx + adcl 28(%edi), %ebx + movl %ebx, (%esp) # 4-byte Spill + movl 32(%esi), %ecx + adcl 32(%edi), %ecx + movl 36(%esi), %eax + adcl 36(%edi), %eax + movl 40(%esi), %edx + adcl 40(%edi), %edx + movl 52(%esp), %esi + movl %ebp, (%esi) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 4(%esi) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 8(%esi) + movl 20(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%esi) + movl 16(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%esi) + movl 12(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%esi) + movl 8(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%esi) + movl %ebx, 28(%esi) + movl %ecx, 32(%esi) + movl %eax, 36(%esi) + movl %edx, 40(%esi) + sbbl %ebx, %ebx + andl $1, %ebx + movl 64(%esp), %ebp + movl 4(%esp), %edi # 4-byte Reload + subl (%ebp), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl 28(%esp), %edi # 4-byte Reload + sbbl 4(%ebp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 24(%esp), %edi # 4-byte Reload + sbbl 8(%ebp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 20(%esp), %edi # 4-byte Reload + sbbl 12(%ebp), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 16(%esp), %edi # 4-byte Reload + sbbl 16(%ebp), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 12(%esp), %edi # 4-byte Reload + sbbl 20(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 8(%esp), %edi # 4-byte Reload + sbbl 24(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl (%esp), %edi # 4-byte Reload + sbbl 28(%ebp), %edi + movl %edi, (%esp) # 4-byte Spill + sbbl 32(%ebp), %ecx + sbbl 36(%ebp), %eax + sbbl 40(%ebp), %edx + movl %edx, %edi + sbbl $0, %ebx + testb $1, %bl + jne .LBB168_2 +# BB#1: # %nocarry + movl 4(%esp), %ebx # 4-byte Reload + movl %ebx, (%esi) + movl 28(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%esi) + movl 24(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%esi) + movl 20(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%esi) + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 16(%esi) + movl 12(%esp), %ebx # 4-byte Reload + movl %ebx, 20(%esi) + movl 8(%esp), %ebx # 4-byte Reload + movl %ebx, 24(%esi) + movl (%esp), %edx # 4-byte Reload + movl %edx, 28(%esi) + movl %ecx, 32(%esi) + movl %eax, 36(%esi) + movl %edi, 40(%esi) +.LBB168_2: # %carry + addl $32, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end168: + .size mcl_fp_add11L, .Lfunc_end168-mcl_fp_add11L + + .globl mcl_fp_addNF11L + .align 16, 0x90 + .type mcl_fp_addNF11L,@function +mcl_fp_addNF11L: # @mcl_fp_addNF11L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $80, %esp + movl 108(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %ecx + movl 104(%esp), %esi + addl (%esi), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 4(%esi), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%edx), %ebx + movl 36(%edx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 32(%edx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 28(%edx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 24(%edx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 20(%edx), %ebp + movl 16(%edx), %edi + movl 12(%edx), %eax + movl 8(%edx), %ecx + adcl 8(%esi), %ecx + adcl 12(%esi), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 16(%esi), %edi + movl %edi, 48(%esp) # 4-byte Spill + adcl 20(%esi), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 24(%esi), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 28(%esi), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 72(%esp), 
%edx # 4-byte Reload + adcl 32(%esi), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 36(%esi), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl %ecx, %edx + adcl 40(%esi), %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 112(%esp), %ebx + movl 52(%esp), %esi # 4-byte Reload + subl (%ebx), %esi + movl 60(%esp), %ecx # 4-byte Reload + sbbl 4(%ebx), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl %edx, %ecx + sbbl 8(%ebx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + sbbl 12(%ebx), %eax + movl %eax, 8(%esp) # 4-byte Spill + sbbl 16(%ebx), %edi + movl %edi, 12(%esp) # 4-byte Spill + sbbl 20(%ebx), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + movl %eax, %ecx + sbbl 24(%ebx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 28(%ebx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + sbbl 32(%ebx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + movl %edi, %ecx + movl %edi, %ebp + sbbl 36(%ebx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, %edi + sbbl 40(%ebx), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl %edi, %ebx + movl 52(%esp), %edi # 4-byte Reload + sarl $31, %ebx + testl %ebx, %ebx + js .LBB169_2 +# BB#1: + movl %esi, %edi +.LBB169_2: + movl 100(%esp), %esi + movl %edi, (%esi) + movl 60(%esp), %edi # 4-byte Reload + js .LBB169_4 +# BB#3: + movl (%esp), %edi # 4-byte Reload +.LBB169_4: + movl %edi, 4(%esi) + movl %eax, %edi + js .LBB169_6 +# BB#5: + movl 4(%esp), %edx # 4-byte Reload +.LBB169_6: + movl %edx, 8(%esi) + movl %ebp, %ecx + movl 72(%esp), %edx # 4-byte Reload + movl 40(%esp), %eax # 4-byte Reload + js .LBB169_8 +# BB#7: + movl 8(%esp), %eax # 4-byte Reload +.LBB169_8: + movl %eax, 12(%esi) + movl 76(%esp), %eax # 4-byte Reload + movl 44(%esp), %ebp # 4-byte Reload + js .LBB169_10 +# BB#9: + movl 12(%esp), %ebx # 4-byte Reload + movl %ebx, 48(%esp) # 4-byte Spill +.LBB169_10: + movl 48(%esp), %ebx # 4-byte Reload + movl %ebx, 16(%esi) + js .LBB169_12 +# BB#11: + movl 16(%esp), %ebp # 4-byte Reload +.LBB169_12: + movl %ebp, 20(%esi) + js .LBB169_14 +# BB#13: + movl 20(%esp), %edi # 4-byte Reload +.LBB169_14: + movl %edi, 24(%esi) + js .LBB169_16 +# BB#15: + movl 24(%esp), %eax # 4-byte Reload +.LBB169_16: + movl %eax, 28(%esi) + js .LBB169_18 +# BB#17: + movl 28(%esp), %edx # 4-byte Reload +.LBB169_18: + movl %edx, 32(%esi) + js .LBB169_20 +# BB#19: + movl 32(%esp), %ecx # 4-byte Reload +.LBB169_20: + movl %ecx, 36(%esi) + movl 56(%esp), %eax # 4-byte Reload + js .LBB169_22 +# BB#21: + movl 36(%esp), %eax # 4-byte Reload +.LBB169_22: + movl %eax, 40(%esi) + addl $80, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end169: + .size mcl_fp_addNF11L, .Lfunc_end169-mcl_fp_addNF11L + + .globl mcl_fp_sub11L + .align 16, 0x90 + .type mcl_fp_sub11L,@function +mcl_fp_sub11L: # @mcl_fp_sub11L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 64(%esp), %ebp + movl (%ebp), %ecx + movl 4(%ebp), %eax + movl 68(%esp), %edi + subl (%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 8(%ebp), %eax + sbbl 8(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 12(%ebp), %ebx + sbbl 12(%edi), %ebx + movl 16(%ebp), %eax + sbbl 16(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 20(%ebp), %eax + sbbl 20(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + 
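+# NOTE (inferred): mcl_fp_sub11L mirrors the add -- a rippled sbbl chain
+# stores the raw difference, and the %carry block adds the modulus back
+# in when the subtraction borrowed.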
movl 24(%ebp), %eax + sbbl 24(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 28(%ebp), %edx + sbbl 28(%edi), %edx + movl %edx, 4(%esp) # 4-byte Spill + movl 32(%ebp), %ecx + sbbl 32(%edi), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 36(%ebp), %eax + sbbl 36(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 40(%ebp), %eax + sbbl 40(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl %ebx, %ebp + movl 16(%esp), %esi # 4-byte Reload + movl $0, %ebx + sbbl $0, %ebx + testb $1, %bl + movl 60(%esp), %ebx + movl %esi, (%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl %ebp, 12(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl %edx, 28(%ebx) + movl %ecx, 32(%ebx) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%ebx) + movl %ecx, %edi + movl %eax, 40(%ebx) + je .LBB170_2 +# BB#1: # %carry + movl 72(%esp), %eax + addl (%eax), %esi + movl %esi, (%ebx) + movl 28(%esp), %edx # 4-byte Reload + movl %eax, %esi + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 32(%esp), %ecx # 4-byte Reload + adcl 8(%esi), %ecx + movl 12(%esi), %eax + adcl %ebp, %eax + movl %ecx, 8(%ebx) + movl 16(%esi), %ecx + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl (%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl %ecx, 32(%ebx) + movl 36(%esi), %eax + adcl %edi, %eax + movl %eax, 36(%ebx) + movl 40(%esi), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%ebx) +.LBB170_2: # %nocarry + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end170: + .size mcl_fp_sub11L, .Lfunc_end170-mcl_fp_sub11L + + .globl mcl_fp_subNF11L + .align 16, 0x90 + .type mcl_fp_subNF11L,@function +mcl_fp_subNF11L: # @mcl_fp_subNF11L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $64, %esp + movl 88(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %ecx + movl 92(%esp), %edi + subl (%edi), %edx + movl %edx, 44(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%eax), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 36(%eax), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 32(%eax), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 28(%eax), %ebx + movl 24(%eax), %ebp + movl 20(%eax), %esi + movl 16(%eax), %edx + movl 12(%eax), %ecx + movl 8(%eax), %eax + sbbl 8(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + sbbl 12(%edi), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 28(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 32(%esp) # 4-byte Spill + sbbl 24(%edi), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + sbbl 28(%edi), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + sbbl 40(%edi), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %edx, %esi + sarl $31, %esi + movl %esi, %eax + shldl $1, %edx, %eax + movl 96(%esp), %edx + movl 
4(%edx), %ecx + andl %eax, %ecx + movl %ecx, %ebx + andl (%edx), %eax + movl 40(%edx), %ecx + andl %esi, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 36(%edx), %ecx + andl %esi, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 32(%edx), %ecx + andl %esi, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 28(%edx), %ecx + andl %esi, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 24(%edx), %ecx + andl %esi, %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 20(%edx), %ebp + andl %esi, %ebp + roll %esi + movl 16(%edx), %edi + andl %esi, %edi + movl 12(%edx), %ecx + andl %esi, %ecx + andl 8(%edx), %esi + addl 44(%esp), %eax # 4-byte Folded Reload + movl %ebx, %edx + adcl 48(%esp), %edx # 4-byte Folded Reload + movl 84(%esp), %ebx + movl %eax, (%ebx) + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %edx, 4(%ebx) + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %esi, 8(%ebx) + adcl 28(%esp), %edi # 4-byte Folded Reload + movl %ecx, 12(%ebx) + adcl 32(%esp), %ebp # 4-byte Folded Reload + movl %edi, 16(%ebx) + movl (%esp), %ecx # 4-byte Reload + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %ebp, 20(%ebx) + movl 4(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 8(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 12(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl %eax, 36(%ebx) + movl 16(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%ebx) + addl $64, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end171: + .size mcl_fp_subNF11L, .Lfunc_end171-mcl_fp_subNF11L + + .globl mcl_fpDbl_add11L + .align 16, 0x90 + .type mcl_fpDbl_add11L,@function +mcl_fpDbl_add11L: # @mcl_fpDbl_add11L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $80, %esp + movl 108(%esp), %ecx + movl 104(%esp), %edi + movl 12(%edi), %esi + movl 16(%edi), %edx + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%edi), %ebp + movl 100(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%edi), %ebp + adcl 8(%edi), %ebx + adcl 12(%ecx), %esi + adcl 16(%ecx), %edx + movl %ebp, 4(%eax) + movl 52(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %esi, 12(%eax) + movl 20(%edi), %esi + adcl %ebx, %esi + movl 24(%ecx), %ebx + movl %edx, 16(%eax) + movl 24(%edi), %edx + adcl %ebx, %edx + movl 28(%ecx), %ebx + movl %esi, 20(%eax) + movl 28(%edi), %esi + adcl %ebx, %esi + movl 32(%ecx), %ebx + movl %edx, 24(%eax) + movl 32(%edi), %edx + adcl %ebx, %edx + movl 36(%ecx), %ebx + movl %esi, 28(%eax) + movl 36(%edi), %esi + adcl %ebx, %esi + movl 40(%ecx), %ebx + movl %edx, 32(%eax) + movl 40(%edi), %edx + adcl %ebx, %edx + movl 44(%ecx), %ebx + movl %esi, 36(%eax) + movl 44(%edi), %esi + adcl %ebx, %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 48(%ecx), %esi + movl %edx, 40(%eax) + movl 48(%edi), %eax + adcl %esi, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 52(%edi), %eax + adcl %ebp, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 56(%ecx), %edx + movl 56(%edi), %eax + adcl %edx, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%ecx), %edx + movl 60(%edi), %eax + adcl %edx, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%ecx), %edx + movl 64(%edi), %eax + adcl %edx, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%ecx), %eax + movl 68(%edi), %edx + adcl %eax, %edx + movl 72(%ecx), %esi + movl 72(%edi), %eax + adcl %esi, %eax + movl %eax, 44(%esp) # 4-byte Spill + 
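+# NOTE (inferred): mcl_fpDbl_add11L adds 22-limb double-width operands;
+# the low 11 limbs are written out unconditionally, while the high half
+# is trial-subtracted by the modulus and selected limb by limb.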
movl 76(%ecx), %ebx + movl 76(%edi), %esi + adcl %ebx, %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 80(%ecx), %ebp + movl 80(%edi), %ebx + adcl %ebp, %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 84(%ecx), %ecx + movl 84(%edi), %edi + adcl %ecx, %edi + movl %edi, 40(%esp) # 4-byte Spill + sbbl %ecx, %ecx + andl $1, %ecx + movl 112(%esp), %ebp + movl 68(%esp), %edi # 4-byte Reload + subl (%ebp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + sbbl 4(%ebp), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + sbbl 8(%ebp), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + sbbl 12(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + sbbl 16(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + sbbl 20(%ebp), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %edx, %edi + sbbl 24(%ebp), %edi + movl %edi, (%esp) # 4-byte Spill + sbbl 28(%ebp), %eax + movl %eax, 28(%esp) # 4-byte Spill + sbbl 32(%ebp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl %ebx, %eax + movl 40(%esp), %ebx # 4-byte Reload + sbbl 36(%ebp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl %ebx, %edi + sbbl 40(%ebp), %edi + sbbl $0, %ecx + andl $1, %ecx + jne .LBB172_2 +# BB#1: + movl %edi, %ebx +.LBB172_2: + testb %cl, %cl + movl 68(%esp), %ecx # 4-byte Reload + movl 64(%esp), %esi # 4-byte Reload + movl 60(%esp), %edi # 4-byte Reload + movl 56(%esp), %ebp # 4-byte Reload + jne .LBB172_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload + movl 4(%esp), %esi # 4-byte Reload + movl 8(%esp), %edi # 4-byte Reload + movl 12(%esp), %ebp # 4-byte Reload + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 24(%esp), %ecx # 4-byte Reload +.LBB172_4: + movl 100(%esp), %eax + movl %ecx, 44(%eax) + movl 72(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 76(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl %ebp, 56(%eax) + movl %edi, 60(%eax) + movl %esi, 64(%eax) + movl %edx, 68(%eax) + movl 52(%esp), %ecx # 4-byte Reload + movl 44(%esp), %edx # 4-byte Reload + jne .LBB172_6 +# BB#5: + movl 28(%esp), %edx # 4-byte Reload +.LBB172_6: + movl %edx, 72(%eax) + movl 48(%esp), %edx # 4-byte Reload + jne .LBB172_8 +# BB#7: + movl 32(%esp), %edx # 4-byte Reload +.LBB172_8: + movl %edx, 76(%eax) + jne .LBB172_10 +# BB#9: + movl 36(%esp), %ecx # 4-byte Reload +.LBB172_10: + movl %ecx, 80(%eax) + movl %ebx, 84(%eax) + addl $80, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end172: + .size mcl_fpDbl_add11L, .Lfunc_end172-mcl_fpDbl_add11L + + .globl mcl_fpDbl_sub11L + .align 16, 0x90 + .type mcl_fpDbl_sub11L,@function +mcl_fpDbl_sub11L: # @mcl_fpDbl_sub11L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 96(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %esi + movl 100(%esp), %ebp + subl (%ebp), %eax + sbbl 4(%ebp), %esi + movl 8(%edx), %edi + sbbl 8(%ebp), %edi + movl 92(%esp), %ecx + movl %eax, (%ecx) + movl 12(%edx), %eax + sbbl 12(%ebp), %eax + movl %esi, 4(%ecx) + movl 16(%edx), %esi + sbbl 16(%ebp), %esi + movl %edi, 8(%ecx) + movl 20(%ebp), %edi + movl %eax, 12(%ecx) + movl 20(%edx), %eax + sbbl %edi, %eax + movl 24(%ebp), %edi + movl %esi, 16(%ecx) + movl 24(%edx), %esi + sbbl %edi, %esi + movl 28(%ebp), %edi + movl %eax, 20(%ecx) + movl 28(%edx), %eax + sbbl %edi, %eax + movl 32(%ebp), %edi + movl 
%esi, 24(%ecx) + movl 32(%edx), %esi + sbbl %edi, %esi + movl 36(%ebp), %edi + movl %eax, 28(%ecx) + movl 36(%edx), %eax + sbbl %edi, %eax + movl 40(%ebp), %edi + movl %esi, 32(%ecx) + movl 40(%edx), %esi + sbbl %edi, %esi + movl 44(%ebp), %edi + movl %eax, 36(%ecx) + movl 44(%edx), %eax + sbbl %edi, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 48(%ebp), %eax + movl %esi, 40(%ecx) + movl 48(%edx), %esi + sbbl %eax, %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 52(%ebp), %eax + movl 52(%edx), %esi + sbbl %eax, %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 56(%ebp), %eax + movl 56(%edx), %esi + sbbl %eax, %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 60(%ebp), %eax + movl 60(%edx), %esi + sbbl %eax, %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 64(%ebp), %eax + movl 64(%edx), %esi + sbbl %eax, %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 68(%ebp), %eax + movl 68(%edx), %esi + sbbl %eax, %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 72(%ebp), %eax + movl 72(%edx), %esi + sbbl %eax, %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 76(%ebp), %eax + movl 76(%edx), %esi + sbbl %eax, %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 80(%ebp), %eax + movl 80(%edx), %esi + sbbl %eax, %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 84(%ebp), %eax + movl 84(%edx), %edx + sbbl %eax, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 104(%esp), %ebp + jne .LBB173_1 +# BB#2: + movl $0, 28(%esp) # 4-byte Folded Spill + jmp .LBB173_3 +.LBB173_1: + movl 40(%ebp), %edx + movl %edx, 28(%esp) # 4-byte Spill +.LBB173_3: + testb %al, %al + jne .LBB173_4 +# BB#5: + movl $0, 16(%esp) # 4-byte Folded Spill + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB173_6 +.LBB173_4: + movl (%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 4(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB173_6: + jne .LBB173_7 +# BB#8: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB173_9 +.LBB173_7: + movl 36(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB173_9: + jne .LBB173_10 +# BB#11: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB173_12 +.LBB173_10: + movl 32(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB173_12: + jne .LBB173_13 +# BB#14: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB173_15 +.LBB173_13: + movl 28(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB173_15: + jne .LBB173_16 +# BB#17: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB173_18 +.LBB173_16: + movl 24(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB173_18: + jne .LBB173_19 +# BB#20: + movl $0, %edx + jmp .LBB173_21 +.LBB173_19: + movl 20(%ebp), %edx +.LBB173_21: + jne .LBB173_22 +# BB#23: + movl $0, %edi + jmp .LBB173_24 +.LBB173_22: + movl 16(%ebp), %edi +.LBB173_24: + jne .LBB173_25 +# BB#26: + movl $0, %ebx + jmp .LBB173_27 +.LBB173_25: + movl 12(%ebp), %ebx +.LBB173_27: + jne .LBB173_28 +# BB#29: + xorl %ebp, %ebp + jmp .LBB173_30 +.LBB173_28: + movl 8(%ebp), %ebp +.LBB173_30: + movl 8(%esp), %esi # 4-byte Reload + addl 36(%esp), %esi # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %esi, 44(%ecx) + adcl 32(%esp), %ebp # 4-byte Folded Reload + movl %eax, 48(%ecx) + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 52(%ecx) + adcl 44(%esp), %edi # 4-byte Folded Reload + movl %ebx, 56(%ecx) + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %edi, 60(%ecx) + movl (%esp), %esi # 4-byte Reload + adcl 52(%esp), %esi # 4-byte Folded Reload + movl %edx, 64(%ecx) + movl 4(%esp), 
%eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %esi, 68(%ecx) + movl 12(%esp), %edx # 4-byte Reload + adcl 60(%esp), %edx # 4-byte Folded Reload + movl %eax, 72(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %edx, 76(%ecx) + movl %eax, 80(%ecx) + movl 28(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%ecx) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end173: + .size mcl_fpDbl_sub11L, .Lfunc_end173-mcl_fpDbl_sub11L + + .align 16, 0x90 + .type .LmulPv384x32,@function +.LmulPv384x32: # @mulPv384x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $80, %esp + movl %edx, %ebx + movl 100(%esp), %ebp + movl %ebp, %eax + mull 44(%ebx) + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 72(%esp) # 4-byte Spill + movl %ebp, %eax + mull 40(%ebx) + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %ebp, %eax + mull 36(%ebx) + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %ebp, %eax + mull 32(%ebx) + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %ebp, %eax + mull 28(%ebx) + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ebp, %eax + mull 24(%ebx) + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebp, %eax + mull 20(%ebx) + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebp, %eax + mull 16(%ebx) + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ebp, %eax + mull 12(%ebx) + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ebp, %eax + mull 8(%ebx) + movl %edx, %edi + movl %eax, 4(%esp) # 4-byte Spill + movl %ebp, %eax + mull 4(%ebx) + movl %edx, %esi + movl %eax, (%esp) # 4-byte Spill + movl %ebp, %eax + mull (%ebx) + movl %eax, (%ecx) + addl (%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%ecx) + adcl 4(%esp), %esi # 4-byte Folded Reload + movl %esi, 8(%ecx) + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%ecx) + movl 28(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%ecx) + movl 44(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%ecx) + movl 52(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ecx) + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%ecx) + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%ecx) + movl 76(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 48(%ecx) + movl %ecx, %eax + addl $80, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end174: + .size .LmulPv384x32, .Lfunc_end174-.LmulPv384x32 + + .globl mcl_fp_mulUnitPre12L + .align 16, 0x90 + .type mcl_fp_mulUnitPre12L,@function +mcl_fp_mulUnitPre12L: # @mcl_fp_mulUnitPre12L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $92, %esp + calll .L175$pb +.L175$pb: + popl %ebx +.Ltmp26: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp26-.L175$pb), %ebx 
+ movl 120(%esp), %eax + movl %eax, (%esp) + leal 40(%esp), %ecx + movl 116(%esp), %edx + calll .LmulPv384x32 + movl 88(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 72(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 68(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 60(%esp), %ebp + movl 56(%esp), %ebx + movl 52(%esp), %edi + movl 48(%esp), %esi + movl 40(%esp), %edx + movl 44(%esp), %ecx + movl 112(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + addl $92, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end175: + .size mcl_fp_mulUnitPre12L, .Lfunc_end175-mcl_fp_mulUnitPre12L + + .globl mcl_fpDbl_mulPre12L + .align 16, 0x90 + .type mcl_fpDbl_mulPre12L,@function +mcl_fpDbl_mulPre12L: # @mcl_fpDbl_mulPre12L +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $220, %esp + calll .L176$pb +.L176$pb: + popl %ebx +.Ltmp27: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp27-.L176$pb), %ebx + movl %ebx, -164(%ebp) # 4-byte Spill + movl 16(%ebp), %esi + movl %esi, 8(%esp) + movl 12(%ebp), %edi + movl %edi, 4(%esp) + movl 8(%ebp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre6L@PLT + leal 24(%esi), %eax + movl %eax, 8(%esp) + leal 24(%edi), %eax + movl %eax, 4(%esp) + movl 8(%ebp), %eax + leal 48(%eax), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre6L@PLT + movl 40(%edi), %ebx + movl 36(%edi), %eax + movl 32(%edi), %edx + movl (%edi), %esi + movl 4(%edi), %ecx + addl 24(%edi), %esi + adcl 28(%edi), %ecx + movl %ecx, -172(%ebp) # 4-byte Spill + adcl 8(%edi), %edx + movl %edx, -188(%ebp) # 4-byte Spill + adcl 12(%edi), %eax + movl %eax, -168(%ebp) # 4-byte Spill + adcl 16(%edi), %ebx + movl %ebx, -180(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -112(%ebp) # 4-byte Spill + movl 16(%ebp), %edi + movl (%edi), %eax + addl 24(%edi), %eax + movl %eax, -136(%ebp) # 4-byte Spill + movl 4(%edi), %eax + adcl 28(%edi), %eax + movl %eax, -140(%ebp) # 4-byte Spill + movl 32(%edi), %eax + adcl 8(%edi), %eax + movl %eax, -144(%ebp) # 4-byte Spill + movl 36(%edi), %eax + adcl 12(%edi), %eax + movl %eax, -148(%ebp) # 4-byte Spill + movl 40(%edi), %ecx + adcl 16(%edi), %ecx + movl 44(%edi), %eax + adcl 20(%edi), %eax + pushl %eax + seto %al + lahf + movl %eax, %edi + popl %eax + movl %edi, -184(%ebp) # 4-byte Spill + movl %ebx, %edi + movl %edx, -156(%ebp) # 4-byte Spill + movl %esi, -160(%ebp) # 4-byte Spill + movl %esi, %edx + jb .LBB176_2 +# BB#1: + xorl %edi, %edi + movl $0, -156(%ebp) # 4-byte Folded Spill + movl $0, -160(%ebp) # 4-byte Folded Spill +.LBB176_2: + movl %edi, -176(%ebp) # 4-byte Spill + movl 12(%ebp), %esi + movl 44(%esi), %edi + movl -112(%ebp), %ebx # 4-byte Reload + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + adcl 20(%esi), %edi + movl 
%edi, -132(%ebp) # 4-byte Spill + movl %eax, -124(%ebp) # 4-byte Spill + movl %ecx, -112(%ebp) # 4-byte Spill + movl -148(%ebp), %esi # 4-byte Reload + movl %esi, -116(%ebp) # 4-byte Spill + movl -144(%ebp), %esi # 4-byte Reload + movl %esi, -120(%ebp) # 4-byte Spill + movl -140(%ebp), %esi # 4-byte Reload + movl %esi, -128(%ebp) # 4-byte Spill + movl -136(%ebp), %esi # 4-byte Reload + movl %esi, -152(%ebp) # 4-byte Spill + jb .LBB176_4 +# BB#3: + movl $0, -124(%ebp) # 4-byte Folded Spill + movl $0, -112(%ebp) # 4-byte Folded Spill + movl $0, -116(%ebp) # 4-byte Folded Spill + movl $0, -120(%ebp) # 4-byte Folded Spill + movl $0, -128(%ebp) # 4-byte Folded Spill + movl $0, -152(%ebp) # 4-byte Folded Spill +.LBB176_4: + movl %edx, -84(%ebp) + movl -172(%ebp), %esi # 4-byte Reload + movl %esi, -80(%ebp) + movl -188(%ebp), %edx # 4-byte Reload + movl %edx, -76(%ebp) + movl -168(%ebp), %edi # 4-byte Reload + movl %edi, -72(%ebp) + movl -180(%ebp), %edx # 4-byte Reload + movl %edx, -68(%ebp) + movl -136(%ebp), %edx # 4-byte Reload + movl %edx, -108(%ebp) + movl -140(%ebp), %edx # 4-byte Reload + movl %edx, -104(%ebp) + movl -144(%ebp), %edx # 4-byte Reload + movl %edx, -100(%ebp) + movl -148(%ebp), %edx # 4-byte Reload + movl %edx, -96(%ebp) + movl %ecx, -92(%ebp) + movl %eax, -88(%ebp) + movl %edi, %ebx + sbbl %edx, %edx + movl -132(%ebp), %eax # 4-byte Reload + movl %eax, -64(%ebp) + movl -184(%ebp), %ecx # 4-byte Reload + pushl %eax + movl %ecx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB176_6 +# BB#5: + movl $0, %eax + movl $0, %ebx + movl $0, %esi +.LBB176_6: + movl %eax, -132(%ebp) # 4-byte Spill + sbbl %eax, %eax + leal -108(%ebp), %ecx + movl %ecx, 8(%esp) + leal -84(%ebp), %ecx + movl %ecx, 4(%esp) + leal -60(%ebp), %ecx + movl %ecx, (%esp) + andl %eax, %edx + movl -152(%ebp), %edi # 4-byte Reload + addl -160(%ebp), %edi # 4-byte Folded Reload + adcl %esi, -128(%ebp) # 4-byte Folded Spill + movl -156(%ebp), %eax # 4-byte Reload + adcl %eax, -120(%ebp) # 4-byte Folded Spill + adcl %ebx, -116(%ebp) # 4-byte Folded Spill + movl -176(%ebp), %eax # 4-byte Reload + adcl %eax, -112(%ebp) # 4-byte Folded Spill + movl -132(%ebp), %eax # 4-byte Reload + adcl %eax, -124(%ebp) # 4-byte Folded Spill + sbbl %esi, %esi + andl $1, %esi + andl $1, %edx + movl %edx, -132(%ebp) # 4-byte Spill + movl -164(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre6L@PLT + addl -36(%ebp), %edi + movl -128(%ebp), %eax # 4-byte Reload + adcl -32(%ebp), %eax + movl %eax, -128(%ebp) # 4-byte Spill + movl -120(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -120(%ebp) # 4-byte Spill + movl -116(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -116(%ebp) # 4-byte Spill + movl -112(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -112(%ebp) # 4-byte Spill + movl -124(%ebp), %eax # 4-byte Reload + adcl -16(%ebp), %eax + movl %eax, -124(%ebp) # 4-byte Spill + adcl %esi, -132(%ebp) # 4-byte Folded Spill + movl -60(%ebp), %ecx + movl 8(%ebp), %eax + subl (%eax), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + movl -56(%ebp), %esi + sbbl 4(%eax), %esi + movl -52(%ebp), %ecx + sbbl 8(%eax), %ecx + movl %ecx, -136(%ebp) # 4-byte Spill + movl -48(%ebp), %edx + sbbl 12(%eax), %edx + movl -44(%ebp), %ebx + sbbl 16(%eax), %ebx + movl -40(%ebp), %ecx + sbbl 20(%eax), %ecx + movl %ecx, -140(%ebp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, -148(%ebp) # 4-byte Spill + sbbl %ecx, %edi + movl 28(%eax), %ecx + movl %ecx, -152(%ebp) # 4-byte Spill + sbbl %ecx, -128(%ebp) # 4-byte 
Folded Spill + movl 32(%eax), %ecx + movl %ecx, -156(%ebp) # 4-byte Spill + sbbl %ecx, -120(%ebp) # 4-byte Folded Spill + movl 36(%eax), %ecx + movl %ecx, -160(%ebp) # 4-byte Spill + sbbl %ecx, -116(%ebp) # 4-byte Folded Spill + movl 40(%eax), %ecx + movl %ecx, -164(%ebp) # 4-byte Spill + sbbl %ecx, -112(%ebp) # 4-byte Folded Spill + movl 44(%eax), %ecx + movl %ecx, -168(%ebp) # 4-byte Spill + sbbl %ecx, -124(%ebp) # 4-byte Folded Spill + sbbl $0, -132(%ebp) # 4-byte Folded Spill + movl 48(%eax), %ecx + movl %ecx, -192(%ebp) # 4-byte Spill + subl %ecx, -144(%ebp) # 4-byte Folded Spill + movl 52(%eax), %ecx + movl %ecx, -196(%ebp) # 4-byte Spill + sbbl %ecx, %esi + movl 56(%eax), %ecx + movl %ecx, -200(%ebp) # 4-byte Spill + sbbl %ecx, -136(%ebp) # 4-byte Folded Spill + movl 60(%eax), %ecx + movl %ecx, -204(%ebp) # 4-byte Spill + sbbl %ecx, %edx + movl 64(%eax), %ecx + movl %ecx, -208(%ebp) # 4-byte Spill + sbbl %ecx, %ebx + movl 68(%eax), %ecx + movl %ecx, -212(%ebp) # 4-byte Spill + sbbl %ecx, -140(%ebp) # 4-byte Folded Spill + movl 72(%eax), %ecx + movl %ecx, -216(%ebp) # 4-byte Spill + sbbl %ecx, %edi + movl 76(%eax), %ecx + movl %ecx, -172(%ebp) # 4-byte Spill + sbbl %ecx, -128(%ebp) # 4-byte Folded Spill + movl 80(%eax), %ecx + movl %ecx, -176(%ebp) # 4-byte Spill + sbbl %ecx, -120(%ebp) # 4-byte Folded Spill + movl 84(%eax), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + sbbl %ecx, -116(%ebp) # 4-byte Folded Spill + movl 88(%eax), %ecx + movl %ecx, -184(%ebp) # 4-byte Spill + sbbl %ecx, -112(%ebp) # 4-byte Folded Spill + movl 92(%eax), %ecx + movl %ecx, -188(%ebp) # 4-byte Spill + sbbl %ecx, -124(%ebp) # 4-byte Folded Spill + sbbl $0, -132(%ebp) # 4-byte Folded Spill + movl -144(%ebp), %ecx # 4-byte Reload + addl -148(%ebp), %ecx # 4-byte Folded Reload + adcl -152(%ebp), %esi # 4-byte Folded Reload + movl %ecx, 24(%eax) + movl -136(%ebp), %ecx # 4-byte Reload + adcl -156(%ebp), %ecx # 4-byte Folded Reload + movl %esi, 28(%eax) + adcl -160(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 32(%eax) + adcl -164(%ebp), %ebx # 4-byte Folded Reload + movl %edx, 36(%eax) + movl -140(%ebp), %ecx # 4-byte Reload + adcl -168(%ebp), %ecx # 4-byte Folded Reload + movl %ebx, 40(%eax) + adcl -192(%ebp), %edi # 4-byte Folded Reload + movl %ecx, 44(%eax) + movl -128(%ebp), %ecx # 4-byte Reload + adcl -196(%ebp), %ecx # 4-byte Folded Reload + movl %edi, 48(%eax) + movl -120(%ebp), %edx # 4-byte Reload + adcl -200(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 52(%eax) + movl -116(%ebp), %ecx # 4-byte Reload + adcl -204(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 56(%eax) + movl -112(%ebp), %edx # 4-byte Reload + adcl -208(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 60(%eax) + movl -124(%ebp), %ecx # 4-byte Reload + adcl -212(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 64(%eax) + movl -132(%ebp), %edx # 4-byte Reload + adcl -216(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 68(%eax) + movl %edx, 72(%eax) + movl -172(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 76(%eax) + movl -176(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 80(%eax) + movl -180(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 84(%eax) + movl -184(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 88(%eax) + movl -188(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 92(%eax) + addl $220, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end176: + .size mcl_fpDbl_mulPre12L, .Lfunc_end176-mcl_fpDbl_mulPre12L + + .globl mcl_fpDbl_sqrPre12L + .align 16, 0x90 + 
.type mcl_fpDbl_sqrPre12L,@function +mcl_fpDbl_sqrPre12L: # @mcl_fpDbl_sqrPre12L +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $220, %esp + calll .L177$pb +.L177$pb: + popl %ebx +.Ltmp28: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp28-.L177$pb), %ebx + movl %ebx, -152(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + movl %edi, 8(%esp) + movl %edi, 4(%esp) + movl 8(%ebp), %esi + movl %esi, (%esp) + calll mcl_fpDbl_mulPre6L@PLT + leal 24(%edi), %eax + movl %eax, 8(%esp) + movl %eax, 4(%esp) + leal 48(%esi), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre6L@PLT + movl 44(%edi), %eax + movl %eax, -136(%ebp) # 4-byte Spill + movl 40(%edi), %edx + movl 36(%edi), %eax + movl (%edi), %ebx + movl 4(%edi), %esi + addl 24(%edi), %ebx + adcl 28(%edi), %esi + movl 32(%edi), %ecx + adcl 8(%edi), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + adcl 12(%edi), %eax + movl %eax, -140(%ebp) # 4-byte Spill + adcl 16(%edi), %edx + movl %edx, %ecx + movl -136(%ebp), %eax # 4-byte Reload + adcl 20(%edi), %eax + movl %eax, -136(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %edx + movl %edx, -156(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %edx + popl %eax + movl %edx, -124(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -120(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %edx + sbbl %edi, %edi + movl %edi, -148(%ebp) # 4-byte Spill + movl %ebx, %edi + addl %edi, %edi + movl %edi, -112(%ebp) # 4-byte Spill + movl %esi, %edi + movl %esi, %eax + adcl %edi, %edi + movl %edi, -132(%ebp) # 4-byte Spill + pushl %eax + movl %edx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB177_2 +# BB#1: + movl $0, -132(%ebp) # 4-byte Folded Spill + movl $0, -112(%ebp) # 4-byte Folded Spill +.LBB177_2: + movl -144(%ebp), %esi # 4-byte Reload + addl %esi, %esi + movl -140(%ebp), %edx # 4-byte Reload + adcl %edx, %edx + movl %edx, -116(%ebp) # 4-byte Spill + movl -120(%ebp), %edx # 4-byte Reload + pushl %eax + movl %edx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB177_3 +# BB#4: + movl $0, -116(%ebp) # 4-byte Folded Spill + movl $0, -120(%ebp) # 4-byte Folded Spill + jmp .LBB177_5 +.LBB177_3: + movl %eax, %edx + shrl $31, %edx + orl %esi, %edx + movl %edx, -120(%ebp) # 4-byte Spill +.LBB177_5: + movl -136(%ebp), %edx # 4-byte Reload + movl %ecx, %esi + addl %esi, %esi + adcl %edx, %edx + movl -124(%ebp), %edi # 4-byte Reload + pushl %eax + movl %edi, %eax + addb $127, %al + sahf + popl %eax + jb .LBB177_6 +# BB#7: + xorl %edx, %edx + movl $0, -128(%ebp) # 4-byte Folded Spill + movl -140(%ebp), %edi # 4-byte Reload + jmp .LBB177_8 +.LBB177_6: + movl %ecx, -124(%ebp) # 4-byte Spill + movl -140(%ebp), %edi # 4-byte Reload + movl %edi, %ecx + shrl $31, %ecx + orl %esi, %ecx + movl %ecx, -128(%ebp) # 4-byte Spill + movl -124(%ebp), %ecx # 4-byte Reload +.LBB177_8: + movl %edx, -124(%ebp) # 4-byte Spill + movl %ebx, -84(%ebp) + movl %eax, -80(%ebp) + movl -144(%ebp), %esi # 4-byte Reload + movl %esi, -76(%ebp) + movl %edi, -72(%ebp) + movl %ecx, -68(%ebp) + movl -136(%ebp), %edx # 4-byte Reload + movl %edx, -64(%ebp) + movl %ebx, -108(%ebp) + movl %eax, -104(%ebp) + movl %esi, -100(%ebp) + movl %edi, -96(%ebp) + movl %ecx, -92(%ebp) + movl %edx, -88(%ebp) + movl -156(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB177_9 +# BB#10: + movl $0, -136(%ebp) # 4-byte Folded Spill + jmp .LBB177_11 +.LBB177_9: + shrl $31, %edx + movl %edx, -136(%ebp) # 4-byte Spill +.LBB177_11: + leal -108(%ebp), %eax + movl %eax, 8(%esp) + leal 
-84(%ebp), %eax + movl %eax, 4(%esp) + leal -60(%ebp), %eax + movl %eax, (%esp) + movl -148(%ebp), %esi # 4-byte Reload + andl $1, %esi + movl -152(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre6L@PLT + movl -112(%ebp), %eax # 4-byte Reload + addl -36(%ebp), %eax + movl %eax, -112(%ebp) # 4-byte Spill + movl -132(%ebp), %edi # 4-byte Reload + adcl -32(%ebp), %edi + movl -120(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -120(%ebp) # 4-byte Spill + movl -116(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -116(%ebp) # 4-byte Spill + movl -128(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -128(%ebp) # 4-byte Spill + movl -124(%ebp), %eax # 4-byte Reload + adcl -16(%ebp), %eax + movl %eax, -124(%ebp) # 4-byte Spill + adcl -136(%ebp), %esi # 4-byte Folded Reload + movl -60(%ebp), %edx + movl 8(%ebp), %eax + subl (%eax), %edx + movl -56(%ebp), %ebx + sbbl 4(%eax), %ebx + movl -52(%ebp), %ecx + sbbl 8(%eax), %ecx + movl %ecx, -136(%ebp) # 4-byte Spill + movl -48(%ebp), %ecx + sbbl 12(%eax), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + movl -44(%ebp), %ecx + sbbl 16(%eax), %ecx + movl %ecx, -172(%ebp) # 4-byte Spill + movl -40(%ebp), %ecx + sbbl 20(%eax), %ecx + movl %ecx, -140(%ebp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, -148(%ebp) # 4-byte Spill + sbbl %ecx, -112(%ebp) # 4-byte Folded Spill + movl 28(%eax), %ecx + movl %ecx, -152(%ebp) # 4-byte Spill + sbbl %ecx, %edi + movl %edi, -132(%ebp) # 4-byte Spill + movl 32(%eax), %ecx + movl %ecx, -156(%ebp) # 4-byte Spill + sbbl %ecx, -120(%ebp) # 4-byte Folded Spill + movl 36(%eax), %ecx + movl %ecx, -160(%ebp) # 4-byte Spill + sbbl %ecx, -116(%ebp) # 4-byte Folded Spill + movl 40(%eax), %ecx + movl %ecx, -164(%ebp) # 4-byte Spill + sbbl %ecx, -128(%ebp) # 4-byte Folded Spill + movl 44(%eax), %ecx + movl %ecx, -168(%ebp) # 4-byte Spill + sbbl %ecx, -124(%ebp) # 4-byte Folded Spill + sbbl $0, %esi + movl 48(%eax), %ecx + movl %ecx, -192(%ebp) # 4-byte Spill + subl %ecx, %edx + movl 52(%eax), %ecx + movl %ecx, -196(%ebp) # 4-byte Spill + sbbl %ecx, %ebx + movl 56(%eax), %ecx + movl %ecx, -200(%ebp) # 4-byte Spill + sbbl %ecx, -136(%ebp) # 4-byte Folded Spill + movl 60(%eax), %ecx + movl %ecx, -204(%ebp) # 4-byte Spill + sbbl %ecx, -144(%ebp) # 4-byte Folded Spill + movl 64(%eax), %ecx + movl %ecx, -208(%ebp) # 4-byte Spill + movl -172(%ebp), %edi # 4-byte Reload + sbbl %ecx, %edi + movl 68(%eax), %ecx + movl %ecx, -212(%ebp) # 4-byte Spill + sbbl %ecx, -140(%ebp) # 4-byte Folded Spill + movl 72(%eax), %ecx + movl %ecx, -216(%ebp) # 4-byte Spill + sbbl %ecx, -112(%ebp) # 4-byte Folded Spill + movl 76(%eax), %ecx + movl %ecx, -172(%ebp) # 4-byte Spill + sbbl %ecx, -132(%ebp) # 4-byte Folded Spill + movl 80(%eax), %ecx + movl %ecx, -176(%ebp) # 4-byte Spill + sbbl %ecx, -120(%ebp) # 4-byte Folded Spill + movl 84(%eax), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + sbbl %ecx, -116(%ebp) # 4-byte Folded Spill + movl 88(%eax), %ecx + movl %ecx, -184(%ebp) # 4-byte Spill + sbbl %ecx, -128(%ebp) # 4-byte Folded Spill + movl 92(%eax), %ecx + movl %ecx, -188(%ebp) # 4-byte Spill + sbbl %ecx, -124(%ebp) # 4-byte Folded Spill + sbbl $0, %esi + addl -148(%ebp), %edx # 4-byte Folded Reload + adcl -152(%ebp), %ebx # 4-byte Folded Reload + movl %edx, 24(%eax) + movl -136(%ebp), %ecx # 4-byte Reload + adcl -156(%ebp), %ecx # 4-byte Folded Reload + movl %ebx, 28(%eax) + movl -144(%ebp), %edx # 4-byte Reload + adcl -160(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 32(%eax) + adcl -164(%ebp), %edi # 
4-byte Folded Reload + movl %edx, 36(%eax) + movl -140(%ebp), %edx # 4-byte Reload + adcl -168(%ebp), %edx # 4-byte Folded Reload + movl %edi, 40(%eax) + movl -112(%ebp), %ecx # 4-byte Reload + adcl -192(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 44(%eax) + movl -132(%ebp), %edi # 4-byte Reload + adcl -196(%ebp), %edi # 4-byte Folded Reload + movl %ecx, 48(%eax) + movl -120(%ebp), %edx # 4-byte Reload + adcl -200(%ebp), %edx # 4-byte Folded Reload + movl %edi, 52(%eax) + movl -116(%ebp), %ecx # 4-byte Reload + adcl -204(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 56(%eax) + movl -128(%ebp), %edx # 4-byte Reload + adcl -208(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 60(%eax) + movl -124(%ebp), %ecx # 4-byte Reload + adcl -212(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 64(%eax) + adcl -216(%ebp), %esi # 4-byte Folded Reload + movl %ecx, 68(%eax) + movl %esi, 72(%eax) + movl -172(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 76(%eax) + movl -176(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 80(%eax) + movl -180(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 84(%eax) + movl -184(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 88(%eax) + movl -188(%ebp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 92(%eax) + addl $220, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end177: + .size mcl_fpDbl_sqrPre12L, .Lfunc_end177-mcl_fpDbl_sqrPre12L + + .globl mcl_fp_mont12L + .align 16, 0x90 + .type mcl_fp_mont12L,@function +mcl_fp_mont12L: # @mcl_fp_mont12L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1436, %esp # imm = 0x59C + calll .L178$pb +.L178$pb: + popl %ebx +.Ltmp29: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp29-.L178$pb), %ebx + movl 1468(%esp), %eax + movl -4(%eax), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1384(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 1384(%esp), %ebp + movl 1388(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 1432(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 1428(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 1424(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 1420(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1416(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1412(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1408(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1404(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1400(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1396(%esp), %edi + movl 1392(%esp), %esi + movl %eax, (%esp) + leal 1328(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + addl 1328(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1336(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + adcl 1340(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1360(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload 
+ adcl 1364(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 1372(%esp), %esi + movl 92(%esp), %ebp # 4-byte Reload + adcl 1376(%esp), %ebp + sbbl %edi, %edi + movl 1464(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1272(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + andl $1, %edi + movl 84(%esp), %ecx # 4-byte Reload + addl 1272(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1312(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 1316(%esp), %ebp + adcl 1320(%esp), %edi + sbbl %eax, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1216(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + movl 84(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1216(%esp), %esi + movl 72(%esp), %ecx # 4-byte Reload + adcl 1220(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 1224(%esp), %esi + movl 56(%esp), %ecx # 4-byte Reload + adcl 1228(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1232(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1236(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1240(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1244(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1248(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1252(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1256(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 1260(%esp), %ebp + adcl 1264(%esp), %edi + adcl $0, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1160(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 1160(%esp), %ecx + adcl 1164(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 
4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1188(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 1200(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + adcl 1204(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1208(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1104(%esp), %ecx + movl 1468(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + andl $1, %ebp + movl %ebp, %ecx + addl 1104(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 1140(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1148(%esp), %edi + movl 84(%esp), %ebp # 4-byte Reload + adcl 1152(%esp), %ebp + adcl $0, %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1048(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 68(%esp), %ecx # 4-byte Reload + addl 1048(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1080(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1088(%esp), %edi + adcl 1092(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %ebp + movl %ebp, %eax + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %esi + movl %esi, %eax + addl 992(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + adcl 996(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1000(%esp), %ecx 
+ movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 1004(%esp), %ebp + movl 52(%esp), %ecx # 4-byte Reload + adcl 1008(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1012(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1016(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1020(%esp), %esi + movl 80(%esp), %ecx # 4-byte Reload + adcl 1024(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1028(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1032(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1036(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1040(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl %eax, %edi + adcl $0, %edi + movl 1464(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 936(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 56(%esp), %ecx # 4-byte Reload + addl 936(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 944(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 948(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 960(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 980(%esp), %esi + adcl 984(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 880(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %edi + movl %edi, %ecx + movl 56(%esp), %eax # 4-byte Reload + addl 880(%esp), %eax + movl 48(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 892(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 912(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 924(%esp), %esi + movl %esi, %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1464(%esp), 
%eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 824(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 48(%esp), %ecx # 4-byte Reload + addl 824(%esp), %ecx + movl 44(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 840(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 852(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 864(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %edi + movl %edi, %eax + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 768(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %ebp + movl %ebp, %ecx + addl 768(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 780(%esp), %ebp + adcl 784(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 800(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 808(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 1460(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + movl 44(%esp), %eax # 4-byte Reload + addl 712(%esp), %eax + movl 52(%esp), %ecx # 4-byte Reload + adcl 716(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 720(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 724(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 728(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 732(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 736(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 740(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 744(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 748(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 
64(%esp), %ecx # 4-byte Reload + adcl 752(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 756(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 760(%esp), %edi + sbbl %ebp, %ebp + movl %eax, %esi + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 656(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %ebp + movl %ebp, %eax + addl 656(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + adcl 660(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 664(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 668(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 672(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 676(%esp), %ebp + movl 92(%esp), %ecx # 4-byte Reload + adcl 680(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 684(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 688(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 692(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 696(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 704(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl %eax, %edi + adcl $0, %edi + movl 1464(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 600(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 52(%esp), %ecx # 4-byte Reload + addl 600(%esp), %ecx + adcl 604(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 616(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 620(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 636(%esp), %esi + movl 56(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 648(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + movl 44(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 544(%esp), %edi + movl 60(%esp), %ecx # 4-byte Reload + adcl 548(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 552(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 556(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 560(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 564(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + 
adcl 568(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 572(%esp), %edi + movl 72(%esp), %ecx # 4-byte Reload + adcl 576(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 580(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 584(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 588(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 592(%esp), %ebp + adcl $0, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 488(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 488(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 512(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 524(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 532(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 536(%esp), %ebp + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 432(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %edi + movl %edi, %ecx + addl 432(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 440(%esp), %edi + movl 80(%esp), %esi # 4-byte Reload + adcl 444(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 480(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 76(%esp), %ecx # 4-byte Reload + addl 376(%esp), %ecx + adcl 380(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + adcl 384(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 
68(%esp), %edi # 4-byte Reload + adcl 392(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 416(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 320(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + movl 76(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 320(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 332(%esp), %esi + adcl 336(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 344(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 360(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 88(%esp), %ecx # 4-byte Reload + addl 264(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 272(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 284(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 288(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 296(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 
(%esp) + leal 208(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + movl 88(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 208(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 224(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 232(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 236(%esp), %edi + adcl 240(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 248(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 1464(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 152(%esp), %ecx + movl 1460(%esp), %edx + calll .LmulPv384x32 + movl 80(%esp), %ecx # 4-byte Reload + addl 152(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 164(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 176(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 188(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 196(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + sbbl %esi, %esi + movl 40(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 96(%esp), %ecx + movl 1468(%esp), %edx + calll .LmulPv384x32 + andl $1, %esi + addl 96(%esp), %edi + movl 84(%esp), %ebx # 4-byte Reload + movl 92(%esp), %eax # 4-byte Reload + movl 72(%esp), %ecx # 4-byte Reload + adcl 100(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl %edx, %edi + adcl 108(%esp), %ebx + adcl 112(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 120(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 124(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 128(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 132(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 136(%esp), 
%edx + movl %edx, 60(%esp) # 4-byte Spill + adcl 140(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl %edx, %ebp + adcl $0, %esi + movl 1468(%esp), %edx + subl (%edx), %eax + sbbl 4(%edx), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl %ebx, %edi + sbbl 8(%edx), %edi + movl %edi, 20(%esp) # 4-byte Spill + sbbl 12(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + sbbl 16(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + sbbl 20(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + sbbl 24(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + sbbl 28(%edx), %ecx + movl 44(%esp), %edi # 4-byte Reload + sbbl 32(%edx), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + sbbl 36(%edx), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + sbbl 40(%edx), %edi + movl %edi, 84(%esp) # 4-byte Spill + sbbl 44(%edx), %ebp + movl %ebp, %edx + sbbl $0, %esi + andl $1, %esi + jne .LBB178_2 +# BB#1: + movl %ecx, 52(%esp) # 4-byte Spill +.LBB178_2: + movl %esi, %ecx + testb %cl, %cl + movl 92(%esp), %ecx # 4-byte Reload + jne .LBB178_4 +# BB#3: + movl %eax, %ecx +.LBB178_4: + movl 1456(%esp), %eax + movl %ecx, (%eax) + movl 68(%esp), %edi # 4-byte Reload + jne .LBB178_6 +# BB#5: + movl 16(%esp), %edi # 4-byte Reload +.LBB178_6: + movl %edi, 4(%eax) + movl 64(%esp), %ebp # 4-byte Reload + jne .LBB178_8 +# BB#7: + movl 20(%esp), %ebx # 4-byte Reload +.LBB178_8: + movl %ebx, 8(%eax) + jne .LBB178_10 +# BB#9: + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 72(%esp) # 4-byte Spill +.LBB178_10: + movl 72(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + jne .LBB178_12 +# BB#11: + movl 28(%esp), %ebp # 4-byte Reload +.LBB178_12: + movl %ebp, 16(%eax) + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB178_14 +# BB#13: + movl 32(%esp), %ecx # 4-byte Reload +.LBB178_14: + movl %ecx, 20(%eax) + movl 48(%esp), %ecx # 4-byte Reload + jne .LBB178_16 +# BB#15: + movl 36(%esp), %ecx # 4-byte Reload +.LBB178_16: + movl %ecx, 24(%eax) + movl 52(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 44(%esp), %ecx # 4-byte Reload + jne .LBB178_18 +# BB#17: + movl 40(%esp), %ecx # 4-byte Reload +.LBB178_18: + movl %ecx, 32(%eax) + movl 60(%esp), %ecx # 4-byte Reload + jne .LBB178_20 +# BB#19: + movl 80(%esp), %ecx # 4-byte Reload +.LBB178_20: + movl %ecx, 36(%eax) + movl 76(%esp), %ecx # 4-byte Reload + jne .LBB178_22 +# BB#21: + movl 84(%esp), %ecx # 4-byte Reload +.LBB178_22: + movl %ecx, 40(%eax) + movl 88(%esp), %ecx # 4-byte Reload + jne .LBB178_24 +# BB#23: + movl %edx, %ecx +.LBB178_24: + movl %ecx, 44(%eax) + addl $1436, %esp # imm = 0x59C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end178: + .size mcl_fp_mont12L, .Lfunc_end178-mcl_fp_mont12L + + .globl mcl_fp_montNF12L + .align 16, 0x90 + .type mcl_fp_montNF12L,@function +mcl_fp_montNF12L: # @mcl_fp_montNF12L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1420, %esp # imm = 0x58C + calll .L179$pb +.L179$pb: + popl %ebx +.Ltmp30: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp30-.L179$pb), %ebx + movl 1452(%esp), %eax + movl -4(%eax), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1368(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 1368(%esp), 
%ebp + movl 1372(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 1416(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1412(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1408(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1404(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1400(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1396(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1392(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 1388(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1384(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1380(%esp), %edi + movl 1376(%esp), %esi + movl %eax, (%esp) + leal 1312(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 1312(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1320(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + adcl 1324(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1328(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 1344(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 1356(%esp), %esi + movl 76(%esp), %ebp # 4-byte Reload + adcl 1360(%esp), %ebp + movl 1448(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1256(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 1304(%esp), %eax + movl 56(%esp), %edx # 4-byte Reload + addl 1256(%esp), %edx + movl 68(%esp), %ecx # 4-byte Reload + adcl 1260(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1264(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1268(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1272(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 1276(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1280(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 1284(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1288(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 1296(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 1300(%esp), %ebp + adcl $0, %eax + movl %eax, %edi + movl %edx, %esi + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1200(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 1200(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 1204(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 1208(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 1212(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1216(%esp), %eax + movl %eax, 36(%esp) # 
4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1244(%esp), %ebp + adcl 1248(%esp), %edi + movl 1448(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1144(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 1192(%esp), %eax + movl 68(%esp), %edx # 4-byte Reload + addl 1144(%esp), %edx + adcl 1148(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1152(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1156(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 1160(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + adcl 1164(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1168(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1172(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1176(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1180(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 1184(%esp), %ebp + adcl 1188(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1088(%esp), %ecx + movl 1452(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + addl 1088(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl %esi, %edi + adcl 1104(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 1124(%esp), %esi + adcl 1128(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 1136(%esp), %ebp + movl 1448(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1032(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 1080(%esp), %eax + movl 52(%esp), %edx # 4-byte Reload + addl 1032(%esp), %edx + movl 40(%esp), %ecx # 4-byte Reload + adcl 1036(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1040(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 1044(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 
48(%esp), %ecx # 4-byte Reload + adcl 1048(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1052(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1056(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1060(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 1064(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1068(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1072(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 1076(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %edi + movl %edx, %esi + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 976(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 976(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 1004(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 1012(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1024(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 920(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 968(%esp), %eax + movl 40(%esp), %edx # 4-byte Reload + addl 920(%esp), %edx + movl 36(%esp), %ecx # 4-byte Reload + adcl 924(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 928(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 932(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 936(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 940(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 944(%esp), %esi + movl 60(%esp), %ecx # 4-byte Reload + adcl 948(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 952(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 956(%esp), %edi + movl 56(%esp), %ecx # 4-byte Reload + adcl 960(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 964(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 864(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 864(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 
872(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 884(%esp), %ebp + adcl 888(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 900(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 912(%esp), %edi + movl 1448(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 808(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 856(%esp), %edx + movl 36(%esp), %ecx # 4-byte Reload + addl 808(%esp), %ecx + movl 32(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 824(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + adcl 828(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 832(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 852(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 752(%esp), %ecx + movl 1452(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + addl 752(%esp), %esi + movl 32(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 760(%esp), %edi + movl 44(%esp), %esi # 4-byte Reload + adcl 764(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 776(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 792(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1448(%esp), %ecx + movl %ecx, %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 1444(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + movl 744(%esp), %ecx + movl 32(%esp), %eax # 4-byte Reload + addl 696(%esp), %eax + adcl 
700(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + adcl 704(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 708(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 712(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 716(%esp), %esi + movl 76(%esp), %edx # 4-byte Reload + adcl 720(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 724(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 728(%esp), %edi + adcl 732(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 736(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 740(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl %eax, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 640(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 640(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 648(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 660(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 668(%esp), %esi + adcl 672(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 676(%esp), %edi + movl 40(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 584(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 632(%esp), %edx + movl 48(%esp), %ecx # 4-byte Reload + addl 584(%esp), %ecx + adcl 588(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 596(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 608(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 616(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 620(%esp), %edi + movl 36(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 528(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 528(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill 
+ movl 72(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 540(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 564(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 568(%esp), %edi + movl 32(%esp), %esi # 4-byte Reload + adcl 572(%esp), %esi + movl 48(%esp), %ebp # 4-byte Reload + adcl 576(%esp), %ebp + movl 1448(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 520(%esp), %edx + movl 44(%esp), %ecx # 4-byte Reload + addl 472(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 508(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + adcl 512(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + adcl 516(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 416(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 432(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 440(%esp), %ebp + movl 52(%esp), %esi # 4-byte Reload + adcl 444(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 360(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 408(%esp), %edx + movl 
72(%esp), %ecx # 4-byte Reload + addl 360(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 372(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 380(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + adcl 384(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 304(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 312(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 320(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 328(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 40(%esp), %esi # 4-byte Reload + adcl 332(%esp), %esi + movl 36(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 296(%esp), %edx + movl 64(%esp), %ecx # 4-byte Reload + addl 248(%esp), %ecx + adcl 252(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 260(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl %esi, %ebp + adcl 272(%esp), %ebp + movl 36(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 192(%esp), 
%ecx + movl 1452(%esp), %edx + calll .LmulPv384x32 + addl 192(%esp), %esi + adcl 196(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 200(%esp), %edi + movl 68(%esp), %esi # 4-byte Reload + adcl 204(%esp), %esi + movl 56(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 216(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + adcl 224(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1448(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 136(%esp), %ecx + movl 1444(%esp), %edx + calll .LmulPv384x32 + movl 184(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + addl 136(%esp), %ecx + adcl 140(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + adcl 144(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 152(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 160(%esp), %edi + adcl 164(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 168(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 80(%esp), %ecx + movl 1452(%esp), %eax + movl %eax, %edx + calll .LmulPv384x32 + addl 80(%esp), %esi + movl 56(%esp), %esi # 4-byte Reload + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 88(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 92(%esp), %esi + movl 52(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 100(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl 104(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl 112(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 120(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 124(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 128(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, %edx + movl 1452(%esp), %ebp + subl (%ebp), %edx + 
movl %ecx, %eax + sbbl 4(%ebp), %eax + movl %esi, %ebx + sbbl 8(%ebp), %ebx + movl 52(%esp), %ecx # 4-byte Reload + sbbl 12(%ebp), %ecx + movl 40(%esp), %edi # 4-byte Reload + sbbl 16(%ebp), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + sbbl 20(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + sbbl 24(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + sbbl 28(%ebp), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + sbbl 32(%ebp), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + sbbl 36(%ebp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + sbbl 40(%ebp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + sbbl 44(%ebp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl %edi, %ebp + sarl $31, %ebp + testl %ebp, %ebp + movl 76(%esp), %ebp # 4-byte Reload + js .LBB179_2 +# BB#1: + movl %edx, %ebp +.LBB179_2: + movl 1440(%esp), %edx + movl %ebp, (%edx) + movl 68(%esp), %edi # 4-byte Reload + js .LBB179_4 +# BB#3: + movl %eax, %edi +.LBB179_4: + movl %edi, 4(%edx) + js .LBB179_6 +# BB#5: + movl %ebx, %esi +.LBB179_6: + movl %esi, 8(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB179_8 +# BB#7: + movl %ecx, %eax +.LBB179_8: + movl %eax, 12(%edx) + movl 40(%esp), %eax # 4-byte Reload + js .LBB179_10 +# BB#9: + movl 4(%esp), %eax # 4-byte Reload +.LBB179_10: + movl %eax, 16(%edx) + movl 36(%esp), %eax # 4-byte Reload + js .LBB179_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB179_12: + movl %eax, 20(%edx) + movl 32(%esp), %eax # 4-byte Reload + js .LBB179_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB179_14: + movl %eax, 24(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB179_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB179_16: + movl %eax, 28(%edx) + movl 44(%esp), %eax # 4-byte Reload + js .LBB179_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB179_18: + movl %eax, 32(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB179_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB179_20: + movl %eax, 36(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB179_22 +# BB#21: + movl 28(%esp), %eax # 4-byte Reload +.LBB179_22: + movl %eax, 40(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB179_24 +# BB#23: + movl 56(%esp), %eax # 4-byte Reload +.LBB179_24: + movl %eax, 44(%edx) + addl $1420, %esp # imm = 0x58C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end179: + .size mcl_fp_montNF12L, .Lfunc_end179-mcl_fp_montNF12L + + .globl mcl_fp_montRed12L + .align 16, 0x90 + .type mcl_fp_montRed12L,@function +mcl_fp_montRed12L: # @mcl_fp_montRed12L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $828, %esp # imm = 0x33C + calll .L180$pb +.L180$pb: + popl %eax +.Ltmp31: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp31-.L180$pb), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 856(%esp), %edx + movl -4(%edx), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 852(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 88(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 100(%esp) # 4-byte Spill + imull %esi, %ebx + movl 92(%ecx), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 88(%ecx), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 84(%ecx), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 80(%ecx), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 76(%ecx), %eax + movl %eax, 144(%esp) # 4-byte Spill 
+ movl 72(%ecx), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 68(%ecx), %edi + movl %edi, 140(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 60(%ecx), %esi + movl %esi, 148(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl %esi, 156(%esp) # 4-byte Spill + movl 48(%ecx), %edi + movl %edi, 152(%esp) # 4-byte Spill + movl 44(%ecx), %edi + movl %edi, 132(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 32(%ecx), %edi + movl 28(%ecx), %esi + movl 24(%ecx), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 20(%ecx), %ebp + movl 16(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 12(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 8(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl (%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 776(%esp), %ecx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + movl 88(%esp), %eax # 4-byte Reload + addl 776(%esp), %eax + movl 100(%esp), %ecx # 4-byte Reload + adcl 780(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 796(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 804(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + adcl 808(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + movl 136(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + sbbl %ebp, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 720(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + andl $1, %ebp + movl %ebp, %ecx + addl 
720(%esp), %esi + movl 76(%esp), %ebp # 4-byte Reload + adcl 724(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + adcl 752(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl $0, %edi + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl %ebp, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 664(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 664(%esp), %ebp + movl 64(%esp), %ecx # 4-byte Reload + adcl 668(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 692(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl 696(%esp), %ebp + movl 132(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl 712(%esp), %edi + movl %edi, 136(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + movl 144(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 608(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 608(%esp), %esi + movl 68(%esp), %esi # 4-byte Reload + adcl 
612(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 636(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 144(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl 108(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 552(%esp), %esi + movl 72(%esp), %ecx # 4-byte Reload + adcl 556(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 120(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, %esi + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 496(%esp), %edi + movl 80(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl 
88(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %ebp # 4-byte Reload + adcl 528(%esp), %ebp + movl 136(%esp), %edi # 4-byte Reload + adcl 532(%esp), %edi + movl 148(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 104(%esp) # 4-byte Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 440(%esp), %esi + movl 88(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl 468(%esp), %ebp + movl %ebp, 156(%esp) # 4-byte Spill + adcl 472(%esp), %edi + movl %edi, 136(%esp) # 4-byte Spill + movl 148(%esp), %esi # 4-byte Reload + adcl 476(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 384(%esp), %ecx + movl 856(%esp), %eax + movl %eax, %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 384(%esp), %edi + movl 100(%esp), %ecx # 4-byte Reload + adcl 388(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 132(%esp), %ebp # 4-byte Reload + adcl 400(%esp), %ebp + movl 152(%esp), %edi # 4-byte Reload + adcl 404(%esp), %edi + movl 156(%esp), %eax # 
4-byte Reload + adcl 408(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + adcl 416(%esp), %esi + movl %esi, 148(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 140(%esp), %esi # 4-byte Reload + adcl 424(%esp), %esi + movl 124(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 76(%esp) # 4-byte Folded Spill + movl %ecx, %eax + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + movl 100(%esp), %eax # 4-byte Reload + addl 328(%esp), %eax + movl 96(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl 112(%esp), %ecx # 4-byte Reload + adcl 336(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + adcl 340(%esp), %ebp + movl %ebp, 132(%esp) # 4-byte Spill + adcl 344(%esp), %edi + movl %edi, 152(%esp) # 4-byte Spill + movl 156(%esp), %ecx # 4-byte Reload + adcl 348(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 136(%esp), %ecx # 4-byte Reload + adcl 352(%esp), %ecx + movl %ecx, 136(%esp) # 4-byte Spill + movl 148(%esp), %ecx # 4-byte Reload + adcl 356(%esp), %ecx + movl %ecx, 148(%esp) # 4-byte Spill + movl 116(%esp), %ebp # 4-byte Reload + adcl 360(%esp), %ebp + adcl 364(%esp), %esi + movl %esi, 140(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 368(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 144(%esp), %ecx # 4-byte Reload + adcl 372(%esp), %ecx + movl %ecx, 144(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 376(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + adcl $0, 120(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + movl 76(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %eax, %esi + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 272(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl 132(%esp), %ecx # 4-byte Reload + adcl 280(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 284(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 156(%esp), %ecx # 4-byte Reload + adcl 288(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 136(%esp), %ecx # 4-byte Reload + adcl 292(%esp), %ecx + movl %ecx, 136(%esp) # 4-byte Spill + movl 148(%esp), %ecx # 4-byte Reload + adcl 296(%esp), %ecx + movl %ecx, 148(%esp) # 4-byte Spill + movl %ebp, %esi + adcl 300(%esp), %esi + movl 140(%esp), %ecx # 4-byte Reload + adcl 304(%esp), %ecx + movl %ecx, 140(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 308(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 144(%esp), %ecx # 4-byte Reload + adcl 312(%esp), %ecx + movl %ecx, 144(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 316(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 320(%esp), %ecx + movl %ecx, 120(%esp) 
# 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, %ebp + movl %eax, %edi + imull 84(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 216(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 216(%esp), %edi + movl 132(%esp), %ecx # 4-byte Reload + adcl 220(%esp), %ecx + movl 152(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl 240(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 124(%esp), %esi # 4-byte Reload + adcl 248(%esp), %esi + movl 144(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl 84(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 160(%esp), %ecx + movl 856(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv384x32 + addl 160(%esp), %edi + movl 152(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl %eax, %edi + movl 156(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 136(%esp), %edx # 4-byte Reload + adcl 172(%esp), %edx + movl %edx, 136(%esp) # 4-byte Spill + movl 148(%esp), %ebx # 4-byte Reload + adcl 176(%esp), %ebx + movl %ebx, 148(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 180(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl 188(%esp), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, %ebp + subl 24(%esp), %edi # 4-byte Folded Reload + movl 156(%esp), %esi # 4-byte Reload + sbbl 16(%esp), %esi # 4-byte Folded Reload + sbbl 20(%esp), %edx # 4-byte Folded Reload + sbbl 28(%esp), %ebx # 4-byte Folded Reload + sbbl 32(%esp), %ecx # 4-byte Folded Reload + movl 140(%esp), %eax # 4-byte Reload + sbbl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + sbbl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + sbbl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 
128(%esp), %eax # 4-byte Reload + sbbl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + sbbl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 132(%esp) # 4-byte Spill + sbbl $0, %ebp + andl $1, %ebp + jne .LBB180_2 +# BB#1: + movl %ebx, 148(%esp) # 4-byte Spill +.LBB180_2: + movl %ebp, %ebx + testb %bl, %bl + movl 152(%esp), %ebx # 4-byte Reload + jne .LBB180_4 +# BB#3: + movl %edi, %ebx +.LBB180_4: + movl 848(%esp), %edi + movl %ebx, (%edi) + movl 144(%esp), %ebx # 4-byte Reload + jne .LBB180_6 +# BB#5: + movl %esi, 156(%esp) # 4-byte Spill +.LBB180_6: + movl 156(%esp), %esi # 4-byte Reload + movl %esi, 4(%edi) + movl 136(%esp), %esi # 4-byte Reload + jne .LBB180_8 +# BB#7: + movl %edx, %esi +.LBB180_8: + movl %esi, 8(%edi) + movl 148(%esp), %edx # 4-byte Reload + movl %edx, 12(%edi) + movl 128(%esp), %esi # 4-byte Reload + movl 116(%esp), %edx # 4-byte Reload + jne .LBB180_10 +# BB#9: + movl %ecx, %edx +.LBB180_10: + movl %edx, 16(%edi) + movl 120(%esp), %edx # 4-byte Reload + movl 140(%esp), %ecx # 4-byte Reload + jne .LBB180_12 +# BB#11: + movl 84(%esp), %ecx # 4-byte Reload +.LBB180_12: + movl %ecx, 20(%edi) + movl 108(%esp), %ecx # 4-byte Reload + movl 124(%esp), %eax # 4-byte Reload + jne .LBB180_14 +# BB#13: + movl 88(%esp), %eax # 4-byte Reload +.LBB180_14: + movl %eax, 24(%edi) + movl 104(%esp), %eax # 4-byte Reload + jne .LBB180_16 +# BB#15: + movl 92(%esp), %ebx # 4-byte Reload +.LBB180_16: + movl %ebx, 28(%edi) + jne .LBB180_18 +# BB#17: + movl 96(%esp), %esi # 4-byte Reload +.LBB180_18: + movl %esi, 32(%edi) + jne .LBB180_20 +# BB#19: + movl 100(%esp), %edx # 4-byte Reload +.LBB180_20: + movl %edx, 36(%edi) + jne .LBB180_22 +# BB#21: + movl 112(%esp), %ecx # 4-byte Reload +.LBB180_22: + movl %ecx, 40(%edi) + jne .LBB180_24 +# BB#23: + movl 132(%esp), %eax # 4-byte Reload +.LBB180_24: + movl %eax, 44(%edi) + addl $828, %esp # imm = 0x33C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end180: + .size mcl_fp_montRed12L, .Lfunc_end180-mcl_fp_montRed12L + + .globl mcl_fp_addPre12L + .align 16, 0x90 + .type mcl_fp_addPre12L,@function +mcl_fp_addPre12L: # @mcl_fp_addPre12L +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ebx) + movl 32(%ecx), %esi + adcl %edi, %esi + movl 36(%eax), %edi + movl %edx, 28(%ebx) + movl 36(%ecx), %edx + adcl %edi, %edx + movl 40(%eax), %edi + movl %esi, 32(%ebx) + movl 40(%ecx), %esi + adcl %edi, %esi + movl %edx, 36(%ebx) + movl %esi, 40(%ebx) + movl 44(%eax), %eax + movl 44(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 44(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl 
+.Lfunc_end181: + .size mcl_fp_addPre12L, .Lfunc_end181-mcl_fp_addPre12L + + .globl mcl_fp_subPre12L + .align 16, 0x90 + .type mcl_fp_subPre12L,@function +mcl_fp_subPre12L: # @mcl_fp_subPre12L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ebp) + movl 32(%ecx), %edi + sbbl %ebx, %edi + movl 36(%edx), %ebx + movl %esi, 28(%ebp) + movl 36(%ecx), %esi + sbbl %ebx, %esi + movl 40(%edx), %ebx + movl %edi, 32(%ebp) + movl 40(%ecx), %edi + sbbl %ebx, %edi + movl %esi, 36(%ebp) + movl %edi, 40(%ebp) + movl 44(%edx), %edx + movl 44(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 44(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end182: + .size mcl_fp_subPre12L, .Lfunc_end182-mcl_fp_subPre12L + + .globl mcl_fp_shr1_12L + .align 16, 0x90 + .type mcl_fp_shr1_12L,@function +mcl_fp_shr1_12L: # @mcl_fp_shr1_12L +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 40(%ecx) + shrl %eax + movl %eax, 44(%ecx) + popl %esi + retl +.Lfunc_end183: + .size mcl_fp_shr1_12L, .Lfunc_end183-mcl_fp_shr1_12L + + .globl mcl_fp_add12L + .align 16, 0x90 + .type mcl_fp_add12L,@function +mcl_fp_add12L: # @mcl_fp_add12L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $36, %esp + movl 64(%esp), %ebx + movl (%ebx), %edx + movl 4(%ebx), %ecx + movl 60(%esp), %eax + addl (%eax), %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl 4(%eax), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 8(%ebx), %ecx + adcl 8(%eax), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 12(%eax), %edx + movl 16(%eax), %ecx + adcl 12(%ebx), %edx + movl %edx, 24(%esp) # 4-byte Spill + adcl 16(%ebx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 20(%eax), %ecx + adcl 20(%ebx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 24(%eax), %ecx + adcl 24(%ebx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 28(%eax), %ecx + adcl 28(%ebx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 32(%eax), %ebp + adcl 32(%ebx), %ebp + movl %ebp, (%esp) # 4-byte Spill + movl 36(%eax), %edi + adcl 36(%ebx), %edi + movl 40(%eax), %esi + adcl 40(%ebx), %esi + movl 44(%eax), %edx + adcl 44(%ebx), %edx + movl 
56(%esp), %ebx + movl 4(%esp), %eax # 4-byte Reload + movl %eax, (%ebx) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%ebx) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%ebx) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%ebx) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%ebx) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 20(%ebx) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%ebx) + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%ebx) + movl %ebp, 32(%ebx) + movl %edi, 36(%ebx) + movl %esi, 40(%ebx) + movl %edx, 44(%ebx) + sbbl %ecx, %ecx + andl $1, %ecx + movl 68(%esp), %ebp + subl (%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + sbbl 4(%ebp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + sbbl 8(%ebp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + sbbl 12(%ebp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + sbbl 16(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + sbbl 20(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + sbbl 24(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 8(%esp), %eax # 4-byte Reload + sbbl 28(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl (%esp), %eax # 4-byte Reload + sbbl 32(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 36(%ebp), %edi + sbbl 40(%ebp), %esi + sbbl 44(%ebp), %edx + sbbl $0, %ecx + testb $1, %cl + jne .LBB184_2 +# BB#1: # %nocarry + movl 4(%esp), %eax # 4-byte Reload + movl %eax, (%ebx) + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 4(%ebx) + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 8(%ebx) + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 12(%ebx) + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 16(%ebx) + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 20(%ebx) + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 24(%ebx) + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 28(%ebx) + movl (%esp), %eax # 4-byte Reload + movl %eax, 32(%ebx) + movl %edi, 36(%ebx) + movl %esi, 40(%ebx) + movl %edx, 44(%ebx) +.LBB184_2: # %carry + addl $36, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end184: + .size mcl_fp_add12L, .Lfunc_end184-mcl_fp_add12L + + .globl mcl_fp_addNF12L + .align 16, 0x90 + .type mcl_fp_addNF12L,@function +mcl_fp_addNF12L: # @mcl_fp_addNF12L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $88, %esp + movl 116(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + movl 112(%esp), %edx + addl (%edx), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 4(%edx), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 44(%esi), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 40(%esi), %ebp + movl 36(%esi), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 32(%esi), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 28(%esi), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 24(%esi), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 20(%esi), %ebx + movl 16(%esi), %edi + movl 12(%esi), %ecx + movl 8(%esi), %eax + adcl 8(%edx), %eax + adcl 12(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 16(%edx), %edi + movl %edi, 52(%esp) # 4-byte Spill + adcl 20(%edx), %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 24(%edx), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 28(%edx), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 
76(%esp), %esi # 4-byte Reload + adcl 32(%edx), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 36(%edx), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl %eax, %esi + adcl 40(%edx), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 44(%edx), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 120(%esp), %ebp + movl 60(%esp), %edx # 4-byte Reload + subl (%ebp), %edx + movl 64(%esp), %eax # 4-byte Reload + sbbl 4(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill + movl %esi, %eax + sbbl 8(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 12(%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 16(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + sbbl 20(%ebp), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + movl %eax, %ecx + sbbl 24(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 32(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + sbbl 36(%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, %edi + sbbl 40(%ebp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + sbbl 44(%ebp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl %edi, %ebp + movl 60(%esp), %edi # 4-byte Reload + sarl $31, %ebp + testl %ebp, %ebp + js .LBB185_2 +# BB#1: + movl %edx, %edi +.LBB185_2: + movl 108(%esp), %edx + movl %edi, (%edx) + movl 64(%esp), %edi # 4-byte Reload + js .LBB185_4 +# BB#3: + movl (%esp), %edi # 4-byte Reload +.LBB185_4: + movl %edi, 4(%edx) + movl %eax, %ebp + js .LBB185_6 +# BB#5: + movl 4(%esp), %esi # 4-byte Reload +.LBB185_6: + movl %esi, 8(%edx) + movl %ecx, %esi + movl 52(%esp), %eax # 4-byte Reload + movl 48(%esp), %ecx # 4-byte Reload + js .LBB185_8 +# BB#7: + movl 8(%esp), %ecx # 4-byte Reload +.LBB185_8: + movl %ecx, 12(%edx) + movl 76(%esp), %ebx # 4-byte Reload + movl 84(%esp), %edi # 4-byte Reload + js .LBB185_10 +# BB#9: + movl 12(%esp), %eax # 4-byte Reload +.LBB185_10: + movl %eax, 16(%edx) + movl 80(%esp), %ecx # 4-byte Reload + js .LBB185_12 +# BB#11: + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 56(%esp) # 4-byte Spill +.LBB185_12: + movl 56(%esp), %eax # 4-byte Reload + movl %eax, 20(%edx) + js .LBB185_14 +# BB#13: + movl 20(%esp), %ebp # 4-byte Reload +.LBB185_14: + movl %ebp, 24(%edx) + js .LBB185_16 +# BB#15: + movl 24(%esp), %edi # 4-byte Reload +.LBB185_16: + movl %edi, 28(%edx) + js .LBB185_18 +# BB#17: + movl 28(%esp), %ebx # 4-byte Reload +.LBB185_18: + movl %ebx, 32(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB185_20 +# BB#19: + movl 32(%esp), %eax # 4-byte Reload +.LBB185_20: + movl %eax, 36(%edx) + js .LBB185_22 +# BB#21: + movl 36(%esp), %esi # 4-byte Reload +.LBB185_22: + movl %esi, 40(%edx) + js .LBB185_24 +# BB#23: + movl 40(%esp), %ecx # 4-byte Reload +.LBB185_24: + movl %ecx, 44(%edx) + addl $88, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end185: + .size mcl_fp_addNF12L, .Lfunc_end185-mcl_fp_addNF12L + + .globl mcl_fp_sub12L + .align 16, 0x90 + .type mcl_fp_sub12L,@function +mcl_fp_sub12L: # @mcl_fp_sub12L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 64(%esp), %esi + movl (%esi), %ecx + movl 4(%esi), %eax + xorl %ebx, %ebx + movl 68(%esp), %edi + subl (%edi), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + sbbl 4(%edi), %eax 
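+# The 12-limb (384-bit) subtraction continues below: each sbbl consumes
+# the previous borrow and emits the next one, and %ebx (zeroed above)
+# captures the final borrow so the carry path at the end can add the
+# modulus back.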
+ movl %eax, 24(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 28(%esi), %edx + sbbl 28(%edi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 32(%esi), %ecx + sbbl 32(%edi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 36(%esi), %eax + sbbl 36(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 40(%esi), %ebp + sbbl 40(%edi), %ebp + movl 44(%esi), %esi + sbbl 44(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 60(%esp), %ebx + movl 12(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl %edx, 28(%ebx) + movl %ecx, 32(%ebx) + movl %eax, 36(%ebx) + movl %ebp, 40(%ebx) + movl %esi, 44(%ebx) + je .LBB186_2 +# BB#1: # %carry + movl %esi, %edi + movl 72(%esp), %esi + movl 12(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 24(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 32(%esp), %ecx # 4-byte Reload + adcl 8(%esi), %ecx + movl 12(%esi), %eax + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebx) + movl 16(%esi), %ecx + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 16(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl %eax, 36(%ebx) + movl 40(%esi), %eax + adcl %ebp, %eax + movl %eax, 40(%ebx) + movl 44(%esi), %eax + adcl %edi, %eax + movl %eax, 44(%ebx) +.LBB186_2: # %nocarry + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end186: + .size mcl_fp_sub12L, .Lfunc_end186-mcl_fp_sub12L + + .globl mcl_fp_subNF12L + .align 16, 0x90 + .type mcl_fp_subNF12L,@function +mcl_fp_subNF12L: # @mcl_fp_subNF12L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $72, %esp + movl 96(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %eax + movl 100(%esp), %edi + subl (%edi), %edx + movl %edx, 48(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 44(%ecx), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %ebx + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 32(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 36(%esp) # 4-byte 
Spill + sbbl 24(%edi), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + sbbl 28(%edi), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl %ecx, %eax + sarl $31, %eax + movl %eax, %edx + addl %edx, %edx + movl %eax, %edi + adcl %edi, %edi + movl %eax, %ebp + adcl %ebp, %ebp + movl %eax, %esi + adcl %esi, %esi + shrl $31, %ecx + orl %edx, %ecx + movl 104(%esp), %edx + andl 12(%edx), %esi + movl %esi, 8(%esp) # 4-byte Spill + andl 8(%edx), %ebp + andl 4(%edx), %edi + andl (%edx), %ecx + movl 44(%edx), %esi + andl %eax, %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 40(%edx), %esi + andl %eax, %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 36(%edx), %esi + andl %eax, %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 32(%edx), %esi + andl %eax, %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 28(%edx), %esi + andl %eax, %esi + movl %esi, (%esp) # 4-byte Spill + movl 24(%edx), %ebx + andl %eax, %ebx + movl 20(%edx), %esi + andl %eax, %esi + andl 16(%edx), %eax + addl 48(%esp), %ecx # 4-byte Folded Reload + adcl 52(%esp), %edi # 4-byte Folded Reload + movl 92(%esp), %edx + movl %ecx, (%edx) + adcl 24(%esp), %ebp # 4-byte Folded Reload + movl %edi, 4(%edx) + movl 8(%esp), %ecx # 4-byte Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ebp, 8(%edx) + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ecx, 12(%edx) + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %eax, 16(%edx) + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %esi, 20(%edx) + movl (%esp), %ecx # 4-byte Reload + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %ebx, 24(%edx) + movl 4(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %ecx, 28(%edx) + movl 12(%esp), %ecx # 4-byte Reload + adcl 64(%esp), %ecx # 4-byte Folded Reload + movl %eax, 32(%edx) + movl 16(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %ecx, 36(%edx) + movl %eax, 40(%edx) + movl 20(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%edx) + addl $72, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end187: + .size mcl_fp_subNF12L, .Lfunc_end187-mcl_fp_subNF12L + + .globl mcl_fpDbl_add12L + .align 16, 0x90 + .type mcl_fpDbl_add12L,@function +mcl_fpDbl_add12L: # @mcl_fpDbl_add12L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $88, %esp + movl 116(%esp), %ecx + movl 112(%esp), %edi + movl 12(%edi), %esi + movl 16(%edi), %edx + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%edi), %ebp + movl 108(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%edi), %ebp + adcl 8(%edi), %ebx + adcl 12(%ecx), %esi + adcl 16(%ecx), %edx + movl %ebp, 4(%eax) + movl 56(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %esi, 12(%eax) + movl 20(%edi), %esi + adcl %ebx, %esi + movl 24(%ecx), %ebx + movl %edx, 16(%eax) + movl 24(%edi), %edx + adcl %ebx, %edx + movl 28(%ecx), %ebx + movl %esi, 20(%eax) + movl 28(%edi), %esi + adcl %ebx, %esi + movl 32(%ecx), %ebx + movl %edx, 24(%eax) + movl 32(%edi), %edx + adcl %ebx, %edx + movl 36(%ecx), %ebx + movl %esi, 28(%eax) + movl 36(%edi), %esi + adcl %ebx, %esi + movl 40(%ecx), %ebx + movl 
%edx, 32(%eax) + movl 40(%edi), %edx + adcl %ebx, %edx + movl 44(%ecx), %ebx + movl %esi, 36(%eax) + movl 44(%edi), %esi + adcl %ebx, %esi + movl 48(%ecx), %ebx + movl %edx, 40(%eax) + movl 48(%edi), %edx + adcl %ebx, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 52(%ecx), %ebx + movl %esi, 44(%eax) + movl 52(%edi), %eax + adcl %ebx, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 56(%edi), %eax + adcl %ebp, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%ecx), %eax + movl 60(%edi), %edx + adcl %eax, %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl 64(%edi), %edx + adcl %eax, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 68(%ecx), %eax + movl 68(%edi), %edx + adcl %eax, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 72(%ecx), %eax + movl 72(%edi), %edx + adcl %eax, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 76(%ecx), %eax + movl 76(%edi), %edx + adcl %eax, %edx + movl 80(%ecx), %esi + movl 80(%edi), %eax + adcl %esi, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 84(%ecx), %ebx + movl 84(%edi), %esi + adcl %ebx, %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 88(%ecx), %ebp + movl 88(%edi), %ebx + adcl %ebp, %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 92(%ecx), %ecx + movl 92(%edi), %edi + adcl %ecx, %edi + movl %edi, 44(%esp) # 4-byte Spill + sbbl %ecx, %ecx + andl $1, %ecx + movl 120(%esp), %ebp + movl 72(%esp), %edi # 4-byte Reload + subl (%ebp), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + sbbl 4(%ebp), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + sbbl 8(%ebp), %edi + movl %edi, 20(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + sbbl 12(%ebp), %edi + movl %edi, 16(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + sbbl 16(%ebp), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + sbbl 20(%ebp), %edi + movl %edi, 8(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + sbbl 24(%ebp), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl %edx, %edi + sbbl 28(%ebp), %edi + movl %edi, (%esp) # 4-byte Spill + sbbl 32(%ebp), %eax + movl %eax, 32(%esp) # 4-byte Spill + sbbl 36(%ebp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl %ebx, %eax + movl 44(%esp), %ebx # 4-byte Reload + sbbl 40(%ebp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %edi + sbbl 44(%ebp), %edi + sbbl $0, %ecx + andl $1, %ecx + jne .LBB188_2 +# BB#1: + movl %edi, %ebx +.LBB188_2: + testb %cl, %cl + movl 72(%esp), %ecx # 4-byte Reload + movl 68(%esp), %esi # 4-byte Reload + movl 64(%esp), %edi # 4-byte Reload + movl 60(%esp), %ebp # 4-byte Reload + jne .LBB188_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload + movl 4(%esp), %esi # 4-byte Reload + movl 8(%esp), %edi # 4-byte Reload + movl 12(%esp), %ebp # 4-byte Reload + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 28(%esp), %ecx # 4-byte Reload +.LBB188_4: + movl 108(%esp), %eax + movl %ecx, 48(%eax) + movl 76(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl 80(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + movl 84(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + movl %ebp, 64(%eax) + movl %edi, 68(%eax) + movl %esi, 72(%eax) + movl %edx, 76(%eax) + movl 56(%esp), %ecx # 4-byte Reload + movl 48(%esp), %edx # 4-byte Reload + jne .LBB188_6 +# BB#5: + movl 32(%esp), %edx # 4-byte Reload 
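+# mcl_fpDbl_add12L stores the low 12 result limbs unconditionally; only
+# the upper half is reduced. The .LBB188_* branches above and below
+# select, limb by limb, between the raw sum and the sum minus the
+# modulus, depending on the borrow from the trial subtraction.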
+.LBB188_6: + movl %edx, 80(%eax) + movl 52(%esp), %edx # 4-byte Reload + jne .LBB188_8 +# BB#7: + movl 36(%esp), %edx # 4-byte Reload +.LBB188_8: + movl %edx, 84(%eax) + jne .LBB188_10 +# BB#9: + movl 40(%esp), %ecx # 4-byte Reload +.LBB188_10: + movl %ecx, 88(%eax) + movl %ebx, 92(%eax) + addl $88, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end188: + .size mcl_fpDbl_add12L, .Lfunc_end188-mcl_fpDbl_add12L + + .globl mcl_fpDbl_sub12L + .align 16, 0x90 + .type mcl_fpDbl_sub12L,@function +mcl_fpDbl_sub12L: # @mcl_fpDbl_sub12L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $76, %esp + movl 100(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %edx + movl 104(%esp), %ebx + subl (%ebx), %eax + sbbl 4(%ebx), %edx + movl 8(%esi), %edi + sbbl 8(%ebx), %edi + movl 96(%esp), %ecx + movl %eax, (%ecx) + movl 12(%esi), %eax + sbbl 12(%ebx), %eax + movl %edx, 4(%ecx) + movl 16(%esi), %edx + sbbl 16(%ebx), %edx + movl %edi, 8(%ecx) + movl 20(%ebx), %edi + movl %eax, 12(%ecx) + movl 20(%esi), %eax + sbbl %edi, %eax + movl 24(%ebx), %edi + movl %edx, 16(%ecx) + movl 24(%esi), %edx + sbbl %edi, %edx + movl 28(%ebx), %edi + movl %eax, 20(%ecx) + movl 28(%esi), %eax + sbbl %edi, %eax + movl 32(%ebx), %edi + movl %edx, 24(%ecx) + movl 32(%esi), %edx + sbbl %edi, %edx + movl 36(%ebx), %edi + movl %eax, 28(%ecx) + movl 36(%esi), %eax + sbbl %edi, %eax + movl 40(%ebx), %edi + movl %edx, 32(%ecx) + movl 40(%esi), %edx + sbbl %edi, %edx + movl 44(%ebx), %edi + movl %eax, 36(%ecx) + movl 44(%esi), %eax + sbbl %edi, %eax + movl 48(%ebx), %edi + movl %edx, 40(%ecx) + movl 48(%esi), %edx + sbbl %edi, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 52(%ebx), %edx + movl %eax, 44(%ecx) + movl 52(%esi), %eax + sbbl %edx, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 56(%ebx), %eax + movl 56(%esi), %edx + sbbl %eax, %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 60(%ebx), %eax + movl 60(%esi), %edx + sbbl %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 64(%ebx), %eax + movl 64(%esi), %edx + sbbl %eax, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 68(%ebx), %eax + movl 68(%esi), %edx + sbbl %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 72(%ebx), %eax + movl 72(%esi), %edx + sbbl %eax, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 76(%ebx), %eax + movl 76(%esi), %edx + sbbl %eax, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 80(%ebx), %eax + movl 80(%esi), %edx + sbbl %eax, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 84(%ebx), %eax + movl 84(%esi), %edx + sbbl %eax, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 88(%ebx), %eax + movl 88(%esi), %edx + sbbl %eax, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 92(%ebx), %eax + movl 92(%esi), %edx + sbbl %eax, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 108(%esp), %ebp + jne .LBB189_1 +# BB#2: + movl $0, 36(%esp) # 4-byte Folded Spill + jmp .LBB189_3 +.LBB189_1: + movl 44(%ebp), %edx + movl %edx, 36(%esp) # 4-byte Spill +.LBB189_3: + testb %al, %al + jne .LBB189_4 +# BB#5: + movl $0, 12(%esp) # 4-byte Folded Spill + movl $0, %esi + jmp .LBB189_6 +.LBB189_4: + movl (%ebp), %esi + movl 4(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB189_6: + jne .LBB189_7 +# BB#8: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB189_9 +.LBB189_7: + movl 40(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB189_9: + jne .LBB189_10 +# BB#11: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB189_12 +.LBB189_10: + movl 36(%ebp), %eax + movl %eax, 
16(%esp) # 4-byte Spill +.LBB189_12: + jne .LBB189_13 +# BB#14: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB189_15 +.LBB189_13: + movl 32(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB189_15: + jne .LBB189_16 +# BB#17: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB189_18 +.LBB189_16: + movl 28(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB189_18: + jne .LBB189_19 +# BB#20: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB189_21 +.LBB189_19: + movl 24(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB189_21: + jne .LBB189_22 +# BB#23: + movl $0, %ebx + jmp .LBB189_24 +.LBB189_22: + movl 20(%ebp), %ebx +.LBB189_24: + jne .LBB189_25 +# BB#26: + movl $0, %eax + jmp .LBB189_27 +.LBB189_25: + movl 16(%ebp), %eax +.LBB189_27: + jne .LBB189_28 +# BB#29: + movl %ebp, %edx + movl $0, %ebp + jmp .LBB189_30 +.LBB189_28: + movl %ebp, %edx + movl 12(%edx), %ebp +.LBB189_30: + jne .LBB189_31 +# BB#32: + xorl %edx, %edx + jmp .LBB189_33 +.LBB189_31: + movl 8(%edx), %edx +.LBB189_33: + addl 32(%esp), %esi # 4-byte Folded Reload + movl 12(%esp), %edi # 4-byte Reload + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %esi, 48(%ecx) + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edi, 52(%ecx) + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %edx, 56(%ecx) + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %ebp, 60(%ecx) + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %eax, 64(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %ebx, 68(%ecx) + movl 4(%esp), %edx # 4-byte Reload + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %eax, 72(%ecx) + movl 8(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %edx, 76(%ecx) + movl 16(%esp), %edx # 4-byte Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %eax, 80(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %edx, 84(%ecx) + movl %eax, 88(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%ecx) + addl $76, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end189: + .size mcl_fpDbl_sub12L, .Lfunc_end189-mcl_fpDbl_sub12L + + .align 16, 0x90 + .type .LmulPv416x32,@function +.LmulPv416x32: # @mulPv416x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $88, %esp + movl %edx, %edi + movl 108(%esp), %ebp + movl %ebp, %eax + mull 48(%edi) + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 80(%esp) # 4-byte Spill + movl %ebp, %eax + mull 44(%edi) + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 72(%esp) # 4-byte Spill + movl %ebp, %eax + mull 40(%edi) + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %ebp, %eax + mull 36(%edi) + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %ebp, %eax + mull 32(%edi) + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %ebp, %eax + mull 28(%edi) + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ebp, %eax + mull 24(%edi) + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebp, %eax + mull 20(%edi) + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebp, %eax + mull 16(%edi) + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ebp, %eax + mull 12(%edi) + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ebp, %eax + mull 8(%edi) + movl %edx, %esi 
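+# .LmulPv416x32 multiplies a 13-limb (416-bit) operand by a single
+# 32-bit word: thirteen mull instructions produce the partial products,
+# and the adcl chain below assembles them into a 14-word result. The
+# Pre13L routines that follow are built on calls to this helper.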
+ movl %eax, 4(%esp) # 4-byte Spill + movl %ebp, %eax + mull 4(%edi) + movl %edx, %ebx + movl %eax, (%esp) # 4-byte Spill + movl %ebp, %eax + mull (%edi) + movl %eax, (%ecx) + addl (%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%ecx) + adcl 4(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 8(%ecx) + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%ecx) + movl 28(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%ecx) + movl 44(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%ecx) + movl 52(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ecx) + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%ecx) + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%ecx) + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%ecx) + movl 84(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 52(%ecx) + movl %ecx, %eax + addl $88, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end190: + .size .LmulPv416x32, .Lfunc_end190-.LmulPv416x32 + + .globl mcl_fp_mulUnitPre13L + .align 16, 0x90 + .type mcl_fp_mulUnitPre13L,@function +mcl_fp_mulUnitPre13L: # @mcl_fp_mulUnitPre13L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $108, %esp + calll .L191$pb +.L191$pb: + popl %ebx +.Ltmp32: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp32-.L191$pb), %ebx + movl 136(%esp), %eax + movl %eax, (%esp) + leal 48(%esp), %ecx + movl 132(%esp), %edx + calll .LmulPv416x32 + movl 100(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 88(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 72(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 68(%esp), %ebp + movl 64(%esp), %ebx + movl 60(%esp), %edi + movl 56(%esp), %esi + movl 48(%esp), %edx + movl 52(%esp), %ecx + movl 128(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + addl $108, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end191: + .size mcl_fp_mulUnitPre13L, .Lfunc_end191-mcl_fp_mulUnitPre13L + + .globl mcl_fpDbl_mulPre13L + .align 16, 0x90 + .type mcl_fpDbl_mulPre13L,@function +mcl_fpDbl_mulPre13L: # @mcl_fpDbl_mulPre13L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl 
%esi + subl $844, %esp # imm = 0x34C + calll .L192$pb +.L192$pb: + popl %edi +.Ltmp33: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp33-.L192$pb), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 872(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl 868(%esp), %edx + movl %edx, %esi + movl %edi, %ebx + calll .LmulPv416x32 + movl 836(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 828(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 804(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 800(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 796(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 792(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 784(%esp), %eax + movl 788(%esp), %ebp + movl 864(%esp), %ecx + movl %eax, (%ecx) + movl 872(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 728(%esp), %ecx + movl %esi, %edx + movl %edi, %ebx + calll .LmulPv416x32 + addl 728(%esp), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 780(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 776(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 772(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 764(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 760(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 748(%esp), %edi + movl 744(%esp), %esi + movl 740(%esp), %edx + movl 732(%esp), %eax + movl 736(%esp), %ecx + movl 864(%esp), %ebp + movl 24(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 672(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 60(%esp), %eax # 4-byte Reload + addl 
672(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 724(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 720(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 716(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 712(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 708(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 704(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 700(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 696(%esp), %ebx + movl 692(%esp), %edi + movl 688(%esp), %esi + movl 684(%esp), %edx + movl 676(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 680(%esp), %ecx + movl 864(%esp), %eax + movl 60(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 24(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 616(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 616(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 660(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 656(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 652(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 648(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 644(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 640(%esp), %ebx + movl 636(%esp), %edi + movl 632(%esp), %esi + movl 628(%esp), %edx + movl 620(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 624(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + 
movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 560(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 588(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 584(%esp), %ebx + movl 580(%esp), %edi + movl 576(%esp), %esi + movl 572(%esp), %edx + movl 564(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 568(%esp), %ecx + movl 864(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 504(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 532(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 528(%esp), %ebx + movl 524(%esp), %edi + movl 520(%esp), %esi + movl 516(%esp), %edx + movl 508(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 
512(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 448(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 448(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 492(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 472(%esp), %ebp + movl 468(%esp), %edi + movl 464(%esp), %esi + movl 460(%esp), %edx + movl 452(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 456(%esp), %ecx + movl 864(%esp), %eax + movl 56(%esp), %ebx # 4-byte Reload + movl %ebx, 24(%eax) + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 68(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 392(%esp), %ecx + movl 868(%esp), 
%edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 392(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 444(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 440(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 436(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 432(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 428(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 424(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 416(%esp), %ebx + movl 412(%esp), %edi + movl 408(%esp), %esi + movl 404(%esp), %edx + movl 396(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 400(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 60(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 336(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 336(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 380(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 376(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 360(%esp), %ebp + movl 356(%esp), %edi + movl 352(%esp), %esi + movl 348(%esp), %edx + movl 340(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 344(%esp), %ecx + movl 864(%esp), %eax + movl 60(%esp), %ebx # 4-byte Reload + movl %ebx, 32(%eax) + movl 24(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 68(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + 
movl %eax, 64(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 280(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 316(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 304(%esp), %ebx + movl 300(%esp), %edi + movl 296(%esp), %esi + movl 292(%esp), %edx + movl 284(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 288(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 872(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 224(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 272(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 248(%esp), %ebx + movl 244(%esp), %edi + movl 240(%esp), %esi + movl 236(%esp), %edx 
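+# Each pass of mcl_fpDbl_mulPre13L multiplies the full 13-limb operand
+# by the next 32-bit word of the multiplier via .LmulPv416x32, folds
+# the 14-word partial product into the running sum with adcl, and
+# writes one completed output limb (schoolbook multiplication).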
+ movl 228(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 232(%esp), %ecx + movl 864(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 60(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 872(%esp), %edi + movl 44(%edi), %eax + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 868(%esp), %eax + movl %eax, %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 60(%esp), %esi # 4-byte Reload + addl 168(%esp), %esi + movl 220(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 204(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 200(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 196(%esp), %ebp + movl 192(%esp), %ebx + movl 188(%esp), %edi + movl 184(%esp), %edx + movl 180(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 176(%esp), %ecx + movl 864(%esp), %eax + movl %esi, 44(%eax) + movl 68(%esp), %esi # 4-byte Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 48(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + adcl 104(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 52(%esp) # 
4-byte Folded Spill + movl 872(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 868(%esp), %edx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 112(%esp), %esi + movl %esi, %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 120(%esp), %edi + movl 164(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 156(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 152(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 148(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 144(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 140(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 136(%esp), %ebx + movl 132(%esp), %esi + movl 128(%esp), %edx + movl 124(%esp), %ecx + movl 864(%esp), %eax + movl %ebp, 48(%eax) + movl 68(%esp), %ebp # 4-byte Reload + movl %ebp, 52(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %edi, 56(%eax) + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %ecx, 60(%eax) + adcl 64(%esp), %esi # 4-byte Folded Reload + movl %edx, 64(%eax) + adcl 104(%esp), %ebx # 4-byte Folded Reload + movl %esi, 68(%eax) + movl 44(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ebx, 72(%eax) + movl 60(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %ecx, 76(%eax) + movl 76(%esp), %ecx # 4-byte Reload + adcl 88(%esp), %ecx # 4-byte Folded Reload + movl %edx, 80(%eax) + movl 84(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %ecx, 84(%eax) + movl 92(%esp), %ecx # 4-byte Reload + adcl 100(%esp), %ecx # 4-byte Folded Reload + movl %edx, 88(%eax) + movl %ecx, 92(%eax) + movl 96(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 96(%eax) + movl 108(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 100(%eax) + addl $844, %esp # imm = 0x34C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end192: + .size mcl_fpDbl_mulPre13L, .Lfunc_end192-mcl_fpDbl_mulPre13L + + .globl mcl_fpDbl_sqrPre13L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre13L,@function +mcl_fpDbl_sqrPre13L: # @mcl_fpDbl_sqrPre13L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $844, %esp # imm = 0x34C + calll .L193$pb +.L193$pb: + popl %ebx +.Ltmp34: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp34-.L193$pb), %ebx + movl %ebx, 108(%esp) # 4-byte Spill + movl 868(%esp), %edx + movl (%edx), %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl %edx, %edi + movl %ebx, %esi + calll .LmulPv416x32 + movl 836(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 828(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 804(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 800(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 796(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 792(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 784(%esp), %eax + movl 788(%esp), %ebp + movl 864(%esp), %ecx + movl %eax, (%ecx) + movl %edi, %edx + movl 4(%edx), %eax + movl %eax, 
(%esp) + leal 728(%esp), %ecx + movl %esi, %ebx + calll .LmulPv416x32 + addl 728(%esp), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 780(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 776(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 772(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 764(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 760(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 748(%esp), %edi + movl 744(%esp), %esi + movl 740(%esp), %edx + movl 732(%esp), %eax + movl 736(%esp), %ecx + movl 864(%esp), %ebp + movl 24(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 8(%edx), %eax + movl %eax, (%esp) + leal 672(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 60(%esp), %eax # 4-byte Reload + addl 672(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 724(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 720(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 716(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 712(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 708(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 704(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 700(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 696(%esp), %ebx + movl 692(%esp), %edi + movl 688(%esp), %esi + movl 684(%esp), %edx + movl 676(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 680(%esp), %ecx + movl 864(%esp), %eax + movl 60(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 24(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte 
Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 12(%edx), %eax + movl %eax, (%esp) + leal 616(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 616(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 660(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 656(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 652(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 648(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 644(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 640(%esp), %ebx + movl 636(%esp), %edi + movl 632(%esp), %esi + movl 628(%esp), %edx + movl 620(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 624(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 16(%edx), %eax + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 560(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 588(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 584(%esp), %ebx + movl 580(%esp), %edi 
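+# mcl_fpDbl_sqrPre13L squares by the same row-by-row schoolbook loop,
+# feeding the operand's own words into .LmulPv416x32; no dedicated
+# squaring shortcut is used in this 32-bit path.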
+ movl 576(%esp), %esi + movl 572(%esp), %edx + movl 564(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 568(%esp), %ecx + movl 864(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 20(%edx), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 504(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 532(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 528(%esp), %ebx + movl 524(%esp), %edi + movl 520(%esp), %esi + movl 516(%esp), %edx + movl 508(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 512(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 16(%esp), %ebp # 4-byte Folded Reload + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 
868(%esp), %edx + movl 24(%edx), %eax + movl %eax, (%esp) + leal 448(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 448(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 492(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 472(%esp), %ebp + movl 468(%esp), %edi + movl 464(%esp), %esi + movl 460(%esp), %edx + movl 452(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 456(%esp), %ecx + movl 864(%esp), %eax + movl 56(%esp), %ebx # 4-byte Reload + movl %ebx, 24(%eax) + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 68(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 20(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 28(%edx), %eax + movl %eax, (%esp) + leal 392(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 392(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 444(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 440(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 436(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 432(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 428(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 424(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 416(%esp), %ebx + movl 412(%esp), %edi + movl 408(%esp), %esi + movl 404(%esp), %edx + movl 396(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 400(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 60(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 36(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte 
Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 32(%edx), %eax + movl %eax, (%esp) + leal 336(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 336(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 380(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 376(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 360(%esp), %ebp + movl 356(%esp), %edi + movl 352(%esp), %esi + movl 348(%esp), %edx + movl 340(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 344(%esp), %ecx + movl 864(%esp), %eax + movl 60(%esp), %ebx # 4-byte Reload + movl %ebx, 32(%eax) + movl 24(%esp), %eax # 4-byte Reload + adcl %eax, 104(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 68(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 36(%edx), %eax + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 280(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 316(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 304(%esp), %ebx + movl 300(%esp), %edi + movl 296(%esp), %esi + movl 
292(%esp), %edx + movl 284(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 288(%esp), %ecx + movl 864(%esp), %eax + movl 104(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 24(%esp), %ebp # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 40(%edx), %eax + movl %eax, (%esp) + leal 224(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 224(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 272(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 248(%esp), %ebx + movl 244(%esp), %edi + movl 240(%esp), %esi + movl 236(%esp), %edx + movl 228(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 232(%esp), %ecx + movl 864(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 60(%esp), %eax # 4-byte Reload + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 32(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 40(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 
44(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 44(%edx), %eax + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 60(%esp), %esi # 4-byte Reload + addl 168(%esp), %esi + movl 220(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 208(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 204(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 200(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 196(%esp), %ebp + movl 192(%esp), %ebx + movl 188(%esp), %edi + movl 184(%esp), %edx + movl 180(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 176(%esp), %ecx + movl 864(%esp), %eax + movl %esi, 44(%eax) + movl 68(%esp), %esi # 4-byte Reload + adcl 24(%esp), %esi # 4-byte Folded Reload + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 40(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 48(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + adcl 104(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + movl 868(%esp), %edx + movl 48(%edx), %eax + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 108(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 112(%esp), %esi + movl %esi, %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 120(%esp), %edi + movl 164(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 156(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 152(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 148(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 144(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 140(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 136(%esp), %ebx + movl 132(%esp), %esi + movl 128(%esp), %edx + movl 124(%esp), %ecx + movl 864(%esp), %eax + movl %ebp, 48(%eax) + movl 68(%esp), %ebp # 4-byte Reload + movl %ebp, 52(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %edi, 56(%eax) + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %ecx, 60(%eax) + adcl 64(%esp), %esi # 4-byte Folded Reload + movl %edx, 64(%eax) + adcl 104(%esp), %ebx # 4-byte Folded Reload + movl %esi, 68(%eax) + movl 44(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ebx, 72(%eax) + movl 60(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %ecx, 76(%eax) + 
movl 76(%esp), %ecx # 4-byte Reload + adcl 88(%esp), %ecx # 4-byte Folded Reload + movl %edx, 80(%eax) + movl 84(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %ecx, 84(%eax) + movl 92(%esp), %ecx # 4-byte Reload + adcl 100(%esp), %ecx # 4-byte Folded Reload + movl %edx, 88(%eax) + movl %ecx, 92(%eax) + movl 96(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 96(%eax) + movl 108(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 100(%eax) + addl $844, %esp # imm = 0x34C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end193: + .size mcl_fpDbl_sqrPre13L, .Lfunc_end193-mcl_fpDbl_sqrPre13L + + .globl mcl_fp_mont13L + .align 16, 0x90 + .type mcl_fp_mont13L,@function +mcl_fp_mont13L: # @mcl_fp_mont13L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1548, %esp # imm = 0x60C + calll .L194$pb +.L194$pb: + popl %ebx +.Ltmp35: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp35-.L194$pb), %ebx + movl 1580(%esp), %eax + movl -4(%eax), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1488(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1488(%esp), %esi + movl 1492(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %esi, %eax + imull %edi, %eax + movl 1540(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 1536(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1532(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1528(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1524(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1520(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1516(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1512(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 1508(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1504(%esp), %edi + movl 1500(%esp), %ebp + movl 1496(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 1432(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 1432(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 1436(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1440(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1444(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + adcl 1448(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 1452(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1456(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1460(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1464(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1472(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1476(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1480(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1484(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + sbbl %esi, %esi + movl 1576(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1376(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + andl $1, %esi + movl 76(%esp), %ecx # 4-byte Reload + addl 1376(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + 
adcl 1380(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1384(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1388(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1400(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 1404(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 1408(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1412(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 1420(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 1428(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1320(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %esi + movl %esi, %eax + movl 76(%esp), %ecx # 4-byte Reload + addl 1320(%esp), %ecx + movl 84(%esp), %ecx # 4-byte Reload + adcl 1324(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1328(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1332(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1336(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 32(%esp), %esi # 4-byte Reload + adcl 1340(%esp), %esi + movl 36(%esp), %ecx # 4-byte Reload + adcl 1344(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + adcl 1348(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1352(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1356(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1360(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 1364(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 1368(%esp), %ebp + movl 64(%esp), %edi # 4-byte Reload + adcl 1372(%esp), %edi + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1264(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 84(%esp), %ecx # 4-byte Reload + addl 1264(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 1280(%esp), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 1300(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax 
+ movl %eax, 72(%esp) # 4-byte Spill + adcl 1308(%esp), %ebp + adcl 1312(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %ecx, %edi + movl %edi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl 1580(%esp), %eax + movl %eax, %edx + calll .LmulPv416x32 + movl 84(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1208(%esp), %edi + movl 60(%esp), %ecx # 4-byte Reload + adcl 1212(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1216(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1220(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 1224(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1228(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1232(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1236(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1240(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 1244(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 1248(%esp), %edi + adcl 1252(%esp), %ebp + movl %ebp, %esi + movl 64(%esp), %ecx # 4-byte Reload + adcl 1256(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 1260(%esp), %ebp + adcl $0, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1152(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 1152(%esp), %ecx + movl 40(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1188(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl 1192(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1200(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1204(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1096(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %ebp + movl %ebp, %eax + addl 1096(%esp), %esi + movl 40(%esp), %ecx # 4-byte Reload + adcl 1100(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1104(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 32(%esp), %ecx 
# 4-byte Reload + adcl 1108(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1112(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1116(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1120(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1124(%esp), %ebp + movl 68(%esp), %edi # 4-byte Reload + adcl 1128(%esp), %edi + movl 72(%esp), %esi # 4-byte Reload + adcl 1132(%esp), %esi + movl 80(%esp), %ecx # 4-byte Reload + adcl 1136(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1140(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1144(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1148(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1040(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 40(%esp), %ecx # 4-byte Reload + addl 1040(%esp), %ecx + movl 48(%esp), %eax # 4-byte Reload + adcl 1044(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1064(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 1068(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + adcl 1072(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1080(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 1084(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 1088(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %ebp + movl %ebp, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 984(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %edi + addl 984(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + adcl 996(%esp), %ebp + movl 44(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1028(%esp), %esi + movl 
%esi, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %edi + movl 1576(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 48(%esp), %ecx # 4-byte Reload + addl 928(%esp), %ecx + movl 32(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + adcl 936(%esp), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 944(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 980(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 872(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %edi + addl 872(%esp), %ebp + movl 32(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %ebp # 4-byte Reload + adcl 884(%esp), %ebp + adcl 888(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 904(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 816(%esp), %ecx + movl 1572(%esp), %eax + movl %eax, %edx + calll .LmulPv416x32 + movl 32(%esp), %ecx # 4-byte Reload + addl 816(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 824(%esp), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + 
movl 52(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 844(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 848(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 856(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %esi + movl %esi, %eax + movl 32(%esp), %ecx # 4-byte Reload + addl 760(%esp), %ecx + movl 36(%esp), %ecx # 4-byte Reload + adcl 764(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 768(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 772(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 776(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 780(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 784(%esp), %esi + movl 80(%esp), %ecx # 4-byte Reload + adcl 788(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 792(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 796(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 800(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 804(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 808(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 812(%esp), %edi + adcl $0, %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 36(%esp), %eax # 4-byte Reload + addl 704(%esp), %eax + movl 44(%esp), %ecx # 4-byte Reload + adcl 708(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 712(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 716(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 720(%esp), %ebp + adcl 724(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 728(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 732(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 736(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 740(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 744(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 748(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 752(%esp), %edi + movl %edi, 40(%esp) # 
4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 756(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %eax, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 648(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + movl %edi, %eax + andl $1, %eax + addl 648(%esp), %esi + movl 44(%esp), %ecx # 4-byte Reload + adcl 652(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 656(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 660(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 664(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 668(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 672(%esp), %edi + movl 64(%esp), %esi # 4-byte Reload + adcl 676(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 680(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 684(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 688(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 692(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 696(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 592(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 44(%esp), %ecx # 4-byte Reload + addl 592(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 600(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 612(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + adcl 616(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 620(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 536(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + movl 44(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 536(%esp), %esi + movl 56(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 544(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 
4-byte Reload + adcl 556(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 560(%esp), %esi + adcl 564(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 572(%esp), %edi + movl 48(%esp), %ebp # 4-byte Reload + adcl 576(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 480(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 56(%esp), %ecx # 4-byte Reload + addl 480(%esp), %ecx + movl 52(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 500(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 512(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + adcl 516(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + movl %edi, %ecx + andl $1, %ecx + addl 424(%esp), %esi + movl 52(%esp), %ebp # 4-byte Reload + adcl 428(%esp), %ebp + movl 68(%esp), %edi # 4-byte Reload + adcl 432(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 444(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %ecx + movl 
%ecx, 56(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + addl 368(%esp), %ebp + adcl 372(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 376(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 384(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 392(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl %ebp, %eax + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + movl 52(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 312(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 320(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 328(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 336(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 340(%esp), %edi + movl 48(%esp), %esi # 4-byte Reload + adcl 344(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 256(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 68(%esp), %ecx # 4-byte Reload + addl 256(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 268(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 280(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + adcl 284(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 40(%esp), %edi # 4-byte Reload + adcl 
288(%esp), %edi + movl 32(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 28(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 200(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %ebp + movl %ebp, %ecx + addl 200(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 208(%esp), %ebp + movl 64(%esp), %esi # 4-byte Reload + adcl 212(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 232(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 32(%esp), %edi # 4-byte Reload + adcl 236(%esp), %edi + movl 36(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 144(%esp), %ecx + adcl 148(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl 152(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 156(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 168(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 176(%esp), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 28(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 88(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + andl $1, %edi + addl 88(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + movl 84(%esp), %esi # 4-byte Reload + adcl 
92(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 96(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 100(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + adcl 104(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ebx # 4-byte Reload + adcl 112(%esp), %ebx + movl %ebx, 48(%esp) # 4-byte Spill + movl 40(%esp), %ebx # 4-byte Reload + adcl 116(%esp), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 32(%esp), %ebx # 4-byte Reload + adcl 120(%esp), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + movl 36(%esp), %ebx # 4-byte Reload + adcl 124(%esp), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ebx # 4-byte Reload + adcl 128(%esp), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 56(%esp), %ebx # 4-byte Reload + adcl 132(%esp), %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ebx # 4-byte Reload + adcl 136(%esp), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 68(%esp), %ebx # 4-byte Reload + adcl 140(%esp), %ebx + movl %ebx, 68(%esp) # 4-byte Spill + adcl $0, %edi + movl 1580(%esp), %ebx + subl (%ebx), %eax + sbbl 4(%ebx), %ecx + sbbl 8(%ebx), %ebp + sbbl 12(%ebx), %esi + movl %esi, 4(%esp) # 4-byte Spill + sbbl 16(%ebx), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + sbbl 20(%ebx), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + sbbl 24(%ebx), %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + sbbl 28(%ebx), %edx + movl 36(%esp), %esi # 4-byte Reload + sbbl 32(%ebx), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 36(%ebx), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + sbbl 40(%ebx), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + sbbl 44(%ebx), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + sbbl 48(%ebx), %esi + movl %esi, %ebx + sbbl $0, %edi + andl $1, %edi + jne .LBB194_2 +# BB#1: + movl %edx, 32(%esp) # 4-byte Spill +.LBB194_2: + movl %edi, %edx + testb %dl, %dl + movl 80(%esp), %edx # 4-byte Reload + jne .LBB194_4 +# BB#3: + movl %eax, %edx +.LBB194_4: + movl 1568(%esp), %eax + movl %edx, (%eax) + movl 64(%esp), %esi # 4-byte Reload + jne .LBB194_6 +# BB#5: + movl %ecx, %esi +.LBB194_6: + movl %esi, 4(%eax) + jne .LBB194_8 +# BB#7: + movl %ebp, 76(%esp) # 4-byte Spill +.LBB194_8: + movl 76(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + movl 60(%esp), %ebp # 4-byte Reload + jne .LBB194_10 +# BB#9: + movl 4(%esp), %ecx # 4-byte Reload + movl %ecx, 84(%esp) # 4-byte Spill +.LBB194_10: + movl 84(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + jne .LBB194_12 +# BB#11: + movl 8(%esp), %ebp # 4-byte Reload +.LBB194_12: + movl %ebp, 16(%eax) + movl 48(%esp), %ecx # 4-byte Reload + jne .LBB194_14 +# BB#13: + movl 12(%esp), %ecx # 4-byte Reload +.LBB194_14: + movl %ecx, 20(%eax) + movl 40(%esp), %ecx # 4-byte Reload + jne .LBB194_16 +# BB#15: + movl 16(%esp), %ecx # 4-byte Reload +.LBB194_16: + movl %ecx, 24(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 36(%esp), %ecx # 4-byte Reload + jne .LBB194_18 +# BB#17: + movl 20(%esp), %ecx # 4-byte Reload +.LBB194_18: + movl %ecx, 32(%eax) + movl 44(%esp), %ecx # 4-byte Reload + jne .LBB194_20 +# BB#19: + movl 24(%esp), %ecx # 4-byte Reload +.LBB194_20: + movl %ecx, 36(%eax) + movl 56(%esp), 
%ecx # 4-byte Reload + jne .LBB194_22 +# BB#21: + movl 28(%esp), %ecx # 4-byte Reload +.LBB194_22: + movl %ecx, 40(%eax) + movl 52(%esp), %ecx # 4-byte Reload + jne .LBB194_24 +# BB#23: + movl 72(%esp), %ecx # 4-byte Reload +.LBB194_24: + movl %ecx, 44(%eax) + movl 68(%esp), %ecx # 4-byte Reload + jne .LBB194_26 +# BB#25: + movl %ebx, %ecx +.LBB194_26: + movl %ecx, 48(%eax) + addl $1548, %esp # imm = 0x60C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end194: + .size mcl_fp_mont13L, .Lfunc_end194-mcl_fp_mont13L + + .globl mcl_fp_montNF13L + .align 16, 0x90 + .type mcl_fp_montNF13L,@function +mcl_fp_montNF13L: # @mcl_fp_montNF13L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1548, %esp # imm = 0x60C + calll .L195$pb +.L195$pb: + popl %ebx +.Ltmp36: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp36-.L195$pb), %ebx + movl 1580(%esp), %eax + movl -4(%eax), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1488(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1488(%esp), %edi + movl 1492(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl %edi, %eax + imull %esi, %eax + movl 1540(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 1536(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1532(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1528(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1524(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1520(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1516(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1512(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 1508(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1504(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1500(%esp), %esi + movl 1496(%esp), %ebp + movl %eax, (%esp) + leal 1432(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 1432(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 1436(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 1440(%esp), %ebp + adcl 1444(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1448(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1452(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1456(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1460(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1464(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 1472(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 1476(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1480(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 1484(%esp), %edi + movl 1576(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1376(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1428(%esp), %ecx + movl 80(%esp), %edx # 4-byte Reload + addl 1376(%esp), %edx + adcl 1380(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 1384(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 
1392(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1400(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1404(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1408(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1412(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1424(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1320(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 1320(%esp), %esi + adcl 1324(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 1328(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + adcl 1340(%esp), %esi + movl 44(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 1360(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1264(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1316(%esp), %eax + addl 1264(%esp), %ebp + movl 64(%esp), %ecx # 4-byte Reload + adcl 1268(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1272(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1276(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 1280(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + adcl 1284(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + adcl 1288(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1296(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 1300(%esp), %edi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1304(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1308(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1312(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %ebp, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl 
1580(%esp), %edx + calll .LmulPv416x32 + addl 1208(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 1212(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1216(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + adcl 1228(%esp), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1244(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 1248(%esp), %esi + movl 84(%esp), %edi # 4-byte Reload + adcl 1252(%esp), %edi + movl 80(%esp), %ebp # 4-byte Reload + adcl 1256(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1260(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1152(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1204(%esp), %eax + movl 64(%esp), %edx # 4-byte Reload + addl 1152(%esp), %edx + movl 40(%esp), %ecx # 4-byte Reload + adcl 1156(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1160(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1164(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1168(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1172(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1176(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1180(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1184(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 1188(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + adcl 1192(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + adcl 1196(%esp), %ebp + movl 68(%esp), %ecx # 4-byte Reload + adcl 1200(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1096(%esp), %ecx + movl 1580(%esp), %eax + movl %eax, %edx + calll .LmulPv416x32 + addl 1096(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 1116(%esp), %esi + movl 56(%esp), %edi # 4-byte Reload + adcl 1120(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 
1136(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1140(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 1148(%esp), %ebp + movl 1576(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1040(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 1092(%esp), %eax + movl 40(%esp), %edx # 4-byte Reload + addl 1040(%esp), %edx + movl 48(%esp), %ecx # 4-byte Reload + adcl 1044(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 36(%esp), %ecx # 4-byte Reload + adcl 1048(%esp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1052(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + adcl 1056(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl 1060(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1064(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1068(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1072(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1076(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1080(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1084(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 1088(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl %eax, %esi + adcl $0, %esi + movl %edx, %edi + movl %edi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 984(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 984(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 996(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 1008(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1036(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 980(%esp), %eax + movl 48(%esp), %ecx # 4-byte Reload + addl 928(%esp), %ecx + movl 36(%esp), %edx # 4-byte Reload + adcl 932(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl 936(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 940(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 944(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl 
948(%esp), %ebp + movl 72(%esp), %edx # 4-byte Reload + adcl 952(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 956(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 960(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 964(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 968(%esp), %esi + movl 64(%esp), %edx # 4-byte Reload + adcl 972(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 40(%esp), %edx # 4-byte Reload + adcl 976(%esp), %edx + movl %edx, 40(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl %ecx, %edi + movl %edi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 872(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 872(%esp), %edi + movl 36(%esp), %edi # 4-byte Reload + adcl 876(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 892(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 912(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 816(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 868(%esp), %edx + addl 816(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 832(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 836(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 860(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 36(%esp) # 4-byte Spill + movl %edi, %eax + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 760(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 
764(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 780(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 784(%esp), %esi + movl 84(%esp), %edi # 4-byte Reload + adcl 788(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 804(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 756(%esp), %eax + movl 44(%esp), %ecx # 4-byte Reload + addl 704(%esp), %ecx + movl 52(%esp), %edx # 4-byte Reload + adcl 708(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 712(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 716(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 720(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + adcl 724(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + adcl 728(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 732(%esp), %esi + movl 68(%esp), %edx # 4-byte Reload + adcl 736(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 740(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl 744(%esp), %ebp + movl 48(%esp), %edx # 4-byte Reload + adcl 748(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 752(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 648(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 648(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 676(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 688(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 696(%esp), 
%edi + movl 44(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 592(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 644(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + addl 592(%esp), %ecx + movl 56(%esp), %esi # 4-byte Reload + adcl 596(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 624(%esp), %ebp + movl 40(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 636(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 536(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 536(%esp), %edi + adcl 540(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 556(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 564(%esp), %esi + adcl 568(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 572(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 480(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 532(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + addl 480(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 496(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 504(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 508(%esp), %edi + adcl 512(%esp), %ebp + movl %ebp, 
40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 528(%esp), %ebp + adcl $0, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 424(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %edi, %esi + adcl 452(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 460(%esp), %edi + movl 36(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + adcl 472(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 420(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + addl 368(%esp), %ecx + movl 72(%esp), %ebp # 4-byte Reload + adcl 372(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 392(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + adcl 400(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 36(%esp), %edi # 4-byte Reload + adcl 404(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 312(%esp), %esi + adcl 316(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 320(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 
80(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 332(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 348(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 256(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 308(%esp), %edx + movl 72(%esp), %ecx # 4-byte Reload + addl 256(%esp), %ecx + adcl 260(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 272(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 288(%esp), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 32(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 200(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 200(%esp), %esi + adcl 204(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 212(%esp), %ebp + movl 68(%esp), %esi # 4-byte Reload + adcl 216(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 228(%esp), %edi + movl 36(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1576(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 144(%esp), %ecx + movl 1572(%esp), %edx + calll .LmulPv416x32 + movl 
196(%esp), %edx + movl 76(%esp), %ecx # 4-byte Reload + addl 144(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 148(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 152(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl 156(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + adcl 164(%esp), %ebp + adcl 168(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + adcl 172(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 176(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 88(%esp), %ecx + movl 1580(%esp), %edx + calll .LmulPv416x32 + addl 88(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + movl 68(%esp), %edi # 4-byte Reload + adcl 92(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 96(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 100(%esp), %edi + movl 64(%esp), %ebx # 4-byte Reload + adcl 104(%esp), %ebx + movl %ebx, 64(%esp) # 4-byte Spill + adcl 108(%esp), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl %ebp, %esi + movl 48(%esp), %edx # 4-byte Reload + adcl 112(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + adcl 116(%esp), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 120(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 124(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 128(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 132(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 136(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 140(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, %edx + movl 1580(%esp), %eax + subl (%eax), %edx + movl %ecx, %ebp + sbbl 4(%eax), %ebp + movl %edi, %ecx + sbbl 8(%eax), %ecx + sbbl 12(%eax), %ebx + sbbl 16(%eax), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + sbbl 20(%eax), %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 36(%esp), %esi # 4-byte Reload + sbbl 24(%eax), %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 28(%eax), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + sbbl 32(%eax), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + sbbl 36(%eax), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + sbbl 40(%eax), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + sbbl 44(%eax), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + sbbl 48(%eax), %esi + movl %esi, 68(%esp) # 4-byte Spill + 
movl %esi, %eax + sarl $31, %eax + testl %eax, %eax + movl 84(%esp), %eax # 4-byte Reload + js .LBB195_2 +# BB#1: + movl %edx, %eax +.LBB195_2: + movl 1568(%esp), %edx + movl %eax, (%edx) + movl 80(%esp), %esi # 4-byte Reload + js .LBB195_4 +# BB#3: + movl %ebp, %esi +.LBB195_4: + movl %esi, 4(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB195_6 +# BB#5: + movl %ecx, %edi +.LBB195_6: + movl %edi, 8(%edx) + js .LBB195_8 +# BB#7: + movl %ebx, %eax +.LBB195_8: + movl %eax, 12(%edx) + movl 40(%esp), %eax # 4-byte Reload + js .LBB195_10 +# BB#9: + movl 4(%esp), %eax # 4-byte Reload +.LBB195_10: + movl %eax, 16(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB195_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB195_12: + movl %eax, 20(%edx) + movl 36(%esp), %eax # 4-byte Reload + js .LBB195_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB195_14: + movl %eax, 24(%edx) + movl 44(%esp), %eax # 4-byte Reload + js .LBB195_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB195_16: + movl %eax, 28(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB195_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB195_18: + movl %eax, 32(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB195_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB195_20: + movl %eax, 36(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB195_22 +# BB#21: + movl 28(%esp), %eax # 4-byte Reload +.LBB195_22: + movl %eax, 40(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB195_24 +# BB#23: + movl 32(%esp), %eax # 4-byte Reload +.LBB195_24: + movl %eax, 44(%edx) + movl 76(%esp), %eax # 4-byte Reload + js .LBB195_26 +# BB#25: + movl 68(%esp), %eax # 4-byte Reload +.LBB195_26: + movl %eax, 48(%edx) + addl $1548, %esp # imm = 0x60C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end195: + .size mcl_fp_montNF13L, .Lfunc_end195-mcl_fp_montNF13L + + .globl mcl_fp_montRed13L + .align 16, 0x90 + .type mcl_fp_montRed13L,@function +mcl_fp_montRed13L: # @mcl_fp_montRed13L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $892, %esp # imm = 0x37C + calll .L196$pb +.L196$pb: + popl %eax +.Ltmp37: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp37-.L196$pb), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 920(%esp), %edx + movl -4(%edx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 916(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 76(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 80(%esp) # 4-byte Spill + imull %eax, %ebx + movl 100(%ecx), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 96(%ecx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%ecx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 88(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%ecx), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 80(%ecx), %esi + movl %esi, 136(%esp) # 4-byte Spill + movl 76(%ecx), %esi + movl %esi, 144(%esp) # 4-byte Spill + movl 72(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 68(%ecx), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 64(%ecx), %esi + movl %esi, 148(%esp) # 4-byte Spill + movl 60(%ecx), %esi + movl %esi, 152(%esp) # 4-byte Spill + movl 56(%ecx), %esi + movl %esi, 140(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl %esi, 156(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 44(%ecx), %ebp + movl %ebp, 124(%esp) # 4-byte Spill + movl 40(%ecx), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 36(%ecx), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 88(%esp) # 4-byte 
Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %edi + movl 20(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 16(%ecx), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 12(%ecx), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 8(%ecx), %esi + movl (%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 832(%esp), %ecx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + movl 76(%esp), %eax # 4-byte Reload + addl 832(%esp), %eax + movl 80(%esp), %ecx # 4-byte Reload + adcl 836(%esp), %ecx + adcl 840(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 856(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + adcl 860(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl $0, 140(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %edi + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 776(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + andl $1, %esi + addl 776(%esp), %edi + movl 80(%esp), %ecx # 4-byte Reload + adcl 780(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + 
movl 76(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + movl 132(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, %edi + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 80(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 720(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 720(%esp), %esi + movl 56(%esp), %esi # 4-byte Reload + adcl 724(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 132(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl 100(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 664(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 664(%esp), %esi + movl 60(%esp), %ecx 
# 4-byte Reload + adcl 668(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 96(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 608(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 608(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 612(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + movl 144(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 92(%esp) # 4-byte Spill 
+ adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 552(%esp), %esi + movl 68(%esp), %ecx # 4-byte Reload + adcl 556(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 144(%esp) # 4-byte Spill + movl 136(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 112(%esp) # 4-byte Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 496(%esp), %edi + movl 76(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 152(%esp), %ebp # 4-byte Reload + adcl 532(%esp), %ebp + movl 148(%esp), %edi # 4-byte Reload + adcl 536(%esp), %edi + movl 128(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 136(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 
92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 440(%esp), %esi + movl 88(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl 472(%esp), %ebp + movl %ebp, 152(%esp) # 4-byte Spill + adcl 476(%esp), %edi + movl %edi, 148(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 384(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 384(%esp), %esi + movl 104(%esp), %ecx # 4-byte Reload + adcl 388(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 156(%esp), %ebp # 4-byte Reload + adcl 404(%esp), %ebp + movl 140(%esp), %edi # 4-byte Reload + adcl 408(%esp), %edi + movl 152(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 128(%esp), %esi # 4-byte Reload + adcl 420(%esp), %esi + movl 132(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %ecx, %eax + imull 72(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 920(%esp), %eax + movl %eax, %edx + movl 84(%esp), %ebx # 4-byte 
Reload + calll .LmulPv416x32 + movl 104(%esp), %eax # 4-byte Reload + addl 328(%esp), %eax + movl 108(%esp), %ecx # 4-byte Reload + adcl 332(%esp), %ecx + movl 124(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 344(%esp), %ebp + movl %ebp, 156(%esp) # 4-byte Spill + adcl 348(%esp), %edi + movl %edi, 140(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl 360(%esp), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, 96(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl %ecx, %edi + movl %edi, %eax + movl 72(%esp), %esi # 4-byte Reload + imull %esi, %eax + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 272(%esp), %edi + movl 124(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl 120(%esp), %edi # 4-byte Reload + adcl 280(%esp), %edi + movl 156(%esp), %ecx # 4-byte Reload + adcl 284(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 140(%esp), %ecx # 4-byte Reload + adcl 288(%esp), %ecx + movl %ecx, 140(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 292(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 148(%esp), %ecx # 4-byte Reload + adcl 296(%esp), %ecx + movl %ecx, 148(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 300(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 304(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 144(%esp), %ecx # 4-byte Reload + adcl 308(%esp), %ecx + movl %ecx, 144(%esp) # 4-byte Spill + movl 136(%esp), %ecx # 4-byte Reload + adcl 312(%esp), %ecx + movl %ecx, 136(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 316(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 320(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 324(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl %eax, %ebp + imull %esi, %eax + movl %eax, (%esp) + leal 216(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 216(%esp), %ebp + movl %edi, %ecx + adcl 220(%esp), %ecx + movl 156(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 140(%esp), %ebp # 4-byte Reload + adcl 228(%esp), %ebp + movl 152(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 
236(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %edi # 4-byte Reload + adcl 244(%esp), %edi + movl 144(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 92(%esp) # 4-byte Folded Spill + adcl $0, 80(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 160(%esp), %ecx + movl 920(%esp), %edx + movl 84(%esp), %ebx # 4-byte Reload + calll .LmulPv416x32 + addl 160(%esp), %esi + movl 156(%esp), %eax # 4-byte Reload + adcl 164(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl 168(%esp), %ebp + movl %ebp, 140(%esp) # 4-byte Spill + movl %ebp, %ebx + movl 152(%esp), %ecx # 4-byte Reload + adcl 172(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 148(%esp), %ebp # 4-byte Reload + adcl 176(%esp), %ebp + movl %ebp, 148(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl %eax, %edx + movl %edi, %eax + adcl 184(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 188(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl 156(%esp), %edi # 4-byte Reload + subl 12(%esp), %edi # 4-byte Folded Reload + sbbl 4(%esp), %ebx # 4-byte Folded Reload + sbbl 8(%esp), %ecx # 4-byte Folded Reload + sbbl 16(%esp), %ebp # 4-byte Folded Reload + sbbl 20(%esp), %edx # 4-byte Folded Reload + movl %edx, 72(%esp) # 4-byte Spill + movl 132(%esp), %edx # 4-byte Reload + sbbl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 76(%esp) # 4-byte Spill + movl 144(%esp), %edx # 4-byte Reload + sbbl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 80(%esp) # 4-byte Spill + movl 136(%esp), %edx # 4-byte Reload + sbbl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 84(%esp) # 4-byte Spill + movl 116(%esp), %edx # 4-byte Reload + sbbl 36(%esp), %edx # 4-byte Folded Reload + movl %edx, 88(%esp) # 4-byte Spill + movl 112(%esp), %edx # 4-byte Reload + sbbl 40(%esp), %edx # 4-byte Folded Reload + movl %edx, 104(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + sbbl 44(%esp), %edx # 4-byte Folded Reload + movl %edx, 108(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + sbbl 48(%esp), %edx # 4-byte Folded Reload + movl %edx, 
120(%esp) # 4-byte Spill + movl %eax, %edx + movl %esi, %eax + sbbl 52(%esp), %edx # 4-byte Folded Reload + movl %edx, 124(%esp) # 4-byte Spill + sbbl $0, %eax + andl $1, %eax + jne .LBB196_2 +# BB#1: + movl %ebp, 148(%esp) # 4-byte Spill +.LBB196_2: + testb %al, %al + movl 156(%esp), %ebp # 4-byte Reload + jne .LBB196_4 +# BB#3: + movl %edi, %ebp +.LBB196_4: + movl 912(%esp), %edi + movl %ebp, (%edi) + movl 140(%esp), %ebp # 4-byte Reload + jne .LBB196_6 +# BB#5: + movl %ebx, %ebp +.LBB196_6: + movl %ebp, 4(%edi) + movl 152(%esp), %ebx # 4-byte Reload + jne .LBB196_8 +# BB#7: + movl %ecx, %ebx +.LBB196_8: + movl %ebx, 8(%edi) + movl 148(%esp), %esi # 4-byte Reload + movl %esi, 12(%edi) + movl 116(%esp), %ebx # 4-byte Reload + movl 128(%esp), %esi # 4-byte Reload + jne .LBB196_10 +# BB#9: + movl 72(%esp), %esi # 4-byte Reload +.LBB196_10: + movl %esi, 16(%edi) + movl 112(%esp), %esi # 4-byte Reload + movl 132(%esp), %edx # 4-byte Reload + jne .LBB196_12 +# BB#11: + movl 76(%esp), %edx # 4-byte Reload +.LBB196_12: + movl %edx, 20(%edi) + movl 96(%esp), %edx # 4-byte Reload + movl 144(%esp), %ecx # 4-byte Reload + jne .LBB196_14 +# BB#13: + movl 80(%esp), %ecx # 4-byte Reload +.LBB196_14: + movl %ecx, 24(%edi) + movl 100(%esp), %ecx # 4-byte Reload + movl 136(%esp), %eax # 4-byte Reload + jne .LBB196_16 +# BB#15: + movl 84(%esp), %eax # 4-byte Reload +.LBB196_16: + movl %eax, 28(%edi) + movl 92(%esp), %eax # 4-byte Reload + jne .LBB196_18 +# BB#17: + movl 88(%esp), %ebx # 4-byte Reload +.LBB196_18: + movl %ebx, 32(%edi) + jne .LBB196_20 +# BB#19: + movl 104(%esp), %esi # 4-byte Reload +.LBB196_20: + movl %esi, 36(%edi) + jne .LBB196_22 +# BB#21: + movl 108(%esp), %edx # 4-byte Reload +.LBB196_22: + movl %edx, 40(%edi) + jne .LBB196_24 +# BB#23: + movl 120(%esp), %ecx # 4-byte Reload +.LBB196_24: + movl %ecx, 44(%edi) + jne .LBB196_26 +# BB#25: + movl 124(%esp), %eax # 4-byte Reload +.LBB196_26: + movl %eax, 48(%edi) + addl $892, %esp # imm = 0x37C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end196: + .size mcl_fp_montRed13L, .Lfunc_end196-mcl_fp_montRed13L + + .globl mcl_fp_addPre13L + .align 16, 0x90 + .type mcl_fp_addPre13L,@function +mcl_fp_addPre13L: # @mcl_fp_addPre13L +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %edi + adcl 8(%ecx), %edi + movl 16(%esp), %ebx + movl %edx, (%ebx) + movl 12(%ecx), %edx + movl %esi, 4(%ebx) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %edi, 8(%ebx) + movl 20(%eax), %edi + movl %edx, 12(%ebx) + movl 20(%ecx), %edx + adcl %edi, %edx + movl 24(%eax), %edi + movl %esi, 16(%ebx) + movl 24(%ecx), %esi + adcl %edi, %esi + movl 28(%eax), %edi + movl %edx, 20(%ebx) + movl 28(%ecx), %edx + adcl %edi, %edx + movl 32(%eax), %edi + movl %esi, 24(%ebx) + movl 32(%ecx), %esi + adcl %edi, %esi + movl 36(%eax), %edi + movl %edx, 28(%ebx) + movl 36(%ecx), %edx + adcl %edi, %edx + movl 40(%eax), %edi + movl %esi, 32(%ebx) + movl 40(%ecx), %esi + adcl %edi, %esi + movl 44(%eax), %edi + movl %edx, 36(%ebx) + movl 44(%ecx), %edx + adcl %edi, %edx + movl %esi, 40(%ebx) + movl %edx, 44(%ebx) + movl 48(%eax), %eax + movl 48(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 48(%ebx) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end197: + .size mcl_fp_addPre13L, .Lfunc_end197-mcl_fp_addPre13L + + .globl mcl_fp_subPre13L + .align 16, 0x90 + .type 
mcl_fp_subPre13L,@function +mcl_fp_subPre13L: # @mcl_fp_subPre13L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebx + sbbl 8(%edx), %ebx + movl 20(%esp), %ebp + movl %esi, (%ebp) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebp) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ebp) + movl 20(%edx), %ebx + movl %esi, 12(%ebp) + movl 20(%ecx), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ebp) + movl 24(%ecx), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ebp) + movl 28(%ecx), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ebp) + movl 32(%ecx), %edi + sbbl %ebx, %edi + movl 36(%edx), %ebx + movl %esi, 28(%ebp) + movl 36(%ecx), %esi + sbbl %ebx, %esi + movl 40(%edx), %ebx + movl %edi, 32(%ebp) + movl 40(%ecx), %edi + sbbl %ebx, %edi + movl 44(%edx), %ebx + movl %esi, 36(%ebp) + movl 44(%ecx), %esi + sbbl %ebx, %esi + movl %edi, 40(%ebp) + movl %esi, 44(%ebp) + movl 48(%edx), %edx + movl 48(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 48(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end198: + .size mcl_fp_subPre13L, .Lfunc_end198-mcl_fp_subPre13L + + .globl mcl_fp_shr1_13L + .align 16, 0x90 + .type mcl_fp_shr1_13L,@function +mcl_fp_shr1_13L: # @mcl_fp_shr1_13L +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 40(%ecx) + movl 48(%eax), %eax + shrdl $1, %eax, %esi + movl %esi, 44(%ecx) + shrl %eax + movl %eax, 48(%ecx) + popl %esi + retl +.Lfunc_end199: + .size mcl_fp_shr1_13L, .Lfunc_end199-mcl_fp_shr1_13L + + .globl mcl_fp_add13L + .align 16, 0x90 + .type mcl_fp_add13L,@function +mcl_fp_add13L: # @mcl_fp_add13L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $40, %esp + movl 68(%esp), %ebp + movl (%ebp), %ecx + movl 4(%ebp), %eax + movl 64(%esp), %ebx + addl (%ebx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + adcl 4(%ebx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 8(%ebp), %eax + adcl 8(%ebx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 12(%ebx), %ecx + movl 16(%ebx), %eax + adcl 12(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + adcl 16(%ebp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 20(%ebx), %eax + adcl 20(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 24(%ebx), %eax + adcl 24(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 28(%ebx), %eax + adcl 28(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 32(%ebx), %eax + adcl 32(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 36(%ebx), %ecx + adcl 36(%ebp), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 40(%ebx), %edi + adcl 40(%ebp), 
%edi + movl 44(%ebx), %edx + adcl 44(%ebp), %edx + movl 48(%ebx), %esi + adcl 48(%ebp), %esi + movl 60(%esp), %ebp + movl 4(%esp), %ebx # 4-byte Reload + movl %ebx, (%ebp) + movl 36(%esp), %eax # 4-byte Reload + movl %eax, 4(%ebp) + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 8(%ebp) + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 12(%ebp) + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 16(%ebp) + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 20(%ebp) + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 24(%ebp) + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 28(%ebp) + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 32(%ebp) + movl %ecx, 36(%ebp) + movl %edi, 40(%ebp) + movl %edx, 44(%ebp) + movl %esi, 48(%ebp) + sbbl %eax, %eax + andl $1, %eax + movl 72(%esp), %ecx + subl (%ecx), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + movl 36(%esp), %ebx # 4-byte Reload + sbbl 4(%ecx), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 32(%esp), %ebx # 4-byte Reload + sbbl 8(%ecx), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + movl 28(%esp), %ebx # 4-byte Reload + sbbl 12(%ecx), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 24(%esp), %ebx # 4-byte Reload + sbbl 16(%ecx), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 20(%esp), %ebx # 4-byte Reload + sbbl 20(%ecx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 16(%esp), %ebx # 4-byte Reload + sbbl 24(%ecx), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 12(%esp), %ebx # 4-byte Reload + sbbl 28(%ecx), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 8(%esp), %ebx # 4-byte Reload + sbbl 32(%ecx), %ebx + movl %ebx, 8(%esp) # 4-byte Spill + movl (%esp), %ebx # 4-byte Reload + sbbl 36(%ecx), %ebx + sbbl 40(%ecx), %edi + sbbl 44(%ecx), %edx + sbbl 48(%ecx), %esi + sbbl $0, %eax + testb $1, %al + jne .LBB200_2 +# BB#1: # %nocarry + movl 4(%esp), %eax # 4-byte Reload + movl %eax, (%ebp) + movl 36(%esp), %eax # 4-byte Reload + movl %eax, 4(%ebp) + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 8(%ebp) + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 12(%ebp) + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 16(%ebp) + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 20(%ebp) + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 24(%ebp) + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 28(%ebp) + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 32(%ebp) + movl %ebx, 36(%ebp) + movl %edi, 40(%ebp) + movl %edx, 44(%ebp) + movl %esi, 48(%ebp) +.LBB200_2: # %carry + addl $40, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end200: + .size mcl_fp_add13L, .Lfunc_end200-mcl_fp_add13L + + .globl mcl_fp_addNF13L + .align 16, 0x90 + .type mcl_fp_addNF13L,@function +mcl_fp_addNF13L: # @mcl_fp_addNF13L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $100, %esp + movl 128(%esp), %esi + movl (%esi), %ecx + movl 4(%esi), %eax + movl 124(%esp), %edx + addl (%edx), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 4(%edx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 48(%esi), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 44(%esi), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 40(%esi), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 36(%esi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 32(%esi), %ebp + movl 28(%esi), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 24(%esi), %eax + movl 20(%esi), %ebx + movl 16(%esi), %edi + movl 12(%esi), %ecx + movl 8(%esi), %esi + adcl 8(%edx), %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl 12(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 16(%edx), 
%edi + movl %edi, 56(%esp) # 4-byte Spill + adcl 20(%edx), %ebx + movl %ebx, 60(%esp) # 4-byte Spill + adcl 24(%edx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 28(%edx), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 32(%edx), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 36(%edx), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 40(%edx), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%edx), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 48(%edx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 132(%esp), %edx + movl 64(%esp), %eax # 4-byte Reload + subl (%edx), %eax + movl 68(%esp), %ebp # 4-byte Reload + sbbl 4(%edx), %ebp + movl %ebp, (%esp) # 4-byte Spill + sbbl 8(%edx), %esi + movl %esi, 4(%esp) # 4-byte Spill + sbbl 12(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 16(%edx), %edi + movl %edi, 12(%esp) # 4-byte Spill + sbbl 20(%edx), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + sbbl 24(%edx), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + sbbl 28(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + sbbl 32(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + movl %esi, %ecx + movl %esi, %ebp + sbbl 36(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + movl %esi, %ecx + movl %esi, %edi + sbbl 40(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 44(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 48(%edx), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + sarl $31, %ebx + testl %ebx, %ebx + movl 64(%esp), %edx # 4-byte Reload + js .LBB201_2 +# BB#1: + movl %eax, %edx +.LBB201_2: + movl 120(%esp), %esi + movl %edx, (%esi) + movl 68(%esp), %edx # 4-byte Reload + js .LBB201_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload +.LBB201_4: + movl %edx, 4(%esi) + movl %edi, %edx + movl 52(%esp), %ebx # 4-byte Reload + movl 48(%esp), %eax # 4-byte Reload + js .LBB201_6 +# BB#5: + movl 4(%esp), %eax # 4-byte Reload +.LBB201_6: + movl %eax, 8(%esi) + movl %ebp, %edi + movl 60(%esp), %eax # 4-byte Reload + js .LBB201_8 +# BB#7: + movl 8(%esp), %ebx # 4-byte Reload +.LBB201_8: + movl %ebx, 12(%esi) + movl 96(%esp), %ebp # 4-byte Reload + movl 56(%esp), %ecx # 4-byte Reload + js .LBB201_10 +# BB#9: + movl 12(%esp), %ecx # 4-byte Reload +.LBB201_10: + movl %ecx, 16(%esi) + movl 92(%esp), %ecx # 4-byte Reload + js .LBB201_12 +# BB#11: + movl 16(%esp), %eax # 4-byte Reload +.LBB201_12: + movl %eax, 20(%esi) + movl 72(%esp), %ebx # 4-byte Reload + js .LBB201_14 +# BB#13: + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 76(%esp) # 4-byte Spill +.LBB201_14: + movl 76(%esp), %eax # 4-byte Reload + movl %eax, 24(%esi) + js .LBB201_16 +# BB#15: + movl 24(%esp), %ebp # 4-byte Reload +.LBB201_16: + movl %ebp, 28(%esi) + js .LBB201_18 +# BB#17: + movl 28(%esp), %ebx # 4-byte Reload +.LBB201_18: + movl %ebx, 32(%esi) + js .LBB201_20 +# BB#19: + movl 32(%esp), %edi # 4-byte Reload +.LBB201_20: + movl %edi, 36(%esi) + js .LBB201_22 +# BB#21: + movl 36(%esp), %edx # 4-byte Reload +.LBB201_22: + movl %edx, 40(%esi) + js .LBB201_24 +# BB#23: + movl 40(%esp), %ecx # 4-byte Reload +.LBB201_24: + movl %ecx, 44(%esi) + movl 88(%esp), %eax # 4-byte 
Reload + js .LBB201_26 +# BB#25: + movl 44(%esp), %eax # 4-byte Reload +.LBB201_26: + movl %eax, 48(%esi) + addl $100, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end201: + .size mcl_fp_addNF13L, .Lfunc_end201-mcl_fp_addNF13L + + .globl mcl_fp_sub13L + .align 16, 0x90 + .type mcl_fp_sub13L,@function +mcl_fp_sub13L: # @mcl_fp_sub13L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $44, %esp + movl 68(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 72(%esp), %edi + subl (%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 32(%esi), %edx + sbbl 32(%edi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 36(%esi), %ecx + sbbl 36(%edi), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 40(%esi), %eax + sbbl 40(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 44(%esi), %ebp + sbbl 44(%edi), %ebp + movl 48(%esi), %esi + sbbl 48(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 64(%esp), %ebx + movl 12(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 28(%ebx) + movl %edx, 32(%ebx) + movl %ecx, 36(%ebx) + movl %eax, 40(%ebx) + movl %ebp, 44(%ebx) + movl %esi, 48(%ebx) + je .LBB202_2 +# BB#1: # %carry + movl %esi, %edi + movl 76(%esp), %esi + movl 12(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 28(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 36(%esp), %ecx # 4-byte Reload + adcl 8(%esi), %ecx + movl 12(%esi), %eax + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %ecx, 8(%ebx) + movl 16(%esi), %ecx + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 40(%esi), %ecx + adcl (%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl %ecx, 40(%ebx) + movl 44(%esi), %eax + adcl %ebp, %eax + movl %eax, 44(%ebx) + movl 48(%esi), %eax + adcl %edi, %eax + movl %eax, 48(%ebx) +.LBB202_2: # %nocarry + addl $44, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end202: + .size mcl_fp_sub13L, .Lfunc_end202-mcl_fp_sub13L + + .globl mcl_fp_subNF13L + .align 16, 0x90 + .type mcl_fp_subNF13L,@function +mcl_fp_subNF13L: # @mcl_fp_subNF13L +# BB#0: + pushl %ebp + 
pushl %ebx + pushl %edi + pushl %esi + subl $84, %esp + movl 108(%esp), %ecx + movl (%ecx), %edx + movl 4(%ecx), %eax + movl 112(%esp), %edi + subl (%edi), %edx + movl %edx, 56(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 32(%ecx), %ebp + movl 28(%ecx), %ebx + movl 24(%ecx), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 40(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 24(%edi), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + sbbl 28(%edi), %ebx + movl %ebx, 48(%esp) # 4-byte Spill + sbbl 32(%edi), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + sbbl 48(%edi), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %edx, %edi + movl %edx, %eax + sarl $31, %edi + movl %edi, %edx + shldl $1, %eax, %edx + movl 116(%esp), %esi + movl 4(%esi), %eax + andl %edx, %eax + movl %eax, 8(%esp) # 4-byte Spill + andl (%esi), %edx + movl 48(%esi), %eax + andl %edi, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%esi), %eax + andl %edi, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esi), %eax + andl %edi, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 36(%esi), %eax + andl %edi, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 32(%esi), %eax + andl %edi, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 28(%esi), %eax + andl %edi, %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 24(%esi), %eax + andl %edi, %eax + movl %eax, (%esp) # 4-byte Spill + movl 20(%esi), %ebp + andl %edi, %ebp + movl 16(%esi), %ebx + andl %edi, %ebx + movl 12(%esi), %ecx + andl %edi, %ecx + roll %edi + andl 8(%esi), %edi + addl 56(%esp), %edx # 4-byte Folded Reload + movl 8(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl 104(%esp), %esi + movl %edx, (%esi) + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %eax, 4(%esi) + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %edi, 8(%esi) + adcl 40(%esp), %ebx # 4-byte Folded Reload + movl %ecx, 12(%esi) + adcl 44(%esp), %ebp # 4-byte Folded Reload + movl %ebx, 16(%esi) + movl (%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %ebp, 20(%esi) + movl 4(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%esi) + movl 12(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%esi) + movl 16(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%esi) + movl 20(%esp), %ecx # 4-byte Reload + adcl 72(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%esi) + movl 24(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%esi) + movl %eax, 44(%esi) + movl 28(%esp), %eax # 4-byte Reload + adcl 
64(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%esi) + addl $84, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end203: + .size mcl_fp_subNF13L, .Lfunc_end203-mcl_fp_subNF13L + + .globl mcl_fpDbl_add13L + .align 16, 0x90 + .type mcl_fpDbl_add13L,@function +mcl_fpDbl_add13L: # @mcl_fpDbl_add13L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $96, %esp + movl 124(%esp), %ecx + movl 120(%esp), %esi + movl 12(%esi), %edi + movl 16(%esi), %edx + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%esi), %ebp + movl 116(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%esi), %ebp + adcl 8(%esi), %ebx + adcl 12(%ecx), %edi + adcl 16(%ecx), %edx + movl %ebp, 4(%eax) + movl 60(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %edi, 12(%eax) + movl 20(%esi), %edi + adcl %ebx, %edi + movl 24(%ecx), %ebx + movl %edx, 16(%eax) + movl 24(%esi), %edx + adcl %ebx, %edx + movl 28(%ecx), %ebx + movl %edi, 20(%eax) + movl 28(%esi), %edi + adcl %ebx, %edi + movl 32(%ecx), %ebx + movl %edx, 24(%eax) + movl 32(%esi), %edx + adcl %ebx, %edx + movl 36(%ecx), %ebx + movl %edi, 28(%eax) + movl 36(%esi), %edi + adcl %ebx, %edi + movl 40(%ecx), %ebx + movl %edx, 32(%eax) + movl 40(%esi), %edx + adcl %ebx, %edx + movl 44(%ecx), %ebx + movl %edi, 36(%eax) + movl 44(%esi), %edi + adcl %ebx, %edi + movl 48(%ecx), %ebx + movl %edx, 40(%eax) + movl 48(%esi), %edx + adcl %ebx, %edx + movl 52(%ecx), %ebx + movl %edi, 44(%eax) + movl 52(%esi), %edi + adcl %ebx, %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 56(%ecx), %edi + movl %edx, 48(%eax) + movl 56(%esi), %eax + adcl %edi, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esi), %eax + adcl %ebp, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 64(%ecx), %edx + movl 64(%esi), %eax + adcl %edx, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 68(%ecx), %edx + movl 68(%esi), %eax + adcl %edx, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 72(%ecx), %edx + movl 72(%esi), %eax + adcl %edx, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%ecx), %edx + movl 76(%esi), %eax + adcl %edx, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%ecx), %edx + movl 80(%esi), %eax + adcl %edx, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 84(%ecx), %edx + movl 84(%esi), %eax + adcl %edx, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%ecx), %edx + movl 88(%esi), %edi + adcl %edx, %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 92(%ecx), %edx + movl 92(%esi), %eax + adcl %edx, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 96(%ecx), %edx + movl 96(%esi), %ebx + adcl %edx, %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 100(%ecx), %ecx + movl 100(%esi), %esi + adcl %ecx, %esi + sbbl %edx, %edx + andl $1, %edx + movl 128(%esp), %ebp + movl 76(%esp), %ecx # 4-byte Reload + subl (%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 4(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 8(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 12(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 16(%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + sbbl 20(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + sbbl 24(%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 
72(%esp), %ecx # 4-byte Reload + sbbl 32(%ebp), %ecx + movl %ecx, (%esp) # 4-byte Spill + sbbl 36(%ebp), %edi + movl %edi, 36(%esp) # 4-byte Spill + sbbl 40(%ebp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %eax + movl %esi, %ebx + sbbl 44(%ebp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ebx, %ecx + sbbl 48(%ebp), %ecx + sbbl $0, %edx + andl $1, %edx + jne .LBB204_2 +# BB#1: + movl %ecx, %ebx +.LBB204_2: + testb %dl, %dl + movl 76(%esp), %ecx # 4-byte Reload + movl 72(%esp), %edx # 4-byte Reload + movl 68(%esp), %esi # 4-byte Reload + movl 64(%esp), %edi # 4-byte Reload + movl 60(%esp), %ebp # 4-byte Reload + jne .LBB204_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload + movl 4(%esp), %esi # 4-byte Reload + movl 8(%esp), %edi # 4-byte Reload + movl 12(%esp), %ebp # 4-byte Reload + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 32(%esp), %ecx # 4-byte Reload +.LBB204_4: + movl 116(%esp), %eax + movl %ecx, 52(%eax) + movl 80(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + movl 84(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + movl 88(%esp), %ecx # 4-byte Reload + movl %ecx, 64(%eax) + movl 92(%esp), %ecx # 4-byte Reload + movl %ecx, 68(%eax) + movl %ebp, 72(%eax) + movl %edi, 76(%eax) + movl %esi, 80(%eax) + movl %edx, 84(%eax) + movl 56(%esp), %ecx # 4-byte Reload + movl 52(%esp), %edx # 4-byte Reload + movl 48(%esp), %esi # 4-byte Reload + jne .LBB204_6 +# BB#5: + movl 36(%esp), %esi # 4-byte Reload +.LBB204_6: + movl %esi, 88(%eax) + jne .LBB204_8 +# BB#7: + movl 40(%esp), %edx # 4-byte Reload +.LBB204_8: + movl %edx, 92(%eax) + jne .LBB204_10 +# BB#9: + movl 44(%esp), %ecx # 4-byte Reload +.LBB204_10: + movl %ecx, 96(%eax) + movl %ebx, 100(%eax) + addl $96, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end204: + .size mcl_fpDbl_add13L, .Lfunc_end204-mcl_fpDbl_add13L + + .globl mcl_fpDbl_sub13L + .align 16, 0x90 + .type mcl_fpDbl_sub13L,@function +mcl_fpDbl_sub13L: # @mcl_fpDbl_sub13L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $84, %esp + movl 108(%esp), %edi + movl (%edi), %eax + movl 4(%edi), %edx + movl 112(%esp), %ebx + subl (%ebx), %eax + sbbl 4(%ebx), %edx + movl 8(%edi), %esi + sbbl 8(%ebx), %esi + movl 104(%esp), %ecx + movl %eax, (%ecx) + movl 12(%edi), %eax + sbbl 12(%ebx), %eax + movl %edx, 4(%ecx) + movl 16(%edi), %edx + sbbl 16(%ebx), %edx + movl %esi, 8(%ecx) + movl 20(%ebx), %esi + movl %eax, 12(%ecx) + movl 20(%edi), %eax + sbbl %esi, %eax + movl 24(%ebx), %esi + movl %edx, 16(%ecx) + movl 24(%edi), %edx + sbbl %esi, %edx + movl 28(%ebx), %esi + movl %eax, 20(%ecx) + movl 28(%edi), %eax + sbbl %esi, %eax + movl 32(%ebx), %esi + movl %edx, 24(%ecx) + movl 32(%edi), %edx + sbbl %esi, %edx + movl 36(%ebx), %esi + movl %eax, 28(%ecx) + movl 36(%edi), %eax + sbbl %esi, %eax + movl 40(%ebx), %esi + movl %edx, 32(%ecx) + movl 40(%edi), %edx + sbbl %esi, %edx + movl 44(%ebx), %esi + movl %eax, 36(%ecx) + movl 44(%edi), %eax + sbbl %esi, %eax + movl 48(%ebx), %esi + movl %edx, 40(%ecx) + movl 48(%edi), %edx + sbbl %esi, %edx + movl 52(%ebx), %esi + movl %eax, 44(%ecx) + movl 52(%edi), %eax + sbbl %esi, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 56(%ebx), %eax + movl %edx, 48(%ecx) + movl 56(%edi), %edx + sbbl %eax, %edx + movl %edx, 28(%esp) # 4-byte 
Spill + movl 60(%ebx), %eax + movl 60(%edi), %edx + sbbl %eax, %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 64(%ebx), %eax + movl 64(%edi), %edx + sbbl %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 68(%ebx), %eax + movl 68(%edi), %edx + sbbl %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 72(%ebx), %eax + movl 72(%edi), %edx + sbbl %eax, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 76(%ebx), %eax + movl 76(%edi), %edx + sbbl %eax, %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 80(%ebx), %eax + movl 80(%edi), %edx + sbbl %eax, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 84(%ebx), %eax + movl 84(%edi), %edx + sbbl %eax, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 88(%ebx), %eax + movl 88(%edi), %edx + sbbl %eax, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 92(%ebx), %eax + movl 92(%edi), %edx + sbbl %eax, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 96(%ebx), %eax + movl 96(%edi), %edx + sbbl %eax, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 100(%ebx), %eax + movl 100(%edi), %edx + sbbl %eax, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 116(%esp), %edi + jne .LBB205_1 +# BB#2: + movl $0, 44(%esp) # 4-byte Folded Spill + jmp .LBB205_3 +.LBB205_1: + movl 48(%edi), %edx + movl %edx, 44(%esp) # 4-byte Spill +.LBB205_3: + testb %al, %al + jne .LBB205_4 +# BB#5: + movl $0, 16(%esp) # 4-byte Folded Spill + movl $0, %ebx + jmp .LBB205_6 +.LBB205_4: + movl (%edi), %ebx + movl 4(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB205_6: + jne .LBB205_7 +# BB#8: + movl $0, 24(%esp) # 4-byte Folded Spill + jmp .LBB205_9 +.LBB205_7: + movl 44(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill +.LBB205_9: + jne .LBB205_10 +# BB#11: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB205_12 +.LBB205_10: + movl 40(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB205_12: + jne .LBB205_13 +# BB#14: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB205_15 +.LBB205_13: + movl 36(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB205_15: + jne .LBB205_16 +# BB#17: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB205_18 +.LBB205_16: + movl 32(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB205_18: + jne .LBB205_19 +# BB#20: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB205_21 +.LBB205_19: + movl 28(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB205_21: + jne .LBB205_22 +# BB#23: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB205_24 +.LBB205_22: + movl 24(%edi), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB205_24: + jne .LBB205_25 +# BB#26: + movl $0, %eax + jmp .LBB205_27 +.LBB205_25: + movl 20(%edi), %eax +.LBB205_27: + jne .LBB205_28 +# BB#29: + movl $0, %edx + jmp .LBB205_30 +.LBB205_28: + movl 16(%edi), %edx +.LBB205_30: + jne .LBB205_31 +# BB#32: + movl $0, %esi + jmp .LBB205_33 +.LBB205_31: + movl 12(%edi), %esi +.LBB205_33: + jne .LBB205_34 +# BB#35: + xorl %edi, %edi + jmp .LBB205_36 +.LBB205_34: + movl 8(%edi), %edi +.LBB205_36: + addl 36(%esp), %ebx # 4-byte Folded Reload + movl 16(%esp), %ebp # 4-byte Reload + adcl 28(%esp), %ebp # 4-byte Folded Reload + movl %ebx, 52(%ecx) + adcl 32(%esp), %edi # 4-byte Folded Reload + movl %ebp, 56(%ecx) + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %edi, 60(%ecx) + adcl 48(%esp), %edx # 4-byte Folded Reload + movl %esi, 64(%ecx) + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %edx, 68(%ecx) + movl (%esp), %edx # 4-byte Reload + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %eax, 72(%ecx) + movl 4(%esp), %eax # 4-byte 
Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %edx, 76(%ecx) + movl 8(%esp), %edx # 4-byte Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %eax, 80(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %edx, 84(%ecx) + movl 20(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %eax, 88(%ecx) + movl 24(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %edx, 92(%ecx) + movl %eax, 96(%ecx) + movl 44(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%ecx) + addl $84, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end205: + .size mcl_fpDbl_sub13L, .Lfunc_end205-mcl_fpDbl_sub13L + + .align 16, 0x90 + .type .LmulPv448x32,@function +.LmulPv448x32: # @mulPv448x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $96, %esp + movl %edx, %edi + movl 116(%esp), %esi + movl %esi, %eax + mull 52(%edi) + movl %edx, 92(%esp) # 4-byte Spill + movl %eax, 88(%esp) # 4-byte Spill + movl %esi, %eax + mull 48(%edi) + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 80(%esp) # 4-byte Spill + movl %esi, %eax + mull 44(%edi) + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 72(%esp) # 4-byte Spill + movl %esi, %eax + mull 40(%edi) + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %esi, %eax + mull 36(%edi) + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %esi, %eax + mull 32(%edi) + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %esi, %eax + mull 28(%edi) + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %esi, %eax + mull 24(%edi) + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %esi, %eax + mull 20(%edi) + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %eax + mull 16(%edi) + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %esi, %eax + mull 12(%edi) + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %esi, %eax + mull 8(%edi) + movl %edx, %ebx + movl %eax, 4(%esp) # 4-byte Spill + movl %esi, %eax + mull 4(%edi) + movl %edx, %ebp + movl %eax, (%esp) # 4-byte Spill + movl %esi, %eax + mull (%edi) + movl %eax, (%ecx) + addl (%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%ecx) + adcl 4(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 8(%ecx) + adcl 8(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%ecx) + movl 28(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%ecx) + movl 44(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%ecx) + movl 52(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ecx) + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%ecx) + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%ecx) + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%ecx) + movl 84(%esp), %eax # 4-byte 
Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%ecx) + movl 92(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 56(%ecx) + movl %ecx, %eax + addl $96, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end206: + .size .LmulPv448x32, .Lfunc_end206-.LmulPv448x32 + + .globl mcl_fp_mulUnitPre14L + .align 16, 0x90 + .type mcl_fp_mulUnitPre14L,@function +mcl_fp_mulUnitPre14L: # @mcl_fp_mulUnitPre14L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $108, %esp + calll .L207$pb +.L207$pb: + popl %ebx +.Ltmp38: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp38-.L207$pb), %ebx + movl 136(%esp), %eax + movl %eax, (%esp) + leal 48(%esp), %ecx + movl 132(%esp), %edx + calll .LmulPv448x32 + movl 104(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 88(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 76(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 72(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 68(%esp), %ebp + movl 64(%esp), %ebx + movl 60(%esp), %edi + movl 56(%esp), %esi + movl 48(%esp), %edx + movl 52(%esp), %ecx + movl 128(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + addl $108, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end207: + .size mcl_fp_mulUnitPre14L, .Lfunc_end207-mcl_fp_mulUnitPre14L + + .globl mcl_fpDbl_mulPre14L + .align 16, 0x90 + .type mcl_fpDbl_mulPre14L,@function +mcl_fpDbl_mulPre14L: # @mcl_fpDbl_mulPre14L +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $268, %esp # imm = 0x10C + calll .L208$pb +.L208$pb: + popl %ebx +.Ltmp39: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp39-.L208$pb), %ebx + movl %ebx, -192(%ebp) # 4-byte Spill + movl 16(%ebp), %esi + movl %esi, 8(%esp) + movl 12(%ebp), %edi + movl %edi, 4(%esp) + movl 8(%ebp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre7L@PLT + leal 28(%esi), %eax + movl %eax, 8(%esp) + leal 28(%edi), %eax + movl %eax, 4(%esp) + movl 8(%ebp), %eax + leal 56(%eax), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre7L@PLT + movl 44(%edi), %eax + movl %eax, -156(%ebp) # 4-byte Spill + movl 40(%edi), %eax + movl 36(%edi), %edx + movl (%edi), %edi + movl 12(%ebp), %ecx + movl 4(%ecx), %ecx + movl 12(%ebp), %ebx + addl 28(%ebx), %edi + movl %edi, -180(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + adcl 32(%edi), %ecx + movl %ecx, -200(%ebp) # 4-byte Spill + adcl 8(%edi), %edx + movl %edx, -212(%ebp) # 4-byte Spill + adcl 12(%edi), %eax + movl %eax, -196(%ebp) # 4-byte Spill + movl -156(%ebp), %eax # 4-byte Reload + adcl 16(%edi), %eax + movl %eax, -156(%ebp) # 4-byte Spill + movl %eax, %ebx + seto 
%al + lahf + movl %eax, %eax + movl %eax, -128(%ebp) # 4-byte Spill + movl (%esi), %eax + addl 28(%esi), %eax + movl %eax, -216(%ebp) # 4-byte Spill + movl 4(%esi), %eax + adcl 32(%esi), %eax + movl %eax, -164(%ebp) # 4-byte Spill + movl 36(%esi), %eax + adcl 8(%esi), %eax + movl %eax, -168(%ebp) # 4-byte Spill + movl 40(%esi), %eax + adcl 12(%esi), %eax + movl %eax, -172(%ebp) # 4-byte Spill + movl 44(%esi), %eax + adcl 16(%esi), %eax + movl %eax, -176(%ebp) # 4-byte Spill + movl 48(%esi), %ecx + adcl 20(%esi), %ecx + movl 52(%esi), %eax + adcl 24(%esi), %eax + pushl %eax + seto %al + lahf + movl %eax, %esi + popl %eax + movl %esi, -220(%ebp) # 4-byte Spill + movl %ebx, %esi + movl %edx, -184(%ebp) # 4-byte Spill + movl -180(%ebp), %edx # 4-byte Reload + movl %edx, -188(%ebp) # 4-byte Spill + jb .LBB208_2 +# BB#1: + xorl %esi, %esi + movl $0, -184(%ebp) # 4-byte Folded Spill + movl $0, -188(%ebp) # 4-byte Folded Spill +.LBB208_2: + movl %esi, -204(%ebp) # 4-byte Spill + movl 52(%edi), %esi + movl 48(%edi), %ebx + movl -128(%ebp), %edx # 4-byte Reload + pushl %eax + movl %edx, %eax + addb $127, %al + sahf + popl %eax + adcl 20(%edi), %ebx + movl %ebx, -160(%ebp) # 4-byte Spill + adcl 24(%edi), %esi + movl %esi, -208(%ebp) # 4-byte Spill + movl %eax, -148(%ebp) # 4-byte Spill + movl %ecx, -152(%ebp) # 4-byte Spill + movl -176(%ebp), %esi # 4-byte Reload + movl %esi, -128(%ebp) # 4-byte Spill + movl -172(%ebp), %esi # 4-byte Reload + movl %esi, -132(%ebp) # 4-byte Spill + movl -168(%ebp), %esi # 4-byte Reload + movl %esi, -136(%ebp) # 4-byte Spill + movl -164(%ebp), %esi # 4-byte Reload + movl %esi, -140(%ebp) # 4-byte Spill + movl -216(%ebp), %ebx # 4-byte Reload + movl %ebx, -144(%ebp) # 4-byte Spill + jb .LBB208_4 +# BB#3: + movl $0, -148(%ebp) # 4-byte Folded Spill + movl $0, -152(%ebp) # 4-byte Folded Spill + movl $0, -128(%ebp) # 4-byte Folded Spill + movl $0, -132(%ebp) # 4-byte Folded Spill + movl $0, -136(%ebp) # 4-byte Folded Spill + movl $0, -140(%ebp) # 4-byte Folded Spill + movl $0, -144(%ebp) # 4-byte Folded Spill +.LBB208_4: + movl -180(%ebp), %edx # 4-byte Reload + movl %edx, -96(%ebp) + movl -200(%ebp), %esi # 4-byte Reload + movl %esi, -92(%ebp) + movl -212(%ebp), %edx # 4-byte Reload + movl %edx, -88(%ebp) + movl -196(%ebp), %edi # 4-byte Reload + movl %edi, -84(%ebp) + movl -156(%ebp), %edx # 4-byte Reload + movl %edx, -80(%ebp) + movl %ebx, -124(%ebp) + movl -164(%ebp), %edx # 4-byte Reload + movl %edx, -120(%ebp) + movl -168(%ebp), %edx # 4-byte Reload + movl %edx, -116(%ebp) + movl -172(%ebp), %edx # 4-byte Reload + movl %edx, -112(%ebp) + movl -176(%ebp), %edx # 4-byte Reload + movl %edx, -108(%ebp) + movl %ecx, -104(%ebp) + movl %edi, %ebx + movl %esi, %edi + movl %eax, -100(%ebp) + sbbl %edx, %edx + movl -160(%ebp), %eax # 4-byte Reload + movl %eax, -76(%ebp) + movl -208(%ebp), %esi # 4-byte Reload + movl %esi, -72(%ebp) + movl -220(%ebp), %ecx # 4-byte Reload + pushl %eax + movl %ecx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB208_6 +# BB#5: + movl $0, %esi + movl $0, %eax + movl $0, %ebx + movl $0, %edi +.LBB208_6: + movl %eax, -160(%ebp) # 4-byte Spill + sbbl %eax, %eax + leal -124(%ebp), %ecx + movl %ecx, 8(%esp) + leal -96(%ebp), %ecx + movl %ecx, 4(%esp) + leal -68(%ebp), %ecx + movl %ecx, (%esp) + andl %eax, %edx + movl -188(%ebp), %eax # 4-byte Reload + addl %eax, -144(%ebp) # 4-byte Folded Spill + adcl %edi, -140(%ebp) # 4-byte Folded Spill + movl -184(%ebp), %eax # 4-byte Reload + adcl %eax, -136(%ebp) # 4-byte Folded Spill + adcl %ebx, -132(%ebp) # 
4-byte Folded Spill + movl -204(%ebp), %eax # 4-byte Reload + adcl %eax, -128(%ebp) # 4-byte Folded Spill + movl -152(%ebp), %edi # 4-byte Reload + adcl -160(%ebp), %edi # 4-byte Folded Reload + adcl %esi, -148(%ebp) # 4-byte Folded Spill + sbbl %esi, %esi + andl $1, %esi + andl $1, %edx + movl %edx, -156(%ebp) # 4-byte Spill + movl -192(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre7L@PLT + movl -144(%ebp), %eax # 4-byte Reload + addl -40(%ebp), %eax + movl %eax, -144(%ebp) # 4-byte Spill + movl -140(%ebp), %eax # 4-byte Reload + adcl -36(%ebp), %eax + movl %eax, -140(%ebp) # 4-byte Spill + movl -136(%ebp), %eax # 4-byte Reload + adcl -32(%ebp), %eax + movl %eax, -136(%ebp) # 4-byte Spill + movl -132(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -132(%ebp) # 4-byte Spill + movl -128(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -128(%ebp) # 4-byte Spill + adcl -20(%ebp), %edi + movl -148(%ebp), %eax # 4-byte Reload + adcl -16(%ebp), %eax + movl %eax, -148(%ebp) # 4-byte Spill + adcl %esi, -156(%ebp) # 4-byte Folded Spill + movl -68(%ebp), %eax + movl 8(%ebp), %esi + subl (%esi), %eax + movl %eax, -172(%ebp) # 4-byte Spill + movl -64(%ebp), %ecx + sbbl 4(%esi), %ecx + movl -60(%ebp), %eax + sbbl 8(%esi), %eax + movl %eax, -160(%ebp) # 4-byte Spill + movl -56(%ebp), %edx + sbbl 12(%esi), %edx + movl -52(%ebp), %ebx + sbbl 16(%esi), %ebx + movl -48(%ebp), %eax + sbbl 20(%esi), %eax + movl %eax, -164(%ebp) # 4-byte Spill + movl -44(%ebp), %eax + sbbl 24(%esi), %eax + movl %eax, -168(%ebp) # 4-byte Spill + movl 28(%esi), %eax + movl %eax, -176(%ebp) # 4-byte Spill + sbbl %eax, -144(%ebp) # 4-byte Folded Spill + movl 32(%esi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + sbbl %eax, -140(%ebp) # 4-byte Folded Spill + movl 36(%esi), %eax + movl %eax, -184(%ebp) # 4-byte Spill + sbbl %eax, -136(%ebp) # 4-byte Folded Spill + movl 40(%esi), %eax + movl %eax, -188(%ebp) # 4-byte Spill + sbbl %eax, -132(%ebp) # 4-byte Folded Spill + movl 44(%esi), %eax + movl %eax, -192(%ebp) # 4-byte Spill + sbbl %eax, -128(%ebp) # 4-byte Folded Spill + movl 48(%esi), %eax + movl %eax, -196(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl %edi, -152(%ebp) # 4-byte Spill + movl 52(%esi), %eax + movl %eax, -200(%ebp) # 4-byte Spill + movl -148(%ebp), %edi # 4-byte Reload + sbbl %eax, %edi + sbbl $0, -156(%ebp) # 4-byte Folded Spill + movl 56(%esi), %eax + movl %eax, -228(%ebp) # 4-byte Spill + subl %eax, -172(%ebp) # 4-byte Folded Spill + movl 60(%esi), %eax + movl %eax, -232(%ebp) # 4-byte Spill + sbbl %eax, %ecx + movl 64(%esi), %eax + movl %eax, -236(%ebp) # 4-byte Spill + sbbl %eax, -160(%ebp) # 4-byte Folded Spill + movl 68(%esi), %eax + movl %eax, -240(%ebp) # 4-byte Spill + sbbl %eax, %edx + movl 72(%esi), %eax + movl %eax, -244(%ebp) # 4-byte Spill + sbbl %eax, %ebx + movl 76(%esi), %eax + movl %eax, -248(%ebp) # 4-byte Spill + sbbl %eax, -164(%ebp) # 4-byte Folded Spill + movl 80(%esi), %eax + movl %eax, -252(%ebp) # 4-byte Spill + sbbl %eax, -168(%ebp) # 4-byte Folded Spill + movl 84(%esi), %eax + movl %eax, -256(%ebp) # 4-byte Spill + sbbl %eax, -144(%ebp) # 4-byte Folded Spill + movl 88(%esi), %eax + movl %eax, -208(%ebp) # 4-byte Spill + sbbl %eax, -140(%ebp) # 4-byte Folded Spill + movl 92(%esi), %eax + movl %eax, -212(%ebp) # 4-byte Spill + sbbl %eax, -136(%ebp) # 4-byte Folded Spill + movl 96(%esi), %eax + movl %eax, -216(%ebp) # 4-byte Spill + sbbl %eax, -132(%ebp) # 4-byte Folded Spill + movl 100(%esi), %eax + movl %eax, -220(%ebp) # 4-byte Spill + sbbl %eax, 
-128(%ebp) # 4-byte Folded Spill + movl 104(%esi), %eax + movl %eax, -224(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + movl 108(%esi), %eax + movl %eax, -204(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl %edi, -148(%ebp) # 4-byte Spill + movl -156(%ebp), %edi # 4-byte Reload + sbbl $0, %edi + movl -172(%ebp), %eax # 4-byte Reload + addl -176(%ebp), %eax # 4-byte Folded Reload + adcl -180(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 28(%esi) + movl -160(%ebp), %eax # 4-byte Reload + adcl -184(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 32(%esi) + adcl -188(%ebp), %edx # 4-byte Folded Reload + movl %eax, 36(%esi) + adcl -192(%ebp), %ebx # 4-byte Folded Reload + movl %edx, 40(%esi) + movl -164(%ebp), %eax # 4-byte Reload + adcl -196(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 44(%esi) + movl -168(%ebp), %ecx # 4-byte Reload + adcl -200(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 48(%esi) + movl -144(%ebp), %eax # 4-byte Reload + adcl -228(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 52(%esi) + movl -140(%ebp), %ecx # 4-byte Reload + adcl -232(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 56(%esi) + movl -136(%ebp), %eax # 4-byte Reload + adcl -236(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 60(%esi) + movl -132(%ebp), %ecx # 4-byte Reload + adcl -240(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 64(%esi) + movl -128(%ebp), %eax # 4-byte Reload + adcl -244(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 68(%esi) + movl -152(%ebp), %ecx # 4-byte Reload + adcl -248(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 72(%esi) + movl -148(%ebp), %eax # 4-byte Reload + adcl -252(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 76(%esi) + adcl -256(%ebp), %edi # 4-byte Folded Reload + movl %eax, 80(%esi) + movl %edi, 84(%esi) + movl -208(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 88(%esi) + movl -212(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 92(%esi) + movl -216(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 96(%esi) + movl -220(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 100(%esi) + movl -224(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 104(%esi) + movl -204(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 108(%esi) + addl $268, %esp # imm = 0x10C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end208: + .size mcl_fpDbl_mulPre14L, .Lfunc_end208-mcl_fpDbl_mulPre14L + + .globl mcl_fpDbl_sqrPre14L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre14L,@function +mcl_fpDbl_sqrPre14L: # @mcl_fpDbl_sqrPre14L +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $268, %esp # imm = 0x10C + calll .L209$pb +.L209$pb: + popl %ebx +.Ltmp40: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp40-.L209$pb), %ebx + movl %ebx, -172(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + movl %edi, 8(%esp) + movl %edi, 4(%esp) + movl 8(%ebp), %esi + movl %esi, (%esp) + calll mcl_fpDbl_mulPre7L@PLT + leal 28(%edi), %eax + movl %eax, 8(%esp) + movl %eax, 4(%esp) + leal 56(%esi), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre7L@PLT + movl 48(%edi), %eax + movl 44(%edi), %ecx + movl 36(%edi), %edx + movl (%edi), %esi + movl 4(%edi), %ebx + addl 28(%edi), %esi + adcl 32(%edi), %ebx + movl %ebx, -164(%ebp) # 4-byte Spill + adcl 8(%edi), %edx + movl %edx, -160(%ebp) # 4-byte Spill + movl 40(%edi), %edx + adcl 12(%edi), %edx + adcl 16(%edi), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + adcl 20(%edi), %eax + movl %eax, -176(%ebp) # 4-byte Spill + movl 52(%edi), %ecx + adcl 24(%edi), %ecx + seto 
%al + lahf + movl %eax, %eax + movl %eax, -184(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -152(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -148(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -144(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -140(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -136(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %edi + seto %al + lahf + movl %eax, %eax + sbbl %ebx, %ebx + movl %ebx, -128(%ebp) # 4-byte Spill + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_1 +# BB#2: + movl %esi, -168(%ebp) # 4-byte Spill + movl $0, -132(%ebp) # 4-byte Folded Spill + jmp .LBB209_3 +.LBB209_1: + leal (%esi,%esi), %eax + movl %esi, -168(%ebp) # 4-byte Spill + movl %eax, -132(%ebp) # 4-byte Spill +.LBB209_3: + movl %edi, %eax + addb $127, %al + sahf + movl -180(%ebp), %ebx # 4-byte Reload + jb .LBB209_4 +# BB#5: + movl $0, -156(%ebp) # 4-byte Folded Spill + jmp .LBB209_6 +.LBB209_4: + movl -164(%ebp), %eax # 4-byte Reload + movl -168(%ebp), %esi # 4-byte Reload + shldl $1, %esi, %eax + movl %eax, -156(%ebp) # 4-byte Spill +.LBB209_6: + movl -176(%ebp), %edi # 4-byte Reload + movl -136(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_7 +# BB#8: + movl $0, -136(%ebp) # 4-byte Folded Spill + jmp .LBB209_9 +.LBB209_7: + movl -160(%ebp), %eax # 4-byte Reload + movl -164(%ebp), %esi # 4-byte Reload + shldl $1, %esi, %eax + movl %eax, -136(%ebp) # 4-byte Spill +.LBB209_9: + movl %ebx, %esi + movl -140(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_10 +# BB#11: + movl $0, -140(%ebp) # 4-byte Folded Spill + jmp .LBB209_12 +.LBB209_10: + movl %edx, %eax + movl -160(%ebp), %ebx # 4-byte Reload + shldl $1, %ebx, %eax + movl %eax, -140(%ebp) # 4-byte Spill +.LBB209_12: + movl -144(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_13 +# BB#14: + movl $0, -144(%ebp) # 4-byte Folded Spill + jmp .LBB209_15 +.LBB209_13: + movl %esi, %eax + shldl $1, %edx, %eax + movl %eax, -144(%ebp) # 4-byte Spill +.LBB209_15: + movl -148(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_16 +# BB#17: + movl $0, -148(%ebp) # 4-byte Folded Spill + jmp .LBB209_18 +.LBB209_16: + movl %edi, %eax + shldl $1, %esi, %eax + movl %eax, -148(%ebp) # 4-byte Spill +.LBB209_18: + movl -152(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_19 +# BB#20: + movl $0, -152(%ebp) # 4-byte Folded Spill + jmp .LBB209_21 +.LBB209_19: + movl %ecx, %eax + shldl $1, %edi, %eax + movl %eax, -152(%ebp) # 4-byte Spill +.LBB209_21: + movl -168(%ebp), %eax # 4-byte Reload + movl %eax, -96(%ebp) + movl %eax, -124(%ebp) + movl -164(%ebp), %eax # 4-byte Reload + movl %eax, -92(%ebp) + movl %eax, -120(%ebp) + movl -160(%ebp), %eax # 4-byte Reload + movl %eax, -88(%ebp) + movl %eax, -116(%ebp) + movl %edx, -84(%ebp) + movl %edx, -112(%ebp) + movl %esi, -80(%ebp) + movl %esi, -108(%ebp) + movl %edi, -76(%ebp) + movl %edi, -104(%ebp) + movl %ecx, -72(%ebp) + movl %ecx, -100(%ebp) + movl -184(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB209_22 +# BB#23: + xorl %edi, %edi + jmp .LBB209_24 +.LBB209_22: + shrl $31, %ecx + movl %ecx, %edi +.LBB209_24: + leal -68(%ebp), %eax + movl %eax, (%esp) + leal -96(%ebp), %eax + movl %eax, 4(%esp) + leal -124(%ebp), %eax + movl %eax, 8(%esp) + movl -128(%ebp), %esi # 4-byte Reload + 
andl $1, %esi + movl -172(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre7L@PLT + movl -132(%ebp), %eax # 4-byte Reload + addl -40(%ebp), %eax + movl %eax, -132(%ebp) # 4-byte Spill + movl -156(%ebp), %eax # 4-byte Reload + adcl -36(%ebp), %eax + movl -136(%ebp), %ecx # 4-byte Reload + adcl -32(%ebp), %ecx + movl %ecx, -136(%ebp) # 4-byte Spill + movl -140(%ebp), %ecx # 4-byte Reload + adcl -28(%ebp), %ecx + movl %ecx, -140(%ebp) # 4-byte Spill + movl -144(%ebp), %ecx # 4-byte Reload + adcl -24(%ebp), %ecx + movl %ecx, -144(%ebp) # 4-byte Spill + movl -148(%ebp), %ecx # 4-byte Reload + adcl -20(%ebp), %ecx + movl %ecx, -148(%ebp) # 4-byte Spill + movl -152(%ebp), %ecx # 4-byte Reload + adcl -16(%ebp), %ecx + movl %ecx, -152(%ebp) # 4-byte Spill + adcl %edi, %esi + movl %esi, -128(%ebp) # 4-byte Spill + movl -68(%ebp), %ecx + movl 8(%ebp), %esi + subl (%esi), %ecx + movl %ecx, -204(%ebp) # 4-byte Spill + movl -64(%ebp), %edi + sbbl 4(%esi), %edi + movl -60(%ebp), %edx + sbbl 8(%esi), %edx + movl %edx, -160(%ebp) # 4-byte Spill + movl -56(%ebp), %edx + sbbl 12(%esi), %edx + movl %edx, -168(%ebp) # 4-byte Spill + movl -52(%ebp), %ebx + sbbl 16(%esi), %ebx + movl -48(%ebp), %ecx + sbbl 20(%esi), %ecx + movl %ecx, -172(%ebp) # 4-byte Spill + movl -44(%ebp), %edx + sbbl 24(%esi), %edx + movl %edx, -164(%ebp) # 4-byte Spill + movl 28(%esi), %edx + movl %edx, -176(%ebp) # 4-byte Spill + sbbl %edx, -132(%ebp) # 4-byte Folded Spill + movl 32(%esi), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + sbbl %ecx, %eax + movl %eax, -156(%ebp) # 4-byte Spill + movl 36(%esi), %eax + movl %eax, -184(%ebp) # 4-byte Spill + sbbl %eax, -136(%ebp) # 4-byte Folded Spill + movl 40(%esi), %eax + movl %eax, -188(%ebp) # 4-byte Spill + sbbl %eax, -140(%ebp) # 4-byte Folded Spill + movl 44(%esi), %eax + movl %eax, -192(%ebp) # 4-byte Spill + sbbl %eax, -144(%ebp) # 4-byte Folded Spill + movl 48(%esi), %eax + movl %eax, -196(%ebp) # 4-byte Spill + sbbl %eax, -148(%ebp) # 4-byte Folded Spill + movl 52(%esi), %eax + movl %eax, -200(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + movl -128(%ebp), %ecx # 4-byte Reload + sbbl $0, %ecx + movl 56(%esi), %eax + movl %eax, -228(%ebp) # 4-byte Spill + movl -204(%ebp), %edx # 4-byte Reload + subl %eax, %edx + movl 60(%esi), %eax + movl %eax, -232(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl 64(%esi), %eax + movl %eax, -236(%ebp) # 4-byte Spill + sbbl %eax, -160(%ebp) # 4-byte Folded Spill + movl 68(%esi), %eax + movl %eax, -240(%ebp) # 4-byte Spill + sbbl %eax, -168(%ebp) # 4-byte Folded Spill + movl 72(%esi), %eax + movl %eax, -244(%ebp) # 4-byte Spill + sbbl %eax, %ebx + movl 76(%esi), %eax + movl %eax, -248(%ebp) # 4-byte Spill + sbbl %eax, -172(%ebp) # 4-byte Folded Spill + movl 80(%esi), %eax + movl %eax, -252(%ebp) # 4-byte Spill + sbbl %eax, -164(%ebp) # 4-byte Folded Spill + movl 84(%esi), %eax + movl %eax, -256(%ebp) # 4-byte Spill + sbbl %eax, -132(%ebp) # 4-byte Folded Spill + movl 88(%esi), %eax + movl %eax, -204(%ebp) # 4-byte Spill + sbbl %eax, -156(%ebp) # 4-byte Folded Spill + movl 92(%esi), %eax + movl %eax, -208(%ebp) # 4-byte Spill + sbbl %eax, -136(%ebp) # 4-byte Folded Spill + movl 96(%esi), %eax + movl %eax, -212(%ebp) # 4-byte Spill + sbbl %eax, -140(%ebp) # 4-byte Folded Spill + movl 100(%esi), %eax + movl %eax, -216(%ebp) # 4-byte Spill + sbbl %eax, -144(%ebp) # 4-byte Folded Spill + movl 104(%esi), %eax + movl %eax, -220(%ebp) # 4-byte Spill + sbbl %eax, -148(%ebp) # 4-byte Folded Spill + movl 108(%esi), %eax + movl %eax, 
-224(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + sbbl $0, %ecx + movl %ecx, -128(%ebp) # 4-byte Spill + movl %edx, %eax + addl -176(%ebp), %eax # 4-byte Folded Reload + adcl -180(%ebp), %edi # 4-byte Folded Reload + movl %eax, 28(%esi) + movl -160(%ebp), %eax # 4-byte Reload + adcl -184(%ebp), %eax # 4-byte Folded Reload + movl %edi, 32(%esi) + movl -168(%ebp), %ecx # 4-byte Reload + adcl -188(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 36(%esi) + adcl -192(%ebp), %ebx # 4-byte Folded Reload + movl %ecx, 40(%esi) + movl -172(%ebp), %eax # 4-byte Reload + adcl -196(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 44(%esi) + movl -164(%ebp), %ecx # 4-byte Reload + adcl -200(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 48(%esi) + movl -132(%ebp), %eax # 4-byte Reload + adcl -228(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 52(%esi) + movl -156(%ebp), %edx # 4-byte Reload + adcl -232(%ebp), %edx # 4-byte Folded Reload + movl %eax, 56(%esi) + movl -136(%ebp), %ecx # 4-byte Reload + adcl -236(%ebp), %ecx # 4-byte Folded Reload + movl %edx, 60(%esi) + movl -140(%ebp), %eax # 4-byte Reload + adcl -240(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 64(%esi) + movl -144(%ebp), %ecx # 4-byte Reload + adcl -244(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 68(%esi) + movl -148(%ebp), %eax # 4-byte Reload + adcl -248(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 72(%esi) + movl -152(%ebp), %ecx # 4-byte Reload + adcl -252(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 76(%esi) + movl -128(%ebp), %eax # 4-byte Reload + adcl -256(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 80(%esi) + movl %eax, 84(%esi) + movl -204(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 88(%esi) + movl -208(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 92(%esi) + movl -212(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 96(%esi) + movl -216(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 100(%esi) + movl -220(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 104(%esi) + movl -224(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 108(%esi) + addl $268, %esp # imm = 0x10C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end209: + .size mcl_fpDbl_sqrPre14L, .Lfunc_end209-mcl_fpDbl_sqrPre14L + + .globl mcl_fp_mont14L + .align 16, 0x90 + .type mcl_fp_mont14L,@function +mcl_fp_mont14L: # @mcl_fp_mont14L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1900, %esp # imm = 0x76C + calll .L210$pb +.L210$pb: + popl %ebx +.Ltmp41: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp41-.L210$pb), %ebx + movl 1932(%esp), %eax + movl -4(%eax), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1840(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 1840(%esp), %edi + movl 1844(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl %edi, %eax + imull %esi, %eax + movl 1896(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 1892(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 1888(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 1884(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 1880(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 1876(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1872(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1868(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1864(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1860(%esp), %esi + movl 1856(%esp), %ebp + movl 
1852(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 1848(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 1776(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + addl 1776(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1780(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1784(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1792(%esp), %ebp + adcl 1796(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1804(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1808(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1816(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1820(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1824(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1828(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1832(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 1928(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1712(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + andl $1, %edi + movl %edi, %edx + movl 100(%esp), %ecx # 4-byte Reload + addl 1712(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 1716(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1720(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1724(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + adcl 1728(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 1732(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 1736(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1740(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 1744(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 1748(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1752(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1756(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1760(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1764(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1768(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 100(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1648(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + movl 100(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1648(%esp), %ebp + movl 84(%esp), %ecx # 4-byte Reload + adcl 1652(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1656(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1660(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 
4-byte Reload + adcl 1664(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl 1668(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1672(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + adcl 1676(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 1680(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1684(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1688(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1692(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1696(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1700(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 1704(%esp), %esi + adcl $0, %eax + movl %eax, %edi + movl 1928(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1584(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 84(%esp), %ecx # 4-byte Reload + addl 1584(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 1588(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1592(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1596(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1600(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1604(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 1608(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 1612(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1616(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1620(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1624(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1628(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1632(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1636(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + adcl 1640(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %edi + movl %edi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1520(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %esi + movl %esi, %ecx + addl 1520(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1528(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1532(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1540(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 1544(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1548(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1552(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + 
movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 1564(%esp), %ebp + movl 108(%esp), %esi # 4-byte Reload + adcl 1568(%esp), %esi + movl 92(%esp), %edi # 4-byte Reload + adcl 1572(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1576(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1456(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 1456(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 1460(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1464(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1472(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1476(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1480(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1484(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1488(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1492(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 1496(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + adcl 1500(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl 1504(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1508(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1512(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %esi + movl %esi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1392(%esp), %ecx + movl 1932(%esp), %eax + movl %eax, %edx + calll .LmulPv448x32 + andl $1, %edi + movl %edi, %eax + addl 1392(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1396(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1400(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1404(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1408(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1412(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1416(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 1420(%esp), %esi + movl 88(%esp), %ebp # 4-byte Reload + adcl 1424(%esp), %ebp + movl 96(%esp), %edi # 4-byte Reload + adcl 1428(%esp), %edi + movl 104(%esp), %ecx # 4-byte Reload + adcl 1432(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1436(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1440(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1444(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1448(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1328(%esp), %ecx + movl 
1924(%esp), %eax + movl %eax, %edx + calll .LmulPv448x32 + movl 76(%esp), %ecx # 4-byte Reload + addl 1328(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1352(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 1356(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + adcl 1360(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1376(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1380(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 1384(%esp), %edi + sbbl %esi, %esi + movl %ecx, %ebp + movl %ebp, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1264(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %esi + movl %esi, %ecx + addl 1264(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 1284(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1312(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 1316(%esp), %esi + adcl 1320(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1200(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 68(%esp), %eax # 4-byte Reload + addl 1200(%esp), %eax + movl 60(%esp), %ecx # 4-byte Reload + adcl 1204(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1208(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 1212(%esp), %edi + adcl 1216(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1220(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte 
Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1224(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1228(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1232(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1236(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1240(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1244(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl 1248(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1252(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1256(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %eax, %ebp + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1136(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %esi + movl %esi, %ecx + addl 1136(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1144(%esp), %ebp + adcl 1148(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 1172(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 1180(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1188(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1072(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 60(%esp), %eax # 4-byte Reload + addl 1072(%esp), %eax + adcl 1076(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1080(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1084(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1088(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1092(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1096(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1100(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 1104(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 1108(%esp), %ebp + adcl 1112(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1116(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 
4-byte Reload + adcl 1120(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1124(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1128(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %eax, %edi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1008(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %esi + movl %esi, %ecx + addl 1008(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 1020(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 1036(%esp), %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1044(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 1052(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 944(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 52(%esp), %eax # 4-byte Reload + addl 944(%esp), %eax + movl 56(%esp), %ecx # 4-byte Reload + adcl 948(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 952(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 956(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 960(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + adcl 964(%esp), %esi + adcl 968(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 972(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 976(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 980(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl 984(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 988(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 992(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 996(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1000(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %eax, %edi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 880(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %ebp + addl 880(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 
4-byte Reload + adcl 888(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 896(%esp), %edi + adcl 900(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 924(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 936(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 816(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 56(%esp), %ecx # 4-byte Reload + addl 816(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 824(%esp), %ebp + adcl 828(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 856(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + adcl 872(%esp), %esi + sbbl %eax, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 752(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + movl 56(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 752(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 756(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 760(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 764(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 768(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 772(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 776(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 780(%esp), %ecx + movl %ecx, 
92(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 784(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 788(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 792(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 796(%esp), %ebp + movl 68(%esp), %edi # 4-byte Reload + adcl 800(%esp), %edi + movl 60(%esp), %ecx # 4-byte Reload + adcl 804(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 808(%esp), %esi + adcl $0, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 688(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 64(%esp), %ecx # 4-byte Reload + addl 688(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 728(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + adcl 732(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 740(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 624(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + movl %edi, %ecx + andl $1, %ecx + addl 624(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 636(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 648(%esp), %esi + movl 100(%esp), %edi # 4-byte Reload + adcl 652(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + 
movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 80(%esp), %ecx # 4-byte Reload + addl 560(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 568(%esp), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 580(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + adcl 584(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 592(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %edi + movl %edi, %ecx + addl 496(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 508(%esp), %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 520(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 528(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 540(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 432(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 88(%esp), %ecx # 4-byte Reload + addl 432(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 440(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + adcl 444(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + 
adcl 452(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 472(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 368(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %esi + movl %esi, %ecx + addl 368(%esp), %ebp + movl 96(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 376(%esp), %esi + adcl 380(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 392(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 304(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 96(%esp), %ecx # 4-byte Reload + addl 304(%esp), %ecx + adcl 308(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + adcl 312(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 324(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 328(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload 
+ adcl 352(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 240(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + movl 96(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 240(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 248(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 252(%esp), %edi + movl 100(%esp), %ebp # 4-byte Reload + adcl 256(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 264(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 268(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 1928(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 176(%esp), %ecx + movl 1924(%esp), %edx + calll .LmulPv448x32 + movl 104(%esp), %ecx # 4-byte Reload + addl 176(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 184(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + adcl 188(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 192(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 200(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl 48(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 1932(%esp), %edx + calll .LmulPv448x32 + andl $1, %ebp + addl 112(%esp), %esi + movl 100(%esp), %esi # 4-byte Reload + movl 108(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax + movl 
%eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 120(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + adcl 124(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + adcl 128(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 132(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl %ecx, %ebx + movl 76(%esp), %ecx # 4-byte Reload + adcl 136(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 140(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 144(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 148(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 152(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 156(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 160(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 164(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 168(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl $0, %ebp + movl 1932(%esp), %ecx + subl (%ecx), %eax + sbbl 4(%ecx), %edx + sbbl 8(%ecx), %esi + sbbl 12(%ecx), %edi + movl %edi, 16(%esp) # 4-byte Spill + sbbl 16(%ecx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + sbbl 20(%ecx), %edi + movl %edi, 24(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + sbbl 24(%ecx), %edi + movl %edi, 28(%esp) # 4-byte Spill + movl 60(%esp), %ebx # 4-byte Reload + sbbl 28(%ecx), %ebx + movl 52(%esp), %edi # 4-byte Reload + sbbl 32(%ecx), %edi + movl %edi, 32(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + sbbl 36(%ecx), %edi + movl %edi, 36(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + sbbl 40(%ecx), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + sbbl 44(%ecx), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + sbbl 48(%ecx), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + sbbl 52(%ecx), %edi + movl %ebp, %ecx + movl %edi, 104(%esp) # 4-byte Spill + sbbl $0, %ecx + andl $1, %ecx + jne .LBB210_2 +# BB#1: + movl %ebx, 60(%esp) # 4-byte Spill +.LBB210_2: + testb %cl, %cl + movl 108(%esp), %ebx # 4-byte Reload + jne .LBB210_4 +# BB#3: + movl %eax, %ebx +.LBB210_4: + movl 1920(%esp), %eax + movl %ebx, (%eax) + movl 92(%esp), %edi # 4-byte Reload + movl 72(%esp), %ecx # 4-byte Reload + jne .LBB210_6 +# BB#5: + movl %edx, %edi +.LBB210_6: + movl %edi, 4(%eax) + jne .LBB210_8 +# BB#7: + movl %esi, 100(%esp) # 4-byte Spill +.LBB210_8: + movl 100(%esp), %edx # 4-byte Reload + movl %edx, 8(%eax) + jne .LBB210_10 +# BB#9: + movl 16(%esp), %edx # 4-byte Reload + movl %edx, 84(%esp) # 4-byte Spill +.LBB210_10: + movl 84(%esp), %edx # 4-byte Reload + movl %edx, 12(%eax) + jne .LBB210_12 +# BB#11: + movl 20(%esp), %ecx # 4-byte Reload +.LBB210_12: + movl %ecx, 16(%eax) + movl 76(%esp), %ecx # 4-byte Reload + jne .LBB210_14 +# BB#13: + movl 24(%esp), %ecx # 4-byte Reload +.LBB210_14: + movl %ecx, 20(%eax) + movl 68(%esp), %ecx # 4-byte Reload + jne .LBB210_16 +# BB#15: + movl 28(%esp), %ecx # 4-byte Reload +.LBB210_16: + movl %ecx, 24(%eax) + movl 60(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 52(%esp), %ecx # 4-byte Reload + jne .LBB210_18 +# BB#17: + movl 
32(%esp), %ecx # 4-byte Reload +.LBB210_18: + movl %ecx, 32(%eax) + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB210_20 +# BB#19: + movl 36(%esp), %ecx # 4-byte Reload +.LBB210_20: + movl %ecx, 36(%eax) + movl 64(%esp), %ecx # 4-byte Reload + jne .LBB210_22 +# BB#21: + movl 40(%esp), %ecx # 4-byte Reload +.LBB210_22: + movl %ecx, 40(%eax) + movl 80(%esp), %ecx # 4-byte Reload + jne .LBB210_24 +# BB#23: + movl 44(%esp), %ecx # 4-byte Reload +.LBB210_24: + movl %ecx, 44(%eax) + movl 88(%esp), %ecx # 4-byte Reload + jne .LBB210_26 +# BB#25: + movl 48(%esp), %ecx # 4-byte Reload +.LBB210_26: + movl %ecx, 48(%eax) + movl 96(%esp), %ecx # 4-byte Reload + jne .LBB210_28 +# BB#27: + movl 104(%esp), %ecx # 4-byte Reload +.LBB210_28: + movl %ecx, 52(%eax) + addl $1900, %esp # imm = 0x76C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end210: + .size mcl_fp_mont14L, .Lfunc_end210-mcl_fp_mont14L + + .globl mcl_fp_montNF14L + .align 16, 0x90 + .type mcl_fp_montNF14L,@function +mcl_fp_montNF14L: # @mcl_fp_montNF14L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1884, %esp # imm = 0x75C + calll .L211$pb +.L211$pb: + popl %ebx +.Ltmp42: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp42-.L211$pb), %ebx + movl 1916(%esp), %eax + movl -4(%eax), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 1912(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1824(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 1824(%esp), %edi + movl 1828(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %edi, %eax + imull %esi, %eax + movl 1880(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 1876(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 1872(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 1868(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 1864(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1860(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1856(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1852(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1848(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 1844(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1840(%esp), %esi + movl 1836(%esp), %ebp + movl 1832(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 1760(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 1760(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 1764(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1768(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1772(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + adcl 1776(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 1780(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1784(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1792(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1804(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 1808(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 
92(%esp), %ebp # 4-byte Reload + adcl 1816(%esp), %ebp + movl 1912(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1696(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 1752(%esp), %eax + movl 68(%esp), %edx # 4-byte Reload + addl 1696(%esp), %edx + movl 88(%esp), %ecx # 4-byte Reload + adcl 1700(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1704(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 1708(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1712(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1716(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1720(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1724(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1728(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1732(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1736(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 1740(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 1744(%esp), %edi + adcl 1748(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %ebp + movl %edx, %eax + movl %edx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1632(%esp), %ecx + movl 1916(%esp), %eax + movl %eax, %edx + calll .LmulPv448x32 + addl 1632(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1636(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1640(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1644(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1648(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1652(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1656(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1660(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + adcl 1664(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 1668(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1672(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1676(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1680(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 1684(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1688(%esp), %ebp + movl 1912(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1568(%esp), %ecx + movl 1908(%esp), %edx + calll .LmulPv448x32 + movl 1624(%esp), %eax + movl 88(%esp), %edx # 4-byte Reload + addl 1568(%esp), %edx + movl 72(%esp), %ecx # 4-byte Reload + adcl 1572(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1576(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1580(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1584(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1588(%esp), %ecx + movl %ecx, 
44(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1592(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 1596(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1600(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1604(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1608(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 1612(%esp), %edi + movl 92(%esp), %ecx # 4-byte Reload + adcl 1616(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1620(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %ebp + movl %edx, %esi + movl %esi, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1504(%esp), %ecx + movl 1916(%esp), %edx + calll .LmulPv448x32 + addl 1504(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 1508(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1512(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1516(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1520(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1528(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1532(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1540(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 1544(%esp), %esi + adcl 1548(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1552(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1560(%esp), %ebp + movl 1912(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1440(%esp), %ecx + movl 1908(%esp), %eax + movl %eax, %edx + calll .LmulPv448x32 + movl 1496(%esp), %eax + movl 72(%esp), %edx # 4-byte Reload + addl 1440(%esp), %edx + movl 68(%esp), %ecx # 4-byte Reload + adcl 1444(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1448(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1452(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1456(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1460(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 1464(%esp), %edi + movl 52(%esp), %ecx # 4-byte Reload + adcl 1468(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1472(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 1476(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1480(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1484(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1488(%esp), %esi + adcl 1492(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte 
	[several hundred generated lines elided: the remaining rounds of
	 mcl_fp_montNF14L, each loading the next 32-bit word of the multiplier
	 from 1912(%esp), calling .LmulPv448x32, folding the partial product in
	 with an adcl chain, then multiplying the low word by the spilled
	 inverse at 36(%esp) and calling .LmulPv448x32 on the modulus at
	 1916(%esp); the rounds are followed by the trial subl/sbbl subtraction
	 of the modulus and the sign-selected (js / .LBB211_*) stores of the
	 14-limb result to 1904(%esp)]
+	addl	$1884, %esp             # imm = 0x75C
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end211:
+	.size	mcl_fp_montNF14L, .Lfunc_end211-mcl_fp_montNF14L
+
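(Annotation, not part of the patch: mcl_fp_montNF14L above is generated
word-serial Montgomery multiplication -- each round folds in one word of the
multiplier via .LmulPv448x32 and immediately performs one reduction step
against the modulus. Below is a minimal portable sketch of the operation it
computes, assuming reduced inputs; mont_mul_ref and its signature are
illustrative, not mcl's API, and the NF variant's final correction differs in
detail -- the sketch uses a plain conditional subtraction.)

	#include <stdint.h>

	#define N 14                      /* 14 x 32-bit limbs = 448 bits */

	/* z = x*y*R^-1 mod p with R = 2^(32*N); inv = -p^-1 mod 2^32 (the
	 * word the asm keeps spilled at 36(%esp)).  Assumes x, y < p. */
	static void mont_mul_ref(uint32_t *z, const uint32_t *x,
	                         const uint32_t *y, const uint32_t *p,
	                         uint32_t inv)
	{
	    uint32_t t[N + 2] = {0};
	    for (int i = 0; i < N; i++) {
	        uint64_t carry = 0, top;
	        for (int j = 0; j < N; j++) {      /* t += x * y[i] */
	            uint64_t acc = (uint64_t)x[j] * y[i] + t[j] + carry;
	            t[j] = (uint32_t)acc;
	            carry = acc >> 32;
	        }
	        top = (uint64_t)t[N] + carry;
	        t[N] = (uint32_t)top;
	        t[N + 1] = (uint32_t)(top >> 32);
	        uint32_t q = t[0] * inv;           /* makes t[0] vanish below */
	        carry = 0;
	        for (int j = 0; j < N; j++) {      /* t += q * p */
	            uint64_t acc = (uint64_t)q * p[j] + t[j] + carry;
	            t[j] = (uint32_t)acc;
	            carry = acc >> 32;
	        }
	        top = (uint64_t)t[N] + carry;
	        t[N] = (uint32_t)top;
	        t[N + 1] += (uint32_t)(top >> 32);
	        for (int j = 0; j <= N; j++)       /* t /= 2^32 (t[0] == 0) */
	            t[j] = t[j + 1];
	        t[N + 1] = 0;
	    }
	    uint32_t s[N], borrow = 0;             /* final t < 2p: trial t-p */
	    for (int j = 0; j < N; j++) {
	        uint64_t d = (uint64_t)t[j] - p[j] - borrow;
	        s[j] = (uint32_t)d;
	        borrow = (uint32_t)((d >> 32) & 1);
	    }
	    int t_lt_p = (int)((((uint64_t)t[N] - borrow) >> 32) & 1);
	    for (int j = 0; j < N; j++)            /* keep t only if t < p */
	        z[j] = t_lt_p ? t[j] : s[j];
	}

(The generated code unrolls both inner loops completely, keeps the
accumulator in spilled stack slots, and lets the carry chain live in EFLAGS
across each adcl run; the reference version threads carries explicitly.)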
+	.globl	mcl_fp_montRed14L
+	.align	16, 0x90
+	.type	mcl_fp_montRed14L,@function
+mcl_fp_montRed14L:                      # @mcl_fp_montRed14L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$1068, %esp             # imm = 0x42C
+	calll	.L212$pb
+.L212$pb:
+	popl	%eax
+.Ltmp43:
+	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp43-.L212$pb), %eax
+	movl	%eax, 100(%esp)         # 4-byte Spill
+	movl	1096(%esp), %edx
+	movl	-4(%edx), %eax
+	movl	%eax, 88(%esp)          # 4-byte Spill
	[several hundred generated lines elided: spills of the 28 input limbs
	 from 1092(%esp) and of the modulus limbs from (%edx); fourteen
	 reduction rounds that each imull the running low limb by the spilled
	 inverse word at 88(%esp), call .LmulPv448x32 on the modulus, and fold
	 the product in with adcl chains; then the trial sbbl subtraction of
	 the modulus and the flag-selected (.LBB212_*) stores of the reduced
	 14-limb result to the destination loaded from 1088(%esp)]
+	addl	$1068, %esp             # imm = 0x42C
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end212:
+	.size	mcl_fp_montRed14L, .Lfunc_end212-mcl_fp_montRed14L
+
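(Annotation, not part of the patch: mcl_fp_montRed14L reduces a 28-limb
Montgomery product back to 14 limbs. A portable sketch of the operation,
assuming the input is below p * 2^448, as it is for the product of two
reduced operands; mont_red_ref is an illustrative name, not mcl's API.)

	#include <stdint.h>

	#define N 14

	/* z = x * R^-1 mod p for a 2N-limb x, with R = 2^(32*N) and
	 * inv = -p^-1 mod 2^32 (kept at 88(%esp) in the asm above). */
	static void mont_red_ref(uint32_t *z, const uint32_t *x,
	                         const uint32_t *p, uint32_t inv)
	{
	    uint32_t t[2 * N + 1];
	    for (int j = 0; j < 2 * N; j++) t[j] = x[j];
	    t[2 * N] = 0;
	    for (int i = 0; i < N; i++) {
	        uint32_t q = t[i] * inv;           /* zeroes the current limb */
	        uint64_t carry = 0;
	        for (int j = 0; j < N; j++) {      /* t += q * p << (32*i) */
	            uint64_t acc = (uint64_t)q * p[j] + t[i + j] + carry;
	            t[i + j] = (uint32_t)acc;
	            carry = acc >> 32;
	        }
	        for (int j = i + N; carry && j <= 2 * N; j++) {
	            uint64_t acc = (uint64_t)t[j] + carry;  /* ripple carry */
	            t[j] = (uint32_t)acc;
	            carry = acc >> 32;
	        }
	    }
	    uint32_t s[N], borrow = 0;             /* result in t[N..2N] < 2p */
	    for (int j = 0; j < N; j++) {
	        uint64_t d = (uint64_t)t[N + j] - p[j] - borrow;
	        s[j] = (uint32_t)d;
	        borrow = (uint32_t)((d >> 32) & 1);
	    }
	    int t_lt_p = (int)((((uint64_t)t[2 * N] - borrow) >> 32) & 1);
	    for (int j = 0; j < N; j++)
	        z[j] = t_lt_p ? t[N + j] : s[j];
	}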
+	.globl	mcl_fp_addPre14L
+	.align	16, 0x90
+	.type	mcl_fp_addPre14L,@function
+mcl_fp_addPre14L:                       # @mcl_fp_addPre14L
+# BB#0:
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	movl	24(%esp), %eax
+	movl	(%eax), %edx
+	movl	4(%eax), %esi
+	movl	20(%esp), %ecx
+	addl	(%ecx), %edx
+	adcl	4(%ecx), %esi
+	movl	8(%eax), %edi
+	adcl	8(%ecx), %edi
+	movl	16(%esp), %ebx
+	movl	%edx, (%ebx)
+	movl	12(%ecx), %edx
+	movl	%esi, 4(%ebx)
+	movl	16(%ecx), %esi
+	adcl	12(%eax), %edx
+	adcl	16(%eax), %esi
+	movl	%edi, 8(%ebx)
+	movl	20(%eax), %edi
+	movl	%edx, 12(%ebx)
+	movl	20(%ecx), %edx
+	adcl	%edi, %edx
+	movl	24(%eax), %edi
+	movl	%esi, 16(%ebx)
+	movl	24(%ecx), %esi
+	adcl	%edi, %esi
+	movl	28(%eax), %edi
+	movl	%edx, 20(%ebx)
+	movl	28(%ecx), %edx
+	adcl	%edi, %edx
+	movl	32(%eax), %edi
+	movl	%esi, 24(%ebx)
+	movl	32(%ecx), %esi
+	adcl	%edi, %esi
+	movl	36(%eax), %edi
+	movl	%edx, 28(%ebx)
+	movl	36(%ecx), %edx
+	adcl	%edi, %edx
+	movl	40(%eax), %edi
+	movl	%esi, 32(%ebx)
+	movl	40(%ecx), %esi
+	adcl	%edi, %esi
+	movl	44(%eax), %edi
+	movl	%edx, 36(%ebx)
+	movl	44(%ecx), %edx
+	adcl	%edi, %edx
+	movl	48(%eax), %edi
+	movl	%esi, 40(%ebx)
+	movl	48(%ecx), %esi
+	adcl	%edi, %esi
+	movl	%edx, 44(%ebx)
+	movl	%esi, 48(%ebx)
+	movl	52(%eax), %eax
+	movl	52(%ecx), %ecx
+	adcl	%eax, %ecx
+	movl	%ecx, 52(%ebx)
+	sbbl	%eax, %eax
+	andl	$1, %eax
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	retl
+.Lfunc_end213:
+	.size	mcl_fp_addPre14L, .Lfunc_end213-mcl_fp_addPre14L
+
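(Annotation, not part of the patch: mcl_fp_addPre14L above and the
mcl_fp_subPre14L / mcl_fp_shr1_14L helpers that follow are plain limb-wise
carry, borrow, and funnel-shift chains. A portable sketch with the carries
threaded explicitly; the names are illustrative, not mcl's API.)

	#include <stdint.h>

	#define N 14

	/* z = x + y over N limbs; returns the carry out (the asm materialises
	 * it with `sbbl %eax, %eax; andl $1, %eax`). */
	static uint32_t add_pre(uint32_t *z, const uint32_t *x,
	                        const uint32_t *y)
	{
	    uint64_t c = 0;
	    for (int i = 0; i < N; i++) {
	        c += (uint64_t)x[i] + y[i];
	        z[i] = (uint32_t)c;
	        c >>= 32;
	    }
	    return (uint32_t)c;
	}

	/* z = x - y over N limbs; returns the borrow out */
	static uint32_t sub_pre(uint32_t *z, const uint32_t *x,
	                        const uint32_t *y)
	{
	    uint32_t b = 0;
	    for (int i = 0; i < N; i++) {
	        uint64_t d = (uint64_t)x[i] - y[i] - b;
	        z[i] = (uint32_t)d;
	        b = (uint32_t)((d >> 32) & 1);
	    }
	    return b;
	}

	/* z = x >> 1 over N limbs; each shrdl in mcl_fp_shr1_14L below
	 * funnels the low bit of the next-higher limb into the vacated
	 * top bit */
	static void shr1(uint32_t *z, const uint32_t *x)
	{
	    for (int i = 0; i < N - 1; i++)
	        z[i] = (x[i] >> 1) | (x[i + 1] << 31);
	    z[N - 1] = x[N - 1] >> 1;
	}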
36(%edx), %ebx + movl %esi, 28(%ebp) + movl 36(%ecx), %esi + sbbl %ebx, %esi + movl 40(%edx), %ebx + movl %edi, 32(%ebp) + movl 40(%ecx), %edi + sbbl %ebx, %edi + movl 44(%edx), %ebx + movl %esi, 36(%ebp) + movl 44(%ecx), %esi + sbbl %ebx, %esi + movl 48(%edx), %ebx + movl %edi, 40(%ebp) + movl 48(%ecx), %edi + sbbl %ebx, %edi + movl %esi, 44(%ebp) + movl %edi, 48(%ebp) + movl 52(%edx), %edx + movl 52(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 52(%ebp) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end214: + .size mcl_fp_subPre14L, .Lfunc_end214-mcl_fp_subPre14L + + .globl mcl_fp_shr1_14L + .align 16, 0x90 + .type mcl_fp_shr1_14L,@function +mcl_fp_shr1_14L: # @mcl_fp_shr1_14L +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 40(%ecx) + movl 48(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 44(%ecx) + movl 52(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 48(%ecx) + shrl %eax + movl %eax, 52(%ecx) + popl %esi + retl +.Lfunc_end215: + .size mcl_fp_shr1_14L, .Lfunc_end215-mcl_fp_shr1_14L + + .globl mcl_fp_add14L + .align 16, 0x90 + .type mcl_fp_add14L,@function +mcl_fp_add14L: # @mcl_fp_add14L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $44, %esp + movl 72(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %ecx + movl 68(%esp), %ebp + addl (%ebp), %edx + movl %edx, 4(%esp) # 4-byte Spill + adcl 4(%ebp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 8(%eax), %ecx + adcl 8(%ebp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 12(%ebp), %edx + movl 16(%ebp), %ecx + adcl 12(%eax), %edx + movl %edx, 32(%esp) # 4-byte Spill + adcl 16(%eax), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 20(%ebp), %ecx + adcl 20(%eax), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 24(%ebp), %ecx + adcl 24(%eax), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 28(%ebp), %ecx + adcl 28(%eax), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 32(%ebp), %ecx + adcl 32(%eax), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 36(%ebp), %ecx + adcl 36(%eax), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 40(%ebp), %edx + adcl 40(%eax), %edx + movl %edx, (%esp) # 4-byte Spill + movl 44(%ebp), %ebx + adcl 44(%eax), %ebx + movl 48(%ebp), %esi + adcl 48(%eax), %esi + movl 52(%ebp), %edi + adcl 52(%eax), %edi + movl 64(%esp), %eax + movl 4(%esp), %ebp # 4-byte Reload + movl %ebp, (%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 20(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 
12(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl %edx, 40(%eax) + movl %ebx, 44(%eax) + movl %esi, 48(%eax) + movl %edi, 52(%eax) + sbbl %ecx, %ecx + andl $1, %ecx + movl 76(%esp), %edx + subl (%edx), %ebp + movl %ebp, 4(%esp) # 4-byte Spill + movl 40(%esp), %ebp # 4-byte Reload + sbbl 4(%edx), %ebp + movl %ebp, 40(%esp) # 4-byte Spill + movl 36(%esp), %ebp # 4-byte Reload + sbbl 8(%edx), %ebp + movl %ebp, 36(%esp) # 4-byte Spill + movl 32(%esp), %ebp # 4-byte Reload + sbbl 12(%edx), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + movl 28(%esp), %ebp # 4-byte Reload + sbbl 16(%edx), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 24(%esp), %ebp # 4-byte Reload + sbbl 20(%edx), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 20(%esp), %ebp # 4-byte Reload + sbbl 24(%edx), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 16(%esp), %ebp # 4-byte Reload + sbbl 28(%edx), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 12(%esp), %ebp # 4-byte Reload + sbbl 32(%edx), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 8(%esp), %ebp # 4-byte Reload + sbbl 36(%edx), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl (%esp), %ebp # 4-byte Reload + sbbl 40(%edx), %ebp + sbbl 44(%edx), %ebx + sbbl 48(%edx), %esi + sbbl 52(%edx), %edi + sbbl $0, %ecx + testb $1, %cl + jne .LBB216_2 +# BB#1: # %nocarry + movl 4(%esp), %ecx # 4-byte Reload + movl %ecx, (%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 16(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 20(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl %ebp, 40(%eax) + movl %ebx, 44(%eax) + movl %esi, 48(%eax) + movl %edi, 52(%eax) +.LBB216_2: # %carry + addl $44, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end216: + .size mcl_fp_add14L, .Lfunc_end216-mcl_fp_add14L + + .globl mcl_fp_addNF14L + .align 16, 0x90 + .type mcl_fp_addNF14L,@function +mcl_fp_addNF14L: # @mcl_fp_addNF14L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $112, %esp + movl 140(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 136(%esp), %ecx + addl (%ecx), %edx + movl %edx, 72(%esp) # 4-byte Spill + adcl 4(%ecx), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 52(%eax), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 48(%eax), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 44(%eax), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 40(%eax), %ebp + movl 36(%eax), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 32(%eax), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 28(%eax), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 24(%eax), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 20(%eax), %ebx + movl 16(%eax), %edi + movl 12(%eax), %esi + movl 8(%eax), %edx + adcl 8(%ecx), %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl 12(%ecx), %esi + movl %esi, 60(%esp) # 4-byte Spill + adcl 16(%ecx), %edi + movl %edi, 64(%esp) # 4-byte Spill + adcl 20(%ecx), %ebx + movl %ebx, 68(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 24(%ecx), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 28(%ecx), %eax + movl %eax, 108(%esp) # 
4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 32(%ecx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 36(%ecx), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 40(%ecx), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 44(%ecx), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 48(%ecx), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 52(%ecx), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 144(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + subl (%ecx), %eax + movl %eax, (%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + sbbl 4(%ecx), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 8(%ecx), %edx + movl %edx, 8(%esp) # 4-byte Spill + sbbl 12(%ecx), %esi + movl %esi, 12(%esp) # 4-byte Spill + sbbl 16(%ecx), %edi + movl %edi, 16(%esp) # 4-byte Spill + sbbl 20(%ecx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + movl %edx, %eax + sbbl 24(%ecx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 28(%ecx), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + sbbl 32(%ecx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + sbbl 36(%ecx), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + sbbl 40(%ecx), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + movl %eax, %esi + movl %eax, %ebp + sbbl 44(%ecx), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + movl %eax, %esi + sbbl 48(%ecx), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + movl %eax, %edi + sbbl 52(%ecx), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl %edi, %ecx + sarl $31, %ecx + testl %ecx, %ecx + movl 72(%esp), %ecx # 4-byte Reload + js .LBB217_2 +# BB#1: + movl (%esp), %ecx # 4-byte Reload +.LBB217_2: + movl 132(%esp), %edi + movl %ecx, (%edi) + movl 76(%esp), %eax # 4-byte Reload + js .LBB217_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB217_4: + movl %eax, 4(%edi) + movl %edx, %ecx + movl 64(%esp), %eax # 4-byte Reload + movl 56(%esp), %edx # 4-byte Reload + js .LBB217_6 +# BB#5: + movl 8(%esp), %edx # 4-byte Reload +.LBB217_6: + movl %edx, 8(%edi) + movl %ebp, %edx + movl 104(%esp), %ebx # 4-byte Reload + movl 60(%esp), %ebp # 4-byte Reload + js .LBB217_8 +# BB#7: + movl 12(%esp), %ebp # 4-byte Reload +.LBB217_8: + movl %ebp, 12(%edi) + movl 100(%esp), %ebp # 4-byte Reload + js .LBB217_10 +# BB#9: + movl 16(%esp), %eax # 4-byte Reload +.LBB217_10: + movl %eax, 16(%edi) + movl 80(%esp), %esi # 4-byte Reload + js .LBB217_12 +# BB#11: + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 68(%esp) # 4-byte Spill +.LBB217_12: + movl 68(%esp), %eax # 4-byte Reload + movl %eax, 20(%edi) + js .LBB217_14 +# BB#13: + movl 24(%esp), %ecx # 4-byte Reload +.LBB217_14: + movl %ecx, 24(%edi) + js .LBB217_16 +# BB#15: + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 108(%esp) # 4-byte Spill +.LBB217_16: + movl 108(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%edi) + js .LBB217_18 +# BB#17: + movl 32(%esp), %ebp # 4-byte Reload +.LBB217_18: + movl %ebp, 32(%edi) + js .LBB217_20 +# BB#19: + movl 36(%esp), %ebx # 4-byte Reload +.LBB217_20: + movl %ebx, 36(%edi) + js .LBB217_22 +# BB#21: + movl 40(%esp), %esi # 4-byte Reload +.LBB217_22: + movl %esi, 40(%edi) + movl 96(%esp), %eax # 4-byte Reload + 
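# addNF select: the full sum a+b and the provisionally reduced value
# (a+b)-p were both computed above; sarl $31 of the final sbbl result
# turned the borrow into a sign flag, and each js/.LBB217_* pair in
# this chain keeps the reduced limb only when the subtraction of the
# modulus did not underflow.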
js .LBB217_24 +# BB#23: + movl 44(%esp), %edx # 4-byte Reload +.LBB217_24: + movl %edx, 44(%edi) + movl 92(%esp), %ecx # 4-byte Reload + js .LBB217_26 +# BB#25: + movl 48(%esp), %eax # 4-byte Reload +.LBB217_26: + movl %eax, 48(%edi) + js .LBB217_28 +# BB#27: + movl 52(%esp), %ecx # 4-byte Reload +.LBB217_28: + movl %ecx, 52(%edi) + addl $112, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end217: + .size mcl_fp_addNF14L, .Lfunc_end217-mcl_fp_addNF14L + + .globl mcl_fp_sub14L + .align 16, 0x90 + .type mcl_fp_sub14L,@function +mcl_fp_sub14L: # @mcl_fp_sub14L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $52, %esp + movl 76(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 80(%esp), %edi + subl (%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 32(%esi), %eax + sbbl 32(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 36(%esi), %edx + sbbl 36(%edi), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 40(%esi), %ecx + sbbl 40(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 44(%esi), %eax + sbbl 44(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 48(%esi), %ebp + sbbl 48(%edi), %ebp + movl 52(%esi), %esi + sbbl 52(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 72(%esp), %ebx + movl 44(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 28(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 32(%ebx) + movl %edx, 36(%ebx) + movl %ecx, 40(%ebx) + movl %eax, 44(%ebx) + movl %ebp, 48(%ebx) + movl %esi, 52(%ebx) + je .LBB218_2 +# BB#1: # %carry + movl %esi, (%esp) # 4-byte Spill + movl 84(%esp), %esi + movl 44(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 36(%esp), %edi # 4-byte Reload + adcl 8(%esi), %edi + movl 12(%esi), %eax + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %edi, 8(%ebx) + movl 16(%esi), %ecx + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 40(%esi), %ecx + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl 44(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded 
Reload + movl %ecx, 40(%ebx) + movl %eax, 44(%ebx) + movl 48(%esi), %eax + adcl %ebp, %eax + movl %eax, 48(%ebx) + movl 52(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%ebx) +.LBB218_2: # %nocarry + addl $52, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end218: + .size mcl_fp_sub14L, .Lfunc_end218-mcl_fp_sub14L + + .globl mcl_fp_subNF14L + .align 16, 0x90 + .type mcl_fp_subNF14L,@function +mcl_fp_subNF14L: # @mcl_fp_subNF14L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $88, %esp + movl 112(%esp), %ecx + movl 52(%ecx), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl (%ecx), %edx + movl 4(%ecx), %eax + movl 116(%esp), %edi + subl (%edi), %edx + movl %edx, 56(%esp) # 4-byte Spill + sbbl 4(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%ecx), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 32(%ecx), %ebp + movl 28(%ecx), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 24(%ecx), %ebx + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 40(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 44(%esp) # 4-byte Spill + sbbl 24(%edi), %ebx + movl %ebx, 48(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 28(%edi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + sbbl 32(%edi), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 48(%edi), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + sbbl 52(%edi), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %eax, %esi + sarl $31, %esi + movl %esi, %ecx + addl %ecx, %ecx + movl %esi, %ebp + adcl %ebp, %ebp + shrl $31, %eax + orl %ecx, %eax + movl 120(%esp), %edi + andl 4(%edi), %ebp + andl (%edi), %eax + movl 52(%edi), %ecx + andl %esi, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 48(%edi), %ecx + andl %esi, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 44(%edi), %ecx + andl %esi, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 40(%edi), %ecx + andl %esi, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 36(%edi), %ecx + andl %esi, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 32(%edi), %ecx + andl %esi, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 28(%edi), %ecx + andl %esi, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 24(%edi), %ecx + andl %esi, %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 20(%edi), %ebx + andl %esi, %ebx + movl 16(%edi), %edx + andl %esi, %edx + movl 12(%edi), %ecx + andl %esi, %ecx + andl 8(%edi), %esi + addl 56(%esp), %eax # 4-byte Folded Reload + adcl 60(%esp), %ebp # 4-byte Folded Reload + movl 108(%esp), %edi + movl %eax, (%edi) + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %ebp, 4(%edi) + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %esi, 8(%edi) + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %ecx, 12(%edi) + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %edx, 16(%edi) + movl (%esp), %eax 
# 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %ebx, 20(%edi) + movl 4(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %eax, 24(%edi) + movl 8(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %ecx, 28(%edi) + movl 12(%esp), %ecx # 4-byte Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl %eax, 32(%edi) + movl 16(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %ecx, 36(%edi) + movl 20(%esp), %ecx # 4-byte Reload + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %eax, 40(%edi) + movl 24(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %ecx, 44(%edi) + movl %eax, 48(%edi) + movl 28(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%edi) + addl $88, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end219: + .size mcl_fp_subNF14L, .Lfunc_end219-mcl_fp_subNF14L + + .globl mcl_fpDbl_add14L + .align 16, 0x90 + .type mcl_fpDbl_add14L,@function +mcl_fpDbl_add14L: # @mcl_fpDbl_add14L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $100, %esp + movl 128(%esp), %ecx + movl 124(%esp), %esi + movl 12(%esi), %edi + movl 16(%esi), %edx + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%esi), %ebp + movl 120(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%esi), %ebp + adcl 8(%esi), %ebx + adcl 12(%ecx), %edi + adcl 16(%ecx), %edx + movl %ebp, 4(%eax) + movl 64(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %edi, 12(%eax) + movl 20(%esi), %edi + adcl %ebx, %edi + movl 24(%ecx), %ebx + movl %edx, 16(%eax) + movl 24(%esi), %edx + adcl %ebx, %edx + movl 28(%ecx), %ebx + movl %edi, 20(%eax) + movl 28(%esi), %edi + adcl %ebx, %edi + movl 32(%ecx), %ebx + movl %edx, 24(%eax) + movl 32(%esi), %edx + adcl %ebx, %edx + movl 36(%ecx), %ebx + movl %edi, 28(%eax) + movl 36(%esi), %edi + adcl %ebx, %edi + movl 40(%ecx), %ebx + movl %edx, 32(%eax) + movl 40(%esi), %edx + adcl %ebx, %edx + movl 44(%ecx), %ebx + movl %edi, 36(%eax) + movl 44(%esi), %edi + adcl %ebx, %edi + movl 48(%ecx), %ebx + movl %edx, 40(%eax) + movl 48(%esi), %edx + adcl %ebx, %edx + movl 52(%ecx), %ebx + movl %edi, 44(%eax) + movl 52(%esi), %edi + adcl %ebx, %edi + movl 56(%ecx), %ebx + movl %edx, 48(%eax) + movl 56(%esi), %edx + adcl %ebx, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 60(%ecx), %edx + movl %edi, 52(%eax) + movl 60(%esi), %eax + adcl %edx, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 64(%esi), %eax + adcl %ebp, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%ecx), %edx + movl 68(%esi), %eax + adcl %edx, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%ecx), %edx + movl 72(%esi), %eax + adcl %edx, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 76(%ecx), %edx + movl 76(%esi), %eax + adcl %edx, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%ecx), %edx + movl 80(%esi), %eax + adcl %edx, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%ecx), %edx + movl 84(%esi), %eax + adcl %edx, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 88(%ecx), %edx + movl 88(%esi), %eax + adcl %edx, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 92(%ecx), %edx + movl 92(%esi), %eax + adcl %edx, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 96(%ecx), %edx + movl 96(%esi), %eax + adcl %edx, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 100(%ecx), %edx + movl 100(%esi), %edi + adcl %edx, %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 104(%ecx), %edx + movl 104(%esi), 
%ebx + adcl %edx, %ebx + movl %ebx, 56(%esp) # 4-byte Spill + movl 108(%ecx), %ecx + movl 108(%esi), %esi + adcl %ecx, %esi + sbbl %edx, %edx + andl $1, %edx + movl 132(%esp), %ebp + movl 72(%esp), %ecx # 4-byte Reload + subl (%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + sbbl 4(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 8(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 12(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 16(%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 20(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + sbbl 24(%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + sbbl 32(%ebp), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + sbbl 36(%ebp), %ecx + sbbl 40(%ebp), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl 44(%ebp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl %ebx, %eax + movl %esi, %ebx + sbbl 48(%ebp), %eax + movl %eax, 44(%esp) # 4-byte Spill + sbbl 52(%ebp), %esi + sbbl $0, %edx + andl $1, %edx + jne .LBB220_2 +# BB#1: + movl %esi, %ebx +.LBB220_2: + testb %dl, %dl + movl 72(%esp), %eax # 4-byte Reload + movl 68(%esp), %edx # 4-byte Reload + movl 64(%esp), %edi # 4-byte Reload + movl 60(%esp), %ebp # 4-byte Reload + jne .LBB220_4 +# BB#3: + movl %ecx, %edx + movl (%esp), %edi # 4-byte Reload + movl 4(%esp), %ebp # 4-byte Reload + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload +.LBB220_4: + movl 120(%esp), %esi + movl %eax, 56(%esi) + movl 76(%esp), %eax # 4-byte Reload + movl %eax, 60(%esi) + movl 80(%esp), %eax # 4-byte Reload + movl %eax, 64(%esi) + movl 84(%esp), %eax # 4-byte Reload + movl %eax, 68(%esi) + movl 88(%esp), %eax # 4-byte Reload + movl %eax, 72(%esi) + movl 92(%esp), %eax # 4-byte Reload + movl %eax, 76(%esi) + movl 96(%esp), %eax # 4-byte Reload + movl %eax, 80(%esi) + movl %ebp, 84(%esi) + movl %edi, 88(%esi) + movl %edx, 92(%esi) + movl 52(%esp), %edx # 4-byte Reload + movl 48(%esp), %eax # 4-byte Reload + jne .LBB220_6 +# BB#5: + movl 36(%esp), %eax # 4-byte Reload +.LBB220_6: + movl %eax, 96(%esi) + movl 56(%esp), %ecx # 4-byte Reload + jne .LBB220_8 +# BB#7: + movl 40(%esp), %edx # 4-byte Reload +.LBB220_8: + movl %edx, 100(%esi) + jne .LBB220_10 +# BB#9: + movl 44(%esp), %ecx # 4-byte Reload +.LBB220_10: + movl %ecx, 104(%esi) + movl %ebx, 108(%esi) + addl $100, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end220: + .size mcl_fpDbl_add14L, .Lfunc_end220-mcl_fpDbl_add14L + + .globl mcl_fpDbl_sub14L + .align 16, 0x90 + .type mcl_fpDbl_sub14L,@function +mcl_fpDbl_sub14L: # @mcl_fpDbl_sub14L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $96, %esp + movl 120(%esp), %ebx + movl (%ebx), %eax + movl 4(%ebx), %edx + movl 124(%esp), %ebp + subl 
(%ebp), %eax + sbbl 4(%ebp), %edx + movl 8(%ebx), %esi + sbbl 8(%ebp), %esi + movl 116(%esp), %ecx + movl %eax, (%ecx) + movl 12(%ebx), %eax + sbbl 12(%ebp), %eax + movl %edx, 4(%ecx) + movl 16(%ebx), %edx + sbbl 16(%ebp), %edx + movl %esi, 8(%ecx) + movl 20(%ebp), %esi + movl %eax, 12(%ecx) + movl 20(%ebx), %eax + sbbl %esi, %eax + movl 24(%ebp), %esi + movl %edx, 16(%ecx) + movl 24(%ebx), %edx + sbbl %esi, %edx + movl 28(%ebp), %esi + movl %eax, 20(%ecx) + movl 28(%ebx), %eax + sbbl %esi, %eax + movl 32(%ebp), %esi + movl %edx, 24(%ecx) + movl 32(%ebx), %edx + sbbl %esi, %edx + movl 36(%ebp), %esi + movl %eax, 28(%ecx) + movl 36(%ebx), %eax + sbbl %esi, %eax + movl 40(%ebp), %esi + movl %edx, 32(%ecx) + movl 40(%ebx), %edx + sbbl %esi, %edx + movl 44(%ebp), %esi + movl %eax, 36(%ecx) + movl 44(%ebx), %eax + sbbl %esi, %eax + movl 48(%ebp), %esi + movl %edx, 40(%ecx) + movl 48(%ebx), %edx + sbbl %esi, %edx + movl 52(%ebp), %esi + movl %eax, 44(%ecx) + movl 52(%ebx), %eax + sbbl %esi, %eax + movl 56(%ebp), %esi + movl %edx, 48(%ecx) + movl 56(%ebx), %edx + sbbl %esi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 60(%ebp), %edx + movl %eax, 52(%ecx) + movl 60(%ebx), %eax + sbbl %edx, %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 64(%ebp), %eax + movl 64(%ebx), %edx + sbbl %eax, %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 68(%ebp), %eax + movl 68(%ebx), %edx + sbbl %eax, %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 72(%ebp), %eax + movl 72(%ebx), %edx + sbbl %eax, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 76(%ebp), %eax + movl 76(%ebx), %edx + sbbl %eax, %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 80(%ebp), %eax + movl 80(%ebx), %edx + sbbl %eax, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 84(%ebp), %eax + movl 84(%ebx), %edx + sbbl %eax, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 88(%ebp), %eax + movl 88(%ebx), %edx + sbbl %eax, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 92(%ebp), %eax + movl 92(%ebx), %edx + sbbl %eax, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 96(%ebp), %eax + movl 96(%ebx), %edx + sbbl %eax, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 100(%ebp), %eax + movl 100(%ebx), %edx + sbbl %eax, %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 104(%ebp), %eax + movl 104(%ebx), %edx + sbbl %eax, %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 108(%ebp), %eax + movl 108(%ebx), %edx + sbbl %eax, %edx + movl %edx, 92(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 128(%esp), %ebp + jne .LBB221_1 +# BB#2: + movl $0, 56(%esp) # 4-byte Folded Spill + jmp .LBB221_3 +.LBB221_1: + movl 52(%ebp), %edx + movl %edx, 56(%esp) # 4-byte Spill +.LBB221_3: + testb %al, %al + jne .LBB221_4 +# BB#5: + movl $0, 24(%esp) # 4-byte Folded Spill + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB221_6 +.LBB221_4: + movl (%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 4(%ebp), %eax + movl %eax, 24(%esp) # 4-byte Spill +.LBB221_6: + jne .LBB221_7 +# BB#8: + movl $0, 32(%esp) # 4-byte Folded Spill + jmp .LBB221_9 +.LBB221_7: + movl 48(%ebp), %eax + movl %eax, 32(%esp) # 4-byte Spill +.LBB221_9: + jne .LBB221_10 +# BB#11: + movl $0, 28(%esp) # 4-byte Folded Spill + jmp .LBB221_12 +.LBB221_10: + movl 44(%ebp), %eax + movl %eax, 28(%esp) # 4-byte Spill +.LBB221_12: + jne .LBB221_13 +# BB#14: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB221_15 +.LBB221_13: + movl 40(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB221_15: + jne .LBB221_16 +# BB#17: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB221_18 
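# fpDbl_sub fix-up: each jne/.LBB221_* pair in this chain loads either
# zero or one limb of the modulus, selected by the borrow bit saved in
# %eax after the main sbbl chain; the selected value (borrow ? p : 0)
# is then added into the high half of the difference by the addl/adcl
# run that follows the chain.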
+.LBB221_16: + movl 36(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB221_18: + jne .LBB221_19 +# BB#20: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB221_21 +.LBB221_19: + movl 32(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB221_21: + jne .LBB221_22 +# BB#23: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB221_24 +.LBB221_22: + movl 28(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB221_24: + jne .LBB221_25 +# BB#26: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB221_27 +.LBB221_25: + movl 24(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB221_27: + jne .LBB221_28 +# BB#29: + movl $0, %esi + jmp .LBB221_30 +.LBB221_28: + movl 20(%ebp), %esi +.LBB221_30: + jne .LBB221_31 +# BB#32: + movl $0, %edi + jmp .LBB221_33 +.LBB221_31: + movl 16(%ebp), %edi +.LBB221_33: + jne .LBB221_34 +# BB#35: + movl $0, %ebx + jmp .LBB221_36 +.LBB221_34: + movl 12(%ebp), %ebx +.LBB221_36: + jne .LBB221_37 +# BB#38: + xorl %ebp, %ebp + jmp .LBB221_39 +.LBB221_37: + movl 8(%ebp), %ebp +.LBB221_39: + movl 20(%esp), %edx # 4-byte Reload + addl 44(%esp), %edx # 4-byte Folded Reload + movl 24(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %edx, 56(%ecx) + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %eax, 60(%ecx) + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 64(%ecx) + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %ebx, 68(%ecx) + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %edi, 72(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %esi, 76(%ecx) + movl 4(%esp), %edx # 4-byte Reload + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %eax, 80(%ecx) + movl 8(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %edx, 84(%ecx) + movl 12(%esp), %edx # 4-byte Reload + adcl 76(%esp), %edx # 4-byte Folded Reload + movl %eax, 88(%ecx) + movl 16(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %edx, 92(%ecx) + movl 28(%esp), %edx # 4-byte Reload + adcl 84(%esp), %edx # 4-byte Folded Reload + movl %eax, 96(%ecx) + movl 32(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %edx, 100(%ecx) + movl %eax, 104(%ecx) + movl 56(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%ecx) + addl $96, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end221: + .size mcl_fpDbl_sub14L, .Lfunc_end221-mcl_fpDbl_sub14L + + .align 16, 0x90 + .type .LmulPv480x32,@function +.LmulPv480x32: # @mulPv480x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $104, %esp + movl %edx, %ebp + movl 124(%esp), %esi + movl %esi, %eax + mull 56(%ebp) + movl %edx, 100(%esp) # 4-byte Spill + movl %eax, 96(%esp) # 4-byte Spill + movl %esi, %eax + mull 52(%ebp) + movl %edx, 92(%esp) # 4-byte Spill + movl %eax, 88(%esp) # 4-byte Spill + movl %esi, %eax + mull 48(%ebp) + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 80(%esp) # 4-byte Spill + movl %esi, %eax + mull 44(%ebp) + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 72(%esp) # 4-byte Spill + movl %esi, %eax + mull 40(%ebp) + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %esi, %eax + mull 36(%ebp) + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %esi, %eax + mull 32(%ebp) + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %esi, %eax + mull 28(%ebp) + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + 
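# mulPv480x32 core: one 32x32->64 mull per limb of the 15-limb source
# (each hi:lo pair spilled to the stack), after which a single
# addl/adcl chain folds every high word into the next product's low
# word, yielding the 16-limb scalar product.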
movl %esi, %eax + mull 24(%ebp) + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %esi, %eax + mull 20(%ebp) + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %esi, %eax + mull 16(%ebp) + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %esi, %eax + mull 12(%ebp) + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %esi, %eax + mull 8(%ebp) + movl %edx, %edi + movl %eax, 4(%esp) # 4-byte Spill + movl %esi, %eax + mull 4(%ebp) + movl %edx, %ebx + movl %eax, (%esp) # 4-byte Spill + movl %esi, %eax + mull (%ebp) + movl %eax, (%ecx) + addl (%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%ecx) + adcl 4(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 8(%ecx) + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%ecx) + movl 28(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%ecx) + movl 44(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%ecx) + movl 52(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ecx) + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%ecx) + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%ecx) + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%ecx) + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%ecx) + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%ecx) + movl 100(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 60(%ecx) + movl %ecx, %eax + addl $104, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end222: + .size .LmulPv480x32, .Lfunc_end222-.LmulPv480x32 + + .globl mcl_fp_mulUnitPre15L + .align 16, 0x90 + .type mcl_fp_mulUnitPre15L,@function +mcl_fp_mulUnitPre15L: # @mcl_fp_mulUnitPre15L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $124, %esp + calll .L223$pb +.L223$pb: + popl %ebx +.Ltmp44: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp44-.L223$pb), %ebx + movl 152(%esp), %eax + movl %eax, (%esp) + leal 56(%esp), %ecx + movl 148(%esp), %edx + calll .LmulPv480x32 + movl 116(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 108(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 104(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 88(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 76(%esp), %ebp + movl 72(%esp), %ebx + movl 68(%esp), %edi + movl 64(%esp), %esi + movl 56(%esp), %edx + movl 60(%esp), %ecx + movl 144(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 
16(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + movl 52(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + addl $124, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end223: + .size mcl_fp_mulUnitPre15L, .Lfunc_end223-mcl_fp_mulUnitPre15L + + .globl mcl_fpDbl_mulPre15L + .align 16, 0x90 + .type mcl_fpDbl_mulPre15L,@function +mcl_fpDbl_mulPre15L: # @mcl_fpDbl_mulPre15L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1084, %esp # imm = 0x43C + calll .L224$pb +.L224$pb: + popl %esi +.Ltmp45: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp45-.L224$pb), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 1112(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1016(%esp), %ecx + movl 1108(%esp), %edi + movl %edi, %edx + movl %esi, %ebx + calll .LmulPv480x32 + movl 1076(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1072(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1068(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1060(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1056(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1052(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1044(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1040(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1036(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1032(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1028(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 1024(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1016(%esp), %eax + movl 1020(%esp), %ebp + movl 1104(%esp), %ecx + movl %eax, (%ecx) + movl 1112(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 952(%esp), %ecx + movl %edi, %edx + movl %esi, %ebx + calll .LmulPv480x32 + addl 952(%esp), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 1012(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1008(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1004(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1000(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 996(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 992(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 988(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 984(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 980(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 976(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 972(%esp), %edi + movl 968(%esp), %esi + movl 964(%esp), %edx + movl 956(%esp), %eax + movl 960(%esp), %ecx + movl 1104(%esp), %ebp + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 
24(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 72(%esp), %eax # 4-byte Reload + addl 888(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 948(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 944(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 940(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 936(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 932(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 928(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 924(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 920(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 916(%esp), %ebx + movl 912(%esp), %edi + movl 908(%esp), %esi + movl 904(%esp), %edx + movl 900(%esp), %ecx + movl 892(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 896(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 72(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 112(%esp) # 4-byte Folded Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 68(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 
104(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 824(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 824(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 884(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 880(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 876(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 872(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 868(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 864(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 860(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 856(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 852(%esp), %ebx + movl 848(%esp), %edi + movl 844(%esp), %esi + movl 840(%esp), %edx + movl 836(%esp), %ecx + movl 828(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 60(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 760(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 804(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 800(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 796(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 792(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 788(%esp), %ebx + movl 784(%esp), %edi + movl 780(%esp), %esi + movl 776(%esp), %edx + movl 772(%esp), %ecx + movl 764(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 60(%esp), %ebp # 4-byte Reload + movl %ebp, 
16(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 696(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 748(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 744(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 740(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 736(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 732(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 724(%esp), %ebx + movl 720(%esp), %edi + movl 716(%esp), %esi + movl 712(%esp), %edx + movl 708(%esp), %ecx + movl 700(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 704(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 
4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 632(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 692(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 688(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 684(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 680(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 676(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 672(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 660(%esp), %ebx + movl 656(%esp), %edi + movl 652(%esp), %esi + movl 648(%esp), %edx + movl 644(%esp), %ecx + movl 636(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 640(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 64(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 568(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 628(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 624(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 620(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 616(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + 
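# fpDbl_mulPre15L outer loop: for each 32-bit multiplier word it calls
# .LmulPv480x32 into a fresh stack window, emits the lowest limb of the
# running sum to the result, and folds the remaining partial-product
# limbs into the accumulator with adcl chains such as the ones here.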
movl 604(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 596(%esp), %ebx + movl 592(%esp), %edi + movl 588(%esp), %esi + movl 584(%esp), %edx + movl 580(%esp), %ecx + movl 572(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 576(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 504(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 564(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 560(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 532(%esp), %ebx + movl 528(%esp), %edi + movl 524(%esp), %esi + movl 520(%esp), %edx + movl 516(%esp), %ecx + movl 508(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 512(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + 
adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 440(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 492(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 472(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 468(%esp), %ebx + movl 464(%esp), %edi + movl 460(%esp), %esi + movl 456(%esp), %edx + movl 452(%esp), %ecx + movl 444(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 448(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 376(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 
436(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 432(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 428(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 424(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 416(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 404(%esp), %ebx + movl 400(%esp), %edi + movl 396(%esp), %esi + movl 392(%esp), %edx + movl 388(%esp), %ecx + movl 380(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 312(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 360(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 356(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 352(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 348(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 344(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 340(%esp), %ebx + movl 336(%esp), %edi + movl 332(%esp), %esi + movl 328(%esp), %edx + movl 324(%esp), %ecx + movl 316(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 44(%eax) + movl 36(%esp), %eax # 4-byte Reload + adcl %eax, 108(%esp) # 4-byte Folded Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 
20(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 48(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 1108(%esp), %eax + movl %eax, %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 108(%esp), %eax # 4-byte Reload + addl 248(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 304(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 300(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 296(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 292(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 288(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 284(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 280(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 276(%esp), %ebx + movl 272(%esp), %edi + movl 268(%esp), %edx + movl 264(%esp), %ecx + movl 260(%esp), %eax + movl 252(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 256(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + movl 1104(%esp), %ebp + movl %esi, 48(%ebp) + movl 112(%esp), %esi # 4-byte Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 112(%esp) # 4-byte Spill + movl 20(%esp), %esi # 4-byte Reload + adcl %esi, 36(%esp) # 4-byte Folded Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 32(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 100(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload 
+ movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 184(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 244(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 240(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 236(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 232(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 228(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 224(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 220(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 212(%esp), %ebx + movl 208(%esp), %edx + movl 204(%esp), %ecx + movl 200(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 196(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 188(%esp), %eax + movl 192(%esp), %esi + movl 112(%esp), %ebp # 4-byte Reload + movl 1104(%esp), %edi + movl %ebp, 52(%edi) + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl %esi, %ebp + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl 72(%esp), %esi # 4-byte Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + movl 24(%esp), %edi # 4-byte Reload + adcl %edi, 40(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 52(%esp), %ebx # 4-byte Reload + adcl %ebx, 64(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1112(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 1108(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 120(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 124(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl 128(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 164(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 156(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 152(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 148(%esp), %ebp + movl 
144(%esp), %edi + movl 140(%esp), %esi + movl 136(%esp), %edx + movl 132(%esp), %ecx + movl 1104(%esp), %eax + movl 112(%esp), %ebx # 4-byte Reload + movl %ebx, 56(%eax) + movl 32(%esp), %ebx # 4-byte Reload + movl %ebx, 60(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl 72(%esp), %ebx # 4-byte Reload + movl %ebx, 64(%eax) + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %ecx, 68(%eax) + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %edx, 72(%eax) + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %esi, 76(%eax) + adcl 76(%esp), %ebp # 4-byte Folded Reload + movl %edi, 80(%eax) + movl 44(%esp), %edx # 4-byte Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %ebp, 84(%eax) + movl 52(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %edx, 88(%eax) + movl 68(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx # 4-byte Folded Reload + movl %ecx, 92(%eax) + movl 80(%esp), %ecx # 4-byte Reload + adcl 104(%esp), %ecx # 4-byte Folded Reload + movl %edx, 96(%eax) + movl 88(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx # 4-byte Folded Reload + movl %ecx, 100(%eax) + movl 96(%esp), %ecx # 4-byte Reload + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %edx, 104(%eax) + movl %ecx, 108(%eax) + movl 100(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 112(%eax) + movl 116(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 116(%eax) + addl $1084, %esp # imm = 0x43C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end224: + .size mcl_fpDbl_mulPre15L, .Lfunc_end224-mcl_fpDbl_mulPre15L + + .globl mcl_fpDbl_sqrPre15L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre15L,@function +mcl_fpDbl_sqrPre15L: # @mcl_fpDbl_sqrPre15L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1084, %esp # imm = 0x43C + calll .L225$pb +.L225$pb: + popl %ebx +.Ltmp46: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp46-.L225$pb), %ebx + movl %ebx, 116(%esp) # 4-byte Spill + movl 1108(%esp), %edx + movl (%edx), %eax + movl %eax, (%esp) + leal 1016(%esp), %ecx + movl %edx, %edi + movl %ebx, %esi + calll .LmulPv480x32 + movl 1076(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1072(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1068(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1064(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1060(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1056(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1052(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1044(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1040(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1036(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1032(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1028(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 1024(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1016(%esp), %eax + movl 1020(%esp), %ebp + movl 1104(%esp), %ecx + movl %eax, (%ecx) + movl %edi, %edx + movl 4(%edx), %eax + movl %eax, (%esp) + leal 952(%esp), %ecx + movl %esi, %ebx + calll .LmulPv480x32 + addl 952(%esp), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 1012(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1008(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1004(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1000(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 996(%esp), %eax + movl %eax, 84(%esp) # 
4-byte Spill + movl 992(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 988(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 984(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 980(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 976(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 972(%esp), %edi + movl 968(%esp), %esi + movl 964(%esp), %edx + movl 956(%esp), %eax + movl 960(%esp), %ecx + movl 1104(%esp), %ebp + movl 16(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 8(%edx), %eax + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 72(%esp), %eax # 4-byte Reload + addl 888(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 948(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 944(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 940(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 936(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 932(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 928(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 924(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 920(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 916(%esp), %ebx + movl 912(%esp), %edi + movl 908(%esp), %esi + movl 904(%esp), %edx + movl 900(%esp), %ecx + movl 892(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 896(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 72(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 112(%esp) # 4-byte Folded Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 68(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte 
Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 12(%edx), %eax + movl %eax, (%esp) + leal 824(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 824(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 884(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 880(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 876(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 872(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 868(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 864(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 860(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 856(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 852(%esp), %ebx + movl 848(%esp), %edi + movl 844(%esp), %esi + movl 840(%esp), %edx + movl 836(%esp), %ecx + movl 828(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 60(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 16(%edx), %eax + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 760(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 44(%esp) # 4-byte 
Spill + movl 816(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 804(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 800(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 796(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 792(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 788(%esp), %ebx + movl 784(%esp), %edi + movl 780(%esp), %esi + movl 776(%esp), %edx + movl 772(%esp), %ecx + movl 764(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 60(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 20(%edx), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 696(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 748(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 744(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 740(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 736(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 732(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 728(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 724(%esp), %ebx + movl 720(%esp), %edi + movl 716(%esp), %esi + movl 712(%esp), %edx + movl 708(%esp), %ecx + movl 700(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 704(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + 
movl %edx, 20(%esp) # 4-byte Spill + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 24(%edx), %eax + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 632(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 692(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 688(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 684(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 680(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 676(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 672(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 660(%esp), %ebx + movl 656(%esp), %edi + movl 652(%esp), %esi + movl 648(%esp), %edx + movl 644(%esp), %ecx + movl 636(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 640(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 64(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 68(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte 
Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 28(%edx), %eax + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 568(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 628(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 624(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 620(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 616(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 596(%esp), %ebx + movl 592(%esp), %edi + movl 588(%esp), %esi + movl 584(%esp), %edx + movl 580(%esp), %ecx + movl 572(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 576(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 68(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 32(%edx), %eax + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 504(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 564(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 560(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 532(%esp), %ebx + movl 528(%esp), %edi + movl 524(%esp), %esi + movl 520(%esp), %edx + movl 516(%esp), %ecx + movl 508(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 512(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl 112(%esp), 
%eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 36(%edx), %eax + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 440(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 500(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 496(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 492(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 488(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 472(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 468(%esp), %ebx + movl 464(%esp), %edi + movl 460(%esp), %esi + movl 456(%esp), %edx + movl 452(%esp), %ecx + movl 444(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 448(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 56(%esp), %ebp # 4-byte Reload + adcl 36(%esp), %ebp # 4-byte Folded Reload + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 24(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 12(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + 
adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 40(%edx), %eax + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 376(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 436(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 432(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 428(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 424(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 420(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 416(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 404(%esp), %ebx + movl 400(%esp), %edi + movl 396(%esp), %esi + movl 392(%esp), %edx + movl 388(%esp), %ecx + movl 380(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 56(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 112(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 12(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 44(%edx), %eax + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 312(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 372(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 368(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 364(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 360(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 356(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 352(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 348(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 344(%esp), %eax + movl %eax, 64(%esp) # 
4-byte Spill + movl 340(%esp), %ebx + movl 336(%esp), %edi + movl 332(%esp), %esi + movl 328(%esp), %edx + movl 324(%esp), %ecx + movl 316(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 320(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl 112(%esp), %ebp # 4-byte Reload + movl %ebp, 44(%eax) + movl 36(%esp), %eax # 4-byte Reload + adcl %eax, 108(%esp) # 4-byte Folded Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 28(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 32(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 16(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 56(%esp) # 4-byte Folded Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, 48(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 48(%edx), %eax + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 108(%esp), %eax # 4-byte Reload + addl 248(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 304(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 300(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 296(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 292(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 288(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 284(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 280(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 276(%esp), %ebx + movl 272(%esp), %edi + movl 268(%esp), %edx + movl 264(%esp), %ecx + movl 260(%esp), %eax + movl 252(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 256(%esp), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + movl 1104(%esp), %ebp + movl %esi, 48(%ebp) + movl 112(%esp), %esi # 4-byte Reload + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %esi, 112(%esp) # 4-byte Spill + movl 20(%esp), %esi # 4-byte Reload + adcl %esi, 36(%esp) # 4-byte Folded Spill + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 12(%esp) # 4-byte Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 20(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 24(%esp) # 4-byte Spill + adcl 16(%esp), %edi # 4-byte Folded Reload + movl %edi, 28(%esp) # 4-byte Spill + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 32(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 
100(%esp), %esi # 4-byte Reload + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 100(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl %eax, 52(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 44(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 52(%edx), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 184(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 244(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 240(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 236(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 232(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 228(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 224(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 220(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 212(%esp), %ebx + movl 208(%esp), %edx + movl 204(%esp), %ecx + movl 200(%esp), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 196(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 188(%esp), %eax + movl 192(%esp), %esi + movl 112(%esp), %ebp # 4-byte Reload + movl 1104(%esp), %edi + movl %ebp, 52(%edi) + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl %esi, %ebp + adcl 12(%esp), %ebp # 4-byte Folded Reload + movl 72(%esp), %esi # 4-byte Reload + adcl 20(%esp), %esi # 4-byte Folded Reload + movl 24(%esp), %edi # 4-byte Reload + adcl %edi, 40(%esp) # 4-byte Folded Spill + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 28(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 36(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 52(%esp), %ebx # 4-byte Reload + adcl %ebx, 64(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1108(%esp), %edx + movl 56(%edx), %eax + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 112(%esp), %eax # 4-byte Reload + addl 120(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 124(%esp), %ebp + movl %ebp, 32(%esp) # 4-byte Spill + adcl 128(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 116(%esp) 
# 4-byte Spill + movl 176(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 164(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 156(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 152(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 148(%esp), %ebp + movl 144(%esp), %edi + movl 140(%esp), %esi + movl 136(%esp), %edx + movl 132(%esp), %ecx + movl 1104(%esp), %eax + movl 112(%esp), %ebx # 4-byte Reload + movl %ebx, 56(%eax) + movl 32(%esp), %ebx # 4-byte Reload + movl %ebx, 60(%eax) + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl 72(%esp), %ebx # 4-byte Reload + movl %ebx, 64(%eax) + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %ecx, 68(%eax) + adcl 36(%esp), %esi # 4-byte Folded Reload + movl %edx, 72(%eax) + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %esi, 76(%eax) + adcl 76(%esp), %ebp # 4-byte Folded Reload + movl %edi, 80(%eax) + movl 44(%esp), %edx # 4-byte Reload + adcl 64(%esp), %edx # 4-byte Folded Reload + movl %ebp, 84(%eax) + movl 52(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %edx, 88(%eax) + movl 68(%esp), %edx # 4-byte Reload + adcl 92(%esp), %edx # 4-byte Folded Reload + movl %ecx, 92(%eax) + movl 80(%esp), %ecx # 4-byte Reload + adcl 104(%esp), %ecx # 4-byte Folded Reload + movl %edx, 96(%eax) + movl 88(%esp), %edx # 4-byte Reload + adcl 108(%esp), %edx # 4-byte Folded Reload + movl %ecx, 100(%eax) + movl 96(%esp), %ecx # 4-byte Reload + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %edx, 104(%eax) + movl %ecx, 108(%eax) + movl 100(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 112(%eax) + movl 116(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 116(%eax) + addl $1084, %esp # imm = 0x43C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end225: + .size mcl_fpDbl_sqrPre15L, .Lfunc_end225-mcl_fpDbl_sqrPre15L + + .globl mcl_fp_mont15L + .align 16, 0x90 + .type mcl_fp_mont15L,@function +mcl_fp_mont15L: # @mcl_fp_mont15L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2044, %esp # imm = 0x7FC + calll .L226$pb +.L226$pb: + popl %ebx +.Ltmp47: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp47-.L226$pb), %ebx + movl 2076(%esp), %eax + movl -4(%eax), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1976(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 1976(%esp), %ebp + movl 1980(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 2036(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 2032(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 2028(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 2024(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2020(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2016(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2012(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 2008(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 2004(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 2000(%esp), %edi + movl 1996(%esp), %esi + movl 1992(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 1988(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 1984(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 1912(%esp), 
%ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + addl 1912(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 1916(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1920(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1924(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1928(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1932(%esp), %esi + adcl 1936(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 1940(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1944(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1948(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1952(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1956(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1960(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1964(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1968(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 1972(%esp), %ebp + sbbl %eax, %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1848(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 116(%esp), %eax # 4-byte Reload + andl $1, %eax + movl 88(%esp), %edx # 4-byte Reload + addl 1848(%esp), %edx + movl 96(%esp), %ecx # 4-byte Reload + adcl 1852(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1856(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1860(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1864(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 1868(%esp), %edi + movl 56(%esp), %ecx # 4-byte Reload + adcl 1872(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1876(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1880(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1884(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1888(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1892(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1896(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1900(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 1904(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + adcl 1908(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %edx, %eax + movl %edx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1784(%esp), %ecx + movl 2076(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + andl $1, %ebp + movl %ebp, %ecx + addl 1784(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1792(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill 
+ movl 80(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 1804(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1808(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 1812(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 1816(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1820(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1824(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1828(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1832(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 1836(%esp), %esi + movl 108(%esp), %ebp # 4-byte Reload + adcl 1840(%esp), %ebp + movl 116(%esp), %eax # 4-byte Reload + adcl 1844(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1720(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 96(%esp), %ecx # 4-byte Reload + addl 1720(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1724(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1728(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1732(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1736(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1740(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1744(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1748(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1752(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 1756(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1760(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1764(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 1768(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + adcl 1772(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1776(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1780(%esp), %esi + sbbl %ebp, %ebp + movl %ecx, %eax + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1656(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %ebp + movl %ebp, %ecx + movl 96(%esp), %eax # 4-byte Reload + addl 1656(%esp), %eax + movl 84(%esp), %eax # 4-byte Reload + adcl 1660(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1664(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1668(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1672(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1676(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1680(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 
1684(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 1688(%esp), %ebp + adcl 1692(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1696(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1700(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1704(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1708(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 1712(%esp), %edi + adcl 1716(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1592(%esp), %ecx + movl 2068(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + movl 84(%esp), %ecx # 4-byte Reload + addl 1592(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 1596(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1600(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1604(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1608(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1612(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1616(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1620(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1624(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 1628(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 1632(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1636(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1640(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1644(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1648(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1652(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %ebp + movl %ebp, %eax + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1528(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %edi + movl %edi, %ecx + addl 1528(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 1532(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1540(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 1544(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1548(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1552(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1564(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 112(%esp), %edi # 4-byte Reload + adcl 1568(%esp), %edi + movl 104(%esp), %esi # 4-byte Reload + adcl 1572(%esp), %esi + movl 
108(%esp), %eax # 4-byte Reload + adcl 1576(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1580(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1584(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1588(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1464(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 92(%esp), %ecx # 4-byte Reload + addl 1464(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1472(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1476(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1480(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 1484(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 1488(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1492(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1496(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 1500(%esp), %edi + movl %edi, 112(%esp) # 4-byte Spill + adcl 1504(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1508(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 1512(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1516(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1520(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1400(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + movl 92(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1400(%esp), %edi + movl 80(%esp), %ecx # 4-byte Reload + adcl 1404(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1408(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1412(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1416(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 1420(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1424(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1428(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 1432(%esp), %edi + movl 112(%esp), %ecx # 4-byte Reload + adcl 1436(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1440(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1444(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 1448(%esp), %esi + movl %esi, %ebp + movl 88(%esp), %esi # 4-byte Reload + adcl 1452(%esp), %esi + movl 96(%esp), %ecx # 4-byte Reload + adcl 1456(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1460(%esp), %ecx + movl %ecx, 
84(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1336(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 80(%esp), %ecx # 4-byte Reload + addl 1336(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1360(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1364(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1376(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1380(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + adcl 1384(%esp), %esi + movl %esi, %ebp + movl 96(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 1392(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1272(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + movl 80(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1272(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 1276(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1280(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1284(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1288(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1296(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1300(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1304(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1308(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1312(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1316(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + adcl 1320(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1324(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl 1328(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1332(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %edi + movl 2072(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 64(%esp), %ecx # 4-byte Reload + addl 1208(%esp), %ecx 
+ movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1212(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1216(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 1232(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 1244(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 1248(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1260(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1264(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1268(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1144(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %edi + movl %edi, %ecx + movl 64(%esp), %eax # 4-byte Reload + addl 1144(%esp), %eax + movl 56(%esp), %eax # 4-byte Reload + adcl 1148(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 1156(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1168(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1180(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1188(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 1196(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 1200(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1204(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1080(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 56(%esp), %ecx # 4-byte Reload + addl 1080(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1088(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 1092(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 
1096(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 1128(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1016(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %edi + movl %edi, %ecx + addl 1016(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1028(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 1032(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 1044(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 1060(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 952(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 952(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 964(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 976(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), 
%eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 992(%esp), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + movl %ebp, %eax + andl $1, %eax + addl 888(%esp), %esi + movl 68(%esp), %ecx # 4-byte Reload + adcl 892(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 896(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 900(%esp), %edi + movl 100(%esp), %ecx # 4-byte Reload + adcl 904(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 908(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 912(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 916(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 920(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 924(%esp), %ebp + movl 96(%esp), %ecx # 4-byte Reload + adcl 928(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 932(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 936(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 940(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 944(%esp), %esi + movl 56(%esp), %ecx # 4-byte Reload + adcl 948(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 824(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 68(%esp), %ecx # 4-byte Reload + addl 824(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 832(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 856(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 864(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), 
%edi # 4-byte Reload + adcl 872(%esp), %edi + adcl 876(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + movl 68(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 760(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 776(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 800(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 804(%esp), %ebp + adcl 808(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 816(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 696(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 708(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 736(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 748(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 752(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 52(%esp), %eax # 
4-byte Folded Reload + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %edi + movl %edi, %ecx + addl 632(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 656(%esp), %edi + movl 88(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 672(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 688(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 76(%esp), %ecx # 4-byte Reload + addl 568(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 588(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 596(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 604(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %edi + movl %edi, %ecx + addl 504(%esp), %esi + movl 100(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 512(%esp), 
%eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 516(%esp), %edi + movl 108(%esp), %esi # 4-byte Reload + adcl 520(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 532(%esp), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 560(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 100(%esp), %ecx # 4-byte Reload + addl 440(%esp), %ecx + movl 112(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 448(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + adcl 452(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 460(%esp), %edi + movl 96(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 492(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %ebp + movl %ebp, %ecx + addl 376(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 388(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 396(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + 
adcl 404(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 416(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl 112(%esp), %ecx # 4-byte Reload + addl 312(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 320(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 336(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 348(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 352(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %edi + imull 52(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + andl $1, %esi + movl %esi, %ecx + addl 248(%esp), %edi + movl 104(%esp), %esi # 4-byte Reload + adcl 252(%esp), %esi + movl 108(%esp), %edi # 4-byte Reload + adcl 256(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 288(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 60(%esp) # 4-byte 
Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 2072(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 2068(%esp), %edx + calll .LmulPv480x32 + movl %esi, %ecx + movl 96(%esp), %esi # 4-byte Reload + addl 184(%esp), %ecx + adcl 188(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 196(%esp), %ebp + adcl 200(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 2076(%esp), %edx + calll .LmulPv480x32 + movl 104(%esp), %ebx # 4-byte Reload + andl $1, %ebx + addl 120(%esp), %edi + movl %ebp, %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 128(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + adcl 132(%esp), %edi + adcl 136(%esp), %esi + movl 84(%esp), %edx # 4-byte Reload + adcl 140(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 148(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 152(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 156(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 160(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 164(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 168(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 172(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 176(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 
112(%esp), %edx # 4-byte Reload + adcl 180(%esp), %edx + movl %edx, 112(%esp) # 4-byte Spill + adcl $0, %ebx + movl %ebx, 104(%esp) # 4-byte Spill + movl %eax, %edx + movl 2076(%esp), %ebp + subl (%ebp), %edx + sbbl 4(%ebp), %ecx + movl %edi, %eax + sbbl 8(%ebp), %eax + movl %esi, %ebx + sbbl 12(%ebp), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 84(%esp), %ebx # 4-byte Reload + sbbl 16(%ebp), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 92(%esp), %ebx # 4-byte Reload + sbbl 20(%ebp), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 80(%esp), %ebx # 4-byte Reload + sbbl 24(%ebp), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 64(%esp), %ebx # 4-byte Reload + sbbl 28(%ebp), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + movl 56(%esp), %ebx # 4-byte Reload + sbbl 32(%ebp), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 60(%esp), %ebx # 4-byte Reload + sbbl 36(%ebp), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 68(%esp), %ebx # 4-byte Reload + sbbl 40(%ebp), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 72(%esp), %ebx # 4-byte Reload + sbbl 44(%ebp), %ebx + movl %ebx, 48(%esp) # 4-byte Spill + movl 76(%esp), %ebx # 4-byte Reload + sbbl 48(%ebp), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 100(%esp), %ebx # 4-byte Reload + sbbl 52(%ebp), %ebx + movl %ebx, 88(%esp) # 4-byte Spill + movl 112(%esp), %ebx # 4-byte Reload + sbbl 56(%ebp), %ebx + movl %ebx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ebx # 4-byte Reload + movl 108(%esp), %ebp # 4-byte Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB226_2 +# BB#1: + movl %edx, %ebp +.LBB226_2: + movl 2064(%esp), %edx + movl %ebp, (%edx) + testb %bl, %bl + movl 116(%esp), %ebp # 4-byte Reload + jne .LBB226_4 +# BB#3: + movl %ecx, %ebp +.LBB226_4: + movl %ebp, 4(%edx) + jne .LBB226_6 +# BB#5: + movl %eax, %edi +.LBB226_6: + movl %edi, 8(%edx) + jne .LBB226_8 +# BB#7: + movl 16(%esp), %esi # 4-byte Reload +.LBB226_8: + movl %esi, 12(%edx) + movl 84(%esp), %eax # 4-byte Reload + jne .LBB226_10 +# BB#9: + movl 20(%esp), %eax # 4-byte Reload +.LBB226_10: + movl %eax, 16(%edx) + movl 92(%esp), %eax # 4-byte Reload + jne .LBB226_12 +# BB#11: + movl 24(%esp), %eax # 4-byte Reload +.LBB226_12: + movl %eax, 20(%edx) + movl 80(%esp), %eax # 4-byte Reload + jne .LBB226_14 +# BB#13: + movl 28(%esp), %eax # 4-byte Reload +.LBB226_14: + movl %eax, 24(%edx) + movl 64(%esp), %eax # 4-byte Reload + jne .LBB226_16 +# BB#15: + movl 32(%esp), %eax # 4-byte Reload +.LBB226_16: + movl %eax, 28(%edx) + movl 56(%esp), %eax # 4-byte Reload + jne .LBB226_18 +# BB#17: + movl 36(%esp), %eax # 4-byte Reload +.LBB226_18: + movl %eax, 32(%edx) + movl 60(%esp), %eax # 4-byte Reload + jne .LBB226_20 +# BB#19: + movl 40(%esp), %eax # 4-byte Reload +.LBB226_20: + movl %eax, 36(%edx) + movl 68(%esp), %eax # 4-byte Reload + jne .LBB226_22 +# BB#21: + movl 44(%esp), %eax # 4-byte Reload +.LBB226_22: + movl %eax, 40(%edx) + movl 72(%esp), %eax # 4-byte Reload + jne .LBB226_24 +# BB#23: + movl 48(%esp), %eax # 4-byte Reload +.LBB226_24: + movl %eax, 44(%edx) + movl 76(%esp), %eax # 4-byte Reload + jne .LBB226_26 +# BB#25: + movl 52(%esp), %eax # 4-byte Reload +.LBB226_26: + movl %eax, 48(%edx) + movl 100(%esp), %eax # 4-byte Reload + jne .LBB226_28 +# BB#27: + movl 88(%esp), %eax # 4-byte Reload +.LBB226_28: + movl %eax, 52(%edx) + movl 112(%esp), %eax # 4-byte Reload + jne .LBB226_30 +# BB#29: + movl 96(%esp), %eax # 4-byte Reload +.LBB226_30: + movl %eax, 56(%edx) + addl $2044, %esp # imm = 0x7FC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end226: + 
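+#
+# Reader orientation (hedged; inferred from the generated code itself, not
+# from mcl documentation): mcl_fp_mont15L above and mcl_fp_montNF15L below
+# are LLVM-generated i386 Montgomery multiplication kernels for 15-limb
+# (480-bit) operands. In mcl_fp_mont15L the stack slots
+# 2064/2068/2072/2076(%esp) evidently hold the result, x, y and modulus
+# pointers. Each of the 15 outer iterations calls .LmulPv480x32 twice:
+# once with the word y[i] to add x*y[i] into the running accumulator, and
+# once with q = a0 * p' (the `imull 52(%esp)` against the spilled constant
+# p' = -p^{-1} mod 2^32, loaded from p[-1]; see `movl -4(%eax), %esi` in
+# the montNF prologue) to add q*p, which makes the low limb divisible by
+# 2^32 so it can be dropped. mcl_fp_mont15L tracks the iteration carry
+# explicitly (the sbbl/andl $1 pairs) and finishes with a conditional
+# subtraction of p (the subl/sbbl chain selected by the .LBB226_*
+# branches); the NF variant below drops that carry word, the form mcl
+# appears to use for moduli whose top limb leaves headroom.
+#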
.size mcl_fp_mont15L, .Lfunc_end226-mcl_fp_mont15L + + .globl mcl_fp_montNF15L + .align 16, 0x90 + .type mcl_fp_montNF15L,@function +mcl_fp_montNF15L: # @mcl_fp_montNF15L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2028, %esp # imm = 0x7EC + calll .L227$pb +.L227$pb: + popl %ebx +.Ltmp48: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp48-.L227$pb), %ebx + movl 2060(%esp), %eax + movl -4(%eax), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1960(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1960(%esp), %ebp + movl 1964(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 2020(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2016(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2012(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2008(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 2004(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2000(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 1996(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 1992(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 1988(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 1984(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 1980(%esp), %esi + movl 1976(%esp), %edi + movl 1972(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 1968(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 1896(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 1896(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 1900(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1904(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1908(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1912(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl 1916(%esp), %esi + movl %esi, %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 1920(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1924(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1928(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1932(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1936(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1940(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 1944(%esp), %ebp + movl 76(%esp), %esi # 4-byte Reload + adcl 1948(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 1952(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1956(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1832(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1892(%esp), %eax + movl 92(%esp), %edx # 4-byte Reload + addl 1832(%esp), %edx + movl 68(%esp), %ecx # 4-byte Reload + adcl 1836(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1840(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1844(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 1848(%esp), %edi + movl 48(%esp), %ecx # 4-byte Reload + adcl 1852(%esp), %ecx + movl %ecx, 48(%esp) # 
4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1856(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1860(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1864(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1868(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1872(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 1876(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl 1880(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 1884(%esp), %ebp + movl 100(%esp), %ecx # 4-byte Reload + adcl 1888(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1768(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 1768(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 1772(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1776(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1780(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1784(%esp), %edi + movl %edi, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1792(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 1804(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1808(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1816(%esp), %eax + movl %eax, %esi + adcl 1820(%esp), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 1824(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 1828(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1704(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1764(%esp), %eax + movl 68(%esp), %edx # 4-byte Reload + addl 1704(%esp), %edx + movl 88(%esp), %ecx # 4-byte Reload + adcl 1708(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1712(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1716(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1720(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1724(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1728(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1732(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 1736(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1740(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 1744(%esp), %edi + adcl 1748(%esp), %esi + movl %esi, 76(%esp) # 4-byte 
Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1752(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl 1756(%esp), %ebp + movl 92(%esp), %ecx # 4-byte Reload + adcl 1760(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1640(%esp), %ecx + movl 2060(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + addl 1640(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1644(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1648(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1652(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1656(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1660(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1664(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1668(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1672(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1676(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1680(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1684(%esp), %eax + movl %eax, %esi + movl 96(%esp), %edi # 4-byte Reload + adcl 1688(%esp), %edi + adcl 1692(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1696(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 1700(%esp), %ebp + movl 2056(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1576(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1636(%esp), %eax + movl 88(%esp), %edx # 4-byte Reload + addl 1576(%esp), %edx + movl 72(%esp), %ecx # 4-byte Reload + adcl 1580(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 40(%esp), %ecx # 4-byte Reload + adcl 1584(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1588(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1592(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1596(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1600(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1604(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1608(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1612(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 1616(%esp), %esi + adcl 1620(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1624(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1628(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1632(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1512(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 1512(%esp), %edi + movl 72(%esp), %eax # 4-byte 
Reload + adcl 1516(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + adcl 1520(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1528(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 1532(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1540(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1544(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 1548(%esp), %ebp + adcl 1552(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1564(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1568(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1572(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1448(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1508(%esp), %eax + movl 72(%esp), %edx # 4-byte Reload + addl 1448(%esp), %edx + movl 40(%esp), %ecx # 4-byte Reload + adcl 1452(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1456(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 1460(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + adcl 1464(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1468(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 1472(%esp), %edi + movl 84(%esp), %ecx # 4-byte Reload + adcl 1476(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 1480(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + adcl 1484(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1488(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1492(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1496(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1500(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 1504(%esp), %ebp + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1384(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 1384(%esp), %esi + movl 40(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1400(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1404(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1408(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 84(%esp), 
%eax # 4-byte Reload + adcl 1412(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1432(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1436(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1440(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1444(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1320(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1380(%esp), %edx + movl 40(%esp), %ecx # 4-byte Reload + addl 1320(%esp), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 1324(%esp), %ebp + movl 44(%esp), %edi # 4-byte Reload + adcl 1328(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 1352(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1360(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 1368(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1376(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %ecx, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1256(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + movl 40(%esp), %eax # 4-byte Reload + addl 1256(%esp), %eax + adcl 1260(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + adcl 1264(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 1272(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 1296(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1304(%esp), %esi + 
movl %esi, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 1312(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1192(%esp), %ecx + movl 2052(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + movl 1252(%esp), %eax + movl 48(%esp), %edx # 4-byte Reload + addl 1192(%esp), %edx + movl 44(%esp), %ecx # 4-byte Reload + adcl 1196(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1200(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 1204(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1208(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 1212(%esp), %ebp + movl 80(%esp), %ecx # 4-byte Reload + adcl 1216(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1220(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1224(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl 1228(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1232(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1236(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1240(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + adcl 1244(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 1248(%esp), %esi + adcl $0, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1128(%esp), %ecx + movl 2060(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + addl 1128(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 1140(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1148(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 1168(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1184(%esp), %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 1188(%esp), %esi + movl 2056(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1064(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 1124(%esp), %eax + movl 44(%esp), %edx # 4-byte Reload + addl 1064(%esp), %edx + movl 52(%esp), %ecx # 4-byte 
Reload + adcl 1068(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 1072(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1076(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1080(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 1084(%esp), %edi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1088(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1092(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1096(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl 1100(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1104(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1108(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1112(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1116(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 1120(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %esi + movl %edx, %ebp + movl %ebp, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1000(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 1000(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 1012(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1020(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 1028(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1044(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 1060(%esp), %esi + movl 2056(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 936(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 996(%esp), %eax + movl 52(%esp), %edx # 4-byte Reload + addl 936(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + adcl 940(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 944(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 948(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 952(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 956(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 960(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 964(%esp), %ecx + movl %ecx, 100(%esp) # 
4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 968(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 972(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 976(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 980(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 984(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 988(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + adcl 992(%esp), %esi + movl %esi, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %esi + movl %edx, %edi + movl %edi, %eax + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 872(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 872(%esp), %edi + movl 56(%esp), %ebp # 4-byte Reload + adcl 876(%esp), %ebp + movl 60(%esp), %edi # 4-byte Reload + adcl 880(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 932(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 808(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 868(%esp), %eax + movl %ebp, %ecx + addl 808(%esp), %ecx + adcl 812(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 816(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 820(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 824(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 828(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 832(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 836(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 840(%esp), %edi + movl 88(%esp), %esi # 4-byte Reload + adcl 844(%esp), %esi + movl 72(%esp), %edx # 4-byte Reload + adcl 848(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 852(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 856(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + 
adcl 860(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 864(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 744(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 744(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 768(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 776(%esp), %edi + adcl 780(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 792(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 680(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 740(%esp), %eax + movl 60(%esp), %ecx # 4-byte Reload + addl 680(%esp), %ecx + movl 84(%esp), %edx # 4-byte Reload + adcl 684(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 688(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 692(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 696(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + adcl 700(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 704(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + adcl 708(%esp), %edi + movl 88(%esp), %edx # 4-byte Reload + adcl 712(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 716(%esp), %ebp + movl 64(%esp), %edx # 4-byte Reload + adcl 720(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl 724(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 728(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 732(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 44(%esp), %edx # 4-byte Reload + adcl 736(%esp), %edx + movl %edx, 44(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 616(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 616(%esp), %esi + movl 84(%esp), %esi # 4-byte Reload + adcl 620(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 628(%esp), 
%eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 644(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 648(%esp), %edi + adcl 652(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 656(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 612(%esp), %edx + movl %esi, %ecx + addl 552(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 572(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 580(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 588(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 84(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 488(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 488(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl %esi, %ebp + adcl 508(%esp), %ebp + movl 68(%esp), %edi # 4-byte Reload + adcl 512(%esp), %edi + movl 88(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 64(%esp) # 
4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 528(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 484(%esp), %edx + movl 80(%esp), %ecx # 4-byte Reload + addl 424(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 440(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + adcl 444(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl %esi, %edi + adcl 460(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 360(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 360(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + adcl 368(%esp), %esi + movl 100(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 376(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 396(%esp), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 400(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 420(%esp), 
%eax + movl %eax, 80(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 296(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 356(%esp), %edx + movl 76(%esp), %ecx # 4-byte Reload + addl 296(%esp), %ecx + adcl 300(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 308(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 332(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 336(%esp), %edi + movl 44(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 76(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 36(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 232(%esp), %ecx + movl 2060(%esp), %edx + calll .LmulPv480x32 + addl 232(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 240(%esp), %ebp + movl 92(%esp), %esi # 4-byte Reload + adcl 244(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 272(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 44(%esp), %edi # 4-byte Reload + adcl 276(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 2056(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 168(%esp), %ecx + movl 2052(%esp), %edx + calll .LmulPv480x32 + movl 228(%esp), %edx + movl 96(%esp), %ecx # 4-byte Reload + addl 168(%esp), %ecx + adcl 172(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + adcl 176(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 180(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 184(%esp), %eax + 
movl %eax, 88(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 188(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 192(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 208(%esp), %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 212(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %edi + movl %eax, (%esp) + leal 104(%esp), %ecx + movl 2060(%esp), %eax + movl %eax, %edx + calll .LmulPv480x32 + addl 104(%esp), %edi + movl 68(%esp), %edi # 4-byte Reload + movl 100(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 112(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl %ecx, %ebx + adcl 116(%esp), %edi + movl 88(%esp), %ecx # 4-byte Reload + adcl 120(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + adcl 124(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 128(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 132(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 136(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 140(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 44(%esp), %ecx # 4-byte Reload + adcl 144(%esp), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + adcl 148(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 152(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 156(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 160(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 164(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl %eax, %edx + movl 2060(%esp), %ecx + subl (%ecx), %edx + movl %ebx, %ebp + sbbl 4(%ecx), %ebp + movl %edi, %ebx + sbbl 8(%ecx), %ebx + movl 88(%esp), %eax # 4-byte Reload + sbbl 12(%ecx), %eax + sbbl 16(%ecx), %esi + movl %esi, 4(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + sbbl 20(%ecx), %esi + movl %esi, 8(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + sbbl 24(%ecx), %esi + movl %esi, 12(%esp) # 4-byte Spill + movl 52(%esp), %esi # 4-byte Reload + sbbl 28(%ecx), %esi + movl %esi, 16(%esp) # 4-byte Spill + movl 56(%esp), %esi # 4-byte Reload + sbbl 32(%ecx), %esi + movl %esi, 20(%esp) # 4-byte Spill + movl 44(%esp), %esi # 4-byte Reload + sbbl 36(%ecx), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + sbbl 40(%ecx), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + sbbl 44(%ecx), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + sbbl 48(%ecx), %esi + movl %esi, 
36(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + sbbl 52(%ecx), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + sbbl 56(%ecx), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl %esi, %ecx + sarl $31, %ecx + testl %ecx, %ecx + movl 100(%esp), %ecx # 4-byte Reload + js .LBB227_2 +# BB#1: + movl %edx, %ecx +.LBB227_2: + movl 2048(%esp), %edx + movl %ecx, (%edx) + movl 92(%esp), %esi # 4-byte Reload + js .LBB227_4 +# BB#3: + movl %ebp, %esi +.LBB227_4: + movl %esi, 4(%edx) + movl 88(%esp), %ecx # 4-byte Reload + js .LBB227_6 +# BB#5: + movl %ebx, %edi +.LBB227_6: + movl %edi, 8(%edx) + js .LBB227_8 +# BB#7: + movl %eax, %ecx +.LBB227_8: + movl %ecx, 12(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB227_10 +# BB#9: + movl 4(%esp), %eax # 4-byte Reload +.LBB227_10: + movl %eax, 16(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB227_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB227_12: + movl %eax, 20(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB227_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB227_14: + movl %eax, 24(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB227_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB227_16: + movl %eax, 28(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB227_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB227_18: + movl %eax, 32(%edx) + movl 44(%esp), %eax # 4-byte Reload + js .LBB227_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB227_20: + movl %eax, 36(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB227_22 +# BB#21: + movl 28(%esp), %eax # 4-byte Reload +.LBB227_22: + movl %eax, 40(%edx) + movl 84(%esp), %eax # 4-byte Reload + js .LBB227_24 +# BB#23: + movl 32(%esp), %eax # 4-byte Reload +.LBB227_24: + movl %eax, 44(%edx) + movl 80(%esp), %eax # 4-byte Reload + js .LBB227_26 +# BB#25: + movl 36(%esp), %eax # 4-byte Reload +.LBB227_26: + movl %eax, 48(%edx) + movl 76(%esp), %eax # 4-byte Reload + js .LBB227_28 +# BB#27: + movl 40(%esp), %eax # 4-byte Reload +.LBB227_28: + movl %eax, 52(%edx) + movl 96(%esp), %eax # 4-byte Reload + js .LBB227_30 +# BB#29: + movl 68(%esp), %eax # 4-byte Reload +.LBB227_30: + movl %eax, 56(%edx) + addl $2028, %esp # imm = 0x7EC + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end227: + .size mcl_fp_montNF15L, .Lfunc_end227-mcl_fp_montNF15L + + .globl mcl_fp_montRed15L + .align 16, 0x90 + .type mcl_fp_montRed15L,@function +mcl_fp_montRed15L: # @mcl_fp_montRed15L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1148, %esp # imm = 0x47C + calll .L228$pb +.L228$pb: + popl %eax +.Ltmp49: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp49-.L228$pb), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1176(%esp), %edx + movl -4(%edx), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 1172(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 80(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 84(%esp) # 4-byte Spill + imull %esi, %ebx + movl 116(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%ecx), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%ecx), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 104(%ecx), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 100(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 96(%ecx), %esi + movl %esi, 152(%esp) # 4-byte Spill + movl 92(%ecx), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 88(%ecx), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 84(%ecx), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 80(%ecx), %eax + movl 
%eax, 144(%esp) # 4-byte Spill + movl 76(%ecx), %esi + movl %esi, 168(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl %esi, 164(%esp) # 4-byte Spill + movl 68(%ecx), %esi + movl %esi, 176(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 60(%ecx), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 56(%ecx), %esi + movl %esi, 156(%esp) # 4-byte Spill + movl 52(%ecx), %esi + movl %esi, 140(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 44(%ecx), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 36(%ecx), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 20(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 16(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 12(%ecx), %edi + movl 8(%ecx), %esi + movl (%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 56(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 1080(%esp), %ecx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + movl 80(%esp), %eax # 4-byte Reload + addl 1080(%esp), %eax + movl 84(%esp), %ecx # 4-byte Reload + adcl 1084(%esp), %ecx + adcl 1088(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + adcl 1092(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1108(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + adcl $0, 180(%esp) # 4-byte Folded Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 164(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded 
Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + movl 148(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %esi + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1016(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + andl $1, %ebp + movl %ebp, %ecx + addl 1016(%esp), %esi + movl 84(%esp), %edx # 4-byte Reload + adcl 1020(%esp), %edx + movl 64(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1044(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl 1060(%esp), %ebp + movl 140(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 164(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + movl 144(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, %esi + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 148(%esp) # 4-byte Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 952(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 952(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 956(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 976(%esp), 
%eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 992(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %ebp # 4-byte Reload + adcl 1004(%esp), %ebp + movl 180(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + adcl $0, 164(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 144(%esp) # 4-byte Spill + movl 160(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 888(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 888(%esp), %esi + movl 68(%esp), %esi # 4-byte Reload + adcl 892(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl 936(%esp), %ebp + movl %ebp, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 160(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + movl 132(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 
$0, %edi + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 824(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 824(%esp), %esi + movl 72(%esp), %ecx # 4-byte Reload + adcl 828(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 132(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 104(%esp) # 4-byte Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 760(%esp), %esi + movl 76(%esp), %esi # 4-byte Reload + adcl 764(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 
176(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + movl 152(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 132(%esp) # 4-byte Folded Spill + movl 128(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 696(%esp), %esi + movl 80(%esp), %ecx # 4-byte Reload + adcl 700(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 152(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 128(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + movl 108(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 632(%esp), %edi + movl 96(%esp), %ecx # 4-byte Reload + adcl 636(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax 
# 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %ebp # 4-byte Reload + adcl 672(%esp), %ebp + movl 164(%esp), %edi # 4-byte Reload + adcl 676(%esp), %edi + movl 168(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl $0, 136(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 568(%esp), %esi + movl 100(%esp), %ecx # 4-byte Reload + adcl 572(%esp), %ecx + movl 112(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + adcl 604(%esp), %ebp + movl %ebp, 176(%esp) # 4-byte Spill + adcl 608(%esp), %edi + movl %edi, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %edi # 4-byte Reload + adcl 616(%esp), %edi + movl 160(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 504(%esp), %ecx + movl 1176(%esp), %eax + movl %eax, %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 504(%esp), %esi + movl 112(%esp), %ecx # 4-byte Reload + adcl 508(%esp), 
%ecx + movl 124(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 156(%esp), %esi # 4-byte Reload + adcl 524(%esp), %esi + movl 172(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + adcl 548(%esp), %edi + movl %edi, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ecx, %edi + movl %edi, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 440(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 440(%esp), %edi + movl 124(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl 120(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %edi # 4-byte Reload + adcl 452(%esp), %edi + adcl 456(%esp), %esi + movl %esi, 156(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %esi # 4-byte Reload + adcl 464(%esp), %esi + movl 176(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 376(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + 
calll .LmulPv480x32 + addl 376(%esp), %ebp + movl 120(%esp), %ebp # 4-byte Reload + adcl 380(%esp), %ebp + adcl 384(%esp), %edi + movl %edi, 140(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 172(%esp), %edi # 4-byte Reload + adcl 392(%esp), %edi + adcl 396(%esp), %esi + movl %esi, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %esi # 4-byte Reload + adcl 412(%esp), %esi + movl 160(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, 84(%esp) # 4-byte Folded Spill + movl %ebp, %eax + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 312(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 312(%esp), %ebp + movl 140(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl 156(%esp), %ecx # 4-byte Reload + adcl 320(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + adcl 324(%esp), %edi + movl %edi, 172(%esp) # 4-byte Spill + movl 180(%esp), %ecx # 4-byte Reload + adcl 328(%esp), %ecx + movl %ecx, 180(%esp) # 4-byte Spill + movl 176(%esp), %ecx # 4-byte Reload + adcl 332(%esp), %ecx + movl %ecx, 176(%esp) # 4-byte Spill + movl 164(%esp), %ecx # 4-byte Reload + adcl 336(%esp), %ecx + movl %ecx, 164(%esp) # 4-byte Spill + movl 168(%esp), %ecx # 4-byte Reload + adcl 340(%esp), %ecx + movl %ecx, 168(%esp) # 4-byte Spill + adcl 344(%esp), %esi + movl %esi, 144(%esp) # 4-byte Spill + movl 160(%esp), %ecx # 4-byte Reload + adcl 348(%esp), %ecx + movl %ecx, 160(%esp) # 4-byte Spill + movl 148(%esp), %ebp # 4-byte Reload + adcl 352(%esp), %ebp + movl 136(%esp), %ecx # 4-byte Reload + adcl 356(%esp), %ecx + movl %ecx, 136(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 360(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 364(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 368(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 372(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + adcl $0, 104(%esp) # 4-byte Folded Spill + adcl $0, 108(%esp) # 4-byte Folded Spill + movl 84(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %eax, %edi + imull 88(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 248(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 248(%esp), %edi + movl 156(%esp), %ecx # 4-byte Reload + adcl 252(%esp), %ecx + movl 172(%esp), %eax # 4-byte Reload + adcl 256(%esp), 
%eax + movl %eax, 172(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + adcl 284(%esp), %ebp + movl %ebp, 148(%esp) # 4-byte Spill + movl 136(%esp), %edi # 4-byte Reload + adcl 288(%esp), %edi + movl 152(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 308(%esp), %ebp + adcl $0, 108(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 1176(%esp), %edx + movl 92(%esp), %ebx # 4-byte Reload + calll .LmulPv480x32 + addl 184(%esp), %esi + movl 172(%esp), %edx # 4-byte Reload + adcl 188(%esp), %edx + movl %edx, 172(%esp) # 4-byte Spill + movl 180(%esp), %ecx # 4-byte Reload + adcl 192(%esp), %ecx + movl %ecx, 180(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 168(%esp), %esi # 4-byte Reload + adcl 204(%esp), %esi + movl %esi, 168(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl 220(%esp), %edi + movl %edi, 136(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 240(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %ebx # 4-byte Reload + adcl $0, %ebx + movl %edx, %eax + subl 16(%esp), %edx # 4-byte Folded Reload + sbbl 4(%esp), %ecx # 4-byte Folded Reload + movl 176(%esp), %eax # 4-byte Reload + sbbl 8(%esp), %eax # 4-byte Folded Reload + movl 164(%esp), %ebp # 4-byte Reload + sbbl 12(%esp), %ebp # 4-byte Folded Reload + sbbl 20(%esp), %esi # 4-byte Folded Reload + movl 144(%esp), %edi # 4-byte Reload + sbbl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 84(%esp) # 4-byte Spill + movl 160(%esp), %edi # 4-byte Reload + sbbl 28(%esp), %edi # 4-byte Folded Reload + 
movl %edi, 88(%esp) # 4-byte Spill + movl 148(%esp), %edi # 4-byte Reload + sbbl 32(%esp), %edi # 4-byte Folded Reload + movl %edi, 92(%esp) # 4-byte Spill + movl 136(%esp), %edi # 4-byte Reload + sbbl 36(%esp), %edi # 4-byte Folded Reload + movl %edi, 96(%esp) # 4-byte Spill + movl 152(%esp), %edi # 4-byte Reload + sbbl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 100(%esp) # 4-byte Spill + movl 132(%esp), %edi # 4-byte Reload + sbbl 44(%esp), %edi # 4-byte Folded Reload + movl %edi, 112(%esp) # 4-byte Spill + movl 128(%esp), %edi # 4-byte Reload + sbbl 48(%esp), %edi # 4-byte Folded Reload + movl %edi, 120(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + sbbl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 124(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + sbbl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 140(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + sbbl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 156(%esp) # 4-byte Spill + sbbl $0, %ebx + andl $1, %ebx + movl %ebx, %edi + jne .LBB228_2 +# BB#1: + movl %edx, 172(%esp) # 4-byte Spill +.LBB228_2: + movl 1168(%esp), %edx + movl 172(%esp), %ebx # 4-byte Reload + movl %ebx, (%edx) + movl %edi, %ebx + testb %bl, %bl + jne .LBB228_4 +# BB#3: + movl %ecx, 180(%esp) # 4-byte Spill +.LBB228_4: + movl 180(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%edx) + movl 176(%esp), %ecx # 4-byte Reload + jne .LBB228_6 +# BB#5: + movl %eax, %ecx +.LBB228_6: + movl %ecx, 8(%edx) + movl 164(%esp), %eax # 4-byte Reload + jne .LBB228_8 +# BB#7: + movl %ebp, %eax +.LBB228_8: + movl %eax, 12(%edx) + movl 108(%esp), %ecx # 4-byte Reload + movl 148(%esp), %eax # 4-byte Reload + movl 168(%esp), %ebp # 4-byte Reload + jne .LBB228_10 +# BB#9: + movl %esi, %ebp +.LBB228_10: + movl %ebp, 16(%edx) + movl 152(%esp), %ebp # 4-byte Reload + movl 144(%esp), %ebx # 4-byte Reload + jne .LBB228_12 +# BB#11: + movl 84(%esp), %ebx # 4-byte Reload +.LBB228_12: + movl %ebx, 20(%edx) + movl 132(%esp), %ebx # 4-byte Reload + movl 160(%esp), %edi # 4-byte Reload + jne .LBB228_14 +# BB#13: + movl 88(%esp), %edi # 4-byte Reload +.LBB228_14: + movl %edi, 24(%edx) + movl 128(%esp), %edi # 4-byte Reload + jne .LBB228_16 +# BB#15: + movl 92(%esp), %eax # 4-byte Reload +.LBB228_16: + movl %eax, 28(%edx) + movl 116(%esp), %esi # 4-byte Reload + jne .LBB228_18 +# BB#17: + movl 96(%esp), %eax # 4-byte Reload + movl %eax, 136(%esp) # 4-byte Spill +.LBB228_18: + movl 136(%esp), %eax # 4-byte Reload + movl %eax, 32(%edx) + jne .LBB228_20 +# BB#19: + movl 100(%esp), %ebp # 4-byte Reload +.LBB228_20: + movl %ebp, 36(%edx) + movl 104(%esp), %eax # 4-byte Reload + jne .LBB228_22 +# BB#21: + movl 112(%esp), %ebx # 4-byte Reload +.LBB228_22: + movl %ebx, 40(%edx) + jne .LBB228_24 +# BB#23: + movl 120(%esp), %edi # 4-byte Reload +.LBB228_24: + movl %edi, 44(%edx) + jne .LBB228_26 +# BB#25: + movl 124(%esp), %esi # 4-byte Reload +.LBB228_26: + movl %esi, 48(%edx) + jne .LBB228_28 +# BB#27: + movl 140(%esp), %eax # 4-byte Reload +.LBB228_28: + movl %eax, 52(%edx) + jne .LBB228_30 +# BB#29: + movl 156(%esp), %ecx # 4-byte Reload +.LBB228_30: + movl %ecx, 56(%edx) + addl $1148, %esp # imm = 0x47C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end228: + .size mcl_fp_montRed15L, .Lfunc_end228-mcl_fp_montRed15L + + .globl mcl_fp_addPre15L + .align 16, 0x90 + .type mcl_fp_addPre15L,@function +mcl_fp_addPre15L: # @mcl_fp_addPre15L +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 
4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %ebx + adcl 8(%ecx), %ebx + movl 16(%esp), %edi + movl %edx, (%edi) + movl 12(%ecx), %edx + movl %esi, 4(%edi) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %ebx, 8(%edi) + movl 20(%eax), %ebx + movl %edx, 12(%edi) + movl 20(%ecx), %edx + adcl %ebx, %edx + movl 24(%eax), %ebx + movl %esi, 16(%edi) + movl 24(%ecx), %esi + adcl %ebx, %esi + movl 28(%eax), %ebx + movl %edx, 20(%edi) + movl 28(%ecx), %edx + adcl %ebx, %edx + movl 32(%eax), %ebx + movl %esi, 24(%edi) + movl 32(%ecx), %esi + adcl %ebx, %esi + movl 36(%eax), %ebx + movl %edx, 28(%edi) + movl 36(%ecx), %edx + adcl %ebx, %edx + movl 40(%eax), %ebx + movl %esi, 32(%edi) + movl 40(%ecx), %esi + adcl %ebx, %esi + movl 44(%eax), %ebx + movl %edx, 36(%edi) + movl 44(%ecx), %edx + adcl %ebx, %edx + movl 48(%eax), %ebx + movl %esi, 40(%edi) + movl 48(%ecx), %esi + adcl %ebx, %esi + movl 52(%eax), %ebx + movl %edx, 44(%edi) + movl 52(%ecx), %edx + adcl %ebx, %edx + movl %esi, 48(%edi) + movl %edx, 52(%edi) + movl 56(%eax), %eax + movl 56(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 56(%edi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end229: + .size mcl_fp_addPre15L, .Lfunc_end229-mcl_fp_addPre15L + + .globl mcl_fp_subPre15L + .align 16, 0x90 + .type mcl_fp_subPre15L,@function +mcl_fp_subPre15L: # @mcl_fp_subPre15L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebp + sbbl 8(%edx), %ebp + movl 20(%esp), %ebx + movl %esi, (%ebx) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebx) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebp, 8(%ebx) + movl 20(%edx), %ebp + movl %esi, 12(%ebx) + movl 20(%ecx), %esi + sbbl %ebp, %esi + movl 24(%edx), %ebp + movl %edi, 16(%ebx) + movl 24(%ecx), %edi + sbbl %ebp, %edi + movl 28(%edx), %ebp + movl %esi, 20(%ebx) + movl 28(%ecx), %esi + sbbl %ebp, %esi + movl 32(%edx), %ebp + movl %edi, 24(%ebx) + movl 32(%ecx), %edi + sbbl %ebp, %edi + movl 36(%edx), %ebp + movl %esi, 28(%ebx) + movl 36(%ecx), %esi + sbbl %ebp, %esi + movl 40(%edx), %ebp + movl %edi, 32(%ebx) + movl 40(%ecx), %edi + sbbl %ebp, %edi + movl 44(%edx), %ebp + movl %esi, 36(%ebx) + movl 44(%ecx), %esi + sbbl %ebp, %esi + movl 48(%edx), %ebp + movl %edi, 40(%ebx) + movl 48(%ecx), %edi + sbbl %ebp, %edi + movl 52(%edx), %ebp + movl %esi, 44(%ebx) + movl 52(%ecx), %esi + sbbl %ebp, %esi + movl %edi, 48(%ebx) + movl %esi, 52(%ebx) + movl 56(%edx), %edx + movl 56(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 56(%ebx) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end230: + .size mcl_fp_subPre15L, .Lfunc_end230-mcl_fp_subPre15L + + .globl mcl_fp_shr1_15L + .align 16, 0x90 + .type mcl_fp_shr1_15L,@function +mcl_fp_shr1_15L: # @mcl_fp_shr1_15L +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + 
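+# shr1: each shrdl shifts a limb right by one, pulling in the low bit of the next limb; the top limb finishes with a plain shrl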
movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 40(%ecx) + movl 48(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 44(%ecx) + movl 52(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 48(%ecx) + movl 56(%eax), %eax + shrdl $1, %eax, %esi + movl %esi, 52(%ecx) + shrl %eax + movl %eax, 56(%ecx) + popl %esi + retl +.Lfunc_end231: + .size mcl_fp_shr1_15L, .Lfunc_end231-mcl_fp_shr1_15L + + .globl mcl_fp_add15L + .align 16, 0x90 + .type mcl_fp_add15L,@function +mcl_fp_add15L: # @mcl_fp_add15L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $48, %esp + movl 76(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 72(%esp), %eax + addl (%eax), %esi + movl %esi, 4(%esp) # 4-byte Spill + adcl 4(%eax), %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 8(%ecx), %edx + adcl 8(%eax), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 12(%eax), %esi + movl 16(%eax), %edx + adcl 12(%ecx), %esi + movl %esi, 36(%esp) # 4-byte Spill + adcl 16(%ecx), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 20(%eax), %edx + adcl 20(%ecx), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 24(%eax), %edx + adcl 24(%ecx), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 28(%eax), %edx + adcl 28(%ecx), %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 32(%eax), %edx + adcl 32(%ecx), %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 36(%eax), %edx + adcl 36(%ecx), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 40(%eax), %edx + adcl 40(%ecx), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl 44(%eax), %ebx + adcl 44(%ecx), %ebx + movl %ebx, (%esp) # 4-byte Spill + movl 48(%eax), %ebp + adcl 48(%ecx), %ebp + movl 52(%eax), %edi + adcl 52(%ecx), %edi + movl 56(%eax), %edx + adcl 56(%ecx), %edx + movl 68(%esp), %ecx + movl 4(%esp), %eax # 4-byte Reload + movl %eax, (%ecx) + movl 44(%esp), %esi # 4-byte Reload + movl %esi, 4(%ecx) + movl 40(%esp), %esi # 4-byte Reload + movl %esi, 8(%ecx) + movl 36(%esp), %esi # 4-byte Reload + movl %esi, 12(%ecx) + movl 32(%esp), %esi # 4-byte Reload + movl %esi, 16(%ecx) + movl 28(%esp), %esi # 4-byte Reload + movl %esi, 20(%ecx) + movl 24(%esp), %esi # 4-byte Reload + movl %esi, 24(%ecx) + movl 20(%esp), %esi # 4-byte Reload + movl %esi, 28(%ecx) + movl 16(%esp), %esi # 4-byte Reload + movl %esi, 32(%ecx) + movl 12(%esp), %esi # 4-byte Reload + movl %esi, 36(%ecx) + movl 8(%esp), %esi # 4-byte Reload + movl %esi, 40(%ecx) + movl %ebx, 44(%ecx) + movl %ebp, 48(%ecx) + movl %edi, 52(%ecx) + movl %edx, 56(%ecx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 80(%esp), %esi + subl (%esi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + sbbl 4(%esi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %edx, %eax + movl 40(%esp), %edx # 4-byte Reload + sbbl 8(%esi), %edx + movl %edx, 40(%esp) # 4-byte Spill + movl 36(%esp), %edx # 4-byte Reload + sbbl 12(%esi), %edx + movl %edx, 36(%esp) # 4-byte Spill + movl 32(%esp), %edx # 4-byte Reload + sbbl 16(%esi), %edx + movl %edx, 32(%esp) # 4-byte Spill + movl 28(%esp), %edx # 4-byte Reload + sbbl 20(%esi), %edx + movl %edx, 28(%esp) # 4-byte Spill + movl 24(%esp), %edx # 4-byte Reload + sbbl 24(%esi), %edx + movl %edx, 24(%esp) # 4-byte Spill + movl 20(%esp), %edx # 4-byte Reload + sbbl 28(%esi), %edx + movl %edx, 20(%esp) # 4-byte Spill + movl 16(%esp), %edx # 4-byte Reload + sbbl 
32(%esi), %edx + movl %edx, 16(%esp) # 4-byte Spill + movl 12(%esp), %edx # 4-byte Reload + sbbl 36(%esi), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 8(%esp), %edx # 4-byte Reload + sbbl 40(%esi), %edx + movl %edx, 8(%esp) # 4-byte Spill + movl (%esp), %edx # 4-byte Reload + sbbl 44(%esi), %edx + movl %edx, (%esp) # 4-byte Spill + sbbl 48(%esi), %ebp + sbbl 52(%esi), %edi + sbbl 56(%esi), %eax + sbbl $0, %ebx + testb $1, %bl + jne .LBB232_2 +# BB#1: # %nocarry + movl 4(%esp), %edx # 4-byte Reload + movl %edx, (%ecx) + movl 44(%esp), %edx # 4-byte Reload + movl %edx, 4(%ecx) + movl 40(%esp), %edx # 4-byte Reload + movl %edx, 8(%ecx) + movl 36(%esp), %edx # 4-byte Reload + movl %edx, 12(%ecx) + movl 32(%esp), %edx # 4-byte Reload + movl %edx, 16(%ecx) + movl 28(%esp), %edx # 4-byte Reload + movl %edx, 20(%ecx) + movl 24(%esp), %edx # 4-byte Reload + movl %edx, 24(%ecx) + movl 20(%esp), %edx # 4-byte Reload + movl %edx, 28(%ecx) + movl 16(%esp), %edx # 4-byte Reload + movl %edx, 32(%ecx) + movl 12(%esp), %edx # 4-byte Reload + movl %edx, 36(%ecx) + movl 8(%esp), %edx # 4-byte Reload + movl %edx, 40(%ecx) + movl (%esp), %edx # 4-byte Reload + movl %edx, 44(%ecx) + movl %ebp, 48(%ecx) + movl %edi, 52(%ecx) + movl %eax, 56(%ecx) +.LBB232_2: # %carry + addl $48, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end232: + .size mcl_fp_add15L, .Lfunc_end232-mcl_fp_add15L + + .globl mcl_fp_addNF15L + .align 16, 0x90 + .type mcl_fp_addNF15L,@function +mcl_fp_addNF15L: # @mcl_fp_addNF15L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $120, %esp + movl 148(%esp), %ecx + movl (%ecx), %eax + movl 4(%ecx), %edx + movl 144(%esp), %esi + addl (%esi), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 4(%esi), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 52(%ecx), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 44(%ecx), %ebp + movl 40(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 28(%ecx), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 24(%ecx), %eax + movl 20(%ecx), %ebx + movl 16(%ecx), %edi + movl 12(%ecx), %edx + movl 8(%ecx), %ecx + adcl 8(%esi), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 12(%esi), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl 16(%esi), %edi + movl %edi, 68(%esp) # 4-byte Spill + adcl 20(%esi), %ebx + movl %ebx, 72(%esp) # 4-byte Spill + adcl 24(%esi), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 28(%esi), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 32(%esi), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esi), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 40(%esi), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 44(%esi), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 48(%esi), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 52(%esi), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 56(%esi), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 152(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + subl (%esi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + 
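+# addNF: after the ripple add, subtract p speculatively; the sign of the top limb (sarl $31 / js below) selects the sum or the difference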
sbbl 4(%esi), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 8(%esi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 12(%esi), %edx + movl %edx, 12(%esp) # 4-byte Spill + sbbl 16(%esi), %edi + movl %edi, 16(%esp) # 4-byte Spill + sbbl 20(%esi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 24(%esi), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + sbbl 28(%esi), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + sbbl 32(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 36(%esi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + sbbl 40(%esi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + movl %edx, %eax + sbbl 44(%esi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + movl %eax, %edi + sbbl 48(%esi), %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + movl %ecx, %edi + movl %ecx, %ebx + sbbl 52(%esi), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + movl %ecx, %edi + sbbl 56(%esi), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl %edi, %esi + sarl $31, %esi + testl %esi, %esi + movl 80(%esp), %esi # 4-byte Reload + js .LBB233_2 +# BB#1: + movl (%esp), %esi # 4-byte Reload +.LBB233_2: + movl 140(%esp), %edi + movl %esi, (%edi) + movl 84(%esp), %ecx # 4-byte Reload + js .LBB233_4 +# BB#3: + movl 4(%esp), %ecx # 4-byte Reload +.LBB233_4: + movl %ecx, 4(%edi) + movl 104(%esp), %ecx # 4-byte Reload + movl 72(%esp), %esi # 4-byte Reload + js .LBB233_6 +# BB#5: + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 76(%esp) # 4-byte Spill +.LBB233_6: + movl 76(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%edi) + movl 64(%esp), %eax # 4-byte Reload + js .LBB233_8 +# BB#7: + movl 12(%esp), %eax # 4-byte Reload +.LBB233_8: + movl %eax, 12(%edi) + movl %ebx, %ebp + movl %edx, %eax + movl 68(%esp), %edx # 4-byte Reload + js .LBB233_10 +# BB#9: + movl 16(%esp), %edx # 4-byte Reload +.LBB233_10: + movl %edx, 16(%edi) + movl 112(%esp), %edx # 4-byte Reload + movl 108(%esp), %ebx # 4-byte Reload + js .LBB233_12 +# BB#11: + movl 20(%esp), %esi # 4-byte Reload +.LBB233_12: + movl %esi, 20(%edi) + js .LBB233_14 +# BB#13: + movl 24(%esp), %esi # 4-byte Reload + movl %esi, 88(%esp) # 4-byte Spill +.LBB233_14: + movl 88(%esp), %esi # 4-byte Reload + movl %esi, 24(%edi) + js .LBB233_16 +# BB#15: + movl 28(%esp), %ecx # 4-byte Reload +.LBB233_16: + movl %ecx, 28(%edi) + js .LBB233_18 +# BB#17: + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 116(%esp) # 4-byte Spill +.LBB233_18: + movl 116(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%edi) + js .LBB233_20 +# BB#19: + movl 36(%esp), %ebx # 4-byte Reload +.LBB233_20: + movl %ebx, 36(%edi) + js .LBB233_22 +# BB#21: + movl 40(%esp), %edx # 4-byte Reload +.LBB233_22: + movl %edx, 40(%edi) + js .LBB233_24 +# BB#23: + movl 44(%esp), %eax # 4-byte Reload +.LBB233_24: + movl %eax, 44(%edi) + movl 96(%esp), %eax # 4-byte Reload + js .LBB233_26 +# BB#25: + movl 48(%esp), %eax # 4-byte Reload +.LBB233_26: + movl %eax, 48(%edi) + js .LBB233_28 +# BB#27: + movl 52(%esp), %ebp # 4-byte Reload +.LBB233_28: + movl %ebp, 52(%edi) + movl 100(%esp), %eax # 4-byte Reload + js .LBB233_30 +# BB#29: + movl 56(%esp), %eax # 4-byte Reload +.LBB233_30: + movl %eax, 56(%edi) + addl $120, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end233: + .size 
mcl_fp_addNF15L, .Lfunc_end233-mcl_fp_addNF15L + + .globl mcl_fp_sub15L + .align 16, 0x90 + .type mcl_fp_sub15L,@function +mcl_fp_sub15L: # @mcl_fp_sub15L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 80(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 84(%esp), %edi + subl (%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 32(%esi), %eax + sbbl 32(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 36(%esi), %eax + sbbl 36(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 40(%esi), %edx + sbbl 40(%edi), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 44(%esi), %ecx + sbbl 44(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 48(%esi), %eax + sbbl 48(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 52(%esi), %ebp + sbbl 52(%edi), %ebp + movl 56(%esi), %esi + sbbl 56(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 76(%esp), %ebx + movl 48(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 52(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 28(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 32(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 36(%ebx) + movl %edx, 40(%ebx) + movl %ecx, 44(%ebx) + movl %eax, 48(%ebx) + movl %ebp, 52(%ebx) + movl %esi, 56(%ebx) + je .LBB234_2 +# BB#1: # %carry + movl %esi, (%esp) # 4-byte Spill + movl 88(%esp), %esi + movl 48(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 40(%esp), %edi # 4-byte Reload + adcl 8(%esi), %edi + movl 12(%esi), %eax + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %edi, 8(%ebx) + movl 16(%esi), %ecx + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 40(%esi), %ecx + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl 44(%esi), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%ebx) + movl 48(%esi), %ecx + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %eax, 44(%ebx) + movl %ecx, 48(%ebx) + movl 52(%esi), %eax + adcl %ebp, %eax + movl %eax, 52(%ebx) + movl 56(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%ebx) 
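+# end of the %carry path: the modulus was added back onto the raw difference limb by limb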
+.LBB234_2: # %nocarry + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end234: + .size mcl_fp_sub15L, .Lfunc_end234-mcl_fp_sub15L + + .globl mcl_fp_subNF15L + .align 16, 0x90 + .type mcl_fp_subNF15L,@function +mcl_fp_subNF15L: # @mcl_fp_subNF15L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $96, %esp + movl 120(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 124(%esp), %edi + subl (%edi), %esi + movl %esi, 60(%esp) # 4-byte Spill + sbbl 4(%edi), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 52(%ecx), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 48(%ecx), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 32(%ecx), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %ebx + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 40(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 48(%esp) # 4-byte Spill + sbbl 24(%edi), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + sbbl 28(%edi), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 32(%edi), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 48(%edi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 52(%edi), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + sbbl 56(%edi), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %eax, %ebp + sarl $31, %ebp + movl %ebp, %edi + shldl $1, %eax, %edi + movl 128(%esp), %edx + andl (%edx), %edi + movl 56(%edx), %eax + andl %ebp, %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 52(%edx), %eax + andl %ebp, %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 48(%edx), %eax + andl %ebp, %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 44(%edx), %eax + andl %ebp, %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 40(%edx), %eax + andl %ebp, %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 36(%edx), %eax + andl %ebp, %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 32(%edx), %eax + andl %ebp, %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 28(%edx), %eax + andl %ebp, %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 24(%edx), %eax + andl %ebp, %eax + movl %eax, (%esp) # 4-byte Spill + movl 20(%edx), %ebx + andl %ebp, %ebx + movl 16(%edx), %esi + andl %ebp, %esi + movl 12(%edx), %ecx + andl %ebp, %ecx + movl 8(%edx), %eax + andl %ebp, %eax + andl 4(%edx), %ebp + addl 60(%esp), %edi # 4-byte Folded Reload + adcl 64(%esp), %ebp # 4-byte Folded Reload + movl 116(%esp), %edx + movl %edi, (%edx) + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ebp, 4(%edx) + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %eax, 8(%edx) + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %ecx, 12(%edx) + adcl 48(%esp), %ebx # 4-byte Folded Reload + movl %esi, 16(%edx) + movl (%esp), %ecx # 4-byte 
Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ebx, 20(%edx) + movl 4(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%edx) + movl 8(%esp), %ecx # 4-byte Reload + adcl 92(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%edx) + movl 12(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%edx) + movl 16(%esp), %ecx # 4-byte Reload + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%edx) + movl 20(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%edx) + movl 24(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %eax, 44(%edx) + movl 28(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %ecx, 48(%edx) + movl %eax, 52(%edx) + movl 44(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%edx) + addl $96, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end235: + .size mcl_fp_subNF15L, .Lfunc_end235-mcl_fp_subNF15L + + .globl mcl_fpDbl_add15L + .align 16, 0x90 + .type mcl_fpDbl_add15L,@function +mcl_fpDbl_add15L: # @mcl_fpDbl_add15L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $108, %esp + movl 136(%esp), %ecx + movl 132(%esp), %edx + movl 12(%edx), %edi + movl 16(%edx), %esi + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%edx), %ebp + movl 128(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%edx), %ebp + adcl 8(%edx), %ebx + adcl 12(%ecx), %edi + adcl 16(%ecx), %esi + movl %ebp, 4(%eax) + movl 68(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %edi, 12(%eax) + movl 20(%edx), %edi + adcl %ebx, %edi + movl 24(%ecx), %ebx + movl %esi, 16(%eax) + movl 24(%edx), %esi + adcl %ebx, %esi + movl 28(%ecx), %ebx + movl %edi, 20(%eax) + movl 28(%edx), %edi + adcl %ebx, %edi + movl 32(%ecx), %ebx + movl %esi, 24(%eax) + movl 32(%edx), %esi + adcl %ebx, %esi + movl 36(%ecx), %ebx + movl %edi, 28(%eax) + movl 36(%edx), %edi + adcl %ebx, %edi + movl 40(%ecx), %ebx + movl %esi, 32(%eax) + movl 40(%edx), %esi + adcl %ebx, %esi + movl 44(%ecx), %ebx + movl %edi, 36(%eax) + movl 44(%edx), %edi + adcl %ebx, %edi + movl 48(%ecx), %ebx + movl %esi, 40(%eax) + movl 48(%edx), %esi + adcl %ebx, %esi + movl 52(%ecx), %ebx + movl %edi, 44(%eax) + movl 52(%edx), %edi + adcl %ebx, %edi + movl 56(%ecx), %ebx + movl %esi, 48(%eax) + movl 56(%edx), %esi + adcl %ebx, %esi + movl 60(%ecx), %ebx + movl %edi, 52(%eax) + movl 60(%edx), %edi + adcl %ebx, %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 64(%ecx), %edi + movl %esi, 56(%eax) + movl 64(%edx), %eax + adcl %edi, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%edx), %eax + adcl %ebp, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl 72(%edx), %eax + adcl %esi, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 76(%ecx), %esi + movl 76(%edx), %eax + adcl %esi, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 80(%ecx), %esi + movl 80(%edx), %eax + adcl %esi, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 84(%ecx), %esi + movl 84(%edx), %eax + adcl %esi, %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%ecx), %esi + movl 88(%edx), %eax + adcl %esi, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 92(%ecx), %esi + movl 92(%edx), %eax + adcl %esi, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 96(%ecx), %esi + movl 96(%edx), %eax + adcl %esi, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 100(%ecx), %esi + movl 100(%edx), %eax + adcl %esi, 
%eax + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%ecx), %eax + movl 104(%edx), %esi + adcl %eax, %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 108(%ecx), %edi + movl 108(%edx), %eax + adcl %edi, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 112(%ecx), %ebx + movl 112(%edx), %edi + adcl %ebx, %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 116(%ecx), %ecx + movl 116(%edx), %edx + adcl %ecx, %edx + sbbl %ebx, %ebx + andl $1, %ebx + movl 140(%esp), %ebp + movl 76(%esp), %ecx # 4-byte Reload + subl (%ebp), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 4(%ebp), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 8(%ebp), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 12(%ebp), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 16(%ebp), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + sbbl 20(%ebp), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + sbbl 24(%ebp), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + sbbl 28(%ebp), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + sbbl 32(%ebp), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + sbbl 36(%ebp), %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + sbbl 40(%ebp), %ecx + sbbl 44(%ebp), %esi + movl %esi, 40(%esp) # 4-byte Spill + sbbl 48(%ebp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl %edi, %eax + movl %edx, %edi + sbbl 52(%ebp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl %edi, %esi + sbbl 56(%ebp), %esi + sbbl $0, %ebx + andl $1, %ebx + jne .LBB236_2 +# BB#1: + movl %esi, %edi +.LBB236_2: + testb %bl, %bl + movl 76(%esp), %eax # 4-byte Reload + movl 72(%esp), %esi # 4-byte Reload + movl 68(%esp), %ebx # 4-byte Reload + movl 64(%esp), %ebp # 4-byte Reload + jne .LBB236_4 +# BB#3: + movl %ecx, %esi + movl (%esp), %ebx # 4-byte Reload + movl 4(%esp), %ebp # 4-byte Reload + movl 8(%esp), %eax # 4-byte Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload +.LBB236_4: + movl 128(%esp), %edx + movl %eax, 60(%edx) + movl 80(%esp), %eax # 4-byte Reload + movl %eax, 64(%edx) + movl 84(%esp), %eax # 4-byte Reload + movl %eax, 68(%edx) + movl 88(%esp), %eax # 4-byte Reload + movl %eax, 72(%edx) + movl 92(%esp), %eax # 4-byte Reload + movl %eax, 76(%edx) + movl 96(%esp), %eax # 4-byte Reload + movl %eax, 80(%edx) + movl 100(%esp), %eax # 4-byte Reload + movl %eax, 84(%edx) + movl 104(%esp), %eax # 4-byte Reload + movl %eax, 88(%edx) + movl %ebp, 92(%edx) + movl %ebx, 96(%edx) + movl %esi, 100(%edx) + movl 52(%esp), %eax # 4-byte Reload + jne .LBB236_6 +# BB#5: + movl 40(%esp), %eax # 4-byte Reload +.LBB236_6: + movl %eax, 104(%edx) + movl 60(%esp), %ecx # 4-byte Reload + movl 56(%esp), %eax # 4-byte Reload + jne .LBB236_8 +# BB#7: + movl 44(%esp), %eax # 4-byte Reload +.LBB236_8: + movl %eax, 108(%edx) + jne .LBB236_10 +# 
BB#9: + movl 48(%esp), %ecx # 4-byte Reload +.LBB236_10: + movl %ecx, 112(%edx) + movl %edi, 116(%edx) + addl $108, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end236: + .size mcl_fpDbl_add15L, .Lfunc_end236-mcl_fpDbl_add15L + + .globl mcl_fpDbl_sub15L + .align 16, 0x90 + .type mcl_fpDbl_sub15L,@function +mcl_fpDbl_sub15L: # @mcl_fpDbl_sub15L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $100, %esp + movl 124(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 128(%esp), %ebp + subl (%ebp), %edx + sbbl 4(%ebp), %esi + movl 8(%eax), %edi + sbbl 8(%ebp), %edi + movl 120(%esp), %ecx + movl %edx, (%ecx) + movl 12(%eax), %edx + sbbl 12(%ebp), %edx + movl %esi, 4(%ecx) + movl 16(%eax), %esi + sbbl 16(%ebp), %esi + movl %edi, 8(%ecx) + movl 20(%ebp), %edi + movl %edx, 12(%ecx) + movl 20(%eax), %edx + sbbl %edi, %edx + movl 24(%ebp), %edi + movl %esi, 16(%ecx) + movl 24(%eax), %esi + sbbl %edi, %esi + movl 28(%ebp), %edi + movl %edx, 20(%ecx) + movl 28(%eax), %edx + sbbl %edi, %edx + movl 32(%ebp), %edi + movl %esi, 24(%ecx) + movl 32(%eax), %esi + sbbl %edi, %esi + movl 36(%ebp), %edi + movl %edx, 28(%ecx) + movl 36(%eax), %edx + sbbl %edi, %edx + movl 40(%ebp), %edi + movl %esi, 32(%ecx) + movl 40(%eax), %esi + sbbl %edi, %esi + movl 44(%ebp), %edi + movl %edx, 36(%ecx) + movl 44(%eax), %edx + sbbl %edi, %edx + movl 48(%ebp), %edi + movl %esi, 40(%ecx) + movl 48(%eax), %esi + sbbl %edi, %esi + movl 52(%ebp), %edi + movl %edx, 44(%ecx) + movl 52(%eax), %edx + sbbl %edi, %edx + movl 56(%ebp), %edi + movl %esi, 48(%ecx) + movl 56(%eax), %esi + sbbl %edi, %esi + movl 60(%ebp), %edi + movl %edx, 52(%ecx) + movl 60(%eax), %edx + sbbl %edi, %edx + movl %edx, 44(%esp) # 4-byte Spill + movl 64(%ebp), %edx + movl %esi, 56(%ecx) + movl 64(%eax), %esi + sbbl %edx, %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 68(%ebp), %edx + movl 68(%eax), %esi + sbbl %edx, %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 72(%ebp), %edx + movl 72(%eax), %esi + sbbl %edx, %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 76(%ebp), %edx + movl 76(%eax), %esi + sbbl %edx, %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 80(%ebp), %edx + movl 80(%eax), %esi + sbbl %edx, %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 84(%ebp), %edx + movl 84(%eax), %esi + sbbl %edx, %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 88(%ebp), %edx + movl 88(%eax), %esi + sbbl %edx, %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 92(%ebp), %edx + movl 92(%eax), %esi + sbbl %edx, %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 96(%ebp), %edx + movl 96(%eax), %esi + sbbl %edx, %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 100(%ebp), %edx + movl 100(%eax), %esi + sbbl %edx, %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 104(%ebp), %edx + movl 104(%eax), %esi + sbbl %edx, %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 108(%ebp), %edx + movl 108(%eax), %esi + sbbl %edx, %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 112(%ebp), %edx + movl 112(%eax), %esi + sbbl %edx, %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 116(%ebp), %edx + movl 116(%eax), %eax + sbbl %edx, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 132(%esp), %esi + jne .LBB237_1 +# BB#2: + movl $0, 60(%esp) # 4-byte Folded Spill + jmp .LBB237_3 +.LBB237_1: + movl 56(%esi), %edx + movl %edx, 60(%esp) # 4-byte Spill +.LBB237_3: + testb %al, %al + jne .LBB237_4 +# BB#5: + movl $0, 24(%esp) # 4-byte Folded Spill + movl $0, %ebx + jmp .LBB237_6 +.LBB237_4: + movl (%esi), %ebx 
+ movl 4(%esi), %eax + movl %eax, 24(%esp) # 4-byte Spill +.LBB237_6: + jne .LBB237_7 +# BB#8: + movl $0, 32(%esp) # 4-byte Folded Spill + jmp .LBB237_9 +.LBB237_7: + movl 52(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill +.LBB237_9: + jne .LBB237_10 +# BB#11: + movl $0, 28(%esp) # 4-byte Folded Spill + jmp .LBB237_12 +.LBB237_10: + movl 48(%esi), %eax + movl %eax, 28(%esp) # 4-byte Spill +.LBB237_12: + jne .LBB237_13 +# BB#14: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB237_15 +.LBB237_13: + movl 44(%esi), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB237_15: + jne .LBB237_16 +# BB#17: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB237_18 +.LBB237_16: + movl 40(%esi), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB237_18: + jne .LBB237_19 +# BB#20: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB237_21 +.LBB237_19: + movl 36(%esi), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB237_21: + jne .LBB237_22 +# BB#23: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB237_24 +.LBB237_22: + movl 32(%esi), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB237_24: + jne .LBB237_25 +# BB#26: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB237_27 +.LBB237_25: + movl 28(%esi), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB237_27: + jne .LBB237_28 +# BB#29: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB237_30 +.LBB237_28: + movl 24(%esi), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB237_30: + jne .LBB237_31 +# BB#32: + movl $0, %edx + jmp .LBB237_33 +.LBB237_31: + movl 20(%esi), %edx +.LBB237_33: + jne .LBB237_34 +# BB#35: + movl $0, %ebp + jmp .LBB237_36 +.LBB237_34: + movl 16(%esi), %ebp +.LBB237_36: + jne .LBB237_37 +# BB#38: + movl $0, %eax + jmp .LBB237_39 +.LBB237_37: + movl 12(%esi), %eax +.LBB237_39: + jne .LBB237_40 +# BB#41: + xorl %esi, %esi + jmp .LBB237_42 +.LBB237_40: + movl 8(%esi), %esi +.LBB237_42: + addl 44(%esp), %ebx # 4-byte Folded Reload + movl 24(%esp), %edi # 4-byte Reload + adcl 36(%esp), %edi # 4-byte Folded Reload + movl %ebx, 60(%ecx) + adcl 40(%esp), %esi # 4-byte Folded Reload + movl %edi, 64(%ecx) + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %esi, 68(%ecx) + adcl 52(%esp), %ebp # 4-byte Folded Reload + movl %eax, 72(%ecx) + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %ebp, 76(%ecx) + movl (%esp), %esi # 4-byte Reload + adcl 64(%esp), %esi # 4-byte Folded Reload + movl %edx, 80(%ecx) + movl 4(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %esi, 84(%ecx) + movl 8(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %eax, 88(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %edx, 92(%ecx) + movl 16(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %eax, 96(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %edx, 100(%ecx) + movl 28(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %eax, 104(%ecx) + movl 32(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %edx, 108(%ecx) + movl %eax, 112(%ecx) + movl 60(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%ecx) + addl $100, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end237: + .size mcl_fpDbl_sub15L, .Lfunc_end237-mcl_fpDbl_sub15L + + .align 16, 0x90 + .type .LmulPv512x32,@function +.LmulPv512x32: # @mulPv512x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $112, %esp + 
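+# mulPv512x32: multiply a 16-limb operand by one 32-bit word with sixteen mull instructions, then chain the partial products together with adcl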
movl %edx, %ebp + movl 132(%esp), %ebx + movl %ebx, %eax + mull 60(%ebp) + movl %edx, 108(%esp) # 4-byte Spill + movl %eax, 104(%esp) # 4-byte Spill + movl %ebx, %eax + mull 56(%ebp) + movl %edx, 100(%esp) # 4-byte Spill + movl %eax, 96(%esp) # 4-byte Spill + movl %ebx, %eax + mull 52(%ebp) + movl %edx, 92(%esp) # 4-byte Spill + movl %eax, 88(%esp) # 4-byte Spill + movl %ebx, %eax + mull 48(%ebp) + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 80(%esp) # 4-byte Spill + movl %ebx, %eax + mull 44(%ebp) + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 72(%esp) # 4-byte Spill + movl %ebx, %eax + mull 40(%ebp) + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %ebx, %eax + mull 36(%ebp) + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %ebx, %eax + mull 32(%ebp) + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %ebx, %eax + mull 28(%ebp) + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %eax + mull 24(%ebp) + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebx, %eax + mull 20(%ebp) + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebx, %eax + mull 16(%ebp) + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ebx, %eax + mull 12(%ebp) + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ebx, %eax + mull 8(%ebp) + movl %edx, %esi + movl %eax, 4(%esp) # 4-byte Spill + movl %ebx, %eax + mull 4(%ebp) + movl %edx, %edi + movl %eax, (%esp) # 4-byte Spill + movl %ebx, %eax + mull (%ebp) + movl %eax, (%ecx) + addl (%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%ecx) + adcl 4(%esp), %edi # 4-byte Folded Reload + movl %edi, 8(%ecx) + adcl 8(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%ecx) + movl 28(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%ecx) + movl 44(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%ecx) + movl 52(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ecx) + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%ecx) + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%ecx) + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%ecx) + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%ecx) + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%ecx) + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%ecx) + movl 108(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 64(%ecx) + movl %ecx, %eax + addl $112, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end238: + .size .LmulPv512x32, .Lfunc_end238-.LmulPv512x32 + + .globl mcl_fp_mulUnitPre16L + .align 16, 0x90 + .type mcl_fp_mulUnitPre16L,@function +mcl_fp_mulUnitPre16L: # @mcl_fp_mulUnitPre16L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl 
%esi + subl $124, %esp + calll .L239$pb +.L239$pb: + popl %ebx +.Ltmp50: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp50-.L239$pb), %ebx + movl 152(%esp), %eax + movl %eax, (%esp) + leal 56(%esp), %ecx + movl 148(%esp), %edx + calll .LmulPv512x32 + movl 120(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 116(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 108(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 104(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 88(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 84(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 80(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 76(%esp), %ebp + movl 72(%esp), %ebx + movl 68(%esp), %edi + movl 64(%esp), %esi + movl 56(%esp), %edx + movl 60(%esp), %ecx + movl 144(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + movl 52(%esp), %ecx # 4-byte Reload + movl %ecx, 64(%eax) + addl $124, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end239: + .size mcl_fp_mulUnitPre16L, .Lfunc_end239-mcl_fp_mulUnitPre16L + + .globl mcl_fpDbl_mulPre16L + .align 16, 0x90 + .type mcl_fpDbl_mulPre16L,@function +mcl_fpDbl_mulPre16L: # @mcl_fpDbl_mulPre16L +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $300, %esp # imm = 0x12C + calll .L240$pb +.L240$pb: + popl %ebx +.Ltmp51: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp51-.L240$pb), %ebx + movl %ebx, -224(%ebp) # 4-byte Spill + movl 16(%ebp), %edi + movl %edi, 8(%esp) + movl 12(%ebp), %esi + movl %esi, 4(%esp) + movl 8(%ebp), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre8L@PLT + leal 32(%edi), %eax + movl %eax, 8(%esp) + leal 32(%esi), %eax + movl %eax, 4(%esp) + movl 8(%ebp), %eax + leal 64(%eax), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre8L@PLT + movl 52(%esi), %ebx + movl 48(%esi), %eax + movl 44(%esi), %ecx + movl 40(%esi), %edx + movl %edx, -176(%ebp) # 4-byte Spill + movl (%esi), %edi + movl 4(%esi), %edx + addl 32(%esi), %edi + movl %edi, -184(%ebp) # 4-byte Spill + movl %esi, %edi + adcl 36(%edi), %edx + movl %edx, -236(%ebp) # 4-byte Spill + movl -176(%ebp), %edx # 4-byte Reload + adcl 8(%edi), %edx + movl %edx, -176(%ebp) # 4-byte Spill + adcl 12(%edi), %ecx + movl %ecx, -232(%ebp) # 4-byte Spill + adcl 16(%edi), %eax + movl %eax, -212(%ebp) # 4-byte Spill + adcl 20(%edi), %ebx + movl %ebx, -228(%ebp) # 4-byte Spill + movl 56(%edi), %eax + adcl 24(%edi), %eax + movl %eax, -248(%ebp) # 4-byte Spill + pushl %eax + seto %al + lahf + movl %eax, %ecx + popl %eax + movl %ecx, -144(%ebp) # 4-byte Spill + movl 16(%ebp), %esi + movl (%esi), %ecx + addl 32(%esi), %ecx + 
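+# one level of Karatsuba: sum the low and high halves of each operand; their 8-limb product (the third mcl_fpDbl_mulPre8L call below) supplies the middle term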
movl %ecx, -188(%ebp) # 4-byte Spill + movl 4(%esi), %ecx + adcl 36(%esi), %ecx + movl %ecx, -192(%ebp) # 4-byte Spill + movl 40(%esi), %ecx + adcl 8(%esi), %ecx + movl %ecx, -196(%ebp) # 4-byte Spill + movl 44(%esi), %ecx + adcl 12(%esi), %ecx + movl %ecx, -200(%ebp) # 4-byte Spill + movl 48(%esi), %ecx + adcl 16(%esi), %ecx + movl %ecx, -204(%ebp) # 4-byte Spill + movl 52(%esi), %ecx + adcl 20(%esi), %ecx + movl %ecx, -208(%ebp) # 4-byte Spill + movl 56(%esi), %edx + adcl 24(%esi), %edx + movl 60(%esi), %ecx + adcl 28(%esi), %ecx + pushl %eax + seto %al + lahf + movl %eax, %ebx + popl %eax + movl %ebx, -252(%ebp) # 4-byte Spill + movl -212(%ebp), %ebx # 4-byte Reload + movl -176(%ebp), %esi # 4-byte Reload + movl %esi, -216(%ebp) # 4-byte Spill + movl -184(%ebp), %esi # 4-byte Reload + movl %esi, -220(%ebp) # 4-byte Spill + jb .LBB240_2 +# BB#1: + xorl %eax, %eax + xorl %ebx, %ebx + movl $0, -216(%ebp) # 4-byte Folded Spill + movl $0, -220(%ebp) # 4-byte Folded Spill +.LBB240_2: + movl %ebx, -244(%ebp) # 4-byte Spill + movl %eax, -240(%ebp) # 4-byte Spill + movl 60(%edi), %eax + movl -144(%ebp), %ebx # 4-byte Reload + pushl %eax + movl %ebx, %eax + addb $127, %al + sahf + popl %eax + adcl 28(%edi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + movl %ecx, -172(%ebp) # 4-byte Spill + movl %edx, -144(%ebp) # 4-byte Spill + movl -208(%ebp), %eax # 4-byte Reload + movl %eax, -148(%ebp) # 4-byte Spill + movl -204(%ebp), %eax # 4-byte Reload + movl %eax, -152(%ebp) # 4-byte Spill + movl -200(%ebp), %eax # 4-byte Reload + movl %eax, -156(%ebp) # 4-byte Spill + movl -196(%ebp), %eax # 4-byte Reload + movl %eax, -160(%ebp) # 4-byte Spill + movl -192(%ebp), %eax # 4-byte Reload + movl %eax, -164(%ebp) # 4-byte Spill + movl -188(%ebp), %eax # 4-byte Reload + movl %eax, -168(%ebp) # 4-byte Spill + jb .LBB240_4 +# BB#3: + movl $0, -172(%ebp) # 4-byte Folded Spill + movl $0, -144(%ebp) # 4-byte Folded Spill + movl $0, -148(%ebp) # 4-byte Folded Spill + movl $0, -152(%ebp) # 4-byte Folded Spill + movl $0, -156(%ebp) # 4-byte Folded Spill + movl $0, -160(%ebp) # 4-byte Folded Spill + movl $0, -164(%ebp) # 4-byte Folded Spill + movl $0, -168(%ebp) # 4-byte Folded Spill +.LBB240_4: + movl -184(%ebp), %eax # 4-byte Reload + movl %eax, -108(%ebp) + movl -236(%ebp), %eax # 4-byte Reload + movl %eax, -104(%ebp) + movl -176(%ebp), %edi # 4-byte Reload + movl %edi, -100(%ebp) + movl -232(%ebp), %edi # 4-byte Reload + movl %edi, -96(%ebp) + movl -212(%ebp), %esi # 4-byte Reload + movl %esi, -92(%ebp) + movl -228(%ebp), %esi # 4-byte Reload + movl %esi, -88(%ebp) + movl -248(%ebp), %ebx # 4-byte Reload + movl %ebx, -84(%ebp) + movl -188(%ebp), %ebx # 4-byte Reload + movl %ebx, -140(%ebp) + movl -192(%ebp), %ebx # 4-byte Reload + movl %ebx, -136(%ebp) + movl -196(%ebp), %ebx # 4-byte Reload + movl %ebx, -132(%ebp) + movl -200(%ebp), %ebx # 4-byte Reload + movl %ebx, -128(%ebp) + movl -204(%ebp), %ebx # 4-byte Reload + movl %ebx, -124(%ebp) + movl -208(%ebp), %ebx # 4-byte Reload + movl %ebx, -120(%ebp) + movl %esi, %ebx + movl %edi, %esi + movl %eax, %edi + movl %edx, -116(%ebp) + movl %ecx, -112(%ebp) + sbbl %edx, %edx + movl -180(%ebp), %eax # 4-byte Reload + movl %eax, -80(%ebp) + movl -252(%ebp), %ecx # 4-byte Reload + pushl %eax + movl %ecx, %eax + addb $127, %al + sahf + popl %eax + jb .LBB240_6 +# BB#5: + movl $0, %eax + movl $0, %ebx + movl $0, %esi + movl $0, %edi +.LBB240_6: + movl %eax, -180(%ebp) # 4-byte Spill + sbbl %eax, %eax + leal -140(%ebp), %ecx + movl %ecx, 8(%esp) + leal -108(%ebp), %ecx + movl 
%ecx, 4(%esp) + leal -76(%ebp), %ecx + movl %ecx, (%esp) + andl %eax, %edx + movl -220(%ebp), %eax # 4-byte Reload + addl %eax, -168(%ebp) # 4-byte Folded Spill + adcl %edi, -164(%ebp) # 4-byte Folded Spill + movl -216(%ebp), %eax # 4-byte Reload + adcl %eax, -160(%ebp) # 4-byte Folded Spill + adcl %esi, -156(%ebp) # 4-byte Folded Spill + movl -244(%ebp), %eax # 4-byte Reload + adcl %eax, -152(%ebp) # 4-byte Folded Spill + adcl %ebx, -148(%ebp) # 4-byte Folded Spill + movl -144(%ebp), %eax # 4-byte Reload + adcl -240(%ebp), %eax # 4-byte Folded Reload + movl %eax, -144(%ebp) # 4-byte Spill + movl -172(%ebp), %edi # 4-byte Reload + adcl -180(%ebp), %edi # 4-byte Folded Reload + sbbl %esi, %esi + andl $1, %esi + andl $1, %edx + movl %edx, -176(%ebp) # 4-byte Spill + movl -224(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre8L@PLT + movl -168(%ebp), %eax # 4-byte Reload + addl -44(%ebp), %eax + movl %eax, -168(%ebp) # 4-byte Spill + movl -164(%ebp), %eax # 4-byte Reload + adcl -40(%ebp), %eax + movl %eax, -164(%ebp) # 4-byte Spill + movl -160(%ebp), %eax # 4-byte Reload + adcl -36(%ebp), %eax + movl %eax, -160(%ebp) # 4-byte Spill + movl -156(%ebp), %eax # 4-byte Reload + adcl -32(%ebp), %eax + movl %eax, -156(%ebp) # 4-byte Spill + movl -152(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -152(%ebp) # 4-byte Spill + movl -148(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -148(%ebp) # 4-byte Spill + movl -144(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -144(%ebp) # 4-byte Spill + adcl -16(%ebp), %edi + movl %edi, -172(%ebp) # 4-byte Spill + adcl %esi, -176(%ebp) # 4-byte Folded Spill + movl -76(%ebp), %eax + movl 8(%ebp), %esi + subl (%esi), %eax + movl %eax, -196(%ebp) # 4-byte Spill + movl -72(%ebp), %ecx + sbbl 4(%esi), %ecx + movl -68(%ebp), %eax + sbbl 8(%esi), %eax + movl %eax, -192(%ebp) # 4-byte Spill + movl -64(%ebp), %edx + sbbl 12(%esi), %edx + movl -60(%ebp), %ebx + sbbl 16(%esi), %ebx + movl -56(%ebp), %eax + sbbl 20(%esi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + movl -52(%ebp), %eax + sbbl 24(%esi), %eax + movl %eax, -184(%ebp) # 4-byte Spill + movl -48(%ebp), %eax + sbbl 28(%esi), %eax + movl %eax, -188(%ebp) # 4-byte Spill + movl 32(%esi), %eax + movl %eax, -200(%ebp) # 4-byte Spill + sbbl %eax, -168(%ebp) # 4-byte Folded Spill + movl 36(%esi), %eax + movl %eax, -204(%ebp) # 4-byte Spill + sbbl %eax, -164(%ebp) # 4-byte Folded Spill + movl 40(%esi), %eax + movl %eax, -208(%ebp) # 4-byte Spill + sbbl %eax, -160(%ebp) # 4-byte Folded Spill + movl 44(%esi), %eax + movl %eax, -212(%ebp) # 4-byte Spill + sbbl %eax, -156(%ebp) # 4-byte Folded Spill + movl 48(%esi), %eax + movl %eax, -216(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + movl 52(%esi), %eax + movl %eax, -220(%ebp) # 4-byte Spill + sbbl %eax, -148(%ebp) # 4-byte Folded Spill + movl 56(%esi), %eax + movl %eax, -224(%ebp) # 4-byte Spill + movl -144(%ebp), %edi # 4-byte Reload + sbbl %eax, %edi + movl 60(%esi), %eax + movl %eax, -228(%ebp) # 4-byte Spill + sbbl %eax, -172(%ebp) # 4-byte Folded Spill + sbbl $0, -176(%ebp) # 4-byte Folded Spill + movl 64(%esi), %eax + movl %eax, -260(%ebp) # 4-byte Spill + subl %eax, -196(%ebp) # 4-byte Folded Spill + movl 68(%esi), %eax + movl %eax, -264(%ebp) # 4-byte Spill + sbbl %eax, %ecx + movl 72(%esi), %eax + movl %eax, -268(%ebp) # 4-byte Spill + sbbl %eax, -192(%ebp) # 4-byte Folded Spill + movl 76(%esi), %eax + movl %eax, -272(%ebp) # 4-byte Spill + sbbl %eax, %edx + movl 80(%esi), %eax + movl 
%eax, -276(%ebp) # 4-byte Spill + sbbl %eax, %ebx + movl 84(%esi), %eax + movl %eax, -280(%ebp) # 4-byte Spill + sbbl %eax, -180(%ebp) # 4-byte Folded Spill + movl 88(%esi), %eax + movl %eax, -284(%ebp) # 4-byte Spill + sbbl %eax, -184(%ebp) # 4-byte Folded Spill + movl 92(%esi), %eax + movl %eax, -288(%ebp) # 4-byte Spill + sbbl %eax, -188(%ebp) # 4-byte Folded Spill + movl 96(%esi), %eax + movl %eax, -292(%ebp) # 4-byte Spill + sbbl %eax, -168(%ebp) # 4-byte Folded Spill + movl 100(%esi), %eax + movl %eax, -236(%ebp) # 4-byte Spill + sbbl %eax, -164(%ebp) # 4-byte Folded Spill + movl 104(%esi), %eax + movl %eax, -240(%ebp) # 4-byte Spill + sbbl %eax, -160(%ebp) # 4-byte Folded Spill + movl 108(%esi), %eax + movl %eax, -244(%ebp) # 4-byte Spill + sbbl %eax, -156(%ebp) # 4-byte Folded Spill + movl 112(%esi), %eax + movl %eax, -248(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + movl 116(%esi), %eax + movl %eax, -252(%ebp) # 4-byte Spill + sbbl %eax, -148(%ebp) # 4-byte Folded Spill + movl 120(%esi), %eax + movl %eax, -232(%ebp) # 4-byte Spill + sbbl %eax, %edi + movl %edi, -144(%ebp) # 4-byte Spill + movl 124(%esi), %eax + movl %eax, -256(%ebp) # 4-byte Spill + sbbl %eax, -172(%ebp) # 4-byte Folded Spill + movl -176(%ebp), %edi # 4-byte Reload + sbbl $0, %edi + movl -196(%ebp), %eax # 4-byte Reload + addl -200(%ebp), %eax # 4-byte Folded Reload + adcl -204(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 32(%esi) + movl -192(%ebp), %eax # 4-byte Reload + adcl -208(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 36(%esi) + adcl -212(%ebp), %edx # 4-byte Folded Reload + movl %eax, 40(%esi) + adcl -216(%ebp), %ebx # 4-byte Folded Reload + movl %edx, 44(%esi) + movl -180(%ebp), %eax # 4-byte Reload + adcl -220(%ebp), %eax # 4-byte Folded Reload + movl %ebx, 48(%esi) + movl -184(%ebp), %ecx # 4-byte Reload + adcl -224(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 52(%esi) + movl -188(%ebp), %edx # 4-byte Reload + adcl -228(%ebp), %edx # 4-byte Folded Reload + movl %ecx, 56(%esi) + movl -168(%ebp), %eax # 4-byte Reload + adcl -260(%ebp), %eax # 4-byte Folded Reload + movl %edx, 60(%esi) + movl -164(%ebp), %ecx # 4-byte Reload + adcl -264(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 64(%esi) + movl -160(%ebp), %eax # 4-byte Reload + adcl -268(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 68(%esi) + movl -156(%ebp), %ecx # 4-byte Reload + adcl -272(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 72(%esi) + movl -152(%ebp), %eax # 4-byte Reload + adcl -276(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 76(%esi) + movl -148(%ebp), %ecx # 4-byte Reload + adcl -280(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 80(%esi) + movl -144(%ebp), %eax # 4-byte Reload + adcl -284(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 84(%esi) + movl -172(%ebp), %ecx # 4-byte Reload + adcl -288(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 88(%esi) + adcl -292(%ebp), %edi # 4-byte Folded Reload + movl %ecx, 92(%esi) + movl %edi, 96(%esi) + movl -236(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 100(%esi) + movl -240(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 104(%esi) + movl -244(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 108(%esi) + movl -248(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 112(%esi) + movl -252(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 116(%esi) + movl -232(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 120(%esi) + movl -256(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 124(%esi) + addl 
$300, %esp # imm = 0x12C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end240: + .size mcl_fpDbl_mulPre16L, .Lfunc_end240-mcl_fpDbl_mulPre16L + + .globl mcl_fpDbl_sqrPre16L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre16L,@function +mcl_fpDbl_sqrPre16L: # @mcl_fpDbl_sqrPre16L +# BB#0: + pushl %ebp + movl %esp, %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $300, %esp # imm = 0x12C + calll .L241$pb +.L241$pb: + popl %ebx +.Ltmp52: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp52-.L241$pb), %ebx + movl %ebx, -184(%ebp) # 4-byte Spill + movl 12(%ebp), %edi + movl %edi, 8(%esp) + movl %edi, 4(%esp) + movl 8(%ebp), %esi + movl %esi, (%esp) + calll mcl_fpDbl_mulPre8L@PLT + leal 32(%edi), %eax + movl %eax, 8(%esp) + movl %eax, 4(%esp) + leal 64(%esi), %eax + movl %eax, (%esp) + calll mcl_fpDbl_mulPre8L@PLT + movl 52(%edi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + movl 48(%edi), %eax + movl 44(%edi), %ebx + movl 40(%edi), %esi + movl (%edi), %ecx + movl 4(%edi), %edx + addl 32(%edi), %ecx + movl %ecx, -192(%ebp) # 4-byte Spill + adcl 36(%edi), %edx + movl %edx, -196(%ebp) # 4-byte Spill + adcl 8(%edi), %esi + movl %esi, -188(%ebp) # 4-byte Spill + adcl 12(%edi), %ebx + adcl 16(%edi), %eax + movl %eax, -208(%ebp) # 4-byte Spill + movl -180(%ebp), %eax # 4-byte Reload + adcl 20(%edi), %eax + movl %eax, -180(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -168(%ebp) # 4-byte Spill + addl %ecx, %ecx + movl %ecx, -164(%ebp) # 4-byte Spill + adcl %edx, %edx + movl %edx, -160(%ebp) # 4-byte Spill + adcl %esi, %esi + movl %esi, -156(%ebp) # 4-byte Spill + movl %ebx, %edx + movl %ebx, %esi + adcl %edx, %edx + movl %edx, -152(%ebp) # 4-byte Spill + movl -208(%ebp), %eax # 4-byte Reload + movl %eax, %edx + movl %eax, %ebx + adcl %edx, %edx + movl %edx, -148(%ebp) # 4-byte Spill + movl -180(%ebp), %edx # 4-byte Reload + adcl %edx, %edx + movl %edx, -144(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -172(%ebp) # 4-byte Spill + movl 56(%edi), %edx + movl -168(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + adcl 24(%edi), %edx + movl 60(%edi), %ecx + adcl 28(%edi), %ecx + seto %al + lahf + movl %eax, %eax + movl %eax, -200(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %eax + movl %eax, -204(%ebp) # 4-byte Spill + seto %al + lahf + movl %eax, %edi + sbbl %eax, %eax + movl %eax, -168(%ebp) # 4-byte Spill + movl %edi, %eax + addb $127, %al + sahf + jb .LBB241_2 +# BB#1: + movl $0, -144(%ebp) # 4-byte Folded Spill + movl $0, -148(%ebp) # 4-byte Folded Spill + movl $0, -152(%ebp) # 4-byte Folded Spill + movl $0, -156(%ebp) # 4-byte Folded Spill + movl $0, -160(%ebp) # 4-byte Folded Spill + movl $0, -164(%ebp) # 4-byte Folded Spill +.LBB241_2: + movl %edx, %eax + movl -172(%ebp), %edi # 4-byte Reload + pushl %eax + movl %edi, %eax + addb $127, %al + sahf + popl %eax + adcl %eax, %eax + movl %ecx, %edi + adcl %edi, %edi + movl %edi, -176(%ebp) # 4-byte Spill + movl -204(%ebp), %edi # 4-byte Reload + pushl %eax + movl %edi, %eax + addb $127, %al + sahf + popl %eax + jb .LBB241_4 +# BB#3: + movl $0, -176(%ebp) # 4-byte Folded Spill + xorl %eax, %eax +.LBB241_4: + movl %eax, -172(%ebp) # 4-byte Spill + movl -192(%ebp), %eax # 4-byte Reload + movl %eax, -108(%ebp) + movl %eax, -140(%ebp) + movl -196(%ebp), %eax # 4-byte Reload + movl %eax, -104(%ebp) + movl %eax, -136(%ebp) + movl -188(%ebp), %eax # 4-byte Reload + movl %eax, -100(%ebp) + movl %eax, -132(%ebp) + movl %esi, -96(%ebp) + movl %esi, -128(%ebp) + movl %ebx, -92(%ebp) + movl 
%ebx, -124(%ebp) + movl -180(%ebp), %eax # 4-byte Reload + movl %eax, -88(%ebp) + movl %eax, -120(%ebp) + movl %edx, -84(%ebp) + movl %edx, -116(%ebp) + movl %ecx, -80(%ebp) + movl %ecx, -112(%ebp) + movl -200(%ebp), %eax # 4-byte Reload + movl %eax, %eax + addb $127, %al + sahf + jb .LBB241_5 +# BB#6: + xorl %edi, %edi + jmp .LBB241_7 +.LBB241_5: + shrl $31, %ecx + movl %ecx, %edi +.LBB241_7: + leal -140(%ebp), %eax + movl %eax, 8(%esp) + leal -108(%ebp), %eax + movl %eax, 4(%esp) + leal -76(%ebp), %eax + movl %eax, (%esp) + movl -168(%ebp), %esi # 4-byte Reload + andl $1, %esi + movl -184(%ebp), %ebx # 4-byte Reload + calll mcl_fpDbl_mulPre8L@PLT + movl -164(%ebp), %eax # 4-byte Reload + addl -44(%ebp), %eax + movl %eax, -164(%ebp) # 4-byte Spill + movl -160(%ebp), %eax # 4-byte Reload + adcl -40(%ebp), %eax + movl %eax, -160(%ebp) # 4-byte Spill + movl -156(%ebp), %eax # 4-byte Reload + adcl -36(%ebp), %eax + movl %eax, -156(%ebp) # 4-byte Spill + movl -152(%ebp), %eax # 4-byte Reload + adcl -32(%ebp), %eax + movl %eax, -152(%ebp) # 4-byte Spill + movl -148(%ebp), %eax # 4-byte Reload + adcl -28(%ebp), %eax + movl %eax, -148(%ebp) # 4-byte Spill + movl -144(%ebp), %eax # 4-byte Reload + adcl -24(%ebp), %eax + movl %eax, -144(%ebp) # 4-byte Spill + movl -172(%ebp), %eax # 4-byte Reload + adcl -20(%ebp), %eax + movl %eax, -172(%ebp) # 4-byte Spill + movl -176(%ebp), %eax # 4-byte Reload + adcl -16(%ebp), %eax + adcl %edi, %esi + movl %esi, -168(%ebp) # 4-byte Spill + movl -76(%ebp), %ecx + movl 8(%ebp), %esi + subl (%esi), %ecx + movl %ecx, -180(%ebp) # 4-byte Spill + movl -72(%ebp), %edi + sbbl 4(%esi), %edi + movl -68(%ebp), %edx + sbbl 8(%esi), %edx + movl %edx, -184(%ebp) # 4-byte Spill + movl -64(%ebp), %edx + sbbl 12(%esi), %edx + movl %edx, -192(%ebp) # 4-byte Spill + movl -60(%ebp), %ebx + sbbl 16(%esi), %ebx + movl %eax, %ecx + movl -56(%ebp), %eax + sbbl 20(%esi), %eax + movl %eax, -196(%ebp) # 4-byte Spill + movl -52(%ebp), %edx + sbbl 24(%esi), %edx + movl %edx, -188(%ebp) # 4-byte Spill + movl -48(%ebp), %edx + sbbl 28(%esi), %edx + movl 32(%esi), %eax + movl %eax, -200(%ebp) # 4-byte Spill + sbbl %eax, -164(%ebp) # 4-byte Folded Spill + movl 36(%esi), %eax + movl %eax, -204(%ebp) # 4-byte Spill + sbbl %eax, -160(%ebp) # 4-byte Folded Spill + movl 40(%esi), %eax + movl %eax, -208(%ebp) # 4-byte Spill + sbbl %eax, -156(%ebp) # 4-byte Folded Spill + movl 44(%esi), %eax + movl %eax, -212(%ebp) # 4-byte Spill + sbbl %eax, -152(%ebp) # 4-byte Folded Spill + movl 48(%esi), %eax + movl %eax, -216(%ebp) # 4-byte Spill + sbbl %eax, -148(%ebp) # 4-byte Folded Spill + movl 52(%esi), %eax + movl %eax, -220(%ebp) # 4-byte Spill + sbbl %eax, -144(%ebp) # 4-byte Folded Spill + movl 56(%esi), %eax + movl %eax, -224(%ebp) # 4-byte Spill + sbbl %eax, -172(%ebp) # 4-byte Folded Spill + movl 60(%esi), %eax + movl %eax, -228(%ebp) # 4-byte Spill + sbbl %eax, %ecx + movl %ecx, -176(%ebp) # 4-byte Spill + movl -168(%ebp), %eax # 4-byte Reload + sbbl $0, %eax + movl 64(%esi), %ecx + movl %ecx, -260(%ebp) # 4-byte Spill + subl %ecx, -180(%ebp) # 4-byte Folded Spill + movl 68(%esi), %ecx + movl %ecx, -264(%ebp) # 4-byte Spill + sbbl %ecx, %edi + movl 72(%esi), %ecx + movl %ecx, -268(%ebp) # 4-byte Spill + sbbl %ecx, -184(%ebp) # 4-byte Folded Spill + movl 76(%esi), %ecx + movl %ecx, -272(%ebp) # 4-byte Spill + sbbl %ecx, -192(%ebp) # 4-byte Folded Spill + movl 80(%esi), %ecx + movl %ecx, -276(%ebp) # 4-byte Spill + sbbl %ecx, %ebx + movl 84(%esi), %ecx + movl %ecx, -280(%ebp) # 4-byte Spill + sbbl 
%ecx, -196(%ebp) # 4-byte Folded Spill + movl 88(%esi), %ecx + movl %ecx, -284(%ebp) # 4-byte Spill + sbbl %ecx, -188(%ebp) # 4-byte Folded Spill + movl 92(%esi), %ecx + movl %ecx, -288(%ebp) # 4-byte Spill + sbbl %ecx, %edx + movl 96(%esi), %ecx + movl %ecx, -292(%ebp) # 4-byte Spill + sbbl %ecx, -164(%ebp) # 4-byte Folded Spill + movl 100(%esi), %ecx + movl %ecx, -232(%ebp) # 4-byte Spill + sbbl %ecx, -160(%ebp) # 4-byte Folded Spill + movl 104(%esi), %ecx + movl %ecx, -236(%ebp) # 4-byte Spill + sbbl %ecx, -156(%ebp) # 4-byte Folded Spill + movl 108(%esi), %ecx + movl %ecx, -240(%ebp) # 4-byte Spill + sbbl %ecx, -152(%ebp) # 4-byte Folded Spill + movl 112(%esi), %ecx + movl %ecx, -244(%ebp) # 4-byte Spill + sbbl %ecx, -148(%ebp) # 4-byte Folded Spill + movl 116(%esi), %ecx + movl %ecx, -248(%ebp) # 4-byte Spill + sbbl %ecx, -144(%ebp) # 4-byte Folded Spill + movl 120(%esi), %ecx + movl %ecx, -252(%ebp) # 4-byte Spill + sbbl %ecx, -172(%ebp) # 4-byte Folded Spill + movl 124(%esi), %ecx + movl %ecx, -256(%ebp) # 4-byte Spill + sbbl %ecx, -176(%ebp) # 4-byte Folded Spill + sbbl $0, %eax + movl %eax, -168(%ebp) # 4-byte Spill + movl -180(%ebp), %eax # 4-byte Reload + addl -200(%ebp), %eax # 4-byte Folded Reload + adcl -204(%ebp), %edi # 4-byte Folded Reload + movl %eax, 32(%esi) + movl -184(%ebp), %eax # 4-byte Reload + adcl -208(%ebp), %eax # 4-byte Folded Reload + movl %edi, 36(%esi) + movl -192(%ebp), %ecx # 4-byte Reload + adcl -212(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 40(%esi) + adcl -216(%ebp), %ebx # 4-byte Folded Reload + movl %ecx, 44(%esi) + movl -196(%ebp), %ecx # 4-byte Reload + adcl -220(%ebp), %ecx # 4-byte Folded Reload + movl %ebx, 48(%esi) + movl -188(%ebp), %eax # 4-byte Reload + adcl -224(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 52(%esi) + movl %edx, %ecx + adcl -228(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 56(%esi) + movl -164(%ebp), %eax # 4-byte Reload + adcl -260(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 60(%esi) + movl -160(%ebp), %ecx # 4-byte Reload + adcl -264(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 64(%esi) + movl -156(%ebp), %eax # 4-byte Reload + adcl -268(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 68(%esi) + movl -152(%ebp), %ecx # 4-byte Reload + adcl -272(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 72(%esi) + movl -148(%ebp), %eax # 4-byte Reload + adcl -276(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 76(%esi) + movl -144(%ebp), %ecx # 4-byte Reload + adcl -280(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 80(%esi) + movl -172(%ebp), %eax # 4-byte Reload + adcl -284(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 84(%esi) + movl -176(%ebp), %ecx # 4-byte Reload + adcl -288(%ebp), %ecx # 4-byte Folded Reload + movl %eax, 88(%esi) + movl -168(%ebp), %eax # 4-byte Reload + adcl -292(%ebp), %eax # 4-byte Folded Reload + movl %ecx, 92(%esi) + movl %eax, 96(%esi) + movl -232(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 100(%esi) + movl -236(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 104(%esi) + movl -240(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 108(%esi) + movl -244(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 112(%esi) + movl -248(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 116(%esi) + movl -252(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 120(%esi) + movl -256(%ebp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 124(%esi) + addl $300, %esp # imm = 0x12C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end241: + 
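+# mcl_fp_mont16L below is Montgomery multiplication for 16-limb (16 x 32-bit,
+# i.e. 512-bit) operands on x86-32, in the word-serial interleaved
+# (CIOS-style) pattern visible in the code: for each 32-bit word x[i] of the
+# multiplier it
+#   1. calls .LmulPv512x32 to accumulate a * x[i] into the running total t,
+#   2. forms m = (t mod 2^32) * inv mod 2^32, where inv = -p^(-1) mod 2^32 is
+#      loaded from -4(p) and kept at 56(%esp) by the prologue,
+#   3. calls .LmulPv512x32 again to add m * p, which zeroes the low word of t
+#      so it can be dropped (the addl/adcl chains after each call).
+# After all 16 words the epilogue subtracts p once and uses the borrow to
+# choose between t and t - p, so the stored result is the canonical
+# representative less than p.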
.size mcl_fpDbl_sqrPre16L, .Lfunc_end241-mcl_fpDbl_sqrPre16L + + .globl mcl_fp_mont16L + .align 16, 0x90 + .type mcl_fp_mont16L,@function +mcl_fp_mont16L: # @mcl_fp_mont16L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2428, %esp # imm = 0x97C + calll .L242$pb +.L242$pb: + popl %ebx +.Ltmp53: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp53-.L242$pb), %ebx + movl 2460(%esp), %eax + movl -4(%eax), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 2360(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 2360(%esp), %ebp + movl 2364(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 2424(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 2420(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 2416(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 2412(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2408(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 2404(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2400(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2396(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 2392(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 2388(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2384(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 2380(%esp), %edi + movl 2376(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2372(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2368(%esp), %esi + movl %eax, (%esp) + leal 2288(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + addl 2288(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 2292(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 2296(%esp), %esi + movl %esi, %ebp + movl 100(%esp), %eax # 4-byte Reload + adcl 2300(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2304(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2308(%esp), %edi + movl %edi, %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 2312(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2316(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2320(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2328(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2332(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2336(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2340(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2344(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 2348(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2352(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 2456(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 2216(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + andl $1, %edi + movl 112(%esp), %ecx # 4-byte Reload + addl 2216(%esp), %ecx + adcl 2220(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2224(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 
88(%esp), %eax # 4-byte Reload + adcl 2228(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2232(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + adcl 2236(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 2240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2244(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2248(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2252(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2256(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2260(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2268(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 2272(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2276(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 2280(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2144(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %edi + addl 2144(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 2148(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2152(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2156(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2160(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 2164(%esp), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 2168(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 2172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2176(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2180(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2184(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2188(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2192(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2196(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 2200(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2204(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 2208(%esp), %esi + adcl $0, %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 2072(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 112(%esp), %ecx # 4-byte Reload + addl 2072(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2076(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2080(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2084(%esp), %eax + movl %eax, 
80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2088(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 2092(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2096(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2100(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2104(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 2108(%esp), %ebp + movl 92(%esp), %eax # 4-byte Reload + adcl 2112(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 2116(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 2120(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 2124(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 2128(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 2132(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2136(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2000(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %esi + movl %esi, %eax + movl 112(%esp), %ecx # 4-byte Reload + addl 2000(%esp), %ecx + movl 100(%esp), %ecx # 4-byte Reload + adcl 2004(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 2008(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2012(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 2016(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2020(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2024(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 2028(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 2032(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 2036(%esp), %ebp + movl %ebp, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 2040(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 2044(%esp), %edi + movl 116(%esp), %ecx # 4-byte Reload + adcl 2048(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl 2052(%esp), %ebp + movl 124(%esp), %esi # 4-byte Reload + adcl 2056(%esp), %esi + movl 108(%esp), %ecx # 4-byte Reload + adcl 2060(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 2064(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1928(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 100(%esp), %ecx # 4-byte Reload + addl 1928(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 1932(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1936(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1940(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1944(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1948(%esp), 
%eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1952(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1956(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1960(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1964(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1968(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 1972(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 1976(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + adcl 1980(%esp), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1984(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1988(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1992(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1856(%esp), %ecx + movl 2460(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + andl $1, %ebp + movl %ebp, %eax + addl 1856(%esp), %esi + movl 88(%esp), %ecx # 4-byte Reload + adcl 1860(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1864(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1868(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1872(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1876(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1880(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1884(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1888(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 1892(%esp), %esi + adcl 1896(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 116(%esp), %ebp # 4-byte Reload + adcl 1900(%esp), %ebp + movl 120(%esp), %ecx # 4-byte Reload + adcl 1904(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1908(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1912(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1916(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1920(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1784(%esp), %ecx + movl 2452(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + movl 88(%esp), %ecx # 4-byte Reload + addl 1784(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1792(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 1804(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + adcl 1808(%esp), %eax + movl %eax, 76(%esp) # 
4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1816(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1820(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 1824(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1828(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1832(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1836(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1840(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1844(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1848(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1712(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %esi + movl %esi, %ecx + addl 1712(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1716(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1720(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1724(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1728(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1732(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1736(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1740(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 1744(%esp), %edi + movl 96(%esp), %eax # 4-byte Reload + adcl 1748(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1752(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1756(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1760(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 1764(%esp), %ebp + movl 104(%esp), %esi # 4-byte Reload + adcl 1768(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 1772(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1776(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1640(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 80(%esp), %ecx # 4-byte Reload + addl 1640(%esp), %ecx + movl 60(%esp), %eax # 4-byte Reload + adcl 1644(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1648(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1652(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1656(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1660(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1664(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1668(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 
96(%esp), %eax # 4-byte Reload + adcl 1672(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1676(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1680(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1684(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 1688(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + adcl 1692(%esp), %esi + movl %esi, %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 1696(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1700(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1704(%esp), %esi + sbbl %eax, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1568(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl 80(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1568(%esp), %ebp + movl 60(%esp), %ecx # 4-byte Reload + adcl 1572(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1576(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1580(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1584(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 1588(%esp), %ebp + movl 84(%esp), %ecx # 4-byte Reload + adcl 1592(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1596(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1600(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1604(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 1608(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1612(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1616(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 1620(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1624(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1628(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl 1632(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1496(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 60(%esp), %ecx # 4-byte Reload + addl 1496(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1500(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 1504(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 1508(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1512(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 1516(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1520(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1528(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte 
Reload + adcl 1532(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1540(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1544(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1548(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1552(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1424(%esp), %ecx + movl 2460(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + andl $1, %ebp + movl 60(%esp), %eax # 4-byte Reload + addl 1424(%esp), %eax + movl 72(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1432(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1436(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1440(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1444(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1448(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1452(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1456(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1460(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1464(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 1472(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 1476(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 1480(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 1484(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1488(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %ebp + movl 2456(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1352(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 1352(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1360(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1376(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1380(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1384(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1388(%esp), 
%eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1396(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1400(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 1404(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1408(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 1412(%esp), %esi + adcl 1416(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl %ecx, %eax + movl %ecx, %edi + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1280(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %ebp + movl %ebp, %eax + addl 1280(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 1284(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 1288(%esp), %ebp + movl 76(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1296(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1300(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1304(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1308(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 1312(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1316(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1320(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1324(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1328(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1332(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1336(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + adcl 1340(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1344(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, %edi + movl 2456(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl 2452(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + movl 64(%esp), %ecx # 4-byte Reload + addl 1208(%esp), %ecx + adcl 1212(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1216(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1244(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1248(%esp), %eax + movl %eax, 
104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1260(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 1264(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1272(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1136(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %edi + movl %edi, %ecx + addl 1136(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1148(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %edi # 4-byte Reload + adcl 1164(%esp), %edi + movl 124(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 1188(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 1192(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1200(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 1064(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 68(%esp), %ecx # 4-byte Reload + addl 1064(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1080(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 1088(%esp), %edi + movl %edi, 120(%esp) # 4-byte Spill + movl 124(%esp), %esi # 4-byte Reload + adcl 1092(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 100(%esp) # 4-byte 
Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1116(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %ebp + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl %edi, %eax + andl $1, %eax + addl 992(%esp), %ebp + movl 76(%esp), %ecx # 4-byte Reload + adcl 996(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1000(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1004(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 1008(%esp), %edi + movl 116(%esp), %ebp # 4-byte Reload + adcl 1012(%esp), %ebp + movl 120(%esp), %ecx # 4-byte Reload + adcl 1016(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + adcl 1020(%esp), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1024(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1028(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 1032(%esp), %esi + movl 100(%esp), %ecx # 4-byte Reload + adcl 1036(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1040(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1044(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1048(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1052(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1056(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 920(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 76(%esp), %ecx # 4-byte Reload + addl 920(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 928(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 932(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + adcl 936(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 956(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 968(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), 
%eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %esi + movl %esi, %eax + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 848(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + andl $1, %edi + movl %edi, %ecx + addl 848(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 856(%esp), %edi + movl 96(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %esi # 4-byte Reload + adcl 868(%esp), %esi + movl 124(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 896(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 776(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 84(%esp), %ecx # 4-byte Reload + addl 776(%esp), %ecx + adcl 780(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 784(%esp), %edi + movl 116(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 792(%esp), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 800(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 
68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl 84(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 704(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 712(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl %ebp, %esi + adcl 728(%esp), %esi + movl 104(%esp), %ebp # 4-byte Reload + adcl 732(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 752(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 92(%esp), %ecx # 4-byte Reload + addl 632(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 640(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 652(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl 656(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 664(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 676(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 680(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl %ecx, %eax + movl 
%ecx, %esi + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl 92(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 560(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %esi # 4-byte Reload + adcl 576(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 592(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 608(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 612(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 488(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 96(%esp), %ecx # 4-byte Reload + addl 488(%esp), %ecx + movl 116(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 500(%esp), %esi + movl %esi, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 508(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 516(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 520(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 536(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl 96(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 416(%esp), 
%edi + movl 116(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + adcl 432(%esp), %edi + adcl 436(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 440(%esp), %esi + movl 100(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 448(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 116(%esp), %ecx # 4-byte Reload + addl 344(%esp), %ecx + movl 120(%esp), %ebp # 4-byte Reload + adcl 348(%esp), %ebp + movl 124(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 356(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 360(%esp), %edi + adcl 364(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 116(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 56(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 2460(%esp), %edx + calll .LmulPv512x32 + movl 116(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 272(%esp), %esi + adcl 276(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 280(%esp), %eax + movl %eax, 
124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 288(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 296(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 308(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 2456(%esp), %eax + movl 60(%eax), %eax + movl %eax, (%esp) + leal 200(%esp), %ecx + movl 2452(%esp), %edx + calll .LmulPv512x32 + movl 120(%esp), %ecx # 4-byte Reload + addl 200(%esp), %ecx + movl 124(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 212(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 220(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 232(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 244(%esp), %edi + movl 76(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + sbbl %ebp, %ebp + movl 56(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 2460(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + andl $1, %ebp + addl 128(%esp), %esi + movl 104(%esp), %ebx # 4-byte Reload + movl 124(%esp), %eax # 4-byte Reload + adcl 132(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 136(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 140(%esp), %ebx + movl %ebx, 104(%esp) # 4-byte Spill + movl 112(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 112(%esp) 
# 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 148(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 152(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 156(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 160(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 164(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 168(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl 172(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 176(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 180(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 184(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 188(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 116(%esp), %edx # 4-byte Reload + adcl 192(%esp), %edx + movl %edx, 116(%esp) # 4-byte Spill + adcl $0, %ebp + movl %eax, %edx + movl 2460(%esp), %edi + subl (%edi), %edx + movl %ecx, %eax + sbbl 4(%edi), %eax + movl %ebx, %ecx + sbbl 8(%edi), %ecx + movl 112(%esp), %ebx # 4-byte Reload + sbbl 12(%edi), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 100(%esp), %ebx # 4-byte Reload + sbbl 16(%edi), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 20(%edi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + sbbl 24(%edi), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + sbbl 28(%edi), %esi + movl %esi, 28(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + sbbl 32(%edi), %esi + movl %esi, 32(%esp) # 4-byte Spill + movl 60(%esp), %esi # 4-byte Reload + sbbl 36(%edi), %esi + movl %esi, 36(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + sbbl 40(%edi), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + sbbl 44(%edi), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + sbbl 48(%edi), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + sbbl 52(%edi), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 96(%esp), %esi # 4-byte Reload + sbbl 56(%edi), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + sbbl 60(%edi), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 124(%esp), %edi # 4-byte Reload + sbbl $0, %ebp + andl $1, %ebp + movl %ebp, %ebx + jne .LBB242_2 +# BB#1: + movl %edx, %edi +.LBB242_2: + movl 2448(%esp), %edx + movl %edi, (%edx) + testb %bl, %bl + movl 108(%esp), %edi # 4-byte Reload + jne .LBB242_4 +# BB#3: + movl %eax, %edi +.LBB242_4: + movl %edi, 4(%edx) + jne .LBB242_6 +# BB#5: + movl %ecx, 104(%esp) # 4-byte Spill +.LBB242_6: + movl 104(%esp), %eax # 4-byte Reload + movl %eax, 8(%edx) + jne .LBB242_8 +# BB#7: + movl 12(%esp), %eax # 4-byte Reload + movl %eax, 112(%esp) # 4-byte Spill +.LBB242_8: + movl 112(%esp), %eax # 4-byte Reload + movl %eax, 12(%edx) + movl 100(%esp), %eax # 4-byte Reload + jne .LBB242_10 +# BB#9: + movl 16(%esp), %eax # 4-byte Reload +.LBB242_10: + movl %eax, 16(%edx) + movl 88(%esp), %eax # 4-byte Reload + jne .LBB242_12 +# BB#11: + movl 20(%esp), %eax # 4-byte Reload +.LBB242_12: + movl %eax, 20(%edx) + jne .LBB242_14 +# BB#13: + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 80(%esp) # 4-byte Spill 
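+# The jne/movl pairs above and below select each limb of the final result: the earlier subl/sbbl chain computed (accumulator - modulus) into scratch slots, %ebx holds the borrow, so a taken branch keeps the unsubtracted limb while the fall-through takes the subtracted one. This is the usual conditional final subtraction that maps the Montgomery accumulator into [0, p) before the limbs are stored through the output pointer at 2448(%esp).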
+.LBB242_14: + movl 80(%esp), %eax # 4-byte Reload + movl %eax, 24(%edx) + movl 72(%esp), %eax # 4-byte Reload + jne .LBB242_16 +# BB#15: + movl 28(%esp), %eax # 4-byte Reload +.LBB242_16: + movl %eax, 28(%edx) + movl 64(%esp), %eax # 4-byte Reload + jne .LBB242_18 +# BB#17: + movl 32(%esp), %eax # 4-byte Reload +.LBB242_18: + movl %eax, 32(%edx) + movl 60(%esp), %eax # 4-byte Reload + jne .LBB242_20 +# BB#19: + movl 36(%esp), %eax # 4-byte Reload +.LBB242_20: + movl %eax, 36(%edx) + movl 68(%esp), %eax # 4-byte Reload + jne .LBB242_22 +# BB#21: + movl 40(%esp), %eax # 4-byte Reload +.LBB242_22: + movl %eax, 40(%edx) + movl 76(%esp), %eax # 4-byte Reload + jne .LBB242_24 +# BB#23: + movl 44(%esp), %eax # 4-byte Reload +.LBB242_24: + movl %eax, 44(%edx) + movl 84(%esp), %eax # 4-byte Reload + jne .LBB242_26 +# BB#25: + movl 48(%esp), %eax # 4-byte Reload +.LBB242_26: + movl %eax, 48(%edx) + movl 92(%esp), %eax # 4-byte Reload + jne .LBB242_28 +# BB#27: + movl 52(%esp), %eax # 4-byte Reload +.LBB242_28: + movl %eax, 52(%edx) + movl 96(%esp), %eax # 4-byte Reload + jne .LBB242_30 +# BB#29: + movl 56(%esp), %eax # 4-byte Reload +.LBB242_30: + movl %eax, 56(%edx) + movl 116(%esp), %eax # 4-byte Reload + jne .LBB242_32 +# BB#31: + movl 120(%esp), %eax # 4-byte Reload +.LBB242_32: + movl %eax, 60(%edx) + addl $2428, %esp # imm = 0x97C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end242: + .size mcl_fp_mont16L, .Lfunc_end242-mcl_fp_mont16L + + .globl mcl_fp_montNF16L + .align 16, 0x90 + .type mcl_fp_montNF16L,@function +mcl_fp_montNF16L: # @mcl_fp_montNF16L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2412, %esp # imm = 0x96C + calll .L243$pb +.L243$pb: + popl %ebx +.Ltmp54: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp54-.L243$pb), %ebx + movl 2444(%esp), %eax + movl -4(%eax), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 2344(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 2344(%esp), %edi + movl 2348(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl %edi, %eax + imull %esi, %eax + movl 2408(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2404(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 2400(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 2396(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2392(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2388(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2384(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 2380(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 2376(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 2372(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 2368(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 2364(%esp), %ebp + movl 2360(%esp), %esi + movl 2356(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2352(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 2272(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 2272(%esp), %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 2276(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2280(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2284(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 2288(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 2292(%esp), %ebp + movl 64(%esp), %eax # 4-byte Reload + adcl 2296(%esp), %eax + movl %eax, 
64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2300(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 2304(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 2308(%esp), %edi + movl 56(%esp), %eax # 4-byte Reload + adcl 2312(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2316(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2320(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2324(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 2328(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 2332(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2336(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 2200(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 2264(%esp), %edx + movl 108(%esp), %ecx # 4-byte Reload + addl 2200(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 2204(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2208(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2212(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 2216(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2220(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2224(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 2228(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 2232(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 2236(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 2240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2244(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2248(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 2252(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2256(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 2260(%esp), %esi + adcl $0, %edx + movl %edx, 108(%esp) # 4-byte Spill + movl %ecx, %edi + movl %edi, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2128(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 2128(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 2132(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2136(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2140(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2144(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2148(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2152(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %edi # 4-byte Reload + adcl 2156(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 2160(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 2164(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax 
# 4-byte Reload + adcl 2168(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2172(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2176(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2180(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2184(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 2188(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 2192(%esp), %esi + movl 2440(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 2056(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 2120(%esp), %eax + movl 84(%esp), %edx # 4-byte Reload + addl 2056(%esp), %edx + movl 76(%esp), %ecx # 4-byte Reload + adcl 2060(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2064(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 2068(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2072(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 2076(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + adcl 2080(%esp), %edi + movl %edi, %ebp + movl 52(%esp), %ecx # 4-byte Reload + adcl 2084(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 2088(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2092(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 2096(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 2100(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 2104(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 2108(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 2112(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + adcl 2116(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1984(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1984(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 1988(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1992(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1996(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2000(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 2004(%esp), %edi + adcl 2008(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 2012(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 2016(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2020(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2024(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2028(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2032(%esp), %eax + movl %eax, 
92(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 2036(%esp), %ebp + movl 100(%esp), %esi # 4-byte Reload + adcl 2040(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 2044(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2048(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1912(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1976(%esp), %eax + movl 76(%esp), %edx # 4-byte Reload + addl 1912(%esp), %edx + movl 80(%esp), %ecx # 4-byte Reload + adcl 1916(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1920(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1924(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 1928(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 1932(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1936(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1940(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1944(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 1948(%esp), %edi + movl 96(%esp), %ecx # 4-byte Reload + adcl 1952(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1956(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1960(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + adcl 1964(%esp), %esi + movl 108(%esp), %ecx # 4-byte Reload + adcl 1968(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1972(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1840(%esp), %ecx + movl 2444(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + addl 1840(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1844(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1848(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1852(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1856(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1860(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1864(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1868(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1872(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1876(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 1880(%esp), %edi + movl 92(%esp), %ebp # 4-byte Reload + adcl 1884(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 1888(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1892(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1896(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1900(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 1904(%esp), %esi + movl 
2440(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1768(%esp), %ecx + movl 2436(%esp), %eax + movl %eax, %edx + calll .LmulPv512x32 + movl 1832(%esp), %edx + movl 80(%esp), %ecx # 4-byte Reload + addl 1768(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 1772(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1776(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1780(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1784(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1788(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1792(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1796(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1800(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1804(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + adcl 1808(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1812(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1816(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1820(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1824(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 1828(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 80(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1696(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1696(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 1700(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1704(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1708(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1712(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1716(%esp), %ebp + movl 56(%esp), %edi # 4-byte Reload + adcl 1720(%esp), %edi + movl 72(%esp), %eax # 4-byte Reload + adcl 1724(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1728(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1732(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1736(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 1740(%esp), %esi + movl 100(%esp), %eax # 4-byte Reload + adcl 1744(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1748(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1752(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1756(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1760(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1624(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1688(%esp), %edx + movl 68(%esp), %ecx # 4-byte Reload + addl 1624(%esp), 
%ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 1628(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1632(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1636(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 1640(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 1644(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 1648(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 1652(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1656(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1660(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1664(%esp), %esi + movl %esi, %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 1668(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1672(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1676(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1680(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1684(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1552(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1552(%esp), %esi + movl 64(%esp), %esi # 4-byte Reload + adcl 1556(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1564(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1568(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1572(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1576(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1580(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1584(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1588(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 1592(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1596(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 1600(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 1604(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1608(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1612(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 1616(%esp), %edi + movl 2440(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1480(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1544(%esp), %eax + addl 1480(%esp), %esi + movl 60(%esp), %edx # 4-byte Reload + adcl 1484(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 48(%esp), %edx # 4-byte Reload + adcl 1488(%esp), %edx + movl %edx, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 1492(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 1496(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill 
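+# Each round of mcl_fp_montNF16L follows the pattern seen here: .LmulPv512x32 multiplies the 512-bit operand by one 32-bit limb of the multiplier (fetched via 2440(%esp)), the adcl chain folds the 17-word product into the spill slots, then the low word times the precomputed inverse at 44(%esp) drives a second .LmulPv512x32 call against the modulus at 2444(%esp) for the reduction step.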
+ movl 72(%esp), %edx # 4-byte Reload + adcl 1500(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 1504(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 1508(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 1512(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 104(%esp), %edx # 4-byte Reload + adcl 1516(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 1520(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + adcl 1524(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 1528(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 1532(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 1536(%esp), %ebp + adcl 1540(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl %eax, %edi + adcl $0, %edi + movl %esi, %eax + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1408(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1408(%esp), %esi + movl 60(%esp), %eax # 4-byte Reload + adcl 1412(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %esi # 4-byte Reload + adcl 1416(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1432(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1436(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1440(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1444(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1448(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1452(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1456(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1460(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1464(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 1468(%esp), %ebp + adcl 1472(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1336(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1400(%esp), %eax + movl 60(%esp), %ecx # 4-byte Reload + addl 1336(%esp), %ecx + adcl 1340(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 1344(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 1348(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 1352(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 1356(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 1360(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 1364(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 104(%esp), %edx # 4-byte Reload + 
adcl 1368(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 1372(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 108(%esp), %edx # 4-byte Reload + adcl 1376(%esp), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 1380(%esp), %edi + movl 76(%esp), %esi # 4-byte Reload + adcl 1384(%esp), %esi + movl 80(%esp), %edx # 4-byte Reload + adcl 1388(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + adcl 1392(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 1396(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1264(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1264(%esp), %ebp + movl 48(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1288(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1308(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + adcl 1312(%esp), %esi + movl %esi, %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1320(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1324(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1328(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 1192(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1256(%esp), %eax + movl 48(%esp), %ecx # 4-byte Reload + addl 1192(%esp), %ecx + movl 52(%esp), %edx # 4-byte Reload + adcl 1196(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 1200(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + adcl 1204(%esp), %esi + movl 88(%esp), %edx # 4-byte Reload + adcl 1208(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 1212(%esp), %edi + movl 92(%esp), %edx # 4-byte Reload + adcl 1216(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 104(%esp), %edx # 4-byte Reload + adcl 1220(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 1224(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 108(%esp), %edx # 4-byte Reload + adcl 1228(%esp), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 1232(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + adcl 1236(%esp), 
%ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 1240(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 1244(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 1248(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 1252(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 48(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1120(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 1120(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 1132(%esp), %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 1140(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 1144(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 1148(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 1176(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 1048(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 1112(%esp), %edx + movl 52(%esp), %ecx # 4-byte Reload + addl 1048(%esp), %ecx + movl 56(%esp), %esi # 4-byte Reload + adcl 1052(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 1068(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1072(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 1076(%esp), %ebp + movl 108(%esp), %eax # 4-byte Reload + adcl 1080(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1088(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1100(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + 
movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 52(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 976(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 976(%esp), %edi + adcl 980(%esp), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 1000(%esp), %edi + adcl 1004(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 1008(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 1016(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 904(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 968(%esp), %ecx + movl 56(%esp), %eax # 4-byte Reload + addl 904(%esp), %eax + movl 72(%esp), %edx # 4-byte Reload + adcl 908(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 912(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 916(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 920(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + adcl 924(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 928(%esp), %edi + adcl 932(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 936(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + adcl 940(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 944(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 948(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 952(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 956(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 960(%esp), %ebp + movl 52(%esp), %edx # 4-byte Reload + adcl 964(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl %eax, %esi + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 832(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 832(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 
836(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 856(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 872(%esp), %esi + movl 68(%esp), %edi # 4-byte Reload + adcl 876(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 888(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 892(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 760(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 824(%esp), %edx + movl 72(%esp), %ecx # 4-byte Reload + addl 760(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 796(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 800(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 808(%esp), %edi + movl 48(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + adcl 816(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 72(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 688(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 688(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 
4-byte Reload + adcl 708(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 716(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 732(%esp), %ebp + adcl 736(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 616(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 680(%esp), %edx + movl 88(%esp), %ecx # 4-byte Reload + addl 616(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 624(%esp), %edi + movl 104(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 640(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 656(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 672(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 88(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 544(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 544(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 552(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 560(%esp), %edi + movl 108(%esp), %esi # 4-byte Reload + adcl 564(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + 
adcl 580(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 600(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 472(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 536(%esp), %edx + movl 96(%esp), %ecx # 4-byte Reload + addl 472(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 484(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + adcl 488(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %ebp # 4-byte Reload + adcl 496(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 96(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 400(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 400(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 412(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 420(%esp), %edi + adcl 424(%esp), %ebp + movl %ebp, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %ebp # 4-byte Reload + adcl 444(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 52(%esp) # 
4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 328(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 392(%esp), %edx + movl 92(%esp), %ecx # 4-byte Reload + addl 328(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 336(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 344(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 352(%esp), %esi + movl 68(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 368(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 92(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 44(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 256(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 256(%esp), %ebp + movl 104(%esp), %edi # 4-byte Reload + adcl 260(%esp), %edi + movl 100(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 268(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 280(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 284(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte 
Reload + adcl 320(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 2440(%esp), %eax + movl 60(%eax), %eax + movl %eax, (%esp) + leal 184(%esp), %ecx + movl 2436(%esp), %edx + calll .LmulPv512x32 + movl 248(%esp), %edx + movl %edi, %ecx + addl 184(%esp), %ecx + movl 100(%esp), %edi # 4-byte Reload + adcl 188(%esp), %edi + adcl 192(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 84(%esp), %ebp # 4-byte Reload + adcl 196(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 204(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 208(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 112(%esp), %ecx + movl 2444(%esp), %edx + calll .LmulPv512x32 + addl 112(%esp), %esi + movl %edi, %eax + adcl 116(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + adcl 120(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + adcl 124(%esp), %ebp + movl 76(%esp), %ecx # 4-byte Reload + adcl 128(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl %ecx, %ebx + movl 80(%esp), %ecx # 4-byte Reload + adcl 132(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 136(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 140(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 144(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 48(%esp), %ecx # 4-byte Reload + adcl 148(%esp), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 152(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 156(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 160(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 164(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 168(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 172(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 176(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl %eax, %edx + movl 2444(%esp), %esi + subl (%esi), %edx + sbbl 4(%esi), %edi + movl %ebp, %ecx + sbbl 8(%esi), %ecx + movl %ebx, %eax + sbbl 12(%esi), %eax + movl 80(%esp), %ebx # 4-byte Reload + sbbl 16(%esi), %ebx + movl 
%ebx, 4(%esp) # 4-byte Spill + movl 68(%esp), %ebx # 4-byte Reload + sbbl 20(%esi), %ebx + movl %ebx, 8(%esp) # 4-byte Spill + movl 64(%esp), %ebx # 4-byte Reload + sbbl 24(%esi), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 60(%esp), %ebx # 4-byte Reload + sbbl 28(%esi), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 48(%esp), %ebx # 4-byte Reload + sbbl 32(%esi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 52(%esp), %ebx # 4-byte Reload + sbbl 36(%esi), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %ebx # 4-byte Reload + sbbl 40(%esi), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl 72(%esp), %ebx # 4-byte Reload + sbbl 44(%esi), %ebx + movl %ebx, 32(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 48(%esi), %ebx + movl %ebx, 36(%esp) # 4-byte Spill + movl 96(%esp), %ebx # 4-byte Reload + sbbl 52(%esi), %ebx + movl %ebx, 40(%esp) # 4-byte Spill + movl 92(%esp), %ebx # 4-byte Reload + sbbl 56(%esi), %ebx + movl %ebx, 44(%esp) # 4-byte Spill + movl 104(%esp), %ebx # 4-byte Reload + sbbl 60(%esi), %ebx + movl %ebx, 84(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + testl %ebx, %ebx + js .LBB243_2 +# BB#1: + movl %edx, %esi +.LBB243_2: + movl 2432(%esp), %edx + movl %esi, (%edx) + movl 108(%esp), %esi # 4-byte Reload + js .LBB243_4 +# BB#3: + movl %edi, %esi +.LBB243_4: + movl %esi, 4(%edx) + js .LBB243_6 +# BB#5: + movl %ecx, %ebp +.LBB243_6: + movl %ebp, 8(%edx) + movl 76(%esp), %ecx # 4-byte Reload + js .LBB243_8 +# BB#7: + movl %eax, %ecx +.LBB243_8: + movl %ecx, 12(%edx) + movl 80(%esp), %eax # 4-byte Reload + js .LBB243_10 +# BB#9: + movl 4(%esp), %eax # 4-byte Reload +.LBB243_10: + movl %eax, 16(%edx) + movl 68(%esp), %eax # 4-byte Reload + js .LBB243_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB243_12: + movl %eax, 20(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB243_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB243_14: + movl %eax, 24(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB243_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB243_16: + movl %eax, 28(%edx) + movl 48(%esp), %eax # 4-byte Reload + js .LBB243_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB243_18: + movl %eax, 32(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB243_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB243_20: + movl %eax, 36(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB243_22 +# BB#21: + movl 28(%esp), %eax # 4-byte Reload +.LBB243_22: + movl %eax, 40(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB243_24 +# BB#23: + movl 32(%esp), %eax # 4-byte Reload +.LBB243_24: + movl %eax, 44(%edx) + movl 88(%esp), %eax # 4-byte Reload + js .LBB243_26 +# BB#25: + movl 36(%esp), %eax # 4-byte Reload +.LBB243_26: + movl %eax, 48(%edx) + movl 96(%esp), %eax # 4-byte Reload + js .LBB243_28 +# BB#27: + movl 40(%esp), %eax # 4-byte Reload +.LBB243_28: + movl %eax, 52(%edx) + movl 92(%esp), %eax # 4-byte Reload + js .LBB243_30 +# BB#29: + movl 44(%esp), %eax # 4-byte Reload +.LBB243_30: + movl %eax, 56(%edx) + movl 104(%esp), %eax # 4-byte Reload + js .LBB243_32 +# BB#31: + movl 84(%esp), %eax # 4-byte Reload +.LBB243_32: + movl %eax, 60(%edx) + addl $2412, %esp # imm = 0x96C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end243: + .size mcl_fp_montNF16L, .Lfunc_end243-mcl_fp_montNF16L + + .globl mcl_fp_montRed16L + .align 16, 0x90 + .type mcl_fp_montRed16L,@function +mcl_fp_montRed16L: # @mcl_fp_montRed16L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl 
$1356, %esp # imm = 0x54C + calll .L244$pb +.L244$pb: + popl %eax +.Ltmp55: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp55-.L244$pb), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1384(%esp), %edx + movl -4(%edx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1380(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 112(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 120(%esp) # 4-byte Spill + imull %eax, %ebx + movl 124(%ecx), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 120(%ecx), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 112(%ecx), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 108(%ecx), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 104(%ecx), %esi + movl %esi, 152(%esp) # 4-byte Spill + movl 100(%ecx), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 96(%ecx), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 92(%ecx), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 88(%ecx), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 84(%ecx), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 80(%ecx), %edi + movl %edi, 148(%esp) # 4-byte Spill + movl 76(%ecx), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl %esi, 192(%esp) # 4-byte Spill + movl 68(%ecx), %edi + movl %edi, 204(%esp) # 4-byte Spill + movl 64(%ecx), %esi + movl %esi, 200(%esp) # 4-byte Spill + movl 60(%ecx), %edi + movl %edi, 180(%esp) # 4-byte Spill + movl 56(%ecx), %edi + movl %edi, 164(%esp) # 4-byte Spill + movl 52(%ecx), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 44(%ecx), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 40(%ecx), %ebp + movl 36(%ecx), %edi + movl 32(%ecx), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 28(%ecx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 24(%ecx), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 20(%ecx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 16(%ecx), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 12(%ecx), %esi + movl 8(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl (%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 60(%edx), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 56(%edx), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 52(%edx), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 40(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 1288(%esp), %ecx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + movl 112(%esp), %eax # 4-byte Reload + addl 1288(%esp), %eax + movl 120(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 1300(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 
4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1312(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1320(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1324(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + adcl 1328(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + adcl $0, 204(%esp) # 4-byte Folded Spill + adcl $0, 192(%esp) # 4-byte Folded Spill + movl 196(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + movl 132(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + sbbl %eax, %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1216(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + movl 112(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 1216(%esp), %esi + movl 76(%esp), %edx # 4-byte Reload + adcl 1220(%esp), %edx + movl 80(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1244(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1248(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %esi # 4-byte Reload + adcl 1260(%esp), %esi + movl 160(%esp), %eax # 4-byte Reload + adcl 1264(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 
204(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + adcl $0, 192(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 196(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + movl 156(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 132(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1144(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 1144(%esp), %ebp + movl 80(%esp), %ecx # 4-byte Reload + adcl 1148(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl 1184(%esp), %esi + movl %esi, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 1188(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1200(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1204(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1208(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + adcl $0, 196(%esp) # 4-byte Folded Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + movl 168(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 156(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + movl 128(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1072(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 1072(%esp), %esi + movl 84(%esp), %ecx # 4-byte Reload + adcl 1076(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 1080(%esp), %eax + movl 
%eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1088(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + adcl $0, 148(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 168(%esp) # 4-byte Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 128(%esp) # 4-byte Spill + movl 124(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1000(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 1000(%esp), %edi + movl 88(%esp), %ecx # 4-byte Reload + adcl 1004(%esp), %ecx + movl 92(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1044(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 
192(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + movl 188(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + movl 172(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 124(%esp) # 4-byte Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 928(%esp), %esi + movl 92(%esp), %ecx # 4-byte Reload + adcl 932(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 936(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + adcl $0, %ebp + movl %ebp, 188(%esp) # 4-byte Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 176(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 172(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + movl 144(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + movl 100(%esp), %ebp # 4-byte Reload + imull %ebp, %eax + movl %eax, (%esp) + leal 856(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 856(%esp), %edi + movl 96(%esp), %ecx # 4-byte Reload + adcl 860(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + 
movl 108(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + movl 176(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 144(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull %ebp, %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 784(%esp), %esi + movl 104(%esp), %ecx # 4-byte Reload + adcl 788(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %ebp # 4-byte Reload + adcl 828(%esp), %ebp + movl 196(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl 
%eax, 168(%esp) # 4-byte Spill + adcl $0, %edi + movl %edi, 176(%esp) # 4-byte Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + movl 156(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 712(%esp), %edi + movl 108(%esp), %ecx # 4-byte Reload + adcl 716(%esp), %ecx + movl 120(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + adcl 752(%esp), %ebp + movl %ebp, 192(%esp) # 4-byte Spill + movl 196(%esp), %edi # 4-byte Reload + adcl 756(%esp), %edi + movl 148(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 156(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + movl %ecx, %esi + movl %esi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 640(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 640(%esp), %esi + movl 120(%esp), %ecx # 4-byte Reload + adcl 644(%esp), %ecx + movl 140(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %esi # 4-byte Reload + adcl 668(%esp), %esi + movl 204(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl 
%eax, 192(%esp) # 4-byte Spill + adcl 680(%esp), %edi + movl %edi, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 152(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 1384(%esp), %eax + movl %eax, %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 568(%esp), %ebp + movl 140(%esp), %ecx # 4-byte Reload + adcl 572(%esp), %ecx + movl 136(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 180(%esp), %ebp # 4-byte Reload + adcl 588(%esp), %ebp + adcl 592(%esp), %esi + movl %esi, 200(%esp) # 4-byte Spill + movl 204(%esp), %esi # 4-byte Reload + adcl 596(%esp), %esi + movl 192(%esp), %eax # 4-byte Reload + adcl 600(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + adcl 632(%esp), %edi + movl %edi, 152(%esp) # 4-byte Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 496(%esp), %edi + movl 136(%esp), %ecx # 4-byte Reload + adcl 500(%esp), %ecx + movl 160(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 164(%esp), %edi # 4-byte Reload + adcl 508(%esp), %edi + adcl 512(%esp), %ebp + movl %ebp, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + 
adcl 520(%esp), %esi + movl %esi, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 528(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %ebp # 4-byte Reload + adcl 532(%esp), %ebp + movl 184(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 424(%esp), %esi + movl 160(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + adcl 432(%esp), %edi + movl %edi, 164(%esp) # 4-byte Spill + movl 180(%esp), %ecx # 4-byte Reload + adcl 436(%esp), %ecx + movl %ecx, 180(%esp) # 4-byte Spill + movl 200(%esp), %ecx # 4-byte Reload + adcl 440(%esp), %ecx + movl %ecx, 200(%esp) # 4-byte Spill + movl 204(%esp), %ecx # 4-byte Reload + adcl 444(%esp), %ecx + movl %ecx, 204(%esp) # 4-byte Spill + movl 192(%esp), %ecx # 4-byte Reload + adcl 448(%esp), %ecx + movl %ecx, 192(%esp) # 4-byte Spill + movl 196(%esp), %ecx # 4-byte Reload + adcl 452(%esp), %ecx + movl %ecx, 196(%esp) # 4-byte Spill + adcl 456(%esp), %ebp + movl 184(%esp), %ecx # 4-byte Reload + adcl 460(%esp), %ecx + movl %ecx, 184(%esp) # 4-byte Spill + movl 188(%esp), %ecx # 4-byte Reload + adcl 464(%esp), %ecx + movl %ecx, 188(%esp) # 4-byte Spill + movl 168(%esp), %ecx # 4-byte Reload + adcl 468(%esp), %ecx + movl %ecx, 168(%esp) # 4-byte Spill + movl 176(%esp), %ecx # 4-byte Reload + adcl 472(%esp), %ecx + movl %ecx, 176(%esp) # 4-byte Spill + movl 172(%esp), %ecx # 4-byte Reload + adcl 476(%esp), %ecx + movl %ecx, 172(%esp) # 4-byte Spill + movl 152(%esp), %ecx # 4-byte Reload + adcl 480(%esp), %ecx + movl %ecx, 152(%esp) # 4-byte Spill + movl 156(%esp), %ecx # 4-byte Reload + adcl 484(%esp), %ecx + movl %ecx, 156(%esp) # 4-byte Spill + movl 144(%esp), %ecx # 4-byte Reload + adcl 488(%esp), %ecx + movl %ecx, 144(%esp) # 4-byte Spill + movl 132(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %eax, %esi + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 352(%esp), %esi + movl 164(%esp), %esi # 4-byte Reload + adcl 356(%esp), %esi + movl 180(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 364(%esp), %eax + movl %eax, 
200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + adcl 380(%esp), %ebp + movl 184(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + adcl 416(%esp), %edi + movl %edi, 132(%esp) # 4-byte Spill + movl 128(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 112(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 100(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 280(%esp), %esi + movl 180(%esp), %ecx # 4-byte Reload + adcl 284(%esp), %ecx + movl 200(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + adcl 304(%esp), %ebp + movl %ebp, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 308(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %esi # 4-byte Reload + adcl 316(%esp), %esi + movl 176(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 344(%esp), %edi + movl %edi, 128(%esp) # 4-byte Spill + movl 124(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 112(%esp) # 4-byte Folded Spill + movl 100(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %ebp + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 1384(%esp), %edx + movl 116(%esp), %ebx # 4-byte Reload + calll .LmulPv512x32 + addl 208(%esp), %ebp + movl 200(%esp), %edx # 4-byte Reload + adcl 212(%esp), %edx + movl %edx, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), 
%ecx # 4-byte Reload + adcl 220(%esp), %ecx + movl %ecx, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 148(%esp), %ebp # 4-byte Reload + adcl 228(%esp), %ebp + movl %ebp, 148(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl %eax, %ebx + movl 188(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + adcl 240(%esp), %esi + movl %esi, 168(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl 272(%esp), %edi + movl %edi, 124(%esp) # 4-byte Spill + movl 112(%esp), %edi # 4-byte Reload + adcl $0, %edi + movl %edx, %eax + subl 24(%esp), %edx # 4-byte Folded Reload + movl 204(%esp), %esi # 4-byte Reload + sbbl 12(%esp), %esi # 4-byte Folded Reload + sbbl 16(%esp), %ecx # 4-byte Folded Reload + movl 196(%esp), %eax # 4-byte Reload + sbbl 20(%esp), %eax # 4-byte Folded Reload + sbbl 28(%esp), %ebp # 4-byte Folded Reload + sbbl 32(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 100(%esp) # 4-byte Spill + movl 188(%esp), %ebx # 4-byte Reload + sbbl 36(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 104(%esp) # 4-byte Spill + movl 168(%esp), %ebx # 4-byte Reload + sbbl 40(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 108(%esp) # 4-byte Spill + movl 176(%esp), %ebx # 4-byte Reload + sbbl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 112(%esp) # 4-byte Spill + movl 172(%esp), %ebx # 4-byte Reload + sbbl 48(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 116(%esp) # 4-byte Spill + movl 152(%esp), %ebx # 4-byte Reload + sbbl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 120(%esp) # 4-byte Spill + movl 156(%esp), %ebx # 4-byte Reload + sbbl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 136(%esp) # 4-byte Spill + movl 144(%esp), %ebx # 4-byte Reload + sbbl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 140(%esp) # 4-byte Spill + movl 132(%esp), %ebx # 4-byte Reload + sbbl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 160(%esp) # 4-byte Spill + movl 128(%esp), %ebx # 4-byte Reload + sbbl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 164(%esp) # 4-byte Spill + movl 124(%esp), %ebx # 4-byte Reload + sbbl 72(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 180(%esp) # 4-byte Spill + sbbl $0, %edi + andl $1, %edi + movl %edi, %ebx + jne .LBB244_2 +# BB#1: + movl %edx, 200(%esp) # 4-byte Spill +.LBB244_2: + movl 1376(%esp), %edx + movl 200(%esp), %edi # 4-byte Reload + movl %edi, (%edx) + testb %bl, %bl + jne .LBB244_4 +# BB#3: + movl %esi, 204(%esp) # 4-byte Spill +.LBB244_4: + movl 204(%esp), %esi # 4-byte Reload + movl %esi, 4(%edx) + movl 192(%esp), %esi # 4-byte Reload + jne .LBB244_6 +# BB#5: + movl %ecx, %esi +.LBB244_6: + movl %esi, 8(%edx) + movl 196(%esp), %ecx # 4-byte Reload + jne .LBB244_8 +# BB#7: + movl %eax, 
%ecx +.LBB244_8: + movl %ecx, 12(%edx) + movl 128(%esp), %esi # 4-byte Reload + movl 148(%esp), %eax # 4-byte Reload + jne .LBB244_10 +# BB#9: + movl %ebp, %eax +.LBB244_10: + movl %eax, 16(%edx) + movl 124(%esp), %ecx # 4-byte Reload + movl 176(%esp), %eax # 4-byte Reload + movl 184(%esp), %ebp # 4-byte Reload + jne .LBB244_12 +# BB#11: + movl 100(%esp), %ebp # 4-byte Reload +.LBB244_12: + movl %ebp, 20(%edx) + movl 152(%esp), %ebp # 4-byte Reload + movl 188(%esp), %ebx # 4-byte Reload + jne .LBB244_14 +# BB#13: + movl 104(%esp), %ebx # 4-byte Reload +.LBB244_14: + movl %ebx, 24(%edx) + movl 156(%esp), %ebx # 4-byte Reload + movl 168(%esp), %edi # 4-byte Reload + jne .LBB244_16 +# BB#15: + movl 108(%esp), %edi # 4-byte Reload +.LBB244_16: + movl %edi, 28(%edx) + movl 144(%esp), %edi # 4-byte Reload + jne .LBB244_18 +# BB#17: + movl 112(%esp), %eax # 4-byte Reload +.LBB244_18: + movl %eax, 32(%edx) + jne .LBB244_20 +# BB#19: + movl 116(%esp), %eax # 4-byte Reload + movl %eax, 172(%esp) # 4-byte Spill +.LBB244_20: + movl 172(%esp), %eax # 4-byte Reload + movl %eax, 36(%edx) + jne .LBB244_22 +# BB#21: + movl 120(%esp), %ebp # 4-byte Reload +.LBB244_22: + movl %ebp, 40(%edx) + movl 132(%esp), %eax # 4-byte Reload + jne .LBB244_24 +# BB#23: + movl 136(%esp), %ebx # 4-byte Reload +.LBB244_24: + movl %ebx, 44(%edx) + jne .LBB244_26 +# BB#25: + movl 140(%esp), %edi # 4-byte Reload +.LBB244_26: + movl %edi, 48(%edx) + jne .LBB244_28 +# BB#27: + movl 160(%esp), %eax # 4-byte Reload +.LBB244_28: + movl %eax, 52(%edx) + jne .LBB244_30 +# BB#29: + movl 164(%esp), %esi # 4-byte Reload +.LBB244_30: + movl %esi, 56(%edx) + jne .LBB244_32 +# BB#31: + movl 180(%esp), %ecx # 4-byte Reload +.LBB244_32: + movl %ecx, 60(%edx) + addl $1356, %esp # imm = 0x54C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end244: + .size mcl_fp_montRed16L, .Lfunc_end244-mcl_fp_montRed16L + + .globl mcl_fp_addPre16L + .align 16, 0x90 + .type mcl_fp_addPre16L,@function +mcl_fp_addPre16L: # @mcl_fp_addPre16L +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %ebx + adcl 8(%ecx), %ebx + movl 16(%esp), %edi + movl %edx, (%edi) + movl 12(%ecx), %edx + movl %esi, 4(%edi) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %ebx, 8(%edi) + movl 20(%eax), %ebx + movl %edx, 12(%edi) + movl 20(%ecx), %edx + adcl %ebx, %edx + movl 24(%eax), %ebx + movl %esi, 16(%edi) + movl 24(%ecx), %esi + adcl %ebx, %esi + movl 28(%eax), %ebx + movl %edx, 20(%edi) + movl 28(%ecx), %edx + adcl %ebx, %edx + movl 32(%eax), %ebx + movl %esi, 24(%edi) + movl 32(%ecx), %esi + adcl %ebx, %esi + movl 36(%eax), %ebx + movl %edx, 28(%edi) + movl 36(%ecx), %edx + adcl %ebx, %edx + movl 40(%eax), %ebx + movl %esi, 32(%edi) + movl 40(%ecx), %esi + adcl %ebx, %esi + movl 44(%eax), %ebx + movl %edx, 36(%edi) + movl 44(%ecx), %edx + adcl %ebx, %edx + movl 48(%eax), %ebx + movl %esi, 40(%edi) + movl 48(%ecx), %esi + adcl %ebx, %esi + movl 52(%eax), %ebx + movl %edx, 44(%edi) + movl 52(%ecx), %edx + adcl %ebx, %edx + movl 56(%eax), %ebx + movl %esi, 48(%edi) + movl 56(%ecx), %esi + adcl %ebx, %esi + movl %edx, 52(%edi) + movl %esi, 56(%edi) + movl 60(%eax), %eax + movl 60(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 60(%edi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end245: + .size mcl_fp_addPre16L, .Lfunc_end245-mcl_fp_addPre16L + + .globl mcl_fp_subPre16L + 
.align 16, 0x90 + .type mcl_fp_subPre16L,@function +mcl_fp_subPre16L: # @mcl_fp_subPre16L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebp + sbbl 8(%edx), %ebp + movl 20(%esp), %ebx + movl %esi, (%ebx) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebx) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebp, 8(%ebx) + movl 20(%edx), %ebp + movl %esi, 12(%ebx) + movl 20(%ecx), %esi + sbbl %ebp, %esi + movl 24(%edx), %ebp + movl %edi, 16(%ebx) + movl 24(%ecx), %edi + sbbl %ebp, %edi + movl 28(%edx), %ebp + movl %esi, 20(%ebx) + movl 28(%ecx), %esi + sbbl %ebp, %esi + movl 32(%edx), %ebp + movl %edi, 24(%ebx) + movl 32(%ecx), %edi + sbbl %ebp, %edi + movl 36(%edx), %ebp + movl %esi, 28(%ebx) + movl 36(%ecx), %esi + sbbl %ebp, %esi + movl 40(%edx), %ebp + movl %edi, 32(%ebx) + movl 40(%ecx), %edi + sbbl %ebp, %edi + movl 44(%edx), %ebp + movl %esi, 36(%ebx) + movl 44(%ecx), %esi + sbbl %ebp, %esi + movl 48(%edx), %ebp + movl %edi, 40(%ebx) + movl 48(%ecx), %edi + sbbl %ebp, %edi + movl 52(%edx), %ebp + movl %esi, 44(%ebx) + movl 52(%ecx), %esi + sbbl %ebp, %esi + movl 56(%edx), %ebp + movl %edi, 48(%ebx) + movl 56(%ecx), %edi + sbbl %ebp, %edi + movl %esi, 52(%ebx) + movl %edi, 56(%ebx) + movl 60(%edx), %edx + movl 60(%ecx), %ecx + sbbl %edx, %ecx + movl %ecx, 60(%ebx) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end246: + .size mcl_fp_subPre16L, .Lfunc_end246-mcl_fp_subPre16L + + .globl mcl_fp_shr1_16L + .align 16, 0x90 + .type mcl_fp_shr1_16L,@function +mcl_fp_shr1_16L: # @mcl_fp_shr1_16L +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 40(%ecx) + movl 48(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 44(%ecx) + movl 52(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 48(%ecx) + movl 56(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 52(%ecx) + movl 60(%eax), %eax + shrdl $1, %eax, %edx + movl %edx, 56(%ecx) + shrl %eax + movl %eax, 60(%ecx) + popl %esi + retl +.Lfunc_end247: + .size mcl_fp_shr1_16L, .Lfunc_end247-mcl_fp_shr1_16L + + .globl mcl_fp_add16L + .align 16, 0x90 + .type mcl_fp_add16L,@function +mcl_fp_add16L: # @mcl_fp_add16L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $56, %esp + movl 84(%esp), %edx + movl (%edx), %esi + movl 4(%edx), %ebp + movl 80(%esp), %ecx + addl (%ecx), %esi + movl %esi, %ebx + adcl 4(%ecx), %ebp + movl 8(%edx), %eax + adcl 8(%ecx), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 12(%ecx), %esi + movl 16(%ecx), %edi + adcl 12(%edx), %esi + movl %esi, 48(%esp) # 4-byte Spill + adcl 16(%edx), %edi + movl %edi, 12(%esp) # 4-byte Spill + movl 20(%ecx), %eax 
+ adcl 20(%edx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%ecx), %eax + adcl 24(%edx), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%ecx), %eax + adcl 28(%edx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%ecx), %eax + adcl 32(%edx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%ecx), %eax + adcl 36(%edx), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 40(%ecx), %eax + adcl 40(%edx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 44(%ecx), %eax + adcl 44(%edx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 48(%ecx), %eax + adcl 48(%edx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 52(%ecx), %eax + adcl 52(%edx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 56(%ecx), %esi + adcl 56(%edx), %esi + movl 60(%ecx), %ecx + adcl 60(%edx), %ecx + movl 76(%esp), %edx + movl %ebx, (%edx) + movl %ebx, %eax + movl %ebp, 4(%edx) + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%edx) + movl 48(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%edx) + movl %edi, 16(%edx) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 20(%edx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 24(%edx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 28(%edx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 32(%edx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 36(%edx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 40(%edx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 44(%edx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 48(%edx) + movl 8(%esp), %edi # 4-byte Reload + movl %edi, 52(%edx) + movl %esi, 56(%edx) + movl %ecx, 60(%edx) + sbbl %ebx, %ebx + andl $1, %ebx + movl 88(%esp), %edi + subl (%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 4(%edi), %ebp + movl %ebp, (%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 8(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + sbbl 12(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + sbbl 16(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + sbbl 20(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + sbbl 24(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + sbbl 44(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + sbbl 48(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 8(%esp), %eax # 4-byte Reload + sbbl 52(%edi), %eax + movl %eax, %ebp + sbbl 56(%edi), %esi + sbbl 60(%edi), %ecx + sbbl $0, %ebx + testb $1, %bl + jne .LBB248_2 +# BB#1: # %nocarry + movl 4(%esp), %edi # 4-byte Reload + movl %edi, (%edx) + movl (%esp), %edi # 4-byte Reload + movl %edi, 4(%edx) + movl 52(%esp), %edi # 4-byte Reload + movl %edi, 8(%edx) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 12(%edx) + movl 12(%esp), %edi # 4-byte Reload + movl %edi, 16(%edx) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 20(%edx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 24(%edx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 28(%edx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 32(%edx) 
+ movl 28(%esp), %edi # 4-byte Reload + movl %edi, 36(%edx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 40(%edx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 44(%edx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 48(%edx) + movl %ebp, 52(%edx) + movl %esi, 56(%edx) + movl %ecx, 60(%edx) +.LBB248_2: # %carry + addl $56, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end248: + .size mcl_fp_add16L, .Lfunc_end248-mcl_fp_add16L + + .globl mcl_fp_addNF16L + .align 16, 0x90 + .type mcl_fp_addNF16L,@function +mcl_fp_addNF16L: # @mcl_fp_addNF16L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $124, %esp + movl 152(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %ecx + movl 148(%esp), %esi + addl (%esi), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 4(%esi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 60(%edx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 56(%edx), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 52(%edx), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 48(%edx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 44(%edx), %edi + movl 40(%edx), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 36(%edx), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 32(%edx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 28(%edx), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 24(%edx), %eax + movl 20(%edx), %ebp + movl 16(%edx), %ebx + movl 12(%edx), %ecx + movl 8(%edx), %edx + adcl 8(%esi), %edx + movl %edx, 60(%esp) # 4-byte Spill + adcl 12(%esi), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 16(%esi), %ebx + movl %ebx, 68(%esp) # 4-byte Spill + adcl 20(%esi), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + adcl 24(%esi), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 28(%esi), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 32(%esi), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 36(%esi), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esi), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 44(%esi), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 48(%esi), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 52(%esi), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 56(%esi), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 60(%esi), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 156(%esp), %edi + movl 80(%esp), %esi # 4-byte Reload + subl (%edi), %esi + movl 84(%esp), %eax # 4-byte Reload + sbbl 4(%edi), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 8(%edi), %edx + movl %edx, 4(%esp) # 4-byte Spill + sbbl 12(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 16(%edi), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + sbbl 20(%edi), %ebp + movl %ebp, 16(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + sbbl 24(%edi), %ebp + movl %ebp, 20(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + movl 
%eax, %ecx + sbbl 44(%edi), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + sbbl 48(%edi), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 52(%edi), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + movl %ecx, %ebx + sbbl 56(%edi), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + movl 112(%esp), %ebx # 4-byte Reload + sbbl 60(%edi), %ebx + movl 80(%esp), %edi # 4-byte Reload + movl %ebx, 56(%esp) # 4-byte Spill + testl %ebx, %ebx + js .LBB249_2 +# BB#1: + movl %esi, %edi +.LBB249_2: + movl 144(%esp), %ebx + movl %edi, (%ebx) + movl 84(%esp), %edx # 4-byte Reload + js .LBB249_4 +# BB#3: + movl (%esp), %edx # 4-byte Reload +.LBB249_4: + movl %edx, 4(%ebx) + movl 68(%esp), %edx # 4-byte Reload + movl 60(%esp), %eax # 4-byte Reload + js .LBB249_6 +# BB#5: + movl 4(%esp), %eax # 4-byte Reload +.LBB249_6: + movl %eax, 8(%ebx) + movl 100(%esp), %eax # 4-byte Reload + movl 88(%esp), %ecx # 4-byte Reload + movl 64(%esp), %esi # 4-byte Reload + js .LBB249_8 +# BB#7: + movl 8(%esp), %esi # 4-byte Reload +.LBB249_8: + movl %esi, 12(%ebx) + movl 108(%esp), %esi # 4-byte Reload + js .LBB249_10 +# BB#9: + movl 12(%esp), %edx # 4-byte Reload +.LBB249_10: + movl %edx, 16(%ebx) + movl 112(%esp), %edi # 4-byte Reload + movl 104(%esp), %ebp # 4-byte Reload + js .LBB249_12 +# BB#11: + movl 16(%esp), %edx # 4-byte Reload + movl %edx, 72(%esp) # 4-byte Spill +.LBB249_12: + movl 72(%esp), %edx # 4-byte Reload + movl %edx, 20(%ebx) + js .LBB249_14 +# BB#13: + movl 20(%esp), %ecx # 4-byte Reload +.LBB249_14: + movl %ecx, 24(%ebx) + js .LBB249_16 +# BB#15: + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 116(%esp) # 4-byte Spill +.LBB249_16: + movl 116(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%ebx) + js .LBB249_18 +# BB#17: + movl 28(%esp), %eax # 4-byte Reload +.LBB249_18: + movl %eax, 32(%ebx) + movl 96(%esp), %ecx # 4-byte Reload + js .LBB249_20 +# BB#19: + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 120(%esp) # 4-byte Spill +.LBB249_20: + movl 120(%esp), %eax # 4-byte Reload + movl %eax, 36(%ebx) + js .LBB249_22 +# BB#21: + movl 36(%esp), %ebp # 4-byte Reload +.LBB249_22: + movl %ebp, 40(%ebx) + movl 76(%esp), %eax # 4-byte Reload + js .LBB249_24 +# BB#23: + movl 40(%esp), %eax # 4-byte Reload +.LBB249_24: + movl %eax, 44(%ebx) + movl 92(%esp), %eax # 4-byte Reload + js .LBB249_26 +# BB#25: + movl 44(%esp), %esi # 4-byte Reload +.LBB249_26: + movl %esi, 48(%ebx) + js .LBB249_28 +# BB#27: + movl 48(%esp), %eax # 4-byte Reload +.LBB249_28: + movl %eax, 52(%ebx) + js .LBB249_30 +# BB#29: + movl 52(%esp), %ecx # 4-byte Reload +.LBB249_30: + movl %ecx, 56(%ebx) + js .LBB249_32 +# BB#31: + movl 56(%esp), %edi # 4-byte Reload +.LBB249_32: + movl %edi, 60(%ebx) + addl $124, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end249: + .size mcl_fp_addNF16L, .Lfunc_end249-mcl_fp_addNF16L + + .globl mcl_fp_sub16L + .align 16, 0x90 + .type mcl_fp_sub16L,@function +mcl_fp_sub16L: # @mcl_fp_sub16L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 84(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 88(%esp), %edi + subl (%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 
48(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 32(%esi), %eax + sbbl 32(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 36(%esi), %eax + sbbl 36(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 40(%esi), %eax + sbbl 40(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 44(%esi), %edx + sbbl 44(%edi), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 48(%esi), %ecx + sbbl 48(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 52(%esi), %eax + sbbl 52(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 56(%esi), %ebp + sbbl 56(%edi), %ebp + movl 60(%esi), %esi + sbbl 60(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 80(%esp), %ebx + movl 52(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 56(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 28(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 32(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 36(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 40(%ebx) + movl %edx, 44(%ebx) + movl %ecx, 48(%ebx) + movl %eax, 52(%ebx) + movl %ebp, 56(%ebx) + movl %esi, 60(%ebx) + je .LBB250_2 +# BB#1: # %carry + movl %esi, (%esp) # 4-byte Spill + movl 92(%esp), %esi + movl 52(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 44(%esp), %edi # 4-byte Reload + adcl 8(%esi), %edi + movl 12(%esi), %eax + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %edi, 8(%ebx) + movl 16(%esi), %ecx + adcl 48(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 28(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 40(%esi), %ecx + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl 44(%esi), %eax + adcl 12(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%ebx) + movl 48(%esi), %ecx + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %eax, 44(%ebx) + movl 52(%esi), %eax + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %ecx, 48(%ebx) + movl %eax, 52(%ebx) + movl 56(%esi), %eax + adcl %ebp, %eax + movl %eax, 56(%ebx) + movl 60(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%ebx) +.LBB250_2: # %nocarry + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end250: + .size mcl_fp_sub16L, .Lfunc_end250-mcl_fp_sub16L + + .globl mcl_fp_subNF16L + .align 16, 0x90 + .type mcl_fp_subNF16L,@function +mcl_fp_subNF16L: # @mcl_fp_subNF16L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $104, %esp + movl 128(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 132(%esp), 
%edi + subl (%edi), %esi + movl %esi, 64(%esp) # 4-byte Spill + sbbl 4(%edi), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 60(%ecx), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 56(%ecx), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 52(%ecx), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 48(%ecx), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 36(%ecx), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %ebx + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 44(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 48(%esp) # 4-byte Spill + sbbl 24(%edi), %ebx + movl %ebx, 52(%esp) # 4-byte Spill + sbbl 28(%edi), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + sbbl 36(%edi), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 48(%edi), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 52(%edi), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + sbbl 56(%edi), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + sbbl 60(%edi), %eax + movl %eax, 80(%esp) # 4-byte Spill + sarl $31, %eax + movl 136(%esp), %esi + movl 60(%esi), %ecx + andl %eax, %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 56(%esi), %ecx + andl %eax, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 52(%esi), %ecx + andl %eax, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 48(%esi), %ecx + andl %eax, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 44(%esi), %ecx + andl %eax, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 40(%esi), %ecx + andl %eax, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 36(%esi), %ecx + andl %eax, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 32(%esi), %ecx + andl %eax, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 28(%esi), %ecx + andl %eax, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 24(%esi), %ecx + andl %eax, %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 20(%esi), %ebp + andl %eax, %ebp + movl 16(%esi), %ebx + andl %eax, %ebx + movl 12(%esi), %edi + andl %eax, %edi + movl 8(%esi), %edx + andl %eax, %edx + movl 4(%esi), %ecx + andl %eax, %ecx + andl (%esi), %eax + addl 64(%esp), %eax # 4-byte Folded Reload + adcl 68(%esp), %ecx # 4-byte Folded Reload + movl 124(%esp), %esi + movl %eax, (%esi) + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %ecx, 4(%esi) + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edx, 8(%esi) + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %edi, 12(%esi) + adcl 48(%esp), %ebp # 4-byte Folded Reload + movl %ebx, 16(%esi) + movl (%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %ebp, 20(%esi) + movl 4(%esp), %ecx # 4-byte Reload + adcl 56(%esp), %ecx # 4-byte Folded Reload + movl %eax, 24(%esi) + movl 8(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte 
Folded Reload + movl %ecx, 28(%esi) + movl 12(%esp), %ecx # 4-byte Reload + adcl 100(%esp), %ecx # 4-byte Folded Reload + movl %eax, 32(%esi) + movl 16(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %ecx, 36(%esi) + movl 20(%esp), %ecx # 4-byte Reload + adcl 84(%esp), %ecx # 4-byte Folded Reload + movl %eax, 40(%esi) + movl 24(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %ecx, 44(%esi) + movl 28(%esp), %ecx # 4-byte Reload + adcl 92(%esp), %ecx # 4-byte Folded Reload + movl %eax, 48(%esi) + movl 36(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %ecx, 52(%esi) + movl %eax, 56(%esi) + movl 60(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esi) + addl $104, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end251: + .size mcl_fp_subNF16L, .Lfunc_end251-mcl_fp_subNF16L + + .globl mcl_fpDbl_add16L + .align 16, 0x90 + .type mcl_fpDbl_add16L,@function +mcl_fpDbl_add16L: # @mcl_fpDbl_add16L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $116, %esp + movl 144(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 140(%esp), %ebx + addl (%ebx), %esi + adcl 4(%ebx), %edx + movl 8(%ecx), %edi + adcl 8(%ebx), %edi + movl 12(%ebx), %ebp + movl 136(%esp), %eax + movl %esi, (%eax) + movl 16(%ebx), %esi + adcl 12(%ecx), %ebp + adcl 16(%ecx), %esi + movl %edx, 4(%eax) + movl 72(%ecx), %edx + movl %edx, 112(%esp) # 4-byte Spill + movl %edi, 8(%eax) + movl 20(%ecx), %edx + movl %ebp, 12(%eax) + movl 20(%ebx), %edi + adcl %edx, %edi + movl 24(%ecx), %edx + movl %esi, 16(%eax) + movl 24(%ebx), %esi + adcl %edx, %esi + movl 28(%ecx), %edx + movl %edi, 20(%eax) + movl 28(%ebx), %edi + adcl %edx, %edi + movl 32(%ecx), %edx + movl %esi, 24(%eax) + movl 32(%ebx), %esi + adcl %edx, %esi + movl 36(%ecx), %edx + movl %edi, 28(%eax) + movl 36(%ebx), %edi + adcl %edx, %edi + movl 40(%ecx), %edx + movl %esi, 32(%eax) + movl 40(%ebx), %esi + adcl %edx, %esi + movl 44(%ecx), %edx + movl %edi, 36(%eax) + movl 44(%ebx), %edi + adcl %edx, %edi + movl 48(%ecx), %edx + movl %esi, 40(%eax) + movl 48(%ebx), %esi + adcl %edx, %esi + movl 52(%ecx), %edx + movl %edi, 44(%eax) + movl 52(%ebx), %edi + adcl %edx, %edi + movl 56(%ecx), %edx + movl %esi, 48(%eax) + movl 56(%ebx), %esi + adcl %edx, %esi + movl 60(%ecx), %edx + movl %edi, 52(%eax) + movl 60(%ebx), %ebp + adcl %edx, %ebp + movl 64(%ecx), %edx + movl %esi, 56(%eax) + movl 64(%ebx), %esi + adcl %edx, %esi + movl %esi, 72(%esp) # 4-byte Spill + movl 68(%ecx), %edx + movl %ebp, 60(%eax) + movl 68(%ebx), %eax + adcl %edx, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 72(%ebx), %eax + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 76(%ecx), %ebp + movl 76(%ebx), %eax + adcl %ebp, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%ecx), %ebp + movl 80(%ebx), %eax + adcl %ebp, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 84(%ecx), %ebp + movl 84(%ebx), %eax + adcl %ebp, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 88(%ecx), %ebp + movl 88(%ebx), %eax + adcl %ebp, %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 92(%ecx), %ebp + movl 92(%ebx), %eax + adcl %ebp, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 96(%ecx), %ebp + movl 96(%ebx), %eax + adcl %ebp, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 100(%ecx), %ebp + movl 100(%ebx), %edx + adcl %ebp, %edx + movl %edx, 112(%esp) # 4-byte Spill + movl 104(%ecx), %ebp + movl 104(%ebx), %edx 
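+# Note on the surrounding code: the unrolled adcl chain above and below is the
+# limb-wise addition half of mcl_fpDbl_add16L, propagating carries across the
+# 32 32-bit limbs of the double-width (1024-bit) operands. An illustrative
+# C-style sketch of the same computation (names illustrative only, not from
+# the mcl source):
+#   uint32_t carry = 0;
+#   for (int i = 0; i < 32; i++) {
+#       uint64_t t = (uint64_t)x[i] + y[i] + carry;
+#       z[i] = (uint32_t)t;
+#       carry = (uint32_t)(t >> 32);
+#   }
+# The low 16 limbs are stored to the result as they are produced; the high 16
+# limbs are then reduced by a trial subtraction of the modulus, with the
+# reduced or unreduced values selected by branches on the final borrow.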
+ adcl %ebp, %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 108(%ecx), %ebp + movl 108(%ebx), %edx + adcl %ebp, %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 112(%ecx), %edx + movl 112(%ebx), %ebp + adcl %edx, %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 116(%ecx), %edx + movl 116(%ebx), %esi + adcl %edx, %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 120(%ecx), %edx + movl 120(%ebx), %edi + adcl %edx, %edi + movl 124(%ecx), %ecx + movl 124(%ebx), %esi + adcl %ecx, %esi + sbbl %ecx, %ecx + andl $1, %ecx + movl 148(%esp), %edx + movl 72(%esp), %ebx # 4-byte Reload + subl (%edx), %ebx + movl %ebx, (%esp) # 4-byte Spill + movl 76(%esp), %ebx # 4-byte Reload + sbbl 4(%edx), %ebx + movl %ebx, 4(%esp) # 4-byte Spill + movl 80(%esp), %ebx # 4-byte Reload + sbbl 8(%edx), %ebx + movl %ebx, 8(%esp) # 4-byte Spill + movl 84(%esp), %ebx # 4-byte Reload + sbbl 12(%edx), %ebx + movl %ebx, 12(%esp) # 4-byte Spill + movl 104(%esp), %ebx # 4-byte Reload + sbbl 16(%edx), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 88(%esp), %ebx # 4-byte Reload + sbbl 20(%edx), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 108(%esp), %ebx # 4-byte Reload + sbbl 24(%edx), %ebx + movl %ebx, 24(%esp) # 4-byte Spill + movl 92(%esp), %ebx # 4-byte Reload + sbbl 28(%edx), %ebx + movl %ebx, 28(%esp) # 4-byte Spill + movl %eax, %ebx + sbbl 32(%edx), %ebx + movl 112(%esp), %eax # 4-byte Reload + sbbl 36(%edx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + sbbl 40(%edx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + sbbl 44(%edx), %eax + movl %eax, 40(%esp) # 4-byte Spill + sbbl 48(%edx), %ebp + movl %ebp, 44(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + movl %eax, %ebp + sbbl 52(%edx), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl %edi, %ebp + sbbl 56(%edx), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl %esi, %ebp + sbbl 60(%edx), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + sbbl $0, %ecx + andl $1, %ecx + jne .LBB252_2 +# BB#1: + movl %ebx, 64(%esp) # 4-byte Spill +.LBB252_2: + testb %cl, %cl + movl 72(%esp), %ecx # 4-byte Reload + jne .LBB252_4 +# BB#3: + movl (%esp), %ecx # 4-byte Reload +.LBB252_4: + movl 136(%esp), %ebx + movl %ecx, 64(%ebx) + movl %esi, %ebp + movl %edi, 72(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + movl 92(%esp), %ecx # 4-byte Reload + movl 88(%esp), %edx # 4-byte Reload + movl 76(%esp), %esi # 4-byte Reload + jne .LBB252_6 +# BB#5: + movl 4(%esp), %esi # 4-byte Reload +.LBB252_6: + movl %esi, 68(%ebx) + movl 84(%esp), %esi # 4-byte Reload + movl 80(%esp), %eax # 4-byte Reload + jne .LBB252_8 +# BB#7: + movl 8(%esp), %eax # 4-byte Reload +.LBB252_8: + movl %eax, 72(%ebx) + movl 60(%esp), %eax # 4-byte Reload + jne .LBB252_10 +# BB#9: + movl 12(%esp), %esi # 4-byte Reload +.LBB252_10: + movl %esi, 76(%ebx) + jne .LBB252_12 +# BB#11: + movl 16(%esp), %esi # 4-byte Reload + movl %esi, 104(%esp) # 4-byte Spill +.LBB252_12: + movl 104(%esp), %esi # 4-byte Reload + movl %esi, 80(%ebx) + jne .LBB252_14 +# BB#13: + movl 20(%esp), %edx # 4-byte Reload +.LBB252_14: + movl %edx, 84(%ebx) + jne .LBB252_16 +# BB#15: + movl 24(%esp), %edx # 4-byte Reload + movl %edx, 108(%esp) # 4-byte Spill +.LBB252_16: + movl 108(%esp), %edx # 4-byte Reload + movl %edx, 88(%ebx) + jne .LBB252_18 +# BB#17: + movl 28(%esp), %ecx # 4-byte Reload +.LBB252_18: + movl %ecx, 92(%ebx) + movl 64(%esp), %ecx # 4-byte Reload + movl %ecx, 96(%ebx) + jne .LBB252_20 +# BB#19: + movl 32(%esp), %ecx # 4-byte Reload + movl 
%ecx, 112(%esp) # 4-byte Spill +.LBB252_20: + movl 112(%esp), %ecx # 4-byte Reload + movl %ecx, 100(%ebx) + jne .LBB252_22 +# BB#21: + movl 36(%esp), %edi # 4-byte Reload +.LBB252_22: + movl %edi, 104(%ebx) + movl 100(%esp), %ecx # 4-byte Reload + jne .LBB252_24 +# BB#23: + movl 40(%esp), %ecx # 4-byte Reload +.LBB252_24: + movl %ecx, 108(%ebx) + movl 72(%esp), %ecx # 4-byte Reload + jne .LBB252_26 +# BB#25: + movl 44(%esp), %eax # 4-byte Reload +.LBB252_26: + movl %eax, 112(%ebx) + movl 68(%esp), %eax # 4-byte Reload + jne .LBB252_28 +# BB#27: + movl 48(%esp), %eax # 4-byte Reload +.LBB252_28: + movl %eax, 116(%ebx) + jne .LBB252_30 +# BB#29: + movl 52(%esp), %ecx # 4-byte Reload +.LBB252_30: + movl %ecx, 120(%ebx) + jne .LBB252_32 +# BB#31: + movl 56(%esp), %ebp # 4-byte Reload +.LBB252_32: + movl %ebp, 124(%ebx) + addl $116, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end252: + .size mcl_fpDbl_add16L, .Lfunc_end252-mcl_fpDbl_add16L + + .globl mcl_fpDbl_sub16L + .align 16, 0x90 + .type mcl_fpDbl_sub16L,@function +mcl_fpDbl_sub16L: # @mcl_fpDbl_sub16L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $108, %esp + movl 132(%esp), %eax + movl (%eax), %esi + movl 4(%eax), %edi + movl 136(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%eax), %ebx + sbbl 8(%edx), %ebx + movl 128(%esp), %ecx + movl %esi, (%ecx) + movl 12(%eax), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ecx) + movl 16(%eax), %edi + sbbl 16(%edx), %edi + movl %ebx, 8(%ecx) + movl 20(%edx), %ebx + movl %esi, 12(%ecx) + movl 20(%eax), %esi + sbbl %ebx, %esi + movl 24(%edx), %ebx + movl %edi, 16(%ecx) + movl 24(%eax), %edi + sbbl %ebx, %edi + movl 28(%edx), %ebx + movl %esi, 20(%ecx) + movl 28(%eax), %esi + sbbl %ebx, %esi + movl 32(%edx), %ebx + movl %edi, 24(%ecx) + movl 32(%eax), %edi + sbbl %ebx, %edi + movl 36(%edx), %ebx + movl %esi, 28(%ecx) + movl 36(%eax), %esi + sbbl %ebx, %esi + movl 40(%edx), %ebx + movl %edi, 32(%ecx) + movl 40(%eax), %edi + sbbl %ebx, %edi + movl 44(%edx), %ebx + movl %esi, 36(%ecx) + movl 44(%eax), %esi + sbbl %ebx, %esi + movl 48(%edx), %ebx + movl %edi, 40(%ecx) + movl 48(%eax), %edi + sbbl %ebx, %edi + movl 52(%edx), %ebx + movl %esi, 44(%ecx) + movl 52(%eax), %esi + sbbl %ebx, %esi + movl 56(%edx), %ebx + movl %edi, 48(%ecx) + movl 56(%eax), %edi + sbbl %ebx, %edi + movl 60(%edx), %ebx + movl %esi, 52(%ecx) + movl 60(%eax), %esi + sbbl %ebx, %esi + movl 64(%edx), %ebx + movl %edi, 56(%ecx) + movl 64(%eax), %edi + sbbl %ebx, %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 68(%edx), %edi + movl %esi, 60(%ecx) + movl 68(%eax), %esi + sbbl %edi, %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 72(%edx), %esi + movl 72(%eax), %edi + sbbl %esi, %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 76(%edx), %esi + movl 76(%eax), %edi + sbbl %esi, %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 80(%edx), %esi + movl 80(%eax), %edi + sbbl %esi, %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 84(%edx), %esi + movl 84(%eax), %edi + sbbl %esi, %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 88(%edx), %esi + movl 88(%eax), %edi + sbbl %esi, %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 92(%edx), %esi + movl 92(%eax), %edi + sbbl %esi, %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 96(%edx), %esi + movl 96(%eax), %edi + sbbl %esi, %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 100(%edx), %esi + movl 100(%eax), %edi + sbbl %esi, %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 104(%edx), %esi + movl 104(%eax), %edi + sbbl %esi, %edi + movl %edi, 
84(%esp) # 4-byte Spill + movl 108(%edx), %esi + movl 108(%eax), %edi + sbbl %esi, %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 112(%edx), %esi + movl 112(%eax), %edi + sbbl %esi, %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 116(%edx), %esi + movl 116(%eax), %edi + sbbl %esi, %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 120(%edx), %esi + movl 120(%eax), %edi + sbbl %esi, %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 124(%edx), %edx + movl 124(%eax), %eax + sbbl %edx, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 140(%esp), %ebx + jne .LBB253_1 +# BB#2: + movl $0, 68(%esp) # 4-byte Folded Spill + jmp .LBB253_3 +.LBB253_1: + movl 60(%ebx), %edx + movl %edx, 68(%esp) # 4-byte Spill +.LBB253_3: + testb %al, %al + jne .LBB253_4 +# BB#5: + movl $0, 24(%esp) # 4-byte Folded Spill + movl $0, %ebp + jmp .LBB253_6 +.LBB253_4: + movl (%ebx), %ebp + movl 4(%ebx), %eax + movl %eax, 24(%esp) # 4-byte Spill +.LBB253_6: + jne .LBB253_7 +# BB#8: + movl $0, 36(%esp) # 4-byte Folded Spill + jmp .LBB253_9 +.LBB253_7: + movl 56(%ebx), %eax + movl %eax, 36(%esp) # 4-byte Spill +.LBB253_9: + jne .LBB253_10 +# BB#11: + movl $0, 32(%esp) # 4-byte Folded Spill + jmp .LBB253_12 +.LBB253_10: + movl 52(%ebx), %eax + movl %eax, 32(%esp) # 4-byte Spill +.LBB253_12: + jne .LBB253_13 +# BB#14: + movl $0, 28(%esp) # 4-byte Folded Spill + jmp .LBB253_15 +.LBB253_13: + movl 48(%ebx), %eax + movl %eax, 28(%esp) # 4-byte Spill +.LBB253_15: + jne .LBB253_16 +# BB#17: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB253_18 +.LBB253_16: + movl 44(%ebx), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB253_18: + jne .LBB253_19 +# BB#20: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB253_21 +.LBB253_19: + movl 40(%ebx), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB253_21: + jne .LBB253_22 +# BB#23: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB253_24 +.LBB253_22: + movl 36(%ebx), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB253_24: + jne .LBB253_25 +# BB#26: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB253_27 +.LBB253_25: + movl 32(%ebx), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB253_27: + jne .LBB253_28 +# BB#29: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB253_30 +.LBB253_28: + movl 28(%ebx), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB253_30: + jne .LBB253_31 +# BB#32: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB253_33 +.LBB253_31: + movl 24(%ebx), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB253_33: + jne .LBB253_34 +# BB#35: + movl $0, %esi + jmp .LBB253_36 +.LBB253_34: + movl 20(%ebx), %esi +.LBB253_36: + jne .LBB253_37 +# BB#38: + movl $0, %edx + jmp .LBB253_39 +.LBB253_37: + movl 16(%ebx), %edx +.LBB253_39: + jne .LBB253_40 +# BB#41: + movl $0, %edi + jmp .LBB253_42 +.LBB253_40: + movl 12(%ebx), %edi +.LBB253_42: + jne .LBB253_43 +# BB#44: + xorl %ebx, %ebx + jmp .LBB253_45 +.LBB253_43: + movl 8(%ebx), %ebx +.LBB253_45: + addl 48(%esp), %ebp # 4-byte Folded Reload + movl %ebp, %eax + movl 24(%esp), %ebp # 4-byte Reload + adcl 40(%esp), %ebp # 4-byte Folded Reload + movl %eax, 64(%ecx) + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %ebp, 68(%ecx) + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %ebx, 72(%ecx) + adcl 56(%esp), %edx # 4-byte Folded Reload + movl %edi, 76(%ecx) + adcl 60(%esp), %esi # 4-byte Folded Reload + movl %edx, 80(%ecx) + movl (%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %esi, 84(%ecx) + movl 4(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte 
Folded Reload + movl %eax, 88(%ecx) + movl 8(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %edx, 92(%ecx) + movl 12(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %eax, 96(%ecx) + movl 16(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %edx, 100(%ecx) + movl 20(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %eax, 104(%ecx) + movl 28(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %edx, 108(%ecx) + movl 32(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx # 4-byte Folded Reload + movl %eax, 112(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %edx, 116(%ecx) + movl %eax, 120(%ecx) + movl 68(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 124(%ecx) + addl $108, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end253: + .size mcl_fpDbl_sub16L, .Lfunc_end253-mcl_fpDbl_sub16L + + .align 16, 0x90 + .type .LmulPv544x32,@function +.LmulPv544x32: # @mulPv544x32 +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $120, %esp + movl %edx, %ebp + movl 140(%esp), %ebx + movl %ebx, %eax + mull 64(%ebp) + movl %edx, 116(%esp) # 4-byte Spill + movl %eax, 112(%esp) # 4-byte Spill + movl %ebx, %eax + mull 60(%ebp) + movl %edx, 108(%esp) # 4-byte Spill + movl %eax, 104(%esp) # 4-byte Spill + movl %ebx, %eax + mull 56(%ebp) + movl %edx, 100(%esp) # 4-byte Spill + movl %eax, 96(%esp) # 4-byte Spill + movl %ebx, %eax + mull 52(%ebp) + movl %edx, 92(%esp) # 4-byte Spill + movl %eax, 88(%esp) # 4-byte Spill + movl %ebx, %eax + mull 48(%ebp) + movl %edx, 84(%esp) # 4-byte Spill + movl %eax, 80(%esp) # 4-byte Spill + movl %ebx, %eax + mull 44(%ebp) + movl %edx, 76(%esp) # 4-byte Spill + movl %eax, 72(%esp) # 4-byte Spill + movl %ebx, %eax + mull 40(%ebp) + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, 64(%esp) # 4-byte Spill + movl %ebx, %eax + mull 36(%ebp) + movl %edx, 60(%esp) # 4-byte Spill + movl %eax, 56(%esp) # 4-byte Spill + movl %ebx, %eax + mull 32(%ebp) + movl %edx, 52(%esp) # 4-byte Spill + movl %eax, 48(%esp) # 4-byte Spill + movl %ebx, %eax + mull 28(%ebp) + movl %edx, 44(%esp) # 4-byte Spill + movl %eax, 40(%esp) # 4-byte Spill + movl %ebx, %eax + mull 24(%ebp) + movl %edx, 36(%esp) # 4-byte Spill + movl %eax, 32(%esp) # 4-byte Spill + movl %ebx, %eax + mull 20(%ebp) + movl %edx, 28(%esp) # 4-byte Spill + movl %eax, 24(%esp) # 4-byte Spill + movl %ebx, %eax + mull 16(%ebp) + movl %edx, 20(%esp) # 4-byte Spill + movl %eax, 16(%esp) # 4-byte Spill + movl %ebx, %eax + mull 12(%ebp) + movl %edx, 12(%esp) # 4-byte Spill + movl %eax, 8(%esp) # 4-byte Spill + movl %ebx, %eax + mull 8(%ebp) + movl %edx, %edi + movl %eax, 4(%esp) # 4-byte Spill + movl %ebx, %eax + mull 4(%ebp) + movl %edx, %esi + movl %eax, (%esp) # 4-byte Spill + movl %ebx, %eax + mull (%ebp) + movl %eax, (%ecx) + addl (%esp), %edx # 4-byte Folded Reload + movl %edx, 4(%ecx) + adcl 4(%esp), %esi # 4-byte Folded Reload + movl %esi, 8(%ecx) + adcl 8(%esp), %edi # 4-byte Folded Reload + movl %edi, 12(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 16(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 20(%ecx) + movl 28(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 24(%ecx) + movl 36(%esp), %eax # 4-byte Reload + adcl 
40(%esp), %eax # 4-byte Folded Reload + movl %eax, 28(%ecx) + movl 44(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 32(%ecx) + movl 52(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 36(%ecx) + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 40(%ecx) + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 44(%ecx) + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 48(%ecx) + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%ecx) + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%ecx) + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%ecx) + movl 108(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%ecx) + movl 116(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, 68(%ecx) + movl %ecx, %eax + addl $120, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end254: + .size .LmulPv544x32, .Lfunc_end254-.LmulPv544x32 + + .globl mcl_fp_mulUnitPre17L + .align 16, 0x90 + .type mcl_fp_mulUnitPre17L,@function +mcl_fp_mulUnitPre17L: # @mcl_fp_mulUnitPre17L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $140, %esp + calll .L255$pb +.L255$pb: + popl %ebx +.Ltmp56: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp56-.L255$pb), %ebx + movl 168(%esp), %eax + movl %eax, (%esp) + leal 64(%esp), %ecx + movl 164(%esp), %edx + calll .LmulPv544x32 + movl 132(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 128(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 124(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 120(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 116(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 112(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 108(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 104(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 100(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 96(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 92(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 88(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 84(%esp), %ebp + movl 80(%esp), %ebx + movl 76(%esp), %edi + movl 72(%esp), %esi + movl 64(%esp), %edx + movl 68(%esp), %ecx + movl 160(%esp), %eax + movl %edx, (%eax) + movl %ecx, 4(%eax) + movl %esi, 8(%eax) + movl %edi, 12(%eax) + movl %ebx, 16(%eax) + movl %ebp, 20(%eax) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 24(%eax) + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 28(%eax) + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%eax) + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 36(%eax) + movl 32(%esp), %ecx # 4-byte Reload + movl %ecx, 40(%eax) + movl 36(%esp), %ecx # 4-byte Reload + movl %ecx, 44(%eax) + movl 40(%esp), %ecx # 4-byte Reload + movl %ecx, 48(%eax) + movl 44(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%eax) + movl 48(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%eax) + movl 52(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + movl 56(%esp), %ecx # 4-byte Reload + movl %ecx, 64(%eax) + movl 60(%esp), %ecx # 4-byte Reload + movl %ecx, 68(%eax) + addl $140, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end255: + .size mcl_fp_mulUnitPre17L, .Lfunc_end255-mcl_fp_mulUnitPre17L + + 
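+# mcl_fpDbl_mulPre17L below computes the full 17x17-limb (544x544-bit)
+# product by schoolbook multiplication: for each 32-bit limb b[i] of the
+# multiplier it calls .LmulPv544x32 (defined above), which returns the
+# 18-limb partial product a * b[i], then folds that into the running
+# accumulator with an adcl carry chain, emitting one low limb of the final
+# 34-limb result per round. An illustrative sketch (names illustrative only,
+# not from the mcl source):
+#   for (int i = 0; i < 17; i++) {
+#       uint32_t t[18];
+#       mulPv544x32(t, a, b[i]);      /* t = a * b[i], 18 limbs       */
+#       add_with_carry(acc + i, t);   /* accumulate, shifted i limbs  */
+#   }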
.globl mcl_fpDbl_mulPre17L + .align 16, 0x90 + .type mcl_fpDbl_mulPre17L,@function +mcl_fpDbl_mulPre17L: # @mcl_fpDbl_mulPre17L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1356, %esp # imm = 0x54C + calll .L256$pb +.L256$pb: + popl %edi +.Ltmp57: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp57-.L256$pb), %edi + movl %edi, 124(%esp) # 4-byte Spill + movl 1384(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 1280(%esp), %ecx + movl 1380(%esp), %edx + movl %edx, %esi + movl %edi, %ebx + calll .LmulPv544x32 + movl 1348(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1344(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1340(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1336(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1332(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1328(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1320(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1316(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1312(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1308(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1304(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1300(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1296(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 1292(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 1288(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 1280(%esp), %eax + movl 1284(%esp), %ebp + movl 1376(%esp), %ecx + movl %eax, (%ecx) + movl 1384(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl %esi, %edx + movl %edi, %ebx + calll .LmulPv544x32 + addl 1208(%esp), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 1276(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1272(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1268(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1260(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1256(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 1252(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1248(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1244(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1236(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1232(%esp), %edi + movl 1228(%esp), %esi + movl 1224(%esp), %edx + movl 1220(%esp), %ecx + movl 1212(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1216(%esp), %eax + movl 1376(%esp), %ebp + movl 8(%esp), %ebx # 4-byte Reload + movl %ebx, 4(%ebp) + movl 12(%esp), %ebp # 4-byte Reload + adcl %ebp, 120(%esp) # 4-byte Folded Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte 
Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 64(%esp) # 4-byte Folded Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 1136(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 1136(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1204(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1200(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1196(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1192(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1188(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1184(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1180(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1176(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1168(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1164(%esp), %ebx + movl 1160(%esp), %edi + movl 1156(%esp), %esi + movl 1152(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1148(%esp), %edx + movl 1140(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1144(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 80(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 72(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 
56(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 1064(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1064(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 1132(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1128(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1124(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1120(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1116(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1112(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1108(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1100(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1096(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1092(%esp), %ebx + movl 1088(%esp), %edi + movl 1084(%esp), %esi + movl 1080(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1076(%esp), %edx + movl 1068(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1072(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 992(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1060(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1056(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1052(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1044(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1040(%esp), 
%eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1036(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1032(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1028(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1024(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1020(%esp), %ebx + movl 1016(%esp), %edi + movl 1012(%esp), %esi + movl 1008(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1004(%esp), %edx + movl 996(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1000(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 88(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 920(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 920(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 988(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 984(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 980(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 976(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 972(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 968(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 964(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 960(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 956(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 952(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 948(%esp), %ebx + movl 944(%esp), %edi + movl 940(%esp), %esi + movl 936(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 932(%esp), %edx + movl 924(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 928(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 4(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill 
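+# Each round of mcl_fpDbl_mulPre17L follows the same shape as the block
+# above: the previous round's 17 high limbs (held in registers and 4-byte
+# spill slots) are added with adcl to the fresh partial product from
+# .LmulPv544x32, the lowest limb is written to the result buffer, and a
+# final adcl $0 captures the outgoing carry into the next round's top limb.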
+ adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 848(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 848(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 916(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 912(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 908(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 904(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 900(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 896(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 892(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 888(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 884(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 880(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 876(%esp), %ebx + movl 872(%esp), %edi + movl 868(%esp), %esi + movl 864(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 860(%esp), %edx + movl 852(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 856(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 
104(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 776(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 776(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 844(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 840(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 836(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 828(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 804(%esp), %ebx + movl 800(%esp), %edi + movl 796(%esp), %esi + movl 792(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 788(%esp), %edx + movl 780(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 784(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl 
%eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 704(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 772(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 764(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 760(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 748(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 744(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 740(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 736(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 732(%esp), %ebx + movl 728(%esp), %edi + movl 724(%esp), %esi + movl 720(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 716(%esp), %edx + movl 708(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 712(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 632(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 700(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 696(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 692(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 688(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 684(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 680(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 676(%esp), %eax + 
movl %eax, 104(%esp) # 4-byte Spill + movl 672(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 660(%esp), %ebx + movl 656(%esp), %edi + movl 652(%esp), %esi + movl 648(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 644(%esp), %edx + movl 636(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 640(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 560(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 628(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 624(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 620(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 616(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 600(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 588(%esp), %ebx + movl 584(%esp), %edi + movl 580(%esp), %esi + movl 576(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 572(%esp), %edx + movl 564(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 568(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 48(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte 
Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 488(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 488(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 532(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 528(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 524(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 520(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 516(%esp), %ebx + movl 512(%esp), %edi + movl 508(%esp), %esi + movl 504(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 500(%esp), %edx + movl 492(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 496(%esp), %ecx + movl 1376(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 44(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded 
Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 416(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 472(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 468(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 464(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 460(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 456(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 452(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 448(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 444(%esp), %ebx + movl 440(%esp), %edi + movl 436(%esp), %esi + movl 432(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 428(%esp), %edx + movl 420(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 424(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 48(%eax) + movl 48(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + 
adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 344(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 404(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 400(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 396(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 392(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 388(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 380(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 376(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 372(%esp), %ebx + movl 368(%esp), %edi + movl 364(%esp), %esi + movl 360(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 356(%esp), %edx + movl 348(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 352(%esp), %ecx + movl 1376(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 52(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 272(%esp), %ecx + movl 1380(%esp), %eax + movl %eax, %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 272(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 340(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 336(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 320(%esp), 
%esi + movl %esi, 48(%esp) # 4-byte Spill + movl 316(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 304(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 300(%esp), %ebx + movl 296(%esp), %edi + movl 292(%esp), %edx + movl 288(%esp), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 284(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl 280(%esp), %ecx + movl 120(%esp), %esi # 4-byte Reload + movl 1376(%esp), %ebp + movl %esi, 56(%ebp) + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebp + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 28(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 112(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1384(%esp), %ecx + movl %ecx, %eax + movl 60(%eax), %eax + movl %eax, (%esp) + leal 200(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 200(%esp), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 248(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 244(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 240(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 236(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 232(%esp), %edi + movl 228(%esp), %esi + movl 224(%esp), %edx + movl 220(%esp), %ecx + movl 216(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl 204(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 208(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 12(%esp), %ebp # 4-byte Reload + movl 1376(%esp), %ebx + movl %ebp, 60(%ebx) + movl 120(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 
120(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 76(%esp), %ebp # 4-byte Folded Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 56(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 32(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + movl 1384(%esp), %eax + movl 64(%eax), %eax + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 1380(%esp), %edx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 128(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 132(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 196(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 192(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 188(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 184(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 164(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 156(%esp), %ebx + movl 152(%esp), %edi + movl 148(%esp), %esi + movl 144(%esp), %edx + movl 140(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 64(%eax) + movl 64(%esp), %ebp # 4-byte Reload + movl %ebp, 68(%eax) + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl 76(%esp), %ebp # 4-byte Reload + movl %ebp, 72(%eax) + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %ecx, 76(%eax) + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %edx, 80(%eax) + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %esi, 84(%eax) + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %edi, 88(%eax) + movl 20(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %ebx, 92(%eax) + movl 32(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %edx, 96(%eax) + movl 48(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %ecx, 100(%eax) + movl 68(%esp), %ecx # 4-byte 
Reload + adcl 96(%esp), %ecx # 4-byte Folded Reload + movl %edx, 104(%eax) + movl 84(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx # 4-byte Folded Reload + movl %ecx, 108(%eax) + movl 92(%esp), %ecx # 4-byte Reload + adcl 116(%esp), %ecx # 4-byte Folded Reload + movl %edx, 112(%eax) + movl 100(%esp), %edx # 4-byte Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %ecx, 116(%eax) + movl 108(%esp), %ecx # 4-byte Reload + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %edx, 120(%eax) + movl %ecx, 124(%eax) + movl 112(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 128(%eax) + movl 124(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 132(%eax) + addl $1356, %esp # imm = 0x54C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end256: + .size mcl_fpDbl_mulPre17L, .Lfunc_end256-mcl_fpDbl_mulPre17L + + .globl mcl_fpDbl_sqrPre17L + .align 16, 0x90 + .type mcl_fpDbl_sqrPre17L,@function +mcl_fpDbl_sqrPre17L: # @mcl_fpDbl_sqrPre17L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1356, %esp # imm = 0x54C + calll .L257$pb +.L257$pb: + popl %ebx +.Ltmp58: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp58-.L257$pb), %ebx + movl %ebx, 124(%esp) # 4-byte Spill + movl 1380(%esp), %edx + movl (%edx), %eax + movl %eax, (%esp) + leal 1280(%esp), %ecx + movl %edx, %edi + movl %ebx, %esi + calll .LmulPv544x32 + movl 1348(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1344(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1340(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1336(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1332(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1328(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1320(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1316(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1312(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1308(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1304(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1300(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 1296(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 1292(%esp), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 1288(%esp), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 1280(%esp), %eax + movl 1284(%esp), %ebp + movl 1376(%esp), %ecx + movl %eax, (%ecx) + movl %edi, %edx + movl 4(%edx), %eax + movl %eax, (%esp) + leal 1208(%esp), %ecx + movl %esi, %ebx + calll .LmulPv544x32 + addl 1208(%esp), %ebp + movl %ebp, 8(%esp) # 4-byte Spill + movl 1276(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1272(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1268(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1260(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1256(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 1252(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1248(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1244(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1236(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1232(%esp), %edi + movl 1228(%esp), %esi + movl 1224(%esp), %edx + movl 1220(%esp), %ecx + movl 1212(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1216(%esp), %eax + movl 1376(%esp), %ebp + movl 8(%esp), %ebx # 
4-byte Reload + movl %ebx, 4(%ebp) + movl 12(%esp), %ebp # 4-byte Reload + adcl %ebp, 120(%esp) # 4-byte Folded Spill + adcl 16(%esp), %eax # 4-byte Folded Reload + movl %eax, 8(%esp) # 4-byte Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 12(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 32(%esp), %esi # 4-byte Folded Reload + movl %esi, 20(%esp) # 4-byte Spill + adcl 40(%esp), %edi # 4-byte Folded Reload + movl %edi, 24(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 52(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + movl 68(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + movl 76(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 64(%esp) # 4-byte Folded Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + adcl $0, 56(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 8(%edx), %eax + movl %eax, (%esp) + leal 1136(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 1136(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1204(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1200(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1196(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1192(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1188(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1184(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1180(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1176(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1172(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1168(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1164(%esp), %ebx + movl 1160(%esp), %edi + movl 1156(%esp), %esi + movl 1152(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1148(%esp), %edx + movl 1140(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1144(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 8(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 80(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 72(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 
36(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 56(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 12(%edx), %eax + movl %eax, (%esp) + leal 1064(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1064(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 1132(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 1128(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1124(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 1120(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 1116(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 1112(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 1108(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 1104(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 1100(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 1096(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 1092(%esp), %ebx + movl 1088(%esp), %edi + movl 1084(%esp), %esi + movl 1080(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 1076(%esp), %edx + movl 1068(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1072(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 12(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 
96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 16(%edx), %eax + movl %eax, (%esp) + leal 992(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 992(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 1060(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 1056(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 1052(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 1048(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 1044(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 1040(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 1036(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 1032(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 1028(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 1024(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 1020(%esp), %ebx + movl 1016(%esp), %edi + movl 1012(%esp), %esi + movl 1008(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 1004(%esp), %edx + movl 996(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 1000(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 16(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 20(%edx), %eax + movl %eax, (%esp) + leal 920(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 920(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 988(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 984(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 980(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 976(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 972(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + 
movl 968(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 964(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 960(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 956(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 952(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 948(%esp), %ebx + movl 944(%esp), %edi + movl 940(%esp), %esi + movl 936(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 932(%esp), %edx + movl 924(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 928(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 20(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 24(%edx), %eax + movl %eax, (%esp) + leal 848(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 848(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 916(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 912(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 908(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 904(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 900(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 896(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 892(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 888(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 884(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 880(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 876(%esp), %ebx + movl 872(%esp), %edi + movl 868(%esp), %esi + movl 864(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 860(%esp), %edx + movl 852(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 856(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 24(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), 
%ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 28(%edx), %eax + movl %eax, (%esp) + leal 776(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 776(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 844(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 840(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 836(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 832(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 828(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 824(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 820(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 816(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 812(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 808(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 804(%esp), %ebx + movl 800(%esp), %edi + movl 796(%esp), %esi + movl 792(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 788(%esp), %edx + movl 780(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 784(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 28(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + 
adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 32(%edx), %eax + movl %eax, (%esp) + leal 704(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 704(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 772(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 768(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 764(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 760(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 756(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 752(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 748(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 744(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 740(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 736(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 732(%esp), %ebx + movl 728(%esp), %edi + movl 724(%esp), %esi + movl 720(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 716(%esp), %edx + movl 708(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 712(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 32(%eax) + movl 52(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 32(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 24(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte 
Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 40(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 36(%edx), %eax + movl %eax, (%esp) + leal 632(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 632(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 700(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 696(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 692(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 688(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 684(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 680(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 676(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 672(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 668(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 664(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 660(%esp), %ebx + movl 656(%esp), %edi + movl 652(%esp), %esi + movl 648(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 644(%esp), %edx + movl 636(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 640(%esp), %ecx + movl 1376(%esp), %eax + movl 52(%esp), %ebp # 4-byte Reload + movl %ebp, 36(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 32(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 24(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 40(%edx), %eax + movl %eax, (%esp) + leal 560(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 560(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 628(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 624(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 620(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 616(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 612(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 608(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 604(%esp), %eax + movl %eax, 116(%esp) # 
4-byte Spill + movl 600(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 596(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 592(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 588(%esp), %ebx + movl 584(%esp), %edi + movl 580(%esp), %esi + movl 576(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 572(%esp), %edx + movl 564(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 568(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 40(%eax) + movl 48(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 12(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 16(%esp) # 4-byte Spill + adcl 52(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 44(%edx), %eax + movl %eax, (%esp) + leal 488(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 488(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 556(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 552(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 548(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 544(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 540(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 536(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 532(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 528(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 524(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 520(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 516(%esp), %ebx + movl 512(%esp), %edi + movl 508(%esp), %esi + movl 504(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 500(%esp), %edx + movl 492(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 496(%esp), %ecx + movl 1376(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 44(%eax) + movl 8(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill 
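+# Editor annotation (comment added for readability; not emitted by the
+# compiler): the surrounding adcl/movl chain folds the 18-limb partial
+# product that the .LmulPv544x32 call above buffered at 488(%esp) --
+# operand limb 44(%edx) times the full 17-limb operand -- into the
+# stack-spilled accumulator, one 4-byte limb per adcl, threading the
+# carry flag from limb to limb.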
+ movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 20(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 48(%edx), %eax + movl %eax, (%esp) + leal 416(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 416(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 484(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 480(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 476(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 472(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 468(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 464(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 460(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 456(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 452(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 448(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 444(%esp), %ebx + movl 440(%esp), %edi + movl 436(%esp), %esi + movl 432(%esp), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 428(%esp), %edx + movl 420(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 424(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 48(%eax) + movl 48(%esp), %ebp # 4-byte Reload + adcl 4(%esp), %ebp # 4-byte Folded Reload + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 116(%esp), %eax # 
4-byte Reload + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 52(%edx), %eax + movl %eax, (%esp) + leal 344(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 344(%esp), %ebp + movl %ebp, 48(%esp) # 4-byte Spill + movl 412(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 408(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 404(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 400(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 396(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 392(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 388(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 384(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 380(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 376(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 372(%esp), %ebx + movl 368(%esp), %edi + movl 364(%esp), %esi + movl 360(%esp), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 356(%esp), %edx + movl 348(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 352(%esp), %ecx + movl 1376(%esp), %eax + movl 48(%esp), %ebp # 4-byte Reload + movl %ebp, 52(%eax) + movl 4(%esp), %eax # 4-byte Reload + adcl %eax, 120(%esp) # 4-byte Folded Spill + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 4(%esp) # 4-byte Spill + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %edx, 8(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 28(%esp) # 4-byte Folded Spill + adcl 16(%esp), %esi # 4-byte Folded Reload + movl %esi, 12(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 16(%esp) # 4-byte Spill + adcl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 20(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %eax, 52(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 76(%esp), %eax # 4-byte Folded Reload + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 40(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 32(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, 36(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 56(%edx), %eax + movl 
%eax, (%esp) + leal 272(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 272(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 340(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 336(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 332(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 328(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 324(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 320(%esp), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 316(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 312(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 308(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 304(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 300(%esp), %ebx + movl 296(%esp), %edi + movl 292(%esp), %edx + movl 288(%esp), %esi + movl %esi, 24(%esp) # 4-byte Spill + movl 284(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 276(%esp), %eax + movl 280(%esp), %ecx + movl 120(%esp), %esi # 4-byte Reload + movl 1376(%esp), %ebp + movl %esi, 56(%ebp) + adcl 4(%esp), %eax # 4-byte Folded Reload + movl %eax, %ebp + adcl 8(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 8(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %esi, 76(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + adcl %eax, 24(%esp) # 4-byte Folded Spill + adcl 16(%esp), %edx # 4-byte Folded Reload + movl %edx, 16(%esp) # 4-byte Spill + adcl 20(%esp), %edi # 4-byte Folded Reload + movl %edi, 20(%esp) # 4-byte Spill + adcl 52(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 28(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 68(%esp), %eax # 4-byte Folded Reload + movl %eax, 56(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %eax, 60(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 44(%esp), %esi # 4-byte Folded Reload + movl %esi, 116(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl %eax, 48(%esp) # 4-byte Folded Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %eax, 84(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %eax, 92(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, 32(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 60(%edx), %eax + movl %eax, (%esp) + leal 200(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 200(%esp), %ebp + movl %ebp, 12(%esp) # 4-byte Spill + movl 268(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 264(%esp), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 260(%esp), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 256(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 252(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 248(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 244(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 240(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 236(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill 
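+# Editor annotation (comment added for readability; not emitted by the
+# compiler): this movl block reloads the 18 limbs of the partial product
+# buffered at 200(%esp) (operand limb 60(%edx)) into registers and spill
+# slots; the adcl chain that follows accumulates them, and the later
+# adcl $0 captures the final carry-out limb of the round.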
+ movl 232(%esp), %edi + movl 228(%esp), %esi + movl 224(%esp), %edx + movl 220(%esp), %ecx + movl 216(%esp), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 212(%esp), %eax + movl 204(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 208(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 12(%esp), %ebp # 4-byte Reload + movl 1376(%esp), %ebx + movl %ebp, 60(%ebx) + movl 120(%esp), %ebp # 4-byte Reload + adcl 8(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 120(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 76(%esp), %ebp # 4-byte Folded Reload + adcl 24(%esp), %eax # 4-byte Folded Reload + movl %eax, 76(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + adcl %eax, 36(%esp) # 4-byte Folded Spill + adcl 20(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 24(%esp) # 4-byte Spill + adcl 28(%esp), %edx # 4-byte Folded Reload + movl %edx, 28(%esp) # 4-byte Spill + adcl 56(%esp), %esi # 4-byte Folded Reload + movl %esi, 56(%esp) # 4-byte Spill + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %edi, 60(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %eax, 72(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 116(%esp), %eax # 4-byte Folded Reload + movl %eax, 80(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl %eax, 40(%esp) # 4-byte Folded Spill + movl 32(%esp), %eax # 4-byte Reload + adcl %eax, 44(%esp) # 4-byte Folded Spill + adcl $0, 52(%esp) # 4-byte Folded Spill + movl 1380(%esp), %edx + movl 64(%edx), %eax + movl %eax, (%esp) + leal 128(%esp), %ecx + movl 124(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 120(%esp), %eax # 4-byte Reload + addl 128(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 132(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 136(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 196(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 192(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 188(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 184(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 180(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 176(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 172(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 168(%esp), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 164(%esp), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 160(%esp), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 156(%esp), %ebx + movl 152(%esp), %edi + movl 148(%esp), %esi + movl 144(%esp), %edx + movl 140(%esp), %ecx + movl 1376(%esp), %eax + movl 120(%esp), %ebp # 4-byte Reload + movl %ebp, 64(%eax) + movl 68(%esp), %ebp # 4-byte Reload + movl %ebp, 68(%eax) + adcl 36(%esp), %ecx # 4-byte Folded Reload + movl 76(%esp), %ebp # 4-byte Reload + movl %ebp, 72(%eax) + adcl 24(%esp), %edx # 4-byte Folded Reload + movl %ecx, 76(%eax) + adcl 28(%esp), %esi # 4-byte Folded Reload + movl %edx, 80(%eax) + adcl 56(%esp), %edi # 4-byte 
Folded Reload + movl %esi, 84(%eax) + adcl 60(%esp), %ebx # 4-byte Folded Reload + movl %edi, 88(%eax) + movl 20(%esp), %edx # 4-byte Reload + adcl 72(%esp), %edx # 4-byte Folded Reload + movl %ebx, 92(%eax) + movl 32(%esp), %ecx # 4-byte Reload + adcl 80(%esp), %ecx # 4-byte Folded Reload + movl %edx, 96(%eax) + movl 48(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %ecx, 100(%eax) + movl 64(%esp), %ecx # 4-byte Reload + adcl 96(%esp), %ecx # 4-byte Folded Reload + movl %edx, 104(%eax) + movl 84(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx # 4-byte Folded Reload + movl %ecx, 108(%eax) + movl 92(%esp), %ecx # 4-byte Reload + adcl 112(%esp), %ecx # 4-byte Folded Reload + movl %edx, 112(%eax) + movl 100(%esp), %edx # 4-byte Reload + adcl 40(%esp), %edx # 4-byte Folded Reload + movl %ecx, 116(%eax) + movl 108(%esp), %ecx # 4-byte Reload + adcl 44(%esp), %ecx # 4-byte Folded Reload + movl %edx, 120(%eax) + movl %ecx, 124(%eax) + movl 116(%esp), %ecx # 4-byte Reload + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %ecx, 128(%eax) + movl 124(%esp), %ecx # 4-byte Reload + adcl $0, %ecx + movl %ecx, 132(%eax) + addl $1356, %esp # imm = 0x54C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end257: + .size mcl_fpDbl_sqrPre17L, .Lfunc_end257-mcl_fpDbl_sqrPre17L + + .globl mcl_fp_mont17L + .align 16, 0x90 + .type mcl_fp_mont17L,@function +mcl_fp_mont17L: # @mcl_fp_mont17L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2588, %esp # imm = 0xA1C + calll .L258$pb +.L258$pb: + popl %ebx +.Ltmp59: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp59-.L258$pb), %ebx + movl 2620(%esp), %eax + movl -4(%eax), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 2512(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 2512(%esp), %ebp + movl 2516(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl %ebp, %eax + imull %esi, %eax + movl 2580(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 2576(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 2572(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 2568(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 2564(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 2560(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2556(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2552(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2548(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 2544(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 2540(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2536(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 2532(%esp), %edi + movl 2528(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2524(%esp), %esi + movl 2520(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 2440(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + addl 2440(%esp), %ebp + movl 120(%esp), %eax # 4-byte Reload + adcl 2444(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2448(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 2452(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2456(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2460(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 2464(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2468(%esp), %eax + 
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2472(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2480(%esp), %eax
+ movl %eax, %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2484(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2488(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2492(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2496(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2500(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 2504(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 2508(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl 2616(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2368(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ addl 2368(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2372(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2376(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2380(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2384(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 2392(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2396(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2400(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 2404(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 2408(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2412(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2416(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2420(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2424(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 2428(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 2432(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 2436(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2296(%esp), %ecx
+ movl 2620(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 116(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 2296(%esp), %ebp
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2300(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 2304(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2308(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2312(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2316(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 2320(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2324(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2328(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2332(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 2336(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2340(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2344(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 2348(%esp), %esi
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 2352(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 2356(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 2360(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 2364(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 2616(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2224(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 2224(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2228(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2232(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2240(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2244(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2256(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 2272(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2276(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 2280(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 2284(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 2288(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl 2292(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2152(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 2152(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2156(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2164(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2168(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2188(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2192(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2196(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2200(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 2204(%esp), %ebp
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl 2208(%esp), %edi
+ movl 132(%esp), %esi # 4-byte Reload
+ adcl 2212(%esp), %esi
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2216(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2220(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 2080(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2080(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2084(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2088(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2092(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2096(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2100(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2104(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2108(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2112(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2116(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2120(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2124(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 2128(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ adcl 2132(%esp), %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ adcl 2136(%esp), %esi
+ movl %esi, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2140(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2144(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2148(%esp), %esi
+ sbbl %ebp, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2008(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl %ebp, %eax
+ andl $1, %eax
+ addl 2008(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2012(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2016(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2020(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2024(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2028(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2032(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2036(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 2040(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 2044(%esp), %edi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2048(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 2052(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 2056(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 2060(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 2064(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 2068(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 2072(%esp), %ebp
+ adcl 2076(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1936(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 1936(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1944(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1948(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1956(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1964(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1968(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1972(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1976(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1980(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1984(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1988(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1992(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 1996(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2000(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2004(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1864(%esp), %ecx
+ movl 2620(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1864(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1868(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1872(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1876(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1880(%esp), %edi
+ adcl 1884(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1888(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1892(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 1896(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1904(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1908(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1912(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl 1916(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1920(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1924(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1928(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1932(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1792(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1792(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1804(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1820(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1836(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 1840(%esp), %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1848(%esp), %edi
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1852(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1856(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1860(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1720(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1720(%esp), %ecx
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1724(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1728(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1732(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1736(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1740(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1744(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1748(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + adcl 1752(%esp), %esi + movl 108(%esp), %ecx # 4-byte Reload + adcl 1756(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1760(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 1764(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 1768(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 1772(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + adcl 1776(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + adcl 1780(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1784(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 1788(%esp), %ebp + adcl $0, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1648(%esp), %ecx + movl 2612(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + movl 92(%esp), %eax # 4-byte Reload + addl 1648(%esp), %eax + movl 76(%esp), %edi # 4-byte Reload + adcl 1652(%esp), %edi + movl 68(%esp), %ecx # 4-byte Reload + adcl 1656(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1660(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1664(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1668(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1672(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl 1676(%esp), %esi + movl %esi, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1680(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1684(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 1688(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 1692(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 1696(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1700(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1704(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1708(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + adcl 1712(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1716(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %eax, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1576(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %ecx + addl 1576(%esp), %ebp + adcl 1580(%esp), %edi + movl %edi, 76(%esp) # 4-byte 
Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1584(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 1588(%esp), %ebp + movl 72(%esp), %edi # 4-byte Reload + adcl 1592(%esp), %edi + movl 84(%esp), %esi # 4-byte Reload + adcl 1596(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 1600(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1604(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1608(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1612(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 1616(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 1620(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1624(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1628(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1632(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1636(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1640(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1644(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1504(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 76(%esp), %ecx # 4-byte Reload + addl 1504(%esp), %ecx + movl 68(%esp), %eax # 4-byte Reload + adcl 1508(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 1512(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + adcl 1516(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl 1520(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1524(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1528(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1532(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1536(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %edi # 4-byte Reload + adcl 1540(%esp), %edi + movl 132(%esp), %eax # 4-byte Reload + adcl 1544(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1548(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1552(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1556(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1560(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1564(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %esi # 4-byte Reload + adcl 1568(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1572(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1432(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl 76(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 
1432(%esp), %ebp + movl 68(%esp), %ecx # 4-byte Reload + adcl 1436(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1440(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1444(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1448(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1452(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1456(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ebp # 4-byte Reload + adcl 1460(%esp), %ebp + movl 124(%esp), %ecx # 4-byte Reload + adcl 1464(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + adcl 1468(%esp), %edi + movl %edi, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 1472(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %edi # 4-byte Reload + adcl 1476(%esp), %edi + movl 116(%esp), %ecx # 4-byte Reload + adcl 1480(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1484(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1488(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1492(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + adcl 1496(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1500(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 1360(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 68(%esp), %ecx # 4-byte Reload + addl 1360(%esp), %ecx + movl 64(%esp), %eax # 4-byte Reload + adcl 1364(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1372(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1376(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1380(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1384(%esp), %ebp + movl 124(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 1400(%esp), %edi + movl %edi, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1404(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 1408(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 1412(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %edi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1288(%esp), %ecx + movl 
2620(%esp), %edx + calll .LmulPv544x32 + movl 68(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1288(%esp), %edi + movl 64(%esp), %ecx # 4-byte Reload + adcl 1292(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1296(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1300(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1304(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1308(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 1312(%esp), %ebp + movl 124(%esp), %ecx # 4-byte Reload + adcl 1316(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 1320(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 1324(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + adcl 1328(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1332(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + adcl 1336(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 112(%esp), %edi # 4-byte Reload + adcl 1340(%esp), %edi + movl 88(%esp), %esi # 4-byte Reload + adcl 1344(%esp), %esi + movl 80(%esp), %ecx # 4-byte Reload + adcl 1348(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1352(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1356(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 1216(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 64(%esp), %ecx # 4-byte Reload + addl 1216(%esp), %ecx + movl 72(%esp), %eax # 4-byte Reload + adcl 1220(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1236(%esp), %ebp + movl %ebp, 108(%esp) # 4-byte Spill + movl 124(%esp), %ebp # 4-byte Reload + adcl 1240(%esp), %ebp + movl 128(%esp), %eax # 4-byte Reload + adcl 1244(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 1248(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1260(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 1264(%esp), %edi + movl %edi, 112(%esp) # 4-byte Spill + adcl 1268(%esp), %esi + movl %esi, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1272(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 
1144(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl %edi, %eax + andl $1, %eax + addl 1144(%esp), %esi + movl 72(%esp), %ecx # 4-byte Reload + adcl 1148(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1152(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1156(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %edi # 4-byte Reload + adcl 1160(%esp), %edi + movl 108(%esp), %ecx # 4-byte Reload + adcl 1164(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 1168(%esp), %ebp + movl %ebp, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 1172(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ecx # 4-byte Reload + adcl 1176(%esp), %ecx + movl %ecx, 132(%esp) # 4-byte Spill + movl 120(%esp), %esi # 4-byte Reload + adcl 1180(%esp), %esi + movl 116(%esp), %ecx # 4-byte Reload + adcl 1184(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1188(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1192(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1196(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1200(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1204(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1208(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1212(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 1072(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 72(%esp), %ecx # 4-byte Reload + addl 1072(%esp), %ecx + movl 84(%esp), %eax # 4-byte Reload + adcl 1076(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 1080(%esp), %ebp + adcl 1084(%esp), %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1088(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 1104(%esp), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 1112(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 72(%esp) # 4-byte 
Spill + movl %ecx, %eax + movl %ecx, %edi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1000(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl 72(%esp), %eax # 4-byte Reload + andl $1, %eax + addl 1000(%esp), %edi + movl 84(%esp), %ecx # 4-byte Reload + adcl 1004(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 1008(%esp), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1012(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1016(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 1020(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + adcl 1024(%esp), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 132(%esp), %ebp # 4-byte Reload + adcl 1028(%esp), %ebp + movl 120(%esp), %ecx # 4-byte Reload + adcl 1032(%esp), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 1036(%esp), %edi + adcl 1040(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1044(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1048(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1052(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 1056(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1060(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1064(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1068(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 84(%esp), %ecx # 4-byte Reload + addl 928(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 932(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 936(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl 952(%esp), %ebp + movl %ebp, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 960(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 980(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 984(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + sbbl 
%esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 856(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %ecx + addl 856(%esp), %ebp + movl 96(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %ebp # 4-byte Reload + adcl 888(%esp), %ebp + movl 100(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 896(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 912(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 924(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2616(%esp), %ecx + movl %ecx, %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl 2612(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + movl 96(%esp), %ecx # 4-byte Reload + addl 784(%esp), %ecx + movl 104(%esp), %eax # 4-byte Reload + adcl 788(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 812(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 820(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %edi # 4-byte Reload + adcl 828(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 72(%esp) # 
4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %ecx + addl 712(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 716(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %esi # 4-byte Reload + adcl 728(%esp), %esi + movl 132(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %ebp # 4-byte Reload + adcl 736(%esp), %ebp + movl 116(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 756(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 640(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 104(%esp), %ecx # 4-byte Reload + addl 640(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 652(%esp), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 660(%esp), %ebp + movl %ebp, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 680(%esp), %edi + movl 92(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %ebp # 4-byte Reload + adcl 696(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), 
%eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl 104(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 568(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 588(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 600(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 604(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 608(%esp), %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 616(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + adcl 624(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 108(%esp), %ecx # 4-byte Reload + addl 496(%esp), %ecx + movl 124(%esp), %eax # 4-byte Reload + adcl 500(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 524(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 528(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 540(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 68(%esp), %edi # 4-byte Reload + adcl 544(%esp), %edi + movl 64(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 
560(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + sbbl %eax, %eax + movl %eax, 108(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + movl 108(%esp), %ecx # 4-byte Reload + andl $1, %ecx + addl 424(%esp), %esi + movl 124(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 432(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %esi # 4-byte Reload + adcl 440(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 456(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 472(%esp), %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %ebp # 4-byte Reload + adcl 480(%esp), %ebp + movl 84(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 60(%eax), %eax + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 124(%esp), %ecx # 4-byte Reload + addl 352(%esp), %ecx + movl 128(%esp), %eax # 4-byte Reload + adcl 356(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl 364(%esp), %esi + movl %esi, 120(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 100(%esp), %edi # 4-byte Reload + adcl 372(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 404(%esp), %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 
412(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + sbbl %esi, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 60(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 2620(%esp), %edx + calll .LmulPv544x32 + andl $1, %esi + movl %esi, %ecx + addl 280(%esp), %ebp + movl 128(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %esi # 4-byte Reload + adcl 288(%esp), %esi + movl 120(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 116(%esp), %ebp # 4-byte Reload + adcl 296(%esp), %ebp + adcl 300(%esp), %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %edi # 4-byte Reload + adcl 308(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 2616(%esp), %eax + movl 64(%eax), %eax + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 2612(%esp), %edx + calll .LmulPv544x32 + movl 128(%esp), %ecx # 4-byte Reload + addl 208(%esp), %ecx + adcl 212(%esp), %esi + movl %esi, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 220(%esp), %ebp + movl %ebp, 116(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 224(%esp), %ebp + movl 112(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 232(%esp), %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 264(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 
268(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + sbbl %edi, %edi + movl 60(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 136(%esp), %ecx + movl 2620(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + andl $1, %edi + addl 136(%esp), %esi + movl 116(%esp), %edx # 4-byte Reload + movl 132(%esp), %eax # 4-byte Reload + adcl 140(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 144(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + adcl 148(%esp), %edx + movl %edx, 116(%esp) # 4-byte Spill + adcl 152(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 156(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 160(%esp), %eax + movl 80(%esp), %ecx # 4-byte Reload + adcl 164(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 168(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 172(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 176(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 180(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 184(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 188(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 192(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 196(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 200(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 124(%esp), %ecx # 4-byte Reload + adcl 204(%esp), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + adcl $0, %edi + movl 132(%esp), %ecx # 4-byte Reload + movl 2620(%esp), %ebx + subl (%ebx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 120(%esp), %ecx # 4-byte Reload + sbbl 4(%ebx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + sbbl 8(%ebx), %edx + movl %edx, 20(%esp) # 4-byte Spill + sbbl 12(%ebx), %ebp + movl %ebp, 24(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + movl %eax, %edx + sbbl 16(%ebx), %ebp + movl %ebp, 28(%esp) # 4-byte Spill + sbbl 20(%ebx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + sbbl 24(%ebx), %eax + movl %eax, 36(%esp) # 4-byte Spill + sbbl 28(%ebx), %esi + movl %esi, 40(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + sbbl 32(%ebx), %esi + movl %esi, 44(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + sbbl 36(%ebx), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + sbbl 40(%ebx), %esi + movl %esi, 52(%esp) # 4-byte Spill + movl 72(%esp), %esi # 4-byte Reload + sbbl 44(%ebx), %esi + movl %esi, 56(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + sbbl 48(%ebx), %esi + movl %esi, 60(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + sbbl 52(%ebx), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 104(%esp), %esi # 4-byte Reload + sbbl 56(%ebx), %esi + movl %esi, 128(%esp) # 4-byte Spill + movl %ebx, %ebp + movl 108(%esp), %ebx # 4-byte Reload + sbbl 60(%ebp), %ebx + movl 
124(%esp), %esi # 4-byte Reload + sbbl 64(%ebp), %esi + movl %esi, %ebp + sbbl $0, %edi + andl $1, %edi + jne .LBB258_2 +# BB#1: + movl %ebx, 108(%esp) # 4-byte Spill +.LBB258_2: + movl %edi, %ebx + testb %bl, %bl + movl 132(%esp), %ebx # 4-byte Reload + jne .LBB258_4 +# BB#3: + movl 12(%esp), %ebx # 4-byte Reload +.LBB258_4: + movl 2608(%esp), %eax + movl %ebx, (%eax) + movl 120(%esp), %ebx # 4-byte Reload + jne .LBB258_6 +# BB#5: + movl 16(%esp), %ebx # 4-byte Reload +.LBB258_6: + movl %ebx, 4(%eax) + jne .LBB258_8 +# BB#7: + movl 20(%esp), %ecx # 4-byte Reload + movl %ecx, 116(%esp) # 4-byte Spill +.LBB258_8: + movl 116(%esp), %ecx # 4-byte Reload + movl %ecx, 8(%eax) + jne .LBB258_10 +# BB#9: + movl 24(%esp), %ecx # 4-byte Reload + movl %ecx, 100(%esp) # 4-byte Spill +.LBB258_10: + movl 100(%esp), %ecx # 4-byte Reload + movl %ecx, 12(%eax) + movl 112(%esp), %esi # 4-byte Reload + jne .LBB258_12 +# BB#11: + movl 28(%esp), %esi # 4-byte Reload +.LBB258_12: + movl %esi, 16(%eax) + movl 80(%esp), %ecx # 4-byte Reload + jne .LBB258_14 +# BB#13: + movl 32(%esp), %edx # 4-byte Reload +.LBB258_14: + movl %edx, 20(%eax) + jne .LBB258_16 +# BB#15: + movl 36(%esp), %ecx # 4-byte Reload +.LBB258_16: + movl %ecx, 24(%eax) + movl 92(%esp), %ecx # 4-byte Reload + jne .LBB258_18 +# BB#17: + movl 40(%esp), %ecx # 4-byte Reload +.LBB258_18: + movl %ecx, 28(%eax) + movl 76(%esp), %ecx # 4-byte Reload + jne .LBB258_20 +# BB#19: + movl 44(%esp), %ecx # 4-byte Reload +.LBB258_20: + movl %ecx, 32(%eax) + movl 68(%esp), %ecx # 4-byte Reload + jne .LBB258_22 +# BB#21: + movl 48(%esp), %ecx # 4-byte Reload +.LBB258_22: + movl %ecx, 36(%eax) + movl 64(%esp), %ecx # 4-byte Reload + jne .LBB258_24 +# BB#23: + movl 52(%esp), %ecx # 4-byte Reload +.LBB258_24: + movl %ecx, 40(%eax) + movl 72(%esp), %ecx # 4-byte Reload + jne .LBB258_26 +# BB#25: + movl 56(%esp), %ecx # 4-byte Reload +.LBB258_26: + movl %ecx, 44(%eax) + movl 84(%esp), %ecx # 4-byte Reload + jne .LBB258_28 +# BB#27: + movl 60(%esp), %ecx # 4-byte Reload +.LBB258_28: + movl %ecx, 48(%eax) + movl 96(%esp), %ecx # 4-byte Reload + jne .LBB258_30 +# BB#29: + movl 88(%esp), %ecx # 4-byte Reload +.LBB258_30: + movl %ecx, 52(%eax) + movl 104(%esp), %ecx # 4-byte Reload + jne .LBB258_32 +# BB#31: + movl 128(%esp), %ecx # 4-byte Reload +.LBB258_32: + movl %ecx, 56(%eax) + movl 108(%esp), %ecx # 4-byte Reload + movl %ecx, 60(%eax) + movl 124(%esp), %ecx # 4-byte Reload + jne .LBB258_34 +# BB#33: + movl %ebp, %ecx +.LBB258_34: + movl %ecx, 64(%eax) + addl $2588, %esp # imm = 0xA1C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end258: + .size mcl_fp_mont17L, .Lfunc_end258-mcl_fp_mont17L + + .globl mcl_fp_montNF17L + .align 16, 0x90 + .type mcl_fp_montNF17L,@function +mcl_fp_montNF17L: # @mcl_fp_montNF17L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $2572, %esp # imm = 0xA0C + calll .L259$pb +.L259$pb: + popl %ebx +.Ltmp60: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp60-.L259$pb), %ebx + movl 2604(%esp), %eax + movl -4(%eax), %esi + movl %esi, 48(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl (%eax), %eax + movl %eax, (%esp) + leal 2496(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 2496(%esp), %edi + movl 2500(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %edi, %eax + imull %esi, %eax + movl 2564(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 2560(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 2556(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 2552(%esp), %ecx + movl 
%ecx, 96(%esp) # 4-byte Spill + movl 2548(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 2544(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 2540(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 2536(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 2532(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 2528(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 2524(%esp), %ebp + movl 2520(%esp), %esi + movl 2516(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 2512(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 2508(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 2504(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl %eax, (%esp) + leal 2424(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 2424(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 2428(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 2432(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2436(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2440(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2444(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 2448(%esp), %esi + movl %esi, 80(%esp) # 4-byte Spill + adcl 2452(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 2456(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 2460(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2464(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2468(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %esi # 4-byte Reload + adcl 2472(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 2476(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 2480(%esp), %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 2484(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2488(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2492(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 4(%eax), %eax + movl %eax, (%esp) + leal 2352(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 2420(%esp), %ecx + movl 112(%esp), %edx # 4-byte Reload + addl 2352(%esp), %edx + movl 92(%esp), %eax # 4-byte Reload + adcl 2356(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2360(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2364(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2368(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2372(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2376(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 2380(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 2384(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2388(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2392(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 2396(%esp), %esi + movl %esi, %ebp + movl 88(%esp), %eax # 
4-byte Reload + adcl 2400(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2404(%esp), %edi + movl 108(%esp), %eax # 4-byte Reload + adcl 2408(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 2412(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 2416(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl $0, %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2280(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 2280(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 2284(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 2288(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2292(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2296(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2300(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2304(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 2308(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 2312(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 2316(%esp), %esi + movl 72(%esp), %eax # 4-byte Reload + adcl 2320(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 2324(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2328(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 2332(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 2336(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 2340(%esp), %ebp + movl 116(%esp), %edi # 4-byte Reload + adcl 2344(%esp), %edi + movl 112(%esp), %eax # 4-byte Reload + adcl 2348(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 8(%eax), %eax + movl %eax, (%esp) + leal 2208(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 2276(%esp), %eax + movl 92(%esp), %edx # 4-byte Reload + addl 2208(%esp), %edx + movl 104(%esp), %ecx # 4-byte Reload + adcl 2212(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 2216(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 2220(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2224(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 2228(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 2232(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 2236(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 2240(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2244(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2248(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 2252(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 2256(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 
4-byte Reload + adcl 2260(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 2264(%esp), %ebp + adcl 2268(%esp), %edi + movl %edi, %esi + movl 112(%esp), %ecx # 4-byte Reload + adcl 2272(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 92(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 2136(%esp), %ecx + movl 2604(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + addl 2136(%esp), %edi + movl 104(%esp), %eax # 4-byte Reload + adcl 2140(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 2144(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2148(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2152(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2156(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 2160(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 2164(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2168(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2172(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2176(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2180(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2184(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %edi # 4-byte Reload + adcl 2188(%esp), %edi + adcl 2192(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + adcl 2196(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 2200(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 2204(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 12(%eax), %eax + movl %eax, (%esp) + leal 2064(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 2132(%esp), %eax + movl 104(%esp), %edx # 4-byte Reload + addl 2064(%esp), %edx + movl 76(%esp), %ecx # 4-byte Reload + adcl 2068(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 2072(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 2076(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 2080(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 2084(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ebp # 4-byte Reload + adcl 2088(%esp), %ebp + movl 68(%esp), %ecx # 4-byte Reload + adcl 2092(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 2096(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 2100(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 2104(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 2108(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + adcl 2112(%esp), %edi + movl 100(%esp), %ecx # 4-byte Reload + adcl 2116(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 2120(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + 
adcl 2124(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 2128(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl %edx, %esi + movl %esi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1992(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1992(%esp), %esi + movl 76(%esp), %eax # 4-byte Reload + adcl 1996(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 2000(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 2004(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 2008(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 2012(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 2016(%esp), %ebp + movl %ebp, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 2020(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 2024(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 2028(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 2032(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 2036(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 2040(%esp), %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 2044(%esp), %esi + movl 116(%esp), %eax # 4-byte Reload + adcl 2048(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 2052(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %edi # 4-byte Reload + adcl 2056(%esp), %edi + movl 104(%esp), %eax # 4-byte Reload + adcl 2060(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 16(%eax), %eax + movl %eax, (%esp) + leal 1920(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1988(%esp), %eax + movl 76(%esp), %edx # 4-byte Reload + addl 1920(%esp), %edx + movl 84(%esp), %ecx # 4-byte Reload + adcl 1924(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1928(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1932(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 1936(%esp), %ebp + movl 56(%esp), %ecx # 4-byte Reload + adcl 1940(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1944(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1948(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1952(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1956(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1960(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1964(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + adcl 1968(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1972(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 1976(%esp), %esi + adcl 1980(%esp), %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + 
adcl 1984(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl %edx, %eax + movl %edx, %edi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1848(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1848(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1852(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1856(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1860(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1864(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1868(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1872(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1876(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1880(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1884(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1888(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1892(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1896(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1900(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 1904(%esp), %esi + movl %esi, 112(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 1908(%esp), %ebp + movl 104(%esp), %eax # 4-byte Reload + adcl 1912(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1916(%esp), %eax + movl %eax, %edi + movl 2600(%esp), %eax + movl 20(%eax), %eax + movl %eax, (%esp) + leal 1776(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1844(%esp), %eax + movl 84(%esp), %edx # 4-byte Reload + addl 1776(%esp), %edx + movl 80(%esp), %ecx # 4-byte Reload + adcl 1780(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 60(%esp), %ecx # 4-byte Reload + adcl 1784(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1788(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1792(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1796(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1800(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1804(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 88(%esp), %esi # 4-byte Reload + adcl 1808(%esp), %esi + movl 96(%esp), %ecx # 4-byte Reload + adcl 1812(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1816(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1820(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1824(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1828(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + adcl 1832(%esp), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1836(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 1840(%esp), %edi + adcl $0, %eax + 
movl %eax, 84(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1704(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1704(%esp), %ebp + movl 80(%esp), %eax # 4-byte Reload + adcl 1708(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1712(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1716(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1720(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1724(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1728(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1732(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl 1736(%esp), %esi + movl %esi, %ebp + movl 96(%esp), %esi # 4-byte Reload + adcl 1740(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 1744(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1748(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1752(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1756(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1760(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1764(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1768(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 1772(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 24(%eax), %eax + movl %eax, (%esp) + leal 1632(%esp), %ecx + movl 2596(%esp), %eax + movl %eax, %edx + calll .LmulPv544x32 + movl 1700(%esp), %eax + movl 80(%esp), %edx # 4-byte Reload + addl 1632(%esp), %edx + movl 60(%esp), %ecx # 4-byte Reload + adcl 1636(%esp), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%esp), %ecx # 4-byte Reload + adcl 1640(%esp), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 56(%esp), %ecx # 4-byte Reload + adcl 1644(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 68(%esp), %ecx # 4-byte Reload + adcl 1648(%esp), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1652(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1656(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 1660(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + adcl 1664(%esp), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1668(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 100(%esp), %esi # 4-byte Reload + adcl 1672(%esp), %esi + movl 116(%esp), %ecx # 4-byte Reload + adcl 1676(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1680(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1684(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1688(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + adcl 1692(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + adcl 1696(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl %edx, %edi + movl %edi, %eax + imull 48(%esp), %eax # 
4-byte Folded Reload + movl %eax, (%esp) + leal 1560(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1560(%esp), %edi + movl 60(%esp), %eax # 4-byte Reload + adcl 1564(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1568(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1572(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1576(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %edi # 4-byte Reload + adcl 1580(%esp), %edi + movl 64(%esp), %ebp # 4-byte Reload + adcl 1584(%esp), %ebp + movl 88(%esp), %eax # 4-byte Reload + adcl 1588(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1592(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1596(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 1600(%esp), %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1604(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %esi # 4-byte Reload + adcl 1608(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 1612(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1616(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1620(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1624(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1628(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 28(%eax), %eax + movl %eax, (%esp) + leal 1488(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1556(%esp), %eax + movl 60(%esp), %ecx # 4-byte Reload + addl 1488(%esp), %ecx + movl 52(%esp), %edx # 4-byte Reload + adcl 1492(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 1496(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 1500(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 1504(%esp), %edi + adcl 1508(%esp), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 1512(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 1516(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 108(%esp), %edx # 4-byte Reload + adcl 1520(%esp), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 1524(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 116(%esp), %edx # 4-byte Reload + adcl 1528(%esp), %edx + movl %edx, 116(%esp) # 4-byte Spill + adcl 1532(%esp), %esi + movl %esi, %ebp + movl 92(%esp), %edx # 4-byte Reload + adcl 1536(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 104(%esp), %edx # 4-byte Reload + adcl 1540(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 1544(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 1548(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 1552(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 60(%esp) # 4-byte Spill + movl %ecx, %esi + movl %esi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1416(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 
1416(%esp), %esi + movl 52(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %esi # 4-byte Reload + adcl 1428(%esp), %esi + adcl 1432(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 64(%esp), %edi # 4-byte Reload + adcl 1436(%esp), %edi + movl 88(%esp), %eax # 4-byte Reload + adcl 1440(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1444(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1448(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1452(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1456(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 1460(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1464(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1468(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1472(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1476(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1480(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 1484(%esp), %ebp + movl 2600(%esp), %eax + movl 32(%eax), %eax + movl %eax, (%esp) + leal 1344(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1412(%esp), %eax + movl 52(%esp), %edx # 4-byte Reload + addl 1344(%esp), %edx + movl 56(%esp), %ecx # 4-byte Reload + adcl 1348(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl 1352(%esp), %esi + movl %esi, 68(%esp) # 4-byte Spill + movl 72(%esp), %ecx # 4-byte Reload + adcl 1356(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + adcl 1360(%esp), %edi + movl 88(%esp), %ecx # 4-byte Reload + adcl 1364(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1368(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1372(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1376(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1380(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1384(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1388(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + adcl 1392(%esp), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 76(%esp), %esi # 4-byte Reload + adcl 1396(%esp), %esi + movl 84(%esp), %ecx # 4-byte Reload + adcl 1400(%esp), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1404(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 1408(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1272(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1272(%esp), %ebp + movl 56(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1280(%esp), 
%eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl 1288(%esp), %edi + movl 88(%esp), %eax # 4-byte Reload + adcl 1292(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 1304(%esp), %ebp + movl 116(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1312(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1320(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 1324(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1328(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1336(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 36(%eax), %eax + movl %eax, (%esp) + leal 1200(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1268(%esp), %eax + movl 56(%esp), %ecx # 4-byte Reload + addl 1200(%esp), %ecx + movl 68(%esp), %edx # 4-byte Reload + adcl 1204(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 1208(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + adcl 1212(%esp), %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 88(%esp), %edx # 4-byte Reload + adcl 1216(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 1220(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 108(%esp), %esi # 4-byte Reload + adcl 1224(%esp), %esi + adcl 1228(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 1232(%esp), %edi + movl 112(%esp), %edx # 4-byte Reload + adcl 1236(%esp), %edx + movl %edx, 112(%esp) # 4-byte Spill + movl 92(%esp), %edx # 4-byte Reload + adcl 1240(%esp), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 104(%esp), %edx # 4-byte Reload + adcl 1244(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 1248(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 1252(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 1256(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 1260(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 1264(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 56(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %ebp + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1128(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 1128(%esp), %ebp + movl 68(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 1140(%esp), 
%eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1144(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 1148(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl 1152(%esp), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 1160(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 1172(%esp), %ebp + movl 76(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %esi # 4-byte Reload + adcl 1180(%esp), %esi + movl 80(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %edi # 4-byte Reload + adcl 1188(%esp), %edi + movl 52(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 40(%eax), %eax + movl %eax, (%esp) + leal 1056(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 1124(%esp), %edx + movl 68(%esp), %eax # 4-byte Reload + addl 1056(%esp), %eax + movl 72(%esp), %ecx # 4-byte Reload + adcl 1060(%esp), %ecx + movl %ecx, 72(%esp) # 4-byte Spill + movl 64(%esp), %ecx # 4-byte Reload + adcl 1064(%esp), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + adcl 1068(%esp), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + adcl 1072(%esp), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + adcl 1076(%esp), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + adcl 1080(%esp), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + adcl 1084(%esp), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + adcl 1088(%esp), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + adcl 1092(%esp), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + adcl 1096(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + movl 76(%esp), %ecx # 4-byte Reload + adcl 1100(%esp), %ecx + movl %ecx, 76(%esp) # 4-byte Spill + adcl 1104(%esp), %esi + movl %esi, 84(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + adcl 1108(%esp), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + adcl 1112(%esp), %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 52(%esp), %edi # 4-byte Reload + adcl 1116(%esp), %edi + movl 56(%esp), %ecx # 4-byte Reload + adcl 1120(%esp), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 68(%esp) # 4-byte Spill + movl %eax, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 984(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 984(%esp), %esi + movl 72(%esp), %esi # 4-byte Reload + adcl 988(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %ebp # 4-byte Reload + adcl 996(%esp), %ebp + movl 96(%esp), %eax # 4-byte Reload + adcl 1000(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1004(%esp), %eax + movl %eax, 108(%esp) # 4-byte 
Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 1044(%esp), %edi + movl %edi, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 44(%eax), %eax + movl %eax, (%esp) + leal 912(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 980(%esp), %eax + addl 912(%esp), %esi + movl 64(%esp), %edx # 4-byte Reload + adcl 916(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + adcl 920(%esp), %ebp + movl %ebp, 88(%esp) # 4-byte Spill + movl 96(%esp), %edi # 4-byte Reload + adcl 924(%esp), %edi + movl 108(%esp), %edx # 4-byte Reload + adcl 928(%esp), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 932(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 116(%esp), %edx # 4-byte Reload + adcl 936(%esp), %edx + movl %edx, 116(%esp) # 4-byte Spill + movl 112(%esp), %edx # 4-byte Reload + adcl 940(%esp), %edx + movl %edx, 112(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 944(%esp), %ebp + movl 104(%esp), %edx # 4-byte Reload + adcl 948(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 952(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 956(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 960(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 964(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 968(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 972(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 976(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl $0, %eax + movl %eax, 72(%esp) # 4-byte Spill + movl %esi, %eax + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 840(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 840(%esp), %esi + movl 64(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl 852(%esp), %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 856(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 860(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl 864(%esp), 
%edi + movl 112(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl %ebp, %esi + adcl 872(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %ebp # 4-byte Reload + adcl 888(%esp), %ebp + movl 60(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 48(%eax), %eax + movl %eax, (%esp) + leal 768(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 836(%esp), %edx + movl 64(%esp), %ecx # 4-byte Reload + addl 768(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 780(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 784(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 788(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + movl 112(%esp), %edi # 4-byte Reload + adcl 792(%esp), %edi + adcl 796(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + adcl 812(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %ebp # 4-byte Reload + adcl 828(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 64(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 696(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 696(%esp), %esi + movl 88(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 712(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 716(%esp), %esi + adcl 720(%esp), %edi + movl %edi, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 104(%esp) # 
4-byte Spill + movl 76(%esp), %edi # 4-byte Reload + adcl 732(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + adcl 756(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 760(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 52(%eax), %eax + movl %eax, (%esp) + leal 624(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 692(%esp), %edx + movl 88(%esp), %ecx # 4-byte Reload + addl 624(%esp), %ecx + movl 96(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 636(%esp), %ebp + adcl 640(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 644(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + adcl 656(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 660(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 672(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 676(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 684(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 88(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 552(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 552(%esp), %esi + movl 96(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl 564(%esp), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 568(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 572(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 576(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 588(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + 
movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 596(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %ebp # 4-byte Reload + adcl 600(%esp), %ebp + movl 56(%esp), %edi # 4-byte Reload + adcl 604(%esp), %edi + movl 68(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 56(%eax), %eax + movl %eax, (%esp) + leal 480(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 548(%esp), %edx + movl 96(%esp), %ecx # 4-byte Reload + addl 480(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 496(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 500(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 512(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 516(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + adcl 524(%esp), %ebp + movl %ebp, 52(%esp) # 4-byte Spill + adcl 528(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 96(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 408(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 408(%esp), %esi + movl 108(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %esi # 4-byte Reload + adcl 420(%esp), %esi + movl 112(%esp), %eax # 4-byte Reload + adcl 424(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 428(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 104(%esp), %ebp # 4-byte Reload + adcl 432(%esp), %ebp + movl 76(%esp), %edi # 4-byte Reload + adcl 436(%esp), %edi + movl 84(%esp), %eax # 4-byte Reload + adcl 440(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + 
movl 56(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 60(%eax), %eax + movl %eax, (%esp) + leal 336(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 404(%esp), %edx + movl 108(%esp), %ecx # 4-byte Reload + addl 336(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 344(%esp), %esi + movl %esi, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 352(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + adcl 356(%esp), %ebp + movl %ebp, 104(%esp) # 4-byte Spill + adcl 360(%esp), %edi + movl %edi, 76(%esp) # 4-byte Spill + movl 84(%esp), %edi # 4-byte Reload + adcl 364(%esp), %edi + movl 80(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %ebp # 4-byte Reload + adcl 372(%esp), %ebp + movl 52(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + adcl 384(%esp), %eax + movl %eax, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 108(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 48(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 264(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 264(%esp), %esi + movl 100(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 276(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%esp), %esi # 4-byte Reload + adcl 280(%esp), %esi + movl 104(%esp), %eax # 4-byte Reload + adcl 284(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + adcl 292(%esp), %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + adcl 300(%esp), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 56(%esp), %edi # 4-byte Reload + adcl 308(%esp), %edi + movl 68(%esp), %ebp # 4-byte Reload + adcl 312(%esp), %ebp + movl 72(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), 
%eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 2600(%esp), %eax + movl 64(%eax), %eax + movl %eax, (%esp) + leal 192(%esp), %ecx + movl 2596(%esp), %edx + calll .LmulPv544x32 + movl 260(%esp), %edx + movl 100(%esp), %ecx # 4-byte Reload + addl 192(%esp), %ecx + movl 116(%esp), %eax # 4-byte Reload + adcl 196(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 200(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + adcl 204(%esp), %esi + movl %esi, 92(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 208(%esp), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 216(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 220(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 52(%esp) # 4-byte Spill + adcl 232(%esp), %edi + movl %edi, 56(%esp) # 4-byte Spill + adcl 236(%esp), %ebp + movl %ebp, 68(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 64(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + adcl $0, %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %esi + movl %eax, (%esp) + leal 120(%esp), %ecx + movl 2604(%esp), %edx + calll .LmulPv544x32 + addl 120(%esp), %esi + movl 92(%esp), %esi # 4-byte Reload + movl 116(%esp), %eax # 4-byte Reload + adcl 124(%esp), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 112(%esp), %ebp # 4-byte Reload + adcl 128(%esp), %ebp + movl %ebp, 112(%esp) # 4-byte Spill + adcl 132(%esp), %esi + movl 104(%esp), %edx # 4-byte Reload + adcl 136(%esp), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 76(%esp), %edx # 4-byte Reload + adcl 140(%esp), %edx + movl %edx, 76(%esp) # 4-byte Spill + movl 84(%esp), %edx # 4-byte Reload + adcl 144(%esp), %edx + movl %edx, 84(%esp) # 4-byte Spill + movl 80(%esp), %edx # 4-byte Reload + adcl 148(%esp), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 60(%esp), %edx # 4-byte Reload + adcl 152(%esp), %edx + movl %edx, 60(%esp) # 4-byte Spill + movl 52(%esp), %edx # 4-byte Reload + adcl 156(%esp), %edx + movl %edx, 52(%esp) # 4-byte Spill + movl 56(%esp), %edx # 4-byte Reload + adcl 160(%esp), %edx + movl %edx, 56(%esp) # 4-byte Spill + movl 68(%esp), %edx # 4-byte Reload + adcl 164(%esp), %edx + movl %edx, 68(%esp) # 4-byte Spill + movl 72(%esp), %edx # 4-byte Reload + adcl 168(%esp), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%esp), %edx # 4-byte Reload + adcl 172(%esp), %edx + movl %edx, 64(%esp) # 4-byte Spill + movl 
88(%esp), %edx # 4-byte Reload + adcl 176(%esp), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 96(%esp), %edx # 4-byte Reload + adcl 180(%esp), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 108(%esp), %edx # 4-byte Reload + adcl 184(%esp), %edx + movl %edx, 108(%esp) # 4-byte Spill + movl 100(%esp), %edx # 4-byte Reload + adcl 188(%esp), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl %eax, %edx + movl 2604(%esp), %edi + subl (%edi), %edx + sbbl 4(%edi), %ebp + movl %esi, %ebx + sbbl 8(%edi), %ebx + movl 104(%esp), %ecx # 4-byte Reload + sbbl 12(%edi), %ecx + movl 76(%esp), %eax # 4-byte Reload + sbbl 16(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + sbbl 20(%edi), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + sbbl 24(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 60(%esp), %eax # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 68(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 72(%esp), %eax # 4-byte Reload + sbbl 44(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 64(%esp), %eax # 4-byte Reload + sbbl 48(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + sbbl 52(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + sbbl 56(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 60(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + sbbl 64(%edi), %eax + movl %eax, 92(%esp) # 4-byte Spill + sarl $31, %eax + testl %eax, %eax + movl 116(%esp), %edi # 4-byte Reload + js .LBB259_2 +# BB#1: + movl %edx, %edi +.LBB259_2: + movl 2592(%esp), %edx + movl %edi, (%edx) + movl 112(%esp), %edi # 4-byte Reload + js .LBB259_4 +# BB#3: + movl %ebp, %edi +.LBB259_4: + movl %edi, 4(%edx) + js .LBB259_6 +# BB#5: + movl %ebx, %esi +.LBB259_6: + movl %esi, 8(%edx) + movl 104(%esp), %esi # 4-byte Reload + js .LBB259_8 +# BB#7: + movl %ecx, %esi +.LBB259_8: + movl %esi, 12(%edx) + movl 76(%esp), %ecx # 4-byte Reload + js .LBB259_10 +# BB#9: + movl 4(%esp), %ecx # 4-byte Reload +.LBB259_10: + movl %ecx, 16(%edx) + movl 84(%esp), %eax # 4-byte Reload + js .LBB259_12 +# BB#11: + movl 8(%esp), %eax # 4-byte Reload +.LBB259_12: + movl %eax, 20(%edx) + movl 80(%esp), %eax # 4-byte Reload + js .LBB259_14 +# BB#13: + movl 12(%esp), %eax # 4-byte Reload +.LBB259_14: + movl %eax, 24(%edx) + movl 60(%esp), %eax # 4-byte Reload + js .LBB259_16 +# BB#15: + movl 16(%esp), %eax # 4-byte Reload +.LBB259_16: + movl %eax, 28(%edx) + movl 52(%esp), %eax # 4-byte Reload + js .LBB259_18 +# BB#17: + movl 20(%esp), %eax # 4-byte Reload +.LBB259_18: + movl %eax, 32(%edx) + movl 56(%esp), %eax # 4-byte Reload + js .LBB259_20 +# BB#19: + movl 24(%esp), %eax # 4-byte Reload +.LBB259_20: + movl %eax, 36(%edx) + movl 68(%esp), %eax # 4-byte Reload + js .LBB259_22 +# BB#21: + movl 28(%esp), %eax # 4-byte Reload +.LBB259_22: + movl %eax, 40(%edx) + movl 72(%esp), %eax # 4-byte Reload + js .LBB259_24 +# BB#23: + movl 32(%esp), %eax # 4-byte Reload +.LBB259_24: + movl %eax, 44(%edx) + movl 64(%esp), %eax # 4-byte Reload + js .LBB259_26 +# BB#25: + movl 36(%esp), %eax # 4-byte Reload +.LBB259_26: + movl %eax, 48(%edx) + movl 
88(%esp), %eax # 4-byte Reload + js .LBB259_28 +# BB#27: + movl 40(%esp), %eax # 4-byte Reload +.LBB259_28: + movl %eax, 52(%edx) + movl 96(%esp), %eax # 4-byte Reload + js .LBB259_30 +# BB#29: + movl 44(%esp), %eax # 4-byte Reload +.LBB259_30: + movl %eax, 56(%edx) + movl 108(%esp), %eax # 4-byte Reload + js .LBB259_32 +# BB#31: + movl 48(%esp), %eax # 4-byte Reload +.LBB259_32: + movl %eax, 60(%edx) + movl 100(%esp), %eax # 4-byte Reload + js .LBB259_34 +# BB#33: + movl 92(%esp), %eax # 4-byte Reload +.LBB259_34: + movl %eax, 64(%edx) + addl $2572, %esp # imm = 0xA0C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end259: + .size mcl_fp_montNF17L, .Lfunc_end259-mcl_fp_montNF17L + + .globl mcl_fp_montRed17L + .align 16, 0x90 + .type mcl_fp_montRed17L,@function +mcl_fp_montRed17L: # @mcl_fp_montRed17L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $1436, %esp # imm = 0x59C + calll .L260$pb +.L260$pb: + popl %eax +.Ltmp61: + addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp61-.L260$pb), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 1464(%esp), %edx + movl -4(%edx), %esi + movl %esi, 96(%esp) # 4-byte Spill + movl 1460(%esp), %ecx + movl (%ecx), %ebx + movl %ebx, 76(%esp) # 4-byte Spill + movl 4(%ecx), %edi + movl %edi, 80(%esp) # 4-byte Spill + imull %esi, %ebx + movl 132(%ecx), %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 128(%ecx), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 124(%ecx), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 120(%ecx), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 116(%ecx), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 112(%ecx), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 108(%ecx), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 104(%ecx), %esi + movl %esi, 156(%esp) # 4-byte Spill + movl 100(%ecx), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 96(%ecx), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 92(%ecx), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 88(%ecx), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 84(%ecx), %esi + movl %esi, 180(%esp) # 4-byte Spill + movl 80(%ecx), %edi + movl %edi, 196(%esp) # 4-byte Spill + movl 76(%ecx), %esi + movl %esi, 192(%esp) # 4-byte Spill + movl 72(%ecx), %esi + movl %esi, 204(%esp) # 4-byte Spill + movl 68(%ecx), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 64(%ecx), %ebp + movl %ebp, 176(%esp) # 4-byte Spill + movl 60(%ecx), %ebp + movl %ebp, 164(%esp) # 4-byte Spill + movl 56(%ecx), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 52(%ecx), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 48(%ecx), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 44(%ecx), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 40(%ecx), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 36(%ecx), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 28(%ecx), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 24(%ecx), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 20(%ecx), %ebp + movl 16(%ecx), %esi + movl 12(%ecx), %edi + movl 8(%ecx), %eax + movl %eax, 100(%esp) # 4-byte Spill + movl (%edx), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 64(%edx), %ecx + movl %ecx, 68(%esp) # 4-byte Spill + movl 60(%edx), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + movl 56(%edx), %ecx + movl %ecx, 60(%esp) # 4-byte Spill + movl 52(%edx), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 48(%edx), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 44(%edx), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 40(%edx), %ecx + 
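+# NOTE (editorial annotation, not part of the generated diff): mcl_fp_montRed17L
+# is compiler-generated Montgomery reduction for 17-limb (17 x 32-bit = 544-bit)
+# operands. The loads around this point cache the double-width input (from the
+# pointer at 1460(%esp)) and the modulus p (from the pointer at 1464(%esp)) on
+# the stack; -4(%edx), spilled to 96(%esp) above, appears to hold
+# n0 = -p^{-1} mod 2^32, the per-limb Montgomery factor used by the imull
+# instructions below.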
movl %ecx, 44(%esp) # 4-byte Spill + movl 36(%edx), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 32(%edx), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 28(%edx), %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 24(%edx), %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 20(%edx), %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 16(%edx), %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 8(%edx), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 4(%edx), %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl %ebx, (%esp) + leal 1360(%esp), %ecx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + movl 76(%esp), %eax # 4-byte Reload + addl 1360(%esp), %eax + movl 80(%esp), %ecx # 4-byte Reload + adcl 1364(%esp), %ecx + movl 100(%esp), %eax # 4-byte Reload + adcl 1368(%esp), %eax + movl %eax, 100(%esp) # 4-byte Spill + adcl 1372(%esp), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl 1376(%esp), %esi + movl %esi, 76(%esp) # 4-byte Spill + adcl 1380(%esp), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1384(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1388(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1392(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1396(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1400(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1404(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1408(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1412(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 1416(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1420(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1424(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1428(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + adcl $0, 204(%esp) # 4-byte Folded Spill + adcl $0, 192(%esp) # 4-byte Folded Spill + adcl $0, 196(%esp) # 4-byte Folded Spill + adcl $0, 180(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + movl 128(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + sbbl %edi, %edi + movl %ecx, %eax + movl %ecx, %esi + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1288(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + andl $1, %edi + movl %edi, %ecx + addl 1288(%esp), %esi + movl 100(%esp), %edx # 4-byte Reload + adcl 1292(%esp), %edx + movl 72(%esp), %eax # 4-byte Reload + adcl 1296(%esp), %eax + movl %eax, 72(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + adcl 1300(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 
4-byte Reload + adcl 1304(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1308(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1312(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1316(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1320(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1324(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1328(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1332(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %edi # 4-byte Reload + adcl 1336(%esp), %edi + movl 148(%esp), %eax # 4-byte Reload + adcl 1340(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1344(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1348(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1352(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1356(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + adcl $0, 192(%esp) # 4-byte Folded Spill + adcl $0, 196(%esp) # 4-byte Folded Spill + adcl $0, 180(%esp) # 4-byte Folded Spill + movl 184(%esp), %eax # 4-byte Reload + adcl $0, %eax + movl %eax, %esi + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 128(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl %edx, %ebp + movl %ebp, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1216(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1216(%esp), %ebp + movl 72(%esp), %ecx # 4-byte Reload + adcl 1220(%esp), %ecx + movl 76(%esp), %eax # 4-byte Reload + adcl 1224(%esp), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + adcl 1228(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1232(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1236(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1240(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1244(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1248(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1252(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1256(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + adcl 1260(%esp), %edi + movl %edi, 136(%esp) # 4-byte Spill + movl 148(%esp), %edi # 4-byte Reload + adcl 1264(%esp), %edi + movl 164(%esp), %eax # 4-byte Reload + adcl 1268(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 
1272(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1276(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1280(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1284(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + adcl $0, 196(%esp) # 4-byte Folded Spill + adcl $0, 180(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 184(%esp) # 4-byte Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + movl 144(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1144(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1144(%esp), %esi + movl 76(%esp), %ecx # 4-byte Reload + adcl 1148(%esp), %ecx + movl 80(%esp), %eax # 4-byte Reload + adcl 1152(%esp), %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + adcl 1156(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1160(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1164(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1168(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1172(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1176(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1180(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1184(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + adcl 1188(%esp), %edi + movl %edi, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1192(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1196(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1200(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1204(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1208(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 1212(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + adcl $0, 180(%esp) # 4-byte Folded Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + movl 188(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 144(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + 
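+# NOTE (editorial annotation): each reduction step below follows the same
+# pattern: imull forms q = t0 * n0 mod 2^32 from the current low limb, the
+# .LmulPv544x32 helper (presumably "multiply a 544-bit vector by a 32-bit
+# scalar") computes q*p into the stack frame, and the addl/adcl chain adds it
+# to the running value t so the low limb cancels and t shifts down one limb.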
imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1072(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1072(%esp), %esi + movl 80(%esp), %esi # 4-byte Reload + adcl 1076(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + adcl 1080(%esp), %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + adcl 1084(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1088(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1092(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1096(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1100(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1104(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1108(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 1112(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 1116(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1120(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1124(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1128(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1132(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 1136(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1140(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + adcl $0, 184(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 188(%esp) # 4-byte Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + movl 172(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + movl 152(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 1000(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 1000(%esp), %esi + movl 84(%esp), %ecx # 4-byte Reload + adcl 1004(%esp), %ecx + movl 88(%esp), %eax # 4-byte Reload + adcl 1008(%esp), %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + adcl 1012(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 1016(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 1020(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 1024(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 1028(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 1032(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 1036(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 
164(%esp), %eax # 4-byte Reload + adcl 1040(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 1044(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 1048(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 1052(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 1056(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 1060(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 1064(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 1068(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + adcl $0, 188(%esp) # 4-byte Folded Spill + adcl $0, 168(%esp) # 4-byte Folded Spill + adcl $0, %ebp + movl %ebp, 172(%esp) # 4-byte Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 152(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 928(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 928(%esp), %esi + movl 88(%esp), %esi # 4-byte Reload + adcl 932(%esp), %esi + movl 92(%esp), %eax # 4-byte Reload + adcl 936(%esp), %eax + movl %eax, 92(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 940(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 944(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 948(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 952(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 956(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 960(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 964(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 968(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 972(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 976(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 980(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 984(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 988(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 992(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 996(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %ebp # 4-byte Reload + adcl $0, %ebp + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + movl 160(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded 
Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %esi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 856(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 856(%esp), %esi + movl 92(%esp), %ecx # 4-byte Reload + adcl 860(%esp), %ecx + movl 108(%esp), %eax # 4-byte Reload + adcl 864(%esp), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 868(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 872(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 876(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 880(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 884(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 888(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 892(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 896(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 900(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 904(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 908(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 912(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 916(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 920(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + adcl 924(%esp), %ebp + movl %ebp, 168(%esp) # 4-byte Spill + adcl $0, 172(%esp) # 4-byte Folded Spill + adcl $0, 156(%esp) # 4-byte Folded Spill + adcl $0, %edi + movl %edi, 160(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + movl 124(%esp), %esi # 4-byte Reload + adcl $0, %esi + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + movl 96(%esp), %ebp # 4-byte Reload + imull %ebp, %eax + movl %eax, (%esp) + leal 784(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 784(%esp), %edi + movl 108(%esp), %ecx # 4-byte Reload + adcl 788(%esp), %ecx + movl 112(%esp), %eax # 4-byte Reload + adcl 792(%esp), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 796(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 800(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 804(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 808(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 812(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 816(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 820(%esp), %eax + movl %eax, 
200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 824(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 828(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 832(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 836(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 840(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 844(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 848(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 852(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 124(%esp) # 4-byte Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %eax + movl %ecx, %esi + imull %ebp, %eax + movl %eax, (%esp) + leal 712(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 712(%esp), %esi + movl 112(%esp), %ecx # 4-byte Reload + adcl 716(%esp), %ecx + movl 120(%esp), %eax # 4-byte Reload + adcl 720(%esp), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 140(%esp), %eax # 4-byte Reload + adcl 724(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 728(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 732(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 736(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 740(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 744(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 748(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 752(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 756(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %ebp # 4-byte Reload + adcl 760(%esp), %ebp + movl 184(%esp), %eax # 4-byte Reload + adcl 764(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 768(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 772(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 776(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + adcl 780(%esp), %edi + movl %edi, 156(%esp) # 4-byte Spill + adcl $0, 160(%esp) # 4-byte Folded Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %esi + movl %esi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 640(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte 
Reload + calll .LmulPv544x32 + addl 640(%esp), %esi + movl 120(%esp), %ecx # 4-byte Reload + adcl 644(%esp), %ecx + movl 140(%esp), %eax # 4-byte Reload + adcl 648(%esp), %eax + movl %eax, 140(%esp) # 4-byte Spill + movl 136(%esp), %eax # 4-byte Reload + adcl 652(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 656(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 660(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %eax # 4-byte Reload + adcl 664(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + movl 200(%esp), %eax # 4-byte Reload + adcl 668(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %edi # 4-byte Reload + adcl 672(%esp), %edi + movl 192(%esp), %esi # 4-byte Reload + adcl 676(%esp), %esi + movl 196(%esp), %eax # 4-byte Reload + adcl 680(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + adcl 684(%esp), %ebp + movl %ebp, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 688(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 692(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 696(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 700(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 704(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 708(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + adcl $0, 152(%esp) # 4-byte Folded Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %ebp + movl %ebp, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 568(%esp), %ecx + movl 1464(%esp), %eax + movl %eax, %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 568(%esp), %ebp + movl 140(%esp), %ecx # 4-byte Reload + adcl 572(%esp), %ecx + movl 136(%esp), %eax # 4-byte Reload + adcl 576(%esp), %eax + movl %eax, 136(%esp) # 4-byte Spill + movl 148(%esp), %eax # 4-byte Reload + adcl 580(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 584(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + movl 176(%esp), %ebp # 4-byte Reload + adcl 588(%esp), %ebp + movl 200(%esp), %eax # 4-byte Reload + adcl 592(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + adcl 596(%esp), %edi + movl %edi, 204(%esp) # 4-byte Spill + adcl 600(%esp), %esi + movl %esi, 192(%esp) # 4-byte Spill + movl 196(%esp), %esi # 4-byte Reload + adcl 604(%esp), %esi + movl 180(%esp), %eax # 4-byte Reload + adcl 608(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 612(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 616(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 620(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 624(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 628(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 632(%esp), %eax + movl %eax, 160(%esp) # 4-byte 
Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 636(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + adcl $0, 144(%esp) # 4-byte Folded Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, 100(%esp) # 4-byte Folded Spill + movl %ecx, %edi + movl %edi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 496(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 496(%esp), %edi + movl 136(%esp), %edi # 4-byte Reload + adcl 500(%esp), %edi + movl 148(%esp), %eax # 4-byte Reload + adcl 504(%esp), %eax + movl %eax, 148(%esp) # 4-byte Spill + movl 164(%esp), %eax # 4-byte Reload + adcl 508(%esp), %eax + movl %eax, 164(%esp) # 4-byte Spill + adcl 512(%esp), %ebp + movl %ebp, 176(%esp) # 4-byte Spill + movl 200(%esp), %ebp # 4-byte Reload + adcl 516(%esp), %ebp + movl 204(%esp), %eax # 4-byte Reload + adcl 520(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 524(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + adcl 528(%esp), %esi + movl %esi, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 532(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 536(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 540(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 544(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 548(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 552(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 556(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 560(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 564(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + adcl $0, 128(%esp) # 4-byte Folded Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 100(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %edi, %eax + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 424(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 424(%esp), %edi + movl 148(%esp), %ecx # 4-byte Reload + adcl 428(%esp), %ecx + movl 164(%esp), %edi # 4-byte Reload + adcl 432(%esp), %edi + movl 176(%esp), %eax # 4-byte Reload + adcl 436(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + adcl 440(%esp), %ebp + movl 204(%esp), %eax # 4-byte Reload + adcl 444(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 448(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 452(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 456(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %eax # 4-byte Reload + adcl 460(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 464(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 468(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax 
# 4-byte Reload + adcl 472(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 476(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 480(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 484(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 488(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 492(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + adcl $0, 132(%esp) # 4-byte Folded Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + adcl $0, %esi + movl %esi, 100(%esp) # 4-byte Spill + movl %ecx, %eax + movl %ecx, %esi + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 352(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 352(%esp), %esi + movl %edi, %ecx + adcl 356(%esp), %ecx + movl 176(%esp), %eax # 4-byte Reload + adcl 360(%esp), %eax + movl %eax, 176(%esp) # 4-byte Spill + adcl 364(%esp), %ebp + movl %ebp, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 368(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 372(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 376(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 380(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl 184(%esp), %edi # 4-byte Reload + adcl 384(%esp), %edi + movl 188(%esp), %eax # 4-byte Reload + adcl 388(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 392(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 396(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 400(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 404(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 408(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 412(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 416(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 420(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + adcl $0, 124(%esp) # 4-byte Folded Spill + adcl $0, 116(%esp) # 4-byte Folded Spill + movl 100(%esp), %esi # 4-byte Reload + adcl $0, %esi + movl %ecx, %eax + movl %ecx, %ebp + imull 96(%esp), %eax # 4-byte Folded Reload + movl %eax, (%esp) + leal 280(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 280(%esp), %ebp + movl 176(%esp), %ecx # 4-byte Reload + adcl 284(%esp), %ecx + movl 200(%esp), %eax # 4-byte Reload + adcl 288(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %eax # 4-byte Reload + adcl 292(%esp), %eax + movl %eax, 204(%esp) # 4-byte Spill + movl 192(%esp), %eax # 4-byte Reload + adcl 296(%esp), %eax + movl %eax, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 300(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 304(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + adcl 308(%esp), %edi + movl %edi, 184(%esp) # 4-byte Spill 
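+# NOTE (editorial annotation): the last reduction steps continue below; once
+# all 17 limbs have been processed, the subl/sbbl chain computes t - p and the
+# .LBB260_* branches select either t or t - p, so the stored result is the
+# fully reduced value in [0, p).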
+ movl 188(%esp), %eax # 4-byte Reload + adcl 312(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 316(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 320(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 324(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 328(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 332(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 336(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 340(%esp), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 344(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 348(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 116(%esp), %edi # 4-byte Reload + adcl $0, %edi + adcl $0, %esi + movl 96(%esp), %eax # 4-byte Reload + imull %ecx, %eax + movl %ecx, %ebp + movl %eax, (%esp) + leal 208(%esp), %ecx + movl 1464(%esp), %edx + movl 104(%esp), %ebx # 4-byte Reload + calll .LmulPv544x32 + addl 208(%esp), %ebp + movl 200(%esp), %eax # 4-byte Reload + adcl 212(%esp), %eax + movl %eax, 200(%esp) # 4-byte Spill + movl 204(%esp), %edx # 4-byte Reload + adcl 216(%esp), %edx + movl %edx, 204(%esp) # 4-byte Spill + movl 192(%esp), %ecx # 4-byte Reload + adcl 220(%esp), %ecx + movl %ecx, 192(%esp) # 4-byte Spill + movl 196(%esp), %eax # 4-byte Reload + adcl 224(%esp), %eax + movl %eax, 196(%esp) # 4-byte Spill + movl 180(%esp), %eax # 4-byte Reload + adcl 228(%esp), %eax + movl %eax, 180(%esp) # 4-byte Spill + movl %eax, %ebp + movl 184(%esp), %eax # 4-byte Reload + adcl 232(%esp), %eax + movl %eax, 184(%esp) # 4-byte Spill + movl 188(%esp), %eax # 4-byte Reload + adcl 236(%esp), %eax + movl %eax, 188(%esp) # 4-byte Spill + movl 168(%esp), %eax # 4-byte Reload + adcl 240(%esp), %eax + movl %eax, 168(%esp) # 4-byte Spill + movl 172(%esp), %eax # 4-byte Reload + adcl 244(%esp), %eax + movl %eax, 172(%esp) # 4-byte Spill + movl 156(%esp), %eax # 4-byte Reload + adcl 248(%esp), %eax + movl %eax, 156(%esp) # 4-byte Spill + movl 160(%esp), %eax # 4-byte Reload + adcl 252(%esp), %eax + movl %eax, 160(%esp) # 4-byte Spill + movl 152(%esp), %eax # 4-byte Reload + adcl 256(%esp), %eax + movl %eax, 152(%esp) # 4-byte Spill + movl 144(%esp), %eax # 4-byte Reload + adcl 260(%esp), %eax + movl %eax, 144(%esp) # 4-byte Spill + movl 128(%esp), %ebx # 4-byte Reload + adcl 264(%esp), %ebx + movl %ebx, 128(%esp) # 4-byte Spill + movl 132(%esp), %eax # 4-byte Reload + adcl 268(%esp), %eax + movl %eax, 132(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 272(%esp), %eax + movl %eax, 124(%esp) # 4-byte Spill + adcl 276(%esp), %edi + movl %edi, 116(%esp) # 4-byte Spill + adcl $0, %esi + movl %esi, 100(%esp) # 4-byte Spill + movl 200(%esp), %edi # 4-byte Reload + subl 16(%esp), %edi # 4-byte Folded Reload + sbbl 4(%esp), %edx # 4-byte Folded Reload + sbbl 8(%esp), %ecx # 4-byte Folded Reload + movl 196(%esp), %eax # 4-byte Reload + sbbl 12(%esp), %eax # 4-byte Folded Reload + sbbl 20(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 88(%esp) # 4-byte Spill + movl 184(%esp), %esi # 4-byte Reload + sbbl 24(%esp), %esi # 4-byte Folded Reload + movl %esi, 92(%esp) # 4-byte Spill + movl 188(%esp), %esi # 4-byte Reload + sbbl 28(%esp), %esi # 
4-byte Folded Reload + movl %esi, 96(%esp) # 4-byte Spill + movl 168(%esp), %esi # 4-byte Reload + sbbl 32(%esp), %esi # 4-byte Folded Reload + movl 172(%esp), %ebp # 4-byte Reload + sbbl 36(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 104(%esp) # 4-byte Spill + movl 156(%esp), %ebp # 4-byte Reload + sbbl 40(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 108(%esp) # 4-byte Spill + movl 160(%esp), %ebp # 4-byte Reload + sbbl 44(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 112(%esp) # 4-byte Spill + movl 152(%esp), %ebp # 4-byte Reload + sbbl 48(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 120(%esp) # 4-byte Spill + movl 144(%esp), %ebp # 4-byte Reload + sbbl 52(%esp), %ebp # 4-byte Folded Reload + movl %ebp, 136(%esp) # 4-byte Spill + sbbl 56(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 140(%esp) # 4-byte Spill + movl 132(%esp), %ebx # 4-byte Reload + sbbl 60(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 148(%esp) # 4-byte Spill + movl 124(%esp), %ebx # 4-byte Reload + sbbl 64(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 164(%esp) # 4-byte Spill + movl 116(%esp), %ebx # 4-byte Reload + sbbl 68(%esp), %ebx # 4-byte Folded Reload + movl %ebx, 176(%esp) # 4-byte Spill + movl 100(%esp), %ebx # 4-byte Reload + sbbl $0, %ebx + andl $1, %ebx + jne .LBB260_2 +# BB#1: + movl %esi, 168(%esp) # 4-byte Spill +.LBB260_2: + testb %bl, %bl + movl 200(%esp), %esi # 4-byte Reload + jne .LBB260_4 +# BB#3: + movl %edi, %esi +.LBB260_4: + movl 1456(%esp), %edi + movl %esi, (%edi) + movl 156(%esp), %esi # 4-byte Reload + movl 204(%esp), %ebx # 4-byte Reload + jne .LBB260_6 +# BB#5: + movl %edx, %ebx +.LBB260_6: + movl %ebx, 4(%edi) + movl 144(%esp), %ebx # 4-byte Reload + movl 192(%esp), %edx # 4-byte Reload + jne .LBB260_8 +# BB#7: + movl %ecx, %edx +.LBB260_8: + movl %edx, 8(%edi) + movl 132(%esp), %edx # 4-byte Reload + movl 196(%esp), %ecx # 4-byte Reload + jne .LBB260_10 +# BB#9: + movl %eax, %ecx +.LBB260_10: + movl %ecx, 12(%edi) + movl 124(%esp), %ecx # 4-byte Reload + movl 180(%esp), %eax # 4-byte Reload + jne .LBB260_12 +# BB#11: + movl 88(%esp), %eax # 4-byte Reload +.LBB260_12: + movl %eax, 16(%edi) + movl 188(%esp), %eax # 4-byte Reload + movl 184(%esp), %ebp # 4-byte Reload + jne .LBB260_14 +# BB#13: + movl 92(%esp), %ebp # 4-byte Reload +.LBB260_14: + movl %ebp, 20(%edi) + movl 152(%esp), %ebp # 4-byte Reload + jne .LBB260_16 +# BB#15: + movl 96(%esp), %eax # 4-byte Reload +.LBB260_16: + movl %eax, 24(%edi) + movl 168(%esp), %eax # 4-byte Reload + movl %eax, 28(%edi) + jne .LBB260_18 +# BB#17: + movl 104(%esp), %eax # 4-byte Reload + movl %eax, 172(%esp) # 4-byte Spill +.LBB260_18: + movl 172(%esp), %eax # 4-byte Reload + movl %eax, 32(%edi) + jne .LBB260_20 +# BB#19: + movl 108(%esp), %esi # 4-byte Reload +.LBB260_20: + movl %esi, 36(%edi) + jne .LBB260_22 +# BB#21: + movl 112(%esp), %eax # 4-byte Reload + movl %eax, 160(%esp) # 4-byte Spill +.LBB260_22: + movl 160(%esp), %esi # 4-byte Reload + movl %esi, 40(%edi) + movl 128(%esp), %eax # 4-byte Reload + jne .LBB260_24 +# BB#23: + movl 120(%esp), %ebp # 4-byte Reload +.LBB260_24: + movl %ebp, 44(%edi) + jne .LBB260_26 +# BB#25: + movl 136(%esp), %ebx # 4-byte Reload +.LBB260_26: + movl %ebx, 48(%edi) + jne .LBB260_28 +# BB#27: + movl 140(%esp), %eax # 4-byte Reload +.LBB260_28: + movl %eax, 52(%edi) + jne .LBB260_30 +# BB#29: + movl 148(%esp), %edx # 4-byte Reload +.LBB260_30: + movl %edx, 56(%edi) + movl 116(%esp), %eax # 4-byte Reload + jne .LBB260_32 +# BB#31: + movl 164(%esp), %ecx # 4-byte Reload +.LBB260_32: + movl %ecx, 
60(%edi) + jne .LBB260_34 +# BB#33: + movl 176(%esp), %eax # 4-byte Reload +.LBB260_34: + movl %eax, 64(%edi) + addl $1436, %esp # imm = 0x59C + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end260: + .size mcl_fp_montRed17L, .Lfunc_end260-mcl_fp_montRed17L + + .globl mcl_fp_addPre17L + .align 16, 0x90 + .type mcl_fp_addPre17L,@function +mcl_fp_addPre17L: # @mcl_fp_addPre17L +# BB#0: + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + movl 20(%esp), %ecx + addl (%ecx), %edx + adcl 4(%ecx), %esi + movl 8(%eax), %ebx + adcl 8(%ecx), %ebx + movl 16(%esp), %edi + movl %edx, (%edi) + movl 12(%ecx), %edx + movl %esi, 4(%edi) + movl 16(%ecx), %esi + adcl 12(%eax), %edx + adcl 16(%eax), %esi + movl %ebx, 8(%edi) + movl 20(%eax), %ebx + movl %edx, 12(%edi) + movl 20(%ecx), %edx + adcl %ebx, %edx + movl 24(%eax), %ebx + movl %esi, 16(%edi) + movl 24(%ecx), %esi + adcl %ebx, %esi + movl 28(%eax), %ebx + movl %edx, 20(%edi) + movl 28(%ecx), %edx + adcl %ebx, %edx + movl 32(%eax), %ebx + movl %esi, 24(%edi) + movl 32(%ecx), %esi + adcl %ebx, %esi + movl 36(%eax), %ebx + movl %edx, 28(%edi) + movl 36(%ecx), %edx + adcl %ebx, %edx + movl 40(%eax), %ebx + movl %esi, 32(%edi) + movl 40(%ecx), %esi + adcl %ebx, %esi + movl 44(%eax), %ebx + movl %edx, 36(%edi) + movl 44(%ecx), %edx + adcl %ebx, %edx + movl 48(%eax), %ebx + movl %esi, 40(%edi) + movl 48(%ecx), %esi + adcl %ebx, %esi + movl 52(%eax), %ebx + movl %edx, 44(%edi) + movl 52(%ecx), %edx + adcl %ebx, %edx + movl 56(%eax), %ebx + movl %esi, 48(%edi) + movl 56(%ecx), %esi + adcl %ebx, %esi + movl 60(%eax), %ebx + movl %edx, 52(%edi) + movl 60(%ecx), %edx + adcl %ebx, %edx + movl %esi, 56(%edi) + movl %edx, 60(%edi) + movl 64(%eax), %eax + movl 64(%ecx), %ecx + adcl %eax, %ecx + movl %ecx, 64(%edi) + sbbl %eax, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + retl +.Lfunc_end261: + .size mcl_fp_addPre17L, .Lfunc_end261-mcl_fp_addPre17L + + .globl mcl_fp_subPre17L + .align 16, 0x90 + .type mcl_fp_subPre17L,@function +mcl_fp_subPre17L: # @mcl_fp_subPre17L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl 24(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edi + xorl %eax, %eax + movl 28(%esp), %edx + subl (%edx), %esi + sbbl 4(%edx), %edi + movl 8(%ecx), %ebp + sbbl 8(%edx), %ebp + movl 20(%esp), %ebx + movl %esi, (%ebx) + movl 12(%ecx), %esi + sbbl 12(%edx), %esi + movl %edi, 4(%ebx) + movl 16(%ecx), %edi + sbbl 16(%edx), %edi + movl %ebp, 8(%ebx) + movl 20(%edx), %ebp + movl %esi, 12(%ebx) + movl 20(%ecx), %esi + sbbl %ebp, %esi + movl 24(%edx), %ebp + movl %edi, 16(%ebx) + movl 24(%ecx), %edi + sbbl %ebp, %edi + movl 28(%edx), %ebp + movl %esi, 20(%ebx) + movl 28(%ecx), %esi + sbbl %ebp, %esi + movl 32(%edx), %ebp + movl %edi, 24(%ebx) + movl 32(%ecx), %edi + sbbl %ebp, %edi + movl 36(%edx), %ebp + movl %esi, 28(%ebx) + movl 36(%ecx), %esi + sbbl %ebp, %esi + movl 40(%edx), %ebp + movl %edi, 32(%ebx) + movl 40(%ecx), %edi + sbbl %ebp, %edi + movl 44(%edx), %ebp + movl %esi, 36(%ebx) + movl 44(%ecx), %esi + sbbl %ebp, %esi + movl 48(%edx), %ebp + movl %edi, 40(%ebx) + movl 48(%ecx), %edi + sbbl %ebp, %edi + movl 52(%edx), %ebp + movl %esi, 44(%ebx) + movl 52(%ecx), %esi + sbbl %ebp, %esi + movl 56(%edx), %ebp + movl %edi, 48(%ebx) + movl 56(%ecx), %edi + sbbl %ebp, %edi + movl 60(%edx), %ebp + movl %esi, 52(%ebx) + movl 60(%ecx), %esi + sbbl %ebp, %esi + movl %edi, 56(%ebx) + movl %esi, 60(%ebx) + movl 64(%edx), %edx + movl 64(%ecx), %ecx + sbbl %edx, %ecx + 
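+# NOTE (editorial annotation): mcl_fp_addPre17L and mcl_fp_subPre17L above are
+# plain 17-limb add/subtract with no modular reduction; the trailing
+# sbbl/andl $1 sequence materializes the final carry (or borrow) as a 0/1
+# return value in %eax, which callers can use for overflow handling.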
movl %ecx, 64(%ebx) + sbbl $0, %eax + andl $1, %eax + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end262: + .size mcl_fp_subPre17L, .Lfunc_end262-mcl_fp_subPre17L + + .globl mcl_fp_shr1_17L + .align 16, 0x90 + .type mcl_fp_shr1_17L,@function +mcl_fp_shr1_17L: # @mcl_fp_shr1_17L +# BB#0: + pushl %esi + movl 12(%esp), %eax + movl (%eax), %edx + movl 4(%eax), %esi + shrdl $1, %esi, %edx + movl 8(%esp), %ecx + movl %edx, (%ecx) + movl 8(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 4(%ecx) + movl 12(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 8(%ecx) + movl 16(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 12(%ecx) + movl 20(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 16(%ecx) + movl 24(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 20(%ecx) + movl 28(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 24(%ecx) + movl 32(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 28(%ecx) + movl 36(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 32(%ecx) + movl 40(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 36(%ecx) + movl 44(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 40(%ecx) + movl 48(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 44(%ecx) + movl 52(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 48(%ecx) + movl 56(%eax), %edx + shrdl $1, %edx, %esi + movl %esi, 52(%ecx) + movl 60(%eax), %esi + shrdl $1, %esi, %edx + movl %edx, 56(%ecx) + movl 64(%eax), %eax + shrdl $1, %eax, %esi + movl %esi, 60(%ecx) + shrl %eax + movl %eax, 64(%ecx) + popl %esi + retl +.Lfunc_end263: + .size mcl_fp_shr1_17L, .Lfunc_end263-mcl_fp_shr1_17L + + .globl mcl_fp_add17L + .align 16, 0x90 + .type mcl_fp_add17L,@function +mcl_fp_add17L: # @mcl_fp_add17L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $60, %esp + movl 88(%esp), %esi + movl (%esi), %ecx + movl 4(%esi), %eax + movl 84(%esp), %edx + addl (%edx), %ecx + movl %ecx, %ebx + adcl 4(%edx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 8(%esi), %eax + adcl 8(%edx), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 12(%edx), %ecx + movl 16(%edx), %edi + adcl 12(%esi), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + adcl 16(%esi), %edi + movl %edi, 4(%esp) # 4-byte Spill + movl 20(%edx), %eax + adcl 20(%esi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 24(%edx), %eax + adcl 24(%esi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 28(%edx), %eax + adcl 28(%esi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 32(%edx), %eax + adcl 32(%esi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 36(%edx), %eax + adcl 36(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 40(%edx), %eax + adcl 40(%esi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 44(%edx), %eax + adcl 44(%esi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 48(%edx), %eax + adcl 48(%esi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 52(%edx), %eax + adcl 52(%esi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 56(%edx), %eax + adcl 56(%esi), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 60(%edx), %ebp + adcl 60(%esi), %ebp + movl 64(%edx), %edx + adcl 64(%esi), %edx + movl 80(%esp), %esi + movl %ebx, (%esi) + movl %ebx, %eax + movl 8(%esp), %ecx # 4-byte Reload + movl %ecx, 4(%esi) + movl 56(%esp), %ebx # 4-byte Reload + movl %ebx, 8(%esi) + movl 52(%esp), %ebx # 4-byte Reload + movl %ebx, 12(%esi) + movl %edi, 16(%esi) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 20(%esi) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 24(%esi) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 28(%esi) + movl 36(%esp), %edi # 4-byte Reload + 
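+# NOTE (editorial annotation): mcl_fp_add17L stores the raw 17-limb sum first,
+# then subtracts the modulus (the subl/sbbl chain below); if that subtraction
+# does not borrow, the %nocarry block (# BB#1) overwrites the output with the
+# reduced sum, giving a result in [0, p).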
movl %edi, 32(%esi) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 36(%esi) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 40(%esi) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 44(%esi) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 48(%esi) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 52(%esi) + movl 12(%esp), %edi # 4-byte Reload + movl %edi, 56(%esi) + movl %ebp, 60(%esi) + movl %edx, 64(%esi) + sbbl %ebx, %ebx + andl $1, %ebx + movl 92(%esp), %edi + subl (%edi), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 56(%esp), %eax # 4-byte Reload + sbbl 8(%edi), %eax + movl %eax, 56(%esp) # 4-byte Spill + movl 52(%esp), %eax # 4-byte Reload + sbbl 12(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 4(%esp), %eax # 4-byte Reload + sbbl 16(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload + sbbl 20(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + sbbl 24(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + sbbl 28(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + sbbl 36(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + sbbl 40(%edi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + sbbl 44(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + sbbl 48(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 16(%esp), %eax # 4-byte Reload + sbbl 52(%edi), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 12(%esp), %eax # 4-byte Reload + sbbl 56(%edi), %eax + movl %eax, 12(%esp) # 4-byte Spill + sbbl 60(%edi), %ebp + sbbl 64(%edi), %edx + sbbl $0, %ebx + testb $1, %bl + jne .LBB264_2 +# BB#1: # %nocarry + movl (%esp), %edi # 4-byte Reload + movl %edi, (%esi) + movl 8(%esp), %edi # 4-byte Reload + movl %edi, 4(%esi) + movl 56(%esp), %edi # 4-byte Reload + movl %edi, 8(%esi) + movl 52(%esp), %edi # 4-byte Reload + movl %edi, 12(%esi) + movl 4(%esp), %edi # 4-byte Reload + movl %edi, 16(%esi) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 20(%esi) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 24(%esi) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 28(%esi) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 32(%esi) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 36(%esi) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 40(%esi) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 44(%esi) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 48(%esi) + movl 16(%esp), %ecx # 4-byte Reload + movl %ecx, 52(%esi) + movl 12(%esp), %ecx # 4-byte Reload + movl %ecx, 56(%esi) + movl %ebp, 60(%esi) + movl %edx, 64(%esi) +.LBB264_2: # %carry + addl $60, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end264: + .size mcl_fp_add17L, .Lfunc_end264-mcl_fp_add17L + + .globl mcl_fp_addNF17L + .align 16, 0x90 + .type mcl_fp_addNF17L,@function +mcl_fp_addNF17L: # @mcl_fp_addNF17L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $132, %esp + movl 160(%esp), %eax + movl (%eax), %ecx + movl 4(%eax), %edx + movl 156(%esp), %esi + addl (%esi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + adcl 4(%esi), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 64(%eax), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 60(%eax), %ecx + movl %ecx, 
96(%esp) # 4-byte Spill + movl 56(%eax), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 52(%eax), %ebp + movl 48(%eax), %ecx + movl %ecx, 116(%esp) # 4-byte Spill + movl 44(%eax), %ecx + movl %ecx, 112(%esp) # 4-byte Spill + movl 40(%eax), %ecx + movl %ecx, 128(%esp) # 4-byte Spill + movl 36(%eax), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 32(%eax), %ecx + movl %ecx, 124(%esp) # 4-byte Spill + movl 28(%eax), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 24(%eax), %ecx + movl %ecx, 120(%esp) # 4-byte Spill + movl 20(%eax), %ebx + movl 16(%eax), %edi + movl 12(%eax), %edx + movl 8(%eax), %ecx + adcl 8(%esi), %ecx + movl %ecx, 64(%esp) # 4-byte Spill + adcl 12(%esi), %edx + movl %edx, 68(%esp) # 4-byte Spill + adcl 16(%esi), %edi + movl %edi, 72(%esp) # 4-byte Spill + adcl 20(%esi), %ebx + movl %ebx, 76(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + adcl 24(%esi), %eax + movl %eax, 120(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + adcl 28(%esi), %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + adcl 32(%esi), %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + adcl 36(%esi), %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 128(%esp), %eax # 4-byte Reload + adcl 40(%esi), %eax + movl %eax, 128(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + adcl 44(%esi), %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + adcl 48(%esi), %eax + movl %eax, 116(%esp) # 4-byte Spill + adcl 52(%esi), %ebp + movl %ebp, 80(%esp) # 4-byte Spill + movl 92(%esp), %ebp # 4-byte Reload + adcl 56(%esi), %ebp + movl %ebp, 92(%esp) # 4-byte Spill + movl 96(%esp), %ebp # 4-byte Reload + adcl 60(%esi), %ebp + movl %ebp, 96(%esp) # 4-byte Spill + movl 100(%esp), %ebp # 4-byte Reload + adcl 64(%esi), %ebp + movl %ebp, 100(%esp) # 4-byte Spill + movl 164(%esp), %esi + movl 84(%esp), %eax # 4-byte Reload + subl (%esi), %eax + movl %eax, (%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + sbbl 4(%esi), %eax + movl %eax, 4(%esp) # 4-byte Spill + sbbl 8(%esi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + sbbl 12(%esi), %edx + sbbl 16(%esi), %edi + movl %edi, 12(%esp) # 4-byte Spill + sbbl 20(%esi), %ebx + movl %ebx, 16(%esp) # 4-byte Spill + movl 120(%esp), %ebx # 4-byte Reload + sbbl 24(%esi), %ebx + movl %ebx, 20(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 28(%esi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + sbbl 32(%esi), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 36(%esi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 128(%esp), %ecx # 4-byte Reload + sbbl 40(%esi), %ecx + movl %ecx, 36(%esp) # 4-byte Spill + movl 112(%esp), %ecx # 4-byte Reload + sbbl 44(%esi), %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 116(%esp), %ecx # 4-byte Reload + sbbl 48(%esi), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + movl %eax, %ecx + sbbl 52(%esi), %ecx + movl %ecx, 48(%esp) # 4-byte Spill + movl 92(%esp), %eax # 4-byte Reload + movl %eax, %ecx + movl %eax, %ebp + sbbl 56(%esi), %ecx + movl %ecx, 52(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + movl %eax, %ecx + sbbl 60(%esi), %ecx + movl %ecx, 56(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + movl %eax, %ebx + sbbl 64(%esi), %ebx + movl %ebx, 60(%esp) # 4-byte Spill + movl %ebx, %esi + sarl $31, %esi + testl %esi, %esi + movl 84(%esp), %esi # 4-byte 
Reload + js .LBB265_2 +# BB#1: + movl (%esp), %esi # 4-byte Reload +.LBB265_2: + movl 152(%esp), %ebx + movl %esi, (%ebx) + movl 88(%esp), %eax # 4-byte Reload + js .LBB265_4 +# BB#3: + movl 4(%esp), %eax # 4-byte Reload +.LBB265_4: + movl %eax, 4(%ebx) + movl 108(%esp), %eax # 4-byte Reload + movl 76(%esp), %esi # 4-byte Reload + movl 64(%esp), %edi # 4-byte Reload + js .LBB265_6 +# BB#5: + movl 8(%esp), %edi # 4-byte Reload +.LBB265_6: + movl %edi, 8(%ebx) + movl 116(%esp), %edi # 4-byte Reload + movl 68(%esp), %ecx # 4-byte Reload + js .LBB265_8 +# BB#7: + movl %edx, %ecx +.LBB265_8: + movl %ecx, 12(%ebx) + movl 104(%esp), %ecx # 4-byte Reload + movl 72(%esp), %edx # 4-byte Reload + js .LBB265_10 +# BB#9: + movl 12(%esp), %edx # 4-byte Reload +.LBB265_10: + movl %edx, 16(%ebx) + movl %ebp, %edx + js .LBB265_12 +# BB#11: + movl 16(%esp), %esi # 4-byte Reload +.LBB265_12: + movl %esi, 20(%ebx) + movl 112(%esp), %ebp # 4-byte Reload + js .LBB265_14 +# BB#13: + movl 20(%esp), %esi # 4-byte Reload + movl %esi, 120(%esp) # 4-byte Spill +.LBB265_14: + movl 120(%esp), %esi # 4-byte Reload + movl %esi, 24(%ebx) + js .LBB265_16 +# BB#15: + movl 24(%esp), %ecx # 4-byte Reload +.LBB265_16: + movl %ecx, 28(%ebx) + js .LBB265_18 +# BB#17: + movl 28(%esp), %ecx # 4-byte Reload + movl %ecx, 124(%esp) # 4-byte Spill +.LBB265_18: + movl 124(%esp), %ecx # 4-byte Reload + movl %ecx, 32(%ebx) + js .LBB265_20 +# BB#19: + movl 32(%esp), %eax # 4-byte Reload +.LBB265_20: + movl %eax, 36(%ebx) + movl 100(%esp), %ecx # 4-byte Reload + js .LBB265_22 +# BB#21: + movl 36(%esp), %eax # 4-byte Reload + movl %eax, 128(%esp) # 4-byte Spill +.LBB265_22: + movl 128(%esp), %eax # 4-byte Reload + movl %eax, 40(%ebx) + js .LBB265_24 +# BB#23: + movl 40(%esp), %ebp # 4-byte Reload +.LBB265_24: + movl %ebp, 44(%ebx) + js .LBB265_26 +# BB#25: + movl 44(%esp), %edi # 4-byte Reload +.LBB265_26: + movl %edi, 48(%ebx) + movl 80(%esp), %eax # 4-byte Reload + js .LBB265_28 +# BB#27: + movl 48(%esp), %eax # 4-byte Reload +.LBB265_28: + movl %eax, 52(%ebx) + js .LBB265_30 +# BB#29: + movl 52(%esp), %edx # 4-byte Reload +.LBB265_30: + movl %edx, 56(%ebx) + movl 96(%esp), %eax # 4-byte Reload + js .LBB265_32 +# BB#31: + movl 56(%esp), %eax # 4-byte Reload +.LBB265_32: + movl %eax, 60(%ebx) + js .LBB265_34 +# BB#33: + movl 60(%esp), %ecx # 4-byte Reload +.LBB265_34: + movl %ecx, 64(%ebx) + addl $132, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end265: + .size mcl_fp_addNF17L, .Lfunc_end265-mcl_fp_addNF17L + + .globl mcl_fp_sub17L + .align 16, 0x90 + .type mcl_fp_sub17L,@function +mcl_fp_sub17L: # @mcl_fp_sub17L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $64, %esp + movl 88(%esp), %esi + movl (%esi), %eax + movl 4(%esi), %ecx + xorl %ebx, %ebx + movl 92(%esp), %edi + subl (%edi), %eax + movl %eax, 56(%esp) # 4-byte Spill + sbbl 4(%edi), %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 8(%esi), %eax + sbbl 8(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 12(%esi), %eax + sbbl 12(%edi), %eax + movl %eax, 60(%esp) # 4-byte Spill + movl 16(%esi), %eax + sbbl 16(%edi), %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 20(%esi), %eax + sbbl 20(%edi), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 24(%esi), %eax + sbbl 24(%edi), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 28(%esi), %eax + sbbl 28(%edi), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 32(%esi), %eax + sbbl 32(%edi), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 36(%esi), %eax + sbbl 36(%edi), %eax + movl %eax, 
28(%esp) # 4-byte Spill + movl 40(%esi), %eax + sbbl 40(%edi), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 44(%esi), %eax + sbbl 44(%edi), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 48(%esi), %edx + sbbl 48(%edi), %edx + movl %edx, 12(%esp) # 4-byte Spill + movl 52(%esi), %ecx + sbbl 52(%edi), %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 56(%esi), %eax + sbbl 56(%edi), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 60(%esi), %ebp + sbbl 60(%edi), %ebp + movl 64(%esi), %esi + sbbl 64(%edi), %esi + sbbl $0, %ebx + testb $1, %bl + movl 84(%esp), %ebx + movl 56(%esp), %edi # 4-byte Reload + movl %edi, (%ebx) + movl 16(%esp), %edi # 4-byte Reload + movl %edi, 4(%ebx) + movl 48(%esp), %edi # 4-byte Reload + movl %edi, 8(%ebx) + movl 60(%esp), %edi # 4-byte Reload + movl %edi, 12(%ebx) + movl 52(%esp), %edi # 4-byte Reload + movl %edi, 16(%ebx) + movl 44(%esp), %edi # 4-byte Reload + movl %edi, 20(%ebx) + movl 40(%esp), %edi # 4-byte Reload + movl %edi, 24(%ebx) + movl 36(%esp), %edi # 4-byte Reload + movl %edi, 28(%ebx) + movl 32(%esp), %edi # 4-byte Reload + movl %edi, 32(%ebx) + movl 28(%esp), %edi # 4-byte Reload + movl %edi, 36(%ebx) + movl 24(%esp), %edi # 4-byte Reload + movl %edi, 40(%ebx) + movl 20(%esp), %edi # 4-byte Reload + movl %edi, 44(%ebx) + movl %edx, 48(%ebx) + movl %ecx, 52(%ebx) + movl %eax, 56(%ebx) + movl %ebp, 60(%ebx) + movl %esi, 64(%ebx) + je .LBB266_2 +# BB#1: # %carry + movl %esi, (%esp) # 4-byte Spill + movl 96(%esp), %esi + movl 56(%esp), %ecx # 4-byte Reload + addl (%esi), %ecx + movl %ecx, (%ebx) + movl 16(%esp), %edx # 4-byte Reload + adcl 4(%esi), %edx + movl %edx, 4(%ebx) + movl 48(%esp), %edi # 4-byte Reload + adcl 8(%esi), %edi + movl 12(%esi), %eax + adcl 60(%esp), %eax # 4-byte Folded Reload + movl %edi, 8(%ebx) + movl 16(%esi), %ecx + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%ebx) + movl 20(%esi), %eax + adcl 44(%esp), %eax # 4-byte Folded Reload + movl %ecx, 16(%ebx) + movl 24(%esi), %ecx + adcl 40(%esp), %ecx # 4-byte Folded Reload + movl %eax, 20(%ebx) + movl 28(%esi), %eax + adcl 36(%esp), %eax # 4-byte Folded Reload + movl %ecx, 24(%ebx) + movl 32(%esi), %ecx + adcl 32(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%ebx) + movl 36(%esi), %eax + adcl 28(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%ebx) + movl 40(%esi), %ecx + adcl 24(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%ebx) + movl 44(%esi), %eax + adcl 20(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%ebx) + movl 48(%esi), %ecx + adcl 12(%esp), %ecx # 4-byte Folded Reload + movl %eax, 44(%ebx) + movl 52(%esi), %eax + adcl 8(%esp), %eax # 4-byte Folded Reload + movl %ecx, 48(%ebx) + movl 56(%esi), %ecx + adcl 4(%esp), %ecx # 4-byte Folded Reload + movl %eax, 52(%ebx) + movl %ecx, 56(%ebx) + movl 60(%esi), %eax + adcl %ebp, %eax + movl %eax, 60(%ebx) + movl 64(%esi), %eax + adcl (%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%ebx) +.LBB266_2: # %nocarry + addl $64, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end266: + .size mcl_fp_sub17L, .Lfunc_end266-mcl_fp_sub17L + + .globl mcl_fp_subNF17L + .align 16, 0x90 + .type mcl_fp_subNF17L,@function +mcl_fp_subNF17L: # @mcl_fp_subNF17L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $112, %esp + movl 136(%esp), %ecx + movl (%ecx), %esi + movl 4(%ecx), %edx + movl 140(%esp), %edi + subl (%edi), %esi + movl %esi, 68(%esp) # 4-byte Spill + sbbl 4(%edi), %edx + movl %edx, 72(%esp) # 4-byte Spill + movl 64(%ecx), %eax + movl %eax, 84(%esp) # 4-byte 
Spill + movl 60(%ecx), %edx + movl %edx, 104(%esp) # 4-byte Spill + movl 56(%ecx), %edx + movl %edx, 100(%esp) # 4-byte Spill + movl 52(%ecx), %edx + movl %edx, 96(%esp) # 4-byte Spill + movl 48(%ecx), %edx + movl %edx, 92(%esp) # 4-byte Spill + movl 44(%ecx), %edx + movl %edx, 88(%esp) # 4-byte Spill + movl 40(%ecx), %esi + movl %esi, 108(%esp) # 4-byte Spill + movl 36(%ecx), %edx + movl %edx, 80(%esp) # 4-byte Spill + movl 32(%ecx), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 28(%ecx), %ebp + movl 24(%ecx), %ebx + movl 20(%ecx), %esi + movl 16(%ecx), %edx + movl 12(%ecx), %eax + movl 8(%ecx), %ecx + sbbl 8(%edi), %ecx + movl %ecx, 44(%esp) # 4-byte Spill + sbbl 12(%edi), %eax + movl %eax, 48(%esp) # 4-byte Spill + sbbl 16(%edi), %edx + movl %edx, 52(%esp) # 4-byte Spill + sbbl 20(%edi), %esi + movl %esi, 56(%esp) # 4-byte Spill + sbbl 24(%edi), %ebx + movl %ebx, 60(%esp) # 4-byte Spill + sbbl 28(%edi), %ebp + movl %ebp, 64(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + sbbl 32(%edi), %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 80(%esp), %ecx # 4-byte Reload + sbbl 36(%edi), %ecx + movl %ecx, 80(%esp) # 4-byte Spill + movl 108(%esp), %ecx # 4-byte Reload + sbbl 40(%edi), %ecx + movl %ecx, 108(%esp) # 4-byte Spill + movl 88(%esp), %ecx # 4-byte Reload + sbbl 44(%edi), %ecx + movl %ecx, 88(%esp) # 4-byte Spill + movl 92(%esp), %ecx # 4-byte Reload + sbbl 48(%edi), %ecx + movl %ecx, 92(%esp) # 4-byte Spill + movl 96(%esp), %ecx # 4-byte Reload + sbbl 52(%edi), %ecx + movl %ecx, 96(%esp) # 4-byte Spill + movl 100(%esp), %ecx # 4-byte Reload + sbbl 56(%edi), %ecx + movl %ecx, 100(%esp) # 4-byte Spill + movl 104(%esp), %ecx # 4-byte Reload + sbbl 60(%edi), %ecx + movl %ecx, 104(%esp) # 4-byte Spill + movl 84(%esp), %ecx # 4-byte Reload + sbbl 64(%edi), %ecx + movl %ecx, 84(%esp) # 4-byte Spill + movl %ecx, %ebx + sarl $31, %ebx + movl %ebx, %edx + shldl $1, %ecx, %edx + movl 144(%esp), %eax + movl 28(%eax), %ecx + andl %edx, %ecx + movl %ecx, 28(%esp) # 4-byte Spill + movl 12(%eax), %ecx + andl %edx, %ecx + movl %ecx, 16(%esp) # 4-byte Spill + movl 4(%eax), %ecx + andl %edx, %ecx + movl %ecx, %esi + andl (%eax), %edx + movl 64(%eax), %ecx + andl %ebx, %ecx + movl %ecx, 40(%esp) # 4-byte Spill + movl 60(%eax), %ecx + andl %ebx, %ecx + movl %ecx, 36(%esp) # 4-byte Spill + roll %ebx + movl 56(%eax), %ecx + andl %ebx, %ecx + movl %ecx, 32(%esp) # 4-byte Spill + movl 52(%eax), %ecx + andl %ebx, %ecx + movl %ecx, 24(%esp) # 4-byte Spill + movl 48(%eax), %ecx + andl %ebx, %ecx + movl %ecx, 20(%esp) # 4-byte Spill + movl 44(%eax), %ecx + andl %ebx, %ecx + movl %ecx, 12(%esp) # 4-byte Spill + movl 40(%eax), %ecx + andl %ebx, %ecx + movl %ecx, 8(%esp) # 4-byte Spill + movl 36(%eax), %ecx + andl %ebx, %ecx + movl %ecx, 4(%esp) # 4-byte Spill + movl 32(%eax), %ecx + andl %ebx, %ecx + movl %ecx, (%esp) # 4-byte Spill + movl 24(%eax), %ebp + andl %ebx, %ebp + movl 20(%eax), %edi + andl %ebx, %edi + movl 16(%eax), %ecx + andl %ebx, %ecx + andl 8(%eax), %ebx + addl 68(%esp), %edx # 4-byte Folded Reload + movl %esi, %eax + adcl 72(%esp), %eax # 4-byte Folded Reload + movl 132(%esp), %esi + movl %edx, (%esi) + adcl 44(%esp), %ebx # 4-byte Folded Reload + movl %eax, 4(%esi) + movl 16(%esp), %eax # 4-byte Reload + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %ebx, 8(%esi) + adcl 52(%esp), %ecx # 4-byte Folded Reload + movl %eax, 12(%esi) + adcl 56(%esp), %edi # 4-byte Folded Reload + movl %ecx, 16(%esi) + adcl 60(%esp), %ebp # 4-byte Folded Reload + movl %edi, 20(%esi) + movl 28(%esp), 
%eax # 4-byte Reload + adcl 64(%esp), %eax # 4-byte Folded Reload + movl %ebp, 24(%esi) + movl (%esp), %ecx # 4-byte Reload + adcl 76(%esp), %ecx # 4-byte Folded Reload + movl %eax, 28(%esi) + movl 4(%esp), %eax # 4-byte Reload + adcl 80(%esp), %eax # 4-byte Folded Reload + movl %ecx, 32(%esi) + movl 8(%esp), %ecx # 4-byte Reload + adcl 108(%esp), %ecx # 4-byte Folded Reload + movl %eax, 36(%esi) + movl 12(%esp), %eax # 4-byte Reload + adcl 88(%esp), %eax # 4-byte Folded Reload + movl %ecx, 40(%esi) + movl 20(%esp), %ecx # 4-byte Reload + adcl 92(%esp), %ecx # 4-byte Folded Reload + movl %eax, 44(%esi) + movl 24(%esp), %eax # 4-byte Reload + adcl 96(%esp), %eax # 4-byte Folded Reload + movl %ecx, 48(%esi) + movl 32(%esp), %ecx # 4-byte Reload + adcl 100(%esp), %ecx # 4-byte Folded Reload + movl %eax, 52(%esi) + movl 36(%esp), %eax # 4-byte Reload + adcl 104(%esp), %eax # 4-byte Folded Reload + movl %ecx, 56(%esi) + movl %eax, 60(%esi) + movl 40(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %eax, 64(%esi) + addl $112, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end267: + .size mcl_fp_subNF17L, .Lfunc_end267-mcl_fp_subNF17L + + .globl mcl_fpDbl_add17L + .align 16, 0x90 + .type mcl_fpDbl_add17L,@function +mcl_fpDbl_add17L: # @mcl_fpDbl_add17L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $128, %esp + movl 156(%esp), %ecx + movl 152(%esp), %edx + movl 12(%edx), %edi + movl 16(%edx), %esi + movl 8(%ecx), %ebx + movl (%ecx), %ebp + addl (%edx), %ebp + movl 148(%esp), %eax + movl %ebp, (%eax) + movl 4(%ecx), %ebp + adcl 4(%edx), %ebp + adcl 8(%edx), %ebx + adcl 12(%ecx), %edi + adcl 16(%ecx), %esi + movl %ebp, 4(%eax) + movl 76(%ecx), %ebp + movl %ebx, 8(%eax) + movl 20(%ecx), %ebx + movl %edi, 12(%eax) + movl 20(%edx), %edi + adcl %ebx, %edi + movl 24(%ecx), %ebx + movl %esi, 16(%eax) + movl 24(%edx), %esi + adcl %ebx, %esi + movl 28(%ecx), %ebx + movl %edi, 20(%eax) + movl 28(%edx), %edi + adcl %ebx, %edi + movl 32(%ecx), %ebx + movl %esi, 24(%eax) + movl 32(%edx), %esi + adcl %ebx, %esi + movl 36(%ecx), %ebx + movl %edi, 28(%eax) + movl 36(%edx), %edi + adcl %ebx, %edi + movl 40(%ecx), %ebx + movl %esi, 32(%eax) + movl 40(%edx), %esi + adcl %ebx, %esi + movl 44(%ecx), %ebx + movl %edi, 36(%eax) + movl 44(%edx), %edi + adcl %ebx, %edi + movl 48(%ecx), %ebx + movl %esi, 40(%eax) + movl 48(%edx), %esi + adcl %ebx, %esi + movl 52(%ecx), %ebx + movl %edi, 44(%eax) + movl 52(%edx), %edi + adcl %ebx, %edi + movl 56(%ecx), %ebx + movl %esi, 48(%eax) + movl 56(%edx), %esi + adcl %ebx, %esi + movl 60(%ecx), %ebx + movl %edi, 52(%eax) + movl 60(%edx), %edi + adcl %ebx, %edi + movl 64(%ecx), %ebx + movl %esi, 56(%eax) + movl 64(%edx), %esi + adcl %ebx, %esi + movl 68(%ecx), %ebx + movl %edi, 60(%eax) + movl 68(%edx), %edi + adcl %ebx, %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 72(%ecx), %edi + movl %esi, 64(%eax) + movl 72(%edx), %eax + adcl %edi, %eax + movl %eax, 96(%esp) # 4-byte Spill + movl 76(%edx), %eax + adcl %ebp, %eax + movl %eax, 100(%esp) # 4-byte Spill + movl 80(%ecx), %esi + movl 80(%edx), %eax + adcl %esi, %eax + movl %eax, 104(%esp) # 4-byte Spill + movl 84(%ecx), %esi + movl 84(%edx), %eax + adcl %esi, %eax + movl %eax, 108(%esp) # 4-byte Spill + movl 88(%ecx), %esi + movl 88(%edx), %eax + adcl %esi, %eax + movl %eax, 112(%esp) # 4-byte Spill + movl 92(%ecx), %esi + movl 92(%edx), %eax + adcl %esi, %eax + movl %eax, 116(%esp) # 4-byte Spill + movl 96(%ecx), %esi + movl 96(%edx), %eax + adcl %esi, %eax + 
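
mcl_fp_subNF17L above computes x - y mod p without branching on the borrow: sarl $31 turns the final borrow word into an all-ones or all-zero mask, the modulus limbs are ANDed with that mask, and a single adcl chain adds p back exactly when the subtraction underflowed. A portable 4-limb sketch of the same idea (illustrative helper names, not the library's code):

```cpp
#include <cstdint>

// Branch-free "subtract, then conditionally add p back" in the style of
// mcl_fp_subNF17L, shortened to 4 limbs. If the subtraction borrows,
// `mask` is all ones and p is added back; otherwise the add is a no-op.
static void fp_subNF(uint32_t z[4], const uint32_t x[4],
                     const uint32_t y[4], const uint32_t p[4])
{
    uint64_t borrow = 0;
    for (int i = 0; i < 4; i++) {
        uint64_t d = (uint64_t)x[i] - y[i] - borrow;
        z[i] = (uint32_t)d;
        borrow = (d >> 32) & 1; // bit 32 set iff the subtraction wrapped
    }
    const uint32_t mask = (uint32_t)(0 - (uint32_t)borrow); // sarl $31 idiom
    uint64_t carry = 0;
    for (int i = 0; i < 4; i++) {
        uint64_t s = (uint64_t)z[i] + (p[i] & mask) + carry;
        z[i] = (uint32_t)s;
        carry = s >> 32;
    }
}
```
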
movl %eax, 120(%esp) # 4-byte Spill + movl 100(%ecx), %esi + movl 100(%edx), %eax + adcl %esi, %eax + movl %eax, 124(%esp) # 4-byte Spill + movl 104(%ecx), %esi + movl 104(%edx), %eax + adcl %esi, %eax + movl %eax, 76(%esp) # 4-byte Spill + movl 108(%ecx), %esi + movl 108(%edx), %eax + adcl %esi, %eax + movl %eax, 80(%esp) # 4-byte Spill + movl 112(%ecx), %esi + movl 112(%edx), %eax + adcl %esi, %eax + movl %eax, 84(%esp) # 4-byte Spill + movl 116(%ecx), %esi + movl 116(%edx), %eax + adcl %esi, %eax + movl %eax, 88(%esp) # 4-byte Spill + movl 120(%ecx), %edi + movl 120(%edx), %esi + adcl %edi, %esi + movl %esi, 64(%esp) # 4-byte Spill + movl 124(%ecx), %ebx + movl 124(%edx), %edi + adcl %ebx, %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 128(%ecx), %ebx + movl 128(%edx), %ebp + adcl %ebx, %ebp + movl %ebp, 72(%esp) # 4-byte Spill + movl 132(%ecx), %ecx + movl 132(%edx), %edx + adcl %ecx, %edx + sbbl %ecx, %ecx + andl $1, %ecx + movl 160(%esp), %ebx + movl 92(%esp), %eax # 4-byte Reload + subl (%ebx), %eax + movl %eax, 48(%esp) # 4-byte Spill + movl 96(%esp), %eax # 4-byte Reload + sbbl 4(%ebx), %eax + movl %eax, 44(%esp) # 4-byte Spill + movl 100(%esp), %eax # 4-byte Reload + sbbl 8(%ebx), %eax + movl %eax, 40(%esp) # 4-byte Spill + movl 104(%esp), %eax # 4-byte Reload + sbbl 12(%ebx), %eax + movl %eax, 36(%esp) # 4-byte Spill + movl 108(%esp), %eax # 4-byte Reload + sbbl 16(%ebx), %eax + movl %eax, 32(%esp) # 4-byte Spill + movl 112(%esp), %eax # 4-byte Reload + sbbl 20(%ebx), %eax + movl %eax, 28(%esp) # 4-byte Spill + movl 116(%esp), %eax # 4-byte Reload + sbbl 24(%ebx), %eax + movl %eax, 24(%esp) # 4-byte Spill + movl 120(%esp), %eax # 4-byte Reload + sbbl 28(%ebx), %eax + movl %eax, 20(%esp) # 4-byte Spill + movl 124(%esp), %eax # 4-byte Reload + sbbl 32(%ebx), %eax + movl %eax, 16(%esp) # 4-byte Spill + movl 76(%esp), %eax # 4-byte Reload + sbbl 36(%ebx), %eax + movl %eax, 12(%esp) # 4-byte Spill + movl 80(%esp), %eax # 4-byte Reload + sbbl 40(%ebx), %eax + movl %eax, 8(%esp) # 4-byte Spill + movl 84(%esp), %eax # 4-byte Reload + sbbl 44(%ebx), %eax + movl %eax, 4(%esp) # 4-byte Spill + movl 88(%esp), %eax # 4-byte Reload + sbbl 48(%ebx), %eax + movl %eax, (%esp) # 4-byte Spill + sbbl 52(%ebx), %esi + movl %esi, 52(%esp) # 4-byte Spill + sbbl 56(%ebx), %edi + movl %edi, 56(%esp) # 4-byte Spill + sbbl 60(%ebx), %ebp + movl %ebp, 60(%esp) # 4-byte Spill + movl %edx, %ebp + sbbl 64(%ebx), %ebp + sbbl $0, %ecx + andl $1, %ecx + jne .LBB268_2 +# BB#1: + movl %ebp, %edx +.LBB268_2: + testb %cl, %cl + movl 92(%esp), %eax # 4-byte Reload + movl 88(%esp), %esi # 4-byte Reload + movl 84(%esp), %edi # 4-byte Reload + movl 80(%esp), %ebx # 4-byte Reload + movl 76(%esp), %ebp # 4-byte Reload + jne .LBB268_4 +# BB#3: + movl (%esp), %esi # 4-byte Reload + movl 4(%esp), %edi # 4-byte Reload + movl 8(%esp), %ebx # 4-byte Reload + movl 12(%esp), %ebp # 4-byte Reload + movl 16(%esp), %eax # 4-byte Reload + movl %eax, 124(%esp) # 4-byte Spill + movl 20(%esp), %eax # 4-byte Reload + movl %eax, 120(%esp) # 4-byte Spill + movl 24(%esp), %eax # 4-byte Reload + movl %eax, 116(%esp) # 4-byte Spill + movl 28(%esp), %eax # 4-byte Reload + movl %eax, 112(%esp) # 4-byte Spill + movl 32(%esp), %eax # 4-byte Reload + movl %eax, 108(%esp) # 4-byte Spill + movl 36(%esp), %eax # 4-byte Reload + movl %eax, 104(%esp) # 4-byte Spill + movl 40(%esp), %eax # 4-byte Reload + movl %eax, 100(%esp) # 4-byte Spill + movl 44(%esp), %eax # 4-byte Reload + movl %eax, 96(%esp) # 4-byte Spill + movl 48(%esp), %eax # 4-byte Reload 
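
mcl_fp_add17L earlier and the mcl_fpDbl_add17L selection being set up here share one shape: a long adcl chain computes the raw sum, a trial sbbl chain subtracts the modulus, and the final borrow (together with the carry out of the addition) decides whether the raw or the reduced limbs are stored, which is what the nocarry/carry labels mark. A compact 4-limb sketch under those assumptions (hypothetical names, not the generated code):

```cpp
#include <cstdint>

// Modular add in the style of mcl_fp_add17L: z = x + y, then keep z - p
// instead whenever x + y >= p.
static void fp_add(uint32_t z[4], const uint32_t x[4],
                   const uint32_t y[4], const uint32_t p[4])
{
    uint64_t carry = 0;
    uint32_t sum[4];
    for (int i = 0; i < 4; i++) {          // the adcl chain
        uint64_t s = (uint64_t)x[i] + y[i] + carry;
        sum[i] = (uint32_t)s;
        carry = s >> 32;
    }
    uint64_t borrow = 0;
    uint32_t red[4];
    for (int i = 0; i < 4; i++) {          // the trial sbbl chain
        uint64_t d = (uint64_t)sum[i] - p[i] - borrow;
        red[i] = (uint32_t)d;
        borrow = (d >> 32) & 1;
    }
    // Reduced result is valid when the add carried out or the trial
    // subtraction did not borrow (i.e. x + y >= p).
    const bool useReduced = (carry != 0) || (borrow == 0);
    for (int i = 0; i < 4; i++) z[i] = useReduced ? red[i] : sum[i];
}
```
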
+.LBB268_4: + movl 148(%esp), %ecx + movl %eax, 68(%ecx) + movl %ecx, %eax + movl 96(%esp), %ecx # 4-byte Reload + movl %ecx, 72(%eax) + movl 100(%esp), %ecx # 4-byte Reload + movl %ecx, 76(%eax) + movl 104(%esp), %ecx # 4-byte Reload + movl %ecx, 80(%eax) + movl 108(%esp), %ecx # 4-byte Reload + movl %ecx, 84(%eax) + movl 112(%esp), %ecx # 4-byte Reload + movl %ecx, 88(%eax) + movl 116(%esp), %ecx # 4-byte Reload + movl %ecx, 92(%eax) + movl 120(%esp), %ecx # 4-byte Reload + movl %ecx, 96(%eax) + movl 124(%esp), %ecx # 4-byte Reload + movl %ecx, 100(%eax) + movl %ebp, 104(%eax) + movl %ebx, 108(%eax) + movl %edi, 112(%eax) + movl %esi, 116(%eax) + movl 72(%esp), %ecx # 4-byte Reload + movl 64(%esp), %esi # 4-byte Reload + jne .LBB268_6 +# BB#5: + movl 52(%esp), %esi # 4-byte Reload +.LBB268_6: + movl %esi, 120(%eax) + movl 68(%esp), %esi # 4-byte Reload + jne .LBB268_8 +# BB#7: + movl 56(%esp), %esi # 4-byte Reload +.LBB268_8: + movl %esi, 124(%eax) + jne .LBB268_10 +# BB#9: + movl 60(%esp), %ecx # 4-byte Reload +.LBB268_10: + movl %ecx, 128(%eax) + movl %edx, 132(%eax) + addl $128, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end268: + .size mcl_fpDbl_add17L, .Lfunc_end268-mcl_fpDbl_add17L + + .globl mcl_fpDbl_sub17L + .align 16, 0x90 + .type mcl_fpDbl_sub17L,@function +mcl_fpDbl_sub17L: # @mcl_fpDbl_sub17L +# BB#0: + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + subl $116, %esp + movl 140(%esp), %edx + movl (%edx), %eax + movl 4(%edx), %edi + movl 144(%esp), %esi + subl (%esi), %eax + sbbl 4(%esi), %edi + movl 8(%edx), %ebx + sbbl 8(%esi), %ebx + movl 136(%esp), %ecx + movl %eax, (%ecx) + movl 12(%edx), %eax + sbbl 12(%esi), %eax + movl %edi, 4(%ecx) + movl 16(%edx), %edi + sbbl 16(%esi), %edi + movl %ebx, 8(%ecx) + movl 20(%esi), %ebx + movl %eax, 12(%ecx) + movl 20(%edx), %eax + sbbl %ebx, %eax + movl 24(%esi), %ebx + movl %edi, 16(%ecx) + movl 24(%edx), %edi + sbbl %ebx, %edi + movl 28(%esi), %ebx + movl %eax, 20(%ecx) + movl 28(%edx), %eax + sbbl %ebx, %eax + movl 32(%esi), %ebx + movl %edi, 24(%ecx) + movl 32(%edx), %edi + sbbl %ebx, %edi + movl 36(%esi), %ebx + movl %eax, 28(%ecx) + movl 36(%edx), %eax + sbbl %ebx, %eax + movl 40(%esi), %ebx + movl %edi, 32(%ecx) + movl 40(%edx), %edi + sbbl %ebx, %edi + movl 44(%esi), %ebx + movl %eax, 36(%ecx) + movl 44(%edx), %eax + sbbl %ebx, %eax + movl 48(%esi), %ebx + movl %edi, 40(%ecx) + movl 48(%edx), %edi + sbbl %ebx, %edi + movl 52(%esi), %ebx + movl %eax, 44(%ecx) + movl 52(%edx), %eax + sbbl %ebx, %eax + movl 56(%esi), %ebx + movl %edi, 48(%ecx) + movl 56(%edx), %edi + sbbl %ebx, %edi + movl 60(%esi), %ebx + movl %eax, 52(%ecx) + movl 60(%edx), %eax + sbbl %ebx, %eax + movl 64(%esi), %ebx + movl %edi, 56(%ecx) + movl 64(%edx), %edi + sbbl %ebx, %edi + movl 68(%esi), %ebx + movl %eax, 60(%ecx) + movl 68(%edx), %eax + sbbl %ebx, %eax + movl %eax, 52(%esp) # 4-byte Spill + movl 72(%esi), %eax + movl %edi, 64(%ecx) + movl 72(%edx), %edi + sbbl %eax, %edi + movl %edi, 44(%esp) # 4-byte Spill + movl 76(%esi), %eax + movl 76(%edx), %edi + sbbl %eax, %edi + movl %edi, 48(%esp) # 4-byte Spill + movl 80(%esi), %eax + movl 80(%edx), %edi + sbbl %eax, %edi + movl %edi, 56(%esp) # 4-byte Spill + movl 84(%esi), %eax + movl 84(%edx), %edi + sbbl %eax, %edi + movl %edi, 60(%esp) # 4-byte Spill + movl 88(%esi), %eax + movl 88(%edx), %edi + sbbl %eax, %edi + movl %edi, 64(%esp) # 4-byte Spill + movl 92(%esi), %eax + movl 92(%edx), %edi + sbbl %eax, %edi + movl %edi, 68(%esp) # 4-byte Spill + movl 96(%esi), %eax + movl 
96(%edx), %edi + sbbl %eax, %edi + movl %edi, 72(%esp) # 4-byte Spill + movl 100(%esi), %eax + movl 100(%edx), %edi + sbbl %eax, %edi + movl %edi, 80(%esp) # 4-byte Spill + movl 104(%esi), %eax + movl 104(%edx), %edi + sbbl %eax, %edi + movl %edi, 84(%esp) # 4-byte Spill + movl 108(%esi), %eax + movl 108(%edx), %edi + sbbl %eax, %edi + movl %edi, 88(%esp) # 4-byte Spill + movl 112(%esi), %eax + movl 112(%edx), %edi + sbbl %eax, %edi + movl %edi, 92(%esp) # 4-byte Spill + movl 116(%esi), %eax + movl 116(%edx), %edi + sbbl %eax, %edi + movl %edi, 96(%esp) # 4-byte Spill + movl 120(%esi), %eax + movl 120(%edx), %edi + sbbl %eax, %edi + movl %edi, 100(%esp) # 4-byte Spill + movl 124(%esi), %eax + movl 124(%edx), %edi + sbbl %eax, %edi + movl %edi, 104(%esp) # 4-byte Spill + movl 128(%esi), %eax + movl 128(%edx), %edi + sbbl %eax, %edi + movl %edi, 108(%esp) # 4-byte Spill + movl 132(%esi), %eax + movl 132(%edx), %edx + sbbl %eax, %edx + movl %edx, 112(%esp) # 4-byte Spill + movl $0, %eax + sbbl $0, %eax + andl $1, %eax + movl 148(%esp), %ebp + jne .LBB269_1 +# BB#2: + movl $0, 76(%esp) # 4-byte Folded Spill + jmp .LBB269_3 +.LBB269_1: + movl 64(%ebp), %edx + movl %edx, 76(%esp) # 4-byte Spill +.LBB269_3: + testb %al, %al + jne .LBB269_4 +# BB#5: + movl $0, 28(%esp) # 4-byte Folded Spill + movl $0, %esi + jmp .LBB269_6 +.LBB269_4: + movl (%ebp), %esi + movl 4(%ebp), %eax + movl %eax, 28(%esp) # 4-byte Spill +.LBB269_6: + jne .LBB269_7 +# BB#8: + movl $0, 40(%esp) # 4-byte Folded Spill + jmp .LBB269_9 +.LBB269_7: + movl 60(%ebp), %eax + movl %eax, 40(%esp) # 4-byte Spill +.LBB269_9: + jne .LBB269_10 +# BB#11: + movl $0, 36(%esp) # 4-byte Folded Spill + jmp .LBB269_12 +.LBB269_10: + movl 56(%ebp), %eax + movl %eax, 36(%esp) # 4-byte Spill +.LBB269_12: + jne .LBB269_13 +# BB#14: + movl $0, 32(%esp) # 4-byte Folded Spill + jmp .LBB269_15 +.LBB269_13: + movl 52(%ebp), %eax + movl %eax, 32(%esp) # 4-byte Spill +.LBB269_15: + jne .LBB269_16 +# BB#17: + movl $0, 24(%esp) # 4-byte Folded Spill + jmp .LBB269_18 +.LBB269_16: + movl 48(%ebp), %eax + movl %eax, 24(%esp) # 4-byte Spill +.LBB269_18: + jne .LBB269_19 +# BB#20: + movl $0, 20(%esp) # 4-byte Folded Spill + jmp .LBB269_21 +.LBB269_19: + movl 44(%ebp), %eax + movl %eax, 20(%esp) # 4-byte Spill +.LBB269_21: + jne .LBB269_22 +# BB#23: + movl $0, 16(%esp) # 4-byte Folded Spill + jmp .LBB269_24 +.LBB269_22: + movl 40(%ebp), %eax + movl %eax, 16(%esp) # 4-byte Spill +.LBB269_24: + jne .LBB269_25 +# BB#26: + movl $0, 12(%esp) # 4-byte Folded Spill + jmp .LBB269_27 +.LBB269_25: + movl 36(%ebp), %eax + movl %eax, 12(%esp) # 4-byte Spill +.LBB269_27: + jne .LBB269_28 +# BB#29: + movl $0, 8(%esp) # 4-byte Folded Spill + jmp .LBB269_30 +.LBB269_28: + movl 32(%ebp), %eax + movl %eax, 8(%esp) # 4-byte Spill +.LBB269_30: + jne .LBB269_31 +# BB#32: + movl $0, 4(%esp) # 4-byte Folded Spill + jmp .LBB269_33 +.LBB269_31: + movl 28(%ebp), %eax + movl %eax, 4(%esp) # 4-byte Spill +.LBB269_33: + jne .LBB269_34 +# BB#35: + movl $0, (%esp) # 4-byte Folded Spill + jmp .LBB269_36 +.LBB269_34: + movl 24(%ebp), %eax + movl %eax, (%esp) # 4-byte Spill +.LBB269_36: + jne .LBB269_37 +# BB#38: + movl $0, %ebx + jmp .LBB269_39 +.LBB269_37: + movl 20(%ebp), %ebx +.LBB269_39: + jne .LBB269_40 +# BB#41: + movl $0, %edi + jmp .LBB269_42 +.LBB269_40: + movl 16(%ebp), %edi +.LBB269_42: + jne .LBB269_43 +# BB#44: + movl %ebp, %eax + movl $0, %ebp + jmp .LBB269_45 +.LBB269_43: + movl %ebp, %eax + movl 12(%eax), %ebp +.LBB269_45: + jne .LBB269_46 +# BB#47: + xorl %eax, %eax + jmp 
.LBB269_48 +.LBB269_46: + movl 8(%eax), %eax +.LBB269_48: + addl 52(%esp), %esi # 4-byte Folded Reload + movl 28(%esp), %edx # 4-byte Reload + adcl 44(%esp), %edx # 4-byte Folded Reload + movl %esi, 68(%ecx) + adcl 48(%esp), %eax # 4-byte Folded Reload + movl %edx, 72(%ecx) + adcl 56(%esp), %ebp # 4-byte Folded Reload + movl %eax, 76(%ecx) + adcl 60(%esp), %edi # 4-byte Folded Reload + movl %ebp, 80(%ecx) + adcl 64(%esp), %ebx # 4-byte Folded Reload + movl %edi, 84(%ecx) + movl (%esp), %edx # 4-byte Reload + adcl 68(%esp), %edx # 4-byte Folded Reload + movl %ebx, 88(%ecx) + movl 4(%esp), %eax # 4-byte Reload + adcl 72(%esp), %eax # 4-byte Folded Reload + movl %edx, 92(%ecx) + movl 8(%esp), %edx # 4-byte Reload + adcl 80(%esp), %edx # 4-byte Folded Reload + movl %eax, 96(%ecx) + movl 12(%esp), %eax # 4-byte Reload + adcl 84(%esp), %eax # 4-byte Folded Reload + movl %edx, 100(%ecx) + movl 16(%esp), %edx # 4-byte Reload + adcl 88(%esp), %edx # 4-byte Folded Reload + movl %eax, 104(%ecx) + movl 20(%esp), %eax # 4-byte Reload + adcl 92(%esp), %eax # 4-byte Folded Reload + movl %edx, 108(%ecx) + movl 24(%esp), %edx # 4-byte Reload + adcl 96(%esp), %edx # 4-byte Folded Reload + movl %eax, 112(%ecx) + movl 32(%esp), %eax # 4-byte Reload + adcl 100(%esp), %eax # 4-byte Folded Reload + movl %edx, 116(%ecx) + movl 36(%esp), %edx # 4-byte Reload + adcl 104(%esp), %edx # 4-byte Folded Reload + movl %eax, 120(%ecx) + movl 40(%esp), %eax # 4-byte Reload + adcl 108(%esp), %eax # 4-byte Folded Reload + movl %edx, 124(%ecx) + movl %eax, 128(%ecx) + movl 76(%esp), %eax # 4-byte Reload + adcl 112(%esp), %eax # 4-byte Folded Reload + movl %eax, 132(%ecx) + addl $116, %esp + popl %esi + popl %edi + popl %ebx + popl %ebp + retl +.Lfunc_end269: + .size mcl_fpDbl_sub17L, .Lfunc_end269-mcl_fpDbl_sub17L + + + .section ".note.GNU-stack","",@progbits diff --git a/vendor/github.com/byzantine-lab/mcl/src/bn_c256.cpp b/vendor/github.com/byzantine-lab/mcl/src/bn_c256.cpp new file mode 100644 index 000000000..2f975a287 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/bn_c256.cpp @@ -0,0 +1,6 @@ +/* + implementation of mclBn_* apis +*/ +#define MCLBN_FP_UNIT_SIZE 4 +#include "mcl/impl/bn_c_impl.hpp" + diff --git a/vendor/github.com/byzantine-lab/mcl/src/bn_c384.cpp b/vendor/github.com/byzantine-lab/mcl/src/bn_c384.cpp new file mode 100644 index 000000000..934a078ae --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/bn_c384.cpp @@ -0,0 +1,7 @@ +/* + implementation of mclBn_* apis +*/ +#define MCLBN_FP_UNIT_SIZE 6 +#define MCLBN_FR_UNIT_SIZE 6 +#include "mcl/impl/bn_c_impl.hpp" + diff --git a/vendor/github.com/byzantine-lab/mcl/src/bn_c384_256.cpp b/vendor/github.com/byzantine-lab/mcl/src/bn_c384_256.cpp new file mode 100644 index 000000000..ecd968efd --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/bn_c384_256.cpp @@ -0,0 +1,7 @@ +/* + implementation of mclBn_* apis +*/ +#define MCLBN_FP_UNIT_SIZE 6 +#define MCLBN_FR_UNIT_SIZE 4 +#include "mcl/impl/bn_c_impl.hpp" + diff --git a/vendor/github.com/byzantine-lab/mcl/src/bn_c512.cpp b/vendor/github.com/byzantine-lab/mcl/src/bn_c512.cpp new file mode 100644 index 000000000..7c1029522 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/bn_c512.cpp @@ -0,0 +1,6 @@ +/* + implementation of mclBn_* apis +*/ +#define MCLBN_FP_UNIT_SIZE 8 +#include "mcl/impl/bn_c_impl.hpp" + diff --git a/vendor/github.com/byzantine-lab/mcl/src/bn_c_impl.hpp b/vendor/github.com/byzantine-lab/mcl/src/bn_c_impl.hpp new file mode 100644 index 000000000..bb0b4ba8e --- 
/dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/bn_c_impl.hpp @@ -0,0 +1,517 @@ +#define MCLBN_DLL_EXPORT +#include <mcl/bn.h> + +#if MCLBN_FP_UNIT_SIZE == 4 && MCLBN_FR_UNIT_SIZE == 4 +#include <mcl/bn256.hpp> +#elif MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 6 +#include <mcl/bn384.hpp> +#elif MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 4 +#include <mcl/bls12_381.hpp> +#elif MCLBN_FP_UNIT_SIZE == 8 && MCLBN_FR_UNIT_SIZE == 8 +#include <mcl/bn512.hpp> +#else + #error "not supported size" +#endif +#include <mcl/lagrange.hpp> +using namespace mcl::bn; + +static Fr *cast(mclBnFr *p) { return reinterpret_cast<Fr*>(p); } +static const Fr *cast(const mclBnFr *p) { return reinterpret_cast<const Fr*>(p); } + +static G1 *cast(mclBnG1 *p) { return reinterpret_cast<G1*>(p); } +static const G1 *cast(const mclBnG1 *p) { return reinterpret_cast<const G1*>(p); } + +static G2 *cast(mclBnG2 *p) { return reinterpret_cast<G2*>(p); } +static const G2 *cast(const mclBnG2 *p) { return reinterpret_cast<const G2*>(p); } + +static Fp12 *cast(mclBnGT *p) { return reinterpret_cast<Fp12*>(p); } +static const Fp12 *cast(const mclBnGT *p) { return reinterpret_cast<const Fp12*>(p); } + +static Fp6 *cast(uint64_t *p) { return reinterpret_cast<Fp6*>(p); } +static const Fp6 *cast(const uint64_t *p) { return reinterpret_cast<const Fp6*>(p); } + +template<class T> +int setStr(T *x, const char *buf, mclSize bufSize, int ioMode) +{ + size_t n = cast(x)->deserialize(buf, bufSize, ioMode); + return n > 0 ? 0 : -1; +} + +#ifdef __EMSCRIPTEN__ +// use these functions forcibly +extern "C" MCLBN_DLL_API void *mclBnMalloc(size_t n) +{ + return malloc(n); +} +extern "C" MCLBN_DLL_API void mclBnFree(void *p) +{ + free(p); +} +#endif + +int mclBn_init(int curve, int compiledTimeVar) +{ + if (compiledTimeVar != MCLBN_COMPILED_TIME_VAR) { + return -(compiledTimeVar | (MCLBN_COMPILED_TIME_VAR * 100)); + } + const mcl::CurveParam& cp = mcl::getCurveParam(curve); + bool b; + initPairing(&b, cp); + return b ? 0 : -1; +}
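
A usage note on this vendored C API: mclBn_init must run once before any other mclBn_* call, and the compiledTimeVar check above turns a header/library size mismatch into an error code rather than silent memory corruption. A hedged caller sketch, assuming the MCL_BN254 curve constant and the declarations from <mcl/bn.h>; the bilinearity check e(aP, bQ) == e(P, Q)^(ab) uses only functions defined in this file:

```cpp
#include <mcl/bn.h>
#include <cassert>

int main()
{
    // The header's MCLBN_COMPILED_TIME_VAR must match the built library.
    assert(mclBn_init(MCL_BN254, MCLBN_COMPILED_TIME_VAR) == 0);
    mclBnFr a, b;
    mclBnFr_setByCSPRNG(&a);
    mclBnFr_setByCSPRNG(&b);
    mclBnG1 P, aP;
    mclBnG2 Q, bQ;
    mclBnG1_hashAndMapTo(&P, "P", 1);
    mclBnG2_hashAndMapTo(&Q, "Q", 1);
    mclBnG1_mul(&aP, &P, &a);       // aP = a * P
    mclBnG2_mul(&bQ, &Q, &b);       // bQ = b * Q
    mclBnGT e1, e2;
    mclBn_pairing(&e1, &aP, &bQ);   // e(aP, bQ)
    mclBn_pairing(&e2, &P, &Q);
    mclBnGT_pow(&e2, &e2, &a);
    mclBnGT_pow(&e2, &e2, &b);      // e(P, Q)^(ab)
    assert(mclBnGT_isEqual(&e1, &e2));
    return 0;
}
```
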
+ +int mclBn_getOpUnitSize() +{ + return (int)Fp::getUnitSize() * sizeof(mcl::fp::Unit) / sizeof(uint64_t); +} + +int mclBn_getG1ByteSize() +{ + return (int)Fp::getByteSize(); +} + +int mclBn_getFrByteSize() +{ + return (int)Fr::getByteSize(); +} + +mclSize mclBn_getCurveOrder(char *buf, mclSize maxBufSize) +{ + return Fr::getModulo(buf, maxBufSize); +} + +mclSize mclBn_getFieldOrder(char *buf, mclSize maxBufSize) +{ + return Fp::getModulo(buf, maxBufSize); +} +//////////////////////////////////////////////// +// set zero +void mclBnFr_clear(mclBnFr *x) +{ + cast(x)->clear(); +} + +// set x to y +void mclBnFr_setInt(mclBnFr *y, mclInt x) +{ + *cast(y) = x; +} +void mclBnFr_setInt32(mclBnFr *y, int x) +{ + *cast(y) = x; +} + +int mclBnFr_setStr(mclBnFr *x, const char *buf, mclSize bufSize, int ioMode) +{ + return setStr(x, buf, bufSize, ioMode); +} +int mclBnFr_setLittleEndian(mclBnFr *x, const void *buf, mclSize bufSize) +{ + cast(x)->setArrayMask((const char *)buf, bufSize); + return 0; +} +mclSize mclBnFr_deserialize(mclBnFr *x, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(x)->deserialize(buf, bufSize); +} +// return 1 if true +int mclBnFr_isValid(const mclBnFr *x) +{ + return cast(x)->isValid(); +} +int mclBnFr_isEqual(const mclBnFr *x, const mclBnFr *y) +{ + return *cast(x) == *cast(y); +} +int mclBnFr_isZero(const mclBnFr *x) +{ + return cast(x)->isZero(); +} +int mclBnFr_isOne(const mclBnFr *x) +{ + return cast(x)->isOne(); +} + +#ifndef MCL_DONT_USE_CSRPNG +int mclBnFr_setByCSPRNG(mclBnFr *x) +{ + cast(x)->setByCSPRNG(); + return 0; +} +#endif + +// hash(buf) and set x +int mclBnFr_setHashOf(mclBnFr *x, const void *buf, mclSize bufSize) +{ + cast(x)->setHashOf(buf, bufSize); + return 0; +} + +mclSize mclBnFr_getStr(char *buf, mclSize maxBufSize, const mclBnFr *x, int ioMode) +{ + return cast(x)->getStr(buf, maxBufSize, ioMode); +} +mclSize mclBnFr_serialize(void *buf, mclSize maxBufSize, const mclBnFr *x) +{ + return (mclSize)cast(x)->serialize(buf, maxBufSize); +} + +void mclBnFr_neg(mclBnFr *y, const mclBnFr *x) +{ + Fr::neg(*cast(y), *cast(x)); +} +void mclBnFr_inv(mclBnFr *y, const mclBnFr *x) +{ + Fr::inv(*cast(y), *cast(x)); +} +void mclBnFr_sqr(mclBnFr *y, const mclBnFr *x) +{ + Fr::sqr(*cast(y), *cast(x)); +} +void mclBnFr_add(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) +{ + Fr::add(*cast(z),*cast(x), *cast(y)); +} +void mclBnFr_sub(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) +{ + Fr::sub(*cast(z),*cast(x), *cast(y)); +} +void mclBnFr_mul(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) +{ + Fr::mul(*cast(z),*cast(x), *cast(y)); +} +void mclBnFr_div(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) +{ + Fr::div(*cast(z),*cast(x), *cast(y)); +} + +//////////////////////////////////////////////// +// set zero +void mclBnG1_clear(mclBnG1 *x) +{ + cast(x)->clear(); +} + +int mclBnG1_setStr(mclBnG1 *x, const char *buf, mclSize bufSize, int ioMode) +{ + return setStr(x, buf, bufSize, ioMode); +} +mclSize mclBnG1_deserialize(mclBnG1 *x, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(x)->deserialize(buf, bufSize); +} + +// return 1 if true +int mclBnG1_isValid(const mclBnG1 *x) +{ + return cast(x)->isValid(); +} +int mclBnG1_isEqual(const mclBnG1 *x, const mclBnG1 *y) +{ + return *cast(x) == *cast(y); +} +int mclBnG1_isZero(const mclBnG1 *x) +{ + return cast(x)->isZero(); +} +int mclBnG1_isValidOrder(const mclBnG1 *x) +{ + return cast(x)->isValidOrder(); +} + +int mclBnG1_hashAndMapTo(mclBnG1 *x, const void *buf, mclSize bufSize) +{ +
hashAndMapToG1(*cast(x), buf, bufSize); + return 0; +} + +mclSize mclBnG1_getStr(char *buf, mclSize maxBufSize, const mclBnG1 *x, int ioMode) +{ + return cast(x)->getStr(buf, maxBufSize, ioMode); +} + +mclSize mclBnG1_serialize(void *buf, mclSize maxBufSize, const mclBnG1 *x) +{ + return (mclSize)cast(x)->serialize(buf, maxBufSize); +} + +void mclBnG1_neg(mclBnG1 *y, const mclBnG1 *x) +{ + G1::neg(*cast(y), *cast(x)); +} +void mclBnG1_dbl(mclBnG1 *y, const mclBnG1 *x) +{ + G1::dbl(*cast(y), *cast(x)); +} +void mclBnG1_normalize(mclBnG1 *y, const mclBnG1 *x) +{ + G1::normalize(*cast(y), *cast(x)); +} +void mclBnG1_add(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y) +{ + G1::add(*cast(z),*cast(x), *cast(y)); +} +void mclBnG1_sub(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y) +{ + G1::sub(*cast(z),*cast(x), *cast(y)); +} +void mclBnG1_mul(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y) +{ + G1::mul(*cast(z),*cast(x), *cast(y)); +} +void mclBnG1_mulCT(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y) +{ + G1::mulCT(*cast(z),*cast(x), *cast(y)); +} + +//////////////////////////////////////////////// +// set zero +void mclBnG2_clear(mclBnG2 *x) +{ + cast(x)->clear(); +} + +int mclBnG2_setStr(mclBnG2 *x, const char *buf, mclSize bufSize, int ioMode) +{ + return setStr(x, buf, bufSize, ioMode); +} +mclSize mclBnG2_deserialize(mclBnG2 *x, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(x)->deserialize(buf, bufSize); +} + +// return 1 if true +int mclBnG2_isValid(const mclBnG2 *x) +{ + return cast(x)->isValid(); +} +int mclBnG2_isEqual(const mclBnG2 *x, const mclBnG2 *y) +{ + return *cast(x) == *cast(y); +} +int mclBnG2_isZero(const mclBnG2 *x) +{ + return cast(x)->isZero(); +} +int mclBnG2_isValidOrder(const mclBnG2 *x) +{ + return cast(x)->isValidOrder(); +} + +int mclBnG2_hashAndMapTo(mclBnG2 *x, const void *buf, mclSize bufSize) +{ + hashAndMapToG2(*cast(x), buf, bufSize); + return 0; +} + +mclSize mclBnG2_getStr(char *buf, mclSize maxBufSize, const mclBnG2 *x, int ioMode) +{ + return cast(x)->getStr(buf, maxBufSize, ioMode); +} + +mclSize mclBnG2_serialize(void *buf, mclSize maxBufSize, const mclBnG2 *x) +{ + return (mclSize)cast(x)->serialize(buf, maxBufSize); +} + +void mclBnG2_neg(mclBnG2 *y, const mclBnG2 *x) +{ + G2::neg(*cast(y), *cast(x)); +} +void mclBnG2_dbl(mclBnG2 *y, const mclBnG2 *x) +{ + G2::dbl(*cast(y), *cast(x)); +} +void mclBnG2_normalize(mclBnG2 *y, const mclBnG2 *x) +{ + G2::normalize(*cast(y), *cast(x)); +} +void mclBnG2_add(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y) +{ + G2::add(*cast(z),*cast(x), *cast(y)); +} +void mclBnG2_sub(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y) +{ + G2::sub(*cast(z),*cast(x), *cast(y)); +} +void mclBnG2_mul(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y) +{ + G2::mul(*cast(z),*cast(x), *cast(y)); +} +void mclBnG2_mulCT(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y) +{ + G2::mulCT(*cast(z),*cast(x), *cast(y)); +} + +//////////////////////////////////////////////// +// set zero +void mclBnGT_clear(mclBnGT *x) +{ + cast(x)->clear(); +} +void mclBnGT_setInt(mclBnGT *y, mclInt x) +{ + cast(y)->clear(); + *(cast(y)->getFp0()) = x; +} +void mclBnGT_setInt32(mclBnGT *y, int x) +{ + cast(y)->clear(); + *(cast(y)->getFp0()) = x; +} + +int mclBnGT_setStr(mclBnGT *x, const char *buf, mclSize bufSize, int ioMode) +{ + return setStr(x, buf, bufSize, ioMode); +} +mclSize mclBnGT_deserialize(mclBnGT *x, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(x)->deserialize(buf, bufSize); +} + +// return 1 if true +int 
mclBnGT_isEqual(const mclBnGT *x, const mclBnGT *y) +{ + return *cast(x) == *cast(y); +} +int mclBnGT_isZero(const mclBnGT *x) +{ + return cast(x)->isZero(); +} +int mclBnGT_isOne(const mclBnGT *x) +{ + return cast(x)->isOne(); +} + +mclSize mclBnGT_getStr(char *buf, mclSize maxBufSize, const mclBnGT *x, int ioMode) +{ + return cast(x)->getStr(buf, maxBufSize, ioMode); +} + +mclSize mclBnGT_serialize(void *buf, mclSize maxBufSize, const mclBnGT *x) +{ + return (mclSize)cast(x)->serialize(buf, maxBufSize); +} + +void mclBnGT_neg(mclBnGT *y, const mclBnGT *x) +{ + Fp12::neg(*cast(y), *cast(x)); +} +void mclBnGT_inv(mclBnGT *y, const mclBnGT *x) +{ + Fp12::inv(*cast(y), *cast(x)); +} +void mclBnGT_sqr(mclBnGT *y, const mclBnGT *x) +{ + Fp12::sqr(*cast(y), *cast(x)); +} +void mclBnGT_add(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) +{ + Fp12::add(*cast(z),*cast(x), *cast(y)); +} +void mclBnGT_sub(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) +{ + Fp12::sub(*cast(z),*cast(x), *cast(y)); +} +void mclBnGT_mul(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) +{ + Fp12::mul(*cast(z),*cast(x), *cast(y)); +} +void mclBnGT_div(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) +{ + Fp12::div(*cast(z),*cast(x), *cast(y)); +} + +void mclBnGT_pow(mclBnGT *z, const mclBnGT *x, const mclBnFr *y) +{ + Fp12::pow(*cast(z), *cast(x), *cast(y)); +} +void mclBnGT_powGeneric(mclBnGT *z, const mclBnGT *x, const mclBnFr *y) +{ + Fp12::powGeneric(*cast(z), *cast(x), *cast(y)); +} + +void mclBn_pairing(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y) +{ + pairing(*cast(z), *cast(x), *cast(y)); +} +void mclBn_finalExp(mclBnGT *y, const mclBnGT *x) +{ + finalExp(*cast(y), *cast(x)); +} +void mclBn_millerLoop(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y) +{ + millerLoop(*cast(z), *cast(x), *cast(y)); +} +int mclBn_getUint64NumToPrecompute(void) +{ + return int(BN::param.precomputedQcoeffSize * sizeof(Fp6) / sizeof(uint64_t)); +} + +void mclBn_precomputeG2(uint64_t *Qbuf, const mclBnG2 *Q) +{ + precomputeG2(cast(Qbuf), *cast(Q)); +} + +void mclBn_precomputedMillerLoop(mclBnGT *f, const mclBnG1 *P, const uint64_t *Qbuf) +{ + precomputedMillerLoop(*cast(f), *cast(P), cast(Qbuf)); +} + +void mclBn_precomputedMillerLoop2(mclBnGT *f, const mclBnG1 *P1, const uint64_t *Q1buf, const mclBnG1 *P2, const uint64_t *Q2buf) +{ + precomputedMillerLoop2(*cast(f), *cast(P1), cast(Q1buf), *cast(P2), cast(Q2buf)); +} + +void mclBn_precomputedMillerLoop2mixed(mclBnGT *f, const mclBnG1 *P1, const mclBnG2 *Q1, const mclBnG1 *P2, const uint64_t *Q2buf) +{ + precomputedMillerLoop2mixed(*cast(f), *cast(P1), *cast(Q1), *cast(P2), cast(Q2buf)); +} + +int mclBn_FrLagrangeInterpolation(mclBnFr *out, const mclBnFr *xVec, const mclBnFr *yVec, mclSize k) +{ + bool b; + mcl::LagrangeInterpolation(&b, *cast(out), cast(xVec), cast(yVec), k); + return b ? 0 : -1; +} +int mclBn_G1LagrangeInterpolation(mclBnG1 *out, const mclBnFr *xVec, const mclBnG1 *yVec, mclSize k) +{ + bool b; + mcl::LagrangeInterpolation(&b, *cast(out), cast(xVec), cast(yVec), k); + return b ? 0 : -1; +} +int mclBn_G2LagrangeInterpolation(mclBnG2 *out, const mclBnFr *xVec, const mclBnG2 *yVec, mclSize k) +{ + bool b; + mcl::LagrangeInterpolation(&b, *cast(out), cast(xVec), cast(yVec), k); + return b ? 0 : -1; +} +int mclBn_FrEvaluatePolynomial(mclBnFr *out, const mclBnFr *cVec, mclSize cSize, const mclBnFr *x) +{ + bool b; + mcl::evaluatePolynomial(&b, *cast(out), cast(cVec), cSize, *cast(x)); + return b ? 0 : -1; +}
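
The Lagrange helpers and polynomial evaluators above are the building blocks for threshold schemes: deal shares by evaluating a secret polynomial, then interpolate k of them to recover f(0). A hedged round-trip sketch over Fr, assuming mclBn_init as before; it calls only the C API declared in this file:

```cpp
#include <mcl/bn.h>
#include <cassert>

int main()
{
    assert(mclBn_init(MCL_BN254, MCLBN_COMPILED_TIME_VAR) == 0);
    const mclSize k = 3;
    mclBnFr c[k];                 // random coefficients; c[0] is the secret
    mclBnFr xs[k], ys[k];
    for (mclSize i = 0; i < k; i++) {
        mclBnFr_setByCSPRNG(&c[i]);
        mclBnFr_setInt(&xs[i], (mclInt)(i + 1));   // shares at x = 1, 2, 3
        assert(mclBn_FrEvaluatePolynomial(&ys[i], c, k, &xs[i]) == 0);
    }
    // Interpolation at x = 0 recovers the constant term, i.e. the secret.
    mclBnFr secret;
    assert(mclBn_FrLagrangeInterpolation(&secret, xs, ys, k) == 0);
    assert(mclBnFr_isEqual(&secret, &c[0]));
    return 0;
}
```
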
+int mclBn_G1EvaluatePolynomial(mclBnG1 *out, const mclBnG1 *cVec, mclSize cSize, const mclBnFr *x) +{ + bool b; + mcl::evaluatePolynomial(&b, *cast(out), cast(cVec), cSize, *cast(x)); + return b ? 0 : -1; +} +int mclBn_G2EvaluatePolynomial(mclBnG2 *out, const mclBnG2 *cVec, mclSize cSize, const mclBnFr *x) +{ + bool b; + mcl::evaluatePolynomial(&b, *cast(out), cast(cVec), cSize, *cast(x)); + return b ? 0 : -1; +} + +void mclBn_verifyOrderG1(int doVerify) +{ + verifyOrderG1(doVerify != 0); +} + +void mclBn_verifyOrderG2(int doVerify) +{ + verifyOrderG2(doVerify != 0); +} + diff --git a/vendor/github.com/byzantine-lab/mcl/src/ecdsa_c.cpp b/vendor/github.com/byzantine-lab/mcl/src/ecdsa_c.cpp new file mode 100644 index 000000000..f2222a224 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/ecdsa_c.cpp @@ -0,0 +1,110 @@ +#define ECDSA_DLL_EXPORT +#include <mcl/ecdsa.h> +#include <mcl/ecdsa.hpp> +#include <new> + +using namespace mcl::ecdsa; + +static SecretKey *cast(ecdsaSecretKey *p) { return reinterpret_cast<SecretKey*>(p); } +static const SecretKey *cast(const ecdsaSecretKey *p) { return reinterpret_cast<const SecretKey*>(p); } + +static PublicKey *cast(ecdsaPublicKey *p) { return reinterpret_cast<PublicKey*>(p); } +static const PublicKey *cast(const ecdsaPublicKey *p) { return reinterpret_cast<const PublicKey*>(p); } + +static Signature *cast(ecdsaSignature *p) { return reinterpret_cast<Signature*>(p); } +static const Signature *cast(const ecdsaSignature *p) { return reinterpret_cast<const Signature*>(p); } + +static PrecomputedPublicKey *cast(ecdsaPrecomputedPublicKey *p) { return reinterpret_cast<PrecomputedPublicKey*>(p); } +static const PrecomputedPublicKey *cast(const ecdsaPrecomputedPublicKey *p) { return reinterpret_cast<const PrecomputedPublicKey*>(p); } + +#ifdef __EMSCRIPTEN__ +// use these functions forcibly +extern "C" ECDSA_DLL_API void *ecdsaMalloc(size_t n) +{ + return malloc(n); +} +extern "C" ECDSA_DLL_API void ecdsaFree(void *p) +{ + free(p); +} +#endif + +int ecdsaInit(void) +{ + bool b; + init(&b); + return b ? 0 : -1; +}
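
A hedged end-to-end sketch of this ECDSA C wrapper, assuming the declarations live in <mcl/ecdsa.h> as the restored include suggests: random key, sign, verify, using only the calls defined in this file.

```cpp
#include <mcl/ecdsa.h>
#include <cassert>
#include <cstring>

int main()
{
    assert(ecdsaInit() == 0);             // once, before anything else
    ecdsaSecretKey sec;
    ecdsaPublicKey pub;
    ecdsaSignature sig;
    const char *msg = "hello";
    const mclSize len = (mclSize)strlen(msg);
    ecdsaSecretKeySetByCSPRNG(&sec);      // random secret key
    ecdsaGetPublicKey(&pub, &sec);
    ecdsaSign(&sig, &sec, msg, len);
    assert(ecdsaVerify(&sig, &pub, msg, len) != 0);   // nonzero means valid
    return 0;
}
```
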
+ +mclSize ecdsaSecretKeySerialize(void *buf, mclSize maxBufSize, const ecdsaSecretKey *sec) +{ + return (mclSize)cast(sec)->serialize(buf, maxBufSize); +} +mclSize ecdsaPublicKeySerialize(void *buf, mclSize maxBufSize, const ecdsaPublicKey *pub) +{ + return (mclSize)cast(pub)->serialize(buf, maxBufSize); +} +mclSize ecdsaSignatureSerialize(void *buf, mclSize maxBufSize, const ecdsaSignature *sig) +{ + return (mclSize)cast(sig)->serialize(buf, maxBufSize); +} + +mclSize ecdsaSecretKeyDeserialize(ecdsaSecretKey* sec, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(sec)->deserialize(buf, bufSize); +} +mclSize ecdsaPublicKeyDeserialize(ecdsaPublicKey* pub, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(pub)->deserialize(buf, bufSize); +} +mclSize ecdsaSignatureDeserialize(ecdsaSignature* sig, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(sig)->deserialize(buf, bufSize); +} + +// return 0 if success +int ecdsaSecretKeySetByCSPRNG(ecdsaSecretKey *sec) +{ + cast(sec)->setByCSPRNG(); + return 0; +} + +void ecdsaGetPublicKey(ecdsaPublicKey *pub, const ecdsaSecretKey *sec) +{ + getPublicKey(*cast(pub), *cast(sec)); +} + +void ecdsaSign(ecdsaSignature *sig, const ecdsaSecretKey *sec, const void *m, mclSize size) +{ + sign(*cast(sig), *cast(sec), m, size); +} + +int ecdsaVerify(const ecdsaSignature *sig, const ecdsaPublicKey *pub, const void *m, mclSize size) +{ + return verify(*cast(sig), *cast(pub), m, size); +} +int ecdsaVerifyPrecomputed(const ecdsaSignature *sig, const ecdsaPrecomputedPublicKey *ppub, const void *m, mclSize size) +{ + return verify(*cast(sig), *cast(ppub), m, size); +} + +ecdsaPrecomputedPublicKey *ecdsaPrecomputedPublicKeyCreate() +{ + PrecomputedPublicKey *ppub = (PrecomputedPublicKey*)malloc(sizeof(PrecomputedPublicKey)); + if (ppub == 0) return 0; + new(ppub) PrecomputedPublicKey(); + return reinterpret_cast<ecdsaPrecomputedPublicKey*>(ppub); +} + +void ecdsaPrecomputedPublicKeyDestroy(ecdsaPrecomputedPublicKey *ppub) +{ + cast(ppub)->~PrecomputedPublicKey(); + free(ppub); +} + +int ecdsaPrecomputedPublicKeyInit(ecdsaPrecomputedPublicKey *ppub, const ecdsaPublicKey *pub) +{ + bool b; + cast(ppub)->init(&b, *cast(pub)); + return b ?
0 : -1; +} diff --git a/vendor/github.com/byzantine-lab/mcl/src/fp.cpp b/vendor/github.com/byzantine-lab/mcl/src/fp.cpp new file mode 100644 index 000000000..df72d6d07 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/fp.cpp @@ -0,0 +1,646 @@ +#include +#include +#include +#include +#include +#ifdef MCL_USE_XBYAK +#include "fp_generator.hpp" +#endif +#include "low_func.hpp" +#ifdef MCL_USE_LLVM +#include "proto.hpp" +#include "low_func_llvm.hpp" +#endif +#include +#include + +#ifdef _MSC_VER + #pragma warning(disable : 4127) +#endif + +namespace mcl { + +namespace fp { + +#ifdef MCL_USE_XBYAK +FpGenerator *Op::createFpGenerator() +{ + return new FpGenerator(); +} +void Op::destroyFpGenerator(FpGenerator *fg) +{ + delete fg; +} +#endif + +inline void setUnitAsLE(void *p, Unit x) +{ +#if MCL_SIZEOF_UNIT == 4 + cybozu::Set32bitAsLE(p, x); +#else + cybozu::Set64bitAsLE(p, x); +#endif +} +inline Unit getUnitAsLE(const void *p) +{ +#if MCL_SIZEOF_UNIT == 4 + return cybozu::Get32bitAsLE(p); +#else + return cybozu::Get64bitAsLE(p); +#endif +} + +const char *ModeToStr(Mode mode) +{ + switch (mode) { + case FP_AUTO: return "auto"; + case FP_GMP: return "gmp"; + case FP_GMP_MONT: return "gmp_mont"; + case FP_LLVM: return "llvm"; + case FP_LLVM_MONT: return "llvm_mont"; + case FP_XBYAK: return "xbyak"; + default: + assert(0); + return 0; + } +} + +Mode StrToMode(const char *s) +{ + static const struct { + const char *s; + Mode mode; + } tbl[] = { + { "auto", FP_AUTO }, + { "gmp", FP_GMP }, + { "gmp_mont", FP_GMP_MONT }, + { "llvm", FP_LLVM }, + { "llvm_mont", FP_LLVM_MONT }, + { "xbyak", FP_XBYAK }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + if (strcmp(s, tbl[i].s) == 0) return tbl[i].mode; + } + return FP_AUTO; +} + +bool isEnableJIT() +{ +#if defined(MCL_USE_XBYAK) + /* -1:not init, 0:disable, 1:enable */ + static int status = -1; + if (status == -1) { +#ifndef _MSC_VER + status = 1; + FILE *fp = fopen("/sys/fs/selinux/enforce", "rb"); + if (fp) { + char c; + if (fread(&c, 1, 1, fp) == 1 && c == '1') { + status = 0; + } + fclose(fp); + } +#endif + if (status != 0) { + MIE_ALIGN(4096) char buf[4096]; + bool ret = Xbyak::CodeArray::protect(buf, sizeof(buf), true); + status = ret ? 
1 : 0; + if (ret) { + Xbyak::CodeArray::protect(buf, sizeof(buf), false); + } + } + } + return status != 0; +#else + return false; +#endif +} + +uint32_t sha256(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize) +{ + return (uint32_t)cybozu::Sha256().digest(out, maxOutSize, msg, msgSize); +} + +uint32_t sha512(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize) +{ + return (uint32_t)cybozu::Sha512().digest(out, maxOutSize, msg, msgSize); +} + +#ifndef MCL_USE_VINT +static inline void set_mpz_t(mpz_t& z, const Unit* p, int n) +{ + int s = n; + while (s > 0) { + if (p[s - 1]) break; + s--; + } + z->_mp_alloc = n; + z->_mp_size = s; + z->_mp_d = (mp_limb_t*)const_cast(p); +} +#endif + +/* + y = (1/x) mod op.p +*/ +static inline void fp_invOpC(Unit *y, const Unit *x, const Op& op) +{ + const int N = (int)op.N; + bool b = false; +#ifdef MCL_USE_VINT + Vint vx, vy, vp; + vx.setArray(&b, x, N); + assert(b); (void)b; + vp.setArray(&b, op.p, N); + assert(b); (void)b; + Vint::invMod(vy, vx, vp); + vy.getArray(&b, y, N); + assert(b); (void)b; +#else + mpz_class my; + mpz_t mx, mp; + set_mpz_t(mx, x, N); + set_mpz_t(mp, op.p, N); + mpz_invert(my.get_mpz_t(), mx, mp); + gmp::getArray(&b, y, N, my); + assert(b); +#endif +} + +/* + inv(xR) = (1/x)R^-1 -toMont-> 1/x -toMont-> (1/x)R +*/ +static void fp_invMontOpC(Unit *y, const Unit *x, const Op& op) +{ + fp_invOpC(y, x, op); + op.fp_mul(y, y, op.R3, op.p); +} + +/* + large (N * 2) specification of AddPre, SubPre +*/ +template +struct SetFpDbl { + static inline void exec(Op&) {} +}; + +template +struct SetFpDbl { + static inline void exec(Op& op) + { +// if (!op.isFullBit) { + op.fpDbl_addPre = AddPre::f; + op.fpDbl_subPre = SubPre::f; +// } + } +}; + +template +void setOp2(Op& op) +{ + op.fp_shr1 = Shr1::f; + op.fp_neg = Neg::f; + if (op.isFullBit) { + op.fp_add = Add::f; + op.fp_sub = Sub::f; + } else { + op.fp_add = Add::f; + op.fp_sub = Sub::f; + } + if (op.isMont) { + if (op.isFullBit) { + op.fp_mul = Mont::f; + op.fp_sqr = SqrMont::f; + } else { + op.fp_mul = Mont::f; + op.fp_sqr = SqrMont::f; + } + op.fpDbl_mod = MontRed::f; + } else { + op.fp_mul = Mul::f; + op.fp_sqr = Sqr::f; + op.fpDbl_mod = Dbl_Mod::f; + } + op.fp_mulUnit = MulUnit::f; + if (!gmpIsFasterThanLLVM) { + op.fpDbl_mulPre = MulPre::f; + op.fpDbl_sqrPre = SqrPre::f; + } + op.fp_mulUnitPre = MulUnitPre::f; + op.fpN1_mod = N1_Mod::f; + op.fpDbl_add = DblAdd::f; + op.fpDbl_sub = DblSub::f; + op.fp_addPre = AddPre::f; + op.fp_subPre = SubPre::f; + op.fp2_mulNF = Fp2MulNF::f; + SetFpDbl::exec(op); +} + +template +void setOp(Op& op, Mode mode) +{ + // generic setup + op.fp_isZero = isZeroC; + op.fp_clear = clearC; + op.fp_copy = copyC; + if (op.isMont) { + op.fp_invOp = fp_invMontOpC; + } else { + op.fp_invOp = fp_invOpC; + } + setOp2(op); +#ifdef MCL_USE_LLVM + if (mode != fp::FP_GMP && mode != fp::FP_GMP_MONT) { +#if defined(MCL_USE_XBYAK) && CYBOZU_HOST == CYBOZU_HOST_INTEL + const bool gmpIsFasterThanLLVM = false;//(N == 8 && MCL_SIZEOF_UNIT == 8); + Xbyak::util::Cpu cpu; + if (cpu.has(Xbyak::util::Cpu::tBMI2)) { + setOp2(op); + } else +#endif + { + setOp2(op); + } + } +#else + (void)mode; +#endif +} + +#ifdef MCL_USE_XBYAK +inline void invOpForMontC(Unit *y, const Unit *x, const Op& op) +{ + Unit r[maxUnitSize]; + int k = op.fp_preInv(r, x); + /* + S = UnitBitSize + xr = 2^k + R = 2^(N * S) + get r2^(-k)R^2 = r 2^(N * S * 2 - k) + */ + op.fp_mul(y, r, op.invTbl.data() + k * op.N, op.p); +} + +static void initInvTbl(Op& op) +{ + const size_t N = op.N; + 
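
fp_invMontOpC above leans on a small identity: inverting a Montgomery-form input x*R yields x^-1 * R^-1, so one extra Montgomery multiplication (a*b*R^-1) by the precomputed op.R3 lands back on x^-1 * R, the inverse in Montgomery form. A toy numeric check of that identity with a deliberately tiny prime and R; none of this is library code:

```cpp
#include <cstdint>
#include <cassert>

// Square-and-multiply modular exponentiation, small enough for uint64_t.
static uint64_t powmod(uint64_t a, uint64_t e, uint64_t p)
{
    uint64_t r = 1;
    for (a %= p; e; e >>= 1, a = a * a % p)
        if (e & 1) r = r * a % p;
    return r;
}

int main()
{
    const uint64_t p = 251, R = 256 % p;               // toy modulus, R = 2^8
    const uint64_t Rinv = powmod(R, p - 2, p);
    const uint64_t R3 = R * R % p * R % p;             // analogue of op.R3
    const uint64_t x = 17, xMont = x * R % p;          // x in Montgomery form
    const uint64_t inv = powmod(xMont, p - 2, p);      // = x^-1 * R^-1 mod p
    const uint64_t out = inv * R3 % p * Rinv % p;      // montmul(inv, R3)
    assert(out == powmod(x, p - 2, p) * R % p);        // x^-1, Montgomery form
    return 0;
}
```
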
const Unit *p = op.p; + const size_t invTblN = N * sizeof(Unit) * 8 * 2; + op.invTbl.resize(invTblN * N); + Unit *tbl = op.invTbl.data() + (invTblN - 1) * N; + Unit t[maxUnitSize] = {}; + t[0] = 2; + op.toMont(tbl, t); + for (size_t i = 0; i < invTblN - 1; i++) { + op.fp_add(tbl - N, tbl, tbl, p); + tbl -= N; + } +} +#endif + +static bool initForMont(Op& op, const Unit *p, Mode mode) +{ + const size_t N = op.N; + bool b; + { + mpz_class t = 1, R; + gmp::getArray(&b, op.one, N, t); + if (!b) return false; + R = (t << (N * UnitBitSize)) % op.mp; + t = (R * R) % op.mp; + gmp::getArray(&b, op.R2, N, t); + if (!b) return false; + t = (t * R) % op.mp; + gmp::getArray(&b, op.R3, N, t); + if (!b) return false; + } + op.rp = getMontgomeryCoeff(p[0]); + if (mode != FP_XBYAK) return true; +#ifdef MCL_USE_XBYAK + if (op.fg == 0) op.fg = Op::createFpGenerator(); + bool useXbyak = op.fg->init(op); + + if (useXbyak && op.isMont && N <= 4) { + op.fp_invOp = &invOpForMontC; + initInvTbl(op); + } +#endif + return true; +} + +bool Op::init(const mpz_class& _p, size_t maxBitSize, int _xi_a, Mode mode, size_t mclMaxBitSize) +{ + if (mclMaxBitSize != MCL_MAX_BIT_SIZE) return false; +#ifdef MCL_USE_VINT + assert(sizeof(mcl::vint::Unit) == sizeof(Unit)); +#else + assert(sizeof(mp_limb_t) == sizeof(Unit)); +#endif + if (maxBitSize > MCL_MAX_BIT_SIZE) return false; + if (_p <= 0) return false; + clear(); + maxN = (maxBitSize + fp::UnitBitSize - 1) / fp::UnitBitSize; + N = gmp::getUnitSize(_p); + if (N > maxN) return false; + { + bool b; + gmp::getArray(&b, p, N, _p); + if (!b) return false; + } + mp = _p; + bitSize = gmp::getBitSize(mp); + pmod4 = gmp::getUnit(mp, 0) % 4; + this->xi_a = _xi_a; +/* + priority : MCL_USE_XBYAK > MCL_USE_LLVM > none + Xbyak > llvm_mont > llvm > gmp_mont > gmp +*/ +#ifdef MCL_USE_XBYAK + if (mode == FP_AUTO) mode = FP_XBYAK; + if (mode == FP_XBYAK && bitSize > 384) { + mode = FP_AUTO; + } + if (!isEnableJIT()) { + mode = FP_AUTO; + } +#else + if (mode == FP_XBYAK) mode = FP_AUTO; +#endif +#ifdef MCL_USE_LLVM + if (mode == FP_AUTO) mode = FP_LLVM_MONT; +#else + if (mode == FP_LLVM || mode == FP_LLVM_MONT) mode = FP_AUTO; +#endif + if (mode == FP_AUTO) mode = FP_GMP_MONT; + isMont = mode == FP_GMP_MONT || mode == FP_LLVM_MONT || mode == FP_XBYAK; +#if 0 + fprintf(stderr, "mode=%s, isMont=%d, maxBitSize=%d" +#ifdef MCL_USE_XBYAK + " MCL_USE_XBYAK" +#endif +#ifdef MCL_USE_LLVM + " MCL_USE_LLVM" +#endif + "\n", ModeToStr(mode), isMont, (int)maxBitSize); +#endif + isFullBit = (bitSize % UnitBitSize) == 0; + +#if defined(MCL_USE_LLVM) || defined(MCL_USE_XBYAK) + if ((mode == FP_AUTO || mode == FP_LLVM || mode == FP_XBYAK) + && mp == mpz_class("0xfffffffffffffffffffffffffffffffeffffffffffffffff")) { + primeMode = PM_NIST_P192; + isMont = false; + isFastMod = true; + } + if ((mode == FP_AUTO || mode == FP_LLVM || mode == FP_XBYAK) + && mp == mpz_class("0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) { + primeMode = PM_NIST_P521; + isMont = false; + isFastMod = true; + } +#endif +#if defined(MCL_USE_VINT) && MCL_SIZEOF_UNIT == 8 + { + const char *secp256k1Str = "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"; + bool b; + mpz_class secp256k1; + gmp::setStr(&b, secp256k1, secp256k1Str); + if (b && mp == secp256k1) { + primeMode = PM_SECP256K1; + isMont = false; + isFastMod = true; + } + } +#endif + switch (N) { + case 1: setOp<1>(*this, mode); break; + case 2: setOp<2>(*this, mode); break; 
+ case 3: setOp<3>(*this, mode); break; + case 4: setOp<4>(*this, mode); break; // 256 if 64-bit +#if MCL_MAX_UNIT_SIZE >= 6 + case 5: setOp<5>(*this, mode); break; + case 6: setOp<6>(*this, mode); break; +#endif +#if MCL_MAX_UNIT_SIZE >= 8 + case 7: setOp<7>(*this, mode); break; + case 8: setOp<8>(*this, mode); break; +#endif +#if MCL_MAX_UNIT_SIZE >= 9 + case 9: setOp<9>(*this, mode); break; // 521 if 64-bit +#endif +#if MCL_MAX_UNIT_SIZE >= 10 + case 10: setOp<10>(*this, mode); break; +#endif +#if MCL_MAX_UNIT_SIZE >= 12 + case 11: setOp<11>(*this, mode); break; + case 12: setOp<12>(*this, mode); break; // 768 if 64-bit +#endif +#if MCL_MAX_UNIT_SIZE >= 14 + case 13: setOp<13>(*this, mode); break; + case 14: setOp<14>(*this, mode); break; +#endif +#if MCL_MAX_UNIT_SIZE >= 16 + case 15: setOp<15>(*this, mode); break; + case 16: setOp<16>(*this, mode); break; // 1024 if 64-bit +#endif +#if MCL_MAX_UNIT_SIZE >= 17 + case 17: setOp<17>(*this, mode); break; // 521 if 32-bit +#endif + default: + return false; + } +#ifdef MCL_USE_LLVM + if (primeMode == PM_NIST_P192) { + fp_mul = &mcl_fp_mulNIST_P192L; + fp_sqr = &mcl_fp_sqr_NIST_P192L; + fpDbl_mod = &mcl_fpDbl_mod_NIST_P192L; + } + if (primeMode == PM_NIST_P521) { + fpDbl_mod = &mcl_fpDbl_mod_NIST_P521L; + } +#endif +#if defined(MCL_USE_VINT) && MCL_SIZEOF_UNIT == 8 + if (primeMode == PM_SECP256K1) { + fp_mul = &mcl::vint::mcl_fp_mul_SECP256K1; + fp_sqr = &mcl::vint::mcl_fp_sqr_SECP256K1; + fpDbl_mod = &mcl::vint::mcl_fpDbl_mod_SECP256K1; + } +#endif + if (N * UnitBitSize <= 256) { + hash = sha256; + } else { + hash = sha512; + } + { + bool b; + sq.set(&b, mp); + if (!b) return false; + } + modp.init(mp); + return fp::initForMont(*this, p, mode); +} + +void copyUnitToByteAsLE(uint8_t *dst, const Unit *src, size_t byteSize) +{ + while (byteSize >= sizeof(Unit)) { + setUnitAsLE(dst, *src++); + dst += sizeof(Unit); + byteSize -= sizeof(Unit); + } + if (byteSize == 0) return; + Unit x = *src; + while (byteSize) { + *dst++ = static_cast<uint8_t>(x); + x >>= 8; + byteSize--; + } +} + +void copyByteToUnitAsLE(Unit *dst, const uint8_t *src, size_t byteSize) +{ + while (byteSize >= sizeof(Unit)) { + *dst++ = getUnitAsLE(src); + src += sizeof(Unit); + byteSize -= sizeof(Unit); + } + if (byteSize == 0) return; + Unit x = 0; + for (size_t i = 0; i < byteSize; i++) { + x |= Unit(src[i]) << (i * 8); + } + *dst = x; +} + +#ifndef CYBOZU_DONT_USE_STRING +int detectIoMode(int ioMode, const std::ios_base& ios) +{ + if (ioMode & ~IoPrefix) return ioMode; + // IoAuto or IoPrefix + const std::ios_base::fmtflags f = ios.flags(); + assert(!(f & std::ios_base::oct)); + ioMode |= (f & std::ios_base::hex) ?
IoHex : 0; + if (f & std::ios_base::showbase) { + ioMode |= IoPrefix; + } + return ioMode; +} +#endif + +bool copyAndMask(Unit *y, const void *x, size_t xByteSize, const Op& op, MaskMode maskMode) +{ + const size_t fpByteSize = sizeof(Unit) * op.N; + if (maskMode == Mod) { + if (xByteSize > fpByteSize * 2) return false; + mpz_class mx; + bool b; + gmp::setArray(&b, mx, (const char*)x, xByteSize); + if (!b) return false; +#ifdef MCL_USE_VINT + op.modp.modp(mx, mx); +#else + mx %= op.mp; +#endif + const Unit *pmx = gmp::getUnit(mx); + size_t i = 0; + for (const size_t n = gmp::getUnitSize(mx); i < n; i++) { + y[i] = pmx[i]; + } + for (; i < op.N; i++) { + y[i] = 0; + } + return true; + } + if (xByteSize > fpByteSize) { + if (maskMode == NoMask) return false; + xByteSize = fpByteSize; + } + // QQQ : fixed later for big endian + copyByteToUnitAsLE(y, (const uint8_t*)x, xByteSize); + for (size_t i = (xByteSize + sizeof(Unit) - 1) / sizeof(Unit); i < op.N; i++) { + y[i] = 0; + } + if (maskMode == mcl::fp::SmallMask || maskMode == mcl::fp::MaskAndMod) { + maskArray(y, op.N, op.bitSize); + } + if (isGreaterOrEqualArray(y, op.p, op.N)) { + switch (maskMode) { + case mcl::fp::NoMask: return false; + case mcl::fp::SmallMask: + maskArray(y, op.N, op.bitSize - 1); + break; + case mcl::fp::MaskAndMod: + default: + op.fp_subPre(y, y, op.p); + break; + } + } + assert(isLessArray(y, op.p, op.N)); + return true; +} + +static bool isInUint64(uint64_t *pv, const fp::Block& b) +{ + assert(fp::UnitBitSize == 32 || fp::UnitBitSize == 64); + const size_t start = 64 / fp::UnitBitSize; + for (size_t i = start; i < b.n; i++) { + if (b.p[i]) return false; + } +#if MCL_SIZEOF_UNIT == 4 + *pv = b.p[0] | (uint64_t(b.p[1]) << 32); +#else + *pv = b.p[0]; +#endif + return true; +} + +uint64_t getUint64(bool *pb, const fp::Block& b) +{ + uint64_t v; + if (isInUint64(&v, b)) { + *pb = true; + return v; + } + *pb = false; + return 0; +} + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4146) +#endif + +int64_t getInt64(bool *pb, fp::Block& b, const fp::Op& op) +{ + bool isNegative = false; + if (fp::isGreaterOrEqualArray(b.p, op.half, op.N)) { + op.fp_neg(b.v_, b.p, op.p); + b.p = b.v_; + isNegative = true; + } + uint64_t v; + if (fp::isInUint64(&v, b)) { + const uint64_t c = uint64_t(1) << 63; + if (isNegative) { + if (v <= c) { // include c + *pb = true; + // -1 << 63 + if (v == c) return int64_t(-9223372036854775807ll - 1); + return int64_t(-v); + } + } else { + if (v < c) { // not include c + *pb = true; + return int64_t(v); + } + } + } + *pb = false; + return 0; +} + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + +} } // mcl::fp + diff --git a/vendor/github.com/byzantine-lab/mcl/src/fp_generator.hpp b/vendor/github.com/byzantine-lab/mcl/src/fp_generator.hpp new file mode 100644 index 000000000..b496bc4d4 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/fp_generator.hpp @@ -0,0 +1,3885 @@ +#pragma once +/** + @file + @brief Fp generator + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#if CYBOZU_HOST == CYBOZU_HOST_INTEL +#define XBYAK_NO_OP_NAMES +#define XBYAK_DISABLE_AVX512 +#include "xbyak/xbyak_util.h" + +#if MCL_SIZEOF_UNIT == 8 +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <cybozu/exception.hpp> + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4127) + #pragma warning(disable : 4458) +#endif + +namespace mcl { + +namespace fp_gen_local { + +class MemReg { + const Xbyak::Reg64 *r_; + const Xbyak::RegExp *m_; + size_t
offset_; +public: + MemReg(const Xbyak::Reg64 *r, const Xbyak::RegExp *m, size_t offset) : r_(r), m_(m), offset_(offset) {} + bool isReg() const { return r_ != 0; } + const Xbyak::Reg64& getReg() const { return *r_; } + Xbyak::RegExp getMem() const { return *m_ + offset_ * sizeof(size_t); } +}; + +struct MixPack { + static const size_t useAll = 100; + Xbyak::util::Pack p; + Xbyak::RegExp m; + size_t mn; + MixPack() : mn(0) {} + MixPack(Xbyak::util::Pack& remain, size_t& rspPos, size_t n, size_t useRegNum = useAll) + { + init(remain, rspPos, n, useRegNum); + } + void init(Xbyak::util::Pack& remain, size_t& rspPos, size_t n, size_t useRegNum = useAll) + { + size_t pn = std::min(remain.size(), n); + if (useRegNum != useAll && useRegNum < pn) pn = useRegNum; + this->mn = n - pn; + this->m = Xbyak::util::rsp + rspPos; + this->p = remain.sub(0, pn); + remain = remain.sub(pn); + rspPos += mn * 8; + } + size_t size() const { return p.size() + mn; } + bool isReg(size_t n) const { return n < p.size(); } + const Xbyak::Reg64& getReg(size_t n) const + { + assert(n < p.size()); + return p[n]; + } + Xbyak::RegExp getMem(size_t n) const + { + const size_t pn = p.size(); + assert(pn <= n && n < size()); + return m + (int)((n - pn) * sizeof(size_t)); + } + MemReg operator[](size_t n) const + { + const size_t pn = p.size(); + return MemReg((n < pn) ? &p[n] : 0, (n < pn) ? 0 : &m, n - pn); + } + void removeLast() + { + if (!size()) throw cybozu::Exception("MixPack:removeLast:empty"); + if (mn > 0) { + mn--; + } else { + p = p.sub(0, p.size() - 1); + } + } + /* + replace Mem with r if possible + */ + bool replaceMemWith(Xbyak::CodeGenerator *code, const Xbyak::Reg64& r) + { + if (mn == 0) return false; + p.append(r); + code->mov(r, code->ptr [m]); + m = m + 8; + mn--; + return true; + } +}; + +} // fp_gen_local + +/* + op(r, rm); + r : reg + rm : Reg/Mem +*/ +#define MCL_FP_GEN_OP_RM(op, r, rm) \ +if (rm.isReg()) { \ + op(r, rm.getReg()); \ +} else { \ + op(r, qword [rm.getMem()]); \ +} + +/* + op(rm, r); + rm : Reg/Mem + r : reg +*/ +#define MCL_FP_GEN_OP_MR(op, rm, r) \ +if (rm.isReg()) { \ + op(rm.getReg(), r); \ +} else { \ + op(qword [rm.getMem()], r); \ +} + +namespace fp { + +struct Profiler { + FILE *fp_; + const char *suf_; + const uint8_t *prev_; + Profiler() + : fp_(0) + , suf_(0) + , prev_(0) + { + } + void init(const char *suf, const uint8_t *prev) + { +#ifdef __linux__ + close(); + const char *s = getenv("MCL_PERF"); + if (s == 0 || strcmp(s, "1") != 0) return; + fprintf(stderr, "use perf suf=%s\n", suf); + suf_ = suf; + const int pid = getpid(); + char name[128]; + snprintf(name, sizeof(name), "/tmp/perf-%d.map", pid); + fp_ = fopen(name, "wb"); + if (fp_ == 0) throw cybozu::Exception("PerMap") << name; + prev_ = prev; +#else + (void)suf; + (void)prev; +#endif + } + ~Profiler() + { + close(); + } + void close() + { +#ifdef __linux__ + if (fp_ == 0) return; + fclose(fp_); + fp_ = 0; + prev_ = 0; +#endif + } + void set(const uint8_t *p, size_t n, const char *name) const + { +#ifdef __linux__ + if (fp_ == 0) return; + fprintf(fp_, "%llx %zx %s%s\n", (long long)p, n, name, suf_); +#else + (void)p; + (void)n; + (void)name; +#endif + } + void set(const char *name, const uint8_t *cur) + { +#ifdef __linux__ + set(prev_, cur - prev_, name); + prev_ = cur; +#else + (void)name; + (void)cur; +#endif + } +}; + +struct FpGenerator : Xbyak::CodeGenerator { + typedef Xbyak::RegExp RegExp; + typedef Xbyak::Reg64 Reg64; + typedef Xbyak::Xmm Xmm; + typedef Xbyak::Operand Operand; + typedef Xbyak::Label Label; + 
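// (sketch of the register discipline used below) StackFrame reserves the callee-saved registers a routine asks for; Pack/MixPack then spread a multi-word value over registers first and spill the remainder to the rsp-relative slots laid out in MixPack::init. +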
typedef Xbyak::util::StackFrame StackFrame; + typedef Xbyak::util::Pack Pack; + typedef fp_gen_local::MixPack MixPack; + typedef fp_gen_local::MemReg MemReg; + static const int UseRDX = Xbyak::util::UseRDX; + static const int UseRCX = Xbyak::util::UseRCX; + /* + classes to calculate offset and size + */ + struct Ext1 { + Ext1(int FpByte, const Reg64& r, int n = 0) + : r_(r) + , n_(n) + , next(FpByte + n) + { + } + operator RegExp() const { return r_ + n_; } + const Reg64& r_; + const int n_; + const int next; + private: + Ext1(const Ext1&); + void operator=(const Ext1&); + }; + struct Ext2 { + Ext2(int FpByte, const Reg64& r, int n = 0) + : r_(r) + , n_(n) + , next(FpByte * 2 + n) + , a(FpByte, r, n) + , b(FpByte, r, n + FpByte) + { + } + operator RegExp() const { return r_ + n_; } + const Reg64& r_; + const int n_; + const int next; + Ext1 a; + Ext1 b; + private: + Ext2(const Ext2&); + void operator=(const Ext2&); + }; + Xbyak::util::Cpu cpu_; + bool useMulx_; + bool useAdx_; + const Reg64& gp0; + const Reg64& gp1; + const Reg64& gp2; + const Reg64& gt0; + const Reg64& gt1; + const Reg64& gt2; + const Reg64& gt3; + const Reg64& gt4; + const Reg64& gt5; + const Reg64& gt6; + const Reg64& gt7; + const Reg64& gt8; + const Reg64& gt9; + const mcl::fp::Op *op_; + Label pL_; // pointer to p + // the following labels assume sf(this, 3, 10 | UseRDX) + Label mulPreL; + Label fpDbl_modL; + Label fp_mulL; + const uint64_t *p_; + uint64_t rp_; + int pn_; + int FpByte_; + bool isFullBit_; + Profiler prof_; + + /* + @param op [in] ; use op.p, op.N, op.isFullBit + */ + FpGenerator() + : CodeGenerator(4096 * 9, Xbyak::DontSetProtectRWE) +#ifdef XBYAK64_WIN + , gp0(rcx) + , gp1(r11) + , gp2(r8) + , gt0(r9) + , gt1(r10) + , gt2(rdi) + , gt3(rsi) +#else + , gp0(rdi) + , gp1(rsi) + , gp2(r11) + , gt0(rcx) + , gt1(r8) + , gt2(r9) + , gt3(r10) +#endif + , gt4(rbx) + , gt5(rbp) + , gt6(r12) + , gt7(r13) + , gt8(r14) + , gt9(r15) + , op_(0) + , p_(0) + , rp_(0) + , pn_(0) + , FpByte_(0) + { + useMulx_ = cpu_.has(Xbyak::util::Cpu::tBMI2); + useAdx_ = cpu_.has(Xbyak::util::Cpu::tADX); + } + bool init(Op& op) + { + if (!cpu_.has(Xbyak::util::Cpu::tAVX)) return false; + reset(); // reset jit code for reuse + setProtectModeRW(); // read/write memory + init_inner(op); +// printf("code size=%d\n", (int)getSize()); + setProtectModeRE(); // set read/exec memory + return true; + } +private: + void init_inner(Op& op) + { + op_ = &op; + L(pL_); + p_ = reinterpret_cast<const uint64_t*>(getCurr()); + for (size_t i = 0; i < op.N; i++) { + dq(op.p[i]); + } + rp_ = fp::getMontgomeryCoeff(p_[0]); + pn_ = (int)op.N; + FpByte_ = int(op.maxN * sizeof(uint64_t)); + isFullBit_ = op.isFullBit; +// printf("p=%p, pn_=%d, isFullBit_=%d\n", p_, pn_, isFullBit_); + static char suf[] = "_0"; + prof_.init(suf, getCurr()); + suf[1]++; + + op.fp_addPre = gen_addSubPre(true, pn_); + prof_.set("Fp_addPre", getCurr()); + + op.fp_subPre = gen_addSubPre(false, pn_); + prof_.set("Fp_subPre", getCurr()); + + op.fp_addA_ = gen_fp_add(); + prof_.set("Fp_add", getCurr()); + + op.fp_subA_ = gen_fp_sub(); + prof_.set("Fp_sub", getCurr()); + + op.fp_shr1 = gen_shr1(); + prof_.set("Fp_shr1", getCurr()); + + op.fp_negA_ = gen_fp_neg(); + prof_.set("Fp_neg", getCurr()); + + op.fpDbl_addA_ = gen_fpDbl_add(); + prof_.set("FpDbl_add", getCurr()); + + op.fpDbl_subA_ = gen_fpDbl_sub(); + prof_.set("FpDbl_sub", getCurr()); + + op.fpDbl_addPre = gen_addSubPre(true, pn_ * 2); + prof_.set("FpDbl_addPre", getCurr()); + + op.fpDbl_subPre = gen_addSubPre(false, pn_ * 2);
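+ // (note) each gen_* below returns the entry point of the routine it just emitted, or 0 when there is no specialized code for the current pn_; a 0 slot keeps mcl's portable C++ fallback for that operation. +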
prof_.set("FpDbl_subPre", getCurr()); + + op.fpDbl_mulPreA_ = gen_fpDbl_mulPre(); + prof_.set("FpDbl_mulPre", getCurr()); + + op.fpDbl_sqrPreA_ = gen_fpDbl_sqrPre(); + prof_.set("FpDbl_sqrPre", getCurr()); + + op.fpDbl_modA_ = gen_fpDbl_mod(op); + prof_.set("FpDbl_mod", getCurr()); + + op.fp_mulA_ = gen_mul(); + prof_.set("Fp_mul", getCurr()); + if (op.fp_mulA_) { + op.fp_mul = fp::func_ptr_cast(op.fp_mulA_); // used in toMont/fromMont + } + op.fp_sqrA_ = gen_sqr(); + prof_.set("Fp_sqr", getCurr()); + + if (op.primeMode != PM_NIST_P192 && op.N <= 4) { // support general op.N but not fast for op.N > 4 + align(16); + op.fp_preInv = getCurr(); + gen_preInv(); + prof_.set("preInv", getCurr()); + } + if (op.xi_a == 0) return; // Fp2 is not used + op.fp2_addA_ = gen_fp2_add(); + prof_.set("Fp2_add", getCurr()); + + op.fp2_subA_ = gen_fp2_sub(); + prof_.set("Fp2_sub", getCurr()); + + op.fp2_negA_ = gen_fp2_neg(); + prof_.set("Fp2_neg", getCurr()); + + op.fp2_mulNF = 0; + op.fp2Dbl_mulPreA_ = gen_fp2Dbl_mulPre(); + prof_.set("Fp2Dbl_mulPre", getCurr()); + + op.fp2Dbl_sqrPreA_ = gen_fp2Dbl_sqrPre(); + prof_.set("Fp2Dbl_sqrPre", getCurr()); + + op.fp2_mulA_ = gen_fp2_mul(); + prof_.set("Fp2_mul", getCurr()); + + op.fp2_sqrA_ = gen_fp2_sqr(); + prof_.set("Fp2_sqr", getCurr()); + + op.fp2_mul_xiA_ = gen_fp2_mul_xi(); + prof_.set("Fp2_mul_xi", getCurr()); + } + u3u gen_addSubPre(bool isAdd, int n) + { +// if (isFullBit_) return 0; + align(16); + u3u func = getCurr(); + StackFrame sf(this, 3); + if (isAdd) { + gen_raw_add(sf.p[0], sf.p[1], sf.p[2], rax, n); + } else { + gen_raw_sub(sf.p[0], sf.p[1], sf.p[2], rax, n); + } + return func; + } + /* + pz[] = px[] + py[] + */ + void gen_raw_add(const RegExp& pz, const RegExp& px, const RegExp& py, const Reg64& t, int n) + { + mov(t, ptr [px]); + add(t, ptr [py]); + mov(ptr [pz], t); + for (int i = 1; i < n; i++) { + mov(t, ptr [px + i * 8]); + adc(t, ptr [py + i * 8]); + mov(ptr [pz + i * 8], t); + } + } + /* + pz[] = px[] - py[] + */ + void gen_raw_sub(const RegExp& pz, const RegExp& px, const RegExp& py, const Reg64& t, int n) + { + mov(t, ptr [px]); + sub(t, ptr [py]); + mov(ptr [pz], t); + for (int i = 1; i < n; i++) { + mov(t, ptr [px + i * 8]); + sbb(t, ptr [py + i * 8]); + mov(ptr [pz + i * 8], t); + } + } + /* + pz[] = -px[] + use rax, rdx + */ + void gen_raw_neg(const RegExp& pz, const RegExp& px, const Pack& t) + { + Label nonZero, exit; + load_rm(t, px); + mov(rax, t[0]); + if (t.size() > 1) { + for (size_t i = 1; i < t.size(); i++) { + or_(rax, t[i]); + } + } else { + test(rax, rax); + } + jnz(nonZero); + // rax = 0 + for (size_t i = 0; i < t.size(); i++) { + mov(ptr[pz + i * 8], rax); + } + jmp(exit); + L(nonZero); + mov(rax, pL_); + for (size_t i = 0; i < t.size(); i++) { + mov(rdx, ptr [rax + i * 8]); + if (i == 0) { + sub(rdx, t[i]); + } else { + sbb(rdx, t[i]); + } + mov(ptr [pz + i * 8], rdx); + } + L(exit); + } + /* + (rdx:pz[0..n-1]) = px[0..n-1] * y + use t, rax, rdx + if n > 2 + use + wk[0] if useMulx_ + wk[0..n-2] otherwise + */ + void gen_raw_mulUnit(const RegExp& pz, const RegExp& px, const Reg64& y, const MixPack& wk, const Reg64& t, size_t n) + { + if (n == 1) { + mov(rax, ptr [px]); + mul(y); + mov(ptr [pz], rax); + return; + } + if (n == 2) { + mov(rax, ptr [px]); + mul(y); + mov(ptr [pz], rax); + mov(t, rdx); + mov(rax, ptr [px + 8]); + mul(y); + add(rax, t); + adc(rdx, 0); + mov(ptr [pz + 8], rax); + return; + } + if (useMulx_) { + assert(wk.size() > 0 && wk.isReg(0)); + const Reg64& t1 = wk.getReg(0); + // mulx(H, L, x) = [H:L] 
= x * rdx + mov(rdx, y); + mulx(t1, rax, ptr [px]); // [y:rax] = px * y + mov(ptr [pz], rax); + const Reg64 *pt0 = &t; + const Reg64 *pt1 = &t1; + for (size_t i = 1; i < n - 1; i++) { + mulx(*pt0, rax, ptr [px + i * 8]); + if (i == 1) { + add(rax, *pt1); + } else { + adc(rax, *pt1); + } + mov(ptr [pz + i * 8], rax); + std::swap(pt0, pt1); + } + mulx(rdx, rax, ptr [px + (n - 1) * 8]); + adc(rax, *pt1); + mov(ptr [pz + (n - 1) * 8], rax); + adc(rdx, 0); + return; + } + assert(wk.size() >= n - 1); + for (size_t i = 0; i < n; i++) { + mov(rax, ptr [px + i * 8]); + mul(y); + if (i < n - 1) { + mov(ptr [pz + i * 8], rax); + g_mov(wk[i], rdx); + } + } + for (size_t i = 1; i < n - 1; i++) { + mov(t, ptr [pz + i * 8]); + if (i == 1) { + g_add(t, wk[i - 1]); + } else { + g_adc(t, wk[i - 1]); + } + mov(ptr [pz + i * 8], t); + } + g_adc(rax, wk[n - 2]); + mov(ptr [pz + (n - 1) * 8], rax); + adc(rdx, 0); + } + void gen_mulUnit() + { +// assert(pn_ >= 2); + const int regNum = useMulx_ ? 2 : (1 + std::min(pn_ - 1, 8)); + const int stackSize = useMulx_ ? 0 : (pn_ - 1) * 8; + StackFrame sf(this, 3, regNum | UseRDX, stackSize); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& y = sf.p[2]; + size_t rspPos = 0; + Pack remain = sf.t.sub(1); + MixPack wk(remain, rspPos, pn_ - 1); + gen_raw_mulUnit(pz, px, y, wk, sf.t[0], pn_); + mov(rax, rdx); + } + /* + pz[] = px[] + */ + void gen_mov(const RegExp& pz, const RegExp& px, const Reg64& t, int n) + { + for (int i = 0; i < n; i++) { + mov(t, ptr [px + i * 8]); + mov(ptr [pz + i * 8], t); + } + } + /* + pz[] = px[] + py[] mod p[] + use rax, t + */ + void gen_raw_fp_add(const RegExp& pz, const RegExp& px, const RegExp& py, const Pack& t, bool withCarry) + { + const Pack& p0 = t.sub(0, pn_); + const Pack& p1 = t.sub(pn_, pn_); + const Reg64 *fullReg = isFullBit_ ? &t[pn_ * 2] : 0; + load_rm(p0, px); + add_rm(p0, py, withCarry); + mov_rr(p1, p0); + if (isFullBit_) { + mov(*fullReg, 0); + adc(*fullReg, 0); + } + mov(rax, pL_); + sub_rm(p1, rax); + if (fullReg) { + sbb(*fullReg, 0); + } + for (size_t i = 0; i < p1.size(); i++) { + cmovc(p1[i], p0[i]); + } + store_mr(pz, p1); + } + /* + pz[] = px[] - py[] mod p[] + use rax, t + */ + void gen_raw_fp_sub(const RegExp& pz, const RegExp& px, const RegExp& py, const Pack& t, bool withCarry) + { + const Pack& p0 = t.sub(0, pn_); + const Pack& p1 = t.sub(pn_, pn_); + load_rm(p0, px); + sub_rm(p0, py, withCarry); + mov(rax, pL_); + load_rm(p1, rax); + sbb(rax, rax); // rax = (x > y) ? 0 : -1 + for (size_t i = 0; i < p1.size(); i++) { + and_(p1[i], rax); + } + add_rr(p0, p1); + store_mr(pz, p0); + } + void gen_fp_add_le4() + { + assert(pn_ <= 4); + const int tn = pn_ * 2 + (isFullBit_ ? 
1 : 0); + StackFrame sf(this, 3, tn); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& py = sf.p[2]; + gen_raw_fp_add(pz, px, py, sf.t, false); + } + void gen_fp_sub_le4() + { + assert(pn_ <= 4); + const int tn = pn_ * 2; + StackFrame sf(this, 3, tn); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& py = sf.p[2]; + gen_raw_fp_sub(pz, px, py, sf.t, false); + } + /* + add(pz, px, py); + size of t1, t2 == 6 + destroy t0, t1 + */ + void gen_raw_fp_add6(const RegExp& pz, const RegExp& px, const RegExp& py, const Pack& t1, const Pack& t2, bool withCarry) + { + load_rm(t1, px); + add_rm(t1, py, withCarry); + Label exit; + if (isFullBit_) { + jnc("@f"); + mov(t2[0], pL_); // t2 is not used + sub_rm(t1, t2[0]); + jmp(exit); + L("@@"); + } + mov_rr(t2, t1); + sub_rm(t2, rip + pL_); + for (int i = 0; i < 6; i++) { + cmovnc(t1[i], t2[i]); + } + L(exit); + store_mr(pz, t1); + } + void gen_fp_add6() + { + /* + cmov is faster than jmp + */ + StackFrame sf(this, 3, 10); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& py = sf.p[2]; + Pack t1 = sf.t.sub(0, 6); + Pack t2 = sf.t.sub(6); + t2.append(rax); + t2.append(px); // destroyed after use + gen_raw_fp_add6(pz, px, py, t1, t2, false); + } + void3u gen_fp_add() + { + align(16); + void3u func = getCurr<void3u>(); + if (pn_ <= 4) { + gen_fp_add_le4(); + return func; + } + if (pn_ == 6) { + gen_fp_add6(); + return func; + } + StackFrame sf(this, 3, 0, pn_ * 8); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& py = sf.p[2]; + const Xbyak::CodeGenerator::LabelType jmpMode = pn_ < 5 ? T_AUTO : T_NEAR; + + inLocalLabel(); + gen_raw_add(pz, px, py, rax, pn_); + mov(px, pL_); // destroy px + if (isFullBit_) { + jc(".over", jmpMode); + } +#ifdef MCL_USE_JMP + for (int i = 0; i < pn_; i++) { + mov(py, ptr [pz + (pn_ - 1 - i) * 8]); // destroy py + cmp(py, ptr [px + (pn_ - 1 - i) * 8]); + jc(".exit", jmpMode); + jnz(".over", jmpMode); + } + L(".over"); + gen_raw_sub(pz, pz, px, rax, pn_); + L(".exit"); +#else + gen_raw_sub(rsp, pz, px, rax, pn_); + jc(".exit", jmpMode); + gen_mov(pz, rsp, rax, pn_); + if (isFullBit_) { + jmp(".exit", jmpMode); + L(".over"); + gen_raw_sub(pz, pz, px, rax, pn_); + } + L(".exit"); +#endif + outLocalLabel(); + return func; + } + void3u gen_fpDbl_add() + { + align(16); + void3u func = getCurr<void3u>(); + if (pn_ <= 4) { + int tn = pn_ * 2 + (isFullBit_ ?
1 : 0); + StackFrame sf(this, 3, tn); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& py = sf.p[2]; + gen_raw_add(pz, px, py, rax, pn_); + gen_raw_fp_add(pz + 8 * pn_, px + 8 * pn_, py + 8 * pn_, sf.t, true); + return func; + } else if (pn_ == 6 && !isFullBit_) { + StackFrame sf(this, 3, 10); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& py = sf.p[2]; + gen_raw_add(pz, px, py, rax, pn_); + Pack t1 = sf.t.sub(0, 6); + Pack t2 = sf.t.sub(6); + t2.append(rax); + t2.append(py); + gen_raw_fp_add6(pz + pn_ * 8, px + pn_ * 8, py + pn_ * 8, t1, t2, true); + return func; + } + return 0; + } + void3u gen_fpDbl_sub() + { + align(16); + void3u func = getCurr<void3u>(); + if (pn_ <= 4) { + int tn = pn_ * 2; + StackFrame sf(this, 3, tn); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& py = sf.p[2]; + gen_raw_sub(pz, px, py, rax, pn_); + gen_raw_fp_sub(pz + 8 * pn_, px + 8 * pn_, py + 8 * pn_, sf.t, true); + return func; + } else if (pn_ == 6) { + StackFrame sf(this, 3, 4); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& py = sf.p[2]; + gen_raw_sub(pz, px, py, rax, pn_); + Pack t = sf.t; + t.append(rax); + t.append(px); + gen_raw_fp_sub6(pz, px, py, pn_ * 8, t, true); + return func; + } + return 0; + } + void gen_raw_fp_sub6(const RegExp& pz, const RegExp& px, const RegExp& py, int offset, const Pack& t, bool withCarry) + { + load_rm(t, px + offset); + sub_rm(t, py + offset, withCarry); + /* + jmp is faster than and-mask without jmp + */ + jnc("@f"); + add_rm(t, rip + pL_); + L("@@"); + store_mr(pz + offset, t); + } + void gen_fp_sub6() + { + StackFrame sf(this, 3, 4); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& py = sf.p[2]; + Pack t = sf.t; + t.append(rax); + t.append(px); // |t| = 6 + gen_raw_fp_sub6(pz, px, py, 0, t, false); + } + void3u gen_fp_sub() + { + align(16); + void3u func = getCurr<void3u>(); + if (pn_ <= 4) { + gen_fp_sub_le4(); + return func; + } + if (pn_ == 6) { + gen_fp_sub6(); + return func; + } + StackFrame sf(this, 3); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& py = sf.p[2]; + const Xbyak::CodeGenerator::LabelType jmpMode = pn_ < 5 ?
T_AUTO : T_NEAR; + Label exit; + gen_raw_sub(pz, px, py, rax, pn_); + jnc(exit, jmpMode); + mov(px, pL_); + gen_raw_add(pz, pz, px, rax, pn_); + L(exit); + return func; + } + void2u gen_fp_neg() + { + align(16); + void2u func = getCurr<void2u>(); + StackFrame sf(this, 2, UseRDX | pn_); + gen_raw_neg(sf.p[0], sf.p[1], sf.t); + return func; + } + void2u gen_shr1() + { + align(16); + void2u func = getCurr<void2u>(); + const int c = 1; + StackFrame sf(this, 2, 1); + const Reg64 *t0 = &rax; + const Reg64 *t1 = &sf.t[0]; + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + mov(*t0, ptr [px]); + for (int i = 0; i < pn_ - 1; i++) { + mov(*t1, ptr [px + 8 * (i + 1)]); + shrd(*t0, *t1, c); + mov(ptr [pz + i * 8], *t0); + std::swap(t0, t1); + } + shr(*t0, c); + mov(ptr [pz + (pn_ - 1) * 8], *t0); + return func; + } + void3u gen_mul() + { + align(16); + void3u func = getCurr<void3u>(); + if (op_->primeMode == PM_NIST_P192) { + StackFrame sf(this, 3, 10 | UseRDX, 8 * 6); + mulPre3(rsp, sf.p[1], sf.p[2], sf.t); + fpDbl_mod_NIST_P192(sf.p[0], rsp, sf.t); + return func; + } + if (pn_ == 3) { + gen_montMul3(); + return func; + } + if (pn_ == 4) { + gen_montMul4(); + return func; + } + if (pn_ == 6 && !isFullBit_ && useMulx_ && useAdx_) { +#if 1 + // a little faster + gen_montMul6(); +#else + if (mulPreL.getAddress() == 0 || fpDbl_modL.getAddress() == 0) return 0; + StackFrame sf(this, 3, 10 | UseRDX, 12 * 8); + /* + use xm3 + rsp + [0, ..12 * 8) ; mul(x, y) + */ + vmovq(xm3, gp0); + mov(gp0, rsp); + call(mulPreL); // gp0, x, y + vmovq(gp0, xm3); + mov(gp1, rsp); + call(fpDbl_modL); +#endif + return func; + } +#if 0 + if (pn_ <= 9) { + gen_montMulN(p_, rp_, pn_); + return func; + } +#endif + return 0; + } + /* + @input (z, xy) + z[1..0] <- montgomery reduction(x[3..0]) + @note destroy rax, rdx, t0, ..., t8 + */ + void gen_fpDbl_mod2() + { + StackFrame sf(this, 2, 9 | UseRDX); + const Reg64& z = sf.p[0]; + const Reg64& xy = sf.p[1]; + + const Reg64& t0 = sf.t[0]; + const Reg64& t1 = sf.t[1]; + const Reg64& t2 = sf.t[2]; + const Reg64& t3 = sf.t[3]; + const Reg64& t4 = sf.t[4]; + const Reg64& t5 = sf.t[5]; + const Reg64& t6 = sf.t[6]; + const Reg64& t7 = sf.t[7]; + const Reg64& t8 = sf.t[8]; + + const Reg64& a = rax; + const Reg64& d = rdx; + + mov(t6, ptr [xy + 8 * 0]); + + mov(a, rp_); + mul(t6); + mov(t0, pL_); + mov(t7, a); // q + + // [d:t7:t1] = p * q + mul2x1(t0, t7, t1, t8); + + xor_(t8, t8); + if (isFullBit_) { + xor_(t5, t5); + } + mov(t4, d); + add(t1, t6); + add_rm(Pack(t8, t4, t7), xy + 8 * 1, true); + // [t8:t4:t7] + if (isFullBit_) { + adc(t5, 0); + } + + mov(a, rp_); + mul(t7); + mov(t6, a); // q + + // [d:t6:xy] = p * q + mul2x1(t0, t6, xy, t3); + + add_rr(Pack(t8, t4, t7), Pack(d, t6, xy)); + // [t8:t4] + if (isFullBit_) { + adc(t5, 0); + } + + mov_rr(Pack(t2, t1), Pack(t8, t4)); + sub_rm(Pack(t8, t4), t0); + if (isFullBit_) { + sbb(t5, 0); + } + cmovc_rr(Pack(t8, t4), Pack(t2, t1)); + store_mr(z, Pack(t8, t4)); + } + /* + @input (z, xy) + z[2..0] <- montgomery reduction(x[5..0]) + @note destroy rax, rdx, t0, ..., t10 + */ + void gen_fpDbl_mod3() + { + StackFrame sf(this, 3, 10 | UseRDX); + const Reg64& z = sf.p[0]; + const Reg64& xy = sf.p[1]; + + const Reg64& t0 = sf.t[0]; + const Reg64& t1 = sf.t[1]; + const Reg64& t2 = sf.t[2]; + const Reg64& t3 = sf.t[3]; + const Reg64& t4 = sf.t[4]; + const Reg64& t5 = sf.t[5]; + const Reg64& t6 = sf.t[6]; + const Reg64& t7 = sf.t[7]; + const Reg64& t8 = sf.t[8]; + const Reg64& t9 = sf.t[9]; + const Reg64& t10 = sf.p[2]; + + const Reg64& a = rax; + const Reg64& d =
rdx; + + mov(t10, ptr [xy + 8 * 0]); + + mov(a, rp_); + mul(t10); + mov(t0, pL_); + mov(t7, a); // q + + // [d:t7:t2:t1] = p * q + mul3x1(t0, t7, t4, t2, t1, t8); + + xor_(t8, t8); + xor_(t9, t9); + if (isFullBit_) { + xor_(t5, t5); + } + mov(t4, d); + add(t1, t10); + add_rm(Pack(t9, t8, t4, t7, t2), xy + 8 * 1, true); + // [t9:t8:t4:t7:t2] + if (isFullBit_) { + adc(t5, 0); + } + + mov(a, rp_); + mul(t2); + mov(t10, a); // q + + // [d:t10:t6:xy] = p * q + mul3x1(t0, t10, t1, t6, xy, t3); + + add_rr(Pack(t8, t4, t7, t2), Pack(d, t10, t6, xy)); + adc(t9, 0); // [t9:t8:t4:t7] + if (isFullBit_) { + adc(t5, 0); + } + + mov(a, rp_); + mul(t7); + mov(t10, a); // q + + // [d:t10:xy:t6] = p * q + mul3x1(t0, t10, t1, xy, t6, t2); + + add_rr(Pack(t9, t8, t4, t7), Pack(d, t10, xy, t6)); + // [t9:t8:t4] + if (isFullBit_) { + adc(t5, 0); + } + + mov_rr(Pack(t2, t1, t10), Pack(t9, t8, t4)); + sub_rm(Pack(t9, t8, t4), t0); + if (isFullBit_) { + sbb(t5, 0); + } + cmovc_rr(Pack(t9, t8, t4), Pack(t2, t1, t10)); + store_mr(z, Pack(t9, t8, t4)); + } + /* + @input (z, xy) + z[3..0] <- montgomery reduction(x[7..0]) + @note destroy rax, rdx, t0, ..., t10, xm0, xm1 + xm2 if isFullBit_ + */ + void gen_fpDbl_mod4(const Reg64& z, const Reg64& xy, const Pack& t, const Reg64& t10) + { + const Reg64& t0 = t[0]; + const Reg64& t1 = t[1]; + const Reg64& t2 = t[2]; + const Reg64& t3 = t[3]; + const Reg64& t4 = t[4]; + const Reg64& t5 = t[5]; + const Reg64& t6 = t[6]; + const Reg64& t7 = t[7]; + const Reg64& t8 = t[8]; + const Reg64& t9 = t[9]; + + const Reg64& a = rax; + const Reg64& d = rdx; + + vmovq(xm0, z); + mov(z, ptr [xy + 8 * 0]); + + mov(a, rp_); + mul(z); + mov(t0, pL_); + mov(t7, a); // q + + // [d:t7:t3:t2:t1] = p * q + mul4x1(t0, t7, t4, t3, t2, t1, t8); + + xor_(t8, t8); + xor_(t9, t9); + xor_(t10, t10); + mov(t4, d); + add(t1, z); + adc(t2, qword [xy + 8 * 1]); + adc(t3, qword [xy + 8 * 2]); + adc(t7, qword [xy + 8 * 3]); + adc(t4, ptr [xy + 8 * 4]); + adc(t8, ptr [xy + 8 * 5]); + adc(t9, ptr [xy + 8 * 6]); + adc(t10, ptr [xy + 8 * 7]); + // [t10:t9:t8:t4:t7:t3:t2] + if (isFullBit_) { + mov(t5, 0); + adc(t5, 0); + vmovq(xm2, t5); + } + + // free z, t0, t1, t5, t6, xy + + mov(a, rp_); + mul(t2); + mov(z, a); // q + + vmovq(xm1, t10); + // [d:z:t5:t6:xy] = p * q + mul4x1(t0, z, t1, t5, t6, xy, t10); + vmovq(t10, xm1); + + add_rr(Pack(t8, t4, t7, t3, t2), Pack(d, z, t5, t6, xy)); + adc(t9, 0); + adc(t10, 0); // [t10:t9:t8:t4:t7:t3] + if (isFullBit_) { + vmovq(t5, xm2); + adc(t5, 0); + vmovq(xm2, t5); + } + + // free z, t0, t1, t2, t5, t6, xy + + mov(a, rp_); + mul(t3); + mov(z, a); // q + + // [d:z:t5:xy:t6] = p * q + mul4x1(t0, z, t1, t5, xy, t6, t2); + + add_rr(Pack(t9, t8, t4, t7, t3), Pack(d, z, t5, xy, t6)); + adc(t10, 0); // c' = [t10:t9:t8:t4:t7] + if (isFullBit_) { + vmovq(t3, xm2); + adc(t3, 0); + } + + // free z, t1, t2, t7, t5, xy, t6 + + mov(a, rp_); + mul(t7); + mov(z, a); // q + + // [d:z:t5:xy:t6] = p * q + mul4x1(t0, z, t1, t5, xy, t6, t2); + + add_rr(Pack(t10, t9, t8, t4, t7), Pack(d, z, t5, xy, t6)); + // [t10:t9:t8:t4] + if (isFullBit_) { + adc(t3, 0); + } + + mov_rr(Pack(t6, t2, t1, z), Pack(t10, t9, t8, t4)); + sub_rm(Pack(t10, t9, t8, t4), t0); + if (isFullBit_) { + sbb(t3, 0); + } + cmovc(t4, z); + cmovc(t8, t1); + cmovc(t9, t2); + cmovc(t10, t6); + + vmovq(z, xm0); + store_mr(z, Pack(t10, t9, t8, t4)); + } + void2u gen_fpDbl_mod(const fp::Op& op) + { + align(16); + void2u func = getCurr<void2u>(); + if (op.primeMode == PM_NIST_P192) { + StackFrame sf(this, 2, 6 | UseRDX);
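+ // P-192 special case: p = 2^192 - 2^64 - 1, so the double-width product is reduced by folding the high words back in with shifted additions rather than by the generic Montgomery loop. +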
fpDbl_mod_NIST_P192(sf.p[0], sf.p[1], sf.t); + return func; + } +#if 0 + if (op.primeMode == PM_NIST_P521) { + StackFrame sf(this, 2, 8 | UseRDX); + fpDbl_mod_NIST_P521(sf.p[0], sf.p[1], sf.t); + return func; + } +#endif + if (pn_ == 2) { + gen_fpDbl_mod2(); + return func; + } + if (pn_ == 3) { + gen_fpDbl_mod3(); + return func; + } + if (pn_ == 4) { + StackFrame sf(this, 3, 10 | UseRDX, 0, false); + call(fpDbl_modL); + sf.close(); + L(fpDbl_modL); + gen_fpDbl_mod4(gp0, gp1, sf.t, gp2); + ret(); + return func; + } + if (pn_ == 6 && !isFullBit_ && useMulx_ && useAdx_) { + StackFrame sf(this, 3, 10 | UseRDX, 0, false); + call(fpDbl_modL); + sf.close(); + L(fpDbl_modL); + Pack t = sf.t; + t.append(gp2); + gen_fpDbl_mod6(gp0, gp1, t); + ret(); + return func; + } + return 0; + } + void2u gen_sqr() + { + align(16); + void2u func = getCurr<void2u>(); + if (op_->primeMode == PM_NIST_P192) { + StackFrame sf(this, 3, 10 | UseRDX, 6 * 8); + Pack t = sf.t; + t.append(sf.p[2]); + sqrPre3(rsp, sf.p[1], t); + fpDbl_mod_NIST_P192(sf.p[0], rsp, sf.t); + return func; + } + if (pn_ == 3) { + gen_montSqr3(); + return func; + } + if (pn_ == 4 && useMulx_) { +#if 1 + // sqr(y, x) = mul(y, x, x) +#ifdef XBYAK64_WIN + mov(r8, rdx); +#else + mov(rdx, rsi); +#endif + jmp((const void*)op_->fp_mulA_); +#else // (sqrPre + mod) is slower than mul + StackFrame sf(this, 3, 10 | UseRDX, 8 * 8); + Pack t = sf.t; + t.append(sf.p[2]); + sqrPre4(rsp, sf.p[1], t); + mov(gp0, sf.p[0]); + mov(gp1, rsp); + call(fpDbl_modL); +#endif + return func; + } + if (pn_ == 6 && !isFullBit_ && useMulx_ && useAdx_) { + if (fpDbl_modL.getAddress() == 0) return 0; + StackFrame sf(this, 3, 10 | UseRDX, (12 + 6) * 8); + /* + use xm3 + rsp + [6 * 8, (12 + 6) * 8) ; sqrPre(x, x) + [0..6 * 8) ; stack for sqrPre6 + */ + vmovq(xm3, gp0); + Pack t = sf.t; + t.append(sf.p[2]); + // sqrPre6 uses 6 * 8 bytes stack + sqrPre6(rsp + 6 * 8, sf.p[1], t); + mov(gp0, ptr[rsp + (12 + 6) * 8]); + vmovq(gp0, xm3); + lea(gp1, ptr[rsp + 6 * 8]); + call(fpDbl_modL); + return func; + } + return 0; + } + /* + input (pz[], px[], py[]) + z[] <- montgomery(x[], y[]) + */ + void gen_montMulN(const uint64_t *p, uint64_t pp, int n) + { + assert(1 <= pn_ && pn_ <= 9); + const int regNum = useMulx_ ? 4 : 3 + std::min(n - 1, 7); + const int stackSize = (n * 3 + (isFullBit_ ?
2 : 1)) * 8; + StackFrame sf(this, 3, regNum | UseRDX, stackSize); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& py = sf.p[2]; + const Reg64& y = sf.t[0]; + const Reg64& pAddr = sf.t[1]; + const Reg64& t = sf.t[2]; + Pack remain = sf.t.sub(3); + size_t rspPos = 0; + + MixPack pw1(remain, rspPos, n - 1); + const RegExp pw2 = rsp + rspPos; // pw2[0..n-1] + const RegExp pc = pw2 + n * 8; // pc[0..n+1] + mov(pAddr, (size_t)p); + + for (int i = 0; i < n; i++) { + mov(y, ptr [py + i * 8]); + montgomeryN_1(pp, n, pc, px, y, pAddr, t, pw1, pw2, i == 0); + } + // pz[] = pc[] - p[] + gen_raw_sub(pz, pc, pAddr, t, n); + if (isFullBit_) sbb(qword[pc + n * 8], 0); + jnc("@f"); + for (int i = 0; i < n; i++) { + mov(t, ptr [pc + i * 8]); + mov(ptr [pz + i * 8], t); + } + L("@@"); + } + /* + input (z, x, y) = (p0, p1, p2) + z[0..3] <- montgomery(x[0..3], y[0..3]) + destroy gt0, ..., gt9, xm0, xm1, p2 + */ + void gen_montMul4() + { + StackFrame sf(this, 3, 10 | UseRDX, 0, false); + call(fp_mulL); + sf.close(); + const Reg64& p0 = sf.p[0]; + const Reg64& p1 = sf.p[1]; + const Reg64& p2 = sf.p[2]; + + const Reg64& t0 = sf.t[0]; + const Reg64& t1 = sf.t[1]; + const Reg64& t2 = sf.t[2]; + const Reg64& t3 = sf.t[3]; + const Reg64& t4 = sf.t[4]; + const Reg64& t5 = sf.t[5]; + const Reg64& t6 = sf.t[6]; + const Reg64& t7 = sf.t[7]; + const Reg64& t8 = sf.t[8]; + const Reg64& t9 = sf.t[9]; + + L(fp_mulL); + vmovq(xm0, p0); // save p0 + mov(p0, pL_); + vmovq(xm1, p2); + mov(p2, ptr [p2]); + montgomery4_1(rp_, t0, t7, t3, t2, t1, p1, p2, p0, t4, t5, t6, t8, t9, true, xm2); + + vmovq(p2, xm1); + mov(p2, ptr [p2 + 8]); + montgomery4_1(rp_, t1, t0, t7, t3, t2, p1, p2, p0, t4, t5, t6, t8, t9, false, xm2); + + vmovq(p2, xm1); + mov(p2, ptr [p2 + 16]); + montgomery4_1(rp_, t2, t1, t0, t7, t3, p1, p2, p0, t4, t5, t6, t8, t9, false, xm2); + + vmovq(p2, xm1); + mov(p2, ptr [p2 + 24]); + montgomery4_1(rp_, t3, t2, t1, t0, t7, p1, p2, p0, t4, t5, t6, t8, t9, false, xm2); + // [t7:t3:t2:t1:t0] + + mov(t4, t0); + mov(t5, t1); + mov(t6, t2); + mov(rdx, t3); + sub_rm(Pack(t3, t2, t1, t0), p0); + if (isFullBit_) sbb(t7, 0); + cmovc(t0, t4); + cmovc(t1, t5); + cmovc(t2, t6); + cmovc(t3, rdx); + + vmovq(p0, xm0); // load p0 + store_mr(p0, Pack(t3, t2, t1, t0)); + ret(); + } + /* + c[n+2] = c[n+1] + px[n] * rdx + use rax + */ + void mulAdd(const Pack& c, int n, const RegExp& px) + { + const Reg64& a = rax; + xor_(a, a); + for (int i = 0; i < n; i++) { + mulx(c[n + 1], a, ptr [px + i * 8]); + adox(c[i], a); + adcx(c[i + 1], c[n + 1]); + } + mov(a, 0); + mov(c[n + 1], a); + adox(c[n], a); + adcx(c[n + 1], a); + adox(c[n + 1], a); + } + /* + input + c[6..0] + rdx = yi + use rax, rdx + output + c[7..1] + + if first: + c = x[5..0] * rdx + else: + c += x[5..0] * rdx + q = uint64_t(c0 * rp) + c += p * q + c >>= 64 + */ + void montgomery6_1(const Pack& c, const RegExp& px, const Reg64& t0, const Reg64& t1, bool isFirst) + { + const int n = 6; + const Reg64& a = rax; + const Reg64& d = rdx; + if (isFirst) { + const Reg64 *pt0 = &a; + const Reg64 *pt1 = &t0; + // c[6..0] = px[5..0] * rdx + mulx(*pt0, c[0], ptr [px + 0 * 8]); + for (int i = 1; i < n; i++) { + mulx(*pt1, c[i], ptr[px + i * 8]); + if (i == 1) { + add(c[i], *pt0); + } else { + adc(c[i], *pt0); + } + std::swap(pt0, pt1); + } + mov(c[n], 0); + adc(c[n], *pt0); + } else { + // c[7..0] = c[6..0] + px[5..0] * rdx + mulAdd(c, 6, px); + } + mov(a, rp_); + mul(c[0]); // q = a + mov(d, a); + mov(t1, pL_); + // c += p * q + mulAdd(c, 6, t1); + } + /* + input (z, x, 
y) = (p0, p1, p2) + z[0..5] <- montgomery(x[0..5], y[0..5]) + destroy t0, ..., t9, rax, rdx + */ + void gen_montMul6() + { + assert(!isFullBit_ && useMulx_ && useAdx_); + StackFrame sf(this, 3, 10 | UseRDX, 0, false); + call(fp_mulL); + sf.close(); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& py = sf.p[2]; + + const Reg64& t0 = sf.t[0]; + const Reg64& t1 = sf.t[1]; + const Reg64& t2 = sf.t[2]; + const Reg64& t3 = sf.t[3]; + const Reg64& t4 = sf.t[4]; + const Reg64& t5 = sf.t[5]; + const Reg64& t6 = sf.t[6]; + const Reg64& t7 = sf.t[7]; + const Reg64& t8 = sf.t[8]; + const Reg64& t9 = sf.t[9]; + L(fp_mulL); + mov(rdx, ptr [py + 0 * 8]); + montgomery6_1(Pack(t7, t6, t5, t4, t3, t2, t1, t0), px, t8, t9, true); + mov(rdx, ptr [py + 1 * 8]); + montgomery6_1(Pack(t0, t7, t6, t5, t4, t3, t2, t1), px, t8, t9, false); + mov(rdx, ptr [py + 2 * 8]); + montgomery6_1(Pack(t1, t0, t7, t6, t5, t4, t3, t2), px, t8, t9, false); + mov(rdx, ptr [py + 3 * 8]); + montgomery6_1(Pack(t2, t1, t0, t7, t6, t5, t4, t3), px, t8, t9, false); + mov(rdx, ptr [py + 4 * 8]); + montgomery6_1(Pack(t3, t2, t1, t0, t7, t6, t5, t4), px, t8, t9, false); + mov(rdx, ptr [py + 5 * 8]); + montgomery6_1(Pack(t4, t3, t2, t1, t0, t7, t6, t5), px, t8, t9, false); + // [t4:t3:t2:t1:t0:t7:t6] + const Pack z = Pack(t3, t2, t1, t0, t7, t6); + const Pack keep = Pack(rdx, rax, px, py, t8, t9); + mov_rr(keep, z); + mov(t5, pL_); + sub_rm(z, t5); + cmovc_rr(z, keep); + store_mr(pz, z); + ret(); + } + /* + input (z, x, y) = (p0, p1, p2) + z[0..2] <- montgomery(x[0..2], y[0..2]) + destroy gt0, ..., gt9, xm0, xm1, p2 + */ + void gen_montMul3() + { + StackFrame sf(this, 3, 10 | UseRDX); + const Reg64& p0 = sf.p[0]; + const Reg64& p1 = sf.p[1]; + const Reg64& p2 = sf.p[2]; + + const Reg64& t0 = sf.t[0]; + const Reg64& t1 = sf.t[1]; + const Reg64& t2 = sf.t[2]; + const Reg64& t3 = sf.t[3]; + const Reg64& t4 = sf.t[4]; + const Reg64& t5 = sf.t[5]; + const Reg64& t6 = sf.t[6]; + const Reg64& t7 = sf.t[7]; + const Reg64& t8 = sf.t[8]; + const Reg64& t9 = sf.t[9]; + + vmovq(xm0, p0); // save p0 + mov(t7, pL_); + mov(t9, ptr [p2]); + // c3, c2, c1, c0, px, y, p, + montgomery3_1(rp_, t0, t3, t2, t1, p1, t9, t7, t4, t5, t6, t8, p0, true); + mov(t9, ptr [p2 + 8]); + montgomery3_1(rp_, t1, t0, t3, t2, p1, t9, t7, t4, t5, t6, t8, p0, false); + + mov(t9, ptr [p2 + 16]); + montgomery3_1(rp_, t2, t1, t0, t3, p1, t9, t7, t4, t5, t6, t8, p0, false); + + // [(t3):t2:t1:t0] + mov(t4, t0); + mov(t5, t1); + mov(t6, t2); + sub_rm(Pack(t2, t1, t0), t7); + if (isFullBit_) sbb(t3, 0); + cmovc(t0, t4); + cmovc(t1, t5); + cmovc(t2, t6); + vmovq(p0, xm0); + store_mr(p0, Pack(t2, t1, t0)); + } + /* + input (pz, px) + z[0..2] <- montgomery(px[0..2], px[0..2]) + destroy gt0, ..., gt9, xm0, xm1, p2 + */ + void gen_montSqr3() + { + StackFrame sf(this, 3, 10 | UseRDX, 16 * 3); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; +// const Reg64& py = sf.p[2]; // not used + + const Reg64& t0 = sf.t[0]; + const Reg64& t1 = sf.t[1]; + const Reg64& t2 = sf.t[2]; + const Reg64& t3 = sf.t[3]; + const Reg64& t4 = sf.t[4]; + const Reg64& t5 = sf.t[5]; + const Reg64& t6 = sf.t[6]; + const Reg64& t7 = sf.t[7]; + const Reg64& t8 = sf.t[8]; + const Reg64& t9 = sf.t[9]; + + vmovq(xm0, pz); // save pz + mov(t7, pL_); + mov(t9, ptr [px]); + mul3x1_sqr1(px, t9, t3, t2, t1, t0); + mov(t0, rdx); + montgomery3_sub(rp_, t0, t9, t2, t1, px, t3, t7, t4, t5, t6, t8, pz, true); + + mov(t3, ptr [px + 8]); + mul3x1_sqr2(px, t3, t6, t5, t4); + add_rr(Pack(t1, t0, t9, t2), 
Pack(rdx, rax, t5, t4)); + if (isFullBit_) setc(pz.cvt8()); + montgomery3_sub(rp_, t1, t3, t9, t2, px, t0, t7, t4, t5, t6, t8, pz, false); + + mov(t0, ptr [px + 16]); + mul3x1_sqr3(t0, t5, t4); + add_rr(Pack(t2, t1, t3, t9), Pack(rdx, rax, t5, t4)); + if (isFullBit_) setc(pz.cvt8()); + montgomery3_sub(rp_, t2, t0, t3, t9, px, t1, t7, t4, t5, t6, t8, pz, false); + + // [t9:t2:t0:t3] + mov(t4, t3); + mov(t5, t0); + mov(t6, t2); + sub_rm(Pack(t2, t0, t3), t7); + if (isFullBit_) sbb(t9, 0); + cmovc(t3, t4); + cmovc(t0, t5); + cmovc(t2, t6); + vmovq(pz, xm0); + store_mr(pz, Pack(t2, t0, t3)); + } + /* + py[5..0] <- px[2..0]^2 + @note use rax, rdx + */ + void sqrPre3(const RegExp& py, const RegExp& px, const Pack& t) + { + const Reg64& a = rax; + const Reg64& d = rdx; + const Reg64& t0 = t[0]; + const Reg64& t1 = t[1]; + const Reg64& t2 = t[2]; + const Reg64& t3 = t[3]; + const Reg64& t4 = t[4]; + const Reg64& t5 = t[5]; + const Reg64& t6 = t[6]; + const Reg64& t7 = t[7]; + const Reg64& t8 = t[8]; + const Reg64& t9 = t[9]; + const Reg64& t10 = t[10]; + + if (useMulx_) { + mov(d, ptr [px + 8 * 0]); + mulx(t0, a, d); + mov(ptr [py + 8 * 0], a); + + mov(t7, ptr [px + 8 * 1]); + mov(t9, ptr [px + 8 * 2]); + mulx(t2, t1, t7); + mulx(t4, t3, t9); + + mov(t5, t2); + mov(t6, t4); + + add(t0, t1); + adc(t5, t3); + adc(t6, 0); // [t6:t5:t0] + + mov(d, t7); + mulx(t8, t7, d); + mulx(t10, t9, t9); + } else { + mov(t9, ptr [px + 8 * 0]); + mov(a, t9); + mul(t9); + mov(ptr [py + 8 * 0], a); + mov(t0, d); + mov(a, ptr [px + 8 * 1]); + mul(t9); + mov(t1, a); + mov(t2, d); + mov(a, ptr [px + 8 * 2]); + mul(t9); + mov(t3, a); + mov(t4, d); + + mov(t5, t2); + mov(t6, t4); + + add(t0, t1); + adc(t5, t3); + adc(t6, 0); // [t6:t5:t0] + + mov(t9, ptr [px + 8 * 1]); + mov(a, t9); + mul(t9); + mov(t7, a); + mov(t8, d); + mov(a, ptr [px + 8 * 2]); + mul(t9); + mov(t9, a); + mov(t10, d); + } + add(t2, t7); + adc(t8, t9); + mov(t7, t10); + adc(t7, 0); // [t7:t8:t2:t1] + + add(t0, t1); + adc(t2, t5); + adc(t6, t8); + adc(t7, 0); + mov(ptr [py + 8 * 1], t0); // [t7:t6:t2] + + mov(a, ptr [px + 8 * 2]); + mul(a); + add(t4, t9); + adc(a, t10); + adc(d, 0); // [d:a:t4:t3] + + add(t2, t3); + adc(t6, t4); + adc(t7, a); + adc(d, 0); + store_mr(py + 8 * 2, Pack(d, t7, t6, t2)); + } + /* + [pd:pz[0]] <- py[n-1..0] * px[0] + */ + void mulPack(const RegExp& pz, const RegExp& px, const RegExp& py, const Pack& pd) + { + const Reg64& a = rax; + const Reg64& d = rdx; + mov(d, ptr [px]); + mulx(pd[0], a, ptr [py + 8 * 0]); + mov(ptr [pz + 8 * 0], a); + for (size_t i = 1; i < pd.size(); i++) { + mulx(pd[i], a, ptr [py + 8 * i]); + if (i == 1) { + add(pd[i - 1], a); + } else { + adc(pd[i - 1], a); + } + } + adc(pd[pd.size() - 1], 0); + } + /* + [hi:Pack(d_(n-1), .., d1):pz[0]] <- Pack(d_(n-1), ..., d0) + py[n-1..0] * px[0] + */ + void mulPackAdd(const RegExp& pz, const RegExp& px, const RegExp& py, const Reg64& hi, const Pack& pd) + { + const Reg64& a = rax; + const Reg64& d = rdx; + mov(d, ptr [px]); + xor_(a, a); + for (size_t i = 0; i < pd.size(); i++) { + mulx(hi, a, ptr [py + i * 8]); + adox(pd[i], a); + if (i == 0) mov(ptr[pz], pd[0]); + if (i == pd.size() - 1) break; + adcx(pd[i + 1], hi); + } + mov(d, 0); + adcx(hi, d); + adox(hi, d); + } + /* + input : z[n], p[n-1], rdx(implicit) + output: z[] += p[] * rdx, rax = 0 and set CF + use rax, rdx + */ + void mulPackAddShr(const Pack& z, const RegExp& p, const Reg64& H, bool last = false) + { + const Reg64& a = rax; + const size_t n = z.size(); + assert(n >= 3); + // clear CF and OF + 
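// (note) mulx leaves the flags untouched while adcx/adox drive two independent carry chains (CF and OF), so the loop below can interleave the low and high halves of the partial products without flag stalls. +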
xor_(a, a); + const size_t loop = last ? n - 1 : n - 3; + for (size_t i = 0; i < loop; i++) { + // mulx(H, L, x) = [H:L] = x * rdx + mulx(H, a, ptr [p + i * 8]); + adox(z[i], a); + adcx(z[i + 1], H); + } + if (last) { + mov(a, 0); + adox(z[n - 1], a); + return; + } + /* + reorder addition not to propagate OF outside this routine + H + + + rdx a + | | + v v + z[n-1] z[n-2] + */ + mulx(H, a, ptr [p + (n - 3) * 8]); + adox(z[n - 3], a); + mulx(rdx, a, ptr [p + (n - 2) * 8]); // destroy rdx + adox(H, a); + mov(a, 0); + adox(rdx, a); + adcx(z[n - 2], H); + adcx(z[n - 1], rdx); + } + /* + pz[5..0] <- px[2..0] * py[2..0] + */ + void mulPre3(const RegExp& pz, const RegExp& px, const RegExp& py, const Pack& t) + { + const Reg64& a = rax; + const Reg64& d = rdx; + const Reg64& t0 = t[0]; + const Reg64& t1 = t[1]; + const Reg64& t2 = t[2]; + const Reg64& t3 = t[3]; + const Reg64& t4 = t[4]; + const Reg64& t5 = t[5]; + const Reg64& t6 = t[6]; + const Reg64& t7 = t[7]; + const Reg64& t8 = t[8]; + const Reg64& t9 = t[9]; + + if (useMulx_) { + mulPack(pz, px, py, Pack(t2, t1, t0)); +#if 0 // a little slow + if (useAdx_) { + // [t2:t1:t0] + mulPackAdd(pz + 8 * 1, px + 8 * 1, py, t3, Pack(t2, t1, t0)); + // [t3:t2:t1] + mulPackAdd(pz + 8 * 2, px + 8 * 2, py, t4, Pack(t3, t2, t1)); + // [t4:t3:t2] + store_mr(pz + 8 * 3, Pack(t4, t3, t2)); + return; + } +#endif + } else { + mov(t5, ptr [px]); + mov(a, ptr [py + 8 * 0]); + mul(t5); + mov(ptr [pz + 8 * 0], a); + mov(t0, d); + mov(a, ptr [py + 8 * 1]); + mul(t5); + mov(t3, a); + mov(t1, d); + mov(a, ptr [py + 8 * 2]); + mul(t5); + mov(t4, a); + mov(t2, d); + add(t0, t3); + mov(t2, 0); + adc(t1, a); + adc(t2, d); // [t2:t1:t0:pz[0]] = px[0] * py[2..0] + } + + // here [t2:t1:t0] + + mov(t9, ptr [px + 8]); + + // [d:t9:t6:t5] = px[1] * py[2..0] + mul3x1(py, t9, t7, t6, t5, t4); + add_rr(Pack(t2, t1, t0), Pack(t9, t6, t5)); + adc(d, 0); + mov(t8, d); + mov(ptr [pz + 8], t0); + // here [t8:t2:t1] + + mov(t9, ptr [px + 16]); + + // [d:t9:t5:t4] + mul3x1(py, t9, t6, t5, t4, t0); + add_rr(Pack(t8, t2, t1), Pack(t9, t5, t4)); + adc(d, 0); + store_mr(pz + 8 * 2, Pack(d, t8, t2, t1)); + } + void sqrPre2(const Reg64& py, const Reg64& px, const Pack& t) + { + // QQQ + const Reg64& t0 = t[0]; + const Reg64& t1 = t[1]; + const Reg64& t2 = t[2]; + const Reg64& t3 = t[3]; + const Reg64& t4 = t[4]; + const Reg64& t5 = t[5]; + const Reg64& t6 = t[6]; + load_rm(Pack(px, t0), px); // x = [px:t0] + sqr2(t4, t3, t2, t1, px, t0, t5, t6); + store_mr(py, Pack(t4, t3, t2, t1)); + } + /* + [y3:y2:y1:y0] = [x1:x0] ^ 2 + use rdx + */ + void sqr2(const Reg64& y3, const Reg64& y2, const Reg64& y1, const Reg64& y0, const Reg64& x1, const Reg64& x0, const Reg64& t1, const Reg64& t0) + { + if (!useMulx_) { + throw cybozu::Exception("sqr2:not support mulx"); + } + mov(rdx, x0); + mulx(y1, y0, x0); // x0^2 + mov(rdx, x1); + mulx(y3, y2, x1); // x1^2 + mulx(t1, t0, x0); // x0 x1 + add(y1, t0); + adc(y2, t1); + adc(y3, 0); + add(y1, t0); + adc(y2, t1); + adc(y3, 0); + } + /* + [t3:t2:t1:t0] = px[1, 0] * py[1, 0] + use rax, rdx + */ + void mul2x2(const RegExp& px, const RegExp& py, const Reg64& t4, const Reg64& t3, const Reg64& t2, const Reg64& t1, const Reg64& t0) + { + if (!useMulx_) { + throw cybozu::Exception("mul2x2:not support mulx"); + } +#if 0 + // # of add is less, but a little slower + mov(t4, ptr [py + 8 * 0]); + mov(rdx, ptr [px + 8 * 1]); + mulx(t2, t1, t4); + mov(rdx, ptr [px + 8 * 0]); + mulx(t0, rax, ptr [py + 8 * 1]); + xor_(t3, t3); + add_rr(Pack(t3, t2, t1), Pack(t3, t0, rax)); + //
[t3:t2:t1] = ad + bc + mulx(t4, t0, t4); + mov(rax, ptr [px + 8 * 1]); + mul(qword [py + 8 * 1]); + add_rr(Pack(t3, t2, t1), Pack(rdx, rax, t4)); +#else + mov(rdx, ptr [py + 8 * 0]); + mov(rax, ptr [px + 8 * 0]); + mulx(t1, t0, rax); + mov(t3, ptr [px + 8 * 1]); + mulx(t2, rdx, t3); + add(t1, rdx); + adc(t2, 0); // [t2:t1:t0] + + mov(rdx, ptr [py + 8 * 1]); + mulx(rax, t4, rax); + mulx(t3, rdx, t3); + add(rax, rdx); + adc(t3, 0); // [t3:rax:t4] + add(t1, t4); + adc(t2, rax); + adc(t3, 0); // t3:t2:t1:t0] +#endif + } + void mulPre2(const RegExp& pz, const RegExp& px, const RegExp& py, const Pack& t) + { + const Reg64& t0 = t[0]; + const Reg64& t1 = t[1]; + const Reg64& t2 = t[2]; + const Reg64& t3 = t[3]; + const Reg64& t4 = t[4]; + mul2x2(px, py, t4, t3, t2, t1, t0); + store_mr(pz, Pack(t3, t2, t1, t0)); + } + /* + py[7..0] = px[3..0] ^ 2 + use xmm0 + */ + void sqrPre4(const RegExp& py, const RegExp& px, const Pack& t) + { + const Reg64& t0 = t[0]; + const Reg64& t1 = t[1]; + const Reg64& t2 = t[2]; + const Reg64& t3 = t[3]; + const Reg64& t4 = t[4]; + const Reg64& t5 = t[5]; + const Reg64& t6 = t[6]; + const Reg64& t7 = t[7]; + const Reg64& t8 = t[8]; + const Reg64& t9 = t[9]; + const Reg64& t10 = t[10]; + const Reg64& a = rax; + const Reg64& d = rdx; + + /* + (aN + b)^2 = a^2 N^2 + 2ab N + b^2 + */ + load_rm(Pack(t9, t8), px); + sqr2(t3, t2, t1, t0, t9, t8, t7, t6); + // [t3:t2:t1:t0] = b^2 + store_mr(py, Pack(t1, t0)); + vmovq(xm0, t2); + mul2x2(px, px + 2 * 8, t6, t5, t4, t1, t0); + // [t5:t4:t1:t0] = ab + xor_(t6, t6); + add_rr(Pack(t6, t5, t4, t1, t0), Pack(t6, t5, t4, t1, t0)); + // [t6:t5:t4:t1:t0] = 2ab + load_rm(Pack(t8, t7), px + 2 * 8); + // free t10, t9, rax, rdx + /* + [d:t8:t10:t9] = [t8:t7]^2 + */ + mov(d, t7); + mulx(t10, t9, t7); // [t10:t9] = t7^2 + mulx(t7, t2, t8); // [t7:t2] = t7 t8 + xor_(a, a); + add_rr(Pack(a, t7, t2), Pack(a, t7, t2)); + // [a:t7:t2] = 2 t7 t8 + mov(d, t8); + mulx(d, t8, t8); // [d:t8] = t8^2 + add_rr(Pack(d, t8, t10), Pack(a, t7, t2)); + // [d:t8:t10:t9] = [t8:t7]^2 + vmovq(t2, xm0); + add_rr(Pack(t8, t10, t9, t3, t2), Pack(t6, t5, t4, t1, t0)); + adc(d, 0); + store_mr(py + 2 * 8, Pack(d, t8, t10, t9, t3, t2)); + } + /* + py[11..0] = px[5..0] ^ 2 + use rax, rdx, stack[6 * 8] + */ + void sqrPre6(const RegExp& py, const RegExp& px, const Pack& t) + { + const Reg64& t0 = t[0]; + const Reg64& t1 = t[1]; + const Reg64& t2 = t[2]; + /* + (aN + b)^2 = a^2 N^2 + 2ab N + b^2 + */ + sqrPre3(py, px, t); // [py] <- b^2 + sqrPre3(py + 6 * 8, px + 3 * 8, t); // [py + 6 * 8] <- a^2 + mulPre3(rsp, px, px + 3 * 8, t); // ab + Pack ab = t.sub(0, 6); + load_rm(ab, rsp); + xor_(rax, rax); + for (int i = 0; i < 6; i++) { + if (i == 0) { + add(ab[i], ab[i]); + } else { + adc(ab[i], ab[i]); + } + } + adc(rax, rax); + add_rm(ab, py + 3 * 8); + store_mr(py + 3 * 8, ab); + load_rm(Pack(t2, t1, t0), py + 9 * 8); + adc(t0, rax); + adc(t1, 0); + adc(t2, 0); + store_mr(py + 9 * 8, Pack(t2, t1, t0)); + } + /* + pz[7..0] <- px[3..0] * py[3..0] + */ + void mulPre4(const RegExp& pz, const RegExp& px, const RegExp& py, const Pack& t) + { + const Reg64& a = rax; + const Reg64& d = rdx; + const Reg64& t0 = t[0]; + const Reg64& t1 = t[1]; + const Reg64& t2 = t[2]; + const Reg64& t3 = t[3]; + const Reg64& t4 = t[4]; + const Reg64& t5 = t[5]; + const Reg64& t6 = t[6]; + const Reg64& t7 = t[7]; + const Reg64& t8 = t[8]; + const Reg64& t9 = t[9]; + +#if 0 // a little slower + if (useMulx_ && useAdx_) { + mulPack(pz, px, py, Pack(t3, t2, t1, t0)); + mulPackAdd(pz + 8 * 1, px + 8 * 1, py, 
t4, Pack(t3, t2, t1, t0)); + mulPackAdd(pz + 8 * 2, px + 8 * 2, py, t0, Pack(t4, t3, t2, t1)); + mulPackAdd(pz + 8 * 3, px + 8 * 3, py, t1, Pack(t0, t4, t3, t2)); + store_mr(pz + 8 * 4, Pack(t1, t0, t4, t3)); + return; + } +#endif +#if 0 + // a little slower + if (!useMulx_) { + throw cybozu::Exception("mulPre4:not support mulx"); + } + mul2x2(px + 8 * 0, py + 8 * 2, t4, t3, t2, t1, t0); + mul2x2(px + 8 * 2, py + 8 * 0, t9, t8, t7, t6, t5); + xor_(t4, t4); + add_rr(Pack(t4, t3, t2, t1, t0), Pack(t4, t8, t7, t6, t5)); + // [t4:t3:t2:t1:t0] + mul2x2(px + 8 * 0, py + 8 * 0, t9, t8, t7, t6, t5); + store_mr(pz, Pack(t6, t5)); + // [t8:t7] + vmovq(xm0, t7); + vmovq(xm1, t8); + mul2x2(px + 8 * 2, py + 8 * 2, t8, t7, t9, t6, t5); + vmovq(a, xm0); + vmovq(d, xm1); + add_rr(Pack(t4, t3, t2, t1, t0), Pack(t9, t6, t5, d, a)); + adc(t7, 0); + store_mr(pz + 8 * 2, Pack(t7, t4, t3, t2, t1, t0)); +#else + if (useMulx_) { + mulPack(pz, px, py, Pack(t3, t2, t1, t0)); + } else { + mov(t5, ptr [px]); + mov(a, ptr [py + 8 * 0]); + mul(t5); + mov(ptr [pz + 8 * 0], a); + mov(t0, d); + mov(a, ptr [py + 8 * 1]); + mul(t5); + mov(t3, a); + mov(t1, d); + mov(a, ptr [py + 8 * 2]); + mul(t5); + mov(t4, a); + mov(t2, d); + mov(a, ptr [py + 8 * 3]); + mul(t5); + add(t0, t3); + mov(t3, 0); + adc(t1, t4); + adc(t2, a); + adc(t3, d); // [t3:t2:t1:t0:pz[0]] = px[0] * py[3..0] + } + + // here [t3:t2:t1:t0] + + mov(t9, ptr [px + 8]); + + // [d:t9:t7:t6:t5] = px[1] * py[3..0] + mul4x1(py, t9, t8, t7, t6, t5, t4); + add_rr(Pack(t3, t2, t1, t0), Pack(t9, t7, t6, t5)); + adc(d, 0); + mov(t8, d); + mov(ptr [pz + 8], t0); + // here [t8:t3:t2:t1] + + mov(t9, ptr [px + 16]); + + // [d:t9:t6:t5:t4] + mul4x1(py, t9, t7, t6, t5, t4, t0); + add_rr(Pack(t8, t3, t2, t1), Pack(t9, t6, t5, t4)); + adc(d, 0); + mov(t7, d); + mov(ptr [pz + 16], t1); + + mov(t9, ptr [px + 24]); + + // [d:t9:t5:t4:t1] + mul4x1(py, t9, t6, t5, t4, t1, t0); + add_rr(Pack(t7, t8, t3, t2), Pack(t9, t5, t4, t1)); + adc(d, 0); + store_mr(pz + 8 * 3, Pack(t7, t8, t3, t2)); + mov(ptr [pz + 8 * 7], d); +#endif + } + // [gp0] <- [gp1] * [gp2] + void mulPre6(const Pack& t) + { + const Reg64& pz = gp0; + const Reg64& px = gp1; + const Reg64& py = gp2; + const Reg64& t0 = t[0]; + const Reg64& t1 = t[1]; + const Reg64& t2 = t[2]; + const Reg64& t3 = t[3]; +#if 0 // slower than basic multiplication(56clk -> 67clk) +// const Reg64& t7 = t[7]; +// const Reg64& t8 = t[8]; +// const Reg64& t9 = t[9]; + const Reg64& a = rax; + const Reg64& d = rdx; + const int stackSize = (3 + 3 + 6 + 1 + 1 + 1) * 8; // a+b, c+d, (a+b)(c+d), x, y, z + const int abPos = 0; + const int cdPos = abPos + 3 * 8; + const int abcdPos = cdPos + 3 * 8; + const int zPos = abcdPos + 6 * 8; + const int yPos = zPos + 8; + const int xPos = yPos + 8; + + sub(rsp, stackSize); + mov(ptr[rsp + zPos], pz); + mov(ptr[rsp + xPos], px); + mov(ptr[rsp + yPos], py); + /* + x = aN + b, y = cN + d + xy = abN^2 + ((a+b)(c+d) - ac - bd)N + bd + */ + xor_(a, a); + load_rm(Pack(t2, t1, t0), px); // b + add_rm(Pack(t2, t1, t0), px + 3 * 8); // a + b + adc(a, 0); + store_mr(pz, Pack(t2, t1, t0)); + vmovq(xm0, a); // carry1 + + xor_(a, a); + load_rm(Pack(t2, t1, t0), py); // d + add_rm(Pack(t2, t1, t0), py + 3 * 8); // c + d + adc(a, 0); + store_mr(pz + 3 * 8, Pack(t2, t1, t0)); + vmovq(xm1, a); // carry2 + + mulPre3(rsp + abcdPos, pz, pz + 3 * 8, t); // (a+b)(c+d) + + vmovq(a, xm0); + vmovq(d, xm1); + mov(t3, a); + and_(t3, d); // t3 = carry1 & carry2 + Label doNothing; + je(doNothing); + load_rm(Pack(t2, t1, t0), rsp + abcdPos + 
3 * 8); + test(a, a); + je("@f"); + // add (c+d) + add_rm(Pack(t2, t1, t0), pz + 3 * 8); + adc(t3, 0); + L("@@"); + test(d, d); + je("@f"); + // add(a+b) + add_rm(Pack(t2, t1, t0), pz); + adc(t3, 0); + L("@@"); + store_mr(rsp + abcdPos + 3 * 8, Pack(t2, t1, t0)); + L(doNothing); + vmovq(xm0, t3); // save new carry + + + mov(gp0, ptr [rsp + zPos]); + mov(gp1, ptr [rsp + xPos]); + mov(gp2, ptr [rsp + yPos]); + mulPre3(gp0, gp1, gp2, t); // [rsp] <- bd + + mov(gp0, ptr [rsp + zPos]); + mov(gp1, ptr [rsp + xPos]); + mov(gp2, ptr [rsp + yPos]); + mulPre3(gp0 + 6 * 8, gp1 + 3 * 8, gp2 + 3 * 8, t); // [rsp + 6 * 8] <- ac + + mov(pz, ptr[rsp + zPos]); + vmovq(d, xm0); + for (int i = 0; i < 6; i++) { + mov(a, ptr[pz + (3 + i) * 8]); + if (i == 0) { + add(a, ptr[rsp + abcdPos + i * 8]); + } else { + adc(a, ptr[rsp + abcdPos + i * 8]); + } + mov(ptr[pz + (3 + i) * 8], a); + } + mov(a, ptr[pz + 9 * 8]); + adc(a, d); + mov(ptr[pz + 9 * 8], a); + jnc("@f"); + for (int i = 10; i < 12; i++) { + mov(a, ptr[pz + i * 8]); + adc(a, 0); + mov(ptr[pz + i * 8], a); + } + L("@@"); + add(rsp, stackSize); +#else + const Reg64& t4 = t[4]; + const Reg64& t5 = t[5]; + const Reg64& t6 = t[6]; + + mulPack(pz, px, py, Pack(t5, t4, t3, t2, t1, t0)); // [t5:t4:t3:t2:t1:t0] + mulPackAdd(pz + 8 * 1, px + 8 * 1, py, t6, Pack(t5, t4, t3, t2, t1, t0)); // [t6:t5:t4:t3:t2:t1] + mulPackAdd(pz + 8 * 2, px + 8 * 2, py, t0, Pack(t6, t5, t4, t3, t2, t1)); // [t0:t6:t5:t4:t3:t2] + mulPackAdd(pz + 8 * 3, px + 8 * 3, py, t1, Pack(t0, t6, t5, t4, t3, t2)); // [t1:t0:t6:t5:t4:t3] + mulPackAdd(pz + 8 * 4, px + 8 * 4, py, t2, Pack(t1, t0, t6, t5, t4, t3)); // [t2:t1:t0:t6:t5:t4] + mulPackAdd(pz + 8 * 5, px + 8 * 5, py, t3, Pack(t2, t1, t0, t6, t5, t4)); // [t3:t2:t1:t0:t6:t5] + store_mr(pz + 8 * 6, Pack(t3, t2, t1, t0, t6, t5)); +#endif + } + /* + @input (z, xy) + z[5..0] <- montgomery reduction(x[11..0]) + use xm0, xm1, xm2 + */ + void gen_fpDbl_mod6(const Reg64& z, const Reg64& xy, const Pack& t) + { + assert(!isFullBit_); + const Reg64& t0 = t[0]; + const Reg64& t1 = t[1]; + const Reg64& t2 = t[2]; + const Reg64& t3 = t[3]; + const Reg64& t4 = t[4]; + const Reg64& t5 = t[5]; + const Reg64& t6 = t[6]; + const Reg64& t7 = t[7]; + const Reg64& t8 = t[8]; + const Reg64& t9 = t[9]; + const Reg64& t10 = t[10]; + + const Reg64& a = rax; + const Reg64& d = rdx; + vmovq(xm0, z); + mov(z, ptr [xy + 0 * 8]); + mov(a, rp_); + mul(z); + lea(t0, ptr [rip + pL_]); + load_rm(Pack(t7, t6, t5, t4, t3, t2, t1), xy); + mov(d, a); // q + mulPackAddShr(Pack(t7, t6, t5, t4, t3, t2, t1), t0, t10); + load_rm(Pack(t1, t0, t10, t9, t8), xy + 7 * 8); + adc(t8, rax); + adc(t9, rax); + adc(t10, rax); + adc(t0, rax); + adc(t1, rax); + // z = [t1:t0:t10:t9:t8:t7:t6:t5:t4:t3:t2] + mov(a, rp_); + mul(t2); + vmovq(xm1, t0); // save + lea(t0, ptr [rip + pL_]); + mov(d, a); + vmovq(xm2, t10); + mulPackAddShr(Pack(t8, t7, t6, t5, t4, t3, t2), t0, t10); + vmovq(t10, xm2); + adc(t9, rax); + adc(t10, rax); + vmovq(t0, xm1); // load + adc(t0, rax); + adc(t1, rax); + // z = [t1:t0:t10:t9:t8:t7:t6:t5:t4:t3] + mov(a, rp_); + mul(t3); + lea(t2, ptr [rip + pL_]); + mov(d, a); + vmovq(xm2, t10); + mulPackAddShr(Pack(t9, t8, t7, t6, t5, t4, t3), t2, t10); + vmovq(t10, xm2); + adc(t10, rax); + adc(t0, rax); + adc(t1, rax); + // z = [t1:t0:t10:t9:t8:t7:t6:t5:t4] + mov(a, rp_); + mul(t4); + lea(t2, ptr [rip + pL_]); + mov(d, a); + mulPackAddShr(Pack(t10, t9, t8, t7, t6, t5, t4), t2, t3); + adc(t0, rax); + adc(t1, rax); + // z = [t1:t0:t10:t9:t8:t7:t6:t5] + mov(a, rp_); + mul(t5); + 
lea(t2, ptr [rip + pL_]); + mov(d, a); + mulPackAddShr(Pack(t0, t10, t9, t8, t7, t6, t5), t2, t3); + adc(t1, a); + // z = [t1:t0:t10:t9:t8:t7:t6] + mov(a, rp_); + mul(t6); + lea(t2, ptr [rip + pL_]); + mov(d, a); + mulPackAddShr(Pack(t1, t0, t10, t9, t8, t7, t6), t2, t3, true); + // z = [t1:t0:t10:t9:t8:t7] + Pack zp = Pack(t1, t0, t10, t9, t8, t7); + Pack keep = Pack(z, xy, rax, rdx, t3, t6); + mov_rr(keep, zp); + sub_rm(zp, t2); // z -= p + cmovc_rr(zp, keep); + vmovq(z, xm0); + store_mr(z, zp); + } + void2u gen_fpDbl_sqrPre() + { + align(16); + void2u func = getCurr(); + if (pn_ == 2 && useMulx_) { + StackFrame sf(this, 2, 7 | UseRDX); + sqrPre2(sf.p[0], sf.p[1], sf.t); + return func; + } + if (pn_ == 3) { + StackFrame sf(this, 3, 10 | UseRDX); + Pack t = sf.t; + t.append(sf.p[2]); + sqrPre3(sf.p[0], sf.p[1], t); + return func; + } + if (pn_ == 4 && useMulx_) { + StackFrame sf(this, 3, 10 | UseRDX); + Pack t = sf.t; + t.append(sf.p[2]); + sqrPre4(sf.p[0], sf.p[1], t); + return func; + } + if (pn_ == 6 && useMulx_ && useAdx_) { + StackFrame sf(this, 3, 10 | UseRDX, 6 * 8); + Pack t = sf.t; + t.append(sf.p[2]); + sqrPre6(sf.p[0], sf.p[1], t); + return func; + } + return 0; +#if 0 +#ifdef XBYAK64_WIN + mov(r8, rdx); +#else + mov(rdx, rsi); +#endif + jmp((void*)op.fpDbl_mulPreA_); + return func; +#endif + } + void3u gen_fpDbl_mulPre() + { + align(16); + void3u func = getCurr(); + if (pn_ == 2 && useMulx_) { + StackFrame sf(this, 3, 5 | UseRDX); + mulPre2(sf.p[0], sf.p[1], sf.p[2], sf.t); + return func; + } + if (pn_ == 3) { + StackFrame sf(this, 3, 10 | UseRDX); + mulPre3(sf.p[0], sf.p[1], sf.p[2], sf.t); + return func; + } + if (pn_ == 4) { + /* + fpDbl_mulPre is available as C function + this function calls mulPreL directly. + */ + StackFrame sf(this, 3, 10 | UseRDX, 0, false); + mulPre4(gp0, gp1, gp2, sf.t); + sf.close(); // make epilog + L(mulPreL); // called only from asm code + mulPre4(gp0, gp1, gp2, sf.t); + ret(); + return func; + } + if (pn_ == 6 && useAdx_) { + StackFrame sf(this, 3, 10 | UseRDX, 0, false); + call(mulPreL); + sf.close(); // make epilog + L(mulPreL); // called only from asm code + mulPre6(sf.t); + ret(); + return func; + } + return 0; + } + static inline void debug_put_inner(const uint64_t *ptr, int n) + { + printf("debug "); + for (int i = 0; i < n; i++) { + printf("%016llx", (long long)ptr[n - 1 - i]); + } + printf("\n"); + } +#ifdef _MSC_VER + void debug_put(const RegExp& m, int n) + { + assert(n <= 8); + static uint64_t regBuf[7]; + + push(rax); + mov(rax, (size_t)regBuf); + mov(ptr [rax + 8 * 0], rcx); + mov(ptr [rax + 8 * 1], rdx); + mov(ptr [rax + 8 * 2], r8); + mov(ptr [rax + 8 * 3], r9); + mov(ptr [rax + 8 * 4], r10); + mov(ptr [rax + 8 * 5], r11); + mov(rcx, ptr [rsp + 8]); // org rax + mov(ptr [rax + 8 * 6], rcx); // save + mov(rcx, ptr [rax + 8 * 0]); // org rcx + pop(rax); + + lea(rcx, ptr [m]); + mov(rdx, n); + mov(rax, (size_t)debug_put_inner); + sub(rsp, 32); + call(rax); + add(rsp, 32); + + push(rax); + mov(rax, (size_t)regBuf); + mov(rcx, ptr [rax + 8 * 0]); + mov(rdx, ptr [rax + 8 * 1]); + mov(r8, ptr [rax + 8 * 2]); + mov(r9, ptr [rax + 8 * 3]); + mov(r10, ptr [rax + 8 * 4]); + mov(r11, ptr [rax + 8 * 5]); + mov(rax, ptr [rax + 8 * 6]); + add(rsp, 8); + } +#endif + /* + z >>= c + @note shrd(r/m, r, imm) + */ + void shr_mp(const MixPack& z, uint8_t c, const Reg64& t) + { + const size_t n = z.size(); + for (size_t i = 0; i < n - 1; i++) { + const Reg64 *p; + if (z.isReg(i + 1)) { + p = &z.getReg(i + 1); + } else { + mov(t, ptr [z.getMem(i + 1)]); 
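+ // shrd takes its shift-in bits from a register operand (shrd r/m, r, imm), so stage the next limb in t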
+ p = &t; + } + if (z.isReg(i)) { + shrd(z.getReg(i), *p, c); + } else { + shrd(qword [z.getMem(i)], *p, c); + } + } + if (z.isReg(n - 1)) { + shr(z.getReg(n - 1), c); + } else { + shr(qword [z.getMem(n - 1)], c); + } + } + /* + z *= 2 + */ + void twice_mp(const MixPack& z, const Reg64& t) + { + g_add(z[0], z[0], t); + for (size_t i = 1, n = z.size(); i < n; i++) { + g_adc(z[i], z[i], t); + } + } + /* + z += x + */ + void add_mp(const MixPack& z, const MixPack& x, const Reg64& t) + { + assert(z.size() == x.size()); + g_add(z[0], x[0], t); + for (size_t i = 1, n = z.size(); i < n; i++) { + g_adc(z[i], x[i], t); + } + } + void add_m_m(const RegExp& mz, const RegExp& mx, const Reg64& t, int n) + { + for (int i = 0; i < n; i++) { + mov(t, ptr [mx + i * 8]); + if (i == 0) { + add(ptr [mz + i * 8], t); + } else { + adc(ptr [mz + i * 8], t); + } + } + } + /* + mz[] = mx[] - y + */ + void sub_m_mp_m(const RegExp& mz, const RegExp& mx, const MixPack& y, const Reg64& t) + { + for (size_t i = 0; i < y.size(); i++) { + mov(t, ptr [mx + i * 8]); + if (i == 0) { + if (y.isReg(i)) { + sub(t, y.getReg(i)); + } else { + sub(t, ptr [y.getMem(i)]); + } + } else { + if (y.isReg(i)) { + sbb(t, y.getReg(i)); + } else { + sbb(t, ptr [y.getMem(i)]); + } + } + mov(ptr [mz + i * 8], t); + } + } + /* + z -= x + */ + void sub_mp(const MixPack& z, const MixPack& x, const Reg64& t) + { + assert(z.size() == x.size()); + g_sub(z[0], x[0], t); + for (size_t i = 1, n = z.size(); i < n; i++) { + g_sbb(z[i], x[i], t); + } + } + /* + z -= px[] + */ + void sub_mp_m(const MixPack& z, const RegExp& px, const Reg64& t) + { + if (z.isReg(0)) { + sub(z.getReg(0), ptr [px]); + } else { + mov(t, ptr [px]); + sub(ptr [z.getMem(0)], t); + } + for (size_t i = 1, n = z.size(); i < n; i++) { + if (z.isReg(i)) { + sbb(z.getReg(i), ptr [px + i * 8]); + } else { + mov(t, ptr [px + i * 8]); + sbb(ptr [z.getMem(i)], t); + } + } + } + void store_mp(const RegExp& m, const MixPack& z, const Reg64& t) + { + for (size_t i = 0, n = z.size(); i < n; i++) { + if (z.isReg(i)) { + mov(ptr [m + i * 8], z.getReg(i)); + } else { + mov(t, ptr [z.getMem(i)]); + mov(ptr [m + i * 8], t); + } + } + } + void load_mp(const MixPack& z, const RegExp& m, const Reg64& t) + { + for (size_t i = 0, n = z.size(); i < n; i++) { + if (z.isReg(i)) { + mov(z.getReg(i), ptr [m + i * 8]); + } else { + mov(t, ptr [m + i * 8]); + mov(ptr [z.getMem(i)], t); + } + } + } + void set_mp(const MixPack& z, const Reg64& t) + { + for (size_t i = 0, n = z.size(); i < n; i++) { + MCL_FP_GEN_OP_MR(mov, z[i], t) + } + } + void mov_mp(const MixPack& z, const MixPack& x, const Reg64& t) + { + for (size_t i = 0, n = z.size(); i < n; i++) { + const MemReg zi = z[i], xi = x[i]; + if (z.isReg(i)) { + MCL_FP_GEN_OP_RM(mov, zi.getReg(), xi) + } else { + if (x.isReg(i)) { + mov(ptr [z.getMem(i)], x.getReg(i)); + } else { + mov(t, ptr [x.getMem(i)]); + mov(ptr [z.getMem(i)], t); + } + } + } + } +#ifdef _MSC_VER + void debug_put_mp(const MixPack& mp, int n, const Reg64& t) + { + if (n >= 10) exit(1); + static uint64_t buf[10]; + vmovq(xm0, rax); + mov(rax, (size_t)buf); + store_mp(rax, mp, t); + vmovq(rax, xm0); + push(rax); + mov(rax, (size_t)buf); + debug_put(rax, n); + pop(rax); + } +#endif + + std::string mkLabel(const char *label, int n) const + { + return std::string(label) + Xbyak::Label::toStr(n); + } + /* + int k = preInvC(pr, px) + */ + void gen_preInv() + { + assert(1 <= pn_ && pn_ <= 4); + const int freeRegNum = 13; + StackFrame sf(this, 2, 10 | UseRDX | UseRCX, (std::max(0, pn_ * 5 - 
freeRegNum) + 1 + (isFullBit_ ? 1 : 0)) * 8); + const Reg64& pr = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& t = rcx; + /* + k = rax, t = rcx : temporary + use rdx, pr, px in main loop, so we can use 13 registers + v = t[0, pn_) : all registers + */ + size_t rspPos = 0; + + assert(sf.t.size() >= (size_t)pn_); + Pack remain = sf.t; + + const MixPack rr(remain, rspPos, pn_); + remain.append(rdx); + const MixPack ss(remain, rspPos, pn_); + remain.append(px); + const int rSize = (int)remain.size(); + MixPack vv(remain, rspPos, pn_, rSize > 0 ? rSize / 2 : -1); + remain.append(pr); + MixPack uu(remain, rspPos, pn_); + + const RegExp keep_pr = rsp + rspPos; + rspPos += 8; + const RegExp rTop = rsp + rspPos; // used if isFullBit_ + + inLocalLabel(); + mov(ptr [keep_pr], pr); + mov(rax, px); + // px is free from here + load_mp(vv, rax, t); // v = x + mov(rax, pL_); + load_mp(uu, rax, t); // u = p_ + // k = 0 + xor_(rax, rax); + // rTop = 0 + if (isFullBit_) { + mov(ptr [rTop], rax); + } + // r = 0; + set_mp(rr, rax); + // s = 1 + set_mp(ss, rax); + if (ss.isReg(0)) { + mov(ss.getReg(0), 1); + } else { + mov(qword [ss.getMem(0)], 1); + } + for (int cn = pn_; cn > 0; cn--) { + const std::string _lp = mkLabel(".lp", cn); + const std::string _u_v_odd = mkLabel(".u_v_odd", cn); + const std::string _u_even = mkLabel(".u_even", cn); + const std::string _v_even = mkLabel(".v_even", cn); + const std::string _v_ge_u = mkLabel(".v_ge_u", cn); + const std::string _v_lt_u = mkLabel(".v_lt_u", cn); + L(_lp); + or_mp(vv, t); + jz(".exit", T_NEAR); + + g_test(uu[0], 1); + jz(_u_even, T_NEAR); + g_test(vv[0], 1); + jz(_v_even, T_NEAR); + L(_u_v_odd); + if (cn > 1) { + isBothZero(vv[cn - 1], uu[cn - 1], t); + jz(mkLabel(".u_v_odd", cn - 1), T_NEAR); + } + for (int i = cn - 1; i >= 0; i--) { + g_cmp(vv[i], uu[i], t); + jc(_v_lt_u, T_NEAR); + if (i > 0) jnz(_v_ge_u, T_NEAR); + } + + L(_v_ge_u); + sub_mp(vv, uu, t); + add_mp(ss, rr, t); + L(_v_even); + shr_mp(vv, 1, t); + twice_mp(rr, t); + if (isFullBit_) { + sbb(t, t); + mov(ptr [rTop], t); + } + inc(rax); + jmp(_lp, T_NEAR); + L(_v_lt_u); + sub_mp(uu, vv, t); + add_mp(rr, ss, t); + if (isFullBit_) { + sbb(t, t); + mov(ptr [rTop], t); + } + L(_u_even); + shr_mp(uu, 1, t); + twice_mp(ss, t); + inc(rax); + jmp(_lp, T_NEAR); + + if (cn > 0) { + vv.removeLast(); + uu.removeLast(); + } + } + L(".exit"); + assert(ss.isReg(0)); + const Reg64& t2 = ss.getReg(0); + const Reg64& t3 = rdx; + + mov(t2, pL_); + if (isFullBit_) { + mov(t, ptr [rTop]); + test(t, t); + jz("@f"); + sub_mp_m(rr, t2, t); + L("@@"); + } + mov(t3, ptr [keep_pr]); + // pr[] = p[] - rr + sub_m_mp_m(t3, t2, rr, t); + jnc("@f"); + // pr[] += p[] + add_m_m(t3, t2, t, pn_); + L("@@"); + outLocalLabel(); + } + void fpDbl_mod_NIST_P192(const RegExp &py, const RegExp& px, const Pack& t) + { + const Reg64& t0 = t[0]; + const Reg64& t1 = t[1]; + const Reg64& t2 = t[2]; + const Reg64& t3 = t[3]; + const Reg64& t4 = t[4]; + const Reg64& t5 = t[5]; + load_rm(Pack(t2, t1, t0), px); // L=[t2:t1:t0] + load_rm(Pack(rax, t5, t4), px + 8 * 3); // H = [rax:t5:t4] + xor_(t3, t3); + add_rr(Pack(t3, t2, t1, t0), Pack(t3, rax, t5, t4)); // [t3:t2:t1:t0] = L + H + add_rr(Pack(t2, t1, t0), Pack(t5, t4, rax)); + adc(t3, 0); // [t3:t2:t1:t0] = L + H + [H1:H0:H2] + add(t1, rax); + adc(t2, 0); + adc(t3, 0); // e = t3, t = [t2:t1:t0] + xor_(t4, t4); + add(t0, t3); + adc(t1, 0); + adc(t2, 0); + adc(t4, 0); // t + e = [t4:t2:t1:t0] + add(t1, t3); + adc(t2, 0); + adc(t4, 0); // t + e + (e << 64) + // p = 
[ffffffffffffffff:fffffffffffffffe:ffffffffffffffff] + mov(rax, size_t(-1)); + mov(rdx, size_t(-2)); + jz("@f"); + sub_rr(Pack(t2, t1, t0), Pack(rax, rdx, rax)); + L("@@"); + mov_rr(Pack(t5, t4, t3), Pack(t2, t1, t0)); + sub_rr(Pack(t2, t1, t0), Pack(rax, rdx, rax)); + cmovc_rr(Pack(t2, t1, t0), Pack(t5, t4, t3)); + store_mr(py, Pack(t2, t1, t0)); + } + /* + p = (1 << 521) - 1 + x = [H:L] + x % p = (L + H) % p + */ + void fpDbl_mod_NIST_P521(const RegExp& py, const RegExp& px, const Pack& t) + { + const Reg64& t0 = t[0]; + const Reg64& t1 = t[1]; + const Reg64& t2 = t[2]; + const Reg64& t3 = t[3]; + const Reg64& t4 = t[4]; + const Reg64& t5 = t[5]; + const Reg64& t6 = t[6]; + const Reg64& t7 = t[7]; + const int c = 9; + const uint32_t mask = (1 << c) - 1; + const Pack pack(rdx, rax, t6, t5, t4, t3, t2, t1, t0); + load_rm(pack, px + 64); + mov(t7, mask); + and_(t7, t0); + shrd(t0, t1, c); + shrd(t1, t2, c); + shrd(t2, t3, c); + shrd(t3, t4, c); + shrd(t4, t5, c); + shrd(t5, t6, c); + shrd(t6, rax, c); + shrd(rax, rdx, c); + shr(rdx, c); + // pack = L + H + add_rm(Pack(rax, t6, t5, t4, t3, t2, t1, t0), px); + adc(rdx, t7); + + // t = (L + H) >> 521, add t + mov(t7, rdx); + shr(t7, c); + add(t0, t7); + adc(t1, 0); + adc(t2, 0); + adc(t3, 0); + adc(t4, 0); + adc(t5, 0); + adc(t6, 0); + adc(rax, 0); + adc(rdx, 0); + and_(rdx, mask); + store_mr(py, pack); + + // if [rdx..t0] == p then 0 + and_(rax, t0); + and_(rax, t1); + and_(rax, t2); + and_(rax, t3); + and_(rax, t4); + and_(rax, t5); + and_(rax, t6); + not_(rax); + xor_(rdx, (1 << c) - 1); + or_(rax, rdx); + jnz("@f"); + xor_(rax, rax); + for (int i = 0; i < 9; i++) { + mov(ptr[py + i * 8], rax); + } + L("@@"); + } +private: + FpGenerator(const FpGenerator&); + void operator=(const FpGenerator&); + void make_op_rm(void (Xbyak::CodeGenerator::*op)(const Xbyak::Operand&, const Xbyak::Operand&), const Reg64& op1, const MemReg& op2) + { + if (op2.isReg()) { + (this->*op)(op1, op2.getReg()); + } else { + (this->*op)(op1, qword [op2.getMem()]); + } + } + void make_op_mr(void (Xbyak::CodeGenerator::*op)(const Xbyak::Operand&, const Xbyak::Operand&), const MemReg& op1, const Reg64& op2) + { + if (op1.isReg()) { + (this->*op)(op1.getReg(), op2); + } else { + (this->*op)(qword [op1.getMem()], op2); + } + } + void make_op(void (Xbyak::CodeGenerator::*op)(const Xbyak::Operand&, const Xbyak::Operand&), const MemReg& op1, const MemReg& op2, const Reg64& t) + { + if (op1.isReg()) { + make_op_rm(op, op1.getReg(), op2); + } else if (op2.isReg()) { + (this->*op)(ptr [op1.getMem()], op2.getReg()); + } else { + mov(t, ptr [op2.getMem()]); + (this->*op)(ptr [op1.getMem()], t); + } + } + void g_add(const MemReg& op1, const MemReg& op2, const Reg64& t) { make_op(&Xbyak::CodeGenerator::add, op1, op2, t); } + void g_adc(const MemReg& op1, const MemReg& op2, const Reg64& t) { make_op(&Xbyak::CodeGenerator::adc, op1, op2, t); } + void g_sub(const MemReg& op1, const MemReg& op2, const Reg64& t) { make_op(&Xbyak::CodeGenerator::sub, op1, op2, t); } + void g_sbb(const MemReg& op1, const MemReg& op2, const Reg64& t) { make_op(&Xbyak::CodeGenerator::sbb, op1, op2, t); } + void g_cmp(const MemReg& op1, const MemReg& op2, const Reg64& t) { make_op(&Xbyak::CodeGenerator::cmp, op1, op2, t); } + void g_or(const Reg64& r, const MemReg& op) { make_op_rm(&Xbyak::CodeGenerator::or_, r, op); } + void g_test(const MemReg& op1, const MemReg& op2, const Reg64& t) + { + const MemReg *pop1 = &op1; + const MemReg *pop2 = &op2; + if (!pop1->isReg()) { + std::swap(pop1, pop2); + } + // (M, 
M), (R, M), (R, R) + if (pop1->isReg()) { + MCL_FP_GEN_OP_MR(test, (*pop2), pop1->getReg()) + } else { + mov(t, ptr [pop1->getMem()]); + test(ptr [pop2->getMem()], t); + } + } + void g_mov(const MemReg& op, const Reg64& r) + { + make_op_mr(&Xbyak::CodeGenerator::mov, op, r); + } + void g_mov(const Reg64& r, const MemReg& op) + { + make_op_rm(&Xbyak::CodeGenerator::mov, r, op); + } + void g_add(const Reg64& r, const MemReg& mr) { MCL_FP_GEN_OP_RM(add, r, mr) } + void g_adc(const Reg64& r, const MemReg& mr) { MCL_FP_GEN_OP_RM(adc, r, mr) } + void isBothZero(const MemReg& op1, const MemReg& op2, const Reg64& t) + { + g_mov(t, op1); + g_or(t, op2); + } + void g_test(const MemReg& op, int imm) + { + MCL_FP_GEN_OP_MR(test, op, imm) + } + /* + z[] = x[] + */ + void mov_rr(const Pack& z, const Pack& x) + { + assert(z.size() == x.size()); + for (int i = 0, n = (int)x.size(); i < n; i++) { + mov(z[i], x[i]); + } + } + /* + m[] = x[] + */ + void store_mr(const RegExp& m, const Pack& x) + { + for (int i = 0, n = (int)x.size(); i < n; i++) { + mov(ptr [m + 8 * i], x[i]); + } + } + void store_mr(const Xbyak::RegRip& m, const Pack& x) + { + for (int i = 0, n = (int)x.size(); i < n; i++) { + mov(ptr [m + 8 * i], x[i]); + } + } + /* + z[] = m[] + */ + template<class ADDR> + void load_rm(const Pack& z, const ADDR& m) + { + for (int i = 0, n = (int)z.size(); i < n; i++) { + mov(z[i], ptr [m + 8 * i]); + } + } + /* + z[] += x[] + */ + void add_rr(const Pack& z, const Pack& x) + { + add(z[0], x[0]); + assert(z.size() == x.size()); + for (size_t i = 1, n = z.size(); i < n; i++) { + adc(z[i], x[i]); + } + } + /* + z[] -= x[] + */ + void sub_rr(const Pack& z, const Pack& x) + { + sub(z[0], x[0]); + assert(z.size() == x.size()); + for (size_t i = 1, n = z.size(); i < n; i++) { + sbb(z[i], x[i]); + } + } + /* + z[] += m[] + */ + template<class ADDR> + void add_rm(const Pack& z, const ADDR& m, bool withCarry = false) + { + if (withCarry) { + adc(z[0], ptr [m + 8 * 0]); + } else { + add(z[0], ptr [m + 8 * 0]); + } + for (int i = 1, n = (int)z.size(); i < n; i++) { + adc(z[i], ptr [m + 8 * i]); + } + } + /* + z[] -= m[] + */ + template<class ADDR> + void sub_rm(const Pack& z, const ADDR& m, bool withCarry = false) + { + if (withCarry) { + sbb(z[0], ptr [m + 8 * 0]); + } else { + sub(z[0], ptr [m + 8 * 0]); + } + for (int i = 1, n = (int)z.size(); i < n; i++) { + sbb(z[i], ptr [m + 8 * i]); + } + } + void cmovc_rr(const Pack& z, const Pack& x) + { + for (int i = 0, n = (int)z.size(); i < n; i++) { + cmovc(z[i], x[i]); + } + } + /* + t = all or z[i] + ZF = z is zero + */ + void or_mp(const MixPack& z, const Reg64& t) + { + const size_t n = z.size(); + if (n == 1) { + if (z.isReg(0)) { + test(z.getReg(0), z.getReg(0)); + } else { + mov(t, ptr [z.getMem(0)]); + test(t, t); + } + } else { + g_mov(t, z[0]); + for (size_t i = 1; i < n; i++) { + g_or(t, z[i]); + } + } + } + /* + [rdx:x:t0] <- py[1:0] * x + destroy x, t + */ + void mul2x1(const RegExp& py, const Reg64& x, const Reg64& t0, const Reg64& t) + { + if (useMulx_) { + // mulx(H, L, x) = [H:L] = x * rdx + /* + rdx:x + rax:t0 + */ + mov(rdx, x); + mulx(rax, t0, ptr [py]); // [rax:t0] = py[0] * x + mulx(rdx, x, ptr [py + 8]); // [rdx:x] = py[1] * x + add(x, rax); + adc(rdx, 0); + } else { + mov(rax, ptr [py]); + mul(x); + mov(t0, rax); + mov(t, rdx); + mov(rax, ptr [py + 8]); + mul(x); + /* + rdx:rax + t:t0 + */ + add(rax, t); + adc(rdx, 0); + mov(x, rax); + } + } + /* + [rdx:x:t1:t0] <- py[2:1:0] * x + destroy x, t + */ + void mul3x1(const RegExp& py, const Reg64& x, const Reg64& t2, const Reg64& t1, 
const Reg64& t0, const Reg64& t) + { + if (useMulx_) { + // mulx(H, L, x) = [H:L] = x * rdx + /* + rdx:x + t:t1 + rax:t0 + */ + mov(rdx, x); + mulx(rax, t0, ptr [py]); // [rax:t0] = py[0] * x + mulx(t, t1, ptr [py + 8]); // [t:t1] = py[1] * x + add(t1, rax); + mulx(rdx, x, ptr [py + 8 * 2]); + adc(x, t); + adc(rdx, 0); + } else { + mov(rax, ptr [py]); + mul(x); + mov(t0, rax); + mov(t1, rdx); + mov(rax, ptr [py + 8]); + mul(x); + mov(t, rax); + mov(t2, rdx); + mov(rax, ptr [py + 8 * 2]); + mul(x); + /* + rdx:rax + t2:t + t1:t0 + */ + add(t1, t); + adc(rax, t2); + adc(rdx, 0); + mov(x, rax); + } + } + /* + [x2:x1:x0] * x0 + */ + void mul3x1_sqr1(const RegExp& px, const Reg64& x0, const Reg64& t2, const Reg64& t1, const Reg64& t0, const Reg64& t) + { + mov(rax, x0); + mul(x0); + mov(t0, rax); + mov(t1, rdx); + mov(rax, ptr [px + 8]); + mul(x0); + mov(ptr [rsp + 0 * 8], rax); // (x0 * x1)_L + mov(ptr [rsp + 1 * 8], rdx); // (x0 * x1)_H + mov(t, rax); + mov(t2, rdx); + mov(rax, ptr [px + 8 * 2]); + mul(x0); + mov(ptr [rsp + 2 * 8], rax); // (x0 * x2)_L + mov(ptr [rsp + 3 * 8], rdx); // (x0 * x2)_H + /* + rdx:rax + t2:t + t1:t0 + */ + add(t1, t); + adc(t2, rax); + adc(rdx, 0); + } + /* + [x2:x1:x0] * x1 + */ + void mul3x1_sqr2(const RegExp& px, const Reg64& x1, const Reg64& t2, const Reg64& t1, const Reg64& t0) + { + mov(t0, ptr [rsp + 0 * 8]);// (x0 * x1)_L + mov(rax, x1); + mul(x1); + mov(t1, rax); + mov(t2, rdx); + mov(rax, ptr [px + 8 * 2]); + mul(x1); + mov(ptr [rsp + 4 * 8], rax); // (x1 * x2)_L + mov(ptr [rsp + 5 * 8], rdx); // (x1 * x2)_H + /* + rdx:rax + t2:t1 + t:t0 + */ + add(t1, ptr [rsp + 1 * 8]); // (x0 * x1)_H + adc(rax, t2); + adc(rdx, 0); + } + /* + [rdx:rax:t1:t0] = [x2:x1:x0] * x2 + */ + void mul3x1_sqr3(const Reg64& x2, const Reg64& t1, const Reg64& t0) + { + mov(rax, x2); + mul(x2); + /* + rdx:rax + t2:t + t1:t0 + */ + mov(t0, ptr [rsp + 2 * 8]); // (x0 * x2)_L + mov(t1, ptr [rsp + 3 * 8]); // (x0 * x2)_H + add(t1, ptr [rsp + 4 * 8]); // (x1 * x2)_L + adc(rax, ptr [rsp + 5 * 8]); // (x1 * x2)_H + adc(rdx, 0); + } + + /* + c = [c3:y:c1:c0] = c + x[2..0] * y + q = uint64_t(c0 * pp) + c = (c + q * p) >> 64 + input [c3:c2:c1:c0], px, y, p + output [c0:c3:c2:c1] ; c0 == 0 unless isFullBit_ + + @note use rax, rdx, destroy y + */ + void montgomery3_sub(uint64_t pp, const Reg64& c3, const Reg64& c2, const Reg64& c1, const Reg64& c0, + const Reg64& /*px*/, const Reg64& y, const Reg64& p, + const Reg64& t0, const Reg64& t1, const Reg64& t2, const Reg64& t3, const Reg64& t4, bool isFirst) + { + // input [c3:y:c1:0] + // [t4:c3:y:c1:c0] + // t4 = 0 or 1 if isFullBit_, = 0 otherwise + mov(rax, pp); + mul(c0); // q = rax + mov(c2, rax); + mul3x1(p, c2, t2, t1, t0, t3); + // [rdx:c2:t1:t0] = p * q + add(c0, t0); // always c0 is zero + adc(c1, t1); + adc(c2, y); + adc(c3, rdx); + if (isFullBit_) { + if (isFirst) { + setc(c0.cvt8()); + } else { + adc(c0.cvt8(), t4.cvt8()); + } + } + } + /* + c = [c3:c2:c1:c0] + c += x[2..0] * y + q = uint64_t(c0 * pp) + c = (c + q * p) >> 64 + input [c3:c2:c1:c0], px, y, p + output [c0:c3:c2:c1] ; c0 == 0 unless isFullBit_ + + @note use rax, rdx, destroy y + */ + void montgomery3_1(uint64_t pp, const Reg64& c3, const Reg64& c2, const Reg64& c1, const Reg64& c0, + const Reg64& px, const Reg64& y, const Reg64& p, + const Reg64& t0, const Reg64& t1, const Reg64& t2, const Reg64& t3, const Reg64& t4, bool isFirst) + { + if (isFirst) { + mul3x1(px, y, c2, c1, c0, c3); + mov(c3, rdx); + // [c3:y:c1:c0] = px[2..0] * y + } else { + mul3x1(px, y, t2, t1, t0, t3); + 
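// One word-serial (CIOS-style) Montgomery iteration: accumulate px[2..0] * y here, then + // montgomery3_sub folds in q*p and shifts right one limb. Rough C sketch of the pattern + // (names illustrative, not part of this file): + // c += (double-width)x * y[i]; q = (uint64_t)c * pp; c = (c + (double-width)q * p) >> 64; + 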
// [rdx:y:t1:t0] = px[2..0] * y + add_rr(Pack(c3, y, c1, c0), Pack(rdx, c2, t1, t0)); + if (isFullBit_) setc(t4.cvt8()); + } + montgomery3_sub(pp, c3, c2, c1, c0, px, y, p, t0, t1, t2, t3, t4, isFirst); + } + /* + pc[0..n] += x[0..n-1] * y ; pc[] = 0 if isFirst + pc[n + 1] is temporary used if isFullBit_ + q = uint64_t(pc[0] * pp) + pc[] = (pc[] + q * p) >> 64 + input : pc[], px[], y, p[], pw1[], pw2[] + output : pc[0..n] ; if isFullBit_ + pc[0..n-1] ; if !isFullBit_ + destroy y + use + pw1[0] if useMulx_ + pw1[0..n-2] otherwise + pw2[0..n-1] + */ + void montgomeryN_1(uint64_t pp, int n, const RegExp& pc, const RegExp& px, const Reg64& y, const Reg64& p, const Reg64& t, const MixPack& pw1, const RegExp& pw2, bool isFirst) + { + // pc[] += x[] * y + if (isFirst) { + gen_raw_mulUnit(pc, px, y, pw1, t, n); + mov(ptr [pc + n * 8], rdx); + } else { + gen_raw_mulUnit(pw2, px, y, pw1, t, n); + mov(t, ptr [pw2 + 0 * 8]); + add(ptr [pc + 0 * 8], t); + for (int i = 1; i < n; i++) { + mov(t, ptr [pw2 + i * 8]); + adc(ptr [pc + i * 8], t); + } + adc(ptr [pc + n * 8], rdx); + if (isFullBit_) { + mov(t, 0); + adc(t, 0); + mov(qword [pc + (n + 1) * 8], t); + } + } + mov(rax, pp); + mul(qword [pc]); + mov(y, rax); // y = q + gen_raw_mulUnit(pw2, p, y, pw1, t, n); + // c[] = (c[] + pw2[]) >> 64 + mov(t, ptr [pw2 + 0 * 8]); + add(t, ptr [pc + 0 * 8]); + for (int i = 1; i < n; i++) { + mov(t, ptr [pw2 + i * 8]); + adc(t, ptr [pc + i * 8]); + mov(ptr [pc + (i - 1) * 8], t); + } + adc(rdx, ptr [pc + n * 8]); + mov(ptr [pc + (n - 1) * 8], rdx); + if (isFullBit_) { + if (isFirst) { + mov(t, 0); + } else { + mov(t, ptr [pc + (n + 1) * 8]); + } + adc(t, 0); + mov(qword [pc + n * 8], t); + } else { + xor_(eax, eax); + mov(ptr [pc + n * 8], rax); + } + } + /* + [rdx:x:t2:t1:t0] <- py[3:2:1:0] * x + destroy x, t + */ + void mul4x1(const RegExp& py, const Reg64& x, const Reg64& t3, const Reg64& t2, const Reg64& t1, const Reg64& t0, const Reg64& t) + { + if (useMulx_) { + mov(rdx, x); + mulx(t1, t0, ptr [py + 8 * 0]); + mulx(t2, rax, ptr [py + 8 * 1]); + add(t1, rax); + mulx(x, rax, ptr [py + 8 * 2]); + adc(t2, rax); + mulx(rdx, rax, ptr [py + 8 * 3]); + adc(x, rax); + adc(rdx, 0); + } else { + mov(rax, ptr [py]); + mul(x); + mov(t0, rax); + mov(t1, rdx); + mov(rax, ptr [py + 8]); + mul(x); + mov(t, rax); + mov(t2, rdx); + mov(rax, ptr [py + 8 * 2]); + mul(x); + mov(t3, rax); + mov(rax, x); + mov(x, rdx); + mul(qword [py + 8 * 3]); + add(t1, t); + adc(t2, t3); + adc(x, rax); + adc(rdx, 0); + } + } + + /* + c = [c4:c3:c2:c1:c0] + c += x[3..0] * y + q = uint64_t(c0 * pp) + c = (c + q * p) >> 64 + input [c4:c3:c2:c1:c0], px, y, p + output [c0:c4:c3:c2:c1] + + @note use rax, rdx, destroy y + use xt if isFullBit_ + */ + void montgomery4_1(uint64_t pp, const Reg64& c4, const Reg64& c3, const Reg64& c2, const Reg64& c1, const Reg64& c0, + const Reg64& px, const Reg64& y, const Reg64& p, + const Reg64& t0, const Reg64& t1, const Reg64& t2, const Reg64& t3, const Reg64& t4, bool isFirst, const Xmm& xt) + { + if (isFirst) { + mul4x1(px, y, c3, c2, c1, c0, c4); + mov(c4, rdx); + // [c4:y:c2:c1:c0] = px[3..0] * y + } else { + mul4x1(px, y, t3, t2, t1, t0, t4); + // [rdx:y:t2:t1:t0] = px[3..0] * y + if (isFullBit_) { + vmovq(xt, px); + xor_(px, px); + } + add_rr(Pack(c4, y, c2, c1, c0), Pack(rdx, c3, t2, t1, t0)); + if (isFullBit_) { + adc(px, 0); + } + } + // [px:c4:y:c2:c1:c0] + // px = 0 or 1 if isFullBit_, = 0 otherwise + mov(rax, pp); + mul(c0); // q = rax + mov(c3, rax); + mul4x1(p, c3, t3, t2, t1, t0, t4); + add(c0, t0); // 
always c0 is zero + adc(c1, t1); + adc(c2, t2); + adc(c3, y); + adc(c4, rdx); + if (isFullBit_) { + if (isFirst) { + adc(c0, 0); + } else { + adc(c0, px); + vmovq(px, xt); + } + } + } + void3u gen_fp2Dbl_mulPre() + { + if (isFullBit_) return 0; +// if (pn_ != 4 && !(pn_ == 6 && useMulx_ && useAdx_)) return 0; + // almost same for pn_ == 6 + if (pn_ != 4) return 0; + align(16); + void3u func = getCurr(); + + const RegExp z = rsp + 0 * 8; + const RegExp x = rsp + 1 * 8; + const RegExp y = rsp + 2 * 8; + const Ext1 s(FpByte_, rsp, 3 * 8); + const Ext1 t(FpByte_, rsp, s.next); + const Ext1 d2(FpByte_ * 2, rsp, t.next); + const int SS = d2.next; + StackFrame sf(this, 3, 10 | UseRDX, SS); + mov(ptr [z], gp0); + mov(ptr [x], gp1); + mov(ptr [y], gp2); + // s = a + b + gen_raw_add(s, gp1, gp1 + FpByte_, rax, pn_); + // t = c + d + gen_raw_add(t, gp2, gp2 + FpByte_, rax, pn_); + // d1 = (a + b)(c + d) + mov(gp0, ptr [z]); + add(gp0, FpByte_ * 2); // d1 + lea(gp1, ptr [s]); + lea(gp2, ptr [t]); + call(mulPreL); + // d0 = a c + mov(gp0, ptr [z]); + mov(gp1, ptr [x]); + mov(gp2, ptr [y]); + call(mulPreL); + + // d2 = b d + lea(gp0, ptr [d2]); + mov(gp1, ptr [x]); + add(gp1, FpByte_); + mov(gp2, ptr [y]); + add(gp2, FpByte_); + call(mulPreL); + + mov(gp0, ptr [z]); + add(gp0, FpByte_ * 2); // d1 + mov(gp1, gp0); + mov(gp2, ptr [z]); + gen_raw_sub(gp0, gp1, gp2, rax, pn_ * 2); + lea(gp2, ptr [d2]); + gen_raw_sub(gp0, gp1, gp2, rax, pn_ * 2); + + mov(gp0, ptr [z]); + mov(gp1, gp0); + lea(gp2, ptr [d2]); + + gen_raw_sub(gp0, gp1, gp2, rax, pn_); + if (pn_ == 4) { + gen_raw_fp_sub(gp0 + pn_ * 8, gp1 + pn_ * 8, gp2 + pn_ * 8, Pack(gt0, gt1, gt2, gt3, gt4, gt5, gt6, gt7), true); + } else { + assert(pn_ == 6); + gen_raw_fp_sub6(gp0, gp1, gp2, pn_ * 8, sf.t.sub(0, 6), true); + } + return func; + } + void2u gen_fp2Dbl_sqrPre() + { + if (isFullBit_) return 0; +// if (pn_ != 4 && !(pn_ == 6 && useMulx_ && useAdx_)) return 0; + // almost same for pn_ == 6 + if (pn_ != 4) return 0; + align(16); + void2u func = getCurr(); + // almost same for pn_ == 6 + if (pn_ != 4) return 0; + const RegExp y = rsp + 0 * 8; + const RegExp x = rsp + 1 * 8; + const Ext1 t1(FpByte_, rsp, 2 * 8); + const Ext1 t2(FpByte_, rsp, t1.next); + // use mulPreL then use 3 + StackFrame sf(this, 3 /* not 2 */, 10 | UseRDX, t2.next); + mov(ptr [y], gp0); + mov(ptr [x], gp1); + Pack t = sf.t; + if (pn_ == 6) { + t.append(rax); + t.append(rdx); + } + const Pack a = t.sub(0, pn_); + const Pack b = t.sub(pn_, pn_); + load_rm(b, gp1 + FpByte_); + for (int i = 0; i < pn_; i++) { + mov(rax, b[i]); + if (i == 0) { + add(rax, rax); + } else { + adc(rax, rax); + } + mov(ptr [(const RegExp&)t1 + i * 8], rax); + } + load_rm(a, gp1); + add_rr(a, b); + store_mr(t2, a); + mov(gp0, ptr [y]); + add(gp0, FpByte_ * 2); + lea(gp1, ptr [t1]); + mov(gp2, ptr [x]); + call(mulPreL); + mov(gp0, ptr [x]); + if (pn_ == 4) { + gen_raw_fp_sub(t1, gp0, gp0 + FpByte_, sf.t, false); + } else { + assert(pn_ == 6); + gen_raw_fp_sub6(t1, gp0, gp0, FpByte_, a, false); + } + mov(gp0, ptr [y]); + lea(gp1, ptr [t1]); + lea(gp2, ptr [t2]); + call(mulPreL); + return func; + } + void gen_fp2_add4() + { + assert(!isFullBit_); + StackFrame sf(this, 3, 8); + gen_raw_fp_add(sf.p[0], sf.p[1], sf.p[2], sf.t, false); + gen_raw_fp_add(sf.p[0] + FpByte_, sf.p[1] + FpByte_, sf.p[2] + FpByte_, sf.t, false); + } + void gen_fp2_add6() + { + assert(!isFullBit_); + StackFrame sf(this, 3, 10); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& py = sf.p[2]; + Pack t1 = sf.t.sub(0, 
6); + Pack t2 = sf.t.sub(6); + t2.append(rax); + t2.append(px); // destroyed after use + vmovq(xm0, px); + gen_raw_fp_add6(pz, px, py, t1, t2, false); + vmovq(px, xm0); + gen_raw_fp_add6(pz + FpByte_, px + FpByte_, py + FpByte_, t1, t2, false); + } + void gen_fp2_sub6() + { + StackFrame sf(this, 3, 5); + const Reg64& pz = sf.p[0]; + const Reg64& px = sf.p[1]; + const Reg64& py = sf.p[2]; + Pack t = sf.t; + t.append(rax); + gen_raw_fp_sub6(pz, px, py, 0, t, false); + gen_raw_fp_sub6(pz, px, py, FpByte_, t, false); + } + void3u gen_fp2_add() + { + align(16); + void3u func = getCurr(); + if (pn_ == 4 && !isFullBit_) { + gen_fp2_add4(); + return func; + } + if (pn_ == 6 && !isFullBit_) { + gen_fp2_add6(); + return func; + } + return 0; + } + void3u gen_fp2_sub() + { + align(16); + void3u func = getCurr(); + if (pn_ == 4 && !isFullBit_) { + gen_fp2_sub4(); + return func; + } + if (pn_ == 6 && !isFullBit_) { + gen_fp2_sub6(); + return func; + } + return 0; + } + void gen_fp2_sub4() + { + assert(!isFullBit_); + StackFrame sf(this, 3, 8); + gen_raw_fp_sub(sf.p[0], sf.p[1], sf.p[2], sf.t, false); + gen_raw_fp_sub(sf.p[0] + FpByte_, sf.p[1] + FpByte_, sf.p[2] + FpByte_, sf.t, false); + } + /* + only for xi_a = 1 + y.a = a - b + y.b = a + b + */ + void gen_fp2_mul_xi4() + { + assert(!isFullBit_); + StackFrame sf(this, 2, 11 | UseRDX); + const Reg64& py = sf.p[0]; + const Reg64& px = sf.p[1]; + Pack a = sf.t.sub(0, 4); + Pack b = sf.t.sub(4, 4); + Pack t = sf.t.sub(8); + t.append(rdx); + assert(t.size() == 4); + load_rm(a, px); + load_rm(b, px + FpByte_); + for (int i = 0; i < pn_; i++) { + mov(t[i], a[i]); + if (i == 0) { + add(t[i], b[i]); + } else { + adc(t[i], b[i]); + } + } + sub_rr(a, b); + mov(rax, pL_); + load_rm(b, rax); + sbb(rax, rax); + for (int i = 0; i < pn_; i++) { + and_(b[i], rax); + } + add_rr(a, b); + store_mr(py, a); + mov(rax, pL_); + mov_rr(a, t); + sub_rm(t, rax); + cmovc_rr(t, a); + store_mr(py + FpByte_, t); + } + void gen_fp2_mul_xi6() + { + assert(!isFullBit_); + StackFrame sf(this, 2, 12); + const Reg64& py = sf.p[0]; + const Reg64& px = sf.p[1]; + Pack a = sf.t.sub(0, 6); + Pack b = sf.t.sub(6); + load_rm(a, px); + mov_rr(b, a); + add_rm(b, px + FpByte_); + sub_rm(a, px + FpByte_); + mov(rax, pL_); + jnc("@f"); + add_rm(a, rax); + L("@@"); + store_mr(py, a); + mov_rr(a, b); + sub_rm(b, rax); + cmovc_rr(b, a); + store_mr(py + FpByte_, b); + } + void2u gen_fp2_mul_xi() + { + if (isFullBit_) return 0; + if (op_->xi_a != 1) return 0; + align(16); + void2u func = getCurr(); + if (pn_ == 4) { + gen_fp2_mul_xi4(); + return func; + } + if (pn_ == 6) { + gen_fp2_mul_xi6(); + return func; + } + return 0; + } + void2u gen_fp2_neg() + { + align(16); + void2u func = getCurr(); + if (pn_ <= 6) { + StackFrame sf(this, 2, UseRDX | pn_); + gen_raw_neg(sf.p[0], sf.p[1], sf.t); + gen_raw_neg(sf.p[0] + FpByte_, sf.p[1] + FpByte_, sf.t); + return func; + } + return 0; + } + void3u gen_fp2_mul() + { + if (isFullBit_) return 0; + if (pn_ != 4 && !(pn_ == 6 && useMulx_ && useAdx_)) return 0; + align(16); + void3u func = getCurr(); + bool embedded = pn_ == 4; + + const RegExp z = rsp + 0 * 8; + const RegExp x = rsp + 1 * 8; + const RegExp y = rsp + 2 * 8; + const Ext1 s(FpByte_, rsp, 3 * 8); + const Ext1 t(FpByte_, rsp, s.next); + const Ext1 d0(FpByte_ * 2, rsp, t.next); + const Ext1 d1(FpByte_ * 2, rsp, d0.next); + const Ext1 d2(FpByte_ * 2, rsp, d1.next); + const int SS = d2.next; + StackFrame sf(this, 3, 10 | UseRDX, SS); + mov(ptr[z], gp0); + mov(ptr[x], gp1); + mov(ptr[y], gp2); + // s = a + b 
+ gen_raw_add(s, gp1, gp1 + FpByte_, rax, pn_); + // t = c + d + gen_raw_add(t, gp2, gp2 + FpByte_, rax, pn_); + // d1 = (a + b)(c + d) + if (embedded) { + mulPre4(d1, s, t, sf.t); + } else { + lea(gp0, ptr [d1]); + lea(gp1, ptr [s]); + lea(gp2, ptr [t]); + call(mulPreL); + } + // d0 = a c + mov(gp1, ptr [x]); + mov(gp2, ptr [y]); + if (embedded) { + mulPre4(d0, gp1, gp2, sf.t); + } else { + lea(gp0, ptr [d0]); + call(mulPreL); + } + // d2 = b d + mov(gp1, ptr [x]); + add(gp1, FpByte_); + mov(gp2, ptr [y]); + add(gp2, FpByte_); + if (embedded) { + mulPre4(d2, gp1, gp2, sf.t); + } else { + lea(gp0, ptr [d2]); + call(mulPreL); + } + + gen_raw_sub(d1, d1, d0, rax, pn_ * 2); + gen_raw_sub(d1, d1, d2, rax, pn_ * 2); + + gen_raw_sub(d0, d0, d2, rax, pn_); + if (pn_ == 4) { + gen_raw_fp_sub((RegExp)d0 + pn_ * 8, (RegExp)d0 + pn_ * 8, (RegExp)d2 + pn_ * 8, Pack(gt0, gt1, gt2, gt3, gt4, gt5, gt6, gt7), true); + } else { + lea(gp0, ptr[d0]); + lea(gp2, ptr[d2]); + gen_raw_fp_sub6(gp0, gp0, gp2, pn_ * 8, sf.t.sub(0, 6), true); + } + + mov(gp0, ptr [z]); + lea(gp1, ptr[d0]); + call(fpDbl_modL); + + mov(gp0, ptr [z]); + add(gp0, FpByte_); + lea(gp1, ptr[d1]); + call(fpDbl_modL); + return func; + } + void2u gen_fp2_sqr() + { + if (isFullBit_) return 0; + if (pn_ != 4 && !(pn_ == 6 && useMulx_ && useAdx_)) return 0; + align(16); + void2u func = getCurr(); + + const RegExp y = rsp + 0 * 8; + const RegExp x = rsp + 1 * 8; + const Ext1 t1(FpByte_, rsp, 2 * 8); + const Ext1 t2(FpByte_, rsp, t1.next); + const Ext1 t3(FpByte_, rsp, t2.next); + bool nocarry = (p_[pn_ - 1] >> 62) == 0; + StackFrame sf(this, 3, 10 | UseRDX, t3.next); + mov(ptr [y], gp0); + mov(ptr [x], gp1); + // t1 = b + b + lea(gp0, ptr [t1]); + if (nocarry) { + for (int i = 0; i < pn_; i++) { + mov(rax, ptr [gp1 + FpByte_ + i * 8]); + if (i == 0) { + add(rax, rax); + } else { + adc(rax, rax); + } + mov(ptr [gp0 + i * 8], rax); + } + } else { + if (pn_ == 4) { + gen_raw_fp_add(gp0, gp1 + FpByte_, gp1 + FpByte_, sf.t, false); + } else { + assert(pn_ == 6); + Pack t = sf.t.sub(6, 4); + t.append(rax); + t.append(rdx); + gen_raw_fp_add6(gp0, gp1 + FpByte_, gp1 + FpByte_, sf.t.sub(0, 6), t, false); + } + } + // t1 = 2ab + mov(gp1, gp0); + mov(gp2, ptr [x]); + call(fp_mulL); + + if (nocarry) { + Pack t = sf.t; + t.append(rdx); + t.append(gp1); + Pack a = t.sub(0, pn_); + Pack b = t.sub(pn_, pn_); + mov(gp0, ptr [x]); + load_rm(a, gp0); + load_rm(b, gp0 + FpByte_); + // t2 = a + b + for (int i = 0; i < pn_; i++) { + mov(rax, a[i]); + if (i == 0) { + add(rax, b[i]); + } else { + adc(rax, b[i]); + } + mov(ptr [(RegExp)t2 + i * 8], rax); + } + // t3 = a + p - b + mov(rax, pL_); + add_rm(a, rax); + sub_rr(a, b); + store_mr(t3, a); + } else { + mov(gp0, ptr [x]); + if (pn_ == 4) { + gen_raw_fp_add(t2, gp0, gp0 + FpByte_, sf.t, false); + gen_raw_fp_sub(t3, gp0, gp0 + FpByte_, sf.t, false); + } else { + assert(pn_ == 6); + Pack p1 = sf.t.sub(0, 6); + Pack p2 = sf.t.sub(6, 4); + p2.append(rax); + p2.append(rdx); + gen_raw_fp_add6(t2, gp0, gp0 + FpByte_, p1, p2, false); + gen_raw_fp_sub6(t3, gp0, gp0 + FpByte_, 0, p1, false); + } + } + + mov(gp0, ptr [y]); + lea(gp1, ptr [t2]); + lea(gp2, ptr [t3]); + call(fp_mulL); + mov(gp0, ptr [y]); + for (int i = 0; i < pn_; i++) { + mov(rax, ptr [(RegExp)t1 + i * 8]); + mov(ptr [gp0 + FpByte_ + i * 8], rax); + } + return func; + } +}; + +} } // mcl::fp + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + +#endif +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/src/gen.cpp 
b/vendor/github.com/byzantine-lab/mcl/src/gen.cpp new file mode 100644 index 000000000..763f64b98 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/gen.cpp @@ -0,0 +1,999 @@ +#include "llvm_gen.hpp" +#include <cybozu/option.hpp> +#include <cybozu/itoa.hpp> +#include <fstream> +#include <set> +#include <map> + +typedef std::set<std::string> StrSet; + +struct Code : public mcl::Generator { + typedef std::map<uint32_t, Function> FunctionMap; + typedef std::vector<Operand> OperandVec; + Operand Void; + uint32_t unit; + uint32_t unit2; + uint32_t bit; + uint32_t N; + const StrSet *privateFuncList; + bool wasm; + std::string suf; + std::string unitStr; + Function mulUU; + Function mul32x32; // for WASM + Function extractHigh; + Function mulPos; + Function makeNIST_P192; + Function mcl_fpDbl_mod_NIST_P192; + Function mcl_fp_sqr_NIST_P192; + FunctionMap mcl_fp_shr1_M; + FunctionMap mcl_fp_addPreM; + FunctionMap mcl_fp_subPreM; + FunctionMap mcl_fp_addM; + FunctionMap mcl_fp_subM; + FunctionMap mulPvM; + FunctionMap mcl_fp_mulUnitPreM; + FunctionMap mcl_fpDbl_mulPreM; + FunctionMap mcl_fpDbl_sqrPreM; + FunctionMap mcl_fp_montM; + FunctionMap mcl_fp_montRedM; + Code() : unit(0), unit2(0), bit(0), N(0), privateFuncList(0), wasm(false) { } + void verifyAndSetPrivate(Function& f) + { + if (privateFuncList && privateFuncList->find(f.name) != privateFuncList->end()) { + f.setPrivate(); + } + } + void storeN(Operand r, Operand p, int offset = 0) + { + if (p.bit != unit) { + throw cybozu::Exception("bad IntPtr size") << p.bit; + } + if (offset > 0) { + p = getelementptr(p, offset); + } + if (r.bit == unit) { + store(r, p); + return; + } + const size_t n = r.bit / unit; + for (size_t i = 0; i < n; i++) { + store(trunc(r, unit), getelementptr(p, i)); + if (i < n - 1) { + r = lshr(r, unit); + } + } + } + Operand loadN(Operand p, size_t n, int offset = 0) + { + if (p.bit != unit) { + throw cybozu::Exception("bad IntPtr size") << p.bit; + } + if (offset > 0) { + p = getelementptr(p, offset); + } + Operand v = load(p); + for (size_t i = 1; i < n; i++) { + v = zext(v, v.bit + unit); + Operand t = load(getelementptr(p, i)); + t = zext(t, v.bit); + t = shl(t, unit * i); + v = _or(v, t); + } + return v; + } + void gen_mul32x32() + { + const int u = 32; + resetGlobalIdx(); + Operand z(Int, u * 2); + Operand x(Int, u); + Operand y(Int, u); + mul32x32 = Function("mul32x32L", z, x, y); + mul32x32.setPrivate(); + verifyAndSetPrivate(mul32x32); + beginFunc(mul32x32); + + x = zext(x, u * 2); + y = zext(y, u * 2); + z = mul(x, y); + ret(z); + endFunc(); + } + void gen_mul64x64(Operand& z, Operand& x, Operand& y) + { + Operand a = trunc(lshr(x, 32), 32); + Operand b = trunc(x, 32); + Operand c = trunc(lshr(y, 32), 32); + Operand d = trunc(y, 32); + Operand ad = call(mul32x32, a, d); + Operand bd = call(mul32x32, b, d); + bd = zext(bd, 96); + ad = shl(zext(ad, 96), 32); + ad = add(ad, bd); + Operand ac = call(mul32x32, a, c); + Operand bc = call(mul32x32, b, c); + bc = zext(bc, 96); + ac = shl(zext(ac, 96), 32); + ac = add(ac, bc); + ad = zext(ad, 128); + ac = shl(zext(ac, 128), 32); + z = add(ac, ad); + } + void gen_mulUU() + { + if (wasm) { + gen_mul32x32(); + } + resetGlobalIdx(); + Operand z(Int, unit2); + Operand x(Int, unit); + Operand y(Int, unit); + std::string name = "mul"; + name += unitStr + "x" + unitStr + "L"; + mulUU = Function(name, z, x, y); + mulUU.setPrivate(); + verifyAndSetPrivate(mulUU); + beginFunc(mulUU); + + if (wasm) { + gen_mul64x64(z, x, y); + } else { + x = zext(x, unit2); + y = zext(y, unit2); + z = mul(x, y); + } + ret(z); + endFunc(); + } + void gen_extractHigh() + { + 
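// Emits a small private helper returning the upper `unit` bits of a double-width value. + // For unit == 64 the generated IR amounts to this sketch (value names illustrative): + // define private i64 @extractHigh64(i128 %x) { + // %h = lshr i128 %x, 64 + // %r = trunc i128 %h to i64 + // ret i64 %r + // } + 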
resetGlobalIdx(); + Operand z(Int, unit); + Operand x(Int, unit2); + std::string name = "extractHigh"; + name += unitStr; + extractHigh = Function(name, z, x); + extractHigh.setPrivate(); + beginFunc(extractHigh); + + x = lshr(x, unit); + z = trunc(x, unit); + ret(z); + endFunc(); + } + void gen_mulPos() + { + resetGlobalIdx(); + Operand xy(Int, unit2); + Operand px(IntPtr, unit); + Operand y(Int, unit); + Operand i(Int, unit); + std::string name = "mulPos"; + name += unitStr + "x" + unitStr; + mulPos = Function(name, xy, px, y, i); + mulPos.setPrivate(); + beginFunc(mulPos); + + Operand x = load(getelementptr(px, i)); + xy = call(mulUU, x, y); + ret(xy); + endFunc(); + } + Operand extract192to64(const Operand& x, uint32_t shift) + { + Operand y = lshr(x, shift); + y = trunc(y, 64); + return y; + } + void gen_makeNIST_P192() + { + resetGlobalIdx(); + Operand p(Int, 192); + Operand p0(Int, 64); + Operand p1(Int, 64); + Operand p2(Int, 64); + Operand _0 = makeImm(64, 0); + Operand _1 = makeImm(64, 1); + Operand _2 = makeImm(64, 2); + makeNIST_P192 = Function("makeNIST_P192L" + suf, p); + verifyAndSetPrivate(makeNIST_P192); + beginFunc(makeNIST_P192); + p0 = sub(_0, _1); + p1 = sub(_0, _2); + p2 = sub(_0, _1); + p0 = zext(p0, 192); + p1 = zext(p1, 192); + p2 = zext(p2, 192); + p1 = shl(p1, 64); + p2 = shl(p2, 128); + p = add(p0, p1); + p = add(p, p2); + ret(p); + endFunc(); + } + /* + NIST_P192 + p = 0xfffffffffffffffffffffffffffffffeffffffffffffffff + 0 1 2 + ffffffffffffffff fffffffffffffffe ffffffffffffffff + + p = (1 << 192) - (1 << 64) - 1 + (1 << 192) % p = (1 << 64) + 1 + L : 192bit + Hi: 64bit + x = [H:L] = [H2:H1:H0:L] + mod p + x = L + H + (H << 64) + = L + H + [H1:H0:0] + H2 + (H2 << 64) + [e:t] = L + H + [H1:H0:H2] + [H2:0] ; 2bit(e) over + y = t + e + (e << 64) + if (y >= p) y -= p + */ + void gen_mcl_fpDbl_mod_NIST_P192() + { + resetGlobalIdx(); + Operand out(IntPtr, unit); + Operand px(IntPtr, unit); + mcl_fpDbl_mod_NIST_P192 = Function("mcl_fpDbl_mod_NIST_P192L" + suf, Void, out, px); + verifyAndSetPrivate(mcl_fpDbl_mod_NIST_P192); + beginFunc(mcl_fpDbl_mod_NIST_P192); + + const int n = 192 / unit; + Operand L = loadN(px, n); + L = zext(L, 256); + + Operand H192 = loadN(px, n, n); + Operand H = zext(H192, 256); + + Operand H10 = shl(H192, 64); + H10 = zext(H10, 256); + + Operand H2 = extract192to64(H192, 128); + H2 = zext(H2, 256); + Operand H102 = _or(H10, H2); + + H2 = shl(H2, 64); + + Operand t = add(L, H); + t = add(t, H102); + t = add(t, H2); + + Operand e = lshr(t, 192); + e = trunc(e, 64); + e = zext(e, 256); + Operand e2 = shl(e, 64); + e = _or(e, e2); + + t = trunc(t, 192); + t = zext(t, 256); + + Operand z = add(t, e); + Operand p = call(makeNIST_P192); + p = zext(p, 256); + Operand zp = sub(z, p); + Operand c = trunc(lshr(zp, 192), 1); + z = trunc(select(c, z, zp), 192); + storeN(z, out); + ret(Void); + endFunc(); + } + /* + NIST_P521 + p = (1 << 521) - 1 + x = [H:L] + x % p = (L + H) % p + */ + void gen_mcl_fpDbl_mod_NIST_P521() + { + resetGlobalIdx(); + const uint32_t len = 521; + const uint32_t n = len / unit; + const uint32_t round = unit * (n + 1); + const uint32_t rem = len - n * unit; + const size_t mask = -(1 << rem); + const Operand py(IntPtr, unit); + const Operand px(IntPtr, unit); + Function f("mcl_fpDbl_mod_NIST_P521L" + suf, Void, py, px); + verifyAndSetPrivate(f); + beginFunc(f); + Operand x = loadN(px, n * 2 + 1); + Operand L = trunc(x, len); + L = zext(L, round); + Operand H = lshr(x, len); + H = trunc(H, round); // x = [H:L] + Operand t = add(L, 
H); + Operand t0 = lshr(t, len); + t0 = _and(t0, makeImm(round, 1)); + t = add(t, t0); + t = trunc(t, len); + Operand z0 = zext(t, round); + t = extract(z0, n * unit); + Operand m = _or(t, makeImm(unit, mask)); + for (uint32_t i = 0; i < n; i++) { + Operand s = extract(z0, unit * i); + m = _and(m, s); + } + Operand c = icmp(eq, m, makeImm(unit, -1)); + Label zero("zero"); + Label nonzero("nonzero"); + br(c, zero, nonzero); + putLabel(zero); + for (uint32_t i = 0; i < n + 1; i++) { + storeN(makeImm(unit, 0), py, i); + } + ret(Void); + putLabel(nonzero); + storeN(z0, py); + ret(Void); + endFunc(); + } + void gen_mcl_fp_sqr_NIST_P192() + { + resetGlobalIdx(); + Operand py(IntPtr, unit); + Operand px(IntPtr, unit); + mcl_fp_sqr_NIST_P192 = Function("mcl_fp_sqr_NIST_P192L" + suf, Void, py, px); + verifyAndSetPrivate(mcl_fp_sqr_NIST_P192); + beginFunc(mcl_fp_sqr_NIST_P192); + Operand buf = _alloca(unit, 192 * 2 / unit); + // QQQ define later + Function mcl_fpDbl_sqrPre("mcl_fpDbl_sqrPre" + cybozu::itoa(192 / unit) + "L" + suf, Void, buf, px); + call(mcl_fpDbl_sqrPre, buf, px); + call(mcl_fpDbl_mod_NIST_P192, py, buf); + ret(Void); + endFunc(); + } + void gen_mcl_fp_mulNIST_P192() + { + resetGlobalIdx(); + Operand pz(IntPtr, unit); + Operand px(IntPtr, unit); + Operand py(IntPtr, unit); + Function f("mcl_fp_mulNIST_P192L" + suf, Void, pz, px, py); + verifyAndSetPrivate(f); + beginFunc(f); + Operand buf = _alloca(unit, 192 * 2 / unit); + // QQQ define later + Function mcl_fpDbl_mulPre("mcl_fpDbl_mulPre" + cybozu::itoa(192 / unit) + "L" + suf, Void, buf, px, py); + call(mcl_fpDbl_mulPre, buf, px, py); + call(mcl_fpDbl_mod_NIST_P192, pz, buf); + ret(Void); + endFunc(); + } + void gen_once() + { + gen_mulUU(); + gen_extractHigh(); + gen_mulPos(); + gen_makeNIST_P192(); + gen_mcl_fpDbl_mod_NIST_P192(); + gen_mcl_fp_sqr_NIST_P192(); + gen_mcl_fp_mulNIST_P192(); + gen_mcl_fpDbl_mod_NIST_P521(); + } + Operand extract(const Operand& x, uint32_t shift) + { + Operand t = lshr(x, shift); + t = trunc(t, unit); + return t; + } + void gen_mcl_fp_addsubPre(bool isAdd) + { + resetGlobalIdx(); + Operand r(Int, unit); + Operand pz(IntPtr, unit); + Operand px(IntPtr, unit); + Operand py(IntPtr, unit); + std::string name; + if (isAdd) { + name = "mcl_fp_addPre" + cybozu::itoa(N) + "L" + suf; + mcl_fp_addPreM[N] = Function(name, r, pz, px, py); + verifyAndSetPrivate(mcl_fp_addPreM[N]); + beginFunc(mcl_fp_addPreM[N]); + } else { + name = "mcl_fp_subPre" + cybozu::itoa(N) + "L" + suf; + mcl_fp_subPreM[N] = Function(name, r, pz, px, py); + verifyAndSetPrivate(mcl_fp_subPreM[N]); + beginFunc(mcl_fp_subPreM[N]); + } + Operand x = zext(loadN(px, N), bit + unit); + Operand y = zext(loadN(py, N), bit + unit); + Operand z; + if (isAdd) { + z = add(x, y); + storeN(trunc(z, bit), pz); + r = trunc(lshr(z, bit), unit); + } else { + z = sub(x, y); + storeN(trunc(z, bit), pz); + r = _and(trunc(lshr(z, bit), unit), makeImm(unit, 1)); + } + ret(r); + endFunc(); + } +#if 0 // void-return version + void gen_mcl_fp_addsubPre(bool isAdd) + { + resetGlobalIdx(); + Operand pz(IntPtr, bit); + Operand px(IntPtr, bit); + Operand py(IntPtr, bit); + std::string name; + if (isAdd) { + name = "mcl_fp_addPre" + cybozu::itoa(bit) + "L"; + mcl_fp_addPreM[bit] = Function(name, Void, pz, px, py); + verifyAndSetPrivate(mcl_fp_addPreM[bit]); + beginFunc(mcl_fp_addPreM[bit]); + } else { + name = "mcl_fp_subPre" + cybozu::itoa(bit) + "L"; + mcl_fp_subPreM[bit] = Function(name, Void, pz, px, py); + verifyAndSetPrivate(mcl_fp_subPreM[bit]); + 
beginFunc(mcl_fp_subPreM[bit]); + } + Operand x = load(px); + Operand y = load(py); + Operand z; + if (isAdd) { + z = add(x, y); + } else { + z = sub(x, y); + } + store(z, pz); + ret(Void); + endFunc(); + } +#endif + void gen_mcl_fp_shr1() + { + resetGlobalIdx(); + Operand py(IntPtr, unit); + Operand px(IntPtr, unit); + std::string name = "mcl_fp_shr1_" + cybozu::itoa(N) + "L" + suf; + mcl_fp_shr1_M[N] = Function(name, Void, py, px); + verifyAndSetPrivate(mcl_fp_shr1_M[N]); + beginFunc(mcl_fp_shr1_M[N]); + Operand x = loadN(px, N); + x = lshr(x, 1); + storeN(x, py); + ret(Void); + endFunc(); + } + void gen_mcl_fp_add(bool isFullBit = true) + { + resetGlobalIdx(); + Operand pz(IntPtr, unit); + Operand px(IntPtr, unit); + Operand py(IntPtr, unit); + Operand pp(IntPtr, unit); + std::string name = "mcl_fp_add"; + if (!isFullBit) { + name += "NF"; + } + name += cybozu::itoa(N) + "L" + suf; + mcl_fp_addM[N] = Function(name, Void, pz, px, py, pp); + verifyAndSetPrivate(mcl_fp_addM[N]); + beginFunc(mcl_fp_addM[N]); + Operand x = loadN(px, N); + Operand y = loadN(py, N); + if (isFullBit) { + x = zext(x, bit + unit); + y = zext(y, bit + unit); + Operand t0 = add(x, y); + Operand t1 = trunc(t0, bit); + storeN(t1, pz); + Operand p = loadN(pp, N); + p = zext(p, bit + unit); + Operand vc = sub(t0, p); + Operand c = lshr(vc, bit); + c = trunc(c, 1); + Label carry("carry"); + Label nocarry("nocarry"); + br(c, carry, nocarry); + putLabel(nocarry); + storeN(trunc(vc, bit), pz); + ret(Void); + putLabel(carry); + } else { + x = add(x, y); + Operand p = loadN(pp, N); + y = sub(x, p); + Operand c = trunc(lshr(y, bit - 1), 1); + x = select(c, x, y); + storeN(x, pz); + } + ret(Void); + endFunc(); + } + void gen_mcl_fp_sub(bool isFullBit = true) + { + resetGlobalIdx(); + Operand pz(IntPtr, unit); + Operand px(IntPtr, unit); + Operand py(IntPtr, unit); + Operand pp(IntPtr, unit); + std::string name = "mcl_fp_sub"; + if (!isFullBit) { + name += "NF"; + } + name += cybozu::itoa(N) + "L" + suf; + mcl_fp_subM[N] = Function(name, Void, pz, px, py, pp); + verifyAndSetPrivate(mcl_fp_subM[N]); + beginFunc(mcl_fp_subM[N]); + Operand x = loadN(px, N); + Operand y = loadN(py, N); + if (isFullBit) { + x = zext(x, bit + unit); + y = zext(y, bit + unit); + Operand vc = sub(x, y); + Operand v, c; + v = trunc(vc, bit); + c = lshr(vc, bit); + c = trunc(c, 1); + storeN(v, pz); + Label carry("carry"); + Label nocarry("nocarry"); + br(c, carry, nocarry); + putLabel(nocarry); + ret(Void); + putLabel(carry); + Operand p = loadN(pp, N); + Operand t = add(v, p); + storeN(t, pz); + } else { + Operand v = sub(x, y); + Operand c; + c = trunc(lshr(v, bit - 1), 1); + Operand p = loadN(pp, N); + c = select(c, p, makeImm(bit, 0)); + Operand t = add(v, c); + storeN(t, pz); + } + ret(Void); + endFunc(); + } + void gen_mcl_fpDbl_add() + { + // QQQ : generate unnecessary memory copy for large bit + const int bu = bit + unit; + const int b2 = bit * 2; + const int b2u = b2 + unit; + resetGlobalIdx(); + Operand pz(IntPtr, unit); + Operand px(IntPtr, unit); + Operand py(IntPtr, unit); + Operand pp(IntPtr, unit); + std::string name = "mcl_fpDbl_add" + cybozu::itoa(N) + "L" + suf; + Function f(name, Void, pz, px, py, pp); + verifyAndSetPrivate(f); + beginFunc(f); + Operand x = loadN(px, N * 2); + Operand y = loadN(py, N * 2); + x = zext(x, b2u); + y = zext(y, b2u); + Operand t = add(x, y); // x + y = [H:L] + Operand L = trunc(t, bit); + storeN(L, pz); + + Operand H = lshr(t, bit); + H = trunc(H, bu); + Operand p = loadN(pp, N); + p = zext(p, bu); + 
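// Branch-free modular correction: H - p is computed one unit wider than p, so an underflow + // sets bit `bit` of Hp; select() then keeps the unreduced H exactly when the subtraction + // borrowed. C sketch (illustrative): out = ((H - p) >> bit) & 1 ? H : H - p; + 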
Operand Hp = sub(H, p); + t = lshr(Hp, bit); + t = trunc(t, 1); + t = select(t, H, Hp); + t = trunc(t, bit); + storeN(t, pz, N); + ret(Void); + endFunc(); + } + void gen_mcl_fpDbl_sub() + { + // QQQ : rol is used? + const int b2 = bit * 2; + const int b2u = b2 + unit; + resetGlobalIdx(); + std::string name = "mcl_fpDbl_sub" + cybozu::itoa(N) + "L" + suf; + Operand pz(IntPtr, unit); + Operand px(IntPtr, unit); + Operand py(IntPtr, unit); + Operand pp(IntPtr, unit); + Function f(name, Void, pz, px, py, pp); + verifyAndSetPrivate(f); + beginFunc(f); + Operand x = loadN(px, N * 2); + Operand y = loadN(py, N * 2); + x = zext(x, b2u); + y = zext(y, b2u); + Operand vc = sub(x, y); // x - y = [H:L] + Operand L = trunc(vc, bit); + storeN(L, pz); + + Operand H = lshr(vc, bit); + H = trunc(H, bit); + Operand c = lshr(vc, b2); + c = trunc(c, 1); + Operand p = loadN(pp, N); + c = select(c, p, makeImm(bit, 0)); + Operand t = add(H, c); + storeN(t, pz, N); + ret(Void); + endFunc(); + } + /* + return [px[n-1]:px[n-2]:...:px[0]] + */ + Operand pack(const Operand *px, size_t n) + { + Operand x = px[0]; + for (size_t i = 1; i < n; i++) { + Operand y = px[i]; + size_t shift = x.bit; + size_t size = x.bit + y.bit; + x = zext(x, size); + y = zext(y, size); + y = shl(y, shift); + x = _or(x, y); + } + return x; + } + /* + z = px[0..N] * y + */ + void gen_mulPv() + { + const int bu = bit + unit; + resetGlobalIdx(); + Operand z(Int, bu); + Operand px(IntPtr, unit); + Operand y(Int, unit); + std::string name = "mulPv" + cybozu::itoa(bit) + "x" + cybozu::itoa(unit); + mulPvM[bit] = Function(name, z, px, y); + mulPvM[bit].setPrivate(); + verifyAndSetPrivate(mulPvM[bit]); + beginFunc(mulPvM[bit]); + OperandVec L(N), H(N); + for (uint32_t i = 0; i < N; i++) { + Operand xy = call(mulPos, px, y, makeImm(unit, i)); + L[i] = trunc(xy, unit); + H[i] = call(extractHigh, xy); + } + Operand LL = pack(&L[0], N); + Operand HH = pack(&H[0], N); + LL = zext(LL, bu); + HH = zext(HH, bu); + HH = shl(HH, unit); + z = add(LL, HH); + ret(z); + endFunc(); + } + void gen_mcl_fp_mulUnitPre() + { + resetGlobalIdx(); + Operand pz(IntPtr, unit); + Operand px(IntPtr, unit); + Operand y(Int, unit); + std::string name = "mcl_fp_mulUnitPre" + cybozu::itoa(N) + "L" + suf; + mcl_fp_mulUnitPreM[N] = Function(name, Void, pz, px, y); + verifyAndSetPrivate(mcl_fp_mulUnitPreM[N]); + beginFunc(mcl_fp_mulUnitPreM[N]); + Operand z = call(mulPvM[bit], px, y); + storeN(z, pz); + ret(Void); + endFunc(); + } + void generic_fpDbl_mul(const Operand& pz, const Operand& px, const Operand& py) + { + if (N == 1) { + Operand x = load(px); + Operand y = load(py); + x = zext(x, unit * 2); + y = zext(y, unit * 2); + Operand z = mul(x, y); + storeN(z, pz); + ret(Void); + } else if (N >= 8 && (N % 2) == 0) { + /* + W = 1 << half + (aW + b)(cW + d) = acW^2 + (ad + bc)W + bd + ad + bc = (a + b)(c + d) - ac - bd + */ + const int H = N / 2; + const int half = bit / 2; + Operand pxW = getelementptr(px, H); + Operand pyW = getelementptr(py, H); + Operand pzWW = getelementptr(pz, N); + call(mcl_fpDbl_mulPreM[H], pz, px, py); // bd + call(mcl_fpDbl_mulPreM[H], pzWW, pxW, pyW); // ac + + Operand a = zext(loadN(pxW, H), half + unit); + Operand b = zext(loadN(px, H), half + unit); + Operand c = zext(loadN(pyW, H), half + unit); + Operand d = zext(loadN(py, H), half + unit); + Operand t1 = add(a, b); + Operand t2 = add(c, d); + Operand buf = _alloca(unit, N); + Operand t1L = trunc(t1, half); + Operand t2L = trunc(t2, half); + Operand c1 = trunc(lshr(t1, half), 1); + Operand c2 = 
trunc(lshr(t2, half), 1); + Operand c0 = _and(c1, c2); + c1 = select(c1, t2L, makeImm(half, 0)); + c2 = select(c2, t1L, makeImm(half, 0)); + Operand buf1 = _alloca(unit, half / unit); + Operand buf2 = _alloca(unit, half / unit); + storeN(t1L, buf1); + storeN(t2L, buf2); + call(mcl_fpDbl_mulPreM[N / 2], buf, buf1, buf2); + Operand t = loadN(buf, N); + t = zext(t, bit + unit); + c0 = zext(c0, bit + unit); + c0 = shl(c0, bit); + t = _or(t, c0); + c1 = zext(c1, bit + unit); + c2 = zext(c2, bit + unit); + c1 = shl(c1, half); + c2 = shl(c2, half); + t = add(t, c1); + t = add(t, c2); + t = sub(t, zext(loadN(pz, N), bit + unit)); + t = sub(t, zext(loadN(pz, N, N), bit + unit)); + if (bit + half > t.bit) { + t = zext(t, bit + half); + } + t = add(t, loadN(pz, N + H, H)); + storeN(t, pz, H); + ret(Void); + } else { + Operand y = load(py); + Operand xy = call(mulPvM[bit], px, y); + store(trunc(xy, unit), pz); + Operand t = lshr(xy, unit); + for (uint32_t i = 1; i < N; i++) { + y = loadN(py, 1, i); + xy = call(mulPvM[bit], px, y); + t = add(t, xy); + if (i < N - 1) { + storeN(trunc(t, unit), pz, i); + t = lshr(t, unit); + } + } + storeN(t, pz, N - 1); + ret(Void); + } + } + void gen_mcl_fpDbl_mulPre() + { + resetGlobalIdx(); + Operand pz(IntPtr, unit); + Operand px(IntPtr, unit); + Operand py(IntPtr, unit); + std::string name = "mcl_fpDbl_mulPre" + cybozu::itoa(N) + "L" + suf; + mcl_fpDbl_mulPreM[N] = Function(name, Void, pz, px, py); + verifyAndSetPrivate(mcl_fpDbl_mulPreM[N]); + beginFunc(mcl_fpDbl_mulPreM[N]); + generic_fpDbl_mul(pz, px, py); + endFunc(); + } + void gen_mcl_fpDbl_sqrPre() + { + resetGlobalIdx(); + Operand py(IntPtr, unit); + Operand px(IntPtr, unit); + std::string name = "mcl_fpDbl_sqrPre" + cybozu::itoa(N) + "L" + suf; + mcl_fpDbl_sqrPreM[N] = Function(name, Void, py, px); + verifyAndSetPrivate(mcl_fpDbl_sqrPreM[N]); + beginFunc(mcl_fpDbl_sqrPreM[N]); + generic_fpDbl_mul(py, px, px); + endFunc(); + } + void gen_mcl_fp_mont(bool isFullBit = true) + { + const int bu = bit + unit; + const int bu2 = bit + unit * 2; + resetGlobalIdx(); + Operand pz(IntPtr, unit); + Operand px(IntPtr, unit); + Operand py(IntPtr, unit); + Operand pp(IntPtr, unit); + std::string name = "mcl_fp_mont"; + if (!isFullBit) { + name += "NF"; + } + name += cybozu::itoa(N) + "L" + suf; + mcl_fp_montM[N] = Function(name, Void, pz, px, py, pp); + mcl_fp_montM[N].setAlias(); + verifyAndSetPrivate(mcl_fp_montM[N]); + beginFunc(mcl_fp_montM[N]); + Operand rp = load(getelementptr(pp, -1)); + Operand z, s, a; + if (isFullBit) { + for (uint32_t i = 0; i < N; i++) { + Operand y = load(getelementptr(py, i)); + Operand xy = call(mulPvM[bit], px, y); + Operand at; + if (i == 0) { + a = zext(xy, bu2); + at = trunc(xy, unit); + } else { + xy = zext(xy, bu2); + a = add(s, xy); + at = trunc(a, unit); + } + Operand q = mul(at, rp); + Operand pq = call(mulPvM[bit], pp, q); + pq = zext(pq, bu2); + Operand t = add(a, pq); + s = lshr(t, unit); + } + s = trunc(s, bu); + Operand p = zext(loadN(pp, N), bu); + Operand vc = sub(s, p); + Operand c = trunc(lshr(vc, bit), 1); + z = select(c, s, vc); + z = trunc(z, bit); + storeN(z, pz); + } else { + Operand y = load(py); + Operand xy = call(mulPvM[bit], px, y); + Operand c0 = trunc(xy, unit); + Operand q = mul(c0, rp); + Operand pq = call(mulPvM[bit], pp, q); + Operand t = add(xy, pq); + t = lshr(t, unit); // bu-bit + for (uint32_t i = 1; i < N; i++) { + y = load(getelementptr(py, i)); + xy = call(mulPvM[bit], px, y); + t = add(t, xy); + c0 = trunc(t, unit); + q = mul(c0, rp); + pq = 
call(mulPvM[bit], pp, q); + t = add(t, pq); + t = lshr(t, unit); + } + t = trunc(t, bit); + Operand vc = sub(t, loadN(pp, N)); + Operand c = trunc(lshr(vc, bit - 1), 1); + z = select(c, t, vc); + storeN(z, pz); + } + ret(Void); + endFunc(); + } + void gen_mcl_fp_montRed() + { + const int bu = bit + unit; + const int b2 = bit * 2; + const int b2u = b2 + unit; + resetGlobalIdx(); + Operand pz(IntPtr, unit); + Operand pxy(IntPtr, unit); + Operand pp(IntPtr, unit); + std::string name = "mcl_fp_montRed" + cybozu::itoa(N) + "L" + suf; + mcl_fp_montRedM[N] = Function(name, Void, pz, pxy, pp); + verifyAndSetPrivate(mcl_fp_montRedM[N]); + beginFunc(mcl_fp_montRedM[N]); + Operand rp = load(getelementptr(pp, -1)); + Operand p = loadN(pp, N); + Operand xy = loadN(pxy, N * 2); + Operand t = zext(xy, b2 + unit); + Operand z; + for (uint32_t i = 0; i < N; i++) { + Operand z = trunc(t, unit); + Operand q = mul(z, rp); + Operand pq = call(mulPvM[bit], pp, q); + pq = zext(pq, b2u - unit * i); + z = add(t, pq); + z = lshr(z, unit); + t = trunc(z, b2 - unit * i); + } + p = zext(p, bu); + Operand vc = sub(t, p); + Operand c = trunc(lshr(vc, bit), 1); + z = select(c, t, vc); + z = trunc(z, bit); + storeN(z, pz); + ret(Void); + endFunc(); + } + void gen_all() + { + gen_mcl_fp_addsubPre(true); + gen_mcl_fp_addsubPre(false); + gen_mcl_fp_shr1(); + } + void gen_addsub() + { + gen_mcl_fp_add(true); + gen_mcl_fp_add(false); + gen_mcl_fp_sub(true); + gen_mcl_fp_sub(false); + gen_mcl_fpDbl_add(); + gen_mcl_fpDbl_sub(); + } + void gen_mul() + { + gen_mulPv(); + gen_mcl_fp_mulUnitPre(); + gen_mcl_fpDbl_mulPre(); + gen_mcl_fpDbl_sqrPre(); + gen_mcl_fp_mont(true); + gen_mcl_fp_mont(false); + gen_mcl_fp_montRed(); + } + void setBit(uint32_t bit) + { + this->bit = bit; + N = bit / unit; + } + void setUnit(uint32_t unit) + { + this->unit = unit; + unit2 = unit * 2; + unitStr = cybozu::itoa(unit); + } + void gen(const StrSet& privateFuncList, uint32_t maxBitSize, const std::string& suf) + { + this->suf = suf; + this->privateFuncList = &privateFuncList; +#ifdef FOR_WASM + gen_mulUU(); +#else + gen_once(); + uint32_t end = ((maxBitSize + unit - 1) / unit); + for (uint32_t n = 1; n <= end; n++) { + setBit(n * unit); + gen_mul(); + gen_all(); + gen_addsub(); + } + if (unit == 64 && maxBitSize == 768) { + for (uint32_t i = maxBitSize + unit * 2; i <= maxBitSize * 2; i += unit * 2) { + setBit(i); + gen_all(); + } + } +#endif + } +}; + +int main(int argc, char *argv[]) + try +{ + uint32_t unit; + bool oldLLVM; + bool wasm; + std::string suf; + std::string privateFile; + cybozu::Option opt; + opt.appendOpt(&unit, uint32_t(sizeof(void*)) * 8, "u", ": unit"); + opt.appendBoolOpt(&oldLLVM, "old", ": old LLVM(before 3.8)"); + opt.appendBoolOpt(&wasm, "wasm", ": for wasm"); + opt.appendOpt(&suf, "", "s", ": suffix of function name"); + opt.appendOpt(&privateFile, "", "f", ": private function list file"); + opt.appendHelp("h"); + if (!opt.parse(argc, argv)) { + opt.usage(); + return 1; + } + StrSet privateFuncList; + if (!privateFile.empty()) { + std::ifstream ifs(privateFile.c_str(), std::ios::binary); + std::string name; + while (ifs >> name) { + privateFuncList.insert(name); + } + } + Code c; + if (oldLLVM) { + c.setOldLLVM(); + } + c.wasm = wasm; + c.setUnit(unit); + uint32_t maxBitSize = MCL_MAX_BIT_SIZE; + c.gen(privateFuncList, maxBitSize, suf); +} catch (std::exception& e) { + printf("ERR %s\n", e.what()); + return 1; +} diff --git a/vendor/github.com/byzantine-lab/mcl/src/llvm_gen.hpp 
b/vendor/github.com/byzantine-lab/mcl/src/llvm_gen.hpp new file mode 100644 index 000000000..bbc5b9030 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/llvm_gen.hpp @@ -0,0 +1,616 @@ +#pragma once +/** + @file + @brief LLVM IR generator + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +//#define CYBOZU_EXCEPTION_WITH_STACKTRACE +#include +#include +#include +#include +#include +#ifdef _MSC_VER +// #pragma warning(push) + #pragma warning(disable : 4458) +#endif + +namespace mcl { + +namespace impl { + +struct File { + FILE *fp; + File() : fp(stdout) {} + ~File() { if (fp != stdout) fclose(fp); } + void open(const std::string& file) + { +#ifdef _MSC_VER + bool isOK = fopen_s(&fp, file.c_str(), "wb") != 0; +#else + fp = fopen(file.c_str(), "wb"); + bool isOK = fp != NULL; +#endif + if (!isOK) throw cybozu::Exception("File:open") << file; + } + void write(const std::string& str) + { + int ret = fprintf(fp, "%s\n", str.c_str()); + if (ret < 0) { + throw cybozu::Exception("File:write") << str; + } + } +}; +template +struct Param { + static File f; +}; + +template +File Param::f; + +bool isOldLLVM = false; + +} // mcl::impl + +inline bool isOldLLVM() { return impl::isOldLLVM; } + +struct Generator { + static const uint8_t None = 0; + static const uint8_t Int = 1; + static const uint8_t Imm = 2; + static const uint8_t Ptr = 1 << 7; + static const uint8_t IntPtr = Int | Ptr; + void setOldLLVM() { impl::isOldLLVM = true; } + struct Type { + uint8_t type; + bool isPtr; + Type(int type = 0) + : type(static_cast(type & ~Ptr)) + , isPtr((type & Ptr) != 0) + { + } + inline friend std::ostream& operator<<(std::ostream& os, const Type& self) + { + return os << (self.type | (self.isPtr ? 
Ptr : 0)); + } + }; + enum CondType { + eq = 1, + neq = 2, + ugt = 3, + uge = 4, + ult = 5, + ule = 6, + sgt = 7, + sge = 8, + slt = 9, + sle = 10 + }; + static inline const std::string& toStr(CondType type) + { + static const std::string tbl[] = { + "eq", "neq", "ugt", "uge", "ult", "ule", "sgt", "sge", "slt", "sle" + }; + return tbl[type - 1]; + } + void open(const std::string& file) + { + impl::Param<>::f.open(file); + } + struct Operand; + struct Function; + struct Eval; + struct Label { + std::string name; + explicit Label(const std::string& name = "") : name(name) {} + std::string toStr() const { return std::string("label %") + name; } + }; + void putLabel(const Label& label) + { + put(label.name + ":"); + } + static inline int& getGlobalIdx() + { + static int globalIdx = 0; + return ++globalIdx; + } + static inline void resetGlobalIdx() + { + getGlobalIdx() = 0; + } + static inline void put(const std::string& str) + { + impl::Param<>::f.write(str); + } + void beginFunc(const Function& f); + void endFunc() + { + put("}"); + } + Eval zext(const Operand& x, uint32_t size); + Eval mul(const Operand& x, const Operand& y); + Eval add(const Operand& x, const Operand& y); + Eval sub(const Operand& x, const Operand& y); + Eval _and(const Operand& x, const Operand& y); + Eval _or(const Operand& x, const Operand& y); + void ret(const Operand& r); + Eval lshr(const Operand& x, uint32_t size); + Eval ashr(const Operand& x, uint32_t size); + Eval shl(const Operand& x, uint32_t size); + Eval trunc(const Operand& x, uint32_t size); + Eval getelementptr(const Operand& p, const Operand& i); + Eval getelementptr(const Operand& p, int n); + Eval load(const Operand& p); + void store(const Operand& r, const Operand& p); + Eval select(const Operand& c, const Operand& r1, const Operand& r2); + Eval _alloca(uint32_t bit, uint32_t n); + // QQQ : type of type must be Type + Eval bitcast(const Operand& r, const Operand& type); + Eval icmp(CondType type, const Operand& r1, const Operand& r2); + void br(const Operand& op, const Label& ifTrue, const Label& ifFalse); + Eval call(const Function& f); + Eval call(const Function& f, const Operand& op1); + Eval call(const Function& f, const Operand& op1, const Operand& op2); + Eval call(const Function& f, const Operand& op1, const Operand& op2, const Operand& op3); + Eval call(const Function& f, const Operand& op1, const Operand& op2, const Operand& op3, const Operand& op4); + Eval call(const Function& f, const Operand& op1, const Operand& op2, const Operand& op3, const Operand& op4, const Operand& op5); + + Operand makeImm(uint32_t bit, int64_t imm); +}; + +struct Generator::Operand { + Type type; + uint32_t bit; + int64_t imm; + uint32_t idx; + Operand() : type(None), bit(0), imm(0), idx(0) {} + Operand(Type type, uint32_t bit) + : type(type), bit(bit), imm(0), idx(getGlobalIdx()) + { + } + Operand(const Operand& rhs) + : type(rhs.type), bit(rhs.bit), imm(rhs.imm), idx(rhs.idx) + { + } + void operator=(const Operand& rhs) + { + type = rhs.type; + bit = rhs.bit; + imm = rhs.imm; + idx = rhs.idx; + } + void update() + { + idx = getGlobalIdx(); + } + Operand(const Eval& e); + void operator=(const Eval& e); + + std::string toStr(bool isAlias = true) const + { + if (type.isPtr) { + return getType(isAlias) + " " + getName(); + } + switch (type.type) { + default: + return getType(); + case Int: + case Imm: + return getType() + " " + getName(); + } + } + std::string getType(bool isAlias = true) const + { + std::string s; + switch (type.type) { + default: + return "void"; + 
case Int: + case Imm: + s = std::string("i") + cybozu::itoa(bit); + break; + } + if (type.isPtr) { + s += "*"; + if (!isAlias) { + s += " noalias "; + } + } + return s; + } + std::string getName() const + { + switch (type.type) { + default: + return ""; + case Int: + return std::string("%r") + cybozu::itoa(idx); + case Imm: + return cybozu::itoa(imm); + } + } +}; + +inline Generator::Operand Generator::makeImm(uint32_t bit, int64_t imm) +{ + Generator::Operand v(Generator::Imm, bit); + v.imm = imm; + return v; +} + +struct Generator::Eval { + std::string s; + Generator::Operand op; + mutable bool used; + Eval() : used(false) {} + ~Eval() + { + if (used) return; + put(s); + } +}; + +inline Generator::Operand::Operand(const Generator::Eval& e) +{ + *this = e.op; + update(); + put(getName() + " = " + e.s); + e.used = true; +} + +inline void Generator::Operand::operator=(const Generator::Eval& e) +{ + *this = e.op; + update(); + put(getName() + " = " + e.s); + e.used = true; +} + +struct Generator::Function { + typedef std::vector OperandVec; + std::string name; + Generator::Operand ret; + OperandVec opv; + bool isPrivate; + bool isAlias; + void clear() + { + isPrivate = false; + isAlias = false; + } + explicit Function(const std::string& name = "") : name(name) { clear(); } + Function(const std::string& name, const Operand& ret) + : name(name), ret(ret) + { + clear(); + } + Function(const std::string& name, const Operand& ret, const Operand& op1) + : name(name), ret(ret) + { + clear(); + opv.push_back(op1); + } + Function(const std::string& name, const Operand& ret, const Operand& op1, const Operand& op2) + : name(name), ret(ret) + { + clear(); + opv.push_back(op1); + opv.push_back(op2); + } + Function(const std::string& name, const Operand& ret, const Operand& op1, const Operand& op2, const Operand& op3) + : name(name), ret(ret) + { + clear(); + opv.push_back(op1); + opv.push_back(op2); + opv.push_back(op3); + } + Function(const std::string& name, const Operand& ret, const Operand& op1, const Operand& op2, const Operand& op3, const Operand& op4) + : name(name), ret(ret) + { + clear(); + opv.push_back(op1); + opv.push_back(op2); + opv.push_back(op3); + opv.push_back(op4); + } + Function(const std::string& name, const Operand& ret, const Operand& op1, const Operand& op2, const Operand& op3, const Operand& op4, const Operand& op5) + : name(name), ret(ret) + { + clear(); + opv.push_back(op1); + opv.push_back(op2); + opv.push_back(op3); + opv.push_back(op4); + opv.push_back(op5); + } + void setPrivate() + { + isPrivate = true; + } + void setAlias() + { + isAlias = true; + } + std::string toStr() const + { + std::string str = std::string("define "); + if (isPrivate) { + str += "private "; + } + str += ret.getType(); + str += " @" + name + "("; + for (size_t i = 0; i < opv.size(); i++) { + if (i > 0) str += ", "; + str += opv[i].toStr(isAlias); + } + str += ")"; + return str; + } +}; + +namespace impl { + +inline Generator::Eval callSub(const Generator::Function& f, const Generator::Operand **opTbl, size_t opNum) +{ + if (f.opv.size() != opNum) throw cybozu::Exception("impl:callSub:bad num of arg") << f.opv.size() << opNum; + if (f.name.empty()) throw cybozu::Exception("impl:callSub:no name"); + Generator::Eval e; + e.op = f.ret; + e.s = "call "; + e.s += f.ret.getType(); + e.s += " @" + f.name + "("; + for (size_t i = 0; i < opNum; i++) { + if (i > 0) { + e.s += ", "; + } + e.s += opTbl[i]->toStr(); + } + e.s += ")"; + return e; +} + +inline Generator::Eval aluSub(const char *name, const 
Generator::Operand& x, const Generator::Operand& y) +{ + if (x.bit != y.bit) throw cybozu::Exception("Generator:aluSub:bad size") << name << x.bit << y.bit; + Generator::Eval e; + e.op.type = Generator::Int; + e.op.bit = x.bit; + e.s = name; + e.s += " "; + e.s += x.toStr() + ", " + y.getName(); + return e; +} + +inline Generator::Eval shiftSub(const char *name, const Generator::Operand& x, uint32_t size) +{ + Generator::Eval e; + e.op = x; + e.s = name; + e.s += " "; + e.s += x.toStr() + ", " + cybozu::itoa(size); + return e; +} + +} // mcl::impl + +inline void Generator::beginFunc(const Generator::Function& f) +{ + put(f.toStr() + "\n{"); +} + +inline Generator::Eval Generator::zext(const Generator::Operand& x, uint32_t size) +{ + if (x.bit >= size) throw cybozu::Exception("Generator:zext:bad size") << x.bit << size; + Eval e; + e.op = x; + e.op.bit = size; + e.s = "zext "; + e.s += x.toStr() + " to i" + cybozu::itoa(size); + return e; +} + +inline Generator::Eval Generator::mul(const Generator::Operand& x, const Generator::Operand& y) +{ + return impl::aluSub("mul", x, y); +} + +inline Generator::Eval Generator::add(const Generator::Operand& x, const Generator::Operand& y) +{ + return impl::aluSub("add", x, y); +} + +inline Generator::Eval Generator::sub(const Generator::Operand& x, const Generator::Operand& y) +{ + return impl::aluSub("sub", x, y); +} + +inline Generator::Eval Generator::_and(const Generator::Operand& x, const Generator::Operand& y) +{ + return impl::aluSub("and", x, y); +} + +inline Generator::Eval Generator::_or(const Generator::Operand& x, const Generator::Operand& y) +{ + return impl::aluSub("or", x, y); +} + +inline void Generator::ret(const Generator::Operand& x) +{ + std::string s = "ret " + x.toStr(); + put(s); +} + +inline Generator::Eval Generator::lshr(const Generator::Operand& x, uint32_t size) +{ + return impl::shiftSub("lshr", x, size); +} + +inline Generator::Eval Generator::ashr(const Generator::Operand& x, uint32_t size) +{ + return impl::shiftSub("ashr", x, size); +} + +inline Generator::Eval Generator::shl(const Generator::Operand& x, uint32_t size) +{ + return impl::shiftSub("shl", x, size); +} + +inline Generator::Eval Generator::trunc(const Generator::Operand& x, uint32_t size) +{ + Eval e; + e.op = x; + e.op.bit = size; + e.s = "trunc "; + e.s += x.toStr() + " to i" + cybozu::itoa(size); + return e; +} + +inline Generator::Eval Generator::getelementptr(const Generator::Operand& p, const Generator::Operand& i) +{ + Eval e; + e.op = p; + e.s = "getelementptr "; + if (!isOldLLVM()) { + e.s += "i" + cybozu::itoa(p.bit) + ", "; + } + e.s += p.toStr() + ", " + i.toStr(); + return e; +} + +inline Generator::Eval Generator::getelementptr(const Generator::Operand& p, int n) +{ + return Generator::getelementptr(p, makeImm(32, n)); +} + +inline Generator::Eval Generator::load(const Generator::Operand& p) +{ + if (!p.type.isPtr) throw cybozu::Exception("Generator:load:not pointer") << p.type; + Eval e; + e.op = p; + e.op.type.isPtr = false; + e.s = "load "; + if (!isOldLLVM()) { + e.s += "i" + cybozu::itoa(p.bit) + ", "; + } + e.s += p.toStr(); + return e; +} + +inline void Generator::store(const Generator::Operand& r, const Generator::Operand& p) +{ + if (!p.type.isPtr) throw cybozu::Exception("Generator:store:not pointer") << p.type; + std::string s = "store "; + s += r.toStr(); + s += ", "; + s += p.toStr(); + put(s); +} + +inline Generator::Eval Generator::select(const Generator::Operand& c, const Generator::Operand& r1, const Generator::Operand& r2) +{ 
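Everything here builds LLVM IR as plain text: an Eval carries one instruction string, assigning an Eval to an Operand allocates a fresh %rN name and prints "%rN = <instruction>", and an Eval that is never assigned is flushed verbatim by its destructor. A minimal driver in the same style, hypothetical but using only the API declared in this header (output goes to stdout via impl::Param<>::f, and mcl's cybozu include paths are assumed), would be:

#include "llvm_gen.hpp"
using namespace mcl;

int main()
{
    Generator g;
    Generator::Operand nul;                        // default Operand: void
    Generator::Operand pz(Generator::IntPtr, 64);
    Generator::Operand px(Generator::IntPtr, 64);
    Generator::Operand py(Generator::IntPtr, 64);
    Generator::Function f("add1", nul, pz, px, py);
    g.beginFunc(f);                  // define void @add1(i64* %r1, ...)
    Generator::Operand x = g.load(px);   // %rN = load i64, i64* %r2
    Generator::Operand y = g.load(py);
    Generator::Operand z = g.add(x, y);  // %rN = add i64 %r4, %r5
    g.store(z, pz);                      // store i64 %r6, i64* %r1
    g.ret(nul);                          // ret void
    g.endFunc();                         // }
    return 0;
}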
+ if (c.bit != 1) throw cybozu::Exception("Generator:select:bad bit") << c.bit; + Eval e; + e.op = r1; + e.s = "select "; + e.s += c.toStr(); + e.s += ", "; + e.s += r1.toStr(); + e.s += ", "; + e.s += r2.toStr(); + return e; +} + +inline Generator::Eval Generator::_alloca(uint32_t bit, uint32_t n) +{ + Eval e; + e.op = Operand(IntPtr, bit); + e.s = "alloca i"; + e.s += cybozu::itoa(bit); + e.s += ", i32 "; + e.s += cybozu::itoa(n); + return e; +} + +inline Generator::Eval Generator::bitcast(const Generator::Operand& r, const Generator::Operand& type) +{ + Eval e; + e.op = type; + e.s = "bitcast "; + e.s += r.toStr(); + e.s += " to "; + e.s += type.getType(); + return e; +} + +inline Generator::Eval Generator::icmp(Generator::CondType type, const Generator::Operand& r1, const Generator::Operand& r2) +{ + Eval e; + e.op.type = Int; + e.op.bit = 1; + e.s = "icmp "; + e.s += toStr(type); + e.s += " "; + e.s += r1.toStr(); + e.s += ", "; + e.s += r2.getName(); + return e; +} + +inline void Generator::br(const Generator::Operand& op, const Generator::Label& ifTrue, const Generator::Label& ifFalse) +{ + if (op.bit != 1) throw cybozu::Exception("Generator:br:bad reg size") << op.bit; + std::string s = "br i1 "; + s += op.getName(); + s += ", "; + s += ifTrue.toStr(); + s += ", "; + s += ifFalse.toStr(); + put(s); +} + +inline Generator::Eval Generator::call(const Generator::Function& f) +{ + return impl::callSub(f, 0, 0); +} + +inline Generator::Eval Generator::call(const Generator::Function& f, const Generator::Operand& op1) +{ + const Operand *tbl[] = { &op1 }; + return impl::callSub(f, tbl, CYBOZU_NUM_OF_ARRAY(tbl)); +} + +inline Generator::Eval Generator::call(const Generator::Function& f, const Generator::Operand& op1, const Generator::Operand& op2) +{ + const Operand *tbl[] = { &op1, &op2 }; + return impl::callSub(f, tbl, CYBOZU_NUM_OF_ARRAY(tbl)); +} + +inline Generator::Eval Generator::call(const Generator::Function& f, const Generator::Operand& op1, const Generator::Operand& op2, const Generator::Operand& op3) +{ + const Operand *tbl[] = { &op1, &op2, &op3 }; + return impl::callSub(f, tbl, CYBOZU_NUM_OF_ARRAY(tbl)); +} + +inline Generator::Eval Generator::call(const Generator::Function& f, const Generator::Operand& op1, const Generator::Operand& op2, const Generator::Operand& op3, const Generator::Operand& op4) +{ + const Operand *tbl[] = { &op1, &op2, &op3, &op4 }; + return impl::callSub(f, tbl, CYBOZU_NUM_OF_ARRAY(tbl)); +} + +inline Generator::Eval Generator::call(const Generator::Function& f, const Generator::Operand& op1, const Generator::Operand& op2, const Generator::Operand& op3, const Generator::Operand& op4, const Generator::Operand& opt5) +{ + const Operand *tbl[] = { &op1, &op2, &op3, &op4, &opt5 }; + return impl::callSub(f, tbl, CYBOZU_NUM_OF_ARRAY(tbl)); +} + +#define MCL_GEN_FUNCTION(name, ...)
Function name(#name, __VA_ARGS__) + +} // mcl + +#ifdef _MSC_VER +// #pragma warning(pop) +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/src/low_func.hpp b/vendor/github.com/byzantine-lab/mcl/src/low_func.hpp new file mode 100644 index 000000000..57c63cfa3 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/low_func.hpp @@ -0,0 +1,706 @@ +#pragma once +/** + @file + @brief generic function for each N + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include +#include +#include + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4127) +#endif + +namespace mcl { namespace fp { + +struct Gtag; // GMP +struct Ltag; // LLVM +struct LBMI2tag; // LLVM with Intel BMI2 instruction +struct Atag; // asm + +template struct TagToStr { }; +template<> struct TagToStr { static const char *f() { return "Gtag"; } }; +template<> struct TagToStr { static const char *f() { return "Ltag"; } }; +template<> struct TagToStr { static const char *f() { return "LBMI2tag"; } }; +template<> struct TagToStr { static const char *f() { return "Atag"; } }; + +template +void clearC(Unit *x) +{ + clearArray(x, 0, N); +} + +template +bool isZeroC(const Unit *x) +{ + return isZeroArray(x, N); +} + +template +void copyC(Unit *y, const Unit *x) +{ + copyArray(y, x, N); +} + +// (carry, z[N]) <- x[N] + y[N] +template +struct AddPre { + static inline Unit func(Unit *z, const Unit *x, const Unit *y) + { +#ifdef MCL_USE_VINT + return mcl::vint::addN(z, x, y, N); +#else + return mpn_add_n((mp_limb_t*)z, (const mp_limb_t*)x, (const mp_limb_t*)y, N); +#endif + } + static const u3u f; +}; +template +const u3u AddPre::f = AddPre::func; + +// (carry, x[N]) <- x[N] + y +template +struct AddUnitPre { + static inline Unit func(Unit *x, Unit n, Unit y) + { +#if 1 + int ret = 0; + Unit t = x[0] + y; + x[0] = t; + if (t >= y) goto EXIT_0; + for (size_t i = 1; i < n; i++) { + t = x[i] + 1; + x[i] = t; + if (t != 0) goto EXIT_0; + } + ret = 1; + EXIT_0: + return ret; +#else + return mpn_add_1((mp_limb_t*)x, (const mp_limb_t*)x, (int)n, y); +#endif + } + static const u1uII f; +}; +template +const u1uII AddUnitPre::f = AddUnitPre::func; + +// (carry, z[N]) <- x[N] - y[N] +template +struct SubPre { + static inline Unit func(Unit *z, const Unit *x, const Unit *y) + { +#ifdef MCL_USE_VINT + return mcl::vint::subN(z, x, y, N); +#else + return mpn_sub_n((mp_limb_t*)z, (const mp_limb_t*)x, (const mp_limb_t*)y, N); +#endif + } + static const u3u f; +}; + +template +const u3u SubPre::f = SubPre::func; + +// y[N] <- (x[N] >> 1) +template +struct Shr1 { + static inline void func(Unit *y, const Unit *x) + { +#ifdef MCL_USE_VINT + mcl::vint::shrN(y, x, N, 1); +#else + mpn_rshift((mp_limb_t*)y, (const mp_limb_t*)x, (int)N, 1); +#endif + } + static const void2u f; +}; + +template +const void2u Shr1::f = Shr1::func; + +// y[N] <- (-x[N]) % p[N] +template +struct Neg { + static inline void func(Unit *y, const Unit *x, const Unit *p) + { + if (isZeroC(x)) { + if (x != y) clearC(y); + return; + } + SubPre::f(y, p, x); + } + static const void3u f; +}; + +template +const void3u Neg::f = Neg::func; + +// z[N * 2] <- x[N] * y[N] +template +struct MulPreCore { + static inline void func(Unit *z, const Unit *x, const Unit *y) + { +#ifdef MCL_USE_VINT + mcl::vint::mulNM(z, x, N, y, N); +#else + mpn_mul_n((mp_limb_t*)z, (const mp_limb_t*)x, (const mp_limb_t*)y, (int)N); +#endif + } + static const void3u f; +}; + +template +const void3u MulPreCore::f = 
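AddPre and SubPre above only fix the calling convention; the work is delegated at compile time to mcl::vint or GMP's mpn_* routines. Their contract, written out as portable C++ (a reference sketch of the semantics, not the dispatched implementation):

#include <stdint.h>
#include <stddef.h>

// (carry, z[N]) <- x[N] + y[N] : returns the carry-out (0 or 1)
static uint64_t addPre(uint64_t *z, const uint64_t *x, const uint64_t *y, size_t N)
{
    uint64_t c = 0;
    for (size_t i = 0; i < N; i++) {
        uint64_t s = x[i] + c;
        c = (s < c);            // carry from adding the incoming carry
        z[i] = s + y[i];
        c += (z[i] < s);        // carry from adding y[i]
    }
    return c;
}

// (borrow, z[N]) <- x[N] - y[N] : returns the borrow-out (0 or 1)
static uint64_t subPre(uint64_t *z, const uint64_t *x, const uint64_t *y, size_t N)
{
    uint64_t b = 0;
    for (size_t i = 0; i < N; i++) {
        uint64_t s = x[i] - b;
        b = (s > x[i]);         // borrow from the incoming borrow
        z[i] = s - y[i];
        b += (z[i] > s);        // borrow from subtracting y[i]
    }
    return b;
}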
MulPreCore::func; + +template +struct EnableKaratsuba { + /* always use mpn* for Gtag */ + static const size_t minMulN = 100; + static const size_t minSqrN = 100; +}; + +template +struct MulPre { + /* + W = 1 << H + x = aW + b, y = cW + d + xy = acW^2 + (ad + bc)W + bd + ad + bc = (a + b)(c + d) - ac - bd + */ + static inline void karatsuba(Unit *z, const Unit *x, const Unit *y) + { + const size_t H = N / 2; + MulPre::f(z, x, y); // bd + MulPre::f(z + N, x + H, y + H); // ac + Unit a_b[H]; + Unit c_d[H]; + Unit c1 = AddPre::f(a_b, x, x + H); // a + b + Unit c2 = AddPre::f(c_d, y, y + H); // c + d + Unit tmp[N]; + MulPre::f(tmp, a_b, c_d); + Unit c = c1 & c2; + if (c1) { + c += AddPre::f(tmp + H, tmp + H, c_d); + } + if (c2) { + c += AddPre::f(tmp + H, tmp + H, a_b); + } + // c:tmp[N] = (a + b)(c + d) + c -= SubPre::f(tmp, tmp, z); + c -= SubPre::f(tmp, tmp, z + N); + // c:tmp[N] = ad + bc + c += AddPre::f(z + H, z + H, tmp); + assert(c <= 2); + if (c) { + AddUnitPre::f(z + N + H, H, c); + } + } + static inline void func(Unit *z, const Unit *x, const Unit *y) + { +#if 1 + if (N >= EnableKaratsuba::minMulN && (N % 2) == 0) { + karatsuba(z, x, y); + return; + } +#endif + MulPreCore::f(z, x, y); + } + static const void3u f; +}; + +template +const void3u MulPre::f = MulPre::func; + +template +struct MulPre<0, Tag> { + static inline void f(Unit*, const Unit*, const Unit*) {} +}; + +template +struct MulPre<1, Tag> { + static inline void f(Unit* z, const Unit* x, const Unit* y) + { + MulPreCore<1, Tag>::f(z, x, y); + } +}; + +// z[N * 2] <- x[N] * x[N] +template +struct SqrPreCore { + static inline void func(Unit *y, const Unit *x) + { +#ifdef MCL_USE_VINT + mcl::vint::sqrN(y, x, N); +#else + mpn_sqr((mp_limb_t*)y, (const mp_limb_t*)x, N); +#endif + } + static const void2u f; +}; + +template +const void2u SqrPreCore::f = SqrPreCore::func; + +template +struct SqrPre { + /* + W = 1 << H + x = aW + b + x^2 = aaW^2 + 2abW + bb + */ + static inline void karatsuba(Unit *z, const Unit *x) + { + const size_t H = N / 2; + SqrPre::f(z, x); // b^2 + SqrPre::f(z + N, x + H); // a^2 + Unit ab[N]; + MulPre::f(ab, x, x + H); // ab + Unit c = AddPre::f(ab, ab, ab); + c += AddPre::f(z + H, z + H, ab); + if (c) { + AddUnitPre::f(z + N + H, H, c); + } + } + static inline void func(Unit *y, const Unit *x) + { +#if 1 + if (N >= EnableKaratsuba::minSqrN && (N % 2) == 0) { + karatsuba(y, x); + return; + } +#endif + SqrPreCore::f(y, x); + } + static const void2u f; +}; +template +const void2u SqrPre::f = SqrPre::func; + +template +struct SqrPre<0, Tag> { + static inline void f(Unit*, const Unit*) {} +}; + +template +struct SqrPre<1, Tag> { + static inline void f(Unit* y, const Unit* x) + { + SqrPreCore<1, Tag>::f(y, x); + } +}; + +// z[N + 1] <- x[N] * y +template +struct MulUnitPre { + static inline void func(Unit *z, const Unit *x, Unit y) + { +#ifdef MCL_USE_VINT + z[N] = mcl::vint::mulu1(z, x, N, y); +#else + z[N] = mpn_mul_1((mp_limb_t*)z, (const mp_limb_t*)x, N, y); +#endif + } + static const void2uI f; +}; + +template +const void2uI MulUnitPre::f = MulUnitPre::func; + +// z[N] <- x[N + 1] % p[N] +template +struct N1_Mod { + static inline void func(Unit *y, const Unit *x, const Unit *p) + { +#ifdef MCL_USE_VINT + mcl::vint::divNM(0, 0, y, x, N + 1, p, N); +#else + mp_limb_t q[2]; // not used + mpn_tdiv_qr(q, (mp_limb_t*)y, 0, (const mp_limb_t*)x, N + 1, (const mp_limb_t*)p, N); +#endif + } + static const void3u f; +}; + +template +const void3u N1_Mod::f = N1_Mod::func; + +// z[N] <- (x[N] * y) % p[N] +template 
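The comment block in MulPre::karatsuba compresses the usual derivation: one multiplication of the half-width sums replaces the two cross products. A self-contained sanity check of that identity on concrete numbers (any values whose half-sums do not overflow work equally well):

#include <assert.h>
#include <stdint.h>

int main()
{
    // x = a*W + b, y = c*W + d with W = 2^32
    uint64_t a = 7, b = 9, c = 3, d = 5, W = 1ull << 32;
    unsigned __int128 x = a * W + b, y = c * W + d;
    unsigned __int128 ac = a * c, bd = b * d;
    unsigned __int128 mid = (a + b) * (c + d) - ac - bd; // = ad + bc
    assert(mid == a * d + b * c);
    assert(x * y == ac * W * W + mid * W + bd); // three multiplies suffice
    return 0;
}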
+struct MulUnit { + static inline void func(Unit *z, const Unit *x, Unit y, const Unit *p) + { + Unit xy[N + 1]; + MulUnitPre::f(xy, x, y); +#if 1 + Unit len = UnitBitSize - 1 - cybozu::bsr(p[N - 1]); + Unit v = xy[N]; + if (N > 1 && len < 3 && v < 0xff) { + for (;;) { + if (len == 0) { + v = xy[N]; + } else { + v = (xy[N] << len) | (xy[N - 1] >> (UnitBitSize - len)); + } + if (v == 0) break; + if (v == 1) { + xy[N] -= SubPre::f(xy, xy, p); + } else { + Unit t[N + 1]; + MulUnitPre::f(t, p, v); + SubPre::f(xy, xy, t); + } + } + for (;;) { + if (SubPre::f(z, xy, p)) { + copyC(z, xy); + return; + } + if (SubPre::f(xy, z, p)) { + return; + } + } + } +#endif + N1_Mod::f(z, xy, p); + } + static const void2uIu f; +}; + +template +const void2uIu MulUnit::f = MulUnit::func; + +// z[N] <- x[N * 2] % p[N] +template +struct Dbl_Mod { + static inline void func(Unit *y, const Unit *x, const Unit *p) + { +#ifdef MCL_USE_VINT + mcl::vint::divNM(0, 0, y, x, N * 2, p, N); +#else + mp_limb_t q[N + 1]; // not used + mpn_tdiv_qr(q, (mp_limb_t*)y, 0, (const mp_limb_t*)x, N * 2, (const mp_limb_t*)p, N); +#endif + } + static const void3u f; +}; + +template +const void3u Dbl_Mod::f = Dbl_Mod::func; + +template +struct SubIfPossible { + static inline void f(Unit *z, const Unit *p) + { + Unit tmp[N - 1]; + if (SubPre::f(tmp, z, p) == 0) { + copyC(z, tmp); + z[N - 1] = 0; + } + } +}; +template +struct SubIfPossible<1, Tag> { + static inline void f(Unit *, const Unit *) + { + } +}; + + +// z[N] <- (x[N] + y[N]) % p[N] +template +struct Add { + static inline void func(Unit *z, const Unit *x, const Unit *y, const Unit *p) + { + if (isFullBit) { + if (AddPre::f(z, x, y)) { + SubPre::f(z, z, p); + return; + } + Unit tmp[N]; + if (SubPre::f(tmp, z, p) == 0) { + copyC(z, tmp); + } + } else { + AddPre::f(z, x, y); + Unit a = z[N - 1]; + Unit b = p[N - 1]; + if (a < b) return; + if (a > b) { + SubPre::f(z, z, p); + return; + } + /* the top of z and p are same */ + SubIfPossible::f(z, p); + } + } + static const void4u f; +}; + +template +const void4u Add::f = Add::func; + +// z[N] <- (x[N] - y[N]) % p[N] +template +struct Sub { + static inline void func(Unit *z, const Unit *x, const Unit *y, const Unit *p) + { + if (SubPre::f(z, x, y)) { + AddPre::f(z, z, p); + } + } + static const void4u f; +}; + +template +const void4u Sub::f = Sub::func; + +// z[N * 2] <- (x[N * 2] + y[N * 2]) mod p[N] << (N * UnitBitSize) +template +struct DblAdd { + static inline void func(Unit *z, const Unit *x, const Unit *y, const Unit *p) + { + if (AddPre::f(z, x, y)) { + SubPre::f(z + N, z + N, p); + return; + } + Unit tmp[N]; + if (SubPre::f(tmp, z + N, p) == 0) { + memcpy(z + N, tmp, sizeof(tmp)); + } + } + static const void4u f; +}; + +template +const void4u DblAdd::f = DblAdd::func; + +// z[N * 2] <- (x[N * 2] - y[N * 2]) mod p[N] << (N * UnitBitSize) +template +struct DblSub { + static inline void func(Unit *z, const Unit *x, const Unit *y, const Unit *p) + { + if (SubPre::f(z, x, y)) { + AddPre::f(z + N, z + N, p); + } + } + static const void4u f; +}; + +template +const void4u DblSub::f = DblSub::func; + +/* + z[N] <- montRed(xy[N * 2], p[N]) + REMARK : assume p[-1] = rp +*/ +template +struct MontRed { + static inline void func(Unit *z, const Unit *xy, const Unit *p) + { + const Unit rp = p[-1]; + Unit pq[N + 1]; + Unit buf[N * 2 + 1]; + copyC(buf + N + 1, xy + N + 1); + buf[N * 2] = 0; + Unit q = xy[0] * rp; + MulUnitPre::f(pq, p, q); + Unit up = AddPre::f(buf, xy, pq); + if (up) { + buf[N * 2] = AddUnitPre::f(buf + N + 1, N - 1, 1); + } + 
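MontRed, whose loop continues below, is the standalone REDC: each step chooses q so that the lowest live limb of the accumulator cancels, then conceptually shifts one limb. A scalar C++ sketch of the same recurrence, under the same assumptions as the earlier montMul sketch (64-bit limbs, N <= 8, rp = -p^{-1} mod 2^64 at p[-1]; montRed is a hypothetical name):

#include <stdint.h>
#include <stddef.h>

// z[N] = xy[2N] * R^-1 mod p[N], R = 2^(64*N)
static void montRed(uint64_t *z, const uint64_t *xy, const uint64_t *p,
                    uint64_t rp, size_t N)
{
    uint64_t t[17] = {0}; // 2N + 1 limbs (N <= 8 assumed)
    for (size_t i = 0; i < 2 * N; i++) t[i] = xy[i];
    for (size_t i = 0; i < N; i++) {
        uint64_t q = t[i] * rp, c = 0; // zeroes limb i of t + q*p<<64i
        for (size_t j = 0; j < N; j++) {
            unsigned __int128 v = (unsigned __int128)p[j] * q + t[i + j] + c;
            t[i + j] = (uint64_t)v; c = (uint64_t)(v >> 64);
        }
        for (size_t j = i + N; c != 0; j++) { // propagate the carry
            unsigned __int128 v = (unsigned __int128)t[j] + c;
            t[j] = (uint64_t)v; c = (uint64_t)(v >> 64);
        }
    }
    // result sits in t[N..2N]; subtract p once if needed
    uint64_t s[8], b = 0;
    for (size_t j = 0; j < N; j++) {
        unsigned __int128 d = (unsigned __int128)t[N + j] - p[j] - b;
        s[j] = (uint64_t)d; b = (uint64_t)(d >> 64) & 1;
    }
    for (size_t j = 0; j < N; j++) z[j] = (t[2 * N] || !b) ? s[j] : t[N + j];
}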
Unit *c = buf + 1; + for (size_t i = 1; i < N; i++) { + q = c[0] * rp; + MulUnitPre::f(pq, p, q); + up = AddPre::f(c, c, pq); + if (up) { + AddUnitPre::f(c + N + 1, N - i, 1); + } + c++; + } + if (c[N]) { + SubPre::f(z, c, p); + } else { + if (SubPre::f(z, c, p)) { + memcpy(z, c, N * sizeof(Unit)); + } + } + } + static const void3u f; +}; + +template +const void3u MontRed::f = MontRed::func; + +/* + z[N] <- Montgomery(x[N], y[N], p[N]) + REMARK : assume p[-1] = rp +*/ +template +struct Mont { + static inline void func(Unit *z, const Unit *x, const Unit *y, const Unit *p) + { +#if MCL_MAX_BIT_SIZE == 1024 || MCL_SIZEOF_UNIT == 4 // check speed + Unit xy[N * 2]; + MulPre::f(xy, x, y); + MontRed::f(z, xy, p); +#else + const Unit rp = p[-1]; + if (isFullBit) { + Unit buf[N * 2 + 2]; + Unit *c = buf; + MulUnitPre::f(c, x, y[0]); // x * y[0] + Unit q = c[0] * rp; + Unit t[N + 2]; + MulUnitPre::f(t, p, q); // p * q + t[N + 1] = 0; // always zero + c[N + 1] = AddPre::f(c, c, t); + c++; + for (size_t i = 1; i < N; i++) { + MulUnitPre::f(t, x, y[i]); + c[N + 1] = AddPre::f(c, c, t); + q = c[0] * rp; + MulUnitPre::f(t, p, q); + AddPre::f(c, c, t); + c++; + } + if (c[N]) { + SubPre::f(z, c, p); + } else { + if (SubPre::f(z, c, p)) { + memcpy(z, c, N * sizeof(Unit)); + } + } + } else { + /* + R = 1 << 64 + L % 64 = 63 ; not full bit + F = 1 << (L + 1) + max p = (1 << L) - 1 + x, y <= p - 1 + max x * y[0], p * q <= ((1 << L) - 1)(R - 1) + t = x * y[i] + p * q <= 2((1 << L) - 1)(R - 1) = (F - 2)(R - 1) + t >> 64 <= (F - 2)(R - 1)/R = (F - 2) - (F - 2)/R + t + (t >> 64) = (F - 2)R - (F - 2)/R < FR + */ + Unit carry; + (void)carry; + Unit buf[N * 2 + 1]; + Unit *c = buf; + MulUnitPre::f(c, x, y[0]); // x * y[0] + Unit q = c[0] * rp; + Unit t[N + 1]; + MulUnitPre::f(t, p, q); // p * q + carry = AddPre::f(c, c, t); + assert(carry == 0); + c++; + c[N] = 0; + for (size_t i = 1; i < N; i++) { + c[N + 1] = 0; + MulUnitPre::f(t, x, y[i]); + carry = AddPre::f(c, c, t); + assert(carry == 0); + q = c[0] * rp; + MulUnitPre::f(t, p, q); + carry = AddPre::f(c, c, t); + assert(carry == 0); + c++; + } + assert(c[N] == 0); + if (SubPre::f(z, c, p)) { + memcpy(z, c, N * sizeof(Unit)); + } + } +#endif + } + static const void4u f; +}; + +template +const void4u Mont::f = Mont::func; + +// z[N] <- Montgomery(x[N], x[N], p[N]) +template +struct SqrMont { + static inline void func(Unit *y, const Unit *x, const Unit *p) + { +#if MCL_MAX_BIT_SIZE == 1024 || MCL_SIZEOF_UNIT == 4 // check speed + Unit xx[N * 2]; + SqrPre::f(xx, x); + MontRed::f(y, xx, p); +#else + Mont::f(y, x, x, p); +#endif + } + static const void3u f; +}; +template +const void3u SqrMont::f = SqrMont::func; + +// z[N] <- (x[N] * y[N]) % p[N] +template +struct Mul { + static inline void func(Unit *z, const Unit *x, const Unit *y, const Unit *p) + { + Unit xy[N * 2]; + MulPre::f(xy, x, y); + Dbl_Mod::f(z, xy, p); + } + static const void4u f; +}; +template +const void4u Mul::f = Mul::func; + +// y[N] <- (x[N] * x[N]) % p[N] +template +struct Sqr { + static inline void func(Unit *y, const Unit *x, const Unit *p) + { + Unit xx[N * 2]; + SqrPre::f(xx, x); + Dbl_Mod::f(y, xx, p); + } + static const void3u f; +}; +template +const void3u Sqr::f = Sqr::func; + +template +struct Fp2MulNF { + static inline void func(Unit *z, const Unit *x, const Unit *y, const Unit *p) + { + const Unit *const a = x; + const Unit *const b = x + N; + const Unit *const c = y; + const Unit *const d = y + N; + Unit d0[N * 2]; + Unit d1[N * 2]; + Unit d2[N * 2]; + Unit s[N]; + Unit t[N]; + 
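Fp2MulNF, whose body continues below, computes one Fp2 product with three double-width multiplications instead of four, applying the same Karatsuba-style rearrangement to (a + b*u)(c + d*u) with u^2 = -1 (the quadratic non-residue mcl uses for these BN curves). The identity it relies on, checked on plain integers:

#include <assert.h>

int main()
{
    // (a + b*u)(c + d*u), u*u = -1 => real = ac - bd, imag = ad + bc
    long a = 6, b = 2, c = 4, d = 7;
    long ac = a * c, bd = b * d;
    long imag = (a + b) * (c + d) - ac - bd; // the d0 - d1 - d2 term above
    assert(imag == a * d + b * c);
    return 0;
}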
AddPre::f(s, a, b); + AddPre::f(t, c, d); + MulPre::f(d0, s, t); + MulPre::f(d1, a, c); + MulPre::f(d2, b, d); + SubPre::f(d0, d0, d1); + SubPre::f(d0, d0, d2); + MontRed::f(z + N, d0, p); + DblSub::f(d1, d1, d2, p); + MontRed::f(z, d1, p); + } + static const void4u f; +}; +template +const void4u Fp2MulNF::f = Fp2MulNF::func; + +} } // mcl::fp + +#ifdef _MSC_VER + #pragma warning(pop) +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/src/low_func_llvm.hpp b/vendor/github.com/byzantine-lab/mcl/src/low_func_llvm.hpp new file mode 100644 index 000000000..8a44c2277 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/low_func_llvm.hpp @@ -0,0 +1,94 @@ +#pragma once + +namespace mcl { namespace fp { + +template<> +struct EnableKaratsuba { +#if MCL_SIZEOF_UNIT == 4 + static const size_t minMulN = 10; + static const size_t minSqrN = 10; +#else + static const size_t minMulN = 8; + static const size_t minSqrN = 6; +#endif +}; + +#if MCL_SIZEOF_UNIT == 4 + #define MCL_GMP_IS_FASTER_THAN_LLVM // QQQ : check later +#endif + +#ifdef MCL_GMP_IS_FASTER_THAN_LLVM +#define MCL_DEF_MUL(n, tag, suf) +#else +#define MCL_DEF_MUL(n, tag, suf) \ +template<>const void3u MulPreCore::f = &mcl_fpDbl_mulPre ## n ## suf; \ +template<>const void2u SqrPreCore::f = &mcl_fpDbl_sqrPre ## n ## suf; +#endif + +#define MCL_DEF_LLVM_FUNC2(n, tag, suf) \ +template<>const u3u AddPre::f = &mcl_fp_addPre ## n ## suf; \ +template<>const u3u SubPre::f = &mcl_fp_subPre ## n ## suf; \ +template<>const void2u Shr1::f = &mcl_fp_shr1_ ## n ## suf; \ +MCL_DEF_MUL(n, tag, suf) \ +template<>const void2uI MulUnitPre::f = &mcl_fp_mulUnitPre ## n ## suf; \ +template<>const void4u Add::f = &mcl_fp_add ## n ## suf; \ +template<>const void4u Add::f = &mcl_fp_addNF ## n ## suf; \ +template<>const void4u Sub::f = &mcl_fp_sub ## n ## suf; \ +template<>const void4u Sub::f = &mcl_fp_subNF ## n ## suf; \ +template<>const void4u Mont::f = &mcl_fp_mont ## n ## suf; \ +template<>const void4u Mont::f = &mcl_fp_montNF ## n ## suf; \ +template<>const void3u MontRed::f = &mcl_fp_montRed ## n ## suf; \ +template<>const void4u DblAdd::f = &mcl_fpDbl_add ## n ## suf; \ +template<>const void4u DblSub::f = &mcl_fpDbl_sub ## n ## suf; \ + +#if (CYBOZU_HOST == CYBOZU_HOST_INTEL) && !defined(MCL_USE_VINT) +#define MCL_DEF_LLVM_FUNC(n) \ + MCL_DEF_LLVM_FUNC2(n, Ltag, L) \ + MCL_DEF_LLVM_FUNC2(n, LBMI2tag, Lbmi2) +#else +#define MCL_DEF_LLVM_FUNC(n) \ + MCL_DEF_LLVM_FUNC2(n, Ltag, L) +#endif + +MCL_DEF_LLVM_FUNC(1) +MCL_DEF_LLVM_FUNC(2) +MCL_DEF_LLVM_FUNC(3) +MCL_DEF_LLVM_FUNC(4) +#if MCL_MAX_UNIT_SIZE >= 6 +MCL_DEF_LLVM_FUNC(5) +MCL_DEF_LLVM_FUNC(6) +#endif +#if MCL_MAX_UNIT_SIZE >= 8 +MCL_DEF_LLVM_FUNC(7) +MCL_DEF_LLVM_FUNC(8) +#endif +#if MCL_MAX_UNIT_SIZE >= 9 +MCL_DEF_LLVM_FUNC(9) +#endif +#if MCL_MAX_UNIT_SIZE >= 10 +MCL_DEF_LLVM_FUNC(10) +#endif +#if MCL_MAX_UNIT_SIZE >= 12 +MCL_DEF_LLVM_FUNC(11) +MCL_DEF_LLVM_FUNC(12) +#endif +#if MCL_MAX_UNIT_SIZE >= 14 +MCL_DEF_LLVM_FUNC(13) +MCL_DEF_LLVM_FUNC(14) +#endif +#if MCL_MAX_UNIT_SIZE >= 16 +MCL_DEF_LLVM_FUNC(15) +#if MCL_SIZEOF_UNIT == 4 +MCL_DEF_LLVM_FUNC(16) +#else +/// QQQ : check speed +template<>const void3u MontRed<16, Ltag>::f = &mcl_fp_montRed16L; +template<>const void3u MontRed<16, LBMI2tag>::f = &mcl_fp_montRed16Lbmi2; +#endif +#endif +#if MCL_MAX_UNIT_SIZE >= 17 +MCL_DEF_LLVM_FUNC(17) +#endif + +} } // mcl::fp + diff --git a/vendor/github.com/byzantine-lab/mcl/src/proj/mcl.vcxproj b/vendor/github.com/byzantine-lab/mcl/src/proj/mcl.vcxproj new file mode 100644 index 000000000..b247982ab --- 
/dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/proj/mcl.vcxproj @@ -0,0 +1,92 @@ [mcl.vcxproj: MSBuild project XML whose tags were stripped during extraction. Recoverable settings: project GUID {1DBB979A-C212-45CD-9563-446A96F87F71}, Keyword Win32Proj, RootNamespace ec_test; Debug|x64 and Release|x64 configurations, both StaticLibrary with PlatformToolset v120 and MultiByte; TargetExt .lib, OutDir $(SolutionDir)lib\; Debug builds Level3 warnings with optimization Disabled and WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions), Release builds Level3 with MaxSpeed and WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions); linker SubSystem Console.] \ No newline at end of file diff --git a/vendor/github.com/byzantine-lab/mcl/src/proto.hpp b/vendor/github.com/byzantine-lab/mcl/src/proto.hpp new file mode 100644 index 000000000..97c331194 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/proto.hpp @@ -0,0 +1,81 @@ +#pragma once +/** + @file + @brief prototype of asm function + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include + +#define MCL_FP_DEF_FUNC_SUB(n, suf) \ +void mcl_fp_add ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); \ +void mcl_fp_addNF ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); \ +void mcl_fp_sub ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); \ +void mcl_fp_subNF ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); \ +void mcl_fp_shr1_ ## n ## suf(mcl::fp::Unit*y, const mcl::fp::Unit* x); \ +mcl::fp::Unit mcl_fp_addPre ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y); \ +mcl::fp::Unit mcl_fp_subPre ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y); \ +void mcl_fp_mulUnitPre ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, mcl::fp::Unit y); \ +void mcl_fpDbl_mulPre ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y); \ +void mcl_fpDbl_sqrPre ## n ## suf(mcl::fp::Unit* y, const mcl::fp::Unit* x); \ +void mcl_fp_mont ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); \ +void mcl_fp_montNF ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); \ +void mcl_fp_montRed ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* xy, const mcl::fp::Unit* p); \ +void mcl_fpDbl_add ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); \ +void mcl_fpDbl_sub ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); + +#define MCL_FP_DEF_FUNC(n) \ + MCL_FP_DEF_FUNC_SUB(n, L) \ + MCL_FP_DEF_FUNC_SUB(n, Lbmi2) + +#define MCL_FP_DEF_FUNC_SPECIAL(suf) \ +void mcl_fpDbl_mod_NIST_P192 ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* xy, const mcl::fp::Unit* /* dummy */); \ +void mcl_fp_mulNIST_P192 ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* /* dummy */); \ +void mcl_fp_sqr_NIST_P192 ## suf(mcl::fp::Unit* y, const mcl::fp::Unit* x, const mcl::fp::Unit* /* dummy */); \ +void mcl_fpDbl_mod_NIST_P521 ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* xy, const mcl::fp::Unit* /* dummy */); + +extern "C" { + +MCL_FP_DEF_FUNC(1) +MCL_FP_DEF_FUNC(2) +MCL_FP_DEF_FUNC(3)
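For orientation, each MCL_FP_DEF_FUNC(n) in this list fans out to an L (plain LLVM) and an Lbmi2 (BMI2) flavor of every per-size entry point. The preprocessor output for MCL_FP_DEF_FUNC(4) includes, among others:

void mcl_fp_add4L(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p);
mcl::fp::Unit mcl_fp_addPre4L(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y);
void mcl_fp_mont4Lbmi2(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p);

These names match the ones emitted by the LLVM IR generator above and selected in low_func_llvm.hpp.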
+MCL_FP_DEF_FUNC(4) +#if MCL_MAX_UNIT_SIZE >= 6 +MCL_FP_DEF_FUNC(5) +MCL_FP_DEF_FUNC(6) +#endif +#if MCL_MAX_UNIT_SIZE >= 8 +MCL_FP_DEF_FUNC(7) +MCL_FP_DEF_FUNC(8) +#endif +#if MCL_MAX_UNIT_SIZE >= 9 +MCL_FP_DEF_FUNC(9) +#endif +#if MCL_MAX_UNIT_SIZE >= 10 +MCL_FP_DEF_FUNC(10) +#endif +#if MCL_MAX_UNIT_SIZE >= 12 +MCL_FP_DEF_FUNC(11) +MCL_FP_DEF_FUNC(12) +#endif +#if MCL_MAX_UNIT_SIZE >= 14 +MCL_FP_DEF_FUNC(13) +MCL_FP_DEF_FUNC(14) +#endif +#if MCL_MAX_UNIT_SIZE >= 16 +MCL_FP_DEF_FUNC(15) +MCL_FP_DEF_FUNC(16) +#endif +#if MCL_MAX_UNIT_SIZE >= 17 +MCL_FP_DEF_FUNC(17) +#endif + +MCL_FP_DEF_FUNC_SPECIAL(L) +MCL_FP_DEF_FUNC_SPECIAL(Lbmi2) + +} + +#undef MCL_FP_DEF_FUNC_SUB +#undef MCL_FP_DEF_FUNC + diff --git a/vendor/github.com/byzantine-lab/mcl/src/she_c256.cpp b/vendor/github.com/byzantine-lab/mcl/src/she_c256.cpp new file mode 100644 index 000000000..84873e4ca --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/she_c256.cpp @@ -0,0 +1,2 @@ +#define MCLBN_FP_UNIT_SIZE 4 +#include "she_c_impl.hpp" diff --git a/vendor/github.com/byzantine-lab/mcl/src/she_c384.cpp b/vendor/github.com/byzantine-lab/mcl/src/she_c384.cpp new file mode 100644 index 000000000..bfc456a05 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/she_c384.cpp @@ -0,0 +1,2 @@ +#define MCLBN_FP_UNIT_SIZE 6 +#include "she_c_impl.hpp" diff --git a/vendor/github.com/byzantine-lab/mcl/src/she_c_impl.hpp b/vendor/github.com/byzantine-lab/mcl/src/she_c_impl.hpp new file mode 100644 index 000000000..073bc2b34 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/she_c_impl.hpp @@ -0,0 +1,681 @@ +#include +#include +#include +#include +#include +#include +#include +#include "mcl/impl/bn_c_impl.hpp" +#define MCLSHE_DLL_EXPORT + +#include +#include + +using namespace mcl::she; +using namespace mcl::bn; + +static SecretKey *cast(sheSecretKey *p) { return reinterpret_cast(p); } +static const SecretKey *cast(const sheSecretKey *p) { return reinterpret_cast(p); } + +static PublicKey *cast(shePublicKey *p) { return reinterpret_cast(p); } +static const PublicKey *cast(const shePublicKey *p) { return reinterpret_cast(p); } + +static PrecomputedPublicKey *cast(shePrecomputedPublicKey *p) { return reinterpret_cast(p); } +static const PrecomputedPublicKey *cast(const shePrecomputedPublicKey *p) { return reinterpret_cast(p); } + +static CipherTextG1 *cast(sheCipherTextG1 *p) { return reinterpret_cast(p); } +static const CipherTextG1 *cast(const sheCipherTextG1 *p) { return reinterpret_cast(p); } + +static CipherTextG2 *cast(sheCipherTextG2 *p) { return reinterpret_cast(p); } +static const CipherTextG2 *cast(const sheCipherTextG2 *p) { return reinterpret_cast(p); } + +static CipherTextGT *cast(sheCipherTextGT *p) { return reinterpret_cast(p); } +static const CipherTextGT *cast(const sheCipherTextGT *p) { return reinterpret_cast(p); } + +static ZkpBin *cast(sheZkpBin *p) { return reinterpret_cast(p); } +static const ZkpBin *cast(const sheZkpBin *p) { return reinterpret_cast(p); } + +static ZkpEq *cast(sheZkpEq *p) { return reinterpret_cast(p); } +static const ZkpEq *cast(const sheZkpEq *p) { return reinterpret_cast(p); } + +static ZkpBinEq *cast(sheZkpBinEq *p) { return reinterpret_cast(p); } +static const ZkpBinEq *cast(const sheZkpBinEq *p) { return reinterpret_cast(p); } + +int sheInit(int curve, int compiledTimeVar) + try +{ + if (compiledTimeVar != MCLBN_COMPILED_TIME_VAR) { + return -2; + } + mcl::CurveParam cp; + switch (curve) { + case MCL_BN254: + cp = mcl::BN254; + break; + case MCL_BN381_1: + cp = mcl::BN381_1; + 
break; + case MCL_BN381_2: + cp = mcl::BN381_2; + break; + case MCL_BN462: + cp = mcl::BN462; + break; + case MCL_BN_SNARK1: + cp = mcl::BN_SNARK1; + break; + case MCL_BLS12_381: + cp = mcl::BLS12_381; + break; + default: + return -1; + } + SHE::init(cp); + return 0; +} catch (std::exception&) { + return -1; +} + +mclSize sheSecretKeySerialize(void *buf, mclSize maxBufSize, const sheSecretKey *sec) +{ + return (mclSize)cast(sec)->serialize(buf, maxBufSize); +} + +mclSize shePublicKeySerialize(void *buf, mclSize maxBufSize, const shePublicKey *pub) +{ + return (mclSize)cast(pub)->serialize(buf, maxBufSize); +} + +mclSize sheCipherTextG1Serialize(void *buf, mclSize maxBufSize, const sheCipherTextG1 *c) +{ + return (mclSize)cast(c)->serialize(buf, maxBufSize); +} + +mclSize sheCipherTextG2Serialize(void *buf, mclSize maxBufSize, const sheCipherTextG2 *c) +{ + return (mclSize)cast(c)->serialize(buf, maxBufSize); +} + +mclSize sheCipherTextGTSerialize(void *buf, mclSize maxBufSize, const sheCipherTextGT *c) +{ + return (mclSize)cast(c)->serialize(buf, maxBufSize); +} + +mclSize sheZkpBinSerialize(void *buf, mclSize maxBufSize, const sheZkpBin *zkp) +{ + return (mclSize)cast(zkp)->serialize(buf, maxBufSize); +} + +mclSize sheZkpEqSerialize(void *buf, mclSize maxBufSize, const sheZkpEq *zkp) +{ + return (mclSize)cast(zkp)->serialize(buf, maxBufSize); +} + +mclSize sheZkpBinEqSerialize(void *buf, mclSize maxBufSize, const sheZkpBinEq *zkp) +{ + return (mclSize)cast(zkp)->serialize(buf, maxBufSize); +} + +mclSize sheSecretKeyDeserialize(sheSecretKey* sec, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(sec)->deserialize(buf, bufSize); +} + +mclSize shePublicKeyDeserialize(shePublicKey* pub, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(pub)->deserialize(buf, bufSize); +} + +mclSize sheCipherTextG1Deserialize(sheCipherTextG1* c, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(c)->deserialize(buf, bufSize); +} + +mclSize sheCipherTextG2Deserialize(sheCipherTextG2* c, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(c)->deserialize(buf, bufSize); +} + +mclSize sheCipherTextGTDeserialize(sheCipherTextGT* c, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(c)->deserialize(buf, bufSize); +} + +mclSize sheZkpBinDeserialize(sheZkpBin* zkp, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(zkp)->deserialize(buf, bufSize); +} + +mclSize sheZkpEqDeserialize(sheZkpEq* zkp, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(zkp)->deserialize(buf, bufSize); +} + +mclSize sheZkpBinEqDeserialize(sheZkpBinEq* zkp, const void *buf, mclSize bufSize) +{ + return (mclSize)cast(zkp)->deserialize(buf, bufSize); +} + +int sheSecretKeySetByCSPRNG(sheSecretKey *sec) +{ + cast(sec)->setByCSPRNG(); + return 0; +} + +void sheGetPublicKey(shePublicKey *pub, const sheSecretKey *sec) +{ + cast(sec)->getPublicKey(*cast(pub)); +} + +static int setRangeForDLP(void (*f)(mclSize), mclSize hashSize) + try +{ + f(hashSize); + return 0; +} catch (std::exception&) { + return -1; +} + +int sheSetRangeForDLP(mclSize hashSize) +{ + return setRangeForDLP(SHE::setRangeForDLP, hashSize); +} +int sheSetRangeForG1DLP(mclSize hashSize) +{ + return setRangeForDLP(SHE::setRangeForG1DLP, hashSize); +} +int sheSetRangeForG2DLP(mclSize hashSize) +{ + return setRangeForDLP(SHE::setRangeForG2DLP, hashSize); +} +int sheSetRangeForGTDLP(mclSize hashSize) +{ + return setRangeForDLP(SHE::setRangeForGTDLP, hashSize); +} + +void sheSetTryNum(mclSize tryNum) +{ + 
SHE::setTryNum(tryNum); +} +void sheUseDecG1ViaGT(int use) +{ + SHE::useDecG1ViaGT(use != 0); +} +void sheUseDecG2ViaGT(int use) +{ + SHE::useDecG2ViaGT(use != 0); +} + +template +mclSize loadTable(HashTable& table, const void *buf, mclSize bufSize) + try +{ + return table.load(buf, bufSize); +} catch (std::exception&) { + return 0; +} + +mclSize sheLoadTableForG1DLP(const void *buf, mclSize bufSize) +{ + return loadTable(getHashTableG1(), buf, bufSize); +} +mclSize sheLoadTableForG2DLP(const void *buf, mclSize bufSize) +{ + return loadTable(getHashTableG2(), buf, bufSize); +} +mclSize sheLoadTableForGTDLP(const void *buf, mclSize bufSize) +{ + return loadTable(getHashTableGT(), buf, bufSize); +} + +template +mclSize saveTable(void *buf, mclSize maxBufSize, const HashTable& table) + try +{ + return table.save(buf, maxBufSize); +} catch (std::exception&) { + return 0; +} +mclSize sheSaveTableForG1DLP(void *buf, mclSize maxBufSize) +{ + return saveTable(buf, maxBufSize, SHE::PhashTbl_); +} +mclSize sheSaveTableForG2DLP(void *buf, mclSize maxBufSize) +{ + return saveTable(buf, maxBufSize, SHE::QhashTbl_); +} +mclSize sheSaveTableForGTDLP(void *buf, mclSize maxBufSize) +{ + return saveTable(buf, maxBufSize, SHE::ePQhashTbl_); +} + +template +int encT(CT *c, const shePublicKey *pub, mclInt m) + try +{ + cast(pub)->enc(*cast(c), m); + return 0; +} catch (std::exception&) { + return -1; +} + +int sheEncG1(sheCipherTextG1 *c, const shePublicKey *pub, mclInt m) +{ + return encT(c, pub, m); +} + +int sheEncG2(sheCipherTextG2 *c, const shePublicKey *pub, mclInt m) +{ + return encT(c, pub, m); +} + +int sheEncGT(sheCipherTextGT *c, const shePublicKey *pub, mclInt m) +{ + return encT(c, pub, m); +} + +template +int encWithZkpBinT(CT *c, sheZkpBin *zkp, const PK *pub, int m) + try +{ + cast(pub)->encWithZkpBin(*cast(c), *cast(zkp), m); + return 0; +} catch (std::exception&) { + return -1; +} + +int sheEncWithZkpBinG1(sheCipherTextG1 *c, sheZkpBin *zkp, const shePublicKey *pub, int m) +{ + return encWithZkpBinT(c, zkp, pub, m); +} + +int sheEncWithZkpBinG2(sheCipherTextG2 *c, sheZkpBin *zkp, const shePublicKey *pub, int m) +{ + return encWithZkpBinT(c, zkp, pub, m); +} + +int shePrecomputedPublicKeyEncWithZkpBinG1(sheCipherTextG1 *c, sheZkpBin *zkp, const shePrecomputedPublicKey *pub, int m) +{ + return encWithZkpBinT(c, zkp, pub, m); +} + +int shePrecomputedPublicKeyEncWithZkpBinG2(sheCipherTextG2 *c, sheZkpBin *zkp, const shePrecomputedPublicKey *pub, int m) +{ + return encWithZkpBinT(c, zkp, pub, m); +} + +template +int encWithZkpEqT(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpEq *zkp, const PK *pub, mclInt m) + try +{ + cast(pub)->encWithZkpEq(*cast(c1), *cast(c2), *cast(zkp), m); + return 0; +} catch (std::exception&) { + return -1; +} + +int sheEncWithZkpEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpEq *zkp, const shePublicKey *pub, mclInt m) +{ + return encWithZkpEqT(c1, c2, zkp, pub, m); +} + +int shePrecomputedPublicKeyEncWithZkpEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpEq *zkp, const shePrecomputedPublicKey *ppub, mclInt m) +{ + return encWithZkpEqT(c1, c2, zkp, ppub, m); +} + +template +int encWithZkpBinEqT(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpBinEq *zkp, const PK *pub, int m) + try +{ + cast(pub)->encWithZkpBinEq(*cast(c1), *cast(c2), *cast(zkp), m); + return 0; +} catch (std::exception&) { + return -1; +} + +int sheEncWithZkpBinEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpBinEq *zkp, const shePublicKey *pub, int m) +{ + return encWithZkpBinEqT(c1, c2, zkp, 
pub, m); +} + +int shePrecomputedPublicKeyEncWithZkpBinEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpBinEq *zkp, const shePrecomputedPublicKey *ppub, int m) +{ + return encWithZkpBinEqT(c1, c2, zkp, ppub, m); +} + +template +int decT(mclInt *m, const sheSecretKey *sec, const CT *c) + try +{ + *m = (cast(sec)->dec)(*cast(c)); + return 0; +} catch (std::exception&) { + return -1; +} + +int sheDecG1(mclInt *m, const sheSecretKey *sec, const sheCipherTextG1 *c) +{ + return decT(m, sec, c); +} + +int sheDecG2(mclInt *m, const sheSecretKey *sec, const sheCipherTextG2 *c) +{ + return decT(m, sec, c); +} + +int sheDecGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextGT *c) +{ + return decT(m, sec, c); +} + +template +int decViaGTT(mclInt *m, const sheSecretKey *sec, const CT *c) + try +{ + *m = (cast(sec)->decViaGT)(*cast(c)); + return 0; +} catch (std::exception&) { + return -1; +} + +int sheDecG1ViaGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextG1 *c) +{ + return decViaGTT(m, sec, c); +} + +int sheDecG2ViaGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextG2 *c) +{ + return decViaGTT(m, sec, c); +} + + +template +int isZeroT(const sheSecretKey *sec, const CT *c) + try +{ + return cast(sec)->isZero(*cast(c)); +} catch (std::exception&) { + return 0; +} + +int sheIsZeroG1(const sheSecretKey *sec, const sheCipherTextG1 *c) +{ + return isZeroT(sec, c); +} +int sheIsZeroG2(const sheSecretKey *sec, const sheCipherTextG2 *c) +{ + return isZeroT(sec, c); +} +int sheIsZeroGT(const sheSecretKey *sec, const sheCipherTextGT *c) +{ + return isZeroT(sec, c); +} + +template +int negT(CT& y, const CT& x) + try +{ + CT::neg(y, x); + return 0; +} catch (std::exception&) { + return -1; +} + +int sheNegG1(sheCipherTextG1 *y, const sheCipherTextG1 *x) +{ + return negT(*cast(y), *cast(x)); +} + +int sheNegG2(sheCipherTextG2 *y, const sheCipherTextG2 *x) +{ + return negT(*cast(y), *cast(x)); +} + +int sheNegGT(sheCipherTextGT *y, const sheCipherTextGT *x) +{ + return negT(*cast(y), *cast(x)); +} + +template +int addT(CT& z, const CT& x, const CT& y) + try +{ + CT::add(z, x, y); + return 0; +} catch (std::exception&) { + return -1; +} + +int sheAddG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, const sheCipherTextG1 *y) +{ + return addT(*cast(z), *cast(x), *cast(y)); +} + +int sheAddG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, const sheCipherTextG2 *y) +{ + return addT(*cast(z), *cast(x), *cast(y)); +} + +int sheAddGT(sheCipherTextGT *z, const sheCipherTextGT *x, const sheCipherTextGT *y) +{ + return addT(*cast(z), *cast(x), *cast(y)); +} + +template +int subT(CT& z, const CT& x, const CT& y) + try +{ + CT::sub(z, x, y); + return 0; +} catch (std::exception&) { + return -1; +} + +int sheSubG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, const sheCipherTextG1 *y) +{ + return subT(*cast(z), *cast(x), *cast(y)); +} + +int sheSubG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, const sheCipherTextG2 *y) +{ + return subT(*cast(z), *cast(x), *cast(y)); +} + +int sheSubGT(sheCipherTextGT *z, const sheCipherTextGT *x, const sheCipherTextGT *y) +{ + return subT(*cast(z), *cast(x), *cast(y)); +} + +template +int mulT(CT1& z, const CT2& x, const CT3& y) + try +{ + CT1::mul(z, x, y); + return 0; +} catch (std::exception&) { + return -1; +} + +int sheMulG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, mclInt y) +{ + return mulT(*cast(z), *cast(x), y); +} + +int sheMulG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, mclInt y) +{ + return mulT(*cast(z), *cast(x), y); +} + +int 
sheMulGT(sheCipherTextGT *z, const sheCipherTextGT *x, mclInt y) +{ + return mulT(*cast(z), *cast(x), y); +} + +int sheMul(sheCipherTextGT *z, const sheCipherTextG1 *x, const sheCipherTextG2 *y) +{ + return mulT(*cast(z), *cast(x), *cast(y)); +} + +int sheMulML(sheCipherTextGT *z, const sheCipherTextG1 *x, const sheCipherTextG2 *y) + try +{ + CipherTextGT::mulML(*cast(z), *cast(x), *cast(y)); + return 0; +} catch (std::exception&) { + return -1; +} + +int sheFinalExpGT(sheCipherTextGT *y, const sheCipherTextGT *x) + try +{ + CipherTextGT::finalExp(*cast(y), *cast(x)); + return 0; +} catch (std::exception&) { + return -1; +} + +template +int reRandT(CT& c, const shePublicKey *pub) + try +{ + cast(pub)->reRand(c); + return 0; +} catch (std::exception&) { + return -1; +} + +int sheReRandG1(sheCipherTextG1 *c, const shePublicKey *pub) +{ + return reRandT(*cast(c), pub); +} + +int sheReRandG2(sheCipherTextG2 *c, const shePublicKey *pub) +{ + return reRandT(*cast(c), pub); +} + +int sheReRandGT(sheCipherTextGT *c, const shePublicKey *pub) +{ + return reRandT(*cast(c), pub); +} + +template +int convert(sheCipherTextGT *y, const shePublicKey *pub, const CT *x) + try +{ + cast(pub)->convert(*cast(y), *cast(x)); + return 0; +} catch (std::exception&) { + return -1; +} + +int sheConvertG1(sheCipherTextGT *y, const shePublicKey *pub, const sheCipherTextG1 *x) +{ + return convert(y, pub, x); +} + +int sheConvertG2(sheCipherTextGT *y, const shePublicKey *pub, const sheCipherTextG2 *x) +{ + return convert(y, pub, x); +} + +shePrecomputedPublicKey *shePrecomputedPublicKeyCreate() + try +{ + return reinterpret_cast(new PrecomputedPublicKey()); +} catch (...) { + return 0; +} + +void shePrecomputedPublicKeyDestroy(shePrecomputedPublicKey *ppub) +{ + delete cast(ppub); +} + +int shePrecomputedPublicKeyInit(shePrecomputedPublicKey *ppub, const shePublicKey *pub) + try +{ + cast(ppub)->init(*cast(pub)); + return 0; +} catch (...) 
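Taken together, these wrappers expose the usual encrypt, evaluate, decrypt flow through plain C. A hypothetical end-to-end use, assuming mcl's usual <mcl/she.h> header path and omitting error checks (every call here returns 0 on success):

#include <mcl/she.h>

int main()
{
    sheInit(MCL_BN254, MCLBN_COMPILED_TIME_VAR);
    sheSetRangeForDLP(1024);        // DLP table size used by decryption
    sheSecretKey sec;
    sheSecretKeySetByCSPRNG(&sec);
    shePublicKey pub;
    sheGetPublicKey(&pub, &sec);
    sheCipherTextG1 c1, c2;
    sheEncG1(&c1, &pub, 3);
    sheEncG1(&c2, &pub, 4);
    sheAddG1(&c1, &c1, &c2);        // additively homomorphic: Enc(3 + 4)
    mclInt m = 0;
    sheDecG1(&m, &sec, &c1);        // m == 7
    return 0;
}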
{ + return 1; +} + +template +int pEncT(CT *c, const shePrecomputedPublicKey *pub, mclInt m) + try +{ + cast(pub)->enc(*cast(c), m); + return 0; +} catch (std::exception&) { + return -1; +} + +int shePrecomputedPublicKeyEncG1(sheCipherTextG1 *c, const shePrecomputedPublicKey *pub, mclInt m) +{ + return pEncT(c, pub, m); +} + +int shePrecomputedPublicKeyEncG2(sheCipherTextG2 *c, const shePrecomputedPublicKey *pub, mclInt m) +{ + return pEncT(c, pub, m); +} + +int shePrecomputedPublicKeyEncGT(sheCipherTextGT *c, const shePrecomputedPublicKey *pub, mclInt m) +{ + return pEncT(c, pub, m); +} + +template +int verifyT(const PK& pub, const CT& c, const ZkpBin& zkp) + try +{ + return pub.verify(c, zkp); +} catch (std::exception&) { + return 0; +} + +int sheVerifyZkpBinG1(const shePublicKey *pub, const sheCipherTextG1 *c, const sheZkpBin *zkp) +{ + return verifyT(*cast(pub), *cast(c), *cast(zkp)); +} +int sheVerifyZkpBinG2(const shePublicKey *pub, const sheCipherTextG2 *c, const sheZkpBin *zkp) +{ + return verifyT(*cast(pub), *cast(c), *cast(zkp)); +} +int shePrecomputedPublicKeyVerifyZkpBinG1(const shePrecomputedPublicKey *pub, const sheCipherTextG1 *c, const sheZkpBin *zkp) +{ + return verifyT(*cast(pub), *cast(c), *cast(zkp)); +} +int shePrecomputedPublicKeyVerifyZkpBinG2(const shePrecomputedPublicKey *pub, const sheCipherTextG2 *c, const sheZkpBin *zkp) +{ + return verifyT(*cast(pub), *cast(c), *cast(zkp)); +} + +template +int verifyT(const PK& pub, const CipherTextG1& c1, const CipherTextG2& c2, const Zkp& zkp) + try +{ + return pub.verify(c1, c2, zkp); +} catch (std::exception&) { + return 0; +} + +int sheVerifyZkpEq(const shePublicKey *pub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpEq *zkp) +{ + return verifyT(*cast(pub), *cast(c1), *cast(c2), *cast(zkp)); +} +int sheVerifyZkpBinEq(const shePublicKey *pub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpBinEq *zkp) +{ + return verifyT(*cast(pub), *cast(c1), *cast(c2), *cast(zkp)); +} +int shePrecomputedPublicKeyVerifyZkpEq(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpEq *zkp) +{ + return verifyT(*cast(ppub), *cast(c1), *cast(c2), *cast(zkp)); +} +int shePrecomputedPublicKeyVerifyZkpBinEq(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpBinEq *zkp) +{ + return verifyT(*cast(ppub), *cast(c1), *cast(c2), *cast(zkp)); +} + diff --git a/vendor/github.com/byzantine-lab/mcl/src/xbyak/xbyak.h b/vendor/github.com/byzantine-lab/mcl/src/xbyak/xbyak.h new file mode 100644 index 000000000..bcfeb34bf --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/xbyak/xbyak.h @@ -0,0 +1,2611 @@ +#pragma once +#ifndef XBYAK_XBYAK_H_ +#define XBYAK_XBYAK_H_ +/*! + @file xbyak.h + @brief Xbyak ; JIT assembler for x86(IA32)/x64 by C++ + @author herumi + @url https://github.com/herumi/xbyak + @note modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#ifndef XBYAK_NO_OP_NAMES + #if not +0 // trick to detect whether 'not' is operator or not + #error "use -fno-operator-names option if you want to use and(), or(), xor(), not() as function names, Or define XBYAK_NO_OP_NAMES and use and_(), or_(), xor_(), not_()." 
+ #endif +#endif + +#include // for debug print +#include +#include +#include +#include +#ifndef NDEBUG +#include +#endif + +// #define XBYAK_DISABLE_AVX512 + +//#define XBYAK_USE_MMAP_ALLOCATOR +#if !defined(__GNUC__) || defined(__MINGW32__) + #undef XBYAK_USE_MMAP_ALLOCATOR +#endif + +#ifdef __GNUC__ + #define XBYAK_GNUC_PREREQ(major, minor) ((__GNUC__) * 100 + (__GNUC_MINOR__) >= (major) * 100 + (minor)) +#else + #define XBYAK_GNUC_PREREQ(major, minor) 0 +#endif + +// This covers -std=(gnu|c)++(0x|11|1y), -stdlib=libc++, and modern Microsoft. +#if ((defined(_MSC_VER) && (_MSC_VER >= 1600)) || defined(_LIBCPP_VERSION) ||\ + ((__cplusplus >= 201103) || defined(__GXX_EXPERIMENTAL_CXX0X__))) + #include + #define XBYAK_STD_UNORDERED_SET std::unordered_set + #include + #define XBYAK_STD_UNORDERED_MAP std::unordered_map + #define XBYAK_STD_UNORDERED_MULTIMAP std::unordered_multimap + +/* + Clang/llvm-gcc and ICC-EDG in 'GCC-mode' always claim to be GCC 4.2, using + libstdcxx 20070719 (from GCC 4.2.1, the last GPL 2 version). +*/ +#elif XBYAK_GNUC_PREREQ(4, 5) || (XBYAK_GNUC_PREREQ(4, 2) && __GLIBCXX__ >= 20070719) || defined(__INTEL_COMPILER) || defined(__llvm__) + #include + #define XBYAK_STD_UNORDERED_SET std::tr1::unordered_set + #include + #define XBYAK_STD_UNORDERED_MAP std::tr1::unordered_map + #define XBYAK_STD_UNORDERED_MULTIMAP std::tr1::unordered_multimap + +#elif defined(_MSC_VER) && (_MSC_VER >= 1500) && (_MSC_VER < 1600) + #include + #define XBYAK_STD_UNORDERED_SET std::tr1::unordered_set + #include + #define XBYAK_STD_UNORDERED_MAP std::tr1::unordered_map + #define XBYAK_STD_UNORDERED_MULTIMAP std::tr1::unordered_multimap + +#else + #include + #define XBYAK_STD_UNORDERED_SET std::set + #include + #define XBYAK_STD_UNORDERED_MAP std::map + #define XBYAK_STD_UNORDERED_MULTIMAP std::multimap +#endif +#ifdef _WIN32 + #include + #include + #include +#elif defined(__GNUC__) + #include + #include + #include +#endif +#if !defined(_MSC_VER) || (_MSC_VER >= 1600) + #include +#endif + +#if defined(_WIN64) || defined(__MINGW64__) || (defined(__CYGWIN__) && defined(__x86_64__)) + #define XBYAK64_WIN +#elif defined(__x86_64__) + #define XBYAK64_GCC +#endif +#if !defined(XBYAK64) && !defined(XBYAK32) + #if defined(XBYAK64_GCC) || defined(XBYAK64_WIN) + #define XBYAK64 + #else + #define XBYAK32 + #endif +#endif + +#if (__cplusplus >= 201103) || (_MSC_VER >= 1800) + #define XBYAK_VARIADIC_TEMPLATE +#endif + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4514) /* remove inline function */ + #pragma warning(disable : 4786) /* identifier is too long */ + #pragma warning(disable : 4503) /* name is too long */ + #pragma warning(disable : 4127) /* constant expresison */ +#endif + +namespace Xbyak { + +enum { + DEFAULT_MAX_CODE_SIZE = 4096, + VERSION = 0x5751 /* 0xABCD = A.BC(D) */ +}; + +#ifndef MIE_INTEGER_TYPE_DEFINED +#define MIE_INTEGER_TYPE_DEFINED +#ifdef _MSC_VER + typedef unsigned __int64 uint64; + typedef __int64 sint64; +#else + typedef uint64_t uint64; + typedef int64_t sint64; +#endif +typedef unsigned int uint32; +typedef unsigned short uint16; +typedef unsigned char uint8; +#endif + +#ifndef MIE_ALIGN + #ifdef _MSC_VER + #define MIE_ALIGN(x) __declspec(align(x)) + #else + #define MIE_ALIGN(x) __attribute__((aligned(x))) + #endif +#endif +#ifndef MIE_PACK // for shufps + #define MIE_PACK(x, y, z, w) ((x) * 64 + (y) * 16 + (z) * 4 + (w)) +#endif + +enum { + ERR_NONE = 0, + ERR_BAD_ADDRESSING, + ERR_CODE_IS_TOO_BIG, + ERR_BAD_SCALE, + ERR_ESP_CANT_BE_INDEX, + 
ERR_BAD_COMBINATION, + ERR_BAD_SIZE_OF_REGISTER, + ERR_IMM_IS_TOO_BIG, + ERR_BAD_ALIGN, + ERR_LABEL_IS_REDEFINED, + ERR_LABEL_IS_TOO_FAR, + ERR_LABEL_IS_NOT_FOUND, + ERR_CODE_ISNOT_COPYABLE, + ERR_BAD_PARAMETER, + ERR_CANT_PROTECT, + ERR_CANT_USE_64BIT_DISP, + ERR_OFFSET_IS_TOO_BIG, + ERR_MEM_SIZE_IS_NOT_SPECIFIED, + ERR_BAD_MEM_SIZE, + ERR_BAD_ST_COMBINATION, + ERR_OVER_LOCAL_LABEL, // not used + ERR_UNDER_LOCAL_LABEL, + ERR_CANT_ALLOC, + ERR_ONLY_T_NEAR_IS_SUPPORTED_IN_AUTO_GROW, + ERR_BAD_PROTECT_MODE, + ERR_BAD_PNUM, + ERR_BAD_TNUM, + ERR_BAD_VSIB_ADDRESSING, + ERR_CANT_CONVERT, + ERR_LABEL_ISNOT_SET_BY_L, + ERR_LABEL_IS_ALREADY_SET_BY_L, + ERR_BAD_LABEL_STR, + ERR_MUNMAP, + ERR_OPMASK_IS_ALREADY_SET, + ERR_ROUNDING_IS_ALREADY_SET, + ERR_K0_IS_INVALID, + ERR_EVEX_IS_INVALID, + ERR_SAE_IS_INVALID, + ERR_ER_IS_INVALID, + ERR_INVALID_BROADCAST, + ERR_INVALID_OPMASK_WITH_MEMORY, + ERR_INVALID_ZERO, + ERR_INVALID_RIP_IN_AUTO_GROW, + ERR_INVALID_MIB_ADDRESS, + ERR_INTERNAL +}; + +class Error : public std::exception { + int err_; +public: + explicit Error(int err) : err_(err) + { + if (err_ < 0 || err_ > ERR_INTERNAL) { + fprintf(stderr, "bad err=%d in Xbyak::Error\n", err_); + exit(1); + } + } + operator int() const { return err_; } + const char *what() const throw() + { + static const char *errTbl[] = { + "none", + "bad addressing", + "code is too big", + "bad scale", + "esp can't be index", + "bad combination", + "bad size of register", + "imm is too big", + "bad align", + "label is redefined", + "label is too far", + "label is not found", + "code is not copyable", + "bad parameter", + "can't protect", + "can't use 64bit disp(use (void*))", + "offset is too big", + "MEM size is not specified", + "bad mem size", + "bad st combination", + "over local label", + "under local label", + "can't alloc", + "T_SHORT is not supported in AutoGrow", + "bad protect mode", + "bad pNum", + "bad tNum", + "bad vsib addressing", + "can't convert", + "label is not set by L()", + "label is already set by L()", + "bad label string", + "err munmap", + "opmask is already set", + "rounding is already set", + "k0 is invalid", + "evex is invalid", + "sae(suppress all exceptions) is invalid", + "er(embedded rounding) is invalid", + "invalid broadcast", + "invalid opmask with memory", + "invalid zero", + "invalid rip in AutoGrow", + "invalid mib address", + "internal error", + }; + assert((size_t)err_ < sizeof(errTbl) / sizeof(*errTbl)); + return errTbl[err_]; + } +}; + +inline const char *ConvertErrorToString(const Error& err) +{ + return err.what(); +} + +inline void *AlignedMalloc(size_t size, size_t alignment) +{ +#ifdef __MINGW32__ + return __mingw_aligned_malloc(size, alignment); +#elif defined(_WIN32) + return _aligned_malloc(size, alignment); +#else + void *p; + int ret = posix_memalign(&p, alignment, size); + return (ret == 0) ? 
p : 0;
+#endif
+}
+
+inline void AlignedFree(void *p)
+{
+#ifdef __MINGW32__
+	__mingw_aligned_free(p);
+#elif defined(_MSC_VER)
+	_aligned_free(p);
+#else
+	free(p);
+#endif
+}
+
+template<class To, class From>
+inline const To CastTo(From p) throw()
+{
+	return (const To)(size_t)(p);
+}
+namespace inner {
+
+static const size_t ALIGN_PAGE_SIZE = 4096;
+
+inline bool IsInDisp8(uint32 x) { return 0xFFFFFF80 <= x || x <= 0x7F; }
+inline bool IsInInt32(uint64 x) { return ~uint64(0x7fffffffu) <= x || x <= 0x7FFFFFFFU; }
+
+inline uint32 VerifyInInt32(uint64 x)
+{
+#ifdef XBYAK64
+	if (!IsInInt32(x)) throw Error(ERR_OFFSET_IS_TOO_BIG);
+#endif
+	return static_cast<uint32>(x);
+}
+
+enum LabelMode {
+	LasIs, // as is
+	Labs, // absolute
+	LaddTop // (addr + top) for mov(reg, label) with AutoGrow
+};
+
+} // inner
+
+/*
+	custom allocator
+*/
+struct Allocator {
+	virtual uint8 *alloc(size_t size) { return reinterpret_cast<uint8*>(AlignedMalloc(size, inner::ALIGN_PAGE_SIZE)); }
+	virtual void free(uint8 *p) { AlignedFree(p); }
+	virtual ~Allocator() {}
+	/* override to return false if you call protect() manually */
+	virtual bool useProtect() const { return true; }
+};
+
+#ifdef XBYAK_USE_MMAP_ALLOCATOR
+class MmapAllocator : Allocator {
+	typedef XBYAK_STD_UNORDERED_MAP<uintptr_t, size_t> SizeList;
+	SizeList sizeList_;
+public:
+	uint8 *alloc(size_t size)
+	{
+		const size_t alignedSizeM1 = inner::ALIGN_PAGE_SIZE - 1;
+		size = (size + alignedSizeM1) & ~alignedSizeM1;
+#ifdef MAP_ANONYMOUS
+		const int mode = MAP_PRIVATE | MAP_ANONYMOUS;
+#elif defined(MAP_ANON)
+		const int mode = MAP_PRIVATE | MAP_ANON;
+#else
+		#error "not supported"
+#endif
+		void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, mode, -1, 0);
+		if (p == MAP_FAILED) throw Error(ERR_CANT_ALLOC);
+		assert(p);
+		sizeList_[(uintptr_t)p] = size;
+		return (uint8*)p;
+	}
+	void free(uint8 *p)
+	{
+		if (p == 0) return;
+		SizeList::iterator i = sizeList_.find((uintptr_t)p);
+		if (i == sizeList_.end()) throw Error(ERR_BAD_PARAMETER);
+		if (munmap((void*)i->first, i->second) < 0) throw Error(ERR_MUNMAP);
+		sizeList_.erase(i);
+	}
+};
+#endif
+
+class Address;
+class Reg;
+
+class Operand {
+	static const uint8 EXT8BIT = 0x20;
+	unsigned int idx_:6; // 0..31 + EXT8BIT = 1 if spl/bpl/sil/dil
+	unsigned int kind_:9;
+	unsigned int bit_:10;
+protected:
+	unsigned int zero_:1;
+	unsigned int mask_:3;
+	unsigned int rounding_:3;
+	void setIdx(int idx) { idx_ = idx; }
+public:
+	enum Kind {
+		NONE = 0,
+		MEM = 1 << 0,
+		REG = 1 << 1,
+		MMX = 1 << 2,
+		FPU = 1 << 3,
+		XMM = 1 << 4,
+		YMM = 1 << 5,
+		ZMM = 1 << 6,
+		OPMASK = 1 << 7,
+		BNDREG = 1 << 8
+	};
+	enum Code {
+#ifdef XBYAK64
+		RAX = 0, RCX, RDX, RBX, RSP, RBP, RSI, RDI, R8, R9, R10, R11, R12, R13, R14, R15,
+		R8D = 8, R9D, R10D, R11D, R12D, R13D, R14D, R15D,
+		R8W = 8, R9W, R10W, R11W, R12W, R13W, R14W, R15W,
+		R8B = 8, R9B, R10B, R11B, R12B, R13B, R14B, R15B,
+		SPL = 4, BPL, SIL, DIL,
+#endif
+		EAX = 0, ECX, EDX, EBX, ESP, EBP, ESI, EDI,
+		AX = 0, CX, DX, BX, SP, BP, SI, DI,
+		AL = 0, CL, DL, BL, AH, CH, DH, BH
+	};
+	Operand() : idx_(0), kind_(0), bit_(0), zero_(0), mask_(0), rounding_(0) { }
+	Operand(int idx, Kind kind, int bit, bool ext8bit = 0)
+		: idx_(static_cast<uint8>(idx | (ext8bit ?
EXT8BIT : 0))) + , kind_(kind) + , bit_(bit) + , zero_(0), mask_(0), rounding_(0) + { + assert((bit_ & (bit_ - 1)) == 0); // bit must be power of two + } + Kind getKind() const { return static_cast(kind_); } + int getIdx() const { return idx_ & (EXT8BIT - 1); } + bool isNone() const { return kind_ == 0; } + bool isMMX() const { return is(MMX); } + bool isXMM() const { return is(XMM); } + bool isYMM() const { return is(YMM); } + bool isZMM() const { return is(ZMM); } + bool isXMEM() const { return is(XMM | MEM); } + bool isYMEM() const { return is(YMM | MEM); } + bool isZMEM() const { return is(ZMM | MEM); } + bool isOPMASK() const { return is(OPMASK); } + bool isBNDREG() const { return is(BNDREG); } + bool isREG(int bit = 0) const { return is(REG, bit); } + bool isMEM(int bit = 0) const { return is(MEM, bit); } + bool isFPU() const { return is(FPU); } + bool isExt8bit() const { return (idx_ & EXT8BIT) != 0; } + bool isExtIdx() const { return (getIdx() & 8) != 0; } + bool isExtIdx2() const { return (getIdx() & 16) != 0; } + bool hasEvex() const { return isZMM() || isExtIdx2() || getOpmaskIdx() || getRounding(); } + bool hasRex() const { return isExt8bit() || isREG(64) || isExtIdx(); } + bool hasZero() const { return zero_; } + int getOpmaskIdx() const { return mask_; } + int getRounding() const { return rounding_; } + void setKind(Kind kind) + { + if ((kind & (XMM|YMM|ZMM)) == 0) return; + kind_ = kind; + bit_ = kind == XMM ? 128 : kind == YMM ? 256 : 512; + } + void setBit(int bit) { bit_ = bit; } + void setOpmaskIdx(int idx, bool ignore_idx0 = false) + { + if (!ignore_idx0 && idx == 0) throw Error(ERR_K0_IS_INVALID); + if (mask_) throw Error(ERR_OPMASK_IS_ALREADY_SET); + mask_ = idx; + } + void setRounding(int idx) + { + if (rounding_) throw Error(ERR_ROUNDING_IS_ALREADY_SET); + rounding_ = idx; + } + void setZero() { zero_ = true; } + // ah, ch, dh, bh? + bool isHigh8bit() const + { + if (!isBit(8)) return false; + if (isExt8bit()) return false; + const int idx = getIdx(); + return AH <= idx && idx <= BH; + } + // any bit is accetable if bit == 0 + bool is(int kind, uint32 bit = 0) const + { + return (kind == 0 || (kind_ & kind)) && (bit == 0 || (bit_ & bit)); // cf. you can set (8|16) + } + bool isBit(uint32 bit) const { return (bit_ & bit) != 0; } + uint32 getBit() const { return bit_; } + const char *toString() const + { + const int idx = getIdx(); + if (kind_ == REG) { + if (isExt8bit()) { + static const char *tbl[4] = { "spl", "bpl", "sil", "dil" }; + return tbl[idx - 4]; + } + static const char *tbl[4][16] = { + { "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh", "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" }, + { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di", "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w" }, + { "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi", "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d" }, + { "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" }, + }; + return tbl[bit_ == 8 ? 0 : bit_ == 16 ? 1 : bit_ == 32 ? 
2 : 3][idx]; + } else if (isOPMASK()) { + static const char *tbl[8] = { "k0", "k1", "k2", "k3", "k4", "k5", "k6", "k7" }; + return tbl[idx]; + } else if (isZMM()) { + static const char *tbl[32] = { + "zmm0", "zmm1", "zmm2", "zmm3", "zmm4", "zmm5", "zmm6", "zmm7", "zmm8", "zmm9", "zmm10", "zmm11", "zmm12", "zmm13", "zmm14", "zmm15", + "zmm16", "zmm17", "zmm18", "zmm19", "zmm20", "zmm21", "zmm22", "zmm23", "zmm24", "zmm25", "zmm26", "zmm27", "zmm28", "zmm29", "zmm30", "zmm31" + }; + return tbl[idx]; + } else if (isYMM()) { + static const char *tbl[32] = { + "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15", + "ymm16", "ymm17", "ymm18", "ymm19", "ymm20", "ymm21", "ymm22", "ymm23", "ymm24", "ymm25", "ymm26", "ymm27", "ymm28", "ymm29", "ymm30", "ymm31" + }; + return tbl[idx]; + } else if (isXMM()) { + static const char *tbl[32] = { + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", + "xmm16", "xmm17", "xmm18", "xmm19", "xmm20", "xmm21", "xmm22", "xmm23", "xmm24", "xmm25", "xmm26", "xmm27", "xmm28", "xmm29", "xmm30", "xmm31" + }; + return tbl[idx]; + } else if (isMMX()) { + static const char *tbl[8] = { "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" }; + return tbl[idx]; + } else if (isFPU()) { + static const char *tbl[8] = { "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7" }; + return tbl[idx]; + } else if (isBNDREG()) { + static const char *tbl[4] = { "bnd0", "bnd1", "bnd2", "bnd3" }; + return tbl[idx]; + } + throw Error(ERR_INTERNAL); + } + bool isEqualIfNotInherited(const Operand& rhs) const { return idx_ == rhs.idx_ && kind_ == rhs.kind_ && bit_ == rhs.bit_ && zero_ == rhs.zero_ && mask_ == rhs.mask_ && rounding_ == rhs.rounding_; } + bool operator==(const Operand& rhs) const; + bool operator!=(const Operand& rhs) const { return !operator==(rhs); } + const Address& getAddress() const; + const Reg& getReg() const; +}; + +class Label; + +struct Reg8; +struct Reg16; +struct Reg32; +#ifdef XBYAK64 +struct Reg64; +#endif +class Reg : public Operand { +public: + Reg() { } + Reg(int idx, Kind kind, int bit = 0, bool ext8bit = false) : Operand(idx, kind, bit, ext8bit) { } + Reg changeBit(int bit) const { return Reg(getIdx(), getKind(), bit, isExt8bit()); } + uint8 getRexW() const { return isREG(64) ? 8 : 0; } + uint8 getRexR() const { return isExtIdx() ? 4 : 0; } + uint8 getRexX() const { return isExtIdx() ? 2 : 0; } + uint8 getRexB() const { return isExtIdx() ? 
1 : 0; } + uint8 getRex(const Reg& base = Reg()) const + { + uint8 rex = getRexW() | getRexR() | base.getRexW() | base.getRexB(); + if (rex || isExt8bit() || base.isExt8bit()) rex |= 0x40; + return rex; + } + Reg8 cvt8() const; + Reg16 cvt16() const; + Reg32 cvt32() const; +#ifdef XBYAK64 + Reg64 cvt64() const; +#endif +}; + +inline const Reg& Operand::getReg() const +{ + assert(!isMEM()); + return static_cast(*this); +} + +struct Reg8 : public Reg { + explicit Reg8(int idx = 0, bool ext8bit = false) : Reg(idx, Operand::REG, 8, ext8bit) { } +}; + +struct Reg16 : public Reg { + explicit Reg16(int idx = 0) : Reg(idx, Operand::REG, 16) { } +}; + +struct Mmx : public Reg { + explicit Mmx(int idx = 0, Kind kind = Operand::MMX, int bit = 64) : Reg(idx, kind, bit) { } +}; + +struct EvexModifierRounding { + enum { + T_RN_SAE = 1, + T_RD_SAE = 2, + T_RU_SAE = 3, + T_RZ_SAE = 4, + T_SAE = 5 + }; + explicit EvexModifierRounding(int rounding) : rounding(rounding) {} + int rounding; +}; +struct EvexModifierZero{EvexModifierZero() {}}; + +struct Xmm : public Mmx { + explicit Xmm(int idx = 0, Kind kind = Operand::XMM, int bit = 128) : Mmx(idx, kind, bit) { } + Xmm(Kind kind, int idx) : Mmx(idx, kind, kind == XMM ? 128 : kind == YMM ? 256 : 512) { } + Xmm operator|(const EvexModifierRounding& emr) const { Xmm r(*this); r.setRounding(emr.rounding); return r; } + Xmm copyAndSetIdx(int idx) const { Xmm ret(*this); ret.setIdx(idx); return ret; } + Xmm copyAndSetKind(Operand::Kind kind) const { Xmm ret(*this); ret.setKind(kind); return ret; } +}; + +struct Ymm : public Xmm { + explicit Ymm(int idx = 0, Kind kind = Operand::YMM, int bit = 256) : Xmm(idx, kind, bit) { } + Ymm operator|(const EvexModifierRounding& emr) const { Ymm r(*this); r.setRounding(emr.rounding); return r; } +}; + +struct Zmm : public Ymm { + explicit Zmm(int idx = 0) : Ymm(idx, Operand::ZMM, 512) { } + Zmm operator|(const EvexModifierRounding& emr) const { Zmm r(*this); r.setRounding(emr.rounding); return r; } +}; + +struct Opmask : public Reg { + explicit Opmask(int idx = 0) : Reg(idx, Operand::OPMASK, 64) {} +}; + +struct BoundsReg : public Reg { + explicit BoundsReg(int idx = 0) : Reg(idx, Operand::BNDREG, 128) {} +}; + +templateT operator|(const T& x, const Opmask& k) { T r(x); r.setOpmaskIdx(k.getIdx()); return r; } +templateT operator|(const T& x, const EvexModifierZero&) { T r(x); r.setZero(); return r; } +templateT operator|(const T& x, const EvexModifierRounding& emr) { T r(x); r.setRounding(emr.rounding); return r; } + +struct Fpu : public Reg { + explicit Fpu(int idx = 0) : Reg(idx, Operand::FPU, 32) { } +}; + +struct Reg32e : public Reg { + explicit Reg32e(int idx, int bit) : Reg(idx, Operand::REG, bit) {} +}; +struct Reg32 : public Reg32e { + explicit Reg32(int idx = 0) : Reg32e(idx, 32) {} +}; +#ifdef XBYAK64 +struct Reg64 : public Reg32e { + explicit Reg64(int idx = 0) : Reg32e(idx, 64) {} +}; +struct RegRip { + sint64 disp_; + const Label* label_; + bool isAddr_; + explicit RegRip(sint64 disp = 0, const Label* label = 0, bool isAddr = false) : disp_(disp), label_(label), isAddr_(isAddr) {} + friend const RegRip operator+(const RegRip& r, int disp) { + return RegRip(r.disp_ + disp, r.label_, r.isAddr_); + } + friend const RegRip operator-(const RegRip& r, int disp) { + return RegRip(r.disp_ - disp, r.label_, r.isAddr_); + } + friend const RegRip operator+(const RegRip& r, sint64 disp) { + return RegRip(r.disp_ + disp, r.label_, r.isAddr_); + } + friend const RegRip operator-(const RegRip& r, sint64 disp) { + return 
RegRip(r.disp_ - disp, r.label_, r.isAddr_); + } + friend const RegRip operator+(const RegRip& r, const Label& label) { + if (r.label_ || r.isAddr_) throw Error(ERR_BAD_ADDRESSING); + return RegRip(r.disp_, &label); + } + friend const RegRip operator+(const RegRip& r, const void *addr) { + if (r.label_ || r.isAddr_) throw Error(ERR_BAD_ADDRESSING); + return RegRip(r.disp_ + (sint64)addr, 0, true); + } +}; +#endif + +inline Reg8 Reg::cvt8() const +{ + const int idx = getIdx(); + if (isBit(8)) return Reg8(idx, isExt8bit()); +#ifdef XBYAK32 + if (idx >= 4) throw Error(ERR_CANT_CONVERT); +#endif + return Reg8(idx, 4 <= idx && idx < 8); +} + +inline Reg16 Reg::cvt16() const +{ + const int idx = getIdx(); + if (isBit(8) && (4 <= idx && idx < 8) && !isExt8bit()) throw Error(ERR_CANT_CONVERT); + return Reg16(idx); +} + +inline Reg32 Reg::cvt32() const +{ + const int idx = getIdx(); + if (isBit(8) && (4 <= idx && idx < 8) && !isExt8bit()) throw Error(ERR_CANT_CONVERT); + return Reg32(idx); +} + +#ifdef XBYAK64 +inline Reg64 Reg::cvt64() const +{ + const int idx = getIdx(); + if (isBit(8) && (4 <= idx && idx < 8) && !isExt8bit()) throw Error(ERR_CANT_CONVERT); + return Reg64(idx); +} +#endif + +#ifndef XBYAK_DISABLE_SEGMENT +// not derived from Reg +class Segment { + int idx_; +public: + enum { + es, cs, ss, ds, fs, gs + }; + explicit Segment(int idx) : idx_(idx) { assert(0 <= idx_ && idx_ < 6); } + int getIdx() const { return idx_; } + const char *toString() const + { + static const char tbl[][3] = { + "es", "cs", "ss", "ds", "fs", "gs" + }; + return tbl[idx_]; + } +}; +#endif + +class RegExp { +public: +#ifdef XBYAK64 + enum { i32e = 32 | 64 }; +#else + enum { i32e = 32 }; +#endif + RegExp(size_t disp = 0) : scale_(0), disp_(disp) { } + RegExp(const Reg& r, int scale = 1) + : scale_(scale) + , disp_(0) + { + if (!r.isREG(i32e) && !r.is(Reg::XMM|Reg::YMM|Reg::ZMM)) throw Error(ERR_BAD_SIZE_OF_REGISTER); + if (scale == 0) return; + if (scale != 1 && scale != 2 && scale != 4 && scale != 8) throw Error(ERR_BAD_SCALE); + if (r.getBit() >= 128 || scale != 1) { // xmm/ymm is always index + index_ = r; + } else { + base_ = r; + } + } + bool isVsib(int bit = 128 | 256 | 512) const { return index_.isBit(bit); } + RegExp optimize() const + { + RegExp exp = *this; + // [reg * 2] => [reg + reg] + if (index_.isBit(i32e) && !base_.getBit() && scale_ == 2) { + exp.base_ = index_; + exp.scale_ = 1; + } + return exp; + } + bool operator==(const RegExp& rhs) const + { + return base_ == rhs.base_ && index_ == rhs.index_ && disp_ == rhs.disp_ && scale_ == rhs.scale_; + } + const Reg& getBase() const { return base_; } + const Reg& getIndex() const { return index_; } + int getScale() const { return scale_; } + size_t getDisp() const { return disp_; } + void verify() const + { + if (base_.getBit() >= 128) throw Error(ERR_BAD_SIZE_OF_REGISTER); + if (index_.getBit() && index_.getBit() <= 64) { + if (index_.getIdx() == Operand::ESP) throw Error(ERR_ESP_CANT_BE_INDEX); + if (base_.getBit() && base_.getBit() != index_.getBit()) throw Error(ERR_BAD_SIZE_OF_REGISTER); + } + } + friend RegExp operator+(const RegExp& a, const RegExp& b); + friend RegExp operator-(const RegExp& e, size_t disp); + uint8 getRex() const + { + uint8 rex = index_.getRexX() | base_.getRexB(); + return rex ? 
uint8(rex | 0x40) : 0; + } +private: + /* + [base_ + index_ * scale_ + disp_] + base : Reg32e, index : Reg32e(w/o esp), Xmm, Ymm + */ + Reg base_; + Reg index_; + int scale_; + size_t disp_; +}; + +inline RegExp operator+(const RegExp& a, const RegExp& b) +{ + if (a.index_.getBit() && b.index_.getBit()) throw Error(ERR_BAD_ADDRESSING); + RegExp ret = a; + if (!ret.index_.getBit()) { ret.index_ = b.index_; ret.scale_ = b.scale_; } + if (b.base_.getBit()) { + if (ret.base_.getBit()) { + if (ret.index_.getBit()) throw Error(ERR_BAD_ADDRESSING); + // base + base => base + index * 1 + ret.index_ = b.base_; + // [reg + esp] => [esp + reg] + if (ret.index_.getIdx() == Operand::ESP) std::swap(ret.base_, ret.index_); + ret.scale_ = 1; + } else { + ret.base_ = b.base_; + } + } + ret.disp_ += b.disp_; + return ret; +} +inline RegExp operator*(const Reg& r, int scale) +{ + return RegExp(r, scale); +} +inline RegExp operator-(const RegExp& e, size_t disp) +{ + RegExp ret = e; + ret.disp_ -= disp; + return ret; +} + +// 2nd parameter for constructor of CodeArray(maxSize, userPtr, alloc) +void *const AutoGrow = (void*)1; //-V566 +void *const DontSetProtectRWE = (void*)2; //-V566 + +class CodeArray { + enum Type { + USER_BUF = 1, // use userPtr(non alignment, non protect) + ALLOC_BUF, // use new(alignment, protect) + AUTO_GROW // automatically move and grow memory if necessary + }; + CodeArray(const CodeArray& rhs); + void operator=(const CodeArray&); + bool isAllocType() const { return type_ == ALLOC_BUF || type_ == AUTO_GROW; } + struct AddrInfo { + size_t codeOffset; // position to write + size_t jmpAddr; // value to write + int jmpSize; // size of jmpAddr + inner::LabelMode mode; + AddrInfo(size_t _codeOffset, size_t _jmpAddr, int _jmpSize, inner::LabelMode _mode) + : codeOffset(_codeOffset), jmpAddr(_jmpAddr), jmpSize(_jmpSize), mode(_mode) {} + uint64 getVal(const uint8 *top) const + { + uint64 disp = (mode == inner::LaddTop) ? jmpAddr + size_t(top) : (mode == inner::LasIs) ? jmpAddr : jmpAddr - size_t(top); + if (jmpSize == 4) disp = inner::VerifyInInt32(disp); + return disp; + } + }; + typedef std::list AddrInfoList; + AddrInfoList addrInfoList_; + const Type type_; +#ifdef XBYAK_USE_MMAP_ALLOCATOR + MmapAllocator defaultAllocator_; +#else + Allocator defaultAllocator_; +#endif + Allocator *alloc_; +protected: + size_t maxSize_; + uint8 *top_; + size_t size_; + bool isCalledCalcJmpAddress_; + + bool useProtect() const { return alloc_->useProtect(); } + /* + allocate new memory and copy old data to the new area + */ + void growMemory() + { + const size_t newSize = (std::max)(DEFAULT_MAX_CODE_SIZE, maxSize_ * 2); + uint8 *newTop = alloc_->alloc(newSize); + if (newTop == 0) throw Error(ERR_CANT_ALLOC); + for (size_t i = 0; i < size_; i++) newTop[i] = top_[i]; + alloc_->free(top_); + top_ = newTop; + maxSize_ = newSize; + } + /* + calc jmp address for AutoGrow mode + */ + void calcJmpAddress() + { + if (isCalledCalcJmpAddress_) return; + for (AddrInfoList::const_iterator i = addrInfoList_.begin(), ie = addrInfoList_.end(); i != ie; ++i) { + uint64 disp = i->getVal(top_); + rewrite(i->codeOffset, disp, i->jmpSize); + } + isCalledCalcJmpAddress_ = true; + } +public: + enum ProtectMode { + PROTECT_RW = 0, // read/write + PROTECT_RWE = 1, // read/write/exec + PROTECT_RE = 2 // read/exec + }; + explicit CodeArray(size_t maxSize, void *userPtr = 0, Allocator *allocator = 0) + : type_(userPtr == AutoGrow ? AUTO_GROW : (userPtr == 0 || userPtr == DontSetProtectRWE) ? 
ALLOC_BUF : USER_BUF) + , alloc_(allocator ? allocator : (Allocator*)&defaultAllocator_) + , maxSize_(maxSize) + , top_(type_ == USER_BUF ? reinterpret_cast(userPtr) : alloc_->alloc((std::max)(maxSize, 1))) + , size_(0) + , isCalledCalcJmpAddress_(false) + { + if (maxSize_ > 0 && top_ == 0) throw Error(ERR_CANT_ALLOC); + if ((type_ == ALLOC_BUF && userPtr != DontSetProtectRWE && useProtect()) && !setProtectMode(PROTECT_RWE, false)) { + alloc_->free(top_); + throw Error(ERR_CANT_PROTECT); + } + } + virtual ~CodeArray() + { + if (isAllocType()) { + if (useProtect()) setProtectModeRW(false); + alloc_->free(top_); + } + } + bool setProtectMode(ProtectMode mode, bool throwException = true) + { + bool isOK = protect(top_, maxSize_, mode); + if (isOK) return true; + if (throwException) throw Error(ERR_CANT_PROTECT); + return false; + } + bool setProtectModeRE(bool throwException = true) { return setProtectMode(PROTECT_RE, throwException); } + bool setProtectModeRW(bool throwException = true) { return setProtectMode(PROTECT_RW, throwException); } + void resetSize() + { + size_ = 0; + addrInfoList_.clear(); + isCalledCalcJmpAddress_ = false; + } + void db(int code) + { + if (size_ >= maxSize_) { + if (type_ == AUTO_GROW) { + growMemory(); + } else { + throw Error(ERR_CODE_IS_TOO_BIG); + } + } + top_[size_++] = static_cast(code); + } + void db(const uint8 *code, size_t codeSize) + { + for (size_t i = 0; i < codeSize; i++) db(code[i]); + } + void db(uint64 code, size_t codeSize) + { + if (codeSize > 8) throw Error(ERR_BAD_PARAMETER); + for (size_t i = 0; i < codeSize; i++) db(static_cast(code >> (i * 8))); + } + void dw(uint32 code) { db(code, 2); } + void dd(uint32 code) { db(code, 4); } + void dq(uint64 code) { db(code, 8); } + const uint8 *getCode() const { return top_; } + template + const F getCode() const { return reinterpret_cast(top_); } + const uint8 *getCurr() const { return &top_[size_]; } + template + const F getCurr() const { return reinterpret_cast(&top_[size_]); } + size_t getSize() const { return size_; } + void setSize(size_t size) + { + if (size > maxSize_) throw Error(ERR_OFFSET_IS_TOO_BIG); + size_ = size; + } + void dump() const + { + const uint8 *p = getCode(); + size_t bufSize = getSize(); + size_t remain = bufSize; + for (int i = 0; i < 4; i++) { + size_t disp = 16; + if (remain < 16) { + disp = remain; + } + for (size_t j = 0; j < 16; j++) { + if (j < disp) { + printf("%02X", p[i * 16 + j]); + } + } + putchar('\n'); + remain -= disp; + if (remain == 0) { + break; + } + } + } + /* + @param offset [in] offset from top + @param disp [in] offset from the next of jmp + @param size [in] write size(1, 2, 4, 8) + */ + void rewrite(size_t offset, uint64 disp, size_t size) + { + assert(offset < maxSize_); + if (size != 1 && size != 2 && size != 4 && size != 8) throw Error(ERR_BAD_PARAMETER); + uint8 *const data = top_ + offset; + for (size_t i = 0; i < size; i++) { + data[i] = static_cast(disp >> (i * 8)); + } + } + void save(size_t offset, size_t val, int size, inner::LabelMode mode) + { + addrInfoList_.push_back(AddrInfo(offset, val, size, mode)); + } + bool isAutoGrow() const { return type_ == AUTO_GROW; } + bool isCalledCalcJmpAddress() const { return isCalledCalcJmpAddress_; } + /** + change exec permission of memory + @param addr [in] buffer address + @param size [in] buffer size + @param protectMode [in] mode(RW/RWE/RE) + @return true(success), false(failure) + */ + static inline bool protect(const void *addr, size_t size, int protectMode) + { +#if defined(_WIN32) + const DWORD 
c_rw = PAGE_READWRITE; + const DWORD c_rwe = PAGE_EXECUTE_READWRITE; + const DWORD c_re = PAGE_EXECUTE_READ; + DWORD mode; +#else + const int c_rw = PROT_READ | PROT_WRITE; + const int c_rwe = PROT_READ | PROT_WRITE | PROT_EXEC; + const int c_re = PROT_READ | PROT_EXEC; + int mode; +#endif + switch (protectMode) { + case PROTECT_RW: mode = c_rw; break; + case PROTECT_RWE: mode = c_rwe; break; + case PROTECT_RE: mode = c_re; break; + default: + return false; + } +#if defined(_WIN32) + DWORD oldProtect; + return VirtualProtect(const_cast(addr), size, mode, &oldProtect) != 0; +#elif defined(__GNUC__) + size_t pageSize = sysconf(_SC_PAGESIZE); + size_t iaddr = reinterpret_cast(addr); + size_t roundAddr = iaddr & ~(pageSize - static_cast(1)); +#ifndef NDEBUG + if (pageSize != 4096) fprintf(stderr, "large page(%zd) is used. not tested enough.\n", pageSize); +#endif + return mprotect(reinterpret_cast(roundAddr), size + (iaddr - roundAddr), mode) == 0; +#else + return true; +#endif + } + /** + get aligned memory pointer + @param addr [in] address + @param alignedSize [in] power of two + @return aligned addr by alingedSize + */ + static inline uint8 *getAlignedAddress(uint8 *addr, size_t alignedSize = 16) + { + return reinterpret_cast((reinterpret_cast(addr) + alignedSize - 1) & ~(alignedSize - static_cast(1))); + } +}; + +class Address : public Operand { +public: + enum Mode { + M_ModRM, + M_64bitDisp, + M_rip, + M_ripAddr + }; + Address(uint32 sizeBit, bool broadcast, const RegExp& e) + : Operand(0, MEM, sizeBit), e_(e), label_(0), mode_(M_ModRM), broadcast_(broadcast) + { + e_.verify(); + } +#ifdef XBYAK64 + explicit Address(size_t disp) + : Operand(0, MEM, 64), e_(disp), label_(0), mode_(M_64bitDisp), broadcast_(false){ } + Address(uint32 sizeBit, bool broadcast, const RegRip& addr) + : Operand(0, MEM, sizeBit), e_(addr.disp_), label_(addr.label_), mode_(addr.isAddr_ ? M_ripAddr : M_rip), broadcast_(broadcast) { } +#endif + RegExp getRegExp(bool optimize = true) const + { + return optimize ? 
e_.optimize() : e_; + } + Mode getMode() const { return mode_; } + bool is32bit() const { return e_.getBase().getBit() == 32 || e_.getIndex().getBit() == 32; } + bool isOnlyDisp() const { return !e_.getBase().getBit() && !e_.getIndex().getBit(); } // for mov eax + size_t getDisp() const { return e_.getDisp(); } + uint8 getRex() const + { + if (mode_ != M_ModRM) return 0; + return getRegExp().getRex(); + } + bool is64bitDisp() const { return mode_ == M_64bitDisp; } // for moffset + bool isBroadcast() const { return broadcast_; } + const Label* getLabel() const { return label_; } + bool operator==(const Address& rhs) const + { + return getBit() == rhs.getBit() && e_ == rhs.e_ && label_ == rhs.label_ && mode_ == rhs.mode_ && broadcast_ == rhs.broadcast_; + } + bool operator!=(const Address& rhs) const { return !operator==(rhs); } + bool isVsib() const { return e_.isVsib(); } +private: + RegExp e_; + const Label* label_; + Mode mode_; + bool broadcast_; +}; + +inline const Address& Operand::getAddress() const +{ + assert(isMEM()); + return static_cast(*this); +} + +inline bool Operand::operator==(const Operand& rhs) const +{ + if (isMEM() && rhs.isMEM()) return this->getAddress() == rhs.getAddress(); + return isEqualIfNotInherited(rhs); +} + +class AddressFrame { + void operator=(const AddressFrame&); + AddressFrame(const AddressFrame&); +public: + const uint32 bit_; + const bool broadcast_; + explicit AddressFrame(uint32 bit, bool broadcast = false) : bit_(bit), broadcast_(broadcast) { } + Address operator[](const RegExp& e) const + { + return Address(bit_, broadcast_, e); + } + Address operator[](const void *disp) const + { + return Address(bit_, broadcast_, RegExp(reinterpret_cast(disp))); + } +#ifdef XBYAK64 + Address operator[](uint64 disp) const { return Address(disp); } + Address operator[](const RegRip& addr) const { return Address(bit_, broadcast_, addr); } +#endif +}; + +struct JmpLabel { + size_t endOfJmp; /* offset from top to the end address of jmp */ + int jmpSize; + inner::LabelMode mode; + size_t disp; // disp for [rip + disp] + explicit JmpLabel(size_t endOfJmp = 0, int jmpSize = 0, inner::LabelMode mode = inner::LasIs, size_t disp = 0) + : endOfJmp(endOfJmp), jmpSize(jmpSize), mode(mode), disp(disp) + { + } +}; + +class LabelManager; + +class Label { + mutable LabelManager *mgr; + mutable int id; + friend class LabelManager; +public: + Label() : mgr(0), id(0) {} + Label(const Label& rhs); + Label& operator=(const Label& rhs); + ~Label(); + void clear() { mgr = 0; id = 0; } + int getId() const { return id; } + const uint8 *getAddress() const; + + // backward compatibility + static inline std::string toStr(int num) + { + char buf[16]; +#if defined(_MSC_VER) && (_MSC_VER < 1900) + _snprintf_s +#else + snprintf +#endif + (buf, sizeof(buf), ".%08x", num); + return buf; + } +}; + +class LabelManager { + // for string label + struct SlabelVal { + size_t offset; + SlabelVal(size_t offset) : offset(offset) {} + }; + typedef XBYAK_STD_UNORDERED_MAP SlabelDefList; + typedef XBYAK_STD_UNORDERED_MULTIMAP SlabelUndefList; + struct SlabelState { + SlabelDefList defList; + SlabelUndefList undefList; + }; + typedef std::list StateList; + // for Label class + struct ClabelVal { + ClabelVal(size_t offset = 0) : offset(offset), refCount(1) {} + size_t offset; + int refCount; + }; + typedef XBYAK_STD_UNORDERED_MAP ClabelDefList; + typedef XBYAK_STD_UNORDERED_MULTIMAP ClabelUndefList; + typedef XBYAK_STD_UNORDERED_SET LabelPtrList; + + CodeArray *base_; + // global : stateList_.front(), local : 
stateList_.back()
+	StateList stateList_;
+	mutable int labelId_;
+	ClabelDefList clabelDefList_;
+	ClabelUndefList clabelUndefList_;
+	LabelPtrList labelPtrList_;
+
+	int getId(const Label& label) const
+	{
+		if (label.id == 0) label.id = labelId_++;
+		return label.id;
+	}
+	template<class DefList, class UndefList, class T>
+	void define_inner(DefList& defList, UndefList& undefList, const T& labelId, size_t addrOffset)
+	{
+		// add label
+		typename DefList::value_type item(labelId, addrOffset);
+		std::pair<typename DefList::iterator, bool> ret = defList.insert(item);
+		if (!ret.second) throw Error(ERR_LABEL_IS_REDEFINED);
+		// search undefined label
+		for (;;) {
+			typename UndefList::iterator itr = undefList.find(labelId);
+			if (itr == undefList.end()) break;
+			const JmpLabel *jmp = &itr->second;
+			const size_t offset = jmp->endOfJmp - jmp->jmpSize;
+			size_t disp;
+			if (jmp->mode == inner::LaddTop) {
+				disp = addrOffset;
+			} else if (jmp->mode == inner::Labs) {
+				disp = size_t(base_->getCurr());
+			} else {
+				disp = addrOffset - jmp->endOfJmp + jmp->disp;
+#ifdef XBYAK64
+				if (jmp->jmpSize <= 4 && !inner::IsInInt32(disp)) throw Error(ERR_OFFSET_IS_TOO_BIG);
+#endif
+				if (jmp->jmpSize == 1 && !inner::IsInDisp8((uint32)disp)) throw Error(ERR_LABEL_IS_TOO_FAR);
+			}
+			if (base_->isAutoGrow()) {
+				base_->save(offset, disp, jmp->jmpSize, jmp->mode);
+			} else {
+				base_->rewrite(offset, disp, jmp->jmpSize);
+			}
+			undefList.erase(itr);
+		}
+	}
+	template<class DefList, class T>
+	bool getOffset_inner(const DefList& defList, size_t *offset, const T& label) const
+	{
+		typename DefList::const_iterator i = defList.find(label);
+		if (i == defList.end()) return false;
+		*offset = i->second.offset;
+		return true;
+	}
+	friend class Label;
+	void incRefCount(int id, Label *label)
+	{
+		clabelDefList_[id].refCount++;
+		labelPtrList_.insert(label);
+	}
+	void decRefCount(int id, Label *label)
+	{
+		labelPtrList_.erase(label);
+		ClabelDefList::iterator i = clabelDefList_.find(id);
+		if (i == clabelDefList_.end()) return;
+		if (i->second.refCount == 1) {
+			clabelDefList_.erase(id);
+		} else {
+			--i->second.refCount;
+		}
+	}
+	template<class T>
+	bool hasUndefinedLabel_inner(const T& list) const
+	{
+#ifndef NDEBUG
+		for (typename T::const_iterator i = list.begin(); i != list.end(); ++i) {
+			std::cerr << "undefined label:" << i->first << std::endl;
+		}
+#endif
+		return !list.empty();
+	}
+	// detach all labels linked to LabelManager
+	void resetLabelPtrList()
+	{
+		for (LabelPtrList::iterator i = labelPtrList_.begin(), ie = labelPtrList_.end(); i != ie; ++i) {
+			(*i)->clear();
+		}
+		labelPtrList_.clear();
+	}
+public:
+	LabelManager()
+	{
+		reset();
+	}
+	~LabelManager()
+	{
+		resetLabelPtrList();
+	}
+	void reset()
+	{
+		base_ = 0;
+		labelId_ = 1;
+		stateList_.clear();
+		stateList_.push_back(SlabelState());
+		stateList_.push_back(SlabelState());
+		clabelDefList_.clear();
+		clabelUndefList_.clear();
+		resetLabelPtrList();
+	}
+	void enterLocal()
+	{
+		stateList_.push_back(SlabelState());
+	}
+	void leaveLocal()
+	{
+		if (stateList_.size() <= 2) throw Error(ERR_UNDER_LOCAL_LABEL);
+		if (hasUndefinedLabel_inner(stateList_.back().undefList)) throw Error(ERR_LABEL_IS_NOT_FOUND);
+		stateList_.pop_back();
+	}
+	void set(CodeArray *base) { base_ = base; }
+	void defineSlabel(std::string label)
+	{
+		if (label == "@b" || label == "@f") throw Error(ERR_BAD_LABEL_STR);
+		if (label == "@@") {
+			SlabelDefList& defList = stateList_.front().defList;
+			SlabelDefList::iterator i = defList.find("@f");
+			if (i != defList.end()) {
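+				// MASM-style anaphoric labels: each "@@" definition is stored
+				// alternately under the "@b"/"@f" keys, so "@b" resolves to the
+				// most recent "@@" and "@f" to the upcoming one (see getOffset()).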
defList.erase(i); + } + label = "@f"; + } + } + SlabelState& st = *label.c_str() == '.' ? stateList_.back() : stateList_.front(); + define_inner(st.defList, st.undefList, label, base_->getSize()); + } + void defineClabel(Label& label) + { + define_inner(clabelDefList_, clabelUndefList_, getId(label), base_->getSize()); + label.mgr = this; + labelPtrList_.insert(&label); + } + void assign(Label& dst, const Label& src) + { + ClabelDefList::const_iterator i = clabelDefList_.find(src.id); + if (i == clabelDefList_.end()) throw Error(ERR_LABEL_ISNOT_SET_BY_L); + define_inner(clabelDefList_, clabelUndefList_, dst.id, i->second.offset); + dst.mgr = this; + labelPtrList_.insert(&dst); + } + bool getOffset(size_t *offset, std::string& label) const + { + const SlabelDefList& defList = stateList_.front().defList; + if (label == "@b") { + if (defList.find("@f") != defList.end()) { + label = "@f"; + } else if (defList.find("@b") == defList.end()) { + throw Error(ERR_LABEL_IS_NOT_FOUND); + } + } else if (label == "@f") { + if (defList.find("@f") != defList.end()) { + label = "@b"; + } + } + const SlabelState& st = *label.c_str() == '.' ? stateList_.back() : stateList_.front(); + return getOffset_inner(st.defList, offset, label); + } + bool getOffset(size_t *offset, const Label& label) const + { + return getOffset_inner(clabelDefList_, offset, getId(label)); + } + void addUndefinedLabel(const std::string& label, const JmpLabel& jmp) + { + SlabelState& st = *label.c_str() == '.' ? stateList_.back() : stateList_.front(); + st.undefList.insert(SlabelUndefList::value_type(label, jmp)); + } + void addUndefinedLabel(const Label& label, const JmpLabel& jmp) + { + clabelUndefList_.insert(ClabelUndefList::value_type(label.id, jmp)); + } + bool hasUndefSlabel() const + { + for (StateList::const_iterator i = stateList_.begin(), ie = stateList_.end(); i != ie; ++i) { + if (hasUndefinedLabel_inner(i->undefList)) return true; + } + return false; + } + bool hasUndefClabel() const { return hasUndefinedLabel_inner(clabelUndefList_); } + const uint8 *getCode() const { return base_->getCode(); } + bool isReady() const { return !base_->isAutoGrow() || base_->isCalledCalcJmpAddress(); } +}; + +inline Label::Label(const Label& rhs) +{ + id = rhs.id; + mgr = rhs.mgr; + if (mgr) mgr->incRefCount(id, this); +} +inline Label& Label::operator=(const Label& rhs) +{ + if (id) throw Error(ERR_LABEL_IS_ALREADY_SET_BY_L); + id = rhs.id; + mgr = rhs.mgr; + if (mgr) mgr->incRefCount(id, this); + return *this; +} +inline Label::~Label() +{ + if (id && mgr) mgr->decRefCount(id, this); +} +inline const uint8* Label::getAddress() const +{ + if (mgr == 0 || !mgr->isReady()) return 0; + size_t offset; + if (!mgr->getOffset(&offset, *this)) return 0; + return mgr->getCode() + offset; +} + +class CodeGenerator : public CodeArray { +public: + enum LabelType { + T_SHORT, + T_NEAR, + T_AUTO // T_SHORT if possible + }; +private: + CodeGenerator operator=(const CodeGenerator&); // don't call +#ifdef XBYAK64 + enum { i32e = 32 | 64, BIT = 64 }; + static const size_t dummyAddr = (size_t(0x11223344) << 32) | 55667788; + typedef Reg64 NativeReg; +#else + enum { i32e = 32, BIT = 32 }; + static const size_t dummyAddr = 0x12345678; + typedef Reg32 NativeReg; +#endif + // (XMM, XMM|MEM) + static inline bool isXMM_XMMorMEM(const Operand& op1, const Operand& op2) + { + return op1.isXMM() && (op2.isXMM() || op2.isMEM()); + } + // (MMX, MMX|MEM) or (XMM, XMM|MEM) + static inline bool isXMMorMMX_MEM(const Operand& op1, const Operand& op2) + { + return 
(op1.isMMX() && (op2.isMMX() || op2.isMEM())) || isXMM_XMMorMEM(op1, op2); + } + // (XMM, MMX|MEM) + static inline bool isXMM_MMXorMEM(const Operand& op1, const Operand& op2) + { + return op1.isXMM() && (op2.isMMX() || op2.isMEM()); + } + // (MMX, XMM|MEM) + static inline bool isMMX_XMMorMEM(const Operand& op1, const Operand& op2) + { + return op1.isMMX() && (op2.isXMM() || op2.isMEM()); + } + // (XMM, REG32|MEM) + static inline bool isXMM_REG32orMEM(const Operand& op1, const Operand& op2) + { + return op1.isXMM() && (op2.isREG(i32e) || op2.isMEM()); + } + // (REG32, XMM|MEM) + static inline bool isREG32_XMMorMEM(const Operand& op1, const Operand& op2) + { + return op1.isREG(i32e) && (op2.isXMM() || op2.isMEM()); + } + // (REG32, REG32|MEM) + static inline bool isREG32_REG32orMEM(const Operand& op1, const Operand& op2) + { + return op1.isREG(i32e) && ((op2.isREG(i32e) && op1.getBit() == op2.getBit()) || op2.isMEM()); + } + void rex(const Operand& op1, const Operand& op2 = Operand()) + { + uint8 rex = 0; + const Operand *p1 = &op1, *p2 = &op2; + if (p1->isMEM()) std::swap(p1, p2); + if (p1->isMEM()) throw Error(ERR_BAD_COMBINATION); + if (p2->isMEM()) { + const Address& addr = p2->getAddress(); + if (BIT == 64 && addr.is32bit()) db(0x67); + rex = addr.getRex() | p1->getReg().getRex(); + } else { + // ModRM(reg, base); + rex = op2.getReg().getRex(op1.getReg()); + } + // except movsx(16bit, 32/64bit) + if ((op1.isBit(16) && !op2.isBit(i32e)) || (op2.isBit(16) && !op1.isBit(i32e))) db(0x66); + if (rex) db(rex); + } + enum AVXtype { + // low 3 bit + T_N1 = 1, + T_N2 = 2, + T_N4 = 3, + T_N8 = 4, + T_N16 = 5, + T_N32 = 6, + T_NX_MASK = 7, + // + T_N_VL = 1 << 3, // N * (1, 2, 4) for VL + T_DUP = 1 << 4, // N = (8, 32, 64) + T_66 = 1 << 5, + T_F3 = 1 << 6, + T_F2 = 1 << 7, + T_0F = 1 << 8, + T_0F38 = 1 << 9, + T_0F3A = 1 << 10, + T_L0 = 1 << 11, + T_L1 = 1 << 12, + T_W0 = 1 << 13, + T_W1 = 1 << 14, + T_EW0 = 1 << 15, + T_EW1 = 1 << 16, + T_YMM = 1 << 17, // support YMM, ZMM + T_EVEX = 1 << 18, + T_ER_X = 1 << 19, // xmm{er} + T_ER_Y = 1 << 20, // ymm{er} + T_ER_Z = 1 << 21, // zmm{er} + T_SAE_X = 1 << 22, // xmm{sae} + T_SAE_Y = 1 << 23, // ymm{sae} + T_SAE_Z = 1 << 24, // zmm{sae} + T_MUST_EVEX = 1 << 25, // contains T_EVEX + T_B32 = 1 << 26, // m32bcst + T_B64 = 1 << 27, // m64bcst + T_M_K = 1 << 28, // mem{k} + T_VSIB = 1 << 29, + T_MEM_EVEX = 1 << 30, // use evex if mem + T_XXX + }; + void vex(const Reg& reg, const Reg& base, const Operand *v, int type, int code, bool x = false) + { + int w = (type & T_W1) ? 1 : 0; + bool is256 = (type & T_L1) ? true : (type & T_L0) ? false : reg.isYMM(); + bool r = reg.isExtIdx(); + bool b = base.isExtIdx(); + int idx = v ? v->getIdx() : 0; + if ((idx | reg.getIdx() | base.getIdx()) >= 16) throw Error(ERR_BAD_COMBINATION); + uint32 pp = (type & T_66) ? 1 : (type & T_F3) ? 2 : (type & T_F2) ? 3 : 0; + uint32 vvvv = (((~idx) & 15) << 3) | (is256 ? 4 : 0) | pp; + if (!b && !x && !w && (type & T_0F)) { + db(0xC5); db((r ? 0 : 0x80) | vvvv); + } else { + uint32 mmmm = (type & T_0F) ? 1 : (type & T_0F38) ? 2 : (type & T_0F3A) ? 3 : 0; + db(0xC4); db((r ? 0 : 0x80) | (x ? 0 : 0x40) | (b ? 
0 : 0x20) | mmmm); db((w << 7) | vvvv); + } + db(code); + } + void verifySAE(const Reg& r, int type) const + { + if (((type & T_SAE_X) && r.isXMM()) || ((type & T_SAE_Y) && r.isYMM()) || ((type & T_SAE_Z) && r.isZMM())) return; + throw Error(ERR_SAE_IS_INVALID); + } + void verifyER(const Reg& r, int type) const + { + if (((type & T_ER_X) && r.isXMM()) || ((type & T_ER_Y) && r.isYMM()) || ((type & T_ER_Z) && r.isZMM())) return; + throw Error(ERR_ER_IS_INVALID); + } + // (a, b, c) contains non zero two or three values then err + int verifyDuplicate(int a, int b, int c, int err) + { + int v = a | b | c; + if ((a > 0 && a != v) + (b > 0 && b != v) + (c > 0 && c != v) > 0) return Error(err); + return v; + } + int evex(const Reg& reg, const Reg& base, const Operand *v, int type, int code, bool x = false, bool b = false, int aaa = 0, uint32 VL = 0, bool Hi16Vidx = false) + { + if (!(type & (T_EVEX | T_MUST_EVEX))) throw Error(ERR_EVEX_IS_INVALID); + int w = (type & T_EW1) ? 1 : 0; + uint32 mm = (type & T_0F) ? 1 : (type & T_0F38) ? 2 : (type & T_0F3A) ? 3 : 0; + uint32 pp = (type & T_66) ? 1 : (type & T_F3) ? 2 : (type & T_F2) ? 3 : 0; + + int idx = v ? v->getIdx() : 0; + uint32 vvvv = ~idx; + + bool R = !reg.isExtIdx(); + bool X = x ? false : !base.isExtIdx2(); + bool B = !base.isExtIdx(); + bool Rp = !reg.isExtIdx2(); + int LL; + int rounding = verifyDuplicate(reg.getRounding(), base.getRounding(), v ? v->getRounding() : 0, ERR_ROUNDING_IS_ALREADY_SET); + int disp8N = 1; + if (rounding) { + if (rounding == EvexModifierRounding::T_SAE) { + verifySAE(base, type); LL = 0; + } else { + verifyER(base, type); LL = rounding - 1; + } + b = true; + } else { + if (v) VL = (std::max)(VL, v->getBit()); + VL = (std::max)((std::max)(reg.getBit(), base.getBit()), VL); + LL = (VL == 512) ? 2 : (VL == 256) ? 1 : 0; + if (b) { + disp8N = (type & T_B32) ? 4 : 8; + } else if (type & T_DUP) { + disp8N = VL == 128 ? 8 : VL == 256 ? 32 : 64; + } else { + if ((type & (T_NX_MASK | T_N_VL)) == 0) { + type |= T_N16 | T_N_VL; // default + } + int low = type & T_NX_MASK; + if (low > 0) { + disp8N = 1 << (low - 1); + if (type & T_N_VL) disp8N *= (VL == 512 ? 4 : VL == 256 ? 2 : 1); + } + } + } + bool Vp = !((v ? v->isExtIdx2() : 0) | Hi16Vidx); + bool z = reg.hasZero() || base.hasZero() || (v ? v->hasZero() : false); + if (aaa == 0) aaa = verifyDuplicate(base.getOpmaskIdx(), reg.getOpmaskIdx(), (v ? v->getOpmaskIdx() : 0), ERR_OPMASK_IS_ALREADY_SET); + db(0x62); + db((R ? 0x80 : 0) | (X ? 0x40 : 0) | (B ? 0x20 : 0) | (Rp ? 0x10 : 0) | (mm & 3)); + db((w == 1 ? 0x80 : 0) | ((vvvv & 15) << 3) | 4 | (pp & 3)); + db((z ? 0x80 : 0) | ((LL & 3) << 5) | (b ? 0x10 : 0) | (Vp ? 
8 : 0) | (aaa & 7)); + db(code); + return disp8N; + } + void setModRM(int mod, int r1, int r2) + { + db(static_cast((mod << 6) | ((r1 & 7) << 3) | (r2 & 7))); + } + void setSIB(const RegExp& e, int reg, int disp8N = 0) + { + size_t disp64 = e.getDisp(); +#ifdef XBYAK64 + size_t high = disp64 >> 32; + if (high != 0 && high != 0xFFFFFFFF) throw Error(ERR_OFFSET_IS_TOO_BIG); +#endif + uint32 disp = static_cast(disp64); + const Reg& base = e.getBase(); + const Reg& index = e.getIndex(); + const int baseIdx = base.getIdx(); + const int baseBit = base.getBit(); + const int indexBit = index.getBit(); + enum { + mod00 = 0, mod01 = 1, mod10 = 2 + }; + int mod = mod10; // disp32 + if (!baseBit || ((baseIdx & 7) != Operand::EBP && disp == 0)) { + mod = mod00; + } else { + if (disp8N == 0) { + if (inner::IsInDisp8(disp)) { + mod = mod01; + } + } else { + // disp must be casted to signed + uint32 t = static_cast(static_cast(disp) / disp8N); + if ((disp % disp8N) == 0 && inner::IsInDisp8(t)) { + disp = t; + mod = mod01; + } + } + } + const int newBaseIdx = baseBit ? (baseIdx & 7) : Operand::EBP; + /* ModR/M = [2:3:3] = [Mod:reg/code:R/M] */ + bool hasSIB = indexBit || (baseIdx & 7) == Operand::ESP; +#ifdef XBYAK64 + if (!baseBit && !indexBit) hasSIB = true; +#endif + if (hasSIB) { + setModRM(mod, reg, Operand::ESP); + /* SIB = [2:3:3] = [SS:index:base(=rm)] */ + const int idx = indexBit ? (index.getIdx() & 7) : Operand::ESP; + const int scale = e.getScale(); + const int SS = (scale == 8) ? 3 : (scale == 4) ? 2 : (scale == 2) ? 1 : 0; + setModRM(SS, idx, newBaseIdx); + } else { + setModRM(mod, reg, newBaseIdx); + } + if (mod == mod01) { + db(disp); + } else if (mod == mod10 || (mod == mod00 && !baseBit)) { + dd(disp); + } + } + LabelManager labelMgr_; + bool isInDisp16(uint32 x) const { return 0xFFFF8000 <= x || x <= 0x7FFF; } + void opModR(const Reg& reg1, const Reg& reg2, int code0, int code1 = NONE, int code2 = NONE) + { + rex(reg2, reg1); + db(code0 | (reg1.isBit(8) ? 0 : 1)); if (code1 != NONE) db(code1); if (code2 != NONE) db(code2); + setModRM(3, reg1.getIdx(), reg2.getIdx()); + } + void opModM(const Address& addr, const Reg& reg, int code0, int code1 = NONE, int code2 = NONE, int immSize = 0) + { + if (addr.is64bitDisp()) throw Error(ERR_CANT_USE_64BIT_DISP); + rex(addr, reg); + db(code0 | (reg.isBit(8) ? 0 : 1)); if (code1 != NONE) db(code1); if (code2 != NONE) db(code2); + opAddr(addr, reg.getIdx(), immSize); + } + void opMIB(const Address& addr, const Reg& reg, int code0, int code1) + { + if (addr.is64bitDisp()) throw Error(ERR_CANT_USE_64BIT_DISP); + if (addr.getMode() != Address::M_ModRM) throw Error(ERR_INVALID_MIB_ADDRESS); + if (BIT == 64 && addr.is32bit()) db(0x67); + const RegExp& regExp = addr.getRegExp(false); + uint8 rex = regExp.getRex(); + if (rex) db(rex); + db(code0); db(code1); + setSIB(regExp, reg.getIdx()); + } + void makeJmp(uint32 disp, LabelType type, uint8 shortCode, uint8 longCode, uint8 longPref) + { + const int shortJmpSize = 2; + const int longHeaderSize = longPref ? 
2 : 1; + const int longJmpSize = longHeaderSize + 4; + if (type != T_NEAR && inner::IsInDisp8(disp - shortJmpSize)) { + db(shortCode); db(disp - shortJmpSize); + } else { + if (type == T_SHORT) throw Error(ERR_LABEL_IS_TOO_FAR); + if (longPref) db(longPref); + db(longCode); dd(disp - longJmpSize); + } + } + template + void opJmp(T& label, LabelType type, uint8 shortCode, uint8 longCode, uint8 longPref) + { + if (isAutoGrow() && size_ + 16 >= maxSize_) growMemory(); /* avoid splitting code of jmp */ + size_t offset = 0; + if (labelMgr_.getOffset(&offset, label)) { /* label exists */ + makeJmp(inner::VerifyInInt32(offset - size_), type, shortCode, longCode, longPref); + } else { + int jmpSize = 0; + if (type == T_NEAR) { + jmpSize = 4; + if (longPref) db(longPref); + db(longCode); dd(0); + } else { + jmpSize = 1; + db(shortCode); db(0); + } + JmpLabel jmp(size_, jmpSize, inner::LasIs); + labelMgr_.addUndefinedLabel(label, jmp); + } + } + void opJmpAbs(const void *addr, LabelType type, uint8 shortCode, uint8 longCode, uint8 longPref = 0) + { + if (isAutoGrow()) { + if (type != T_NEAR) throw Error(ERR_ONLY_T_NEAR_IS_SUPPORTED_IN_AUTO_GROW); + if (size_ + 16 >= maxSize_) growMemory(); + if (longPref) db(longPref); + db(longCode); + dd(0); + save(size_ - 4, size_t(addr) - size_, 4, inner::Labs); + } else { + makeJmp(inner::VerifyInInt32(reinterpret_cast(addr) - getCurr()), type, shortCode, longCode, longPref); + } + + } + // reg is reg field of ModRM + // immSize is the size for immediate value + // disp8N = 0(normal), disp8N = 1(force disp32), disp8N = {2, 4, 8} ; compressed displacement + void opAddr(const Address &addr, int reg, int immSize = 0, int disp8N = 0, bool permitVisb = false) + { + if (!permitVisb && addr.isVsib()) throw Error(ERR_BAD_VSIB_ADDRESSING); + if (addr.getMode() == Address::M_ModRM) { + setSIB(addr.getRegExp(), reg, disp8N); + } else if (addr.getMode() == Address::M_rip || addr.getMode() == Address::M_ripAddr) { + setModRM(0, reg, 5); + if (addr.getLabel()) { // [rip + Label] + putL_inner(*addr.getLabel(), true, addr.getDisp() - immSize); + } else { + size_t disp = addr.getDisp(); + if (addr.getMode() == Address::M_ripAddr) { + if (isAutoGrow()) throw Error(ERR_INVALID_RIP_IN_AUTO_GROW); + disp -= (size_t)getCurr() + 4 + immSize; + } + dd(inner::VerifyInInt32(disp)); + } + } + } + /* preCode is for SSSE3/SSE4 */ + void opGen(const Operand& reg, const Operand& op, int code, int pref, bool isValid(const Operand&, const Operand&), int imm8 = NONE, int preCode = NONE) + { + if (isValid && !isValid(reg, op)) throw Error(ERR_BAD_COMBINATION); + if (pref != NONE) db(pref); + if (op.isMEM()) { + opModM(op.getAddress(), reg.getReg(), 0x0F, preCode, code, (imm8 != NONE) ? 1 : 0); + } else { + opModR(reg.getReg(), op.getReg(), 0x0F, preCode, code); + } + if (imm8 != NONE) db(imm8); + } + void opMMX_IMM(const Mmx& mmx, int imm8, int code, int ext) + { + if (mmx.isXMM()) db(0x66); + opModR(Reg32(ext), mmx, 0x0F, code); + db(imm8); + } + void opMMX(const Mmx& mmx, const Operand& op, int code, int pref = 0x66, int imm8 = NONE, int preCode = NONE) + { + opGen(mmx, op, code, mmx.isXMM() ? 
pref : NONE, isXMMorMMX_MEM, imm8, preCode); + } + void opMovXMM(const Operand& op1, const Operand& op2, int code, int pref) + { + if (pref != NONE) db(pref); + if (op1.isXMM() && op2.isMEM()) { + opModM(op2.getAddress(), op1.getReg(), 0x0F, code); + } else if (op1.isMEM() && op2.isXMM()) { + opModM(op1.getAddress(), op2.getReg(), 0x0F, code | 1); + } else { + throw Error(ERR_BAD_COMBINATION); + } + } + void opExt(const Operand& op, const Mmx& mmx, int code, int imm, bool hasMMX2 = false) + { + if (hasMMX2 && op.isREG(i32e)) { /* pextrw is special */ + if (mmx.isXMM()) db(0x66); + opModR(op.getReg(), mmx, 0x0F, 0xC5); db(imm); + } else { + opGen(mmx, op, code, 0x66, isXMM_REG32orMEM, imm, 0x3A); + } + } + void opR_ModM(const Operand& op, int bit, int ext, int code0, int code1 = NONE, int code2 = NONE, bool disableRex = false, int immSize = 0) + { + int opBit = op.getBit(); + if (disableRex && opBit == 64) opBit = 32; + if (op.isREG(bit)) { + opModR(Reg(ext, Operand::REG, opBit), op.getReg().changeBit(opBit), code0, code1, code2); + } else if (op.isMEM()) { + opModM(op.getAddress(), Reg(ext, Operand::REG, opBit), code0, code1, code2, immSize); + } else { + throw Error(ERR_BAD_COMBINATION); + } + } + void opShift(const Operand& op, int imm, int ext) + { + verifyMemHasSize(op); + opR_ModM(op, 0, ext, (0xC0 | ((imm == 1 ? 1 : 0) << 4)), NONE, NONE, false, (imm != 1) ? 1 : 0); + if (imm != 1) db(imm); + } + void opShift(const Operand& op, const Reg8& _cl, int ext) + { + if (_cl.getIdx() != Operand::CL) throw Error(ERR_BAD_COMBINATION); + opR_ModM(op, 0, ext, 0xD2); + } + void opModRM(const Operand& op1, const Operand& op2, bool condR, bool condM, int code0, int code1 = NONE, int code2 = NONE, int immSize = 0) + { + if (condR) { + opModR(op1.getReg(), op2.getReg(), code0, code1, code2); + } else if (condM) { + opModM(op2.getAddress(), op1.getReg(), code0, code1, code2, immSize); + } else { + throw Error(ERR_BAD_COMBINATION); + } + } + void opShxd(const Operand& op, const Reg& reg, uint8 imm, int code, const Reg8 *_cl = 0) + { + if (_cl && _cl->getIdx() != Operand::CL) throw Error(ERR_BAD_COMBINATION); + opModRM(reg, op, (op.isREG(16 | i32e) && op.getBit() == reg.getBit()), op.isMEM() && (reg.isREG(16 | i32e)), 0x0F, code | (_cl ? 1 : 0), NONE, _cl ? 0 : 1); + if (!_cl) db(imm); + } + // (REG, REG|MEM), (MEM, REG) + void opRM_RM(const Operand& op1, const Operand& op2, int code) + { + if (op1.isREG() && op2.isMEM()) { + opModM(op2.getAddress(), op1.getReg(), code | 2); + } else { + opModRM(op2, op1, op1.isREG() && op1.getKind() == op2.getKind(), op1.isMEM() && op2.isREG(), code); + } + } + // (REG|MEM, IMM) + void opRM_I(const Operand& op, uint32 imm, int code, int ext) + { + verifyMemHasSize(op); + uint32 immBit = inner::IsInDisp8(imm) ? 8 : isInDisp16(imm) ? 16 : 32; + if (op.isBit(8)) immBit = 8; + if (op.getBit() < immBit) throw Error(ERR_IMM_IS_TOO_BIG); + if (op.isBit(32|64) && immBit == 16) immBit = 32; /* don't use MEM16 if 32/64bit mode */ + if (op.isREG() && op.getIdx() == 0 && (op.getBit() == immBit || (op.isBit(64) && immBit == 32))) { // rax, eax, ax, al + rex(op); + db(code | 4 | (immBit == 8 ? 0 : 1)); + } else { + int tmp = immBit < (std::min)(op.getBit(), 32U) ? 
2 : 0; + opR_ModM(op, 0, ext, 0x80 | tmp, NONE, NONE, false, immBit / 8); + } + db(imm, immBit / 8); + } + void opIncDec(const Operand& op, int code, int ext) + { + verifyMemHasSize(op); +#ifndef XBYAK64 + if (op.isREG() && !op.isBit(8)) { + rex(op); db(code | op.getIdx()); + return; + } +#endif + code = 0xFE; + if (op.isREG()) { + opModR(Reg(ext, Operand::REG, op.getBit()), op.getReg(), code); + } else { + opModM(op.getAddress(), Reg(ext, Operand::REG, op.getBit()), code); + } + } + void opPushPop(const Operand& op, int code, int ext, int alt) + { + int bit = op.getBit(); + if (bit == 16 || bit == BIT) { + if (bit == 16) db(0x66); + if (op.isREG()) { + if (op.getReg().getIdx() >= 8) db(0x41); + db(alt | (op.getIdx() & 7)); + return; + } + if (op.isMEM()) { + opModM(op.getAddress(), Reg(ext, Operand::REG, 32), code); + return; + } + } + throw Error(ERR_BAD_COMBINATION); + } + void verifyMemHasSize(const Operand& op) const + { + if (op.isMEM() && op.getBit() == 0) throw Error(ERR_MEM_SIZE_IS_NOT_SPECIFIED); + } + /* + mov(r, imm) = db(imm, mov_imm(r, imm)) + */ + int mov_imm(const Reg& reg, size_t imm) + { + int bit = reg.getBit(); + const int idx = reg.getIdx(); + int code = 0xB0 | ((bit == 8 ? 0 : 1) << 3); + if (bit == 64 && (imm & ~size_t(0xffffffffu)) == 0) { + rex(Reg32(idx)); + bit = 32; + } else { + rex(reg); + if (bit == 64 && inner::IsInInt32(imm)) { + db(0xC7); + code = 0xC0; + bit = 32; + } + } + db(code | (idx & 7)); + return bit / 8; + } + template + void putL_inner(T& label, bool relative = false, size_t disp = 0) + { + const int jmpSize = relative ? 4 : (int)sizeof(size_t); + if (isAutoGrow() && size_ + 16 >= maxSize_) growMemory(); + size_t offset = 0; + if (labelMgr_.getOffset(&offset, label)) { + if (relative) { + db(inner::VerifyInInt32(offset + disp - size_ - jmpSize), jmpSize); + } else if (isAutoGrow()) { + db(uint64(0), jmpSize); + save(size_ - jmpSize, offset, jmpSize, inner::LaddTop); + } else { + db(size_t(top_) + offset, jmpSize); + } + return; + } + db(uint64(0), jmpSize); + JmpLabel jmp(size_, jmpSize, (relative ? inner::LasIs : isAutoGrow() ? inner::LaddTop : inner::Labs), disp); + labelMgr_.addUndefinedLabel(label, jmp); + } + void opMovxx(const Reg& reg, const Operand& op, uint8 code) + { + if (op.isBit(32)) throw Error(ERR_BAD_COMBINATION); + int w = op.isBit(16); +#ifdef XBYAK64 + if (op.isHigh8bit()) throw Error(ERR_BAD_COMBINATION); +#endif + bool cond = reg.isREG() && (reg.getBit() > op.getBit()); + opModRM(reg, op, cond && op.isREG(), cond && op.isMEM(), 0x0F, code | w); + } + void opFpuMem(const Address& addr, uint8 m16, uint8 m32, uint8 m64, uint8 ext, uint8 m64ext) + { + if (addr.is64bitDisp()) throw Error(ERR_CANT_USE_64BIT_DISP); + uint8 code = addr.isBit(16) ? m16 : addr.isBit(32) ? m32 : addr.isBit(64) ? m64 : 0; + if (!code) throw Error(ERR_BAD_MEM_SIZE); + if (m64ext && addr.isBit(64)) ext = m64ext; + + rex(addr, st0); + db(code); + opAddr(addr, ext); + } + // use code1 if reg1 == st0 + // use code2 if reg1 != st0 && reg2 == st0 + void opFpuFpu(const Fpu& reg1, const Fpu& reg2, uint32 code1, uint32 code2) + { + uint32 code = reg1.getIdx() == 0 ? code1 : reg2.getIdx() == 0 ? 
code2 : 0; + if (!code) throw Error(ERR_BAD_ST_COMBINATION); + db(uint8(code >> 8)); + db(uint8(code | (reg1.getIdx() | reg2.getIdx()))); + } + void opFpu(const Fpu& reg, uint8 code1, uint8 code2) + { + db(code1); db(code2 | reg.getIdx()); + } + void opVex(const Reg& r, const Operand *p1, const Operand& op2, int type, int code, int imm8 = NONE) + { + if (op2.isMEM()) { + const Address& addr = op2.getAddress(); + const RegExp& regExp = addr.getRegExp(); + const Reg& base = regExp.getBase(); + const Reg& index = regExp.getIndex(); + if (BIT == 64 && addr.is32bit()) db(0x67); + int disp8N = 0; + bool x = index.isExtIdx(); + if ((type & (T_MUST_EVEX|T_MEM_EVEX)) || r.hasEvex() || (p1 && p1->hasEvex()) || addr.isBroadcast() || addr.getOpmaskIdx()) { + int aaa = addr.getOpmaskIdx(); + if (aaa && !(type & T_M_K)) throw Error(ERR_INVALID_OPMASK_WITH_MEMORY); + bool b = false; + if (addr.isBroadcast()) { + if (!(type & (T_B32 | T_B64))) throw Error(ERR_INVALID_BROADCAST); + b = true; + } + int VL = regExp.isVsib() ? index.getBit() : 0; + disp8N = evex(r, base, p1, type, code, x, b, aaa, VL, index.isExtIdx2()); + } else { + vex(r, base, p1, type, code, x); + } + opAddr(addr, r.getIdx(), (imm8 != NONE) ? 1 : 0, disp8N, (type & T_VSIB) != 0); + } else { + const Reg& base = op2.getReg(); + if ((type & T_MUST_EVEX) || r.hasEvex() || (p1 && p1->hasEvex()) || base.hasEvex()) { + evex(r, base, p1, type, code); + } else { + vex(r, base, p1, type, code); + } + setModRM(3, r.getIdx(), base.getIdx()); + } + if (imm8 != NONE) db(imm8); + } + // (r, r, r/m) if isR_R_RM + // (r, r/m, r) + void opGpr(const Reg32e& r, const Operand& op1, const Operand& op2, int type, uint8 code, bool isR_R_RM, int imm8 = NONE) + { + const Operand *p1 = &op1; + const Operand *p2 = &op2; + if (!isR_R_RM) std::swap(p1, p2); + const unsigned int bit = r.getBit(); + if (p1->getBit() != bit || (p2->isREG() && p2->getBit() != bit)) throw Error(ERR_BAD_COMBINATION); + type |= (bit == 64) ? T_W1 : T_W0; + opVex(r, p1, *p2, type, code, imm8); + } + void opAVX_X_X_XM(const Xmm& x1, const Operand& op1, const Operand& op2, int type, int code0, int imm8 = NONE) + { + const Xmm *x2 = static_cast(&op1); + const Operand *op = &op2; + if (op2.isNone()) { // (x1, op1) -> (x1, x1, op1) + x2 = &x1; + op = &op1; + } + // (x1, x2, op) + if (!((x1.isXMM() && x2->isXMM()) || ((type & T_YMM) && ((x1.isYMM() && x2->isYMM()) || (x1.isZMM() && x2->isZMM()))))) throw Error(ERR_BAD_COMBINATION); + opVex(x1, x2, *op, type, code0, imm8); + } + void opAVX_K_X_XM(const Opmask& k, const Xmm& x2, const Operand& op3, int type, int code0, int imm8 = NONE) + { + if (!op3.isMEM() && (x2.getKind() != op3.getKind())) throw Error(ERR_BAD_COMBINATION); + opVex(k, &x2, op3, type, code0, imm8); + } + // (x, x/m), (y, x/m256), (z, y/m) + void checkCvt1(const Operand& x, const Operand& op) const + { + if (!op.isMEM() && !(x.is(Operand::XMM | Operand::YMM) && op.isXMM()) && !(x.isZMM() && op.isYMM())) throw Error(ERR_BAD_COMBINATION); + } + // (x, x/m), (x, y/m256), (y, z/m) + void checkCvt2(const Xmm& x, const Operand& op) const + { + if (!(x.isXMM() && op.is(Operand::XMM | Operand::YMM | Operand::MEM)) && !(x.isYMM() && op.is(Operand::ZMM | Operand::MEM))) throw Error(ERR_BAD_COMBINATION); + } + void opCvt2(const Xmm& x, const Operand& op, int type, int code) + { + checkCvt2(x, op); + Operand::Kind kind = x.isXMM() ? (op.isBit(256) ? 
Operand::YMM : Operand::XMM) : Operand::ZMM; + opVex(x.copyAndSetKind(kind), &xm0, op, type, code); + } + void opCvt3(const Xmm& x1, const Xmm& x2, const Operand& op, int type, int type64, int type32, uint8 code) + { + if (!(x1.isXMM() && x2.isXMM() && (op.isREG(i32e) || op.isMEM()))) throw Error(ERR_BAD_SIZE_OF_REGISTER); + Xmm x(op.getIdx()); + const Operand *p = op.isREG() ? &x : &op; + opVex(x1, &x2, *p, type | (op.isBit(64) ? type64 : type32), code); + } + const Xmm& cvtIdx0(const Operand& x) const + { + return x.isZMM() ? zm0 : x.isYMM() ? ym0 : xm0; + } + // support (x, x/m, imm), (y, y/m, imm) + void opAVX_X_XM_IMM(const Xmm& x, const Operand& op, int type, int code, int imm8 = NONE) + { + opAVX_X_X_XM(x, cvtIdx0(x), op, type, code, imm8); + } + // QQQ:need to refactor + void opSp1(const Reg& reg, const Operand& op, uint8 pref, uint8 code0, uint8 code1) + { + if (reg.isBit(8)) throw Error(ERR_BAD_SIZE_OF_REGISTER); + bool is16bit = reg.isREG(16) && (op.isREG(16) || op.isMEM()); + if (!is16bit && !(reg.isREG(i32e) && (op.isREG(reg.getBit()) || op.isMEM()))) throw Error(ERR_BAD_COMBINATION); + if (is16bit) db(0x66); + db(pref); opModRM(reg.changeBit(i32e == 32 ? 32 : reg.getBit()), op, op.isREG(), true, code0, code1); + } + void opGather(const Xmm& x1, const Address& addr, const Xmm& x2, int type, uint8 code, int mode) + { + const RegExp& regExp = addr.getRegExp(); + if (!regExp.isVsib(128 | 256)) throw Error(ERR_BAD_VSIB_ADDRESSING); + const int y_vx_y = 0; + const int y_vy_y = 1; +// const int x_vy_x = 2; + const bool isAddrYMM = regExp.getIndex().getBit() == 256; + if (!x1.isXMM() || isAddrYMM || !x2.isXMM()) { + bool isOK = false; + if (mode == y_vx_y) { + isOK = x1.isYMM() && !isAddrYMM && x2.isYMM(); + } else if (mode == y_vy_y) { + isOK = x1.isYMM() && isAddrYMM && x2.isYMM(); + } else { // x_vy_x + isOK = !x1.isYMM() && isAddrYMM && !x2.isYMM(); + } + if (!isOK) throw Error(ERR_BAD_VSIB_ADDRESSING); + } + opAVX_X_X_XM(isAddrYMM ? Ymm(x1.getIdx()) : x1, isAddrYMM ? 
Ymm(x2.getIdx()) : x2, addr, type, code); + } + enum { + xx_yy_zz = 0, + xx_yx_zy = 1, + xx_xy_yz = 2 + }; + void checkGather2(const Xmm& x1, const Reg& x2, int mode) const + { + if (x1.isXMM() && x2.isXMM()) return; + switch (mode) { + case xx_yy_zz: if ((x1.isYMM() && x2.isYMM()) || (x1.isZMM() && x2.isZMM())) return; + break; + case xx_yx_zy: if ((x1.isYMM() && x2.isXMM()) || (x1.isZMM() && x2.isYMM())) return; + break; + case xx_xy_yz: if ((x1.isXMM() && x2.isYMM()) || (x1.isYMM() && x2.isZMM())) return; + break; + } + throw Error(ERR_BAD_VSIB_ADDRESSING); + } + void opGather2(const Xmm& x, const Address& addr, int type, uint8 code, int mode) + { + if (x.hasZero()) throw Error(ERR_INVALID_ZERO); + checkGather2(x, addr.getRegExp().getIndex(), mode); + opVex(x, 0, addr, type, code); + } + /* + xx_xy_yz ; mode = true + xx_xy_xz ; mode = false + */ + void opVmov(const Operand& op, const Xmm& x, int type, uint8 code, bool mode) + { + if (mode) { + if (!op.isMEM() && !((op.isXMM() && x.isXMM()) || (op.isXMM() && x.isYMM()) || (op.isYMM() && x.isZMM()))) throw Error(ERR_BAD_COMBINATION); + } else { + if (!op.isMEM() && !op.isXMM()) throw Error(ERR_BAD_COMBINATION); + } + opVex(x, 0, op, type, code); + } + void opGatherFetch(const Address& addr, const Xmm& x, int type, uint8 code, Operand::Kind kind) + { + if (addr.hasZero()) throw Error(ERR_INVALID_ZERO); + if (addr.getRegExp().getIndex().getKind() != kind) throw Error(ERR_BAD_VSIB_ADDRESSING); + opVex(x, 0, addr, type, code); + } +public: + unsigned int getVersion() const { return VERSION; } + using CodeArray::db; + const Mmx mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7; + const Xmm xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7; + const Ymm ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7; + const Zmm zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7; + const Xmm &xm0, &xm1, &xm2, &xm3, &xm4, &xm5, &xm6, &xm7; + const Ymm &ym0, &ym1, &ym2, &ym3, &ym4, &ym5, &ym6, &ym7; + const Ymm &zm0, &zm1, &zm2, &zm3, &zm4, &zm5, &zm6, &zm7; + const Reg32 eax, ecx, edx, ebx, esp, ebp, esi, edi; + const Reg16 ax, cx, dx, bx, sp, bp, si, di; + const Reg8 al, cl, dl, bl, ah, ch, dh, bh; + const AddressFrame ptr, byte, word, dword, qword, xword, yword, zword; // xword is same as oword of NASM + const AddressFrame ptr_b, xword_b, yword_b, zword_b; // broadcast such as {1to2}, {1to4}, {1to8}, {1to16}, {b} + const Fpu st0, st1, st2, st3, st4, st5, st6, st7; + const Opmask k0, k1, k2, k3, k4, k5, k6, k7; + const BoundsReg bnd0, bnd1, bnd2, bnd3; + const EvexModifierRounding T_sae, T_rn_sae, T_rd_sae, T_ru_sae, T_rz_sae; // {sae}, {rn-sae}, {rd-sae}, {ru-sae}, {rz-sae} + const EvexModifierZero T_z; // {z} +#ifdef XBYAK64 + const Reg64 rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi, r8, r9, r10, r11, r12, r13, r14, r15; + const Reg32 r8d, r9d, r10d, r11d, r12d, r13d, r14d, r15d; + const Reg16 r8w, r9w, r10w, r11w, r12w, r13w, r14w, r15w; + const Reg8 r8b, r9b, r10b, r11b, r12b, r13b, r14b, r15b; + const Reg8 spl, bpl, sil, dil; + const Xmm xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15; + const Xmm xmm16, xmm17, xmm18, xmm19, xmm20, xmm21, xmm22, xmm23; + const Xmm xmm24, xmm25, xmm26, xmm27, xmm28, xmm29, xmm30, xmm31; + const Ymm ymm8, ymm9, ymm10, ymm11, ymm12, ymm13, ymm14, ymm15; + const Ymm ymm16, ymm17, ymm18, ymm19, ymm20, ymm21, ymm22, ymm23; + const Ymm ymm24, ymm25, ymm26, ymm27, ymm28, ymm29, ymm30, ymm31; + const Zmm zmm8, zmm9, zmm10, zmm11, zmm12, zmm13, zmm14, zmm15; + const Zmm zmm16, zmm17, zmm18, zmm19, zmm20, zmm21, zmm22, zmm23; + const Zmm zmm24, zmm25, 
zmm26, zmm27, zmm28, zmm29, zmm30, zmm31; + const Xmm &xm8, &xm9, &xm10, &xm11, &xm12, &xm13, &xm14, &xm15; // for my convenience + const Xmm &xm16, &xm17, &xm18, &xm19, &xm20, &xm21, &xm22, &xm23; + const Xmm &xm24, &xm25, &xm26, &xm27, &xm28, &xm29, &xm30, &xm31; + const Ymm &ym8, &ym9, &ym10, &ym11, &ym12, &ym13, &ym14, &ym15; + const Ymm &ym16, &ym17, &ym18, &ym19, &ym20, &ym21, &ym22, &ym23; + const Ymm &ym24, &ym25, &ym26, &ym27, &ym28, &ym29, &ym30, &ym31; + const Zmm &zm8, &zm9, &zm10, &zm11, &zm12, &zm13, &zm14, &zm15; + const Zmm &zm16, &zm17, &zm18, &zm19, &zm20, &zm21, &zm22, &zm23; + const Zmm &zm24, &zm25, &zm26, &zm27, &zm28, &zm29, &zm30, &zm31; + const RegRip rip; +#endif +#ifndef XBYAK_DISABLE_SEGMENT + const Segment es, cs, ss, ds, fs, gs; +#endif + void L(const std::string& label) { labelMgr_.defineSlabel(label); } + void L(Label& label) { labelMgr_.defineClabel(label); } + Label L() { Label label; L(label); return label; } + void inLocalLabel() { labelMgr_.enterLocal(); } + void outLocalLabel() { labelMgr_.leaveLocal(); } + /* + assign src to dst + require + dst : does not used by L() + src : used by L() + */ + void assignL(Label& dst, const Label& src) { labelMgr_.assign(dst, src); } + /* + put address of label to buffer + @note the put size is 4(32-bit), 8(64-bit) + */ + void putL(std::string label) { putL_inner(label); } + void putL(const Label& label) { putL_inner(label); } + + void jmp(const Operand& op) { opR_ModM(op, BIT, 4, 0xFF, NONE, NONE, true); } + void jmp(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0xEB, 0xE9, 0); } + void jmp(const char *label, LabelType type = T_AUTO) { jmp(std::string(label), type); } + void jmp(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0xEB, 0xE9, 0); } + void jmp(const void *addr, LabelType type = T_AUTO) { opJmpAbs(addr, type, 0xEB, 0xE9); } + + void call(const Operand& op) { opR_ModM(op, 16 | i32e, 2, 0xFF, NONE, NONE, true); } + // call(string label), not const std::string& + void call(std::string label) { opJmp(label, T_NEAR, 0, 0xE8, 0); } + void call(const char *label) { call(std::string(label)); } + void call(const Label& label) { opJmp(label, T_NEAR, 0, 0xE8, 0); } + // call(function pointer) +#ifdef XBYAK_VARIADIC_TEMPLATE + template<class Ret, class... Params> + void call(Ret(*func)(Params...)) { call(reinterpret_cast<const void*>(func)); } +#endif + void call(const void *addr) { opJmpAbs(addr, T_NEAR, 0, 0xE8); } + + void test(const Operand& op, const Reg& reg) + { + opModRM(reg, op, op.isREG() && (op.getKind() == reg.getKind()), op.isMEM(), 0x84); + } + void test(const Operand& op, uint32 imm) + { + verifyMemHasSize(op); + int immSize = (std::min)(op.getBit() / 8, 4U); + if (op.isREG() && op.getIdx() == 0) { // al, ax, eax + rex(op); + db(0xA8 | (op.isBit(8) ? 0 : 1)); + } else { + opR_ModM(op, 0, 0, 0xF6, NONE, NONE, false, immSize); + } + db(imm, immSize); + } + void imul(const Reg& reg, const Operand& op) + { + opModRM(reg, op, op.isREG() && (reg.getKind() == op.getKind()), op.isMEM(), 0x0F, 0xAF); + } + void imul(const Reg& reg, const Operand& op, int imm) + { + int s = inner::IsInDisp8(imm) ? 1 : 0; + int immSize = s ? 1 : reg.isREG(16) ?
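+ /* [editorial sketch, not part of upstream xbyak] Typical use of the label API above, inside a class derived from Xbyak::CodeGenerator (names are illustrative): + Xbyak::Label lp; + mov(ecx, 10); + L(lp); // define the label here + dec(ecx); + jnz(lp); // backward short jump, resolved immediately + ret(); + */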
2 : 4; + opModRM(reg, op, op.isREG() && (reg.getKind() == op.getKind()), op.isMEM(), 0x69 | (s << 1), NONE, NONE, immSize); + db(imm, immSize); + } + void push(const Operand& op) { opPushPop(op, 0xFF, 6, 0x50); } + void pop(const Operand& op) { opPushPop(op, 0x8F, 0, 0x58); } + void push(const AddressFrame& af, uint32 imm) + { + if (af.bit_ == 8 && inner::IsInDisp8(imm)) { + db(0x6A); db(imm); + } else if (af.bit_ == 16 && isInDisp16(imm)) { + db(0x66); db(0x68); dw(imm); + } else { + db(0x68); dd(imm); + } + } + /* use "push(word, 4)" if you want "push word 4" */ + void push(uint32 imm) + { + if (inner::IsInDisp8(imm)) { + push(byte, imm); + } else { + push(dword, imm); + } + } + void mov(const Operand& reg1, const Operand& reg2) + { + const Reg *reg = 0; + const Address *addr = 0; + uint8 code = 0; + if (reg1.isREG() && reg1.getIdx() == 0 && reg2.isMEM()) { // mov eax|ax|al, [disp] + reg = &reg1.getReg(); + addr= &reg2.getAddress(); + code = 0xA0; + } else + if (reg1.isMEM() && reg2.isREG() && reg2.getIdx() == 0) { // mov [disp], eax|ax|al + reg = &reg2.getReg(); + addr= &reg1.getAddress(); + code = 0xA2; + } +#ifdef XBYAK64 + if (addr && addr->is64bitDisp()) { + if (code) { + rex(*reg); + db(reg1.isREG(8) ? 0xA0 : reg1.isREG() ? 0xA1 : reg2.isREG(8) ? 0xA2 : 0xA3); + db(addr->getDisp(), 8); + } else { + throw Error(ERR_BAD_COMBINATION); + } + } else +#else + if (code && addr->isOnlyDisp()) { + rex(*reg, *addr); + db(code | (reg->isBit(8) ? 0 : 1)); + dd(static_cast<uint32>(addr->getDisp())); + } else +#endif + { + opRM_RM(reg1, reg2, 0x88); + } + } + void mov(const Operand& op, size_t imm) + { + if (op.isREG()) { + const int size = mov_imm(op.getReg(), imm); + db(imm, size); + } else if (op.isMEM()) { + verifyMemHasSize(op); + int immSize = op.getBit() / 8; + if (immSize <= 4) { + sint64 s = sint64(imm) >> (immSize * 8); + if (s != 0 && s != -1) throw Error(ERR_IMM_IS_TOO_BIG); + } else { + if (!inner::IsInInt32(imm)) throw Error(ERR_IMM_IS_TOO_BIG); + immSize = 4; + } + opModM(op.getAddress(), Reg(0, Operand::REG, op.getBit()), 0xC6, NONE, NONE, immSize); + db(static_cast<uint32>(imm), immSize); + } else { + throw Error(ERR_BAD_COMBINATION); + } + } + void mov(const NativeReg& reg, const char *label) // can't use std::string + { + if (label == 0) { + mov(static_cast<const Operand&>(reg), 0); // call imm + return; + } + mov_imm(reg, dummyAddr); + putL(label); + } + void mov(const NativeReg& reg, const Label& label) + { + mov_imm(reg, dummyAddr); + putL(label); + } + void xchg(const Operand& op1, const Operand& op2) + { + const Operand *p1 = &op1, *p2 = &op2; + if (p1->isMEM() || (p2->isREG(16 | i32e) && p2->getIdx() == 0)) { + p1 = &op2; p2 = &op1; + } + if (p1->isMEM()) throw Error(ERR_BAD_COMBINATION); + if (p2->isREG() && (p1->isREG(16 | i32e) && p1->getIdx() == 0) +#ifdef XBYAK64 + && (p2->getIdx() != 0 || !p1->isREG(32)) +#endif + ) { + rex(*p2, *p1); db(0x90 | (p2->getIdx() & 7)); + return; + } + opModRM(*p1, *p2, (p1->isREG() && p2->isREG() && (p1->getBit() == p2->getBit())), p2->isMEM(), 0x86 | (p1->isBit(8) ?
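+ /* [editorial note, not part of upstream xbyak] Encoding examples for the push forms above: push(4) picks the sign-extended byte form and emits 6A 04; push(word, 4) forces the 16-bit form and emits 66 68 04 00; push(dword, 0x12345678) emits 68 78 56 34 12. */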
0 : 1)); + } + +#ifndef XBYAK_DISABLE_SEGMENT + void push(const Segment& seg) + { + switch (seg.getIdx()) { + case Segment::es: db(0x06); break; + case Segment::cs: db(0x0E); break; + case Segment::ss: db(0x16); break; + case Segment::ds: db(0x1E); break; + case Segment::fs: db(0x0F); db(0xA0); break; + case Segment::gs: db(0x0F); db(0xA8); break; + default: + assert(0); + } + } + void pop(const Segment& seg) + { + switch (seg.getIdx()) { + case Segment::es: db(0x07); break; + case Segment::cs: throw Error(ERR_BAD_COMBINATION); + case Segment::ss: db(0x17); break; + case Segment::ds: db(0x1F); break; + case Segment::fs: db(0x0F); db(0xA1); break; + case Segment::gs: db(0x0F); db(0xA9); break; + default: + assert(0); + } + } + void putSeg(const Segment& seg) + { + // segment override prefixes: ES=0x26, CS=0x2E, SS=0x36, DS=0x3E, FS=0x64, GS=0x65 + switch (seg.getIdx()) { + case Segment::es: db(0x26); break; + case Segment::cs: db(0x2E); break; + case Segment::ss: db(0x36); break; + case Segment::ds: db(0x3E); break; + case Segment::fs: db(0x64); break; + case Segment::gs: db(0x65); break; + default: + assert(0); + } + } + void mov(const Operand& op, const Segment& seg) + { + opModRM(Reg8(seg.getIdx()), op, op.isREG(16|i32e), op.isMEM(), 0x8C); + } + void mov(const Segment& seg, const Operand& op) + { + opModRM(Reg8(seg.getIdx()), op.isREG(16|i32e) ? static_cast<const Operand&>(op.getReg().cvt32()) : op, op.isREG(16|i32e), op.isMEM(), 0x8E); + } +#endif + + enum { NONE = 256 }; + // constructor + CodeGenerator(size_t maxSize = DEFAULT_MAX_CODE_SIZE, void *userPtr = 0, Allocator *allocator = 0) + : CodeArray(maxSize, userPtr, allocator) + , mm0(0), mm1(1), mm2(2), mm3(3), mm4(4), mm5(5), mm6(6), mm7(7) + , xmm0(0), xmm1(1), xmm2(2), xmm3(3), xmm4(4), xmm5(5), xmm6(6), xmm7(7) + , ymm0(0), ymm1(1), ymm2(2), ymm3(3), ymm4(4), ymm5(5), ymm6(6), ymm7(7) + , zmm0(0), zmm1(1), zmm2(2), zmm3(3), zmm4(4), zmm5(5), zmm6(6), zmm7(7) + // for my convenience + , xm0(xmm0), xm1(xmm1), xm2(xmm2), xm3(xmm3), xm4(xmm4), xm5(xmm5), xm6(xmm6), xm7(xmm7) + , ym0(ymm0), ym1(ymm1), ym2(ymm2), ym3(ymm3), ym4(ymm4), ym5(ymm5), ym6(ymm6), ym7(ymm7) + , zm0(zmm0), zm1(zmm1), zm2(zmm2), zm3(zmm3), zm4(zmm4), zm5(zmm5), zm6(zmm6), zm7(zmm7) + + , eax(Operand::EAX), ecx(Operand::ECX), edx(Operand::EDX), ebx(Operand::EBX), esp(Operand::ESP), ebp(Operand::EBP), esi(Operand::ESI), edi(Operand::EDI) + , ax(Operand::AX), cx(Operand::CX), dx(Operand::DX), bx(Operand::BX), sp(Operand::SP), bp(Operand::BP), si(Operand::SI), di(Operand::DI) + , al(Operand::AL), cl(Operand::CL), dl(Operand::DL), bl(Operand::BL), ah(Operand::AH), ch(Operand::CH), dh(Operand::DH), bh(Operand::BH) + , ptr(0), byte(8), word(16), dword(32), qword(64), xword(128), yword(256), zword(512) + , ptr_b(0, true), xword_b(128, true), yword_b(256, true), zword_b(512, true) + , st0(0), st1(1), st2(2), st3(3), st4(4), st5(5), st6(6), st7(7) + , k0(0), k1(1), k2(2), k3(3), k4(4), k5(5), k6(6), k7(7) + , bnd0(0), bnd1(1), bnd2(2), bnd3(3) + , T_sae(EvexModifierRounding::T_SAE), T_rn_sae(EvexModifierRounding::T_RN_SAE), T_rd_sae(EvexModifierRounding::T_RD_SAE), T_ru_sae(EvexModifierRounding::T_RU_SAE), T_rz_sae(EvexModifierRounding::T_RZ_SAE) + , T_z() +#ifdef XBYAK64 + , rax(Operand::RAX), rcx(Operand::RCX), rdx(Operand::RDX), rbx(Operand::RBX), rsp(Operand::RSP), rbp(Operand::RBP), rsi(Operand::RSI), rdi(Operand::RDI), r8(Operand::R8), r9(Operand::R9), r10(Operand::R10), r11(Operand::R11), r12(Operand::R12), r13(Operand::R13), r14(Operand::R14), r15(Operand::R15) + , r8d(8), r9d(9), r10d(10), r11d(11), r12d(12), r13d(13), r14d(14), r15d(15) + , r8w(8), r9w(9),
r10w(10), r11w(11), r12w(12), r13w(13), r14w(14), r15w(15) + , r8b(8), r9b(9), r10b(10), r11b(11), r12b(12), r13b(13), r14b(14), r15b(15) + , spl(Operand::SPL, true), bpl(Operand::BPL, true), sil(Operand::SIL, true), dil(Operand::DIL, true) + , xmm8(8), xmm9(9), xmm10(10), xmm11(11), xmm12(12), xmm13(13), xmm14(14), xmm15(15) + , xmm16(16), xmm17(17), xmm18(18), xmm19(19), xmm20(20), xmm21(21), xmm22(22), xmm23(23) + , xmm24(24), xmm25(25), xmm26(26), xmm27(27), xmm28(28), xmm29(29), xmm30(30), xmm31(31) + , ymm8(8), ymm9(9), ymm10(10), ymm11(11), ymm12(12), ymm13(13), ymm14(14), ymm15(15) + , ymm16(16), ymm17(17), ymm18(18), ymm19(19), ymm20(20), ymm21(21), ymm22(22), ymm23(23) + , ymm24(24), ymm25(25), ymm26(26), ymm27(27), ymm28(28), ymm29(29), ymm30(30), ymm31(31) + , zmm8(8), zmm9(9), zmm10(10), zmm11(11), zmm12(12), zmm13(13), zmm14(14), zmm15(15) + , zmm16(16), zmm17(17), zmm18(18), zmm19(19), zmm20(20), zmm21(21), zmm22(22), zmm23(23) + , zmm24(24), zmm25(25), zmm26(26), zmm27(27), zmm28(28), zmm29(29), zmm30(30), zmm31(31) + // for my convenience + , xm8(xmm8), xm9(xmm9), xm10(xmm10), xm11(xmm11), xm12(xmm12), xm13(xmm13), xm14(xmm14), xm15(xmm15) + , xm16(xmm16), xm17(xmm17), xm18(xmm18), xm19(xmm19), xm20(xmm20), xm21(xmm21), xm22(xmm22), xm23(xmm23) + , xm24(xmm24), xm25(xmm25), xm26(xmm26), xm27(xmm27), xm28(xmm28), xm29(xmm29), xm30(xmm30), xm31(xmm31) + , ym8(ymm8), ym9(ymm9), ym10(ymm10), ym11(ymm11), ym12(ymm12), ym13(ymm13), ym14(ymm14), ym15(ymm15) + , ym16(ymm16), ym17(ymm17), ym18(ymm18), ym19(ymm19), ym20(ymm20), ym21(ymm21), ym22(ymm22), ym23(ymm23) + , ym24(ymm24), ym25(ymm25), ym26(ymm26), ym27(ymm27), ym28(ymm28), ym29(ymm29), ym30(ymm30), ym31(ymm31) + , zm8(zmm8), zm9(zmm9), zm10(zmm10), zm11(zmm11), zm12(zmm12), zm13(zmm13), zm14(zmm14), zm15(zmm15) + , zm16(zmm16), zm17(zmm17), zm18(zmm18), zm19(zmm19), zm20(zmm20), zm21(zmm21), zm22(zmm22), zm23(zmm23) + , zm24(zmm24), zm25(zmm25), zm26(zmm26), zm27(zmm27), zm28(zmm28), zm29(zmm29), zm30(zmm30), zm31(zmm31) + , rip() +#endif +#ifndef XBYAK_DISABLE_SEGMENT + , es(Segment::es), cs(Segment::cs), ss(Segment::ss), ds(Segment::ds), fs(Segment::fs), gs(Segment::gs) +#endif + { + labelMgr_.set(this); + } + void reset() + { + resetSize(); + labelMgr_.reset(); + labelMgr_.set(this); + } + bool hasUndefinedLabel() const { return labelMgr_.hasUndefSlabel() || labelMgr_.hasUndefClabel(); } + /* + MUST call ready() to complete generating code if you use AutoGrow mode. + It is not necessary for the other mode if hasUndefinedLabel() is true. 
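+ [editorial sketch, not part of upstream xbyak] e.g. + struct Code : Xbyak::CodeGenerator { + Code() : Xbyak::CodeGenerator(4096, Xbyak::AutoGrow) { mov(eax, 42); ret(); } + }; + Code c; + c.ready(); // resolve pending jumps/labels and make the buffer executable + int n = c.getCode<int (*)()>()(); // n == 42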
+ */ + void ready(ProtectMode mode = PROTECT_RWE) + { + if (hasUndefinedLabel()) throw Error(ERR_LABEL_IS_NOT_FOUND); + if (isAutoGrow()) { + calcJmpAddress(); + if (useProtect()) setProtectMode(mode); + } + } + // set read/exec + void readyRE() { return ready(PROTECT_RE); } +#ifdef XBYAK_TEST + void dump(bool doClear = true) + { + CodeArray::dump(); + if (doClear) size_ = 0; + } +#endif + +#ifdef XBYAK_UNDEF_JNL + #undef jnl +#endif + + /* + use single byte nop if useMultiByteNop = false + */ + void nop(size_t size = 1, bool useMultiByteNop = true) + { + if (!useMultiByteNop) { + for (size_t i = 0; i < size; i++) { + db(0x90); + } + return; + } + /* + Intel Architectures Software Developer's Manual Volume 2 + recommended multi-byte sequence of NOP instruction + AMD and Intel seem to agree on the same sequences for up to 9 bytes: + https://support.amd.com/TechDocs/55723_SOG_Fam_17h_Processors_3.00.pdf + */ + static const uint8 nopTbl[9][9] = { + {0x90}, + {0x66, 0x90}, + {0x0F, 0x1F, 0x00}, + {0x0F, 0x1F, 0x40, 0x00}, + {0x0F, 0x1F, 0x44, 0x00, 0x00}, + {0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00}, + {0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00}, + {0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, + }; + const size_t n = sizeof(nopTbl) / sizeof(nopTbl[0]); + while (size > 0) { + size_t len = (std::min)(n, size); + const uint8 *seq = nopTbl[len - 1]; + db(seq, len); + size -= len; + } + } + +#ifndef XBYAK_DONT_READ_LIST +#include "xbyak_mnemonic.h" + /* + use single byte nop if useMultiByteNop = false + */ + void align(size_t x = 16, bool useMultiByteNop = true) + { + if (x == 1) return; + if (x < 1 || (x & (x - 1))) throw Error(ERR_BAD_ALIGN); + if (isAutoGrow() && x > inner::ALIGN_PAGE_SIZE) fprintf(stderr, "warning:autoGrow mode does not support %d align\n", (int)x); + size_t remain = size_t(getCurr()) % x; + if (remain) { + nop(x - remain, useMultiByteNop); + } + } +#endif +}; + +namespace util { +static const Mmx mm0(0), mm1(1), mm2(2), mm3(3), mm4(4), mm5(5), mm6(6), mm7(7); +static const Xmm xmm0(0), xmm1(1), xmm2(2), xmm3(3), xmm4(4), xmm5(5), xmm6(6), xmm7(7); +static const Ymm ymm0(0), ymm1(1), ymm2(2), ymm3(3), ymm4(4), ymm5(5), ymm6(6), ymm7(7); +static const Zmm zmm0(0), zmm1(1), zmm2(2), zmm3(3), zmm4(4), zmm5(5), zmm6(6), zmm7(7); +static const Reg32 eax(Operand::EAX), ecx(Operand::ECX), edx(Operand::EDX), ebx(Operand::EBX), esp(Operand::ESP), ebp(Operand::EBP), esi(Operand::ESI), edi(Operand::EDI); +static const Reg16 ax(Operand::AX), cx(Operand::CX), dx(Operand::DX), bx(Operand::BX), sp(Operand::SP), bp(Operand::BP), si(Operand::SI), di(Operand::DI); +static const Reg8 al(Operand::AL), cl(Operand::CL), dl(Operand::DL), bl(Operand::BL), ah(Operand::AH), ch(Operand::CH), dh(Operand::DH), bh(Operand::BH); +static const AddressFrame ptr(0), byte(8), word(16), dword(32), qword(64), xword(128), yword(256), zword(512); +static const AddressFrame ptr_b(0, true), xword_b(128, true), yword_b(256, true), zword_b(512, true); +static const Fpu st0(0), st1(1), st2(2), st3(3), st4(4), st5(5), st6(6), st7(7); +static const Opmask k0(0), k1(1), k2(2), k3(3), k4(4), k5(5), k6(6), k7(7); +static const BoundsReg bnd0(0), bnd1(1), bnd2(2), bnd3(3); +static const EvexModifierRounding T_sae(EvexModifierRounding::T_SAE), T_rn_sae(EvexModifierRounding::T_RN_SAE), T_rd_sae(EvexModifierRounding::T_RD_SAE), T_ru_sae(EvexModifierRounding::T_RU_SAE), T_rz_sae(EvexModifierRounding::T_RZ_SAE); +static const EvexModifierZero T_z; +#ifdef XBYAK64 +static const 
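+/* [editorial note, not part of upstream xbyak] On the nop/align pair above: nop(5) emits the single five-byte sequence 0F 1F 44 00 00 from nopTbl; nop(12) emits the nine-byte form followed by the three-byte form; align(16) pads to the next 16-byte boundary with these multi-byte NOPs unless useMultiByteNop is false. */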
Reg64 rax(Operand::RAX), rcx(Operand::RCX), rdx(Operand::RDX), rbx(Operand::RBX), rsp(Operand::RSP), rbp(Operand::RBP), rsi(Operand::RSI), rdi(Operand::RDI), r8(Operand::R8), r9(Operand::R9), r10(Operand::R10), r11(Operand::R11), r12(Operand::R12), r13(Operand::R13), r14(Operand::R14), r15(Operand::R15); +static const Reg32 r8d(8), r9d(9), r10d(10), r11d(11), r12d(12), r13d(13), r14d(14), r15d(15); +static const Reg16 r8w(8), r9w(9), r10w(10), r11w(11), r12w(12), r13w(13), r14w(14), r15w(15); +static const Reg8 r8b(8), r9b(9), r10b(10), r11b(11), r12b(12), r13b(13), r14b(14), r15b(15), spl(Operand::SPL, true), bpl(Operand::BPL, true), sil(Operand::SIL, true), dil(Operand::DIL, true); +static const Xmm xmm8(8), xmm9(9), xmm10(10), xmm11(11), xmm12(12), xmm13(13), xmm14(14), xmm15(15); +static const Xmm xmm16(16), xmm17(17), xmm18(18), xmm19(19), xmm20(20), xmm21(21), xmm22(22), xmm23(23); +static const Xmm xmm24(24), xmm25(25), xmm26(26), xmm27(27), xmm28(28), xmm29(29), xmm30(30), xmm31(31); +static const Ymm ymm8(8), ymm9(9), ymm10(10), ymm11(11), ymm12(12), ymm13(13), ymm14(14), ymm15(15); +static const Ymm ymm16(16), ymm17(17), ymm18(18), ymm19(19), ymm20(20), ymm21(21), ymm22(22), ymm23(23); +static const Ymm ymm24(24), ymm25(25), ymm26(26), ymm27(27), ymm28(28), ymm29(29), ymm30(30), ymm31(31); +static const Zmm zmm8(8), zmm9(9), zmm10(10), zmm11(11), zmm12(12), zmm13(13), zmm14(14), zmm15(15); +static const Zmm zmm16(16), zmm17(17), zmm18(18), zmm19(19), zmm20(20), zmm21(21), zmm22(22), zmm23(23); +static const Zmm zmm24(24), zmm25(25), zmm26(26), zmm27(27), zmm28(28), zmm29(29), zmm30(30), zmm31(31); +static const RegRip rip; +#endif +#ifndef XBYAK_DISABLE_SEGMENT +static const Segment es(Segment::es), cs(Segment::cs), ss(Segment::ss), ds(Segment::ds), fs(Segment::fs), gs(Segment::gs); +#endif +} // util + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + +} // end of namespace + +#endif // XBYAK_XBYAK_H_ diff --git a/vendor/github.com/byzantine-lab/mcl/src/xbyak/xbyak_mnemonic.h b/vendor/github.com/byzantine-lab/mcl/src/xbyak/xbyak_mnemonic.h new file mode 100644 index 000000000..766f2f6ec --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/src/xbyak/xbyak_mnemonic.h @@ -0,0 +1,1972 @@ +const char *getVersionString() const { return "5.751"; } +void adc(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x10, 2); } +void adc(const Operand& op1, const Operand& op2) { opRM_RM(op1, op2, 0x10); } +void adcx(const Reg32e& reg, const Operand& op) { opGen(reg, op, 0xF6, 0x66, isREG32_REG32orMEM, NONE, 0x38); } +void add(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x00, 0); } +void add(const Operand& op1, const Operand& op2) { opRM_RM(op1, op2, 0x00); } +void addpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x58, 0x66, isXMM_XMMorMEM); } +void addps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x58, 0x100, isXMM_XMMorMEM); } +void addsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x58, 0xF2, isXMM_XMMorMEM); } +void addss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x58, 0xF3, isXMM_XMMorMEM); } +void addsubpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xD0, 0x66, isXMM_XMMorMEM); } +void addsubps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xD0, 0xF2, isXMM_XMMorMEM); } +void adox(const Reg32e& reg, const Operand& op) { opGen(reg, op, 0xF6, 0xF3, isREG32_REG32orMEM, NONE, 0x38); } +void aesdec(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xDE, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void aesdeclast(const Xmm& xmm, const Operand& 
op) { opGen(xmm, op, 0xDF, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void aesenc(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xDC, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void aesenclast(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xDD, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void aesimc(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xDB, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void aeskeygenassist(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0xDF, 0x66, isXMM_XMMorMEM, imm, 0x3A); } +void and_(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x20, 4); } +void and_(const Operand& op1, const Operand& op2) { opRM_RM(op1, op2, 0x20); } +void andn(const Reg32e& r1, const Reg32e& r2, const Operand& op) { opGpr(r1, r2, op, T_0F38, 0xf2, true); } +void andnpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x55, 0x66, isXMM_XMMorMEM); } +void andnps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x55, 0x100, isXMM_XMMorMEM); } +void andpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x54, 0x66, isXMM_XMMorMEM); } +void andps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x54, 0x100, isXMM_XMMorMEM); } +void bextr(const Reg32e& r1, const Operand& op, const Reg32e& r2) { opGpr(r1, op, r2, T_0F38, 0xf7, false); } +void blendpd(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x0D, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } +void blendps(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x0C, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } +void blendvpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x15, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void blendvps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x14, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void blsi(const Reg32e& r, const Operand& op) { opGpr(Reg32e(3, r.getBit()), op, r, T_0F38, 0xf3, false); } +void blsmsk(const Reg32e& r, const Operand& op) { opGpr(Reg32e(2, r.getBit()), op, r, T_0F38, 0xf3, false); } +void blsr(const Reg32e& r, const Operand& op) { opGpr(Reg32e(1, r.getBit()), op, r, T_0F38, 0xf3, false); } +void bnd() { db(0xF2); } +void bndcl(const BoundsReg& bnd, const Operand& op) { db(0xF3); opR_ModM(op, i32e, bnd.getIdx(), 0x0F, 0x1A, NONE, !op.isMEM()); } +void bndcn(const BoundsReg& bnd, const Operand& op) { db(0xF2); opR_ModM(op, i32e, bnd.getIdx(), 0x0F, 0x1B, NONE, !op.isMEM()); } +void bndcu(const BoundsReg& bnd, const Operand& op) { db(0xF2); opR_ModM(op, i32e, bnd.getIdx(), 0x0F, 0x1A, NONE, !op.isMEM()); } +void bndldx(const BoundsReg& bnd, const Address& addr) { opMIB(addr, bnd, 0x0F, 0x1A); } +void bndmk(const BoundsReg& bnd, const Address& addr) { db(0xF3); opModM(addr, bnd, 0x0F, 0x1B); } +void bndmov(const Address& addr, const BoundsReg& bnd) { db(0x66); opModM(addr, bnd, 0x0F, 0x1B); } +void bndmov(const BoundsReg& bnd, const Operand& op) { db(0x66); opModRM(bnd, op, op.isBNDREG(), op.isMEM(), 0x0F, 0x1A); } +void bndstx(const Address& addr, const BoundsReg& bnd) { opMIB(addr, bnd, 0x0F, 0x1B); } +void bsf(const Reg&reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0xBC); } +void bsr(const Reg&reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0xBD); } +void bswap(const Reg32e& reg) { opModR(Reg32(1), reg, 0x0F); } +void bt(const Operand& op, const Reg& reg) { opModRM(reg, op, op.isREG(16|32|64) && op.getBit() == reg.getBit(), op.isMEM(), 0x0f, 0xA3); } +void bt(const Operand& op, uint8 imm) { opR_ModM(op, 16|32|64, 4, 0x0f, 0xba, NONE, false, 1); db(imm); } +void
btc(const Operand& op, const Reg& reg) { opModRM(reg, op, op.isREG(16|32|64) && op.getBit() == reg.getBit(), op.isMEM(), 0x0f, 0xBB); } +void btc(const Operand& op, uint8 imm) { opR_ModM(op, 16|32|64, 7, 0x0f, 0xba, NONE, false, 1); db(imm); } +void btr(const Operand& op, const Reg& reg) { opModRM(reg, op, op.isREG(16|32|64) && op.getBit() == reg.getBit(), op.isMEM(), 0x0f, 0xB3); } +void btr(const Operand& op, uint8 imm) { opR_ModM(op, 16|32|64, 6, 0x0f, 0xba, NONE, false, 1); db(imm); } +void bts(const Operand& op, const Reg& reg) { opModRM(reg, op, op.isREG(16|32|64) && op.getBit() == reg.getBit(), op.isMEM(), 0x0f, 0xAB); } +void bts(const Operand& op, uint8 imm) { opR_ModM(op, 16|32|64, 5, 0x0f, 0xba, NONE, false, 1); db(imm); } +void bzhi(const Reg32e& r1, const Operand& op, const Reg32e& r2) { opGpr(r1, op, r2, T_0F38, 0xf5, false); } +void cbw() { db(0x66); db(0x98); } +void cdq() { db(0x99); } +void clc() { db(0xF8); } +void cld() { db(0xFC); } +void clflush(const Address& addr) { opModM(addr, Reg32(7), 0x0F, 0xAE); } +void cli() { db(0xFA); } +void cmc() { db(0xF5); } +void cmova(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 7); }//-V524 +void cmovae(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 3); }//-V524 +void cmovb(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 2); }//-V524 +void cmovbe(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 6); }//-V524 +void cmovc(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 2); }//-V524 +void cmove(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 4); }//-V524 +void cmovg(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 15); }//-V524 +void cmovge(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 13); }//-V524 +void cmovl(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 12); }//-V524 +void cmovle(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 14); }//-V524 +void cmovna(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 6); }//-V524 +void cmovnae(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 2); }//-V524 +void cmovnb(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 3); }//-V524 +void cmovnbe(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 7); }//-V524 +void cmovnc(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 3); }//-V524 +void cmovne(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 5); }//-V524 +void cmovng(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 14); }//-V524 +void cmovnge(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 12); }//-V524 +void cmovnl(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 13); }//-V524 +void cmovnle(const Reg& reg, const Operand& op) { 
opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 15); }//-V524 +void cmovno(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 1); }//-V524 +void cmovnp(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 11); }//-V524 +void cmovns(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 9); }//-V524 +void cmovnz(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 5); }//-V524 +void cmovo(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 0); }//-V524 +void cmovp(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 10); }//-V524 +void cmovpe(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 10); }//-V524 +void cmovpo(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 11); }//-V524 +void cmovs(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 8); }//-V524 +void cmovz(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 4); }//-V524 +void cmp(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x38, 7); } +void cmp(const Operand& op1, const Operand& op2) { opRM_RM(op1, op2, 0x38); } +void cmpeqpd(const Xmm& x, const Operand& op) { cmppd(x, op, 0); } +void cmpeqps(const Xmm& x, const Operand& op) { cmpps(x, op, 0); } +void cmpeqsd(const Xmm& x, const Operand& op) { cmpsd(x, op, 0); } +void cmpeqss(const Xmm& x, const Operand& op) { cmpss(x, op, 0); } +void cmplepd(const Xmm& x, const Operand& op) { cmppd(x, op, 2); } +void cmpleps(const Xmm& x, const Operand& op) { cmpps(x, op, 2); } +void cmplesd(const Xmm& x, const Operand& op) { cmpsd(x, op, 2); } +void cmpless(const Xmm& x, const Operand& op) { cmpss(x, op, 2); } +void cmpltpd(const Xmm& x, const Operand& op) { cmppd(x, op, 1); } +void cmpltps(const Xmm& x, const Operand& op) { cmpps(x, op, 1); } +void cmpltsd(const Xmm& x, const Operand& op) { cmpsd(x, op, 1); } +void cmpltss(const Xmm& x, const Operand& op) { cmpss(x, op, 1); } +void cmpneqpd(const Xmm& x, const Operand& op) { cmppd(x, op, 4); } +void cmpneqps(const Xmm& x, const Operand& op) { cmpps(x, op, 4); } +void cmpneqsd(const Xmm& x, const Operand& op) { cmpsd(x, op, 4); } +void cmpneqss(const Xmm& x, const Operand& op) { cmpss(x, op, 4); } +void cmpnlepd(const Xmm& x, const Operand& op) { cmppd(x, op, 6); } +void cmpnleps(const Xmm& x, const Operand& op) { cmpps(x, op, 6); } +void cmpnlesd(const Xmm& x, const Operand& op) { cmpsd(x, op, 6); } +void cmpnless(const Xmm& x, const Operand& op) { cmpss(x, op, 6); } +void cmpnltpd(const Xmm& x, const Operand& op) { cmppd(x, op, 5); } +void cmpnltps(const Xmm& x, const Operand& op) { cmpps(x, op, 5); } +void cmpnltsd(const Xmm& x, const Operand& op) { cmpsd(x, op, 5); } +void cmpnltss(const Xmm& x, const Operand& op) { cmpss(x, op, 5); } +void cmpordpd(const Xmm& x, const Operand& op) { cmppd(x, op, 7); } +void cmpordps(const Xmm& x, const Operand& op) { cmpps(x, op, 7); } +void cmpordsd(const Xmm& x, const Operand& op) { cmpsd(x, op, 7); } +void cmpordss(const Xmm& x, const Operand& op) { cmpss(x, op, 7); } +void cmppd(const Xmm& xmm, const Operand& op, uint8 imm8) { opGen(xmm, op, 0xC2, 0x66, isXMM_XMMorMEM, imm8); } +void cmpps(const Xmm& xmm, const 
Operand& op, uint8 imm8) { opGen(xmm, op, 0xC2, 0x100, isXMM_XMMorMEM, imm8); } +void cmpsb() { db(0xA6); } +void cmpsd() { db(0xA7); } +void cmpsd(const Xmm& xmm, const Operand& op, uint8 imm8) { opGen(xmm, op, 0xC2, 0xF2, isXMM_XMMorMEM, imm8); } +void cmpss(const Xmm& xmm, const Operand& op, uint8 imm8) { opGen(xmm, op, 0xC2, 0xF3, isXMM_XMMorMEM, imm8); } +void cmpsw() { db(0x66); db(0xA7); } +void cmpunordpd(const Xmm& x, const Operand& op) { cmppd(x, op, 3); } +void cmpunordps(const Xmm& x, const Operand& op) { cmpps(x, op, 3); } +void cmpunordsd(const Xmm& x, const Operand& op) { cmpsd(x, op, 3); } +void cmpunordss(const Xmm& x, const Operand& op) { cmpss(x, op, 3); } +void cmpxchg(const Operand& op, const Reg& reg) { opModRM(reg, op, (op.isREG() && reg.isREG() && op.getBit() == reg.getBit()), op.isMEM(), 0x0F, 0xB0 | (reg.isBit(8) ? 0 : 1)); } +void cmpxchg8b(const Address& addr) { opModM(addr, Reg32(1), 0x0F, 0xC7); } +void comisd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x2F, 0x66, isXMM_XMMorMEM); } +void comiss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x2F, 0x100, isXMM_XMMorMEM); } +void cpuid() { db(0x0F); db(0xA2); } +void crc32(const Reg32e& reg, const Operand& op) { if (reg.isBit(32) && op.isBit(16)) db(0x66); db(0xF2); opModRM(reg, op, op.isREG(), op.isMEM(), 0x0F, 0x38, 0xF0 | (op.isBit(8) ? 0 : 1)); } +void cvtdq2pd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xE6, 0xF3, isXMM_XMMorMEM); } +void cvtdq2ps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5B, 0x100, isXMM_XMMorMEM); } +void cvtpd2dq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xE6, 0xF2, isXMM_XMMorMEM); } +void cvtpd2pi(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2D, 0x66, isMMX_XMMorMEM); } +void cvtpd2ps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5A, 0x66, isXMM_XMMorMEM); } +void cvtpi2pd(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2A, 0x66, isXMM_MMXorMEM); } +void cvtpi2ps(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2A, 0x100, isXMM_MMXorMEM); } +void cvtps2dq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5B, 0x66, isXMM_XMMorMEM); } +void cvtps2pd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5A, 0x100, isXMM_XMMorMEM); } +void cvtps2pi(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2D, 0x100, isMMX_XMMorMEM); } +void cvtsd2si(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2D, 0xF2, isREG32_XMMorMEM); } +void cvtsd2ss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5A, 0xF2, isXMM_XMMorMEM); } +void cvtsi2sd(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2A, 0xF2, isXMM_REG32orMEM); } +void cvtsi2ss(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2A, 0xF3, isXMM_REG32orMEM); } +void cvtss2sd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5A, 0xF3, isXMM_XMMorMEM); } +void cvtss2si(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2D, 0xF3, isREG32_XMMorMEM); } +void cvttpd2dq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xE6, 0x66, isXMM_XMMorMEM); } +void cvttpd2pi(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2C, 0x66, isMMX_XMMorMEM); } +void cvttps2dq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5B, 0xF3, isXMM_XMMorMEM); } +void cvttps2pi(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2C, 0x100, isMMX_XMMorMEM); } +void cvttsd2si(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2C, 0xF2, isREG32_XMMorMEM); } +void cvttss2si(const Operand& reg, const Operand& op) { opGen(reg, 
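+/* [editorial note, not part of upstream xbyak] cmpsd above is deliberately overloaded: the zero-argument form is the string instruction CMPSD (opcode A7), while the three-argument form is the SSE2 compare CMPSD xmm, xmm/m64, imm8 (F2 0F C2 /r ib); cmpeqsd, cmpltsd, etc. are aliases that fix imm8. */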
op, 0x2C, 0xF3, isREG32_XMMorMEM); } +void cwd() { db(0x66); db(0x99); } +void cwde() { db(0x98); } +void dec(const Operand& op) { opIncDec(op, 0x48, 1); } +void div(const Operand& op) { opR_ModM(op, 0, 6, 0xF6); } +void divpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5E, 0x66, isXMM_XMMorMEM); } +void divps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5E, 0x100, isXMM_XMMorMEM); } +void divsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5E, 0xF2, isXMM_XMMorMEM); } +void divss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5E, 0xF3, isXMM_XMMorMEM); } +void dppd(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x41, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } +void dpps(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x40, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } +void emms() { db(0x0F); db(0x77); } +void extractps(const Operand& op, const Xmm& xmm, uint8 imm) { opExt(op, xmm, 0x17, imm); } +void f2xm1() { db(0xD9); db(0xF0); } +void fabs() { db(0xD9); db(0xE1); } +void fadd(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 0, 0); } +void fadd(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xD8C0, 0xDCC0); } +void fadd(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xD8C0, 0xDCC0); } +void faddp() { db(0xDE); db(0xC1); } +void faddp(const Fpu& reg1) { opFpuFpu(reg1, st0, 0x0000, 0xDEC0); } +void faddp(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0x0000, 0xDEC0); } +void fchs() { db(0xD9); db(0xE0); } +void fcmovb(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDAC0, 0x00C0); } +void fcmovb(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDAC0, 0x00C0); } +void fcmovbe(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDAD0, 0x00D0); } +void fcmovbe(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDAD0, 0x00D0); } +void fcmove(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDAC8, 0x00C8); } +void fcmove(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDAC8, 0x00C8); } +void fcmovnb(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDBC0, 0x00C0); } +void fcmovnb(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDBC0, 0x00C0); } +void fcmovnbe(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDBD0, 0x00D0); } +void fcmovnbe(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDBD0, 0x00D0); } +void fcmovne(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDBC8, 0x00C8); } +void fcmovne(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDBC8, 0x00C8); } +void fcmovnu(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDBD8, 0x00D8); } +void fcmovnu(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDBD8, 0x00D8); } +void fcmovu(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDAD8, 0x00D8); } +void fcmovu(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDAD8, 0x00D8); } +void fcom() { db(0xD8); db(0xD1); } +void fcom(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 2, 0); } +void fcom(const Fpu& reg) { opFpu(reg, 0xD8, 0xD0); } +void fcomi(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDBF0, 0x00F0); } +void fcomi(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDBF0, 0x00F0); } +void fcomip(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDFF0, 0x00F0); } +void fcomip(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDFF0, 0x00F0); } +void fcomp() { db(0xD8); db(0xD9); } +void fcomp(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 3, 0); } +void fcomp(const Fpu& reg) { opFpu(reg, 0xD8, 0xD8); } +void fcompp() { db(0xDE); db(0xD9); } +void fcos() { db(0xD9); db(0xFF); } +void
fdecstp() { db(0xD9); db(0xF6); } +void fdiv(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 6, 0); } +void fdiv(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xD8F0, 0xDCF8); } +void fdiv(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xD8F0, 0xDCF8); } +void fdivp() { db(0xDE); db(0xF9); } +void fdivp(const Fpu& reg1) { opFpuFpu(reg1, st0, 0x0000, 0xDEF8); } +void fdivp(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0x0000, 0xDEF8); } +void fdivr(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 7, 0); } +void fdivr(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xD8F8, 0xDCF0); } +void fdivr(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xD8F8, 0xDCF0); } +void fdivrp() { db(0xDE); db(0xF1); } +void fdivrp(const Fpu& reg1) { opFpuFpu(reg1, st0, 0x0000, 0xDEF0); } +void fdivrp(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0x0000, 0xDEF0); } +void ffree(const Fpu& reg) { opFpu(reg, 0xDD, 0xC0); } +void fiadd(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 0, 0); } +void ficom(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 2, 0); } +void ficomp(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 3, 0); } +void fidiv(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 6, 0); } +void fidivr(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 7, 0); } +void fild(const Address& addr) { opFpuMem(addr, 0xDF, 0xDB, 0xDF, 0, 5); } +void fimul(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 1, 0); } +void fincstp() { db(0xD9); db(0xF7); } +void finit() { db(0x9B); db(0xDB); db(0xE3); } +void fist(const Address& addr) { opFpuMem(addr, 0xDF, 0xDB, 0x00, 2, 0); } +void fistp(const Address& addr) { opFpuMem(addr, 0xDF, 0xDB, 0xDF, 3, 7); } +void fisttp(const Address& addr) { opFpuMem(addr, 0xDF, 0xDB, 0xDD, 1, 0); } +void fisub(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 4, 0); } +void fisubr(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 5, 0); } +void fld(const Address& addr) { opFpuMem(addr, 0x00, 0xD9, 0xDD, 0, 0); } +void fld(const Fpu& reg) { opFpu(reg, 0xD9, 0xC0); } +void fld1() { db(0xD9); db(0xE8); } +void fldcw(const Address& addr) { opModM(addr, Reg32(5), 0xD9, 0x100); } +void fldl2e() { db(0xD9); db(0xEA); } +void fldl2t() { db(0xD9); db(0xE9); } +void fldlg2() { db(0xD9); db(0xEC); } +void fldln2() { db(0xD9); db(0xED); } +void fldpi() { db(0xD9); db(0xEB); } +void fldz() { db(0xD9); db(0xEE); } +void fmul(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 1, 0); } +void fmul(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xD8C8, 0xDCC8); } +void fmul(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xD8C8, 0xDCC8); } +void fmulp() { db(0xDE); db(0xC9); } +void fmulp(const Fpu& reg1) { opFpuFpu(reg1, st0, 0x0000, 0xDEC8); } +void fmulp(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0x0000, 0xDEC8); } +void fninit() { db(0xDB); db(0xE3); } +void fnop() { db(0xD9); db(0xD0); } +void fpatan() { db(0xD9); db(0xF3); } +void fprem() { db(0xD9); db(0xF8); } +void fprem1() { db(0xD9); db(0xF5); } +void fptan() { db(0xD9); db(0xF2); } +void frndint() { db(0xD9); db(0xFC); } +void fscale() { db(0xD9); db(0xFD); } +void fsin() { db(0xD9); db(0xFE); } +void fsincos() { db(0xD9); db(0xFB); } +void fsqrt() { db(0xD9); db(0xFA); } +void fst(const Address& addr) { opFpuMem(addr, 0x00, 0xD9, 0xDD, 2, 0); } +void fst(const Fpu& reg) { opFpu(reg, 0xDD, 0xD0); } +void fstcw(const Address& addr) { db(0x9B); opModM(addr, Reg32(7), 0xD9, NONE); } +void fstp(const Address& addr) { opFpuMem(addr, 
0x00, 0xD9, 0xDD, 3, 0); } +void fstp(const Fpu& reg) { opFpu(reg, 0xDD, 0xD8); } +void fsub(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 4, 0); } +void fsub(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xD8E0, 0xDCE8); } +void fsub(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xD8E0, 0xDCE8); } +void fsubp() { db(0xDE); db(0xE9); } +void fsubp(const Fpu& reg1) { opFpuFpu(reg1, st0, 0x0000, 0xDEE8); } +void fsubp(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0x0000, 0xDEE8); } +void fsubr(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 5, 0); } +void fsubr(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xD8E8, 0xDCE0); } +void fsubr(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xD8E8, 0xDCE0); } +void fsubrp() { db(0xDE); db(0xE1); } +void fsubrp(const Fpu& reg1) { opFpuFpu(reg1, st0, 0x0000, 0xDEE0); } +void fsubrp(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0x0000, 0xDEE0); } +void ftst() { db(0xD9); db(0xE4); } +void fucom() { db(0xDD); db(0xE1); } +void fucom(const Fpu& reg) { opFpu(reg, 0xDD, 0xE0); } +void fucomi(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDBE8, 0x00E8); } +void fucomi(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDBE8, 0x00E8); } +void fucomip(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDFE8, 0x00E8); } +void fucomip(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDFE8, 0x00E8); } +void fucomp() { db(0xDD); db(0xE9); } +void fucomp(const Fpu& reg) { opFpu(reg, 0xDD, 0xE8); } +void fucompp() { db(0xDA); db(0xE9); } +void fwait() { db(0x9B); } +void fxam() { db(0xD9); db(0xE5); } +void fxch() { db(0xD9); db(0xC9); } +void fxch(const Fpu& reg) { opFpu(reg, 0xD9, 0xC8); } +void fxtract() { db(0xD9); db(0xF4); } +void fyl2x() { db(0xD9); db(0xF1); } +void fyl2xp1() { db(0xD9); db(0xF9); } +void gf2p8affineinvqb(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0xCF, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } +void gf2p8affineqb(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0xCE, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } +void gf2p8mulb(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xCF, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void haddpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x7C, 0x66, isXMM_XMMorMEM); } +void haddps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x7C, 0xF2, isXMM_XMMorMEM); } +void hsubpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x7D, 0x66, isXMM_XMMorMEM); } +void hsubps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x7D, 0xF2, isXMM_XMMorMEM); } +void idiv(const Operand& op) { opR_ModM(op, 0, 7, 0xF6); } +void imul(const Operand& op) { opR_ModM(op, 0, 5, 0xF6); } +void inc(const Operand& op) { opIncDec(op, 0x40, 0); } +void insertps(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x21, 0x66, isXMM_XMMorMEM, imm, 0x3A); } +void ja(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x77, 0x87, 0x0F); }//-V524 +void ja(const char *label, LabelType type = T_AUTO) { ja(std::string(label), type); }//-V524 +void ja(const void *addr) { opJmpAbs(addr, T_NEAR, 0x77, 0x87, 0x0F); }//-V524 +void ja(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x77, 0x87, 0x0F); }//-V524 +void jae(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x73, 0x83, 0x0F); }//-V524 +void jae(const char *label, LabelType type = T_AUTO) { jae(std::string(label), type); }//-V524 +void jae(const void *addr) { opJmpAbs(addr, T_NEAR, 0x73, 0x83, 0x0F); }//-V524 +void
jae(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x73, 0x83, 0x0F); }//-V524 +void jb(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x72, 0x82, 0x0F); }//-V524 +void jb(const char *label, LabelType type = T_AUTO) { jb(std::string(label), type); }//-V524 +void jb(const void *addr) { opJmpAbs(addr, T_NEAR, 0x72, 0x82, 0x0F); }//-V524 +void jb(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x72, 0x82, 0x0F); }//-V524 +void jbe(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x76, 0x86, 0x0F); }//-V524 +void jbe(const char *label, LabelType type = T_AUTO) { jbe(std::string(label), type); }//-V524 +void jbe(const void *addr) { opJmpAbs(addr, T_NEAR, 0x76, 0x86, 0x0F); }//-V524 +void jbe(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x76, 0x86, 0x0F); }//-V524 +void jc(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x72, 0x82, 0x0F); }//-V524 +void jc(const char *label, LabelType type = T_AUTO) { jc(std::string(label), type); }//-V524 +void jc(const void *addr) { opJmpAbs(addr, T_NEAR, 0x72, 0x82, 0x0F); }//-V524 +void jc(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x72, 0x82, 0x0F); }//-V524 +void je(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x74, 0x84, 0x0F); }//-V524 +void je(const char *label, LabelType type = T_AUTO) { je(std::string(label), type); }//-V524 +void je(const void *addr) { opJmpAbs(addr, T_NEAR, 0x74, 0x84, 0x0F); }//-V524 +void je(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x74, 0x84, 0x0F); }//-V524 +void jg(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7F, 0x8F, 0x0F); }//-V524 +void jg(const char *label, LabelType type = T_AUTO) { jg(std::string(label), type); }//-V524 +void jg(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7F, 0x8F, 0x0F); }//-V524 +void jg(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7F, 0x8F, 0x0F); }//-V524 +void jge(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7D, 0x8D, 0x0F); }//-V524 +void jge(const char *label, LabelType type = T_AUTO) { jge(std::string(label), type); }//-V524 +void jge(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7D, 0x8D, 0x0F); }//-V524 +void jge(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7D, 0x8D, 0x0F); }//-V524 +void jl(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7C, 0x8C, 0x0F); }//-V524 +void jl(const char *label, LabelType type = T_AUTO) { jl(std::string(label), type); }//-V524 +void jl(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7C, 0x8C, 0x0F); }//-V524 +void jl(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7C, 0x8C, 0x0F); }//-V524 +void jle(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7E, 0x8E, 0x0F); }//-V524 +void jle(const char *label, LabelType type = T_AUTO) { jle(std::string(label), type); }//-V524 +void jle(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7E, 0x8E, 0x0F); }//-V524 +void jle(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7E, 0x8E, 0x0F); }//-V524 +void jna(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x76, 0x86, 0x0F); }//-V524 +void jna(const char *label, LabelType type = T_AUTO) { jna(std::string(label), type); }//-V524 +void jna(const void *addr) { opJmpAbs(addr, T_NEAR, 0x76, 0x86, 0x0F); }//-V524 +void jna(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x76, 0x86, 0x0F); }//-V524 +void jnae(const Label& 
label, LabelType type = T_AUTO) { opJmp(label, type, 0x72, 0x82, 0x0F); }//-V524 +void jnae(const char *label, LabelType type = T_AUTO) { jnae(std::string(label), type); }//-V524 +void jnae(const void *addr) { opJmpAbs(addr, T_NEAR, 0x72, 0x82, 0x0F); }//-V524 +void jnae(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x72, 0x82, 0x0F); }//-V524 +void jnb(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x73, 0x83, 0x0F); }//-V524 +void jnb(const char *label, LabelType type = T_AUTO) { jnb(std::string(label), type); }//-V524 +void jnb(const void *addr) { opJmpAbs(addr, T_NEAR, 0x73, 0x83, 0x0F); }//-V524 +void jnb(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x73, 0x83, 0x0F); }//-V524 +void jnbe(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x77, 0x87, 0x0F); }//-V524 +void jnbe(const char *label, LabelType type = T_AUTO) { jnbe(std::string(label), type); }//-V524 +void jnbe(const void *addr) { opJmpAbs(addr, T_NEAR, 0x77, 0x87, 0x0F); }//-V524 +void jnbe(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x77, 0x87, 0x0F); }//-V524 +void jnc(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x73, 0x83, 0x0F); }//-V524 +void jnc(const char *label, LabelType type = T_AUTO) { jnc(std::string(label), type); }//-V524 +void jnc(const void *addr) { opJmpAbs(addr, T_NEAR, 0x73, 0x83, 0x0F); }//-V524 +void jnc(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x73, 0x83, 0x0F); }//-V524 +void jne(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x75, 0x85, 0x0F); }//-V524 +void jne(const char *label, LabelType type = T_AUTO) { jne(std::string(label), type); }//-V524 +void jne(const void *addr) { opJmpAbs(addr, T_NEAR, 0x75, 0x85, 0x0F); }//-V524 +void jne(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x75, 0x85, 0x0F); }//-V524 +void jng(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7E, 0x8E, 0x0F); }//-V524 +void jng(const char *label, LabelType type = T_AUTO) { jng(std::string(label), type); }//-V524 +void jng(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7E, 0x8E, 0x0F); }//-V524 +void jng(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7E, 0x8E, 0x0F); }//-V524 +void jnge(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7C, 0x8C, 0x0F); }//-V524 +void jnge(const char *label, LabelType type = T_AUTO) { jnge(std::string(label), type); }//-V524 +void jnge(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7C, 0x8C, 0x0F); }//-V524 +void jnge(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7C, 0x8C, 0x0F); }//-V524 +void jnl(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7D, 0x8D, 0x0F); }//-V524 +void jnl(const char *label, LabelType type = T_AUTO) { jnl(std::string(label), type); }//-V524 +void jnl(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7D, 0x8D, 0x0F); }//-V524 +void jnl(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7D, 0x8D, 0x0F); }//-V524 +void jnle(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7F, 0x8F, 0x0F); }//-V524 +void jnle(const char *label, LabelType type = T_AUTO) { jnle(std::string(label), type); }//-V524 +void jnle(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7F, 0x8F, 0x0F); }//-V524 +void jnle(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7F, 0x8F, 0x0F); }//-V524 +void jno(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x71, 0x81, 0x0F); 
}//-V524 +void jno(const char *label, LabelType type = T_AUTO) { jno(std::string(label), type); }//-V524 +void jno(const void *addr) { opJmpAbs(addr, T_NEAR, 0x71, 0x81, 0x0F); }//-V524 +void jno(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x71, 0x81, 0x0F); }//-V524 +void jnp(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7B, 0x8B, 0x0F); }//-V524 +void jnp(const char *label, LabelType type = T_AUTO) { jnp(std::string(label), type); }//-V524 +void jnp(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7B, 0x8B, 0x0F); }//-V524 +void jnp(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7B, 0x8B, 0x0F); }//-V524 +void jns(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x79, 0x89, 0x0F); }//-V524 +void jns(const char *label, LabelType type = T_AUTO) { jns(std::string(label), type); }//-V524 +void jns(const void *addr) { opJmpAbs(addr, T_NEAR, 0x79, 0x89, 0x0F); }//-V524 +void jns(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x79, 0x89, 0x0F); }//-V524 +void jnz(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x75, 0x85, 0x0F); }//-V524 +void jnz(const char *label, LabelType type = T_AUTO) { jnz(std::string(label), type); }//-V524 +void jnz(const void *addr) { opJmpAbs(addr, T_NEAR, 0x75, 0x85, 0x0F); }//-V524 +void jnz(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x75, 0x85, 0x0F); }//-V524 +void jo(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x70, 0x80, 0x0F); }//-V524 +void jo(const char *label, LabelType type = T_AUTO) { jo(std::string(label), type); }//-V524 +void jo(const void *addr) { opJmpAbs(addr, T_NEAR, 0x70, 0x80, 0x0F); }//-V524 +void jo(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x70, 0x80, 0x0F); }//-V524 +void jp(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7A, 0x8A, 0x0F); }//-V524 +void jp(const char *label, LabelType type = T_AUTO) { jp(std::string(label), type); }//-V524 +void jp(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7A, 0x8A, 0x0F); }//-V524 +void jp(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7A, 0x8A, 0x0F); }//-V524 +void jpe(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7A, 0x8A, 0x0F); }//-V524 +void jpe(const char *label, LabelType type = T_AUTO) { jpe(std::string(label), type); }//-V524 +void jpe(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7A, 0x8A, 0x0F); }//-V524 +void jpe(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7A, 0x8A, 0x0F); }//-V524 +void jpo(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7B, 0x8B, 0x0F); }//-V524 +void jpo(const char *label, LabelType type = T_AUTO) { jpo(std::string(label), type); }//-V524 +void jpo(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7B, 0x8B, 0x0F); }//-V524 +void jpo(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7B, 0x8B, 0x0F); }//-V524 +void js(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x78, 0x88, 0x0F); }//-V524 +void js(const char *label, LabelType type = T_AUTO) { js(std::string(label), type); }//-V524 +void js(const void *addr) { opJmpAbs(addr, T_NEAR, 0x78, 0x88, 0x0F); }//-V524 +void js(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x78, 0x88, 0x0F); }//-V524 +void jz(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x74, 0x84, 0x0F); }//-V524 +void jz(const char *label, LabelType type = T_AUTO) { jz(std::string(label), type); }//-V524 +void 
jz(const void *addr) { opJmpAbs(addr, T_NEAR, 0x74, 0x84, 0x0F); }//-V524 +void jz(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x74, 0x84, 0x0F); }//-V524 +void lahf() { db(0x9F); } +void lddqu(const Xmm& xmm, const Address& addr) { db(0xF2); opModM(addr, xmm, 0x0F, 0xF0); } +void ldmxcsr(const Address& addr) { opModM(addr, Reg32(2), 0x0F, 0xAE); } +void lea(const Reg& reg, const Address& addr) { if (!reg.isBit(16 | i32e)) throw Error(ERR_BAD_SIZE_OF_REGISTER); opModM(addr, reg, 0x8D); } +void lfence() { db(0x0F); db(0xAE); db(0xE8); } +void lock() { db(0xF0); } +void lzcnt(const Reg& reg, const Operand& op) { opSp1(reg, op, 0xF3, 0x0F, 0xBD); } +void maskmovdqu(const Xmm& reg1, const Xmm& reg2) { db(0x66); opModR(reg1, reg2, 0x0F, 0xF7); } +void maskmovq(const Mmx& reg1, const Mmx& reg2) { if (!reg1.isMMX() || !reg2.isMMX()) throw Error(ERR_BAD_COMBINATION); opModR(reg1, reg2, 0x0F, 0xF7); } +void maxpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5F, 0x66, isXMM_XMMorMEM); } +void maxps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5F, 0x100, isXMM_XMMorMEM); } +void maxsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5F, 0xF2, isXMM_XMMorMEM); } +void maxss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5F, 0xF3, isXMM_XMMorMEM); } +void mfence() { db(0x0F); db(0xAE); db(0xF0); } +void minpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5D, 0x66, isXMM_XMMorMEM); } +void minps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5D, 0x100, isXMM_XMMorMEM); } +void minsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5D, 0xF2, isXMM_XMMorMEM); } +void minss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5D, 0xF3, isXMM_XMMorMEM); } +void monitor() { db(0x0F); db(0x01); db(0xC8); } +void movapd(const Address& addr, const Xmm& xmm) { db(0x66); opModM(addr, xmm, 0x0F, 0x29); } +void movapd(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x28, 0x66); } +void movaps(const Address& addr, const Xmm& xmm) { opModM(addr, xmm, 0x0F, 0x29); } +void movaps(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x28, 0x100); } +void movbe(const Address& addr, const Reg& reg) { opModM(addr, reg, 0x0F, 0x38, 0xF1); } +void movbe(const Reg& reg, const Address& addr) { opModM(addr, reg, 0x0F, 0x38, 0xF0); } +void movd(const Address& addr, const Mmx& mmx) { if (mmx.isXMM()) db(0x66); opModM(addr, mmx, 0x0F, 0x7E); } +void movd(const Mmx& mmx, const Address& addr) { if (mmx.isXMM()) db(0x66); opModM(addr, mmx, 0x0F, 0x6E); } +void movd(const Mmx& mmx, const Reg32& reg) { if (mmx.isXMM()) db(0x66); opModR(mmx, reg, 0x0F, 0x6E); } +void movd(const Reg32& reg, const Mmx& mmx) { if (mmx.isXMM()) db(0x66); opModR(mmx, reg, 0x0F, 0x7E); } +void movddup(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x12, 0xF2, isXMM_XMMorMEM, NONE, NONE); } +void movdq2q(const Mmx& mmx, const Xmm& xmm) { db(0xF2); opModR(mmx, xmm, 0x0F, 0xD6); } +void movdqa(const Address& addr, const Xmm& xmm) { db(0x66); opModM(addr, xmm, 0x0F, 0x7F); } +void movdqa(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x6F, 0x66); } +void movdqu(const Address& addr, const Xmm& xmm) { db(0xF3); opModM(addr, xmm, 0x0F, 0x7F); } +void movdqu(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x6F, 0xF3); } +void movhlps(const Xmm& reg1, const Xmm& reg2) { opModR(reg1, reg2, 0x0F, 0x12); } +void movhpd(const Operand& op1, const Operand& op2) { opMovXMM(op1, op2, 0x16, 0x66); } +void movhps(const Operand& op1, const Operand& op2) { opMovXMM(op1, op2, 0x16, 0x100); }
+void movlhps(const Xmm& reg1, const Xmm& reg2) { opModR(reg1, reg2, 0x0F, 0x16); } +void movlpd(const Operand& op1, const Operand& op2) { opMovXMM(op1, op2, 0x12, 0x66); } +void movlps(const Operand& op1, const Operand& op2) { opMovXMM(op1, op2, 0x12, 0x100); } +void movmskpd(const Reg32e& reg, const Xmm& xmm) { db(0x66); movmskps(reg, xmm); } +void movmskps(const Reg32e& reg, const Xmm& xmm) { opModR(reg, xmm, 0x0F, 0x50); } +void movntdq(const Address& addr, const Xmm& reg) { opModM(addr, Reg16(reg.getIdx()), 0x0F, 0xE7); } +void movntdqa(const Xmm& xmm, const Address& addr) { db(0x66); opModM(addr, xmm, 0x0F, 0x38, 0x2A); } +void movnti(const Address& addr, const Reg32e& reg) { opModM(addr, reg, 0x0F, 0xC3); } +void movntpd(const Address& addr, const Xmm& reg) { opModM(addr, Reg16(reg.getIdx()), 0x0F, 0x2B); } +void movntps(const Address& addr, const Xmm& xmm) { opModM(addr, Mmx(xmm.getIdx()), 0x0F, 0x2B); } +void movntq(const Address& addr, const Mmx& mmx) { if (!mmx.isMMX()) throw Error(ERR_BAD_COMBINATION); opModM(addr, mmx, 0x0F, 0xE7); } +void movq(const Address& addr, const Mmx& mmx) { if (mmx.isXMM()) db(0x66); opModM(addr, mmx, 0x0F, mmx.isXMM() ? 0xD6 : 0x7F); } +void movq(const Mmx& mmx, const Operand& op) { if (mmx.isXMM()) db(0xF3); opModRM(mmx, op, (mmx.getKind() == op.getKind()), op.isMEM(), 0x0F, mmx.isXMM() ? 0x7E : 0x6F); } +void movq2dq(const Xmm& xmm, const Mmx& mmx) { db(0xF3); opModR(xmm, mmx, 0x0F, 0xD6); } +void movsb() { db(0xA4); } +void movsd() { db(0xA5); } +void movsd(const Address& addr, const Xmm& xmm) { db(0xF2); opModM(addr, xmm, 0x0F, 0x11); } +void movsd(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x10, 0xF2); } +void movshdup(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x16, 0xF3, isXMM_XMMorMEM, NONE, NONE); } +void movsldup(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x12, 0xF3, isXMM_XMMorMEM, NONE, NONE); } +void movss(const Address& addr, const Xmm& xmm) { db(0xF3); opModM(addr, xmm, 0x0F, 0x11); } +void movss(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x10, 0xF3); } +void movsw() { db(0x66); db(0xA5); } +void movsx(const Reg& reg, const Operand& op) { opMovxx(reg, op, 0xBE); } +void movupd(const Address& addr, const Xmm& xmm) { db(0x66); opModM(addr, xmm, 0x0F, 0x11); } +void movupd(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x10, 0x66); } +void movups(const Address& addr, const Xmm& xmm) { opModM(addr, xmm, 0x0F, 0x11); } +void movups(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x10, 0x100); } +void movzx(const Reg& reg, const Operand& op) { opMovxx(reg, op, 0xB6); } +void mpsadbw(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x42, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } +void mul(const Operand& op) { opR_ModM(op, 0, 4, 0xF6); } +void mulpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x59, 0x66, isXMM_XMMorMEM); } +void mulps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x59, 0x100, isXMM_XMMorMEM); } +void mulsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x59, 0xF2, isXMM_XMMorMEM); } +void mulss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x59, 0xF3, isXMM_XMMorMEM); } +void mulx(const Reg32e& r1, const Reg32e& r2, const Operand& op) { opGpr(r1, r2, op, T_F2 | T_0F38, 0xf6, true); } +void mwait() { db(0x0F); db(0x01); db(0xC9); } +void neg(const Operand& op) { opR_ModM(op, 0, 3, 0xF6); } +void not_(const Operand& op) { opR_ModM(op, 0, 2, 0xF6); } +void or_(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x08, 1); } +void or_(const Operand& op1, const Operand& op2) { opRM_RM(op1, op2, 0x08); }
+void orpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x56, 0x66, isXMM_XMMorMEM); } +void orps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x56, 0x100, isXMM_XMMorMEM); } +void pabsb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x1C, 0x66, NONE, 0x38); } +void pabsd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x1E, 0x66, NONE, 0x38); } +void pabsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x1D, 0x66, NONE, 0x38); } +void packssdw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x6B); } +void packsswb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x63); } +void packusdw(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x2B, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void packuswb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x67); } +void paddb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xFC); } +void paddd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xFE); } +void paddq(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xD4); } +void paddsb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xEC); } +void paddsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xED); } +void paddusb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xDC); } +void paddusw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xDD); } +void paddw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xFD); } +void palignr(const Mmx& mmx, const Operand& op, int imm) { opMMX(mmx, op, 0x0f, 0x66, static_cast<uint8>(imm), 0x3a); } +void pand(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xDB); } +void pandn(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xDF); } +void pause() { db(0xF3); db(0x90); } +void pavgb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE0); } +void pavgw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE3); } +void pblendvb(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x10, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pblendw(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x0E, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } +void pclmulhqhdq(const Xmm& xmm, const Operand& op) { pclmulqdq(xmm, op, 0x11); } +void pclmulhqlqdq(const Xmm& xmm, const Operand& op) { pclmulqdq(xmm, op, 0x01); } +void pclmullqhdq(const Xmm& xmm, const Operand& op) { pclmulqdq(xmm, op, 0x10); } +void pclmullqlqdq(const Xmm& xmm, const Operand& op) { pclmulqdq(xmm, op, 0x00); } +void pclmulqdq(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x44, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } +void pcmpeqb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x74); } +void pcmpeqd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x76); } +void pcmpeqq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x29, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pcmpeqw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x75); } +void pcmpestri(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x61, 0x66, isXMM_XMMorMEM, imm, 0x3A); } +void pcmpestrm(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x60, 0x66, isXMM_XMMorMEM, imm, 0x3A); } +void pcmpgtb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x64); } +void pcmpgtd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x66); } +void pcmpgtq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x37, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pcmpgtw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x65); } +void pcmpistri(const Xmm& xmm, const Operand& op,
uint8 imm) { opGen(xmm, op, 0x63, 0x66, isXMM_XMMorMEM, imm, 0x3A); } +void pcmpistrm(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x62, 0x66, isXMM_XMMorMEM, imm, 0x3A); } +void pdep(const Reg32e& r1, const Reg32e& r2, const Operand& op) { opGpr(r1, r2, op, T_F2 | T_0F38, 0xf5, true); } +void pext(const Reg32e& r1, const Reg32e& r2, const Operand& op) { opGpr(r1, r2, op, T_F3 | T_0F38, 0xf5, true); } +void pextrb(const Operand& op, const Xmm& xmm, uint8 imm) { opExt(op, xmm, 0x14, imm); } +void pextrd(const Operand& op, const Xmm& xmm, uint8 imm) { opExt(op, xmm, 0x16, imm); } +void pextrw(const Operand& op, const Mmx& xmm, uint8 imm) { opExt(op, xmm, 0x15, imm, true); } +void phaddd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x02, 0x66, NONE, 0x38); } +void phaddsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x03, 0x66, NONE, 0x38); } +void phaddw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x01, 0x66, NONE, 0x38); } +void phminposuw(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x41, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void phsubd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x06, 0x66, NONE, 0x38); } +void phsubsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x07, 0x66, NONE, 0x38); } +void phsubw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x05, 0x66, NONE, 0x38); } +void pinsrb(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x20, 0x66, isXMM_REG32orMEM, imm, 0x3A); } +void pinsrd(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x22, 0x66, isXMM_REG32orMEM, imm, 0x3A); } +void pinsrw(const Mmx& mmx, const Operand& op, int imm) { if (!op.isREG(32) && !op.isMEM()) throw Error(ERR_BAD_COMBINATION); opGen(mmx, op, 0xC4, mmx.isXMM() ? 0x66 : NONE, 0, imm); } +void pmaddubsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x04, 0x66, NONE, 0x38); } +void pmaddwd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF5); } +void pmaxsb(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x3C, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmaxsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x3D, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmaxsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xEE); } +void pmaxub(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xDE); } +void pmaxud(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x3F, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmaxuw(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x3E, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pminsb(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x38, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pminsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x39, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pminsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xEA); } +void pminub(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xDA); } +void pminud(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x3B, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pminuw(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x3A, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmovmskb(const Reg32e& reg, const Mmx& mmx) { if (mmx.isXMM()) db(0x66); opModR(reg, mmx, 0x0F, 0xD7); } +void pmovsxbd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x21, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmovsxbq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x22, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmovsxbw(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x20, 0x66, isXMM_XMMorMEM, NONE, 
0x38); } +void pmovsxdq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x25, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmovsxwd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x23, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmovsxwq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x24, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmovzxbd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x31, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmovzxbq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x32, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmovzxbw(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x30, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmovzxdq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x35, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmovzxwd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x33, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmovzxwq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x34, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmuldq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x28, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmulhrsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x0B, 0x66, NONE, 0x38); } +void pmulhuw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE4); } +void pmulhw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE5); } +void pmulld(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x40, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void pmullw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xD5); } +void pmuludq(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF4); } +void popcnt(const Reg& reg, const Operand& op) { opSp1(reg, op, 0xF3, 0x0F, 0xB8); } +void popf() { db(0x9D); } +void por(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xEB); } +void prefetchnta(const Address& addr) { opModM(addr, Reg32(0), 0x0F, 0x18); } +void prefetcht0(const Address& addr) { opModM(addr, Reg32(1), 0x0F, 0x18); } +void prefetcht1(const Address& addr) { opModM(addr, Reg32(2), 0x0F, 0x18); } +void prefetcht2(const Address& addr) { opModM(addr, Reg32(3), 0x0F, 0x18); } +void prefetchw(const Address& addr) { opModM(addr, Reg32(1), 0x0F, 0x0D); } +void prefetchwt1(const Address& addr) { opModM(addr, Reg32(2), 0x0F, 0x0D); } +void psadbw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF6); } +void pshufb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x00, 0x66, NONE, 0x38); } +void pshufd(const Mmx& mmx, const Operand& op, uint8 imm8) { opMMX(mmx, op, 0x70, 0x66, imm8); } +void pshufhw(const Mmx& mmx, const Operand& op, uint8 imm8) { opMMX(mmx, op, 0x70, 0xF3, imm8); } +void pshuflw(const Mmx& mmx, const Operand& op, uint8 imm8) { opMMX(mmx, op, 0x70, 0xF2, imm8); } +void pshufw(const Mmx& mmx, const Operand& op, uint8 imm8) { opMMX(mmx, op, 0x70, 0x00, imm8); } +void psignb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x08, 0x66, NONE, 0x38); } +void psignd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x0A, 0x66, NONE, 0x38); } +void psignw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x09, 0x66, NONE, 0x38); } +void pslld(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF2); } +void pslld(const Mmx& mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x72, 6); } +void pslldq(const Xmm& xmm, int imm8) { opMMX_IMM(xmm, imm8, 0x73, 7); } +void psllq(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF3); } +void psllq(const Mmx& mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x73, 6); } +void psllw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF1); } +void psllw(const Mmx&
mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x71, 6); } +void psrad(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE2); } +void psrad(const Mmx& mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x72, 4); } +void psraw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE1); } +void psraw(const Mmx& mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x71, 4); } +void psrld(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xD2); } +void psrld(const Mmx& mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x72, 2); } +void psrldq(const Xmm& xmm, int imm8) { opMMX_IMM(xmm, imm8, 0x73, 3); } +void psrlq(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xD3); } +void psrlq(const Mmx& mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x73, 2); } +void psrlw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xD1); } +void psrlw(const Mmx& mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x71, 2); } +void psubb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF8); } +void psubd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xFA); } +void psubq(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xFB); } +void psubsb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE8); } +void psubsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE9); } +void psubusb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xD8); } +void psubusw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xD9); } +void psubw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF9); } +void ptest(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x17, 0x66, isXMM_XMMorMEM, NONE, 0x38); } +void punpckhbw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x68); } +void punpckhdq(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x6A); } +void punpckhqdq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x6D, 0x66, isXMM_XMMorMEM); } +void punpckhwd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x69); } +void punpcklbw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x60); } +void punpckldq(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x62); } +void punpcklqdq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x6C, 0x66, isXMM_XMMorMEM); } +void punpcklwd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x61); } +void pushf() { db(0x9C); } +void pxor(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xEF); } +void rcl(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 2); } +void rcl(const Operand& op, int imm) { opShift(op, imm, 2); } +void rcpps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x53, 0x100, isXMM_XMMorMEM); } +void rcpss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x53, 0xF3, isXMM_XMMorMEM); } +void rcr(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 3); } +void rcr(const Operand& op, int imm) { opShift(op, imm, 3); } +void rdmsr() { db(0x0F); db(0x32); } +void rdpmc() { db(0x0F); db(0x33); } +void rdrand(const Reg& r) { if (r.isBit(8)) throw Error(ERR_BAD_SIZE_OF_REGISTER); opModR(Reg(6, Operand::REG, r.getBit()), r, 0x0F, 0xC7); } +void rdseed(const Reg& r) { if (r.isBit(8)) throw Error(ERR_BAD_SIZE_OF_REGISTER); opModR(Reg(7, Operand::REG, r.getBit()), r, 0x0F, 0xC7); } +void rdtsc() { db(0x0F); db(0x31); } +void rdtscp() { db(0x0F); db(0x01); db(0xF9); } +void rep() { db(0xF3); } +void ret(int imm = 0) { if (imm) { db(0xC2); dw(imm); } else { db(0xC3); } } +void rol(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 0); } +void rol(const Operand& op, int imm) { opShift(op, imm, 0); } +void ror(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 1); } +void ror(const Operand& 
op, int imm) { opShift(op, imm, 1); } +void rorx(const Reg32e& r, const Operand& op, uint8 imm) { opGpr(r, op, Reg32e(0, r.getBit()), T_0F3A | T_F2, 0xF0, false, imm); } +void roundpd(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x09, 0x66, isXMM_XMMorMEM, imm, 0x3A); } +void roundps(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x08, 0x66, isXMM_XMMorMEM, imm, 0x3A); } +void roundsd(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x0B, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } +void roundss(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x0A, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } +void rsqrtps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x52, 0x100, isXMM_XMMorMEM); } +void rsqrtss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x52, 0xF3, isXMM_XMMorMEM); } +void sahf() { db(0x9E); } +void sal(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 4); } +void sal(const Operand& op, int imm) { opShift(op, imm, 4); } +void sar(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 7); } +void sar(const Operand& op, int imm) { opShift(op, imm, 7); } +void sarx(const Reg32e& r1, const Operand& op, const Reg32e& r2) { opGpr(r1, op, r2, T_F3 | T_0F38, 0xf7, false); } +void sbb(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x18, 3); } +void sbb(const Operand& op1, const Operand& op2) { opRM_RM(op1, op2, 0x18); } +void scasb() { db(0xAE); } +void scasd() { db(0xAF); } +void scasw() { db(0x66); db(0xAF); } +void seta(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 7); }//-V524 +void setae(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 3); }//-V524 +void setb(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 2); }//-V524 +void setbe(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 6); }//-V524 +void setc(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 2); }//-V524 +void sete(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 4); }//-V524 +void setg(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 15); }//-V524 +void setge(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 13); }//-V524 +void setl(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 12); }//-V524 +void setle(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 14); }//-V524 +void setna(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 6); }//-V524 +void setnae(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 2); }//-V524 +void setnb(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 3); }//-V524 +void setnbe(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 7); }//-V524 +void setnc(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 3); }//-V524 +void setne(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 5); }//-V524 +void setng(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 14); }//-V524 +void setnge(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 12); }//-V524 +void setnl(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 13); }//-V524 +void setnle(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 15); }//-V524 +void setno(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 1); }//-V524 +void setnp(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 11); }//-V524 +void setns(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 9); }//-V524 +void setnz(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 5); }//-V524 +void seto(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 0); }//-V524 +void setp(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 10); }//-V524
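The mnemonics in this header are plain member functions of Xbyak's CodeGenerator, so user code drives them by subclassing it, calling the wrappers in program order, and fetching the assembled bytes with getCode(). A minimal usage sketch (not part of this patch), assuming an x86-64 System V target where the first integer argument arrives in edi; the IsZero type and isZero pointer are illustrative names only:

#include <xbyak/xbyak.h>

// JIT-assemble int f(int x) { return x == 0; } using the wrappers defined above.
struct IsZero : Xbyak::CodeGenerator {
    IsZero() {
        xor_(eax, eax);  // clear the return register
        test(edi, edi);  // ZF = (first argument == 0); SysV ABI assumed
        sete(al);        // sete() above emits 0x0F, 0x90 | 4 = 0x0F 0x94
        ret();           // ret() above emits 0xC3
    }
};

int main() {
    IsZero code;
    auto isZero = code.getCode<int (*)(int)>();
    return (isZero(0) == 1 && isZero(7) == 0) ? 0 : 1;
}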
+void setpe(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 10); }//-V524 +void setpo(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 11); }//-V524 +void sets(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 8); }//-V524 +void setz(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 4); }//-V524 +void sfence() { db(0x0F); db(0xAE); db(0xF8); } +void sha1msg1(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xC9, NONE, isXMM_XMMorMEM, NONE, 0x38); } +void sha1msg2(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xCA, NONE, isXMM_XMMorMEM, NONE, 0x38); } +void sha1nexte(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xC8, NONE, isXMM_XMMorMEM, NONE, 0x38); } +void sha1rnds4(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0xCC, NONE, isXMM_XMMorMEM, imm, 0x3A); } +void sha256msg1(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xCC, NONE, isXMM_XMMorMEM, NONE, 0x38); } +void sha256msg2(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xCD, NONE, isXMM_XMMorMEM, NONE, 0x38); } +void sha256rnds2(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xCB, NONE, isXMM_XMMorMEM, NONE, 0x38); } +void shl(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 4); } +void shl(const Operand& op, int imm) { opShift(op, imm, 4); } +void shld(const Operand& op, const Reg& reg, const Reg8& _cl) { opShxd(op, reg, 0, 0xA4, &_cl); } +void shld(const Operand& op, const Reg& reg, uint8 imm) { opShxd(op, reg, imm, 0xA4); } +void shlx(const Reg32e& r1, const Operand& op, const Reg32e& r2) { opGpr(r1, op, r2, T_66 | T_0F38, 0xf7, false); } +void shr(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 5); } +void shr(const Operand& op, int imm) { opShift(op, imm, 5); } +void shrd(const Operand& op, const Reg& reg, const Reg8& _cl) { opShxd(op, reg, 0, 0xAC, &_cl); } +void shrd(const Operand& op, const Reg& reg, uint8 imm) { opShxd(op, reg, imm, 0xAC); } +void shrx(const Reg32e& r1, const Operand& op, const Reg32e& r2) { opGpr(r1, op, r2, T_F2 | T_0F38, 0xf7, false); } +void shufpd(const Xmm& xmm, const Operand& op, uint8 imm8) { opGen(xmm, op, 0xC6, 0x66, isXMM_XMMorMEM, imm8); } +void shufps(const Xmm& xmm, const Operand& op, uint8 imm8) { opGen(xmm, op, 0xC6, 0x100, isXMM_XMMorMEM, imm8); } +void sqrtpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x51, 0x66, isXMM_XMMorMEM); } +void sqrtps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x51, 0x100, isXMM_XMMorMEM); } +void sqrtsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x51, 0xF2, isXMM_XMMorMEM); } +void sqrtss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x51, 0xF3, isXMM_XMMorMEM); } +void stac() { db(0x0F); db(0x01); db(0xCB); } +void stc() { db(0xF9); } +void std() { db(0xFD); } +void sti() { db(0xFB); } +void stmxcsr(const Address& addr) { opModM(addr, Reg32(3), 0x0F, 0xAE); } +void stosb() { db(0xAA); } +void stosd() { db(0xAB); } +void stosw() { db(0x66); db(0xAB); } +void sub(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x28, 5); } +void sub(const Operand& op1, const Operand& op2) { opRM_RM(op1, op2, 0x28); } +void subpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5C, 0x66, isXMM_XMMorMEM); } +void subps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5C, 0x100, isXMM_XMMorMEM); } +void subsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5C, 0xF2, isXMM_XMMorMEM); } +void subss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5C, 0xF3, isXMM_XMMorMEM); } +void tzcnt(const Reg& reg, const Operand& op) { opSp1(reg, op, 0xF3, 0x0F, 0xBC); } +void 
ucomisd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x2E, 0x66, isXMM_XMMorMEM); } +void ucomiss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x2E, 0x100, isXMM_XMMorMEM); } +void ud2() { db(0x0F); db(0x0B); } +void unpckhpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x15, 0x66, isXMM_XMMorMEM); } +void unpckhps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x15, 0x100, isXMM_XMMorMEM); } +void unpcklpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x14, 0x66, isXMM_XMMorMEM); } +void unpcklps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x14, 0x100, isXMM_XMMorMEM); } +void vaddpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x58); } +void vaddps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x58); } +void vaddsd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F2 | T_EW1 | T_EVEX | T_ER_Z | T_N8, 0x58); } +void vaddss(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F3 | T_EW0 | T_EVEX | T_ER_Z | T_N4, 0x58); } +void vaddsubpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_66 | T_0F | T_YMM, 0xD0); } +void vaddsubps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_F2 | T_0F | T_YMM, 0xD0); } +void vaesdec(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_66 | T_0F38 | T_YMM | T_EVEX, 0xDE); } +void vaesdeclast(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_66 | T_0F38 | T_YMM | T_EVEX, 0xDF); } +void vaesenc(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_66 | T_0F38 | T_YMM | T_EVEX, 0xDC); } +void vaesenclast(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_66 | T_0F38 | T_YMM | T_EVEX, 0xDD); } +void vaesimc(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38 | T_W0, 0xDB); } +void vaeskeygenassist(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A, 0xDF, imm); } +void vandnpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x55); } +void vandnps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x55); } +void vandpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x54); } +void vandps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x54); } +void vblendpd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0 | T_YMM, 0x0D, imm); } +void vblendps(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0 | T_YMM, 0x0C, imm); } +void vblendvpd(const Xmm& x1, const Xmm& x2, const Operand& op, const Xmm& x4) { opAVX_X_X_XM(x1, x2, op, T_0F3A | T_66 | T_YMM, 0x4B, 
x4.getIdx() << 4); } +void vblendvps(const Xmm& x1, const Xmm& x2, const Operand& op, const Xmm& x4) { opAVX_X_X_XM(x1, x2, op, T_0F3A | T_66 | T_YMM, 0x4A, x4.getIdx() << 4); } +void vbroadcastf128(const Ymm& y, const Address& addr) { opAVX_X_XM_IMM(y, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x1A); } +void vbroadcasti128(const Ymm& y, const Address& addr) { opAVX_X_XM_IMM(y, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x5A); } +void vbroadcastsd(const Ymm& y, const Operand& op) { if (!op.isMEM() && !(y.isYMM() && op.isXMM()) && !(y.isZMM() && op.isXMM())) throw Error(ERR_BAD_COMBINATION); opAVX_X_XM_IMM(y, op, T_0F38 | T_66 | T_W0 | T_YMM | T_EVEX | T_EW1 | T_N8, 0x19); } +void vbroadcastss(const Xmm& x, const Operand& op) { if (!(op.isXMM() || op.isMEM())) throw Error(ERR_BAD_COMBINATION); opAVX_X_XM_IMM(x, op, T_N4 | T_66 | T_0F38 | T_W0 | T_YMM | T_EVEX, 0x18); } +void vcmpeq_ospd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 16); } +void vcmpeq_osps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 16); } +void vcmpeq_ossd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 16); } +void vcmpeq_osss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 16); } +void vcmpeq_uqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 8); } +void vcmpeq_uqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 8); } +void vcmpeq_uqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 8); } +void vcmpeq_uqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 8); } +void vcmpeq_uspd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 24); } +void vcmpeq_usps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 24); } +void vcmpeq_ussd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 24); } +void vcmpeq_usss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 24); } +void vcmpeqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 0); } +void vcmpeqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 0); } +void vcmpeqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 0); } +void vcmpeqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 0); } +void vcmpfalse_ospd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 27); } +void vcmpfalse_osps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 27); } +void vcmpfalse_ossd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 27); } +void vcmpfalse_osss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 27); } +void vcmpfalsepd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 11); } +void vcmpfalseps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 11); } +void vcmpfalsesd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 11); } +void vcmpfalsess(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 11); } +void vcmpge_oqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 29); } +void vcmpge_oqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 29); } +void vcmpge_oqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 29); } +void vcmpge_oqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 29); } +void vcmpgepd(const Xmm& x1, 
const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 13); } +void vcmpgeps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 13); } +void vcmpgesd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 13); } +void vcmpgess(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 13); } +void vcmpgt_oqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 30); } +void vcmpgt_oqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 30); } +void vcmpgt_oqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 30); } +void vcmpgt_oqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 30); } +void vcmpgtpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 14); } +void vcmpgtps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 14); } +void vcmpgtsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 14); } +void vcmpgtss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 14); } +void vcmple_oqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 18); } +void vcmple_oqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 18); } +void vcmple_oqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 18); } +void vcmple_oqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 18); } +void vcmplepd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 2); } +void vcmpleps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 2); } +void vcmplesd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 2); } +void vcmpless(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 2); } +void vcmplt_oqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 17); } +void vcmplt_oqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 17); } +void vcmplt_oqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 17); } +void vcmplt_oqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 17); } +void vcmpltpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 1); } +void vcmpltps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 1); } +void vcmpltsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 1); } +void vcmpltss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 1); } +void vcmpneq_oqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 12); } +void vcmpneq_oqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 12); } +void vcmpneq_oqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 12); } +void vcmpneq_oqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 12); } +void vcmpneq_ospd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 28); } +void vcmpneq_osps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 28); } +void vcmpneq_ossd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 28); } +void vcmpneq_osss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 28); } +void vcmpneq_uspd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 20); } +void vcmpneq_usps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 20); } +void 
vcmpneq_ussd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 20); } +void vcmpneq_usss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 20); } +void vcmpneqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 4); } +void vcmpneqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 4); } +void vcmpneqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 4); } +void vcmpneqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 4); } +void vcmpnge_uqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 25); } +void vcmpnge_uqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 25); } +void vcmpnge_uqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 25); } +void vcmpnge_uqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 25); } +void vcmpngepd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 9); } +void vcmpngeps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 9); } +void vcmpngesd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 9); } +void vcmpngess(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 9); } +void vcmpngt_uqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 26); } +void vcmpngt_uqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 26); } +void vcmpngt_uqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 26); } +void vcmpngt_uqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 26); } +void vcmpngtpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 10); } +void vcmpngtps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 10); } +void vcmpngtsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 10); } +void vcmpngtss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 10); } +void vcmpnle_uqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 22); } +void vcmpnle_uqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 22); } +void vcmpnle_uqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 22); } +void vcmpnle_uqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 22); } +void vcmpnlepd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 6); } +void vcmpnleps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 6); } +void vcmpnlesd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 6); } +void vcmpnless(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 6); } +void vcmpnlt_uqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 21); } +void vcmpnlt_uqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 21); } +void vcmpnlt_uqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 21); } +void vcmpnlt_uqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 21); } +void vcmpnltpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 5); } +void vcmpnltps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 5); } +void vcmpnltsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 5); } +void vcmpnltss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, 
x2, op, 5); } +void vcmpord_spd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 23); } +void vcmpord_sps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 23); } +void vcmpord_ssd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 23); } +void vcmpord_sss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 23); } +void vcmpordpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 7); } +void vcmpordps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 7); } +void vcmpordsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 7); } +void vcmpordss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 7); } +void vcmppd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0xC2, imm); } +void vcmpps(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_0F | T_YMM, 0xC2, imm); } +void vcmpsd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_F2 | T_0F, 0xC2, imm); } +void vcmpss(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_F3 | T_0F, 0xC2, imm); } +void vcmptrue_uspd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 31); } +void vcmptrue_usps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 31); } +void vcmptrue_ussd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 31); } +void vcmptrue_usss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 31); } +void vcmptruepd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 15); } +void vcmptrueps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 15); } +void vcmptruesd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 15); } +void vcmptruess(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 15); } +void vcmpunord_spd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 19); } +void vcmpunord_sps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 19); } +void vcmpunord_ssd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 19); } +void vcmpunord_sss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 19); } +void vcmpunordpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 3); } +void vcmpunordps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 3); } +void vcmpunordsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 3); } +void vcmpunordss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 3); } +void vcomisd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_66 | T_0F | T_EW1 | T_EVEX | T_SAE_X, 0x2F); } +void vcomiss(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N4 | T_0F | T_EW0 | T_EVEX | T_SAE_X, 0x2F); } +void vcvtdq2pd(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_0F | T_F3 | T_YMM | T_EVEX | T_EW0 | T_B32 | T_N8 | T_N_VL, 0xE6); } +void vcvtdq2ps(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x5B); } +void vcvtpd2dq(const Xmm& x, const Operand& op) { opCvt2(x, op, T_0F | T_F2 | T_YMM | T_EVEX | T_EW1 | T_B64 | T_ER_Z, 0xE6); } +void vcvtpd2ps(const Xmm& x, const Operand& op) { opCvt2(x, op, T_0F | T_66 | 
T_YMM | T_EVEX | T_EW1 | T_B64 | T_ER_Z, 0x5A); } +void vcvtph2ps(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_0F38 | T_66 | T_W0 | T_EVEX | T_EW0 | T_N8 | T_N_VL | T_SAE_Y, 0x13); } +void vcvtps2dq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x5B); } +void vcvtps2pd(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_0F | T_YMM | T_EVEX | T_EW0 | T_B32 | T_N8 | T_N_VL | T_SAE_Y, 0x5A); } +void vcvtps2ph(const Operand& op, const Xmm& x, uint8 imm) { checkCvt1(x, op); opVex(x, 0, op, T_0F3A | T_66 | T_W0 | T_EVEX | T_EW0 | T_N8 | T_N_VL | T_SAE_Y, 0x1D, imm); } +void vcvtsd2si(const Reg32& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F2 | T_W0 | T_EVEX | T_EW0 | T_N4 | T_ER_X, 0x2D); } +void vcvtsd2ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_F2 | T_0F | T_EW1 | T_EVEX | T_ER_X, 0x5A); } +void vcvtsi2sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opCvt3(x1, x2, op, T_0F | T_F2 | T_EVEX, T_W1 | T_EW1 | T_ER_X | T_N8, T_W0 | T_EW0 | T_N4, 0x2A); } +void vcvtsi2ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opCvt3(x1, x2, op, T_0F | T_F3 | T_EVEX | T_ER_X, T_W1 | T_EW1 | T_N8, T_W0 | T_EW0 | T_N4, 0x2A); } +void vcvtss2sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_F3 | T_0F | T_EW0 | T_EVEX | T_SAE_X, 0x5A); } +void vcvtss2si(const Reg32& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F3 | T_W0 | T_EVEX | T_EW0 | T_ER_X | T_N8, 0x2D); } +void vcvttpd2dq(const Xmm& x, const Operand& op) { opCvt2(x, op, T_66 | T_0F | T_YMM | T_EVEX |T_EW1 | T_B64 | T_ER_Z, 0xE6); } +void vcvttps2dq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_F3 | T_0F | T_EW0 | T_YMM | T_EVEX | T_SAE_Z | T_B32, 0x5B); } +void vcvttsd2si(const Reg32& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F2 | T_W0 | T_EVEX | T_EW0 | T_N4 | T_SAE_X, 0x2C); } +void vcvttss2si(const Reg32& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F3 | T_W0 | T_EVEX | T_EW0 | T_SAE_X | T_N8, 0x2C); } +void vdivpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x5E); } +void vdivps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x5E); } +void vdivsd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F2 | T_EW1 | T_EVEX | T_ER_Z | T_N8, 0x5E); } +void vdivss(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F3 | T_EW0 | T_EVEX | T_ER_Z | T_N4, 0x5E); } +void vdppd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0, 0x41, imm); } +void vdpps(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0 | T_YMM, 0x40, imm); } +void vextractf128(const Operand& op, const Ymm& y, uint8 imm) { if (!(op.isXMEM() && y.isYMM())) throw Error(ERR_BAD_COMBINATION); opVex(y, 0, op, T_0F3A | T_66 | T_W0 | T_YMM, 0x19, imm); } +void vextracti128(const Operand& op, const Ymm& y, uint8 imm) { if (!(op.isXMEM() && y.isYMM())) throw Error(ERR_BAD_COMBINATION); opVex(y, 0, op, T_0F3A | T_66 | T_W0 | T_YMM, 0x39, imm); } +void 
vextractps(const Operand& op, const Xmm& x, uint8 imm) { if (!((op.isREG(32) || op.isMEM()) && x.isXMM())) throw Error(ERR_BAD_COMBINATION); opVex(x, 0, op, T_0F3A | T_66 | T_W0 | T_EVEX | T_N4, 0x17, imm); } +void vfmadd132pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x98); } +void vfmadd132ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x98); } +void vfmadd132sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0x99); } +void vfmadd132ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0x99); } +void vfmadd213pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xA8); } +void vfmadd213ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xA8); } +void vfmadd213sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xA9); } +void vfmadd213ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xA9); } +void vfmadd231pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xB8); } +void vfmadd231ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xB8); } +void vfmadd231sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xB9); } +void vfmadd231ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xB9); } +void vfmaddsub132pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x96); } +void vfmaddsub132ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x96); } +void vfmaddsub213pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xA6); } +void vfmaddsub213ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xA6); } +void vfmaddsub231pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xB6); } +void vfmaddsub231ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xB6); } +void vfmsub132pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x9A); } +void vfmsub132ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x9A); } +void vfmsub132sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0x9B); } +void 
vfmsub132ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0x9B); } +void vfmsub213pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xAA); } +void vfmsub213ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xAA); } +void vfmsub213sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xAB); } +void vfmsub213ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xAB); } +void vfmsub231pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xBA); } +void vfmsub231ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xBA); } +void vfmsub231sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xBB); } +void vfmsub231ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xBB); } +void vfmsubadd132pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x97); } +void vfmsubadd132ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x97); } +void vfmsubadd213pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xA7); } +void vfmsubadd213ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xA7); } +void vfmsubadd231pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xB7); } +void vfmsubadd231ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xB7); } +void vfnmadd132pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x9C); } +void vfnmadd132ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x9C); } +void vfnmadd132sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0x9D); } +void vfnmadd132ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0x9D); } +void vfnmadd213pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xAC); } +void vfnmadd213ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xAC); } +void vfnmadd213sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xAD); } +void vfnmadd213ss(const Xmm& x1, const Xmm& x2, const 
Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xAD); } +void vfnmadd231pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xBC); } +void vfnmadd231ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xBC); } +void vfnmadd231sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xBD); } +void vfnmadd231ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xBD); } +void vfnmsub132pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x9E); } +void vfnmsub132ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x9E); } +void vfnmsub132sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0x9F); } +void vfnmsub132ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0x9F); } +void vfnmsub213pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xAE); } +void vfnmsub213ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xAE); } +void vfnmsub213sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xAF); } +void vfnmsub213ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xAF); } +void vfnmsub231pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xBE); } +void vfnmsub231ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xBE); } +void vfnmsub231sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xBF); } +void vfnmsub231ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xBF); } +void vgatherdpd(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W1, 0x92, 0); } +void vgatherdps(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W0, 0x92, 1); } +void vgatherqpd(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W1, 0x93, 1); } +void vgatherqps(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W0, 0x93, 2); } +void vgf2p8affineinvqb(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W1 | T_EW1 | T_YMM | T_EVEX | T_SAE_Z | T_B64, 0xCF, imm); } +void vgf2p8affineqb(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W1 | T_EW1 | 
T_YMM | T_EVEX | T_SAE_Z | T_B64, 0xCE, imm); } +void vgf2p8mulb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_SAE_Z, 0xCF); } +void vhaddpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_66 | T_0F | T_YMM, 0x7C); } +void vhaddps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_F2 | T_0F | T_YMM, 0x7C); } +void vhsubpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_66 | T_0F | T_YMM, 0x7D); } +void vhsubps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_F2 | T_0F | T_YMM, 0x7D); } +void vinsertf128(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { if (!(y1.isYMM() && y2.isYMM() && op.isXMEM())) throw Error(ERR_BAD_COMBINATION); opVex(y1, &y2, op, T_0F3A | T_66 | T_W0 | T_YMM, 0x18, imm); } +void vinserti128(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { if (!(y1.isYMM() && y2.isYMM() && op.isXMEM())) throw Error(ERR_BAD_COMBINATION); opVex(y1, &y2, op, T_0F3A | T_66 | T_W0 | T_YMM, 0x38, imm); } +void vinsertps(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F3A | T_W0 | T_EW0 | T_EVEX, 0x21, imm); } +void vlddqu(const Xmm& x, const Address& addr) { opAVX_X_X_XM(x, cvtIdx0(x), addr, T_0F | T_F2 | T_W0 | T_YMM, 0xF0); } +void vldmxcsr(const Address& addr) { opAVX_X_X_XM(xm2, xm0, addr, T_0F, 0xAE); } +void vmaskmovdqu(const Xmm& x1, const Xmm& x2) { opAVX_X_X_XM(x1, xm0, x2, T_0F | T_66, 0xF7); } +void vmaskmovpd(const Address& addr, const Xmm& x1, const Xmm& x2) { opAVX_X_X_XM(x2, x1, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x2F); } +void vmaskmovpd(const Xmm& x1, const Xmm& x2, const Address& addr) { opAVX_X_X_XM(x1, x2, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x2D); } +void vmaskmovps(const Address& addr, const Xmm& x1, const Xmm& x2) { opAVX_X_X_XM(x2, x1, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x2E); } +void vmaskmovps(const Xmm& x1, const Xmm& x2, const Address& addr) { opAVX_X_X_XM(x1, x2, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x2C); } +void vmaxpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x5F); } +void vmaxps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x5F); } +void vmaxsd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F2 | T_EW1 | T_EVEX | T_ER_Z | T_N8, 0x5F); } +void vmaxss(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F3 | T_EW0 | T_EVEX | T_ER_Z | T_N4, 0x5F); } +void vminpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x5D); } +void vminps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x5D); } +void vminsd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F2 | T_EW1 | T_EVEX | T_ER_Z | T_N8, 0x5D); } +void vminss(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F3 | T_EW0 | 
T_EVEX | T_ER_Z | T_N4, 0x5D); } +void vmovapd(const Address& addr, const Xmm& xmm) { opAVX_X_XM_IMM(xmm, addr, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_M_K, 0x29); } +void vmovapd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX, 0x28); } +void vmovaps(const Address& addr, const Xmm& xmm) { opAVX_X_XM_IMM(xmm, addr, T_0F | T_EW0 | T_YMM | T_EVEX | T_M_K, 0x29); } +void vmovaps(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_0F | T_EW0 | T_YMM | T_EVEX, 0x28); } +void vmovd(const Operand& op, const Xmm& x) { if (!op.isREG(32) && !op.isMEM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x, xm0, op, T_0F | T_66 | T_W0 | T_EVEX | T_N4, 0x7E); } +void vmovd(const Xmm& x, const Operand& op) { if (!op.isREG(32) && !op.isMEM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x, xm0, op, T_0F | T_66 | T_W0 | T_EVEX | T_N4, 0x6E); } +void vmovddup(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_DUP | T_F2 | T_0F | T_EW1 | T_YMM | T_EVEX | T_ER_X | T_ER_Y | T_ER_Z, 0x12); } +void vmovdqa(const Address& addr, const Xmm& xmm) { opAVX_X_XM_IMM(xmm, addr, T_66 | T_0F | T_YMM, 0x7F); } +void vmovdqa(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F | T_YMM, 0x6F); } +void vmovdqu(const Address& addr, const Xmm& xmm) { opAVX_X_XM_IMM(xmm, addr, T_F3 | T_0F | T_YMM, 0x7F); } +void vmovdqu(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_F3 | T_0F | T_YMM, 0x6F); } +void vmovhlps(const Xmm& x1, const Xmm& x2, const Operand& op = Operand()) { if (!op.isNone() && !op.isXMM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x1, x2, op, T_0F | T_EVEX | T_EW0, 0x12); } +void vmovhpd(const Address& addr, const Xmm& x) { opAVX_X_X_XM(x, xm0, addr, T_0F | T_66 | T_EVEX | T_EW1 | T_N8, 0x17); } +void vmovhpd(const Xmm& x, const Operand& op1, const Operand& op2 = Operand()) { if (!op2.isNone() && !op2.isMEM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x, op1, op2, T_0F | T_66 | T_EVEX | T_EW1 | T_N8, 0x16); } +void vmovhps(const Address& addr, const Xmm& x) { opAVX_X_X_XM(x, xm0, addr, T_0F | T_EVEX | T_EW0 | T_N8, 0x17); } +void vmovhps(const Xmm& x, const Operand& op1, const Operand& op2 = Operand()) { if (!op2.isNone() && !op2.isMEM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x, op1, op2, T_0F | T_EVEX | T_EW0 | T_N8, 0x16); } +void vmovlhps(const Xmm& x1, const Xmm& x2, const Operand& op = Operand()) { if (!op.isNone() && !op.isXMM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x1, x2, op, T_0F | T_EVEX | T_EW0, 0x16); } +void vmovlpd(const Address& addr, const Xmm& x) { opAVX_X_X_XM(x, xm0, addr, T_0F | T_66 | T_EVEX | T_EW1 | T_N8, 0x13); } +void vmovlpd(const Xmm& x, const Operand& op1, const Operand& op2 = Operand()) { if (!op2.isNone() && !op2.isMEM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x, op1, op2, T_0F | T_66 | T_EVEX | T_EW1 | T_N8, 0x12); } +void vmovlps(const Address& addr, const Xmm& x) { opAVX_X_X_XM(x, xm0, addr, T_0F | T_EVEX | T_EW0 | T_N8, 0x13); } +void vmovlps(const Xmm& x, const Operand& op1, const Operand& op2 = Operand()) { if (!op2.isNone() && !op2.isMEM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x, op1, op2, T_0F | T_EVEX | T_EW0 | T_N8, 0x12); } +void vmovmskpd(const Reg& r, const Xmm& x) { if (!r.isBit(i32e)) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x.isXMM() ? 
Xmm(r.getIdx()) : Ymm(r.getIdx()), cvtIdx0(x), x, T_0F | T_66 | T_W0 | T_YMM, 0x50); } +void vmovmskps(const Reg& r, const Xmm& x) { if (!r.isBit(i32e)) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x.isXMM() ? Xmm(r.getIdx()) : Ymm(r.getIdx()), cvtIdx0(x), x, T_0F | T_W0 | T_YMM, 0x50); } +void vmovntdq(const Address& addr, const Xmm& x) { opVex(x, 0, addr, T_0F | T_66 | T_YMM | T_EVEX | T_EW0, 0xE7); } +void vmovntdqa(const Xmm& x, const Address& addr) { opVex(x, 0, addr, T_0F38 | T_66 | T_YMM | T_EVEX | T_EW0, 0x2A); } +void vmovntpd(const Address& addr, const Xmm& x) { opVex(x, 0, addr, T_0F | T_66 | T_YMM | T_EVEX | T_EW1, 0x2B); } +void vmovntps(const Address& addr, const Xmm& x) { opVex(x, 0, addr, T_0F | T_YMM | T_EVEX | T_EW0, 0x2B); } +void vmovq(const Address& addr, const Xmm& x) { opAVX_X_X_XM(x, xm0, addr, T_0F | T_66 | T_EVEX | T_EW1 | T_N8, x.getIdx() < 16 ? 0xD6 : 0x7E); } +void vmovq(const Xmm& x, const Address& addr) { int type, code; if (x.getIdx() < 16) { type = T_0F | T_F3; code = 0x7E; } else { type = T_0F | T_66 | T_EVEX | T_EW1 | T_N8; code = 0x6E; } opAVX_X_X_XM(x, xm0, addr, type, code); } +void vmovq(const Xmm& x1, const Xmm& x2) { opAVX_X_X_XM(x1, xm0, x2, T_0F | T_F3 | T_EVEX | T_EW1 | T_N8, 0x7E); } +void vmovsd(const Address& addr, const Xmm& x) { opAVX_X_X_XM(x, xm0, addr, T_N8 | T_F2 | T_0F | T_EW1 | T_EVEX | T_M_K, 0x11); } +void vmovsd(const Xmm& x, const Address& addr) { opAVX_X_X_XM(x, xm0, addr, T_N8 | T_F2 | T_0F | T_EW1 | T_EVEX, 0x10); } +void vmovsd(const Xmm& x1, const Xmm& x2, const Operand& op = Operand()) { if (!op.isNone() && !op.isXMM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x1, x2, op, T_N8 | T_F2 | T_0F | T_EW1 | T_EVEX, 0x10); } +void vmovshdup(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_F3 | T_0F | T_EW0 | T_YMM | T_EVEX, 0x16); } +void vmovsldup(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_F3 | T_0F | T_EW0 | T_YMM | T_EVEX, 0x12); } +void vmovss(const Address& addr, const Xmm& x) { opAVX_X_X_XM(x, xm0, addr, T_N4 | T_F3 | T_0F | T_EW0 | T_EVEX | T_M_K, 0x11); } +void vmovss(const Xmm& x, const Address& addr) { opAVX_X_X_XM(x, xm0, addr, T_N4 | T_F3 | T_0F | T_EW0 | T_EVEX, 0x10); } +void vmovss(const Xmm& x1, const Xmm& x2, const Operand& op = Operand()) { if (!op.isNone() && !op.isXMM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x1, x2, op, T_N4 | T_F3 | T_0F | T_EW0 | T_EVEX, 0x10); } +void vmovupd(const Address& addr, const Xmm& xmm) { opAVX_X_XM_IMM(xmm, addr, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_M_K, 0x11); } +void vmovupd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX, 0x10); } +void vmovups(const Address& addr, const Xmm& xmm) { opAVX_X_XM_IMM(xmm, addr, T_0F | T_EW0 | T_YMM | T_EVEX | T_M_K, 0x11); } +void vmovups(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_0F | T_EW0 | T_YMM | T_EVEX, 0x10); } +void vmpsadbw(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0 | T_YMM, 0x42, imm); } +void vmulpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x59); } +void vmulps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x59); } +void vmulsd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F2 | T_EW1 | T_EVEX | 
T_ER_Z | T_N8, 0x59); } +void vmulss(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F3 | T_EW0 | T_EVEX | T_ER_Z | T_N4, 0x59); } +void vorpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x56); } +void vorps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x56); } +void vpabsb(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x1C); } +void vpabsd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x1E); } +void vpabsw(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x1D); } +void vpackssdw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0x6B); } +void vpacksswb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0x63); } +void vpackusdw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x2B); } +void vpackuswb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0x67); } +void vpaddb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xFC); } +void vpaddd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0xFE); } +void vpaddq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0xD4); } +void vpaddsb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xEC); } +void vpaddsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xED); } +void vpaddusb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xDC); } +void vpaddusw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xDD); } +void vpaddw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xFD); } +void vpalignr(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_YMM | T_EVEX, 0x0F, imm); } +void vpand(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0xDB); } +void vpandn(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0xDF); } +void vpavgb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xE0); } +void vpavgw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xE3); } +void vpblendd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0 | T_YMM, 0x02, imm); } +void vpblendvb(const Xmm& x1, const Xmm& x2, const Operand& op, const Xmm& x4) { opAVX_X_X_XM(x1, x2, op, T_0F3A | T_66 | T_YMM, 0x4C, x4.getIdx() << 4); } +void vpblendw(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, 
T_66 | T_0F3A | T_W0 | T_YMM, 0x0E, imm); } +void vpbroadcastb(const Xmm& x, const Operand& op) { if (!(op.isXMM() || op.isMEM())) throw Error(ERR_BAD_COMBINATION); opAVX_X_XM_IMM(x, op, T_N1 | T_66 | T_0F38 | T_W0 | T_YMM | T_EVEX, 0x78); } +void vpbroadcastd(const Xmm& x, const Operand& op) { if (!(op.isXMM() || op.isMEM())) throw Error(ERR_BAD_COMBINATION); opAVX_X_XM_IMM(x, op, T_N4 | T_66 | T_0F38 | T_W0 | T_YMM | T_EVEX, 0x58); } +void vpbroadcastq(const Xmm& x, const Operand& op) { if (!(op.isXMM() || op.isMEM())) throw Error(ERR_BAD_COMBINATION); opAVX_X_XM_IMM(x, op, T_N8 | T_66 | T_0F38 | T_W0 | T_EW1 | T_YMM | T_EVEX, 0x59); } +void vpbroadcastw(const Xmm& x, const Operand& op) { if (!(op.isXMM() || op.isMEM())) throw Error(ERR_BAD_COMBINATION); opAVX_X_XM_IMM(x, op, T_N2 | T_66 | T_0F38 | T_W0 | T_YMM | T_EVEX, 0x79); } +void vpclmulqdq(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0 | T_YMM | T_EVEX, 0x44, imm); } +void vpcmpeqb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0x74); } +void vpcmpeqd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0x76); } +void vpcmpeqq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x29); } +void vpcmpeqw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0x75); } +void vpcmpestri(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A, 0x61, imm); } +void vpcmpestrm(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A, 0x60, imm); } +void vpcmpgtb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0x64); } +void vpcmpgtd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0x66); } +void vpcmpgtq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x37); } +void vpcmpgtw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0x65); } +void vpcmpistri(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A, 0x63, imm); } +void vpcmpistrm(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A, 0x62, imm); } +void vperm2f128(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { if (!(y1.isYMM() && y2.isYMM() && op.isYMEM())) throw Error(ERR_BAD_COMBINATION); opVex(y1, &y2, op, T_0F3A | T_66 | T_W0 | T_YMM, 0x06, imm); } +void vperm2i128(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { if (!(y1.isYMM() && y2.isYMM() && op.isYMEM())) throw Error(ERR_BAD_COMBINATION); opVex(y1, &y2, op, T_0F3A | T_66 | T_W0 | T_YMM, 0x46, imm); } +void vpermd(const Ymm& y1, const Ymm& y2, const Operand& op) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x36); } +void vpermilpd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x0D); } +void vpermilpd(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_EVEX | T_B64, 0x05, imm); } +void vpermilps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x0C); } +void vpermilps(const Xmm& xm, const Operand& op, uint8 
imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_EVEX | T_B32, 0x04, imm); } +void vpermpd(const Ymm& y, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(y, op, T_66 | T_0F3A | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x01, imm); } +void vpermpd(const Ymm& y1, const Ymm& y2, const Operand& op) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x16); } +void vpermps(const Ymm& y1, const Ymm& y2, const Operand& op) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x16); } +void vpermq(const Ymm& y, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(y, op, T_66 | T_0F3A | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x00, imm); } +void vpermq(const Ymm& y1, const Ymm& y2, const Operand& op) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F38 | T_W0 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x36); } +void vpextrb(const Operand& op, const Xmm& x, uint8 imm) { if (!((op.isREG(8|16|i32e) || op.isMEM()) && x.isXMM())) throw Error(ERR_BAD_COMBINATION); opVex(x, 0, op, T_0F3A | T_66 | T_EVEX | T_N1, 0x14, imm); } +void vpextrd(const Operand& op, const Xmm& x, uint8 imm) { if (!((op.isREG(32) || op.isMEM()) && x.isXMM())) throw Error(ERR_BAD_COMBINATION); opVex(x, 0, op, T_0F3A | T_66 | T_W0 | T_EVEX | T_EW0 | T_N4, 0x16, imm); } +void vpextrq(const Operand& op, const Xmm& x, uint8 imm) { if (!((op.isREG(64) || op.isMEM()) && x.isXMM())) throw Error(ERR_BAD_COMBINATION); opVex(x, 0, op, T_0F3A | T_66 | T_W1 | T_EVEX | T_EW1 | T_N8, 0x16, imm); } +void vpextrw(const Operand& op, const Xmm& x, uint8 imm) { if (!((op.isREG(16|i32e) || op.isMEM()) && x.isXMM())) throw Error(ERR_BAD_COMBINATION); if (op.isREG() && x.getIdx() < 16) { opAVX_X_X_XM(Xmm(op.getIdx()), xm0, x, T_0F | T_66, 0xC5, imm); } else { opVex(x, 0, op, T_0F3A | T_66 | T_EVEX | T_N2, 0x15, imm); } } +void vpgatherdd(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W0, 0x90, 1); } +void vpgatherdq(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W1, 0x90, 0); } +void vpgatherqd(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W0, 0x91, 2); } +void vpgatherqq(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W1, 0x91, 1); } +void vphaddd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x02); } +void vphaddsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x03); } +void vphaddw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x01); } +void vphminposuw(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38, 0x41); } +void vphsubd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x06); } +void vphsubsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x07); } +void vphsubw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x05); } +void vpinsrb(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { if (!(x1.isXMM() && x2.isXMM() && (op.isREG(32) || op.isMEM()))) throw Error(ERR_BAD_COMBINATION); opVex(x1, &x2, op, T_0F3A | T_66 | T_EVEX | T_N1, 0x20, imm); } +void vpinsrd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { if 
(!(x1.isXMM() && x2.isXMM() && (op.isREG(32) || op.isMEM()))) throw Error(ERR_BAD_COMBINATION); opVex(x1, &x2, op, T_0F3A | T_66 | T_W0 | T_EVEX | T_EW0 | T_N4, 0x22, imm); } +void vpinsrq(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { if (!(x1.isXMM() && x2.isXMM() && (op.isREG(64) || op.isMEM()))) throw Error(ERR_BAD_COMBINATION); opVex(x1, &x2, op, T_0F3A | T_66 | T_W1 | T_EVEX | T_EW1 | T_N8, 0x22, imm); } +void vpinsrw(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { if (!(x1.isXMM() && x2.isXMM() && (op.isREG(32) || op.isMEM()))) throw Error(ERR_BAD_COMBINATION); opVex(x1, &x2, op, T_0F | T_66 | T_EVEX | T_N2, 0xC4, imm); } +void vpmaddubsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x04); } +void vpmaddwd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xF5); } +void vpmaskmovd(const Address& addr, const Xmm& x1, const Xmm& x2) { opAVX_X_X_XM(x2, x1, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x8E); } +void vpmaskmovd(const Xmm& x1, const Xmm& x2, const Address& addr) { opAVX_X_X_XM(x1, x2, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x8C); } +void vpmaskmovq(const Address& addr, const Xmm& x1, const Xmm& x2) { opAVX_X_X_XM(x2, x1, addr, T_0F38 | T_66 | T_W1 | T_YMM, 0x8E); } +void vpmaskmovq(const Xmm& x1, const Xmm& x2, const Address& addr) { opAVX_X_X_XM(x1, x2, addr, T_0F38 | T_66 | T_W1 | T_YMM, 0x8C); } +void vpmaxsb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x3C); } +void vpmaxsd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x3D); } +void vpmaxsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xEE); } +void vpmaxub(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xDE); } +void vpmaxud(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x3F); } +void vpmaxuw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x3E); } +void vpminsb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x38); } +void vpminsd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x39); } +void vpminsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xEA); } +void vpminub(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xDA); } +void vpminud(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x3B); } +void vpminuw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x3A); } +void vpmovmskb(const Reg32e& r, const Xmm& x) { if (!x.is(Operand::XMM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(x.isYMM() ? 
Ymm(r.getIdx()) : Xmm(r.getIdx()), 0, x, T_0F | T_66 | T_YMM, 0xD7); } +void vpmovsxbd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N4 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x21); } +void vpmovsxbq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N2 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x22); } +void vpmovsxbw(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x20); } +void vpmovsxdq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_N_VL | T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX, 0x25); } +void vpmovsxwd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x23); } +void vpmovsxwq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N4 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x24); } +void vpmovzxbd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N4 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x31); } +void vpmovzxbq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N2 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x32); } +void vpmovzxbw(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x30); } +void vpmovzxdq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_N_VL | T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX, 0x35); } +void vpmovzxwd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x33); } +void vpmovzxwq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N4 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x34); } +void vpmuldq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x28); } +void vpmulhrsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x0B); } +void vpmulhuw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xE4); } +void vpmulhw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xE5); } +void vpmulld(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x40); } +void vpmullw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xD5); } +void vpmuludq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0xF4); } +void vpor(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0xEB); } +void vpsadbw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xF6); } +void vpshufb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x00); } +void vpshufd(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0x70, imm); } +void vpshufhw(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_F3 | T_0F | T_YMM | T_EVEX, 0x70, imm); } +void vpshuflw(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_F2 | T_0F | T_YMM | T_EVEX, 0x70, imm); } +void vpsignb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x08); } +void vpsignd(const 
Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x0A); } +void vpsignw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x09); } +void vpslld(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 6), x, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32 | T_MEM_EVEX, 0x72, imm); } +void vpslld(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_EW0 | T_YMM | T_EVEX, 0xF2); } +void vpslldq(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 7), x, op, T_66 | T_0F | T_YMM | T_EVEX | T_MEM_EVEX, 0x73, imm); } +void vpsllq(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 6), x, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64 | T_MEM_EVEX, 0x73, imm); } +void vpsllq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_EW1 | T_YMM | T_EVEX, 0xF3); } +void vpsllvd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x47); } +void vpsllvq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x47); } +void vpsllw(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 6), x, op, T_66 | T_0F | T_YMM | T_EVEX | T_MEM_EVEX, 0x71, imm); } +void vpsllw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_YMM | T_EVEX, 0xF1); } +void vpsrad(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 4), x, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32 | T_MEM_EVEX, 0x72, imm); } +void vpsrad(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_EW0 | T_YMM | T_EVEX, 0xE2); } +void vpsravd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x46); } +void vpsraw(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 4), x, op, T_66 | T_0F | T_YMM | T_EVEX | T_MEM_EVEX, 0x71, imm); } +void vpsraw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_YMM | T_EVEX, 0xE1); } +void vpsrld(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 2), x, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32 | T_MEM_EVEX, 0x72, imm); } +void vpsrld(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_EW0 | T_YMM | T_EVEX, 0xD2); } +void vpsrldq(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 3), x, op, T_66 | T_0F | T_YMM | T_EVEX | T_MEM_EVEX, 0x73, imm); } +void vpsrlq(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 2), x, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64 | T_MEM_EVEX, 0x73, imm); } +void vpsrlq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_EW1 | T_YMM | T_EVEX, 0xD3); } +void vpsrlvd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x45); } +void vpsrlvq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x45); } +void vpsrlw(const Xmm& x, const Operand& op, uint8 imm) { 
opAVX_X_X_XM(Xmm(x.getKind(), 2), x, op, T_66 | T_0F | T_YMM | T_EVEX | T_MEM_EVEX, 0x71, imm); } +void vpsrlw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_YMM | T_EVEX, 0xD1); } +void vpsubb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xF8); } +void vpsubd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0xFA); } +void vpsubq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0xFB); } +void vpsubsb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xE8); } +void vpsubsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xE9); } +void vpsubusb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xD8); } +void vpsubusw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xD9); } +void vpsubw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xF9); } +void vptest(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38 | T_YMM, 0x17); } +void vpunpckhbw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0x68); } +void vpunpckhdq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0x6A); } +void vpunpckhqdq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0x6D); } +void vpunpckhwd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0x69); } +void vpunpcklbw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0x60); } +void vpunpckldq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0x62); } +void vpunpcklqdq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0x6C); } +void vpunpcklwd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0x61); } +void vpxor(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0xEF); } +void vrcpps(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_0F | T_YMM, 0x53); } +void vrcpss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_F3 | T_0F, 0x53); } +void vroundpd(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A | T_YMM, 0x09, imm); } +void vroundps(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A | T_YMM, 0x08, imm); } +void vroundsd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0, 0x0B, imm); } +void vroundss(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0, 0x0A, imm); } +void vrsqrtps(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_0F | T_YMM, 0x52); } +void vrsqrtss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, 
T_F3 | T_0F, 0x52); }
+void vshufpd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0xC6, imm); }
+void vshufps(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0xC6, imm); }
+void vsqrtpd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x51); }
+void vsqrtps(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x51); }
+void vsqrtsd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_F2 | T_0F | T_EW1 | T_EVEX | T_ER_X, 0x51); }
+void vsqrtss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_F3 | T_0F | T_EW0 | T_EVEX | T_ER_X, 0x51); }
+void vstmxcsr(const Address& addr) { opAVX_X_X_XM(xm3, xm0, addr, T_0F, 0xAE); }
+void vsubpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x5C); }
+void vsubps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x5C); }
+void vsubsd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F2 | T_EW1 | T_EVEX | T_ER_Z | T_N8, 0x5C); }
+void vsubss(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F3 | T_EW0 | T_EVEX | T_ER_Z | T_N4, 0x5C); }
+void vtestpd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38 | T_YMM, 0x0F); }
+void vtestps(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38 | T_YMM, 0x0E); }
+void vucomisd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_66 | T_0F | T_EW1 | T_EVEX | T_SAE_X, 0x2E); }
+void vucomiss(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N4 | T_0F | T_EW0 | T_EVEX | T_SAE_X, 0x2E); }
+void vunpckhpd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0x15); }
+void vunpckhps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0x15); }
+void vunpcklpd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0x14); }
+void vunpcklps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0x14); }
+void vxorpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x57); }
+void vxorps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x57); }
+void vzeroall() { db(0xC5); db(0xFC); db(0x77); }
+void vzeroupper() { db(0xC5); db(0xF8); db(0x77); }
+void wait() { db(0x9B); }
+void wbinvd() { db(0x0F); db(0x09); }
+void wrmsr() { db(0x0F); db(0x30); }
+void xadd(const Operand& op, const Reg& reg) { opModRM(reg, op, (op.isREG() && reg.isREG() && op.getBit() == reg.getBit()), op.isMEM(), 0x0F, 0xC0 | (reg.isBit(8) ? 0 : 1)); }
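+// [editor's note] Hedged usage sketch, not part of the upstream patch. It
+// assumes the usual Xbyak entry point (Xbyak::CodeGenerator from xbyak.h);
+// the class name FmaKernel and the System V argument registers are
+// illustrative only:
+//   struct FmaKernel : Xbyak::CodeGenerator {
+//       FmaKernel() {
+//           vmovups(ymm0, ptr[rdi]);           // acc = dst[0..7]
+//           vmovups(ymm1, ptr[rsi]);           // a   = src1[0..7]
+//           vfmadd231ps(ymm0, ymm1, ptr[rdx]); // acc += a * src2[0..7]
+//           vmovups(ptr[rdi], ymm0);
+//           ret();
+//       }
+//   };
+//   // FmaKernel k;
+//   // auto f = k.getCode<void (*)(float*, const float*, const float*)>();
+//   // f(dst, a, b);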
+void xgetbv() { db(0x0F); db(0x01); db(0xD0); }
+void xlatb() { db(0xD7); }
+void xor_(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x30, 6); }
+void xor_(const Operand& op1, const Operand& op2) { opRM_RM(op1, op2, 0x30); }
+void xorpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x57, 0x66, isXMM_XMMorMEM); }
+void xorps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x57, 0x100, isXMM_XMMorMEM); }
+#ifdef XBYAK_ENABLE_OMITTED_OPERAND
+void vblendpd(const Xmm& x, const Operand& op, uint8 imm) { vblendpd(x, x, op, imm); }
+void vblendps(const Xmm& x, const Operand& op, uint8 imm) { vblendps(x, x, op, imm); }
+void vblendvpd(const Xmm& x1, const Operand& op, const Xmm& x4) { vblendvpd(x1, x1, op, x4); }
+void vblendvps(const Xmm& x1, const Operand& op, const Xmm& x4) { vblendvps(x1, x1, op, x4); }
+void vcmpeq_ospd(const Xmm& x, const Operand& op) { vcmpeq_ospd(x, x, op); }
+void vcmpeq_osps(const Xmm& x, const Operand& op) { vcmpeq_osps(x, x, op); }
+void vcmpeq_ossd(const Xmm& x, const Operand& op) { vcmpeq_ossd(x, x, op); }
+void vcmpeq_osss(const Xmm& x, const Operand& op) { vcmpeq_osss(x, x, op); }
+void vcmpeq_uqpd(const Xmm& x, const Operand& op) { vcmpeq_uqpd(x, x, op); }
+void vcmpeq_uqps(const Xmm& x, const Operand& op) { vcmpeq_uqps(x, x, op); }
+void vcmpeq_uqsd(const Xmm& x, const Operand& op) { vcmpeq_uqsd(x, x, op); }
+void vcmpeq_uqss(const Xmm& x, const Operand& op) { vcmpeq_uqss(x, x, op); }
+void vcmpeq_uspd(const Xmm& x, const Operand& op) { vcmpeq_uspd(x, x, op); }
+void vcmpeq_usps(const Xmm& x, const Operand& op) { vcmpeq_usps(x, x, op); }
+void vcmpeq_ussd(const Xmm& x, const Operand& op) { vcmpeq_ussd(x, x, op); }
+void vcmpeq_usss(const Xmm& x, const Operand& op) { vcmpeq_usss(x, x, op); }
+void vcmpeqpd(const Xmm& x, const Operand& op) { vcmpeqpd(x, x, op); }
+void vcmpeqps(const Xmm& x, const Operand& op) { vcmpeqps(x, x, op); }
+void vcmpeqsd(const Xmm& x, const Operand& op) { vcmpeqsd(x, x, op); }
+void vcmpeqss(const Xmm& x, const Operand& op) { vcmpeqss(x, x, op); }
+void vcmpfalse_ospd(const Xmm& x, const Operand& op) { vcmpfalse_ospd(x, x, op); }
+void vcmpfalse_osps(const Xmm& x, const Operand& op) { vcmpfalse_osps(x, x, op); }
+void vcmpfalse_ossd(const Xmm& x, const Operand& op) { vcmpfalse_ossd(x, x, op); }
+void vcmpfalse_osss(const Xmm& x, const Operand& op) { vcmpfalse_osss(x, x, op); }
+void vcmpfalsepd(const Xmm& x, const Operand& op) { vcmpfalsepd(x, x, op); }
+void vcmpfalseps(const Xmm& x, const Operand& op) { vcmpfalseps(x, x, op); }
+void vcmpfalsesd(const Xmm& x, const Operand& op) { vcmpfalsesd(x, x, op); }
+void vcmpfalsess(const Xmm& x, const Operand& op) { vcmpfalsess(x, x, op); }
+void vcmpge_oqpd(const Xmm& x, const Operand& op) { vcmpge_oqpd(x, x, op); }
+void vcmpge_oqps(const Xmm& x, const Operand& op) { vcmpge_oqps(x, x, op); }
+void vcmpge_oqsd(const Xmm& x, const Operand& op) { vcmpge_oqsd(x, x, op); }
+void vcmpge_oqss(const Xmm& x, const Operand& op) { vcmpge_oqss(x, x, op); }
+void vcmpgepd(const Xmm& x, const Operand& op) { vcmpgepd(x, x, op); }
+void vcmpgeps(const Xmm& x, const Operand& op) { vcmpgeps(x, x, op); }
+void vcmpgesd(const Xmm& x, const Operand& op) { vcmpgesd(x, x, op); }
+void vcmpgess(const Xmm& x, const Operand& op) { vcmpgess(x, x, op); }
+void vcmpgt_oqpd(const Xmm& x, const Operand& op) { vcmpgt_oqpd(x, x, op); }
+void vcmpgt_oqps(const Xmm& x, const Operand& op) { vcmpgt_oqps(x, x, op); }
+void vcmpgt_oqsd(const Xmm& x, const Operand& op) { vcmpgt_oqsd(x, x, op); }
+void
vcmpgt_oqss(const Xmm& x, const Operand& op) { vcmpgt_oqss(x, x, op); } +void vcmpgtpd(const Xmm& x, const Operand& op) { vcmpgtpd(x, x, op); } +void vcmpgtps(const Xmm& x, const Operand& op) { vcmpgtps(x, x, op); } +void vcmpgtsd(const Xmm& x, const Operand& op) { vcmpgtsd(x, x, op); } +void vcmpgtss(const Xmm& x, const Operand& op) { vcmpgtss(x, x, op); } +void vcmple_oqpd(const Xmm& x, const Operand& op) { vcmple_oqpd(x, x, op); } +void vcmple_oqps(const Xmm& x, const Operand& op) { vcmple_oqps(x, x, op); } +void vcmple_oqsd(const Xmm& x, const Operand& op) { vcmple_oqsd(x, x, op); } +void vcmple_oqss(const Xmm& x, const Operand& op) { vcmple_oqss(x, x, op); } +void vcmplepd(const Xmm& x, const Operand& op) { vcmplepd(x, x, op); } +void vcmpleps(const Xmm& x, const Operand& op) { vcmpleps(x, x, op); } +void vcmplesd(const Xmm& x, const Operand& op) { vcmplesd(x, x, op); } +void vcmpless(const Xmm& x, const Operand& op) { vcmpless(x, x, op); } +void vcmplt_oqpd(const Xmm& x, const Operand& op) { vcmplt_oqpd(x, x, op); } +void vcmplt_oqps(const Xmm& x, const Operand& op) { vcmplt_oqps(x, x, op); } +void vcmplt_oqsd(const Xmm& x, const Operand& op) { vcmplt_oqsd(x, x, op); } +void vcmplt_oqss(const Xmm& x, const Operand& op) { vcmplt_oqss(x, x, op); } +void vcmpltpd(const Xmm& x, const Operand& op) { vcmpltpd(x, x, op); } +void vcmpltps(const Xmm& x, const Operand& op) { vcmpltps(x, x, op); } +void vcmpltsd(const Xmm& x, const Operand& op) { vcmpltsd(x, x, op); } +void vcmpltss(const Xmm& x, const Operand& op) { vcmpltss(x, x, op); } +void vcmpneq_oqpd(const Xmm& x, const Operand& op) { vcmpneq_oqpd(x, x, op); } +void vcmpneq_oqps(const Xmm& x, const Operand& op) { vcmpneq_oqps(x, x, op); } +void vcmpneq_oqsd(const Xmm& x, const Operand& op) { vcmpneq_oqsd(x, x, op); } +void vcmpneq_oqss(const Xmm& x, const Operand& op) { vcmpneq_oqss(x, x, op); } +void vcmpneq_ospd(const Xmm& x, const Operand& op) { vcmpneq_ospd(x, x, op); } +void vcmpneq_osps(const Xmm& x, const Operand& op) { vcmpneq_osps(x, x, op); } +void vcmpneq_ossd(const Xmm& x, const Operand& op) { vcmpneq_ossd(x, x, op); } +void vcmpneq_osss(const Xmm& x, const Operand& op) { vcmpneq_osss(x, x, op); } +void vcmpneq_uspd(const Xmm& x, const Operand& op) { vcmpneq_uspd(x, x, op); } +void vcmpneq_usps(const Xmm& x, const Operand& op) { vcmpneq_usps(x, x, op); } +void vcmpneq_ussd(const Xmm& x, const Operand& op) { vcmpneq_ussd(x, x, op); } +void vcmpneq_usss(const Xmm& x, const Operand& op) { vcmpneq_usss(x, x, op); } +void vcmpneqpd(const Xmm& x, const Operand& op) { vcmpneqpd(x, x, op); } +void vcmpneqps(const Xmm& x, const Operand& op) { vcmpneqps(x, x, op); } +void vcmpneqsd(const Xmm& x, const Operand& op) { vcmpneqsd(x, x, op); } +void vcmpneqss(const Xmm& x, const Operand& op) { vcmpneqss(x, x, op); } +void vcmpnge_uqpd(const Xmm& x, const Operand& op) { vcmpnge_uqpd(x, x, op); } +void vcmpnge_uqps(const Xmm& x, const Operand& op) { vcmpnge_uqps(x, x, op); } +void vcmpnge_uqsd(const Xmm& x, const Operand& op) { vcmpnge_uqsd(x, x, op); } +void vcmpnge_uqss(const Xmm& x, const Operand& op) { vcmpnge_uqss(x, x, op); } +void vcmpngepd(const Xmm& x, const Operand& op) { vcmpngepd(x, x, op); } +void vcmpngeps(const Xmm& x, const Operand& op) { vcmpngeps(x, x, op); } +void vcmpngesd(const Xmm& x, const Operand& op) { vcmpngesd(x, x, op); } +void vcmpngess(const Xmm& x, const Operand& op) { vcmpngess(x, x, op); } +void vcmpngt_uqpd(const Xmm& x, const Operand& op) { vcmpngt_uqpd(x, x, op); } +void vcmpngt_uqps(const Xmm& x, const 
Operand& op) { vcmpngt_uqps(x, x, op); } +void vcmpngt_uqsd(const Xmm& x, const Operand& op) { vcmpngt_uqsd(x, x, op); } +void vcmpngt_uqss(const Xmm& x, const Operand& op) { vcmpngt_uqss(x, x, op); } +void vcmpngtpd(const Xmm& x, const Operand& op) { vcmpngtpd(x, x, op); } +void vcmpngtps(const Xmm& x, const Operand& op) { vcmpngtps(x, x, op); } +void vcmpngtsd(const Xmm& x, const Operand& op) { vcmpngtsd(x, x, op); } +void vcmpngtss(const Xmm& x, const Operand& op) { vcmpngtss(x, x, op); } +void vcmpnle_uqpd(const Xmm& x, const Operand& op) { vcmpnle_uqpd(x, x, op); } +void vcmpnle_uqps(const Xmm& x, const Operand& op) { vcmpnle_uqps(x, x, op); } +void vcmpnle_uqsd(const Xmm& x, const Operand& op) { vcmpnle_uqsd(x, x, op); } +void vcmpnle_uqss(const Xmm& x, const Operand& op) { vcmpnle_uqss(x, x, op); } +void vcmpnlepd(const Xmm& x, const Operand& op) { vcmpnlepd(x, x, op); } +void vcmpnleps(const Xmm& x, const Operand& op) { vcmpnleps(x, x, op); } +void vcmpnlesd(const Xmm& x, const Operand& op) { vcmpnlesd(x, x, op); } +void vcmpnless(const Xmm& x, const Operand& op) { vcmpnless(x, x, op); } +void vcmpnlt_uqpd(const Xmm& x, const Operand& op) { vcmpnlt_uqpd(x, x, op); } +void vcmpnlt_uqps(const Xmm& x, const Operand& op) { vcmpnlt_uqps(x, x, op); } +void vcmpnlt_uqsd(const Xmm& x, const Operand& op) { vcmpnlt_uqsd(x, x, op); } +void vcmpnlt_uqss(const Xmm& x, const Operand& op) { vcmpnlt_uqss(x, x, op); } +void vcmpnltpd(const Xmm& x, const Operand& op) { vcmpnltpd(x, x, op); } +void vcmpnltps(const Xmm& x, const Operand& op) { vcmpnltps(x, x, op); } +void vcmpnltsd(const Xmm& x, const Operand& op) { vcmpnltsd(x, x, op); } +void vcmpnltss(const Xmm& x, const Operand& op) { vcmpnltss(x, x, op); } +void vcmpord_spd(const Xmm& x, const Operand& op) { vcmpord_spd(x, x, op); } +void vcmpord_sps(const Xmm& x, const Operand& op) { vcmpord_sps(x, x, op); } +void vcmpord_ssd(const Xmm& x, const Operand& op) { vcmpord_ssd(x, x, op); } +void vcmpord_sss(const Xmm& x, const Operand& op) { vcmpord_sss(x, x, op); } +void vcmpordpd(const Xmm& x, const Operand& op) { vcmpordpd(x, x, op); } +void vcmpordps(const Xmm& x, const Operand& op) { vcmpordps(x, x, op); } +void vcmpordsd(const Xmm& x, const Operand& op) { vcmpordsd(x, x, op); } +void vcmpordss(const Xmm& x, const Operand& op) { vcmpordss(x, x, op); } +void vcmppd(const Xmm& x, const Operand& op, uint8 imm) { vcmppd(x, x, op, imm); } +void vcmpps(const Xmm& x, const Operand& op, uint8 imm) { vcmpps(x, x, op, imm); } +void vcmpsd(const Xmm& x, const Operand& op, uint8 imm) { vcmpsd(x, x, op, imm); } +void vcmpss(const Xmm& x, const Operand& op, uint8 imm) { vcmpss(x, x, op, imm); } +void vcmptrue_uspd(const Xmm& x, const Operand& op) { vcmptrue_uspd(x, x, op); } +void vcmptrue_usps(const Xmm& x, const Operand& op) { vcmptrue_usps(x, x, op); } +void vcmptrue_ussd(const Xmm& x, const Operand& op) { vcmptrue_ussd(x, x, op); } +void vcmptrue_usss(const Xmm& x, const Operand& op) { vcmptrue_usss(x, x, op); } +void vcmptruepd(const Xmm& x, const Operand& op) { vcmptruepd(x, x, op); } +void vcmptrueps(const Xmm& x, const Operand& op) { vcmptrueps(x, x, op); } +void vcmptruesd(const Xmm& x, const Operand& op) { vcmptruesd(x, x, op); } +void vcmptruess(const Xmm& x, const Operand& op) { vcmptruess(x, x, op); } +void vcmpunord_spd(const Xmm& x, const Operand& op) { vcmpunord_spd(x, x, op); } +void vcmpunord_sps(const Xmm& x, const Operand& op) { vcmpunord_sps(x, x, op); } +void vcmpunord_ssd(const Xmm& x, const Operand& op) { vcmpunord_ssd(x, x, op); } 
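+// [editor's note] Hedged clarification, not part of the upstream patch: under
+// XBYAK_ENABLE_OMITTED_OPERAND each two-operand overload in this block simply
+// repeats the destination as the first source, giving the destructive
+// SSE-style form; e.g. vcmpltps(xmm0, op) emits vcmpltps(xmm0, xmm0, op).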
+void vcmpunord_sss(const Xmm& x, const Operand& op) { vcmpunord_sss(x, x, op); } +void vcmpunordpd(const Xmm& x, const Operand& op) { vcmpunordpd(x, x, op); } +void vcmpunordps(const Xmm& x, const Operand& op) { vcmpunordps(x, x, op); } +void vcmpunordsd(const Xmm& x, const Operand& op) { vcmpunordsd(x, x, op); } +void vcmpunordss(const Xmm& x, const Operand& op) { vcmpunordss(x, x, op); } +void vcvtsd2ss(const Xmm& x, const Operand& op) { vcvtsd2ss(x, x, op); } +void vcvtsi2sd(const Xmm& x, const Operand& op) { vcvtsi2sd(x, x, op); } +void vcvtsi2ss(const Xmm& x, const Operand& op) { vcvtsi2ss(x, x, op); } +void vcvtss2sd(const Xmm& x, const Operand& op) { vcvtss2sd(x, x, op); } +void vdppd(const Xmm& x, const Operand& op, uint8 imm) { vdppd(x, x, op, imm); } +void vdpps(const Xmm& x, const Operand& op, uint8 imm) { vdpps(x, x, op, imm); } +void vinsertps(const Xmm& x, const Operand& op, uint8 imm) { vinsertps(x, x, op, imm); } +void vmpsadbw(const Xmm& x, const Operand& op, uint8 imm) { vmpsadbw(x, x, op, imm); } +void vpackssdw(const Xmm& x, const Operand& op) { vpackssdw(x, x, op); } +void vpacksswb(const Xmm& x, const Operand& op) { vpacksswb(x, x, op); } +void vpackusdw(const Xmm& x, const Operand& op) { vpackusdw(x, x, op); } +void vpackuswb(const Xmm& x, const Operand& op) { vpackuswb(x, x, op); } +void vpaddb(const Xmm& x, const Operand& op) { vpaddb(x, x, op); } +void vpaddd(const Xmm& x, const Operand& op) { vpaddd(x, x, op); } +void vpaddq(const Xmm& x, const Operand& op) { vpaddq(x, x, op); } +void vpaddsb(const Xmm& x, const Operand& op) { vpaddsb(x, x, op); } +void vpaddsw(const Xmm& x, const Operand& op) { vpaddsw(x, x, op); } +void vpaddusb(const Xmm& x, const Operand& op) { vpaddusb(x, x, op); } +void vpaddusw(const Xmm& x, const Operand& op) { vpaddusw(x, x, op); } +void vpaddw(const Xmm& x, const Operand& op) { vpaddw(x, x, op); } +void vpalignr(const Xmm& x, const Operand& op, uint8 imm) { vpalignr(x, x, op, imm); } +void vpand(const Xmm& x, const Operand& op) { vpand(x, x, op); } +void vpandn(const Xmm& x, const Operand& op) { vpandn(x, x, op); } +void vpavgb(const Xmm& x, const Operand& op) { vpavgb(x, x, op); } +void vpavgw(const Xmm& x, const Operand& op) { vpavgw(x, x, op); } +void vpblendd(const Xmm& x, const Operand& op, uint8 imm) { vpblendd(x, x, op, imm); } +void vpblendvb(const Xmm& x1, const Operand& op, const Xmm& x4) { vpblendvb(x1, x1, op, x4); } +void vpblendw(const Xmm& x, const Operand& op, uint8 imm) { vpblendw(x, x, op, imm); } +void vpclmulqdq(const Xmm& x, const Operand& op, uint8 imm) { vpclmulqdq(x, x, op, imm); } +void vpcmpeqb(const Xmm& x, const Operand& op) { vpcmpeqb(x, x, op); } +void vpcmpeqd(const Xmm& x, const Operand& op) { vpcmpeqd(x, x, op); } +void vpcmpeqq(const Xmm& x, const Operand& op) { vpcmpeqq(x, x, op); } +void vpcmpeqw(const Xmm& x, const Operand& op) { vpcmpeqw(x, x, op); } +void vpcmpgtb(const Xmm& x, const Operand& op) { vpcmpgtb(x, x, op); } +void vpcmpgtd(const Xmm& x, const Operand& op) { vpcmpgtd(x, x, op); } +void vpcmpgtq(const Xmm& x, const Operand& op) { vpcmpgtq(x, x, op); } +void vpcmpgtw(const Xmm& x, const Operand& op) { vpcmpgtw(x, x, op); } +void vphaddd(const Xmm& x, const Operand& op) { vphaddd(x, x, op); } +void vphaddsw(const Xmm& x, const Operand& op) { vphaddsw(x, x, op); } +void vphaddw(const Xmm& x, const Operand& op) { vphaddw(x, x, op); } +void vphsubd(const Xmm& x, const Operand& op) { vphsubd(x, x, op); } +void vphsubsw(const Xmm& x, const Operand& op) { vphsubsw(x, x, op); } +void 
vphsubw(const Xmm& x, const Operand& op) { vphsubw(x, x, op); } +void vpinsrb(const Xmm& x, const Operand& op, uint8 imm) { vpinsrb(x, x, op, imm); } +void vpinsrd(const Xmm& x, const Operand& op, uint8 imm) { vpinsrd(x, x, op, imm); } +void vpinsrq(const Xmm& x, const Operand& op, uint8 imm) { vpinsrq(x, x, op, imm); } +void vpinsrw(const Xmm& x, const Operand& op, uint8 imm) { vpinsrw(x, x, op, imm); } +void vpmaddubsw(const Xmm& x, const Operand& op) { vpmaddubsw(x, x, op); } +void vpmaddwd(const Xmm& x, const Operand& op) { vpmaddwd(x, x, op); } +void vpmaxsb(const Xmm& x, const Operand& op) { vpmaxsb(x, x, op); } +void vpmaxsd(const Xmm& x, const Operand& op) { vpmaxsd(x, x, op); } +void vpmaxsw(const Xmm& x, const Operand& op) { vpmaxsw(x, x, op); } +void vpmaxub(const Xmm& x, const Operand& op) { vpmaxub(x, x, op); } +void vpmaxud(const Xmm& x, const Operand& op) { vpmaxud(x, x, op); } +void vpmaxuw(const Xmm& x, const Operand& op) { vpmaxuw(x, x, op); } +void vpminsb(const Xmm& x, const Operand& op) { vpminsb(x, x, op); } +void vpminsd(const Xmm& x, const Operand& op) { vpminsd(x, x, op); } +void vpminsw(const Xmm& x, const Operand& op) { vpminsw(x, x, op); } +void vpminub(const Xmm& x, const Operand& op) { vpminub(x, x, op); } +void vpminud(const Xmm& x, const Operand& op) { vpminud(x, x, op); } +void vpminuw(const Xmm& x, const Operand& op) { vpminuw(x, x, op); } +void vpmuldq(const Xmm& x, const Operand& op) { vpmuldq(x, x, op); } +void vpmulhrsw(const Xmm& x, const Operand& op) { vpmulhrsw(x, x, op); } +void vpmulhuw(const Xmm& x, const Operand& op) { vpmulhuw(x, x, op); } +void vpmulhw(const Xmm& x, const Operand& op) { vpmulhw(x, x, op); } +void vpmulld(const Xmm& x, const Operand& op) { vpmulld(x, x, op); } +void vpmullw(const Xmm& x, const Operand& op) { vpmullw(x, x, op); } +void vpmuludq(const Xmm& x, const Operand& op) { vpmuludq(x, x, op); } +void vpor(const Xmm& x, const Operand& op) { vpor(x, x, op); } +void vpsadbw(const Xmm& x, const Operand& op) { vpsadbw(x, x, op); } +void vpsignb(const Xmm& x, const Operand& op) { vpsignb(x, x, op); } +void vpsignd(const Xmm& x, const Operand& op) { vpsignd(x, x, op); } +void vpsignw(const Xmm& x, const Operand& op) { vpsignw(x, x, op); } +void vpslld(const Xmm& x, const Operand& op) { vpslld(x, x, op); } +void vpslld(const Xmm& x, uint8 imm) { vpslld(x, x, imm); } +void vpslldq(const Xmm& x, uint8 imm) { vpslldq(x, x, imm); } +void vpsllq(const Xmm& x, const Operand& op) { vpsllq(x, x, op); } +void vpsllq(const Xmm& x, uint8 imm) { vpsllq(x, x, imm); } +void vpsllw(const Xmm& x, const Operand& op) { vpsllw(x, x, op); } +void vpsllw(const Xmm& x, uint8 imm) { vpsllw(x, x, imm); } +void vpsrad(const Xmm& x, const Operand& op) { vpsrad(x, x, op); } +void vpsrad(const Xmm& x, uint8 imm) { vpsrad(x, x, imm); } +void vpsraw(const Xmm& x, const Operand& op) { vpsraw(x, x, op); } +void vpsraw(const Xmm& x, uint8 imm) { vpsraw(x, x, imm); } +void vpsrld(const Xmm& x, const Operand& op) { vpsrld(x, x, op); } +void vpsrld(const Xmm& x, uint8 imm) { vpsrld(x, x, imm); } +void vpsrldq(const Xmm& x, uint8 imm) { vpsrldq(x, x, imm); } +void vpsrlq(const Xmm& x, const Operand& op) { vpsrlq(x, x, op); } +void vpsrlq(const Xmm& x, uint8 imm) { vpsrlq(x, x, imm); } +void vpsrlw(const Xmm& x, const Operand& op) { vpsrlw(x, x, op); } +void vpsrlw(const Xmm& x, uint8 imm) { vpsrlw(x, x, imm); } +void vpsubb(const Xmm& x, const Operand& op) { vpsubb(x, x, op); } +void vpsubd(const Xmm& x, const Operand& op) { vpsubd(x, x, op); } +void vpsubq(const 
Xmm& x, const Operand& op) { vpsubq(x, x, op); } +void vpsubsb(const Xmm& x, const Operand& op) { vpsubsb(x, x, op); } +void vpsubsw(const Xmm& x, const Operand& op) { vpsubsw(x, x, op); } +void vpsubusb(const Xmm& x, const Operand& op) { vpsubusb(x, x, op); } +void vpsubusw(const Xmm& x, const Operand& op) { vpsubusw(x, x, op); } +void vpsubw(const Xmm& x, const Operand& op) { vpsubw(x, x, op); } +void vpunpckhbw(const Xmm& x, const Operand& op) { vpunpckhbw(x, x, op); } +void vpunpckhdq(const Xmm& x, const Operand& op) { vpunpckhdq(x, x, op); } +void vpunpckhqdq(const Xmm& x, const Operand& op) { vpunpckhqdq(x, x, op); } +void vpunpckhwd(const Xmm& x, const Operand& op) { vpunpckhwd(x, x, op); } +void vpunpcklbw(const Xmm& x, const Operand& op) { vpunpcklbw(x, x, op); } +void vpunpckldq(const Xmm& x, const Operand& op) { vpunpckldq(x, x, op); } +void vpunpcklqdq(const Xmm& x, const Operand& op) { vpunpcklqdq(x, x, op); } +void vpunpcklwd(const Xmm& x, const Operand& op) { vpunpcklwd(x, x, op); } +void vpxor(const Xmm& x, const Operand& op) { vpxor(x, x, op); } +void vrcpss(const Xmm& x, const Operand& op) { vrcpss(x, x, op); } +void vroundsd(const Xmm& x, const Operand& op, uint8 imm) { vroundsd(x, x, op, imm); } +void vroundss(const Xmm& x, const Operand& op, uint8 imm) { vroundss(x, x, op, imm); } +void vrsqrtss(const Xmm& x, const Operand& op) { vrsqrtss(x, x, op); } +void vshufpd(const Xmm& x, const Operand& op, uint8 imm) { vshufpd(x, x, op, imm); } +void vshufps(const Xmm& x, const Operand& op, uint8 imm) { vshufps(x, x, op, imm); } +void vsqrtsd(const Xmm& x, const Operand& op) { vsqrtsd(x, x, op); } +void vsqrtss(const Xmm& x, const Operand& op) { vsqrtss(x, x, op); } +void vunpckhpd(const Xmm& x, const Operand& op) { vunpckhpd(x, x, op); } +void vunpckhps(const Xmm& x, const Operand& op) { vunpckhps(x, x, op); } +void vunpcklpd(const Xmm& x, const Operand& op) { vunpcklpd(x, x, op); } +void vunpcklps(const Xmm& x, const Operand& op) { vunpcklps(x, x, op); } +#endif +#ifdef XBYAK64 +void jecxz(std::string label) { db(0x67); opJmp(label, T_SHORT, 0xe3, 0, 0); } +void jecxz(const Label& label) { db(0x67); opJmp(label, T_SHORT, 0xe3, 0, 0); } +void jrcxz(std::string label) { opJmp(label, T_SHORT, 0xe3, 0, 0); } +void jrcxz(const Label& label) { opJmp(label, T_SHORT, 0xe3, 0, 0); } +void cdqe() { db(0x48); db(0x98); } +void cqo() { db(0x48); db(0x99); } +void cmpsq() { db(0x48); db(0xA7); } +void movsq() { db(0x48); db(0xA5); } +void scasq() { db(0x48); db(0xAF); } +void stosq() { db(0x48); db(0xAB); } +void cmpxchg16b(const Address& addr) { opModM(addr, Reg64(1), 0x0F, 0xC7); } +void movq(const Reg64& reg, const Mmx& mmx) { if (mmx.isXMM()) db(0x66); opModR(mmx, reg, 0x0F, 0x7E); } +void movq(const Mmx& mmx, const Reg64& reg) { if (mmx.isXMM()) db(0x66); opModR(mmx, reg, 0x0F, 0x6E); } +void movsxd(const Reg64& reg, const Operand& op) { if (!op.isBit(32)) throw Error(ERR_BAD_COMBINATION); opModRM(reg, op, op.isREG(), op.isMEM(), 0x63); } +void pextrq(const Operand& op, const Xmm& xmm, uint8 imm) { if (!op.isREG(64) && !op.isMEM()) throw Error(ERR_BAD_COMBINATION); opGen(Reg64(xmm.getIdx()), op, 0x16, 0x66, 0, imm, 0x3A); } +void pinsrq(const Xmm& xmm, const Operand& op, uint8 imm) { if (!op.isREG(64) && !op.isMEM()) throw Error(ERR_BAD_COMBINATION); opGen(Reg64(xmm.getIdx()), op, 0x22, 0x66, 0, imm, 0x3A); } +void vcvtss2si(const Reg64& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F3 | T_W1 | T_EVEX | T_EW1 | T_ER_X | T_N8, 0x2D); } +void vcvttss2si(const 
Reg64& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F3 | T_W1 | T_EVEX | T_EW1 | T_SAE_X | T_N8, 0x2C); } +void vcvtsd2si(const Reg64& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F2 | T_W1 | T_EVEX | T_EW1 | T_N4 | T_ER_X, 0x2D); } +void vcvttsd2si(const Reg64& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F2 | T_W1 | T_EVEX | T_EW1 | T_N4 | T_SAE_X, 0x2C); } +void vmovq(const Xmm& x, const Reg64& r) { opAVX_X_X_XM(x, xm0, Xmm(r.getIdx()), T_66 | T_0F | T_W1 | T_EVEX | T_EW1, 0x6E); } +void vmovq(const Reg64& r, const Xmm& x) { opAVX_X_X_XM(x, xm0, Xmm(r.getIdx()), T_66 | T_0F | T_W1 | T_EVEX | T_EW1, 0x7E); } +#else +void jcxz(std::string label) { db(0x67); opJmp(label, T_SHORT, 0xe3, 0, 0); } +void jcxz(const Label& label) { db(0x67); opJmp(label, T_SHORT, 0xe3, 0, 0); } +void jecxz(std::string label) { opJmp(label, T_SHORT, 0xe3, 0, 0); } +void jecxz(const Label& label) { opJmp(label, T_SHORT, 0xe3, 0, 0); } +void aaa() { db(0x37); } +void aad() { db(0xD5); db(0x0A); } +void aam() { db(0xD4); db(0x0A); } +void aas() { db(0x3F); } +void daa() { db(0x27); } +void das() { db(0x2F); } +void popad() { db(0x61); } +void popfd() { db(0x9D); } +void pusha() { db(0x60); } +void pushad() { db(0x60); } +void pushfd() { db(0x9C); } +void popa() { db(0x61); } +#endif +#ifndef XBYAK_NO_OP_NAMES +void and(const Operand& op1, const Operand& op2) { and_(op1, op2); } +void and(const Operand& op, uint32 imm) { and_(op, imm); } +void or(const Operand& op1, const Operand& op2) { or_(op1, op2); } +void or(const Operand& op, uint32 imm) { or_(op, imm); } +void xor(const Operand& op1, const Operand& op2) { xor_(op1, op2); } +void xor(const Operand& op, uint32 imm) { xor_(op, imm); } +void not(const Operand& op) { not_(op); } +#endif +#ifndef XBYAK_DISABLE_AVX512 +void kaddb(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W0, 0x4A); } +void kaddd(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W1, 0x4A); } +void kaddq(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W1, 0x4A); } +void kaddw(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W0, 0x4A); } +void kandb(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W0, 0x41); } +void kandd(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W1, 0x41); } +void kandnb(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W0, 0x42); } +void kandnd(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W1, 0x42); } +void kandnq(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W1, 0x42); } +void kandnw(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W0, 0x42); } +void kandq(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W1, 0x41); } +void kandw(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W0, 0x41); } +void kmovb(const Address& addr, const Opmask& k) { opVex(k, 0, addr, T_L0 | T_0F | T_66 | T_W0, 0x91); } +void kmovb(const Opmask& k, const Operand& op) { opVex(k, 0, op, T_L0 | T_0F | T_66 | T_W0, 0x90); } +void kmovb(const Opmask& k, const Reg32& r) { opVex(k, 
0, r, T_L0 | T_0F | T_66 | T_W0, 0x92); } +void kmovb(const Reg32& r, const Opmask& k) { opVex(r, 0, k, T_L0 | T_0F | T_66 | T_W0, 0x93); } +void kmovd(const Address& addr, const Opmask& k) { opVex(k, 0, addr, T_L0 | T_0F | T_66 | T_W1, 0x91); } +void kmovd(const Opmask& k, const Operand& op) { opVex(k, 0, op, T_L0 | T_0F | T_66 | T_W1, 0x90); } +void kmovd(const Opmask& k, const Reg32& r) { opVex(k, 0, r, T_L0 | T_0F | T_F2 | T_W0, 0x92); } +void kmovd(const Reg32& r, const Opmask& k) { opVex(r, 0, k, T_L0 | T_0F | T_F2 | T_W0, 0x93); } +void kmovq(const Address& addr, const Opmask& k) { opVex(k, 0, addr, T_L0 | T_0F | T_W1, 0x91); } +void kmovq(const Opmask& k, const Operand& op) { opVex(k, 0, op, T_L0 | T_0F | T_W1, 0x90); } +void kmovw(const Address& addr, const Opmask& k) { opVex(k, 0, addr, T_L0 | T_0F | T_W0, 0x91); } +void kmovw(const Opmask& k, const Operand& op) { opVex(k, 0, op, T_L0 | T_0F | T_W0, 0x90); } +void kmovw(const Opmask& k, const Reg32& r) { opVex(k, 0, r, T_L0 | T_0F | T_W0, 0x92); } +void kmovw(const Reg32& r, const Opmask& k) { opVex(r, 0, k, T_L0 | T_0F | T_W0, 0x93); } +void knotb(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_66 | T_W0, 0x44); } +void knotd(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_66 | T_W1, 0x44); } +void knotq(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_W1, 0x44); } +void knotw(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_W0, 0x44); } +void korb(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W0, 0x45); } +void kord(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W1, 0x45); } +void korq(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W1, 0x45); } +void kortestb(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_66 | T_W0, 0x98); } +void kortestd(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_66 | T_W1, 0x98); } +void kortestq(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_W1, 0x98); } +void kortestw(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_W0, 0x98); } +void korw(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W0, 0x45); } +void kshiftlb(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W0, 0x32, imm); } +void kshiftld(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W0, 0x33, imm); } +void kshiftlq(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W1, 0x33, imm); } +void kshiftlw(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W1, 0x32, imm); } +void kshiftrb(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W0, 0x30, imm); } +void kshiftrd(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W0, 0x31, imm); } +void kshiftrq(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W1, 0x31, imm); } +void kshiftrw(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W1, 0x30, imm); } +void ktestb(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_66 | T_W0, 0x99); } +void ktestd(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_66 | T_W1, 0x99); } +void ktestq(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_W1, 
0x99); } +void ktestw(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_W0, 0x99); } +void kunpckbw(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W0, 0x4B); } +void kunpckdq(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W1, 0x4B); } +void kunpckwd(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W0, 0x4B); } +void kxnorb(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W0, 0x46); } +void kxnord(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W1, 0x46); } +void kxnorq(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W1, 0x46); } +void kxnorw(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W0, 0x46); } +void kxorb(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W0, 0x47); } +void kxord(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W1, 0x47); } +void kxorq(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W1, 0x47); } +void kxorw(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W0, 0x47); } +void v4fmaddps(const Zmm& z1, const Zmm& z2, const Address& addr) { opAVX_X_X_XM(z1, z2, addr, T_0F38 | T_F2 | T_EW0 | T_YMM | T_MUST_EVEX | T_N16, 0x9A); } +void v4fmaddss(const Xmm& x1, const Xmm& x2, const Address& addr) { opAVX_X_X_XM(x1, x2, addr, T_0F38 | T_F2 | T_EW0 | T_MUST_EVEX | T_N16, 0x9B); } +void v4fnmaddps(const Zmm& z1, const Zmm& z2, const Address& addr) { opAVX_X_X_XM(z1, z2, addr, T_0F38 | T_F2 | T_EW0 | T_YMM | T_MUST_EVEX | T_N16, 0xAA); } +void v4fnmaddss(const Xmm& x1, const Xmm& x2, const Address& addr) { opAVX_X_X_XM(x1, x2, addr, T_0F38 | T_F2 | T_EW0 | T_MUST_EVEX | T_N16, 0xAB); } +void valignd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x03, imm); } +void valignq(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x03, imm); } +void vblendmpd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x65); } +void vblendmps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x65); } +void vbroadcastf32x2(const Ymm& y, const Operand& op) { opAVX_X_XM_IMM(y, op, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW0 | T_N8, 0x19); } +void vbroadcastf32x4(const Ymm& y, const Address& addr) { opAVX_X_XM_IMM(y, addr, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW0 | T_N16, 0x1A); } +void vbroadcastf32x8(const Zmm& y, const Address& addr) { opAVX_X_XM_IMM(y, addr, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW0 | T_N32, 0x1B); } +void vbroadcastf64x2(const Ymm& y, const Address& addr) { opAVX_X_XM_IMM(y, addr, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW1 | T_N16, 0x1A); } +void vbroadcastf64x4(const Zmm& y, const Address& addr) { opAVX_X_XM_IMM(y, addr, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW1 | T_N32, 0x1B); } +void vbroadcasti32x2(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW0 | T_N8, 0x59); } +void 
vbroadcasti32x4(const Ymm& y, const Operand& op) { opAVX_X_XM_IMM(y, op, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW0 | T_N16, 0x5A); } +void vbroadcasti32x8(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, op, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW0 | T_N32, 0x5B); } +void vbroadcasti64x2(const Ymm& y, const Operand& op) { opAVX_X_XM_IMM(y, op, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW1 | T_N16, 0x5A); } +void vbroadcasti64x4(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, op, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW1 | T_N32, 0x5B); } +void vcmppd(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0xC2, imm); } +void vcmpps(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_0F | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0xC2, imm); } +void vcmpsd(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_N8 | T_F2 | T_0F | T_EW1 | T_SAE_Z | T_MUST_EVEX, 0xC2, imm); } +void vcmpss(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_N4 | T_F3 | T_0F | T_EW0 | T_SAE_Z | T_MUST_EVEX, 0xC2, imm); } +void vcompressb(const Operand& op, const Xmm& x) { opAVX_X_XM_IMM(x, op, T_N1 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x63); } +void vcompresspd(const Operand& op, const Xmm& x) { opAVX_X_XM_IMM(x, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x8A); } +void vcompressps(const Operand& op, const Xmm& x) { opAVX_X_XM_IMM(x, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x8A); } +void vcompressw(const Operand& op, const Xmm& x) { opAVX_X_XM_IMM(x, op, T_N2 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x63); } +void vcvtpd2qq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F | T_EW1 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B64, 0x7B); } +void vcvtpd2udq(const Xmm& x, const Operand& op) { opCvt2(x, op, T_0F | T_YMM | T_MUST_EVEX | T_EW1 | T_B64 | T_ER_Z, 0x79); } +void vcvtpd2uqq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F | T_EW1 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B64, 0x79); } +void vcvtps2qq(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_66 | T_0F | T_YMM | T_MUST_EVEX | T_EW0 | T_B32 | T_N8 | T_N_VL | T_ER_Y, 0x7B); } +void vcvtps2udq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_0F | T_EW0 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B32, 0x79); } +void vcvtps2uqq(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_66 | T_0F | T_YMM | T_MUST_EVEX | T_EW0 | T_B32 | T_N8 | T_N_VL | T_ER_Y, 0x79); } +void vcvtqq2pd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_F3 | T_0F | T_EW1 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B64, 0xE6); } +void vcvtqq2ps(const Xmm& x, const Operand& op) { opCvt2(x, op, T_0F | T_YMM | T_MUST_EVEX | T_EW1 | T_B64 | T_ER_Z, 0x5B); } +void vcvtsd2usi(const Reg32e& r, const Operand& op) { int type = (T_F2 | T_0F | T_MUST_EVEX | T_N8 | T_ER_X) | (r.isREG(64) ? T_EW1 : T_EW0); opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, type, 0x79); } +void vcvtss2usi(const Reg32e& r, const Operand& op) { int type = (T_F3 | T_0F | T_MUST_EVEX | T_N4 | T_ER_X) | (r.isREG(64) ? 
T_EW1 : T_EW0); opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, type, 0x79); } +void vcvttpd2qq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x7A); } +void vcvttpd2udq(const Xmm& x, const Operand& op) { opCvt2(x, op, T_0F | T_YMM | T_MUST_EVEX | T_EW1 | T_B64 | T_SAE_Z, 0x78); } +void vcvttpd2uqq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x78); } +void vcvttps2qq(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_66 | T_0F | T_YMM | T_MUST_EVEX | T_EW0 | T_B32 | T_N8 | T_N_VL | T_SAE_Y, 0x7A); } +void vcvttps2udq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_0F | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x78); } +void vcvttps2uqq(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_66 | T_0F | T_YMM | T_MUST_EVEX | T_EW0 | T_B32 | T_N8 | T_N_VL | T_SAE_Y, 0x78); } +void vcvttsd2usi(const Reg32e& r, const Operand& op) { int type = (T_F2 | T_0F | T_MUST_EVEX | T_N8 | T_SAE_X) | (r.isREG(64) ? T_EW1 : T_EW0); opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, type, 0x78); } +void vcvttss2usi(const Reg32e& r, const Operand& op) { int type = (T_F3 | T_0F | T_MUST_EVEX | T_N4 | T_SAE_X) | (r.isREG(64) ? T_EW1 : T_EW0); opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, type, 0x78); } +void vcvtudq2pd(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_F3 | T_0F | T_YMM | T_MUST_EVEX | T_EW0 | T_B32 | T_N8 | T_N_VL, 0x7A); } +void vcvtudq2ps(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_F2 | T_0F | T_EW0 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B32, 0x7A); } +void vcvtuqq2pd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_F3 | T_0F | T_EW1 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B64, 0x7A); } +void vcvtuqq2ps(const Xmm& x, const Operand& op) { opCvt2(x, op, T_F2 | T_0F | T_YMM | T_MUST_EVEX | T_EW1 | T_B64 | T_ER_Z, 0x7A); } +void vcvtusi2sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opCvt3(x1, x2, op, T_F2 | T_0F | T_MUST_EVEX, T_W1 | T_EW1 | T_ER_X | T_N8, T_W0 | T_EW0 | T_N4, 0x7B); } +void vcvtusi2ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opCvt3(x1, x2, op, T_F3 | T_0F | T_MUST_EVEX | T_ER_X, T_W1 | T_EW1 | T_N8, T_W0 | T_EW0 | T_N4, 0x7B); } +void vdbpsadbw(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x42, imm); } +void vexp2pd(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, op, T_66 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW1 | T_B64 | T_SAE_Z, 0xC8); } +void vexp2ps(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, op, T_66 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW0 | T_B32 | T_SAE_Z, 0xC8); } +void vexpandpd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x88); } +void vexpandps(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x88); } +void vextractf32x4(const Operand& op, const Ymm& r, uint8 imm) { if (!op.is(Operand::MEM | Operand::XMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N16 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x19, imm); } +void vextractf32x8(const Operand& op, const Zmm& r, uint8 imm) { if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N32 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x1B, imm); } +void vextractf64x2(const Operand& op, const Ymm& r, uint8 imm) { if (!op.is(Operand::MEM 
| Operand::XMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N16 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x19, imm); } +void vextractf64x4(const Operand& op, const Zmm& r, uint8 imm) { if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N32 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x1B, imm); } +void vextracti32x4(const Operand& op, const Ymm& r, uint8 imm) { if (!op.is(Operand::MEM | Operand::XMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N16 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x39, imm); } +void vextracti32x8(const Operand& op, const Zmm& r, uint8 imm) { if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N32 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x3B, imm); } +void vextracti64x2(const Operand& op, const Ymm& r, uint8 imm) { if (!op.is(Operand::MEM | Operand::XMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N16 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x39, imm); } +void vextracti64x4(const Operand& op, const Zmm& r, uint8 imm) { if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N32 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x3B, imm); } +void vfixupimmpd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x54, imm); } +void vfixupimmps(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x54, imm); } +void vfixupimmsd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F3A | T_EW1 | T_SAE_Z | T_MUST_EVEX, 0x55, imm); } +void vfixupimmss(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F3A | T_EW0 | T_SAE_Z | T_MUST_EVEX, 0x55, imm); } +void vfpclasspd(const Opmask& k, const Operand& op, uint8 imm) { if (!op.isBit(128|256|512)) throw Error(ERR_BAD_MEM_SIZE); Reg x = k; x.setBit(op.getBit()); opVex(x, 0, op, T_66 | T_0F3A | T_MUST_EVEX | T_YMM | T_EW1 | T_B64, 0x66, imm); } +void vfpclassps(const Opmask& k, const Operand& op, uint8 imm) { if (!op.isBit(128|256|512)) throw Error(ERR_BAD_MEM_SIZE); Reg x = k; x.setBit(op.getBit()); opVex(x, 0, op, T_66 | T_0F3A | T_MUST_EVEX | T_YMM | T_EW0 | T_B32, 0x66, imm); } +void vfpclasssd(const Opmask& k, const Operand& op, uint8 imm) { if (!op.isXMEM()) throw Error(ERR_BAD_MEM_SIZE); opVex(k, 0, op, T_66 | T_0F3A | T_MUST_EVEX | T_EW1 | T_N8, 0x67, imm); } +void vfpclassss(const Opmask& k, const Operand& op, uint8 imm) { if (!op.isXMEM()) throw Error(ERR_BAD_MEM_SIZE); opVex(k, 0, op, T_66 | T_0F3A | T_MUST_EVEX | T_EW0 | T_N4, 0x67, imm); } +void vgatherdpd(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_VSIB, 0x92, 1); } +void vgatherdps(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_VSIB, 0x92, 0); } +void vgatherpf0dpd(const Address& addr) { opGatherFetch(addr, zm1, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::YMM); } +void vgatherpf0dps(const Address& addr) { opGatherFetch(addr, zm1, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::ZMM); } +void vgatherpf0qpd(const Address& addr) { opGatherFetch(addr, zm1, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 
0xC7, Operand::ZMM); } +void vgatherpf0qps(const Address& addr) { opGatherFetch(addr, zm1, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC7, Operand::ZMM); } +void vgatherpf1dpd(const Address& addr) { opGatherFetch(addr, zm2, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::YMM); } +void vgatherpf1dps(const Address& addr) { opGatherFetch(addr, zm2, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::ZMM); } +void vgatherpf1qpd(const Address& addr) { opGatherFetch(addr, zm2, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC7, Operand::ZMM); } +void vgatherpf1qps(const Address& addr) { opGatherFetch(addr, zm2, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC7, Operand::ZMM); } +void vgatherqpd(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_VSIB, 0x93, 0); } +void vgatherqps(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_VSIB, 0x93, 2); } +void vgetexppd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x42); } +void vgetexpps(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x42); } +void vgetexpsd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_SAE_X | T_MUST_EVEX, 0x43); } +void vgetexpss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_SAE_X | T_MUST_EVEX, 0x43); } +void vgetmantpd(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(x, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x26, imm); } +void vgetmantps(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(x, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x26, imm); } +void vgetmantsd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F3A | T_EW1 | T_SAE_X | T_MUST_EVEX, 0x27, imm); } +void vgetmantss(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F3A | T_EW0 | T_SAE_X | T_MUST_EVEX, 0x27, imm); } +void vinsertf32x4(const Ymm& r1, const Ymm& r2, const Operand& op, uint8 imm) {if (!(r1.getKind() == r2.getKind() && op.is(Operand::MEM | Operand::XMM))) throw Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N16 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x18, imm); } +void vinsertf32x8(const Zmm& r1, const Zmm& r2, const Operand& op, uint8 imm) {if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N32 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x1A, imm); } +void vinsertf64x2(const Ymm& r1, const Ymm& r2, const Operand& op, uint8 imm) {if (!(r1.getKind() == r2.getKind() && op.is(Operand::MEM | Operand::XMM))) throw Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N16 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x18, imm); } +void vinsertf64x4(const Zmm& r1, const Zmm& r2, const Operand& op, uint8 imm) {if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N32 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x1A, imm); } +void vinserti32x4(const Ymm& r1, const Ymm& r2, const Operand& op, uint8 imm) {if (!(r1.getKind() == r2.getKind() && op.is(Operand::MEM | Operand::XMM))) throw 
Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N16 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x38, imm); } +void vinserti32x8(const Zmm& r1, const Zmm& r2, const Operand& op, uint8 imm) {if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N32 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x3A, imm); } +void vinserti64x2(const Ymm& r1, const Ymm& r2, const Operand& op, uint8 imm) {if (!(r1.getKind() == r2.getKind() && op.is(Operand::MEM | Operand::XMM))) throw Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N16 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x38, imm); } +void vinserti64x4(const Zmm& r1, const Zmm& r2, const Operand& op, uint8 imm) {if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N32 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x3A, imm); } +void vmovdqa32(const Address& addr, const Xmm& x) { opAVX_X_XM_IMM(x, addr, T_66 | T_0F | T_EW0 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX | T_M_K, 0x7F); } +void vmovdqa32(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F | T_EW0 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX, 0x6F); } +void vmovdqa64(const Address& addr, const Xmm& x) { opAVX_X_XM_IMM(x, addr, T_66 | T_0F | T_EW1 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX | T_M_K, 0x7F); } +void vmovdqa64(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F | T_EW1 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX, 0x6F); } +void vmovdqu16(const Address& addr, const Xmm& x) { opAVX_X_XM_IMM(x, addr, T_F2 | T_0F | T_EW1 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX | T_M_K, 0x7F); } +void vmovdqu16(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_F2 | T_0F | T_EW1 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX, 0x6F); } +void vmovdqu32(const Address& addr, const Xmm& x) { opAVX_X_XM_IMM(x, addr, T_F3 | T_0F | T_EW0 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX | T_M_K, 0x7F); } +void vmovdqu32(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_F3 | T_0F | T_EW0 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX, 0x6F); } +void vmovdqu64(const Address& addr, const Xmm& x) { opAVX_X_XM_IMM(x, addr, T_F3 | T_0F | T_EW1 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX | T_M_K, 0x7F); } +void vmovdqu64(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_F3 | T_0F | T_EW1 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX, 0x6F); } +void vmovdqu8(const Address& addr, const Xmm& x) { opAVX_X_XM_IMM(x, addr, T_F2 | T_0F | T_EW0 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX | T_M_K, 0x7F); } +void vmovdqu8(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_F2 | T_0F | T_EW0 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX, 0x6F); } +void vp4dpwssd(const Zmm& z1, const Zmm& z2, const Address& addr) { opAVX_X_X_XM(z1, z2, addr, T_0F38 | T_F2 | T_EW0 | T_YMM | T_MUST_EVEX | T_N16, 0x52); } +void vp4dpwssds(const Zmm& z1, const Zmm& z2, const Address& addr) { opAVX_X_X_XM(z1, z2, addr, T_0F38 | T_F2 | T_EW0 | T_YMM | T_MUST_EVEX | T_N16, 0x53); } +void vpabsq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_MUST_EVEX | T_EW1 | T_B64 | T_YMM, 0x1F); } +void vpandd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0xDB); } +void vpandnd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0xDF); } +void vpandnq(const Xmm& x1, const Xmm& 
x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0xDF); } +void vpandq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0xDB); } +void vpblendmb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x66); } +void vpblendmd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x64); } +void vpblendmq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x64); } +void vpblendmw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x66); } +void vpbroadcastb(const Xmm& x, const Reg8& r) { opVex(x, 0, r, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x7A); } +void vpbroadcastd(const Xmm& x, const Reg32& r) { opVex(x, 0, r, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x7C); } +void vpbroadcastmb2q(const Xmm& x, const Opmask& k) { opVex(x, 0, k, T_F3 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW1, 0x2A); } +void vpbroadcastmw2d(const Xmm& x, const Opmask& k) { opVex(x, 0, k, T_F3 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW0, 0x3A); } +void vpbroadcastw(const Xmm& x, const Reg16& r) { opVex(x, 0, r, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x7B); } +void vpcmpb(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x3F, imm); } +void vpcmpd(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x1F, imm); } +void vpcmpeqb(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F | T_YMM | T_MUST_EVEX, 0x74); } +void vpcmpeqd(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F | T_YMM | T_MUST_EVEX | T_B32, 0x76); } +void vpcmpeqq(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x29); } +void vpcmpeqw(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F | T_YMM | T_MUST_EVEX, 0x75); } +void vpcmpgtb(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F | T_YMM | T_MUST_EVEX, 0x64); } +void vpcmpgtd(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x66); } +void vpcmpgtq(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x37); } +void vpcmpgtw(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F | T_YMM | T_MUST_EVEX, 0x65); } +void vpcmpq(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x1F, imm); } +void vpcmpub(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x3E, imm); } +void vpcmpud(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x1E, imm); } +void vpcmpuq(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW1 | T_YMM | 
T_MUST_EVEX | T_B64, 0x1E, imm); } +void vpcmpuw(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x3E, imm); } +void vpcmpw(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x3F, imm); } +void vpcompressd(const Operand& op, const Xmm& x) { opAVX_X_XM_IMM(x, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x8B); } +void vpcompressq(const Operand& op, const Xmm& x) { opAVX_X_XM_IMM(x, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x8B); } +void vpconflictd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0xC4); } +void vpconflictq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0xC4); } +void vpdpbusd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x50); } +void vpdpbusds(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x51); } +void vpdpwssd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x52); } +void vpdpwssds(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x53); } +void vpermb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x8D); } +void vpermi2b(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x75); } +void vpermi2d(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x76); } +void vpermi2pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x77); } +void vpermi2ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x77); } +void vpermi2q(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x76); } +void vpermi2w(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x75); } +void vpermt2b(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x7D); } +void vpermt2d(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x7E); } +void vpermt2pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x7F); } +void vpermt2ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x7F); } +void vpermt2q(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x7E); } +void vpermt2w(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x7D); } +void vpermw(const Xmm& x1, const Xmm& x2, const 
Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x8D); } +void vpexpandb(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_N1 | T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x62); } +void vpexpandd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x89); } +void vpexpandq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x89); } +void vpexpandw(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_N2 | T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x62); } +void vpgatherdd(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_VSIB, 0x90, 0); } +void vpgatherdq(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_VSIB, 0x90, 1); } +void vpgatherqd(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_VSIB, 0x91, 2); } +void vpgatherqq(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_VSIB, 0x91, 0); } +void vplzcntd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x44); } +void vplzcntq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x44); } +void vpmadd52huq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0xB5); } +void vpmadd52luq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0xB4); } +void vpmaxsq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x3D); } +void vpmaxuq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x3F); } +void vpminsq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x39); } +void vpminuq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x3B); } +void vpmovb2m(const Opmask& k, const Xmm& x) { opVex(k, 0, x, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW0, 0x29); } +void vpmovd2m(const Opmask& k, const Xmm& x) { opVex(k, 0, x, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW0, 0x39); } +void vpmovdb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N4 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x31, false); } +void vpmovdw(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x33, true); } +void vpmovm2b(const Xmm& x, const Opmask& k) { opVex(x, 0, k, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW0, 0x28); } +void vpmovm2d(const Xmm& x, const Opmask& k) { opVex(x, 0, k, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW0, 0x38); } +void vpmovm2q(const Xmm& x, const Opmask& k) { opVex(x, 0, k, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW1, 0x38); } +void vpmovm2w(const Xmm& x, const Opmask& k) { opVex(x, 0, k, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW1, 0x28); } +void vpmovq2m(const Opmask& k, const Xmm& x) { opVex(k, 0, x, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW1, 0x39); } +void vpmovqb(const Operand& op, 
const Xmm& x) { opVmov(op, x, T_N2 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x32, false); } +void vpmovqd(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x35, true); } +void vpmovqw(const Operand& op, const Xmm& x) { opVmov(op, x, T_N4 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x34, false); } +void vpmovsdb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N4 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x21, false); } +void vpmovsdw(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x23, true); } +void vpmovsqb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N2 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x22, false); } +void vpmovsqd(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x25, true); } +void vpmovsqw(const Operand& op, const Xmm& x) { opVmov(op, x, T_N4 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x24, false); } +void vpmovswb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x20, true); } +void vpmovusdb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N4 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x11, false); } +void vpmovusdw(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x13, true); } +void vpmovusqb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N2 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x12, false); } +void vpmovusqd(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x15, true); } +void vpmovusqw(const Operand& op, const Xmm& x) { opVmov(op, x, T_N4 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x14, false); } +void vpmovuswb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x10, true); } +void vpmovw2m(const Opmask& k, const Xmm& x) { opVex(k, 0, x, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW1, 0x29); } +void vpmovwb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x30, true); } +void vpmullq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x40); } +void vpmultishiftqb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x83); } +void vpopcntb(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x54); } +void vpopcntd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x55); } +void vpopcntq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x55); } +void vpopcntw(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x54); } +void vpord(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0xEB); } +void vporq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0xEB); } +void vprold(const Xmm& x, const Operand& op, uint8 
imm) { opAVX_X_X_XM(Xmm(x.getKind(), 1), x, op, T_66 | T_0F | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x72, imm); } +void vprolq(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 1), x, op, T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x72, imm); } +void vprolvd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x15); } +void vprolvq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x15); } +void vprord(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 0), x, op, T_66 | T_0F | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x72, imm); } +void vprorq(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 0), x, op, T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x72, imm); } +void vprorvd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x14); } +void vprorvq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x14); } +void vpscatterdd(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA0, 0); } +void vpscatterdq(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA0, 1); } +void vpscatterqd(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA1, 2); } +void vpscatterqq(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA1, 0); } +void vpshldd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x71, imm); } +void vpshldq(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x71, imm); } +void vpshldvd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x71); } +void vpshldvq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x71); } +void vpshldvw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x70); } +void vpshldw(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x70, imm); } +void vpshrdd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x73, imm); } +void vpshrdq(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x73, imm); } +void vpshrdvd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x73); } +void vpshrdvq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x73); } +void vpshrdvw(const Xmm& x1, 
const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x72); } +void vpshrdw(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x72, imm); } +void vpshufbitqmb(const Opmask& k, const Xmm& x, const Operand& op) { opVex(k, &x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x8F); } +void vpsllvw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x12); } +void vpsraq(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 4), x, op, T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x72, imm); } +void vpsraq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX, 0xE2); } +void vpsravq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x46); } +void vpsravw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x11); } +void vpsrlvw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x10); } +void vpternlogd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x25, imm); } +void vpternlogq(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x25, imm); } +void vptestmb(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x26); } +void vptestmd(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x27); } +void vptestmq(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x27); } +void vptestmw(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x26); } +void vptestnmb(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x26); } +void vptestnmd(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x27); } +void vptestnmq(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_F3 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x27); } +void vptestnmw(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_F3 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x26); } +void vpxord(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0xEF); } +void vpxorq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0xEF); } +void vrangepd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x50, imm); } +void vrangeps(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x50, imm); } +void vrangesd(const Xmm& x1, 
const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F3A | T_EW1 | T_SAE_X | T_MUST_EVEX, 0x51, imm); } +void vrangess(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F3A | T_EW0 | T_SAE_X | T_MUST_EVEX, 0x51, imm); } +void vrcp14pd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x4C); } +void vrcp14ps(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x4C); } +void vrcp14sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX, 0x4D); } +void vrcp14ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX, 0x4D); } +void vrcp28pd(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, op, T_66 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW1 | T_B64 | T_SAE_Z, 0xCA); } +void vrcp28ps(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, op, T_66 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW0 | T_B32 | T_SAE_Z, 0xCA); } +void vrcp28sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_SAE_X | T_MUST_EVEX, 0xCB); } +void vrcp28ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_SAE_X | T_MUST_EVEX, 0xCB); } +void vreducepd(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(x, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x56, imm); } +void vreduceps(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(x, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x56, imm); } +void vreducesd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F3A | T_EW1 | T_SAE_X | T_MUST_EVEX, 0x57, imm); } +void vreducess(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F3A | T_EW0 | T_SAE_X | T_MUST_EVEX, 0x57, imm); } +void vrndscalepd(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(x, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x09, imm); } +void vrndscaleps(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(x, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x08, imm); } +void vrndscalesd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F3A | T_EW1 | T_MUST_EVEX, 0x0B, imm); } +void vrndscaless(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F3A | T_EW0 | T_MUST_EVEX, 0x0A, imm); } +void vrsqrt14pd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x4E); } +void vrsqrt14ps(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x4E); } +void vrsqrt14sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x4F); } +void vrsqrt14ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x4F); } +void vrsqrt28pd(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, op, T_66 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW1 | T_B64 | T_SAE_Z, 0xCC); } +void vrsqrt28ps(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, 
op, T_66 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW0 | T_B32 | T_SAE_Z, 0xCC); } +void vrsqrt28sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_SAE_X | T_MUST_EVEX, 0xCD); } +void vrsqrt28ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_SAE_X | T_MUST_EVEX, 0xCD); } +void vscalefpd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B64, 0x2C); } +void vscalefps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B32, 0x2C); } +void vscalefsd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_ER_X | T_MUST_EVEX, 0x2D); } +void vscalefss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_ER_X | T_MUST_EVEX, 0x2D); } +void vscatterdpd(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA2, 1); } +void vscatterdps(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA2, 0); } +void vscatterpf0dpd(const Address& addr) { opGatherFetch(addr, zm5, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::YMM); } +void vscatterpf0dps(const Address& addr) { opGatherFetch(addr, zm5, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::ZMM); } +void vscatterpf0qpd(const Address& addr) { opGatherFetch(addr, zm5, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC7, Operand::ZMM); } +void vscatterpf0qps(const Address& addr) { opGatherFetch(addr, zm5, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC7, Operand::ZMM); } +void vscatterpf1dpd(const Address& addr) { opGatherFetch(addr, zm6, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::YMM); } +void vscatterpf1dps(const Address& addr) { opGatherFetch(addr, zm6, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::ZMM); } +void vscatterpf1qpd(const Address& addr) { opGatherFetch(addr, zm6, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC7, Operand::ZMM); } +void vscatterpf1qps(const Address& addr) { opGatherFetch(addr, zm6, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC7, Operand::ZMM); } +void vscatterqpd(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA3, 0); } +void vscatterqps(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA3, 2); } +void vshuff32x4(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F3A | T_YMM | T_MUST_EVEX | T_EW0 | T_B32, 0x23, imm); } +void vshuff64x2(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F3A | T_YMM | T_MUST_EVEX | T_EW1 | T_B64, 0x23, imm); } +void vshufi32x4(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F3A | T_YMM | T_MUST_EVEX | T_EW0 | T_B32, 0x43, imm); } +void vshufi64x2(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F3A | T_YMM | T_MUST_EVEX | T_EW1 | T_B64, 0x43, imm); } 
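+// The wrappers below take a 64-bit GPR operand (kmovq with a Reg64,
+// vpbroadcastq from a Reg64), so they are emitted only when XBYAK64 is defined.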
+#ifdef XBYAK64
+void kmovq(const Opmask& k, const Reg64& r) { opVex(k, 0, r, T_L0 | T_0F | T_F2 | T_W1, 0x92); }
+void kmovq(const Reg64& r, const Opmask& k) { opVex(r, 0, k, T_L0 | T_0F | T_F2 | T_W1, 0x93); }
+void vpbroadcastq(const Xmm& x, const Reg64& r) { opVex(x, 0, r, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x7C); }
+#endif
+#endif
diff --git a/vendor/github.com/byzantine-lab/mcl/src/xbyak/xbyak_util.h b/vendor/github.com/byzantine-lab/mcl/src/xbyak/xbyak_util.h
new file mode 100644
index 000000000..01544501d
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/src/xbyak/xbyak_util.h
@@ -0,0 +1,653 @@
+#ifndef XBYAK_XBYAK_UTIL_H_
+#define XBYAK_XBYAK_UTIL_H_
+
+/**
+	utility class and functions for Xbyak
+	Xbyak::util::Clock ; rdtsc timer
+	Xbyak::util::Cpu ; detect CPU
+	@note this header is UNDER CONSTRUCTION!
+*/
+#include "xbyak.h"
+
+#ifdef _MSC_VER
+	#if (_MSC_VER < 1400) && defined(XBYAK32)
+		static inline __declspec(naked) void __cpuid(int[4], int)
+		{
+			__asm {
+				push ebx
+				push esi
+				mov eax, dword ptr [esp + 4 * 2 + 8] // eaxIn
+				cpuid
+				mov esi, dword ptr [esp + 4 * 2 + 4] // data
+				mov dword ptr [esi], eax
+				mov dword ptr [esi + 4], ebx
+				mov dword ptr [esi + 8], ecx
+				mov dword ptr [esi + 12], edx
+				pop esi
+				pop ebx
+				ret
+			}
+		}
+	#else
+		#include <intrin.h> // for __cpuid
+	#endif
+#else
+	#ifndef __GNUC_PREREQ
+		#define __GNUC_PREREQ(major, minor) ((((__GNUC__) << 16) + (__GNUC_MINOR__)) >= (((major) << 16) + (minor)))
+	#endif
+	#if __GNUC_PREREQ(4, 3) && !defined(__APPLE__)
+		#include <cpuid.h>
+	#else
+		#if defined(__APPLE__) && defined(XBYAK32) // avoid err : can't find a register in class `BREG' while reloading `asm'
+			#define __cpuid(eaxIn, a, b, c, d) __asm__ __volatile__("pushl %%ebx\ncpuid\nmovl %%ebx, %%esi\npopl %%ebx" : "=a"(a), "=S"(b), "=c"(c), "=d"(d) : "0"(eaxIn))
+			#define __cpuid_count(eaxIn, ecxIn, a, b, c, d) __asm__ __volatile__("pushl %%ebx\ncpuid\nmovl %%ebx, %%esi\npopl %%ebx" : "=a"(a), "=S"(b), "=c"(c), "=d"(d) : "0"(eaxIn), "2"(ecxIn))
+		#else
+			#define __cpuid(eaxIn, a, b, c, d) __asm__ __volatile__("cpuid\n" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "0"(eaxIn))
+			#define __cpuid_count(eaxIn, ecxIn, a, b, c, d) __asm__ __volatile__("cpuid\n" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "0"(eaxIn), "2"(ecxIn))
+		#endif
+	#endif
+#endif
+
+namespace Xbyak { namespace util {
+
+/**
+	CPU detection class
+*/
+class Cpu {
+	uint64 type_;
+	unsigned int get32bitAsBE(const char *x) const
+	{
+		return x[0] | (x[1] << 8) | (x[2] << 16) | (x[3] << 24);
+	}
+	unsigned int mask(int n) const
+	{
+		return (1U << n) - 1;
+	}
+	void setFamily()
+	{
+		unsigned int data[4];
+		getCpuid(1, data);
+		stepping = data[0] & mask(4);
+		model = (data[0] >> 4) & mask(4);
+		family = (data[0] >> 8) & mask(4);
+		// type = (data[0] >> 12) & mask(2);
+		extModel = (data[0] >> 16) & mask(4);
+		extFamily = (data[0] >> 20) & mask(8);
+		if (family == 0x0f) {
+			displayFamily = family + extFamily;
+		} else {
+			displayFamily = family;
+		}
+		if (family == 6 || family == 0x0f) {
+			displayModel = (extModel << 4) + model;
+		} else {
+			displayModel = model;
+		}
+	}
+	unsigned int extractBit(unsigned int val, unsigned int base, unsigned int end)
+	{
+		return (val >> base) & ((1u << (end - base)) - 1);
+	}
+	void setCacheHierarchy()
+	{
+		if ((type_ & tINTEL) == 0) return;
+		const unsigned int NO_CACHE = 0;
+		const unsigned int DATA_CACHE = 1;
+//		const unsigned int INSTRUCTION_CACHE = 2;
+		const unsigned int UNIFIED_CACHE = 3;
+		unsigned int smt_width = 0;
+		unsigned int n_cores = 0;
+		unsigned int
data[4]; + + /* + if leaf 11 exists, we use it to get the number of smt cores and cores on socket + If x2APIC is supported, these are the only correct numbers. + + leaf 0xB can be zeroed-out by a hypervisor + */ + getCpuidEx(0x0, 0, data); + if (data[0] >= 0xB) { + getCpuidEx(0xB, 0, data); // CPUID for SMT Level + smt_width = data[1] & 0x7FFF; + getCpuidEx(0xB, 1, data); // CPUID for CORE Level + n_cores = data[1] & 0x7FFF; + } + + /* + Assumptions: + the first level of data cache is not shared (which is the + case for every existing architecture) and use this to + determine the SMT width for arch not supporting leaf 11. + when leaf 4 reports a number of core less than n_cores + on socket reported by leaf 11, then it is a correct number + of cores not an upperbound. + */ + for (int i = 0; data_cache_levels < maxNumberCacheLevels; i++) { + getCpuidEx(0x4, i, data); + unsigned int cacheType = extractBit(data[0], 0, 4); + if (cacheType == NO_CACHE) break; + if (cacheType == DATA_CACHE || cacheType == UNIFIED_CACHE) { + unsigned int nb_logical_cores = extractBit(data[0], 14, 25) + 1; + if (n_cores != 0) { // true only if leaf 0xB is supported and valid + nb_logical_cores = (std::min)(nb_logical_cores, n_cores); + } + assert(nb_logical_cores != 0); + data_cache_size[data_cache_levels] = + (extractBit(data[1], 22, 31) + 1) + * (extractBit(data[1], 12, 21) + 1) + * (extractBit(data[1], 0, 11) + 1) + * (data[2] + 1); + if (cacheType == DATA_CACHE && smt_width == 0) smt_width = nb_logical_cores; + assert(smt_width != 0); + cores_sharing_data_cache[data_cache_levels] = (std::max)(nb_logical_cores / smt_width, 1u); + data_cache_levels++; + } + } + } + +public: + int model; + int family; + int stepping; + int extModel; + int extFamily; + int displayFamily; // family + extFamily + int displayModel; // model + extModel + + // may I move these members into private? 
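+	// (Illustrative note, not part of the original header.) Leaf 4 packs the
+	// cache shape into EBX/ECX, so the size computed in setCacheHierarchy() is
+	//   ways * partitions * line_size * sets;
+	// e.g. an 8-way L1d with 64-byte lines and 64 sets gives 8*1*64*64 = 32 KiB.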
+	static const unsigned int maxNumberCacheLevels = 10;
+	unsigned int data_cache_size[maxNumberCacheLevels];
+	unsigned int cores_sharing_data_cache[maxNumberCacheLevels];
+	unsigned int data_cache_levels;
+
+	unsigned int getDataCacheLevels() const { return data_cache_levels; }
+	unsigned int getCoresSharingDataCache(unsigned int i) const
+	{
+		if (i >= data_cache_levels) throw Error(ERR_BAD_PARAMETER);
+		return cores_sharing_data_cache[i];
+	}
+	unsigned int getDataCacheSize(unsigned int i) const
+	{
+		if (i >= data_cache_levels) throw Error(ERR_BAD_PARAMETER);
+		return data_cache_size[i];
+	}
+
+	/*
+		data[] = { eax, ebx, ecx, edx }
+	*/
+	static inline void getCpuid(unsigned int eaxIn, unsigned int data[4])
+	{
+#ifdef _MSC_VER
+		__cpuid(reinterpret_cast<int*>(data), eaxIn);
+#else
+		__cpuid(eaxIn, data[0], data[1], data[2], data[3]);
+#endif
+	}
+	static inline void getCpuidEx(unsigned int eaxIn, unsigned int ecxIn, unsigned int data[4])
+	{
+#ifdef _MSC_VER
+		__cpuidex(reinterpret_cast<int*>(data), eaxIn, ecxIn);
+#else
+		__cpuid_count(eaxIn, ecxIn, data[0], data[1], data[2], data[3]);
+#endif
+	}
+	static inline uint64 getXfeature()
+	{
+#ifdef _MSC_VER
+		return _xgetbv(0);
+#else
+		unsigned int eax, edx;
+		// xgetbv is not supported on gcc 4.2
+//		__asm__ volatile("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0));
+		__asm__ volatile(".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c"(0));
+		return ((uint64)edx << 32) | eax;
+#endif
+	}
+	typedef uint64 Type;
+
+	static const Type NONE = 0;
+	static const Type tMMX = 1 << 0;
+	static const Type tMMX2 = 1 << 1;
+	static const Type tCMOV = 1 << 2;
+	static const Type tSSE = 1 << 3;
+	static const Type tSSE2 = 1 << 4;
+	static const Type tSSE3 = 1 << 5;
+	static const Type tSSSE3 = 1 << 6;
+	static const Type tSSE41 = 1 << 7;
+	static const Type tSSE42 = 1 << 8;
+	static const Type tPOPCNT = 1 << 9;
+	static const Type tAESNI = 1 << 10;
+	static const Type tSSE5 = 1 << 11;
+	static const Type tOSXSAVE = 1 << 12;
+	static const Type tPCLMULQDQ = 1 << 13;
+	static const Type tAVX = 1 << 14;
+	static const Type tFMA = 1 << 15;
+
+	static const Type t3DN = 1 << 16;
+	static const Type tE3DN = 1 << 17;
+	static const Type tSSE4a = 1 << 18;
+	static const Type tRDTSCP = 1 << 19;
+	static const Type tAVX2 = 1 << 20;
+	static const Type tBMI1 = 1 << 21; // andn, bextr, blsi, blsmsk, blsr, tzcnt
+	static const Type tBMI2 = 1 << 22; // bzhi, mulx, pdep, pext, rorx, sarx, shlx, shrx
+	static const Type tLZCNT = 1 << 23;
+
+	static const Type tINTEL = 1 << 24;
+	static const Type tAMD = 1 << 25;
+
+	static const Type tENHANCED_REP = 1 << 26; // enhanced rep movsb/stosb
+	static const Type tRDRAND = 1 << 27;
+	static const Type tADX = 1 << 28; // adcx, adox
+	static const Type tRDSEED = 1 << 29; // rdseed
+	static const Type tSMAP = 1 << 30; // stac
+	static const Type tHLE = uint64(1) << 31; // xacquire, xrelease, xtest
+	static const Type tRTM = uint64(1) << 32; // xbegin, xend, xabort
+	static const Type tF16C = uint64(1) << 33; // vcvtph2ps, vcvtps2ph
+	static const Type tMOVBE = uint64(1) << 34; // movbe
+	static const Type tAVX512F = uint64(1) << 35;
+	static const Type tAVX512DQ = uint64(1) << 36;
+	static const Type tAVX512_IFMA = uint64(1) << 37;
+	static const Type tAVX512IFMA = tAVX512_IFMA;
+	static const Type tAVX512PF = uint64(1) << 38;
+	static const Type tAVX512ER = uint64(1) << 39;
+	static const Type tAVX512CD = uint64(1) << 40;
+	static const Type tAVX512BW = uint64(1) << 41;
+	static const Type tAVX512VL = uint64(1) << 42;
+	static const Type
tAVX512_VBMI = uint64(1) << 43; + static const Type tAVX512VBMI = tAVX512_VBMI; // changed by Intel's manual + static const Type tAVX512_4VNNIW = uint64(1) << 44; + static const Type tAVX512_4FMAPS = uint64(1) << 45; + static const Type tPREFETCHWT1 = uint64(1) << 46; + static const Type tPREFETCHW = uint64(1) << 47; + static const Type tSHA = uint64(1) << 48; + static const Type tMPX = uint64(1) << 49; + static const Type tAVX512_VBMI2 = uint64(1) << 50; + static const Type tGFNI = uint64(1) << 51; + static const Type tVAES = uint64(1) << 52; + static const Type tVPCLMULQDQ = uint64(1) << 53; + static const Type tAVX512_VNNI = uint64(1) << 54; + static const Type tAVX512_BITALG = uint64(1) << 55; + static const Type tAVX512_VPOPCNTDQ = uint64(1) << 56; + + Cpu() + : type_(NONE) + , data_cache_levels(0) + { + unsigned int data[4]; + const unsigned int& EAX = data[0]; + const unsigned int& EBX = data[1]; + const unsigned int& ECX = data[2]; + const unsigned int& EDX = data[3]; + getCpuid(0, data); + const unsigned int maxNum = EAX; + static const char intel[] = "ntel"; + static const char amd[] = "cAMD"; + if (ECX == get32bitAsBE(amd)) { + type_ |= tAMD; + getCpuid(0x80000001, data); + if (EDX & (1U << 31)) type_ |= t3DN; + if (EDX & (1U << 15)) type_ |= tCMOV; + if (EDX & (1U << 30)) type_ |= tE3DN; + if (EDX & (1U << 22)) type_ |= tMMX2; + if (EDX & (1U << 27)) type_ |= tRDTSCP; + } + if (ECX == get32bitAsBE(intel)) { + type_ |= tINTEL; + getCpuid(0x80000001, data); + if (EDX & (1U << 27)) type_ |= tRDTSCP; + if (ECX & (1U << 5)) type_ |= tLZCNT; + if (ECX & (1U << 8)) type_ |= tPREFETCHW; + } + getCpuid(1, data); + if (ECX & (1U << 0)) type_ |= tSSE3; + if (ECX & (1U << 9)) type_ |= tSSSE3; + if (ECX & (1U << 19)) type_ |= tSSE41; + if (ECX & (1U << 20)) type_ |= tSSE42; + if (ECX & (1U << 22)) type_ |= tMOVBE; + if (ECX & (1U << 23)) type_ |= tPOPCNT; + if (ECX & (1U << 25)) type_ |= tAESNI; + if (ECX & (1U << 1)) type_ |= tPCLMULQDQ; + if (ECX & (1U << 27)) type_ |= tOSXSAVE; + if (ECX & (1U << 30)) type_ |= tRDRAND; + if (ECX & (1U << 29)) type_ |= tF16C; + + if (EDX & (1U << 15)) type_ |= tCMOV; + if (EDX & (1U << 23)) type_ |= tMMX; + if (EDX & (1U << 25)) type_ |= tMMX2 | tSSE; + if (EDX & (1U << 26)) type_ |= tSSE2; + + if (type_ & tOSXSAVE) { + // check XFEATURE_ENABLED_MASK[2:1] = '11b' + uint64 bv = getXfeature(); + if ((bv & 6) == 6) { + if (ECX & (1U << 28)) type_ |= tAVX; + if (ECX & (1U << 12)) type_ |= tFMA; + if (((bv >> 5) & 7) == 7) { + getCpuidEx(7, 0, data); + if (EBX & (1U << 16)) type_ |= tAVX512F; + if (type_ & tAVX512F) { + if (EBX & (1U << 17)) type_ |= tAVX512DQ; + if (EBX & (1U << 21)) type_ |= tAVX512_IFMA; + if (EBX & (1U << 26)) type_ |= tAVX512PF; + if (EBX & (1U << 27)) type_ |= tAVX512ER; + if (EBX & (1U << 28)) type_ |= tAVX512CD; + if (EBX & (1U << 30)) type_ |= tAVX512BW; + if (EBX & (1U << 31)) type_ |= tAVX512VL; + if (ECX & (1U << 1)) type_ |= tAVX512_VBMI; + if (ECX & (1U << 6)) type_ |= tAVX512_VBMI2; + if (ECX & (1U << 8)) type_ |= tGFNI; + if (ECX & (1U << 9)) type_ |= tVAES; + if (ECX & (1U << 10)) type_ |= tVPCLMULQDQ; + if (ECX & (1U << 11)) type_ |= tAVX512_VNNI; + if (ECX & (1U << 12)) type_ |= tAVX512_BITALG; + if (ECX & (1U << 14)) type_ |= tAVX512_VPOPCNTDQ; + if (EDX & (1U << 2)) type_ |= tAVX512_4VNNIW; + if (EDX & (1U << 3)) type_ |= tAVX512_4FMAPS; + } + } + } + } + if (maxNum >= 7) { + getCpuidEx(7, 0, data); + if (type_ & tAVX && (EBX & (1U << 5))) type_ |= tAVX2; + if (EBX & (1U << 3)) type_ |= tBMI1; + if (EBX & (1U << 8)) type_ 
|= tBMI2; + if (EBX & (1U << 9)) type_ |= tENHANCED_REP; + if (EBX & (1U << 18)) type_ |= tRDSEED; + if (EBX & (1U << 19)) type_ |= tADX; + if (EBX & (1U << 20)) type_ |= tSMAP; + if (EBX & (1U << 4)) type_ |= tHLE; + if (EBX & (1U << 11)) type_ |= tRTM; + if (EBX & (1U << 14)) type_ |= tMPX; + if (EBX & (1U << 29)) type_ |= tSHA; + if (ECX & (1U << 0)) type_ |= tPREFETCHWT1; + } + setFamily(); + setCacheHierarchy(); + } + void putFamily() const + { + printf("family=%d, model=%X, stepping=%d, extFamily=%d, extModel=%X\n", + family, model, stepping, extFamily, extModel); + printf("display:family=%X, model=%X\n", displayFamily, displayModel); + } + bool has(Type type) const + { + return (type & type_) != 0; + } +}; + +class Clock { +public: + static inline uint64 getRdtsc() + { +#ifdef _MSC_VER + return __rdtsc(); +#else + unsigned int eax, edx; + __asm__ volatile("rdtsc" : "=a"(eax), "=d"(edx)); + return ((uint64)edx << 32) | eax; +#endif + } + Clock() + : clock_(0) + , count_(0) + { + } + void begin() + { + clock_ -= getRdtsc(); + } + void end() + { + clock_ += getRdtsc(); + count_++; + } + int getCount() const { return count_; } + uint64 getClock() const { return clock_; } + void clear() { count_ = 0; clock_ = 0; } +private: + uint64 clock_; + int count_; +}; + +#ifdef XBYAK64 +const int UseRCX = 1 << 6; +const int UseRDX = 1 << 7; + +class Pack { + static const size_t maxTblNum = 15; + const Xbyak::Reg64 *tbl_[maxTblNum]; + size_t n_; +public: + Pack() : tbl_(), n_(0) {} + Pack(const Xbyak::Reg64 *tbl, size_t n) { init(tbl, n); } + Pack(const Pack& rhs) + : n_(rhs.n_) + { + for (size_t i = 0; i < n_; i++) tbl_[i] = rhs.tbl_[i]; + } + Pack& operator=(const Pack& rhs) + { + n_ = rhs.n_; + for (size_t i = 0; i < n_; i++) tbl_[i] = rhs.tbl_[i]; + return *this; + } + Pack(const Xbyak::Reg64& t0) + { n_ = 1; tbl_[0] = &t0; } + Pack(const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) + { n_ = 2; tbl_[0] = &t0; tbl_[1] = &t1; } + Pack(const Xbyak::Reg64& t2, const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) + { n_ = 3; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; } + Pack(const Xbyak::Reg64& t3, const Xbyak::Reg64& t2, const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) + { n_ = 4; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; tbl_[3] = &t3; } + Pack(const Xbyak::Reg64& t4, const Xbyak::Reg64& t3, const Xbyak::Reg64& t2, const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) + { n_ = 5; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; tbl_[3] = &t3; tbl_[4] = &t4; } + Pack(const Xbyak::Reg64& t5, const Xbyak::Reg64& t4, const Xbyak::Reg64& t3, const Xbyak::Reg64& t2, const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) + { n_ = 6; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; tbl_[3] = &t3; tbl_[4] = &t4; tbl_[5] = &t5; } + Pack(const Xbyak::Reg64& t6, const Xbyak::Reg64& t5, const Xbyak::Reg64& t4, const Xbyak::Reg64& t3, const Xbyak::Reg64& t2, const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) + { n_ = 7; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; tbl_[3] = &t3; tbl_[4] = &t4; tbl_[5] = &t5; tbl_[6] = &t6; } + Pack(const Xbyak::Reg64& t7, const Xbyak::Reg64& t6, const Xbyak::Reg64& t5, const Xbyak::Reg64& t4, const Xbyak::Reg64& t3, const Xbyak::Reg64& t2, const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) + { n_ = 8; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; tbl_[3] = &t3; tbl_[4] = &t4; tbl_[5] = &t5; tbl_[6] = &t6; tbl_[7] = &t7; } + Pack(const Xbyak::Reg64& t8, const Xbyak::Reg64& t7, const Xbyak::Reg64& t6, const Xbyak::Reg64& t5, const Xbyak::Reg64& t4, const Xbyak::Reg64& t3, const Xbyak::Reg64& t2, const 
Xbyak::Reg64& t1, const Xbyak::Reg64& t0) + { n_ = 9; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; tbl_[3] = &t3; tbl_[4] = &t4; tbl_[5] = &t5; tbl_[6] = &t6; tbl_[7] = &t7; tbl_[8] = &t8; } + Pack(const Xbyak::Reg64& t9, const Xbyak::Reg64& t8, const Xbyak::Reg64& t7, const Xbyak::Reg64& t6, const Xbyak::Reg64& t5, const Xbyak::Reg64& t4, const Xbyak::Reg64& t3, const Xbyak::Reg64& t2, const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) + { n_ = 10; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; tbl_[3] = &t3; tbl_[4] = &t4; tbl_[5] = &t5; tbl_[6] = &t6; tbl_[7] = &t7; tbl_[8] = &t8; tbl_[9] = &t9; } + Pack& append(const Xbyak::Reg64& t) + { + if (n_ == maxTblNum) { + fprintf(stderr, "ERR Pack::can't append\n"); + throw Error(ERR_BAD_PARAMETER); + } + tbl_[n_++] = &t; + return *this; + } + void init(const Xbyak::Reg64 *tbl, size_t n) + { + if (n > maxTblNum) { + fprintf(stderr, "ERR Pack::init bad n=%d\n", (int)n); + throw Error(ERR_BAD_PARAMETER); + } + n_ = n; + for (size_t i = 0; i < n; i++) { + tbl_[i] = &tbl[i]; + } + } + const Xbyak::Reg64& operator[](size_t n) const + { + if (n >= n_) { + fprintf(stderr, "ERR Pack bad n=%d(%d)\n", (int)n, (int)n_); + throw Error(ERR_BAD_PARAMETER); + } + return *tbl_[n]; + } + size_t size() const { return n_; } + /* + get tbl[pos, pos + num) + */ + Pack sub(size_t pos, size_t num = size_t(-1)) const + { + if (num == size_t(-1)) num = n_ - pos; + if (pos + num > n_) { + fprintf(stderr, "ERR Pack::sub bad pos=%d, num=%d\n", (int)pos, (int)num); + throw Error(ERR_BAD_PARAMETER); + } + Pack pack; + pack.n_ = num; + for (size_t i = 0; i < num; i++) { + pack.tbl_[i] = tbl_[pos + i]; + } + return pack; + } + void put() const + { + for (size_t i = 0; i < n_; i++) { + printf("%s ", tbl_[i]->toString()); + } + printf("\n"); + } +}; + +class StackFrame { +#ifdef XBYAK64_WIN + static const int noSaveNum = 6; + static const int rcxPos = 0; + static const int rdxPos = 1; +#else + static const int noSaveNum = 8; + static const int rcxPos = 3; + static const int rdxPos = 2; +#endif + static const int maxRegNum = 14; // maxRegNum = 16 - rsp - rax + Xbyak::CodeGenerator *code_; + int pNum_; + int tNum_; + bool useRcx_; + bool useRdx_; + int saveNum_; + int P_; + bool makeEpilog_; + Xbyak::Reg64 pTbl_[4]; + Xbyak::Reg64 tTbl_[maxRegNum]; + Pack p_; + Pack t_; + StackFrame(const StackFrame&); + void operator=(const StackFrame&); +public: + const Pack& p; + const Pack& t; + /* + make stack frame + @param sf [in] this + @param pNum [in] num of function parameter(0 <= pNum <= 4) + @param tNum [in] num of temporary register(0 <= tNum, with UseRCX, UseRDX) #{pNum + tNum [+rcx] + [rdx]} <= 14 + @param stackSizeByte [in] local stack size + @param makeEpilog [in] automatically call close() if true + + you can use + rax + gp0, ..., gp(pNum - 1) + gt0, ..., gt(tNum-1) + rcx if tNum & UseRCX + rdx if tNum & UseRDX + rsp[0..stackSizeByte - 1] + */ + StackFrame(Xbyak::CodeGenerator *code, int pNum, int tNum = 0, int stackSizeByte = 0, bool makeEpilog = true) + : code_(code) + , pNum_(pNum) + , tNum_(tNum & ~(UseRCX | UseRDX)) + , useRcx_((tNum & UseRCX) != 0) + , useRdx_((tNum & UseRDX) != 0) + , saveNum_(0) + , P_(0) + , makeEpilog_(makeEpilog) + , p(p_) + , t(t_) + { + using namespace Xbyak; + if (pNum < 0 || pNum > 4) throw Error(ERR_BAD_PNUM); + const int allRegNum = pNum + tNum_ + (useRcx_ ? 1 : 0) + (useRdx_ ? 
1 : 0);
+		if (tNum_ < 0 || allRegNum > maxRegNum) throw Error(ERR_BAD_TNUM);
+		const Reg64& _rsp = code->rsp;
+		saveNum_ = (std::max)(0, allRegNum - noSaveNum);
+		const int *tbl = getOrderTbl() + noSaveNum;
+		for (int i = 0; i < saveNum_; i++) {
+			code->push(Reg64(tbl[i]));
+		}
+		P_ = (stackSizeByte + 7) / 8;
+		if (P_ > 0 && (P_ & 1) == (saveNum_ & 1)) P_++; // (rsp % 16) == 8, then increment P_ for 16 byte alignment
+		P_ *= 8;
+		if (P_ > 0) code->sub(_rsp, P_);
+		int pos = 0;
+		for (int i = 0; i < pNum; i++) {
+			pTbl_[i] = Xbyak::Reg64(getRegIdx(pos));
+		}
+		for (int i = 0; i < tNum_; i++) {
+			tTbl_[i] = Xbyak::Reg64(getRegIdx(pos));
+		}
+		if (useRcx_ && rcxPos < pNum) code_->mov(code_->r10, code_->rcx);
+		if (useRdx_ && rdxPos < pNum) code_->mov(code_->r11, code_->rdx);
+		p_.init(pTbl_, pNum);
+		t_.init(tTbl_, tNum_);
+	}
+	/*
+		make epilog manually
+		@param callRet [in] call ret() if true
+	*/
+	void close(bool callRet = true)
+	{
+		using namespace Xbyak;
+		const Reg64& _rsp = code_->rsp;
+		const int *tbl = getOrderTbl() + noSaveNum;
+		if (P_ > 0) code_->add(_rsp, P_);
+		for (int i = 0; i < saveNum_; i++) {
+			code_->pop(Reg64(tbl[saveNum_ - 1 - i]));
+		}
+
+		if (callRet) code_->ret();
+	}
+	~StackFrame()
+	{
+		if (!makeEpilog_) return;
+		try {
+			close();
+		} catch (std::exception& e) {
+			printf("ERR:StackFrame %s\n", e.what());
+			exit(1);
+		}
+	}
+private:
+	const int *getOrderTbl() const
+	{
+		using namespace Xbyak;
+		static const int tbl[] = {
+#ifdef XBYAK64_WIN
+			Operand::RCX, Operand::RDX, Operand::R8, Operand::R9, Operand::R10, Operand::R11, Operand::RDI, Operand::RSI,
+#else
+			Operand::RDI, Operand::RSI, Operand::RDX, Operand::RCX, Operand::R8, Operand::R9, Operand::R10, Operand::R11,
+#endif
+			Operand::RBX, Operand::RBP, Operand::R12, Operand::R13, Operand::R14, Operand::R15
+		};
+		return &tbl[0];
+	}
+	int getRegIdx(int& pos) const
+	{
+		assert(pos < maxRegNum);
+		using namespace Xbyak;
+		const int *tbl = getOrderTbl();
+		int r = tbl[pos++];
+		if (useRcx_) {
+			if (r == Operand::RCX) { return Operand::R10; }
+			if (r == Operand::R10) { r = tbl[pos++]; }
+		}
+		if (useRdx_) {
+			if (r == Operand::RDX) { return Operand::R11; }
+			if (r == Operand::R11) { return tbl[pos++]; }
+		}
+		return r;
+	}
+};
+#endif
+
+} } // end of util
+#endif
diff --git a/vendor/github.com/byzantine-lab/mcl/test/aggregate_sig_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/aggregate_sig_test.cpp
new file mode 100644
index 000000000..c3a0e758d
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/aggregate_sig_test.cpp
@@ -0,0 +1,74 @@
+//#define MCLBN_FP_UNIT_SIZE 8
+#include <cybozu/test.hpp>
+#include <cybozu/benchmark.hpp>
+#include <cybozu/xorshift.hpp>
+#include <mcl/aggregate_sig.hpp>
+
+using namespace mcl::aggs;
+
+CYBOZU_TEST_AUTO(init)
+{
+	AGGS::init();
+//	AGGS::init(mcl::BN381_1);
+//	AGGS::init(mcl::BLS12_381);
+	SecretKey sec;
+	sec.init();
+	PublicKey pub;
+	sec.getPublicKey(pub);
+	const std::string m = "abc";
+	Signature sig;
+	sec.sign(sig, m);
+	CYBOZU_TEST_ASSERT(pub.verify(sig, m));
+}
+
+void aggregateTest(const std::vector<std::string>& msgVec)
+{
+	const size_t n = msgVec.size();
+	std::vector<SecretKey> secVec(n);
+	std::vector<PublicKey> pubVec(n);
+	std::vector<Signature> sigVec(n);
+	Signature aggSig;
+	for (size_t i = 0; i < n; i++) {
+		secVec[i].init();
+		secVec[i].getPublicKey(pubVec[i]);
+		secVec[i].sign(sigVec[i], msgVec[i]);
+		CYBOZU_TEST_ASSERT(pubVec[i].verify(sigVec[i], msgVec[i]));
+	}
+	aggSig.aggregate(sigVec);
+	CYBOZU_TEST_ASSERT(aggSig.verify(msgVec, pubVec));
+	CYBOZU_BENCH_C("aggSig.verify", 10, aggSig.verify, msgVec, pubVec);
+}
+
+CYBOZU_TEST_AUTO(aggregate)
+{
+#if 0
+	/*
+		Core
i7-7700 CPU @ 3.60GHz
+		             BN254  Fp382  Fp462
+		security bit   100   115?    128
+		# of sig 100    69    200    476
+		        1000   693   2037   4731
+		       10000  6969  20448  47000(Mclk)
+	*/
+	const size_t n = 1000;
+	const size_t msgSize = 16;
+	std::vector<std::string> msgVec(n);
+	cybozu::XorShift rg;
+	for (size_t i = 0; i < n; i++) {
+		std::string& msg = msgVec[i];
+		msg.resize(msgSize);
+		for (size_t j = 0; j < msgSize; j++) {
+			msg[j] = (char)rg();
+		}
+	}
+	aggregateTest(msgVec);
+#else
+	const std::string msgArray[] = { "abc", "12345", "xyz", "pqr", "aggregate signature" };
+	const size_t n = sizeof(msgArray) / sizeof(msgArray[0]);
+	std::vector<std::string> msgVec(n);
+	for (size_t i = 0; i < n; i++) {
+		msgVec[i] = msgArray[i];
+	}
+	aggregateTest(msgVec);
+#endif
+}
diff --git a/vendor/github.com/byzantine-lab/mcl/test/array_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/array_test.cpp
new file mode 100644
index 000000000..2168a28fa
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/array_test.cpp
@@ -0,0 +1,104 @@
+#include <cybozu/test.hpp>
+#include <mcl/array.hpp>
+
+template<class Array, size_t n>
+void setArray(Array& a, const int (&tbl)[n])
+{
+	CYBOZU_TEST_ASSERT(a.resize(n));
+	for (size_t i = 0; i < n; i++) a[i] = tbl[i];
+}
+
+template<class Array, size_t an, size_t bn>
+void swapTest(const int (&a)[an], const int (&b)[bn])
+{
+	Array s, t;
+	setArray(s, a);
+	setArray(t, b);
+	s.swap(t);
+	CYBOZU_TEST_EQUAL(s.size(), bn);
+	CYBOZU_TEST_EQUAL(t.size(), an);
+	CYBOZU_TEST_EQUAL_ARRAY(s, b, s.size());
+	CYBOZU_TEST_EQUAL_ARRAY(t, a, t.size());
+}
+
+CYBOZU_TEST_AUTO(resize)
+{
+	mcl::Array<int> a, b;
+	CYBOZU_TEST_EQUAL(a.size(), 0);
+	CYBOZU_TEST_EQUAL(b.size(), 0);
+
+	const size_t n = 5;
+	bool ok = a.resize(n);
+	CYBOZU_TEST_ASSERT(ok);
+	CYBOZU_TEST_EQUAL(n, a.size());
+	for (size_t i = 0; i < n; i++) {
+		a[i] = i;
+	}
+	ok = b.copy(a);
+	CYBOZU_TEST_ASSERT(ok);
+	CYBOZU_TEST_EQUAL(b.size(), n);
+	CYBOZU_TEST_EQUAL_ARRAY(a.data(), b.data(), n);
+
+	const size_t small = n - 1;
+	ok = b.resize(small);
+	CYBOZU_TEST_ASSERT(ok);
+	CYBOZU_TEST_EQUAL(b.size(), small);
+	CYBOZU_TEST_EQUAL_ARRAY(a.data(), b.data(), small);
+	const size_t large = n * 2;
+	ok = b.resize(large);
+	CYBOZU_TEST_ASSERT(ok);
+	CYBOZU_TEST_EQUAL(b.size(), large);
+	CYBOZU_TEST_EQUAL_ARRAY(a.data(), b.data(), small);
+
+	const int aTbl[] = { 3, 4 };
+	const int bTbl[] = { 7, 6, 5, 3 };
+	swapTest<mcl::Array<int> >(aTbl, bTbl);
+	swapTest<mcl::Array<int> >(bTbl, aTbl);
+}
+
+CYBOZU_TEST_AUTO(FixedArray)
+{
+	const size_t n = 5;
+	mcl::FixedArray<int, n> a, b;
+	CYBOZU_TEST_EQUAL(a.size(), 0);
+	CYBOZU_TEST_EQUAL(b.size(), 0);
+
+	bool ok = a.resize(n);
+	CYBOZU_TEST_ASSERT(ok);
+	CYBOZU_TEST_EQUAL(n, a.size());
+	for (size_t i = 0; i < n; i++) {
+		a[i] = i;
+	}
+	ok = b.copy(a);
+	CYBOZU_TEST_ASSERT(ok);
+	CYBOZU_TEST_EQUAL(b.size(), n);
+	CYBOZU_TEST_EQUAL_ARRAY(a.data(), b.data(), n);
+
+	const size_t small = n - 1;
+	ok = b.resize(small);
+	CYBOZU_TEST_ASSERT(ok);
+	CYBOZU_TEST_EQUAL(b.size(), small);
+	CYBOZU_TEST_EQUAL_ARRAY(a.data(), b.data(), small);
+	const size_t large = n + 1;
+	ok = b.resize(large);
+	CYBOZU_TEST_ASSERT(!ok);
+
+	const int aTbl[] = { 3, 4 };
+	const int bTbl[] = { 7, 6, 5, 3 };
+	swapTest<mcl::FixedArray<int, n> >(aTbl, bTbl);
+	swapTest<mcl::FixedArray<int, n> >(bTbl, aTbl);
+}
+
+#ifndef CYBOZU_DONT_USE_EXCEPTION
+CYBOZU_TEST_AUTO(assign)
+{
+	const int aTbl[] = { 3, 4, 2 };
+	const int bTbl[] = { 3, 4, 2, 1, 5 };
+	mcl::Array<int> a, b;
+	setArray(a, aTbl);
+	setArray(b, bTbl);
+	a = b;
+	CYBOZU_TEST_EQUAL(a.size(), b.size());
+	CYBOZU_TEST_EQUAL_ARRAY(a.data(), b.data(), a.size());
+}
+#endif
diff --git a/vendor/github.com/byzantine-lab/mcl/test/base_test.cpp
b/vendor/github.com/byzantine-lab/mcl/test/base_test.cpp new file mode 100644 index 000000000..2733d17ca --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/base_test.cpp @@ -0,0 +1,392 @@ +// not compiled +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../src/fp_generator.hpp" +#if (CYBOZU_HOST == CYBOZU_HOST_INTEL) && (MCL_SIZEOF_UNIT == 8) + #define USE_XBYAK + static mcl::FpGenerator fg; +#endif +#define PUT(x) std::cout << #x "=" << (x) << std::endl + +const size_t MAX_N = 32; +typedef mcl::fp::Unit Unit; + +size_t getUnitSize(size_t bitSize) +{ + return (bitSize + sizeof(Unit) * 8 - 1) / (sizeof(Unit) * 8); +} + +void setMpz(mpz_class& mx, const Unit *x, size_t n) +{ + mcl::gmp::setArray(mx, x, n); +} +void getMpz(Unit *x, size_t n, const mpz_class& mx) +{ + mcl::fp::toArray(x, n, mx.get_mpz_t()); +} + +struct Montgomery { + mpz_class p_; + mpz_class R_; // (1 << (n_ * 64)) % p + mpz_class RR_; // (R * R) % p + Unit r_; // p * r = -1 mod M = 1 << 64 + size_t n_; + Montgomery() {} + explicit Montgomery(const mpz_class& p) + { + p_ = p; + r_ = mcl::montgomery::getCoff(mcl::gmp::getUnit(p, 0)); + n_ = mcl::gmp::getUnitSize(p); + R_ = 1; + R_ = (R_ << (n_ * 64)) % p_; + RR_ = (R_ * R_) % p_; + } + + void toMont(mpz_class& x) const { mul(x, x, RR_); } + void fromMont(mpz_class& x) const { mul(x, x, 1); } + + void mont(Unit *z, const Unit *x, const Unit *y) const + { + mpz_class mx, my; + setMpz(mx, x, n_); + setMpz(my, y, n_); + mul(mx, mx, my); + getMpz(z, n_, mx); + } + void mul(mpz_class& z, const mpz_class& x, const mpz_class& y) const + { +#if 1 + const size_t ySize = mcl::gmp::getUnitSize(y); + mpz_class c = y == 0 ? mpz_class(0) : x * mcl::gmp::getUnit(y, 0); + Unit q = c == 0 ? 0 : mcl::gmp::getUnit(c, 0) * r_; + c += p_ * q; + c >>= sizeof(Unit) * 8; + for (size_t i = 1; i < n_; i++) { + if (i < ySize) { + c += x * mcl::gmp::getUnit(y, i); + } + Unit q = c == 0 ? 
0 : mcl::gmp::getUnit(c, 0) * r_; + c += p_ * q; + c >>= sizeof(Unit) * 8; + } + if (c >= p_) { + c -= p_; + } + z = c; +#else + z = x * y; + const size_t zSize = mcl::gmp::getUnitSize(z); + for (size_t i = 0; i < n_; i++) { + if (i < zSize) { + Unit q = mcl::gmp::getUnit(z, 0) * r_; + z += p_ * (mp_limb_t)q; + } + z >>= sizeof(Unit) * 8; + } + if (z >= p_) { + z -= p_; + } +#endif + } +}; + +void put(const char *msg, const Unit *x, size_t n) +{ + printf("%s ", msg); + for (size_t i = 0; i < n; i++) printf("%016llx ", (long long)x[n - 1 - i]); + printf("\n"); +} +void verifyEqual(const Unit *x, const Unit *y, size_t n, const char *file, int line) +{ + bool ok = mcl::fp::isEqualArray(x, y, n); + CYBOZU_TEST_ASSERT(ok); + if (ok) return; + printf("%s:%d\n", file, line); + put("L", x, n); + put("R", y, n); + exit(1); +} +#define VERIFY_EQUAL(x, y, n) verifyEqual(x, y, n, __FILE__, __LINE__) + +void addC(Unit *z, const Unit *x, const Unit *y, const Unit *p, size_t n) +{ + mpz_class mx, my, mp; + setMpz(mx, x, n); + setMpz(my, y, n); + setMpz(mp, p, n); + mx += my; + if (mx >= mp) mx -= mp; + getMpz(z, n, mx); +} +void subC(Unit *z, const Unit *x, const Unit *y, const Unit *p, size_t n) +{ + mpz_class mx, my, mp; + setMpz(mx, x, n); + setMpz(my, y, n); + setMpz(mp, p, n); + mx -= my; + if (mx < 0) mx += mp; + getMpz(z, n, mx); +} +static inline void set_zero(mpz_t& z, Unit *p, size_t n) +{ + z->_mp_alloc = (int)n; + z->_mp_size = 0; + z->_mp_d = (mp_limb_t*)p; +} +static inline void set_mpz_t(mpz_t& z, const Unit* p, int n) +{ + z->_mp_alloc = n; + int i = n; + while (i > 0 && p[i - 1] == 0) { + i--; + } + z->_mp_size = i; + z->_mp_d = (mp_limb_t*)p; +} + +// z[2n] <- x[n] * y[n] +void mulPreC(Unit *z, const Unit *x, const Unit *y, size_t n) +{ +#if 1 + mpz_t mx, my, mz; + set_zero(mz, z, n * 2); + set_mpz_t(mx, x, n); + set_mpz_t(my, y, n); + mpz_mul(mz, mx, my); + mcl::fp::toArray(z, n * 2, mz); +#else + mpz_class mx, my; + setMpz(mx, x, n); + setMpz(my, y, n); + mx *= my; + getMpz(z, n * 2, mx); +#endif +} + +void modC(Unit *y, const Unit *x, const Unit *p, size_t n) +{ + mpz_t mx, my, mp; + set_mpz_t(mx, x, n * 2); + set_mpz_t(my, y, n); + set_mpz_t(mp, p, n); + mpz_mod(my, mx, mp); + mcl::fp::clearArray(y, my->_mp_size, n); +} + +void mul(Unit *z, const Unit *x, const Unit *y, const Unit *p, size_t n) +{ + Unit ret[MAX_N * 2]; + mpz_t mx, my, mz, mp; + set_zero(mz, ret, MAX_N * 2); + set_mpz_t(mx, x, n); + set_mpz_t(my, y, n); + set_mpz_t(mp, p, n); + mpz_mul(mz, mx, my); + mpz_mod(mz, mz, mp); + mcl::fp::toArray(z, n, mz); +} + +typedef mcl::fp::void3op void3op; +typedef mcl::fp::void4op void4op; +typedef mcl::fp::void4Iop void4Iop; + +const struct FuncOp { + size_t bitSize; + void4op addS; + void4op addL; + void4op subS; + void4op subL; + void3op mulPre; + void4Iop mont; +} gFuncOpTbl[] = { + { 128, mcl_fp_add128S, mcl_fp_add128L, mcl_fp_sub128S, mcl_fp_sub128L, mcl_fp_mul128pre, mcl_fp_mont128 }, + { 192, mcl_fp_add192S, mcl_fp_add192L, mcl_fp_sub192S, mcl_fp_sub192L, mcl_fp_mul192pre, mcl_fp_mont192 }, + { 256, mcl_fp_add256S, mcl_fp_add256L, mcl_fp_sub256S, mcl_fp_sub256L, mcl_fp_mul256pre, mcl_fp_mont256 }, + { 320, mcl_fp_add320S, mcl_fp_add320L, mcl_fp_sub320S, mcl_fp_sub320L, mcl_fp_mul320pre, mcl_fp_mont320 }, + { 384, mcl_fp_add384S, mcl_fp_add384L, mcl_fp_sub384S, mcl_fp_sub384L, mcl_fp_mul384pre, mcl_fp_mont384 }, + { 448, mcl_fp_add448S, mcl_fp_add448L, mcl_fp_sub448S, mcl_fp_sub448L, mcl_fp_mul448pre, mcl_fp_mont448 }, + { 512, mcl_fp_add512S, mcl_fp_add512L, mcl_fp_sub512S, 
mcl_fp_sub512L, mcl_fp_mul512pre, mcl_fp_mont512 },
+#if MCL_SIZEOF_UNIT == 4
+	{ 160, mcl_fp_add160S, mcl_fp_add160L, mcl_fp_sub160S, mcl_fp_sub160L, mcl_fp_mul160pre, mcl_fp_mont160 },
+	{ 224, mcl_fp_add224S, mcl_fp_add224L, mcl_fp_sub224S, mcl_fp_sub224L, mcl_fp_mul224pre, mcl_fp_mont224 },
+	{ 288, mcl_fp_add288S, mcl_fp_add288L, mcl_fp_sub288S, mcl_fp_sub288L, mcl_fp_mul288pre, mcl_fp_mont288 },
+	{ 352, mcl_fp_add352S, mcl_fp_add352L, mcl_fp_sub352S, mcl_fp_sub352L, mcl_fp_mul352pre, mcl_fp_mont352 },
+	{ 416, mcl_fp_add416S, mcl_fp_add416L, mcl_fp_sub416S, mcl_fp_sub416L, mcl_fp_mul416pre, mcl_fp_mont416 },
+	{ 480, mcl_fp_add480S, mcl_fp_add480L, mcl_fp_sub480S, mcl_fp_sub480L, mcl_fp_mul480pre, mcl_fp_mont480 },
+	{ 544, mcl_fp_add544S, mcl_fp_add544L, mcl_fp_sub544S, mcl_fp_sub544L, mcl_fp_mul544pre, mcl_fp_mont544 },
+#else
+	{ 576, mcl_fp_add576S, mcl_fp_add576L, mcl_fp_sub576S, mcl_fp_sub576L, mcl_fp_mul576pre, mcl_fp_mont576 },
+#endif
+};
+
+FuncOp getFuncOp(size_t bitSize)
+{
+	typedef std::map<size_t, FuncOp> Map;
+	static Map map;
+	static bool init = false;
+	if (!init) {
+		init = true;
+		for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(gFuncOpTbl); i++) {
+			map[gFuncOpTbl[i].bitSize] = gFuncOpTbl[i];
+		}
+	}
+	for (Map::const_iterator i = map.begin(), ie = map.end(); i != ie; ++i) {
+		if (bitSize <= i->second.bitSize) {
+			return i->second;
+		}
+	}
+	printf("ERR bitSize=%d\n", (int)bitSize);
+	exit(1);
+}
+
+void test(const Unit *p, size_t bitSize)
+{
+	printf("bitSize %d\n", (int)bitSize);
+	const size_t n = getUnitSize(bitSize);
+#ifdef NDEBUG
+	bool doBench = true;
+#else
+	bool doBench = false;
+#endif
+	const FuncOp funcOp = getFuncOp(bitSize);
+	const void4op addS = funcOp.addS;
+	const void4op addL = funcOp.addL;
+	const void4op subS = funcOp.subS;
+	const void4op subL = funcOp.subL;
+	const void3op mulPre = funcOp.mulPre;
+	const void4Iop mont = funcOp.mont;
+
+	mcl::fp::Unit x[MAX_N], y[MAX_N];
+	mcl::fp::Unit z[MAX_N], w[MAX_N];
+	mcl::fp::Unit z2[MAX_N * 2];
+	mcl::fp::Unit w2[MAX_N * 2];
+	cybozu::XorShift rg;
+	mcl::fp::getRandVal(x, rg, p, bitSize);
+	mcl::fp::getRandVal(y, rg, p, bitSize);
+	const size_t C = 10;
+
+	addC(z, x, y, p, n);
+	addS(w, x, y, p);
+	VERIFY_EQUAL(z, w, n);
+	for (size_t i = 0; i < C; i++) {
+		addC(z, y, z, p, n);
+		addS(w, y, w, p);
+		VERIFY_EQUAL(z, w, n);
+		addC(z, y, z, p, n);
+		addL(w, y, w, p);
+		VERIFY_EQUAL(z, w, n);
+		subC(z, x, z, p, n);
+		subS(w, x, w, p);
+		VERIFY_EQUAL(z, w, n);
+		subC(z, x, z, p, n);
+		subL(w, x, w, p);
+		VERIFY_EQUAL(z, w, n);
+		mulPreC(z2, x, z, n);
+		mulPre(w2, x, z);
+		VERIFY_EQUAL(z2, w2, n * 2);
+	}
+	{
+		mpz_class mp;
+		setMpz(mp, p, n);
+		Montgomery m(mp);
+#ifdef USE_XBYAK
+		if (bitSize > 128) fg.init(p, n);
+#endif
+		/*
+			real    mont
+			0       0
+			1       R^-1
+			R       1
+			-1      -R^-1
+			-R      -1
+		*/
+		mpz_class t = 1;
+		const mpz_class R = (t << (n * 64)) % mp;
+		const mpz_class tbl[] = {
+			0, 1, R, mp - 1, mp - R
+		};
+		for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) {
+			const mpz_class& mx = tbl[i];
+			for (size_t j = i; j < CYBOZU_NUM_OF_ARRAY(tbl); j++) {
+				const mpz_class& my = tbl[j];
+				getMpz(x, n, mx);
+				getMpz(y, n, my);
+				m.mont(z, x, y);
+				mont(w, x, y, p, m.r_);
+				VERIFY_EQUAL(z, w, n);
+#ifdef USE_XBYAK
+				if (bitSize > 128) {
+					fg.mul_(w, x, y);
+					VERIFY_EQUAL(z, w, n);
+				}
+#endif
+			}
+		}
+		if (doBench) {
+//			CYBOZU_BENCH("montC", m.mont, x, y, x);
+			CYBOZU_BENCH("montA ", mont, x, y, x, p, m.r_);
+		}
+	}
+	if (doBench) {
+//		CYBOZU_BENCH("addS", addS, x, y, x, p); // slow
+//		CYBOZU_BENCH("subS", subS, x, y, x, p);
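+		// (Illustrative note, not part of the original test.) mont(x, y) computes
+		// x*y*R^-1 mod p, which is what the real/mont table above records:
+		// mont(a, RR) = a*R converts a into Montgomery form and mont(a, 1) = a*R^-1
+		// converts back, so real 1 pairs with mont R^-1 and real R with mont 1.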
+// CYBOZU_BENCH("addL", addL, x, y, x, p); +// CYBOZU_BENCH("subL", subL, x, y, x, p); + CYBOZU_BENCH("mulPreA", mulPre, w2, y, x); + CYBOZU_BENCH("mulPreC", mulPreC, w2, y, x, n); + CYBOZU_BENCH("modC ", modC, x, w2, p, n); + } +#ifdef USE_XBYAK + if (bitSize <= 128) return; + if (doBench) { + fg.init(p, n); + CYBOZU_BENCH("addA ", fg.add_, x, y, x); + CYBOZU_BENCH("subA ", fg.sub_, x, y, x); +// CYBOZU_BENCH("mulA", fg.mul_, x, y, x); + } +#endif + printf("mont test %d\n", (int)bitSize); +} + +CYBOZU_TEST_AUTO(all) +{ + const struct { + size_t n; + const uint64_t p[9]; + } tbl[] = { +// { 2, { 0xf000000000000001, 1, } }, + { 2, { 0x000000000000001d, 0x8000000000000000, } }, + { 3, { 0x000000000000012b, 0x0000000000000000, 0x0000000080000000, } }, +// { 3, { 0x0f69466a74defd8d, 0xfffffffe26f2fc17, 0x07ffffffffffffff, } }, +// { 3, { 0x7900342423332197, 0x1234567890123456, 0x1480948109481904, } }, + { 3, { 0x0f69466a74defd8d, 0xfffffffe26f2fc17, 0xffffffffffffffff, } }, +// { 4, { 0x7900342423332197, 0x4242342420123456, 0x1234567892342342, 0x1480948109481904, } }, +// { 4, { 0x0f69466a74defd8d, 0xfffffffe26f2fc17, 0x17ffffffffffffff, 0x1513423423423415, } }, + { 4, { 0xa700000000000013, 0x6121000000000013, 0xba344d8000000008, 0x2523648240000001, } }, +// { 5, { 0x0000000000000009, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x8000000000000000, } }, + { 5, { 0xfffffffffffffc97, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, } }, +// { 6, { 0x4720422423332197, 0x0034230847204720, 0x3456789012345679, 0x4820984290482212, 0x9482094820948209, 0x0194810841094810, } }, +// { 6, { 0x7204224233321972, 0x0342308472047204, 0x4567890123456790, 0x0948204204243123, 0x2098420984209482, 0x2093482094810948, } }, + { 6, { 0x00000000ffffffff, 0xffffffff00000000, 0xfffffffffffffffe, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, } }, +// { 7, { 0x0000000000000063, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x8000000000000000, } }, + { 7, { 0x000000000fffcff1, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, } }, + { 8, { 0xffffffffffffd0c9, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, } }, + { 9, { 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x00000000000001ff, } }, +// { 9, { 0x4720422423332197, 0x0034230847204720, 0x3456789012345679, 0x2498540975555312, 0x9482904924029424, 0x0948209842098402, 0x1098410948109482, 0x0820958209582094, 0x0000000000000029, } }, +// { 9, { 0x0f69466a74defd8d, 0xfffffffe26f2fc17, 0x7fffffffffffffff, 0x8572938572398583, 0x5732057823857293, 0x9820948205872380, 0x3409238420492034, 0x9483842098340298, 0x0000000000000003, } }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const size_t n = tbl[i].n; + const size_t bitSize = (n - 1) * 64 + cybozu::bsr(tbl[i].p[n - 1]) + 1; + test((const Unit*)tbl[i].p, bitSize); + } +} + diff --git a/vendor/github.com/byzantine-lab/mcl/test/bench.hpp b/vendor/github.com/byzantine-lab/mcl/test/bench.hpp new file mode 100644 index 000000000..cc1639e6e --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/bench.hpp @@ -0,0 +1,192 @@ +#include + +void benchAddDblG1() +{ + puts("benchAddDblG1"); + const int C = 100000; + G1 P1, P2, P3; + 
hashAndMapToG1(P1, "a"); + hashAndMapToG1(P2, "b"); + P1 += P2; + P2 += P1; + printf("z.isOne()=%d %d\n", P1.z.isOne(), P2.z.isOne()); + CYBOZU_BENCH_C("G1::add(1)", C, G1::add, P3, P1, P2); + P1.normalize(); + printf("z.isOne()=%d %d\n", P1.z.isOne(), P2.z.isOne()); + CYBOZU_BENCH_C("G1::add(2)", C, G1::add, P3, P1, P2); + CYBOZU_BENCH_C("G1::add(3)", C, G1::add, P3, P2, P1); + P2.normalize(); + printf("z.isOne()=%d %d\n", P1.z.isOne(), P2.z.isOne()); + CYBOZU_BENCH_C("G1::add(4)", C, G1::add, P3, P1, P2); + P1 = P3; + printf("z.isOne()=%d\n", P1.z.isOne()); + CYBOZU_BENCH_C("G1::dbl(1)", C, G1::dbl, P3, P1); + P1.normalize(); + printf("z.isOne()=%d\n", P1.z.isOne()); + CYBOZU_BENCH_C("G1::dbl(2)", C, G1::dbl, P3, P1); +} + +void benchAddDblG2() +{ + puts("benchAddDblG2"); + const int C = 100000; + G2 P1, P2, P3; + hashAndMapToG2(P1, "a"); + hashAndMapToG2(P2, "b"); + P1 += P2; + P2 += P1; + printf("z.isOne()=%d %d\n", P1.z.isOne(), P2.z.isOne()); + CYBOZU_BENCH_C("G2::add(1)", C, G2::add, P3, P1, P2); + P1.normalize(); + printf("z.isOne()=%d %d\n", P1.z.isOne(), P2.z.isOne()); + CYBOZU_BENCH_C("G2::add(2)", C, G2::add, P3, P1, P2); + CYBOZU_BENCH_C("G2::add(3)", C, G2::add, P3, P2, P1); + P2.normalize(); + printf("z.isOne()=%d %d\n", P1.z.isOne(), P2.z.isOne()); + CYBOZU_BENCH_C("G2::add(4)", C, G2::add, P3, P1, P2); + P1 = P3; + printf("z.isOne()=%d\n", P1.z.isOne()); + CYBOZU_BENCH_C("G2::dbl(1)", C, G2::dbl, P3, P1); + P1.normalize(); + printf("z.isOne()=%d\n", P1.z.isOne()); + CYBOZU_BENCH_C("G2::dbl(2)", C, G2::dbl, P3, P1); +} + + +void testBench(const G1& P, const G2& Q) +{ + G1 Pa; + G2 Qa; + Fp12 e1, e2; + pairing(e1, P, Q); + Fp12::pow(e2, e1, 12345); + Fp x, y; + x.setHashOf("abc"); + y.setHashOf("xyz"); + const int C = 1000; + const int C3 = 100000; +#if 1 + const int C2 = 3000; + mpz_class a = x.getMpz(); + CYBOZU_BENCH_C("G1::mulCT ", C, G1::mulCT, Pa, P, a); + CYBOZU_BENCH_C("G1::mul ", C, G1::mul, Pa, Pa, a); + CYBOZU_BENCH_C("G1::add ", C, G1::add, Pa, Pa, P); + CYBOZU_BENCH_C("G1::dbl ", C, G1::dbl, Pa, Pa); + CYBOZU_BENCH_C("G2::mulCT ", C, G2::mulCT, Qa, Q, a); + CYBOZU_BENCH_C("G2::mul ", C, G2::mul, Qa, Qa, a); + CYBOZU_BENCH_C("G2::add ", C, G2::add, Qa, Qa, Q); + CYBOZU_BENCH_C("G2::dbl ", C, G2::dbl, Qa, Qa); + CYBOZU_BENCH_C("GT::pow ", C, GT::pow, e1, e1, a); +// CYBOZU_BENCH_C("GT::powGLV ", C, BN::param.glv2.pow, e1, e1, a); + G1 PP; + G2 QQ; + std::string s; + s = P.getStr(); + CYBOZU_BENCH_C("G1::setStr chk", C, PP.setStr, s); + verifyOrderG1(false); + CYBOZU_BENCH_C("G1::setStr ", C, PP.setStr, s); + verifyOrderG1(true); + s = Q.getStr(); + CYBOZU_BENCH_C("G2::setStr chk", C, QQ.setStr, s); + verifyOrderG2(false); + CYBOZU_BENCH_C("G2::setStr ", C, QQ.setStr, s); + verifyOrderG2(true); + CYBOZU_BENCH_C("hashAndMapToG1", C, hashAndMapToG1, PP, "abc", 3); + CYBOZU_BENCH_C("hashAndMapToG2", C, hashAndMapToG2, QQ, "abc", 3); +#endif + CYBOZU_BENCH_C("Fp::add ", C3, Fp::add, x, x, y); + CYBOZU_BENCH_C("Fp::sub ", C3, Fp::sub, x, x, y); + CYBOZU_BENCH_C("Fp::neg ", C3, Fp::neg, x, x); + CYBOZU_BENCH_C("Fp::mul ", C3, Fp::mul, x, x, y); + CYBOZU_BENCH_C("Fp::sqr ", C3, Fp::sqr, x, x); + CYBOZU_BENCH_C("Fp::inv ", C3, Fp::inv, x, x); + Fp2 xx, yy; + xx.a = x; + xx.b = 3; + yy.a = y; + yy.b = -5; + FpDbl d0, d1; + x = 9; + y = 3; +#if 1 + CYBOZU_BENCH_C("Fp2::add ", C3, Fp2::add, xx, xx, yy); + CYBOZU_BENCH_C("Fp2::sub ", C3, Fp2::sub, xx, xx, yy); + CYBOZU_BENCH_C("Fp2::neg ", C3, Fp2::neg, xx, xx); + CYBOZU_BENCH_C("Fp2::mul ", C3, Fp2::mul, xx, xx, yy); + 
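+	// (Illustrative note, not part of the original benchmark.) mul_xi multiplies
+	// by the non-residue xi used to build Fp6 = Fp2[v]/(v^3 - xi); for BLS12-381
+	// xi = 1 + u, so (a + bu)*xi = (a - b) + (a + b)u costs only Fp additions and
+	// is far cheaper than the general Fp2::mul timed above.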
CYBOZU_BENCH_C("Fp2::mul_xi ", C3, Fp2::mul_xi, xx, xx); + CYBOZU_BENCH_C("Fp2::sqr ", C3, Fp2::sqr, xx, xx); + CYBOZU_BENCH_C("Fp2::inv ", C3, Fp2::inv, xx, xx); + CYBOZU_BENCH_C("FpDbl::addPre ", C3, FpDbl::addPre, d1, d1, d0); + CYBOZU_BENCH_C("FpDbl::subPre ", C3, FpDbl::subPre, d1, d1, d0); + CYBOZU_BENCH_C("FpDbl::add ", C3, FpDbl::add, d1, d1, d0); + CYBOZU_BENCH_C("FpDbl::sub ", C3, FpDbl::sub, d1, d1, d0); + CYBOZU_BENCH_C("FpDbl::mulPre ", C3, FpDbl::mulPre, d0, x, y); + CYBOZU_BENCH_C("FpDbl::sqrPre ", C3, FpDbl::sqrPre, d1, x); + CYBOZU_BENCH_C("FpDbl::mod ", C3, FpDbl::mod, x, d0); + Fp2Dbl D; + CYBOZU_BENCH_C("Fp2Dbl::mulPre ", C3, Fp2Dbl::mulPre, D, xx, yy); + CYBOZU_BENCH_C("Fp2Dbl::sqrPre ", C3, Fp2Dbl::sqrPre, D, xx); + + CYBOZU_BENCH_C("GT::add ", C2, GT::add, e1, e1, e2); + CYBOZU_BENCH_C("GT::mul ", C2, GT::mul, e1, e1, e2); + CYBOZU_BENCH_C("GT::sqr ", C2, GT::sqr, e1, e1); + CYBOZU_BENCH_C("GT::inv ", C2, GT::inv, e1, e1); +#endif + CYBOZU_BENCH_C("FpDbl::mulPre ", C3, FpDbl::mulPre, d0, x, y); + CYBOZU_BENCH_C("pairing ", 3000, pairing, e1, P, Q); + CYBOZU_BENCH_C("millerLoop ", 3000, millerLoop, e1, P, Q); + CYBOZU_BENCH_C("finalExp ", 3000, finalExp, e1, e1); +//exit(1); + std::vector Qcoeff; + CYBOZU_BENCH_C("precomputeG2 ", C, precomputeG2, Qcoeff, Q); + precomputeG2(Qcoeff, Q); + CYBOZU_BENCH_C("precomputedML ", C, precomputedMillerLoop, e2, P, Qcoeff); +} + +inline void SquareRootPrecomputeTest(const mpz_class& p) +{ + mcl::SquareRoot sq1, sq2; + bool b; + sq1.set(&b, p, true); + CYBOZU_TEST_ASSERT(b); + CYBOZU_TEST_ASSERT(sq1.isPrecomputed()); + sq2.set(&b, p, false); + CYBOZU_TEST_ASSERT(sq1 == sq2); + if (sq1 != sq2) { + puts("dump"); + puts("sq1"); + sq1.dump(); + puts("sq2"); + sq2.dump(); + puts("---"); + } +} + +void testSquareRoot() +{ + if (BN::param.cp == mcl::BN254 || BN::param.cp == mcl::BLS12_381) { + SquareRootPrecomputeTest(BN::param.p); + SquareRootPrecomputeTest(BN::param.r); + } +} + +void testLagrange() +{ + puts("testLagrange"); + const int k = 7; + Fr c[k], x[k], y[k]; + for (size_t i = 0; i < k; i++) { + c[i].setByCSPRNG(); + x[i].setByCSPRNG(); + } + for (size_t i = 0; i < k; i++) { + mcl::evaluatePolynomial(y[i], c, k, x[i]); + } + Fr s; + mcl::LagrangeInterpolation(s, x, y, k); + CYBOZU_TEST_EQUAL(s, c[0]); + mcl::LagrangeInterpolation(s, x, y, 1); + CYBOZU_TEST_EQUAL(s, y[0]); + mcl::evaluatePolynomial(y[0], c, 1, x[0]); + CYBOZU_TEST_EQUAL(y[0], c[0]); +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/bls12_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/bls12_test.cpp new file mode 100644 index 000000000..7011516bd --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/bls12_test.cpp @@ -0,0 +1,720 @@ +#define PUT(x) std::cout << #x "=" << x << std::endl; +#define CYBOZU_TEST_DISABLE_AUTO_RUN +#include +cybozu::CpuClock clk; +#include +#include +#include +#include + +#if defined(__EMSCRIPTEN__) && !defined(MCL_AVOID_EXCEPTION_TEST) + #define MCL_AVOID_EXCEPTION_TEST +#endif + +using namespace mcl::bls12; + +mcl::fp::Mode g_mode; + +const struct TestSet { + mcl::CurveParam cp; + const char *name; + const char *p; + const char *r; + struct G2 { + const char *aa; + const char *ab; + const char *ba; + const char *bb; + } g2; + struct G1 { + const char *a; + const char *b; + } g1; + const char *e; +} g_testSetTbl[] = { + { + mcl::BLS12_381, + "BLS12_381", + "0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab", + 
"0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001", + { + "0x024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb8", + "0x13e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e", + "0x0ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801", + "0x0606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79be", + }, + { + "0x17f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", + "0x08b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e1", + }, + "0x1250EBD871FC0A92A7B2D83168D0D727272D441BEFA15C503DD8E90CE98DB3E7B6D194F60839C508A84305AACA1789B6 " + "0x089A1C5B46E5110B86750EC6A532348868A84045483C92B7AF5AF689452EAFABF1A8943E50439F1D59882A98EAA0170F " + "0x1368BB445C7C2D209703F239689CE34C0378A68E72A6B3B216DA0E22A5031B54DDFF57309396B38C881C4C849EC23E87 " + "0x193502B86EDB8857C273FA075A50512937E0794E1E65A7617C90D8BD66065B1FFFE51D7A579973B1315021EC3C19934F " + "0x01B2F522473D171391125BA84DC4007CFBF2F8DA752F7C74185203FCCA589AC719C34DFFBBAAD8431DAD1C1FB597AAA5 " + "0x018107154F25A764BD3C79937A45B84546DA634B8F6BE14A8061E55CCEBA478B23F7DACAA35C8CA78BEAE9624045B4B6 " + "0x19F26337D205FB469CD6BD15C3D5A04DC88784FBB3D0B2DBDEA54D43B2B73F2CBB12D58386A8703E0F948226E47EE89D " + "0x06FBA23EB7C5AF0D9F80940CA771B6FFD5857BAAF222EB95A7D2809D61BFE02E1BFD1B68FF02F0B8102AE1C2D5D5AB1A " + "0x11B8B424CD48BF38FCEF68083B0B0EC5C81A93B330EE1A677D0D15FF7B984E8978EF48881E32FAC91B93B47333E2BA57 " + "0x03350F55A7AEFCD3C31B4FCB6CE5771CC6A0E9786AB5973320C806AD360829107BA810C5A09FFDD9BE2291A0C25A99A2 " + "0x04C581234D086A9902249B64728FFD21A189E87935A954051C7CDBA7B3872629A4FAFC05066245CB9108F0242D0FE3EF " + "0x0F41E58663BF08CF068672CBD01A7EC73BACA4D72CA93544DEFF686BFD6DF543D48EAA24AFE47E1EFDE449383B676631 " + }, +}; + +CYBOZU_TEST_AUTO(size) +{ + CYBOZU_TEST_EQUAL(sizeof(Fp), 48u); + CYBOZU_TEST_EQUAL(sizeof(Fr), 32u); + CYBOZU_TEST_EQUAL(sizeof(Fp2), sizeof(Fp) * 2); + CYBOZU_TEST_EQUAL(sizeof(Fp6), sizeof(Fp) * 6); + CYBOZU_TEST_EQUAL(sizeof(Fp12), sizeof(Fp) * 12); + CYBOZU_TEST_EQUAL(sizeof(G1), sizeof(Fp) * 3); + CYBOZU_TEST_EQUAL(sizeof(G2), sizeof(Fp2) * 3); +} + +void testParam(const TestSet& ts) +{ + CYBOZU_TEST_EQUAL(BN::param.r, mpz_class(ts.r)); + CYBOZU_TEST_EQUAL(BN::param.p, mpz_class(ts.p)); +} + +void finalExpC(Fp12& y, const Fp12& x) +{ + const mpz_class& r = BN::param.r; + const mpz_class& p = BN::param.p; + mpz_class p2 = p * p; + mpz_class p4 = p2 * p2; +#if 1 + Fp12::pow(y, x, p2 + 1); + Fp12::pow(y, y, p4 * p2 - 1); + Fp12::pow(y, y, (p4 - p2 + 1) / r * 3); +#else + Fp12::pow(y, x, (p4 * p4 * p4 - 1) / r * 3); +#endif +} + +void pairingC(Fp12& e, const G1& P, const G2& Q) +{ + millerLoop(e, P, Q); + finalExp(e, e); +} +void testIoAll(const G1& P, const G2& Q) +{ + const int FpTbl[] = { 0, 2, 2|mcl::IoPrefix, 10, 16, 16|mcl::IoPrefix, mcl::IoArray, mcl::IoArrayRaw }; + const int EcTbl[] = { mcl::IoEcAffine, mcl::IoEcProj, mcl::IoEcCompY, mcl::IoSerialize }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(FpTbl); i++) { + for (size_t j = 0; j < CYBOZU_NUM_OF_ARRAY(EcTbl); j++) { + G1 P2 = P, P3; + G2 Q2 = Q, Q3; + int ioMode = FpTbl[i] | EcTbl[j]; + std::string s = P2.getStr(ioMode); + P3.setStr(s, ioMode); + CYBOZU_TEST_EQUAL(P2, P3); + s = Q2.getStr(ioMode); + Q3.setStr(s, ioMode); + CYBOZU_TEST_EQUAL(Q2, Q3); + s = P.x.getStr(ioMode); + Fp Px; + 
Px.setStr(s, ioMode);
+			CYBOZU_TEST_EQUAL(P.x, Px);
+			s = Q.x.getStr(ioMode);
+			Fp2 Qx;
+			Qx.setStr(s, ioMode);
+			CYBOZU_TEST_EQUAL(Q.x, Qx);
+		}
+	}
+}
+
+void testIo(const G1& P, const G2& Q)
+{
+	testIoAll(P, Q);
+	G1 Z1;
+	G2 Z2;
+	Z1.clear();
+	Z2.clear();
+	testIoAll(Z1, Z2);
+}
+
+void testSetStr(const G2& Q0)
+{
+	G2::setCompressedExpression();
+	G2 Q;
+	Q.clear();
+	for (int i = 0; i < 10; i++) {
+		G2 R;
+		R.setStr(Q.getStr());
+		CYBOZU_TEST_EQUAL(Q, R);
+		G2::add(Q, Q, Q0);
+	}
+}
+
+void testMapToG1()
+{
+	G1 g;
+	for (int i = 1; i < 10; i++) {
+		mapToG1(g, i);
+		CYBOZU_TEST_ASSERT(!g.isZero());
+		G1 gr;
+		G1::mul(gr, g, BN::param.r);
+		CYBOZU_TEST_ASSERT(gr.isZero());
+	}
+}
+
+void testMapToG2()
+{
+	G2 g;
+	for (int i = 1; i < 10; i++) {
+		mapToG2(g, i);
+		CYBOZU_TEST_ASSERT(!g.isZero());
+		G2 gr;
+		G2::mul(gr, g, BN::param.r);
+		CYBOZU_TEST_ASSERT(gr.isZero());
+	}
+	Fp x;
+	x.setHashOf("abc");
+	mapToG2(g, Fp2(x, 0));
+	CYBOZU_TEST_ASSERT(g.isValid());
+}
+
+void testPrecomputed(const G1& P, const G2& Q)
+{
+	Fp12 e1, e2;
+	pairing(e1, P, Q);
+	std::vector<Fp6> Qcoeff;
+	precomputeG2(Qcoeff, Q);
+	precomputedMillerLoop(e2, P, Qcoeff);
+	finalExp(e2, e2);
+	CYBOZU_TEST_EQUAL(e1, e2);
+}
+
+#if 0
+void testFp12pow(const G1& P, const G2& Q)
+{
+	Fp12 e, e1, e2;
+	pairing(e, P, Q);
+	cybozu::XorShift rg;
+	for (int i = -10; i < 10; i++) {
+		mpz_class xm = i;
+		Fp12::pow(e1, e, xm);
+		Fp12::powGeneric(e2, e, xm);
+		CYBOZU_TEST_EQUAL(e1, e2);
+	}
+	for (int i = 0; i < 10; i++) {
+		Fr x;
+		x.setRand(rg);
+		mpz_class xm = x.getMpz();
+		Fp12::pow(e1, e, xm);
+		param.glv2.pow(e2, e, xm);
+		CYBOZU_TEST_EQUAL(e1, e2);
+	}
+}
+#endif
+
+void testMillerLoop2(const G1& P1, const G2& Q1)
+{
+	Fp12 e1, e2, e3;
+	mpz_class c1("12342342423442");
+	mpz_class c2("329428049820348209482");
+	G2 Q2;
+	G1 P2;
+	G2::mul(Q2, Q1, c1);
+	G1::mul(P2, P1, c2);
+	pairing(e1, P1, Q1);
+	pairing(e2, P2, Q2);
+	e1 *= e2;
+
+	std::vector<Fp6> Q1coeff, Q2coeff;
+	precomputeG2(Q1coeff, Q1);
+	precomputeG2(Q2coeff, Q2);
+	precomputedMillerLoop2(e2, P1, Q1coeff, P2, Q2coeff);
+	precomputedMillerLoop2mixed(e3, P1, Q1, P2, Q2coeff);
+	CYBOZU_TEST_EQUAL(e2, e3);
+	finalExp(e2, e2);
+	CYBOZU_TEST_EQUAL(e1, e2);
+
+	// special value
+	G2 Z;
+	Z.clear();
+	Q2 += Q2;
+	precomputeG2(Q1coeff, Z);
+	precomputeG2(Q2coeff, Q2);
+	precomputedMillerLoop2(e2, P1, Q1coeff, P2, Q2coeff);
+	precomputedMillerLoop2mixed(e3, P1, Z, P2, Q2coeff);
+	finalExp(e2, e2);
+	finalExp(e3, e3);
+	CYBOZU_TEST_EQUAL(e2, e3);
+}
+
+void testPairing(const G1& P, const G2& Q, const char *eStr)
+{
+	Fp12 e1;
+	pairing(e1, P, Q);
+	Fp12 e2;
+	{
+		std::stringstream ss(eStr);
+		ss >> e2;
+	}
+	CYBOZU_TEST_EQUAL(e1, e2);
+	Fp12 e = e1, ea;
+	G1 Pa;
+	G2 Qa;
+#if defined(__EMSCRIPTEN__) || MCL_SIZEOF_UNIT == 4
+	const int count = 100;
+#else
+	const int count = 1000;
+#endif
+	mpz_class a;
+	cybozu::XorShift rg;
+	for (int i = 0; i < count; i++) {
+		Fr r;
+		r.setRand(rg);
+		a = r.getMpz();
+		Fp12::pow(ea, e, a);
+		G1::mul(Pa, P, a);
+		G2::mul(Qa, Q, a);
+		G1 T;
+		G1::mulCT(T, P, a);
+		CYBOZU_TEST_EQUAL(Pa, T);
+		pairing(e1, Pa, Q);
+		pairing(e2, P, Qa);
+		CYBOZU_TEST_EQUAL(ea, e1);
+		CYBOZU_TEST_EQUAL(ea, e2);
+	}
+}
+
+void testTrivial(const G1& P, const G2& Q)
+{
+	G1 Z1; Z1.clear();
+	G2 Z2; Z2.clear();
+	Fp12 e;
+	pairing(e, Z1, Q);
+	CYBOZU_TEST_EQUAL(e, 1);
+	pairing(e, P, Z2);
+	CYBOZU_TEST_EQUAL(e, 1);
+	pairing(e, Z1, Z2);
+	CYBOZU_TEST_EQUAL(e, 1);
+
+	std::vector<Fp6> Qcoeff;
+	precomputeG2(Qcoeff, Z2);
+	precomputedMillerLoop(e, P, Qcoeff);
+	finalExp(e,
e); + CYBOZU_TEST_EQUAL(e, 1); + + precomputeG2(Qcoeff, Q); + precomputedMillerLoop(e, Z1, Qcoeff); + finalExp(e, e); + CYBOZU_TEST_EQUAL(e, 1); +} + +#include "bench.hpp" + +CYBOZU_TEST_AUTO(naive) +{ + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(g_testSetTbl); i++) { + const TestSet& ts = g_testSetTbl[i]; + printf("i=%d curve=%s\n", int(i), ts.name); + initPairing(ts.cp, g_mode); + const G1 P(Fp(ts.g1.a), Fp(ts.g1.b)); + const G2 Q(Fp2(ts.g2.aa, ts.g2.ab), Fp2(ts.g2.ba, ts.g2.bb)); +#ifdef ONLY_BENCH + { + Fp12 e; + for (int i = 0; i < 1000; i++) pairing(e, P, Q); + } + clk.put(); + return; +#endif + testParam(ts); + testIo(P, Q); +// testFp12pow(P, Q); + testTrivial(P, Q); + testSetStr(Q); + testMapToG1(); + testMapToG2(); + testPairing(P, Q, ts.e); + testPrecomputed(P, Q); + testMillerLoop2(P, Q); + testBench(P, Q); + } + int count = (int)clk.getCount(); + if (count) { + printf("count=%d ", count); + clk.put(); + } +} + +CYBOZU_TEST_AUTO(finalExp) +{ + const char *e0Str = +"012974491575E232199B73B30FE53FF643FEAE11023BCA7AF961C3600B45DFECFE4B30D52A62E73DA4C0409810304997\n" +"05CE2FB890FE65E20EC36347190ECB4884E401A64B666557B53E561F6D0979B7A96AD9E647ED78BD47187195C00F563C\n" +"02E85D1E559488603A70FEE99354DA8847215EC97282CA230DE96FED6DD5D4DD4EF4D901DB7F544A1A45EBEBA1450109\n" +"048FB1E44DDABF18D55C95704158A24678AA2A6ED0844108762E88306E5880E8C67BF44E24E40AB3F93D9E3713170341\n" +"07EF7BE685DC0DBA1B3E1D2E9090CD98EAD1325B60881772F17077386A3182B117F5FD839363F5891D08E82B88EC6F12\n" +"17803435700EF7A16C06404C6D17EB4FD84079FE9872207302A36C791B6E90447B33D703BBFE04ECB641C3A573E2CD50\n" +"19A494E6A872E46FC85D09FD6D30844B6FF05729BC253A9640F7BE64AAA8C2C8E0AE014A9DD816C53A3EDEBB2FA649EB\n" +"020949ABAA14F1DCE17FA9E091DDA963E9E492BA788E12B9B610E80A4D94DB9CC50341ED107C7D50E5738052595D4A27\n" +"09E217B513B3603723DAC3188A2F7CBDD84A56E7E5004446E7D4C63D6E378DA26E411C10898E48DB4B0C065E4699A9C5\n" +"12393BD23D0EC122082A1EC892A982F3C9AFD14240CE85258D8A3EF0A13CB545D6EF7848FD40DD4AEF1554341C5C5BBF\n" +"07EA8A0D6A57C78E5663F94E2B1ABC0D760ED18DBA64305EAD5EE350FB0342A7A81C0D5C8B3AD826D009276B0F32D2C8\n" +"16804D0D4A2633ED01568B0F8F06C4497E46E88D05FD191AAE530ACA791D0E114D74874FA88E33FAF48757153B09BB0E"; + +const char *e1Str = +"0E05D19E90D2C501E5502C7AC80D77201C47DF147DD1076440F0DF0179DF9802CA0775E0E73DD9174F1094D2280787B3\n" +"14D2F5C84279E7177A3543FBEAE261DE8F6C97EFD5F3FF3F959EC9FC0303F620A4B3AF00DF409496CECADDD0A7F0A164\n" +"1414E9B9DF8DF1EAC2E70D5538018377788C62016A54F28B037A68740705089AE431B86756F98CBE19690A5EAC0C2466\n" +"12D8B32157836A131CCA3CA313DAAAF909BC3AD6BDD15885BB429922B9CD7D1246D1163E5E6F88D68BF1B75E451EFABB\n" +"102C9A839A924E0D603D13F2E08A919E0B9EE2A269FC75727BA13D66027C157B9BB4077977FA94557DE4427BF11B234B\n" +"19DBEB7F2E3096AFFD44837655BD8249741B484B0EB0DBEE569DEA8D9E38AE09D210C8BC16AA6DFBC923095B2C9A8B2B\n" +"19B9A6DCCD01FA0D04D5CE94D8BDCE1DF64AFEB7FD493B955180A5C6B236E469F0E07CC9BB4203FCAC46AE6F8E5419D6\n" +"02BFA87AF7A3726A7ABACDCFDD53694AF651554F3A431AB4274F67D5DAD2D6C88AF794705FF456A936C83594731AD8DC\n" +"0F21E0173E3B50DD98EFA815B410631A57399B451FD6E1056FFD09C9FE50EFAD3D026F0C46C8BB1583A50B7853D990DA\n" +"02230237AE04B61F9269F6E7CD2FCF1231CEE4690AA658B0018EFC0D0770FD0A56B3B7294086E8D306B1465CDDD858CD\n" +"087EB8F6547015661E9CD48D6525C808636FCB8420B867CB2A87E006B2A93BBD5EF675E6CDDA9E6F94519C49EA8BB689\n" +"19F5C988B2DD6E33D7D3D34EFB1991F80DC28006AC75E0AB53FD98FC6F2476D05DD4ECA582F5FF72B8DDD9DDDE80FFC9"; + + Fp12 e0, e1, e2; + e0.setStr(e0Str, 16); + e1.setStr(e1Str, 16); + finalExp(e2, e0); +// 
finalExpC(e2, e0); + CYBOZU_TEST_EQUAL(e1, e2); + CYBOZU_BENCH_C("finalExp", 100, finalExp, e2, e0); +} + +CYBOZU_TEST_AUTO(addLine) +{ +const char *l0Str= +"0EF586FCDB69442CB41C0DA719AC5C92BD99A916C1F01BCFC7606AA7A23D680C04620FDFC2144E0EA6025F05241A791F\n" +"164CFDADE9B91150C6D2C7F7CDF29BC3105A7EA51217283CDF801EBEE9E86CE5078253E322C72129DAA42F6DBAD17D37"; +const char *l1Str = +"07A124F536BE83CCB3AF8D3DA2AE094942755040B9DA8E0796C462ACE3805D6916ACA7E9281261D8025571E2F31AAF0D\n" +"12D05751A9B255143541D0A4E57E120F937D51F9A07D31982390CA6EB5DF8CC0640FD291521069BF9964AE33EDD1323D"; +const char *l4Str = +"0D609DE41CF1260B332C1A53AA54703F62AB8224777E34FEEAB09AA06187CA71D8C7C2EB66F59D3622D431BE17D0FEE6\n" +"0A93C2984041171BE701560017D64D0640B6F61D7DCA8F527FA6B6A1A1033261C0761CAA56A00D4D16C9D3B7371E02D9"; + +const char *rStr = +"4 0A8DFA69FDD43EDCCC6375750373B443157EF4641E5B4CA82FBF23E3E8EA72351EA754168CEC77573D337E6227C0D0DD\n" +"12C8508CF1828C52A9A1A71779129D605327191EE459AED3C0B4B14657B08B2927173FADF8E4275188E8D49E57A75B33\n" +"12AD7EB96734F2C93B669FD54845CD2FF351AFDF0123E96772021DC3F4F3B456DB1B37CB1C380B1947616165FF0DDAEA\n" +"03D80F92C8A6005DEB291AF28406B7B4FCEDD81A244997DBB719B01D162BD7D71F0FD63BF76F8F1AC90618C3702294DF\n" +"199F7A719EA1CA2CD03CFFBB9A4BC2FE1BD8BCA7C772D223E6CB20C1313C3D3C52CFBB88445E56C833908C52A4EC68F1\n" +"0A3F6B27A6DDA00DB848574ECB06F179271D5844BDA66CD5AE1792A8FDD25E3A504A95839113BAA8B1FCB53EEE5F1FF0"; + +const char *qStr = +"4 0B5C339C23F8EAB3647E974BCDDF72C96F97A444346BE72CA73AB1323B83B8F6161257AB34C7E0CF34F6C45086CA5868\n" +"13C2235E9F9DFB33344BA2EE5A71435859022880732EDC9EC75AC79AE9DA972593CDC40A0AC334D6D2E8D7FAD1D98D0B\n" +"134B8EED8196A00D3B70ADBC26FF963B725A351CF0B73FE1A541788AFB0BB081AF82A438021B5E878B15D53B1D27C6A7\n" +"18CC69F847BEE826B939DCB4030D33020D03B046465C9EE103AA8009A175DB169070294E75771586687FE361DB884BCD\n" +"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001\n" +"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + +const char *pStr = +"4 0FD3977C60EC322BC281C915955ED534B491E39C72E8E800271CEF3F0492D890829FA69C45FCE93D9847A0CAB325D871\n" +"17CC2C36C5D283C05BFCECCF48DBB2050332DA058DD67326A9EE520967DBCAEDFCB5F05A085D1A49DF08BB968CC782C5\n" +"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"; + +const char *m0Str = +"1010A4F9944514352AAD5D290AFB95C435EB64B5E74519807C9602DABCD6B6F5494E419758AE9A43F539F812252C65E5\n" +"1622F7A52BAAC10EBFF0977F61866544BF9B91315FA66ADB6AC200AFF7A3676C1AD3480340B23C66F7C6AE50A703F245"; +const char *m1Str = +"0905180B20FCE2AA545A73D6B9EA1F82479EF3FB5A3BA8DDB656D9B2A4DA7B63CCF75538D15093949B442E27804C8CE2\n" +"0FE834508BBAD2F22DCBF1C3C1BCCD69561827613EB63F4907832A7ABBBC6040CF2E517D0D5E22D3812EEE1EC55640DD"; +const char *m4Str = +"1197D94A8DAFE6F72B17A31964CA5D0C3B2A12BEFF959F7DF7A37938C64C01508D12C24479E5C3D314F862A9977D6C7D\n" +"0B0254A26810307964E2A05680C19DE7C63CCBD7CC3558AD366BA48D9F7049E245BF2C54EA0339301739851E6EB2158F"; + +const char *r1Str = +"4 16A33C4ED01E95833D39EECE223380FE860B6DC1F71B1DDBEE2BE39B5D682B090F18758495E202010D3B9B45A46FF80E\n" +"01CECF5CC255E32B833C16EF3F983CCA9996A33504615909AD5685D9F637BF2357442BEC89DAFC747B69EFEF57D5A213\n" +"061C33850E5E4A418F3B68D6097C3911A551D6DB3C7E21196410F69D510C1A888635B6B699F98C14912A8D97742D36A9\n" +"0457FB78282C2B8F8BA803E77D058EF3BF6B06C6241D6FA6E2B1607F3E9D7597D1A10B7AA4E06B89FBA01736901AD826\n" 
+"0B45E9B7D311B8C37F7E9262227A4D721F2D148DE6F12EC5D599B45E4790F35B37A1D6928709F438849324A807EC1913\n" +"0E771545F892C247A5365BA1F14934D8ED37483A6B7DD3EB4C3FBA0AC884D7EE9C080C3B39ADA64AE545E7339F83AFB0"; + + const int mode = mcl::IoEcProj | 16; + Fp6 l, m; + G2 R, Q, R1; + G1 P; + + R.setStr(rStr, mode); + Q.setStr(qStr, mode); + P.setStr(pStr, mode); + l.a.setStr(l0Str, mode); + l.b.setStr(l4Str, mode); + l.c.setStr(l1Str, mode); + local::addLine(l, R, Q, P); + m.a.setStr(m0Str, mode); + m.b.setStr(m4Str, mode); + m.c.setStr(m1Str, mode); + R1.setStr(r1Str, mode); + CYBOZU_TEST_EQUAL(l, m); + CYBOZU_TEST_EQUAL(R, R1); +} + +CYBOZU_TEST_AUTO(dblLine) +{ +const char *l0Str= +"0905F47F07FA2177E4B73559D93D9B41A2FD20E0440167E63A0F62321EB73C784405DE360477245F5E4AE8FA2EAEC6FF\n" +"02DA455C573517EE8BD4E40A60851E2EC85CF514351681B89B1B72664E384A3678A25DC5C4CF84C086C13968BC701F91"; +const char *l1Str = +"0F48059E31CEF9546D41DEF31FB3BFD13BBCC38248E4846AFD216350B4B661FA1382E74BCF904BE8E33C26FF0D29ABA0\n" +"0D7FAEE426E4A6F32E4DE1CCB76BBA870A183113589CF58A358DC9C284C82B54C65A754F9A71EAD87304744CFD105051"; + +const char *l4Str = +"162AE8B029F8420BEDE5A399BA19C09FED01DE2748F4458065DBE51044FBFE749B28EF1B7F79A5E4545EB550DD0CFE02\n" +"091042B589FD59FBF286D21475CCCF444D8DCC49389EA3B98AF864DDB9C08BDDEB320D6D88F0C07D7CD1733A41404C1F"; + +const char *qStr = +"4 047ACF79048EDCA97A19DB1779A44CE610CEA9FDCC62D1C4014B335834BC63414F55B670CCF15A86E58BC3737FB70919\n" +"11D8063B2FFA2E1F5224593FF74D0E9650CAB6AF702B74159F7F97E2CF0843FBCD561D41FEC779BEB48746CD2F30FF74\n" +"17684E8EA85C990DF5472B4EBAF51002CDBBECF79BA2988FC610A5CE09F4A584248DCC506E78C39C9BB4F6008115DE64\n" +"1334CA7263ED9395BBEDBB6C938C642200C239FB0CADF1F652332A037FFBC7E567CCE09540939F25316CBC4FC68CE4DB\n" +"0670DCF344F027CB92F7B1F569B281C8FF756D83CD7B65EB118FE95FBE16ED28649B8F739FE3A2BECA1979FC18484ECD\n" +"13E3E30759DCC1FA9F1A62D54BEF0EE1CC75E194A559F2D6BE3025BE4BEB9F7351A7608FE8D4CCAD30BA2A8748205487"; + +const char *pStr = +"4 1579B48AE944AFE8FC69B38A7CD0D2C6B93E5F506535A5410E25FB1C1707938D6932F3D620A2BBB90ED7E2601971DEA8\n" +"0234E5B373AD62D9EF1EBAE6FA6FFAD26144717F65AE9F98BD4280978ED52B3621F60FA4A8F6E5B5DAF64469733827E6\n" +"0B4D1755924541041F75FF399E3B5F535BC85D5A982AEEC5FC2404F06AC7F062C090C4545D1602C3D8B559801EE7B9A2"; + +const char *m0Str = +"198D1123A9817C40A914A3FF9E29BB16DD2F0DF98D0AB5C3A6014D60E31AE973051C35ADCEA0A41F32BB16E6688DC73F\n" +"10339DB2F26D1B867FD3E60A24F938210EABEFC51536845A490F28A088A4AC53575DBBAA218D70D34E28EBDE14DB3465\n"; +const char *m1Str = +"066852248D915F6378B3F4A8E6173AC748FBFAE236AAEEAECC3F34D2D22706A06B925A83DD8276B2B240F61D761587B0\n" +"17CC8195E6607FF19A26AA69CA50C32487E35D2D75301AC4B6988F1B77523BF472927EE5710DF49A563534D86C684BE0\n"; +const char *m4Str = +"10B67D1A0CE430B7AD74F64DD6C2E44C4788EADF8340909843C96B918BF54703CC14686B26E350EB1140ACC3337EEEB4\n" +"0F5D52E6F0B10A081EFF885CC858109241B379985ADD8982E6B8A202FD283897EFBA4CBE444C29751410A61FC8346545"; + +const char *q1Str = +"4 17B5E51EC931E724ABACE9C7F8AFDD51F3929478B47C222F99844D166936063D3BFCDF7AD7079EEF4BE8514E3D09EF0F\n" +"0F5794F38BAEC0FA3C30AC4C0B8E9024B2047F2F4576434F91768F2B51AD58C48E88414B1D4B7A9119A947D3CFEDEF0A\n" +"1320714A8B7E23C4C558D2B1C556CC8FB6B41F3669EFD70B6D204C2A7C6EF2E0CBCA945AA7BACB402E00ED338F7D12FC\n" +"0C2846E386161F123344704528D9944677806C3F784E3997857C91D2F3F37AB6AD92360CD97CABD5D631E9FC74708AD3\n" +"17F92FF3D71B473B802F2DE90C19A5F5DBFAA397293871AB58E5B813F7D686EA8E1814C69C50C02D062F3A13C1D045E1\n" 
+"05214392858DE04B3B468B2D0C703A485508C29157D81E9F799BAB2FEF0F514C99D5F8085D8062281418C6CCE5621D18\n"; + + const int mode = mcl::IoEcProj | 16; + Fp6 l, m; + G2 Q, Q1; + G1 P; + + G1::setOrder(0); + Q.setStr(qStr, mode); + P.setStr(pStr, mode); + l.a.setStr(l0Str, mode); + l.b.setStr(l4Str, mode); + l.c.setStr(l1Str, mode); + local::dblLine(l, Q, P); + m.a.setStr(m0Str, mode); + m.b.setStr(m4Str, mode); + m.c.setStr(m1Str, mode); + Q1.setStr(q1Str, mode); + CYBOZU_TEST_EQUAL(l, m); + CYBOZU_TEST_EQUAL(Q, Q1); + G1::setOrder(BN::param.r); +} + +CYBOZU_TEST_AUTO(mul_012) +{ + const char *fStr = +"087590AFBFEB8F85DD912EC297C2B5DD7BC0A9B0914348C5D99F0089C09CDBA0DCDAF1C704A7B092D8FB9A75B7C06D88\n" +"119DD8B08C40D4EB3D7AF19221E41639A98A10EF1A22F9AD8CB1350526B9335F47E76B2FFD6652E693A67440574D5A0C\n" +"134ADA7C4ABFBA4324900D25E5077A119F9E55A7F337C03FD539D8DAC392B458F11261BEA007393D43657E9656B984D6\n" +"01032DDB3CAEC38B7DA916CA111C46A013F1DC83AF13DFF5B71CC3789974F946CFC43FE7B8EE519E524627248369FCE7\n" +"19E9455C14A9640139224BB1337E4EC5EE92BFF757DB179CC98CF0F09682E44ED4B6004F31D4788DE28BB2D8F41DDAE4\n" +"0B9877DF6AC1015375AB421363A5B06D2DC1763B923FF674A06AE101306A4A39967A3F9EF12E870C124A26CE68E2D003\n" +"02AA5AC5901C9C91CD0B43CA62F21FA541896802A8AAF0FD5EDF8DAF4A98CEC19F457A67369E795594543677B4A16EA4\n" +"0604DB7CE2A0ABD8ADB5F4F06F20B01510BF9787C912B1036F570E7368D341D9B794F078DFD3265306841180865500D0\n" +"08145045CF5751502778739EFE6FEA6036C8F14800F4818C2FD8BA5AF98E89B0BBE6510D511C4E5A83A2813A92B655F0\n" +"0FDE93D3326321ECF6171FBC4665F1C171F19A6F1D521BFA1A1B80E0B08CEBB78B255AF0B5F7E45AA6C1D01005200FB1\n" +"0F2A9EA2891A683AE15A79EDB0C6DF45FFAD4D22F3293AE59D3CE8F6E0E59A097673D05D81ACAD8C59817FFDD3E89CF1\n" +"0724BD07BDDCA23775C1DECD80CE7722F98C10E75A0CD9A1FA81921A04EEFAC55BE0740C5F01ED83FDFC66380339D417\n"; + +const char *l0Str = +"198D1123A9817C40A914A3FF9E29BB16DD2F0DF98D0AB5C3A6014D60E31AE973051C35ADCEA0A41F32BB16E6688DC73F\n" +"10339DB2F26D1B867FD3E60A24F938210EABEFC51536845A490F28A088A4AC53575DBBAA218D70D34E28EBDE14DB3465"; +const char *l1Str = +"066852248D915F6378B3F4A8E6173AC748FBFAE236AAEEAECC3F34D2D22706A06B925A83DD8276B2B240F61D761587B0\n" +"17CC8195E6607FF19A26AA69CA50C32487E35D2D75301AC4B6988F1B77523BF472927EE5710DF49A563534D86C684BE0\n"; +const char *l4Str = +"10B67D1A0CE430B7AD74F64DD6C2E44C4788EADF8340909843C96B918BF54703CC14686B26E350EB1140ACC3337EEEB4\n" +"0F5D52E6F0B10A081EFF885CC858109241B379985ADD8982E6B8A202FD283897EFBA4CBE444C29751410A61FC8346545\n"; + +const char *f2Str = +"10128E1A9BD00FC81F6550921D0FED3944F63F980ABF91FDB73B1ED162337ED16075730ACD60A0FA7DFABAD9FC9657C5\n" +"055BE26091D8CDA32241F4991A1F184E403C3FFDD54858B23D5CE4B44402B65B26BCA6855DA7AC1C60F1D6651632DCD8\n" +"0D70827981F0D33185DE8767FDDFEC26CEB6A28F82C83BBABB0057E432FCF9072B666974123274751E35F371E931D6CC\n" +"02382B1A80E5BC95C75AE71BE2E097FD59365279CDD7EA358D87DEF132430744DABBF1B685D110CC731A9FDA40EEFC1B\n" +"0AAB560FB99D57A9B1B6C753DAF6B0619ED598C9B5FB0908F2DAE83C530E6365DBEDE29B9357D63803F46247A1F41C73\n" +"13C048F553BFC3C56516786DD26FF9D59ECFB9BE6B165F90E77CCED623BC66C6E93EFBF14576DB7E33C8C4F4E21F64DC\n" +"0987D7DEBB96A10D977F256432871BEBB4B3A620E4AE822E089E9DAA192CD278E9FA0CF598444F6758628BC38C33A5AD\n" +"0A4F1B75845B6C976BF49C35134AE73CA7A3C16D2E0BDA39C70367E3829E94EB7CAFBB0F8B57F4734B696D9CEF84FE73\n" +"0DFAB9C035F3DA51226F27998A494A32245800F0313446D6437D2F5B3F34A9E91428818B0C9AF63EB3AA618E80055FD5\n" +"06A58B9640FF8931616F6D08BA16ECE71F341C61F22E5EC5B556DF217179C3ECEC20E4BE425A3471F1D6648D14F89FBF\n" 
+"1614391845CDC212937BC1070010603FB4DF99A6B3FA7E7CD3316C56BA8B633B3DC7D864B36DA2F9A1E6B977DB150100\n" +"144A44415BCCB077EAA64C8DAC50631AF432C1420EBD8538818D65D6176BC1EB699579CED8340493306AF842B4B6822E"; + + Fp6 l; + Fp12 f, f2; + l.a.setStr(l0Str, 16); + l.b.setStr(l4Str, 16); + l.c.setStr(l1Str, 16); + f.setStr(fStr, 16); + f2.setStr(f2Str, 16); + local::mulSparse(f, l); + CYBOZU_TEST_EQUAL(f, f2); +} + +CYBOZU_TEST_AUTO(pairing) +{ + const int mode = mcl::IoEcProj | 16; + +const char *pStr = +"4 0FD3977C60EC322BC281C915955ED534B491E39C72E8E800271CEF3F0492D890829FA69C45FCE93D9847A0CAB325D871\n" +"17CC2C36C5D283C05BFCECCF48DBB2050332DA058DD67326A9EE520967DBCAEDFCB5F05A085D1A49DF08BB968CC782C5\n" +"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"; +const char *qStr = +"4 0B5C339C23F8EAB3647E974BCDDF72C96F97A444346BE72CA73AB1323B83B8F6161257AB34C7E0CF34F6C45086CA5868\n" +"13C2235E9F9DFB33344BA2EE5A71435859022880732EDC9EC75AC79AE9DA972593CDC40A0AC334D6D2E8D7FAD1D98D0B\n" +"134B8EED8196A00D3B70ADBC26FF963B725A351CF0B73FE1A541788AFB0BB081AF82A438021B5E878B15D53B1D27C6A7\n" +"18CC69F847BEE826B939DCB4030D33020D03B046465C9EE103AA8009A175DB169070294E75771586687FE361DB884BCD\n" +"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001\n" +"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; +const char *eStr = +"0E05D19E90D2C501E5502C7AC80D77201C47DF147DD1076440F0DF0179DF9802CA0775E0E73DD9174F1094D2280787B3\n" +"14D2F5C84279E7177A3543FBEAE261DE8F6C97EFD5F3FF3F959EC9FC0303F620A4B3AF00DF409496CECADDD0A7F0A164\n" +"1414E9B9DF8DF1EAC2E70D5538018377788C62016A54F28B037A68740705089AE431B86756F98CBE19690A5EAC0C2466\n" +"12D8B32157836A131CCA3CA313DAAAF909BC3AD6BDD15885BB429922B9CD7D1246D1163E5E6F88D68BF1B75E451EFABB\n" +"102C9A839A924E0D603D13F2E08A919E0B9EE2A269FC75727BA13D66027C157B9BB4077977FA94557DE4427BF11B234B\n" +"19DBEB7F2E3096AFFD44837655BD8249741B484B0EB0DBEE569DEA8D9E38AE09D210C8BC16AA6DFBC923095B2C9A8B2B\n" +"19B9A6DCCD01FA0D04D5CE94D8BDCE1DF64AFEB7FD493B955180A5C6B236E469F0E07CC9BB4203FCAC46AE6F8E5419D6\n" +"02BFA87AF7A3726A7ABACDCFDD53694AF651554F3A431AB4274F67D5DAD2D6C88AF794705FF456A936C83594731AD8DC\n" +"0F21E0173E3B50DD98EFA815B410631A57399B451FD6E1056FFD09C9FE50EFAD3D026F0C46C8BB1583A50B7853D990DA\n" +"02230237AE04B61F9269F6E7CD2FCF1231CEE4690AA658B0018EFC0D0770FD0A56B3B7294086E8D306B1465CDDD858CD\n" +"087EB8F6547015661E9CD48D6525C808636FCB8420B867CB2A87E006B2A93BBD5EF675E6CDDA9E6F94519C49EA8BB689\n" +"19F5C988B2DD6E33D7D3D34EFB1991F80DC28006AC75E0AB53FD98FC6F2476D05DD4ECA582F5FF72B8DDD9DDDE80FFC9"; + G1 P; + G2 Q; + P.setStr(pStr, mode); + Q.setStr(qStr, mode); + Fp12 e1, e2; + e1.setStr(eStr, 16); + pairing(e2, P, Q); + CYBOZU_TEST_EQUAL(e1, e2); +} + +void testCurve(const mcl::CurveParam& cp) +{ + initPairing(cp, g_mode); + G1 P; + G2 Q; + mapToG1(P, 1); + mapToG2(Q, 1); + GT e1, e2; + pairing(e1, P, Q); + cybozu::XorShift rg; + mpz_class a, b; + Fr r; + r.setRand(rg); a = r.getMpz(); + r.setRand(rg); b = r.getMpz(); + G1 aP; + G2 bQ; + G1::mul(aP, P, a); + G2::mul(bQ, Q, b); + pairing(e2, aP, bQ); + GT::pow(e1, e1, a * b); + CYBOZU_TEST_EQUAL(e1, e2); +} +CYBOZU_TEST_AUTO(multi) +{ + G1 P; + G2 Q; + int i; + puts("BN254"); + testCurve(mcl::BN254); + i = 1; + CYBOZU_BENCH_C("calcBN1", 100, (BN::param.mapTo.calcBN), P, i++); + CYBOZU_BENCH_C("naiveG2", 100, (BN::param.mapTo.naiveMapTo), P, i++); + CYBOZU_BENCH_C("calcBN2", 100, (BN::param.mapTo.calcBN), 
Q, i++);
+	CYBOZU_BENCH_C("naiveG2", 100, (BN::param.mapTo.naiveMapTo), Q, i++);
+	puts("BLS12_381");
+	testCurve(mcl::BLS12_381);
+	i = 1;
+	CYBOZU_BENCH_C("calcBN1", 100, (BN::param.mapTo.calcBN), P, i++);
+	CYBOZU_BENCH_C("naiveG1", 100, (BN::param.mapTo.naiveMapTo), P, i++);
+	CYBOZU_BENCH_C("calcBN2", 100, (BN::param.mapTo.calcBN), Q, i++);
+	CYBOZU_BENCH_C("naiveG2", 100, (BN::param.mapTo.naiveMapTo), Q, i++);
+}
+
+CYBOZU_TEST_AUTO(BLS12_G1mulCofactor)
+{
+	if (BN::param.cp.curveType != MCL_BLS12_381) return;
+}
+
+typedef std::vector<Fp> FpVec;
+
+void f(FpVec& zv, const FpVec& xv, const FpVec& yv)
+{
+	for (size_t i = 0; i < zv.size(); i++) {
+		Fp::mul(zv[i], xv[i], yv[i]);
+	}
+}
+int main(int argc, char *argv[])
+	try
+{
+	cybozu::Option opt;
+	std::string mode;
+	opt.appendOpt(&mode, "auto", "m", ": mode(gmp/gmp_mont/llvm/llvm_mont/xbyak)");
+	if (!opt.parse(argc, argv)) {
+		opt.usage();
+		return 1;
+	}
+	g_mode = mcl::fp::StrToMode(mode);
+	printf("JIT %d\n", mcl::fp::isEnableJIT());
+#if 0
+	initPairing(mcl::BLS12_381);
+	cybozu::XorShift rg;
+	const int n = 1;
+	std::vector<Fp> xv(n), yv(n), zv(n);
+	for (int i = 0; i < n; i++) {
+		xv[i].setByCSPRNG(rg);
+		yv[i].setByCSPRNG(rg);
+	}
+	FpDbl dx;
+	FpDbl::mulPre(dx, xv[0], yv[0]);
+	Fp2 x2, y2;
+	x2.a.setByCSPRNG(rg);
+	x2.b.setByCSPRNG(rg);
+	y2.a.setByCSPRNG(rg);
+	y2.b.setByCSPRNG(rg);
+	Fp2Dbl x2d, y2d;
+	Fp2Dbl::mulPre(x2d, x2, x2);
+	Fp2Dbl::mulPre(y2d, x2, y2);
+if(0){
+	puts("----------");
+	xv[0].dump();
+	yv[0].dump();
+	dx.dump();
+	puts("----------");
+//	exit(1);
+}
+//	CYBOZU_BENCH_C("Fp2::neg", 10000000, Fp2::neg, x2, x2);
+	CYBOZU_BENCH_C("Fp2::sqr", 10000000, Fp2::sqr, x2, x2);
+//	CYBOZU_BENCH_C("Fp2::sqrPre", 100000000, Fp2Dbl::sqrPre, x2d, x2);
+//	CYBOZU_BENCH_C("Fp2::mulPre", 100000000, Fp2Dbl::mulPre, x2d, x2, y2);
+//	CYBOZU_BENCH_C("sqrPre", 100000000, FpDbl::sqrPre, dx, xv[0]);
+//	CYBOZU_BENCH_C("mod ", 100000000, FpDbl::mod, xv[0], dx);
+//	CYBOZU_BENCH_C("mul ", 100000000, Fp::mul, xv[0], yv[0], xv[0]);
+//	CYBOZU_BENCH_C("sqr ", 100000000, Fp::sqr, xv[0], xv[0]);
+	return 0;
+#endif
+	return cybozu::test::autoRun.run(argc, argv);
+} catch (std::exception& e) {
+	printf("ERR %s\n", e.what());
+	return 1;
+}
diff --git a/vendor/github.com/byzantine-lab/mcl/test/bn384_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/bn384_test.cpp
new file mode 100644
index 000000000..b5674a918
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/bn384_test.cpp
@@ -0,0 +1,83 @@
+#define CYBOZU_TEST_DISABLE_AUTO_RUN
+#include <mcl/bn384.hpp>
+#include <cybozu/test.hpp>
+#include <cybozu/benchmark.hpp>
+#include <cybozu/option.hpp>
+#include <cybozu/xorshift.hpp>
+#include <string>
+
+using namespace mcl::bn384;
+
+mcl::fp::Mode g_mode;
+
+#include "bench.hpp"
+
+void testCurve(const mcl::CurveParam& cp)
+{
+	initPairing(cp, g_mode);
+	G1 P;
+	G2 Q;
+	mapToG1(P, 1);
+	mapToG2(Q, 1);
+	GT e1, e2;
+#ifdef ONLY_BENCH
+	cybozu::CpuClock clk;
+	for (int i = 0; i < 10000; i++) { clk.begin(); pairing(e1, P, Q); clk.end(); }
+	clk.put();
+	return;
+#endif
+	pairing(e1, P, Q);
+	cybozu::XorShift rg;
+	mpz_class a, b;
+	Fr r;
+	r.setRand(rg); a = r.getMpz();
+	r.setRand(rg); b = r.getMpz();
+	G1 aP;
+	G2 bQ;
+	G1::mul(aP, P, a);
+	G2::mul(bQ, Q, b);
+	pairing(e2, aP, bQ);
+	GT::pow(e1, e1, a * b);
+	CYBOZU_TEST_EQUAL(e1, e2);
+	testBench(P, Q);
+	testSquareRoot();
+	testLagrange();
+}
+
+CYBOZU_TEST_AUTO(pairing)
+{
+//	puts("BN160");
+//	testCurve(mcl::BN160);
+	puts("BN254");
+	// support 256-bit pairing
+	testCurve(mcl::BN254);
+	puts("BN381_1");
+	testCurve(mcl::BN381_1);
+	puts("BN381_2");
+	testCurve(mcl::BN381_2);
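+	// Each testCurve call above re-initializes the library for one curve and
+	// checks bilinearity: with e = e(P, Q) for the mapped points P, Q and
+	// random scalars a, b, it asserts e(aP, bQ) == e(P, Q)^(a*b).
+	// A minimal sketch of the same identity, assuming initPairing() has
+	// already run (the scalars 3 and 5 are arbitrary illustrative values):
+	//   G1 aP; G1::mul(aP, P, 3);
+	//   G2 bQ; G2::mul(bQ, Q, 5);
+	//   GT lhs, rhs;
+	//   pairing(lhs, aP, bQ);  // e(3P, 5Q)
+	//   pairing(rhs, P, Q);
+	//   GT::pow(rhs, rhs, 15); // e(P, Q)^(3*5)
+	//   // lhs == rhs by bilinearity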
+	puts("BLS12_381");
+	testCurve(mcl::BLS12_381);
+	// Q is on EcT<Fp2>, but has bad order
+	{
+		const char *s = "1 18d3d8c085a5a5e7553c3a4eb628e88b8465bf4de2612e35a0a4eb018fb0c82e9698896031e62fd7633ffd824a859474 1dc6edfcf33e29575d4791faed8e7203832217423bf7f7fbf1f6b36625b12e7132c15fbc15562ce93362a322fb83dd0d 65836963b1f7b6959030ddfa15ab38ce056097e91dedffd996c1808624fa7e2644a77be606290aa555cda8481cfb3cb 1b77b708d3d4f65aeedf54b58393463a42f0dc5856baadb5ce608036baeca398c5d9e6b169473a8838098fd72fd28b50";
+		G2 Q;
+		CYBOZU_TEST_EXCEPTION(Q.setStr(s, 16), std::exception);
+	}
+}
+
+int main(int argc, char *argv[])
+	try
+{
+	cybozu::Option opt;
+	std::string mode;
+	opt.appendOpt(&mode, "auto", "m", ": mode(gmp/gmp_mont/llvm/llvm_mont/xbyak)");
+	if (!opt.parse(argc, argv)) {
+		opt.usage();
+		return 1;
+	}
+	g_mode = mcl::fp::StrToMode(mode);
+	return cybozu::test::autoRun.run(argc, argv);
+} catch (std::exception& e) {
+	printf("ERR %s\n", e.what());
+	return 1;
+}
diff --git a/vendor/github.com/byzantine-lab/mcl/test/bn512_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/bn512_test.cpp
new file mode 100644
index 000000000..905bfd3db
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/bn512_test.cpp
@@ -0,0 +1,68 @@
+#define CYBOZU_TEST_DISABLE_AUTO_RUN
+#include <mcl/bn512.hpp>
+#include <cybozu/test.hpp>
+#include <cybozu/benchmark.hpp>
+#include <cybozu/option.hpp>
+#include <cybozu/xorshift.hpp>
+#include <string>
+
+using namespace mcl::bn512;
+
+mcl::fp::Mode g_mode;
+
+#include "bench.hpp"
+
+void testCurve(const mcl::CurveParam& cp)
+{
+	initPairing(cp, g_mode);
+	G1 P;
+	G2 Q;
+	mapToG1(P, 1);
+	mapToG2(Q, 1);
+	GT e1, e2;
+	pairing(e1, P, Q);
+	cybozu::XorShift rg;
+	mpz_class a, b;
+	Fr r;
+	r.setRand(rg); a = r.getMpz();
+	r.setRand(rg); b = r.getMpz();
+	G1 aP;
+	G2 bQ;
+	G1::mul(aP, P, a);
+	G2::mul(bQ, Q, b);
+	pairing(e2, aP, bQ);
+	GT::pow(e1, e1, a * b);
+	CYBOZU_TEST_EQUAL(e1, e2);
+	testBench(P, Q);
+	testSquareRoot();
+	testLagrange();
+}
+
+CYBOZU_TEST_AUTO(pairing)
+{
+	puts("BN462");
+	testCurve(mcl::BN462);
+	puts("BN381_1");
+	testCurve(mcl::BN381_1);
+	puts("BLS12_381");
+	testCurve(mcl::BLS12_381);
+	puts("BN254");
+	testCurve(mcl::BN254);
+}
+
+int main(int argc, char *argv[])
+	try
+{
+	cybozu::Option opt;
+	std::string mode;
+	opt.appendOpt(&mode, "auto", "m", ": mode(gmp/gmp_mont/llvm/llvm_mont/xbyak)");
+	if (!opt.parse(argc, argv)) {
+		opt.usage();
+		return 1;
+	}
+	g_mode = mcl::fp::StrToMode(mode);
+	return cybozu::test::autoRun.run(argc, argv);
+} catch (std::exception& e) {
+	printf("ERR %s\n", e.what());
+	return 1;
+}
diff --git a/vendor/github.com/byzantine-lab/mcl/test/bn_c256_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/bn_c256_test.cpp
new file mode 100644
index 000000000..2ce85162d
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/bn_c256_test.cpp
@@ -0,0 +1,6 @@
+#include <mcl/bn256.hpp>
+using namespace mcl::bn256;
+#define MCLBN_DEFINE_STRUCT
+#define MCLBN_FP_UNIT_SIZE 4
+#include "bn_c_test.hpp"
+
diff --git a/vendor/github.com/byzantine-lab/mcl/test/bn_c384_256_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/bn_c384_256_test.cpp
new file mode 100644
index 000000000..e7bbefda9
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/bn_c384_256_test.cpp
@@ -0,0 +1,7 @@
+#include <mcl/bls12_381.hpp>
+using namespace mcl::bls12;
+#define MCLBN_DEFINE_STRUCT
+#define MCLBN_FP_UNIT_SIZE 6
+#define MCLBN_FR_UNIT_SIZE 4
+#include "bn_c_test.hpp"
+
diff --git a/vendor/github.com/byzantine-lab/mcl/test/bn_c384_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/bn_c384_test.cpp
new file mode 100644
index 000000000..a9f20243a
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/bn_c384_test.cpp
@@ -0,0 +1,6 @@
+#include <mcl/bn384.hpp>
+using namespace mcl::bn384;
+#define MCLBN_DEFINE_STRUCT
+#define MCLBN_FP_UNIT_SIZE 6
+#include "bn_c_test.hpp"
+
diff --git a/vendor/github.com/byzantine-lab/mcl/test/bn_c512_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/bn_c512_test.cpp
new file mode 100644
index 000000000..c6af3989f
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/bn_c512_test.cpp
@@ -0,0 +1,6 @@
+#include <mcl/bn512.hpp>
+using namespace mcl::bn512;
+#define MCLBN_DEFINE_STRUCT
+#define MCLBN_FP_UNIT_SIZE 8
+#include "bn_c_test.hpp"
+
diff --git a/vendor/github.com/byzantine-lab/mcl/test/bn_c_test.hpp b/vendor/github.com/byzantine-lab/mcl/test/bn_c_test.hpp
new file mode 100644
index 000000000..e9dc59393
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/bn_c_test.hpp
@@ -0,0 +1,699 @@
+/*
+	include from bn_if256_test.cpp and bn_if384_test.cpp
+*/
+#include <mcl/bn.h>
+#include <cybozu/test.hpp>
+#include <iostream>
+#include <cstring>
+#include <vector>
+
+template<size_t N>
+std::ostream& dump(std::ostream& os, const uint64_t (&x)[N])
+{
+	for (size_t i = 0; i < N; i++) {
+		char buf[64];
+		CYBOZU_SNPRINTF(buf, sizeof(buf), "%016llx", (long long)x[i]);
+		os << buf;
+	}
+	return os;
+}
+
+CYBOZU_TEST_AUTO(init)
+{
+	int ret;
+	CYBOZU_TEST_EQUAL(sizeof(mclBnFr), sizeof(Fr));
+	CYBOZU_TEST_EQUAL(sizeof(mclBnG1), sizeof(G1));
+	CYBOZU_TEST_EQUAL(sizeof(mclBnG2), sizeof(G2));
+	CYBOZU_TEST_EQUAL(sizeof(mclBnGT), sizeof(Fp12));
+
+#if MCLBN_FP_UNIT_SIZE >= 4
+	printf("test BN254 %d\n", MCLBN_FP_UNIT_SIZE);
+	ret = mclBn_init(MCL_BN254, MCLBN_COMPILED_TIME_VAR);
+#endif
+#if MCLBN_FP_UNIT_SIZE >= 6 && MCLBN_FR_UNIT_SIZE >= 4
+	printf("test BLS12_381 %d\n", MCLBN_FP_UNIT_SIZE);
+	ret = mclBn_init(MCL_BLS12_381, MCLBN_COMPILED_TIME_VAR);
+#endif
+#if MCLBN_FP_UNIT_SIZE >= 6 && MCLBN_FR_UNIT_SIZE >= 6
+	printf("test BN381_1 %d\n", MCLBN_FP_UNIT_SIZE);
+	ret = mclBn_init(MCL_BN381_1, MCLBN_COMPILED_TIME_VAR);
+#endif
+#if MCLBN_FP_UNIT_SIZE == 8
+	printf("test BN462 %d\n", MCLBN_FP_UNIT_SIZE);
+	ret = mclBn_init(MCL_BN462, MCLBN_COMPILED_TIME_VAR);
+#endif
+	CYBOZU_TEST_EQUAL(ret, 0);
+	if (ret != 0) exit(1);
+}
+
+CYBOZU_TEST_AUTO(Fr)
+{
+	mclBnFr x, y;
+	memset(&x, 0xff, sizeof(x));
+	CYBOZU_TEST_ASSERT(!mclBnFr_isValid(&x));
+	CYBOZU_TEST_ASSERT(!mclBnFr_isZero(&x));
+
+	mclBnFr_clear(&x);
+	CYBOZU_TEST_ASSERT(mclBnFr_isZero(&x));
+
+	mclBnFr_setInt(&x, 1);
+	CYBOZU_TEST_ASSERT(mclBnFr_isOne(&x));
+
+	mclBnFr_setInt(&y, -1);
+	CYBOZU_TEST_ASSERT(!mclBnFr_isEqual(&x, &y));
+
+	y = x;
+	CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x, &y));
+
+	mclBnFr_setHashOf(&x, "", 0);
+	mclBnFr_setHashOf(&y, "abc", 3);
+	CYBOZU_TEST_ASSERT(!mclBnFr_isEqual(&x, &y));
+	mclBnFr_setHashOf(&x, "abc", 3);
+	CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x, &y));
+
+	char buf[1024];
+	mclBnFr_setInt(&x, 12345678);
+	size_t size;
+	size = mclBnFr_getStr(buf, sizeof(buf), &x, 10);
+	CYBOZU_TEST_EQUAL(size, 8);
+	CYBOZU_TEST_EQUAL(buf, "12345678");
+
+	mclBnFr_setInt(&x, -7654321);
+	mclBnFr_neg(&x, &x);
+	size = mclBnFr_getStr(buf, sizeof(buf), &x, 10);
+	CYBOZU_TEST_EQUAL(size, 7);
+	CYBOZU_TEST_EQUAL(buf, "7654321");
+
+	mclBnFr_setInt(&y, 123 - 7654321);
+	mclBnFr_add(&x, &x, &y);
+	size = mclBnFr_getStr(buf, sizeof(buf), &x, 10);
+	CYBOZU_TEST_EQUAL(size, 3);
+	CYBOZU_TEST_EQUAL(buf, "123");
+
+	mclBnFr_setInt(&y, 100);
+	mclBnFr_sub(&x, &x, &y);
+	size = mclBnFr_getStr(buf, sizeof(buf), &x, 10);
+	CYBOZU_TEST_EQUAL(size, 2);
+	CYBOZU_TEST_EQUAL(buf, "23");
+
+	mclBnFr_mul(&x, &x, &y);
+	size = mclBnFr_getStr(buf, sizeof(buf), &x,
10); + CYBOZU_TEST_EQUAL(size, 4); + CYBOZU_TEST_EQUAL(buf, "2300"); + + mclBnFr_div(&x, &x, &y); + size = mclBnFr_getStr(buf, sizeof(buf), &x, 10); + CYBOZU_TEST_EQUAL(size, 2); + CYBOZU_TEST_EQUAL(buf, "23"); + + mclBnFr_mul(&x, &y, &y); + mclBnFr_sqr(&y, &y); + CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x, &y)); + + const char *s = "12345678901234567"; + CYBOZU_TEST_ASSERT(!mclBnFr_setStr(&x, s, strlen(s), 10)); + s = "20000000000000000"; + CYBOZU_TEST_ASSERT(!mclBnFr_setStr(&y, s, strlen(s), 10)); + mclBnFr_add(&x, &x, &y); + size = mclBnFr_getStr(buf, sizeof(buf), &x, 10); + CYBOZU_TEST_EQUAL(size, 17); + CYBOZU_TEST_EQUAL(buf, "32345678901234567"); + + mclBnFr_setInt(&x, 1); + mclBnFr_neg(&x, &x); + size = mclBnFr_getStr(buf, sizeof(buf), &x, 10); + CYBOZU_TEST_ASSERT(size > 0); + CYBOZU_TEST_EQUAL(size, strlen(buf)); + CYBOZU_TEST_ASSERT(!mclBnFr_setStr(&y, buf, size, 10)); + CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x, &y)); + + for (int i = 0; i < 10; i++) { + mclBnFr_setByCSPRNG(&x); + mclBnFr_getStr(buf, sizeof(buf), &x, 16); + printf("%s\n", buf); + } +} + +void G1test() +{ + mclBnG1 x, y, z; + memset(&x, 0x1, sizeof(x)); + /* + assert() of carry operation fails if use 0xff, so use 0x1 + */ + CYBOZU_TEST_ASSERT(!mclBnG1_isValid(&x)); + mclBnG1_clear(&x); + CYBOZU_TEST_ASSERT(mclBnG1_isValid(&x)); + CYBOZU_TEST_ASSERT(mclBnG1_isZero(&x)); + + CYBOZU_TEST_ASSERT(!mclBnG1_hashAndMapTo(&y, "abc", 3)); + CYBOZU_TEST_ASSERT(mclBnG1_isValidOrder(&y)); + + char buf[1024]; + size_t size; + size = mclBnG1_getStr(buf, sizeof(buf), &y, 10); + CYBOZU_TEST_ASSERT(size > 0); + CYBOZU_TEST_EQUAL(size, strlen(buf)); + CYBOZU_TEST_ASSERT(!mclBnG1_setStr(&x, buf, strlen(buf), 10)); + CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&x, &y)); + + mclBnG1_neg(&x, &x); + mclBnG1_add(&x, &x, &y); + CYBOZU_TEST_ASSERT(mclBnG1_isZero(&x)); + + mclBnG1_dbl(&x, &y); // x = 2y + mclBnG1_add(&z, &y, &y); + CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&x, &z)); + mclBnG1_add(&z, &z, &y); // z = 3y + mclBnFr n; + mclBnFr_setInt(&n, 3); + mclBnG1_mul(&x, &y, &n); // x = 3y + CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&x, &z)); + mclBnG1_sub(&x, &x, &y); // x = 2y + + mclBnFr_setInt(&n, 2); + mclBnG1_mul(&z, &y, &n); // z = 2y + CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&x, &z)); + mclBnG1_normalize(&y, &z); + CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&y, &z)); +} + +CYBOZU_TEST_AUTO(G1) +{ + G1test(); +} + +CYBOZU_TEST_AUTO(G2) +{ + mclBnG2 x, y, z; + /* + assert() of carry operation fails if use 0xff, so use 0x1 + */ + memset(&x, 0x1, sizeof(x)); + CYBOZU_TEST_ASSERT(!mclBnG2_isValid(&x)); + mclBnG2_clear(&x); + CYBOZU_TEST_ASSERT(mclBnG2_isValid(&x)); + CYBOZU_TEST_ASSERT(mclBnG2_isZero(&x)); + + CYBOZU_TEST_ASSERT(!mclBnG2_hashAndMapTo(&x, "abc", 3)); + CYBOZU_TEST_ASSERT(mclBnG2_isValidOrder(&x)); + + char buf[1024]; + size_t size; + size = mclBnG2_getStr(buf, sizeof(buf), &x, 10); + CYBOZU_TEST_ASSERT(size > 0); + CYBOZU_TEST_EQUAL(size, strlen(buf)); + CYBOZU_TEST_ASSERT(!mclBnG2_setStr(&y, buf, strlen(buf), 10)); + CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&x, &y)); + + mclBnG2_neg(&x, &x); + mclBnG2_add(&x, &x, &y); + CYBOZU_TEST_ASSERT(mclBnG2_isZero(&x)); + + mclBnG2_dbl(&x, &y); // x = 2y + mclBnG2_add(&z, &y, &y); + CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&x, &z)); + mclBnG2_add(&z, &z, &y); // z = 3y + mclBnFr n; + mclBnFr_setInt(&n, 3); + mclBnG2_mul(&x, &y, &n); // x = 3y + CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&x, &z)); + mclBnG2_sub(&x, &x, &y); // x = 2y + + mclBnFr_setInt(&n, 2); + mclBnG2_mul(&z, &y, &n); // z = 2y + CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&x, &z)); 
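+	// mclBnG2_normalize converts z from the projective representation
+	// produced by add/dbl/mul into affine form; mclBnG2_isEqual compares
+	// the underlying points, so the normalized copy still equals z.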
+	mclBnG2_normalize(&y, &z);
+	CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&y, &z));
+}
+
+CYBOZU_TEST_AUTO(GT)
+{
+	mclBnGT x, y, z;
+	memset(&x, 1, sizeof(x));
+	CYBOZU_TEST_ASSERT(!mclBnGT_isZero(&x));
+
+	mclBnGT_clear(&x);
+	CYBOZU_TEST_ASSERT(mclBnGT_isZero(&x));
+
+	mclBnGT_setInt(&x, 1);
+	CYBOZU_TEST_ASSERT(mclBnGT_isOne(&x));
+	char buf[2048];
+	size_t size;
+	size = mclBnGT_getStr(buf, sizeof(buf), &x, 10);
+	CYBOZU_TEST_ASSERT(size > 0);
+	CYBOZU_TEST_EQUAL(size, strlen(buf));
+	const char *s = "1 0 0 0 0 0 0 0 0 0 0 0";
+	CYBOZU_TEST_EQUAL(buf, s);
+
+	s = "1 2 3 4 5 6 7 8 9 10 11 12";
+	CYBOZU_TEST_ASSERT(!mclBnGT_setStr(&x, s, strlen(s), 10));
+	size = mclBnGT_getStr(buf, sizeof(buf), &x, 10);
+	CYBOZU_TEST_ASSERT(size > 0);
+	CYBOZU_TEST_EQUAL(size, strlen(buf));
+	CYBOZU_TEST_EQUAL(buf, s);
+
+	y = x;
+	CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&x, &y));
+
+	s = "-1 -2 -3 -4 -5 -6 -7 -8 -9 -10 -11 -12";
+	CYBOZU_TEST_ASSERT(!mclBnGT_setStr(&z, s, strlen(s), 10));
+	size = mclBnGT_getStr(buf, sizeof(buf), &z, 10);
+	CYBOZU_TEST_ASSERT(size > 0);
+	CYBOZU_TEST_EQUAL(size, strlen(buf));
+	CYBOZU_TEST_ASSERT(!mclBnGT_setStr(&y, buf, size, 10));
+
+	mclBnGT_neg(&z, &y);
+	CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&x, &z));
+
+	mclBnGT_add(&y, &x, &y);
+	CYBOZU_TEST_ASSERT(mclBnGT_isZero(&y));
+
+	s = "2 0 0 0 0 0 0 0 0 0 0 0";
+	CYBOZU_TEST_ASSERT(!mclBnGT_setStr(&y, s, strlen(s), 10));
+	mclBnGT_mul(&z, &x, &y);
+	size = mclBnGT_getStr(buf, sizeof(buf), &z, 10);
+	CYBOZU_TEST_ASSERT(size > 0);
+	CYBOZU_TEST_EQUAL(size, strlen(buf));
+	CYBOZU_TEST_EQUAL(buf, "2 4 6 8 10 12 14 16 18 20 22 24");
+
+	mclBnGT_div(&z, &z, &y);
+	size = mclBnGT_getStr(buf, sizeof(buf), &x, 10);
+	CYBOZU_TEST_ASSERT(size > 0);
+	CYBOZU_TEST_EQUAL(size, strlen(buf));
+	CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&x, &z));
+
+	/*
+		can't use mclBnGT_pow because x is not in GT
+	*/
+	mclBnFr n;
+	mclBnFr_setInt(&n, 3);
+	mclBnGT_powGeneric(&z, &x, &n);
+	mclBnGT_mul(&y, &x, &x);
+	mclBnGT_mul(&y, &y, &x);
+	CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&y, &z));
+
+	mclBnGT_mul(&x, &y, &y);
+	mclBnGT_sqr(&y, &y);
+	CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&x, &y));
+}
+
+CYBOZU_TEST_AUTO(pairing)
+{
+	mclBnFr a, b, ab;
+	mclBnFr_setInt(&a, 123);
+	mclBnFr_setInt(&b, 456);
+	mclBnFr_mul(&ab, &a, &b);
+	mclBnG1 P, aP;
+	mclBnG2 Q, bQ;
+	mclBnGT e, e1, e2;
+
+	CYBOZU_TEST_ASSERT(!mclBnG1_hashAndMapTo(&P, "1", 1));
+	CYBOZU_TEST_ASSERT(!mclBnG2_hashAndMapTo(&Q, "1", 1));
+
+	mclBnG1_mul(&aP, &P, &a);
+	mclBnG2_mul(&bQ, &Q, &b);
+
+	mclBn_pairing(&e, &P, &Q);
+	mclBnGT_pow(&e1, &e, &a);
+	mclBn_pairing(&e2, &aP, &Q);
+	CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&e1, &e2));
+
+	mclBnGT_pow(&e1, &e, &b);
+	mclBn_pairing(&e2, &P, &bQ);
+	CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&e1, &e2));
+
+	mclBnFr n;
+	mclBnFr_setInt(&n, 3);
+	mclBnGT_pow(&e1, &e, &n);
+	mclBnGT_mul(&e2, &e, &e);
+	mclBnGT_mul(&e2, &e2, &e);
+	CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&e1, &e2));
+}
+
+CYBOZU_TEST_AUTO(precomputed)
+{
+	mclBnG1 P1, P2;
+	mclBnG2 Q1, Q2;
+	CYBOZU_TEST_ASSERT(!mclBnG1_hashAndMapTo(&P1, "1", 1));
+	CYBOZU_TEST_ASSERT(!mclBnG1_hashAndMapTo(&P2, "123", 3));
+	CYBOZU_TEST_ASSERT(!mclBnG2_hashAndMapTo(&Q1, "1", 1));
+	CYBOZU_TEST_ASSERT(!mclBnG2_hashAndMapTo(&Q2, "2", 1));
+
+	const int size = mclBn_getUint64NumToPrecompute();
+	std::vector<uint64_t> Q1buf, Q2buf;
+	Q1buf.resize(size);
+	Q2buf.resize(size);
+	mclBn_precomputeG2(Q1buf.data(), &Q1);
+	mclBn_precomputeG2(Q2buf.data(), &Q2);
+
+	mclBnGT e1, e2, f1, f2, f3, f4;
+	mclBn_pairing(&e1, &P1, &Q1);
+	mclBn_precomputedMillerLoop(&f1, &P1,
Q1buf.data()); + mclBn_finalExp(&f1, &f1); + CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&e1, &f1)); + + mclBn_pairing(&e2, &P2, &Q2); + mclBn_precomputedMillerLoop(&f2, &P2, Q2buf.data()); + mclBn_finalExp(&f2, &f2); + CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&e2, &f2)); + + mclBn_precomputedMillerLoop2(&f3, &P1, Q1buf.data(), &P2, Q2buf.data()); + mclBn_precomputedMillerLoop2mixed(&f4, &P1, &Q1, &P2, Q2buf.data()); + CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&f3, &f4)); + mclBn_finalExp(&f3, &f3); + + mclBnGT_mul(&e1, &e1, &e2); + CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&e1, &f3)); +} + +CYBOZU_TEST_AUTO(serialize) +{ + const size_t FrSize = mclBn_getFrByteSize(); + const size_t G1Size = mclBn_getG1ByteSize(); + mclBnFr x1, x2; + mclBnG1 P1, P2; + mclBnG2 Q1, Q2; + char buf[1024]; + size_t n; + size_t expectSize; + size_t ret; + // Fr + expectSize = FrSize; + mclBnFr_setInt(&x1, -1); + n = mclBnFr_serialize(buf, sizeof(buf), &x1); + CYBOZU_TEST_EQUAL(n, expectSize); + + ret = mclBnFr_deserialize(&x2, buf, n); + CYBOZU_TEST_EQUAL(ret, n); + CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x1, &x2)); + + ret = mclBnFr_deserialize(&x2, buf, n - 1); + CYBOZU_TEST_EQUAL(ret, 0); + + memset(&x2, 0, sizeof(x2)); + ret = mclBnFr_deserialize(&x2, buf, n + 1); + CYBOZU_TEST_EQUAL(ret, n); + CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x1, &x2)); + + n = mclBnFr_serialize(buf, expectSize, &x1); + CYBOZU_TEST_EQUAL(n, expectSize); + + // G1 + expectSize = G1Size; + mclBnG1_hashAndMapTo(&P1, "1", 1); + n = mclBnG1_serialize(buf, sizeof(buf), &P1); + CYBOZU_TEST_EQUAL(n, expectSize); + + ret = mclBnG1_deserialize(&P2, buf, n); + CYBOZU_TEST_EQUAL(ret, n); + CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&P1, &P2)); + + ret = mclBnG1_deserialize(&P2, buf, n - 1); + CYBOZU_TEST_EQUAL(ret, 0); + + memset(&P2, 0, sizeof(P2)); + ret = mclBnG1_deserialize(&P2, buf, n + 1); + CYBOZU_TEST_EQUAL(ret, n); + CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&P1, &P2)); + + n = mclBnG1_serialize(buf, expectSize, &P1); + CYBOZU_TEST_EQUAL(n, expectSize); + + // G2 + expectSize = G1Size * 2; + mclBnG2_hashAndMapTo(&Q1, "1", 1); + n = mclBnG2_serialize(buf, sizeof(buf), &Q1); + CYBOZU_TEST_EQUAL(n, expectSize); + + ret = mclBnG2_deserialize(&Q2, buf, n); + CYBOZU_TEST_EQUAL(ret, n); + CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&Q1, &Q2)); + + ret = mclBnG2_deserialize(&Q2, buf, n - 1); + CYBOZU_TEST_EQUAL(ret, 0); + + memset(&Q2, 0, sizeof(Q2)); + ret = mclBnG2_deserialize(&Q2, buf, n + 1); + CYBOZU_TEST_EQUAL(ret, n); + CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&Q1, &Q2)); + + n = mclBnG2_serialize(buf, expectSize, &Q1); + CYBOZU_TEST_EQUAL(n, expectSize); +} + +CYBOZU_TEST_AUTO(serializeToHexStr) +{ + const size_t FrSize = mclBn_getFrByteSize(); + const size_t G1Size = mclBn_getG1ByteSize(); + mclBnFr x1, x2; + mclBnG1 P1, P2; + mclBnG2 Q1, Q2; + char buf[1024]; + size_t n; + size_t expectSize; + size_t ret; + // Fr + expectSize = FrSize * 2; // hex string + mclBnFr_setInt(&x1, -1); + n = mclBnFr_getStr(buf, sizeof(buf), &x1, MCLBN_IO_SERIALIZE_HEX_STR); + CYBOZU_TEST_EQUAL(n, expectSize); + + ret = mclBnFr_setStr(&x2, buf, n, MCLBN_IO_SERIALIZE_HEX_STR); + CYBOZU_TEST_EQUAL(ret, 0); + CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x1, &x2)); + + ret = mclBnFr_setStr(&x2, buf, n - 1, MCLBN_IO_SERIALIZE_HEX_STR); + CYBOZU_TEST_ASSERT(ret != 0); + + memset(&x2, 0, sizeof(x2)); + ret = mclBnFr_setStr(&x2, buf, n + 1, MCLBN_IO_SERIALIZE_HEX_STR); + CYBOZU_TEST_EQUAL(ret, 0); + CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x1, &x2)); + + n = mclBnFr_getStr(buf, expectSize, &x1, MCLBN_IO_SERIALIZE_HEX_STR); + CYBOZU_TEST_EQUAL(n, 
expectSize);
+
+	// G1
+	expectSize = G1Size * 2; // hex string
+	mclBnG1_hashAndMapTo(&P1, "1", 1);
+	n = mclBnG1_getStr(buf, sizeof(buf), &P1, MCLBN_IO_SERIALIZE_HEX_STR);
+	CYBOZU_TEST_EQUAL(n, expectSize);
+
+	ret = mclBnG1_setStr(&P2, buf, n, MCLBN_IO_SERIALIZE_HEX_STR);
+	CYBOZU_TEST_EQUAL(ret, 0);
+	CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&P1, &P2));
+
+	ret = mclBnG1_setStr(&P2, buf, n - 1, MCLBN_IO_SERIALIZE_HEX_STR);
+	CYBOZU_TEST_ASSERT(ret != 0);
+
+	memset(&P2, 0, sizeof(P2));
+	ret = mclBnG1_setStr(&P2, buf, n + 1, MCLBN_IO_SERIALIZE_HEX_STR);
+	CYBOZU_TEST_EQUAL(ret, 0);
+	CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&P1, &P2));
+
+	n = mclBnG1_getStr(buf, expectSize, &P1, MCLBN_IO_SERIALIZE_HEX_STR);
+	CYBOZU_TEST_EQUAL(n, expectSize);
+
+	// G2
+	expectSize = G1Size * 2 * 2; // hex string
+	mclBnG2_hashAndMapTo(&Q1, "1", 1);
+	n = mclBnG2_getStr(buf, sizeof(buf), &Q1, MCLBN_IO_SERIALIZE_HEX_STR);
+	CYBOZU_TEST_EQUAL(n, expectSize);
+
+	ret = mclBnG2_setStr(&Q2, buf, n, MCLBN_IO_SERIALIZE_HEX_STR);
+	CYBOZU_TEST_EQUAL(ret, 0);
+	CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&Q1, &Q2));
+
+	ret = mclBnG2_setStr(&Q2, buf, n - 1, MCLBN_IO_SERIALIZE_HEX_STR);
+	CYBOZU_TEST_ASSERT(ret != 0);
+
+	memset(&Q2, 0, sizeof(Q2));
+	ret = mclBnG2_setStr(&Q2, buf, n + 1, MCLBN_IO_SERIALIZE_HEX_STR);
+	CYBOZU_TEST_EQUAL(ret, 0);
+	CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&Q1, &Q2));
+
+	n = mclBnG2_getStr(buf, expectSize, &Q1, MCLBN_IO_SERIALIZE_HEX_STR);
+	CYBOZU_TEST_EQUAL(n, expectSize);
+}
+
+#if MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE >= 6
+CYBOZU_TEST_AUTO(badG2)
+{
+	int ret;
+	ret = mclBn_init(MCL_BN381_1, MCLBN_COMPILED_TIME_VAR);
+	CYBOZU_TEST_EQUAL(ret, 0);
+	const char *s = "1 18d3d8c085a5a5e7553c3a4eb628e88b8465bf4de2612e35a0a4eb018fb0c82e9698896031e62fd7633ffd824a859474 1dc6edfcf33e29575d4791faed8e7203832217423bf7f7fbf1f6b36625b12e7132c15fbc15562ce93362a322fb83dd0d 65836963b1f7b6959030ddfa15ab38ce056097e91dedffd996c1808624fa7e2644a77be606290aa555cda8481cfb3cb 1b77b708d3d4f65aeedf54b58393463a42f0dc5856baadb5ce608036baeca398c5d9e6b169473a8838098fd72fd28b50";
+	mclBnG2 Q;
+	ret = mclBnG2_setStr(&Q, s, strlen(s), 16);
+	CYBOZU_TEST_ASSERT(ret != 0);
+}
+#endif
+
+struct Sequential {
+	uint32_t pos;
+	Sequential() : pos(0) {}
+	static uint32_t read(void *self, void *buf, uint32_t bufSize)
+	{
+		Sequential *seq = reinterpret_cast<Sequential*>(self);
+		uint8_t *p = reinterpret_cast<uint8_t*>(buf);
+		for (uint32_t i = 0; i < bufSize; i++) {
+			p[i] = uint8_t(seq->pos + i) & 0x1f; // mask is to make valid Fp
+		}
+		seq->pos += bufSize;
+		return bufSize;
+	}
+};
+
+CYBOZU_TEST_AUTO(setRandFunc)
+{
+	Sequential seq;
+	for (int j = 0; j < 3; j++) {
+		puts(j == 1 ?
"sequential rand" : "true rand"); + for (int i = 0; i < 5; i++) { + mclBnFr x; + int ret; + char buf[1024]; + ret = mclBnFr_setByCSPRNG(&x); + CYBOZU_TEST_EQUAL(ret, 0); + ret = mclBnFr_getStr(buf, sizeof(buf), &x, 16); + CYBOZU_TEST_ASSERT(ret > 0); + printf("%d %s\n", i, buf); + } + if (j == 0) { + mclBn_setRandFunc(&seq, Sequential::read); + } else { + mclBn_setRandFunc(0, 0); + } + } +} + +CYBOZU_TEST_AUTO(Fp) +{ + mclBnFp x1, x2; + char buf[1024]; + int ret = mclBnFp_setHashOf(&x1, "abc", 3); + CYBOZU_TEST_ASSERT(ret == 0); + mclSize n = mclBnFp_serialize(buf, sizeof(buf), &x1); + CYBOZU_TEST_ASSERT(n > 0); + n = mclBnFp_deserialize(&x2, buf, n); + CYBOZU_TEST_ASSERT(n > 0); + CYBOZU_TEST_ASSERT(mclBnFp_isEqual(&x1, &x2)); + for (size_t i = 0; i < n; i++) { + buf[i] = char(i); + } + ret = mclBnFp_setLittleEndian(&x1, buf, n); + CYBOZU_TEST_ASSERT(ret == 0); + memset(buf, 0, sizeof(buf)); + n = mclBnFp_serialize(buf, sizeof(buf), &x1); + CYBOZU_TEST_ASSERT(n > 0); + for (size_t i = 0; i < n - 1; i++) { + CYBOZU_TEST_EQUAL(buf[i], char(i)); + } + mclBnFp_clear(&x1); + memset(&x2, 0, sizeof(x2)); + CYBOZU_TEST_ASSERT(mclBnFp_isEqual(&x1, &x2)); +} + +CYBOZU_TEST_AUTO(mod) +{ + { + // Fp + char buf[1024]; + mclBn_getFieldOrder(buf, sizeof(buf)); + mpz_class p(buf); + mpz_class x = mpz_class(1) << (mclBn_getFpByteSize() * 2); + mclBnFp y; + int ret = mclBnFp_setLittleEndianMod(&y, x.get_mpz_t()->_mp_d, x.get_mpz_t()->_mp_size * sizeof(void*)); + CYBOZU_TEST_EQUAL(ret, 0); + mclBnFp_getStr(buf, sizeof(buf), &y, 10); + CYBOZU_TEST_EQUAL(mpz_class(buf), x % p); + } + { + // Fr + char buf[1024]; + mclBn_getCurveOrder(buf, sizeof(buf)); + mpz_class p(buf); + mpz_class x = mpz_class(1) << (mclBn_getFrByteSize() * 2); + mclBnFr y; + int ret = mclBnFr_setLittleEndianMod(&y, x.get_mpz_t()->_mp_d, x.get_mpz_t()->_mp_size * sizeof(void*)); + CYBOZU_TEST_EQUAL(ret, 0); + mclBnFr_getStr(buf, sizeof(buf), &y, 10); + CYBOZU_TEST_EQUAL(mpz_class(buf), x % p); + } +} + +CYBOZU_TEST_AUTO(Fp2) +{ + mclBnFp2 x1, x2; + char buf[1024]; + int ret = mclBnFp_setHashOf(&x1.d[0], "abc", 3); + CYBOZU_TEST_ASSERT(ret == 0); + ret = mclBnFp_setHashOf(&x1.d[1], "xyz", 3); + CYBOZU_TEST_ASSERT(ret == 0); + mclSize n = mclBnFp2_serialize(buf, sizeof(buf), &x1); + CYBOZU_TEST_ASSERT(n > 0); + n = mclBnFp2_deserialize(&x2, buf, n); + CYBOZU_TEST_ASSERT(n > 0); + CYBOZU_TEST_ASSERT(mclBnFp2_isEqual(&x1, &x2)); + mclBnFp2_clear(&x1); + memset(&x2, 0, sizeof(x2)); + CYBOZU_TEST_ASSERT(mclBnFp2_isEqual(&x1, &x2)); +} + +CYBOZU_TEST_AUTO(mapToG1) +{ + mclBnFp x; + mclBnG1 P1, P2; + mclBnFp_setHashOf(&x, "abc", 3); + int ret = mclBnFp_mapToG1(&P1, &x); + CYBOZU_TEST_ASSERT(ret == 0); + mclBnG1_hashAndMapTo(&P2, "abc", 3); + CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&P1, &P2)); +} + +CYBOZU_TEST_AUTO(mapToG2) +{ + mclBnFp2 x; + mclBnG2 P1, P2; + mclBnFp_setHashOf(&x.d[0], "abc", 3); + mclBnFp_clear(&x.d[1]); + int ret = mclBnFp2_mapToG2(&P1, &x); + CYBOZU_TEST_ASSERT(ret == 0); + mclBnG2_hashAndMapTo(&P2, "abc", 3); + CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&P1, &P2)); +} + +void G1onlyTest(int curve) +{ + printf("curve=%d\n", curve); + int ret; + ret = mclBn_init(curve, MCLBN_COMPILED_TIME_VAR); + CYBOZU_TEST_EQUAL(ret, 0); + mclBnG1 P0; + ret = mclBnG1_getBasePoint(&P0); + CYBOZU_TEST_EQUAL(ret, 0); + char buf[256]; + ret = mclBnG1_getStr(buf, sizeof(buf), &P0, 16); + CYBOZU_TEST_ASSERT(ret > 0); + printf("basePoint=%s\n", buf); + G1test(); +} + +CYBOZU_TEST_AUTO(G1only) +{ + const int tbl[] = { + MCL_SECP192K1, + MCL_NIST_P192, + 
MCL_SECP224K1, + MCL_NIST_P224, // hashAndMapTo is error + MCL_SECP256K1, + MCL_NIST_P256, +#if MCLBN_FP_UNIT_SIZE >= 6 && MCLBN_FR_UNIT_SIZE >= 6 + MCL_SECP384R1, +#endif + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + G1onlyTest(tbl[i]); + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/bn_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/bn_test.cpp new file mode 100644 index 000000000..071ec706c --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/bn_test.cpp @@ -0,0 +1,408 @@ +#define PUT(x) std::cout << #x "=" << x << std::endl; +#define CYBOZU_TEST_DISABLE_AUTO_RUN +#include +cybozu::CpuClock clk; +#include +#include +#include +#include + +#if defined(__EMSCRIPTEN__) && !defined(MCL_AVOID_EXCEPTION_TEST) + #define MCL_AVOID_EXCEPTION_TEST +#endif + +typedef mcl::bn::local::Compress Compress; +using namespace mcl::bn; + +mcl::fp::Mode g_mode; + +const struct TestSet { + mcl::CurveParam cp; + const char *name; + struct G2 { + const char *aa; + const char *ab; + const char *ba; + const char *bb; + } g2; + struct G1 { + int a; + int b; + } g1; + const char *e; +} g_testSetTbl[] = { + { + mcl::BN254, + "BN254", + { + "12723517038133731887338407189719511622662176727675373276651903807414909099441", + "4168783608814932154536427934509895782246573715297911553964171371032945126671", + "13891744915211034074451795021214165905772212241412891944830863846330766296736", + "7937318970632701341203597196594272556916396164729705624521405069090520231616", + }, + { + -1, 1 + }, + "8118772341496577043438385328606447626730215814727396173233264007541007797690 " + "6742571767760762192519140673058087976840103832045324348366170860928670686713 " + "9727912590495366720378364920530546614235713408261568635512172059018197267630 " + "10180700148605185348549931182990442059136187839792856455707820203302941578832 " + "5054507763444412917986776641611331046146804026682679569910978464879371792565 " + "6917005519826733659554708445125877487590687705432214234949972860245110398023 " + "10448556317747236258066222816126375978842661908560317699736569642190930635294 " + "1516980358051268127904344653343215863076753141133525905743113718749531324025 " + "9794836735385959178744195210089532061310424844916928682580569566332541022353 " + "9375574834170998962484906689780052970915033987453510324648351251071086068423 " + "710778048594563655498360873129325895716179849942646859397874562033386335205 " + "10688745994254573144943003027511098295097561129365638275727908595677791826005" + }, + { + mcl::BN_SNARK1, + "BN_SNARK1", + { + "15267802884793550383558706039165621050290089775961208824303765753922461897946", + "9034493566019742339402378670461897774509967669562610788113215988055021632533", + "644888581738283025171396578091639672120333224302184904896215738366765861164", + "20532875081203448695448744255224543661959516361327385779878476709582931298750", + }, + { + 1, 2 + }, + "15163392945550945552839911839294582974434771053565812675833291179413834896953 " + "20389211011850518572149982239826345669421868561029856883955740401696801984953 " + "17766795911013516700216709333389761327222334145011922123798810516425387779347 " + "6064163297423711021549973931984064750876944939004405231004441199168710504090 " + "296093106139306574860102680862436174771023602986903675151017278048818344347 " + "1573596951222456889652521728261836933382094474023551133585236991207205981715 " + "3511871642997169996730611220058787939468653751355351269812083879279936651479 " + 
"17848534184080172844395614793152774197360421729995967636680357250333093768504 " + "3273860031361637906105800996652640969711942192883181518057117446820546419132 " + "7212721189663231589365009629980400132745687533815732336503876102977912682966 " + "18569236611881855981733896549089319395087993987737891870319625215675547032585 " + "10088832670068482545658647976676953228519838542958787800193793260459700064172 " + }, +}; + +CYBOZU_TEST_AUTO(size) +{ + CYBOZU_TEST_EQUAL(sizeof(Fp), 32u); + CYBOZU_TEST_EQUAL(sizeof(Fp2), sizeof(Fp) * 2); + CYBOZU_TEST_EQUAL(sizeof(Fp6), sizeof(Fp) * 6); + CYBOZU_TEST_EQUAL(sizeof(Fp12), sizeof(Fp) * 12); + CYBOZU_TEST_EQUAL(sizeof(G1), sizeof(Fp) * 3); + CYBOZU_TEST_EQUAL(sizeof(G2), sizeof(Fp2) * 3); +} + +void testSetStr(const G2& Q0) +{ + G2::setCompressedExpression(); + G2 Q; + Q.clear(); + for (int i = 0; i < 10; i++) { + G2 R; + R.setStr(Q.getStr()); + CYBOZU_TEST_EQUAL(Q, R); + G2::add(Q, Q, Q0); + } +} + +void testMapToG1() +{ + G1 g; + for (int i = 1; i < 10; i++) { + mapToG1(g, i); + CYBOZU_TEST_ASSERT(!g.isZero()); + G1 gr; + G1::mulGeneric(gr, g, BN::param.r); + CYBOZU_TEST_ASSERT(gr.isZero()); + } +#ifndef MCL_AVOID_EXCEPTION_TEST + if (BN::param.cp.b == 2) { + Fp c1; + bool b = Fp::squareRoot(c1, -3); + CYBOZU_TEST_ASSERT(b); + CYBOZU_TEST_EXCEPTION(mapToG1(g, 0), cybozu::Exception); + CYBOZU_TEST_EXCEPTION(mapToG1(g, c1), cybozu::Exception); + CYBOZU_TEST_EXCEPTION(mapToG1(g, -c1), cybozu::Exception); + } +#endif +} + +void testMapToG2() +{ + G2 g; + for (int i = 1; i < 10; i++) { + mapToG2(g, i); + CYBOZU_TEST_ASSERT(!g.isZero()); + G2 gr; + G2::mulGeneric(gr, g, BN::param.r); + CYBOZU_TEST_ASSERT(gr.isZero()); + } +#ifndef MCL_AVOID_EXCEPTION_TEST + if (BN::param.cp.b == 2) { + CYBOZU_TEST_EXCEPTION(mapToG2(g, 0), cybozu::Exception); + } +#endif + Fp x; + x.setHashOf("abc"); + mapToG2(g, Fp2(x, 0)); + CYBOZU_TEST_ASSERT(g.isValid()); +} + +void testCyclotomic() +{ + Fp12 a; + for (int i = 0; i < 12; ++i) { + a.getFp0()[i] = i * i; + } + local::mapToCyclotomic(a, a); + Fp12 d; + Compress b(d, a); + a *= a; + Fp12 d2; + Compress c(d2, b); + Compress::square_n(c, 1); + c.decompress(); + CYBOZU_TEST_EQUAL(a, d2); + Compress::square_n(b, 1); + b.decompress(); + CYBOZU_TEST_EQUAL(a, d); +} + +void testCompress(const G1& P, const G2& Q) +{ + if (BN::param.cp.curveType != MCL_BN254) return; + Fp12 a; + pairing(a, P, Q); + local::mapToCyclotomic(a, a); + Fp12 b; + Compress::fixed_power(b, a); + Fp12 c; + Fp12::pow(c, a, BN::param.abs_z); + CYBOZU_TEST_EQUAL(b, c); +} + +void testPrecomputed(const G1& P, const G2& Q) +{ + Fp12 e1, e2; + pairing(e1, P, Q); + std::vector Qcoeff; + precomputeG2(Qcoeff, Q); + precomputedMillerLoop(e2, P, Qcoeff); + finalExp(e2, e2); + CYBOZU_TEST_EQUAL(e1, e2); +} + +void testFp12pow(const G1& P, const G2& Q) +{ + Fp12 e, e1, e2; + pairing(e, P, Q); + cybozu::XorShift rg; + for (int i = -10; i < 10; i++) { + mpz_class xm = i; + Fp12::pow(e1, e, xm); + Fp12::powGeneric(e2, e, xm); + CYBOZU_TEST_EQUAL(e1, e2); + } + for (int i = 0; i < 10; i++) { + Fr x; + x.setRand(rg); + mpz_class xm = x.getMpz(); + Fp12::pow(e1, e, xm); + BN::param.glv2.pow(e2, e, xm); + CYBOZU_TEST_EQUAL(e1, e2); + } +} + +void testMillerLoop2(const G1& P1, const G2& Q1) +{ + Fp12 e1, e2, e3; + mpz_class c1("12342342423442"); + mpz_class c2("329428049820348209482"); + G2 Q2; + G1 P2; + G2::mul(Q2, Q1, c1); + G1::mul(P2, P1, c2); + pairing(e1, P1, Q1); + pairing(e2, P2, Q2); + e1 *= e2; + + std::vector Q1coeff, Q2coeff; + precomputeG2(Q1coeff, Q1); + 
precomputeG2(Q2coeff, Q2);
+	precomputedMillerLoop2(e2, P1, Q1coeff, P2, Q2coeff);
+	precomputedMillerLoop2mixed(e3, P1, Q1, P2, Q2coeff);
+	CYBOZU_TEST_EQUAL(e2, e3);
+	finalExp(e2, e2);
+	CYBOZU_TEST_EQUAL(e1, e2);
+
+	// special value
+	G2 Z;
+	Z.clear();
+	Q2 += Q2;
+	precomputeG2(Q1coeff, Z);
+	precomputeG2(Q2coeff, Q2);
+	precomputedMillerLoop2(e2, P1, Q1coeff, P2, Q2coeff);
+	precomputedMillerLoop2mixed(e3, P1, Z, P2, Q2coeff);
+	finalExp(e2, e2);
+	finalExp(e3, e3);
+	CYBOZU_TEST_EQUAL(e2, e3);
+}
+
+void testPairing(const G1& P, const G2& Q, const char *eStr)
+{
+	Fp12 e1;
+	pairing(e1, P, Q);
+	Fp12 e2;
+	{
+		std::stringstream ss(eStr);
+		ss >> e2;
+	}
+	CYBOZU_TEST_EQUAL(e1, e2);
+
+	Fp12 e = e1, ea;
+	G1 Pa;
+	G2 Qa;
+#if defined(__EMSCRIPTEN__) || MCL_SIZEOF_UNIT == 4
+	const int count = 100;
+#else
+	const int count = 1000;
+#endif
+	mpz_class a;
+	cybozu::XorShift rg;
+	for (int i = 0; i < count; i++) {
+		Fr r;
+		r.setRand(rg);
+		a = r.getMpz();
+		Fp12::pow(ea, e, a);
+		G1::mul(Pa, P, a);
+		G2::mul(Qa, Q, a);
+		G1 T;
+		G1::mulCT(T, P, a);
+		CYBOZU_TEST_EQUAL(Pa, T);
+		pairing(e1, Pa, Q);
+		pairing(e2, P, Qa);
+		CYBOZU_TEST_EQUAL(ea, e1);
+		CYBOZU_TEST_EQUAL(ea, e2);
+	}
+}
+
+void testTrivial(const G1& P, const G2& Q)
+{
+	G1 Z1; Z1.clear();
+	G2 Z2; Z2.clear();
+	Fp12 e;
+	pairing(e, Z1, Q);
+	CYBOZU_TEST_EQUAL(e, 1);
+	pairing(e, P, Z2);
+	CYBOZU_TEST_EQUAL(e, 1);
+	pairing(e, Z1, Z2);
+	CYBOZU_TEST_EQUAL(e, 1);
+
+	std::vector<Fp6> Qcoeff;
+	precomputeG2(Qcoeff, Z2);
+	precomputedMillerLoop(e, P, Qcoeff);
+	finalExp(e, e);
+	CYBOZU_TEST_EQUAL(e, 1);
+
+	precomputeG2(Qcoeff, Q);
+	precomputedMillerLoop(e, Z1, Qcoeff);
+	finalExp(e, e);
+	CYBOZU_TEST_EQUAL(e, 1);
+}
+
+void testIoAll(const G1& P, const G2& Q)
+{
+	const int FpTbl[] = { 0, 2, 2|mcl::IoPrefix, 10, 16, 16|mcl::IoPrefix, mcl::IoArray, mcl::IoArrayRaw };
+	const int EcTbl[] = { mcl::IoEcAffine, mcl::IoEcProj, mcl::IoEcCompY, mcl::IoSerialize };
+	for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(FpTbl); i++) {
+		for (size_t j = 0; j < CYBOZU_NUM_OF_ARRAY(EcTbl); j++) {
+			G1 P2 = P, P3;
+			G2 Q2 = Q, Q3;
+			int ioMode = FpTbl[i] | EcTbl[j];
+			std::string s = P2.getStr(ioMode);
+			P3.setStr(s, ioMode);
+			CYBOZU_TEST_EQUAL(P2, P3);
+			s = Q2.getStr(ioMode);
+			Q3.setStr(s, ioMode);
+			CYBOZU_TEST_EQUAL(Q2, Q3);
+			s = P.x.getStr(ioMode);
+			Fp Px;
+			Px.setStr(s, ioMode);
+			CYBOZU_TEST_EQUAL(P.x, Px);
+			s = Q.x.getStr(ioMode);
+			Fp2 Qx;
+			Qx.setStr(s, ioMode);
+			CYBOZU_TEST_EQUAL(Q.x, Qx);
+		}
+	}
+}
+
+void testIo(const G1& P, const G2& Q)
+{
+	testIoAll(P, Q);
+	G1 Z1;
+	G2 Z2;
+	Z1.clear();
+	Z2.clear();
+	testIoAll(Z1, Z2);
+}
+
+#include "bench.hpp"
+
+CYBOZU_TEST_AUTO(naive)
+{
+	printf("mcl version=%03x\n", mcl::version);
+	for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(g_testSetTbl); i++) {
+		const TestSet& ts = g_testSetTbl[i];
+		printf("i=%d curve=%s\n", int(i), ts.name);
+		initPairing(ts.cp, g_mode);
+		const G1 P(ts.g1.a, ts.g1.b);
+		const G2 Q(Fp2(ts.g2.aa, ts.g2.ab), Fp2(ts.g2.ba, ts.g2.bb));
+#ifdef ONLY_BENCH
+		{
+			Fp12 e;
+			for (int i = 0; i < 10000; i++) { clk.begin(); pairing(e, P, Q); clk.end(); }
+		}
+		clk.put();
+		return;
+#endif
+		testFp12pow(P, Q);
+		testIo(P, Q);
+		testTrivial(P, Q);
+		testSetStr(Q);
+		testMapToG1();
+		testMapToG2();
+		testCyclotomic();
+		testCompress(P, Q);
+		testPairing(P, Q, ts.e);
+		testPrecomputed(P, Q);
+		testMillerLoop2(P, Q);
+		testBench(P, Q);
+		benchAddDblG1();
+		benchAddDblG2();
+	}
+	int count = (int)clk.getCount();
+	if (count) {
+		printf("count=%d ", count);
+		clk.put();
+	}
+}
+
+int
main(int argc, char *argv[]) + try +{ + cybozu::Option opt; + std::string mode; + opt.appendOpt(&mode, "auto", "m", ": mode(gmp/gmp_mont/llvm/llvm_mont/xbyak)"); + if (!opt.parse(argc, argv)) { + opt.usage(); + return 1; + } + g_mode = mcl::fp::StrToMode(mode); + printf("JIT %d\n", mcl::fp::isEnableJIT()); + return cybozu::test::autoRun.run(argc, argv); +} catch (std::exception& e) { + printf("ERR %s\n", e.what()); + return 1; +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/conversion_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/conversion_test.cpp new file mode 100644 index 000000000..ec11fe900 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/conversion_test.cpp @@ -0,0 +1,96 @@ +#include +#include + +CYBOZU_TEST_AUTO(arrayToDec) +{ + const struct { + uint32_t x[5]; + size_t xn; + const char *s; + } tbl[] = { + { { 0, 0, 0, 0, 0 }, 1, "0" }, + { { 9, 0, 0, 0, 0 }, 1, "9" }, + { { 123456789, 0, 0, 0, 0 }, 1, "123456789" }, + { { 2147483647, 0, 0, 0, 0 }, 1, "2147483647" }, + { { 0xffffffff, 0, 0, 0, 0 }, 1, "4294967295" }, + { { 0x540be400, 0x2, 0, 0, 0 }, 2, "10000000000" }, + { { 0xffffffff, 0xffffffff, 0, 0, 0 }, 2, "18446744073709551615" }, + { { 0x89e80001, 0x8ac72304, 0, 0, 0 }, 2, "10000000000000000001" }, + { { 0xc582ca00, 0x8ac72304, 0, 0, 0 }, 2, "10000000001000000000" }, + { { 0, 0, 1, 0, 0 }, 3, "18446744073709551616" }, + { { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0 }, 4, "340282366920938463463374607431768211455" }, + { { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }, 5, "1461501637330902918203684832716283019655932542975" }, + { { 0x3b9aca00, 0x5e3f3700, 0x1cbfa532, 0x04f6433a, 0xd83ff078 }, 5, "1234567901234560000000000000000000000001000000000" }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const size_t bufSize = 128; + char buf[bufSize] = {}; + const char *str = tbl[i].s; + const uint32_t *x = tbl[i].x; + const size_t strLen = strlen(str); + size_t n = mcl::fp::arrayToDec(buf, bufSize, x, tbl[i].xn); + CYBOZU_TEST_EQUAL(n, strLen); + CYBOZU_TEST_EQUAL_ARRAY(buf + bufSize - n, str, n); + const size_t maxN = 32; + uint32_t xx[maxN] = {}; + size_t xn = mcl::fp::decToArray(xx, maxN, str, strLen); + CYBOZU_TEST_EQUAL(xn, tbl[i].xn); + CYBOZU_TEST_EQUAL_ARRAY(xx, x, xn); + } + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const size_t bufSize = 128; + char buf[bufSize] = {}; + const char *str = tbl[i].s; + const size_t strLen = strlen(str); + uint64_t x[8] = {}; + size_t xn = (tbl[i].xn + 1) / 2; + memcpy(x, tbl[i].x, tbl[i].xn * sizeof(uint32_t)); + size_t n = mcl::fp::arrayToDec(buf, bufSize, x, xn); + CYBOZU_TEST_EQUAL(n, strLen); + CYBOZU_TEST_EQUAL_ARRAY(buf + bufSize - n, str, n); + const size_t maxN = 32; + uint64_t xx[maxN] = {}; + size_t xxn = mcl::fp::decToArray(xx, maxN, str, strLen); + CYBOZU_TEST_EQUAL(xxn, xn); + CYBOZU_TEST_EQUAL_ARRAY(xx, x, xn); + } +} + +CYBOZU_TEST_AUTO(writeHexStr) +{ + const char *hex1tbl = "0123456789abcdef"; + const char *hex2tbl = "0123456789ABCDEF"; + for (size_t i = 0; i < 16; i++) { + uint8_t v = 0xff; + CYBOZU_TEST_ASSERT(mcl::fp::local::hexCharToUint8(&v, hex1tbl[i])); + CYBOZU_TEST_EQUAL(v, i); + CYBOZU_TEST_ASSERT(mcl::fp::local::hexCharToUint8(&v, hex2tbl[i])); + CYBOZU_TEST_EQUAL(v, i); + } + const struct Tbl { + const char *bin; + size_t n; + const char *hex; + } tbl[] = { + { "", 0, "" }, + { "\x12\x34\xab", 3, "1234ab" }, + { "\xff\xfc\x00\x12", 4, "fffc0012" }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + char buf[32]; + 
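+		// cybozu::MemoryOutputStream wraps the fixed buffer with a write
+		// position: writeHexStr encodes n input bytes as 2*n hex characters
+		// into it, and readHexStr below decodes them back, so the test
+		// round-trips bin -> hex -> bin.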
cybozu::MemoryOutputStream os(buf, sizeof(buf));
+		const char *bin = tbl[i].bin;
+		const char *hex = tbl[i].hex;
+		size_t n = tbl[i].n;
+		bool b;
+		mcl::fp::writeHexStr(&b, os, bin, n);
+		CYBOZU_TEST_ASSERT(b);
+		CYBOZU_TEST_EQUAL(os.getPos(), n * 2);
+		CYBOZU_TEST_EQUAL_ARRAY(buf, hex, n * 2);
+
+		cybozu::MemoryInputStream is(hex, n * 2);
+		size_t w = mcl::fp::readHexStr(buf, n, is);
+		CYBOZU_TEST_EQUAL(w, n);
+		CYBOZU_TEST_EQUAL_ARRAY(buf, bin, n);
+	}
+}
diff --git a/vendor/github.com/byzantine-lab/mcl/test/ec_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/ec_test.cpp
new file mode 100644
index 000000000..ec49adbfe
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/ec_test.cpp
@@ -0,0 +1,573 @@
+//#define MCL_EC_USE_AFFINE
+#define PUT(x) std::cout << #x "=" << (x) << std::endl
+#define CYBOZU_TEST_DISABLE_AUTO_RUN
+#include <cybozu/test.hpp>
+#include <cybozu/benchmark.hpp>
+#include <ctime>
+
+#include <iostream>
+#include <sstream>
+#include <mcl/fp.hpp>
+#include <mcl/ec.hpp>
+#include <mcl/ecparam.hpp>
+
+typedef mcl::FpT<> Fp;
+struct tagZn;
+typedef mcl::FpT<tagZn> Zn;
+typedef mcl::EcT<Fp> Ec;
+
+CYBOZU_TEST_AUTO(sizeof)
+{
+	CYBOZU_TEST_EQUAL(sizeof(Fp), sizeof(mcl::fp::Unit) * Fp::maxSize);
+#ifdef MCL_EC_USE_AFFINE
+	CYBOZU_TEST_EQUAL(sizeof(Ec), sizeof(Fp) * 2 + sizeof(mcl::fp::Unit));
+#else
+	CYBOZU_TEST_EQUAL(sizeof(Ec), sizeof(Fp) * 3);
+#endif
+}
+
+struct Test {
+	const mcl::EcParam& para;
+	Test(const mcl::EcParam& para, mcl::fp::Mode fpMode, mcl::ec::Mode ecMode)
+		: para(para)
+	{
+		printf("fpMode=%s\n", mcl::fp::ModeToStr(fpMode));
+		Fp::init(para.p, fpMode);
+		Zn::init(para.n, fpMode);
+		Ec::init(para.a, para.b, ecMode);
+	}
+	void cstr() const
+	{
+		Ec O;
+		O.clear();
+		CYBOZU_TEST_ASSERT(O.isZero());
+		CYBOZU_TEST_ASSERT(O.isValid());
+		Ec P;
+		P.clear();
+		Ec::neg(P, O);
+		CYBOZU_TEST_EQUAL(P, O);
+	}
+	void pow2(Ec& Q, const Ec& P, int n) const
+	{
+		Q = P;
+		for (int i = 0; i < n; i++) {
+			Q += Q;
+		}
+	}
+	void pow2test(const Ec& P, int n) const
+	{
+		Ec Q, R;
+		pow2(Q, P, n);
+		Q -= P; // Q = (2^n - 1)P
+		Fp x = 1;
+		for (int i = 0; i < n; i++) {
+			x += x;
+		}
+		x -= 1; // x = 2^n - 1
+		Ec::mul(R, P, x);
+		CYBOZU_TEST_EQUAL(Q, R);
+		Q = P;
+		Ec::mul(Q, Q, x);
+		CYBOZU_TEST_EQUAL(Q, R);
+	}
+	void ope() const
+	{
+		Fp x(para.gx);
+		Fp y(para.gy);
+		Zn n = 0;
+		CYBOZU_TEST_NO_EXCEPTION(Ec(x, y));
+		CYBOZU_TEST_EXCEPTION(Ec(x, y + 1), cybozu::Exception);
+		Ec P(x, y), Q, R, O;
+		O.clear();
+		CYBOZU_TEST_ASSERT(P.isNormalized());
+		{
+			Ec::neg(Q, P);
+			CYBOZU_TEST_EQUAL(Q.x, P.x);
+			CYBOZU_TEST_EQUAL(Q.y, -P.y);
+
+			R = P + Q;
+			CYBOZU_TEST_ASSERT(R.isZero());
+			CYBOZU_TEST_ASSERT(R.isNormalized());
+			CYBOZU_TEST_ASSERT(R.isValid());
+
+			R = P + O;
+			CYBOZU_TEST_EQUAL(R, P);
+			R = O + P;
+			CYBOZU_TEST_EQUAL(R, P);
+		}
+
+		{
+			Ec::dbl(R, P);
+#ifndef MCL_EC_USE_AFFINE
+			CYBOZU_TEST_ASSERT(!R.isNormalized());
+#endif
+			CYBOZU_TEST_ASSERT(R.isValid());
+			Ec R2 = P + P;
+			CYBOZU_TEST_EQUAL(R, R2);
+			{
+				Ec P2 = P;
+				Ec::dbl(P2, P2);
+				CYBOZU_TEST_EQUAL(P2, R2);
+			}
+			Ec R3L = R2 + P;
+			Ec R3R = P + R2;
+			CYBOZU_TEST_EQUAL(R3L, R3R);
+			{
+				Ec RR = R2;
+				RR = RR + P;
+				CYBOZU_TEST_EQUAL(RR, R3L);
+				RR = R2;
+				RR = P + RR;
+				CYBOZU_TEST_EQUAL(RR, R3L);
+				RR = P;
+				RR = RR + RR;
+				CYBOZU_TEST_EQUAL(RR, R2);
+			}
+			Ec::mul(R, P, 2);
+			CYBOZU_TEST_EQUAL(R, R2);
+			Ec R4L = R3L + R2;
+			Ec R4R = R2 + R3L;
+			CYBOZU_TEST_EQUAL(R4L, R4R);
+			Ec::mul(R, P, 5);
+			CYBOZU_TEST_EQUAL(R, R4L);
+		}
+		{
+			R = P;
+			for (int i = 0; i < 10; i++) {
+				R += P;
+			}
+			Ec R2;
+			Ec::mul(R2, P, 11);
+			CYBOZU_TEST_EQUAL(R, R2);
+		}
+		Ec::mul(R, P, n - 1);
+		CYBOZU_TEST_EQUAL(R, -P);
+		R += P; // Ec::mul(R,
P, n); + CYBOZU_TEST_ASSERT(R.isZero()); + { + const int tbl[] = { 1, 2, 63, 64, 65, 127, 128, 129 }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + pow2test(P, tbl[i]); + } + } + { + Ec::mul(Q, P, 0); + CYBOZU_TEST_ASSERT(Q.isZero()); + Q = P; + CYBOZU_TEST_ASSERT(!Q.isZero()); + Ec::mul(Q, Q, 0); + CYBOZU_TEST_ASSERT(Q.isZero()); + Ec::mul(Q, P, 1); + CYBOZU_TEST_EQUAL(P, Q); + } + { + Ec R2; + P += P; + Q += P; + CYBOZU_TEST_ASSERT(!P.z.isOne()); + CYBOZU_TEST_ASSERT(!Q.z.isOne()); + Ec::add(R2, P, Q); + + P.normalize(); + CYBOZU_TEST_ASSERT(P.z.isOne()); + CYBOZU_TEST_ASSERT(!Q.z.isOne()); + // affine + generic + Ec::add(R, P, Q); + CYBOZU_TEST_EQUAL(R, R2); + // generic + affine + Ec::add(R, Q, P); + CYBOZU_TEST_EQUAL(R, R2); + + Q.normalize(); + CYBOZU_TEST_ASSERT(P.z.isOne()); + CYBOZU_TEST_ASSERT(Q.z.isOne()); + // affine + affine + Ec::add(R, P, Q); + CYBOZU_TEST_EQUAL(R, R2); + + P += P; + CYBOZU_TEST_ASSERT(!P.z.isOne()); + // generic + Ec::dbl(R2, P); + + P.normalize(); + CYBOZU_TEST_ASSERT(P.z.isOne()); + // affine + Ec::dbl(R, P); + CYBOZU_TEST_EQUAL(R, R2); + } + } + + void mul() const + { + Fp x(para.gx); + Fp y(para.gy); + Ec P(x, y); + Ec Q; + Ec R; + R.clear(); + for (int i = 0; i < 100; i++) { + Ec::mul(Q, P, i); + CYBOZU_TEST_EQUAL(Q, R); + R += P; + } + } + + void neg_mul() const + { + Fp x(para.gx); + Fp y(para.gy); + Ec P(x, y); + Ec Q; + Ec R; + R.clear(); + for (int i = 0; i < 100; i++) { + Ec::mul(Q, P, -i); + CYBOZU_TEST_EQUAL(Q, R); + R -= P; + } + } + void squareRoot() const + { + Fp x(para.gx); + Fp y(para.gy); + bool odd = y.isOdd(); + Fp yy; + bool b = Ec::getYfromX(yy, x, odd); + CYBOZU_TEST_ASSERT(b); + CYBOZU_TEST_EQUAL(yy, y); + Fp::neg(y, y); + odd = y.isOdd(); + yy.clear(); + b = Ec::getYfromX(yy, x, odd); + CYBOZU_TEST_ASSERT(b); + CYBOZU_TEST_EQUAL(yy, y); + } + void mul_fp() const + { + Fp x(para.gx); + Fp y(para.gy); + Ec P(x, y); + Ec Q; + Ec R; + R.clear(); + for (int i = 0; i < 100; i++) { + Ec::mul(Q, P, Zn(i)); + CYBOZU_TEST_EQUAL(Q, R); + R += P; + } + } + void str() const + { + const Fp x(para.gx); + const Fp y(para.gy); + Ec P(x, y); + Ec Q; + // not compressed + Ec::setCompressedExpression(false); + { + std::stringstream ss; + ss << P; + ss >> Q; + CYBOZU_TEST_EQUAL(P, Q); + } + { + Q.clear(); + CYBOZU_TEST_EQUAL(Q.getStr(), "0"); + } + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 2; j++) { + int base = i == 0 ? 10 : 16; + bool withPrefix = j == 0; + int ioMode = base | (withPrefix ? mcl::IoPrefix : 0); + std::string expected = "1 " + x.getStr(ioMode) + " " + y.getStr(ioMode); + CYBOZU_TEST_EQUAL(P.getStr(ioMode), expected); + std::ostringstream os; + if (base == 16) { + os << std::hex; + } + if (withPrefix) { + os << std::showbase; + } + os << P; + CYBOZU_TEST_EQUAL(os.str(), expected); + } + } + { + P = -P; + std::stringstream ss; + ss << P; + ss >> Q; + CYBOZU_TEST_EQUAL(P, Q); + } + P.clear(); + { + std::stringstream ss; + ss << P; + ss >> Q; + CYBOZU_TEST_EQUAL(P, Q); + } + CYBOZU_TEST_EXCEPTION(P.setStr("1 3 5"), cybozu::Exception); + // compressed + Ec::setCompressedExpression(true); + P.set(x, y); + { + std::stringstream ss; + ss << P; + ss >> Q; + CYBOZU_TEST_EQUAL(P, Q); + } + { + P = -P; + std::stringstream ss; + ss << P; + ss >> Q; + CYBOZU_TEST_EQUAL(P, Q); + } + P.clear(); + { + std::stringstream ss; + ss << P; + ss >> Q; + CYBOZU_TEST_EQUAL(P, Q); + } + // IoSerialize, IoSerializeHexStr + const size_t adj = Ec::isMSBserialize() ? 
0 : 1; + P.set(x, y); + { + std::string s = P.getStr(mcl::IoSerialize); + CYBOZU_TEST_EQUAL(s.size(), Fp::getByteSize() + adj); + Q.setStr(s, mcl::IoSerialize); + CYBOZU_TEST_EQUAL(P, Q); + } + { + std::string s = P.getStr(mcl::IoSerializeHexStr); + CYBOZU_TEST_EQUAL(s.size(), (Fp::getByteSize() + adj) * 2); + Q.setStr(s, mcl::IoSerializeHexStr); + CYBOZU_TEST_EQUAL(P, Q); + } + P = -P; + { + std::string s = P.getStr(mcl::IoSerialize); + CYBOZU_TEST_EQUAL(s.size(), Fp::getByteSize() + adj); + Q.setStr(s, mcl::IoSerialize); + CYBOZU_TEST_EQUAL(P, Q); + } + { + std::string s = P.getStr(mcl::IoSerializeHexStr); + CYBOZU_TEST_EQUAL(s.size(), (Fp::getByteSize() + adj) * 2); + Q.setStr(s, mcl::IoSerializeHexStr); + CYBOZU_TEST_EQUAL(P, Q); + } + P.clear(); + { + std::string s = P.getStr(mcl::IoSerialize); + CYBOZU_TEST_EQUAL(s.size(), Fp::getByteSize() + adj); + CYBOZU_TEST_ASSERT(mcl::fp::isZeroArray(s.c_str(), s.size())); + Q.setStr(s, mcl::IoSerialize); + CYBOZU_TEST_EQUAL(P, Q); + } + { + std::string s = P.getStr(mcl::IoSerializeHexStr); + CYBOZU_TEST_EQUAL(s.size(), (Fp::getByteSize() + adj) * 2); + for (size_t i = 0; i < s.size(); i++) { + CYBOZU_TEST_EQUAL(s[i], '0'); + } + Q.setStr(s, mcl::IoSerializeHexStr); + CYBOZU_TEST_EQUAL(P, Q); + } + } + void ioMode() const + { + const Fp x(para.gx); + const Fp y(para.gy); + Ec P(x, y); + const mcl::IoMode tbl[] = { + mcl::IoBin, + mcl::IoDec, + mcl::IoHex, + mcl::IoArray, + mcl::IoArrayRaw, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + Fp::setIoMode(tbl[i]); + { + std::stringstream ss; + ss << P; + Ec Q; + ss >> Q; + CYBOZU_TEST_EQUAL(P, Q); + } + { + std::stringstream ss; + Ec Q; + Q.clear(); + ss << Q; + Ec R; + ss >> R; + CYBOZU_TEST_EQUAL(Q, R); + } + } + Fp::setIoMode(mcl::IoAuto); + } + void mulCT() const + { + Fp x(para.gx); + Fp y(para.gy); + Ec P(x, y), Q1, Q2; + for (int i = 0; i < 100; i++) { + Zn r = i; + Ec::mul(Q1, P, r); + Ec::mulCT(Q2, P, r); + CYBOZU_TEST_EQUAL(Q1, Q2); + } + } + void compare() const + { + Fp x(para.gx); + Fp y(para.gy); + Ec P1(x, y); + Ec P2(x, -y); + int c = Ec::compare(P1, P2); + int cx = Fp::compare(y, -y); + CYBOZU_TEST_EQUAL(c, cx); + c = Ec::compare(P2, P1); + cx = Fp::compare(-y, y); + CYBOZU_TEST_EQUAL(c, cx); + CYBOZU_TEST_EQUAL(Ec::compare(P1, P1), 0); + bool b1, b2; + b1 = P1 <= P2; + b2 = y <= -y; + CYBOZU_TEST_EQUAL(b1, b2); + b1 = P1 < P2; + b2 = y < -y; + CYBOZU_TEST_EQUAL(b1, b2); + CYBOZU_TEST_ASSERT(!(P1 < P1)); + CYBOZU_TEST_ASSERT((P1 <= P1)); + } + + template + void test(F f, const char *msg) const + { + const int N = 300000; + Fp x(para.gx); + Fp y(para.gy); + Ec P(x, y); + Ec Q = P + P + P; + clock_t begin = clock(); + for (int i = 0; i < N; i++) { + f(Q, P, Q); + } + clock_t end = clock(); + printf("%s %.2fusec\n", msg, (end - begin) / double(CLOCKS_PER_SEC) / N * 1e6); + } +/* +Affine : sandy-bridge +add 3.17usec +sub 2.43usec +dbl 3.32usec +mul 905.00usec +Jacobi +add 2.34usec +sub 2.65usec +dbl 1.56usec +mul 499.00usec +*/ + void run() const + { + cstr(); + ope(); + mul(); + neg_mul(); + mul_fp(); + squareRoot(); + str(); + ioMode(); + mulCT(); + compare(); + } +private: + Test(const Test&); + void operator=(const Test&); +}; + +void test_sub_sub(const mcl::EcParam& para, mcl::fp::Mode fpMode) +{ + puts("Proj"); + Test(para, fpMode, mcl::ec::Proj).run(); + puts("Jacobi"); + Test(para, fpMode, mcl::ec::Jacobi).run(); +} + +void test_sub(const mcl::EcParam *para, size_t paraNum) +{ + for (size_t i = 0; i < paraNum; i++) { + puts(para[i].name); + test_sub_sub(para[i], 
mcl::fp::FP_GMP); +#ifdef MCL_USE_LLVM + test_sub_sub(para[i], mcl::fp::FP_LLVM); + test_sub_sub(para[i], mcl::fp::FP_LLVM_MONT); +#endif +#ifdef MCL_USE_XBYAK + test_sub_sub(para[i], mcl::fp::FP_XBYAK); +#endif + } +} + +int g_partial = -1; + +CYBOZU_TEST_AUTO(all) +{ + if (g_partial & (1 << 3)) { + const struct mcl::EcParam para3[] = { + // mcl::ecparam::p160_1, + mcl::ecparam::secp160k1, + mcl::ecparam::secp192k1, + mcl::ecparam::NIST_P192, + }; + test_sub(para3, CYBOZU_NUM_OF_ARRAY(para3)); + } + + if (g_partial & (1 << 4)) { + const struct mcl::EcParam para4[] = { + mcl::ecparam::secp224k1, + mcl::ecparam::secp256k1, + mcl::ecparam::NIST_P224, + mcl::ecparam::NIST_P256, + }; + test_sub(para4, CYBOZU_NUM_OF_ARRAY(para4)); + } + +#if MCL_MAX_BIT_SIZE >= 384 + if (g_partial & (1 << 6)) { + const struct mcl::EcParam para6[] = { + // mcl::ecparam::secp384r1, + mcl::ecparam::NIST_P384, + }; + test_sub(para6, CYBOZU_NUM_OF_ARRAY(para6)); + } +#endif + +#if MCL_MAX_BIT_SIZE >= 521 + if (g_partial & (1 << 9)) { + const struct mcl::EcParam para9[] = { + // mcl::ecparam::secp521r1, + mcl::ecparam::NIST_P521, + }; + test_sub(para9, CYBOZU_NUM_OF_ARRAY(para9)); + } +#endif +} + +int main(int argc, char *argv[]) +{ + if (argc == 1) { + g_partial = -1; + } else { + g_partial = 0; + for (int i = 1; i < argc; i++) { + g_partial |= 1 << atoi(argv[i]); + } + } + return cybozu::test::autoRun.run(argc, argv); +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/ecdsa_c_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/ecdsa_c_test.cpp new file mode 100644 index 000000000..e0af38182 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/ecdsa_c_test.cpp @@ -0,0 +1,51 @@ +#include +#include +#include + +template +void serializeTest(const T& x, const Serializer& serialize, const Deserializer& deserialize) +{ + char buf[128]; + size_t n = serialize(buf, sizeof(buf), &x); + CYBOZU_TEST_ASSERT(n > 0); + T y; + size_t m = deserialize(&y, buf, n); + CYBOZU_TEST_EQUAL(m, n); + CYBOZU_TEST_ASSERT(memcmp(&x, &y, n) == 0); +} + +CYBOZU_TEST_AUTO(ecdsa) +{ + int ret; + ret = ecdsaInit(); + CYBOZU_TEST_EQUAL(ret, 0); + ecdsaSecretKey sec; + ecdsaPublicKey pub; + ecdsaPrecomputedPublicKey *ppub; + ecdsaSignature sig; + const char *msg = "hello"; + mclSize msgSize = strlen(msg); + + ret = ecdsaSecretKeySetByCSPRNG(&sec); + CYBOZU_TEST_EQUAL(ret, 0); + serializeTest(sec, ecdsaSecretKeySerialize, ecdsaSecretKeyDeserialize); + + ecdsaGetPublicKey(&pub, &sec); + serializeTest(pub, ecdsaPublicKeySerialize, ecdsaPublicKeyDeserialize); + ecdsaSign(&sig, &sec, msg, msgSize); + serializeTest(sig, ecdsaSignatureSerialize, ecdsaSignatureDeserialize); + CYBOZU_TEST_ASSERT(ecdsaVerify(&sig, &pub, msg, msgSize)); + + ppub = ecdsaPrecomputedPublicKeyCreate(); + CYBOZU_TEST_ASSERT(ppub); + ret = ecdsaPrecomputedPublicKeyInit(ppub, &pub); + CYBOZU_TEST_EQUAL(ret, 0); + + CYBOZU_TEST_ASSERT(ecdsaVerifyPrecomputed(&sig, ppub, msg, msgSize)); + + sig.d[0]++; + CYBOZU_TEST_ASSERT(!ecdsaVerify(&sig, &pub, msg, msgSize)); + CYBOZU_TEST_ASSERT(!ecdsaVerifyPrecomputed(&sig, ppub, msg, msgSize)); + + ecdsaPrecomputedPublicKeyDestroy(ppub); +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/ecdsa_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/ecdsa_test.cpp new file mode 100644 index 000000000..332c9ee27 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/ecdsa_test.cpp @@ -0,0 +1,69 @@ +#define PUT(x) std::cout << #x "=" << x << std::endl; +#include +#include +void put(const void *buf, size_t bufSize) +{ + const 
unsigned char* p = (const unsigned char*)buf; + for (size_t i = 0; i < bufSize; i++) { + printf("%02x", p[i]); + } + printf("\n"); +} +#include +#include +#include + +using namespace mcl::ecdsa; + +CYBOZU_TEST_AUTO(ecdsa) +{ + init(); + SecretKey sec; + PublicKey pub; + sec.setByCSPRNG(); + getPublicKey(pub, sec); + Signature sig; + const std::string msg = "hello"; + sign(sig, sec, msg.c_str(), msg.size()); + CYBOZU_TEST_ASSERT(verify(sig, pub, msg.c_str(), msg.size())); + sig.s += 1; + CYBOZU_TEST_ASSERT(!verify(sig, pub, msg.c_str(), msg.size())); +} + +CYBOZU_TEST_AUTO(value) +{ + const std::string msg = "hello"; + const char *secStr = "83ecb3984a4f9ff03e84d5f9c0d7f888a81833643047acc58eb6431e01d9bac8"; + const char *pubxStr = "653bd02ba1367e5d4cd695b6f857d1cd90d4d8d42bc155d85377b7d2d0ed2e71"; + const char *pubyStr = "04e8f5da403ab78decec1f19e2396739ea544e2b14159beb5091b30b418b813a"; + const char *sigStr = "a598a8030da6d86c6bc7f2f5144ea549d28211ea58faa70ebf4c1e665c1fe9b5de5d79a2ba44e311d04fdca263639283965780bce9169822be9cc81756e95a24"; + + SecretKey sec; + sec.setStr(secStr, 16); + CYBOZU_TEST_EQUAL(sec.getStr(16), secStr); + PublicKey pub; + getPublicKey(pub, sec); + pub.normalize(); + Ec t(Fp(pubxStr, 16), Fp(pubyStr, 16)); + CYBOZU_TEST_EQUAL(pub, t); + Signature sig; + sig.r.setStr(std::string(sigStr, 64), 16); + sig.s.setStr(std::string(sigStr + 64, 64), 16); + PUT(sig); + CYBOZU_TEST_ASSERT(verify(sig, pub, msg.c_str(), msg.size())); +} + +CYBOZU_TEST_AUTO(bench) +{ + const std::string msg = "hello"; + SecretKey sec; + PublicKey pub; + PrecomputedPublicKey ppub; + sec.setByCSPRNG(); + getPublicKey(pub, sec); + ppub.init(pub); + Signature sig; + CYBOZU_BENCH_C("sign", 1000, sign, sig, sec, msg.c_str(), msg.size()); + CYBOZU_BENCH_C("pub.verify ", 1000, verify, sig, pub, msg.c_str(), msg.size()); + CYBOZU_BENCH_C("ppub.verify", 1000, verify, sig, ppub, msg.c_str(), msg.size()); +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/elgamal_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/elgamal_test.cpp new file mode 100644 index 000000000..9532fc597 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/elgamal_test.cpp @@ -0,0 +1,155 @@ +#include +#include +#include +#include +#include + +struct TagZn; +typedef mcl::FpT<> Fp; +typedef mcl::FpT Zn; +typedef mcl::EcT Ec; +typedef mcl::ElgamalT ElgamalEc; + +const mcl::EcParam& para = mcl::ecparam::secp192k1; +cybozu::RandomGenerator rg; + +CYBOZU_TEST_AUTO(testEc) +{ + Fp::init(para.p); + Zn::init(para.n); + Ec::init(para.a, para.b); + const Fp x0(para.gx); + const Fp y0(para.gy); + const size_t bitSize = Zn::getBitSize(); + const Ec P(x0, y0); + /* + Zn =

is isomorphic to Zn - */ - const mcl::EcParam& para = mcl::ecparam::secp192k1; - Zn::init(para.n); - Fp::init(para.p); - Ec::init(para.a, para.b); - const Ec P(Fp(para.gx), Fp(para.gy)); - - /* - Alice sets up a private key a and public key aP - */ - Zn a; - Ec aP; - - a.setRand(rg); - Ec::mul(aP, P, a); // aP = a * P; - - std::cout << "aP=" << aP << std::endl; - - /* - Bob sets up a private key b and public key bP - */ - Zn b; - Ec bP; - - b.setRand(rg); - Ec::mul(bP, P, b); // bP = b * P; - - std::cout << "bP=" << bP << std::endl; - - Ec abP, baP; - - // Alice uses bP(B's public key) and a(A's private key) - Ec::mul(abP, bP, a); // abP = a * (bP) - - // Bob uses aP(A's public key) and b(B's private key) - Ec::mul(baP, aP, b); // baP = b * (aP) - - if (abP == baP) { - std::cout << "key sharing succeeded:" << abP << std::endl; - } else { - std::cout << "ERR(not here)" << std::endl; - } -} - diff --git a/vendor/github.com/dexon-foundation/mcl/sample/large.cpp b/vendor/github.com/dexon-foundation/mcl/sample/large.cpp deleted file mode 100644 index 60b2ac900..000000000 --- a/vendor/github.com/dexon-foundation/mcl/sample/large.cpp +++ /dev/null @@ -1,125 +0,0 @@ -/* - large prime sample for 64-bit arch - make MCL_USE_LLVM=1 MCL_MAX_BIT_SIZE=768 -*/ -#include -#include -#include -#include "../src/low_func.hpp" - -typedef mcl::FpT<> Fp; - -using namespace mcl::fp; -const size_t N = 12; - -void testMul() -{ - Unit ux[N], uy[N], a[N * 2], b[N * 2]; - for (size_t i = 0; i < N; i++) { - ux[i] = -i * i + 5; - uy[i] = -i * i + 9; - } - MulPreCore::f(a, ux, uy); - MulPreCore::f(b, ux, uy); - for (size_t i = 0; i < N * 2; i++) { - if (a[i] != b[i]) { - printf("ERR %016llx %016llx\n", (long long)a[i], (long long)b[i]); - } - } - puts("end testMul"); - CYBOZU_BENCH("gmp ", (MulPreCore::f), ux, ux, uy); - CYBOZU_BENCH("kara", (MulPre::karatsuba), ux, ux, uy); -} - -void mulGmp(mpz_class& z, const mpz_class& x, const mpz_class& y, const mpz_class& p) -{ - z = (x * y) % p; -} -void compareGmp(const std::string& pStr) -{ - Fp::init(pStr); - std::string xStr = "2104871209348712947120947102843728"; - std::string s1, s2; - { - Fp x(xStr); - CYBOZU_BENCH_C("mul by mcl", 1000, Fp::mul, x, x, x); - std::ostringstream os; - os << x; - s1 = os.str(); - } - { - const mpz_class p(pStr); - mpz_class x(xStr); - CYBOZU_BENCH_C("mul by GMP", 1000, mulGmp, x, x, x, p); - std::ostringstream os; - os << x; - s2 = os.str(); - } - if (s1 != s2) { - puts("ERR"); - } -} - -void test(const std::string& pStr, mcl::fp::Mode mode) -{ - printf("test %s\n", mcl::fp::ModeToStr(mode)); - Fp::init(pStr, mode); - const mcl::fp::Op& op = Fp::getOp(); - printf("bitSize=%d\n", (int)Fp::getBitSize()); - mpz_class p(pStr); - Fp x = 123456; - Fp y; - Fp::pow(y, x, p); - std::cout << y << std::endl; - if (x != y) { - std::cout << "err:pow:" << y << std::endl; - return; - } - const size_t N = 24; - mcl::fp::Unit ux[N], uy[N]; - for (size_t i = 0; i < N; i++) { - ux[i] = -i * i + 5; - uy[i] = -i * i + 9; - } - CYBOZU_BENCH("mulPre", op.fpDbl_mulPre, ux, ux, uy); - CYBOZU_BENCH("sqrPre", op.fpDbl_sqrPre, ux, ux); - CYBOZU_BENCH("add", op.fpDbl_add, ux, ux, ux, op.p); - CYBOZU_BENCH("sub", op.fpDbl_sub, ux, ux, ux, op.p); - if (op.fpDbl_addPre) { - CYBOZU_BENCH("addPre", op.fpDbl_addPre, ux, ux, ux); - CYBOZU_BENCH("subPre", op.fpDbl_subPre, ux, ux, ux); - } - CYBOZU_BENCH("mont", op.fpDbl_mod, ux, ux, op.p); - CYBOZU_BENCH("mul", Fp::mul, x, x, x); - compareGmp(pStr); -} - -void testAll(const std::string& pStr) -{ - test(pStr, mcl::fp::FP_GMP); -
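// The modes exercised here select different arithmetic backends for the
// same prime: FP_GMP computes without Montgomery form, FP_GMP_MONT keeps
// values in Montgomery representation, and (when built with MCL_USE_LLVM,
// as guarded below) the FP_LLVM variants run LLVM-generated kernels.
// compareGmp() then checks an mcl multiplication chain against plain GMP
// on the same operand, so a mismatch isolates the backend under test.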
test(pStr, mcl::fp::FP_GMP_MONT); -#ifdef MCL_USE_LLVM - test(pStr, mcl::fp::FP_LLVM); - test(pStr, mcl::fp::FP_LLVM_MONT); -#endif - compareGmp(pStr); -} -int main() - try -{ - const char *pTbl[] = { - "40347654345107946713373737062547060536401653012956617387979052445947619094013143666088208645002153616185987062074179207", - "13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006083527", - "776259046150354467574489744231251277628443008558348305569526019013025476343188443165439204414323238975243865348565536603085790022057407195722143637520590569602227488010424952775132642815799222412631499596858234375446423426908029627", - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(pTbl); i++) { - testAll(pTbl[i]); - } - testMul(); -} catch (std::exception& e) { - printf("err %s\n", e.what()); - puts("make clean"); - puts("make -DMCL_MAX_BIT_SIZE=768"); - return 1; -} - diff --git a/vendor/github.com/dexon-foundation/mcl/sample/pairing.cpp b/vendor/github.com/dexon-foundation/mcl/sample/pairing.cpp deleted file mode 100644 index 230583b6e..000000000 --- a/vendor/github.com/dexon-foundation/mcl/sample/pairing.cpp +++ /dev/null @@ -1,56 +0,0 @@ -#include - -using namespace mcl::bn256; - -void minimum_sample(const G1& P, const G2& Q) -{ - const mpz_class a = 123; - const mpz_class b = 456; - Fp12 e1, e2; - pairing(e1, P, Q); - G2 aQ; - G1 bP; - G2::mul(aQ, Q, a); - G1::mul(bP, P, b); - pairing(e2, bP, aQ); - Fp12::pow(e1, e1, a * b); - printf("%s\n", e1 == e2 ? "ok" : "ng"); -} - -void miller_and_finel_exp(const G1& P, const G2& Q) -{ - Fp12 e1, e2; - pairing(e1, P, Q); - - millerLoop(e2, P, Q); - finalExp(e2, e2); - printf("%s\n", e1 == e2 ? "ok" : "ng"); -} - -void precomputed(const G1& P, const G2& Q) -{ - Fp12 e1, e2; - pairing(e1, P, Q); - std::vector Qcoeff; - precomputeG2(Qcoeff, Q); - precomputedMillerLoop(e2, P, Qcoeff); - finalExp(e2, e2); - printf("%s\n", e1 == e2 ? 
"ok" : "ng"); -} - -int main() -{ - const char *aa = "12723517038133731887338407189719511622662176727675373276651903807414909099441"; - const char *ab = "4168783608814932154536427934509895782246573715297911553964171371032945126671"; - const char *ba = "13891744915211034074451795021214165905772212241412891944830863846330766296736"; - const char *bb = "7937318970632701341203597196594272556916396164729705624521405069090520231616"; - - initPairing(); - G2 Q(Fp2(aa, ab), Fp2(ba, bb)); - G1 P(-1, 1); - - minimum_sample(P, Q); - miller_and_finel_exp(P, Q); - precomputed(P, Q); -} - diff --git a/vendor/github.com/dexon-foundation/mcl/sample/pairing_c.c b/vendor/github.com/dexon-foundation/mcl/sample/pairing_c.c deleted file mode 100644 index 5c2cd222a..000000000 --- a/vendor/github.com/dexon-foundation/mcl/sample/pairing_c.c +++ /dev/null @@ -1,52 +0,0 @@ -#include -#include -#define MCLBN_FP_UNIT_SIZE 4 -#include - -int g_err = 0; -#define ASSERT(x) { if (!(x)) { printf("err %s:%d\n", __FILE__, __LINE__); g_err++; } } - -int main() -{ - char buf[1024]; - const char *aStr = "123"; - const char *bStr = "456"; - mclBn_init(MCL_BN254, MCLBN_FP_UNIT_SIZE); - mclBnFr a, b, ab; - mclBnG1 P, aP; - mclBnG2 Q, bQ; - mclBnGT e, e1, e2; - mclBnFr_setStr(&a, aStr, strlen(aStr), 10); - mclBnFr_setStr(&b, bStr, strlen(bStr), 10); - mclBnFr_mul(&ab, &a, &b); - mclBnFr_getStr(buf, sizeof(buf), &ab, 10); - printf("%s x %s = %s\n", aStr, bStr, buf); - - ASSERT(!mclBnG1_hashAndMapTo(&P, "this", 4)); - ASSERT(!mclBnG2_hashAndMapTo(&Q, "that", 4)); - mclBnG1_getStr(buf, sizeof(buf), &P, 16); - printf("P = %s\n", buf); - mclBnG2_getStr(buf, sizeof(buf), &Q, 16); - printf("Q = %s\n", buf); - - mclBnG1_mul(&aP, &P, &a); - mclBnG2_mul(&bQ, &Q, &b); - - mclBn_pairing(&e, &P, &Q); - mclBnGT_getStr(buf, sizeof(buf), &e, 16); - printf("e = %s\n", buf); - mclBnGT_pow(&e1, &e, &a); - mclBn_pairing(&e2, &aP, &Q); - ASSERT(mclBnGT_isEqual(&e1, &e2)); - - mclBnGT_pow(&e1, &e, &b); - mclBn_pairing(&e2, &P, &bQ); - ASSERT(mclBnGT_isEqual(&e1, &e2)); - if (g_err) { - printf("err %d\n", g_err); - return 1; - } else { - printf("no err\n"); - return 0; - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/sample/random.cpp b/vendor/github.com/dexon-foundation/mcl/sample/random.cpp deleted file mode 100644 index a2a3619ad..000000000 --- a/vendor/github.com/dexon-foundation/mcl/sample/random.cpp +++ /dev/null @@ -1,29 +0,0 @@ -#include -#include -#include -#include -#include -#include -typedef mcl::FpT<> Fp; - -typedef std::map Map; - -int main(int argc, char *argv[]) -{ - cybozu::RandomGenerator rg; - const char *p = mcl::ecparam::secp192k1.p; - if (argc == 2) { - p = argv[1]; - } - Fp::init(p); - Fp x; - printf("p=%s\n", p); - Map m; - for (int i = 0; i < 10000; i++) { - x.setRand(rg); - m[x.getStr(16)]++; - } - for (Map::const_iterator i = m.begin(), ie = m.end(); i != ie; ++i) { - printf("%s %d\n", i->first.c_str(), i->second); - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/sample/rawbench.cpp b/vendor/github.com/dexon-foundation/mcl/sample/rawbench.cpp deleted file mode 100644 index 4d7506ef5..000000000 --- a/vendor/github.com/dexon-foundation/mcl/sample/rawbench.cpp +++ /dev/null @@ -1,180 +0,0 @@ -#define PUT(x) std::cout << #x "=" << (x) << std::endl -#include -#include -#include -#include -#include - -typedef mcl::FpT Fp; -typedef mcl::Fp2T Fp2; -typedef mcl::FpDblT FpDbl; -typedef mcl::Fp6T Fp6; -typedef mcl::Fp12T Fp12; - -typedef mcl::fp::Unit Unit; - -void mul9(const mcl::fp::Op& op, Unit *y, const Unit *x, 
const Unit *p) -{ - const size_t maxN = sizeof(Fp) / sizeof(Unit); - Unit tmp[maxN]; - op.fp_add(tmp, x, x, p); // 2x - op.fp_add(tmp, tmp, tmp, p); // 4x - op.fp_add(tmp, tmp, tmp, p); // 8x - op.fp_add(y, tmp, x, p); // 9x -} - -void benchRaw(const char *p, mcl::fp::Mode mode) -{ - Fp::init(1, p, mode); - Fp2::init(); - const size_t maxN = sizeof(Fp) / sizeof(Unit); - const mcl::fp::Op& op = Fp::getOp(); - cybozu::XorShift rg; - Fp fx, fy; - fx.setRand(rg); - fy.setRand(rg); - Unit ux[maxN * 2] = {}; - Unit uy[maxN * 2] = {}; - Unit uz[maxN * 2] = {}; - memcpy(ux, fx.getUnit(), sizeof(Unit) * op.N); - memcpy(ux + op.N, fx.getUnit(), sizeof(Unit) * op.N); - memcpy(uy, fy.getUnit(), sizeof(Unit) * op.N); - memcpy(ux + op.N, fx.getUnit(), sizeof(Unit) * op.N); - double fp_addT, fp_subT; - double fp_addPreT, fp_subPreT; - double fp_sqrT, fp_mulT; - double fp_mulUnitT; - double mul9T; - double fp_mulUnitPreT; - double fpN1_modT; - double fpDbl_addT, fpDbl_subT; - double fpDbl_sqrPreT, fpDbl_mulPreT, fpDbl_modT; - double fp2_sqrT, fp2_mulT; - CYBOZU_BENCH_T(fp_addT, op.fp_add, uz, ux, uy, op.p); - CYBOZU_BENCH_T(fp_subT, op.fp_sub, uz, uy, ux, op.p); - CYBOZU_BENCH_T(fp_addPreT, op.fp_addPre, uz, ux, uy); - CYBOZU_BENCH_T(fp_subPreT, op.fp_subPre, uz, uy, ux); - CYBOZU_BENCH_T(fp_sqrT, op.fp_sqr, uz, ux, op.p); - CYBOZU_BENCH_T(fp_mulT, op.fp_mul, uz, ux, uy, op.p); - CYBOZU_BENCH_T(fp_mulUnitT, op.fp_mulUnit, uz, ux, 9, op.p); - CYBOZU_BENCH_T(mul9T, mul9, op, uz, ux, op.p); - CYBOZU_BENCH_T(fp_mulUnitPreT, op.fp_mulUnitPre, ux, ux, 9); - CYBOZU_BENCH_T(fpN1_modT, op.fpN1_mod, ux, uy, op.p); - CYBOZU_BENCH_T(fpDbl_addT, op.fpDbl_add, uz, ux, uy, op.p); - CYBOZU_BENCH_T(fpDbl_subT, op.fpDbl_sub, uz, uy, ux, op.p); - CYBOZU_BENCH_T(fpDbl_sqrPreT, op.fpDbl_sqrPre, uz, ux); - CYBOZU_BENCH_T(fpDbl_mulPreT, op.fpDbl_mulPre, uz, ux, uy); - CYBOZU_BENCH_T(fpDbl_modT, op.fpDbl_mod, uz, ux, op.p); - Fp2 f2x, f2y; - f2x.a = fx; - f2x.b = fy; - f2y = f2x; - CYBOZU_BENCH_T(fp2_sqrT, Fp2::sqr, f2x, f2x); - CYBOZU_BENCH_T(fp2_mulT, Fp2::mul, f2x, f2x, f2y); - printf("%s\n", mcl::fp::ModeToStr(mode)); - const char *tStrTbl[] = { - "fp_add", "fp_sub", - "addPre", "subPre", - "fp_sqr", "fp_mul", - "mulUnit", - "mul9", - "mulUnitP", - "fpN1_mod", - "D_add", "D_sub", - "D_sqrPre", "D_mulPre", "D_mod", - "fp2_sqr", "fp2_mul", - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tStrTbl); i++) { - printf(" %8s", tStrTbl[i]); - } - printf("\n"); - const double tTbl[] = { - fp_addT, fp_subT, - fp_addPreT, fp_subPreT, - fp_sqrT, fp_mulT, - fp_mulUnitT, - mul9T, - fp_mulUnitPreT, - fpN1_modT, - fpDbl_addT, fpDbl_subT, - fpDbl_sqrPreT, fpDbl_mulPreT, fpDbl_modT, - fp2_sqrT, fp2_mulT, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tTbl); i++) { - printf(" %8.2f", tTbl[i]); - } - printf("\n"); -} - -int main(int argc, char *argv[]) - try -{ - cybozu::Option opt; - size_t bitSize; - opt.appendOpt(&bitSize, 0, "s", ": bitSize"); - opt.appendHelp("h", ": show this message"); - if (!opt.parse(argc, argv)) { - opt.usage(); - return 1; - } - const char *tbl[] = { - // N = 2 - "0x0000000000000001000000000000000d", - "0x7fffffffffffffffffffffffffffffff", - "0x8000000000000000000000000000001d", - "0xffffffffffffffffffffffffffffff61", - - // N = 3 - "0x000000000000000100000000000000000000000000000033", // min prime - "0x70000000000000000000000000000000000000000000001f", - "0x800000000000000000000000000000000000000000000005", - "0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d", - 
"0xfffffffffffffffffffffffffffffffeffffffffffffffff", - "0xffffffffffffffffffffffffffffffffffffffffffffff13", // max prime - - // N = 4 - "0x0000000000000001000000000000000000000000000000000000000000000085", // min prime - "0x2523648240000001ba344d80000000086121000000000013a700000000000013", // BN254 - "0x30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", // Snark - "0x7523648240000001ba344d80000000086121000000000013a700000000000017", - "0x800000000000000000000000000000000000000000000000000000000000005f", - "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff43", // max prime - // N = 5 - "0x80000000000000000000000000000000000000000000000000000000000000000000000000000009", - "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3b", - // N = 6 - "0x800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000171", - "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffec3", - // N = 7 - "0x8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000063", - "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff35", - // N = 8 - "0x8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006f", - "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdc7", -#if MCL_MAX_BIT_SIZE == 1024 - "0xc70b1ddda9b96e3965e5855942aa5852d8f8e052c760ac32cdfec16a2ed3d56981e1a475e20a70144ed2f5061ba64900f69451492803f815d446ee133d0668f7a7f3276d6301c95ce231f0e4b0d0f3882f10014fca04454cff55d2e2d4cfc1aad33b8d38397e2fc8b623177e63d0b783269c40a85b8f105654783b8ed2e737df", - "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff97", -#endif - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const char *p = tbl[i]; - if (bitSize > 0 && (strlen(p) - 2) * 4 != bitSize) { - continue; - } - printf("prime=%s\n", p); - benchRaw(tbl[i], mcl::fp::FP_GMP); - benchRaw(tbl[i], mcl::fp::FP_GMP_MONT); -#ifdef MCL_USE_LLVM - benchRaw(tbl[i], mcl::fp::FP_LLVM); - benchRaw(tbl[i], mcl::fp::FP_LLVM_MONT); -#endif -#ifdef MCL_USE_XBYAK - if (bitSize <= 384) { - benchRaw(tbl[i], mcl::fp::FP_XBYAK); - } -#endif - } -} catch (std::exception& e) { - printf("ERR %s\n", e.what()); - return 1; -} diff --git a/vendor/github.com/dexon-foundation/mcl/sample/she_make_dlp_table.cpp b/vendor/github.com/dexon-foundation/mcl/sample/she_make_dlp_table.cpp deleted file mode 100644 index 41f18e225..000000000 --- a/vendor/github.com/dexon-foundation/mcl/sample/she_make_dlp_table.cpp +++ /dev/null @@ -1,69 +0,0 @@ -/* - make she DLP table -*/ -#include -#include -#include - -using namespace mcl::she; - -struct Param { - int curveType; - int hashBitSize; - int group; - std::string path; -}; - -template -void makeTable(const Param& param, const char *groupStr, HashTable& hashTbl, const G& P) -{ - char baseName[32]; - CYBOZU_SNPRINTF(baseName, sizeof(baseName), "she-dlp-%d-%d-%s.bin", param.curveType, param.hashBitSize, groupStr); - const std::string fileName = param.path + baseName; - printf("file=%s\n", fileName.c_str()); - std::ofstream ofs(fileName.c_str(), 
std::ios::binary); - - const size_t hashSize = 1u << param.hashBitSize; - hashTbl.init(P, hashSize); - hashTbl.save(ofs); -} - -void run(const Param& param) -{ - SHE::init(mcl::getCurveParam(param.curveType)); - - switch (param.group) { - case 1: - makeTable(param, "g1", getHashTableG1(), SHE::P_); - break; - case 2: - makeTable(param, "g2", getHashTableG2(), SHE::Q_); - break; - case 3: - makeTable(param, "gt", getHashTableGT(), SHE::ePQ_); - break; - default: - throw cybozu::Exception("bad group") << param.group; - } -} - -int main(int argc, char *argv[]) - try -{ - cybozu::Option opt; - Param param; - opt.appendOpt(¶m.curveType, 0, "ct", ": curveType(0:BN254, 1:BN381_1, 5:BLS12_381)"); - opt.appendOpt(¶m.hashBitSize, 20, "hb", ": hash bit size"); - opt.appendOpt(¶m.group, 3, "g", ": group(1:G1, 2:G2, 3:GT"); - opt.appendOpt(¶m.path, "./", "path", ": path to table"); - opt.appendHelp("h"); - if (opt.parse(argc, argv)) { - run(param); - } else { - opt.usage(); - return 1; - } -} catch (std::exception& e) { - printf("err %s\n", e.what()); - return 1; -} diff --git a/vendor/github.com/dexon-foundation/mcl/sample/she_smpl.cpp b/vendor/github.com/dexon-foundation/mcl/sample/she_smpl.cpp deleted file mode 100644 index e01b9c130..000000000 --- a/vendor/github.com/dexon-foundation/mcl/sample/she_smpl.cpp +++ /dev/null @@ -1,125 +0,0 @@ -/* - sample of somewhat homomorphic encryption(SHE) -*/ -#define PUT(x) std::cout << #x << "=" << (x) << std::endl; -#include -#include - -using namespace mcl::she; - -void miniSample() -{ - // init library - SHE::init(); - - SecretKey sec; - - // init secret key by random_device - sec.setByCSPRNG(); - - // set range to decode GT DLP - SHE::setRangeForDLP(1000); - - PublicKey pub; - // get public key - sec.getPublicKey(pub); - - const int N = 5; - int a[] = { 1, 5, -3, 4, 6 }; - int b[] = { 4, 2, 1, 9, -2 }; - // compute correct value - int sum = 0; - for (size_t i = 0; i < N; i++) { - sum += a[i] * b[i]; - } - - std::vector ca(N), cb(N); - - // encrypt each a[] and b[] - for (size_t i = 0; i < N; i++) { - pub.enc(ca[i], a[i]); - pub.enc(cb[i], b[i]); - } - CipherText c; - c.clearAsMultiplied(); // clear as multiplied before using c.add() - // inner product of encrypted vector - for (size_t i = 0; i < N; i++) { - CipherText t; - CipherText::mul(t, ca[i], cb[i]); // t = ca[i] * cb[i] - c.add(t); // c += t - } - // decode it - int m = (int)sec.dec(c); - // verify the value - if (m == sum) { - puts("ok"); - } else { - printf("err correct %d err %d\n", sum, m); - } -} - -void usePrimitiveCipherText() -{ - // init library - SHE::init(); - - SecretKey sec; - - // init secret key by random_device - sec.setByCSPRNG(); - - // set range to decode GT DLP - SHE::setRangeForGTDLP(100); - - PublicKey pub; - // get public key - sec.getPublicKey(pub); - - int a1 = 1, a2 = 2; - int b1 = 5, b2 = -4; - CipherTextG1 c1, c2; // size of CipherTextG1 = N * 2 ; N = 256-bit for CurveFp254BNb - CipherTextG2 d1, d2; // size of CipherTextG2 = N * 4 - pub.enc(c1, a1); - pub.enc(c2, a2); - pub.enc(d1, b1); - pub.enc(d2, b2); - c1.add(c2); // CipherTextG1 is additive HE - d1.add(d2); // CipherTextG2 is additive HE - CipherTextGT cm; // size of CipherTextGT = N * 12 * 4 - CipherTextGT::mul(cm, c1, d1); // cm = c1 * d1 - cm.add(cm); // 2cm - int m = (int)sec.dec(cm); - int ok = (a1 + a2) * (b1 + b2) * 2; - if (m == ok) { - puts("ok"); - } else { - printf("err m=%d ok=%d\n", m, ok); - } - std::string s; - s = c1.getStr(mcl::IoSerialize); // serialize - printf("c1 data size %d byte\n", 
(int)s.size()); - - c2.setStr(s, mcl::IoSerialize); - printf("deserialize %s\n", c1 == c2 ? "ok" : "ng"); - - s = d1.getStr(mcl::IoSerialize); // serialize - printf("d1 data size %d byte\n", (int)s.size()); - d2.setStr(s, mcl::IoSerialize); - printf("deserialize %s\n", d1 == d2 ? "ok" : "ng"); - - s = cm.getStr(mcl::IoSerialize); // serialize - printf("cm data size %d byte\n", (int)s.size()); - CipherTextGT cm2; - cm2.setStr(s, mcl::IoSerialize); - printf("deserialize %s\n", cm == cm2 ? "ok" : "ng"); -} - -int main() - try -{ - miniSample(); - usePrimitiveCipherText(); -} catch (std::exception& e) { - printf("err %s\n", e.what()); - return 1; -} diff --git a/vendor/github.com/dexon-foundation/mcl/sample/tri-dh.cpp b/vendor/github.com/dexon-foundation/mcl/sample/tri-dh.cpp deleted file mode 100644 index 8b720edbf..000000000 --- a/vendor/github.com/dexon-foundation/mcl/sample/tri-dh.cpp +++ /dev/null @@ -1,97 +0,0 @@ -/* - tripartie Diffie-Hellman -*/ -#include -#include -#include -#include -#include - -static cybozu::RandomGenerator rg; - -const std::string skSuf = ".sk.txt"; -const std::string pkSuf = ".pk.txt"; - -using namespace mcl::bn256; - -void keygen(const std::string& user) -{ - if (user.empty()) { - throw cybozu::Exception("keygen:user is empty"); - } - const char *aa = "12723517038133731887338407189719511622662176727675373276651903807414909099441"; - const char *ab = "4168783608814932154536427934509895782246573715297911553964171371032945126671"; - const char *ba = "13891744915211034074451795021214165905772212241412891944830863846330766296736"; - const char *bb = "7937318970632701341203597196594272556916396164729705624521405069090520231616"; - - - initPairing(); - G2 Q(Fp2(aa, ab), Fp2(ba, bb)); - G1 P(-1, 1); - - Fr s; - s.setRand(rg); - G1::mul(P, P, s); - G2::mul(Q, Q, s); - { - std::string name = user + skSuf; - std::ofstream ofs(name.c_str(), std::ios::binary); - ofs << s << std::endl; - } - { - std::string name = user + pkSuf; - std::ofstream ofs(name.c_str(), std::ios::binary); - ofs << P << std::endl; - ofs << Q << std::endl; - } -} - -void load(G1& P, G2& Q, const std::string& fileName) -{ - std::ifstream ifs(fileName.c_str(), std::ios::binary); - ifs >> P >> Q; -} - -void share(const std::string& skFile, const std::string& pk1File, const std::string& pk2File) -{ - initPairing(); - Fr s; - G1 P1, P2; - G2 Q1, Q2; - { - std::ifstream ifs(skFile.c_str(), std::ios::binary); - ifs >> s; - } - load(P1, Q1, pk1File); - load(P2, Q2, pk2File); - Fp12 e; - pairing(e, P1, Q2); - { - // verify(not necessary) - Fp12 e2; - pairing(e2, P2, Q1); - if (e != e2) { - throw cybozu::Exception("share:bad public key file") << e << e2; - } - } - Fp12::pow(e, e, s); - std::cout << "share key:\n" << e << std::endl; -} - -int main(int argc, char *argv[]) - try -{ - if (argc == 3 && strcmp(argv[1], "keygen") == 0) { - keygen(argv[2]); - } else if (argc == 5 && strcmp(argv[1], "share") == 0) { - share(argv[2], argv[3], argv[4]); - } else { - fprintf(stderr, "tri-dh.exe keygen \n"); - fprintf(stderr, "tri-dh.exe share \n"); - return 1; - } -} catch (std::exception& e) { - printf("ERR %s\n", e.what()); - return 1; -} - diff --git a/vendor/github.com/dexon-foundation/mcl/sample/vote.cpp b/vendor/github.com/dexon-foundation/mcl/sample/vote.cpp deleted file mode 100644 index 88137187c..000000000 --- a/vendor/github.com/dexon-foundation/mcl/sample/vote.cpp +++ /dev/null @@ -1,206 +0,0 @@ -/* - vote sample tool - Copyright (c) 2014, National Institute of Advanced Industrial - Science and Technology All 
rights reserved. - This source file is subject to BSD 3-Clause license. - - modified for mcl by herumi -*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -typedef mcl::FpT<> Fp; -typedef mcl::FpT Zn; // use ZnTag because Zn is a different class from Fp -typedef mcl::EcT Ec; -typedef mcl::ElgamalT Elgamal; - -cybozu::RandomGenerator rg; - -const std::string pubFile = "vote_pub.txt"; -const std::string prvFile = "vote_prv.txt"; -const std::string resultFile = "vote_ret.txt"; - -std::string GetSheetName(size_t n) -{ - return std::string("vote_") + cybozu::itoa(n) + ".txt"; -} - -struct Param { - std::string mode; - std::string voteList; - Param(int argc, const char *const argv[]) - { - cybozu::Option opt; - opt.appendOpt(&voteList, "11001100", "l", ": list of voters for vote mode(eg. 11001100)"); - opt.appendHelp("h", ": put this message"); - opt.appendParam(&mode, "mode", ": init/vote/count/open"); - if (!opt.parse(argc, argv)) { - opt.usage(); - exit(1); - } - printf("mode=%s\n", mode.c_str()); - if (mode == "vote") { - printf("voters=%s\n", voteList.c_str()); - size_t pos = voteList.find_first_not_of("01"); - if (pos != std::string::npos) { - printf("bad char %c\n", voteList[pos]); - exit(1); - } - } - } -}; - -void SysInit() -{ - const mcl::EcParam& para = mcl::ecparam::secp192k1; - Zn::init(para.n); - Fp::init(para.p); - Ec::init(para.a, para.b); -} - -template<class T> -bool Load(T& t, const std::string& name, bool doThrow = true) -{ - std::ifstream ifs(name.c_str(), std::ios::binary); - if (!ifs) { - if (doThrow) throw cybozu::Exception("Load:can't read") << name; - return false; - } - if (ifs >> t) return true; - if (doThrow) throw cybozu::Exception("Load:bad data") << name; - return false; -} - -template<class T> -void Save(const std::string& name, const T& t) -{ - std::ofstream ofs(name.c_str(), std::ios::binary); - ofs << t; -} - -void Init() -{ - const mcl::EcParam& para = mcl::ecparam::secp192k1; - const Fp x0(para.gx); - const Fp y0(para.gy); - const Ec P(x0, y0); - const size_t bitSize = para.bitSize; - - Elgamal::PrivateKey prv; - prv.init(P, bitSize, rg); - const Elgamal::PublicKey& pub = prv.getPublicKey(); - printf("make privateKey=%s, publicKey=%s\n", prvFile.c_str(), pubFile.c_str()); - Save(prvFile, prv); - Save(pubFile, pub); -} - -struct CipherWithZkp { - Elgamal::CipherText c; - Elgamal::Zkp zkp; - bool verify(const Elgamal::PublicKey& pub) const - { - return pub.verify(c, zkp); - } -}; - -inline std::ostream& operator<<(std::ostream& os, const CipherWithZkp& self) -{ - return os << self.c << std::endl << self.zkp; -} -inline std::istream& operator>>(std::istream& is, CipherWithZkp& self) -{ - return is >> self.c >> self.zkp; -} - -void Vote(const std::string& voteList) -{ - Elgamal::PublicKey pub; - Load(pub, pubFile); - puts("shuffle"); - std::vector idxTbl(voteList.size()); - for (size_t i = 0; i < idxTbl.size(); i++) { - idxTbl[i] = i; - } - cybozu::shuffle(idxTbl, rg); - puts("each voter votes"); - for (size_t i = 0; i < voteList.size(); i++) { - CipherWithZkp c; - pub.encWithZkp(c.c, c.zkp, voteList[i] - '0', rg); - const std::string sheetName = GetSheetName(idxTbl[i]); - printf("make %s\n", sheetName.c_str()); - Save(sheetName, c); - } -} - -void Count() -{ - Elgamal::PublicKey pub; - Load(pub, pubFile); - Elgamal::CipherText result; - puts("aggregate votes"); - for (size_t i = 0; ; i++) { - const std::string sheetName = GetSheetName(i); - CipherWithZkp c; - if (!Load(c, sheetName, false)) break; - if (!c.verify(pub)) throw
cybozu::Exception("bad cipher text") << i; - printf("add %s\n", sheetName.c_str()); - result.add(c.c); - } - printf("create result file : %s\n", resultFile.c_str()); - Save(resultFile, result); -} - -void Open() -{ - Elgamal::PrivateKey prv; - Load(prv, prvFile); - Elgamal::CipherText c; - Load(c, resultFile); - Zn n; - prv.dec(n, c); - std::cout << "result of vote count " << n << std::endl; -#if 0 - puts("open real value"); - for (size_t i = 0; ; i++) { - Elgamal::CipherText c; - const std::string sheetName = GetSheetName(i); - if (!Load(c, sheetName, false)) break; - Zn n; - prv.dec(n, c); - std::cout << sheetName << " " << n << std::endl; - } -#endif -} - -int main(int argc, char *argv[]) - try -{ - const Param p(argc, argv); - SysInit(); - if (p.mode == "init") { - Init(); - } else - if (p.mode == "vote") { - Vote(p.voteList); - } else - if (p.mode == "count") { - Count(); - } else - if (p.mode == "open") { - Open(); - } else - { - printf("bad mode=%s\n", p.mode.c_str()); - return 1; - } -} catch (std::exception& e) { - printf("ERR %s\n", e.what()); -} - diff --git a/vendor/github.com/dexon-foundation/mcl/setvar.bat b/vendor/github.com/dexon-foundation/mcl/setvar.bat deleted file mode 100644 index 1d57fa69e..000000000 --- a/vendor/github.com/dexon-foundation/mcl/setvar.bat +++ /dev/null @@ -1,2 +0,0 @@ -set CFLAGS=/MT /DNOMINMAX /Ox /DNDEBUG /W4 /Zi /EHsc /nologo -I./include -I../cybozulib_ext/include -set LDFLAGS=/LIBPATH:..\cybozulib_ext\lib /LIBPATH:.\lib diff --git a/vendor/github.com/dexon-foundation/mcl/src/asm/aarch64.s b/vendor/github.com/dexon-foundation/mcl/src/asm/aarch64.s deleted file mode 100644 index a49a36e3a..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/asm/aarch64.s +++ /dev/null @@ -1,13197 +0,0 @@ - .text - .file "" - .globl makeNIST_P192L - .align 2 - .type makeNIST_P192L,@function -makeNIST_P192L: // @makeNIST_P192L -// BB#0: - movn x0, #0 - orr x1, xzr, #0xfffffffffffffffe - movn x2, #0 - ret -.Lfunc_end0: - .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L - - .globl mcl_fpDbl_mod_NIST_P192L - .align 2 - .type mcl_fpDbl_mod_NIST_P192L,@function -mcl_fpDbl_mod_NIST_P192L: // @mcl_fpDbl_mod_NIST_P192L -// BB#0: - ldp x8, x9, [x1, #16] - ldp x10, x11, [x1, #32] - ldp x12, x13, [x1] - orr w14, wzr, #0x1 - adds x13, x11, x13 - adcs x8, x8, xzr - adcs x15, xzr, xzr - adds x12, x12, x9 - adcs x13, x13, x10 - adcs x8, x8, x11 - adcs x15, x15, xzr - adds x11, x12, x11 - movn x12, #0 - adcs x9, x13, x9 - adcs x8, x8, x10 - adcs x10, x15, xzr - adds x11, x10, x11 - adcs x9, x10, x9 - adcs x8, x8, xzr - adcs x10, xzr, xzr - adds x13, x11, #1 // =1 - adcs x14, x9, x14 - adcs x15, x8, xzr - adcs x10, x10, x12 - tst x10, #0x1 - csel x10, x11, x13, ne - csel x9, x9, x14, ne - csel x8, x8, x15, ne - stp x10, x9, [x0] - str x8, [x0, #16] - ret -.Lfunc_end1: - .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L - - .globl mcl_fp_sqr_NIST_P192L - .align 2 - .type mcl_fp_sqr_NIST_P192L,@function -mcl_fp_sqr_NIST_P192L: // @mcl_fp_sqr_NIST_P192L -// BB#0: - ldp x8, x9, [x1] - ldr x10, [x1, #16] - orr w11, wzr, #0x1 - umulh x12, x8, x8 - mul x13, x9, x8 - mul x14, x10, x8 - umulh x15, x9, x8 - adds x12, x12, x13 - umulh x16, x10, x8 - adcs x17, x15, x14 - adcs x18, x16, xzr - mul x1, x9, x9 - mul x2, x10, x9 - adds x15, x15, x1 - umulh x1, x9, x9 - umulh x9, x10, x9 - adcs x1, x1, x2 - adcs x3, x9, xzr - adds x12, x13, x12 - adcs x13, x15, x17 - adcs x15, x1, x18 - movn x17, #0 - umulh x18, x10, x10 - mul x10, x10, x10 - mul x8, x8, x8 - adcs x1, x3, xzr 
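// NIST P-192 uses p = 2^192 - 2^64 - 1, so 2^192 == 2^64 + 1 (mod p).
// The add/adc chains that follow exploit this identity: each limb of the
// 384-bit square above 192 bits is folded back into the limb positions 0
// and 1 below it, and the closing adds #1 / csel sequence is a branch-free
// conditional subtraction that brings the folded sum under p.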
- adds x16, x16, x2 - adcs x9, x9, x10 - adcs x10, x18, xzr - adds x13, x14, x13 - adcs x14, x16, x15 - adcs x9, x9, x1 - adcs x10, x10, xzr - adds x12, x12, x10 - adcs x13, x13, xzr - adcs x15, xzr, xzr - adds x8, x8, x14 - adcs x12, x12, x9 - adcs x13, x13, x10 - adcs x15, x15, xzr - adds x8, x8, x10 - adcs x10, x12, x14 - adcs x9, x13, x9 - adcs x12, x15, xzr - adds x8, x12, x8 - adcs x10, x12, x10 - adcs x9, x9, xzr - adcs x12, xzr, xzr - adds x13, x8, #1 // =1 - adcs x11, x10, x11 - adcs x14, x9, xzr - adcs x12, x12, x17 - tst x12, #0x1 - csel x8, x8, x13, ne - csel x10, x10, x11, ne - csel x9, x9, x14, ne - stp x8, x10, [x0] - str x9, [x0, #16] - ret -.Lfunc_end2: - .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L - - .globl mcl_fp_mulNIST_P192L - .align 2 - .type mcl_fp_mulNIST_P192L,@function -mcl_fp_mulNIST_P192L: // @mcl_fp_mulNIST_P192L -// BB#0: - stp x20, x19, [sp, #-32]! - stp x29, x30, [sp, #16] - add x29, sp, #16 // =16 - sub sp, sp, #48 // =48 - mov x19, x0 - mov x0, sp - bl mcl_fpDbl_mulPre3L - ldp x9, x8, [sp, #8] - ldp x11, x10, [sp, #32] - ldr x12, [sp, #24] - ldr x13, [sp] - orr w14, wzr, #0x1 - adds x9, x10, x9 - adcs x8, x8, xzr - adcs x15, xzr, xzr - adds x13, x13, x12 - adcs x9, x9, x11 - adcs x8, x8, x10 - adcs x15, x15, xzr - adds x10, x13, x10 - movn x13, #0 - adcs x9, x9, x12 - adcs x8, x8, x11 - adcs x11, x15, xzr - adds x10, x11, x10 - adcs x9, x11, x9 - adcs x8, x8, xzr - adcs x11, xzr, xzr - adds x12, x10, #1 // =1 - adcs x14, x9, x14 - adcs x15, x8, xzr - adcs x11, x11, x13 - tst x11, #0x1 - csel x10, x10, x12, ne - csel x9, x9, x14, ne - csel x8, x8, x15, ne - stp x10, x9, [x19] - str x8, [x19, #16] - sub sp, x29, #16 // =16 - ldp x29, x30, [sp, #16] - ldp x20, x19, [sp], #32 - ret -.Lfunc_end3: - .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L - - .globl mcl_fpDbl_mod_NIST_P521L - .align 2 - .type mcl_fpDbl_mod_NIST_P521L,@function -mcl_fpDbl_mod_NIST_P521L: // @mcl_fpDbl_mod_NIST_P521L -// BB#0: - stp x29, x30, [sp, #-16]! 
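// P-521 reduction: with p = 2^521 - 1, a double-width input H*2^521 + L is
// congruent to H + L (mod p). The extr ..., #9 instructions realign the
// high half (521 = 8*64 + 9) into nine limbs, the adds/adcs chain merges
// it into the low half, and the #0x1ff masks keep the 9-bit top limb; the
// cmn #1 / memset path handles the corner case where the folded sum equals
// p itself and the result must be zero.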
- mov x29, sp - ldp x8, x9, [x1, #112] - ldr x10, [x1, #128] - ldp x11, x12, [x1, #96] - ldp x13, x14, [x1, #80] - ldp x15, x16, [x1, #64] - ldp x17, x18, [x1, #48] - ldp x2, x3, [x1, #32] - ldp x4, x5, [x1, #16] - ldp x6, x1, [x1] - extr x7, x10, x9, #9 - extr x9, x9, x8, #9 - extr x8, x8, x12, #9 - extr x12, x12, x11, #9 - extr x11, x11, x14, #9 - extr x14, x14, x13, #9 - extr x13, x13, x16, #9 - extr x16, x16, x15, #9 - and x15, x15, #0x1ff - lsr x10, x10, #9 - adds x16, x16, x6 - adcs x13, x13, x1 - adcs x14, x14, x4 - adcs x11, x11, x5 - adcs x12, x12, x2 - adcs x1, x8, x3 - adcs x17, x9, x17 - adcs x18, x7, x18 - adcs x2, x10, x15 - ubfx x8, x2, #9, #1 - adds x8, x8, x16 - adcs x9, x13, xzr - and x13, x9, x8 - adcs x10, x14, xzr - and x13, x13, x10 - adcs x11, x11, xzr - and x13, x13, x11 - adcs x12, x12, xzr - and x14, x13, x12 - adcs x13, x1, xzr - and x15, x14, x13 - adcs x14, x17, xzr - and x16, x15, x14 - adcs x15, x18, xzr - and x17, x16, x15 - adcs x16, x2, xzr - orr x18, x16, #0xfffffffffffffe00 - and x17, x17, x18 - cmn x17, #1 // =1 - b.eq .LBB4_2 -// BB#1: // %nonzero - stp x8, x9, [x0] - stp x10, x11, [x0, #16] - stp x12, x13, [x0, #32] - stp x14, x15, [x0, #48] - and x8, x16, #0x1ff - str x8, [x0, #64] - ldp x29, x30, [sp], #16 - ret -.LBB4_2: // %zero - mov w1, wzr - movz w2, #0x48 - bl memset - ldp x29, x30, [sp], #16 - ret -.Lfunc_end4: - .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L - - .globl mcl_fp_mulUnitPre1L - .align 2 - .type mcl_fp_mulUnitPre1L,@function -mcl_fp_mulUnitPre1L: // @mcl_fp_mulUnitPre1L -// BB#0: - ldr x8, [x1] - mul x9, x8, x2 - umulh x8, x8, x2 - stp x9, x8, [x0] - ret -.Lfunc_end5: - .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L - - .globl mcl_fpDbl_mulPre1L - .align 2 - .type mcl_fpDbl_mulPre1L,@function -mcl_fpDbl_mulPre1L: // @mcl_fpDbl_mulPre1L -// BB#0: - ldr x8, [x1] - ldr x9, [x2] - mul x10, x9, x8 - umulh x8, x9, x8 - stp x10, x8, [x0] - ret -.Lfunc_end6: - .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L - - .globl mcl_fpDbl_sqrPre1L - .align 2 - .type mcl_fpDbl_sqrPre1L,@function -mcl_fpDbl_sqrPre1L: // @mcl_fpDbl_sqrPre1L -// BB#0: - ldr x8, [x1] - mul x9, x8, x8 - umulh x8, x8, x8 - stp x9, x8, [x0] - ret -.Lfunc_end7: - .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L - - .globl mcl_fp_mont1L - .align 2 - .type mcl_fp_mont1L,@function -mcl_fp_mont1L: // @mcl_fp_mont1L -// BB#0: - ldr x8, [x2] - ldr x9, [x1] - ldur x10, [x3, #-8] - ldr x11, [x3] - umulh x12, x9, x8 - mul x8, x9, x8 - mul x9, x8, x10 - umulh x10, x9, x11 - mul x9, x9, x11 - cmn x9, x8 - adcs x8, x10, x12 - adcs x9, xzr, xzr - subs x10, x8, x11 - sbcs x9, x9, xzr - tst x9, #0x1 - csel x8, x8, x10, ne - str x8, [x0] - ret -.Lfunc_end8: - .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L - - .globl mcl_fp_montNF1L - .align 2 - .type mcl_fp_montNF1L,@function -mcl_fp_montNF1L: // @mcl_fp_montNF1L -// BB#0: - ldr x8, [x2] - ldr x9, [x1] - ldur x10, [x3, #-8] - ldr x11, [x3] - umulh x12, x9, x8 - mul x8, x9, x8 - mul x9, x8, x10 - umulh x10, x9, x11 - mul x9, x9, x11 - cmn x9, x8 - adcs x8, x10, x12 - sub x9, x8, x11 - cmp x9, #0 // =0 - csel x8, x8, x9, lt - str x8, [x0] - ret -.Lfunc_end9: - .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L - - .globl mcl_fp_montRed1L - .align 2 - .type mcl_fp_montRed1L,@function -mcl_fp_montRed1L: // @mcl_fp_montRed1L -// BB#0: - ldur x8, [x2, #-8] - ldp x9, x11, [x1] - ldr x10, [x2] - mul x8, x9, x8 - umulh x12, x8, x10 - mul x8, x8, x10 - cmn x9, x8 - adcs x8, x11, x12 - adcs x9, xzr, 
xzr - subs x10, x8, x10 - sbcs x9, x9, xzr - tst x9, #0x1 - csel x8, x8, x10, ne - str x8, [x0] - ret -.Lfunc_end10: - .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L - - .globl mcl_fp_addPre1L - .align 2 - .type mcl_fp_addPre1L,@function -mcl_fp_addPre1L: // @mcl_fp_addPre1L -// BB#0: - ldr x8, [x1] - ldr x9, [x2] - adds x9, x9, x8 - adcs x8, xzr, xzr - str x9, [x0] - mov x0, x8 - ret -.Lfunc_end11: - .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L - - .globl mcl_fp_subPre1L - .align 2 - .type mcl_fp_subPre1L,@function -mcl_fp_subPre1L: // @mcl_fp_subPre1L -// BB#0: - ldr x8, [x2] - ldr x9, [x1] - subs x9, x9, x8 - ngcs x8, xzr - and x8, x8, #0x1 - str x9, [x0] - mov x0, x8 - ret -.Lfunc_end12: - .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L - - .globl mcl_fp_shr1_1L - .align 2 - .type mcl_fp_shr1_1L,@function -mcl_fp_shr1_1L: // @mcl_fp_shr1_1L -// BB#0: - ldr x8, [x1] - lsr x8, x8, #1 - str x8, [x0] - ret -.Lfunc_end13: - .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L - - .globl mcl_fp_add1L - .align 2 - .type mcl_fp_add1L,@function -mcl_fp_add1L: // @mcl_fp_add1L -// BB#0: - ldr x8, [x1] - ldr x9, [x2] - ldr x10, [x3] - adds x8, x9, x8 - str x8, [x0] - adcs x9, xzr, xzr - subs x8, x8, x10 - sbcs x9, x9, xzr - and w9, w9, #0x1 - tbnz w9, #0, .LBB14_2 -// BB#1: // %nocarry - str x8, [x0] -.LBB14_2: // %carry - ret -.Lfunc_end14: - .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L - - .globl mcl_fp_addNF1L - .align 2 - .type mcl_fp_addNF1L,@function -mcl_fp_addNF1L: // @mcl_fp_addNF1L -// BB#0: - ldr x8, [x1] - ldr x9, [x2] - ldr x10, [x3] - add x8, x9, x8 - sub x9, x8, x10 - cmp x9, #0 // =0 - csel x8, x8, x9, lt - str x8, [x0] - ret -.Lfunc_end15: - .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L - - .globl mcl_fp_sub1L - .align 2 - .type mcl_fp_sub1L,@function -mcl_fp_sub1L: // @mcl_fp_sub1L -// BB#0: - ldr x8, [x2] - ldr x9, [x1] - subs x8, x9, x8 - str x8, [x0] - ngcs x9, xzr - and w9, w9, #0x1 - tbnz w9, #0, .LBB16_2 -// BB#1: // %nocarry - ret -.LBB16_2: // %carry - ldr x9, [x3] - add x8, x9, x8 - str x8, [x0] - ret -.Lfunc_end16: - .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L - - .globl mcl_fp_subNF1L - .align 2 - .type mcl_fp_subNF1L,@function -mcl_fp_subNF1L: // @mcl_fp_subNF1L -// BB#0: - ldr x8, [x2] - ldr x9, [x1] - ldr x10, [x3] - sub x8, x9, x8 - and x9, x10, x8, asr #63 - add x8, x9, x8 - str x8, [x0] - ret -.Lfunc_end17: - .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L - - .globl mcl_fpDbl_add1L - .align 2 - .type mcl_fpDbl_add1L,@function -mcl_fpDbl_add1L: // @mcl_fpDbl_add1L -// BB#0: - ldp x8, x11, [x1] - ldp x9, x10, [x2] - ldr x12, [x3] - adds x8, x9, x8 - str x8, [x0] - adcs x8, x10, x11 - adcs x9, xzr, xzr - subs x10, x8, x12 - sbcs x9, x9, xzr - tst x9, #0x1 - csel x8, x8, x10, ne - str x8, [x0, #8] - ret -.Lfunc_end18: - .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L - - .globl mcl_fpDbl_sub1L - .align 2 - .type mcl_fpDbl_sub1L,@function -mcl_fpDbl_sub1L: // @mcl_fpDbl_sub1L -// BB#0: - ldp x8, x11, [x1] - ldp x9, x10, [x2] - ldr x12, [x3] - subs x8, x8, x9 - str x8, [x0] - sbcs x8, x11, x10 - ngcs x9, xzr - tst x9, #0x1 - csel x9, x12, xzr, ne - add x8, x9, x8 - str x8, [x0, #8] - ret -.Lfunc_end19: - .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L - - .globl mcl_fp_mulUnitPre2L - .align 2 - .type mcl_fp_mulUnitPre2L,@function -mcl_fp_mulUnitPre2L: // @mcl_fp_mulUnitPre2L -// BB#0: - ldp x8, x9, [x1] - mul x10, x8, x2 - mul x11, x9, x2 - umulh x8, x8, x2 - umulh x9, x9, x2 - adds x8, x8, x11 - stp x10, x8, [x0] - adcs x8, x9, xzr - 
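// x8 now carries the top word (the high umulh result plus carry), stored
// as the third limb: mulUnitPre returns the full (N+1)-limb product of an
// N-limb operand and a single 64-bit word, with no modular reduction.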
str x8, [x0, #16] - ret -.Lfunc_end20: - .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L - - .globl mcl_fpDbl_mulPre2L - .align 2 - .type mcl_fpDbl_mulPre2L,@function -mcl_fpDbl_mulPre2L: // @mcl_fpDbl_mulPre2L -// BB#0: - ldp x8, x11, [x2] - ldp x9, x10, [x1] - mul x12, x9, x8 - umulh x13, x10, x8 - mul x14, x10, x8 - umulh x8, x9, x8 - mul x15, x9, x11 - mul x16, x10, x11 - umulh x9, x9, x11 - umulh x10, x10, x11 - adds x8, x8, x14 - adcs x11, x13, xzr - adds x8, x8, x15 - stp x12, x8, [x0] - adcs x8, x11, x16 - adcs x11, xzr, xzr - adds x8, x8, x9 - str x8, [x0, #16] - adcs x8, x11, x10 - str x8, [x0, #24] - ret -.Lfunc_end21: - .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L - - .globl mcl_fpDbl_sqrPre2L - .align 2 - .type mcl_fpDbl_sqrPre2L,@function -mcl_fpDbl_sqrPre2L: // @mcl_fpDbl_sqrPre2L -// BB#0: - ldp x8, x9, [x1] - mul x10, x8, x8 - umulh x11, x9, x8 - mul x12, x9, x8 - umulh x8, x8, x8 - umulh x13, x9, x9 - mul x9, x9, x9 - str x10, [x0] - adds x8, x8, x12 - adcs x10, x11, xzr - adds x9, x11, x9 - adcs x11, x13, xzr - adds x8, x12, x8 - str x8, [x0, #8] - adcs x8, x9, x10 - str x8, [x0, #16] - adcs x8, x11, xzr - str x8, [x0, #24] - ret -.Lfunc_end22: - .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L - - .globl mcl_fp_mont2L - .align 2 - .type mcl_fp_mont2L,@function -mcl_fp_mont2L: // @mcl_fp_mont2L -// BB#0: - ldp x8, x14, [x2] - ldp x9, x10, [x1] - ldur x11, [x3, #-8] - ldp x12, x13, [x3] - umulh x15, x10, x8 - mul x16, x10, x8 - umulh x17, x9, x8 - mul x8, x9, x8 - umulh x18, x14, x10 - mul x10, x14, x10 - umulh x1, x14, x9 - mul x9, x14, x9 - adds x14, x17, x16 - mul x16, x8, x11 - adcs x15, x15, xzr - mul x17, x16, x13 - umulh x2, x16, x12 - adds x17, x2, x17 - umulh x2, x16, x13 - mul x16, x16, x12 - adcs x2, x2, xzr - cmn x16, x8 - adcs x8, x17, x14 - adcs x14, x2, x15 - adcs x15, xzr, xzr - adds x10, x1, x10 - adcs x16, x18, xzr - adds x8, x8, x9 - adcs x9, x14, x10 - mul x10, x8, x11 - adcs x11, x15, x16 - umulh x14, x10, x13 - mul x15, x10, x13 - umulh x16, x10, x12 - mul x10, x10, x12 - adcs x17, xzr, xzr - adds x15, x16, x15 - adcs x14, x14, xzr - cmn x10, x8 - adcs x8, x15, x9 - adcs x9, x14, x11 - adcs x10, x17, xzr - subs x11, x8, x12 - sbcs x12, x9, x13 - sbcs x10, x10, xzr - tst x10, #0x1 - csel x8, x8, x11, ne - csel x9, x9, x12, ne - stp x8, x9, [x0] - ret -.Lfunc_end23: - .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L - - .globl mcl_fp_montNF2L - .align 2 - .type mcl_fp_montNF2L,@function -mcl_fp_montNF2L: // @mcl_fp_montNF2L -// BB#0: - ldp x8, x14, [x2] - ldp x9, x10, [x1] - ldur x11, [x3, #-8] - ldp x12, x13, [x3] - umulh x15, x10, x8 - mul x16, x10, x8 - umulh x17, x9, x8 - mul x8, x9, x8 - umulh x18, x14, x10 - mul x10, x14, x10 - umulh x1, x14, x9 - mul x9, x14, x9 - adds x14, x17, x16 - mul x16, x8, x11 - adcs x15, x15, xzr - mul x17, x16, x12 - cmn x17, x8 - mul x8, x16, x13 - umulh x17, x16, x13 - umulh x16, x16, x12 - adcs x8, x8, x14 - adcs x14, x15, xzr - adds x8, x8, x16 - adcs x14, x14, x17 - adds x10, x1, x10 - adcs x15, x18, xzr - adds x8, x9, x8 - adcs x9, x10, x14 - mul x10, x8, x11 - adcs x11, x15, xzr - mul x14, x10, x13 - mul x15, x10, x12 - umulh x16, x10, x13 - umulh x10, x10, x12 - cmn x15, x8 - adcs x8, x14, x9 - adcs x9, x11, xzr - adds x8, x8, x10 - adcs x9, x9, x16 - subs x10, x8, x12 - sbcs x11, x9, x13 - cmp x11, #0 // =0 - csel x8, x8, x10, lt - csel x9, x9, x11, lt - stp x8, x9, [x0] - ret -.Lfunc_end24: - .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L - - .globl mcl_fp_montRed2L - 
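// Montgomery reduction (REDC): the ldur ..., [x2, #-8] loads suggest that
// rp = -p^{-1} mod 2^64 is kept one word below the modulus. A sketch of one
// round, with hypothetical names:
//   q = t[0] * rp;           // mod 2^64
//   t = (t + q * p) >> 64;   // bottom limb cancels exactly
// after which a final borrow-checked subtract of p (subs/sbcs + csel)
// normalizes the result into [0, p).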
.align 2 - .type mcl_fp_montRed2L,@function -mcl_fp_montRed2L: // @mcl_fp_montRed2L -// BB#0: - ldur x8, [x2, #-8] - ldp x9, x14, [x1] - ldp x10, x11, [x2] - ldp x12, x13, [x1, #16] - mul x15, x9, x8 - mul x16, x15, x11 - umulh x17, x15, x10 - adds x16, x17, x16 - umulh x17, x15, x11 - mul x15, x15, x10 - adcs x17, x17, xzr - cmn x9, x15 - adcs x9, x14, x16 - adcs x12, x12, x17 - mul x8, x9, x8 - adcs x13, x13, xzr - umulh x14, x8, x11 - mul x15, x8, x11 - umulh x16, x8, x10 - mul x8, x8, x10 - adcs x17, xzr, xzr - adds x15, x16, x15 - adcs x14, x14, xzr - cmn x8, x9 - adcs x8, x15, x12 - adcs x9, x14, x13 - adcs x12, x17, xzr - subs x10, x8, x10 - sbcs x11, x9, x11 - sbcs x12, x12, xzr - tst x12, #0x1 - csel x8, x8, x10, ne - csel x9, x9, x11, ne - stp x8, x9, [x0] - ret -.Lfunc_end25: - .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L - - .globl mcl_fp_addPre2L - .align 2 - .type mcl_fp_addPre2L,@function -mcl_fp_addPre2L: // @mcl_fp_addPre2L -// BB#0: - ldp x8, x11, [x1] - ldp x9, x10, [x2] - adds x8, x9, x8 - str x8, [x0] - adcs x9, x10, x11 - adcs x8, xzr, xzr - str x9, [x0, #8] - mov x0, x8 - ret -.Lfunc_end26: - .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L - - .globl mcl_fp_subPre2L - .align 2 - .type mcl_fp_subPre2L,@function -mcl_fp_subPre2L: // @mcl_fp_subPre2L -// BB#0: - ldp x8, x11, [x1] - ldp x9, x10, [x2] - subs x8, x8, x9 - str x8, [x0] - sbcs x9, x11, x10 - ngcs x8, xzr - and x8, x8, #0x1 - str x9, [x0, #8] - mov x0, x8 - ret -.Lfunc_end27: - .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L - - .globl mcl_fp_shr1_2L - .align 2 - .type mcl_fp_shr1_2L,@function -mcl_fp_shr1_2L: // @mcl_fp_shr1_2L -// BB#0: - ldp x8, x9, [x1] - extr x8, x9, x8, #1 - lsr x9, x9, #1 - stp x8, x9, [x0] - ret -.Lfunc_end28: - .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L - - .globl mcl_fp_add2L - .align 2 - .type mcl_fp_add2L,@function -mcl_fp_add2L: // @mcl_fp_add2L -// BB#0: - ldp x8, x11, [x1] - ldp x9, x10, [x2] - adds x8, x9, x8 - ldp x9, x12, [x3] - adcs x10, x10, x11 - stp x8, x10, [x0] - adcs x11, xzr, xzr - subs x9, x8, x9 - sbcs x8, x10, x12 - sbcs x10, x11, xzr - and w10, w10, #0x1 - tbnz w10, #0, .LBB29_2 -// BB#1: // %nocarry - stp x9, x8, [x0] -.LBB29_2: // %carry - ret -.Lfunc_end29: - .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L - - .globl mcl_fp_addNF2L - .align 2 - .type mcl_fp_addNF2L,@function -mcl_fp_addNF2L: // @mcl_fp_addNF2L -// BB#0: - ldp x8, x9, [x1] - ldp x10, x11, [x2] - ldp x12, x13, [x3] - adds x8, x10, x8 - adcs x9, x11, x9 - subs x10, x8, x12 - sbcs x11, x9, x13 - cmp x11, #0 // =0 - csel x8, x8, x10, lt - csel x9, x9, x11, lt - stp x8, x9, [x0] - ret -.Lfunc_end30: - .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L - - .globl mcl_fp_sub2L - .align 2 - .type mcl_fp_sub2L,@function -mcl_fp_sub2L: // @mcl_fp_sub2L -// BB#0: - ldp x8, x11, [x1] - ldp x9, x10, [x2] - subs x9, x8, x9 - sbcs x8, x11, x10 - stp x9, x8, [x0] - ngcs x10, xzr - and w10, w10, #0x1 - tbnz w10, #0, .LBB31_2 -// BB#1: // %nocarry - ret -.LBB31_2: // %carry - ldp x10, x11, [x3] - adds x9, x10, x9 - adcs x8, x11, x8 - stp x9, x8, [x0] - ret -.Lfunc_end31: - .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L - - .globl mcl_fp_subNF2L - .align 2 - .type mcl_fp_subNF2L,@function -mcl_fp_subNF2L: // @mcl_fp_subNF2L -// BB#0: - ldp x8, x11, [x1] - ldp x9, x10, [x2] - subs x8, x8, x9 - ldp x9, x12, [x3] - sbcs x10, x11, x10 - asr x11, x10, #63 - and x9, x11, x9 - and x11, x11, x12 - adds x8, x9, x8 - str x8, [x0] - adcs x8, x11, x10 - str x8, [x0, #8] - ret -.Lfunc_end32: - .size 
mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L - - .globl mcl_fpDbl_add2L - .align 2 - .type mcl_fpDbl_add2L,@function -mcl_fpDbl_add2L: // @mcl_fpDbl_add2L -// BB#0: - ldp x8, x9, [x2, #16] - ldp x10, x15, [x1] - ldp x11, x14, [x2] - ldp x12, x13, [x1, #16] - adds x10, x11, x10 - ldp x11, x16, [x3] - str x10, [x0] - adcs x10, x14, x15 - str x10, [x0, #8] - adcs x8, x8, x12 - adcs x9, x9, x13 - adcs x10, xzr, xzr - subs x11, x8, x11 - sbcs x12, x9, x16 - sbcs x10, x10, xzr - tst x10, #0x1 - csel x8, x8, x11, ne - csel x9, x9, x12, ne - stp x8, x9, [x0, #16] - ret -.Lfunc_end33: - .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L - - .globl mcl_fpDbl_sub2L - .align 2 - .type mcl_fpDbl_sub2L,@function -mcl_fpDbl_sub2L: // @mcl_fpDbl_sub2L -// BB#0: - ldp x8, x9, [x2, #16] - ldp x10, x14, [x2] - ldp x11, x15, [x1] - ldp x12, x13, [x1, #16] - subs x10, x11, x10 - ldp x11, x16, [x3] - str x10, [x0] - sbcs x10, x15, x14 - str x10, [x0, #8] - sbcs x8, x12, x8 - sbcs x9, x13, x9 - ngcs x10, xzr - tst x10, #0x1 - csel x10, x16, xzr, ne - csel x11, x11, xzr, ne - adds x8, x11, x8 - str x8, [x0, #16] - adcs x8, x10, x9 - str x8, [x0, #24] - ret -.Lfunc_end34: - .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L - - .globl mcl_fp_mulUnitPre3L - .align 2 - .type mcl_fp_mulUnitPre3L,@function -mcl_fp_mulUnitPre3L: // @mcl_fp_mulUnitPre3L -// BB#0: - ldp x8, x9, [x1] - ldr x10, [x1, #16] - mul x11, x8, x2 - mul x12, x9, x2 - umulh x8, x8, x2 - mul x13, x10, x2 - umulh x9, x9, x2 - umulh x10, x10, x2 - adds x8, x8, x12 - stp x11, x8, [x0] - adcs x8, x9, x13 - str x8, [x0, #16] - adcs x8, x10, xzr - str x8, [x0, #24] - ret -.Lfunc_end35: - .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L - - .globl mcl_fpDbl_mulPre3L - .align 2 - .type mcl_fpDbl_mulPre3L,@function -mcl_fpDbl_mulPre3L: // @mcl_fpDbl_mulPre3L -// BB#0: - stp x20, x19, [sp, #-16]! 
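// 3x3 schoolbook multiply with no reduction: nine mul/umulh partial
// products are accumulated column-wise into the six output limbs; x19/x20
// are saved above because the column sums need the extra scratch registers.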
- ldp x8, x9, [x1] - ldp x10, x12, [x2] - ldr x11, [x1, #16] - ldr x13, [x2, #16] - mul x14, x8, x10 - umulh x15, x11, x10 - mul x16, x11, x10 - umulh x17, x9, x10 - mul x18, x9, x10 - umulh x10, x8, x10 - mul x1, x8, x12 - mul x2, x11, x12 - mul x3, x9, x12 - umulh x4, x11, x12 - umulh x5, x9, x12 - umulh x12, x8, x12 - mul x6, x8, x13 - mul x7, x11, x13 - mul x19, x9, x13 - umulh x8, x8, x13 - umulh x9, x9, x13 - umulh x11, x11, x13 - str x14, [x0] - adds x10, x10, x18 - adcs x13, x17, x16 - adcs x14, x15, xzr - adds x10, x10, x1 - str x10, [x0, #8] - adcs x10, x13, x3 - adcs x13, x14, x2 - adcs x14, xzr, xzr - adds x10, x10, x12 - adcs x12, x13, x5 - adcs x13, x14, x4 - adds x10, x10, x6 - str x10, [x0, #16] - adcs x10, x12, x19 - adcs x12, x13, x7 - adcs x13, xzr, xzr - adds x8, x10, x8 - str x8, [x0, #24] - adcs x8, x12, x9 - str x8, [x0, #32] - adcs x8, x13, x11 - str x8, [x0, #40] - ldp x20, x19, [sp], #16 - ret -.Lfunc_end36: - .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L - - .globl mcl_fpDbl_sqrPre3L - .align 2 - .type mcl_fpDbl_sqrPre3L,@function -mcl_fpDbl_sqrPre3L: // @mcl_fpDbl_sqrPre3L -// BB#0: - ldp x8, x10, [x1] - ldr x9, [x1, #16] - mul x11, x8, x8 - umulh x12, x9, x8 - mul x13, x9, x8 - umulh x14, x10, x8 - mul x15, x10, x8 - umulh x8, x8, x8 - mul x16, x9, x10 - str x11, [x0] - adds x8, x8, x15 - adcs x11, x14, x13 - adcs x17, x12, xzr - adds x8, x8, x15 - mul x15, x10, x10 - str x8, [x0, #8] - umulh x8, x9, x10 - umulh x10, x10, x10 - adcs x11, x11, x15 - adcs x15, x17, x16 - adcs x17, xzr, xzr - adds x11, x11, x14 - umulh x14, x9, x9 - mul x9, x9, x9 - adcs x10, x15, x10 - adcs x15, x17, x8 - adds x12, x12, x16 - adcs x8, x8, x9 - adcs x9, x14, xzr - adds x11, x13, x11 - adcs x10, x12, x10 - stp x11, x10, [x0, #16] - adcs x8, x8, x15 - str x8, [x0, #32] - adcs x8, x9, xzr - str x8, [x0, #40] - ret -.Lfunc_end37: - .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L - - .globl mcl_fp_mont3L - .align 2 - .type mcl_fp_mont3L,@function -mcl_fp_mont3L: // @mcl_fp_mont3L -// BB#0: - stp x24, x23, [sp, #-48]! 
- stp x22, x21, [sp, #16] - stp x20, x19, [sp, #32] - ldp x15, x16, [x2] - ldp x13, x14, [x1, #8] - ldr x12, [x1] - ldur x11, [x3, #-8] - ldp x9, x8, [x3, #8] - ldr x10, [x3] - ldr x17, [x2, #16] - umulh x18, x14, x15 - mul x1, x14, x15 - umulh x2, x13, x15 - mul x3, x13, x15 - umulh x4, x12, x15 - mul x15, x12, x15 - umulh x5, x16, x14 - mul x6, x16, x14 - umulh x7, x16, x13 - mul x19, x16, x13 - umulh x20, x16, x12 - mul x16, x16, x12 - umulh x21, x17, x14 - mul x14, x17, x14 - adds x3, x4, x3 - mul x4, x15, x11 - adcs x1, x2, x1 - mul x2, x4, x8 - mul x22, x4, x9 - umulh x23, x4, x10 - adcs x18, x18, xzr - adds x22, x23, x22 - umulh x23, x4, x9 - adcs x2, x23, x2 - umulh x23, x4, x8 - mul x4, x4, x10 - adcs x23, x23, xzr - cmn x4, x15 - umulh x15, x17, x13 - mul x13, x17, x13 - umulh x4, x17, x12 - mul x12, x17, x12 - adcs x17, x22, x3 - adcs x1, x2, x1 - adcs x18, x23, x18 - adcs x2, xzr, xzr - adds x3, x20, x19 - adcs x6, x7, x6 - adcs x5, x5, xzr - adds x16, x17, x16 - adcs x17, x1, x3 - mul x1, x16, x11 - adcs x18, x18, x6 - mul x3, x1, x8 - mul x6, x1, x9 - umulh x7, x1, x10 - adcs x2, x2, x5 - adcs x5, xzr, xzr - adds x6, x7, x6 - umulh x7, x1, x9 - adcs x3, x7, x3 - umulh x7, x1, x8 - mul x1, x1, x10 - adcs x7, x7, xzr - cmn x1, x16 - adcs x16, x6, x17 - adcs x17, x3, x18 - adcs x18, x7, x2 - adcs x1, x5, xzr - adds x13, x4, x13 - adcs x14, x15, x14 - adcs x15, x21, xzr - adds x12, x16, x12 - adcs x13, x17, x13 - mul x11, x12, x11 - adcs x14, x18, x14 - umulh x16, x11, x8 - mul x17, x11, x8 - umulh x18, x11, x9 - mul x2, x11, x9 - umulh x3, x11, x10 - mul x11, x11, x10 - adcs x15, x1, x15 - adcs x1, xzr, xzr - adds x2, x3, x2 - adcs x17, x18, x17 - adcs x16, x16, xzr - cmn x11, x12 - adcs x11, x2, x13 - adcs x12, x17, x14 - adcs x13, x16, x15 - adcs x14, x1, xzr - subs x10, x11, x10 - sbcs x9, x12, x9 - sbcs x8, x13, x8 - sbcs x14, x14, xzr - tst x14, #0x1 - csel x10, x11, x10, ne - csel x9, x12, x9, ne - csel x8, x13, x8, ne - stp x10, x9, [x0] - str x8, [x0, #16] - ldp x20, x19, [sp, #32] - ldp x22, x21, [sp, #16] - ldp x24, x23, [sp], #48 - ret -.Lfunc_end38: - .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L - - .globl mcl_fp_montNF3L - .align 2 - .type mcl_fp_montNF3L,@function -mcl_fp_montNF3L: // @mcl_fp_montNF3L -// BB#0: - stp x22, x21, [sp, #-32]! 
- stp x20, x19, [sp, #16] - ldp x14, x16, [x2] - ldp x15, x13, [x1, #8] - ldr x12, [x1] - ldur x11, [x3, #-8] - ldp x9, x8, [x3, #8] - ldr x10, [x3] - ldr x17, [x2, #16] - umulh x18, x13, x14 - mul x1, x13, x14 - umulh x2, x15, x14 - mul x3, x15, x14 - umulh x4, x12, x14 - mul x14, x12, x14 - umulh x5, x16, x13 - mul x6, x16, x13 - umulh x7, x16, x15 - mul x19, x16, x15 - umulh x20, x16, x12 - mul x16, x16, x12 - umulh x21, x17, x13 - mul x13, x17, x13 - adds x3, x4, x3 - mul x4, x14, x11 - adcs x1, x2, x1 - mul x2, x4, x10 - adcs x18, x18, xzr - cmn x2, x14 - umulh x14, x17, x15 - mul x15, x17, x15 - umulh x2, x17, x12 - mul x12, x17, x12 - mul x17, x4, x9 - adcs x17, x17, x3 - mul x3, x4, x8 - adcs x1, x3, x1 - umulh x3, x4, x10 - adcs x18, x18, xzr - adds x17, x17, x3 - umulh x3, x4, x9 - adcs x1, x1, x3 - umulh x3, x4, x8 - adcs x18, x18, x3 - adds x3, x20, x19 - adcs x4, x7, x6 - adcs x5, x5, xzr - adds x16, x16, x17 - adcs x17, x3, x1 - mul x1, x16, x11 - adcs x18, x4, x18 - mul x3, x1, x8 - mul x4, x1, x10 - adcs x5, x5, xzr - cmn x4, x16 - mul x16, x1, x9 - umulh x4, x1, x8 - adcs x16, x16, x17 - umulh x17, x1, x9 - umulh x1, x1, x10 - adcs x18, x3, x18 - adcs x3, x5, xzr - adds x16, x16, x1 - adcs x17, x18, x17 - adcs x18, x3, x4 - adds x15, x2, x15 - adcs x13, x14, x13 - adcs x14, x21, xzr - adds x12, x12, x16 - adcs x15, x15, x17 - mul x11, x12, x11 - adcs x13, x13, x18 - mul x16, x11, x8 - mul x17, x11, x9 - mul x18, x11, x10 - umulh x1, x11, x8 - umulh x2, x11, x9 - umulh x11, x11, x10 - adcs x14, x14, xzr - cmn x18, x12 - adcs x12, x17, x15 - adcs x13, x16, x13 - adcs x14, x14, xzr - adds x11, x12, x11 - adcs x12, x13, x2 - adcs x13, x14, x1 - subs x10, x11, x10 - sbcs x9, x12, x9 - sbcs x8, x13, x8 - asr x14, x8, #63 - cmp x14, #0 // =0 - csel x10, x11, x10, lt - csel x9, x12, x9, lt - csel x8, x13, x8, lt - stp x10, x9, [x0] - str x8, [x0, #16] - ldp x20, x19, [sp, #16] - ldp x22, x21, [sp], #32 - ret -.Lfunc_end39: - .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L - - .globl mcl_fp_montRed3L - .align 2 - .type mcl_fp_montRed3L,@function -mcl_fp_montRed3L: // @mcl_fp_montRed3L -// BB#0: - ldur x8, [x2, #-8] - ldp x9, x17, [x1] - ldp x12, x10, [x2, #8] - ldr x11, [x2] - ldp x13, x14, [x1, #32] - ldp x15, x16, [x1, #16] - mul x18, x9, x8 - umulh x1, x18, x10 - mul x2, x18, x10 - umulh x3, x18, x12 - mul x4, x18, x12 - umulh x5, x18, x11 - mul x18, x18, x11 - adds x4, x5, x4 - adcs x2, x3, x2 - adcs x1, x1, xzr - cmn x9, x18 - adcs x9, x17, x4 - adcs x15, x15, x2 - mul x17, x9, x8 - adcs x16, x16, x1 - umulh x18, x17, x10 - mul x1, x17, x10 - umulh x2, x17, x12 - mul x3, x17, x12 - umulh x4, x17, x11 - mul x17, x17, x11 - adcs x13, x13, xzr - adcs x14, x14, xzr - adcs x5, xzr, xzr - adds x3, x4, x3 - adcs x1, x2, x1 - adcs x18, x18, xzr - cmn x17, x9 - adcs x9, x3, x15 - adcs x15, x1, x16 - mul x8, x9, x8 - adcs x13, x18, x13 - umulh x16, x8, x10 - mul x17, x8, x10 - umulh x18, x8, x12 - mul x1, x8, x12 - umulh x2, x8, x11 - mul x8, x8, x11 - adcs x14, x14, xzr - adcs x3, x5, xzr - adds x1, x2, x1 - adcs x17, x18, x17 - adcs x16, x16, xzr - cmn x8, x9 - adcs x8, x1, x15 - adcs x9, x17, x13 - adcs x13, x16, x14 - adcs x14, x3, xzr - subs x11, x8, x11 - sbcs x12, x9, x12 - sbcs x10, x13, x10 - sbcs x14, x14, xzr - tst x14, #0x1 - csel x8, x8, x11, ne - csel x9, x9, x12, ne - csel x10, x13, x10, ne - stp x8, x9, [x0] - str x10, [x0, #16] - ret -.Lfunc_end40: - .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L - - .globl mcl_fp_addPre3L - .align 2 - .type 
mcl_fp_addPre3L,@function -mcl_fp_addPre3L: // @mcl_fp_addPre3L -// BB#0: - ldp x11, x8, [x2, #8] - ldp x9, x12, [x1] - ldr x10, [x2] - ldr x13, [x1, #16] - adds x9, x10, x9 - str x9, [x0] - adcs x9, x11, x12 - str x9, [x0, #8] - adcs x9, x8, x13 - adcs x8, xzr, xzr - str x9, [x0, #16] - mov x0, x8 - ret -.Lfunc_end41: - .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L - - .globl mcl_fp_subPre3L - .align 2 - .type mcl_fp_subPre3L,@function -mcl_fp_subPre3L: // @mcl_fp_subPre3L -// BB#0: - ldp x11, x8, [x2, #8] - ldp x9, x12, [x1] - ldr x10, [x2] - ldr x13, [x1, #16] - subs x9, x9, x10 - str x9, [x0] - sbcs x9, x12, x11 - str x9, [x0, #8] - sbcs x9, x13, x8 - ngcs x8, xzr - and x8, x8, #0x1 - str x9, [x0, #16] - mov x0, x8 - ret -.Lfunc_end42: - .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L - - .globl mcl_fp_shr1_3L - .align 2 - .type mcl_fp_shr1_3L,@function -mcl_fp_shr1_3L: // @mcl_fp_shr1_3L -// BB#0: - ldp x8, x9, [x1] - ldr x10, [x1, #16] - extr x8, x9, x8, #1 - extr x9, x10, x9, #1 - lsr x10, x10, #1 - stp x8, x9, [x0] - str x10, [x0, #16] - ret -.Lfunc_end43: - .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L - - .globl mcl_fp_add3L - .align 2 - .type mcl_fp_add3L,@function -mcl_fp_add3L: // @mcl_fp_add3L -// BB#0: - ldp x11, x8, [x2, #8] - ldp x9, x12, [x1] - ldr x10, [x2] - ldr x13, [x1, #16] - adds x9, x10, x9 - adcs x11, x11, x12 - ldr x10, [x3] - ldp x12, x14, [x3, #8] - stp x9, x11, [x0] - adcs x8, x8, x13 - str x8, [x0, #16] - adcs x13, xzr, xzr - subs x10, x9, x10 - sbcs x9, x11, x12 - sbcs x8, x8, x14 - sbcs x11, x13, xzr - and w11, w11, #0x1 - tbnz w11, #0, .LBB44_2 -// BB#1: // %nocarry - stp x10, x9, [x0] - str x8, [x0, #16] -.LBB44_2: // %carry - ret -.Lfunc_end44: - .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L - - .globl mcl_fp_addNF3L - .align 2 - .type mcl_fp_addNF3L,@function -mcl_fp_addNF3L: // @mcl_fp_addNF3L -// BB#0: - ldp x8, x9, [x1] - ldp x10, x11, [x2] - ldr x12, [x1, #16] - ldr x13, [x2, #16] - adds x8, x10, x8 - adcs x9, x11, x9 - ldp x10, x11, [x3] - ldr x14, [x3, #16] - adcs x12, x13, x12 - subs x10, x8, x10 - sbcs x11, x9, x11 - sbcs x13, x12, x14 - asr x14, x13, #63 - cmp x14, #0 // =0 - csel x8, x8, x10, lt - csel x9, x9, x11, lt - csel x10, x12, x13, lt - stp x8, x9, [x0] - str x10, [x0, #16] - ret -.Lfunc_end45: - .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L - - .globl mcl_fp_sub3L - .align 2 - .type mcl_fp_sub3L,@function -mcl_fp_sub3L: // @mcl_fp_sub3L -// BB#0: - ldp x11, x10, [x2, #8] - ldp x8, x12, [x1] - ldr x9, [x2] - ldr x13, [x1, #16] - subs x8, x8, x9 - sbcs x9, x12, x11 - stp x8, x9, [x0] - sbcs x10, x13, x10 - str x10, [x0, #16] - ngcs x11, xzr - and w11, w11, #0x1 - tbnz w11, #0, .LBB46_2 -// BB#1: // %nocarry - ret -.LBB46_2: // %carry - ldp x13, x11, [x3, #8] - ldr x12, [x3] - adds x8, x12, x8 - adcs x9, x13, x9 - adcs x10, x11, x10 - stp x8, x9, [x0] - str x10, [x0, #16] - ret -.Lfunc_end46: - .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L - - .globl mcl_fp_subNF3L - .align 2 - .type mcl_fp_subNF3L,@function -mcl_fp_subNF3L: // @mcl_fp_subNF3L -// BB#0: - ldp x8, x9, [x2] - ldp x10, x11, [x1] - ldr x12, [x2, #16] - ldr x13, [x1, #16] - subs x8, x10, x8 - sbcs x9, x11, x9 - ldp x10, x11, [x3] - ldr x14, [x3, #16] - sbcs x12, x13, x12 - asr x13, x12, #63 - and x11, x13, x11 - and x14, x13, x14 - extr x13, x13, x12, #63 - and x10, x13, x10 - adds x8, x10, x8 - str x8, [x0] - adcs x8, x11, x9 - str x8, [x0, #8] - adcs x8, x14, x12 - str x8, [x0, #16] - ret -.Lfunc_end47: - .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L - 
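The `mcl_fp_*2L`/`mcl_fp_*3L` routines above are LLVM-generated AArch64 straight-line code for fixed-width prime-field arithmetic; the suffix is the limb count, so `3L` means 3×64 = 192 bits. `addPre`/`subPre` are plain carry-chain add/sub that return the carry, `shr1` is a funnel shift right by one (`extr`/`lsr`), `add`/`sub` follow the operation with a conditional correction by the modulus (the `subs`/`sbcs` trial subtraction plus `tbnz`/`csel`), and the `NF` variants make that correction branch-free by selecting on the sign of the trial result. A minimal Go sketch of the pattern `mcl_fp_add3L` unrolls — with an illustrative modulus, not the vendored code path:

```go
package main

import (
	"fmt"
	"math/bits"
)

// fpAdd3 returns x + y mod p for little-endian 3-limb inputs x, y < p.
// It mirrors mcl_fp_add3L: a carry-chain add, a trial subtraction of
// the modulus, and a flag-driven select between the two results.
func fpAdd3(x, y, p [3]uint64) [3]uint64 {
	var z, t [3]uint64
	var carry, borrow uint64

	// adds/adcs: full-width addition with carry out.
	z[0], carry = bits.Add64(x[0], y[0], 0)
	z[1], carry = bits.Add64(x[1], y[1], carry)
	z[2], carry = bits.Add64(x[2], y[2], carry)

	// subs/sbcs: trial subtraction of the modulus.
	t[0], borrow = bits.Sub64(z[0], p[0], 0)
	t[1], borrow = bits.Sub64(z[1], p[1], borrow)
	t[2], borrow = bits.Sub64(z[2], p[2], borrow)

	// tbnz/csel: keep the unreduced sum only when x+y < p, i.e. when
	// the trial subtraction borrowed past the addition's carry word.
	if carry == 0 && borrow != 0 {
		return z
	}
	return t
}

func main() {
	p := [3]uint64{0xffffffffffffffef, ^uint64(0), ^uint64(0)} // 2^192-17, illustrative
	a := [3]uint64{0xffffffffffffffee, ^uint64(0), ^uint64(0)} // p - 1
	b := [3]uint64{5, 0, 0}
	fmt.Println(fpAdd3(a, b, p)) // (p-1)+5 mod p -> [4 0 0]
}
```

The `carry == 0 && borrow != 0` test plays the role of the assembly's `sbcs x11, x13, xzr; and w11, w11, #0x1; tbnz w11, #0, ...` sequence: the sum is kept unreduced only when it is still below the modulus.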
- .globl mcl_fpDbl_add3L - .align 2 - .type mcl_fpDbl_add3L,@function -mcl_fpDbl_add3L: // @mcl_fpDbl_add3L -// BB#0: - ldp x8, x9, [x2, #32] - ldp x10, x11, [x1, #32] - ldp x12, x13, [x2, #16] - ldp x15, x18, [x2] - ldp x16, x17, [x1, #16] - ldp x14, x1, [x1] - adds x14, x15, x14 - ldr x15, [x3, #16] - str x14, [x0] - ldp x14, x2, [x3] - adcs x18, x18, x1 - adcs x12, x12, x16 - stp x18, x12, [x0, #8] - adcs x12, x13, x17 - adcs x8, x8, x10 - adcs x9, x9, x11 - adcs x10, xzr, xzr - subs x11, x12, x14 - sbcs x13, x8, x2 - sbcs x14, x9, x15 - sbcs x10, x10, xzr - tst x10, #0x1 - csel x10, x12, x11, ne - csel x8, x8, x13, ne - csel x9, x9, x14, ne - stp x10, x8, [x0, #24] - str x9, [x0, #40] - ret -.Lfunc_end48: - .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L - - .globl mcl_fpDbl_sub3L - .align 2 - .type mcl_fpDbl_sub3L,@function -mcl_fpDbl_sub3L: // @mcl_fpDbl_sub3L -// BB#0: - ldp x8, x9, [x2, #32] - ldp x10, x11, [x1, #32] - ldp x12, x13, [x2, #16] - ldp x14, x18, [x2] - ldp x16, x17, [x1, #16] - ldp x15, x1, [x1] - subs x14, x15, x14 - ldr x15, [x3, #16] - str x14, [x0] - ldp x14, x2, [x3] - sbcs x18, x1, x18 - sbcs x12, x16, x12 - stp x18, x12, [x0, #8] - sbcs x12, x17, x13 - sbcs x8, x10, x8 - sbcs x9, x11, x9 - ngcs x10, xzr - tst x10, #0x1 - csel x10, x15, xzr, ne - csel x11, x2, xzr, ne - csel x13, x14, xzr, ne - adds x12, x13, x12 - adcs x8, x11, x8 - stp x12, x8, [x0, #24] - adcs x8, x10, x9 - str x8, [x0, #40] - ret -.Lfunc_end49: - .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L - - .globl mcl_fp_mulUnitPre4L - .align 2 - .type mcl_fp_mulUnitPre4L,@function -mcl_fp_mulUnitPre4L: // @mcl_fp_mulUnitPre4L -// BB#0: - ldp x8, x9, [x1] - ldp x10, x11, [x1, #16] - mul x12, x8, x2 - mul x13, x9, x2 - umulh x8, x8, x2 - mul x14, x10, x2 - umulh x9, x9, x2 - mul x15, x11, x2 - umulh x10, x10, x2 - umulh x11, x11, x2 - adds x8, x8, x13 - stp x12, x8, [x0] - adcs x8, x9, x14 - str x8, [x0, #16] - adcs x8, x10, x15 - str x8, [x0, #24] - adcs x8, x11, xzr - str x8, [x0, #32] - ret -.Lfunc_end50: - .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L - - .globl mcl_fpDbl_mulPre4L - .align 2 - .type mcl_fpDbl_mulPre4L,@function -mcl_fpDbl_mulPre4L: // @mcl_fpDbl_mulPre4L -// BB#0: - stp x28, x27, [sp, #-96]! 
- stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - sub sp, sp, #32 // =32 - ldp x8, x10, [x1] - ldp x9, x11, [x1] - ldp x12, x14, [x1, #16] - ldp x13, x1, [x1, #16] - ldp x15, x16, [x2] - ldp x17, x18, [x2, #16] - mul x2, x8, x15 - umulh x3, x14, x15 - mul x4, x14, x15 - umulh x5, x12, x15 - mul x6, x12, x15 - umulh x7, x10, x15 - mul x19, x10, x15 - umulh x15, x8, x15 - mul x20, x8, x16 - mul x21, x14, x16 - mul x22, x12, x16 - mul x23, x10, x16 - umulh x24, x14, x16 - umulh x25, x12, x16 - umulh x26, x10, x16 - umulh x16, x8, x16 - mul x27, x8, x17 - mul x28, x14, x17 - mul x29, x12, x17 - mul x30, x10, x17 - umulh x14, x14, x17 - stp x3, x14, [sp, #16] - umulh x12, x12, x17 - str x12, [sp, #8] // 8-byte Folded Spill - umulh x3, x10, x17 - umulh x14, x8, x17 - mul x17, x9, x18 - umulh x12, x9, x18 - mul x10, x11, x18 - umulh x11, x11, x18 - mul x9, x13, x18 - umulh x13, x13, x18 - mul x8, x1, x18 - umulh x18, x1, x18 - str x2, [x0] - adds x15, x15, x19 - adcs x1, x7, x6 - adcs x2, x5, x4 - ldr x4, [sp, #16] // 8-byte Folded Reload - adcs x4, x4, xzr - adds x15, x20, x15 - str x15, [x0, #8] - adcs x15, x23, x1 - adcs x1, x22, x2 - adcs x2, x21, x4 - adcs x4, xzr, xzr - adds x15, x15, x16 - adcs x16, x1, x26 - adcs x1, x2, x25 - adcs x2, x4, x24 - adds x15, x15, x27 - str x15, [x0, #16] - adcs x15, x16, x30 - adcs x16, x1, x29 - adcs x1, x2, x28 - adcs x2, xzr, xzr - adds x14, x15, x14 - adcs x15, x16, x3 - ldr x16, [sp, #8] // 8-byte Folded Reload - adcs x16, x1, x16 - ldr x1, [sp, #24] // 8-byte Folded Reload - adcs x1, x2, x1 - adds x14, x14, x17 - str x14, [x0, #24] - adcs x10, x15, x10 - adcs x9, x16, x9 - adcs x8, x1, x8 - adcs x14, xzr, xzr - adds x10, x10, x12 - adcs x9, x9, x11 - stp x10, x9, [x0, #32] - adcs x8, x8, x13 - str x8, [x0, #48] - adcs x8, x14, x18 - str x8, [x0, #56] - add sp, sp, #32 // =32 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end51: - .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L - - .globl mcl_fpDbl_sqrPre4L - .align 2 - .type mcl_fpDbl_sqrPre4L,@function -mcl_fpDbl_sqrPre4L: // @mcl_fpDbl_sqrPre4L -// BB#0: - ldp x8, x9, [x1] - ldp x10, x13, [x1] - ldp x11, x12, [x1, #16] - ldr x14, [x1, #16] - mul x15, x10, x10 - umulh x16, x12, x10 - mul x17, x12, x10 - umulh x18, x14, x10 - mul x2, x14, x10 - umulh x3, x9, x10 - mul x4, x9, x10 - umulh x10, x10, x10 - str x15, [x0] - adds x10, x10, x4 - adcs x15, x3, x2 - adcs x17, x18, x17 - adcs x16, x16, xzr - adds x10, x10, x4 - mul x4, x12, x9 - str x10, [x0, #8] - mul x10, x9, x9 - adcs x10, x15, x10 - mul x15, x14, x9 - adcs x17, x17, x15 - adcs x16, x16, x4 - adcs x4, xzr, xzr - adds x10, x10, x3 - umulh x3, x9, x9 - adcs x17, x17, x3 - umulh x3, x12, x9 - umulh x9, x14, x9 - adcs x16, x16, x9 - adcs x3, x4, x3 - ldr x1, [x1, #24] - adds x10, x10, x2 - mul x2, x12, x14 - str x10, [x0, #16] - mul x10, x14, x14 - umulh x12, x12, x14 - umulh x14, x14, x14 - adcs x15, x17, x15 - mul x17, x8, x1 - adcs x10, x16, x10 - mul x16, x11, x1 - adcs x2, x3, x2 - adcs x3, xzr, xzr - adds x15, x15, x18 - mul x18, x13, x1 - adcs x9, x10, x9 - mul x10, x1, x1 - umulh x8, x8, x1 - umulh x13, x13, x1 - umulh x11, x11, x1 - umulh x1, x1, x1 - adcs x14, x2, x14 - adcs x12, x3, x12 - adds x15, x15, x17 - adcs x9, x9, x18 - adcs x14, x14, x16 - adcs x10, x12, x10 - adcs x12, xzr, xzr - adds x8, x9, x8 - stp x15, x8, [x0, 
#24] - adcs x8, x14, x13 - str x8, [x0, #40] - adcs x8, x10, x11 - str x8, [x0, #48] - adcs x8, x12, x1 - str x8, [x0, #56] - ret -.Lfunc_end52: - .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L - - .globl mcl_fp_mont4L - .align 2 - .type mcl_fp_mont4L,@function -mcl_fp_mont4L: // @mcl_fp_mont4L -// BB#0: - stp x28, x27, [sp, #-96]! - stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - sub sp, sp, #16 // =16 - str x0, [sp, #8] // 8-byte Folded Spill - ldp x13, x16, [x1, #16] - ldp x14, x15, [x1] - ldur x0, [x3, #-8] - ldp x9, x8, [x3, #16] - ldp x11, x10, [x3] - ldp x17, x18, [x2] - ldp x1, x2, [x2, #16] - umulh x3, x16, x17 - mul x4, x16, x17 - umulh x5, x13, x17 - mul x6, x13, x17 - umulh x7, x15, x17 - mul x19, x15, x17 - umulh x20, x14, x17 - mul x17, x14, x17 - umulh x21, x18, x16 - mul x22, x18, x16 - umulh x23, x18, x13 - mul x24, x18, x13 - umulh x25, x18, x15 - mul x26, x18, x15 - umulh x27, x18, x14 - mul x18, x18, x14 - umulh x28, x1, x16 - adds x19, x20, x19 - mul x20, x17, x0 - adcs x6, x7, x6 - mul x7, x20, x8 - mul x29, x20, x9 - mul x30, x20, x10 - adcs x4, x5, x4 - umulh x5, x20, x11 - adcs x3, x3, xzr - adds x5, x5, x30 - umulh x30, x20, x10 - adcs x29, x30, x29 - umulh x30, x20, x9 - adcs x7, x30, x7 - umulh x30, x20, x8 - mul x20, x20, x11 - adcs x30, x30, xzr - cmn x20, x17 - mul x17, x1, x16 - umulh x20, x1, x13 - adcs x5, x5, x19 - mul x19, x1, x13 - adcs x6, x29, x6 - umulh x29, x1, x15 - adcs x4, x7, x4 - mul x7, x1, x15 - adcs x3, x30, x3 - adcs x30, xzr, xzr - adds x26, x27, x26 - umulh x27, x1, x14 - mul x1, x1, x14 - adcs x24, x25, x24 - umulh x25, x2, x16 - mul x16, x2, x16 - adcs x22, x23, x22 - adcs x21, x21, xzr - adds x18, x5, x18 - adcs x5, x6, x26 - mul x6, x18, x0 - adcs x4, x4, x24 - mul x23, x6, x8 - mul x24, x6, x9 - mul x26, x6, x10 - adcs x3, x3, x22 - umulh x22, x6, x11 - adcs x21, x30, x21 - adcs x30, xzr, xzr - adds x22, x22, x26 - umulh x26, x6, x10 - adcs x24, x26, x24 - umulh x26, x6, x9 - adcs x23, x26, x23 - umulh x26, x6, x8 - mul x6, x6, x11 - adcs x26, x26, xzr - cmn x6, x18 - umulh x18, x2, x13 - mul x13, x2, x13 - umulh x6, x2, x15 - mul x15, x2, x15 - umulh x12, x2, x14 - mul x14, x2, x14 - adcs x2, x22, x5 - adcs x4, x24, x4 - adcs x3, x23, x3 - adcs x5, x26, x21 - adcs x21, x30, xzr - adds x7, x27, x7 - adcs x19, x29, x19 - adcs x17, x20, x17 - adcs x20, x28, xzr - adds x1, x2, x1 - adcs x2, x4, x7 - mul x4, x1, x0 - adcs x3, x3, x19 - mul x7, x4, x8 - mul x19, x4, x9 - mul x22, x4, x10 - adcs x17, x5, x17 - umulh x5, x4, x11 - adcs x20, x21, x20 - adcs x21, xzr, xzr - adds x5, x5, x22 - umulh x22, x4, x10 - adcs x19, x22, x19 - umulh x22, x4, x9 - adcs x7, x22, x7 - umulh x22, x4, x8 - mul x4, x4, x11 - adcs x22, x22, xzr - cmn x4, x1 - adcs x1, x5, x2 - adcs x2, x19, x3 - adcs x17, x7, x17 - adcs x3, x22, x20 - adcs x4, x21, xzr - adds x12, x12, x15 - adcs x13, x6, x13 - adcs x15, x18, x16 - adcs x16, x25, xzr - adds x14, x1, x14 - adcs x12, x2, x12 - mul x18, x14, x0 - adcs x13, x17, x13 - umulh x17, x18, x8 - mul x0, x18, x8 - umulh x1, x18, x9 - mul x2, x18, x9 - umulh x5, x18, x10 - mul x6, x18, x10 - umulh x7, x18, x11 - mul x18, x18, x11 - adcs x15, x3, x15 - adcs x16, x4, x16 - adcs x3, xzr, xzr - adds x4, x7, x6 - adcs x2, x5, x2 - adcs x0, x1, x0 - adcs x17, x17, xzr - cmn x18, x14 - adcs x12, x4, x12 - adcs x13, x2, x13 - adcs x14, x0, x15 - adcs x15, x17, x16 - adcs x16, x3, xzr - subs x11, x12, x11 - sbcs x10, x13, x10 - sbcs x9, 
x14, x9 - sbcs x8, x15, x8 - sbcs x16, x16, xzr - tst x16, #0x1 - csel x11, x12, x11, ne - csel x10, x13, x10, ne - csel x9, x14, x9, ne - csel x8, x15, x8, ne - ldr x12, [sp, #8] // 8-byte Folded Reload - stp x11, x10, [x12] - stp x9, x8, [x12, #16] - add sp, sp, #16 // =16 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end53: - .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L - - .globl mcl_fp_montNF4L - .align 2 - .type mcl_fp_montNF4L,@function -mcl_fp_montNF4L: // @mcl_fp_montNF4L -// BB#0: - stp x28, x27, [sp, #-80]! - stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - ldp x14, x15, [x1, #16] - ldp x13, x16, [x1] - ldur x12, [x3, #-8] - ldp x9, x8, [x3, #16] - ldp x11, x10, [x3] - ldp x17, x18, [x2] - ldp x1, x2, [x2, #16] - umulh x3, x15, x17 - mul x4, x15, x17 - umulh x5, x14, x17 - mul x6, x14, x17 - umulh x7, x16, x17 - mul x19, x16, x17 - umulh x20, x13, x17 - mul x17, x13, x17 - umulh x21, x18, x15 - mul x22, x18, x15 - umulh x23, x18, x14 - mul x24, x18, x14 - umulh x25, x18, x16 - mul x26, x18, x16 - umulh x27, x18, x13 - mul x18, x18, x13 - adds x19, x20, x19 - umulh x20, x1, x15 - adcs x6, x7, x6 - mul x7, x17, x12 - adcs x4, x5, x4 - mul x5, x7, x11 - adcs x3, x3, xzr - cmn x5, x17 - mul x17, x1, x15 - mul x5, x7, x10 - adcs x5, x5, x19 - mul x19, x7, x9 - adcs x6, x19, x6 - mul x19, x7, x8 - adcs x4, x19, x4 - umulh x19, x7, x11 - adcs x3, x3, xzr - adds x5, x5, x19 - umulh x19, x7, x10 - adcs x6, x6, x19 - umulh x19, x7, x9 - adcs x4, x4, x19 - umulh x19, x1, x14 - umulh x7, x7, x8 - adcs x3, x3, x7 - mul x7, x1, x14 - adds x26, x27, x26 - umulh x27, x1, x16 - adcs x24, x25, x24 - mul x25, x1, x16 - adcs x22, x23, x22 - umulh x23, x1, x13 - mul x1, x1, x13 - adcs x21, x21, xzr - adds x18, x18, x5 - umulh x5, x2, x15 - mul x15, x2, x15 - adcs x6, x26, x6 - umulh x26, x2, x14 - mul x14, x2, x14 - adcs x4, x24, x4 - mul x24, x18, x12 - adcs x3, x22, x3 - mul x22, x24, x11 - adcs x21, x21, xzr - cmn x22, x18 - umulh x18, x2, x16 - mul x16, x2, x16 - umulh x22, x2, x13 - mul x13, x2, x13 - mul x2, x24, x10 - adcs x2, x2, x6 - mul x6, x24, x9 - adcs x4, x6, x4 - mul x6, x24, x8 - adcs x3, x6, x3 - umulh x6, x24, x11 - adcs x21, x21, xzr - adds x2, x2, x6 - umulh x6, x24, x10 - adcs x4, x4, x6 - umulh x6, x24, x9 - adcs x3, x3, x6 - umulh x6, x24, x8 - adcs x6, x21, x6 - adds x21, x23, x25 - adcs x7, x27, x7 - adcs x17, x19, x17 - adcs x19, x20, xzr - adds x1, x1, x2 - adcs x2, x21, x4 - mul x4, x1, x12 - adcs x3, x7, x3 - mul x7, x4, x8 - mul x20, x4, x9 - adcs x17, x17, x6 - mul x6, x4, x11 - adcs x19, x19, xzr - cmn x6, x1 - mul x1, x4, x10 - umulh x6, x4, x8 - adcs x1, x1, x2 - umulh x2, x4, x9 - adcs x3, x20, x3 - umulh x20, x4, x10 - umulh x4, x4, x11 - adcs x17, x7, x17 - adcs x7, x19, xzr - adds x1, x1, x4 - adcs x3, x3, x20 - adcs x17, x17, x2 - adcs x2, x7, x6 - adds x16, x22, x16 - adcs x14, x18, x14 - adcs x15, x26, x15 - adcs x18, x5, xzr - adds x13, x13, x1 - adcs x16, x16, x3 - mul x12, x13, x12 - adcs x14, x14, x17 - mul x17, x12, x8 - mul x1, x12, x9 - mul x3, x12, x10 - mul x4, x12, x11 - umulh x5, x12, x8 - umulh x6, x12, x9 - umulh x7, x12, x10 - umulh x12, x12, x11 - adcs x15, x15, x2 - adcs x18, x18, xzr - cmn x4, x13 - adcs x13, x3, x16 - adcs x14, x1, x14 - adcs x15, x17, x15 - adcs x16, x18, xzr - adds x12, x13, x12 - adcs x13, x14, x7 - adcs x14, x15, x6 - adcs x15, x16, x5 - subs 
x11, x12, x11 - sbcs x10, x13, x10 - sbcs x9, x14, x9 - sbcs x8, x15, x8 - cmp x8, #0 // =0 - csel x11, x12, x11, lt - csel x10, x13, x10, lt - csel x9, x14, x9, lt - csel x8, x15, x8, lt - stp x11, x10, [x0] - stp x9, x8, [x0, #16] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #80 - ret -.Lfunc_end54: - .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L - - .globl mcl_fp_montRed4L - .align 2 - .type mcl_fp_montRed4L,@function -mcl_fp_montRed4L: // @mcl_fp_montRed4L -// BB#0: - stp x22, x21, [sp, #-32]! - stp x20, x19, [sp, #16] - ldur x12, [x2, #-8] - ldp x9, x8, [x2, #16] - ldp x11, x10, [x2] - ldp x14, x15, [x1, #48] - ldp x16, x17, [x1, #32] - ldp x18, x2, [x1, #16] - ldp x13, x1, [x1] - mul x3, x13, x12 - umulh x4, x3, x8 - mul x5, x3, x8 - umulh x6, x3, x9 - mul x7, x3, x9 - umulh x19, x3, x10 - mul x20, x3, x10 - umulh x21, x3, x11 - mul x3, x3, x11 - adds x20, x21, x20 - adcs x7, x19, x7 - adcs x5, x6, x5 - adcs x4, x4, xzr - cmn x13, x3 - adcs x13, x1, x20 - adcs x18, x18, x7 - mul x1, x13, x12 - adcs x2, x2, x5 - umulh x3, x1, x8 - mul x5, x1, x8 - umulh x6, x1, x9 - mul x7, x1, x9 - umulh x19, x1, x10 - mul x20, x1, x10 - umulh x21, x1, x11 - mul x1, x1, x11 - adcs x16, x16, x4 - adcs x17, x17, xzr - adcs x14, x14, xzr - adcs x15, x15, xzr - adcs x4, xzr, xzr - adds x20, x21, x20 - adcs x7, x19, x7 - adcs x5, x6, x5 - adcs x3, x3, xzr - cmn x1, x13 - adcs x13, x20, x18 - adcs x18, x7, x2 - mul x1, x13, x12 - adcs x16, x5, x16 - umulh x2, x1, x8 - mul x5, x1, x8 - umulh x6, x1, x9 - mul x7, x1, x9 - umulh x19, x1, x10 - mul x20, x1, x10 - umulh x21, x1, x11 - mul x1, x1, x11 - adcs x17, x3, x17 - adcs x14, x14, xzr - adcs x15, x15, xzr - adcs x3, x4, xzr - adds x4, x21, x20 - adcs x7, x19, x7 - adcs x5, x6, x5 - adcs x2, x2, xzr - cmn x1, x13 - adcs x13, x4, x18 - adcs x16, x7, x16 - mul x12, x13, x12 - adcs x17, x5, x17 - umulh x18, x12, x8 - mul x1, x12, x8 - umulh x4, x12, x9 - mul x5, x12, x9 - umulh x6, x12, x10 - mul x7, x12, x10 - umulh x19, x12, x11 - mul x12, x12, x11 - adcs x14, x2, x14 - adcs x15, x15, xzr - adcs x2, x3, xzr - adds x3, x19, x7 - adcs x5, x6, x5 - adcs x1, x4, x1 - adcs x18, x18, xzr - cmn x12, x13 - adcs x12, x3, x16 - adcs x13, x5, x17 - adcs x14, x1, x14 - adcs x15, x18, x15 - adcs x16, x2, xzr - subs x11, x12, x11 - sbcs x10, x13, x10 - sbcs x9, x14, x9 - sbcs x8, x15, x8 - sbcs x16, x16, xzr - tst x16, #0x1 - csel x11, x12, x11, ne - csel x10, x13, x10, ne - csel x9, x14, x9, ne - csel x8, x15, x8, ne - stp x11, x10, [x0] - stp x9, x8, [x0, #16] - ldp x20, x19, [sp, #16] - ldp x22, x21, [sp], #32 - ret -.Lfunc_end55: - .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L - - .globl mcl_fp_addPre4L - .align 2 - .type mcl_fp_addPre4L,@function -mcl_fp_addPre4L: // @mcl_fp_addPre4L -// BB#0: - ldp x8, x9, [x2, #16] - ldp x10, x11, [x2] - ldp x12, x13, [x1] - ldp x14, x15, [x1, #16] - adds x10, x10, x12 - str x10, [x0] - adcs x10, x11, x13 - adcs x8, x8, x14 - stp x10, x8, [x0, #8] - adcs x9, x9, x15 - adcs x8, xzr, xzr - str x9, [x0, #24] - mov x0, x8 - ret -.Lfunc_end56: - .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L - - .globl mcl_fp_subPre4L - .align 2 - .type mcl_fp_subPre4L,@function -mcl_fp_subPre4L: // @mcl_fp_subPre4L -// BB#0: - ldp x8, x9, [x2, #16] - ldp x10, x11, [x2] - ldp x12, x13, [x1] - ldp x14, x15, [x1, #16] - subs x10, x12, x10 - str x10, [x0] - sbcs x10, x13, x11 - sbcs x8, x14, x8 - stp x10, x8, [x0, #8] - sbcs x9, x15, x9 - ngcs x8, xzr - and 
x8, x8, #0x1 - str x9, [x0, #24] - mov x0, x8 - ret -.Lfunc_end57: - .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L - - .globl mcl_fp_shr1_4L - .align 2 - .type mcl_fp_shr1_4L,@function -mcl_fp_shr1_4L: // @mcl_fp_shr1_4L -// BB#0: - ldp x8, x9, [x1] - ldp x10, x11, [x1, #16] - extr x8, x9, x8, #1 - extr x9, x10, x9, #1 - extr x10, x11, x10, #1 - lsr x11, x11, #1 - stp x8, x9, [x0] - stp x10, x11, [x0, #16] - ret -.Lfunc_end58: - .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L - - .globl mcl_fp_add4L - .align 2 - .type mcl_fp_add4L,@function -mcl_fp_add4L: // @mcl_fp_add4L -// BB#0: - ldp x8, x9, [x2, #16] - ldp x10, x11, [x2] - ldp x12, x13, [x1] - ldp x14, x15, [x1, #16] - adds x10, x10, x12 - adcs x12, x11, x13 - ldp x11, x13, [x3] - stp x10, x12, [x0] - adcs x8, x8, x14 - adcs x14, x9, x15 - stp x8, x14, [x0, #16] - adcs x15, xzr, xzr - ldp x9, x16, [x3, #16] - subs x11, x10, x11 - sbcs x10, x12, x13 - sbcs x9, x8, x9 - sbcs x8, x14, x16 - sbcs x12, x15, xzr - and w12, w12, #0x1 - tbnz w12, #0, .LBB59_2 -// BB#1: // %nocarry - stp x11, x10, [x0] - stp x9, x8, [x0, #16] -.LBB59_2: // %carry - ret -.Lfunc_end59: - .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L - - .globl mcl_fp_addNF4L - .align 2 - .type mcl_fp_addNF4L,@function -mcl_fp_addNF4L: // @mcl_fp_addNF4L -// BB#0: - ldp x8, x9, [x1, #16] - ldp x10, x11, [x1] - ldp x12, x13, [x2] - ldp x14, x15, [x2, #16] - adds x10, x12, x10 - adcs x11, x13, x11 - ldp x12, x13, [x3] - adcs x8, x14, x8 - ldp x14, x16, [x3, #16] - adcs x9, x15, x9 - subs x12, x10, x12 - sbcs x13, x11, x13 - sbcs x14, x8, x14 - sbcs x15, x9, x16 - cmp x15, #0 // =0 - csel x10, x10, x12, lt - csel x11, x11, x13, lt - csel x8, x8, x14, lt - csel x9, x9, x15, lt - stp x10, x11, [x0] - stp x8, x9, [x0, #16] - ret -.Lfunc_end60: - .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L - - .globl mcl_fp_sub4L - .align 2 - .type mcl_fp_sub4L,@function -mcl_fp_sub4L: // @mcl_fp_sub4L -// BB#0: - ldp x10, x11, [x2, #16] - ldp x8, x9, [x2] - ldp x12, x13, [x1] - ldp x14, x15, [x1, #16] - subs x8, x12, x8 - sbcs x9, x13, x9 - stp x8, x9, [x0] - sbcs x10, x14, x10 - sbcs x11, x15, x11 - stp x10, x11, [x0, #16] - ngcs x12, xzr - and w12, w12, #0x1 - tbnz w12, #0, .LBB61_2 -// BB#1: // %nocarry - ret -.LBB61_2: // %carry - ldp x12, x13, [x3, #16] - ldp x14, x15, [x3] - adds x8, x14, x8 - adcs x9, x15, x9 - adcs x10, x12, x10 - adcs x11, x13, x11 - stp x8, x9, [x0] - stp x10, x11, [x0, #16] - ret -.Lfunc_end61: - .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L - - .globl mcl_fp_subNF4L - .align 2 - .type mcl_fp_subNF4L,@function -mcl_fp_subNF4L: // @mcl_fp_subNF4L -// BB#0: - ldp x8, x9, [x2, #16] - ldp x10, x11, [x2] - ldp x12, x13, [x1] - ldp x14, x15, [x1, #16] - subs x10, x12, x10 - sbcs x11, x13, x11 - ldp x12, x13, [x3, #16] - sbcs x8, x14, x8 - ldp x14, x16, [x3] - sbcs x9, x15, x9 - asr x15, x9, #63 - and x14, x15, x14 - and x16, x15, x16 - and x12, x15, x12 - and x13, x15, x13 - adds x10, x14, x10 - str x10, [x0] - adcs x10, x16, x11 - adcs x8, x12, x8 - stp x10, x8, [x0, #8] - adcs x8, x13, x9 - str x8, [x0, #24] - ret -.Lfunc_end62: - .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L - - .globl mcl_fpDbl_add4L - .align 2 - .type mcl_fpDbl_add4L,@function -mcl_fpDbl_add4L: // @mcl_fpDbl_add4L -// BB#0: - ldp x8, x9, [x2, #48] - ldp x10, x11, [x1, #48] - ldp x12, x13, [x2, #32] - ldp x14, x15, [x1, #32] - ldp x16, x17, [x2, #16] - ldp x4, x2, [x2] - ldp x5, x6, [x1, #16] - ldp x18, x1, [x1] - adds x18, x4, x18 - str x18, [x0] - ldp x18, x4, [x3, #16] - adcs x1, x2, x1 - 
ldp x2, x3, [x3] - adcs x16, x16, x5 - stp x1, x16, [x0, #8] - adcs x16, x17, x6 - str x16, [x0, #24] - adcs x12, x12, x14 - adcs x13, x13, x15 - adcs x8, x8, x10 - adcs x9, x9, x11 - adcs x10, xzr, xzr - subs x11, x12, x2 - sbcs x14, x13, x3 - sbcs x15, x8, x18 - sbcs x16, x9, x4 - sbcs x10, x10, xzr - tst x10, #0x1 - csel x10, x12, x11, ne - csel x11, x13, x14, ne - csel x8, x8, x15, ne - csel x9, x9, x16, ne - stp x10, x11, [x0, #32] - stp x8, x9, [x0, #48] - ret -.Lfunc_end63: - .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L - - .globl mcl_fpDbl_sub4L - .align 2 - .type mcl_fpDbl_sub4L,@function -mcl_fpDbl_sub4L: // @mcl_fpDbl_sub4L -// BB#0: - ldp x8, x9, [x2, #48] - ldp x10, x11, [x1, #48] - ldp x12, x13, [x2, #32] - ldp x14, x15, [x1, #32] - ldp x16, x17, [x2, #16] - ldp x18, x2, [x2] - ldp x5, x6, [x1, #16] - ldp x4, x1, [x1] - subs x18, x4, x18 - str x18, [x0] - ldp x18, x4, [x3, #16] - sbcs x1, x1, x2 - ldp x2, x3, [x3] - sbcs x16, x5, x16 - stp x1, x16, [x0, #8] - sbcs x16, x6, x17 - sbcs x12, x14, x12 - sbcs x13, x15, x13 - sbcs x8, x10, x8 - sbcs x9, x11, x9 - ngcs x10, xzr - tst x10, #0x1 - csel x10, x4, xzr, ne - csel x11, x18, xzr, ne - csel x14, x3, xzr, ne - csel x15, x2, xzr, ne - adds x12, x15, x12 - stp x16, x12, [x0, #24] - adcs x12, x14, x13 - adcs x8, x11, x8 - stp x12, x8, [x0, #40] - adcs x8, x10, x9 - str x8, [x0, #56] - ret -.Lfunc_end64: - .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L - - .globl mcl_fp_mulUnitPre5L - .align 2 - .type mcl_fp_mulUnitPre5L,@function -mcl_fp_mulUnitPre5L: // @mcl_fp_mulUnitPre5L -// BB#0: - ldp x12, x8, [x1, #24] - ldp x9, x10, [x1] - ldr x11, [x1, #16] - mul x13, x9, x2 - mul x14, x10, x2 - umulh x9, x9, x2 - mul x15, x11, x2 - umulh x10, x10, x2 - mul x16, x12, x2 - umulh x11, x11, x2 - mul x17, x8, x2 - umulh x12, x12, x2 - umulh x8, x8, x2 - adds x9, x9, x14 - stp x13, x9, [x0] - adcs x9, x10, x15 - str x9, [x0, #16] - adcs x9, x11, x16 - str x9, [x0, #24] - adcs x9, x12, x17 - adcs x8, x8, xzr - stp x9, x8, [x0, #32] - ret -.Lfunc_end65: - .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L - - .globl mcl_fpDbl_mulPre5L - .align 2 - .type mcl_fpDbl_mulPre5L,@function -mcl_fpDbl_mulPre5L: // @mcl_fpDbl_mulPre5L -// BB#0: - stp x28, x27, [sp, #-96]! 
- stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - sub sp, sp, #176 // =176 - ldp x8, x10, [x1] - ldp x9, x15, [x1] - ldp x11, x12, [x1, #24] - ldp x13, x14, [x2] - ldp x16, x18, [x1, #16] - ldr x17, [x1, #16] - ldr x3, [x1, #32] - ldp x4, x5, [x2, #16] - mul x6, x8, x13 - str x6, [sp, #72] // 8-byte Folded Spill - umulh x6, x12, x13 - str x6, [sp, #168] // 8-byte Folded Spill - mul x6, x12, x13 - str x6, [sp, #152] // 8-byte Folded Spill - umulh x6, x11, x13 - str x6, [sp, #112] // 8-byte Folded Spill - mul x6, x11, x13 - str x6, [sp, #64] // 8-byte Folded Spill - umulh x6, x17, x13 - mul x23, x17, x13 - umulh x24, x10, x13 - mul x25, x10, x13 - umulh x7, x8, x13 - mul x26, x8, x14 - mul x13, x12, x14 - str x13, [sp, #104] // 8-byte Folded Spill - mul x13, x11, x14 - stp x13, x6, [sp, #40] - mul x29, x17, x14 - mul x30, x10, x14 - umulh x12, x12, x14 - umulh x11, x11, x14 - str x11, [sp, #96] // 8-byte Folded Spill - umulh x11, x17, x14 - umulh x27, x10, x14 - umulh x20, x8, x14 - mul x8, x9, x4 - stp x8, x11, [sp, #24] - mul x8, x3, x4 - stp x8, x12, [sp, #136] - mul x8, x18, x4 - str x8, [sp, #88] // 8-byte Folded Spill - mul x8, x16, x4 - str x8, [sp, #16] // 8-byte Folded Spill - mul x28, x15, x4 - umulh x8, x3, x4 - str x8, [sp, #160] // 8-byte Folded Spill - umulh x8, x18, x4 - str x8, [sp, #128] // 8-byte Folded Spill - umulh x8, x16, x4 - str x8, [sp, #80] // 8-byte Folded Spill - umulh x8, x15, x4 - str x8, [sp, #8] // 8-byte Folded Spill - umulh x22, x9, x4 - mul x8, x3, x5 - str x8, [sp, #120] // 8-byte Folded Spill - umulh x8, x3, x5 - str x8, [sp, #56] // 8-byte Folded Spill - mul x6, x18, x5 - umulh x21, x18, x5 - mul x3, x16, x5 - umulh x19, x16, x5 - mul x17, x15, x5 - umulh x4, x15, x5 - mul x16, x9, x5 - umulh x18, x9, x5 - ldr x2, [x2, #32] - ldp x10, x5, [x1, #16] - ldp x8, x9, [x1] - ldr x1, [x1, #32] - mul x15, x8, x2 - umulh x14, x8, x2 - mul x12, x9, x2 - umulh x13, x9, x2 - mul x11, x10, x2 - umulh x10, x10, x2 - mul x9, x5, x2 - umulh x5, x5, x2 - mul x8, x1, x2 - umulh x1, x1, x2 - ldr x2, [sp, #72] // 8-byte Folded Reload - str x2, [x0] - adds x2, x7, x25 - adcs x7, x24, x23 - ldr x23, [sp, #64] // 8-byte Folded Reload - ldr x24, [sp, #48] // 8-byte Folded Reload - adcs x23, x24, x23 - ldr x24, [sp, #152] // 8-byte Folded Reload - ldr x25, [sp, #112] // 8-byte Folded Reload - adcs x24, x25, x24 - ldr x25, [sp, #168] // 8-byte Folded Reload - adcs x25, x25, xzr - adds x2, x26, x2 - str x2, [x0, #8] - adcs x2, x30, x7 - adcs x7, x29, x23 - ldr x23, [sp, #40] // 8-byte Folded Reload - adcs x23, x23, x24 - ldr x24, [sp, #104] // 8-byte Folded Reload - adcs x24, x24, x25 - adcs x25, xzr, xzr - adds x2, x2, x20 - adcs x7, x7, x27 - ldr x20, [sp, #32] // 8-byte Folded Reload - adcs x20, x23, x20 - ldr x23, [sp, #96] // 8-byte Folded Reload - adcs x23, x24, x23 - ldr x24, [sp, #144] // 8-byte Folded Reload - adcs x24, x25, x24 - ldr x25, [sp, #24] // 8-byte Folded Reload - adds x2, x25, x2 - str x2, [x0, #16] - adcs x2, x28, x7 - ldr x7, [sp, #16] // 8-byte Folded Reload - adcs x7, x7, x20 - ldr x20, [sp, #88] // 8-byte Folded Reload - adcs x20, x20, x23 - ldr x23, [sp, #136] // 8-byte Folded Reload - adcs x23, x23, x24 - adcs x24, xzr, xzr - adds x2, x2, x22 - ldr x22, [sp, #8] // 8-byte Folded Reload - adcs x7, x7, x22 - ldr x22, [sp, #80] // 8-byte Folded Reload - adcs x20, x20, x22 - ldr x22, [sp, #128] // 8-byte Folded Reload - adcs x22, x23, x22 - ldr x23, [sp, #160] // 8-byte Folded 
Reload - adcs x23, x24, x23 - adds x16, x16, x2 - str x16, [x0, #24] - adcs x16, x17, x7 - adcs x17, x3, x20 - adcs x2, x6, x22 - ldr x3, [sp, #120] // 8-byte Folded Reload - adcs x3, x3, x23 - adcs x6, xzr, xzr - adds x16, x16, x18 - adcs x17, x17, x4 - adcs x18, x2, x19 - adcs x2, x3, x21 - ldr x3, [sp, #56] // 8-byte Folded Reload - adcs x3, x6, x3 - adds x15, x15, x16 - str x15, [x0, #32] - adcs x12, x12, x17 - adcs x11, x11, x18 - adcs x9, x9, x2 - adcs x8, x8, x3 - adcs x15, xzr, xzr - adds x12, x12, x14 - adcs x11, x11, x13 - stp x12, x11, [x0, #40] - adcs x9, x9, x10 - adcs x8, x8, x5 - stp x9, x8, [x0, #56] - adcs x8, x15, x1 - str x8, [x0, #72] - add sp, sp, #176 // =176 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end66: - .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L - - .globl mcl_fpDbl_sqrPre5L - .align 2 - .type mcl_fpDbl_sqrPre5L,@function -mcl_fpDbl_sqrPre5L: // @mcl_fpDbl_sqrPre5L -// BB#0: - ldp x8, x9, [x1] - ldp x10, x11, [x1, #16] - ldp x12, x15, [x1] - ldp x13, x14, [x1, #24] - ldr x16, [x1, #16] - mul x17, x12, x12 - mul x18, x14, x12 - mul x2, x11, x12 - umulh x3, x16, x12 - mul x4, x16, x12 - umulh x5, x9, x12 - mul x6, x9, x12 - str x17, [x0] - umulh x17, x12, x12 - adds x17, x17, x6 - adcs x4, x5, x4 - adcs x2, x3, x2 - umulh x3, x11, x12 - adcs x18, x3, x18 - umulh x12, x14, x12 - adcs x12, x12, xzr - adds x17, x6, x17 - ldr x3, [x1] - str x17, [x0, #8] - mul x17, x9, x9 - adcs x17, x17, x4 - mul x4, x16, x9 - adcs x2, x4, x2 - mul x4, x11, x9 - adcs x18, x4, x18 - mul x4, x14, x9 - adcs x12, x4, x12 - adcs x4, xzr, xzr - adds x17, x17, x5 - umulh x5, x9, x9 - adcs x2, x2, x5 - umulh x5, x16, x9 - adcs x18, x18, x5 - ldr x5, [x1, #8] - umulh x11, x11, x9 - adcs x11, x12, x11 - ldr x12, [x1, #24] - umulh x9, x14, x9 - adcs x9, x4, x9 - mul x4, x3, x16 - adds x17, x4, x17 - mul x4, x14, x16 - str x17, [x0, #16] - mul x17, x5, x16 - adcs x17, x17, x2 - mul x2, x16, x16 - adcs x18, x2, x18 - mul x2, x12, x16 - adcs x11, x2, x11 - umulh x2, x3, x16 - adcs x9, x4, x9 - adcs x4, xzr, xzr - adds x17, x17, x2 - umulh x2, x5, x16 - adcs x18, x18, x2 - umulh x2, x16, x16 - adcs x11, x11, x2 - umulh x14, x14, x16 - umulh x16, x12, x16 - adcs x9, x9, x16 - ldr x16, [x1, #32] - adcs x14, x4, x14 - mul x1, x3, x12 - adds x17, x1, x17 - mul x1, x16, x12 - str x17, [x0, #24] - mul x17, x5, x12 - adcs x17, x17, x18 - mul x18, x10, x12 - adcs x11, x18, x11 - mul x18, x12, x12 - adcs x9, x18, x9 - umulh x18, x16, x12 - umulh x2, x3, x12 - adcs x14, x1, x14 - adcs x1, xzr, xzr - adds x17, x17, x2 - umulh x2, x10, x12 - umulh x3, x5, x12 - umulh x12, x12, x12 - adcs x11, x11, x3 - mul x3, x8, x16 - adcs x9, x9, x2 - mul x2, x13, x16 - adcs x12, x14, x12 - mul x14, x10, x16 - adcs x18, x1, x18 - mul x1, x15, x16 - adds x17, x17, x3 - mul x3, x16, x16 - umulh x8, x8, x16 - umulh x15, x15, x16 - umulh x10, x10, x16 - umulh x13, x13, x16 - umulh x16, x16, x16 - str x17, [x0, #32] - adcs x11, x11, x1 - adcs x9, x9, x14 - adcs x12, x12, x2 - adcs x14, x18, x3 - adcs x17, xzr, xzr - adds x8, x11, x8 - str x8, [x0, #40] - adcs x8, x9, x15 - str x8, [x0, #48] - adcs x8, x12, x10 - str x8, [x0, #56] - adcs x8, x14, x13 - str x8, [x0, #64] - adcs x8, x17, x16 - str x8, [x0, #72] - ret -.Lfunc_end67: - .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L - - .globl mcl_fp_mont5L - .align 2 - .type mcl_fp_mont5L,@function -mcl_fp_mont5L: // @mcl_fp_mont5L 
-// BB#0: - stp x28, x27, [sp, #-96]! - stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - sub sp, sp, #80 // =80 - str x0, [sp, #72] // 8-byte Folded Spill - ldp x16, x10, [x1, #24] - ldp x18, x0, [x1, #8] - ldr x17, [x1] - ldur x9, [x3, #-8] - str x9, [sp, #16] // 8-byte Folded Spill - ldp x11, x8, [x3, #24] - ldp x14, x12, [x3, #8] - ldr x13, [x3] - ldp x3, x1, [x2] - ldp x4, x5, [x2, #16] - ldr x2, [x2, #32] - umulh x6, x10, x3 - mul x7, x10, x3 - umulh x19, x16, x3 - mul x20, x16, x3 - umulh x21, x0, x3 - mul x22, x0, x3 - umulh x23, x18, x3 - mul x24, x18, x3 - umulh x25, x17, x3 - mul x3, x17, x3 - umulh x26, x1, x10 - mul x27, x1, x10 - umulh x28, x1, x16 - adds x24, x25, x24 - mul x25, x3, x9 - adcs x22, x23, x22 - mul x23, x25, x8 - mul x29, x25, x11 - mul x30, x25, x12 - adcs x20, x21, x20 - mul x21, x25, x14 - adcs x7, x19, x7 - umulh x19, x25, x13 - adcs x6, x6, xzr - adds x19, x19, x21 - umulh x21, x25, x14 - adcs x21, x21, x30 - umulh x30, x25, x12 - adcs x29, x30, x29 - umulh x30, x25, x11 - adcs x23, x30, x23 - umulh x30, x25, x8 - mul x25, x25, x13 - adcs x30, x30, xzr - cmn x25, x3 - mul x3, x1, x16 - umulh x25, x1, x0 - adcs x19, x19, x24 - mul x24, x1, x0 - adcs x21, x21, x22 - umulh x22, x1, x18 - adcs x20, x29, x20 - mul x29, x1, x18 - adcs x7, x23, x7 - umulh x23, x1, x17 - mul x1, x1, x17 - adcs x6, x30, x6 - adcs x30, xzr, xzr - adds x23, x23, x29 - umulh x29, x4, x10 - adcs x22, x22, x24 - mul x24, x4, x10 - adcs x3, x25, x3 - umulh x25, x4, x16 - adcs x27, x28, x27 - adcs x26, x26, xzr - adds x1, x19, x1 - adcs x19, x21, x23 - mul x21, x1, x9 - adcs x20, x20, x22 - mul x22, x21, x8 - mul x23, x21, x11 - mul x28, x21, x12 - adcs x3, x7, x3 - mul x7, x21, x14 - adcs x6, x6, x27 - umulh x27, x21, x13 - adcs x26, x30, x26 - adcs x30, xzr, xzr - adds x7, x27, x7 - umulh x27, x21, x14 - adcs x27, x27, x28 - umulh x28, x21, x12 - adcs x23, x28, x23 - umulh x28, x21, x11 - adcs x22, x28, x22 - umulh x28, x21, x8 - mul x21, x21, x13 - adcs x28, x28, xzr - cmn x21, x1 - mul x1, x4, x16 - umulh x21, x4, x0 - adcs x7, x7, x19 - mul x19, x4, x0 - adcs x20, x27, x20 - umulh x27, x4, x18 - adcs x3, x23, x3 - mul x23, x4, x18 - adcs x6, x22, x6 - umulh x22, x4, x17 - mul x4, x4, x17 - adcs x26, x28, x26 - umulh x15, x5, x10 - str x15, [sp, #64] // 8-byte Folded Spill - adcs x30, x30, xzr - adds x22, x22, x23 - mul x15, x5, x10 - str x15, [sp, #56] // 8-byte Folded Spill - adcs x19, x27, x19 - umulh x15, x5, x16 - str x15, [sp, #40] // 8-byte Folded Spill - adcs x1, x21, x1 - mul x15, x5, x16 - str x15, [sp, #32] // 8-byte Folded Spill - adcs x24, x25, x24 - adcs x25, x29, xzr - adds x4, x7, x4 - adcs x7, x20, x22 - mul x20, x4, x9 - adcs x3, x3, x19 - mul x19, x20, x8 - mul x22, x20, x11 - mov x15, x12 - mul x29, x20, x15 - adcs x1, x6, x1 - mov x21, x14 - mul x6, x20, x21 - adcs x24, x26, x24 - mov x9, x13 - umulh x26, x20, x9 - adcs x25, x30, x25 - adcs x30, xzr, xzr - adds x6, x26, x6 - umulh x26, x20, x21 - adcs x26, x26, x29 - umulh x29, x20, x15 - adcs x22, x29, x22 - umulh x29, x20, x11 - mov x13, x11 - adcs x19, x29, x19 - umulh x29, x20, x8 - mov x12, x8 - mul x20, x20, x9 - mov x14, x9 - adcs x29, x29, xzr - cmn x20, x4 - umulh x4, x5, x0 - mul x20, x5, x0 - umulh x11, x5, x18 - mul x9, x5, x18 - umulh x8, x5, x17 - mul x5, x5, x17 - umulh x23, x2, x10 - str x23, [sp, #48] // 8-byte Folded Spill - mul x10, x2, x10 - str x10, [sp, #24] // 8-byte Folded Spill - umulh x10, x2, x16 - str x10, 
[sp, #8] // 8-byte Folded Spill - mul x28, x2, x16 - umulh x27, x2, x0 - mul x23, x2, x0 - umulh x16, x2, x18 - mul x18, x2, x18 - umulh x0, x2, x17 - mul x17, x2, x17 - adcs x2, x6, x7 - adcs x3, x26, x3 - adcs x1, x22, x1 - adcs x6, x19, x24 - adcs x7, x29, x25 - adcs x19, x30, xzr - adds x8, x8, x9 - adcs x9, x11, x20 - ldr x10, [sp, #32] // 8-byte Folded Reload - adcs x10, x4, x10 - ldr x11, [sp, #56] // 8-byte Folded Reload - ldr x4, [sp, #40] // 8-byte Folded Reload - adcs x4, x4, x11 - ldr x11, [sp, #64] // 8-byte Folded Reload - adcs x20, x11, xzr - adds x2, x2, x5 - adcs x8, x3, x8 - ldr x24, [sp, #16] // 8-byte Folded Reload - mul x3, x2, x24 - adcs x9, x1, x9 - mul x1, x3, x12 - mul x5, x3, x13 - mul x22, x3, x15 - adcs x10, x6, x10 - mul x6, x3, x21 - adcs x4, x7, x4 - umulh x7, x3, x14 - adcs x19, x19, x20 - adcs x20, xzr, xzr - adds x6, x7, x6 - umulh x7, x3, x21 - adcs x7, x7, x22 - umulh x22, x3, x15 - mov x25, x15 - adcs x5, x22, x5 - umulh x22, x3, x13 - adcs x1, x22, x1 - umulh x22, x3, x12 - mul x3, x3, x14 - adcs x22, x22, xzr - cmn x3, x2 - adcs x8, x6, x8 - adcs x9, x7, x9 - adcs x10, x5, x10 - adcs x1, x1, x4 - adcs x2, x22, x19 - adcs x3, x20, xzr - adds x11, x0, x18 - adcs x15, x16, x23 - adcs x16, x27, x28 - ldr x18, [sp, #24] // 8-byte Folded Reload - ldr x0, [sp, #8] // 8-byte Folded Reload - adcs x18, x0, x18 - ldr x0, [sp, #48] // 8-byte Folded Reload - adcs x4, x0, xzr - adds x8, x8, x17 - adcs x9, x9, x11 - mul x11, x8, x24 - adcs x10, x10, x15 - umulh x15, x11, x12 - mul x17, x11, x12 - umulh x5, x11, x13 - mul x6, x11, x13 - mov x0, x13 - mov x20, x25 - umulh x7, x11, x20 - mul x19, x11, x20 - mov x23, x20 - mov x13, x21 - umulh x20, x11, x13 - mul x21, x11, x13 - umulh x22, x11, x14 - mul x11, x11, x14 - adcs x16, x1, x16 - adcs x18, x2, x18 - adcs x1, x3, x4 - adcs x2, xzr, xzr - adds x3, x22, x21 - adcs x4, x20, x19 - adcs x6, x7, x6 - adcs x17, x5, x17 - adcs x15, x15, xzr - cmn x11, x8 - adcs x8, x3, x9 - adcs x9, x4, x10 - adcs x10, x6, x16 - adcs x11, x17, x18 - adcs x15, x15, x1 - adcs x16, x2, xzr - subs x1, x8, x14 - sbcs x13, x9, x13 - sbcs x14, x10, x23 - sbcs x17, x11, x0 - sbcs x18, x15, x12 - sbcs x16, x16, xzr - tst x16, #0x1 - csel x8, x8, x1, ne - csel x9, x9, x13, ne - csel x10, x10, x14, ne - csel x11, x11, x17, ne - csel x12, x15, x18, ne - ldr x13, [sp, #72] // 8-byte Folded Reload - stp x8, x9, [x13] - stp x10, x11, [x13, #16] - str x12, [x13, #32] - add sp, sp, #80 // =80 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end68: - .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L - - .globl mcl_fp_montNF5L - .align 2 - .type mcl_fp_montNF5L,@function -mcl_fp_montNF5L: // @mcl_fp_montNF5L -// BB#0: - stp x28, x27, [sp, #-96]! 
- stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - sub sp, sp, #32 // =32 - str x0, [sp, #24] // 8-byte Folded Spill - ldp x16, x14, [x1, #24] - ldp x18, x15, [x1, #8] - ldr x17, [x1] - ldur x13, [x3, #-8] - ldp x9, x8, [x3, #24] - ldp x11, x10, [x3, #8] - ldr x12, [x3] - ldp x1, x3, [x2] - ldp x4, x5, [x2, #16] - ldr x2, [x2, #32] - umulh x6, x14, x1 - mul x7, x14, x1 - umulh x19, x16, x1 - mul x20, x16, x1 - umulh x21, x15, x1 - mul x22, x15, x1 - umulh x23, x18, x1 - mul x24, x18, x1 - umulh x25, x17, x1 - mul x1, x17, x1 - umulh x26, x3, x14 - mul x27, x3, x14 - umulh x28, x3, x16 - mul x29, x3, x16 - umulh x30, x3, x15 - adds x24, x25, x24 - mul x25, x3, x15 - adcs x22, x23, x22 - umulh x23, x3, x18 - adcs x20, x21, x20 - mul x21, x1, x13 - adcs x7, x19, x7 - mul x19, x21, x12 - adcs x6, x6, xzr - cmn x19, x1 - mul x1, x3, x18 - mul x19, x21, x11 - adcs x19, x19, x24 - mul x24, x21, x10 - adcs x22, x24, x22 - mul x24, x21, x9 - adcs x20, x24, x20 - mul x24, x21, x8 - adcs x7, x24, x7 - umulh x24, x21, x12 - adcs x6, x6, xzr - adds x19, x19, x24 - umulh x24, x21, x11 - adcs x22, x22, x24 - umulh x24, x21, x10 - adcs x20, x20, x24 - umulh x24, x21, x9 - adcs x7, x7, x24 - umulh x24, x3, x17 - mul x3, x3, x17 - umulh x21, x21, x8 - adcs x6, x6, x21 - umulh x21, x4, x14 - adds x1, x24, x1 - mul x24, x4, x14 - adcs x23, x23, x25 - umulh x25, x4, x16 - adcs x29, x30, x29 - mul x30, x4, x16 - adcs x27, x28, x27 - umulh x28, x4, x15 - adcs x26, x26, xzr - adds x3, x3, x19 - mul x19, x4, x15 - adcs x1, x1, x22 - umulh x22, x4, x18 - adcs x20, x23, x20 - mul x23, x4, x18 - adcs x7, x29, x7 - mul x29, x3, x13 - adcs x6, x27, x6 - mul x27, x29, x12 - adcs x26, x26, xzr - cmn x27, x3 - umulh x3, x4, x17 - mul x4, x4, x17 - mul x27, x29, x11 - adcs x1, x27, x1 - mul x27, x29, x10 - adcs x20, x27, x20 - mul x27, x29, x9 - adcs x7, x27, x7 - mul x27, x29, x8 - adcs x6, x27, x6 - umulh x27, x29, x12 - adcs x26, x26, xzr - adds x1, x1, x27 - umulh x27, x29, x11 - adcs x20, x20, x27 - umulh x27, x29, x10 - adcs x7, x7, x27 - umulh x27, x29, x9 - adcs x6, x6, x27 - umulh x27, x5, x14 - umulh x29, x29, x8 - adcs x26, x26, x29 - mul x29, x5, x14 - adds x3, x3, x23 - umulh x23, x5, x16 - adcs x19, x22, x19 - mul x22, x5, x16 - adcs x28, x28, x30 - umulh x30, x5, x15 - adcs x24, x25, x24 - mul x25, x5, x15 - adcs x21, x21, xzr - adds x1, x4, x1 - umulh x4, x5, x18 - adcs x3, x3, x20 - mul x20, x5, x18 - adcs x7, x19, x7 - umulh x19, x5, x17 - mul x5, x5, x17 - adcs x6, x28, x6 - mul x28, x1, x13 - adcs x24, x24, x26 - mul x26, x28, x12 - adcs x21, x21, xzr - cmn x26, x1 - umulh x0, x2, x14 - mul x14, x2, x14 - stp x14, x0, [sp, #8] - umulh x26, x2, x16 - mul x1, x2, x16 - umulh x0, x2, x15 - mul x16, x2, x15 - umulh x15, x2, x18 - mul x18, x2, x18 - umulh x14, x2, x17 - mul x17, x2, x17 - mul x2, x28, x11 - adcs x2, x2, x3 - mul x3, x28, x10 - adcs x3, x3, x7 - mul x7, x28, x9 - adcs x6, x7, x6 - mul x7, x28, x8 - adcs x7, x7, x24 - adcs x21, x21, xzr - umulh x24, x28, x12 - adds x2, x2, x24 - umulh x24, x28, x11 - adcs x3, x3, x24 - umulh x24, x28, x10 - adcs x6, x6, x24 - umulh x24, x28, x9 - adcs x7, x7, x24 - umulh x24, x28, x8 - adcs x21, x21, x24 - adds x19, x19, x20 - adcs x4, x4, x25 - adcs x20, x30, x22 - adcs x22, x23, x29 - adcs x23, x27, xzr - adds x2, x5, x2 - adcs x3, x19, x3 - mov x24, x13 - mul x5, x2, x24 - adcs x4, x4, x6 - mul x6, x5, x8 - mul x19, x5, x9 - adcs x7, x20, x7 - mul x20, x5, x10 - adcs x21, x22, 
x21 - mul x22, x5, x12 - adcs x23, x23, xzr - cmn x22, x2 - mul x2, x5, x11 - umulh x22, x5, x8 - adcs x2, x2, x3 - umulh x3, x5, x9 - adcs x4, x20, x4 - umulh x20, x5, x10 - adcs x7, x19, x7 - umulh x19, x5, x11 - umulh x5, x5, x12 - adcs x6, x6, x21 - adcs x21, x23, xzr - adds x2, x2, x5 - adcs x4, x4, x19 - adcs x5, x7, x20 - adcs x3, x6, x3 - adcs x6, x21, x22 - adds x13, x14, x18 - adcs x14, x15, x16 - adcs x15, x0, x1 - ldp x16, x18, [sp, #8] - adcs x16, x26, x16 - adcs x18, x18, xzr - adds x17, x17, x2 - adcs x13, x13, x4 - mul x0, x17, x24 - adcs x14, x14, x5 - mul x1, x0, x8 - mul x2, x0, x9 - mul x4, x0, x10 - mul x5, x0, x11 - mul x7, x0, x12 - umulh x19, x0, x8 - umulh x20, x0, x9 - umulh x21, x0, x10 - umulh x22, x0, x11 - umulh x0, x0, x12 - adcs x15, x15, x3 - adcs x16, x16, x6 - adcs x18, x18, xzr - cmn x7, x17 - adcs x13, x5, x13 - adcs x14, x4, x14 - adcs x15, x2, x15 - adcs x16, x1, x16 - adcs x17, x18, xzr - adds x13, x13, x0 - adcs x14, x14, x22 - adcs x15, x15, x21 - adcs x16, x16, x20 - adcs x17, x17, x19 - subs x12, x13, x12 - sbcs x11, x14, x11 - sbcs x10, x15, x10 - sbcs x9, x16, x9 - sbcs x8, x17, x8 - asr x18, x8, #63 - cmp x18, #0 // =0 - csel x12, x13, x12, lt - csel x11, x14, x11, lt - csel x10, x15, x10, lt - csel x9, x16, x9, lt - csel x8, x17, x8, lt - ldr x13, [sp, #24] // 8-byte Folded Reload - stp x12, x11, [x13] - stp x10, x9, [x13, #16] - str x8, [x13, #32] - add sp, sp, #32 // =32 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end69: - .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L - - .globl mcl_fp_montRed5L - .align 2 - .type mcl_fp_montRed5L,@function -mcl_fp_montRed5L: // @mcl_fp_montRed5L -// BB#0: - stp x26, x25, [sp, #-64]! 
- stp x24, x23, [sp, #16] - stp x22, x21, [sp, #32] - stp x20, x19, [sp, #48] - ldur x13, [x2, #-8] - ldp x9, x8, [x2, #24] - ldp x11, x10, [x2, #8] - ldr x12, [x2] - ldp x15, x16, [x1, #64] - ldp x17, x18, [x1, #48] - ldp x2, x3, [x1, #32] - ldp x4, x5, [x1, #16] - ldp x14, x1, [x1] - mul x6, x14, x13 - umulh x7, x6, x8 - mul x19, x6, x8 - umulh x20, x6, x9 - mul x21, x6, x9 - umulh x22, x6, x10 - mul x23, x6, x10 - umulh x24, x6, x11 - mul x25, x6, x11 - umulh x26, x6, x12 - mul x6, x6, x12 - adds x25, x26, x25 - adcs x23, x24, x23 - adcs x21, x22, x21 - adcs x19, x20, x19 - adcs x7, x7, xzr - cmn x14, x6 - adcs x14, x1, x25 - adcs x1, x4, x23 - mul x4, x14, x13 - adcs x5, x5, x21 - umulh x6, x4, x8 - mul x20, x4, x8 - umulh x21, x4, x9 - mul x22, x4, x9 - umulh x23, x4, x10 - mul x24, x4, x10 - umulh x25, x4, x11 - mul x26, x4, x11 - adcs x2, x2, x19 - umulh x19, x4, x12 - mul x4, x4, x12 - adcs x3, x3, x7 - adcs x17, x17, xzr - adcs x18, x18, xzr - adcs x15, x15, xzr - adcs x16, x16, xzr - adcs x7, xzr, xzr - adds x19, x19, x26 - adcs x24, x25, x24 - adcs x22, x23, x22 - adcs x20, x21, x20 - adcs x6, x6, xzr - cmn x4, x14 - adcs x14, x19, x1 - adcs x1, x24, x5 - mul x4, x14, x13 - adcs x2, x22, x2 - umulh x5, x4, x8 - mul x19, x4, x8 - umulh x21, x4, x9 - mul x22, x4, x9 - umulh x23, x4, x10 - mul x24, x4, x10 - umulh x25, x4, x11 - mul x26, x4, x11 - adcs x3, x20, x3 - umulh x20, x4, x12 - mul x4, x4, x12 - adcs x17, x6, x17 - adcs x18, x18, xzr - adcs x15, x15, xzr - adcs x16, x16, xzr - adcs x6, x7, xzr - adds x7, x20, x26 - adcs x20, x25, x24 - adcs x22, x23, x22 - adcs x19, x21, x19 - adcs x5, x5, xzr - cmn x4, x14 - adcs x14, x7, x1 - adcs x1, x20, x2 - mul x2, x14, x13 - adcs x3, x22, x3 - umulh x4, x2, x8 - mul x7, x2, x8 - umulh x20, x2, x9 - mul x21, x2, x9 - umulh x22, x2, x10 - mul x23, x2, x10 - umulh x24, x2, x11 - mul x25, x2, x11 - umulh x26, x2, x12 - mul x2, x2, x12 - adcs x17, x19, x17 - adcs x18, x5, x18 - adcs x15, x15, xzr - adcs x16, x16, xzr - adcs x5, x6, xzr - adds x6, x26, x25 - adcs x19, x24, x23 - adcs x21, x22, x21 - adcs x7, x20, x7 - adcs x4, x4, xzr - cmn x2, x14 - adcs x14, x6, x1 - adcs x1, x19, x3 - mul x13, x14, x13 - adcs x17, x21, x17 - umulh x2, x13, x8 - mul x3, x13, x8 - umulh x6, x13, x9 - mul x19, x13, x9 - umulh x20, x13, x10 - mul x21, x13, x10 - umulh x22, x13, x11 - mul x23, x13, x11 - umulh x24, x13, x12 - mul x13, x13, x12 - adcs x18, x7, x18 - adcs x15, x4, x15 - adcs x16, x16, xzr - adcs x4, x5, xzr - adds x5, x24, x23 - adcs x7, x22, x21 - adcs x19, x20, x19 - adcs x3, x6, x3 - adcs x2, x2, xzr - cmn x13, x14 - adcs x13, x5, x1 - adcs x14, x7, x17 - adcs x17, x19, x18 - adcs x15, x3, x15 - adcs x16, x2, x16 - adcs x18, x4, xzr - subs x12, x13, x12 - sbcs x11, x14, x11 - sbcs x10, x17, x10 - sbcs x9, x15, x9 - sbcs x8, x16, x8 - sbcs x18, x18, xzr - tst x18, #0x1 - csel x12, x13, x12, ne - csel x11, x14, x11, ne - csel x10, x17, x10, ne - csel x9, x15, x9, ne - csel x8, x16, x8, ne - stp x12, x11, [x0] - stp x10, x9, [x0, #16] - str x8, [x0, #32] - ldp x20, x19, [sp, #48] - ldp x22, x21, [sp, #32] - ldp x24, x23, [sp, #16] - ldp x26, x25, [sp], #64 - ret -.Lfunc_end70: - .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L - - .globl mcl_fp_addPre5L - .align 2 - .type mcl_fp_addPre5L,@function -mcl_fp_addPre5L: // @mcl_fp_addPre5L -// BB#0: - ldp x11, x8, [x2, #24] - ldp x17, x9, [x1, #24] - ldp x13, x10, [x2, #8] - ldr x12, [x2] - ldp x14, x15, [x1] - ldr x16, [x1, #16] - adds x12, x12, x14 - str x12, [x0] - adcs x12, x13, x15 - 
adcs x10, x10, x16 - stp x12, x10, [x0, #8] - adcs x10, x11, x17 - adcs x9, x8, x9 - adcs x8, xzr, xzr - stp x10, x9, [x0, #24] - mov x0, x8 - ret -.Lfunc_end71: - .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L - - .globl mcl_fp_subPre5L - .align 2 - .type mcl_fp_subPre5L,@function -mcl_fp_subPre5L: // @mcl_fp_subPre5L -// BB#0: - ldp x11, x8, [x2, #24] - ldp x17, x9, [x1, #24] - ldp x13, x10, [x2, #8] - ldr x12, [x2] - ldp x14, x15, [x1] - ldr x16, [x1, #16] - subs x12, x14, x12 - str x12, [x0] - sbcs x12, x15, x13 - sbcs x10, x16, x10 - stp x12, x10, [x0, #8] - sbcs x10, x17, x11 - sbcs x9, x9, x8 - ngcs x8, xzr - and x8, x8, #0x1 - stp x10, x9, [x0, #24] - mov x0, x8 - ret -.Lfunc_end72: - .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L - - .globl mcl_fp_shr1_5L - .align 2 - .type mcl_fp_shr1_5L,@function -mcl_fp_shr1_5L: // @mcl_fp_shr1_5L -// BB#0: - ldp x8, x9, [x1] - ldp x10, x11, [x1, #16] - ldr x12, [x1, #32] - extr x8, x9, x8, #1 - extr x9, x10, x9, #1 - extr x10, x11, x10, #1 - extr x11, x12, x11, #1 - lsr x12, x12, #1 - stp x8, x9, [x0] - stp x10, x11, [x0, #16] - str x12, [x0, #32] - ret -.Lfunc_end73: - .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L - - .globl mcl_fp_add5L - .align 2 - .type mcl_fp_add5L,@function -mcl_fp_add5L: // @mcl_fp_add5L -// BB#0: - ldp x11, x8, [x2, #24] - ldp x17, x9, [x1, #24] - ldp x13, x10, [x2, #8] - ldr x12, [x2] - ldp x14, x15, [x1] - ldr x16, [x1, #16] - adds x12, x12, x14 - ldr x14, [x3, #32] - adcs x13, x13, x15 - adcs x10, x10, x16 - ldp x15, x16, [x3] - stp x12, x13, [x0] - adcs x17, x11, x17 - stp x10, x17, [x0, #16] - adcs x8, x8, x9 - str x8, [x0, #32] - adcs x18, xzr, xzr - ldp x9, x1, [x3, #16] - subs x12, x12, x15 - sbcs x11, x13, x16 - sbcs x10, x10, x9 - sbcs x9, x17, x1 - sbcs x8, x8, x14 - sbcs x13, x18, xzr - and w13, w13, #0x1 - tbnz w13, #0, .LBB74_2 -// BB#1: // %nocarry - stp x12, x11, [x0] - stp x10, x9, [x0, #16] - str x8, [x0, #32] -.LBB74_2: // %carry - ret -.Lfunc_end74: - .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L - - .globl mcl_fp_addNF5L - .align 2 - .type mcl_fp_addNF5L,@function -mcl_fp_addNF5L: // @mcl_fp_addNF5L -// BB#0: - ldp x11, x8, [x1, #24] - ldp x17, x9, [x2, #24] - ldp x13, x10, [x1, #8] - ldr x12, [x1] - ldp x14, x15, [x2] - ldr x16, [x2, #16] - adds x12, x14, x12 - ldp x18, x14, [x3, #24] - adcs x13, x15, x13 - adcs x10, x16, x10 - ldp x15, x16, [x3] - adcs x11, x17, x11 - ldr x17, [x3, #16] - adcs x8, x9, x8 - subs x9, x12, x15 - sbcs x15, x13, x16 - sbcs x16, x10, x17 - sbcs x17, x11, x18 - sbcs x14, x8, x14 - asr x18, x14, #63 - cmp x18, #0 // =0 - csel x9, x12, x9, lt - csel x12, x13, x15, lt - csel x10, x10, x16, lt - csel x11, x11, x17, lt - csel x8, x8, x14, lt - stp x9, x12, [x0] - stp x10, x11, [x0, #16] - str x8, [x0, #32] - ret -.Lfunc_end75: - .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L - - .globl mcl_fp_sub5L - .align 2 - .type mcl_fp_sub5L,@function -mcl_fp_sub5L: // @mcl_fp_sub5L -// BB#0: - ldp x11, x12, [x2, #24] - ldp x17, x13, [x1, #24] - ldp x9, x10, [x2, #8] - ldr x8, [x2] - ldp x14, x15, [x1] - ldr x16, [x1, #16] - subs x8, x14, x8 - sbcs x9, x15, x9 - stp x8, x9, [x0] - sbcs x10, x16, x10 - sbcs x11, x17, x11 - stp x10, x11, [x0, #16] - sbcs x12, x13, x12 - str x12, [x0, #32] - ngcs x13, xzr - and w13, w13, #0x1 - tbnz w13, #0, .LBB76_2 -// BB#1: // %nocarry - ret -.LBB76_2: // %carry - ldp x17, x13, [x3, #24] - ldp x14, x15, [x3] - ldr x16, [x3, #16] - adds x8, x14, x8 - adcs x9, x15, x9 - adcs x10, x16, x10 - adcs x11, x17, x11 - adcs x12, x13, x12 - stp 
x8, x9, [x0] - stp x10, x11, [x0, #16] - str x12, [x0, #32] - ret -.Lfunc_end76: - .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L - - .globl mcl_fp_subNF5L - .align 2 - .type mcl_fp_subNF5L,@function -mcl_fp_subNF5L: // @mcl_fp_subNF5L -// BB#0: - ldp x11, x8, [x2, #24] - ldp x17, x9, [x1, #24] - ldp x13, x10, [x2, #8] - ldr x12, [x2] - ldp x14, x15, [x1] - ldr x16, [x1, #16] - subs x12, x14, x12 - sbcs x13, x15, x13 - ldp x1, x14, [x3, #8] - ldp x15, x18, [x3, #24] - sbcs x10, x16, x10 - ldr x16, [x3] - sbcs x11, x17, x11 - sbcs x8, x9, x8 - asr x9, x8, #63 - extr x17, x9, x8, #63 - and x16, x17, x16 - and x14, x14, x9, ror #63 - and x15, x9, x15 - and x17, x9, x18 - ror x9, x9, #63 - and x9, x9, x1 - adds x12, x16, x12 - adcs x9, x9, x13 - stp x12, x9, [x0] - adcs x9, x14, x10 - str x9, [x0, #16] - adcs x9, x15, x11 - adcs x8, x17, x8 - stp x9, x8, [x0, #24] - ret -.Lfunc_end77: - .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L - - .globl mcl_fpDbl_add5L - .align 2 - .type mcl_fpDbl_add5L,@function -mcl_fpDbl_add5L: // @mcl_fpDbl_add5L -// BB#0: - stp x22, x21, [sp, #-32]! - stp x20, x19, [sp, #16] - ldp x8, x9, [x2, #64] - ldp x10, x11, [x1, #64] - ldp x12, x13, [x2, #48] - ldp x14, x15, [x1, #48] - ldp x16, x17, [x2, #32] - ldp x18, x4, [x1, #32] - ldp x5, x6, [x2, #16] - ldp x19, x2, [x2] - ldp x20, x21, [x1, #16] - ldp x7, x1, [x1] - adds x7, x19, x7 - ldr x19, [x3, #32] - str x7, [x0] - adcs x1, x2, x1 - ldp x2, x7, [x3, #16] - str x1, [x0, #8] - ldp x1, x3, [x3] - adcs x5, x5, x20 - str x5, [x0, #16] - adcs x5, x6, x21 - adcs x16, x16, x18 - stp x5, x16, [x0, #24] - adcs x16, x17, x4 - adcs x12, x12, x14 - adcs x13, x13, x15 - adcs x8, x8, x10 - adcs x9, x9, x11 - adcs x10, xzr, xzr - subs x11, x16, x1 - sbcs x14, x12, x3 - sbcs x15, x13, x2 - sbcs x17, x8, x7 - sbcs x18, x9, x19 - sbcs x10, x10, xzr - tst x10, #0x1 - csel x10, x16, x11, ne - csel x11, x12, x14, ne - csel x12, x13, x15, ne - csel x8, x8, x17, ne - csel x9, x9, x18, ne - stp x10, x11, [x0, #40] - stp x12, x8, [x0, #56] - str x9, [x0, #72] - ldp x20, x19, [sp, #16] - ldp x22, x21, [sp], #32 - ret -.Lfunc_end78: - .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L - - .globl mcl_fpDbl_sub5L - .align 2 - .type mcl_fpDbl_sub5L,@function -mcl_fpDbl_sub5L: // @mcl_fpDbl_sub5L -// BB#0: - stp x22, x21, [sp, #-32]! 
- stp x20, x19, [sp, #16] - ldp x8, x9, [x2, #64] - ldp x10, x11, [x1, #64] - ldp x12, x13, [x2, #48] - ldp x14, x15, [x1, #48] - ldp x16, x17, [x2, #32] - ldp x18, x4, [x1, #32] - ldp x5, x6, [x2, #16] - ldp x7, x2, [x2] - ldp x20, x21, [x1, #16] - ldp x19, x1, [x1] - subs x7, x19, x7 - ldr x19, [x3, #32] - str x7, [x0] - sbcs x1, x1, x2 - ldp x2, x7, [x3, #16] - str x1, [x0, #8] - ldp x1, x3, [x3] - sbcs x5, x20, x5 - str x5, [x0, #16] - sbcs x5, x21, x6 - sbcs x16, x18, x16 - stp x5, x16, [x0, #24] - sbcs x16, x4, x17 - sbcs x12, x14, x12 - sbcs x13, x15, x13 - sbcs x8, x10, x8 - sbcs x9, x11, x9 - ngcs x10, xzr - tst x10, #0x1 - csel x10, x19, xzr, ne - csel x11, x7, xzr, ne - csel x14, x2, xzr, ne - csel x15, x3, xzr, ne - csel x17, x1, xzr, ne - adds x16, x17, x16 - adcs x12, x15, x12 - stp x16, x12, [x0, #40] - adcs x12, x14, x13 - adcs x8, x11, x8 - stp x12, x8, [x0, #56] - adcs x8, x10, x9 - str x8, [x0, #72] - ldp x20, x19, [sp, #16] - ldp x22, x21, [sp], #32 - ret -.Lfunc_end79: - .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L - - .globl mcl_fp_mulUnitPre6L - .align 2 - .type mcl_fp_mulUnitPre6L,@function -mcl_fp_mulUnitPre6L: // @mcl_fp_mulUnitPre6L -// BB#0: - ldp x8, x9, [x1, #32] - ldp x10, x11, [x1] - ldp x12, x13, [x1, #16] - mul x14, x10, x2 - mul x15, x11, x2 - umulh x10, x10, x2 - mul x16, x12, x2 - umulh x11, x11, x2 - mul x17, x13, x2 - umulh x12, x12, x2 - mul x18, x8, x2 - umulh x13, x13, x2 - mul x1, x9, x2 - umulh x8, x8, x2 - umulh x9, x9, x2 - adds x10, x10, x15 - stp x14, x10, [x0] - adcs x10, x11, x16 - str x10, [x0, #16] - adcs x10, x12, x17 - str x10, [x0, #24] - adcs x10, x13, x18 - adcs x8, x8, x1 - stp x10, x8, [x0, #32] - adcs x8, x9, xzr - str x8, [x0, #48] - ret -.Lfunc_end80: - .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L - - .globl mcl_fpDbl_mulPre6L - .align 2 - .type mcl_fpDbl_mulPre6L,@function -mcl_fpDbl_mulPre6L: // @mcl_fpDbl_mulPre6L -// BB#0: - stp x28, x27, [sp, #-96]! 
- stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - sub sp, sp, #400 // =400 - ldp x8, x9, [x1] - ldp x11, x13, [x1] - ldp x10, x17, [x1, #16] - ldp x12, x14, [x1, #32] - ldp x15, x16, [x2] - ldr x3, [x1, #32] - mul x30, x8, x15 - umulh x18, x14, x15 - str x18, [sp, #392] // 8-byte Folded Spill - mul x18, x14, x15 - str x18, [sp, #384] // 8-byte Folded Spill - umulh x18, x12, x15 - str x18, [sp, #376] // 8-byte Folded Spill - mul x18, x12, x15 - str x18, [sp, #360] // 8-byte Folded Spill - umulh x18, x17, x15 - str x18, [sp, #336] // 8-byte Folded Spill - mul x18, x17, x15 - str x18, [sp, #312] // 8-byte Folded Spill - umulh x18, x10, x15 - str x18, [sp, #304] // 8-byte Folded Spill - mul x18, x10, x15 - str x18, [sp, #272] // 8-byte Folded Spill - umulh x18, x9, x15 - str x18, [sp, #248] // 8-byte Folded Spill - mul x18, x9, x15 - umulh x15, x8, x15 - stp x15, x18, [sp, #216] - mul x15, x8, x16 - str x15, [sp, #280] // 8-byte Folded Spill - mul x15, x14, x16 - str x15, [sp, #352] // 8-byte Folded Spill - mul x15, x12, x16 - str x15, [sp, #328] // 8-byte Folded Spill - mul x15, x17, x16 - str x15, [sp, #296] // 8-byte Folded Spill - mul x15, x10, x16 - str x15, [sp, #264] // 8-byte Folded Spill - mul x15, x9, x16 - umulh x14, x14, x16 - str x14, [sp, #368] // 8-byte Folded Spill - umulh x12, x12, x16 - str x12, [sp, #344] // 8-byte Folded Spill - umulh x12, x17, x16 - str x12, [sp, #320] // 8-byte Folded Spill - umulh x10, x10, x16 - str x10, [sp, #288] // 8-byte Folded Spill - umulh x9, x9, x16 - str x9, [sp, #256] // 8-byte Folded Spill - umulh x8, x8, x16 - stp x8, x15, [sp, #232] - ldp x12, x8, [x2, #16] - ldr x9, [x1, #40] - ldp x15, x10, [x1, #16] - mul x14, x11, x12 - str x14, [sp, #144] // 8-byte Folded Spill - mul x14, x9, x12 - str x14, [sp, #200] // 8-byte Folded Spill - mul x14, x3, x12 - str x14, [sp, #176] // 8-byte Folded Spill - mul x14, x10, x12 - str x14, [sp, #160] // 8-byte Folded Spill - mul x14, x15, x12 - str x14, [sp, #128] // 8-byte Folded Spill - mul x14, x13, x12 - str x14, [sp, #112] // 8-byte Folded Spill - umulh x14, x9, x12 - str x14, [sp, #208] // 8-byte Folded Spill - umulh x14, x3, x12 - str x14, [sp, #192] // 8-byte Folded Spill - umulh x14, x10, x12 - str x14, [sp, #168] // 8-byte Folded Spill - umulh x14, x15, x12 - str x14, [sp, #152] // 8-byte Folded Spill - umulh x14, x13, x12 - str x14, [sp, #120] // 8-byte Folded Spill - umulh x12, x11, x12 - str x12, [sp, #104] // 8-byte Folded Spill - mul x12, x9, x8 - str x12, [sp, #184] // 8-byte Folded Spill - umulh x9, x9, x8 - str x9, [sp, #136] // 8-byte Folded Spill - mul x9, x3, x8 - str x9, [sp, #80] // 8-byte Folded Spill - umulh x9, x3, x8 - str x9, [sp, #96] // 8-byte Folded Spill - mul x9, x10, x8 - str x9, [sp, #64] // 8-byte Folded Spill - umulh x9, x10, x8 - str x9, [sp, #88] // 8-byte Folded Spill - mul x9, x15, x8 - str x9, [sp, #48] // 8-byte Folded Spill - umulh x9, x15, x8 - str x9, [sp, #72] // 8-byte Folded Spill - mul x9, x13, x8 - str x9, [sp, #32] // 8-byte Folded Spill - umulh x9, x13, x8 - str x9, [sp, #56] // 8-byte Folded Spill - mul x9, x11, x8 - str x9, [sp, #24] // 8-byte Folded Spill - umulh x8, x11, x8 - str x8, [sp, #40] // 8-byte Folded Spill - ldp x12, x13, [x1, #32] - ldp x9, x10, [x1] - ldp x11, x1, [x1, #16] - ldp x8, x2, [x2, #32] - mul x22, x9, x8 - mul x28, x13, x8 - mul x27, x12, x8 - mul x24, x1, x8 - mul x20, x11, x8 - mul x19, x10, x8 - umulh x14, x13, x8 - str x14, [sp, #16] // 8-byte 
Folded Spill - umulh x29, x12, x8 - umulh x26, x1, x8 - umulh x23, x11, x8 - umulh x21, x10, x8 - umulh x7, x9, x8 - mul x25, x9, x2 - umulh x6, x9, x2 - mul x4, x10, x2 - umulh x5, x10, x2 - mul x18, x11, x2 - umulh x3, x11, x2 - mul x16, x1, x2 - umulh x1, x1, x2 - mul x15, x12, x2 - umulh x17, x12, x2 - mul x14, x13, x2 - umulh x13, x13, x2 - str x30, [x0] - ldp x9, x8, [sp, #216] - adds x2, x9, x8 - ldp x8, x30, [sp, #272] - ldr x9, [sp, #248] // 8-byte Folded Reload - adcs x8, x9, x8 - ldp x10, x9, [sp, #304] - adcs x9, x10, x9 - ldr x10, [sp, #360] // 8-byte Folded Reload - ldr x11, [sp, #336] // 8-byte Folded Reload - adcs x10, x11, x10 - ldp x12, x11, [sp, #376] - adcs x11, x12, x11 - ldr x12, [sp, #392] // 8-byte Folded Reload - adcs x12, x12, xzr - adds x2, x30, x2 - str x2, [x0, #8] - ldp x30, x2, [sp, #232] - adcs x8, x2, x8 - ldr x2, [sp, #264] // 8-byte Folded Reload - adcs x9, x2, x9 - ldr x2, [sp, #296] // 8-byte Folded Reload - adcs x10, x2, x10 - ldr x2, [sp, #328] // 8-byte Folded Reload - adcs x11, x2, x11 - ldr x2, [sp, #352] // 8-byte Folded Reload - adcs x12, x2, x12 - adcs x2, xzr, xzr - adds x8, x8, x30 - ldr x30, [sp, #256] // 8-byte Folded Reload - adcs x9, x9, x30 - ldr x30, [sp, #288] // 8-byte Folded Reload - adcs x10, x10, x30 - ldr x30, [sp, #320] // 8-byte Folded Reload - adcs x11, x11, x30 - ldr x30, [sp, #344] // 8-byte Folded Reload - adcs x12, x12, x30 - ldr x30, [sp, #368] // 8-byte Folded Reload - adcs x2, x2, x30 - ldr x30, [sp, #144] // 8-byte Folded Reload - adds x8, x30, x8 - str x8, [x0, #16] - ldp x30, x8, [sp, #104] - adcs x8, x8, x9 - ldr x9, [sp, #128] // 8-byte Folded Reload - adcs x9, x9, x10 - ldr x10, [sp, #160] // 8-byte Folded Reload - adcs x10, x10, x11 - ldr x11, [sp, #176] // 8-byte Folded Reload - adcs x11, x11, x12 - ldr x12, [sp, #200] // 8-byte Folded Reload - adcs x12, x12, x2 - adcs x2, xzr, xzr - adds x8, x8, x30 - ldr x30, [sp, #120] // 8-byte Folded Reload - adcs x9, x9, x30 - ldr x30, [sp, #152] // 8-byte Folded Reload - adcs x10, x10, x30 - ldr x30, [sp, #168] // 8-byte Folded Reload - adcs x11, x11, x30 - ldr x30, [sp, #192] // 8-byte Folded Reload - adcs x12, x12, x30 - ldr x30, [sp, #208] // 8-byte Folded Reload - adcs x2, x2, x30 - ldr x30, [sp, #24] // 8-byte Folded Reload - adds x8, x30, x8 - str x8, [x0, #24] - ldp x8, x30, [sp, #32] - adcs x8, x8, x9 - ldr x9, [sp, #48] // 8-byte Folded Reload - adcs x9, x9, x10 - ldr x10, [sp, #64] // 8-byte Folded Reload - adcs x10, x10, x11 - ldr x11, [sp, #80] // 8-byte Folded Reload - adcs x11, x11, x12 - ldr x12, [sp, #184] // 8-byte Folded Reload - adcs x12, x12, x2 - adcs x2, xzr, xzr - adds x8, x8, x30 - ldr x30, [sp, #56] // 8-byte Folded Reload - adcs x9, x9, x30 - ldr x30, [sp, #72] // 8-byte Folded Reload - adcs x10, x10, x30 - ldr x30, [sp, #88] // 8-byte Folded Reload - adcs x11, x11, x30 - ldr x30, [sp, #96] // 8-byte Folded Reload - adcs x12, x12, x30 - ldr x30, [sp, #136] // 8-byte Folded Reload - adcs x2, x2, x30 - adds x8, x22, x8 - str x8, [x0, #32] - adcs x8, x19, x9 - adcs x9, x20, x10 - adcs x10, x24, x11 - adcs x11, x27, x12 - adcs x12, x28, x2 - adcs x2, xzr, xzr - adds x8, x8, x7 - adcs x9, x9, x21 - adcs x10, x10, x23 - adcs x11, x11, x26 - adcs x12, x12, x29 - ldr x7, [sp, #16] // 8-byte Folded Reload - adcs x2, x2, x7 - adds x8, x25, x8 - str x8, [x0, #40] - adcs x8, x4, x9 - adcs x9, x18, x10 - adcs x10, x16, x11 - adcs x11, x15, x12 - adcs x12, x14, x2 - adcs x14, xzr, xzr - adds x8, x8, x6 - str x8, [x0, #48] - adcs x8, x9, x5 - str x8, [x0, #56] - 
adcs x8, x10, x3 - str x8, [x0, #64] - adcs x8, x11, x1 - str x8, [x0, #72] - adcs x8, x12, x17 - str x8, [x0, #80] - adcs x8, x14, x13 - str x8, [x0, #88] - add sp, sp, #400 // =400 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end81: - .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L - - .globl mcl_fpDbl_sqrPre6L - .align 2 - .type mcl_fpDbl_sqrPre6L,@function -mcl_fpDbl_sqrPre6L: // @mcl_fpDbl_sqrPre6L -// BB#0: - stp x20, x19, [sp, #-16]! - ldp x8, x9, [x1, #8] - ldp x15, x10, [x1, #32] - ldp x11, x13, [x1] - ldr x12, [x1] - ldp x17, x14, [x1, #32] - ldr x16, [x1, #24] - mul x18, x11, x11 - umulh x2, x10, x11 - mul x3, x15, x11 - mul x4, x16, x11 - umulh x5, x9, x11 - mul x6, x9, x11 - umulh x7, x8, x11 - mul x19, x8, x11 - str x18, [x0] - umulh x18, x11, x11 - adds x18, x18, x19 - adcs x6, x7, x6 - adcs x4, x5, x4 - umulh x5, x16, x11 - adcs x3, x5, x3 - mul x5, x10, x11 - umulh x11, x15, x11 - adcs x11, x11, x5 - adcs x2, x2, xzr - adds x18, x19, x18 - ldp x5, x19, [x1, #16] - str x18, [x0, #8] - mul x18, x8, x8 - adcs x18, x18, x6 - mul x6, x9, x8 - adcs x4, x6, x4 - mul x6, x16, x8 - adcs x3, x6, x3 - mul x6, x15, x8 - adcs x11, x6, x11 - mul x6, x10, x8 - adcs x2, x6, x2 - adcs x6, xzr, xzr - adds x18, x18, x7 - ldr x7, [x1, #32] - umulh x10, x10, x8 - umulh x15, x15, x8 - umulh x16, x16, x8 - umulh x9, x9, x8 - umulh x8, x8, x8 - adcs x8, x4, x8 - adcs x9, x3, x9 - ldp x3, x4, [x1] - adcs x11, x11, x16 - mul x16, x12, x5 - adcs x15, x2, x15 - mul x2, x14, x5 - adcs x10, x6, x10 - mul x6, x7, x5 - adds x16, x16, x18 - mul x18, x19, x5 - str x16, [x0, #16] - mul x16, x13, x5 - adcs x8, x16, x8 - mul x16, x5, x5 - adcs x9, x16, x9 - umulh x16, x7, x5 - adcs x11, x18, x11 - adcs x15, x6, x15 - umulh x6, x12, x5 - adcs x10, x2, x10 - adcs x2, xzr, xzr - adds x8, x8, x6 - umulh x6, x13, x5 - adcs x9, x9, x6 - umulh x6, x5, x5 - adcs x11, x11, x6 - umulh x6, x19, x5 - adcs x15, x15, x6 - adcs x10, x10, x16 - umulh x5, x14, x5 - adcs x2, x2, x5 - mul x5, x12, x19 - adds x8, x5, x8 - ldp x16, x5, [x1, #16] - ldr x1, [x1, #40] - str x8, [x0, #24] - mul x8, x13, x19 - adcs x8, x8, x9 - mul x9, x14, x19 - adcs x11, x18, x11 - mul x18, x19, x19 - adcs x15, x18, x15 - mul x18, x7, x19 - umulh x14, x14, x19 - umulh x7, x7, x19 - umulh x13, x13, x19 - umulh x12, x12, x19 - umulh x19, x19, x19 - adcs x10, x18, x10 - mul x18, x3, x17 - adcs x9, x9, x2 - adcs x2, xzr, xzr - adds x8, x8, x12 - mul x12, x1, x17 - adcs x11, x11, x13 - mul x13, x5, x17 - adcs x15, x15, x6 - mul x6, x16, x17 - adcs x10, x10, x19 - mul x19, x4, x17 - adcs x9, x9, x7 - mul x7, x17, x17 - adcs x14, x2, x14 - umulh x2, x1, x17 - adds x8, x18, x8 - umulh x18, x5, x17 - str x8, [x0, #32] - umulh x8, x16, x17 - adcs x11, x19, x11 - umulh x19, x4, x17 - adcs x15, x6, x15 - umulh x6, x3, x17 - umulh x17, x17, x17 - adcs x10, x13, x10 - mul x13, x3, x1 - adcs x9, x7, x9 - adcs x14, x12, x14 - adcs x7, xzr, xzr - adds x11, x11, x6 - mul x6, x5, x1 - adcs x15, x15, x19 - mul x19, x16, x1 - adcs x8, x10, x8 - mul x10, x4, x1 - adcs x9, x9, x18 - mul x18, x1, x1 - umulh x3, x3, x1 - umulh x4, x4, x1 - umulh x16, x16, x1 - umulh x5, x5, x1 - umulh x1, x1, x1 - adcs x14, x14, x17 - adcs x17, x7, x2 - adds x11, x13, x11 - str x11, [x0, #40] - adcs x10, x10, x15 - adcs x8, x19, x8 - adcs x9, x6, x9 - adcs x11, x12, x14 - adcs x12, x18, x17 - adcs x13, xzr, xzr - adds x10, x10, x3 - adcs x8, x8, x4 - stp 
x10, x8, [x0, #48] - adcs x8, x9, x16 - str x8, [x0, #64] - adcs x8, x11, x5 - str x8, [x0, #72] - adcs x8, x12, x2 - str x8, [x0, #80] - adcs x8, x13, x1 - str x8, [x0, #88] - ldp x20, x19, [sp], #16 - ret -.Lfunc_end82: - .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L - - .globl mcl_fp_mont6L - .align 2 - .type mcl_fp_mont6L,@function -mcl_fp_mont6L: // @mcl_fp_mont6L -// BB#0: - stp x28, x27, [sp, #-96]! - stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - sub sp, sp, #48 // =48 - str x0, [sp, #24] // 8-byte Folded Spill - ldr x5, [x2] - ldp x0, x4, [x1, #32] - ldp x16, x18, [x1, #16] - ldp x10, x1, [x1] - ldur x12, [x3, #-8] - str x12, [sp, #40] // 8-byte Folded Spill - ldp x11, x8, [x3, #32] - str x8, [sp, #32] // 8-byte Folded Spill - ldp x13, x17, [x3, #16] - ldp x14, x15, [x3] - ldr x3, [x2, #8] - umulh x6, x4, x5 - mul x7, x4, x5 - umulh x19, x0, x5 - mul x20, x0, x5 - umulh x21, x18, x5 - mul x22, x18, x5 - umulh x23, x16, x5 - mul x24, x16, x5 - umulh x25, x1, x5 - mul x26, x1, x5 - umulh x27, x10, x5 - mul x5, x10, x5 - umulh x28, x3, x4 - adds x26, x27, x26 - mul x27, x5, x12 - adcs x24, x25, x24 - mul x25, x27, x8 - mul x29, x27, x11 - mul x30, x27, x17 - adcs x22, x23, x22 - mul x23, x27, x13 - adcs x20, x21, x20 - mul x21, x27, x15 - adcs x7, x19, x7 - umulh x19, x27, x14 - adcs x6, x6, xzr - adds x19, x19, x21 - umulh x21, x27, x15 - adcs x21, x21, x23 - umulh x23, x27, x13 - adcs x23, x23, x30 - umulh x30, x27, x17 - adcs x29, x30, x29 - umulh x30, x27, x11 - adcs x25, x30, x25 - umulh x30, x27, x8 - mul x27, x27, x14 - adcs x30, x30, xzr - cmn x27, x5 - mul x5, x3, x4 - umulh x27, x3, x0 - adcs x19, x19, x26 - mul x26, x3, x0 - adcs x21, x21, x24 - mul x24, x3, x18 - adcs x22, x23, x22 - mul x23, x3, x16 - adcs x20, x29, x20 - mul x29, x3, x1 - adcs x7, x25, x7 - umulh x25, x3, x10 - adcs x30, x30, x6 - adcs x6, xzr, xzr - adds x25, x25, x29 - umulh x29, x3, x1 - adcs x23, x29, x23 - umulh x29, x3, x16 - adcs x24, x29, x24 - umulh x29, x3, x18 - mul x3, x3, x10 - adcs x26, x29, x26 - adcs x27, x27, x5 - adcs x29, x28, xzr - adds x3, x19, x3 - adcs x5, x21, x25 - mul x21, x3, x12 - adcs x28, x22, x23 - umulh x22, x21, x8 - mul x23, x21, x8 - mul x25, x21, x11 - mul x9, x21, x17 - adcs x19, x20, x24 - mul x8, x21, x13 - adcs x20, x7, x26 - mul x24, x21, x15 - adcs x30, x30, x27 - umulh x26, x21, x14 - adcs x6, x6, x29 - adcs x7, xzr, xzr - adds x24, x26, x24 - umulh x26, x21, x15 - adcs x29, x26, x8 - umulh x8, x21, x13 - adcs x26, x8, x9 - umulh x8, x21, x17 - adcs x27, x8, x25 - umulh x8, x21, x11 - mul x9, x21, x14 - adcs x8, x8, x23 - adcs x21, x22, xzr - cmn x9, x3 - ldp x23, x3, [x2, #16] - umulh x9, x23, x4 - adcs x5, x24, x5 - mul x22, x23, x4 - adcs x24, x29, x28 - mul x25, x23, x0 - adcs x19, x26, x19 - mul x26, x23, x18 - adcs x20, x27, x20 - mul x27, x23, x16 - adcs x8, x8, x30 - mul x28, x23, x1 - adcs x21, x21, x6 - umulh x6, x23, x10 - adcs x7, x7, xzr - adds x6, x6, x28 - umulh x28, x23, x1 - adcs x27, x28, x27 - umulh x28, x23, x16 - adcs x26, x28, x26 - umulh x28, x23, x18 - adcs x25, x28, x25 - umulh x28, x23, x0 - mul x23, x23, x10 - adcs x22, x28, x22 - adcs x9, x9, xzr - adds x23, x5, x23 - adcs x5, x24, x6 - mul x29, x23, x12 - adcs x6, x19, x27 - ldr x12, [sp, #32] // 8-byte Folded Reload - mul x28, x29, x12 - mul x27, x29, x11 - mul x30, x29, x17 - adcs x19, x20, x26 - mul x26, x29, x13 - adcs x20, x8, x25 - mul x8, x29, x15 - adcs x21, x21, x22 - umulh x24, 
x29, x14 - adcs x22, x7, x9 - adcs x7, xzr, xzr - adds x24, x24, x8 - umulh x8, x29, x15 - adcs x25, x8, x26 - umulh x8, x29, x13 - adcs x26, x8, x30 - umulh x8, x29, x17 - adcs x27, x8, x27 - umulh x8, x29, x11 - adcs x28, x8, x28 - umulh x8, x29, x12 - mul x9, x29, x14 - adcs x29, x8, xzr - cmn x9, x23 - ldp x23, x8, [x2, #32] - umulh x30, x3, x4 - adcs x2, x24, x5 - mul x5, x3, x4 - adcs x6, x25, x6 - mul x24, x3, x0 - adcs x19, x26, x19 - mul x25, x3, x18 - adcs x20, x27, x20 - mul x26, x3, x16 - adcs x21, x28, x21 - mul x27, x3, x1 - adcs x22, x29, x22 - mov x9, x10 - umulh x28, x3, x9 - adcs x7, x7, xzr - adds x27, x28, x27 - umulh x28, x3, x1 - adcs x26, x28, x26 - umulh x28, x3, x16 - adcs x25, x28, x25 - umulh x28, x3, x18 - adcs x24, x28, x24 - umulh x28, x3, x0 - mul x3, x3, x9 - adcs x5, x28, x5 - adcs x29, x30, xzr - adds x2, x2, x3 - adcs x3, x6, x27 - ldr x10, [sp, #40] // 8-byte Folded Reload - mul x6, x2, x10 - adcs x19, x19, x26 - mul x26, x6, x12 - mul x27, x6, x11 - mov x30, x17 - mul x28, x6, x30 - adcs x20, x20, x25 - mul x25, x6, x13 - adcs x21, x21, x24 - mov x17, x15 - mul x24, x6, x17 - adcs x5, x22, x5 - umulh x22, x6, x14 - adcs x29, x7, x29 - adcs x7, xzr, xzr - adds x22, x22, x24 - umulh x24, x6, x17 - adcs x24, x24, x25 - umulh x25, x6, x13 - mov x15, x13 - adcs x25, x25, x28 - umulh x28, x6, x30 - mov x13, x30 - adcs x27, x28, x27 - umulh x28, x6, x11 - adcs x26, x28, x26 - umulh x28, x6, x12 - mul x6, x6, x14 - adcs x28, x28, xzr - cmn x6, x2 - umulh x2, x23, x4 - mul x6, x23, x4 - adcs x3, x22, x3 - umulh x22, x23, x0 - adcs x19, x24, x19 - mul x24, x23, x0 - adcs x20, x25, x20 - mul x25, x23, x18 - adcs x21, x27, x21 - mul x27, x23, x16 - adcs x5, x26, x5 - mul x26, x23, x1 - adcs x29, x28, x29 - umulh x28, x23, x9 - adcs x7, x7, xzr - adds x26, x28, x26 - umulh x28, x23, x1 - adcs x27, x28, x27 - umulh x28, x23, x16 - adcs x25, x28, x25 - umulh x28, x23, x18 - mul x23, x23, x9 - adcs x24, x28, x24 - umulh x28, x8, x4 - str x28, [sp, #16] // 8-byte Folded Spill - mul x28, x8, x4 - adcs x6, x22, x6 - adcs x2, x2, xzr - adds x3, x3, x23 - adcs x19, x19, x26 - mul x22, x3, x10 - adcs x20, x20, x27 - mul x23, x22, x12 - mul x26, x22, x11 - mul x27, x22, x13 - adcs x21, x21, x25 - mul x25, x22, x15 - adcs x5, x5, x24 - mul x24, x22, x17 - adcs x4, x29, x6 - umulh x6, x22, x14 - adcs x2, x7, x2 - adcs x7, xzr, xzr - adds x6, x6, x24 - umulh x24, x22, x17 - adcs x24, x24, x25 - umulh x25, x22, x15 - adcs x25, x25, x27 - umulh x27, x22, x13 - adcs x26, x27, x26 - umulh x27, x22, x11 - adcs x23, x27, x23 - umulh x27, x22, x12 - mul x22, x22, x14 - adcs x27, x27, xzr - cmn x22, x3 - umulh x3, x8, x0 - mul x0, x8, x0 - umulh x22, x8, x18 - mul x18, x8, x18 - umulh x29, x8, x16 - mul x16, x8, x16 - umulh x30, x8, x1 - mul x1, x8, x1 - umulh x10, x8, x9 - mul x8, x8, x9 - adcs x6, x6, x19 - adcs x19, x24, x20 - adcs x20, x25, x21 - adcs x5, x26, x5 - adcs x9, x23, x4 - str x9, [sp, #8] // 8-byte Folded Spill - adcs x2, x27, x2 - adcs x7, x7, xzr - adds x9, x10, x1 - adcs x16, x30, x16 - adcs x18, x29, x18 - adcs x0, x22, x0 - adcs x1, x3, x28 - ldr x10, [sp, #16] // 8-byte Folded Reload - adcs x3, x10, xzr - adds x8, x6, x8 - adcs x9, x19, x9 - ldr x10, [sp, #40] // 8-byte Folded Reload - mul x4, x8, x10 - adcs x16, x20, x16 - umulh x6, x4, x12 - mul x19, x4, x12 - mov x30, x11 - umulh x20, x4, x30 - mul x21, x4, x30 - umulh x22, x4, x13 - mul x23, x4, x13 - mov x29, x13 - umulh x24, x4, x15 - mul x25, x4, x15 - umulh x26, x4, x17 - mul x27, x4, x17 - umulh x28, x4, 
x14 - mul x4, x4, x14 - adcs x18, x5, x18 - ldr x10, [sp, #8] // 8-byte Folded Reload - adcs x10, x10, x0 - adcs x0, x2, x1 - adcs x1, x7, x3 - adcs x2, xzr, xzr - adds x3, x28, x27 - adcs x5, x26, x25 - adcs x7, x24, x23 - adcs x21, x22, x21 - adcs x19, x20, x19 - adcs x6, x6, xzr - cmn x4, x8 - adcs x8, x3, x9 - adcs x9, x5, x16 - adcs x16, x7, x18 - adcs x10, x21, x10 - adcs x18, x19, x0 - adcs x0, x6, x1 - adcs x1, x2, xzr - subs x13, x8, x14 - sbcs x12, x9, x17 - sbcs x11, x16, x15 - sbcs x14, x10, x29 - sbcs x15, x18, x30 - ldr x17, [sp, #32] // 8-byte Folded Reload - sbcs x17, x0, x17 - sbcs x1, x1, xzr - tst x1, #0x1 - csel x8, x8, x13, ne - csel x9, x9, x12, ne - csel x11, x16, x11, ne - csel x10, x10, x14, ne - csel x12, x18, x15, ne - csel x13, x0, x17, ne - ldr x14, [sp, #24] // 8-byte Folded Reload - stp x8, x9, [x14] - stp x11, x10, [x14, #16] - stp x12, x13, [x14, #32] - add sp, sp, #48 // =48 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end83: - .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L - - .globl mcl_fp_montNF6L - .align 2 - .type mcl_fp_montNF6L,@function -mcl_fp_montNF6L: // @mcl_fp_montNF6L -// BB#0: - stp x28, x27, [sp, #-96]! - stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - sub sp, sp, #112 // =112 - str x0, [sp, #96] // 8-byte Folded Spill - ldp x16, x12, [x1, #32] - ldp x13, x11, [x1, #16] - ldp x17, x0, [x1] - ldur x18, [x3, #-8] - ldr x9, [x3, #32] - str x9, [sp, #104] // 8-byte Folded Spill - ldr x14, [x3, #40] - ldp x4, x10, [x3, #16] - ldr x15, [x3] - str x15, [sp, #8] // 8-byte Folded Spill - ldr x9, [x3, #8] - ldp x5, x3, [x2] - ldp x6, x7, [x2, #16] - ldp x19, x2, [x2, #32] - umulh x20, x12, x5 - mul x21, x12, x5 - umulh x22, x16, x5 - mul x23, x16, x5 - umulh x24, x11, x5 - mul x25, x11, x5 - mov x1, x13 - umulh x26, x1, x5 - mul x27, x1, x5 - mov x13, x0 - umulh x28, x13, x5 - mul x29, x13, x5 - mov x8, x17 - umulh x30, x8, x5 - mul x5, x8, x5 - adds x29, x30, x29 - mul x30, x3, x12 - adcs x27, x28, x27 - mul x28, x3, x16 - adcs x25, x26, x25 - mul x26, x3, x11 - adcs x23, x24, x23 - mul x24, x5, x18 - adcs x21, x22, x21 - mul x22, x24, x15 - adcs x20, x20, xzr - cmn x22, x5 - mul x5, x3, x1 - mov x0, x9 - mul x22, x24, x0 - adcs x22, x22, x29 - mul x29, x24, x4 - adcs x17, x29, x27 - mul x29, x24, x10 - adcs x25, x29, x25 - ldr x9, [sp, #104] // 8-byte Folded Reload - mul x29, x24, x9 - adcs x23, x29, x23 - mul x29, x24, x14 - adcs x21, x29, x21 - umulh x29, x24, x15 - adcs x20, x20, xzr - adds x22, x22, x29 - umulh x29, x24, x0 - adcs x15, x17, x29 - umulh x29, x24, x4 - mov x17, x4 - adcs x25, x25, x29 - umulh x29, x24, x10 - adcs x23, x23, x29 - umulh x29, x24, x9 - adcs x21, x21, x29 - mul x29, x3, x13 - umulh x24, x24, x14 - adcs x20, x20, x24 - umulh x24, x3, x8 - adds x24, x24, x29 - umulh x29, x3, x13 - adcs x5, x29, x5 - umulh x29, x3, x1 - adcs x26, x29, x26 - umulh x29, x3, x11 - adcs x28, x29, x28 - umulh x29, x3, x16 - adcs x29, x29, x30 - umulh x30, x3, x12 - mul x3, x3, x8 - adcs x30, x30, xzr - adds x3, x3, x22 - umulh x22, x6, x12 - adcs x24, x24, x15 - mul x27, x6, x12 - adcs x5, x5, x25 - mul x25, x6, x16 - adcs x23, x26, x23 - mul x26, x6, x11 - adcs x21, x28, x21 - mul x28, x3, x18 - mov x4, x18 - adcs x20, x29, x20 - ldr x18, [sp, #8] // 8-byte Folded Reload - mul x29, x28, x18 - adcs x30, x30, xzr - cmn x29, x3 - mul x3, 
x6, x1 - mul x29, x28, x0 - adcs x24, x29, x24 - mul x29, x28, x17 - adcs x5, x29, x5 - mul x29, x28, x10 - adcs x23, x29, x23 - mul x29, x28, x9 - adcs x21, x29, x21 - mul x29, x28, x14 - adcs x20, x29, x20 - umulh x29, x28, x18 - adcs x30, x30, xzr - adds x24, x24, x29 - umulh x29, x28, x0 - adcs x5, x5, x29 - umulh x29, x28, x17 - adcs x23, x23, x29 - umulh x29, x28, x10 - adcs x21, x21, x29 - umulh x29, x28, x9 - adcs x20, x20, x29 - mul x29, x6, x13 - umulh x28, x28, x14 - adcs x28, x30, x28 - umulh x30, x6, x8 - adds x29, x30, x29 - umulh x30, x6, x13 - adcs x3, x30, x3 - umulh x30, x6, x1 - adcs x26, x30, x26 - umulh x30, x6, x11 - adcs x25, x30, x25 - umulh x30, x6, x16 - mul x6, x6, x8 - adcs x27, x30, x27 - umulh x30, x7, x12 - adcs x22, x22, xzr - adds x6, x6, x24 - mul x24, x7, x12 - adcs x5, x29, x5 - umulh x29, x7, x16 - adcs x3, x3, x23 - mul x23, x7, x16 - adcs x21, x26, x21 - mul x26, x7, x11 - adcs x20, x25, x20 - mul x25, x6, x4 - adcs x27, x27, x28 - mul x28, x25, x18 - adcs x22, x22, xzr - cmn x28, x6 - mul x6, x7, x1 - mul x28, x25, x0 - adcs x5, x28, x5 - mul x28, x25, x17 - adcs x3, x28, x3 - mul x28, x25, x10 - adcs x21, x28, x21 - mul x28, x25, x9 - adcs x20, x28, x20 - mul x28, x25, x14 - adcs x27, x28, x27 - umulh x28, x25, x18 - adcs x22, x22, xzr - adds x5, x5, x28 - umulh x28, x25, x0 - adcs x3, x3, x28 - umulh x28, x25, x17 - adcs x21, x21, x28 - umulh x28, x25, x10 - adcs x20, x20, x28 - umulh x28, x25, x9 - adcs x27, x27, x28 - mul x28, x7, x13 - umulh x25, x25, x14 - adcs x22, x22, x25 - umulh x25, x7, x8 - adds x25, x25, x28 - umulh x28, x7, x13 - adcs x6, x28, x6 - umulh x28, x7, x1 - adcs x26, x28, x26 - umulh x28, x7, x11 - mul x7, x7, x8 - adcs x23, x28, x23 - umulh x9, x19, x12 - str x9, [sp, #16] // 8-byte Folded Spill - adcs x24, x29, x24 - mul x9, x19, x12 - str x9, [sp, #32] // 8-byte Folded Spill - adcs x30, x30, xzr - adds x5, x7, x5 - umulh x7, x19, x16 - adcs x3, x25, x3 - mul x25, x19, x16 - adcs x6, x6, x21 - umulh x21, x19, x11 - adcs x20, x26, x20 - mul x26, x19, x11 - adcs x23, x23, x27 - mul x27, x5, x4 - adcs x22, x24, x22 - mul x24, x27, x18 - adcs x30, x30, xzr - cmn x24, x5 - mov x28, x1 - mul x5, x19, x28 - mul x24, x19, x13 - umulh x1, x19, x8 - umulh x9, x19, x13 - umulh x15, x19, x28 - mul x19, x19, x8 - umulh x29, x2, x12 - str x29, [sp, #88] // 8-byte Folded Spill - mul x29, x2, x12 - umulh x12, x2, x16 - str x12, [sp, #80] // 8-byte Folded Spill - mul x12, x2, x16 - str x12, [sp, #72] // 8-byte Folded Spill - umulh x12, x2, x11 - mul x11, x2, x11 - stp x11, x12, [sp, #56] - umulh x11, x2, x28 - str x11, [sp, #48] // 8-byte Folded Spill - mul x11, x2, x28 - str x11, [sp, #40] // 8-byte Folded Spill - umulh x11, x2, x13 - str x11, [sp, #24] // 8-byte Folded Spill - mul x13, x2, x13 - umulh x16, x2, x8 - mul x28, x2, x8 - mul x2, x27, x0 - adcs x2, x2, x3 - mul x3, x27, x17 - adcs x3, x3, x6 - mul x6, x27, x10 - adcs x6, x6, x20 - ldr x8, [sp, #104] // 8-byte Folded Reload - mul x20, x27, x8 - adcs x20, x20, x23 - mul x23, x27, x14 - adcs x22, x23, x22 - adcs x23, x30, xzr - umulh x30, x27, x18 - adds x2, x2, x30 - umulh x30, x27, x0 - adcs x3, x3, x30 - umulh x30, x27, x17 - mov x12, x17 - adcs x6, x6, x30 - umulh x30, x27, x10 - adcs x20, x20, x30 - umulh x30, x27, x8 - mov x11, x8 - adcs x22, x22, x30 - mov x30, x14 - umulh x27, x27, x30 - adcs x23, x23, x27 - adds x8, x1, x24 - adcs x9, x9, x5 - adcs x14, x15, x26 - adcs x5, x21, x25 - ldr x15, [sp, #32] // 8-byte Folded Reload - adcs x7, x7, x15 - ldr x15, [sp, #16] // 
8-byte Folded Reload - adcs x21, x15, xzr - adds x2, x19, x2 - adcs x8, x8, x3 - adcs x9, x9, x6 - mov x24, x4 - mul x3, x2, x24 - adcs x14, x14, x20 - mul x6, x3, x30 - adcs x5, x5, x22 - mul x19, x3, x11 - adcs x7, x7, x23 - mul x20, x3, x18 - adcs x21, x21, xzr - cmn x20, x2 - mul x2, x3, x10 - mul x20, x3, x0 - adcs x8, x20, x8 - mul x20, x3, x12 - adcs x9, x20, x9 - umulh x20, x3, x30 - adcs x14, x2, x14 - umulh x2, x3, x11 - mov x27, x11 - adcs x5, x19, x5 - mov x11, x10 - umulh x19, x3, x11 - adcs x6, x6, x7 - umulh x7, x3, x18 - adcs x21, x21, xzr - adds x8, x8, x7 - umulh x7, x3, x12 - umulh x3, x3, x0 - adcs x9, x9, x3 - adcs x10, x14, x7 - adcs x3, x5, x19 - adcs x2, x6, x2 - adcs x5, x21, x20 - adds x15, x16, x13 - ldr x13, [sp, #40] // 8-byte Folded Reload - ldr x14, [sp, #24] // 8-byte Folded Reload - adcs x16, x14, x13 - ldp x14, x13, [sp, #48] - adcs x17, x14, x13 - ldp x14, x13, [sp, #64] - adcs x1, x14, x13 - ldr x13, [sp, #80] // 8-byte Folded Reload - adcs x4, x13, x29 - ldr x13, [sp, #88] // 8-byte Folded Reload - adcs x6, x13, xzr - adds x8, x28, x8 - adcs x9, x15, x9 - mul x15, x8, x24 - adcs x10, x16, x10 - mul x16, x15, x30 - mul x14, x15, x27 - mul x7, x15, x11 - mul x19, x15, x12 - mul x20, x15, x0 - mul x21, x15, x18 - umulh x22, x15, x30 - umulh x23, x15, x27 - umulh x24, x15, x11 - mov x28, x11 - umulh x25, x15, x12 - umulh x26, x15, x0 - umulh x15, x15, x18 - adcs x17, x17, x3 - adcs x1, x1, x2 - adcs x2, x4, x5 - adcs x3, x6, xzr - cmn x21, x8 - adcs x8, x20, x9 - adcs x9, x19, x10 - adcs x10, x7, x17 - adcs x17, x14, x1 - adcs x16, x16, x2 - adcs x11, x3, xzr - adds x8, x8, x15 - adcs x9, x9, x26 - adcs x10, x10, x25 - adcs x15, x17, x24 - adcs x16, x16, x23 - adcs x17, x11, x22 - subs x3, x8, x18 - sbcs x2, x9, x0 - sbcs x11, x10, x12 - sbcs x14, x15, x28 - sbcs x18, x16, x27 - sbcs x0, x17, x30 - asr x1, x0, #63 - cmp x1, #0 // =0 - csel x8, x8, x3, lt - csel x9, x9, x2, lt - csel x10, x10, x11, lt - csel x11, x15, x14, lt - csel x12, x16, x18, lt - csel x13, x17, x0, lt - ldr x14, [sp, #96] // 8-byte Folded Reload - stp x8, x9, [x14] - stp x10, x11, [x14, #16] - stp x12, x13, [x14, #32] - add sp, sp, #112 // =112 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end84: - .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L - - .globl mcl_fp_montRed6L - .align 2 - .type mcl_fp_montRed6L,@function -mcl_fp_montRed6L: // @mcl_fp_montRed6L -// BB#0: - stp x26, x25, [sp, #-64]! 
- stp x24, x23, [sp, #16] - stp x22, x21, [sp, #32] - stp x20, x19, [sp, #48] - ldur x14, [x2, #-8] - ldp x9, x8, [x2, #32] - ldp x11, x10, [x2, #16] - ldp x13, x12, [x2] - ldp x16, x17, [x1, #80] - ldp x18, x2, [x1, #64] - ldp x3, x4, [x1, #48] - ldp x5, x6, [x1, #32] - ldp x7, x19, [x1, #16] - ldp x15, x1, [x1] - mul x20, x15, x14 - mul x21, x20, x8 - mul x22, x20, x9 - mul x23, x20, x10 - mul x24, x20, x11 - mul x25, x20, x12 - umulh x26, x20, x13 - adds x25, x26, x25 - umulh x26, x20, x12 - adcs x24, x26, x24 - umulh x26, x20, x11 - adcs x23, x26, x23 - umulh x26, x20, x10 - adcs x22, x26, x22 - umulh x26, x20, x9 - adcs x21, x26, x21 - umulh x26, x20, x8 - mul x20, x20, x13 - adcs x26, x26, xzr - cmn x15, x20 - adcs x15, x1, x25 - adcs x1, x7, x24 - mul x7, x15, x14 - adcs x19, x19, x23 - mul x20, x7, x8 - mul x23, x7, x9 - mul x24, x7, x10 - mul x25, x7, x11 - adcs x5, x5, x22 - mul x22, x7, x12 - adcs x6, x6, x21 - umulh x21, x7, x13 - adcs x3, x3, x26 - adcs x4, x4, xzr - adcs x18, x18, xzr - adcs x2, x2, xzr - adcs x16, x16, xzr - adcs x17, x17, xzr - adcs x26, xzr, xzr - adds x21, x21, x22 - umulh x22, x7, x12 - adcs x22, x22, x25 - umulh x25, x7, x11 - adcs x24, x25, x24 - umulh x25, x7, x10 - adcs x23, x25, x23 - umulh x25, x7, x9 - adcs x20, x25, x20 - umulh x25, x7, x8 - mul x7, x7, x13 - adcs x25, x25, xzr - cmn x7, x15 - adcs x15, x21, x1 - adcs x1, x22, x19 - mul x7, x15, x14 - adcs x5, x24, x5 - mul x19, x7, x8 - mul x21, x7, x9 - mul x22, x7, x10 - adcs x6, x23, x6 - mul x23, x7, x11 - adcs x3, x20, x3 - mul x20, x7, x12 - adcs x4, x25, x4 - umulh x24, x7, x13 - adcs x18, x18, xzr - adcs x2, x2, xzr - adcs x16, x16, xzr - adcs x17, x17, xzr - adcs x25, x26, xzr - adds x20, x24, x20 - umulh x24, x7, x12 - adcs x23, x24, x23 - umulh x24, x7, x11 - adcs x22, x24, x22 - umulh x24, x7, x10 - adcs x21, x24, x21 - umulh x24, x7, x9 - adcs x19, x24, x19 - umulh x24, x7, x8 - mul x7, x7, x13 - adcs x24, x24, xzr - cmn x7, x15 - adcs x15, x20, x1 - adcs x1, x23, x5 - mul x5, x15, x14 - adcs x6, x22, x6 - mul x7, x5, x8 - mul x20, x5, x9 - mul x22, x5, x10 - adcs x3, x21, x3 - mul x21, x5, x11 - adcs x4, x19, x4 - mul x19, x5, x12 - adcs x18, x24, x18 - umulh x23, x5, x13 - adcs x2, x2, xzr - adcs x16, x16, xzr - adcs x17, x17, xzr - adcs x24, x25, xzr - adds x19, x23, x19 - umulh x23, x5, x12 - adcs x21, x23, x21 - umulh x23, x5, x11 - adcs x22, x23, x22 - umulh x23, x5, x10 - adcs x20, x23, x20 - umulh x23, x5, x9 - adcs x7, x23, x7 - umulh x23, x5, x8 - mul x5, x5, x13 - adcs x23, x23, xzr - cmn x5, x15 - adcs x15, x19, x1 - adcs x1, x21, x6 - mul x5, x15, x14 - adcs x3, x22, x3 - mul x6, x5, x8 - mul x19, x5, x9 - mul x21, x5, x10 - adcs x4, x20, x4 - mul x20, x5, x11 - adcs x18, x7, x18 - mul x7, x5, x12 - adcs x2, x23, x2 - umulh x22, x5, x13 - adcs x16, x16, xzr - adcs x17, x17, xzr - adcs x23, x24, xzr - adds x7, x22, x7 - umulh x22, x5, x12 - adcs x20, x22, x20 - umulh x22, x5, x11 - adcs x21, x22, x21 - umulh x22, x5, x10 - adcs x19, x22, x19 - umulh x22, x5, x9 - adcs x6, x22, x6 - umulh x22, x5, x8 - mul x5, x5, x13 - adcs x22, x22, xzr - cmn x5, x15 - adcs x15, x7, x1 - adcs x1, x20, x3 - mul x14, x15, x14 - adcs x3, x21, x4 - mul x4, x14, x8 - mul x5, x14, x9 - mul x7, x14, x10 - adcs x18, x19, x18 - mul x19, x14, x11 - adcs x2, x6, x2 - mul x6, x14, x12 - adcs x16, x22, x16 - umulh x20, x14, x13 - adcs x17, x17, xzr - adcs x21, x23, xzr - adds x6, x20, x6 - umulh x20, x14, x12 - adcs x19, x20, x19 - umulh x20, x14, x11 - adcs x7, x20, x7 - umulh x20, x14, x10 - adcs 
x5, x20, x5 - umulh x20, x14, x9 - adcs x4, x20, x4 - umulh x20, x14, x8 - mul x14, x14, x13 - adcs x20, x20, xzr - cmn x14, x15 - adcs x14, x6, x1 - adcs x15, x19, x3 - adcs x18, x7, x18 - adcs x1, x5, x2 - adcs x16, x4, x16 - adcs x17, x20, x17 - adcs x2, x21, xzr - subs x13, x14, x13 - sbcs x12, x15, x12 - sbcs x11, x18, x11 - sbcs x10, x1, x10 - sbcs x9, x16, x9 - sbcs x8, x17, x8 - sbcs x2, x2, xzr - tst x2, #0x1 - csel x13, x14, x13, ne - csel x12, x15, x12, ne - csel x11, x18, x11, ne - csel x10, x1, x10, ne - csel x9, x16, x9, ne - csel x8, x17, x8, ne - stp x13, x12, [x0] - stp x11, x10, [x0, #16] - stp x9, x8, [x0, #32] - ldp x20, x19, [sp, #48] - ldp x22, x21, [sp, #32] - ldp x24, x23, [sp, #16] - ldp x26, x25, [sp], #64 - ret -.Lfunc_end85: - .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L - - .globl mcl_fp_addPre6L - .align 2 - .type mcl_fp_addPre6L,@function -mcl_fp_addPre6L: // @mcl_fp_addPre6L -// BB#0: - ldp x8, x9, [x2, #32] - ldp x10, x11, [x1, #32] - ldp x12, x13, [x2, #16] - ldp x14, x15, [x2] - ldp x16, x17, [x1] - ldp x18, x1, [x1, #16] - adds x14, x14, x16 - str x14, [x0] - adcs x14, x15, x17 - adcs x12, x12, x18 - stp x14, x12, [x0, #8] - adcs x12, x13, x1 - adcs x8, x8, x10 - stp x12, x8, [x0, #24] - adcs x9, x9, x11 - adcs x8, xzr, xzr - str x9, [x0, #40] - mov x0, x8 - ret -.Lfunc_end86: - .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L - - .globl mcl_fp_subPre6L - .align 2 - .type mcl_fp_subPre6L,@function -mcl_fp_subPre6L: // @mcl_fp_subPre6L -// BB#0: - ldp x8, x9, [x2, #32] - ldp x10, x11, [x1, #32] - ldp x12, x13, [x2, #16] - ldp x14, x15, [x2] - ldp x16, x17, [x1] - ldp x18, x1, [x1, #16] - subs x14, x16, x14 - str x14, [x0] - sbcs x14, x17, x15 - sbcs x12, x18, x12 - stp x14, x12, [x0, #8] - sbcs x12, x1, x13 - sbcs x8, x10, x8 - stp x12, x8, [x0, #24] - sbcs x9, x11, x9 - ngcs x8, xzr - and x8, x8, #0x1 - str x9, [x0, #40] - mov x0, x8 - ret -.Lfunc_end87: - .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L - - .globl mcl_fp_shr1_6L - .align 2 - .type mcl_fp_shr1_6L,@function -mcl_fp_shr1_6L: // @mcl_fp_shr1_6L -// BB#0: - ldp x8, x9, [x1] - ldp x10, x11, [x1, #16] - ldp x12, x13, [x1, #32] - extr x8, x9, x8, #1 - extr x9, x10, x9, #1 - extr x10, x11, x10, #1 - extr x11, x12, x11, #1 - extr x12, x13, x12, #1 - lsr x13, x13, #1 - stp x8, x9, [x0] - stp x10, x11, [x0, #16] - stp x12, x13, [x0, #32] - ret -.Lfunc_end88: - .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L - - .globl mcl_fp_add6L - .align 2 - .type mcl_fp_add6L,@function -mcl_fp_add6L: // @mcl_fp_add6L -// BB#0: - ldp x8, x9, [x2, #32] - ldp x10, x11, [x1, #32] - ldp x12, x13, [x2, #16] - ldp x14, x15, [x2] - ldp x16, x17, [x1] - ldp x18, x1, [x1, #16] - adds x14, x14, x16 - adcs x15, x15, x17 - ldp x16, x17, [x3, #32] - adcs x18, x12, x18 - adcs x1, x13, x1 - ldp x12, x2, [x3] - stp x14, x15, [x0] - stp x18, x1, [x0, #16] - adcs x8, x8, x10 - adcs x4, x9, x11 - stp x8, x4, [x0, #32] - adcs x5, xzr, xzr - ldp x9, x10, [x3, #16] - subs x13, x14, x12 - sbcs x12, x15, x2 - sbcs x11, x18, x9 - sbcs x10, x1, x10 - sbcs x9, x8, x16 - sbcs x8, x4, x17 - sbcs x14, x5, xzr - and w14, w14, #0x1 - tbnz w14, #0, .LBB89_2 -// BB#1: // %nocarry - stp x13, x12, [x0] - stp x11, x10, [x0, #16] - stp x9, x8, [x0, #32] -.LBB89_2: // %carry - ret -.Lfunc_end89: - .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L - - .globl mcl_fp_addNF6L - .align 2 - .type mcl_fp_addNF6L,@function -mcl_fp_addNF6L: // @mcl_fp_addNF6L -// BB#0: - ldp x8, x9, [x1, #32] - ldp x10, x11, [x2, #32] - ldp x12, x13, [x1, #16] - 
ldp x14, x15, [x1] - ldp x16, x17, [x2] - ldp x18, x1, [x2, #16] - adds x14, x16, x14 - adcs x15, x17, x15 - ldp x16, x17, [x3, #32] - adcs x12, x18, x12 - adcs x13, x1, x13 - ldp x18, x1, [x3] - adcs x8, x10, x8 - ldp x10, x2, [x3, #16] - adcs x9, x11, x9 - subs x11, x14, x18 - sbcs x18, x15, x1 - sbcs x10, x12, x10 - sbcs x1, x13, x2 - sbcs x16, x8, x16 - sbcs x17, x9, x17 - asr x2, x17, #63 - cmp x2, #0 // =0 - csel x11, x14, x11, lt - csel x14, x15, x18, lt - csel x10, x12, x10, lt - csel x12, x13, x1, lt - csel x8, x8, x16, lt - csel x9, x9, x17, lt - stp x11, x14, [x0] - stp x10, x12, [x0, #16] - stp x8, x9, [x0, #32] - ret -.Lfunc_end90: - .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L - - .globl mcl_fp_sub6L - .align 2 - .type mcl_fp_sub6L,@function -mcl_fp_sub6L: // @mcl_fp_sub6L -// BB#0: - ldp x12, x13, [x2, #32] - ldp x14, x15, [x1, #32] - ldp x10, x11, [x2, #16] - ldp x8, x9, [x2] - ldp x16, x17, [x1] - ldp x18, x1, [x1, #16] - subs x8, x16, x8 - sbcs x9, x17, x9 - stp x8, x9, [x0] - sbcs x10, x18, x10 - sbcs x11, x1, x11 - stp x10, x11, [x0, #16] - sbcs x12, x14, x12 - sbcs x13, x15, x13 - stp x12, x13, [x0, #32] - ngcs x14, xzr - and w14, w14, #0x1 - tbnz w14, #0, .LBB91_2 -// BB#1: // %nocarry - ret -.LBB91_2: // %carry - ldp x14, x15, [x3, #32] - ldp x16, x17, [x3] - ldp x18, x1, [x3, #16] - adds x8, x16, x8 - adcs x9, x17, x9 - adcs x10, x18, x10 - adcs x11, x1, x11 - adcs x12, x14, x12 - adcs x13, x15, x13 - stp x8, x9, [x0] - stp x10, x11, [x0, #16] - stp x12, x13, [x0, #32] - ret -.Lfunc_end91: - .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L - - .globl mcl_fp_subNF6L - .align 2 - .type mcl_fp_subNF6L,@function -mcl_fp_subNF6L: // @mcl_fp_subNF6L -// BB#0: - ldp x8, x9, [x2, #32] - ldp x10, x11, [x1, #32] - ldp x12, x13, [x2, #16] - ldp x14, x18, [x2] - ldp x16, x17, [x1, #16] - ldp x15, x1, [x1] - subs x14, x15, x14 - ldp x15, x2, [x3, #32] - sbcs x18, x1, x18 - sbcs x12, x16, x12 - ldp x16, x1, [x3, #16] - sbcs x13, x17, x13 - ldp x17, x3, [x3] - sbcs x8, x10, x8 - sbcs x9, x11, x9 - asr x10, x9, #63 - adds x11, x10, x10 - and x16, x10, x16 - and x1, x10, x1 - and x15, x10, x15 - and x2, x10, x2 - adcs x10, x10, x10 - orr x11, x11, x9, lsr #63 - and x11, x11, x17 - and x10, x10, x3 - adds x11, x11, x14 - adcs x10, x10, x18 - stp x11, x10, [x0] - adcs x10, x16, x12 - str x10, [x0, #16] - adcs x10, x1, x13 - adcs x8, x15, x8 - stp x10, x8, [x0, #24] - adcs x8, x2, x9 - str x8, [x0, #40] - ret -.Lfunc_end92: - .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L - - .globl mcl_fpDbl_add6L - .align 2 - .type mcl_fpDbl_add6L,@function -mcl_fpDbl_add6L: // @mcl_fpDbl_add6L -// BB#0: - stp x26, x25, [sp, #-64]! 
- stp x24, x23, [sp, #16] - stp x22, x21, [sp, #32] - stp x20, x19, [sp, #48] - ldp x8, x9, [x2, #80] - ldp x10, x11, [x1, #80] - ldp x12, x13, [x2, #64] - ldp x14, x15, [x1, #64] - ldp x16, x17, [x2, #48] - ldp x18, x4, [x1, #48] - ldp x5, x6, [x2, #32] - ldp x7, x19, [x1, #32] - ldp x20, x21, [x2, #16] - ldp x23, x2, [x2] - ldp x24, x25, [x1, #16] - ldp x22, x1, [x1] - adds x22, x23, x22 - str x22, [x0] - ldp x22, x23, [x3, #32] - adcs x1, x2, x1 - str x1, [x0, #8] - ldp x1, x2, [x3, #16] - adcs x20, x20, x24 - ldp x24, x3, [x3] - str x20, [x0, #16] - adcs x20, x21, x25 - adcs x5, x5, x7 - stp x20, x5, [x0, #24] - adcs x5, x6, x19 - str x5, [x0, #40] - adcs x16, x16, x18 - adcs x17, x17, x4 - adcs x12, x12, x14 - adcs x13, x13, x15 - adcs x8, x8, x10 - adcs x9, x9, x11 - adcs x10, xzr, xzr - subs x11, x16, x24 - sbcs x14, x17, x3 - sbcs x15, x12, x1 - sbcs x18, x13, x2 - sbcs x1, x8, x22 - sbcs x2, x9, x23 - sbcs x10, x10, xzr - tst x10, #0x1 - csel x10, x16, x11, ne - csel x11, x17, x14, ne - csel x12, x12, x15, ne - csel x13, x13, x18, ne - csel x8, x8, x1, ne - csel x9, x9, x2, ne - stp x10, x11, [x0, #48] - stp x12, x13, [x0, #64] - stp x8, x9, [x0, #80] - ldp x20, x19, [sp, #48] - ldp x22, x21, [sp, #32] - ldp x24, x23, [sp, #16] - ldp x26, x25, [sp], #64 - ret -.Lfunc_end93: - .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L - - .globl mcl_fpDbl_sub6L - .align 2 - .type mcl_fpDbl_sub6L,@function -mcl_fpDbl_sub6L: // @mcl_fpDbl_sub6L -// BB#0: - stp x26, x25, [sp, #-64]! - stp x24, x23, [sp, #16] - stp x22, x21, [sp, #32] - stp x20, x19, [sp, #48] - ldp x8, x9, [x2, #80] - ldp x10, x11, [x1, #80] - ldp x12, x13, [x2, #64] - ldp x14, x15, [x1, #64] - ldp x16, x17, [x2, #48] - ldp x18, x4, [x1, #48] - ldp x5, x6, [x2, #32] - ldp x7, x19, [x1, #32] - ldp x20, x21, [x2, #16] - ldp x22, x2, [x2] - ldp x24, x25, [x1, #16] - ldp x23, x1, [x1] - subs x22, x23, x22 - str x22, [x0] - ldp x22, x23, [x3, #32] - sbcs x1, x1, x2 - str x1, [x0, #8] - ldp x1, x2, [x3, #16] - sbcs x20, x24, x20 - ldp x24, x3, [x3] - str x20, [x0, #16] - sbcs x20, x25, x21 - sbcs x5, x7, x5 - stp x20, x5, [x0, #24] - sbcs x5, x19, x6 - sbcs x16, x18, x16 - sbcs x17, x4, x17 - sbcs x12, x14, x12 - sbcs x13, x15, x13 - sbcs x8, x10, x8 - sbcs x9, x11, x9 - ngcs x10, xzr - tst x10, #0x1 - csel x10, x23, xzr, ne - csel x11, x22, xzr, ne - csel x14, x2, xzr, ne - csel x15, x1, xzr, ne - csel x18, x3, xzr, ne - csel x1, x24, xzr, ne - adds x16, x1, x16 - stp x5, x16, [x0, #40] - adcs x16, x18, x17 - adcs x12, x15, x12 - stp x16, x12, [x0, #56] - adcs x12, x14, x13 - adcs x8, x11, x8 - stp x12, x8, [x0, #72] - adcs x8, x10, x9 - str x8, [x0, #88] - ldp x20, x19, [sp, #48] - ldp x22, x21, [sp, #32] - ldp x24, x23, [sp, #16] - ldp x26, x25, [sp], #64 - ret -.Lfunc_end94: - .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L - - .globl mcl_fp_mulUnitPre7L - .align 2 - .type mcl_fp_mulUnitPre7L,@function -mcl_fp_mulUnitPre7L: // @mcl_fp_mulUnitPre7L -// BB#0: - ldp x10, x8, [x1, #40] - ldp x14, x9, [x1, #24] - ldp x11, x12, [x1] - ldr x13, [x1, #16] - mul x15, x11, x2 - mul x16, x12, x2 - umulh x11, x11, x2 - mul x17, x13, x2 - umulh x12, x12, x2 - mul x18, x14, x2 - umulh x13, x13, x2 - mul x1, x9, x2 - umulh x14, x14, x2 - mul x3, x10, x2 - umulh x9, x9, x2 - mul x4, x8, x2 - umulh x10, x10, x2 - umulh x8, x8, x2 - adds x11, x11, x16 - stp x15, x11, [x0] - adcs x11, x12, x17 - str x11, [x0, #16] - adcs x11, x13, x18 - str x11, [x0, #24] - adcs x11, x14, x1 - adcs x9, x9, x3 - stp x11, x9, [x0, #32] - adcs x9, x10, x4 - 
adcs x8, x8, xzr - stp x9, x8, [x0, #48] - ret -.Lfunc_end95: - .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L - - .globl mcl_fpDbl_mulPre7L - .align 2 - .type mcl_fpDbl_mulPre7L,@function -mcl_fpDbl_mulPre7L: // @mcl_fpDbl_mulPre7L -// BB#0: - stp x28, x27, [sp, #-96]! - stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - sub sp, sp, #624 // =624 - ldp x8, x9, [x1] - ldp x10, x11, [x1, #24] - ldp x12, x13, [x1, #40] - ldp x14, x15, [x2] - ldp x16, x18, [x1, #16] - mul x17, x8, x14 - str x17, [sp, #528] // 8-byte Folded Spill - umulh x17, x13, x14 - str x17, [sp, #616] // 8-byte Folded Spill - mul x17, x13, x14 - str x17, [sp, #608] // 8-byte Folded Spill - umulh x17, x12, x14 - str x17, [sp, #592] // 8-byte Folded Spill - mul x17, x12, x14 - str x17, [sp, #568] // 8-byte Folded Spill - umulh x17, x11, x14 - str x17, [sp, #552] // 8-byte Folded Spill - mul x17, x11, x14 - str x17, [sp, #512] // 8-byte Folded Spill - umulh x17, x10, x14 - str x17, [sp, #496] // 8-byte Folded Spill - mul x17, x10, x14 - str x17, [sp, #456] // 8-byte Folded Spill - umulh x17, x16, x14 - str x17, [sp, #424] // 8-byte Folded Spill - mul x17, x16, x14 - str x17, [sp, #368] // 8-byte Folded Spill - umulh x17, x9, x14 - str x17, [sp, #352] // 8-byte Folded Spill - mul x17, x9, x14 - str x17, [sp, #304] // 8-byte Folded Spill - umulh x14, x8, x14 - str x14, [sp, #272] // 8-byte Folded Spill - mul x14, x13, x15 - str x14, [sp, #560] // 8-byte Folded Spill - mul x14, x12, x15 - str x14, [sp, #520] // 8-byte Folded Spill - mul x14, x11, x15 - str x14, [sp, #488] // 8-byte Folded Spill - mul x14, x10, x15 - str x14, [sp, #448] // 8-byte Folded Spill - mul x14, x16, x15 - umulh x13, x13, x15 - str x13, [sp, #600] // 8-byte Folded Spill - umulh x12, x12, x15 - str x12, [sp, #576] // 8-byte Folded Spill - umulh x11, x11, x15 - str x11, [sp, #544] // 8-byte Folded Spill - umulh x10, x10, x15 - str x10, [sp, #504] // 8-byte Folded Spill - umulh x10, x16, x15 - str x10, [sp, #472] // 8-byte Folded Spill - mul x10, x9, x15 - str x10, [sp, #208] // 8-byte Folded Spill - umulh x9, x9, x15 - stp x9, x14, [sp, #400] - mul x9, x8, x15 - str x9, [sp, #96] // 8-byte Folded Spill - umulh x8, x8, x15 - str x8, [sp, #320] // 8-byte Folded Spill - ldp x9, x11, [x1] - ldp x10, x17, [x2, #16] - ldp x12, x13, [x1, #16] - ldp x14, x16, [x1, #32] - ldr x15, [x1, #48] - mul x8, x9, x10 - str x8, [sp, #248] // 8-byte Folded Spill - mul x8, x15, x10 - str x8, [sp, #392] // 8-byte Folded Spill - mul x8, x16, x10 - str x8, [sp, #344] // 8-byte Folded Spill - mul x8, x14, x10 - str x8, [sp, #296] // 8-byte Folded Spill - mul x8, x13, x10 - str x8, [sp, #240] // 8-byte Folded Spill - mul x8, x12, x10 - str x8, [sp, #192] // 8-byte Folded Spill - mul x8, x11, x10 - str x8, [sp, #136] // 8-byte Folded Spill - umulh x8, x15, x10 - str x8, [sp, #440] // 8-byte Folded Spill - umulh x8, x16, x10 - str x8, [sp, #384] // 8-byte Folded Spill - umulh x8, x14, x10 - str x8, [sp, #336] // 8-byte Folded Spill - umulh x8, x13, x10 - str x8, [sp, #288] // 8-byte Folded Spill - umulh x8, x12, x10 - str x8, [sp, #232] // 8-byte Folded Spill - umulh x8, x11, x10 - str x8, [sp, #184] // 8-byte Folded Spill - umulh x8, x9, x10 - str x8, [sp, #128] // 8-byte Folded Spill - mul x8, x15, x17 - str x8, [sp, #464] // 8-byte Folded Spill - umulh x8, x15, x17 - str x8, [sp, #584] // 8-byte Folded Spill - mul x8, x16, x17 - str x8, [sp, #376] // 8-byte Folded Spill - umulh x8, x16, x17 
- str x8, [sp, #536] // 8-byte Folded Spill - mul x8, x14, x17 - str x8, [sp, #312] // 8-byte Folded Spill - umulh x8, x14, x17 - str x8, [sp, #480] // 8-byte Folded Spill - mul x8, x13, x17 - str x8, [sp, #224] // 8-byte Folded Spill - umulh x8, x13, x17 - str x8, [sp, #416] // 8-byte Folded Spill - mul x8, x12, x17 - str x8, [sp, #144] // 8-byte Folded Spill - umulh x8, x12, x17 - str x8, [sp, #328] // 8-byte Folded Spill - mul x8, x11, x17 - str x8, [sp, #80] // 8-byte Folded Spill - umulh x8, x11, x17 - str x8, [sp, #264] // 8-byte Folded Spill - mul x28, x9, x17 - umulh x8, x9, x17 - str x8, [sp, #176] // 8-byte Folded Spill - ldp x14, x12, [x1, #24] - ldp x10, x9, [x1] - ldr x7, [x1, #16] - ldp x30, x5, [x1, #40] - ldp x27, x8, [x2, #32] - ldr x13, [x1, #48] - mul x11, x10, x27 - str x11, [sp, #48] // 8-byte Folded Spill - mul x11, x5, x27 - str x11, [sp, #168] // 8-byte Folded Spill - mul x11, x30, x27 - str x11, [sp, #120] // 8-byte Folded Spill - mul x11, x12, x27 - str x11, [sp, #72] // 8-byte Folded Spill - mul x11, x14, x27 - str x11, [sp, #40] // 8-byte Folded Spill - mul x11, x7, x27 - str x11, [sp, #16] // 8-byte Folded Spill - mul x24, x9, x27 - umulh x11, x5, x27 - str x11, [sp, #216] // 8-byte Folded Spill - umulh x11, x30, x27 - str x11, [sp, #160] // 8-byte Folded Spill - umulh x11, x12, x27 - str x11, [sp, #112] // 8-byte Folded Spill - umulh x11, x14, x27 - str x11, [sp, #64] // 8-byte Folded Spill - umulh x11, x7, x27 - str x11, [sp, #32] // 8-byte Folded Spill - umulh x29, x9, x27 - umulh x23, x10, x27 - mul x11, x5, x8 - str x11, [sp, #256] // 8-byte Folded Spill - umulh x11, x5, x8 - str x11, [sp, #432] // 8-byte Folded Spill - mul x11, x30, x8 - str x11, [sp, #152] // 8-byte Folded Spill - umulh x11, x30, x8 - str x11, [sp, #360] // 8-byte Folded Spill - mul x11, x12, x8 - str x11, [sp, #88] // 8-byte Folded Spill - umulh x11, x12, x8 - str x11, [sp, #280] // 8-byte Folded Spill - mul x11, x14, x8 - str x11, [sp, #24] // 8-byte Folded Spill - umulh x11, x14, x8 - str x11, [sp, #200] // 8-byte Folded Spill - mul x25, x7, x8 - umulh x11, x7, x8 - str x11, [sp, #104] // 8-byte Folded Spill - mul x22, x9, x8 - umulh x9, x9, x8 - str x9, [sp, #56] // 8-byte Folded Spill - mul x20, x10, x8 - umulh x26, x10, x8 - ldr x10, [x2, #48] - ldp x2, x8, [x1] - ldr x9, [x1, #16] - ldp x11, x1, [x1, #32] - mul x27, x2, x10 - umulh x21, x2, x10 - mul x5, x8, x10 - umulh x19, x8, x10 - mul x3, x9, x10 - umulh x7, x9, x10 - mul x2, x18, x10 - umulh x6, x18, x10 - mul x17, x11, x10 - umulh x4, x11, x10 - mul x16, x1, x10 - umulh x1, x1, x10 - mul x15, x13, x10 - umulh x18, x13, x10 - ldr x8, [sp, #528] // 8-byte Folded Reload - str x8, [x0] - ldr x8, [sp, #304] // 8-byte Folded Reload - ldr x9, [sp, #272] // 8-byte Folded Reload - adds x13, x9, x8 - ldr x8, [sp, #368] // 8-byte Folded Reload - ldr x9, [sp, #352] // 8-byte Folded Reload - adcs x8, x9, x8 - ldr x9, [sp, #456] // 8-byte Folded Reload - ldr x10, [sp, #424] // 8-byte Folded Reload - adcs x9, x10, x9 - ldr x10, [sp, #512] // 8-byte Folded Reload - ldr x11, [sp, #496] // 8-byte Folded Reload - adcs x10, x11, x10 - ldr x11, [sp, #568] // 8-byte Folded Reload - ldr x12, [sp, #552] // 8-byte Folded Reload - adcs x11, x12, x11 - ldr x12, [sp, #608] // 8-byte Folded Reload - ldr x14, [sp, #592] // 8-byte Folded Reload - adcs x12, x14, x12 - ldr x14, [sp, #616] // 8-byte Folded Reload - adcs x14, x14, xzr - ldr x30, [sp, #96] // 8-byte Folded Reload - adds x13, x30, x13 - str x13, [x0, #8] - ldr x13, [sp, #208] // 8-byte Folded 
Reload - adcs x8, x13, x8 - ldr x13, [sp, #408] // 8-byte Folded Reload - adcs x9, x13, x9 - ldr x13, [sp, #448] // 8-byte Folded Reload - adcs x10, x13, x10 - ldr x13, [sp, #488] // 8-byte Folded Reload - adcs x11, x13, x11 - ldr x13, [sp, #520] // 8-byte Folded Reload - adcs x12, x13, x12 - ldr x13, [sp, #560] // 8-byte Folded Reload - adcs x13, x13, x14 - adcs x14, xzr, xzr - ldr x30, [sp, #320] // 8-byte Folded Reload - adds x8, x8, x30 - ldr x30, [sp, #400] // 8-byte Folded Reload - adcs x9, x9, x30 - ldr x30, [sp, #472] // 8-byte Folded Reload - adcs x10, x10, x30 - ldr x30, [sp, #504] // 8-byte Folded Reload - adcs x11, x11, x30 - ldr x30, [sp, #544] // 8-byte Folded Reload - adcs x12, x12, x30 - ldr x30, [sp, #576] // 8-byte Folded Reload - adcs x13, x13, x30 - ldr x30, [sp, #600] // 8-byte Folded Reload - adcs x14, x14, x30 - ldr x30, [sp, #248] // 8-byte Folded Reload - adds x8, x30, x8 - str x8, [x0, #16] - ldp x30, x8, [sp, #128] - adcs x8, x8, x9 - ldr x9, [sp, #192] // 8-byte Folded Reload - adcs x9, x9, x10 - ldr x10, [sp, #240] // 8-byte Folded Reload - adcs x10, x10, x11 - ldr x11, [sp, #296] // 8-byte Folded Reload - adcs x11, x11, x12 - ldr x12, [sp, #344] // 8-byte Folded Reload - adcs x12, x12, x13 - ldr x13, [sp, #392] // 8-byte Folded Reload - adcs x13, x13, x14 - adcs x14, xzr, xzr - adds x8, x8, x30 - ldr x30, [sp, #184] // 8-byte Folded Reload - adcs x9, x9, x30 - ldr x30, [sp, #232] // 8-byte Folded Reload - adcs x10, x10, x30 - ldr x30, [sp, #288] // 8-byte Folded Reload - adcs x11, x11, x30 - ldr x30, [sp, #336] // 8-byte Folded Reload - adcs x12, x12, x30 - ldr x30, [sp, #384] // 8-byte Folded Reload - adcs x13, x13, x30 - ldr x30, [sp, #440] // 8-byte Folded Reload - adcs x14, x14, x30 - adds x8, x28, x8 - str x8, [x0, #24] - ldr x8, [sp, #80] // 8-byte Folded Reload - adcs x8, x8, x9 - ldr x9, [sp, #144] // 8-byte Folded Reload - adcs x9, x9, x10 - ldr x10, [sp, #224] // 8-byte Folded Reload - adcs x10, x10, x11 - ldr x11, [sp, #312] // 8-byte Folded Reload - adcs x11, x11, x12 - ldr x12, [sp, #376] // 8-byte Folded Reload - adcs x12, x12, x13 - ldr x13, [sp, #464] // 8-byte Folded Reload - adcs x13, x13, x14 - adcs x14, xzr, xzr - ldr x28, [sp, #176] // 8-byte Folded Reload - adds x8, x8, x28 - ldr x28, [sp, #264] // 8-byte Folded Reload - adcs x9, x9, x28 - ldr x28, [sp, #328] // 8-byte Folded Reload - adcs x10, x10, x28 - ldr x28, [sp, #416] // 8-byte Folded Reload - adcs x11, x11, x28 - ldr x28, [sp, #480] // 8-byte Folded Reload - adcs x12, x12, x28 - ldr x28, [sp, #536] // 8-byte Folded Reload - adcs x13, x13, x28 - ldr x28, [sp, #584] // 8-byte Folded Reload - adcs x14, x14, x28 - ldr x28, [sp, #48] // 8-byte Folded Reload - adds x8, x28, x8 - str x8, [x0, #32] - adcs x8, x24, x9 - ldr x9, [sp, #16] // 8-byte Folded Reload - adcs x9, x9, x10 - ldr x10, [sp, #40] // 8-byte Folded Reload - adcs x10, x10, x11 - ldr x11, [sp, #72] // 8-byte Folded Reload - adcs x11, x11, x12 - ldr x12, [sp, #120] // 8-byte Folded Reload - adcs x12, x12, x13 - ldr x13, [sp, #168] // 8-byte Folded Reload - adcs x13, x13, x14 - adcs x14, xzr, xzr - adds x8, x8, x23 - adcs x9, x9, x29 - ldr x23, [sp, #32] // 8-byte Folded Reload - adcs x10, x10, x23 - ldr x23, [sp, #64] // 8-byte Folded Reload - adcs x11, x11, x23 - ldr x23, [sp, #112] // 8-byte Folded Reload - adcs x12, x12, x23 - ldr x23, [sp, #160] // 8-byte Folded Reload - adcs x13, x13, x23 - ldr x23, [sp, #216] // 8-byte Folded Reload - adcs x14, x14, x23 - adds x8, x20, x8 - str x8, [x0, #40] - adcs x8, x22, x9 - adcs 
x9, x25, x10 - ldr x10, [sp, #24] // 8-byte Folded Reload - adcs x10, x10, x11 - ldr x11, [sp, #88] // 8-byte Folded Reload - adcs x11, x11, x12 - ldr x12, [sp, #152] // 8-byte Folded Reload - adcs x12, x12, x13 - ldr x13, [sp, #256] // 8-byte Folded Reload - adcs x13, x13, x14 - adcs x14, xzr, xzr - adds x8, x8, x26 - ldr x20, [sp, #56] // 8-byte Folded Reload - adcs x9, x9, x20 - ldr x20, [sp, #104] // 8-byte Folded Reload - adcs x10, x10, x20 - ldr x20, [sp, #200] // 8-byte Folded Reload - adcs x11, x11, x20 - ldr x20, [sp, #280] // 8-byte Folded Reload - adcs x12, x12, x20 - ldr x20, [sp, #360] // 8-byte Folded Reload - adcs x13, x13, x20 - ldr x20, [sp, #432] // 8-byte Folded Reload - adcs x14, x14, x20 - adds x8, x27, x8 - str x8, [x0, #48] - adcs x8, x5, x9 - adcs x9, x3, x10 - adcs x10, x2, x11 - adcs x11, x17, x12 - adcs x12, x16, x13 - adcs x13, x15, x14 - adcs x14, xzr, xzr - adds x8, x8, x21 - str x8, [x0, #56] - adcs x8, x9, x19 - str x8, [x0, #64] - adcs x8, x10, x7 - str x8, [x0, #72] - adcs x8, x11, x6 - str x8, [x0, #80] - adcs x8, x12, x4 - str x8, [x0, #88] - adcs x8, x13, x1 - str x8, [x0, #96] - adcs x8, x14, x18 - str x8, [x0, #104] - add sp, sp, #624 // =624 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end96: - .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L - - .globl mcl_fpDbl_sqrPre7L - .align 2 - .type mcl_fpDbl_sqrPre7L,@function -mcl_fpDbl_sqrPre7L: // @mcl_fpDbl_sqrPre7L -// BB#0: - stp x24, x23, [sp, #-48]! - stp x22, x21, [sp, #16] - stp x20, x19, [sp, #32] - ldp x11, x8, [x1] - ldp x9, x10, [x1, #40] - ldp x15, x12, [x1, #16] - ldp x16, x3, [x1, #16] - ldp x13, x14, [x1, #32] - ldp x18, x17, [x1, #32] - ldr x2, [x1, #32] - mul x4, x11, x11 - umulh x5, x10, x11 - mul x6, x9, x11 - mul x7, x18, x11 - mul x19, x3, x11 - umulh x20, x16, x11 - mul x21, x16, x11 - umulh x22, x8, x11 - mul x23, x8, x11 - str x4, [x0] - umulh x4, x11, x11 - adds x4, x4, x23 - adcs x21, x22, x21 - adcs x19, x20, x19 - umulh x20, x3, x11 - adcs x7, x20, x7 - umulh x20, x18, x11 - adcs x6, x20, x6 - mul x20, x10, x11 - umulh x11, x9, x11 - adcs x20, x11, x20 - adcs x5, x5, xzr - adds x4, x23, x4 - ldp x11, x23, [x1, #40] - str x4, [x0, #8] - mul x4, x8, x8 - adcs x4, x4, x21 - mul x21, x16, x8 - adcs x19, x21, x19 - mul x21, x3, x8 - adcs x7, x21, x7 - mul x21, x18, x8 - adcs x6, x21, x6 - mul x21, x9, x8 - adcs x20, x21, x20 - mul x21, x10, x8 - umulh x10, x10, x8 - umulh x9, x9, x8 - umulh x18, x18, x8 - umulh x3, x3, x8 - umulh x16, x16, x8 - umulh x8, x8, x8 - adcs x5, x21, x5 - adcs x21, xzr, xzr - adds x4, x4, x22 - adcs x8, x19, x8 - ldp x19, x22, [x1] - adcs x16, x7, x16 - adcs x3, x6, x3 - ldp x6, x7, [x1, #8] - adcs x18, x20, x18 - mul x20, x19, x15 - adcs x9, x5, x9 - mul x5, x23, x15 - adcs x10, x21, x10 - mul x21, x14, x15 - adds x4, x20, x4 - mul x20, x13, x15 - str x4, [x0, #16] - mul x4, x6, x15 - adcs x8, x4, x8 - mul x4, x15, x15 - adcs x16, x4, x16 - mul x4, x12, x15 - adcs x3, x4, x3 - adcs x18, x20, x18 - umulh x20, x13, x15 - adcs x9, x21, x9 - umulh x21, x19, x15 - adcs x10, x5, x10 - adcs x5, xzr, xzr - adds x8, x8, x21 - umulh x21, x6, x15 - adcs x16, x16, x21 - umulh x21, x15, x15 - adcs x3, x3, x21 - umulh x21, x12, x15 - adcs x18, x18, x21 - adcs x9, x9, x20 - umulh x20, x14, x15 - adcs x10, x10, x20 - umulh x15, x23, x15 - adcs x15, x5, x15 - mul x5, x19, x12 - adds x8, x5, x8 - ldr x5, [x1, #32] - str x8, [x0, #24] - mul 
x8, x6, x12 - adcs x8, x8, x16 - ldr x16, [x1] - adcs x3, x4, x3 - mul x4, x12, x12 - adcs x18, x4, x18 - mul x4, x13, x12 - adcs x9, x4, x9 - mul x4, x14, x12 - adcs x10, x4, x10 - mul x4, x23, x12 - umulh x19, x19, x12 - adcs x15, x4, x15 - adcs x4, xzr, xzr - adds x8, x8, x19 - ldr x19, [x1, #24] - umulh x6, x6, x12 - adcs x3, x3, x6 - ldr x6, [x1, #48] - adcs x18, x18, x21 - ldr x20, [x1, #48] - umulh x21, x23, x12 - umulh x14, x14, x12 - umulh x13, x13, x12 - umulh x12, x12, x12 - adcs x9, x9, x12 - adcs x10, x10, x13 - ldp x12, x13, [x1] - adcs x14, x15, x14 - mul x15, x16, x5 - adcs x4, x4, x21 - mul x21, x6, x5 - adds x8, x15, x8 - mul x15, x17, x5 - str x8, [x0, #32] - mul x8, x22, x5 - adcs x8, x8, x3 - mul x3, x7, x5 - adcs x18, x3, x18 - mul x3, x19, x5 - adcs x9, x3, x9 - mul x3, x5, x5 - adcs x10, x3, x10 - umulh x3, x16, x5 - adcs x14, x15, x14 - adcs x4, x21, x4 - adcs x21, xzr, xzr - adds x8, x8, x3 - umulh x3, x22, x5 - adcs x18, x18, x3 - umulh x3, x7, x5 - adcs x9, x9, x3 - umulh x3, x19, x5 - adcs x10, x10, x3 - umulh x3, x5, x5 - adcs x14, x14, x3 - umulh x3, x6, x5 - umulh x5, x17, x5 - adcs x4, x4, x5 - adcs x3, x21, x3 - mul x21, x16, x17 - adds x8, x21, x8 - ldp x21, x1, [x1, #16] - str x8, [x0, #40] - mul x8, x22, x17 - adcs x8, x8, x18 - mul x18, x7, x17 - adcs x9, x18, x9 - mul x18, x19, x17 - adcs x10, x18, x10 - mul x18, x6, x17 - adcs x14, x15, x14 - mul x15, x17, x17 - umulh x6, x6, x17 - umulh x19, x19, x17 - umulh x7, x7, x17 - umulh x22, x22, x17 - umulh x16, x16, x17 - umulh x17, x17, x17 - adcs x15, x15, x4 - mul x4, x12, x20 - adcs x18, x18, x3 - adcs x3, xzr, xzr - adds x8, x8, x16 - mul x16, x11, x20 - adcs x9, x9, x22 - mul x22, x2, x20 - adcs x10, x10, x7 - mul x7, x1, x20 - adcs x14, x14, x19 - mul x19, x21, x20 - adcs x15, x15, x5 - mul x5, x13, x20 - adcs x17, x18, x17 - mul x18, x20, x20 - umulh x12, x12, x20 - umulh x13, x13, x20 - umulh x21, x21, x20 - umulh x1, x1, x20 - umulh x2, x2, x20 - umulh x11, x11, x20 - umulh x20, x20, x20 - adcs x3, x3, x6 - adds x8, x4, x8 - str x8, [x0, #48] - adcs x8, x5, x9 - adcs x9, x19, x10 - adcs x10, x7, x14 - adcs x14, x22, x15 - adcs x15, x16, x17 - adcs x16, x18, x3 - adcs x17, xzr, xzr - adds x8, x8, x12 - str x8, [x0, #56] - adcs x8, x9, x13 - str x8, [x0, #64] - adcs x8, x10, x21 - str x8, [x0, #72] - adcs x8, x14, x1 - str x8, [x0, #80] - adcs x8, x15, x2 - str x8, [x0, #88] - adcs x8, x16, x11 - str x8, [x0, #96] - adcs x8, x17, x20 - str x8, [x0, #104] - ldp x20, x19, [sp, #32] - ldp x22, x21, [sp, #16] - ldp x24, x23, [sp], #48 - ret -.Lfunc_end97: - .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L - - .globl mcl_fp_mont7L - .align 2 - .type mcl_fp_mont7L,@function -mcl_fp_mont7L: // @mcl_fp_mont7L -// BB#0: - stp x28, x27, [sp, #-96]! 
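[Note on this hunk: mcl_fp_mont7L, which opens just above, is a fully unrolled 7-limb (448-bit) Montgomery multiplication. Its mul/umulh ladders interleave the a*b[i] partial products with a per-word reduction keyed to the value loaded by `ldur x12, [x3, #-8]`, which is -p[0]^-1 mod 2^64. Below is a minimal Go sketch of the same word-serial (CIOS) schedule; the names montMul and n0inv are illustrative, not mcl's API.]

    package mont

    import "math/bits"

    // montMul returns a*b*R^-1 mod p for R = 2^(64*len(p)), following the
    // word-serial (CIOS) schedule the unrolled assembly implements: for each
    // word b[i], accumulate a*b[i] into t, pick m = t[0]*n0inv so that adding
    // m*p zeroes the low limb, then shift t down one limb.
    func montMul(a, b, p []uint64, n0inv uint64) []uint64 {
    	n := len(p)
    	t := make([]uint64, n+2)
    	for i := 0; i < n; i++ {
    		var c uint64
    		for j := 0; j < n; j++ { // t += a*b[i]
    			hi, lo := bits.Mul64(a[j], b[i])
    			var c1, c2 uint64
    			t[j], c1 = bits.Add64(t[j], lo, 0)
    			t[j], c2 = bits.Add64(t[j], c, 0)
    			c = hi + c1 + c2 // cannot wrap: hi = 2^64-2 forces lo <= 1, excluding c1 = c2 = 1
    		}
    		t[n], t[n+1] = bits.Add64(t[n], c, 0)

    		m := t[0] * n0inv // multiplier that zeroes the low limb
    		hi, lo := bits.Mul64(m, p[0])
    		_, c1 := bits.Add64(t[0], lo, 0) // sum is 0 mod 2^64 by choice of m, so only the carry matters
    		c = hi + c1
    		for j := 1; j < n; j++ { // t = (t + m*p) >> 64
    			hi, lo := bits.Mul64(m, p[j])
    			var c1, c2 uint64
    			t[j-1], c1 = bits.Add64(t[j], lo, 0)
    			t[j-1], c2 = bits.Add64(t[j-1], c, 0)
    			c = hi + c1 + c2
    		}
    		var k uint64
    		t[n-1], k = bits.Add64(t[n], c, 0)
    		t[n] = t[n+1] + k
    		t[n+1] = 0
    	}
    	// conditional final subtraction, the subs/sbcs/tst/csel tail in the listing
    	d := make([]uint64, n)
    	var borrow uint64
    	for j := 0; j < n; j++ {
    		d[j], borrow = bits.Sub64(t[j], p[j], borrow)
    	}
    	if _, borrow = bits.Sub64(t[n], 0, borrow); borrow == 0 {
    		return d // t >= p
    	}
    	return t[:n]
    }
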
- stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - sub sp, sp, #144 // =144 - str x2, [sp, #112] // 8-byte Folded Spill - str x0, [sp, #64] // 8-byte Folded Spill - ldr x6, [x2] - ldr x15, [x1, #48] - str x15, [sp, #96] // 8-byte Folded Spill - ldr x0, [x1, #32] - str x0, [sp, #56] // 8-byte Folded Spill - ldr x18, [x1, #40] - ldp x11, x13, [x1, #16] - ldp x17, x5, [x1] - str x5, [sp, #88] // 8-byte Folded Spill - ldur x12, [x3, #-8] - str x12, [sp, #128] // 8-byte Folded Spill - ldr x1, [x3, #32] - str x1, [sp, #104] // 8-byte Folded Spill - ldr x9, [x3, #40] - str x9, [sp, #80] // 8-byte Folded Spill - ldr x8, [x3, #16] - str x8, [sp, #136] // 8-byte Folded Spill - ldr x10, [x3, #24] - str x10, [sp, #120] // 8-byte Folded Spill - ldr x14, [x3] - str x14, [sp, #24] // 8-byte Folded Spill - ldr x4, [x3, #8] - str x4, [sp, #72] // 8-byte Folded Spill - ldr x7, [x2, #8] - umulh x19, x15, x6 - mul x20, x15, x6 - umulh x21, x18, x6 - mul x22, x18, x6 - mov x15, x0 - umulh x23, x15, x6 - mul x24, x15, x6 - mov x16, x13 - umulh x25, x16, x6 - mul x26, x16, x6 - mov x13, x11 - umulh x27, x13, x6 - mul x28, x13, x6 - mul x29, x5, x6 - mov x11, x17 - umulh x30, x11, x6 - adds x29, x30, x29 - umulh x30, x5, x6 - mul x6, x11, x6 - adcs x28, x30, x28 - mul x30, x6, x12 - adcs x26, x27, x26 - mul x27, x30, x10 - adcs x24, x25, x24 - mul x25, x30, x8 - adcs x22, x23, x22 - mul x23, x30, x4 - adcs x20, x21, x20 - umulh x21, x30, x14 - adcs x19, x19, xzr - adds x21, x21, x23 - umulh x23, x30, x4 - adcs x23, x23, x25 - umulh x25, x30, x8 - adcs x25, x25, x27 - mul x27, x30, x1 - umulh x17, x30, x10 - adcs x17, x17, x27 - ldr x3, [x3, #48] - str x3, [sp, #48] // 8-byte Folded Spill - mul x27, x30, x9 - umulh x0, x30, x1 - adcs x0, x0, x27 - mul x27, x30, x3 - umulh x2, x30, x9 - adcs x2, x2, x27 - umulh x27, x30, x3 - mul x30, x30, x14 - adcs x27, x27, xzr - cmn x30, x6 - adcs x6, x21, x29 - adcs x21, x23, x28 - mul x23, x7, x15 - adcs x25, x25, x26 - mul x26, x7, x16 - adcs x17, x17, x24 - mul x24, x7, x13 - adcs x0, x0, x22 - mul x22, x7, x5 - adcs x2, x2, x20 - umulh x20, x7, x11 - adcs x19, x27, x19 - adcs x27, xzr, xzr - adds x20, x20, x22 - umulh x22, x7, x5 - adcs x22, x22, x24 - umulh x24, x7, x13 - mov x5, x13 - adcs x24, x24, x26 - umulh x26, x7, x16 - adcs x23, x26, x23 - mul x26, x7, x18 - umulh x28, x7, x15 - adcs x26, x28, x26 - ldr x15, [sp, #96] // 8-byte Folded Reload - mul x28, x7, x15 - umulh x29, x7, x18 - adcs x28, x29, x28 - umulh x29, x7, x15 - mul x7, x7, x11 - adcs x29, x29, xzr - adds x30, x6, x7 - adcs x6, x21, x20 - adcs x25, x25, x22 - mul x22, x30, x12 - adcs x24, x17, x24 - mul x17, x22, x10 - adcs x0, x0, x23 - mul x23, x22, x8 - adcs x7, x2, x26 - mul x2, x22, x4 - adcs x20, x19, x28 - umulh x26, x22, x14 - adcs x21, x27, x29 - adcs x19, xzr, xzr - adds x2, x26, x2 - umulh x26, x22, x4 - adcs x23, x26, x23 - umulh x26, x22, x8 - adcs x17, x26, x17 - mul x26, x22, x1 - umulh x27, x22, x10 - adcs x26, x27, x26 - mul x27, x22, x9 - umulh x28, x22, x1 - adcs x27, x28, x27 - mul x28, x22, x3 - umulh x29, x22, x9 - adcs x28, x29, x28 - umulh x29, x22, x3 - mul x22, x22, x14 - mov x10, x14 - adcs x29, x29, xzr - cmn x22, x30 - adcs x22, x2, x6 - adcs x23, x23, x25 - ldr x8, [sp, #112] // 8-byte Folded Reload - adcs x24, x17, x24 - ldp x25, x17, [x8, #16] - adcs x0, x26, x0 - mul x2, x25, x16 - adcs x6, x27, x7 - mul x7, x25, x5 - adcs x20, x28, x20 - ldp x15, x8, [sp, #88] - mul x26, x25, x15 - adcs 
x21, x29, x21 - mov x12, x11 - umulh x27, x25, x12 - adcs x19, x19, xzr - adds x26, x27, x26 - umulh x27, x25, x15 - adcs x7, x27, x7 - umulh x27, x25, x5 - mov x9, x5 - adcs x2, x27, x2 - ldr x11, [sp, #56] // 8-byte Folded Reload - mul x27, x25, x11 - umulh x28, x25, x16 - mov x13, x16 - adcs x27, x28, x27 - mul x28, x25, x18 - umulh x29, x25, x11 - adcs x28, x29, x28 - mul x29, x25, x8 - umulh x30, x25, x18 - adcs x29, x30, x29 - umulh x30, x25, x8 - mov x14, x8 - mul x25, x25, x12 - mov x5, x12 - adcs x30, x30, xzr - adds x22, x22, x25 - adcs x23, x23, x26 - adcs x7, x24, x7 - adcs x0, x0, x2 - ldp x8, x12, [sp, #128] - mul x2, x22, x8 - adcs x6, x6, x27 - mul x24, x2, x12 - adcs x20, x20, x28 - mul x25, x2, x4 - adcs x21, x21, x29 - mov x1, x10 - umulh x26, x2, x1 - adcs x19, x19, x30 - adcs x27, xzr, xzr - adds x25, x26, x25 - umulh x26, x2, x4 - adcs x24, x26, x24 - ldr x10, [sp, #120] // 8-byte Folded Reload - mul x26, x2, x10 - umulh x28, x2, x12 - adcs x26, x28, x26 - ldr x12, [sp, #104] // 8-byte Folded Reload - mul x28, x2, x12 - umulh x29, x2, x10 - adcs x28, x29, x28 - ldr x10, [sp, #80] // 8-byte Folded Reload - mul x29, x2, x10 - umulh x30, x2, x12 - adcs x29, x30, x29 - mul x30, x2, x3 - umulh x12, x2, x10 - adcs x12, x12, x30 - umulh x30, x2, x3 - mul x2, x2, x1 - adcs x30, x30, xzr - cmn x2, x22 - adcs x2, x25, x23 - adcs x7, x24, x7 - adcs x0, x26, x0 - mul x22, x17, x11 - adcs x6, x28, x6 - mul x23, x17, x13 - adcs x20, x29, x20 - mul x24, x17, x9 - adcs x12, x12, x21 - mul x21, x17, x15 - adcs x19, x30, x19 - umulh x25, x17, x5 - adcs x26, x27, xzr - adds x21, x25, x21 - umulh x25, x17, x15 - adcs x24, x25, x24 - umulh x25, x17, x9 - mov x16, x9 - adcs x23, x25, x23 - umulh x25, x17, x13 - adcs x22, x25, x22 - mul x25, x17, x18 - umulh x27, x17, x11 - adcs x25, x27, x25 - mov x9, x14 - mul x27, x17, x9 - umulh x28, x17, x18 - adcs x27, x28, x27 - umulh x28, x17, x9 - mul x17, x17, x5 - mov x15, x5 - adcs x28, x28, xzr - adds x17, x2, x17 - adcs x2, x7, x21 - adcs x0, x0, x24 - mul x24, x17, x8 - adcs x29, x6, x23 - ldr x9, [sp, #120] // 8-byte Folded Reload - mul x23, x24, x9 - adcs x6, x20, x22 - ldr x8, [sp, #136] // 8-byte Folded Reload - mul x22, x24, x8 - adcs x7, x12, x25 - mul x12, x24, x4 - adcs x20, x19, x27 - umulh x25, x24, x1 - adcs x21, x26, x28 - adcs x19, xzr, xzr - adds x12, x25, x12 - umulh x25, x24, x4 - adcs x25, x25, x22 - umulh x22, x24, x8 - adcs x26, x22, x23 - ldr x5, [sp, #104] // 8-byte Folded Reload - mul x22, x24, x5 - umulh x23, x24, x9 - adcs x27, x23, x22 - mov x9, x10 - mul x22, x24, x9 - umulh x23, x24, x5 - adcs x28, x23, x22 - mul x22, x24, x3 - umulh x23, x24, x9 - adcs x30, x23, x22 - umulh x22, x24, x3 - mul x23, x24, x1 - mov x3, x1 - adcs x24, x22, xzr - cmn x23, x17 - adcs x22, x12, x2 - adcs x23, x25, x0 - ldr x10, [sp, #112] // 8-byte Folded Reload - ldp x12, x0, [x10, #32] - adcs x17, x26, x29 - adcs x2, x27, x6 - mul x6, x12, x13 - adcs x7, x28, x7 - mov x10, x16 - mul x25, x12, x10 - adcs x20, x30, x20 - ldr x16, [sp, #88] // 8-byte Folded Reload - mul x26, x12, x16 - adcs x21, x24, x21 - umulh x24, x12, x15 - adcs x1, x19, xzr - adds x24, x24, x26 - umulh x26, x12, x16 - adcs x25, x26, x25 - umulh x26, x12, x10 - adcs x6, x26, x6 - mul x26, x12, x11 - umulh x27, x12, x13 - adcs x26, x27, x26 - mul x27, x12, x18 - umulh x28, x12, x11 - adcs x27, x28, x27 - mul x28, x12, x14 - umulh x29, x12, x18 - adcs x28, x29, x28 - umulh x29, x12, x14 - mul x12, x12, x15 - adcs x29, x29, xzr - adds x12, x22, x12 - adcs x22, x23, x24 - 
adcs x17, x17, x25 - adcs x2, x2, x6 - ldr x19, [sp, #128] // 8-byte Folded Reload - mul x6, x12, x19 - adcs x7, x7, x26 - mov x30, x8 - mul x23, x6, x30 - adcs x20, x20, x27 - mul x24, x6, x4 - adcs x21, x21, x28 - mov x8, x3 - umulh x25, x6, x8 - adcs x1, x1, x29 - adcs x26, xzr, xzr - adds x24, x25, x24 - umulh x25, x6, x4 - adcs x23, x25, x23 - ldr x4, [sp, #120] // 8-byte Folded Reload - mul x25, x6, x4 - umulh x27, x6, x30 - adcs x25, x27, x25 - mul x27, x6, x5 - umulh x28, x6, x4 - adcs x27, x28, x27 - mov x3, x9 - mul x28, x6, x3 - umulh x29, x6, x5 - adcs x28, x29, x28 - ldr x9, [sp, #48] // 8-byte Folded Reload - mul x29, x6, x9 - umulh x30, x6, x3 - adcs x29, x30, x29 - umulh x30, x6, x9 - mov x3, x9 - mul x6, x6, x8 - mov x5, x8 - adcs x30, x30, xzr - cmn x6, x12 - adcs x12, x24, x22 - adcs x17, x23, x17 - adcs x2, x25, x2 - mul x6, x0, x11 - adcs x7, x27, x7 - mul x22, x0, x13 - adcs x20, x28, x20 - mul x23, x0, x10 - adcs x21, x29, x21 - mul x24, x0, x16 - adcs x29, x30, x1 - mov x1, x15 - umulh x25, x0, x1 - adcs x26, x26, xzr - adds x24, x25, x24 - umulh x25, x0, x16 - adcs x23, x25, x23 - umulh x25, x0, x10 - adcs x22, x25, x22 - umulh x25, x0, x13 - adcs x6, x25, x6 - mul x25, x0, x18 - umulh x27, x0, x11 - adcs x25, x27, x25 - mov x9, x14 - mul x27, x0, x9 - umulh x28, x0, x18 - adcs x27, x28, x27 - umulh x28, x0, x9 - mul x0, x0, x1 - adcs x28, x28, xzr - adds x12, x12, x0 - adcs x8, x17, x24 - str x8, [sp, #40] // 8-byte Folded Spill - adcs x8, x2, x23 - str x8, [sp, #32] // 8-byte Folded Spill - mul x2, x12, x19 - adcs x7, x7, x22 - mul x22, x2, x4 - adcs x8, x20, x6 - str x8, [sp, #16] // 8-byte Folded Spill - ldr x8, [sp, #136] // 8-byte Folded Reload - mul x20, x2, x8 - adcs x21, x21, x25 - ldr x9, [sp, #72] // 8-byte Folded Reload - mul x23, x2, x9 - adcs x19, x29, x27 - mov x15, x5 - umulh x24, x2, x15 - adcs x17, x26, x28 - str x17, [sp, #8] // 8-byte Folded Spill - adcs x26, xzr, xzr - adds x23, x24, x23 - umulh x24, x2, x9 - adcs x20, x24, x20 - umulh x24, x2, x8 - adcs x22, x24, x22 - ldp x25, x8, [sp, #104] - mul x24, x2, x25 - umulh x27, x2, x4 - adcs x6, x27, x24 - ldr x5, [sp, #80] // 8-byte Folded Reload - mul x27, x2, x5 - umulh x28, x2, x25 - adcs x27, x28, x27 - mul x28, x2, x3 - umulh x29, x2, x5 - adcs x28, x29, x28 - ldr x29, [x8, #48] - mul x30, x2, x15 - umulh x2, x2, x3 - adcs x2, x2, xzr - cmn x30, x12 - umulh x24, x29, x14 - mul x30, x29, x14 - umulh x0, x29, x18 - mul x18, x29, x18 - umulh x17, x29, x11 - mul x15, x29, x11 - umulh x14, x29, x13 - mul x13, x29, x13 - umulh x12, x29, x10 - mul x11, x29, x10 - mul x10, x29, x16 - umulh x9, x29, x16 - umulh x8, x29, x1 - mul x29, x29, x1 - ldr x16, [sp, #40] // 8-byte Folded Reload - adcs x23, x23, x16 - ldr x16, [sp, #32] // 8-byte Folded Reload - adcs x20, x20, x16 - adcs x7, x22, x7 - ldr x16, [sp, #16] // 8-byte Folded Reload - adcs x6, x6, x16 - adcs x21, x27, x21 - adcs x19, x28, x19 - ldr x16, [sp, #8] // 8-byte Folded Reload - adcs x2, x2, x16 - adcs x22, x26, xzr - adds x8, x8, x10 - adcs x9, x9, x11 - adcs x10, x12, x13 - adcs x11, x14, x15 - adcs x12, x17, x18 - adcs x13, x0, x30 - adcs x14, x24, xzr - adds x15, x23, x29 - adcs x8, x20, x8 - ldr x16, [sp, #128] // 8-byte Folded Reload - mul x16, x15, x16 - adcs x9, x7, x9 - mul x17, x16, x3 - mul x18, x16, x5 - mul x0, x16, x25 - adcs x10, x6, x10 - mul x6, x16, x4 - adcs x11, x21, x11 - ldr x21, [sp, #136] // 8-byte Folded Reload - mul x7, x16, x21 - adcs x12, x19, x12 - ldr x23, [sp, #72] // 8-byte Folded Reload - mul x19, x16, x23 - 
adcs x13, x2, x13 - ldr x24, [sp, #24] // 8-byte Folded Reload - umulh x2, x16, x24 - adcs x14, x22, x14 - adcs x20, xzr, xzr - adds x2, x2, x19 - umulh x19, x16, x23 - adcs x7, x19, x7 - umulh x19, x16, x21 - adcs x6, x19, x6 - umulh x19, x16, x4 - adcs x0, x19, x0 - umulh x19, x16, x25 - adcs x18, x19, x18 - umulh x19, x16, x5 - adcs x17, x19, x17 - umulh x19, x16, x3 - mul x16, x16, x24 - adcs x19, x19, xzr - cmn x16, x15 - adcs x8, x2, x8 - adcs x9, x7, x9 - adcs x10, x6, x10 - adcs x11, x0, x11 - adcs x12, x18, x12 - adcs x13, x17, x13 - adcs x14, x19, x14 - adcs x15, x20, xzr - subs x16, x8, x24 - sbcs x17, x9, x23 - sbcs x18, x10, x21 - sbcs x0, x11, x4 - sbcs x1, x12, x25 - sbcs x2, x13, x5 - sbcs x3, x14, x3 - sbcs x15, x15, xzr - tst x15, #0x1 - csel x8, x8, x16, ne - csel x9, x9, x17, ne - csel x10, x10, x18, ne - csel x11, x11, x0, ne - csel x12, x12, x1, ne - csel x13, x13, x2, ne - csel x14, x14, x3, ne - ldr x15, [sp, #64] // 8-byte Folded Reload - stp x8, x9, [x15] - stp x10, x11, [x15, #16] - stp x12, x13, [x15, #32] - str x14, [x15, #48] - add sp, sp, #144 // =144 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end98: - .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L - - .globl mcl_fp_montNF7L - .align 2 - .type mcl_fp_montNF7L,@function -mcl_fp_montNF7L: // @mcl_fp_montNF7L -// BB#0: - stp x28, x27, [sp, #-96]! - stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - sub sp, sp, #32 // =32 - stp x0, x2, [sp, #8] - ldr x7, [x2] - ldp x5, x16, [x1, #40] - ldp x6, x17, [x1, #24] - ldr x4, [x1] - ldp x1, x18, [x1, #8] - ldur x8, [x3, #-8] - str x8, [sp, #24] // 8-byte Folded Spill - ldp x15, x0, [x3, #40] - ldp x11, x10, [x3, #24] - ldp x13, x12, [x3, #8] - ldr x14, [x3] - ldr x25, [x2, #8] - umulh x3, x16, x7 - mul x19, x16, x7 - umulh x20, x5, x7 - mul x21, x5, x7 - umulh x22, x17, x7 - mul x23, x17, x7 - umulh x24, x6, x7 - mul x26, x6, x7 - umulh x27, x18, x7 - mul x28, x18, x7 - mul x29, x1, x7 - umulh x30, x4, x7 - adds x29, x30, x29 - umulh x30, x1, x7 - mul x7, x4, x7 - adcs x28, x30, x28 - mul x30, x25, x5 - adcs x26, x27, x26 - mul x27, x25, x17 - adcs x23, x24, x23 - mul x24, x25, x6 - adcs x21, x22, x21 - mul x22, x7, x8 - adcs x19, x20, x19 - mul x20, x22, x14 - adcs x3, x3, xzr - cmn x20, x7 - mul x9, x25, x18 - mul x7, x22, x13 - adcs x7, x7, x29 - mul x20, x22, x12 - adcs x20, x20, x28 - mul x28, x22, x11 - adcs x26, x28, x26 - mul x28, x22, x10 - adcs x23, x28, x23 - mul x28, x22, x15 - adcs x21, x28, x21 - mul x28, x22, x0 - adcs x19, x28, x19 - umulh x28, x22, x14 - adcs x29, x3, xzr - adds x28, x7, x28 - umulh x3, x22, x13 - adcs x8, x20, x3 - umulh x3, x22, x12 - adcs x26, x26, x3 - umulh x3, x22, x11 - adcs x3, x23, x3 - umulh x7, x22, x10 - adcs x7, x21, x7 - umulh x20, x22, x15 - adcs x19, x19, x20 - mul x21, x25, x1 - umulh x20, x22, x0 - adcs x20, x29, x20 - umulh x22, x25, x4 - adds x29, x22, x21 - umulh x21, x25, x1 - adcs x23, x21, x9 - umulh x9, x25, x18 - adcs x21, x9, x24 - umulh x9, x25, x6 - adcs x22, x9, x27 - umulh x9, x25, x17 - adcs x30, x9, x30 - mul x9, x25, x16 - umulh x24, x25, x5 - adcs x24, x24, x9 - umulh x9, x25, x16 - mul x25, x25, x4 - adcs x9, x9, xzr - adds x27, x25, x28 - adcs x25, x29, x8 - ldp x28, x8, [x2, #16] - adcs x29, x23, x26 - adcs x3, x21, x3 - mul x21, x28, x17 - adcs x7, x22, x7 - mul x22, x28, x6 - adcs x19, x30, x19 
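[The tail of mcl_fp_mont7L above and the tail of mcl_fp_montNF7L (in progress here) differ in how the final correction is selected: mont7L carries an explicit overflow word and tests it (`tst x15, #0x1` / `csel ..., ne`), while the NF variant keeps the accumulator within seven limbs and keys the select off the sign of t-p (`asr x3, x0, #63` / `cmp` / `csel ..., lt`). A Go sketch of the sign-based correction, assuming t < 2p on entry; illustrative helper, not mcl's API.]

    package mont

    import "math/bits"

    // finalReduceNF keeps t if t-p went negative, else t-p; the assembly
    // reads the same decision off the sign bit of the top difference word.
    func finalReduceNF(t, p []uint64) []uint64 {
    	d := make([]uint64, len(p))
    	var borrow uint64
    	for i := range p {
    		d[i], borrow = bits.Sub64(t[i], p[i], borrow)
    	}
    	if borrow == 1 { // t < p: the negative case the asr #63 detects
    		return t
    	}
    	return d
    }
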
- ldr x2, [sp, #24] // 8-byte Folded Reload - mul x23, x27, x2 - adcs x20, x24, x20 - mul x24, x23, x14 - adcs x9, x9, xzr - cmn x24, x27 - mul x24, x28, x18 - mul x26, x23, x13 - adcs x25, x26, x25 - mul x26, x23, x12 - adcs x26, x26, x29 - mul x27, x23, x11 - adcs x3, x27, x3 - mul x27, x23, x10 - adcs x7, x27, x7 - mul x27, x23, x15 - adcs x19, x27, x19 - mul x27, x23, x0 - adcs x20, x27, x20 - umulh x27, x23, x14 - adcs x9, x9, xzr - adds x25, x25, x27 - umulh x27, x23, x13 - adcs x26, x26, x27 - umulh x27, x23, x12 - adcs x3, x3, x27 - umulh x27, x23, x11 - adcs x7, x7, x27 - umulh x27, x23, x10 - adcs x19, x19, x27 - umulh x27, x23, x15 - adcs x20, x20, x27 - mul x27, x28, x1 - umulh x23, x23, x0 - adcs x9, x9, x23 - umulh x23, x28, x4 - adds x23, x23, x27 - umulh x27, x28, x1 - adcs x24, x27, x24 - umulh x27, x28, x18 - adcs x22, x27, x22 - umulh x27, x28, x6 - adcs x21, x27, x21 - mul x27, x28, x5 - umulh x29, x28, x17 - adcs x27, x29, x27 - mul x29, x28, x16 - umulh x30, x28, x5 - adcs x29, x30, x29 - umulh x30, x28, x16 - mul x28, x28, x4 - adcs x30, x30, xzr - adds x25, x28, x25 - adcs x23, x23, x26 - adcs x3, x24, x3 - mul x26, x8, x5 - adcs x7, x22, x7 - mul x22, x8, x17 - adcs x19, x21, x19 - mul x24, x8, x6 - adcs x20, x27, x20 - mul x21, x25, x2 - adcs x9, x29, x9 - mul x27, x21, x14 - adcs x28, x30, xzr - cmn x27, x25 - mul x25, x8, x18 - mul x27, x21, x13 - adcs x23, x27, x23 - mul x27, x21, x12 - adcs x3, x27, x3 - mul x27, x21, x11 - adcs x7, x27, x7 - mul x27, x21, x10 - adcs x19, x27, x19 - mul x27, x21, x15 - adcs x20, x27, x20 - mul x27, x21, x0 - adcs x9, x27, x9 - umulh x27, x21, x14 - adcs x28, x28, xzr - adds x27, x23, x27 - umulh x23, x21, x13 - adcs x3, x3, x23 - umulh x23, x21, x12 - adcs x30, x7, x23 - umulh x7, x21, x11 - adcs x7, x19, x7 - umulh x19, x21, x10 - adcs x19, x20, x19 - umulh x20, x21, x15 - adcs x20, x9, x20 - mul x9, x8, x1 - umulh x21, x21, x0 - adcs x21, x28, x21 - umulh x23, x8, x4 - adds x9, x23, x9 - umulh x23, x8, x1 - adcs x28, x23, x25 - umulh x23, x8, x18 - adcs x23, x23, x24 - umulh x24, x8, x6 - adcs x24, x24, x22 - umulh x22, x8, x17 - adcs x25, x22, x26 - mul x22, x8, x16 - umulh x26, x8, x5 - adcs x26, x26, x22 - umulh x22, x8, x16 - mul x29, x8, x4 - adcs x2, x22, xzr - adds x29, x29, x27 - adcs x27, x9, x3 - ldr x8, [sp, #16] // 8-byte Folded Reload - ldp x22, x3, [x8, #32] - adcs x9, x28, x30 - adcs x7, x23, x7 - mul x23, x22, x17 - adcs x19, x24, x19 - mul x24, x22, x6 - adcs x20, x25, x20 - ldr x8, [sp, #24] // 8-byte Folded Reload - mul x25, x29, x8 - adcs x21, x26, x21 - mul x26, x25, x14 - adcs x2, x2, xzr - cmn x26, x29 - mul x26, x22, x18 - mul x28, x25, x13 - adcs x27, x28, x27 - mul x28, x25, x12 - adcs x9, x28, x9 - mul x28, x25, x11 - adcs x7, x28, x7 - mul x28, x25, x10 - adcs x19, x28, x19 - mul x28, x25, x15 - adcs x20, x28, x20 - mul x28, x25, x0 - adcs x21, x28, x21 - umulh x28, x25, x14 - adcs x2, x2, xzr - adds x27, x27, x28 - umulh x28, x25, x13 - adcs x9, x9, x28 - umulh x28, x25, x12 - adcs x7, x7, x28 - umulh x28, x25, x11 - adcs x19, x19, x28 - umulh x28, x25, x10 - adcs x20, x20, x28 - umulh x28, x25, x15 - adcs x21, x21, x28 - mul x28, x22, x1 - umulh x25, x25, x0 - adcs x2, x2, x25 - umulh x25, x22, x4 - adds x25, x25, x28 - umulh x28, x22, x1 - adcs x26, x28, x26 - umulh x28, x22, x18 - adcs x24, x28, x24 - umulh x28, x22, x6 - adcs x23, x28, x23 - mul x28, x22, x5 - umulh x29, x22, x17 - adcs x28, x29, x28 - mul x29, x22, x16 - umulh x30, x22, x5 - adcs x29, x30, x29 - umulh x30, x22, x16 - mul 
x22, x22, x4 - adcs x30, x30, xzr - adds x22, x22, x27 - adcs x9, x25, x9 - adcs x7, x26, x7 - mul x25, x3, x5 - adcs x19, x24, x19 - mul x24, x3, x17 - adcs x20, x23, x20 - mul x23, x3, x6 - adcs x21, x28, x21 - mul x26, x22, x8 - adcs x8, x29, x2 - mul x27, x26, x14 - adcs x28, x30, xzr - cmn x27, x22 - mul x22, x3, x18 - mul x27, x26, x13 - adcs x9, x27, x9 - mul x27, x26, x12 - adcs x7, x27, x7 - mul x27, x26, x11 - adcs x19, x27, x19 - mul x27, x26, x10 - adcs x20, x27, x20 - mul x27, x26, x15 - adcs x21, x27, x21 - mul x27, x26, x0 - adcs x8, x27, x8 - umulh x27, x26, x14 - adcs x28, x28, xzr - adds x9, x9, x27 - umulh x27, x26, x13 - adcs x7, x7, x27 - umulh x27, x26, x12 - adcs x19, x19, x27 - umulh x27, x26, x11 - adcs x20, x20, x27 - umulh x27, x26, x10 - adcs x21, x21, x27 - umulh x27, x26, x15 - adcs x8, x8, x27 - mul x27, x3, x1 - umulh x26, x26, x0 - adcs x26, x28, x26 - umulh x28, x3, x4 - adds x27, x28, x27 - umulh x28, x3, x1 - adcs x22, x28, x22 - umulh x28, x3, x18 - adcs x23, x28, x23 - umulh x28, x3, x6 - adcs x24, x28, x24 - umulh x28, x3, x17 - adcs x25, x28, x25 - mul x28, x3, x16 - umulh x29, x3, x5 - adcs x28, x29, x28 - ldp x2, x30, [sp, #16] - ldr x2, [x2, #48] - umulh x29, x3, x16 - mul x3, x3, x4 - adcs x29, x29, xzr - adds x9, x3, x9 - adcs x3, x27, x7 - umulh x7, x2, x16 - mul x16, x2, x16 - adcs x19, x22, x19 - umulh x22, x2, x5 - mul x5, x2, x5 - adcs x20, x23, x20 - umulh x23, x2, x17 - mul x17, x2, x17 - adcs x21, x24, x21 - umulh x24, x2, x6 - mul x6, x2, x6 - adcs x8, x25, x8 - mul x25, x9, x30 - adcs x26, x28, x26 - mul x27, x25, x14 - adcs x28, x29, xzr - cmn x27, x9 - umulh x9, x2, x18 - mul x18, x2, x18 - umulh x27, x2, x1 - mul x1, x2, x1 - umulh x29, x2, x4 - mul x2, x2, x4 - mul x4, x25, x13 - adcs x3, x4, x3 - mul x4, x25, x12 - adcs x4, x4, x19 - mul x19, x25, x11 - adcs x19, x19, x20 - mul x20, x25, x10 - adcs x20, x20, x21 - mul x21, x25, x15 - adcs x8, x21, x8 - mul x21, x25, x0 - adcs x21, x21, x26 - adcs x26, x28, xzr - umulh x28, x25, x14 - adds x3, x3, x28 - umulh x28, x25, x13 - adcs x4, x4, x28 - umulh x28, x25, x12 - adcs x19, x19, x28 - umulh x28, x25, x11 - adcs x20, x20, x28 - umulh x28, x25, x10 - adcs x8, x8, x28 - umulh x28, x25, x15 - adcs x21, x21, x28 - umulh x25, x25, x0 - adcs x25, x26, x25 - adds x1, x29, x1 - adcs x18, x27, x18 - adcs x9, x9, x6 - adcs x17, x24, x17 - adcs x5, x23, x5 - adcs x16, x22, x16 - adcs x6, x7, xzr - adds x2, x2, x3 - adcs x1, x1, x4 - adcs x18, x18, x19 - adcs x9, x9, x20 - adcs x8, x17, x8 - adcs x17, x5, x21 - mul x3, x2, x30 - adcs x16, x16, x25 - mul x4, x3, x14 - adcs x5, x6, xzr - cmn x4, x2 - mul x2, x3, x13 - adcs x1, x2, x1 - mul x2, x3, x12 - adcs x18, x2, x18 - mul x2, x3, x11 - adcs x9, x2, x9 - mul x2, x3, x10 - adcs x8, x2, x8 - mul x2, x3, x15 - adcs x17, x2, x17 - mul x2, x3, x0 - adcs x16, x2, x16 - umulh x2, x3, x14 - adcs x4, x5, xzr - adds x1, x1, x2 - umulh x2, x3, x13 - adcs x18, x18, x2 - umulh x2, x3, x12 - adcs x9, x9, x2 - umulh x2, x3, x11 - adcs x8, x8, x2 - umulh x2, x3, x10 - adcs x17, x17, x2 - umulh x2, x3, x15 - adcs x16, x16, x2 - umulh x2, x3, x0 - adcs x2, x4, x2 - subs x14, x1, x14 - sbcs x13, x18, x13 - sbcs x12, x9, x12 - sbcs x11, x8, x11 - sbcs x10, x17, x10 - sbcs x15, x16, x15 - sbcs x0, x2, x0 - asr x3, x0, #63 - cmp x3, #0 // =0 - csel x14, x1, x14, lt - csel x13, x18, x13, lt - csel x9, x9, x12, lt - csel x8, x8, x11, lt - csel x10, x17, x10, lt - csel x11, x16, x15, lt - csel x12, x2, x0, lt - ldr x15, [sp, #8] // 8-byte Folded Reload - stp x14, 
x13, [x15] - stp x9, x8, [x15, #16] - stp x10, x11, [x15, #32] - str x12, [x15, #48] - add sp, sp, #32 // =32 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end99: - .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L - - .globl mcl_fp_montRed7L - .align 2 - .type mcl_fp_montRed7L,@function -mcl_fp_montRed7L: // @mcl_fp_montRed7L -// BB#0: - stp x28, x27, [sp, #-96]! - stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - ldur x15, [x2, #-8] - ldp x9, x8, [x2, #40] - ldp x11, x10, [x2, #24] - ldp x13, x12, [x2, #8] - ldr x14, [x2] - ldp x17, x18, [x1, #96] - ldp x2, x3, [x1, #80] - ldp x4, x5, [x1, #64] - ldp x6, x7, [x1, #48] - ldp x19, x20, [x1, #32] - ldp x21, x22, [x1, #16] - ldp x16, x1, [x1] - mul x23, x16, x15 - mul x24, x23, x8 - mul x25, x23, x9 - mul x26, x23, x10 - mul x27, x23, x11 - mul x28, x23, x12 - mul x29, x23, x13 - umulh x30, x23, x14 - adds x29, x30, x29 - umulh x30, x23, x13 - adcs x28, x30, x28 - umulh x30, x23, x12 - adcs x27, x30, x27 - umulh x30, x23, x11 - adcs x26, x30, x26 - umulh x30, x23, x10 - adcs x25, x30, x25 - umulh x30, x23, x9 - adcs x24, x30, x24 - umulh x30, x23, x8 - mul x23, x23, x14 - adcs x30, x30, xzr - cmn x16, x23 - adcs x16, x1, x29 - adcs x1, x21, x28 - mul x21, x16, x15 - adcs x22, x22, x27 - mul x23, x21, x8 - mul x27, x21, x9 - mul x28, x21, x10 - mul x29, x21, x11 - adcs x19, x19, x26 - mul x26, x21, x12 - adcs x20, x20, x25 - mul x25, x21, x13 - adcs x6, x6, x24 - umulh x24, x21, x14 - adcs x7, x7, x30 - adcs x4, x4, xzr - adcs x5, x5, xzr - adcs x2, x2, xzr - adcs x3, x3, xzr - adcs x17, x17, xzr - adcs x18, x18, xzr - adcs x30, xzr, xzr - adds x24, x24, x25 - umulh x25, x21, x13 - adcs x25, x25, x26 - umulh x26, x21, x12 - adcs x26, x26, x29 - umulh x29, x21, x11 - adcs x28, x29, x28 - umulh x29, x21, x10 - adcs x27, x29, x27 - umulh x29, x21, x9 - adcs x23, x29, x23 - umulh x29, x21, x8 - mul x21, x21, x14 - adcs x29, x29, xzr - cmn x21, x16 - adcs x16, x24, x1 - adcs x1, x25, x22 - mul x21, x16, x15 - adcs x19, x26, x19 - mul x22, x21, x8 - mul x24, x21, x9 - mul x25, x21, x10 - adcs x20, x28, x20 - mul x26, x21, x11 - adcs x6, x27, x6 - mul x27, x21, x12 - adcs x7, x23, x7 - mul x23, x21, x13 - adcs x4, x29, x4 - umulh x28, x21, x14 - adcs x5, x5, xzr - adcs x2, x2, xzr - adcs x3, x3, xzr - adcs x17, x17, xzr - adcs x18, x18, xzr - adcs x29, x30, xzr - adds x23, x28, x23 - umulh x28, x21, x13 - adcs x27, x28, x27 - umulh x28, x21, x12 - adcs x26, x28, x26 - umulh x28, x21, x11 - adcs x25, x28, x25 - umulh x28, x21, x10 - adcs x24, x28, x24 - umulh x28, x21, x9 - adcs x22, x28, x22 - umulh x28, x21, x8 - mul x21, x21, x14 - adcs x28, x28, xzr - cmn x21, x16 - adcs x16, x23, x1 - adcs x1, x27, x19 - mul x19, x16, x15 - adcs x20, x26, x20 - mul x21, x19, x8 - mul x23, x19, x9 - mul x26, x19, x10 - adcs x6, x25, x6 - mul x25, x19, x11 - adcs x7, x24, x7 - mul x24, x19, x12 - adcs x4, x22, x4 - mul x22, x19, x13 - adcs x5, x28, x5 - umulh x27, x19, x14 - adcs x2, x2, xzr - adcs x3, x3, xzr - adcs x17, x17, xzr - adcs x18, x18, xzr - adcs x28, x29, xzr - adds x22, x27, x22 - umulh x27, x19, x13 - adcs x24, x27, x24 - umulh x27, x19, x12 - adcs x25, x27, x25 - umulh x27, x19, x11 - adcs x26, x27, x26 - umulh x27, x19, x10 - adcs x23, x27, x23 - umulh x27, x19, x9 - adcs x21, x27, x21 - umulh x27, x19, x8 - mul x19, x19, x14 - adcs x27, x27, 
xzr - cmn x19, x16 - adcs x16, x22, x1 - adcs x1, x24, x20 - mul x19, x16, x15 - adcs x6, x25, x6 - mul x20, x19, x8 - mul x22, x19, x9 - mul x24, x19, x10 - adcs x7, x26, x7 - mul x25, x19, x11 - adcs x4, x23, x4 - mul x23, x19, x12 - adcs x5, x21, x5 - mul x21, x19, x13 - adcs x2, x27, x2 - umulh x26, x19, x14 - adcs x3, x3, xzr - adcs x17, x17, xzr - adcs x18, x18, xzr - adcs x27, x28, xzr - adds x21, x26, x21 - umulh x26, x19, x13 - adcs x23, x26, x23 - umulh x26, x19, x12 - adcs x25, x26, x25 - umulh x26, x19, x11 - adcs x24, x26, x24 - umulh x26, x19, x10 - adcs x22, x26, x22 - umulh x26, x19, x9 - adcs x20, x26, x20 - umulh x26, x19, x8 - mul x19, x19, x14 - adcs x26, x26, xzr - cmn x19, x16 - adcs x16, x21, x1 - adcs x1, x23, x6 - mul x6, x16, x15 - adcs x7, x25, x7 - mul x19, x6, x8 - mul x21, x6, x9 - mul x23, x6, x10 - adcs x4, x24, x4 - mul x24, x6, x11 - adcs x5, x22, x5 - mul x22, x6, x12 - adcs x2, x20, x2 - mul x20, x6, x13 - adcs x3, x26, x3 - umulh x25, x6, x14 - adcs x17, x17, xzr - adcs x18, x18, xzr - adcs x26, x27, xzr - adds x20, x25, x20 - umulh x25, x6, x13 - adcs x22, x25, x22 - umulh x25, x6, x12 - adcs x24, x25, x24 - umulh x25, x6, x11 - adcs x23, x25, x23 - umulh x25, x6, x10 - adcs x21, x25, x21 - umulh x25, x6, x9 - adcs x19, x25, x19 - umulh x25, x6, x8 - mul x6, x6, x14 - adcs x25, x25, xzr - cmn x6, x16 - adcs x16, x20, x1 - adcs x1, x22, x7 - mul x15, x16, x15 - adcs x4, x24, x4 - mul x6, x15, x8 - mul x7, x15, x9 - mul x20, x15, x10 - adcs x5, x23, x5 - mul x22, x15, x11 - adcs x2, x21, x2 - mul x21, x15, x12 - adcs x3, x19, x3 - mul x19, x15, x13 - adcs x17, x25, x17 - umulh x23, x15, x14 - adcs x18, x18, xzr - adcs x24, x26, xzr - adds x19, x23, x19 - umulh x23, x15, x13 - adcs x21, x23, x21 - umulh x23, x15, x12 - adcs x22, x23, x22 - umulh x23, x15, x11 - adcs x20, x23, x20 - umulh x23, x15, x10 - adcs x7, x23, x7 - umulh x23, x15, x9 - adcs x6, x23, x6 - umulh x23, x15, x8 - mul x15, x15, x14 - adcs x23, x23, xzr - cmn x15, x16 - adcs x15, x19, x1 - adcs x16, x21, x4 - adcs x1, x22, x5 - adcs x2, x20, x2 - adcs x3, x7, x3 - adcs x17, x6, x17 - adcs x18, x23, x18 - adcs x4, x24, xzr - subs x14, x15, x14 - sbcs x13, x16, x13 - sbcs x12, x1, x12 - sbcs x11, x2, x11 - sbcs x10, x3, x10 - sbcs x9, x17, x9 - sbcs x8, x18, x8 - sbcs x4, x4, xzr - tst x4, #0x1 - csel x14, x15, x14, ne - csel x13, x16, x13, ne - csel x12, x1, x12, ne - csel x11, x2, x11, ne - csel x10, x3, x10, ne - csel x9, x17, x9, ne - csel x8, x18, x8, ne - stp x14, x13, [x0] - stp x12, x11, [x0, #16] - stp x10, x9, [x0, #32] - str x8, [x0, #48] - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end100: - .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L - - .globl mcl_fp_addPre7L - .align 2 - .type mcl_fp_addPre7L,@function -mcl_fp_addPre7L: // @mcl_fp_addPre7L -// BB#0: - ldp x11, x8, [x2, #40] - ldp x13, x9, [x1, #40] - ldp x15, x10, [x2, #24] - ldp x17, x14, [x2, #8] - ldr x16, [x2] - ldp x18, x2, [x1] - ldr x3, [x1, #16] - ldp x1, x12, [x1, #24] - adds x16, x16, x18 - str x16, [x0] - adcs x16, x17, x2 - adcs x14, x14, x3 - stp x16, x14, [x0, #8] - adcs x14, x15, x1 - adcs x10, x10, x12 - stp x14, x10, [x0, #24] - adcs x10, x11, x13 - adcs x9, x8, x9 - adcs x8, xzr, xzr - stp x10, x9, [x0, #40] - mov x0, x8 - ret -.Lfunc_end101: - .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L - - .globl mcl_fp_subPre7L - .align 2 - .type mcl_fp_subPre7L,@function 
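[mcl_fp_montRed7L above is the reduction half of Montgomery arithmetic on its own: seven rounds of m = t[0]*n0inv, t = (t + m*p) >> 64, applied to a double-width input, followed by the usual conditional subtraction. The helpers around it, mcl_fp_addPre7L above and mcl_fp_subPre7L just below, are plain carry chains whose `adcs x8, xzr, xzr` and `ngcs x8, xzr; and x8, x8, #0x1` tails return the carry or borrow. In Go they reduce to the following sketch (illustrative names):]

    package mont

    import "math/bits"

    // addPre: full-width limb add, returning the final carry word.
    func addPre(z, x, y []uint64) (carry uint64) {
    	for i := range x {
    		z[i], carry = bits.Add64(x[i], y[i], carry)
    	}
    	return
    }

    // subPre: full-width limb subtract, returning the final borrow word.
    func subPre(z, x, y []uint64) (borrow uint64) {
    	for i := range x {
    		z[i], borrow = bits.Sub64(x[i], y[i], borrow)
    	}
    	return
    }
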
-mcl_fp_subPre7L: // @mcl_fp_subPre7L -// BB#0: - ldp x11, x8, [x2, #40] - ldp x13, x9, [x1, #40] - ldp x15, x10, [x2, #24] - ldp x17, x14, [x2, #8] - ldr x16, [x2] - ldp x18, x2, [x1] - ldr x3, [x1, #16] - ldp x1, x12, [x1, #24] - subs x16, x18, x16 - str x16, [x0] - sbcs x16, x2, x17 - sbcs x14, x3, x14 - stp x16, x14, [x0, #8] - sbcs x14, x1, x15 - sbcs x10, x12, x10 - stp x14, x10, [x0, #24] - sbcs x10, x13, x11 - sbcs x9, x9, x8 - ngcs x8, xzr - and x8, x8, #0x1 - stp x10, x9, [x0, #40] - mov x0, x8 - ret -.Lfunc_end102: - .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L - - .globl mcl_fp_shr1_7L - .align 2 - .type mcl_fp_shr1_7L,@function -mcl_fp_shr1_7L: // @mcl_fp_shr1_7L -// BB#0: - ldp x8, x9, [x1] - ldp x14, x10, [x1, #40] - ldp x11, x12, [x1, #16] - ldr x13, [x1, #32] - extr x8, x9, x8, #1 - extr x9, x11, x9, #1 - extr x11, x12, x11, #1 - extr x12, x13, x12, #1 - extr x13, x14, x13, #1 - extr x14, x10, x14, #1 - lsr x10, x10, #1 - stp x8, x9, [x0] - stp x11, x12, [x0, #16] - stp x13, x14, [x0, #32] - str x10, [x0, #48] - ret -.Lfunc_end103: - .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L - - .globl mcl_fp_add7L - .align 2 - .type mcl_fp_add7L,@function -mcl_fp_add7L: // @mcl_fp_add7L -// BB#0: - ldp x11, x8, [x2, #40] - ldp x13, x9, [x1, #40] - ldp x15, x10, [x2, #24] - ldp x17, x14, [x2, #8] - ldr x16, [x2] - ldp x18, x2, [x1] - ldr x4, [x1, #16] - ldp x1, x12, [x1, #24] - adds x16, x16, x18 - ldp x5, x18, [x3, #40] - adcs x17, x17, x2 - adcs x2, x14, x4 - ldr x4, [x3, #32] - adcs x15, x15, x1 - adcs x10, x10, x12 - ldp x12, x1, [x3] - stp x16, x17, [x0] - stp x2, x15, [x0, #16] - adcs x6, x11, x13 - stp x10, x6, [x0, #32] - adcs x8, x8, x9 - str x8, [x0, #48] - adcs x7, xzr, xzr - ldp x9, x11, [x3, #16] - subs x14, x16, x12 - sbcs x13, x17, x1 - sbcs x12, x2, x9 - sbcs x11, x15, x11 - sbcs x10, x10, x4 - sbcs x9, x6, x5 - sbcs x8, x8, x18 - sbcs x15, x7, xzr - and w15, w15, #0x1 - tbnz w15, #0, .LBB104_2 -// BB#1: // %nocarry - stp x14, x13, [x0] - stp x12, x11, [x0, #16] - stp x10, x9, [x0, #32] - str x8, [x0, #48] -.LBB104_2: // %carry - ret -.Lfunc_end104: - .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L - - .globl mcl_fp_addNF7L - .align 2 - .type mcl_fp_addNF7L,@function -mcl_fp_addNF7L: // @mcl_fp_addNF7L -// BB#0: - ldp x11, x8, [x1, #40] - ldp x13, x9, [x2, #40] - ldp x15, x10, [x1, #24] - ldp x17, x14, [x1, #8] - ldr x16, [x1] - ldp x18, x1, [x2] - ldr x4, [x2, #16] - ldp x2, x12, [x2, #24] - adds x16, x18, x16 - adcs x17, x1, x17 - adcs x14, x4, x14 - ldp x4, x18, [x3, #40] - adcs x15, x2, x15 - adcs x10, x12, x10 - ldp x12, x2, [x3] - adcs x11, x13, x11 - ldr x13, [x3, #16] - ldp x3, x1, [x3, #24] - adcs x8, x9, x8 - subs x9, x16, x12 - sbcs x12, x17, x2 - sbcs x13, x14, x13 - sbcs x2, x15, x3 - sbcs x1, x10, x1 - sbcs x3, x11, x4 - sbcs x18, x8, x18 - asr x4, x18, #63 - cmp x4, #0 // =0 - csel x9, x16, x9, lt - csel x12, x17, x12, lt - csel x13, x14, x13, lt - csel x14, x15, x2, lt - csel x10, x10, x1, lt - csel x11, x11, x3, lt - csel x8, x8, x18, lt - stp x9, x12, [x0] - stp x13, x14, [x0, #16] - stp x10, x11, [x0, #32] - str x8, [x0, #48] - ret -.Lfunc_end105: - .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L - - .globl mcl_fp_sub7L - .align 2 - .type mcl_fp_sub7L,@function -mcl_fp_sub7L: // @mcl_fp_sub7L -// BB#0: - ldp x13, x14, [x2, #40] - ldp x17, x15, [x1, #40] - ldp x11, x12, [x2, #24] - ldp x9, x10, [x2, #8] - ldr x8, [x2] - ldp x18, x2, [x1] - ldr x4, [x1, #16] - ldp x1, x16, [x1, #24] - subs x8, x18, x8 - sbcs x9, x2, x9 - stp x8, x9, [x0] 
- sbcs x10, x4, x10 - sbcs x11, x1, x11 - stp x10, x11, [x0, #16] - sbcs x12, x16, x12 - sbcs x13, x17, x13 - stp x12, x13, [x0, #32] - sbcs x14, x15, x14 - str x14, [x0, #48] - ngcs x15, xzr - and w15, w15, #0x1 - tbnz w15, #0, .LBB106_2 -// BB#1: // %nocarry - ret -.LBB106_2: // %carry - ldp x16, x17, [x3] - ldp x18, x1, [x3, #16] - ldr x2, [x3, #32] - ldp x3, x15, [x3, #40] - adds x8, x16, x8 - adcs x9, x17, x9 - adcs x10, x18, x10 - adcs x11, x1, x11 - adcs x12, x2, x12 - adcs x13, x3, x13 - adcs x14, x15, x14 - stp x8, x9, [x0] - stp x10, x11, [x0, #16] - stp x12, x13, [x0, #32] - str x14, [x0, #48] - ret -.Lfunc_end106: - .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L - - .globl mcl_fp_subNF7L - .align 2 - .type mcl_fp_subNF7L,@function -mcl_fp_subNF7L: // @mcl_fp_subNF7L -// BB#0: - ldp x11, x8, [x2, #40] - ldp x13, x9, [x1, #40] - ldp x15, x10, [x2, #24] - ldp x17, x14, [x2, #8] - ldr x16, [x2] - ldp x18, x2, [x1] - ldr x4, [x1, #16] - ldp x1, x12, [x1, #24] - subs x16, x18, x16 - sbcs x17, x2, x17 - sbcs x14, x4, x14 - ldp x4, x18, [x3, #40] - sbcs x15, x1, x15 - sbcs x10, x12, x10 - ldp x12, x1, [x3] - sbcs x11, x13, x11 - ldr x13, [x3, #16] - ldp x3, x2, [x3, #24] - sbcs x8, x9, x8 - asr x9, x8, #63 - and x1, x9, x1 - and x13, x9, x13 - and x3, x9, x3 - and x2, x9, x2 - and x4, x9, x4 - and x18, x9, x18 - extr x9, x9, x8, #63 - and x9, x9, x12 - adds x9, x9, x16 - str x9, [x0] - adcs x9, x1, x17 - str x9, [x0, #8] - adcs x9, x13, x14 - str x9, [x0, #16] - adcs x9, x3, x15 - str x9, [x0, #24] - adcs x9, x2, x10 - str x9, [x0, #32] - adcs x9, x4, x11 - adcs x8, x18, x8 - stp x9, x8, [x0, #40] - ret -.Lfunc_end107: - .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L - - .globl mcl_fpDbl_add7L - .align 2 - .type mcl_fpDbl_add7L,@function -mcl_fpDbl_add7L: // @mcl_fpDbl_add7L -// BB#0: - stp x28, x27, [sp, #-96]! 
- stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - ldp x8, x9, [x2, #96] - ldp x10, x11, [x1, #96] - ldp x12, x13, [x2, #80] - ldp x14, x15, [x1, #80] - ldp x16, x17, [x2, #64] - ldp x18, x4, [x1, #64] - ldp x5, x6, [x2, #48] - ldp x7, x19, [x1, #48] - ldp x20, x21, [x2, #32] - ldp x22, x23, [x1, #32] - ldp x24, x25, [x2, #16] - ldp x27, x2, [x2] - ldp x28, x29, [x1, #16] - ldp x26, x1, [x1] - adds x26, x27, x26 - ldr x27, [x3, #48] - str x26, [x0] - adcs x1, x2, x1 - ldp x2, x26, [x3, #32] - str x1, [x0, #8] - adcs x1, x24, x28 - ldp x24, x28, [x3, #16] - str x1, [x0, #16] - ldp x1, x3, [x3] - adcs x25, x25, x29 - adcs x20, x20, x22 - stp x25, x20, [x0, #24] - adcs x20, x21, x23 - adcs x5, x5, x7 - stp x20, x5, [x0, #40] - adcs x5, x6, x19 - adcs x16, x16, x18 - adcs x17, x17, x4 - adcs x12, x12, x14 - adcs x13, x13, x15 - adcs x8, x8, x10 - adcs x9, x9, x11 - adcs x10, xzr, xzr - subs x11, x5, x1 - sbcs x14, x16, x3 - sbcs x15, x17, x24 - sbcs x18, x12, x28 - sbcs x1, x13, x2 - sbcs x2, x8, x26 - sbcs x3, x9, x27 - sbcs x10, x10, xzr - tst x10, #0x1 - csel x10, x5, x11, ne - csel x11, x16, x14, ne - csel x14, x17, x15, ne - csel x12, x12, x18, ne - csel x13, x13, x1, ne - csel x8, x8, x2, ne - csel x9, x9, x3, ne - stp x10, x11, [x0, #56] - stp x14, x12, [x0, #72] - stp x13, x8, [x0, #88] - str x9, [x0, #104] - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end108: - .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L - - .globl mcl_fpDbl_sub7L - .align 2 - .type mcl_fpDbl_sub7L,@function -mcl_fpDbl_sub7L: // @mcl_fpDbl_sub7L -// BB#0: - stp x28, x27, [sp, #-96]! 
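[mcl_fpDbl_add7L, completed above (mcl_fpDbl_sub7L, opening below, is its mirror image), adds 14-limb double-width values: the low seven limbs are stored from a plain carry chain, and only the high half is reduced by a conditional subtraction of p. Sketch with illustrative names:]

    package mont

    import "math/bits"

    // dblAdd computes z = x + y over 2n limbs, reducing only the high
    // n limbs modulo p, as the double-width routines above do.
    func dblAdd(z, x, y, p []uint64) {
    	n := len(p)
    	var carry uint64
    	for i := 0; i < 2*n; i++ {
    		z[i], carry = bits.Add64(x[i], y[i], carry)
    	}
    	hi := z[n:]
    	d := make([]uint64, n)
    	var borrow uint64
    	for i := 0; i < n; i++ {
    		d[i], borrow = bits.Sub64(hi[i], p[i], borrow)
    	}
    	if carry == 1 || borrow == 0 { // high half >= p
    		copy(hi, d)
    	}
    }
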
- stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - ldp x9, x8, [x2, #96] - ldp x11, x10, [x1, #96] - ldp x12, x13, [x2, #80] - ldp x14, x15, [x1, #80] - ldp x16, x17, [x2, #64] - ldp x18, x4, [x1, #64] - ldp x5, x6, [x2, #48] - ldp x7, x19, [x1, #48] - ldp x20, x21, [x2, #32] - ldp x22, x23, [x1, #32] - ldp x24, x25, [x2, #16] - ldp x26, x2, [x2] - ldp x28, x29, [x1, #16] - ldp x27, x1, [x1] - subs x26, x27, x26 - ldr x27, [x3, #48] - str x26, [x0] - sbcs x1, x1, x2 - ldp x2, x26, [x3, #32] - str x1, [x0, #8] - sbcs x1, x28, x24 - ldp x24, x28, [x3, #16] - str x1, [x0, #16] - ldp x1, x3, [x3] - sbcs x25, x29, x25 - sbcs x20, x22, x20 - stp x25, x20, [x0, #24] - sbcs x20, x23, x21 - sbcs x5, x7, x5 - stp x20, x5, [x0, #40] - sbcs x5, x19, x6 - sbcs x16, x18, x16 - sbcs x17, x4, x17 - sbcs x12, x14, x12 - sbcs x13, x15, x13 - sbcs x9, x11, x9 - sbcs x8, x10, x8 - ngcs x10, xzr - tst x10, #0x1 - csel x10, x27, xzr, ne - csel x11, x26, xzr, ne - csel x14, x2, xzr, ne - csel x15, x28, xzr, ne - csel x18, x24, xzr, ne - csel x2, x3, xzr, ne - csel x1, x1, xzr, ne - adds x1, x1, x5 - adcs x16, x2, x16 - stp x1, x16, [x0, #56] - adcs x16, x18, x17 - adcs x12, x15, x12 - stp x16, x12, [x0, #72] - adcs x12, x14, x13 - adcs x9, x11, x9 - stp x12, x9, [x0, #88] - adcs x8, x10, x8 - str x8, [x0, #104] - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end109: - .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L - - .align 2 - .type .LmulPv512x64,@function -.LmulPv512x64: // @mulPv512x64 -// BB#0: - ldr x9, [x0] - mul x10, x9, x1 - str x10, [x8] - ldr x10, [x0, #8] - umulh x9, x9, x1 - mul x11, x10, x1 - adds x9, x9, x11 - str x9, [x8, #8] - ldr x9, [x0, #16] - umulh x10, x10, x1 - mul x11, x9, x1 - adcs x10, x10, x11 - str x10, [x8, #16] - ldr x10, [x0, #24] - umulh x9, x9, x1 - mul x11, x10, x1 - adcs x9, x9, x11 - str x9, [x8, #24] - ldr x9, [x0, #32] - umulh x10, x10, x1 - mul x11, x9, x1 - adcs x10, x10, x11 - str x10, [x8, #32] - ldr x10, [x0, #40] - umulh x9, x9, x1 - mul x11, x10, x1 - adcs x9, x9, x11 - str x9, [x8, #40] - ldr x9, [x0, #48] - umulh x10, x10, x1 - mul x11, x9, x1 - adcs x10, x10, x11 - str x10, [x8, #48] - ldr x10, [x0, #56] - umulh x9, x9, x1 - mul x11, x10, x1 - umulh x10, x10, x1 - adcs x9, x9, x11 - str x9, [x8, #56] - adcs x9, x10, xzr - str x9, [x8, #64] - ret -.Lfunc_end110: - .size .LmulPv512x64, .Lfunc_end110-.LmulPv512x64 - - .globl mcl_fp_mulUnitPre8L - .align 2 - .type mcl_fp_mulUnitPre8L,@function -mcl_fp_mulUnitPre8L: // @mcl_fp_mulUnitPre8L -// BB#0: - stp x20, x19, [sp, #-32]! - stp x29, x30, [sp, #16] - add x29, sp, #16 // =16 - sub sp, sp, #80 // =80 - mov x19, x0 - mov x8, sp - mov x0, x1 - mov x1, x2 - bl .LmulPv512x64 - ldp x9, x8, [sp, #56] - ldp x11, x10, [sp, #40] - ldp x16, x12, [sp, #24] - ldp x13, x14, [sp] - ldr x15, [sp, #16] - stp x13, x14, [x19] - stp x15, x16, [x19, #16] - stp x12, x11, [x19, #32] - stp x10, x9, [x19, #48] - str x8, [x19, #64] - sub sp, x29, #16 // =16 - ldp x29, x30, [sp, #16] - ldp x20, x19, [sp], #32 - ret -.Lfunc_end111: - .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L - - .globl mcl_fpDbl_mulPre8L - .align 2 - .type mcl_fpDbl_mulPre8L,@function -mcl_fpDbl_mulPre8L: // @mcl_fpDbl_mulPre8L -// BB#0: - stp x28, x27, [sp, #-96]! 
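[.LmulPv512x64, defined in full above, is the shared building block for the 8-limb routines: it multiplies a 512-bit operand by one 64-bit word into nine result words with a mul/umulh/adcs ladder, and mcl_fp_mulUnitPre8L is a thin wrapper over it. The equivalent loop in Go (illustrative name mulWord):]

    package mont

    import "math/bits"

    // mulWord multiplies an n-limb value by a single 64-bit word,
    // producing n+1 limbs, the role .LmulPv512x64 plays for n = 8.
    func mulWord(x []uint64, y uint64) []uint64 {
    	z := make([]uint64, len(x)+1)
    	var carry uint64
    	for i, xi := range x {
    		hi, lo := bits.Mul64(xi, y)
    		var c uint64
    		z[i], c = bits.Add64(lo, carry, 0)
    		carry = hi + c // hi <= 2^64-2, so this cannot wrap
    	}
    	z[len(x)] = carry
    	return z
    }
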
- stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - add x29, sp, #80 // =80 - sub sp, sp, #144 // =144 - mov x20, x2 - mov x21, x1 - mov x19, x0 - bl mcl_fpDbl_mulPre4L - add x0, x19, #64 // =64 - add x1, x21, #32 // =32 - add x2, x20, #32 // =32 - bl mcl_fpDbl_mulPre4L - ldp x8, x9, [x20, #48] - ldp x10, x11, [x20, #32] - ldp x12, x13, [x20] - ldp x14, x15, [x20, #16] - adds x18, x12, x10 - str x18, [sp, #8] // 8-byte Folded Spill - ldp x10, x12, [x21, #16] - ldp x16, x17, [x21, #48] - adcs x22, x13, x11 - ldp x11, x13, [x21] - adcs x23, x14, x8 - ldp x8, x14, [x21, #32] - stp x18, x22, [sp, #16] - adcs x21, x15, x9 - stp x23, x21, [sp, #32] - adcs x24, xzr, xzr - adds x25, x11, x8 - adcs x26, x13, x14 - stp x25, x26, [sp, #48] - adcs x27, x10, x16 - adcs x28, x12, x17 - stp x27, x28, [sp, #64] - adcs x20, xzr, xzr - add x0, sp, #80 // =80 - add x1, sp, #48 // =48 - add x2, sp, #16 // =16 - bl mcl_fpDbl_mulPre4L - cmp x24, #0 // =0 - csel x8, x28, xzr, ne - and x9, x24, x20 - ldp x11, x10, [sp, #128] - ldp x13, x12, [sp, #112] - ldp x14, x15, [x19, #48] - ldp x16, x17, [x19, #32] - ldp x18, x0, [x19, #16] - csel x1, x27, xzr, ne - csel x2, x26, xzr, ne - csel x3, x25, xzr, ne - cmp x20, #0 // =0 - ldp x4, x5, [x19] - csel x6, x21, xzr, ne - csel x7, x23, xzr, ne - csel x20, x22, xzr, ne - ldr x21, [sp, #8] // 8-byte Folded Reload - csel x21, x21, xzr, ne - adds x3, x21, x3 - adcs x2, x20, x2 - ldp x20, x21, [sp, #96] - adcs x1, x7, x1 - adcs x8, x6, x8 - adcs x6, xzr, xzr - adds x13, x3, x13 - ldp x3, x7, [sp, #80] - adcs x12, x2, x12 - adcs x11, x1, x11 - ldp x1, x2, [x19, #112] - adcs x8, x8, x10 - adcs x9, x6, x9 - ldp x10, x6, [x19, #96] - subs x3, x3, x4 - sbcs x4, x7, x5 - ldp x5, x7, [x19, #80] - sbcs x18, x20, x18 - sbcs x0, x21, x0 - ldp x20, x21, [x19, #64] - sbcs x13, x13, x16 - sbcs x12, x12, x17 - sbcs x11, x11, x14 - sbcs x8, x8, x15 - sbcs x9, x9, xzr - subs x3, x3, x20 - sbcs x4, x4, x21 - sbcs x18, x18, x5 - sbcs x0, x0, x7 - sbcs x13, x13, x10 - sbcs x12, x12, x6 - sbcs x11, x11, x1 - sbcs x8, x8, x2 - sbcs x9, x9, xzr - adds x16, x16, x3 - str x16, [x19, #32] - adcs x16, x17, x4 - adcs x14, x14, x18 - stp x16, x14, [x19, #40] - adcs x14, x15, x0 - adcs x13, x20, x13 - stp x14, x13, [x19, #56] - adcs x12, x21, x12 - adcs x11, x5, x11 - stp x12, x11, [x19, #72] - adcs x8, x7, x8 - str x8, [x19, #88] - adcs x8, x10, x9 - str x8, [x19, #96] - adcs x8, x6, xzr - str x8, [x19, #104] - adcs x8, x1, xzr - str x8, [x19, #112] - adcs x8, x2, xzr - str x8, [x19, #120] - sub sp, x29, #80 // =80 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end112: - .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L - - .globl mcl_fpDbl_sqrPre8L - .align 2 - .type mcl_fpDbl_sqrPre8L,@function -mcl_fpDbl_sqrPre8L: // @mcl_fpDbl_sqrPre8L -// BB#0: - stp x28, x27, [sp, #-96]! 
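[mcl_fpDbl_mulPre8L, completed above, is one level of Karatsuba: it calls mcl_fpDbl_mulPre4L on the low halves, on the high halves, and on the two half-sums, then stitches the middle term in with the add/sub carry chains and csel carry fix-ups visible in the listing (mcl_fpDbl_sqrPre8L, opening below, reuses the same split with both operands equal). The underlying identity, demonstrated on math/big for brevity; X stands for 2^256, i.e. four limbs:]

    package mont

    import "math/big"

    // karatsuba computes (aL + aH*X)*(bL + bH*X) from three half-size
    // products, the shape of mcl_fpDbl_mulPre8L's three mulPre4L calls.
    func karatsuba(aL, aH, bL, bH, X *big.Int) *big.Int {
    	lo := new(big.Int).Mul(aL, bL)
    	hi := new(big.Int).Mul(aH, bH)
    	mid := new(big.Int).Mul(new(big.Int).Add(aL, aH), new(big.Int).Add(bL, bH))
    	mid.Sub(mid, lo).Sub(mid, hi) // = aL*bH + aH*bL
    	r := new(big.Int).Mul(hi, X)
    	r.Add(r, mid).Mul(r, X).Add(r, lo) // hi*X^2 + mid*X + lo
    	return r
    }
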
- stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - add x29, sp, #80 // =80 - sub sp, sp, #128 // =128 - mov x20, x1 - mov x19, x0 - mov x2, x20 - bl mcl_fpDbl_mulPre4L - add x0, x19, #64 // =64 - add x1, x20, #32 // =32 - mov x2, x1 - bl mcl_fpDbl_mulPre4L - ldp x8, x9, [x20, #16] - ldp x10, x11, [x20, #32] - ldp x12, x13, [x20] - ldp x14, x15, [x20, #48] - adds x22, x12, x10 - adcs x23, x13, x11 - adcs x20, x8, x14 - adcs x21, x9, x15 - stp x22, x23, [sp, #32] - stp x22, x23, [sp] - stp x20, x21, [sp, #48] - stp x20, x21, [sp, #16] - adcs x24, xzr, xzr - add x0, sp, #64 // =64 - add x1, sp, #32 // =32 - mov x2, sp - bl mcl_fpDbl_mulPre4L - ldp x8, x9, [x19, #48] - ldp x10, x11, [x19] - ldp x12, x13, [sp, #64] - ldp x14, x15, [x19, #16] - ldp x16, x17, [sp, #80] - ldp x18, x0, [x19, #32] - subs x10, x12, x10 - ldp x1, x12, [sp, #96] - sbcs x11, x13, x11 - sbcs x14, x16, x14 - ldp x13, x16, [sp, #112] - sbcs x15, x17, x15 - sbcs x17, x1, x18 - ldp x1, x2, [x19, #64] - ldp x3, x4, [x19, #80] - ldp x5, x6, [x19, #96] - ldp x7, x25, [x19, #112] - lsr x26, x21, #63 - sbcs x12, x12, x0 - sbcs x13, x13, x8 - sbcs x16, x16, x9 - sbcs x27, x24, xzr - subs x10, x10, x1 - sbcs x11, x11, x2 - sbcs x14, x14, x3 - sbcs x15, x15, x4 - sbcs x17, x17, x5 - sbcs x12, x12, x6 - sbcs x13, x13, x7 - sbcs x16, x16, x25 - sbcs x27, x27, xzr - adds x22, x22, x22 - adcs x23, x23, x23 - adcs x20, x20, x20 - adcs x21, x21, x21 - cmp x24, #0 // =0 - csel x24, x26, xzr, ne - csel x21, x21, xzr, ne - csel x20, x20, xzr, ne - csel x23, x23, xzr, ne - csel x22, x22, xzr, ne - adds x17, x17, x22 - adcs x12, x12, x23 - adcs x13, x13, x20 - adcs x16, x16, x21 - adcs x20, x27, x24 - adds x10, x10, x18 - str x10, [x19, #32] - adcs x10, x11, x0 - adcs x8, x14, x8 - stp x10, x8, [x19, #40] - adcs x8, x15, x9 - str x8, [x19, #56] - adcs x8, x17, x1 - str x8, [x19, #64] - adcs x8, x12, x2 - str x8, [x19, #72] - adcs x8, x13, x3 - str x8, [x19, #80] - adcs x8, x16, x4 - str x8, [x19, #88] - adcs x8, x20, x5 - str x8, [x19, #96] - adcs x8, x6, xzr - str x8, [x19, #104] - adcs x8, x7, xzr - str x8, [x19, #112] - adcs x8, x25, xzr - str x8, [x19, #120] - sub sp, x29, #80 // =80 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end113: - .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L - - .globl mcl_fp_mont8L - .align 2 - .type mcl_fp_mont8L,@function -mcl_fp_mont8L: // @mcl_fp_mont8L -// BB#0: - stp x28, x27, [sp, #-96]! 
- stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - add x29, sp, #80 // =80 - sub sp, sp, #1424 // =1424 - mov x20, x3 - mov x26, x2 - str x26, [sp, #120] // 8-byte Folded Spill - ldur x19, [x20, #-8] - str x19, [sp, #136] // 8-byte Folded Spill - ldr x9, [x26] - mov x27, x1 - str x27, [sp, #128] // 8-byte Folded Spill - str x0, [sp, #112] // 8-byte Folded Spill - sub x8, x29, #160 // =160 - mov x0, x27 - mov x1, x9 - bl .LmulPv512x64 - ldur x24, [x29, #-160] - ldur x8, [x29, #-96] - str x8, [sp, #104] // 8-byte Folded Spill - ldur x8, [x29, #-104] - str x8, [sp, #96] // 8-byte Folded Spill - ldur x8, [x29, #-112] - str x8, [sp, #88] // 8-byte Folded Spill - ldur x8, [x29, #-120] - str x8, [sp, #80] // 8-byte Folded Spill - ldur x8, [x29, #-128] - str x8, [sp, #72] // 8-byte Folded Spill - ldur x8, [x29, #-136] - str x8, [sp, #64] // 8-byte Folded Spill - ldur x8, [x29, #-144] - str x8, [sp, #56] // 8-byte Folded Spill - ldur x8, [x29, #-152] - str x8, [sp, #48] // 8-byte Folded Spill - mul x1, x24, x19 - sub x8, x29, #240 // =240 - mov x0, x20 - bl .LmulPv512x64 - ldur x8, [x29, #-176] - str x8, [sp, #40] // 8-byte Folded Spill - ldur x8, [x29, #-184] - str x8, [sp, #32] // 8-byte Folded Spill - ldur x8, [x29, #-192] - str x8, [sp, #24] // 8-byte Folded Spill - ldp x19, x28, [x29, #-208] - ldp x21, x23, [x29, #-224] - ldp x25, x22, [x29, #-240] - ldr x1, [x26, #8] - add x8, sp, #1184 // =1184 - mov x0, x27 - bl .LmulPv512x64 - cmn x25, x24 - ldr x8, [sp, #1248] - ldr x9, [sp, #1240] - ldp x10, x12, [sp, #48] - adcs x10, x22, x10 - ldr x11, [sp, #1232] - adcs x12, x21, x12 - ldr x13, [sp, #1224] - ldp x14, x16, [sp, #64] - adcs x14, x23, x14 - ldr x15, [sp, #1216] - adcs x16, x19, x16 - ldr x17, [sp, #1208] - ldp x18, x1, [sp, #80] - adcs x18, x28, x18 - ldr x0, [sp, #1200] - ldp x2, x4, [sp, #24] - adcs x1, x2, x1 - ldr x2, [sp, #1184] - ldp x3, x5, [sp, #96] - adcs x3, x4, x3 - ldr x4, [sp, #1192] - ldr x6, [sp, #40] // 8-byte Folded Reload - adcs x5, x6, x5 - adcs x6, xzr, xzr - adds x19, x10, x2 - adcs x10, x12, x4 - str x10, [sp, #40] // 8-byte Folded Spill - adcs x10, x14, x0 - str x10, [sp, #88] // 8-byte Folded Spill - adcs x10, x16, x17 - str x10, [sp, #80] // 8-byte Folded Spill - adcs x10, x18, x15 - str x10, [sp, #72] // 8-byte Folded Spill - adcs x10, x1, x13 - str x10, [sp, #64] // 8-byte Folded Spill - adcs x10, x3, x11 - adcs x9, x5, x9 - adcs x8, x6, x8 - stp x8, x9, [sp, #96] - adcs x8, xzr, xzr - stp x8, x10, [sp, #48] - ldr x22, [sp, #136] // 8-byte Folded Reload - mul x1, x19, x22 - add x8, sp, #1104 // =1104 - mov x0, x20 - bl .LmulPv512x64 - ldr x8, [sp, #1168] - str x8, [sp, #32] // 8-byte Folded Spill - ldr x8, [sp, #1160] - str x8, [sp, #24] // 8-byte Folded Spill - ldr x8, [sp, #1152] - str x8, [sp, #16] // 8-byte Folded Spill - ldr x8, [sp, #1144] - str x8, [sp, #8] // 8-byte Folded Spill - ldr x25, [sp, #1136] - ldr x26, [sp, #1128] - ldr x27, [sp, #1120] - ldr x21, [sp, #1112] - ldr x28, [sp, #1104] - ldp x24, x23, [sp, #120] - ldr x1, [x24, #16] - add x8, sp, #1024 // =1024 - mov x0, x23 - bl .LmulPv512x64 - cmn x19, x28 - ldr x8, [sp, #1088] - ldr x9, [sp, #1080] - ldr x10, [sp, #40] // 8-byte Folded Reload - adcs x10, x10, x21 - ldr x11, [sp, #1072] - ldp x14, x12, [sp, #80] - adcs x12, x12, x27 - ldr x13, [sp, #1064] - adcs x14, x14, x26 - ldr x15, [sp, #1056] - ldp x18, x16, [sp, #64] - adcs x16, x16, x25 - ldr x17, [sp, #1048] - ldp x0, x2, [sp, #8] - adcs x18, x18, x0 - ldr 
x0, [sp, #1040]
[unrolled Montgomery word-rounds of mcl_fp_mont8L elided: each repeats the same .LmulPv512x64 / carry-accumulate / reduce sequence with the next multiplier word]
- ldr x8, [sp, #136] // 8-byte Folded Reload
- mul x1, x19, x8
- add x8, sp, #144 // =144
- mov x0, x20
- bl .LmulPv512x64
- ldp x15, x8, [sp, #200]
- ldp x9, x10, [sp, #144]
- ldp x11, x12, [sp, #160]
- cmn x19, x9
- ldp x13, x9, [sp, #176]
- adcs x10, x21, x10
- ldr x14, [sp, #192]
- adcs x11, x22, x11
- adcs x12, x23, x12
- adcs x13, x24, x13
- adcs x9, x25, x9
- ldp x16, x17, [x20, #48]
- ldp x18, x0, [x20, #32]
- ldp x1, x2, [x20, #16]
- ldp x3, x4, [x20]
- ldr x5, [sp, #128] // 8-byte Folded Reload
- adcs x14, x5, x14
- adcs x15, x27, x15
- adcs x8, x28, x8
- adcs x5, x26, xzr
- subs x3, x10, x3
- sbcs x4, x11, x4
- sbcs x1, x12, x1
- sbcs x2, x13, x2
- sbcs x18, x9, x18
- sbcs x0, x14, x0
- sbcs x16, x15, x16
- sbcs x17, x8, x17
- sbcs x5, x5, xzr
- tst x5, #0x1
- csel x10, x10, x3, ne
- csel x11, x11, x4, ne
- csel x12, x12, x1, ne
- csel x13, x13, x2, ne
- csel x9, x9, x18, ne
- csel x14, x14, x0, ne
- csel x15, x15, x16, ne
- csel x8, x8, x17, ne
- ldr x16, [sp, #112] // 8-byte Folded Reload
- stp x10, x11, [x16]
- stp x12, x13, [x16, #16]
- stp x9, x14, [x16, #32]
- stp x15, x8, [x16, #48]
- sub sp, x29, #80 // =80
- ldp x29, x30, [sp, #80]
- ldp x20, x19, [sp, #64]
- ldp x22, x21, [sp, #48]
- ldp x24, x23, [sp, #32]
- ldp x26, x25, [sp, #16]
- ldp x28, x27, [sp], #96
- ret
-.Lfunc_end114:
- .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L
-
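mcl_fp_mont8L above is LLVM-generated AArch64 for an 8-limb (512-bit) Montgomery multiplication: one .LmulPv512x64 call per word of the multiplier, an adcs accumulate chain, and a final subs/csel conditional subtraction. For orientation, a minimal Go sketch of the same CIOS pattern; the names (montMul, madd2), the [8]uint64 layout, and the inv parameter are illustrative assumptions, not mcl's API:

package montsketch

import "math/bits"

const n = 8 // limbs: 8 x 64 = 512 bits, matching mcl_fp_mont8L

// madd2 returns the low word of a*b+c+d and the carry word.
func madd2(a, b, c, d uint64) (lo, hi uint64) {
	hi, lo = bits.Mul64(a, b)
	var cc uint64
	lo, cc = bits.Add64(lo, c, 0)
	hi += cc
	lo, cc = bits.Add64(lo, d, 0)
	hi += cc // cannot overflow: a*b + c + d < 2^128
	return lo, hi
}

// montMul computes a*b*R^-1 mod m (R = 2^(64n), m odd,
// inv = -m[0]^-1 mod 2^64) with the CIOS method.
func montMul(a, b, m *[n]uint64, inv uint64) [n]uint64 {
	var t [n + 2]uint64
	for i := 0; i < n; i++ {
		// t += a * b[i]  (the role of .LmulPv512x64 plus the adcs chain)
		var c uint64
		for j := 0; j < n; j++ {
			t[j], c = madd2(a[j], b[i], t[j], c)
		}
		var cc uint64
		t[n], cc = bits.Add64(t[n], c, 0)
		t[n+1] = cc

		// q makes the low limb vanish; add q*m and shift down one limb
		q := t[0] * inv
		_, c = madd2(q, m[0], t[0], 0)
		for j := 1; j < n; j++ {
			t[j-1], c = madd2(q, m[j], t[j], c)
		}
		t[n-1], cc = bits.Add64(t[n], c, 0)
		t[n] = t[n+1] + cc
	}
	// final conditional subtraction (the subs/sbcs + csel block)
	var res [n]uint64
	var borrow uint64
	for j := 0; j < n; j++ {
		res[j], borrow = bits.Sub64(t[j], m[j], borrow)
	}
	if t[n] == 0 && borrow != 0 { // t < m: keep the unsubtracted value
		copy(res[:], t[:n])
	}
	return res
}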
- .globl mcl_fp_montNF8L
- .align 2
- .type mcl_fp_montNF8L,@function
-mcl_fp_montNF8L: // @mcl_fp_montNF8L
-// BB#0:
- stp x28, x27, [sp, #-96]!
- stp x26, x25, [sp, #16]
- stp x24, x23, [sp, #32]
- stp x22, x21, [sp, #48]
- stp x20, x19, [sp, #64]
- stp x29, x30, [sp, #80]
- add x29, sp, #80 // =80
- sub sp, sp, #1424 // =1424
- mov x20, x3
- mov x26, x2
- str x26, [sp, #128] // 8-byte Folded Spill
- ldur x19, [x20, #-8]
- str x19, [sp, #136] // 8-byte Folded Spill
- ldr x9, [x26]
- mov x27, x1
- stp x0, x27, [sp, #112]
- sub x8, x29, #160 // =160
- mov x0, x27
- mov x1, x9
- bl .LmulPv512x64
[unrolled Montgomery word-rounds of mcl_fp_montNF8L elided: same .LmulPv512x64 / carry-accumulate / reduce structure as mcl_fp_mont8L, without the extra top carry limb]
- ldr x8, [sp, #136] // 8-byte Folded Reload
- mul x1, x19, x8
- add x8, sp, #144 // =144
- mov x0, x20
- bl .LmulPv512x64
- ldp x15, x8, [sp, #200]
- ldp x9, x10, [sp, #144]
- ldp x11, x12, [sp, #160]
- cmn x19, x9
- ldp x13, x9, [sp, #176]
- adcs x10, x21, x10
- ldr x14, [sp, #192]
- adcs x11, x22, x11
- adcs x12, x23, x12
- adcs x13, x24, x13
- adcs x9, x25, x9
- ldp x16, x17, [x20, #48]
- ldp x18, x0, [x20, #32]
- ldp x1, x2, [x20, #16]
- ldp x3, x4, [x20]
- adcs x14, x26, x14
- adcs x15, x27, x15
- adcs x8, x28, x8
- subs x3, x10, x3
- sbcs x4, x11, x4
- sbcs x1, x12, x1
- sbcs x2, x13, x2
- sbcs x18, x9, x18
- sbcs x0, x14, x0
- sbcs x16, x15, x16
- sbcs x17, x8, x17
- cmp x17, #0 // =0
- csel x10, x10, x3, lt
- csel x11, x11, x4, lt
- csel x12, x12, x1, lt
- csel x13, x13, x2, lt
- csel x9, x9, x18, lt
- csel x14, x14, x0, lt
- csel x15, x15, x16, lt
- csel x8, x8, x17, lt
- ldr x16, [sp, #112] // 8-byte Folded Reload
- stp x10, x11, [x16]
- stp x12, x13, [x16, #16]
- stp x9, x14, [x16, #32]
- stp x15, x8, [x16, #48]
- sub sp, x29, #80 // =80
- ldp x29, x30, [sp, #80]
- ldp x20, x19, [sp, #64]
- ldp x22, x21, [sp, #48]
- ldp x24, x23, [sp, #32]
- ldp x26, x25, [sp, #16]
- ldp x28, x27, [sp], #96
- ret
-.Lfunc_end115:
- .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L
-
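Where mcl_fp_mont8L selects the final result on the carry flag (tst/csel ne), mcl_fp_montNF8L above selects on the sign of the trial subtraction (cmp/csel lt). The next routine, mcl_fp_montRed8L, is standalone Montgomery reduction: it folds a 16-limb product down to 8 limbs, one .LmulPv512x64 round per low word. A hedged word-by-word REDC sketch under the same assumptions (and helpers) as the montMul sketch above:

package montsketch

import "math/bits"

// montRed reduces a 2n-limb T to T*R^-1 mod m, assuming T < m*R.
// Assumes n, madd2, and inv = -m[0]^-1 mod 2^64 from the montMul sketch.
func montRed(T [2 * n]uint64, m *[n]uint64, inv uint64) [n]uint64 {
	var top uint64 // carry out of the highest limb
	for i := 0; i < n; i++ {
		q := T[i] * inv
		var c uint64
		for j := 0; j < n; j++ {
			T[i+j], c = madd2(q, m[j], T[i+j], c)
		}
		// ripple the round carry into the limbs above the window
		var cc uint64
		T[i+n], cc = bits.Add64(T[i+n], c, 0)
		for k := i + n + 1; cc != 0 && k < 2*n; k++ {
			T[k], cc = bits.Add64(T[k], 0, cc)
		}
		top += cc
	}
	// result is the upper half, minus m if it is still too large
	var res [n]uint64
	var borrow uint64
	for j := 0; j < n; j++ {
		res[j], borrow = bits.Sub64(T[n+j], m[j], borrow)
	}
	if top == 0 && borrow != 0 {
		copy(res[:], T[n:])
	}
	return res
}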
- .globl mcl_fp_montRed8L
- .align 2
- .type mcl_fp_montRed8L,@function
-mcl_fp_montRed8L: // @mcl_fp_montRed8L
-// BB#0:
- stp x28, x27, [sp, #-96]!
- stp x26, x25, [sp, #16]
- stp x24, x23, [sp, #32]
- stp x22, x21, [sp, #48]
- stp x20, x19, [sp, #64]
- stp x29, x30, [sp, #80]
- add x29, sp, #80 // =80
- sub sp, sp, #800 // =800
- mov x20, x2
- ldur x9, [x20, #-8]
- str x9, [sp, #32] // 8-byte Folded Spill
[modulus/operand loads and the intermediate .LmulPv512x64 reduction rounds of mcl_fp_montRed8L elided; each round folds one low limb of the accumulator into the modulus]
- mul x1, x21, x19
- add x8, sp, #160 // =160
- mov x0, x20
- bl .LmulPv512x64
- ldp x9, x8, [sp, #216]
- ldp x11, x10, [sp, #200]
- ldp x16, x12, [sp, #184]
- ldp x13, x14, [sp, #160]
- ldr x15, [sp, #176]
- cmn x21, x13
- ldr x13, [sp, #72] // 8-byte Folded Reload
- adcs x13, x13, x14
- adcs x14, x28, x15
- adcs x15, x23, x16
- adcs x12, x26, x12
- adcs x11, x24, x11
- ldr x16, [sp, #64] // 8-byte Folded Reload
- adcs x10, x16, x10
- adcs x9, x22, x9
- adcs x8, x25, x8
- adcs x16, x27, xzr
- ldp x17, x18, [sp, #88]
- subs x17, x13, x17
- sbcs x18, x14, x18
- ldp x0, x1, [sp, #104]
- sbcs x0, x15, x0
- sbcs x1, x12, x1
- ldp x2, x3, [sp, #120]
- sbcs x2, x11, x2
- sbcs x3, x10, x3
- ldp x4, x5, [sp, #144]
- sbcs x4, x9, x4
- sbcs x5, x8, x5
- sbcs x16, x16, xzr
- tst x16, #0x1
- csel x13, x13, x17, ne
- csel x14, x14, x18, ne
- csel x15, x15, x0, ne
- csel x12, x12, x1, ne
- csel x11, x11, x2, ne
- csel x10, x10, x3, ne
- csel x9, x9, x4, ne
- csel x8, x8, x5, ne
- ldr x16, [sp, #136] // 8-byte Folded Reload
- stp x13, x14, [x16]
- stp x15, x12, [x16, #16]
- stp x11, x10, [x16, #32]
- stp x9, x8, [x16, #48]
- sub sp, x29, #80 // =80
- ldp x29, x30, [sp, #80]
- ldp x20, x19, [sp, #64]
- ldp x22, x21, [sp, #48]
- ldp x24, x23, [sp, #32]
- ldp x26, x25, [sp, #16]
- ldp x28, x27, [sp], #96
- ret
-.Lfunc_end116:
- .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L
-
- .globl mcl_fp_addPre8L
- .align 2
- .type mcl_fp_addPre8L,@function
-mcl_fp_addPre8L: // @mcl_fp_addPre8L
-// BB#0:
- ldp x8, x9, [x2, #48]
- ldp x10, x11, [x1, #48]
- ldp x12, x13, [x2, #32]
- ldp x14, x15, [x1, #32]
- ldp x16, x17, [x2, #16]
- ldp x18, x2, [x2]
- ldp x3, x4, [x1]
- ldp x5, x1, [x1, #16]
- adds x18, x18, x3
- str x18, [x0]
- adcs x18, x2, x4
- adcs x16, x16, x5
- stp x18, x16, [x0, #8]
- adcs x16, x17, x1
- adcs x12, x12, x14
- stp x16, x12, [x0, #24]
- adcs x12, x13, x15
- adcs x8, x8, x10
- stp x12, x8, [x0, #40]
- adcs x9, x9, x11
- adcs x8, xzr, xzr
- str x9, [x0, #56]
- mov x0, x8
- ret
-.Lfunc_end117:
- .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L
-
- .globl mcl_fp_subPre8L
- .align 2
- .type mcl_fp_subPre8L,@function
-mcl_fp_subPre8L: // @mcl_fp_subPre8L
-// BB#0:
- ldp x8, x9, [x2, #48]
- ldp x10, x11, [x1, #48]
- ldp x12, x13, [x2, #32]
- ldp x14, x15, [x1, #32]
- ldp x16, x17, [x2, #16]
- ldp x18, x2, [x2]
- ldp x3, x4, [x1]
- ldp x5, x1, [x1, #16]
- subs x18, x3, x18
- str x18, [x0]
- sbcs x18, x4, x2
- sbcs x16, x5, x16
- stp x18, x16, [x0, #8]
- sbcs x16, x1, x17
- sbcs x12, x14, x12
- stp x16, x12, [x0, #24]
- sbcs x12, x15, x13
- sbcs x8, x10, x8
- stp x12, x8, [x0, #40]
- sbcs x9, x11, x9
- ngcs x8, xzr
- and x8, x8, #0x1
- str x9, [x0, #56]
- mov x0, x8
- ret
-.Lfunc_end118:
- .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L
-
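mcl_fp_addPre8L and mcl_fp_subPre8L above are plain limb-wise carry chains (adds/adcs, subs/sbcs) returning the final carry or borrow in x0. In Go this is a direct math/bits loop; illustrative names again:

package montsketch

import "math/bits"

// addPre mirrors mcl_fp_addPre8L: z = x + y, returning the carry-out.
func addPre(z, x, y *[n]uint64) uint64 {
	var carry uint64
	for i := 0; i < n; i++ {
		z[i], carry = bits.Add64(x[i], y[i], carry)
	}
	return carry
}

// subPre mirrors mcl_fp_subPre8L: z = x - y, returning the borrow-out.
func subPre(z, x, y *[n]uint64) uint64 {
	var borrow uint64
	for i := 0; i < n; i++ {
		z[i], borrow = bits.Sub64(x[i], y[i], borrow)
	}
	return borrow
}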
- .globl mcl_fp_shr1_8L
- .align 2
- .type mcl_fp_shr1_8L,@function
-mcl_fp_shr1_8L: // @mcl_fp_shr1_8L
-// BB#0:
- ldp x8, x9, [x1]
- ldp x10, x11, [x1, #48]
- ldp x12, x13, [x1, #16]
- ldp x14, x15, [x1, #32]
- extr x8, x9, x8, #1
- extr x9, x12, x9, #1
- extr x12, x13, x12, #1
- extr x13, x14, x13, #1
- extr x14, x15, x14, #1
- extr x15, x10, x15, #1
- extr x10, x11, x10, #1
- lsr x11, x11, #1
- stp x8, x9, [x0]
- stp x12, x13, [x0, #16]
- stp x14, x15, [x0, #32]
- stp x10, x11, [x0, #48]
- ret
-.Lfunc_end119:
- .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L
-
- .globl mcl_fp_add8L
- .align 2
- .type mcl_fp_add8L,@function
-mcl_fp_add8L: // @mcl_fp_add8L
-// BB#0:
- stp x22, x21, [sp, #-32]!
- stp x20, x19, [sp, #16]
- ldp x8, x9, [x2, #48]
- ldp x10, x11, [x1, #48]
- ldp x12, x13, [x2, #32]
- ldp x14, x15, [x1, #32]
- ldp x16, x17, [x2, #16]
- ldp x18, x2, [x2]
- ldp x4, x5, [x1]
- ldp x6, x1, [x1, #16]
- adds x18, x18, x4
- adcs x2, x2, x5
- ldp x4, x5, [x3, #48]
- adcs x16, x16, x6
- adcs x17, x17, x1
- ldp x1, x6, [x3, #32]
- adcs x7, x12, x14
- adcs x19, x13, x15
- ldp x12, x13, [x3]
- stp x18, x2, [x0]
- stp x16, x17, [x0, #16]
- stp x7, x19, [x0, #32]
- adcs x8, x8, x10
- adcs x20, x9, x11
- stp x8, x20, [x0, #48]
- adcs x21, xzr, xzr
- ldp x9, x10, [x3, #16]
- subs x15, x18, x12
- sbcs x14, x2, x13
- sbcs x13, x16, x9
- sbcs x12, x17, x10
- sbcs x11, x7, x1
- sbcs x10, x19, x6
- sbcs x9, x8, x4
- sbcs x8, x20, x5
- sbcs x16, x21, xzr
- and w16, w16, #0x1
- tbnz w16, #0, .LBB120_2
-// BB#1: // %nocarry
- stp x15, x14, [x0]
- stp x13, x12, [x0, #16]
- stp x11, x10, [x0, #32]
- stp x9, x8, [x0, #48]
-.LBB120_2: // %carry
- ldp x20, x19, [sp, #16]
- ldp x22, x21, [sp], #32
- ret
-.Lfunc_end120:
- .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L
-
- .globl mcl_fp_addNF8L
- .align 2
- .type mcl_fp_addNF8L,@function
-mcl_fp_addNF8L: // @mcl_fp_addNF8L
-// BB#0:
- ldp x8, x9, [x1, #48]
- ldp x10, x11, [x2, #48]
- ldp x12, x13, [x1, #32]
- ldp x14, x15, [x2, #32]
- ldp x16, x17, [x1, #16]
- ldp x18, x1, [x1]
- ldp x4, x5, [x2]
- ldp x6, x2, [x2, #16]
- adds x18, x4, x18
- adcs x1, x5, x1
- ldp x4, x5, [x3, #48]
- adcs x16, x6, x16
- adcs x17, x2, x17
- ldp x2, x6, [x3, #32]
- adcs x12, x14, x12
- adcs x13, x15, x13
- ldp x14, x15, [x3]
- adcs x8, x10, x8
- ldp x10, x3, [x3, #16]
- adcs x9, x11, x9
- subs x11, x18, x14
- sbcs x14, x1, x15
- sbcs x10, x16, x10
- sbcs x15, x17, x3
- sbcs x2, x12, x2
- sbcs x3, x13, x6
- sbcs x4, x8, x4
- sbcs x5, x9, x5
- cmp x5, #0 // =0
- csel x11, x18, x11, lt
- csel x14, x1, x14, lt
- csel x10, x16, x10, lt
- csel x15, x17, x15, lt
- csel x12, x12, x2, lt
- csel x13, x13, x3, lt
- csel x8, x8, x4, lt
- csel x9, x9, x5, lt
- stp x11, x14, [x0]
- stp x10, x15, [x0, #16]
- stp x12, x13, [x0, #32]
- stp x8, x9, [x0, #48]
- ret
-.Lfunc_end121:
- .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L
-
- .globl mcl_fp_sub8L
- .align 2
- .type mcl_fp_sub8L,@function
-mcl_fp_sub8L: // @mcl_fp_sub8L
-// BB#0:
- ldp x14, x15, [x2, #48]
- ldp x16, x17, [x1, #48]
- ldp x12, x13, [x2, #32]
- ldp x18, x4, [x1, #32]
- ldp x10, x11, [x2, #16]
- ldp x8, x9, [x2]
- ldp x2, x5, [x1]
- ldp x6, x1, [x1, #16]
- subs x8, x2, x8
- sbcs x9, x5, x9
- stp x8, x9, [x0]
- sbcs x10, x6, x10
- sbcs x11, x1, x11
- stp x10, x11, [x0, #16]
- sbcs x12, x18, x12
- sbcs x13, x4, x13
- stp x12, x13, [x0, #32]
- sbcs x14, x16, x14
- sbcs x15, x17, x15
- stp x14, x15, [x0, #48]
- ngcs x16, xzr
- and w16, w16, #0x1
- tbnz w16, #0, .LBB122_2
-// BB#1: // %nocarry
- ret
-.LBB122_2: // %carry
- ldp x16, x17, [x3, #48]
- ldp x18, x1, [x3]
- ldp x2, x4, [x3, #16]
- ldp x5, x3, [x3, #32]
- adds x8, x18, x8
- adcs x9, x1, x9
- adcs x10, x2, x10
- adcs x11, x4, x11
- adcs x12, x5, x12
- adcs x13, x3, x13
- adcs x14, x16, x14
- adcs x15, x17, x15
- stp x8, x9, [x0]
- stp x10, x11, [x0, #16]
- stp x12, x13, [x0, #32]
- stp x14, x15, [x0, #48]
- ret
-.Lfunc_end122:
- .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L
-
- .globl mcl_fp_subNF8L
- .align 2
- .type mcl_fp_subNF8L,@function
-mcl_fp_subNF8L: // @mcl_fp_subNF8L
-// BB#0:
- ldp x8, x9, [x2, #48]
- ldp x10, x11, [x1, #48]
- ldp x12, x13, [x2, #32]
- ldp x14, x15, [x1, #32]
- ldp x16, x17, [x2, #16]
- ldp x18, x2, [x2]
- ldp x4, x5, [x1]
- ldp x6, x1, [x1, #16]
- subs x18, x4, x18
- sbcs x2, x5, x2
- ldp x4, x5, [x3, #48]
- sbcs x16, x6, x16
- sbcs x17, x1, x17
- ldp x1, x6, [x3, #32]
- sbcs x12, x14, x12
- sbcs x13, x15, x13
- ldp x14, x15, [x3, #16]
- sbcs x8, x10, x8
- ldp x10, x3, [x3]
- sbcs x9, x11, x9
- asr x11, x9, #63
- and x10, x11, x10
- and x3, x11, x3
- and x14, x11, x14
- and x15, x11, x15
- and x1, x11, x1
- and x6, x11, x6
- and x4, x11, x4
- and x11, x11, x5
- adds x10, x10, x18
- str x10, [x0]
- adcs x10, x3, x2
- str x10, [x0, #8]
- adcs x10, x14, x16
- str x10, [x0, #16]
- adcs x10, x15, x17
- str x10, [x0, #24]
- adcs x10, x1, x12
- str x10, [x0, #32]
- adcs x10, x6, x13
- adcs x8, x4, x8
- stp x10, x8, [x0, #40]
- adcs x8, x11, x9
- str x8, [x0, #56]
- ret
-.Lfunc_end123:
- .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L
-
- .globl mcl_fpDbl_add8L
- .align 2
- .type mcl_fpDbl_add8L,@function
-mcl_fpDbl_add8L: // @mcl_fpDbl_add8L
-// BB#0:
- ldp x8, x9, [x2, #112]
- ldp x10, x11, [x1, #112]
- ldp x12, x13, [x2, #96]
- ldp x14, x15, [x1, #96]
- ldp x16, x5, [x2]
- ldp x17, x6, [x1]
- ldp x18, x4, [x2, #80]
- adds x16, x16, x17
- ldr x17, [x1, #16]
- str x16, [x0]
- adcs x16, x5, x6
- ldp x5, x6, [x2, #16]
- str x16, [x0, #8]
- adcs x17, x5, x17
- ldp x16, x5, [x1, #24]
- str x17, [x0, #16]
- adcs x16, x6, x16
- ldp x17, x6, [x2, #32]
- str x16, [x0, #24]
- adcs x17, x17, x5
- ldp x16, x5, [x1, #40]
- str x17, [x0, #32]
- adcs x16, x6, x16
- ldp x17, x6, [x2, #48]
- str x16, [x0, #40]
- ldr x16, [x1, #56]
- adcs x17, x17, x5
- ldp x5, x2, [x2, #64]
- str x17, [x0, #48]
- adcs x16, x6, x16
- ldp x17, x6, [x1, #64]
- str x16, [x0, #56]
- ldp x16, x1, [x1, #80]
- adcs x17, x5, x17
- adcs x2, x2, x6
- ldp x5, x6, [x3, #48]
- adcs x16, x18, x16
- adcs x18, x4, x1
- ldp x1, x4, [x3, #32]
- adcs x12, x12, x14
- adcs x13, x13, x15
- ldp x14, x15, [x3, #16]
- adcs x8, x8, x10
- ldp x10, x3, [x3]
- adcs x9, x9, x11
- adcs x11, xzr, xzr
- subs x10, x17, x10
- sbcs x3, x2, x3
- sbcs x14, x16, x14
- sbcs x15, x18, x15
- sbcs x1, x12, x1
- sbcs x4, x13, x4
- sbcs x5, x8, x5
- sbcs x6, x9, x6
- sbcs x11, x11, xzr
- tst x11, #0x1
- csel x10, x17, x10, ne
- csel x11, x2, x3, ne
- csel x14, x16, x14, ne
- csel x15, x18, x15, ne
- csel x12, x12, x1, ne
- csel x13, x13, x4, ne
- csel x8, x8, x5, ne
- csel x9, x9, x6, ne
- stp x10, x11, [x0, #64]
- stp x14, x15, [x0, #80]
- stp x12, x13, [x0, #96]
- stp x8, x9, [x0, #112]
- ret
-.Lfunc_end124:
- .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L
-
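The routines above wrap the same chains with a modulus correction: mcl_fp_add8L keeps the subtracted value unless it borrows, mcl_fp_sub8L adds the modulus back on borrow, and mcl_fp_shr1_8L is a one-bit funnel shift (extr). Sketches, reusing addPre/subPre from the previous block:

package montsketch

// fpAdd mirrors mcl_fp_add8L: add, then keep z-m unless that borrows.
// Assumes n, addPre, subPre from the sketches above; inputs < m.
func fpAdd(z, x, y, m *[n]uint64) {
	carry := addPre(z, x, y)
	var t [n]uint64
	if borrow := subPre(&t, z, m); carry != 0 || borrow == 0 {
		*z = t
	}
}

// fpSub mirrors mcl_fp_sub8L: subtract, then add m back if it borrowed.
func fpSub(z, x, y, m *[n]uint64) {
	if subPre(z, x, y) != 0 {
		addPre(z, z, m)
	}
}

// shr1 mirrors mcl_fp_shr1_8L: a one-bit right shift across the limbs.
func shr1(z, x *[n]uint64) {
	for i := 0; i < n-1; i++ {
		z[i] = x[i]>>1 | x[i+1]<<63
	}
	z[n-1] = x[n-1] >> 1
}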
- .globl mcl_fpDbl_sub8L
- .align 2
- .type mcl_fpDbl_sub8L,@function
-mcl_fpDbl_sub8L: // @mcl_fpDbl_sub8L
-// BB#0:
- ldp x10, x8, [x2, #112]
- ldp x11, x9, [x1, #112]
- ldp x12, x13, [x2, #96]
- ldp x14, x15, [x1, #96]
- ldp x16, x5, [x1]
- ldp x17, x4, [x2]
- ldr x18, [x1, #80]
- subs x16, x16, x17
- ldr x17, [x1, #16]
- str x16, [x0]
- sbcs x16, x5, x4
- ldp x4, x5, [x2, #16]
- str x16, [x0, #8]
- sbcs x17, x17, x4
- ldp x16, x4, [x1, #24]
- str x17, [x0, #16]
- sbcs x16, x16, x5
- ldp x17, x5, [x2, #32]
- str x16, [x0, #24]
- sbcs x17, x4, x17
- ldp x16, x4, [x1, #40]
- str x17, [x0, #32]
- sbcs x16, x16, x5
- ldp x17, x5, [x2, #48]
- str x16, [x0, #40]
- sbcs x17, x4, x17
- ldp x16, x4, [x1, #56]
- str x17, [x0, #48]
- sbcs x16, x16, x5
- ldp x17, x5, [x2, #64]
- str x16, [x0, #56]
- ldr x16, [x1, #72]
- sbcs x17, x4, x17
- ldp x4, x2, [x2, #80]
- ldr x1, [x1, #88]
- sbcs x16, x16, x5
- sbcs x18, x18, x4
- ldp x4, x5, [x3, #48]
- sbcs x1, x1, x2
- sbcs x12, x14, x12
- ldp x14, x2, [x3, #32]
- sbcs x13, x15, x13
- sbcs x10, x11, x10
- ldp x11, x15, [x3, #16]
- sbcs x8, x9, x8
- ngcs x9, xzr
- tst x9, #0x1
- ldp x9, x3, [x3]
- csel x5, x5, xzr, ne
- csel x4, x4, xzr, ne
- csel x2, x2, xzr, ne
- csel x14, x14, xzr, ne
- csel x15, x15, xzr, ne
- csel x11, x11, xzr, ne
- csel x3, x3, xzr, ne
- csel x9, x9, xzr, ne
- adds x9, x9, x17
- str x9, [x0, #64]
- adcs x9, x3, x16
- str x9, [x0, #72]
- adcs x9, x11, x18
- str x9, [x0, #80]
- adcs x9, x15, x1
- str x9, [x0, #88]
- adcs x9, x14, x12
- str x9, [x0, #96]
- adcs x9, x2, x13
- str x9, [x0, #104]
- adcs x9, x4, x10
- adcs x8, x5, x8
- stp x9, x8, [x0, #112]
- ret
-.Lfunc_end125:
- .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L
-
- .align 2
- .type .LmulPv576x64,@function
-.LmulPv576x64: // @mulPv576x64
-// BB#0:
- ldr x9, [x0]
- mul x10, x9, x1
- str x10, [x8]
- ldr x10, [x0, #8]
- umulh x9, x9, x1
- mul x11, x10, x1
- adds x9, x9, x11
- str x9, [x8, #8]
- ldr x9, [x0, #16]
- umulh x10, x10, x1
- mul x11, x9, x1
- adcs x10, x10, x11
- str x10, [x8, #16]
- ldr x10, [x0, #24]
- umulh x9, x9, x1
- mul x11, x10, x1
- adcs x9, x9, x11
- str x9, [x8, #24]
- ldr x9, [x0, #32]
- umulh x10, x10, x1
- mul x11, x9, x1
- adcs x10, x10, x11
- str x10, [x8, #32]
- ldr x10, [x0, #40]
- umulh x9, x9, x1
- mul x11, x10, x1
- adcs x9, x9, x11
- str x9, [x8, #40]
- ldr x9, [x0, #48]
- umulh x10, x10, x1
- mul x11, x9, x1
- adcs x10, x10, x11
- str x10, [x8, #48]
- ldr x10, [x0, #56]
- umulh x9, x9, x1
- mul x11, x10, x1
- adcs x9, x9, x11
- str x9, [x8, #56]
- ldr x9, [x0, #64]
- umulh x10, x10, x1
- mul x11, x9, x1
- umulh x9, x9, x1
- adcs x10, x10, x11
- adcs x9, x9, xzr
- stp x10, x9, [x8, #64]
- ret
-.Lfunc_end126:
- .size .LmulPv576x64, .Lfunc_end126-.LmulPv576x64
-
- .globl mcl_fp_mulUnitPre9L
- .align 2
- .type mcl_fp_mulUnitPre9L,@function
-mcl_fp_mulUnitPre9L: // @mcl_fp_mulUnitPre9L
-// BB#0:
- stp x20, x19, [sp, #-32]!
- stp x29, x30, [sp, #16]
- add x29, sp, #16 // =16
- sub sp, sp, #80 // =80
- mov x19, x0
- mov x8, sp
- mov x0, x1
- mov x1, x2
- bl .LmulPv576x64
- ldp x9, x8, [sp, #64]
- ldp x11, x10, [sp, #48]
- ldp x13, x12, [sp, #32]
- ldp x14, x15, [sp]
- ldp x16, x17, [sp, #16]
- stp x14, x15, [x19]
- stp x16, x17, [x19, #16]
- stp x13, x12, [x19, #32]
- stp x11, x10, [x19, #48]
- stp x9, x8, [x19, #64]
- sub sp, x29, #16 // =16
- ldp x29, x30, [sp, #16]
- ldp x20, x19, [sp], #32
- ret
-.Lfunc_end127:
- .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L
-
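.LmulPv576x64 above (the 576-bit sibling of the 512-bit helper used by the 8L routines) multiplies a 9-limb operand by one 64-bit word into 10 limbs, interleaving mul/umulh with an adcs chain. The Go equivalent is bits.Mul64 plus a single carry chain; an illustrative sketch:

package montsketch

import "math/bits"

// mulPv mirrors .LmulPv576x64: z = x * y, 9 limbs in, 10 limbs out.
func mulPv(z *[10]uint64, x *[9]uint64, y uint64) {
	var hiPrev, carry uint64
	for i := 0; i < 9; i++ {
		hi, lo := bits.Mul64(x[i], y) // mul + umulh
		z[i], carry = bits.Add64(lo, hiPrev, carry)
		hiPrev = hi
	}
	z[9] = hiPrev + carry // cannot overflow
}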
- .globl mcl_fpDbl_mulPre9L
- .align 2
- .type mcl_fpDbl_mulPre9L,@function
-mcl_fpDbl_mulPre9L: // @mcl_fpDbl_mulPre9L
-// BB#0:
- stp x28, x27, [sp, #-96]!
- stp x26, x25, [sp, #16]
- stp x24, x23, [sp, #32]
- stp x22, x21, [sp, #48]
- stp x20, x19, [sp, #64]
- stp x29, x30, [sp, #80]
- add x29, sp, #80 // =80
- sub sp, sp, #752 // =752
- mov x21, x2
- ldr x9, [x21]
- mov x20, x1
- mov x19, x0
- sub x8, x29, #160 // =160
- mov x0, x20
- mov x1, x9
- bl .LmulPv576x64
[intermediate .LmulPv576x64 word-products of mcl_fpDbl_mulPre9L elided; each partial product is accumulated one limb higher than the last]
- add x8, sp, #32 // =32
- mov x0, x20
- bl .LmulPv576x64
- ldp x9, x8, [sp, #96]
- ldp x11, x10, [sp, #80]
- ldp x13, x12, [sp, #64]
- ldp x14, x16, [sp, #32]
- ldp x17, x15, [sp, #48]
- adds x14, x22, x14
- str x14, [x19, #64]
- adcs x14, x23, x16
- str x14, [x19, #72]
- adcs x14, x24, x17
- str x14, [x19, #80]
- adcs x14, x25, x15
- adcs x13, x26, x13
- stp x14, x13, [x19, #88]
- adcs x12, x27, x12
- adcs x11, x21, x11
- stp x12, x11, [x19, #104]
- adcs x10, x28, x10
- str x10, [x19, #120]
- ldr x10, [sp, #24] // 8-byte Folded Reload
- adcs x9, x10, x9
- adcs x8, x8, xzr
- stp x9, x8, [x19, #128]
- sub sp, x29, #80 // =80
- ldp x29, x30, [sp, #80]
- ldp x20, x19, [sp, #64]
- ldp x22, x21, [sp, #48]
- ldp x24, x23, [sp, #32]
- ldp x26, x25, [sp, #16]
- ldp x28, x27, [sp], #96
- ret
-.Lfunc_end128:
- .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L
-
- .globl mcl_fpDbl_sqrPre9L
- .align 2
- .type mcl_fpDbl_sqrPre9L,@function
-mcl_fpDbl_sqrPre9L: // @mcl_fpDbl_sqrPre9L
-// BB#0:
- stp x28, x27, [sp, #-96]!
- stp x26, x25, [sp, #16]
- stp x24, x23, [sp, #32]
- stp x22, x21, [sp, #48]
- stp x20, x19, [sp, #64]
- stp x29, x30, [sp, #80]
- add x29, sp, #80 // =80
- sub sp, sp, #736 // =736
- mov x20, x1
- ldr x1, [x20]
- mov x19, x0
- sub x8, x29, #160 // =160
- mov x0, x20
- bl .LmulPv576x64
[intermediate .LmulPv576x64 word-products of mcl_fpDbl_sqrPre9L elided; identical accumulation with the operand multiplied by itself]
- add x8, sp, #16 // =16
- mov x0, x20
- bl .LmulPv576x64
- ldp x9, x8, [sp, #80]
- ldp x11, x10, [sp, #64]
- ldp x13, x12, [sp, #48]
- ldp x14, x16, [sp, #16]
- ldp x17, x15, [sp, #32]
- adds x14, x21, x14
- str x14, [x19, #64]
- adcs x14, x22, x16
- str x14, [x19, #72]
- adcs x14, x23, x17
- str x14, [x19, #80]
- adcs x14, x24, x15
- adcs x13, x25, x13
- stp x14, x13, [x19, #88]
- adcs x12, x26, x12
- adcs x11, x27, x11
- stp x12, x11, [x19, #104]
- adcs x10, x28, x10
- str x10, [x19, #120]
- ldr x10, [sp, #8] // 8-byte Folded Reload
- adcs x9, x10, x9
- adcs x8, x8, xzr
- stp x9, x8, [x19, #128]
- sub sp, x29, #80 // =80
- ldp x29, x30, [sp, #80]
- ldp x20, x19, [sp, #64]
- ldp x22, x21, [sp, #48]
- ldp x24, x23, [sp, #32]
- ldp x26, x25, [sp, #16]
- ldp x28, x27, [sp], #96
- ret
-.Lfunc_end129:
- .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L
-
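mcl_fpDbl_mulPre9L and mcl_fpDbl_sqrPre9L above are schoolbook double-width products: one .LmulPv576x64 per word of the second operand, each partial product accumulated one limb higher than the last (sqrPre simply feeds the operand to itself). A sketch reusing mulPv from the previous block; z is assumed zero-initialized:

package montsketch

import "math/bits"

// mulPre mirrors mcl_fpDbl_mulPre9L: z = x * y as a full 18-limb product.
func mulPre(z *[18]uint64, x, y *[9]uint64) {
	var t [10]uint64
	for i := 0; i < 9; i++ {
		mulPv(&t, x, y[i]) // partial product x * y[i]
		var c uint64
		for j := 0; j < 10; j++ {
			z[i+j], c = bits.Add64(z[i+j], t[j], c)
		}
		// no carry out of the window: the top limb of an n-by-1
		// product is at most 2^64-2, so adding a carry of 1 fits
	}
}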
- .globl mcl_fp_mont9L
- .align 2
- .type mcl_fp_mont9L,@function
-mcl_fp_mont9L: // @mcl_fp_mont9L
-// BB#0:
- stp x28, x27, [sp, #-96]!
- stp x26, x25, [sp, #16]
- stp x24, x23, [sp, #32]
- stp x22, x21, [sp, #48]
- stp x20, x19, [sp, #64]
- stp x29, x30, [sp, #80]
- add x29, sp, #80 // =80
- sub sp, sp, #1600 // =1600
- mov x20, x3
- mov x28, x2
- str x28, [sp, #136] // 8-byte Folded Spill
- ldur x19, [x20, #-8]
- str x19, [sp, #144] // 8-byte Folded Spill
- ldr x9, [x28]
- mov x23, x1
- str x23, [sp, #152] // 8-byte Folded Spill
- str x0, [sp, #128] // 8-byte Folded Spill
- sub x8, x29, #160 // =160
- mov x0, x23
- mov x1, x9
- bl .LmulPv576x64
[unrolled Montgomery word-rounds of mcl_fp_mont9L elided: the 9-limb analogue of mcl_fp_mont8L, one .LmulPv576x64 pair per multiplier word]
- ldr x8, [sp, #144] // 8-byte Folded Reload
- mul x1, x21, x8
- add x8, sp, #160 // =160
- mov x0, x20
- bl .LmulPv576x64
- ldp x16, x8, [sp, #224]
- ldp x9, x10, [sp, #160]
- ldp x11, x12, [sp, #176]
- cmn x21, x9
- ldp x13, x9, [sp, #192]
- adcs x10, x22, x10
- ldp x14, x15, [sp, #208]
- adcs x11, x23, x11
- adcs x12, x24, x12
- adcs x13, x25, x13
- adcs x9, x26, x9
- adcs x14, x27, x14
- ldp x0, x17, [x20, #56]
- ldp x2, x18, [x20, #40]
- ldp x4, x1, [x20, #24]
- ldp x6, x3, [x20, #8]
- ldr x5, [x20]
- ldr x7, [sp, #152] // 8-byte Folded Reload
- adcs x15, x7, x15
- ldr x7, [sp, #136] // 8-byte Folded Reload
- adcs x16, x7, x16
- adcs x8, x19, x8
- adcs x7, x28, xzr
- subs x5, x10, x5
- sbcs x6, x11, x6
- sbcs x3, x12, x3
- sbcs x4, x13, x4
- sbcs x1, x9, x1
- sbcs x2, x14, x2
- sbcs x18, x15, x18
- sbcs x0, x16, x0
- sbcs x17, x8, x17
- sbcs x7, x7,
xzr - tst x7, #0x1 - csel x10, x10, x5, ne - csel x11, x11, x6, ne - csel x12, x12, x3, ne - csel x13, x13, x4, ne - csel x9, x9, x1, ne - csel x14, x14, x2, ne - csel x15, x15, x18, ne - csel x16, x16, x0, ne - csel x8, x8, x17, ne - ldr x17, [sp, #128] // 8-byte Folded Reload - stp x10, x11, [x17] - stp x12, x13, [x17, #16] - stp x9, x14, [x17, #32] - stp x15, x16, [x17, #48] - str x8, [x17, #64] - sub sp, x29, #80 // =80 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end130: - .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L - - .globl mcl_fp_montNF9L - .align 2 - .type mcl_fp_montNF9L,@function -mcl_fp_montNF9L: // @mcl_fp_montNF9L -// BB#0: - stp x28, x27, [sp, #-96]! - stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - add x29, sp, #80 // =80 - sub sp, sp, #1584 // =1584 - mov x20, x3 - mov x28, x2 - str x28, [sp, #120] // 8-byte Folded Spill - ldur x19, [x20, #-8] - str x19, [sp, #128] // 8-byte Folded Spill - ldr x9, [x28] - mov x23, x1 - str x23, [sp, #136] // 8-byte Folded Spill - str x0, [sp, #112] // 8-byte Folded Spill - sub x8, x29, #160 // =160 - mov x0, x23 - mov x1, x9 - bl .LmulPv576x64 - ldur x24, [x29, #-160] - ldur x8, [x29, #-88] - str x8, [sp, #104] // 8-byte Folded Spill - ldur x8, [x29, #-96] - str x8, [sp, #96] // 8-byte Folded Spill - ldur x8, [x29, #-104] - str x8, [sp, #88] // 8-byte Folded Spill - ldur x8, [x29, #-112] - str x8, [sp, #80] // 8-byte Folded Spill - ldur x8, [x29, #-120] - str x8, [sp, #72] // 8-byte Folded Spill - ldur x8, [x29, #-128] - str x8, [sp, #64] // 8-byte Folded Spill - ldur x8, [x29, #-136] - str x8, [sp, #56] // 8-byte Folded Spill - ldur x8, [x29, #-144] - str x8, [sp, #48] // 8-byte Folded Spill - ldur x8, [x29, #-152] - str x8, [sp, #32] // 8-byte Folded Spill - mul x1, x24, x19 - sub x8, x29, #240 // =240 - mov x0, x20 - bl .LmulPv576x64 - ldur x8, [x29, #-168] - str x8, [sp, #40] // 8-byte Folded Spill - ldur x8, [x29, #-176] - str x8, [sp, #24] // 8-byte Folded Spill - ldur x8, [x29, #-184] - str x8, [sp, #16] // 8-byte Folded Spill - ldur x8, [x29, #-192] - str x8, [sp, #8] // 8-byte Folded Spill - ldp x21, x19, [x29, #-208] - ldp x26, x22, [x29, #-224] - ldp x27, x25, [x29, #-240] - ldr x1, [x28, #8] - add x8, sp, #1344 // =1344 - mov x0, x23 - bl .LmulPv576x64 - cmn x27, x24 - ldr x8, [sp, #1416] - ldr x9, [sp, #1408] - ldr x10, [sp, #32] // 8-byte Folded Reload - adcs x10, x25, x10 - ldr x11, [sp, #1400] - ldp x12, x14, [sp, #48] - adcs x12, x26, x12 - ldr x13, [sp, #1392] - adcs x14, x22, x14 - ldr x15, [sp, #1384] - ldp x16, x18, [sp, #64] - adcs x16, x21, x16 - ldr x17, [sp, #1376] - adcs x18, x19, x18 - ldr x0, [sp, #1368] - ldp x1, x3, [sp, #80] - ldp x2, x4, [sp, #8] - adcs x1, x2, x1 - ldr x2, [sp, #1352] - adcs x3, x4, x3 - ldr x4, [sp, #1344] - ldp x5, x7, [sp, #96] - ldr x6, [sp, #24] // 8-byte Folded Reload - adcs x5, x6, x5 - ldr x6, [sp, #1360] - ldr x19, [sp, #40] // 8-byte Folded Reload - adcs x7, x19, x7 - adds x19, x10, x4 - adcs x10, x12, x2 - str x10, [sp, #40] // 8-byte Folded Spill - adcs x10, x14, x6 - str x10, [sp, #80] // 8-byte Folded Spill - adcs x10, x16, x0 - str x10, [sp, #72] // 8-byte Folded Spill - adcs x10, x18, x17 - str x10, [sp, #64] // 8-byte Folded Spill - adcs x10, x1, x15 - str x10, [sp, #56] // 8-byte Folded Spill - adcs x10, x3, x13 - str x10, [sp, #48] // 8-byte Folded Spill - adcs 
x10, x5, x11 - adcs x9, x7, x9 - stp x9, x10, [sp, #96] - adcs x8, x8, xzr - str x8, [sp, #88] // 8-byte Folded Spill - ldr x22, [sp, #128] // 8-byte Folded Reload - mul x1, x19, x22 - add x8, sp, #1264 // =1264 - mov x0, x20 - bl .LmulPv576x64 - ldr x8, [sp, #1336] - str x8, [sp, #32] // 8-byte Folded Spill - ldr x8, [sp, #1328] - str x8, [sp, #24] // 8-byte Folded Spill - ldr x8, [sp, #1320] - str x8, [sp, #16] // 8-byte Folded Spill - ldr x8, [sp, #1312] - str x8, [sp, #8] // 8-byte Folded Spill - ldr x24, [sp, #1304] - ldr x25, [sp, #1296] - ldr x26, [sp, #1288] - ldr x21, [sp, #1280] - ldr x27, [sp, #1272] - ldr x28, [sp, #1264] - ldr x23, [sp, #120] // 8-byte Folded Reload - ldr x1, [x23, #16] - add x8, sp, #1184 // =1184 - ldr x0, [sp, #136] // 8-byte Folded Reload - bl .LmulPv576x64 - cmn x19, x28 - ldr x8, [sp, #1256] - ldr x9, [sp, #1248] - ldp x10, x1, [sp, #40] - adcs x10, x10, x27 - ldr x11, [sp, #1240] - ldp x14, x12, [sp, #72] - adcs x12, x12, x21 - ldr x13, [sp, #1232] - adcs x14, x14, x26 - ldr x15, [sp, #1224] - ldp x18, x16, [sp, #56] - adcs x16, x16, x25 - ldr x17, [sp, #1216] - adcs x18, x18, x24 - ldr x0, [sp, #1208] - ldp x2, x4, [sp, #8] - adcs x1, x1, x2 - ldr x2, [sp, #1192] - ldp x5, x3, [sp, #96] - adcs x3, x3, x4 - ldr x4, [sp, #1184] - ldp x6, x19, [sp, #24] - adcs x5, x5, x6 - ldr x6, [sp, #1200] - ldr x7, [sp, #88] // 8-byte Folded Reload - adcs x7, x7, x19 - adds x19, x10, x4 - adcs x10, x12, x2 - str x10, [sp, #40] // 8-byte Folded Spill - adcs x10, x14, x6 - str x10, [sp, #80] // 8-byte Folded Spill - adcs x10, x16, x0 - str x10, [sp, #72] // 8-byte Folded Spill - adcs x10, x18, x17 - str x10, [sp, #64] // 8-byte Folded Spill - adcs x10, x1, x15 - str x10, [sp, #56] // 8-byte Folded Spill - adcs x10, x3, x13 - str x10, [sp, #48] // 8-byte Folded Spill - adcs x10, x5, x11 - adcs x9, x7, x9 - stp x9, x10, [sp, #96] - adcs x8, x8, xzr - str x8, [sp, #88] // 8-byte Folded Spill - mul x1, x19, x22 - add x8, sp, #1104 // =1104 - mov x0, x20 - bl .LmulPv576x64 - ldr x8, [sp, #1176] - str x8, [sp, #32] // 8-byte Folded Spill - ldr x8, [sp, #1168] - str x8, [sp, #24] // 8-byte Folded Spill - ldr x8, [sp, #1160] - str x8, [sp, #16] // 8-byte Folded Spill - ldr x8, [sp, #1152] - str x8, [sp, #8] // 8-byte Folded Spill - ldr x24, [sp, #1144] - ldr x25, [sp, #1136] - ldr x26, [sp, #1128] - ldr x21, [sp, #1120] - ldr x27, [sp, #1112] - ldr x28, [sp, #1104] - ldr x1, [x23, #24] - add x8, sp, #1024 // =1024 - ldr x22, [sp, #136] // 8-byte Folded Reload - mov x0, x22 - bl .LmulPv576x64 - cmn x19, x28 - ldr x8, [sp, #1096] - ldr x9, [sp, #1088] - ldp x10, x1, [sp, #40] - adcs x10, x10, x27 - ldr x11, [sp, #1080] - ldp x14, x12, [sp, #72] - adcs x12, x12, x21 - ldr x13, [sp, #1072] - adcs x14, x14, x26 - ldr x15, [sp, #1064] - ldp x18, x16, [sp, #56] - adcs x16, x16, x25 - ldr x17, [sp, #1056] - adcs x18, x18, x24 - ldr x0, [sp, #1048] - ldp x2, x4, [sp, #8] - adcs x1, x1, x2 - ldr x2, [sp, #1032] - ldp x5, x3, [sp, #96] - adcs x3, x3, x4 - ldr x4, [sp, #1024] - ldp x6, x19, [sp, #24] - adcs x5, x5, x6 - ldr x6, [sp, #1040] - ldr x7, [sp, #88] // 8-byte Folded Reload - adcs x7, x7, x19 - adds x19, x10, x4 - adcs x10, x12, x2 - str x10, [sp, #40] // 8-byte Folded Spill - adcs x10, x14, x6 - str x10, [sp, #80] // 8-byte Folded Spill - adcs x10, x16, x0 - str x10, [sp, #72] // 8-byte Folded Spill - adcs x10, x18, x17 - str x10, [sp, #64] // 8-byte Folded Spill - adcs x10, x1, x15 - str x10, [sp, #56] // 8-byte Folded Spill - adcs x10, x3, x13 - str x10, [sp, #48] // 8-byte 
Folded Spill - adcs x10, x5, x11 - adcs x9, x7, x9 - stp x9, x10, [sp, #96] - adcs x8, x8, xzr - str x8, [sp, #88] // 8-byte Folded Spill - ldr x8, [sp, #128] // 8-byte Folded Reload - mul x1, x19, x8 - add x8, sp, #944 // =944 - mov x0, x20 - bl .LmulPv576x64 - ldr x8, [sp, #1016] - str x8, [sp, #32] // 8-byte Folded Spill - ldr x8, [sp, #1008] - str x8, [sp, #24] // 8-byte Folded Spill - ldr x8, [sp, #1000] - str x8, [sp, #16] // 8-byte Folded Spill - ldr x8, [sp, #992] - str x8, [sp, #8] // 8-byte Folded Spill - ldr x24, [sp, #984] - ldr x25, [sp, #976] - ldr x26, [sp, #968] - ldr x21, [sp, #960] - ldr x27, [sp, #952] - ldr x28, [sp, #944] - ldr x1, [x23, #32] - add x8, sp, #864 // =864 - mov x0, x22 - bl .LmulPv576x64 - cmn x19, x28 - ldr x8, [sp, #936] - ldr x9, [sp, #928] - ldp x10, x1, [sp, #40] - adcs x10, x10, x27 - ldr x11, [sp, #920] - ldp x14, x12, [sp, #72] - adcs x12, x12, x21 - ldr x13, [sp, #912] - adcs x14, x14, x26 - ldr x15, [sp, #904] - ldp x18, x16, [sp, #56] - adcs x16, x16, x25 - ldr x17, [sp, #896] - adcs x18, x18, x24 - ldr x0, [sp, #888] - ldp x2, x4, [sp, #8] - adcs x1, x1, x2 - ldr x2, [sp, #872] - ldp x5, x3, [sp, #96] - adcs x3, x3, x4 - ldr x4, [sp, #864] - ldp x6, x19, [sp, #24] - adcs x5, x5, x6 - ldr x6, [sp, #880] - ldr x7, [sp, #88] // 8-byte Folded Reload - adcs x7, x7, x19 - adds x19, x10, x4 - adcs x10, x12, x2 - str x10, [sp, #40] // 8-byte Folded Spill - adcs x10, x14, x6 - str x10, [sp, #80] // 8-byte Folded Spill - adcs x10, x16, x0 - str x10, [sp, #72] // 8-byte Folded Spill - adcs x10, x18, x17 - str x10, [sp, #64] // 8-byte Folded Spill - adcs x10, x1, x15 - str x10, [sp, #56] // 8-byte Folded Spill - adcs x10, x3, x13 - str x10, [sp, #48] // 8-byte Folded Spill - adcs x10, x5, x11 - adcs x9, x7, x9 - stp x9, x10, [sp, #96] - adcs x8, x8, xzr - str x8, [sp, #88] // 8-byte Folded Spill - ldr x23, [sp, #128] // 8-byte Folded Reload - mul x1, x19, x23 - add x8, sp, #784 // =784 - mov x0, x20 - bl .LmulPv576x64 - ldr x8, [sp, #856] - str x8, [sp, #32] // 8-byte Folded Spill - ldr x8, [sp, #848] - str x8, [sp, #24] // 8-byte Folded Spill - ldr x8, [sp, #840] - str x8, [sp, #16] // 8-byte Folded Spill - ldr x8, [sp, #832] - str x8, [sp, #8] // 8-byte Folded Spill - ldr x24, [sp, #824] - ldr x25, [sp, #816] - ldr x26, [sp, #808] - ldr x21, [sp, #800] - ldr x27, [sp, #792] - ldr x28, [sp, #784] - ldr x22, [sp, #120] // 8-byte Folded Reload - ldr x1, [x22, #40] - add x8, sp, #704 // =704 - ldr x0, [sp, #136] // 8-byte Folded Reload - bl .LmulPv576x64 - cmn x19, x28 - ldr x8, [sp, #776] - ldr x9, [sp, #768] - ldp x10, x1, [sp, #40] - adcs x10, x10, x27 - ldr x11, [sp, #760] - ldp x14, x12, [sp, #72] - adcs x12, x12, x21 - ldr x13, [sp, #752] - adcs x14, x14, x26 - ldr x15, [sp, #744] - ldp x18, x16, [sp, #56] - adcs x16, x16, x25 - ldr x17, [sp, #736] - adcs x18, x18, x24 - ldr x0, [sp, #728] - ldp x2, x4, [sp, #8] - adcs x1, x1, x2 - ldr x2, [sp, #712] - ldp x5, x3, [sp, #96] - adcs x3, x3, x4 - ldr x4, [sp, #704] - ldp x6, x19, [sp, #24] - adcs x5, x5, x6 - ldr x6, [sp, #720] - ldr x7, [sp, #88] // 8-byte Folded Reload - adcs x7, x7, x19 - adds x19, x10, x4 - adcs x10, x12, x2 - str x10, [sp, #40] // 8-byte Folded Spill - adcs x10, x14, x6 - str x10, [sp, #80] // 8-byte Folded Spill - adcs x10, x16, x0 - str x10, [sp, #72] // 8-byte Folded Spill - adcs x10, x18, x17 - str x10, [sp, #64] // 8-byte Folded Spill - adcs x10, x1, x15 - str x10, [sp, #56] // 8-byte Folded Spill - adcs x10, x3, x13 - str x10, [sp, #48] // 8-byte Folded Spill - adcs x10, x5, 
x11 - adcs x9, x7, x9 - stp x9, x10, [sp, #96] - adcs x8, x8, xzr - str x8, [sp, #88] // 8-byte Folded Spill - mul x1, x19, x23 - add x8, sp, #624 // =624 - mov x0, x20 - bl .LmulPv576x64 - ldr x8, [sp, #696] - str x8, [sp, #32] // 8-byte Folded Spill - ldr x8, [sp, #688] - str x8, [sp, #24] // 8-byte Folded Spill - ldr x8, [sp, #680] - str x8, [sp, #16] // 8-byte Folded Spill - ldr x8, [sp, #672] - str x8, [sp, #8] // 8-byte Folded Spill - ldr x24, [sp, #664] - ldr x25, [sp, #656] - ldr x26, [sp, #648] - ldr x21, [sp, #640] - ldr x27, [sp, #632] - ldr x28, [sp, #624] - ldr x1, [x22, #48] - add x8, sp, #544 // =544 - ldr x23, [sp, #136] // 8-byte Folded Reload - mov x0, x23 - bl .LmulPv576x64 - cmn x19, x28 - ldr x8, [sp, #616] - ldr x9, [sp, #608] - ldp x10, x1, [sp, #40] - adcs x10, x10, x27 - ldr x11, [sp, #600] - ldp x14, x12, [sp, #72] - adcs x12, x12, x21 - ldr x13, [sp, #592] - adcs x14, x14, x26 - ldr x15, [sp, #584] - ldp x18, x16, [sp, #56] - adcs x16, x16, x25 - ldr x17, [sp, #576] - adcs x18, x18, x24 - ldr x0, [sp, #568] - ldp x2, x4, [sp, #8] - adcs x1, x1, x2 - ldr x2, [sp, #552] - ldp x5, x3, [sp, #96] - adcs x3, x3, x4 - ldr x4, [sp, #544] - ldp x6, x19, [sp, #24] - adcs x5, x5, x6 - ldr x6, [sp, #560] - ldr x7, [sp, #88] // 8-byte Folded Reload - adcs x7, x7, x19 - adds x19, x10, x4 - adcs x10, x12, x2 - str x10, [sp, #40] // 8-byte Folded Spill - adcs x10, x14, x6 - str x10, [sp, #80] // 8-byte Folded Spill - adcs x10, x16, x0 - str x10, [sp, #72] // 8-byte Folded Spill - adcs x10, x18, x17 - str x10, [sp, #64] // 8-byte Folded Spill - adcs x10, x1, x15 - str x10, [sp, #56] // 8-byte Folded Spill - adcs x10, x3, x13 - str x10, [sp, #48] // 8-byte Folded Spill - adcs x10, x5, x11 - adcs x9, x7, x9 - stp x9, x10, [sp, #96] - adcs x8, x8, xzr - str x8, [sp, #88] // 8-byte Folded Spill - ldr x22, [sp, #128] // 8-byte Folded Reload - mul x1, x19, x22 - add x8, sp, #464 // =464 - mov x0, x20 - bl .LmulPv576x64 - ldr x8, [sp, #536] - str x8, [sp, #32] // 8-byte Folded Spill - ldr x8, [sp, #528] - str x8, [sp, #24] // 8-byte Folded Spill - ldr x8, [sp, #520] - str x8, [sp, #16] // 8-byte Folded Spill - ldr x8, [sp, #512] - str x8, [sp, #8] // 8-byte Folded Spill - ldp x25, x24, [sp, #496] - ldp x21, x26, [sp, #480] - ldp x28, x27, [sp, #464] - ldr x8, [sp, #120] // 8-byte Folded Reload - ldr x1, [x8, #56] - add x8, sp, #384 // =384 - mov x0, x23 - bl .LmulPv576x64 - cmn x19, x28 - ldp x9, x8, [sp, #448] - ldp x10, x1, [sp, #40] - adcs x10, x10, x27 - ldp x13, x11, [sp, #432] - ldp x14, x12, [sp, #72] - adcs x12, x12, x21 - adcs x14, x14, x26 - ldp x17, x15, [sp, #416] - ldp x18, x16, [sp, #56] - adcs x16, x16, x25 - adcs x18, x18, x24 - ldp x2, x4, [sp, #8] - adcs x1, x1, x2 - ldp x5, x3, [sp, #96] - adcs x3, x3, x4 - ldp x4, x2, [sp, #384] - ldp x6, x19, [sp, #24] - adcs x5, x5, x6 - ldp x6, x0, [sp, #400] - ldr x7, [sp, #88] // 8-byte Folded Reload - adcs x7, x7, x19 - adds x19, x10, x4 - adcs x10, x12, x2 - str x10, [sp, #40] // 8-byte Folded Spill - adcs x10, x14, x6 - str x10, [sp, #80] // 8-byte Folded Spill - adcs x10, x16, x0 - str x10, [sp, #72] // 8-byte Folded Spill - adcs x10, x18, x17 - str x10, [sp, #64] // 8-byte Folded Spill - adcs x10, x1, x15 - str x10, [sp, #56] // 8-byte Folded Spill - adcs x10, x3, x13 - str x10, [sp, #48] // 8-byte Folded Spill - adcs x10, x5, x11 - adcs x9, x7, x9 - stp x9, x10, [sp, #96] - adcs x8, x8, xzr - str x8, [sp, #88] // 8-byte Folded Spill - mul x1, x19, x22 - add x8, sp, #304 // =304 - mov x0, x20 - bl .LmulPv576x64 - ldr x8, 
[sp, #376] - str x8, [sp, #32] // 8-byte Folded Spill - ldp x22, x8, [sp, #360] - str x8, [sp, #24] // 8-byte Folded Spill - ldp x24, x23, [sp, #344] - ldp x26, x25, [sp, #328] - ldp x27, x21, [sp, #312] - ldr x28, [sp, #304] - ldr x8, [sp, #120] // 8-byte Folded Reload - ldr x1, [x8, #64] - add x8, sp, #224 // =224 - ldr x0, [sp, #136] // 8-byte Folded Reload - bl .LmulPv576x64 - cmn x19, x28 - ldp x9, x8, [sp, #288] - ldp x10, x1, [sp, #40] - adcs x10, x10, x27 - ldp x13, x11, [sp, #272] - ldp x14, x12, [sp, #72] - adcs x12, x12, x21 - adcs x14, x14, x26 - ldp x17, x15, [sp, #256] - ldp x18, x16, [sp, #56] - adcs x16, x16, x25 - adcs x18, x18, x24 - adcs x1, x1, x23 - ldp x4, x2, [sp, #224] - ldp x5, x3, [sp, #96] - adcs x3, x3, x22 - ldp x6, x19, [sp, #24] - adcs x5, x5, x6 - ldp x6, x0, [sp, #240] - ldr x7, [sp, #88] // 8-byte Folded Reload - adcs x7, x7, x19 - adds x19, x10, x4 - adcs x21, x12, x2 - adcs x22, x14, x6 - adcs x23, x16, x0 - adcs x24, x18, x17 - adcs x25, x1, x15 - adcs x26, x3, x13 - adcs x10, x5, x11 - str x10, [sp, #136] // 8-byte Folded Spill - adcs x28, x7, x9 - adcs x27, x8, xzr - ldr x8, [sp, #128] // 8-byte Folded Reload - mul x1, x19, x8 - add x8, sp, #144 // =144 - mov x0, x20 - bl .LmulPv576x64 - ldp x16, x8, [sp, #208] - ldp x9, x10, [sp, #144] - ldp x11, x12, [sp, #160] - cmn x19, x9 - ldp x13, x9, [sp, #176] - adcs x10, x21, x10 - ldp x14, x15, [sp, #192] - adcs x11, x22, x11 - adcs x12, x23, x12 - adcs x13, x24, x13 - adcs x9, x25, x9 - adcs x14, x26, x14 - ldp x0, x17, [x20, #56] - ldp x2, x18, [x20, #40] - ldp x4, x1, [x20, #24] - ldp x6, x3, [x20, #8] - ldr x5, [x20] - ldr x7, [sp, #136] // 8-byte Folded Reload - adcs x15, x7, x15 - adcs x16, x28, x16 - adcs x8, x27, x8 - subs x5, x10, x5 - sbcs x6, x11, x6 - sbcs x3, x12, x3 - sbcs x4, x13, x4 - sbcs x1, x9, x1 - sbcs x2, x14, x2 - sbcs x18, x15, x18 - sbcs x0, x16, x0 - sbcs x17, x8, x17 - asr x7, x17, #63 - cmp x7, #0 // =0 - csel x10, x10, x5, lt - csel x11, x11, x6, lt - csel x12, x12, x3, lt - csel x13, x13, x4, lt - csel x9, x9, x1, lt - csel x14, x14, x2, lt - csel x15, x15, x18, lt - csel x16, x16, x0, lt - csel x8, x8, x17, lt - ldr x17, [sp, #112] // 8-byte Folded Reload - stp x10, x11, [x17] - stp x12, x13, [x17, #16] - stp x9, x14, [x17, #32] - stp x15, x16, [x17, #48] - str x8, [x17, #64] - sub sp, x29, #80 // =80 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end131: - .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L - - .globl mcl_fp_montRed9L - .align 2 - .type mcl_fp_montRed9L,@function -mcl_fp_montRed9L: // @mcl_fp_montRed9L -// BB#0: - stp x28, x27, [sp, #-96]! 
- stp x26, x25, [sp, #16] - stp x24, x23, [sp, #32] - stp x22, x21, [sp, #48] - stp x20, x19, [sp, #64] - stp x29, x30, [sp, #80] - add x29, sp, #80 // =80 - sub sp, sp, #912 // =912 - mov x20, x2 - ldur x9, [x20, #-8] - str x9, [sp, #40] // 8-byte Folded Spill - ldr x8, [x20, #64] - str x8, [sp, #184] // 8-byte Folded Spill - ldr x8, [x20, #48] - str x8, [sp, #168] // 8-byte Folded Spill - ldr x8, [x20, #56] - str x8, [sp, #176] // 8-byte Folded Spill - ldr x8, [x20, #32] - str x8, [sp, #144] // 8-byte Folded Spill - ldr x8, [x20, #40] - str x8, [sp, #152] // 8-byte Folded Spill - ldr x8, [x20, #16] - str x8, [sp, #128] // 8-byte Folded Spill - ldr x8, [x20, #24] - str x8, [sp, #136] // 8-byte Folded Spill - ldr x8, [x20] - str x8, [sp, #112] // 8-byte Folded Spill - ldr x8, [x20, #8] - str x8, [sp, #120] // 8-byte Folded Spill - ldr x8, [x1, #128] - str x8, [sp, #96] // 8-byte Folded Spill - ldr x8, [x1, #136] - str x8, [sp, #104] // 8-byte Folded Spill - ldr x8, [x1, #112] - str x8, [sp, #80] // 8-byte Folded Spill - ldr x8, [x1, #120] - str x8, [sp, #88] // 8-byte Folded Spill - ldr x8, [x1, #96] - str x8, [sp, #64] // 8-byte Folded Spill - ldr x8, [x1, #104] - str x8, [sp, #72] // 8-byte Folded Spill - ldr x8, [x1, #80] - str x8, [sp, #48] // 8-byte Folded Spill - ldr x8, [x1, #88] - str x8, [sp, #56] // 8-byte Folded Spill - ldp x23, x8, [x1, #64] - str x8, [sp, #16] // 8-byte Folded Spill - ldp x25, x19, [x1, #48] - ldp x28, x27, [x1, #32] - ldp x22, x24, [x1, #16] - ldp x21, x26, [x1] - str x0, [sp, #160] // 8-byte Folded Spill - mul x1, x21, x9 - sub x8, x29, #160 // =160 - mov x0, x20 - bl .LmulPv576x64 - ldp x9, x8, [x29, #-96] - ldp x11, x10, [x29, #-112] - ldp x13, x12, [x29, #-128] - ldp x14, x15, [x29, #-160] - ldp x16, x17, [x29, #-144] - cmn x21, x14 - adcs x21, x26, x15 - adcs x14, x22, x16 - adcs x24, x24, x17 - adcs x26, x28, x13 - adcs x27, x27, x12 - adcs x25, x25, x11 - adcs x10, x19, x10 - stp x10, x14, [sp, #24] - adcs x23, x23, x9 - ldr x9, [sp, #16] // 8-byte Folded Reload - adcs x28, x9, x8 - ldr x8, [sp, #48] // 8-byte Folded Reload - adcs x22, x8, xzr - ldr x8, [sp, #56] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #56] // 8-byte Folded Spill - ldr x8, [sp, #64] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #64] // 8-byte Folded Spill - ldr x8, [sp, #72] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #72] // 8-byte Folded Spill - ldr x8, [sp, #80] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #80] // 8-byte Folded Spill - ldr x8, [sp, #88] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #88] // 8-byte Folded Spill - ldr x8, [sp, #96] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #96] // 8-byte Folded Spill - ldr x8, [sp, #104] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #104] // 8-byte Folded Spill - adcs x8, xzr, xzr - str x8, [sp, #48] // 8-byte Folded Spill - ldr x19, [sp, #40] // 8-byte Folded Reload - mul x1, x21, x19 - sub x8, x29, #240 // =240 - mov x0, x20 - bl .LmulPv576x64 - ldp x9, x8, [x29, #-176] - ldp x11, x10, [x29, #-192] - ldp x13, x12, [x29, #-208] - ldp x14, x15, [x29, #-240] - ldp x16, x17, [x29, #-224] - cmn x21, x14 - ldr x14, [sp, #32] // 8-byte Folded Reload - adcs x21, x14, x15 - adcs x14, x24, x16 - adcs x26, x26, x17 - adcs x27, x27, x13 - adcs x25, x25, x12 - ldr x12, [sp, #24] // 8-byte Folded Reload - adcs x11, x12, x11 - stp x11, x14, [sp, #24] - adcs x23, x23, x10 - adcs x28, x28, x9 - adcs x22, x22, x8 - ldr x8, [sp, #56] // 8-byte Folded Reload 
- adcs x24, x8, xzr - ldr x8, [sp, #64] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #64] // 8-byte Folded Spill - ldr x8, [sp, #72] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #72] // 8-byte Folded Spill - ldr x8, [sp, #80] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #80] // 8-byte Folded Spill - ldr x8, [sp, #88] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #88] // 8-byte Folded Spill - ldr x8, [sp, #96] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #96] // 8-byte Folded Spill - ldr x8, [sp, #104] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #104] // 8-byte Folded Spill - ldr x8, [sp, #48] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #56] // 8-byte Folded Spill - mul x1, x21, x19 - add x8, sp, #672 // =672 - mov x0, x20 - bl .LmulPv576x64 - ldr x8, [sp, #744] - ldr x9, [sp, #736] - ldr x10, [sp, #728] - ldr x11, [sp, #720] - ldr x12, [sp, #712] - ldr x13, [sp, #704] - ldr x14, [sp, #672] - ldr x15, [sp, #680] - ldr x16, [sp, #688] - ldr x17, [sp, #696] - cmn x21, x14 - ldr x14, [sp, #32] // 8-byte Folded Reload - adcs x21, x14, x15 - adcs x14, x26, x16 - str x14, [sp, #48] // 8-byte Folded Spill - adcs x27, x27, x17 - adcs x25, x25, x13 - ldr x13, [sp, #24] // 8-byte Folded Reload - adcs x12, x13, x12 - str x12, [sp, #32] // 8-byte Folded Spill - adcs x23, x23, x11 - adcs x28, x28, x10 - adcs x22, x22, x9 - adcs x24, x24, x8 - ldr x8, [sp, #64] // 8-byte Folded Reload - adcs x26, x8, xzr - ldr x8, [sp, #72] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #72] // 8-byte Folded Spill - ldr x8, [sp, #80] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #80] // 8-byte Folded Spill - ldr x8, [sp, #88] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #88] // 8-byte Folded Spill - ldr x8, [sp, #96] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #96] // 8-byte Folded Spill - ldr x8, [sp, #104] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #104] // 8-byte Folded Spill - ldr x8, [sp, #56] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #64] // 8-byte Folded Spill - mul x1, x21, x19 - add x8, sp, #592 // =592 - mov x0, x20 - bl .LmulPv576x64 - ldr x8, [sp, #664] - ldr x9, [sp, #656] - ldr x10, [sp, #648] - ldr x11, [sp, #640] - ldr x12, [sp, #632] - ldr x13, [sp, #624] - ldr x14, [sp, #592] - ldr x15, [sp, #600] - ldr x16, [sp, #608] - ldr x17, [sp, #616] - cmn x21, x14 - ldr x14, [sp, #48] // 8-byte Folded Reload - adcs x21, x14, x15 - adcs x14, x27, x16 - str x14, [sp, #56] // 8-byte Folded Spill - adcs x25, x25, x17 - ldr x14, [sp, #32] // 8-byte Folded Reload - adcs x13, x14, x13 - str x13, [sp, #48] // 8-byte Folded Spill - adcs x23, x23, x12 - adcs x28, x28, x11 - adcs x22, x22, x10 - adcs x24, x24, x9 - adcs x26, x26, x8 - ldr x8, [sp, #72] // 8-byte Folded Reload - adcs x27, x8, xzr - ldr x8, [sp, #80] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #80] // 8-byte Folded Spill - ldr x8, [sp, #88] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #88] // 8-byte Folded Spill - ldr x8, [sp, #96] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #96] // 8-byte Folded Spill - ldr x8, [sp, #104] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #104] // 8-byte Folded Spill - ldr x8, [sp, #64] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #72] // 8-byte Folded Spill - mul x1, x21, x19 - add x8, sp, #512 // =512 - mov x0, x20 - bl .LmulPv576x64 - ldr x8, [sp, #584] - ldr x9, [sp, #576] - ldr x10, [sp, 
#568] - ldr x11, [sp, #560] - ldr x12, [sp, #552] - ldr x13, [sp, #544] - ldr x14, [sp, #512] - ldr x15, [sp, #520] - ldr x16, [sp, #528] - ldr x17, [sp, #536] - cmn x21, x14 - ldr x14, [sp, #56] // 8-byte Folded Reload - adcs x21, x14, x15 - adcs x14, x25, x16 - str x14, [sp, #64] // 8-byte Folded Spill - ldr x14, [sp, #48] // 8-byte Folded Reload - adcs x14, x14, x17 - str x14, [sp, #56] // 8-byte Folded Spill - adcs x23, x23, x13 - adcs x28, x28, x12 - adcs x22, x22, x11 - adcs x24, x24, x10 - adcs x26, x26, x9 - adcs x27, x27, x8 - ldr x8, [sp, #80] // 8-byte Folded Reload - adcs x25, x8, xzr - ldr x8, [sp, #88] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #88] // 8-byte Folded Spill - ldr x8, [sp, #96] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #96] // 8-byte Folded Spill - ldr x8, [sp, #104] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #104] // 8-byte Folded Spill - ldr x8, [sp, #72] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #80] // 8-byte Folded Spill - mul x1, x21, x19 - add x8, sp, #432 // =432 - mov x0, x20 - bl .LmulPv576x64 - ldp x9, x8, [sp, #496] - ldp x11, x10, [sp, #480] - ldp x13, x12, [sp, #464] - ldp x14, x15, [sp, #432] - ldp x16, x17, [sp, #448] - cmn x21, x14 - ldr x14, [sp, #64] // 8-byte Folded Reload - adcs x21, x14, x15 - ldr x14, [sp, #56] // 8-byte Folded Reload - adcs x14, x14, x16 - adcs x23, x23, x17 - adcs x28, x28, x13 - adcs x22, x22, x12 - adcs x24, x24, x11 - adcs x26, x26, x10 - adcs x27, x27, x9 - adcs x25, x25, x8 - ldr x8, [sp, #88] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #88] // 8-byte Folded Spill - ldr x8, [sp, #96] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #96] // 8-byte Folded Spill - ldr x8, [sp, #104] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #104] // 8-byte Folded Spill - ldr x8, [sp, #80] // 8-byte Folded Reload - adcs x8, x8, xzr - stp x14, x8, [sp, #72] - mul x1, x21, x19 - add x8, sp, #352 // =352 - mov x0, x20 - bl .LmulPv576x64 - ldp x9, x8, [sp, #416] - ldp x11, x10, [sp, #400] - ldp x13, x12, [sp, #384] - ldp x14, x15, [sp, #352] - ldp x16, x17, [sp, #368] - cmn x21, x14 - ldr x14, [sp, #72] // 8-byte Folded Reload - adcs x21, x14, x15 - adcs x14, x23, x16 - str x14, [sp, #72] // 8-byte Folded Spill - adcs x28, x28, x17 - adcs x22, x22, x13 - adcs x24, x24, x12 - adcs x26, x26, x11 - adcs x27, x27, x10 - adcs x25, x25, x9 - ldr x9, [sp, #88] // 8-byte Folded Reload - adcs x8, x9, x8 - str x8, [sp, #88] // 8-byte Folded Spill - ldr x8, [sp, #96] // 8-byte Folded Reload - adcs x23, x8, xzr - ldr x8, [sp, #104] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #104] // 8-byte Folded Spill - ldr x8, [sp, #80] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #96] // 8-byte Folded Spill - mul x1, x21, x19 - add x8, sp, #272 // =272 - mov x0, x20 - bl .LmulPv576x64 - ldp x9, x8, [sp, #336] - ldp x11, x10, [sp, #320] - ldp x13, x12, [sp, #304] - ldp x14, x15, [sp, #272] - ldp x16, x17, [sp, #288] - cmn x21, x14 - ldr x14, [sp, #72] // 8-byte Folded Reload - adcs x21, x14, x15 - adcs x14, x28, x16 - adcs x22, x22, x17 - adcs x24, x24, x13 - adcs x26, x26, x12 - adcs x27, x27, x11 - adcs x25, x25, x10 - ldr x10, [sp, #88] // 8-byte Folded Reload - adcs x9, x10, x9 - stp x14, x9, [sp, #80] - adcs x23, x23, x8 - ldr x8, [sp, #104] // 8-byte Folded Reload - adcs x28, x8, xzr - ldr x8, [sp, #96] // 8-byte Folded Reload - adcs x8, x8, xzr - str x8, [sp, #104] // 8-byte Folded Spill - mul x1, x21, x19 - add x8, sp, #192 // =192 
- mov x0, x20 - bl .LmulPv576x64 - ldp x9, x8, [sp, #256] - ldp x11, x10, [sp, #240] - ldp x13, x12, [sp, #224] - ldp x14, x15, [sp, #192] - ldp x16, x17, [sp, #208] - cmn x21, x14 - ldr x14, [sp, #80] // 8-byte Folded Reload - adcs x14, x14, x15 - adcs x15, x22, x16 - adcs x16, x24, x17 - adcs x13, x26, x13 - adcs x12, x27, x12 - adcs x11, x25, x11 - ldr x17, [sp, #88] // 8-byte Folded Reload - adcs x10, x17, x10 - adcs x9, x23, x9 - adcs x8, x28, x8 - ldp x17, x18, [sp, #104] - adcs x17, x17, xzr - subs x18, x14, x18 - ldp x0, x1, [sp, #120] - sbcs x0, x15, x0 - sbcs x1, x16, x1 - ldp x2, x3, [sp, #136] - sbcs x2, x13, x2 - sbcs x3, x12, x3 - ldr x4, [sp, #152] // 8-byte Folded Reload - sbcs x4, x11, x4 - ldp x5, x6, [sp, #168] - sbcs x5, x10, x5 - sbcs x6, x9, x6 - ldr x7, [sp, #184] // 8-byte Folded Reload - sbcs x7, x8, x7 - sbcs x17, x17, xzr - tst x17, #0x1 - csel x14, x14, x18, ne - csel x15, x15, x0, ne - csel x16, x16, x1, ne - csel x13, x13, x2, ne - csel x12, x12, x3, ne - csel x11, x11, x4, ne - csel x10, x10, x5, ne - csel x9, x9, x6, ne - csel x8, x8, x7, ne - ldr x17, [sp, #160] // 8-byte Folded Reload - stp x14, x15, [x17] - stp x16, x13, [x17, #16] - stp x12, x11, [x17, #32] - stp x10, x9, [x17, #48] - str x8, [x17, #64] - sub sp, x29, #80 // =80 - ldp x29, x30, [sp, #80] - ldp x20, x19, [sp, #64] - ldp x22, x21, [sp, #48] - ldp x24, x23, [sp, #32] - ldp x26, x25, [sp, #16] - ldp x28, x27, [sp], #96 - ret -.Lfunc_end132: - .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L - - .globl mcl_fp_addPre9L - .align 2 - .type mcl_fp_addPre9L,@function -mcl_fp_addPre9L: // @mcl_fp_addPre9L -// BB#0: - ldp x11, x8, [x2, #56] - ldp x13, x9, [x1, #56] - ldp x15, x10, [x2, #40] - ldp x17, x12, [x1, #40] - ldp x3, x14, [x2, #24] - ldr x4, [x2] - ldp x2, x18, [x2, #8] - ldp x5, x6, [x1] - ldr x7, [x1, #16] - ldp x1, x16, [x1, #24] - adds x4, x4, x5 - adcs x2, x2, x6 - stp x4, x2, [x0] - adcs x18, x18, x7 - str x18, [x0, #16] - adcs x18, x3, x1 - adcs x14, x14, x16 - stp x18, x14, [x0, #24] - adcs x14, x15, x17 - adcs x10, x10, x12 - stp x14, x10, [x0, #40] - adcs x10, x11, x13 - adcs x9, x8, x9 - adcs x8, xzr, xzr - stp x10, x9, [x0, #56] - mov x0, x8 - ret -.Lfunc_end133: - .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L - - .globl mcl_fp_subPre9L - .align 2 - .type mcl_fp_subPre9L,@function -mcl_fp_subPre9L: // @mcl_fp_subPre9L -// BB#0: - ldp x11, x8, [x2, #56] - ldp x13, x9, [x1, #56] - ldp x15, x10, [x2, #40] - ldp x17, x12, [x1, #40] - ldp x3, x14, [x2, #24] - ldr x4, [x2] - ldp x2, x18, [x2, #8] - ldp x5, x6, [x1] - ldr x7, [x1, #16] - ldp x1, x16, [x1, #24] - subs x4, x5, x4 - sbcs x2, x6, x2 - stp x4, x2, [x0] - sbcs x18, x7, x18 - str x18, [x0, #16] - sbcs x18, x1, x3 - sbcs x14, x16, x14 - stp x18, x14, [x0, #24] - sbcs x14, x17, x15 - sbcs x10, x12, x10 - stp x14, x10, [x0, #40] - sbcs x10, x13, x11 - sbcs x9, x9, x8 - ngcs x8, xzr - and x8, x8, #0x1 - stp x10, x9, [x0, #56] - mov x0, x8 - ret -.Lfunc_end134: - .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L - - .globl mcl_fp_shr1_9L - .align 2 - .type mcl_fp_shr1_9L,@function -mcl_fp_shr1_9L: // @mcl_fp_shr1_9L -// BB#0: - ldp x8, x9, [x1] - ldp x12, x10, [x1, #56] - ldp x16, x11, [x1, #40] - ldp x13, x14, [x1, #16] - ldr x15, [x1, #32] - extr x8, x9, x8, #1 - extr x9, x13, x9, #1 - extr x13, x14, x13, #1 - extr x14, x15, x14, #1 - extr x15, x16, x15, #1 - extr x16, x11, x16, #1 - extr x11, x12, x11, #1 - extr x12, x10, x12, #1 - lsr x10, x10, #1 - stp x8, x9, [x0] - stp x13, x14, [x0, #16] - stp x15, x16, 
[x0, #32] - stp x11, x12, [x0, #48] - str x10, [x0, #64] - ret -.Lfunc_end135: - .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L - - .globl mcl_fp_add9L - .align 2 - .type mcl_fp_add9L,@function -mcl_fp_add9L: // @mcl_fp_add9L -// BB#0: - stp x24, x23, [sp, #-48]! - stp x22, x21, [sp, #16] - stp x20, x19, [sp, #32] - ldp x11, x8, [x2, #56] - ldp x13, x9, [x1, #56] - ldp x15, x10, [x2, #40] - ldp x17, x12, [x1, #40] - ldp x4, x14, [x2, #24] - ldr x5, [x2] - ldp x2, x18, [x2, #8] - ldp x6, x7, [x1] - ldr x19, [x1, #16] - ldp x1, x16, [x1, #24] - adds x5, x5, x6 - adcs x2, x2, x7 - adcs x18, x18, x19 - ldp x21, x7, [x3, #40] - ldp x19, x6, [x3, #56] - adcs x1, x4, x1 - adcs x4, x14, x16 - ldr x20, [x3, #32] - adcs x17, x15, x17 - adcs x10, x10, x12 - ldp x12, x14, [x3] - stp x5, x2, [x0] - stp x18, x1, [x0, #16] - stp x4, x17, [x0, #32] - adcs x22, x11, x13 - stp x10, x22, [x0, #48] - adcs x8, x8, x9 - str x8, [x0, #64] - adcs x23, xzr, xzr - ldp x9, x11, [x3, #16] - subs x16, x5, x12 - sbcs x15, x2, x14 - sbcs x14, x18, x9 - sbcs x13, x1, x11 - sbcs x12, x4, x20 - sbcs x11, x17, x21 - sbcs x10, x10, x7 - sbcs x9, x22, x19 - sbcs x8, x8, x6 - sbcs x17, x23, xzr - and w17, w17, #0x1 - tbnz w17, #0, .LBB136_2 -// BB#1: // %nocarry - stp x16, x15, [x0] - stp x14, x13, [x0, #16] - stp x12, x11, [x0, #32] - stp x10, x9, [x0, #48] - str x8, [x0, #64] -.LBB136_2: // %carry - ldp x20, x19, [sp, #32] - ldp x22, x21, [sp, #16] - ldp x24, x23, [sp], #48 - ret -.Lfunc_end136: - .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L - - .globl mcl_fp_addNF9L - .align 2 - .type mcl_fp_addNF9L,@function -mcl_fp_addNF9L: // @mcl_fp_addNF9L -// BB#0: - stp x20, x19, [sp, #-16]! - ldp x11, x8, [x1, #56] - ldp x13, x9, [x2, #56] - ldp x15, x10, [x1, #40] - ldp x17, x12, [x2, #40] - ldp x4, x14, [x1, #24] - ldr x5, [x1] - ldp x1, x18, [x1, #8] - ldp x6, x7, [x2] - ldr x19, [x2, #16] - ldp x2, x16, [x2, #24] - adds x5, x6, x5 - adcs x1, x7, x1 - adcs x18, x19, x18 - ldp x19, x6, [x3, #56] - adcs x2, x2, x4 - adcs x14, x16, x14 - ldp x4, x7, [x3, #40] - adcs x15, x17, x15 - adcs x10, x12, x10 - ldp x12, x17, [x3] - adcs x11, x13, x11 - ldr x13, [x3, #16] - ldp x3, x16, [x3, #24] - adcs x8, x9, x8 - subs x9, x5, x12 - sbcs x12, x1, x17 - sbcs x13, x18, x13 - sbcs x17, x2, x3 - sbcs x16, x14, x16 - sbcs x3, x15, x4 - sbcs x4, x10, x7 - sbcs x7, x11, x19 - sbcs x6, x8, x6 - asr x19, x6, #63 - cmp x19, #0 // =0 - csel x9, x5, x9, lt - csel x12, x1, x12, lt - csel x13, x18, x13, lt - csel x17, x2, x17, lt - csel x14, x14, x16, lt - csel x15, x15, x3, lt - csel x10, x10, x4, lt - csel x11, x11, x7, lt - csel x8, x8, x6, lt - stp x9, x12, [x0] - stp x13, x17, [x0, #16] - stp x14, x15, [x0, #32] - stp x10, x11, [x0, #48] - str x8, [x0, #64] - ldp x20, x19, [sp], #16 - ret -.Lfunc_end137: - .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L - - .globl mcl_fp_sub9L - .align 2 - .type mcl_fp_sub9L,@function -mcl_fp_sub9L: // @mcl_fp_sub9L -// BB#0: - stp x20, x19, [sp, #-16]! 
- ldp x15, x16, [x2, #56] - ldp x4, x17, [x1, #56] - ldp x13, x14, [x2, #40] - ldp x6, x18, [x1, #40] - ldp x11, x12, [x2, #24] - ldp x9, x10, [x2, #8] - ldr x8, [x2] - ldp x2, x7, [x1] - ldr x19, [x1, #16] - ldp x1, x5, [x1, #24] - subs x8, x2, x8 - sbcs x9, x7, x9 - stp x8, x9, [x0] - sbcs x10, x19, x10 - sbcs x11, x1, x11 - stp x10, x11, [x0, #16] - sbcs x12, x5, x12 - sbcs x13, x6, x13 - stp x12, x13, [x0, #32] - sbcs x14, x18, x14 - sbcs x15, x4, x15 - stp x14, x15, [x0, #48] - sbcs x16, x17, x16 - str x16, [x0, #64] - ngcs x17, xzr - and w17, w17, #0x1 - tbnz w17, #0, .LBB138_2 -// BB#1: // %nocarry - ldp x20, x19, [sp], #16 - ret -.LBB138_2: // %carry - ldp x18, x1, [x3] - ldp x2, x4, [x3, #16] - ldp x5, x6, [x3, #32] - adds x8, x18, x8 - adcs x9, x1, x9 - ldr x18, [x3, #48] - ldp x1, x17, [x3, #56] - adcs x10, x2, x10 - adcs x11, x4, x11 - adcs x12, x5, x12 - adcs x13, x6, x13 - adcs x14, x18, x14 - adcs x15, x1, x15 - adcs x16, x17, x16 - stp x8, x9, [x0] - stp x10, x11, [x0, #16] - stp x12, x13, [x0, #32] - stp x14, x15, [x0, #48] - str x16, [x0, #64] - ldp x20, x19, [sp], #16 - ret -.Lfunc_end138: - .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L - - .globl mcl_fp_subNF9L - .align 2 - .type mcl_fp_subNF9L,@function -mcl_fp_subNF9L: // @mcl_fp_subNF9L -// BB#0: - stp x20, x19, [sp, #-16]! - ldp x11, x8, [x2, #56] - ldp x13, x9, [x1, #56] - ldp x15, x10, [x2, #40] - ldp x17, x12, [x1, #40] - ldp x4, x14, [x2, #24] - ldr x5, [x2] - ldp x2, x18, [x2, #8] - ldp x6, x7, [x1] - ldr x19, [x1, #16] - ldp x1, x16, [x1, #24] - subs x5, x6, x5 - sbcs x2, x7, x2 - sbcs x18, x19, x18 - ldp x19, x6, [x3, #56] - sbcs x1, x1, x4 - sbcs x14, x16, x14 - ldp x4, x7, [x3, #40] - sbcs x15, x17, x15 - sbcs x10, x12, x10 - ldp x12, x17, [x3] - sbcs x11, x13, x11 - sbcs x8, x9, x8 - asr x9, x8, #63 - extr x13, x9, x8, #63 - and x12, x13, x12 - ldr x13, [x3, #16] - ldp x3, x16, [x3, #24] - and x19, x9, x19 - and x6, x9, x6 - ror x9, x9, #63 - and x17, x9, x17 - and x13, x9, x13 - and x3, x9, x3 - and x16, x9, x16 - and x4, x9, x4 - and x9, x9, x7 - adds x12, x12, x5 - str x12, [x0] - adcs x12, x17, x2 - str x12, [x0, #8] - adcs x12, x13, x18 - str x12, [x0, #16] - adcs x12, x3, x1 - str x12, [x0, #24] - adcs x12, x16, x14 - str x12, [x0, #32] - adcs x12, x4, x15 - adcs x9, x9, x10 - stp x12, x9, [x0, #40] - adcs x9, x19, x11 - adcs x8, x6, x8 - stp x9, x8, [x0, #56] - ldp x20, x19, [sp], #16 - ret -.Lfunc_end139: - .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L - - .globl mcl_fpDbl_add9L - .align 2 - .type mcl_fpDbl_add9L,@function -mcl_fpDbl_add9L: // @mcl_fpDbl_add9L -// BB#0: - stp x20, x19, [sp, #-16]! 
- ldp x10, x8, [x2, #128] - ldp x11, x9, [x1, #128] - ldp x12, x13, [x2, #112] - ldp x14, x15, [x1, #112] - ldp x16, x17, [x2, #96] - ldp x18, x4, [x2] - ldp x5, x6, [x1] - ldp x7, x19, [x2, #16] - adds x18, x18, x5 - adcs x4, x4, x6 - ldp x5, x6, [x1, #16] - str x18, [x0] - adcs x18, x7, x5 - ldp x5, x7, [x1, #96] - str x4, [x0, #8] - ldr x4, [x1, #32] - str x18, [x0, #16] - adcs x18, x19, x6 - ldp x6, x19, [x2, #32] - str x18, [x0, #24] - adcs x4, x6, x4 - ldp x18, x6, [x1, #40] - str x4, [x0, #32] - adcs x18, x19, x18 - ldp x4, x19, [x2, #48] - str x18, [x0, #40] - adcs x4, x4, x6 - ldp x18, x6, [x1, #56] - str x4, [x0, #48] - adcs x18, x19, x18 - ldp x4, x19, [x2, #64] - str x18, [x0, #56] - ldr x18, [x1, #72] - adcs x4, x4, x6 - ldp x6, x2, [x2, #80] - str x4, [x0, #64] - ldp x4, x1, [x1, #80] - adcs x18, x19, x18 - adcs x4, x6, x4 - adcs x1, x2, x1 - ldp x6, x19, [x3, #56] - adcs x16, x16, x5 - adcs x17, x17, x7 - ldp x7, x2, [x3, #40] - adcs x12, x12, x14 - adcs x13, x13, x15 - ldp x15, x5, [x3, #24] - adcs x10, x10, x11 - ldr x11, [x3] - ldp x3, x14, [x3, #8] - adcs x8, x8, x9 - adcs x9, xzr, xzr - subs x11, x18, x11 - sbcs x3, x4, x3 - sbcs x14, x1, x14 - sbcs x15, x16, x15 - sbcs x5, x17, x5 - sbcs x7, x12, x7 - sbcs x2, x13, x2 - sbcs x6, x10, x6 - sbcs x19, x8, x19 - sbcs x9, x9, xzr - tst x9, #0x1 - csel x9, x18, x11, ne - csel x11, x4, x3, ne - csel x14, x1, x14, ne - csel x15, x16, x15, ne - csel x16, x17, x5, ne - csel x12, x12, x7, ne - csel x13, x13, x2, ne - csel x10, x10, x6, ne - csel x8, x8, x19, ne - stp x9, x11, [x0, #72] - stp x14, x15, [x0, #88] - stp x16, x12, [x0, #104] - stp x13, x10, [x0, #120] - str x8, [x0, #136] - ldp x20, x19, [sp], #16 - ret -.Lfunc_end140: - .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L - - .globl mcl_fpDbl_sub9L - .align 2 - .type mcl_fpDbl_sub9L,@function -mcl_fpDbl_sub9L: // @mcl_fpDbl_sub9L -// BB#0: - ldp x10, x8, [x2, #128] - ldp x11, x9, [x1, #128] - ldp x14, x12, [x2, #112] - ldp x15, x13, [x1, #112] - ldp x16, x17, [x2] - ldp x18, x4, [x1] - ldp x5, x6, [x2, #96] - ldr x7, [x1, #16] - subs x16, x18, x16 - sbcs x17, x4, x17 - ldp x18, x4, [x2, #16] - str x16, [x0] - ldr x16, [x1, #24] - sbcs x18, x7, x18 - str x17, [x0, #8] - ldp x17, x7, [x2, #32] - str x18, [x0, #16] - sbcs x16, x16, x4 - ldp x18, x4, [x1, #32] - str x16, [x0, #24] - sbcs x16, x18, x17 - ldp x17, x18, [x2, #48] - str x16, [x0, #32] - sbcs x4, x4, x7 - ldp x16, x7, [x1, #48] - str x4, [x0, #40] - sbcs x16, x16, x17 - ldp x17, x4, [x2, #80] - str x16, [x0, #48] - ldr x16, [x1, #64] - sbcs x18, x7, x18 - ldp x7, x2, [x2, #64] - str x18, [x0, #56] - ldr x18, [x1, #72] - sbcs x16, x16, x7 - str x16, [x0, #64] - ldp x16, x7, [x1, #80] - sbcs x18, x18, x2 - ldp x2, x1, [x1, #96] - sbcs x16, x16, x17 - sbcs x4, x7, x4 - sbcs x2, x2, x5 - ldp x7, x17, [x3, #56] - sbcs x1, x1, x6 - sbcs x14, x15, x14 - ldp x6, x5, [x3, #40] - sbcs x12, x13, x12 - sbcs x10, x11, x10 - ldp x13, x15, [x3, #24] - sbcs x8, x9, x8 - ngcs x9, xzr - tst x9, #0x1 - ldr x9, [x3] - ldp x3, x11, [x3, #8] - csel x17, x17, xzr, ne - csel x7, x7, xzr, ne - csel x5, x5, xzr, ne - csel x6, x6, xzr, ne - csel x15, x15, xzr, ne - csel x13, x13, xzr, ne - csel x11, x11, xzr, ne - csel x3, x3, xzr, ne - csel x9, x9, xzr, ne - adds x9, x9, x18 - str x9, [x0, #72] - adcs x9, x3, x16 - str x9, [x0, #80] - adcs x9, x11, x4 - str x9, [x0, #88] - adcs x9, x13, x2 - str x9, [x0, #96] - adcs x9, x15, x1 - str x9, [x0, #104] - adcs x9, x6, x14 - str x9, [x0, #112] - adcs x9, x5, x12 - str x9, [x0, #120] - adcs 
x9, x7, x10 - adcs x8, x17, x8 - stp x9, x8, [x0, #128] - ret -.Lfunc_end141: - .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L - - - .section ".note.GNU-stack","",@progbits diff --git a/vendor/github.com/dexon-foundation/mcl/src/asm/arm.s b/vendor/github.com/dexon-foundation/mcl/src/asm/arm.s deleted file mode 100644 index 2df9bfb92..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/asm/arm.s +++ /dev/null @@ -1,84189 +0,0 @@ - .text - .syntax unified - .eabi_attribute 67, "2.09" @ Tag_conformance - .eabi_attribute 6, 1 @ Tag_CPU_arch - .eabi_attribute 8, 1 @ Tag_ARM_ISA_use - .eabi_attribute 15, 1 @ Tag_ABI_PCS_RW_data - .eabi_attribute 16, 1 @ Tag_ABI_PCS_RO_data - .eabi_attribute 17, 2 @ Tag_ABI_PCS_GOT_use - .eabi_attribute 20, 1 @ Tag_ABI_FP_denormal - .eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions - .eabi_attribute 23, 3 @ Tag_ABI_FP_number_model - .eabi_attribute 34, 1 @ Tag_CPU_unaligned_access - .eabi_attribute 24, 1 @ Tag_ABI_align_needed - .eabi_attribute 25, 1 @ Tag_ABI_align_preserved - .eabi_attribute 28, 1 @ Tag_ABI_VFP_args - .eabi_attribute 38, 1 @ Tag_ABI_FP_16bit_format - .eabi_attribute 14, 0 @ Tag_ABI_PCS_R9_use - .file "" - .globl makeNIST_P192L - .align 2 - .type makeNIST_P192L,%function -makeNIST_P192L: @ @makeNIST_P192L - .fnstart -@ BB#0: - mvn r1, #0 - mvn r2, #1 - str r1, [r0] - stmib r0, {r1, r2} - str r1, [r0, #12] - str r1, [r0, #16] - str r1, [r0, #20] - mov pc, lr -.Lfunc_end0: - .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L - .cantunwind - .fnend - - .globl mcl_fpDbl_mod_NIST_P192L - .align 2 - .type mcl_fpDbl_mod_NIST_P192L,%function -mcl_fpDbl_mod_NIST_P192L: @ @mcl_fpDbl_mod_NIST_P192L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #8 - sub sp, sp, #8 - add lr, r1, #24 - ldr r2, [r1, #40] - ldr r3, [r1, #44] - ldr r7, [r1, #16] - ldr r8, [r1, #20] - ldm lr, {r4, r5, r6, lr} - ldm r1, {r1, r9, r10, r12} - adds r11, r4, r1 - adcs r9, r5, r9 - adcs r10, r6, r10 - adcs r1, lr, r12 - str r1, [sp, #4] @ 4-byte Spill - adcs r1, r2, r7 - mov r7, #0 - str r1, [sp] @ 4-byte Spill - adcs r8, r3, r8 - mov r1, #0 - adcs r1, r1, #0 - adc r12, r7, #0 - ldr r7, [sp, #4] @ 4-byte Reload - adds r11, r11, r2 - adcs r9, r9, r3 - adcs r4, r10, r4 - adcs r5, r7, r5 - ldr r7, [sp] @ 4-byte Reload - adcs r6, r7, r6 - adcs r7, r8, lr - adcs r1, r1, #0 - adc r12, r12, #0 - adds lr, r4, r2 - adcs r3, r5, r3 - adcs r6, r6, #0 - adcs r7, r7, #0 - adcs r1, r1, #0 - adc r5, r12, #0 - adds r12, r1, r11 - adcs r11, r5, r9 - adcs r10, r1, lr - mov r1, #0 - adcs r8, r5, r3 - adcs lr, r6, #0 - adcs r2, r7, #0 - adc r9, r1, #0 - adds r7, r12, #1 - str r2, [sp, #4] @ 4-byte Spill - adcs r6, r11, #0 - adcs r3, r10, #1 - adcs r5, r8, #0 - adcs r1, lr, #0 - adcs r2, r2, #0 - sbc r4, r9, #0 - ands r4, r4, #1 - movne r7, r12 - movne r6, r11 - movne r3, r10 - cmp r4, #0 - movne r5, r8 - movne r1, lr - str r7, [r0] - str r6, [r0, #4] - str r3, [r0, #8] - str r5, [r0, #12] - str r1, [r0, #16] - ldr r1, [sp, #4] @ 4-byte Reload - movne r2, r1 - str r2, [r0, #20] - add sp, sp, #8 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end1: - .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L - .cantunwind - .fnend - - .globl mcl_fp_sqr_NIST_P192L - .align 2 - .type mcl_fp_sqr_NIST_P192L,%function -mcl_fp_sqr_NIST_P192L: @ @mcl_fp_sqr_NIST_P192L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #60 - sub sp, sp, #60 
- mov r8, r0 - add r0, sp, #12 - bl mcl_fpDbl_sqrPre6L(PLT) - add r12, sp, #12 - ldr lr, [sp, #48] - ldr r2, [sp, #44] - ldr r3, [sp, #40] - mov r4, #0 - ldm r12, {r0, r1, r5, r6, r12} - ldr r7, [sp, #36] - adds r0, r7, r0 - str r0, [sp, #8] @ 4-byte Spill - adcs r0, r3, r1 - mov r1, #0 - adcs r10, r2, r5 - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #52] - ldr r5, [sp, #32] - adcs r11, lr, r6 - ldr r6, [sp, #56] - adcs r9, r0, r12 - adcs r5, r6, r5 - adcs r1, r1, #0 - adc r12, r4, #0 - ldr r4, [sp, #8] @ 4-byte Reload - adds r4, r4, r0 - str r4, [sp, #8] @ 4-byte Spill - ldr r4, [sp, #4] @ 4-byte Reload - adcs r4, r4, r6 - adcs r7, r10, r7 - adcs r3, r11, r3 - adcs r2, r9, r2 - adcs r5, r5, lr - adcs r1, r1, #0 - adc r12, r12, #0 - adds lr, r7, r0 - ldr r0, [sp, #8] @ 4-byte Reload - adcs r3, r3, r6 - adcs r2, r2, #0 - adcs r7, r5, #0 - adcs r1, r1, #0 - adc r6, r12, #0 - adds r5, r1, r0 - mov r0, #0 - adcs r11, r6, r4 - adcs r10, r1, lr - adcs r12, r6, r3 - adcs lr, r2, #0 - adcs r4, r7, #0 - adc r9, r0, #0 - adds r7, r5, #1 - str r4, [sp, #8] @ 4-byte Spill - adcs r2, r11, #0 - adcs r3, r10, #1 - adcs r6, r12, #0 - adcs r1, lr, #0 - adcs r0, r4, #0 - sbc r4, r9, #0 - ands r4, r4, #1 - movne r7, r5 - movne r2, r11 - movne r3, r10 - cmp r4, #0 - movne r6, r12 - movne r1, lr - str r7, [r8] - str r2, [r8, #4] - str r3, [r8, #8] - str r6, [r8, #12] - str r1, [r8, #16] - ldr r1, [sp, #8] @ 4-byte Reload - movne r0, r1 - str r0, [r8, #20] - add sp, sp, #60 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end2: - .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L - .cantunwind - .fnend - - .globl mcl_fp_mulNIST_P192L - .align 2 - .type mcl_fp_mulNIST_P192L,%function -mcl_fp_mulNIST_P192L: @ @mcl_fp_mulNIST_P192L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #60 - sub sp, sp, #60 - mov r8, r0 - add r0, sp, #12 - bl mcl_fpDbl_mulPre6L(PLT) - add r12, sp, #12 - ldr lr, [sp, #48] - ldr r2, [sp, #44] - ldr r3, [sp, #40] - mov r4, #0 - ldm r12, {r0, r1, r5, r6, r12} - ldr r7, [sp, #36] - adds r0, r7, r0 - str r0, [sp, #8] @ 4-byte Spill - adcs r0, r3, r1 - mov r1, #0 - adcs r10, r2, r5 - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #52] - ldr r5, [sp, #32] - adcs r11, lr, r6 - ldr r6, [sp, #56] - adcs r9, r0, r12 - adcs r5, r6, r5 - adcs r1, r1, #0 - adc r12, r4, #0 - ldr r4, [sp, #8] @ 4-byte Reload - adds r4, r4, r0 - str r4, [sp, #8] @ 4-byte Spill - ldr r4, [sp, #4] @ 4-byte Reload - adcs r4, r4, r6 - adcs r7, r10, r7 - adcs r3, r11, r3 - adcs r2, r9, r2 - adcs r5, r5, lr - adcs r1, r1, #0 - adc r12, r12, #0 - adds lr, r7, r0 - ldr r0, [sp, #8] @ 4-byte Reload - adcs r3, r3, r6 - adcs r2, r2, #0 - adcs r7, r5, #0 - adcs r1, r1, #0 - adc r6, r12, #0 - adds r5, r1, r0 - mov r0, #0 - adcs r11, r6, r4 - adcs r10, r1, lr - adcs r12, r6, r3 - adcs lr, r2, #0 - adcs r4, r7, #0 - adc r9, r0, #0 - adds r7, r5, #1 - str r4, [sp, #8] @ 4-byte Spill - adcs r2, r11, #0 - adcs r3, r10, #1 - adcs r6, r12, #0 - adcs r1, lr, #0 - adcs r0, r4, #0 - sbc r4, r9, #0 - ands r4, r4, #1 - movne r7, r5 - movne r2, r11 - movne r3, r10 - cmp r4, #0 - movne r6, r12 - movne r1, lr - str r7, [r8] - str r2, [r8, #4] - str r3, [r8, #8] - str r6, [r8, #12] - str r1, [r8, #16] - ldr r1, [sp, #8] @ 4-byte Reload - movne r0, r1 - str r0, [r8, #20] - add sp, sp, #60 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end3: - .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L - .cantunwind - .fnend - - 
.globl mcl_fpDbl_mod_NIST_P521L - .align 2 - .type mcl_fpDbl_mod_NIST_P521L,%function -mcl_fpDbl_mod_NIST_P521L: @ @mcl_fpDbl_mod_NIST_P521L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #44 - sub sp, sp, #44 - ldr r6, [r1, #64] - mov r5, #255 - ldr r3, [r1, #72] - ldr r2, [r1, #76] - mov r9, r0 - orr r5, r5, #256 - and r5, r6, r5 - lsr r6, r6, #9 - lsr r7, r3, #9 - str r5, [sp, #40] @ 4-byte Spill - ldr r5, [r1, #68] - orr r12, r7, r2, lsl #23 - lsr r2, r2, #9 - lsr r4, r5, #9 - orr r6, r6, r5, lsl #23 - ldr r5, [r1] - orr r3, r4, r3, lsl #23 - ldmib r1, {r4, r7, lr} - adds r5, r6, r5 - ldr r6, [r1, #36] - str r5, [sp, #36] @ 4-byte Spill - ldr r5, [r1, #80] - adcs r3, r3, r4 - str r3, [sp, #32] @ 4-byte Spill - adcs r7, r12, r7 - ldr r3, [r1, #84] - str r7, [sp, #28] @ 4-byte Spill - ldr r7, [r1, #88] - orr r2, r2, r5, lsl #23 - lsr r5, r5, #9 - adcs r12, r2, lr - ldr r2, [r1, #16] - orr r4, r5, r3, lsl #23 - lsr r3, r3, #9 - orr r3, r3, r7, lsl #23 - lsr r5, r7, #9 - ldr r7, [r1, #40] - adcs r2, r4, r2 - ldr r4, [r1, #24] - str r2, [sp, #24] @ 4-byte Spill - ldr r2, [r1, #20] - adcs r2, r3, r2 - str r2, [sp, #20] @ 4-byte Spill - ldr r2, [r1, #92] - orr r3, r5, r2, lsl #23 - ldr r5, [r1, #28] - lsr r2, r2, #9 - adcs lr, r3, r4 - ldr r3, [r1, #96] - ldr r4, [r1, #44] - orr r2, r2, r3, lsl #23 - adcs r2, r2, r5 - ldr r5, [r1, #32] - str r2, [sp, #16] @ 4-byte Spill - lsr r2, r3, #9 - ldr r3, [r1, #100] - orr r2, r2, r3, lsl #23 - adcs r2, r2, r5 - ldr r5, [r1, #48] - str r2, [sp, #12] @ 4-byte Spill - lsr r2, r3, #9 - ldr r3, [r1, #104] - orr r2, r2, r3, lsl #23 - adcs r0, r2, r6 - lsr r2, r3, #9 - ldr r3, [r1, #108] - ldr r6, [r1, #52] - str r0, [sp, #8] @ 4-byte Spill - orr r2, r2, r3, lsl #23 - adcs r7, r2, r7 - lsr r2, r3, #9 - ldr r3, [r1, #112] - orr r2, r2, r3, lsl #23 - lsr r3, r3, #9 - adcs r2, r2, r4 - ldr r4, [r1, #116] - orr r3, r3, r4, lsl #23 - lsr r4, r4, #9 - adcs r3, r3, r5 - ldr r5, [r1, #120] - orr r4, r4, r5, lsl #23 - adcs r11, r4, r6 - lsr r4, r5, #9 - ldr r5, [r1, #124] - ldr r6, [r1, #56] - orr r4, r4, r5, lsl #23 - adcs r10, r4, r6 - lsr r4, r5, #9 - ldr r5, [r1, #128] - ldr r1, [r1, #60] - orr r4, r4, r5, lsl #23 - adcs r8, r4, r1 - ldr r4, [sp, #40] @ 4-byte Reload - lsr r1, r5, #9 - ldr r5, [sp, #36] @ 4-byte Reload - adc r1, r1, r4 - mov r4, #1 - and r4, r4, r1, lsr #9 - adds r5, r4, r5 - ldr r4, [sp, #32] @ 4-byte Reload - str r5, [sp, #40] @ 4-byte Spill - adcs r6, r4, #0 - ldr r4, [sp, #28] @ 4-byte Reload - str r6, [sp, #36] @ 4-byte Spill - adcs r0, r4, #0 - and r4, r6, r5 - ldr r5, [sp, #24] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - and r4, r4, r0 - adcs r0, r12, #0 - str r0, [sp, #28] @ 4-byte Spill - and r6, r4, r0 - adcs r0, r5, #0 - and r4, r6, r0 - ldr r6, [sp, #20] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - adcs r0, r6, #0 - ldr r6, [sp, #16] @ 4-byte Reload - str r0, [sp, #20] @ 4-byte Spill - and r5, r4, r0 - adcs r0, lr, #0 - and r5, r5, r0 - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adcs lr, r6, #0 - and r6, r5, lr - ldr r5, [sp, #12] @ 4-byte Reload - adcs r5, r5, #0 - and r12, r6, r5 - adcs r6, r0, #0 - adcs r7, r7, #0 - and r4, r12, r6 - adcs r2, r2, #0 - and r4, r4, r7 - adcs r3, r3, #0 - and r4, r4, r2 - adcs r0, r11, #0 - and r4, r4, r3 - adcs r10, r10, #0 - and r4, r4, r0 - adcs r11, r8, #0 - and r4, r4, r10 - adc r8, r1, #0 - ldr r1, .LCPI4_0 - and r4, r4, r11 - orr r1, r8, r1 - and r1, r4, r1 - cmn r1, #1 - beq 
.LBB4_2 -@ BB#1: @ %nonzero - ldr r1, [sp, #40] @ 4-byte Reload - str r1, [r9] - ldr r1, [sp, #36] @ 4-byte Reload - str r1, [r9, #4] - ldr r1, [sp, #32] @ 4-byte Reload - str r1, [r9, #8] - ldr r1, [sp, #28] @ 4-byte Reload - str r1, [r9, #12] - ldr r1, [sp, #24] @ 4-byte Reload - str r1, [r9, #16] - ldr r1, [sp, #20] @ 4-byte Reload - str r1, [r9, #20] - ldr r1, [sp, #4] @ 4-byte Reload - str r1, [r9, #24] - add r1, r9, #32 - str lr, [r9, #28] - stm r1, {r5, r6, r7} - add r1, r9, #52 - str r2, [r9, #44] - str r3, [r9, #48] - stm r1, {r0, r10, r11} - mov r1, #255 - orr r1, r1, #256 - and r1, r8, r1 - str r1, [r9, #64] - b .LBB4_3 -.LBB4_2: @ %zero - mov r0, r9 - mov r1, #0 - mov r2, #68 - bl memset(PLT) -.LBB4_3: @ %zero - add sp, sp, #44 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr - .align 2 -@ BB#4: -.LCPI4_0: - .long 4294966784 @ 0xfffffe00 -.Lfunc_end4: - .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L - .cantunwind - .fnend - - .globl mcl_fp_mulUnitPre1L - .align 2 - .type mcl_fp_mulUnitPre1L,%function -mcl_fp_mulUnitPre1L: @ @mcl_fp_mulUnitPre1L - .fnstart -@ BB#0: - ldr r1, [r1] - umull r3, r12, r1, r2 - stm r0, {r3, r12} - mov pc, lr -.Lfunc_end5: - .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L - .cantunwind - .fnend - - .globl mcl_fpDbl_mulPre1L - .align 2 - .type mcl_fpDbl_mulPre1L,%function -mcl_fpDbl_mulPre1L: @ @mcl_fpDbl_mulPre1L - .fnstart -@ BB#0: - ldr r1, [r1] - ldr r2, [r2] - umull r3, r12, r2, r1 - stm r0, {r3, r12} - mov pc, lr -.Lfunc_end6: - .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L - .cantunwind - .fnend - - .globl mcl_fpDbl_sqrPre1L - .align 2 - .type mcl_fpDbl_sqrPre1L,%function -mcl_fpDbl_sqrPre1L: @ @mcl_fpDbl_sqrPre1L - .fnstart -@ BB#0: - ldr r1, [r1] - umull r2, r3, r1, r1 - stm r0, {r2, r3} - mov pc, lr -.Lfunc_end7: - .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L - .cantunwind - .fnend - - .globl mcl_fp_mont1L - .align 2 - .type mcl_fp_mont1L,%function -mcl_fp_mont1L: @ @mcl_fp_mont1L - .fnstart -@ BB#0: - .save {r4, r5, r6, lr} - push {r4, r5, r6, lr} - ldr r12, [r2] - ldr r1, [r1] - mov r6, #0 - umull lr, r2, r1, r12 - ldr r12, [r3, #-4] - ldr r3, [r3] - mul r1, lr, r12 - umull r12, r4, r1, r3 - adds r5, r12, lr - adcs r5, r4, r2 - umlal lr, r2, r1, r3 - adc r6, r6, #0 - subs r1, r2, r3 - sbc r3, r6, #0 - tst r3, #1 - movne r1, r2 - str r1, [r0] - pop {r4, r5, r6, lr} - mov pc, lr -.Lfunc_end8: - .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L - .cantunwind - .fnend - - .globl mcl_fp_montNF1L - .align 2 - .type mcl_fp_montNF1L,%function -mcl_fp_montNF1L: @ @mcl_fp_montNF1L - .fnstart -@ BB#0: - .save {r11, lr} - push {r11, lr} - ldr r12, [r2] - ldr r1, [r1] - umull lr, r2, r1, r12 - ldr r12, [r3, #-4] - ldr r3, [r3] - mul r1, lr, r12 - umlal lr, r2, r1, r3 - sub r1, r2, r3 - cmp r1, #0 - movge r2, r1 - str r2, [r0] - pop {r11, lr} - mov pc, lr -.Lfunc_end9: - .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L - .cantunwind - .fnend - - .globl mcl_fp_montRed1L - .align 2 - .type mcl_fp_montRed1L,%function -mcl_fp_montRed1L: @ @mcl_fp_montRed1L - .fnstart -@ BB#0: - .save {r4, r5, r6, lr} - push {r4, r5, r6, lr} - ldr r12, [r2, #-4] - ldr r3, [r1] - ldr r2, [r2] - ldr r1, [r1, #4] - mov r6, #0 - mul lr, r3, r12 - umull r12, r4, lr, r2 - adds r5, r3, r12 - adcs r5, r1, r4 - umlal r3, r1, lr, r2 - adc r6, r6, #0 - subs r2, r1, r2 - sbc r3, r6, #0 - tst r3, #1 - movne r2, r1 - str r2, [r0] - pop {r4, r5, r6, lr} - mov pc, lr -.Lfunc_end10: - .size mcl_fp_montRed1L, 
.Lfunc_end10-mcl_fp_montRed1L - .cantunwind - .fnend - - .globl mcl_fp_addPre1L - .align 2 - .type mcl_fp_addPre1L,%function -mcl_fp_addPre1L: @ @mcl_fp_addPre1L - .fnstart -@ BB#0: - ldr r1, [r1] - ldr r2, [r2] - adds r1, r2, r1 - str r1, [r0] - mov r0, #0 - adc r0, r0, #0 - mov pc, lr -.Lfunc_end11: - .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L - .cantunwind - .fnend - - .globl mcl_fp_subPre1L - .align 2 - .type mcl_fp_subPre1L,%function -mcl_fp_subPre1L: @ @mcl_fp_subPre1L - .fnstart -@ BB#0: - ldr r2, [r2] - ldr r1, [r1] - subs r1, r1, r2 - str r1, [r0] - mov r0, #0 - sbc r0, r0, #0 - and r0, r0, #1 - mov pc, lr -.Lfunc_end12: - .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L - .cantunwind - .fnend - - .globl mcl_fp_shr1_1L - .align 2 - .type mcl_fp_shr1_1L,%function -mcl_fp_shr1_1L: @ @mcl_fp_shr1_1L - .fnstart -@ BB#0: - ldr r1, [r1] - lsr r1, r1, #1 - str r1, [r0] - mov pc, lr -.Lfunc_end13: - .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L - .cantunwind - .fnend - - .globl mcl_fp_add1L - .align 2 - .type mcl_fp_add1L,%function -mcl_fp_add1L: @ @mcl_fp_add1L - .fnstart -@ BB#0: - ldr r1, [r1] - ldr r2, [r2] - ldr r3, [r3] - adds r1, r2, r1 - mov r2, #0 - str r1, [r0] - adc r2, r2, #0 - subs r1, r1, r3 - sbc r2, r2, #0 - tst r2, #1 - streq r1, [r0] - mov pc, lr -.Lfunc_end14: - .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L - .cantunwind - .fnend - - .globl mcl_fp_addNF1L - .align 2 - .type mcl_fp_addNF1L,%function -mcl_fp_addNF1L: @ @mcl_fp_addNF1L - .fnstart -@ BB#0: - ldr r1, [r1] - ldr r2, [r2] - add r1, r2, r1 - ldr r2, [r3] - sub r2, r1, r2 - cmp r2, #0 - movlt r2, r1 - str r2, [r0] - mov pc, lr -.Lfunc_end15: - .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L - .cantunwind - .fnend - - .globl mcl_fp_sub1L - .align 2 - .type mcl_fp_sub1L,%function -mcl_fp_sub1L: @ @mcl_fp_sub1L - .fnstart -@ BB#0: - ldr r2, [r2] - ldr r1, [r1] - subs r1, r1, r2 - mov r2, #0 - sbc r2, r2, #0 - str r1, [r0] - tst r2, #1 - ldrne r2, [r3] - addne r1, r2, r1 - strne r1, [r0] - movne pc, lr - mov pc, lr -.Lfunc_end16: - .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L - .cantunwind - .fnend - - .globl mcl_fp_subNF1L - .align 2 - .type mcl_fp_subNF1L,%function -mcl_fp_subNF1L: @ @mcl_fp_subNF1L - .fnstart -@ BB#0: - ldr r2, [r2] - ldr r1, [r1] - sub r1, r1, r2 - ldr r2, [r3] - cmp r1, #0 - addlt r1, r1, r2 - str r1, [r0] - mov pc, lr -.Lfunc_end17: - .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L - .cantunwind - .fnend - - .globl mcl_fpDbl_add1L - .align 2 - .type mcl_fpDbl_add1L,%function -mcl_fpDbl_add1L: @ @mcl_fpDbl_add1L - .fnstart -@ BB#0: - .save {r11, lr} - push {r11, lr} - ldm r1, {r12, lr} - ldm r2, {r1, r2} - ldr r3, [r3] - adds r1, r1, r12 - str r1, [r0] - mov r1, #0 - adcs r2, r2, lr - adc r1, r1, #0 - subs r3, r2, r3 - sbc r1, r1, #0 - tst r1, #1 - movne r3, r2 - str r3, [r0, #4] - pop {r11, lr} - mov pc, lr -.Lfunc_end18: - .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L - .cantunwind - .fnend - - .globl mcl_fpDbl_sub1L - .align 2 - .type mcl_fpDbl_sub1L,%function -mcl_fpDbl_sub1L: @ @mcl_fpDbl_sub1L - .fnstart -@ BB#0: - .save {r11, lr} - push {r11, lr} - ldm r2, {r12, lr} - ldr r2, [r1] - ldr r1, [r1, #4] - ldr r3, [r3] - subs r2, r2, r12 - str r2, [r0] - mov r2, #0 - sbcs r1, r1, lr - sbc r2, r2, #0 - tst r2, #1 - addne r1, r1, r3 - str r1, [r0, #4] - pop {r11, lr} - mov pc, lr -.Lfunc_end19: - .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L - .cantunwind - .fnend - - .globl mcl_fp_mulUnitPre2L - .align 2 - .type mcl_fp_mulUnitPre2L,%function 
-mcl_fp_mulUnitPre2L: @ @mcl_fp_mulUnitPre2L - .fnstart -@ BB#0: - .save {r11, lr} - push {r11, lr} - ldm r1, {r3, lr} - umull r12, r1, r3, r2 - mov r3, #0 - umlal r1, r3, lr, r2 - str r12, [r0] - stmib r0, {r1, r3} - pop {r11, lr} - mov pc, lr -.Lfunc_end20: - .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L - .cantunwind - .fnend - - .globl mcl_fpDbl_mulPre2L - .align 2 - .type mcl_fpDbl_mulPre2L,%function -mcl_fpDbl_mulPre2L: @ @mcl_fpDbl_mulPre2L - .fnstart -@ BB#0: - .save {r4, r5, r6, lr} - push {r4, r5, r6, lr} - ldr r3, [r2] - ldm r1, {r12, lr} - ldr r2, [r2, #4] - mov r5, #0 - umull r1, r4, r12, r3 - umlal r4, r5, lr, r3 - umull r3, r6, r12, r2 - str r1, [r0] - mov r1, #0 - adds r3, r3, r4 - str r3, [r0, #4] - umull r3, r4, lr, r2 - adcs r2, r3, r5 - adc r1, r1, #0 - adds r2, r2, r6 - adc r1, r1, r4 - str r2, [r0, #8] - str r1, [r0, #12] - pop {r4, r5, r6, lr} - mov pc, lr -.Lfunc_end21: - .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L - .cantunwind - .fnend - - .globl mcl_fpDbl_sqrPre2L - .align 2 - .type mcl_fpDbl_sqrPre2L,%function -mcl_fpDbl_sqrPre2L: @ @mcl_fpDbl_sqrPre2L - .fnstart -@ BB#0: - .save {r4, r5, r6, lr} - push {r4, r5, r6, lr} - ldr r2, [r1] - ldr r1, [r1, #4] - mov r4, #0 - mov lr, #0 - umull r12, r3, r2, r2 - umull r5, r6, r1, r2 - umlal r3, r4, r1, r2 - str r12, [r0] - adds r2, r3, r5 - umull r3, r5, r1, r1 - adcs r1, r4, r3 - str r2, [r0, #4] - adc r3, lr, #0 - adds r1, r1, r6 - adc r3, r3, r5 - str r1, [r0, #8] - str r3, [r0, #12] - pop {r4, r5, r6, lr} - mov pc, lr -.Lfunc_end22: - .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L - .cantunwind - .fnend - - .globl mcl_fp_mont2L - .align 2 - .type mcl_fp_mont2L,%function -mcl_fp_mont2L: @ @mcl_fp_mont2L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - ldm r1, {r12, lr} - ldm r2, {r1, r2} - mov r7, #0 - mov r5, #0 - mov r6, #0 - umull r8, r9, r2, r12 - umull r11, r4, r12, r1 - umlal r9, r7, r2, lr - umlal r4, r5, lr, r1 - ldmda r3, {r12, lr} - ldr r10, [r3, #4] - mul r1, r11, r12 - umull r3, r2, r1, lr - adds r3, r3, r11 - mov r3, #0 - umlal r2, r3, r1, r10 - adcs r1, r2, r4 - adcs r2, r3, r5 - adc r3, r6, #0 - adds r1, r1, r8 - adcs r8, r2, r9 - mul r5, r1, r12 - adcs r3, r3, r7 - umull r7, r2, r5, lr - adc r4, r6, #0 - umlal r2, r6, r5, r10 - adds r1, r7, r1 - adcs r1, r2, r8 - adcs r2, r6, r3 - adc r3, r4, #0 - subs r7, r1, lr - sbcs r6, r2, r10 - sbc r3, r3, #0 - ands r3, r3, #1 - movne r7, r1 - movne r6, r2 - str r7, [r0] - str r6, [r0, #4] - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end23: - .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L - .cantunwind - .fnend - - .globl mcl_fp_montNF2L - .align 2 - .type mcl_fp_montNF2L,%function -mcl_fp_montNF2L: @ @mcl_fp_montNF2L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - ldm r2, {r12, lr} - ldr r11, [r1] - ldr r8, [r3, #-4] - ldr r7, [r3] - ldr r9, [r1, #4] - ldr r3, [r3, #4] - umull r4, r5, r11, r12 - mul r6, r4, r8 - umull r1, r10, r6, r7 - adds r1, r1, r4 - mov r4, #0 - umlal r5, r4, r9, r12 - umull r2, r12, r6, r3 - mov r1, #0 - adcs r2, r2, r5 - adc r4, r4, #0 - adds r2, r2, r10 - adc r6, r4, r12 - umull r5, r4, lr, r11 - adds r2, r5, r2 - umlal r4, r1, lr, r9 - adcs r9, r4, r6 - mul r5, r2, r8 - adc lr, r1, #0 - umull r1, r6, r5, r7 - umull r4, r12, r5, r3 - adds r1, r1, r2 - adcs r1, r4, r9 - adc r2, lr, #0 - adds r1, r1, r6 - adc r2, r2, r12 - subs r7, r1, r7 - sbc r3, 
r2, r3 - cmp r3, #0 - movlt r7, r1 - movlt r3, r2 - str r7, [r0] - str r3, [r0, #4] - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end24: - .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L - .cantunwind - .fnend - - .globl mcl_fp_montRed2L - .align 2 - .type mcl_fp_montRed2L,%function -mcl_fp_montRed2L: @ @mcl_fp_montRed2L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, lr} - push {r4, r5, r6, r7, r8, r9, r10, lr} - ldr r12, [r2, #-4] - ldm r2, {r3, lr} - ldm r1, {r2, r9, r10} - ldr r8, [r1, #12] - mov r5, #0 - mov r7, #0 - mul r6, r2, r12 - umull r1, r4, r6, r3 - umlal r4, r5, r6, lr - adds r1, r2, r1 - adcs r1, r9, r4 - adcs r9, r10, r5 - mul r6, r1, r12 - adcs r8, r8, #0 - umull r2, r4, r6, r3 - adc r5, r7, #0 - umlal r4, r7, r6, lr - adds r1, r2, r1 - adcs r1, r4, r9 - adcs r2, r7, r8 - adc r7, r5, #0 - subs r3, r1, r3 - sbcs r6, r2, lr - sbc r7, r7, #0 - ands r7, r7, #1 - movne r3, r1 - movne r6, r2 - stm r0, {r3, r6} - pop {r4, r5, r6, r7, r8, r9, r10, lr} - mov pc, lr -.Lfunc_end25: - .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L - .cantunwind - .fnend - - .globl mcl_fp_addPre2L - .align 2 - .type mcl_fp_addPre2L,%function -mcl_fp_addPre2L: @ @mcl_fp_addPre2L - .fnstart -@ BB#0: - ldm r1, {r3, r12} - ldm r2, {r1, r2} - adds r1, r1, r3 - adcs r2, r2, r12 - stm r0, {r1, r2} - mov r0, #0 - adc r0, r0, #0 - mov pc, lr -.Lfunc_end26: - .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L - .cantunwind - .fnend - - .globl mcl_fp_subPre2L - .align 2 - .type mcl_fp_subPre2L,%function -mcl_fp_subPre2L: @ @mcl_fp_subPre2L - .fnstart -@ BB#0: - ldm r2, {r3, r12} - ldr r2, [r1] - ldr r1, [r1, #4] - subs r2, r2, r3 - sbcs r1, r1, r12 - str r2, [r0] - str r1, [r0, #4] - mov r0, #0 - sbc r0, r0, #0 - and r0, r0, #1 - mov pc, lr -.Lfunc_end27: - .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L - .cantunwind - .fnend - - .globl mcl_fp_shr1_2L - .align 2 - .type mcl_fp_shr1_2L,%function -mcl_fp_shr1_2L: @ @mcl_fp_shr1_2L - .fnstart -@ BB#0: - ldr r2, [r1] - ldr r1, [r1, #4] - lsrs r3, r1, #1 - lsr r1, r1, #1 - rrx r2, r2 - str r2, [r0] - str r1, [r0, #4] - mov pc, lr -.Lfunc_end28: - .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L - .cantunwind - .fnend - - .globl mcl_fp_add2L - .align 2 - .type mcl_fp_add2L,%function -mcl_fp_add2L: @ @mcl_fp_add2L - .fnstart -@ BB#0: - .save {r4, lr} - push {r4, lr} - ldm r1, {r12, lr} - ldm r2, {r1, r2} - adds r12, r1, r12 - mov r1, #0 - adcs r2, r2, lr - str r12, [r0] - str r2, [r0, #4] - adc lr, r1, #0 - ldm r3, {r1, r4} - subs r3, r12, r1 - sbcs r2, r2, r4 - sbc r1, lr, #0 - tst r1, #1 - streq r3, [r0] - streq r2, [r0, #4] - pop {r4, lr} - mov pc, lr -.Lfunc_end29: - .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L - .cantunwind - .fnend - - .globl mcl_fp_addNF2L - .align 2 - .type mcl_fp_addNF2L,%function -mcl_fp_addNF2L: @ @mcl_fp_addNF2L - .fnstart -@ BB#0: - .save {r4, lr} - push {r4, lr} - ldm r1, {r12, lr} - ldm r2, {r1, r2} - adds r1, r1, r12 - adc r4, r2, lr - ldm r3, {r12, lr} - subs r3, r1, r12 - sbc r2, r4, lr - cmp r2, #0 - movlt r3, r1 - movlt r2, r4 - str r3, [r0] - str r2, [r0, #4] - pop {r4, lr} - mov pc, lr -.Lfunc_end30: - .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L - .cantunwind - .fnend - - .globl mcl_fp_sub2L - .align 2 - .type mcl_fp_sub2L,%function -mcl_fp_sub2L: @ @mcl_fp_sub2L - .fnstart -@ BB#0: - .save {r4, lr} - push {r4, lr} - ldm r2, {r12, lr} - ldm r1, {r2, r4} - subs r1, r2, r12 - sbcs r2, r4, lr - mov r4, #0 - sbc r4, r4, #0 - stm r0, {r1, r2} - tst r4, #1 - popeq {r4, lr} - moveq 
pc, lr - ldr r4, [r3] - ldr r3, [r3, #4] - adds r1, r4, r1 - adc r2, r3, r2 - stm r0, {r1, r2} - pop {r4, lr} - mov pc, lr -.Lfunc_end31: - .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L - .cantunwind - .fnend - - .globl mcl_fp_subNF2L - .align 2 - .type mcl_fp_subNF2L,%function -mcl_fp_subNF2L: @ @mcl_fp_subNF2L - .fnstart -@ BB#0: - .save {r4, lr} - push {r4, lr} - ldm r2, {r12, lr} - ldr r2, [r1] - ldr r1, [r1, #4] - subs r4, r2, r12 - sbc r1, r1, lr - ldm r3, {r12, lr} - adds r3, r4, r12 - adc r2, r1, lr - cmp r1, #0 - movge r3, r4 - movge r2, r1 - str r3, [r0] - str r2, [r0, #4] - pop {r4, lr} - mov pc, lr -.Lfunc_end32: - .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L - .cantunwind - .fnend - - .globl mcl_fpDbl_add2L - .align 2 - .type mcl_fpDbl_add2L,%function -mcl_fpDbl_add2L: @ @mcl_fpDbl_add2L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r11, lr} - push {r4, r5, r6, r7, r11, lr} - ldm r1, {r12, lr} - ldr r4, [r1, #8] - ldr r1, [r1, #12] - ldm r2, {r5, r6, r7} - ldr r2, [r2, #12] - adds r5, r5, r12 - adcs r6, r6, lr - str r5, [r0] - adcs r7, r7, r4 - str r6, [r0, #4] - mov r6, #0 - adcs r1, r2, r1 - adc r2, r6, #0 - ldr r6, [r3] - ldr r3, [r3, #4] - subs r6, r7, r6 - sbcs r3, r1, r3 - sbc r2, r2, #0 - ands r2, r2, #1 - movne r6, r7 - movne r3, r1 - str r6, [r0, #8] - str r3, [r0, #12] - pop {r4, r5, r6, r7, r11, lr} - mov pc, lr -.Lfunc_end33: - .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L - .cantunwind - .fnend - - .globl mcl_fpDbl_sub2L - .align 2 - .type mcl_fpDbl_sub2L,%function -mcl_fpDbl_sub2L: @ @mcl_fpDbl_sub2L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r11, lr} - push {r4, r5, r6, r7, r11, lr} - ldm r2, {r12, lr} - ldr r4, [r2, #8] - ldr r2, [r2, #12] - ldm r1, {r5, r6, r7} - ldr r1, [r1, #12] - subs r5, r5, r12 - sbcs r6, r6, lr - str r5, [r0] - sbcs r7, r7, r4 - str r6, [r0, #4] - mov r6, #0 - sbcs r1, r1, r2 - sbc r2, r6, #0 - ldr r6, [r3] - ldr r3, [r3, #4] - adds r6, r7, r6 - adc r3, r1, r3 - ands r2, r2, #1 - moveq r6, r7 - moveq r3, r1 - str r6, [r0, #8] - str r3, [r0, #12] - pop {r4, r5, r6, r7, r11, lr} - mov pc, lr -.Lfunc_end34: - .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L - .cantunwind - .fnend - - .globl mcl_fp_mulUnitPre3L - .align 2 - .type mcl_fp_mulUnitPre3L,%function -mcl_fp_mulUnitPre3L: @ @mcl_fp_mulUnitPre3L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, lr} - push {r4, r5, r6, r7, r8, lr} - ldr r12, [r1] - ldmib r1, {r3, r5} - umull lr, r4, r12, r2 - umull r1, r12, r5, r2 - umull r7, r8, r3, r2 - mov r5, r1 - mov r6, r4 - str lr, [r0] - umlal r6, r5, r3, r2 - adds r2, r4, r7 - adcs r1, r8, r1 - str r6, [r0, #4] - str r5, [r0, #8] - adc r1, r12, #0 - str r1, [r0, #12] - pop {r4, r5, r6, r7, r8, lr} - mov pc, lr -.Lfunc_end35: - .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L - .cantunwind - .fnend - - .globl mcl_fpDbl_mulPre3L - .align 2 - .type mcl_fpDbl_mulPre3L,%function -mcl_fpDbl_mulPre3L: @ @mcl_fpDbl_mulPre3L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - ldr r3, [r2] - ldm r1, {r12, lr} - ldr r1, [r1, #8] - umull r4, r5, r12, r3 - str r4, [r0] - umull r4, r6, lr, r3 - adds r4, r5, r4 - umull r7, r4, r1, r3 - adcs r6, r6, r7 - umlal r5, r7, lr, r3 - ldr r3, [r2, #4] - ldr r2, [r2, #8] - adc r8, r4, #0 - umull r6, r10, r12, r3 - adds r9, r6, r5 - umull r6, r5, lr, r3 - adcs r6, r6, r7 - umull r7, r4, r1, r3 - str r9, [r0, #4] - adcs r3, r7, r8 - mov r8, #0 - adc r7, r8, #0 - adds r6, r6, r10 - adcs r11, r3, r5 - umull r5, r9, r1, r2 - umull r1, 
r10, lr, r2 - adc r4, r7, r4 - umull r7, r3, r12, r2 - adds r2, r6, r7 - adcs r1, r11, r1 - str r2, [r0, #8] - adcs r2, r4, r5 - adc r7, r8, #0 - adds r1, r1, r3 - str r1, [r0, #12] - adcs r1, r2, r10 - str r1, [r0, #16] - adc r1, r7, r9 - str r1, [r0, #20] - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end36: - .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L - .cantunwind - .fnend - - .globl mcl_fpDbl_sqrPre3L - .align 2 - .type mcl_fpDbl_sqrPre3L,%function -mcl_fpDbl_sqrPre3L: @ @mcl_fpDbl_sqrPre3L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, lr} - push {r4, r5, r6, r7, r8, r9, r10, lr} - ldm r1, {r2, r3, r12} - mov r10, #0 - umull r1, lr, r2, r2 - umull r7, r4, r3, r2 - str r1, [r0] - umull r1, r8, r12, r2 - mov r5, lr - mov r6, r1 - umlal r5, r6, r3, r2 - adds r2, lr, r7 - adcs r2, r4, r1 - adc r2, r8, #0 - adds lr, r5, r7 - umull r5, r9, r3, r3 - adcs r5, r6, r5 - umull r6, r7, r12, r3 - str lr, [r0, #4] - adcs r2, r2, r6 - adc r3, r10, #0 - adds r4, r5, r4 - adcs r2, r2, r9 - adc r3, r3, r7 - adds r1, r4, r1 - umull r5, r4, r12, r12 - str r1, [r0, #8] - adcs r1, r2, r6 - adcs r2, r3, r5 - adc r3, r10, #0 - adds r1, r1, r8 - str r1, [r0, #12] - adcs r1, r2, r7 - str r1, [r0, #16] - adc r1, r3, r4 - str r1, [r0, #20] - pop {r4, r5, r6, r7, r8, r9, r10, lr} - mov pc, lr -.Lfunc_end37: - .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L - .cantunwind - .fnend - - .globl mcl_fp_mont3L - .align 2 - .type mcl_fp_mont3L,%function -mcl_fp_mont3L: @ @mcl_fp_mont3L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #44 - sub sp, sp, #44 - str r0, [sp, #24] @ 4-byte Spill - ldm r2, {r8, lr} - ldr r0, [r2, #8] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [r1] - str r0, [sp, #36] @ 4-byte Spill - ldmib r1, {r4, r9} - ldr r2, [r3, #-4] - umull r7, r6, r0, r8 - ldr r0, [r3] - ldr r1, [r3, #8] - ldr r10, [r3, #4] - str r7, [sp, #12] @ 4-byte Spill - mul r5, r7, r2 - str r2, [sp, #16] @ 4-byte Spill - str r9, [sp, #32] @ 4-byte Spill - str r0, [sp, #40] @ 4-byte Spill - str r1, [sp, #28] @ 4-byte Spill - umull r12, r2, r5, r1 - umull r1, r3, r5, r0 - umull r0, r7, r9, r8 - umull r11, r9, r4, r8 - str r7, [sp] @ 4-byte Spill - adds r7, r6, r11 - str r1, [sp, #8] @ 4-byte Spill - mov r1, r3 - str r2, [sp, #4] @ 4-byte Spill - mov r2, r12 - adcs r7, r9, r0 - umlal r1, r2, r5, r10 - umlal r6, r0, r4, r8 - mov r8, #0 - ldr r7, [sp] @ 4-byte Reload - adc r9, r7, #0 - umull r7, r11, r5, r10 - ldr r5, [sp, #8] @ 4-byte Reload - adds r3, r3, r7 - ldr r7, [sp, #12] @ 4-byte Reload - adcs r3, r11, r12 - ldr r3, [sp, #4] @ 4-byte Reload - adc r3, r3, #0 - adds r7, r5, r7 - adcs r11, r1, r6 - adcs r12, r2, r0 - ldr r0, [sp, #32] @ 4-byte Reload - adcs r9, r3, r9 - ldr r3, [sp, #36] @ 4-byte Reload - adc r8, r8, #0 - umull r6, r7, lr, r0 - umull r5, r0, lr, r4 - umull r1, r2, lr, r3 - adds r5, r2, r5 - adcs r0, r0, r6 - umlal r2, r6, lr, r4 - adc r0, r7, #0 - adds r1, r11, r1 - ldr r11, [sp, #16] @ 4-byte Reload - adcs r2, r12, r2 - ldr r12, [sp, #28] @ 4-byte Reload - str r2, [sp, #12] @ 4-byte Spill - adcs r2, r9, r6 - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [sp, #40] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #4] @ 4-byte Spill - mov r0, #0 - mul r6, r1, r11 - adc r0, r0, #0 - umull r7, r9, r6, r12 - str r0, [sp] @ 4-byte Spill - mov r5, r7 - umull r8, r0, r6, r2 - umull lr, r2, r6, r10 - mov r3, r0 - adds r0, r0, lr - ldr lr, [sp, #36] @ 4-byte Reload - adcs r0, r2, r7 - umlal r3, r5, 
r6, r10 - adc r0, r9, #0 - adds r1, r8, r1 - ldr r1, [sp, #12] @ 4-byte Reload - adcs r1, r3, r1 - ldr r3, [sp, #20] @ 4-byte Reload - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp, #8] @ 4-byte Reload - adcs r8, r5, r1 - ldr r1, [sp, #4] @ 4-byte Reload - adcs r9, r0, r1 - ldr r0, [sp] @ 4-byte Reload - umull r1, r2, r3, lr - adc r0, r0, #0 - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - umull r6, r7, r3, r0 - umull r5, r0, r3, r4 - adds r5, r2, r5 - adcs r0, r0, r6 - umlal r2, r6, r3, r4 - ldr r3, [sp, #12] @ 4-byte Reload - adc r0, r7, #0 - adds r1, r3, r1 - adcs r2, r8, r2 - str r2, [sp, #36] @ 4-byte Spill - ldr r2, [sp, #8] @ 4-byte Reload - adcs r9, r9, r6 - mul r6, r1, r11 - umull r7, r4, r6, r12 - ldr r12, [sp, #40] @ 4-byte Reload - mov r5, r7 - adcs r0, r2, r0 - str r0, [sp, #32] @ 4-byte Spill - mov r0, #0 - umull r11, r3, r6, r12 - adc r8, r0, #0 - umull r0, lr, r6, r10 - mov r2, r3 - adds r0, r3, r0 - ldr r3, [sp, #32] @ 4-byte Reload - umlal r2, r5, r6, r10 - adcs r0, lr, r7 - adc r0, r4, #0 - adds r1, r11, r1 - ldr r1, [sp, #36] @ 4-byte Reload - adcs r1, r2, r1 - adcs r2, r5, r9 - ldr r5, [sp, #28] @ 4-byte Reload - adcs r0, r0, r3 - adc r3, r8, #0 - subs r7, r1, r12 - sbcs r6, r2, r10 - sbcs r5, r0, r5 - sbc r3, r3, #0 - ands r3, r3, #1 - movne r5, r0 - ldr r0, [sp, #24] @ 4-byte Reload - movne r7, r1 - movne r6, r2 - str r7, [r0] - str r6, [r0, #4] - str r5, [r0, #8] - add sp, sp, #44 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end38: - .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L - .cantunwind - .fnend - - .globl mcl_fp_montNF3L - .align 2 - .type mcl_fp_montNF3L,%function -mcl_fp_montNF3L: @ @mcl_fp_montNF3L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #68 - sub sp, sp, #68 - str r0, [sp, #64] @ 4-byte Spill - ldr r8, [r1] - ldmib r1, {r6, r9} - ldm r2, {r4, r7} - ldr r0, [r2, #8] - mov r10, r3 - umull r3, r1, r0, r9 - str r1, [sp, #52] @ 4-byte Spill - umull r1, r2, r0, r8 - str r3, [sp, #44] @ 4-byte Spill - str r1, [sp, #48] @ 4-byte Spill - str r2, [sp, #40] @ 4-byte Spill - mov r1, r2 - mov r2, r3 - umull r3, r5, r0, r6 - umlal r1, r2, r0, r6 - str r3, [sp, #32] @ 4-byte Spill - umull r3, r0, r7, r6 - str r5, [sp, #36] @ 4-byte Spill - str r1, [sp, #56] @ 4-byte Spill - str r2, [sp, #60] @ 4-byte Spill - umull r2, r1, r7, r9 - str r0, [sp, #8] @ 4-byte Spill - str r3, [sp, #4] @ 4-byte Spill - str r1, [sp, #28] @ 4-byte Spill - umull r1, r11, r7, r8 - str r2, [sp, #16] @ 4-byte Spill - str r1, [sp, #24] @ 4-byte Spill - mov r1, r2 - str r11, [sp, #12] @ 4-byte Spill - umlal r11, r1, r7, r6 - umull r0, r7, r6, r4 - str r1, [sp, #20] @ 4-byte Spill - umull lr, r1, r9, r4 - umull r9, r2, r8, r4 - ldr r8, [r10, #-4] - adds r0, r2, r0 - str r1, [sp] @ 4-byte Spill - mov r1, r2 - mov r12, lr - adcs r0, r7, lr - umlal r1, r12, r6, r4 - ldr r0, [sp] @ 4-byte Reload - ldm r10, {r6, r7} - mul r2, r9, r8 - adc r3, r0, #0 - ldr r0, [r10, #8] - umull r4, lr, r2, r6 - adds r4, r4, r9 - umull r4, r9, r2, r7 - adcs r1, r4, r1 - umull r4, r5, r2, r0 - adcs r2, r4, r12 - ldr r4, [sp, #4] @ 4-byte Reload - adc r3, r3, #0 - adds r1, r1, lr - adcs r2, r2, r9 - adc r3, r3, r5 - ldr r5, [sp, #12] @ 4-byte Reload - adds r5, r5, r4 - ldr r4, [sp, #8] @ 4-byte Reload - ldr r5, [sp, #16] @ 4-byte Reload - adcs r5, r4, r5 - ldr r4, [sp, #24] @ 4-byte Reload - ldr r5, [sp, #28] @ 4-byte Reload - adc r5, r5, #0 - adds r1, r4, r1 - ldr r4, [sp, #20] @ 4-byte Reload - adcs 
r2, r11, r2 - adcs r12, r4, r3 - mul r4, r1, r8 - umull r3, r9, r4, r6 - adc lr, r5, #0 - adds r1, r3, r1 - umull r1, r3, r4, r7 - adcs r1, r1, r2 - umull r2, r5, r4, r0 - adcs r2, r2, r12 - adc r4, lr, #0 - adds r1, r1, r9 - adcs r12, r2, r3 - ldr r2, [sp, #40] @ 4-byte Reload - ldr r3, [sp, #32] @ 4-byte Reload - adc r9, r4, r5 - adds r5, r2, r3 - ldr r2, [sp, #44] @ 4-byte Reload - ldr r3, [sp, #36] @ 4-byte Reload - adcs r5, r3, r2 - ldr r2, [sp, #52] @ 4-byte Reload - ldr r5, [sp, #60] @ 4-byte Reload - adc lr, r2, #0 - ldr r2, [sp, #48] @ 4-byte Reload - adds r1, r2, r1 - mul r4, r1, r8 - umull r10, r2, r4, r0 - umull r3, r8, r4, r7 - str r2, [sp, #52] @ 4-byte Spill - umull r2, r11, r4, r6 - ldr r4, [sp, #56] @ 4-byte Reload - adcs r4, r4, r12 - adcs r12, r5, r9 - adc r5, lr, #0 - adds r1, r2, r1 - adcs r1, r3, r4 - adcs r2, r10, r12 - adc r3, r5, #0 - ldr r5, [sp, #52] @ 4-byte Reload - adds r1, r1, r11 - adcs r2, r2, r8 - adc r3, r3, r5 - subs r6, r1, r6 - sbcs r7, r2, r7 - sbc r0, r3, r0 - asr r5, r0, #31 - cmp r5, #0 - movlt r6, r1 - ldr r1, [sp, #64] @ 4-byte Reload - movlt r7, r2 - movlt r0, r3 - stm r1, {r6, r7} - str r0, [r1, #8] - add sp, sp, #68 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end39: - .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L - .cantunwind - .fnend - - .globl mcl_fp_montRed3L - .align 2 - .type mcl_fp_montRed3L,%function -mcl_fp_montRed3L: @ @mcl_fp_montRed3L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #28 - sub sp, sp, #28 - ldr r5, [r2] - ldr lr, [r2, #-4] - ldr r3, [r2, #4] - ldr r2, [r2, #8] - str r0, [sp, #24] @ 4-byte Spill - str r5, [sp, #20] @ 4-byte Spill - str r2, [sp] @ 4-byte Spill - ldm r1, {r4, r7} - str r7, [sp, #8] @ 4-byte Spill - ldr r7, [r1, #8] - mul r6, r4, lr - umull r10, r8, r6, r3 - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r1, #12] - str r7, [sp, #12] @ 4-byte Spill - umull r7, r9, r6, r2 - umull r11, r2, r6, r5 - mov r0, r2 - adds r2, r2, r10 - mov r12, r7 - adcs r2, r8, r7 - umlal r0, r12, r6, r3 - ldr r8, [r1, #20] - ldr r1, [r1, #16] - ldr r2, [sp, #8] @ 4-byte Reload - adc r10, r9, #0 - adds r7, r4, r11 - mov r11, lr - adcs r9, r2, r0 - ldr r2, [sp] @ 4-byte Reload - mul r7, r9, lr - umull lr, r0, r7, r2 - str r0, [sp, #8] @ 4-byte Spill - umull r4, r0, r7, r5 - ldr r5, [sp, #16] @ 4-byte Reload - mov r6, lr - str r4, [sp, #4] @ 4-byte Spill - mov r4, r0 - umlal r4, r6, r7, r3 - adcs r12, r5, r12 - ldr r5, [sp, #12] @ 4-byte Reload - adcs r10, r5, r10 - adcs r1, r1, #0 - str r1, [sp, #16] @ 4-byte Spill - adcs r1, r8, #0 - str r1, [sp, #12] @ 4-byte Spill - mov r1, #0 - adc r8, r1, #0 - umull r1, r5, r7, r3 - ldr r7, [sp, #16] @ 4-byte Reload - adds r1, r0, r1 - adcs r0, r5, lr - ldr r1, [sp, #4] @ 4-byte Reload - ldr r0, [sp, #8] @ 4-byte Reload - adc r0, r0, #0 - adds r1, r1, r9 - adcs r1, r4, r12 - adcs lr, r6, r10 - ldr r6, [sp, #20] @ 4-byte Reload - mul r5, r1, r11 - mov r11, r2 - adcs r0, r0, r7 - umull r4, r12, r5, r2 - umull r2, r7, r5, r3 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r10, r0, #0 - umull r9, r0, r5, r6 - adc r8, r8, #0 - adds r2, r0, r2 - mov r2, r4 - adcs r4, r7, r4 - adc r7, r12, #0 - adds r1, r9, r1 - umlal r0, r2, r5, r3 - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r0, lr - adcs r1, r2, r1 - adcs r2, r7, r10 - adc r7, r8, #0 - subs r6, r0, r6 - sbcs r3, r1, r3 - sbcs r5, r2, r11 - sbc r7, r7, #0 - ands r7, r7, #1 - movne r6, r0 - ldr r0, [sp, #24] @ 4-byte 
Reload - movne r3, r1 - movne r5, r2 - str r6, [r0] - stmib r0, {r3, r5} - add sp, sp, #28 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end40: - .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L - .cantunwind - .fnend - - .globl mcl_fp_addPre3L - .align 2 - .type mcl_fp_addPre3L,%function -mcl_fp_addPre3L: @ @mcl_fp_addPre3L - .fnstart -@ BB#0: - .save {r4, lr} - push {r4, lr} - ldm r1, {r3, r12, lr} - ldm r2, {r1, r4} - ldr r2, [r2, #8] - adds r1, r1, r3 - adcs r3, r4, r12 - adcs r2, r2, lr - stm r0, {r1, r3} - str r2, [r0, #8] - mov r0, #0 - adc r0, r0, #0 - pop {r4, lr} - mov pc, lr -.Lfunc_end41: - .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L - .cantunwind - .fnend - - .globl mcl_fp_subPre3L - .align 2 - .type mcl_fp_subPre3L,%function -mcl_fp_subPre3L: @ @mcl_fp_subPre3L - .fnstart -@ BB#0: - .save {r4, lr} - push {r4, lr} - ldm r2, {r3, r12, lr} - ldm r1, {r2, r4} - ldr r1, [r1, #8] - subs r2, r2, r3 - sbcs r3, r4, r12 - sbcs r1, r1, lr - stm r0, {r2, r3} - str r1, [r0, #8] - mov r0, #0 - sbc r0, r0, #0 - and r0, r0, #1 - pop {r4, lr} - mov pc, lr -.Lfunc_end42: - .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L - .cantunwind - .fnend - - .globl mcl_fp_shr1_3L - .align 2 - .type mcl_fp_shr1_3L,%function -mcl_fp_shr1_3L: @ @mcl_fp_shr1_3L - .fnstart -@ BB#0: - ldr r3, [r1, #4] - ldr r12, [r1] - ldr r1, [r1, #8] - lsrs r2, r3, #1 - lsr r3, r3, #1 - orr r3, r3, r1, lsl #31 - rrx r2, r12 - lsr r1, r1, #1 - stm r0, {r2, r3} - str r1, [r0, #8] - mov pc, lr -.Lfunc_end43: - .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L - .cantunwind - .fnend - - .globl mcl_fp_add3L - .align 2 - .type mcl_fp_add3L,%function -mcl_fp_add3L: @ @mcl_fp_add3L - .fnstart -@ BB#0: - .save {r4, r5, r11, lr} - push {r4, r5, r11, lr} - ldm r1, {r12, lr} - ldr r1, [r1, #8] - ldm r2, {r4, r5} - ldr r2, [r2, #8] - adds r4, r4, r12 - adcs r5, r5, lr - adcs r1, r2, r1 - stm r0, {r4, r5} - mov r2, #0 - str r1, [r0, #8] - adc r12, r2, #0 - ldm r3, {r2, lr} - ldr r3, [r3, #8] - subs r4, r4, r2 - sbcs r5, r5, lr - sbcs r3, r1, r3 - sbc r1, r12, #0 - tst r1, #1 - stmeq r0, {r4, r5} - streq r3, [r0, #8] - pop {r4, r5, r11, lr} - mov pc, lr -.Lfunc_end44: - .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L - .cantunwind - .fnend - - .globl mcl_fp_addNF3L - .align 2 - .type mcl_fp_addNF3L,%function -mcl_fp_addNF3L: @ @mcl_fp_addNF3L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r11, lr} - push {r4, r5, r6, r7, r11, lr} - ldm r1, {r12, lr} - ldr r1, [r1, #8] - ldm r2, {r4, r5} - ldr r2, [r2, #8] - adds r4, r4, r12 - adcs r5, r5, lr - adc r7, r2, r1 - ldm r3, {r2, r12, lr} - subs r2, r4, r2 - sbcs r3, r5, r12 - sbc r1, r7, lr - asr r6, r1, #31 - cmp r6, #0 - movlt r2, r4 - movlt r3, r5 - movlt r1, r7 - stm r0, {r2, r3} - str r1, [r0, #8] - pop {r4, r5, r6, r7, r11, lr} - mov pc, lr -.Lfunc_end45: - .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L - .cantunwind - .fnend - - .globl mcl_fp_sub3L - .align 2 - .type mcl_fp_sub3L,%function -mcl_fp_sub3L: @ @mcl_fp_sub3L - .fnstart -@ BB#0: - .save {r4, r5, r6, lr} - push {r4, r5, r6, lr} - ldm r2, {r12, lr} - ldr r4, [r2, #8] - ldm r1, {r2, r5, r6} - subs r1, r2, r12 - sbcs r2, r5, lr - sbcs r12, r6, r4 - mov r6, #0 - sbc r6, r6, #0 - stm r0, {r1, r2, r12} - tst r6, #1 - popeq {r4, r5, r6, lr} - moveq pc, lr - ldr r6, [r3] - ldr r5, [r3, #4] - ldr r3, [r3, #8] - adds r1, r6, r1 - adcs r2, r5, r2 - adc r3, r3, r12 - stm r0, {r1, r2, r3} - pop {r4, r5, r6, lr} - mov pc, lr -.Lfunc_end46: - .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L - .cantunwind - 
.fnend - - .globl mcl_fp_subNF3L - .align 2 - .type mcl_fp_subNF3L,%function -mcl_fp_subNF3L: @ @mcl_fp_subNF3L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r11, lr} - push {r4, r5, r6, r7, r11, lr} - ldm r2, {r12, lr} - ldr r2, [r2, #8] - ldm r1, {r4, r5} - ldr r1, [r1, #8] - subs r4, r4, r12 - sbcs r7, r5, lr - sbc r1, r1, r2 - ldm r3, {r2, r12, lr} - asr r6, r1, #31 - adds r2, r4, r2 - adcs r3, r7, r12 - adc r5, r1, lr - cmp r6, #0 - movge r2, r4 - movge r3, r7 - movge r5, r1 - stm r0, {r2, r3, r5} - pop {r4, r5, r6, r7, r11, lr} - mov pc, lr -.Lfunc_end47: - .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L - .cantunwind - .fnend - - .globl mcl_fpDbl_add3L - .align 2 - .type mcl_fpDbl_add3L,%function -mcl_fpDbl_add3L: @ @mcl_fpDbl_add3L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - ldm r1, {r12, lr} - ldr r7, [r2] - ldr r11, [r1, #8] - ldr r9, [r1, #12] - ldr r10, [r1, #16] - ldr r8, [r1, #20] - ldmib r2, {r1, r5, r6} - ldr r4, [r2, #16] - ldr r2, [r2, #20] - adds r7, r7, r12 - adcs r1, r1, lr - str r7, [r0] - str r1, [r0, #4] - adcs r1, r5, r11 - ldr r5, [r3] - adcs r7, r6, r9 - str r1, [r0, #8] - mov r1, #0 - adcs r6, r4, r10 - ldr r4, [r3, #4] - ldr r3, [r3, #8] - adcs r2, r2, r8 - adc r1, r1, #0 - subs r5, r7, r5 - sbcs r4, r6, r4 - sbcs r3, r2, r3 - sbc r1, r1, #0 - ands r1, r1, #1 - movne r5, r7 - movne r4, r6 - movne r3, r2 - str r5, [r0, #12] - str r4, [r0, #16] - str r3, [r0, #20] - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end48: - .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L - .cantunwind - .fnend - - .globl mcl_fpDbl_sub3L - .align 2 - .type mcl_fpDbl_sub3L,%function -mcl_fpDbl_sub3L: @ @mcl_fpDbl_sub3L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - ldm r2, {r12, lr} - ldr r7, [r1] - ldr r11, [r2, #8] - ldr r9, [r2, #12] - ldr r10, [r2, #16] - ldr r8, [r2, #20] - ldmib r1, {r2, r5, r6} - ldr r4, [r1, #16] - ldr r1, [r1, #20] - subs r7, r7, r12 - sbcs r2, r2, lr - str r7, [r0] - str r2, [r0, #4] - sbcs r2, r5, r11 - ldr r5, [r3] - sbcs r7, r6, r9 - str r2, [r0, #8] - mov r2, #0 - sbcs r6, r4, r10 - ldr r4, [r3, #4] - ldr r3, [r3, #8] - sbcs r1, r1, r8 - sbc r2, r2, #0 - adds r5, r7, r5 - adcs r4, r6, r4 - adc r3, r1, r3 - ands r2, r2, #1 - moveq r5, r7 - moveq r4, r6 - moveq r3, r1 - str r5, [r0, #12] - str r4, [r0, #16] - str r3, [r0, #20] - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end49: - .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L - .cantunwind - .fnend - - .globl mcl_fp_mulUnitPre4L - .align 2 - .type mcl_fp_mulUnitPre4L,%function -mcl_fp_mulUnitPre4L: @ @mcl_fp_mulUnitPre4L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r11, lr} - push {r4, r5, r6, r7, r11, lr} - ldr r12, [r1] - ldmib r1, {r3, lr} - ldr r1, [r1, #12] - umull r4, r6, r12, r2 - umull r7, r12, lr, r2 - str r4, [r0] - mov r5, r6 - mov r4, r7 - umlal r5, r4, r3, r2 - str r5, [r0, #4] - str r4, [r0, #8] - umull r5, lr, r1, r2 - umull r1, r4, r3, r2 - adds r1, r6, r1 - adcs r1, r4, r7 - adcs r1, r12, r5 - str r1, [r0, #12] - adc r1, lr, #0 - str r1, [r0, #16] - pop {r4, r5, r6, r7, r11, lr} - mov pc, lr -.Lfunc_end50: - .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L - .cantunwind - .fnend - - .globl mcl_fpDbl_mulPre4L - .align 2 - .type mcl_fpDbl_mulPre4L,%function -mcl_fpDbl_mulPre4L: @ @mcl_fpDbl_mulPre4L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, 
r10, r11, lr} - .pad #40 - sub sp, sp, #40 - mov lr, r2 - ldr r11, [r1] - ldr r4, [lr] - ldmib r1, {r8, r12} - ldr r3, [r1, #12] - umull r2, r7, r11, r4 - umull r6, r9, r8, r4 - str r12, [sp] @ 4-byte Spill - adds r6, r7, r6 - str r2, [sp, #36] @ 4-byte Spill - mov r2, r3 - umull r6, r10, r12, r4 - adcs r5, r9, r6 - umlal r7, r6, r8, r4 - umull r5, r9, r3, r4 - ldr r3, [sp, #36] @ 4-byte Reload - ldr r4, [lr, #4] - adcs r10, r10, r5 - str r3, [r0] - adc r3, r9, #0 - str r3, [sp, #24] @ 4-byte Spill - umull r5, r3, r11, r4 - adds r7, r5, r7 - str r3, [sp, #32] @ 4-byte Spill - str r7, [sp, #36] @ 4-byte Spill - umull r7, r3, r8, r4 - str r3, [sp, #28] @ 4-byte Spill - adcs r3, r7, r6 - umull r7, r9, r12, r4 - mov r12, r2 - ldr r6, [sp, #32] @ 4-byte Reload - adcs r7, r7, r10 - umull r5, r10, r2, r4 - ldr r2, [sp, #24] @ 4-byte Reload - mov r4, #0 - adcs r5, r5, r2 - ldr r2, [sp, #28] @ 4-byte Reload - adc r4, r4, #0 - adds r6, r3, r6 - adcs r7, r7, r2 - ldr r2, [lr, #12] - str r7, [sp, #24] @ 4-byte Spill - adcs r7, r5, r9 - str r7, [sp, #20] @ 4-byte Spill - adc r7, r4, r10 - ldr r4, [lr, #8] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [sp, #36] @ 4-byte Reload - str r7, [r0, #4] - umull r5, r7, r11, r4 - adds r5, r5, r6 - str r7, [sp, #12] @ 4-byte Spill - str r5, [r0, #8] - ldm r1, {r11, lr} - ldr r5, [r1, #8] - ldr r1, [r1, #12] - ldr r3, [sp, #24] @ 4-byte Reload - umull r6, r7, r1, r2 - umull r10, r1, r5, r2 - str r1, [sp, #32] @ 4-byte Spill - umull r5, r1, lr, r2 - str r6, [sp, #8] @ 4-byte Spill - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [sp, #16] @ 4-byte Reload - str r1, [sp, #28] @ 4-byte Spill - umull r6, r1, r11, r2 - umull r2, r11, r12, r4 - str r1, [sp, #4] @ 4-byte Spill - ldr r1, [sp] @ 4-byte Reload - umull lr, r12, r1, r4 - umull r9, r1, r8, r4 - ldr r4, [sp, #20] @ 4-byte Reload - mov r8, #0 - adcs r3, r9, r3 - adcs r4, lr, r4 - adcs r2, r2, r7 - ldr r7, [sp, #12] @ 4-byte Reload - adc lr, r8, #0 - adds r3, r3, r7 - adcs r1, r4, r1 - adcs r2, r2, r12 - adc r4, lr, r11 - adds r3, r6, r3 - ldr r6, [sp, #4] @ 4-byte Reload - str r3, [r0, #12] - ldr r3, [sp, #8] @ 4-byte Reload - adcs r1, r5, r1 - adcs r2, r10, r2 - adcs r3, r3, r4 - adc r7, r8, #0 - adds r1, r1, r6 - str r1, [r0, #16] - ldr r1, [sp, #28] @ 4-byte Reload - adcs r1, r2, r1 - str r1, [r0, #20] - ldr r1, [sp, #32] @ 4-byte Reload - adcs r1, r3, r1 - str r1, [r0, #24] - ldr r1, [sp, #36] @ 4-byte Reload - adc r1, r7, r1 - str r1, [r0, #28] - add sp, sp, #40 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end51: - .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L - .cantunwind - .fnend - - .globl mcl_fpDbl_sqrPre4L - .align 2 - .type mcl_fpDbl_sqrPre4L,%function -mcl_fpDbl_sqrPre4L: @ @mcl_fpDbl_sqrPre4L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #16 - sub sp, sp, #16 - ldm r1, {r2, r3, r12} - ldr r8, [r1, #12] - umull r4, r6, r2, r2 - umull r11, lr, r12, r2 - str r4, [r0] - umull r10, r4, r8, r2 - mov r7, r11 - mov r5, r6 - str lr, [sp, #12] @ 4-byte Spill - str r4, [sp, #8] @ 4-byte Spill - umull r4, r9, r3, r2 - umlal r5, r7, r3, r2 - adds r2, r6, r4 - adcs r2, r9, r11 - ldr r2, [sp, #8] @ 4-byte Reload - adcs r10, lr, r10 - adc r2, r2, #0 - adds r4, r4, r5 - str r2, [sp] @ 4-byte Spill - umull r6, r2, r3, r3 - str r4, [sp, #8] @ 4-byte Spill - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [sp] @ 4-byte Reload - adcs r5, r6, r7 - umull r6, r7, r12, r3 - adcs lr, r6, r10 - umull r4, r10, r8, r3 - adcs r3, r4, 
r2 - ldr r2, [sp, #4] @ 4-byte Reload - mov r4, #0 - adc r4, r4, #0 - adds r5, r5, r9 - adcs r9, lr, r2 - adcs r2, r3, r7 - ldr r3, [sp, #8] @ 4-byte Reload - adc r4, r4, r10 - adds r5, r11, r5 - str r2, [sp, #4] @ 4-byte Spill - umull r2, r10, r8, r12 - umull lr, r8, r12, r12 - adcs r6, r6, r9 - stmib r0, {r3, r5} - mov r5, #0 - ldr r3, [sp, #4] @ 4-byte Reload - adcs r3, lr, r3 - adcs r2, r2, r4 - ldr r4, [sp, #12] @ 4-byte Reload - adc r5, r5, #0 - adds r6, r6, r4 - adcs r11, r3, r7 - adcs lr, r2, r8 - adc r8, r5, r10 - ldr r5, [r1] - ldmib r1, {r4, r7} - ldr r1, [r1, #12] - umull r12, r2, r1, r1 - umull r3, r9, r7, r1 - umull r7, r10, r4, r1 - str r2, [sp, #12] @ 4-byte Spill - umull r4, r2, r5, r1 - adds r1, r4, r6 - adcs r4, r7, r11 - str r1, [r0, #12] - mov r7, #0 - adcs r3, r3, lr - adcs r1, r12, r8 - adc r7, r7, #0 - adds r2, r4, r2 - str r2, [r0, #16] - adcs r2, r3, r10 - adcs r1, r1, r9 - str r2, [r0, #20] - str r1, [r0, #24] - ldr r1, [sp, #12] @ 4-byte Reload - adc r1, r7, r1 - str r1, [r0, #28] - add sp, sp, #16 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end52: - .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L - .cantunwind - .fnend - - .globl mcl_fp_mont4L - .align 2 - .type mcl_fp_mont4L,%function -mcl_fp_mont4L: @ @mcl_fp_mont4L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #76 - sub sp, sp, #76 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [r2, #8] - ldr r9, [r2] - ldr r8, [r2, #4] - ldr r6, [r3, #-4] - ldr r11, [r1, #8] - ldr r10, [r1, #12] - ldr r7, [r3, #8] - ldr r5, [r3, #4] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [r2, #12] - ldr r2, [r1, #4] - str r6, [sp, #44] @ 4-byte Spill - str r7, [sp, #40] @ 4-byte Spill - str r5, [sp, #52] @ 4-byte Spill - str r11, [sp, #60] @ 4-byte Spill - str r10, [sp, #56] @ 4-byte Spill - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [r1] - ldr r1, [r3] - str r2, [sp, #72] @ 4-byte Spill - ldr r3, [r3, #12] - umull r4, r2, r0, r9 - str r0, [sp, #64] @ 4-byte Spill - str r1, [sp, #48] @ 4-byte Spill - mul r0, r4, r6 - str r4, [sp, #24] @ 4-byte Spill - mov r4, r5 - umull lr, r6, r0, r7 - umull r7, r12, r0, r1 - str r7, [sp, #20] @ 4-byte Spill - ldr r7, [sp, #72] @ 4-byte Reload - str r6, [sp, #16] @ 4-byte Spill - mov r6, r12 - str lr, [sp, #8] @ 4-byte Spill - umlal r6, lr, r0, r5 - umull r5, r1, r10, r9 - str r1, [sp, #68] @ 4-byte Spill - str r5, [sp, #12] @ 4-byte Spill - umull r1, r10, r11, r9 - umull r11, r5, r7, r9 - adds r7, r2, r11 - adcs r5, r5, r1 - ldr r5, [sp, #12] @ 4-byte Reload - adcs r11, r10, r5 - ldr r5, [sp, #68] @ 4-byte Reload - str r3, [sp, #68] @ 4-byte Spill - adc r5, r5, #0 - str r5, [sp, #12] @ 4-byte Spill - umull r5, r7, r0, r3 - umull r10, r3, r0, r4 - ldr r4, [sp, #24] @ 4-byte Reload - adds r0, r12, r10 - mov r12, #0 - ldr r0, [sp, #8] @ 4-byte Reload - adcs r0, r3, r0 - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #20] @ 4-byte Reload - adc r3, r7, #0 - ldr r7, [sp, #72] @ 4-byte Reload - adds r4, r5, r4 - umlal r2, r1, r7, r9 - adcs r2, r6, r2 - adcs r1, lr, r1 - str r2, [sp, #24] @ 4-byte Spill - adcs r9, r0, r11 - ldr r0, [sp, #12] @ 4-byte Reload - str r1, [sp, #20] @ 4-byte Spill - adcs r6, r3, r0 - ldr r0, [sp, #56] @ 4-byte Reload - mov r3, r7 - adc r10, r12, #0 - umull r2, r12, r8, r7 - ldr r7, [sp, #64] @ 4-byte Reload - umull r5, r4, r8, r0 - ldr r0, [sp, #60] @ 4-byte Reload - umull r1, lr, r8, r0 - umull r11, r0, r8, r7 - adds r2, r0, r2 - adcs r2, r12, r1 - umlal 
r0, r1, r8, r3 - ldr r3, [sp, #24] @ 4-byte Reload - ldr r8, [sp, #48] @ 4-byte Reload - adcs r2, lr, r5 - adc r5, r4, #0 - adds r7, r3, r11 - ldr r3, [sp, #20] @ 4-byte Reload - ldr r11, [sp, #40] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #24] @ 4-byte Spill - adcs r0, r9, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #20] @ 4-byte Spill - adcs r0, r6, r2 - str r0, [sp, #16] @ 4-byte Spill - adcs r0, r10, r5 - ldr r10, [sp, #44] @ 4-byte Reload - str r0, [sp, #12] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #8] @ 4-byte Spill - mul r5, r7, r10 - umull r6, r0, r5, r11 - str r0, [sp] @ 4-byte Spill - umull r0, r3, r5, r8 - mov r4, r6 - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - mov r2, r3 - umlal r2, r4, r5, r1 - umull r9, r12, r5, r0 - umull lr, r0, r5, r1 - adds r3, r3, lr - adcs r0, r0, r6 - ldr r3, [sp, #4] @ 4-byte Reload - ldr r0, [sp] @ 4-byte Reload - adcs r0, r0, r9 - adc r1, r12, #0 - adds r3, r3, r7 - ldr r12, [sp, #64] @ 4-byte Reload - ldr r3, [sp, #24] @ 4-byte Reload - adcs r2, r2, r3 - ldr r3, [sp, #28] @ 4-byte Reload - str r2, [sp, #24] @ 4-byte Spill - ldr r2, [sp, #20] @ 4-byte Reload - umull r9, r7, r3, r12 - adcs r2, r4, r2 - str r2, [sp, #20] @ 4-byte Spill - ldr r2, [sp, #16] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #72] @ 4-byte Reload - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #60] @ 4-byte Reload - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - umull r6, r5, r3, r0 - umull r0, r4, r3, r1 - umull r1, lr, r3, r2 - adds r1, r7, r1 - adcs r1, lr, r0 - umlal r7, r0, r3, r2 - ldr r2, [sp, #24] @ 4-byte Reload - adcs r1, r4, r6 - adc r6, r5, #0 - adds r3, r2, r9 - ldr r2, [sp, #20] @ 4-byte Reload - adcs r2, r2, r7 - str r2, [sp, #28] @ 4-byte Spill - ldr r2, [sp, #16] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adcs r0, r0, r6 - mul r6, r3, r10 - str r0, [sp, #16] @ 4-byte Spill - mov r0, #0 - umull r7, r9, r6, r11 - umull r10, r4, r6, r8 - adc r0, r0, #0 - mov r2, r4 - mov r5, r7 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - umlal r2, r5, r6, r1 - umull r8, r12, r6, r0 - umull lr, r0, r6, r1 - adds r6, r4, lr - adcs r0, r0, r7 - adcs r0, r9, r8 - adc r1, r12, #0 - adds r3, r10, r3 - ldr r3, [sp, #28] @ 4-byte Reload - adcs r2, r2, r3 - ldr r3, [sp, #32] @ 4-byte Reload - str r2, [sp, #28] @ 4-byte Spill - ldr r2, [sp, #24] @ 4-byte Reload - adcs r8, r5, r2 - ldr r2, [sp, #20] @ 4-byte Reload - ldr r5, [sp, #64] @ 4-byte Reload - adcs r9, r0, r2 - ldr r0, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #72] @ 4-byte Reload - umull lr, r7, r3, r5 - ldr r5, [sp, #52] @ 4-byte Reload - adcs r11, r1, r0 - ldr r0, [sp, #12] @ 4-byte Reload - ldr r1, [sp, #60] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - umull r6, r10, r3, r0 - umull r0, r4, r3, r1 - umull r1, r12, r3, r2 - adds r1, r7, r1 - adcs r1, r12, r0 - umlal r7, r0, r3, r2 - ldr r2, [sp, #28] @ 4-byte Reload - ldr r12, [sp, #68] @ 4-byte Reload - adcs r1, r4, r6 - ldr r4, [sp, #40] @ 4-byte Reload - adc r6, r10, #0 - adds lr, r2, lr - ldr r2, [sp, #48] @ 4-byte Reload - adcs r10, r8, r7 - adcs r0, r9, r0 - str r0, [sp, 
#72] @ 4-byte Spill - adcs r0, r11, r1 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r8, r0, r6 - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - mul r6, lr, r0 - umull r1, r3, r6, r5 - umull r11, r7, r6, r2 - umull r0, r9, r6, r4 - adds r1, r7, r1 - adcs r1, r3, r0 - umlal r7, r0, r6, r5 - umull r1, r3, r6, r12 - adcs r1, r9, r1 - mov r9, r5 - adc r5, r3, #0 - adds r3, r11, lr - adcs r3, r7, r10 - ldr r7, [sp, #72] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #64] @ 4-byte Reload - adcs r1, r1, r7 - adcs lr, r5, r8 - ldr r5, [sp, #60] @ 4-byte Reload - adc r8, r5, #0 - subs r6, r3, r2 - sbcs r5, r0, r9 - sbcs r4, r1, r4 - sbcs r7, lr, r12 - sbc r2, r8, #0 - ands r2, r2, #1 - movne r5, r0 - ldr r0, [sp, #36] @ 4-byte Reload - movne r6, r3 - movne r4, r1 - cmp r2, #0 - movne r7, lr - str r6, [r0] - str r5, [r0, #4] - str r4, [r0, #8] - str r7, [r0, #12] - add sp, sp, #76 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end53: - .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L - .cantunwind - .fnend - - .globl mcl_fp_montNF4L - .align 2 - .type mcl_fp_montNF4L,%function -mcl_fp_montNF4L: @ @mcl_fp_montNF4L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #140 - sub sp, sp, #140 - mov r10, r3 - str r0, [sp, #132] @ 4-byte Spill - ldr lr, [r1] - ldmib r1, {r4, r8, r12} - ldr r3, [r2] - ldr r1, [r2, #4] - ldr r0, [r2, #8] - ldr r2, [r2, #12] - umull r6, r5, r2, r8 - str r5, [sp, #124] @ 4-byte Spill - umull r5, r7, r2, lr - str r6, [sp, #112] @ 4-byte Spill - str r5, [sp, #128] @ 4-byte Spill - mov r5, r6 - mov r6, r7 - str r7, [sp, #108] @ 4-byte Spill - umlal r6, r5, r2, r4 - str r5, [sp, #120] @ 4-byte Spill - umull r7, r5, r0, r8 - str r6, [sp, #116] @ 4-byte Spill - str r5, [sp, #84] @ 4-byte Spill - umull r5, r6, r0, lr - str r7, [sp, #72] @ 4-byte Spill - str r5, [sp, #88] @ 4-byte Spill - str r6, [sp, #68] @ 4-byte Spill - mov r5, r6 - mov r6, r7 - umlal r5, r6, r0, r4 - str r5, [sp, #76] @ 4-byte Spill - str r6, [sp, #80] @ 4-byte Spill - umull r6, r5, r1, r8 - str r5, [sp, #44] @ 4-byte Spill - umull r5, r7, r1, lr - str r6, [sp, #32] @ 4-byte Spill - str r5, [sp, #48] @ 4-byte Spill - mov r5, r6 - mov r6, r7 - str r7, [sp, #28] @ 4-byte Spill - umlal r6, r5, r1, r4 - str r5, [sp, #40] @ 4-byte Spill - umull r9, r5, r8, r3 - str r6, [sp, #36] @ 4-byte Spill - str r5, [sp, #136] @ 4-byte Spill - umull r6, r5, lr, r3 - mov r8, r9 - str r6, [sp, #4] @ 4-byte Spill - umull r11, r6, r2, r12 - mov lr, r5 - str r6, [sp, #104] @ 4-byte Spill - umull r7, r6, r2, r4 - umlal lr, r8, r4, r3 - str r11, [sp, #100] @ 4-byte Spill - str r6, [sp, #96] @ 4-byte Spill - umull r6, r2, r0, r12 - str r7, [sp, #92] @ 4-byte Spill - str r6, [sp, #60] @ 4-byte Spill - str r2, [sp, #64] @ 4-byte Spill - umull r6, r2, r0, r4 - str r2, [sp, #56] @ 4-byte Spill - umull r2, r0, r1, r12 - str r6, [sp, #52] @ 4-byte Spill - str r2, [sp, #20] @ 4-byte Spill - str r0, [sp, #24] @ 4-byte Spill - umull r2, r0, r1, r4 - str r2, [sp, #12] @ 4-byte Spill - umull r2, r6, r4, r3 - str r0, [sp, #16] @ 4-byte Spill - umull r0, r1, r12, r3 - ldr r4, [r10, #4] - adds r2, r5, r2 - ldr r5, [sp, #4] @ 4-byte Reload - adcs r2, r6, r9 - ldr r9, [r10, #8] - ldr r2, [sp, #136] @ 4-byte Reload - str r4, [sp, #136] @ 4-byte Spill - adcs r12, r2, r0 - ldr r2, [r10, #-4] - adc r0, r1, #0 - str r0, [sp] @ 4-byte Spill - ldr r0, [r10] - mul r1, r5, r2 - mov r7, r2 - 
umull r3, r11, r1, r0 - str r0, [sp, #8] @ 4-byte Spill - mov r6, r0 - umull r2, r0, r1, r9 - adds r3, r3, r5 - umull r3, r5, r1, r4 - adcs r3, r3, lr - ldr lr, [r10, #12] - adcs r2, r2, r8 - umull r4, r8, r1, lr - adcs r1, r4, r12 - ldr r4, [sp] @ 4-byte Reload - adc r4, r4, #0 - adds r3, r3, r11 - adcs r2, r2, r5 - adcs r12, r1, r0 - ldr r0, [sp, #28] @ 4-byte Reload - adc r1, r4, r8 - ldr r4, [sp, #12] @ 4-byte Reload - adds r4, r0, r4 - ldr r0, [sp, #32] @ 4-byte Reload - ldr r4, [sp, #16] @ 4-byte Reload - adcs r4, r4, r0 - ldr r0, [sp, #44] @ 4-byte Reload - ldr r4, [sp, #20] @ 4-byte Reload - adcs r4, r0, r4 - ldr r0, [sp, #24] @ 4-byte Reload - adc r5, r0, #0 - ldr r0, [sp, #48] @ 4-byte Reload - adds r3, r0, r3 - ldr r0, [sp, #36] @ 4-byte Reload - adcs r2, r0, r2 - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r12 - mov r12, r7 - adcs r8, r4, r1 - ldr r1, [sp, #136] @ 4-byte Reload - adc r10, r5, #0 - mul r5, r3, r7 - umull r7, r11, r5, r6 - adds r3, r7, r3 - umull r3, r7, r5, r1 - adcs r2, r3, r2 - umull r3, r4, r5, r9 - adcs r0, r3, r0 - umull r3, r6, r5, lr - adcs r3, r3, r8 - ldr r8, [sp, #8] @ 4-byte Reload - adc r5, r10, #0 - adds r2, r2, r11 - adcs r0, r0, r7 - adcs r3, r3, r4 - ldr r4, [sp, #68] @ 4-byte Reload - adc r7, r5, r6 - ldr r5, [sp, #52] @ 4-byte Reload - ldr r6, [sp, #88] @ 4-byte Reload - adds r4, r4, r5 - ldr r5, [sp, #56] @ 4-byte Reload - ldr r4, [sp, #72] @ 4-byte Reload - adcs r4, r5, r4 - ldr r5, [sp, #60] @ 4-byte Reload - ldr r4, [sp, #84] @ 4-byte Reload - adcs r4, r4, r5 - ldr r5, [sp, #64] @ 4-byte Reload - adc r5, r5, #0 - adds r2, r6, r2 - ldr r6, [sp, #76] @ 4-byte Reload - adcs r0, r6, r0 - ldr r6, [sp, #80] @ 4-byte Reload - adcs r3, r6, r3 - adcs r6, r4, r7 - adc r10, r5, #0 - mul r5, r2, r12 - umull r7, r11, r5, r8 - adds r2, r7, r2 - umull r2, r7, r5, r1 - adcs r0, r2, r0 - umull r2, r4, r5, r9 - adcs r2, r2, r3 - umull r3, r1, r5, lr - adcs r3, r3, r6 - ldr r6, [sp, #128] @ 4-byte Reload - adc r5, r10, #0 - adds r0, r0, r11 - adcs r2, r2, r7 - adcs r3, r3, r4 - ldr r4, [sp, #108] @ 4-byte Reload - adc r1, r5, r1 - ldr r5, [sp, #92] @ 4-byte Reload - adds r4, r4, r5 - ldr r5, [sp, #96] @ 4-byte Reload - ldr r4, [sp, #112] @ 4-byte Reload - adcs r4, r5, r4 - ldr r5, [sp, #100] @ 4-byte Reload - ldr r4, [sp, #124] @ 4-byte Reload - adcs r4, r4, r5 - ldr r5, [sp, #104] @ 4-byte Reload - adc r5, r5, #0 - adds r0, r6, r0 - ldr r6, [sp, #116] @ 4-byte Reload - adcs r2, r6, r2 - ldr r6, [sp, #120] @ 4-byte Reload - adcs r3, r6, r3 - adcs r11, r4, r1 - adc r10, r5, #0 - mul r5, r0, r12 - umull r7, r1, r5, r8 - adds r0, r7, r0 - ldr r7, [sp, #136] @ 4-byte Reload - umull r0, r12, r5, r9 - umull r6, r4, r5, r7 - adcs r2, r6, r2 - adcs r0, r0, r3 - umull r3, r6, r5, lr - adcs r3, r3, r11 - adc r5, r10, #0 - adds r1, r2, r1 - adcs r0, r0, r4 - adcs r2, r3, r12 - adc r3, r5, r6 - subs r4, r1, r8 - sbcs r7, r0, r7 - sbcs r6, r2, r9 - sbc r5, r3, lr - cmp r5, #0 - movlt r7, r0 - ldr r0, [sp, #132] @ 4-byte Reload - movlt r4, r1 - movlt r6, r2 - cmp r5, #0 - movlt r5, r3 - stm r0, {r4, r7} - str r6, [r0, #8] - str r5, [r0, #12] - add sp, sp, #140 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end54: - .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L - .cantunwind - .fnend - - .globl mcl_fp_montRed4L - .align 2 - .type mcl_fp_montRed4L,%function -mcl_fp_montRed4L: @ @mcl_fp_montRed4L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #60 - sub sp, sp, #60 - ldr r7, 
[r1, #4] - ldr r6, [r2, #-4] - ldr r10, [r1] - ldr r3, [r2, #8] - ldr r8, [r2] - ldr r12, [r2, #4] - ldr r2, [r2, #12] - str r0, [sp, #52] @ 4-byte Spill - str r7, [sp, #28] @ 4-byte Spill - ldr r7, [r1, #8] - str r6, [sp, #56] @ 4-byte Spill - str r3, [sp, #40] @ 4-byte Spill - str r2, [sp, #36] @ 4-byte Spill - str r8, [sp, #32] @ 4-byte Spill - str r7, [sp, #48] @ 4-byte Spill - ldr r7, [r1, #12] - str r7, [sp, #44] @ 4-byte Spill - mul r7, r10, r6 - umull r6, r5, r7, r3 - str r5, [sp, #20] @ 4-byte Spill - mov r5, r3 - umull r4, r3, r7, r8 - mov lr, r6 - str r4, [sp, #24] @ 4-byte Spill - umull r9, r4, r7, r2 - umull r11, r2, r7, r12 - mov r0, r3 - adds r3, r3, r11 - umlal r0, lr, r7, r12 - adcs r2, r2, r6 - ldr r6, [sp, #56] @ 4-byte Reload - ldr r2, [sp, #20] @ 4-byte Reload - adcs r2, r2, r9 - str r2, [sp, #20] @ 4-byte Spill - adc r2, r4, #0 - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [sp, #24] @ 4-byte Reload - adds r4, r10, r2 - ldr r2, [sp, #28] @ 4-byte Reload - add r10, r1, #16 - adcs r11, r2, r0 - mul r4, r11, r6 - umull r9, r0, r4, r5 - str r0, [sp, #24] @ 4-byte Spill - umull r0, r2, r4, r8 - mov r5, r9 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [r1, #28] - mov r7, r2 - umlal r7, r5, r4, r12 - str r0, [sp, #12] @ 4-byte Spill - ldm r10, {r1, r8, r10} - ldr r3, [sp, #48] @ 4-byte Reload - adcs r0, r3, lr - ldr r3, [sp, #44] @ 4-byte Reload - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r3, r3, r0 - ldr r0, [sp, #16] @ 4-byte Reload - str r3, [sp, #48] @ 4-byte Spill - adcs r1, r1, r0 - adcs r0, r8, #0 - str r1, [sp, #44] @ 4-byte Spill - ldr r8, [sp, #32] @ 4-byte Reload - str r0, [sp, #20] @ 4-byte Spill - adcs r0, r10, #0 - ldr r10, [sp, #36] @ 4-byte Reload - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - umull r1, lr, r4, r10 - adcs r0, r0, #0 - str r0, [sp, #12] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #8] @ 4-byte Spill - umull r3, r0, r4, r12 - adds r3, r2, r3 - ldr r2, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #40] @ 4-byte Reload - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp] @ 4-byte Spill - ldr r0, [sp, #4] @ 4-byte Reload - adc r1, lr, #0 - adds r2, r2, r11 - adcs r11, r7, r0 - mul r3, r11, r6 - umull r2, r0, r3, r9 - str r0, [sp, #24] @ 4-byte Spill - umull r0, r6, r3, r8 - mov r7, r2 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - mov r4, r6 - umlal r4, r7, r3, r12 - adcs r0, r5, r0 - ldr r5, [sp] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r1, r0 - umull r1, r5, r3, r10 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #8] @ 4-byte Spill - umull lr, r0, r3, r12 - adds r3, r6, lr - mov lr, r8 - adcs r0, r0, r2 - ldr r2, [sp, #28] @ 4-byte Reload - ldr r3, [sp, #44] @ 4-byte Reload - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - adc r1, r5, #0 - adds r2, r2, r11 - ldr r2, [sp, #48] @ 4-byte Reload - adcs r2, r4, r2 - adcs r3, r7, r3 - str r3, [sp, #48] @ 4-byte Spill - ldr r3, [sp, #20] @ 4-byte Reload - adcs r0, r0, r3 - mov r3, r9 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r11, r1, r0 - ldr r0, [sp, #12] @ 4-byte 
Reload - adcs r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - mul r5, r2, r0 - umull r4, r0, r5, r12 - umull r8, r6, r5, lr - adds r4, r6, r4 - umull r1, r4, r5, r3 - adcs r0, r0, r1 - umlal r6, r1, r5, r12 - umull r0, r7, r5, r10 - adcs r0, r4, r0 - ldr r4, [sp, #44] @ 4-byte Reload - adc r5, r7, #0 - adds r2, r8, r2 - ldr r2, [sp, #48] @ 4-byte Reload - adcs r2, r6, r2 - adcs r1, r1, r4 - ldr r4, [sp, #28] @ 4-byte Reload - adcs r0, r0, r11 - adcs r9, r5, r4 - ldr r4, [sp, #24] @ 4-byte Reload - adc r8, r4, #0 - subs r6, r2, lr - sbcs r5, r1, r12 - sbcs r4, r0, r3 - sbcs r7, r9, r10 - sbc r3, r8, #0 - ands r3, r3, #1 - movne r4, r0 - ldr r0, [sp, #52] @ 4-byte Reload - movne r6, r2 - movne r5, r1 - cmp r3, #0 - movne r7, r9 - str r6, [r0] - str r5, [r0, #4] - str r4, [r0, #8] - str r7, [r0, #12] - add sp, sp, #60 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end55: - .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L - .cantunwind - .fnend - - .globl mcl_fp_addPre4L - .align 2 - .type mcl_fp_addPre4L,%function -mcl_fp_addPre4L: @ @mcl_fp_addPre4L - .fnstart -@ BB#0: - .save {r4, r5, r6, lr} - push {r4, r5, r6, lr} - ldm r1, {r3, r12, lr} - ldr r1, [r1, #12] - ldm r2, {r4, r5, r6} - ldr r2, [r2, #12] - adds r3, r4, r3 - adcs r5, r5, r12 - adcs r6, r6, lr - adcs r1, r2, r1 - stm r0, {r3, r5, r6} - str r1, [r0, #12] - mov r0, #0 - adc r0, r0, #0 - pop {r4, r5, r6, lr} - mov pc, lr -.Lfunc_end56: - .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L - .cantunwind - .fnend - - .globl mcl_fp_subPre4L - .align 2 - .type mcl_fp_subPre4L,%function -mcl_fp_subPre4L: @ @mcl_fp_subPre4L - .fnstart -@ BB#0: - .save {r4, r5, r6, lr} - push {r4, r5, r6, lr} - ldm r2, {r3, r12, lr} - ldr r2, [r2, #12] - ldm r1, {r4, r5, r6} - ldr r1, [r1, #12] - subs r3, r4, r3 - sbcs r5, r5, r12 - sbcs r6, r6, lr - sbcs r1, r1, r2 - stm r0, {r3, r5, r6} - str r1, [r0, #12] - mov r0, #0 - sbc r0, r0, #0 - and r0, r0, #1 - pop {r4, r5, r6, lr} - mov pc, lr -.Lfunc_end57: - .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L - .cantunwind - .fnend - - .globl mcl_fp_shr1_4L - .align 2 - .type mcl_fp_shr1_4L,%function -mcl_fp_shr1_4L: @ @mcl_fp_shr1_4L - .fnstart -@ BB#0: - .save {r11, lr} - push {r11, lr} - ldr r3, [r1, #4] - ldr r12, [r1] - ldr lr, [r1, #12] - ldr r2, [r1, #8] - lsrs r1, r3, #1 - lsr r3, r3, #1 - rrx r12, r12 - lsrs r1, lr, #1 - orr r3, r3, r2, lsl #31 - rrx r1, r2 - lsr r2, lr, #1 - str r12, [r0] - str r3, [r0, #4] - str r1, [r0, #8] - str r2, [r0, #12] - pop {r11, lr} - mov pc, lr -.Lfunc_end58: - .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L - .cantunwind - .fnend - - .globl mcl_fp_add4L - .align 2 - .type mcl_fp_add4L,%function -mcl_fp_add4L: @ @mcl_fp_add4L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r11, lr} - push {r4, r5, r6, r7, r11, lr} - ldm r1, {r12, lr} - ldr r4, [r1, #8] - ldr r1, [r1, #12] - ldm r2, {r5, r6, r7} - ldr r2, [r2, #12] - adds r5, r5, r12 - adcs r6, r6, lr - adcs r7, r7, r4 - stm r0, {r5, r6, r7} - adcs r4, r2, r1 - mov r1, #0 - ldr r2, [r3] - adc lr, r1, #0 - str r4, [r0, #12] - ldmib r3, {r1, r12} - ldr r3, [r3, #12] - subs r5, r5, r2 - sbcs r2, r6, r1 - sbcs r1, r7, r12 - sbcs r12, r4, r3 - sbc r3, lr, #0 - tst r3, #1 - streq r5, [r0] - streq r2, [r0, #4] - streq r1, [r0, #8] - streq r12, [r0, #12] - pop {r4, r5, r6, r7, r11, lr} - mov pc, lr -.Lfunc_end59: - .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L - .cantunwind - .fnend 
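mcl_fp_add4L above is the template for the fixed-width add/sub helpers repeated throughout this file: an adds/adcs carry chain over the limbs, a trial subtraction of the modulus, and a flag-driven select (streq/movne) of whichever result is in range, with r0 apparently carrying the result pointer, r1/r2 the operands, and r3 the modulus. A minimal Go sketch of that pattern under those assumptions (fpAdd4L is an illustrative name; the 4 x 32-bit little-endian limb layout is read off the 4-byte offsets):

package main

import (
	"fmt"
	"math/bits"
)

// fpAdd4L mirrors the shape of mcl_fp_add4L: z = x + y mod p over
// 4 x 32-bit limbs, assuming x, y < p.
func fpAdd4L(z, x, y, p *[4]uint32) {
	var t, s [4]uint32
	var c, b uint32
	for i := 0; i < 4; i++ { // adds/adcs chain
		t[i], c = bits.Add32(x[i], y[i], c)
	}
	for i := 0; i < 4; i++ { // subs/sbcs trial subtraction of p
		s[i], b = bits.Sub32(t[i], p[i], b)
	}
	// Keep t - p when the sum carried out of 128 bits or t >= p (no borrow);
	// the asm folds carry and borrow into one flag word with sbc and uses
	// tst #1 / streq to select the stored result without a branch.
	if c == 1 || b == 0 {
		*z = s
	} else {
		*z = t
	}
}

func main() {
	p := [4]uint32{0xfffffffb, 0xffffffff, 0xffffffff, 0xffffffff} // e.g. 2^128 - 5
	x := [4]uint32{0xfffffff9, 0xffffffff, 0xffffffff, 0xffffffff} // p - 2
	y := [4]uint32{3, 0, 0, 0}
	var z [4]uint32
	fpAdd4L(&z, &x, &y, &p)
	fmt.Println(z) // [1 0 0 0] = (p-2+3) mod p
}

The same skeleton recurs at each operand size in this file (1L, 2L, 3L, 4L, ...); only the length of the unrolled carry chain changes.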
- - .globl mcl_fp_addNF4L - .align 2 - .type mcl_fp_addNF4L,%function -mcl_fp_addNF4L: @ @mcl_fp_addNF4L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, lr} - push {r4, r5, r6, r7, r8, lr} - ldm r1, {r12, lr} - ldr r4, [r1, #8] - ldr r1, [r1, #12] - ldm r2, {r5, r6, r7} - ldr r2, [r2, #12] - adds r5, r5, r12 - adcs r6, r6, lr - adcs r7, r7, r4 - adc r8, r2, r1 - ldm r3, {r2, r4, r12, lr} - subs r2, r5, r2 - sbcs r4, r6, r4 - sbcs r3, r7, r12 - sbc r1, r8, lr - cmp r1, #0 - movlt r2, r5 - movlt r4, r6 - movlt r3, r7 - cmp r1, #0 - movlt r1, r8 - stm r0, {r2, r4} - str r3, [r0, #8] - str r1, [r0, #12] - pop {r4, r5, r6, r7, r8, lr} - mov pc, lr -.Lfunc_end60: - .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L - .cantunwind - .fnend - - .globl mcl_fp_sub4L - .align 2 - .type mcl_fp_sub4L,%function -mcl_fp_sub4L: @ @mcl_fp_sub4L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, lr} - push {r4, r5, r6, r7, r8, lr} - ldm r2, {r12, lr} - ldr r4, [r2, #8] - ldr r5, [r2, #12] - ldm r1, {r2, r6, r7} - ldr r1, [r1, #12] - subs r8, r2, r12 - sbcs r2, r6, lr - str r8, [r0] - sbcs r12, r7, r4 - sbcs lr, r1, r5 - mov r1, #0 - sbc r1, r1, #0 - stmib r0, {r2, r12, lr} - tst r1, #1 - popeq {r4, r5, r6, r7, r8, lr} - moveq pc, lr - ldm r3, {r1, r4, r5} - ldr r3, [r3, #12] - adds r1, r1, r8 - adcs r2, r4, r2 - adcs r7, r5, r12 - adc r3, r3, lr - stm r0, {r1, r2, r7} - str r3, [r0, #12] - pop {r4, r5, r6, r7, r8, lr} - mov pc, lr -.Lfunc_end61: - .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L - .cantunwind - .fnend - - .globl mcl_fp_subNF4L - .align 2 - .type mcl_fp_subNF4L,%function -mcl_fp_subNF4L: @ @mcl_fp_subNF4L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, lr} - push {r4, r5, r6, r7, r8, lr} - ldm r2, {r12, lr} - ldr r4, [r2, #8] - ldr r2, [r2, #12] - ldm r1, {r5, r6, r7} - ldr r1, [r1, #12] - subs r5, r5, r12 - sbcs r6, r6, lr - sbcs r8, r7, r4 - sbc r1, r1, r2 - ldm r3, {r2, r4, r12, lr} - adds r2, r5, r2 - adcs r4, r6, r4 - adcs r3, r8, r12 - adc r7, r1, lr - cmp r1, #0 - movge r2, r5 - movge r4, r6 - movge r3, r8 - cmp r1, #0 - movge r7, r1 - stm r0, {r2, r4} - str r3, [r0, #8] - str r7, [r0, #12] - pop {r4, r5, r6, r7, r8, lr} - mov pc, lr -.Lfunc_end62: - .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L - .cantunwind - .fnend - - .globl mcl_fpDbl_add4L - .align 2 - .type mcl_fpDbl_add4L,%function -mcl_fpDbl_add4L: @ @mcl_fpDbl_add4L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #16 - sub sp, sp, #16 - ldm r1, {r8, r9, r10, r11} - ldr r7, [r1, #16] - str r7, [sp] @ 4-byte Spill - ldr r7, [r1, #20] - str r7, [sp, #4] @ 4-byte Spill - ldr r7, [r1, #24] - ldr r1, [r1, #28] - str r7, [sp, #8] @ 4-byte Spill - str r1, [sp, #12] @ 4-byte Spill - ldm r2, {r1, r6, r7, r12, lr} - ldr r4, [r2, #20] - ldr r5, [r2, #24] - ldr r2, [r2, #28] - adds r1, r1, r8 - adcs r6, r6, r9 - adcs r7, r7, r10 - adcs r12, r12, r11 - stm r0, {r1, r6, r7, r12} - mov r1, #0 - ldr r7, [sp] @ 4-byte Reload - ldr r6, [sp, #4] @ 4-byte Reload - adcs r7, lr, r7 - adcs r6, r4, r6 - ldr r4, [sp, #8] @ 4-byte Reload - adcs r8, r5, r4 - ldr r5, [sp, #12] @ 4-byte Reload - ldr r4, [r3] - adcs lr, r2, r5 - adc r12, r1, #0 - ldmib r3, {r1, r2, r3} - subs r4, r7, r4 - sbcs r1, r6, r1 - sbcs r2, r8, r2 - sbcs r3, lr, r3 - sbc r5, r12, #0 - ands r5, r5, #1 - movne r4, r7 - movne r1, r6 - movne r2, r8 - cmp r5, #0 - movne r3, lr - str r4, [r0, #16] - str r1, [r0, #20] - str r2, [r0, #24] - str r3, [r0, #28] - add sp, sp, #16 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} 
- mov pc, lr -.Lfunc_end63: - .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L - .cantunwind - .fnend - - .globl mcl_fpDbl_sub4L - .align 2 - .type mcl_fpDbl_sub4L,%function -mcl_fpDbl_sub4L: @ @mcl_fpDbl_sub4L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #16 - sub sp, sp, #16 - ldm r2, {r8, r9, r10, r11} - ldr r7, [r2, #16] - str r7, [sp] @ 4-byte Spill - ldr r7, [r2, #20] - str r7, [sp, #4] @ 4-byte Spill - ldr r7, [r2, #24] - ldr r2, [r2, #28] - str r7, [sp, #8] @ 4-byte Spill - str r2, [sp, #12] @ 4-byte Spill - ldm r1, {r2, r6, r7, r12, lr} - ldr r4, [r1, #20] - ldr r5, [r1, #24] - ldr r1, [r1, #28] - subs r2, r2, r8 - str r2, [r0] - sbcs r2, r6, r9 - ldr r6, [sp, #4] @ 4-byte Reload - str r2, [r0, #4] - sbcs r2, r7, r10 - ldr r7, [sp] @ 4-byte Reload - str r2, [r0, #8] - sbcs r2, r12, r11 - str r2, [r0, #12] - mov r2, #0 - sbcs r7, lr, r7 - sbcs r6, r4, r6 - ldr r4, [sp, #8] @ 4-byte Reload - sbcs r5, r5, r4 - ldr r4, [sp, #12] @ 4-byte Reload - sbcs lr, r1, r4 - ldr r4, [r3] - ldr r1, [r3, #8] - sbc r12, r2, #0 - ldr r2, [r3, #4] - ldr r3, [r3, #12] - adds r4, r7, r4 - adcs r2, r6, r2 - adcs r1, r5, r1 - adc r3, lr, r3 - ands r12, r12, #1 - moveq r4, r7 - moveq r2, r6 - moveq r1, r5 - cmp r12, #0 - moveq r3, lr - str r4, [r0, #16] - str r2, [r0, #20] - str r1, [r0, #24] - str r3, [r0, #28] - add sp, sp, #16 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end64: - .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L - .cantunwind - .fnend - - .globl mcl_fp_mulUnitPre5L - .align 2 - .type mcl_fp_mulUnitPre5L,%function -mcl_fp_mulUnitPre5L: @ @mcl_fp_mulUnitPre5L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, lr} - push {r4, r5, r6, r7, r8, r9, r10, lr} - ldr r12, [r1] - ldmib r1, {r3, lr} - ldr r10, [r1, #12] - ldr r8, [r1, #16] - umull r4, r9, lr, r2 - umull r1, r6, r12, r2 - mov r7, r6 - mov r5, r4 - umlal r7, r5, r3, r2 - stm r0, {r1, r7} - str r5, [r0, #8] - umull r5, r7, r3, r2 - umull r1, r12, r10, r2 - adds r3, r6, r5 - adcs r3, r7, r4 - adcs r1, r9, r1 - str r1, [r0, #12] - umull r1, r3, r8, r2 - adcs r1, r12, r1 - str r1, [r0, #16] - adc r1, r3, #0 - str r1, [r0, #20] - pop {r4, r5, r6, r7, r8, r9, r10, lr} - mov pc, lr -.Lfunc_end65: - .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L - .cantunwind - .fnend - - .globl mcl_fpDbl_mulPre5L - .align 2 - .type mcl_fpDbl_mulPre5L,%function -mcl_fpDbl_mulPre5L: @ @mcl_fpDbl_mulPre5L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #36 - sub sp, sp, #36 - str r2, [sp, #32] @ 4-byte Spill - ldr r3, [r2] - ldm r1, {r12, lr} - ldr r9, [r1, #8] - ldr r10, [r1, #12] - umull r5, r4, r12, r3 - umull r6, r7, lr, r3 - adds r6, r4, r6 - str r5, [sp, #24] @ 4-byte Spill - umull r5, r6, r9, r3 - adcs r7, r7, r5 - umlal r4, r5, lr, r3 - umull r7, r11, r10, r3 - adcs r6, r6, r7 - ldr r7, [r1, #16] - str r6, [sp, #28] @ 4-byte Spill - umull r6, r8, r7, r3 - ldr r3, [sp, #24] @ 4-byte Reload - adcs r11, r11, r6 - ldr r6, [r2, #4] - str r3, [r0] - umull r3, r2, r12, r6 - adc r12, r8, #0 - adds r8, r3, r4 - str r2, [sp, #24] @ 4-byte Spill - umull r3, r2, lr, r6 - str r2, [sp, #20] @ 4-byte Spill - ldr r2, [sp, #28] @ 4-byte Reload - adcs r5, r3, r5 - umull r3, lr, r10, r6 - umull r4, r10, r9, r6 - str r8, [r0, #4] - adcs r4, r4, r2 - umull r2, r9, r7, r6 - adcs r3, r3, r11 - adcs r7, r2, r12 - mov r2, #0 - adc r6, r2, #0 - ldr r2, [sp, #24] @ 4-byte Reload - adds 
r5, r5, r2 - ldr r2, [sp, #20] @ 4-byte Reload - adcs r11, r4, r2 - adcs r2, r3, r10 - ldr r3, [sp, #32] @ 4-byte Reload - str r2, [sp, #16] @ 4-byte Spill - adcs r2, r7, lr - ldr r7, [r1] - str r2, [sp, #8] @ 4-byte Spill - adc r2, r6, r9 - str r2, [sp, #24] @ 4-byte Spill - ldr r2, [r3, #8] - str r7, [sp, #28] @ 4-byte Spill - ldmib r1, {r8, lr} - ldr r6, [r1, #12] - umull r12, r4, r7, r2 - adds r7, r12, r5 - str r4, [sp, #12] @ 4-byte Spill - ldr r12, [r1, #16] - str r7, [sp, #20] @ 4-byte Spill - umull r5, r7, r8, r2 - str r7, [sp, #4] @ 4-byte Spill - adcs r10, r5, r11 - umull r5, r7, lr, r2 - str r7, [sp] @ 4-byte Spill - ldr r7, [sp, #16] @ 4-byte Reload - adcs r9, r5, r7 - umull r4, r7, r6, r2 - mov r5, #0 - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [sp, #8] @ 4-byte Reload - adcs r4, r4, r7 - umull r11, r7, r12, r2 - ldr r2, [sp, #24] @ 4-byte Reload - adcs r2, r11, r2 - adc r11, r5, #0 - ldr r5, [sp, #12] @ 4-byte Reload - adds r5, r10, r5 - str r5, [sp, #12] @ 4-byte Spill - ldr r5, [sp, #4] @ 4-byte Reload - adcs r5, r9, r5 - str r5, [sp, #8] @ 4-byte Spill - ldr r5, [sp] @ 4-byte Reload - adcs r4, r4, r5 - ldr r5, [sp, #16] @ 4-byte Reload - adcs r10, r2, r5 - adc r2, r11, r7 - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [sp, #20] @ 4-byte Reload - str r2, [r0, #8] - ldr r2, [r3, #12] - umull r11, r3, r6, r2 - str r3, [sp, #20] @ 4-byte Spill - umull r6, r3, lr, r2 - umull lr, r9, r8, r2 - str r3, [sp, #24] @ 4-byte Spill - ldr r3, [sp, #28] @ 4-byte Reload - umull r7, r8, r3, r2 - ldr r3, [sp, #12] @ 4-byte Reload - adds r3, r7, r3 - str r3, [sp, #28] @ 4-byte Spill - ldr r3, [sp, #8] @ 4-byte Reload - adcs r5, lr, r3 - mov r3, #0 - adcs r6, r6, r4 - umull r4, lr, r12, r2 - ldr r2, [sp, #16] @ 4-byte Reload - adcs r7, r11, r10 - adcs r2, r4, r2 - adc r3, r3, #0 - adds r10, r5, r8 - adcs r11, r6, r9 - ldr r6, [sp, #24] @ 4-byte Reload - adcs r7, r7, r6 - str r7, [sp, #24] @ 4-byte Spill - ldr r7, [sp, #20] @ 4-byte Reload - adcs r2, r2, r7 - str r2, [sp, #20] @ 4-byte Spill - ldr r2, [sp, #28] @ 4-byte Reload - str r2, [r0, #12] - adc r2, r3, lr - ldr r3, [r1] - str r2, [sp, #28] @ 4-byte Spill - ldr r2, [sp, #32] @ 4-byte Reload - ldr r4, [r2, #16] - ldmib r1, {r2, r5, r6} - ldr r1, [r1, #16] - umull lr, r9, r6, r4 - umull r6, r8, r5, r4 - umull r5, r7, r2, r4 - umull r2, r12, r3, r4 - adds r10, r2, r10 - ldr r2, [sp, #24] @ 4-byte Reload - adcs r3, r5, r11 - str r10, [r0, #16] - adcs r5, r6, r2 - ldr r2, [sp, #20] @ 4-byte Reload - adcs r6, lr, r2 - umull r2, lr, r1, r4 - ldr r1, [sp, #28] @ 4-byte Reload - adcs r1, r2, r1 - mov r2, #0 - adc r2, r2, #0 - adds r3, r3, r12 - adcs r7, r5, r7 - str r3, [r0, #20] - adcs r6, r6, r8 - str r7, [r0, #24] - adcs r1, r1, r9 - str r6, [r0, #28] - adc r2, r2, lr - str r1, [r0, #32] - str r2, [r0, #36] - add sp, sp, #36 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end66: - .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L - .cantunwind - .fnend - - .globl mcl_fpDbl_sqrPre5L - .align 2 - .type mcl_fpDbl_sqrPre5L,%function -mcl_fpDbl_sqrPre5L: @ @mcl_fpDbl_sqrPre5L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #32 - sub sp, sp, #32 - ldm r1, {r2, r3, r12} - ldr lr, [r1, #16] - ldr r9, [r1, #12] - umull r5, r6, r2, r2 - umull r7, r11, r3, r2 - str r5, [r0] - umull r5, r4, lr, r2 - adds r8, r6, r7 - str r5, [sp, #24] @ 4-byte Spill - umull r5, r10, r12, r2 - str r4, [sp, #28] @ 4-byte Spill - adcs r4, r11, r5 - umlal r6, r5, r3, r2 - umull 
r4, r8, r9, r2 - adcs r10, r10, r4 - ldr r4, [sp, #24] @ 4-byte Reload - adcs r8, r8, r4 - ldr r4, [sp, #28] @ 4-byte Reload - adc r4, r4, #0 - str r4, [sp, #24] @ 4-byte Spill - umull r2, r4, r3, r3 - str r4, [sp, #28] @ 4-byte Spill - adds r4, r7, r6 - str r4, [sp, #16] @ 4-byte Spill - adcs r5, r2, r5 - umull r2, r4, r12, r3 - str r4, [sp, #12] @ 4-byte Spill - adcs r4, r2, r10 - umull r2, r6, r9, r3 - adcs r2, r2, r8 - umull r7, r8, lr, r3 - ldr r3, [sp, #24] @ 4-byte Reload - adcs r7, r7, r3 - mov r3, #0 - adc r3, r3, #0 - adds r5, r5, r11 - str r5, [sp, #24] @ 4-byte Spill - ldr r5, [sp, #28] @ 4-byte Reload - adcs r4, r4, r5 - str r4, [sp, #20] @ 4-byte Spill - ldr r4, [sp, #16] @ 4-byte Reload - str r4, [r0, #4] - ldr r4, [sp, #12] @ 4-byte Reload - adcs r2, r2, r4 - str r2, [sp, #12] @ 4-byte Spill - adcs r2, r7, r6 - str r2, [sp, #8] @ 4-byte Spill - adc r2, r3, r8 - str r2, [sp, #4] @ 4-byte Spill - umull r11, r2, lr, r12 - umull lr, r10, r12, r12 - str r2, [sp, #28] @ 4-byte Spill - ldm r1, {r4, r6} - ldr r2, [r1, #12] - ldr r7, [sp, #24] @ 4-byte Reload - umull r8, r3, r2, r12 - str r3, [sp, #16] @ 4-byte Spill - umull r5, r3, r6, r12 - str r3, [sp] @ 4-byte Spill - umull r3, r9, r4, r12 - adds r3, r3, r7 - str r3, [sp, #24] @ 4-byte Spill - ldr r3, [sp, #20] @ 4-byte Reload - adcs r5, r5, r3 - ldr r3, [sp, #12] @ 4-byte Reload - adcs r12, lr, r3 - ldr r3, [sp, #8] @ 4-byte Reload - adcs r7, r8, r3 - ldr r3, [sp, #4] @ 4-byte Reload - adcs lr, r11, r3 - mov r3, #0 - adc r11, r3, #0 - ldr r3, [sp] @ 4-byte Reload - adds r5, r5, r9 - adcs r12, r12, r3 - ldr r3, [sp, #16] @ 4-byte Reload - adcs r9, r7, r10 - ldr r7, [sp, #28] @ 4-byte Reload - adcs r8, lr, r3 - adc r11, r11, r7 - umull r7, r3, r4, r2 - adds r7, r7, r5 - str r3, [sp, #20] @ 4-byte Spill - umull r5, r3, r6, r2 - ldr r6, [r1, #8] - str r3, [sp, #16] @ 4-byte Spill - adcs r10, r5, r12 - ldr r3, [sp, #24] @ 4-byte Reload - ldr r5, [r1, #16] - str r7, [sp, #28] @ 4-byte Spill - umull r4, lr, r6, r2 - adcs r12, r4, r9 - ldr r4, [sp, #20] @ 4-byte Reload - umull r7, r9, r2, r2 - str r3, [r0, #8] - adcs r7, r7, r8 - umull r3, r8, r5, r2 - adcs r2, r3, r11 - mov r3, #0 - adc r3, r3, #0 - adds r11, r10, r4 - ldr r4, [sp, #16] @ 4-byte Reload - adcs r4, r12, r4 - adcs r10, r7, lr - adcs r12, r2, r9 - ldr r2, [sp, #28] @ 4-byte Reload - adc r8, r3, r8 - ldr r3, [r1] - str r2, [r0, #12] - ldr r2, [r1, #4] - ldr r1, [r1, #12] - umull r7, r9, r3, r5 - adds lr, r7, r11 - str lr, [r0, #16] - umull r7, r11, r2, r5 - adcs r2, r7, r4 - umull r4, r7, r6, r5 - adcs r4, r4, r10 - umull r6, r10, r1, r5 - adcs r1, r6, r12 - umull r6, r3, r5, r5 - mov r5, #0 - adcs r6, r6, r8 - adc r5, r5, #0 - adds r2, r2, r9 - adcs r4, r4, r11 - str r2, [r0, #20] - adcs r1, r1, r7 - str r4, [r0, #24] - adcs r7, r6, r10 - str r1, [r0, #28] - adc r3, r5, r3 - str r7, [r0, #32] - str r3, [r0, #36] - add sp, sp, #32 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end67: - .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L - .cantunwind - .fnend - - .globl mcl_fp_mont5L - .align 2 - .type mcl_fp_mont5L,%function -mcl_fp_mont5L: @ @mcl_fp_mont5L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #100 - sub sp, sp, #100 - str r0, [sp, #52] @ 4-byte Spill - mov r0, r2 - str r2, [sp, #48] @ 4-byte Spill - ldm r0, {r2, r8} - ldr r7, [r0, #8] - ldr r0, [r0, #12] - ldr r6, [r3, #-4] - ldr r5, [r3, #8] - ldr r9, [r3] - ldr r11, [r1, #8] - ldr r12, [r1, #12] - str r0, [sp, 
#44] @ 4-byte Spill - ldr r0, [r1] - str r7, [sp, #40] @ 4-byte Spill - ldr r7, [r1, #4] - ldr r1, [r1, #16] - str r6, [sp, #84] @ 4-byte Spill - str r5, [sp, #88] @ 4-byte Spill - str r9, [sp, #80] @ 4-byte Spill - str r11, [sp, #60] @ 4-byte Spill - str r12, [sp, #56] @ 4-byte Spill - umull r4, lr, r0, r2 - str r0, [sp, #72] @ 4-byte Spill - str r7, [sp, #96] @ 4-byte Spill - ldr r7, [r3, #4] - str r1, [sp, #64] @ 4-byte Spill - mul r0, r4, r6 - str r4, [sp, #36] @ 4-byte Spill - umull r6, r4, r0, r5 - str r4, [sp, #28] @ 4-byte Spill - umull r4, r5, r0, r9 - mov r10, r6 - mov r9, r5 - str r4, [sp, #32] @ 4-byte Spill - str r7, [sp, #76] @ 4-byte Spill - str r5, [sp, #12] @ 4-byte Spill - mov r4, r7 - umlal r9, r10, r0, r7 - umull r7, r5, r1, r2 - str r7, [sp, #68] @ 4-byte Spill - ldr r7, [sp, #96] @ 4-byte Reload - str r5, [sp, #92] @ 4-byte Spill - umull r5, r1, r12, r2 - str r1, [sp, #20] @ 4-byte Spill - str r5, [sp, #24] @ 4-byte Spill - umull r12, r1, r11, r2 - umull r11, r5, r7, r2 - adds r7, lr, r11 - adcs r5, r5, r12 - ldr r5, [sp, #24] @ 4-byte Reload - adcs r1, r1, r5 - ldr r5, [sp, #20] @ 4-byte Reload - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #68] @ 4-byte Reload - adcs r1, r5, r1 - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #92] @ 4-byte Reload - adc r1, r1, #0 - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [r3, #16] - str r1, [sp, #68] @ 4-byte Spill - umull r7, r11, r0, r1 - ldr r1, [r3, #12] - umull r3, r5, r0, r4 - ldr r4, [sp, #12] @ 4-byte Reload - adds r3, r4, r3 - str r1, [sp, #92] @ 4-byte Spill - umull r3, r4, r0, r1 - adcs r0, r5, r6 - mov r1, #0 - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r3 - adcs r3, r4, r7 - ldr r7, [sp, #96] @ 4-byte Reload - ldr r4, [sp, #32] @ 4-byte Reload - adc r5, r11, #0 - umlal lr, r12, r7, r2 - ldr r2, [sp, #36] @ 4-byte Reload - adds r2, r4, r2 - adcs r2, r9, lr - ldr r9, [sp, #64] @ 4-byte Reload - str r2, [sp, #36] @ 4-byte Spill - adcs r2, r10, r12 - ldr r10, [sp, #72] @ 4-byte Reload - str r2, [sp, #32] @ 4-byte Spill - ldr r2, [sp, #24] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r5, r0 - umull r5, lr, r8, r9 - str r0, [sp, #20] @ 4-byte Spill - adc r0, r1, #0 - umull r6, r1, r8, r7 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - umull r12, r4, r8, r0 - ldr r0, [sp, #60] @ 4-byte Reload - umull r3, r2, r8, r0 - umull r11, r0, r8, r10 - ldr r10, [sp, #68] @ 4-byte Reload - adds r6, r0, r6 - adcs r1, r1, r3 - umlal r0, r3, r8, r7 - ldr r7, [sp, #36] @ 4-byte Reload - adcs r1, r2, r12 - adcs r2, r4, r5 - adc r6, lr, #0 - adds r8, r7, r11 - ldr r7, [sp, #32] @ 4-byte Reload - adcs r11, r7, r0 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r7, [sp, #92] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #24] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - mul r4, r8, r0 - ldr r0, [sp, #88] @ 4-byte Reload - umull r6, r1, r4, r0 - ldr r0, [sp, #80] @ 4-byte Reload - str r1, [sp, #12] @ 4-byte Spill - umull r1, r5, r4, r0 - mov r0, r6 - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - mov 
r3, r5 - umull r12, lr, r4, r1 - umlal r3, r0, r4, r1 - umull r1, r2, r4, r7 - ldr r7, [sp, #12] @ 4-byte Reload - adds r5, r5, r12 - adcs r6, lr, r6 - umull r5, r12, r4, r10 - adcs r1, r7, r1 - ldr r7, [sp, #16] @ 4-byte Reload - adcs r2, r2, r5 - adc r6, r12, #0 - adds r7, r7, r8 - ldr r8, [sp, #60] @ 4-byte Reload - adcs r3, r3, r11 - ldr r11, [sp, #72] @ 4-byte Reload - str r3, [sp, #16] @ 4-byte Spill - ldr r3, [sp, #36] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r1, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - umull r2, r1, r0, r9 - ldr r9, [sp, #56] @ 4-byte Reload - umull r3, r12, r0, r8 - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp, #96] @ 4-byte Reload - str r2, [sp, #4] @ 4-byte Spill - mov r2, r0 - umull r4, r5, r0, r9 - umull r6, r7, r0, r1 - umull lr, r0, r2, r11 - adds r6, r0, r6 - str lr, [sp, #8] @ 4-byte Spill - adcs r6, r7, r3 - ldr r7, [sp, #4] @ 4-byte Reload - umlal r0, r3, r2, r1 - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #8] @ 4-byte Reload - adcs r12, r12, r4 - adcs r4, r5, r7 - ldr r7, [sp, #12] @ 4-byte Reload - adc r7, r7, #0 - adds r2, r1, r2 - ldr r1, [sp, #36] @ 4-byte Reload - str r2, [sp] @ 4-byte Spill - adcs r0, r1, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #24] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - mul r4, r2, r0 - ldr r0, [sp, #88] @ 4-byte Reload - umull r5, r1, r4, r0 - ldr r0, [sp, #80] @ 4-byte Reload - str r1, [sp, #12] @ 4-byte Spill - mov r2, r5 - umull r1, r7, r4, r0 - ldr r0, [sp, #76] @ 4-byte Reload - str r1, [sp, #16] @ 4-byte Spill - umull r6, r1, r4, r10 - mov r3, r7 - str r1, [sp, #8] @ 4-byte Spill - ldr r1, [sp, #92] @ 4-byte Reload - str r6, [sp, #4] @ 4-byte Spill - umlal r3, r2, r4, r0 - umull r12, lr, r4, r1 - umull r10, r1, r4, r0 - ldr r0, [sp, #12] @ 4-byte Reload - adds r4, r7, r10 - adcs r1, r1, r5 - ldr r4, [sp, #64] @ 4-byte Reload - ldr r1, [sp] @ 4-byte Reload - adcs r10, r0, r12 - ldr r0, [sp, #4] @ 4-byte Reload - adcs r12, lr, r0 - ldr r0, [sp, #8] @ 4-byte Reload - adc lr, r0, #0 - ldr r0, [sp, #16] @ 4-byte Reload - adds r6, r0, r1 - ldr r0, [sp, #44] @ 4-byte Reload - umull r5, r1, r0, r4 - mov r6, r0 - str r1, [sp, #16] @ 4-byte Spill - umull r4, r1, r0, r9 - str r5, [sp, #8] @ 4-byte Spill - umull r5, r9, r0, r8 - str r1, [sp, #4] @ 4-byte Spill - ldr r1, [sp, #96] @ 4-byte Reload - str r4, [sp] @ 4-byte Spill - umull r4, r8, r0, r1 - umull r7, r0, r6, r11 - str r7, [sp, #12] @ 4-byte Spill - ldr r7, [sp, #40] @ 4-byte Reload - adcs r11, r3, r7 - ldr r3, [sp, #36] @ 4-byte Reload - adcs r2, r2, r3 - ldr r3, [sp, #32] @ 4-byte Reload - str r2, [sp, #40] @ 4-byte Spill - adcs r10, r10, r3 - ldr r3, [sp, #28] @ 4-byte Reload - adcs r12, r12, r3 - ldr r3, [sp, #24] @ 4-byte Reload - adcs r7, lr, r3 - ldr r3, [sp, #20] @ 4-byte Reload - adc 
r2, r3, #0 - adds r4, r0, r4 - ldr r3, [sp, #4] @ 4-byte Reload - str r2, [sp, #32] @ 4-byte Spill - ldr r2, [sp] @ 4-byte Reload - adcs r4, r8, r5 - umlal r0, r5, r6, r1 - ldr r1, [sp, #12] @ 4-byte Reload - adcs r4, r9, r2 - ldr r2, [sp, #8] @ 4-byte Reload - adcs r3, r3, r2 - ldr r2, [sp, #16] @ 4-byte Reload - adc r8, r2, #0 - adds lr, r11, r1 - ldr r1, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #76] @ 4-byte Reload - adcs r9, r10, r5 - str r0, [sp, #44] @ 4-byte Spill - ldr r10, [sp, #92] @ 4-byte Reload - adcs r0, r12, r4 - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r7, r3 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #32] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - mul r4, lr, r0 - ldr r0, [sp, #88] @ 4-byte Reload - umull r12, r3, r4, r1 - umull r7, r11, r4, r0 - ldr r0, [sp, #80] @ 4-byte Reload - umull r8, r6, r4, r0 - mov r0, r7 - mov r5, r6 - adds r6, r6, r12 - umlal r5, r0, r4, r1 - ldr r1, [sp, #68] @ 4-byte Reload - adcs r3, r3, r7 - umull r6, r12, r4, r1 - umull r1, r2, r4, r10 - adcs r1, r11, r1 - adcs r2, r2, r6 - adc r3, r12, #0 - adds r7, r8, lr - ldr r7, [sp, #44] @ 4-byte Reload - adcs r7, r5, r7 - adcs r0, r0, r9 - ldr r9, [sp, #72] @ 4-byte Reload - str r7, [sp, #44] @ 4-byte Spill - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [sp, #96] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - ldr r5, [r0, #16] - ldr r0, [sp, #64] @ 4-byte Reload - umull r4, r8, r5, r0 - ldr r0, [sp, #56] @ 4-byte Reload - umull r7, r1, r5, r2 - umull r12, lr, r5, r0 - ldr r0, [sp, #60] @ 4-byte Reload - umull r6, r3, r5, r0 - umull r11, r0, r5, r9 - ldr r9, [sp, #76] @ 4-byte Reload - adds r7, r0, r7 - adcs r1, r1, r6 - umlal r0, r6, r5, r2 - ldr r2, [sp, #44] @ 4-byte Reload - adcs r1, r3, r12 - ldr r12, [sp, #80] @ 4-byte Reload - adcs r4, lr, r4 - ldr lr, [sp, #88] @ 4-byte Reload - adc r3, r8, #0 - adds r7, r2, r11 - ldr r2, [sp, #24] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [sp, #68] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r11, r0, r6 - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - mul r4, r7, r0 - umull r0, r1, r4, r9 - umull r8, r3, r4, r12 - adds r0, r3, r0 - umull r5, r0, r4, lr - adcs r1, r1, r5 - umlal r3, r5, r4, r9 - umull r1, r6, r4, r10 - adcs r10, r0, r1 - umull r1, r0, r4, r2 - mov r4, r9 - adcs r1, r6, r1 - ldr r6, [sp, #96] @ 4-byte Reload - adc r0, r0, #0 - adds r7, r8, r7 - adcs r3, r3, r6 - adcs r7, r5, r11 - ldr r5, [sp, #72] @ 4-byte Reload - adcs r11, r10, r5 - ldr r5, [sp, #64] @ 4-byte Reload - adcs r8, r1, r5 - ldr r1, [sp, #60] @ 4-byte Reload - adcs r10, r0, r1 - ldr r0, [sp, #56] @ 4-byte Reload - ldr r1, [sp, #92] @ 4-byte Reload - adc r9, r0, #0 - subs r5, r3, r12 - sbcs r4, r7, r4 - sbcs 
r0, r11, lr - sbcs r6, r8, r1 - sbcs r1, r10, r2 - sbc r2, r9, #0 - ands r2, r2, #1 - movne r5, r3 - ldr r3, [sp, #52] @ 4-byte Reload - movne r4, r7 - movne r0, r11 - cmp r2, #0 - movne r6, r8 - movne r1, r10 - str r5, [r3] - str r4, [r3, #4] - str r0, [r3, #8] - str r6, [r3, #12] - str r1, [r3, #16] - add sp, sp, #100 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end68: - .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L - .cantunwind - .fnend - - .globl mcl_fp_montNF5L - .align 2 - .type mcl_fp_montNF5L,%function -mcl_fp_montNF5L: @ @mcl_fp_montNF5L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #76 - sub sp, sp, #76 - str r2, [sp, #24] @ 4-byte Spill - str r0, [sp, #28] @ 4-byte Spill - ldm r2, {r4, r9, r10} - ldr r6, [r1, #4] - ldr r0, [r2, #12] - ldr r7, [r1] - ldr r5, [r1, #8] - ldr lr, [r3, #8] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [r1, #12] - str r6, [sp, #32] @ 4-byte Spill - umull r2, r8, r6, r4 - mov r11, r6 - umull r6, r12, r7, r4 - str r7, [sp, #56] @ 4-byte Spill - str r5, [sp, #48] @ 4-byte Spill - str lr, [sp, #36] @ 4-byte Spill - adds r7, r12, r2 - umull r2, r7, r5, r4 - adcs r5, r8, r2 - umlal r12, r2, r11, r4 - umull r5, r8, r0, r4 - str r0, [sp, #68] @ 4-byte Spill - adcs r0, r7, r5 - ldr r5, [r3, #4] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [r1, #16] - str r5, [sp, #60] @ 4-byte Spill - umull r1, r7, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - adcs r0, r8, r1 - ldr r1, [r3] - str r0, [sp, #16] @ 4-byte Spill - adc r0, r7, #0 - ldr r7, [r3, #-4] - str r0, [sp, #12] @ 4-byte Spill - str r1, [sp, #40] @ 4-byte Spill - mul r0, r6, r7 - str r7, [sp, #72] @ 4-byte Spill - umull r8, r7, r0, r1 - ldr r1, [r3, #12] - ldr r3, [r3, #16] - adds r6, r8, r6 - umull r4, r8, r0, r5 - str r7, [sp, #8] @ 4-byte Spill - umull r5, r7, r0, lr - ldr lr, [sp, #64] @ 4-byte Reload - adcs r6, r4, r12 - adcs r5, r5, r2 - str r1, [sp, #52] @ 4-byte Spill - umull r2, r4, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r3, [sp, #44] @ 4-byte Spill - adcs r2, r2, r1 - umull r12, r1, r0, r3 - ldr r0, [sp, #16] @ 4-byte Reload - ldr r3, [sp, #12] @ 4-byte Reload - adcs r0, r12, r0 - adc r12, r3, #0 - ldr r3, [sp, #8] @ 4-byte Reload - adds r6, r6, r3 - adcs r3, r5, r8 - ldr r8, [sp, #56] @ 4-byte Reload - adcs r2, r2, r7 - str r3, [sp, #16] @ 4-byte Spill - adcs r0, r0, r4 - umull r7, r4, r9, r11 - str r2, [sp, #12] @ 4-byte Spill - str r0, [sp, #8] @ 4-byte Spill - adc r0, r12, r1 - ldr r12, [sp, #68] @ 4-byte Reload - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - umull r5, r1, r9, r8 - adds r7, r1, r7 - umull r2, r7, r9, r0 - adcs r4, r4, r2 - umlal r1, r2, r9, r11 - ldr r11, [sp, #44] @ 4-byte Reload - umull r4, r0, r9, r12 - adcs r4, r7, r4 - umull r7, r3, r9, lr - ldr r9, [sp, #36] @ 4-byte Reload - adcs r0, r0, r7 - adc r3, r3, #0 - adds r7, r5, r6 - ldr r5, [sp, #16] @ 4-byte Reload - adcs r1, r1, r5 - ldr r5, [sp, #12] @ 4-byte Reload - adcs r2, r2, r5 - ldr r5, [sp, #8] @ 4-byte Reload - adcs r6, r4, r5 - ldr r4, [sp, #4] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #16] @ 4-byte Spill - adc r0, r3, #0 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - mul r5, r7, r0 - ldr r0, [sp, #40] @ 4-byte Reload - umull r4, r3, r5, r0 - ldr r0, [sp, #60] @ 4-byte Reload - adds r7, r4, r7 - ldr r4, [sp, #52] @ 4-byte Reload - str r3, [sp, #8] @ 4-byte Spill - umull r7, r3, r5, r0 - adcs r1, r7, r1 - umull r7, r0, r5, r9 - str r3, [sp, #4] @ 4-byte 
Spill - ldr r3, [sp, #8] @ 4-byte Reload - str r0, [sp] @ 4-byte Spill - adcs r2, r7, r2 - umull r7, r0, r5, r4 - adcs r6, r7, r6 - umull r7, r4, r5, r11 - ldr r5, [sp, #16] @ 4-byte Reload - adcs r7, r7, r5 - ldr r5, [sp, #12] @ 4-byte Reload - adc r5, r5, #0 - adds r1, r1, r3 - ldr r3, [sp, #48] @ 4-byte Reload - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #4] @ 4-byte Reload - adcs r1, r2, r1 - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp] @ 4-byte Reload - adcs r1, r6, r1 - adcs r0, r7, r0 - ldr r7, [sp, #32] @ 4-byte Reload - str r1, [sp, #8] @ 4-byte Spill - adc r11, r5, r4 - str r0, [sp, #4] @ 4-byte Spill - umull r4, r0, r10, r8 - ldr r8, [sp, #60] @ 4-byte Reload - umull r6, r5, r10, r7 - adds r6, r0, r6 - umull r1, r6, r10, r3 - adcs r5, r5, r1 - umlal r0, r1, r10, r7 - umull r5, r2, r10, r12 - adcs r12, r6, r5 - umull r6, r5, r10, lr - mov lr, r7 - adcs r2, r2, r6 - ldr r6, [sp, #16] @ 4-byte Reload - adc r5, r5, #0 - adds r6, r4, r6 - ldr r4, [sp, #12] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #8] @ 4-byte Reload - adcs r1, r1, r4 - ldr r4, [sp, #4] @ 4-byte Reload - adcs r10, r12, r4 - adcs r2, r2, r11 - ldr r11, [sp, #40] @ 4-byte Reload - str r2, [sp, #8] @ 4-byte Spill - adc r2, r5, #0 - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [sp, #72] @ 4-byte Reload - mul r7, r6, r2 - umull r4, r2, r7, r11 - adds r6, r4, r6 - str r2, [sp, #12] @ 4-byte Spill - umull r6, r2, r7, r8 - str r2, [sp, #4] @ 4-byte Spill - adcs r0, r6, r0 - umull r6, r2, r7, r9 - ldr r9, [sp, #52] @ 4-byte Reload - adcs r1, r6, r1 - str r2, [sp] @ 4-byte Spill - ldr r2, [sp, #8] @ 4-byte Reload - umull r6, r12, r7, r9 - adcs r5, r6, r10 - ldr r10, [sp, #44] @ 4-byte Reload - umull r6, r4, r7, r10 - adcs r7, r6, r2 - ldr r6, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - adc r6, r6, #0 - adds r0, r0, r2 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #4] @ 4-byte Reload - adcs r0, r1, r0 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #8] @ 4-byte Spill - adcs r0, r7, r12 - ldr r7, [sp, #20] @ 4-byte Reload - str r0, [sp, #4] @ 4-byte Spill - adc r0, r6, r4 - str r0, [sp] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - umull r1, r5, r7, r3 - mov r6, r1 - umull r4, r2, r7, r0 - mov r0, lr - mov r12, r2 - umull r3, lr, r7, r0 - umlal r12, r6, r7, r0 - ldr r0, [sp, #68] @ 4-byte Reload - adds r2, r2, r3 - adcs r1, lr, r1 - umull r1, r2, r7, r0 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r1, r5, r1 - umull r3, r5, r7, r0 - ldr r0, [sp, #16] @ 4-byte Reload - adcs r2, r2, r3 - adc r3, r5, #0 - ldr r5, [sp, #8] @ 4-byte Reload - adds r7, r4, r0 - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r12, r0 - adcs r6, r6, r5 - ldr r5, [sp, #4] @ 4-byte Reload - adcs r1, r1, r5 - ldr r5, [sp] @ 4-byte Reload - adcs r2, r2, r5 - str r2, [sp, #20] @ 4-byte Spill - adc r2, r3, #0 - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [sp, #72] @ 4-byte Reload - mul r5, r7, r2 - ldr r2, [sp, #36] @ 4-byte Reload - umull r4, lr, r5, r11 - adds r7, r4, r7 - umull r7, r12, r5, r8 - adcs r0, r7, r0 - umull r7, r3, r5, r2 - adcs r6, r7, r6 - umull r7, r2, r5, r9 - adcs r1, r7, r1 - umull r7, r4, r5, r10 - ldr r5, [sp, #20] @ 4-byte Reload - adcs r7, r7, r5 - ldr r5, [sp, #16] @ 4-byte Reload - adc r5, r5, #0 - adds r0, r0, lr - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r10, r6, r12 - adcs lr, r1, r3 - adcs r8, r7, r2 - adc r9, r5, r4 - ldr r4, [sp, #32] @ 4-byte Reload - ldr r7, [r0, #16] - ldr r0, [sp, #48] @ 4-byte Reload 
- umull r3, r11, r7, r0 - ldr r0, [sp, #56] @ 4-byte Reload - mov r5, r3 - umull r12, r2, r7, r0 - umull r6, r0, r7, r4 - mov r1, r2 - adds r2, r2, r6 - ldr r6, [sp, #64] @ 4-byte Reload - adcs r0, r0, r3 - ldr r3, [sp, #68] @ 4-byte Reload - umlal r1, r5, r7, r4 - umull r0, r2, r7, r3 - umull r3, r4, r7, r6 - adcs r0, r11, r0 - adcs r2, r2, r3 - adc r3, r4, #0 - ldr r4, [sp, #20] @ 4-byte Reload - adds r7, r12, r4 - ldr r12, [sp, #60] @ 4-byte Reload - adcs r1, r1, r10 - adcs r6, r5, lr - adcs r11, r0, r8 - ldr r8, [sp, #40] @ 4-byte Reload - adcs r0, r2, r9 - ldr r2, [sp, #52] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - adc r0, r3, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - mul r5, r7, r0 - umull r4, r0, r5, r8 - umull r3, lr, r5, r12 - adds r7, r4, r7 - ldr r4, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - adcs r1, r3, r1 - ldr r9, [sp, #72] @ 4-byte Reload - umull r7, r0, r5, r4 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r3, r7, r6 - umull r6, r10, r5, r2 - adcs r7, r6, r11 - umull r6, r11, r5, r0 - ldr r5, [sp, #68] @ 4-byte Reload - adcs r6, r6, r5 - ldr r5, [sp, #64] @ 4-byte Reload - adc r5, r5, #0 - adds r1, r1, r9 - adcs lr, r3, lr - ldr r3, [sp, #56] @ 4-byte Reload - adcs r9, r7, r3 - adcs r10, r6, r10 - adc r11, r5, r11 - subs r6, r1, r8 - sbcs r5, lr, r12 - sbcs r4, r9, r4 - sbcs r7, r10, r2 - sbc r3, r11, r0 - asr r0, r3, #31 - cmp r0, #0 - movlt r6, r1 - ldr r1, [sp, #28] @ 4-byte Reload - movlt r5, lr - movlt r4, r9 - cmp r0, #0 - movlt r7, r10 - movlt r3, r11 - str r6, [r1] - str r5, [r1, #4] - str r4, [r1, #8] - str r7, [r1, #12] - str r3, [r1, #16] - add sp, sp, #76 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end69: - .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L - .cantunwind - .fnend - - .globl mcl_fp_montRed5L - .align 2 - .type mcl_fp_montRed5L,%function -mcl_fp_montRed5L: @ @mcl_fp_montRed5L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #84 - sub sp, sp, #84 - ldr r6, [r1, #4] - ldr r9, [r2, #-4] - ldr r4, [r1] - ldr r8, [r2, #8] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [r2] - ldr r10, [r2, #4] - str r6, [sp, #48] @ 4-byte Spill - ldr r6, [r1, #8] - mul r5, r4, r9 - str r4, [sp, #24] @ 4-byte Spill - str r0, [sp, #72] @ 4-byte Spill - str r9, [sp, #64] @ 4-byte Spill - str r8, [sp, #68] @ 4-byte Spill - umull lr, r4, r5, r8 - str r4, [sp, #40] @ 4-byte Spill - umull r4, r3, r5, r0 - mov r12, lr - str r4, [sp, #44] @ 4-byte Spill - ldr r4, [r2, #16] - ldr r2, [r2, #12] - mov r0, r3 - str r6, [sp, #56] @ 4-byte Spill - ldr r6, [r1, #12] - umlal r0, r12, r5, r10 - str r4, [sp, #76] @ 4-byte Spill - str r2, [sp, #80] @ 4-byte Spill - str r6, [sp, #52] @ 4-byte Spill - umull r7, r6, r5, r4 - str r6, [sp, #28] @ 4-byte Spill - umull r4, r6, r5, r2 - umull r11, r2, r5, r10 - str r7, [sp, #32] @ 4-byte Spill - adds r3, r3, r11 - ldr r11, [r1, #36] - adcs r2, r2, lr - ldr r3, [sp, #24] @ 4-byte Reload - add lr, r1, #16 - ldr r2, [sp, #40] @ 4-byte Reload - adcs r2, r2, r4 - str r2, [sp, #36] @ 4-byte Spill - ldr r2, [sp, #32] @ 4-byte Reload - adcs r2, r6, r2 - str r2, [sp, #32] @ 4-byte Spill - ldr r2, [sp, #28] @ 4-byte Reload - adc r2, r2, #0 - str r2, [sp, #28] @ 4-byte Spill - ldr r2, [sp, #44] @ 4-byte Reload - adds r5, r3, r2 - ldr r2, [sp, #48] @ 4-byte Reload - ldr r3, [sp, #72] @ 4-byte Reload - adcs r2, r2, r0 - mul r0, r2, r9 - str r2, [sp, #48] @ 4-byte Spill - ldr 
r9, [r1, #28] - umull r6, r2, r0, r8 - str r2, [sp, #40] @ 4-byte Spill - umull r2, r4, r0, r3 - mov r5, r6 - mov r8, r6 - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [r1, #32] - mov r7, r4 - umlal r7, r5, r0, r10 - str r2, [sp, #24] @ 4-byte Spill - ldm lr, {r1, r2, lr} - ldr r6, [sp, #56] @ 4-byte Reload - adcs r3, r6, r12 - ldr r6, [sp, #52] @ 4-byte Reload - str r3, [sp, #12] @ 4-byte Spill - ldr r3, [sp, #36] @ 4-byte Reload - adcs r6, r6, r3 - ldr r3, [sp, #32] @ 4-byte Reload - str r6, [sp, #56] @ 4-byte Spill - adcs r1, r1, r3 - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #28] @ 4-byte Reload - adcs r1, r2, r1 - str r1, [sp, #36] @ 4-byte Spill - adcs r1, lr, #0 - ldr lr, [sp, #76] @ 4-byte Reload - str r1, [sp, #32] @ 4-byte Spill - adcs r1, r9, #0 - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #24] @ 4-byte Reload - adcs r1, r1, #0 - str r1, [sp, #24] @ 4-byte Spill - adcs r1, r11, #0 - umull r6, r11, r0, lr - str r1, [sp, #20] @ 4-byte Spill - mov r1, #0 - adc r1, r1, #0 - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - umull r2, r3, r0, r1 - umull r9, r1, r0, r10 - adds r0, r4, r9 - adcs r0, r1, r8 - ldr r1, [sp, #44] @ 4-byte Reload - ldr r0, [sp, #40] @ 4-byte Reload - adcs r9, r0, r2 - ldr r2, [sp, #64] @ 4-byte Reload - adcs r0, r3, r6 - ldr r6, [sp, #72] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r11, r11, #0 - adds r3, r1, r0 - ldr r0, [sp, #12] @ 4-byte Reload - ldr r1, [sp, #68] @ 4-byte Reload - adcs r0, r7, r0 - mul r7, r0, r2 - str r0, [sp, #12] @ 4-byte Spill - umull r8, r0, r7, r1 - str r0, [sp, #4] @ 4-byte Spill - umull r3, r0, r7, r6 - mov r12, r8 - str r3, [sp, #8] @ 4-byte Spill - ldr r3, [sp, #56] @ 4-byte Reload - mov r4, r0 - umlal r4, r12, r7, r10 - adcs r3, r5, r3 - ldr r5, [sp, #40] @ 4-byte Reload - str r3, [sp] @ 4-byte Spill - ldr r3, [sp, #52] @ 4-byte Reload - adcs r3, r9, r3 - str r3, [sp, #56] @ 4-byte Spill - ldr r3, [sp, #36] @ 4-byte Reload - adcs r3, r5, r3 - str r3, [sp, #52] @ 4-byte Spill - ldr r3, [sp, #32] @ 4-byte Reload - adcs r3, r11, r3 - str r3, [sp, #48] @ 4-byte Spill - ldr r3, [sp, #28] @ 4-byte Reload - adcs r3, r3, #0 - str r3, [sp, #44] @ 4-byte Spill - ldr r3, [sp, #24] @ 4-byte Reload - adcs r3, r3, #0 - str r3, [sp, #40] @ 4-byte Spill - ldr r3, [sp, #20] @ 4-byte Reload - adcs r3, r3, #0 - str r3, [sp, #36] @ 4-byte Spill - ldr r3, [sp, #16] @ 4-byte Reload - adc r3, r3, #0 - str r3, [sp, #32] @ 4-byte Spill - umull r5, r3, r7, lr - ldr lr, [sp, #80] @ 4-byte Reload - str r3, [sp, #28] @ 4-byte Spill - umull r9, r3, r7, r10 - str r5, [sp, #24] @ 4-byte Spill - adds r0, r0, r9 - adcs r0, r3, r8 - ldr r3, [sp, #8] @ 4-byte Reload - ldr r0, [sp, #4] @ 4-byte Reload - umull r5, r11, r7, lr - adcs r9, r0, r5 - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r8, r0, #0 - ldr r0, [sp, #12] @ 4-byte Reload - adds r3, r3, r0 - ldr r0, [sp] @ 4-byte Reload - adcs r11, r4, r0 - mul r7, r11, r2 - ldr r2, [sp, #20] @ 4-byte Reload - umull r3, r0, r7, r1 - str r0, [sp, #24] @ 4-byte Spill - umull r1, r0, r7, r6 - mov r5, r3 - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - mov r4, r0 - umlal r4, r5, r7, r10 - adcs r1, r12, r1 - umull r12, r6, r7, lr - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r9, r1 - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #76] @ 4-byte 
Reload - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - adcs r1, r8, r1 - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r1, #0 - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adcs r1, r1, #0 - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #32] @ 4-byte Reload - adc r1, r1, #0 - str r1, [sp, #32] @ 4-byte Spill - umull r9, r1, r7, r2 - str r1, [sp, #20] @ 4-byte Spill - umull r8, r1, r7, r10 - adds r0, r0, r8 - ldr r8, [sp, #72] @ 4-byte Reload - adcs r0, r1, r3 - ldr r3, [sp, #20] @ 4-byte Reload - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r12 - adcs r1, r6, r9 - adc r7, r3, #0 - ldr r3, [sp, #28] @ 4-byte Reload - adds r3, r3, r11 - ldr r3, [sp, #56] @ 4-byte Reload - adcs r12, r4, r3 - ldr r3, [sp, #52] @ 4-byte Reload - adcs r3, r5, r3 - str r3, [sp, #56] @ 4-byte Spill - ldr r3, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r7, r0 - ldr r7, [sp, #68] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - mul r4, r12, r0 - umull r0, r1, r4, r10 - umull r11, r5, r4, r8 - adds r0, r5, r0 - umull r6, r0, r4, r7 - adcs r1, r1, r6 - umlal r5, r6, r4, r10 - umull r1, r3, r4, lr - adcs r9, r0, r1 - umull r1, r0, r4, r2 - adcs r1, r3, r1 - ldr r3, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - adds r2, r11, r12 - ldr r2, [sp, #56] @ 4-byte Reload - adcs r2, r5, r2 - adcs r3, r6, r3 - ldr r6, [sp, #48] @ 4-byte Reload - adcs lr, r9, r6 - ldr r6, [sp, #44] @ 4-byte Reload - adcs r9, r1, r6 - ldr r1, [sp, #40] @ 4-byte Reload - adcs r11, r0, r1 - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #80] @ 4-byte Reload - adc r12, r0, #0 - subs r5, r2, r8 - sbcs r4, r3, r10 - sbcs r0, lr, r7 - sbcs r6, r9, r1 - ldr r1, [sp, #76] @ 4-byte Reload - sbcs r1, r11, r1 - sbc r7, r12, #0 - ands r7, r7, #1 - movne r5, r2 - ldr r2, [sp, #60] @ 4-byte Reload - movne r4, r3 - movne r0, lr - cmp r7, #0 - movne r6, r9 - movne r1, r11 - str r5, [r2] - str r4, [r2, #4] - str r0, [r2, #8] - str r6, [r2, #12] - str r1, [r2, #16] - add sp, sp, #84 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end70: - .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L - .cantunwind - .fnend - - .globl mcl_fp_addPre5L - .align 2 - .type mcl_fp_addPre5L,%function -mcl_fp_addPre5L: @ @mcl_fp_addPre5L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, lr} - push {r4, r5, r6, r7, r8, lr} - ldm r2, {r3, r12, lr} - ldr r4, [r2, #12] - ldr r8, [r2, #16] - ldm r1, {r5, r6, r7} - ldr r2, [r1, #12] - ldr r1, [r1, #16] - adds r3, r3, r5 - adcs r6, r12, r6 - adcs r7, lr, r7 - adcs r2, r4, r2 - stm r0, {r3, r6, r7} - adcs r1, r8, r1 - str r2, [r0, #12] - str r1, [r0, #16] - mov r0, #0 - adc r0, r0, #0 - pop {r4, r5, r6, r7, r8, lr} - mov pc, lr -.Lfunc_end71: - .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L - .cantunwind - .fnend - - .globl mcl_fp_subPre5L - .align 2 - .type mcl_fp_subPre5L,%function -mcl_fp_subPre5L: @ @mcl_fp_subPre5L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, lr} - push {r4, r5, r6, r7, r8, lr} - ldm r2, {r3, r12, lr} - ldr r4, [r2, #12] - ldr r8, [r2, #16] - ldm r1, {r5, r6, r7} - ldr r2, [r1, #12] - ldr r1, [r1, #16] - subs r3, r5, r3 - 
sbcs r6, r6, r12 - sbcs r7, r7, lr - sbcs r2, r2, r4 - stm r0, {r3, r6, r7} - sbcs r1, r1, r8 - str r2, [r0, #12] - str r1, [r0, #16] - mov r0, #0 - sbc r0, r0, #0 - and r0, r0, #1 - pop {r4, r5, r6, r7, r8, lr} - mov pc, lr -.Lfunc_end72: - .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L - .cantunwind - .fnend - - .globl mcl_fp_shr1_5L - .align 2 - .type mcl_fp_shr1_5L,%function -mcl_fp_shr1_5L: @ @mcl_fp_shr1_5L - .fnstart -@ BB#0: - .save {r4, lr} - push {r4, lr} - ldr r3, [r1, #4] - ldr r12, [r1] - ldr lr, [r1, #12] - ldr r2, [r1, #8] - ldr r1, [r1, #16] - lsrs r4, r3, #1 - lsr r3, r3, #1 - rrx r12, r12 - lsrs r4, lr, #1 - orr r3, r3, r2, lsl #31 - lsr r4, lr, #1 - rrx r2, r2 - str r12, [r0] - str r3, [r0, #4] - orr r4, r4, r1, lsl #31 - lsr r1, r1, #1 - str r2, [r0, #8] - str r4, [r0, #12] - str r1, [r0, #16] - pop {r4, lr} - mov pc, lr -.Lfunc_end73: - .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L - .cantunwind - .fnend - - .globl mcl_fp_add5L - .align 2 - .type mcl_fp_add5L,%function -mcl_fp_add5L: @ @mcl_fp_add5L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r11, lr} - push {r4, r5, r6, r7, r8, r9, r11, lr} - ldm r2, {r12, lr} - ldr r9, [r2, #8] - ldr r5, [r2, #12] - ldr r8, [r2, #16] - ldm r1, {r6, r7} - ldr r2, [r1, #8] - ldr r4, [r1, #12] - ldr r1, [r1, #16] - adds r6, r12, r6 - adcs r7, lr, r7 - adcs r2, r9, r2 - stm r0, {r6, r7} - adcs r5, r5, r4 - mov r4, #0 - str r2, [r0, #8] - adcs r1, r8, r1 - str r5, [r0, #12] - str r1, [r0, #16] - adc r8, r4, #0 - ldm r3, {r4, r12, lr} - ldr r9, [r3, #12] - ldr r3, [r3, #16] - subs r6, r6, r4 - sbcs r7, r7, r12 - sbcs r2, r2, lr - sbcs r12, r5, r9 - sbcs lr, r1, r3 - sbc r1, r8, #0 - tst r1, #1 - stmeq r0!, {r6, r7} - stmeq r0, {r2, r12, lr} - pop {r4, r5, r6, r7, r8, r9, r11, lr} - mov pc, lr -.Lfunc_end74: - .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L - .cantunwind - .fnend - - .globl mcl_fp_addNF5L - .align 2 - .type mcl_fp_addNF5L,%function -mcl_fp_addNF5L: @ @mcl_fp_addNF5L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, lr} - push {r4, r5, r6, r7, r8, r9, r10, lr} - ldm r1, {r12, lr} - ldr r9, [r1, #8] - ldr r5, [r1, #12] - ldr r8, [r1, #16] - ldm r2, {r6, r7} - ldr r1, [r2, #8] - ldr r4, [r2, #12] - ldr r2, [r2, #16] - adds r6, r6, r12 - adcs r10, r7, lr - adcs r9, r1, r9 - adcs lr, r4, r5 - ldr r4, [r3] - adc r12, r2, r8 - ldmib r3, {r2, r5} - ldr r1, [r3, #12] - ldr r3, [r3, #16] - subs r4, r6, r4 - sbcs r2, r10, r2 - sbcs r5, r9, r5 - sbcs r1, lr, r1 - sbc r3, r12, r3 - asr r7, r3, #31 - cmp r7, #0 - movlt r4, r6 - movlt r2, r10 - movlt r5, r9 - cmp r7, #0 - movlt r1, lr - movlt r3, r12 - str r4, [r0] - str r2, [r0, #4] - str r5, [r0, #8] - str r1, [r0, #12] - str r3, [r0, #16] - pop {r4, r5, r6, r7, r8, r9, r10, lr} - mov pc, lr -.Lfunc_end75: - .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L - .cantunwind - .fnend - - .globl mcl_fp_sub5L - .align 2 - .type mcl_fp_sub5L,%function -mcl_fp_sub5L: @ @mcl_fp_sub5L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r11, lr} - push {r4, r5, r6, r7, r8, r9, r11, lr} - ldm r2, {r8, r12, lr} - ldr r9, [r2, #12] - ldr r6, [r2, #16] - ldm r1, {r2, r7} - ldr r4, [r1, #8] - ldr r5, [r1, #12] - ldr r1, [r1, #16] - subs r8, r2, r8 - sbcs r2, r7, r12 - str r8, [r0] - sbcs r12, r4, lr - sbcs lr, r5, r9 - sbcs r4, r1, r6 - mov r1, #0 - stmib r0, {r2, r12, lr} - sbc r1, r1, #0 - str r4, [r0, #16] - tst r1, #1 - popeq {r4, r5, r6, r7, r8, r9, r11, lr} - moveq pc, lr - ldm r3, {r1, r5, r6, r7} - ldr r3, [r3, #16] - adds r1, r1, r8 - adcs r2, r5, r2 - adcs r6, r6, r12 
- adcs r7, r7, lr - adc r3, r3, r4 - stm r0, {r1, r2, r6, r7} - str r3, [r0, #16] - pop {r4, r5, r6, r7, r8, r9, r11, lr} - mov pc, lr -.Lfunc_end76: - .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L - .cantunwind - .fnend - - .globl mcl_fp_subNF5L - .align 2 - .type mcl_fp_subNF5L,%function -mcl_fp_subNF5L: @ @mcl_fp_subNF5L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - ldm r2, {r12, lr} - ldr r9, [r2, #8] - ldr r5, [r2, #12] - ldr r8, [r2, #16] - ldm r1, {r6, r7} - ldr r2, [r1, #8] - ldr r4, [r1, #12] - ldr r1, [r1, #16] - subs r11, r6, r12 - sbcs r10, r7, lr - sbcs lr, r2, r9 - add r9, r3, #8 - sbcs r12, r4, r5 - ldm r3, {r4, r5} - sbc r1, r1, r8 - ldm r9, {r2, r8, r9} - asr r6, r1, #31 - adds r4, r11, r4 - adcs r5, r10, r5 - adcs r2, lr, r2 - adcs r3, r12, r8 - adc r7, r1, r9 - cmp r6, #0 - movge r4, r11 - movge r5, r10 - movge r2, lr - cmp r6, #0 - movge r3, r12 - movge r7, r1 - str r4, [r0] - str r5, [r0, #4] - str r2, [r0, #8] - str r3, [r0, #12] - str r7, [r0, #16] - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end77: - .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L - .cantunwind - .fnend - - .globl mcl_fpDbl_add5L - .align 2 - .type mcl_fpDbl_add5L,%function -mcl_fpDbl_add5L: @ @mcl_fpDbl_add5L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #20 - sub sp, sp, #20 - ldr r12, [r1] - ldr r9, [r1, #4] - ldr r8, [r1, #8] - ldr r10, [r1, #12] - ldmib r2, {r6, r7} - ldr r5, [r2, #16] - ldr r11, [r2] - ldr r4, [r2, #12] - str r5, [sp] @ 4-byte Spill - ldr r5, [r2, #20] - adds lr, r11, r12 - ldr r11, [r2, #32] - add r12, r1, #16 - adcs r6, r6, r9 - add r9, r1, #28 - adcs r7, r7, r8 - str r5, [sp, #4] @ 4-byte Spill - ldr r5, [r2, #24] - str r5, [sp, #12] @ 4-byte Spill - ldr r5, [r2, #28] - ldr r2, [r2, #36] - str r5, [sp, #16] @ 4-byte Spill - str r2, [sp, #8] @ 4-byte Spill - adcs r5, r4, r10 - ldm r9, {r4, r8, r9} - ldm r12, {r1, r2, r12} - str lr, [r0] - stmib r0, {r6, r7} - ldr r7, [sp] @ 4-byte Reload - str r5, [r0, #12] - adcs r1, r7, r1 - ldr r7, [sp, #4] @ 4-byte Reload - str r1, [r0, #16] - ldr r1, [sp, #12] @ 4-byte Reload - adcs r2, r7, r2 - mov r7, #0 - adcs r12, r1, r12 - ldr r1, [sp, #16] @ 4-byte Reload - adcs r10, r1, r4 - ldr r1, [sp, #8] @ 4-byte Reload - adcs r8, r11, r8 - adcs lr, r1, r9 - adc r1, r7, #0 - ldr r7, [r3] - ldmib r3, {r4, r5, r6} - ldr r3, [r3, #16] - subs r7, r2, r7 - sbcs r4, r12, r4 - sbcs r5, r10, r5 - sbcs r6, r8, r6 - sbcs r3, lr, r3 - sbc r1, r1, #0 - ands r1, r1, #1 - movne r7, r2 - movne r4, r12 - movne r5, r10 - cmp r1, #0 - movne r6, r8 - movne r3, lr - str r7, [r0, #20] - str r4, [r0, #24] - str r5, [r0, #28] - str r6, [r0, #32] - str r3, [r0, #36] - add sp, sp, #20 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end78: - .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L - .cantunwind - .fnend - - .globl mcl_fpDbl_sub5L - .align 2 - .type mcl_fpDbl_sub5L,%function -mcl_fpDbl_sub5L: @ @mcl_fpDbl_sub5L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #32 - sub sp, sp, #32 - ldr r7, [r2, #32] - add r8, r1, #12 - str r7, [sp, #24] @ 4-byte Spill - ldr r7, [r2, #36] - str r7, [sp, #28] @ 4-byte Spill - ldr r7, [r1, #32] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r1, #36] - str r7, [sp, #20] @ 4-byte Spill - ldmib r2, {r9, r10, r11} - ldr r7, [r2, #16] - str r7, [sp] @ 4-byte Spill - ldr r7, [r2, 
#20] - str r7, [sp, #4] @ 4-byte Spill - ldr r7, [r2, #24] - str r7, [sp, #8] @ 4-byte Spill - ldr r7, [r2, #28] - ldr r2, [r2] - str r7, [sp, #12] @ 4-byte Spill - ldm r8, {r4, r5, r6, r7, r8} - ldm r1, {r1, r12, lr} - subs r1, r1, r2 - sbcs r2, r12, r9 - stm r0, {r1, r2} - sbcs r1, lr, r10 - str r1, [r0, #8] - sbcs r1, r4, r11 - ldr r2, [sp, #4] @ 4-byte Reload - str r1, [r0, #12] - ldr r1, [sp] @ 4-byte Reload - sbcs r1, r5, r1 - ldr r5, [sp, #16] @ 4-byte Reload - sbcs r2, r6, r2 - ldr r6, [sp, #8] @ 4-byte Reload - str r1, [r0, #16] - mov r1, #0 - sbcs r7, r7, r6 - ldr r6, [sp, #12] @ 4-byte Reload - sbcs r9, r8, r6 - ldr r6, [sp, #24] @ 4-byte Reload - sbcs r8, r5, r6 - ldr r6, [sp, #28] @ 4-byte Reload - ldr r5, [sp, #20] @ 4-byte Reload - sbcs lr, r5, r6 - sbc r12, r1, #0 - ldm r3, {r1, r4, r5, r6} - ldr r3, [r3, #16] - adds r1, r2, r1 - adcs r4, r7, r4 - adcs r5, r9, r5 - adcs r6, r8, r6 - adc r3, lr, r3 - ands r12, r12, #1 - moveq r1, r2 - moveq r4, r7 - moveq r5, r9 - cmp r12, #0 - moveq r6, r8 - moveq r3, lr - str r1, [r0, #20] - str r4, [r0, #24] - str r5, [r0, #28] - str r6, [r0, #32] - str r3, [r0, #36] - add sp, sp, #32 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end79: - .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L - .cantunwind - .fnend - - .globl mcl_fp_mulUnitPre6L - .align 2 - .type mcl_fp_mulUnitPre6L,%function -mcl_fp_mulUnitPre6L: @ @mcl_fp_mulUnitPre6L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - ldr r12, [r1] - ldmib r1, {r3, lr} - ldr r11, [r1, #12] - ldr r9, [r1, #16] - ldr r8, [r1, #20] - umull r4, r10, lr, r2 - umull r1, r7, r12, r2 - mov r5, r7 - mov r6, r4 - umlal r5, r6, r3, r2 - stm r0, {r1, r5, r6} - umull r5, r6, r3, r2 - umull r1, r12, r11, r2 - adds r3, r7, r5 - adcs r3, r6, r4 - adcs r1, r10, r1 - str r1, [r0, #12] - umull r1, r3, r9, r2 - adcs r1, r12, r1 - str r1, [r0, #16] - umull r1, r7, r8, r2 - adcs r1, r3, r1 - str r1, [r0, #20] - adc r1, r7, #0 - str r1, [r0, #24] - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end80: - .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L - .cantunwind - .fnend - - .globl mcl_fpDbl_mulPre6L - .align 2 - .type mcl_fpDbl_mulPre6L,%function -mcl_fpDbl_mulPre6L: @ @mcl_fpDbl_mulPre6L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #48 - sub sp, sp, #48 - str r2, [sp, #44] @ 4-byte Spill - ldr r3, [r2] - ldm r1, {r12, lr} - ldr r2, [r1, #8] - mov r8, r0 - ldr r10, [r1, #12] - umull r0, r4, r12, r3 - umull r6, r7, lr, r3 - str r2, [sp, #24] @ 4-byte Spill - adds r6, r4, r6 - str r0, [sp, #32] @ 4-byte Spill - umull r5, r6, r2, r3 - adcs r7, r7, r5 - umlal r4, r5, lr, r3 - umull r7, r11, r10, r3 - adcs r0, r6, r7 - ldr r7, [r1, #16] - str r0, [sp, #40] @ 4-byte Spill - umull r6, r0, r7, r3 - adcs r2, r11, r6 - ldr r6, [r1, #20] - str r2, [sp, #36] @ 4-byte Spill - umull r11, r2, r6, r3 - adcs r0, r0, r11 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r2, r2, #0 - str r2, [sp, #12] @ 4-byte Spill - str r0, [r8] - ldr r0, [sp, #44] @ 4-byte Reload - ldr r3, [r0, #4] - umull r11, r9, r12, r3 - adds r2, r11, r4 - umull r4, r11, lr, r3 - str r9, [sp, #28] @ 4-byte Spill - adcs lr, r4, r5 - ldr r5, [sp, #24] @ 4-byte Reload - str r2, [sp, #32] @ 4-byte Spill - umull r4, r2, r10, r3 - str r2, [sp, #20] @ 4-byte Spill - umull r2, r10, r5, r3 - ldr r5, [sp, #40] @ 4-byte Reload - adcs r2, r2, r5 - ldr 
r5, [sp, #36] @ 4-byte Reload - adcs r4, r4, r5 - umull r5, r9, r7, r3 - ldr r7, [sp, #16] @ 4-byte Reload - adcs r5, r5, r7 - umull r7, r12, r6, r3 - ldr r3, [sp, #12] @ 4-byte Reload - adcs r7, r7, r3 - mov r3, #0 - adc r6, r3, #0 - ldr r3, [sp, #28] @ 4-byte Reload - adds r3, lr, r3 - adcs r2, r2, r11 - adcs lr, r4, r10 - ldr r4, [sp, #20] @ 4-byte Reload - adcs r10, r5, r4 - ldr r4, [r1, #8] - adcs r11, r7, r9 - ldr r9, [r1, #4] - adc r7, r6, r12 - ldr r6, [r0, #8] - ldr r0, [r1] - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [sp, #32] @ 4-byte Reload - str r9, [sp, #8] @ 4-byte Spill - umull r12, r5, r0, r6 - str r0, [sp, #40] @ 4-byte Spill - adds r0, r12, r3 - str r7, [r8, #4] - ldr r7, [r1, #12] - ldr r12, [r1, #20] - str r5, [sp, #28] @ 4-byte Spill - str r0, [sp, #32] @ 4-byte Spill - umull r3, r0, r9, r6 - str r0, [sp, #24] @ 4-byte Spill - adcs r0, r3, r2 - str r0, [sp, #12] @ 4-byte Spill - umull r3, r0, r4, r6 - str r0, [sp, #20] @ 4-byte Spill - adcs r0, r3, lr - ldr lr, [r1, #16] - ldr r9, [sp, #12] @ 4-byte Reload - str r0, [sp, #4] @ 4-byte Spill - umull r2, r0, r7, r6 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r2, r2, r10 - umull r10, r5, lr, r6 - adcs r10, r10, r11 - umull r11, r3, r12, r6 - adcs r6, r11, r0 - mov r0, #0 - adc r11, r0, #0 - ldr r0, [sp, #28] @ 4-byte Reload - adds r0, r9, r0 - ldr r9, [sp, #4] @ 4-byte Reload - str r0, [sp] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r9, r2, r0 - ldr r0, [sp, #16] @ 4-byte Reload - adcs r10, r10, r0 - adcs r0, r6, r5 - ldr r5, [sp, #8] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - adc r0, r11, r3 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - str r0, [r8, #8] - ldr r0, [sp, #44] @ 4-byte Reload - ldr r6, [r0, #12] - umull r11, r3, r7, r6 - str r3, [sp, #36] @ 4-byte Spill - umull r7, r3, r4, r6 - str r3, [sp, #32] @ 4-byte Spill - umull r4, r3, r5, r6 - str r3, [sp, #20] @ 4-byte Spill - ldr r3, [sp, #40] @ 4-byte Reload - umull r5, r2, r3, r6 - ldr r3, [sp] @ 4-byte Reload - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [sp, #28] @ 4-byte Reload - adds r3, r5, r3 - str r3, [sp, #40] @ 4-byte Spill - ldr r3, [sp, #12] @ 4-byte Reload - adcs r4, r4, r3 - ldr r3, [sp, #24] @ 4-byte Reload - adcs r7, r7, r9 - adcs r9, r11, r10 - umull r5, r11, lr, r6 - adcs r3, r5, r3 - umull r5, r10, r12, r6 - mov r6, #0 - adcs r2, r5, r2 - adc r5, r6, #0 - ldr r6, [sp, #16] @ 4-byte Reload - adds r12, r4, r6 - ldr r4, [sp, #20] @ 4-byte Reload - adcs lr, r7, r4 - ldr r4, [sp, #32] @ 4-byte Reload - ldr r7, [sp, #36] @ 4-byte Reload - adcs r9, r9, r4 - adcs r3, r3, r7 - adcs r2, r2, r11 - str r3, [sp, #20] @ 4-byte Spill - str r2, [sp, #28] @ 4-byte Spill - adc r2, r5, r10 - ldr r5, [r0, #16] - str r2, [sp, #36] @ 4-byte Spill - ldr r2, [sp, #40] @ 4-byte Reload - str r2, [r8, #12] - ldr r2, [r1] - str r2, [sp, #40] @ 4-byte Spill - ldmib r1, {r0, r6} - umull r7, r4, r2, r5 - ldr r3, [r1, #12] - adds r2, r7, r12 - str r4, [sp, #24] @ 4-byte Spill - str r2, [sp, #32] @ 4-byte Spill - umull r7, r2, r0, r5 - str r2, [sp, #16] @ 4-byte Spill - adcs r2, r7, lr - str r2, [sp, #4] @ 4-byte Spill - umull r4, r2, r6, r5 - str r2, [sp, #12] @ 4-byte Spill - adcs r2, r4, r9 - ldr r4, [sp, #28] @ 4-byte Reload - ldr r9, [sp, #4] @ 4-byte Reload - str r2, [sp] @ 4-byte Spill - umull r7, r2, r3, r5 - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [sp, #20] @ 4-byte Reload - adcs r7, r7, r2 - 
ldr r2, [r1, #16] - ldr r1, [r1, #20] - umull r10, lr, r2, r5 - umull r11, r12, r1, r5 - adcs r10, r10, r4 - ldr r4, [sp, #36] @ 4-byte Reload - adcs r5, r11, r4 - mov r4, #0 - adc r11, r4, #0 - ldr r4, [sp, #24] @ 4-byte Reload - adds r4, r9, r4 - ldr r9, [sp] @ 4-byte Reload - str r4, [sp, #4] @ 4-byte Spill - ldr r4, [sp, #16] @ 4-byte Reload - adcs r4, r9, r4 - str r4, [sp, #24] @ 4-byte Spill - ldr r4, [sp, #12] @ 4-byte Reload - adcs r4, r7, r4 - str r4, [sp, #20] @ 4-byte Spill - ldr r4, [sp, #8] @ 4-byte Reload - adcs r10, r10, r4 - adcs lr, r5, lr - ldr r5, [sp, #44] @ 4-byte Reload - adc r7, r11, r12 - str r7, [sp, #28] @ 4-byte Spill - ldr r7, [sp, #32] @ 4-byte Reload - ldr r5, [r5, #20] - str r7, [r8, #16] - umull r11, r7, r3, r5 - str r7, [sp, #44] @ 4-byte Spill - umull r3, r7, r6, r5 - umull r6, r12, r0, r5 - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [sp, #40] @ 4-byte Reload - umull r4, r0, r7, r5 - ldr r7, [sp, #4] @ 4-byte Reload - adds r9, r4, r7 - ldr r4, [sp, #24] @ 4-byte Reload - str r9, [r8, #20] - adcs r6, r6, r4 - ldr r4, [sp, #20] @ 4-byte Reload - adcs r3, r3, r4 - adcs r7, r11, r10 - umull r4, r10, r2, r5 - adcs r2, r4, lr - umull r4, lr, r1, r5 - ldr r1, [sp, #28] @ 4-byte Reload - adcs r1, r4, r1 - mov r4, #0 - adc r4, r4, #0 - adds r5, r6, r0 - ldr r0, [sp, #36] @ 4-byte Reload - adcs r3, r3, r12 - str r5, [r8, #24] - str r3, [r8, #28] - adcs r3, r7, r0 - ldr r0, [sp, #44] @ 4-byte Reload - str r3, [r8, #32] - adcs r2, r2, r0 - adcs r1, r1, r10 - str r2, [r8, #36] - str r1, [r8, #40] - adc r1, r4, lr - str r1, [r8, #44] - add sp, sp, #48 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end81: - .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L - .cantunwind - .fnend - - .globl mcl_fpDbl_sqrPre6L - .align 2 - .type mcl_fpDbl_sqrPre6L,%function -mcl_fpDbl_sqrPre6L: @ @mcl_fpDbl_sqrPre6L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #56 - sub sp, sp, #56 - ldm r1, {r2, r3} - ldr r7, [r1, #12] - mov lr, r0 - ldr r0, [r1, #8] - ldr r9, [r1, #16] - ldr r12, [r1, #20] - umull r10, r6, r7, r2 - str r0, [sp, #48] @ 4-byte Spill - umull r4, r8, r0, r2 - umull r5, r0, r2, r2 - str r7, [sp, #44] @ 4-byte Spill - str r6, [sp, #36] @ 4-byte Spill - umull r6, r7, r3, r2 - str r5, [sp, #24] @ 4-byte Spill - adds r11, r0, r6 - ldr r5, [sp, #36] @ 4-byte Reload - str r7, [sp, #52] @ 4-byte Spill - adcs r7, r7, r4 - umlal r0, r4, r3, r2 - adcs r7, r8, r10 - str r7, [sp, #40] @ 4-byte Spill - umull r7, r10, r9, r2 - adcs r7, r5, r7 - str r7, [sp, #32] @ 4-byte Spill - umull r7, r8, r12, r2 - adcs r11, r10, r7 - adc r2, r8, #0 - adds r0, r6, r0 - umull r6, r10, r3, r3 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r4, r6, r4 - str r0, [lr] - umull r6, r0, r12, r3 - str r0, [sp, #36] @ 4-byte Spill - umull r5, r0, r9, r3 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - umull r9, r12, r0, r3 - ldr r0, [sp, #48] @ 4-byte Reload - umull r7, r8, r0, r3 - ldr r0, [sp, #40] @ 4-byte Reload - adcs r3, r7, r0 - ldr r0, [sp, #32] @ 4-byte Reload - adcs r7, r9, r0 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r5, r5, r11 - adcs r6, r6, r2 - mov r2, #0 - adc r2, r2, #0 - adds r4, r4, r0 - ldr r0, [sp, #28] @ 4-byte Reload - adcs r11, r3, r10 - adcs r8, r7, r8 - ldr r7, [r1, #4] - adcs r10, r5, r12 - ldr r5, [r1, #12] - str r0, [lr, #4] - ldr r0, [sp, #24] @ 4-byte Reload - str r7, [sp, #16] @ 4-byte Spill - adcs r0, r6, r0 - ldr r6, [r1, 
- .globl mcl_fpDbl_sqrPre6L
- .align 2
- .type mcl_fpDbl_sqrPre6L,%function
-mcl_fpDbl_sqrPre6L: @ @mcl_fpDbl_sqrPre6L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #56
- sub sp, sp, #56
- ldm r1, {r2, r3}
- ldr r7, [r1, #12]
- mov lr, r0
- ldr r0, [r1, #8]
- ldr r9, [r1, #16]
- ldr r12, [r1, #20]
- umull r10, r6, r7, r2
- str r0, [sp, #48] @ 4-byte Spill
- umull r4, r8, r0, r2
- umull r5, r0, r2, r2
- str r7, [sp, #44] @ 4-byte Spill
- str r6, [sp, #36] @ 4-byte Spill
- umull r6, r7, r3, r2
- str r5, [sp, #24] @ 4-byte Spill
- adds r11, r0, r6
- ldr r5, [sp, #36] @ 4-byte Reload
- str r7, [sp, #52] @ 4-byte Spill
- adcs r7, r7, r4
- umlal r0, r4, r3, r2
- adcs r7, r8, r10
- str r7, [sp, #40] @ 4-byte Spill
- umull r7, r10, r9, r2
- adcs r7, r5, r7
- str r7, [sp, #32] @ 4-byte Spill
- umull r7, r8, r12, r2
- adcs r11, r10, r7
- adc r2, r8, #0
- adds r0, r6, r0
- umull r6, r10, r3, r3
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #24] @ 4-byte Reload
- adcs r4, r6, r4
- str r0, [lr]
- umull r6, r0, r12, r3
- str r0, [sp, #36] @ 4-byte Spill
- umull r5, r0, r9, r3
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- umull r9, r12, r0, r3
- ldr r0, [sp, #48] @ 4-byte Reload
- umull r7, r8, r0, r3
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r3, r7, r0
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r7, r9, r0
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r5, r5, r11
- adcs r6, r6, r2
- mov r2, #0
- adc r2, r2, #0
- adds r4, r4, r0
- ldr r0, [sp, #28] @ 4-byte Reload
- adcs r11, r3, r10
- adcs r8, r7, r8
- ldr r7, [r1, #4]
- adcs r10, r5, r12
- ldr r5, [r1, #12]
- str r0, [lr, #4]
- ldr r0, [sp, #24] @ 4-byte Reload
- str r7, [sp, #16] @ 4-byte Spill
- adcs r0, r6, r0
- ldr r6, [r1, #8]
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adc r0, r2, r0
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [r1]
- umull r3, r2, r0, r6
- str r0, [sp, #48] @ 4-byte Spill
- adds r0, r3, r4
- str r2, [sp, #36] @ 4-byte Spill
- ldr r2, [r1, #16]
- str r0, [sp, #52] @ 4-byte Spill
- umull r3, r0, r7, r6
- str r0, [sp, #32] @ 4-byte Spill
- adcs r0, r3, r11
- ldr r3, [sp, #44] @ 4-byte Reload
- str r0, [sp, #24] @ 4-byte Spill
- umull r4, r0, r6, r6
- str r0, [sp, #28] @ 4-byte Spill
- adcs r0, r4, r8
- umull r12, r4, r5, r6
- str r0, [sp, #20] @ 4-byte Spill
- adcs r0, r12, r10
- ldr r10, [sp, #24] @ 4-byte Reload
- str r4, [sp, #40] @ 4-byte Spill
- str r0, [sp, #8] @ 4-byte Spill
- umull r9, r0, r2, r6
- ldr r7, [sp, #20] @ 4-byte Reload
- str r0, [sp] @ 4-byte Spill
- ldr r0, [sp, #12] @ 4-byte Reload
- adcs r9, r9, r0
- ldr r0, [r1, #20]
- umull r11, r8, r0, r6
- adcs r6, r11, r3
- mov r3, #0
- adc r11, r3, #0
- ldr r3, [sp, #36] @ 4-byte Reload
- adds r3, r10, r3
- str r3, [sp, #24] @ 4-byte Spill
- ldr r3, [sp, #32] @ 4-byte Reload
- adcs r3, r7, r3
- ldr r7, [sp, #8] @ 4-byte Reload
- str r3, [sp, #4] @ 4-byte Spill
- ldr r3, [sp, #28] @ 4-byte Reload
- adcs r3, r7, r3
- str r3, [sp, #28] @ 4-byte Spill
- adcs r3, r9, r4
- ldr r4, [sp, #16] @ 4-byte Reload
- ldr r9, [sp, #48] @ 4-byte Reload
- str r3, [sp, #20] @ 4-byte Spill
- ldr r3, [sp] @ 4-byte Reload
- ldr r7, [sp, #20] @ 4-byte Reload
- adcs r3, r6, r3
- str r3, [sp, #12] @ 4-byte Spill
- umull r6, r3, r0, r5
- adc r11, r11, r8
- str r3, [sp, #44] @ 4-byte Spill
- umull r3, r0, r2, r5
- str r0, [sp, #36] @ 4-byte Spill
- umull r2, r0, r5, r5
- str r0, [sp, #32] @ 4-byte Spill
- umull r0, r10, r4, r5
- umull r4, r8, r9, r5
- ldr r5, [sp, #24] @ 4-byte Reload
- adds r4, r4, r5
- ldr r5, [sp, #4] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #28] @ 4-byte Reload
- adcs r5, r12, r5
- adcs r2, r2, r7
- ldr r7, [sp, #12] @ 4-byte Reload
- adcs r3, r3, r7
- mov r7, #0
- adcs r6, r6, r11
- adc r7, r7, #0
- adds r9, r0, r8
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r11, r5, r10
- adcs r0, r2, r0
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r3, r0
- add r3, r1, #8
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r12, r6, r0
- ldr r0, [sp, #52] @ 4-byte Reload
- str r0, [lr, #8]
- ldr r0, [sp, #44] @ 4-byte Reload
- str r4, [lr, #12]
- adc r0, r7, r0
- str r0, [sp, #12] @ 4-byte Spill
- ldm r1, {r4, r6}
- ldm r3, {r0, r2, r3}
- ldr r1, [r1, #20]
- umull r5, r7, r2, r1
- str r5, [sp, #32] @ 4-byte Spill
- str r7, [sp, #52] @ 4-byte Spill
- umull r5, r7, r0, r1
- str r5, [sp, #28] @ 4-byte Spill
- str r7, [sp, #48] @ 4-byte Spill
- umull r5, r7, r6, r1
- str r5, [sp, #24] @ 4-byte Spill
- str r7, [sp, #44] @ 4-byte Spill
- umull r5, r7, r4, r1
- str r5, [sp, #8] @ 4-byte Spill
- str r7, [sp, #36] @ 4-byte Spill
- umull r7, r5, r2, r3
- str r5, [sp, #4] @ 4-byte Spill
- umull r2, r5, r0, r3
- umull r0, r10, r6, r3
- umull r6, r8, r4, r3
- adds r4, r6, r9
- str r5, [sp] @ 4-byte Spill
- adcs r11, r0, r11
- ldr r0, [sp, #20] @ 4-byte Reload
- str r4, [sp, #40] @ 4-byte Spill
- umull r4, r9, r3, r3
- adcs r5, r2, r0
- ldr r0, [sp, #16] @ 4-byte Reload
- adcs r6, r7, r0
- umull r0, r2, r1, r3
- ldr r3, [sp, #12] @ 4-byte Reload
- mov r7, #0
- adcs r12, r4, r12
- ldr r4, [sp] @ 4-byte Reload
- adcs r3, r0, r3
- adc r7, r7, #0
- adds r8, r11, r8
- adcs r5, r5, r10
- adcs r6, r6, r4
- ldr r4, [sp, #4] @ 4-byte Reload
- adcs r4, r12, r4
- adcs r3, r3, r9
- adc r10, r7, r2
- ldr r7, [sp, #8] @ 4-byte Reload
- adds r12, r7, r8
- ldr r7, [sp, #24] @ 4-byte Reload
- adcs r9, r7, r5
- ldr r5, [sp, #28] @ 4-byte Reload
- ldr r7, [sp, #36] @ 4-byte Reload
- adcs r6, r5, r6
- ldr r5, [sp, #32] @ 4-byte Reload
- adcs r4, r5, r4
- adcs r0, r0, r3
- umull r3, r8, r1, r1
- adcs r1, r3, r10
- mov r3, #0
- adc r3, r3, #0
- adds r5, r9, r7
- ldr r7, [sp, #44] @ 4-byte Reload
- adcs r6, r6, r7
- ldr r7, [sp, #48] @ 4-byte Reload
- adcs r4, r4, r7
- ldr r7, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r7
- adcs r1, r1, r2
- adc r2, r3, r8
- ldr r3, [sp, #40] @ 4-byte Reload
- str r3, [lr, #16]
- add r3, lr, #36
- str r12, [lr, #20]
- str r5, [lr, #24]
- str r6, [lr, #28]
- str r4, [lr, #32]
- stm r3, {r0, r1, r2}
- add sp, sp, #56
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end82:
- .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L
- .cantunwind
- .fnend
-
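mcl_fpDbl_sqrPre6L computes the same double-width product with both operands equal. A squaring only needs each cross product x[i]*x[j] (i < j) once, doubled, plus the diagonal squares; a hedged C sketch of that structure (name and 32-bit limb width assumed, not from this patch):

    #include <stdint.h>

    /* z[0..11] = x[0..5]^2, 32-bit limbs. */
    static void fpDbl_sqrPre6_ref(uint32_t z[12], const uint32_t x[6]) {
        uint64_t c;
        uint32_t t[12] = {0};
        for (int i = 0; i < 6; i++) {          /* off-diagonal products, once each */
            uint32_t carry = 0;
            for (int j = i + 1; j < 6; j++) {
                c = (uint64_t)x[i] * x[j] + t[i + j] + carry;
                t[i + j] = (uint32_t)c;
                carry = (uint32_t)(c >> 32);
            }
            t[i + 6] = carry;
        }
        uint32_t top = 0;                      /* t *= 2 */
        for (int i = 0; i < 12; i++) {
            uint32_t hi = t[i] >> 31;
            t[i] = (t[i] << 1) | top;
            top = hi;
        }
        c = 0;                                 /* add the squares on the diagonal */
        for (int i = 0; i < 6; i++) {
            uint64_t sq = (uint64_t)x[i] * x[i];
            c += (uint64_t)t[2 * i] + (uint32_t)sq;
            t[2 * i] = (uint32_t)c; c >>= 32;
            c += (uint64_t)t[2 * i + 1] + (uint32_t)(sq >> 32);
            t[2 * i + 1] = (uint32_t)c; c >>= 32;
        }
        for (int i = 0; i < 12; i++) z[i] = t[i];
    }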
- .globl mcl_fp_mont6L
- .align 2
- .type mcl_fp_mont6L,%function
-mcl_fp_mont6L: @ @mcl_fp_mont6L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #116
- sub sp, sp, #116
- str r0, [sp, #56] @ 4-byte Spill
- mov r0, r2
- str r2, [sp, #60] @ 4-byte Spill
- ldm r0, {r2, r6, r7}
- ldr r0, [r0, #12]
- ldr r5, [r3, #8]
- ldr r9, [r3]
- ldr r11, [r1, #8]
- ldr lr, [r1, #12]
- ldr r12, [r3, #4]
- str r7, [sp, #48] @ 4-byte Spill
- ldr r7, [r1, #4]
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [r1]
- str r5, [sp, #92] @ 4-byte Spill
- str r9, [sp, #84] @ 4-byte Spill
- str r11, [sp, #100] @ 4-byte Spill
- str lr, [sp, #64] @ 4-byte Spill
- str r12, [sp, #112] @ 4-byte Spill
- str r7, [sp, #108] @ 4-byte Spill
- ldr r7, [r3, #-4]
- umull r4, r8, r0, r2
- str r0, [sp, #88] @ 4-byte Spill
- str r4, [sp, #44] @ 4-byte Spill
- mul r0, r4, r7
- str r7, [sp, #80] @ 4-byte Spill
- ldr r7, [r1, #20]
- ldr r1, [r1, #16]
- umull r10, r4, r0, r5
- str r4, [sp, #36] @ 4-byte Spill
- umull r4, r5, r0, r9
- str r10, [sp, #16] @ 4-byte Spill
- mov r9, r5
- str r5, [sp, #12] @ 4-byte Spill
- str r4, [sp, #40] @ 4-byte Spill
- umull r5, r4, r7, r2
- str r7, [sp, #104] @ 4-byte Spill
- ldr r7, [sp, #108] @ 4-byte Reload
- str r1, [sp, #96] @ 4-byte Spill
- umlal r9, r10, r0, r12
- str r5, [sp, #72] @ 4-byte Spill
- str r4, [sp, #76] @ 4-byte Spill
- umull r5, r4, r1, r2
- str r4, [sp, #68] @ 4-byte Spill
- umull r1, r4, lr, r2
- str r5, [sp, #28] @ 4-byte Spill
- umull lr, r5, r11, r2
- str r4, [sp, #24] @ 4-byte Spill
- umull r11, r4, r7, r2
- adds r7, r8, r11
- adcs r4, r4, lr
- ldr r7, [r3, #12]
- adcs r1, r5, r1
- ldr r4, [sp, #24] @ 4-byte Reload
- ldr r5, [sp, #12] @ 4-byte Reload
- str r1, [sp, #32] @ 4-byte Spill
- ldr r1, [sp, #28] @ 4-byte Reload
- adcs r1, r4, r1
- ldr r4, [sp, #68] @ 4-byte Reload
- str r1, [sp, #28] @ 4-byte Spill
- ldr r1, [sp, #72] @ 4-byte Reload
- str r7, [sp, #72] @ 4-byte Spill
- adcs r1, r4, r1
- str r1, [sp, #24] @ 4-byte Spill
- ldr r1, [sp, #76] @ 4-byte Reload
- adc r1, r1, #0
- str r1, [sp, #20] @ 4-byte Spill
- ldr r1, [r3, #20]
- umull r11, r4, r0, r1
- str r1, [sp, #76] @ 4-byte Spill
- ldr r1, [r3, #16]
- str r4, [sp, #8] @ 4-byte Spill
- umull r3, r4, r0, r12
- adds r3, r5, r3
- str r1, [sp, #68] @ 4-byte Spill
- umull r5, r12, r0, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- adcs r4, r4, r1
- umull r4, r3, r0, r7
- ldr r0, [sp, #36] @ 4-byte Reload
- ldr r7, [sp, #40] @ 4-byte Reload
- adcs r1, r0, r4
- ldr r0, [sp, #8] @ 4-byte Reload
- adcs r3, r3, r5
- adcs r4, r12, r11
- mov r12, #0
- adc r5, r0, #0
- ldr r0, [sp, #108] @ 4-byte Reload
- umlal r8, lr, r0, r2
- ldr r2, [sp, #44] @ 4-byte Reload
- adds r2, r7, r2
- adcs r2, r9, r8
- str r2, [sp, #44] @ 4-byte Spill
- adcs r2, r10, lr
- str r2, [sp, #40] @ 4-byte Spill
- ldr r2, [sp, #32] @ 4-byte Reload
- adcs r1, r1, r2
- ldr r2, [sp, #88] @ 4-byte Reload
- str r1, [sp, #36] @ 4-byte Spill
- ldr r1, [sp, #28] @ 4-byte Reload
- adcs r1, r3, r1
- mov r3, r0
- str r1, [sp, #32] @ 4-byte Spill
- ldr r1, [sp, #24] @ 4-byte Reload
- adcs r1, r4, r1
- str r1, [sp, #28] @ 4-byte Spill
- ldr r1, [sp, #20] @ 4-byte Reload
- adcs r1, r5, r1
- str r1, [sp, #24] @ 4-byte Spill
- ldr r1, [sp, #104] @ 4-byte Reload
- adc r11, r12, #0
- umull lr, r10, r6, r1
- ldr r1, [sp, #96] @ 4-byte Reload
- umull r7, r4, r6, r1
- ldr r1, [sp, #100] @ 4-byte Reload
- umull r5, r12, r6, r1
- umull r1, r8, r6, r0
- umull r9, r0, r6, r2
- adds r1, r0, r1
- adcs r1, r8, r5
- ldr r8, [sp, #64] @ 4-byte Reload
- umlal r0, r5, r6, r3
- ldr r3, [sp, #44] @ 4-byte Reload
- umull r1, r2, r6, r8
- adcs r1, r12, r1
- adcs r2, r2, r7
- adcs r12, r4, lr
- adc r4, r10, #0
- adds r7, r3, r9
- ldr r3, [sp, #40] @ 4-byte Reload
- ldr r10, [sp, #68] @ 4-byte Reload
- adcs r9, r3, r0
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #112] @ 4-byte Reload
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #80] @ 4-byte Reload
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #28] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #24] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #32] @ 4-byte Spill
- adcs r0, r11, r4
- str r0, [sp, #28] @ 4-byte Spill
- mov r0, #0
- adc r0, r0, #0
- str r0, [sp, #24] @ 4-byte Spill
- mul r0, r7, r1
- ldr r1, [sp, #92] @ 4-byte Reload
- umull lr, r3, r0, r5
- umull r6, r12, r0, r1
- ldr r1, [sp, #84] @ 4-byte Reload
- umull r11, r2, r0, r1
- mov r1, r6
- mov r4, r2
- adds r2, r2, lr
- umlal r4, r1, r0, r5
- ldr r5, [sp, #76] @ 4-byte Reload
- adcs r3, r3, r6
- umull r2, lr, r0, r5
- ldr r5, [sp, #72] @ 4-byte Reload
- umull r3, r6, r0, r5
- adcs r12, r12, r3
- umull r5, r3, r0, r10
- adcs r0, r6, r5
- adcs r2, r3, r2
- adc r3, lr, #0
- adds r7, r11, r7
- adcs r7, r4, r9
- str r7, [sp, #20] @ 4-byte Spill
- ldr r7, [sp, #44] @ 4-byte Reload
- adcs r1, r1, r7
- ldr r7, [sp, #108] @ 4-byte Reload
- str r1, [sp, #44] @ 4-byte Spill
- ldr r1, [sp, #40] @ 4-byte Reload
- adcs r1, r12, r1
- str r1, [sp, #40] @ 4-byte Spill
- ldr r1, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r1
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r2, r0
- ldr r2, [sp, #48] @ 4-byte Reload
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #28] @ 4-byte Reload
- umull r4, r5, r2, r8
- ldr r8, [sp, #88] @ 4-byte Reload
- adcs r0, r3, r0
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #24] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- umull r3, r1, r2, r0
- ldr r0, [sp, #96] @ 4-byte Reload
- str r3, [sp, #8] @ 4-byte Spill
- mov r3, r2
- str r1, [sp, #16] @ 4-byte Spill
- umull r6, r9, r2, r0
- ldr r0, [sp, #100] @ 4-byte Reload
- umull r1, lr, r2, r0
- umull r11, r0, r3, r8
- umull r2, r12, r3, r7
- adds r2, r0, r2
- str r11, [sp, #12] @ 4-byte Spill
- adcs r2, r12, r1
- umlal r0, r1, r3, r7
- ldr r3, [sp, #20] @ 4-byte Reload
- ldr r7, [sp, #12] @ 4-byte Reload
- adcs r2, lr, r4
- adcs r4, r5, r6
- ldr r6, [sp, #8] @ 4-byte Reload
- ldr r5, [sp, #16] @ 4-byte Reload
- adcs r6, r9, r6
- adc r5, r5, #0
- adds r8, r3, r7
- ldr r3, [sp, #44] @ 4-byte Reload
- ldr r7, [sp, #112] @ 4-byte Reload
- adcs r0, r3, r0
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #80] @ 4-byte Reload
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r0, r4
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #28] @ 4-byte Reload
- adcs r0, r0, r6
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #24] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #28] @ 4-byte Spill
- mov r0, #0
- adc r0, r0, #0
- str r0, [sp, #24] @ 4-byte Spill
- mul r0, r8, r1
- ldr r1, [sp, #92] @ 4-byte Reload
- umull r2, r3, r0, r1
- ldr r1, [sp, #84] @ 4-byte Reload
- str r3, [sp, #16] @ 4-byte Spill
- umull r3, r5, r0, r1
- mov r1, r2
- str r3, [sp, #20] @ 4-byte Spill
- ldr r3, [sp, #76] @ 4-byte Reload
- mov r4, r5
- umlal r4, r1, r0, r7
- umull r9, r6, r0, r3
- ldr r3, [sp, #72] @ 4-byte Reload
- str r6, [sp, #12] @ 4-byte Spill
- umull r6, lr, r0, r10
- umull r12, r10, r0, r3
- umull r11, r3, r0, r7
- adds r0, r5, r11
- adcs r0, r3, r2
- ldr r3, [sp, #52] @ 4-byte Reload
- ldr r0, [sp, #16] @ 4-byte Reload
- adcs r11, r0, r12
- ldr r0, [sp, #12] @ 4-byte Reload
- adcs r10, r10, r6
- adcs lr, lr, r9
- adc r9, r0, #0
- ldr r0, [sp, #20] @ 4-byte Reload
- adds r6, r0, r8
- ldr r0, [sp, #104] @ 4-byte Reload
- ldr r8, [sp, #88] @ 4-byte Reload
- umull r7, r2, r3, r0
- ldr r0, [sp, #64] @ 4-byte Reload
- str r7, [sp, #12] @ 4-byte Spill
- str r2, [sp, #20] @ 4-byte Spill
- umull r7, r2, r3, r0
- ldr r0, [sp, #100] @ 4-byte Reload
- str r2, [sp, #8] @ 4-byte Spill
- str r7, [sp, #4] @ 4-byte Spill
- ldr r7, [sp, #108] @ 4-byte Reload
- umull r5, r2, r3, r0
- str r2, [sp] @ 4-byte Spill
- umull r2, r0, r3, r8
- umull r6, r12, r3, r7
- str r2, [sp, #16] @ 4-byte Spill
- ldr r2, [sp, #48] @ 4-byte Reload
- adcs r4, r4, r2
- ldr r2, [sp, #44] @ 4-byte Reload
- adcs r1, r1, r2
- str r1, [sp, #48] @ 4-byte Spill
- ldr r1, [sp, #40] @ 4-byte Reload
- adcs r11, r11, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- adcs r10, r10, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- adcs r1, lr, r1
- str r1, [sp, #40] @ 4-byte Spill
- ldr r1, [sp, #28] @ 4-byte Reload
- adcs r1, r9, r1
- str r1, [sp, #36] @ 4-byte Spill
- ldr r1, [sp, #24] @ 4-byte Reload
- adc lr, r1, #0
- adds r6, r0, r6
- ldr r1, [sp, #4] @ 4-byte Reload
- adcs r2, r12, r5
- umlal r0, r5, r3, r7
- ldr r2, [sp] @ 4-byte Reload
- adcs r9, r2, r1
- ldr r1, [sp, #96] @ 4-byte Reload
- umull r6, r2, r3, r1
- ldr r1, [sp, #8] @ 4-byte Reload
- adcs r6, r1, r6
- ldr r1, [sp, #12] @ 4-byte Reload
- adcs r2, r2, r1
- ldr r1, [sp, #20] @ 4-byte Reload
- adc r8, r1, #0
- ldr r1, [sp, #16] @ 4-byte Reload
- adds r4, r4, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #92] @ 4-byte Reload
- str r0, [sp, #52] @ 4-byte Spill
- adcs r0, r11, r5
- ldr r5, [sp, #112] @ 4-byte Reload
- ldr r11, [sp, #76] @ 4-byte Reload
- str r0, [sp, #48] @ 4-byte Spill
- adcs r0, r10, r9
- ldr r10, [sp, #80] @ 4-byte Reload
- ldr r9, [sp, #72] @ 4-byte Reload
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r6
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #36] @ 4-byte Spill
- adcs r0, lr, r8
- ldr r8, [sp, #68] @ 4-byte Reload
- str r0, [sp, #32] @ 4-byte Spill
- mov r0, #0
- adc r0, r0, #0
- str r0, [sp, #28] @ 4-byte Spill
- mul r0, r4, r10
- umull r2, r12, r0, r1
- ldr r1, [sp, #84] @ 4-byte Reload
- umull r3, r7, r0, r1
- mov r1, r2
- str r3, [sp, #24] @ 4-byte Spill
- umull lr, r3, r0, r5
- mov r6, r7
- adds r7, r7, lr
- umlal r6, r1, r0, r5
- adcs r2, r3, r2
- umull r7, lr, r0, r11
- umull r2, r3, r0, r9
- adcs r12, r12, r2
- umull r5, r2, r0, r8
- adcs r0, r3, r5
- adcs r2, r2, r7
- ldr r7, [sp, #24] @ 4-byte Reload
- adc r3, lr, #0
- adds r7, r7, r4
- ldr r7, [sp, #52] @ 4-byte Reload
- adcs r7, r6, r7
- str r7, [sp, #52] @ 4-byte Spill
- ldr r7, [sp, #48] @ 4-byte Reload
- adcs r1, r1, r7
- str r1, [sp, #48] @ 4-byte Spill
- ldr r1, [sp, #44] @ 4-byte Reload
- adcs r1, r12, r1
- str r1, [sp, #44] @ 4-byte Spill
- ldr r1, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r1
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r2, r0
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r3, r0
- ldr r3, [sp, #108] @ 4-byte Reload
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #28] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- ldr r4, [r0, #16]
- ldr r0, [sp, #104] @ 4-byte Reload
- umull r12, lr, r4, r0
- ldr r0, [sp, #64] @ 4-byte Reload
- umull r5, r6, r4, r3
- umull r2, r8, r4, r0
- ldr r0, [sp, #88] @ 4-byte Reload
- umull r7, r1, r4, r0
- str r7, [sp, #24] @ 4-byte Spill
- ldr r7, [sp, #100] @ 4-byte Reload
- adds r5, r1, r5
- umull r0, r5, r4, r7
- ldr r7, [sp, #96] @ 4-byte Reload
- adcs r6, r6, r0
- umlal r1, r0, r4, r3
- ldr r3, [sp, #52] @ 4-byte Reload
- adcs r2, r5, r2
- umull r5, r6, r4, r7
- ldr r4, [sp, #24] @ 4-byte Reload
- adcs r7, r8, r5
- adcs r6, r6, r12
- adc r5, lr, #0
- adds r8, r3, r4
- ldr r3, [sp, #48] @ 4-byte Reload
- adcs r1, r3, r1
- str r1, [sp, #52] @ 4-byte Spill
- ldr r1, [sp, #44] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #92] @ 4-byte Reload
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r7
- ldr r7, [sp, #68] @ 4-byte Reload
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r0, r6
- ldr r6, [sp, #112] @ 4-byte Reload
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #28] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #32] @ 4-byte Spill
- mov r0, #0
- adc r0, r0, #0
- str r0, [sp, #28] @ 4-byte Spill
- mul r0, r8, r10
- umull r5, r12, r0, r1
- ldr r1, [sp, #84] @ 4-byte Reload
- umull lr, r3, r0, r6
- umull r10, r2, r0, r1
- mov r1, r5
- mov r4, r2
- adds r2, r2, lr
- adcs r3, r3, r5
- umlal r4, r1, r0, r6
- umull r2, lr, r0, r11
- ldr r11, [sp, #88] @ 4-byte Reload
- umull r3, r5, r0, r9
- adcs r12, r12, r3
- umull r6, r3, r0, r7
- adcs r0, r5, r6
- adcs r2, r3, r2
- adc r3, lr, #0
- adds r7, r10, r8
- ldr r7, [sp, #52] @ 4-byte Reload
- adcs r7, r4, r7
- str r7, [sp, #52] @ 4-byte Spill
- ldr r7, [sp, #48] @ 4-byte Reload
- adcs r1, r1, r7
- str r1, [sp, #48] @ 4-byte Spill
- ldr r1, [sp, #44] @ 4-byte Reload
- adcs r1, r12, r1
- str r1, [sp, #44] @ 4-byte Spill
- ldr r1, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r1
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r2, r0
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r3, r0
- ldr r3, [sp, #108] @ 4-byte Reload
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #28] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- ldr r4, [r0, #20]
- ldr r0, [sp, #104] @ 4-byte Reload
- umull r9, r1, r4, r0
- ldr r0, [sp, #96] @ 4-byte Reload
- umull r2, r12, r4, r3
- str r1, [sp, #60] @ 4-byte Spill
- umull r7, r8, r4, r0
- ldr r0, [sp, #64] @ 4-byte Reload
- umull r5, r6, r4, r0
- ldr r0, [sp, #100] @ 4-byte Reload
- umull r1, lr, r4, r0
- umull r10, r0, r4, r11
- ldr r11, [sp, #92] @ 4-byte Reload
- adds r2, r0, r2
- adcs r2, r12, r1
- umlal r0, r1, r4, r3
- ldr r3, [sp, #52] @ 4-byte Reload
- ldr r12, [sp, #112] @ 4-byte Reload
- adcs r2, lr, r5
- adcs r5, r6, r7
- ldr r6, [sp, #60] @ 4-byte Reload
- adcs r7, r8, r9
- ldr r9, [sp, #68] @ 4-byte Reload
- adc r6, r6, #0
- adds r8, r3, r10
- ldr r3, [sp, #48] @ 4-byte Reload
- ldr r10, [sp, #84] @ 4-byte Reload
- adcs lr, r3, r0
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #80] @ 4-byte Reload
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #72] @ 4-byte Reload
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r0, r7
- ldr r7, [sp, #76] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #28] @ 4-byte Reload
- adcs r0, r0, r6
- str r0, [sp, #88] @ 4-byte Spill
- mov r0, #0
- adc r0, r0, #0
- str r0, [sp, #64] @ 4-byte Spill
- mul r0, r8, r1
- umull r3, r4, r0, r10
- umull r1, r2, r0, r12
- adds r1, r4, r1
- str r3, [sp, #80] @ 4-byte Spill
- umull r6, r1, r0, r11
- adcs r2, r2, r6
- umlal r4, r6, r0, r12
- umull r2, r3, r0, r5
- adcs r1, r1, r2
- str r1, [sp, #60] @ 4-byte Spill
- umull r2, r1, r0, r9
- adcs r2, r3, r2
- str r2, [sp, #52] @ 4-byte Spill
- umull r3, r2, r0, r7
- adcs r1, r1, r3
- ldr r3, [sp, #60] @ 4-byte Reload
- adc r0, r2, #0
- ldr r2, [sp, #80] @ 4-byte Reload
- adds r2, r2, r8
- ldr r2, [sp, #108] @ 4-byte Reload
- adcs r12, r4, lr
- adcs lr, r6, r2
- ldr r2, [sp, #104] @ 4-byte Reload
- adcs r8, r3, r2
- ldr r2, [sp, #100] @ 4-byte Reload
- ldr r3, [sp, #52] @ 4-byte Reload
- adcs r6, r3, r2
- ldr r2, [sp, #96] @ 4-byte Reload
- adcs r3, r1, r2
- ldr r1, [sp, #88] @ 4-byte Reload
- adcs r2, r0, r1
- ldr r0, [sp, #64] @ 4-byte Reload
- adc r0, r0, #0
- subs r4, r12, r10
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- sbcs r0, lr, r0
- sbcs r1, r8, r11
- mov r11, r6
- sbcs r5, r6, r5
- sbcs r6, r3, r9
- mov r9, r2
- sbcs r10, r2, r7
- ldr r2, [sp, #108] @ 4-byte Reload
- sbc r7, r2, #0
- ldr r2, [sp, #56] @ 4-byte Reload
- ands r7, r7, #1
- movne r4, r12
- movne r0, lr
- movne r1, r8
- cmp r7, #0
- movne r5, r11
- movne r6, r3
- movne r10, r9
- str r4, [r2]
- str r0, [r2, #4]
- str r1, [r2, #8]
- str r5, [r2, #12]
- str r6, [r2, #16]
- str r10, [r2, #20]
- add sp, sp, #116
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end83:
- .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L
- .cantunwind
- .fnend
-
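mcl_fp_mont6L interleaves multiplication and reduction word by word (CIOS): after each row x[i]*y is accumulated, one limb m = t[0]*inv is chosen so that adding m*p clears the low word, and the subs/sbcs ... movne tail conditionally subtracts p once. A compact C sketch under the same assumptions (R = 2^192; inv = -p^-1 mod 2^32, the word the code loads from p[-4 bytes]; names illustrative):

    #include <stdint.h>

    /* z = x*y*R^-1 mod p, 32-bit limbs, least significant first. */
    static void fp_mont6_ref(uint32_t z[6], const uint32_t x[6], const uint32_t y[6],
                             const uint32_t p[6], uint32_t inv) {
        uint32_t t[8] = {0};               /* 6 limbs plus two carry words */
        for (int i = 0; i < 6; i++) {
            uint64_t c = 0;
            for (int j = 0; j < 6; j++) {  /* t += x[i] * y */
                c += (uint64_t)t[j] + (uint64_t)x[i] * y[j];
                t[j] = (uint32_t)c; c >>= 32;
            }
            c += t[6]; t[6] = (uint32_t)c; t[7] = (uint32_t)(c >> 32);
            uint32_t m = t[0] * inv;       /* the mul that clears the low word */
            c = (uint64_t)t[0] + (uint64_t)m * p[0];
            c >>= 32;
            for (int j = 1; j < 6; j++) {  /* t = (t + m*p) >> 32 */
                c += (uint64_t)t[j] + (uint64_t)m * p[j];
                t[j - 1] = (uint32_t)c; c >>= 32;
            }
            c += t[6]; t[5] = (uint32_t)c;
            t[6] = t[7] + (uint32_t)(c >> 32);
            t[7] = 0;
        }
        uint32_t s[6], borrow = 0;         /* conditional final subtraction */
        for (int i = 0; i < 6; i++) {
            uint64_t d = (uint64_t)t[i] - p[i] - borrow;
            s[i] = (uint32_t)d;
            borrow = (uint32_t)(d >> 32) & 1;
        }
        borrow = (t[6] < borrow);          /* net borrow => t < p, keep t */
        for (int i = 0; i < 6; i++) z[i] = borrow ? t[i] : s[i];
    }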
- .globl mcl_fp_montNF6L
- .align 2
- .type mcl_fp_montNF6L,%function
-mcl_fp_montNF6L: @ @mcl_fp_montNF6L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #88
- sub sp, sp, #88
- str r2, [sp, #32] @ 4-byte Spill
- str r0, [sp, #28] @ 4-byte Spill
- ldm r2, {r4, r12}
- ldr r5, [r1, #4]
- ldr r0, [r2, #12]
- ldr r9, [r2, #8]
- ldr r2, [r1]
- ldr r7, [r1, #8]
- ldr lr, [r3, #8]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [r1, #12]
- str r5, [sp, #44] @ 4-byte Spill
- umull r6, r8, r5, r4
- mov r10, r5
- umull r11, r5, r2, r4
- str r2, [sp, #52] @ 4-byte Spill
- str r7, [sp, #48] @ 4-byte Spill
- str lr, [sp, #40] @ 4-byte Spill
- adds r6, r5, r6
- umull r2, r6, r7, r4
- adcs r7, r8, r2
- umlal r5, r2, r10, r4
- umull r7, r8, r0, r4
- str r0, [sp, #36] @ 4-byte Spill
- adcs r0, r6, r7
- ldr r6, [r1, #16]
- str r0, [sp, #64] @ 4-byte Spill
- umull r7, r0, r6, r4
- str r6, [sp, #72] @ 4-byte Spill
- ldr r6, [r3]
- adcs r7, r8, r7
- str r7, [sp, #60] @ 4-byte Spill
- ldr r7, [r1, #20]
- str r6, [sp, #80] @ 4-byte Spill
- umull r1, r8, r7, r4
- str r7, [sp, #76] @ 4-byte Spill
- adcs r0, r0, r1
- ldr r1, [r3, #-4]
- str r0, [sp, #20] @ 4-byte Spill
- adc r0, r8, #0
- ldr r8, [r3, #4]
- str r0, [sp, #16] @ 4-byte Spill
- mul r0, r11, r1
- str r1, [sp, #56] @ 4-byte Spill
- umull r1, r7, r0, r6
- str r8, [sp, #68] @ 4-byte Spill
- adds r1, r1, r11
- str r7, [sp, #12] @ 4-byte Spill
- umull r1, r4, r0, r8
- adcs r8, r1, r5
- ldr r1, [r3, #12]
- umull r5, r11, r0, lr
- str r4, [sp, #8] @ 4-byte Spill
- adcs r6, r5, r2
- str r1, [sp, #84] @ 4-byte Spill
- umull r5, r7, r0, r1
- ldr r1, [sp, #64] @ 4-byte Reload
- adcs lr, r5, r1
- ldr r1, [r3, #16]
- str r1, [sp, #64] @ 4-byte Spill
- umull r5, r4, r0, r1
- ldr r1, [sp, #60] @ 4-byte Reload
- adcs r5, r5, r1
- ldr r1, [r3, #20]
- umull r3, r2, r0, r1
- ldr r0, [sp, #20] @ 4-byte Reload
- str r1, [sp, #60] @ 4-byte Spill
- ldr r1, [sp, #16] @ 4-byte Reload
- adcs r0, r3, r0
- adc r3, r1, #0
- ldr r1, [sp, #12] @ 4-byte Reload
- adds r1, r8, r1
- ldr r8, [sp, #36] @ 4-byte Reload
- str r1, [sp, #20] @ 4-byte Spill
- ldr r1, [sp, #8] @ 4-byte Reload
- adcs r1, r6, r1
- adcs r11, lr, r11
- str r1, [sp, #16] @ 4-byte Spill
- ldr lr, [sp, #76] @ 4-byte Reload
- adcs r1, r5, r7
- ldr r5, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r4
- str r1, [sp, #12] @ 4-byte Spill
- str r0, [sp, #8] @ 4-byte Spill
- adc r0, r3, r2
- umull r3, r6, r12, r10
- str r0, [sp, #4] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- umull r7, r1, r12, r0
- ldr r0, [sp, #48] @ 4-byte Reload
- adds r3, r1, r3
- umull r2, r3, r12, r0
- adcs r6, r6, r2
- umlal r1, r2, r12, r10
- ldr r10, [sp, #68] @ 4-byte Reload
- umull r6, r0, r12, r8
- adcs r4, r3, r6
- umull r6, r3, r12, r5
- adcs r5, r0, r6
- umull r6, r0, r12, lr
- ldr r12, [sp, #60] @ 4-byte Reload
- adcs r3, r3, r6
- ldr r6, [sp, #20] @ 4-byte Reload
- adc r0, r0, #0
- adds r7, r7, r6
- ldr r6, [sp, #16] @ 4-byte Reload
- adcs r1, r1, r6
- ldr r6, [sp, #12] @ 4-byte Reload
- adcs r2, r2, r11
- adcs r6, r4, r6
- ldr r4, [sp, #8] @ 4-byte Reload
- adcs r11, r5, r4
- ldr r5, [sp, #4] @ 4-byte Reload
- adcs r3, r3, r5
- adc r0, r0, #0
- str r3, [sp, #20] @ 4-byte Spill
- ldr r3, [sp, #80] @ 4-byte Reload
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- mul r4, r7, r0
- umull r0, r5, r4, r3
- adds r0, r0, r7
- str r5, [sp, #12] @ 4-byte Spill
- umull r0, r3, r4, r10
- ldr r5, [sp, #12] @ 4-byte Reload
- str r3, [sp, #8] @ 4-byte Spill
- adcs r3, r0, r1
- ldr r0, [sp, #40] @ 4-byte Reload
- umull r1, r7, r4, r0
- ldr r0, [sp, #84] @ 4-byte Reload
- str r7, [sp, #4] @ 4-byte Spill
- adcs r1, r1, r2
- umull r2, r7, r4, r0
- str r7, [sp] @ 4-byte Spill
- ldr r7, [sp, #64] @ 4-byte Reload
- adcs r2, r2, r6
- umull r6, r0, r4, r7
- adcs r6, r6, r11
- umull r7, r11, r4, r12
- ldr r4, [sp, #20] @ 4-byte Reload
- ldr r12, [sp, #48] @ 4-byte Reload
- adcs r4, r7, r4
- ldr r7, [sp, #16] @ 4-byte Reload
- adc r7, r7, #0
- adds r3, r3, r5
- str r3, [sp, #20] @ 4-byte Spill
- ldr r3, [sp, #8] @ 4-byte Reload
- adcs r1, r1, r3
- ldr r3, [sp, #72] @ 4-byte Reload
- str r1, [sp, #16] @ 4-byte Spill
- ldr r1, [sp, #4] @ 4-byte Reload
- adcs r1, r2, r1
- str r1, [sp, #12] @ 4-byte Spill
- ldr r1, [sp] @ 4-byte Reload
- adcs r1, r6, r1
- adcs r0, r4, r0
- str r1, [sp, #8] @ 4-byte Spill
- str r0, [sp, #4] @ 4-byte Spill
- adc r0, r7, r11
- ldr r11, [sp, #52] @ 4-byte Reload
- str r0, [sp] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- umull r6, r1, r9, r11
- umull r5, r4, r9, r0
- adds r5, r1, r5
- umull r2, r5, r9, r12
- adcs r4, r4, r2
- umlal r1, r2, r9, r0
- ldr r0, [sp, #20] @ 4-byte Reload
- umull r4, r7, r9, r8
- adcs r8, r5, r4
- umull r5, r4, r9, r3
- adcs r5, r7, r5
- umull r7, r3, r9, lr
- ldr lr, [sp, #60] @ 4-byte Reload
- adcs r4, r4, r7
- adc r3, r3, #0
- adds r7, r6, r0
- ldr r0, [sp, #16] @ 4-byte Reload
- adcs r1, r1, r0
- ldr r0, [sp, #12] @ 4-byte Reload
- adcs r2, r2, r0
- ldr r0, [sp, #8] @ 4-byte Reload
- adcs r6, r8, r0
- ldr r0, [sp, #4] @ 4-byte Reload
- ldr r8, [sp, #56] @ 4-byte Reload
- adcs r9, r5, r0
- ldr r0, [sp] @ 4-byte Reload
- adcs r0, r4, r0
- mul r4, r7, r8
- str r0, [sp, #20] @ 4-byte Spill
- adc r0, r3, #0
- ldr r3, [sp, #80] @ 4-byte Reload
- str r0, [sp, #16] @ 4-byte Spill
- umull r0, r5, r4, r3
- adds r0, r0, r7
- str r5, [sp, #12] @ 4-byte Spill
- umull r0, r3, r4, r10
- ldr r10, [sp, #40] @ 4-byte Reload
- ldr r5, [sp, #12] @ 4-byte Reload
- str r3, [sp, #8] @ 4-byte Spill
- adcs r0, r0, r1
- umull r1, r3, r4, r10
- str r3, [sp, #4] @ 4-byte Spill
- ldr r3, [sp, #84] @ 4-byte Reload
- adcs r1, r1, r2
- umull r2, r7, r4, r3
- ldr r3, [sp, #64] @ 4-byte Reload
- str r7, [sp] @ 4-byte Spill
- adcs r2, r2, r6
- umull r6, r7, r4, r3
- adcs r6, r6, r9
- umull r3, r9, r4, lr
- ldr r4, [sp, #20] @ 4-byte Reload
- adcs r3, r3, r4
- ldr r4, [sp, #16] @ 4-byte Reload
- adc r4, r4, #0
- adds r0, r0, r5
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #8] @ 4-byte Reload
- adcs r0, r1, r0
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #4] @ 4-byte Reload
- adcs r0, r2, r0
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp] @ 4-byte Reload
- adcs r0, r6, r0
- str r0, [sp, #8] @ 4-byte Spill
- adcs r0, r3, r7
- str r0, [sp, #4] @ 4-byte Spill
- adc r0, r4, r9
- ldr r4, [sp, #44] @ 4-byte Reload
- str r0, [sp] @ 4-byte Spill
- ldr r0, [sp, #24] @ 4-byte Reload
- umull r3, lr, r0, r12
- ldr r12, [sp, #36] @ 4-byte Reload
- umull r9, r2, r0, r11
- umull r6, r7, r0, r4
- mov r1, r2
- adds r2, r2, r6
- mov r5, r3
- adcs r2, r7, r3
- umlal r1, r5, r0, r4
- umull r2, r3, r0, r12
- adcs r11, lr, r2
- ldr lr, [sp, #72] @ 4-byte Reload
- ldr r2, [sp, #76] @ 4-byte Reload
- umull r4, r6, r0, lr
- adcs r3, r3, r4
- umull r4, r7, r0, r2
- ldr r0, [sp, #20] @ 4-byte Reload
- ldr r2, [sp, #16] @ 4-byte Reload
- adcs r4, r6, r4
- adc r6, r7, #0
- adds r0, r9, r0
- ldr r9, [sp, #64] @ 4-byte Reload
- adcs r1, r1, r2
- ldr r2, [sp, #12] @ 4-byte Reload
- adcs r7, r5, r2
- ldr r2, [sp, #8] @ 4-byte Reload
- ldr r5, [sp, #4] @ 4-byte Reload
- adcs r2, r11, r2
- adcs r11, r3, r5
- ldr r3, [sp] @ 4-byte Reload
- adcs r3, r4, r3
- mul r4, r0, r8
- ldr r8, [sp, #80] @ 4-byte Reload
- str r3, [sp, #24] @ 4-byte Spill
- adc r3, r6, #0
- str r3, [sp, #20] @ 4-byte Spill
- umull r5, r3, r4, r8
- str r3, [sp, #16] @ 4-byte Spill
- ldr r3, [sp, #68] @ 4-byte Reload
- adds r0, r5, r0
- umull r0, r5, r4, r3
- str r5, [sp, #12] @ 4-byte Spill
- ldr r5, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r1
- umull r1, r3, r4, r10
- ldr r10, [sp, #60] @ 4-byte Reload
- str r3, [sp, #8] @ 4-byte Spill
- adcs r1, r1, r7
- umull r7, r3, r4, r5
- adcs r2, r7, r2
- umull r7, r5, r4, r9
- str r3, [sp, #4] @ 4-byte Spill
- ldr r3, [sp, #16] @ 4-byte Reload
- adcs r7, r7, r11
- umull r6, r11, r4, r10
- ldr r4, [sp, #24] @ 4-byte Reload
- adcs r4, r6, r4
- ldr r6, [sp, #20] @ 4-byte Reload
- adc r6, r6, #0
- adds r0, r0, r3
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #12] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #52] @ 4-byte Reload
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #8] @ 4-byte Reload
- adcs r0, r2, r0
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #4] @ 4-byte Reload
- adcs r0, r7, r0
- ldr r7, [sp, #44] @ 4-byte Reload
- str r0, [sp, #12] @ 4-byte Spill
- adcs r0, r4, r5
- str r0, [sp, #8] @ 4-byte Spill
- adc r0, r6, r11
- str r0, [sp, #4] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- ldr r5, [r0, #16]
- umull r11, r2, r5, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- umull r4, r0, r5, r7
- adds r4, r2, r4
- umull r3, r4, r5, r1
- adcs r0, r0, r3
- umlal r2, r3, r5, r7
- ldr r7, [sp, #76] @ 4-byte Reload
- umull r0, r6, r5, r12
- adcs r12, r4, r0
- umull r4, r1, r5, lr
- adcs r4, r6, r4
- umull r6, r0, r5, r7
- ldr r7, [sp, #24] @ 4-byte Reload
- adcs r1, r1, r6
- adc r0, r0, #0
- adds r6, r11, r7
- ldr r7, [sp, #20] @ 4-byte Reload
- adcs r2, r2, r7
- ldr r7, [sp, #16] @ 4-byte Reload
- adcs r3, r3, r7
- ldr r7, [sp, #12] @ 4-byte Reload
- adcs r5, r12, r7
- ldr r7, [sp, #8] @ 4-byte Reload
- adcs r7, r4, r7
- str r7, [sp, #12] @ 4-byte Spill
- ldr r7, [sp, #4] @ 4-byte Reload
- adcs r1, r1, r7
- adc r0, r0, #0
- str r1, [sp, #24] @ 4-byte Spill
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- mul r4, r6, r0
- umull r0, r1, r4, r8
- ldr r8, [sp, #40] @ 4-byte Reload
- str r1, [sp, #16] @ 4-byte Spill
- ldr r1, [sp, #68] @ 4-byte Reload
- adds r0, r0, r6
- ldr r7, [sp, #16] @ 4-byte Reload
- umull r0, r11, r4, r1
- ldr r1, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r2
- umull r2, lr, r4, r8
- adcs r2, r2, r3
- umull r3, r12, r4, r1
- ldr r1, [sp, #12] @ 4-byte Reload
- adcs r3, r3, r5
- umull r5, r6, r4, r9
- adcs r5, r5, r1
- umull r1, r9, r4, r10
- ldr r4, [sp, #24] @ 4-byte Reload
- adcs r1, r1, r4
- ldr r4, [sp, #20] @ 4-byte Reload
- adc r4, r4, #0
- adds r0, r0, r7
- ldr r7, [sp, #44] @ 4-byte Reload
- str r0, [sp, #24] @ 4-byte Spill
- adcs r0, r2, r11
- adcs r11, r3, lr
- str r0, [sp, #20] @ 4-byte Spill
- adcs r10, r5, r12
- adcs r0, r1, r6
- str r0, [sp, #16] @ 4-byte Spill
- adc r0, r4, r9
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- ldr r5, [r0, #20]
- ldr r0, [sp, #48] @ 4-byte Reload
- umull r6, r1, r5, r0
- ldr r0, [sp, #52] @ 4-byte Reload
- mov r4, r6
- umull lr, r3, r5, r0
- umull r12, r0, r5, r7
- mov r2, r3
- adds r3, r3, r12
- umlal r2, r4, r5, r7
- ldr r7, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r6
- umull r0, r3, r5, r7
- ldr r7, [sp, #76] @ 4-byte Reload
- adcs r12, r1, r0
- ldr r0, [sp, #72] @ 4-byte Reload
- umull r1, r6, r5, r0
- adcs r1, r3, r1
- umull r3, r0, r5, r7
- ldr r5, [sp, #24] @ 4-byte Reload
- ldr r7, [sp, #20] @ 4-byte Reload
- adcs r3, r6, r3
- adc r0, r0, #0
- adds r6, lr, r5
- ldr r5, [sp, #16] @ 4-byte Reload
- ldr lr, [sp, #68] @ 4-byte Reload
- adcs r2, r2, r7
- adcs r7, r4, r11
- adcs r9, r12, r10
- adcs r1, r1, r5
- ldr r5, [sp, #80] @ 4-byte Reload
- str r1, [sp, #48] @ 4-byte Spill
- ldr r1, [sp, #12] @ 4-byte Reload
- adcs r1, r3, r1
- adc r0, r0, #0
- str r1, [sp, #76] @ 4-byte Spill
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- mul r4, r6, r0
- umull r0, r1, r4, r5
- umull r3, r11, r4, lr
- str r1, [sp, #56] @ 4-byte Spill
- ldr r1, [sp, #84] @ 4-byte Reload
- adds r0, r0, r6
- umull r6, r0, r4, r8
- adcs r12, r3, r2
- ldr r2, [sp, #60] @ 4-byte Reload
- str r0, [sp, #52] @ 4-byte Spill
- adcs r10, r6, r7
- umull r3, r0, r4, r1
- adcs r9, r3, r9
- ldr r3, [sp, #64] @ 4-byte Reload
- str r0, [sp, #44] @ 4-byte Spill
- umull r7, r0, r4, r3
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r7, r7, r0
- umull r6, r0, r4, r2
- ldr r4, [sp, #76] @ 4-byte Reload
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r6, r6, r4
- ldr r4, [sp, #72] @ 4-byte Reload
- adc r4, r4, #0
- adds r12, r12, r0
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r11, r10, r11
- adcs r9, r9, r0
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r10, r7, r0
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r7, r6, r0
- ldr r0, [sp, #48] @ 4-byte Reload
- adc r0, r4, r0
- subs r5, r12, r5
- sbcs r4, r11, lr
- mov lr, r0
- sbcs r6, r9, r8
- sbcs r1, r10, r1
- sbcs r8, r7, r3
- sbc r3, r0, r2
- ldr r2, [sp, #28] @ 4-byte Reload
- asr r0, r3, #31
- cmp r0, #0
- movlt r5, r12
- movlt r4, r11
- movlt r6, r9
- cmp r0, #0
- movlt r1, r10
- movlt r8, r7
- movlt r3, lr
- str r5, [r2]
- str r4, [r2, #4]
- str r6, [r2, #8]
- str r1, [r2, #12]
- str r8, [r2, #16]
- str r3, [r2, #20]
- add sp, sp, #88
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end84:
- .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L
- .cantunwind
- .fnend
-
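mcl_fp_montNF6L follows the same interleaved loop but ends differently: instead of testing an extra carry word with ands/movne, it subtracts p once and selects by the sign of the difference (the asr #31 / movlt pair). A hedged sketch of that branch-free selection (illustrative; the borrow test here is equivalent to the sign test the assembly performs):

    #include <stdint.h>

    /* Reduce t below p without branching: pick t when t < p, else t - p. */
    static void montNF_select_ref(uint32_t z[6], const uint32_t t[6], const uint32_t p[6]) {
        uint32_t s[6], borrow = 0;
        for (int i = 0; i < 6; i++) {
            uint64_t d = (uint64_t)t[i] - p[i] - borrow;
            s[i] = (uint32_t)d;
            borrow = (uint32_t)(d >> 32) & 1;
        }
        uint32_t mask = 0u - borrow;       /* all ones when t < p */
        for (int i = 0; i < 6; i++)
            z[i] = (t[i] & mask) | (s[i] & ~mask);
    }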
- .globl mcl_fp_montRed6L
- .align 2
- .type mcl_fp_montRed6L,%function
-mcl_fp_montRed6L: @ @mcl_fp_montRed6L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #100
- sub sp, sp, #100
- ldr r6, [r1, #4]
- ldr r10, [r2, #-4]
- ldr r9, [r1]
- ldr r3, [r2, #8]
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [r2]
- ldr r8, [r2, #4]
- str r6, [sp, #68] @ 4-byte Spill
- ldr r6, [r1, #8]
- mul r4, r9, r10
- str r3, [sp, #80] @ 4-byte Spill
- str r0, [sp, #76] @ 4-byte Spill
- str r10, [sp, #92] @ 4-byte Spill
- umull r12, r7, r4, r3
- str r7, [sp, #52] @ 4-byte Spill
- umull r7, r3, r4, r0
- mov lr, r12
- str r7, [sp, #56] @ 4-byte Spill
- mov r0, r3
- str r6, [sp, #64] @ 4-byte Spill
- ldr r6, [r1, #12]
- umlal r0, lr, r4, r8
- str r6, [sp, #60] @ 4-byte Spill
- ldr r6, [r2, #20]
- umull r5, r7, r4, r6
- str r6, [sp, #84] @ 4-byte Spill
- ldr r6, [r2, #16]
- ldr r2, [r2, #12]
- str r5, [sp, #44] @ 4-byte Spill
- str r7, [sp, #48] @ 4-byte Spill
- umull r5, r7, r4, r6
- str r6, [sp, #96] @ 4-byte Spill
- str r2, [sp, #88] @ 4-byte Spill
- str r7, [sp, #40] @ 4-byte Spill
- umull r6, r7, r4, r2
- umull r11, r2, r4, r8
- adds r3, r3, r11
- adcs r2, r2, r12
- ldr r3, [sp, #40] @ 4-byte Reload
- ldr r2, [sp, #52] @ 4-byte Reload
- adcs r12, r2, r6
- ldr r2, [sp, #44] @ 4-byte Reload
- adcs r11, r7, r5
- adcs r2, r3, r2
- str r2, [sp, #52] @ 4-byte Spill
- ldr r2, [sp, #48] @ 4-byte Reload
- adc r2, r2, #0
- str r2, [sp, #48] @ 4-byte Spill
- ldr r2, [sp, #56] @ 4-byte Reload
- adds r6, r9, r2
- ldr r2, [sp, #68] @ 4-byte Reload
- add r9, r1, #16
- adcs r0, r2, r0
- mul r6, r0, r10
- ldr r10, [sp, #80] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- umull r3, r0, r6, r10
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [r1, #32]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [r1, #36]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [r1, #40]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [r1, #44]
- str r0, [sp, #28] @ 4-byte Spill
- ldm r9, {r2, r4, r7, r9}
- ldr r5, [sp, #76] @ 4-byte Reload
- umull r0, r1, r6, r5
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, lr
- ldr lr, [sp, #84] @ 4-byte Reload
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r12
- mov r12, r3
- adcs r2, r2, r11
- str r0, [sp, #64] @ 4-byte Spill
- mov r0, r1
- str r2, [sp, #60] @ 4-byte Spill
- ldr r2, [sp, #52] @ 4-byte Reload
- umlal r0, r12, r6, r8
- adcs r2, r4, r2
- ldr r4, [sp, #96] @ 4-byte Reload
- str r2, [sp, #52] @ 4-byte Spill
- ldr r2, [sp, #48] @ 4-byte Reload
- adcs r2, r7, r2
- str r2, [sp, #48] @ 4-byte Spill
- adcs r2, r9, #0
- umull r9, r11, r6, lr
- str r2, [sp, #44] @ 4-byte Spill
- ldr r2, [sp, #40] @ 4-byte Reload
- adcs r2, r2, #0
- str r2, [sp, #40] @ 4-byte Spill
- ldr r2, [sp, #36] @ 4-byte Reload
- adcs r2, r2, #0
- str r2, [sp, #36] @ 4-byte Spill
- ldr r2, [sp, #32] @ 4-byte Reload
- adcs r2, r2, #0
- str r2, [sp, #32] @ 4-byte Spill
- ldr r2, [sp, #28] @ 4-byte Reload
- adcs r2, r2, #0
- str r2, [sp, #28] @ 4-byte Spill
- mov r2, #0
- adc r2, r2, #0
- str r2, [sp, #24] @ 4-byte Spill
- umull r7, r2, r6, r8
- adds r1, r1, r7
- adcs r2, r2, r3
- ldr r3, [sp, #88] @ 4-byte Reload
- umull r1, r7, r6, r4
- umull r2, r4, r6, r3
- ldr r6, [sp, #56] @ 4-byte Reload
- adcs r2, r6, r2
- adcs r1, r4, r1
- ldr r4, [sp, #20] @ 4-byte Reload
- str r2, [sp, #56] @ 4-byte Spill
- str r1, [sp, #4] @ 4-byte Spill
- adcs r1, r7, r9
- ldr r2, [sp, #56] @ 4-byte Reload
- str r1, [sp] @ 4-byte Spill
- ldr r1, [sp, #68] @ 4-byte Reload
- adc r7, r11, #0
- adds r6, r4, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- adcs r1, r0, r1
- ldr r0, [sp, #92] @ 4-byte Reload
- str r1, [sp, #20] @ 4-byte Spill
- mul r6, r1, r0
- umull r9, r0, r6, r10
- str r0, [sp, #8] @ 4-byte Spill
- umull r0, r1, r6, r5
- ldr r5, [sp, #60] @ 4-byte Reload
- mov r4, r9
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r12, r0
- adcs r5, r2, r5
- ldr r2, [sp, #4] @ 4-byte Reload
- str r0, [sp, #12] @ 4-byte Spill
- mov r0, r1
- str r5, [sp, #68] @ 4-byte Spill
- ldr r5, [sp, #52] @ 4-byte Reload
- umlal r0, r4, r6, r8
- adcs r2, r2, r5
- ldr r5, [sp] @ 4-byte Reload
- str r2, [sp, #64] @ 4-byte Spill
- ldr r2, [sp, #48] @ 4-byte Reload
- adcs r2, r5, r2
- umull r5, r10, r6, lr
- str r2, [sp, #60] @ 4-byte Spill
- ldr r2, [sp, #44] @ 4-byte Reload
- adcs r2, r7, r2
- umull r7, r12, r6, r8
- str r2, [sp, #56] @ 4-byte Spill
- ldr r2, [sp, #40] @ 4-byte Reload
- adcs r2, r2, #0
- str r2, [sp, #52] @ 4-byte Spill
- ldr r2, [sp, #36] @ 4-byte Reload
- adcs r2, r2, #0
- str r2, [sp, #48] @ 4-byte Spill
- ldr r2, [sp, #32] @ 4-byte Reload
- adcs r2, r2, #0
- str r2, [sp, #44] @ 4-byte Spill
- ldr r2, [sp, #28] @ 4-byte Reload
- adcs r2, r2, #0
- str r2, [sp, #40] @ 4-byte Spill
- ldr r2, [sp, #24] @ 4-byte Reload
- adc r2, r2, #0
- adds r1, r1, r7
- ldr r1, [sp, #96] @ 4-byte Reload
- str r2, [sp, #36] @ 4-byte Spill
- umull r7, r2, r6, r3
- ldr r3, [sp, #8] @ 4-byte Reload
- umull r11, lr, r6, r1
- adcs r6, r12, r9
- adcs r3, r3, r7
- adcs r12, r2, r11
- str r3, [sp, #8] @ 4-byte Spill
- ldr r3, [sp, #16] @ 4-byte Reload
- adcs r2, lr, r5
- ldr r5, [sp, #80] @ 4-byte Reload
- ldr lr, [sp, #76] @ 4-byte Reload
- str r2, [sp, #4] @ 4-byte Spill
- ldr r2, [sp, #20] @ 4-byte Reload
- adc r9, r10, #0
- adds r6, r3, r2
- ldr r2, [sp, #12] @ 4-byte Reload
- ldr r3, [sp, #92] @ 4-byte Reload
- adcs r0, r0, r2
- mul r6, r0, r3
- str r0, [sp, #32] @ 4-byte Spill
- umull r11, r0, r6, r5
- str r0, [sp, #24] @ 4-byte Spill
- umull r0, r7, r6, lr
- mov r10, r11
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- mov r2, r7
- umlal r2, r10, r6, r8
- adcs r0, r4, r0
- ldr r4, [sp, #8] @ 4-byte Reload
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r4, r0
- ldr r4, [sp, #4] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r12, r0
- ldr r12, [sp, #84] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r4, r0
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r9, r0
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #40] @ 4-byte Spill
- umull r4, r0, r6, r12
- str r4, [sp, #12] @ 4-byte Spill
- str r0, [sp, #36] @ 4-byte Spill
- umull r4, r0, r6, r1
- ldr r1, [sp, #88] @ 4-byte Reload
- str r0, [sp, #8] @ 4-byte Spill
- umull r9, r0, r6, r8
- adds r7, r7, r9
- adcs r0, r0, r11
- ldr r0, [sp, #24] @ 4-byte Reload
- umull r7, r9, r6, r1
- ldr r6, [sp, #28] @ 4-byte Reload
- adcs r0, r0, r7
- str r0, [sp, #24] @ 4-byte Spill
- adcs r0, r9, r4
- ldr r4, [sp, #8] @ 4-byte Reload
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #12] @ 4-byte Reload
- adcs r7, r4, r0
- ldr r0, [sp, #36] @ 4-byte Reload
- ldr r4, [sp, #32] @ 4-byte Reload
- adc r11, r0, #0
- adds r4, r6, r4
- ldr r0, [sp, #24] @ 4-byte Reload
- ldr r4, [sp, #20] @ 4-byte Reload
- adcs r2, r2, r4
- mul r4, r2, r3
- str r2, [sp, #36] @ 4-byte Spill
- umull r9, r2, r4, r5
- ldr r5, [sp, #68] @ 4-byte Reload
- str r2, [sp, #28] @ 4-byte Spill
- umull r3, r2, r4, lr
- mov r6, r2
- str r3, [sp, #32] @ 4-byte Spill
- mov r3, r9
- umlal r6, r3, r4, r8
- adcs r5, r10, r5
- str r5, [sp, #68] @ 4-byte Spill
- ldr r5, [sp, #64] @ 4-byte Reload
- adcs r5, r0, r5
- ldr r0, [sp, #16] @ 4-byte Reload
- str r5, [sp, #64] @ 4-byte Spill
- ldr r5, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r7, r0
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r11, r0
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #40] @ 4-byte Spill
- umull r7, r0, r4, r12
- mov r12, r1
- str r0, [sp, #24] @ 4-byte Spill
- umull r11, r0, r4, r8
- str r7, [sp, #20] @ 4-byte Spill
- ldr r7, [sp, #96] @ 4-byte Reload
- umull r1, r5, r4, r12
- adds r2, r2, r11
- adcs r0, r0, r9
- ldr r2, [sp, #20] @ 4-byte Reload
- ldr r0, [sp, #28] @ 4-byte Reload
- umull lr, r10, r4, r7
- ldr r4, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r1
- adcs r1, r5, lr
- ldr r5, [sp, #24] @ 4-byte Reload
- adcs r2, r10, r2
- adc lr, r5, #0
- ldr r5, [sp, #32] @ 4-byte Reload
- adds r4, r5, r4
- ldr r5, [sp, #76] @ 4-byte Reload
- ldr r4, [sp, #68] @ 4-byte Reload
- adcs r9, r6, r4
- ldr r4, [sp, #64] @ 4-byte Reload
- ldr r6, [sp, #80] @ 4-byte Reload
- adcs r3, r3, r4
- str r3, [sp, #68] @ 4-byte Spill
- ldr r3, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #92] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r2, r0
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, lr, r0
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #44] @ 4-byte Spill
- mul r0, r9, r1
- umull r2, r4, r0, r5
- umull r1, r3, r0, r8
- adds r1, r4, r1
- str r2, [sp, #92] @ 4-byte Spill
- umull r1, r2, r0, r6
- adcs r3, r3, r1
- umlal r4, r1, r0, r8
- umull r3, lr, r0, r12
- adcs r10, r2, r3
- umull r3, r2, r0, r7
- adcs r11, lr, r3
- ldr lr, [sp, #84] @ 4-byte Reload
- umull r7, r3, r0, lr
- adcs r2, r2, r7
- ldr r7, [sp, #64] @ 4-byte Reload
- adc r0, r3, #0
- ldr r3, [sp, #92] @ 4-byte Reload
- adds r3, r3, r9
- ldr r3, [sp, #68] @ 4-byte Reload
- adcs r3, r4, r3
- ldr r4, [sp, #88] @ 4-byte Reload
- adcs r12, r1, r7
- ldr r1, [sp, #60] @ 4-byte Reload
- adcs r10, r10, r1
- ldr r1, [sp, #56] @ 4-byte Reload
- adcs r9, r11, r1
- ldr r1, [sp, #52] @ 4-byte Reload
- adcs r7, r2, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- adcs r1, r0, r1
- ldr r0, [sp, #44] @ 4-byte Reload
- adc r11, r0, #0
- subs r0, r3, r5
- sbcs r5, r12, r8
- mov r8, r7
- sbcs r2, r10, r6
- ldr r6, [sp, #96] @ 4-byte Reload
- sbcs r4, r9, r4
- sbcs r6, r7, r6
- sbcs r7, r1, lr
- mov lr, r1
- sbc r1, r11, #0
- ands r1, r1, #1
- movne r0, r3
- ldr r3, [sp, #72] @ 4-byte Reload
- movne r5, r12
- movne r2, r10
- cmp r1, #0
- movne r4, r9
- movne r6, r8
- movne r7, lr
- str r0, [r3]
- str r5, [r3, #4]
- str r2, [r3, #8]
- str r4, [r3, #12]
- str r6, [r3, #16]
- str r7, [r3, #20]
- add sp, sp, #100
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end85:
- .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L
- .cantunwind
- .fnend
-
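mcl_fp_montRed6L is the standalone Montgomery reduction: it takes a 12-limb value below p*2^192 and divides by R = 2^192 modulo p, computing one quotient limb m = t[i]*inv per iteration (the mul r4, r9, r10-style step). A hedged C sketch under the same assumptions (names illustrative):

    #include <stdint.h>

    /* z = xy * 2^-192 mod p, for xy[0..11] < p * 2^192. */
    static void fp_montRed6_ref(uint32_t z[6], const uint32_t xy[12],
                                const uint32_t p[6], uint32_t inv) {
        uint32_t t[13];
        for (int i = 0; i < 12; i++) t[i] = xy[i];
        t[12] = 0;
        for (int i = 0; i < 6; i++) {
            uint32_t m = t[i] * inv;          /* clears limb i when m*p is added */
            uint64_t c = 0;
            for (int j = 0; j < 6; j++) {
                c += (uint64_t)t[i + j] + (uint64_t)m * p[j];
                t[i + j] = (uint32_t)c; c >>= 32;
            }
            for (int j = i + 6; c != 0 && j < 13; j++) {  /* ripple the carry */
                c += t[j]; t[j] = (uint32_t)c; c >>= 32;
            }
        }
        uint32_t s[6], borrow = 0;            /* t[6..12] holds the result < 2p */
        for (int i = 0; i < 6; i++) {
            uint64_t d = (uint64_t)t[6 + i] - p[i] - borrow;
            s[i] = (uint32_t)d;
            borrow = (uint32_t)(d >> 32) & 1;
        }
        borrow = (t[12] < borrow);
        for (int i = 0; i < 6; i++) z[i] = borrow ? t[6 + i] : s[i];
    }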
- .globl mcl_fp_addPre6L
- .align 2
- .type mcl_fp_addPre6L,%function
-mcl_fp_addPre6L: @ @mcl_fp_addPre6L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, lr}
- push {r4, r5, r6, r7, r8, r9, r10, lr}
- ldm r1, {r9, r12, lr}
- ldr r10, [r1, #12]
- ldr r5, [r1, #16]
- ldr r8, [r1, #20]
- ldm r2, {r6, r7}
- add r4, r2, #8
- ldm r4, {r1, r3, r4}
- ldr r2, [r2, #20]
- adds r6, r6, r9
- adcs r7, r7, r12
- add r12, r0, #8
- adcs r1, r1, lr
- stm r0, {r6, r7}
- adcs r3, r3, r10
- adcs r5, r4, r5
- adcs r2, r2, r8
- stm r12, {r1, r3, r5}
- str r2, [r0, #20]
- mov r0, #0
- adc r0, r0, #0
- pop {r4, r5, r6, r7, r8, r9, r10, lr}
- mov pc, lr
-.Lfunc_end86:
- .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L
- .cantunwind
- .fnend
-
- .globl mcl_fp_subPre6L
- .align 2
- .type mcl_fp_subPre6L,%function
-mcl_fp_subPre6L: @ @mcl_fp_subPre6L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, lr}
- push {r4, r5, r6, r7, r8, r9, r10, lr}
- ldm r2, {r9, r12, lr}
- ldr r10, [r2, #12]
- ldr r5, [r2, #16]
- ldr r8, [r2, #20]
- ldm r1, {r6, r7}
- add r4, r1, #8
- ldm r4, {r2, r3, r4}
- ldr r1, [r1, #20]
- subs r6, r6, r9
- sbcs r7, r7, r12
- add r12, r0, #8
- sbcs r2, r2, lr
- stm r0, {r6, r7}
- sbcs r3, r3, r10
- sbcs r5, r4, r5
- sbcs r1, r1, r8
- stm r12, {r2, r3, r5}
- str r1, [r0, #20]
- mov r0, #0
- sbc r0, r0, #0
- and r0, r0, #1
- pop {r4, r5, r6, r7, r8, r9, r10, lr}
- mov pc, lr
-.Lfunc_end87:
- .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L
- .cantunwind
- .fnend
-
- .globl mcl_fp_shr1_6L
- .align 2
- .type mcl_fp_shr1_6L,%function
-mcl_fp_shr1_6L: @ @mcl_fp_shr1_6L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, lr}
- push {r4, r5, r6, lr}
- ldr r3, [r1, #4]
- ldr r12, [r1]
- ldr lr, [r1, #12]
- ldr r2, [r1, #8]
- ldr r4, [r1, #16]
- ldr r1, [r1, #20]
- lsrs r5, r3, #1
- lsr r3, r3, #1
- rrx r12, r12
- lsrs r5, lr, #1
- orr r6, r3, r2, lsl #31
- lsr r5, lr, #1
- rrx r2, r2
- lsrs r3, r1, #1
- lsr r1, r1, #1
- str r12, [r0]
- str r6, [r0, #4]
- orr r5, r5, r4, lsl #31
- rrx r3, r4
- str r2, [r0, #8]
- str r5, [r0, #12]
- str r3, [r0, #16]
- str r1, [r0, #20]
- pop {r4, r5, r6, lr}
- mov pc, lr
-.Lfunc_end88:
- .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L
- .cantunwind
- .fnend
-
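The three helpers above are the unreduced primitives: mcl_fp_addPre6L and mcl_fp_subPre6L propagate a plain carry or borrow across the six limbs and return it, and mcl_fp_shr1_6L shifts right by one bit, pairing each lsr with the next limb's low bit the way the lsrs/rrx sequence does. Illustrative C equivalents (names assumed):

    #include <stdint.h>

    /* z = x + y; returns the carry out, like the trailing adc r0, r0, #0. */
    static uint32_t fp_addPre6_ref(uint32_t z[6], const uint32_t x[6], const uint32_t y[6]) {
        uint64_t c = 0;
        for (int i = 0; i < 6; i++) {
            c += (uint64_t)x[i] + y[i];
            z[i] = (uint32_t)c; c >>= 32;
        }
        return (uint32_t)c;
    }

    /* z = x >> 1, shifting each limb's low bit into the limb below. */
    static void fp_shr1_6_ref(uint32_t z[6], const uint32_t x[6]) {
        for (int i = 0; i < 5; i++)
            z[i] = (x[i] >> 1) | (x[i + 1] << 31);
        z[5] = x[5] >> 1;
    }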
- .globl mcl_fp_add6L
- .align 2
- .type mcl_fp_add6L,%function
-mcl_fp_add6L: @ @mcl_fp_add6L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- ldm r1, {r9, r12, lr}
- ldr r7, [r2]
- ldr r10, [r1, #12]
- ldr r11, [r1, #16]
- ldr r8, [r1, #20]
- ldmib r2, {r1, r4, r5, r6}
- ldr r2, [r2, #20]
- adds r7, r7, r9
- adcs r12, r1, r12
- add r1, r0, #8
- adcs r4, r4, lr
- stm r0, {r7, r12}
- adcs r5, r5, r10
- adcs r6, r6, r11
- stm r1, {r4, r5, r6}
- adcs r2, r2, r8
- mov r1, #0
- str r2, [r0, #20]
- adc r9, r1, #0
- ldm r3, {r1, lr}
- ldr r10, [r3, #8]
- ldr r11, [r3, #12]
- ldr r8, [r3, #16]
- ldr r3, [r3, #20]
- subs r7, r7, r1
- sbcs r1, r12, lr
- sbcs r10, r4, r10
- sbcs r12, r5, r11
- sbcs lr, r6, r8
- sbcs r4, r2, r3
- sbc r2, r9, #0
- tst r2, #1
- streq r7, [r0]
- stmibeq r0, {r1, r10, r12, lr}
- streq r4, [r0, #20]
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end89:
- .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L
- .cantunwind
- .fnend
-
- .globl mcl_fp_addNF6L
- .align 2
- .type mcl_fp_addNF6L,%function
-mcl_fp_addNF6L: @ @mcl_fp_addNF6L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- add r11, r1, #8
- ldm r1, {r12, lr}
- ldm r11, {r9, r10, r11}
- ldr r7, [r2]
- ldr r8, [r1, #20]
- ldmib r2, {r1, r4, r5, r6}
- ldr r2, [r2, #20]
- adds r7, r7, r12
- adcs r1, r1, lr
- adcs r4, r4, r9
- adcs r9, r5, r10
- adcs lr, r6, r11
- add r11, r3, #8
- adc r12, r2, r8
- ldm r3, {r2, r6}
- ldm r11, {r5, r8, r10, r11}
- subs r2, r7, r2
- sbcs r6, r1, r6
- sbcs r5, r4, r5
- sbcs r3, r9, r8
- sbcs r8, lr, r10
- sbc r10, r12, r11
- asr r11, r10, #31
- cmp r11, #0
- movlt r2, r7
- movlt r6, r1
- movlt r5, r4
- cmp r11, #0
- movlt r3, r9
- movlt r8, lr
- movlt r10, r12
- str r2, [r0]
- str r6, [r0, #4]
- str r5, [r0, #8]
- str r3, [r0, #12]
- str r8, [r0, #16]
- str r10, [r0, #20]
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end90:
- .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L
- .cantunwind
- .fnend
-
- .globl mcl_fp_sub6L
- .align 2
- .type mcl_fp_sub6L,%function
-mcl_fp_sub6L: @ @mcl_fp_sub6L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- ldr r9, [r2]
- ldmib r2, {r8, r12, lr}
- ldr r10, [r2, #16]
- ldr r11, [r2, #20]
- ldm r1, {r2, r4, r5, r6, r7}
- ldr r1, [r1, #20]
- subs r9, r2, r9
- sbcs r2, r4, r8
- str r9, [r0]
- sbcs r12, r5, r12
- sbcs lr, r6, lr
- sbcs r4, r7, r10
- stmib r0, {r2, r12, lr}
- sbcs r5, r1, r11
- mov r1, #0
- str r4, [r0, #16]
- sbc r1, r1, #0
- str r5, [r0, #20]
- tst r1, #1
- popeq {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- moveq pc, lr
- ldm r3, {r1, r6, r7, r8, r10}
- ldr r3, [r3, #20]
- adds r1, r1, r9
- adcs r2, r6, r2
- adcs r7, r7, r12
- adcs r6, r8, lr
- stm r0, {r1, r2, r7}
- adcs r4, r10, r4
- str r6, [r0, #12]
- adc r3, r3, r5
- str r4, [r0, #16]
- str r3, [r0, #20]
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end91:
- .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L
- .cantunwind
- .fnend
-
- .globl mcl_fp_subNF6L
- .align 2
- .type mcl_fp_subNF6L,%function
-mcl_fp_subNF6L: @ @mcl_fp_subNF6L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- add r11, r2, #8
- ldm r2, {r12, lr}
- ldm r11, {r9, r10, r11}
- ldr r7, [r1]
- ldr r8, [r2, #20]
- ldmib r1, {r2, r4, r5, r6}
- ldr r1, [r1, #20]
- subs r7, r7, r12
- sbcs r2, r2, lr
- sbcs r9, r4, r9
- sbcs lr, r5, r10
- ldr r5, [r3, #4]
- sbcs r12, r6, r11
- ldr r6, [r3]
- add r11, r3, #8
- sbc r1, r1, r8
- ldm r11, {r4, r8, r10, r11}
- adds r6, r7, r6
- adcs r5, r2, r5
- adcs r4, r9, r4
- adcs r3, lr, r8
- adcs r8, r12, r10
- adc r10, r1, r11
- asr r11, r1, #31
- cmp r11, #0
- movge r6, r7
- movge r5, r2
- movge r4, r9
- cmp r11, #0
- movge r3, lr
- movge r8, r12
- movge r10, r1
- str r6, [r0]
- str r5, [r0, #4]
- str r4, [r0, #8]
- str r3, [r0, #12]
- str r8, [r0, #16]
- str r10, [r0, #20]
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end92:
- .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L
- .cantunwind
- .fnend
-
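mcl_fp_add6L and mcl_fp_sub6L are the fully reduced versions of the Pre helpers: add (or subtract) the limbs, then fold the result back below p, keeping the corrected value only when the tst/streq tail (resp. the add-back after a borrow) says it is needed. A sketch of the addition path, assuming p occupies the full 192 bits (names illustrative):

    #include <stdint.h>

    /* z = (x + y) mod p, inputs already < p. */
    static void fp_add6_ref(uint32_t z[6], const uint32_t x[6],
                            const uint32_t y[6], const uint32_t p[6]) {
        uint32_t t[6], s[6];
        uint64_t c = 0;
        for (int i = 0; i < 6; i++) {         /* adds/adcs chain */
            c += (uint64_t)x[i] + y[i];
            t[i] = (uint32_t)c; c >>= 32;
        }
        uint32_t carry = (uint32_t)c, borrow = 0;
        for (int i = 0; i < 6; i++) {         /* subs/sbcs chain against p */
            uint64_t d = (uint64_t)t[i] - p[i] - borrow;
            s[i] = (uint32_t)d;
            borrow = (uint32_t)(d >> 32) & 1;
        }
        /* sbc r2, carry, borrow; tst r2, #1: store s only when they match */
        int reduce = (carry == borrow);
        for (int i = 0; i < 6; i++) z[i] = reduce ? s[i] : t[i];
    }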
- .globl mcl_fpDbl_add6L
- .align 2
- .type mcl_fpDbl_add6L,%function
-mcl_fpDbl_add6L: @ @mcl_fpDbl_add6L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #32
- sub sp, sp, #32
- ldm r1, {r12, lr}
- ldr r8, [r1, #8]
- ldr r10, [r1, #12]
- ldmib r2, {r6, r7}
- ldr r5, [r2, #16]
- ldr r11, [r2]
- ldr r4, [r2, #12]
- str r5, [sp] @ 4-byte Spill
- ldr r5, [r2, #20]
- adds r9, r11, r12
- add r11, r1, #32
- adcs r6, r6, lr
- add lr, r1, #16
- adcs r7, r7, r8
- str r5, [sp, #4] @ 4-byte Spill
- ldr r5, [r2, #24]
- str r5, [sp, #16] @ 4-byte Spill
- ldr r5, [r2, #28]
- str r5, [sp, #28] @ 4-byte Spill
- ldr r5, [r2, #32]
- str r5, [sp, #8] @ 4-byte Spill
- ldr r5, [r2, #36]
- str r5, [sp, #12] @ 4-byte Spill
- ldr r5, [r2, #40]
- ldr r2, [r2, #44]
- str r5, [sp, #20] @ 4-byte Spill
- str r2, [sp, #24] @ 4-byte Spill
- adcs r5, r4, r10
- ldm r11, {r4, r8, r11}
- ldr r10, [r1, #44]
- ldm lr, {r1, r2, r12, lr}
- str r9, [r0]
- stmib r0, {r6, r7}
- ldr r6, [sp] @ 4-byte Reload
- str r5, [r0, #12]
- ldr r5, [sp, #4] @ 4-byte Reload
- ldr r7, [sp, #8] @ 4-byte Reload
- adcs r1, r6, r1
- adcs r2, r5, r2
- str r1, [r0, #16]
- ldr r1, [sp, #16] @ 4-byte Reload
- ldr r5, [r3]
- str r2, [r0, #20]
- ldr r2, [sp, #28] @ 4-byte Reload
- adcs r1, r1, r12
- adcs r2, r2, lr
- adcs r12, r7, r4
- ldr r7, [sp, #12] @ 4-byte Reload
- mov r4, #0
- adcs r9, r7, r8
- ldr r7, [sp, #20] @ 4-byte Reload
- adcs r8, r7, r11
- ldr r7, [sp, #24] @ 4-byte Reload
- adcs lr, r7, r10
- adc r7, r4, #0
- ldmib r3, {r4, r6, r10, r11}
- subs r5, r1, r5
- ldr r3, [r3, #20]
- sbcs r4, r2, r4
- sbcs r6, r12, r6
- sbcs r10, r9, r10
- sbcs r11, r8, r11
- sbcs r3, lr, r3
- sbc r7, r7, #0
- ands r7, r7, #1
- movne r5, r1
- movne r4, r2
- movne r6, r12
- cmp r7, #0
- add r1, r0, #32
- movne r10, r9
- movne r11, r8
- movne r3, lr
- str r5, [r0, #24]
- str r4, [r0, #28]
- stm r1, {r6, r10, r11}
- str r3, [r0, #44]
- add sp, sp, #32
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end93:
- .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L
- .cantunwind
- .fnend
-
- .globl mcl_fpDbl_sub6L
- .align 2
- .type mcl_fpDbl_sub6L,%function
-mcl_fpDbl_sub6L: @ @mcl_fpDbl_sub6L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #44
- sub sp, sp, #44
- ldr r6, [r2, #8]
- ldr r7, [r2, #32]
- add r10, r1, #12
- str r6, [sp] @ 4-byte Spill
- ldr r6, [r2, #12]
- str r7, [sp, #28] @ 4-byte Spill
- ldr r7, [r2, #36]
- str r6, [sp, #4] @ 4-byte Spill
- ldr r6, [r2, #16]
- str r7, [sp, #32] @ 4-byte Spill
- ldr r7, [r2, #40]
- str r6, [sp, #8] @ 4-byte Spill
- ldr r6, [r2, #20]
- str r7, [sp, #36] @ 4-byte Spill
- ldr r7, [r2, #44]
- str r6, [sp, #12] @ 4-byte Spill
- ldr r6, [r2, #24]
- str r7, [sp, #40] @ 4-byte Spill
- ldr r7, [r1, #44]
- str r6, [sp, #16] @ 4-byte Spill
- ldr r6, [r2, #28]
- str r7, [sp, #24] @ 4-byte Spill
- ldr r7, [r2, #4]
- ldr r2, [r2]
- str r6, [sp, #20] @ 4-byte Spill
- ldm r10, {r4, r5, r8, r9, r10}
- ldm r1, {r11, r12, lr}
- ldr r6, [sp] @ 4-byte Reload
- subs r2, r11, r2
- ldr r11, [r1, #40]
- sbcs r7, r12, r7
- ldr r12, [r1, #36]
- ldr r1, [r1, #32]
- sbcs lr, lr, r6
- ldr r6, [sp, #4] @ 4-byte Reload
- stm r0, {r2, r7, lr}
- mov lr, #0
- ldr r2, [sp, #8] @ 4-byte Reload
- sbcs r4, r4, r6
- str r4, [r0, #12]
- sbcs r2, r5, r2
- ldr r5, [sp, #24] @ 4-byte Reload
- str r2, [r0, #16]
- ldr r2, [sp, #12] @ 4-byte Reload
- sbcs r2, r8, r2
- str r2, [r0, #20]
- ldr r2, [sp, #16] @ 4-byte Reload
- sbcs r7, r9, r2
- ldr r2, [sp, #20] @ 4-byte Reload
- sbcs r6, r10, r2
- ldr r2, [sp, #28] @ 4-byte Reload
- sbcs r1, r1, r2
- ldr r2, [sp, #32] @ 4-byte Reload
- sbcs r10, r12, r2
- ldr r2, [sp, #36] @ 4-byte Reload
- sbcs r9, r11, r2
- ldr r2, [sp, #40] @ 4-byte Reload
- sbcs r8, r5, r2
- sbc r12, lr, #0
- ldm r3, {r2, r4, r5, lr}
- ldr r11, [r3, #16]
- ldr r3, [r3, #20]
- adds r2, r7, r2
- adcs r4, r6, r4
- adcs r5, r1, r5
- adcs lr, r10, lr
- adcs r11, r9, r11
- adc r3, r8, r3
- ands r12, r12, #1
- moveq r2, r7
- moveq r4, r6
- moveq r5, r1
- cmp r12, #0
- moveq lr, r10
- moveq r11, r9
- moveq r3, r8
- str r2, [r0, #24]
- str r4, [r0, #28]
- str r5, [r0, #32]
- str lr, [r0, #36]
- str r11, [r0, #40]
- str r3, [r0, #44]
- add sp, sp, #44
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end94:
- .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L
- .cantunwind
- .fnend
-
- .globl mcl_fp_mulUnitPre7L
- .align 2
- .type mcl_fp_mulUnitPre7L,%function
-mcl_fp_mulUnitPre7L: @ @mcl_fp_mulUnitPre7L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- ldr r12, [r1]
- ldmib r1, {r3, lr}
- ldr r11, [r1, #12]
- ldr r10, [r1, #16]
- ldr r9, [r1, #20]
- ldr r8, [r1, #24]
- umull r7, r1, lr, r2
- umull lr, r4, r12, r2
- mov r5, r4
- mov r6, r7
- str lr, [r0]
- umlal r5, r6, r3, r2
- stmib r0, {r5, r6}
- umull r6, r5, r3, r2
- adds r3, r4, r6
- umull r3, r6, r11, r2
- adcs r7, r5, r7
- adcs r1, r1, r3
- str r1, [r0, #12]
- umull r1, r3, r10, r2
- adcs r1, r6, r1
- str r1, [r0, #16]
- umull r1, r7, r9, r2
- adcs r1, r3, r1
- str r1, [r0, #20]
- umull r1, r3, r8, r2
- adcs r1, r7, r1
- str r1, [r0, #24]
- adc r1, r3, #0
- str r1, [r0, #28]
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end95:
- .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L
- .cantunwind
- .fnend
-
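mcl_fp_mulUnitPre7L opens the 7-limb block: it multiplies a 7-limb operand by a single 32-bit word into 8 limbs, one umull/umlal plus the previous carry per limb. An illustrative C equivalent (name assumed):

    #include <stdint.h>

    /* z[0..7] = x[0..6] * w, 32-bit limbs. */
    static void fp_mulUnitPre7_ref(uint32_t z[8], const uint32_t x[7], uint32_t w) {
        uint32_t carry = 0;
        for (int i = 0; i < 7; i++) {
            uint64_t t = (uint64_t)x[i] * w + carry;
            z[i] = (uint32_t)t;
            carry = (uint32_t)(t >> 32);
        }
        z[7] = carry;                      /* the final adc r1, r3, #0 */
    }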
- .globl mcl_fpDbl_mulPre7L
- .align 2
- .type mcl_fpDbl_mulPre7L,%function
-mcl_fpDbl_mulPre7L: @ @mcl_fpDbl_mulPre7L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #68
- sub sp, sp, #68
- mov r3, r2
- ldr r7, [r1]
- ldr lr, [r1, #4]
- mov r9, r0
- ldr r0, [r1, #8]
- ldr r2, [r1, #12]
- ldr r10, [r1, #16]
- ldr r8, [r1, #20]
- str r3, [sp, #64] @ 4-byte Spill
- ldr r3, [r3]
- str r9, [sp, #60] @ 4-byte Spill
- str r7, [sp, #28] @ 4-byte Spill
- str r0, [sp, #56] @ 4-byte Spill
- str r2, [sp, #44] @ 4-byte Spill
- umull r5, r4, r7, r3
- umull r6, r12, lr, r3
- adds r6, r4, r6
- str r5, [sp, #48] @ 4-byte Spill
- umull r5, r6, r0, r3
- adcs r7, r12, r5
- umlal r4, r5, lr, r3
- umull r7, r11, r2, r3
- adcs r0, r6, r7
- str r0, [sp, #52] @ 4-byte Spill
- umull r6, r0, r10, r3
- adcs r2, r11, r6
- umull r11, r7, r8, r3
- ldr r6, [sp, #28] @ 4-byte Reload
- adcs r0, r0, r11
- str r2, [sp, #40] @ 4-byte Spill
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [r1, #24]
- umull r11, r12, r0, r3
- adcs r2, r7, r11
- str r2, [sp, #20] @ 4-byte Spill
- ldr r2, [sp, #48] @ 4-byte Reload
- str r2, [r9]
- ldr r2, [sp, #64] @ 4-byte Reload
- ldr r3, [r2, #4]
- umull r11, r7, r6, r3
- str r7, [sp, #32] @ 4-byte Spill
- adc r7, r12, #0
- str r7, [sp, #16] @ 4-byte Spill
- adds r7, r11, r4
- str r7, [sp, #48] @ 4-byte Spill
- umull r4, r7, lr, r3
- str r7, [sp, #28] @ 4-byte Spill
- adcs r7, r4, r5
- str r7, [sp, #12] @ 4-byte Spill
- ldr r7, [sp, #44] @ 4-byte Reload
- umull r4, r5, r7, r3
- ldr r7, [sp, #56] @ 4-byte Reload
- str r5, [sp, #24] @ 4-byte Spill
- umull r5, r6, r7, r3
- ldr r7, [sp, #52] @ 4-byte Reload
- str r6, [sp, #44] @ 4-byte Spill
- ldr r6, [sp, #20] @ 4-byte Reload
- adcs r11, r5, r7
- ldr r7, [sp, #40] @ 4-byte Reload
- ldr r5, [sp, #12] @ 4-byte Reload
- adcs lr, r4, r7
- umull r9, r7, r10, r3
- str r7, [sp, #56] @ 4-byte Spill
- ldr r7, [sp, #36] @ 4-byte Reload
- adcs r7, r9, r7
- umull r4, r9, r8, r3
- adcs r4, r4, r6
- umull r6, r12, r0, r3
- ldr r0, [sp, #16] @ 4-byte Reload
- adcs r3, r6, r0
- mov r0, #0
- adc r6, r0, #0
- ldr r0, [sp, #32] @ 4-byte Reload
- adds r8, r5, r0
- ldr r0, [sp, #28] @ 4-byte Reload
- adcs r5, r11, r0
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, lr, r0
- ldr lr, [r1, #12]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #24] @ 4-byte Reload
- adcs r7, r7, r0
- ldr r0, [sp, #56] @ 4-byte Reload
- str r7, [sp, #24] @ 4-byte Spill
- adcs r7, r4, r0
- ldr r0, [sp, #60] @ 4-byte Reload
- ldr r4, [r1, #4]
- adcs r3, r3, r9
- ldr r9, [r1, #8]
- str r7, [sp, #36] @ 4-byte Spill
- str r3, [sp, #40] @ 4-byte Spill
- adc r3, r6, r12
- ldr r6, [r2, #8]
- str r3, [sp, #44] @ 4-byte Spill
- ldr r3, [sp, #48] @ 4-byte Reload
- str r4, [sp, #52] @ 4-byte Spill
- str r3, [r0, #4]
- ldr r3, [r1]
- umull r12, r7, r3, r6
- str r3, [sp, #56] @ 4-byte Spill
- str r7, [sp, #32] @ 4-byte Spill
- adds r3, r12, r8
- umull r7, r0, r4, r6
- ldr r12, [r1, #24]
- str r0, [sp, #28] @ 4-byte Spill
- adcs r2, r7, r5
- umull r7, r0, r9, r6
- str r3, [sp, #48] @ 4-byte Spill
- ldr r10, [sp, #32] @ 4-byte Reload
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #16] @ 4-byte Reload
- adcs r0, r7, r0
- str r0, [sp, #12] @ 4-byte Spill
- umull r5, r0, lr, r6
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #24] @ 4-byte Reload
- adcs r0, r5, r0
- ldr r5, [sp, #12] @ 4-byte Reload
- str r0, [sp, #4] @ 4-byte Spill
- ldr r0, [r1, #16]
- umull r11, r3, r0, r6
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- str r3, [sp, #8] @ 4-byte Spill
- ldr r3, [r1, #20]
- adcs r11, r11, r0
- ldr r0, [sp, #40] @ 4-byte Reload
- umull r8, r4, r3, r6
- adcs r8, r8, r0
- umull r7, r0, r12, r6
- ldr r6, [sp, #44] @ 4-byte Reload
- adcs r6, r7, r6
- mov r7, #0
- adc r7, r7, #0
- adds r2, r2, r10
- str r2, [sp] @ 4-byte Spill
- ldr r2, [sp, #28] @ 4-byte Reload
- adcs r2, r5, r2
- ldr r5, [sp, #4] @ 4-byte Reload
- str r2, [sp, #12] @ 4-byte Spill
- ldr r2, [sp, #20] @ 4-byte Reload
- adcs r10, r5, r2
- ldr r2, [sp, #16] @ 4-byte Reload
- adcs r11, r11, r2
- ldr r2, [sp, #8] @ 4-byte Reload
- adcs r2, r8, r2
- ldr r8, [sp, #56] @ 4-byte Reload
- str r2, [sp, #28] @ 4-byte Spill
- adcs r2, r6, r4
- adc r0, r7, r0
- ldr r7, [sp, #60] @ 4-byte Reload
- str r2, [sp, #36] @ 4-byte Spill
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- str r0, [r7, #8]
- ldr r0, [sp, #64] @ 4-byte Reload
- ldr r6, [r0, #12]
- umull r2, r4, lr, r6
- str r4, [sp, #48] @ 4-byte Spill
- umull lr, r4, r9, r6
- str r4, [sp, #44] @ 4-byte Spill
- ldr r4, [sp, #52] @ 4-byte Reload
- umull r9, r5, r4, r6
- str r5, [sp, #32] @ 4-byte Spill
- umull r4, r5, r8, r6
- str r5, [sp, #52] @ 4-byte Spill
- ldr r5, [sp] @ 4-byte Reload
- adds r4, r4, r5
- umull r5, r8, r3, r6
- str r4, [sp, #56] @ 4-byte Spill
- ldr r4, [sp, #12] @ 4-byte Reload
- adcs r9, r9, r4
- adcs lr, lr, r10
- adcs r11, r2, r11
- ldr r2, [sp, #24] @ 4-byte Reload
- umull r4, r10, r2, r6
- ldr r2, [sp, #28] @ 4-byte Reload
- adcs r4, r4, r2
- ldr r2, [sp, #36] @ 4-byte Reload
- adcs r3, r5, r2
- umull r5, r2, r12, r6
- ldr r6, [sp, #40] @ 4-byte Reload
- adcs r12, r5, r6
- ldr r6, [sp, #52] @ 4-byte Reload
- mov r5, #0
- adc r5, r5, #0
- adds r9, r9, r6
- ldr r6, [sp, #32] @ 4-byte Reload
- adcs lr, lr, r6
- ldr r6, [sp, #44] @ 4-byte Reload
- adcs r6, r11, r6
- ldr r11, [r1, #8]
- str r6, [sp, #20] @ 4-byte Spill
- ldr r6, [sp, #48] @ 4-byte Reload
- adcs r4, r4, r6
- adcs r3, r3, r10
- str r4, [sp, #28] @ 4-byte Spill
- ldr r4, [r1, #12]
- adcs r12, r12, r8
- str r3, [sp, #40] @ 4-byte Spill
- adc r2, r5, r2
- str r2, [sp, #44] @ 4-byte Spill
- ldr r2, [sp, #56] @ 4-byte Reload
- str r2, [r7, #12]
- ldr r7, [r0, #16]
- ldr r0, [r1]
- ldr r2, [r1, #4]
- umull r8, r3, r0, r7
- str r0, [sp, #56] @ 4-byte Spill
- str r2, [sp, #52] @ 4-byte Spill
- adds r0, r8, r9
- str r3, [sp, #36] @ 4-byte Spill
- str r0, [sp, #48] @ 4-byte Spill
- umull r6, r0, r2, r7
- ldr r2, [r1, #24]
- str r0, [sp, #32] @ 4-byte Spill
- adcs r0, r6, lr
- ldr lr, [r1, #16]
- str r0, [sp, #16] @ 4-byte Spill
- umull r6, r0, r11, r7
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #20] @ 4-byte Reload
- adcs r0, r6, r0
- mov r6, #0
- str r0, [sp, #12] @ 4-byte Spill
- umull r3, r0, r4, r7
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #28] @ 4-byte Reload
- adcs r0, r3, r0
- ldr r3, [r1, #20]
- str r0, [sp, #8] @ 4-byte Spill
- umull r10, r0, lr, r7
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- umull r9, r5, r3, r7
- adcs r10, r10, r0
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r9, r9, r12
- umull r8, r12, r2, r7
- adcs r7, r8, r0
- ldr r0, [sp, #36] @ 4-byte Reload
- adc r8, r6, #0
- ldr r6, [sp, #16] @ 4-byte Reload
- adds r0, r6, r0
- ldr r6, [sp, #12] @ 4-byte Reload
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r6, r0
- ldr r6, [sp, #8] @ 4-byte Reload
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #24] @ 4-byte Reload
- adcs r0, r6, r0
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #20] @ 4-byte Reload
- adcs r10, r10, r0
- ldr r0, [sp, #28] @ 4-byte Reload
- adcs r0, r9, r0
- str r0, [sp, #24] @ 4-byte Spill
- adcs r0, r7, r5
- ldr r7, [sp, #48] @ 4-byte Reload
- str r0, [sp, #32] @ 4-byte Spill
- adc r0, r8, r12
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- str r7, [r0, #16]
- ldr r7, [sp, #64] @ 4-byte
Reload - ldr r7, [r7, #20] - umull r8, r6, r4, r7 - str r6, [sp, #48] @ 4-byte Spill - umull r4, r6, r11, r7 - str r6, [sp, #40] @ 4-byte Spill - ldr r6, [sp, #52] @ 4-byte Reload - umull r11, r5, r6, r7 - ldr r6, [sp, #56] @ 4-byte Reload - str r5, [sp, #28] @ 4-byte Spill - umull r5, r9, r6, r7 - ldr r6, [sp, #44] @ 4-byte Reload - adds r6, r5, r6 - str r6, [sp, #44] @ 4-byte Spill - ldr r6, [sp, #16] @ 4-byte Reload - adcs r11, r11, r6 - ldr r6, [sp, #12] @ 4-byte Reload - adcs r12, r4, r6 - ldr r6, [sp, #24] @ 4-byte Reload - adcs r10, r8, r10 - umull r5, r8, lr, r7 - umull r4, lr, r3, r7 - ldr r3, [sp, #32] @ 4-byte Reload - adcs r5, r5, r6 - adcs r3, r4, r3 - umull r4, r6, r2, r7 - ldr r2, [sp, #36] @ 4-byte Reload - adcs r2, r4, r2 - mov r4, #0 - adc r4, r4, #0 - adds r7, r11, r9 - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [sp, #28] @ 4-byte Reload - adcs r7, r12, r7 - str r7, [sp, #32] @ 4-byte Spill - ldr r7, [sp, #40] @ 4-byte Reload - adcs r9, r10, r7 - ldr r7, [sp, #48] @ 4-byte Reload - adcs r11, r5, r7 - adcs r3, r3, r8 - adcs r2, r2, lr - str r3, [sp, #40] @ 4-byte Spill - str r2, [sp, #52] @ 4-byte Spill - adc r2, r4, r6 - ldr r6, [r1] - str r2, [sp, #56] @ 4-byte Spill - ldr r2, [sp, #44] @ 4-byte Reload - str r2, [r0, #20] - ldr r0, [sp, #64] @ 4-byte Reload - ldr r4, [r0, #24] - ldmib r1, {r0, r3, r5} - umull r12, r2, r5, r4 - str r2, [sp, #64] @ 4-byte Spill - umull r5, r2, r3, r4 - umull r3, r10, r0, r4 - umull r0, r8, r6, r4 - ldr r6, [r1, #16] - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [sp, #36] @ 4-byte Reload - adds r0, r0, r2 - ldr r2, [sp, #40] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs lr, r3, r0 - adcs r9, r5, r9 - adcs r11, r12, r11 - umull r0, r12, r6, r4 - ldr r6, [r1, #20] - ldr r1, [r1, #24] - adcs r0, r0, r2 - ldr r2, [sp, #52] @ 4-byte Reload - umull r3, r5, r6, r4 - umull r6, r7, r1, r4 - ldr r1, [sp, #56] @ 4-byte Reload - mov r4, #0 - adcs r3, r3, r2 - ldr r2, [sp, #44] @ 4-byte Reload - adcs r1, r6, r1 - adc r4, r4, #0 - adds r6, lr, r8 - adcs lr, r9, r10 - adcs r8, r11, r2 - ldr r2, [sp, #64] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #60] @ 4-byte Reload - adcs r3, r3, r12 - adcs r1, r1, r5 - ldr r5, [sp, #48] @ 4-byte Reload - adc r7, r4, r7 - add r12, r2, #24 - stm r12, {r5, r6, lr} - str r8, [r2, #36] - str r0, [r2, #40] - str r3, [r2, #44] - str r1, [r2, #48] - str r7, [r2, #52] - add sp, sp, #68 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end96: - .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L - .cantunwind - .fnend - - .globl mcl_fpDbl_sqrPre7L - .align 2 - .type mcl_fpDbl_sqrPre7L,%function -mcl_fpDbl_sqrPre7L: @ @mcl_fpDbl_sqrPre7L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #60 - sub sp, sp, #60 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [r1, #16] - ldr r9, [r1, #20] - str r0, [sp, #8] @ 4-byte Spill - ldm r1, {r2, r3} - ldr r0, [r1, #8] - ldr r11, [r1, #12] - umull r6, r7, r2, r2 - str r0, [sp, #48] @ 4-byte Spill - umull r5, r4, r0, r2 - umull r12, r0, r3, r2 - umull r8, r10, r11, r2 - adds lr, r7, r12 - str r6, [sp, #32] @ 4-byte Spill - str r0, [sp, #52] @ 4-byte Spill - adcs r6, r0, r5 - umlal r7, r5, r3, r2 - adcs r0, r4, r8 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - umull r4, r6, r0, r2 - adcs r4, r10, r4 - mov r10, r9 - str r4, [sp, #40] @ 4-byte Spill - umull r4, r8, r10, r2 - adcs r6, r6, r4 - str r6, [sp, #28] @ 4-byte Spill - ldr 
r6, [r1, #24] - umull lr, r9, r6, r2 - adcs r4, r8, lr - str r4, [sp, #20] @ 4-byte Spill - adc r4, r9, #0 - adds r2, r12, r7 - ldr r12, [sp, #56] @ 4-byte Reload - str r2, [sp, #36] @ 4-byte Spill - umull r2, r7, r3, r3 - adcs r2, r2, r5 - str r7, [sp, #16] @ 4-byte Spill - umull r5, r8, r11, r3 - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [sp, #32] @ 4-byte Reload - str r2, [r12] - umull lr, r2, r6, r3 - str r2, [sp, #32] @ 4-byte Spill - umull r6, r2, r10, r3 - str r2, [sp, #24] @ 4-byte Spill - umull r2, r10, r0, r3 - ldr r0, [sp, #48] @ 4-byte Reload - umull r7, r9, r0, r3 - ldr r0, [sp, #44] @ 4-byte Reload - adcs r3, r7, r0 - ldr r0, [sp, #40] @ 4-byte Reload - adcs r7, r5, r0 - ldr r0, [sp, #28] @ 4-byte Reload - mov r5, #0 - adcs r2, r2, r0 - ldr r0, [sp, #20] @ 4-byte Reload - adcs r6, r6, r0 - ldr r0, [sp, #52] @ 4-byte Reload - adcs lr, lr, r4 - ldr r4, [sp, #12] @ 4-byte Reload - adc r5, r5, #0 - adds r11, r4, r0 - ldr r0, [sp, #16] @ 4-byte Reload - ldr r4, [r1, #4] - adcs r3, r3, r0 - ldr r0, [sp, #36] @ 4-byte Reload - str r4, [sp, #44] @ 4-byte Spill - adcs r7, r7, r9 - adcs r9, r2, r8 - ldr r2, [r1, #12] - str r0, [r12, #4] - ldr r0, [sp, #24] @ 4-byte Reload - adcs r12, r6, r10 - adcs r10, lr, r0 - ldr r0, [sp, #32] @ 4-byte Reload - ldr lr, [r1, #8] - adc r0, r5, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r1] - umull r8, r5, r0, lr - str r0, [sp, #48] @ 4-byte Spill - adds r0, r8, r11 - str r5, [sp, #32] @ 4-byte Spill - str r0, [sp, #52] @ 4-byte Spill - umull r5, r0, r4, lr - ldr r4, [r1, #16] - str r0, [sp, #28] @ 4-byte Spill - adcs r0, r5, r3 - str r0, [sp, #20] @ 4-byte Spill - umull r3, r0, lr, lr - str r0, [sp, #24] @ 4-byte Spill - adcs r0, r3, r7 - ldr r3, [r1, #20] - ldr r7, [sp, #40] @ 4-byte Reload - str r0, [sp, #16] @ 4-byte Spill - umull r0, r5, r2, lr - str r0, [sp, #12] @ 4-byte Spill - adcs r0, r0, r9 - ldr r9, [sp, #20] @ 4-byte Reload - str r5, [sp, #36] @ 4-byte Spill - str r0, [sp, #4] @ 4-byte Spill - umull r11, r0, r4, lr - str r0, [sp, #8] @ 4-byte Spill - umull r8, r0, r3, lr - adcs r11, r11, r12 - str r0, [sp] @ 4-byte Spill - ldr r0, [r1, #24] - adcs r8, r8, r10 - umull r10, r12, r0, lr - adcs lr, r10, r7 - mov r7, #0 - adc r10, r7, #0 - ldr r7, [sp, #32] @ 4-byte Reload - adds r6, r9, r7 - ldr r7, [sp, #28] @ 4-byte Reload - str r6, [sp, #20] @ 4-byte Spill - ldr r6, [sp, #16] @ 4-byte Reload - adcs r6, r6, r7 - ldr r7, [sp, #24] @ 4-byte Reload - str r6, [sp, #16] @ 4-byte Spill - ldr r6, [sp, #4] @ 4-byte Reload - adcs r6, r6, r7 - adcs r11, r11, r5 - ldr r5, [sp, #8] @ 4-byte Reload - adcs r5, r8, r5 - str r5, [sp, #8] @ 4-byte Spill - ldr r5, [sp] @ 4-byte Reload - adcs r7, lr, r5 - str r7, [sp, #4] @ 4-byte Spill - adc r7, r10, r12 - ldr r10, [sp, #48] @ 4-byte Reload - str r7, [sp] @ 4-byte Spill - umull r9, r7, r0, r2 - umull r5, r0, r3, r2 - str r7, [sp, #40] @ 4-byte Spill - ldr r7, [sp, #44] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - umull r3, r0, r4, r2 - str r0, [sp, #28] @ 4-byte Spill - umull r4, r0, r2, r2 - str r0, [sp, #24] @ 4-byte Spill - umull r8, lr, r10, r2 - umull r0, r12, r7, r2 - ldr r2, [sp, #20] @ 4-byte Reload - mov r7, #0 - adds r8, r8, r2 - ldr r2, [sp, #16] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #12] @ 4-byte Reload - adcs r6, r2, r6 - ldr r2, [sp, #8] @ 4-byte Reload - adcs r4, r4, r11 - adcs r3, r3, r2 - ldr r2, [sp, #4] @ 4-byte Reload - adcs r5, r5, r2 - ldr r2, [sp] @ 4-byte Reload - adcs r2, r9, r2 - adc r9, r7, #0 - adds r0, r0, lr - adcs r7, r6, r12 - ldr r6, [sp, #36] @ 
4-byte Reload - adcs r4, r4, r6 - ldr r6, [sp, #24] @ 4-byte Reload - adcs r11, r3, r6 - ldr r3, [sp, #28] @ 4-byte Reload - adcs r12, r5, r3 - ldr r3, [sp, #32] @ 4-byte Reload - ldr r5, [r1, #12] - adcs r10, r2, r3 - ldr r2, [sp, #40] @ 4-byte Reload - ldr r3, [sp, #56] @ 4-byte Reload - adc r2, r9, r2 - ldr r9, [r1, #4] - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [sp, #52] @ 4-byte Reload - str r9, [sp, #16] @ 4-byte Spill - str r2, [r3, #8] - str r8, [r3, #12] - ldr r2, [r1] - ldr r3, [r1, #16] - ldr r8, [r1, #8] - umull lr, r6, r2, r3 - str r2, [sp, #48] @ 4-byte Spill - str r8, [sp, #4] @ 4-byte Spill - adds r0, lr, r0 - ldr lr, [r1, #24] - str r6, [sp, #36] @ 4-byte Spill - str r0, [sp, #52] @ 4-byte Spill - umull r0, r2, r9, r3 - adcs r0, r0, r7 - str r2, [sp, #32] @ 4-byte Spill - str r0, [sp, #20] @ 4-byte Spill - umull r7, r0, r8, r3 - str r0, [sp, #28] @ 4-byte Spill - adcs r0, r7, r4 - ldr r9, [sp, #20] @ 4-byte Reload - str r0, [sp, #8] @ 4-byte Spill - umull r7, r0, r5, r3 - str r0, [sp, #24] @ 4-byte Spill - adcs r0, r7, r11 - mov r7, #0 - str r0, [sp] @ 4-byte Spill - umull r11, r0, r3, r3 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [r1, #20] - adcs r11, r11, r12 - umull r12, r2, r0, r3 - adcs r4, r12, r10 - umull r10, r8, lr, r3 - ldr r3, [sp, #44] @ 4-byte Reload - str r2, [sp, #40] @ 4-byte Spill - adcs r3, r10, r3 - adc r10, r7, #0 - ldr r7, [sp, #36] @ 4-byte Reload - adds r6, r9, r7 - ldr r7, [sp, #32] @ 4-byte Reload - str r6, [sp, #36] @ 4-byte Spill - ldr r6, [sp, #8] @ 4-byte Reload - adcs r6, r6, r7 - ldr r7, [sp, #28] @ 4-byte Reload - str r6, [sp, #20] @ 4-byte Spill - ldr r6, [sp] @ 4-byte Reload - adcs r6, r6, r7 - ldr r7, [sp, #24] @ 4-byte Reload - str r6, [sp, #8] @ 4-byte Spill - adcs r11, r11, r7 - ldr r7, [sp, #12] @ 4-byte Reload - adcs r4, r4, r7 - adcs r2, r3, r2 - ldr r3, [sp, #4] @ 4-byte Reload - str r2, [sp, #24] @ 4-byte Spill - umull r6, r2, r5, r0 - adc r10, r10, r8 - str r2, [sp, #44] @ 4-byte Spill - umull r5, r2, r3, r0 - str r2, [sp, #32] @ 4-byte Spill - ldr r2, [sp, #16] @ 4-byte Reload - umull r8, r3, r2, r0 - ldr r2, [sp, #48] @ 4-byte Reload - str r3, [sp, #28] @ 4-byte Spill - umull r3, r9, r2, r0 - ldr r2, [sp, #36] @ 4-byte Reload - adds r2, r3, r2 - ldr r3, [sp, #24] @ 4-byte Reload - str r2, [sp, #36] @ 4-byte Spill - ldr r2, [sp, #20] @ 4-byte Reload - adcs r7, r8, r2 - ldr r2, [sp, #8] @ 4-byte Reload - adcs r5, r5, r2 - adcs r6, r6, r11 - adcs r2, r12, r4 - umull r4, r8, r0, r0 - adcs r4, r4, r3 - umull r3, r11, lr, r0 - adcs r0, r3, r10 - mov r3, #0 - adc r3, r3, #0 - adds r7, r7, r9 - str r7, [sp, #24] @ 4-byte Spill - ldr r7, [sp, #28] @ 4-byte Reload - adcs r9, r5, r7 - ldr r5, [sp, #32] @ 4-byte Reload - adcs r6, r6, r5 - str r6, [sp, #32] @ 4-byte Spill - ldr r6, [sp, #44] @ 4-byte Reload - adcs r10, r2, r6 - ldr r2, [sp, #40] @ 4-byte Reload - adcs r12, r4, r2 - ldr r2, [sp, #56] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #44] @ 4-byte Spill - adc r0, r3, r11 - ldr r3, [r1, #24] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - str r0, [r2, #16] - ldr r0, [sp, #36] @ 4-byte Reload - str r0, [r2, #20] - ldm r1, {r0, r4} - ldr r5, [r1, #12] - ldr r2, [r1, #8] - umull lr, r6, r5, r3 - umull r5, r11, r2, r3 - umull r2, r8, r4, r3 - str r6, [sp, #52] @ 4-byte Spill - umull r4, r6, r0, r3 - ldr r0, [sp, #24] @ 4-byte Reload - adds r0, r4, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r9, r2, r9 - ldr r2, [sp, #52] @ 4-byte Reload - adcs r4, r5, r0 - ldr r0, 
[r1, #16] - ldr r1, [r1, #20] - adcs r10, lr, r10 - umull r7, lr, r0, r3 - adcs r0, r7, r12 - umull r7, r12, r1, r3 - ldr r1, [sp, #44] @ 4-byte Reload - adcs r1, r7, r1 - umull r7, r5, r3, r3 - ldr r3, [sp, #48] @ 4-byte Reload - adcs r3, r7, r3 - mov r7, #0 - adc r7, r7, #0 - adds r6, r9, r6 - adcs r4, r4, r8 - adcs r8, r10, r11 - adcs r0, r0, r2 - adcs r1, r1, lr - adcs r2, r3, r12 - adc r3, r7, r5 - ldr r7, [sp, #56] @ 4-byte Reload - ldr r5, [sp, #40] @ 4-byte Reload - add r12, r7, #40 - str r5, [r7, #24] - str r6, [r7, #28] - str r4, [r7, #32] - str r8, [r7, #36] - stm r12, {r0, r1, r2, r3} - add sp, sp, #60 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end97: - .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L - .cantunwind - .fnend - - .globl mcl_fp_mont7L - .align 2 - .type mcl_fp_mont7L,%function -mcl_fp_mont7L: @ @mcl_fp_mont7L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #124 - sub sp, sp, #124 - str r0, [sp, #56] @ 4-byte Spill - mov r0, r2 - str r2, [sp, #60] @ 4-byte Spill - ldm r0, {r2, lr} - ldr r7, [r0, #8] - ldr r0, [r0, #12] - ldr r5, [r3, #-4] - ldr r6, [r3, #8] - ldr r9, [r3, #4] - ldr r11, [r1, #8] - ldr r12, [r1, #12] - str r7, [sp, #48] @ 4-byte Spill - ldr r7, [r1, #4] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [r1] - str r5, [sp, #80] @ 4-byte Spill - str r6, [sp, #116] @ 4-byte Spill - str r9, [sp, #108] @ 4-byte Spill - str r11, [sp, #104] @ 4-byte Spill - str r12, [sp, #72] @ 4-byte Spill - str r7, [sp, #68] @ 4-byte Spill - ldr r7, [r3] - umull r4, r8, r0, r2 - str r0, [sp, #84] @ 4-byte Spill - mul r0, r4, r5 - str r4, [sp, #44] @ 4-byte Spill - umull r10, r4, r0, r6 - str r4, [sp, #32] @ 4-byte Spill - str r10, [sp, #8] @ 4-byte Spill - umull r4, r5, r0, r7 - str r7, [sp, #112] @ 4-byte Spill - ldr r7, [sp, #68] @ 4-byte Reload - str r4, [sp, #40] @ 4-byte Spill - mov r4, r5 - str r5, [sp, #4] @ 4-byte Spill - umlal r4, r10, r0, r9 - str r4, [sp, #36] @ 4-byte Spill - ldr r4, [r1, #24] - umull r6, r5, r4, r2 - str r4, [sp, #88] @ 4-byte Spill - ldr r4, [r1, #20] - ldr r1, [r1, #16] - str r6, [sp, #96] @ 4-byte Spill - str r5, [sp, #120] @ 4-byte Spill - umull r6, r5, r4, r2 - str r4, [sp, #64] @ 4-byte Spill - umull r9, r4, r1, r2 - str r1, [sp, #100] @ 4-byte Spill - str r6, [sp, #76] @ 4-byte Spill - str r5, [sp, #92] @ 4-byte Spill - str r4, [sp, #20] @ 4-byte Spill - umull r6, r5, r12, r2 - umull r12, r4, r11, r2 - umull r11, r1, r7, r2 - adds r7, r8, r11 - adcs r7, r1, r12 - adcs r1, r4, r6 - ldr r4, [sp, #20] @ 4-byte Reload - ldr r6, [sp, #108] @ 4-byte Reload - str r1, [sp, #28] @ 4-byte Spill - adcs r1, r5, r9 - ldr r5, [r3, #12] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - str r5, [sp, #76] @ 4-byte Spill - adcs r1, r4, r1 - ldr r4, [sp, #92] @ 4-byte Reload - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #96] @ 4-byte Reload - adcs r1, r4, r1 - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #120] @ 4-byte Reload - adc r1, r1, #0 - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [r3, #24] - umull r9, r4, r0, r1 - str r1, [sp, #96] @ 4-byte Spill - ldr r1, [r3, #16] - str r4, [sp] @ 4-byte Spill - ldr r4, [r3, #20] - umull r3, r7, r0, r6 - ldr r6, [sp, #4] @ 4-byte Reload - str r1, [sp, #120] @ 4-byte Spill - adds r3, r6, r3 - str r4, [sp, #92] @ 4-byte Spill - umull r3, r6, r0, r5 - ldr r5, [sp, #8] @ 4-byte Reload - adcs r7, r7, r5 - ldr r5, [sp, #32] @ 4-byte Reload - adcs r11, r5, r3 - umull r7, r5, r0, r1 - adcs 
r1, r6, r7 - umull r7, r3, r0, r4 - ldr r4, [sp] @ 4-byte Reload - ldr r6, [sp, #40] @ 4-byte Reload - adcs r0, r5, r7 - ldr r5, [sp, #68] @ 4-byte Reload - adcs r3, r3, r9 - adc r7, r4, #0 - mov r4, #0 - umlal r8, r12, r5, r2 - ldr r2, [sp, #44] @ 4-byte Reload - adds r2, r6, r2 - mov r6, r5 - ldr r2, [sp, #36] @ 4-byte Reload - adcs r2, r2, r8 - str r2, [sp, #44] @ 4-byte Spill - adcs r2, r10, r12 - ldr r10, [sp, #84] @ 4-byte Reload - str r2, [sp, #40] @ 4-byte Spill - ldr r2, [sp, #28] @ 4-byte Reload - adcs r2, r11, r2 - str r2, [sp, #36] @ 4-byte Spill - ldr r2, [sp, #24] @ 4-byte Reload - adcs r1, r1, r2 - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #20] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r3, r0 - umull r2, r3, lr, r5 - ldr r5, [sp, #72] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #20] @ 4-byte Spill - adc r0, r4, #0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - umull r12, r9, lr, r0 - ldr r0, [sp, #100] @ 4-byte Reload - umull r8, r4, lr, r0 - ldr r0, [sp, #104] @ 4-byte Reload - umull r1, r7, lr, r0 - umull r11, r0, lr, r10 - adds r2, r0, r2 - adcs r2, r3, r1 - umlal r0, r1, lr, r6 - ldr r6, [sp, #40] @ 4-byte Reload - umull r2, r3, lr, r5 - adcs r2, r7, r2 - adcs r10, r3, r8 - ldr r8, [sp, #64] @ 4-byte Reload - umull r7, r3, lr, r8 - adcs r4, r4, r7 - ldr r7, [sp, #44] @ 4-byte Reload - adcs r3, r3, r12 - adc r5, r9, #0 - adds r7, r7, r11 - adcs r0, r6, r0 - ldr r6, [sp, #108] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #80] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #96] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #20] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - mul r0, r7, r1 - ldr r1, [sp, #116] @ 4-byte Reload - umull lr, r12, r0, r6 - umull r3, r4, r0, r1 - ldr r1, [sp, #112] @ 4-byte Reload - mov r2, r3 - umull r9, r5, r0, r1 - mov r1, r5 - adds r5, r5, lr - umlal r1, r2, r0, r6 - ldr r6, [sp, #120] @ 4-byte Reload - adcs r3, r12, r3 - umull r5, lr, r0, r6 - ldr r6, [sp, #76] @ 4-byte Reload - umull r3, r12, r0, r6 - ldr r6, [sp, #92] @ 4-byte Reload - adcs r3, r4, r3 - adcs r12, r12, r5 - umull r4, r5, r0, r6 - adcs lr, lr, r4 - umull r6, r4, r0, r10 - adcs r0, r5, r6 - adc r4, r4, #0 - adds r5, r9, r7 - ldr r9, [sp, #84] @ 4-byte Reload - ldr r5, [sp, #44] @ 4-byte Reload - adcs r1, r1, r5 - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r2, r1 - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adcs r1, r3, r1 - ldr r3, [sp, #68] @ 4-byte Reload - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #32] @ 4-byte Reload - adcs r1, r12, r1 - ldr r12, [sp, #48] @ 4-byte Reload - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #28] @ 4-byte Reload - adcs r1, lr, r1 - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte 
Reload - adcs r0, r4, r0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - umull r2, r1, r12, r0 - umull r10, r0, r12, r8 - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - str r2, [sp, #8] @ 4-byte Spill - str r1, [sp, #12] @ 4-byte Spill - umull r2, lr, r12, r3 - umull r7, r8, r12, r0 - ldr r0, [sp, #72] @ 4-byte Reload - umull r5, r6, r12, r0 - ldr r0, [sp, #104] @ 4-byte Reload - umull r1, r4, r12, r0 - umull r11, r0, r12, r9 - adds r2, r0, r2 - str r11, [sp] @ 4-byte Spill - adcs r2, lr, r1 - umlal r0, r1, r12, r3 - adcs lr, r4, r5 - ldmib sp, {r4, r5} - ldr r3, [sp, #44] @ 4-byte Reload - ldr r2, [sp] @ 4-byte Reload - adcs r7, r6, r7 - adcs r6, r8, r10 - adcs r4, r4, r5 - ldr r5, [sp, #12] @ 4-byte Reload - adc r5, r5, #0 - adds r9, r3, r2 - ldr r3, [sp, #40] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #80] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #108] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #24] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - mul r0, r9, r1 - ldr r1, [sp, #116] @ 4-byte Reload - umull r3, r2, r0, r1 - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [sp, #112] @ 4-byte Reload - umull r7, r1, r0, r2 - mov r2, r3 - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [sp, #96] @ 4-byte Reload - mov r5, r1 - umlal r5, r2, r0, r6 - umull r10, r4, r0, r7 - ldr r7, [sp, #92] @ 4-byte Reload - str r4, [sp, #8] @ 4-byte Spill - umull r12, r8, r0, r7 - ldr r7, [sp, #120] @ 4-byte Reload - umull lr, r4, r0, r7 - umull r11, r7, r0, r6 - ldr r6, [sp, #8] @ 4-byte Reload - adds r1, r1, r11 - ldr r11, [sp, #76] @ 4-byte Reload - adcs r1, r7, r3 - umull r1, r3, r0, r11 - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, r1 - adcs r1, r3, lr - adcs r3, r4, r12 - ldr r4, [sp, #16] @ 4-byte Reload - adcs r7, r8, r10 - ldr r10, [sp, #52] @ 4-byte Reload - ldr r8, [sp, #64] @ 4-byte Reload - adc r6, r6, #0 - adds r4, r4, r9 - ldr r9, [sp, #72] @ 4-byte Reload - ldr r4, [sp, #48] @ 4-byte Reload - adcs r5, r5, r4 - str r5, [sp, #48] @ 4-byte Spill - ldr r5, [sp, #44] @ 4-byte Reload - adcs r2, r2, r5 - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [sp, #40] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r7, r0 - ldr r7, [sp, #84] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - umull r4, r5, r10, r7 - adcs r0, r6, r0 - str r4, [sp, #16] @ 4-byte Spill - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - umull r1, r6, r10, r0 - ldr r0, [sp, #68] @ 4-byte Reload - umull r2, r3, r10, r0 - adds r2, r5, r2 - adcs r2, r3, r1 - umull r2, r3, r10, r9 
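@ annotation (inferred): mcl_fp_mont7L looks like a word-serial (CIOS-style)
@ Montgomery multiplication over a 7-limb modulus p: for each limb b[i] it adds
@ a*b[i] into the accumulator T, derives m = T[0] * n0' mod 2^32, adds m*p so
@ the low limb cancels, and drops one limb; the subs/sbcs ... movne sequence at
@ the end is the final conditional subtraction that brings the result below p.
@ n0' is read from p[-1] ("ldr r5, [r3, #-4]" in the prologue), consistent with
@ mcl storing -p^{-1} mod 2^32 just below the modulus.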
- adcs r7, r6, r2 - ldr r6, [sp, #100] @ 4-byte Reload - umull r2, r12, r10, r6 - adcs r6, r3, r2 - umull r3, lr, r10, r8 - mov r2, r10 - ldr r10, [sp, #88] @ 4-byte Reload - adcs r4, r12, r3 - umlal r5, r1, r2, r0 - umull r3, r12, r2, r10 - mov r10, r0 - ldr r0, [sp, #48] @ 4-byte Reload - ldr r2, [sp, #16] @ 4-byte Reload - adcs r3, lr, r3 - adc r12, r12, #0 - adds lr, r0, r2 - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #80] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #108] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - mul r0, lr, r1 - ldr r1, [sp, #116] @ 4-byte Reload - umull r5, r12, r0, r7 - umull r3, r6, r0, r1 - ldr r1, [sp, #112] @ 4-byte Reload - umull r2, r4, r0, r1 - str r2, [sp, #20] @ 4-byte Spill - mov r1, r4 - mov r2, r3 - adds r4, r4, r5 - umlal r1, r2, r0, r7 - ldr r7, [sp, #120] @ 4-byte Reload - adcs r3, r12, r3 - umull r3, r12, r0, r11 - adcs r11, r6, r3 - ldr r3, [sp, #92] @ 4-byte Reload - umull r4, r5, r0, r7 - ldr r7, [sp, #96] @ 4-byte Reload - adcs r12, r12, r4 - umull r4, r6, r0, r3 - adcs r4, r5, r4 - umull r5, r3, r0, r7 - ldr r7, [sp, #52] @ 4-byte Reload - adcs r0, r6, r5 - ldr r5, [sp, #20] @ 4-byte Reload - adc r3, r3, #0 - adds r6, r5, lr - adcs r1, r1, r7 - ldr r7, [sp, #104] @ 4-byte Reload - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r2, r1 - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - adcs r1, r11, r1 - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r12, r1 - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adcs r11, r4, r1 - ldr r1, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - ldr r0, [r0, #16] - umull lr, r6, r0, r8 - umull r5, r3, r0, r10 - umull r8, r2, r0, r1 - umull r12, r4, r0, r9 - adds r5, r2, r5 - umull r1, r5, r0, r7 - ldr r7, [sp, #100] @ 4-byte Reload - adcs r3, r3, r1 - umlal r2, r1, r0, r10 - adcs r9, r5, r12 - umull r5, r3, r0, r7 - ldr r7, [sp, #108] @ 4-byte Reload - adcs r12, r4, r5 - ldr r4, [sp, #88] @ 4-byte Reload - adcs lr, r3, lr - umull r5, r3, r0, r4 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r5, r6, r5 - adc r3, r3, #0 - adds r4, r0, r8 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #44] @ 4-byte Spill - adcs r0, r11, r12 - ldr r11, [sp, #80] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - mul r1, r4, r11 - adcs r0, r0, lr - umull lr, r12, r1, r7 - str r0, [sp, #36] @ 4-byte Spill - 
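@ annotation (inferred): r1 at this point holds m = T[0] * n0' from the
@ preceding "mul r1, r4, r11"; the surrounding umull/umlal products of r1 with
@ the limbs of p accumulate m*p into T, forcing T's low limb to zero so the
@ adds/adcs chain below can shift the accumulator down one limb.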
ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - umull r2, r6, r1, r0 - ldr r0, [sp, #112] @ 4-byte Reload - mov r3, r2 - umull r8, r5, r1, r0 - mov r0, r5 - adds r5, r5, lr - umlal r0, r3, r1, r7 - ldr r7, [sp, #120] @ 4-byte Reload - adcs r2, r12, r2 - umull r5, lr, r1, r7 - ldr r7, [sp, #76] @ 4-byte Reload - umull r2, r12, r1, r7 - ldr r7, [sp, #92] @ 4-byte Reload - adcs r9, r6, r2 - ldr r2, [sp, #96] @ 4-byte Reload - adcs r12, r12, r5 - umull r5, r6, r1, r7 - adcs lr, lr, r5 - umull r7, r5, r1, r2 - ldr r2, [sp, #52] @ 4-byte Reload - adcs r1, r6, r7 - ldr r7, [sp, #104] @ 4-byte Reload - adc r5, r5, #0 - adds r4, r8, r4 - adcs r0, r0, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r12, r0 - mov r12, r10 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #64] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - ldr r0, [r0, #20] - umull lr, r8, r0, r1 - ldr r1, [sp, #72] @ 4-byte Reload - umull r6, r3, r0, r12 - umull r4, r5, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - umull r10, r2, r0, r1 - adds r6, r2, r6 - umull r1, r6, r0, r7 - ldr r7, [sp, #88] @ 4-byte Reload - adcs r3, r3, r1 - umlal r2, r1, r0, r12 - ldr r3, [sp, #100] @ 4-byte Reload - adcs r9, r6, r4 - umull r4, r6, r0, r3 - adcs r4, r5, r4 - adcs r3, r6, lr - umull r5, r6, r0, r7 - ldr r0, [sp, #52] @ 4-byte Reload - ldr r7, [sp, #108] @ 4-byte Reload - adcs r5, r8, r5 - adc r6, r6, #0 - adds lr, r0, r10 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r10, r0, r2 - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - mul r1, lr, r11 - ldr r11, [sp, #84] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r6 - umull r6, r12, r1, r7 - str r0, [sp, #32] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - umull r3, r4, r1, r0 - ldr r0, [sp, #112] @ 4-byte Reload - mov r2, r3 - umull r8, r5, r1, r0 - mov r0, r5 - adds r5, r5, r6 - umlal r0, r2, r1, r7 - ldr r7, [sp, #120] @ 4-byte Reload - adcs r3, r12, r3 - umull r5, r6, r1, r7 - ldr r7, [sp, #76] @ 4-byte Reload - umull r3, r12, r1, r7 - ldr r7, [sp, #96] @ 4-byte Reload - adcs r9, r4, r3 - ldr r3, [sp, #92] @ 4-byte Reload - adcs r12, r12, r5 - umull r4, r5, r1, r3 - adcs r4, r6, r4 - umull r6, r3, r1, r7 - adcs r1, r5, r6 - adc r3, r3, #0 - adds r6, r8, lr - adcs r0, r0, r10 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, 
#52] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r12, r0 - ldr r12, [sp, #68] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r4, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #88] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - ldr r0, [r0, #24] - umull r3, r2, r0, r1 - ldr r1, [sp, #64] @ 4-byte Reload - str r2, [sp, #60] @ 4-byte Spill - str r3, [sp, #20] @ 4-byte Spill - umull r3, lr, r0, r12 - umull r9, r2, r0, r1 - ldr r1, [sp, #100] @ 4-byte Reload - str r2, [sp, #88] @ 4-byte Spill - umull r7, r8, r0, r1 - ldr r1, [sp, #72] @ 4-byte Reload - umull r5, r6, r0, r1 - ldr r1, [sp, #104] @ 4-byte Reload - umull r2, r4, r0, r1 - umull r10, r1, r0, r11 - ldr r11, [sp, #92] @ 4-byte Reload - adds r3, r1, r3 - str r10, [sp, #104] @ 4-byte Spill - ldr r10, [sp, #96] @ 4-byte Reload - adcs r3, lr, r2 - umlal r1, r2, r0, r12 - ldr r0, [sp, #24] @ 4-byte Reload - adcs lr, r4, r5 - ldr r5, [sp, #20] @ 4-byte Reload - ldr r3, [sp, #88] @ 4-byte Reload - ldr r4, [sp, #60] @ 4-byte Reload - adcs r6, r6, r7 - adcs r7, r8, r9 - ldr r8, [sp, #108] @ 4-byte Reload - adcs r5, r3, r5 - ldr r3, [sp, #104] @ 4-byte Reload - adc r4, r4, #0 - adds r9, r0, r3 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - ldr lr, [sp, #76] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #116] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r1, r9, r0 - ldr r0, [sp, #112] @ 4-byte Reload - umull r2, r3, r1, r8 - umull r4, r5, r1, r0 - adds r2, r5, r2 - umull r0, r2, r1, r7 - ldr r7, [sp, #120] @ 4-byte Reload - adcs r3, r3, r0 - umull r3, r12, r1, lr - adcs r6, r2, r3 - umull r3, r2, r1, r7 - adcs r12, r12, r3 - umull r7, r3, r1, r11 - adcs r2, r2, r7 - str r2, [sp, #80] @ 4-byte Spill - umull r7, r2, r1, r10 - adcs r3, r3, r7 - mov r7, r8 - umlal r5, r0, r1, r7 - adc r1, r2, #0 - adds r2, r4, r9 - ldr r2, [sp, #104] @ 4-byte Reload - adcs r8, r5, r2 - ldr r2, [sp, #100] @ 4-byte Reload - ldr r5, [sp, #116] @ 4-byte Reload - adcs r9, r0, r2 - ldr r0, [sp, #88] @ 4-byte Reload - ldr r2, [sp, #80] @ 4-byte Reload - adcs r4, r6, r0 - ldr r0, [sp, #84] @ 4-byte Reload - str r4, [sp, #88] @ 4-byte Spill - adcs r6, r12, r0 - ldr r0, [sp, #72] @ 4-byte Reload - str r6, [sp, #100] @ 4-byte Spill - adcs r12, r2, r0 - ldr r0, [sp, #68] @ 4-byte Reload - adcs r2, r3, r0 - ldr r0, [sp, #64] @ 4-byte Reload - str r2, [sp, #104] @ 4-byte Spill - adcs r0, r1, r0 - ldr r1, [sp, #60] @ 4-byte Reload - adc r1, r1, #0 - str r1, [sp, #84] @ 
4-byte Spill - ldr r1, [sp, #112] @ 4-byte Reload - subs r1, r8, r1 - sbcs r3, r9, r7 - ldr r7, [sp, #120] @ 4-byte Reload - sbcs r5, r4, r5 - sbcs r6, r6, lr - sbcs r4, r12, r7 - sbcs r11, r2, r11 - ldr r2, [sp, #84] @ 4-byte Reload - sbcs lr, r0, r10 - sbc r7, r2, #0 - ldr r2, [sp, #56] @ 4-byte Reload - ands r7, r7, #1 - movne r1, r8 - movne r3, r9 - str r1, [r2] - ldr r1, [sp, #88] @ 4-byte Reload - str r3, [r2, #4] - movne r5, r1 - ldr r1, [sp, #100] @ 4-byte Reload - cmp r7, #0 - movne r4, r12 - str r5, [r2, #8] - movne r6, r1 - ldr r1, [sp, #104] @ 4-byte Reload - str r6, [r2, #12] - str r4, [r2, #16] - movne r11, r1 - cmp r7, #0 - movne lr, r0 - str r11, [r2, #20] - str lr, [r2, #24] - add sp, sp, #124 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end98: - .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L - .cantunwind - .fnend - - .globl mcl_fp_montNF7L - .align 2 - .type mcl_fp_montNF7L,%function -mcl_fp_montNF7L: @ @mcl_fp_montNF7L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #104 - sub sp, sp, #104 - str r0, [sp, #36] @ 4-byte Spill - mov r0, r2 - str r2, [sp, #40] @ 4-byte Spill - ldm r0, {r4, r12} - ldr r6, [r1, #4] - ldr r2, [r0, #8] - ldr r7, [r1] - ldr r0, [r0, #12] - ldr r5, [r1, #8] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [r1, #12] - umull r9, r8, r6, r4 - umull lr, r10, r7, r4 - str r6, [sp, #52] @ 4-byte Spill - mov r11, r6 - str r7, [sp, #96] @ 4-byte Spill - str r5, [sp, #80] @ 4-byte Spill - str r2, [sp] @ 4-byte Spill - adds r6, r10, r9 - umull r6, r9, r5, r4 - ldr r5, [r1, #20] - adcs r7, r8, r6 - umlal r10, r6, r11, r4 - umull r7, r8, r0, r4 - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r9, r7 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [r1, #16] - str r5, [sp, #44] @ 4-byte Spill - umull r7, r9, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - adcs r0, r8, r7 - str r0, [sp, #84] @ 4-byte Spill - umull r7, r0, r5, r4 - adcs r5, r9, r7 - ldr r7, [r3, #4] - str r5, [sp, #76] @ 4-byte Spill - ldr r5, [r1, #24] - str r7, [sp, #72] @ 4-byte Spill - umull r1, r9, r5, r4 - str r5, [sp, #68] @ 4-byte Spill - ldr r5, [r3] - adcs r0, r0, r1 - ldr r1, [r3, #-4] - str r0, [sp, #28] @ 4-byte Spill - adc r0, r9, #0 - ldr r9, [r3, #8] - str r0, [sp, #24] @ 4-byte Spill - str r5, [sp, #56] @ 4-byte Spill - mul r0, lr, r1 - str r1, [sp, #60] @ 4-byte Spill - umull r1, r2, r0, r5 - str r9, [sp, #100] @ 4-byte Spill - adds r1, r1, lr - str r2, [sp, #20] @ 4-byte Spill - umull r1, lr, r0, r7 - adcs r11, r1, r10 - umull r5, r1, r0, r9 - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [r3, #12] - adcs r9, r5, r6 - str r1, [sp, #92] @ 4-byte Spill - umull r5, r10, r0, r1 - ldr r1, [sp, #88] @ 4-byte Reload - adcs r7, r5, r1 - ldr r1, [r3, #16] - str r1, [sp, #88] @ 4-byte Spill - umull r5, r8, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - adcs r4, r5, r1 - ldr r1, [r3, #20] - str r1, [sp, #84] @ 4-byte Spill - umull r5, r6, r0, r1 - ldr r1, [sp, #76] @ 4-byte Reload - adcs r5, r5, r1 - ldr r1, [r3, #24] - umull r3, r2, r0, r1 - ldr r0, [sp, #28] @ 4-byte Reload - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #24] @ 4-byte Reload - adcs r0, r3, r0 - adc r3, r1, #0 - ldr r1, [sp, #20] @ 4-byte Reload - adds r11, r11, r1 - adcs r1, r9, lr - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #16] @ 4-byte Reload - adcs r1, r7, r1 - ldr r7, [sp, #80] @ 4-byte Reload - str r1, [sp, #24] @ 4-byte Spill - adcs r1, r4, r10 - str r1, [sp, #20] @ 4-byte Spill - adcs r1, r5, r8 - ldr r5, [sp, #48] @ 4-byte 
Reload - adcs r0, r0, r6 - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #96] @ 4-byte Reload - str r0, [sp, #12] @ 4-byte Spill - adc r0, r3, r2 - ldr r2, [sp, #52] @ 4-byte Reload - str r0, [sp, #8] @ 4-byte Spill - umull r9, r0, r12, r1 - umull r3, r4, r12, r2 - adds r3, r0, r3 - umull r1, r3, r12, r7 - ldr r7, [sp, #44] @ 4-byte Reload - adcs r4, r4, r1 - umlal r0, r1, r12, r2 - umull r4, r6, r12, r5 - ldr r5, [sp, #64] @ 4-byte Reload - adcs r10, r3, r4 - umull r4, r3, r12, r5 - adcs r8, r6, r4 - umull r6, r4, r12, r7 - ldr r7, [sp, #68] @ 4-byte Reload - adcs r5, r3, r6 - umull r6, r3, r12, r7 - ldr r7, [sp, #28] @ 4-byte Reload - adcs r4, r4, r6 - adc r2, r3, #0 - adds r3, r9, r11 - adcs r0, r0, r7 - ldr r7, [sp, #24] @ 4-byte Reload - adcs r1, r1, r7 - ldr r7, [sp, #20] @ 4-byte Reload - adcs r6, r10, r7 - ldr r7, [sp, #16] @ 4-byte Reload - adcs r11, r8, r7 - ldr r7, [sp, #12] @ 4-byte Reload - ldr r8, [sp, #72] @ 4-byte Reload - adcs r7, r5, r7 - ldr r5, [sp, #8] @ 4-byte Reload - str r7, [sp, #16] @ 4-byte Spill - adcs r7, r4, r5 - ldr r5, [sp, #60] @ 4-byte Reload - adc r2, r2, #0 - str r7, [sp, #20] @ 4-byte Spill - str r2, [sp, #28] @ 4-byte Spill - mul r2, r3, r5 - ldr r5, [sp, #56] @ 4-byte Reload - umull r4, r7, r2, r5 - adds r3, r4, r3 - str r7, [sp, #24] @ 4-byte Spill - umull r3, r7, r2, r8 - ldr r4, [sp, #24] @ 4-byte Reload - adcs lr, r3, r0 - ldr r0, [sp, #100] @ 4-byte Reload - str r7, [sp, #12] @ 4-byte Spill - umull r3, r7, r2, r0 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r12, r3, r1 - str r7, [sp, #8] @ 4-byte Spill - umull r3, r10, r2, r0 - ldr r0, [sp, #88] @ 4-byte Reload - adcs r3, r3, r6 - umull r6, r9, r2, r0 - ldr r0, [sp, #84] @ 4-byte Reload - adcs r5, r6, r11 - ldr r11, [sp, #76] @ 4-byte Reload - umull r6, r1, r2, r0 - ldr r0, [sp, #16] @ 4-byte Reload - adcs r6, r6, r0 - umull r7, r0, r2, r11 - ldr r2, [sp, #20] @ 4-byte Reload - adcs r2, r7, r2 - ldr r7, [sp, #28] @ 4-byte Reload - adc r7, r7, #0 - adds r4, lr, r4 - str r4, [sp, #28] @ 4-byte Spill - ldr r4, [sp, #12] @ 4-byte Reload - adcs r4, r12, r4 - ldr r12, [sp, #52] @ 4-byte Reload - str r4, [sp, #24] @ 4-byte Spill - ldr r4, [sp, #8] @ 4-byte Reload - adcs r3, r3, r4 - ldr r4, [sp, #64] @ 4-byte Reload - str r3, [sp, #20] @ 4-byte Spill - adcs r3, r5, r10 - ldr r5, [sp, #48] @ 4-byte Reload - str r3, [sp, #16] @ 4-byte Spill - adcs r3, r6, r9 - ldr r9, [sp, #68] @ 4-byte Reload - adcs r1, r2, r1 - str r3, [sp, #12] @ 4-byte Spill - ldr r3, [sp, #80] @ 4-byte Reload - adc r0, r7, r0 - str r1, [sp, #8] @ 4-byte Spill - ldr r1, [sp, #96] @ 4-byte Reload - ldr r7, [sp, #44] @ 4-byte Reload - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp] @ 4-byte Reload - umull r2, r6, r0, r12 - umull r11, lr, r0, r1 - adds r2, lr, r2 - umull r1, r2, r0, r3 - adcs r6, r6, r1 - umlal lr, r1, r0, r12 - umull r6, r3, r0, r5 - adcs r5, r2, r6 - umull r6, r2, r0, r4 - adcs r10, r3, r6 - umull r6, r3, r0, r7 - ldr r7, [sp, #28] @ 4-byte Reload - adcs r4, r2, r6 - umull r6, r2, r0, r9 - ldr r9, [sp, #56] @ 4-byte Reload - adcs r3, r3, r6 - ldr r6, [sp, #24] @ 4-byte Reload - adc r2, r2, #0 - adds r7, r11, r7 - adcs r0, lr, r6 - ldr r6, [sp, #20] @ 4-byte Reload - adcs r1, r1, r6 - ldr r6, [sp, #16] @ 4-byte Reload - adcs r6, r5, r6 - ldr r5, [sp, #12] @ 4-byte Reload - adcs r11, r10, r5 - ldr r5, [sp, #8] @ 4-byte Reload - adcs r10, r4, r5 - ldr r5, [sp, #4] @ 4-byte Reload - ldr r4, [sp, #92] @ 4-byte Reload - adcs r3, r3, r5 - str r3, [sp, #28] @ 4-byte Spill - ldr r3, [sp, #60] @ 4-byte Reload - adc r2, r2, #0 - 
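@ annotation (inferred): mcl_fp_montNF7L appears to be the same word-serial
@ Montgomery multiply in the variant mcl labels "NF": rather than a borrow-based
@ conditional subtraction, it computes z - p at the end and selects z or z - p
@ by the sign of the top limb (the asr/cmp/movlt sequence before the stores).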
str r2, [sp, #24] @ 4-byte Spill - mul r2, r7, r3 - umull r3, r5, r2, r9 - adds r3, r3, r7 - str r5, [sp, #20] @ 4-byte Spill - umull r3, r7, r2, r8 - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [sp, #100] @ 4-byte Reload - adcs r8, r3, r0 - ldr r0, [sp, #76] @ 4-byte Reload - umull r3, lr, r2, r7 - ldr r7, [sp, #84] @ 4-byte Reload - adcs r1, r3, r1 - umull r3, r12, r2, r4 - ldr r4, [sp, #88] @ 4-byte Reload - adcs r3, r3, r6 - umull r6, r5, r2, r4 - adcs r6, r6, r11 - umull r4, r11, r2, r7 - adcs r4, r4, r10 - umull r7, r10, r2, r0 - ldr r0, [sp, #28] @ 4-byte Reload - adcs r2, r7, r0 - ldr r0, [sp, #24] @ 4-byte Reload - adc r7, r0, #0 - ldr r0, [sp, #20] @ 4-byte Reload - adds r0, r8, r0 - ldr r8, [sp, #48] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - adcs r0, r3, lr - ldr r3, [sp, #96] @ 4-byte Reload - str r0, [sp, #20] @ 4-byte Spill - adcs r0, r6, r12 - ldr r6, [sp, #32] @ 4-byte Reload - str r0, [sp, #16] @ 4-byte Spill - adcs r0, r4, r5 - str r0, [sp, #12] @ 4-byte Spill - adcs r0, r2, r11 - str r0, [sp, #8] @ 4-byte Spill - adc r0, r7, r10 - ldr r7, [sp, #80] @ 4-byte Reload - ldr r10, [sp, #44] @ 4-byte Reload - str r0, [sp, #4] @ 4-byte Spill - umull r4, r0, r6, r1 - umull r11, r2, r6, r3 - adds r4, r2, r4 - umull r3, r4, r6, r7 - adcs r0, r0, r3 - umlal r2, r3, r6, r1 - umull r0, r7, r6, r8 - adcs r5, r4, r0 - ldr r0, [sp, #64] @ 4-byte Reload - umull r4, r1, r6, r0 - mov r0, r6 - adcs r4, r7, r4 - umull r7, r12, r6, r10 - ldr r6, [sp, #68] @ 4-byte Reload - adcs lr, r1, r7 - umull r7, r1, r0, r6 - ldr r0, [sp, #28] @ 4-byte Reload - adcs r7, r12, r7 - adc r12, r1, #0 - ldr r1, [sp, #24] @ 4-byte Reload - adds r0, r11, r0 - adcs r2, r2, r1 - ldr r1, [sp, #20] @ 4-byte Reload - adcs r3, r3, r1 - ldr r1, [sp, #16] @ 4-byte Reload - adcs r6, r5, r1 - ldr r1, [sp, #12] @ 4-byte Reload - adcs r11, r4, r1 - ldr r1, [sp, #8] @ 4-byte Reload - adcs r1, lr, r1 - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #4] @ 4-byte Reload - adcs r1, r7, r1 - str r1, [sp, #24] @ 4-byte Spill - adc r1, r12, #0 - ldr r12, [sp, #76] @ 4-byte Reload - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - mul r4, r0, r1 - umull r7, r1, r4, r9 - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - adds r0, r7, r0 - umull r0, r7, r4, r1 - ldr r1, [sp, #100] @ 4-byte Reload - adcs lr, r0, r2 - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [sp, #84] @ 4-byte Reload - umull r2, r0, r4, r1 - ldr r1, [sp, #92] @ 4-byte Reload - str r0, [sp, #12] @ 4-byte Spill - adcs r2, r2, r3 - umull r3, r0, r4, r1 - ldr r1, [sp, #88] @ 4-byte Reload - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r3, r3, r6 - umull r6, r5, r4, r1 - adcs r6, r6, r11 - umull r1, r11, r4, r7 - umull r7, r9, r4, r12 - ldr r12, [sp, #52] @ 4-byte Reload - adcs r1, r1, r0 - ldr r0, [sp, #24] @ 4-byte Reload - adcs r4, r7, r0 - ldr r7, [sp, #32] @ 4-byte Reload - ldr r0, [sp, #28] @ 4-byte Reload - adc r7, r7, #0 - adds r0, lr, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [sp, #96] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adcs r0, r6, r0 - ldr r6, [sp, #68] @ 4-byte Reload - str r0, [sp, #20] @ 4-byte Spill - adcs r0, r1, r5 - str r0, [sp, #16] @ 4-byte Spill - adcs 
r0, r4, r11 - str r0, [sp, #12] @ 4-byte Spill - adc r0, r7, r9 - ldr r9, [sp, #40] @ 4-byte Reload - str r0, [sp, #8] @ 4-byte Spill - ldr r4, [r9, #16] - umull r11, r3, r4, r2 - ldr r2, [sp, #80] @ 4-byte Reload - umull r0, r1, r4, r12 - adds r0, r3, r0 - umull r5, r0, r4, r2 - ldr r2, [sp, #64] @ 4-byte Reload - adcs r1, r1, r5 - umlal r3, r5, r4, r12 - umull r1, r7, r4, r8 - adcs r8, r0, r1 - umull r1, r0, r4, r2 - adcs lr, r7, r1 - umull r7, r1, r4, r10 - adcs r2, r0, r7 - umull r7, r0, r4, r6 - ldr r6, [sp, #16] @ 4-byte Reload - adcs r1, r1, r7 - ldr r7, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - adds r4, r11, r7 - ldr r7, [sp, #28] @ 4-byte Reload - adcs r3, r3, r7 - ldr r7, [sp, #24] @ 4-byte Reload - adcs r5, r5, r7 - ldr r7, [sp, #20] @ 4-byte Reload - adcs r7, r8, r7 - adcs r11, lr, r6 - ldr r6, [sp, #12] @ 4-byte Reload - adcs r10, r2, r6 - ldr r2, [sp, #8] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #56] @ 4-byte Reload - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - mul r0, r4, r1 - umull r1, r6, r0, r2 - ldr r2, [sp, #72] @ 4-byte Reload - adds r1, r1, r4 - str r6, [sp, #24] @ 4-byte Spill - ldr r4, [sp, #84] @ 4-byte Reload - umull r1, r6, r0, r2 - adcs lr, r1, r3 - ldr r1, [sp, #100] @ 4-byte Reload - str r6, [sp, #20] @ 4-byte Spill - umull r3, r2, r0, r1 - ldr r1, [sp, #92] @ 4-byte Reload - adcs r3, r3, r5 - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [sp, #24] @ 4-byte Reload - umull r5, r8, r0, r1 - ldr r1, [sp, #88] @ 4-byte Reload - adcs r5, r5, r7 - umull r7, r12, r0, r1 - adcs r6, r7, r11 - ldr r11, [sp, #76] @ 4-byte Reload - umull r7, r1, r0, r4 - adcs r7, r7, r10 - umull r4, r10, r0, r11 - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #28] @ 4-byte Reload - adc r4, r4, #0 - adds r2, lr, r2 - str r2, [sp, #32] @ 4-byte Spill - ldr r2, [sp, #20] @ 4-byte Reload - adcs r2, r3, r2 - ldr r3, [sp, #52] @ 4-byte Reload - str r2, [sp, #28] @ 4-byte Spill - ldr r2, [sp, #16] @ 4-byte Reload - adcs r11, r5, r2 - adcs r2, r6, r8 - ldr r6, [sp, #48] @ 4-byte Reload - ldr r8, [sp, #76] @ 4-byte Reload - str r2, [sp, #24] @ 4-byte Spill - adcs r2, r7, r12 - ldr r7, [r9, #20] - adcs r0, r0, r1 - ldr r1, [sp, #96] @ 4-byte Reload - str r2, [sp, #20] @ 4-byte Spill - str r0, [sp, #16] @ 4-byte Spill - adc r0, r4, r10 - str r0, [sp, #12] @ 4-byte Spill - umull r4, r0, r7, r3 - umull r10, r2, r7, r1 - ldr r1, [sp, #80] @ 4-byte Reload - adds r4, r2, r4 - umull r5, r4, r7, r1 - adcs r0, r0, r5 - umlal r2, r5, r7, r3 - ldr r3, [sp, #68] @ 4-byte Reload - umull r0, r1, r7, r6 - ldr r6, [sp, #64] @ 4-byte Reload - adcs lr, r4, r0 - umull r4, r0, r7, r6 - ldr r6, [sp, #44] @ 4-byte Reload - adcs r12, r1, r4 - umull r4, r1, r7, r6 - adcs r9, r0, r4 - umull r4, r0, r7, r3 - ldr r3, [sp, #32] @ 4-byte Reload - adcs r1, r1, r4 - adc r0, r0, #0 - adds r4, r10, r3 - ldr r3, [sp, #28] @ 4-byte Reload - adcs r2, r2, r3 - ldr r3, [sp, #24] @ 4-byte Reload - adcs r5, r5, r11 - adcs r7, lr, r3 - ldr r3, [sp, #20] @ 4-byte Reload - adcs r11, r12, r3 - ldr r3, [sp, #16] @ 4-byte Reload - adcs r9, r9, r3 - ldr r3, [sp, #12] @ 4-byte Reload - adcs r1, r1, r3 - ldr r3, [sp, #56] @ 4-byte Reload - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - mul r0, r4, r1 - umull r1, r6, r0, r3 - ldr r3, [sp, #72] @ 4-byte Reload - adds r1, r1, r4 - str r6, [sp, #24] @ 4-byte Spill - ldr r4, [sp, #84] @ 4-byte Reload - umull r1, r6, r0, 
r3 - ldr r3, [sp, #100] @ 4-byte Reload - adcs r12, r1, r2 - str r6, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #24] @ 4-byte Reload - umull r2, r10, r0, r3 - ldr r3, [sp, #92] @ 4-byte Reload - adcs r2, r2, r5 - umull r5, lr, r0, r3 - ldr r3, [sp, #88] @ 4-byte Reload - adcs r5, r5, r7 - umull r7, r6, r0, r3 - adcs r7, r7, r11 - umull r3, r11, r0, r4 - adcs r3, r3, r9 - umull r4, r9, r0, r8 - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #28] @ 4-byte Reload - adc r4, r4, #0 - adds r8, r12, r1 - ldr r1, [sp, #20] @ 4-byte Reload - adcs r1, r2, r1 - str r1, [sp, #16] @ 4-byte Spill - adcs r1, r5, r10 - ldr r5, [sp, #52] @ 4-byte Reload - str r1, [sp, #32] @ 4-byte Spill - adcs r1, r7, lr - ldr r7, [sp, #64] @ 4-byte Reload - str r1, [sp, #28] @ 4-byte Spill - adcs r1, r3, r6 - adcs r0, r0, r11 - str r1, [sp, #24] @ 4-byte Spill - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r9, r4, r9 - ldr r4, [r0, #24] - ldr r0, [sp, #80] @ 4-byte Reload - umull r6, lr, r4, r0 - ldr r0, [sp, #96] @ 4-byte Reload - umull r12, r1, r4, r5 - umull r11, r2, r4, r0 - mov r0, r6 - mov r3, r2 - adds r2, r2, r12 - adcs r1, r1, r6 - ldr r6, [sp, #48] @ 4-byte Reload - umlal r3, r0, r4, r5 - umull r1, r2, r4, r6 - adcs r5, lr, r1 - umull r6, r1, r4, r7 - ldr r7, [sp, #44] @ 4-byte Reload - adcs lr, r2, r6 - umull r6, r2, r4, r7 - ldr r7, [sp, #68] @ 4-byte Reload - adcs r12, r1, r6 - umull r6, r1, r4, r7 - ldr r7, [sp, #20] @ 4-byte Reload - adcs r2, r2, r6 - ldr r6, [sp, #16] @ 4-byte Reload - adc r1, r1, #0 - adds r4, r11, r8 - ldr r11, [sp, #88] @ 4-byte Reload - adcs r3, r3, r6 - ldr r6, [sp, #32] @ 4-byte Reload - adcs r6, r0, r6 - ldr r0, [sp, #28] @ 4-byte Reload - adcs r5, r5, r0 - ldr r0, [sp, #24] @ 4-byte Reload - adcs r10, lr, r0 - adcs r7, r12, r7 - adcs r12, r2, r9 - ldr r2, [sp, #60] @ 4-byte Reload - str r7, [sp, #96] @ 4-byte Spill - ldr r7, [sp, #56] @ 4-byte Reload - adc lr, r1, #0 - mul r1, r4, r2 - umull r2, r8, r1, r7 - ldr r7, [sp, #100] @ 4-byte Reload - adds r2, r2, r4 - umull r2, r9, r1, r7 - ldr r7, [sp, #72] @ 4-byte Reload - umull r4, r0, r1, r7 - ldr r7, [sp, #92] @ 4-byte Reload - adcs r3, r4, r3 - str r0, [sp, #80] @ 4-byte Spill - adcs r0, r2, r6 - str r0, [sp, #60] @ 4-byte Spill - umull r2, r0, r1, r7 - str r0, [sp, #68] @ 4-byte Spill - adcs r0, r2, r5 - str r0, [sp, #48] @ 4-byte Spill - umull r5, r0, r1, r11 - adcs r2, r5, r10 - ldr r10, [sp, #84] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r5, [sp, #76] @ 4-byte Reload - umull r6, r0, r1, r10 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r6, r6, r0 - umull r4, r0, r1, r5 - adcs r1, r4, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adc r4, lr, #0 - adds r8, r3, r8 - ldr r3, [sp, #60] @ 4-byte Reload - adcs r0, r3, r0 - ldr r3, [sp, #48] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - adcs lr, r3, r9 - ldr r3, [sp, #68] @ 4-byte Reload - adcs r12, r2, r3 - ldr r2, [sp, #64] @ 4-byte Reload - adcs r3, r6, r2 - ldr r2, [sp, #52] @ 4-byte Reload - str r3, [sp, #96] @ 4-byte Spill - adcs r2, r1, r2 - ldr r1, [sp, #44] @ 4-byte Reload - adc r9, r4, r1 - ldr r1, [sp, #56] @ 4-byte Reload - subs r4, r8, r1 - ldr r1, [sp, #72] @ 4-byte Reload - sbcs r6, r0, r1 - ldr r1, [sp, #100] @ 4-byte Reload - sbcs r1, lr, r1 - sbcs r7, r12, r7 - sbcs r11, r3, r11 - ldr r3, [sp, #36] @ 4-byte Reload - sbcs r10, r2, r10 - sbc r5, r9, r5 - asr r0, r5, #31 - cmp r0, #0 - movlt r4, r8 - movlt r1, lr - str r4, [r3] - ldr r4, 
[sp, #80] @ 4-byte Reload - movlt r6, r4 - cmp r0, #0 - str r6, [r3, #4] - str r1, [r3, #8] - ldr r1, [sp, #96] @ 4-byte Reload - movlt r7, r12 - movlt r10, r2 - str r7, [r3, #12] - movlt r11, r1 - cmp r0, #0 - movlt r5, r9 - str r11, [r3, #16] - str r10, [r3, #20] - str r5, [r3, #24] - add sp, sp, #104 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end99: - .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L - .cantunwind - .fnend - - .globl mcl_fp_montRed7L - .align 2 - .type mcl_fp_montRed7L,%function -mcl_fp_montRed7L: @ @mcl_fp_montRed7L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #120 - sub sp, sp, #120 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [r1, #4] - ldr r10, [r2, #-4] - ldr r4, [r1] - ldr r3, [r2] - ldr r7, [r2, #8] - ldr r5, [r2, #4] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [r1, #8] - str r4, [sp, #60] @ 4-byte Spill - str r7, [sp, #108] @ 4-byte Spill - str r3, [sp, #116] @ 4-byte Spill - str r5, [sp, #24] @ 4-byte Spill - str r10, [sp, #92] @ 4-byte Spill - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [r1, #12] - str r0, [sp, #76] @ 4-byte Spill - mul r0, r4, r10 - umull r4, r12, r0, r3 - umull lr, r6, r0, r7 - str r4, [sp, #52] @ 4-byte Spill - ldr r4, [r2, #24] - str r6, [sp, #72] @ 4-byte Spill - mov r9, lr - mov r3, r12 - umlal r3, r9, r0, r5 - umull r7, r6, r0, r4 - str r4, [sp, #104] @ 4-byte Spill - ldr r4, [r2, #20] - str r7, [sp, #68] @ 4-byte Spill - str r6, [sp, #64] @ 4-byte Spill - umull r7, r6, r0, r4 - str r4, [sp, #112] @ 4-byte Spill - ldr r4, [r2, #16] - ldr r2, [r2, #12] - str r7, [sp, #44] @ 4-byte Spill - str r6, [sp, #48] @ 4-byte Spill - str r4, [sp, #96] @ 4-byte Spill - umull r8, r7, r0, r4 - str r2, [sp, #100] @ 4-byte Spill - umull r4, r6, r0, r2 - umull r11, r2, r0, r5 - adds r0, r12, r11 - ldr r11, [r1, #36] - adcs r0, r2, lr - ldr r2, [sp, #48] @ 4-byte Reload - ldr lr, [r1, #28] - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r5, r6, r8 - ldr r8, [sp, #108] @ 4-byte Reload - ldr r6, [sp, #56] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [sp, #52] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adds r0, r0, r2 - ldr r2, [r1, #24] - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - ldr r3, [r1, #20] - mul r4, r0, r10 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [r1, #32] - ldr r10, [r1, #40] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [r1, #44] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [r1, #48] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [r1, #52] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r1, #16] - umull r12, r1, r4, r8 - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r1, r9 - ldr r9, [sp, #96] @ 4-byte Reload - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r1, r6 - ldr r6, [sp, #116] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #24] @ 4-byte Reload - str r1, [sp, #80] @ 4-byte Spill - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - umull r7, r1, r4, r6 - str r7, [sp, #28] @ 4-byte Spill - mov r7, r12 - adcs r0, r3, r0 - ldr r3, [sp, #68] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - mov r0, r1 - umlal r0, r7, r4, r5 - adcs r2, r2, r3 - 
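@ annotation (inferred): mcl_fp_montRed7L appears to perform Montgomery
@ reduction of a double-width (14-limb) input: each iteration computes
@ m = t[0] * n0' (n0' again loaded from p[-1] via "ldr r10, [r2, #-4]"), adds
@ m*p so the lowest limb cancels, and shifts the accumulator down one limb;
@ presumably seven such iterations plus a final conditional subtraction
@ (beyond this excerpt) yield a 7-limb result below p.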
str r2, [sp, #68] @ 4-byte Spill - ldr r2, [sp, #64] @ 4-byte Reload - adcs r2, lr, r2 - ldr lr, [sp, #100] @ 4-byte Reload - str r2, [sp, #64] @ 4-byte Spill - ldr r2, [sp, #60] @ 4-byte Reload - adcs r2, r2, #0 - str r2, [sp, #60] @ 4-byte Spill - adcs r2, r11, #0 - mov r11, r5 - str r2, [sp, #56] @ 4-byte Spill - adcs r2, r10, #0 - str r2, [sp, #52] @ 4-byte Spill - ldr r2, [sp, #48] @ 4-byte Reload - adcs r2, r2, #0 - str r2, [sp, #48] @ 4-byte Spill - ldr r2, [sp, #44] @ 4-byte Reload - adcs r2, r2, #0 - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [sp, #40] @ 4-byte Reload - adcs r2, r2, #0 - str r2, [sp, #40] @ 4-byte Spill - mov r2, #0 - adc r2, r2, #0 - str r2, [sp, #36] @ 4-byte Spill - umull r3, r2, r4, r5 - ldr r5, [sp, #20] @ 4-byte Reload - adds r1, r1, r3 - adcs r2, r2, r12 - umull r1, r3, r4, r9 - umull r2, r12, r4, lr - adcs r2, r5, r2 - adcs r10, r12, r1 - ldr r1, [sp, #112] @ 4-byte Reload - str r2, [sp] @ 4-byte Spill - ldr r12, [sp, #92] @ 4-byte Reload - umull r5, r2, r4, r1 - adcs r1, r3, r5 - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp, #104] @ 4-byte Reload - umull r5, r3, r4, r1 - adcs r2, r2, r5 - ldr r5, [sp] @ 4-byte Reload - str r2, [sp, #8] @ 4-byte Spill - adc r2, r3, #0 - ldr r3, [sp, #28] @ 4-byte Reload - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [sp, #84] @ 4-byte Reload - adds r4, r3, r2 - ldr r2, [sp, #32] @ 4-byte Reload - adcs r0, r0, r2 - mul r4, r0, r12 - str r0, [sp, #32] @ 4-byte Spill - umull r3, r0, r4, r8 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r7, r0 - ldr r7, [sp, #12] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - umull r0, r2, r4, r6 - ldr r6, [sp, #68] @ 4-byte Reload - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r5, r0 - mov r5, r3 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r10, r0 - adcs r6, r7, r6 - ldr r7, [sp, #8] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - mov r0, r2 - str r6, [sp, #76] @ 4-byte Spill - ldr r6, [sp, #64] @ 4-byte Reload - umlal r0, r5, r4, r11 - adcs r6, r7, r6 - ldr r7, [sp, #4] @ 4-byte Reload - str r6, [sp, #72] @ 4-byte Spill - ldr r6, [sp, #60] @ 4-byte Reload - adcs r6, r7, r6 - umull r7, r8, r4, r1 - str r6, [sp, #68] @ 4-byte Spill - ldr r6, [sp, #56] @ 4-byte Reload - adcs r6, r6, #0 - str r6, [sp, #64] @ 4-byte Spill - ldr r6, [sp, #52] @ 4-byte Reload - adcs r6, r6, #0 - str r6, [sp, #60] @ 4-byte Spill - ldr r6, [sp, #48] @ 4-byte Reload - adcs r6, r6, #0 - str r6, [sp, #56] @ 4-byte Spill - ldr r6, [sp, #44] @ 4-byte Reload - adcs r6, r6, #0 - str r6, [sp, #52] @ 4-byte Spill - ldr r6, [sp, #40] @ 4-byte Reload - adcs r6, r6, #0 - str r6, [sp, #48] @ 4-byte Spill - ldr r6, [sp, #36] @ 4-byte Reload - adc r6, r6, #0 - str r6, [sp, #44] @ 4-byte Spill - umull r6, r10, r4, r11 - adds r1, r2, r6 - adcs r2, r10, r3 - umull r1, r6, r4, lr - ldr lr, [sp, #108] @ 4-byte Reload - ldr r2, [sp, #16] @ 4-byte Reload - adcs r10, r2, r1 - umull r2, r3, r4, r9 - adcs r9, r6, r2 - ldr r2, [sp, #112] @ 4-byte Reload - umull r6, r1, r4, r2 - adcs r3, r3, r6 - adcs r1, r1, r7 - str r3, [sp, #16] @ 4-byte Spill - ldr r3, [sp, #20] @ 4-byte Reload - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp, #32] @ 4-byte Reload - adc r8, r8, #0 - ldr r6, [sp, #16] @ 4-byte Reload - adds r7, r3, r1 - ldr r1, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - mul r7, r0, r12 - str r0, [sp, #40] @ 4-byte Spill - umull r3, r0, r7, lr - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - umull 
r4, r1, r7, r0 - ldr r0, [sp, #84] @ 4-byte Reload - str r4, [sp, #36] @ 4-byte Spill - mov r4, r3 - adcs r0, r5, r0 - ldr r5, [sp, #76] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r10, r0 - ldr r10, [sp, #104] @ 4-byte Reload - adcs r5, r9, r5 - str r0, [sp, #84] @ 4-byte Spill - mov r0, r1 - str r5, [sp, #80] @ 4-byte Spill - ldr r5, [sp, #72] @ 4-byte Reload - umlal r0, r4, r7, r11 - adcs r5, r6, r5 - ldr r6, [sp, #12] @ 4-byte Reload - str r5, [sp, #76] @ 4-byte Spill - ldr r5, [sp, #68] @ 4-byte Reload - adcs r5, r6, r5 - str r5, [sp, #72] @ 4-byte Spill - ldr r5, [sp, #64] @ 4-byte Reload - adcs r6, r8, r5 - ldr r8, [sp, #100] @ 4-byte Reload - str r6, [sp, #68] @ 4-byte Spill - ldr r6, [sp, #60] @ 4-byte Reload - adcs r6, r6, #0 - str r6, [sp, #64] @ 4-byte Spill - ldr r6, [sp, #56] @ 4-byte Reload - adcs r6, r6, #0 - str r6, [sp, #60] @ 4-byte Spill - ldr r6, [sp, #52] @ 4-byte Reload - adcs r6, r6, #0 - str r6, [sp, #56] @ 4-byte Spill - ldr r6, [sp, #48] @ 4-byte Reload - adcs r6, r6, #0 - str r6, [sp, #52] @ 4-byte Spill - ldr r6, [sp, #44] @ 4-byte Reload - adc r6, r6, #0 - str r6, [sp, #48] @ 4-byte Spill - umull r9, r6, r7, r10 - str r6, [sp, #44] @ 4-byte Spill - umull r6, r5, r7, r11 - adds r1, r1, r6 - umull r6, r12, r7, r2 - ldr r1, [sp, #28] @ 4-byte Reload - adcs r2, r5, r3 - umull r2, r3, r7, r8 - adcs r1, r1, r2 - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #96] @ 4-byte Reload - umull r5, r2, r7, r1 - ldr r7, [sp, #36] @ 4-byte Reload - adcs r3, r3, r5 - ldr r5, [sp, #116] @ 4-byte Reload - adcs r2, r2, r6 - str r3, [sp, #20] @ 4-byte Spill - ldr r3, [sp, #28] @ 4-byte Reload - str r2, [sp, #16] @ 4-byte Spill - adcs r2, r12, r9 - ldr r9, [sp, #92] @ 4-byte Reload - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [sp, #44] @ 4-byte Reload - adc r2, r2, #0 - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [sp, #40] @ 4-byte Reload - adds r6, r7, r2 - ldr r2, [sp, #32] @ 4-byte Reload - adcs r0, r0, r2 - mul r6, r0, r9 - str r0, [sp, #44] @ 4-byte Spill - umull r7, r0, r6, lr - str r0, [sp, #32] @ 4-byte Spill - umull r0, r2, r6, r5 - mov r12, r7 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r4, r0 - mov r4, r2 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - umlal r4, r12, r6, r11 - adcs r0, r3, r0 - ldr r3, [sp, #20] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r3, r0 - ldr r3, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r3, r0 - ldr r3, [sp, #12] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r3, r0 - ldr r3, [sp, #8] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - umull r3, r0, r6, r10 - str r3, [sp, #28] @ 4-byte Spill - ldr r3, [sp, #112] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - umull lr, r0, r6, r3 - str r0, [sp, #20] @ 4-byte Spill - umull r10, r0, r6, r11 - adds r2, r2, r10 - adcs r0, r0, r7 - umull r2, r10, r6, r1 - umull r0, r1, r6, r8 
- ldr r6, [sp, #32] @ 4-byte Reload - adcs r8, r6, r0 - adcs r0, r1, r2 - ldr r1, [sp, #20] @ 4-byte Reload - ldr r2, [sp, #40] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r10, r10, lr - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc lr, r0, #0 - ldr r0, [sp, #44] @ 4-byte Reload - adds r7, r2, r0 - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r4, r0 - str r0, [sp, #48] @ 4-byte Spill - mul r4, r0, r9 - ldr r0, [sp, #108] @ 4-byte Reload - umull r7, r2, r4, r0 - str r2, [sp, #40] @ 4-byte Spill - umull r2, r0, r4, r5 - ldr r5, [sp, #84] @ 4-byte Reload - str r2, [sp, #44] @ 4-byte Spill - mov r6, r0 - mov r2, r7 - umlal r6, r2, r4, r11 - adcs r5, r12, r5 - ldr r12, [sp, #100] @ 4-byte Reload - str r5, [sp, #84] @ 4-byte Spill - ldr r5, [sp, #80] @ 4-byte Reload - adcs r5, r8, r5 - ldr r8, [sp, #104] @ 4-byte Reload - str r5, [sp, #80] @ 4-byte Spill - ldr r5, [sp, #76] @ 4-byte Reload - adcs r5, r1, r5 - ldr r1, [sp, #28] @ 4-byte Reload - str r5, [sp, #76] @ 4-byte Spill - ldr r5, [sp, #72] @ 4-byte Reload - adcs r5, r10, r5 - str r5, [sp, #72] @ 4-byte Spill - ldr r5, [sp, #68] @ 4-byte Reload - adcs r1, r1, r5 - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs r1, lr, r1 - ldr lr, [sp, #96] @ 4-byte Reload - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, #0 - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, #0 - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adc r1, r1, #0 - str r1, [sp, #52] @ 4-byte Spill - umull r5, r1, r4, r8 - str r5, [sp, #32] @ 4-byte Spill - str r1, [sp, #36] @ 4-byte Spill - umull r5, r1, r4, r3 - str r5, [sp, #20] @ 4-byte Spill - umull r9, r5, r4, r11 - str r1, [sp, #28] @ 4-byte Spill - adds r0, r0, r9 - umull r3, r9, r4, lr - umull r0, r1, r4, r12 - adcs r4, r5, r7 - ldr r4, [sp, #40] @ 4-byte Reload - adcs r10, r4, r0 - ldr r0, [sp, #20] @ 4-byte Reload - ldr r4, [sp, #28] @ 4-byte Reload - adcs r1, r1, r3 - adcs r3, r9, r0 - ldr r0, [sp, #32] @ 4-byte Reload - ldr r9, [sp, #112] @ 4-byte Reload - adcs r7, r4, r0 - ldr r0, [sp, #36] @ 4-byte Reload - ldr r4, [sp, #48] @ 4-byte Reload - adc r5, r0, #0 - ldr r0, [sp, #44] @ 4-byte Reload - adds r4, r0, r4 - ldr r0, [sp, #84] @ 4-byte Reload - adcs r4, r6, r0 - ldr r0, [sp, #80] @ 4-byte Reload - ldr r6, [sp, #108] @ 4-byte Reload - adcs r2, r2, r0 - ldr r0, [sp, #76] @ 4-byte Reload - str r2, [sp, #84] @ 4-byte Spill - adcs r0, r10, r0 - mov r10, r4 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #92] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - mul r0, r4, r1 - ldr r1, [sp, #116] @ 4-byte Reload - umull r2, r7, r0, r11 - umull r4, r3, r0, r1 - adds r2, r3, r2 - str r4, [sp, #92] @ 4-byte Spill - umull r1, r2, r0, r6 - adcs r4, r7, r1 - umlal r3, r1, r0, r11 - umull r4, r5, r0, r12 - adcs r2, r2, r4 - str r2, [sp, #52] @ 4-byte Spill - umull r4, r2, r0, lr - 
adcs r7, r5, r4 - str r7, [sp, #48] @ 4-byte Spill - umull r7, r4, r0, r9 - adcs r5, r2, r7 - umull r7, r2, r0, r8 - adcs r7, r4, r7 - adc r0, r2, #0 - ldr r2, [sp, #92] @ 4-byte Reload - adds r2, r2, r10 - ldr r2, [sp, #84] @ 4-byte Reload - adcs r12, r3, r2 - ldr r2, [sp, #80] @ 4-byte Reload - adcs lr, r1, r2 - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [sp, #52] @ 4-byte Reload - adcs r10, r2, r1 - ldr r1, [sp, #72] @ 4-byte Reload - ldr r2, [sp, #48] @ 4-byte Reload - adcs r4, r2, r1 - ldr r1, [sp, #68] @ 4-byte Reload - adcs r8, r5, r1 - ldr r1, [sp, #64] @ 4-byte Reload - str r8, [sp, #84] @ 4-byte Spill - adcs r2, r7, r1 - ldr r1, [sp, #60] @ 4-byte Reload - ldr r7, [sp, #100] @ 4-byte Reload - str r2, [sp, #92] @ 4-byte Spill - adcs r1, r0, r1 - ldr r0, [sp, #56] @ 4-byte Reload - adc r3, r0, #0 - ldr r0, [sp, #116] @ 4-byte Reload - subs r0, r12, r0 - sbcs r5, lr, r11 - mov r11, r4 - sbcs r6, r10, r6 - sbcs r7, r4, r7 - ldr r4, [sp, #96] @ 4-byte Reload - sbcs r4, r8, r4 - sbcs r8, r2, r9 - ldr r2, [sp, #104] @ 4-byte Reload - sbcs r9, r1, r2 - ldr r2, [sp, #88] @ 4-byte Reload - sbc r3, r3, #0 - ands r3, r3, #1 - movne r0, r12 - movne r5, lr - movne r6, r10 - cmp r3, #0 - str r0, [r2] - ldr r0, [sp, #84] @ 4-byte Reload - movne r7, r11 - str r5, [r2, #4] - str r6, [r2, #8] - str r7, [r2, #12] - movne r4, r0 - ldr r0, [sp, #92] @ 4-byte Reload - str r4, [r2, #16] - movne r8, r0 - cmp r3, #0 - movne r9, r1 - str r8, [r2, #20] - str r9, [r2, #24] - add sp, sp, #120 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end100: - .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L - .cantunwind - .fnend - - .globl mcl_fp_addPre7L - .align 2 - .type mcl_fp_addPre7L,%function -mcl_fp_addPre7L: @ @mcl_fp_addPre7L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #8 - sub sp, sp, #8 - ldr r3, [r1, #4] - ldr r9, [r1] - ldr r7, [r2] - ldr lr, [r1, #8] - ldr r10, [r1, #12] - ldr r11, [r1, #16] - ldr r8, [r1, #24] - str r3, [sp, #4] @ 4-byte Spill - ldr r3, [r1, #20] - adds r7, r7, r9 - str r3, [sp] @ 4-byte Spill - ldmib r2, {r1, r3, r4, r5, r12} - ldr r6, [sp, #4] @ 4-byte Reload - ldr r2, [r2, #24] - str r7, [r0] - adcs r1, r1, r6 - ldr r6, [sp] @ 4-byte Reload - adcs r3, r3, lr - adcs r4, r4, r10 - adcs r5, r5, r11 - adcs r6, r12, r6 - adcs r2, r2, r8 - stmib r0, {r1, r3, r4, r5, r6} - str r2, [r0, #24] - mov r0, #0 - adc r0, r0, #0 - add sp, sp, #8 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end101: - .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L - .cantunwind - .fnend - - .globl mcl_fp_subPre7L - .align 2 - .type mcl_fp_subPre7L,%function -mcl_fp_subPre7L: @ @mcl_fp_subPre7L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #8 - sub sp, sp, #8 - ldr r3, [r2, #4] - ldr r9, [r2] - ldr r7, [r1] - ldr lr, [r2, #8] - ldr r10, [r2, #12] - ldr r11, [r2, #16] - ldr r8, [r2, #24] - str r3, [sp, #4] @ 4-byte Spill - ldr r3, [r2, #20] - subs r7, r7, r9 - str r3, [sp] @ 4-byte Spill - ldmib r1, {r2, r3, r4, r5, r12} - ldr r6, [sp, #4] @ 4-byte Reload - ldr r1, [r1, #24] - str r7, [r0] - sbcs r2, r2, r6 - ldr r6, [sp] @ 4-byte Reload - sbcs r3, r3, lr - sbcs r4, r4, r10 - sbcs r5, r5, r11 - sbcs r6, r12, r6 - sbcs r1, r1, r8 - stmib r0, {r2, r3, r4, r5, r6} - str r1, [r0, #24] - mov r0, #0 - sbc r0, r0, #0 - and r0, r0, #1 - add sp, sp, #8 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end102: - 
.size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L - .cantunwind - .fnend - - .globl mcl_fp_shr1_7L - .align 2 - .type mcl_fp_shr1_7L,%function -mcl_fp_shr1_7L: @ @mcl_fp_shr1_7L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r11, lr} - push {r4, r5, r6, r7, r11, lr} - ldr r3, [r1, #4] - ldr r12, [r1] - ldr lr, [r1, #12] - ldr r2, [r1, #8] - ldr r5, [r1, #20] - ldr r4, [r1, #16] - ldr r1, [r1, #24] - lsrs r6, r3, #1 - lsr r3, r3, #1 - rrx r12, r12 - lsrs r6, lr, #1 - orr r7, r3, r2, lsl #31 - lsr r6, lr, #1 - rrx r2, r2 - lsrs r3, r5, #1 - lsr r5, r5, #1 - str r12, [r0] - str r7, [r0, #4] - orr r5, r5, r1, lsl #31 - orr r6, r6, r4, lsl #31 - rrx r3, r4 - lsr r1, r1, #1 - str r2, [r0, #8] - str r6, [r0, #12] - str r3, [r0, #16] - str r5, [r0, #20] - str r1, [r0, #24] - pop {r4, r5, r6, r7, r11, lr} - mov pc, lr -.Lfunc_end103: - .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L - .cantunwind - .fnend - - .globl mcl_fp_add7L - .align 2 - .type mcl_fp_add7L,%function -mcl_fp_add7L: @ @mcl_fp_add7L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #12 - sub sp, sp, #12 - ldr r7, [r1, #8] - ldr r10, [r1] - ldr r9, [r1, #4] - ldr r11, [r1, #16] - ldr r8, [r1, #24] - str r7, [sp, #8] @ 4-byte Spill - ldr r7, [r1, #12] - str r7, [sp, #4] @ 4-byte Spill - ldr r7, [r1, #20] - ldm r2, {r1, r4, r5, r6, r12, lr} - ldr r2, [r2, #24] - adds r10, r1, r10 - ldr r1, [sp, #8] @ 4-byte Reload - adcs r4, r4, r9 - str r10, [r0] - adcs r5, r5, r1 - ldr r1, [sp, #4] @ 4-byte Reload - adcs r6, r6, r1 - mov r1, #0 - adcs r9, r12, r11 - adcs r7, lr, r7 - stmib r0, {r4, r5, r6, r9} - adcs r2, r2, r8 - str r7, [r0, #20] - adc r1, r1, #0 - str r2, [r0, #24] - str r1, [sp, #8] @ 4-byte Spill - ldr r1, [r3] - str r1, [sp] @ 4-byte Spill - ldmib r3, {r12, lr} - ldr r1, [r3, #20] - ldr r8, [r3, #12] - ldr r11, [r3, #16] - ldr r3, [r3, #24] - str r1, [sp, #4] @ 4-byte Spill - ldr r1, [sp] @ 4-byte Reload - subs r10, r10, r1 - sbcs r1, r4, r12 - ldr r4, [sp, #4] @ 4-byte Reload - sbcs r5, r5, lr - sbcs r12, r6, r8 - str r5, [sp] @ 4-byte Spill - sbcs lr, r9, r11 - sbcs r4, r7, r4 - sbcs r5, r2, r3 - ldr r2, [sp, #8] @ 4-byte Reload - sbc r2, r2, #0 - tst r2, #1 - bne .LBB104_2 -@ BB#1: @ %nocarry - str r10, [r0] - str r1, [r0, #4] - ldr r1, [sp] @ 4-byte Reload - add r2, r0, #8 - stm r2, {r1, r12, lr} - str r4, [r0, #20] - str r5, [r0, #24] -.LBB104_2: @ %carry - add sp, sp, #12 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end104: - .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L - .cantunwind - .fnend - - .globl mcl_fp_addNF7L - .align 2 - .type mcl_fp_addNF7L,%function -mcl_fp_addNF7L: @ @mcl_fp_addNF7L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #20 - sub sp, sp, #20 - ldm r1, {r6, r7} - ldr r11, [r1, #16] - ldr r9, [r1, #20] - ldr r8, [r1, #24] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r1, #8] - str r7, [sp, #12] @ 4-byte Spill - ldr r7, [r1, #12] - str r7, [sp, #8] @ 4-byte Spill - ldm r2, {r1, r4, r5, r10, r12, lr} - ldr r2, [r2, #24] - adds r7, r1, r6 - ldr r1, [sp, #16] @ 4-byte Reload - str r7, [sp, #4] @ 4-byte Spill - adcs r6, r4, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r6, [sp, #16] @ 4-byte Spill - adcs r5, r5, r1 - ldr r1, [sp, #8] @ 4-byte Reload - adcs r4, r10, r1 - ldr r10, [r3, #8] - adcs r12, r12, r11 - ldr r11, [r3, #16] - adcs lr, lr, r9 - ldr r9, [r3, #20] - adc r1, r2, r8 - ldr r2, [r3] - ldr r8, [r3, #12] - str r1, [sp, #12] @ 
4-byte Spill - str r2, [sp] @ 4-byte Spill - ldr r2, [r3, #4] - ldr r3, [r3, #24] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [sp] @ 4-byte Reload - subs r2, r7, r2 - ldr r7, [sp, #8] @ 4-byte Reload - sbcs r7, r6, r7 - sbcs r6, r5, r10 - mov r10, r12 - sbcs r8, r4, r8 - sbcs r11, r12, r11 - sbcs r12, lr, r9 - ldr r9, [sp, #4] @ 4-byte Reload - sbc r3, r1, r3 - asr r1, r3, #31 - cmp r1, #0 - movlt r2, r9 - movlt r6, r5 - str r2, [r0] - ldr r2, [sp, #16] @ 4-byte Reload - movlt r7, r2 - cmp r1, #0 - movlt r8, r4 - movlt r11, r10 - movlt r12, lr - cmp r1, #0 - ldr r1, [sp, #12] @ 4-byte Reload - str r7, [r0, #4] - str r6, [r0, #8] - str r8, [r0, #12] - str r11, [r0, #16] - str r12, [r0, #20] - movlt r3, r1 - str r3, [r0, #24] - add sp, sp, #20 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end105: - .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L - .cantunwind - .fnend - - .globl mcl_fp_sub7L - .align 2 - .type mcl_fp_sub7L,%function -mcl_fp_sub7L: @ @mcl_fp_sub7L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #12 - sub sp, sp, #12 - ldr r7, [r2, #8] - ldr r11, [r2] - ldr r9, [r2, #4] - ldr r8, [r2, #20] - ldr r10, [r2, #24] - str r7, [sp, #8] @ 4-byte Spill - ldr r7, [r2, #12] - str r7, [sp, #4] @ 4-byte Spill - ldr r7, [r2, #16] - str r7, [sp] @ 4-byte Spill - ldm r1, {r2, r4, r5, r6, r7, lr} - ldr r1, [r1, #24] - subs r12, r2, r11 - ldr r2, [sp, #8] @ 4-byte Reload - sbcs r9, r4, r9 - ldr r4, [sp, #4] @ 4-byte Reload - str r12, [r0] - str r9, [r0, #4] - sbcs r2, r5, r2 - sbcs r11, r6, r4 - ldr r4, [sp] @ 4-byte Reload - str r2, [r0, #8] - str r11, [r0, #12] - sbcs r4, r7, r4 - sbcs r5, lr, r8 - sbcs r6, r1, r10 - add r1, r0, #16 - stm r1, {r4, r5, r6} - mov r1, #0 - sbc r1, r1, #0 - tst r1, #1 - beq .LBB106_2 -@ BB#1: @ %carry - ldr r1, [r3] - ldr r7, [r3, #4] - ldr lr, [r3, #12] - ldr r8, [r3, #16] - ldr r10, [r3, #20] - str r1, [sp, #4] @ 4-byte Spill - ldr r1, [r3, #8] - ldr r3, [r3, #24] - str r1, [sp, #8] @ 4-byte Spill - ldr r1, [sp, #4] @ 4-byte Reload - adds r1, r1, r12 - str r1, [sp, #4] @ 4-byte Spill - ldr r1, [sp, #8] @ 4-byte Reload - adcs r7, r7, r9 - adcs r2, r1, r2 - ldr r1, [sp, #4] @ 4-byte Reload - adcs r12, lr, r11 - adcs r4, r8, r4 - adcs r5, r10, r5 - adc r3, r3, r6 - stm r0, {r1, r7} - str r2, [r0, #8] - str r12, [r0, #12] - str r4, [r0, #16] - str r5, [r0, #20] - str r3, [r0, #24] -.LBB106_2: @ %nocarry - add sp, sp, #12 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end106: - .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L - .cantunwind - .fnend - - .globl mcl_fp_subNF7L - .align 2 - .type mcl_fp_subNF7L,%function -mcl_fp_subNF7L: @ @mcl_fp_subNF7L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #16 - sub sp, sp, #16 - ldm r2, {r5, lr} - ldr r7, [r2, #8] - ldr r11, [r2, #16] - ldr r10, [r2, #24] - add r9, r1, #12 - str r7, [sp, #12] @ 4-byte Spill - ldr r7, [r2, #12] - str r7, [sp, #4] @ 4-byte Spill - ldr r7, [r2, #20] - str r7, [sp, #8] @ 4-byte Spill - ldm r1, {r2, r4, r12} - ldm r9, {r6, r8, r9} - ldr r7, [r1, #24] - ldr r1, [sp, #12] @ 4-byte Reload - subs r5, r2, r5 - sbcs lr, r4, lr - sbcs r4, r12, r1 - ldr r1, [sp, #4] @ 4-byte Reload - str lr, [sp] @ 4-byte Spill - sbcs r12, r6, r1 - ldr r6, [r3, #4] - ldr r1, [sp, #8] @ 4-byte Reload - sbcs r2, r8, r11 - ldr r8, [r3, #12] - ldr r11, [r3, #16] - str r2, [sp, #12] @ 4-byte Spill - str r6, [sp, #4] @ 4-byte Spill - ldr r6, 
[r3, #20] - sbcs r1, r9, r1 - sbc r9, r7, r10 - ldr r7, [r3] - ldr r10, [r3, #8] - ldr r3, [r3, #24] - str r6, [sp, #8] @ 4-byte Spill - ldr r6, [sp, #4] @ 4-byte Reload - adds r7, r5, r7 - adcs r6, lr, r6 - adcs lr, r4, r10 - mov r10, r1 - adcs r8, r12, r8 - adcs r11, r2, r11 - ldr r2, [sp, #8] @ 4-byte Reload - adcs r2, r1, r2 - asr r1, r9, #31 - adc r3, r9, r3 - cmp r1, #0 - movge r7, r5 - ldr r5, [sp] @ 4-byte Reload - movge lr, r4 - str r7, [r0] - ldr r7, [sp, #12] @ 4-byte Reload - movge r6, r5 - cmp r1, #0 - movge r8, r12 - movge r11, r7 - movge r2, r10 - cmp r1, #0 - str r6, [r0, #4] - str lr, [r0, #8] - movge r3, r9 - str r8, [r0, #12] - str r11, [r0, #16] - str r2, [r0, #20] - str r3, [r0, #24] - add sp, sp, #16 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end107: - .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L - .cantunwind - .fnend - - .globl mcl_fpDbl_add7L - .align 2 - .type mcl_fpDbl_add7L,%function -mcl_fpDbl_add7L: @ @mcl_fpDbl_add7L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #48 - sub sp, sp, #48 - ldm r1, {r12, lr} - ldr r8, [r1, #8] - ldr r10, [r1, #12] - ldmib r2, {r6, r7} - ldr r4, [r2, #16] - ldr r11, [r2] - ldr r5, [r2, #12] - str r4, [sp, #8] @ 4-byte Spill - ldr r4, [r2, #20] - adds r9, r11, r12 - ldr r11, [r1, #44] - str r4, [sp, #12] @ 4-byte Spill - ldr r4, [r2, #24] - str r4, [sp, #20] @ 4-byte Spill - ldr r4, [r2, #28] - str r4, [sp, #40] @ 4-byte Spill - ldr r4, [r2, #32] - str r4, [sp, #16] @ 4-byte Spill - ldr r4, [r2, #36] - str r4, [sp, #24] @ 4-byte Spill - ldr r4, [r2, #40] - str r4, [sp, #28] @ 4-byte Spill - ldr r4, [r2, #44] - str r4, [sp, #32] @ 4-byte Spill - ldr r4, [r2, #48] - ldr r2, [r2, #52] - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [r1, #36] - str r4, [sp, #36] @ 4-byte Spill - adcs r4, r6, lr - add lr, r1, #16 - adcs r7, r7, r8 - ldr r8, [r1, #52] - adcs r6, r5, r10 - ldr r5, [r1, #32] - ldr r10, [r1, #48] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #40] - str r2, [sp, #4] @ 4-byte Spill - ldm lr, {r1, r2, r12, lr} - str r9, [r0] - stmib r0, {r4, r7} - str r6, [r0, #12] - ldr r4, [sp, #8] @ 4-byte Reload - ldr r7, [sp] @ 4-byte Reload - adcs r1, r4, r1 - ldr r4, [sp, #12] @ 4-byte Reload - str r1, [r0, #16] - ldr r1, [sp, #20] @ 4-byte Reload - adcs r2, r4, r2 - str r2, [r0, #20] - adcs r1, r1, r12 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [r0, #24] - ldr r1, [sp, #16] @ 4-byte Reload - adcs r2, r2, lr - str r2, [sp, #20] @ 4-byte Spill - adcs r2, r1, r5 - ldr r1, [sp, #24] @ 4-byte Reload - str r2, [sp, #16] @ 4-byte Spill - adcs r5, r1, r7 - ldr r1, [sp, #28] @ 4-byte Reload - ldr r7, [sp, #4] @ 4-byte Reload - adcs r12, r1, r7 - ldr r1, [sp, #32] @ 4-byte Reload - mov r7, #0 - str r12, [sp, #40] @ 4-byte Spill - adcs lr, r1, r11 - ldr r1, [sp, #36] @ 4-byte Reload - adcs r4, r1, r10 - ldr r1, [sp, #44] @ 4-byte Reload - adcs r9, r1, r8 - adc r1, r7, #0 - str r1, [sp, #44] @ 4-byte Spill - ldm r3, {r1, r7, r11} - ldr r10, [r3, #12] - ldr r8, [r3, #16] - ldr r6, [r3, #20] - ldr r3, [r3, #24] - str r3, [sp, #36] @ 4-byte Spill - ldr r3, [sp, #20] @ 4-byte Reload - subs r1, r3, r1 - sbcs r7, r2, r7 - sbcs r2, r5, r11 - mov r11, lr - sbcs r10, r12, r10 - sbcs r12, lr, r8 - sbcs lr, r4, r6 - ldr r6, [sp, #36] @ 4-byte Reload - sbcs r8, r9, r6 - ldr r6, [sp, #44] @ 4-byte Reload - sbc r6, r6, #0 - ands r6, r6, #1 - movne r1, r3 - movne r2, r5 - str r1, [r0, #28] - ldr r1, [sp, #16] @ 4-byte Reload - movne r7, r1 - ldr r1, [sp, #40] @ 
4-byte Reload - cmp r6, #0 - movne r12, r11 - movne lr, r4 - str r7, [r0, #32] - str r2, [r0, #36] - movne r10, r1 - cmp r6, #0 - movne r8, r9 - str r10, [r0, #40] - str r12, [r0, #44] - str lr, [r0, #48] - str r8, [r0, #52] - add sp, sp, #48 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end108: - .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L - .cantunwind - .fnend - - .globl mcl_fpDbl_sub7L - .align 2 - .type mcl_fpDbl_sub7L,%function -mcl_fpDbl_sub7L: @ @mcl_fpDbl_sub7L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #68 - sub sp, sp, #68 - ldr r7, [r2, #32] - add r8, r1, #16 - str r7, [sp, #44] @ 4-byte Spill - ldr r7, [r2, #36] - str r7, [sp, #48] @ 4-byte Spill - ldr r7, [r2, #40] - str r7, [sp, #52] @ 4-byte Spill - ldr r7, [r2, #44] - str r7, [sp, #56] @ 4-byte Spill - ldr r7, [r2, #48] - str r7, [sp, #60] @ 4-byte Spill - ldr r7, [r2, #52] - str r7, [sp, #64] @ 4-byte Spill - ldm r2, {r4, r7} - str r7, [sp, #20] @ 4-byte Spill - ldr r7, [r2, #8] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r2, #12] - str r7, [sp, #12] @ 4-byte Spill - ldr r7, [r2, #16] - str r7, [sp, #32] @ 4-byte Spill - ldr r7, [r2, #24] - str r7, [sp, #40] @ 4-byte Spill - ldr r7, [r2, #28] - ldr r2, [r2, #20] - str r7, [sp, #36] @ 4-byte Spill - str r2, [sp, #24] @ 4-byte Spill - ldmib r1, {r2, r12, lr} - ldm r8, {r5, r6, r8} - ldr r7, [r1, #28] - ldr r11, [r1] - ldr r9, [r1, #32] - ldr r10, [r1, #44] - str r7, [sp, #28] @ 4-byte Spill - ldr r7, [r1, #36] - subs r4, r11, r4 - str r4, [r0] - str r7, [sp, #4] @ 4-byte Spill - ldr r7, [r1, #40] - str r7, [sp, #8] @ 4-byte Spill - ldr r7, [r1, #48] - ldr r1, [r1, #52] - str r7, [sp] @ 4-byte Spill - ldr r7, [sp, #20] @ 4-byte Reload - sbcs r2, r2, r7 - ldr r7, [sp, #16] @ 4-byte Reload - sbcs r12, r12, r7 - ldr r7, [sp, #12] @ 4-byte Reload - stmib r0, {r2, r12} - ldr r2, [sp, #32] @ 4-byte Reload - sbcs lr, lr, r7 - ldr r7, [sp, #28] @ 4-byte Reload - str lr, [r0, #12] - sbcs r2, r5, r2 - str r2, [r0, #16] - ldr r2, [sp, #24] @ 4-byte Reload - sbcs r2, r6, r2 - ldr r6, [sp, #8] @ 4-byte Reload - str r2, [r0, #20] - ldr r2, [sp, #40] @ 4-byte Reload - sbcs r2, r8, r2 - mov r8, #0 - str r2, [r0, #24] - ldr r2, [sp, #36] @ 4-byte Reload - sbcs lr, r7, r2 - ldr r2, [sp, #44] @ 4-byte Reload - ldr r7, [sp, #4] @ 4-byte Reload - sbcs r4, r9, r2 - ldr r2, [sp, #48] @ 4-byte Reload - ldr r9, [r3, #20] - str r4, [sp, #44] @ 4-byte Spill - sbcs r7, r7, r2 - ldr r2, [sp, #52] @ 4-byte Reload - sbcs r12, r6, r2 - ldr r2, [sp, #56] @ 4-byte Reload - ldr r6, [sp] @ 4-byte Reload - str r12, [sp, #52] @ 4-byte Spill - sbcs r11, r10, r2 - ldr r2, [sp, #60] @ 4-byte Reload - ldr r10, [r3, #12] - sbcs r6, r6, r2 - ldr r2, [sp, #64] @ 4-byte Reload - sbcs r5, r1, r2 - ldr r2, [r3, #8] - sbc r1, r8, #0 - ldr r8, [r3, #4] - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [r3] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [r3, #16] - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [r3, #24] - ldr r3, [sp, #60] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adds r1, lr, r1 - adcs r4, r4, r8 - adcs r2, r7, r2 - adcs r10, r12, r10 - adcs r12, r11, r3 - ldr r3, [sp, #56] @ 4-byte Reload - adcs r8, r6, r9 - adc r9, r5, r3 - ldr r3, [sp, #64] @ 4-byte Reload - ands r3, r3, #1 - moveq r1, lr - moveq r2, r7 - str r1, [r0, #28] - ldr r1, [sp, #44] @ 4-byte Reload - moveq r4, r1 - ldr r1, [sp, #52] @ 4-byte Reload - cmp r3, #0 - moveq r12, r11 - moveq r8, r6 - str r4, 
[r0, #32] - str r2, [r0, #36] - moveq r10, r1 - cmp r3, #0 - moveq r9, r5 - str r10, [r0, #40] - str r12, [r0, #44] - str r8, [r0, #48] - str r9, [r0, #52] - add sp, sp, #68 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end109: - .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L - .cantunwind - .fnend - - .align 2 - .type .LmulPv256x32,%function -.LmulPv256x32: @ @mulPv256x32 - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r11, lr} - push {r4, r5, r6, r7, r8, r9, r11, lr} - ldr r12, [r1] - ldmib r1, {r3, lr} - ldr r9, [r1, #12] - umull r4, r8, lr, r2 - umull lr, r6, r12, r2 - mov r5, r4 - mov r7, r6 - str lr, [r0] - umull lr, r12, r9, r2 - umlal r7, r5, r3, r2 - str r5, [r0, #8] - str r7, [r0, #4] - umull r5, r7, r3, r2 - adds r3, r6, r5 - adcs r3, r7, r4 - adcs r3, r8, lr - str r3, [r0, #12] - ldr r3, [r1, #16] - umull r7, r6, r3, r2 - adcs r3, r12, r7 - str r3, [r0, #16] - ldr r3, [r1, #20] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #20] - ldr r3, [r1, #24] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #24] - ldr r1, [r1, #28] - umull r3, r7, r1, r2 - adcs r1, r6, r3 - str r1, [r0, #28] - adc r1, r7, #0 - str r1, [r0, #32] - pop {r4, r5, r6, r7, r8, r9, r11, lr} - mov pc, lr -.Lfunc_end110: - .size .LmulPv256x32, .Lfunc_end110-.LmulPv256x32 - .cantunwind - .fnend - - .globl mcl_fp_mulUnitPre8L - .align 2 - .type mcl_fp_mulUnitPre8L,%function -mcl_fp_mulUnitPre8L: @ @mcl_fp_mulUnitPre8L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r11, lr} - push {r4, r5, r6, r7, r11, lr} - .pad #40 - sub sp, sp, #40 - mov r4, r0 - mov r0, sp - bl .LmulPv256x32(PLT) - ldr r0, [sp, #32] - add lr, sp, #16 - ldr r12, [sp, #28] - ldm lr, {r1, r3, lr} - ldm sp, {r2, r5, r6, r7} - str r0, [r4, #32] - add r0, r4, #16 - stm r4, {r2, r5, r6, r7} - stm r0, {r1, r3, lr} - str r12, [r4, #28] - add sp, sp, #40 - pop {r4, r5, r6, r7, r11, lr} - mov pc, lr -.Lfunc_end111: - .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L - .cantunwind - .fnend - - .globl mcl_fpDbl_mulPre8L - .align 2 - .type mcl_fpDbl_mulPre8L,%function -mcl_fpDbl_mulPre8L: @ @mcl_fpDbl_mulPre8L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #124 - sub sp, sp, #124 - mov r6, r2 - mov r5, r1 - mov r4, r0 - bl mcl_fpDbl_mulPre4L(PLT) - add r0, r4, #32 - add r1, r5, #16 - add r2, r6, #16 - bl mcl_fpDbl_mulPre4L(PLT) - ldm r6, {r12, lr} - ldr r7, [r6, #16] - ldr r9, [r6, #8] - ldr r3, [r6, #12] - add r6, r6, #20 - mov r8, #0 - ldm r6, {r0, r1, r6} - adds r2, r12, r7 - adcs r0, lr, r0 - str r2, [sp, #56] @ 4-byte Spill - adcs r1, r9, r1 - str r0, [sp, #52] @ 4-byte Spill - ldr r9, [r5] - str r1, [sp, #44] @ 4-byte Spill - adcs r1, r3, r6 - str r1, [sp, #48] @ 4-byte Spill - adc r6, r8, #0 - ldmib r5, {r8, r10, r12} - ldr r7, [r5, #16] - ldr r3, [r5, #20] - ldr lr, [r5, #24] - ldr r11, [r5, #28] - str r2, [sp, #60] - str r0, [sp, #64] - mov r0, #0 - add r2, sp, #60 - adds r5, r9, r7 - ldr r7, [sp, #44] @ 4-byte Reload - adcs r8, r8, r3 - str r5, [sp, #76] - adcs r10, r10, lr - str r8, [sp, #80] - adcs r9, r12, r11 - str r10, [sp, #84] - str r7, [sp, #68] - str r1, [sp, #72] - adc r11, r0, #0 - add r0, sp, #92 - add r1, sp, #76 - str r9, [sp, #88] - bl mcl_fpDbl_mulPre4L(PLT) - ldr r0, [sp, #56] @ 4-byte Reload - ldr r1, [sp, #52] @ 4-byte Reload - cmp r6, #0 - ldr r3, [sp, #48] @ 4-byte Reload - and r12, r6, r11 - ldr lr, [sp, #120] - moveq r5, r6 - moveq r9, r6 - moveq r10, r6 - moveq r8, r6 - ldr r6, [sp, #116] - adds r0, 
r5, r0 - adcs r1, r8, r1 - adcs r2, r10, r7 - mov r7, #0 - adcs r3, r9, r3 - adc r7, r7, #0 - cmp r11, #0 - moveq r0, r5 - ldr r5, [sp, #108] - moveq r2, r10 - moveq r3, r9 - moveq r7, r11 - moveq r1, r8 - adds r8, r0, r5 - ldr r5, [sp, #112] - adcs r10, r1, r5 - adcs r9, r2, r6 - ldr r6, [r4] - ldmib r4, {r5, r11} - ldr r2, [sp, #92] - adcs lr, r3, lr - add r3, sp, #96 - adc r12, r7, r12 - ldr r7, [r4, #12] - ldm r3, {r0, r1, r3} - subs r2, r2, r6 - str r2, [sp, #52] @ 4-byte Spill - sbcs r0, r0, r5 - str r0, [sp, #48] @ 4-byte Spill - sbcs r0, r1, r11 - str r0, [sp, #44] @ 4-byte Spill - sbcs r0, r3, r7 - ldr r7, [r4, #20] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r4, #16] - str r0, [sp, #56] @ 4-byte Spill - sbcs r0, r8, r0 - ldr r8, [r4, #28] - str r0, [sp, #28] @ 4-byte Spill - sbcs r0, r10, r7 - ldr r10, [r4, #24] - str r0, [sp, #24] @ 4-byte Spill - sbcs r0, r9, r10 - str r0, [sp, #20] @ 4-byte Spill - sbcs r0, lr, r8 - add lr, r4, #32 - str r0, [sp, #16] @ 4-byte Spill - sbc r0, r12, #0 - str r0, [sp, #12] @ 4-byte Spill - ldm lr, {r5, r9, lr} - ldr r6, [sp, #52] @ 4-byte Reload - ldr r12, [r4, #44] - ldr r2, [r4, #48] - ldr r0, [r4, #52] - ldr r1, [r4, #56] - ldr r3, [r4, #60] - subs r6, r6, r5 - str r1, [sp, #36] @ 4-byte Spill - str r3, [sp, #32] @ 4-byte Spill - str r6, [sp] @ 4-byte Spill - ldr r6, [sp, #48] @ 4-byte Reload - sbcs r11, r6, r9 - ldr r6, [sp, #44] @ 4-byte Reload - sbcs r6, r6, lr - str r6, [sp, #4] @ 4-byte Spill - ldr r6, [sp, #40] @ 4-byte Reload - sbcs r6, r6, r12 - str r6, [sp, #8] @ 4-byte Spill - ldr r6, [sp, #28] @ 4-byte Reload - sbcs r6, r6, r2 - str r6, [sp, #28] @ 4-byte Spill - ldr r6, [sp, #24] @ 4-byte Reload - sbcs r6, r6, r0 - str r6, [sp, #40] @ 4-byte Spill - mov r6, r0 - ldr r0, [sp, #20] @ 4-byte Reload - sbcs r0, r0, r1 - ldr r1, [sp] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - sbcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - sbc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adds r3, r0, r1 - ldr r0, [sp, #4] @ 4-byte Reload - adcs r7, r7, r11 - str r3, [r4, #16] - str r7, [r4, #20] - adcs r3, r10, r0 - ldr r0, [sp, #8] @ 4-byte Reload - str r3, [r4, #24] - ldr r3, [sp, #32] @ 4-byte Reload - adcs r1, r8, r0 - ldr r0, [sp, #28] @ 4-byte Reload - str r1, [r4, #28] - ldr r1, [sp, #40] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [r4, #32] - ldr r0, [sp, #44] @ 4-byte Reload - adcs r1, r9, r1 - str r1, [r4, #36] - adcs r0, lr, r0 - str r0, [r4, #40] - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r12, r0 - add r12, r4, #48 - str r0, [r4, #44] - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [sp, #36] @ 4-byte Reload - adcs r1, r6, #0 - adcs r2, r2, #0 - adc r3, r3, #0 - stm r12, {r0, r1, r2, r3} - add sp, sp, #124 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end112: - .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L - .cantunwind - .fnend - - .globl mcl_fpDbl_sqrPre8L - .align 2 - .type mcl_fpDbl_sqrPre8L,%function -mcl_fpDbl_sqrPre8L: @ @mcl_fpDbl_sqrPre8L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #124 - sub sp, sp, #124 - mov r5, r1 - mov r4, r0 - mov r2, r5 - bl mcl_fpDbl_mulPre4L(PLT) - add r1, r5, #16 - add r0, r4, #32 - mov r2, r1 - bl mcl_fpDbl_mulPre4L(PLT) - ldm r5, {r0, r8, lr} - ldr r3, [r5, #16] - ldr r2, [r5, #20] - ldr r6, [r5, #24] - ldr r12, [r5, #12] - ldr r1, [r5, #28] - adds r9, 
r0, r3 - add r0, sp, #64 - adcs r5, r8, r2 - str r9, [sp, #76] - str r9, [sp, #60] - add r2, sp, #60 - adcs r6, lr, r6 - str r5, [sp, #80] - adcs r7, r12, r1 - str r6, [sp, #84] - add r1, sp, #76 - str r7, [sp, #88] - stm r0, {r5, r6, r7} - mov r0, #0 - adc r8, r0, #0 - add r0, sp, #92 - bl mcl_fpDbl_mulPre4L(PLT) - adds r12, r9, r9 - adcs lr, r5, r5 - adcs r9, r6, r6 - add r6, sp, #112 - ldm r6, {r0, r5, r6} - ldr r1, [sp, #108] - adc r10, r7, r7 - adds r2, r1, r12 - adcs r3, r0, lr - adcs r12, r5, r9 - adcs lr, r6, r10 - adc r7, r8, r7, lsr #31 - cmp r8, #0 - moveq lr, r6 - add r6, sp, #92 - moveq r7, r8 - moveq r12, r5 - moveq r3, r0 - moveq r2, r1 - ldm r4, {r8, r9, r10, r11} - ldm r6, {r0, r1, r5, r6} - subs r0, r0, r8 - ldr r8, [r4, #20] - str r0, [sp, #52] @ 4-byte Spill - sbcs r0, r1, r9 - ldr r9, [r4, #24] - str r0, [sp, #48] @ 4-byte Spill - sbcs r0, r5, r10 - ldr r10, [r4, #28] - str r0, [sp, #44] @ 4-byte Spill - sbcs r0, r6, r11 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r4, #16] - str r0, [sp, #56] @ 4-byte Spill - sbcs r0, r2, r0 - str r0, [sp, #28] @ 4-byte Spill - sbcs r0, r3, r8 - str r0, [sp, #24] @ 4-byte Spill - sbcs r0, r12, r9 - str r0, [sp, #20] @ 4-byte Spill - sbcs r0, lr, r10 - add lr, r4, #32 - str r0, [sp, #16] @ 4-byte Spill - sbc r0, r7, #0 - str r0, [sp, #12] @ 4-byte Spill - ldm lr, {r5, r7, lr} - ldr r6, [sp, #52] @ 4-byte Reload - ldr r12, [r4, #44] - ldr r2, [r4, #48] - ldr r0, [r4, #52] - ldr r1, [r4, #56] - ldr r3, [r4, #60] - subs r6, r6, r5 - str r1, [sp, #36] @ 4-byte Spill - str r3, [sp, #32] @ 4-byte Spill - str r6, [sp] @ 4-byte Spill - ldr r6, [sp, #48] @ 4-byte Reload - sbcs r11, r6, r7 - ldr r6, [sp, #44] @ 4-byte Reload - sbcs r6, r6, lr - str r6, [sp, #4] @ 4-byte Spill - ldr r6, [sp, #40] @ 4-byte Reload - sbcs r6, r6, r12 - str r6, [sp, #8] @ 4-byte Spill - ldr r6, [sp, #28] @ 4-byte Reload - sbcs r6, r6, r2 - str r6, [sp, #28] @ 4-byte Spill - ldr r6, [sp, #24] @ 4-byte Reload - sbcs r6, r6, r0 - str r6, [sp, #40] @ 4-byte Spill - mov r6, r0 - ldr r0, [sp, #20] @ 4-byte Reload - sbcs r0, r0, r1 - ldr r1, [sp] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - sbcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - sbc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adds r3, r1, r0 - ldr r0, [sp, #4] @ 4-byte Reload - adcs r1, r11, r8 - str r3, [r4, #16] - str r1, [r4, #20] - adcs r3, r0, r9 - ldr r0, [sp, #8] @ 4-byte Reload - str r3, [r4, #24] - ldr r3, [sp, #32] @ 4-byte Reload - adcs r1, r0, r10 - ldr r0, [sp, #28] @ 4-byte Reload - str r1, [r4, #28] - ldr r1, [sp, #40] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [r4, #32] - ldr r0, [sp, #44] @ 4-byte Reload - adcs r1, r1, r7 - str r1, [r4, #36] - adcs r0, r0, lr - str r0, [r4, #40] - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - add r12, r4, #48 - str r0, [r4, #44] - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #36] @ 4-byte Reload - adcs r1, r6, #0 - adcs r2, r2, #0 - adc r3, r3, #0 - stm r12, {r0, r1, r2, r3} - add sp, sp, #124 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end113: - .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L - .cantunwind - .fnend - - .globl mcl_fp_mont8L - .align 2 - .type mcl_fp_mont8L,%function -mcl_fp_mont8L: @ @mcl_fp_mont8L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #724 - sub sp, sp, #724 - mov r7, r2 - ldr 
r5, [r3, #-4] - str r0, [sp, #60] @ 4-byte Spill - add r0, sp, #680 - str r3, [sp, #64] @ 4-byte Spill - str r1, [sp, #68] @ 4-byte Spill - mov r4, r3 - mov r11, r1 - ldr r2, [r7] - str r7, [sp, #76] @ 4-byte Spill - str r5, [sp, #72] @ 4-byte Spill - bl .LmulPv256x32(PLT) - ldr r0, [sp, #684] - ldr r9, [sp, #680] - mov r1, r4 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #688] - mul r2, r9, r5 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #692] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #712] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #708] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #704] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #700] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #696] - str r0, [sp, #28] @ 4-byte Spill - add r0, sp, #640 - bl .LmulPv256x32(PLT) - ldr r0, [sp, #672] - add r10, sp, #644 - ldr r4, [sp, #656] - ldr r6, [sp, #640] - mov r1, r11 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #668] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #664] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #660] - str r0, [sp, #12] @ 4-byte Spill - ldm r10, {r5, r8, r10} - ldr r2, [r7, #4] - add r0, sp, #600 - bl .LmulPv256x32(PLT) - adds r0, r6, r9 - ldr r2, [sp, #12] @ 4-byte Reload - mov r1, #0 - add r12, sp, #604 - ldr r9, [sp, #628] - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r8, r0 - ldr r8, [sp, #632] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r10, r10, r0 - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #600] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r11, r2, r0 - ldr r0, [sp, #52] @ 4-byte Reload - ldr r2, [sp, #20] @ 4-byte Reload - adcs r7, r2, r0 - ldr r0, [sp, #56] @ 4-byte Reload - ldr r2, [sp, #24] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r1, #0 - str r0, [sp, #24] @ 4-byte Spill - ldm r12, {r0, r1, r2, r3, r6, r12} - ldr lr, [sp, #48] @ 4-byte Reload - ldr r5, [sp, #44] @ 4-byte Reload - adds r4, lr, r4 - adcs r0, r5, r0 - str r0, [sp, #56] @ 4-byte Spill - adcs r0, r10, r1 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #44] @ 4-byte Spill - adcs r0, r11, r6 - ldr r6, [sp, #64] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r7, r12 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - mov r1, r6 - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - mul r2, r4, r0 - add r0, sp, #560 - bl .LmulPv256x32(PLT) - ldr r0, [sp, #592] - ldr r5, [sp, #76] @ 4-byte Reload - ldr r1, [sp, #68] @ 4-byte Reload - ldr r7, [sp, #576] - ldr r10, [sp, #560] - ldr r11, [sp, #564] - ldr r8, [sp, #568] - ldr r9, [sp, #572] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #588] - ldr r2, [r5, #8] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #584] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #580] - str r0, [sp, #8] @ 4-byte Spill - add r0, sp, #520 - bl .LmulPv256x32(PLT) - adds r0, r4, r10 - ldr r1, [sp, #8] @ 4-byte Reload - add 
lr, sp, #520 - ldr r4, [sp, #544] - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r10, r0, r8 - ldr r0, [sp, #48] @ 4-byte Reload - ldr r8, [sp, #552] - adcs r11, r0, r9 - ldr r0, [sp, #44] @ 4-byte Reload - ldr r9, [sp, #548] - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #56] @ 4-byte Reload - adds r7, r7, r0 - adcs r0, r10, r1 - mov r1, r6 - str r0, [sp, #56] @ 4-byte Spill - adcs r0, r11, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #480 - bl .LmulPv256x32(PLT) - ldr r0, [sp, #512] - ldr r2, [r5, #12] - ldr r1, [sp, #68] @ 4-byte Reload - ldr r4, [sp, #500] - ldr r6, [sp, #496] - ldr r10, [sp, #480] - ldr r11, [sp, #484] - ldr r8, [sp, #488] - ldr r9, [sp, #492] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #508] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #504] - str r0, [sp, #12] @ 4-byte Spill - add r0, sp, #440 - bl .LmulPv256x32(PLT) - adds r0, r7, r10 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #440 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r5, r0, r11 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r10, r0, r8 - ldr r0, [sp, #48] @ 4-byte Reload - ldr r8, [sp, #472] - adcs r11, r0, r9 - ldr r0, [sp, #44] @ 4-byte Reload - ldr r9, [sp, #468] - adcs r0, r0, r6 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #464] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r6, r0, r1 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #20] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r7, r5, r0 - adcs r0, r10, r1 - ldr r1, [sp, #64] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - adcs r0, r11, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte 
Spill - adcs r0, r6, r4 - ldr r6, [sp, #72] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - mul r2, r7, r6 - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - add r0, sp, #400 - bl .LmulPv256x32(PLT) - ldr r0, [sp, #432] - ldr r5, [sp, #68] @ 4-byte Reload - ldr r4, [sp, #416] - ldr r10, [sp, #400] - ldr r11, [sp, #404] - ldr r8, [sp, #408] - ldr r9, [sp, #412] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #428] - mov r1, r5 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #424] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #420] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #16] - add r0, sp, #360 - bl .LmulPv256x32(PLT) - adds r0, r7, r10 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #360 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r7, r0, r11 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r10, r0, r8 - ldr r0, [sp, #48] @ 4-byte Reload - ldr r8, [sp, #392] - adcs r11, r0, r9 - ldr r0, [sp, #44] @ 4-byte Reload - ldr r9, [sp, #388] - adcs r0, r0, r4 - ldr r4, [sp, #384] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r7, r7, r0 - adcs r0, r10, r1 - ldr r1, [sp, #64] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - adcs r0, r11, r2 - mul r2, r7, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - add r0, sp, #320 - bl .LmulPv256x32(PLT) - ldr r0, [sp, #352] - ldr r6, [sp, #340] - ldr r4, [sp, #336] - ldr r10, [sp, #320] - ldr r11, [sp, #324] - ldr r8, [sp, #328] - ldr r9, [sp, #332] - mov r1, r5 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #348] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #344] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #20] - add r0, sp, #280 - bl .LmulPv256x32(PLT) - adds r0, r7, r10 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #280 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r5, r0, r11 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r10, r0, r8 - ldr r0, [sp, #48] @ 4-byte Reload - ldr r8, [sp, #312] - adcs r11, r0, r9 - ldr r0, [sp, #44] @ 4-byte Reload - ldr r9, [sp, #308] - adcs r0, r0, r4 - ldr r4, [sp, #304] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 
4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r6, r0, r1 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #20] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r7, r5, r0 - ldr r5, [sp, #64] @ 4-byte Reload - adcs r0, r10, r1 - str r0, [sp, #56] @ 4-byte Spill - adcs r0, r11, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - mov r1, r5 - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r6, r4 - ldr r6, [sp, #72] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - mul r2, r7, r6 - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - add r0, sp, #240 - bl .LmulPv256x32(PLT) - ldr r0, [sp, #272] - ldr r1, [sp, #68] @ 4-byte Reload - ldr r4, [sp, #256] - ldr r10, [sp, #240] - ldr r11, [sp, #244] - ldr r8, [sp, #248] - ldr r9, [sp, #252] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #268] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #264] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #260] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #24] - add r0, sp, #200 - bl .LmulPv256x32(PLT) - adds r0, r7, r10 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #200 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r7, r0, r11 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r10, r0, r8 - ldr r0, [sp, #48] @ 4-byte Reload - ldr r8, [sp, #232] - adcs r11, r0, r9 - ldr r0, [sp, #44] @ 4-byte Reload - ldr r9, [sp, #228] - adcs r0, r0, r4 - ldr r4, [sp, #224] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r7, r7, r0 - adcs r0, r10, r1 - mov r1, r5 - str r0, [sp, #56] @ 4-byte Spill - adcs r0, r11, r2 - mul r2, r7, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - add r0, sp, #160 - bl .LmulPv256x32(PLT) - ldr r0, [sp, #192] - ldr r1, [sp, 
#68] @ 4-byte Reload - ldr r6, [sp, #184] - ldr r4, [sp, #180] - ldr r5, [sp, #176] - ldr r10, [sp, #160] - ldr r11, [sp, #164] - ldr r8, [sp, #168] - ldr r9, [sp, #172] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #188] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #28] - add r0, sp, #120 - bl .LmulPv256x32(PLT) - adds r0, r7, r10 - ldr r1, [sp, #52] @ 4-byte Reload - ldr r2, [sp, #16] @ 4-byte Reload - ldr r12, [sp, #124] - ldr r3, [sp, #128] - add lr, sp, #136 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r11 - adcs r8, r1, r8 - ldr r1, [sp, #48] @ 4-byte Reload - adcs r9, r1, r9 - ldr r1, [sp, #44] @ 4-byte Reload - adcs r1, r1, r5 - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r11, r1, r4 - ldr r1, [sp, #36] @ 4-byte Reload - ldr r4, [sp, #132] - adcs r1, r1, r6 - ldr r6, [sp, #152] - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #32] @ 4-byte Reload - adcs r10, r1, r2 - ldr r1, [sp, #28] @ 4-byte Reload - ldr r2, [sp, #20] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #120] - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #24] @ 4-byte Reload - adc r1, r1, #0 - adds r5, r0, r2 - ldr r0, [sp, #72] @ 4-byte Reload - adcs r8, r8, r12 - str r1, [sp, #52] @ 4-byte Spill - adcs r3, r9, r3 - mul r7, r5, r0 - ldm lr, {r0, r1, r2, lr} - str r3, [sp, #48] @ 4-byte Spill - ldr r3, [sp, #76] @ 4-byte Reload - adcs r3, r3, r4 - ldr r4, [sp, #64] @ 4-byte Reload - adcs r9, r11, r0 - ldr r0, [sp, #68] @ 4-byte Reload - str r3, [sp, #44] @ 4-byte Spill - adcs r0, r0, r1 - mov r1, r4 - str r0, [sp, #68] @ 4-byte Spill - adcs r0, r10, r2 - mov r2, r7 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r10, r0, r6 - mov r0, #0 - adc r11, r0, #0 - add r0, sp, #80 - bl .LmulPv256x32(PLT) - add r3, sp, #80 - ldm r3, {r0, r1, r2, r3} - adds r0, r5, r0 - ldr r0, [sp, #48] @ 4-byte Reload - adcs lr, r8, r1 - ldr r1, [sp, #68] @ 4-byte Reload - str lr, [sp, #40] @ 4-byte Spill - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r7, r0, r3 - ldr r0, [sp, #96] - str r7, [sp, #52] @ 4-byte Spill - adcs r9, r9, r0 - ldr r0, [sp, #100] - adcs r12, r1, r0 - ldr r0, [sp, #104] - ldr r1, [sp, #72] @ 4-byte Reload - str r12, [sp, #68] @ 4-byte Spill - adcs r8, r1, r0 - ldr r0, [sp, #108] - ldr r1, [sp, #76] @ 4-byte Reload - str r8, [sp, #72] @ 4-byte Spill - adcs r6, r1, r0 - ldr r0, [sp, #112] - adcs r5, r10, r0 - adc r0, r11, #0 - str r0, [sp, #76] @ 4-byte Spill - ldm r4, {r1, r2, r3, r11} - ldr r0, [r4, #16] - ldr r10, [r4, #24] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [r4, #20] - subs r1, lr, r1 - ldr lr, [sp, #56] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [r4, #28] - sbcs r2, lr, r2 - ldr r4, [sp, #48] @ 4-byte Reload - sbcs r3, r7, r3 - sbcs r7, r9, r11 - mov r11, r6 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - sbcs r0, r12, r0 - sbcs r12, r8, r4 - ldr r4, [sp, #64] @ 4-byte Reload - sbcs r8, r6, r10 - mov r10, r5 - sbcs r4, r5, r4 - ldr r5, [sp, #76] @ 4-byte Reload - sbc r6, r5, #0 - ldr r5, [sp, #40] @ 4-byte Reload - ands r6, r6, #1 - movne r2, lr - movne r1, r5 - ldr r5, [sp, #60] @ 4-byte Reload - str r1, [r5] - ldr r1, [sp, #52] @ 4-byte Reload - str r2, [r5, #4] - movne r3, r1 - ldr r1, [sp, #68] @ 4-byte Reload - cmp r6, #0 - movne r7, r9 - str r3, [r5, #8] - str r7, [r5, #12] - movne r0, r1 - str r0, [r5, 
#16] - ldr r0, [sp, #72] @ 4-byte Reload - movne r12, r0 - cmp r6, #0 - movne r8, r11 - movne r4, r10 - str r12, [r5, #20] - str r8, [r5, #24] - str r4, [r5, #28] - add sp, sp, #724 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end114: - .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L - .cantunwind - .fnend - - .globl mcl_fp_montNF8L - .align 2 - .type mcl_fp_montNF8L,%function -mcl_fp_montNF8L: @ @mcl_fp_montNF8L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #716 - sub sp, sp, #716 - mov r7, r2 - ldr r5, [r3, #-4] - str r0, [sp, #52] @ 4-byte Spill - add r0, sp, #672 - str r3, [sp, #60] @ 4-byte Spill - str r1, [sp, #68] @ 4-byte Spill - mov r4, r3 - mov r10, r1 - ldr r2, [r7] - str r7, [sp, #56] @ 4-byte Spill - str r5, [sp, #64] @ 4-byte Spill - bl .LmulPv256x32(PLT) - ldr r0, [sp, #676] - ldr r11, [sp, #672] - mov r1, r4 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #680] - mul r2, r11, r5 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #684] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #704] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #700] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #696] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #692] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #688] - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #632 - bl .LmulPv256x32(PLT) - ldr r0, [sp, #664] - ldr r2, [r7, #4] - ldr r4, [sp, #648] - ldr r6, [sp, #632] - ldr r8, [sp, #636] - ldr r5, [sp, #640] - ldr r9, [sp, #644] - mov r1, r10 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #660] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #656] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #652] - str r0, [sp, #4] @ 4-byte Spill - add r0, sp, #592 - bl .LmulPv256x32(PLT) - adds r0, r6, r11 - ldr r1, [sp, #4] @ 4-byte Reload - add r6, sp, #596 - ldr r12, [sp, #616] - ldr r3, [sp, #612] - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r8, r0 - ldr r8, [sp, #620] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r9, r9, r0 - ldr r0, [sp, #20] @ 4-byte Reload - adcs r11, r4, r0 - ldr r0, [sp, #24] @ 4-byte Reload - ldr r4, [sp, #592] - adcs r0, r1, r0 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r10, r1, r0 - ldr r0, [sp, #44] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r7, r1, r0 - ldr r0, [sp, #48] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adc r0, r1, r0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #624] - str r0, [sp, #20] @ 4-byte Spill - ldm r6, {r0, r1, r2, r6} - ldr lr, [sp, #40] @ 4-byte Reload - ldr r5, [sp, #36] @ 4-byte Reload - adds r4, lr, r4 - adcs r0, r5, r0 - ldr r5, [sp, #64] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r9, r1 - str r0, [sp, #44] @ 4-byte Spill - adcs r0, r11, r2 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - mul r2, r4, r5 - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r10, r3 - str r0, [sp, #32] @ 4-byte Spill - adcs r0, r7, r12 - ldr r7, [sp, #60] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - mov r1, r7 - adcs r0, r0, r8 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #552 - bl .LmulPv256x32(PLT) - ldr r0, [sp, #584] - ldr r1, [sp, #68] 
@ 4-byte Reload - ldr r9, [sp, #568] - ldr r10, [sp, #552] - ldr r11, [sp, #556] - ldr r8, [sp, #560] - ldr r6, [sp, #564] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #580] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #576] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #572] - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - ldr r2, [r0, #8] - add r0, sp, #512 - bl .LmulPv256x32(PLT) - adds r0, r4, r10 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #516 - ldr r4, [sp, #536] - ldr r3, [sp, #512] - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r10, r0, r8 - ldr r0, [sp, #40] @ 4-byte Reload - ldr r8, [sp, #540] - adcs r11, r0, r6 - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #544] - str r0, [sp, #20] @ 4-byte Spill - ldm lr, {r0, r1, r2, r12, lr} - ldr r6, [sp, #48] @ 4-byte Reload - adds r9, r6, r3 - adcs r0, r10, r0 - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r11, r1 - mov r1, r7 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r9, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #472 - bl .LmulPv256x32(PLT) - ldr r0, [sp, #504] - ldr r1, [sp, #68] @ 4-byte Reload - ldr r4, [sp, #492] - ldr r7, [sp, #488] - ldr r10, [sp, #472] - ldr r11, [sp, #476] - ldr r8, [sp, #480] - ldr r6, [sp, #484] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #500] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #496] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - ldr r2, [r0, #12] - add r0, sp, #432 - bl .LmulPv256x32(PLT) - adds r0, r9, r10 - ldr r1, [sp, #8] @ 4-byte Reload - ldr r3, [sp, #432] - add lr, sp, #436 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r5, r0, r11 - ldr r0, [sp, #44] @ 4-byte Reload - adcs r10, r0, r8 - ldr r0, [sp, #40] @ 4-byte Reload - ldr r8, [sp, #460] - adcs r11, r0, r6 - ldr r0, [sp, #36] @ 4-byte Reload - adcs r7, r0, r7 - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #456] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r6, r0, r1 - ldr r0, [sp, #24] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, r1 - adds r9, r5, r3 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #464] - str r0, [sp, #20] @ 4-byte Spill - ldm lr, {r0, r1, r2, r12, lr} - adcs r0, r10, r0 - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r11, r1 - ldr r1, [sp, #60] @ 4-byte 
Reload - str r0, [sp, #44] @ 4-byte Spill - adcs r0, r7, r2 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r6, lr - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - mul r2, r9, r0 - add r0, sp, #392 - bl .LmulPv256x32(PLT) - ldr r0, [sp, #424] - ldr r5, [sp, #56] @ 4-byte Reload - ldr r7, [sp, #68] @ 4-byte Reload - ldr r4, [sp, #408] - ldr r10, [sp, #392] - ldr r11, [sp, #396] - ldr r8, [sp, #400] - ldr r6, [sp, #404] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #420] - ldr r2, [r5, #16] - mov r1, r7 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #416] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #412] - str r0, [sp, #4] @ 4-byte Spill - add r0, sp, #352 - bl .LmulPv256x32(PLT) - adds r0, r9, r10 - ldr r1, [sp, #4] @ 4-byte Reload - ldr r3, [sp, #352] - add lr, sp, #356 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r9, r0, r11 - ldr r0, [sp, #44] @ 4-byte Reload - adcs r10, r0, r8 - ldr r0, [sp, #40] @ 4-byte Reload - ldr r8, [sp, #380] - adcs r11, r0, r6 - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #376] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r6, r0, r1 - ldr r0, [sp, #24] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, r1 - adds r9, r9, r3 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #384] - str r0, [sp, #20] @ 4-byte Spill - ldm lr, {r0, r1, r2, r12, lr} - adcs r0, r10, r0 - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r11, r1 - ldr r1, [sp, #60] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r6, lr - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - mul r2, r9, r0 - add r0, sp, #312 - bl .LmulPv256x32(PLT) - ldr r0, [sp, #344] - ldr r2, [r5, #20] - ldr r4, [sp, #328] - ldr r10, [sp, #312] - ldr r11, [sp, #316] - ldr r8, [sp, #320] - ldr r6, [sp, #324] - mov r1, r7 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #340] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #336] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #332] - str r0, [sp, #4] @ 4-byte Spill - add r0, sp, #272 - bl .LmulPv256x32(PLT) - adds r0, r9, r10 - ldr r1, [sp, #4] @ 4-byte Reload - ldr r3, [sp, #272] - add lr, sp, #276 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r5, r0, r11 - ldr r0, [sp, #44] @ 4-byte Reload - adcs r10, r0, r8 - ldr r0, [sp, #40] @ 4-byte Reload - ldr r8, [sp, #300] - adcs r11, r0, r6 - ldr r0, [sp, #36] @ 4-byte Reload - adcs r7, r0, r4 - ldr r0, [sp, #32] @ 4-byte Reload - ldr r4, [sp, #296] - adcs r0, r0, r1 - 
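The unrolled `mcl_fp_mont8L`/`mcl_fp_montNF8L` bodies here follow the usual word-serial (CIOS-style) Montgomery multiplication pattern: for each 32-bit word of one operand, multiply-accumulate through a `.LmulPv256x32` call, fold in `m = t[0] * p'` (the `mul r2, ..., r5` against the per-modulus constant loaded from `[r3, #-4]`), and finish with a conditional subtraction (the `subs`/`sbcs` chain followed by conditional moves); `montNF8L` is a variant of the same interleaved loop that differs only in how the final correction is applied. A minimal standalone Go sketch of that algorithm over eight 32-bit limbs, not part of the vendored code, with placeholder names (`montMul`, `nLimbs`) and the assumption that `pInv = -p^{-1} mod 2^32`:

package fpsketch

import "math/bits"

const nLimbs = 8 // eight 32-bit limbs, as in mcl_fp_mont8L

// montMul returns x*y*R^{-1} mod p with R = 2^(32*nLimbs), using the
// coarsely integrated operand-scanning (CIOS) method.
func montMul(x, y, p *[nLimbs]uint32, pInv uint32) (z [nLimbs]uint32) {
	var t [nLimbs + 2]uint32
	for i := 0; i < nLimbs; i++ {
		// t += x * y[i]  (the role of each .LmulPv256x32 call)
		var c uint64
		for j := 0; j < nLimbs; j++ {
			v := uint64(t[j]) + uint64(x[j])*uint64(y[i]) + c
			t[j], c = uint32(v), v>>32
		}
		v := uint64(t[nLimbs]) + c
		t[nLimbs], t[nLimbs+1] = uint32(v), uint32(v>>32)

		// t += m*p with m = t[0]*pInv, chosen so the low word of t
		// becomes zero; then shift t down one word.
		m := t[0] * pInv
		c = (uint64(t[0]) + uint64(m)*uint64(p[0])) >> 32
		for j := 1; j < nLimbs; j++ {
			v := uint64(t[j]) + uint64(m)*uint64(p[j]) + c
			t[j-1], c = uint32(v), v>>32
		}
		v = uint64(t[nLimbs]) + c
		t[nLimbs-1] = uint32(v)
		t[nLimbs] = t[nLimbs+1] + uint32(v>>32)
	}
	// Conditional subtraction, mirroring the subs/sbcs + movne/movlt tail.
	var borrow uint32
	var s [nLimbs]uint32
	for j := 0; j < nLimbs; j++ {
		s[j], borrow = bits.Sub32(t[j], p[j], borrow)
	}
	if t[nLimbs] != 0 || borrow == 0 { // t >= p
		return s
	}
	copy(z[:], t[:nLimbs])
	return z
}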
ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r6, r0, r1 - ldr r0, [sp, #24] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, r1 - adds r9, r5, r3 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #304] - str r0, [sp, #20] @ 4-byte Spill - ldm lr, {r0, r1, r2, r12, lr} - ldr r5, [sp, #60] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r11, r1 - mov r1, r5 - str r0, [sp, #44] @ 4-byte Spill - adcs r0, r7, r2 - ldr r7, [sp, #64] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - mul r2, r9, r7 - adcs r0, r0, r12 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r6, lr - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #232 - bl .LmulPv256x32(PLT) - ldr r0, [sp, #264] - ldr r1, [sp, #68] @ 4-byte Reload - ldr r4, [sp, #248] - ldr r10, [sp, #232] - ldr r11, [sp, #236] - ldr r8, [sp, #240] - ldr r6, [sp, #244] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #260] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #256] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #252] - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - ldr r2, [r0, #24] - add r0, sp, #192 - bl .LmulPv256x32(PLT) - adds r0, r9, r10 - ldr r1, [sp, #4] @ 4-byte Reload - ldr r3, [sp, #192] - add lr, sp, #196 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r9, r0, r11 - ldr r0, [sp, #44] @ 4-byte Reload - adcs r10, r0, r8 - ldr r0, [sp, #40] @ 4-byte Reload - ldr r8, [sp, #220] - adcs r11, r0, r6 - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #216] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r6, r0, r1 - ldr r0, [sp, #24] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, r1 - adds r9, r9, r3 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #224] - str r0, [sp, #20] @ 4-byte Spill - ldm lr, {r0, r1, r2, r12, lr} - adcs r0, r10, r0 - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r11, r1 - mov r1, r5 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r9, r7 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r6, lr - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #152 - bl .LmulPv256x32(PLT) - ldr r0, [sp, #184] - ldr r1, [sp, #68] @ 4-byte Reload - ldr r5, [sp, #176] - ldr r4, [sp, #172] - ldr r7, [sp, #168] - ldr r10, [sp, #152] - ldr r11, [sp, #156] - ldr r8, [sp, #160] - ldr r6, [sp, #164] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #180] - str r0, [sp, #12] @ 4-byte Spill 
- ldr r0, [sp, #56] @ 4-byte Reload - ldr r2, [r0, #28] - add r0, sp, #112 - bl .LmulPv256x32(PLT) - adds r0, r9, r10 - ldr r1, [sp, #44] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - add lr, sp, #128 - ldr r12, [sp, #116] - ldr r3, [sp, #120] - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r11 - adcs r1, r1, r8 - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r1, r6 - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adcs r11, r1, r7 - ldr r1, [sp, #32] @ 4-byte Reload - adcs r10, r1, r4 - ldr r1, [sp, #28] @ 4-byte Reload - ldr r4, [sp, #124] - adcs r1, r1, r5 - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #24] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #20] @ 4-byte Reload - adc r1, r1, r2 - ldr r2, [sp, #112] - str r1, [sp, #40] @ 4-byte Spill - adds r5, r0, r2 - ldr r0, [sp, #64] @ 4-byte Reload - mul r9, r5, r0 - ldm lr, {r0, r1, r2, r6, lr} - ldr r8, [sp, #68] @ 4-byte Reload - adcs r7, r8, r12 - ldr r8, [sp, #60] @ 4-byte Reload - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [sp, #56] @ 4-byte Reload - adcs r3, r7, r3 - adcs r11, r11, r4 - str r3, [sp, #56] @ 4-byte Spill - adcs r4, r10, r0 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r2 - mov r2, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r10, r0, r6 - add r0, sp, #72 - adc r7, lr, #0 - bl .LmulPv256x32(PLT) - add r3, sp, #72 - ldm r3, {r0, r1, r2, r3} - adds r0, r5, r0 - ldr r0, [sp, #36] @ 4-byte Reload - adcs r5, r0, r1 - ldr r0, [sp, #56] @ 4-byte Reload - ldr r1, [sp, #64] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r11, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #88] - adcs r3, r4, r0 - ldr r0, [sp, #92] - str r3, [sp, #40] @ 4-byte Spill - adcs r6, r1, r0 - ldr r0, [sp, #96] - ldr r1, [sp, #68] @ 4-byte Reload - str r6, [sp, #64] @ 4-byte Spill - adcs r12, r1, r0 - ldr r0, [sp, #100] - ldr r1, [sp, #104] - str r12, [sp, #68] @ 4-byte Spill - adcs r11, r10, r0 - adc r4, r7, r1 - ldm r8, {r1, r2, r9, r10} - ldr r0, [r8, #20] - ldr r7, [r8, #16] - ldr lr, [r8, #28] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [r8, #24] - str r0, [sp, #44] @ 4-byte Spill - mov r0, r5 - subs r5, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - sbcs r8, r1, r2 - ldr r2, [sp, #56] @ 4-byte Reload - sbcs r9, r2, r9 - sbcs r10, r3, r10 - ldr r3, [sp, #36] @ 4-byte Reload - sbcs r7, r6, r7 - sbcs r6, r12, r3 - ldr r3, [sp, #44] @ 4-byte Reload - sbcs r12, r11, r3 - sbc lr, r4, lr - cmp lr, #0 - movlt r5, r0 - ldr r0, [sp, #40] @ 4-byte Reload - movlt r8, r1 - movlt r9, r2 - cmp lr, #0 - movlt r10, r0 - ldr r0, [sp, #64] @ 4-byte Reload - movlt r7, r0 - ldr r0, [sp, #68] @ 4-byte Reload - movlt r6, r0 - cmp lr, #0 - movlt lr, r4 - ldr r4, [sp, #52] @ 4-byte Reload - movlt r12, r11 - add r0, r4, #20 - stm r4, {r5, r8, r9, r10} - str r7, [r4, #16] - stm r0, {r6, r12, lr} - add sp, sp, #716 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end115: - .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L - .cantunwind - .fnend - - .globl mcl_fp_montRed8L - .align 2 - .type mcl_fp_montRed8L,%function -mcl_fp_montRed8L: @ @mcl_fp_montRed8L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #420 - sub sp, sp, #420 - mov r5, r2 - str r0, [sp, #92] @ 
4-byte Spill - ldr r2, [r1, #4] - ldr r4, [r1] - ldr r9, [r1, #40] - ldr r10, [r1, #44] - ldr r0, [r5] - ldr r11, [r5, #-4] - str r2, [sp, #56] @ 4-byte Spill - ldr r2, [r1, #8] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [r5, #4] - str r2, [sp, #52] @ 4-byte Spill - ldr r2, [r1, #12] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [r5, #8] - str r2, [sp, #48] @ 4-byte Spill - ldr r2, [r1, #16] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [r5, #12] - str r2, [sp, #32] @ 4-byte Spill - ldr r2, [r1, #20] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [r5, #16] - str r2, [sp, #36] @ 4-byte Spill - ldr r2, [r1, #24] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [r5, #20] - str r2, [sp, #40] @ 4-byte Spill - ldr r2, [r1, #28] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [r5, #24] - str r2, [sp, #44] @ 4-byte Spill - mul r2, r4, r11 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [r5, #28] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [r1, #32] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [r1, #36] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [r1, #48] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [r1, #52] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [r1, #56] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [r1, #60] - mov r1, r5 - str r0, [sp, #28] @ 4-byte Spill - add r0, sp, #376 - bl .LmulPv256x32(PLT) - add lr, sp, #396 - ldr r8, [sp, #408] - add r6, sp, #384 - ldm lr, {r3, r12, lr} - ldr r7, [sp, #376] - ldr r1, [sp, #380] - ldm r6, {r0, r2, r6} - adds r4, r4, r7 - ldr r4, [sp, #56] @ 4-byte Reload - adcs r4, r4, r1 - ldr r1, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r4, r11 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, #0 - adcs r9, r9, #0 - str r0, [sp, #12] @ 4-byte Spill - adcs r0, r10, #0 - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #4] @ 4-byte Spill - add r0, sp, #336 - bl .LmulPv256x32(PLT) - add lr, sp, #356 - ldr r8, [sp, #368] - add r6, sp, #340 - ldm lr, {r3, r12, lr} - ldr r7, [sp, #336] - ldm r6, {r0, r1, r2, r6} - adds r4, r4, r7 - ldr r4, [sp, #56] @ 4-byte Reload - adcs r4, r4, r0 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r10, r0, r2 - ldr r0, [sp, #36] @ 4-byte Reload - mul r2, r4, r11 - adcs r0, r0, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - ldr r0, 
[sp, #12] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r9, #0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #4] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #8] @ 4-byte Spill - add r0, sp, #296 - bl .LmulPv256x32(PLT) - add r8, sp, #320 - add lr, sp, #300 - ldm r8, {r6, r7, r8} - ldr r1, [sp, #296] - ldm lr, {r0, r2, r3, r12, lr} - adds r1, r4, r1 - ldr r1, [sp, #56] @ 4-byte Reload - adcs r4, r1, r0 - ldr r0, [sp, #52] @ 4-byte Reload - mov r1, r5 - adcs r10, r10, r2 - mul r2, r4, r11 - adcs r9, r0, r3 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - add r0, sp, #256 - bl .LmulPv256x32(PLT) - add lr, sp, #276 - ldr r8, [sp, #288] - add r6, sp, #260 - ldm lr, {r3, r12, lr} - ldr r7, [sp, #256] - ldm r6, {r0, r1, r2, r6} - adds r4, r4, r7 - adcs r4, r10, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r9, r9, r1 - mov r1, r5 - adcs r10, r0, r2 - ldr r0, [sp, #52] @ 4-byte Reload - mul r2, r4, r11 - adcs r0, r0, r6 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #216 - bl .LmulPv256x32(PLT) - add r8, sp, #240 - add lr, sp, #220 - ldm r8, {r6, r7, r8} - ldr r1, [sp, #216] - ldm lr, {r0, r2, r3, r12, lr} - adds r1, r4, r1 - adcs r4, r9, r0 - ldr r0, [sp, #56] @ 4-byte Reload - mov r1, r5 - adcs r10, r10, r2 - mul r2, r4, r11 - adcs r9, r0, r3 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 
4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - add r0, sp, #176 - bl .LmulPv256x32(PLT) - add lr, sp, #196 - ldr r8, [sp, #208] - add r6, sp, #180 - ldm lr, {r3, r12, lr} - ldr r7, [sp, #176] - ldm r6, {r0, r1, r2, r6} - adds r4, r4, r7 - adcs r4, r10, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r7, r9, r1 - mov r1, r5 - adcs r9, r0, r2 - ldr r0, [sp, #52] @ 4-byte Reload - mul r2, r4, r11 - adcs r6, r0, r6 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r10, r0, r3 - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #136 - bl .LmulPv256x32(PLT) - add r12, sp, #136 - ldm r12, {r0, r1, r3, r12} - adds r0, r4, r0 - adcs r4, r7, r1 - ldr r7, [sp, #152] - ldr r0, [sp, #168] - adcs r1, r9, r3 - ldr r3, [sp, #160] - mul r2, r4, r11 - adcs r9, r6, r12 - ldr r6, [sp, #156] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #164] - adcs r10, r10, r7 - ldr r7, [sp, #56] @ 4-byte Reload - adcs r6, r7, r6 - ldr r7, [sp, #52] @ 4-byte Reload - adcs r8, r7, r3 - ldr r3, [sp, #48] @ 4-byte Reload - adcs r11, r3, r1 - ldr r1, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - add r0, sp, #96 - bl .LmulPv256x32(PLT) - add r3, sp, #96 - ldm r3, {r0, r1, r2, r3} - adds r0, r4, r0 - ldr r0, [sp, #32] @ 4-byte Reload - adcs r5, r0, r1 - ldr r0, [sp, #112] - ldr r1, [sp, #48] @ 4-byte Reload - adcs r9, r9, r2 - adcs r10, r10, r3 - adcs r3, r6, r0 - ldr r0, [sp, #116] - str r3, [sp, #36] @ 4-byte Spill - adcs lr, r8, r0 - ldr r0, [sp, #120] - str lr, [sp, #40] @ 4-byte Spill - adcs r7, r11, r0 - ldr r0, [sp, #124] - str r7, [sp, #44] @ 4-byte Spill - adcs r4, r1, r0 - ldr r0, [sp, #128] - ldr r1, [sp, #56] @ 4-byte Reload - str r4, [sp, #48] @ 4-byte Spill - adcs r12, r1, r0 - ldr r0, [sp, #52] @ 4-byte Reload - adc r8, r0, #0 - ldr r0, [sp, #88] @ 4-byte Reload - subs r1, r5, r0 - ldr r0, [sp, #84] @ 4-byte Reload - sbcs r2, r9, r0 - ldr r0, [sp, #80] @ 4-byte Reload - sbcs r6, r10, r0 - ldr r0, [sp, #64] @ 4-byte Reload - sbcs r0, r3, r0 - ldr r3, [sp, #68] @ 4-byte Reload - sbcs r11, lr, r3 - ldr r3, [sp, #72] @ 4-byte Reload - sbcs r3, r7, r3 - ldr r7, [sp, #76] @ 4-byte Reload - sbcs lr, r4, r7 - ldr r7, [sp, #60] @ 4-byte Reload - sbcs r4, 
r12, r7 - sbc r7, r8, #0 - ands r7, r7, #1 - movne r1, r5 - ldr r5, [sp, #92] @ 4-byte Reload - movne r2, r9 - movne r6, r10 - cmp r7, #0 - str r1, [r5] - ldr r1, [sp, #36] @ 4-byte Reload - str r2, [r5, #4] - str r6, [r5, #8] - movne r0, r1 - str r0, [r5, #12] - ldr r0, [sp, #40] @ 4-byte Reload - movne r11, r0 - ldr r0, [sp, #44] @ 4-byte Reload - str r11, [r5, #16] - movne r3, r0 - ldr r0, [sp, #48] @ 4-byte Reload - cmp r7, #0 - movne r4, r12 - str r3, [r5, #20] - movne lr, r0 - str lr, [r5, #24] - str r4, [r5, #28] - add sp, sp, #420 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end116: - .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L - .cantunwind - .fnend - - .globl mcl_fp_addPre8L - .align 2 - .type mcl_fp_addPre8L,%function -mcl_fp_addPre8L: @ @mcl_fp_addPre8L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #16 - sub sp, sp, #16 - ldr r3, [r1, #4] - ldr r9, [r1] - ldr r10, [r1, #12] - ldr r11, [r1, #16] - ldr r8, [r1, #28] - str r3, [sp, #12] @ 4-byte Spill - ldr r3, [r1, #8] - str r3, [sp, #8] @ 4-byte Spill - ldr r3, [r1, #20] - str r3, [sp] @ 4-byte Spill - ldr r3, [r1, #24] - str r3, [sp, #4] @ 4-byte Spill - ldm r2, {r1, r3, r4, r5, r12, lr} - ldr r7, [sp, #12] @ 4-byte Reload - ldr r6, [r2, #24] - ldr r2, [r2, #28] - adds r1, r1, r9 - adcs r3, r3, r7 - ldr r7, [sp, #8] @ 4-byte Reload - adcs r4, r4, r7 - ldr r7, [sp] @ 4-byte Reload - adcs r5, r5, r10 - adcs r12, r12, r11 - adcs lr, lr, r7 - ldr r7, [sp, #4] @ 4-byte Reload - stm r0, {r1, r3, r4, r5, r12, lr} - adcs r6, r6, r7 - adcs r2, r2, r8 - str r6, [r0, #24] - str r2, [r0, #28] - mov r0, #0 - adc r0, r0, #0 - add sp, sp, #16 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end117: - .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L - .cantunwind - .fnend - - .globl mcl_fp_subPre8L - .align 2 - .type mcl_fp_subPre8L,%function -mcl_fp_subPre8L: @ @mcl_fp_subPre8L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #16 - sub sp, sp, #16 - ldr r3, [r2, #4] - ldr r9, [r2] - ldr r10, [r2, #12] - ldr r11, [r2, #16] - ldr r8, [r2, #28] - str r3, [sp, #12] @ 4-byte Spill - ldr r3, [r2, #8] - str r3, [sp, #8] @ 4-byte Spill - ldr r3, [r2, #20] - str r3, [sp] @ 4-byte Spill - ldr r3, [r2, #24] - str r3, [sp, #4] @ 4-byte Spill - ldm r1, {r2, r3, r4, r5, r12, lr} - ldr r7, [sp, #12] @ 4-byte Reload - ldr r6, [r1, #24] - ldr r1, [r1, #28] - subs r2, r2, r9 - sbcs r3, r3, r7 - ldr r7, [sp, #8] @ 4-byte Reload - sbcs r4, r4, r7 - ldr r7, [sp] @ 4-byte Reload - sbcs r5, r5, r10 - sbcs r12, r12, r11 - sbcs lr, lr, r7 - ldr r7, [sp, #4] @ 4-byte Reload - stm r0, {r2, r3, r4, r5, r12, lr} - sbcs r6, r6, r7 - sbcs r1, r1, r8 - str r6, [r0, #24] - str r1, [r0, #28] - mov r0, #0 - sbc r0, r0, #0 - and r0, r0, #1 - add sp, sp, #16 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end118: - .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L - .cantunwind - .fnend - - .globl mcl_fp_shr1_8L - .align 2 - .type mcl_fp_shr1_8L,%function -mcl_fp_shr1_8L: @ @mcl_fp_shr1_8L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, lr} - push {r4, r5, r6, r7, r8, lr} - ldr r3, [r1, #4] - ldr r12, [r1] - ldr lr, [r1, #12] - add r6, r1, #16 - ldr r2, [r1, #8] - ldm r6, {r4, r5, r6} - ldr r1, [r1, #28] - lsrs r7, r3, #1 - lsr r3, r3, #1 - rrx r12, r12 - lsrs r7, lr, #1 - orr r8, r3, r2, lsl #31 - lsr r7, lr, #1 - rrx r2, r2 - lsrs r3, r5, #1 - lsr r5, r5, #1 - 
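`mcl_fp_addPre8L`, `mcl_fp_subPre8L`, and `mcl_fp_shr1_8L` around this point are the simplest routines in the group: a raw carry-chain add and subtract that return the final carry or borrow (the `mov r0, #0` / `adc r0, r0, #0` and `sbc` / `and r0, r0, #1` tails), and a one-bit right shift built from `lsrs`/`rrx` pairs that move each limb's low bit into the high bit of the limb below. A standalone Go sketch of the same three helpers, with placeholder names:

package fpsketch

import "math/bits"

const nLimbs = 8

// addPre adds two nLimbs-limb values and returns the carry out.
func addPre(z, x, y *[nLimbs]uint32) (carry uint32) {
	for i := 0; i < nLimbs; i++ {
		z[i], carry = bits.Add32(x[i], y[i], carry)
	}
	return
}

// subPre subtracts y from x and returns the borrow out.
func subPre(z, x, y *[nLimbs]uint32) (borrow uint32) {
	for i := 0; i < nLimbs; i++ {
		z[i], borrow = bits.Sub32(x[i], y[i], borrow)
	}
	return
}

// shr1 shifts right by one bit; the low bit of each higher limb
// becomes the high bit of the limb below it (the rrx pattern).
func shr1(z, x *[nLimbs]uint32) {
	for i := 0; i < nLimbs-1; i++ {
		z[i] = x[i]>>1 | x[i+1]<<31
	}
	z[nLimbs-1] = x[nLimbs-1] >> 1
}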
str r12, [r0] - str r8, [r0, #4] - orr r7, r7, r4, lsl #31 - rrx r3, r4 - lsrs r4, r1, #1 - str r2, [r0, #8] - orr r5, r5, r6, lsl #31 - lsr r1, r1, #1 - add r2, r0, #16 - rrx r6, r6 - str r7, [r0, #12] - stm r2, {r3, r5, r6} - str r1, [r0, #28] - pop {r4, r5, r6, r7, r8, lr} - mov pc, lr -.Lfunc_end119: - .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L - .cantunwind - .fnend - - .globl mcl_fp_add8L - .align 2 - .type mcl_fp_add8L,%function -mcl_fp_add8L: @ @mcl_fp_add8L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #28 - sub sp, sp, #28 - ldr r7, [r1, #12] - ldr lr, [r1] - ldr r11, [r1, #4] - ldr r10, [r1, #8] - add r8, r2, #20 - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r1, #16] - str r7, [sp] @ 4-byte Spill - ldr r7, [r1, #20] - str r7, [sp, #8] @ 4-byte Spill - ldr r7, [r1, #24] - ldr r1, [r1, #28] - str r7, [sp, #12] @ 4-byte Spill - str r1, [sp, #4] @ 4-byte Spill - ldm r2, {r1, r4, r5, r12} - ldr r9, [r2, #16] - ldm r8, {r6, r7, r8} - ldr r2, [sp] @ 4-byte Reload - adds lr, r1, lr - adcs r1, r4, r11 - str lr, [r0] - adcs r4, r5, r10 - ldr r5, [sp, #16] @ 4-byte Reload - str r1, [sp, #24] @ 4-byte Spill - str r4, [sp, #20] @ 4-byte Spill - adcs r10, r12, r5 - adcs r5, r9, r2 - ldr r2, [sp, #8] @ 4-byte Reload - str r5, [sp, #16] @ 4-byte Spill - adcs r12, r6, r2 - ldr r6, [sp, #12] @ 4-byte Reload - ldr r2, [sp, #4] @ 4-byte Reload - stmib r0, {r1, r4, r10} - mov r1, #0 - str r5, [r0, #16] - str r12, [r0, #20] - adcs r7, r7, r6 - mov r6, r12 - adcs r11, r8, r2 - str r7, [r0, #24] - mov r8, lr - adc r1, r1, #0 - str r11, [r0, #28] - str r1, [sp, #12] @ 4-byte Spill - ldm r3, {r1, r2, r9, r12, lr} - ldr r4, [r3, #20] - ldr r5, [r3, #24] - ldr r3, [r3, #28] - subs r1, r8, r1 - str r1, [sp, #8] @ 4-byte Spill - ldr r1, [sp, #24] @ 4-byte Reload - sbcs r8, r1, r2 - ldr r1, [sp, #20] @ 4-byte Reload - sbcs r2, r1, r9 - ldr r1, [sp, #16] @ 4-byte Reload - sbcs r12, r10, r12 - sbcs lr, r1, lr - ldr r1, [sp, #12] @ 4-byte Reload - sbcs r4, r6, r4 - sbcs r5, r7, r5 - sbcs r6, r11, r3 - sbc r3, r1, #0 - tst r3, #1 - bne .LBB120_2 -@ BB#1: @ %nocarry - ldr r1, [sp, #8] @ 4-byte Reload - stm r0, {r1, r8} - add r1, r0, #8 - add r0, r0, #20 - stm r1, {r2, r12, lr} - stm r0, {r4, r5, r6} -.LBB120_2: @ %carry - add sp, sp, #28 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end120: - .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L - .cantunwind - .fnend - - .globl mcl_fp_addNF8L - .align 2 - .type mcl_fp_addNF8L,%function -mcl_fp_addNF8L: @ @mcl_fp_addNF8L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #32 - sub sp, sp, #32 - ldm r1, {r6, r8} - ldr r7, [r1, #8] - ldr r9, [r1, #28] - str r7, [sp, #28] @ 4-byte Spill - ldr r7, [r1, #12] - str r7, [sp, #24] @ 4-byte Spill - ldr r7, [r1, #16] - str r7, [sp, #12] @ 4-byte Spill - ldr r7, [r1, #20] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r1, #24] - str r7, [sp, #20] @ 4-byte Spill - ldm r2, {r1, r4, r5, r12, lr} - ldr r10, [r2, #20] - ldr r11, [r2, #24] - ldr r2, [r2, #28] - adds r7, r1, r6 - ldr r1, [sp, #28] @ 4-byte Reload - adcs r6, r4, r8 - ldr r4, [sp, #20] @ 4-byte Reload - str r7, [sp, #4] @ 4-byte Spill - str r6, [sp, #8] @ 4-byte Spill - adcs r8, r5, r1 - ldr r1, [sp, #24] @ 4-byte Reload - ldr r5, [sp, #12] @ 4-byte Reload - adcs r1, r12, r1 - adcs r12, lr, r5 - ldr r5, [sp, #16] @ 4-byte Reload - str r1, [sp, #28] @ 4-byte Spill - adcs lr, r10, r5 - adcs r5, r11, r4 - ldr 
r4, [r3, #4] - ldr r11, [r3, #16] - str lr, [sp, #24] @ 4-byte Spill - adc r10, r2, r9 - ldr r2, [r3] - ldr r9, [r3, #12] - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [r3, #8] - str r2, [sp, #20] @ 4-byte Spill - ldr r2, [r3, #20] - str r2, [sp] @ 4-byte Spill - ldr r2, [r3, #24] - ldr r3, [r3, #28] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [sp, #16] @ 4-byte Reload - subs r2, r7, r2 - sbcs r7, r6, r4 - ldr r4, [sp, #20] @ 4-byte Reload - sbcs r6, r8, r4 - sbcs r9, r1, r9 - ldr r1, [sp] @ 4-byte Reload - sbcs r4, r12, r11 - mov r11, r12 - sbcs r12, lr, r1 - ldr r1, [sp, #12] @ 4-byte Reload - sbcs lr, r5, r1 - ldr r1, [sp, #4] @ 4-byte Reload - sbc r3, r10, r3 - cmp r3, #0 - movlt r6, r8 - movlt r2, r1 - ldr r1, [sp, #8] @ 4-byte Reload - movlt r7, r1 - ldr r1, [sp, #28] @ 4-byte Reload - cmp r3, #0 - movlt r4, r11 - movlt r9, r1 - ldr r1, [sp, #24] @ 4-byte Reload - stm r0, {r2, r7} - str r6, [r0, #8] - str r9, [r0, #12] - movlt r12, r1 - cmp r3, #0 - add r1, r0, #16 - movlt lr, r5 - movlt r3, r10 - stm r1, {r4, r12, lr} - str r3, [r0, #28] - add sp, sp, #32 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end121: - .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L - .cantunwind - .fnend - - .globl mcl_fp_sub8L - .align 2 - .type mcl_fp_sub8L,%function -mcl_fp_sub8L: @ @mcl_fp_sub8L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #16 - sub sp, sp, #16 - ldm r2, {r12, lr} - ldr r4, [r2, #8] - ldr r9, [r2, #20] - ldr r10, [r2, #24] - add r8, r1, #12 - str r4, [sp, #12] @ 4-byte Spill - ldr r4, [r2, #12] - str r4, [sp, #8] @ 4-byte Spill - ldr r4, [r2, #16] - ldr r2, [r2, #28] - str r4, [sp] @ 4-byte Spill - str r2, [sp, #4] @ 4-byte Spill - ldm r1, {r4, r5, r11} - ldm r8, {r2, r7, r8} - ldr r6, [r1, #24] - ldr r1, [r1, #28] - subs r12, r4, r12 - ldr r4, [sp, #12] @ 4-byte Reload - sbcs lr, r5, lr - sbcs r11, r11, r4 - ldr r4, [sp, #8] @ 4-byte Reload - sbcs r2, r2, r4 - ldr r4, [sp] @ 4-byte Reload - sbcs r4, r7, r4 - ldr r7, [sp, #4] @ 4-byte Reload - stm r0, {r12, lr} - str r11, [r0, #8] - sbcs r5, r8, r9 - sbcs r6, r6, r10 - sbcs r7, r1, r7 - add r1, r0, #12 - stm r1, {r2, r4, r5, r6, r7} - mov r1, #0 - sbc r1, r1, #0 - tst r1, #1 - beq .LBB122_2 -@ BB#1: @ %carry - ldr r1, [r3] - add r10, r3, #12 - str r1, [sp, #8] @ 4-byte Spill - ldr r1, [r3, #4] - str r1, [sp, #4] @ 4-byte Spill - ldr r1, [r3, #8] - str r1, [sp] @ 4-byte Spill - ldm r10, {r8, r9, r10} - ldr r1, [r3, #24] - ldr r3, [r3, #28] - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp, #8] @ 4-byte Reload - adds r1, r1, r12 - str r1, [sp, #8] @ 4-byte Spill - ldr r1, [sp, #4] @ 4-byte Reload - adcs r12, r1, lr - ldr r1, [sp] @ 4-byte Reload - adcs lr, r1, r11 - ldr r1, [sp, #12] @ 4-byte Reload - adcs r2, r8, r2 - adcs r4, r9, r4 - adcs r5, r10, r5 - adcs r6, r1, r6 - ldr r1, [sp, #8] @ 4-byte Reload - adc r3, r3, r7 - stm r0, {r1, r12, lr} - add r1, r0, #12 - stm r1, {r2, r4, r5, r6} - str r3, [r0, #28] -.LBB122_2: @ %nocarry - add sp, sp, #16 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end122: - .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L - .cantunwind - .fnend - - .globl mcl_fp_subNF8L - .align 2 - .type mcl_fp_subNF8L,%function -mcl_fp_subNF8L: @ @mcl_fp_subNF8L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #36 - sub sp, sp, #36 - ldm r2, {r6, r8} - ldr r7, [r2, #8] - ldr r11, [r2, #12] - ldr r9, [r2, #28] - str r7, [sp, #20] @ 4-byte 
Spill - ldr r7, [r2, #16] - str r7, [sp, #24] @ 4-byte Spill - ldr r7, [r2, #20] - str r7, [sp, #28] @ 4-byte Spill - ldr r7, [r2, #24] - str r7, [sp, #32] @ 4-byte Spill - ldm r1, {r2, r4, r5, r12, lr} - ldr r10, [r1, #20] - ldr r7, [r1, #24] - ldr r1, [r1, #28] - subs r6, r2, r6 - ldr r2, [sp, #20] @ 4-byte Reload - sbcs r8, r4, r8 - ldr r4, [sp, #24] @ 4-byte Reload - str r6, [sp, #16] @ 4-byte Spill - sbcs r5, r5, r2 - sbcs r2, r12, r11 - ldr r11, [r3, #12] - sbcs r12, lr, r4 - ldr r4, [sp, #28] @ 4-byte Reload - str r2, [sp, #20] @ 4-byte Spill - str r12, [sp, #24] @ 4-byte Spill - sbcs lr, r10, r4 - ldr r4, [sp, #32] @ 4-byte Reload - ldr r10, [r3, #16] - str lr, [sp, #28] @ 4-byte Spill - sbcs r4, r7, r4 - ldr r7, [r3] - sbc r1, r1, r9 - ldr r9, [r3, #8] - str r7, [sp, #4] @ 4-byte Spill - ldr r7, [r3, #4] - str r7, [sp] @ 4-byte Spill - ldr r7, [r3, #20] - str r7, [sp, #8] @ 4-byte Spill - ldr r7, [r3, #24] - ldr r3, [r3, #28] - str r3, [sp, #12] @ 4-byte Spill - ldr r3, [sp, #4] @ 4-byte Reload - str r7, [sp, #32] @ 4-byte Spill - adds r7, r6, r3 - ldr r3, [sp] @ 4-byte Reload - adcs r6, r8, r3 - ldr r3, [sp, #8] @ 4-byte Reload - adcs r9, r5, r9 - adcs r11, r2, r11 - adcs r2, r12, r10 - ldr r10, [sp, #16] @ 4-byte Reload - adcs r12, lr, r3 - ldr r3, [sp, #32] @ 4-byte Reload - adcs lr, r4, r3 - ldr r3, [sp, #12] @ 4-byte Reload - adc r3, r1, r3 - cmp r1, #0 - movge r9, r5 - ldr r5, [sp, #20] @ 4-byte Reload - movge r7, r10 - movge r6, r8 - cmp r1, #0 - str r7, [r0] - movge r11, r5 - ldr r5, [sp, #24] @ 4-byte Reload - movge r2, r5 - ldr r5, [sp, #28] @ 4-byte Reload - stmib r0, {r6, r9, r11} - movge r12, r5 - cmp r1, #0 - movge r3, r1 - movge lr, r4 - add r1, r0, #16 - stm r1, {r2, r12, lr} - str r3, [r0, #28] - add sp, sp, #36 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end123: - .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L - .cantunwind - .fnend - - .globl mcl_fpDbl_add8L - .align 2 - .type mcl_fpDbl_add8L,%function -mcl_fpDbl_add8L: @ @mcl_fpDbl_add8L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #68 - sub sp, sp, #68 - ldm r1, {r7, r9} - ldr r6, [r1, #8] - ldr r8, [r1, #12] - ldm r2, {r4, r12, lr} - ldr r5, [r2, #12] - adds r4, r4, r7 - str r4, [sp, #32] @ 4-byte Spill - ldr r4, [r2, #32] - adcs r7, r12, r9 - adcs r6, lr, r6 - add lr, r1, #16 - adcs r9, r5, r8 - ldr r5, [r2, #28] - add r8, r2, #16 - str r4, [sp, #36] @ 4-byte Spill - ldr r4, [r2, #36] - str r5, [sp, #28] @ 4-byte Spill - str r4, [sp, #40] @ 4-byte Spill - ldr r4, [r2, #40] - str r4, [sp, #44] @ 4-byte Spill - ldr r4, [r2, #44] - str r4, [sp, #48] @ 4-byte Spill - ldr r4, [r2, #48] - str r4, [sp, #52] @ 4-byte Spill - ldr r4, [r2, #52] - str r4, [sp, #56] @ 4-byte Spill - ldr r4, [r2, #56] - str r4, [sp, #60] @ 4-byte Spill - ldr r4, [r2, #60] - str r4, [sp, #64] @ 4-byte Spill - ldm r8, {r4, r5, r8} - ldr r2, [r1, #36] - ldr r10, [r1, #32] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #40] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #44] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #48] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r1, #52] - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [r1, #56] - str r2, [sp, #20] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #24] @ 4-byte Spill - ldm lr, {r1, r2, r12, lr} - ldr r11, [sp, #32] @ 4-byte Reload - adcs r1, r4, r1 - str r11, [r0] - str r7, [r0, #4] - str r6, [r0, #8] - str r9, [r0, #12] - ldr r6, [sp, #8] @ 4-byte Reload - ldr r4, [sp, #16] @ 4-byte 
Reload - adcs r2, r5, r2 - str r1, [r0, #16] - str r2, [r0, #20] - adcs r1, r8, r12 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [r0, #24] - ldr r1, [sp, #36] @ 4-byte Reload - adcs r2, r2, lr - adcs r1, r1, r10 - str r2, [r0, #28] - ldr r2, [sp] @ 4-byte Reload - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r7, r1, r2 - ldr r1, [sp, #44] @ 4-byte Reload - ldr r2, [sp, #4] @ 4-byte Reload - adcs r2, r1, r2 - ldr r1, [sp, #48] @ 4-byte Reload - str r2, [sp, #44] @ 4-byte Spill - adcs r12, r1, r6 - ldr r1, [sp, #52] @ 4-byte Reload - ldr r6, [sp, #12] @ 4-byte Reload - str r12, [sp, #48] @ 4-byte Spill - adcs lr, r1, r6 - ldr r1, [sp, #56] @ 4-byte Reload - str lr, [sp, #52] @ 4-byte Spill - adcs r5, r1, r4 - ldr r1, [sp, #60] @ 4-byte Reload - ldr r4, [sp, #20] @ 4-byte Reload - str r5, [sp, #56] @ 4-byte Spill - adcs r8, r1, r4 - ldr r1, [sp, #64] @ 4-byte Reload - ldr r4, [sp, #24] @ 4-byte Reload - adcs r10, r1, r4 - mov r1, #0 - adc r1, r1, #0 - str r10, [sp, #60] @ 4-byte Spill - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [r3] - str r1, [sp, #24] @ 4-byte Spill - ldmib r3, {r4, r11} - ldr r6, [r3, #12] - ldr r1, [r3, #24] - ldr r9, [r3, #16] - str r6, [sp, #40] @ 4-byte Spill - ldr r6, [r3, #20] - ldr r3, [r3, #28] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #24] @ 4-byte Reload - str r3, [sp, #32] @ 4-byte Spill - ldr r3, [sp, #36] @ 4-byte Reload - subs r1, r3, r1 - sbcs r4, r7, r4 - sbcs r11, r2, r11 - ldr r2, [sp, #40] @ 4-byte Reload - sbcs r2, r12, r2 - sbcs r12, lr, r9 - mov r9, r8 - sbcs lr, r5, r6 - ldr r5, [sp, #28] @ 4-byte Reload - sbcs r6, r8, r5 - ldr r5, [sp, #32] @ 4-byte Reload - sbcs r8, r10, r5 - ldr r5, [sp, #64] @ 4-byte Reload - sbc r10, r5, #0 - ands r10, r10, #1 - movne r1, r3 - movne r4, r7 - str r1, [r0, #32] - ldr r1, [sp, #44] @ 4-byte Reload - str r4, [r0, #36] - movne r11, r1 - ldr r1, [sp, #48] @ 4-byte Reload - cmp r10, #0 - str r11, [r0, #40] - movne r2, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r2, [r0, #44] - movne r12, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r12, [r0, #48] - movne lr, r1 - ldr r1, [sp, #60] @ 4-byte Reload - cmp r10, #0 - movne r6, r9 - str lr, [r0, #52] - str r6, [r0, #56] - movne r8, r1 - str r8, [r0, #60] - add sp, sp, #68 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end124: - .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L - .cantunwind - .fnend - - .globl mcl_fpDbl_sub8L - .align 2 - .type mcl_fpDbl_sub8L,%function -mcl_fpDbl_sub8L: @ @mcl_fpDbl_sub8L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #64 - sub sp, sp, #64 - ldr r7, [r2, #32] - str r7, [sp, #32] @ 4-byte Spill - ldr r7, [r2, #36] - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [r2, #40] - str r7, [sp, #40] @ 4-byte Spill - ldr r7, [r2, #44] - str r7, [sp, #44] @ 4-byte Spill - ldr r7, [r2, #48] - str r7, [sp, #48] @ 4-byte Spill - ldr r7, [r2, #52] - str r7, [sp, #52] @ 4-byte Spill - ldr r7, [r2, #56] - str r7, [sp, #56] @ 4-byte Spill - ldr r7, [r2, #60] - str r7, [sp, #60] @ 4-byte Spill - ldm r2, {r4, r5, r8} - ldr r6, [r2, #20] - ldr r7, [r2, #12] - ldr r9, [r2, #16] - ldr r11, [r2, #24] - ldr r10, [r2, #28] - str r6, [sp, #28] @ 4-byte Spill - ldm r1, {r2, r12, lr} - ldr r6, [r1, #12] - subs r4, r2, r4 - ldr r2, [r1, #32] - sbcs r5, r12, r5 - ldr r12, [r1, #36] - sbcs lr, lr, r8 - add r8, r1, #16 - sbcs r6, r6, r7 - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #40] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #44] - 
str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #48] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r1, #52] - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [r1, #56] - str r2, [sp, #20] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #24] @ 4-byte Spill - ldm r8, {r1, r2, r7, r8} - stm r0, {r4, r5, lr} - str r6, [r0, #12] - mov r4, #0 - ldr r6, [sp, #28] @ 4-byte Reload - ldr r5, [sp, #20] @ 4-byte Reload - sbcs r1, r1, r9 - sbcs r2, r2, r6 - str r1, [r0, #16] - sbcs r1, r7, r11 - str r2, [r0, #20] - ldr r2, [sp, #32] @ 4-byte Reload - ldr r7, [sp, #8] @ 4-byte Reload - str r1, [r0, #24] - sbcs r1, r8, r10 - str r1, [r0, #28] - ldr r1, [sp] @ 4-byte Reload - sbcs r1, r1, r2 - ldr r2, [sp, #36] @ 4-byte Reload - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #4] @ 4-byte Reload - sbcs r6, r12, r2 - ldr r2, [sp, #40] @ 4-byte Reload - str r6, [sp, #36] @ 4-byte Spill - sbcs r1, r1, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r1, [sp, #40] @ 4-byte Spill - sbcs r9, r7, r2 - ldr r2, [sp, #48] @ 4-byte Reload - ldr r7, [sp, #12] @ 4-byte Reload - sbcs r12, r7, r2 - ldr r2, [sp, #52] @ 4-byte Reload - ldr r7, [sp, #16] @ 4-byte Reload - str r12, [sp, #48] @ 4-byte Spill - sbcs lr, r7, r2 - ldr r2, [sp, #56] @ 4-byte Reload - str lr, [sp, #52] @ 4-byte Spill - sbcs r8, r5, r2 - ldr r2, [sp, #60] @ 4-byte Reload - ldr r5, [sp, #24] @ 4-byte Reload - sbcs r11, r5, r2 - sbc r2, r4, #0 - str r2, [sp, #60] @ 4-byte Spill - ldm r3, {r4, r5} - ldr r2, [r3, #8] - ldr r10, [r3, #20] - ldr r7, [r3, #24] - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [r3, #12] - str r2, [sp, #28] @ 4-byte Spill - ldr r2, [r3, #16] - ldr r3, [r3, #28] - str r3, [sp, #56] @ 4-byte Spill - ldr r3, [sp, #32] @ 4-byte Reload - adds r4, r3, r4 - adcs r5, r6, r5 - ldr r6, [sp, #44] @ 4-byte Reload - adcs r6, r1, r6 - ldr r1, [sp, #28] @ 4-byte Reload - adcs r1, r9, r1 - adcs r2, r12, r2 - adcs r12, lr, r10 - adcs lr, r8, r7 - ldr r7, [sp, #56] @ 4-byte Reload - adc r10, r11, r7 - ldr r7, [sp, #60] @ 4-byte Reload - ands r7, r7, #1 - moveq r4, r3 - ldr r3, [sp, #36] @ 4-byte Reload - str r4, [r0, #32] - moveq r5, r3 - ldr r3, [sp, #40] @ 4-byte Reload - str r5, [r0, #36] - moveq r6, r3 - cmp r7, #0 - moveq r1, r9 - str r6, [r0, #40] - str r1, [r0, #44] - ldr r1, [sp, #48] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r2, [r0, #48] - moveq r12, r1 - cmp r7, #0 - moveq lr, r8 - moveq r10, r11 - str r12, [r0, #52] - str lr, [r0, #56] - str r10, [r0, #60] - add sp, sp, #64 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end125: - .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L - .cantunwind - .fnend - - .align 2 - .type .LmulPv288x32,%function -.LmulPv288x32: @ @mulPv288x32 - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r11, lr} - push {r4, r5, r6, r7, r8, r9, r11, lr} - ldr r12, [r1] - ldmib r1, {r3, lr} - ldr r9, [r1, #12] - umull r4, r8, lr, r2 - umull lr, r6, r12, r2 - mov r5, r4 - mov r7, r6 - str lr, [r0] - umull lr, r12, r9, r2 - umlal r7, r5, r3, r2 - str r5, [r0, #8] - str r7, [r0, #4] - umull r5, r7, r3, r2 - adds r3, r6, r5 - adcs r3, r7, r4 - adcs r3, r8, lr - str r3, [r0, #12] - ldr r3, [r1, #16] - umull r7, r6, r3, r2 - adcs r3, r12, r7 - str r3, [r0, #16] - ldr r3, [r1, #20] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #20] - ldr r3, [r1, #24] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #24] - ldr r3, [r1, #28] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #28] - ldr r1, [r1, #32] - umull r3, r7, r1, r2 - adcs r1, r5, r3 - adc r2, r7, 
#0 - str r1, [r0, #32] - str r2, [r0, #36] - pop {r4, r5, r6, r7, r8, r9, r11, lr} - mov pc, lr -.Lfunc_end126: - .size .LmulPv288x32, .Lfunc_end126-.LmulPv288x32 - .cantunwind - .fnend - - .globl mcl_fp_mulUnitPre9L - .align 2 - .type mcl_fp_mulUnitPre9L,%function -mcl_fp_mulUnitPre9L: @ @mcl_fp_mulUnitPre9L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, lr} - push {r4, r5, r6, r7, r8, lr} - .pad #40 - sub sp, sp, #40 - mov r4, r0 - mov r0, sp - bl .LmulPv288x32(PLT) - add lr, sp, #20 - ldr r12, [sp, #36] - ldm lr, {r0, r3, r8, lr} - ldr r1, [sp, #16] - ldm sp, {r5, r6, r7} - ldr r2, [sp, #12] - stm r4, {r5, r6, r7} - str r2, [r4, #12] - str r1, [r4, #16] - add r1, r4, #20 - stm r1, {r0, r3, r8, lr} - str r12, [r4, #36] - add sp, sp, #40 - pop {r4, r5, r6, r7, r8, lr} - mov pc, lr -.Lfunc_end127: - .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L - .cantunwind - .fnend - - .globl mcl_fpDbl_mulPre9L - .align 2 - .type mcl_fpDbl_mulPre9L,%function -mcl_fpDbl_mulPre9L: @ @mcl_fpDbl_mulPre9L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #412 - sub sp, sp, #412 - mov r10, r2 - mov r8, r0 - add r0, sp, #368 - str r1, [sp, #44] @ 4-byte Spill - mov r4, r1 - ldr r2, [r10] - bl .LmulPv288x32(PLT) - ldr r0, [sp, #404] - ldr r1, [sp, #376] - ldr r2, [r10, #4] - ldr r9, [sp, #372] - ldr r11, [sp, #380] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #400] - str r1, [sp, #16] @ 4-byte Spill - mov r1, r4 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #396] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #392] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #388] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #384] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #368] - str r0, [r8] - add r0, sp, #328 - bl .LmulPv288x32(PLT) - add lr, sp, #352 - ldr r4, [sp, #364] - add r7, sp, #332 - ldm lr, {r3, r12, lr} - ldr r6, [sp, #328] - ldm r7, {r0, r1, r2, r5, r7} - adds r6, r6, r9 - str r6, [r8, #4] - ldr r6, [sp, #16] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #12] @ 4-byte Spill - adcs r0, r1, r11 - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r10, #8] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #40] @ 4-byte Spill - adc r0, r4, #0 - ldr r4, [sp, #44] @ 4-byte Reload - str r0, [sp, #16] @ 4-byte Spill - add r0, sp, #288 - mov r1, r4 - bl .LmulPv288x32(PLT) - add r9, sp, #312 - add lr, sp, #288 - ldm r9, {r5, r6, r7, r9} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #12] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r8, #8] - ldr r0, [sp, #8] @ 4-byte Reload - adcs r11, r1, r0 - ldr r0, [sp, #20] @ 4-byte Reload - mov r1, r4 - adcs r0, r2, r0 - ldr r2, [r10, #12] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #36] @ 
4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #16] @ 4-byte Spill - adc r0, r9, #0 - mov r9, r4 - str r0, [sp, #12] @ 4-byte Spill - add r0, sp, #248 - bl .LmulPv288x32(PLT) - add lr, sp, #272 - ldr r4, [sp, #284] - add r6, sp, #252 - ldm lr, {r3, r12, lr} - ldr r7, [sp, #248] - ldr r5, [sp, #268] - ldm r6, {r0, r1, r2, r6} - adds r7, r7, r11 - str r7, [r8, #12] - ldr r7, [sp, #20] @ 4-byte Reload - adcs r11, r0, r7 - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r1, r0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r10, #16] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #16] @ 4-byte Spill - adc r0, r4, #0 - mov r4, r9 - str r0, [sp, #12] @ 4-byte Spill - add r0, sp, #208 - mov r1, r4 - bl .LmulPv288x32(PLT) - add r9, sp, #232 - add lr, sp, #208 - ldm r9, {r5, r6, r7, r9} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r0, r11 - str r0, [r8, #16] - ldr r0, [sp, #24] @ 4-byte Reload - adcs r11, r1, r0 - ldr r0, [sp, #28] @ 4-byte Reload - mov r1, r4 - adcs r0, r2, r0 - ldr r2, [r10, #20] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #16] @ 4-byte Spill - adc r0, r9, #0 - mov r9, r4 - str r0, [sp, #12] @ 4-byte Spill - add r0, sp, #168 - bl .LmulPv288x32(PLT) - add lr, sp, #192 - ldr r4, [sp, #204] - add r6, sp, #172 - ldm lr, {r3, r12, lr} - ldr r7, [sp, #168] - ldr r5, [sp, #188] - ldm r6, {r0, r1, r2, r6} - adds r7, r7, r11 - str r7, [r8, #20] - ldr r7, [sp, #28] @ 4-byte Reload - adcs r11, r0, r7 - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r1, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r10, #24] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #12] @ 4-byte Spill - adc r0, r4, #0 - mov r4, r9 - str r0, [sp, #8] @ 4-byte Spill - add r0, sp, #128 - mov r1, r4 - bl .LmulPv288x32(PLT) - add r9, sp, #152 - add lr, sp, #128 - ldm r9, {r5, r6, r7, r9} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r0, r11 - str r0, [r8, #24] - ldr r0, [sp, #32] @ 4-byte Reload - adcs r11, r1, r0 - ldr r0, [sp, #36] @ 4-byte Reload - mov r1, r4 - adcs r0, r2, r0 - ldr r2, 
[r10, #28] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #16] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #12] @ 4-byte Spill - add r0, sp, #88 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #124] - add lr, sp, #112 - add r7, sp, #92 - str r0, [sp, #8] @ 4-byte Spill - ldm lr, {r5, r12, lr} - ldr r2, [sp, #88] - ldr r6, [sp, #108] - ldm r7, {r0, r1, r3, r7} - ldr r4, [sp, #40] @ 4-byte Reload - adds r2, r2, r11 - adcs r9, r0, r4 - ldr r0, [sp, #36] @ 4-byte Reload - str r2, [r8, #28] - ldr r2, [r10, #32] - adcs r10, r1, r0 - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #44] @ 4-byte Reload - adcs r11, r3, r0 - ldr r0, [sp, #28] @ 4-byte Reload - adcs r7, r7, r0 - ldr r0, [sp, #24] @ 4-byte Reload - adcs r6, r6, r0 - ldr r0, [sp, #20] @ 4-byte Reload - adcs r5, r5, r0 - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adc r4, r0, #0 - add r0, sp, #48 - bl .LmulPv288x32(PLT) - add r3, sp, #48 - ldm r3, {r0, r1, r2, r3} - ldr r12, [sp, #84] - ldr lr, [sp, #80] - adds r0, r0, r9 - ldr r9, [sp, #76] - adcs r1, r1, r10 - adcs r2, r2, r11 - ldr r11, [sp, #72] - adcs r10, r3, r7 - ldr r7, [sp, #64] - ldr r3, [sp, #68] - str r0, [r8, #32] - str r1, [r8, #36] - str r2, [r8, #40] - str r10, [r8, #44] - adcs r0, r7, r6 - str r0, [r8, #48] - adcs r0, r3, r5 - str r0, [r8, #52] - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [r8, #56] - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [r8, #60] - adcs r0, lr, r4 - adc r1, r12, #0 - str r0, [r8, #64] - str r1, [r8, #68] - add sp, sp, #412 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end128: - .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L - .cantunwind - .fnend - - .globl mcl_fpDbl_sqrPre9L - .align 2 - .type mcl_fpDbl_sqrPre9L,%function -mcl_fpDbl_sqrPre9L: @ @mcl_fpDbl_sqrPre9L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #412 - sub sp, sp, #412 - mov r5, r1 - mov r4, r0 - add r0, sp, #368 - ldr r2, [r5] - bl .LmulPv288x32(PLT) - ldr r0, [sp, #404] - add r11, sp, #368 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #400] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #396] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #392] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #388] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #384] - str r0, [sp, #24] @ 4-byte Spill - ldm r11, {r0, r10, r11} - ldr r1, [sp, #380] - ldr r2, [r5, #4] - str r1, [sp, #20] @ 4-byte Spill - str r0, [r4] - add r0, sp, #328 - mov r1, r5 - bl .LmulPv288x32(PLT) - add lr, sp, #348 - add r7, sp, #328 - ldr r9, [sp, #364] - ldr r8, [sp, #360] - ldm lr, {r6, r12, lr} - ldm r7, {r0, r1, r2, r3, r7} - adds r0, r0, r10 - str r0, [r4, #4] - ldr r0, [sp, #20] @ 4-byte Reload - adcs r10, r1, r11 - mov r1, r5 - adcs r11, r2, r0 - ldr r0, [sp, #24] @ 4-byte Reload - ldr r2, [r5, #8] - adcs 
r0, r3, r0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #44] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #288 - bl .LmulPv288x32(PLT) - add r9, sp, #312 - add lr, sp, #288 - ldm r9, {r6, r7, r8, r9} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r0, r10 - str r0, [r4, #8] - ldr r0, [sp, #24] @ 4-byte Reload - adcs r10, r1, r11 - mov r1, r5 - adcs r11, r2, r0 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r2, [r5, #12] - adcs r0, r3, r0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #24] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #248 - bl .LmulPv288x32(PLT) - add lr, sp, #268 - add r7, sp, #248 - ldr r9, [sp, #284] - ldr r8, [sp, #280] - ldm lr, {r6, r12, lr} - ldm r7, {r0, r1, r2, r3, r7} - adds r0, r0, r10 - str r0, [r4, #12] - ldr r0, [sp, #28] @ 4-byte Reload - adcs r10, r1, r11 - mov r1, r5 - adcs r11, r2, r0 - ldr r0, [sp, #32] @ 4-byte Reload - ldr r2, [r5, #16] - adcs r0, r3, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #24] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #208 - bl .LmulPv288x32(PLT) - add r9, sp, #232 - add lr, sp, #208 - ldm r9, {r6, r7, r8, r9} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r0, r10 - str r0, [r4, #16] - ldr r0, [sp, #32] @ 4-byte Reload - adcs r10, r1, r11 - mov r1, r5 - adcs r11, r2, r0 - ldr r0, [sp, #36] @ 4-byte Reload - ldr r2, [r5, #20] - adcs r0, r3, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #24] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #168 - bl .LmulPv288x32(PLT) - add lr, sp, #188 - add r7, sp, #168 - ldr r9, [sp, #204] - ldr r8, [sp, #200] - ldm lr, {r6, r12, lr} - ldm r7, {r0, r1, r2, r3, r7} - adds r0, r0, r10 - str r0, [r4, #20] - ldr r0, [sp, #36] @ 4-byte Reload - adcs r10, r1, r11 - mov r1, r5 - adcs r11, r2, r0 - ldr r0, [sp, #40] @ 4-byte Reload - 
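`.LmulPv288x32` above multiplies a nine-limb (288-bit) operand by a single 32-bit word using `umull`/`umlal` chains, and `mcl_fpDbl_mulPre9L`/`mcl_fpDbl_sqrPre9L` build the full 18-limb schoolbook product by accumulating one such row per word of the second operand, with squaring reusing the same row routine on equal operands. A standalone Go sketch of that structure, not the vendored code, with placeholder names:

package fpsketch

import "math/bits"

const n9 = 9 // 9 limbs * 32 bits = 288

// mulPv multiplies a 9-limb value by one 32-bit word (one row).
func mulPv(x *[n9]uint32, w uint32) (row [n9 + 1]uint32) {
	var c uint64
	for i := 0; i < n9; i++ {
		v := uint64(x[i])*uint64(w) + c
		row[i], c = uint32(v), v>>32
	}
	row[n9] = uint32(c)
	return
}

// mulPre computes the full double-width product z = x*y.
func mulPre(z *[2 * n9]uint32, x, y *[n9]uint32) {
	*z = [2 * n9]uint32{}
	for i := 0; i < n9; i++ {
		row := mulPv(x, y[i])
		var carry uint32
		for j := 0; j <= n9; j++ {
			z[i+j], carry = bits.Add32(z[i+j], row[j], carry)
		}
		// carry is always 0 here: a row's top limb is at most 2^32-2,
		// and z[i+n9] held no contribution from earlier rows.
	}
}

// sqrPre is mulPre with both operands equal.
func sqrPre(z *[2 * n9]uint32, x *[n9]uint32) { mulPre(z, x, x) }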
ldr r2, [r5, #24] - adcs r0, r3, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #24] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #128 - bl .LmulPv288x32(PLT) - add r9, sp, #152 - add lr, sp, #128 - ldm r9, {r6, r7, r8, r9} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r0, r10 - str r0, [r4, #24] - ldr r0, [sp, #40] @ 4-byte Reload - adcs r10, r1, r11 - mov r1, r5 - adcs r11, r2, r0 - ldr r0, [sp, #44] @ 4-byte Reload - ldr r2, [r5, #28] - adcs r0, r3, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #24] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #88 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #124] - ldr r2, [sp, #88] - ldr r1, [sp, #92] - add r12, sp, #96 - ldr lr, [sp, #116] - ldr r6, [sp, #112] - ldr r7, [sp, #108] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #120] - adds r2, r2, r10 - adcs r10, r1, r11 - str r0, [sp, #8] @ 4-byte Spill - ldm r12, {r0, r3, r12} - ldr r1, [sp, #44] @ 4-byte Reload - str r2, [r4, #28] - ldr r2, [r5, #32] - adcs r11, r0, r1 - ldr r0, [sp, #40] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - adcs r8, r3, r0 - ldr r0, [sp, #36] @ 4-byte Reload - adcs r9, r12, r0 - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #48 - bl .LmulPv288x32(PLT) - add r3, sp, #48 - add lr, sp, #72 - ldm r3, {r0, r1, r2, r3} - ldr r12, [sp, #84] - adds r0, r0, r10 - adcs r1, r1, r11 - adcs r2, r2, r8 - ldm lr, {r5, r8, lr} - ldr r6, [sp, #68] - ldr r7, [sp, #64] - adcs r3, r3, r9 - add r9, r4, #32 - stm r9, {r0, r1, r2} - str r3, [r4, #44] - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [r4, #48] - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [r4, #52] - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [r4, #56] - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [r4, #60] - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, lr, r0 - adc r1, r12, #0 - str r0, [r4, #64] - str r1, [r4, #68] - add sp, sp, #412 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end129: - .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L - .cantunwind - .fnend - - .globl mcl_fp_mont9L - .align 2 - .type mcl_fp_mont9L,%function -mcl_fp_mont9L: @ @mcl_fp_mont9L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, 
lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #804 - sub sp, sp, #804 - str r2, [sp, #64] @ 4-byte Spill - ldr r6, [r3, #-4] - ldr r2, [r2] - str r0, [sp, #60] @ 4-byte Spill - add r0, sp, #760 - str r3, [sp, #76] @ 4-byte Spill - str r1, [sp, #68] @ 4-byte Spill - mov r4, r3 - mov r7, r1 - str r6, [sp, #72] @ 4-byte Spill - bl .LmulPv288x32(PLT) - ldr r0, [sp, #764] - ldr r5, [sp, #760] - mov r1, r4 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #768] - mul r2, r5, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #772] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #796] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #792] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #788] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #784] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #780] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #776] - str r0, [sp, #24] @ 4-byte Spill - add r0, sp, #720 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #756] - add r11, sp, #724 - ldr r4, [sp, #736] - ldr r9, [sp, #720] - mov r1, r7 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #752] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #748] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #744] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #740] - str r0, [sp, #4] @ 4-byte Spill - ldm r11, {r8, r10, r11} - ldr r6, [sp, #64] @ 4-byte Reload - add r0, sp, #680 - ldr r2, [r6, #4] - bl .LmulPv288x32(PLT) - adds r0, r9, r5 - ldr r2, [sp, #4] @ 4-byte Reload - mov r1, #0 - add lr, sp, #680 - ldr r9, [sp, #716] - ldr r0, [sp, #48] @ 4-byte Reload - adcs r5, r8, r0 - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r10, r0 - ldr r10, [sp, #712] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r11, r0 - ldr r11, [sp, #708] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #704] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [sp, #8] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [sp, #12] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r7, r2, r0 - ldr r0, [sp, #52] @ 4-byte Reload - ldr r2, [sp, #16] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [sp, #20] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #20] @ 4-byte Spill - adc r8, r1, #0 - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r5, r5, r0 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #76] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r7, r4 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #28] @ 4-byte Spill - adcs r0, r8, r9 - str r0, [sp, #24] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - mul r2, r5, r0 - add r0, sp, #640 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #676] - add r10, sp, #640 - ldr r11, [sp, #660] - ldr r7, 
[sp, #656] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #672] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #668] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #664] - str r0, [sp, #4] @ 4-byte Spill - ldm r10, {r4, r8, r9, r10} - ldr r2, [r6, #8] - ldr r6, [sp, #68] @ 4-byte Reload - add r0, sp, #600 - mov r1, r6 - bl .LmulPv288x32(PLT) - adds r0, r5, r4 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #600 - ldr r4, [sp, #624] - ldr r0, [sp, #56] @ 4-byte Reload - adcs r5, r0, r8 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #636] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #632] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #628] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r8, r0, #0 - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r5, r5, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #76] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r7, r4 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #28] @ 4-byte Spill - adcs r0, r8, r9 - str r0, [sp, #24] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - mul r2, r5, r0 - add r0, sp, #560 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #596] - add r10, sp, #560 - ldr r11, [sp, #580] - ldr r7, [sp, #576] - mov r1, r6 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #592] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #588] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #584] - str r0, [sp, #4] @ 4-byte Spill - ldm r10, {r4, r8, r9, r10} - ldr r0, [sp, #64] @ 4-byte Reload - ldr r2, [r0, #12] - add r0, sp, #520 - bl .LmulPv288x32(PLT) - adds r0, r5, r4 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #520 - ldr r4, [sp, #544] - ldr r0, [sp, #56] @ 4-byte Reload - adcs r5, r0, r8 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #556] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #552] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #548] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r6, r0, r1 - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #28] @ 4-byte 
Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r8, r0, #0 - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r5, r5, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #76] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - adcs r0, r6, lr - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r7, r4 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #28] @ 4-byte Spill - adcs r0, r8, r9 - str r0, [sp, #24] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - mul r2, r5, r0 - add r0, sp, #480 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #516] - add r10, sp, #480 - ldr r11, [sp, #500] - ldr r7, [sp, #496] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #512] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #508] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #504] - str r0, [sp, #4] @ 4-byte Spill - ldm r10, {r4, r8, r9, r10} - ldr r6, [sp, #64] @ 4-byte Reload - ldr r1, [sp, #68] @ 4-byte Reload - add r0, sp, #440 - ldr r2, [r6, #16] - bl .LmulPv288x32(PLT) - adds r0, r5, r4 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #440 - ldr r4, [sp, #464] - ldr r0, [sp, #56] @ 4-byte Reload - adcs r5, r0, r8 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #476] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #472] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #468] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r8, r0, #0 - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r5, r5, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #76] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r7, r4 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #28] @ 4-byte Spill - adcs r0, r8, r9 - str r0, [sp, #24] @ 4-byte Spill - mov r0, 
#0 - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - mul r2, r5, r0 - add r0, sp, #400 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #436] - add r10, sp, #400 - ldr r11, [sp, #420] - ldr r7, [sp, #416] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #432] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #428] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #424] - str r0, [sp, #4] @ 4-byte Spill - ldm r10, {r4, r8, r9, r10} - ldr r2, [r6, #20] - ldr r1, [sp, #68] @ 4-byte Reload - add r0, sp, #360 - bl .LmulPv288x32(PLT) - adds r0, r5, r4 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #360 - ldr r4, [sp, #384] - ldr r0, [sp, #56] @ 4-byte Reload - adcs r5, r0, r8 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #396] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #392] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #388] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r6, r0, r1 - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r8, r0, #0 - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r5, r5, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #76] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - adcs r0, r6, lr - ldr r6, [sp, #72] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r7, r4 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - mul r2, r5, r6 - adcs r0, r0, r11 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #28] @ 4-byte Spill - adcs r0, r8, r9 - str r0, [sp, #24] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #320 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #356] - add r10, sp, #320 - ldr r11, [sp, #340] - ldr r7, [sp, #336] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #352] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #348] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #344] - str r0, [sp, #4] @ 4-byte Spill - ldm r10, {r4, r8, r9, r10} - ldr r0, [sp, #64] @ 4-byte Reload - ldr r1, [sp, #68] @ 4-byte Reload - ldr r2, [r0, #24] - add r0, sp, #280 - bl .LmulPv288x32(PLT) - adds r0, r5, r4 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #280 - ldr r4, [sp, #304] - ldr r0, [sp, #56] @ 4-byte Reload - adcs r5, r0, r8 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #316] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #312] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #308] - str r0, [sp, #44] @ 4-byte Spill - ldr 
r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r8, r0, #0 - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r5, r5, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r5, r6 - ldr r6, [sp, #76] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - mov r1, r6 - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r7, r4 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #28] @ 4-byte Spill - adcs r0, r8, r9 - str r0, [sp, #24] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #240 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #276] - add r10, sp, #240 - ldr r11, [sp, #260] - ldr r7, [sp, #256] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #272] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #268] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #264] - str r0, [sp, #4] @ 4-byte Spill - ldm r10, {r4, r8, r9, r10} - ldr r0, [sp, #64] @ 4-byte Reload - ldr r1, [sp, #68] @ 4-byte Reload - ldr r2, [r0, #28] - add r0, sp, #200 - bl .LmulPv288x32(PLT) - adds r0, r5, r4 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #200 - ldr r4, [sp, #224] - ldr r0, [sp, #56] @ 4-byte Reload - adcs r5, r0, r8 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #236] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #232] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #228] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r8, r0, #0 - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r5, r5, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r6 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r7, r4 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, 
r11 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #28] @ 4-byte Spill - adcs r0, r8, r9 - str r0, [sp, #24] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - mul r2, r5, r0 - add r0, sp, #160 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #196] - add r10, sp, #160 - ldr r11, [sp, #184] - ldr r6, [sp, #180] - ldr r7, [sp, #176] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #192] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #188] - str r0, [sp, #8] @ 4-byte Spill - ldm r10, {r4, r8, r9, r10} - ldr r0, [sp, #64] @ 4-byte Reload - ldr r1, [sp, #68] @ 4-byte Reload - ldr r2, [r0, #32] - add r0, sp, #120 - bl .LmulPv288x32(PLT) - adds r0, r5, r4 - ldr r1, [sp, #8] @ 4-byte Reload - ldr r2, [sp, #120] - ldr lr, [sp, #124] - ldr r5, [sp, #128] - ldr r12, [sp, #132] - ldr r0, [sp, #56] @ 4-byte Reload - adcs r4, r0, r8 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r9, r0, r9 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r10, r0, r10 - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - add r7, sp, #136 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r11, r0, r11 - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - adds r4, r4, r2 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r9, r9, lr - adcs r10, r10, r5 - mul r8, r4, r0 - ldm r7, {r0, r1, r2, r3, r6, r7} - ldr r5, [sp, #68] @ 4-byte Reload - adcs r5, r5, r12 - str r5, [sp, #36] @ 4-byte Spill - ldr r5, [sp, #64] @ 4-byte Reload - adcs r5, r5, r0 - adcs r0, r11, r1 - ldr r11, [sp, #76] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - mov r1, r11 - adcs r0, r0, r2 - mov r2, r8 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #72] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - add r0, sp, #80 - bl .LmulPv288x32(PLT) - add r3, sp, #80 - ldm r3, {r0, r1, r2, r3} - adds r0, r4, r0 - adcs r0, r9, r1 - ldr r1, [sp, #96] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r7, r10, r2 - str r7, [sp, #40] @ 4-byte Spill - adcs r8, r0, r3 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r10, r5, r1 - ldr r1, [sp, #100] - adcs r4, r0, r1 - ldr r1, [sp, #104] - ldr r0, [sp, #56] @ 4-byte Reload - str r4, [sp, #44] @ 4-byte Spill - adcs r6, r0, r1 - ldr r1, [sp, #108] - ldr r0, [sp, #68] @ 4-byte Reload - str r6, [sp, #48] @ 4-byte Spill - adcs r12, r0, r1 - ldr r1, [sp, #112] - ldr r0, [sp, #32] @ 4-byte Reload - str r12, [sp, #56] @ 4-byte Spill - adcs lr, r0, r1 - ldr r1, [sp, #116] - ldr r0, [sp, #72] @ 4-byte Reload - str lr, [sp, #68] @ 4-byte Spill - adcs r5, r0, r1 - ldr r0, [sp, #64] @ 4-byte Reload - str r5, [sp, #72] @ 4-byte Spill - adc r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - mov r0, r11 - ldmib r0, {r2, r3, r11} - 
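@ Epilogue of mcl_fp_mont9L (a reading of the generated code, assuming mcl's
@ usual Montgomery layout): the nine words of the modulus p are loaded, the
@ subs/sbcs chain computes t - p, and the ands/movne sequence keeps t when the
@ subtraction borrows, t - p otherwise, so the routine returns
@ x*y*R^{-1} mod p with R = 2^288.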
ldr r1, [r0, #16] - ldr r9, [r0] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [r0, #20] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [r0, #24] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [r0, #28] - str r1, [sp, #36] @ 4-byte Spill - mov r1, r0 - ldr r0, [sp, #52] @ 4-byte Reload - subs r9, r0, r9 - sbcs r2, r7, r2 - sbcs r3, r8, r3 - sbcs r7, r10, r11 - ldr r11, [r1, #32] - ldr r1, [sp, #24] @ 4-byte Reload - sbcs r1, r4, r1 - ldr r4, [sp, #28] @ 4-byte Reload - sbcs r4, r6, r4 - ldr r6, [sp, #32] @ 4-byte Reload - sbcs r12, r12, r6 - ldr r6, [sp, #36] @ 4-byte Reload - sbcs lr, lr, r6 - sbcs r11, r5, r11 - ldr r5, [sp, #64] @ 4-byte Reload - sbc r6, r5, #0 - ldr r5, [sp, #60] @ 4-byte Reload - ands r6, r6, #1 - movne r9, r0 - ldr r0, [sp, #40] @ 4-byte Reload - movne r3, r8 - str r9, [r5] - movne r2, r0 - ldr r0, [sp, #44] @ 4-byte Reload - cmp r6, #0 - movne r7, r10 - str r2, [r5, #4] - str r3, [r5, #8] - str r7, [r5, #12] - movne r1, r0 - ldr r0, [sp, #48] @ 4-byte Reload - str r1, [r5, #16] - movne r4, r0 - ldr r0, [sp, #56] @ 4-byte Reload - cmp r6, #0 - str r4, [r5, #20] - movne r12, r0 - ldr r0, [sp, #68] @ 4-byte Reload - str r12, [r5, #24] - movne lr, r0 - ldr r0, [sp, #72] @ 4-byte Reload - str lr, [r5, #28] - movne r11, r0 - str r11, [r5, #32] - add sp, sp, #804 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end130: - .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L - .cantunwind - .fnend - - .globl mcl_fp_montNF9L - .align 2 - .type mcl_fp_montNF9L,%function -mcl_fp_montNF9L: @ @mcl_fp_montNF9L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #804 - sub sp, sp, #804 - add r12, sp, #60 - str r2, [sp, #72] @ 4-byte Spill - mov r4, r3 - mov r7, r1 - stm r12, {r0, r1, r3} - add r0, sp, #760 - ldr r6, [r3, #-4] - ldr r2, [r2] - str r6, [sp, #76] @ 4-byte Spill - bl .LmulPv288x32(PLT) - ldr r0, [sp, #764] - ldr r5, [sp, #760] - mov r1, r4 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #768] - mul r2, r5, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #772] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #796] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #792] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #788] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #784] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #780] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #776] - str r0, [sp, #24] @ 4-byte Spill - add r0, sp, #720 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #756] - add r10, sp, #724 - ldr r6, [sp, #736] - ldr r11, [sp, #720] - mov r1, r7 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #752] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #748] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #744] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #740] - str r0, [sp, #4] @ 4-byte Spill - ldm r10, {r8, r9, r10} - ldr r4, [sp, #72] @ 4-byte Reload - add r0, sp, #680 - ldr r2, [r4, #4] - bl .LmulPv288x32(PLT) - adds r0, r11, r5 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #680 - ldr r11, [sp, #704] - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r8, r0 - ldr r8, [sp, #716] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r9, r0 - ldr r9, [sp, #712] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r10, r0 - ldr r10, [sp, #708] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r5, 
r1, r0 - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r7, r1, r0 - ldr r0, [sp, #52] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r1, r0 - str r0, [sp, #20] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r6, [sp, #48] @ 4-byte Reload - adds r6, r6, r0 - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #68] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r5, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r7, r11 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r8, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #640 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #676] - add r10, sp, #644 - ldr r7, [sp, #656] - ldr r11, [sp, #640] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #672] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #668] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #664] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #660] - str r0, [sp, #4] @ 4-byte Spill - ldm r10, {r8, r9, r10} - ldr r5, [sp, #64] @ 4-byte Reload - ldr r2, [r4, #8] - add r0, sp, #600 - mov r1, r5 - bl .LmulPv288x32(PLT) - adds r0, r6, r11 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #600 - ldr r11, [sp, #624] - ldr r0, [sp, #56] @ 4-byte Reload - adcs r4, r0, r8 - ldr r0, [sp, #52] @ 4-byte Reload - ldr r8, [sp, #636] - adcs r0, r0, r9 - ldr r9, [sp, #632] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #628] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #28] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r6, r4, r0 - ldr r0, [sp, #56] @ 4-byte Reload - ldr r4, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r4 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r7, r11 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r10 
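@ Main loop of mcl_fp_montNF9L, same interleaved CIOS shape as mcl_fp_mont9L:
@ for each 32-bit word y[i], one .LmulPv288x32 call accumulates y[i]*x into
@ the running sum t, then q = (t mod 2^32) * n' mod 2^32 (n' = -p^{-1} mod
@ 2^32, loaded from p[-1] via ldr r6, [r3, #-4]) and a second call adds q*p,
@ zeroing the low word of t so it can be shifted out. The NF variant defers
@ the final subtraction and corrects by sign (asr/movlt) rather than by carry.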
- str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r8, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #560 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #596] - add r10, sp, #564 - ldr r7, [sp, #576] - ldr r11, [sp, #560] - mov r1, r5 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #592] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #588] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #584] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #580] - str r0, [sp, #4] @ 4-byte Spill - ldm r10, {r8, r9, r10} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r2, [r0, #12] - add r0, sp, #520 - bl .LmulPv288x32(PLT) - adds r0, r6, r11 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #520 - ldr r11, [sp, #544] - ldr r0, [sp, #56] @ 4-byte Reload - adcs r6, r0, r8 - ldr r0, [sp, #52] @ 4-byte Reload - ldr r8, [sp, #556] - adcs r0, r0, r9 - ldr r9, [sp, #552] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #548] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r5, r0, r1 - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #28] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r6, r6, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r4 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - adcs r0, r5, lr - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r7, r11 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r8, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #480 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #516] - add r10, sp, #484 - ldr r7, [sp, #496] - ldr r11, [sp, #480] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #512] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #508] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #504] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #500] - str r0, [sp, #4] @ 4-byte Spill - ldm r10, {r8, r9, r10} - ldr r5, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #64] @ 4-byte Reload - add r0, sp, #440 - ldr r2, [r5, #16] - bl .LmulPv288x32(PLT) - adds r0, r6, r11 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #440 - ldr r11, [sp, #464] - ldr r0, [sp, #56] @ 4-byte Reload - adcs r4, r0, r8 - ldr r0, [sp, #52] @ 4-byte Reload - ldr r8, [sp, #476] - adcs r0, r0, r9 - ldr r9, [sp, #472] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #468] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - 
str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #28] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r6, r4, r0 - ldr r0, [sp, #56] @ 4-byte Reload - ldr r4, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #68] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r6, r4 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r7, r11 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r8, #0 - str r0, [sp, #24] @ 4-byte Spill - add r0, sp, #400 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #436] - add r10, sp, #404 - ldr r7, [sp, #416] - ldr r11, [sp, #400] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #432] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #428] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #424] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #420] - str r0, [sp, #4] @ 4-byte Spill - ldm r10, {r8, r9, r10} - ldr r2, [r5, #20] - ldr r1, [sp, #64] @ 4-byte Reload - add r0, sp, #360 - bl .LmulPv288x32(PLT) - adds r0, r6, r11 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #360 - ldr r11, [sp, #384] - ldr r0, [sp, #56] @ 4-byte Reload - adcs r5, r0, r8 - ldr r0, [sp, #52] @ 4-byte Reload - ldr r8, [sp, #396] - adcs r0, r0, r9 - ldr r9, [sp, #392] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #388] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #28] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r6, r5, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #68] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r6, r4 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - 
adcs r0, r7, r11 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r8, #0 - str r0, [sp, #24] @ 4-byte Spill - add r0, sp, #320 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #356] - add r10, sp, #324 - ldr r7, [sp, #336] - ldr r11, [sp, #320] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #352] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #348] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #344] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #340] - str r0, [sp, #4] @ 4-byte Spill - ldm r10, {r8, r9, r10} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r5, [sp, #64] @ 4-byte Reload - ldr r2, [r0, #24] - add r0, sp, #280 - mov r1, r5 - bl .LmulPv288x32(PLT) - adds r0, r6, r11 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #280 - ldr r11, [sp, #304] - ldr r0, [sp, #56] @ 4-byte Reload - adcs r4, r0, r8 - ldr r0, [sp, #52] @ 4-byte Reload - ldr r8, [sp, #316] - adcs r0, r0, r9 - ldr r9, [sp, #312] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #308] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #28] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r6, r4, r0 - ldr r0, [sp, #56] @ 4-byte Reload - ldr r4, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r4 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r7, r11 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r8, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #240 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #276] - add r10, sp, #244 - ldr r7, [sp, #256] - ldr r11, [sp, #240] - mov r1, r5 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #272] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #268] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #264] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #260] - str r0, [sp, #4] @ 4-byte Spill - ldm r10, {r8, r9, r10} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r2, [r0, #28] - add r0, sp, #200 - bl .LmulPv288x32(PLT) - adds r0, r6, r11 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #200 - ldr r11, [sp, #224] - ldr r0, [sp, #56] @ 4-byte Reload - adcs r5, r0, r8 - ldr r0, [sp, #52] @ 4-byte Reload - ldr r8, [sp, #236] - adcs r0, r0, r9 - ldr r9, [sp, #232] - str r0, [sp, #56] @ 4-byte 
Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #228] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r6, r0, r1 - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #28] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r5, r5, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r4 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r6, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - adcs r0, r7, r11 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r8, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - mul r2, r5, r0 - add r0, sp, #160 - bl .LmulPv288x32(PLT) - ldr r0, [sp, #196] - add r10, sp, #164 - ldr r4, [sp, #184] - ldr r6, [sp, #180] - ldr r7, [sp, #176] - ldr r11, [sp, #160] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #192] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #188] - str r0, [sp, #12] @ 4-byte Spill - ldm r10, {r8, r9, r10} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #64] @ 4-byte Reload - ldr r2, [r0, #32] - add r0, sp, #120 - bl .LmulPv288x32(PLT) - adds r0, r5, r11 - ldr r1, [sp, #52] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - add lr, sp, #120 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r8 - add r8, sp, #136 - adcs r1, r1, r9 - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r10, r1, r10 - ldr r1, [sp, #44] @ 4-byte Reload - adcs r11, r1, r7 - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r1, r6 - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adcs r1, r1, r4 - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #32] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #28] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #24] @ 4-byte Reload - adc r1, r1, r2 - str r1, [sp, #44] @ 4-byte Spill - ldm lr, {r2, r12, lr} - ldr r4, [sp, #132] - adds r5, r0, r2 - ldr r0, [sp, #76] @ 4-byte Reload - mul r9, r5, r0 - ldm r8, {r0, r1, r2, r3, r6, r8} - ldr r7, [sp, #56] @ 4-byte Reload - adcs r7, r7, r12 - str r7, [sp, #32] @ 4-byte Spill - adcs r7, r10, lr - ldr r10, [sp, #68] @ 4-byte Reload - adcs r11, r11, r4 - ldr r4, [sp, #72] @ 4-byte Reload - str r7, [sp, #36] @ 4-byte Spill - adcs r0, r4, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r10 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - mov r2, r9 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte 
Reload - adcs r4, r0, r3 - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #76] @ 4-byte Spill - adc r0, r8, #0 - str r0, [sp, #72] @ 4-byte Spill - add r0, sp, #80 - bl .LmulPv288x32(PLT) - add r3, sp, #80 - ldm r3, {r0, r1, r2, r3} - adds r0, r5, r0 - ldr r0, [sp, #32] @ 4-byte Reload - adcs r9, r0, r1 - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #96] - str r9, [sp, #32] @ 4-byte Spill - adcs r2, r0, r2 - adcs r0, r11, r3 - str r2, [sp, #44] @ 4-byte Spill - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r7, r0, r1 - ldr r1, [sp, #100] - ldr r0, [sp, #56] @ 4-byte Reload - str r7, [sp, #48] @ 4-byte Spill - adcs r6, r0, r1 - ldr r1, [sp, #104] - ldr r0, [sp, #64] @ 4-byte Reload - adcs lr, r0, r1 - ldr r1, [sp, #108] - ldr r0, [sp, #76] @ 4-byte Reload - str lr, [sp, #56] @ 4-byte Spill - adcs r4, r4, r1 - ldr r1, [sp, #112] - str r4, [sp, #64] @ 4-byte Spill - adcs r5, r0, r1 - ldr r1, [sp, #116] - ldr r0, [sp, #72] @ 4-byte Reload - str r5, [sp, #76] @ 4-byte Spill - adc r12, r0, r1 - mov r0, r10 - ldr r1, [r0, #16] - ldr r8, [r0] - ldr r11, [r0, #4] - ldr r10, [r0, #8] - ldr r3, [r0, #12] - str r12, [sp, #72] @ 4-byte Spill - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [r0, #20] - subs r8, r9, r8 - ldr r9, [sp, #52] @ 4-byte Reload - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [r0, #24] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [r0, #28] - ldr r0, [r0, #32] - str r1, [sp, #40] @ 4-byte Spill - sbcs r1, r2, r11 - sbcs r2, r9, r10 - mov r10, r6 - sbcs r3, r7, r3 - ldr r7, [sp, #24] @ 4-byte Reload - sbcs r7, r6, r7 - ldr r6, [sp, #28] @ 4-byte Reload - sbcs r11, lr, r6 - ldr r6, [sp, #36] @ 4-byte Reload - sbcs lr, r4, r6 - ldr r4, [sp, #40] @ 4-byte Reload - ldr r6, [sp, #44] @ 4-byte Reload - sbcs r4, r5, r4 - ldr r5, [sp, #32] @ 4-byte Reload - sbc r0, r12, r0 - asr r12, r0, #31 - cmp r12, #0 - movlt r8, r5 - ldr r5, [sp, #60] @ 4-byte Reload - movlt r1, r6 - movlt r2, r9 - cmp r12, #0 - movlt r7, r10 - str r8, [r5] - str r1, [r5, #4] - ldr r1, [sp, #48] @ 4-byte Reload - str r2, [r5, #8] - movlt r3, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r3, [r5, #12] - str r7, [r5, #16] - movlt r11, r1 - ldr r1, [sp, #64] @ 4-byte Reload - cmp r12, #0 - str r11, [r5, #20] - movlt lr, r1 - ldr r1, [sp, #76] @ 4-byte Reload - str lr, [r5, #24] - movlt r4, r1 - ldr r1, [sp, #72] @ 4-byte Reload - str r4, [r5, #28] - movlt r0, r1 - str r0, [r5, #32] - add sp, sp, #804 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end131: - .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L - .cantunwind - .fnend - - .globl mcl_fp_montRed9L - .align 2 - .type mcl_fp_montRed9L,%function -mcl_fp_montRed9L: @ @mcl_fp_montRed9L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #476 - sub sp, sp, #476 - mov r5, r2 - str r0, [sp, #104] @ 4-byte Spill - ldr r2, [r1, #4] - ldr r4, [r1] - ldr r11, [r1, #32] - ldr r10, [r1, #36] - ldr r0, [r5] - str r2, [sp, #56] @ 4-byte Spill - ldr r2, [r1, #8] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [r5, #4] - str r2, [sp, #52] @ 4-byte Spill - ldr r2, [r1, #12] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [r5, #8] - str r2, [sp, #48] @ 4-byte Spill - ldr r2, [r1, #16] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [r5, #12] - str r2, [sp, #32] @ 4-byte Spill - ldr r2, [r1, #20] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [r5, #16] - str r2, [sp, #36] @ 4-byte Spill - ldr r2, [r1, #24] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [r5, 
#20] - str r2, [sp, #40] @ 4-byte Spill - ldr r2, [r1, #28] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [r5, #24] - str r2, [sp, #44] @ 4-byte Spill - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [r5, #-4] - str r0, [sp, #108] @ 4-byte Spill - mul r2, r4, r0 - ldr r0, [r5, #28] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [r5, #32] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [r1, #64] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [r1, #68] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [r1, #40] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [r1, #44] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [r1, #48] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [r1, #52] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [r1, #56] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [r1, #60] - mov r1, r5 - str r0, [sp, #8] @ 4-byte Spill - add r0, sp, #432 - bl .LmulPv288x32(PLT) - ldr r1, [sp, #432] - add lr, sp, #436 - ldr r9, [sp, #468] - ldr r8, [sp, #464] - ldm lr, {r0, r2, r3, r6, r7, r12, lr} - adds r1, r4, r1 - ldr r1, [sp, #56] @ 4-byte Reload - adcs r4, r1, r0 - ldr r0, [sp, #52] @ 4-byte Reload - mov r1, r5 - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #108] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - mul r2, r4, r7 - adcs r0, r0, r12 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #44] @ 4-byte Spill - adcs r0, r11, r8 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r9, r10, r9 - adcs r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #4] @ 4-byte Spill - add r0, sp, #392 - bl .LmulPv288x32(PLT) - add r11, sp, #408 - add r6, sp, #392 - ldr r12, [sp, #428] - ldr lr, [sp, #424] - ldr r8, [sp, #420] - ldm r11, {r2, r10, r11} - ldm r6, {r0, r1, r3, r6} - adds r0, r4, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r4, r0, r1 - ldr r0, [sp, #52] @ 4-byte Reload - mov r1, r5 - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r4, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r11, r0, r11 - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r8 - mov r8, r7 - str r0, [sp, #44] @ 4-byte Spill - adcs r0, r9, lr - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill 
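@ Reduction step of mcl_fp_montRed9L (word-serial Montgomery reduction, as the
@ surrounding code suggests): q = (t mod 2^32) * n' mod 2^32, with n' taken
@ from p[-1] (ldr r0, [r5, #-4]); .LmulPv288x32 then adds q*p so the low word
@ of t becomes zero and the running value shifts down one 32-bit word per
@ iteration, nine iterations in all.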
- ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #4] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #8] @ 4-byte Spill - add r0, sp, #352 - bl .LmulPv288x32(PLT) - add lr, sp, #372 - add r7, sp, #352 - ldr r10, [sp, #388] - ldr r9, [sp, #384] - ldm lr, {r6, r12, lr} - ldm r7, {r0, r1, r2, r3, r7} - adds r0, r4, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r4, r0, r1 - ldr r0, [sp, #52] @ 4-byte Reload - mov r1, r5 - adcs r0, r0, r2 - mul r2, r4, r8 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r11, r6 - mov r11, r8 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #8] @ 4-byte Spill - add r0, sp, #312 - bl .LmulPv288x32(PLT) - add lr, sp, #332 - ldr r7, [sp, #348] - add r9, sp, #320 - ldm lr, {r6, r8, r12, lr} - ldr r1, [sp, #312] - ldr r3, [sp, #316] - ldm r9, {r0, r2, r9} - adds r1, r4, r1 - mov r4, r11 - ldr r1, [sp, #56] @ 4-byte Reload - adcs r10, r1, r3 - ldr r1, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r10, r4 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, 
r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #12] @ 4-byte Spill - add r0, sp, #272 - bl .LmulPv288x32(PLT) - add lr, sp, #272 - ldr r11, [sp, #308] - ldr r9, [sp, #304] - ldm lr, {r0, r1, r2, r3, r6, r7, r12, lr} - adds r0, r10, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r8, r0, r1 - ldr r0, [sp, #52] @ 4-byte Reload - mov r1, r5 - adcs r0, r0, r2 - mul r2, r8, r4 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - mov r6, r4 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - add r0, sp, #232 - bl .LmulPv288x32(PLT) - add r11, sp, #256 - add lr, sp, #232 - ldm r11, {r7, r9, r10, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r8, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r4, r0, r1 - ldr r0, [sp, #52] @ 4-byte Reload - mov r1, r5 - adcs r0, r0, r2 - mul r2, r4, r6 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - add r0, sp, #192 - bl .LmulPv288x32(PLT) - add lr, sp, #212 - add r7, sp, #192 - ldr r9, [sp, #228] - ldr r8, [sp, #224] - ldm lr, {r6, r12, lr} - ldm r7, {r0, r1, r2, r3, r7} - adds r0, r4, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r4, r0, r1 - ldr r0, [sp, #52] @ 4-byte Reload - mov r1, r5 - adcs r10, r0, r2 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r11, r0, r3 - ldr r0, [sp, #44] @ 4-byte Reload - adcs r7, r0, r7 - ldr r0, [sp, #40] @ 4-byte Reload - adcs r6, r0, r6 - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #32] @ 
4-byte Reload - adcs r0, r0, lr - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #108] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - mul r2, r4, r8 - adcs r9, r0, r9 - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #152 - bl .LmulPv288x32(PLT) - add r12, sp, #152 - ldm r12, {r0, r1, r3, r12} - ldr lr, [sp, #188] - adds r0, r4, r0 - adcs r4, r10, r1 - ldr r1, [sp, #168] - adcs r11, r11, r3 - mul r2, r4, r8 - ldr r3, [sp, #180] - adcs r0, r7, r12 - ldr r7, [sp, #176] - ldr r12, [sp, #184] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #172] - adcs r10, r6, r1 - ldr r1, [sp, #64] @ 4-byte Reload - adcs r8, r1, r0 - ldr r0, [sp, #60] @ 4-byte Reload - mov r1, r5 - adcs r7, r0, r7 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r9, r12 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - add r0, sp, #112 - bl .LmulPv288x32(PLT) - add r3, sp, #112 - ldm r3, {r0, r1, r2, r3} - adds r0, r4, r0 - ldr r0, [sp, #108] @ 4-byte Reload - adcs r6, r11, r1 - ldr r1, [sp, #128] - adcs r9, r0, r2 - ldr r0, [sp, #36] @ 4-byte Reload - adcs r11, r10, r3 - adcs lr, r8, r1 - ldr r1, [sp, #132] - str r11, [sp, #28] @ 4-byte Spill - str lr, [sp, #32] @ 4-byte Spill - adcs r7, r7, r1 - ldr r1, [sp, #136] - str r7, [sp, #44] @ 4-byte Spill - adcs r8, r0, r1 - ldr r1, [sp, #140] - ldr r0, [sp, #40] @ 4-byte Reload - str r8, [sp, #48] @ 4-byte Spill - adcs r4, r0, r1 - ldr r1, [sp, #144] - ldr r0, [sp, #56] @ 4-byte Reload - str r4, [sp, #52] @ 4-byte Spill - adcs r5, r0, r1 - ldr r1, [sp, #148] - ldr r0, [sp, #64] @ 4-byte Reload - str r5, [sp, #108] @ 4-byte Spill - adcs r12, r0, r1 - ldr r0, [sp, #60] @ 4-byte Reload - adc r10, r0, #0 - ldr r0, [sp, #100] @ 4-byte Reload - subs r2, r6, r0 - ldr r0, [sp, #96] @ 4-byte Reload - sbcs r3, r9, r0 - ldr r0, [sp, #92] @ 4-byte Reload - sbcs r1, r11, r0 - ldr r0, [sp, #76] @ 4-byte Reload - sbcs r11, lr, r0 - ldr r0, [sp, #80] @ 4-byte Reload - sbcs r0, r7, r0 - ldr r7, [sp, #84] @ 4-byte Reload - sbcs lr, r8, r7 - ldr r7, [sp, #88] @ 4-byte Reload - sbcs r8, r4, r7 - ldr r4, [sp, #68] @ 4-byte Reload - sbcs r4, r5, r4 - ldr r5, [sp, #72] @ 4-byte Reload - sbcs r5, r12, r5 - sbc r7, r10, #0 - ands r7, r7, #1 - movne r2, r6 - ldr r6, [sp, #104] @ 4-byte Reload - movne r3, r9 - str r2, [r6] - ldr r2, [sp, #28] @ 4-byte Reload - str r3, [r6, #4] - movne r1, r2 - cmp r7, #0 - str r1, [r6, #8] - ldr r1, [sp, #32] @ 4-byte Reload - movne r11, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r11, [r6, #12] - movne r0, r1 - str r0, [r6, #16] - ldr r0, [sp, #48] @ 4-byte Reload - movne lr, r0 - ldr r0, [sp, #52] @ 4-byte Reload - cmp r7, #0 - movne r5, r12 - str lr, [r6, #20] - movne r8, r0 - ldr r0, [sp, #108] @ 4-byte Reload - str r8, [r6, #24] - movne r4, r0 - str r4, [r6, #28] - str r5, [r6, #32] - add sp, sp, #476 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end132: - .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L - .cantunwind 
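@ After the nine reduction steps t < 2p; the subs/sbcs chain above computes
@ t - p and movne keeps t when the subtraction borrows, so mcl_fp_montRed9L
@ returns T * R^{-1} mod p (R = 2^288) for an 18-word input T.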
- .fnend - - .globl mcl_fp_addPre9L - .align 2 - .type mcl_fp_addPre9L,%function -mcl_fp_addPre9L: @ @mcl_fp_addPre9L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #16 - sub sp, sp, #16 - ldm r1, {r3, r12, lr} - ldr r9, [r1, #12] - ldmib r2, {r5, r6, r7} - ldr r4, [r2, #16] - ldr r8, [r2] - ldr r11, [r2, #28] - str r4, [sp, #12] @ 4-byte Spill - ldr r4, [r2, #20] - adds r10, r8, r3 - adcs r5, r5, r12 - ldr r12, [r1, #32] - ldr r8, [sp, #12] @ 4-byte Reload - str r10, [r0] - adcs lr, r6, lr - ldr r6, [r1, #20] - adcs r7, r7, r9 - str r4, [sp, #4] @ 4-byte Spill - ldr r4, [r2, #24] - ldr r2, [r2, #32] - ldr r3, [sp, #4] @ 4-byte Reload - str r4, [sp] @ 4-byte Spill - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #28] - ldr r4, [r1, #24] - ldr r1, [r1, #16] - adcs r1, r8, r1 - adcs r6, r3, r6 - ldr r3, [sp] @ 4-byte Reload - stmib r0, {r5, lr} - str r7, [r0, #12] - str r1, [r0, #16] - ldr r1, [sp, #8] @ 4-byte Reload - str r6, [r0, #20] - adcs r4, r3, r4 - adcs r2, r11, r2 - str r4, [r0, #24] - adcs r1, r1, r12 - str r2, [r0, #28] - str r1, [r0, #32] - mov r0, #0 - adc r0, r0, #0 - add sp, sp, #16 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end133: - .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L - .cantunwind - .fnend - - .globl mcl_fp_subPre9L - .align 2 - .type mcl_fp_subPre9L,%function -mcl_fp_subPre9L: @ @mcl_fp_subPre9L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #20 - sub sp, sp, #20 - ldr r3, [r2, #8] - add lr, r1, #16 - ldr r11, [r2, #4] - ldr r10, [r2, #12] - ldr r4, [r2] - str r3, [sp] @ 4-byte Spill - ldr r3, [r2, #16] - str r3, [sp, #4] @ 4-byte Spill - ldr r3, [r2, #20] - str r3, [sp, #8] @ 4-byte Spill - ldr r3, [r2, #24] - str r3, [sp, #12] @ 4-byte Spill - ldr r3, [r2, #28] - str r3, [sp, #16] @ 4-byte Spill - ldmib r1, {r5, r6, r7} - ldm lr, {r3, r12, lr} - ldr r9, [r1] - ldr r8, [r1, #28] - subs r4, r9, r4 - ldr r9, [r2, #32] - ldr r2, [sp] @ 4-byte Reload - sbcs r11, r5, r11 - ldr r5, [sp, #16] @ 4-byte Reload - sbcs r6, r6, r2 - sbcs r7, r7, r10 - ldr r10, [r1, #32] - ldr r1, [sp, #4] @ 4-byte Reload - sbcs r3, r3, r1 - ldr r1, [sp, #8] @ 4-byte Reload - sbcs r2, r12, r1 - ldr r1, [sp, #12] @ 4-byte Reload - stm r0, {r4, r11} - str r6, [r0, #8] - str r7, [r0, #12] - str r3, [r0, #16] - str r2, [r0, #20] - sbcs r1, lr, r1 - sbcs r5, r8, r5 - str r1, [r0, #24] - sbcs r1, r10, r9 - str r5, [r0, #28] - str r1, [r0, #32] - mov r0, #0 - sbc r0, r0, #0 - and r0, r0, #1 - add sp, sp, #20 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end134: - .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L - .cantunwind - .fnend - - .globl mcl_fp_shr1_9L - .align 2 - .type mcl_fp_shr1_9L,%function -mcl_fp_shr1_9L: @ @mcl_fp_shr1_9L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, lr} - push {r4, r5, r6, r7, r8, lr} - add r12, r1, #16 - ldr r2, [r1, #8] - ldr lr, [r1, #12] - ldm r12, {r4, r5, r6, r8, r12} - ldm r1, {r1, r3} - lsrs r7, r3, #1 - rrx r1, r1 - str r1, [r0] - lsr r1, r3, #1 - orr r1, r1, r2, lsl #31 - str r1, [r0, #4] - lsrs r1, lr, #1 - rrx r1, r2 - str r1, [r0, #8] - lsr r1, lr, #1 - orr r1, r1, r4, lsl #31 - str r1, [r0, #12] - lsrs r1, r5, #1 - rrx r1, r4 - str r1, [r0, #16] - lsr r1, r5, #1 - orr r1, r1, r6, lsl #31 - str r1, [r0, #20] - lsrs r1, r8, #1 - rrx r1, r6 - str r1, [r0, #24] - lsr r1, r8, #1 - orr r1, r1, r12, lsl #31 - str r1, [r0, #28] - lsr r1, r12, #1 - str r1, [r0, 
#32] - pop {r4, r5, r6, r7, r8, lr} - mov pc, lr -.Lfunc_end135: - .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L - .cantunwind - .fnend - - .globl mcl_fp_add9L - .align 2 - .type mcl_fp_add9L,%function -mcl_fp_add9L: @ @mcl_fp_add9L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #16 - sub sp, sp, #16 - ldm r1, {r12, lr} - ldr r5, [r2] - ldr r9, [r1, #8] - ldr r8, [r1, #12] - ldmib r2, {r4, r6, r7} - adds r12, r5, r12 - ldr r5, [r1, #24] - adcs lr, r4, lr - ldr r4, [r1, #20] - str r12, [sp, #8] @ 4-byte Spill - adcs r10, r6, r9 - ldr r6, [r1, #16] - adcs r9, r7, r8 - ldr r7, [r2, #16] - str r10, [sp, #4] @ 4-byte Spill - adcs r6, r7, r6 - ldr r7, [r2, #20] - adcs r7, r7, r4 - ldr r4, [r2, #24] - adcs r11, r4, r5 - ldr r5, [r1, #28] - ldr r4, [r2, #28] - ldr r1, [r1, #32] - ldr r2, [r2, #32] - adcs r8, r4, r5 - adcs r4, r2, r1 - mov r2, lr - add r1, r0, #16 - str r4, [r0, #32] - str r12, [r0] - stmib r0, {r2, r10} - str r9, [r0, #12] - stm r1, {r6, r7, r11} - mov r1, #0 - str r8, [r0, #28] - adc r1, r1, #0 - str r1, [sp, #12] @ 4-byte Spill - ldm r3, {r1, r5, lr} - ldr r10, [sp, #8] @ 4-byte Reload - ldr r12, [r3, #12] - subs r1, r10, r1 - str r1, [sp, #8] @ 4-byte Spill - sbcs r1, r2, r5 - ldr r5, [r3, #20] - str r1, [sp] @ 4-byte Spill - ldr r1, [sp, #4] @ 4-byte Reload - sbcs r2, r1, lr - ldr r1, [r3, #16] - sbcs r12, r9, r12 - sbcs r1, r6, r1 - ldr r6, [r3, #24] - sbcs r5, r7, r5 - ldr r7, [r3, #28] - ldr r3, [r3, #32] - sbcs r6, r11, r6 - sbcs r7, r8, r7 - sbcs r3, r4, r3 - ldr r4, [sp, #12] @ 4-byte Reload - sbc r4, r4, #0 - tst r4, #1 - bne .LBB136_2 -@ BB#1: @ %nocarry - str r3, [r0, #32] - ldr r3, [sp, #8] @ 4-byte Reload - str r3, [r0] - ldr r3, [sp] @ 4-byte Reload - str r3, [r0, #4] - str r2, [r0, #8] - str r12, [r0, #12] - add r0, r0, #16 - stm r0, {r1, r5, r6, r7} -.LBB136_2: @ %carry - add sp, sp, #16 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end136: - .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L - .cantunwind - .fnend - - .globl mcl_fp_addNF9L - .align 2 - .type mcl_fp_addNF9L,%function -mcl_fp_addNF9L: @ @mcl_fp_addNF9L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #52 - sub sp, sp, #52 - ldr r9, [r1] - ldmib r1, {r8, lr} - ldr r5, [r2] - ldr r12, [r1, #12] - ldmib r2, {r4, r6, r7} - ldr r10, [r3, #4] - adds r5, r5, r9 - adcs r9, r4, r8 - ldr r4, [r1, #16] - ldr r8, [r1, #20] - str r5, [sp, #16] @ 4-byte Spill - ldr r5, [r1, #24] - adcs r11, r6, lr - ldr lr, [sp, #16] @ 4-byte Reload - str r9, [sp, #28] @ 4-byte Spill - adcs r12, r7, r12 - ldr r7, [r2, #16] - str r12, [sp, #32] @ 4-byte Spill - adcs r6, r7, r4 - ldr r7, [r2, #20] - str r6, [sp, #36] @ 4-byte Spill - adcs r4, r7, r8 - ldr r7, [r2, #24] - ldr r8, [r3] - str r4, [sp, #40] @ 4-byte Spill - adcs r7, r7, r5 - ldr r5, [r2, #28] - ldr r2, [r2, #32] - str r7, [sp, #48] @ 4-byte Spill - ldr r7, [r1, #28] - ldr r1, [r1, #32] - adcs r7, r5, r7 - ldr r5, [r3, #8] - adc r1, r2, r1 - ldr r2, [r3, #16] - str r7, [sp, #44] @ 4-byte Spill - ldr r7, [r3, #12] - subs r8, lr, r8 - str r1, [sp, #24] @ 4-byte Spill - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r3, #20] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r3, #24] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r3, #28] - ldr r3, [r3, #32] - str r3, [sp] @ 4-byte Spill - ldr r3, [sp, #4] @ 4-byte Reload - str r2, [sp, #20] @ 4-byte Spill - sbcs r2, r9, r10 - sbcs r5, r11, r5 - sbcs r7, r12, r7 - sbcs 
r12, r6, r3 - ldr r3, [sp, #8] @ 4-byte Reload - sbcs r6, r4, r3 - ldr r4, [sp, #48] @ 4-byte Reload - ldr r3, [sp, #12] @ 4-byte Reload - sbcs r9, r4, r3 - ldr r3, [sp, #44] @ 4-byte Reload - ldr r4, [sp, #20] @ 4-byte Reload - sbcs r10, r3, r4 - ldr r3, [sp] @ 4-byte Reload - ldr r4, [sp, #28] @ 4-byte Reload - sbc r3, r1, r3 - asr r1, r3, #31 - cmp r1, #0 - movlt r8, lr - movlt r2, r4 - movlt r5, r11 - cmp r1, #0 - str r8, [r0] - str r2, [r0, #4] - ldr r2, [sp, #32] @ 4-byte Reload - str r5, [r0, #8] - movlt r7, r2 - ldr r2, [sp, #36] @ 4-byte Reload - str r7, [r0, #12] - movlt r12, r2 - ldr r2, [sp, #40] @ 4-byte Reload - str r12, [r0, #16] - movlt r6, r2 - cmp r1, #0 - ldr r1, [sp, #48] @ 4-byte Reload - str r6, [r0, #20] - movlt r9, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r9, [r0, #24] - movlt r10, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r10, [r0, #28] - movlt r3, r1 - str r3, [r0, #32] - add sp, sp, #52 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end137: - .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L - .cantunwind - .fnend - - .globl mcl_fp_sub9L - .align 2 - .type mcl_fp_sub9L,%function -mcl_fp_sub9L: @ @mcl_fp_sub9L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #24 - sub sp, sp, #24 - ldm r2, {r12, lr} - ldr r5, [r1] - ldr r8, [r2, #8] - ldr r9, [r2, #12] - ldmib r1, {r4, r6, r7} - subs r12, r5, r12 - ldr r5, [r2, #24] - sbcs lr, r4, lr - ldr r4, [r2, #20] - sbcs r8, r6, r8 - ldr r6, [r2, #16] - sbcs r9, r7, r9 - ldr r7, [r1, #16] - sbcs r10, r7, r6 - ldr r7, [r1, #20] - ldr r6, [r1, #28] - sbcs r7, r7, r4 - ldr r4, [r1, #24] - ldr r1, [r1, #32] - sbcs r4, r4, r5 - ldr r5, [r2, #28] - ldr r2, [r2, #32] - sbcs r5, r6, r5 - sbcs r1, r1, r2 - add r2, r0, #8 - str r1, [r0, #32] - stm r0, {r12, lr} - stm r2, {r8, r9, r10} - mov r2, #0 - str r7, [r0, #20] - str r4, [r0, #24] - str r5, [r0, #28] - sbc r2, r2, #0 - tst r2, #1 - beq .LBB138_2 -@ BB#1: @ %carry - ldr r2, [r3, #32] - ldr r6, [r3, #4] - ldr r11, [r3, #12] - str r2, [sp, #20] @ 4-byte Spill - ldr r2, [r3, #8] - str r2, [sp] @ 4-byte Spill - ldr r2, [r3, #16] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r3, #20] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r3, #24] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r3, #28] - ldr r3, [r3] - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [sp] @ 4-byte Reload - adds r3, r3, r12 - adcs r6, r6, lr - adcs r8, r2, r8 - ldr r2, [sp, #4] @ 4-byte Reload - adcs r12, r11, r9 - adcs lr, r2, r10 - ldr r2, [sp, #8] @ 4-byte Reload - adcs r7, r2, r7 - ldr r2, [sp, #12] @ 4-byte Reload - adcs r4, r2, r4 - ldr r2, [sp, #16] @ 4-byte Reload - stm r0, {r3, r6, r8, r12, lr} - str r7, [r0, #20] - str r4, [r0, #24] - adcs r5, r2, r5 - ldr r2, [sp, #20] @ 4-byte Reload - str r5, [r0, #28] - adc r1, r2, r1 - str r1, [r0, #32] -.LBB138_2: @ %nocarry - add sp, sp, #24 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end138: - .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L - .cantunwind - .fnend - - .globl mcl_fp_subNF9L - .align 2 - .type mcl_fp_subNF9L,%function -mcl_fp_subNF9L: @ @mcl_fp_subNF9L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #52 - sub sp, sp, #52 - ldr r7, [r2, #32] - str r7, [sp, #48] @ 4-byte Spill - ldr r7, [r1, #32] - str r7, [sp, #44] @ 4-byte Spill - ldm r2, {r6, r8} - ldr r7, [r2, #8] - ldr r5, [r2, #16] - ldr r4, [r1, #16] - ldr r11, [r1, #20] - ldr r10, [r1, #24] - ldr r9, [r1, #28] - 
str r7, [sp, #36] @ 4-byte Spill - ldr r7, [r2, #12] - str r7, [sp, #32] @ 4-byte Spill - ldr r7, [r2, #20] - str r7, [sp, #40] @ 4-byte Spill - ldr r7, [r2, #24] - ldr r2, [r2, #28] - str r7, [sp, #24] @ 4-byte Spill - str r2, [sp, #28] @ 4-byte Spill - ldm r1, {r1, r2, r12, lr} - subs r6, r1, r6 - ldr r1, [sp, #36] @ 4-byte Reload - sbcs r7, r2, r8 - ldr r2, [sp, #44] @ 4-byte Reload - str r6, [sp, #12] @ 4-byte Spill - str r7, [sp, #16] @ 4-byte Spill - sbcs r8, r12, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r8, [sp, #20] @ 4-byte Spill - sbcs r12, lr, r1 - ldr r1, [sp, #40] @ 4-byte Reload - sbcs r5, r4, r5 - str r12, [sp, #32] @ 4-byte Spill - str r5, [sp, #36] @ 4-byte Spill - sbcs lr, r11, r1 - ldr r1, [sp, #24] @ 4-byte Reload - ldr r11, [r3, #16] - str lr, [sp, #40] @ 4-byte Spill - sbcs r4, r10, r1 - ldr r1, [sp, #28] @ 4-byte Reload - ldr r10, [r3, #20] - str r4, [sp, #24] @ 4-byte Spill - sbcs r9, r9, r1 - ldr r1, [sp, #48] @ 4-byte Reload - sbc r1, r2, r1 - ldr r2, [r3, #24] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [r3, #32] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [r3, #4] - str r1, [sp, #8] @ 4-byte Spill - ldr r1, [r3, #8] - str r1, [sp, #4] @ 4-byte Spill - ldr r1, [r3, #12] - str r1, [sp] @ 4-byte Spill - ldr r1, [r3, #28] - ldr r3, [r3] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #8] @ 4-byte Reload - adds r3, r6, r3 - adcs r6, r7, r1 - ldr r1, [sp, #4] @ 4-byte Reload - adcs r7, r8, r1 - ldr r1, [sp] @ 4-byte Reload - adcs r1, r12, r1 - adcs r12, r5, r11 - adcs r5, lr, r10 - ldr r10, [sp, #12] @ 4-byte Reload - adcs lr, r4, r2 - ldr r2, [sp, #28] @ 4-byte Reload - ldr r4, [sp, #48] @ 4-byte Reload - adcs r8, r9, r2 - ldr r2, [sp, #44] @ 4-byte Reload - adc r11, r4, r2 - asr r2, r4, #31 - cmp r2, #0 - movge r3, r10 - str r3, [r0] - ldr r3, [sp, #16] @ 4-byte Reload - movge r6, r3 - ldr r3, [sp, #20] @ 4-byte Reload - str r6, [r0, #4] - movge r7, r3 - ldr r3, [sp, #32] @ 4-byte Reload - cmp r2, #0 - str r7, [r0, #8] - movge r1, r3 - str r1, [r0, #12] - ldr r1, [sp, #36] @ 4-byte Reload - movge r12, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r12, [r0, #16] - movge r5, r1 - ldr r1, [sp, #24] @ 4-byte Reload - cmp r2, #0 - movge r8, r9 - movge r11, r4 - str r5, [r0, #20] - movge lr, r1 - str lr, [r0, #24] - str r8, [r0, #28] - str r11, [r0, #32] - add sp, sp, #52 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end139: - .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L - .cantunwind - .fnend - - .globl mcl_fpDbl_add9L - .align 2 - .type mcl_fpDbl_add9L,%function -mcl_fpDbl_add9L: @ @mcl_fpDbl_add9L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #88 - sub sp, sp, #88 - ldm r1, {r7, r9} - ldr r8, [r1, #8] - ldr lr, [r1, #12] - ldm r2, {r4, r5, r6, r12} - add r11, r2, #16 - adds r4, r4, r7 - ldr r7, [r2, #28] - str r4, [sp, #36] @ 4-byte Spill - ldr r4, [r2, #64] - str r7, [sp, #24] @ 4-byte Spill - str r4, [sp, #76] @ 4-byte Spill - ldr r4, [r2, #68] - str r4, [sp, #80] @ 4-byte Spill - adcs r4, r5, r9 - str r4, [sp, #32] @ 4-byte Spill - adcs r4, r6, r8 - str r4, [sp, #28] @ 4-byte Spill - ldr r4, [r2, #32] - adcs r9, r12, lr - add lr, r1, #16 - str r4, [sp, #48] @ 4-byte Spill - ldr r4, [r2, #36] - str r4, [sp, #52] @ 4-byte Spill - ldr r4, [r2, #40] - str r4, [sp, #56] @ 4-byte Spill - ldr r4, [r2, #44] - str r4, [sp, #60] @ 4-byte Spill - ldr r4, [r2, #48] - str r4, [sp, #64] @ 4-byte Spill - ldr r4, [r2, #52] - str r4, [sp, #68] @ 4-byte Spill - ldr r4, [r2, #56] 
- str r4, [sp, #72] @ 4-byte Spill - ldr r4, [r2, #60] - str r4, [sp, #84] @ 4-byte Spill - ldm r11, {r5, r6, r11} - ldr r2, [r1, #64] - ldr r8, [r1, #32] - ldr r4, [r1, #36] - str r2, [sp, #40] @ 4-byte Spill - ldr r2, [r1, #68] - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [r1, #40] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #44] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #48] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #52] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r1, #56] - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #20] @ 4-byte Spill - ldm lr, {r1, r2, r12, lr} - ldr r10, [sp, #36] @ 4-byte Reload - ldr r7, [sp, #32] @ 4-byte Reload - str r10, [r0] - str r7, [r0, #4] - ldr r7, [sp, #28] @ 4-byte Reload - adcs r1, r5, r1 - adcs r2, r6, r2 - str r7, [r0, #8] - str r9, [r0, #12] - str r1, [r0, #16] - str r2, [r0, #20] - ldr r2, [sp, #24] @ 4-byte Reload - adcs r1, r11, r12 - str r1, [r0, #24] - ldr r1, [sp, #48] @ 4-byte Reload - adcs r2, r2, lr - str r2, [r0, #28] - ldr r2, [sp, #52] @ 4-byte Reload - adcs r1, r1, r8 - str r1, [r0, #32] - ldr r1, [sp, #56] @ 4-byte Reload - adcs r4, r2, r4 - ldr r2, [sp] @ 4-byte Reload - adcs r5, r1, r2 - ldr r1, [sp, #60] @ 4-byte Reload - ldr r2, [sp, #4] @ 4-byte Reload - str r5, [sp, #56] @ 4-byte Spill - adcs lr, r1, r2 - ldr r1, [sp, #64] @ 4-byte Reload - ldr r2, [sp, #8] @ 4-byte Reload - str lr, [sp, #60] @ 4-byte Spill - adcs r12, r1, r2 - ldr r1, [sp, #68] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - str r12, [sp, #64] @ 4-byte Spill - adcs r7, r1, r2 - ldr r1, [sp, #72] @ 4-byte Reload - ldr r2, [sp, #16] @ 4-byte Reload - str r7, [sp, #68] @ 4-byte Spill - adcs r8, r1, r2 - ldr r1, [sp, #84] @ 4-byte Reload - ldr r2, [sp, #20] @ 4-byte Reload - str r8, [sp, #72] @ 4-byte Spill - adcs r1, r1, r2 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r6, r1, r2 - ldr r1, [sp, #80] @ 4-byte Reload - ldr r2, [sp, #44] @ 4-byte Reload - adcs r9, r1, r2 - mov r2, #0 - adc r1, r2, #0 - str r9, [sp, #76] @ 4-byte Spill - str r1, [sp, #80] @ 4-byte Spill - ldmib r3, {r2, r11} - ldr r1, [r3, #12] - ldr r10, [r3] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [r3, #16] - subs r10, r4, r10 - sbcs r2, r5, r2 - sbcs r11, lr, r11 - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [r3, #20] - ldr r5, [sp, #40] @ 4-byte Reload - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [r3, #24] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [r3, #28] - ldr r3, [r3, #32] - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - sbcs r1, r12, r1 - sbcs r12, r7, r5 - ldr r7, [sp, #44] @ 4-byte Reload - ldr r5, [sp, #84] @ 4-byte Reload - sbcs lr, r8, r7 - ldr r7, [sp, #48] @ 4-byte Reload - mov r8, r6 - sbcs r7, r5, r7 - ldr r5, [sp, #52] @ 4-byte Reload - sbcs r5, r6, r5 - sbcs r6, r9, r3 - ldr r3, [sp, #80] @ 4-byte Reload - sbc r9, r3, #0 - ldr r3, [sp, #56] @ 4-byte Reload - ands r9, r9, #1 - movne r10, r4 - str r10, [r0, #36] - movne r2, r3 - str r2, [r0, #40] - ldr r2, [sp, #60] @ 4-byte Reload - movne r11, r2 - ldr r2, [sp, #64] @ 4-byte Reload - cmp r9, #0 - str r11, [r0, #44] - movne r1, r2 - str r1, [r0, #48] - ldr r1, [sp, #68] @ 4-byte Reload - movne r12, r1 - ldr r1, [sp, #72] @ 4-byte Reload - str r12, [r0, #52] - movne lr, r1 - ldr r1, [sp, #84] @ 4-byte Reload - cmp r9, #0 - movne r5, r8 - str lr, [r0, #56] - movne r7, r1 - ldr r1, [sp, #76] @ 4-byte Reload - str r7, [r0, #60] - str r5, [r0, #64] - movne r6, r1 - str r6, [r0, #68] - add sp, 
sp, #88 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end140: - .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L - .cantunwind - .fnend - - .globl mcl_fpDbl_sub9L - .align 2 - .type mcl_fpDbl_sub9L,%function -mcl_fpDbl_sub9L: @ @mcl_fpDbl_sub9L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #80 - sub sp, sp, #80 - ldr r7, [r2, #64] - str r7, [sp, #72] @ 4-byte Spill - ldr r7, [r2, #68] - str r7, [sp, #76] @ 4-byte Spill - ldr r7, [r2, #32] - str r7, [sp, #48] @ 4-byte Spill - ldr r7, [r2, #40] - str r7, [sp, #52] @ 4-byte Spill - ldr r7, [r2, #44] - str r7, [sp, #40] @ 4-byte Spill - ldr r7, [r2, #48] - str r7, [sp, #56] @ 4-byte Spill - ldr r7, [r2, #52] - str r7, [sp, #60] @ 4-byte Spill - ldr r7, [r2, #56] - str r7, [sp, #64] @ 4-byte Spill - ldr r7, [r2, #60] - str r7, [sp, #68] @ 4-byte Spill - ldr r7, [r2, #36] - str r7, [sp, #44] @ 4-byte Spill - ldm r2, {r5, r6, r7, r8} - ldr r4, [r2, #16] - ldr r10, [r2, #24] - str r4, [sp, #20] @ 4-byte Spill - ldr r4, [r2, #20] - ldr r2, [r2, #28] - str r4, [sp, #24] @ 4-byte Spill - str r2, [sp, #32] @ 4-byte Spill - ldm r1, {r2, r12, lr} - ldr r4, [r1, #12] - ldr r11, [r1, #60] - subs r9, r2, r5 - ldr r2, [r1, #64] - sbcs r5, r12, r6 - sbcs r6, lr, r7 - add lr, r1, #16 - ldr r7, [r1, #36] - sbcs r4, r4, r8 - ldr r8, [r1, #32] - str r2, [sp, #36] @ 4-byte Spill - ldr r2, [r1, #68] - str r2, [sp, #28] @ 4-byte Spill - ldr r2, [r1, #40] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #44] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #48] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #52] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r1, #56] - str r2, [sp, #16] @ 4-byte Spill - ldm lr, {r1, r2, r12, lr} - str r9, [r0] - stmib r0, {r5, r6} - str r4, [r0, #12] - ldr r5, [sp, #20] @ 4-byte Reload - ldr r4, [sp, #24] @ 4-byte Reload - sbcs r1, r1, r5 - ldr r5, [sp, #16] @ 4-byte Reload - sbcs r2, r2, r4 - str r1, [r0, #16] - str r2, [r0, #20] - ldr r2, [sp, #32] @ 4-byte Reload - sbcs r1, r12, r10 - str r1, [r0, #24] - ldr r1, [sp, #48] @ 4-byte Reload - sbcs r2, lr, r2 - str r2, [r0, #28] - ldr r2, [sp, #44] @ 4-byte Reload - sbcs r1, r8, r1 - str r1, [r0, #32] - sbcs r1, r7, r2 - ldr r2, [sp, #52] @ 4-byte Reload - ldr r7, [sp] @ 4-byte Reload - str r1, [sp, #48] @ 4-byte Spill - sbcs r4, r7, r2 - ldr r2, [sp, #40] @ 4-byte Reload - ldr r7, [sp, #4] @ 4-byte Reload - sbcs r9, r7, r2 - ldr r2, [sp, #56] @ 4-byte Reload - ldr r7, [sp, #8] @ 4-byte Reload - sbcs r12, r7, r2 - ldr r2, [sp, #60] @ 4-byte Reload - ldr r7, [sp, #12] @ 4-byte Reload - str r12, [sp, #56] @ 4-byte Spill - sbcs lr, r7, r2 - ldr r2, [sp, #64] @ 4-byte Reload - ldr r7, [sp, #36] @ 4-byte Reload - str lr, [sp, #60] @ 4-byte Spill - sbcs r10, r5, r2 - ldr r2, [sp, #68] @ 4-byte Reload - ldr r5, [sp, #28] @ 4-byte Reload - str r10, [sp, #64] @ 4-byte Spill - sbcs r6, r11, r2 - ldr r2, [sp, #72] @ 4-byte Reload - str r6, [sp, #68] @ 4-byte Spill - sbcs r8, r7, r2 - ldr r2, [sp, #76] @ 4-byte Reload - str r8, [sp, #44] @ 4-byte Spill - sbcs r11, r5, r2 - mov r2, #0 - sbc r2, r2, #0 - str r11, [sp, #76] @ 4-byte Spill - str r2, [sp, #72] @ 4-byte Spill - ldr r2, [r3, #32] - str r2, [sp, #52] @ 4-byte Spill - ldmib r3, {r5, r7} - ldr r2, [r3, #12] - str r2, [sp, #24] @ 4-byte Spill - ldr r2, [r3, #16] - str r2, [sp, #32] @ 4-byte Spill - ldr r2, [r3, #20] - str r2, [sp, #28] @ 4-byte Spill - ldr r2, [r3, #24] - str r2, [sp, #36] @ 4-byte Spill - ldr r2, [r3, #28] - ldr r3, [r3] - adds 
r3, r1, r3 - str r2, [sp, #40] @ 4-byte Spill - ldr r2, [sp, #24] @ 4-byte Reload - adcs r5, r4, r5 - adcs r1, r9, r7 - ldr r7, [sp, #32] @ 4-byte Reload - adcs r2, r12, r2 - adcs r12, lr, r7 - ldr r7, [sp, #28] @ 4-byte Reload - adcs lr, r10, r7 - ldr r7, [sp, #36] @ 4-byte Reload - adcs r10, r6, r7 - ldr r6, [sp, #40] @ 4-byte Reload - ldr r7, [sp, #52] @ 4-byte Reload - adcs r6, r8, r6 - adc r11, r11, r7 - ldr r7, [sp, #72] @ 4-byte Reload - ands r8, r7, #1 - ldr r7, [sp, #48] @ 4-byte Reload - moveq r5, r4 - moveq r1, r9 - moveq r3, r7 - cmp r8, #0 - str r3, [r0, #36] - str r5, [r0, #40] - str r1, [r0, #44] - ldr r1, [sp, #56] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #60] @ 4-byte Reload - str r2, [r0, #48] - moveq r12, r1 - ldr r1, [sp, #64] @ 4-byte Reload - str r12, [r0, #52] - moveq lr, r1 - ldr r1, [sp, #68] @ 4-byte Reload - cmp r8, #0 - str lr, [r0, #56] - moveq r10, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r10, [r0, #60] - moveq r6, r1 - ldr r1, [sp, #76] @ 4-byte Reload - str r6, [r0, #64] - moveq r11, r1 - str r11, [r0, #68] - add sp, sp, #80 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end141: - .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L - .cantunwind - .fnend - - .align 2 - .type .LmulPv320x32,%function -.LmulPv320x32: @ @mulPv320x32 - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r11, lr} - push {r4, r5, r6, r7, r8, r9, r11, lr} - ldr r12, [r1] - ldmib r1, {r3, lr} - ldr r9, [r1, #12] - umull r4, r8, lr, r2 - umull lr, r6, r12, r2 - mov r5, r4 - mov r7, r6 - str lr, [r0] - umull lr, r12, r9, r2 - umlal r7, r5, r3, r2 - str r5, [r0, #8] - str r7, [r0, #4] - umull r5, r7, r3, r2 - adds r3, r6, r5 - adcs r3, r7, r4 - adcs r3, r8, lr - str r3, [r0, #12] - ldr r3, [r1, #16] - umull r7, r6, r3, r2 - adcs r3, r12, r7 - str r3, [r0, #16] - ldr r3, [r1, #20] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #20] - ldr r3, [r1, #24] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #24] - ldr r3, [r1, #28] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #28] - ldr r3, [r1, #32] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #32] - ldr r1, [r1, #36] - umull r3, r7, r1, r2 - adcs r1, r6, r3 - str r1, [r0, #36] - adc r1, r7, #0 - str r1, [r0, #40] - pop {r4, r5, r6, r7, r8, r9, r11, lr} - mov pc, lr -.Lfunc_end142: - .size .LmulPv320x32, .Lfunc_end142-.LmulPv320x32 - .cantunwind - .fnend - - .globl mcl_fp_mulUnitPre10L - .align 2 - .type mcl_fp_mulUnitPre10L,%function -mcl_fp_mulUnitPre10L: @ @mcl_fp_mulUnitPre10L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r11, lr} - push {r4, r5, r6, r7, r8, r9, r11, lr} - .pad #48 - sub sp, sp, #48 - mov r4, r0 - mov r0, sp - bl .LmulPv320x32(PLT) - ldr r12, [sp, #40] - ldr lr, [sp, #36] - ldr r8, [sp, #32] - ldr r9, [sp, #28] - ldr r0, [sp, #24] - ldr r1, [sp, #20] - ldm sp, {r6, r7} - add r5, sp, #8 - ldm r5, {r2, r3, r5} - stm r4, {r6, r7} - add r6, r4, #8 - stm r6, {r2, r3, r5} - str r1, [r4, #20] - str r0, [r4, #24] - str r9, [r4, #28] - str r8, [r4, #32] - str lr, [r4, #36] - str r12, [r4, #40] - add sp, sp, #48 - pop {r4, r5, r6, r7, r8, r9, r11, lr} - mov pc, lr -.Lfunc_end143: - .size mcl_fp_mulUnitPre10L, .Lfunc_end143-mcl_fp_mulUnitPre10L - .cantunwind - .fnend - - .globl mcl_fpDbl_mulPre10L - .align 2 - .type mcl_fpDbl_mulPre10L,%function -mcl_fpDbl_mulPre10L: @ @mcl_fpDbl_mulPre10L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #156 - sub sp, sp, #156 - mov r6, r2 - mov r5, r1 - mov 
r4, r0 - bl mcl_fpDbl_mulPre5L(PLT) - add r0, r4, #40 - add r1, r5, #20 - add r2, r6, #20 - bl mcl_fpDbl_mulPre5L(PLT) - add r11, r6, #24 - ldr r7, [r6, #12] - ldr r8, [r6, #16] - ldr r1, [r6, #20] - ldm r11, {r0, r2, r10, r11} - ldm r6, {r6, r9, r12} - adds lr, r6, r1 - adcs r3, r9, r0 - mov r0, #0 - str lr, [sp, #72] @ 4-byte Spill - adcs r2, r12, r2 - str r3, [sp, #68] @ 4-byte Spill - adcs r12, r7, r10 - str r2, [sp, #64] @ 4-byte Spill - adcs r10, r8, r11 - str r12, [sp, #60] @ 4-byte Spill - adc r6, r0, #0 - ldr r0, [r5, #32] - str r10, [sp, #56] @ 4-byte Spill - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [r5, #36] - str r0, [sp, #52] @ 4-byte Spill - ldmib r5, {r8, r9, r11} - ldr r0, [r5, #16] - ldr r7, [r5, #20] - ldr r1, [r5, #28] - str lr, [sp, #76] - str r3, [sp, #80] - str r2, [sp, #84] - str r12, [sp, #88] - str r10, [sp, #92] - add r2, sp, #76 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [r5, #24] - ldr r5, [r5] - adds r5, r5, r7 - adcs r7, r8, r0 - ldr r0, [sp, #48] @ 4-byte Reload - str r5, [sp, #96] - adcs r9, r9, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r7, [sp, #100] - str r9, [sp, #104] - adcs r11, r11, r0 - ldr r0, [sp, #52] @ 4-byte Reload - str r11, [sp, #108] - adcs r8, r1, r0 - mov r0, #0 - add r1, sp, #96 - adc r10, r0, #0 - add r0, sp, #116 - str r8, [sp, #112] - bl mcl_fpDbl_mulPre5L(PLT) - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #68] @ 4-byte Reload - cmp r6, #0 - ldr r2, [sp, #64] @ 4-byte Reload - ldr r3, [sp, #60] @ 4-byte Reload - moveq r5, r6 - moveq r8, r6 - moveq r11, r6 - moveq r9, r6 - moveq r7, r6 - str r5, [sp, #52] @ 4-byte Spill - adds r0, r5, r0 - ldr r5, [sp, #56] @ 4-byte Reload - adcs r1, r7, r1 - adcs r2, r9, r2 - adcs r3, r11, r3 - adcs r12, r8, r5 - mov r5, #0 - adc lr, r5, #0 - cmp r10, #0 - ldr r5, [sp, #52] @ 4-byte Reload - moveq r1, r7 - ldr r7, [sp, #136] - moveq r3, r11 - moveq r2, r9 - moveq r12, r8 - moveq lr, r10 - cmp r10, #0 - moveq r0, r5 - and r5, r6, r10 - ldr r6, [sp, #152] - adds r8, r0, r7 - ldr r7, [sp, #140] - adcs r10, r1, r7 - ldr r7, [sp, #144] - adcs r11, r2, r7 - ldr r7, [sp, #148] - adcs r0, r3, r7 - adcs r12, r12, r6 - str r0, [sp, #60] @ 4-byte Spill - adc r9, lr, r5 - ldm r4, {r5, r6, r7, lr} - ldr r1, [sp, #116] - ldr r2, [sp, #120] - ldr r0, [sp, #124] - ldr r3, [sp, #128] - subs r1, r1, r5 - sbcs r2, r2, r6 - ldr r6, [sp, #132] - sbcs r0, r0, r7 - ldr r7, [r4, #16] - sbcs lr, r3, lr - ldr r3, [r4, #20] - sbcs r5, r6, r7 - ldr r6, [r4, #32] - ldr r7, [r4, #52] - str r3, [sp, #72] @ 4-byte Spill - sbcs r3, r8, r3 - ldr r8, [r4, #56] - str r3, [sp, #44] @ 4-byte Spill - ldr r3, [r4, #24] - str r6, [sp, #28] @ 4-byte Spill - str r3, [sp, #68] @ 4-byte Spill - sbcs r3, r10, r3 - ldr r10, [r4, #44] - str r3, [sp, #56] @ 4-byte Spill - ldr r3, [r4, #28] - str r3, [sp, #64] @ 4-byte Spill - sbcs r3, r11, r3 - str r3, [sp, #52] @ 4-byte Spill - ldr r3, [sp, #60] @ 4-byte Reload - sbcs r3, r3, r6 - str r3, [sp, #48] @ 4-byte Spill - ldr r3, [r4, #36] - str r3, [sp, #60] @ 4-byte Spill - sbcs r3, r12, r3 - ldr r12, [r4, #64] - str r3, [sp, #40] @ 4-byte Spill - sbc r3, r9, #0 - ldr r9, [r4, #40] - str r3, [sp, #36] @ 4-byte Spill - ldr r3, [r4, #76] - subs r1, r1, r9 - sbcs r2, r2, r10 - str r2, [sp, #32] @ 4-byte Spill - ldr r2, [r4, #48] - ldr r11, [sp, #32] @ 4-byte Reload - sbcs r0, r0, r2 - str r2, [sp, #20] @ 4-byte Spill - ldr r2, [r4, #72] - str r0, [sp, #24] @ 4-byte Spill - sbcs r0, lr, r7 - ldr lr, [r4, #68] - str r0, [sp, #16] @ 4-byte Spill - sbcs r0, r5, r8 - ldr r5, [r4, #60] - ldr r6, [sp, 
#24] @ 4-byte Reload - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - sbcs r0, r0, r5 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - sbcs r0, r0, r12 - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - sbcs r0, r0, lr - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - sbcs r0, r0, r2 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - sbcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - sbc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adds r0, r0, r1 - ldr r1, [sp, #68] @ 4-byte Reload - str r0, [r4, #20] - ldr r0, [sp, #64] @ 4-byte Reload - adcs r1, r1, r11 - adcs r0, r0, r6 - str r1, [r4, #24] - ldr r1, [sp, #28] @ 4-byte Reload - ldr r6, [sp, #16] @ 4-byte Reload - str r0, [r4, #28] - ldr r0, [sp, #60] @ 4-byte Reload - adcs r1, r1, r6 - ldr r6, [sp, #12] @ 4-byte Reload - str r1, [r4, #32] - ldr r1, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #8] @ 4-byte Reload - str r0, [r4, #36] - adcs r1, r9, r1 - ldr r0, [sp, #4] @ 4-byte Reload - str r1, [r4, #40] - ldr r1, [sp, #20] @ 4-byte Reload - adcs r0, r10, r0 - adcs r1, r1, r6 - str r0, [r4, #44] - ldr r0, [sp, #48] @ 4-byte Reload - ldr r6, [sp, #52] @ 4-byte Reload - str r1, [r4, #48] - ldr r1, [sp, #56] @ 4-byte Reload - adcs r0, r7, r0 - adcs r1, r8, r1 - adcs r5, r5, r6 - adcs r7, r12, #0 - add r12, r4, #52 - adcs r6, lr, #0 - stm r12, {r0, r1, r5, r7} - adcs r2, r2, #0 - str r6, [r4, #68] - adc r3, r3, #0 - str r2, [r4, #72] - str r3, [r4, #76] - add sp, sp, #156 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end144: - .size mcl_fpDbl_mulPre10L, .Lfunc_end144-mcl_fpDbl_mulPre10L - .cantunwind - .fnend - - .globl mcl_fpDbl_sqrPre10L - .align 2 - .type mcl_fpDbl_sqrPre10L,%function -mcl_fpDbl_sqrPre10L: @ @mcl_fpDbl_sqrPre10L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #156 - sub sp, sp, #156 - mov r5, r1 - mov r4, r0 - mov r2, r5 - bl mcl_fpDbl_mulPre5L(PLT) - add r1, r5, #20 - add r0, r4, #40 - mov r2, r1 - bl mcl_fpDbl_mulPre5L(PLT) - ldr lr, [r5, #32] - ldr r12, [r5, #36] - ldmib r5, {r2, r3, r6, r8} - ldr r0, [r5, #20] - ldr r7, [r5, #24] - ldr r1, [r5, #28] - ldr r5, [r5] - adds r5, r5, r0 - adcs r0, r2, r7 - str r5, [sp, #96] - str r5, [sp, #76] - adcs r1, r3, r1 - add r3, sp, #80 - str r0, [sp, #100] - adcs r2, r6, lr - str r1, [sp, #104] - adcs r6, r8, r12 - str r2, [sp, #108] - str r6, [sp, #112] - stm r3, {r0, r1, r2, r6} - lsr r3, r2, #31 - orr r3, r3, r6, lsl #1 - str r3, [sp, #72] @ 4-byte Spill - lsr r3, r1, #31 - lsl r1, r1, #1 - orr r1, r1, r0, lsr #31 - orr r2, r3, r2, lsl #1 - str r1, [sp, #64] @ 4-byte Spill - lsr r1, r5, #31 - str r2, [sp, #68] @ 4-byte Spill - add r2, sp, #76 - orr r11, r1, r0, lsl #1 - mov r0, #0 - add r1, sp, #96 - adc r7, r0, #0 - add r0, sp, #116 - bl mcl_fpDbl_mulPre5L(PLT) - ldr r10, [sp, #136] - ldr r9, [sp, #140] - ldr r8, [sp, #144] - ldr r0, [sp, #64] @ 4-byte Reload - ldr r2, [sp, #148] - ldr r1, [sp, #152] - adds r3, r10, r5, lsl #1 - adcs r5, r9, r11 - adcs r12, r8, r0 - ldr r0, [sp, #68] @ 4-byte Reload - adcs lr, r2, r0 - ldr r0, [sp, #72] @ 4-byte Reload - adcs r11, r1, r0 - adc r6, r7, r6, lsr #31 - cmp r7, #0 - moveq lr, r2 - moveq r12, r8 - moveq r11, r1 - moveq r6, r7 - moveq r5, r9 - cmp r7, #0 - add r7, sp, #116 - moveq r3, r10 - ldm r4, {r9, r10} - ldr r0, [r4, #8] - ldr r8, [r4, 
#12] - str r0, [sp, #72] @ 4-byte Spill - ldm r7, {r1, r2, r7} - ldr r0, [sp, #128] - subs r1, r1, r9 - ldr r9, [r4, #40] - sbcs r2, r2, r10 - ldr r10, [r4, #44] - str r2, [sp, #20] @ 4-byte Spill - ldr r2, [sp, #72] @ 4-byte Reload - sbcs r7, r7, r2 - ldr r2, [r4, #48] - str r7, [sp, #44] @ 4-byte Spill - sbcs r8, r0, r8 - ldr r0, [r4, #16] - ldr r7, [sp, #132] - str r2, [sp, #16] @ 4-byte Spill - sbcs r0, r7, r0 - ldr r7, [r4, #52] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r4, #20] - sbcs r3, r3, r0 - str r3, [sp, #36] @ 4-byte Spill - ldr r3, [r4, #24] - str r3, [sp, #72] @ 4-byte Spill - sbcs r3, r5, r3 - ldr r5, [r4, #60] - str r3, [sp, #56] @ 4-byte Spill - ldr r3, [r4, #28] - str r3, [sp, #68] @ 4-byte Spill - sbcs r3, r12, r3 - ldr r12, [r4, #64] - str r3, [sp, #52] @ 4-byte Spill - ldr r3, [r4, #32] - str r3, [sp, #64] @ 4-byte Spill - sbcs r3, lr, r3 - ldr lr, [r4, #68] - str r3, [sp, #48] @ 4-byte Spill - ldr r3, [r4, #36] - str r3, [sp, #60] @ 4-byte Spill - sbcs r3, r11, r3 - str r3, [sp, #32] @ 4-byte Spill - sbc r3, r6, #0 - subs r1, r1, r9 - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #20] @ 4-byte Reload - str r3, [sp, #28] @ 4-byte Spill - ldr r3, [r4, #76] - sbcs r1, r1, r10 - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - ldr r11, [sp, #20] @ 4-byte Reload - sbcs r1, r1, r2 - ldr r2, [r4, #72] - str r1, [sp, #44] @ 4-byte Spill - sbcs r1, r8, r7 - ldr r8, [r4, #56] - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - ldr r6, [sp, #44] @ 4-byte Reload - sbcs r1, r1, r8 - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - sbcs r1, r1, r5 - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - sbcs r1, r1, r12 - str r1, [sp, #4] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - sbcs r1, r1, lr - str r1, [sp, #8] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - sbcs r1, r1, r2 - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #32] @ 4-byte Reload - sbcs r1, r1, r3 - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #28] @ 4-byte Reload - sbc r1, r1, #0 - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #24] @ 4-byte Reload - adds r0, r0, r1 - ldr r1, [sp, #72] @ 4-byte Reload - str r0, [r4, #20] - ldr r0, [sp, #68] @ 4-byte Reload - adcs r1, r1, r11 - adcs r0, r0, r6 - str r1, [r4, #24] - ldr r1, [sp, #64] @ 4-byte Reload - ldr r6, [sp, #12] @ 4-byte Reload - str r0, [r4, #28] - ldr r0, [sp, #60] @ 4-byte Reload - adcs r1, r1, r6 - ldr r6, [sp, #40] @ 4-byte Reload - str r1, [r4, #32] - ldr r1, [sp, #36] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #8] @ 4-byte Reload - str r0, [r4, #36] - adcs r1, r9, r1 - ldr r0, [sp, #4] @ 4-byte Reload - str r1, [r4, #40] - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r10, r0 - adcs r1, r1, r6 - str r0, [r4, #44] - ldr r0, [sp, #48] @ 4-byte Reload - ldr r6, [sp, #52] @ 4-byte Reload - str r1, [r4, #48] - ldr r1, [sp, #56] @ 4-byte Reload - adcs r0, r7, r0 - adcs r1, r8, r1 - adcs r5, r5, r6 - adcs r7, r12, #0 - add r12, r4, #52 - adcs r6, lr, #0 - stm r12, {r0, r1, r5, r7} - adcs r2, r2, #0 - str r6, [r4, #68] - adc r3, r3, #0 - str r2, [r4, #72] - str r3, [r4, #76] - add sp, sp, #156 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end145: - .size mcl_fpDbl_sqrPre10L, .Lfunc_end145-mcl_fpDbl_sqrPre10L - .cantunwind - .fnend - - .globl mcl_fp_mont10L - .align 2 - .type mcl_fp_mont10L,%function -mcl_fp_mont10L: @ @mcl_fp_mont10L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, 
r9, r10, r11, lr} - .pad #28 - sub sp, sp, #28 - .pad #1024 - sub sp, sp, #1024 - mov r7, r2 - ldr r5, [r3, #-4] - str r0, [sp, #68] @ 4-byte Spill - add r0, sp, #1000 - str r3, [sp, #84] @ 4-byte Spill - str r1, [sp, #76] @ 4-byte Spill - mov r4, r3 - mov r6, r1 - ldr r2, [r7] - str r7, [sp, #72] @ 4-byte Spill - str r5, [sp, #80] @ 4-byte Spill - bl .LmulPv320x32(PLT) - ldr r0, [sp, #1004] - ldr r10, [sp, #1000] - mov r1, r4 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1008] - mul r2, r10, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1012] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1040] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #1036] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #1032] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1028] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1024] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1020] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1016] - str r0, [sp, #28] @ 4-byte Spill - add r0, sp, #952 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #992] - ldr r2, [r7, #4] - ldr r9, [sp, #968] - ldr r8, [sp, #952] - ldr r11, [sp, #956] - ldr r5, [sp, #960] - ldr r4, [sp, #964] - mov r1, r6 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #988] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #984] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #980] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #976] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #972] - str r0, [sp, #4] @ 4-byte Spill - add r0, sp, #904 - bl .LmulPv320x32(PLT) - ldr r1, [sp, #52] @ 4-byte Reload - adds r0, r8, r10 - ldr r2, [sp, #4] @ 4-byte Reload - add lr, sp, #908 - ldr r10, [sp, #944] - mov r0, #0 - adcs r1, r11, r1 - add r11, sp, #932 - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r5, r1 - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - adcs r1, r4, r1 - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #28] @ 4-byte Reload - adcs r1, r9, r1 - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #32] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #8] @ 4-byte Reload - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #12] @ 4-byte Reload - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - ldm r11, {r5, r6, r11} - ldr r4, [sp, #904] - adcs r8, r2, r1 - adc r9, r0, #0 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #52] @ 4-byte Reload - adds r4, r7, r4 - ldr r7, [sp, #48] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #20] @ 
4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r8, r11 - str r0, [sp, #32] @ 4-byte Spill - adcs r0, r9, r10 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r4, r0 - add r0, sp, #856 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #896] - add r11, sp, #856 - ldr r6, [sp, #880] - ldr r7, [sp, #876] - ldr r5, [sp, #872] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #892] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #888] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #884] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #8] - add r0, sp, #808 - bl .LmulPv320x32(PLT) - adds r0, r4, r8 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #808 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #848] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #832 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r8, r0, r1 - ldr r0, [sp, #24] @ 4-byte Reload - ldm r11, {r4, r5, r6, r11} - adc r9, r0, #0 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #64] @ 4-byte Reload - adds r7, r7, r0 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r8, r11 - str r0, [sp, #32] @ 4-byte Spill - adcs r0, r9, r10 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #760 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #800] - add r11, sp, #760 - ldr r6, [sp, #784] - ldr r4, [sp, #780] - ldr r5, [sp, #776] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #796] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #792] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #788] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #12] - add r0, sp, #712 - bl .LmulPv320x32(PLT) - adds 
r0, r7, r8 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #716 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #752] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #740 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r8, r0, r1 - ldr r0, [sp, #24] @ 4-byte Reload - ldm r11, {r5, r6, r11} - ldr r4, [sp, #712] - adc r9, r0, #0 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #64] @ 4-byte Reload - adds r4, r7, r4 - ldr r7, [sp, #60] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r8, r11 - str r0, [sp, #32] @ 4-byte Spill - adcs r0, r9, r10 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r4, r0 - add r0, sp, #664 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #704] - add r11, sp, #664 - ldr r6, [sp, #688] - ldr r7, [sp, #684] - ldr r5, [sp, #680] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #700] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #696] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #692] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #16] - add r0, sp, #616 - bl .LmulPv320x32(PLT) - adds r0, r4, r8 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #616 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #656] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #640 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte 
Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r8, r0, r1 - ldr r0, [sp, #24] @ 4-byte Reload - ldm r11, {r4, r5, r6, r11} - adc r9, r0, #0 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #64] @ 4-byte Reload - adds r7, r7, r0 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r8, r11 - str r0, [sp, #32] @ 4-byte Spill - adcs r0, r9, r10 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #568 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #608] - add r11, sp, #568 - ldr r6, [sp, #592] - ldr r4, [sp, #588] - ldr r5, [sp, #584] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #604] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #600] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #596] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #20] - add r0, sp, #520 - bl .LmulPv320x32(PLT) - adds r0, r7, r8 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #524 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #560] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #548 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r8, r0, r1 - ldr r0, [sp, #24] @ 4-byte Reload - ldm r11, {r5, r6, r11} - ldr r4, [sp, #520] - adc r9, r0, #0 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #64] @ 4-byte Reload - adds r4, r7, r4 - ldr r7, [sp, #60] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, 
r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r8, r11 - str r0, [sp, #32] @ 4-byte Spill - adcs r0, r9, r10 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r4, r0 - add r0, sp, #472 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #512] - add r11, sp, #472 - ldr r6, [sp, #496] - ldr r7, [sp, #492] - ldr r5, [sp, #488] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #508] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #504] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #500] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #24] - add r0, sp, #424 - bl .LmulPv320x32(PLT) - adds r0, r4, r8 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #424 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #464] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #448 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r8, r0, r1 - ldr r0, [sp, #24] @ 4-byte Reload - ldm r11, {r4, r5, r6, r11} - adc r9, r0, #0 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #64] @ 4-byte Reload - adds r7, r7, r0 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r8, r11 - str r0, [sp, #32] @ 4-byte Spill - adcs r0, r9, r10 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #376 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #416] - add r11, sp, #376 - ldr r6, [sp, #400] - ldr r4, [sp, #396] - ldr r5, [sp, #392] - str r0, [sp, #20] @ 4-byte Spill - ldr 
r0, [sp, #412] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #408] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #404] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #28] - add r0, sp, #328 - bl .LmulPv320x32(PLT) - adds r0, r7, r8 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #332 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #368] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #356 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r8, r0, r1 - ldr r0, [sp, #24] @ 4-byte Reload - ldm r11, {r5, r6, r11} - ldr r4, [sp, #328] - adc r9, r0, #0 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #64] @ 4-byte Reload - adds r4, r7, r4 - ldr r7, [sp, #60] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r8, r11 - str r0, [sp, #32] @ 4-byte Spill - adcs r0, r9, r10 - str r0, [sp, #28] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r4, r0 - add r0, sp, #280 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #320] - add r11, sp, #280 - ldr r6, [sp, #304] - ldr r7, [sp, #300] - ldr r5, [sp, #296] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #316] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #312] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #308] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #32] - add r0, sp, #232 - bl .LmulPv320x32(PLT) - adds r0, r4, r8 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #232 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #272] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #256 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #52] @ 4-byte Spill 
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r0, r7
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, r0, r6
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #12] @ 4-byte Reload
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #20] @ 4-byte Reload
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #28] @ 4-byte Reload
- adcs r8, r0, r1
- ldr r0, [sp, #24] @ 4-byte Reload
- ldm r11, {r4, r5, r6, r11}
- adc r9, r0, #0
- ldm lr, {r0, r1, r2, r3, r12, lr}
- ldr r7, [sp, #64] @ 4-byte Reload
- adds r7, r7, r0
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #84] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r4
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r0, r6
- str r0, [sp, #36] @ 4-byte Spill
- adcs r0, r8, r11
- str r0, [sp, #32] @ 4-byte Spill
- adcs r0, r9, r10
- str r0, [sp, #28] @ 4-byte Spill
- mov r0, #0
- adc r0, r0, #0
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- mul r2, r7, r0
- add r0, sp, #184
- bl .LmulPv320x32(PLT)
- ldr r0, [sp, #224]
- add r11, sp, #184
- ldr r6, [sp, #208]
- ldr r4, [sp, #204]
- ldr r5, [sp, #200]
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #220]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #216]
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #212]
- str r0, [sp, #8] @ 4-byte Spill
- ldm r11, {r8, r9, r10, r11}
- ldr r0, [sp, #72] @ 4-byte Reload
- ldr r1, [sp, #76] @ 4-byte Reload
- ldr r2, [r0, #36]
- add r0, sp, #136
- bl .LmulPv320x32(PLT)
- adds r0, r7, r8
- ldr r1, [sp, #60] @ 4-byte Reload
- ldr r2, [sp, #8] @ 4-byte Reload
- add lr, sp, #136
- add r7, sp, #152
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r9
- add r9, sp, #164
- adcs r10, r1, r10
- ldr r1, [sp, #56] @ 4-byte Reload
- adcs r11, r1, r11
- ldr r1, [sp, #52] @ 4-byte Reload
- adcs r1, r1, r5
- str r1, [sp, #76] @ 4-byte Spill
- ldr r1, [sp, #48] @ 4-byte Reload
- adcs r1, r1, r4
- str r1, [sp, #72] @ 4-byte Spill
- ldr r1, [sp, #44] @ 4-byte Reload
- adcs r1, r1, r6
- str r1, [sp, #64] @ 4-byte Spill
- ldr r1, [sp, #40] @ 4-byte Reload
- adcs r1, r1, r2
- ldr r2, [sp, #12] @ 4-byte Reload
- str r1, [sp, #60] @ 4-byte Spill
- ldr r1, [sp, #36] @ 4-byte Reload
- adcs r1, r1, r2
- ldr r2, [sp, #16] @ 4-byte Reload
- str r1, [sp, #56] @ 4-byte Spill
- ldr r1, [sp, #32] @ 4-byte Reload
- adcs r1, r1, r2
- ldr r2, [sp, #20] @ 4-byte Reload
- str r1, [sp, #52] @ 4-byte Spill
- ldr r1, [sp, #28] @ 4-byte Reload
- adcs r1, r1, r2
- str r1, [sp, #48] @ 4-byte Spill
- ldr r1, [sp, #24] @ 4-byte Reload
- adc r1, r1, #0
- str r1, [sp, #44] @ 4-byte Spill
- ldm lr, {r2, r6, r12, lr}
- ldr r8, [sp, #176]
- adds r4, r0, r2
- ldr r0, [sp, #80] @ 4-byte Reload
- ldm r9, {r3, r5, r9}
- adcs r6, r10, r6
- mul r2, r4, r0
- ldm r7, {r0, r1, r7}
- str r6, [sp, #40] @ 4-byte Spill
- adcs r6, r11, r12
- ldr r11, [sp, #84] @ 4-byte Reload
- str r6, [sp, #36] @ 4-byte Spill
- ldr r6, [sp, #76] @ 4-byte Reload
- adcs r10, r6, lr
- ldr r6, [sp, #72] @ 4-byte Reload
- adcs r0, r6, r0
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r6, r0, r1
- ldr r0, [sp, #60] @ 4-byte Reload
- mov r1, r11
- adcs r0, r0, r7
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r7, r0, r8
- mov r0, #0
- adc r0, r0, #0
- str r0, [sp, #44] @ 4-byte Spill
- add r0, sp, #88
- bl .LmulPv320x32(PLT)
- add r3, sp, #88
- ldm r3, {r0, r1, r2, r3}
- adds r0, r4, r0
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r8, r0, r1
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r4, r0, r2
- ldr r2, [sp, #104]
- adcs r0, r10, r3
- str r4, [sp, #40] @ 4-byte Spill
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r2
- ldr r2, [sp, #108]
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r5, r6, r2
- ldr r2, [sp, #112]
- str r5, [sp, #48] @ 4-byte Spill
- adcs r12, r0, r2
- ldr r2, [sp, #116]
- ldr r0, [sp, #64] @ 4-byte Reload
- str r12, [sp, #52] @ 4-byte Spill
- adcs lr, r0, r2
- ldr r2, [sp, #120]
- ldr r0, [sp, #76] @ 4-byte Reload
- str lr, [sp, #60] @ 4-byte Spill
- adcs r0, r0, r2
- ldr r2, [sp, #124]
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r2
- ldr r2, [sp, #128]
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r9, r7, r2
- adc r0, r0, #0
- str r9, [sp, #64] @ 4-byte Spill
- str r0, [sp, #44] @ 4-byte Spill
- mov r0, r11
- ldr r2, [r0, #16]
- ldr r10, [r0]
- ldr r3, [r0, #4]
- ldr r1, [r0, #8]
- ldr r6, [r0, #12]
- ldr r7, [r0, #24]
- ldr r11, [r0, #32]
- str r2, [sp, #28] @ 4-byte Spill
- ldr r2, [r0, #20]
- str r2, [sp, #32] @ 4-byte Spill
- ldr r2, [r0, #28]
- ldr r0, [r0, #36]
- str r2, [sp, #36] @ 4-byte Spill
- mov r2, r8
- ldr r8, [sp, #56] @ 4-byte Reload
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #28] @ 4-byte Reload
- subs r10, r2, r10
- sbcs r3, r4, r3
- ldr r4, [sp, #80] @ 4-byte Reload
- sbcs r1, r8, r1
- sbcs r6, r4, r6
- sbcs r4, r5, r0
- ldr r0, [sp, #32] @ 4-byte Reload
- sbcs r5, r12, r0
- ldr r0, [sp, #76] @ 4-byte Reload
- sbcs r12, lr, r7
- ldr r7, [sp, #36] @ 4-byte Reload
- sbcs lr, r0, r7
- ldr r0, [sp, #72] @ 4-byte Reload
- ldr r7, [sp, #44] @ 4-byte Reload
- sbcs r11, r0, r11
- ldr r0, [sp, #84] @ 4-byte Reload
- sbcs r0, r9, r0
- ldr r9, [sp, #68] @ 4-byte Reload
- sbc r7, r7, #0
- ands r7, r7, #1
- movne r10, r2
- ldr r2, [sp, #40] @ 4-byte Reload
- movne r1, r8
- str r10, [r9]
- movne r3, r2
- cmp r7, #0
- str r3, [r9, #4]
- str r1, [r9, #8]
- ldr r1, [sp, #80] @ 4-byte Reload
- movne r6, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- str r6, [r9, #12]
- movne r4, r1
- ldr r1, [sp, #52] @ 4-byte Reload
- str r4, [r9, #16]
- movne r5, r1
- ldr r1, [sp, #60] @ 4-byte Reload
- cmp r7, #0
- str r5, [r9, #20]
- movne r12, r1
- ldr r1, [sp, #76] @ 4-byte Reload
- str r12, [r9, #24]
- movne lr, r1
- ldr r1, [sp, #72] @ 4-byte Reload
- str lr, [r9, #28]
- movne r11, r1
- ldr r1, [sp, #64] @ 4-byte Reload
- cmp r7, #0
- str r11, [r9, #32]
- movne r0, r1
- str r0, [r9, #36]
- add sp, sp, #28
- add sp, sp, #1024
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end146: - .size mcl_fp_mont10L, .Lfunc_end146-mcl_fp_mont10L - .cantunwind - .fnend - - .globl mcl_fp_montNF10L - .align 2 - .type mcl_fp_montNF10L,%function -mcl_fp_montNF10L: @ @mcl_fp_montNF10L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #28 - sub sp, sp, #28 - .pad #1024 - sub sp, sp, #1024 - mov r7, r2 - ldr r5, [r3, #-4] - str r0, [sp, #68] @ 4-byte Spill - add r0, sp, #1000 - str r3, [sp, #84] @ 4-byte Spill - str r1, [sp, #76] @ 4-byte Spill - mov r4, r3 - mov r6, r1 - ldr r2, [r7] - str r7, [sp, #72] @ 4-byte Spill - str r5, [sp, #80] @ 4-byte Spill - bl .LmulPv320x32(PLT) - ldr r0, [sp, #1004] - ldr r10, [sp, #1000] - mov r1, r4 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1008] - mul r2, r10, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1012] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1040] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #1036] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #1032] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1028] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1024] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1020] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1016] - str r0, [sp, #28] @ 4-byte Spill - add r0, sp, #952 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #992] - ldr r2, [r7, #4] - ldr r9, [sp, #968] - ldr r8, [sp, #952] - ldr r11, [sp, #956] - ldr r5, [sp, #960] - ldr r4, [sp, #964] - mov r1, r6 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #988] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #984] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #980] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #976] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #972] - str r0, [sp, #4] @ 4-byte Spill - add r0, sp, #904 - bl .LmulPv320x32(PLT) - adds r0, r8, r10 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #908 - ldr r10, [sp, #940] - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r11, r0 - ldr r11, [sp, #936] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r5, r0 - ldr r5, [sp, #932] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #904] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r9, r0 - ldr r9, [sp, #944] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r7, r1, r0 - ldr r0, [sp, #56] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adc r8, r1, r0 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r6, [sp, #52] @ 4-byte Reload - adds r4, r6, r4 - ldr r6, [sp, #48] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, 
#52] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r7, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r8, r10 - str r0, [sp, #32] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r4, r0 - add r0, sp, #856 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #896] - add r11, sp, #856 - ldr r6, [sp, #880] - ldr r7, [sp, #876] - ldr r5, [sp, #872] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #892] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #888] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #884] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #8] - add r0, sp, #808 - bl .LmulPv320x32(PLT) - adds r0, r4, r8 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #808 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #848] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #844] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #832 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - ldm r11, {r4, r5, r11} - adc r8, r0, r1 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r6, [sp, #64] @ 4-byte Reload - adds r6, r6, r0 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r7, r4 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r8, r10 - str r0, [sp, #32] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #760 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #800] - add r11, sp, #760 - ldr r5, [sp, #784] - ldr r7, [sp, #780] - ldr r4, [sp, #776] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #796] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #792] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #788] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #12] - add r0, sp, 
#712 - bl .LmulPv320x32(PLT) - adds r0, r6, r8 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #716 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #752] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #748] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #744] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #712] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #740] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r8, r0, r1 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r6, [sp, #64] @ 4-byte Reload - adds r4, r6, r4 - ldr r6, [sp, #60] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r7, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r8, r10 - str r0, [sp, #32] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r4, r0 - add r0, sp, #664 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #704] - add r11, sp, #664 - ldr r6, [sp, #688] - ldr r7, [sp, #684] - ldr r5, [sp, #680] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #700] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #696] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #692] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #16] - add r0, sp, #616 - bl .LmulPv320x32(PLT) - adds r0, r4, r8 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #616 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #656] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #652] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #640 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, 
r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - ldm r11, {r4, r5, r11} - adc r8, r0, r1 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r6, [sp, #64] @ 4-byte Reload - adds r6, r6, r0 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r7, r4 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r8, r10 - str r0, [sp, #32] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #568 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #608] - add r11, sp, #568 - ldr r5, [sp, #592] - ldr r7, [sp, #588] - ldr r4, [sp, #584] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #604] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #600] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #596] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #20] - add r0, sp, #520 - bl .LmulPv320x32(PLT) - adds r0, r6, r8 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #524 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #560] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #556] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #552] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #520] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #548] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r8, r0, r1 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r6, [sp, #64] @ 4-byte Reload - adds r4, r6, r4 - ldr r6, [sp, #60] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r7, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, 
r8, r10 - str r0, [sp, #32] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r4, r0 - add r0, sp, #472 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #512] - add r11, sp, #472 - ldr r6, [sp, #496] - ldr r7, [sp, #492] - ldr r5, [sp, #488] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #508] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #504] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #500] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #24] - add r0, sp, #424 - bl .LmulPv320x32(PLT) - adds r0, r4, r8 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #424 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #464] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #460] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #448 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - ldm r11, {r4, r5, r11} - adc r8, r0, r1 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r6, [sp, #64] @ 4-byte Reload - adds r6, r6, r0 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r7, r4 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r8, r10 - str r0, [sp, #32] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #376 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #416] - add r11, sp, #376 - ldr r5, [sp, #400] - ldr r7, [sp, #396] - ldr r4, [sp, #392] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #412] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #408] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #404] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #28] - add r0, sp, #328 - bl .LmulPv320x32(PLT) - adds r0, r6, r8 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #332 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #368] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #364] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte 
Reload - adcs r0, r0, r11 - ldr r11, [sp, #360] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #328] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #356] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r8, r0, r1 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r6, [sp, #64] @ 4-byte Reload - adds r4, r6, r4 - ldr r6, [sp, #60] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r7, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r8, r10 - str r0, [sp, #32] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r4, r0 - add r0, sp, #280 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #320] - add r11, sp, #280 - ldr r6, [sp, #304] - ldr r7, [sp, #300] - ldr r5, [sp, #296] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #316] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #312] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #308] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #32] - add r0, sp, #232 - bl .LmulPv320x32(PLT) - adds r0, r4, r8 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #232 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #272] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #268] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #256 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r7, r0, r1 - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - ldm r11, {r4, r5, r11} - adc r8, r0, r1 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r6, [sp, #64] @ 4-byte Reload - adds r6, r6, r0 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #64] @ 
4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #48] @ 4-byte Spill - adcs r0, r7, r4 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r8, r10 - str r0, [sp, #32] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #184 - bl .LmulPv320x32(PLT) - ldr r0, [sp, #224] - add r11, sp, #184 - ldr r5, [sp, #208] - ldr r7, [sp, #204] - ldr r4, [sp, #200] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #220] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #216] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #212] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [r0, #36] - add r0, sp, #136 - bl .LmulPv320x32(PLT) - adds r0, r6, r8 - ldr r1, [sp, #60] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - ldr lr, [sp, #140] - ldr r6, [sp, #144] - add r8, sp, #152 - ldr r12, [sp, #148] - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - adcs r9, r1, r10 - ldr r1, [sp, #56] @ 4-byte Reload - adcs r10, r1, r11 - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r4 - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r1, r7 - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - adcs r1, r1, r5 - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #32] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #28] @ 4-byte Reload - adc r1, r1, r2 - ldr r2, [sp, #136] - str r1, [sp, #48] @ 4-byte Spill - adds r4, r0, r2 - ldr r0, [sp, #80] @ 4-byte Reload - adcs r9, r9, lr - adcs r11, r10, r6 - mul r1, r4, r0 - str r1, [sp, #44] @ 4-byte Spill - ldm r8, {r0, r1, r2, r3, r5, r7, r8} - ldr r6, [sp, #76] @ 4-byte Reload - adcs r10, r6, r12 - ldr r6, [sp, #72] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #84] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - mov r1, r5 - adcs r0, r0, r7 - str r0, [sp, #72] @ 4-byte Spill - add r0, sp, #88 - adc r8, r8, #0 - bl .LmulPv320x32(PLT) - add r3, sp, #88 - ldm r3, {r0, r1, r2, r3} - adds r0, r4, r0 - adcs r7, r9, r1 - ldr r0, [sp, #40] @ 4-byte Reload - adcs r9, r11, r2 - ldr r2, [sp, #104] - str r7, [sp, #48] @ 4-byte Spill - adcs lr, r10, r3 - str lr, [sp, #52] @ 4-byte Spill - adcs r6, r0, r2 - ldr r2, [sp, #108] - ldr r0, [sp, #80] @ 
4-byte Reload - str r6, [sp, #56] @ 4-byte Spill - adcs r0, r0, r2 - ldr r2, [sp, #112] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r4, r0, r2 - ldr r2, [sp, #116] - ldr r0, [sp, #64] @ 4-byte Reload - str r4, [sp, #60] @ 4-byte Spill - adcs r12, r0, r2 - ldr r2, [sp, #120] - ldr r0, [sp, #76] @ 4-byte Reload - str r12, [sp, #64] @ 4-byte Spill - adcs r0, r0, r2 - ldr r2, [sp, #124] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r11, r0, r2 - ldr r2, [sp, #128] - mov r0, r5 - str r11, [sp, #72] @ 4-byte Spill - adc r1, r8, r2 - str r1, [sp, #44] @ 4-byte Spill - ldmib r0, {r2, r8} - ldr r5, [r0, #16] - ldr r10, [r0] - ldr r3, [r0, #12] - str r5, [sp, #28] @ 4-byte Spill - ldr r5, [r0, #20] - subs r10, r7, r10 - str r5, [sp, #32] @ 4-byte Spill - ldr r5, [r0, #24] - str r5, [sp, #36] @ 4-byte Spill - ldr r5, [r0, #28] - str r5, [sp, #40] @ 4-byte Spill - mov r5, r0 - sbcs r0, r9, r2 - sbcs r2, lr, r8 - ldr r8, [r5, #32] - sbcs r7, r6, r3 - ldr r3, [r5, #36] - ldr r6, [sp, #80] @ 4-byte Reload - ldr r5, [sp, #76] @ 4-byte Reload - str r3, [sp, #84] @ 4-byte Spill - ldr r3, [sp, #28] @ 4-byte Reload - sbcs r6, r6, r3 - ldr r3, [sp, #32] @ 4-byte Reload - sbcs lr, r4, r3 - ldr r3, [sp, #36] @ 4-byte Reload - sbcs r4, r12, r3 - ldr r3, [sp, #40] @ 4-byte Reload - sbcs r12, r5, r3 - ldr r3, [sp, #84] @ 4-byte Reload - ldr r5, [sp, #48] @ 4-byte Reload - sbcs r11, r11, r8 - ldr r8, [sp, #68] @ 4-byte Reload - sbc r3, r1, r3 - asr r1, r3, #31 - cmp r1, #0 - movlt r10, r5 - movlt r0, r9 - str r10, [r8] - str r0, [r8, #4] - ldr r0, [sp, #52] @ 4-byte Reload - movlt r2, r0 - ldr r0, [sp, #56] @ 4-byte Reload - cmp r1, #0 - str r2, [r8, #8] - movlt r7, r0 - ldr r0, [sp, #80] @ 4-byte Reload - str r7, [r8, #12] - movlt r6, r0 - ldr r0, [sp, #60] @ 4-byte Reload - str r6, [r8, #16] - movlt lr, r0 - ldr r0, [sp, #64] @ 4-byte Reload - cmp r1, #0 - str lr, [r8, #20] - movlt r4, r0 - ldr r0, [sp, #76] @ 4-byte Reload - str r4, [r8, #24] - movlt r12, r0 - ldr r0, [sp, #72] @ 4-byte Reload - str r12, [r8, #28] - movlt r11, r0 - ldr r0, [sp, #44] @ 4-byte Reload - cmp r1, #0 - str r11, [r8, #32] - movlt r3, r0 - str r3, [r8, #36] - add sp, sp, #28 - add sp, sp, #1024 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end147: - .size mcl_fp_montNF10L, .Lfunc_end147-mcl_fp_montNF10L - .cantunwind - .fnend - - .globl mcl_fp_montRed10L - .align 2 - .type mcl_fp_montRed10L,%function -mcl_fp_montRed10L: @ @mcl_fp_montRed10L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #612 - sub sp, sp, #612 - mov r5, r2 - str r0, [sp, #120] @ 4-byte Spill - ldr r2, [r1, #4] - ldr r9, [r1] - ldr r11, [r1, #16] - ldr r0, [r5] - str r2, [sp, #60] @ 4-byte Spill - ldr r2, [r1, #8] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [r5, #4] - str r2, [sp, #56] @ 4-byte Spill - ldr r2, [r1, #12] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [r5, #8] - str r2, [sp, #52] @ 4-byte Spill - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [r5, #12] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [r5, #16] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [r5, #20] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [r5, #24] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [r5, #-4] - str r0, [sp, #124] @ 4-byte Spill - mul r2, r9, r0 - ldr r0, [r5, #28] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [r5, #32] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [r5, #36] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [r1, #64] 
- str r0, [sp, #64] @ 4-byte Spill - ldr r0, [r1, #68] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [r1, #72] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [r1, #76] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [r1, #32] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [r1, #36] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [r1, #40] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [r1, #48] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [r1, #52] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r1, #56] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [r1, #60] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [r1, #44] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [r1, #28] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [r1, #24] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [r1, #20] - mov r1, r5 - str r0, [sp, #8] @ 4-byte Spill - add r0, sp, #560 - bl .LmulPv320x32(PLT) - add lr, sp, #584 - ldr r10, [sp, #600] - ldr r8, [sp, #596] - add r7, sp, #564 - ldm lr, {r6, r12, lr} - ldr r4, [sp, #560] - ldm r7, {r0, r1, r2, r3, r7} - adds r4, r9, r4 - ldr r4, [sp, #60] @ 4-byte Reload - adcs r4, r4, r0 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r9, r0, r1 - ldr r0, [sp, #52] @ 4-byte Reload - mov r1, r5 - adcs r0, r0, r2 - str r0, [sp, #60] @ 4-byte Spill - adcs r0, r11, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - mul r2, r4, r0 - add r0, sp, #512 - bl .LmulPv320x32(PLT) - add r6, sp, #512 - ldr r12, [sp, #552] - ldr lr, [sp, #548] - ldr r2, [sp, #544] - ldr r10, [sp, #540] - ldr r11, [sp, #536] - ldr r7, [sp, #532] - ldr r8, [sp, #528] - ldm r6, {r1, r3, r6} - ldr r0, [sp, #524] - adds r1, r4, r1 - ldr r4, [sp, #124] @ 4-byte Reload - ldr r1, [sp, #60] @ 4-byte Reload - adcs r9, r9, r3 - adcs r1, r1, r6 - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, 
[sp, #24] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r9, r4 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #8] @ 4-byte Spill - add r0, sp, #464 - bl .LmulPv320x32(PLT) - ldr r1, [sp, #464] - ldr r0, [sp, #504] - add r12, sp, #468 - ldr r10, [sp, #500] - ldr r8, [sp, #496] - ldr lr, [sp, #492] - ldr r6, [sp, #488] - ldr r7, [sp, #484] - adds r1, r9, r1 - str r0, [sp, #4] @ 4-byte Spill - ldm r12, {r0, r2, r3, r12} - ldr r1, [sp, #60] @ 4-byte Reload - adcs r11, r1, r0 - ldr r0, [sp, #56] @ 4-byte Reload - ldr r1, [sp, #4] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #12] @ 4-byte Spill - add r0, sp, #416 - bl .LmulPv320x32(PLT) - add r7, sp, #416 - ldr r12, [sp, #456] - ldr lr, [sp, #452] - ldr r2, [sp, #448] - ldr r3, [sp, #444] - add r10, sp, #428 - ldm r7, {r1, r6, r7} - ldm r10, {r0, r8, r9, r10} - adds r1, r11, r1 - ldr r1, [sp, #60] @ 4-byte Reload - adcs r11, r1, r6 - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r7 - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, 
#56] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - add r0, sp, #368 - bl .LmulPv320x32(PLT) - add r10, sp, #400 - add r12, sp, #372 - ldm r10, {r8, r9, r10} - ldr r1, [sp, #368] - ldr lr, [sp, #396] - ldr r6, [sp, #392] - ldr r7, [sp, #388] - ldm r12, {r0, r2, r3, r12} - adds r1, r11, r1 - ldr r1, [sp, #60] @ 4-byte Reload - adcs r11, r1, r0 - ldr r0, [sp, #56] @ 4-byte Reload - mov r1, r5 - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #320 - bl .LmulPv320x32(PLT) - add r7, sp, #320 - ldr r12, [sp, #360] - ldr lr, [sp, #356] - ldr r2, [sp, #352] - ldr r3, [sp, #348] - add r10, sp, #332 - ldm r7, {r1, r6, r7} - ldm r10, {r0, r8, r9, r10} - adds r1, r11, r1 - ldr r1, [sp, #60] @ 4-byte Reload - adcs r11, r1, r6 - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r7 - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte 
Reload - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - add r0, sp, #272 - bl .LmulPv320x32(PLT) - add r10, sp, #304 - add r12, sp, #276 - ldm r10, {r8, r9, r10} - ldr r1, [sp, #272] - ldr lr, [sp, #300] - ldr r6, [sp, #296] - ldr r7, [sp, #292] - ldm r12, {r0, r2, r3, r12} - adds r1, r11, r1 - ldr r1, [sp, #60] @ 4-byte Reload - adcs r11, r1, r0 - ldr r0, [sp, #56] @ 4-byte Reload - mov r1, r5 - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - add r0, sp, #224 - bl .LmulPv320x32(PLT) - add r10, sp, #240 - add r6, sp, #224 - ldr r12, [sp, #264] - ldr lr, [sp, #260] - ldr r8, [sp, #256] - ldr r9, [sp, #252] - ldm r10, {r0, r7, r10} - ldm r6, {r1, r2, r3, r6} - adds r1, r11, r1 - ldr r1, [sp, #60] @ 4-byte Reload - adcs r4, r1, r2 - ldr r1, [sp, #56] @ 4-byte Reload - adcs r11, r1, r3 - ldr r1, [sp, #52] @ 4-byte Reload - adcs r6, r1, r6 - ldr r1, [sp, #32] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #124] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - mul r2, r4, r7 - adcs r0, r0, r10 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r9, r0, r9 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs 
r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - add r0, sp, #176 - bl .LmulPv320x32(PLT) - add r12, sp, #176 - ldm r12, {r0, r1, r3, r12} - ldr lr, [sp, #216] - adds r0, r4, r0 - ldr r4, [sp, #76] @ 4-byte Reload - adcs r10, r11, r1 - ldr r1, [sp, #192] - adcs r0, r6, r3 - mul r2, r10, r7 - ldr r7, [sp, #200] - ldr r6, [sp, #204] - ldr r3, [sp, #208] - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r12 - ldr r12, [sp, #212] - str r0, [sp, #44] @ 4-byte Spill - adcs r8, r4, r1 - ldr r0, [sp, #196] - ldr r1, [sp, #72] @ 4-byte Reload - adcs r11, r1, r0 - ldr r0, [sp, #68] @ 4-byte Reload - mov r1, r5 - adcs r9, r9, r7 - adcs r6, r0, r6 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - add r0, sp, #128 - bl .LmulPv320x32(PLT) - add r3, sp, #128 - ldm r3, {r0, r1, r2, r3} - adds r0, r10, r0 - ldr r0, [sp, #124] @ 4-byte Reload - adcs r10, r0, r1 - ldr r0, [sp, #44] @ 4-byte Reload - adcs r1, r0, r2 - ldr r0, [sp, #144] - adcs r2, r8, r3 - ldr r3, [sp, #40] @ 4-byte Reload - str r1, [sp, #36] @ 4-byte Spill - str r2, [sp, #44] @ 4-byte Spill - adcs r7, r11, r0 - ldr r0, [sp, #148] - str r7, [sp, #48] @ 4-byte Spill - adcs r12, r9, r0 - ldr r0, [sp, #152] - str r12, [sp, #52] @ 4-byte Spill - adcs r4, r6, r0 - ldr r0, [sp, #156] - str r4, [sp, #56] @ 4-byte Spill - adcs r5, r3, r0 - ldr r0, [sp, #160] - ldr r3, [sp, #68] @ 4-byte Reload - str r5, [sp, #60] @ 4-byte Spill - adcs r6, r3, r0 - ldr r0, [sp, #164] - ldr r3, [sp, #64] @ 4-byte Reload - str r6, [sp, #68] @ 4-byte Spill - adcs r8, r3, r0 - ldr r0, [sp, #168] - ldr r3, [sp, #76] @ 4-byte Reload - str r8, [sp, #124] @ 4-byte Spill - adcs lr, r3, r0 - ldr r0, [sp, #72] @ 4-byte Reload - adc r11, r0, #0 - ldr r0, [sp, #116] @ 4-byte Reload - subs r3, r10, r0 - ldr r0, [sp, #112] @ 4-byte Reload - sbcs r0, r1, r0 - ldr r1, [sp, #108] @ 4-byte Reload - sbcs r1, r2, r1 - ldr r2, [sp, #92] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #96] @ 4-byte Reload - sbcs r12, r12, r7 - ldr r7, [sp, #100] @ 4-byte Reload - sbcs r7, r4, r7 - ldr r4, [sp, #104] @ 4-byte Reload - sbcs r4, r5, r4 - ldr r5, [sp, #80] @ 4-byte Reload - sbcs r5, r6, r5 - ldr r6, [sp, #84] @ 4-byte Reload - sbcs r9, r8, r6 - ldr r6, [sp, #88] @ 4-byte Reload - sbcs r8, lr, r6 - sbc r6, r11, #0 - ands r11, r6, #1 - ldr r6, [sp, #120] @ 4-byte Reload - movne r3, r10 - str r3, [r6] - ldr r3, [sp, #36] @ 4-byte Reload - movne r0, r3 - str r0, [r6, #4] - ldr r0, [sp, #44] @ 4-byte Reload - movne r1, r0 - ldr r0, [sp, #48] @ 4-byte Reload - cmp r11, #0 - str r1, [r6, #8] - movne r2, r0 - ldr r0, [sp, #52] @ 4-byte Reload - str r2, [r6, #12] - movne r12, r0 - ldr r0, [sp, #56] @ 4-byte Reload - str r12, [r6, #16] - movne r7, r0 - ldr r0, [sp, #60] @ 4-byte Reload - cmp r11, #0 - str r7, [r6, #20] - movne r4, r0 - ldr r0, [sp, #68] @ 4-byte Reload - str r4, [r6, #24] - movne r5, r0 - ldr r0, [sp, #124] @ 4-byte Reload - str r5, [r6, #28] - movne r9, r0 - cmp r11, #0 - movne r8, lr - 
str r9, [r6, #32] - str r8, [r6, #36] - add sp, sp, #612 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end148: - .size mcl_fp_montRed10L, .Lfunc_end148-mcl_fp_montRed10L - .cantunwind - .fnend - - .globl mcl_fp_addPre10L - .align 2 - .type mcl_fp_addPre10L,%function -mcl_fp_addPre10L: @ @mcl_fp_addPre10L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #16 - sub sp, sp, #16 - ldm r1, {r3, r8, lr} - ldr r9, [r1, #12] - ldmib r2, {r5, r6, r7, r10} - ldr r4, [r2, #20] - ldr r11, [r2] - str r4, [sp] @ 4-byte Spill - ldr r4, [r2, #24] - adds r12, r11, r3 - ldr r11, [r2, #32] - adcs r5, r5, r8 - ldr r8, [r1, #36] - adcs r6, r6, lr - add lr, r1, #16 - adcs r7, r7, r9 - str r4, [sp, #4] @ 4-byte Spill - ldr r4, [r2, #28] - ldr r2, [r2, #36] - str r4, [sp, #12] @ 4-byte Spill - str r2, [sp, #8] @ 4-byte Spill - ldm lr, {r1, r2, r3, r4, lr} - str r12, [r0] - stmib r0, {r5, r6} - str r7, [r0, #12] - ldr r7, [sp] @ 4-byte Reload - adcs r1, r10, r1 - str r1, [r0, #16] - ldr r1, [sp, #4] @ 4-byte Reload - adcs r2, r7, r2 - str r2, [r0, #20] - ldr r2, [sp, #8] @ 4-byte Reload - adcs r1, r1, r3 - str r1, [r0, #24] - ldr r1, [sp, #12] @ 4-byte Reload - adcs r1, r1, r4 - str r1, [r0, #28] - adcs r1, r11, lr - adcs r2, r2, r8 - str r1, [r0, #32] - str r2, [r0, #36] - mov r0, #0 - adc r0, r0, #0 - add sp, sp, #16 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end149: - .size mcl_fp_addPre10L, .Lfunc_end149-mcl_fp_addPre10L - .cantunwind - .fnend - - .globl mcl_fp_subPre10L - .align 2 - .type mcl_fp_subPre10L,%function -mcl_fp_subPre10L: @ @mcl_fp_subPre10L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #24 - sub sp, sp, #24 - ldr r3, [r2, #4] - ldr r7, [r2] - ldr r11, [r1] - ldr r6, [r1, #4] - ldr r9, [r2, #8] - ldr r5, [r1, #8] - ldr lr, [r2, #12] - ldr r4, [r1, #12] - ldr r12, [r1, #16] - ldr r8, [r1, #20] - ldr r10, [r1, #24] - str r3, [sp] @ 4-byte Spill - ldr r3, [r2, #16] - subs r7, r11, r7 - ldr r11, [r2, #32] - str r7, [r0] - str r3, [sp, #8] @ 4-byte Spill - ldr r3, [r2, #20] - str r3, [sp, #12] @ 4-byte Spill - ldr r3, [r2, #24] - str r3, [sp, #16] @ 4-byte Spill - ldr r3, [r2, #28] - ldr r2, [r2, #36] - str r3, [sp, #20] @ 4-byte Spill - ldr r3, [r1, #28] - str r3, [sp, #4] @ 4-byte Spill - ldr r3, [sp] @ 4-byte Reload - sbcs r6, r6, r3 - sbcs r5, r5, r9 - str r6, [r0, #4] - str r5, [r0, #8] - ldr r5, [sp, #8] @ 4-byte Reload - sbcs r4, r4, lr - ldr lr, [r1, #32] - ldr r1, [r1, #36] - str r4, [r0, #12] - ldr r4, [sp, #12] @ 4-byte Reload - sbcs r3, r12, r5 - str r3, [r0, #16] - ldr r3, [sp, #16] @ 4-byte Reload - sbcs r7, r8, r4 - str r7, [r0, #20] - ldr r7, [sp, #4] @ 4-byte Reload - sbcs r3, r10, r3 - str r3, [r0, #24] - ldr r3, [sp, #20] @ 4-byte Reload - sbcs r3, r7, r3 - str r3, [r0, #28] - sbcs r3, lr, r11 - sbcs r1, r1, r2 - str r3, [r0, #32] - str r1, [r0, #36] - mov r0, #0 - sbc r0, r0, #0 - and r0, r0, #1 - add sp, sp, #24 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end150: - .size mcl_fp_subPre10L, .Lfunc_end150-mcl_fp_subPre10L - .cantunwind - .fnend - - .globl mcl_fp_shr1_10L - .align 2 - .type mcl_fp_shr1_10L,%function -mcl_fp_shr1_10L: @ @mcl_fp_shr1_10L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r11, lr} - push {r4, r5, r6, r7, r8, r9, r11, lr} - ldr lr, [r1, #32] - ldr r12, [r1, #36] - ldr r8, [r1, #28] - ldm r1, {r1, r2, r3, r4, r5, r6, r9} - lsrs r7, r2, #1 - rrx r1, 
[diff hunk of compiler-generated 32-bit ARM assembly from the vendored mcl library, one instruction per removed (`-`) line in the original patch: the tail of mcl_fp_shr1_10L; the complete bodies of mcl_fp_add10L, mcl_fp_addNF10L, mcl_fp_sub10L, mcl_fp_subNF10L, mcl_fpDbl_add10L, mcl_fpDbl_sub10L, the local helper .LmulPv352x32, mcl_fp_mulUnitPre11L, mcl_fpDbl_mulPre11L, mcl_fpDbl_sqrPre11L; and the opening of mcl_fp_mont11L, which continues past this span]
Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldm r11, {r4, r5, r6, r9, r10, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #76] @ 4-byte Reload - adds r8, r7, r0 - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #92] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #32] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - mul r2, r8, r0 - add r0, sp, #384 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #428] - ldr r1, [sp, #84] @ 4-byte Reload - ldr r5, [sp, #408] - ldr r6, [sp, #404] - ldr r9, [sp, #400] - ldr r10, [sp, #384] - ldr r11, [sp, #388] - ldr r7, [sp, #392] - ldr r4, [sp, #396] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #424] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #420] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #416] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #412] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - ldr r2, [r0, #32] - add r0, sp, #336 - bl .LmulPv352x32(PLT) - adds r0, r8, r10 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #340 - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #364 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - ldm r11, {r5, r6, r7, r8, r11} - ldr r4, [sp, #336] - adc r10, r0, #0 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r9, [sp, 
#76] @ 4-byte Reload - adds r9, r9, r4 - ldr r4, [sp, #68] @ 4-byte Reload - adcs r0, r4, r0 - mov r4, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #92] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #36] @ 4-byte Spill - adcs r0, r10, r11 - str r0, [sp, #32] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - mul r2, r9, r0 - add r0, sp, #288 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #332] - add r11, sp, #288 - ldr r7, [sp, #312] - ldr r5, [sp, #308] - ldr r8, [sp, #304] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #328] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #324] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #320] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #316] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r9, r10, r11} - ldr r0, [sp, #80] @ 4-byte Reload - ldr r1, [sp, #84] @ 4-byte Reload - ldr r6, [sp, #300] - ldr r2, [r0, #36] - add r0, sp, #240 - bl .LmulPv352x32(PLT) - adds r0, r4, r9 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #240 - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #264 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldm r11, {r4, r5, r6, r9, r10, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #76] @ 4-byte Reload - adds r8, r7, r0 - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #92] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #60] @ 
4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #32] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - mul r2, r8, r0 - add r0, sp, #192 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #236] - ldr r1, [sp, #84] @ 4-byte Reload - ldr r5, [sp, #216] - ldr r6, [sp, #212] - ldr r9, [sp, #208] - ldr r10, [sp, #192] - ldr r11, [sp, #196] - ldr r7, [sp, #200] - ldr r4, [sp, #204] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #232] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #228] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #224] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #220] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - ldr r2, [r0, #40] - add r0, sp, #144 - bl .LmulPv352x32(PLT) - adds r0, r8, r10 - ldr r1, [sp, #68] @ 4-byte Reload - ldr r2, [sp, #8] @ 4-byte Reload - add lr, sp, #144 - add r12, sp, #160 - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r11 - adcs r10, r1, r7 - ldr r1, [sp, #64] @ 4-byte Reload - adcs r11, r1, r4 - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, r9 - add r9, sp, #180 - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r6 - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r5 - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #12] @ 4-byte Reload - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #32] @ 4-byte Reload - adcs r1, r1, r2 - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #28] @ 4-byte Reload - adc r1, r1, #0 - str r1, [sp, #48] @ 4-byte Spill - ldm lr, {r2, r6, lr} - ldr r5, [sp, #156] - adds r4, r0, r2 - ldr r0, [sp, #88] @ 4-byte Reload - adcs r6, r10, r6 - mul r1, r4, r0 - str r1, [sp, #44] @ 4-byte Spill - ldm r9, {r7, r8, r9} - ldm r12, {r0, r1, r2, r3, r12} - str r6, [sp, #40] @ 4-byte Spill - adcs r6, r11, lr - ldr r10, [sp, #92] @ 4-byte Reload - str r6, [sp, #36] @ 4-byte Spill - ldr r6, [sp, #84] @ 4-byte Reload - adcs r11, r6, r5 - ldr r6, [sp, #80] @ 4-byte Reload - adcs r6, r6, r0 - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r10 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r5, r0, r3 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, 
r8 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r8, r0, r9 - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - add r0, sp, #96 - bl .LmulPv352x32(PLT) - add r7, sp, #96 - ldm r7, {r0, r1, r3, r7} - adds r0, r4, r0 - ldr r0, [sp, #40] @ 4-byte Reload - adcs lr, r0, r1 - ldr r0, [sp, #36] @ 4-byte Reload - str lr, [sp, #44] @ 4-byte Spill - adcs r1, r0, r3 - ldr r3, [sp, #112] - ldr r0, [sp, #84] @ 4-byte Reload - adcs r9, r11, r7 - str r1, [sp, #48] @ 4-byte Spill - adcs r6, r6, r3 - ldr r3, [sp, #116] - str r6, [sp, #52] @ 4-byte Spill - adcs r0, r0, r3 - ldr r3, [sp, #120] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r12, r0, r3 - ldr r3, [sp, #124] - ldr r0, [sp, #88] @ 4-byte Reload - str r12, [sp, #56] @ 4-byte Spill - adcs r5, r5, r3 - ldr r3, [sp, #128] - str r5, [sp, #60] @ 4-byte Spill - adcs r0, r0, r3 - ldr r3, [sp, #132] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r3 - ldr r3, [sp, #136] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - ldr r3, [sp, #140] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r8, r8, r3 - adc r0, r0, #0 - str r8, [sp, #68] @ 4-byte Spill - str r0, [sp, #64] @ 4-byte Spill - ldmib r10, {r3, r7} - ldr r4, [r10, #16] - ldr r11, [r10] - ldr r2, [r10, #12] - mov r0, r10 - str r4, [sp, #28] @ 4-byte Spill - ldr r4, [r10, #20] - subs r11, lr, r11 - ldr lr, [sp, #84] @ 4-byte Reload - str r4, [sp, #32] @ 4-byte Spill - ldr r4, [r10, #24] - str r4, [sp, #36] @ 4-byte Spill - ldr r4, [r10, #28] - sbcs r10, r1, r3 - mov r3, r9 - ldr r9, [r0, #32] - sbcs r1, r3, r7 - ldr r7, [r0, #36] - ldr r0, [r0, #40] - sbcs r2, r6, r2 - ldr r6, [sp, #36] @ 4-byte Reload - str r4, [sp, #40] @ 4-byte Spill - ldr r4, [sp, #28] @ 4-byte Reload - sbcs lr, lr, r4 - ldr r4, [sp, #32] @ 4-byte Reload - sbcs r4, r12, r4 - ldr r12, [sp, #88] @ 4-byte Reload - sbcs r5, r5, r6 - ldr r6, [sp, #40] @ 4-byte Reload - sbcs r12, r12, r6 - ldr r6, [sp, #80] @ 4-byte Reload - sbcs r9, r6, r9 - ldr r6, [sp, #76] @ 4-byte Reload - sbcs r7, r6, r7 - ldr r6, [sp, #64] @ 4-byte Reload - sbcs r0, r8, r0 - ldr r8, [sp, #72] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - sbc r6, r6, #0 - ands r6, r6, #1 - movne r11, r0 - ldr r0, [sp, #48] @ 4-byte Reload - movne r1, r3 - str r11, [r8] - movne r10, r0 - cmp r6, #0 - ldr r0, [sp, #92] @ 4-byte Reload - str r10, [r8, #4] - str r1, [r8, #8] - ldr r1, [sp, #52] @ 4-byte Reload - movne r2, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r2, [r8, #12] - movne lr, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str lr, [r8, #16] - movne r4, r1 - ldr r1, [sp, #60] @ 4-byte Reload - cmp r6, #0 - str r4, [r8, #20] - movne r5, r1 - ldr r1, [sp, #88] @ 4-byte Reload - str r5, [r8, #24] - movne r12, r1 - ldr r1, [sp, #80] @ 4-byte Reload - str r12, [r8, #28] - movne r9, r1 - ldr r1, [sp, #76] @ 4-byte Reload - cmp r6, #0 - str r9, [r8, #32] - movne r7, r1 - ldr r1, [sp, #68] @ 4-byte Reload - str r7, [r8, #36] - movne r0, r1 - str r0, [r8, #40] - add sp, sp, #132 - add sp, sp, #1024 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end162: - .size mcl_fp_mont11L, .Lfunc_end162-mcl_fp_mont11L - .cantunwind - .fnend - - .globl mcl_fp_montNF11L - .align 2 - .type mcl_fp_montNF11L,%function -mcl_fp_montNF11L: @ @mcl_fp_montNF11L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, 
r11, lr} - .pad #124 - sub sp, sp, #124 - .pad #1024 - sub sp, sp, #1024 - str r2, [sp, #72] @ 4-byte Spill - ldr r5, [r3, #-4] - ldr r2, [r2] - add r6, sp, #1024 - str r0, [sp, #68] @ 4-byte Spill - str r3, [sp, #84] @ 4-byte Spill - str r1, [sp, #76] @ 4-byte Spill - mov r4, r3 - add r0, r6, #72 - str r5, [sp, #80] @ 4-byte Spill - bl .LmulPv352x32(PLT) - ldr r0, [sp, #1100] - ldr r10, [sp, #1096] - add r9, sp, #1024 - mov r1, r4 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1104] - mul r2, r10, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1108] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1140] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #1136] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #1132] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1128] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1124] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1120] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1116] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1112] - str r0, [sp, #24] @ 4-byte Spill - add r0, r9, #24 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #1092] - ldr r1, [sp, #76] @ 4-byte Reload - ldr r5, [sp, #1072] - ldr r7, [sp, #1068] - ldr r8, [sp, #1064] - ldr r11, [sp, #1048] - ldr r4, [sp, #1052] - ldr r6, [sp, #1056] - ldr r9, [sp, #1060] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1088] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1084] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #1080] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #1076] - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - ldr r2, [r0, #4] - add r0, sp, #1000 - bl .LmulPv352x32(PLT) - adds r0, r11, r10 - ldr r1, [sp, #4] @ 4-byte Reload - add r11, sp, #1024 - add lr, sp, #1000 - ldr r10, [sp, #1044] - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r4, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - ldm r11, {r4, r5, r6, r8, r11} - adc r9, r1, r0 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #52] @ 4-byte Reload - adds r7, r7, r0 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, 
#44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #32] @ 4-byte Spill - adcs r0, r9, r11 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r10, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #952 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #996] - add r11, sp, #952 - ldr r6, [sp, #976] - ldr r4, [sp, #972] - ldr r8, [sp, #968] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #992] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #988] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #984] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #980] - str r0, [sp, #4] @ 4-byte Spill - ldm r11, {r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r5, [sp, #964] - ldr r2, [r0, #8] - add r0, sp, #904 - bl .LmulPv352x32(PLT) - adds r0, r7, r9 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #908 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #948] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #932 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #24] @ 4-byte Spill - ldm r11, {r5, r6, r9, r11} - ldr r4, [sp, #904] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r8, [sp, #64] @ 4-byte Reload - ldr r7, [sp, #60] @ 4-byte Reload - adds r4, r8, r4 - adcs r0, r7, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r10, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r4, r0 - add r0, sp, #856 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #900] - add r11, sp, #856 - ldr r7, [sp, #880] - ldr 
r5, [sp, #876] - ldr r8, [sp, #872] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #896] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #892] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #888] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #884] - str r0, [sp, #4] @ 4-byte Spill - ldm r11, {r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r6, [sp, #868] - ldr r2, [r0, #12] - add r0, sp, #808 - bl .LmulPv352x32(PLT) - adds r0, r4, r9 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #808 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #852] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #832 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - ldm r11, {r4, r5, r6, r8, r11} - adc r9, r0, r1 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #64] @ 4-byte Reload - adds r7, r7, r0 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #32] @ 4-byte Spill - adcs r0, r9, r11 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r10, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #760 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #804] - add r11, sp, #760 - ldr r6, [sp, #784] - ldr r4, [sp, #780] - ldr r8, [sp, #776] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #800] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #796] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #792] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #788] - str r0, [sp, #4] @ 4-byte Spill - ldm r11, {r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r5, [sp, #772] - ldr r2, [r0, #16] - add r0, sp, #712 - bl .LmulPv352x32(PLT) - adds r0, r7, r9 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #716 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #756] - str r0, [sp, #64] @ 
4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #740 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #24] @ 4-byte Spill - ldm r11, {r5, r6, r9, r11} - ldr r4, [sp, #712] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r8, [sp, #64] @ 4-byte Reload - ldr r7, [sp, #60] @ 4-byte Reload - adds r4, r8, r4 - adcs r0, r7, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r10, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r4, r0 - add r0, sp, #664 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #708] - add r11, sp, #664 - ldr r7, [sp, #688] - ldr r5, [sp, #684] - ldr r8, [sp, #680] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #704] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #700] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #696] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #692] - str r0, [sp, #4] @ 4-byte Spill - ldm r11, {r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r6, [sp, #676] - ldr r2, [r0, #20] - add r0, sp, #616 - bl .LmulPv352x32(PLT) - adds r0, r4, r9 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #616 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #660] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #640 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 
4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - ldm r11, {r4, r5, r6, r8, r11} - adc r9, r0, r1 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #64] @ 4-byte Reload - adds r7, r7, r0 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #32] @ 4-byte Spill - adcs r0, r9, r11 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r10, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #568 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #612] - add r11, sp, #568 - ldr r6, [sp, #592] - ldr r4, [sp, #588] - ldr r8, [sp, #584] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #608] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #604] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #600] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #596] - str r0, [sp, #4] @ 4-byte Spill - ldm r11, {r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r5, [sp, #580] - ldr r2, [r0, #24] - add r0, sp, #520 - bl .LmulPv352x32(PLT) - adds r0, r7, r9 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #524 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #564] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #548 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #24] @ 4-byte Spill - ldm r11, {r5, r6, r9, r11} - ldr r4, [sp, #520] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r8, [sp, #64] @ 4-byte Reload - ldr r7, 
[sp, #60] @ 4-byte Reload - adds r4, r8, r4 - adcs r0, r7, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r10, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r4, r0 - add r0, sp, #472 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #516] - add r11, sp, #472 - ldr r7, [sp, #496] - ldr r5, [sp, #492] - ldr r8, [sp, #488] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #512] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #508] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #504] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #500] - str r0, [sp, #4] @ 4-byte Spill - ldm r11, {r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r6, [sp, #484] - ldr r2, [r0, #28] - add r0, sp, #424 - bl .LmulPv352x32(PLT) - adds r0, r4, r9 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #424 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #468] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #448 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - ldm r11, {r4, r5, r6, r8, r11} - adc r9, r0, r1 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #64] @ 4-byte Reload - adds r7, r7, r0 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte 
Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #32] @ 4-byte Spill - adcs r0, r9, r11 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r10, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #376 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #420] - add r11, sp, #376 - ldr r6, [sp, #400] - ldr r4, [sp, #396] - ldr r8, [sp, #392] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #416] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #412] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #408] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #404] - str r0, [sp, #4] @ 4-byte Spill - ldm r11, {r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r5, [sp, #388] - ldr r2, [r0, #32] - add r0, sp, #328 - bl .LmulPv352x32(PLT) - adds r0, r7, r9 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #332 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #372] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #356 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #24] @ 4-byte Spill - ldm r11, {r5, r6, r9, r11} - ldr r4, [sp, #328] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r8, [sp, #64] @ 4-byte Reload - ldr r7, [sp, #60] @ 4-byte Reload - adds r4, r8, r4 - adcs r0, r7, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r10, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r4, r0 - add r0, sp, #280 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #324] - add r11, sp, #280 - ldr r7, [sp, #304] - ldr r5, [sp, #300] - ldr r8, [sp, #296] - str r0, 
[sp, #20] @ 4-byte Spill - ldr r0, [sp, #320] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #316] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #312] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #308] - str r0, [sp, #4] @ 4-byte Spill - ldm r11, {r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r6, [sp, #292] - ldr r2, [r0, #36] - add r0, sp, #232 - bl .LmulPv352x32(PLT) - adds r0, r4, r9 - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #232 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #276] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #256 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - ldm r11, {r4, r5, r6, r8, r11} - adc r9, r0, r1 - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #64] @ 4-byte Reload - adds r7, r7, r0 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #32] @ 4-byte Spill - adcs r0, r9, r11 - str r0, [sp, #28] @ 4-byte Spill - adc r0, r10, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #184 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #228] - add r11, sp, #184 - ldr r6, [sp, #208] - ldr r4, [sp, #204] - ldr r8, [sp, #200] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #224] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #220] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #216] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #212] - str r0, [sp, #4] @ 4-byte Spill - ldm r11, {r9, r10, r11} - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r5, [sp, #196] - ldr r2, [r0, #40] - add r0, sp, #136 - bl .LmulPv352x32(PLT) - adds r0, r7, r9 - ldr r1, [sp, #60] @ 4-byte Reload - ldr r2, [sp, #4] @ 4-byte Reload - ldr lr, [sp, #140] - add r9, sp, #172 - add r12, sp, #152 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r10 - adcs r11, 
r1, r11 - ldr r1, [sp, #56] @ 4-byte Reload - adcs r10, r1, r5 - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r8 - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r1, r4 - ldr r4, [sp, #148] - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - adcs r1, r1, r6 - ldr r6, [sp, #144] - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #8] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #12] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #32] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #28] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #24] @ 4-byte Reload - adc r1, r1, r2 - ldr r2, [sp, #136] - str r1, [sp, #44] @ 4-byte Spill - adds r5, r0, r2 - ldr r0, [sp, #80] @ 4-byte Reload - adcs r11, r11, lr - adcs r6, r10, r6 - mul r1, r5, r0 - str r1, [sp, #40] @ 4-byte Spill - ldm r9, {r7, r8, r9} - ldm r12, {r0, r1, r2, r3, r12} - str r6, [sp, #32] @ 4-byte Spill - ldr r6, [sp, #76] @ 4-byte Reload - adcs r10, r6, r4 - ldr r4, [sp, #72] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #84] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #40] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #24] @ 4-byte Spill - add r0, sp, #88 - adc r9, r9, #0 - bl .LmulPv352x32(PLT) - add r7, sp, #88 - ldm r7, {r0, r1, r3, r7} - adds r0, r5, r0 - ldr r0, [sp, #32] @ 4-byte Reload - adcs r8, r11, r1 - str r8, [sp, #28] @ 4-byte Spill - adcs r6, r0, r3 - ldr r3, [sp, #104] - ldr r0, [sp, #36] @ 4-byte Reload - adcs r2, r10, r7 - str r6, [sp, #44] @ 4-byte Spill - str r2, [sp, #48] @ 4-byte Spill - adcs r7, r0, r3 - ldr r3, [sp, #108] - ldr r0, [sp, #72] @ 4-byte Reload - str r7, [sp, #52] @ 4-byte Spill - adcs r0, r0, r3 - ldr r3, [sp, #112] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r5, r0, r3 - ldr r3, [sp, #116] - ldr r0, [sp, #64] @ 4-byte Reload - str r5, [sp, #56] @ 4-byte Spill - adcs lr, r0, r3 - ldr r3, [sp, #120] - ldr r0, [sp, #80] @ 4-byte Reload - str lr, [sp, #60] @ 4-byte Spill - adcs r0, r0, r3 - ldr r3, [sp, #124] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - ldr r3, [sp, #128] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r10, r0, r3 - ldr r3, [sp, #132] - str r10, [sp, #64] @ 4-byte Spill - adc r12, r9, r3 - mov r3, r4 - str r12, [sp, #40] @ 4-byte Spill - ldmib r3, {r0, r1, r9} - ldr r4, [r3, #16] - ldr r11, [r3] - str r4, [sp, #20] @ 4-byte Spill - ldr r4, [r3, #20] - subs r11, r8, r11 - ldr r8, [r3, #36] - sbcs r0, r6, r0 - sbcs r1, r2, r1 - sbcs r2, r7, r9 - ldr r9, [r3, #32] - ldr r7, [sp, #80] @ 4-byte Reload - str r4, [sp, #24] @ 4-byte Spill - ldr r4, [r3, #24] - str r4, [sp, #32] @ 4-byte Spill - ldr r4, [r3, #28] - ldr r3, [r3, #40] 
- str r4, [sp, #36] @ 4-byte Spill - str r3, [sp, #84] @ 4-byte Spill - ldr r3, [sp, #72] @ 4-byte Reload - ldr r4, [sp, #20] @ 4-byte Reload - ldr r6, [sp, #36] @ 4-byte Reload - sbcs r3, r3, r4 - ldr r4, [sp, #24] @ 4-byte Reload - sbcs r4, r5, r4 - ldr r5, [sp, #32] @ 4-byte Reload - sbcs r5, lr, r5 - sbcs lr, r7, r6 - ldr r7, [sp, #76] @ 4-byte Reload - ldr r6, [sp, #84] @ 4-byte Reload - sbcs r9, r7, r9 - ldr r7, [sp, #28] @ 4-byte Reload - sbcs r10, r10, r8 - ldr r8, [sp, #68] @ 4-byte Reload - sbc r12, r12, r6 - asr r6, r12, #31 - cmp r6, #0 - movlt r11, r7 - ldr r7, [sp, #44] @ 4-byte Reload - str r11, [r8] - movlt r0, r7 - str r0, [r8, #4] - ldr r0, [sp, #48] @ 4-byte Reload - movlt r1, r0 - ldr r0, [sp, #52] @ 4-byte Reload - cmp r6, #0 - str r1, [r8, #8] - movlt r2, r0 - ldr r0, [sp, #72] @ 4-byte Reload - str r2, [r8, #12] - movlt r3, r0 - ldr r0, [sp, #56] @ 4-byte Reload - str r3, [r8, #16] - movlt r4, r0 - ldr r0, [sp, #60] @ 4-byte Reload - cmp r6, #0 - str r4, [r8, #20] - movlt r5, r0 - ldr r0, [sp, #80] @ 4-byte Reload - str r5, [r8, #24] - movlt lr, r0 - ldr r0, [sp, #76] @ 4-byte Reload - str lr, [r8, #28] - movlt r9, r0 - ldr r0, [sp, #64] @ 4-byte Reload - cmp r6, #0 - movlt r10, r0 - ldr r0, [sp, #40] @ 4-byte Reload - movlt r12, r0 - add r0, r8, #32 - stm r0, {r9, r10, r12} - add sp, sp, #124 - add sp, sp, #1024 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end163: - .size mcl_fp_montNF11L, .Lfunc_end163-mcl_fp_montNF11L - .cantunwind - .fnend - - .globl mcl_fp_montRed11L - .align 2 - .type mcl_fp_montRed11L,%function -mcl_fp_montRed11L: @ @mcl_fp_montRed11L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #676 - sub sp, sp, #676 - mov r10, r2 - str r0, [sp, #136] @ 4-byte Spill - ldr r2, [r1, #4] - ldr r5, [r1] - ldr r0, [r10] - str r2, [sp, #64] @ 4-byte Spill - ldr r2, [r1, #8] - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [r10, #4] - str r2, [sp, #60] @ 4-byte Spill - ldr r2, [r1, #12] - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [r10, #8] - str r2, [sp, #56] @ 4-byte Spill - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [r10, #12] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [r10, #16] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [r10, #20] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [r10, #24] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [r10, #-4] - str r0, [sp, #140] @ 4-byte Spill - mul r2, r5, r0 - ldr r0, [r10, #28] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [r10, #32] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [r10, #36] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [r10, #40] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [r1, #64] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [r1, #68] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [r1, #72] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [r1, #76] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [r1, #80] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [r1, #84] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [r1, #32] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r1, #36] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [r1, #40] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [r1, #44] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [r1, #48] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [r1, #56] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [r1, #60] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [r1, #52] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [r1, #28] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [r1, #24] - str r0, [sp, 
#48] @ 4-byte Spill - ldr r0, [r1, #20] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [r1, #16] - mov r1, r10 - str r0, [sp, #8] @ 4-byte Spill - add r0, sp, #624 - bl .LmulPv352x32(PLT) - add r11, sp, #656 - add lr, sp, #624 - ldm r11, {r4, r8, r9, r11} - ldr r7, [sp, #652] - ldr r6, [sp, #648] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r5, r0 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r5, r0, r1 - ldr r0, [sp, #60] @ 4-byte Reload - mov r1, r10 - adcs r0, r0, r2 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #140] @ 4-byte Reload - mul r2, r5, r0 - add r0, sp, #576 - bl .LmulPv352x32(PLT) - ldr r4, [sp, #576] - add r9, sp, #584 - ldr r12, [sp, #620] - ldr lr, [sp, #616] - ldr r2, [sp, #612] - ldr r3, [sp, #608] - ldr r11, [sp, #604] - ldr r7, [sp, #600] - ldr r6, [sp, #580] - ldm r9, {r0, r1, r8, r9} - adds r4, r5, r4 - ldr r4, [sp, #64] @ 4-byte Reload - adcs r5, r4, r6 - ldr r4, [sp, #60] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #140] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r10 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - mov r9, r5 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r5, r4 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 
4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #12] @ 4-byte Spill - add r0, sp, #528 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #572] - add r11, sp, #560 - add lr, sp, #528 - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r5, r8, r11} - ldr r6, [sp, #556] - ldr r7, [sp, #552] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r9, r0 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r1, r0, r1 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #8] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - mov r5, r1 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r1, r4 - mov r1, r10 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - add r0, sp, #480 - bl .LmulPv352x32(PLT) - ldr r4, [sp, #480] - add r9, sp, #488 - ldr r12, [sp, #524] - ldr lr, [sp, #520] - ldr r2, [sp, #516] - ldr r3, [sp, #512] - ldr r11, [sp, #508] - ldr r7, [sp, #504] - ldr r6, [sp, #484] - ldm r9, {r0, r1, r8, r9} - adds r4, r5, r4 - ldr r4, [sp, #64] @ 4-byte Reload - adcs r5, r4, r6 - ldr r4, [sp, #60] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #140] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - 
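The unrolled carry chains above belong to mcl_fp_montRed11L, the generated 11-limb (352-bit) Montgomery reduction. As a reading aid, here is a minimal C sketch of the word-serial reduction this body appears to implement; the names (mont_red11, rp) are illustrative, not mcl's API, and rp stands for the -p^(-1) mod 2^32 constant the assembly loads from just below the modulus ([r10, #-4]).

    #include <stdint.h>

    #define N 11  /* 11 limbs of 32 bits = 352-bit operands */

    /* Sketch only: reduces t (2N limbs, consumed in place) to t / 2^(32N) mod p. */
    static void mont_red11(uint32_t z[N], uint32_t t[2 * N],
                           const uint32_t p[N], uint32_t rp)
    {
        uint32_t top = 0;                      /* carry out of limb 2N-1 */
        for (int i = 0; i < N; i++) {
            uint32_t q = t[i] * rp;            /* makes t[i] + q*p[0] == 0 mod 2^32 */
            uint64_t c = 0;
            for (int j = 0; j < N; j++) {
                uint64_t v = (uint64_t)q * p[j] + t[i + j] + c;
                t[i + j] = (uint32_t)v;
                c = v >> 32;
            }
            for (int k = i + N; c != 0; k++) { /* ripple the carry upward */
                if (k == 2 * N) { top += (uint32_t)c; break; }
                uint64_t v = (uint64_t)t[k] + c;
                t[k] = (uint32_t)v;
                c = v >> 32;
            }
        }
        /* result is t / 2^(32N); one conditional subtraction of p remains */
        uint32_t d[N], borrow = 0;
        for (int j = 0; j < N; j++) {
            uint64_t v = (uint64_t)t[N + j] - p[j] - borrow;
            d[j] = (uint32_t)v;
            borrow = (uint32_t)(v >> 63);      /* 1 iff the subtraction wrapped */
        }
        int use_d = top || !borrow;            /* mirrors the movne selection below */
        for (int j = 0; j < N; j++)
            z[j] = use_d ? d[j] : t[N + j];
    }

The per-limb conditional select at the end corresponds to the flag-driven movne sequence that closes the generated function, rather than a data-dependent branch.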
mov r1, r10 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r5, r4 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #432 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #476] - add r11, sp, #460 - add lr, sp, #432 - str r0, [sp, #16] @ 4-byte Spill - ldm r11, {r6, r8, r9, r11} - ldr r7, [sp, #456] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r5, r0 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r1, r0, r1 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r1, r4 - mov r4, r1 - mov r1, r10 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - add r0, sp, #384 - bl .LmulPv352x32(PLT) - ldr r6, [sp, #384] - add r9, sp, #392 - ldr r12, [sp, #428] - ldr lr, [sp, #424] - ldr r2, [sp, #420] - ldr r3, [sp, #416] - ldr 
r11, [sp, #412] - ldr r5, [sp, #408] - ldr r7, [sp, #388] - ldm r9, {r0, r1, r8, r9} - adds r4, r4, r6 - ldr r4, [sp, #64] @ 4-byte Reload - adcs r6, r4, r7 - ldr r4, [sp, #60] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #140] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r10 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r5 - mov r5, r6 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r6, r4 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - add r0, sp, #336 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #380] - add r11, sp, #364 - add lr, sp, #336 - str r0, [sp, #24] @ 4-byte Spill - ldm r11, {r6, r8, r9, r11} - ldr r7, [sp, #360] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r5, r0 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r1, r0, r1 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r1, r4 - mov r4, r1 - mov r1, r10 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #32] @ 4-byte Spill - add r0, sp, #288 - bl .LmulPv352x32(PLT) - ldr r6, [sp, #288] - add r9, sp, #296 - ldr r12, [sp, #332] - ldr lr, [sp, #328] - ldr r2, [sp, #324] - ldr r3, [sp, #320] - ldr r11, 
[sp, #316] - ldr r5, [sp, #312] - ldr r7, [sp, #292] - ldm r9, {r0, r1, r8, r9} - adds r4, r4, r6 - ldr r4, [sp, #64] @ 4-byte Reload - adcs r6, r4, r7 - ldr r4, [sp, #60] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #140] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r10 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r5 - mov r5, r6 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r6, r4 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #32] @ 4-byte Spill - add r0, sp, #240 - bl .LmulPv352x32(PLT) - ldr r0, [sp, #284] - add r11, sp, #264 - add lr, sp, #240 - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r6, r7, r8, r9, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r5, r0 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r5, r0, r1 - ldr r0, [sp, #60] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r5, r4 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r11, r0, r11 - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r10 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - add r0, sp, #192 - bl .LmulPv352x32(PLT) - add r6, sp, #192 - add r7, sp, #208 - ldm r6, {r0, r1, r3, r6} - ldr r12, [sp, #236] - ldr lr, [sp, #232] - adds r0, r5, r0 - ldr r0, [sp, #12] @ 4-byte Reload - adcs r8, r0, r1 - ldr r0, [sp, #16] @ 4-byte Reload - mul r2, r8, r4 - adcs r0, r0, r3 - ldr r3, [sp, #228] - str r0, [sp, #140] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #224] - str r0, [sp, #52] @ 4-byte Spill - ldm r7, {r0, r1, r4, r7} - ldr r5, [sp, #88] @ 4-byte Reload - adcs r9, r5, r0 - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, 
r10 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r4, r0, r4 - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r5, r0, r6 - ldr r0, [sp, #68] @ 4-byte Reload - adcs r11, r11, r3 - adcs r0, r0, lr - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r6, r0, #0 - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - add r0, sp, #144 - bl .LmulPv352x32(PLT) - add r3, sp, #144 - ldm r3, {r0, r1, r2, r3} - adds r0, r8, r0 - ldr r0, [sp, #140] @ 4-byte Reload - adcs r12, r0, r1 - ldr r0, [sp, #52] @ 4-byte Reload - ldr r1, [sp, #160] - str r12, [sp, #44] @ 4-byte Spill - adcs r2, r0, r2 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r3, r9, r3 - str r2, [sp, #52] @ 4-byte Spill - str r3, [sp, #56] @ 4-byte Spill - adcs r7, r0, r1 - ldr r1, [sp, #164] - ldr r0, [sp, #76] @ 4-byte Reload - str r7, [sp, #60] @ 4-byte Spill - adcs r8, r4, r1 - ldr r1, [sp, #168] - str r8, [sp, #64] @ 4-byte Spill - adcs r4, r0, r1 - ldr r1, [sp, #172] - ldr r0, [sp, #84] @ 4-byte Reload - str r4, [sp, #68] @ 4-byte Spill - adcs r5, r5, r1 - ldr r1, [sp, #176] - str r5, [sp, #72] @ 4-byte Spill - adcs r11, r11, r1 - ldr r1, [sp, #180] - str r11, [sp, #76] @ 4-byte Spill - adcs r9, r0, r1 - ldr r1, [sp, #184] - ldr r0, [sp, #88] @ 4-byte Reload - str r9, [sp, #84] @ 4-byte Spill - adcs lr, r0, r1 - ldr r1, [sp, #188] - str lr, [sp, #88] @ 4-byte Spill - adcs r0, r6, r1 - ldr r1, [sp, #128] @ 4-byte Reload - str r0, [sp, #140] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - ldr r6, [sp, #140] @ 4-byte Reload - adc r10, r0, #0 - ldr r0, [sp, #132] @ 4-byte Reload - subs r0, r12, r0 - sbcs r1, r2, r1 - ldr r2, [sp, #124] @ 4-byte Reload - sbcs r2, r3, r2 - ldr r3, [sp, #108] @ 4-byte Reload - sbcs r3, r7, r3 - ldr r7, [sp, #112] @ 4-byte Reload - sbcs r12, r8, r7 - ldr r7, [sp, #116] @ 4-byte Reload - sbcs r8, r4, r7 - ldr r4, [sp, #120] @ 4-byte Reload - ldr r7, [sp, #96] @ 4-byte Reload - sbcs r4, r5, r4 - ldr r5, [sp, #92] @ 4-byte Reload - sbcs r5, r11, r5 - sbcs r11, r9, r7 - ldr r7, [sp, #100] @ 4-byte Reload - sbcs r9, lr, r7 - ldr r7, [sp, #104] @ 4-byte Reload - sbcs lr, r6, r7 - ldr r7, [sp, #44] @ 4-byte Reload - sbc r6, r10, #0 - ldr r10, [sp, #136] @ 4-byte Reload - ands r6, r6, #1 - movne r0, r7 - str r0, [r10] - ldr r0, [sp, #52] @ 4-byte Reload - movne r1, r0 - ldr r0, [sp, #56] @ 4-byte Reload - str r1, [r10, #4] - movne r2, r0 - ldr r0, [sp, #60] @ 4-byte Reload - cmp r6, #0 - str r2, [r10, #8] - movne r3, r0 - ldr r0, [sp, #64] @ 4-byte Reload - str r3, [r10, #12] - movne r12, r0 - ldr r0, [sp, #68] @ 4-byte Reload - str r12, [r10, #16] - movne r8, r0 - ldr r0, [sp, #72] @ 4-byte Reload - cmp r6, #0 - str r8, [r10, #20] - movne r4, r0 - ldr r0, [sp, #76] @ 4-byte Reload - str r4, [r10, #24] - movne r5, r0 - ldr r0, [sp, #84] @ 4-byte Reload - str r5, [r10, #28] - movne r11, r0 - ldr r0, [sp, #88] @ 4-byte Reload - cmp r6, #0 - str r11, [r10, #32] - movne r9, r0 - ldr r0, [sp, #140] @ 4-byte Reload - str r9, [r10, #36] - movne lr, r0 - str lr, [r10, #40] - add sp, sp, #676 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end164: - .size mcl_fp_montRed11L, .Lfunc_end164-mcl_fp_montRed11L - .cantunwind - .fnend - - .globl mcl_fp_addPre11L - .align 2 - .type mcl_fp_addPre11L,%function -mcl_fp_addPre11L: @ @mcl_fp_addPre11L - 
.fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #20 - sub sp, sp, #20 - ldm r1, {r3, r12} - ldr r8, [r1, #8] - ldr r9, [r1, #12] - ldmib r2, {r5, r6, r7, r10} - ldr r4, [r2, #20] - ldr r11, [r2] - str r4, [sp] @ 4-byte Spill - ldr r4, [r2, #24] - adds lr, r11, r3 - ldr r3, [r2, #36] - ldr r11, [r2, #32] - adcs r5, r5, r12 - add r12, r1, #16 - adcs r6, r6, r8 - adcs r7, r7, r9 - add r9, r1, #32 - str r4, [sp, #4] @ 4-byte Spill - ldr r4, [r2, #28] - ldr r2, [r2, #40] - str r3, [sp, #8] @ 4-byte Spill - str r4, [sp, #16] @ 4-byte Spill - str r2, [sp, #12] @ 4-byte Spill - ldm r9, {r4, r8, r9} - ldm r12, {r1, r2, r3, r12} - str lr, [r0] - stmib r0, {r5, r6} - str r7, [r0, #12] - ldr r7, [sp] @ 4-byte Reload - adcs r1, r10, r1 - str r1, [r0, #16] - ldr r1, [sp, #4] @ 4-byte Reload - adcs r2, r7, r2 - str r2, [r0, #20] - ldr r2, [sp, #8] @ 4-byte Reload - adcs r1, r1, r3 - ldr r3, [sp, #12] @ 4-byte Reload - str r1, [r0, #24] - ldr r1, [sp, #16] @ 4-byte Reload - adcs r1, r1, r12 - str r1, [r0, #28] - adcs r1, r11, r4 - add r0, r0, #32 - adcs r2, r2, r8 - adcs r3, r3, r9 - stm r0, {r1, r2, r3} - mov r0, #0 - adc r0, r0, #0 - add sp, sp, #20 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end165: - .size mcl_fp_addPre11L, .Lfunc_end165-mcl_fp_addPre11L - .cantunwind - .fnend - - .globl mcl_fp_subPre11L - .align 2 - .type mcl_fp_subPre11L,%function -mcl_fp_subPre11L: @ @mcl_fp_subPre11L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #20 - sub sp, sp, #20 - ldmib r2, {r8, r12, lr} - ldr r3, [r2, #16] - ldr r7, [r2] - ldr r6, [r1] - ldr r5, [r1, #4] - ldr r4, [r1, #8] - ldr r11, [r2, #32] - ldr r10, [r2, #40] - ldr r9, [r1, #36] - str r3, [sp, #4] @ 4-byte Spill - ldr r3, [r2, #20] - subs r6, r6, r7 - ldr r7, [r2, #36] - sbcs r5, r5, r8 - ldr r8, [r1, #40] - sbcs r4, r4, r12 - str r3, [sp, #8] @ 4-byte Spill - ldr r3, [r2, #24] - str r7, [sp] @ 4-byte Spill - ldr r7, [r1, #32] - str r3, [sp, #12] @ 4-byte Spill - ldr r3, [r2, #28] - str r3, [sp, #16] @ 4-byte Spill - ldr r3, [r1, #12] - sbcs r12, r3, lr - add lr, r1, #16 - ldm lr, {r1, r2, r3, lr} - str r6, [r0] - str r5, [r0, #4] - str r4, [r0, #8] - ldr r4, [sp, #4] @ 4-byte Reload - ldr r6, [sp, #8] @ 4-byte Reload - str r12, [r0, #12] - sbcs r1, r1, r4 - str r1, [r0, #16] - ldr r1, [sp, #12] @ 4-byte Reload - sbcs r2, r2, r6 - str r2, [r0, #20] - ldr r2, [sp] @ 4-byte Reload - sbcs r1, r3, r1 - str r1, [r0, #24] - ldr r1, [sp, #16] @ 4-byte Reload - sbcs r1, lr, r1 - str r1, [r0, #28] - sbcs r1, r7, r11 - add r0, r0, #32 - sbcs r2, r9, r2 - sbcs r3, r8, r10 - stm r0, {r1, r2, r3} - mov r0, #0 - sbc r0, r0, #0 - and r0, r0, #1 - add sp, sp, #20 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end166: - .size mcl_fp_subPre11L, .Lfunc_end166-mcl_fp_subPre11L - .cantunwind - .fnend - - .globl mcl_fp_shr1_11L - .align 2 - .type mcl_fp_shr1_11L,%function -mcl_fp_shr1_11L: @ @mcl_fp_shr1_11L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - ldmib r1, {r2, r3, r12, lr} - add r8, r1, #20 - add r11, r1, #32 - ldm r8, {r4, r5, r8} - ldr r7, [r1] - ldm r11, {r9, r10, r11} - lsrs r1, r12, #1 - lsr r6, r2, #1 - rrx r1, r3 - lsrs r2, r2, #1 - orr r6, r6, r3, lsl #31 - lsr r3, r11, #1 - rrx r2, r7 - stm r0, {r2, r6} - str r1, [r0, #8] - lsr r1, r12, #1 - lsr r2, r10, #1 - orr r1, r1, lr, lsl #31 - orr r2, r2, r11, lsl #31 
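For orientation, mcl_fp_shr1_11L above is simply a one-bit right shift of a 352-bit value: each limb keeps its own high 31 bits and takes the low bit of the next limb, which is what the generated lsrs/rrx and lsr/orr pairs compute. A C sketch with illustrative names:

    #include <stdint.h>

    #define N 11

    /* Sketch only: y = x >> 1 across N 32-bit limbs, little-endian limb order. */
    static void shr1_11(uint32_t y[N], const uint32_t x[N])
    {
        for (int i = 0; i < N - 1; i++)
            y[i] = (x[i] >> 1) | (x[i + 1] << 31);
        y[N - 1] = x[N - 1] >> 1;
    }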
- str r1, [r0, #12] - lsrs r1, r4, #1 - rrx r1, lr - str r1, [r0, #16] - lsr r1, r4, #1 - orr r1, r1, r5, lsl #31 - str r1, [r0, #20] - lsrs r1, r8, #1 - rrx r1, r5 - str r1, [r0, #24] - lsr r1, r8, #1 - orr r1, r1, r9, lsl #31 - str r1, [r0, #28] - lsrs r1, r10, #1 - add r0, r0, #32 - rrx r1, r9 - stm r0, {r1, r2, r3} - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end167: - .size mcl_fp_shr1_11L, .Lfunc_end167-mcl_fp_shr1_11L - .cantunwind - .fnend - - .globl mcl_fp_add11L - .align 2 - .type mcl_fp_add11L,%function -mcl_fp_add11L: @ @mcl_fp_add11L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #32 - sub sp, sp, #32 - ldm r1, {r12, lr} - ldr r5, [r2] - ldr r8, [r1, #8] - ldr r9, [r1, #12] - ldmib r2, {r4, r6, r7} - adds r5, r5, r12 - ldr r12, [r1, #32] - adcs r4, r4, lr - str r5, [sp, #28] @ 4-byte Spill - ldr r5, [r1, #24] - ldr lr, [r1, #40] - adcs r6, r6, r8 - str r4, [sp, #24] @ 4-byte Spill - ldr r4, [r1, #20] - adcs r7, r7, r9 - str r6, [sp, #12] @ 4-byte Spill - ldr r6, [r1, #16] - str r7, [sp, #8] @ 4-byte Spill - ldr r7, [r2, #16] - adcs r9, r7, r6 - ldr r7, [r2, #20] - str r9, [sp] @ 4-byte Spill - adcs r7, r7, r4 - ldr r4, [r2, #24] - str r7, [sp, #4] @ 4-byte Spill - adcs r8, r4, r5 - ldr r4, [r1, #28] - ldr r5, [r2, #28] - adcs r6, r5, r4 - ldr r5, [r2, #32] - ldr r4, [r1, #36] - ldr r1, [r2, #36] - ldr r2, [r2, #40] - adcs r10, r5, r12 - ldr r12, [sp, #24] @ 4-byte Reload - adcs r1, r1, r4 - ldr r4, [sp, #8] @ 4-byte Reload - adcs r11, r2, lr - ldr r2, [sp, #28] @ 4-byte Reload - ldr lr, [sp, #12] @ 4-byte Reload - str r1, [sp, #20] @ 4-byte Spill - str r2, [r0] - str r12, [r0, #4] - str lr, [r0, #8] - str r4, [r0, #12] - str r9, [r0, #16] - str r7, [r0, #20] - str r8, [r0, #24] - str r6, [r0, #28] - str r10, [r0, #32] - str r1, [r0, #36] - mov r1, #0 - str r11, [r0, #40] - mov r9, r6 - adc r1, r1, #0 - str r1, [sp, #16] @ 4-byte Spill - ldm r3, {r1, r7} - ldr r5, [r3, #8] - ldr r6, [r3, #12] - subs r1, r2, r1 - ldr r2, [sp] @ 4-byte Reload - str r1, [sp, #28] @ 4-byte Spill - sbcs r1, r12, r7 - str r1, [sp, #24] @ 4-byte Spill - sbcs r1, lr, r5 - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [r3, #16] - sbcs r5, r4, r6 - sbcs r7, r2, r1 - ldr r1, [r3, #20] - ldr r2, [sp, #4] @ 4-byte Reload - sbcs r4, r2, r1 - ldr r1, [r3, #24] - sbcs r12, r8, r1 - ldr r1, [r3, #28] - add r3, r3, #32 - sbcs lr, r9, r1 - ldm r3, {r1, r2, r3} - ldr r6, [sp, #20] @ 4-byte Reload - sbcs r1, r10, r1 - sbcs r2, r6, r2 - ldr r6, [sp, #16] @ 4-byte Reload - sbcs r3, r11, r3 - sbc r6, r6, #0 - tst r6, #1 - bne .LBB168_2 -@ BB#1: @ %nocarry - ldr r6, [sp, #28] @ 4-byte Reload - str r6, [r0] - ldr r6, [sp, #24] @ 4-byte Reload - str r6, [r0, #4] - ldr r6, [sp, #12] @ 4-byte Reload - str r6, [r0, #8] - str r5, [r0, #12] - str r7, [r0, #16] - str r4, [r0, #20] - str r12, [r0, #24] - str lr, [r0, #28] - add r0, r0, #32 - stm r0, {r1, r2, r3} -.LBB168_2: @ %carry - add sp, sp, #32 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end168: - .size mcl_fp_add11L, .Lfunc_end168-mcl_fp_add11L - .cantunwind - .fnend - - .globl mcl_fp_addNF11L - .align 2 - .type mcl_fp_addNF11L,%function -mcl_fp_addNF11L: @ @mcl_fp_addNF11L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #56 - sub sp, sp, #56 - ldm r1, {r5, r8, lr} - ldr r6, [r2] - ldr r12, [r1, #12] - ldmib r2, {r4, r7, r9} - ldr r11, [r1, #24] - adds r10, r6, r5 - adcs r4, r4, r8 - ldr 
r8, [r1, #20] - adcs r7, r7, lr - str r4, [sp, #32] @ 4-byte Spill - ldr r4, [r2, #16] - ldr lr, [r1, #36] - str r7, [sp, #40] @ 4-byte Spill - ldr r7, [r1, #16] - adcs r6, r9, r12 - ldr r12, [r2, #36] - str r6, [sp, #16] @ 4-byte Spill - adcs r7, r4, r7 - ldr r4, [r2, #28] - str r7, [sp, #44] @ 4-byte Spill - ldr r7, [r2, #20] - adcs r7, r7, r8 - str r7, [sp, #52] @ 4-byte Spill - ldr r7, [r2, #24] - adcs r8, r7, r11 - ldr r7, [r1, #28] - ldr r11, [r1, #40] - str r8, [sp, #20] @ 4-byte Spill - adcs r7, r4, r7 - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [r1, #32] - ldr r1, [r2, #32] - ldr r2, [r2, #40] - adcs r4, r1, r7 - adcs r1, r12, lr - str r4, [sp, #24] @ 4-byte Spill - str r1, [sp, #48] @ 4-byte Spill - adc r9, r2, r11 - ldmib r3, {r1, r2, lr} - ldr r5, [r3, #20] - ldr r11, [r3] - ldr r7, [r3, #16] - ldr r12, [r3, #24] - str r5, [sp, #12] @ 4-byte Spill - ldr r5, [r3, #28] - subs r11, r10, r11 - str r5, [sp, #28] @ 4-byte Spill - ldr r5, [sp, #32] @ 4-byte Reload - sbcs r1, r5, r1 - ldr r5, [sp, #40] @ 4-byte Reload - sbcs r2, r5, r2 - ldr r5, [r3, #32] - sbcs lr, r6, lr - ldr r6, [sp, #36] @ 4-byte Reload - str r5, [sp, #8] @ 4-byte Spill - ldr r5, [r3, #36] - ldr r3, [r3, #40] - str r3, [sp, #4] @ 4-byte Spill - ldr r3, [sp, #44] @ 4-byte Reload - str r5, [sp] @ 4-byte Spill - ldr r5, [sp, #12] @ 4-byte Reload - sbcs r7, r3, r7 - ldr r3, [sp, #52] @ 4-byte Reload - sbcs r3, r3, r5 - ldr r5, [sp, #28] @ 4-byte Reload - sbcs r12, r8, r12 - sbcs r8, r6, r5 - ldr r5, [sp, #8] @ 4-byte Reload - sbcs r4, r4, r5 - ldr r5, [sp] @ 4-byte Reload - str r4, [sp, #12] @ 4-byte Spill - ldr r4, [sp, #48] @ 4-byte Reload - sbcs r4, r4, r5 - ldr r5, [sp, #32] @ 4-byte Reload - str r4, [sp, #28] @ 4-byte Spill - ldr r4, [sp, #4] @ 4-byte Reload - sbc r6, r9, r4 - asr r4, r6, #31 - cmp r4, #0 - movlt r11, r10 - movlt r1, r5 - str r11, [r0] - str r1, [r0, #4] - ldr r1, [sp, #40] @ 4-byte Reload - movlt r2, r1 - ldr r1, [sp, #16] @ 4-byte Reload - cmp r4, #0 - str r2, [r0, #8] - ldr r2, [sp, #28] @ 4-byte Reload - movlt lr, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str lr, [r0, #12] - movlt r7, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r7, [r0, #16] - movlt r3, r1 - ldr r1, [sp, #20] @ 4-byte Reload - cmp r4, #0 - str r3, [r0, #20] - ldr r3, [sp, #12] @ 4-byte Reload - movlt r12, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r12, [r0, #24] - movlt r8, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r8, [r0, #28] - movlt r3, r1 - ldr r1, [sp, #48] @ 4-byte Reload - cmp r4, #0 - movlt r6, r9 - str r3, [r0, #32] - movlt r2, r1 - str r2, [r0, #36] - str r6, [r0, #40] - add sp, sp, #56 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end169: - .size mcl_fp_addNF11L, .Lfunc_end169-mcl_fp_addNF11L - .cantunwind - .fnend - - .globl mcl_fp_sub11L - .align 2 - .type mcl_fp_sub11L,%function -mcl_fp_sub11L: @ @mcl_fp_sub11L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #48 - sub sp, sp, #48 - mov r10, r3 - ldr r12, [r2] - ldr r9, [r2, #4] - ldr r8, [r2, #8] - ldr r3, [r2, #12] - ldm r1, {r4, r5, r6, r7} - subs r4, r4, r12 - sbcs r5, r5, r9 - str r4, [sp, #36] @ 4-byte Spill - ldr r4, [r2, #24] - sbcs r6, r6, r8 - str r5, [sp, #44] @ 4-byte Spill - ldr r5, [r2, #20] - add r8, r1, #32 - sbcs r12, r7, r3 - str r6, [sp, #40] @ 4-byte Spill - ldr r6, [r2, #16] - ldr r7, [r1, #16] - ldr r3, [sp, #36] @ 4-byte Reload - str r12, [sp, #24] @ 4-byte Spill - sbcs r11, r7, r6 - ldr r6, [r1, #20] - ldr r7, [r2, #40] - sbcs r9, r6, r5 - 
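The function beginning above, mcl_fp_sub11L, is modular subtraction: a limb-wise subtract with borrow, followed by adding the modulus back when the final borrow fires (the ".LBB170_1: @ %carry" branch below). A hedged C sketch, with illustrative names:

    #include <stdint.h>

    #define N 11

    /* Sketch only: z = (x - y) mod p for x, y < p, over N 32-bit limbs. */
    static void fp_sub11(uint32_t z[N], const uint32_t x[N],
                         const uint32_t y[N], const uint32_t p[N])
    {
        uint32_t borrow = 0;
        for (int i = 0; i < N; i++) {
            uint64_t v = (uint64_t)x[i] - y[i] - borrow;
            z[i] = (uint32_t)v;
            borrow = (uint32_t)(v >> 63);      /* 1 iff the subtraction wrapped */
        }
        if (borrow) {                          /* x < y: wrap around mod p */
            uint64_t c = 0;
            for (int i = 0; i < N; i++) {
                uint64_t v = (uint64_t)z[i] + p[i] + c;
                z[i] = (uint32_t)v;
                c = v >> 32;
            }
        }
    }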
ldr r5, [r1, #24] - sbcs r6, r5, r4 - ldr r4, [r2, #28] - ldr r5, [r1, #28] - str r6, [sp, #28] @ 4-byte Spill - sbcs lr, r5, r4 - ldr r4, [r2, #36] - ldr r5, [r2, #32] - str lr, [sp, #20] @ 4-byte Spill - str r4, [sp, #32] @ 4-byte Spill - ldm r8, {r2, r4, r8} - str r3, [r0] - sbcs r1, r2, r5 - ldr r2, [sp, #32] @ 4-byte Reload - sbcs r2, r4, r2 - mov r4, r3 - ldr r3, [sp, #44] @ 4-byte Reload - sbcs r8, r8, r7 - mov r7, #0 - sbc r7, r7, #0 - tst r7, #1 - str r3, [r0, #4] - ldr r3, [sp, #40] @ 4-byte Reload - str r3, [r0, #8] - add r3, r0, #32 - str r12, [r0, #12] - str r11, [r0, #16] - str r9, [r0, #20] - str r6, [r0, #24] - str lr, [r0, #28] - stm r3, {r1, r2, r8} - beq .LBB170_2 -@ BB#1: @ %carry - ldr r3, [r10, #32] - str r3, [sp, #12] @ 4-byte Spill - ldr r3, [r10, #36] - str r3, [sp, #16] @ 4-byte Spill - ldr r3, [r10, #40] - str r3, [sp, #32] @ 4-byte Spill - ldmib r10, {r5, lr} - ldr r3, [r10, #20] - ldr r6, [sp, #44] @ 4-byte Reload - ldr r7, [r10, #12] - ldr r12, [r10, #16] - str r3, [sp] @ 4-byte Spill - ldr r3, [r10, #24] - str r3, [sp, #4] @ 4-byte Spill - ldr r3, [r10, #28] - str r3, [sp, #8] @ 4-byte Spill - ldr r3, [r10] - adds r3, r3, r4 - ldr r4, [sp, #40] @ 4-byte Reload - adcs r5, r5, r6 - stm r0, {r3, r5} - ldr r3, [sp, #24] @ 4-byte Reload - adcs r4, lr, r4 - str r4, [r0, #8] - adcs r3, r7, r3 - ldr r7, [sp, #4] @ 4-byte Reload - str r3, [r0, #12] - adcs r3, r12, r11 - str r3, [r0, #16] - ldr r3, [sp] @ 4-byte Reload - adcs r3, r3, r9 - str r3, [r0, #20] - ldr r3, [sp, #28] @ 4-byte Reload - adcs r3, r7, r3 - ldr r7, [sp, #8] @ 4-byte Reload - str r3, [r0, #24] - ldr r3, [sp, #20] @ 4-byte Reload - adcs r3, r7, r3 - str r3, [r0, #28] - ldr r3, [sp, #12] @ 4-byte Reload - add r0, r0, #32 - adcs r1, r3, r1 - ldr r3, [sp, #16] @ 4-byte Reload - adcs r2, r3, r2 - ldr r3, [sp, #32] @ 4-byte Reload - adc r3, r3, r8 - stm r0, {r1, r2, r3} -.LBB170_2: @ %nocarry - add sp, sp, #48 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end170: - .size mcl_fp_sub11L, .Lfunc_end170-mcl_fp_sub11L - .cantunwind - .fnend - - .globl mcl_fp_subNF11L - .align 2 - .type mcl_fp_subNF11L,%function -mcl_fp_subNF11L: @ @mcl_fp_subNF11L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #68 - sub sp, sp, #68 - mov r12, r0 - ldr r0, [r2, #32] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [r2, #36] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [r2, #40] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [r1, #32] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [r1, #36] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r1, #40] - str r0, [sp, #44] @ 4-byte Spill - ldm r2, {r8, r10} - ldr r0, [r2, #8] - ldr r5, [r2, #16] - ldr r11, [r2, #20] - ldr lr, [r1, #16] - ldr r6, [r1, #20] - ldr r9, [r1, #24] - ldr r7, [r1, #28] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [r2, #12] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [r2, #24] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [r2, #28] - ldr r2, [r1, #8] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [r1, #12] - ldm r1, {r1, r4} - subs r1, r1, r8 - sbcs r8, r4, r10 - ldr r4, [sp, #32] @ 4-byte Reload - str r8, [sp, #16] @ 4-byte Spill - sbcs r2, r2, r4 - str r2, [sp, #24] @ 4-byte Spill - ldr r2, [sp, #28] @ 4-byte Reload - sbcs r4, r0, r2 - ldr r0, [sp, #52] @ 4-byte Reload - ldr r2, [sp, #36] @ 4-byte Reload - sbcs r5, lr, r5 - ldr lr, [r3, #12] - str r4, [sp, #20] @ 4-byte Spill - sbcs r11, r6, r11 - mov r6, r1 - str r5, [sp, #28] @ 4-byte Spill - str r11, [sp, #32] @ 4-byte Spill - 
sbcs r0, r9, r0 - ldr r9, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - sbcs r0, r7, r0 - ldr r7, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - sbcs r0, r2, r0 - ldr r2, [sp, #40] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - sbcs r10, r2, r0 - ldr r2, [sp, #56] @ 4-byte Reload - str r10, [sp, #48] @ 4-byte Spill - sbc r0, r7, r2 - ldr r2, [r3, #36] - ldr r7, [r3, #4] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [r3, #32] - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [r3, #40] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r3, #16] - str r2, [sp, #36] @ 4-byte Spill - ldr r2, [r3, #8] - str r0, [sp] @ 4-byte Spill - ldr r0, [r3, #20] - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [r3, #24] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [r3, #28] - ldr r3, [r3] - adds r1, r6, r3 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp] @ 4-byte Reload - ldr r3, [sp, #8] @ 4-byte Reload - adcs r7, r8, r7 - adcs r2, r9, r2 - adcs lr, r4, lr - adcs r4, r5, r0 - ldr r0, [sp, #4] @ 4-byte Reload - adcs r5, r11, r0 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r8, r0, r3 - ldr r3, [sp, #64] @ 4-byte Reload - ldr r0, [sp, #12] @ 4-byte Reload - adcs r11, r3, r0 - ldr r3, [sp, #60] @ 4-byte Reload - ldr r0, [sp, #40] @ 4-byte Reload - adcs r3, r3, r0 - str r3, [sp, #40] @ 4-byte Spill - ldr r3, [sp, #44] @ 4-byte Reload - adcs r0, r10, r3 - ldr r3, [sp, #36] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r10, r0, r3 - asr r3, r0, #31 - ldr r0, [sp, #16] @ 4-byte Reload - cmp r3, #0 - movge r1, r6 - movge r2, r9 - str r1, [r12] - ldr r1, [sp, #60] @ 4-byte Reload - movge r7, r0 - ldr r0, [sp, #20] @ 4-byte Reload - cmp r3, #0 - str r7, [r12, #4] - str r2, [r12, #8] - ldr r2, [sp, #48] @ 4-byte Reload - movge lr, r0 - ldr r0, [sp, #28] @ 4-byte Reload - str lr, [r12, #12] - movge r4, r0 - ldr r0, [sp, #32] @ 4-byte Reload - str r4, [r12, #16] - movge r5, r0 - ldr r0, [sp, #52] @ 4-byte Reload - cmp r3, #0 - str r5, [r12, #20] - movge r8, r0 - ldr r0, [sp, #64] @ 4-byte Reload - str r8, [r12, #24] - movge r11, r0 - ldr r0, [sp, #40] @ 4-byte Reload - movge r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - cmp r3, #0 - str r11, [r12, #28] - movge r1, r2 - ldr r2, [sp, #56] @ 4-byte Reload - movge r10, r2 - add r2, r12, #32 - stm r2, {r0, r1, r10} - add sp, sp, #68 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end171: - .size mcl_fp_subNF11L, .Lfunc_end171-mcl_fp_subNF11L - .cantunwind - .fnend - - .globl mcl_fpDbl_add11L - .align 2 - .type mcl_fpDbl_add11L,%function -mcl_fpDbl_add11L: @ @mcl_fpDbl_add11L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #120 - sub sp, sp, #120 - ldm r1, {r7, r12, lr} - ldr r8, [r1, #12] - ldm r2, {r4, r5, r6, r9} - ldr r10, [r2, #20] - adds r4, r4, r7 - adcs r7, r5, r12 - str r4, [sp, #40] @ 4-byte Spill - ldr r4, [r2, #64] - str r7, [sp, #28] @ 4-byte Spill - adcs r7, r6, lr - add lr, r1, #16 - str r7, [sp, #24] @ 4-byte Spill - adcs r7, r9, r8 - add r8, r1, #32 - str r7, [sp, #32] @ 4-byte Spill - ldr r7, [r2, #32] - str r4, [sp, #108] @ 4-byte Spill - ldr r4, [r2, #68] - str r7, [sp, #68] @ 4-byte Spill - ldr r7, [r2, #36] - str r4, [sp, #104] @ 4-byte Spill - ldr r4, [r2, #72] - str r7, [sp, #72] @ 4-byte Spill - ldr r7, [r2, #40] - str r4, [sp, #96] @ 4-byte Spill - ldr r4, [r2, #76] - str r7, [sp, #76] @ 
4-byte Spill - ldr r7, [r2, #44] - str r4, [sp, #116] @ 4-byte Spill - ldr r4, [r2, #80] - str r7, [sp, #84] @ 4-byte Spill - ldr r7, [r2, #48] - str r4, [sp, #100] @ 4-byte Spill - ldr r4, [r2, #84] - str r7, [sp, #88] @ 4-byte Spill - ldr r7, [r2, #56] - str r4, [sp, #112] @ 4-byte Spill - ldr r4, [r2, #16] - str r7, [sp, #92] @ 4-byte Spill - ldr r7, [r2, #60] - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [r2, #52] - str r7, [sp, #80] @ 4-byte Spill - ldr r7, [r2, #28] - str r7, [sp, #20] @ 4-byte Spill - ldr r7, [r2, #24] - ldr r2, [r1, #64] - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [r1, #68] - str r7, [sp, #16] @ 4-byte Spill - str r2, [sp, #48] @ 4-byte Spill - ldr r2, [r1, #72] - str r2, [sp, #52] @ 4-byte Spill - ldr r2, [r1, #76] - str r2, [sp, #56] @ 4-byte Spill - ldr r2, [r1, #80] - str r2, [sp, #60] @ 4-byte Spill - ldr r2, [r1, #84] - str r2, [sp, #64] @ 4-byte Spill - ldm r8, {r5, r6, r8} - ldr r2, [r1, #44] - ldr r11, [r1, #52] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #48] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #56] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #12] @ 4-byte Spill - ldm lr, {r1, r2, r12, lr} - ldr r9, [sp, #40] @ 4-byte Reload - ldr r7, [sp, #28] @ 4-byte Reload - adcs r1, r4, r1 - str r9, [r0] - str r7, [r0, #4] - ldr r7, [sp, #24] @ 4-byte Reload - ldr r4, [sp, #32] @ 4-byte Reload - adcs r2, r10, r2 - add r10, r3, #32 - str r7, [r0, #8] - str r4, [r0, #12] - str r1, [r0, #16] - ldr r1, [sp, #16] @ 4-byte Reload - str r2, [r0, #20] - ldr r2, [sp, #20] @ 4-byte Reload - ldr r7, [sp] @ 4-byte Reload - adcs r1, r1, r12 - str r1, [r0, #24] - adcs r2, r2, lr - ldr r1, [sp, #68] @ 4-byte Reload - str r2, [r0, #28] - ldr r2, [sp, #72] @ 4-byte Reload - adcs r1, r1, r5 - str r1, [r0, #32] - adcs r2, r2, r6 - ldr r1, [sp, #76] @ 4-byte Reload - str r2, [r0, #36] - ldr r2, [sp, #84] @ 4-byte Reload - adcs r1, r1, r8 - adcs r6, r2, r7 - str r1, [r0, #40] - ldr r1, [sp, #88] @ 4-byte Reload - ldr r2, [sp, #4] @ 4-byte Reload - ldr r7, [sp, #8] @ 4-byte Reload - str r6, [sp, #72] @ 4-byte Spill - adcs r4, r1, r2 - ldr r1, [sp, #80] @ 4-byte Reload - str r4, [sp, #76] @ 4-byte Spill - adcs r2, r1, r11 - ldr r1, [sp, #92] @ 4-byte Reload - str r2, [sp, #80] @ 4-byte Spill - adcs r5, r1, r7 - ldr r1, [sp, #36] @ 4-byte Reload - ldr r7, [sp, #12] @ 4-byte Reload - str r5, [sp, #92] @ 4-byte Spill - adcs r8, r1, r7 - ldr r1, [sp, #108] @ 4-byte Reload - ldr r7, [sp, #44] @ 4-byte Reload - str r8, [sp, #84] @ 4-byte Spill - adcs r1, r1, r7 - ldr r7, [sp, #48] @ 4-byte Reload - str r1, [sp, #108] @ 4-byte Spill - ldr r1, [sp, #104] @ 4-byte Reload - adcs r1, r1, r7 - ldr r7, [sp, #52] @ 4-byte Reload - str r1, [sp, #104] @ 4-byte Spill - ldr r1, [sp, #96] @ 4-byte Reload - adcs r12, r1, r7 - ldr r1, [sp, #116] @ 4-byte Reload - ldr r7, [sp, #56] @ 4-byte Reload - str r12, [sp, #96] @ 4-byte Spill - adcs r1, r1, r7 - ldr r7, [sp, #60] @ 4-byte Reload - str r1, [sp, #116] @ 4-byte Spill - ldr r1, [sp, #100] @ 4-byte Reload - adcs r1, r1, r7 - ldr r7, [sp, #64] @ 4-byte Reload - str r1, [sp, #100] @ 4-byte Spill - ldr r1, [sp, #112] @ 4-byte Reload - adcs r1, r1, r7 - str r1, [sp, #112] @ 4-byte Spill - mov r1, #0 - adc r1, r1, #0 - str r1, [sp, #88] @ 4-byte Spill - ldmib r3, {r1, r9, lr} - ldr r7, [r3, #16] - ldr r11, [r3] - str r7, [sp, #56] @ 4-byte Spill - ldr r7, [r3, #20] - subs r11, r6, r11 - sbcs r1, r4, r1 - sbcs r4, r2, r9 - sbcs r2, r5, lr - str r7, [sp, #60] @ 4-byte Spill - ldr r7, [r3, #24] - str r7, [sp, #64] @ 4-byte Spill 
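mcl_fpDbl_add11L, in progress above, adds two double-width (2N-limb) values; only the high N limbs represent a residue, so only that half, together with the add's final carry bit, gets one conditional subtraction of p (the movne selection at the end of the function). A hedged C sketch, names illustrative:

    #include <stdint.h>

    #define N 11

    /* Sketch only: z = x + y over 2N limbs; high half reduced once mod p. */
    static void fpdbl_add11(uint32_t z[2 * N], const uint32_t x[2 * N],
                            const uint32_t y[2 * N], const uint32_t p[N])
    {
        uint64_t c = 0;
        for (int i = 0; i < 2 * N; i++) {
            uint64_t v = (uint64_t)x[i] + y[i] + c;
            z[i] = (uint32_t)v;
            c = v >> 32;
        }
        /* reduce the high half: h = c:z[N..2N-1]; if h >= p then h -= p */
        uint32_t d[N], borrow = 0;
        for (int i = 0; i < N; i++) {
            uint64_t v = (uint64_t)z[N + i] - p[i] - borrow;
            d[i] = (uint32_t)v;
            borrow = (uint32_t)(v >> 63);
        }
        int use_d = (c != 0) || !borrow;       /* the carry bit outranks the borrow */
        for (int i = 0; i < N; i++)
            z[N + i] = use_d ? d[i] : z[N + i];
    }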
- ldr r7, [r3, #28] - str r7, [sp, #68] @ 4-byte Spill - ldm r10, {r5, r9, r10} - ldr r3, [sp, #56] @ 4-byte Reload - ldr r6, [sp, #60] @ 4-byte Reload - sbcs r7, r8, r3 - ldr r3, [sp, #108] @ 4-byte Reload - sbcs r8, r3, r6 - ldr r3, [sp, #104] @ 4-byte Reload - ldr r6, [sp, #64] @ 4-byte Reload - sbcs r3, r3, r6 - ldr r6, [sp, #68] @ 4-byte Reload - sbcs r12, r12, r6 - ldr r6, [sp, #116] @ 4-byte Reload - sbcs lr, r6, r5 - ldr r5, [sp, #100] @ 4-byte Reload - ldr r6, [sp, #112] @ 4-byte Reload - sbcs r9, r5, r9 - ldr r5, [sp, #72] @ 4-byte Reload - sbcs r10, r6, r10 - ldr r6, [sp, #88] @ 4-byte Reload - sbc r6, r6, #0 - ands r6, r6, #1 - movne r11, r5 - ldr r5, [sp, #76] @ 4-byte Reload - str r11, [r0, #44] - movne r1, r5 - str r1, [r0, #48] - ldr r1, [sp, #80] @ 4-byte Reload - movne r4, r1 - ldr r1, [sp, #92] @ 4-byte Reload - cmp r6, #0 - str r4, [r0, #52] - movne r2, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r2, [r0, #56] - movne r7, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r7, [r0, #60] - movne r8, r1 - ldr r1, [sp, #104] @ 4-byte Reload - cmp r6, #0 - str r8, [r0, #64] - movne r3, r1 - ldr r1, [sp, #96] @ 4-byte Reload - str r3, [r0, #68] - movne r12, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r12, [r0, #72] - movne lr, r1 - ldr r1, [sp, #100] @ 4-byte Reload - cmp r6, #0 - str lr, [r0, #76] - movne r9, r1 - ldr r1, [sp, #112] @ 4-byte Reload - str r9, [r0, #80] - movne r10, r1 - str r10, [r0, #84] - add sp, sp, #120 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end172: - .size mcl_fpDbl_add11L, .Lfunc_end172-mcl_fpDbl_add11L - .cantunwind - .fnend - - .globl mcl_fpDbl_sub11L - .align 2 - .type mcl_fpDbl_sub11L,%function -mcl_fpDbl_sub11L: @ @mcl_fpDbl_sub11L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #120 - sub sp, sp, #120 - ldr r7, [r2, #64] - str r7, [sp, #96] @ 4-byte Spill - ldr r7, [r2, #68] - str r7, [sp, #100] @ 4-byte Spill - ldr r7, [r2, #72] - str r7, [sp, #104] @ 4-byte Spill - ldr r7, [r2, #76] - str r7, [sp, #112] @ 4-byte Spill - ldr r7, [r2, #80] - str r7, [sp, #108] @ 4-byte Spill - ldr r7, [r2, #84] - str r7, [sp, #116] @ 4-byte Spill - ldr r7, [r2, #60] - str r7, [sp, #92] @ 4-byte Spill - ldr r7, [r2, #56] - str r7, [sp, #88] @ 4-byte Spill - ldr r7, [r2, #52] - str r7, [sp, #84] @ 4-byte Spill - ldr r7, [r2, #48] - str r7, [sp, #80] @ 4-byte Spill - ldr r7, [r2, #44] - str r7, [sp, #76] @ 4-byte Spill - ldr r7, [r2, #40] - str r7, [sp, #72] @ 4-byte Spill - ldr r7, [r2, #36] - str r7, [sp, #68] @ 4-byte Spill - ldr r7, [r2] - ldmib r2, {r4, r8, r10} - ldm r1, {r5, r6, r12, lr} - ldr r9, [r2, #20] - subs r5, r5, r7 - ldr r7, [r2, #24] - sbcs r4, r6, r4 - str r5, [sp, #16] @ 4-byte Spill - ldr r5, [r2, #32] - str r4, [sp, #8] @ 4-byte Spill - ldr r4, [r2, #28] - sbcs r8, r12, r8 - str r7, [sp, #32] @ 4-byte Spill - sbcs r7, lr, r10 - add r10, r1, #32 - add lr, r1, #16 - str r5, [sp, #40] @ 4-byte Spill - str r7, [sp] @ 4-byte Spill - str r4, [sp, #36] @ 4-byte Spill - ldr r4, [r2, #16] - ldr r2, [r1, #64] - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [r1, #68] - str r2, [sp, #48] @ 4-byte Spill - ldr r2, [r1, #72] - str r2, [sp, #52] @ 4-byte Spill - ldr r2, [r1, #76] - str r2, [sp, #56] @ 4-byte Spill - ldr r2, [r1, #80] - str r2, [sp, #60] @ 4-byte Spill - ldr r2, [r1, #84] - str r2, [sp, #64] @ 4-byte Spill - ldm r10, {r5, r6, r10} - ldr r2, [r1, #44] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r1, #48] - str r2, [sp, #20] @ 4-byte Spill - ldr r2, [r1, 
#56] - str r2, [sp, #24] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #28] @ 4-byte Spill - ldr r2, [r1, #52] - str r2, [sp, #4] @ 4-byte Spill - ldm lr, {r1, r2, r12, lr} - ldr r11, [sp, #16] @ 4-byte Reload - ldr r7, [sp, #8] @ 4-byte Reload - str r11, [r0] - stmib r0, {r7, r8} - sbcs r1, r1, r4 - mov r8, #0 - ldr r4, [sp] @ 4-byte Reload - sbcs r2, r2, r9 - ldr r7, [sp, #4] @ 4-byte Reload - str r4, [r0, #12] - str r1, [r0, #16] - ldr r1, [sp, #32] @ 4-byte Reload - str r2, [r0, #20] - ldr r2, [sp, #36] @ 4-byte Reload - sbcs r1, r12, r1 - str r1, [r0, #24] - ldr r1, [sp, #40] @ 4-byte Reload - sbcs r2, lr, r2 - str r2, [r0, #28] - ldr r2, [sp, #68] @ 4-byte Reload - sbcs r1, r5, r1 - str r1, [r0, #32] - ldr r1, [sp, #72] @ 4-byte Reload - sbcs r2, r6, r2 - str r2, [r0, #36] - ldr r2, [sp, #12] @ 4-byte Reload - sbcs r1, r10, r1 - str r1, [r0, #40] - ldr r1, [sp, #76] @ 4-byte Reload - sbcs r4, r2, r1 - ldr r1, [sp, #80] @ 4-byte Reload - ldr r2, [sp, #20] @ 4-byte Reload - str r4, [sp, #40] @ 4-byte Spill - sbcs r2, r2, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r2, [sp, #68] @ 4-byte Spill - sbcs r9, r7, r1 - ldr r1, [sp, #88] @ 4-byte Reload - ldr r7, [sp, #24] @ 4-byte Reload - sbcs r12, r7, r1 - ldr r1, [sp, #92] @ 4-byte Reload - ldr r7, [sp, #28] @ 4-byte Reload - str r12, [sp, #80] @ 4-byte Spill - sbcs lr, r7, r1 - ldr r1, [sp, #96] @ 4-byte Reload - ldr r7, [sp, #44] @ 4-byte Reload - str lr, [sp, #84] @ 4-byte Spill - sbcs r5, r7, r1 - ldr r1, [sp, #100] @ 4-byte Reload - ldr r7, [sp, #48] @ 4-byte Reload - str r5, [sp, #96] @ 4-byte Spill - sbcs r6, r7, r1 - ldr r1, [sp, #104] @ 4-byte Reload - ldr r7, [sp, #52] @ 4-byte Reload - str r6, [sp, #100] @ 4-byte Spill - sbcs r11, r7, r1 - ldr r1, [sp, #112] @ 4-byte Reload - ldr r7, [sp, #56] @ 4-byte Reload - str r11, [sp, #104] @ 4-byte Spill - sbcs r1, r7, r1 - ldr r7, [sp, #60] @ 4-byte Reload - str r1, [sp, #112] @ 4-byte Spill - ldr r1, [sp, #108] @ 4-byte Reload - sbcs r10, r7, r1 - ldr r1, [sp, #116] @ 4-byte Reload - ldr r7, [sp, #64] @ 4-byte Reload - str r10, [sp, #108] @ 4-byte Spill - sbcs r1, r7, r1 - ldr r7, [r3, #4] - str r1, [sp, #116] @ 4-byte Spill - sbc r1, r8, #0 - ldr r8, [r3, #28] - str r1, [sp, #92] @ 4-byte Spill - ldr r1, [r3, #32] - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [r3, #36] - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [r3, #40] - str r1, [sp, #88] @ 4-byte Spill - ldr r1, [r3, #8] - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [r3, #12] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [r3, #16] - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [r3, #20] - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [r3, #24] - ldr r3, [r3] - str r1, [sp, #64] @ 4-byte Spill - adds r1, r4, r3 - ldr r3, [sp, #48] @ 4-byte Reload - ldr r4, [sp, #56] @ 4-byte Reload - adcs r7, r2, r7 - ldr r2, [sp, #52] @ 4-byte Reload - adcs r2, r9, r2 - adcs r3, r12, r3 - adcs r12, lr, r4 - ldr r4, [sp, #60] @ 4-byte Reload - adcs r4, r5, r4 - ldr r5, [sp, #64] @ 4-byte Reload - adcs lr, r6, r5 - ldr r6, [sp, #112] @ 4-byte Reload - ldr r5, [sp, #72] @ 4-byte Reload - adcs r8, r11, r8 - adcs r11, r6, r5 - ldr r6, [sp, #76] @ 4-byte Reload - ldr r5, [sp, #116] @ 4-byte Reload - adcs r10, r10, r6 - ldr r6, [sp, #88] @ 4-byte Reload - adc r6, r5, r6 - str r6, [sp, #88] @ 4-byte Spill - ldr r6, [sp, #92] @ 4-byte Reload - ands r5, r6, #1 - ldr r6, [sp, #40] @ 4-byte Reload - moveq r2, r9 - moveq r1, r6 - str r1, [r0, #44] - ldr r1, [sp, #68] @ 4-byte Reload - moveq r7, r1 - ldr r1, [sp, #80] @ 4-byte Reload - cmp r5, #0 - str r7, [r0, 
#48] - str r2, [r0, #52] - ldr r2, [sp, #88] @ 4-byte Reload - moveq r3, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r3, [r0, #56] - moveq r12, r1 - ldr r1, [sp, #96] @ 4-byte Reload - str r12, [r0, #60] - moveq r4, r1 - ldr r1, [sp, #100] @ 4-byte Reload - cmp r5, #0 - str r4, [r0, #64] - moveq lr, r1 - ldr r1, [sp, #104] @ 4-byte Reload - str lr, [r0, #68] - moveq r8, r1 - ldr r1, [sp, #112] @ 4-byte Reload - str r8, [r0, #72] - moveq r11, r1 - ldr r1, [sp, #108] @ 4-byte Reload - cmp r5, #0 - str r11, [r0, #76] - moveq r10, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r10, [r0, #80] - moveq r2, r1 - str r2, [r0, #84] - add sp, sp, #120 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end173: - .size mcl_fpDbl_sub11L, .Lfunc_end173-mcl_fpDbl_sub11L - .cantunwind - .fnend - - .align 2 - .type .LmulPv384x32,%function -.LmulPv384x32: @ @mulPv384x32 - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r11, lr} - push {r4, r5, r6, r7, r8, r9, r11, lr} - ldr r12, [r1] - ldmib r1, {r3, lr} - ldr r9, [r1, #12] - umull r4, r8, lr, r2 - umull lr, r6, r12, r2 - mov r5, r4 - mov r7, r6 - str lr, [r0] - umull lr, r12, r9, r2 - umlal r7, r5, r3, r2 - str r5, [r0, #8] - str r7, [r0, #4] - umull r5, r7, r3, r2 - adds r3, r6, r5 - adcs r3, r7, r4 - adcs r3, r8, lr - str r3, [r0, #12] - ldr r3, [r1, #16] - umull r7, r6, r3, r2 - adcs r3, r12, r7 - str r3, [r0, #16] - ldr r3, [r1, #20] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #20] - ldr r3, [r1, #24] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #24] - ldr r3, [r1, #28] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #28] - ldr r3, [r1, #32] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #32] - ldr r3, [r1, #36] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #36] - ldr r3, [r1, #40] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #40] - ldr r1, [r1, #44] - umull r3, r7, r1, r2 - adcs r1, r6, r3 - str r1, [r0, #44] - adc r1, r7, #0 - str r1, [r0, #48] - pop {r4, r5, r6, r7, r8, r9, r11, lr} - mov pc, lr -.Lfunc_end174: - .size .LmulPv384x32, .Lfunc_end174-.LmulPv384x32 - .cantunwind - .fnend - - .globl mcl_fp_mulUnitPre12L - .align 2 - .type mcl_fp_mulUnitPre12L,%function -mcl_fp_mulUnitPre12L: @ @mcl_fp_mulUnitPre12L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #60 - sub sp, sp, #60 - mov r4, r0 - mov r0, sp - bl .LmulPv384x32(PLT) - ldr r12, [sp, #48] - ldr lr, [sp, #44] - ldr r8, [sp, #40] - ldr r9, [sp, #36] - ldr r10, [sp, #32] - ldr r11, [sp, #28] - ldr r5, [sp, #24] - ldr r6, [sp, #20] - ldm sp, {r2, r3} - add r7, sp, #8 - ldm r7, {r0, r1, r7} - stm r4, {r2, r3} - add r2, r4, #8 - stm r2, {r0, r1, r7} - str r6, [r4, #20] - str r5, [r4, #24] - str r11, [r4, #28] - str r10, [r4, #32] - str r9, [r4, #36] - str r8, [r4, #40] - str lr, [r4, #44] - str r12, [r4, #48] - add sp, sp, #60 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end175: - .size mcl_fp_mulUnitPre12L, .Lfunc_end175-mcl_fp_mulUnitPre12L - .cantunwind - .fnend - - .globl mcl_fpDbl_mulPre12L - .align 2 - .type mcl_fpDbl_mulPre12L,%function -mcl_fpDbl_mulPre12L: @ @mcl_fpDbl_mulPre12L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #196 - sub sp, sp, #196 - mov r6, r2 - mov r5, r1 - mov r4, r0 - bl mcl_fpDbl_mulPre6L(PLT) - add r0, r4, #48 - add r1, r5, #24 - add r2, r6, #24 - bl mcl_fpDbl_mulPre6L(PLT) - add lr, r6, #24 - ldr r8, [r6, #40] - ldr r9, 
[r6, #44] - ldr r2, [r6, #16] - ldr r3, [r6, #20] - ldm lr, {r0, r1, r12, lr} - ldm r6, {r6, r7, r10, r11} - adds r0, r6, r0 - adcs r1, r7, r1 - str r0, [sp, #80] @ 4-byte Spill - adcs r12, r10, r12 - str r1, [sp, #72] @ 4-byte Spill - ldr r10, [r5, #36] - adcs r0, r11, lr - add lr, r5, #8 - str r12, [sp, #68] @ 4-byte Spill - str r0, [sp, #92] @ 4-byte Spill - adcs r0, r2, r8 - str r0, [sp, #88] @ 4-byte Spill - adcs r0, r3, r9 - ldr r9, [r5, #32] - str r0, [sp, #84] @ 4-byte Spill - mov r0, #0 - adc r6, r0, #0 - ldr r0, [r5, #40] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [r5, #44] - str r0, [sp, #76] @ 4-byte Spill - ldm lr, {r3, r11, lr} - ldr r8, [r5, #20] - ldr r0, [r5, #24] - ldr r2, [r5, #28] - ldm r5, {r5, r7} - adds r0, r5, r0 - ldr r5, [sp, #80] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - str r0, [sp, #124] - ldr r0, [sp, #64] @ 4-byte Reload - adcs r7, r7, r2 - add r2, sp, #100 - adcs r9, r3, r9 - str r7, [sp, #128] - adcs r11, r11, r10 - str r9, [sp, #132] - str r5, [sp, #100] - str r1, [sp, #104] - str r12, [sp, #108] - add r1, sp, #124 - str r11, [sp, #136] - adcs r10, lr, r0 - ldr r0, [sp, #76] @ 4-byte Reload - str r10, [sp, #140] - adcs r8, r8, r0 - ldr r0, [sp, #92] @ 4-byte Reload - str r8, [sp, #144] - str r0, [sp, #112] - ldr r0, [sp, #88] @ 4-byte Reload - str r0, [sp, #116] - ldr r0, [sp, #84] @ 4-byte Reload - str r0, [sp, #120] - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - add r0, sp, #148 - bl mcl_fpDbl_mulPre6L(PLT) - cmp r6, #0 - ldr r0, [sp, #96] @ 4-byte Reload - ldr r3, [sp, #92] @ 4-byte Reload - moveq r8, r6 - moveq r10, r6 - moveq r11, r6 - moveq r9, r6 - moveq r7, r6 - cmp r6, #0 - moveq r0, r6 - adds r2, r0, r5 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - ldr r5, [sp, #88] @ 4-byte Reload - adcs r1, r7, r0 - ldr r0, [sp, #68] @ 4-byte Reload - adcs r12, r9, r0 - adcs r3, r11, r3 - adcs lr, r10, r5 - ldr r5, [sp, #84] @ 4-byte Reload - adcs r0, r8, r5 - str r0, [sp, #92] @ 4-byte Spill - mov r0, #0 - adc r5, r0, #0 - ldr r0, [sp, #76] @ 4-byte Reload - cmp r0, #0 - and r6, r6, r0 - moveq r1, r7 - ldr r7, [sp, #96] @ 4-byte Reload - moveq r12, r9 - ldr r9, [sp, #92] @ 4-byte Reload - moveq lr, r10 - moveq r3, r11 - moveq r2, r7 - ldr r7, [sp, #172] - cmp r0, #0 - moveq r9, r8 - moveq r5, r0 - adds r8, r2, r7 - ldr r7, [sp, #176] - adcs r10, r1, r7 - ldr r7, [sp, #180] - adcs r0, r12, r7 - ldr r7, [sp, #184] - str r0, [sp, #96] @ 4-byte Spill - adcs r0, r3, r7 - ldr r7, [sp, #188] - str r0, [sp, #92] @ 4-byte Spill - adcs r0, lr, r7 - ldr r7, [sp, #192] - str r0, [sp, #84] @ 4-byte Spill - adcs r0, r9, r7 - ldr r7, [r4] - str r0, [sp, #80] @ 4-byte Spill - adc r0, r5, r6 - str r0, [sp, #76] @ 4-byte Spill - ldmib r4, {r6, r9, lr} - ldr r0, [sp, #148] - ldr r5, [sp, #152] - ldr r1, [sp, #156] - ldr r2, [sp, #160] - ldr r11, [r4, #24] - subs r3, r0, r7 - ldr r0, [r4, #16] - sbcs r12, r5, r6 - ldr r5, [r4, #68] - sbcs r6, r1, r9 - ldr r1, [sp, #164] - ldr r9, [r4, #32] - sbcs r2, r2, lr - ldr lr, [r4, #72] - str r5, [sp, #56] @ 4-byte Spill - sbcs r7, r1, r0 - ldr r0, [r4, #20] - ldr r1, [sp, #168] - sbcs r0, r1, r0 - ldr r1, [sp, #92] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - sbcs r0, r8, r11 - ldr r8, [r4, #28] - str r0, [sp, #60] @ 4-byte Spill - sbcs r0, r10, r8 - ldr r10, [r4, #52] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - sbcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [r4, #36] - str r0, [sp, #96] @ 4-byte Spill - sbcs r0, r1, r0 - ldr r1, 
[sp, #84] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r4, #40] - str r0, [sp, #88] @ 4-byte Spill - sbcs r0, r1, r0 - ldr r1, [sp, #80] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [r4, #44] - str r0, [sp, #92] @ 4-byte Spill - sbcs r0, r1, r0 - ldr r1, [r4, #92] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - str r1, [sp, #84] @ 4-byte Spill - sbc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [r4, #48] - str r0, [sp, #80] @ 4-byte Spill - subs r0, r3, r0 - ldr r3, [r4, #80] - str r0, [sp, #24] @ 4-byte Spill - sbcs r0, r12, r10 - ldr r12, [r4, #76] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [r4, #56] - str r0, [sp, #76] @ 4-byte Spill - sbcs r0, r6, r0 - ldr r6, [r4, #64] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [r4, #60] - str r6, [sp, #44] @ 4-byte Spill - str r0, [sp, #72] @ 4-byte Spill - sbcs r0, r2, r0 - ldr r2, [r4, #84] - sbcs r7, r7, r6 - ldr r6, [sp, #64] @ 4-byte Reload - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [r4, #88] - str r2, [sp, #68] @ 4-byte Spill - sbcs r6, r6, r5 - ldr r5, [sp, #60] @ 4-byte Reload - sbcs r5, r5, lr - str r5, [sp] @ 4-byte Spill - ldr r5, [sp, #52] @ 4-byte Reload - sbcs r5, r5, r12 - str r5, [sp, #4] @ 4-byte Spill - ldr r5, [sp, #48] @ 4-byte Reload - sbcs r5, r5, r3 - str r5, [sp, #8] @ 4-byte Spill - ldr r5, [sp, #40] @ 4-byte Reload - sbcs r2, r5, r2 - str r2, [sp, #48] @ 4-byte Spill - ldr r2, [sp, #36] @ 4-byte Reload - sbcs r2, r2, r0 - str r2, [sp, #52] @ 4-byte Spill - mov r2, r0 - ldr r0, [sp, #32] @ 4-byte Reload - sbcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - sbc r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adds r11, r11, r0 - ldr r0, [sp, #20] @ 4-byte Reload - str r11, [r4, #24] - adcs r8, r8, r0 - ldr r0, [sp, #16] @ 4-byte Reload - str r8, [r4, #28] - adcs r9, r9, r0 - ldr r0, [sp, #96] @ 4-byte Reload - str r9, [r4, #32] - adcs r5, r0, r1 - ldr r0, [sp, #88] @ 4-byte Reload - ldr r1, [sp] @ 4-byte Reload - str r5, [r4, #36] - ldr r5, [sp, #8] @ 4-byte Reload - adcs r7, r0, r7 - ldr r0, [sp, #92] @ 4-byte Reload - str r7, [r4, #40] - adcs r6, r0, r6 - ldr r0, [sp, #80] @ 4-byte Reload - str r6, [r4, #44] - adcs r0, r0, r1 - ldr r1, [sp, #4] @ 4-byte Reload - str r0, [r4, #48] - ldr r0, [sp, #76] @ 4-byte Reload - adcs r1, r10, r1 - adcs r0, r0, r5 - str r1, [r4, #52] - ldr r1, [sp, #72] @ 4-byte Reload - ldr r5, [sp, #48] @ 4-byte Reload - str r0, [r4, #56] - ldr r0, [sp, #44] @ 4-byte Reload - adcs r1, r1, r5 - ldr r5, [sp, #52] @ 4-byte Reload - str r1, [r4, #60] - ldr r1, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [r4, #64] - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [r4, #68] - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [r4, #72] - adcs r0, r12, #0 - str r0, [r4, #76] - adcs r0, r3, #0 - str r0, [r4, #80] - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [r4, #84] - adcs r0, r2, #0 - adc r1, r1, #0 - str r0, [r4, #88] - str r1, [r4, #92] - add sp, sp, #196 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end176: - .size mcl_fpDbl_mulPre12L, .Lfunc_end176-mcl_fpDbl_mulPre12L - .cantunwind - .fnend - - .globl mcl_fpDbl_sqrPre12L - .align 2 - .type mcl_fpDbl_sqrPre12L,%function -mcl_fpDbl_sqrPre12L: @ @mcl_fpDbl_sqrPre12L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, 
r11, lr}
-	.pad	#204
-	sub	sp, sp, #204
-	mov	r5, r1
-	mov	r4, r0
-	mov	r2, r5
-	bl	mcl_fpDbl_mulPre6L(PLT)
-	add	r1, r5, #24
-	add	r0, r4, #48
-	mov	r2, r1
-	bl	mcl_fpDbl_mulPre6L(PLT)
[... elided: load the two 6-limb halves of the operand, add them into two
identical stack buffers at sp+108 and sp+132, and keep the carry bit of
the half-sum in r11 ...]
-	add	r2, sp, #108
-	add	r1, sp, #132
-	add	r0, sp, #156
-	bl	mcl_fpDbl_mulPre6L(PLT)
[... elided: double the carry-dependent cross terms (moveq/movne selection
on r11), subtract aLo^2 and aHi^2 from (aLo+aHi)^2, and add the difference
into limbs 6..23 of the 24-limb result ([r4, #24] through [r4, #92]) with
a long adcs chain ...]
-	add	sp, sp, #204
-	pop	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
-	mov	pc, lr
-.Lfunc_end177:
-	.size	mcl_fpDbl_sqrPre12L, .Lfunc_end177-mcl_fpDbl_sqrPre12L
-	.cantunwind
-	.fnend
-
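The mcl_fpDbl_sqrPre12L hunk above is one level of Karatsuba-style squaring: it squares a 384-bit operand with three 6-limb mcl_fpDbl_mulPre6L calls (aLo^2, aHi^2, and (aLo+aHi)^2) plus carry fix-up, instead of a full 12x12 schoolbook pass. The mcl_fp_mont12L function that follows is the matching word-serial Montgomery multiplication: twelve unrolled rounds of "multiply by one word of y, then cancel the low accumulator word against the modulus", driven by the per-word Montgomery constant (conventionally -p^-1 mod 2^32) that the prologue loads from just below the modulus (ldr r5, [r3, #-4]). As a reading aid, here is a minimal Go sketch of that round structure (CIOS Montgomery multiplication over twelve 32-bit limbs), cross-checked against math/big. It is illustrative only, not mcl's code: the limb layout, all names, and the test modulus are assumptions made for the sketch.

// montmul_sketch.go: a minimal sketch (illustrative, not mcl's code) of
// the CIOS Montgomery multiplication that mcl_fp_mont12L unrolls.
package main

import (
	"fmt"
	"math/big"
	"math/bits"
)

const n = 12 // 12 x 32-bit limbs = 384 bits

type fp [n]uint32

// montMul returns x*y*R^-1 mod p with R = 2^384. pInv is -p^-1 mod 2^32,
// the per-round multiplier the generated code loads from p[-4].
func montMul(x, y, p fp, pInv uint32) fp {
	var t [n + 2]uint32 // accumulator, two spare words for carries
	for i := 0; i < n; i++ {
		// t += x * y[i] (the first .LmulPv384x32 call of each round).
		var c uint32
		for j := 0; j < n; j++ {
			hi, lo := bits.Mul32(x[j], y[i])
			s, c1 := bits.Add32(t[j], lo, 0)
			s, c2 := bits.Add32(s, c, 0)
			t[j] = s
			c = hi + c1 + c2 // fits: the true carry is < 2^32
		}
		s, c1 := bits.Add32(t[n], c, 0)
		t[n] = s
		t[n+1] += c1
		// m = t[0] * (-p^-1) mod 2^32, so t + m*p has a zero low word
		// (the mul by the [sp, #88] constant + second .LmulPv384x32).
		m := t[0] * pInv
		hi, lo := bits.Mul32(m, p[0])
		_, c = bits.Add32(t[0], lo, 0)
		c += hi
		for j := 1; j < n; j++ {
			hi, lo := bits.Mul32(m, p[j])
			s, c1 := bits.Add32(t[j], lo, 0)
			s, c2 := bits.Add32(s, c, 0)
			t[j-1] = s // shift the accumulator down one word
			c = hi + c1 + c2
		}
		s, c1 = bits.Add32(t[n], c, 0)
		t[n-1] = s
		t[n] = t[n+1] + c1
		t[n+1] = 0
	}
	// Final conditional subtraction (the subs/sbcs + movne chain in the
	// epilogue below): if t >= p return t-p, else t.
	var r fp
	var b uint32
	for j := 0; j < n; j++ {
		r[j], b = bits.Sub32(t[j], p[j], b)
	}
	if t[n] != 0 || b == 0 {
		return r
	}
	copy(r[:], t[:n])
	return r
}

func toFp(v *big.Int) fp {
	var r fp
	t := new(big.Int).Set(v)
	mask := big.NewInt(0xFFFFFFFF)
	for j := 0; j < n; j++ {
		r[j] = uint32(new(big.Int).And(t, mask).Uint64())
		t.Rsh(t, 32)
	}
	return r
}

func toBig(v fp) *big.Int {
	r := new(big.Int)
	for j := n - 1; j >= 0; j-- {
		r.Lsh(r, 32).Or(r, big.NewInt(int64(v[j])))
	}
	return r
}

func main() {
	one := big.NewInt(1)
	R := new(big.Int).Lsh(one, 384)
	// Arbitrary odd full-bit test modulus, not a real curve prime.
	p := new(big.Int).Sub(R, big.NewInt(317))
	w := new(big.Int).Lsh(one, 32)
	pInv := uint32(new(big.Int).Sub(w, new(big.Int).ModInverse(p, w)).Uint64())

	x := new(big.Int).Div(p, big.NewInt(3))
	y := new(big.Int).Div(p, big.NewInt(7))
	got := toBig(montMul(toFp(x), toFp(y), toFp(p), pInv))

	// Reference value: x*y*R^-1 mod p.
	want := new(big.Int).Mul(x, y)
	want.Mul(want, new(big.Int).ModInverse(R, p)).Mod(want, p)
	fmt.Println("match:", got.Cmp(want) == 0)
}

Run with `go run`; it should print "match: true". The final conditional subtraction in montMul corresponds to the subs/sbcs and movne chain at the end of the generated function below.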
-	.globl	mcl_fp_mont12L
-	.align	2
-	.type	mcl_fp_mont12L,%function
-mcl_fp_mont12L:                         @ @mcl_fp_mont12L
-	.fnstart
-@ BB#0:
-	.save	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
-	push	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
-	.pad	#428
-	sub	sp, sp, #428
-	.pad	#1024
-	sub	sp, sp, #1024
-	str	r2, [sp, #92]                   @ 4-byte Spill
-	ldr	r5, [r3, #-4]
-	ldr	r2, [r2]
-	str	r0, [sp, #68]                   @ 4-byte Spill
-	add	r0, sp, #1392
-	str	r3, [sp, #100]                  @ 4-byte Spill
-	str	r1, [sp, #96]                   @ 4-byte Spill
-	mov	r4, r3
-	str	r5, [sp, #88]                   @ 4-byte Spill
-	bl	.LmulPv384x32(PLT)
[... elided: twelve unrolled rounds. Each round loads the next 32-bit
multiplier word (ldr r2, [r0, #4*i]), calls .LmulPv384x32 and folds
x*y[i] into the 13-word accumulator with an adcs chain, multiplies the
low accumulator word by the -p^-1 constant kept in the [sp, #88] spill,
calls .LmulPv384x32 again against the modulus, and shifts the
accumulator down one word, tracking the overflow bit with
mov r0, #0 / adc r0, r0, #0 ...]
[... elided: the epilogue subtracts the modulus limb by limb
(subs/sbcs), ands the final borrow into r4, and uses a movne chain to
select between t and t-p while storing the twelve result words ...]
-	add	sp, sp, #428
-	add	sp, sp, #1024
-	pop	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
-	mov	pc, lr
-.Lfunc_end178:
-	.size	mcl_fp_mont12L, .Lfunc_end178-mcl_fp_mont12L
-	.cantunwind
-	.fnend
-
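mcl_fp_montNF12L below has the same twelve-round multiply/reduce/shift structure. The NF suffix appears to mark mcl's variant for moduli that leave the top bit of the 384-bit word clear: with that headroom its rounds fold the top product word with a plain adc (adc r0, r11, #0) where mcl_fp_mont12L above had to maintain a separate overflow flag (mov r0, #0 / adc r0, r0, #0). That reading of the suffix is an inference from the generated code, not a documented contract.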
-	.globl	mcl_fp_montNF12L
-	.align	2
-	.type	mcl_fp_montNF12L,%function
-mcl_fp_montNF12L:                       @ @mcl_fp_montNF12L
-	.fnstart
-@ BB#0:
-	.save	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
-	push	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
-	.pad	#428
-	sub	sp, sp, #428
-	.pad	#1024
-	sub	sp, sp, #1024
-	add	r12, sp, #92
-	mov	r4, r3
-	mov	r7, r1
-	stm	r12, {r1, r2, r3}
-	str	r0, [sp, #68]                   @ 4-byte Spill
-	add	r0, sp, #1392
-	ldr	r5, [r3, #-4]
-	ldr	r2, [r2]
-	str	r5, [sp, #88]                   @ 4-byte Spill
-	bl	.LmulPv384x32(PLT)
[... elided: the unrolled rounds, with the same multiply/reduce/shift
shape as mcl_fp_mont12L but the top word folded by a plain
adc r0, r11, #0 rather than a separate overflow flag; the hunk continues
past the end of this fragment ...]
-	ldr	r2, [r0, #28]
-	add	r0, sp, #608
-	bl	.LmulPv384x32(PLT)
-
adds r0, r4, r10 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #608 - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #632 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #36] @ 4-byte Spill - ldm r11, {r4, r5, r6, r8, r9, r10, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #84] @ 4-byte Reload - adds r7, r7, r0 - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #100] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r8 - mov r8, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #40] @ 4-byte Spill - adc r0, r11, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #552 - bl .LmulPv384x32(PLT) - ldr r0, [sp, #600] - add r10, sp, #560 - ldr r6, [sp, #576] - ldr r9, [sp, #572] - ldr r11, [sp, #552] - ldr r7, [sp, #556] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #596] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #592] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #588] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #584] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #580] - str r0, [sp, #12] @ 4-byte Spill - ldm r10, {r4, r5, r10} - ldr r0, [sp, #96] @ 4-byte Reload - ldr r1, [sp, #92] @ 4-byte Reload - ldr r2, [r0, #32] - add r0, sp, #496 - bl .LmulPv384x32(PLT) - adds r0, r8, r11 - ldr r1, [sp, #12] @ 4-byte Reload - add r11, sp, #524 - add lr, sp, #500 - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte 
Reload - adcs r0, r0, r5 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #36] @ 4-byte Spill - ldm r11, {r5, r6, r7, r8, r9, r11} - ldr r4, [sp, #496] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r10, [sp, #84] @ 4-byte Reload - adds r10, r10, r4 - ldr r4, [sp, #80] @ 4-byte Reload - adcs r0, r4, r0 - mov r4, r10 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #100] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #40] @ 4-byte Spill - adc r0, r11, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - mul r2, r10, r0 - add r0, sp, #440 - bl .LmulPv384x32(PLT) - ldr r0, [sp, #488] - add r9, sp, #448 - ldr r5, [sp, #464] - ldr r8, [sp, #460] - ldr r10, [sp, #440] - ldr r11, [sp, #444] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #484] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #480] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #476] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #472] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #468] - str r0, [sp, #12] @ 4-byte Spill - ldm r9, {r6, r7, r9} - ldr r0, [sp, #96] @ 4-byte Reload - ldr r1, [sp, #92] @ 4-byte Reload - ldr r2, [r0, #36] - add r0, sp, #384 - bl .LmulPv384x32(PLT) - adds r0, r4, r10 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #384 - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #408 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs 
r0, r0, r5 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #36] @ 4-byte Spill - ldm r11, {r4, r5, r6, r8, r9, r10, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #84] @ 4-byte Reload - adds r7, r7, r0 - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #100] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #88] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - mul r2, r7, r4 - adcs r0, r0, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r8 - mov r8, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #40] @ 4-byte Spill - adc r0, r11, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #328 - bl .LmulPv384x32(PLT) - ldr r0, [sp, #376] - ldr r1, [sp, #92] @ 4-byte Reload - ldr r6, [sp, #348] - ldr r10, [sp, #344] - ldr r11, [sp, #328] - ldr r7, [sp, #332] - ldr r9, [sp, #336] - ldr r5, [sp, #340] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #372] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #368] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #364] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #360] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #356] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #352] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - ldr r2, [r0, #40] - add r0, sp, #272 - bl .LmulPv384x32(PLT) - adds r0, r8, r11 - ldr r1, [sp, #80] @ 4-byte Reload - ldr r2, [sp, #8] @ 4-byte Reload - ldr lr, [sp, #276] - add r12, sp, #288 - ldr r8, [sp, #316] - ldr r11, [sp, #312] - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r7 - adcs r7, r1, r9 - ldr r1, [sp, #76] @ 4-byte Reload - ldr r9, [sp, #320] - adcs r1, r1, r5 - ldr r5, [sp, #280] - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - adcs r1, r1, r10 - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs r1, r1, r6 - ldr r6, [sp, #284] - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #12] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - 
ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adc r1, r1, r2 - ldr r2, [sp, #272] - str r1, [sp, #36] @ 4-byte Spill - adds r0, r0, r2 - adcs r7, r7, lr - mul r10, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r4, [sp, #308] - ldm r12, {r0, r1, r2, r3, r12} - str r7, [sp, #80] @ 4-byte Spill - ldr r7, [sp, #76] @ 4-byte Reload - adcs r7, r7, r5 - str r7, [sp, #76] @ 4-byte Spill - ldr r7, [sp, #72] @ 4-byte Reload - adcs r7, r7, r6 - str r7, [sp, #72] @ 4-byte Spill - ldr r7, [sp, #64] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #100] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r2 - mov r2, r10 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #36] @ 4-byte Spill - adc r0, r9, #0 - str r0, [sp, #32] @ 4-byte Spill - add r0, sp, #216 - bl .LmulPv384x32(PLT) - ldr r0, [sp, #264] - ldr r1, [sp, #92] @ 4-byte Reload - ldr r5, [sp, #244] - ldr r6, [sp, #240] - ldr r8, [sp, #236] - ldr r9, [sp, #232] - ldr r10, [sp, #216] - ldr r7, [sp, #220] - ldr r4, [sp, #224] - ldr r11, [sp, #228] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #260] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #256] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #252] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #248] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - ldr r2, [r0, #44] - add r0, sp, #160 - bl .LmulPv384x32(PLT) - ldr r0, [sp, #84] @ 4-byte Reload - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - add r12, sp, #176 - ldr lr, [sp, #164] - adds r0, r0, r10 - add r10, sp, #200 - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #172] - adcs r1, r1, r4 - str r1, [sp, #96] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - adcs r1, r1, r11 - str r1, [sp, #92] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs r1, r1, r9 - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, r8 - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r6 - ldr r6, [sp, #168] - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r5 - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #56] @ 
4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #32] @ 4-byte Reload - adc r1, r1, r2 - ldr r2, [sp, #160] - str r1, [sp, #48] @ 4-byte Spill - adds r4, r0, r2 - ldr r0, [sp, #88] @ 4-byte Reload - mul r1, r4, r0 - str r1, [sp, #44] @ 4-byte Spill - ldm r10, {r8, r9, r10} - ldr r11, [sp, #196] - ldm r12, {r0, r1, r2, r3, r12} - ldr r5, [sp, #96] @ 4-byte Reload - adcs r5, r5, lr - str r5, [sp, #36] @ 4-byte Spill - ldr r5, [sp, #92] @ 4-byte Reload - adcs r6, r5, r6 - ldr r5, [sp, #100] @ 4-byte Reload - str r6, [sp, #32] @ 4-byte Spill - ldr r6, [sp, #84] @ 4-byte Reload - adcs r7, r6, r7 - ldr r6, [sp, #80] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r11, r0, r11 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r8, r0, r8 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r9, r0, r9 - adc r0, r10, #0 - str r0, [sp, #92] @ 4-byte Spill - add r0, sp, #104 - bl .LmulPv384x32(PLT) - add r6, sp, #104 - ldm r6, {r0, r1, r2, r6} - adds r0, r4, r0 - ldr r0, [sp, #36] @ 4-byte Reload - adcs lr, r0, r1 - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #40] @ 4-byte Reload - adcs r10, r0, r2 - ldr r0, [sp, #120] - mov r2, r5 - adcs r3, r7, r6 - str r10, [sp, #52] @ 4-byte Spill - str r3, [sp, #56] @ 4-byte Spill - adcs r6, r1, r0 - ldr r0, [sp, #124] - ldr r1, [sp, #80] @ 4-byte Reload - str r6, [sp, #60] @ 4-byte Spill - adcs r0, r1, r0 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #128] - adcs r0, r1, r0 - ldr r1, [sp, #88] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #132] - adcs r12, r1, r0 - ldr r0, [sp, #136] - ldr r1, [sp, #96] @ 4-byte Reload - str r12, [sp, #64] @ 4-byte Spill - adcs r0, r1, r0 - ldr r1, [sp, #92] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #140] - adcs r0, r11, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #144] - adcs r0, r8, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #148] - adcs r0, r9, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #152] - adc r0, r1, r0 - str r0, [sp, #92] @ 4-byte Spill - ldmib r2, {r0, r1, r7, r9} - ldr r4, [r2, #24] - ldr r8, [r2] - ldr r5, [r2, #20] - str r4, [sp, #44] @ 4-byte Spill - ldr r4, [r2, #28] - subs r8, lr, r8 - sbcs r0, r10, r0 - sbcs r1, r3, r1 - sbcs r7, r6, r7 - str r4, [sp, #48] @ 4-byte Spill - mov r4, r2 - ldr r2, [r4, #44] - ldr r10, [r4, #32] - ldr r6, [r4, #36] - ldr r11, [r4, #40] - ldr r4, [sp, #48] @ 4-byte Reload - str r2, [sp, #40] @ 4-byte Spill - ldr r2, [sp, #76] @ 4-byte Reload - sbcs r9, r2, r9 - ldr r2, [sp, #80] @ 4-byte Reload - sbcs r5, r2, r5 - ldr r2, [sp, #44] @ 4-byte Reload - sbcs r3, r12, r2 - ldr r2, [sp, #84] @ 4-byte Reload - sbcs r12, r2, r4 - ldr r2, [sp, #88] @ 4-byte Reload - ldr r4, [sp, #40] @ 4-byte Reload - sbcs r10, r2, r10 - ldr r2, [sp, #72] @ 4-byte Reload - sbcs r2, r2, r6 - ldr r6, [sp, #52] @ 4-byte Reload - str r2, [sp, #48] @ 4-byte Spill - ldr r2, [sp, #96] @ 4-byte Reload - sbcs r2, r2, r11 - ldr r11, [sp, #68] @ 4-byte 
Reload - str r2, [sp, #100] @ 4-byte Spill - ldr r2, [sp, #92] @ 4-byte Reload - sbc r2, r2, r4 - asr r4, r2, #31 - cmp r4, #0 - movlt r8, lr - movlt r0, r6 - str r8, [r11] - str r0, [r11, #4] - ldr r0, [sp, #56] @ 4-byte Reload - movlt r1, r0 - ldr r0, [sp, #60] @ 4-byte Reload - cmp r4, #0 - str r1, [r11, #8] - ldr r1, [sp, #100] @ 4-byte Reload - movlt r7, r0 - ldr r0, [sp, #76] @ 4-byte Reload - str r7, [r11, #12] - movlt r9, r0 - ldr r0, [sp, #80] @ 4-byte Reload - str r9, [r11, #16] - movlt r5, r0 - ldr r0, [sp, #64] @ 4-byte Reload - cmp r4, #0 - str r5, [r11, #20] - movlt r3, r0 - ldr r0, [sp, #84] @ 4-byte Reload - str r3, [r11, #24] - ldr r3, [sp, #48] @ 4-byte Reload - movlt r12, r0 - ldr r0, [sp, #88] @ 4-byte Reload - str r12, [r11, #28] - movlt r10, r0 - ldr r0, [sp, #72] @ 4-byte Reload - cmp r4, #0 - str r10, [r11, #32] - movlt r3, r0 - ldr r0, [sp, #96] @ 4-byte Reload - str r3, [r11, #36] - movlt r1, r0 - ldr r0, [sp, #92] @ 4-byte Reload - str r1, [r11, #40] - movlt r2, r0 - str r2, [r11, #44] - add sp, sp, #428 - add sp, sp, #1024 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end179: - .size mcl_fp_montNF12L, .Lfunc_end179-mcl_fp_montNF12L - .cantunwind - .fnend - - .globl mcl_fp_montRed12L - .align 2 - .type mcl_fp_montRed12L,%function -mcl_fp_montRed12L: @ @mcl_fp_montRed12L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #836 - sub sp, sp, #836 - mov r3, r2 - str r0, [sp, #148] @ 4-byte Spill - ldr r2, [r1, #4] - ldr r10, [r1] - ldr r0, [r3] - str r3, [sp, #152] @ 4-byte Spill - mov r5, r3 - str r2, [sp, #64] @ 4-byte Spill - ldr r2, [r1, #8] - str r0, [sp, #144] @ 4-byte Spill - ldr r0, [r3, #4] - str r2, [sp, #60] @ 4-byte Spill - ldr r2, [r1, #12] - str r0, [sp, #140] @ 4-byte Spill - ldr r0, [r3, #8] - str r2, [sp, #56] @ 4-byte Spill - str r0, [sp, #136] @ 4-byte Spill - ldr r0, [r3, #12] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [r3, #16] - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [r3, #20] - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [r3, #24] - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [r3, #-4] - str r0, [sp, #156] @ 4-byte Spill - mul r2, r10, r0 - ldr r0, [r3, #28] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [r3, #32] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [r3, #36] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [r3, #40] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [r3, #44] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [r1, #64] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [r1, #68] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [r1, #72] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [r1, #76] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [r1, #80] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [r1, #84] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [r1, #88] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [r1, #92] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [r1, #32] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r1, #36] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [r1, #40] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [r1, #44] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [r1, #48] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [r1, #52] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [r1, #56] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [r1, #60] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [r1, #28] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [r1, #24] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [r1, #20] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, 
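The run ending just above is the tail of mcl_fp_montNF12L, generated straight-line code for a 12-limb (384-bit) Montgomery multiplication. Each round makes two calls to .LmulPv384x32: one accumulates a 32x384-bit partial product of the operands, the other adds q*p, where q is the low accumulator limb times the per-modulus constant reloaded from [sp, #88] before every `mul r2, ..., r0`; the movlt ladder at the end is the final conditional subtraction. Below is a minimal portable sketch of the same word-by-word (CIOS-style) loop; it is illustrative only (mont_mul and inv are not mcl's API names), assuming 32-bit limbs and inputs already below the modulus.

#include <stdint.h>

#define N 12  /* 12 x 32-bit limbs = 384 bits */

/* z = x*y*R^{-1} mod p, with R = 2^(32*N).
 * inv is -p^{-1} mod 2^32, the same per-modulus constant the
 * generated code keeps spilled and reloads before each q step. */
static void mont_mul(uint32_t z[N], const uint32_t x[N],
                     const uint32_t y[N], const uint32_t p[N],
                     uint32_t inv)
{
    uint32_t t[N + 2] = {0};
    for (int i = 0; i < N; i++) {
        /* t += x * y[i]  (cf. the first .LmulPv384x32 call per round) */
        uint64_t c = 0;
        for (int j = 0; j < N; j++) {
            uint64_t u = (uint64_t)x[j] * y[i] + t[j] + c;
            t[j] = (uint32_t)u;
            c = u >> 32;
        }
        uint64_t u = (uint64_t)t[N] + c;
        t[N] = (uint32_t)u;
        t[N + 1] = (uint32_t)(u >> 32);

        /* q makes t + q*p divisible by 2^32 (the mul r2, ..., r0 step) */
        uint32_t q = t[0] * inv;
        c = 0;
        for (int j = 0; j < N; j++) {
            uint64_t w = (uint64_t)q * p[j] + t[j] + c;
            t[j] = (uint32_t)w;
            c = w >> 32;
        }
        u = (uint64_t)t[N] + c;
        t[N] = (uint32_t)u;
        t[N + 1] += (uint32_t)(u >> 32);

        /* drop the now-zero low limb: shift t right by one word */
        for (int j = 0; j <= N; j++) t[j] = t[j + 1];
        t[N + 1] = 0;
    }
    /* one conditional subtraction, as the movlt tail performs */
    uint32_t s[N];
    uint64_t b = 0;
    for (int j = 0; j < N; j++) {
        uint64_t d = (uint64_t)t[j] - p[j] - b;
        s[j] = (uint32_t)d;
        b = (d >> 32) & 1;
    }
    int ge = t[N] != 0 || !b;            /* t >= p ? */
    for (int j = 0; j < N; j++) z[j] = ge ? s[j] : t[j];
}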
[r1, #16] - mov r1, r3 - str r0, [sp, #8] @ 4-byte Spill - add r0, sp, #776 - bl .LmulPv384x32(PLT) - ldr r0, [sp, #824] - add r11, sp, #808 - add lr, sp, #776 - str r0, [sp, #4] @ 4-byte Spill - ldm r11, {r6, r8, r9, r11} - ldr r7, [sp, #804] - ldr r4, [sp, #800] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r10, r0 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r10, r0, r1 - ldr r0, [sp, #60] @ 4-byte Reload - ldr r1, [sp, #4] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #156] @ 4-byte Reload - mul r2, r10, r0 - add r0, sp, #720 - bl .LmulPv384x32(PLT) - ldr r0, [sp, #768] - add lr, sp, #756 - add r9, sp, #732 - str r0, [sp, #4] @ 4-byte Spill - ldm lr, {r3, r12, lr} - ldr r4, [sp, #720] - ldr r6, [sp, #752] - ldr r11, [sp, #748] - ldr r2, [sp, #744] - ldr r1, [sp, #724] - ldr r7, [sp, #728] - ldm r9, {r0, r8, r9} - adds r4, r10, r4 - ldr r4, [sp, #64] @ 4-byte Reload - adcs r10, r4, r1 - ldr r1, [sp, #60] @ 4-byte Reload - mov r4, r5 - adcs r1, r1, r7 - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #4] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #40] @ 4-byte 
Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r4 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #8] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #156] @ 4-byte Reload - mul r2, r10, r0 - add r0, sp, #664 - bl .LmulPv384x32(PLT) - ldr r0, [sp, #712] - add r11, sp, #696 - add lr, sp, #664 - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r6, r8, r9, r11} - ldr r7, [sp, #692] - ldr r5, [sp, #688] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r10, r0 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r10, r0, r1 - ldr r0, [sp, #60] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #156] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - mul r2, r10, r5 - adcs r0, r0, r7 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r4 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte 
Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - add r0, sp, #608 - bl .LmulPv384x32(PLT) - ldr r0, [sp, #656] - add lr, sp, #644 - add r9, sp, #620 - str r0, [sp, #12] @ 4-byte Spill - ldm lr, {r3, r12, lr} - ldr r4, [sp, #608] - ldr r6, [sp, #640] - ldr r11, [sp, #636] - ldr r2, [sp, #632] - ldr r1, [sp, #612] - ldr r7, [sp, #616] - ldm r9, {r0, r8, r9} - adds r4, r10, r4 - ldr r4, [sp, #64] @ 4-byte Reload - adcs r10, r4, r1 - ldr r1, [sp, #60] @ 4-byte Reload - ldr r4, [sp, #152] @ 4-byte Reload - adcs r1, r1, r7 - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r4 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - mov r0, r5 - mul r2, r10, r0 - add r0, sp, #552 - bl .LmulPv384x32(PLT) - ldr r0, [sp, #600] - add r11, sp, #584 - add lr, sp, #552 - str r0, [sp, #16] @ 4-byte Spill - ldm r11, {r6, r8, r9, r11} - ldr r7, [sp, #580] - ldr r5, [sp, #576] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r10, r0 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r10, r0, r1 - ldr r0, [sp, #60] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #156] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - mul r2, r10, r5 - adcs r0, r0, r7 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #36] @ 4-byte Spill 
- ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r4 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - add r0, sp, #496 - bl .LmulPv384x32(PLT) - ldr r0, [sp, #544] - add lr, sp, #532 - add r9, sp, #508 - str r0, [sp, #20] @ 4-byte Spill - ldm lr, {r3, r12, lr} - ldr r4, [sp, #496] - ldr r6, [sp, #528] - ldr r11, [sp, #524] - ldr r2, [sp, #520] - ldr r1, [sp, #500] - ldr r7, [sp, #504] - ldm r9, {r0, r8, r9} - adds r4, r10, r4 - ldr r4, [sp, #64] @ 4-byte Reload - adcs r10, r4, r1 - ldr r1, [sp, #60] @ 4-byte Reload - ldr r4, [sp, #152] @ 4-byte Reload - adcs r1, r1, r7 - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r10, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - add r0, sp, #440 - bl .LmulPv384x32(PLT) - ldr r0, [sp, #488] - add r11, sp, #472 - add lr, sp, #440 - str r0, [sp, #24] @ 4-byte Spill - ldm r11, {r6, r8, r9, r11} - ldr r7, [sp, #468] - ldr r5, [sp, #464] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r10, r0 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r10, r0, r1 - ldr r0, [sp, #60] @ 4-byte Reload - ldr r1, [sp, #24] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 
4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #156] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - mul r2, r10, r5 - adcs r0, r0, r7 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r4 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #32] @ 4-byte Spill - add r0, sp, #384 - bl .LmulPv384x32(PLT) - ldr r0, [sp, #432] - add lr, sp, #420 - add r9, sp, #396 - str r0, [sp, #28] @ 4-byte Spill - ldm lr, {r3, r12, lr} - ldr r4, [sp, #384] - ldr r6, [sp, #416] - ldr r11, [sp, #412] - ldr r2, [sp, #408] - ldr r1, [sp, #388] - ldr r7, [sp, #392] - ldm r9, {r0, r8, r9} - adds r4, r10, r4 - ldr r4, [sp, #64] @ 4-byte Reload - adcs r10, r4, r1 - ldr r1, [sp, #60] @ 4-byte Reload - mov r4, r5 - adcs r1, r1, r7 - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r10, r4 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #152] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #328 - bl .LmulPv384x32(PLT) - ldr r0, [sp, #376] - add r11, sp, #352 - add lr, sp, #328 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, 
#372] - str r0, [sp, #28] @ 4-byte Spill - ldm r11, {r5, r7, r8, r9, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r10, r0 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r10, r0, r1 - ldr r0, [sp, #60] @ 4-byte Reload - ldr r1, [sp, #28] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r10, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r5 - mov r5, r6 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #272 - bl .LmulPv384x32(PLT) - ldr r0, [sp, #320] - add lr, sp, #300 - add r6, sp, #272 - add r12, sp, #284 - str r0, [sp, #12] @ 4-byte Spill - ldm lr, {r4, r8, r9, r11, lr} - ldr r7, [sp, #296] - ldm r6, {r2, r3, r6} - ldm r12, {r0, r1, r12} - adds r2, r10, r2 - ldr r2, [sp, #64] @ 4-byte Reload - adcs r10, r2, r3 - ldr r2, [sp, #60] @ 4-byte Reload - adcs r6, r2, r6 - ldr r2, [sp, #56] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #156] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - mul r2, r10, r4 - adcs r0, r0, r8 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - mov r11, r5 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - add r0, sp, #216 - bl .LmulPv384x32(PLT) - add r7, sp, #216 - add lr, sp, #252 - ldm r7, {r0, r1, r3, r7} - ldr r8, [sp, #264] - adds r0, r10, r0 - adcs r10, r6, r1 - mul r0, r10, r4 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload 
- adcs r0, r0, r3 - str r0, [sp, #156] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - add r7, sp, #232 - str r0, [sp, #52] @ 4-byte Spill - ldm lr, {r6, r12, lr} - ldm r7, {r0, r1, r2, r3, r7} - ldr r4, [sp, #96] @ 4-byte Reload - adcs r9, r4, r0 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r11 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #48] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r4, r0, r3 - ldr r0, [sp, #80] @ 4-byte Reload - adcs r5, r0, r7 - ldr r0, [sp, #76] @ 4-byte Reload - adcs r6, r0, r6 - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - add r0, sp, #160 - bl .LmulPv384x32(PLT) - add r3, sp, #160 - ldm r3, {r0, r1, r2, r3} - adds r0, r10, r0 - ldr r0, [sp, #156] @ 4-byte Reload - adcs r1, r0, r1 - ldr r0, [sp, #52] @ 4-byte Reload - str r1, [sp, #48] @ 4-byte Spill - adcs r12, r0, r2 - ldr r2, [sp, #176] - ldr r0, [sp, #40] @ 4-byte Reload - adcs r3, r9, r3 - str r12, [sp, #52] @ 4-byte Spill - str r3, [sp, #56] @ 4-byte Spill - adcs r7, r0, r2 - ldr r2, [sp, #180] - ldr r0, [sp, #44] @ 4-byte Reload - str r7, [sp, #60] @ 4-byte Spill - adcs r8, r0, r2 - ldr r2, [sp, #184] - ldr r0, [sp, #84] @ 4-byte Reload - str r8, [sp, #64] @ 4-byte Spill - adcs r4, r4, r2 - ldr r2, [sp, #188] - str r4, [sp, #68] @ 4-byte Spill - adcs r5, r5, r2 - ldr r2, [sp, #192] - str r5, [sp, #72] @ 4-byte Spill - adcs r6, r6, r2 - ldr r2, [sp, #196] - str r6, [sp, #76] @ 4-byte Spill - adcs r9, r0, r2 - ldr r2, [sp, #200] - ldr r0, [sp, #96] @ 4-byte Reload - str r9, [sp, #84] @ 4-byte Spill - adcs r10, r0, r2 - ldr r2, [sp, #204] - ldr r0, [sp, #80] @ 4-byte Reload - str r10, [sp, #96] @ 4-byte Spill - adcs lr, r0, r2 - ldr r2, [sp, #208] - ldr r0, [sp, #92] @ 4-byte Reload - str lr, [sp, #156] @ 4-byte Spill - adcs r11, r0, r2 - ldr r0, [sp, #88] @ 4-byte Reload - ldr r2, [sp, #136] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #144] @ 4-byte Reload - subs r0, r1, r0 - ldr r1, [sp, #140] @ 4-byte Reload - sbcs r1, r12, r1 - sbcs r2, r3, r2 - ldr r3, [sp, #120] @ 4-byte Reload - sbcs r3, r7, r3 - ldr r7, [sp, #124] @ 4-byte Reload - sbcs r12, r8, r7 - ldr r7, [sp, #128] @ 4-byte Reload - sbcs r7, r4, r7 - ldr r4, [sp, #132] @ 4-byte Reload - sbcs r4, r5, r4 - ldr r5, [sp, #100] @ 4-byte Reload - sbcs r8, r6, r5 - ldr r6, [sp, #104] @ 4-byte Reload - sbcs r5, r9, r6 - ldr r6, [sp, #108] @ 4-byte Reload - str r5, [sp, #144] @ 4-byte Spill - ldr r5, [sp, #92] @ 4-byte Reload - sbcs r9, r10, r6 - ldr r6, [sp, #112] @ 4-byte Reload - sbcs r6, lr, r6 - mov lr, r11 - ldr r11, [sp, #148] @ 4-byte Reload - str r6, [sp, #152] @ 4-byte Spill - ldr r6, [sp, #116] @ 4-byte Reload - sbcs r10, lr, r6 - sbc r6, r5, #0 - ldr r5, [sp, #48] @ 4-byte Reload - ands r6, r6, #1 - movne r0, r5 - str r0, [r11] - ldr r0, [sp, #52] @ 4-byte Reload - movne r1, r0 - ldr r0, [sp, #56] @ 4-byte Reload - str r1, [r11, #4] - ldr r1, [sp, #156] @ 4-byte Reload - movne r2, r0 - ldr r0, [sp, #60] @ 4-byte Reload - cmp r6, #0 - str r2, [r11, #8] - ldr r2, 
[sp, #144] @ 4-byte Reload - movne r3, r0 - ldr r0, [sp, #64] @ 4-byte Reload - str r3, [r11, #12] - movne r12, r0 - ldr r0, [sp, #68] @ 4-byte Reload - str r12, [r11, #16] - movne r7, r0 - ldr r0, [sp, #72] @ 4-byte Reload - cmp r6, #0 - str r7, [r11, #20] - movne r4, r0 - ldr r0, [sp, #76] @ 4-byte Reload - str r4, [r11, #24] - movne r8, r0 - ldr r0, [sp, #84] @ 4-byte Reload - str r8, [r11, #28] - movne r2, r0 - ldr r0, [sp, #96] @ 4-byte Reload - cmp r6, #0 - movne r10, lr - str r2, [r11, #32] - movne r9, r0 - ldr r0, [sp, #152] @ 4-byte Reload - movne r0, r1 - str r9, [r11, #36] - str r0, [r11, #40] - str r10, [r11, #44] - add sp, sp, #836 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end180: - .size mcl_fp_montRed12L, .Lfunc_end180-mcl_fp_montRed12L - .cantunwind - .fnend - - .globl mcl_fp_addPre12L - .align 2 - .type mcl_fp_addPre12L,%function -mcl_fp_addPre12L: @ @mcl_fp_addPre12L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #28 - sub sp, sp, #28 - ldm r1, {r3, r12, lr} - ldr r9, [r1, #12] - ldmib r2, {r5, r6, r7} - ldr r4, [r2, #16] - ldr r11, [r2] - str r4, [sp] @ 4-byte Spill - ldr r4, [r2, #20] - adds r8, r11, r3 - ldr r3, [r2, #36] - ldr r11, [r2, #32] - adcs r5, r5, r12 - add r12, r1, #16 - adcs r6, r6, lr - add lr, r1, #32 - adcs r7, r7, r9 - str r4, [sp, #4] @ 4-byte Spill - ldr r4, [r2, #24] - str r3, [sp, #16] @ 4-byte Spill - ldr r3, [r2, #40] - str r4, [sp, #8] @ 4-byte Spill - ldr r4, [r2, #28] - ldr r2, [r2, #44] - str r3, [sp, #20] @ 4-byte Spill - str r4, [sp, #12] @ 4-byte Spill - str r2, [sp, #24] @ 4-byte Spill - ldm lr, {r4, r10, lr} - ldr r9, [r1, #44] - ldm r12, {r1, r2, r3, r12} - str r8, [r0] - stmib r0, {r5, r6} - str r7, [r0, #12] - ldr r5, [sp] @ 4-byte Reload - ldr r7, [sp, #24] @ 4-byte Reload - adcs r1, r5, r1 - ldr r5, [sp, #4] @ 4-byte Reload - str r1, [r0, #16] - ldr r1, [sp, #8] @ 4-byte Reload - adcs r2, r5, r2 - str r2, [r0, #20] - ldr r2, [sp, #12] @ 4-byte Reload - adcs r1, r1, r3 - ldr r3, [sp, #20] @ 4-byte Reload - str r1, [r0, #24] - adcs r2, r2, r12 - str r2, [r0, #28] - ldr r2, [sp, #16] @ 4-byte Reload - adcs r1, r11, r4 - add r0, r0, #32 - adcs r2, r2, r10 - adcs r3, r3, lr - adcs r7, r7, r9 - stm r0, {r1, r2, r3, r7} - mov r0, #0 - adc r0, r0, #0 - add sp, sp, #28 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end181: - .size mcl_fp_addPre12L, .Lfunc_end181-mcl_fp_addPre12L - .cantunwind - .fnend - - .globl mcl_fp_subPre12L - .align 2 - .type mcl_fp_subPre12L,%function -mcl_fp_subPre12L: @ @mcl_fp_subPre12L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #28 - sub sp, sp, #28 - ldmib r2, {r8, r12, lr} - ldr r3, [r2, #16] - ldr r7, [r2] - ldr r6, [r1] - ldr r5, [r1, #4] - ldr r4, [r1, #8] - ldr r11, [r2, #44] - ldr r9, [r1, #32] - ldr r10, [r1, #36] - str r3, [sp, #12] @ 4-byte Spill - ldr r3, [r2, #20] - subs r6, r6, r7 - ldr r7, [r2, #32] - sbcs r5, r5, r8 - ldr r8, [r1, #40] - sbcs r4, r4, r12 - add r12, r1, #16 - str r3, [sp, #16] @ 4-byte Spill - ldr r3, [r2, #24] - str r7, [sp] @ 4-byte Spill - ldr r7, [r2, #36] - str r3, [sp, #20] @ 4-byte Spill - ldr r3, [r2, #28] - str r7, [sp, #4] @ 4-byte Spill - ldr r7, [r2, #40] - str r3, [sp, #24] @ 4-byte Spill - ldr r3, [r1, #12] - str r7, [sp, #8] @ 4-byte Spill - ldr r7, [r1, #44] - sbcs lr, r3, lr - ldm r12, {r1, r2, r3, r12} - str r6, [r0] - str r5, [r0, #4] - str r4, [r0, #8] - ldr r4, [sp, #12] @ 
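mcl_fp_montRed12L above is the standalone Montgomery reduction: the prologue loads the per-modulus constant from p[-4 bytes] (`ldr r0, [r3, #-4]`) and spills it at [sp, #156], then twelve passes each multiply the lowest remaining limb by that constant, add q*p through .LmulPv384x32 to zero the limb, and the movne ladder commits one conditional subtraction at the end. The mcl_fp_addPre12L that follows it is a plain carry-propagating 12-limb add that returns the carry in r0. A hedged C sketch of the reduction, assuming 32-bit limbs (mont_red and inv are illustrative names, not mcl's API):

#include <stdint.h>

#define N 12

/* z = xy * R^{-1} mod p for xy < p*R, R = 2^(32*N).
 * inv is -p^{-1} mod 2^32, the constant the assembly reads
 * from just below p (the `ldr r0, [r3, #-4]` in the prologue). */
static void mont_red(uint32_t z[N], const uint32_t xy[2 * N],
                     const uint32_t p[N], uint32_t inv)
{
    uint32_t t[2 * N + 1];
    for (int i = 0; i < 2 * N; i++) t[i] = xy[i];
    t[2 * N] = 0;

    for (int i = 0; i < N; i++) {
        uint32_t q = t[i] * inv;         /* chosen so t[i] becomes 0 */
        uint64_t c = 0;
        for (int j = 0; j < N; j++) {
            uint64_t v = (uint64_t)q * p[j] + t[i + j] + c;
            t[i + j] = (uint32_t)v;
            c = v >> 32;
        }
        for (int j = i + N; c != 0 && j <= 2 * N; j++) {
            uint64_t v = (uint64_t)t[j] + c;   /* ripple the carry up */
            t[j] = (uint32_t)v;
            c = v >> 32;
        }
    }
    /* result is t[N..2N-1] (< 2p); subtract p once if needed */
    uint32_t s[N];
    uint64_t b = 0;
    for (int j = 0; j < N; j++) {
        uint64_t d = (uint64_t)t[N + j] - p[j] - b;
        s[j] = (uint32_t)d;
        b = (d >> 32) & 1;
    }
    int ge = t[2 * N] != 0 || !b;        /* top carry set, or no borrow */
    for (int j = 0; j < N; j++) z[j] = ge ? s[j] : t[N + j];
}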
4-byte Reload - ldr r6, [sp, #16] @ 4-byte Reload - str lr, [r0, #12] - sbcs r1, r1, r4 - str r1, [r0, #16] - sbcs r2, r2, r6 - ldr r1, [sp, #20] @ 4-byte Reload - str r2, [r0, #20] - ldr r2, [sp, #24] @ 4-byte Reload - sbcs r1, r3, r1 - ldr r3, [sp, #8] @ 4-byte Reload - str r1, [r0, #24] - sbcs r2, r12, r2 - ldr r1, [sp] @ 4-byte Reload - str r2, [r0, #28] - ldr r2, [sp, #4] @ 4-byte Reload - add r0, r0, #32 - sbcs r1, r9, r1 - sbcs r2, r10, r2 - sbcs r3, r8, r3 - sbcs r7, r7, r11 - stm r0, {r1, r2, r3, r7} - mov r0, #0 - sbc r0, r0, #0 - and r0, r0, #1 - add sp, sp, #28 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end182: - .size mcl_fp_subPre12L, .Lfunc_end182-mcl_fp_subPre12L - .cantunwind - .fnend - - .globl mcl_fp_shr1_12L - .align 2 - .type mcl_fp_shr1_12L,%function -mcl_fp_shr1_12L: @ @mcl_fp_shr1_12L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #4 - sub sp, sp, #4 - add r6, r1, #20 - ldr r3, [r1, #8] - ldr r2, [r1, #12] - ldr lr, [r1, #16] - add r11, r1, #32 - ldm r6, {r4, r5, r6} - ldm r1, {r8, r12} - lsr r7, r12, #1 - orr r9, r7, r3, lsl #31 - ldm r11, {r7, r10, r11} - ldr r1, [r1, #44] - str r1, [sp] @ 4-byte Spill - lsr r1, r2, #1 - lsrs r2, r2, #1 - rrx r2, r3 - lsrs r3, r12, #1 - orr r1, r1, lr, lsl #31 - rrx r3, r8 - stm r0, {r3, r9} - str r2, [r0, #8] - str r1, [r0, #12] - lsrs r1, r4, #1 - lsr r2, r10, #1 - rrx r1, lr - orr r2, r2, r11, lsl #31 - str r1, [r0, #16] - lsr r1, r4, #1 - orr r1, r1, r5, lsl #31 - str r1, [r0, #20] - lsrs r1, r6, #1 - rrx r1, r5 - str r1, [r0, #24] - lsr r1, r6, #1 - orr r1, r1, r7, lsl #31 - str r1, [r0, #28] - lsrs r1, r10, #1 - add r0, r0, #32 - rrx r1, r7 - ldr r7, [sp] @ 4-byte Reload - lsrs r3, r7, #1 - lsr r7, r7, #1 - rrx r3, r11 - stm r0, {r1, r2, r3, r7} - add sp, sp, #4 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end183: - .size mcl_fp_shr1_12L, .Lfunc_end183-mcl_fp_shr1_12L - .cantunwind - .fnend - - .globl mcl_fp_add12L - .align 2 - .type mcl_fp_add12L,%function -mcl_fp_add12L: @ @mcl_fp_add12L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #44 - sub sp, sp, #44 - ldm r1, {r12, lr} - ldr r5, [r2] - ldr r8, [r1, #8] - ldr r9, [r1, #12] - ldmib r2, {r4, r6, r7} - ldr r11, [r1, #40] - adds r5, r5, r12 - ldr r12, [r2, #40] - adcs r4, r4, lr - str r5, [sp, #40] @ 4-byte Spill - ldr r5, [r1, #24] - ldr lr, [r1, #32] - adcs r6, r6, r8 - str r4, [sp, #36] @ 4-byte Spill - ldr r4, [r1, #20] - ldr r8, [r1, #36] - adcs r7, r7, r9 - str r6, [sp, #32] @ 4-byte Spill - ldr r6, [r1, #16] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r2, #16] - adcs r10, r7, r6 - ldr r6, [r2, #20] - adcs r7, r6, r4 - ldr r4, [r2, #24] - str r7, [sp, #12] @ 4-byte Spill - adcs r7, r4, r5 - ldr r4, [r1, #28] - ldr r5, [r2, #28] - str r7, [sp, #4] @ 4-byte Spill - adcs r6, r5, r4 - ldr r5, [r2, #32] - ldr r4, [r1, #44] - ldr r1, [r2, #36] - ldr r2, [r2, #44] - str r6, [sp, #8] @ 4-byte Spill - adcs r9, r5, lr - ldr lr, [sp, #32] @ 4-byte Reload - adcs r5, r1, r8 - ldr r1, [sp, #40] @ 4-byte Reload - ldr r8, [sp, #12] @ 4-byte Reload - adcs r11, r12, r11 - ldr r12, [sp, #36] @ 4-byte Reload - str r5, [sp, #28] @ 4-byte Spill - adcs r2, r2, r4 - ldr r4, [sp, #16] @ 4-byte Reload - str r2, [sp, #24] @ 4-byte Spill - str r1, [r0] - str r12, [r0, #4] - str lr, [r0, #8] - str r4, [r0, #12] - str r10, [r0, #16] - str r8, [r0, #20] - str r7, [r0, #24] - str r6, [r0, #28] - str r9, [r0, 
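mcl_fp_subPre12L and mcl_fp_shr1_12L above round out the raw helpers: the subtract propagates a borrow across all twelve limbs and returns it in r0 (the `sbc r0, r0, #0 / and r0, r0, #1` tail), and the shift moves a 384-bit value right by one bit, with each lsrs/rrx and lsr/orr ..., lsl #31 pair funneling a limb's low bit into the limb below. Portable sketches of the addPre/subPre/shr1 trio, assuming 32-bit limbs (the C function names are illustrative):

#include <stdint.h>

#define N 12

/* z = x + y over N limbs; returns the carry out (r0 in the asm). */
static uint32_t add_pre(uint32_t z[N], const uint32_t x[N], const uint32_t y[N])
{
    uint64_t c = 0;
    for (int i = 0; i < N; i++) {
        uint64_t v = (uint64_t)x[i] + y[i] + c;
        z[i] = (uint32_t)v;
        c = v >> 32;
    }
    return (uint32_t)c;
}

/* z = x - y over N limbs; returns the borrow out. */
static uint32_t sub_pre(uint32_t z[N], const uint32_t x[N], const uint32_t y[N])
{
    uint64_t b = 0;
    for (int i = 0; i < N; i++) {
        uint64_t v = (uint64_t)x[i] - y[i] - b;
        z[i] = (uint32_t)v;
        b = (v >> 32) & 1;
    }
    return (uint32_t)b;
}

/* z = x >> 1 over N limbs: each limb inherits its neighbour's low bit. */
static void shr1(uint32_t z[N], const uint32_t x[N])
{
    for (int i = 0; i < N - 1; i++)
        z[i] = (x[i] >> 1) | (x[i + 1] << 31);
    z[N - 1] = x[N - 1] >> 1;
}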
#32] - str r5, [r0, #36] - str r11, [r0, #40] - str r2, [r0, #44] - mov r2, #0 - adc r2, r2, #0 - str r2, [sp, #20] @ 4-byte Spill - ldm r3, {r2, r6, r7} - ldr r5, [r3, #12] - subs r1, r1, r2 - ldr r2, [sp, #4] @ 4-byte Reload - str r1, [sp, #40] @ 4-byte Spill - sbcs r1, r12, r6 - str r1, [sp] @ 4-byte Spill - sbcs r1, lr, r7 - str r1, [sp, #36] @ 4-byte Spill - sbcs r1, r4, r5 - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [r3, #16] - sbcs r1, r10, r1 - add r10, r3, #36 - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [r3, #20] - sbcs r6, r8, r1 - ldr r1, [r3, #24] - sbcs lr, r2, r1 - ldr r2, [r3, #28] - ldr r1, [sp, #8] @ 4-byte Reload - sbcs r12, r1, r2 - ldr r2, [r3, #32] - ldm r10, {r1, r4, r10} - sbcs r7, r9, r2 - ldr r2, [sp, #28] @ 4-byte Reload - sbcs r2, r2, r1 - ldr r1, [sp, #24] @ 4-byte Reload - sbcs r3, r11, r4 - sbcs r5, r1, r10 - ldr r1, [sp, #20] @ 4-byte Reload - sbc r1, r1, #0 - tst r1, #1 - bne .LBB184_2 -@ BB#1: @ %nocarry - ldr r1, [sp, #40] @ 4-byte Reload - str r1, [r0] - ldr r1, [sp] @ 4-byte Reload - str r1, [r0, #4] - ldr r1, [sp, #36] @ 4-byte Reload - str r1, [r0, #8] - ldr r1, [sp, #32] @ 4-byte Reload - str r1, [r0, #12] - ldr r1, [sp, #16] @ 4-byte Reload - str r1, [r0, #16] - str r6, [r0, #20] - str lr, [r0, #24] - str r12, [r0, #28] - str r7, [r0, #32] - add r0, r0, #36 - stm r0, {r2, r3, r5} -.LBB184_2: @ %carry - add sp, sp, #44 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end184: - .size mcl_fp_add12L, .Lfunc_end184-mcl_fp_add12L - .cantunwind - .fnend - - .globl mcl_fp_addNF12L - .align 2 - .type mcl_fp_addNF12L,%function -mcl_fp_addNF12L: @ @mcl_fp_addNF12L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #60 - sub sp, sp, #60 - ldm r1, {r5, r8, lr} - ldr r6, [r2] - ldr r10, [r1, #12] - ldmib r2, {r4, r7, r9} - ldr r12, [r1, #20] - adds r6, r6, r5 - ldr r5, [r1, #24] - adcs r8, r4, r8 - ldr r4, [r2, #16] - str r6, [sp, #16] @ 4-byte Spill - adcs r7, r7, lr - add lr, r2, #32 - str r7, [sp, #24] @ 4-byte Spill - ldr r7, [r1, #16] - adcs r6, r9, r10 - str r6, [sp, #32] @ 4-byte Spill - ldr r6, [r1, #44] - adcs r7, r4, r7 - ldr r4, [r1, #40] - str r7, [sp, #40] @ 4-byte Spill - ldr r7, [r2, #20] - adcs r7, r7, r12 - str r7, [sp, #48] @ 4-byte Spill - ldr r7, [r2, #24] - adcs r7, r7, r5 - ldr r5, [r2, #28] - str r7, [sp, #56] @ 4-byte Spill - ldr r7, [r1, #28] - adcs r7, r5, r7 - ldr r5, [r1, #36] - str r7, [sp, #52] @ 4-byte Spill - ldr r7, [r1, #32] - ldm lr, {r1, r12, lr} - ldr r2, [r2, #44] - adcs r1, r1, r7 - str r1, [sp, #20] @ 4-byte Spill - adcs r1, r12, r5 - str r1, [sp, #28] @ 4-byte Spill - adcs r1, lr, r4 - str r1, [sp, #36] @ 4-byte Spill - adc r1, r2, r6 - str r1, [sp, #44] @ 4-byte Spill - ldmib r3, {r1, r2, r6, r11} - ldr r7, [r3, #20] - ldr r4, [r3, #32] - ldr r9, [r3] - ldr r5, [sp, #16] @ 4-byte Reload - ldr lr, [r3, #24] - ldr r10, [r3, #28] - str r7, [sp, #12] @ 4-byte Spill - ldr r7, [sp, #24] @ 4-byte Reload - str r4, [sp, #8] @ 4-byte Spill - ldr r4, [r3, #36] - subs r9, r5, r9 - sbcs r1, r8, r1 - sbcs r2, r7, r2 - ldr r7, [sp, #32] @ 4-byte Reload - str r4, [sp] @ 4-byte Spill - ldr r4, [r3, #40] - sbcs r12, r7, r6 - ldr r7, [r3, #44] - ldr r3, [sp, #40] @ 4-byte Reload - str r4, [sp, #4] @ 4-byte Spill - ldr r4, [sp, #48] @ 4-byte Reload - ldr r6, [sp, #12] @ 4-byte Reload - sbcs r3, r3, r11 - sbcs r11, r4, r6 - ldr r4, [sp, #56] @ 4-byte Reload - ldr r6, [sp, #8] @ 4-byte Reload - sbcs lr, r4, lr - ldr r4, [sp, #52] @ 4-byte Reload - sbcs r10, 
r4, r10 - ldr r4, [sp, #20] @ 4-byte Reload - sbcs r4, r4, r6 - ldr r6, [sp] @ 4-byte Reload - str r4, [sp, #8] @ 4-byte Spill - ldr r4, [sp, #28] @ 4-byte Reload - sbcs r4, r4, r6 - ldr r6, [sp, #36] @ 4-byte Reload - str r4, [sp] @ 4-byte Spill - ldr r4, [sp, #4] @ 4-byte Reload - sbcs r6, r6, r4 - str r6, [sp, #12] @ 4-byte Spill - ldr r6, [sp, #44] @ 4-byte Reload - sbc r6, r6, r7 - asr r7, r6, #31 - cmp r7, #0 - movlt r9, r5 - movlt r1, r8 - str r9, [r0] - str r1, [r0, #4] - ldr r1, [sp, #24] @ 4-byte Reload - movlt r2, r1 - ldr r1, [sp, #32] @ 4-byte Reload - cmp r7, #0 - str r2, [r0, #8] - ldr r2, [sp, #8] @ 4-byte Reload - movlt r12, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r12, [r0, #12] - movlt r3, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r3, [r0, #16] - ldr r3, [sp, #12] @ 4-byte Reload - movlt r11, r1 - ldr r1, [sp, #56] @ 4-byte Reload - cmp r7, #0 - str r11, [r0, #20] - movlt lr, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str lr, [r0, #24] - movlt r10, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r10, [r0, #28] - movlt r2, r1 - ldr r1, [sp, #28] @ 4-byte Reload - cmp r7, #0 - ldr r7, [sp] @ 4-byte Reload - str r2, [r0, #32] - movlt r7, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r7, [r0, #36] - movlt r3, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r3, [r0, #40] - movlt r6, r1 - str r6, [r0, #44] - add sp, sp, #60 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end185: - .size mcl_fp_addNF12L, .Lfunc_end185-mcl_fp_addNF12L - .cantunwind - .fnend - - .globl mcl_fp_sub12L - .align 2 - .type mcl_fp_sub12L,%function -mcl_fp_sub12L: @ @mcl_fp_sub12L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #64 - sub sp, sp, #64 - ldr r9, [r2] - ldmib r2, {r8, r12, lr} - ldm r1, {r4, r5, r6, r7} - add r10, r1, #32 - subs r4, r4, r9 - sbcs r5, r5, r8 - str r4, [sp, #48] @ 4-byte Spill - ldr r4, [r2, #24] - sbcs r6, r6, r12 - str r5, [sp, #52] @ 4-byte Spill - ldr r5, [r2, #20] - sbcs r7, r7, lr - str r6, [sp, #56] @ 4-byte Spill - ldr r6, [r2, #16] - str r7, [sp, #60] @ 4-byte Spill - ldr r7, [r1, #16] - sbcs r11, r7, r6 - ldr r6, [r1, #20] - str r11, [sp, #28] @ 4-byte Spill - sbcs lr, r6, r5 - ldr r5, [r1, #24] - str lr, [sp, #40] @ 4-byte Spill - sbcs r7, r5, r4 - ldr r4, [r2, #28] - ldr r5, [r1, #28] - str r7, [sp, #44] @ 4-byte Spill - add r7, r2, #32 - sbcs r12, r5, r4 - str r12, [sp, #36] @ 4-byte Spill - ldm r7, {r4, r5, r6, r7} - ldm r10, {r2, r8, r9, r10} - ldr r1, [sp, #48] @ 4-byte Reload - sbcs r4, r2, r4 - ldr r2, [sp, #56] @ 4-byte Reload - str r1, [r0] - sbcs r8, r8, r5 - str r4, [sp, #32] @ 4-byte Spill - sbcs r6, r9, r6 - sbcs r7, r10, r7 - ldr r10, [sp, #52] @ 4-byte Reload - str r10, [r0, #4] - str r2, [r0, #8] - ldr r2, [sp, #60] @ 4-byte Reload - str r2, [r0, #12] - ldr r2, [sp, #44] @ 4-byte Reload - str r11, [r0, #16] - str lr, [r0, #20] - str r2, [r0, #24] - str r12, [r0, #28] - str r4, [r0, #32] - mov r4, #0 - str r8, [r0, #36] - str r6, [r0, #40] - str r7, [r0, #44] - sbc r4, r4, #0 - tst r4, #1 - beq .LBB186_2 -@ BB#1: @ %carry - ldr r5, [r3, #32] - ldr r4, [r3, #20] - ldr r12, [r3, #28] - ldr r9, [r3, #4] - ldr lr, [r3, #12] - ldr r11, [r3, #16] - str r5, [sp, #12] @ 4-byte Spill - ldr r5, [r3, #36] - str r4, [sp] @ 4-byte Spill - ldr r4, [r3, #24] - str r12, [sp, #8] @ 4-byte Spill - str r5, [sp, #16] @ 4-byte Spill - ldr r5, [r3, #40] - str r4, [sp, #4] @ 4-byte Spill - str r5, [sp, #20] @ 4-byte Spill - ldr r5, [r3, #44] - str r5, [sp, #24] @ 4-byte Spill - ldr r5, 
[r3, #8] - ldr r3, [r3] - adds r3, r3, r1 - ldr r1, [sp, #56] @ 4-byte Reload - adcs r4, r9, r10 - adcs r5, r5, r1 - ldr r1, [sp, #60] @ 4-byte Reload - stm r0, {r3, r4, r5} - ldr r3, [sp] @ 4-byte Reload - adcs r1, lr, r1 - str r1, [r0, #12] - ldr r1, [sp, #28] @ 4-byte Reload - adcs r1, r11, r1 - str r1, [r0, #16] - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r3, r1 - ldr r3, [sp, #20] @ 4-byte Reload - str r1, [r0, #20] - ldr r1, [sp, #4] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #8] @ 4-byte Reload - str r1, [r0, #24] - ldr r1, [sp, #36] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #12] @ 4-byte Reload - str r1, [r0, #28] - ldr r1, [sp, #32] @ 4-byte Reload - add r0, r0, #32 - adcs r1, r2, r1 - ldr r2, [sp, #16] @ 4-byte Reload - adcs r2, r2, r8 - adcs r3, r3, r6 - ldr r6, [sp, #24] @ 4-byte Reload - adc r7, r6, r7 - stm r0, {r1, r2, r3, r7} -.LBB186_2: @ %nocarry - add sp, sp, #64 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end186: - .size mcl_fp_sub12L, .Lfunc_end186-mcl_fp_sub12L - .cantunwind - .fnend - - .globl mcl_fp_subNF12L - .align 2 - .type mcl_fp_subNF12L,%function -mcl_fp_subNF12L: @ @mcl_fp_subNF12L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #56 - sub sp, sp, #56 - mov r12, r0 - ldr r0, [r2, #32] - add r11, r2, #8 - ldr r6, [r2] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [r2, #36] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [r2, #40] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [r2, #44] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [r1, #32] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [r1, #36] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [r1, #40] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [r1, #44] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [r2, #4] - str r0, [sp, #36] @ 4-byte Spill - ldm r11, {r8, r10, r11} - ldr r0, [r2, #20] - ldr lr, [r1, #16] - ldr r7, [r1, #20] - ldr r5, [r1, #24] - ldr r4, [r1, #28] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r2, #24] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [r2, #28] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [r1, #12] - ldm r1, {r1, r2, r9} - subs r1, r1, r6 - ldr r6, [sp, #36] @ 4-byte Reload - sbcs r2, r2, r6 - sbcs r6, r9, r8 - mov r9, r2 - sbcs r10, r0, r10 - str r6, [sp, #4] @ 4-byte Spill - sbcs r0, lr, r11 - add r11, r3, #8 - ldr lr, [r3, #4] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - sbcs r0, r7, r0 - ldr r7, [sp, #8] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - sbcs r0, r5, r0 - ldr r5, [sp, #20] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - sbcs r0, r4, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - sbcs r0, r7, r0 - ldr r7, [sp, #12] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - sbcs r0, r7, r0 - ldr r7, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - sbcs r0, r7, r0 - ldr r7, [sp, #24] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - sbc r0, r5, r7 - ldr r7, [r3, #36] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [r3, #32] - str r7, [sp, #12] @ 4-byte Spill - ldr r7, [r3, #40] - str r0, [sp] @ 4-byte Spill - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r3, #44] - str r7, [sp, #20] @ 4-byte Spill - ldm r11, {r7, r8, r11} - ldr r4, [r3, #28] - ldr r5, [r3, #20] - ldr r0, [r3, #24] - ldr r3, [r3] - str r4, [sp, #8] @ 4-byte Spill - mov r4, r1 - adds r1, r4, 
r3 - ldr r3, [sp, #36] @ 4-byte Reload - adcs r2, r9, lr - adcs lr, r6, r7 - adcs r6, r10, r8 - adcs r7, r3, r11 - ldr r3, [sp, #40] @ 4-byte Reload - adcs r8, r3, r5 - ldr r3, [sp, #44] @ 4-byte Reload - adcs r5, r3, r0 - ldr r3, [sp, #48] @ 4-byte Reload - ldr r0, [sp, #8] @ 4-byte Reload - adcs r11, r3, r0 - ldr r3, [sp, #52] @ 4-byte Reload - ldr r0, [sp] @ 4-byte Reload - adcs r3, r3, r0 - ldr r0, [sp, #12] @ 4-byte Reload - str r3, [sp, #8] @ 4-byte Spill - ldr r3, [sp, #32] @ 4-byte Reload - adcs r3, r3, r0 - ldr r0, [sp, #28] @ 4-byte Reload - str r3, [sp, #12] @ 4-byte Spill - ldr r3, [sp, #16] @ 4-byte Reload - adcs r0, r0, r3 - ldr r3, [sp, #20] @ 4-byte Reload - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r3, r0, r3 - str r3, [sp, #20] @ 4-byte Spill - asr r3, r0, #31 - ldr r0, [sp, #4] @ 4-byte Reload - cmp r3, #0 - movge r1, r4 - movge r2, r9 - str r1, [r12] - str r2, [r12, #4] - ldr r1, [sp, #8] @ 4-byte Reload - ldr r2, [sp, #32] @ 4-byte Reload - movge lr, r0 - ldr r0, [sp, #36] @ 4-byte Reload - cmp r3, #0 - movge r6, r10 - str lr, [r12, #8] - str r6, [r12, #12] - movge r7, r0 - ldr r0, [sp, #40] @ 4-byte Reload - str r7, [r12, #16] - ldr r7, [sp, #24] @ 4-byte Reload - movge r8, r0 - ldr r0, [sp, #44] @ 4-byte Reload - cmp r3, #0 - str r8, [r12, #20] - movge r5, r0 - ldr r0, [sp, #48] @ 4-byte Reload - str r5, [r12, #24] - movge r11, r0 - ldr r0, [sp, #52] @ 4-byte Reload - str r11, [r12, #28] - movge r1, r0 - cmp r3, #0 - ldr r3, [sp, #28] @ 4-byte Reload - ldr r0, [sp, #12] @ 4-byte Reload - movge r0, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [r12, #32] - add r1, r12, #36 - movge r2, r3 - ldr r3, [sp, #20] @ 4-byte Reload - movge r3, r7 - stm r1, {r0, r2, r3} - add sp, sp, #56 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end187: - .size mcl_fp_subNF12L, .Lfunc_end187-mcl_fp_subNF12L - .cantunwind - .fnend - - .globl mcl_fpDbl_add12L - .align 2 - .type mcl_fpDbl_add12L,%function -mcl_fpDbl_add12L: @ @mcl_fpDbl_add12L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #136 - sub sp, sp, #136 - ldm r1, {r7, r8, r12, lr} - ldm r2, {r4, r5, r6, r9} - ldr r10, [r2, #20] - adds r4, r4, r7 - str r4, [sp, #80] @ 4-byte Spill - ldr r4, [r2, #64] - str r4, [sp, #108] @ 4-byte Spill - ldr r4, [r2, #68] - str r4, [sp, #112] @ 4-byte Spill - ldr r4, [r2, #72] - str r4, [sp, #116] @ 4-byte Spill - ldr r4, [r2, #76] - str r4, [sp, #120] @ 4-byte Spill - ldr r4, [r2, #80] - str r4, [sp, #124] @ 4-byte Spill - ldr r4, [r2, #84] - str r4, [sp, #128] @ 4-byte Spill - ldr r4, [r2, #88] - str r4, [sp, #132] @ 4-byte Spill - ldr r4, [r2, #92] - str r4, [sp, #76] @ 4-byte Spill - adcs r4, r5, r8 - adcs r7, r6, r12 - ldr r6, [r2, #16] - str r4, [sp, #28] @ 4-byte Spill - str r7, [sp, #24] @ 4-byte Spill - adcs r7, r9, lr - add r9, r1, #32 - add lr, r1, #16 - str r7, [sp, #32] @ 4-byte Spill - ldr r7, [r2, #32] - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [r2, #36] - str r7, [sp, #72] @ 4-byte Spill - ldr r7, [r2, #40] - str r7, [sp, #84] @ 4-byte Spill - ldr r7, [r2, #44] - str r7, [sp, #88] @ 4-byte Spill - ldr r7, [r2, #48] - str r7, [sp, #92] @ 4-byte Spill - ldr r7, [r2, #52] - str r7, [sp, #96] @ 4-byte Spill - ldr r7, [r2, #56] - str r7, [sp, #100] @ 4-byte Spill - ldr r7, [r2, #60] - str r7, [sp, #104] @ 4-byte Spill - ldr r7, [r2, #28] - str r7, [sp, #20] @ 4-byte Spill - ldr r7, [r2, #24] - ldr r2, [r1, #64] - str r2, [sp, #40] @ 4-byte Spill - ldr r2, [r1, 
#68] - str r7, [sp, #16] @ 4-byte Spill - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [r1, #72] - str r2, [sp, #48] @ 4-byte Spill - ldr r2, [r1, #76] - str r2, [sp, #52] @ 4-byte Spill - ldr r2, [r1, #80] - str r2, [sp, #56] @ 4-byte Spill - ldr r2, [r1, #84] - str r2, [sp, #60] @ 4-byte Spill - ldr r2, [r1, #88] - str r2, [sp, #64] @ 4-byte Spill - ldr r2, [r1, #92] - str r2, [sp, #68] @ 4-byte Spill - ldm r9, {r4, r5, r8, r9} - ldr r2, [r1, #48] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #52] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #56] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #12] @ 4-byte Spill - ldm lr, {r1, r2, r12, lr} - ldr r11, [sp, #80] @ 4-byte Reload - ldr r7, [sp, #28] @ 4-byte Reload - adcs r1, r6, r1 - str r11, [r0] - str r7, [r0, #4] - ldr r7, [sp, #24] @ 4-byte Reload - ldr r6, [sp, #32] @ 4-byte Reload - adcs r2, r10, r2 - ldr r10, [r3] - str r7, [r0, #8] - str r6, [r0, #12] - str r1, [r0, #16] - ldr r1, [sp, #16] @ 4-byte Reload - str r2, [r0, #20] - ldr r2, [sp, #20] @ 4-byte Reload - ldr r7, [sp] @ 4-byte Reload - adcs r1, r1, r12 - str r1, [r0, #24] - ldr r1, [sp, #36] @ 4-byte Reload - adcs r2, r2, lr - str r2, [r0, #28] - ldr r2, [sp, #72] @ 4-byte Reload - adcs r1, r1, r4 - str r1, [r0, #32] - ldr r1, [sp, #84] @ 4-byte Reload - adcs r2, r2, r5 - ldr r5, [r3, #12] - str r2, [r0, #36] - ldr r2, [sp, #88] @ 4-byte Reload - adcs r1, r1, r8 - str r1, [r0, #40] - ldr r1, [sp, #92] @ 4-byte Reload - adcs r2, r2, r9 - str r2, [r0, #44] - ldr r2, [sp, #4] @ 4-byte Reload - adcs r12, r1, r7 - ldr r1, [sp, #96] @ 4-byte Reload - str r12, [sp, #80] @ 4-byte Spill - adcs r8, r1, r2 - ldr r1, [sp, #100] @ 4-byte Reload - ldr r2, [sp, #8] @ 4-byte Reload - str r8, [sp, #88] @ 4-byte Spill - adcs lr, r1, r2 - ldr r1, [sp, #104] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - str lr, [sp, #92] @ 4-byte Spill - adcs r4, r1, r2 - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [sp, #40] @ 4-byte Reload - str r4, [sp, #104] @ 4-byte Spill - adcs r9, r1, r2 - ldr r1, [sp, #112] @ 4-byte Reload - ldr r2, [sp, #44] @ 4-byte Reload - str r9, [sp, #96] @ 4-byte Spill - adcs r11, r1, r2 - ldr r1, [sp, #116] @ 4-byte Reload - ldr r2, [sp, #48] @ 4-byte Reload - str r11, [sp, #108] @ 4-byte Spill - adcs r6, r1, r2 - ldr r1, [sp, #120] @ 4-byte Reload - ldr r2, [sp, #52] @ 4-byte Reload - str r6, [sp, #112] @ 4-byte Spill - adcs r7, r1, r2 - ldr r1, [sp, #124] @ 4-byte Reload - ldr r2, [sp, #56] @ 4-byte Reload - str r7, [sp, #116] @ 4-byte Spill - adcs r1, r1, r2 - ldr r2, [sp, #60] @ 4-byte Reload - str r1, [sp, #124] @ 4-byte Spill - ldr r1, [sp, #128] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #64] @ 4-byte Reload - str r1, [sp, #120] @ 4-byte Spill - ldr r1, [sp, #132] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #68] @ 4-byte Reload - str r1, [sp, #128] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [r3, #8] - str r1, [sp, #132] @ 4-byte Spill - mov r1, #0 - adc r1, r1, #0 - subs r10, r12, r10 - str r1, [sp, #100] @ 4-byte Spill - ldr r1, [r3, #4] - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [r3, #16] - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [r3, #20] - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [r3, #24] - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [r3, #28] - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - sbcs r1, r8, r1 - ldr r8, [r3, #40] - sbcs r2, lr, r2 - ldr lr, [r3, #32] - sbcs r12, r4, r5 - ldr r4, [r3, #36] - ldr r3, [r3, #44] - ldr r5, [sp, #72] @ 4-byte Reload - 
str r3, [sp, #64] @ 4-byte Spill - ldr r3, [sp, #68] @ 4-byte Reload - sbcs r3, r9, r3 - sbcs r9, r11, r5 - ldr r5, [sp, #76] @ 4-byte Reload - sbcs r5, r6, r5 - ldr r6, [sp, #84] @ 4-byte Reload - sbcs r6, r7, r6 - ldr r7, [sp, #124] @ 4-byte Reload - sbcs r11, r7, lr - ldr r7, [sp, #120] @ 4-byte Reload - sbcs lr, r7, r4 - ldr r7, [sp, #128] @ 4-byte Reload - ldr r4, [sp, #64] @ 4-byte Reload - sbcs r8, r7, r8 - ldr r7, [sp, #132] @ 4-byte Reload - sbcs r4, r7, r4 - ldr r7, [sp, #100] @ 4-byte Reload - str r4, [sp, #84] @ 4-byte Spill - ldr r4, [sp, #80] @ 4-byte Reload - sbc r7, r7, #0 - ands r7, r7, #1 - movne r10, r4 - ldr r4, [sp, #88] @ 4-byte Reload - str r10, [r0, #48] - movne r1, r4 - str r1, [r0, #52] - ldr r1, [sp, #92] @ 4-byte Reload - movne r2, r1 - ldr r1, [sp, #104] @ 4-byte Reload - cmp r7, #0 - str r2, [r0, #56] - ldr r2, [sp, #84] @ 4-byte Reload - movne r12, r1 - ldr r1, [sp, #96] @ 4-byte Reload - str r12, [r0, #60] - movne r3, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r3, [r0, #64] - movne r9, r1 - ldr r1, [sp, #112] @ 4-byte Reload - cmp r7, #0 - str r9, [r0, #68] - movne r5, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r5, [r0, #72] - movne r6, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r6, [r0, #76] - movne r11, r1 - ldr r1, [sp, #120] @ 4-byte Reload - cmp r7, #0 - str r11, [r0, #80] - movne lr, r1 - ldr r1, [sp, #128] @ 4-byte Reload - str lr, [r0, #84] - movne r8, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r8, [r0, #88] - movne r2, r1 - str r2, [r0, #92] - add sp, sp, #136 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end188: - .size mcl_fpDbl_add12L, .Lfunc_end188-mcl_fpDbl_add12L - .cantunwind - .fnend - - .globl mcl_fpDbl_sub12L - .align 2 - .type mcl_fpDbl_sub12L,%function -mcl_fpDbl_sub12L: @ @mcl_fpDbl_sub12L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #136 - sub sp, sp, #136 - ldr r7, [r2, #64] - str r7, [sp, #128] @ 4-byte Spill - ldr r7, [r2, #68] - str r7, [sp, #104] @ 4-byte Spill - ldr r7, [r2, #72] - str r7, [sp, #132] @ 4-byte Spill - ldr r7, [r2, #76] - str r7, [sp, #108] @ 4-byte Spill - ldr r7, [r2, #80] - str r7, [sp, #112] @ 4-byte Spill - ldr r7, [r2, #84] - str r7, [sp, #116] @ 4-byte Spill - ldr r7, [r2, #88] - str r7, [sp, #124] @ 4-byte Spill - ldr r7, [r2, #92] - str r7, [sp, #120] @ 4-byte Spill - ldr r7, [r2, #60] - str r7, [sp, #100] @ 4-byte Spill - ldr r7, [r2, #56] - str r7, [sp, #96] @ 4-byte Spill - ldr r7, [r2, #52] - str r7, [sp, #92] @ 4-byte Spill - ldr r7, [r2, #48] - str r7, [sp, #88] @ 4-byte Spill - ldr r7, [r2, #44] - str r7, [sp, #84] @ 4-byte Spill - ldr r7, [r2, #40] - str r7, [sp, #80] @ 4-byte Spill - ldr r7, [r2] - ldmib r2, {r6, r9} - ldr r5, [r1] - ldr r8, [r2, #12] - ldmib r1, {r4, lr} - ldr r12, [r1, #12] - ldr r10, [r2, #20] - subs r5, r5, r7 - sbcs r4, r4, r6 - str r5, [sp, #32] @ 4-byte Spill - ldr r5, [r2, #36] - ldr r6, [r2, #16] - sbcs r7, lr, r9 - str r4, [sp, #24] @ 4-byte Spill - ldr r4, [r2, #32] - add r9, r1, #32 - add lr, r1, #16 - str r7, [sp, #12] @ 4-byte Spill - ldr r7, [r2, #28] - str r5, [sp, #44] @ 4-byte Spill - str r4, [sp, #40] @ 4-byte Spill - str r7, [sp, #36] @ 4-byte Spill - sbcs r7, r12, r8 - str r7, [sp, #8] @ 4-byte Spill - ldr r7, [r2, #24] - ldr r2, [r1, #64] - str r2, [sp, #48] @ 4-byte Spill - ldr r2, [r1, #68] - str r7, [sp, #28] @ 4-byte Spill - str r2, [sp, #52] @ 4-byte Spill - ldr r2, [r1, #72] - str r2, [sp, #56] @ 4-byte Spill - ldr r2, [r1, #76] - str r2, [sp, #60] 
@ 4-byte Spill - ldr r2, [r1, #80] - str r2, [sp, #64] @ 4-byte Spill - ldr r2, [r1, #84] - str r2, [sp, #68] @ 4-byte Spill - ldr r2, [r1, #88] - str r2, [sp, #72] @ 4-byte Spill - ldr r2, [r1, #92] - str r2, [sp, #76] @ 4-byte Spill - ldm r9, {r4, r5, r8, r9} - ldr r2, [r1, #48] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #52] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #56] - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #20] @ 4-byte Spill - ldm lr, {r1, r2, r12, lr} - ldr r11, [sp, #32] @ 4-byte Reload - ldr r7, [sp, #24] @ 4-byte Reload - sbcs r1, r1, r6 - str r11, [r0] - str r7, [r0, #4] - ldr r7, [sp, #12] @ 4-byte Reload - ldr r6, [sp, #8] @ 4-byte Reload - sbcs r2, r2, r10 - str r7, [r0, #8] - str r6, [r0, #12] - str r1, [r0, #16] - ldr r1, [sp, #28] @ 4-byte Reload - str r2, [r0, #20] - ldr r2, [sp, #36] @ 4-byte Reload - ldr r7, [sp] @ 4-byte Reload - sbcs r1, r12, r1 - str r1, [r0, #24] - ldr r1, [sp, #40] @ 4-byte Reload - sbcs r2, lr, r2 - str r2, [r0, #28] - ldr r2, [sp, #44] @ 4-byte Reload - sbcs r1, r4, r1 - str r1, [r0, #32] - ldr r1, [sp, #80] @ 4-byte Reload - sbcs r2, r5, r2 - str r2, [r0, #36] - ldr r2, [sp, #84] @ 4-byte Reload - sbcs r1, r8, r1 - str r1, [r0, #40] - ldr r1, [sp, #88] @ 4-byte Reload - sbcs r2, r9, r2 - str r2, [r0, #44] - ldr r2, [sp, #4] @ 4-byte Reload - sbcs r9, r7, r1 - ldr r1, [sp, #92] @ 4-byte Reload - ldr r7, [sp, #16] @ 4-byte Reload - str r9, [sp, #40] @ 4-byte Spill - sbcs lr, r2, r1 - ldr r2, [sp, #96] @ 4-byte Reload - mov r1, #0 - str lr, [sp, #44] @ 4-byte Spill - sbcs r2, r7, r2 - ldr r7, [sp, #20] @ 4-byte Reload - str r2, [sp, #92] @ 4-byte Spill - ldr r2, [sp, #100] @ 4-byte Reload - sbcs r4, r7, r2 - ldr r2, [sp, #128] @ 4-byte Reload - ldr r7, [sp, #48] @ 4-byte Reload - str r4, [sp, #88] @ 4-byte Spill - sbcs r2, r7, r2 - ldr r7, [sp, #52] @ 4-byte Reload - str r2, [sp, #128] @ 4-byte Spill - ldr r2, [sp, #104] @ 4-byte Reload - sbcs r5, r7, r2 - ldr r2, [sp, #132] @ 4-byte Reload - ldr r7, [sp, #56] @ 4-byte Reload - str r5, [sp, #96] @ 4-byte Spill - sbcs r2, r7, r2 - ldr r7, [sp, #60] @ 4-byte Reload - str r2, [sp, #132] @ 4-byte Spill - ldr r2, [sp, #108] @ 4-byte Reload - sbcs r8, r7, r2 - ldr r2, [sp, #112] @ 4-byte Reload - ldr r7, [sp, #64] @ 4-byte Reload - str r8, [sp, #104] @ 4-byte Spill - sbcs r10, r7, r2 - ldr r2, [sp, #116] @ 4-byte Reload - ldr r7, [sp, #68] @ 4-byte Reload - str r10, [sp, #108] @ 4-byte Spill - sbcs r6, r7, r2 - ldr r2, [sp, #124] @ 4-byte Reload - ldr r7, [sp, #72] @ 4-byte Reload - str r6, [sp, #112] @ 4-byte Spill - sbcs r2, r7, r2 - ldr r7, [sp, #76] @ 4-byte Reload - str r2, [sp, #124] @ 4-byte Spill - ldr r2, [sp, #120] @ 4-byte Reload - sbcs r2, r7, r2 - sbc r1, r1, #0 - str r2, [sp, #120] @ 4-byte Spill - str r1, [sp, #100] @ 4-byte Spill - ldr r1, [r3, #32] - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [r3, #36] - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [r3, #40] - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [r3, #44] - str r1, [sp, #116] @ 4-byte Spill - ldmib r3, {r1, r2, r12} - ldr r7, [r3, #16] - ldr r11, [r3, #20] - str r7, [sp, #64] @ 4-byte Spill - ldr r7, [r3, #24] - str r7, [sp, #72] @ 4-byte Spill - ldr r7, [r3, #28] - ldr r3, [r3] - adds r3, r9, r3 - ldr r9, [sp, #92] @ 4-byte Reload - str r7, [sp, #68] @ 4-byte Spill - ldr r7, [sp, #72] @ 4-byte Reload - adcs r1, lr, r1 - ldr lr, [sp, #128] @ 4-byte Reload - adcs r2, r9, r2 - adcs r12, r4, r12 - ldr r4, [sp, #64] @ 4-byte Reload - adcs lr, lr, r4 - adcs r4, r5, r11 - ldr r5, [sp, #132] @ 
4-byte Reload - ldr r11, [sp, #116] @ 4-byte Reload - adcs r5, r5, r7 - ldr r7, [sp, #68] @ 4-byte Reload - adcs r8, r8, r7 - ldr r7, [sp, #76] @ 4-byte Reload - adcs r10, r10, r7 - ldr r7, [sp, #80] @ 4-byte Reload - adcs r6, r6, r7 - ldr r7, [sp, #84] @ 4-byte Reload - str r6, [sp, #80] @ 4-byte Spill - ldr r6, [sp, #124] @ 4-byte Reload - adcs r6, r6, r7 - ldr r7, [sp, #40] @ 4-byte Reload - str r6, [sp, #84] @ 4-byte Spill - ldr r6, [sp, #120] @ 4-byte Reload - adc r6, r6, r11 - str r6, [sp, #116] @ 4-byte Spill - ldr r6, [sp, #100] @ 4-byte Reload - ands r6, r6, #1 - moveq r3, r7 - moveq r2, r9 - str r3, [r0, #48] - ldr r3, [sp, #44] @ 4-byte Reload - moveq r1, r3 - cmp r6, #0 - str r1, [r0, #52] - ldr r1, [sp, #88] @ 4-byte Reload - str r2, [r0, #56] - ldr r2, [sp, #80] @ 4-byte Reload - moveq r12, r1 - ldr r1, [sp, #128] @ 4-byte Reload - str r12, [r0, #60] - moveq lr, r1 - ldr r1, [sp, #96] @ 4-byte Reload - str lr, [r0, #64] - moveq r4, r1 - ldr r1, [sp, #132] @ 4-byte Reload - cmp r6, #0 - str r4, [r0, #68] - moveq r5, r1 - ldr r1, [sp, #104] @ 4-byte Reload - str r5, [r0, #72] - moveq r8, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r8, [r0, #76] - moveq r10, r1 - ldr r1, [sp, #112] @ 4-byte Reload - cmp r6, #0 - str r10, [r0, #80] - moveq r2, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r2, [r0, #84] - ldr r2, [sp, #84] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #120] @ 4-byte Reload - str r2, [r0, #88] - ldr r2, [sp, #116] @ 4-byte Reload - moveq r2, r1 - str r2, [r0, #92] - add sp, sp, #136 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end189: - .size mcl_fpDbl_sub12L, .Lfunc_end189-mcl_fpDbl_sub12L - .cantunwind - .fnend - - .align 2 - .type .LmulPv416x32,%function -.LmulPv416x32: @ @mulPv416x32 - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r11, lr} - push {r4, r5, r6, r7, r8, r9, r11, lr} - ldr r12, [r1] - ldmib r1, {r3, lr} - ldr r9, [r1, #12] - umull r4, r8, lr, r2 - umull lr, r6, r12, r2 - mov r5, r4 - mov r7, r6 - str lr, [r0] - umull lr, r12, r9, r2 - umlal r7, r5, r3, r2 - str r5, [r0, #8] - str r7, [r0, #4] - umull r5, r7, r3, r2 - adds r3, r6, r5 - adcs r3, r7, r4 - adcs r3, r8, lr - str r3, [r0, #12] - ldr r3, [r1, #16] - umull r7, r6, r3, r2 - adcs r3, r12, r7 - str r3, [r0, #16] - ldr r3, [r1, #20] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #20] - ldr r3, [r1, #24] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #24] - ldr r3, [r1, #28] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #28] - ldr r3, [r1, #32] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #32] - ldr r3, [r1, #36] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #36] - ldr r3, [r1, #40] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #40] - ldr r3, [r1, #44] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #44] - ldr r1, [r1, #48] - umull r3, r7, r1, r2 - adcs r1, r5, r3 - str r1, [r0, #48] - adc r1, r7, #0 - str r1, [r0, #52] - pop {r4, r5, r6, r7, r8, r9, r11, lr} - mov pc, lr -.Lfunc_end190: - .size .LmulPv416x32, .Lfunc_end190-.LmulPv416x32 - .cantunwind - .fnend - - .globl mcl_fp_mulUnitPre13L - .align 2 - .type mcl_fp_mulUnitPre13L,%function -mcl_fp_mulUnitPre13L: @ @mcl_fp_mulUnitPre13L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #68 - sub sp, sp, #68 - mov r4, r0 - add r0, sp, #8 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #60] - add r12, sp, #12 - ldr lr, [sp, #56] - ldr r8, [sp, #52] - ldr r9, [sp, #48] - ldr r10, [sp, #44] - 
ldr r11, [sp, #40] - ldr r5, [sp, #36] - ldr r6, [sp, #32] - ldr r7, [sp, #28] - ldr r3, [sp, #8] - str r0, [sp, #4] @ 4-byte Spill - ldm r12, {r0, r1, r2, r12} - str r3, [r4] - stmib r4, {r0, r1, r2, r12} - str r7, [r4, #20] - str r6, [r4, #24] - str r5, [r4, #28] - str r11, [r4, #32] - str r10, [r4, #36] - str r9, [r4, #40] - str r8, [r4, #44] - str lr, [r4, #48] - ldr r0, [sp, #4] @ 4-byte Reload - str r0, [r4, #52] - add sp, sp, #68 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end191: - .size mcl_fp_mulUnitPre13L, .Lfunc_end191-mcl_fp_mulUnitPre13L - .cantunwind - .fnend - - .globl mcl_fpDbl_mulPre13L - .align 2 - .type mcl_fpDbl_mulPre13L,%function -mcl_fpDbl_mulPre13L: @ @mcl_fpDbl_mulPre13L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #820 - sub sp, sp, #820 - mov r7, r2 - mov r4, r0 - add r0, sp, #760 - str r1, [sp, #84] @ 4-byte Spill - mov r5, r1 - ldr r2, [r7] - str r7, [sp, #80] @ 4-byte Spill - str r4, [sp, #76] @ 4-byte Spill - bl .LmulPv416x32(PLT) - ldr r0, [sp, #812] - ldr r1, [sp, #764] - ldr r2, [r7, #4] - mov r6, r5 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #808] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #768] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #804] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #772] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #800] - str r1, [sp, #20] @ 4-byte Spill - mov r1, r5 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #796] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #792] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #788] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #784] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #780] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #776] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #760] - str r0, [r4] - add r0, sp, #704 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #756] - add r10, sp, #728 - add lr, sp, #704 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #752] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #748] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #744] - str r0, [sp, #8] @ 4-byte Spill - ldm r10, {r5, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #24] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r4, #4] - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r7, #8] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r6 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, 
#28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - add r0, sp, #648 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #700] - add lr, sp, #676 - add r9, sp, #656 - ldr r11, [sp, #692] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #696] - str r0, [sp, #16] @ 4-byte Spill - ldm lr, {r5, r7, r12, lr} - ldr r8, [sp, #648] - ldr r10, [sp, #652] - ldm r9, {r0, r1, r2, r3, r9} - ldr r6, [sp, #24] @ 4-byte Reload - adds r6, r8, r6 - str r6, [r4, #8] - mov r6, r4 - ldr r4, [sp, #40] @ 4-byte Reload - adcs r4, r10, r4 - str r4, [sp, #24] @ 4-byte Spill - ldr r4, [sp, #36] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #84] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r5, r0 - ldr r5, [sp, #80] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - ldr r2, [r5, #12] - adcs r0, r7, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r4 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - add r0, sp, #592 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #644] - add lr, sp, #612 - add r7, sp, #600 - ldr r8, [sp, #628] - ldr r11, [sp, #624] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #640] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #636] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #632] - str r0, [sp, #8] @ 4-byte Spill - ldm lr, {r3, r12, lr} - ldr r0, [sp, #592] - ldr r9, [sp, #596] - ldm r7, {r1, r2, r7} - ldr r10, [sp, #24] @ 4-byte Reload - adds r0, r0, r10 - str r0, [r6, #12] - ldr r0, [sp, #40] @ 4-byte Reload - adcs r6, r9, r0 - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #16] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r4 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - 
add r0, sp, #536 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #588] - ldr r8, [sp, #536] - add r4, sp, #540 - ldr r11, [sp, #580] - ldr r9, [sp, #576] - ldr lr, [sp, #572] - ldr r5, [sp, #568] - ldr r10, [sp, #564] - ldr r12, [sp, #560] - ldr r3, [sp, #556] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #584] - adds r6, r8, r6 - str r0, [sp, #16] @ 4-byte Spill - ldm r4, {r0, r1, r2, r4} - ldr r7, [sp, #76] @ 4-byte Reload - str r6, [r7, #16] - ldr r6, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #80] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - ldr r2, [r4, #20] - adcs r0, r3, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r5, r0 - ldr r5, [sp, #84] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - add r0, sp, #480 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #532] - add r10, sp, #480 - add r12, sp, #492 - ldr r6, [sp, #516] - ldr r11, [sp, #512] - ldr lr, [sp, #508] - ldr r9, [sp, #504] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #528] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #524] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #520] - str r0, [sp, #8] @ 4-byte Spill - ldm r10, {r0, r1, r10} - ldm r12, {r2, r3, r12} - ldr r8, [sp, #24] @ 4-byte Reload - adds r0, r0, r8 - str r0, [r7, #20] - ldr r0, [sp, #44] @ 4-byte Reload - mov r7, r5 - adcs r0, r1, r0 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r4, #24] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, 
#28] @ 4-byte Spill - add r0, sp, #424 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #476] - add r5, sp, #428 - ldr r11, [sp, #464] - ldr r9, [sp, #460] - ldr lr, [sp, #456] - ldr r10, [sp, #452] - ldr r12, [sp, #448] - ldr r3, [sp, #444] - ldr r8, [sp, #424] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #472] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #468] - str r0, [sp, #12] @ 4-byte Spill - ldm r5, {r0, r1, r2, r5} - ldr r4, [sp, #24] @ 4-byte Reload - adds r6, r8, r4 - ldr r4, [sp, #76] @ 4-byte Reload - str r6, [r4, #24] - ldr r6, [sp, #48] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r5, r0 - ldr r5, [sp, #80] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - ldr r2, [r5, #28] - adcs r0, r3, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r7 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - add r0, sp, #368 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #420] - add r12, sp, #388 - add r10, sp, #368 - ldr lr, [sp, #408] - ldr r6, [sp, #404] - ldr r11, [sp, #400] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #416] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #412] - str r0, [sp, #12] @ 4-byte Spill - ldm r12, {r3, r9, r12} - ldr r7, [sp, #384] - ldm r10, {r0, r1, r10} - ldr r8, [sp, #24] @ 4-byte Reload - ldr r2, [sp, #380] - adds r0, r0, r8 - str r0, [r4, #28] - ldr r0, [sp, #52] @ 4-byte Reload - ldr r4, [sp, #84] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #32] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r4 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 
4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - add r0, sp, #312 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #364] - add r11, sp, #344 - add lr, sp, #316 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #360] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #356] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r7, r9, r11} - ldr r10, [sp, #340] - ldr r8, [sp, #312] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r5, [sp, #24] @ 4-byte Reload - adds r6, r8, r5 - ldr r5, [sp, #76] @ 4-byte Reload - str r6, [r5, #32] - ldr r6, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #80] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r6, #36] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r4 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - add r0, sp, #256 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #308] - add lr, sp, #288 - add r12, sp, #268 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #304] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #300] - str r0, [sp, #12] @ 4-byte Spill - ldm lr, {r7, r8, lr} - ldr r11, [sp, #284] - ldr r1, [sp, #256] - ldr r0, [sp, #260] - ldr r10, [sp, #264] - ldm r12, {r2, r3, r9, r12} - ldr r4, [sp, #24] @ 4-byte Reload - adds r1, r1, r4 - str r1, [r5, #36] - ldr r1, [sp, #60] @ 4-byte Reload - ldr r5, [sp, #84] @ 4-byte Reload - adcs r4, r0, r1 - ldr r0, [sp, #64] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r6, #40] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte 
Spill - add r0, sp, #200 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #252] - add r11, sp, #228 - add lr, sp, #204 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #248] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #244] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r7, r8, r10, r11} - ldr r9, [sp, #200] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r6, r9, r4 - ldr r4, [sp, #76] @ 4-byte Reload - str r6, [r4, #40] - ldr r6, [sp, #64] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #80] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r6, #44] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #32] @ 4-byte Spill - add r0, sp, #144 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #196] - add r11, sp, #164 - add r12, sp, #152 - ldr lr, [sp, #184] - ldr r7, [sp, #180] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #192] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #188] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r5, r8, r10, r11} - ldr r2, [sp, #144] - ldr r1, [sp, #148] - ldm r12, {r0, r3, r12} - ldr r9, [sp, #24] @ 4-byte Reload - adds r2, r2, r9 - str r2, [r4, #44] - ldr r2, [r6, #48] - ldr r6, [sp, #20] @ 4-byte Reload - adcs r6, r1, r6 - ldr r1, [sp, #72] @ 4-byte Reload - adcs r9, r0, r1 - ldr r0, [sp, #68] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - add r0, sp, #88 - bl .LmulPv416x32(PLT) - add r3, sp, #88 - add r11, sp, #104 - ldm r3, {r0, r1, r2, r3} - adds r12, r0, r6 - ldr r0, [sp, #20] @ 4-byte Reload 
- adcs lr, r1, r9 - adcs r5, r2, r0 - ldr r0, [sp, #4] @ 4-byte Reload - adcs r6, r3, r0 - ldr r0, [sp, #140] - str r0, [sp, #84] @ 4-byte Spill - ldm r11, {r0, r1, r2, r3, r7, r8, r9, r10, r11} - str r12, [r4, #48] - str lr, [r4, #52] - str r5, [r4, #56] - ldr r5, [sp, #24] @ 4-byte Reload - str r6, [r4, #60] - ldr r6, [sp, #28] @ 4-byte Reload - add r12, r4, #80 - adcs r0, r0, r5 - adcs r1, r1, r6 - str r0, [r4, #64] - ldr r0, [sp, #52] @ 4-byte Reload - ldr r6, [sp, #84] @ 4-byte Reload - str r1, [r4, #68] - ldr r1, [sp, #56] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [sp, #72] @ 4-byte Reload - adcs r1, r3, r1 - str r0, [r4, #72] - ldr r0, [sp, #60] @ 4-byte Reload - ldr r3, [sp, #68] @ 4-byte Reload - str r1, [r4, #76] - ldr r1, [sp, #80] @ 4-byte Reload - adcs r0, r7, r0 - ldr r7, [sp, #64] @ 4-byte Reload - adcs r1, r8, r1 - adcs r2, r9, r2 - adcs r3, r10, r3 - adcs r7, r11, r7 - adc r6, r6, #0 - stm r12, {r0, r1, r2, r3, r7} - str r6, [r4, #100] - add sp, sp, #820 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end192: - .size mcl_fpDbl_mulPre13L, .Lfunc_end192-mcl_fpDbl_mulPre13L - .cantunwind - .fnend - - .globl mcl_fpDbl_sqrPre13L - .align 2 - .type mcl_fpDbl_sqrPre13L,%function -mcl_fpDbl_sqrPre13L: @ @mcl_fpDbl_sqrPre13L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #820 - sub sp, sp, #820 - mov r5, r1 - mov r4, r0 - add r0, sp, #760 - ldr r2, [r5] - bl .LmulPv416x32(PLT) - ldr r0, [sp, #812] - ldr r1, [sp, #764] - ldr r2, [r5, #4] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #808] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #768] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #804] - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #772] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #800] - str r1, [sp, #32] @ 4-byte Spill - mov r1, r5 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #796] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #792] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #788] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #784] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #780] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #776] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #760] - str r0, [r4] - add r0, sp, #704 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #756] - add r10, sp, #728 - add lr, sp, #704 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #752] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #748] - str r0, [sp, #24] @ 4-byte Spill - ldm r10, {r6, r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #36] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r4, #4] - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #8] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr 
r0, [sp, #76] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - add r0, sp, #648 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #700] - add lr, sp, #680 - add r11, sp, #656 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #696] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #692] - str r0, [sp, #24] @ 4-byte Spill - ldm lr, {r6, r12, lr} - ldr r8, [sp, #648] - ldr r10, [sp, #652] - ldm r11, {r0, r1, r2, r3, r9, r11} - ldr r7, [sp, #36] @ 4-byte Reload - adds r7, r8, r7 - str r7, [r4, #8] - ldr r7, [sp, #52] @ 4-byte Reload - adcs r7, r10, r7 - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [sp, #48] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #12] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - add r0, sp, #592 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #644] - add r9, sp, #620 - add lr, sp, #600 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #640] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #636] - str r0, [sp, #24] @ 4-byte Spill - ldm r9, {r6, r7, r8, r9} - ldr r0, [sp, #592] - ldr r11, [sp, #596] - ldm lr, {r1, r2, r3, r12, lr} - ldr r10, [sp, #36] @ 4-byte Reload - adds r0, r0, r10 - str r0, [r4, #12] - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #16] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload 
- str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - add r0, sp, #536 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #588] - add r12, sp, #540 - ldr r11, [sp, #576] - ldr lr, [sp, #572] - ldr r6, [sp, #568] - ldr r8, [sp, #536] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #584] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #580] - str r0, [sp, #24] @ 4-byte Spill - ldm r12, {r0, r1, r2, r3, r9, r10, r12} - ldr r7, [sp, #36] @ 4-byte Reload - adds r7, r8, r7 - str r7, [r4, #16] - ldr r7, [sp, #52] @ 4-byte Reload - adcs r7, r0, r7 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #20] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - add r0, sp, #480 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #532] - add r10, sp, #512 - add lr, sp, #484 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #528] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #524] - str r0, [sp, #24] @ 4-byte Spill - ldm r10, {r6, r8, r10} - ldr r9, [sp, #480] - ldr r11, [sp, #508] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r7, r9, r7 - str r7, [r4, #20] - ldr r7, [sp, #56] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #24] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - add r0, 
sp, #424 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #476] - add r8, sp, #456 - add r12, sp, #432 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #472] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #468] - str r0, [sp, #24] @ 4-byte Spill - ldm r8, {r6, r7, r8} - ldr lr, [sp, #452] - ldr r10, [sp, #448] - ldr r0, [sp, #424] - ldr r11, [sp, #428] - ldm r12, {r1, r2, r3, r12} - ldr r9, [sp, #36] @ 4-byte Reload - adds r0, r0, r9 - str r0, [r4, #24] - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #28] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - add r0, sp, #368 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #420] - add r11, sp, #400 - add lr, sp, #372 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #416] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #412] - str r0, [sp, #24] @ 4-byte Spill - ldm r11, {r6, r8, r11} - ldr r10, [sp, #368] - ldm lr, {r0, r1, r2, r3, r9, r12, lr} - ldr r7, [sp, #36] @ 4-byte Reload - adds r7, r10, r7 - str r7, [r4, #28] - ldr r7, [sp, #64] @ 4-byte Reload - adcs r7, r0, r7 - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #32] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - add r0, sp, #312 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #364] - add r10, sp, #344 - add lr, sp, #316 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #360] - str r0, [sp, #28] @ 4-byte Spill - 
ldr r0, [sp, #356] - str r0, [sp, #24] @ 4-byte Spill - ldm r10, {r6, r8, r10} - ldr r9, [sp, #312] - ldr r11, [sp, #340] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r7, r9, r7 - str r7, [r4, #32] - ldr r7, [sp, #68] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #36] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - add r0, sp, #256 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #308] - add r8, sp, #288 - add r12, sp, #264 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #304] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #300] - str r0, [sp, #20] @ 4-byte Spill - ldm r8, {r6, r7, r8} - ldr lr, [sp, #284] - ldr r10, [sp, #280] - ldr r0, [sp, #256] - ldr r11, [sp, #260] - ldm r12, {r1, r2, r3, r12} - ldr r9, [sp, #36] @ 4-byte Reload - adds r0, r0, r9 - str r0, [r4, #36] - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #40] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #200 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #252] - add r10, sp, #228 - add r12, sp, #200 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #248] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #244] - str r0, [sp, #20] @ 4-byte Spill - ldm r10, {r6, r7, r8, r10} - ldr lr, [sp, #224] - ldr r9, [sp, #220] - ldm r12, {r0, r1, r2, r3, r12} - ldr r11, 
[sp, #32] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r4, #40] - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #44] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - add r0, sp, #144 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #196] - add r12, sp, #148 - ldr r7, [sp, #180] - ldr r11, [sp, #176] - ldr r8, [sp, #172] - ldr lr, [sp, #168] - ldr r10, [sp, #164] - ldr r2, [sp, #144] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #192] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #188] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #184] - str r0, [sp, #16] @ 4-byte Spill - ldm r12, {r0, r1, r3, r12} - ldr r6, [sp, #32] @ 4-byte Reload - adds r2, r2, r6 - ldr r6, [sp, #84] @ 4-byte Reload - str r2, [r4, #44] - ldr r2, [r5, #48] - adcs r6, r0, r6 - ldr r0, [sp, #80] @ 4-byte Reload - adcs r9, r1, r0 - ldr r0, [sp, #76] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - add r0, sp, #88 - bl .LmulPv416x32(PLT) - add r3, sp, #88 - add r11, sp, #104 - ldm r3, {r0, r1, r2, r3} - adds r12, r0, r6 - ldr r0, [sp, #12] @ 4-byte Reload - adcs lr, r1, r9 - adcs r5, r2, r0 - ldr r0, [sp, #8] @ 4-byte Reload - adcs r6, r3, r0 - ldr r0, [sp, #140] - str r0, [sp, #56] @ 4-byte Spill - ldm r11, {r0, r1, r2, r3, r7, r8, r9, r10, r11} - str r12, [r4, #48] - str lr, [r4, #52] - str r5, [r4, #56] - ldr r5, [sp, #32] @ 4-byte Reload - str r6, [r4, #60] - ldr 
r6, [sp, #36] @ 4-byte Reload - add r12, r4, #80 - adcs r0, r0, r5 - adcs r1, r1, r6 - str r0, [r4, #64] - ldr r0, [sp, #60] @ 4-byte Reload - ldr r6, [sp, #56] @ 4-byte Reload - str r1, [r4, #68] - ldr r1, [sp, #64] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [sp, #80] @ 4-byte Reload - adcs r1, r3, r1 - str r0, [r4, #72] - ldr r0, [sp, #68] @ 4-byte Reload - ldr r3, [sp, #76] @ 4-byte Reload - str r1, [r4, #76] - ldr r1, [sp, #84] @ 4-byte Reload - adcs r0, r7, r0 - ldr r7, [sp, #72] @ 4-byte Reload - adcs r1, r8, r1 - adcs r2, r9, r2 - adcs r3, r10, r3 - adcs r7, r11, r7 - adc r6, r6, #0 - stm r12, {r0, r1, r2, r3, r7} - str r6, [r4, #100] - add sp, sp, #820 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end193: - .size mcl_fpDbl_sqrPre13L, .Lfunc_end193-mcl_fpDbl_sqrPre13L - .cantunwind - .fnend - - .globl mcl_fp_mont13L - .align 2 - .type mcl_fp_mont13L,%function -mcl_fp_mont13L: @ @mcl_fp_mont13L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #548 - sub sp, sp, #548 - .pad #1024 - sub sp, sp, #1024 - add r12, sp, #100 - add r6, sp, #1024 - mov r4, r3 - stm r12, {r1, r2, r3} - str r0, [sp, #68] @ 4-byte Spill - add r0, r6, #488 - ldr r5, [r3, #-4] - ldr r2, [r2] - str r5, [sp, #96] @ 4-byte Spill - bl .LmulPv416x32(PLT) - ldr r0, [sp, #1516] - ldr r7, [sp, #1512] - mov r1, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #1520] - mul r2, r7, r5 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #1524] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #1564] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #1560] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #1556] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #1552] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #1548] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #1544] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1540] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1536] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1532] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1528] - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #1456 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #1508] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r5, [sp, #1480] - ldr r10, [sp, #1476] - ldr r11, [sp, #1472] - ldr r6, [sp, #1456] - ldr r9, [sp, #1460] - ldr r8, [sp, #1464] - ldr r4, [sp, #1468] - add lr, sp, #1024 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1504] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1500] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1496] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1492] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1488] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1484] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #4] - add r0, lr, #376 - bl .LmulPv416x32(PLT) - adds r0, r6, r7 - ldr r1, [sp, #36] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - ldr r3, [sp, #1416] - ldr r12, [sp, #1420] - ldr lr, [sp, #1424] - ldr r6, [sp, #1432] - ldr r7, [sp, #1436] - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r9, r0 - ldr r9, [sp, #1444] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r8, r0 - ldr r8, [sp, #1440] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #1428] - adcs r1, r11, r1 - str r0, [sp, #60] @ 4-byte Spill - mov r0, #0 - ldr r11, [sp, #72] @ 4-byte Reload - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte 
Reload - adcs r1, r10, r1 - ldr r10, [sp, #1448] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r5, r1 - ldr r5, [sp, #1400] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #88] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #92] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #1412] - adc r0, r0, #0 - adds r11, r11, r5 - ldr r5, [sp, #64] @ 4-byte Reload - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #1408] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1452] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #1404] - adcs r0, r5, r0 - mov r5, r11 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #1344 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #1396] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r8, [sp, #1368] - ldr r9, [sp, #1364] - ldr r10, [sp, #1360] - ldr r11, [sp, #1344] - ldr r6, [sp, #1348] - ldr r7, [sp, #1352] - ldr r4, [sp, #1356] - add lr, sp, #1024 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1392] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1388] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1384] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1380] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1376] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #1372] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #8] - add r0, lr, #264 - bl .LmulPv416x32(PLT) - adds r0, r5, r11 - ldr r1, [sp, #8] @ 4-byte Reload - ldr r5, [sp, #1288] - ldr r2, [sp, #1300] - ldr r3, [sp, #1304] - ldr r12, [sp, #1308] - ldr lr, [sp, #1312] - ldr r0, [sp, #92] @ 4-byte Reload - 
adcs r0, r0, r6 - ldr r6, [sp, #1320] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - ldr r11, [sp, #92] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #1324] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1316] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1336] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1332] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1328] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1296] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - adds r11, r11, r5 - ldr r5, [sp, #88] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1340] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1292] - adcs r0, r5, r0 - mov r5, r11 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #1232 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #1284] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r8, [sp, #1256] - ldr r9, [sp, #1252] - ldr r10, [sp, #1248] - ldr r11, [sp, #1232] - ldr r6, [sp, #1236] - ldr r7, [sp, #1240] - ldr r4, [sp, #1244] - add lr, sp, #1024 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1280] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1276] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1272] - str r0, [sp, #20] @ 
4-byte Spill - ldr r0, [sp, #1268] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1264] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #1260] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #12] - add r0, lr, #152 - bl .LmulPv416x32(PLT) - adds r0, r5, r11 - ldr r1, [sp, #8] @ 4-byte Reload - ldr r5, [sp, #1176] - ldr r2, [sp, #1188] - ldr r3, [sp, #1192] - ldr r12, [sp, #1196] - ldr lr, [sp, #1200] - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1208] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - ldr r11, [sp, #92] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #1212] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1204] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1224] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1220] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1216] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1184] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - adds r11, r11, r5 - ldr r5, [sp, #88] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1228] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1180] - adcs r0, r5, r0 - mov r5, r11 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul 
r2, r11, r0 - add r0, sp, #1120 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #1172] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r8, [sp, #1144] - ldr r9, [sp, #1140] - ldr r10, [sp, #1136] - ldr r11, [sp, #1120] - ldr r6, [sp, #1124] - ldr r7, [sp, #1128] - ldr r4, [sp, #1132] - add lr, sp, #1024 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1168] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1164] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1160] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1156] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1152] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #1148] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #16] - add r0, lr, #40 - bl .LmulPv416x32(PLT) - adds r0, r5, r11 - ldr r1, [sp, #8] @ 4-byte Reload - ldr r5, [sp, #1064] - ldr r2, [sp, #1076] - ldr r3, [sp, #1080] - ldr r12, [sp, #1084] - ldr lr, [sp, #1088] - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1096] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - ldr r11, [sp, #92] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #1100] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1092] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1112] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1108] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1104] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1072] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - adds r11, r11, r5 - ldr r5, [sp, #88] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1116] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1068] - adcs r0, r5, r0 - mov r5, r11 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, 
#48] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #1008 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #1060] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r8, [sp, #1032] - ldr r9, [sp, #1028] - ldr r10, [sp, #1024] - ldr r11, [sp, #1008] - ldr r6, [sp, #1012] - ldr r7, [sp, #1016] - ldr r4, [sp, #1020] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1056] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1052] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1048] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1044] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1040] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #1036] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #20] - add r0, sp, #952 - bl .LmulPv416x32(PLT) - adds r0, r5, r11 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #956 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #980 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1004] - str r0, [sp, #32] @ 4-byte Spill - ldm r10, {r4, r6, r7, r8, r9, r10} - ldr r5, [sp, #952] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #92] @ 4-byte Reload - adds r11, r11, r5 - ldr r5, [sp, #88] @ 4-byte Reload - adcs r0, r5, r0 - mov r5, r11 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, lr - str 
r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #896 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #948] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r8, [sp, #920] - ldr r9, [sp, #916] - ldr r10, [sp, #912] - ldr r11, [sp, #896] - ldr r6, [sp, #900] - ldr r7, [sp, #904] - ldr r4, [sp, #908] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #944] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #940] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #936] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #932] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #928] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #924] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #24] - add r0, sp, #840 - bl .LmulPv416x32(PLT) - adds r0, r5, r11 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #844 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #868 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #892] - str r0, [sp, #32] @ 4-byte Spill - ldm r10, {r4, r6, r7, r8, r9, r10} - ldr r5, [sp, #840] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #92] @ 4-byte Reload - adds r11, r11, r5 - ldr r5, [sp, #88] @ 4-byte Reload - adcs r0, r5, r0 - mov r5, r11 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload 
- adcs r0, r0, r2 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #784 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #836] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r8, [sp, #808] - ldr r9, [sp, #804] - ldr r10, [sp, #800] - ldr r11, [sp, #784] - ldr r6, [sp, #788] - ldr r7, [sp, #792] - ldr r4, [sp, #796] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #832] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #828] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #824] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #820] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #816] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #812] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #28] - add r0, sp, #728 - bl .LmulPv416x32(PLT) - adds r0, r5, r11 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #732 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #756 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #780] - str r0, [sp, #32] @ 4-byte Spill - ldm r10, {r4, r6, r7, r8, r9, r10} - ldr r5, [sp, #728] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #92] @ 
4-byte Reload - adds r11, r11, r5 - ldr r5, [sp, #88] @ 4-byte Reload - adcs r0, r5, r0 - mov r5, r11 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #672 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #724] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r8, [sp, #696] - ldr r9, [sp, #692] - ldr r10, [sp, #688] - ldr r11, [sp, #672] - ldr r6, [sp, #676] - ldr r7, [sp, #680] - ldr r4, [sp, #684] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #720] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #716] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #712] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #708] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #704] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #700] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #32] - add r0, sp, #616 - bl .LmulPv416x32(PLT) - adds r0, r5, r11 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #620 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #644 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - 
str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #668] - str r0, [sp, #32] @ 4-byte Spill - ldm r10, {r4, r6, r7, r8, r9, r10} - ldr r5, [sp, #616] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #92] @ 4-byte Reload - adds r11, r11, r5 - ldr r5, [sp, #88] @ 4-byte Reload - adcs r0, r5, r0 - mov r5, r11 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #560 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #612] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r8, [sp, #584] - ldr r9, [sp, #580] - ldr r10, [sp, #576] - ldr r11, [sp, #560] - ldr r6, [sp, #564] - ldr r7, [sp, #568] - ldr r4, [sp, #572] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #608] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #604] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #600] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #596] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #592] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #588] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #36] - add r0, sp, #504 - bl .LmulPv416x32(PLT) - adds r0, r5, r11 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #508 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #532 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, 
[sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #556] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #552] - str r0, [sp, #28] @ 4-byte Spill - ldm r10, {r4, r6, r8, r9, r10} - ldr r5, [sp, #504] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #92] @ 4-byte Reload - ldr r7, [sp, #88] @ 4-byte Reload - adds r5, r11, r5 - adcs r0, r7, r0 - str r5, [sp, #20] @ 4-byte Spill - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #96] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - mul r2, r5, r8 - adcs r0, r0, r9 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #448 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #500] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r5, [sp, #472] - ldr r9, [sp, #468] - ldr r10, [sp, #464] - ldr r11, [sp, #448] - ldr r6, [sp, #452] - ldr r7, [sp, #456] - ldr r4, [sp, #460] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #496] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #492] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #488] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #484] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #480] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #476] - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #40] - add r0, sp, #392 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #20] @ 4-byte Reload - ldr r1, [sp, #88] @ 4-byte Reload - ldr r2, [sp, #4] @ 4-byte Reload - add lr, sp, #408 - adds r0, r0, r11 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - add r6, sp, #392 - adcs r11, r1, r7 - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r1, r4 - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r1, r10 - add r10, sp, #432 - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r1, r9 - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - adcs r1, r1, r5 - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #8] @ 4-byte Reload - str r1, [sp, #64] @ 4-byte Spill - 
ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #12] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r1, r2 - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adc r1, r1, #0 - str r1, [sp, #36] @ 4-byte Spill - ldm r6, {r2, r5, r6} - ldr r4, [sp, #404] - adds r0, r0, r2 - mul r1, r0, r8 - adcs r5, r11, r5 - str r0, [sp, #92] @ 4-byte Spill - str r1, [sp, #28] @ 4-byte Spill - ldm r10, {r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - str r5, [sp, #88] @ 4-byte Spill - ldr r5, [sp, #84] @ 4-byte Reload - adcs r5, r5, r6 - str r5, [sp, #84] @ 4-byte Spill - ldr r5, [sp, #80] @ 4-byte Reload - adcs r4, r5, r4 - str r4, [sp, #80] @ 4-byte Spill - ldr r4, [sp, #76] @ 4-byte Reload - adcs r0, r4, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #36] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #32] @ 4-byte Spill - add r0, sp, #336 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #388] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r6, [sp, #364] - ldr r8, [sp, #360] - ldr r9, [sp, #356] - ldr r10, [sp, #352] - ldr r7, [sp, #336] - ldr r4, [sp, #340] - ldr r11, [sp, #344] - ldr r5, [sp, #348] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #384] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #380] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #376] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #372] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #368] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #44] - add r0, sp, #280 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #92] @ 4-byte Reload - ldr r1, [sp, #84] @ 4-byte Reload - ldr r2, [sp, #8] @ 4-byte Reload - add lr, sp, #296 - adds r0, r0, r7 - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #292] - adcs r11, r1, r11 - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r1, r5 - ldr r5, [sp, #288] - str r1, [sp, #88] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r1, r10 - add r10, sp, #320 - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - adcs r1, r1, r9 - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs 
r1, r1, r8 - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, r6 - ldr r6, [sp, #284] - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #12] @ 4-byte Reload - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #280] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #32] @ 4-byte Reload - adc r1, r1, #0 - str r1, [sp, #28] @ 4-byte Spill - adds r1, r0, r2 - ldr r0, [sp, #96] @ 4-byte Reload - adcs r6, r11, r6 - str r1, [sp, #92] @ 4-byte Spill - mul r2, r1, r0 - str r2, [sp, #24] @ 4-byte Spill - ldm r10, {r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - str r6, [sp, #40] @ 4-byte Spill - ldr r6, [sp, #88] @ 4-byte Reload - adcs r5, r6, r5 - str r5, [sp, #36] @ 4-byte Spill - ldr r5, [sp, #84] @ 4-byte Reload - adcs r4, r5, r4 - str r4, [sp, #32] @ 4-byte Spill - ldr r4, [sp, #80] @ 4-byte Reload - adcs r0, r4, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #48] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #224 - bl .LmulPv416x32(PLT) - ldr r1, [sp, #276] - add r11, sp, #224 - ldr r4, [sp, #252] - ldr r8, [sp, #248] - ldr r9, [sp, #244] - ldr r10, [sp, #240] - add r0, sp, #168 - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #272] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #268] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #264] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #260] - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp, #256] - str r1, [sp, #8] @ 4-byte Spill - ldm r11, {r6, r7, r11} - ldr r1, [sp, #104] @ 4-byte Reload - ldr r5, [sp, #236] - ldr r2, [r1, #48] - ldr r1, [sp, #100] @ 4-byte Reload - bl .LmulPv416x32(PLT) - ldr r0, [sp, #92] @ 4-byte Reload - ldr r1, [sp, #36] @ 4-byte Reload - ldr r2, [sp, #8] @ 4-byte Reload - add lr, sp, #184 - adds r0, r0, r6 - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r7 - add r7, sp, #168 - adcs r1, r1, r11 - str r1, [sp, #104] @ 4-byte Spill - ldr r1, [sp, #32] @ 4-byte Reload - adcs r1, r1, r5 - str r1, [sp, #100] @ 4-byte Spill - ldr r1, [sp, #88] @ 4-byte Reload - adcs r1, r1, r10 - add r10, sp, #208 - 
str r1, [sp, #92] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r1, r9 - str r1, [sp, #88] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r1, r8 - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r1, r4 - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #12] @ 4-byte Reload - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r1, r2 - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - adc r1, r1, #0 - str r1, [sp, #48] @ 4-byte Spill - ldm r7, {r2, r6, r7} - ldr r5, [sp, #180] - adds r4, r0, r2 - ldr r0, [sp, #96] @ 4-byte Reload - mul r1, r4, r0 - ldr r0, [sp, #220] - str r1, [sp, #44] @ 4-byte Spill - str r0, [sp, #32] @ 4-byte Spill - ldm r10, {r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #104] @ 4-byte Reload - adcs r11, r11, r6 - ldr r6, [sp, #100] @ 4-byte Reload - adcs r6, r6, r7 - str r6, [sp, #36] @ 4-byte Spill - ldr r6, [sp, #92] @ 4-byte Reload - adcs r5, r6, r5 - ldr r6, [sp, #88] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r8, r0, r8 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r9, r0, r9 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #108] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r6, r0, r1 - mov r0, #0 - mov r1, r10 - adc r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - add r0, sp, #112 - bl .LmulPv416x32(PLT) - add r3, sp, #112 - ldm r3, {r0, r1, r2, r3} - adds r0, r4, r0 - ldr r0, [sp, #36] @ 4-byte Reload - adcs r7, r11, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r7, [sp, #48] @ 4-byte Spill - adcs lr, r0, r2 - ldr r0, [sp, #128] - adcs r12, r5, r3 - str lr, [sp, #52] @ 4-byte Spill - str r12, [sp, #56] @ 4-byte Spill - adcs r4, r1, r0 - ldr r0, [sp, #132] - ldr r1, [sp, #84] @ 4-byte Reload - str r4, [sp, #60] @ 4-byte Spill - adcs r0, r1, r0 - ldr r1, [sp, #88] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #136] - adcs r0, r1, r0 - ldr r1, [sp, #92] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #140] - adcs r0, r1, r0 - ldr r1, [sp, #96] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #144] - adcs r0, r1, r0 - ldr r1, [sp, #100] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #148] - adcs r0, r1, r0 - ldr r1, [sp, #104] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - 
ldr r0, [sp, #152] - adcs r0, r8, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #156] - adcs r0, r9, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #160] - adcs r0, r1, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #164] - adcs r0, r6, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - mov r0, r10 - ldmib r0, {r1, r2, r3, r5} - ldr r6, [r0] - ldr r10, [r0, #20] - ldr r11, [r0, #28] - str r5, [sp, #40] @ 4-byte Spill - ldr r5, [r0, #24] - subs r6, r7, r6 - sbcs r9, lr, r1 - str r5, [sp, #44] @ 4-byte Spill - mov r5, r0 - sbcs r0, r12, r2 - ldr r2, [sp, #40] @ 4-byte Reload - ldr r1, [r5, #48] - sbcs r3, r4, r3 - ldr lr, [r5, #32] - ldr r12, [r5, #36] - ldr r8, [r5, #40] - ldr r4, [r5, #44] - ldr r5, [sp, #44] @ 4-byte Reload - str r1, [sp, #108] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - sbcs r1, r1, r2 - ldr r2, [sp, #76] @ 4-byte Reload - sbcs r7, r2, r10 - ldr r2, [sp, #80] @ 4-byte Reload - sbcs r2, r2, r5 - ldr r5, [sp, #84] @ 4-byte Reload - sbcs r10, r5, r11 - ldr r5, [sp, #88] @ 4-byte Reload - sbcs r11, r5, lr - ldr r5, [sp, #92] @ 4-byte Reload - sbcs r12, r5, r12 - ldr r5, [sp, #96] @ 4-byte Reload - sbcs lr, r5, r8 - ldr r5, [sp, #100] @ 4-byte Reload - sbcs r4, r5, r4 - ldr r5, [sp, #104] @ 4-byte Reload - str r4, [sp, #44] @ 4-byte Spill - ldr r4, [sp, #108] @ 4-byte Reload - sbcs r5, r5, r4 - str r5, [sp, #108] @ 4-byte Spill - ldr r5, [sp, #64] @ 4-byte Reload - sbc r5, r5, #0 - ands r8, r5, #1 - ldr r5, [sp, #48] @ 4-byte Reload - movne r6, r5 - ldr r5, [sp, #68] @ 4-byte Reload - str r6, [r5] - ldr r6, [sp, #52] @ 4-byte Reload - movne r9, r6 - ldr r6, [sp, #56] @ 4-byte Reload - str r9, [r5, #4] - movne r0, r6 - cmp r8, #0 - str r0, [r5, #8] - ldr r0, [sp, #60] @ 4-byte Reload - movne r3, r0 - ldr r0, [sp, #72] @ 4-byte Reload - str r3, [r5, #12] - movne r1, r0 - ldr r0, [sp, #76] @ 4-byte Reload - str r1, [r5, #16] - ldr r1, [sp, #44] @ 4-byte Reload - movne r7, r0 - ldr r0, [sp, #80] @ 4-byte Reload - cmp r8, #0 - str r7, [r5, #20] - movne r2, r0 - ldr r0, [sp, #84] @ 4-byte Reload - str r2, [r5, #24] - movne r10, r0 - ldr r0, [sp, #88] @ 4-byte Reload - str r10, [r5, #28] - movne r11, r0 - ldr r0, [sp, #92] @ 4-byte Reload - cmp r8, #0 - str r11, [r5, #32] - movne r12, r0 - ldr r0, [sp, #96] @ 4-byte Reload - str r12, [r5, #36] - movne lr, r0 - ldr r0, [sp, #100] @ 4-byte Reload - str lr, [r5, #40] - movne r1, r0 - ldr r0, [sp, #104] @ 4-byte Reload - cmp r8, #0 - str r1, [r5, #44] - ldr r1, [sp, #108] @ 4-byte Reload - movne r1, r0 - str r1, [r5, #48] - add sp, sp, #548 - add sp, sp, #1024 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end194: - .size mcl_fp_mont13L, .Lfunc_end194-mcl_fp_mont13L - .cantunwind - .fnend - - .globl mcl_fp_montNF13L - .align 2 - .type mcl_fp_montNF13L,%function -mcl_fp_montNF13L: @ @mcl_fp_montNF13L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #548 - sub sp, sp, #548 - .pad #1024 - sub sp, sp, #1024 - add r12, sp, #100 - add r6, sp, #1024 - mov r4, r3 - stm r12, {r1, r2, r3} - str r0, [sp, #72] @ 4-byte Spill - add r0, r6, #488 - ldr r5, [r3, #-4] - ldr r2, [r2] - str r5, [sp, #96] @ 4-byte Spill - bl .LmulPv416x32(PLT) - ldr r0, [sp, #1516] - ldr r8, [sp, #1512] - mov r1, r4 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #1520] - mul r2, r8, r5 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #1524] - str r0, [sp, #64] @ 4-byte Spill 
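@ mcl_fp_montNF13L (opened above) is the "no final carry word" variant of the
@ CIOS Montgomery multiplier mcl_fp_mont13L: 13 limbs of 32 bits (416-bit
@ operands), one round per word of the multiplier. Each round makes two
@ .LmulPv416x32 calls, one for x[i]*y and one for q*p, where
@ q = (acc0 * p') mod 2^32 and p' = -p^(-1) mod 2^32 is loaded from
@ [r3, #-4] in the prologue; adding q*p zeroes the accumulator's low word so
@ it can slide down one word. An illustrative word-level sketch of the loop
@ this code unrolls (names here are descriptive, not from this file):
@   for i in 0 .. 12:
@     acc = acc + x[i] * y        @ first .LmulPv416x32 call
@     q   = (acc mod 2^32) * p'   @ the "mul r2, <acc0>, <p'>" before each call
@     acc = (acc + q * p) >> 32   @ second .LmulPv416x32 call + adcs chain
@ Unlike mcl_fp_mont13L, which tracks an extra carry word and ends with the
@ borrow-tested conditional subtraction seen above (subs/sbcs ... ands/movne),
@ the NF form selects the reduced result by the sign of t - p (asr/movlt).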
- ldr r0, [sp, #1564] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #1560] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #1556] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #1552] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #1548] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #1544] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1540] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1536] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1532] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1528] - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #1456 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #1508] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r10, [sp, #1480] - ldr r11, [sp, #1476] - ldr r6, [sp, #1472] - ldr r7, [sp, #1456] - ldr r9, [sp, #1460] - ldr r4, [sp, #1464] - ldr r5, [sp, #1468] - add lr, sp, #1024 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1504] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1500] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1496] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1492] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1488] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1484] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #4] - add r0, lr, #376 - bl .LmulPv416x32(PLT) - adds r0, r7, r8 - ldr r1, [sp, #12] @ 4-byte Reload - ldr r2, [sp, #1412] - ldr r3, [sp, #1416] - ldr r12, [sp, #1420] - ldr lr, [sp, #1424] - ldr r7, [sp, #1436] - ldr r8, [sp, #1440] - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r9, r0 - ldr r9, [sp, #1444] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #1400] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r5, r0 - ldr r5, [sp, #1428] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r6, r0 - ldr r6, [sp, #1432] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r11, r0 - ldr r11, [sp, #76] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r10, r0 - ldr r10, [sp, #1448] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adc r0, r1, r0 - adds r11, r11, r4 - ldr r4, [sp, #68] @ 4-byte Reload - ldr r1, [sp, #1408] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1452] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1404] - adcs r0, r4, r0 - mov r4, r11 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #80] @ 
4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #1344 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #1396] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r8, [sp, #1368] - ldr r9, [sp, #1364] - ldr r10, [sp, #1360] - ldr r11, [sp, #1344] - ldr r6, [sp, #1348] - ldr r7, [sp, #1352] - ldr r5, [sp, #1356] - add lr, sp, #1024 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1392] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1388] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1384] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1380] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1376] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1372] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #8] - add r0, lr, #264 - bl .LmulPv416x32(PLT) - adds r0, r4, r11 - ldr r1, [sp, #12] @ 4-byte Reload - add r11, sp, #1312 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1340] - str r0, [sp, #36] @ 4-byte Spill - ldm r11, {r4, r5, r6, r8, r9, r10, r11} - ldr r0, [sp, #1288] - ldr r7, [sp, #92] @ 4-byte Reload - ldr r1, [sp, #1292] - ldr r2, [sp, #1296] - ldr r3, [sp, #1300] - ldr r12, [sp, #1304] - ldr lr, [sp, #1308] - adds r7, r7, r0 - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, 
#88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - mov r8, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #1232 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #1284] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r9, [sp, #1256] - ldr r10, [sp, #1252] - ldr r11, [sp, #1248] - ldr r7, [sp, #1232] - ldr r5, [sp, #1236] - ldr r4, [sp, #1240] - ldr r6, [sp, #1244] - add lr, sp, #1024 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1280] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1276] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1272] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1268] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1264] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1260] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #12] - add r0, lr, #152 - bl .LmulPv416x32(PLT) - adds r0, r8, r7 - ldr r1, [sp, #12] @ 4-byte Reload - ldr r2, [sp, #1188] - ldr r3, [sp, #1192] - ldr r12, [sp, #1196] - ldr lr, [sp, #1200] - ldr r7, [sp, #1212] - ldr r8, [sp, #1216] - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1204] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1176] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1208] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #92] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1224] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1220] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, r1 - adds r11, r11, r4 - ldr r4, [sp, #88] @ 4-byte Reload - 
ldr r1, [sp, #1184] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1228] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1180] - adcs r0, r4, r0 - mov r4, r11 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #1120 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #1172] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r8, [sp, #1144] - ldr r9, [sp, #1140] - ldr r10, [sp, #1136] - ldr r11, [sp, #1120] - ldr r6, [sp, #1124] - ldr r7, [sp, #1128] - ldr r5, [sp, #1132] - add lr, sp, #1024 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1168] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1164] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1160] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1156] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1152] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1148] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #16] - add r0, lr, #40 - bl .LmulPv416x32(PLT) - adds r0, r4, r11 - ldr r1, [sp, #12] @ 4-byte Reload - add r11, sp, #1088 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #40] @ 
4-byte Spill - ldr r0, [sp, #1116] - str r0, [sp, #36] @ 4-byte Spill - ldm r11, {r4, r5, r6, r8, r9, r10, r11} - ldr r0, [sp, #1064] - ldr r7, [sp, #92] @ 4-byte Reload - ldr r1, [sp, #1068] - ldr r2, [sp, #1072] - ldr r3, [sp, #1076] - ldr r12, [sp, #1080] - ldr lr, [sp, #1084] - adds r7, r7, r0 - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - mov r8, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #1008 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #1060] - add r11, sp, #1016 - ldr r9, [sp, #1032] - ldr r10, [sp, #1028] - ldr r7, [sp, #1008] - ldr r5, [sp, #1012] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1056] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1052] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1048] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1044] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1040] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1036] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r6, r11} - ldr r0, [sp, #104] @ 4-byte Reload - ldr r1, [sp, #100] @ 4-byte Reload - ldr r2, [r0, #20] - add r0, sp, #952 - bl .LmulPv416x32(PLT) - adds r0, r8, r7 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #956 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #980 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 
4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1004] - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r5, r6, r7, r8, r9, r10} - ldr r4, [sp, #952] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #92] @ 4-byte Reload - adds r11, r11, r4 - ldr r4, [sp, #88] @ 4-byte Reload - adcs r0, r4, r0 - mov r4, r11 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #896 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #948] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r8, [sp, #920] - ldr r9, [sp, #916] - ldr r10, [sp, #912] - ldr r11, [sp, #896] - ldr r6, [sp, #900] - ldr r7, [sp, #904] - ldr r5, [sp, #908] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #944] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #940] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #936] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #932] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #928] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #924] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #24] - add r0, sp, #840 - bl .LmulPv416x32(PLT) - adds r0, r4, r11 - ldr r1, [sp, #12] @ 4-byte Reload - add r11, sp, #864 - add lr, sp, #840 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs 
r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #892] - str r0, [sp, #36] @ 4-byte Spill - ldm r11, {r4, r5, r6, r8, r9, r10, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #92] @ 4-byte Reload - adds r7, r7, r0 - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - mov r8, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #784 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #836] - add r11, sp, #792 - ldr r9, [sp, #808] - ldr r10, [sp, #804] - ldr r7, [sp, #784] - ldr r5, [sp, #788] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #832] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #828] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #824] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #820] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #816] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #812] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r6, r11} - ldr r0, [sp, #104] @ 4-byte Reload - ldr r1, [sp, #100] @ 4-byte Reload - ldr r2, [r0, #28] - add r0, sp, #728 - bl .LmulPv416x32(PLT) - adds r0, r8, r7 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #732 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #756 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, 
[sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #780] - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r5, r6, r7, r8, r9, r10} - ldr r4, [sp, #728] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #92] @ 4-byte Reload - adds r11, r11, r4 - ldr r4, [sp, #88] @ 4-byte Reload - adcs r0, r4, r0 - mov r4, r11 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #672 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #724] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r8, [sp, #696] - ldr r9, [sp, #692] - ldr r10, [sp, #688] - ldr r11, [sp, #672] - ldr r6, [sp, #676] - ldr r7, [sp, #680] - ldr r5, [sp, #684] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #720] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #716] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #712] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #708] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #704] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #700] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #32] - add r0, sp, #616 - bl .LmulPv416x32(PLT) - adds r0, r4, r11 - ldr r1, [sp, #12] @ 4-byte Reload - add r11, sp, #640 - add lr, sp, #616 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte 
Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #668] - str r0, [sp, #36] @ 4-byte Spill - ldm r11, {r4, r5, r6, r8, r9, r10, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #92] @ 4-byte Reload - adds r7, r7, r0 - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - mov r8, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #560 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #612] - add r11, sp, #568 - ldr r9, [sp, #584] - ldr r10, [sp, #580] - ldr r7, [sp, #560] - ldr r5, [sp, #564] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #608] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #604] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #600] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #596] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #592] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #588] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r6, r11} - ldr r0, [sp, #104] @ 4-byte Reload - ldr r1, [sp, #100] @ 4-byte Reload - ldr r2, [r0, #36] - add r0, sp, #504 - bl .LmulPv416x32(PLT) - adds r0, r8, r7 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #508 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #532 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 
- ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #556] - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r5, r6, r7, r8, r9, r10} - ldr r4, [sp, #504] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #92] @ 4-byte Reload - adds r11, r11, r4 - ldr r4, [sp, #88] @ 4-byte Reload - adcs r0, r4, r0 - mov r4, r11 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #96] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - mul r2, r11, r8 - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - add r0, sp, #448 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #500] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r9, [sp, #468] - ldr r10, [sp, #464] - ldr r11, [sp, #448] - ldr r6, [sp, #452] - ldr r7, [sp, #456] - ldr r5, [sp, #460] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #496] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #492] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #488] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #484] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #480] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #476] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #472] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [r0, #40] - add r0, sp, #392 - bl .LmulPv416x32(PLT) - adds r0, r4, r11 - ldr r1, [sp, #88] @ 4-byte Reload - ldr r2, [sp, #8] @ 4-byte Reload - add lr, sp, #408 - ldr r4, [sp, #400] - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #396] - adcs r1, r1, r7 - str r1, [sp, #88] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r1, r5 - ldr r5, [sp, #404] - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r1, r10 - add r10, sp, #432 - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r1, r9 - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #68] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #12] @ 4-byte Reload - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, 
[sp, #64] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #36] @ 4-byte Reload - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adc r1, r1, r2 - ldr r2, [sp, #392] - str r1, [sp, #40] @ 4-byte Spill - adds r0, r0, r2 - mul r1, r0, r8 - str r0, [sp, #92] @ 4-byte Spill - str r1, [sp, #32] @ 4-byte Spill - ldm r10, {r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #88] @ 4-byte Reload - adcs r6, r11, r6 - str r6, [sp, #88] @ 4-byte Spill - ldr r6, [sp, #84] @ 4-byte Reload - adcs r4, r6, r4 - str r4, [sp, #84] @ 4-byte Spill - ldr r4, [sp, #80] @ 4-byte Reload - adcs r4, r4, r5 - str r4, [sp, #80] @ 4-byte Spill - ldr r4, [sp, #76] @ 4-byte Reload - adcs r0, r4, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #40] @ 4-byte Spill - adc r0, r10, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #336 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #388] - add r9, sp, #344 - ldr r6, [sp, #364] - ldr r7, [sp, #360] - ldr r8, [sp, #356] - ldr r10, [sp, #336] - ldr r11, [sp, #340] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #384] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #380] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #376] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #372] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #368] - str r0, [sp, #12] @ 4-byte Spill - ldm r9, {r4, r5, r9} - ldr r0, [sp, #104] @ 4-byte Reload - ldr r1, [sp, #100] @ 4-byte Reload - ldr r2, [r0, #44] - add r0, sp, #280 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #92] @ 4-byte Reload - ldr r1, [sp, #84] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - add lr, sp, #296 - adds r0, r0, r10 - add r10, sp, #320 - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - adcs r1, r1, r4 - ldr r4, [sp, #288] - str r1, [sp, #88] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r1, r5 - ldr r5, [sp, #292] - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r1, r9 - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #68] @ 4-byte Reload - adcs r1, r1, r8 - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs r1, r1, r7 - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, r6 - ldr r6, [sp, #284] - str r1, [sp, 
#64] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adc r1, r1, r2 - ldr r2, [sp, #280] - str r1, [sp, #32] @ 4-byte Spill - adds r1, r0, r2 - ldr r0, [sp, #96] @ 4-byte Reload - str r1, [sp, #92] @ 4-byte Spill - mul r2, r1, r0 - str r2, [sp, #24] @ 4-byte Spill - ldm r10, {r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #88] @ 4-byte Reload - adcs r6, r11, r6 - str r6, [sp, #44] @ 4-byte Spill - ldr r6, [sp, #84] @ 4-byte Reload - adcs r4, r6, r4 - str r4, [sp, #40] @ 4-byte Spill - ldr r4, [sp, #80] @ 4-byte Reload - adcs r4, r4, r5 - str r4, [sp, #36] @ 4-byte Spill - ldr r4, [sp, #76] @ 4-byte Reload - adcs r0, r4, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #52] @ 4-byte Spill - adc r0, r10, #0 - str r0, [sp, #48] @ 4-byte Spill - add r0, sp, #224 - bl .LmulPv416x32(PLT) - ldr r1, [sp, #276] - add r9, sp, #232 - ldr r6, [sp, #252] - ldr r7, [sp, #248] - ldr r8, [sp, #244] - ldr r10, [sp, #224] - ldr r11, [sp, #228] - add r0, sp, #168 - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #272] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #268] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #264] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #260] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #256] - str r1, [sp, #12] @ 4-byte Spill - ldm r9, {r4, r5, r9} - ldr r1, [sp, #104] @ 4-byte Reload - ldr r2, [r1, #48] - ldr r1, [sp, #100] @ 4-byte Reload - bl .LmulPv416x32(PLT) - ldr r0, [sp, #92] @ 4-byte Reload - ldr r1, [sp, #40] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - add lr, sp, #184 - adds r0, r0, r10 - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r11 - adcs r1, r1, r4 - str r1, [sp, #104] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adcs r1, r1, r5 - str r1, [sp, #100] @ 4-byte Spill - ldr r1, [sp, #88] @ 4-byte Reload - adcs r1, r1, r9 - str r1, [sp, #92] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r1, r8 - str r1, [sp, #88] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r1, r7 - add r7, sp, #168 - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r1, r6 - str r1, [sp, #80] @ 4-byte Spill - ldr r1, 
[sp, #68] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adc r1, r1, r2 - str r1, [sp, #52] @ 4-byte Spill - ldm r7, {r2, r6, r7} - ldr r5, [sp, #180] - ldr r4, [sp, #216] - ldr r9, [sp, #212] - ldr r8, [sp, #208] - adds r10, r0, r2 - ldr r0, [sp, #96] @ 4-byte Reload - mul r1, r10, r0 - ldr r0, [sp, #220] - str r1, [sp, #48] @ 4-byte Spill - str r0, [sp, #36] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #104] @ 4-byte Reload - adcs r11, r11, r6 - ldr r6, [sp, #100] @ 4-byte Reload - adcs r7, r6, r7 - ldr r6, [sp, #92] @ 4-byte Reload - adcs r5, r6, r5 - ldr r6, [sp, #88] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #48] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r8, r0, r8 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #108] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - mov r1, r4 - adc r6, r0, #0 - add r0, sp, #112 - bl .LmulPv416x32(PLT) - add r3, sp, #112 - ldm r3, {r0, r1, r2, r3} - adds r0, r10, r0 - adcs r12, r11, r1 - ldr r0, [sp, #128] - ldr r1, [sp, #40] @ 4-byte Reload - adcs r2, r7, r2 - str r12, [sp, #52] @ 4-byte Spill - adcs lr, r5, r3 - str r2, [sp, #56] @ 4-byte Spill - str lr, [sp, #60] @ 4-byte Spill - adcs r9, r1, r0 - ldr r0, [sp, #132] - ldr r1, [sp, #44] @ 4-byte Reload - str r9, [sp, #64] @ 4-byte Spill - adcs r0, r1, r0 - ldr r1, [sp, #84] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #136] - adcs r0, r1, r0 - ldr r1, [sp, #88] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #140] - adcs r0, r1, r0 - ldr r1, [sp, #92] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #144] - adcs r10, r1, r0 - ldr r0, [sp, #148] - ldr r1, [sp, #96] @ 4-byte Reload - str r10, [sp, #68] @ 4-byte Spill - adcs r0, r1, r0 - ldr r1, [sp, #100] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #152] - adcs r0, r8, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #156] - adcs r0, r1, r0 - ldr r1, [sp, #104] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #160] - adcs r0, r1, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #164] - adc r0, r6, r0 - mov r6, r4 - str r0, [sp, #104] @ 4-byte Spill - ldmib r6, {r0, r1, r7} - ldr r5, [r6, #24] - ldr r4, [r6, #28] - ldr r3, [r6, #16] - ldr r11, [r6, #20] - str r5, [sp, #48] @ 4-byte Spill - ldr r5, 
[r6] - str r4, [sp, #44] @ 4-byte Spill - subs r5, r12, r5 - sbcs r8, r2, r0 - sbcs r2, lr, r1 - sbcs lr, r9, r7 - add r7, r6, #32 - ldm r7, {r0, r1, r7} - ldr r4, [r6, #44] - ldr r9, [r6, #48] - ldr r6, [sp, #76] @ 4-byte Reload - sbcs r3, r6, r3 - ldr r6, [sp, #80] @ 4-byte Reload - str r4, [sp, #40] @ 4-byte Spill - ldr r4, [sp, #48] @ 4-byte Reload - sbcs r12, r6, r11 - ldr r6, [sp, #84] @ 4-byte Reload - sbcs r11, r6, r4 - ldr r4, [sp, #44] @ 4-byte Reload - sbcs r10, r10, r4 - ldr r4, [sp, #88] @ 4-byte Reload - sbcs r4, r4, r0 - ldr r0, [sp, #92] @ 4-byte Reload - sbcs r6, r0, r1 - ldr r0, [sp, #96] @ 4-byte Reload - ldr r1, [sp, #40] @ 4-byte Reload - sbcs r7, r0, r7 - ldr r0, [sp, #100] @ 4-byte Reload - sbcs r0, r0, r1 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - sbc r9, r0, r9 - ldr r0, [sp, #52] @ 4-byte Reload - asr r1, r9, #31 - cmp r1, #0 - movlt r5, r0 - ldr r0, [sp, #72] @ 4-byte Reload - str r5, [r0] - ldr r5, [sp, #56] @ 4-byte Reload - movlt r8, r5 - ldr r5, [sp, #60] @ 4-byte Reload - str r8, [r0, #4] - movlt r2, r5 - cmp r1, #0 - str r2, [r0, #8] - ldr r2, [sp, #64] @ 4-byte Reload - movlt lr, r2 - ldr r2, [sp, #76] @ 4-byte Reload - str lr, [r0, #12] - movlt r3, r2 - ldr r2, [sp, #80] @ 4-byte Reload - str r3, [r0, #16] - ldr r3, [sp, #108] @ 4-byte Reload - movlt r12, r2 - ldr r2, [sp, #84] @ 4-byte Reload - cmp r1, #0 - str r12, [r0, #20] - movlt r11, r2 - ldr r2, [sp, #68] @ 4-byte Reload - str r11, [r0, #24] - movlt r10, r2 - ldr r2, [sp, #88] @ 4-byte Reload - str r10, [r0, #28] - movlt r4, r2 - ldr r2, [sp, #92] @ 4-byte Reload - cmp r1, #0 - str r4, [r0, #32] - movlt r6, r2 - ldr r2, [sp, #96] @ 4-byte Reload - str r6, [r0, #36] - movlt r7, r2 - ldr r2, [sp, #100] @ 4-byte Reload - str r7, [r0, #40] - movlt r3, r2 - cmp r1, #0 - ldr r1, [sp, #104] @ 4-byte Reload - str r3, [r0, #44] - movlt r9, r1 - str r9, [r0, #48] - add sp, sp, #548 - add sp, sp, #1024 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end195: - .size mcl_fp_montNF13L, .Lfunc_end195-mcl_fp_montNF13L - .cantunwind - .fnend - - .globl mcl_fp_montRed13L - .align 2 - .type mcl_fp_montRed13L,%function -mcl_fp_montRed13L: @ @mcl_fp_montRed13L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #908 - sub sp, sp, #908 - mov r3, r2 - str r0, [sp, #164] @ 4-byte Spill - ldr r2, [r1, #4] - ldr r11, [r1] - ldr r0, [r3] - str r3, [sp, #168] @ 4-byte Spill - str r2, [sp, #72] @ 4-byte Spill - ldr r2, [r1, #8] - str r0, [sp, #160] @ 4-byte Spill - ldr r0, [r3, #4] - str r2, [sp, #68] @ 4-byte Spill - ldr r2, [r1, #12] - str r0, [sp, #156] @ 4-byte Spill - ldr r0, [r3, #8] - str r2, [sp, #64] @ 4-byte Spill - str r0, [sp, #152] @ 4-byte Spill - ldr r0, [r3, #12] - str r0, [sp, #136] @ 4-byte Spill - ldr r0, [r3, #16] - str r0, [sp, #140] @ 4-byte Spill - ldr r0, [r3, #20] - str r0, [sp, #144] @ 4-byte Spill - ldr r0, [r3, #24] - str r0, [sp, #148] @ 4-byte Spill - ldr r0, [r3, #-4] - str r0, [sp, #172] @ 4-byte Spill - mul r2, r11, r0 - ldr r0, [r3, #28] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [r3, #32] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [r3, #36] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [r3, #40] - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [r3, #44] - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [r3, #48] - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [r1, #96] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [r1, #100] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [r1, #64] 
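@ mcl_fp_montRed13L (opened above) is the standalone Montgomery reduction:
@ given a 26-word (832-bit) value z and the 13-word modulus p, it computes
@ z * R^(-1) mod p with R = 2^416. Thirteen rounds, one per low word: each
@ round forms q = (z0 * p') mod 2^32 (p' = -p^(-1) mod 2^32, loaded from
@ [r3, #-4] in the prologue) and calls .LmulPv416x32 to add q*p, zeroing the
@ current low word, while the long adcs / "adcs r0, r0, #0" chains propagate
@ the carry through the not-yet-touched high words. A descriptive sketch of
@ the unrolled loop (names are illustrative, not from this file):
@   for i in 0 .. 12:
@     q = (z[i] * p') mod 2^32    @ the "mul r2, r11, ..." before each call
@     z = z + q * p * 2^(32*i)    @ .LmulPv416x32 + carry propagation
@ After the rounds the result is the high 13 words of z; a final conditional
@ subtraction of p (past the portion shown here) brings it below p.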
- str r0, [sp, #76] @ 4-byte Spill - ldr r0, [r1, #72] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [r1, #76] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [r1, #80] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [r1, #84] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [r1, #88] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [r1, #92] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [r1, #68] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [r1, #32] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [r1, #36] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r1, #40] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [r1, #44] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [r1, #48] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [r1, #52] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [r1, #56] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [r1, #60] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [r1, #28] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [r1, #24] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [r1, #20] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [r1, #16] - mov r1, r3 - str r0, [sp, #12] @ 4-byte Spill - add r0, sp, #848 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #900] - add r10, sp, #872 - add lr, sp, #848 - str r0, [sp, #8] @ 4-byte Spill - ldm r10, {r4, r5, r6, r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r11, r0 - ldr r0, [sp, #72] @ 4-byte Reload - adcs r11, r0, r1 - ldr r0, [sp, #68] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #168] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, 
r0, #0 - str r0, [sp, #108] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #172] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #792 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #844] - add lr, sp, #832 - add r9, sp, #800 - str r0, [sp, #8] @ 4-byte Spill - ldm lr, {r3, r12, lr} - ldr r4, [sp, #792] - ldr r5, [sp, #828] - ldr r6, [sp, #824] - ldr r7, [sp, #820] - ldr r10, [sp, #816] - ldr r8, [sp, #812] - ldr r1, [sp, #796] - ldm r9, {r0, r2, r9} - adds r4, r11, r4 - ldr r4, [sp, #72] @ 4-byte Reload - adcs r11, r4, r1 - ldr r1, [sp, #68] @ 4-byte Reload - ldr r4, [sp, #172] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #168] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #12] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #16] @ 4-byte Spill - add r0, sp, #736 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #788] - add r10, sp, #760 - add lr, sp, #736 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #784] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #780] - str r0, [sp, #4] @ 4-byte Spill - ldm r10, {r6, r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r11, r0 - ldr r0, [sp, #72] @ 4-byte Reload - adcs r11, r0, r1 - ldr r0, [sp, #68] @ 4-byte Reload - ldr r1, [sp, #4] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #64] @ 
4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #20] @ 4-byte Spill - add r0, sp, #680 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #732] - add lr, sp, #720 - add r10, sp, #688 - str r0, [sp, #16] @ 4-byte Spill - ldm lr, {r3, r12, lr} - ldr r4, [sp, #680] - ldr r5, [sp, #716] - ldr r6, [sp, #712] - ldr r7, [sp, #708] - ldr r1, [sp, #684] - ldm r10, {r0, r2, r8, r9, r10} - adds r4, r11, r4 - ldr r4, [sp, #72] @ 4-byte Reload - adcs r11, r4, r1 - ldr r1, [sp, #68] @ 4-byte Reload - ldr r4, [sp, #172] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #168] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #60] @ 4-byte 
Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #24] @ 4-byte Spill - add r0, sp, #624 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #676] - add r10, sp, #648 - add lr, sp, #624 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #672] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #668] - str r0, [sp, #12] @ 4-byte Spill - ldm r10, {r6, r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r11, r0 - ldr r0, [sp, #72] @ 4-byte Reload - adcs r11, r0, r1 - ldr r0, [sp, #68] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - add r0, sp, #568 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #620] - add lr, sp, #608 - add r10, sp, #576 - str r0, [sp, #24] @ 4-byte Spill - ldm lr, {r3, r12, lr} - ldr r4, [sp, #568] - ldr r5, [sp, #604] - ldr r6, [sp, #600] - ldr r7, [sp, #596] - ldr r1, [sp, #572] - ldm r10, {r0, r2, r8, r9, r10} - adds r4, r11, r4 - ldr r4, [sp, 
#72] @ 4-byte Reload - adcs r11, r4, r1 - ldr r1, [sp, #68] @ 4-byte Reload - ldr r4, [sp, #172] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #168] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #32] @ 4-byte Spill - add r0, sp, #512 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #564] - add r10, sp, #536 - add lr, sp, #512 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #560] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #556] - str r0, [sp, #20] @ 4-byte Spill - ldm r10, {r6, r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r11, r0 - ldr r0, [sp, #72] @ 4-byte Reload - adcs r11, r0, r1 - ldr r0, [sp, #68] @ 4-byte Reload - ldr r1, [sp, #20] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #84] 
@ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #456 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #508] - add lr, sp, #496 - add r10, sp, #464 - str r0, [sp, #32] @ 4-byte Spill - ldm lr, {r3, r12, lr} - ldr r4, [sp, #456] - ldr r5, [sp, #492] - ldr r6, [sp, #488] - ldr r7, [sp, #484] - ldr r1, [sp, #460] - ldm r10, {r0, r2, r8, r9, r10} - adds r4, r11, r4 - ldr r4, [sp, #72] @ 4-byte Reload - adcs r11, r4, r1 - ldr r1, [sp, #68] @ 4-byte Reload - ldr r4, [sp, #172] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #168] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - add r0, sp, #400 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #452] - add r10, sp, #424 - add lr, sp, #400 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #448] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #444] - str r0, [sp, #28] @ 4-byte Spill - ldm r10, {r6, r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r11, r0 - ldr r0, [sp, #72] @ 4-byte Reload - adcs r11, r0, r1 - ldr r0, [sp, #68] @ 4-byte Reload - ldr r1, [sp, #28] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, 
#56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #344 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #396] - add lr, sp, #384 - add r10, sp, #352 - str r0, [sp, #40] @ 4-byte Spill - ldm lr, {r3, r12, lr} - ldr r4, [sp, #344] - ldr r5, [sp, #380] - ldr r6, [sp, #376] - ldr r7, [sp, #372] - ldr r1, [sp, #348] - ldm r10, {r0, r2, r8, r9, r10} - adds r4, r11, r4 - ldr r4, [sp, #72] @ 4-byte Reload - adcs r11, r4, r1 - ldr r1, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #168] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #172] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - mul r2, r11, r7 - adcs r0, r0, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r8 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #288 - bl .LmulPv416x32(PLT) - ldr r0, [sp, #340] - add r10, sp, #312 - add lr, sp, #288 - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #336] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #332] - str r0, [sp, #8] @ 4-byte Spill - ldm r10, {r4, r5, r6, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r11, r0 - ldr 
r0, [sp, #72] @ 4-byte Reload - adcs r11, r0, r1 - ldr r0, [sp, #68] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r4 - mov r4, r7 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - mul r2, r11, r4 - adcs r0, r0, r5 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r9 - mov r9, r8 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - add r0, sp, #232 - bl .LmulPv416x32(PLT) - add r7, sp, #232 - add lr, sp, #272 - ldm r7, {r0, r1, r3, r7} - ldr r8, [sp, #284] - adds r0, r11, r0 - ldr r0, [sp, #20] @ 4-byte Reload - adcs r11, r0, r1 - mul r0, r11, r4 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #172] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r7 - add r7, sp, #256 - str r0, [sp, #60] @ 4-byte Spill - ldm lr, {r5, r12, lr} - ldr r6, [sp, #268] - ldm r7, {r1, r2, r7} - ldr r0, [sp, #248] - ldr r3, [sp, #108] @ 4-byte Reload - ldr r4, [sp, #252] - adcs r10, r3, r0 - ldr r0, [sp, #104] @ 4-byte Reload - adcs r4, r0, r4 - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r9 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #52] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r8, r0, r8 - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - add r0, sp, #176 - bl .LmulPv416x32(PLT) - add r3, sp, #176 - ldm r3, {r0, r1, r2, r3} - adds r0, r11, r0 - ldr r0, [sp, #172] @ 4-byte Reload - adcs r12, r0, r1 - ldr r0, [sp, #60] @ 4-byte Reload - ldr r1, [sp, #44] @ 4-byte Reload - str r12, [sp, #52] @ 4-byte Spill - adcs r2, r0, r2 - ldr r0, [sp, #192] - adcs r3, r10, r3 - str r2, [sp, #64] @ 4-byte Spill - str r3, [sp, #68] @ 4-byte Spill - adcs r7, r4, r0 - 
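@ (Annotation, inferred from the listing: this is the final round of the
@ word-by-word Montgomery reduction in mcl_fp_montRed13L. Each round
@ multiplies the current low limb r11 by the precomputed constant kept
@ at [sp, #172], i.e. -p^{-1} mod 2^32, calls .LmulPv416x32 to form m*p,
@ and adds it so the bottom word cancels ("adds r0, r11, r0" discards
@ it). The sums gathered here feed the compare-and-select against the
@ modulus that follows.)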
ldr r0, [sp, #196] - str r7, [sp, #72] @ 4-byte Spill - adcs r4, r1, r0 - ldr r0, [sp, #200] - ldr r1, [sp, #48] @ 4-byte Reload - str r4, [sp, #76] @ 4-byte Spill - adcs r5, r1, r0 - ldr r0, [sp, #204] - ldr r1, [sp, #56] @ 4-byte Reload - str r5, [sp, #80] @ 4-byte Spill - adcs r6, r1, r0 - ldr r0, [sp, #208] - ldr r1, [sp, #88] @ 4-byte Reload - str r6, [sp, #84] @ 4-byte Spill - adcs r0, r1, r0 - ldr r1, [sp, #92] @ 4-byte Reload - str r0, [sp, #172] @ 4-byte Spill - ldr r0, [sp, #212] - adcs r11, r1, r0 - ldr r0, [sp, #216] - ldr r1, [sp, #100] @ 4-byte Reload - str r11, [sp, #92] @ 4-byte Spill - adcs r10, r1, r0 - ldr r0, [sp, #220] - ldr r1, [sp, #108] @ 4-byte Reload - str r10, [sp, #100] @ 4-byte Spill - adcs r9, r1, r0 - ldr r0, [sp, #224] - ldr r1, [sp, #104] @ 4-byte Reload - str r9, [sp, #108] @ 4-byte Spill - adcs r8, r8, r0 - ldr r0, [sp, #228] - str r8, [sp, #168] @ 4-byte Spill - adcs lr, r1, r0 - ldr r0, [sp, #96] @ 4-byte Reload - ldr r1, [sp, #156] @ 4-byte Reload - str lr, [sp, #104] @ 4-byte Spill - adc r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #160] @ 4-byte Reload - subs r0, r12, r0 - sbcs r1, r2, r1 - ldr r2, [sp, #152] @ 4-byte Reload - sbcs r2, r3, r2 - ldr r3, [sp, #136] @ 4-byte Reload - sbcs r3, r7, r3 - ldr r7, [sp, #140] @ 4-byte Reload - sbcs r12, r4, r7 - ldr r4, [sp, #144] @ 4-byte Reload - ldr r7, [sp, #172] @ 4-byte Reload - sbcs r4, r5, r4 - ldr r5, [sp, #148] @ 4-byte Reload - sbcs r5, r6, r5 - ldr r6, [sp, #112] @ 4-byte Reload - sbcs r6, r7, r6 - ldr r7, [sp, #116] @ 4-byte Reload - sbcs r7, r11, r7 - str r7, [sp, #160] @ 4-byte Spill - ldr r7, [sp, #120] @ 4-byte Reload - sbcs r11, r10, r7 - ldr r7, [sp, #124] @ 4-byte Reload - sbcs r9, r9, r7 - ldr r7, [sp, #128] @ 4-byte Reload - sbcs r10, r8, r7 - ldr r7, [sp, #132] @ 4-byte Reload - sbcs r8, lr, r7 - ldr r7, [sp, #96] @ 4-byte Reload - sbc r7, r7, #0 - ands lr, r7, #1 - ldr r7, [sp, #52] @ 4-byte Reload - movne r0, r7 - ldr r7, [sp, #164] @ 4-byte Reload - str r0, [r7] - ldr r0, [sp, #64] @ 4-byte Reload - movne r1, r0 - ldr r0, [sp, #68] @ 4-byte Reload - str r1, [r7, #4] - ldr r1, [sp, #92] @ 4-byte Reload - movne r2, r0 - ldr r0, [sp, #72] @ 4-byte Reload - cmp lr, #0 - str r2, [r7, #8] - movne r3, r0 - ldr r0, [sp, #76] @ 4-byte Reload - str r3, [r7, #12] - movne r12, r0 - ldr r0, [sp, #80] @ 4-byte Reload - str r12, [r7, #16] - movne r4, r0 - ldr r0, [sp, #84] @ 4-byte Reload - cmp lr, #0 - str r4, [r7, #20] - movne r5, r0 - ldr r0, [sp, #172] @ 4-byte Reload - str r5, [r7, #24] - movne r6, r0 - ldr r0, [sp, #160] @ 4-byte Reload - movne r0, r1 - str r6, [r7, #28] - cmp lr, #0 - str r0, [r7, #32] - ldr r0, [sp, #100] @ 4-byte Reload - movne r11, r0 - ldr r0, [sp, #108] @ 4-byte Reload - str r11, [r7, #36] - movne r9, r0 - ldr r0, [sp, #168] @ 4-byte Reload - str r9, [r7, #40] - movne r10, r0 - ldr r0, [sp, #104] @ 4-byte Reload - cmp lr, #0 - str r10, [r7, #44] - movne r8, r0 - str r8, [r7, #48] - add sp, sp, #908 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end196: - .size mcl_fp_montRed13L, .Lfunc_end196-mcl_fp_montRed13L - .cantunwind - .fnend - - .globl mcl_fp_addPre13L - .align 2 - .type mcl_fp_addPre13L,%function -mcl_fp_addPre13L: @ @mcl_fp_addPre13L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #36 - sub sp, sp, #36 - ldm r1, {r3, r12, lr} - ldr r9, [r1, #12] - ldmib r2, {r5, r6, r7} - ldr r11, [r2] - ldr r4, [r2, #16] - ldr r10, [r2, #32] - adds r8, r11, r3 - ldr r3, 
[r2, #48] - str r4, [sp, #8] @ 4-byte Spill - ldr r4, [r2, #20] - ldr r11, [r1, #44] - adcs r5, r5, r12 - add r12, r1, #16 - adcs r6, r6, lr - ldr lr, [r1, #32] - str r3, [sp, #32] @ 4-byte Spill - ldr r3, [r2, #44] - str r4, [sp, #12] @ 4-byte Spill - ldr r4, [r2, #24] - str r3, [sp, #28] @ 4-byte Spill - ldr r3, [r2, #40] - str r4, [sp, #20] @ 4-byte Spill - ldr r4, [r2, #28] - str r3, [sp, #16] @ 4-byte Spill - ldr r3, [r2, #36] - ldr r2, [r1, #36] - str r4, [sp, #24] @ 4-byte Spill - adcs r4, r7, r9 - ldr r7, [r1, #40] - ldr r9, [r1, #48] - str r3, [sp, #4] @ 4-byte Spill - str r2, [sp] @ 4-byte Spill - ldm r12, {r1, r2, r3, r12} - str r8, [r0] - stmib r0, {r5, r6} - str r4, [r0, #12] - ldr r5, [sp, #8] @ 4-byte Reload - ldr r4, [sp, #12] @ 4-byte Reload - ldr r6, [sp, #32] @ 4-byte Reload - adcs r1, r5, r1 - str r1, [r0, #16] - adcs r2, r4, r2 - ldr r1, [sp, #20] @ 4-byte Reload - str r2, [r0, #20] - ldr r2, [sp, #24] @ 4-byte Reload - adcs r1, r1, r3 - ldr r3, [sp] @ 4-byte Reload - adcs r2, r2, r12 - str r1, [r0, #24] - add r12, r0, #32 - str r2, [r0, #28] - ldr r2, [sp, #4] @ 4-byte Reload - adcs r1, r10, lr - adcs r2, r2, r3 - ldr r3, [sp, #16] @ 4-byte Reload - adcs r3, r3, r7 - ldr r7, [sp, #28] @ 4-byte Reload - adcs r7, r7, r11 - adcs r6, r6, r9 - stm r12, {r1, r2, r3, r7} - str r6, [r0, #48] - mov r0, #0 - adc r0, r0, #0 - add sp, sp, #36 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end197: - .size mcl_fp_addPre13L, .Lfunc_end197-mcl_fp_addPre13L - .cantunwind - .fnend - - .globl mcl_fp_subPre13L - .align 2 - .type mcl_fp_subPre13L,%function -mcl_fp_subPre13L: @ @mcl_fp_subPre13L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #36 - sub sp, sp, #36 - ldr r3, [r2, #16] - ldr r7, [r2] - ldr r6, [r1] - ldr r12, [r2, #4] - ldr r4, [r2, #8] - ldr r11, [r2, #12] - str r3, [sp, #12] @ 4-byte Spill - ldr r3, [r2, #20] - subs r7, r6, r7 - str r3, [sp, #20] @ 4-byte Spill - ldr r3, [r2, #24] - str r3, [sp, #24] @ 4-byte Spill - ldr r3, [r2, #28] - str r3, [sp, #28] @ 4-byte Spill - ldmib r1, {r5, lr} - ldr r6, [r2, #48] - ldr r3, [r1, #12] - ldr r10, [r2, #32] - ldr r8, [r1, #44] - ldr r9, [r1, #48] - str r6, [sp, #32] @ 4-byte Spill - ldr r6, [r2, #44] - sbcs r5, r5, r12 - add r12, r1, #16 - sbcs r4, lr, r4 - sbcs lr, r3, r11 - ldr r3, [r2, #36] - ldr r11, [r1, #36] - str r6, [sp, #16] @ 4-byte Spill - ldr r6, [r2, #40] - ldr r2, [r1, #40] - str r3, [sp, #4] @ 4-byte Spill - str r6, [sp, #8] @ 4-byte Spill - ldr r6, [r1, #32] - str r2, [sp] @ 4-byte Spill - ldm r12, {r1, r2, r3, r12} - str r7, [r0] - str r5, [r0, #4] - str r4, [r0, #8] - ldr r4, [sp, #12] @ 4-byte Reload - ldr r7, [sp, #20] @ 4-byte Reload - str lr, [r0, #12] - sbcs r1, r1, r4 - sbcs r2, r2, r7 - str r1, [r0, #16] - ldr r1, [sp, #24] @ 4-byte Reload - ldr r7, [sp] @ 4-byte Reload - str r2, [r0, #20] - ldr r2, [sp, #28] @ 4-byte Reload - sbcs r1, r3, r1 - ldr r3, [sp, #8] @ 4-byte Reload - sbcs r2, r12, r2 - str r1, [r0, #24] - add r12, r0, #32 - str r2, [r0, #28] - ldr r2, [sp, #4] @ 4-byte Reload - sbcs r1, r6, r10 - ldr r6, [sp, #32] @ 4-byte Reload - sbcs r2, r11, r2 - sbcs r3, r7, r3 - ldr r7, [sp, #16] @ 4-byte Reload - sbcs r7, r8, r7 - sbcs r6, r9, r6 - stm r12, {r1, r2, r3, r7} - str r6, [r0, #48] - mov r0, #0 - sbc r0, r0, #0 - and r0, r0, #1 - add sp, sp, #36 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end198: - .size mcl_fp_subPre13L, .Lfunc_end198-mcl_fp_subPre13L - .cantunwind - .fnend - - 
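(The two prefix helpers above, mcl_fp_addPre13L and mcl_fp_subPre13L, are plain 13-limb carry and borrow chains: a single adds/subs starts the flag chain, twelve adcs/sbcs propagate it, and a final adc/sbc, plus an "and r0, r0, #1" in the borrow case, materializes the flag in r0. For readers who do not speak ARM, here is a minimal portable C sketch of the same pattern; the names add_pre/sub_pre and the constant N are illustrative only, not part of mcl's API.)

#include <stdint.h>

#define N 13  /* 13 x 32-bit limbs = 416 bits */

/* z = x + y over little-endian limbs; returns the carry-out (0 or 1),
 * mirroring the adds/adcs/adc chain of mcl_fp_addPre13L. */
static uint32_t add_pre(uint32_t z[N], const uint32_t x[N], const uint32_t y[N])
{
    uint64_t c = 0;                      /* carry lives in bit 32 */
    for (int i = 0; i < N; i++) {
        uint64_t t = (uint64_t)x[i] + y[i] + c;
        z[i] = (uint32_t)t;
        c = t >> 32;
    }
    return (uint32_t)c;
}

/* z = x - y; returns the borrow-out (0 or 1), as in mcl_fp_subPre13L. */
static uint32_t sub_pre(uint32_t z[N], const uint32_t x[N], const uint32_t y[N])
{
    uint64_t b = 0;
    for (int i = 0; i < N; i++) {
        uint64_t t = (uint64_t)x[i] - y[i] - b;
        z[i] = (uint32_t)t;
        b = (t >> 32) & 1;               /* wraparound marks a borrow */
    }
    return (uint32_t)b;
}

(The 64-bit accumulator stands in for the ARM carry flag, which the generated code keeps live in the CPSR across the whole chain; a compiler will typically lower this loop back to the same adcs/sbcs sequence.)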
.globl mcl_fp_shr1_13L - .align 2 - .type mcl_fp_shr1_13L,%function -mcl_fp_shr1_13L: @ @mcl_fp_shr1_13L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #8 - sub sp, sp, #8 - add r9, r1, #8 - ldm r9, {r2, r3, r4, r5, r8, r9} - ldm r1, {r10, lr} - ldr r12, [r1, #36] - lsr r7, lr, #1 - lsr r6, r3, #1 - lsrs r3, r3, #1 - orr r11, r7, r2, lsl #31 - ldr r7, [r1, #48] - rrx r2, r2 - lsrs r3, lr, #1 - rrx r3, r10 - str r7, [sp, #4] @ 4-byte Spill - ldr r7, [r1, #44] - str r7, [sp] @ 4-byte Spill - ldr r7, [r1, #40] - ldr r1, [r1, #32] - stm r0, {r3, r11} - str r2, [r0, #8] - orr r2, r6, r4, lsl #31 - str r2, [r0, #12] - lsrs r2, r5, #1 - ldr r6, [sp] @ 4-byte Reload - rrx r2, r4 - str r2, [r0, #16] - lsr r2, r5, #1 - orr r2, r2, r8, lsl #31 - str r2, [r0, #20] - lsrs r2, r9, #1 - rrx r2, r8 - str r2, [r0, #24] - lsr r2, r9, #1 - orr r2, r2, r1, lsl #31 - str r2, [r0, #28] - lsrs r2, r12, #1 - lsr r2, r12, #1 - rrx r1, r1 - lsrs r3, r6, #1 - add r12, r0, #32 - orr r2, r2, r7, lsl #31 - rrx r3, r7 - lsr r7, r6, #1 - ldr r6, [sp, #4] @ 4-byte Reload - orr r7, r7, r6, lsl #31 - lsr r6, r6, #1 - stm r12, {r1, r2, r3, r7} - str r6, [r0, #48] - add sp, sp, #8 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end199: - .size mcl_fp_shr1_13L, .Lfunc_end199-mcl_fp_shr1_13L - .cantunwind - .fnend - - .globl mcl_fp_add13L - .align 2 - .type mcl_fp_add13L,%function -mcl_fp_add13L: @ @mcl_fp_add13L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #44 - sub sp, sp, #44 - ldr r9, [r1] - ldmib r1, {r8, lr} - ldr r12, [r1, #12] - ldm r2, {r4, r5, r6, r7} - adds r11, r4, r9 - ldr r9, [r1, #24] - adcs r4, r5, r8 - ldr r5, [r1, #20] - adcs r6, r6, lr - str r4, [sp, #32] @ 4-byte Spill - ldr r4, [r1, #16] - mov lr, r11 - adcs r7, r7, r12 - str r6, [sp, #28] @ 4-byte Spill - ldr r6, [r2, #32] - str lr, [r0] - str r7, [sp, #20] @ 4-byte Spill - ldr r7, [r2, #16] - adcs r8, r7, r4 - ldr r4, [r2, #20] - adcs r7, r4, r5 - ldr r5, [r2, #24] - ldr r4, [r1, #28] - str r7, [sp, #40] @ 4-byte Spill - adcs r7, r5, r9 - ldr r5, [r2, #28] - str r7, [sp, #4] @ 4-byte Spill - ldr r11, [sp, #4] @ 4-byte Reload - adcs r7, r5, r4 - ldr r5, [r1, #32] - ldr r4, [sp, #32] @ 4-byte Reload - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [sp, #20] @ 4-byte Reload - adcs r10, r6, r5 - ldr r6, [r1, #36] - ldr r5, [r2, #36] - str r4, [r0, #4] - str r10, [sp, #24] @ 4-byte Spill - adcs r9, r5, r6 - ldr r6, [r1, #40] - ldr r5, [r2, #40] - adcs r12, r5, r6 - ldr r6, [r1, #44] - ldr r5, [r2, #44] - ldr r1, [r1, #48] - ldr r2, [r2, #48] - adcs r6, r5, r6 - ldr r5, [sp, #28] @ 4-byte Reload - adcs r2, r2, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r6, [sp, #16] @ 4-byte Spill - str r2, [sp, #12] @ 4-byte Spill - str r5, [r0, #8] - str r7, [r0, #12] - str r8, [r0, #16] - str r1, [r0, #20] - ldr r1, [sp, #36] @ 4-byte Reload - str r11, [r0, #24] - str r1, [r0, #28] - str r10, [r0, #32] - str r9, [r0, #36] - str r12, [r0, #40] - str r6, [r0, #44] - str r2, [r0, #48] - mov r2, #0 - mov r10, r12 - adc r1, r2, #0 - str r1, [sp, #8] @ 4-byte Spill - ldm r3, {r2, r6} - ldr r1, [r3, #8] - ldr r12, [r3, #12] - subs r2, lr, r2 - str r2, [sp] @ 4-byte Spill - sbcs r2, r4, r6 - sbcs r1, r5, r1 - str r2, [sp, #32] @ 4-byte Spill - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [r3, #16] - sbcs r7, r7, r12 - add r12, r3, #32 - sbcs r8, r8, r1 - ldr r1, [r3, #20] - sbcs r1, r2, 
r1 - ldr r2, [sp, #36] @ 4-byte Reload - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [r3, #24] - sbcs r1, r11, r1 - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [r3, #28] - sbcs r5, r2, r1 - ldm r12, {r1, r2, r6, r11, r12} - ldr r3, [sp, #24] @ 4-byte Reload - sbcs r3, r3, r1 - ldr r1, [sp, #16] @ 4-byte Reload - sbcs r4, r9, r2 - sbcs lr, r10, r6 - ldr r6, [sp, #8] @ 4-byte Reload - sbcs r2, r1, r11 - ldr r1, [sp, #12] @ 4-byte Reload - sbcs r1, r1, r12 - sbc r6, r6, #0 - tst r6, #1 - bne .LBB200_2 -@ BB#1: @ %nocarry - mov r6, r7 - ldr r7, [sp] @ 4-byte Reload - add r12, r0, #32 - str r7, [r0] - ldr r7, [sp, #32] @ 4-byte Reload - str r7, [r0, #4] - ldr r7, [sp, #28] @ 4-byte Reload - str r7, [r0, #8] - ldr r7, [sp, #40] @ 4-byte Reload - str r6, [r0, #12] - str r8, [r0, #16] - str r7, [r0, #20] - ldr r7, [sp, #20] @ 4-byte Reload - str r7, [r0, #24] - str r5, [r0, #28] - stm r12, {r3, r4, lr} - str r2, [r0, #44] - str r1, [r0, #48] -.LBB200_2: @ %carry - add sp, sp, #44 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end200: - .size mcl_fp_add13L, .Lfunc_end200-mcl_fp_add13L - .cantunwind - .fnend - - .globl mcl_fp_addNF13L - .align 2 - .type mcl_fp_addNF13L,%function -mcl_fp_addNF13L: @ @mcl_fp_addNF13L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #64 - sub sp, sp, #64 - ldm r1, {r7, r8, lr} - ldr r6, [r2] - ldr r12, [r1, #12] - ldmib r2, {r4, r5, r9} - adds r10, r6, r7 - ldr r7, [r2, #16] - ldr r6, [r1, #24] - adcs r4, r4, r8 - adcs lr, r5, lr - ldr r5, [r1, #16] - str r4, [sp, #28] @ 4-byte Spill - ldr r4, [r1, #20] - adcs r9, r9, r12 - str lr, [sp, #8] @ 4-byte Spill - str r9, [sp, #12] @ 4-byte Spill - adcs r7, r7, r5 - ldr r5, [r2, #20] - str r7, [sp, #32] @ 4-byte Spill - adcs r7, r5, r4 - ldr r5, [r2, #24] - str r7, [sp, #36] @ 4-byte Spill - adcs r8, r5, r6 - ldr r6, [r1, #28] - ldr r5, [r2, #28] - str r8, [sp, #16] @ 4-byte Spill - adcs r7, r5, r6 - ldr r6, [r1, #32] - ldr r5, [r2, #32] - str r7, [sp, #40] @ 4-byte Spill - adcs r7, r5, r6 - ldr r6, [r1, #36] - ldr r5, [r2, #36] - str r7, [sp, #44] @ 4-byte Spill - adcs r7, r5, r6 - ldr r6, [r1, #40] - ldr r5, [r2, #40] - str r7, [sp, #56] @ 4-byte Spill - adcs r7, r5, r6 - ldr r6, [r1, #44] - ldr r5, [r2, #44] - ldr r1, [r1, #48] - ldr r2, [r2, #48] - str r7, [sp, #52] @ 4-byte Spill - adcs r7, r5, r6 - adc r1, r2, r1 - str r7, [sp, #48] @ 4-byte Spill - str r1, [sp, #60] @ 4-byte Spill - ldmib r3, {r1, r12} - ldr r2, [r3, #24] - ldr r7, [r3] - ldr r6, [r3, #12] - ldr r5, [r3, #16] - ldr r4, [r3, #20] - ldr r11, [r3, #28] - str r2, [sp, #24] @ 4-byte Spill - ldr r2, [sp, #28] @ 4-byte Reload - subs r7, r10, r7 - sbcs r2, r2, r1 - ldr r1, [r3, #40] - sbcs r12, lr, r12 - sbcs lr, r9, r6 - ldr r9, [r3, #32] - ldr r6, [r3, #36] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [r3, #44] - str r1, [sp] @ 4-byte Spill - ldr r1, [r3, #48] - str r1, [sp, #4] @ 4-byte Spill - ldr r1, [sp, #32] @ 4-byte Reload - sbcs r5, r1, r5 - ldr r1, [sp, #36] @ 4-byte Reload - sbcs r3, r1, r4 - ldr r1, [sp, #24] @ 4-byte Reload - sbcs r4, r8, r1 - ldr r1, [sp, #40] @ 4-byte Reload - sbcs r8, r1, r11 - ldr r1, [sp, #44] @ 4-byte Reload - sbcs r9, r1, r9 - ldr r1, [sp, #56] @ 4-byte Reload - sbcs r11, r1, r6 - ldr r1, [sp, #52] @ 4-byte Reload - ldr r6, [sp, #20] @ 4-byte Reload - sbcs r1, r1, r6 - ldr r6, [sp] @ 4-byte Reload - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - sbcs r1, r1, r6 - ldr r6, [sp, #4] @ 4-byte Reload - str r1, [sp, 
#24] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - sbc r6, r1, r6 - asr r1, r6, #31 - cmp r1, #0 - movlt r7, r10 - str r7, [r0] - ldr r7, [sp, #28] @ 4-byte Reload - movlt r2, r7 - str r2, [r0, #4] - ldr r2, [sp, #8] @ 4-byte Reload - movlt r12, r2 - ldr r2, [sp, #12] @ 4-byte Reload - cmp r1, #0 - str r12, [r0, #8] - movlt lr, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str lr, [r0, #12] - movlt r5, r2 - ldr r2, [sp, #36] @ 4-byte Reload - str r5, [r0, #16] - movlt r3, r2 - ldr r2, [sp, #16] @ 4-byte Reload - cmp r1, #0 - str r3, [r0, #20] - ldr r3, [sp, #20] @ 4-byte Reload - movlt r4, r2 - ldr r2, [sp, #40] @ 4-byte Reload - str r4, [r0, #24] - movlt r8, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r8, [r0, #28] - movlt r9, r2 - ldr r2, [sp, #56] @ 4-byte Reload - cmp r1, #0 - str r9, [r0, #32] - movlt r11, r2 - ldr r2, [sp, #52] @ 4-byte Reload - str r11, [r0, #36] - movlt r3, r2 - ldr r2, [sp, #48] @ 4-byte Reload - str r3, [r0, #40] - ldr r3, [sp, #24] @ 4-byte Reload - movlt r3, r2 - cmp r1, #0 - ldr r1, [sp, #60] @ 4-byte Reload - str r3, [r0, #44] - movlt r6, r1 - str r6, [r0, #48] - add sp, sp, #64 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end201: - .size mcl_fp_addNF13L, .Lfunc_end201-mcl_fp_addNF13L - .cantunwind - .fnend - - .globl mcl_fp_sub13L - .align 2 - .type mcl_fp_sub13L,%function -mcl_fp_sub13L: @ @mcl_fp_sub13L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #56 - sub sp, sp, #56 - ldr r9, [r2] - ldmib r2, {r8, lr} - ldr r12, [r2, #12] - ldm r1, {r4, r5, r6, r7} - subs r11, r4, r9 - ldr r4, [r2, #24] - sbcs r5, r5, r8 - str r11, [sp, #28] @ 4-byte Spill - str r11, [r0] - sbcs r6, r6, lr - str r5, [sp, #52] @ 4-byte Spill - ldr r5, [r2, #20] - sbcs r7, r7, r12 - str r6, [sp, #48] @ 4-byte Spill - ldr r6, [r2, #16] - str r7, [sp, #44] @ 4-byte Spill - ldr r7, [r1, #16] - ldr r11, [sp, #44] @ 4-byte Reload - sbcs r10, r7, r6 - ldr r7, [r1, #20] - str r10, [sp, #36] @ 4-byte Spill - sbcs r12, r7, r5 - ldr r7, [r1, #24] - ldr r5, [r1, #28] - sbcs r8, r7, r4 - ldr r7, [r2, #28] - ldr r4, [r1, #36] - str r8, [sp, #40] @ 4-byte Spill - sbcs r9, r5, r7 - ldr r7, [r2, #32] - ldr r5, [r1, #32] - sbcs r5, r5, r7 - ldr r7, [r2, #36] - sbcs r6, r4, r7 - ldr r7, [r2, #40] - ldr r4, [r1, #40] - sbcs lr, r4, r7 - ldr r7, [r2, #44] - ldr r4, [r1, #44] - ldr r2, [r2, #48] - ldr r1, [r1, #48] - sbcs r7, r4, r7 - ldr r4, [sp, #52] @ 4-byte Reload - sbcs r2, r1, r2 - ldr r1, [sp, #48] @ 4-byte Reload - str r7, [sp, #32] @ 4-byte Spill - str r2, [sp, #24] @ 4-byte Spill - str r4, [r0, #4] - str r1, [r0, #8] - str r11, [r0, #12] - str r10, [r0, #16] - str r12, [r0, #20] - str r8, [r0, #24] - str r9, [r0, #28] - str r5, [r0, #32] - str r6, [r0, #36] - str lr, [r0, #40] - str r7, [r0, #44] - str r2, [r0, #48] - mov r2, #0 - sbc r2, r2, #0 - tst r2, #1 - beq .LBB202_2 -@ BB#1: @ %carry - ldr r2, [r3, #48] - ldr r7, [sp, #28] @ 4-byte Reload - ldr r10, [r3, #4] - ldr r8, [r3, #8] - str r2, [sp, #20] @ 4-byte Spill - ldr r2, [r3, #12] - str r2, [sp] @ 4-byte Spill - ldr r2, [r3, #16] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r3, #20] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r3, #24] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r3, #28] - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [r3] - adds r2, r2, r7 - ldr r7, [r3, #44] - adcs r4, r10, r4 - ldr r10, [r3, #36] - str r7, [sp, #28] @ 4-byte Spill - ldr r7, [r3, #40] - ldr r3, [r3, #32] - str r7, [sp, #52] @ 4-byte Spill - adcs r7, r8, r1 - 
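@ (Annotation: carry path of mcl_fp_sub13L. The limb-wise subtraction
@ above borrowed, so the modulus addressed by r3 is being added back
@ with an adds/adcs chain, producing the reduced result x - y + p.)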
ldr r1, [sp] @ 4-byte Reload - stm r0, {r2, r4, r7} - ldr r2, [sp, #36] @ 4-byte Reload - ldr r7, [sp, #4] @ 4-byte Reload - adcs r1, r1, r11 - str r1, [r0, #12] - ldr r1, [sp, #8] @ 4-byte Reload - adcs r2, r7, r2 - str r2, [r0, #16] - adcs r2, r1, r12 - ldr r1, [sp, #12] @ 4-byte Reload - add r12, r0, #32 - str r2, [r0, #20] - ldr r2, [sp, #40] @ 4-byte Reload - adcs r2, r1, r2 - ldr r1, [sp, #16] @ 4-byte Reload - str r2, [r0, #24] - adcs r2, r1, r9 - ldr r1, [sp, #52] @ 4-byte Reload - str r2, [r0, #28] - adcs r2, r3, r5 - ldr r5, [sp, #20] @ 4-byte Reload - adcs r3, r10, r6 - ldr r6, [sp, #28] @ 4-byte Reload - adcs r7, r1, lr - ldr r1, [sp, #32] @ 4-byte Reload - adcs r6, r6, r1 - ldr r1, [sp, #24] @ 4-byte Reload - stm r12, {r2, r3, r7} - str r6, [r0, #44] - adc r1, r5, r1 - str r1, [r0, #48] -.LBB202_2: @ %nocarry - add sp, sp, #56 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end202: - .size mcl_fp_sub13L, .Lfunc_end202-mcl_fp_sub13L - .cantunwind - .fnend - - .globl mcl_fp_subNF13L - .align 2 - .type mcl_fp_subNF13L,%function -mcl_fp_subNF13L: @ @mcl_fp_subNF13L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #72 - sub sp, sp, #72 - mov r12, r0 - ldr r0, [r2, #32] - add r9, r1, #20 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [r2, #36] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [r2, #40] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [r2, #44] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [r2, #48] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r1, #32] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [r1, #36] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [r1, #40] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [r1, #44] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [r1, #48] - str r0, [sp, #28] @ 4-byte Spill - ldm r2, {r7, r11} - ldr r0, [r2, #8] - ldr r10, [r2, #12] - ldr r8, [r2, #16] - ldr lr, [r1, #16] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [r2, #20] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [r2, #24] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [r2, #28] - ldr r2, [r1, #8] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [r1, #12] - ldm r9, {r4, r5, r9} - ldm r1, {r1, r6} - subs r7, r1, r7 - ldr r1, [sp, #52] @ 4-byte Reload - sbcs r6, r6, r11 - str r7, [sp] @ 4-byte Spill - str r6, [sp, #4] @ 4-byte Spill - sbcs r1, r2, r1 - ldr r2, [sp, #28] @ 4-byte Reload - sbcs r0, r0, r10 - str r1, [sp, #8] @ 4-byte Spill - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - sbcs r0, lr, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - sbcs r0, r4, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - sbcs r0, r5, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - sbcs r0, r9, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - sbcs r11, r1, r0 - ldr r0, [sp, #44] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - str r11, [sp, #20] @ 4-byte Spill - sbcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - sbcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - sbcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - sbc r0, r2, r1 - ldr r1, [r3, #40] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r3, #32] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [r3, #44] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [r3, #36] - 
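@ (Annotation: mcl_fp_subNF13L is the branch-free variant. It has just
@ finished forming x - y with a full borrow chain and is now loading the
@ modulus limbs from r3 to compute x - y + p as well; the sign of the
@ final borrow, extracted with "asr #31" below, drives a movge chain
@ that keeps whichever of the two values is the canonical result.)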
str r1, [sp, #28] @ 4-byte Spill - ldr r1, [r3, #48] - str r1, [sp, #32] @ 4-byte Spill - ldm r3, {r2, lr} - ldr r1, [r3, #20] - ldr r5, [r3, #8] - ldr r10, [sp, #8] @ 4-byte Reload - ldr r4, [r3, #12] - ldr r8, [r3, #24] - ldr r9, [r3, #28] - adds r2, r7, r2 - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [r3, #16] - adcs r3, r6, lr - ldr r6, [sp, #12] @ 4-byte Reload - adcs lr, r10, r5 - ldr r5, [sp, #48] @ 4-byte Reload - adcs r4, r5, r4 - ldr r5, [sp, #52] @ 4-byte Reload - adcs r5, r5, r1 - ldr r1, [sp, #56] @ 4-byte Reload - adcs r6, r1, r6 - ldr r1, [sp, #60] @ 4-byte Reload - adcs r7, r1, r8 - ldr r1, [sp, #64] @ 4-byte Reload - adcs r8, r1, r9 - ldr r1, [sp, #16] @ 4-byte Reload - adcs r9, r11, r1 - ldr r1, [sp, #44] @ 4-byte Reload - adcs r11, r1, r0 - ldr r1, [sp, #68] @ 4-byte Reload - ldr r0, [sp, #24] @ 4-byte Reload - adcs r1, r1, r0 - ldr r0, [sp, #36] @ 4-byte Reload - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r1, r0, r1 - str r1, [sp, #32] @ 4-byte Spill - asr r1, r0, #31 - ldr r0, [sp] @ 4-byte Reload - cmp r1, #0 - movge lr, r10 - movge r2, r0 - ldr r0, [sp, #4] @ 4-byte Reload - str r2, [r12] - ldr r2, [sp, #24] @ 4-byte Reload - movge r3, r0 - ldr r0, [sp, #48] @ 4-byte Reload - cmp r1, #0 - str r3, [r12, #4] - str lr, [r12, #8] - movge r4, r0 - ldr r0, [sp, #52] @ 4-byte Reload - str r4, [r12, #12] - movge r5, r0 - ldr r0, [sp, #56] @ 4-byte Reload - str r5, [r12, #16] - movge r6, r0 - ldr r0, [sp, #60] @ 4-byte Reload - cmp r1, #0 - str r6, [r12, #20] - movge r7, r0 - ldr r0, [sp, #64] @ 4-byte Reload - str r7, [r12, #24] - movge r8, r0 - ldr r0, [sp, #20] @ 4-byte Reload - str r8, [r12, #28] - movge r9, r0 - ldr r0, [sp, #44] @ 4-byte Reload - cmp r1, #0 - str r9, [r12, #32] - movge r11, r0 - ldr r0, [sp, #68] @ 4-byte Reload - str r11, [r12, #36] - movge r2, r0 - ldr r0, [sp, #28] @ 4-byte Reload - str r2, [r12, #40] - ldr r2, [sp, #36] @ 4-byte Reload - movge r0, r2 - cmp r1, #0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [r12, #44] - ldr r0, [sp, #32] @ 4-byte Reload - movge r0, r1 - str r0, [r12, #48] - add sp, sp, #72 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end203: - .size mcl_fp_subNF13L, .Lfunc_end203-mcl_fp_subNF13L - .cantunwind - .fnend - - .globl mcl_fpDbl_add13L - .align 2 - .type mcl_fpDbl_add13L,%function -mcl_fpDbl_add13L: @ @mcl_fpDbl_add13L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #152 - sub sp, sp, #152 - ldm r1, {r7, r8, lr} - ldr r12, [r1, #12] - ldm r2, {r4, r5, r6, r9} - add r10, r1, #32 - adds r4, r4, r7 - str r4, [sp, #84] @ 4-byte Spill - ldr r4, [r2, #96] - str r4, [sp, #144] @ 4-byte Spill - ldr r4, [r2, #100] - str r4, [sp, #148] @ 4-byte Spill - adcs r4, r5, r8 - ldr r8, [r2, #16] - adcs r7, r6, lr - str r4, [sp, #72] @ 4-byte Spill - add lr, r1, #16 - str r7, [sp, #68] @ 4-byte Spill - ldr r7, [r2, #64] - str r7, [sp, #112] @ 4-byte Spill - ldr r7, [r2, #68] - str r7, [sp, #116] @ 4-byte Spill - ldr r7, [r2, #72] - str r7, [sp, #124] @ 4-byte Spill - ldr r7, [r2, #76] - str r7, [sp, #120] @ 4-byte Spill - ldr r7, [r2, #80] - str r7, [sp, #128] @ 4-byte Spill - ldr r7, [r2, #84] - str r7, [sp, #132] @ 4-byte Spill - ldr r7, [r2, #88] - str r7, [sp, #136] @ 4-byte Spill - ldr r7, [r2, #92] - str r7, [sp, #140] @ 4-byte Spill - adcs r7, r9, r12 - str r7, [sp, #28] @ 4-byte Spill - 
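@ (Annotation: mcl_fpDbl_add13L, begun above, adds two 26-limb
@ double-width operands. The low 13 limbs of the sum are stored to the
@ destination directly; the high half is then compared against the
@ modulus and the movne chain at the end stores either the raw high
@ half or the high half minus p.)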
ldr r7, [r2, #32] - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [r2, #36] - str r7, [sp, #64] @ 4-byte Spill - ldr r7, [r2, #40] - str r7, [sp, #76] @ 4-byte Spill - ldr r7, [r2, #44] - str r7, [sp, #80] @ 4-byte Spill - ldr r7, [r2, #48] - str r7, [sp, #88] @ 4-byte Spill - ldr r7, [r2, #52] - str r7, [sp, #92] @ 4-byte Spill - ldr r7, [r2, #56] - str r7, [sp, #96] @ 4-byte Spill - ldr r7, [r2, #60] - str r7, [sp, #100] @ 4-byte Spill - ldr r7, [r2, #28] - str r7, [sp, #20] @ 4-byte Spill - ldr r7, [r2, #24] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r2, #20] - ldr r2, [r1, #96] - str r2, [sp, #104] @ 4-byte Spill - ldr r2, [r1, #100] - str r7, [sp, #12] @ 4-byte Spill - str r2, [sp, #108] @ 4-byte Spill - ldr r2, [r1, #64] - str r2, [sp, #32] @ 4-byte Spill - ldr r2, [r1, #72] - str r2, [sp, #40] @ 4-byte Spill - ldr r2, [r1, #76] - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [r1, #80] - str r2, [sp, #48] @ 4-byte Spill - ldr r2, [r1, #84] - str r2, [sp, #52] @ 4-byte Spill - ldr r2, [r1, #88] - str r2, [sp, #56] @ 4-byte Spill - ldr r2, [r1, #92] - str r2, [sp, #60] @ 4-byte Spill - ldr r2, [r1, #68] - str r2, [sp, #24] @ 4-byte Spill - ldm r10, {r4, r5, r6, r9, r10} - ldr r2, [r1, #52] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #56] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #8] @ 4-byte Spill - ldm lr, {r1, r2, r12, lr} - ldr r11, [sp, #84] @ 4-byte Reload - ldr r7, [sp, #72] @ 4-byte Reload - str r11, [r0] - str r7, [r0, #4] - ldr r7, [sp, #68] @ 4-byte Reload - adcs r1, r8, r1 - str r7, [r0, #8] - ldr r7, [sp, #28] @ 4-byte Reload - str r7, [r0, #12] - ldr r7, [sp, #12] @ 4-byte Reload - str r1, [r0, #16] - ldr r1, [sp, #16] @ 4-byte Reload - adcs r2, r7, r2 - ldr r7, [sp] @ 4-byte Reload - str r2, [r0, #20] - ldr r2, [sp, #20] @ 4-byte Reload - adcs r1, r1, r12 - str r1, [r0, #24] - ldr r1, [sp, #36] @ 4-byte Reload - adcs r2, r2, lr - str r2, [r0, #28] - ldr r2, [sp, #64] @ 4-byte Reload - adcs r1, r1, r4 - str r1, [r0, #32] - ldr r1, [sp, #76] @ 4-byte Reload - adcs r2, r2, r5 - str r2, [r0, #36] - ldr r2, [sp, #80] @ 4-byte Reload - adcs r1, r1, r6 - str r1, [r0, #40] - ldr r1, [sp, #88] @ 4-byte Reload - adcs r2, r2, r9 - str r2, [r0, #44] - ldr r2, [sp, #92] @ 4-byte Reload - adcs r1, r1, r10 - str r1, [r0, #48] - ldr r1, [sp, #96] @ 4-byte Reload - adcs r6, r2, r7 - ldr r2, [sp, #4] @ 4-byte Reload - str r6, [sp, #88] @ 4-byte Spill - adcs r5, r1, r2 - ldr r1, [sp, #100] @ 4-byte Reload - ldr r2, [sp, #8] @ 4-byte Reload - str r5, [sp, #92] @ 4-byte Spill - adcs r4, r1, r2 - ldr r1, [sp, #112] @ 4-byte Reload - ldr r2, [sp, #32] @ 4-byte Reload - str r4, [sp, #96] @ 4-byte Spill - adcs r7, r1, r2 - ldr r1, [sp, #116] @ 4-byte Reload - ldr r2, [sp, #24] @ 4-byte Reload - str r7, [sp, #112] @ 4-byte Spill - adcs lr, r1, r2 - ldr r1, [sp, #124] @ 4-byte Reload - ldr r2, [sp, #40] @ 4-byte Reload - str lr, [sp, #100] @ 4-byte Spill - adcs r1, r1, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r1, [sp, #124] @ 4-byte Spill - ldr r1, [sp, #120] @ 4-byte Reload - adcs r8, r1, r2 - ldr r1, [sp, #128] @ 4-byte Reload - ldr r2, [sp, #48] @ 4-byte Reload - str r8, [sp, #116] @ 4-byte Spill - adcs r1, r1, r2 - ldr r2, [sp, #52] @ 4-byte Reload - str r1, [sp, #128] @ 4-byte Spill - ldr r1, [sp, #132] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #56] @ 4-byte Reload - str r1, [sp, #132] @ 4-byte Spill - ldr r1, [sp, #136] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #60] @ 4-byte Reload - str r1, [sp, #136] @ 4-byte Spill - ldr r1, [sp, #140] @ 4-byte 
Reload - adcs r1, r1, r2 - ldr r2, [sp, #104] @ 4-byte Reload - str r1, [sp, #140] @ 4-byte Spill - ldr r1, [sp, #144] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #108] @ 4-byte Reload - str r1, [sp, #144] @ 4-byte Spill - ldr r1, [sp, #148] @ 4-byte Reload - adcs r1, r1, r2 - str r1, [sp, #148] @ 4-byte Spill - mov r1, #0 - adc r1, r1, #0 - str r1, [sp, #108] @ 4-byte Spill - ldmib r3, {r2, r9, r12} - ldr r1, [r3, #20] - ldr r11, [r3] - ldr r10, [r3, #16] - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [r3, #24] - subs r11, r6, r11 - sbcs r2, r5, r2 - str r1, [sp, #104] @ 4-byte Spill - ldr r1, [r3, #28] - str r1, [sp, #120] @ 4-byte Spill - sbcs r1, r4, r9 - add r9, r3, #32 - sbcs r12, r7, r12 - ldm r9, {r5, r7, r9} - ldr r4, [r3, #44] - ldr r3, [r3, #48] - ldr r6, [sp, #84] @ 4-byte Reload - sbcs r10, lr, r10 - str r3, [sp, #80] @ 4-byte Spill - ldr r3, [sp, #124] @ 4-byte Reload - str r4, [sp, #76] @ 4-byte Spill - sbcs lr, r3, r6 - ldr r3, [sp, #104] @ 4-byte Reload - ldr r6, [sp, #120] @ 4-byte Reload - sbcs r4, r8, r3 - ldr r3, [sp, #128] @ 4-byte Reload - sbcs r6, r3, r6 - ldr r3, [sp, #132] @ 4-byte Reload - sbcs r5, r3, r5 - ldr r3, [sp, #136] @ 4-byte Reload - sbcs r8, r3, r7 - ldr r3, [sp, #140] @ 4-byte Reload - ldr r7, [sp, #76] @ 4-byte Reload - sbcs r9, r3, r9 - ldr r3, [sp, #144] @ 4-byte Reload - sbcs r3, r3, r7 - ldr r7, [sp, #80] @ 4-byte Reload - str r3, [sp, #120] @ 4-byte Spill - ldr r3, [sp, #148] @ 4-byte Reload - sbcs r3, r3, r7 - ldr r7, [sp, #88] @ 4-byte Reload - str r3, [sp, #104] @ 4-byte Spill - ldr r3, [sp, #108] @ 4-byte Reload - sbc r3, r3, #0 - ands r3, r3, #1 - movne r11, r7 - ldr r7, [sp, #92] @ 4-byte Reload - str r11, [r0, #52] - movne r2, r7 - str r2, [r0, #56] - ldr r2, [sp, #96] @ 4-byte Reload - movne r1, r2 - cmp r3, #0 - ldr r2, [sp, #120] @ 4-byte Reload - str r1, [r0, #60] - ldr r1, [sp, #112] @ 4-byte Reload - movne r12, r1 - ldr r1, [sp, #100] @ 4-byte Reload - str r12, [r0, #64] - movne r10, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r10, [r0, #68] - movne lr, r1 - ldr r1, [sp, #116] @ 4-byte Reload - cmp r3, #0 - str lr, [r0, #72] - movne r4, r1 - ldr r1, [sp, #128] @ 4-byte Reload - str r4, [r0, #76] - movne r6, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r6, [r0, #80] - movne r5, r1 - ldr r1, [sp, #136] @ 4-byte Reload - cmp r3, #0 - str r5, [r0, #84] - movne r8, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r8, [r0, #88] - movne r9, r1 - ldr r1, [sp, #144] @ 4-byte Reload - str r9, [r0, #92] - movne r2, r1 - ldr r1, [sp, #148] @ 4-byte Reload - cmp r3, #0 - ldr r3, [sp, #104] @ 4-byte Reload - str r2, [r0, #96] - movne r3, r1 - str r3, [r0, #100] - add sp, sp, #152 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end204: - .size mcl_fpDbl_add13L, .Lfunc_end204-mcl_fpDbl_add13L - .cantunwind - .fnend - - .globl mcl_fpDbl_sub13L - .align 2 - .type mcl_fpDbl_sub13L,%function -mcl_fpDbl_sub13L: @ @mcl_fpDbl_sub13L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #152 - sub sp, sp, #152 - ldr r7, [r2, #96] - add r10, r1, #32 - str r7, [sp, #144] @ 4-byte Spill - ldr r7, [r2, #100] - str r7, [sp, #148] @ 4-byte Spill - ldr r7, [r2, #64] - str r7, [sp, #124] @ 4-byte Spill - ldr r7, [r2, #72] - str r7, [sp, #112] @ 4-byte Spill - ldr r7, [r2, #76] - str r7, [sp, #140] @ 4-byte Spill - ldr r7, [r2, #80] - str r7, [sp, #132] @ 4-byte Spill - ldr r7, [r2, #84] - str r7, [sp, #128] @ 4-byte Spill - ldr r7, [r2, #88] - str r7, [sp, #116] @ 4-byte Spill - 
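@ (Annotation: mcl_fpDbl_sub13L, begun above, mirrors the double-width
@ add. It subtracts two 26-limb operands, stores the low 13 limbs
@ directly, unconditionally adds p to the high half, and lets the
@ trailing moveq chain pick the raw difference when no borrow occurred.)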
ldr r7, [r2, #92] - str r7, [sp, #136] @ 4-byte Spill - ldr r7, [r2, #68] - str r7, [sp, #108] @ 4-byte Spill - ldr r7, [r2, #60] - str r7, [sp, #120] @ 4-byte Spill - ldr r7, [r2, #56] - str r7, [sp, #104] @ 4-byte Spill - ldr r7, [r2, #52] - str r7, [sp, #100] @ 4-byte Spill - ldr r7, [r2, #48] - str r7, [sp, #96] @ 4-byte Spill - ldr r7, [r2, #44] - str r7, [sp, #92] @ 4-byte Spill - ldm r2, {r9, lr} - ldr r6, [r1] - ldr r5, [r1, #4] - ldr r12, [r2, #8] - ldr r4, [r1, #8] - ldr r8, [r2, #12] - ldr r7, [r1, #12] - subs r6, r6, r9 - str r6, [sp, #32] @ 4-byte Spill - ldr r6, [r2, #40] - str r6, [sp, #80] @ 4-byte Spill - sbcs r6, r5, lr - add lr, r1, #16 - str r6, [sp, #28] @ 4-byte Spill - ldr r6, [r2, #36] - str r6, [sp, #48] @ 4-byte Spill - sbcs r6, r4, r12 - sbcs r7, r7, r8 - str r6, [sp, #20] @ 4-byte Spill - ldr r6, [r2, #32] - ldr r8, [r2, #16] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r2, #28] - str r6, [sp, #40] @ 4-byte Spill - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [r2, #24] - str r7, [sp, #24] @ 4-byte Spill - ldr r7, [r2, #20] - ldr r2, [r1, #96] - str r2, [sp, #84] @ 4-byte Spill - ldr r2, [r1, #100] - str r7, [sp, #12] @ 4-byte Spill - str r2, [sp, #88] @ 4-byte Spill - ldr r2, [r1, #64] - str r2, [sp, #52] @ 4-byte Spill - ldr r2, [r1, #72] - str r2, [sp, #56] @ 4-byte Spill - ldr r2, [r1, #76] - str r2, [sp, #60] @ 4-byte Spill - ldr r2, [r1, #80] - str r2, [sp, #64] @ 4-byte Spill - ldr r2, [r1, #84] - str r2, [sp, #68] @ 4-byte Spill - ldr r2, [r1, #88] - str r2, [sp, #72] @ 4-byte Spill - ldr r2, [r1, #92] - str r2, [sp, #76] @ 4-byte Spill - ldr r2, [r1, #68] - str r2, [sp, #44] @ 4-byte Spill - ldm r10, {r4, r5, r6, r9, r10} - ldr r2, [r1, #52] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #56] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #8] @ 4-byte Spill - ldm lr, {r1, r2, r12, lr} - ldr r11, [sp, #32] @ 4-byte Reload - ldr r7, [sp, #28] @ 4-byte Reload - str r11, [r0] - str r7, [r0, #4] - ldr r7, [sp, #20] @ 4-byte Reload - sbcs r1, r1, r8 - str r7, [r0, #8] - ldr r7, [sp, #16] @ 4-byte Reload - str r7, [r0, #12] - ldr r7, [sp, #12] @ 4-byte Reload - str r1, [r0, #16] - ldr r1, [sp, #24] @ 4-byte Reload - sbcs r2, r2, r7 - ldr r7, [sp] @ 4-byte Reload - str r2, [r0, #20] - ldr r2, [sp, #36] @ 4-byte Reload - sbcs r1, r12, r1 - str r1, [r0, #24] - ldr r1, [sp, #40] @ 4-byte Reload - sbcs r2, lr, r2 - str r2, [r0, #28] - ldr r2, [sp, #48] @ 4-byte Reload - sbcs r1, r4, r1 - str r1, [r0, #32] - ldr r1, [sp, #80] @ 4-byte Reload - sbcs r2, r5, r2 - str r2, [r0, #36] - ldr r2, [sp, #92] @ 4-byte Reload - sbcs r1, r6, r1 - str r1, [r0, #40] - ldr r1, [sp, #96] @ 4-byte Reload - sbcs r2, r9, r2 - str r2, [r0, #44] - ldr r2, [sp, #100] @ 4-byte Reload - sbcs r1, r10, r1 - add r10, r3, #16 - str r1, [r0, #48] - ldr r1, [sp, #104] @ 4-byte Reload - sbcs r9, r7, r2 - ldr r2, [sp, #4] @ 4-byte Reload - ldr r7, [sp, #52] @ 4-byte Reload - sbcs r11, r2, r1 - ldr r1, [sp, #120] @ 4-byte Reload - ldr r2, [sp, #8] @ 4-byte Reload - sbcs r1, r2, r1 - ldr r2, [sp, #124] @ 4-byte Reload - str r1, [sp, #120] @ 4-byte Spill - mov r1, #0 - sbcs r6, r7, r2 - ldr r2, [sp, #108] @ 4-byte Reload - ldr r7, [sp, #44] @ 4-byte Reload - str r6, [sp, #92] @ 4-byte Spill - sbcs r2, r7, r2 - ldr r7, [sp, #56] @ 4-byte Reload - str r2, [sp, #124] @ 4-byte Spill - ldr r2, [sp, #112] @ 4-byte Reload - sbcs r8, r7, r2 - ldr r2, [sp, #140] @ 4-byte Reload - ldr r7, [sp, #60] @ 4-byte Reload - str r8, [sp, #96] @ 4-byte Spill - sbcs r2, r7, r2 - ldr r7, [sp, #64] @ 
4-byte Reload - str r2, [sp, #140] @ 4-byte Spill - ldr r2, [sp, #132] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #68] @ 4-byte Reload - str r2, [sp, #132] @ 4-byte Spill - ldr r2, [sp, #128] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #72] @ 4-byte Reload - str r2, [sp, #128] @ 4-byte Spill - ldr r2, [sp, #116] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #76] @ 4-byte Reload - str r2, [sp, #116] @ 4-byte Spill - ldr r2, [sp, #136] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #84] @ 4-byte Reload - str r2, [sp, #136] @ 4-byte Spill - ldr r2, [sp, #144] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #88] @ 4-byte Reload - str r2, [sp, #144] @ 4-byte Spill - ldr r2, [sp, #148] @ 4-byte Reload - sbcs r2, r7, r2 - mov r7, r9 - mov r9, r11 - sbc r1, r1, #0 - str r2, [sp, #148] @ 4-byte Spill - str r1, [sp, #100] @ 4-byte Spill - ldr r1, [r3, #32] - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [r3, #36] - str r1, [sp, #88] @ 4-byte Spill - ldr r1, [r3, #40] - str r1, [sp, #104] @ 4-byte Spill - ldr r1, [r3, #44] - str r1, [sp, #108] @ 4-byte Spill - ldr r1, [r3, #48] - str r1, [sp, #112] @ 4-byte Spill - ldm r3, {r1, r2, r12, lr} - ldm r10, {r3, r4, r5, r10} - ldr r11, [sp, #120] @ 4-byte Reload - adds r1, r7, r1 - adcs r2, r9, r2 - adcs r12, r11, r12 - ldr r11, [sp, #112] @ 4-byte Reload - adcs lr, r6, lr - ldr r6, [sp, #124] @ 4-byte Reload - adcs r3, r6, r3 - ldr r6, [sp, #140] @ 4-byte Reload - adcs r4, r8, r4 - adcs r8, r6, r5 - ldr r5, [sp, #132] @ 4-byte Reload - ldr r6, [sp, #84] @ 4-byte Reload - adcs r10, r5, r10 - ldr r5, [sp, #128] @ 4-byte Reload - adcs r5, r5, r6 - ldr r6, [sp, #88] @ 4-byte Reload - str r5, [sp, #84] @ 4-byte Spill - ldr r5, [sp, #116] @ 4-byte Reload - adcs r5, r5, r6 - ldr r6, [sp, #104] @ 4-byte Reload - str r5, [sp, #88] @ 4-byte Spill - ldr r5, [sp, #136] @ 4-byte Reload - adcs r5, r5, r6 - ldr r6, [sp, #108] @ 4-byte Reload - str r5, [sp, #104] @ 4-byte Spill - ldr r5, [sp, #144] @ 4-byte Reload - adcs r5, r5, r6 - str r5, [sp, #108] @ 4-byte Spill - ldr r5, [sp, #148] @ 4-byte Reload - adc r5, r5, r11 - str r5, [sp, #112] @ 4-byte Spill - ldr r5, [sp, #100] @ 4-byte Reload - ands r5, r5, #1 - moveq r1, r7 - moveq r2, r9 - str r1, [r0, #52] - ldr r1, [sp, #120] @ 4-byte Reload - str r2, [r0, #56] - ldr r2, [sp, #84] @ 4-byte Reload - moveq r12, r1 - ldr r1, [sp, #92] @ 4-byte Reload - cmp r5, #0 - str r12, [r0, #60] - moveq lr, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str lr, [r0, #64] - moveq r3, r1 - ldr r1, [sp, #96] @ 4-byte Reload - str r3, [r0, #68] - ldr r3, [sp, #112] @ 4-byte Reload - moveq r4, r1 - ldr r1, [sp, #140] @ 4-byte Reload - cmp r5, #0 - str r4, [r0, #72] - moveq r8, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r8, [r0, #76] - moveq r10, r1 - ldr r1, [sp, #128] @ 4-byte Reload - str r10, [r0, #80] - moveq r2, r1 - ldr r1, [sp, #116] @ 4-byte Reload - cmp r5, #0 - str r2, [r0, #84] - ldr r2, [sp, #88] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #136] @ 4-byte Reload - str r2, [r0, #88] - ldr r2, [sp, #104] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #144] @ 4-byte Reload - str r2, [r0, #92] - ldr r2, [sp, #108] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #148] @ 4-byte Reload - cmp r5, #0 - str r2, [r0, #96] - moveq r3, r1 - str r3, [r0, #100] - add sp, sp, #152 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end205: - .size mcl_fpDbl_sub13L, .Lfunc_end205-mcl_fpDbl_sub13L - .cantunwind - .fnend - - .align 2 - .type .LmulPv448x32,%function -.LmulPv448x32: @ @mulPv448x32 - .fnstart -@ BB#0: - 
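@ (Annotation: .LmulPv448x32 multiplies the 14-limb vector at r1 by the
@ single word in r2 and writes the 15-word result to r0. umull/umlal
@ form each 32x32 partial product and the adcs chain folds every high
@ word into the next column; mcl_fp_mulUnitPre14L below is a thin
@ wrapper around one such call.)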
.save {r4, r5, r6, r7, r8, r9, r11, lr} - push {r4, r5, r6, r7, r8, r9, r11, lr} - ldr r12, [r1] - ldmib r1, {r3, lr} - ldr r9, [r1, #12] - umull r4, r8, lr, r2 - umull lr, r6, r12, r2 - mov r5, r4 - mov r7, r6 - str lr, [r0] - umull lr, r12, r9, r2 - umlal r7, r5, r3, r2 - str r5, [r0, #8] - str r7, [r0, #4] - umull r5, r7, r3, r2 - adds r3, r6, r5 - adcs r3, r7, r4 - adcs r3, r8, lr - str r3, [r0, #12] - ldr r3, [r1, #16] - umull r7, r6, r3, r2 - adcs r3, r12, r7 - str r3, [r0, #16] - ldr r3, [r1, #20] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #20] - ldr r3, [r1, #24] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #24] - ldr r3, [r1, #28] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #28] - ldr r3, [r1, #32] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #32] - ldr r3, [r1, #36] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #36] - ldr r3, [r1, #40] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #40] - ldr r3, [r1, #44] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #44] - ldr r3, [r1, #48] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #48] - ldr r1, [r1, #52] - umull r3, r7, r1, r2 - adcs r1, r6, r3 - str r1, [r0, #52] - adc r1, r7, #0 - str r1, [r0, #56] - pop {r4, r5, r6, r7, r8, r9, r11, lr} - mov pc, lr -.Lfunc_end206: - .size .LmulPv448x32, .Lfunc_end206-.LmulPv448x32 - .cantunwind - .fnend - - .globl mcl_fp_mulUnitPre14L - .align 2 - .type mcl_fp_mulUnitPre14L,%function -mcl_fp_mulUnitPre14L: @ @mcl_fp_mulUnitPre14L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #76 - sub sp, sp, #76 - mov r4, r0 - add r0, sp, #8 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #64] - add lr, sp, #8 - ldr r8, [sp, #56] - ldr r9, [sp, #52] - ldr r10, [sp, #48] - ldr r11, [sp, #44] - ldr r5, [sp, #40] - ldr r6, [sp, #36] - ldr r7, [sp, #32] - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #60] - str r0, [sp] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - stm r4, {r0, r1, r2, r3, r12, lr} - str r7, [r4, #24] - str r6, [r4, #28] - str r5, [r4, #32] - str r11, [r4, #36] - str r10, [r4, #40] - str r9, [r4, #44] - str r8, [r4, #48] - ldr r0, [sp] @ 4-byte Reload - str r0, [r4, #52] - ldr r0, [sp, #4] @ 4-byte Reload - str r0, [r4, #56] - add sp, sp, #76 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end207: - .size mcl_fp_mulUnitPre14L, .Lfunc_end207-mcl_fp_mulUnitPre14L - .cantunwind - .fnend - - .globl mcl_fpDbl_mulPre14L - .align 2 - .type mcl_fpDbl_mulPre14L,%function -mcl_fpDbl_mulPre14L: @ @mcl_fpDbl_mulPre14L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #228 - sub sp, sp, #228 - mov r6, r2 - mov r5, r1 - mov r4, r0 - bl mcl_fpDbl_mulPre7L(PLT) - add r0, r4, #56 - add r1, r5, #28 - add r2, r6, #28 - bl mcl_fpDbl_mulPre7L(PLT) - ldr r0, [r6, #32] - add r11, r6, #36 - str r0, [sp, #104] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [r6, #52] - ldr r12, [r6] - str r0, [sp, #112] @ 4-byte Spill - ldmib r6, {r1, r2, r3, r7} - ldr r0, [r6, #28] - ldr lr, [r6, #24] - ldr r6, [r6, #20] - adds r0, r12, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r1, r0 - str r0, [sp, #104] @ 4-byte Spill - adcs r0, r2, r8 - str r0, [sp, #100] @ 4-byte Spill - adcs r0, r3, r9 - str r0, [sp, #96] @ 4-byte Spill - adcs r0, r7, r10 - str r0, [sp, #92] @ 4-byte Spill - adcs r0, r6, r11 - add r11, r5, #32 - str r0, [sp, #88] @ 4-byte Spill - 
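@ .LmulPv448x32 above multiplies the 14-limb (448-bit) operand at r1 by
@ the 32-bit scalar in r2 using a umull/umlal carry chain, writing a
@ 15-word result at r0; mcl_fp_mulUnitPre14L simply calls it into a
@ stack buffer and copies the 15 words out. A minimal C sketch of the
@ kernel, assuming 32-bit limbs (the helper name is hypothetical):
@
@   void mulPv448x32(uint32_t z[15], const uint32_t x[14], uint32_t y) {
@       uint32_t carry = 0;
@       for (int i = 0; i < 14; i++) {
@           uint64_t t = (uint64_t)x[i] * y + carry; /* limb product */
@           z[i]  = (uint32_t)t;                     /* low word     */
@           carry = (uint32_t)(t >> 32);             /* high word    */
@       }
@       z[14] = carry;                               /* final carry  */
@   }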
ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, lr, r0 - add lr, r5, #12 - str r0, [sp, #84] @ 4-byte Spill - mov r0, #0 - ldm r11, {r8, r10, r11} - ldr r7, [r5] - ldr r3, [r5, #4] - ldr r2, [r5, #8] - adc r6, r0, #0 - ldr r0, [r5, #44] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [r5, #48] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [r5, #52] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [r5, #28] - ldm lr, {r1, r9, r12, lr} - adds r0, r7, r0 - str r0, [sp, #112] @ 4-byte Spill - str r0, [sp, #144] - ldr r0, [sp, #72] @ 4-byte Reload - adcs r7, r3, r8 - adcs r10, r2, r10 - add r2, sp, #116 - str r7, [sp, #148] - adcs r11, r1, r11 - add r1, sp, #144 - str r10, [sp, #152] - str r11, [sp, #156] - adcs r5, r9, r0 - ldr r0, [sp, #76] @ 4-byte Reload - str r5, [sp, #160] - adcs r9, r12, r0 - ldr r0, [sp, #80] @ 4-byte Reload - str r9, [sp, #164] - adcs r8, lr, r0 - ldr r0, [sp, #108] @ 4-byte Reload - str r8, [sp, #168] - str r0, [sp, #116] - ldr r0, [sp, #104] @ 4-byte Reload - str r0, [sp, #120] - ldr r0, [sp, #100] @ 4-byte Reload - str r0, [sp, #124] - ldr r0, [sp, #96] @ 4-byte Reload - str r0, [sp, #128] - ldr r0, [sp, #92] @ 4-byte Reload - str r0, [sp, #132] - ldr r0, [sp, #88] @ 4-byte Reload - str r0, [sp, #136] - ldr r0, [sp, #84] @ 4-byte Reload - str r0, [sp, #140] - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - add r0, sp, #172 - bl mcl_fpDbl_mulPre7L(PLT) - ldr r0, [sp, #108] @ 4-byte Reload - cmp r6, #0 - ldr r2, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #100] @ 4-byte Reload - moveq r8, r6 - moveq r9, r6 - moveq r5, r6 - moveq r11, r6 - moveq r10, r6 - cmp r6, #0 - moveq r2, r6 - moveq r7, r6 - str r2, [sp, #112] @ 4-byte Spill - str r7, [sp, #76] @ 4-byte Spill - adds r3, r2, r0 - ldr r0, [sp, #104] @ 4-byte Reload - ldr r2, [sp, #92] @ 4-byte Reload - adcs r0, r7, r0 - ldr r7, [sp, #88] @ 4-byte Reload - adcs lr, r10, r1 - ldr r1, [sp, #96] @ 4-byte Reload - adcs r1, r11, r1 - adcs r2, r5, r2 - adcs r12, r9, r7 - ldr r7, [sp, #84] @ 4-byte Reload - adcs r7, r8, r7 - str r7, [sp, #104] @ 4-byte Spill - mov r7, #0 - adc r7, r7, #0 - str r7, [sp, #108] @ 4-byte Spill - ldr r7, [sp, #80] @ 4-byte Reload - cmp r7, #0 - moveq r2, r5 - ldr r5, [sp, #76] @ 4-byte Reload - moveq r1, r11 - moveq lr, r10 - ldr r11, [sp, #104] @ 4-byte Reload - moveq r0, r5 - ldr r5, [sp, #112] @ 4-byte Reload - moveq r3, r5 - cmp r7, #0 - ldr r5, [sp, #108] @ 4-byte Reload - moveq r5, r7 - and r7, r6, r7 - ldr r6, [sp, #200] - moveq r12, r9 - moveq r11, r8 - adds r10, r3, r6 - ldr r3, [sp, #204] - adcs r8, r0, r3 - ldr r0, [sp, #208] - add r3, sp, #172 - adcs r9, lr, r0 - ldr r0, [sp, #212] - ldr lr, [r4] - adcs r0, r1, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #216] - adcs r0, r2, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #220] - adcs r0, r12, r0 - ldr r12, [r4, #4] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #224] - adcs r0, r11, r0 - ldr r11, [r4, #12] - str r0, [sp, #92] @ 4-byte Spill - adc r0, r5, r7 - ldr r5, [r4, #8] - str r0, [sp, #88] @ 4-byte Spill - ldm r3, {r0, r1, r2, r3} - subs lr, r0, lr - sbcs r12, r1, r12 - ldr r1, [sp, #188] - sbcs r5, r2, r5 - ldr r2, [r4, #36] - sbcs r0, r3, r11 - ldr r3, [sp, #104] @ 4-byte Reload - ldr r11, [r4, #60] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [r4, #16] - str r2, [sp, #112] @ 4-byte Spill - sbcs r0, r1, r0 - ldr r1, [sp, #192] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [r4, #20] - sbcs r0, r1, r0 - ldr r1, [sp, #196] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [r4, #24] - sbcs r6, r1, r0 - ldr r0, 
[r4, #28] - sbcs r7, r10, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [r4, #32] - ldr r10, [r4, #56] - sbcs r8, r8, r0 - str r0, [sp, #44] @ 4-byte Spill - sbcs r9, r9, r2 - ldr r2, [r4, #40] - sbcs r0, r3, r2 - str r2, [sp, #108] @ 4-byte Spill - ldr r2, [r4, #44] - ldr r3, [sp, #100] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - sbcs r0, r3, r2 - str r2, [sp, #104] @ 4-byte Spill - ldr r2, [r4, #48] - ldr r3, [sp, #96] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - sbcs r0, r3, r2 - str r2, [sp, #100] @ 4-byte Spill - ldr r2, [r4, #52] - ldr r3, [sp, #92] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - sbcs r0, r3, r2 - str r2, [sp, #96] @ 4-byte Spill - ldr r2, [sp, #88] @ 4-byte Reload - ldr r3, [r4, #68] - str r0, [sp, #56] @ 4-byte Spill - sbc r0, r2, #0 - str r0, [sp, #52] @ 4-byte Spill - subs r0, lr, r10 - ldr lr, [r4, #76] - str r0, [sp, #48] @ 4-byte Spill - sbcs r0, r12, r11 - ldr r12, [r4, #72] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r4, #64] - str r0, [sp, #36] @ 4-byte Spill - sbcs r0, r5, r0 - ldr r5, [sp, #84] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - sbcs r0, r5, r3 - ldr r5, [r4, #80] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - sbcs r0, r0, r12 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - sbcs r0, r0, lr - str r0, [sp, #64] @ 4-byte Spill - sbcs r0, r6, r5 - ldr r6, [r4, #84] - str r0, [sp, #24] @ 4-byte Spill - sbcs r0, r7, r6 - str r6, [sp, #92] @ 4-byte Spill - ldr r6, [r4, #88] - str r0, [sp, #20] @ 4-byte Spill - sbcs r0, r8, r6 - str r6, [sp, #88] @ 4-byte Spill - ldr r6, [r4, #92] - str r0, [sp, #16] @ 4-byte Spill - sbcs r0, r9, r6 - add r9, r4, #96 - str r6, [sp, #84] @ 4-byte Spill - str r0, [sp, #12] @ 4-byte Spill - ldm r9, {r6, r7, r8, r9} - ldr r0, [sp, #80] @ 4-byte Reload - ldr r1, [sp, #48] @ 4-byte Reload - ldr r2, [sp, #40] @ 4-byte Reload - sbcs r0, r0, r6 - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - sbcs r0, r0, r7 - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - sbcs r0, r0, r8 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - sbcs r0, r0, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - sbc r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adds r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [r4, #28] - ldr r0, [sp, #112] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [r4, #32] - ldr r1, [sp, #108] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r0, [r4, #36] - ldr r0, [sp, #104] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #72] @ 4-byte Reload - str r1, [r4, #40] - ldr r1, [sp, #100] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #64] @ 4-byte Reload - str r0, [r4, #44] - ldr r0, [sp, #96] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [r4, #48] - ldr r1, [sp, #20] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #12] @ 4-byte Reload - str r0, [r4, #52] - adcs r1, r10, r1 - ldr r0, [sp, #16] @ 4-byte Reload - str r1, [r4, #56] - ldr r1, [sp, #36] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [r4, #60] - adcs r1, r1, r2 - ldr r0, [sp, #4] @ 4-byte Reload - str r1, [r4, #64] - ldr r1, [sp, #8] @ 4-byte Reload - adcs r0, r3, r0 - adcs r1, r12, r1 - str r0, [r4, #68] - ldr r0, [sp, #60] @ 4-byte Reload - add r12, r4, #92 - str r1, [r4, #72] - ldr r1, [sp, #68] @ 4-byte Reload - adcs r0, lr, r0 - adcs r1, r5, r1 - str 
r0, [r4, #76] - ldr r0, [sp, #92] @ 4-byte Reload - str r1, [r4, #80] - ldr r1, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [r4, #84] - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [r4, #88] - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, #0 - adcs r1, r6, #0 - adcs r2, r7, #0 - adcs r3, r8, #0 - adc r7, r9, #0 - stm r12, {r0, r1, r2, r3, r7} - add sp, sp, #228 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end208: - .size mcl_fpDbl_mulPre14L, .Lfunc_end208-mcl_fpDbl_mulPre14L - .cantunwind - .fnend - - .globl mcl_fpDbl_sqrPre14L - .align 2 - .type mcl_fpDbl_sqrPre14L,%function -mcl_fpDbl_sqrPre14L: @ @mcl_fpDbl_sqrPre14L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #220 - sub sp, sp, #220 - mov r5, r1 - mov r4, r0 - mov r2, r5 - bl mcl_fpDbl_mulPre7L(PLT) - add r1, r5, #28 - add r0, r4, #56 - mov r2, r1 - bl mcl_fpDbl_mulPre7L(PLT) - ldr r0, [r5, #44] - ldr r11, [r5, #32] - ldr r10, [r5, #36] - ldr r8, [r5, #40] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [r5, #48] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [r5, #52] - str r0, [sp, #104] @ 4-byte Spill - ldm r5, {r6, r7} - ldr r0, [r5, #28] - ldr r3, [r5, #8] - ldr r2, [r5, #12] - ldr r12, [r5, #16] - ldr lr, [r5, #24] - ldr r1, [r5, #20] - ldr r5, [sp, #96] @ 4-byte Reload - adds r9, r6, r0 - adcs r0, r7, r11 - ldr r7, [sp, #100] @ 4-byte Reload - str r9, [sp, #136] - str r9, [sp, #108] - adcs r3, r3, r10 - str r0, [sp, #140] - str r0, [sp, #112] - adcs r2, r2, r8 - str r3, [sp, #144] - str r3, [sp, #116] - adcs r6, r12, r5 - str r2, [sp, #148] - str r2, [sp, #120] - adcs r1, r1, r7 - ldr r7, [sp, #104] @ 4-byte Reload - str r6, [sp, #152] - str r6, [sp, #124] - lsr r5, r1, #31 - str r1, [sp, #156] - str r1, [sp, #128] - adcs r8, lr, r7 - orr r5, r5, r8, lsl #1 - str r8, [sp, #160] - str r8, [sp, #132] - str r5, [sp, #104] @ 4-byte Spill - lsr r5, r6, #31 - orr r1, r5, r1, lsl #1 - str r1, [sp, #100] @ 4-byte Spill - lsr r1, r2, #31 - orr r1, r1, r6, lsl #1 - str r1, [sp, #96] @ 4-byte Spill - lsr r1, r3, #31 - orr r1, r1, r2, lsl #1 - add r2, sp, #108 - str r1, [sp, #92] @ 4-byte Spill - lsr r1, r0, #31 - orr r1, r1, r3, lsl #1 - str r1, [sp, #84] @ 4-byte Spill - lsr r1, r9, #31 - orr r0, r1, r0, lsl #1 - add r1, sp, #136 - str r0, [sp, #76] @ 4-byte Spill - mov r0, #0 - adc r6, r0, #0 - add r0, sp, #164 - bl mcl_fpDbl_mulPre7L(PLT) - add lr, sp, #204 - add r7, sp, #192 - ldm lr, {r5, r10, r11, lr} - ldm r7, {r0, r1, r7} - ldr r2, [sp, #100] @ 4-byte Reload - ldr r3, [sp, #104] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - adds r0, r0, r9, lsl #1 - mov r9, r1 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r12, r7, r0 - ldr r0, [sp, #92] @ 4-byte Reload - adcs r1, r5, r0 - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r10, r0 - adcs r2, r11, r2 - adcs r3, lr, r3 - adc r8, r6, r8, lsr #31 - cmp r6, #0 - moveq r0, r10 - moveq r1, r5 - moveq r3, lr - moveq r2, r11 - moveq r12, r7 - cmp r6, #0 - ldr lr, [r4] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - moveq r8, r6 - str r2, [sp, #100] @ 4-byte Spill - mov r5, r3 - ldr r3, [sp, #76] @ 4-byte Reload - ldr r2, [sp, #80] @ 4-byte Reload - str r1, [sp, #96] @ 4-byte Spill - mov r7, r8 - add r8, sp, #164 - moveq r3, r9 - ldmib r4, {r9, r10, r11} - moveq r2, r0 - ldm r8, {r0, r1, r8} - ldr r6, [sp, #176] - subs lr, r0, 
lr - sbcs r0, r1, r9 - ldr r1, [sp, #180] - str r0, [sp, #60] @ 4-byte Spill - sbcs r0, r8, r10 - ldr r10, [r4, #56] - str r0, [sp, #76] @ 4-byte Spill - sbcs r0, r6, r11 - ldr r11, [r4, #60] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [r4, #16] - sbcs r0, r1, r0 - ldr r1, [sp, #184] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [r4, #20] - sbcs r0, r1, r0 - ldr r1, [sp, #188] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [r4, #24] - sbcs r6, r1, r0 - ldr r1, [r4, #28] - ldr r0, [r4, #32] - sbcs r9, r2, r1 - str r0, [sp, #92] @ 4-byte Spill - ldr r2, [sp, #96] @ 4-byte Reload - sbcs r8, r3, r0 - ldr r0, [r4, #36] - ldr r3, [r4, #68] - str r0, [sp, #88] @ 4-byte Spill - sbcs r0, r12, r0 - ldr r12, [r4, #72] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [r4, #40] - str r0, [sp, #84] @ 4-byte Spill - sbcs r0, r2, r0 - ldr r2, [r4, #44] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - str r2, [sp, #96] @ 4-byte Spill - sbcs r0, r0, r2 - ldr r2, [sp, #100] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [r4, #48] - str r0, [sp, #104] @ 4-byte Spill - sbcs r0, r2, r0 - ldr r2, [r4, #64] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [r4, #52] - str r2, [sp, #32] @ 4-byte Spill - str r0, [sp, #100] @ 4-byte Spill - sbcs r0, r5, r0 - ldr r5, [r4, #80] - str r0, [sp, #44] @ 4-byte Spill - sbc r0, r7, #0 - str r0, [sp, #40] @ 4-byte Spill - subs r0, lr, r10 - ldr lr, [r4, #76] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - sbcs r0, r0, r11 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - sbcs r0, r0, r2 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - sbcs r0, r0, r3 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - sbcs r0, r0, r12 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - sbcs r0, r0, lr - str r0, [sp, #56] @ 4-byte Spill - sbcs r0, r6, r5 - ldr r6, [sp, #48] @ 4-byte Reload - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [r4, #84] - str r0, [sp, #80] @ 4-byte Spill - sbcs r0, r9, r0 - add r9, r4, #96 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [r4, #88] - str r0, [sp, #76] @ 4-byte Spill - sbcs r0, r8, r0 - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [r4, #92] - str r0, [sp, #72] @ 4-byte Spill - sbcs r0, r6, r0 - str r0, [sp, #48] @ 4-byte Spill - ldm r9, {r6, r7, r8, r9} - ldr r0, [sp, #68] @ 4-byte Reload - ldr r2, [sp, #60] @ 4-byte Reload - sbcs r0, r0, r6 - str r0, [sp] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - sbcs r0, r0, r7 - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - sbcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - sbcs r0, r0, r9 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - sbc r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adds r0, r1, r0 - ldr r1, [sp, #92] @ 4-byte Reload - str r0, [r4, #28] - ldr r0, [sp, #88] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [r4, #32] - ldr r1, [sp, #84] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r0, [r4, #36] - ldr r0, [sp, #96] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [r4, #40] - ldr r1, [sp, #104] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #56] @ 4-byte Reload - str r0, [r4, #44] - ldr r0, [sp, #100] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [r4, #48] - ldr r1, [sp, #12] @ 4-byte Reload - adcs r0, r0, r2 - 
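@ mcl_fpDbl_mulPre14L above performs one Karatsuba level: it calls
@ mcl_fpDbl_mulPre7L on the low and high 7-limb halves, forms the
@ half sums, multiplies those for the middle term, subtracts the two
@ partial products from it, and folds the result into the middle of z,
@ with moveq masks handling the half-sum carry bits. mcl_fpDbl_sqrPre14L,
@ in progress here, follows the same schema with both operands equal.
@ A rough C sketch under those assumptions (helper names hypothetical):
@
@   mulPre7(z,      x,     y);           /* z[0..13]  = xl * yl      */
@   mulPre7(z + 14, x + 7, y + 7);       /* z[14..27] = xh * yh      */
@   uint32_t ca = addN(sx, x, x + 7, 7); /* sx = xl + xh, carry ca   */
@   uint32_t cb = addN(sy, y, y + 7, 7); /* sy = yl + yh, carry cb   */
@   mulPre7(m, sx, sy);                  /* m = sx * sy, 14 words    */
@   /* apply ca/cb cross corrections, m -= z_lo, m -= z_hi,          */
@   /* then add m into z + 7 and propagate the carry upward          */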
ldr r2, [sp, #48] @ 4-byte Reload - str r0, [r4, #52] - adcs r1, r10, r1 - ldr r0, [sp, #8] @ 4-byte Reload - str r1, [r4, #56] - ldr r1, [sp, #32] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [r4, #60] - adcs r1, r1, r2 - ldr r0, [sp] @ 4-byte Reload - str r1, [r4, #64] - ldr r1, [sp, #4] @ 4-byte Reload - adcs r0, r3, r0 - adcs r1, r12, r1 - str r0, [r4, #68] - ldr r0, [sp, #52] @ 4-byte Reload - add r12, r4, #92 - str r1, [r4, #72] - ldr r1, [sp, #64] @ 4-byte Reload - adcs r0, lr, r0 - adcs r1, r5, r1 - str r0, [r4, #76] - ldr r0, [sp, #80] @ 4-byte Reload - str r1, [r4, #80] - ldr r1, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [r4, #84] - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [r4, #88] - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, #0 - adcs r1, r6, #0 - adcs r2, r7, #0 - adcs r3, r8, #0 - adc r7, r9, #0 - stm r12, {r0, r1, r2, r3, r7} - add sp, sp, #220 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end209: - .size mcl_fpDbl_sqrPre14L, .Lfunc_end209-mcl_fpDbl_sqrPre14L - .cantunwind - .fnend - - .globl mcl_fp_mont14L - .align 2 - .type mcl_fp_mont14L,%function -mcl_fp_mont14L: @ @mcl_fp_mont14L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #892 - sub sp, sp, #892 - .pad #1024 - sub sp, sp, #1024 - add r12, sp, #108 - add r7, sp, #1024 - mov r4, r3 - stm r12, {r1, r2, r3} - str r0, [sp, #72] @ 4-byte Spill - add r0, r7, #824 - ldr r6, [r3, #-4] - ldr r2, [r2] - str r6, [sp, #104] @ 4-byte Spill - bl .LmulPv448x32(PLT) - ldr r0, [sp, #1852] - ldr r5, [sp, #1848] - add r8, sp, #1024 - mov r1, r4 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #1856] - mul r2, r5, r6 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #1860] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #1904] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #1900] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #1896] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #1892] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #1888] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #1884] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #1880] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #1876] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1872] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1868] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1864] - str r0, [sp, #36] @ 4-byte Spill - add r0, r8, #760 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #1840] - ldr r1, [sp, #108] @ 4-byte Reload - ldr r10, [sp, #1808] - ldr r11, [sp, #1804] - ldr r7, [sp, #1800] - ldr r9, [sp, #1784] - ldr r4, [sp, #1788] - ldr r6, [sp, #1792] - ldr r8, [sp, #1796] - add lr, sp, #1024 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1836] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1832] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1828] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1824] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1820] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1816] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1812] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - ldr r2, [r0, #4] - add r0, lr, #696 - bl .LmulPv448x32(PLT) - adds r0, r9, r5 - ldr r1, [sp, #48] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - ldr r3, [sp, #1736] - ldr r12, [sp, #1740] - ldr lr, [sp, #1744] - ldr r5, [sp, #1752] - ldr r9, [sp, #1760] - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #1748] - str r0, [sp, 
#80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r6, r0 - ldr r6, [sp, #1720] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r8, r0 - ldr r8, [sp, #1756] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r7, r0 - ldr r7, [sp, #76] @ 4-byte Reload - adcs r1, r11, r1 - str r0, [sp, #36] @ 4-byte Spill - mov r0, #0 - ldr r11, [sp, #80] @ 4-byte Reload - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r10, r1 - ldr r10, [sp, #1764] - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #88] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #92] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #96] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #44] @ 4-byte Reload - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #100] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #1732] - adc r0, r0, #0 - adds r6, r11, r6 - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #1728] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1776] - str r6, [sp, #20] @ 4-byte Spill - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #1772] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #1768] - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #1724] - adcs r0, r7, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #4] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #16] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r6, r0 - add r6, sp, #1024 - add r0, r6, #632 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #1712] - add r11, sp, #1664 - ldr r8, [sp, #1684] - ldr r9, [sp, #1680] - ldr r10, [sp, #1676] - ldr r4, [sp, 
#1656] - ldr r7, [sp, #1660] - add lr, sp, #1024 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1708] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1704] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1700] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1696] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1692] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #1688] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r5, r6, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #8] - add r0, lr, #568 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #20] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - ldr r2, [sp, #1604] - ldr r3, [sp, #1608] - ldr r12, [sp, #1612] - ldr lr, [sp, #1616] - adds r0, r0, r4 - ldr r4, [sp, #1620] - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1624] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - ldr r7, [sp, #96] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1592] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #100] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1636] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1632] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1628] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1600] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - adds r6, r11, r6 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1648] - str r6, [sp, #20] @ 4-byte Spill - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1644] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1640] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1596] - adcs r0, r7, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, 
r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r6, r0 - add r6, sp, #1024 - add r0, r6, #504 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #1584] - add r11, sp, #1536 - ldr r8, [sp, #1556] - ldr r9, [sp, #1552] - ldr r10, [sp, #1548] - ldr r4, [sp, #1528] - ldr r7, [sp, #1532] - add lr, sp, #1024 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1580] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1576] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1572] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1568] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1564] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #1560] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r5, r6, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #12] - add r0, lr, #440 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #20] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - ldr r2, [sp, #1476] - ldr r3, [sp, #1480] - ldr r12, [sp, #1484] - ldr lr, [sp, #1488] - adds r0, r0, r4 - ldr r4, [sp, #1492] - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1496] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - ldr r7, [sp, #96] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1464] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #100] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1508] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1504] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1500] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1472] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - adds r6, r11, r6 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1520] - str r6, [sp, #20] @ 4-byte Spill - str r0, [sp, #36] @ 
4-byte Spill - ldr r0, [sp, #1516] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1512] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1468] - adcs r0, r7, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r6, r0 - add r6, sp, #1024 - add r0, r6, #376 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #1456] - add r11, sp, #1408 - ldr r8, [sp, #1428] - ldr r9, [sp, #1424] - ldr r10, [sp, #1420] - ldr r4, [sp, #1400] - ldr r7, [sp, #1404] - add lr, sp, #1024 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1452] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1448] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1444] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1440] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1436] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #1432] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r5, r6, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #16] - add r0, lr, #312 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #20] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - ldr r2, [sp, #1348] - ldr r3, [sp, #1352] - ldr r12, [sp, #1356] - ldr lr, [sp, #1360] - adds r0, r0, r4 - ldr r4, [sp, #1364] - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1368] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - ldr r7, [sp, #96] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1336] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #100] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1380] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1376] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1372] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, 
#64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1344] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - adds r6, r11, r6 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1392] - str r6, [sp, #20] @ 4-byte Spill - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1388] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1384] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1340] - adcs r0, r7, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r6, r0 - add r6, sp, #1024 - add r0, r6, #248 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #1328] - add r11, sp, #1280 - ldr r8, [sp, #1300] - ldr r9, [sp, #1296] - ldr r10, [sp, #1292] - ldr r4, [sp, #1272] - ldr r7, [sp, #1276] - add lr, sp, #1024 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1324] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1320] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1316] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1312] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1308] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #1304] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r5, r6, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #20] - add r0, lr, #184 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #20] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - ldr r2, [sp, #1220] - ldr r3, [sp, #1224] - ldr r12, [sp, #1228] - ldr lr, [sp, #1232] - adds r0, r0, r4 - ldr r4, [sp, #1236] - ldr r0, [sp, #100] @ 4-byte Reload - adcs 
r0, r0, r7 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1240] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - ldr r7, [sp, #96] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1208] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #100] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1252] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1248] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1244] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1216] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - adds r6, r11, r6 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1264] - str r6, [sp, #20] @ 4-byte Spill - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1260] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1256] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1212] - adcs r0, r7, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r6, r0 - add r6, sp, #1024 - add r0, r6, #120 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #1200] - add r11, sp, #1152 - ldr 
r8, [sp, #1172] - ldr r9, [sp, #1168] - ldr r10, [sp, #1164] - ldr r4, [sp, #1144] - ldr r7, [sp, #1148] - add lr, sp, #1024 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1196] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1192] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1188] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1184] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1180] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #1176] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r5, r6, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #24] - add r0, lr, #56 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #20] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - ldr r2, [sp, #1092] - ldr r3, [sp, #1096] - ldr r12, [sp, #1100] - ldr lr, [sp, #1104] - adds r0, r0, r4 - ldr r4, [sp, #1108] - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1112] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - ldr r7, [sp, #96] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1080] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #100] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1124] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1120] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1116] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1088] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - adds r6, r11, r6 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1136] - str r6, [sp, #20] @ 4-byte Spill - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1132] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1128] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1084] - adcs r0, r7, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r5 - str 
r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #1016 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #1072] - add r11, sp, #1024 - ldr r8, [sp, #1044] - ldr r9, [sp, #1040] - ldr r10, [sp, #1036] - ldr r4, [sp, #1016] - ldr r7, [sp, #1020] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1068] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1064] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1060] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1056] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1052] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #1048] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r5, r6, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #28] - add r0, sp, #952 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #20] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #956 - adds r0, r0, r4 - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #980 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1008] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1004] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1000] - str r0, [sp, #28] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #952] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #100] @ 4-byte Reload - ldr r7, [sp, #96] @ 4-byte Reload - adds 
r6, r11, r6 - adcs r0, r7, r0 - str r6, [sp, #20] @ 4-byte Spill - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #888 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #944] - add r11, sp, #896 - ldr r8, [sp, #916] - ldr r9, [sp, #912] - ldr r10, [sp, #908] - ldr r4, [sp, #888] - ldr r7, [sp, #892] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #940] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #936] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #932] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #928] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #924] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #920] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r5, r6, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #32] - add r0, sp, #824 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #20] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #828 - adds r0, r0, r4 - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #852 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 
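@ mcl_fp_mont14L interleaves multiplication and Montgomery reduction one
@ 32-bit word of b at a time: each unrolled block above adds x * b[i] to
@ the accumulator via .LmulPv448x32, derives q from the low accumulator
@ word and the precomputed inverse loaded from the word before p (ldr
@ r6, [r3, #-4]), adds q * p so the low word cancels, then shifts down
@ one word. A compact word-serial sketch of that loop, assuming 32-bit
@ limbs (mulAdd is a hypothetical T += v * w carry-chain helper); the
@ loop would conventionally end with a conditional subtraction of p:
@
@   uint32_t T[16] = {0};
@   for (int i = 0; i < 14; i++) {
@       mulAdd(T, x, b[i]);      /* T += x * b[i]                    */
@       uint32_t q = T[0] * inv; /* inv = -p^{-1} mod 2^32           */
@       mulAdd(T, p, q);         /* T += q * p; T[0] becomes 0       */
@       shiftRightOneWord(T);    /* divide by 2^32                   */
@   }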
- ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #880] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #876] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #872] - str r0, [sp, #28] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #824] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #100] @ 4-byte Reload - ldr r7, [sp, #96] @ 4-byte Reload - adds r6, r11, r6 - adcs r0, r7, r0 - str r6, [sp, #20] @ 4-byte Spill - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #760 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #816] - add r11, sp, #768 - ldr r8, [sp, #788] - ldr r9, [sp, #784] - ldr r10, [sp, #780] - ldr r4, [sp, #760] - ldr r7, [sp, #764] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #812] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #808] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #804] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #800] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #796] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #792] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r5, r6, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #36] - add r0, sp, #696 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #20] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #700 - adds r0, r0, r4 - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #724 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] 
@ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #752] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #748] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #744] - str r0, [sp, #28] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #696] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #100] @ 4-byte Reload - ldr r7, [sp, #96] @ 4-byte Reload - adds r6, r11, r6 - adcs r0, r7, r0 - str r6, [sp, #20] @ 4-byte Spill - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #104] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - mul r2, r6, r5 - adcs r0, r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - add r0, sp, #632 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #688] - add r11, sp, #632 - ldr r6, [sp, #656] - ldr r4, [sp, #652] - ldr r7, [sp, #648] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #684] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #680] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #676] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #672] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #668] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #664] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #660] - str r0, [sp, #4] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - 
ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #40] - add r0, sp, #568 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #20] @ 4-byte Reload - ldr r1, [sp, #4] @ 4-byte Reload - add lr, sp, #584 - adds r0, r0, r8 - ldr r0, [sp, #100] @ 4-byte Reload - adcs r2, r0, r9 - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #608 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #568 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - ldm r11, {r4, r6, r7, r11} - adds r0, r2, r4 - mul r1, r0, r5 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #624] - str r1, [sp, #32] @ 4-byte Spill - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r4, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r5, [sp, #96] @ 4-byte Reload - adcs r6, r5, r6 - ldr r5, [sp, #92] @ 4-byte Reload - str r6, [sp, #96] @ 4-byte Spill - adcs r6, r5, r7 - ldr r5, [sp, #88] @ 4-byte Reload - str r6, [sp, #92] @ 4-byte Spill - adcs r6, r5, r11 - ldr r5, [sp, #84] @ 4-byte Reload - str r6, [sp, #88] @ 4-byte Spill - adcs r0, r5, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #504 - bl 
.LmulPv448x32(PLT) - ldr r0, [sp, #560] - add r10, sp, #504 - ldr r11, [sp, #532] - ldr r4, [sp, #528] - ldr r6, [sp, #524] - ldr r7, [sp, #520] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #556] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #552] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #548] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #544] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #540] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #536] - str r0, [sp, #8] @ 4-byte Spill - ldm r10, {r5, r8, r9, r10} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #44] - add r0, sp, #440 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #100] @ 4-byte Reload - ldr r1, [sp, #92] @ 4-byte Reload - ldr r2, [sp, #8] @ 4-byte Reload - add lr, sp, #456 - adds r0, r0, r5 - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r8 - adcs r1, r1, r9 - str r1, [sp, #96] @ 4-byte Spill - ldr r1, [sp, #88] @ 4-byte Reload - adcs r1, r1, r10 - add r10, sp, #480 - str r1, [sp, #92] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r1, r7 - add r7, sp, #440 - str r1, [sp, #88] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r1, r6 - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r1, r4 - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #68] @ 4-byte Reload - adcs r1, r1, r11 - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #12] @ 4-byte Reload - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #40] @ 4-byte Reload - adcs r1, r1, r2 - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - adc r1, r1, #0 - str r1, [sp, #24] @ 4-byte Spill - ldm r7, {r4, r6, r7} - ldr r5, [sp, #452] - adds r1, r0, r4 - ldr r0, [sp, #104] @ 4-byte Reload - str r1, [sp, #100] @ 4-byte Spill - mul r2, r1, r0 - ldr r0, [sp, #496] - str r2, [sp, #20] @ 4-byte Spill - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r4, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #96] @ 4-byte Reload - adcs r6, r11, r6 - str r6, [sp, #48] @ 4-byte Spill - ldr r6, [sp, #92] @ 4-byte Reload - adcs r6, r6, r7 - str r6, [sp, #44] @ 4-byte Spill - ldr r6, [sp, #88] @ 4-byte Reload - adcs r5, r6, r5 - str r5, [sp, #40] @ 4-byte Spill - ldr r5, [sp, #84] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #56] @ 
4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - add r0, sp, #376 - bl .LmulPv448x32(PLT) - ldr r1, [sp, #432] - ldr r8, [sp, #404] - ldr r9, [sp, #400] - ldr r10, [sp, #396] - ldr r11, [sp, #392] - ldr r6, [sp, #376] - ldr r5, [sp, #380] - ldr r7, [sp, #384] - ldr r4, [sp, #388] - add r0, sp, #312 - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #428] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #424] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #420] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #416] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #412] - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp, #408] - str r1, [sp, #8] @ 4-byte Spill - ldr r1, [sp, #112] @ 4-byte Reload - ldr r2, [r1, #48] - ldr r1, [sp, #108] @ 4-byte Reload - bl .LmulPv448x32(PLT) - ldr r0, [sp, #100] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #316 - adds r0, r0, r6 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #340 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #368] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #364] - str r0, [sp, #28] @ 4-byte Spill - ldm r10, {r4, r6, r7, r8, r9, r10} - ldr r5, [sp, #312] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #100] @ 4-byte Reload - adds r11, r11, r5 - ldr r5, [sp, #48] @ 4-byte Reload - adcs r0, r5, r0 - mov r5, r11 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #96] @ 
4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #248 - bl .LmulPv448x32(PLT) - ldr r1, [sp, #304] - ldr r10, [sp, #272] - ldr r11, [sp, #268] - ldr r8, [sp, #264] - ldr r6, [sp, #248] - ldr r7, [sp, #252] - ldr r4, [sp, #256] - ldr r9, [sp, #260] - add r0, sp, #184 - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #300] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #296] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #292] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #288] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #284] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #280] - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp, #276] - str r1, [sp, #8] @ 4-byte Spill - ldr r1, [sp, #112] @ 4-byte Reload - ldr r2, [r1, #52] - ldr r1, [sp, #108] @ 4-byte Reload - bl .LmulPv448x32(PLT) - adds r0, r5, r6 - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #200 - ldr r0, [sp, #52] @ 4-byte Reload - adcs r3, r0, r7 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r8 - add r8, sp, #184 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #224 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, 
r0, #0 - str r0, [sp, #56] @ 4-byte Spill - ldm r8, {r4, r7, r8} - ldr r0, [sp, #104] @ 4-byte Reload - ldr r5, [sp, #196] - adds r4, r3, r4 - mul r1, r4, r0 - ldr r0, [sp, #240] - str r1, [sp, #48] @ 4-byte Spill - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #236] - str r0, [sp, #32] @ 4-byte Spill - ldm r10, {r6, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #112] @ 4-byte Reload - adcs r11, r11, r7 - ldr r7, [sp, #108] @ 4-byte Reload - adcs r8, r7, r8 - ldr r7, [sp, #52] @ 4-byte Reload - adcs r5, r7, r5 - ldr r7, [sp, #100] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #48] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r9, r0, r9 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #116] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r10 - str r0, [sp, #68] @ 4-byte Spill - mov r0, #0 - adc r7, r0, #0 - add r0, sp, #120 - bl .LmulPv448x32(PLT) - add r3, sp, #120 - ldm r3, {r0, r1, r2, r3} - adds r0, r4, r0 - adcs r4, r11, r1 - ldr r0, [sp, #136] - ldr r1, [sp, #40] @ 4-byte Reload - adcs r6, r8, r2 - str r4, [sp, #36] @ 4-byte Spill - adcs r12, r5, r3 - str r6, [sp, #48] @ 4-byte Spill - str r12, [sp, #56] @ 4-byte Spill - adcs r8, r1, r0 - ldr r0, [sp, #140] - ldr r1, [sp, #44] @ 4-byte Reload - str r8, [sp, #64] @ 4-byte Spill - adcs r0, r1, r0 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #144] - adcs r0, r1, r0 - ldr r1, [sp, #92] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #148] - adcs r0, r1, r0 - ldr r1, [sp, #96] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #152] - adcs r0, r1, r0 - ldr r1, [sp, #100] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #156] - adcs r0, r1, r0 - ldr r1, [sp, #104] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #160] - adcs r0, r1, r0 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #164] - adcs r0, r9, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #168] - adcs r0, r1, r0 - ldr r1, [sp, #112] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #172] - adcs r0, r1, r0 - ldr r1, [sp, #68] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #176] - adcs r0, r1, r0 - str r0, [sp, #112] @ 4-byte Spill - adc r0, r7, #0 - mov r7, r10 - str r0, [sp, #60] @ 4-byte Spill - ldmib r7, {r1, r2, r3, r10, r11, lr} - ldr r5, [r7] - ldr r0, [r7, #28] - ldr r9, [r7, #44] - subs r5, r4, r5 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [r7, #40] - sbcs r6, r6, r1 - ldr r1, [r7, #32] - ldr r4, [sp, #68] @ 4-byte Reload - sbcs r2, r12, r2 - sbcs r12, r8, r3 - ldr r3, [r7, #48] - ldr r8, [r7, #36] - str r3, [sp, #52] @ 4-byte Spill - ldr r3, 
[r7, #52]
- ldr r7, [sp, #84] @ 4-byte Reload
- str r3, [sp, #116] @ 4-byte Spill
- ldr r3, [sp, #80] @ 4-byte Reload
- sbcs r10, r3, r10
- ldr r3, [sp, #76] @ 4-byte Reload
- sbcs r3, r3, r11
- sbcs lr, r7, lr
- ldr r7, [sp, #88] @ 4-byte Reload
- sbcs r4, r7, r4
- ldr r7, [sp, #92] @ 4-byte Reload
- sbcs r7, r7, r1
- ldr r1, [sp, #96] @ 4-byte Reload
- sbcs r8, r1, r8
- ldr r1, [sp, #100] @ 4-byte Reload
- sbcs r11, r1, r0
- ldr r0, [sp, #104] @ 4-byte Reload
- ldr r1, [sp, #52] @ 4-byte Reload
- sbcs r9, r0, r9
- ldr r0, [sp, #108] @ 4-byte Reload
- sbcs r0, r0, r1
- ldr r1, [sp, #116] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- sbcs r0, r0, r1
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- sbc r0, r0, #0
- ands r1, r0, #1
- ldr r0, [sp, #36] @ 4-byte Reload
- movne r5, r0
- ldr r0, [sp, #72] @ 4-byte Reload
- str r5, [r0]
- ldr r5, [sp, #48] @ 4-byte Reload
- movne r6, r5
- ldr r5, [sp, #56] @ 4-byte Reload
- str r6, [r0, #4]
- movne r2, r5
- cmp r1, #0
- str r2, [r0, #8]
- ldr r2, [sp, #64] @ 4-byte Reload
- movne r12, r2
- ldr r2, [sp, #80] @ 4-byte Reload
- str r12, [r0, #12]
- movne r10, r2
- ldr r2, [sp, #76] @ 4-byte Reload
- str r10, [r0, #16]
- movne r3, r2
- ldr r2, [sp, #84] @ 4-byte Reload
- cmp r1, #0
- str r3, [r0, #20]
- movne lr, r2
- ldr r2, [sp, #88] @ 4-byte Reload
- str lr, [r0, #24]
- movne r4, r2
- ldr r2, [sp, #92] @ 4-byte Reload
- str r4, [r0, #28]
- movne r7, r2
- ldr r2, [sp, #96] @ 4-byte Reload
- cmp r1, #0
- str r7, [r0, #32]
- movne r8, r2
- ldr r2, [sp, #100] @ 4-byte Reload
- str r8, [r0, #36]
- movne r11, r2
- ldr r2, [sp, #104] @ 4-byte Reload
- str r11, [r0, #40]
- movne r9, r2
- cmp r1, #0
- ldr r1, [sp, #108] @ 4-byte Reload
- ldr r2, [sp, #68] @ 4-byte Reload
- str r9, [r0, #44]
- movne r2, r1
- ldr r1, [sp, #112] @ 4-byte Reload
- str r2, [r0, #48]
- ldr r2, [sp, #116] @ 4-byte Reload
- movne r2, r1
- str r2, [r0, #52]
- add sp, sp, #892
- add sp, sp, #1024
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end210:
- .size mcl_fp_mont14L, .Lfunc_end210-mcl_fp_mont14L
- .cantunwind
- .fnend
-
- .globl mcl_fp_montNF14L
- .align 2
- .type mcl_fp_montNF14L,%function
-mcl_fp_montNF14L: @ @mcl_fp_montNF14L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #892
- sub sp, sp, #892
- .pad #1024
- sub sp, sp, #1024
- add r12, sp, #108
- add r6, sp, #1024
- mov r4, r3
- stm r12, {r1, r2, r3}
- str r0, [sp, #76] @ 4-byte Spill
- add r0, r6, #824
- ldr r5, [r3, #-4]
- ldr r2, [r2]
- str r5, [sp, #104] @ 4-byte Spill
- bl .LmulPv448x32(PLT)
- ldr r0, [sp, #1852]
- ldr r8, [sp, #1848]
- add r10, sp, #1024
- mov r1, r4
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #1856]
- mul r2, r8, r5
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #1860]
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #1904]
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #1900]
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #1896]
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #1892]
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #1888]
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #1884]
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #1880]
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #1876]
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #1872]
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #1868]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1864]
- str r0, [sp, #40] @ 4-byte Spill
- add r0, 
r10, #760 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #1840] - ldr r1, [sp, #108] @ 4-byte Reload - ldr r11, [sp, #1808] - ldr r6, [sp, #1804] - ldr r7, [sp, #1800] - ldr r5, [sp, #1784] - ldr r9, [sp, #1788] - ldr r10, [sp, #1792] - ldr r4, [sp, #1796] - add lr, sp, #1024 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1836] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1832] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1828] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1824] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1820] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1816] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1812] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - ldr r2, [r0, #4] - add r0, lr, #696 - bl .LmulPv448x32(PLT) - adds r0, r5, r8 - ldr r1, [sp, #12] @ 4-byte Reload - ldr r5, [sp, #1720] - ldr r2, [sp, #1732] - ldr r3, [sp, #1736] - ldr r12, [sp, #1740] - ldr lr, [sp, #1744] - ldr r8, [sp, #1760] - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r9, r0 - ldr r9, [sp, #1764] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r10, r0 - ldr r10, [sp, #1768] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #1748] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r7, r0 - ldr r7, [sp, #1756] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r6, r0 - ldr r6, [sp, #1752] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r11, r0 - ldr r11, [sp, #80] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adc r0, r1, r0 - adds r11, r11, r5 - ldr r5, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #1728] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1776] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1772] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1724] - adcs r0, r5, r0 - mov r5, r11 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r6 - str r0, 
[sp, #68] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - add r8, sp, #1024 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r11, r0 - add r0, r8, #632 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #1712] - add r11, sp, #1664 - ldr r9, [sp, #1680] - ldr r10, [sp, #1676] - ldr r6, [sp, #1656] - ldr r7, [sp, #1660] - add lr, sp, #1024 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1708] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1704] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1700] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1696] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1692] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1688] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1684] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #8] - add r0, lr, #568 - bl .LmulPv448x32(PLT) - adds r0, r5, r6 - ldr r1, [sp, #12] @ 4-byte Reload - ldr r5, [sp, #1592] - ldr r2, [sp, #1604] - ldr r3, [sp, #1608] - ldr r12, [sp, #1612] - ldr lr, [sp, #1616] - ldr r6, [sp, #1624] - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #1628] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1620] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1632] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #100] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1640] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1636] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, r1 - adds r11, r11, r5 - ldr r5, [sp, #96] @ 4-byte Reload - ldr r1, [sp, #1600] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1648] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1644] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1596] - adcs r0, r5, r0 - mov r5, r11 - str r0, 
[sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r8 - add r8, sp, #1024 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r11, r0 - add r0, r8, #504 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #1584] - add r11, sp, #1536 - ldr r9, [sp, #1552] - ldr r10, [sp, #1548] - ldr r6, [sp, #1528] - ldr r7, [sp, #1532] - add lr, sp, #1024 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1580] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1576] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1572] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1568] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1564] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1560] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1556] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #12] - add r0, lr, #440 - bl .LmulPv448x32(PLT) - adds r0, r5, r6 - ldr r1, [sp, #12] @ 4-byte Reload - ldr r5, [sp, #1464] - ldr r2, [sp, #1476] - ldr r3, [sp, #1480] - ldr r12, [sp, #1484] - ldr lr, [sp, #1488] - ldr r6, [sp, #1496] - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #1500] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1492] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1504] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #100] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1512] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1508] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - 
ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, r1 - adds r11, r11, r5 - ldr r5, [sp, #96] @ 4-byte Reload - ldr r1, [sp, #1472] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1520] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1516] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1468] - adcs r0, r5, r0 - mov r5, r11 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r8 - add r8, sp, #1024 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r11, r0 - add r0, r8, #376 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #1456] - add r11, sp, #1408 - ldr r9, [sp, #1424] - ldr r10, [sp, #1420] - ldr r6, [sp, #1400] - ldr r7, [sp, #1404] - add lr, sp, #1024 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1452] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1448] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1444] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1440] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1436] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1432] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1428] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #16] - add r0, lr, #312 - bl .LmulPv448x32(PLT) - adds r0, r5, r6 - ldr r1, [sp, #12] @ 4-byte Reload - ldr r5, [sp, #1336] - ldr r2, [sp, #1348] - ldr r3, [sp, #1352] - ldr r12, [sp, #1356] - ldr lr, [sp, #1360] - ldr r6, [sp, #1368] - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #1372] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1364] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1376] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #100] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1384] - str r0, [sp, #84] @ 4-byte Spill - ldr 
r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1380] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, r1 - adds r11, r11, r5 - ldr r5, [sp, #96] @ 4-byte Reload - ldr r1, [sp, #1344] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1392] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1388] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1340] - adcs r0, r5, r0 - mov r5, r11 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r8 - add r8, sp, #1024 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r11, r0 - add r0, r8, #248 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #1328] - add r11, sp, #1280 - ldr r9, [sp, #1296] - ldr r10, [sp, #1292] - ldr r6, [sp, #1272] - ldr r7, [sp, #1276] - add lr, sp, #1024 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1324] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1320] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1316] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1312] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1308] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1304] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1300] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #20] - add r0, lr, #184 - bl .LmulPv448x32(PLT) - adds r0, r5, r6 - ldr r1, [sp, #12] @ 4-byte Reload - ldr r5, [sp, #1208] - ldr r2, [sp, #1220] - ldr r3, 
[sp, #1224] - ldr r12, [sp, #1228] - ldr lr, [sp, #1232] - ldr r6, [sp, #1240] - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #1244] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1236] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1248] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #100] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1256] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1252] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, r1 - adds r11, r11, r5 - ldr r5, [sp, #96] @ 4-byte Reload - ldr r1, [sp, #1216] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1264] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1260] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1212] - adcs r0, r5, r0 - mov r5, r11 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r8 - add r8, sp, #1024 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r11, r0 - add r0, r8, #120 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #1200] - add r11, sp, #1152 - ldr r9, [sp, #1168] - ldr r10, [sp, #1164] - ldr r6, [sp, #1144] - ldr r7, [sp, #1148] - add lr, sp, #1024 - str r0, [sp, #40] @ 4-byte Spill - ldr 
r0, [sp, #1196] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1192] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1188] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1184] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1180] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1176] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1172] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #24] - add r0, lr, #56 - bl .LmulPv448x32(PLT) - adds r0, r5, r6 - ldr r1, [sp, #12] @ 4-byte Reload - ldr r5, [sp, #1080] - ldr r2, [sp, #1092] - ldr r3, [sp, #1096] - ldr r12, [sp, #1100] - ldr lr, [sp, #1104] - ldr r6, [sp, #1112] - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #1116] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1108] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1120] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #100] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1128] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1124] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, r1 - adds r11, r11, r5 - ldr r5, [sp, #96] @ 4-byte Reload - ldr r1, [sp, #1088] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1136] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1132] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1084] - adcs r0, r5, r0 - mov r5, r11 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs 
r0, r0, r9 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #1016 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #1072] - add r11, sp, #1024 - ldr r9, [sp, #1040] - ldr r10, [sp, #1036] - ldr r6, [sp, #1016] - ldr r7, [sp, #1020] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1068] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1064] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1060] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1056] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1052] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1048] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1044] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #28] - add r0, sp, #952 - bl .LmulPv448x32(PLT) - adds r0, r5, r6 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #956 - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #980 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1008] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1004] - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r4, r6, r7, r8, r9, r10} - ldr r5, [sp, #952] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #100] @ 4-byte Reload - adds r11, r11, r5 - ldr r5, [sp, #96] @ 4-byte Reload - adcs r0, r5, r0 - mov r5, r11 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] 
@ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #888 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #944] - add r11, sp, #896 - ldr r9, [sp, #912] - ldr r10, [sp, #908] - ldr r6, [sp, #888] - ldr r7, [sp, #892] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #940] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #936] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #932] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #928] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #924] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #920] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #916] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #32] - add r0, sp, #824 - bl .LmulPv448x32(PLT) - adds r0, r5, r6 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #828 - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #852 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #880] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #876] - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r4, r6, r7, r8, r9, r10} - ldr r5, [sp, #824] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #100] @ 4-byte Reload - adds r11, r11, r5 - ldr r5, [sp, #96] @ 4-byte Reload - adcs r0, r5, r0 - mov r5, 
r11 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - mul r2, r11, r0 - add r0, sp, #760 - bl .LmulPv448x32(PLT) - ldr r0, [sp, #816] - add r11, sp, #768 - ldr r9, [sp, #784] - ldr r10, [sp, #780] - ldr r6, [sp, #760] - ldr r7, [sp, #764] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #812] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #808] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #804] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #800] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #796] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #792] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #788] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [r0, #36] - add r0, sp, #696 - bl .LmulPv448x32(PLT) - adds r0, r5, r6 - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #700 - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #724 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload 
[... remainder of the mcl_fp_montNF14L body elided: machine-generated 14-limb Montgomery multiplication, interleaving bl .LmulPv448x32(PLT) calls with mul quotient-digit computation, long adcs/adc carry chains over 4-byte spill slots, and a final subs/sbcs trial subtraction whose result is selected with asr/movlt ...]
- add sp, sp, #892
- add sp, sp, #1024
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end211:
- .size mcl_fp_montNF14L, .Lfunc_end211-mcl_fp_montNF14L
- .cantunwind
- .fnend
-
- .globl mcl_fp_montRed14L
- .align 2
- .type mcl_fp_montRed14L,%function
-mcl_fp_montRed14L: @ @mcl_fp_montRed14L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #68
- sub sp, sp, #68
- .pad #1024
- sub sp, sp, #1024
[... body of mcl_fp_montRed14L elided: machine-generated 14-limb Montgomery reduction, spilling the modulus and input limbs, loading the inverse constant from [r3, #-4], then running repeated mul/bl .LmulPv448x32(PLT) rounds with adcs/adc carry propagation and a final subs/sbcs trial subtraction selected with ands/movne ...]
- add sp, sp, #68
- add sp, sp, #1024
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end212:
- .size mcl_fp_montRed14L, .Lfunc_end212-mcl_fp_montRed14L
- .cantunwind
- .fnend
-
- .globl mcl_fp_addPre14L
- .align 2
- .type mcl_fp_addPre14L,%function
-mcl_fp_addPre14L: @ @mcl_fp_addPre14L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #44
- sub sp, sp, #44
[... body of mcl_fp_addPre14L elided: 14-limb addition without reduction (ldm/adds/adcs limb chain; the carry is returned in r0) ...]
- mov r0, #0
- adc r0, r0, #0
- add sp, sp, #44
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end213:
- .size mcl_fp_addPre14L, .Lfunc_end213-mcl_fp_addPre14L
- .cantunwind
- .fnend
-
- .globl mcl_fp_subPre14L
- .align 2
- .type mcl_fp_subPre14L,%function
-mcl_fp_subPre14L: @ @mcl_fp_subPre14L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #44
- sub sp, sp, #44
[... body of mcl_fp_subPre14L elided: 14-limb subtraction without reduction (subs/sbcs limb chain; the borrow is returned in r0) ...]
- mov r0, #0
- sbc r0, r0, #0
- and r0, r0, #1
- add sp, sp, #44
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end214:
- .size mcl_fp_subPre14L, .Lfunc_end214-mcl_fp_subPre14L
- .cantunwind
- .fnend
-
- .globl mcl_fp_shr1_14L
- .align 2
- .type mcl_fp_shr1_14L,%function
-mcl_fp_shr1_14L: @ @mcl_fp_shr1_14L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #16
- sub sp, sp, #16
[... body of mcl_fp_shr1_14L elided: one-bit right shift across 14 limbs (lsr/lsrs/rrx, with orr folding each neighbouring limb's low bit into bit 31) ...]
- add sp, sp, #16
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end215:
- .size mcl_fp_shr1_14L, .Lfunc_end215-mcl_fp_shr1_14L
- .cantunwind
- .fnend
-
- .globl mcl_fp_add14L
- .align 2
- .type mcl_fp_add14L,%function
-mcl_fp_add14L: @ @mcl_fp_add14L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #52
- sub sp, sp, #52
[... body of mcl_fp_add14L elided: 14-limb modular addition (adds/adcs limb chain stored to [r0], then a trial subtraction of the modulus; bne .LBB216_2 keeps the unreduced sum on borrow, while the %nocarry path stores the reduced result) ...]
-.LBB216_2: @ %carry
- add sp, sp, #52
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end216:
- .size mcl_fp_add14L, .Lfunc_end216-mcl_fp_add14L
- .cantunwind
- .fnend
-
- .globl mcl_fp_addNF14L
- .align 2
- .type mcl_fp_addNF14L,%function
-mcl_fp_addNF14L: @ @mcl_fp_addNF14L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #80
- sub sp, sp, #80
[... opening of the mcl_fp_addNF14L body elided: limb loads and the adds/adcs carry chain ...]
- adcs r7,
r5, r6 - ldr r6, [r1, #36] - ldr r5, [r2, #36] - str r7, [sp, #56] @ 4-byte Spill - adcs r7, r5, r6 - ldr r6, [r1, #40] - ldr r5, [r2, #40] - str r7, [sp, #68] @ 4-byte Spill - adcs r7, r5, r6 - ldr r6, [r1, #44] - ldr r5, [r2, #44] - str r7, [sp, #64] @ 4-byte Spill - adcs r7, r5, r6 - ldr r6, [r1, #48] - ldr r5, [r2, #48] - ldr r1, [r1, #52] - ldr r2, [r2, #52] - str r7, [sp, #60] @ 4-byte Spill - adcs r7, r5, r6 - adc r1, r2, r1 - str r7, [sp, #76] @ 4-byte Spill - str r1, [sp, #72] @ 4-byte Spill - ldmib r3, {r1, r4, r6} - ldr r2, [r3, #24] - ldr r7, [r3] - ldr r5, [r3, #16] - ldr r11, [r3, #20] - ldr r10, [r3, #40] - str r2, [sp, #28] @ 4-byte Spill - ldr r2, [r3, #28] - str r2, [sp, #32] @ 4-byte Spill - ldr r2, [sp, #36] @ 4-byte Reload - subs r7, r2, r7 - ldr r2, [sp, #40] @ 4-byte Reload - sbcs r2, r2, r1 - ldr r1, [r3, #36] - sbcs r12, lr, r4 - sbcs lr, r9, r6 - ldr r9, [r3, #32] - ldr r6, [sp, #32] @ 4-byte Reload - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [r3, #44] - str r1, [sp] @ 4-byte Spill - ldr r1, [r3, #48] - str r1, [sp, #4] @ 4-byte Spill - ldr r1, [r3, #52] - str r1, [sp, #8] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - sbcs r5, r1, r5 - ldr r1, [sp, #48] @ 4-byte Reload - sbcs r3, r1, r11 - ldr r1, [sp, #28] @ 4-byte Reload - sbcs r4, r8, r1 - ldr r1, [sp, #52] @ 4-byte Reload - sbcs r8, r1, r6 - ldr r1, [sp, #56] @ 4-byte Reload - ldr r6, [sp, #24] @ 4-byte Reload - sbcs r11, r1, r9 - ldr r1, [sp, #68] @ 4-byte Reload - sbcs r9, r1, r6 - ldr r1, [sp, #64] @ 4-byte Reload - ldr r6, [sp] @ 4-byte Reload - sbcs r1, r1, r10 - ldr r10, [sp, #36] @ 4-byte Reload - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - sbcs r1, r1, r6 - ldr r6, [sp, #4] @ 4-byte Reload - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - sbcs r1, r1, r6 - ldr r6, [sp, #8] @ 4-byte Reload - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - sbc r6, r1, r6 - asr r1, r6, #31 - cmp r1, #0 - movlt r7, r10 - str r7, [r0] - ldr r7, [sp, #40] @ 4-byte Reload - movlt r2, r7 - str r2, [r0, #4] - ldr r2, [sp, #12] @ 4-byte Reload - movlt r12, r2 - ldr r2, [sp, #16] @ 4-byte Reload - cmp r1, #0 - str r12, [r0, #8] - movlt lr, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str lr, [r0, #12] - movlt r5, r2 - ldr r2, [sp, #48] @ 4-byte Reload - str r5, [r0, #16] - movlt r3, r2 - ldr r2, [sp, #20] @ 4-byte Reload - cmp r1, #0 - str r3, [r0, #20] - ldr r3, [sp, #24] @ 4-byte Reload - movlt r4, r2 - ldr r2, [sp, #52] @ 4-byte Reload - str r4, [r0, #24] - movlt r8, r2 - ldr r2, [sp, #56] @ 4-byte Reload - str r8, [r0, #28] - movlt r11, r2 - ldr r2, [sp, #68] @ 4-byte Reload - cmp r1, #0 - str r11, [r0, #32] - movlt r9, r2 - ldr r2, [sp, #64] @ 4-byte Reload - str r9, [r0, #36] - movlt r3, r2 - ldr r2, [sp, #60] @ 4-byte Reload - str r3, [r0, #40] - ldr r3, [sp, #28] @ 4-byte Reload - movlt r3, r2 - cmp r1, #0 - ldr r1, [sp, #76] @ 4-byte Reload - ldr r2, [sp, #32] @ 4-byte Reload - str r3, [r0, #44] - movlt r2, r1 - ldr r1, [sp, #72] @ 4-byte Reload - str r2, [r0, #48] - movlt r6, r1 - str r6, [r0, #52] - add sp, sp, #80 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end217: - .size mcl_fp_addNF14L, .Lfunc_end217-mcl_fp_addNF14L - .cantunwind - .fnend - - .globl mcl_fp_sub14L - .align 2 - .type mcl_fp_sub14L,%function -mcl_fp_sub14L: @ @mcl_fp_sub14L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #60 - sub sp, sp, #60 - ldr r9, [r2] - ldmib r2, {r8, lr} - ldr 
r5, [r1] - ldr r12, [r2, #12] - ldmib r1, {r4, r6, r7} - subs r5, r5, r9 - sbcs r4, r4, r8 - str r5, [sp, #52] @ 4-byte Spill - ldr r5, [r2, #24] - sbcs r6, r6, lr - str r4, [sp, #48] @ 4-byte Spill - ldr r4, [r2, #20] - sbcs r7, r7, r12 - str r6, [sp, #56] @ 4-byte Spill - ldr r6, [r2, #16] - str r7, [sp, #44] @ 4-byte Spill - ldr r7, [r1, #16] - sbcs r8, r7, r6 - ldr r7, [r1, #20] - ldr r6, [r1, #28] - str r8, [sp, #40] @ 4-byte Spill - sbcs r10, r7, r4 - ldr r7, [r1, #24] - ldr r4, [r1, #40] - str r10, [sp, #36] @ 4-byte Spill - sbcs r9, r7, r5 - ldr r7, [r2, #28] - sbcs r11, r6, r7 - ldr r7, [r2, #32] - ldr r6, [r1, #32] - str r11, [sp, #32] @ 4-byte Spill - sbcs r12, r6, r7 - ldr r7, [r2, #36] - ldr r6, [r1, #36] - str r12, [sp, #28] @ 4-byte Spill - sbcs r6, r6, r7 - ldr r7, [r2, #40] - sbcs r5, r4, r7 - ldr r7, [r2, #44] - ldr r4, [r1, #44] - str r5, [sp, #24] @ 4-byte Spill - sbcs lr, r4, r7 - ldr r4, [r2, #48] - ldr r7, [r1, #48] - ldr r2, [r2, #52] - ldr r1, [r1, #52] - sbcs r7, r7, r4 - ldr r4, [sp, #44] @ 4-byte Reload - sbcs r2, r1, r2 - ldr r1, [sp, #52] @ 4-byte Reload - str r1, [r0] - ldr r1, [sp, #48] @ 4-byte Reload - str r1, [r0, #4] - ldr r1, [sp, #56] @ 4-byte Reload - str r1, [r0, #8] - str r4, [r0, #12] - str r8, [r0, #16] - mov r1, lr - add r8, r0, #24 - str r10, [r0, #20] - stm r8, {r9, r11, r12} - str r6, [r0, #36] - str r5, [r0, #40] - str r1, [r0, #44] - str r7, [r0, #48] - mov r8, r2 - str r2, [r0, #52] - mov r2, #0 - sbc r2, r2, #0 - tst r2, #1 - beq .LBB218_2 -@ BB#1: @ %carry - ldr r2, [r3, #52] - ldr r5, [r3, #48] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [sp, #52] @ 4-byte Reload - ldr lr, [r3, #4] - ldr r12, [r3, #8] - ldr r10, [r3, #12] - ldr r11, [r3, #40] - str r2, [sp, #20] @ 4-byte Spill - ldr r2, [r3, #16] - str r5, [sp, #52] @ 4-byte Spill - ldr r5, [sp, #48] @ 4-byte Reload - str r2, [sp] @ 4-byte Spill - ldr r2, [r3, #20] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r3, #24] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r3, #28] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r3] - adds r2, r2, r7 - ldr r7, [sp, #56] @ 4-byte Reload - adcs lr, lr, r5 - ldr r5, [r3, #44] - adcs r7, r12, r7 - add r12, r0, #32 - str r5, [sp, #48] @ 4-byte Spill - adcs r5, r10, r4 - ldr r10, [r3, #36] - ldr r3, [r3, #32] - stm r0, {r2, lr} - str r7, [r0, #8] - ldr r2, [sp, #40] @ 4-byte Reload - ldr r7, [sp] @ 4-byte Reload - ldr r4, [sp, #36] @ 4-byte Reload - str r5, [r0, #12] - ldr r5, [sp, #52] @ 4-byte Reload - adcs r2, r7, r2 - ldr r7, [sp, #4] @ 4-byte Reload - str r2, [r0, #16] - ldr r2, [sp, #8] @ 4-byte Reload - adcs r4, r7, r4 - ldr r7, [sp, #12] @ 4-byte Reload - adcs r2, r2, r9 - str r4, [r0, #20] - str r2, [r0, #24] - ldr r2, [sp, #32] @ 4-byte Reload - adcs r2, r7, r2 - ldr r7, [sp, #24] @ 4-byte Reload - str r2, [r0, #28] - ldr r2, [sp, #28] @ 4-byte Reload - adcs r2, r3, r2 - adcs r3, r10, r6 - ldr r6, [sp, #48] @ 4-byte Reload - adcs r7, r11, r7 - adcs r6, r6, r1 - ldr r1, [sp, #16] @ 4-byte Reload - adcs r5, r5, r1 - ldr r1, [sp, #20] @ 4-byte Reload - stm r12, {r2, r3, r7} - str r6, [r0, #44] - str r5, [r0, #48] - adc r1, r1, r8 - str r1, [r0, #52] -.LBB218_2: @ %nocarry - add sp, sp, #60 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end218: - .size mcl_fp_sub14L, .Lfunc_end218-mcl_fp_sub14L - .cantunwind - .fnend - - .globl mcl_fp_subNF14L - .align 2 - .type mcl_fp_subNF14L,%function -mcl_fp_subNF14L: @ @mcl_fp_subNF14L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, 
lr} - .pad #80 - sub sp, sp, #80 - mov r12, r0 - ldr r0, [r2, #32] - add r7, r1, #16 - ldr r9, [r2] - ldr r11, [r2, #20] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [r2, #36] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [r2, #40] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [r2, #44] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [r2, #48] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [r2, #52] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r1, #52] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [r1, #48] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [r1, #44] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [r1, #40] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [r1, #36] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [r2, #4] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [r2, #8] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [r2, #12] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [r2, #16] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [r2, #24] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [r2, #28] - str r0, [sp, #44] @ 4-byte Spill - ldmib r1, {r2, r8, lr} - ldm r7, {r4, r5, r6, r7} - ldr r10, [r1] - ldr r0, [sp, #56] @ 4-byte Reload - ldr r1, [r1, #32] - subs r10, r10, r9 - sbcs r9, r2, r0 - ldr r0, [sp, #52] @ 4-byte Reload - str r10, [sp] @ 4-byte Spill - str r9, [sp, #4] @ 4-byte Spill - sbcs r0, r8, r0 - add r8, r3, #20 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - sbcs r0, lr, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - sbcs r0, r4, r0 - str r0, [sp, #56] @ 4-byte Spill - sbcs r0, r5, r11 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - sbcs r0, r6, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - sbcs r0, r7, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - sbcs r11, r1, r0 - ldr r0, [sp, #48] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - str r11, [sp, #20] @ 4-byte Spill - sbcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - sbcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - sbcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - sbcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - sbc r0, r1, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r3, #32] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [r3, #36] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [r3, #40] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [r3, #44] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [r3, #48] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [r3, #52] - str r0, [sp, #36] @ 4-byte Spill - ldm r3, {r2, r4, r6} - ldr r5, [r3, #12] - ldr lr, [r3, #16] - ldm r8, {r0, r7, r8} - ldr r3, [sp, #56] @ 4-byte Reload - adds r1, r10, r2 - ldr r10, [sp, #12] @ 4-byte Reload - ldr r2, [sp, #52] @ 4-byte Reload - adcs r4, r9, r4 - adcs r6, r10, r6 - adcs r2, r2, r5 - ldr r5, [sp, #60] @ 4-byte Reload - adcs r3, r3, lr - adcs lr, r5, r0 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r5, r0, r7 - ldr r0, [sp, #44] @ 4-byte Reload - ldr r7, [sp, #16] @ 4-byte Reload - adcs r8, r0, r8 - ldr r0, [sp, #8] @ 4-byte Reload - adcs r9, r11, r0 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r11, r0, r7 - ldr r0, [sp, #68] @ 4-byte Reload - ldr r7, [sp, #24] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #28] @ 4-byte Reload - str r0, [sp, 
#24] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #32] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #36] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r7, r0, r7 - str r7, [sp, #36] @ 4-byte Spill - asr r7, r0, #31 - ldr r0, [sp] @ 4-byte Reload - cmp r7, #0 - movge r6, r10 - movge r1, r0 - ldr r0, [sp, #4] @ 4-byte Reload - str r1, [r12] - ldr r1, [sp, #24] @ 4-byte Reload - movge r4, r0 - ldr r0, [sp, #52] @ 4-byte Reload - cmp r7, #0 - str r4, [r12, #4] - str r6, [r12, #8] - movge r2, r0 - ldr r0, [sp, #56] @ 4-byte Reload - str r2, [r12, #12] - movge r3, r0 - ldr r0, [sp, #60] @ 4-byte Reload - str r3, [r12, #16] - movge lr, r0 - ldr r0, [sp, #64] @ 4-byte Reload - cmp r7, #0 - str lr, [r12, #20] - movge r5, r0 - ldr r0, [sp, #44] @ 4-byte Reload - str r5, [r12, #24] - movge r8, r0 - ldr r0, [sp, #20] @ 4-byte Reload - str r8, [r12, #28] - movge r9, r0 - ldr r0, [sp, #48] @ 4-byte Reload - cmp r7, #0 - str r9, [r12, #32] - movge r11, r0 - ldr r0, [sp, #68] @ 4-byte Reload - str r11, [r12, #36] - movge r1, r0 - ldr r0, [sp, #72] @ 4-byte Reload - str r1, [r12, #40] - ldr r1, [sp, #28] @ 4-byte Reload - movge r1, r0 - ldr r0, [sp, #76] @ 4-byte Reload - cmp r7, #0 - str r1, [r12, #44] - ldr r1, [sp, #32] @ 4-byte Reload - movge r1, r0 - ldr r0, [sp, #36] @ 4-byte Reload - str r1, [r12, #48] - ldr r1, [sp, #40] @ 4-byte Reload - movge r0, r1 - str r0, [r12, #52] - add sp, sp, #80 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end219: - .size mcl_fp_subNF14L, .Lfunc_end219-mcl_fp_subNF14L - .cantunwind - .fnend - - .globl mcl_fpDbl_add14L - .align 2 - .type mcl_fpDbl_add14L,%function -mcl_fpDbl_add14L: @ @mcl_fpDbl_add14L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #168 - sub sp, sp, #168 - ldr r7, [r1] - ldmib r1, {r6, lr} - ldr r12, [r1, #12] - ldm r2, {r4, r5, r8, r9} - add r10, r1, #32 - adds r4, r4, r7 - str r4, [sp, #92] @ 4-byte Spill - ldr r4, [r2, #96] - str r4, [sp, #152] @ 4-byte Spill - ldr r4, [r2, #100] - str r4, [sp, #156] @ 4-byte Spill - ldr r4, [r2, #104] - str r4, [sp, #160] @ 4-byte Spill - ldr r4, [r2, #108] - str r4, [sp, #164] @ 4-byte Spill - adcs r4, r5, r6 - adcs r7, r8, lr - str r4, [sp, #68] @ 4-byte Spill - add lr, r1, #16 - str r7, [sp, #64] @ 4-byte Spill - adcs r7, r9, r12 - str r7, [sp, #76] @ 4-byte Spill - ldr r7, [r2, #64] - str r7, [sp, #124] @ 4-byte Spill - ldr r7, [r2, #68] - str r7, [sp, #128] @ 4-byte Spill - ldr r7, [r2, #72] - str r7, [sp, #136] @ 4-byte Spill - ldr r7, [r2, #80] - str r7, [sp, #140] @ 4-byte Spill - ldr r7, [r2, #84] - str r7, [sp, #144] @ 4-byte Spill - ldr r7, [r2, #88] - str r7, [sp, #132] @ 4-byte Spill - ldr r7, [r2, #92] - str r7, [sp, #148] @ 4-byte Spill - ldr r7, [r2, #76] - str r7, [sp, #120] @ 4-byte Spill - ldr r7, [r2, #32] - str r7, [sp, #56] @ 4-byte Spill - ldr r7, [r2, #36] - str r7, [sp, #60] @ 4-byte Spill - ldr r7, [r2, #40] - str r7, [sp, #72] @ 4-byte Spill - ldr r7, [r2, #44] - str r7, [sp, #80] @ 4-byte Spill - ldr r7, [r2, #48] - str r7, [sp, #84] @ 4-byte Spill - ldr r7, [r2, #52] - str r7, [sp, #88] @ 4-byte Spill - ldr r7, [r2, #56] - str r7, [sp, #96] @ 4-byte Spill - ldr r7, [r2, #60] - str r7, [sp, #100] @ 4-byte Spill - ldr r7, [r2, #28] - str r7, [sp, #20] @ 4-byte Spill - ldr r7, [r2, #24] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r2, #20] 
- ldr r2, [r2, #16] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #96] - str r7, [sp, #12] @ 4-byte Spill - str r2, [sp, #104] @ 4-byte Spill - ldr r2, [r1, #100] - str r2, [sp, #108] @ 4-byte Spill - ldr r2, [r1, #104] - str r2, [sp, #112] @ 4-byte Spill - ldr r2, [r1, #108] - str r2, [sp, #116] @ 4-byte Spill - ldr r2, [r1, #64] - str r2, [sp, #28] @ 4-byte Spill - ldr r2, [r1, #68] - str r2, [sp, #32] @ 4-byte Spill - ldr r2, [r1, #72] - str r2, [sp, #36] @ 4-byte Spill - ldr r2, [r1, #80] - str r2, [sp, #40] @ 4-byte Spill - ldr r2, [r1, #84] - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [r1, #88] - str r2, [sp, #48] @ 4-byte Spill - ldr r2, [r1, #92] - str r2, [sp, #52] @ 4-byte Spill - ldr r2, [r1, #76] - str r2, [sp, #24] @ 4-byte Spill - ldm r10, {r4, r5, r6, r10} - ldr r2, [r1, #56] - ldr r8, [r1, #48] - ldr r9, [r1, #52] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #8] @ 4-byte Spill - ldm lr, {r1, r2, r12, lr} - ldr r11, [sp, #92] @ 4-byte Reload - ldr r7, [sp, #68] @ 4-byte Reload - str r11, [r0] - str r7, [r0, #4] - ldr r7, [sp, #64] @ 4-byte Reload - str r7, [r0, #8] - ldr r7, [sp] @ 4-byte Reload - adcs r1, r7, r1 - ldr r7, [sp, #76] @ 4-byte Reload - str r7, [r0, #12] - ldr r7, [sp, #12] @ 4-byte Reload - str r1, [r0, #16] - ldr r1, [sp, #16] @ 4-byte Reload - adcs r2, r7, r2 - ldr r7, [sp, #4] @ 4-byte Reload - str r2, [r0, #20] - adcs r1, r1, r12 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [r0, #24] - ldr r1, [sp, #56] @ 4-byte Reload - adcs r2, r2, lr - str r2, [r0, #28] - adcs r1, r1, r4 - ldr r2, [sp, #60] @ 4-byte Reload - str r1, [r0, #32] - ldr r1, [sp, #72] @ 4-byte Reload - adcs r2, r2, r5 - str r2, [r0, #36] - adcs r1, r1, r6 - ldr r2, [sp, #80] @ 4-byte Reload - str r1, [r0, #40] - ldr r1, [sp, #84] @ 4-byte Reload - adcs r2, r2, r10 - str r2, [r0, #44] - adcs r1, r1, r8 - ldr r2, [sp, #88] @ 4-byte Reload - str r1, [r0, #48] - ldr r1, [sp, #96] @ 4-byte Reload - adcs r2, r2, r9 - adcs r6, r1, r7 - str r2, [r0, #52] - ldr r1, [sp, #100] @ 4-byte Reload - ldr r2, [sp, #8] @ 4-byte Reload - str r6, [sp, #84] @ 4-byte Spill - adcs r5, r1, r2 - ldr r1, [sp, #124] @ 4-byte Reload - ldr r2, [sp, #28] @ 4-byte Reload - str r5, [sp, #88] @ 4-byte Spill - adcs r4, r1, r2 - ldr r1, [sp, #128] @ 4-byte Reload - ldr r2, [sp, #32] @ 4-byte Reload - str r4, [sp, #96] @ 4-byte Spill - adcs r7, r1, r2 - ldr r1, [sp, #136] @ 4-byte Reload - ldr r2, [sp, #36] @ 4-byte Reload - str r7, [sp, #100] @ 4-byte Spill - adcs lr, r1, r2 - ldr r1, [sp, #120] @ 4-byte Reload - ldr r2, [sp, #24] @ 4-byte Reload - str lr, [sp, #92] @ 4-byte Spill - adcs r1, r1, r2 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #136] @ 4-byte Spill - ldr r1, [sp, #140] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r1, [sp, #140] @ 4-byte Spill - ldr r1, [sp, #144] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #48] @ 4-byte Reload - str r1, [sp, #144] @ 4-byte Spill - ldr r1, [sp, #132] @ 4-byte Reload - adcs r8, r1, r2 - ldr r1, [sp, #148] @ 4-byte Reload - ldr r2, [sp, #52] @ 4-byte Reload - str r8, [sp, #124] @ 4-byte Spill - adcs r1, r1, r2 - ldr r2, [sp, #104] @ 4-byte Reload - str r1, [sp, #148] @ 4-byte Spill - ldr r1, [sp, #152] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #108] @ 4-byte Reload - str r1, [sp, #152] @ 4-byte Spill - ldr r1, [sp, #156] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #112] @ 4-byte Reload - str r1, [sp, #156] @ 4-byte Spill - ldr r1, [sp, #160] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #116] @ 4-byte 
Reload - str r1, [sp, #160] @ 4-byte Spill - ldr r1, [sp, #164] @ 4-byte Reload - adcs r1, r1, r2 - str r1, [sp, #164] @ 4-byte Spill - mov r1, #0 - adc r1, r1, #0 - str r1, [sp, #120] @ 4-byte Spill - ldmib r3, {r2, r12} - ldr r1, [r3, #16] - ldr r11, [r3] - ldr r9, [r3, #12] - ldr r10, [r3, #36] - str r1, [sp, #112] @ 4-byte Spill - ldr r1, [r3, #20] - subs r11, r6, r11 - sbcs r2, r5, r2 - sbcs r12, r4, r12 - sbcs r4, r7, r9 - ldr r7, [r3, #32] - str r1, [sp, #116] @ 4-byte Spill - ldr r1, [r3, #24] - ldr r6, [sp, #116] @ 4-byte Reload - str r1, [sp, #128] @ 4-byte Spill - ldr r1, [r3, #28] - ldr r5, [sp, #128] @ 4-byte Reload - str r1, [sp, #132] @ 4-byte Spill - ldr r1, [r3, #40] - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [r3, #44] - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [r3, #48] - str r1, [sp, #104] @ 4-byte Spill - ldr r1, [r3, #52] - str r1, [sp, #108] @ 4-byte Spill - ldr r1, [sp, #112] @ 4-byte Reload - sbcs r3, lr, r1 - ldr r1, [sp, #136] @ 4-byte Reload - sbcs lr, r1, r6 - ldr r1, [sp, #140] @ 4-byte Reload - ldr r6, [sp, #132] @ 4-byte Reload - sbcs r5, r1, r5 - ldr r1, [sp, #144] @ 4-byte Reload - sbcs r6, r1, r6 - ldr r1, [sp, #148] @ 4-byte Reload - sbcs r8, r8, r7 - ldr r7, [sp, #76] @ 4-byte Reload - sbcs r9, r1, r10 - ldr r1, [sp, #152] @ 4-byte Reload - sbcs r10, r1, r7 - ldr r1, [sp, #156] @ 4-byte Reload - ldr r7, [sp, #80] @ 4-byte Reload - sbcs r1, r1, r7 - ldr r7, [sp, #104] @ 4-byte Reload - str r1, [sp, #128] @ 4-byte Spill - ldr r1, [sp, #160] @ 4-byte Reload - sbcs r1, r1, r7 - ldr r7, [sp, #108] @ 4-byte Reload - str r1, [sp, #132] @ 4-byte Spill - ldr r1, [sp, #164] @ 4-byte Reload - sbcs r1, r1, r7 - ldr r7, [sp, #84] @ 4-byte Reload - str r1, [sp, #116] @ 4-byte Spill - ldr r1, [sp, #120] @ 4-byte Reload - sbc r1, r1, #0 - ands r1, r1, #1 - movne r11, r7 - ldr r7, [sp, #88] @ 4-byte Reload - str r11, [r0, #56] - movne r2, r7 - ldr r7, [sp, #116] @ 4-byte Reload - str r2, [r0, #60] - ldr r2, [sp, #96] @ 4-byte Reload - movne r12, r2 - ldr r2, [sp, #100] @ 4-byte Reload - cmp r1, #0 - str r12, [r0, #64] - movne r4, r2 - ldr r2, [sp, #92] @ 4-byte Reload - str r4, [r0, #68] - movne r3, r2 - ldr r2, [sp, #136] @ 4-byte Reload - str r3, [r0, #72] - ldr r3, [sp, #128] @ 4-byte Reload - movne lr, r2 - ldr r2, [sp, #140] @ 4-byte Reload - cmp r1, #0 - str lr, [r0, #76] - movne r5, r2 - ldr r2, [sp, #144] @ 4-byte Reload - str r5, [r0, #80] - movne r6, r2 - ldr r2, [sp, #124] @ 4-byte Reload - str r6, [r0, #84] - movne r8, r2 - ldr r2, [sp, #148] @ 4-byte Reload - cmp r1, #0 - str r8, [r0, #88] - movne r9, r2 - ldr r2, [sp, #152] @ 4-byte Reload - str r9, [r0, #92] - movne r10, r2 - ldr r2, [sp, #156] @ 4-byte Reload - str r10, [r0, #96] - movne r3, r2 - cmp r1, #0 - ldr r1, [sp, #160] @ 4-byte Reload - ldr r2, [sp, #132] @ 4-byte Reload - str r3, [r0, #100] - movne r2, r1 - ldr r1, [sp, #164] @ 4-byte Reload - str r2, [r0, #104] - movne r7, r1 - str r7, [r0, #108] - add sp, sp, #168 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end220: - .size mcl_fpDbl_add14L, .Lfunc_end220-mcl_fpDbl_add14L - .cantunwind - .fnend - - .globl mcl_fpDbl_sub14L - .align 2 - .type mcl_fpDbl_sub14L,%function -mcl_fpDbl_sub14L: @ @mcl_fpDbl_sub14L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #168 - sub sp, sp, #168 - ldr r7, [r2, #96] - add r9, r1, #32 - str r7, [sp, #160] @ 4-byte Spill - ldr r7, [r2, #100] - str r7, [sp, #156] @ 4-byte Spill - ldr r7, [r2, #104] - str r7, [sp, #140] @ 
4-byte Spill - ldr r7, [r2, #108] - str r7, [sp, #164] @ 4-byte Spill - ldr r7, [r2, #64] - str r7, [sp, #128] @ 4-byte Spill - ldr r7, [r2, #68] - str r7, [sp, #136] @ 4-byte Spill - ldr r7, [r2, #72] - str r7, [sp, #144] @ 4-byte Spill - ldr r7, [r2, #80] - str r7, [sp, #148] @ 4-byte Spill - ldr r7, [r2, #84] - str r7, [sp, #152] @ 4-byte Spill - ldr r7, [r2, #88] - str r7, [sp, #124] @ 4-byte Spill - ldr r7, [r2, #92] - str r7, [sp, #132] @ 4-byte Spill - ldr r7, [r2, #76] - str r7, [sp, #120] @ 4-byte Spill - ldr r7, [r2, #60] - str r7, [sp, #116] @ 4-byte Spill - ldr r7, [r2, #56] - str r7, [sp, #112] @ 4-byte Spill - ldr r7, [r2, #52] - str r7, [sp, #108] @ 4-byte Spill - ldr r7, [r2, #48] - str r7, [sp, #104] @ 4-byte Spill - ldm r2, {r5, r8, r12, lr} - ldr r6, [r1] - ldmib r1, {r4, r7, r10} - subs r5, r6, r5 - sbcs r4, r4, r8 - str r5, [sp, #32] @ 4-byte Spill - ldr r5, [r2, #44] - sbcs r7, r7, r12 - str r4, [sp, #28] @ 4-byte Spill - ldr r4, [r2, #40] - str r7, [sp, #24] @ 4-byte Spill - ldr r7, [r2, #36] - str r5, [sp, #84] @ 4-byte Spill - str r4, [sp, #80] @ 4-byte Spill - str r7, [sp, #48] @ 4-byte Spill - sbcs r7, r10, lr - ldr r10, [r2, #16] - add lr, r1, #16 - str r7, [sp, #20] @ 4-byte Spill - ldr r7, [r2, #32] - str r7, [sp, #40] @ 4-byte Spill - ldr r7, [r2, #28] - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [r2, #24] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r2, #20] - ldr r2, [r1, #96] - str r2, [sp, #88] @ 4-byte Spill - ldr r2, [r1, #100] - str r7, [sp, #12] @ 4-byte Spill - str r2, [sp, #92] @ 4-byte Spill - ldr r2, [r1, #104] - str r2, [sp, #96] @ 4-byte Spill - ldr r2, [r1, #108] - str r2, [sp, #100] @ 4-byte Spill - ldr r2, [r1, #64] - str r2, [sp, #52] @ 4-byte Spill - ldr r2, [r1, #68] - str r2, [sp, #56] @ 4-byte Spill - ldr r2, [r1, #72] - str r2, [sp, #60] @ 4-byte Spill - ldr r2, [r1, #80] - str r2, [sp, #64] @ 4-byte Spill - ldr r2, [r1, #84] - str r2, [sp, #68] @ 4-byte Spill - ldr r2, [r1, #88] - str r2, [sp, #72] @ 4-byte Spill - ldr r2, [r1, #92] - str r2, [sp, #76] @ 4-byte Spill - ldr r2, [r1, #76] - str r2, [sp, #44] @ 4-byte Spill - ldm r9, {r4, r5, r6, r8, r9} - ldr r2, [r1, #52] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #56] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #8] @ 4-byte Spill - ldm lr, {r1, r2, r12, lr} - ldr r11, [sp, #32] @ 4-byte Reload - ldr r7, [sp, #28] @ 4-byte Reload - str r11, [r0] - str r7, [r0, #4] - ldr r7, [sp, #24] @ 4-byte Reload - sbcs r1, r1, r10 - str r7, [r0, #8] - ldr r7, [sp, #20] @ 4-byte Reload - str r7, [r0, #12] - ldr r7, [sp, #12] @ 4-byte Reload - str r1, [r0, #16] - ldr r1, [sp, #16] @ 4-byte Reload - sbcs r2, r2, r7 - ldr r7, [sp] @ 4-byte Reload - str r2, [r0, #20] - ldr r2, [sp, #36] @ 4-byte Reload - sbcs r1, r12, r1 - str r1, [r0, #24] - ldr r1, [sp, #40] @ 4-byte Reload - sbcs r2, lr, r2 - add lr, r3, #8 - str r2, [r0, #28] - ldr r2, [sp, #48] @ 4-byte Reload - sbcs r1, r4, r1 - str r1, [r0, #32] - ldr r1, [sp, #80] @ 4-byte Reload - sbcs r2, r5, r2 - str r2, [r0, #36] - ldr r2, [sp, #84] @ 4-byte Reload - sbcs r1, r6, r1 - str r1, [r0, #40] - ldr r1, [sp, #104] @ 4-byte Reload - sbcs r2, r8, r2 - str r2, [r0, #44] - ldr r2, [sp, #108] @ 4-byte Reload - sbcs r1, r9, r1 - str r1, [r0, #48] - ldr r1, [sp, #112] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #4] @ 4-byte Reload - str r2, [r0, #52] - ldr r2, [sp, #8] @ 4-byte Reload - sbcs r10, r7, r1 - ldr r1, [sp, #116] @ 4-byte Reload - ldr r7, [sp, #60] @ 4-byte Reload - str r10, [sp, #80] @ 4-byte Spill - sbcs r11, 
r2, r1 - ldr r1, [sp, #128] @ 4-byte Reload - ldr r2, [sp, #52] @ 4-byte Reload - str r11, [sp, #84] @ 4-byte Spill - sbcs r1, r2, r1 - ldr r2, [sp, #56] @ 4-byte Reload - str r1, [sp, #104] @ 4-byte Spill - ldr r1, [sp, #136] @ 4-byte Reload - sbcs r1, r2, r1 - ldr r2, [sp, #144] @ 4-byte Reload - str r1, [sp, #136] @ 4-byte Spill - mov r1, #0 - sbcs r2, r7, r2 - ldr r7, [sp, #44] @ 4-byte Reload - str r2, [sp, #128] @ 4-byte Spill - ldr r2, [sp, #120] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #64] @ 4-byte Reload - str r2, [sp, #144] @ 4-byte Spill - ldr r2, [sp, #148] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #68] @ 4-byte Reload - str r2, [sp, #148] @ 4-byte Spill - ldr r2, [sp, #152] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #72] @ 4-byte Reload - str r2, [sp, #152] @ 4-byte Spill - ldr r2, [sp, #124] @ 4-byte Reload - sbcs r9, r7, r2 - ldr r2, [sp, #132] @ 4-byte Reload - ldr r7, [sp, #76] @ 4-byte Reload - str r9, [sp, #108] @ 4-byte Spill - sbcs r2, r7, r2 - ldr r7, [sp, #88] @ 4-byte Reload - str r2, [sp, #132] @ 4-byte Spill - ldr r2, [sp, #160] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #92] @ 4-byte Reload - str r2, [sp, #160] @ 4-byte Spill - ldr r2, [sp, #156] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #96] @ 4-byte Reload - str r2, [sp, #156] @ 4-byte Spill - ldr r2, [sp, #140] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #100] @ 4-byte Reload - str r2, [sp, #140] @ 4-byte Spill - ldr r2, [sp, #164] @ 4-byte Reload - sbcs r2, r7, r2 - sbc r1, r1, #0 - str r2, [sp, #164] @ 4-byte Spill - str r1, [sp, #100] @ 4-byte Spill - ldr r1, [r3, #32] - str r1, [sp, #92] @ 4-byte Spill - ldr r1, [r3, #36] - str r1, [sp, #96] @ 4-byte Spill - ldr r1, [r3, #40] - str r1, [sp, #112] @ 4-byte Spill - ldr r1, [r3, #44] - str r1, [sp, #116] @ 4-byte Spill - ldr r1, [r3, #48] - str r1, [sp, #120] @ 4-byte Spill - ldr r1, [r3, #52] - str r1, [sp, #124] @ 4-byte Spill - ldm r3, {r2, r5} - ldm lr, {r4, r6, lr} - ldr r7, [r3, #24] - ldr r8, [r3, #28] - ldr r12, [r3, #20] - ldr r3, [sp, #128] @ 4-byte Reload - adds r1, r10, r2 - ldr r10, [sp, #104] @ 4-byte Reload - ldr r2, [sp, #136] @ 4-byte Reload - adcs r5, r11, r5 - ldr r11, [sp, #124] @ 4-byte Reload - adcs r4, r10, r4 - adcs r2, r2, r6 - ldr r6, [sp, #144] @ 4-byte Reload - adcs r3, r3, lr - adcs r12, r6, r12 - ldr r6, [sp, #148] @ 4-byte Reload - adcs lr, r6, r7 - ldr r6, [sp, #152] @ 4-byte Reload - ldr r7, [sp, #132] @ 4-byte Reload - adcs r8, r6, r8 - ldr r6, [sp, #92] @ 4-byte Reload - adcs r9, r9, r6 - ldr r6, [sp, #96] @ 4-byte Reload - adcs r6, r7, r6 - ldr r7, [sp, #160] @ 4-byte Reload - str r6, [sp, #96] @ 4-byte Spill - ldr r6, [sp, #112] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #116] @ 4-byte Reload - str r7, [sp, #112] @ 4-byte Spill - ldr r7, [sp, #156] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #120] @ 4-byte Reload - str r7, [sp, #116] @ 4-byte Spill - ldr r7, [sp, #140] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #80] @ 4-byte Reload - str r7, [sp, #120] @ 4-byte Spill - ldr r7, [sp, #164] @ 4-byte Reload - adc r7, r7, r11 - str r7, [sp, #124] @ 4-byte Spill - ldr r7, [sp, #100] @ 4-byte Reload - ands r7, r7, #1 - moveq r1, r6 - moveq r4, r10 - ldr r6, [sp, #124] @ 4-byte Reload - str r1, [r0, #56] - ldr r1, [sp, #84] @ 4-byte Reload - moveq r5, r1 - ldr r1, [sp, #136] @ 4-byte Reload - cmp r7, #0 - str r5, [r0, #60] - str r4, [r0, #64] - moveq r2, r1 - ldr r1, [sp, #128] @ 4-byte Reload - str r2, [r0, #68] - ldr r2, [sp, #96] @ 4-byte Reload - moveq r3, r1 - ldr r1, 
[sp, #144] @ 4-byte Reload - str r3, [r0, #72] - ldr r3, [sp, #116] @ 4-byte Reload - moveq r12, r1 - ldr r1, [sp, #148] @ 4-byte Reload - cmp r7, #0 - str r12, [r0, #76] - moveq lr, r1 - ldr r1, [sp, #152] @ 4-byte Reload - str lr, [r0, #80] - moveq r8, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r8, [r0, #84] - moveq r9, r1 - ldr r1, [sp, #132] @ 4-byte Reload - cmp r7, #0 - str r9, [r0, #88] - moveq r2, r1 - ldr r1, [sp, #160] @ 4-byte Reload - str r2, [r0, #92] - ldr r2, [sp, #112] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #156] @ 4-byte Reload - moveq r3, r1 - ldr r1, [sp, #140] @ 4-byte Reload - cmp r7, #0 - ldr r7, [sp, #120] @ 4-byte Reload - moveq r7, r1 - ldr r1, [sp, #164] @ 4-byte Reload - moveq r6, r1 - add r1, r0, #96 - stm r1, {r2, r3, r7} - str r6, [r0, #108] - add sp, sp, #168 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end221: - .size mcl_fpDbl_sub14L, .Lfunc_end221-mcl_fpDbl_sub14L - .cantunwind - .fnend - - .align 2 - .type .LmulPv480x32,%function -.LmulPv480x32: @ @mulPv480x32 - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r11, lr} - push {r4, r5, r6, r7, r8, r9, r11, lr} - ldr r12, [r1] - ldmib r1, {r3, lr} - ldr r9, [r1, #12] - umull r4, r8, lr, r2 - umull lr, r6, r12, r2 - mov r5, r4 - mov r7, r6 - str lr, [r0] - umull lr, r12, r9, r2 - umlal r7, r5, r3, r2 - str r5, [r0, #8] - str r7, [r0, #4] - umull r5, r7, r3, r2 - adds r3, r6, r5 - adcs r3, r7, r4 - adcs r3, r8, lr - str r3, [r0, #12] - ldr r3, [r1, #16] - umull r7, r6, r3, r2 - adcs r3, r12, r7 - str r3, [r0, #16] - ldr r3, [r1, #20] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #20] - ldr r3, [r1, #24] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #24] - ldr r3, [r1, #28] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #28] - ldr r3, [r1, #32] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #32] - ldr r3, [r1, #36] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #36] - ldr r3, [r1, #40] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #40] - ldr r3, [r1, #44] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #44] - ldr r3, [r1, #48] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #48] - ldr r3, [r1, #52] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #52] - ldr r1, [r1, #56] - umull r3, r7, r1, r2 - adcs r1, r5, r3 - str r1, [r0, #56] - adc r1, r7, #0 - str r1, [r0, #60] - pop {r4, r5, r6, r7, r8, r9, r11, lr} - mov pc, lr -.Lfunc_end222: - .size .LmulPv480x32, .Lfunc_end222-.LmulPv480x32 - .cantunwind - .fnend - - .globl mcl_fp_mulUnitPre15L - .align 2 - .type mcl_fp_mulUnitPre15L,%function -mcl_fp_mulUnitPre15L: @ @mcl_fp_mulUnitPre15L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #84 - sub sp, sp, #84 - mov r4, r0 - add r0, sp, #16 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #76] - add r11, sp, #48 - add lr, sp, #20 - ldr r9, [sp, #64] - ldr r10, [sp, #60] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #72] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #68] - str r0, [sp, #4] @ 4-byte Spill - ldm r11, {r6, r8, r11} - ldr r7, [sp, #44] - ldr r5, [sp, #40] - ldr r1, [sp, #16] - ldm lr, {r0, r2, r3, r12, lr} - str r1, [r4] - stmib r4, {r0, r2, r3, r12, lr} - add r0, r4, #32 - str r5, [r4, #24] - str r7, [r4, #28] - stm r0, {r6, r8, r11} - str r10, [r4, #44] - str r9, [r4, #48] - ldr r0, [sp, #4] @ 4-byte Reload - str r0, [r4, #52] - ldr r0, [sp, #8] @ 4-byte Reload - str r0, [r4, #56] - ldr r0, [sp, #12] @ 4-byte Reload - str r0, 
[r4, #60] - add sp, sp, #84 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end223: - .size mcl_fp_mulUnitPre15L, .Lfunc_end223-mcl_fp_mulUnitPre15L - .cantunwind - .fnend - - .globl mcl_fpDbl_mulPre15L - .align 2 - .type mcl_fpDbl_mulPre15L,%function -mcl_fpDbl_mulPre15L: @ @mcl_fpDbl_mulPre15L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #44 - sub sp, sp, #44 - .pad #1024 - sub sp, sp, #1024 - mov r3, r2 - mov r4, r0 - add r0, sp, #1000 - str r1, [sp, #96] @ 4-byte Spill - mov r8, r1 - ldr r2, [r3] - str r3, [sp, #92] @ 4-byte Spill - str r4, [sp, #100] @ 4-byte Spill - mov r6, r3 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1060] - ldr r1, [sp, #1004] - ldr r2, [r6, #4] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #1056] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #1008] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #1052] - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #1012] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #1048] - str r1, [sp, #28] @ 4-byte Spill - mov r1, r8 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #1044] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #1040] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #1036] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #1032] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #1028] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1024] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1020] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1016] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1000] - str r0, [r4] - add r0, sp, #936 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #996] - add r10, sp, #960 - add lr, sp, #936 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #992] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #988] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #984] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #980] - str r0, [sp, #12] @ 4-byte Spill - ldm r10, {r5, r6, r7, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #24] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r4, #4] - ldr r0, [sp, #52] @ 4-byte Reload - adcs r4, r1, r0 - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r6, r0 - ldr r6, [sp, #92] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - ldr r2, [r6, #8] - adcs r0, r7, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r8 - str r0, [sp, #88] @ 
4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #872 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #932] - ldr r8, [sp, #872] - add r12, sp, #880 - ldr lr, [sp, #912] - ldr r7, [sp, #908] - ldr r11, [sp, #904] - ldr r9, [sp, #900] - ldr r10, [sp, #876] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #928] - adds r4, r8, r4 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #924] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #920] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #916] - str r0, [sp, #12] @ 4-byte Spill - ldm r12, {r0, r1, r2, r3, r12} - ldr r5, [sp, #100] @ 4-byte Reload - str r4, [r5, #8] - ldr r4, [sp, #52] @ 4-byte Reload - adcs r4, r10, r4 - str r4, [sp, #32] @ 4-byte Spill - ldr r4, [sp, #48] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #96] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r6, #12] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r4 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #808 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #868] - add r9, sp, #836 - add lr, sp, #816 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #864] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #860] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #856] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #852] - str r0, [sp, #12] @ 4-byte Spill - ldm r9, {r6, r7, r8, r9} - ldr r0, [sp, #808] - ldr r11, [sp, #812] - ldm lr, {r1, r2, r3, r12, lr} - ldr r10, [sp, #32] @ 4-byte Reload - adds r0, r0, r10 - str r0, [r5, #12] - ldr r0, [sp, #52] @ 4-byte Reload - ldr r5, [sp, #92] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #16] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r6, r0 - mov r6, r4 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r7, r0 - 
str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r4 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #744 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #804] - add lr, sp, #768 - add r12, sp, #748 - ldr r11, [sp, #780] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #800] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #796] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #792] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #788] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #784] - str r0, [sp, #8] @ 4-byte Spill - ldm lr, {r9, r10, lr} - ldr r8, [sp, #744] - ldm r12, {r0, r1, r2, r3, r12} - ldr r4, [sp, #32] @ 4-byte Reload - ldr r7, [sp, #100] @ 4-byte Reload - adds r4, r8, r4 - str r4, [r7, #16] - ldr r4, [sp, #52] @ 4-byte Reload - mov r7, r5 - adcs r4, r0, r4 - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #20] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r6 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #680 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #740] - ldr r9, [sp, #680] - add lr, sp, #684 - ldr r10, [sp, #720] - ldr r8, [sp, #716] - ldr r11, [sp, #712] - ldr r6, [sp, #708] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #736] - adds r4, r9, r4 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #732] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #728] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #724] - str r0, [sp, #12] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r5, [sp, #100] @ 4-byte Reload - str r4, [r5, #20] - ldr r4, [sp, #52] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, 
[sp, #96] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r7, #24] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r4 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #616 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #676] - add r8, sp, #648 - add lr, sp, #624 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #672] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #668] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #664] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #660] - str r0, [sp, #12] @ 4-byte Spill - ldm r8, {r6, r7, r8} - ldr r10, [sp, #644] - ldr r0, [sp, #616] - ldr r11, [sp, #620] - ldm lr, {r1, r2, r3, r12, lr} - ldr r9, [sp, #32] @ 4-byte Reload - adds r0, r0, r9 - str r0, [r5, #24] - ldr r0, [sp, #56] @ 4-byte Reload - ldr r5, [sp, #92] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #28] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r4 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, 
#0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #552 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #612] - add r11, sp, #584 - add r12, sp, #556 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #608] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #604] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #600] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #596] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r6, r7, r11} - ldr lr, [sp, #580] - ldr r9, [sp, #576] - ldr r10, [sp, #552] - ldm r12, {r0, r1, r2, r3, r12} - ldr r4, [sp, #32] @ 4-byte Reload - ldr r8, [sp, #100] @ 4-byte Reload - adds r4, r10, r4 - str r4, [r8, #28] - ldr r4, [sp, #60] @ 4-byte Reload - adcs r4, r0, r4 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #32] - ldr r5, [sp, #96] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #488 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #548] - ldr r9, [sp, #488] - add lr, sp, #492 - mov r6, r8 - ldr r10, [sp, #524] - ldr r11, [sp, #520] - ldr r7, [sp, #516] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #544] - adds r4, r9, r4 - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #540] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #536] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #532] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #528] - str r0, [sp, #8] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - str r4, [r6, #32] - ldr r4, [sp, #64] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #92] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r4, #36] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte 
Reload - adcs r0, r10, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #424 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #484] - add r8, sp, #456 - add lr, sp, #432 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #480] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #476] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #472] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #468] - str r0, [sp, #12] @ 4-byte Spill - ldm r8, {r5, r7, r8} - ldr r10, [sp, #452] - ldr r0, [sp, #424] - ldr r11, [sp, #428] - ldm lr, {r1, r2, r3, r12, lr} - ldr r9, [sp, #32] @ 4-byte Reload - adds r0, r0, r9 - str r0, [r6, #36] - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r4, #40] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r5, r0 - ldr r5, [sp, #96] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r7, r0 - mov r7, r4 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #360 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #420] - add r12, sp, #364 - ldr r11, [sp, #396] - ldr r6, [sp, #392] - ldr lr, [sp, #388] - ldr r9, [sp, #384] - ldr r10, [sp, #360] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #416] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #412] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #408] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #404] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #400] - str r0, [sp, #8] @ 4-byte Spill - ldm r12, {r0, r1, r2, r3, r12} - ldr r4, [sp, #32] @ 4-byte Reload - ldr r8, [sp, #100] @ 4-byte Reload - adds r4, r10, r4 - str r4, [r8, #40] - ldr r4, [sp, #72] @ 4-byte Reload 
- adcs r4, r0, r4 - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r7, #44] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #32] @ 4-byte Spill - add r0, sp, #296 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #356] - ldr r9, [sp, #296] - add lr, sp, #300 - mov r5, r8 - ldr r10, [sp, #336] - ldr r7, [sp, #332] - ldr r11, [sp, #328] - ldr r6, [sp, #324] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #352] - adds r4, r9, r4 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #348] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #344] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #340] - str r0, [sp, #8] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - str r4, [r5, #44] - ldr r4, [sp, #76] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #92] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r4, #48] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r6, r0 - ldr r6, [sp, #96] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r6 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, 
r0, #0 - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #232 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #292] - add lr, sp, #240 - ldr r8, [sp, #268] - ldr r7, [sp, #264] - ldr r10, [sp, #260] - ldr r3, [sp, #232] - ldr r11, [sp, #236] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #288] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #284] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #280] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #276] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #272] - str r0, [sp, #4] @ 4-byte Spill - ldm lr, {r0, r1, r2, r12, lr} - ldr r9, [sp, #28] @ 4-byte Reload - adds r3, r3, r9 - add r9, sp, #168 - str r3, [r5, #48] - ldr r3, [r4, #52] - ldr r4, [sp, #88] @ 4-byte Reload - adcs r4, r11, r4 - str r4, [sp, #32] @ 4-byte Spill - ldr r4, [sp, #84] @ 4-byte Reload - adcs r11, r0, r4 - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #4] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r2, r0 - mov r2, r3 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r6 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #36] @ 4-byte Spill - mov r0, r9 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #228] - add r12, sp, #172 - ldr r6, [sp, #204] - ldr r4, [sp, #200] - ldr lr, [sp, #196] - ldr r8, [sp, #192] - ldr r9, [sp, #188] - ldr r2, [sp, #168] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #224] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #220] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #216] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #212] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #208] - str r0, [sp, #4] @ 4-byte Spill - ldm r12, {r0, r1, r3, r12} - ldr r7, [sp, #32] @ 4-byte Reload - adds r2, r2, r7 - str r2, [r5, #52] - adcs r5, r0, r11 - ldr r0, [sp, #88] @ 4-byte Reload - ldr r2, [sp, #92] @ 4-byte Reload - adcs r7, r1, r0 - ldr r0, [sp, #84] @ 4-byte Reload - ldr r1, [sp, #4] @ 4-byte Reload - ldr r2, [r2, #56] - adcs r10, r3, r0 - ldr r0, [sp, #80] @ 4-byte Reload - adcs r11, r12, r0 - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r4, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, 
#52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #96] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - add r0, sp, #104 - bl .LmulPv480x32(PLT) - add r3, sp, #104 - add r12, sp, #120 - ldm r3, {r0, r1, r2, r3} - adds r6, r0, r5 - ldr r0, [sp, #164] - adcs lr, r1, r7 - adcs r4, r2, r10 - adcs r7, r3, r11 - add r11, sp, #136 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #160] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #156] - str r0, [sp, #52] @ 4-byte Spill - ldm r11, {r5, r8, r9, r10, r11} - ldm r12, {r1, r2, r3, r12} - ldr r0, [sp, #100] @ 4-byte Reload - str r6, [r0, #56] - ldr r6, [sp, #28] @ 4-byte Reload - str lr, [r0, #60] - str r4, [r0, #64] - str r7, [r0, #68] - ldr r7, [sp, #80] @ 4-byte Reload - ldr r4, [sp, #56] @ 4-byte Reload - adcs r6, r1, r6 - ldr r1, [sp, #32] @ 4-byte Reload - str r6, [r0, #72] - ldr r6, [sp, #76] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #60] @ 4-byte Reload - str r1, [r0, #76] - ldr r1, [sp, #64] @ 4-byte Reload - adcs r2, r3, r2 - ldr r3, [sp, #84] @ 4-byte Reload - str r2, [r0, #80] - ldr r2, [sp, #68] @ 4-byte Reload - adcs r1, r12, r1 - str r1, [r0, #84] - ldr r1, [sp, #92] @ 4-byte Reload - adcs r12, r5, r2 - ldr r2, [sp, #88] @ 4-byte Reload - ldr r5, [sp, #52] @ 4-byte Reload - adcs r1, r8, r1 - str r12, [r0, #88] - add r12, r0, #92 - adcs r2, r9, r2 - adcs r3, r10, r3 - adcs r7, r11, r7 - adcs r6, r5, r6 - ldr r5, [sp, #72] @ 4-byte Reload - adcs r5, r4, r5 - ldr r4, [sp, #96] @ 4-byte Reload - stm r12, {r1, r2, r3, r7} - str r6, [r0, #108] - str r5, [r0, #112] - adc r4, r4, #0 - str r4, [r0, #116] - add sp, sp, #44 - add sp, sp, #1024 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end224: - .size mcl_fpDbl_mulPre15L, .Lfunc_end224-mcl_fpDbl_mulPre15L - .cantunwind - .fnend - - .globl mcl_fpDbl_sqrPre15L - .align 2 - .type mcl_fpDbl_sqrPre15L,%function -mcl_fpDbl_sqrPre15L: @ @mcl_fpDbl_sqrPre15L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #44 - sub sp, sp, #44 - .pad #1024 - sub sp, sp, #1024 - mov r5, r1 - mov r4, r0 - add r0, sp, #1000 - ldr r2, [r5] - str r4, [sp, #100] @ 4-byte Spill - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1060] - ldr r1, [sp, #1004] - ldr r2, [r5, #4] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #1056] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #1008] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #1052] - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #1012] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #1048] - str r1, [sp, #36] @ 4-byte Spill - mov r1, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #1044] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #1040] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #1036] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #1032] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #1028] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #1024] - str r0, [sp, 
#48] @ 4-byte Spill - ldr r0, [sp, #1020] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1016] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1000] - str r0, [r4] - add r0, sp, #936 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #996] - add r10, sp, #960 - add lr, sp, #936 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #992] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #988] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #984] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #980] - str r0, [sp, #20] @ 4-byte Spill - ldm r10, {r6, r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #32] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r4, #4] - ldr r0, [sp, #60] @ 4-byte Reload - adcs r4, r1, r0 - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #20] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #8] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #872 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #932] - add r12, sp, #896 - ldr lr, [sp, #912] - ldr r6, [sp, #908] - add r10, sp, #876 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #928] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #924] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #920] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #916] - str r0, [sp, #24] @ 4-byte Spill - ldm r12, {r9, r11, r12} - ldr r8, [sp, #872] - ldm r10, {r0, r1, r2, r3, r10} - ldr r7, [sp, #100] @ 4-byte Reload - adds r4, r8, r4 - str r4, [r7, #8] - ldr r4, [sp, #60] @ 4-byte Reload - adcs r4, r0, r4 - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #12] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, 
#84] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #808 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #868] - add r10, sp, #836 - add lr, sp, #812 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #864] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #860] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #856] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #852] - str r0, [sp, #20] @ 4-byte Spill - ldm r10, {r6, r8, r9, r10} - ldr r11, [sp, #808] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r7, r11, r4 - ldr r4, [sp, #100] @ 4-byte Reload - str r7, [r4, #12] - ldr r7, [sp, #60] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #16] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #744 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #804] - add r8, sp, #776 - add lr, sp, #764 - add r12, sp, #744 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #800] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #796] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #792] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #788] - str r0, [sp, #20] @ 4-byte Spill - ldm r8, {r6, r7, r8} - ldm lr, {r9, r10, lr} - ldm r12, {r0, r1, r2, r3, r12} - ldr r11, [sp, #40] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r4, #16] - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #20] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, 
#68] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #680 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #740] - add r8, sp, #712 - add lr, sp, #684 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #736] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #732] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #728] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #724] - str r0, [sp, #20] @ 4-byte Spill - ldm r8, {r4, r6, r8} - ldr r11, [sp, #708] - ldr r10, [sp, #680] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #40] @ 4-byte Reload - ldr r9, [sp, #100] @ 4-byte Reload - adds r7, r10, r7 - str r7, [r9, #20] - ldr r7, [sp, #60] @ 4-byte Reload - adcs r7, r0, r7 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #24] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r4, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #616 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #676] - add r10, sp, #644 - add lr, sp, #620 - mov r4, r9 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #672] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #668] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #664] - str r0, [sp, #24] @ 4-byte Spill - ldr 
r0, [sp, #660] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #656] - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r6, r8, r10} - ldr r11, [sp, #616] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r7, r11, r7 - str r7, [r4, #24] - ldr r7, [sp, #64] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #28] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #552 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #612] - add r8, sp, #584 - add lr, sp, #572 - add r12, sp, #552 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #608] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #604] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #600] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #596] - str r0, [sp, #20] @ 4-byte Spill - ldm r8, {r6, r7, r8} - ldm lr, {r9, r10, lr} - ldm r12, {r0, r1, r2, r3, r12} - ldr r11, [sp, #40] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r4, #28] - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #32] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs 
r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #488 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #548] - add r8, sp, #520 - add lr, sp, #492 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #544] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #540] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #536] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #532] - str r0, [sp, #20] @ 4-byte Spill - ldm r8, {r4, r6, r8} - ldr r11, [sp, #516] - ldr r10, [sp, #488] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #40] @ 4-byte Reload - ldr r9, [sp, #100] @ 4-byte Reload - adds r7, r10, r7 - str r7, [r9, #32] - ldr r7, [sp, #72] @ 4-byte Reload - adcs r7, r0, r7 - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #36] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r4, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #424 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #484] - add r10, sp, #452 - add lr, sp, #428 - mov r4, r9 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #480] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #476] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #472] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #468] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #464] - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r6, r8, r10} - ldr r11, [sp, #424] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r7, r11, r7 - str r7, [r4, #36] - ldr r7, [sp, #76] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #40] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #76] @ 4-byte 
Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #360 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #420] - add r8, sp, #392 - add lr, sp, #380 - add r12, sp, #360 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #416] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #412] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #408] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #404] - str r0, [sp, #16] @ 4-byte Spill - ldm r8, {r6, r7, r8} - ldm lr, {r9, r10, lr} - ldm r12, {r0, r1, r2, r3, r12} - ldr r11, [sp, #40] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r4, #40] - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #44] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #40] @ 4-byte Spill - add r0, sp, #296 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #356] - add r9, sp, #328 - add lr, sp, #300 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #352] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #348] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #344] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #340] - str r0, [sp, #16] @ 4-byte Spill - ldm r9, {r6, r8, r9} - ldr r11, [sp, #324] - ldr r10, [sp, #296] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #36] @ 4-byte Reload - adds r7, r10, r7 - str r7, [r4, #44] - ldr r7, [sp, #84] @ 4-byte Reload - adcs r7, r0, r7 - ldr r0, [sp, #88] @ 4-byte Reload 
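@ mcl_fpDbl_sqrPre15L reuses the mulPre15L skeleton with both operand pointers
@ aimed at the same input (r5): pass k loads r2 from [r5, #4*k], so each
@ .LmulPv480x32(PLT) call computes x * x[k], accumulated exactly as above.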
- adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #48] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #232 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #292] - add r11, sp, #256 - add lr, sp, #236 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #288] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #284] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #280] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #276] - str r0, [sp, #16] @ 4-byte Spill - ldm r11, {r6, r8, r9, r10, r11} - ldr r12, [sp, #232] - ldm lr, {r0, r1, r2, r3, lr} - adds r7, r12, r7 - ldr r12, [r5, #52] - str r7, [r4, #48] - ldr r7, [sp, #96] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r7, r1, r0 - ldr r0, [sp, #88] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r2, r0 - mov r2, r12 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #168 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #228] - add lr, sp, #196 - add r12, sp, #172 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #224] - str r0, [sp, #28] @ 
4-byte Spill - ldr r0, [sp, #220] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #216] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #212] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #208] - str r0, [sp, #12] @ 4-byte Spill - ldm lr, {r8, r11, lr} - ldr r9, [sp, #192] - ldr r10, [sp, #188] - ldr r2, [sp, #168] - ldm r12, {r0, r1, r3, r12} - ldr r6, [sp, #40] @ 4-byte Reload - adds r2, r2, r6 - add r6, sp, #104 - str r2, [r4, #52] - adcs r4, r0, r7 - ldr r0, [sp, #96] @ 4-byte Reload - ldr r2, [r5, #56] - adcs r0, r1, r0 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r7, r3, r0 - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - mov r0, r6 - bl .LmulPv480x32(PLT) - add r3, sp, #104 - add r11, sp, #136 - add r12, sp, #120 - ldm r3, {r0, r1, r2, r3} - adds r6, r0, r4 - ldr r0, [sp, #8] @ 4-byte Reload - adcs lr, r1, r0 - ldr r0, [sp, #4] @ 4-byte Reload - adcs r4, r2, r7 - adcs r7, r3, r0 - ldr r0, [sp, #164] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #160] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #156] - str r0, [sp, #52] @ 4-byte Spill - ldm r11, {r5, r8, r9, r10, r11} - ldm r12, {r1, r2, r3, r12} - ldr r0, [sp, #100] @ 4-byte Reload - str r6, [r0, #56] - ldr r6, [sp, #36] @ 4-byte Reload - str lr, [r0, #60] - str r4, [r0, #64] - str r7, [r0, #68] - ldr r7, [sp, #84] @ 4-byte Reload - ldr r4, [sp, #56] @ 4-byte Reload - adcs r6, r1, r6 - ldr r1, [sp, #40] @ 4-byte Reload - str r6, [r0, #72] - ldr r6, [sp, #80] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #64] @ 4-byte Reload - str r1, [r0, #76] - ldr r1, [sp, #68] @ 4-byte Reload - adcs r2, r3, r2 - ldr r3, [sp, #88] @ 4-byte Reload - str r2, [r0, #80] - ldr r2, [sp, #72] @ 4-byte Reload - adcs r1, r12, r1 - str r1, [r0, #84] - ldr r1, [sp, #96] @ 4-byte Reload - adcs r12, r5, r2 - ldr r2, [sp, #92] @ 4-byte Reload - ldr r5, [sp, #52] @ 4-byte Reload - adcs r1, r8, r1 - str r12, [r0, #88] - add r12, r0, #92 - adcs r2, r9, r2 - adcs r3, r10, r3 - adcs r7, r11, r7 - adcs r6, r5, r6 - ldr r5, [sp, #76] @ 4-byte Reload - adcs r5, r4, r5 - ldr r4, [sp, #60] @ 4-byte Reload - stm r12, {r1, r2, r3, r7} - str r6, [r0, #108] - str r5, [r0, #112] - adc r4, r4, #0 - str r4, [r0, #116] - add sp, sp, #44 - add sp, sp, #1024 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end225: - 
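@ mcl_fp_mont15L (below) is a word-serial Montgomery multiplication for a
@ 15x32-bit-limb modulus: each round adds x*y[i] into the accumulator t, forms
@ m = t[0] * n0 mod 2^32 (n0 appears to be the precomputed -p^{-1} word that
@ "ldr r6, [r3, #-4]" fetches from just below the modulus), then adds m*p via
@ .LmulPv480x32(PLT) so t's low word cancels and t shifts down one word.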
.size mcl_fpDbl_sqrPre15L, .Lfunc_end225-mcl_fpDbl_sqrPre15L - .cantunwind - .fnend - - .globl mcl_fp_mont15L - .align 2 - .type mcl_fp_mont15L,%function -mcl_fp_mont15L: @ @mcl_fp_mont15L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #12 - sub sp, sp, #12 - .pad #2048 - sub sp, sp, #2048 - add r12, sp, #124 - add r7, sp, #1024 - mov r4, r3 - stm r12, {r1, r2, r3} - str r0, [sp, #88] @ 4-byte Spill - add r0, r7, #968 - ldr r6, [r3, #-4] - ldr r2, [r2] - str r6, [sp, #120] @ 4-byte Spill - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1996] - ldr r5, [sp, #1992] - add r7, sp, #1024 - mov r1, r4 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #2000] - mul r2, r5, r6 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #2004] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #2052] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #2048] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #2044] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #2040] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #2036] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #2032] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #2028] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #2024] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #2020] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #2016] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #2012] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #2008] - str r0, [sp, #48] @ 4-byte Spill - add r0, r7, #904 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1988] - ldr r1, [sp, #124] @ 4-byte Reload - ldr r9, [sp, #1952] - ldr r6, [sp, #1948] - ldr r8, [sp, #1944] - ldr r4, [sp, #1928] - ldr r10, [sp, #1932] - ldr r11, [sp, #1936] - ldr r7, [sp, #1940] - add lr, sp, #1024 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #1984] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1980] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1976] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1972] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1968] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1964] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1960] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1956] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r2, [r0, #4] - add r0, lr, #840 - bl .LmulPv480x32(PLT) - adds r0, r4, r5 - ldr r1, [sp, #64] @ 4-byte Reload - ldr r2, [sp, #20] @ 4-byte Reload - ldr r3, [sp, #1880] - ldr r12, [sp, #1884] - ldr lr, [sp, #1888] - ldr r4, [sp, #1892] - ldr r5, [sp, #1896] - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r10, r0 - ldr r10, [sp, #1908] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r11, r0 - ldr r11, [sp, #92] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r7, r0 - ldr r7, [sp, #84] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r8, r0 - ldr r8, [sp, #1900] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r6, r0 - ldr r6, [sp, #1864] - adcs r1, r9, r1 - str r0, [sp, #56] @ 4-byte Spill - mov r0, #0 - ldr r9, [sp, #1904] - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #68] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - 
adcs r1, r2, r1 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #96] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #36] @ 4-byte Reload - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #100] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #104] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #44] @ 4-byte Reload - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #108] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #52] @ 4-byte Reload - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #112] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #60] @ 4-byte Reload - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #116] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #1876] - adc r0, r0, #0 - adds r6, r11, r6 - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #1872] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1924] - str r6, [sp, #24] @ 4-byte Spill - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1920] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1916] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #1912] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #1868] - adcs r0, r7, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #1024 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - mul r2, r6, r0 - add r0, r10, #776 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1860] - ldr r1, [sp, #124] @ 4-byte Reload - ldr r6, [sp, #1828] - ldr r11, [sp, #1824] - ldr r8, [sp, #1820] - ldr r4, [sp, #1816] - ldr r5, [sp, #1800] - ldr r7, [sp, #1804] - ldr r9, [sp, #1808] - ldr r10, [sp, #1812] - add lr, sp, #1024 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1856] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1852] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1848] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1844] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1840] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1836] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1832] - str r0, [sp, #16] @ 
4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r2, [r0, #8] - add r0, lr, #712 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #24] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1748] - ldr r3, [sp, #1752] - ldr r12, [sp, #1756] - ldr lr, [sp, #1760] - adds r0, r0, r5 - ldr r5, [sp, #1768] - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1776] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - ldr r7, [sp, #112] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1780] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1764] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1772] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #116] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1736] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1744] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - adds r6, r11, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1796] - str r6, [sp, #32] @ 4-byte Spill - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1792] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1788] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1784] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1740] - adcs r0, r7, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #1024 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte 
Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - mul r2, r6, r0 - add r0, r10, #648 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1732] - ldr r1, [sp, #124] @ 4-byte Reload - ldr r6, [sp, #1700] - ldr r11, [sp, #1696] - ldr r8, [sp, #1692] - ldr r4, [sp, #1688] - ldr r5, [sp, #1672] - ldr r7, [sp, #1676] - ldr r9, [sp, #1680] - ldr r10, [sp, #1684] - add lr, sp, #1024 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1728] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1724] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1720] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1716] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1712] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1708] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1704] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r2, [r0, #12] - add r0, lr, #584 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1620] - ldr r3, [sp, #1624] - ldr r12, [sp, #1628] - ldr lr, [sp, #1632] - adds r0, r0, r5 - ldr r5, [sp, #1640] - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1648] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - ldr r7, [sp, #112] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1652] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1636] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1644] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #116] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1608] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1616] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - adds r6, r11, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, 
[sp, #1668] - str r6, [sp, #32] @ 4-byte Spill - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1664] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1660] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1656] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1612] - adcs r0, r7, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #1024 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - mul r2, r6, r0 - add r0, r10, #520 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1604] - ldr r1, [sp, #124] @ 4-byte Reload - ldr r6, [sp, #1572] - ldr r11, [sp, #1568] - ldr r8, [sp, #1564] - ldr r4, [sp, #1560] - ldr r5, [sp, #1544] - ldr r7, [sp, #1548] - ldr r9, [sp, #1552] - ldr r10, [sp, #1556] - add lr, sp, #1024 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1600] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1596] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1592] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1588] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1584] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1580] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1576] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r2, [r0, #16] - add r0, lr, #456 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1492] - ldr r3, [sp, #1496] - ldr r12, [sp, #1500] - ldr lr, [sp, #1504] - adds r0, r0, r5 - ldr r5, [sp, #1512] - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1520] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - ldr r7, [sp, #112] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1524] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1508] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1516] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte 
Reload - adcs r0, r0, r11 - ldr r11, [sp, #116] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1480] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1488] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - adds r6, r11, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1540] - str r6, [sp, #32] @ 4-byte Spill - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1536] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1532] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1528] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1484] - adcs r0, r7, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #1024 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - mul r2, r6, r0 - add r0, r10, #392 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1476] - ldr r1, [sp, #124] @ 4-byte Reload - ldr r6, [sp, #1444] - ldr r11, [sp, #1440] - ldr r8, [sp, #1436] - ldr r4, [sp, #1432] - ldr r5, [sp, #1416] - ldr r7, [sp, #1420] - ldr r9, [sp, #1424] - ldr r10, [sp, #1428] - add lr, sp, #1024 - str r0, [sp, #48] @ 4-byte Spill 
- ldr r0, [sp, #1472] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1468] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1464] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1460] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1456] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1452] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1448] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r2, [r0, #20] - add r0, lr, #328 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1364] - ldr r3, [sp, #1368] - ldr r12, [sp, #1372] - ldr lr, [sp, #1376] - adds r0, r0, r5 - ldr r5, [sp, #1384] - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1392] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - ldr r7, [sp, #112] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1396] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1380] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1388] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #116] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1352] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1360] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - adds r6, r11, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1412] - str r6, [sp, #32] @ 4-byte Spill - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1408] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1404] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1400] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1356] - adcs r0, r7, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 
4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #1024 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - mul r2, r6, r0 - add r0, r10, #264 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1348] - ldr r1, [sp, #124] @ 4-byte Reload - ldr r6, [sp, #1316] - ldr r11, [sp, #1312] - ldr r8, [sp, #1308] - ldr r4, [sp, #1304] - ldr r5, [sp, #1288] - ldr r7, [sp, #1292] - ldr r9, [sp, #1296] - ldr r10, [sp, #1300] - add lr, sp, #1024 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1344] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1340] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1336] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1332] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1328] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1324] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1320] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r2, [r0, #24] - add r0, lr, #200 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1236] - ldr r3, [sp, #1240] - ldr r12, [sp, #1244] - ldr lr, [sp, #1248] - adds r0, r0, r5 - ldr r5, [sp, #1256] - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1264] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - ldr r7, [sp, #112] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1268] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1252] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1260] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #116] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1224] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str 
r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1232] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - adds r6, r11, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1284] - str r6, [sp, #32] @ 4-byte Spill - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1280] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1276] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1272] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1228] - adcs r0, r7, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #1024 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - mul r2, r6, r0 - add r0, r10, #136 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1220] - ldr r1, [sp, #124] @ 4-byte Reload - ldr r6, [sp, #1188] - ldr r11, [sp, #1184] - ldr r8, [sp, #1180] - ldr r4, [sp, #1176] - ldr r5, [sp, #1160] - ldr r7, [sp, #1164] - ldr r9, [sp, #1168] - ldr r10, [sp, #1172] - add lr, sp, #1024 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1216] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1212] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1208] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1204] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1200] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1196] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1192] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r2, [r0, #28] - add r0, lr, #72 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1108] - ldr r3, [sp, #1112] - ldr r12, [sp, #1116] - ldr lr, [sp, #1120] - adds r0, r0, r5 - ldr r5, [sp, #1128] - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1136] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 
4-byte Reload - ldr r7, [sp, #112] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1140] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1124] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1132] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #116] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1096] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1104] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - adds r6, r11, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1156] - str r6, [sp, #32] @ 4-byte Spill - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1152] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1148] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1144] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1100] - adcs r0, r7, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #1024 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr 
r0, [sp, #120] @ 4-byte Reload - mul r2, r6, r0 - add r0, r10, #8 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1092] - ldr r1, [sp, #124] @ 4-byte Reload - ldr r6, [sp, #1060] - ldr r11, [sp, #1056] - ldr r8, [sp, #1052] - ldr r4, [sp, #1048] - ldr r5, [sp, #1032] - ldr r7, [sp, #1036] - ldr r9, [sp, #1040] - ldr r10, [sp, #1044] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1088] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1084] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1080] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1076] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1072] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1068] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1064] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r2, [r0, #32] - add r0, sp, #968 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - add lr, sp, #972 - adds r0, r0, r5 - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #996 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1028] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1024] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1020] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1016] - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #968] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #116] @ 4-byte Reload - ldr r7, [sp, #112] @ 4-byte Reload - adds r6, r11, r6 - adcs r0, r7, r0 - str r6, [sp, #32] @ 4-byte Spill - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r12 - str r0, 
[sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #904 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #964] - add r11, sp, #920 - add r10, sp, #904 - ldr r6, [sp, #932] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #960] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #956] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #952] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #948] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #944] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #940] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #936] - str r0, [sp, #16] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldm r10, {r5, r7, r9, r10} - ldr r0, [sp, #128] @ 4-byte Reload - ldr r1, [sp, #124] @ 4-byte Reload - ldr r2, [r0, #36] - add r0, sp, #840 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - add lr, sp, #844 - adds r0, r0, r5 - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #880 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r11 - add r11, sp, #868 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str 
r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #900] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #896] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #892] - str r0, [sp, #40] @ 4-byte Spill - ldm r10, {r8, r9, r10} - ldm r11, {r4, r5, r11} - ldr r6, [sp, #840] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #116] @ 4-byte Reload - adds r6, r7, r6 - ldr r7, [sp, #112] @ 4-byte Reload - str r6, [sp, #32] @ 4-byte Spill - adcs r0, r7, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #120] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - mul r2, r6, r11 - adcs r0, r0, r8 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - add r0, sp, #776 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #836] - add r10, sp, #776 - ldr r4, [sp, #800] - ldr r5, [sp, #796] - ldr r6, [sp, #792] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #832] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #828] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #824] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #820] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #816] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #812] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #808] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #804] - str r0, [sp, #12] @ 4-byte Spill - ldm r10, {r7, r8, r9, r10} - ldr r0, [sp, #128] @ 4-byte Reload - ldr r1, [sp, #124] @ 4-byte Reload - ldr r2, [r0, #40] - add r0, sp, #712 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #112] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - add lr, sp, #728 - adds r0, r0, r7 - ldr r7, [sp, #724] - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r8 - adcs r1, r1, r9 - str r1, [sp, #112] @ 4-byte Spill - ldr r1, [sp, #108] @ 4-byte Reload - adcs r1, r1, r10 - add r10, sp, #752 - str r1, [sp, #108] @ 4-byte Spill - ldr r1, [sp, #104] @ 4-byte Reload - adcs r1, r1, r6 - ldr r6, [sp, #716] - str r1, [sp, #104] @ 4-byte Spill - ldr r1, [sp, #100] @ 4-byte Reload - adcs r1, r1, r5 - ldr r5, [sp, 
#720] - str r1, [sp, #100] @ 4-byte Spill - ldr r1, [sp, #96] @ 4-byte Reload - adcs r1, r1, r4 - ldr r4, [sp, #712] - str r1, [sp, #96] @ 4-byte Spill - ldr r1, [sp, #92] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #92] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #36] @ 4-byte Reload - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [sp, #68] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #48] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r2 - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adc r1, r1, #0 - adds r0, r0, r4 - str r1, [sp, #52] @ 4-byte Spill - mul r1, r0, r11 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #772] - str r1, [sp, #44] @ 4-byte Spill - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #768] - str r0, [sp, #40] @ 4-byte Spill - ldm r10, {r4, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #112] @ 4-byte Reload - adcs r6, r11, r6 - str r6, [sp, #112] @ 4-byte Spill - ldr r6, [sp, #108] @ 4-byte Reload - adcs r5, r6, r5 - str r5, [sp, #108] @ 4-byte Spill - ldr r5, [sp, #104] @ 4-byte Reload - adcs r5, r5, r7 - str r5, [sp, #104] @ 4-byte Spill - ldr r5, [sp, #100] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - add r0, sp, #648 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #708] - add r10, sp, #648 - ldr r11, [sp, #676] - ldr r4, [sp, #672] - ldr r6, [sp, #668] - ldr r5, [sp, #664] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #704] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #700] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #696] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, 
[sp, #692] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #688] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #684] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #680] - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r7, r8, r9, r10} - ldr r0, [sp, #128] @ 4-byte Reload - ldr r1, [sp, #124] @ 4-byte Reload - ldr r2, [r0, #44] - add r0, sp, #584 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #116] @ 4-byte Reload - ldr r1, [sp, #108] @ 4-byte Reload - ldr r2, [sp, #16] @ 4-byte Reload - add lr, sp, #600 - adds r0, r0, r7 - add r7, sp, #584 - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r8 - adcs r1, r1, r9 - str r1, [sp, #112] @ 4-byte Spill - ldr r1, [sp, #104] @ 4-byte Reload - adcs r1, r1, r10 - add r10, sp, #624 - str r1, [sp, #108] @ 4-byte Spill - ldr r1, [sp, #100] @ 4-byte Reload - adcs r1, r1, r5 - str r1, [sp, #104] @ 4-byte Spill - ldr r1, [sp, #96] @ 4-byte Reload - adcs r1, r1, r6 - str r1, [sp, #100] @ 4-byte Spill - ldr r1, [sp, #92] @ 4-byte Reload - adcs r1, r1, r4 - str r1, [sp, #96] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r1, r11 - str r1, [sp, #92] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #68] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #36] @ 4-byte Reload - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r2 - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adc r1, r1, #0 - str r1, [sp, #32] @ 4-byte Spill - ldm r7, {r4, r5, r6, r7} - adds r1, r0, r4 - ldr r0, [sp, #120] @ 4-byte Reload - str r1, [sp, #116] @ 4-byte Spill - mul r2, r1, r0 - ldr r0, [sp, #644] - str r2, [sp, #28] @ 4-byte Spill - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #640] - str r0, [sp, #20] @ 4-byte Spill - ldm r10, {r4, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #112] @ 4-byte Reload - adcs r5, r11, r5 - str r5, [sp, #64] @ 4-byte Spill - ldr r5, [sp, #108] @ 4-byte Reload - adcs r5, r5, r6 - str r5, [sp, #60] @ 4-byte Spill - ldr r5, [sp, #104] @ 4-byte Reload - adcs r5, r5, r7 - str r5, [sp, #56] @ 4-byte Spill - ldr r5, [sp, #100] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - 
str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - add r0, sp, #520 - bl .LmulPv480x32(PLT) - ldr r1, [sp, #580] - add r11, sp, #524 - ldr r10, [sp, #548] - ldr r5, [sp, #544] - ldr r6, [sp, #540] - ldr r7, [sp, #520] - add r0, sp, #456 - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #576] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #572] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #568] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #564] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #560] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #556] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #552] - str r1, [sp, #16] @ 4-byte Spill - ldm r11, {r4, r8, r9, r11} - ldr r1, [sp, #128] @ 4-byte Reload - ldr r2, [r1, #48] - ldr r1, [sp, #124] @ 4-byte Reload - bl .LmulPv480x32(PLT) - ldr r0, [sp, #116] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - add lr, sp, #460 - adds r0, r0, r7 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #484 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #516] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #512] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #508] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #504] - str r0, [sp, #32] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #456] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #116] @ 4-byte Reload - ldr r7, [sp, #64] @ 4-byte Reload - adds r6, r11, r6 - adcs r0, r7, r0 - str r6, [sp, #28] @ 
4-byte Spill - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #392 - bl .LmulPv480x32(PLT) - ldr r1, [sp, #452] - ldr r6, [sp, #420] - ldr r7, [sp, #416] - ldr r9, [sp, #412] - ldr r4, [sp, #408] - ldr r10, [sp, #392] - ldr r11, [sp, #396] - ldr r8, [sp, #400] - ldr r5, [sp, #404] - add r0, sp, #328 - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #448] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #444] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #440] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #436] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #432] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #428] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #424] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #128] @ 4-byte Reload - ldr r2, [r1, #52] - ldr r1, [sp, #124] @ 4-byte Reload - bl .LmulPv480x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - add lr, sp, #332 - adds r0, r0, r10 - add r10, sp, #356 - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, 
[sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #388] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #384] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #380] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #376] - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #328] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #68] @ 4-byte Reload - ldr r7, [sp, #64] @ 4-byte Reload - adds r6, r11, r6 - adcs r0, r7, r0 - str r6, [sp, #28] @ 4-byte Spill - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #264 - bl .LmulPv480x32(PLT) - ldr r1, [sp, #324] - add r9, sp, #276 - ldr r6, [sp, #292] - ldr r7, [sp, #288] - ldr r10, [sp, #264] - ldr r11, [sp, #268] - ldr r5, [sp, #272] - add r0, sp, #200 - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #320] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #316] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #312] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #308] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #304] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #300] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #296] - str r1, [sp, #16] @ 4-byte Spill - ldm r9, {r4, r8, r9} - ldr r1, [sp, #128] @ 4-byte Reload - ldr r2, [r1, #56] - ldr r1, [sp, #124] @ 4-byte Reload - bl .LmulPv480x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #64] @ 
4-byte Reload - ldr r2, [sp, #16] @ 4-byte Reload - add lr, sp, #216 - adds r0, r0, r10 - ldr r10, [sp, #212] - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r11 - adcs r1, r1, r5 - ldr r5, [sp, #208] - str r1, [sp, #128] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, r4 - ldr r4, [sp, #200] - str r1, [sp, #124] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r8 - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r9 - add r9, sp, #240 - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #116] @ 4-byte Reload - adcs r1, r1, r7 - str r1, [sp, #116] @ 4-byte Spill - ldr r1, [sp, #112] @ 4-byte Reload - adcs r1, r1, r6 - ldr r6, [sp, #204] - str r1, [sp, #112] @ 4-byte Spill - ldr r1, [sp, #108] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #108] @ 4-byte Spill - ldr r1, [sp, #104] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #104] @ 4-byte Spill - ldr r1, [sp, #100] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #100] @ 4-byte Spill - ldr r1, [sp, #96] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #36] @ 4-byte Reload - str r1, [sp, #96] @ 4-byte Spill - ldr r1, [sp, #92] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #92] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #48] @ 4-byte Reload - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r1, r2 - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - adc r1, r1, #0 - adds r7, r0, r4 - ldr r0, [sp, #120] @ 4-byte Reload - str r1, [sp, #72] @ 4-byte Spill - mul r1, r7, r0 - ldr r0, [sp, #260] - str r1, [sp, #60] @ 4-byte Spill - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #256] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #252] - str r0, [sp, #120] @ 4-byte Spill - ldm r9, {r4, r8, r9} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #128] @ 4-byte Reload - adcs r11, r11, r6 - ldr r6, [sp, #124] @ 4-byte Reload - adcs r5, r6, r5 - ldr r6, [sp, #68] @ 4-byte Reload - adcs r10, r6, r10 - ldr r6, [sp, #64] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #120] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #60] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r8, r0, r8 - ldr r0, [sp, #84] @ 4-byte Reload - adcs r6, r0, r9 - ldr r0, [sp, #80] @ 4-byte Reload - ldr r9, [sp, #132] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r9 - str r0, [sp, #128] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str 
r0, [sp, #84] @ 4-byte Spill - add r0, sp, #136 - bl .LmulPv480x32(PLT) - add r3, sp, #136 - ldm r3, {r0, r1, r2, r3} - adds r0, r7, r0 - adcs r11, r11, r1 - ldr r0, [sp, #152] - ldr r1, [sp, #48] @ 4-byte Reload - adcs lr, r5, r2 - mov r5, r9 - str r11, [sp, #44] @ 4-byte Spill - adcs r10, r10, r3 - str lr, [sp, #52] @ 4-byte Spill - str r10, [sp, #60] @ 4-byte Spill - adcs r4, r1, r0 - ldr r0, [sp, #156] - ldr r1, [sp, #56] @ 4-byte Reload - str r4, [sp, #76] @ 4-byte Spill - adcs r12, r1, r0 - ldr r0, [sp, #160] - ldr r1, [sp, #64] @ 4-byte Reload - str r12, [sp, #56] @ 4-byte Spill - adcs r0, r1, r0 - ldr r1, [sp, #68] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #164] - adcs r0, r1, r0 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #168] - adcs r0, r1, r0 - ldr r1, [sp, #112] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #172] - adcs r0, r1, r0 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #176] - adcs r0, r1, r0 - ldr r1, [sp, #120] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #180] - adcs r0, r8, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #184] - adcs r0, r6, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #188] - adcs r0, r1, r0 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #192] - adcs r0, r1, r0 - ldr r1, [sp, #128] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #196] - adcs r0, r1, r0 - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - ldmib r5, {r1, r2} - ldr r3, [r5, #16] - ldr r7, [r5] - ldr r0, [r5, #12] - ldr r6, [r5, #20] - ldr r9, [r5, #24] - ldr r8, [r5, #32] - str r3, [sp, #80] @ 4-byte Spill - ldr r3, [r5, #28] - subs r7, r11, r7 - add r11, r5, #36 - str r3, [sp, #84] @ 4-byte Spill - sbcs r3, lr, r1 - sbcs lr, r10, r2 - ldm r11, {r1, r10, r11} - sbcs r4, r4, r0 - ldr r0, [r5, #48] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [r5, #52] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [r5, #56] - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - sbcs r2, r12, r0 - ldr r0, [sp, #92] @ 4-byte Reload - sbcs r12, r0, r6 - ldr r0, [sp, #96] @ 4-byte Reload - ldr r6, [sp, #84] @ 4-byte Reload - sbcs r5, r0, r9 - ldr r0, [sp, #100] @ 4-byte Reload - sbcs r6, r0, r6 - ldr r0, [sp, #104] @ 4-byte Reload - sbcs r8, r0, r8 - ldr r0, [sp, #108] @ 4-byte Reload - sbcs r9, r0, r1 - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #64] @ 4-byte Reload - sbcs r10, r0, r10 - ldr r0, [sp, #116] @ 4-byte Reload - sbcs r11, r0, r11 - ldr r0, [sp, #120] @ 4-byte Reload - sbcs r0, r0, r1 - ldr r1, [sp, #68] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - sbcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - sbcs r0, r0, r1 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - sbc r0, r0, #0 - ands r1, r0, #1 - ldr r0, [sp, #44] @ 4-byte Reload - movne r7, r0 - ldr r0, [sp, #88] @ 4-byte Reload - str r7, [r0] - ldr r7, [sp, #52] @ 4-byte Reload - movne r3, r7 - str r3, [r0, #4] - ldr r3, [sp, #60] @ 4-byte Reload - movne lr, r3 - ldr r3, [sp, #76] @ 4-byte Reload - cmp r1, #0 - str lr, [r0, #8] - movne r4, r3 - ldr r3, [sp, #56] @ 4-byte Reload - str r4, [r0, #12] - movne r2, r3 - str r2, [r0, #16] - ldr r2, [sp, #92] @ 4-byte Reload - movne r12, 
r2 - ldr r2, [sp, #96] @ 4-byte Reload - cmp r1, #0 - str r12, [r0, #20] - movne r5, r2 - ldr r2, [sp, #100] @ 4-byte Reload - str r5, [r0, #24] - movne r6, r2 - ldr r2, [sp, #104] @ 4-byte Reload - str r6, [r0, #28] - movne r8, r2 - ldr r2, [sp, #108] @ 4-byte Reload - cmp r1, #0 - str r8, [r0, #32] - movne r9, r2 - ldr r2, [sp, #112] @ 4-byte Reload - str r9, [r0, #36] - movne r10, r2 - ldr r2, [sp, #116] @ 4-byte Reload - str r10, [r0, #40] - movne r11, r2 - cmp r1, #0 - ldr r1, [sp, #120] @ 4-byte Reload - ldr r2, [sp, #80] @ 4-byte Reload - str r11, [r0, #44] - movne r2, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r2, [r0, #48] - ldr r2, [sp, #84] @ 4-byte Reload - movne r2, r1 - ldr r1, [sp, #128] @ 4-byte Reload - str r2, [r0, #52] - ldr r2, [sp, #132] @ 4-byte Reload - movne r2, r1 - str r2, [r0, #56] - add sp, sp, #12 - add sp, sp, #2048 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end226: - .size mcl_fp_mont15L, .Lfunc_end226-mcl_fp_mont15L - .cantunwind - .fnend - - .globl mcl_fp_montNF15L - .align 2 - .type mcl_fp_montNF15L,%function -mcl_fp_montNF15L: @ @mcl_fp_montNF15L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #4 - sub sp, sp, #4 - .pad #2048 - sub sp, sp, #2048 - add r12, sp, #116 - mov r4, r3 - stm r12, {r1, r2, r3} - str r0, [sp, #76] @ 4-byte Spill - add r0, sp, #1984 - ldr r5, [r3, #-4] - ldr r2, [r2] - str r5, [sp, #112] @ 4-byte Spill - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1984] - ldr r1, [sp, #1988] - str r0, [sp, #60] @ 4-byte Spill - mul r2, r0, r5 - ldr r0, [sp, #2044] - str r1, [sp, #88] @ 4-byte Spill - ldr r1, [sp, #1992] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #2040] - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #1996] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #2036] - str r1, [sp, #80] @ 4-byte Spill - mov r1, r4 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #2032] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #2028] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #2024] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #2020] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #2016] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #2012] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #2008] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #2004] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #2000] - str r0, [sp, #36] @ 4-byte Spill - add r0, sp, #1920 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1980] - add r7, sp, #1936 - add r11, sp, #1920 - ldr r6, [sp, #1948] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1976] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1972] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1968] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1964] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1960] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1956] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1952] - str r0, [sp, #12] @ 4-byte Spill - ldm r7, {r4, r5, r7} - ldm r11, {r9, r10, r11} - ldr r0, [sp, #120] @ 4-byte Reload - ldr r1, [sp, #116] @ 4-byte Reload - ldr r8, [sp, #1932] - ldr r2, [r0, #4] - add r0, sp, #1856 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #60] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - ldr r2, [sp, #1868] - ldr r3, [sp, #1872] - ldr r12, [sp, #1876] - ldr lr, [sp, #1880] - adds r0, r9, r0 - ldr r9, [sp, #1896] - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r10, r0 - ldr r10, [sp, #1900] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] 
@ 4-byte Reload - adcs r0, r11, r0 - ldr r11, [sp, #88] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r8, r0 - ldr r8, [sp, #1892] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #1884] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r5, r0 - ldr r5, [sp, #1888] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r7, r0 - ldr r7, [sp, #84] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r6, r0 - ldr r6, [sp, #1856] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adc r0, r1, r0 - adds r6, r11, r6 - ldr r1, [sp, #1864] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1916] - str r6, [sp, #28] @ 4-byte Spill - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1912] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1908] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #1904] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #1860] - adcs r0, r7, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #24] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #1792 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1852] - add r11, sp, #1808 
- add r10, sp, #1792 - ldr r6, [sp, #1820] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1848] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1844] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1840] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1836] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1832] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1828] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1824] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldm r10, {r5, r7, r9, r10} - ldr r0, [sp, #120] @ 4-byte Reload - ldr r1, [sp, #116] @ 4-byte Reload - ldr r2, [r0, #8] - add r0, sp, #1728 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - ldr r2, [sp, #1740] - ldr r3, [sp, #1744] - ldr r12, [sp, #1748] - ldr lr, [sp, #1752] - adds r0, r0, r5 - ldr r5, [sp, #1760] - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1768] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - ldr r7, [sp, #104] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1772] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1756] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1764] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #108] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1728] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, r1 - adds r6, r11, r6 - ldr r1, [sp, #1736] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1788] - str r6, [sp, #28] @ 4-byte Spill - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1784] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1780] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1776] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1732] - adcs r0, r7, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - 
adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #1664 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1724] - add r11, sp, #1680 - add r10, sp, #1664 - ldr r6, [sp, #1692] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1720] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1716] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1712] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1708] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1704] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1700] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1696] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldm r10, {r5, r7, r9, r10} - ldr r0, [sp, #120] @ 4-byte Reload - ldr r1, [sp, #116] @ 4-byte Reload - ldr r2, [r0, #12] - add r0, sp, #1600 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - ldr r2, [sp, #1612] - ldr r3, [sp, #1616] - ldr r12, [sp, #1620] - ldr lr, [sp, #1624] - adds r0, r0, r5 - ldr r5, [sp, #1632] - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1640] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - ldr r7, [sp, #104] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1644] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1628] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1636] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #108] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1600] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, 
#52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, r1 - adds r6, r11, r6 - ldr r1, [sp, #1608] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1660] - str r6, [sp, #28] @ 4-byte Spill - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1656] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1652] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1648] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1604] - adcs r0, r7, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #1536 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1596] - add r11, sp, #1552 - add r10, sp, #1536 - ldr r6, [sp, #1564] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1592] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1588] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1584] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1580] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1576] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1572] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1568] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldm r10, {r5, r7, r9, r10} - ldr r0, [sp, #120] @ 4-byte Reload - ldr r1, [sp, #116] @ 4-byte Reload - ldr r2, [r0, #16] - add r0, sp, #1472 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - ldr r2, [sp, #1484] - ldr r3, [sp, #1488] - ldr r12, [sp, #1492] - ldr lr, [sp, #1496] - adds r0, r0, r5 - ldr r5, [sp, #1504] - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1512] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - ldr r7, [sp, #104] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1516] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1500] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1508] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #108] @ 
4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1472] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, r1 - adds r6, r11, r6 - ldr r1, [sp, #1480] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1532] - str r6, [sp, #28] @ 4-byte Spill - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1528] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1524] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1520] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1476] - adcs r0, r7, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #1408 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1468] - add r11, sp, #1424 - add r10, sp, #1408 - ldr r6, [sp, #1436] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1464] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1460] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1456] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1452] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1448] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1444] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1440] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldm r10, 
{r5, r7, r9, r10} - ldr r0, [sp, #120] @ 4-byte Reload - ldr r1, [sp, #116] @ 4-byte Reload - ldr r2, [r0, #20] - add r0, sp, #1344 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - ldr r2, [sp, #1356] - ldr r3, [sp, #1360] - ldr r12, [sp, #1364] - ldr lr, [sp, #1368] - adds r0, r0, r5 - ldr r5, [sp, #1376] - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1384] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - ldr r7, [sp, #104] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1388] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1372] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1380] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #108] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1344] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, r1 - adds r6, r11, r6 - ldr r1, [sp, #1352] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1404] - str r6, [sp, #28] @ 4-byte Spill - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1400] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1396] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1392] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1348] - adcs r0, r7, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, 
#60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #1280 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1340] - add r11, sp, #1296 - add r10, sp, #1280 - ldr r6, [sp, #1308] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1336] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1332] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1328] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1324] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1320] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1316] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1312] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldm r10, {r5, r7, r9, r10} - ldr r0, [sp, #120] @ 4-byte Reload - ldr r1, [sp, #116] @ 4-byte Reload - ldr r2, [r0, #24] - add r0, sp, #1216 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - ldr r2, [sp, #1228] - ldr r3, [sp, #1232] - ldr r12, [sp, #1236] - ldr lr, [sp, #1240] - adds r0, r0, r5 - ldr r5, [sp, #1248] - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1256] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - ldr r7, [sp, #104] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1260] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1244] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1252] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #108] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1216] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, r1 - adds r6, r11, r6 - ldr r1, [sp, #1224] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1276] - str r6, [sp, #28] @ 4-byte Spill - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1272] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1268] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1264] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1220] - adcs r0, r7, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 
4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #1152 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1212] - add r11, sp, #1168 - add r10, sp, #1152 - ldr r6, [sp, #1180] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1208] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1204] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1200] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1196] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1192] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1188] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1184] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldm r10, {r5, r7, r9, r10} - ldr r0, [sp, #120] @ 4-byte Reload - ldr r1, [sp, #116] @ 4-byte Reload - ldr r2, [r0, #28] - add r0, sp, #1088 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - ldr r2, [sp, #1100] - ldr r3, [sp, #1104] - ldr r12, [sp, #1108] - ldr lr, [sp, #1112] - adds r0, r0, r5 - ldr r5, [sp, #1120] - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1128] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - ldr r7, [sp, #104] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1132] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1116] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1124] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - ldr r11, [sp, #108] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1088] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, 
#68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, r1 - adds r6, r11, r6 - ldr r1, [sp, #1096] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1148] - str r6, [sp, #28] @ 4-byte Spill - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1144] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1140] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1136] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1092] - adcs r0, r7, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #1024 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #1084] - add r11, sp, #1040 - add r10, sp, #1024 - ldr r6, [sp, #1052] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1080] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1076] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1072] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1068] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1064] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1060] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1056] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldm r10, {r5, r7, r9, r10} - ldr r0, [sp, #120] @ 4-byte Reload - ldr r1, [sp, #116] @ 4-byte Reload - ldr r2, [r0, #32] - add r0, sp, #960 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #964 - adds r0, r0, r5 - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, 
#988 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1020] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1016] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1012] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1008] - str r0, [sp, #32] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #960] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #108] @ 4-byte Reload - ldr r7, [sp, #104] @ 4-byte Reload - adds r6, r11, r6 - adcs r0, r7, r0 - str r6, [sp, #28] @ 4-byte Spill - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #896 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #956] - add r11, sp, #912 - add r10, sp, #896 - ldr r6, [sp, #924] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #952] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #948] - str 
r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #944] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #940] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #936] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #932] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #928] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r8, r11} - ldm r10, {r5, r7, r9, r10} - ldr r0, [sp, #120] @ 4-byte Reload - ldr r1, [sp, #116] @ 4-byte Reload - ldr r2, [r0, #36] - add r0, sp, #832 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #836 - adds r0, r0, r5 - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #860 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #892] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #888] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #884] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #880] - str r0, [sp, #32] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #832] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #108] @ 4-byte Reload - ldr r7, [sp, #104] @ 4-byte Reload - adds r6, r11, r6 - adcs r0, r7, r0 - ldr r7, [sp, #112] @ 4-byte Reload - str r6, [sp, #28] @ 4-byte Spill - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r6, r7 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte 
Reload - adcs r0, r0, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - add r0, sp, #768 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #828] - add r11, sp, #768 - ldr r6, [sp, #792] - ldr r5, [sp, #788] - ldr r8, [sp, #784] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #824] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #820] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #816] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #812] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #808] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #804] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #800] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #796] - str r0, [sp, #8] @ 4-byte Spill - ldm r11, {r9, r10, r11} - ldr r0, [sp, #120] @ 4-byte Reload - ldr r1, [sp, #116] @ 4-byte Reload - ldr r4, [sp, #780] - ldr r2, [r0, #40] - add r0, sp, #704 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #8] @ 4-byte Reload - add lr, sp, #720 - adds r0, r0, r9 - ldr r0, [sp, #108] @ 4-byte Reload - adcs r2, r0, r10 - ldr r0, [sp, #104] @ 4-byte Reload - add r10, sp, #744 - adcs r0, r0, r11 - ldr r11, [sp, #708] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #716] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #704] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #712] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #48] @ 4-byte Spill - adds r0, r2, r5 - mul r1, r0, r7 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #764] - str r1, [sp, #40] @ 4-byte Spill - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #760] - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r4, r5, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #104] @ 4-byte Reload 
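@ Just above, "mul r1, r0, r7" forms the quotient word: r0 holds the
@ freshly combined low limb and r7 was reloaded earlier from [sp, #112].
@ The product is spilled to [sp, #40] and later pulled back into r2 as
@ the multiplier for the following .LmulPv480x32 call.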
- adcs r7, r7, r11 - str r7, [sp, #104] @ 4-byte Spill - ldr r7, [sp, #100] @ 4-byte Reload - adcs r6, r7, r6 - str r6, [sp, #100] @ 4-byte Spill - ldr r6, [sp, #96] @ 4-byte Reload - adcs r6, r6, r8 - str r6, [sp, #96] @ 4-byte Spill - ldr r6, [sp, #92] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #640 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #700] - add r7, sp, #656 - add r11, sp, #640 - ldr r4, [sp, #668] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #696] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #692] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #688] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #684] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #680] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #676] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #672] - str r0, [sp, #12] @ 4-byte Spill - ldm r7, {r5, r6, r7} - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #120] @ 4-byte Reload - ldr r1, [sp, #116] @ 4-byte Reload - ldr r2, [r0, #44] - add r0, sp, #576 - bl .LmulPv480x32(PLT) - ldr r0, [sp, #108] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #592 - adds r0, r0, r8 - ldr r0, [sp, #104] @ 4-byte Reload - adcs r2, r0, r9 - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #616 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r7 - add r7, sp, #576 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte 
Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #40] @ 4-byte Spill - ldm r7, {r4, r6, r7} - ldr r0, [sp, #112] @ 4-byte Reload - ldr r5, [sp, #588] - adds r1, r2, r4 - mul r2, r1, r0 - ldr r0, [sp, #636] - str r1, [sp, #108] @ 4-byte Spill - str r2, [sp, #24] @ 4-byte Spill - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #632] - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r4, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #104] @ 4-byte Reload - adcs r6, r11, r6 - str r6, [sp, #60] @ 4-byte Spill - ldr r6, [sp, #100] @ 4-byte Reload - adcs r6, r6, r7 - str r6, [sp, #56] @ 4-byte Spill - ldr r6, [sp, #96] @ 4-byte Reload - adcs r5, r6, r5 - str r5, [sp, #52] @ 4-byte Spill - ldr r5, [sp, #92] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - add r0, sp, #512 - bl .LmulPv480x32(PLT) - ldr r1, [sp, #572] - add r11, sp, #520 - ldr r8, [sp, #540] - ldr r9, [sp, #536] - ldr r10, [sp, #532] - ldr r6, [sp, #512] - ldr r7, [sp, #516] - add r0, sp, #448 - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #568] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #564] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #560] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #556] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #552] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #548] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #544] - str r1, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r5, r11} - ldr r1, [sp, #120] @ 4-byte Reload - ldr r2, [r1, #48] - ldr r1, [sp, #116] @ 4-byte Reload - bl .LmulPv480x32(PLT) - ldr r0, [sp, #108] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #452 - adds r0, r0, r6 - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #476 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, 
#104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #508] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #504] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #500] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #496] - str r0, [sp, #28] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #448] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #108] @ 4-byte Reload - ldr r7, [sp, #60] @ 4-byte Reload - adds r6, r11, r6 - adcs r0, r7, r0 - str r6, [sp, #24] @ 4-byte Spill - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #384 - bl .LmulPv480x32(PLT) - ldr r1, [sp, #444] - add r9, sp, #396 - ldr r11, [sp, #412] - ldr r8, [sp, #408] - ldr r5, [sp, #384] - ldr r4, [sp, #388] - ldr r10, [sp, #392] - add r0, sp, #320 - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #440] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #436] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #432] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #428] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #424] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, 
[sp, #420] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #416] - str r1, [sp, #12] @ 4-byte Spill - ldm r9, {r6, r7, r9} - ldr r1, [sp, #120] @ 4-byte Reload - ldr r2, [r1, #52] - ldr r1, [sp, #116] @ 4-byte Reload - bl .LmulPv480x32(PLT) - ldr r0, [sp, #24] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #324 - adds r0, r0, r5 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #348 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #380] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #376] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #372] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #368] - str r0, [sp, #32] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #320] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #64] @ 4-byte Reload - ldr r7, [sp, #60] @ 4-byte Reload - adds r6, r11, r6 - adcs r0, r7, r0 - str r6, [sp, #24] @ 4-byte Spill - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload 
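@ The long adcs ladders such as this one propagate a single carry
@ through all fifteen spill-slot limbs; each stanza then banks the final
@ carry with "adc r0, r0, #0" so that no condition flags need to survive
@ the next .LmulPv480x32 call.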
- adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #256 - bl .LmulPv480x32(PLT) - ldr r1, [sp, #316] - add r11, sp, #260 - ldr r8, [sp, #284] - ldr r9, [sp, #280] - ldr r10, [sp, #276] - ldr r7, [sp, #256] - add r0, sp, #192 - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #312] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #308] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #304] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #300] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #296] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #292] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #288] - str r1, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r5, r6, r11} - ldr r1, [sp, #120] @ 4-byte Reload - ldr r2, [r1, #56] - ldr r1, [sp, #116] @ 4-byte Reload - bl .LmulPv480x32(PLT) - ldr r0, [sp, #24] @ 4-byte Reload - ldr r1, [sp, #60] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - add lr, sp, #208 - adds r0, r0, r7 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r4 - adcs r1, r1, r5 - str r1, [sp, #120] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r6 - str r1, [sp, #116] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r11 - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - adcs r1, r1, r10 - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #108] @ 4-byte Reload - adcs r1, r1, r9 - add r9, sp, #192 - str r1, [sp, #108] @ 4-byte Spill - ldr r1, [sp, #104] @ 4-byte Reload - adcs r1, r1, r8 - str r1, [sp, #104] @ 4-byte Spill - ldr r1, [sp, #100] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #100] @ 4-byte Spill - ldr r1, [sp, #96] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #96] @ 4-byte Spill - ldr r1, [sp, #92] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #92] @ 4-byte Spill - ldr r1, [sp, #88] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #88] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #36] @ 4-byte Reload - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [sp, #68] @ 4-byte Reload - adc r1, r1, r2 - str r1, [sp, #68] @ 4-byte Spill - ldm r9, {r4, r8, r9} - ldr r7, [sp, #204] - ldr r10, [sp, #236] - adds r5, r0, r4 - ldr r0, [sp, #112] @ 4-byte Reload - ldr r4, [sp, #232] - mul r1, r5, r0 - ldr r0, [sp, #252] - str r1, [sp, #56] @ 4-byte Spill - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #248] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #244] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #240] - str r0, [sp, #36] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #120] @ 4-byte Reload - ldr r6, [sp, #116] @ 4-byte Reload - adcs r8, r11, r8 - adcs r9, r6, r9 - ldr r6, [sp, #64] @ 4-byte Reload - adcs r7, r6, r7 - ldr r6, [sp, #60] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload 
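@ "ldr r2, [r1, #56]" above fetched the fifteenth and final multiplier
@ word. After this last accumulation the result can still be one modulus
@ too large, so the code that follows computes result - p with subs/sbcs
@ and uses the sign (asr ... #31 plus movlt) to pick the reduced limb
@ values: montNF's branch-free final subtraction.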
- adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #56] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r6, r0, r10 - ldr r0, [sp, #80] @ 4-byte Reload - ldr r10, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #112] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r10 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #120] @ 4-byte Spill - add r0, sp, #128 - bl .LmulPv480x32(PLT) - add r3, sp, #128 - ldm r3, {r0, r1, r2, r3} - adds r0, r5, r0 - adcs r11, r8, r1 - ldr r0, [sp, #144] - ldr r1, [sp, #64] @ 4-byte Reload - adcs lr, r9, r2 - str r11, [sp, #40] @ 4-byte Spill - adcs r8, r7, r3 - str lr, [sp, #48] @ 4-byte Spill - str r8, [sp, #56] @ 4-byte Spill - adcs r4, r1, r0 - ldr r0, [sp, #148] - ldr r1, [sp, #44] @ 4-byte Reload - str r4, [sp, #64] @ 4-byte Spill - adcs r0, r1, r0 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #152] - adcs r0, r1, r0 - ldr r1, [sp, #60] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #156] - adcs r0, r1, r0 - ldr r1, [sp, #96] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #160] - adcs r0, r1, r0 - ldr r1, [sp, #100] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #164] - adcs r0, r1, r0 - ldr r1, [sp, #104] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #168] - adcs r0, r1, r0 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #172] - adcs r0, r6, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #176] - adcs r0, r1, r0 - ldr r1, [sp, #112] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #180] - adcs r0, r1, r0 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #184] - adcs r0, r1, r0 - ldr r1, [sp, #120] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #188] - adc r0, r1, r0 - mov r1, r10 - add r10, r1, #20 - str r0, [sp, #120] @ 4-byte Spill - ldmib r1, {r0, r6} - ldr r2, [r1, #12] - ldr r12, [r1, #16] - ldm r10, {r5, r9, r10} - ldr r7, [r1] - subs r7, r11, r7 - ldr r11, [r1, #36] - sbcs r3, lr, r0 - ldr r0, [r1, #32] - sbcs lr, r8, r6 - ldr r8, [r1, #40] - sbcs r4, r4, r2 - ldr r2, [r1, #44] - str r2, [sp, #68] @ 4-byte Spill - ldr r2, [r1, #48] - str r2, [sp, #72] @ 4-byte Spill - ldr r2, [r1, #52] - ldr r1, [r1, #56] - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - str r2, [sp, #52] @ 4-byte Spill - sbcs r2, r1, r12 - ldr r1, [sp, #84] @ 4-byte Reload - sbcs r12, r1, r5 - ldr r1, [sp, #88] @ 4-byte Reload - sbcs r5, r1, r9 - ldr r1, [sp, #92] @ 4-byte Reload - sbcs r6, r1, r10 - ldr r1, [sp, #96] @ 4-byte Reload - sbcs r9, r1, r0 - ldr r0, [sp, #100] @ 4-byte Reload - ldr r1, [sp, #68] @ 4-byte Reload - sbcs r10, r0, r11 - ldr r0, [sp, #104] @ 
4-byte Reload
- sbcs r11, r0, r8
- ldr r0, [sp, #108] @ 4-byte Reload
- sbcs r0, r0, r1
- ldr r1, [sp, #72] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- sbcs r0, r0, r1
- ldr r1, [sp, #52] @ 4-byte Reload
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- sbcs r0, r0, r1
- ldr r1, [sp, #60] @ 4-byte Reload
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- sbc r8, r0, r1
- ldr r0, [sp, #40] @ 4-byte Reload
- asr r1, r8, #31
- cmp r1, #0
- movlt r7, r0
- ldr r0, [sp, #76] @ 4-byte Reload
- str r7, [r0]
- ldr r7, [sp, #48] @ 4-byte Reload
- movlt r3, r7
- str r3, [r0, #4]
- ldr r3, [sp, #56] @ 4-byte Reload
- movlt lr, r3
- ldr r3, [sp, #64] @ 4-byte Reload
- cmp r1, #0
- str lr, [r0, #8]
- movlt r4, r3
- ldr r3, [sp, #80] @ 4-byte Reload
- str r4, [r0, #12]
- movlt r2, r3
- ldr r3, [sp, #68] @ 4-byte Reload
- str r2, [r0, #16]
- ldr r2, [sp, #84] @ 4-byte Reload
- movlt r12, r2
- ldr r2, [sp, #88] @ 4-byte Reload
- cmp r1, #0
- str r12, [r0, #20]
- movlt r5, r2
- ldr r2, [sp, #92] @ 4-byte Reload
- str r5, [r0, #24]
- movlt r6, r2
- ldr r2, [sp, #96] @ 4-byte Reload
- str r6, [r0, #28]
- movlt r9, r2
- ldr r2, [sp, #100] @ 4-byte Reload
- cmp r1, #0
- str r9, [r0, #32]
- movlt r10, r2
- ldr r2, [sp, #104] @ 4-byte Reload
- str r10, [r0, #36]
- movlt r11, r2
- ldr r2, [sp, #108] @ 4-byte Reload
- str r11, [r0, #40]
- movlt r3, r2
- cmp r1, #0
- ldr r1, [sp, #112] @ 4-byte Reload
- ldr r2, [sp, #72] @ 4-byte Reload
- str r3, [r0, #44]
- movlt r2, r1
- ldr r1, [sp, #116] @ 4-byte Reload
- str r2, [r0, #48]
- ldr r2, [sp, #124] @ 4-byte Reload
- movlt r2, r1
- ldr r1, [sp, #120] @ 4-byte Reload
- str r2, [r0, #52]
- movlt r8, r1
- str r8, [r0, #56]
- add sp, sp, #4
- add sp, sp, #2048
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end227:
- .size mcl_fp_montNF15L, .Lfunc_end227-mcl_fp_montNF15L
- .cantunwind
- .fnend
-
- .globl mcl_fp_montRed15L
- .align 2
- .type mcl_fp_montRed15L,%function
-mcl_fp_montRed15L: @ @mcl_fp_montRed15L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #148
- sub sp, sp, #148
- .pad #1024
- sub sp, sp, #1024
- mov r3, r2
- str r0, [sp, #192] @ 4-byte Spill
- ldr r2, [r1, #4]
- ldr r7, [r1]
- ldr r0, [r3]
- str r3, [sp, #200] @ 4-byte Spill
- str r2, [sp, #108] @ 4-byte Spill
- ldr r2, [r1, #8]
- str r0, [sp, #188] @ 4-byte Spill
- ldr r0, [r3, #4]
- str r2, [sp, #104] @ 4-byte Spill
- ldr r2, [r1, #12]
- str r0, [sp, #184] @ 4-byte Spill
- ldr r0, [r3, #8]
- str r2, [sp, #100] @ 4-byte Spill
- str r0, [sp, #180] @ 4-byte Spill
- ldr r0, [r3, #12]
- str r0, [sp, #164] @ 4-byte Spill
- ldr r0, [r3, #16]
- str r0, [sp, #168] @ 4-byte Spill
- ldr r0, [r3, #20]
- str r0, [sp, #172] @ 4-byte Spill
- ldr r0, [r3, #24]
- str r0, [sp, #176] @ 4-byte Spill
- ldr r0, [r3, #-4]
- str r0, [sp, #204] @ 4-byte Spill
- mul r2, r7, r0
- ldr r0, [r3, #28]
- str r0, [sp, #132] @ 4-byte Spill
- ldr r0, [r3, #32]
- str r0, [sp, #136] @ 4-byte Spill
- ldr r0, [r3, #36]
- str r0, [sp, #140] @ 4-byte Spill
- ldr r0, [r3, #40]
- str r0, [sp, #144] @ 4-byte Spill
- ldr r0, [r3, #44]
- str r0, [sp, #148] @ 4-byte Spill
- ldr r0, [r3, #48]
- str r0, [sp, #152] @ 4-byte Spill
- ldr r0, [r3, #52]
- str r0, [sp, #156] @ 4-byte Spill
- ldr r0, [r3, #56]
- str r0, [sp, #160] @ 4-byte Spill
- ldr r0, [r1, #96]
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [r1, #100]
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [r1, #104]
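@ mcl_fp_montRed15L reduces a double-width (30 x 32-bit) input back to
@ 15 limbs: the prologue spills the modulus words loaded through r3 and
@ the input words loaded through r1, and [r3, #-4], saved at [sp, #204],
@ appears to hold the same -p^-1 mod 2^32 constant that "mul r2, r7, r0"
@ uses to form the first quotient word.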
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [r1, #108]
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [r1, #112]
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [r1, #116]
- str r0, [sp, #196] @ 4-byte Spill
- ldr r0, [r1, #64]
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [r1, #68]
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [r1, #72]
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [r1, #76]
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [r1, #80]
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [r1, #88]
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [r1, #92]
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [r1, #84]
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [r1, #32]
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [r1, #36]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [r1, #40]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [r1, #44]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [r1, #48]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [r1, #52]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [r1, #56]
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [r1, #60]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [r1, #28]
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [r1, #24]
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [r1, #20]
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [r1, #16]
- mov r1, r3
- str r0, [sp, #16] @ 4-byte Spill
- add r0, sp, #1104
- bl .LmulPv480x32(PLT)
- ldr r0, [sp, #1164]
- ldr r9, [sp, #1104]
- ldr r1, [sp, #1112]
- ldr r2, [sp, #1116]
- ldr r3, [sp, #1120]
- ldr r12, [sp, #1124]
- ldr lr, [sp, #1128]
- ldr r4, [sp, #1132]
- ldr r5, [sp, #1136]
- ldr r6, [sp, #1140]
- ldr r8, [sp, #1144]
- ldr r10, [sp, #1148]
- ldr r11, [sp, #1152]
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #1160]
- adds r7, r7, r9
- ldr r7, [sp, #108] @ 4-byte Reload
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #1156]
- str r0, [sp, #8] @ 4-byte Spill
- ldr r0, [sp, #1108]
- adcs r9, r7, r0
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #8] @ 4-byte Reload
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #16] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r4
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #200] @ 4-byte Reload
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r0, r6
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r11
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #12] @ 4-byte Reload
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #28] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #20] @ 4-byte Reload
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #24] @ 4-byte Reload
- adcs r0, r0, r1
- mov r1, r5
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #196] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #196] @ 4-byte Spill
- mov r0, #0
- adc r0, r0, #0
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #204] @ 4-byte Reload
- mul r2, r9, r0
- add r0, sp, #1040
- bl .LmulPv480x32(PLT)
- ldr r0, [sp, #1100]
- ldr r4, [sp, #1040]
- ldr r1, [sp, #1048]
- ldr r2, [sp, #1052]
- ldr r8, [sp, #1056]
- ldr r3, [sp, #1060]
- ldr r10, [sp, #1064]
- ldr r11, [sp, #1068]
- ldr r12, [sp, #1072]
- ldr r7, [sp, #1076]
- ldr r6, [sp, #1080]
- ldr lr, [sp, #1084]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #1096]
- adds r4, r9, r4
- ldr r4, [sp, #108] @ 4-byte Reload
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #1092]
- str r0, [sp, #8] @ 4-byte Spill
- ldr r0, [sp, #1088]
- str r0, [sp, #4] @ 4-byte Spill
- ldr r0, [sp, #1044]
- adcs r9, r4, r0
- ldr r0, [sp, #104] @ 4-byte Reload
- ldr r4, [sp, #204] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #4] @ 4-byte Reload
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r2
- mul r2, r9, r4
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r11
- mov r11, r9
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, r0, r7
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r6
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #8] @ 4-byte Reload
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #28] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #12] @ 4-byte Reload
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #24] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r1
- mov r1, r5
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #196] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #196] @ 4-byte Spill
- ldr r0, [sp, #20] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #24] @ 4-byte Spill
- add r0, sp, #976
- bl .LmulPv480x32(PLT)
- ldr r0, [sp, #1036]
- add lr, sp, #1000
- add r10, sp, #976
- ldr r5, [sp, #1020]
- ldr r6, [sp, #1016]
- ldr r7, [sp, #1012]
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #1032]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #1028]
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #1024]
- str r0, [sp, #8] @ 4-byte Spill
- ldm lr, {r3, r12, lr}
- ldr r9, [sp, #996]
- ldr r2, [sp, #992]
- ldm r10, {r0, r1, r8, r10}
- adds r0, r11, r0
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r1, r0, r1
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r8
- mov r8, r1
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r2
- ldr r2, [sp, #8] @ 4-byte Reload
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, r0, r7
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r6
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r0, r2
- ldr r2, [sp, #12] @ 4-byte Reload
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #28] @ 4-byte Reload
- adcs r0, r0, r2
- ldr r2, [sp, #16] @ 4-byte Reload
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r2
- ldr r2, [sp, #20] @ 4-byte Reload
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r2
- mul r2, r1, r4
- ldr r1, [sp, #200] @ 4-byte Reload
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #196] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #196] @ 4-byte Spill
- ldr r0, [sp, #24] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #28] @ 4-byte Spill
- add r0, sp, #912
- bl .LmulPv480x32(PLT)
- ldr r0, [sp, #972]
- ldr r4, [sp, #912]
- add lr, sp, #916
- ldr r11, [sp, #960]
- ldr r5, [sp, #956]
- ldr r6, [sp, #952]
- ldr r7, [sp, #948]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #968]
- adds r4, r8, r4
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #964]
- str r0, [sp, #16] @ 4-byte Spill
- ldm lr, {r0, r1, r2, r3, r9, r10, r12, lr}
- ldr r4, [sp, #108] @ 4-byte Reload
- adcs r4, r4, r0
- ldr r0, [sp, #104] @ 4-byte Reload
- str r4, [sp, #12] @ 4-byte Spill
- adcs r0, r0, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, r0, r7
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r6
- ldr r6, [sp, #200] @ 4-byte Reload
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #204] @ 4-byte Reload
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- mul r2, r4, r5
- adcs r0, r0, r11
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #20] @ 4-byte Reload
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #24] @ 4-byte Reload
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r1
- mov r1, r6
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #196] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #196] @ 4-byte Spill
- ldr r0, [sp, #28] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #32] @ 4-byte Spill
- add r0, sp, #848
- bl .LmulPv480x32(PLT)
- ldr r0, [sp, #908]
- add r10, sp, #872
- add lr, sp, #848
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #904]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #900]
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #896]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #892]
- str r0, [sp, #8] @ 4-byte Spill
- ldm r10, {r4, r7, r8, r9, r10}
- ldm lr, {r0, r1, r2, r3, r12, lr}
- ldr r11, [sp, #12] @ 4-byte Reload
- adds r0, r11, r0
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r1, r0, r1
- ldr r0, [sp, #104] @ 4-byte Reload
- mov r11, r1
- adcs r0, r0, r2
- ldr r2, [sp, #8] @ 4-byte Reload
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r4
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r7
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r2
- ldr r2, [sp, #16] @ 4-byte Reload
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r2
- ldr r2, [sp, #20] @ 4-byte Reload
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r2
- ldr r2, [sp, #24] @ 4-byte Reload
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r2
- ldr r2, [sp, #28] @ 4-byte Reload
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r2
- mul r2, r1, r5
- mov r1, r6
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #196] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #196] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #36] @ 4-byte Spill
- add r0, sp, #784
- bl .LmulPv480x32(PLT)
- ldr r0, [sp, #844]
- ldr r4, [sp, #784]
- add r10, sp, #788
- ldr lr, [sp, #832]
- ldr r5, [sp, #828]
- ldr r6, [sp, #824]
- ldr r7, [sp, #820]
- ldr r12, [sp, #816]
- ldr r3, [sp, #812]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #840]
- adds r4, r11, r4
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #836]
- str r0, [sp, #24] @ 4-byte Spill
- ldm r10, {r0, r1, r2, r8, r9, r10}
- ldr r4, [sp, #108] @ 4-byte Reload
- adcs r11, r4, r0
- ldr r0, [sp, #104] @ 4-byte Reload
- ldr r4, [sp, #204] @ 4-byte Reload
- str r11, [sp, #20] @ 4-byte Spill
- adcs r0, r0, r1
- ldr r1, [sp, #24] @ 4-byte Reload
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r2
- mul r2, r11, r4
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, r0, r7
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r6
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #200] @ 4-byte Reload
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #28] @ 4-byte Reload
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r1
- mov r1, r5
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #196] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #196] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #40] @ 4-byte Spill
- add r0, sp, #720
- bl .LmulPv480x32(PLT)
- ldr r0, [sp, #780]
- add r10, sp, #744
- add lr, sp, #720
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #776]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #772]
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #768]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #764]
- str r0, [sp, #16] @ 4-byte Spill
- ldm r10, {r6, r7, r8, r9, r10}
- ldm lr, {r0, r1, r2, r3, r12, lr}
- ldr r11, [sp, #20] @ 4-byte Reload
- adds r0, r11, r0
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r1, r0, r1
- ldr r0, [sp, #104] @ 4-byte Reload
- mov r11, r1
- adcs r0, r0, r2
- ldr r2, [sp, #16] @ 4-byte Reload
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r6
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r7
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r2
- ldr r2, [sp, #24] @ 4-byte Reload
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r2
- ldr r2, [sp, #28] @ 4-byte Reload
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r2
- ldr r2, [sp, #32] @ 4-byte Reload
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r2
- ldr r2, [sp, #36] @ 4-byte Reload
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r2
- mul r2, r1, r4
- mov r1, r5
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #196] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #196] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #44] @ 4-byte Spill
- add r0, sp, #656
- bl .LmulPv480x32(PLT)
- ldr r0, [sp, #716]
- ldr r4, [sp, #656]
- add r10, sp, #660
- ldr lr, [sp, #704]
- ldr r5, [sp, #700]
- ldr r6, [sp, #696]
- ldr r7, [sp, #692]
- ldr r12, [sp, #688]
- ldr r3, [sp, #684]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #712]
- adds r4, r11, r4
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #708]
- str r0, [sp, #32] @ 4-byte Spill
- ldm r10, {r0, r1, r2, r8, r9, r10}
- ldr r4, [sp, #108] @ 4-byte Reload
- adcs r11, r4, r0
- ldr r0, [sp, #104] @ 4-byte Reload
- ldr r4, [sp, #200] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r7
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r6
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r0, r1
- mov r1, r4
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #196] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #196] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #204] @ 4-byte Reload
- mul r2, r11, r0
- add r0, sp, #592
- bl .LmulPv480x32(PLT)
- ldr r0, [sp, #652]
- add r10, sp, #616
- add lr, sp, #592
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #648]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #644]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #640]
- str r0, [sp, #32] @ 4-byte Spill
- ldm r10, {r5, r6, r7, r8, r9, r10}
- ldm lr, {r0, r1, r2, r3, r12, lr}
- adds r0, r11, r0
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r11, r0, r1
- ldr r0, [sp, #104] @ 4-byte Reload
- ldr r1, [sp, #32] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #204] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- mul r2, r11, r5
- adcs r0, r0, r6
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r7
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r1
- mov r1, r4
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #196] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #196] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #52] @ 4-byte Spill
- add r0, sp, #528
- bl .LmulPv480x32(PLT)
- ldr r0, [sp, #588]
- ldr r4, [sp, #528]
- add r10, sp, #532
- ldr lr, [sp, #572]
- ldr r6, [sp, #568]
- ldr r7, [sp, #564]
- ldr r12, [sp, #560]
- ldr r3, [sp, #556]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #584]
- adds r4, r11, r4
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #580]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #576]
- str r0, [sp, #36] @ 4-byte Spill
- ldm r10, {r0, r1, r2, r8, r9, r10}
- ldr r4, [sp, #108] @ 4-byte Reload
- adcs r11, r4, r0
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r2
- mul r2, r11, r5
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r9
- ldr r9, [sp, #200] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r7
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r6
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, r1
- mov r1, r9
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #196] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #196] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #56] @ 4-byte Spill
- add r0, sp, #464
- bl .LmulPv480x32(PLT)
- ldr r0, [sp, #524]
- add r10, sp, #488
- add lr, sp, #464
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #520]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #516]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #512]
- str r0, [sp, #40] @ 4-byte Spill
- ldm r10, {r4, r5, r6, r7, r8, r10}
- ldm lr, {r0, r1, r2, r3, r12, lr}
- adds r0, r11, r0
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r11, r0, r1
- ldr r0, [sp, #104] @ 4-byte Reload
- ldr r1, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r4
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #204] @ 4-byte Reload
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- mul r2, r11, r5
- adcs r0, r0, r6
- mov r6, r9
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r7
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #52] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, r1
- mov r1, r6
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #196] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #196] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #60] @ 4-byte Spill
- add r0, sp, #400
- bl .LmulPv480x32(PLT)
- ldr r0, [sp, #460]
- ldr r4, [sp, #400]
- add r10, sp, #404
- ldr lr, [sp, #440]
- ldr r7, [sp, #436]
- ldr r12, [sp, #432]
- ldr r3, [sp, #428]
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #456]
- adds r4, r11, r4
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #452]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #448]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #444]
- str r0, [sp, #40] @ 4-byte Spill
- ldm r10, {r0, r1, r2, r8, r9, r10}
- ldr r4, [sp, #108] @ 4-byte Reload
- adcs r11, r4, r0
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r2
- mul r2, r11, r5
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r8
- mov r8, r6
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r7
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #56] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #52] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- adcs r0, r0, r1
- mov r1, r6
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #196] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #60] @ 4-byte Spill
- add r0, sp, #336
- bl .LmulPv480x32(PLT)
- ldr r0, [sp, #396]
- add r10, sp, #360
- add lr, sp, #336
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #392]
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #388]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #384]
- str r0, [sp, #12] @ 4-byte Spill
- ldm r10, {r4, r5, r6, r7, r9, r10}
- ldm lr, {r0, r1, r2, r3, r12, lr}
- adds r0, r11, r0
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r11, r0, r1
- ldr r0, [sp, #104] @ 4-byte Reload
- ldr r1, [sp, #12] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #196] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r4
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r6
- ldr r6, [sp, #204] @ 4-byte Reload
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- mul r2, r11, r6
- adcs r0, r0, r7
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #20] @ 4-byte Reload
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #28] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r0, r1
- mov r1, r8
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #80] @ 4-byte Spill
- add r0, sp, #272
- bl .LmulPv480x32(PLT)
- add r5, sp, #272
- add lr, sp, #288
- ldm r5, {r0, r1, r3, r5}
- ldr r9, [sp, #332]
- ldr r8, [sp, #328]
- ldr r7, [sp, #312]
- adds r0, r11, r0
- ldr r11, [sp, #324]
- ldr r0, [sp, #24] @ 4-byte Reload
- adcs r10, r0, r1
- mul r0, r10, r6
- ldr r6, [sp, #316]
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #204] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #320]
- str r0, [sp, #76] @ 4-byte Spill
- ldm lr, {r0, r1, r2, r3, r12, lr}
- ldr r4, [sp, #196] @ 4-byte Reload
- adcs r0, r4, r0
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #200] @ 4-byte Reload
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- adcs r4, r0, r2
- ldr r0, [sp, #120] @ 4-byte Reload
- ldr r2, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r7
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r6
- str r0, [sp, #196] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r11
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r8, r0, r9
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, #0
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #116] @ 4-byte Spill
- add r0, sp, #208
- bl .LmulPv480x32(PLT)
- add r3, sp, #208
- ldm r3, {r0, r1, r2, r3}
- adds r0, r10, r0
- ldr r0, [sp, #204] @ 4-byte Reload
- adcs lr, r0, r1
- ldr r0, [sp, #76] @ 4-byte Reload
- ldr r1, [sp, #52] @ 4-byte Reload
- str lr, [sp, #80] @ 4-byte Spill
- adcs r2, r0, r2
- ldr r0, [sp, #48] @ 4-byte Reload
- str r2, [sp, #84] @ 4-byte Spill
- adcs r3, r0, r3
- ldr r0, [sp, #224]
- str r3, [sp, #88] @ 4-byte Spill
- adcs r7, r1, r0
- ldr r0, [sp, #228]
- ldr r1, [sp, #56] @ 4-byte Reload
- str r7, [sp, #92] @ 4-byte Spill
- adcs r4, r4, r0
- ldr r0, [sp, #232]
- str r4, [sp, #96] @ 4-byte Spill
- adcs r5, r1, r0
- ldr r0, [sp, #236]
- ldr r1, [sp, #60] @ 4-byte Reload
- str r5, [sp, #100] @ 4-byte Spill
- adcs r6, r1, r0
- ldr r0, [sp, #240]
- ldr r1, [sp, #64] @ 4-byte Reload
- str r6, [sp, #104] @ 4-byte Spill
- adcs r11, r1, r0
- ldr r0, [sp, #244]
- ldr r1, [sp, #68] @ 4-byte Reload
- str r11, [sp, #108] @ 4-byte Spill
- adcs r0, r1, r0
- ldr r1, [sp, #196] @ 4-byte Reload
- str r0, [sp, #200] @ 4-byte Spill
- ldr r0, [sp, #248]
- adcs r0, r1, r0
- ldr r1, [sp, #112] @ 4-byte Reload
- str r0, [sp, #196] @ 4-byte Spill
- ldr r0, [sp, #252]
- adcs r0, r1, r0
- ldr r1, [sp, #124] @ 4-byte Reload
- str r0, [sp, #204] @ 4-byte Spill
- ldr r0, [sp, #256]
- adcs r10, r1, r0
- ldr r0, [sp, #260]
- ldr r1, [sp, #128] @ 4-byte Reload
- str r10, [sp, #124] @ 4-byte Spill
- adcs r9, r1, r0
- ldr r0, [sp, #264]
- ldr r1, [sp, #120] @ 4-byte Reload
- str r9, [sp, #128] @ 4-byte Spill
- adcs r8, r8, r0
- ldr r0, [sp, #268]
- adcs r12, r1, r0
- ldr r0, [sp, #116] @ 4-byte Reload
- ldr r1, [sp, #184] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #188] @ 4-byte Reload
- subs r0, lr, r0
- sbcs r1, r2, r1
- ldr r2, [sp, #180] @ 4-byte Reload
- sbcs r2, r3, r2
- ldr r3, [sp, #164] @ 4-byte Reload
- sbcs r3, r7, r3
- ldr r7, [sp, #168] @ 4-byte Reload
- sbcs lr, r4, r7
- ldr r4, [sp, #172] @ 4-byte Reload
- ldr r7, [sp, #136] @ 4-byte Reload
- sbcs r4, r5, r4
- ldr r5, [sp, #176] @ 4-byte Reload
- sbcs r5, r6, r5
- ldr r6, [sp, #132] @ 4-byte Reload
- sbcs r6, r11, r6
- ldr r11, [sp, #200] @ 4-byte Reload
- str r6, [sp, #172] @ 4-byte Spill
- sbcs r6, r11, r7
- ldr r7, [sp, #140] @ 4-byte Reload
- ldr r11, [sp, #204] @ 4-byte Reload
- str r6, [sp, #176] @ 4-byte Spill
- ldr r6, [sp, #196] @ 4-byte Reload
- sbcs r6, r6, r7
- ldr r7, [sp, #144] @ 4-byte Reload
- str r6, [sp, #180] @ 4-byte Spill
- sbcs r6, r11, r7
- ldr r7, [sp, #148] @ 4-byte Reload
- str r6, [sp, #184] @ 4-byte Spill
- sbcs r6, r10, r7
- ldr r7, [sp, #152] @ 4-byte Reload
- mov r10, r8
- str r6, [sp, #188] @ 4-byte Spill
- sbcs r6, r9, r7
- ldr r7, [sp, #156] @ 4-byte Reload
- sbcs r11, r8, r7
- ldr r7, [sp, #160] @ 4-byte Reload
- mov r8, r12
- sbcs r9, r12, r7
- ldr r7, [sp, #120] @ 4-byte Reload
- sbc r7, r7, #0
- ands r12, r7, #1
- ldr r7, [sp, #80] @ 4-byte Reload
- movne r0, r7
- ldr r7, [sp, #192] @ 4-byte Reload
- str r0, [r7]
- ldr r0, [sp, #84] @ 4-byte Reload
- movne r1, r0
- ldr r0, [sp, #88] @ 4-byte Reload
- str r1, [r7, #4]
- ldr r1, [sp, #108] @ 4-byte Reload
- movne r2, r0
- ldr r0, [sp, #92] @ 4-byte Reload
- cmp r12, #0
- str r2, [r7, #8]
- movne r3, r0
- ldr r0, [sp, #96] @ 4-byte Reload
- str r3, [r7, #12]
- movne lr, r0
- ldr r0, [sp, #100] @ 4-byte Reload
- str lr, [r7, #16]
- movne r4, r0
- ldr r0, [sp, #104] @ 4-byte Reload
- cmp r12, #0
- str r4, [r7, #20]
- movne r5, r0
- ldr r0, [sp, #172] @ 4-byte Reload
- movne r0, r1
- str r5, [r7, #24]
- ldr r1, [sp, #176] @ 4-byte Reload
- str r0, [r7, #28]
- ldr r0, [sp, #200] @ 4-byte Reload
- movne r1, r0
- ldr r0, [sp, #196] @ 4-byte Reload
- cmp r12, #0
- str r1, [r7, #32]
- ldr r1, [sp, #180] @ 4-byte Reload
- movne r1, r0
- ldr r0, [sp, #204] @ 4-byte Reload
- str r1, [r7, #36]
- ldr r1, [sp, #184] @ 4-byte Reload
- movne r1, r0
- ldr r0, [sp, #188] @ 4-byte Reload
- str r1, [r7, #40]
- ldr r1, [sp, #124] @ 4-byte Reload
- movne r0, r1
- cmp r12, #0
- str r0, [r7, #44]
- ldr r0, [sp, #128] @ 4-byte Reload
- movne r11, r10
- movne r9, r8
- movne r6, r0
- str r6, [r7, #48]
- str r11, [r7, #52]
- str r9, [r7, #56]
- add sp, sp, #148
- add sp, sp, #1024
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end228:
- .size mcl_fp_montRed15L, .Lfunc_end228-mcl_fp_montRed15L
- .cantunwind
- .fnend
-
- .globl mcl_fp_addPre15L
- .align 2
- .type mcl_fp_addPre15L,%function
-mcl_fp_addPre15L: @ @mcl_fp_addPre15L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #52
- sub sp, sp, #52
- ldm r1, {r3, r7, r11}
- ldr r10, [r2]
- ldr r5, [r2, #16]
- ldr r6, [r2, #4]
- ldr r4, [r2, #8]
- ldr r12, [r2, #12]
- ldr r8, [r1, #12]
- ldr r9, [r1, #56]
- adds lr, r10, r3
- ldr r3, [r2, #32]
- str r5, [sp, #8] @ 4-byte Spill
- ldr r5, [r2, #20]
- ldr r10, [r1, #44]
- adcs r6, r6, r7
- adcs r4, r4, r11
- ldr r11, [r1, #40]
- adcs r7, r12, r8
- add r12, r1, #16
- ldr r8, [r1, #52]
- str r3, [sp, #20] @ 4-byte Spill
- ldr r3, [r2, #36]
- str r5, [sp, #12] @ 4-byte Spill
- ldr r5, [r2, #24]
- str r3, [sp, #28] @ 4-byte Spill
- ldr r3, [r2, #40]
- str r5, [sp, #16] @ 4-byte Spill
- ldr r5, [r2, #28]
- str r3, [sp, #32] @ 4-byte Spill
- ldr r3, [r2, #44]
- str r5, [sp, #24] @ 4-byte Spill
- ldr r5, [r1, #32]
- str r3, [sp, #36] @ 4-byte Spill
- ldr r3, [r2, #48]
- str r3, [sp, #40] @ 4-byte Spill
- ldr r3, [r2, #52]
- ldr r2, [r2, #56]
- str r2, [sp, #48] @ 4-byte Spill
- ldr r2, [r1, #36]
- str r3, [sp, #44] @ 4-byte Spill
- str r2, [sp] @ 4-byte Spill
- ldr r2, [r1, #48]
- str r2, [sp, #4] @ 4-byte Spill
- ldm r12, {r1, r2, r3, r12}
- str lr, [r0]
- str r6, [r0, #4]
- ldr r6, [sp, #8] @ 4-byte Reload
- str r4, [r0, #8]
- str r7, [r0, #12]
- ldr r7, [sp, #12] @ 4-byte Reload
- ldr r4, [sp, #48] @ 4-byte Reload
- adcs r1, r6, r1
- ldr r6, [sp, #40] @ 4-byte Reload
- adcs r2, r7, r2
- str r1, [r0, #16]
- ldr r1, [sp, #16] @ 4-byte Reload
- ldr r7, [sp, #36] @ 4-byte Reload
- str r2, [r0, #20]
- ldr r2, [sp, #24] @ 4-byte Reload
- adcs r1, r1, r3
- ldr r3, [sp] @ 4-byte Reload
- adcs r2, r2, r12
- str r1, [r0, #24]
- ldr r1, [sp, #20] @ 4-byte Reload
- add r12, r0, #32
- str r2, [r0, #28]
- ldr r2, [sp, #28] @ 4-byte Reload
- adcs r1, r1, r5
- ldr r5, [sp, #4] @ 4-byte Reload
- adcs r2, r2, r3
- ldr r3, [sp, #32] @ 4-byte Reload
- adcs r3, r3, r11
- adcs r7, r7, r10
- adcs r6, r6, r5
- ldr r5, [sp, #44] @ 4-byte Reload
- stm r12, {r1, r2, r3, r7}
- str r6, [r0, #48]
- adcs r5, r5, r8
- adcs r4, r4, r9
- str r5, [r0, #52]
- str r4, [r0, #56]
- mov r0, #0
- adc r0, r0, #0
- add sp, sp, #52
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end229: - .size mcl_fp_addPre15L, .Lfunc_end229-mcl_fp_addPre15L - .cantunwind - .fnend - - .globl mcl_fp_subPre15L - .align 2 - .type mcl_fp_subPre15L,%function -mcl_fp_subPre15L: @ @mcl_fp_subPre15L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #56 - sub sp, sp, #56 - ldm r2, {r3, r11} - ldr r7, [r1] - ldr r5, [r2, #8] - ldr r6, [r2, #12] - ldmib r1, {r4, r12, lr} - ldr r8, [r1, #32] - ldr r10, [r1, #52] - subs r3, r7, r3 - ldr r7, [r2, #24] - str r3, [sp, #24] @ 4-byte Spill - ldr r3, [r2, #32] - sbcs r4, r4, r11 - sbcs r5, r12, r5 - add r12, r1, #16 - sbcs r11, lr, r6 - ldr r6, [r2, #20] - ldr lr, [r2, #16] - str r3, [sp, #28] @ 4-byte Spill - ldr r3, [r2, #36] - str r3, [sp, #32] @ 4-byte Spill - ldr r3, [r2, #40] - str r3, [sp, #36] @ 4-byte Spill - ldr r3, [r2, #44] - str r3, [sp, #40] @ 4-byte Spill - ldr r3, [r2, #48] - str r3, [sp, #44] @ 4-byte Spill - ldr r3, [r2, #52] - str r3, [sp, #48] @ 4-byte Spill - ldr r3, [r2, #56] - str r3, [sp, #52] @ 4-byte Spill - ldr r3, [r2, #28] - ldr r2, [r1, #36] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #40] - str r3, [sp, #20] @ 4-byte Spill - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #44] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #48] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r1, #56] - str r2, [sp, #16] @ 4-byte Spill - ldm r12, {r1, r2, r3, r12} - ldr r9, [sp, #24] @ 4-byte Reload - sbcs r1, r1, lr - str r9, [r0] - stmib r0, {r4, r5} - str r11, [r0, #12] - sbcs r2, r2, r6 - str r1, [r0, #16] - ldr r6, [sp, #44] @ 4-byte Reload - ldr r5, [sp, #48] @ 4-byte Reload - ldr r4, [sp, #52] @ 4-byte Reload - sbcs r1, r3, r7 - str r2, [r0, #20] - ldr r2, [sp, #20] @ 4-byte Reload - ldr r3, [sp, #36] @ 4-byte Reload - ldr r7, [sp, #40] @ 4-byte Reload - str r1, [r0, #24] - ldr r1, [sp, #28] @ 4-byte Reload - sbcs r2, r12, r2 - sbcs r12, r8, r1 - str r2, [r0, #28] - ldr r2, [sp, #32] @ 4-byte Reload - ldr r1, [sp] @ 4-byte Reload - str r12, [r0, #32] - sbcs r2, r1, r2 - ldr r1, [sp, #4] @ 4-byte Reload - sbcs r3, r1, r3 - ldr r1, [sp, #8] @ 4-byte Reload - sbcs r7, r1, r7 - ldr r1, [sp, #12] @ 4-byte Reload - sbcs r6, r1, r6 - ldr r1, [sp, #16] @ 4-byte Reload - sbcs r5, r10, r5 - sbcs r4, r1, r4 - add r1, r0, #36 - stm r1, {r2, r3, r7} - str r6, [r0, #48] - str r5, [r0, #52] - str r4, [r0, #56] - mov r0, #0 - sbc r0, r0, #0 - and r0, r0, #1 - add sp, sp, #56 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end230: - .size mcl_fp_subPre15L, .Lfunc_end230-mcl_fp_subPre15L - .cantunwind - .fnend - - .globl mcl_fp_shr1_15L - .align 2 - .type mcl_fp_shr1_15L,%function -mcl_fp_shr1_15L: @ @mcl_fp_shr1_15L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #24 - sub sp, sp, #24 - ldmib r1, {r2, r3, r4, r5, r6, r10} - ldr r7, [r1] - ldr r11, [r1, #52] - ldr r8, [r1, #28] - ldr lr, [r1, #32] - ldr r12, [r1, #36] - ldr r9, [r1, #44] - str r7, [sp, #4] @ 4-byte Spill - lsr r7, r2, #1 - str r11, [sp, #16] @ 4-byte Spill - orr r7, r7, r3, lsl #31 - str r7, [sp] @ 4-byte Spill - ldr r7, [r1, #40] - str r7, [sp, #8] @ 4-byte Spill - ldr r7, [r1, #48] - ldr r1, [r1, #56] - str r1, [sp, #20] @ 4-byte Spill - lsr r1, r4, #1 - lsrs r4, r4, #1 - str r7, [sp, #12] @ 4-byte Spill - rrx r3, r3 - lsrs r2, r2, #1 - orr r1, r1, r5, lsl #31 - ldr r2, [sp, #4] @ 4-byte Reload - rrx r2, r2 - str r2, [r0] - ldr r2, [sp] @ 4-byte Reload - 
stmib r0, {r2, r3} - str r1, [r0, #12] - lsrs r1, r6, #1 - lsr r2, r12, #1 - rrx r1, r5 - ldr r7, [sp, #8] @ 4-byte Reload - ldr r5, [sp, #16] @ 4-byte Reload - ldr r4, [sp, #12] @ 4-byte Reload - str r1, [r0, #16] - lsr r1, r6, #1 - orr r1, r1, r10, lsl #31 - str r1, [r0, #20] - lsrs r1, r8, #1 - rrx r1, r10 - orr r2, r2, r7, lsl #31 - str r1, [r0, #24] - lsr r1, r8, #1 - orr r1, r1, lr, lsl #31 - str r1, [r0, #28] - lsrs r1, r12, #1 - add r12, r0, #32 - rrx r1, lr - lsrs r3, r9, #1 - rrx r3, r7 - lsrs r6, r5, #1 - lsr r7, r9, #1 - lsr r5, r5, #1 - orr r7, r7, r4, lsl #31 - rrx r6, r4 - ldr r4, [sp, #20] @ 4-byte Reload - stm r12, {r1, r2, r3, r7} - str r6, [r0, #48] - orr r5, r5, r4, lsl #31 - lsr r4, r4, #1 - str r5, [r0, #52] - str r4, [r0, #56] - add sp, sp, #24 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end231: - .size mcl_fp_shr1_15L, .Lfunc_end231-mcl_fp_shr1_15L - .cantunwind - .fnend - - .globl mcl_fp_add15L - .align 2 - .type mcl_fp_add15L,%function -mcl_fp_add15L: @ @mcl_fp_add15L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #60 - sub sp, sp, #60 - ldr r9, [r1] - ldmib r1, {r8, lr} - ldr r12, [r1, #12] - ldm r2, {r4, r5, r6, r7} - adds r10, r4, r9 - ldr r4, [r1, #24] - adcs r11, r5, r8 - ldr r5, [r1, #20] - mov r8, r10 - adcs r6, r6, lr - mov lr, r11 - str r8, [r0] - adcs r9, r7, r12 - str r6, [sp, #40] @ 4-byte Spill - ldr r6, [r1, #16] - ldr r7, [r2, #16] - str lr, [r0, #4] - str r9, [sp, #8] @ 4-byte Spill - adcs r7, r7, r6 - ldr r6, [r2, #48] - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [r2, #20] - adcs r7, r7, r5 - ldr r5, [r2, #28] - str r7, [sp, #32] @ 4-byte Spill - ldr r7, [r2, #24] - ldr r10, [sp, #32] @ 4-byte Reload - adcs r7, r7, r4 - ldr r4, [r2, #32] - str r7, [sp, #56] @ 4-byte Spill - ldr r7, [r1, #28] - adcs r7, r5, r7 - str r7, [sp, #12] @ 4-byte Spill - ldr r7, [r1, #32] - ldr r11, [sp, #12] @ 4-byte Reload - adcs r7, r4, r7 - ldr r4, [r2, #36] - str r7, [sp, #52] @ 4-byte Spill - ldr r7, [r1, #36] - adcs r7, r4, r7 - ldr r4, [r2, #40] - str r7, [sp, #44] @ 4-byte Spill - ldr r7, [r1, #40] - adcs r7, r4, r7 - ldr r4, [r2, #44] - str r7, [sp, #48] @ 4-byte Spill - ldr r7, [r1, #44] - adcs r5, r4, r7 - ldr r7, [r1, #48] - ldr r4, [sp, #40] @ 4-byte Reload - str r5, [sp, #28] @ 4-byte Spill - adcs r12, r6, r7 - ldr r7, [r1, #52] - ldr r6, [r2, #52] - ldr r1, [r1, #56] - ldr r2, [r2, #56] - str r4, [r0, #8] - str r9, [r0, #12] - ldr r9, [sp, #36] @ 4-byte Reload - adcs r6, r6, r7 - str r9, [r0, #16] - str r10, [r0, #20] - add r7, r0, #40 - adcs r2, r2, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r6, [sp, #24] @ 4-byte Spill - str r2, [sp, #20] @ 4-byte Spill - str r1, [r0, #24] - ldr r1, [sp, #52] @ 4-byte Reload - str r11, [r0, #28] - str r1, [r0, #32] - ldr r1, [sp, #44] @ 4-byte Reload - str r1, [r0, #36] - ldr r1, [sp, #48] @ 4-byte Reload - stm r7, {r1, r5, r12} - str r6, [r0, #52] - str r2, [r0, #56] - mov r2, #0 - adc r1, r2, #0 - str r1, [sp, #16] @ 4-byte Spill - ldm r3, {r6, r7} - ldr r1, [r3, #8] - ldr r2, [r3, #12] - subs r5, r8, r6 - sbcs r7, lr, r7 - str r5, [sp, #4] @ 4-byte Spill - sbcs r1, r4, r1 - str r7, [sp] @ 4-byte Spill - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #8] @ 4-byte Reload - sbcs r1, r1, r2 - ldr r2, [sp, #56] @ 4-byte Reload - str r1, [sp, #8] @ 4-byte Spill - ldr r1, [r3, #16] - sbcs r9, r9, r1 - ldr r1, [r3, #20] - sbcs r1, r10, r1 - add r10, r3, #32 - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [r3, #24] - sbcs r1, r2, r1 - 
str r1, [sp, #56] @ 4-byte Spill - ldr r1, [r3, #28] - sbcs r11, r11, r1 - ldm r10, {r1, r2, r6, r10} - ldr r5, [sp, #52] @ 4-byte Reload - ldr r8, [r3, #48] - ldr r7, [r3, #52] - ldr r3, [r3, #56] - sbcs r1, r5, r1 - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - sbcs r4, r1, r2 - ldr r1, [sp, #48] @ 4-byte Reload - sbcs r2, r1, r6 - ldr r1, [sp, #28] @ 4-byte Reload - sbcs lr, r1, r10 - ldr r1, [sp, #24] @ 4-byte Reload - sbcs r6, r12, r8 - sbcs r5, r1, r7 - ldr r1, [sp, #20] @ 4-byte Reload - sbcs r1, r1, r3 - ldr r3, [sp, #16] @ 4-byte Reload - sbc r3, r3, #0 - tst r3, #1 - bne .LBB232_2 -@ BB#1: @ %nocarry - ldr r3, [sp, #4] @ 4-byte Reload - str r3, [r0] - ldr r3, [sp] @ 4-byte Reload - str r3, [r0, #4] - ldr r3, [sp, #40] @ 4-byte Reload - str r3, [r0, #8] - ldr r3, [sp, #8] @ 4-byte Reload - str r3, [r0, #12] - ldr r3, [sp, #36] @ 4-byte Reload - str r9, [r0, #16] - str r3, [r0, #20] - ldr r3, [sp, #56] @ 4-byte Reload - str r3, [r0, #24] - ldr r3, [sp, #52] @ 4-byte Reload - str r11, [r0, #28] - str r3, [r0, #32] - str r4, [r0, #36] - str r2, [r0, #40] - str lr, [r0, #44] - str r6, [r0, #48] - str r5, [r0, #52] - str r1, [r0, #56] -.LBB232_2: @ %carry - add sp, sp, #60 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end232: - .size mcl_fp_add15L, .Lfunc_end232-mcl_fp_add15L - .cantunwind - .fnend - - .globl mcl_fp_addNF15L - .align 2 - .type mcl_fp_addNF15L,%function -mcl_fp_addNF15L: @ @mcl_fp_addNF15L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #92 - sub sp, sp, #92 - ldr r9, [r1] - ldmib r1, {r8, lr} - ldr r12, [r1, #12] - ldm r2, {r4, r5, r6, r7} - add r11, r3, #32 - adds r10, r4, r9 - ldr r4, [r1, #24] - adcs r9, r5, r8 - ldr r5, [r1, #20] - str r10, [sp, #20] @ 4-byte Spill - adcs lr, r6, lr - ldr r6, [r1, #16] - str r9, [sp, #24] @ 4-byte Spill - adcs r8, r7, r12 - ldr r7, [r2, #16] - str lr, [sp, #28] @ 4-byte Spill - str r8, [sp, #32] @ 4-byte Spill - adcs r7, r7, r6 - ldr r6, [r2, #28] - str r7, [sp, #48] @ 4-byte Spill - ldr r7, [r2, #20] - adcs r7, r7, r5 - str r7, [sp, #52] @ 4-byte Spill - ldr r7, [r2, #24] - adcs r7, r7, r4 - str r7, [sp, #64] @ 4-byte Spill - ldr r7, [r1, #28] - adcs r7, r6, r7 - ldr r6, [r2, #32] - str r7, [sp, #60] @ 4-byte Spill - ldr r7, [r1, #32] - adcs r7, r6, r7 - ldr r6, [r2, #36] - str r7, [sp, #56] @ 4-byte Spill - ldr r7, [r1, #36] - adcs r7, r6, r7 - ldr r6, [r2, #40] - str r7, [sp, #76] @ 4-byte Spill - ldr r7, [r1, #40] - adcs r7, r6, r7 - ldr r6, [r2, #44] - str r7, [sp, #72] @ 4-byte Spill - ldr r7, [r1, #44] - adcs r7, r6, r7 - ldr r6, [r2, #48] - str r7, [sp, #68] @ 4-byte Spill - ldr r7, [r1, #48] - adcs r7, r6, r7 - ldr r6, [r2, #52] - ldr r2, [r2, #56] - str r7, [sp, #88] @ 4-byte Spill - ldr r7, [r1, #52] - ldr r1, [r1, #56] - adcs r7, r6, r7 - adc r1, r2, r1 - str r7, [sp, #84] @ 4-byte Spill - str r1, [sp, #80] @ 4-byte Spill - ldmib r3, {r1, r5, r7} - ldr r2, [r3, #16] - ldr r4, [r3] - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [r3, #20] - str r2, [sp, #36] @ 4-byte Spill - ldr r2, [r3, #24] - str r2, [sp, #40] @ 4-byte Spill - ldr r2, [r3, #28] - str r2, [sp, #44] @ 4-byte Spill - subs r2, r10, r4 - sbcs r12, r9, r1 - ldm r11, {r9, r10, r11} - ldr r1, [r3, #44] - ldr r4, [sp, #36] @ 4-byte Reload - sbcs lr, lr, r5 - ldr r5, [sp, #64] @ 4-byte Reload - sbcs r6, r8, r7 - ldr r7, [sp, #60] @ 4-byte Reload - str r1, [sp] @ 4-byte Spill - ldr r1, [r3, #48] - str r1, [sp, #4] @ 4-byte Spill - ldr r1, [r3, #52] - 
str r1, [sp, #8] @ 4-byte Spill - ldr r1, [r3, #56] - ldr r3, [sp, #16] @ 4-byte Reload - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp, #48] @ 4-byte Reload - sbcs r3, r1, r3 - ldr r1, [sp, #52] @ 4-byte Reload - sbcs r4, r1, r4 - ldr r1, [sp, #40] @ 4-byte Reload - sbcs r5, r5, r1 - ldr r1, [sp, #44] @ 4-byte Reload - sbcs r8, r7, r1 - ldr r1, [sp, #56] @ 4-byte Reload - ldr r7, [sp] @ 4-byte Reload - sbcs r9, r1, r9 - ldr r1, [sp, #76] @ 4-byte Reload - sbcs r10, r1, r10 - ldr r1, [sp, #72] @ 4-byte Reload - sbcs r1, r1, r11 - ldr r11, [sp, #20] @ 4-byte Reload - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #68] @ 4-byte Reload - sbcs r1, r1, r7 - ldr r7, [sp, #4] @ 4-byte Reload - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #88] @ 4-byte Reload - sbcs r1, r1, r7 - ldr r7, [sp, #8] @ 4-byte Reload - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - sbcs r1, r1, r7 - ldr r7, [sp, #12] @ 4-byte Reload - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - sbc r7, r1, r7 - asr r1, r7, #31 - cmp r1, #0 - movlt r2, r11 - str r2, [r0] - ldr r2, [sp, #24] @ 4-byte Reload - movlt r12, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r12, [r0, #4] - movlt lr, r2 - ldr r2, [sp, #32] @ 4-byte Reload - cmp r1, #0 - str lr, [r0, #8] - movlt r6, r2 - ldr r2, [sp, #48] @ 4-byte Reload - str r6, [r0, #12] - movlt r3, r2 - ldr r2, [sp, #52] @ 4-byte Reload - str r3, [r0, #16] - ldr r3, [sp, #16] @ 4-byte Reload - movlt r4, r2 - ldr r2, [sp, #64] @ 4-byte Reload - cmp r1, #0 - str r4, [r0, #20] - movlt r5, r2 - ldr r2, [sp, #60] @ 4-byte Reload - str r5, [r0, #24] - movlt r8, r2 - ldr r2, [sp, #56] @ 4-byte Reload - str r8, [r0, #28] - movlt r9, r2 - ldr r2, [sp, #76] @ 4-byte Reload - cmp r1, #0 - str r9, [r0, #32] - movlt r10, r2 - ldr r2, [sp, #72] @ 4-byte Reload - str r10, [r0, #36] - movlt r3, r2 - ldr r2, [sp, #68] @ 4-byte Reload - str r3, [r0, #40] - ldr r3, [sp, #36] @ 4-byte Reload - movlt r3, r2 - cmp r1, #0 - ldr r1, [sp, #88] @ 4-byte Reload - ldr r2, [sp, #40] @ 4-byte Reload - str r3, [r0, #44] - movlt r2, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r2, [r0, #48] - ldr r2, [sp, #44] @ 4-byte Reload - movlt r2, r1 - ldr r1, [sp, #80] @ 4-byte Reload - str r2, [r0, #52] - movlt r7, r1 - str r7, [r0, #56] - add sp, sp, #92 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end233: - .size mcl_fp_addNF15L, .Lfunc_end233-mcl_fp_addNF15L - .cantunwind - .fnend - - .globl mcl_fp_sub15L - .align 2 - .type mcl_fp_sub15L,%function -mcl_fp_sub15L: @ @mcl_fp_sub15L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #64 - sub sp, sp, #64 - ldr r9, [r2] - ldmib r2, {r8, lr} - ldr r5, [r1] - ldr r12, [r2, #12] - ldmib r1, {r4, r6, r7} - subs r5, r5, r9 - sbcs r4, r4, r8 - str r5, [sp, #48] @ 4-byte Spill - ldr r5, [r2, #24] - sbcs r6, r6, lr - str r4, [sp, #60] @ 4-byte Spill - ldr r4, [r2, #20] - sbcs r7, r7, r12 - str r6, [sp, #56] @ 4-byte Spill - ldr r6, [r2, #16] - str r7, [sp, #52] @ 4-byte Spill - ldr r7, [r1, #16] - sbcs r9, r7, r6 - ldr r7, [r1, #20] - ldr r6, [r1, #28] - str r9, [sp, #40] @ 4-byte Spill - sbcs r7, r7, r4 - str r7, [sp, #44] @ 4-byte Spill - ldr r7, [r1, #24] - sbcs r5, r7, r5 - ldr r7, [r2, #28] - sbcs r10, r6, r7 - ldr r7, [r2, #32] - ldr r6, [r1, #32] - str r10, [sp, #36] @ 4-byte Spill - sbcs r11, r6, r7 - ldr r7, [r2, #36] - ldr r6, [r1, #36] - str r11, [sp, #32] @ 4-byte Spill - sbcs lr, r6, r7 - ldr r7, [r2, #40] - ldr r6, [r1, #40] - str lr, 
	.globl	mcl_fp_sub15L
	.align	2
	.type	mcl_fp_sub15L,%function
mcl_fp_sub15L:                          @ @mcl_fp_sub15L
	.fnstart
@ BB#0:
	.save	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	push	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	.pad	#64
	sub	sp, sp, #64
	@ [machine-generated body: 15-word subtract (subs/sbcs), result stored,
	@  then on a final borrow (tst r2, #1 / beq .LBB234_2) the %carry block
	@  adds the modulus back with an adds/adcs chain]
.LBB234_2:                              @ %nocarry
	add	sp, sp, #64
	pop	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	mov	pc, lr
.Lfunc_end234:
	.size	mcl_fp_sub15L, .Lfunc_end234-mcl_fp_sub15L
	.cantunwind
	.fnend

	.globl	mcl_fp_subNF15L
	.align	2
	.type	mcl_fp_subNF15L,%function
mcl_fp_subNF15L:                        @ @mcl_fp_subNF15L
	.fnstart
@ BB#0:
	.save	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	push	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	.pad	#84
	sub	sp, sp, #84
	@ [machine-generated body: 15-word subtract, conditional add of the
	@  modulus selected branch-free on the sign of the top word
	@  (asr r1, r0, #31 / cmp r1, #0 / movge)]
	add	sp, sp, #84
	pop	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	mov	pc, lr
.Lfunc_end235:
	.size	mcl_fp_subNF15L, .Lfunc_end235-mcl_fp_subNF15L
	.cantunwind
	.fnend
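mcl_fp_sub15L and mcl_fp_subNF15L are the matching 480-bit modular subtractions: a full-width subtract, then one conditional addition of the modulus when the subtraction borrows (a beq branch in the plain version, movge selection on the sign word in the NF version). A minimal C model under the same 32-bit-limb assumption; fp_sub15_ref is an illustrative name:

#include <stdint.h>

enum { NS15 = 15 };

/* z = x - y (mod p); if the subs/sbcs chain borrows, the %carry block
   adds p back once to land in [0, p). */
static void fp_sub15_ref(uint32_t z[NS15], const uint32_t x[NS15],
                         const uint32_t y[NS15], const uint32_t p[NS15]) {
    uint64_t borrow = 0;
    for (int i = 0; i < NS15; i++) {     /* subs/sbcs */
        uint64_t d = (uint64_t)x[i] - y[i] - borrow;
        z[i] = (uint32_t)d;
        borrow = (d >> 32) & 1;
    }
    if (borrow) {                        /* %carry: add the modulus back */
        uint64_t c = 0;
        for (int i = 0; i < NS15; i++) { /* adds/adcs of p */
            uint64_t s = (uint64_t)z[i] + p[i] + c;
            z[i] = (uint32_t)s;
            c = s >> 32;
        }
    }
}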
	.globl	mcl_fpDbl_add15L
	.align	2
	.type	mcl_fpDbl_add15L,%function
mcl_fpDbl_add15L:                       @ @mcl_fpDbl_add15L
	.fnstart
@ BB#0:
	.save	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	push	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	.pad	#184
	sub	sp, sp, #184
	@ [machine-generated body: 30-word add (adds/adcs), the low 15 words
	@  stored directly to [r0] .. [r0, #56], the high 15 words reduced once
	@  by a trial subtraction of the modulus and a movne selection keyed on
	@  ands r2, r2, #1]
	add	sp, sp, #184
	pop	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	mov	pc, lr
.Lfunc_end236:
	.size	mcl_fpDbl_add15L, .Lfunc_end236-mcl_fpDbl_add15L
	.cantunwind
	.fnend
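mcl_fpDbl_add15L (and mcl_fpDbl_sub15L, which follows) work on 960-bit double-width values such as the outputs of the mulPre routines: the low 15 words pass through unchanged, and only the high 15 words are reduced once modulo p. A minimal C model, assuming 32-bit limbs; fpDbl_add15_ref is an illustrative name:

#include <stdint.h>

enum { ND15 = 15 };

/* z[0..29] = x + y, with z[15..29] reduced once modulo p.  The movne
   selection in the listing corresponds to the carry/borrow test here. */
static void fpDbl_add15_ref(uint32_t z[2 * ND15], const uint32_t x[2 * ND15],
                            const uint32_t y[2 * ND15], const uint32_t p[ND15]) {
    uint64_t carry = 0, borrow = 0;
    uint32_t t[ND15];
    for (int i = 0; i < 2 * ND15; i++) { /* 30-word adds/adcs chain */
        uint64_t s = (uint64_t)x[i] + y[i] + carry;
        z[i] = (uint32_t)s;
        carry = s >> 32;
    }
    for (int i = 0; i < ND15; i++) {     /* trial subtract p from the top */
        uint64_t d = (uint64_t)z[ND15 + i] - p[i] - borrow;
        t[i] = (uint32_t)d;
        borrow = (d >> 32) & 1;
    }
    if (borrow <= carry)                 /* subtraction did not underflow */
        for (int i = 0; i < ND15; i++) z[ND15 + i] = t[i];
}

The subtraction twin does the mirror image: a 30-word sbcs chain, then one conditional add of p into the upper half when the whole subtraction borrowed.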
	.globl	mcl_fpDbl_sub15L
	.align	2
	.type	mcl_fpDbl_sub15L,%function
mcl_fpDbl_sub15L:                       @ @mcl_fpDbl_sub15L
	.fnstart
@ BB#0:
	.save	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	push	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	.pad	#184
	sub	sp, sp, #184
	@ [machine-generated body: 30-word subtract (subs/sbcs), the low 15
	@  words stored directly, the modulus conditionally added back into the
	@  high 15 words via a moveq selection keyed on ands r7, r7, #1]
	add	sp, sp, #184
	pop	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	mov	pc, lr
.Lfunc_end237:
	.size	mcl_fpDbl_sub15L, .Lfunc_end237-mcl_fpDbl_sub15L
	.cantunwind
	.fnend

	.align	2
	.type	.LmulPv512x32,%function
.LmulPv512x32:                          @ @mulPv512x32
	.fnstart
@ BB#0:
	.save	{r4, r5, r6, r7, r8, r9, r11, lr}
	push	{r4, r5, r6, r7, r8, r9, r11, lr}
	ldr	r12, [r1]
	ldmib	r1, {r3, lr}
	ldr	r9, [r1, #12]
	umull	r4, r8, lr, r2
	umull	lr, r6, r12, r2
	mov	r5, r4
	mov	r7, r6
	str	lr, [r0]
	umull	lr, r12, r9, r2
	umlal	r7, r5, r3, r2
	str	r5, [r0, #8]
	str	r7, [r0, #4]
	umull	r5, r7, r3, r2
	adds	r3, r6, r5
	adcs	r3, r7, r4
	adcs	r3, r8, lr
	str	r3, [r0, #12]
	ldr	r3, [r1, #16]
	umull	r7, r6, r3, r2
	adcs	r3, r12, r7
	str	r3, [r0, #16]
	ldr	r3, [r1, #20]
	umull	r7, r5, r3, r2
	adcs	r3, r6, r7
	str	r3, [r0, #20]
	ldr	r3, [r1, #24]
	umull	r7, r6, r3, r2
	adcs	r3, r5, r7
	str	r3, [r0, #24]
	ldr	r3, [r1, #28]
	umull	r7, r5, r3, r2
	adcs	r3, r6, r7
	str	r3, [r0, #28]
	ldr	r3, [r1, #32]
	umull	r7, r6, r3, r2
	adcs	r3, r5, r7
	str	r3, [r0, #32]
	ldr	r3, [r1, #36]
	umull	r7, r5, r3, r2
	adcs	r3, r6, r7
	str	r3, [r0, #36]
	ldr	r3, [r1, #40]
	umull	r7, r6, r3, r2
	adcs	r3, r5, r7
	str	r3, [r0, #40]
	ldr	r3, [r1, #44]
	umull	r7, r5, r3, r2
	adcs	r3, r6, r7
	str	r3, [r0, #44]
	ldr	r3, [r1, #48]
	umull	r7, r6, r3, r2
	adcs	r3, r5, r7
	str	r3, [r0, #48]
	ldr	r3, [r1, #52]
	umull	r7, r5, r3, r2
	adcs	r3, r6, r7
	str	r3, [r0, #52]
	ldr	r3, [r1, #56]
	umull	r7, r6, r3, r2
	adcs	r3, r5, r7
	str	r3, [r0, #56]
	ldr	r1, [r1, #60]
	umull	r3, r7, r1, r2
	adcs	r1, r6, r3
	str	r1, [r0, #60]
	adc	r1, r7, #0
	str	r1, [r0, #64]
	pop	{r4, r5, r6, r7, r8, r9, r11, lr}
	mov	pc, lr
.Lfunc_end238:
	.size	.LmulPv512x32, .Lfunc_end238-.LmulPv512x32
	.cantunwind
	.fnend

	.globl	mcl_fp_mulUnitPre16L
	.align	2
	.type	mcl_fp_mulUnitPre16L,%function
mcl_fp_mulUnitPre16L:                   @ @mcl_fp_mulUnitPre16L
	.fnstart
@ BB#0:
	.save	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	push	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	.pad	#92
	sub	sp, sp, #92
	mov	r4, r0
	add	r0, sp, #16
	bl	.LmulPv512x32(PLT)
	@ [machine-generated tail: copies the 17-word product from the stack
	@  buffer to the destination with ldm/stm and spill reloads]
	add	sp, sp, #92
	pop	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	mov	pc, lr
.Lfunc_end239:
	.size	mcl_fp_mulUnitPre16L, .Lfunc_end239-mcl_fp_mulUnitPre16L
	.cantunwind
	.fnend
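.LmulPv512x32 is the local primitive underneath the 16-limb (512-bit) routines: it multiplies a 512-bit operand by one 32-bit word with a umull/umlal chain and writes a 17-word result, and mcl_fp_mulUnitPre16L is little more than a call to it plus a copy-out. A C model, assuming 32-bit limbs; mulPv512x32_ref is an illustrative name:

#include <stdint.h>

/* z[0..16] = x[0..15] * y; the high half of each umull feeds the next
   adcs, which is exactly a schoolbook scalar multiply. */
static void mulPv512x32_ref(uint32_t z[17], const uint32_t x[16],
                            uint32_t y) {
    uint32_t carry = 0;
    for (int i = 0; i < 16; i++) {
        uint64_t t = (uint64_t)x[i] * y + carry;  /* umull + adcs */
        z[i] = (uint32_t)t;
        carry = (uint32_t)(t >> 32);
    }
    z[16] = carry;                                /* adc r1, r7, #0 */
}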
	.globl	mcl_fpDbl_mulPre16L
	.align	2
	.type	mcl_fpDbl_mulPre16L,%function
mcl_fpDbl_mulPre16L:                    @ @mcl_fpDbl_mulPre16L
	.fnstart
@ BB#0:
	.save	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	push	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	.pad	#276
	sub	sp, sp, #276
	mov	r6, r2
	mov	r5, r1
	mov	r4, r0
	bl	mcl_fpDbl_mulPre8L(PLT)         @ low halves:  z    = xL * yL
	add	r0, r4, #64
	add	r1, r5, #32
	add	r2, r6, #32
	bl	mcl_fpDbl_mulPre8L(PLT)         @ high halves: z+64 = xH * yH
	@ [machine-generated middle section: forms xL+xH and yL+yH with their
	@  carry bits, multiplies them with a third mcl_fpDbl_mulPre8L call
	@  into the stack temporary at sp+#212, then folds the cross term
	@  (m - xL*yL - xH*yH) into z+32 with adds/adcs/sbcs chains and
	@  conditional moveq carry fixups]
	add	sp, sp, #276
	pop	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	mov	pc, lr
.Lfunc_end240:
	.size	mcl_fpDbl_mulPre16L, .Lfunc_end240-mcl_fpDbl_mulPre16L
	.cantunwind
	.fnend
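mcl_fpDbl_mulPre16L computes the full 1024-bit product of two 512-bit operands with one level of Karatsuba over the 8-limb primitive: the three mcl_fpDbl_mulPre8L calls above produce xL*yL, xH*yH and (xL+xH)*(yL+yH), and the elided middle section folds the cross term into z. A self-contained C sketch of that textbook recombination, assuming 32-bit limbs; it shows the structure, not the listing's conditional-move carry handling:

#include <stdint.h>

/* r[0..na+nb-1] = a * b, schoolbook; stands in for mcl_fpDbl_mulPre8L */
static void mul_nm(uint32_t *r, const uint32_t *a, int na,
                   const uint32_t *b, int nb) {
    for (int i = 0; i < na + nb; i++) r[i] = 0;
    for (int i = 0; i < na; i++) {
        uint64_t c = 0;
        for (int j = 0; j < nb; j++) {
            uint64_t t = (uint64_t)a[i] * b[j] + r[i + j] + c;
            r[i + j] = (uint32_t)t;
            c = t >> 32;
        }
        r[i + nb] = (uint32_t)c;
    }
}

/* z[0..31] = x * y via the identity
   cross = (xL+xH)(yL+yH) - xL*yL - xH*yH, added in at limb offset 8. */
static void mulPre16_karatsuba(uint32_t z[32], const uint32_t x[16],
                               const uint32_t y[16]) {
    uint32_t xs[9], ys[9], m[18], mid[18];
    uint64_t c, b;
    int i;
    mul_nm(z, x, 8, y, 8);                /* xL*yL -> z[0..15]  */
    mul_nm(z + 16, x + 8, 8, y + 8, 8);   /* xH*yH -> z[16..31] */
    for (c = 0, i = 0; i < 8; i++) {      /* xs = xL + xH (9 limbs) */
        uint64_t t = (uint64_t)x[i] + x[8 + i] + c;
        xs[i] = (uint32_t)t; c = t >> 32;
    }
    xs[8] = (uint32_t)c;
    for (c = 0, i = 0; i < 8; i++) {      /* ys = yL + yH (9 limbs) */
        uint64_t t = (uint64_t)y[i] + y[8 + i] + c;
        ys[i] = (uint32_t)t; c = t >> 32;
    }
    ys[8] = (uint32_t)c;
    mul_nm(m, xs, 9, ys, 9);              /* (xL+xH)(yL+yH), 18 limbs */
    for (b = 0, i = 0; i < 18; i++) {     /* mid = m - xL*yL */
        uint64_t d = (uint64_t)m[i] - (i < 16 ? z[i] : 0) - b;
        mid[i] = (uint32_t)d; b = (d >> 32) & 1;
    }
    for (b = 0, i = 0; i < 18; i++) {     /* mid -= xH*yH */
        uint64_t d = (uint64_t)mid[i] - (i < 16 ? z[16 + i] : 0) - b;
        mid[i] = (uint32_t)d; b = (d >> 32) & 1;
    }
    for (c = 0, i = 0; i < 18; i++) {     /* z += mid << 256 */
        uint64_t t = (uint64_t)z[8 + i] + mid[i] + c;
        z[8 + i] = (uint32_t)t; c = t >> 32;
    }
    for (i = 26; c && i < 32; i++) {      /* propagate the last carry */
        uint64_t t = (uint64_t)z[i] + c;
        z[i] = (uint32_t)t; c = t >> 32;
    }
}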
	.globl	mcl_fpDbl_sqrPre16L
	.align	2
	.type	mcl_fpDbl_sqrPre16L,%function
mcl_fpDbl_sqrPre16L:                    @ @mcl_fpDbl_sqrPre16L
	.fnstart
@ BB#0:
	.save	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	push	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	.pad	#276
	sub	sp, sp, #276
	mov	r5, r1
	mov	r4, r0
	mov	r2, r5
	bl	mcl_fpDbl_mulPre8L(PLT)         @ z    = xL * xL
	add	r1, r5, #32
	add	r0, r4, #64
	mov	r2, r1
	bl	mcl_fpDbl_mulPre8L(PLT)         @ z+64 = xH * xH
	@ [machine-generated middle section: squares the half-sum xL+xH with a
	@  third mcl_fpDbl_mulPre8L call into the stack temporary at sp+#212,
	@  then folds the cross term into z+32 with adds/adcs/sbcs chains and
	@  conditional moveq carry fixups, mirroring mcl_fpDbl_mulPre16L]
	add	sp, sp, #276
	pop	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	mov	pc, lr
.Lfunc_end241:
	.size	mcl_fpDbl_sqrPre16L, .Lfunc_end241-mcl_fpDbl_sqrPre16L
	.cantunwind
	.fnend
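mcl_fp_mont16L, which begins below, is 512-bit Montgomery multiplication: for each word of y it runs one .LmulPv512x32 pass for x*y[i], multiplies the running low word by the constant loaded from p[-4] (the Montgomery factor inv = -p^-1 mod 2^32 in mcl's layout) to get the quotient word m, runs a second .LmulPv512x32 pass for m*p, and shifts the accumulator down one word; a final conditional subtraction lands the result in [0, p). A word-serial (CIOS-style) C model under the same 32-bit-limb assumption; fp_mont16_ref is an illustrative name, not a transcription of the generated code:

#include <stdint.h>

enum { NW = 16 };

static void fp_mont16_ref(uint32_t z[NW], const uint32_t x[NW],
                          const uint32_t y[NW], const uint32_t p[NW],
                          uint32_t inv) {          /* inv = -p^-1 mod 2^32 */
    uint32_t t[NW + 2] = {0}, u[NW];
    uint64_t c, b, s;
    for (int i = 0; i < NW; i++) {
        c = 0;                                     /* t += x * y[i] */
        for (int j = 0; j < NW; j++) {
            s = (uint64_t)x[j] * y[i] + t[j] + c;
            t[j] = (uint32_t)s; c = s >> 32;
        }
        s = (uint64_t)t[NW] + c;
        t[NW] = (uint32_t)s; t[NW + 1] = (uint32_t)(s >> 32);
        uint32_t m = t[0] * inv;                   /* mul r2, r0, r5 */
        c = 0;                                     /* t += m * p */
        for (int j = 0; j < NW; j++) {
            s = (uint64_t)m * p[j] + t[j] + c;
            t[j] = (uint32_t)s; c = s >> 32;
        }
        s = (uint64_t)t[NW] + c;
        t[NW] = (uint32_t)s; t[NW + 1] += (uint32_t)(s >> 32);
        for (int j = 0; j <= NW; j++)              /* t[0] == 0 by choice  */
            t[j] = t[j + 1];                       /* of m, so drop a word */
        t[NW + 1] = 0;
    }
    b = 0;                                         /* conditional t - p */
    for (int j = 0; j < NW; j++) {
        uint64_t d = (uint64_t)t[j] - p[j] - b;
        u[j] = (uint32_t)d; b = (d >> 32) & 1;
    }
    const uint32_t *r = (b <= t[NW]) ? u : t;
    for (int j = 0; j < NW; j++) z[j] = r[j];
}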
[sp, #92] @ 4-byte Spill - add r0, r6, #328 - ldr r5, [r3, #-4] - ldr r2, [r2] - str r5, [sp, #128] @ 4-byte Spill - bl .LmulPv512x32(PLT) - ldr r0, [sp, #2376] - ldr r1, [sp, #2380] - str r0, [sp, #72] @ 4-byte Spill - mul r2, r0, r5 - ldr r0, [sp, #2440] - str r1, [sp, #100] @ 4-byte Spill - ldr r1, [sp, #2384] - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #2436] - str r1, [sp, #96] @ 4-byte Spill - ldr r1, [sp, #2388] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #2432] - str r1, [sp, #88] @ 4-byte Spill - mov r1, r4 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #2428] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #2424] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #2420] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #2416] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #2412] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #2408] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #2404] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #2400] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #2396] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #2392] - str r0, [sp, #44] @ 4-byte Spill - add r0, sp, #2304 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #2368] - ldr r1, [sp, #132] @ 4-byte Reload - ldr r7, [sp, #2332] - ldr r4, [sp, #2328] - ldr r8, [sp, #2324] - ldr r11, [sp, #2320] - ldr r9, [sp, #2304] - ldr r10, [sp, #2308] - ldr r6, [sp, #2312] - ldr r5, [sp, #2316] - add lr, sp, #2048 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #2364] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #2360] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #2356] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #2352] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #2348] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #2344] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #2340] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #2336] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - ldr r2, [r0, #4] - add r0, lr, #184 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp, #68] @ 4-byte Reload - ldr r2, [sp, #20] @ 4-byte Reload - ldr r3, [sp, #2248] - ldr r12, [sp, #2252] - ldr lr, [sp, #2256] - adds r0, r9, r0 - ldr r9, [sp, #2272] - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r10, r0 - ldr r10, [sp, #2276] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - ldr r6, [sp, #96] @ 4-byte Reload - adcs r0, r5, r0 - ldr r5, [sp, #2264] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r11, r0 - ldr r11, [sp, #100] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r8, r0 - ldr r8, [sp, #2268] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #2260] - adcs r1, r7, r1 - str r0, [sp, #60] @ 4-byte Spill - mov r0, #0 - ldr r7, [sp, #2232] - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #104] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #36] @ 4-byte Reload - str r1, [sp, #44] @ 4-byte Spill - ldr r1, 
[sp, #108] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #112] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #48] @ 4-byte Reload - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #116] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #56] @ 4-byte Reload - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #120] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #64] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #124] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #2244] - adc r0, r0, #0 - adds r7, r11, r7 - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #2240] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #2296] - str r7, [sp, #24] @ 4-byte Spill - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #2292] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #2288] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #2284] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #2280] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #2236] - adcs r0, r6, r0 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #2160 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #2224] - ldr r1, [sp, #132] @ 4-byte Reload - ldr r5, [sp, #2188] - ldr r6, [sp, #2184] - ldr r8, [sp, #2180] - ldr r9, [sp, #2176] - ldr r10, [sp, #2160] - ldr r11, [sp, #2164] - ldr r4, [sp, #2168] - ldr r7, [sp, #2172] - add lr, sp, #2048 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #2220] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #2216] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #2212] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #2208] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #2204] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #2200] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #2196] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #2192] - str r0, [sp, #16] @ 
4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - ldr r2, [r0, #8] - add r0, lr, #40 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #24] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #2100] - ldr r3, [sp, #2104] - ldr r12, [sp, #2108] - ldr lr, [sp, #2112] - adds r0, r0, r10 - ldr r10, [sp, #2132] - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - ldr r11, [sp, #124] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #2116] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #2088] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #2128] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #2124] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #120] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #2120] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #2096] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r0, #0 - adds r7, r11, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #2152] - str r7, [sp, #32] @ 4-byte Spill - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #2148] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #2144] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #2140] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #2136] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #2092] - adcs r0, r6, r0 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs 
str r0, [sp, #84] @ 4-byte Spill
ldr r0, [sp, #76] @ 4-byte Reload
adcs r0, r0, r10
str r0, [sp, #80] @ 4-byte Spill
ldr r0, [sp, #72] @ 4-byte Reload
adcs r0, r0, r1
ldr r1, [sp, #40] @ 4-byte Reload
str r0, [sp, #76] @ 4-byte Spill
ldr r0, [sp, #68] @ 4-byte Reload
adcs r0, r0, r1
ldr r1, [sp, #44] @ 4-byte Reload
str r0, [sp, #72] @ 4-byte Spill
ldr r0, [sp, #64] @ 4-byte Reload
adcs r0, r0, r1
ldr r1, [sp, #48] @ 4-byte Reload
str r0, [sp, #68] @ 4-byte Spill
ldr r0, [sp, #60] @ 4-byte Reload
adcs r0, r0, r1
ldr r1, [sp, #52] @ 4-byte Reload
str r0, [sp, #64] @ 4-byte Spill
ldr r0, [sp, #56] @ 4-byte Reload
adcs r0, r0, r1
ldr r1, [sp, #140] @ 4-byte Reload
str r0, [sp, #60] @ 4-byte Spill
mov r0, #0
adc r0, r0, #0
str r0, [sp, #56] @ 4-byte Spill
ldr r0, [sp, #128] @ 4-byte Reload
mul r2, r7, r0
add r0, sp, #2016
bl .LmulPv512x32(PLT)
ldr r0, [sp, #2080]
ldr r1, [sp, #132] @ 4-byte Reload
ldr r5, [sp, #2044]
ldr r6, [sp, #2040]
ldr r8, [sp, #2036]
ldr r9, [sp, #2032]
ldr r10, [sp, #2016]
ldr r11, [sp, #2020]
ldr r4, [sp, #2024]
ldr r7, [sp, #2028]
add lr, sp, #1024
str r0, [sp, #52] @ 4-byte Spill
ldr r0, [sp, #2076]
str r0, [sp, #48] @ 4-byte Spill
ldr r0, [sp, #2072]
str r0, [sp, #44] @ 4-byte Spill
ldr r0, [sp, #2068]
str r0, [sp, #40] @ 4-byte Spill
ldr r0, [sp, #2064]
str r0, [sp, #36] @ 4-byte Spill
ldr r0, [sp, #2060]
str r0, [sp, #28] @ 4-byte Spill
ldr r0, [sp, #2056]
str r0, [sp, #24] @ 4-byte Spill
ldr r0, [sp, #2052]
str r0, [sp, #20] @ 4-byte Spill
ldr r0, [sp, #2048]
str r0, [sp, #16] @ 4-byte Spill
ldr r0, [sp, #136] @ 4-byte Reload
ldr r2, [r0, #12]
add r0, lr, #920
bl .LmulPv512x32(PLT)
ldr r0, [sp, #32] @ 4-byte Reload
ldr r1, [sp, #16] @ 4-byte Reload
ldr r2, [sp, #1956]
ldr r3, [sp, #1960]
ldr r12, [sp, #1964]
ldr lr, [sp, #1968]
adds r0, r0, r10
ldr r10, [sp, #1988]
ldr r0, [sp, #124] @ 4-byte Reload
adcs r0, r0, r11
str r0, [sp, #124] @ 4-byte Spill
ldr r0, [sp, #120] @ 4-byte Reload
ldr r11, [sp, #124] @ 4-byte Reload
adcs r0, r0, r4
ldr r4, [sp, #1972]
str r0, [sp, #120] @ 4-byte Spill
ldr r0, [sp, #116] @ 4-byte Reload
adcs r0, r0, r7
ldr r7, [sp, #1944]
str r0, [sp, #116] @ 4-byte Spill
ldr r0, [sp, #112] @ 4-byte Reload
adcs r0, r0, r9
ldr r9, [sp, #1984]
str r0, [sp, #112] @ 4-byte Spill
ldr r0, [sp, #108] @ 4-byte Reload
adcs r0, r0, r8
ldr r8, [sp, #1980]
str r0, [sp, #108] @ 4-byte Spill
ldr r0, [sp, #104] @ 4-byte Reload
adcs r0, r0, r6
ldr r6, [sp, #120] @ 4-byte Reload
str r0, [sp, #104] @ 4-byte Spill
ldr r0, [sp, #100] @ 4-byte Reload
adcs r0, r0, r5
ldr r5, [sp, #1976]
str r0, [sp, #100] @ 4-byte Spill
ldr r0, [sp, #96] @ 4-byte Reload
adcs r0, r0, r1
ldr r1, [sp, #20] @ 4-byte Reload
str r0, [sp, #96] @ 4-byte Spill
ldr r0, [sp, #88] @ 4-byte Reload
adcs r0, r0, r1
ldr r1, [sp, #24] @ 4-byte Reload
str r0, [sp, #88] @ 4-byte Spill
ldr r0, [sp, #84] @ 4-byte Reload
adcs r0, r0, r1
ldr r1, [sp, #28] @ 4-byte Reload
str r0, [sp, #84] @ 4-byte Spill
ldr r0, [sp, #80] @ 4-byte Reload
adcs r0, r0, r1
ldr r1, [sp, #36] @ 4-byte Reload
str r0, [sp, #80] @ 4-byte Spill
ldr r0, [sp, #76] @ 4-byte Reload
adcs r0, r0, r1
ldr r1, [sp, #40] @ 4-byte Reload
str r0, [sp, #76] @ 4-byte Spill
ldr r0, [sp, #72] @ 4-byte Reload
adcs r0, r0, r1
ldr r1, [sp, #44] @ 4-byte Reload
str r0, [sp, #72] @ 4-byte Spill
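@ The mul r2, r7, r0 / .LmulPv512x32 pair above is the reduction half of
@ the iteration: r7 holds the low limb of the accumulator and [sp, #128]
@ appears to cache the Montgomery constant (-1/p mod 2^32), so r2
@ receives the quotient q and the call computes q*p into the scratch
@ area at sp+2016; the subsequent carry chains fold that product back in
@ so the low limb cancels and the accumulator shifts down one 32-bit
@ limb. The second call then starts the next limb, reloading the operand
@ pointer from [sp, #136] and taking x[3] from [r0, #12].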
r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1952] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r0, #0 - adds r7, r11, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #2008] - str r7, [sp, #32] @ 4-byte Spill - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #2004] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #2000] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1996] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1992] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1948] - adcs r0, r6, r0 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #1872 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #1936] - ldr r1, [sp, #132] @ 4-byte Reload - ldr r5, [sp, #1900] - ldr r6, [sp, #1896] - ldr r8, [sp, #1892] - ldr r9, [sp, #1888] - ldr r10, [sp, #1872] - ldr r11, [sp, #1876] - ldr r4, [sp, #1880] - ldr r7, [sp, #1884] - add lr, sp, #1024 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1932] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1928] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1924] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1920] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1916] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1912] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1908] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1904] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - ldr r2, [r0, #16] - add r0, lr, #776 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1812] - ldr r3, [sp, #1816] - 
ldr r12, [sp, #1820] - ldr lr, [sp, #1824] - adds r0, r0, r10 - ldr r10, [sp, #1844] - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - ldr r11, [sp, #124] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1828] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #1800] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1840] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1836] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #120] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1832] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1808] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r0, #0 - adds r7, r11, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1864] - str r7, [sp, #32] @ 4-byte Spill - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1860] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1856] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1852] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1848] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1804] - adcs r0, r6, r0 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - 
str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #1728 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #1792] - ldr r1, [sp, #132] @ 4-byte Reload - ldr r5, [sp, #1756] - ldr r6, [sp, #1752] - ldr r8, [sp, #1748] - ldr r9, [sp, #1744] - ldr r10, [sp, #1728] - ldr r11, [sp, #1732] - ldr r4, [sp, #1736] - ldr r7, [sp, #1740] - add lr, sp, #1024 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1788] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1784] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1780] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1776] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1772] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1768] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1764] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1760] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - ldr r2, [r0, #20] - add r0, lr, #632 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1668] - ldr r3, [sp, #1672] - ldr r12, [sp, #1676] - ldr lr, [sp, #1680] - adds r0, r0, r10 - ldr r10, [sp, #1700] - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - ldr r11, [sp, #124] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1684] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #1656] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1696] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1692] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #120] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1688] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, 
#64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1664] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r0, #0 - adds r7, r11, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1720] - str r7, [sp, #32] @ 4-byte Spill - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1716] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1712] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1708] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1704] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1660] - adcs r0, r6, r0 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #1584 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #1648] - ldr r1, [sp, #132] @ 4-byte Reload - ldr r5, [sp, #1612] - ldr r6, [sp, #1608] - ldr r8, [sp, #1604] - ldr r9, [sp, #1600] - ldr r10, [sp, #1584] - ldr r11, [sp, #1588] - ldr r4, [sp, #1592] - ldr r7, [sp, #1596] - add lr, sp, #1024 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1644] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1640] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1636] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1632] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1628] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1624] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1620] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1616] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - ldr r2, [r0, #24] - add r0, lr, #488 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1524] - ldr r3, [sp, #1528] - ldr r12, [sp, #1532] - ldr lr, [sp, #1536] - adds r0, r0, r10 - ldr r10, [sp, #1556] - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - ldr r11, 
[sp, #124] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1540] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #1512] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1552] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1548] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #120] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1544] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1520] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r0, #0 - adds r7, r11, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1576] - str r7, [sp, #32] @ 4-byte Spill - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1572] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1568] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1564] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1560] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1516] - adcs r0, r6, r0 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, 
#48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #1440 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #1504] - ldr r1, [sp, #132] @ 4-byte Reload - ldr r5, [sp, #1468] - ldr r6, [sp, #1464] - ldr r8, [sp, #1460] - ldr r9, [sp, #1456] - ldr r10, [sp, #1440] - ldr r11, [sp, #1444] - ldr r4, [sp, #1448] - ldr r7, [sp, #1452] - add lr, sp, #1024 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1500] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1496] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1492] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1488] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1484] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1480] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1476] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1472] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - ldr r2, [r0, #28] - add r0, lr, #344 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1380] - ldr r3, [sp, #1384] - ldr r12, [sp, #1388] - ldr lr, [sp, #1392] - adds r0, r0, r10 - ldr r10, [sp, #1412] - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - ldr r11, [sp, #124] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1396] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #1368] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1408] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1404] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #120] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1400] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1376] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r0, #0 - adds r7, r11, r7 - str r0, [sp, #56] @ 
4-byte Spill - ldr r0, [sp, #1432] - str r7, [sp, #32] @ 4-byte Spill - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1428] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1424] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1420] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1416] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1372] - adcs r0, r6, r0 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #1296 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #1360] - ldr r1, [sp, #132] @ 4-byte Reload - ldr r5, [sp, #1324] - ldr r6, [sp, #1320] - ldr r8, [sp, #1316] - ldr r9, [sp, #1312] - ldr r10, [sp, #1296] - ldr r11, [sp, #1300] - ldr r4, [sp, #1304] - ldr r7, [sp, #1308] - add lr, sp, #1024 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1356] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1352] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1348] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1344] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1340] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1336] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1332] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1328] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - ldr r2, [r0, #32] - add r0, lr, #200 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1236] - ldr r3, [sp, #1240] - ldr r12, [sp, #1244] - ldr lr, [sp, #1248] - adds r0, r0, r10 - ldr r10, [sp, #1268] - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - ldr r11, [sp, #124] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1252] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #1224] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, 
[sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1264] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1260] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #120] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1256] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1232] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r0, #0 - adds r7, r11, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1288] - str r7, [sp, #32] @ 4-byte Spill - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1284] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1280] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1276] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1272] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1228] - adcs r0, r6, r0 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs 
r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #1152 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #1216] - ldr r1, [sp, #132] @ 4-byte Reload - ldr r5, [sp, #1180] - ldr r6, [sp, #1176] - ldr r8, [sp, #1172] - ldr r9, [sp, #1168] - ldr r10, [sp, #1152] - ldr r11, [sp, #1156] - ldr r4, [sp, #1160] - ldr r7, [sp, #1164] - add lr, sp, #1024 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1212] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1208] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1204] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1200] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1196] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1192] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1188] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1184] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - ldr r2, [r0, #36] - add r0, lr, #56 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1092] - ldr r3, [sp, #1096] - ldr r12, [sp, #1100] - ldr lr, [sp, #1104] - adds r0, r0, r10 - ldr r10, [sp, #1124] - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - ldr r11, [sp, #124] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #1108] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #1080] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1120] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1116] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #120] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1112] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1088] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r0, #0 - adds r7, r11, r7 - ldr r11, [sp, #128] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1144] - str r7, [sp, #32] @ 4-byte Spill - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1140] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1136] - str 
r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1132] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1128] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1084] - adcs r0, r6, r0 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r7, r11 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - add r0, sp, #1008 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #1072] - add r10, sp, #1008 - ldr r4, [sp, #1032] - ldr r5, [sp, #1028] - ldr r6, [sp, #1024] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1068] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1064] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1060] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1056] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1052] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1048] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1044] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1040] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1036] - str r0, [sp, #12] @ 4-byte Spill - ldm r10, {r7, r8, r9, r10} - ldr r0, [sp, #136] @ 4-byte Reload - ldr r1, [sp, #132] @ 4-byte Reload - ldr r2, [r0, #40] - add r0, sp, #936 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #952 - adds r0, r0, r7 - ldr r7, [sp, #948] - ldr r0, [sp, #124] @ 4-byte Reload - adcs r2, r0, r8 - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #976 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #940] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #944] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #936] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str 
r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - adds r0, r2, r4 - mul r1, r0, r11 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #1000] - str r1, [sp, #48] @ 4-byte Spill - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #996] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #992] - str r0, [sp, #40] @ 4-byte Spill - ldm r10, {r4, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #120] @ 4-byte Reload - adcs r6, r11, r6 - str r6, [sp, #120] @ 4-byte Spill - ldr r6, [sp, #116] @ 4-byte Reload - adcs r5, r6, r5 - str r5, [sp, #116] @ 4-byte Spill - ldr r5, [sp, #112] @ 4-byte Reload - adcs r5, r5, r7 - str r5, [sp, #112] @ 4-byte Spill - ldr r5, [sp, #108] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #48] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - add r0, sp, #864 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #928] - add r10, sp, #864 - ldr r11, [sp, #892] - ldr r4, [sp, #888] - ldr r5, [sp, #884] - ldr r6, [sp, #880] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #924] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #920] - str r0, [sp, #40] @ 4-byte Spill - 
ldr r0, [sp, #916] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #912] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #908] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #904] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #900] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #896] - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r7, r8, r9, r10} - ldr r0, [sp, #136] @ 4-byte Reload - ldr r1, [sp, #132] @ 4-byte Reload - ldr r2, [r0, #44] - add r0, sp, #792 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #124] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - add lr, sp, #808 - adds r0, r0, r7 - ldr r7, [sp, #804] - ldr r0, [sp, #120] @ 4-byte Reload - adcs r2, r0, r8 - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #832 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #796] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #800] - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #792] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - adds r1, r2, r4 - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - str r1, [sp, #124] @ 4-byte Spill - mul r2, r1, r0 - ldr r0, [sp, #856] - str r2, [sp, #28] @ 4-byte Spill - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #852] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #848] - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r4, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #120] @ 4-byte Reload - adcs r6, r11, r6 - str r6, [sp, #72] @ 4-byte Spill - ldr r6, [sp, #116] @ 4-byte Reload - adcs r5, r6, r5 - str r5, [sp, #68] @ 4-byte Spill - ldr r5, [sp, #112] @ 4-byte Reload - adcs r5, r5, r7 - str r5, [sp, #64] @ 4-byte Spill - ldr r5, [sp, #108] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, 
#120] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - add r0, sp, #720 - bl .LmulPv512x32(PLT) - ldr r1, [sp, #784] - add r10, sp, #720 - ldr r5, [sp, #748] - ldr r6, [sp, #744] - ldr r7, [sp, #740] - ldr r11, [sp, #736] - add r0, sp, #648 - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #780] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #776] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #772] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #768] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #764] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #760] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #756] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #752] - str r1, [sp, #16] @ 4-byte Spill - ldm r10, {r8, r9, r10} - ldr r1, [sp, #136] @ 4-byte Reload - ldr r4, [sp, #732] - ldr r2, [r1, #48] - ldr r1, [sp, #132] @ 4-byte Reload - bl .LmulPv512x32(PLT) - ldr r0, [sp, #124] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - add lr, sp, #652 - adds r0, r0, r8 - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #676 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] 
@ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #712] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #708] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #704] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #700] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #696] - str r0, [sp, #28] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #648] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #124] @ 4-byte Reload - ldr r7, [sp, #72] @ 4-byte Reload - adds r6, r11, r6 - adcs r0, r7, r0 - str r6, [sp, #32] @ 4-byte Spill - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #576 - bl .LmulPv512x32(PLT) - ldr r1, [sp, #640] - add r11, sp, #584 - ldr r6, [sp, #604] - ldr r5, [sp, #600] - ldr r8, [sp, #596] - ldr r9, [sp, #576] - ldr r10, [sp, #580] - add r0, sp, #504 - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #636] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #632] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #628] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #624] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #620] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #616] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #612] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #608] - str r1, [sp, #16] @ 4-byte Spill - ldm r11, {r4, r7, r11} - ldr r1, [sp, #136] @ 4-byte Reload - ldr r2, [r1, #52] - ldr r1, [sp, #132] @ 4-byte Reload - bl .LmulPv512x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - add lr, sp, #508 - adds r0, r0, r9 - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #532 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload 
- adcs r0, r0, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #568] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #564] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #560] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #556] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #552] - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #504] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #76] @ 4-byte Reload - ldr r7, [sp, #72] @ 4-byte Reload - adds r6, r11, r6 - adcs r0, r7, r0 - str r6, [sp, #32] @ 4-byte Spill - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte 
Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #432 - bl .LmulPv512x32(PLT) - ldr r1, [sp, #496] - add r11, sp, #440 - ldr r6, [sp, #460] - ldr r5, [sp, #456] - ldr r8, [sp, #452] - ldr r9, [sp, #432] - ldr r10, [sp, #436] - add r0, sp, #360 - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #492] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #488] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #484] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #480] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #476] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #472] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #468] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #464] - str r1, [sp, #16] @ 4-byte Spill - ldm r11, {r4, r7, r11} - ldr r1, [sp, #136] @ 4-byte Reload - ldr r2, [r1, #56] - ldr r1, [sp, #132] @ 4-byte Reload - bl .LmulPv512x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - add lr, sp, #364 - adds r0, r0, r9 - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #388 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #424] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #420] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #416] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #412] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #408] - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #360] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #76] @ 
- ldr r7, [sp, #72] @ 4-byte Reload
- adds r6, r11, r6
- adcs r0, r7, r0
- str r6, [sp, #32] @ 4-byte Spill
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- adcs r0, r0, r4
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #52] @ 4-byte Reload
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #140] @ 4-byte Reload
- str r0, [sp, #84] @ 4-byte Spill
- mov r0, #0
- adc r0, r0, #0
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- mul r2, r6, r0
- add r0, sp, #288
- bl .LmulPv512x32(PLT)
- ldr r1, [sp, #352]
- add r11, sp, #296
- ldr r7, [sp, #316]
- ldr r9, [sp, #288]
- ldr r5, [sp, #292]
- add r0, sp, #216
- str r1, [sp, #52] @ 4-byte Spill
- ldr r1, [sp, #348]
- str r1, [sp, #48] @ 4-byte Spill
- ldr r1, [sp, #344]
- str r1, [sp, #44] @ 4-byte Spill
- ldr r1, [sp, #340]
- str r1, [sp, #40] @ 4-byte Spill
- ldr r1, [sp, #336]
- str r1, [sp, #36] @ 4-byte Spill
- ldr r1, [sp, #332]
- str r1, [sp, #28] @ 4-byte Spill
- ldr r1, [sp, #328]
- str r1, [sp, #24] @ 4-byte Spill
- ldr r1, [sp, #324]
- str r1, [sp, #20] @ 4-byte Spill
- ldr r1, [sp, #320]
- str r1, [sp, #16] @ 4-byte Spill
- ldm r11, {r4, r6, r8, r10, r11}
- ldr r1, [sp, #136] @ 4-byte Reload
- ldr r2, [r1, #60]
- ldr r1, [sp, #132] @ 4-byte Reload
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #32] @ 4-byte Reload
- ldr r1, [sp, #72] @ 4-byte Reload
- ldr r2, [sp, #16] @ 4-byte Reload
- add lr, sp, #232
- adds r0, r0, r9
- add r9, sp, #216
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r5
- adcs r1, r1, r4
- str r1, [sp, #136] @ 4-byte Spill
- ldr r1, [sp, #68] @ 4-byte Reload
- adcs r1, r1, r6
- str r1, [sp, #132] @ 4-byte Spill
- ldr r1, [sp, #64] @ 4-byte Reload
- adcs r1, r1, r8
- str r1, [sp, #76] @ 4-byte Spill
- ldr r1, [sp, #60] @ 4-byte Reload
- adcs r1, r1, r10
- add r10, sp, #256
- str r1, [sp, #72] @ 4-byte Spill
- ldr r1, [sp, #56] @ 4-byte Reload
- adcs r1, r1, r11
- str r1, [sp, #68] @ 4-byte Spill
- ldr r1, [sp, #124] @ 4-byte Reload
- adcs r1, r1, r7
- str r1, [sp, #124] @ 4-byte Spill
- ldr r1, [sp, #120] @ 4-byte Reload
- adcs r1, r1, r2
- ldr r2, [sp, #20] @ 4-byte Reload
- str r1, [sp, #120] @ 4-byte Spill
- ldr r1, [sp, #116] @ 4-byte Reload
- adcs r1, r1, r2
- ldr r2, [sp, #24] @ 4-byte Reload
- str r1, [sp, #116] @ 4-byte Spill
- ldr r1, [sp, #112] @ 4-byte Reload
- adcs r1, r1, r2
- ldr r2, [sp, #28] @ 4-byte Reload
- str r1, [sp, #112] @ 4-byte Spill
- ldr r1, [sp, #108] @ 4-byte Reload
- adcs r1, r1, r2
- ldr r2, [sp, #36] @ 4-byte Reload
- str r1, [sp, #108] @ 4-byte Spill
- ldr r1, [sp, #104] @ 4-byte Reload
- adcs r1, r1, r2
- ldr r2, [sp, #40] @ 4-byte Reload
- str r1, [sp, #104] @ 4-byte Spill
- ldr r1, [sp, #100] @ 4-byte Reload
- adcs r1, r1, r2
- ldr r2, [sp, #44] @ 4-byte Reload
- str r1, [sp, #100] @ 4-byte Spill
- ldr r1, [sp, #96] @ 4-byte Reload
- adcs r1, r1, r2
- ldr r2, [sp, #48] @ 4-byte Reload
- str r1, [sp, #96] @ 4-byte Spill
- ldr r1, [sp, #88] @ 4-byte Reload
- adcs r1, r1, r2
- ldr r2, [sp, #52] @ 4-byte Reload
- str r1, [sp, #88] @ 4-byte Spill
- ldr r1, [sp, #84] @ 4-byte Reload
- adcs r1, r1, r2
- str r1, [sp, #84] @ 4-byte Spill
- ldr r1, [sp, #80] @ 4-byte Reload
- adc r1, r1, #0
- str r1, [sp, #80] @ 4-byte Spill
- ldm r9, {r4, r7, r9}
- ldr r5, [sp, #228]
- adds r8, r0, r4
- ldr r0, [sp, #128] @ 4-byte Reload
- mul r1, r8, r0
- ldr r0, [sp, #280]
- str r1, [sp, #64] @ 4-byte Spill
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #276]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #272]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #268]
- str r0, [sp, #36] @ 4-byte Spill
- ldm r10, {r4, r6, r10}
- ldm lr, {r0, r1, r2, r3, r12, lr}
- ldr r11, [sp, #136] @ 4-byte Reload
- adcs r11, r11, r7
- ldr r7, [sp, #132] @ 4-byte Reload
- adcs r9, r7, r9
- ldr r7, [sp, #76] @ 4-byte Reload
- adcs r5, r7, r5
- ldr r7, [sp, #72] @ 4-byte Reload
- adcs r0, r7, r0
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- adcs r0, r0, r2
- ldr r2, [sp, #64] @ 4-byte Reload
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r4
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r6
- ldr r6, [sp, #140] @ 4-byte Reload
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #132] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r10, r0, r1
- ldr r0, [sp, #84] @ 4-byte Reload
- ldr r1, [sp, #44] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #56] @ 4-byte Reload
- str r0, [sp, #136] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r1
- mov r1, r6
- str r0, [sp, #84] @ 4-byte Spill
- mov r0, #0
- adc r0, r0, #0
- str r0, [sp, #88] @ 4-byte Spill
- add r0, sp, #144
- bl .LmulPv512x32(PLT)
- add r3, sp, #144
- ldm r3, {r0, r1, r2, r3}
- adds r0, r8, r0
- adcs r7, r11, r1
- ldr r0, [sp, #160]
- ldr r1, [sp, #76] @ 4-byte Reload
- adcs r8, r9, r2
- str r7, [sp, #56] @ 4-byte Spill
- adcs r5, r5, r3
- mov r3, r6
- str r8, [sp, #64] @ 4-byte Spill
- str r5, [sp, #72] @ 4-byte Spill
- adcs r4, r1, r0
- ldr r0, [sp, #164]
- ldr r1, [sp, #48] @ 4-byte Reload
- str r4, [sp, #76] @ 4-byte Spill
- adcs r0, r1, r0
- ldr r1, [sp, #52] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #168]
- adcs lr, r1, r0
- ldr r0, [sp, #172]
- ldr r1, [sp, #60] @ 4-byte Reload
- str lr, [sp, #52] @ 4-byte Spill
- adcs r0, r1, r0
- ldr r1, [sp, #68] @ 4-byte Reload
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #176]
- adcs r0, r1, r0
- ldr r1, [sp, #116] @ 4-byte Reload
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #180]
- adcs r0, r1, r0
- ldr r1, [sp, #120] @ 4-byte Reload
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #184]
- adcs r0, r1, r0
- ldr r1, [sp, #124] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #188]
- adcs r0, r1, r0
- ldr r1, [sp, #128] @ 4-byte Reload
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #192]
- adcs r0, r1, r0
- ldr r1, [sp, #132] @ 4-byte Reload
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #196]
- adcs r0, r1, r0
- ldr r1, [sp, #136] @ 4-byte Reload
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #200]
- adcs r0, r10, r0
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #204]
- adcs r0, r1, r0
- ldr r1, [sp, #84] @ 4-byte Reload
- str r0, [sp, #132] @ 4-byte Spill
- ldr r0, [sp, #208]
- adcs r0, r1, r0
- ldr r1, [r3]
- str r0, [sp, #136] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adc r0, r0, #0
- subs r12, r7, r1
- str r0, [sp, #68] @ 4-byte Spill
- ldmib r3, {r0, r2, r6}
- ldr r1, [r3, #32]
- ldr r11, [r3, #40]
- ldr r9, [r3, #28]
- sbcs r7, r8, r0
- ldr r0, [r3, #36]
- sbcs r5, r5, r2
- ldr r2, [sp, #96] @ 4-byte Reload
- sbcs r10, r4, r6
- ldr r6, [r3, #20]
- ldr r4, [r3, #24]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [r3, #44]
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [r3, #48]
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [r3, #52]
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [r3, #56]
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [r3, #60]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [r3, #16]
- sbcs r2, r2, r0
- ldr r0, [sp, #100] @ 4-byte Reload
- sbcs r3, lr, r6
- ldr r6, [sp, #64] @ 4-byte Reload
- sbcs lr, r0, r4
- ldr r0, [sp, #104] @ 4-byte Reload
- sbcs r4, r0, r9
- ldr r0, [sp, #108] @ 4-byte Reload
- sbcs r8, r0, r1
- ldr r0, [sp, #112] @ 4-byte Reload
- ldr r1, [sp, #44] @ 4-byte Reload
- sbcs r9, r0, r1
- ldr r0, [sp, #116] @ 4-byte Reload
- ldr r1, [sp, #60] @ 4-byte Reload
- sbcs r11, r0, r11
- ldr r0, [sp, #120] @ 4-byte Reload
- sbcs r0, r0, r1
- ldr r1, [sp, #80] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #124] @ 4-byte Reload
- sbcs r0, r0, r1
- ldr r1, [sp, #84] @ 4-byte Reload
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- sbcs r0, r0, r1
- ldr r1, [sp, #88] @ 4-byte Reload
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #132] @ 4-byte Reload
- sbcs r0, r0, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #136] @ 4-byte Reload
- sbcs r0, r0, r1
- str r0, [sp, #140] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- sbc r0, r0, #0
- ands r1, r0, #1
- ldr r0, [sp, #56] @ 4-byte Reload
- movne r7, r6
- movne r12, r0
- ldr r0, [sp, #92] @ 4-byte Reload
- str r12, [r0]
- str r7, [r0, #4]
- ldr r7, [sp, #72] @ 4-byte Reload
- movne r5, r7
- ldr r7, [sp, #76] @ 4-byte Reload
- cmp r1, #0
- str r5, [r0, #8]
- movne r10, r7
- ldr r7, [sp, #96] @ 4-byte Reload
- str r10, [r0, #12]
- movne r2, r7
- str r2, [r0, #16]
- ldr r2, [sp, #52] @ 4-byte Reload
- movne r3, r2
- ldr r2, [sp, #100] @ 4-byte Reload
- cmp r1, #0
- str r3, [r0, #20]
- ldr r3, [sp, #60] @ 4-byte Reload
- movne lr, r2
- ldr r2, [sp, #104] @ 4-byte Reload
- str lr, [r0, #24]
- movne r4, r2
- ldr r2, [sp, #108] @ 4-byte Reload
- str r4, [r0, #28]
- movne r8, r2
- ldr r2, [sp, #112] @ 4-byte Reload
- cmp r1, #0
- str r8, [r0, #32]
- movne r9, r2
- ldr r2, [sp, #116] @ 4-byte Reload
- str r9, [r0, #36]
- movne r11, r2
- ldr r2, [sp, #120] @ 4-byte Reload
- str r11, [r0, #40]
- movne r3, r2
- ldr r2, [sp, #124] @ 4-byte Reload
- cmp r1, #0
- str r3, [r0, #44]
- ldr r3, [sp, #80] @ 4-byte Reload
- movne r3, r2
- ldr r2, [sp, #128] @ 4-byte Reload
- str r3, [r0, #48]
- ldr r3, [sp, #84] @ 4-byte Reload
- movne r3, r2
- ldr r2, [sp, #132] @ 4-byte Reload
- str r3, [r0, #52]
- ldr r3, [sp, #88] @ 4-byte Reload
- movne r3, r2
- cmp r1, #0
- ldr r1, [sp, #136] @ 4-byte Reload
- ldr r2, [sp, #140] @ 4-byte Reload
- str r3, [r0, #56]
- movne r2, r1
- str r2, [r0, #60]
- add sp, sp, #404
- add sp, sp, #2048
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end242:
- .size mcl_fp_mont16L, .Lfunc_end242-mcl_fp_mont16L
- .cantunwind
- .fnend
-
- .globl mcl_fp_montNF16L
- .align 2
- .type mcl_fp_montNF16L,%function
-mcl_fp_montNF16L: @ @mcl_fp_montNF16L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #396
- sub sp, sp, #396
- .pad #2048
- sub sp, sp, #2048
- add r12, sp, #124
- mov r4, r3
- stm r12, {r1, r2, r3}
- str r0, [sp, #92] @ 4-byte Spill
- add r0, sp, #2368
- ldr r5, [r3, #-4]
- ldr r2, [r2]
- str r5, [sp, #120] @ 4-byte Spill
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #2368]
- ldr r1, [sp, #2372]
- add r9, sp, #2048
- str r0, [sp, #68] @ 4-byte Spill
- mul r2, r0, r5
- ldr r0, [sp, #2432]
- str r1, [sp, #88] @ 4-byte Spill
- ldr r1, [sp, #2376]
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #2428]
- str r1, [sp, #84] @ 4-byte Spill
- ldr r1, [sp, #2380]
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #2424]
- str r1, [sp, #80] @ 4-byte Spill
- mov r1, r4
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #2420]
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #2416]
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #2412]
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #2408]
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #2404]
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #2400]
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #2396]
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #2392]
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #2388]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #2384]
- str r0, [sp, #40] @ 4-byte Spill
- add r0, r9, #248
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #2360]
- ldr r1, [sp, #124] @ 4-byte Reload
- ldr r5, [sp, #2324]
- ldr r6, [sp, #2320]
- ldr r7, [sp, #2316]
- ldr r8, [sp, #2312]
- ldr r10, [sp, #2296]
- ldr r11, [sp, #2300]
- ldr r4, [sp, #2304]
- ldr r9, [sp, #2308]
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #2356]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #2352]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #2348]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #2344]
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #2340]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #2336]
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #2332]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #2328]
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- ldr r2, [r0, #4]
- add r0, sp, #2224
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #68] @ 4-byte Reload
- ldr r1, [sp, #12] @ 4-byte Reload
- ldr r2, [sp, #2236]
- ldr r3, [sp, #2240]
- ldr r12, [sp, #2244]
- ldr lr, [sp, #2248]
- adds r0, r10, r0
- ldr r10, [sp, #2268]
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r11, r0
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- ldr r11, [sp, #88] @ 4-byte Reload
- adcs r0, r4, r0
- ldr r4, [sp, #2252]
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r9, r0
- ldr r9, [sp, #2264]
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r8, r0
- ldr r8, [sp, #2260]
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r7, r0
- ldr r7, [sp, #84] @ 4-byte Reload
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r6, r0
- ldr r6, [sp, #2224]
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r5, r0
- ldr r5, [sp, #2256]
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #16] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #20] @ 4-byte Reload
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #24] @ 4-byte Reload
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #28] @ 4-byte Reload
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #52] @ 4-byte Reload
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adc r0, r1, r0
- adds r6, r11, r6
- ldr r1, [sp, #2232]
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #2288]
- str r6, [sp, #20] @ 4-byte Spill
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #2284]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #2280]
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #2276]
- str r0, [sp, #8] @ 4-byte Spill
- ldr r0, [sp, #2272]
- str r0, [sp, #4] @ 4-byte Spill
- ldr r0, [sp, #2228]
- adcs r0, r7, r0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #4] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r4
- add r4, sp, #2048
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #28] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #32] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #8] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #36] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #12] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #44] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #132] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #24] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- mul r2, r6, r0
- add r0, r4, #104
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #2216]
- ldr r1, [sp, #124] @ 4-byte Reload
- ldr r6, [sp, #2180]
- ldr r7, [sp, #2176]
- ldr r5, [sp, #2172]
- ldr r8, [sp, #2168]
- ldr r9, [sp, #2152]
- ldr r10, [sp, #2156]
- ldr r11, [sp, #2160]
- ldr r4, [sp, #2164]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #2212]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #2208]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #2204]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #2200]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #2196]
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #2192]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #2188]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #2184]
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- ldr r2, [r0, #8]
- add r0, sp, #2080
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #20] @ 4-byte Reload
- ldr r1, [sp, #12] @ 4-byte Reload
- ldr r2, [sp, #2092]
- ldr r3, [sp, #2096]
- ldr r12, [sp, #2100]
- ldr lr, [sp, #2104]
- adds r0, r0, r9
- ldr r9, [sp, #2120]
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, r10
- ldr r10, [sp, #2124]
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, r11
- ldr r11, [sp, #116] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r4
- ldr r4, [sp, #2108]
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r8
- ldr r8, [sp, #2116]
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #2112]
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r7
- ldr r7, [sp, #112] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r6
- ldr r6, [sp, #2080]
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #24] @ 4-byte Reload
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #28] @ 4-byte Reload
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adc r0, r0, r1
- adds r6, r11, r6
- ldr r1, [sp, #2088]
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #2144]
- str r6, [sp, #28] @ 4-byte Spill
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #2140]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #2136]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #2132]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #2128]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #2084]
- adcs r0, r7, r0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r4
- add r4, sp, #1024
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #132] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- mul r2, r6, r0
- add r0, r4, #984
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #2072]
- ldr r1, [sp, #124] @ 4-byte Reload
- ldr r6, [sp, #2036]
- ldr r7, [sp, #2032]
- ldr r5, [sp, #2028]
- ldr r8, [sp, #2024]
- ldr r9, [sp, #2008]
- ldr r10, [sp, #2012]
- ldr r11, [sp, #2016]
- ldr r4, [sp, #2020]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #2068]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #2064]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #2060]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #2056]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #2052]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #2048]
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #2044]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #2040]
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- ldr r2, [r0, #12]
- add r0, sp, #1936
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #28] @ 4-byte Reload
- ldr r1, [sp, #12] @ 4-byte Reload
- ldr r2, [sp, #1948]
- ldr r3, [sp, #1952]
- ldr r12, [sp, #1956]
- ldr lr, [sp, #1960]
- adds r0, r0, r9
- ldr r9, [sp, #1976]
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, r10
- ldr r10, [sp, #1980]
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, r11
- ldr r11, [sp, #116] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r4
- ldr r4, [sp, #1964]
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r8
- ldr r8, [sp, #1972]
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #1968]
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r7
- ldr r7, [sp, #112] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r6
- ldr r6, [sp, #1936]
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #20] @ 4-byte Reload
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #24] @ 4-byte Reload
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adc r0, r0, r1
- adds r6, r11, r6
- ldr r1, [sp, #1944]
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #2000]
- str r6, [sp, #28] @ 4-byte Spill
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1996]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1992]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1988]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1984]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1940]
- adcs r0, r7, r0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r4
- add r4, sp, #1024
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #132] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- mul r2, r6, r0
- add r0, r4, #840
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #1928]
- ldr r1, [sp, #124] @ 4-byte Reload
- ldr r6, [sp, #1892]
- ldr r7, [sp, #1888]
- ldr r5, [sp, #1884]
- ldr r8, [sp, #1880]
- ldr r9, [sp, #1864]
- ldr r10, [sp, #1868]
- ldr r11, [sp, #1872]
- ldr r4, [sp, #1876]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1924]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1920]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1916]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1912]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1908]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #1904]
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #1900]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #1896]
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- ldr r2, [r0, #16]
- add r0, sp, #1792
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #28] @ 4-byte Reload
- ldr r1, [sp, #12] @ 4-byte Reload
- ldr r2, [sp, #1804]
- ldr r3, [sp, #1808]
- ldr r12, [sp, #1812]
- ldr lr, [sp, #1816]
- adds r0, r0, r9
- ldr r9, [sp, #1832]
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, r10
- ldr r10, [sp, #1836]
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, r11
- ldr r11, [sp, #116] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r4
- ldr r4, [sp, #1820]
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r8
- ldr r8, [sp, #1828]
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #1824]
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r7
- ldr r7, [sp, #112] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r6
- ldr r6, [sp, #1792]
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #20] @ 4-byte Reload
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #24] @ 4-byte Reload
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adc r0, r0, r1
- adds r6, r11, r6
- ldr r1, [sp, #1800]
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #1856]
- str r6, [sp, #28] @ 4-byte Spill
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1852]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1848]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1844]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1840]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1796]
- adcs r0, r7, r0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r4
- add r4, sp, #1024
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #132] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- mul r2, r6, r0
- add r0, r4, #696
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #1784]
- ldr r1, [sp, #124] @ 4-byte Reload
- ldr r6, [sp, #1748]
- ldr r7, [sp, #1744]
- ldr r5, [sp, #1740]
- ldr r8, [sp, #1736]
- ldr r9, [sp, #1720]
- ldr r10, [sp, #1724]
- ldr r11, [sp, #1728]
- ldr r4, [sp, #1732]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1780]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1776]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1772]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1768]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1764]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #1760]
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #1756]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #1752]
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- ldr r2, [r0, #20]
- add r0, sp, #1648
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #28] @ 4-byte Reload
- ldr r1, [sp, #12] @ 4-byte Reload
- ldr r2, [sp, #1660]
- ldr r3, [sp, #1664]
- ldr r12, [sp, #1668]
- ldr lr, [sp, #1672]
- adds r0, r0, r9
- ldr r9, [sp, #1688]
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, r10
- ldr r10, [sp, #1692]
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, r11
- ldr r11, [sp, #116] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r4
- ldr r4, [sp, #1676]
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r8
- ldr r8, [sp, #1684]
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #1680]
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r7
- ldr r7, [sp, #112] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r6
- ldr r6, [sp, #1648]
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #20] @ 4-byte Reload
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #24] @ 4-byte Reload
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adc r0, r0, r1
- adds r6, r11, r6
- ldr r1, [sp, #1656]
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #1712]
- str r6, [sp, #28] @ 4-byte Spill
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1708]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1704]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1700]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1696]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1652]
- adcs r0, r7, r0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r4
- add r4, sp, #1024
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #132] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- mul r2, r6, r0
- add r0, r4, #552
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #1640]
- ldr r1, [sp, #124] @ 4-byte Reload
- ldr r6, [sp, #1604]
- ldr r7, [sp, #1600]
- ldr r5, [sp, #1596]
- ldr r8, [sp, #1592]
- ldr r9, [sp, #1576]
- ldr r10, [sp, #1580]
- ldr r11, [sp, #1584]
- ldr r4, [sp, #1588]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1636]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1632]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1628]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1624]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1620]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #1616]
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #1612]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #1608]
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- ldr r2, [r0, #24]
- add r0, sp, #1504
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #28] @ 4-byte Reload
- ldr r1, [sp, #12] @ 4-byte Reload
- ldr r2, [sp, #1516]
- ldr r3, [sp, #1520]
- ldr r12, [sp, #1524]
- ldr lr, [sp, #1528]
- adds r0, r0, r9
- ldr r9, [sp, #1544]
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, r10
- ldr r10, [sp, #1548]
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, r11
- ldr r11, [sp, #116] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r4
- ldr r4, [sp, #1532]
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r8
- ldr r8, [sp, #1540]
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #1536]
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r7
- ldr r7, [sp, #112] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r6
- ldr r6, [sp, #1504]
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #20] @ 4-byte Reload
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #24] @ 4-byte Reload
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adc r0, r0, r1
- adds r6, r11, r6
- ldr r1, [sp, #1512]
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #1568]
- str r6, [sp, #28] @ 4-byte Spill
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1564]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1560]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1556]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1552]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1508]
- adcs r0, r7, r0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r4
- add r4, sp, #1024
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #132] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- mul r2, r6, r0
- add r0, r4, #408
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #1496]
- ldr r1, [sp, #124] @ 4-byte Reload
- ldr r6, [sp, #1460]
- ldr r7, [sp, #1456]
- ldr r5, [sp, #1452]
- ldr r8, [sp, #1448]
- ldr r9, [sp, #1432]
- ldr r10, [sp, #1436]
- ldr r11, [sp, #1440]
- ldr r4, [sp, #1444]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1492]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1488]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1484]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1480]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1476]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #1472]
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #1468]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #1464]
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- ldr r2, [r0, #28]
- add r0, sp, #1360
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #28] @ 4-byte Reload
- ldr r1, [sp, #12] @ 4-byte Reload
- ldr r2, [sp, #1372]
- ldr r3, [sp, #1376]
- ldr r12, [sp, #1380]
- ldr lr, [sp, #1384]
- adds r0, r0, r9
- ldr r9, [sp, #1400]
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, r10
- ldr r10, [sp, #1404]
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, r11
- ldr r11, [sp, #116] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r4
- ldr r4, [sp, #1388]
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r8
- ldr r8, [sp, #1396]
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #1392]
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r7
- ldr r7, [sp, #112] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r6
- ldr r6, [sp, #1360]
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #20] @ 4-byte Reload
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #24] @ 4-byte Reload
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adc r0, r0, r1
- adds r6, r11, r6
- ldr r1, [sp, #1368]
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #1424]
- str r6, [sp, #28] @ 4-byte Spill
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1420]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1416]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1412]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1408]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1364]
- adcs r0, r7, r0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r4
- add r4, sp, #1024
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #132] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- mul r2, r6, r0
- add r0, r4, #264
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #1352]
- ldr r1, [sp, #124] @ 4-byte Reload
- ldr r6, [sp, #1316]
- ldr r7, [sp, #1312]
- ldr r5, [sp, #1308]
- ldr r8, [sp, #1304]
- ldr r9, [sp, #1288]
- ldr r10, [sp, #1292]
- ldr r11, [sp, #1296]
- ldr r4, [sp, #1300]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1348]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1344]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1340]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1336]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1332]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #1328]
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #1324]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #1320]
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- ldr r2, [r0, #32]
- add r0, sp, #1216
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #28] @ 4-byte Reload
- ldr r1, [sp, #12] @ 4-byte Reload
- ldr r2, [sp, #1228]
- ldr r3, [sp, #1232]
- ldr r12, [sp, #1236]
- ldr lr, [sp, #1240]
- adds r0, r0, r9
- ldr r9, [sp, #1256]
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, r10
- ldr r10, [sp, #1260]
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, r11
- ldr r11, [sp, #116] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r4
- ldr r4, [sp, #1244]
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r8
- ldr r8, [sp, #1252]
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #1248]
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r7
- ldr r7, [sp, #112] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r6
- ldr r6, [sp, #1216]
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #20] @ 4-byte Reload
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #24] @ 4-byte Reload
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adc r0, r0, r1
- adds r6, r11, r6
- ldr r1, [sp, #1224]
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #1280]
- str r6, [sp, #28] @ 4-byte Spill
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1276]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1272]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1268]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1264]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1220]
- adcs r0, r7, r0
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r2
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r4
- add r4, sp, #1024
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #132] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #120] @ 4-byte Reload
- mul r2, r6, r0
- add r0, r4, #120
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #1208]
- ldr r1, [sp, #124] @ 4-byte Reload
- ldr r6, [sp, #1172]
- ldr r7, [sp, #1168]
- ldr r5, [sp, #1164]
- ldr r8, [sp, #1160]
- ldr r9, [sp, #1144]
- ldr r10, [sp, #1148]
- ldr r11, [sp, #1152]
- ldr r4, [sp, #1156]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1204]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1200]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1196]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1192]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1188]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #1184]
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #1180]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #1176]
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #128] @ 4-byte Reload
- ldr r2, [r0, #36]
- add r0, sp, #1072
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #28] @ 4-byte Reload
- ldr r1, [sp, #12] @ 4-byte Reload
- ldr r2, [sp, #1084]
- ldr r3, [sp, #1088]
- ldr r12, [sp, #1092]
- ldr lr, [sp, #1096]
- adds r0, r0, r9
- ldr r9, [sp, #1112]
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r0, r10
- ldr r10, [sp, #1116]
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r0, r11
- ldr r11, [sp, #116] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r4
- ldr r4, [sp, #1100]
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r8
- ldr r8, [sp, #1108]
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #1104]
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r7
- ldr r7, [sp, #112] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r6
- ldr r6, [sp, #1072]
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #20] @ 4-byte Reload
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #24] @ 4-byte Reload
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #48] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adc r0, r0, r1
- adds r6, r11, r6
- ldr r1, [sp, #1080]
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #1136]
- str r6, [sp, #28] @ 4-byte Spill
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1132]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1128]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1124]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1120]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1076]
- adcs r0, r7, r0
- ldr r7, [sp, #120] @ 4-byte Reload
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r2
- mul r2, r6, r7
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r3
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r12
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, lr
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r4
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r5
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r8
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r0, r9
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r0, r10
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #52] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #132] @ 4-byte Reload
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #52] @ 4-byte Spill
- add r0, sp, #1000
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #1064]
- add r11, sp, #1000
- ldr r6, [sp, #1024]
- ldr r5, [sp, #1020]
- ldr r8, [sp, #1016]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1060]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1056]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1052]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1048]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1044]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #1040]
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #1036]
- str r0, [sp, #16] @ 4-byte Spill
- ldr r0, [sp, #1032]
- str r0, [sp, #12] @ 4-byte Spill
- ldr r0, [sp, #1028]
- str r0, [sp, #8] @ 4-byte Spill
- ldm r11, {r9, r10, r11}
- ldr r0, [sp, #128] @ 4-byte Reload
- ldr r1, [sp, #124] @ 4-byte Reload
- ldr r4, [sp, #1012]
- ldr r2, [r0, #40]
- add r0, sp, #928
- bl .LmulPv512x32(PLT)
- ldr r0, [sp, #28] @ 4-byte Reload
- ldr r1, [sp, #8] @ 4-byte Reload
- add lr, sp, #944
- adds r0, r0, r9
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r2, r0, r10
- ldr r0, [sp, #112] @ 4-byte Reload
- add r10, sp, #968
- adcs r0, r0, r11
- ldr r11, [sp, #932]
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r0, r4
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r0, r8
- ldr r8, [sp, #940]
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r0, r5
- ldr r5, [sp, #928]
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r0, r6
- ldr r6, [sp, #936]
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #12] @ 4-byte Reload
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #16] @ 4-byte Reload
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #20] @ 4-byte Reload
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r0, r1
- ldr r1, [sp, #24] @ 4-byte Reload
- str r0, [sp, #76] @ 4-byte Spill
Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #52] @ 4-byte Spill - adds r0, r2, r5 - mul r1, r0, r7 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #992] - str r1, [sp, #44] @ 4-byte Spill - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #988] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #984] - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r4, r5, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #112] @ 4-byte Reload - adcs r7, r7, r11 - str r7, [sp, #112] @ 4-byte Spill - ldr r7, [sp, #108] @ 4-byte Reload - adcs r6, r7, r6 - str r6, [sp, #108] @ 4-byte Spill - ldr r6, [sp, #104] @ 4-byte Reload - adcs r6, r6, r8 - str r6, [sp, #104] @ 4-byte Spill - ldr r6, [sp, #100] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - add r0, sp, #856 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #920] - add r11, sp, #856 - ldr r4, [sp, #884] - ldr r7, [sp, #880] - ldr r5, [sp, #876] - ldr r6, [sp, #872] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #916] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #912] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #908] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #904] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #900] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #896] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #892] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #888] - str r0, [sp, #12] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldr r0, [sp, #128] @ 4-byte Reload - ldr r1, [sp, #124] @ 4-byte Reload - ldr r2, [r0, #44] - add r0, sp, #784 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #116] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #800 - adds 
r0, r0, r8 - ldr r0, [sp, #112] @ 4-byte Reload - adcs r2, r0, r9 - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #824 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #788] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #792] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #796] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - ldr r4, [sp, #784] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, r1 - adds r1, r2, r4 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - str r1, [sp, #116] @ 4-byte Spill - mul r2, r1, r0 - ldr r0, [sp, #848] - str r2, [sp, #24] @ 4-byte Spill - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #844] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #840] - str r0, [sp, #12] @ 4-byte Spill - ldm r10, {r4, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #112] @ 4-byte Reload - adcs r6, r11, r6 - str r6, [sp, #68] @ 4-byte Spill - ldr r6, [sp, #108] @ 4-byte Reload - adcs r5, r6, r5 - str r5, [sp, #64] @ 4-byte Spill - ldr r5, [sp, #104] @ 4-byte Reload - adcs r5, r5, r7 - str r5, [sp, #60] @ 4-byte Spill - ldr r5, [sp, #100] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, 
[sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - add r0, sp, #712 - bl .LmulPv512x32(PLT) - ldr r1, [sp, #776] - ldr r11, [sp, #740] - ldr r8, [sp, #736] - ldr r9, [sp, #732] - ldr r10, [sp, #728] - ldr r6, [sp, #712] - ldr r7, [sp, #716] - ldr r5, [sp, #720] - ldr r4, [sp, #724] - add r0, sp, #640 - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #772] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #768] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #764] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #760] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #756] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #752] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #748] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #744] - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp, #128] @ 4-byte Reload - ldr r2, [r1, #48] - ldr r1, [sp, #124] @ 4-byte Reload - bl .LmulPv512x32(PLT) - ldr r0, [sp, #116] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #644 - adds r0, r0, r6 - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #668 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #704] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #700] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #696] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #692] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #688] - str r0, [sp, #24] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #640] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #116] @ 4-byte Reload - ldr r7, [sp, #68] @ 4-byte Reload - adds r6, r11, r6 - adcs r0, r7, r0 - str r6, [sp, #28] @ 4-byte Spill - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - 
str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #568 - bl .LmulPv512x32(PLT) - ldr r1, [sp, #632] - ldr r6, [sp, #596] - ldr r7, [sp, #592] - ldr r8, [sp, #588] - ldr r5, [sp, #584] - ldr r9, [sp, #568] - ldr r10, [sp, #572] - ldr r4, [sp, #576] - ldr r11, [sp, #580] - add r0, sp, #496 - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #628] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #624] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #620] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #616] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #612] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #608] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #604] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #600] - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp, #128] @ 4-byte Reload - ldr r2, [r1, #52] - ldr r1, [sp, #124] @ 4-byte Reload - bl .LmulPv512x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #500 - adds r0, r0, r9 - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #524 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] 
@ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #560] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #556] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #552] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #548] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #544] - str r0, [sp, #32] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #496] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #72] @ 4-byte Reload - ldr r7, [sp, #68] @ 4-byte Reload - adds r6, r11, r6 - adcs r0, r7, r0 - str r6, [sp, #28] @ 4-byte Spill - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #424 - bl .LmulPv512x32(PLT) - ldr r1, [sp, #488] - ldr r6, [sp, #452] - ldr r7, [sp, #448] - ldr r8, [sp, #444] - ldr r5, [sp, #440] - ldr r9, [sp, #424] - ldr r10, [sp, #428] - ldr r4, [sp, #432] - ldr r11, [sp, #436] - add r0, sp, #352 - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #484] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #480] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #476] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #472] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #468] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #464] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #460] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #456] - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp, #128] @ 4-byte Reload - ldr r2, 
[r1, #56] - ldr r1, [sp, #124] @ 4-byte Reload - bl .LmulPv512x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #356 - adds r0, r0, r9 - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #380 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #416] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #412] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #408] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #404] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #400] - str r0, [sp, #32] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r6, [sp, #352] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #72] @ 4-byte Reload - ldr r7, [sp, #68] @ 4-byte Reload - adds r6, r11, r6 - adcs r0, r7, r0 - str r6, [sp, #28] @ 4-byte Spill - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, 
#84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - mul r2, r6, r0 - add r0, sp, #280 - bl .LmulPv512x32(PLT) - ldr r1, [sp, #344] - add r11, sp, #284 - ldr r8, [sp, #308] - ldr r9, [sp, #304] - ldr r10, [sp, #300] - ldr r7, [sp, #280] - add r0, sp, #208 - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #340] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #336] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #332] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #328] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #324] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #320] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #316] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #312] - str r1, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r5, r6, r11} - ldr r1, [sp, #128] @ 4-byte Reload - ldr r2, [r1, #60] - ldr r1, [sp, #124] @ 4-byte Reload - bl .LmulPv512x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #68] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - add lr, sp, #224 - adds r0, r0, r7 - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r4 - adcs r1, r1, r5 - str r1, [sp, #128] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs r1, r1, r6 - str r1, [sp, #124] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, r11 - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adcs r1, r1, r10 - add r10, sp, #248 - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #52] @ 4-byte Reload - adcs r1, r1, r9 - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #116] @ 4-byte Reload - adcs r1, r1, r8 - add r8, sp, #208 - str r1, [sp, #116] @ 4-byte Spill - ldr r1, [sp, #112] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #112] @ 4-byte Spill - ldr r1, [sp, #108] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #108] @ 4-byte Spill - ldr r1, [sp, #104] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #104] @ 4-byte Spill - ldr r1, [sp, #100] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #100] @ 4-byte Spill - ldr r1, [sp, #96] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #36] @ 4-byte Reload - str r1, [sp, #96] @ 4-byte Spill - ldr r1, [sp, #88] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #88] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #48] @ 4-byte Reload - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adc r1, r1, r2 - str r1, [sp, #76] @ 4-byte Spill - ldm r8, {r4, r5, r6, r8} - adds r9, r0, r4 - ldr r0, [sp, #120] @ 4-byte Reload - mul r1, r9, r0 - ldr r0, [sp, #272] - str r1, [sp, #60] @ 4-byte Spill - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #268] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #264] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #260] - str r0, [sp, #40] @ 4-byte Spill - ldm r10, {r4, r7, r10} - ldm lr, 
{r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #128] @ 4-byte Reload - adcs r11, r11, r5 - ldr r5, [sp, #124] @ 4-byte Reload - adcs r6, r5, r6 - ldr r5, [sp, #72] @ 4-byte Reload - adcs r8, r5, r8 - ldr r5, [sp, #68] @ 4-byte Reload - adcs r5, r5, r0 - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #60] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #132] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r7 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r10, r0, #0 - add r0, sp, #136 - bl .LmulPv512x32(PLT) - add r3, sp, #136 - ldm r3, {r0, r1, r2, r3} - adds r0, r9, r0 - ldr r0, [sp, #152] - adcs r4, r11, r1 - ldr r1, [sp, #52] @ 4-byte Reload - adcs r6, r6, r2 - str r4, [sp, #48] @ 4-byte Spill - adcs lr, r8, r3 - mov r3, r7 - str r6, [sp, #56] @ 4-byte Spill - str lr, [sp, #60] @ 4-byte Spill - adcs r5, r5, r0 - ldr r0, [sp, #156] - str r5, [sp, #68] @ 4-byte Spill - adcs r9, r1, r0 - ldr r0, [sp, #160] - ldr r1, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #72] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #164] - adcs r0, r1, r0 - ldr r1, [sp, #108] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #168] - adcs r0, r1, r0 - ldr r1, [sp, #112] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #172] - adcs r0, r1, r0 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #176] - adcs r0, r1, r0 - ldr r1, [sp, #120] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #180] - adcs r0, r1, r0 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #184] - adcs r0, r1, r0 - ldr r1, [sp, #128] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #188] - adcs r0, r1, r0 - ldr r1, [sp, #100] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #192] - adcs r0, r1, r0 - ldr r1, [sp, #104] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #196] - adcs r0, r1, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #200] - adc r10, r10, r0 - ldm r3, {r0, r7} - ldr r1, [r3, #8] - ldr r2, [r3, #12] - subs r12, r4, r0 - ldr r0, [r3, #32] - sbcs r4, r6, r7 - ldr r7, [r3, #60] - sbcs r6, lr, r1 - add lr, r3, #16 - ldr r1, [r3, #28] - sbcs r8, r5, r2 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [r3, #36] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [r3, #40] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [r3, #44] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [r3, #48] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [r3, 
[~70 more deleted lines elided: the tail of the modulus-limb loads and sbcs borrow chain, and the cmp/movlt conditional writeback selecting the reduced 16-word result into [r0, #0] through [r0, #60]]
- add sp, sp, #396
- add sp, sp, #2048
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end243:
- .size mcl_fp_montNF16L, .Lfunc_end243-mcl_fp_montNF16L
- .cantunwind
- .fnend
-
- .globl mcl_fp_montRed16L
- .align 2
- .type mcl_fp_montRed16L,%function
-mcl_fp_montRed16L: @ @mcl_fp_montRed16L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #356
- sub sp, sp, #356
- .pad #1024
- sub sp, sp, #1024
- mov r3, r2
- str r0, [sp, #200] @ 4-byte Spill
- ldr r2, [r1, #4]
- ldr r7, [r1]
- add r10, sp, #1024
- ldr r0, [r3]
- str r3, [sp, #216] @ 4-byte Spill
- str r2, [sp, #112] @ 4-byte Spill
- ldr r2, [r1, #8]
- str r0, [sp, #192] @ 4-byte Spill
- ldr r0, [r3, #4]
- str r2, [sp, #108] @ 4-byte Spill
- ldr r2, [r1, #12]
- str r0, [sp, #188] @ 4-byte Spill
- ldr r0, [r3, #8]
- str r2, [sp, #104] @ 4-byte Spill
- str r0, [sp, #184] @ 4-byte Spill
- ldr r0, [r3, #12]
[~1,100 deleted lines of machine-generated ARM assembly elided: the body of mcl_fp_montRed16L — the remaining modulus-limb spills ([r3, #16] through [r3, #60]) and input-word spills ([r1, #16] through [r1, #124]), then repeated Montgomery-reduction rounds, each a mul r2, rN, rM by the inverse constant kept at [sp, #220] (loaded from [r3, #-4]), a bl .LmulPv512x32(PLT), an adds/adcs carry chain folding the product into the spill slots, and a long adcs r0, r0, #0 tail propagating the carry through the upper half]
@ 4-byte Spill - ldr r0, [sp, #716] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #712] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #708] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #704] - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r6, r7, r8, r9, r10} - ldr r4, [sp, #656] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r4, r11, r4 - ldr r4, [sp, #112] @ 4-byte Reload - adcs r11, r4, r0 - ldr r0, [sp, #108] @ 4-byte Reload - ldr r4, [sp, #220] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #204] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #204] @ 4-byte Spill - ldr r0, [sp, #208] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #208] @ 4-byte Spill - ldr r0, [sp, #212] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #212] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - add r0, sp, #584 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #648] - add r8, sp, #616 - add lr, sp, #604 - add r12, sp, #584 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #644] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #640] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #636] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #632] - str r0, [sp, #40] @ 4-byte Spill - ldm r8, {r5, r6, r7, r8} - ldm lr, {r9, r10, lr} - ldm r12, {r0, r1, r2, r3, r12} - adds r0, r11, r0 - ldr r0, [sp, #112] @ 4-byte Reload - adcs r11, r0, r1 - ldr r0, [sp, #108] @ 4-byte Reload - ldr r1, [sp, #40] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 
- str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #216] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #204] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #204] @ 4-byte Spill - ldr r0, [sp, #208] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #208] @ 4-byte Spill - ldr r0, [sp, #212] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #212] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - add r0, sp, #512 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #576] - add r10, sp, #540 - add lr, sp, #516 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #572] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #568] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #564] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #560] - str r0, [sp, #44] @ 4-byte Spill - ldm r10, {r6, r7, r8, r9, r10} - ldr r4, [sp, #512] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r4, r11, r4 - ldr r4, [sp, #112] @ 4-byte Reload - adcs r11, r4, r0 - ldr r0, [sp, #108] @ 4-byte Reload - ldr r4, [sp, #220] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #120] @ 
4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #60] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #204] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #204] @ 4-byte Spill - ldr r0, [sp, #208] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #208] @ 4-byte Spill - ldr r0, [sp, #212] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #212] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - add r0, sp, #440 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #504] - add r8, sp, #472 - add lr, sp, #460 - add r12, sp, #440 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #500] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #496] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #492] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #488] - str r0, [sp, #48] @ 4-byte Spill - ldm r8, {r5, r6, r7, r8} - ldm lr, {r9, r10, lr} - ldm r12, {r0, r1, r2, r3, r12} - adds r0, r11, r0 - ldr r0, [sp, #112] @ 4-byte Reload - adcs r11, r0, r1 - ldr r0, [sp, #108] @ 4-byte Reload - ldr r1, [sp, #48] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #216] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #64] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #60] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #204] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #208] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #212] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - add r0, sp, #368 - bl .LmulPv512x32(PLT) - ldr r0, [sp, #432] - add r10, sp, #396 - add lr, sp, #372 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #428] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #424] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #420] 
- str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #416] - str r0, [sp, #20] @ 4-byte Spill - ldm r10, {r6, r7, r8, r9, r10} - ldr r4, [sp, #368] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r4, r11, r4 - ldr r4, [sp, #112] @ 4-byte Reload - adcs r4, r4, r0 - ldr r0, [sp, #108] @ 4-byte Reload - adcs r11, r0, r1 - ldr r0, [sp, #104] @ 4-byte Reload - ldr r1, [sp, #20] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #212] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #208] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #220] @ 4-byte Reload - str r0, [sp, #204] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - mul r2, r4, r6 - adcs r0, r0, r7 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r5 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - add r0, sp, #296 - bl .LmulPv512x32(PLT) - add r5, sp, #296 - add r7, sp, #336 - add lr, sp, #312 - ldm r5, {r0, r1, r3, r5} - ldr r9, [sp, #356] - adds r0, r4, r0 - adcs r8, r11, r1 - ldr r11, [sp, #352] - mul r0, r8, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #360] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #220] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #348] - str r0, [sp, #84] @ 4-byte Spill - ldm r7, {r4, r6, r7} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r10, [sp, #212] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #208] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #204] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #80] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #208] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #204] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #212] @ 4-byte Spill - ldr 
r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r10, r0, r11 - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r6, r0, r1 - ldr r0, [sp, #92] @ 4-byte Reload - ldr r1, [sp, #216] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #120] @ 4-byte Spill - add r0, sp, #224 - bl .LmulPv512x32(PLT) - add r3, sp, #224 - ldm r3, {r0, r1, r2, r3} - adds r0, r8, r0 - ldr r0, [sp, #220] @ 4-byte Reload - adcs r12, r0, r1 - ldr r0, [sp, #84] @ 4-byte Reload - ldr r1, [sp, #60] @ 4-byte Reload - str r12, [sp, #92] @ 4-byte Spill - adcs r2, r0, r2 - ldr r0, [sp, #52] @ 4-byte Reload - str r2, [sp, #96] @ 4-byte Spill - adcs lr, r0, r3 - ldr r0, [sp, #240] - str lr, [sp, #100] @ 4-byte Spill - adcs r4, r1, r0 - ldr r0, [sp, #244] - ldr r1, [sp, #64] @ 4-byte Reload - str r4, [sp, #104] @ 4-byte Spill - adcs r5, r1, r0 - ldr r0, [sp, #248] - ldr r1, [sp, #68] @ 4-byte Reload - str r5, [sp, #108] @ 4-byte Spill - adcs r7, r1, r0 - ldr r0, [sp, #252] - ldr r1, [sp, #208] @ 4-byte Reload - str r7, [sp, #112] @ 4-byte Spill - adcs r0, r1, r0 - ldr r1, [sp, #204] @ 4-byte Reload - str r0, [sp, #208] @ 4-byte Spill - ldr r0, [sp, #256] - adcs r0, r1, r0 - ldr r1, [sp, #72] @ 4-byte Reload - str r0, [sp, #204] @ 4-byte Spill - ldr r0, [sp, #260] - adcs r11, r1, r0 - ldr r0, [sp, #264] - ldr r1, [sp, #76] @ 4-byte Reload - str r11, [sp, #116] @ 4-byte Spill - adcs r0, r1, r0 - ldr r1, [sp, #212] @ 4-byte Reload - str r0, [sp, #216] @ 4-byte Spill - ldr r0, [sp, #268] - adcs r0, r1, r0 - ldr r1, [sp, #128] @ 4-byte Reload - str r0, [sp, #212] @ 4-byte Spill - ldr r0, [sp, #272] - adcs r0, r1, r0 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #220] @ 4-byte Spill - ldr r0, [sp, #276] - adcs r10, r10, r0 - ldr r0, [sp, #280] - str r10, [sp, #128] @ 4-byte Spill - adcs r8, r1, r0 - ldr r0, [sp, #284] - ldr r1, [sp, #124] @ 4-byte Reload - str r8, [sp, #132] @ 4-byte Spill - adcs r6, r6, r0 - ldr r0, [sp, #288] - adcs r3, r1, r0 - ldr r0, [sp, #120] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #192] @ 4-byte Reload - subs r1, r12, r0 - ldr r0, [sp, #188] @ 4-byte Reload - sbcs r2, r2, r0 - ldr r0, [sp, #184] @ 4-byte Reload - sbcs r12, lr, r0 - ldr r0, [sp, #168] @ 4-byte Reload - sbcs lr, r4, r0 - ldr r0, [sp, #172] @ 4-byte Reload - sbcs r4, r5, r0 - ldr r0, [sp, #176] @ 4-byte Reload - sbcs r5, r7, r0 - ldr r0, [sp, #180] @ 4-byte Reload - ldr r7, [sp, #208] @ 4-byte Reload - sbcs r9, r7, r0 - ldr r0, [sp, #136] @ 4-byte Reload - ldr r7, [sp, #204] @ 4-byte Reload - sbcs r0, r7, r0 - ldr r7, [sp, #212] @ 4-byte Reload - str r0, [sp, #172] @ 4-byte Spill - ldr r0, [sp, #140] @ 4-byte Reload - sbcs r0, r11, r0 - ldr r11, [sp, #216] @ 4-byte Reload - str r0, [sp, #176] @ 4-byte Spill - ldr r0, [sp, #144] @ 4-byte Reload - sbcs r0, r11, r0 - ldr r11, [sp, #220] @ 4-byte Reload - str r0, [sp, #180] @ 4-byte Spill - ldr r0, [sp, #148] @ 4-byte Reload - sbcs r0, r7, r0 - str r0, [sp, #184] @ 4-byte Spill - ldr r0, [sp, #152] @ 4-byte Reload - sbcs r0, r11, r0 - str r0, [sp, #188] @ 4-byte Spill - ldr r0, [sp, #156] @ 4-byte Reload - sbcs r0, r10, r0 - mov r10, r6 - str r0, [sp, #192] @ 4-byte Spill - ldr r0, [sp, #160] @ 4-byte Reload - sbcs r7, r8, r0 - ldr r0, [sp, #164] @ 4-byte Reload - mov r8, r3 - sbcs r11, r6, r0 - ldr r0, 
[sp, #196] @ 4-byte Reload - sbcs r6, r3, r0 - ldr r0, [sp, #124] @ 4-byte Reload - sbc r3, r0, #0 - ldr r0, [sp, #92] @ 4-byte Reload - ands r3, r3, #1 - movne r1, r0 - ldr r0, [sp, #200] @ 4-byte Reload - str r1, [r0] - ldr r1, [sp, #96] @ 4-byte Reload - movne r2, r1 - ldr r1, [sp, #100] @ 4-byte Reload - str r2, [r0, #4] - ldr r2, [sp, #172] @ 4-byte Reload - movne r12, r1 - ldr r1, [sp, #104] @ 4-byte Reload - cmp r3, #0 - str r12, [r0, #8] - movne lr, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str lr, [r0, #12] - movne r4, r1 - ldr r1, [sp, #112] @ 4-byte Reload - str r4, [r0, #16] - movne r5, r1 - ldr r1, [sp, #208] @ 4-byte Reload - cmp r3, #0 - str r5, [r0, #20] - movne r9, r1 - ldr r1, [sp, #204] @ 4-byte Reload - str r9, [r0, #24] - movne r2, r1 - ldr r1, [sp, #176] @ 4-byte Reload - str r2, [r0, #28] - ldr r2, [sp, #116] @ 4-byte Reload - movne r1, r2 - cmp r3, #0 - ldr r2, [sp, #180] @ 4-byte Reload - str r1, [r0, #32] - ldr r1, [sp, #216] @ 4-byte Reload - movne r2, r1 - ldr r1, [sp, #212] @ 4-byte Reload - str r2, [r0, #36] - ldr r2, [sp, #184] @ 4-byte Reload - movne r2, r1 - ldr r1, [sp, #220] @ 4-byte Reload - str r2, [r0, #40] - ldr r2, [sp, #188] @ 4-byte Reload - movne r2, r1 - cmp r3, #0 - ldr r1, [sp, #192] @ 4-byte Reload - str r2, [r0, #44] - ldr r2, [sp, #128] @ 4-byte Reload - movne r11, r10 - movne r1, r2 - str r1, [r0, #48] - ldr r1, [sp, #132] @ 4-byte Reload - movne r7, r1 - cmp r3, #0 - movne r6, r8 - str r7, [r0, #52] - str r11, [r0, #56] - str r6, [r0, #60] - add sp, sp, #356 - add sp, sp, #1024 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end244: - .size mcl_fp_montRed16L, .Lfunc_end244-mcl_fp_montRed16L - .cantunwind - .fnend - - .globl mcl_fp_addPre16L - .align 2 - .type mcl_fp_addPre16L,%function -mcl_fp_addPre16L: @ @mcl_fp_addPre16L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #64 - sub sp, sp, #64 - ldm r1, {r3, r8} - ldr r5, [r1, #8] - ldr r6, [r1, #12] - ldm r2, {r7, r12, lr} - ldr r4, [r2, #12] - ldr r9, [r1, #32] - ldr r11, [r1, #52] - adds r3, r7, r3 - str r3, [sp, #28] @ 4-byte Spill - ldr r3, [r2, #32] - adcs r7, r12, r8 - ldr r8, [r2, #24] - add r12, r1, #16 - adcs r5, lr, r5 - ldr lr, [r2, #16] - adcs r6, r4, r6 - ldr r4, [r2, #20] - str r3, [sp, #32] @ 4-byte Spill - ldr r3, [r2, #36] - str r3, [sp, #36] @ 4-byte Spill - ldr r3, [r2, #40] - str r3, [sp, #40] @ 4-byte Spill - ldr r3, [r2, #44] - str r3, [sp, #44] @ 4-byte Spill - ldr r3, [r2, #48] - str r3, [sp, #48] @ 4-byte Spill - ldr r3, [r2, #52] - str r3, [sp, #52] @ 4-byte Spill - ldr r3, [r2, #56] - str r3, [sp, #56] @ 4-byte Spill - ldr r3, [r2, #60] - str r3, [sp, #60] @ 4-byte Spill - ldr r3, [r2, #28] - ldr r2, [r1, #36] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #40] - str r3, [sp, #24] @ 4-byte Spill - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #44] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #48] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r1, #56] - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #20] @ 4-byte Spill - ldm r12, {r1, r2, r3, r12} - ldr r10, [sp, #28] @ 4-byte Reload - adcs r1, lr, r1 - str r10, [r0] - str r7, [r0, #4] - str r5, [r0, #8] - str r6, [r0, #12] - adcs r2, r4, r2 - str r1, [r0, #16] - str r2, [r0, #20] - adcs r1, r8, r3 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [r0, #24] - ldr r1, [sp, #32] @ 4-byte Reload - adcs r2, r2, r12 - adcs r12, r1, r9 - str r2, [r0, #28] - ldr r1, [sp, #36] @ 4-byte Reload - ldr r2, [sp] @ 
4-byte Reload - str r12, [r0, #32] - adcs lr, r1, r2 - ldr r1, [sp, #40] @ 4-byte Reload - ldr r2, [sp, #4] @ 4-byte Reload - str lr, [r0, #36] - adcs r3, r1, r2 - ldr r1, [sp, #44] @ 4-byte Reload - ldr r2, [sp, #8] @ 4-byte Reload - str r3, [r0, #40] - adcs r7, r1, r2 - ldr r1, [sp, #48] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - str r7, [r0, #44] - adcs r6, r1, r2 - ldr r1, [sp, #52] @ 4-byte Reload - ldr r2, [sp, #16] @ 4-byte Reload - str r6, [r0, #48] - adcs r5, r1, r11 - ldr r1, [sp, #56] @ 4-byte Reload - str r5, [r0, #52] - adcs r4, r1, r2 - ldr r1, [sp, #60] @ 4-byte Reload - ldr r2, [sp, #20] @ 4-byte Reload - str r4, [r0, #56] - adcs r1, r1, r2 - str r1, [r0, #60] - mov r0, #0 - adc r0, r0, #0 - add sp, sp, #64 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end245: - .size mcl_fp_addPre16L, .Lfunc_end245-mcl_fp_addPre16L - .cantunwind - .fnend - - .globl mcl_fp_subPre16L - .align 2 - .type mcl_fp_subPre16L,%function -mcl_fp_subPre16L: @ @mcl_fp_subPre16L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #64 - sub sp, sp, #64 - ldm r2, {r3, r8} - ldr r5, [r2, #8] - ldr r6, [r2, #12] - ldm r1, {r7, r12, lr} - ldr r4, [r1, #12] - ldr r9, [r1, #32] - ldr r11, [r1, #52] - subs r3, r7, r3 - str r3, [sp, #28] @ 4-byte Spill - ldr r3, [r2, #32] - sbcs r7, r12, r8 - ldr r8, [r2, #24] - add r12, r1, #16 - sbcs r5, lr, r5 - ldr lr, [r2, #16] - sbcs r6, r4, r6 - ldr r4, [r2, #20] - str r3, [sp, #32] @ 4-byte Spill - ldr r3, [r2, #36] - str r3, [sp, #36] @ 4-byte Spill - ldr r3, [r2, #40] - str r3, [sp, #40] @ 4-byte Spill - ldr r3, [r2, #44] - str r3, [sp, #44] @ 4-byte Spill - ldr r3, [r2, #48] - str r3, [sp, #48] @ 4-byte Spill - ldr r3, [r2, #52] - str r3, [sp, #52] @ 4-byte Spill - ldr r3, [r2, #56] - str r3, [sp, #56] @ 4-byte Spill - ldr r3, [r2, #60] - str r3, [sp, #60] @ 4-byte Spill - ldr r3, [r2, #28] - ldr r2, [r1, #36] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #40] - str r3, [sp, #24] @ 4-byte Spill - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #44] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #48] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r1, #56] - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #20] @ 4-byte Spill - ldm r12, {r1, r2, r3, r12} - ldr r10, [sp, #28] @ 4-byte Reload - sbcs r1, r1, lr - str r10, [r0] - str r7, [r0, #4] - str r5, [r0, #8] - str r6, [r0, #12] - sbcs r2, r2, r4 - str r1, [r0, #16] - str r2, [r0, #20] - sbcs r1, r3, r8 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [r0, #24] - ldr r1, [sp, #32] @ 4-byte Reload - sbcs r2, r12, r2 - sbcs r12, r9, r1 - str r2, [r0, #28] - ldr r1, [sp, #36] @ 4-byte Reload - ldr r2, [sp] @ 4-byte Reload - str r12, [r0, #32] - sbcs lr, r2, r1 - ldr r1, [sp, #40] @ 4-byte Reload - ldr r2, [sp, #4] @ 4-byte Reload - str lr, [r0, #36] - sbcs r3, r2, r1 - ldr r1, [sp, #44] @ 4-byte Reload - ldr r2, [sp, #8] @ 4-byte Reload - str r3, [r0, #40] - sbcs r7, r2, r1 - ldr r1, [sp, #48] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - str r7, [r0, #44] - sbcs r6, r2, r1 - ldr r1, [sp, #52] @ 4-byte Reload - ldr r2, [sp, #16] @ 4-byte Reload - str r6, [r0, #48] - sbcs r5, r11, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r5, [r0, #52] - sbcs r4, r2, r1 - ldr r1, [sp, #60] @ 4-byte Reload - ldr r2, [sp, #20] @ 4-byte Reload - str r4, [r0, #56] - sbcs r1, r2, r1 - str r1, [r0, #60] - mov r0, #0 - sbc r0, r0, #0 - and r0, r0, #1 - add sp, sp, #64 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, 
lr -.Lfunc_end246: - .size mcl_fp_subPre16L, .Lfunc_end246-mcl_fp_subPre16L - .cantunwind - .fnend - - .globl mcl_fp_shr1_16L - .align 2 - .type mcl_fp_shr1_16L,%function -mcl_fp_shr1_16L: @ @mcl_fp_shr1_16L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #24 - sub sp, sp, #24 - ldr r3, [r1, #16] - ldr r2, [r1, #20] - ldr r12, [r1, #24] - ldr r11, [r1, #28] - ldm r1, {r4, r5, r6, r7} - ldr r8, [r1, #56] - ldr lr, [r1, #32] - ldr r9, [r1, #36] - ldr r10, [r1, #40] - str r4, [sp, #4] @ 4-byte Spill - lsr r4, r5, #1 - str r8, [sp, #16] @ 4-byte Spill - orr r4, r4, r6, lsl #31 - str r4, [sp] @ 4-byte Spill - ldr r4, [r1, #44] - str r4, [sp, #8] @ 4-byte Spill - ldr r4, [r1, #48] - str r4, [sp, #12] @ 4-byte Spill - ldr r4, [r1, #52] - ldr r1, [r1, #60] - str r1, [sp, #20] @ 4-byte Spill - lsr r1, r7, #1 - lsrs r7, r7, #1 - rrx r6, r6 - lsrs r5, r5, #1 - orr r1, r1, r3, lsl #31 - ldr r5, [sp, #4] @ 4-byte Reload - rrx r5, r5 - str r5, [r0] - ldr r5, [sp] @ 4-byte Reload - stmib r0, {r5, r6} - str r1, [r0, #12] - lsrs r1, r2, #1 - rrx r1, r3 - str r1, [r0, #16] - lsr r1, r2, #1 - lsr r2, r9, #1 - orr r1, r1, r12, lsl #31 - str r1, [r0, #20] - lsrs r1, r11, #1 - rrx r1, r12 - str r1, [r0, #24] - lsr r1, r11, #1 - orr r1, r1, lr, lsl #31 - str r1, [r0, #28] - lsrs r1, r9, #1 - ldr r1, [sp, #8] @ 4-byte Reload - rrx r12, lr - orr lr, r2, r10, lsl #31 - mov r2, r4 - lsr r5, r2, #1 - str r12, [r0, #32] - str lr, [r0, #36] - lsrs r3, r1, #1 - lsr r7, r1, #1 - ldr r1, [sp, #12] @ 4-byte Reload - rrx r3, r10 - lsrs r6, r2, #1 - ldr r2, [sp, #16] @ 4-byte Reload - str r3, [r0, #40] - orr r7, r7, r1, lsl #31 - rrx r6, r1 - ldr r1, [sp, #20] @ 4-byte Reload - orr r5, r5, r2, lsl #31 - str r7, [r0, #44] - str r6, [r0, #48] - str r5, [r0, #52] - lsrs r4, r1, #1 - lsr r1, r1, #1 - rrx r4, r2 - str r4, [r0, #56] - str r1, [r0, #60] - add sp, sp, #24 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end247: - .size mcl_fp_shr1_16L, .Lfunc_end247-mcl_fp_shr1_16L - .cantunwind - .fnend - - .globl mcl_fp_add16L - .align 2 - .type mcl_fp_add16L,%function -mcl_fp_add16L: @ @mcl_fp_add16L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #60 - sub sp, sp, #60 - ldr r9, [r1] - ldmib r1, {r8, lr} - ldr r12, [r1, #12] - ldm r2, {r4, r5, r6, r7} - adds r9, r4, r9 - ldr r4, [r1, #24] - adcs r5, r5, r8 - mov r8, r9 - adcs r6, r6, lr - str r5, [sp, #36] @ 4-byte Spill - ldr r5, [r1, #20] - str r8, [r0] - adcs r10, r7, r12 - str r6, [sp, #32] @ 4-byte Spill - ldr r6, [r1, #16] - ldr r7, [r2, #16] - ldr lr, [sp, #36] @ 4-byte Reload - str r10, [sp] @ 4-byte Spill - adcs r7, r7, r6 - ldr r6, [r1, #28] - str lr, [r0, #4] - str r7, [sp, #28] @ 4-byte Spill - ldr r7, [r2, #20] - ldr r9, [sp, #28] @ 4-byte Reload - adcs r7, r7, r5 - ldr r5, [r2, #28] - str r7, [sp, #24] @ 4-byte Spill - ldr r7, [r2, #24] - adcs r7, r7, r4 - ldr r4, [r2, #32] - str r7, [sp, #52] @ 4-byte Spill - adcs r7, r5, r6 - ldr r5, [r1, #32] - str r7, [sp, #40] @ 4-byte Spill - adcs r7, r4, r5 - ldr r5, [r1, #36] - ldr r4, [r2, #36] - str r7, [sp, #48] @ 4-byte Spill - adcs r7, r4, r5 - ldr r5, [r1, #40] - ldr r4, [r2, #40] - str r7, [sp, #56] @ 4-byte Spill - adcs r7, r4, r5 - ldr r5, [r1, #44] - ldr r4, [r2, #44] - str r7, [sp, #44] @ 4-byte Spill - ldr r7, [r2, #48] - adcs r11, r4, r5 - ldr r4, [r1, #48] - ldr r5, [r2, #52] - str r11, [sp, #20] @ 4-byte Spill - adcs r12, r7, r4 - ldr r7, [r1, #52] - 
ldr r4, [sp, #32] @ 4-byte Reload - str r12, [sp, #16] @ 4-byte Spill - adcs r6, r5, r7 - ldr r7, [r1, #56] - ldr r5, [r2, #56] - ldr r1, [r1, #60] - ldr r2, [r2, #60] - str r4, [r0, #8] - str r10, [r0, #12] - ldr r10, [sp, #24] @ 4-byte Reload - str r9, [r0, #16] - str r6, [sp, #4] @ 4-byte Spill - adcs r5, r5, r7 - str r10, [r0, #20] - add r7, r0, #40 - adcs r2, r2, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r2, [sp, #8] @ 4-byte Spill - str r1, [r0, #24] - ldr r1, [sp, #40] @ 4-byte Reload - str r1, [r0, #28] - ldr r1, [sp, #48] @ 4-byte Reload - str r1, [r0, #32] - ldr r1, [sp, #56] @ 4-byte Reload - str r1, [r0, #36] - ldr r1, [sp, #44] @ 4-byte Reload - stm r7, {r1, r11, r12} - str r6, [r0, #52] - str r5, [r0, #56] - str r2, [r0, #60] - mov r2, #0 - mov r12, r5 - add r11, r3, #32 - adc r1, r2, #0 - str r1, [sp, #12] @ 4-byte Spill - ldm r3, {r5, r7} - ldr r1, [r3, #8] - ldr r2, [r3, #12] - subs r8, r8, r5 - sbcs lr, lr, r7 - sbcs r1, r4, r1 - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp] @ 4-byte Reload - sbcs r1, r1, r2 - ldr r2, [sp, #52] @ 4-byte Reload - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [r3, #16] - sbcs r1, r9, r1 - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [r3, #20] - sbcs r1, r10, r1 - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [r3, #24] - sbcs r1, r2, r1 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [r3, #28] - sbcs r1, r2, r1 - str r1, [sp, #40] @ 4-byte Spill - ldm r11, {r1, r2, r5, r7, r9, r10, r11} - ldr r6, [sp, #48] @ 4-byte Reload - ldr r3, [r3, #60] - sbcs r1, r6, r1 - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - sbcs r1, r1, r2 - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #44] @ 4-byte Reload - sbcs r2, r1, r5 - ldr r1, [sp, #20] @ 4-byte Reload - sbcs r5, r1, r7 - ldr r1, [sp, #16] @ 4-byte Reload - sbcs r7, r1, r9 - ldr r1, [sp, #4] @ 4-byte Reload - sbcs r4, r1, r10 - ldr r1, [sp, #8] @ 4-byte Reload - sbcs r6, r12, r11 - sbcs r1, r1, r3 - ldr r3, [sp, #12] @ 4-byte Reload - sbc r3, r3, #0 - tst r3, #1 - bne .LBB248_2 -@ BB#1: @ %nocarry - stm r0, {r8, lr} - ldr r3, [sp, #36] @ 4-byte Reload - str r3, [r0, #8] - ldr r3, [sp, #32] @ 4-byte Reload - str r3, [r0, #12] - ldr r3, [sp, #28] @ 4-byte Reload - str r3, [r0, #16] - ldr r3, [sp, #24] @ 4-byte Reload - str r3, [r0, #20] - ldr r3, [sp, #52] @ 4-byte Reload - str r3, [r0, #24] - ldr r3, [sp, #40] @ 4-byte Reload - str r3, [r0, #28] - ldr r3, [sp, #48] @ 4-byte Reload - str r3, [r0, #32] - ldr r3, [sp, #56] @ 4-byte Reload - str r3, [r0, #36] - add r3, r0, #40 - stm r3, {r2, r5, r7} - str r4, [r0, #52] - str r6, [r0, #56] - str r1, [r0, #60] -.LBB248_2: @ %carry - add sp, sp, #60 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end248: - .size mcl_fp_add16L, .Lfunc_end248-mcl_fp_add16L - .cantunwind - .fnend - - .globl mcl_fp_addNF16L - .align 2 - .type mcl_fp_addNF16L,%function -mcl_fp_addNF16L: @ @mcl_fp_addNF16L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #88 - sub sp, sp, #88 - mov r12, r0 - ldm r1, {r0, r9} - ldr r8, [r1, #8] - ldr lr, [r1, #12] - ldm r2, {r4, r5, r6, r7} - adds r10, r4, r0 - ldr r4, [r1, #20] - ldr r0, [r1, #24] - adcs r9, r5, r9 - ldr r5, [r1, #16] - adcs r8, r6, r8 - str r9, [sp, #4] @ 4-byte Spill - adcs r6, r7, lr - ldr r7, [r2, #16] - str r8, [sp, #8] @ 4-byte Spill - str r6, [sp, #16] @ 4-byte Spill - adcs r7, r7, r5 - str r7, [sp, #52] @ 4-byte Spill - ldr r7, [r2, #20] - adcs r7, r7, r4 - str r7, [sp, #44] @ 
4-byte Spill - ldr r7, [r2, #24] - adcs r0, r7, r0 - ldr r7, [r2, #28] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [r1, #28] - adcs r0, r7, r0 - ldr r7, [r2, #32] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [r1, #32] - adcs r0, r7, r0 - ldr r7, [r2, #36] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [r1, #36] - adcs r0, r7, r0 - ldr r7, [r2, #40] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [r1, #40] - adcs r0, r7, r0 - ldr r7, [r2, #44] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [r1, #44] - adcs r0, r7, r0 - ldr r7, [r2, #48] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [r1, #48] - adcs r0, r7, r0 - ldr r7, [r2, #52] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [r1, #52] - adcs r0, r7, r0 - ldr r7, [r2, #56] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [r1, #56] - adcs r0, r7, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [r1, #60] - ldr r1, [r2, #60] - adc r11, r1, r0 - ldm r3, {r0, r7} - ldr r1, [r3, #8] - ldr r4, [r3, #12] - subs lr, r10, r0 - ldr r0, [r3, #32] - sbcs r5, r9, r7 - ldr r9, [sp, #44] @ 4-byte Reload - sbcs r7, r8, r1 - ldr r1, [sp, #52] @ 4-byte Reload - sbcs r8, r6, r4 - ldr r4, [r3, #24] - ldr r6, [r3, #20] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [r3, #36] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [r3, #40] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [r3, #44] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [r3, #48] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [r3, #52] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [r3, #56] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r3, #60] - str r0, [sp] @ 4-byte Spill - ldr r0, [r3, #28] - ldr r3, [r3, #16] - sbcs r1, r1, r3 - ldr r3, [sp, #48] @ 4-byte Reload - sbcs r2, r9, r6 - ldr r6, [sp, #12] @ 4-byte Reload - sbcs r3, r3, r4 - ldr r4, [sp, #84] @ 4-byte Reload - sbcs r4, r4, r0 - ldr r0, [sp, #80] @ 4-byte Reload - sbcs r0, r0, r6 - ldr r6, [sp, #20] @ 4-byte Reload - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - sbcs r0, r0, r6 - ldr r6, [sp, #24] @ 4-byte Reload - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - sbcs r0, r0, r6 - ldr r6, [sp, #28] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - sbcs r0, r0, r6 - ldr r6, [sp, #32] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - sbcs r0, r0, r6 - ldr r6, [sp, #36] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - sbcs r0, r0, r6 - ldr r6, [sp, #40] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - sbcs r0, r0, r6 - ldr r6, [sp, #4] @ 4-byte Reload - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp] @ 4-byte Reload - sbc r0, r11, r0 - cmp r0, #0 - movlt lr, r10 - movlt r5, r6 - ldr r6, [sp, #28] @ 4-byte Reload - str lr, [r12] - str r5, [r12, #4] - ldr r5, [sp, #8] @ 4-byte Reload - ldr lr, [sp, #12] @ 4-byte Reload - movlt r7, r5 - cmp r0, #0 - ldr r5, [sp, #32] @ 4-byte Reload - str r7, [r12, #8] - ldr r7, [sp, #16] @ 4-byte Reload - movlt r2, r9 - movlt r8, r7 - ldr r7, [sp, #52] @ 4-byte Reload - str r8, [r12, #12] - movlt r1, r7 - cmp r0, #0 - ldr r7, [sp, #24] @ 4-byte Reload - str r1, [r12, #16] - ldr r1, [sp, #48] @ 4-byte Reload - str r2, [r12, #20] - ldr r2, [sp, #40] @ 4-byte Reload - movlt r3, r1 - ldr r1, [sp, #84] @ 4-byte Reload - str r3, [r12, #24] - ldr r3, [sp, #20] @ 4-byte Reload - movlt r4, r1 - ldr r1, [sp, #80] @ 4-byte Reload - str r4, [r12, #28] - ldr r4, [sp, #36] @ 4-byte Reload - movlt lr, r1 - ldr r1, [sp, #76] @ 4-byte Reload - cmp 
r0, #0 - str lr, [r12, #32] - movlt r3, r1 - ldr r1, [sp, #72] @ 4-byte Reload - str r3, [r12, #36] - movlt r7, r1 - ldr r1, [sp, #68] @ 4-byte Reload - str r7, [r12, #40] - movlt r6, r1 - ldr r1, [sp, #64] @ 4-byte Reload - cmp r0, #0 - str r6, [r12, #44] - movlt r5, r1 - ldr r1, [sp, #60] @ 4-byte Reload - str r5, [r12, #48] - movlt r4, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r4, [r12, #52] - movlt r2, r1 - cmp r0, #0 - movlt r0, r11 - str r2, [r12, #56] - str r0, [r12, #60] - add sp, sp, #88 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end249: - .size mcl_fp_addNF16L, .Lfunc_end249-mcl_fp_addNF16L - .cantunwind - .fnend - - .globl mcl_fp_sub16L - .align 2 - .type mcl_fp_sub16L,%function -mcl_fp_sub16L: @ @mcl_fp_sub16L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #84 - sub sp, sp, #84 - ldr r9, [r2] - ldmib r2, {r8, lr} - ldr r5, [r1] - ldr r12, [r2, #12] - ldmib r1, {r4, r6, r7} - subs r5, r5, r9 - sbcs r4, r4, r8 - str r5, [sp, #60] @ 4-byte Spill - ldr r5, [r2, #24] - sbcs r6, r6, lr - str r4, [sp, #64] @ 4-byte Spill - ldr r4, [r2, #20] - sbcs r7, r7, r12 - str r6, [sp, #68] @ 4-byte Spill - ldr r6, [r2, #16] - str r7, [sp, #76] @ 4-byte Spill - ldr r7, [r1, #16] - sbcs r7, r7, r6 - ldr r6, [r1, #28] - str r7, [sp, #72] @ 4-byte Spill - ldr r7, [r1, #20] - sbcs r7, r7, r4 - str r7, [sp, #80] @ 4-byte Spill - ldr r7, [r1, #24] - sbcs r7, r7, r5 - str r7, [sp, #56] @ 4-byte Spill - ldr r7, [r2, #28] - sbcs r11, r6, r7 - ldr r7, [r2, #32] - ldr r6, [r1, #32] - sbcs r10, r6, r7 - ldr r7, [r2, #36] - ldr r6, [r1, #36] - sbcs r8, r6, r7 - ldr r7, [r2, #40] - ldr r6, [r1, #40] - str r8, [sp, #52] @ 4-byte Spill - sbcs r5, r6, r7 - ldr r7, [r2, #44] - ldr r6, [r1, #44] - str r5, [sp, #48] @ 4-byte Spill - sbcs r4, r6, r7 - ldr r6, [r2, #48] - ldr r7, [r1, #48] - str r4, [sp, #44] @ 4-byte Spill - sbcs lr, r7, r6 - ldr r6, [r2, #52] - ldr r7, [r1, #52] - str lr, [sp, #40] @ 4-byte Spill - sbcs r9, r7, r6 - ldr r6, [r2, #56] - ldr r7, [r1, #56] - ldr r2, [r2, #60] - ldr r1, [r1, #60] - sbcs r6, r7, r6 - sbcs r12, r1, r2 - ldr r1, [sp, #60] @ 4-byte Reload - mov r2, #0 - str r6, [sp, #36] @ 4-byte Spill - sbc r2, r2, #0 - str r12, [sp, #32] @ 4-byte Spill - tst r2, #1 - str r1, [r0] - ldr r1, [sp, #64] @ 4-byte Reload - str r1, [r0, #4] - ldr r1, [sp, #68] @ 4-byte Reload - str r1, [r0, #8] - ldr r1, [sp, #76] @ 4-byte Reload - str r1, [r0, #12] - ldr r1, [sp, #72] @ 4-byte Reload - str r1, [r0, #16] - ldr r1, [sp, #80] @ 4-byte Reload - str r1, [r0, #20] - ldr r1, [sp, #56] @ 4-byte Reload - str r1, [r0, #24] - str r11, [r0, #28] - str r10, [r0, #32] - str r8, [r0, #36] - str r5, [r0, #40] - str r4, [r0, #44] - str lr, [r0, #48] - str r9, [r0, #52] - str r6, [r0, #56] - str r12, [r0, #60] - beq .LBB250_2 -@ BB#1: @ %carry - ldr r2, [r3, #32] - ldr r8, [r3, #60] - str r11, [sp] @ 4-byte Spill - ldr r5, [r3] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r3, #36] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r3, #40] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r3, #44] - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [r3, #48] - str r2, [sp, #20] @ 4-byte Spill - ldr r2, [r3, #52] - str r2, [sp, #24] @ 4-byte Spill - ldr r2, [r3, #56] - str r2, [sp, #28] @ 4-byte Spill - ldmib r3, {r4, r11, r12} - ldr r1, [sp, #60] @ 4-byte Reload - ldr r7, [sp, #76] @ 4-byte Reload - ldr lr, [r3, #20] - ldr r2, [sp, #80] @ 4-byte Reload - adds r5, r5, r1 - ldr r1, [sp, #64] @ 4-byte Reload - str r5, [r0] - adcs r4, 
r4, r1 - ldr r1, [sp, #68] @ 4-byte Reload - str r4, [r0, #4] - adcs r1, r11, r1 - ldr r11, [r3, #24] - adcs r6, r12, r7 - str r1, [r0, #8] - ldr r12, [r3, #28] - ldr r3, [r3, #16] - ldr r1, [sp, #72] @ 4-byte Reload - ldr r7, [sp, #44] @ 4-byte Reload - str r6, [r0, #12] - adcs r1, r3, r1 - str r1, [r0, #16] - ldr r1, [sp, #56] @ 4-byte Reload - adcs r3, lr, r2 - ldr r2, [sp] @ 4-byte Reload - str r3, [r0, #20] - adcs r1, r11, r1 - str r1, [r0, #24] - ldr r1, [sp, #4] @ 4-byte Reload - adcs r3, r12, r2 - ldr r2, [sp, #52] @ 4-byte Reload - str r3, [r0, #28] - ldr r3, [sp, #48] @ 4-byte Reload - adcs lr, r1, r10 - ldr r1, [sp, #8] @ 4-byte Reload - str lr, [r0, #32] - adcs r2, r1, r2 - ldr r1, [sp, #12] @ 4-byte Reload - adcs r3, r1, r3 - ldr r1, [sp, #16] @ 4-byte Reload - adcs r6, r1, r7 - ldr r7, [sp, #40] @ 4-byte Reload - ldr r1, [sp, #20] @ 4-byte Reload - adcs r5, r1, r7 - ldr r1, [sp, #24] @ 4-byte Reload - ldr r7, [sp, #36] @ 4-byte Reload - adcs r4, r1, r9 - ldr r1, [sp, #28] @ 4-byte Reload - adcs r7, r1, r7 - ldr r1, [sp, #32] @ 4-byte Reload - adc r12, r8, r1 - add r1, r0, #36 - stm r1, {r2, r3, r6} - str r5, [r0, #48] - add r0, r0, #52 - stm r0, {r4, r7, r12} -.LBB250_2: @ %nocarry - add sp, sp, #84 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end250: - .size mcl_fp_sub16L, .Lfunc_end250-mcl_fp_sub16L - .cantunwind - .fnend - - .globl mcl_fp_subNF16L - .align 2 - .type mcl_fp_subNF16L,%function -mcl_fp_subNF16L: @ @mcl_fp_subNF16L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #92 - sub sp, sp, #92 - ldr r7, [r2, #32] - add r9, r2, #8 - str r7, [sp, #68] @ 4-byte Spill - ldr r7, [r2, #36] - str r7, [sp, #72] @ 4-byte Spill - ldr r7, [r2, #40] - str r7, [sp, #76] @ 4-byte Spill - ldr r7, [r2, #44] - str r7, [sp, #80] @ 4-byte Spill - ldr r7, [r2, #48] - str r7, [sp, #84] @ 4-byte Spill - ldr r7, [r2, #52] - str r7, [sp, #88] @ 4-byte Spill - ldr r7, [r2, #56] - str r7, [sp, #56] @ 4-byte Spill - ldr r7, [r2, #60] - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [r1, #60] - str r7, [sp, #32] @ 4-byte Spill - ldr r7, [r1, #56] - str r7, [sp, #28] @ 4-byte Spill - ldr r7, [r1, #52] - str r7, [sp, #24] @ 4-byte Spill - ldr r7, [r1, #48] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r1, #44] - str r7, [sp, #12] @ 4-byte Spill - ldm r2, {r10, r11} - ldm r9, {r5, r6, r7, r9} - ldr r4, [r2, #24] - ldr r2, [r2, #28] - str r4, [sp, #60] @ 4-byte Spill - str r2, [sp, #64] @ 4-byte Spill - ldm r1, {r2, r12, lr} - ldr r4, [r1, #12] - ldr r8, [r1, #40] - subs r2, r2, r10 - str r2, [sp, #40] @ 4-byte Spill - sbcs r2, r12, r11 - ldr r12, [r1, #36] - sbcs lr, lr, r5 - str r2, [sp, #20] @ 4-byte Spill - ldr r5, [r1, #32] - ldr r2, [sp, #60] @ 4-byte Reload - sbcs r4, r4, r6 - ldr r6, [r1, #16] - str lr, [sp] @ 4-byte Spill - str r4, [sp, #44] @ 4-byte Spill - sbcs r4, r6, r7 - ldr r7, [r1, #20] - str r4, [sp, #52] @ 4-byte Spill - sbcs r4, r7, r9 - ldr r7, [r1, #28] - ldr r1, [r1, #24] - str r4, [sp, #48] @ 4-byte Spill - sbcs r1, r1, r2 - ldr r2, [sp, #12] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - sbcs r1, r7, r1 - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #68] @ 4-byte Reload - sbcs r1, r5, r1 - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - sbcs r1, r12, r1 - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - sbcs r1, r8, r1 - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - sbcs r1, 
r2, r1 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - sbcs r1, r2, r1 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #88] @ 4-byte Reload - sbcs r1, r2, r1 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #88] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - sbcs r1, r2, r1 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #36] @ 4-byte Reload - sbc r2, r2, r1 - ldr r1, [r3, #32] - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [r3, #36] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [r3, #40] - str r1, [sp, #4] @ 4-byte Spill - ldr r1, [r3, #44] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [r3, #48] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [r3, #52] - str r1, [sp, #8] @ 4-byte Spill - ldr r1, [r3, #56] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [r3, #60] - str r1, [sp, #36] @ 4-byte Spill - ldm r3, {r1, r4, r5, r6, r7, r8, r9, r10} - ldr r3, [sp, #40] @ 4-byte Reload - ldr r11, [sp, #20] @ 4-byte Reload - adds r1, r3, r1 - adcs r3, r11, r4 - ldr r4, [sp, #52] @ 4-byte Reload - adcs r12, lr, r5 - ldr r5, [sp, #44] @ 4-byte Reload - adcs lr, r5, r6 - ldr r5, [sp, #48] @ 4-byte Reload - ldr r6, [sp, #60] @ 4-byte Reload - adcs r4, r4, r7 - ldr r7, [sp, #64] @ 4-byte Reload - adcs r5, r5, r8 - ldr r8, [sp, #88] @ 4-byte Reload - adcs r9, r6, r9 - ldr r6, [sp, #12] @ 4-byte Reload - adcs r10, r7, r10 - ldr r7, [sp, #68] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #16] @ 4-byte Reload - str r7, [sp, #12] @ 4-byte Spill - ldr r7, [sp, #72] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #4] @ 4-byte Reload - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [sp, #76] @ 4-byte Reload - adcs r6, r7, r6 - ldr r7, [sp, #80] @ 4-byte Reload - str r6, [sp, #4] @ 4-byte Spill - ldr r6, [sp, #24] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #28] @ 4-byte Reload - str r7, [sp, #24] @ 4-byte Spill - ldr r7, [sp, #84] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #40] @ 4-byte Reload - str r7, [sp, #28] @ 4-byte Spill - ldr r7, [sp, #8] @ 4-byte Reload - adcs r7, r8, r7 - ldr r8, [sp, #32] @ 4-byte Reload - str r7, [sp, #8] @ 4-byte Spill - ldr r7, [sp, #56] @ 4-byte Reload - adcs r7, r7, r8 - str r7, [sp, #32] @ 4-byte Spill - ldr r7, [sp, #36] @ 4-byte Reload - adc r7, r2, r7 - cmp r2, #0 - movge r1, r6 - movge r3, r11 - str r7, [sp, #36] @ 4-byte Spill - ldr r7, [sp, #4] @ 4-byte Reload - ldr r6, [sp, #24] @ 4-byte Reload - str r1, [r0] - ldr r1, [sp] @ 4-byte Reload - str r3, [r0, #4] - ldr r3, [sp, #8] @ 4-byte Reload - movge r12, r1 - ldr r1, [sp, #44] @ 4-byte Reload - cmp r2, #0 - str r12, [r0, #8] - ldr r12, [sp, #12] @ 4-byte Reload - movge lr, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str lr, [r0, #12] - ldr lr, [sp, #16] @ 4-byte Reload - movge r4, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r4, [r0, #16] - ldr r4, [sp, #32] @ 4-byte Reload - movge r5, r1 - ldr r1, [sp, #60] @ 4-byte Reload - cmp r2, #0 - str r5, [r0, #20] - ldr r5, [sp, #28] @ 4-byte Reload - movge r9, r1 - ldr r1, [sp, #64] @ 4-byte Reload - str r9, [r0, #24] - movge r10, r1 - ldr r1, [sp, #68] @ 4-byte Reload - str r10, [r0, #28] - movge r12, r1 - ldr r1, [sp, #72] @ 4-byte Reload - cmp r2, #0 - str r12, [r0, #32] - movge lr, r1 - ldr r1, [sp, #76] @ 4-byte Reload - str lr, [r0, #36] - movge r7, r1 - ldr r1, [sp, #80] @ 4-byte Reload - str r7, [r0, #40] - movge r6, r1 - ldr r1, [sp, #84] @ 4-byte Reload - cmp r2, #0 - str r6, [r0, #44] - movge r5, r1 - ldr r1, [sp, #88] @ 4-byte 
Reload - str r5, [r0, #48] - movge r3, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r3, [r0, #52] - movge r4, r1 - ldr r1, [sp, #36] @ 4-byte Reload - cmp r2, #0 - movge r1, r2 - str r4, [r0, #56] - str r1, [r0, #60] - add sp, sp, #92 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end251: - .size mcl_fp_subNF16L, .Lfunc_end251-mcl_fp_subNF16L - .cantunwind - .fnend - - .globl mcl_fpDbl_add16L - .align 2 - .type mcl_fpDbl_add16L,%function -mcl_fpDbl_add16L: @ @mcl_fpDbl_add16L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #200 - sub sp, sp, #200 - ldm r1, {r7, r8, lr} - ldr r12, [r1, #12] - ldm r2, {r4, r5, r6, r9} - add r10, r1, #32 - adds r4, r4, r7 - str r4, [sp, #100] @ 4-byte Spill - ldr r4, [r2, #96] - str r4, [sp, #164] @ 4-byte Spill - ldr r4, [r2, #100] - str r4, [sp, #160] @ 4-byte Spill - ldr r4, [r2, #104] - str r4, [sp, #156] @ 4-byte Spill - ldr r4, [r2, #108] - str r4, [sp, #180] @ 4-byte Spill - ldr r4, [r2, #112] - str r4, [sp, #184] @ 4-byte Spill - ldr r4, [r2, #116] - str r4, [sp, #188] @ 4-byte Spill - ldr r4, [r2, #120] - str r4, [sp, #192] @ 4-byte Spill - ldr r4, [r2, #124] - str r4, [sp, #196] @ 4-byte Spill - adcs r4, r5, r8 - adcs r7, r6, lr - str r4, [sp, #68] @ 4-byte Spill - add lr, r1, #16 - str r7, [sp, #64] @ 4-byte Spill - adcs r7, r9, r12 - str r7, [sp, #76] @ 4-byte Spill - ldr r7, [r2, #64] - str r7, [sp, #136] @ 4-byte Spill - ldr r7, [r2, #68] - str r7, [sp, #140] @ 4-byte Spill - ldr r7, [r2, #72] - str r7, [sp, #144] @ 4-byte Spill - ldr r7, [r2, #76] - str r7, [sp, #148] @ 4-byte Spill - ldr r7, [r2, #80] - str r7, [sp, #168] @ 4-byte Spill - ldr r7, [r2, #84] - str r7, [sp, #152] @ 4-byte Spill - ldr r7, [r2, #88] - str r7, [sp, #172] @ 4-byte Spill - ldr r7, [r2, #92] - str r7, [sp, #176] @ 4-byte Spill - ldr r7, [r2, #32] - str r7, [sp, #56] @ 4-byte Spill - ldr r7, [r2, #36] - str r7, [sp, #60] @ 4-byte Spill - ldr r7, [r2, #40] - str r7, [sp, #72] @ 4-byte Spill - ldr r7, [r2, #44] - str r7, [sp, #80] @ 4-byte Spill - ldr r7, [r2, #48] - str r7, [sp, #84] @ 4-byte Spill - ldr r7, [r2, #52] - str r7, [sp, #88] @ 4-byte Spill - ldr r7, [r2, #56] - str r7, [sp, #92] @ 4-byte Spill - ldr r7, [r2, #60] - str r7, [sp, #96] @ 4-byte Spill - ldr r7, [r2, #28] - str r7, [sp, #20] @ 4-byte Spill - ldr r7, [r2, #24] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r2, #20] - ldr r2, [r2, #16] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #96] - str r7, [sp, #12] @ 4-byte Spill - str r2, [sp, #104] @ 4-byte Spill - ldr r2, [r1, #100] - str r2, [sp, #108] @ 4-byte Spill - ldr r2, [r1, #104] - str r2, [sp, #112] @ 4-byte Spill - ldr r2, [r1, #108] - str r2, [sp, #116] @ 4-byte Spill - ldr r2, [r1, #112] - str r2, [sp, #120] @ 4-byte Spill - ldr r2, [r1, #116] - str r2, [sp, #124] @ 4-byte Spill - ldr r2, [r1, #120] - str r2, [sp, #128] @ 4-byte Spill - ldr r2, [r1, #124] - str r2, [sp, #132] @ 4-byte Spill - ldr r2, [r1, #64] - str r2, [sp, #24] @ 4-byte Spill - ldr r2, [r1, #68] - str r2, [sp, #28] @ 4-byte Spill - ldr r2, [r1, #72] - str r2, [sp, #32] @ 4-byte Spill - ldr r2, [r1, #76] - str r2, [sp, #36] @ 4-byte Spill - ldr r2, [r1, #80] - str r2, [sp, #40] @ 4-byte Spill - ldr r2, [r1, #84] - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [r1, #88] - str r2, [sp, #48] @ 4-byte Spill - ldr r2, [r1, #92] - str r2, [sp, #52] @ 4-byte Spill - ldm r10, {r4, r5, r6, r8, r9, r10} - ldr r2, [r1, #56] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #4] @ 
4-byte Spill - ldm lr, {r1, r2, r12, lr} - ldr r11, [sp, #100] @ 4-byte Reload - ldr r7, [sp, #68] @ 4-byte Reload - str r11, [r0] - str r7, [r0, #4] - ldr r7, [sp, #64] @ 4-byte Reload - str r7, [r0, #8] - ldr r7, [sp, #8] @ 4-byte Reload - adcs r1, r7, r1 - ldr r7, [sp, #76] @ 4-byte Reload - str r7, [r0, #12] - ldr r7, [sp, #12] @ 4-byte Reload - str r1, [r0, #16] - ldr r1, [sp, #16] @ 4-byte Reload - adcs r2, r7, r2 - ldr r7, [sp] @ 4-byte Reload - str r2, [r0, #20] - adcs r1, r1, r12 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [r0, #24] - ldr r1, [sp, #56] @ 4-byte Reload - adcs r2, r2, lr - str r2, [r0, #28] - adcs r1, r1, r4 - ldr r2, [sp, #60] @ 4-byte Reload - str r1, [r0, #32] - ldr r1, [sp, #72] @ 4-byte Reload - adcs r2, r2, r5 - str r2, [r0, #36] - adcs r1, r1, r6 - ldr r2, [sp, #80] @ 4-byte Reload - str r1, [r0, #40] - ldr r1, [sp, #84] @ 4-byte Reload - adcs r2, r2, r8 - str r2, [r0, #44] - adcs r1, r1, r9 - ldr r2, [sp, #88] @ 4-byte Reload - str r1, [r0, #48] - ldr r1, [sp, #92] @ 4-byte Reload - adcs r2, r2, r10 - adcs r1, r1, r7 - str r2, [r0, #52] - ldr r2, [sp, #96] @ 4-byte Reload - ldr r7, [sp, #4] @ 4-byte Reload - str r1, [r0, #56] - ldr r1, [sp, #136] @ 4-byte Reload - adcs r2, r2, r7 - ldr r7, [sp, #24] @ 4-byte Reload - str r2, [r0, #60] - ldr r2, [sp, #28] @ 4-byte Reload - adcs r12, r1, r7 - ldr r1, [sp, #140] @ 4-byte Reload - str r12, [sp, #92] @ 4-byte Spill - adcs r9, r1, r2 - ldr r1, [sp, #144] @ 4-byte Reload - ldr r2, [sp, #32] @ 4-byte Reload - str r9, [sp, #96] @ 4-byte Spill - adcs r8, r1, r2 - ldr r1, [sp, #148] @ 4-byte Reload - ldr r2, [sp, #36] @ 4-byte Reload - str r8, [sp, #100] @ 4-byte Spill - adcs r4, r1, r2 - ldr r1, [sp, #168] @ 4-byte Reload - ldr r2, [sp, #40] @ 4-byte Reload - str r4, [sp, #136] @ 4-byte Spill - adcs r1, r1, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r1, [sp, #168] @ 4-byte Spill - ldr r1, [sp, #152] @ 4-byte Reload - adcs r10, r1, r2 - ldr r1, [sp, #172] @ 4-byte Reload - ldr r2, [sp, #48] @ 4-byte Reload - str r10, [sp, #88] @ 4-byte Spill - adcs r1, r1, r2 - ldr r2, [sp, #52] @ 4-byte Reload - str r1, [sp, #172] @ 4-byte Spill - ldr r1, [sp, #176] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #104] @ 4-byte Reload - str r1, [sp, #176] @ 4-byte Spill - ldr r1, [sp, #164] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #108] @ 4-byte Reload - str r1, [sp, #164] @ 4-byte Spill - ldr r1, [sp, #160] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #112] @ 4-byte Reload - str r1, [sp, #160] @ 4-byte Spill - ldr r1, [sp, #156] @ 4-byte Reload - adcs r11, r1, r2 - ldr r1, [sp, #180] @ 4-byte Reload - ldr r2, [sp, #116] @ 4-byte Reload - str r11, [sp, #140] @ 4-byte Spill - adcs r1, r1, r2 - ldr r2, [sp, #120] @ 4-byte Reload - str r1, [sp, #180] @ 4-byte Spill - ldr r1, [sp, #184] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #124] @ 4-byte Reload - str r1, [sp, #184] @ 4-byte Spill - ldr r1, [sp, #188] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #128] @ 4-byte Reload - str r1, [sp, #188] @ 4-byte Spill - ldr r1, [sp, #192] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #132] @ 4-byte Reload - str r1, [sp, #192] @ 4-byte Spill - ldr r1, [sp, #196] @ 4-byte Reload - adcs r1, r1, r2 - str r1, [sp, #196] @ 4-byte Spill - mov r1, #0 - adc r1, r1, #0 - str r1, [sp, #128] @ 4-byte Spill - ldm r3, {r2, r7} - ldr r1, [r3, #36] - ldr r6, [r3, #8] - ldr r5, [r3, #12] - str r1, [sp, #120] @ 4-byte Spill - ldr r1, [r3, #40] - subs r12, r12, r2 - ldr r2, [sp, #168] @ 4-byte Reload - sbcs lr, r9, r7 - sbcs r7, r8, r6 - 
ldr r8, [r3, #32] - ldr r6, [r3, #24] - sbcs r9, r4, r5 - ldr r5, [r3, #28] - str r1, [sp, #124] @ 4-byte Spill - ldr r1, [r3, #44] - str r1, [sp, #132] @ 4-byte Spill - ldr r1, [r3, #48] - str r1, [sp, #144] @ 4-byte Spill - ldr r1, [r3, #52] - str r1, [sp, #148] @ 4-byte Spill - ldr r1, [r3, #56] - str r1, [sp, #152] @ 4-byte Spill - ldr r1, [r3, #60] - str r1, [sp, #156] @ 4-byte Spill - ldr r1, [r3, #20] - ldr r3, [r3, #16] - sbcs r2, r2, r3 - sbcs r3, r10, r1 - ldr r1, [sp, #172] @ 4-byte Reload - sbcs r4, r1, r6 - ldr r1, [sp, #176] @ 4-byte Reload - ldr r6, [sp, #120] @ 4-byte Reload - sbcs r5, r1, r5 - ldr r1, [sp, #164] @ 4-byte Reload - sbcs r8, r1, r8 - ldr r1, [sp, #160] @ 4-byte Reload - sbcs r10, r1, r6 - ldr r1, [sp, #124] @ 4-byte Reload - ldr r6, [sp, #132] @ 4-byte Reload - sbcs r11, r11, r1 - ldr r1, [sp, #180] @ 4-byte Reload - sbcs r1, r1, r6 - ldr r6, [sp, #144] @ 4-byte Reload - str r1, [sp, #132] @ 4-byte Spill - ldr r1, [sp, #184] @ 4-byte Reload - sbcs r1, r1, r6 - ldr r6, [sp, #148] @ 4-byte Reload - str r1, [sp, #144] @ 4-byte Spill - ldr r1, [sp, #188] @ 4-byte Reload - sbcs r1, r1, r6 - ldr r6, [sp, #152] @ 4-byte Reload - str r1, [sp, #148] @ 4-byte Spill - ldr r1, [sp, #192] @ 4-byte Reload - sbcs r1, r1, r6 - ldr r6, [sp, #156] @ 4-byte Reload - str r1, [sp, #152] @ 4-byte Spill - ldr r1, [sp, #196] @ 4-byte Reload - sbcs r1, r1, r6 - ldr r6, [sp, #92] @ 4-byte Reload - str r1, [sp, #156] @ 4-byte Spill - ldr r1, [sp, #128] @ 4-byte Reload - sbc r1, r1, #0 - ands r1, r1, #1 - movne r12, r6 - ldr r6, [sp, #96] @ 4-byte Reload - str r12, [r0, #64] - movne lr, r6 - ldr r6, [sp, #100] @ 4-byte Reload - str lr, [r0, #68] - movne r7, r6 - cmp r1, #0 - str r7, [r0, #72] - ldr r7, [sp, #136] @ 4-byte Reload - movne r9, r7 - ldr r7, [sp, #168] @ 4-byte Reload - str r9, [r0, #76] - movne r2, r7 - str r2, [r0, #80] - ldr r2, [sp, #88] @ 4-byte Reload - movne r3, r2 - ldr r2, [sp, #172] @ 4-byte Reload - cmp r1, #0 - str r3, [r0, #84] - ldr r3, [sp, #132] @ 4-byte Reload - movne r4, r2 - ldr r2, [sp, #176] @ 4-byte Reload - str r4, [r0, #88] - movne r5, r2 - ldr r2, [sp, #164] @ 4-byte Reload - str r5, [r0, #92] - movne r8, r2 - ldr r2, [sp, #160] @ 4-byte Reload - cmp r1, #0 - str r8, [r0, #96] - movne r10, r2 - ldr r2, [sp, #140] @ 4-byte Reload - str r10, [r0, #100] - movne r11, r2 - ldr r2, [sp, #180] @ 4-byte Reload - str r11, [r0, #104] - movne r3, r2 - ldr r2, [sp, #184] @ 4-byte Reload - cmp r1, #0 - str r3, [r0, #108] - ldr r3, [sp, #144] @ 4-byte Reload - movne r3, r2 - ldr r2, [sp, #188] @ 4-byte Reload - str r3, [r0, #112] - ldr r3, [sp, #148] @ 4-byte Reload - movne r3, r2 - ldr r2, [sp, #192] @ 4-byte Reload - str r3, [r0, #116] - ldr r3, [sp, #152] @ 4-byte Reload - movne r3, r2 - cmp r1, #0 - ldr r1, [sp, #196] @ 4-byte Reload - ldr r2, [sp, #156] @ 4-byte Reload - str r3, [r0, #120] - movne r2, r1 - str r2, [r0, #124] - add sp, sp, #200 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end252: - .size mcl_fpDbl_add16L, .Lfunc_end252-mcl_fpDbl_add16L - .cantunwind - .fnend - - .globl mcl_fpDbl_sub16L - .align 2 - .type mcl_fpDbl_sub16L,%function -mcl_fpDbl_sub16L: @ @mcl_fpDbl_sub16L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #200 - sub sp, sp, #200 - ldr r7, [r2, #96] - ldr r9, [r2] - add r10, r1, #32 - str r7, [sp, #168] @ 4-byte Spill - ldr r7, [r2, #100] - str r7, [sp, #172] @ 4-byte Spill - ldr r7, [r2, #104] - str r7, [sp, #176] @ 4-byte Spill - ldr r7, 
[r2, #108] - str r7, [sp, #180] @ 4-byte Spill - ldr r7, [r2, #112] - str r7, [sp, #184] @ 4-byte Spill - ldr r7, [r2, #116] - str r7, [sp, #188] @ 4-byte Spill - ldr r7, [r2, #120] - str r7, [sp, #192] @ 4-byte Spill - ldr r7, [r2, #124] - str r7, [sp, #196] @ 4-byte Spill - ldr r7, [r2, #64] - str r7, [sp, #136] @ 4-byte Spill - ldr r7, [r2, #68] - str r7, [sp, #140] @ 4-byte Spill - ldr r7, [r2, #72] - str r7, [sp, #144] @ 4-byte Spill - ldr r7, [r2, #76] - str r7, [sp, #148] @ 4-byte Spill - ldr r7, [r2, #80] - str r7, [sp, #152] @ 4-byte Spill - ldr r7, [r2, #84] - str r7, [sp, #156] @ 4-byte Spill - ldr r7, [r2, #88] - str r7, [sp, #160] @ 4-byte Spill - ldr r7, [r2, #92] - str r7, [sp, #164] @ 4-byte Spill - ldr r7, [r2, #60] - str r7, [sp, #132] @ 4-byte Spill - ldr r7, [r2, #56] - str r7, [sp, #128] @ 4-byte Spill - ldmib r2, {r8, r12, lr} - ldm r1, {r4, r5, r6, r7} - subs r4, r4, r9 - str r4, [sp, #36] @ 4-byte Spill - ldr r4, [r2, #52] - str r4, [sp, #92] @ 4-byte Spill - sbcs r4, r5, r8 - sbcs r6, r6, r12 - str r4, [sp, #32] @ 4-byte Spill - ldr r4, [r2, #48] - sbcs r7, r7, lr - str r6, [sp, #24] @ 4-byte Spill - ldr r6, [r2, #44] - add lr, r1, #16 - str r7, [sp, #20] @ 4-byte Spill - ldr r7, [r2, #40] - str r4, [sp, #88] @ 4-byte Spill - str r6, [sp, #84] @ 4-byte Spill - str r7, [sp, #80] @ 4-byte Spill - ldr r7, [r2, #36] - str r7, [sp, #48] @ 4-byte Spill - ldr r7, [r2, #32] - str r7, [sp, #40] @ 4-byte Spill - ldr r7, [r2, #28] - str r7, [sp, #28] @ 4-byte Spill - ldr r7, [r2, #24] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r2, #20] - ldr r2, [r2, #16] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #96] - str r7, [sp, #12] @ 4-byte Spill - str r2, [sp, #96] @ 4-byte Spill - ldr r2, [r1, #100] - str r2, [sp, #100] @ 4-byte Spill - ldr r2, [r1, #104] - str r2, [sp, #104] @ 4-byte Spill - ldr r2, [r1, #108] - str r2, [sp, #108] @ 4-byte Spill - ldr r2, [r1, #112] - str r2, [sp, #112] @ 4-byte Spill - ldr r2, [r1, #116] - str r2, [sp, #116] @ 4-byte Spill - ldr r2, [r1, #120] - str r2, [sp, #120] @ 4-byte Spill - ldr r2, [r1, #124] - str r2, [sp, #124] @ 4-byte Spill - ldr r2, [r1, #64] - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [r1, #68] - str r2, [sp, #52] @ 4-byte Spill - ldr r2, [r1, #72] - str r2, [sp, #56] @ 4-byte Spill - ldr r2, [r1, #76] - str r2, [sp, #60] @ 4-byte Spill - ldr r2, [r1, #80] - str r2, [sp, #64] @ 4-byte Spill - ldr r2, [r1, #84] - str r2, [sp, #68] @ 4-byte Spill - ldr r2, [r1, #88] - str r2, [sp, #72] @ 4-byte Spill - ldr r2, [r1, #92] - str r2, [sp, #76] @ 4-byte Spill - ldm r10, {r4, r5, r6, r8, r9, r10} - ldr r2, [r1, #56] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #4] @ 4-byte Spill - ldm lr, {r1, r2, r12, lr} - ldr r11, [sp, #36] @ 4-byte Reload - ldr r7, [sp, #32] @ 4-byte Reload - str r11, [r0] - str r7, [r0, #4] - ldr r7, [sp, #24] @ 4-byte Reload - add r11, r3, #12 - str r7, [r0, #8] - ldr r7, [sp, #8] @ 4-byte Reload - sbcs r1, r1, r7 - ldr r7, [sp, #20] @ 4-byte Reload - str r7, [r0, #12] - ldr r7, [sp, #12] @ 4-byte Reload - str r1, [r0, #16] - ldr r1, [sp, #16] @ 4-byte Reload - sbcs r2, r2, r7 - ldr r7, [sp] @ 4-byte Reload - str r2, [r0, #20] - sbcs r1, r12, r1 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [r0, #24] - ldr r1, [sp, #40] @ 4-byte Reload - sbcs r2, lr, r2 - str r2, [r0, #28] - sbcs r1, r4, r1 - ldr r2, [sp, #48] @ 4-byte Reload - str r1, [r0, #32] - ldr r1, [sp, #80] @ 4-byte Reload - sbcs r2, r5, r2 - ldr r5, [sp, #72] @ 4-byte Reload - str r2, [r0, #36] - sbcs r1, r6, r1 - ldr r2, [sp, #84] 
@ 4-byte Reload - str r1, [r0, #40] - ldr r1, [sp, #88] @ 4-byte Reload - sbcs r2, r8, r2 - str r2, [r0, #44] - sbcs r1, r9, r1 - ldr r2, [sp, #92] @ 4-byte Reload - str r1, [r0, #48] - ldr r1, [sp, #128] @ 4-byte Reload - sbcs r2, r10, r2 - sbcs r1, r7, r1 - str r2, [r0, #52] - ldr r2, [sp, #132] @ 4-byte Reload - ldr r7, [sp, #4] @ 4-byte Reload - str r1, [r0, #56] - ldr r1, [sp, #136] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #44] @ 4-byte Reload - str r2, [r0, #60] - ldr r2, [sp, #52] @ 4-byte Reload - sbcs r9, r7, r1 - ldr r1, [sp, #140] @ 4-byte Reload - ldr r7, [sp, #96] @ 4-byte Reload - str r9, [sp, #80] @ 4-byte Spill - sbcs r1, r2, r1 - ldr r2, [sp, #56] @ 4-byte Reload - str r1, [sp, #132] @ 4-byte Spill - ldr r1, [sp, #144] @ 4-byte Reload - sbcs r12, r2, r1 - ldr r1, [sp, #148] @ 4-byte Reload - ldr r2, [sp, #60] @ 4-byte Reload - str r12, [sp, #84] @ 4-byte Spill - sbcs lr, r2, r1 - ldr r1, [sp, #152] @ 4-byte Reload - ldr r2, [sp, #64] @ 4-byte Reload - str lr, [sp, #88] @ 4-byte Spill - sbcs r4, r2, r1 - ldr r1, [sp, #156] @ 4-byte Reload - ldr r2, [sp, #68] @ 4-byte Reload - str r4, [sp, #92] @ 4-byte Spill - sbcs r1, r2, r1 - ldr r2, [sp, #160] @ 4-byte Reload - str r1, [sp, #156] @ 4-byte Spill - mov r1, #0 - sbcs r2, r5, r2 - ldr r5, [sp, #76] @ 4-byte Reload - str r2, [sp, #160] @ 4-byte Spill - ldr r2, [sp, #164] @ 4-byte Reload - sbcs r2, r5, r2 - str r2, [sp, #164] @ 4-byte Spill - ldr r2, [sp, #168] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #100] @ 4-byte Reload - str r2, [sp, #168] @ 4-byte Spill - ldr r2, [sp, #172] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #104] @ 4-byte Reload - str r2, [sp, #172] @ 4-byte Spill - ldr r2, [sp, #176] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #108] @ 4-byte Reload - str r2, [sp, #176] @ 4-byte Spill - ldr r2, [sp, #180] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #112] @ 4-byte Reload - str r2, [sp, #180] @ 4-byte Spill - ldr r2, [sp, #184] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #116] @ 4-byte Reload - str r2, [sp, #184] @ 4-byte Spill - ldr r2, [sp, #188] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #120] @ 4-byte Reload - str r2, [sp, #188] @ 4-byte Spill - ldr r2, [sp, #192] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #124] @ 4-byte Reload - str r2, [sp, #192] @ 4-byte Spill - ldr r2, [sp, #196] @ 4-byte Reload - sbcs r2, r7, r2 - sbc r1, r1, #0 - str r2, [sp, #196] @ 4-byte Spill - str r1, [sp, #128] @ 4-byte Spill - ldr r1, [r3, #32] - str r1, [sp, #116] @ 4-byte Spill - ldr r1, [r3, #36] - str r1, [sp, #120] @ 4-byte Spill - ldr r1, [r3, #40] - str r1, [sp, #136] @ 4-byte Spill - ldr r1, [r3, #44] - str r1, [sp, #140] @ 4-byte Spill - ldr r1, [r3, #48] - str r1, [sp, #144] @ 4-byte Spill - ldr r1, [r3, #52] - str r1, [sp, #148] @ 4-byte Spill - ldr r1, [r3, #56] - str r1, [sp, #152] @ 4-byte Spill - ldr r1, [r3, #60] - str r1, [sp, #124] @ 4-byte Spill - ldm r3, {r2, r6, r7} - ldm r11, {r5, r8, r11} - ldr r1, [r3, #28] - ldr r10, [r3, #24] - str r1, [sp, #112] @ 4-byte Spill - adds r1, r9, r2 - ldr r9, [sp, #132] @ 4-byte Reload - adcs r2, r9, r6 - ldr r6, [sp, #164] @ 4-byte Reload - adcs r3, r12, r7 - ldr r7, [sp, #112] @ 4-byte Reload - adcs r12, lr, r5 - ldr r5, [sp, #160] @ 4-byte Reload - adcs lr, r4, r8 - ldr r4, [sp, #156] @ 4-byte Reload - adcs r4, r4, r11 - adcs r5, r5, r10 - adcs r8, r6, r7 - ldr r7, [sp, #168] @ 4-byte Reload - ldr r6, [sp, #116] @ 4-byte Reload - adcs r11, r7, r6 - ldr r7, [sp, #172] @ 4-byte Reload - ldr r6, [sp, #120] @ 4-byte Reload - adcs 
r6, r7, r6 - ldr r7, [sp, #176] @ 4-byte Reload - str r6, [sp, #120] @ 4-byte Spill - ldr r6, [sp, #136] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #140] @ 4-byte Reload - str r7, [sp, #136] @ 4-byte Spill - ldr r7, [sp, #180] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #144] @ 4-byte Reload - str r7, [sp, #140] @ 4-byte Spill - ldr r7, [sp, #184] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #148] @ 4-byte Reload - str r7, [sp, #144] @ 4-byte Spill - ldr r7, [sp, #188] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #152] @ 4-byte Reload - str r7, [sp, #148] @ 4-byte Spill - ldr r7, [sp, #192] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #124] @ 4-byte Reload - str r7, [sp, #152] @ 4-byte Spill - ldr r7, [sp, #196] @ 4-byte Reload - adc r7, r7, r6 - ldr r6, [sp, #80] @ 4-byte Reload - str r7, [sp, #124] @ 4-byte Spill - ldr r7, [sp, #128] @ 4-byte Reload - ands r10, r7, #1 - moveq r1, r6 - moveq r2, r9 - str r1, [r0, #64] - ldr r1, [sp, #84] @ 4-byte Reload - str r2, [r0, #68] - ldr r2, [sp, #120] @ 4-byte Reload - moveq r3, r1 - ldr r1, [sp, #88] @ 4-byte Reload - cmp r10, #0 - str r3, [r0, #72] - moveq r12, r1 - ldr r1, [sp, #92] @ 4-byte Reload - str r12, [r0, #76] - moveq lr, r1 - ldr r1, [sp, #156] @ 4-byte Reload - str lr, [r0, #80] - moveq r4, r1 - ldr r1, [sp, #160] @ 4-byte Reload - cmp r10, #0 - str r4, [r0, #84] - moveq r5, r1 - ldr r1, [sp, #164] @ 4-byte Reload - str r5, [r0, #88] - moveq r8, r1 - ldr r1, [sp, #168] @ 4-byte Reload - str r8, [r0, #92] - moveq r11, r1 - ldr r1, [sp, #172] @ 4-byte Reload - cmp r10, #0 - str r11, [r0, #96] - moveq r2, r1 - ldr r1, [sp, #176] @ 4-byte Reload - str r2, [r0, #100] - ldr r2, [sp, #136] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #180] @ 4-byte Reload - str r2, [r0, #104] - ldr r2, [sp, #140] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #184] @ 4-byte Reload - cmp r10, #0 - str r2, [r0, #108] - ldr r2, [sp, #144] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #188] @ 4-byte Reload - str r2, [r0, #112] - ldr r2, [sp, #148] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #192] @ 4-byte Reload - str r2, [r0, #116] - ldr r2, [sp, #152] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #196] @ 4-byte Reload - cmp r10, #0 - str r2, [r0, #120] - ldr r2, [sp, #124] @ 4-byte Reload - moveq r2, r1 - str r2, [r0, #124] - add sp, sp, #200 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end253: - .size mcl_fpDbl_sub16L, .Lfunc_end253-mcl_fpDbl_sub16L - .cantunwind - .fnend - - .align 2 - .type .LmulPv544x32,%function -.LmulPv544x32: @ @mulPv544x32 - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r11, lr} - push {r4, r5, r6, r7, r8, r9, r11, lr} - ldr r12, [r1] - ldmib r1, {r3, lr} - ldr r9, [r1, #12] - umull r4, r8, lr, r2 - umull lr, r6, r12, r2 - mov r5, r4 - mov r7, r6 - str lr, [r0] - umull lr, r12, r9, r2 - umlal r7, r5, r3, r2 - str r5, [r0, #8] - str r7, [r0, #4] - umull r5, r7, r3, r2 - adds r3, r6, r5 - adcs r3, r7, r4 - adcs r3, r8, lr - str r3, [r0, #12] - ldr r3, [r1, #16] - umull r7, r6, r3, r2 - adcs r3, r12, r7 - str r3, [r0, #16] - ldr r3, [r1, #20] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #20] - ldr r3, [r1, #24] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #24] - ldr r3, [r1, #28] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #28] - ldr r3, [r1, #32] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #32] - ldr r3, [r1, #36] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #36] - ldr r3, [r1, #40] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str 
r3, [r0, #40] - ldr r3, [r1, #44] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #44] - ldr r3, [r1, #48] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #48] - ldr r3, [r1, #52] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #52] - ldr r3, [r1, #56] - umull r7, r6, r3, r2 - adcs r3, r5, r7 - str r3, [r0, #56] - ldr r3, [r1, #60] - umull r7, r5, r3, r2 - adcs r3, r6, r7 - str r3, [r0, #60] - ldr r1, [r1, #64] - umull r3, r7, r1, r2 - adcs r1, r5, r3 - adc r2, r7, #0 - str r1, [r0, #64] - str r2, [r0, #68] - pop {r4, r5, r6, r7, r8, r9, r11, lr} - mov pc, lr -.Lfunc_end254: - .size .LmulPv544x32, .Lfunc_end254-.LmulPv544x32 - .cantunwind - .fnend - - .globl mcl_fp_mulUnitPre17L - .align 2 - .type mcl_fp_mulUnitPre17L,%function -mcl_fp_mulUnitPre17L: @ @mcl_fp_mulUnitPre17L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #100 - sub sp, sp, #100 - mov r4, r0 - add r0, sp, #24 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #92] - add r11, sp, #48 - add lr, sp, #24 - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #88] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #84] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #80] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #76] - str r0, [sp, #4] @ 4-byte Spill - ldm r11, {r5, r6, r7, r8, r9, r10, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - stm r4, {r0, r1, r2, r3, r12, lr} - add r0, r4, #24 - stm r0, {r5, r6, r7, r8, r9, r10, r11} - ldr r0, [sp, #4] @ 4-byte Reload - str r0, [r4, #52] - ldr r0, [sp, #8] @ 4-byte Reload - str r0, [r4, #56] - ldr r0, [sp, #12] @ 4-byte Reload - str r0, [r4, #60] - ldr r0, [sp, #16] @ 4-byte Reload - str r0, [r4, #64] - ldr r0, [sp, #20] @ 4-byte Reload - str r0, [r4, #68] - add sp, sp, #100 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end255: - .size mcl_fp_mulUnitPre17L, .Lfunc_end255-mcl_fp_mulUnitPre17L - .cantunwind - .fnend - - .globl mcl_fpDbl_mulPre17L - .align 2 - .type mcl_fpDbl_mulPre17L,%function -mcl_fpDbl_mulPre17L: @ @mcl_fpDbl_mulPre17L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #340 - sub sp, sp, #340 - .pad #1024 - sub sp, sp, #1024 - mov r9, r2 - add r6, sp, #1024 - mov r4, r0 - str r1, [sp, #128] @ 4-byte Spill - mov r5, r1 - ldr r2, [r9] - add r0, r6, #264 - str r9, [sp, #124] @ 4-byte Spill - str r4, [sp, #132] @ 4-byte Spill - bl .LmulPv544x32(PLT) - ldr r0, [sp, #1356] - ldr r1, [sp, #1292] - ldr r2, [r9, #4] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #1352] - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #1296] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #1348] - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #1300] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #1344] - str r1, [sp, #44] @ 4-byte Spill - mov r1, r5 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #1340] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #1336] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #1332] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #1328] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #1324] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #1320] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #1316] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #1312] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #1308] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #1304] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #1288] - str r0, [r4] - add r0, sp, #1216 - bl 
.LmulPv544x32(PLT) - ldr r0, [sp, #1284] - add lr, sp, #1216 - ldr r10, [sp, #1256] - ldr r8, [sp, #1252] - ldr r7, [sp, #1248] - ldr r6, [sp, #1244] - ldr r5, [sp, #1240] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #1280] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1276] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1272] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1268] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1264] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1260] - str r0, [sp, #28] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #56] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r4, #4] - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r9, #8] - add r9, sp, #1024 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r6, r0 - ldr r6, [sp, #128] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r6 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - add r0, r9, #120 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #1212] - ldr r9, [sp, #56] @ 4-byte Reload - ldr r8, [sp, #1184] - ldr r7, [sp, #1180] - ldr r11, [sp, #1176] - ldr r5, [sp, #1172] - ldr lr, [sp, #1168] - ldr r10, [sp, #1164] - ldr r12, [sp, #1160] - ldr r1, [sp, #1148] - ldr r2, [sp, #1152] - ldr r3, [sp, #1156] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1208] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1204] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1200] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1196] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1192] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1188] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1144] - adds r0, r0, r9 - str r0, [r4, #8] - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #76] @ 4-byte Spill 
- ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r5, r0 - ldr r5, [sp, #124] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - ldr r2, [r5, #12] - adcs r0, r11, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r6 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - add r0, sp, #1072 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #1140] - add lr, sp, #1072 - ldr r10, [sp, #1112] - ldr r9, [sp, #1108] - ldr r8, [sp, #1104] - ldr r7, [sp, #1100] - ldr r6, [sp, #1096] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1136] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1132] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1128] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1124] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1120] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1116] - str r0, [sp, #28] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #56] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r4, #12] - ldr r0, [sp, #80] @ 4-byte Reload - ldr r4, [sp, #128] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #16] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #116] @ 
4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - add r0, sp, #1000 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #1068] - add r11, sp, #1024 - add lr, sp, #1000 - ldr r6, [sp, #1040] - ldr r5, [sp, #1036] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1064] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1060] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1056] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1052] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1048] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1044] - str r0, [sp, #28] @ 4-byte Spill - ldm r11, {r9, r10, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #56] @ 4-byte Reload - ldr r8, [sp, #132] @ 4-byte Reload - adds r0, r0, r7 - str r0, [r8, #16] - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r5, r0 - ldr r5, [sp, #124] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - ldr r2, [r5, #20] - adcs r0, r6, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - add r0, sp, #928 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #996] - add r11, sp, #952 - add lr, sp, #928 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #992] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #988] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #984] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #980] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #976] - str r0, [sp, #32] @ 4-byte Spill - ldm r11, {r6, r7, r8, r9, r10, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r4, [sp, #56] @ 4-byte Reload - adds r0, r0, r4 - ldr r4, [sp, #132] @ 4-byte Reload - 
str r0, [r4, #20] - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r5, #24] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r6, r0 - ldr r6, [sp, #128] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r6 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - add r0, sp, #856 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #924] - add r11, sp, #880 - add lr, sp, #856 - ldr r7, [sp, #896] - ldr r5, [sp, #892] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #920] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #916] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #912] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #908] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #904] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #900] - str r0, [sp, #28] @ 4-byte Spill - ldm r11, {r9, r10, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r8, [sp, #56] @ 4-byte Reload - adds r0, r0, r8 - str r0, [r4, #24] - ldr r0, [sp, #80] @ 4-byte Reload - ldr r4, [sp, #124] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r4, #28] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r5, r0 - mov r5, r6 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str 
r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - add r0, sp, #784 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #852] - add r10, sp, #808 - add lr, sp, #784 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #848] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #844] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #840] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #836] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #832] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #828] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #824] - str r0, [sp, #24] @ 4-byte Spill - ldm r10, {r6, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r7, [sp, #56] @ 4-byte Reload - ldr r11, [sp, #132] @ 4-byte Reload - adds r0, r0, r7 - str r0, [r11, #28] - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r4, #32] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - add r0, sp, #712 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #780] - add r8, sp, #748 - add r11, sp, #736 - add lr, sp, #712 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #776] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #772] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #768] - str r0, [sp, #40] @ 
4-byte Spill - ldr r0, [sp, #764] - str r0, [sp, #36] @ 4-byte Spill - ldm r8, {r4, r6, r7, r8} - ldm r11, {r9, r10, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r5, [sp, #56] @ 4-byte Reload - adds r0, r0, r5 - ldr r5, [sp, #132] @ 4-byte Reload - str r0, [r5, #32] - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #128] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r6, r0 - ldr r6, [sp, #124] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - ldr r2, [r6, #36] - adcs r0, r7, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - add r0, sp, #640 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #708] - add r10, sp, #664 - add lr, sp, #640 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #704] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #700] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #696] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #692] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #688] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #684] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #680] - str r0, [sp, #24] @ 4-byte Spill - ldm r10, {r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #56] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r5, #36] - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r6, #40] - mov r6, r4 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs 
r0, r10, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - add r0, sp, #568 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #636] - add r11, sp, #592 - add lr, sp, #568 - ldr r7, [sp, #608] - ldr r4, [sp, #604] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #632] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #628] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #624] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #620] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #616] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #612] - str r0, [sp, #24] @ 4-byte Spill - ldm r11, {r9, r10, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r8, [sp, #56] @ 4-byte Reload - adds r0, r0, r8 - str r0, [r5, #40] - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #124] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - ldr r2, [r4, #44] - adcs r0, r7, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r6 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - add r0, sp, #496 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #564] - add r10, sp, #520 - add lr, sp, #496 - 
str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #560] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #556] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #552] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #548] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #544] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #540] - str r0, [sp, #20] @ 4-byte Spill - ldm r10, {r6, r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #48] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r5, #44] - ldr r0, [sp, #100] @ 4-byte Reload - ldr r5, [sp, #128] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r4, #48] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r8, r0 - mov r8, r4 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - add r0, sp, #424 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #492] - add lr, sp, #428 - ldr r9, [sp, #460] - ldr r7, [sp, #456] - ldr r11, [sp, #452] - ldr r10, [sp, #448] - ldr r3, [sp, #424] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #488] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #484] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #480] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #476] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #472] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #468] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #464] - str r0, [sp, #20] @ 4-byte Spill - ldm lr, {r0, r1, r2, r12, lr} - ldr r6, [sp, #48] @ 4-byte Reload - ldr r4, [sp, #120] @ 4-byte Reload - adds r3, r3, r6 - ldr r6, [sp, #132] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - str r3, [r6, #48] - ldr r3, [r8, #52] - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r2, r0 - mov r2, r3 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 
4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r5 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - add r0, sp, #352 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #420] - add r11, sp, #380 - add r12, sp, #356 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #416] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #412] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #408] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #404] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #400] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #396] - str r0, [sp, #28] @ 4-byte Spill - ldm r11, {r4, r9, r10, r11} - ldr r5, [sp, #376] - ldr lr, [sp, #352] - ldm r12, {r0, r1, r2, r3, r12} - ldr r7, [sp, #52] @ 4-byte Reload - adds r7, lr, r7 - ldr lr, [r8, #56] - str r7, [r6, #52] - ldr r6, [sp, #120] @ 4-byte Reload - add r7, sp, #280 - adcs r0, r0, r6 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r2, r0 - mov r2, lr - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #128] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - 
ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r4 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - mov r0, r7 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #348] - add r8, sp, #316 - add r11, sp, #304 - add lr, sp, #280 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #344] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #340] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #336] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #332] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #328] - str r0, [sp, #28] @ 4-byte Spill - ldm r8, {r6, r7, r8} - ldm r11, {r9, r10, r11} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r5, [sp, #52] @ 4-byte Reload - adds r0, r0, r5 - ldr r5, [sp, #132] @ 4-byte Reload - str r0, [r5, #56] - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r8, r0 - ldr r8, [sp, #124] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - ldr r2, [r8, #60] - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r4 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - add r0, sp, #208 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #276] - add lr, sp, #228 - add r12, sp, #212 - ldr r6, [sp, #248] - ldr r9, [sp, #244] - ldr r4, [sp, #240] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #272] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #268] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #264] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #260] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #256] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #252] - str r0, [sp, #16] @ 4-byte Spill - ldm lr, {r10, r11, lr} - ldr r3, [sp, #208] - ldm r12, {r0, r1, r2, r12} - ldr r7, [sp, #88] @ 4-byte Reload - adds r3, r3, r7 - str r3, [r5, #60] - ldr r5, [sp, #120] @ 4-byte Reload - ldr r3, [r8, #64] - adcs r8, r0, r5 - ldr r0, [sp, #116] @ 4-byte Reload - adcs r5, r1, r0 - ldr r0, [sp, #112] 
@ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - adcs r0, r2, r0 - mov r2, r3 - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r4, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #128] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #124] @ 4-byte Spill - add r0, sp, #136 - bl .LmulPv544x32(PLT) - add r3, sp, #136 - add r11, sp, #172 - add lr, sp, #152 - ldm r3, {r0, r1, r2, r3} - adds r7, r0, r8 - ldr r0, [sp, #12] @ 4-byte Reload - adcs r6, r1, r5 - adcs r5, r2, r0 - ldr r0, [sp, #8] @ 4-byte Reload - adcs r4, r3, r0 - ldr r0, [sp, #204] - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #200] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #196] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #192] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #188] - str r0, [sp, #72] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldm lr, {r0, r2, r3, r12, lr} - ldr r1, [sp, #132] @ 4-byte Reload - str r7, [r1, #64] - str r6, [r1, #68] - str r5, [r1, #72] - ldr r5, [sp, #44] @ 4-byte Reload - str r4, [r1, #76] - ldr r4, [sp, #48] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [r1, #80] - ldr r0, [sp, #52] @ 4-byte Reload - adcs r2, r2, r4 - str r2, [r1, #84] - ldr r2, [sp, #88] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [r1, #88] - ldr r0, [sp, #92] @ 4-byte Reload - adcs r2, r12, r2 - str r2, [r1, #92] - ldr r2, [sp, #96] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [r1, #96] - ldr r0, [sp, #100] @ 4-byte Reload - adcs r2, r8, r2 - str r2, [r1, #100] - ldr r2, [sp, #104] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [r1, #104] - ldr r0, [sp, #108] @ 4-byte Reload - adcs r2, r10, r2 - str r2, [r1, #108] - ldr r2, [sp, #72] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [r1, #112] - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [sp, #76] @ 4-byte Reload - str r0, [r1, #116] - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [sp, #80] @ 4-byte Reload - str r0, [r1, #120] - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [sp, #84] @ 4-byte Reload - str r0, [r1, #124] - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [sp, #128] @ 4-byte Reload - str r0, [r1, #128] - adc r2, r2, #0 - str r2, [r1, #132] - add sp, sp, #340 - add sp, sp, 
#1024
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end256:
- .size mcl_fpDbl_mulPre17L, .Lfunc_end256-mcl_fpDbl_mulPre17L
- .cantunwind
- .fnend
-
- .globl mcl_fpDbl_sqrPre17L
- .align 2
- .type mcl_fpDbl_sqrPre17L,%function
-mcl_fpDbl_sqrPre17L: @ @mcl_fpDbl_sqrPre17L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #332
- sub sp, sp, #332
- .pad #1024
- sub sp, sp, #1024
- mov r7, r1
- mov r4, r0
- add r0, sp, #1280
- ldr r2, [r7]
- str r7, [sp, #120] @ 4-byte Spill
- str r4, [sp, #124] @ 4-byte Spill
- bl .LmulPv544x32(PLT)
- ldr r0, [sp, #1348]
- ldr r1, [sp, #1284]
- ldr r2, [r7, #4]
- add r11, sp, #1024
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #1344]
- str r1, [sp, #52] @ 4-byte Spill
- ldr r1, [sp, #1288]
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #1340]
- str r1, [sp, #76] @ 4-byte Spill
- ldr r1, [sp, #1292]
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #1336]
- str r1, [sp, #40] @ 4-byte Spill
- mov r1, r7
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #1332]
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #1328]
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #1324]
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #1320]
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #1316]
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #1312]
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #1308]
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #1304]
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #1300]
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #1296]
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #1280]
- str r0, [r4]
- add r0, r11, #184
- bl .LmulPv544x32(PLT)
- ldr r0, [sp, #1276]
- add r10, sp, #1232
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #1272]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1268]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1264]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1260]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1256]
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #1252]
- str r0, [sp, #24] @ 4-byte Spill
- ldm r10, {r5, r6, r8, r9, r10}
- ldr r0, [sp, #1208]
- ldr r11, [sp, #52] @ 4-byte Reload
- ldr lr, [sp, #1228]
- ldr r12, [sp, #1224]
- ldr r1, [sp, #1212]
- ldr r2, [sp, #1216]
- ldr r3, [sp, #1220]
- adds r0, r0, r11
- str r0, [r4, #4]
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #24] @ 4-byte Reload
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #40] @ 4-byte Reload
- adcs r0, r2, r0
- ldr r2, [r7, #8]
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r3, r0
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r12, r0
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, lr, r0
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r5, r0
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r6, r0
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r8, r0
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r9, r0
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r10, r0
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #28] @ 4-byte Reload
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #48] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r1, r0
- mov r1, r7
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #56] @ 4-byte Spill
- add r0, sp, #1136
- bl .LmulPv544x32(PLT)
- ldr r0, [sp, #1204]
- add r12, sp, #1136
- ldr r6, [sp, #1176]
- ldr r4, [sp, #1172]
- ldr lr, [sp, #1168]
- ldr r11, [sp, #1164]
- ldr r10, [sp, #1160]
- ldr r9, [sp, #1156]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1200]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1196]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1192]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1188]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1184]
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #1180]
- str r0, [sp, #24] @ 4-byte Spill
- ldm r12, {r0, r1, r2, r3, r12}
- ldr r5, [sp, #52] @ 4-byte Reload
- ldr r8, [sp, #124] @ 4-byte Reload
- adds r0, r0, r5
- str r0, [r8, #8]
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #24] @ 4-byte Reload
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r2, r0
- ldr r2, [r7, #12]
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r3, r0
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r12, r0
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r9, r0
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r10, r0
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r11, r0
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, lr, r0
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r4, r0
- add r4, sp, #1024
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r6, r0
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #28] @ 4-byte Reload
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r1, r0
- mov r1, r7
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #56] @ 4-byte Spill
- add r0, r4, #40
- bl .LmulPv544x32(PLT)
- ldr r0, [sp, #1132]
- add r11, sp, #1088
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1128]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1124]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1120]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1116]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1112]
- str r0, [sp, #28] @ 4-byte Spill
- ldm r11, {r5, r6, r8, r9, r10, r11}
- ldr r0, [sp, #1064]
- ldr r4, [sp, #52] @ 4-byte Reload
- ldr lr, [sp, #1084]
- ldr r12, [sp, #1080]
- ldr r1, [sp, #1068]
- ldr r2, [sp, #1072]
- ldr r3, [sp, #1076]
- adds r0, r0, r4
- ldr r4, [sp, #124] @ 4-byte Reload
- str r0, [r4, #12]
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #28] @ 4-byte Reload
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r2, r0
- ldr r2, [r7, #16]
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r3, r0
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #64] @ 4-byte Reload
- adcs r0, r12, r0
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, lr, r0
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r5, r0
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r6, r0
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r8, r0
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, r9, r0
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r10, r0
- str r0, [sp, #100] @ 4-byte Spill
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r11, r0
- str r0, [sp, #104] @ 4-byte Spill
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #32] @ 4-byte Reload
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #36] @ 4-byte Reload
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #40] @ 4-byte Reload
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #60] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #44] @ 4-byte Reload
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #56] @ 4-byte Reload
- adcs r0, r1, r0
- mov r1, r7
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #48] @ 4-byte Reload
- adc r0, r0, #0
- str r0, [sp, #56] @ 4-byte Spill
- add r0, sp, #992
- bl .LmulPv544x32(PLT)
- ldr r0, [sp, #1060]
- add lr, sp, #1012
- add r12, sp, #992
- ldr r6, [sp, #1032]
- ldr r5, [sp, #1028]
- str r0, [sp, #48] @ 4-byte Spill
- ldr r0, [sp, #1056]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #1052]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #1048]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #1044]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #1040]
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #1036]
- str r0, [sp, #24] @ 4-byte Spill
- ldm lr, {r9, r10, r11, lr}
- ldm r12, {r0, r1, r2, r3, r12}
- ldr r8, [sp, #52] @ 4-byte Reload
- adds r0, r0, r8
- str r0, [r4, #16]
- ldr r0, [sp, #76] @ 4-byte Reload
- adcs r0, r1, r0
- ldr r1, [sp, #24] @ 4-byte Reload
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #72] @ 4-byte Reload
- adcs r0, r2, r0
- ldr r2, [r7, #20]
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #68] @ 4-byte Reload
- adcs r0, r3, r0
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #80] @ 4-byte Reload
- adcs r0, r12, r0
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #84] @ 4-byte Reload
- adcs r0, r9, r0
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #88] @ 4-byte Reload
- adcs r0, r10, r0
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #92] @ 4-byte Reload
- adcs r0, r11, r0
- str r0, [sp, #92] @ 4-byte Spill
- ldr r0, [sp, #96] @ 4-byte Reload
- adcs r0, lr, r0
- str r0, [sp, #96] @ 4-byte Spill
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r0, r5, r0
- str r0, [sp, #100] @ 4-byte Spill
ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r7 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - add r0, sp, #920 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #988] - add r10, sp, #944 - add lr, sp, #920 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #984] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #980] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #976] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #972] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #968] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #964] - str r0, [sp, #24] @ 4-byte Spill - ldm r10, {r5, r6, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #52] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r4, #20] - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r7, #24] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r7 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - add r0, sp, #848 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #916] - add lr, sp, #868 - add r12, sp, #848 - ldr r6, [sp, #888] - ldr r5, [sp, #884] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #912] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, 
#908] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #904] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #900] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #896] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #892] - str r0, [sp, #24] @ 4-byte Spill - ldm lr, {r9, r10, r11, lr} - ldm r12, {r0, r1, r2, r3, r12} - ldr r8, [sp, #52] @ 4-byte Reload - adds r0, r0, r8 - str r0, [r4, #24] - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r7, #28] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r7 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - add r0, sp, #776 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #844] - add r10, sp, #800 - add lr, sp, #776 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #840] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #836] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #832] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #828] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #824] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #820] - str r0, [sp, #24] @ 4-byte Spill - ldm r10, {r5, r6, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #52] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r4, #28] - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r7, #32] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #108] 
@ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r7 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - add r0, sp, #704 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #772] - add lr, sp, #724 - add r12, sp, #704 - ldr r6, [sp, #744] - ldr r5, [sp, #740] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #768] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #764] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #760] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #756] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #752] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #748] - str r0, [sp, #24] @ 4-byte Spill - ldm lr, {r9, r10, r11, lr} - ldm r12, {r0, r1, r2, r3, r12} - ldr r8, [sp, #52] @ 4-byte Reload - adds r0, r0, r8 - str r0, [r4, #32] - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r7, #36] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r7 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - add r0, sp, #632 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #700] - add r10, sp, #656 - add lr, sp, #632 - str r0, 
[sp, #48] @ 4-byte Spill - ldr r0, [sp, #696] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #692] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #688] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #684] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #680] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #676] - str r0, [sp, #24] @ 4-byte Spill - ldm r10, {r5, r6, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #52] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r4, #36] - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r2, r0 - ldr r2, [r7, #40] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r7 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - add r0, sp, #560 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #628] - add r7, sp, #596 - add lr, sp, #580 - add r12, sp, #560 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #624] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #620] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #616] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #612] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #608] - str r0, [sp, #24] @ 4-byte Spill - ldm r7, {r5, r6, r7} - ldm lr, {r9, r10, r11, lr} - ldm r12, {r0, r1, r2, r3, r12} - ldr r8, [sp, #52] @ 4-byte Reload - adds r0, r0, r8 - str r0, [r4, #40] - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, 
[sp, #88] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #120] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - ldr r2, [r1, #44] - adc r0, r0, #0 - str r0, [sp, #48] @ 4-byte Spill - add r0, sp, #488 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #556] - add r10, sp, #512 - add lr, sp, #488 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #552] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #548] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #544] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #540] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #536] - str r0, [sp, #20] @ 4-byte Spill - ldm r10, {r5, r6, r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #44] @ 4-byte Reload - adds r0, r0, r11 - str r0, [r4, #44] - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r7, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r9, r0 - ldr r9, [sp, #120] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - ldr r2, [r9, #48] - adcs r0, r10, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - add r0, sp, #416 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #484] - add r10, sp, #444 - add lr, sp, #420 - mov r8, r4 - str r0, [sp, #52] 
@ 4-byte Spill - ldr r0, [sp, #480] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #476] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #472] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #468] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #464] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #460] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #456] - str r0, [sp, #20] @ 4-byte Spill - ldm r10, {r5, r6, r10} - ldr r11, [sp, #440] - ldr r12, [sp, #416] - ldm lr, {r0, r1, r2, r3, lr} - ldr r7, [sp, #44] @ 4-byte Reload - adds r7, r12, r7 - str r7, [r4, #48] - ldr r7, [sp, #116] @ 4-byte Reload - mov r4, r9 - add r9, sp, #344 - ldr r12, [r4, #52] - adcs r7, r0, r7 - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r2, r0 - mov r2, r12 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r6, r0 - mov r6, r4 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r4 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - mov r0, r9 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #412] - add r11, sp, #368 - add r12, sp, #348 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #408] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #404] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #400] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #396] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #392] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #388] - str r0, [sp, #24] @ 4-byte Spill - ldm r11, {r4, r5, r9, r10, r11} - ldr lr, [sp, #344] - ldm r12, {r0, r1, r2, r3, r12} - adds r7, lr, r7 - str r7, [r8, #52] - mov r7, r6 - ldr r6, [sp, #116] @ 4-byte Reload - add r8, sp, #272 - ldr lr, [r7, #56] - adcs r0, r0, r6 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r2, r0 - mov r2, lr - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #104] @ 
4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r4, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r7 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - mov r0, r8 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #340] - add r8, sp, #308 - add lr, sp, #292 - add r12, sp, #272 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #336] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #332] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #328] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #324] - str r0, [sp, #28] @ 4-byte Spill - ldm r8, {r5, r6, r7, r8} - ldm lr, {r9, r10, r11, lr} - ldm r12, {r0, r1, r2, r3, r12} - ldr r4, [sp, #48] @ 4-byte Reload - adds r0, r0, r4 - ldr r4, [sp, #124] @ 4-byte Reload - str r0, [r4, #56] - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r2, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r12, r0 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r6, r0 - ldr r6, [sp, #120] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - ldr r2, [r6, #60] - adcs r0, r7, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r6 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r0, r0, #0 
- str r0, [sp, #52] @ 4-byte Spill - add r0, sp, #200 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #268] - add r9, sp, #232 - add lr, sp, #204 - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #264] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #260] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #256] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #252] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #248] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #244] - str r0, [sp, #12] @ 4-byte Spill - ldm r9, {r5, r8, r9} - ldr r10, [sp, #228] - ldr r12, [sp, #200] - ldm lr, {r0, r1, r2, r3, r11, lr} - ldr r7, [sp, #80] @ 4-byte Reload - adds r7, r12, r7 - ldr r12, [r6, #64] - str r7, [r4, #60] - ldr r4, [sp, #116] @ 4-byte Reload - adcs r7, r0, r4 - ldr r0, [sp, #112] @ 4-byte Reload - adcs r4, r1, r0 - ldr r0, [sp, #108] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - adcs r0, r2, r0 - mov r2, r12 - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r10, r0 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r5, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r8, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r9, r0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r1, r0 - mov r1, r6 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #116] @ 4-byte Spill - add r0, sp, #128 - bl .LmulPv544x32(PLT) - add r3, sp, #128 - add r11, sp, #164 - add lr, sp, #144 - ldm r3, {r0, r1, r2, r3} - adds r7, r0, r7 - ldr r0, [sp, #8] @ 4-byte Reload - adcs r6, r1, r4 - adcs r5, r2, r0 - ldr r0, [sp, #4] @ 4-byte Reload - adcs r4, r3, r0 - ldr r0, [sp, #196] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #192] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #188] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #184] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #180] - str r0, [sp, #64] @ 4-byte Spill - ldm r11, {r8, r9, r10, r11} - ldm lr, {r0, r2, r3, r12, lr} - ldr r1, [sp, #124] @ 4-byte Reload - str r7, [r1, #64] - str r6, [r1, #68] - str r5, [r1, #72] - ldr r5, [sp, #40] @ 4-byte Reload - str r4, [r1, #76] - ldr r4, [sp, #44] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [r1, #80] - ldr r0, [sp, #48] @ 4-byte Reload - adcs r2, r2, r4 - str r2, [r1, #84] - ldr r2, [sp, #80] @ 4-byte Reload - adcs r0, r3, r0 - str r0, [r1, #88] - ldr r0, [sp, #84] @ 4-byte Reload - adcs r2, r12, r2 - str r2, [r1, #92] - ldr r2, [sp, #88] @ 4-byte Reload - adcs r0, lr, r0 - str r0, [r1, #96] - ldr r0, [sp, 
#92] @ 4-byte Reload
- adcs r2, r8, r2
- str r2, [r1, #100]
- ldr r2, [sp, #96] @ 4-byte Reload
- adcs r0, r9, r0
- str r0, [r1, #104]
- ldr r0, [sp, #100] @ 4-byte Reload
- adcs r2, r10, r2
- str r2, [r1, #108]
- ldr r2, [sp, #64] @ 4-byte Reload
- adcs r0, r11, r0
- str r0, [r1, #112]
- ldr r0, [sp, #104] @ 4-byte Reload
- adcs r0, r2, r0
- ldr r2, [sp, #68] @ 4-byte Reload
- str r0, [r1, #116]
- ldr r0, [sp, #108] @ 4-byte Reload
- adcs r0, r2, r0
- ldr r2, [sp, #72] @ 4-byte Reload
- str r0, [r1, #120]
- ldr r0, [sp, #112] @ 4-byte Reload
- adcs r0, r2, r0
- ldr r2, [sp, #76] @ 4-byte Reload
- str r0, [r1, #124]
- ldr r0, [sp, #116] @ 4-byte Reload
- adcs r0, r2, r0
- ldr r2, [sp, #120] @ 4-byte Reload
- str r0, [r1, #128]
- adc r2, r2, #0
- str r2, [r1, #132]
- add sp, sp, #332
- add sp, sp, #1024
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end257:
- .size mcl_fpDbl_sqrPre17L, .Lfunc_end257-mcl_fpDbl_sqrPre17L
- .cantunwind
- .fnend
-
- .globl mcl_fp_mont17L
- .align 2
- .type mcl_fp_mont17L,%function
-mcl_fp_mont17L: @ @mcl_fp_mont17L
- .fnstart
-@ BB#0:
- .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- .pad #556
- sub sp, sp, #556
- .pad #2048
- sub sp, sp, #2048
- add r12, sp, #140
- mov r4, r3
- stm r12, {r1, r2, r3}
- str r0, [sp, #96] @ 4-byte Spill
- add r0, sp, #2528
- ldr r5, [r3, #-4]
- ldr r2, [r2]
- str r5, [sp, #136] @ 4-byte Spill
- bl .LmulPv544x32(PLT)
- ldr r0, [sp, #2528]
- ldr r1, [sp, #2532]
- mul r2, r0, r5
- str r0, [sp, #80] @ 4-byte Spill
- ldr r0, [sp, #2596]
- str r1, [sp, #104] @ 4-byte Spill
- ldr r1, [sp, #2536]
- add r5, sp, #2048
- str r0, [sp, #132] @ 4-byte Spill
- ldr r0, [sp, #2592]
- str r1, [sp, #100] @ 4-byte Spill
- ldr r1, [sp, #2540]
- str r0, [sp, #128] @ 4-byte Spill
- ldr r0, [sp, #2588]
- str r1, [sp, #92] @ 4-byte Spill
- mov r1, r4
- str r0, [sp, #124] @ 4-byte Spill
- ldr r0, [sp, #2584]
- str r0, [sp, #120] @ 4-byte Spill
- ldr r0, [sp, #2580]
- str r0, [sp, #116] @ 4-byte Spill
- ldr r0, [sp, #2576]
- str r0, [sp, #112] @ 4-byte Spill
- ldr r0, [sp, #2572]
- str r0, [sp, #108] @ 4-byte Spill
- ldr r0, [sp, #2568]
- str r0, [sp, #88] @ 4-byte Spill
- ldr r0, [sp, #2564]
- str r0, [sp, #84] @ 4-byte Spill
- ldr r0, [sp, #2560]
- str r0, [sp, #76] @ 4-byte Spill
- ldr r0, [sp, #2556]
- str r0, [sp, #72] @ 4-byte Spill
- ldr r0, [sp, #2552]
- str r0, [sp, #64] @ 4-byte Spill
- ldr r0, [sp, #2548]
- str r0, [sp, #56] @ 4-byte Spill
- ldr r0, [sp, #2544]
- str r0, [sp, #48] @ 4-byte Spill
- add r0, r5, #408
- bl .LmulPv544x32(PLT)
- ldr r0, [sp, #2524]
- ldr r1, [sp, #140] @ 4-byte Reload
- ldr r4, [sp, #2484]
- ldr r10, [sp, #2480]
- ldr r6, [sp, #2476]
- ldr r7, [sp, #2472]
- ldr r11, [sp, #2456]
- ldr r9, [sp, #2460]
- ldr r5, [sp, #2464]
- ldr r8, [sp, #2468]
- str r0, [sp, #68] @ 4-byte Spill
- ldr r0, [sp, #2520]
- str r0, [sp, #60] @ 4-byte Spill
- ldr r0, [sp, #2516]
- str r0, [sp, #52] @ 4-byte Spill
- ldr r0, [sp, #2512]
- str r0, [sp, #44] @ 4-byte Spill
- ldr r0, [sp, #2508]
- str r0, [sp, #40] @ 4-byte Spill
- ldr r0, [sp, #2504]
- str r0, [sp, #36] @ 4-byte Spill
- ldr r0, [sp, #2500]
- str r0, [sp, #32] @ 4-byte Spill
- ldr r0, [sp, #2496]
- str r0, [sp, #28] @ 4-byte Spill
- ldr r0, [sp, #2492]
- str r0, [sp, #24] @ 4-byte Spill
- ldr r0, [sp, #2488]
- str r0, [sp, #20] @ 4-byte Spill
- ldr r0, [sp, #144] @ 4-byte Reload
- ldr r2, [r0, #4]
- add r0, sp, #2384
- bl .LmulPv544x32(PLT)
- ldr r0, [sp, #80] @ 4-byte Reload
- ldr r1, [sp, #76] @ 4-byte Reload
- ldr
r2, [sp, #20] @ 4-byte Reload - ldr r3, [sp, #2400] - ldr r12, [sp, #2404] - ldr lr, [sp, #2408] - adds r0, r11, r0 - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r9, r0 - ldr r9, [sp, #2424] - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - ldr r11, [sp, #104] @ 4-byte Reload - adcs r0, r5, r0 - ldr r5, [sp, #2416] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r8, r0 - ldr r8, [sp, #2384] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r7, r0 - ldr r7, [sp, #100] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r6, r0 - ldr r6, [sp, #2420] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r10, r0 - ldr r10, [sp, #2428] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r4, r0 - ldr r4, [sp, #2412] - adcs r1, r2, r1 - ldr r2, [sp, #24] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - mov r0, #0 - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #88] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #88] @ 4-byte Spill - ldr r1, [sp, #108] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #36] @ 4-byte Reload - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #112] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #116] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #44] @ 4-byte Reload - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #120] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #52] @ 4-byte Reload - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #124] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #60] @ 4-byte Reload - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #128] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #68] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [sp, #132] @ 4-byte Reload - adcs r1, r2, r1 - ldr r2, [sp, #2396] - adc r0, r0, #0 - adds r8, r11, r8 - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #2392] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #2452] - str r8, [sp, #24] @ 4-byte Spill - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #2448] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #2444] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #2440] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #2436] - str r0, [sp, #8] @ 4-byte Spill - ldr r0, [sp, #2432] - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #2388] - adcs r0, r7, r0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #4] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r4 - add r4, sp, #2048 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r9 - str 
r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #148] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - mul r2, r8, r0 - add r0, r4, #264 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #2380] - add r10, sp, #2320 - ldr r7, [sp, #2340] - ldr r6, [sp, #2336] - ldr r4, [sp, #2312] - ldr r11, [sp, #2316] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #2376] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #2372] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #2368] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #2364] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #2360] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #2356] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #2352] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #2348] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #2344] - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r5, r8, r9, r10} - ldr r0, [sp, #144] @ 4-byte Reload - ldr r1, [sp, #140] @ 4-byte Reload - ldr r2, [r0, #8] - add r0, sp, #2240 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #24] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #2252] - ldr r3, [sp, #2256] - ldr r12, [sp, #2260] - ldr lr, [sp, #2264] - adds r0, r0, r4 - ldr r4, [sp, #2268] - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r11, [sp, #132] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #2272] - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #2240] - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #2280] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #2284] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #2276] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #128] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 
4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #2248] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adc r0, r0, #0 - adds r8, r11, r8 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #2308] - str r8, [sp, #36] @ 4-byte Spill - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #2304] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #2300] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #2296] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #2292] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #2288] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #2244] - adcs r0, r7, r0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - add r4, sp, #2048 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #148] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - mul r2, r8, r0 - add r0, r4, #120 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #2236] - add r10, sp, #2176 - ldr r7, [sp, #2196] - ldr r6, [sp, #2192] - ldr r4, [sp, #2168] - ldr r11, [sp, #2172] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #2232] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #2228] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #2224] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #2220] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #2216] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #2212] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, 
#2208] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #2204] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #2200] - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r5, r8, r9, r10} - ldr r0, [sp, #144] @ 4-byte Reload - ldr r1, [sp, #140] @ 4-byte Reload - ldr r2, [r0, #12] - add r0, sp, #2096 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #2108] - ldr r3, [sp, #2112] - ldr r12, [sp, #2116] - ldr lr, [sp, #2120] - adds r0, r0, r4 - ldr r4, [sp, #2124] - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r11, [sp, #132] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #2128] - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #2096] - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #2136] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #2140] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #2132] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #128] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #2104] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adc r0, r0, #0 - adds r8, r11, r8 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #2164] - str r8, [sp, #36] @ 4-byte Spill - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #2160] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #2156] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #2152] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #2148] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #2144] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #2100] - adcs r0, r7, r0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #116] @ 4-byte Spill - 
ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - add r4, sp, #1024 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #148] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - mul r2, r8, r0 - add r0, r4, #1000 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #2092] - add r10, sp, #2032 - ldr r7, [sp, #2052] - ldr r6, [sp, #2048] - ldr r4, [sp, #2024] - ldr r11, [sp, #2028] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #2088] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #2084] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #2080] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #2076] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #2072] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #2068] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #2064] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #2060] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #2056] - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r5, r8, r9, r10} - ldr r0, [sp, #144] @ 4-byte Reload - ldr r1, [sp, #140] @ 4-byte Reload - ldr r2, [r0, #16] - add r0, sp, #1952 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1964] - ldr r3, [sp, #1968] - ldr r12, [sp, #1972] - ldr lr, [sp, #1976] - adds r0, r0, r4 - ldr r4, [sp, #1980] - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r11, [sp, #132] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1984] - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1952] - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1992] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1996] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1988] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #128] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 
4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1960] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adc r0, r0, #0 - adds r8, r11, r8 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #2020] - str r8, [sp, #36] @ 4-byte Spill - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #2016] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #2012] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #2008] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #2004] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #2000] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1956] - adcs r0, r7, r0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - add r4, sp, #1024 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #148] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - mul r2, r8, r0 - add r0, r4, #856 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #1948] - add r10, sp, #1888 - ldr r7, [sp, #1908] - ldr r6, [sp, 
#1904] - ldr r4, [sp, #1880] - ldr r11, [sp, #1884] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1944] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1940] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1936] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1932] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1928] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1924] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1920] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1916] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1912] - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r5, r8, r9, r10} - ldr r0, [sp, #144] @ 4-byte Reload - ldr r1, [sp, #140] @ 4-byte Reload - ldr r2, [r0, #20] - add r0, sp, #1808 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1820] - ldr r3, [sp, #1824] - ldr r12, [sp, #1828] - ldr lr, [sp, #1832] - adds r0, r0, r4 - ldr r4, [sp, #1836] - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r11, [sp, #132] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1840] - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1808] - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1848] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1852] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1844] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #128] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1816] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adc r0, r0, #0 - adds r8, r11, r8 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #1876] - str r8, [sp, #36] @ 4-byte Spill - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1872] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1868] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1864] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1860] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1856] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1812] - adcs r0, r7, 
r0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - add r4, sp, #1024 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #148] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - mul r2, r8, r0 - add r0, r4, #712 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #1804] - add r10, sp, #1744 - ldr r7, [sp, #1764] - ldr r6, [sp, #1760] - ldr r4, [sp, #1736] - ldr r11, [sp, #1740] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1800] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1796] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1792] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1788] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1784] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1780] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1776] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1772] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1768] - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r5, r8, r9, r10} - ldr r0, [sp, #144] @ 4-byte Reload - ldr r1, [sp, #140] @ 4-byte Reload - ldr r2, [r0, #24] - add r0, sp, #1664 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1676] - ldr r3, [sp, #1680] - ldr r12, [sp, #1684] - ldr lr, [sp, #1688] - adds r0, r0, r4 - ldr r4, [sp, #1692] - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r11, [sp, #132] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1696] - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1664] - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1704] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1708] - str r0, [sp, #116] @ 4-byte Spill - ldr 
r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1700] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #128] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1672] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adc r0, r0, #0 - adds r8, r11, r8 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #1732] - str r8, [sp, #36] @ 4-byte Spill - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1728] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1724] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1720] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1716] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1712] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1668] - adcs r0, r7, r0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - add r4, sp, #1024 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - 
ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #148] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - mul r2, r8, r0 - add r0, r4, #568 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #1660] - add r10, sp, #1600 - ldr r7, [sp, #1620] - ldr r6, [sp, #1616] - ldr r4, [sp, #1592] - ldr r11, [sp, #1596] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1656] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1652] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1648] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1644] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1640] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1636] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1632] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1628] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1624] - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r5, r8, r9, r10} - ldr r0, [sp, #144] @ 4-byte Reload - ldr r1, [sp, #140] @ 4-byte Reload - ldr r2, [r0, #28] - add r0, sp, #1520 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1532] - ldr r3, [sp, #1536] - ldr r12, [sp, #1540] - ldr lr, [sp, #1544] - adds r0, r0, r4 - ldr r4, [sp, #1548] - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r11, [sp, #132] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1552] - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1520] - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1560] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1564] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1556] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #128] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1528] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adc r0, r0, #0 - adds r8, r11, r8 - str r0, 
[sp, #60] @ 4-byte Spill - ldr r0, [sp, #1588] - str r8, [sp, #36] @ 4-byte Spill - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1584] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1580] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1576] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1572] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1568] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1524] - adcs r0, r7, r0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - add r4, sp, #1024 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #148] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - mul r2, r8, r0 - add r0, r4, #424 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #1516] - add r10, sp, #1456 - ldr r7, [sp, #1476] - ldr r6, [sp, #1472] - ldr r4, [sp, #1448] - ldr r11, [sp, #1452] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1512] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1508] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1504] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1500] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1496] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1492] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1488] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1484] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1480] - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r5, r8, r9, r10} - ldr r0, [sp, #144] @ 4-byte Reload - ldr r1, [sp, #140] @ 4-byte Reload - ldr r2, [r0, #32] - add r0, sp, #1376 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1388] - ldr r3, [sp, #1392] - ldr r12, [sp, #1396] - ldr lr, [sp, #1400] - adds r0, r0, r4 - ldr r4, [sp, #1404] - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r11, [sp, #132] @ 
4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1408] - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1376] - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1416] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1420] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1412] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #128] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1384] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adc r0, r0, #0 - adds r8, r11, r8 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #1444] - str r8, [sp, #36] @ 4-byte Spill - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1440] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1436] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1432] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1428] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1424] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1380] - adcs r0, r7, r0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - add r4, sp, #1024 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte 
Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #148] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - mul r2, r8, r0 - add r0, r4, #280 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #1372] - add r10, sp, #1312 - ldr r7, [sp, #1332] - ldr r6, [sp, #1328] - ldr r4, [sp, #1304] - ldr r11, [sp, #1308] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1368] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1364] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1360] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1356] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1352] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1348] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1344] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1340] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1336] - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r5, r8, r9, r10} - ldr r0, [sp, #144] @ 4-byte Reload - ldr r1, [sp, #140] @ 4-byte Reload - ldr r2, [r0, #36] - add r0, sp, #1232 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - ldr r2, [sp, #1244] - ldr r3, [sp, #1248] - ldr r12, [sp, #1252] - ldr lr, [sp, #1256] - adds r0, r0, r4 - ldr r4, [sp, #1260] - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - ldr r11, [sp, #132] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #1264] - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r8 - ldr r8, [sp, #1232] - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #1272] - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r10 - ldr r10, [sp, #1276] - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #1268] - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r7 - ldr r7, [sp, #128] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr 
r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #1240] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adc r0, r0, #0 - adds r8, r11, r8 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #1300] - str r8, [sp, #36] @ 4-byte Spill - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1296] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1292] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1288] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1284] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1280] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1236] - adcs r0, r7, r0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r4 - add r4, sp, #1024 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #136] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - mul r2, r8, r5 - adcs r0, r0, r6 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #148] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - add r0, r4, #136 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #1228] - ldr r1, [sp, #140] @ 4-byte Reload - ldr r11, [sp, #1184] - ldr r4, [sp, #1180] - ldr r6, [sp, #1176] - ldr r7, [sp, #1160] - ldr r8, [sp, #1164] - ldr r9, [sp, #1168] - ldr r10, [sp, #1172] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1224] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1220] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1216] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1212] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1208] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1204] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1200] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1196] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1192] - str r0, [sp, #16] @ 4-byte Spill 
- ldr r0, [sp, #1188] - str r0, [sp, #12] @ 4-byte Spill - ldr r0, [sp, #144] @ 4-byte Reload - ldr r2, [r0, #40] - add r0, sp, #1088 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #36] @ 4-byte Reload - ldr r1, [sp, #128] @ 4-byte Reload - ldr r2, [sp, #12] @ 4-byte Reload - add lr, sp, #1104 - adds r0, r0, r7 - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r8 - add r8, sp, #1088 - adcs r1, r1, r9 - str r1, [sp, #128] @ 4-byte Spill - ldr r1, [sp, #124] @ 4-byte Reload - adcs r1, r1, r10 - str r1, [sp, #124] @ 4-byte Spill - ldr r1, [sp, #120] @ 4-byte Reload - adcs r1, r1, r6 - str r1, [sp, #120] @ 4-byte Spill - ldr r1, [sp, #116] @ 4-byte Reload - adcs r1, r1, r4 - str r1, [sp, #116] @ 4-byte Spill - ldr r1, [sp, #112] @ 4-byte Reload - adcs r1, r1, r11 - str r1, [sp, #112] @ 4-byte Spill - ldr r1, [sp, #108] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #16] @ 4-byte Reload - str r1, [sp, #108] @ 4-byte Spill - ldr r1, [sp, #104] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #104] @ 4-byte Spill - ldr r1, [sp, #100] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #100] @ 4-byte Spill - ldr r1, [sp, #92] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #92] @ 4-byte Spill - ldr r1, [sp, #88] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #88] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r1, [sp, #80] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #48] @ 4-byte Reload - str r1, [sp, #76] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #52] @ 4-byte Reload - str r1, [sp, #72] @ 4-byte Spill - ldr r1, [sp, #68] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #56] @ 4-byte Reload - str r1, [sp, #68] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs r1, r1, r2 - str r1, [sp, #64] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adc r1, r1, #0 - str r1, [sp, #60] @ 4-byte Spill - ldm r8, {r4, r6, r8} - ldr r7, [sp, #1100] - ldr r10, [sp, #1140] - ldr r9, [sp, #1136] - adds r0, r0, r4 - ldr r4, [sp, #1128] - mul r1, r0, r5 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #1156] - ldr r5, [sp, #1132] - str r1, [sp, #52] @ 4-byte Spill - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #1152] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1148] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1144] - str r0, [sp, #40] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #128] @ 4-byte Reload - adcs r6, r11, r6 - str r6, [sp, #128] @ 4-byte Spill - ldr r6, [sp, #124] @ 4-byte Reload - adcs r6, r6, r8 - str r6, [sp, #124] @ 4-byte Spill - ldr r6, [sp, #120] @ 4-byte Reload - adcs r6, r6, r7 - str r6, [sp, #120] @ 4-byte Spill - ldr r6, [sp, #116] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #52] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, 
#92] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #148] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - add r0, sp, #1016 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #1084] - add r10, sp, #1016 - ldr r11, [sp, #1044] - ldr r4, [sp, #1040] - ldr r5, [sp, #1036] - ldr r6, [sp, #1032] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #1080] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #1076] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #1072] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #1068] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #1064] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #1060] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #1056] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1052] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1048] - str r0, [sp, #16] @ 4-byte Spill - ldm r10, {r7, r8, r9, r10} - ldr r0, [sp, #144] @ 4-byte Reload - ldr r1, [sp, #140] @ 4-byte Reload - ldr r2, [r0, #44] - add r0, sp, #944 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #132] @ 4-byte Reload - ldr r1, [sp, #124] @ 4-byte Reload - ldr r2, [sp, #16] @ 4-byte Reload - add lr, sp, #960 - adds r0, r0, r7 - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r8 - add r8, sp, #944 - adcs r1, r1, r9 - str r1, [sp, #128] @ 4-byte Spill - ldr r1, [sp, #120] @ 4-byte Reload - adcs r1, r1, r10 - add r10, sp, #984 - str r1, [sp, #124] @ 4-byte Spill - ldr r1, [sp, #116] @ 4-byte Reload - adcs r1, r1, r6 - str r1, [sp, #120] @ 4-byte Spill - ldr r1, [sp, #112] @ 4-byte Reload - adcs r1, r1, r5 - str r1, [sp, #116] @ 4-byte Spill - ldr r1, [sp, #108] @ 4-byte Reload - adcs r1, r1, r4 - str r1, [sp, #112] @ 4-byte Spill - ldr r1, [sp, #104] @ 4-byte Reload - adcs r1, r1, r11 - str r1, [sp, #108] @ 4-byte Spill - ldr r1, [sp, #100] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [sp, #104] @ 4-byte Spill - ldr r1, [sp, #92] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r1, [sp, #100] @ 4-byte Spill - ldr r1, [sp, #88] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r1, [sp, #92] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #32] @ 4-byte Reload - str r1, [sp, #88] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #84] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #36] @ 4-byte Reload - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #72] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #44] @ 4-byte Reload - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #68] @ 4-byte Reload - 
adcs r1, r1, r2 - ldr r2, [sp, #48] @ 4-byte Reload - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #52] @ 4-byte Reload - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #60] @ 4-byte Reload - adcs r1, r1, r2 - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #56] @ 4-byte Reload - adc r1, r1, #0 - str r1, [sp, #32] @ 4-byte Spill - ldm r8, {r4, r6, r8} - ldr r7, [sp, #956] - adds r1, r0, r4 - ldr r0, [sp, #136] @ 4-byte Reload - str r1, [sp, #132] @ 4-byte Spill - mul r2, r1, r0 - ldr r0, [sp, #1012] - str r2, [sp, #28] @ 4-byte Spill - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #1008] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #1004] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #1000] - str r0, [sp, #12] @ 4-byte Spill - ldm r10, {r4, r5, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #128] @ 4-byte Reload - adcs r6, r11, r6 - str r6, [sp, #80] @ 4-byte Spill - ldr r6, [sp, #124] @ 4-byte Reload - adcs r6, r6, r8 - str r6, [sp, #76] @ 4-byte Spill - ldr r6, [sp, #120] @ 4-byte Reload - adcs r6, r6, r7 - str r6, [sp, #72] @ 4-byte Spill - ldr r6, [sp, #116] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #28] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #148] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - add r0, sp, #872 - bl .LmulPv544x32(PLT) - ldr r1, [sp, #940] - add r11, sp, #880 - ldr r5, [sp, #900] - ldr r4, [sp, #896] - ldr r9, [sp, #872] - ldr r10, [sp, #876] - add r0, sp, #800 - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #936] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #932] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #928] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #924] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #920] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #916] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #912] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #908] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #904] - str r1, [sp, #16] @ 4-byte Spill - ldm r11, {r6, r7, r8, r11} - ldr r1, [sp, #144] @ 4-byte Reload - ldr r2, [r1, #48] - ldr r1, [sp, #140] @ 4-byte Reload - bl 
.LmulPv544x32(PLT) - ldr r0, [sp, #132] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - add lr, sp, #804 - adds r0, r0, r9 - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #828 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #868] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #864] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #860] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #856] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #852] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #848] - str r0, [sp, #28] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r7, [sp, #800] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #132] @ 4-byte Reload - ldr r6, [sp, #80] @ 4-byte Reload - adds r7, r11, r7 - adcs r0, r6, r0 - str r7, [sp, #32] @ 4-byte Spill - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, 
[sp, #112] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #148] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #728 - bl .LmulPv544x32(PLT) - ldr r1, [sp, #796] - add r9, sp, #732 - ldr r5, [sp, #756] - ldr r11, [sp, #752] - ldr r8, [sp, #748] - ldr r10, [sp, #728] - add r0, sp, #656 - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #792] - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #788] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #784] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #780] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #776] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #772] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #768] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #764] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #760] - str r1, [sp, #16] @ 4-byte Spill - ldm r9, {r4, r6, r7, r9} - ldr r1, [sp, #144] @ 4-byte Reload - ldr r2, [r1, #52] - ldr r1, [sp, #140] @ 4-byte Reload - bl .LmulPv544x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - add lr, sp, #660 - adds r0, r0, r10 - add r10, sp, #684 - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 
4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #724] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #720] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #716] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #712] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #708] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #704] - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r7, [sp, #656] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #84] @ 4-byte Reload - ldr r6, [sp, #80] @ 4-byte Reload - adds r7, r11, r7 - adcs r0, r6, r0 - str r7, [sp, #32] @ 4-byte Spill - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #148] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #584 - bl .LmulPv544x32(PLT) - ldr r1, [sp, #652] - add r9, sp, #588 - ldr r5, [sp, #612] - ldr r11, [sp, #608] - ldr r8, [sp, #604] - ldr r10, [sp, #584] - add r0, sp, #512 - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #648] - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #644] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #640] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #636] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #632] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #628] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #624] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #620] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #616] - str r1, [sp, #16] @ 4-byte Spill - ldm r9, {r4, r6, r7, r9} - ldr r1, [sp, #144] @ 
4-byte Reload - ldr r2, [r1, #56] - ldr r1, [sp, #140] @ 4-byte Reload - bl .LmulPv544x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - add lr, sp, #516 - adds r0, r0, r10 - add r10, sp, #540 - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #580] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #576] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #572] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #568] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #564] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #560] - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r7, [sp, #512] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #84] @ 4-byte Reload - ldr r6, [sp, #80] @ 4-byte Reload - adds r7, r11, r7 - adcs r0, r6, r0 - str r7, [sp, #32] @ 4-byte Spill - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #120] @ 
4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #148] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #440 - bl .LmulPv544x32(PLT) - ldr r1, [sp, #508] - add r9, sp, #444 - ldr r5, [sp, #468] - ldr r11, [sp, #464] - ldr r8, [sp, #460] - ldr r10, [sp, #440] - add r0, sp, #368 - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #504] - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #500] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #496] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #492] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #488] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #484] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #480] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #476] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #472] - str r1, [sp, #16] @ 4-byte Spill - ldm r9, {r4, r6, r7, r9} - ldr r1, [sp, #144] @ 4-byte Reload - ldr r2, [r1, #60] - ldr r1, [sp, #140] @ 4-byte Reload - bl .LmulPv544x32(PLT) - ldr r0, [sp, #32] @ 4-byte Reload - ldr r1, [sp, #16] @ 4-byte Reload - add lr, sp, #372 - adds r0, r0, r10 - add r10, sp, #396 - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill 
- ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #436] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #432] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #428] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #424] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #420] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #416] - str r0, [sp, #36] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r7, [sp, #368] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #84] @ 4-byte Reload - ldr r6, [sp, #80] @ 4-byte Reload - adds r7, r11, r7 - adcs r0, r6, r0 - str r7, [sp, #32] @ 4-byte Spill - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #148] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - mov r0, #0 - adc r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #296 - bl .LmulPv544x32(PLT) - ldr r1, [sp, #364] - add r11, sp, #312 - add r7, sp, #300 - ldr r9, [sp, #324] - add r0, sp, #224 - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [sp, #360] - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #356] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #352] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #348] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #344] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #340] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #336] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #332] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #328] - str r1, [sp, #16] @ 4-byte Spill - ldm r11, {r4, 
r10, r11}
-	ldr	r8, [sp, #296]
-	ldm	r7, {r5, r6, r7}
-	ldr	r1, [sp, #144] @ 4-byte Reload
-	ldr	r2, [r1, #64]
-	ldr	r1, [sp, #140] @ 4-byte Reload
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #32] @ 4-byte Reload
-	ldr	r1, [sp, #80] @ 4-byte Reload
-	ldr	r2, [sp, #16] @ 4-byte Reload
-	add	lr, sp, #240
-	adds	r0, r0, r8
-	ldr	r8, [sp, #232]
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r5
-	ldr	r5, [sp, #268]
-	adcs	r1, r1, r6
-	str	r1, [sp, #144] @ 4-byte Spill
-	ldr	r1, [sp, #76] @ 4-byte Reload
-	adcs	r1, r1, r7
-	ldr	r7, [sp, #236]
-	str	r1, [sp, #140] @ 4-byte Spill
-	ldr	r1, [sp, #72] @ 4-byte Reload
-	adcs	r1, r1, r4
-	ldr	r4, [sp, #224]
-	str	r1, [sp, #84] @ 4-byte Spill
-	ldr	r1, [sp, #68] @ 4-byte Reload
-	adcs	r1, r1, r10
-	str	r1, [sp, #80] @ 4-byte Spill
-	ldr	r1, [sp, #64] @ 4-byte Reload
-	adcs	r1, r1, r11
-	ldr	r11, [sp, #228]
-	str	r1, [sp, #76] @ 4-byte Spill
-	ldr	r1, [sp, #60] @ 4-byte Reload
-	adcs	r1, r1, r9
-	str	r1, [sp, #72] @ 4-byte Spill
-	ldr	r1, [sp, #132] @ 4-byte Reload
-	adcs	r1, r1, r2
-	ldr	r2, [sp, #20] @ 4-byte Reload
-	str	r1, [sp, #132] @ 4-byte Spill
-	ldr	r1, [sp, #128] @ 4-byte Reload
-	adcs	r1, r1, r2
-	ldr	r2, [sp, #24] @ 4-byte Reload
-	str	r1, [sp, #128] @ 4-byte Spill
-	ldr	r1, [sp, #124] @ 4-byte Reload
-	adcs	r1, r1, r2
-	ldr	r2, [sp, #28] @ 4-byte Reload
-	str	r1, [sp, #124] @ 4-byte Spill
-	ldr	r1, [sp, #120] @ 4-byte Reload
-	adcs	r1, r1, r2
-	ldr	r2, [sp, #36] @ 4-byte Reload
-	str	r1, [sp, #120] @ 4-byte Spill
-	ldr	r1, [sp, #116] @ 4-byte Reload
-	adcs	r1, r1, r2
-	ldr	r2, [sp, #40] @ 4-byte Reload
-	str	r1, [sp, #116] @ 4-byte Spill
-	ldr	r1, [sp, #112] @ 4-byte Reload
-	adcs	r1, r1, r2
-	ldr	r2, [sp, #44] @ 4-byte Reload
-	str	r1, [sp, #112] @ 4-byte Spill
-	ldr	r1, [sp, #108] @ 4-byte Reload
-	adcs	r1, r1, r2
-	ldr	r2, [sp, #48] @ 4-byte Reload
-	str	r1, [sp, #108] @ 4-byte Spill
-	ldr	r1, [sp, #104] @ 4-byte Reload
-	adcs	r1, r1, r2
-	ldr	r2, [sp, #52] @ 4-byte Reload
-	str	r1, [sp, #104] @ 4-byte Spill
-	ldr	r1, [sp, #100] @ 4-byte Reload
-	adcs	r1, r1, r2
-	ldr	r2, [sp, #56] @ 4-byte Reload
-	str	r1, [sp, #100] @ 4-byte Spill
-	ldr	r1, [sp, #92] @ 4-byte Reload
-	adcs	r1, r1, r2
-	str	r1, [sp, #92] @ 4-byte Spill
-	ldr	r1, [sp, #88] @ 4-byte Reload
-	adc	r1, r1, #0
-	adds	r9, r0, r4
-	ldr	r0, [sp, #136] @ 4-byte Reload
-	ldr	r4, [sp, #264]
-	str	r1, [sp, #88] @ 4-byte Spill
-	mul	r1, r9, r0
-	ldr	r0, [sp, #292]
-	str	r1, [sp, #68] @ 4-byte Spill
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #288]
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #284]
-	str	r0, [sp, #136] @ 4-byte Spill
-	ldr	r0, [sp, #280]
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #276]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #272]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldm	lr, {r0, r1, r2, r3, r12, lr}
-	ldr	r10, [sp, #144] @ 4-byte Reload
-	ldr	r6, [sp, #140] @ 4-byte Reload
-	adcs	r11, r10, r11
-	adcs	r10, r6, r8
-	ldr	r6, [sp, #84] @ 4-byte Reload
-	adcs	r7, r6, r7
-	ldr	r6, [sp, #80] @ 4-byte Reload
-	adcs	r0, r6, r0
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r2
-	ldr	r2, [sp, #68] @ 4-byte Reload
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #132] @ 4-byte Reload
-	adcs	r0, r0, r3
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #128] @ 4-byte Reload
-	adcs	r0, r0, r12
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #124] @ 4-byte Reload
-	adcs	r0, r0, lr
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #120] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r5
-	ldr	r5, [sp, #148] @ 4-byte Reload
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r8, r0, r1
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #52] @ 4-byte Reload
-	str	r0, [sp, #128] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #136] @ 4-byte Reload
-	str	r0, [sp, #132] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #60] @ 4-byte Reload
-	str	r0, [sp, #136] @ 4-byte Spill
-	ldr	r0, [sp, #92] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #64] @ 4-byte Reload
-	str	r0, [sp, #140] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r1
-	mov	r1, r5
-	str	r0, [sp, #144] @ 4-byte Spill
-	mov	r0, #0
-	adc	r0, r0, #0
-	str	r0, [sp, #92] @ 4-byte Spill
-	add	r0, sp, #152
-	bl	.LmulPv544x32(PLT)
-	add	r3, sp, #152
-	ldm	r3, {r0, r1, r2, r3}
-	adds	r0, r9, r0
-	adcs	r4, r11, r1
-	ldr	r0, [sp, #168]
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	adcs	r6, r10, r2
-	str	r4, [sp, #52] @ 4-byte Spill
-	adcs	r9, r7, r3
-	mov	r3, r5
-	str	r6, [sp, #60] @ 4-byte Spill
-	str	r9, [sp, #68] @ 4-byte Spill
-	adcs	lr, r1, r0
-	ldr	r0, [sp, #172]
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	lr, [sp, #72] @ 4-byte Spill
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #56] @ 4-byte Reload
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #176]
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #76] @ 4-byte Reload
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #180]
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #80] @ 4-byte Reload
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #184]
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #84] @ 4-byte Reload
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #188]
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #120] @ 4-byte Reload
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #192]
-	adcs	r11, r1, r0
-	ldr	r0, [sp, #196]
-	ldr	r1, [sp, #124] @ 4-byte Reload
-	str	r11, [sp, #76] @ 4-byte Spill
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #128] @ 4-byte Reload
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #200]
-	adcs	r0, r8, r0
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #204]
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #132] @ 4-byte Reload
-	str	r0, [sp, #128] @ 4-byte Spill
-	ldr	r0, [sp, #208]
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #136] @ 4-byte Reload
-	str	r0, [sp, #132] @ 4-byte Spill
-	ldr	r0, [sp, #212]
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #140] @ 4-byte Reload
-	str	r0, [sp, #136] @ 4-byte Spill
-	ldr	r0, [sp, #216]
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #144] @ 4-byte Reload
-	str	r0, [sp, #140] @ 4-byte Spill
-	ldr	r0, [sp, #220]
-	adcs	r0, r1, r0
-	str	r0, [sp, #144] @ 4-byte Spill
-	ldr	r0, [sp, #92] @ 4-byte Reload
-	adc	r0, r0, #0
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldm	r3, {r1, r2, r7}
-	ldr	r0, [r3, #64]
-	ldr	r5, [r3, #12]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [r3, #36]
-	subs	r12, r4, r1
-	ldr	r1, [r3, #40]
-	sbcs	r4, r6, r2
-	ldr	r2, [sp, #100] @ 4-byte Reload
-	sbcs	r6, r9, r7
-	ldr	r7, [r3, #32]
-	ldr	r9, [r3, #28]
-	sbcs	r10, lr, r5
-	ldr	r5, [r3, #16]
-	ldr	lr, [r3, #24]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [r3, #44]
-	sbcs	r2, r2, r5
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [r3, #48]
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [r3, #52]
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [r3, #56]
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [r3, #60]
-	str	r0, [sp, #92] @ 4-byte Spill
-	ldr	r0, [r3, #20]
-	ldr	r3, [sp, #104] @ 4-byte Reload
-	sbcs	r3, r3, r0
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	sbcs	lr, r0, lr
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	sbcs	r5, r0, r9
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	sbcs	r8, r0, r7
-	ldr	r0, [sp, #44] @ 4-byte Reload
-	ldr	r7, [sp, #60] @ 4-byte Reload
-	sbcs	r9, r11, r0
-	ldr	r0, [sp, #120] @ 4-byte Reload
-	sbcs	r11, r0, r1
-	ldr	r0, [sp, #124] @ 4-byte Reload
-	ldr	r1, [sp, #56] @ 4-byte Reload
-	sbcs	r0, r0, r1
-	ldr	r1, [sp, #80] @ 4-byte Reload
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #128] @ 4-byte Reload
-	sbcs	r0, r0, r1
-	ldr	r1, [sp, #84] @ 4-byte Reload
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #132] @ 4-byte Reload
-	sbcs	r0, r0, r1
-	ldr	r1, [sp, #88] @ 4-byte Reload
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #136] @ 4-byte Reload
-	sbcs	r0, r0, r1
-	ldr	r1, [sp, #92] @ 4-byte Reload
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #140] @ 4-byte Reload
-	sbcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #92] @ 4-byte Spill
-	ldr	r0, [sp, #144] @ 4-byte Reload
-	sbcs	r0, r0, r1
-	str	r0, [sp, #148] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	sbc	r0, r0, #0
-	ands	r1, r0, #1
-	ldr	r0, [sp, #52] @ 4-byte Reload
-	movne	r4, r7
-	movne	r12, r0
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	str	r12, [r0]
-	str	r4, [r0, #4]
-	ldr	r4, [sp, #68] @ 4-byte Reload
-	movne	r6, r4
-	cmp	r1, #0
-	str	r6, [r0, #8]
-	ldr	r6, [sp, #72] @ 4-byte Reload
-	movne	r10, r6
-	ldr	r6, [sp, #100] @ 4-byte Reload
-	str	r10, [r0, #12]
-	movne	r2, r6
-	str	r2, [r0, #16]
-	ldr	r2, [sp, #104] @ 4-byte Reload
-	movne	r3, r2
-	ldr	r2, [sp, #108] @ 4-byte Reload
-	cmp	r1, #0
-	str	r3, [r0, #20]
-	ldr	r3, [sp, #56] @ 4-byte Reload
-	movne	lr, r2
-	ldr	r2, [sp, #112] @ 4-byte Reload
-	str	lr, [r0, #24]
-	movne	r5, r2
-	ldr	r2, [sp, #116] @ 4-byte Reload
-	str	r5, [r0, #28]
-	movne	r8, r2
-	ldr	r2, [sp, #76] @ 4-byte Reload
-	cmp	r1, #0
-	str	r8, [r0, #32]
-	movne	r9, r2
-	ldr	r2, [sp, #120] @ 4-byte Reload
-	str	r9, [r0, #36]
-	movne	r11, r2
-	ldr	r2, [sp, #124] @ 4-byte Reload
-	str	r11, [r0, #40]
-	movne	r3, r2
-	ldr	r2, [sp, #128] @ 4-byte Reload
-	cmp	r1, #0
-	str	r3, [r0, #44]
-	ldr	r3, [sp, #80] @ 4-byte Reload
-	movne	r3, r2
-	ldr	r2, [sp, #132] @ 4-byte Reload
-	str	r3, [r0, #48]
-	ldr	r3, [sp, #84] @ 4-byte Reload
-	movne	r3, r2
-	ldr	r2, [sp, #136] @ 4-byte Reload
-	str	r3, [r0, #52]
-	ldr	r3, [sp, #88] @ 4-byte Reload
-	movne	r3, r2
-	cmp	r1, #0
-	ldr	r1, [sp, #140] @ 4-byte Reload
-	ldr	r2, [sp, #92] @ 4-byte Reload
-	str	r3, [r0, #56]
-	movne	r2, r1
-	ldr	r1, [sp, #144] @ 4-byte Reload
-	str	r2, [r0, #60]
-	ldr	r2, [sp, #148] @ 4-byte Reload
-	movne	r2, r1
-	str	r2, [r0, #64]
-	add	sp, sp, #556
-	add	sp, sp, #2048
-	pop	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
-	mov	pc, lr
-.Lfunc_end258:
-	.size	mcl_fp_mont17L, .Lfunc_end258-mcl_fp_mont17L
-	.cantunwind
-	.fnend
-
-	.globl	mcl_fp_montNF17L
-	.align	2
-	.type	mcl_fp_montNF17L,%function
-mcl_fp_montNF17L: @ @mcl_fp_montNF17L
-	.fnstart
-@ BB#0:
-	.save	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
-	push	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
-	.pad	#548
-	sub	sp, sp, #548
-	.pad	#2048
-	sub	sp, sp, #2048
-	add	r12, sp, #132
-	add	r6, sp, #2048
-	mov	r4, r3
-	stm	r12, {r1, r2, r3}
-	str	r0, [sp, #92] @ 4-byte Spill
-	add	r0, r6, #472
-	ldr	r5, [r3, #-4]
-	ldr	r2, [r2]
-	str	r5, [sp, #128] @ 4-byte Spill
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #2520]
-	ldr	r1, [sp, #2524]
-	str	r0, [sp, #72] @ 4-byte Spill
-	mul	r2, r0, r5
-	ldr	r0, [sp, #2588]
-	str	r1, [sp, #100] @ 4-byte Spill
-	ldr	r1, [sp, #2528]
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #2584]
-	str	r1, [sp, #96] @ 4-byte Spill
-	ldr	r1, [sp, #2532]
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #2580]
-	str	r1, [sp, #88] @ 4-byte Spill
-	mov	r1, r4
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #2576]
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #2572]
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #2568]
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #2564]
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #2560]
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #2556]
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #2552]
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #2548]
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #2544]
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #2540]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #2536]
-	str	r0, [sp, #40] @ 4-byte Spill
-	add	r0, sp, #2448
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #2516]
-	add	r11, sp, #2448
-	ldr	r9, [sp, #2476]
-	ldr	r4, [sp, #2472]
-	ldr	r7, [sp, #2468]
-	ldr	r6, [sp, #2464]
-	add	lr, sp, #2048
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #2512]
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #2508]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #2504]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldr	r0, [sp, #2500]
-	str	r0, [sp, #32] @ 4-byte Spill
-	ldr	r0, [sp, #2496]
-	str	r0, [sp, #28] @ 4-byte Spill
-	ldr	r0, [sp, #2492]
-	str	r0, [sp, #24] @ 4-byte Spill
-	ldr	r0, [sp, #2488]
-	str	r0, [sp, #20] @ 4-byte Spill
-	ldr	r0, [sp, #2484]
-	str	r0, [sp, #16] @ 4-byte Spill
-	ldr	r0, [sp, #2480]
-	str	r0, [sp, #12] @ 4-byte Spill
-	ldm	r11, {r8, r10, r11}
-	ldr	r0, [sp, #136] @ 4-byte Reload
-	ldr	r1, [sp, #132] @ 4-byte Reload
-	ldr	r5, [sp, #2460]
-	ldr	r2, [r0, #4]
-	add	r0, lr, #328
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	ldr	r1, [sp, #12] @ 4-byte Reload
-	adds	r0, r8, r0
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r10, r0
-	add	r10, sp, #2416
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r11, r0
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r5, r0
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #40] @ 4-byte Reload
-	adcs	r0, r6, r0
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #48] @ 4-byte Reload
-	adcs	r0, r7, r0
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adcs	r0, r4, r0
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r9, r0
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #16] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #20] @ 4-byte Reload
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #24] @ 4-byte Reload
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #28] @ 4-byte Reload
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #32] @ 4-byte Reload
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #32] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #52] @ 4-byte Reload
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #120] @ 4-byte Reload
-	adcs	r0, r1, r0
-	ldr	r1, [sp, #60] @ 4-byte Reload
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #124] @ 4-byte Reload
-	adc	r0, r1, r0
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #2444]
-	str	r0, [sp, #28] @ 4-byte Spill
-	ldr	r0, [sp, #2440]
-	str	r0, [sp, #20] @ 4-byte Spill
-	ldr	r0, [sp, #2436]
-	str	r0, [sp, #16] @ 4-byte Spill
-	ldr	r0, [sp, #2432]
-	str	r0, [sp, #12] @ 4-byte Spill
-	ldr	r0, [sp, #2428]
-	str	r0, [sp, #8] @ 4-byte Spill
-	ldm	r10, {r8, r9, r10}
-	ldr	r7, [sp, #2376]
-	ldr	r6, [sp, #100] @ 4-byte Reload
-	ldr	r0, [sp, #2380]
-	ldr	r1, [sp, #2384]
-	ldr	r2, [sp, #2388]
-	ldr	r3, [sp, #2392]
-	ldr	r12, [sp, #2396]
-	ldr	lr, [sp, #2400]
-	ldr	r4, [sp, #2404]
-	ldr	r5, [sp, #2408]
-	ldr	r11, [sp, #2412]
-	adds	r7, r6, r7
-	ldr	r6, [sp, #96] @ 4-byte Reload
-	str	r7, [sp, #24] @ 4-byte Spill
-	adcs	r0, r6, r0
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #8] @ 4-byte Reload
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r2
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #48] @ 4-byte Reload
-	adcs	r0, r0, r3
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adcs	r0, r0, r12
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, lr
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r8
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #40] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #32] @ 4-byte Reload
-	adcs	r0, r0, r10
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #36] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #12] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #44] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #16] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #52] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #20] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #140] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #28] @ 4-byte Reload
-	adc	r0, r0, #0
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #128] @ 4-byte Reload
-	mul	r2, r7, r0
-	add	r0, sp, #2304
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #2372]
-	add	r11, sp, #2304
-	ldr	r4, [sp, #2332]
-	ldr	r5, [sp, #2328]
-	ldr	r6, [sp, #2324]
-	ldr	r7, [sp, #2320]
-	add	lr, sp, #2048
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #2368]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #2364]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #2360]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #2356]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldr	r0, [sp, #2352]
-	str	r0, [sp, #32] @ 4-byte Spill
-	ldr	r0, [sp, #2348]
-	str	r0, [sp, #28] @ 4-byte Spill
-	ldr	r0, [sp, #2344]
-	str	r0, [sp, #20] @ 4-byte Spill
-	ldr	r0, [sp, #2340]
-	str	r0, [sp, #16] @ 4-byte Spill
-	ldr	r0, [sp, #2336]
-	str	r0, [sp, #12] @ 4-byte Spill
-	ldm	r11, {r8, r9, r10, r11}
-	ldr	r0, [sp, #136] @ 4-byte Reload
-	ldr	r1, [sp, #132] @ 4-byte Reload
-	ldr	r2, [r0, #8]
-	add	r0, lr, #184
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #24] @ 4-byte Reload
-	ldr	r1, [sp, #12] @ 4-byte Reload
-	adds	r0, r0, r8
-	ldr	r0, [sp, #124] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #120] @ 4-byte Reload
-	adcs	r0, r0, r10
-	add	r10, sp, #2272
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r7
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r6
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #16] @ 4-byte Reload
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #20] @ 4-byte Reload
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #28] @ 4-byte Reload
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #32] @ 4-byte Reload
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #52] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adc	r0, r0, r1
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #2300]
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #2296]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #2292]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #2288]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #2284]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldm	r10, {r8, r9, r10}
-	ldr	r7, [sp, #2232]
-	ldr	r6, [sp, #124] @ 4-byte Reload
-	ldr	r0, [sp, #2236]
-	ldr	r1, [sp, #2240]
-	ldr	r2, [sp, #2244]
-	ldr	r3, [sp, #2248]
-	ldr	r12, [sp, #2252]
-	ldr	lr, [sp, #2256]
-	ldr	r4, [sp, #2260]
-	ldr	r5, [sp, #2264]
-	ldr	r11, [sp, #2268]
-	adds	r7, r6, r7
-	ldr	r6, [sp, #120] @ 4-byte Reload
-	str	r7, [sp, #32] @ 4-byte Spill
-	adcs	r0, r6, r0
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r2
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r3
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r12
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, lr
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r8
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r10
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #140] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #52] @ 4-byte Reload
-	adc	r0, r0, #0
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #128] @ 4-byte Reload
-	mul	r2, r7, r0
-	add	r0, sp, #2160
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #2228]
-	add	r11, sp, #2160
-	ldr	r4, [sp, #2188]
-	ldr	r5, [sp, #2184]
-	ldr	r6, [sp, #2180]
-	ldr	r7, [sp, #2176]
-	add	lr, sp, #2048
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #2224]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #2220]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #2216]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #2212]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldr	r0, [sp, #2208]
-	str	r0, [sp, #28] @ 4-byte Spill
-	ldr	r0, [sp, #2204]
-	str	r0, [sp, #24] @ 4-byte Spill
-	ldr	r0, [sp, #2200]
-	str	r0, [sp, #20] @ 4-byte Spill
-	ldr	r0, [sp, #2196]
-	str	r0, [sp, #16] @ 4-byte Spill
-	ldr	r0, [sp, #2192]
-	str	r0, [sp, #12] @ 4-byte Spill
-	ldm	r11, {r8, r9, r10, r11}
-	ldr	r0, [sp, #136] @ 4-byte Reload
-	ldr	r1, [sp, #132] @ 4-byte Reload
-	ldr	r2, [r0, #12]
-	add	r0, lr, #40
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #32] @ 4-byte Reload
-	ldr	r1, [sp, #12] @ 4-byte Reload
-	adds	r0, r0, r8
-	ldr	r0, [sp, #124] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #120] @ 4-byte Reload
-	adcs	r0, r0, r10
-	add	r10, sp, #2128
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r7
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r6
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #16] @ 4-byte Reload
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #20] @ 4-byte Reload
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #24] @ 4-byte Reload
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #28] @ 4-byte Reload
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #52] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adc	r0, r0, r1
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #2156]
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #2152]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #2148]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #2144]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #2140]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldm	r10, {r8, r9, r10}
-	ldr	r7, [sp, #2088]
-	ldr	r6, [sp, #124] @ 4-byte Reload
-	ldr	r0, [sp, #2092]
-	ldr	r1, [sp, #2096]
-	ldr	r2, [sp, #2100]
-	ldr	r3, [sp, #2104]
-	ldr	r12, [sp, #2108]
-	ldr	lr, [sp, #2112]
-	ldr	r4, [sp, #2116]
-	ldr	r5, [sp, #2120]
-	ldr	r11, [sp, #2124]
-	adds	r7, r6, r7
-	ldr	r6, [sp, #120] @ 4-byte Reload
-	str	r7, [sp, #32] @ 4-byte Spill
-	adcs	r0, r6, r0
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r2
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r3
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r12
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, lr
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r8
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r10
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #140] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #52] @ 4-byte Reload
-	adc	r0, r0, #0
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #128] @ 4-byte Reload
-	mul	r2, r7, r0
-	add	r0, sp, #2016
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #2084]
-	add	r11, sp, #2016
-	ldr	r4, [sp, #2044]
-	ldr	r5, [sp, #2040]
-	ldr	r6, [sp, #2036]
-	ldr	r7, [sp, #2032]
-	add	lr, sp, #1024
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #2080]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #2076]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #2072]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #2068]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldr	r0, [sp, #2064]
-	str	r0, [sp, #28] @ 4-byte Spill
-	ldr	r0, [sp, #2060]
-	str	r0, [sp, #24] @ 4-byte Spill
-	ldr	r0, [sp, #2056]
-	str	r0, [sp, #20] @ 4-byte Spill
-	ldr	r0, [sp, #2052]
-	str	r0, [sp, #16] @ 4-byte Spill
-	ldr	r0, [sp, #2048]
-	str	r0, [sp, #12] @ 4-byte Spill
-	ldm	r11, {r8, r9, r10, r11}
-	ldr	r0, [sp, #136] @ 4-byte Reload
-	ldr	r1, [sp, #132] @ 4-byte Reload
-	ldr	r2, [r0, #16]
-	add	r0, lr, #920
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #32] @ 4-byte Reload
-	ldr	r1, [sp, #12] @ 4-byte Reload
-	adds	r0, r0, r8
-	ldr	r0, [sp, #124] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #120] @ 4-byte Reload
-	adcs	r0, r0, r10
-	add	r10, sp, #1984
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r7
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r6
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #16] @ 4-byte Reload
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #20] @ 4-byte Reload
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #24] @ 4-byte Reload
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #28] @ 4-byte Reload
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #52] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adc	r0, r0, r1
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #2012]
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #2008]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #2004]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #2000]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #1996]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldm	r10, {r8, r9, r10}
-	ldr	r7, [sp, #1944]
-	ldr	r6, [sp, #124] @ 4-byte Reload
-	ldr	r0, [sp, #1948]
-	ldr	r1, [sp, #1952]
-	ldr	r2, [sp, #1956]
-	ldr	r3, [sp, #1960]
-	ldr	r12, [sp, #1964]
-	ldr	lr, [sp, #1968]
-	ldr	r4, [sp, #1972]
-	ldr	r5, [sp, #1976]
-	ldr	r11, [sp, #1980]
-	adds	r7, r6, r7
-	ldr	r6, [sp, #120] @ 4-byte Reload
-	str	r7, [sp, #32] @ 4-byte Spill
-	adcs	r0, r6, r0
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r2
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r3
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r12
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, lr
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r8
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r10
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #140] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #52] @ 4-byte Reload
-	adc	r0, r0, #0
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #128] @ 4-byte Reload
-	mul	r2, r7, r0
-	add	r0, sp, #1872
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #1940]
-	add	r11, sp, #1872
-	ldr	r4, [sp, #1900]
-	ldr	r5, [sp, #1896]
-	ldr	r6, [sp, #1892]
-	ldr	r7, [sp, #1888]
-	add	lr, sp, #1024
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #1936]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #1932]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #1928]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #1924]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldr	r0, [sp, #1920]
-	str	r0, [sp, #28] @ 4-byte Spill
-	ldr	r0, [sp, #1916]
-	str	r0, [sp, #24] @ 4-byte Spill
-	ldr	r0, [sp, #1912]
-	str	r0, [sp, #20] @ 4-byte Spill
-	ldr	r0, [sp, #1908]
-	str	r0, [sp, #16] @ 4-byte Spill
-	ldr	r0, [sp, #1904]
-	str	r0, [sp, #12] @ 4-byte Spill
-	ldm	r11, {r8, r9, r10, r11}
-	ldr	r0, [sp, #136] @ 4-byte Reload
-	ldr	r1, [sp, #132] @ 4-byte Reload
-	ldr	r2, [r0, #20]
-	add	r0, lr, #776
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #32] @ 4-byte Reload
-	ldr	r1, [sp, #12] @ 4-byte Reload
-	adds	r0, r0, r8
-	ldr	r0, [sp, #124] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #120] @ 4-byte Reload
-	adcs	r0, r0, r10
-	add	r10, sp, #1840
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r7
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r6
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #16] @ 4-byte Reload
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #20] @ 4-byte Reload
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #24] @ 4-byte Reload
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #28] @ 4-byte Reload
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #52] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adc	r0, r0, r1
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #1868]
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #1864]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #1860]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #1856]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #1852]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldm	r10, {r8, r9, r10}
-	ldr	r7, [sp, #1800]
-	ldr	r6, [sp, #124] @ 4-byte Reload
-	ldr	r0, [sp, #1804]
-	ldr	r1, [sp, #1808]
-	ldr	r2, [sp, #1812]
-	ldr	r3, [sp, #1816]
-	ldr	r12, [sp, #1820]
-	ldr	lr, [sp, #1824]
-	ldr	r4, [sp, #1828]
-	ldr	r5, [sp, #1832]
-	ldr	r11, [sp, #1836]
-	adds	r7, r6, r7
-	ldr	r6, [sp, #120] @ 4-byte Reload
-	str	r7, [sp, #32] @ 4-byte Spill
-	adcs	r0, r6, r0
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r2
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r3
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r12
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, lr
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r8
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r10
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #140] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #52] @ 4-byte Reload
-	adc	r0, r0, #0
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #128] @ 4-byte Reload
-	mul	r2, r7, r0
-	add	r0, sp, #1728
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #1796]
-	add	r11, sp, #1728
-	ldr	r4, [sp, #1756]
-	ldr	r5, [sp, #1752]
-	ldr	r6, [sp, #1748]
-	ldr	r7, [sp, #1744]
-	add	lr, sp, #1024
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #1792]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #1788]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #1784]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #1780]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldr	r0, [sp, #1776]
-	str	r0, [sp, #28] @ 4-byte Spill
-	ldr	r0, [sp, #1772]
-	str	r0, [sp, #24] @ 4-byte Spill
-	ldr	r0, [sp, #1768]
-	str	r0, [sp, #20] @ 4-byte Spill
-	ldr	r0, [sp, #1764]
-	str	r0, [sp, #16] @ 4-byte Spill
-	ldr	r0, [sp, #1760]
-	str	r0, [sp, #12] @ 4-byte Spill
-	ldm	r11, {r8, r9, r10, r11}
-	ldr	r0, [sp, #136] @ 4-byte Reload
-	ldr	r1, [sp, #132] @ 4-byte Reload
-	ldr	r2, [r0, #24]
-	add	r0, lr, #632
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #32] @ 4-byte Reload
-	ldr	r1, [sp, #12] @ 4-byte Reload
-	adds	r0, r0, r8
-	ldr	r0, [sp, #124] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #120] @ 4-byte Reload
-	adcs	r0, r0, r10
-	add	r10, sp, #1696
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r7
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r6
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #16] @ 4-byte Reload
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #20] @ 4-byte Reload
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #28] @ 4-byte Reload
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #32] @ 4-byte Reload
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #52] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adc	r0, r0, r1
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #1724]
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #1720]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #1716]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #1712]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #1708]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldm	r10, {r8, r9, r10}
-	ldr	r7, [sp, #1656]
-	ldr	r6, [sp, #124] @ 4-byte Reload
-	ldr	r0, [sp, #1660]
-	ldr	r1, [sp, #1664]
-	ldr	r2, [sp, #1668]
-	ldr	r3, [sp, #1672]
-	ldr	r12, [sp, #1676]
-	ldr	lr, [sp, #1680]
-	ldr	r4, [sp, #1684]
-	ldr	r5, [sp, #1688]
-	ldr	r11, [sp, #1692]
-	adds	r7, r6, r7
-	ldr	r6, [sp, #120] @ 4-byte Reload
-	str	r7, [sp, #32] @ 4-byte Spill
-	adcs	r0, r6, r0
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r2
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r3
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r12
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, lr
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r8
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r10
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #140] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #52] @ 4-byte Reload
-	adc	r0, r0, #0
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #128] @ 4-byte Reload
-	mul	r2, r7, r0
-	add	r0, sp, #1584
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #1652]
-	add	r11, sp, #1584
-	ldr	r4, [sp, #1612]
-	ldr	r5, [sp, #1608]
-	ldr	r6, [sp, #1604]
-	ldr	r7, [sp, #1600]
-	add	lr, sp, #1024
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #1648]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #1644]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #1640]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #1636]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldr	r0, [sp, #1632]
-	str	r0, [sp, #28] @ 4-byte Spill
-	ldr	r0, [sp, #1628]
-	str	r0, [sp, #24] @ 4-byte Spill
-	ldr	r0, [sp, #1624]
-	str	r0, [sp, #20] @ 4-byte Spill
-	ldr	r0, [sp, #1620]
-	str	r0, [sp, #16] @ 4-byte Spill
-	ldr	r0, [sp, #1616]
-	str	r0, [sp, #12] @ 4-byte Spill
-	ldm	r11, {r8, r9, r10, r11}
-	ldr	r0, [sp, #136] @ 4-byte Reload
-	ldr	r1, [sp, #132] @ 4-byte Reload
-	ldr	r2, [r0, #28]
-	add	r0, lr, #488
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #32] @ 4-byte Reload
-	ldr	r1, [sp, #12] @ 4-byte Reload
-	adds	r0, r0, r8
-	ldr	r0, [sp, #124] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #120] @ 4-byte Reload
-	adcs	r0, r0, r10
-	add	r10, sp, #1552
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r7
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r6
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #16] @ 4-byte Reload
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #20] @ 4-byte Reload
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #24] @ 4-byte Reload
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #28] @ 4-byte Reload
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #52] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adc	r0, r0, r1
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #1580]
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #1576]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #1572]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #1568]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #1564]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldm	r10, {r8, r9, r10}
-	ldr	r7, [sp, #1512]
-	ldr	r6, [sp, #124] @ 4-byte Reload
-	ldr	r0, [sp, #1516]
-	ldr	r1, [sp, #1520]
-	ldr	r2, [sp, #1524]
-	ldr	r3, [sp, #1528]
-	ldr	r12, [sp, #1532]
-	ldr	lr, [sp, #1536]
-	ldr	r4, [sp, #1540]
-	ldr	r5, [sp, #1544]
-	ldr	r11, [sp, #1548]
-	adds	r7, r6, r7
-	ldr	r6, [sp, #120] @ 4-byte Reload
-	str	r7, [sp, #32] @ 4-byte Spill
-	adcs	r0, r6, r0
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r2
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r3
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r12
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, lr
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r8
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r10
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #140] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #52] @ 4-byte Reload
-	adc	r0, r0, #0
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #128] @ 4-byte Reload
-	mul	r2, r7, r0
-	add	r0, sp, #1440
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #1508]
-	add	r11, sp, #1440
-	ldr	r4, [sp, #1468]
-	ldr	r5, [sp, #1464]
-	ldr	r6, [sp, #1460]
-	ldr	r7, [sp, #1456]
-	add	lr, sp, #1024
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #1504]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #1500]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #1496]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #1492]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldr	r0, [sp, #1488]
-	str	r0, [sp, #28] @ 4-byte Spill
-	ldr	r0, [sp, #1484]
-	str	r0, [sp, #24] @ 4-byte Spill
-	ldr	r0, [sp, #1480]
-	str	r0, [sp, #20] @ 4-byte Spill
-	ldr	r0, [sp, #1476]
-	str	r0, [sp, #16] @ 4-byte Spill
-	ldr	r0, [sp, #1472]
-	str	r0, [sp, #12] @ 4-byte Spill
-	ldm	r11, {r8, r9, r10, r11}
-	ldr	r0, [sp, #136] @ 4-byte Reload
-	ldr	r1, [sp, #132] @ 4-byte Reload
-	ldr	r2, [r0, #32]
-	add	r0, lr, #344
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #32] @ 4-byte Reload
-	ldr	r1, [sp, #12] @ 4-byte Reload
-	adds	r0, r0, r8
-	ldr	r0, [sp, #124] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #120] @ 4-byte Reload
-	adcs	r0, r0, r10
-	add	r10, sp, #1408
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r7
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r6
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #16] @ 4-byte Reload
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #20] @ 4-byte Reload
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #24] @ 4-byte Reload
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #28] @ 4-byte Reload
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #52] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adc	r0, r0, r1
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #1436]
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #1432]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #1428]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #1424]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #1420]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldm	r10, {r8, r9, r10}
-	ldr	r7, [sp, #1368]
-	ldr	r6, [sp, #124] @ 4-byte Reload
-	ldr	r0, [sp, #1372]
-	ldr	r1, [sp, #1376]
-	ldr	r2, [sp, #1380]
-	ldr	r3, [sp, #1384]
-	ldr	r12, [sp, #1388]
-	ldr	lr, [sp, #1392]
-	ldr	r4, [sp, #1396]
-	ldr	r5, [sp, #1400]
-	ldr	r11, [sp, #1404]
-	adds	r7, r6, r7
-	ldr	r6, [sp, #120] @ 4-byte Reload
-	str	r7, [sp, #32] @ 4-byte Spill
-	adcs	r0, r6, r0
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r2
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r3
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r12
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, lr
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r8
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r10
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #140] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #52] @ 4-byte Reload
-	adc	r0, r0, #0
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #128] @ 4-byte Reload
-	mul	r2, r7, r0
-	add	r0, sp, #1296
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #1364]
-	add	r11, sp, #1296
-	ldr	r4, [sp, #1324]
-	ldr	r5, [sp, #1320]
-	ldr	r6, [sp, #1316]
-	ldr	r7, [sp, #1312]
-	add	lr, sp, #1024
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #1360]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #1356]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #1352]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #1348]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldr	r0, [sp, #1344]
-	str	r0, [sp, #28] @ 4-byte Spill
-	ldr	r0, [sp, #1340]
-	str	r0, [sp, #24] @ 4-byte Spill
-	ldr	r0, [sp, #1336]
-	str	r0, [sp, #20] @ 4-byte Spill
-	ldr	r0, [sp, #1332]
-	str	r0, [sp, #16] @ 4-byte Spill
-	ldr	r0, [sp, #1328]
-	str	r0, [sp, #12] @ 4-byte Spill
-	ldm	r11, {r8, r9, r10, r11}
-	ldr	r0, [sp, #136] @ 4-byte Reload
-	ldr	r1, [sp, #132] @ 4-byte Reload
-	ldr	r2, [r0, #36]
-	add	r0, lr, #200
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #32] @ 4-byte Reload
-	ldr	r1, [sp, #12] @ 4-byte Reload
-	adds	r0, r0, r8
-	ldr	r0, [sp, #124] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #120] @ 4-byte Reload
-	adcs	r0, r0, r10
-	add	r10, sp, #1264
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r7
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r6
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #16] @ 4-byte Reload
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #20] @ 4-byte Reload
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #24] @ 4-byte Reload
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #28] @ 4-byte Reload
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #52] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adc	r0, r0, r1
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #1292]
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #1288]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #1284]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #1280]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #1276]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldm	r10, {r8, r9, r10}
-	ldr	r7, [sp, #1224]
-	ldr	r6, [sp, #124] @ 4-byte Reload
-	ldr	r0, [sp, #1228]
-	ldr	r1, [sp, #1232]
-	ldr	r2, [sp, #1236]
-	ldr	r3, [sp, #1240]
-	ldr	r12, [sp, #1244]
-	ldr	lr, [sp, #1248]
-	ldr	r4, [sp, #1252]
-	ldr	r5, [sp, #1256]
-	ldr	r11, [sp, #1260]
-	adds	r7, r6, r7
-	ldr	r6, [sp, #120] @ 4-byte Reload
-	str	r7, [sp, #32] @ 4-byte Spill
-	adcs	r0, r6, r0
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r2
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r3
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r12
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, lr
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r5
-	ldr	r5, [sp, #128] @ 4-byte Reload
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	mul	r2, r7, r5
-	adcs	r0, r0, r11
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r8
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r10
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #140] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #52] @ 4-byte Reload
-	adc	r0, r0, #0
-	str	r0, [sp, #56] @ 4-byte Spill
-	add	r0, sp, #1152
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #1220]
-	add	r11, sp, #1152
-	ldr	r4, [sp, #1176]
-	ldr	r6, [sp, #1172]
-	ldr	r7, [sp, #1168]
-	add	lr, sp, #1024
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #1216]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #1212]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #1208]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #1204]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldr	r0, [sp, #1200]
-	str	r0, [sp, #28] @ 4-byte Spill
-	ldr	r0, [sp, #1196]
-	str	r0, [sp, #24] @ 4-byte Spill
-	ldr	r0, [sp, #1192]
-	str	r0, [sp, #20] @ 4-byte Spill
-	ldr	r0, [sp, #1188]
-	str	r0, [sp, #16] @ 4-byte Spill
-	ldr	r0, [sp, #1184]
-	str	r0, [sp, #12] @ 4-byte Spill
-	ldr	r0, [sp, #1180]
-	str	r0, [sp, #8] @ 4-byte Spill
-	ldm	r11, {r8, r9, r10, r11}
-	ldr	r0, [sp, #136] @ 4-byte Reload
-	ldr	r1, [sp, #132] @ 4-byte Reload
-	ldr	r2, [r0, #40]
-	add	r0, lr, #56
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #32] @ 4-byte Reload
-	ldr	r1, [sp, #8] @ 4-byte Reload
-	adds	r0, r0, r8
-	ldr	r8, [sp, #1092]
-	ldr	r0, [sp, #124] @ 4-byte Reload
-	adcs	r2, r0, r9
-	ldr	r0, [sp, #120] @ 4-byte Reload
-	adcs	r0, r0, r10
-	add	r10, sp, #1120
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r7
-	ldr	r7, [sp, #1084]
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r6
-	ldr	r6, [sp, #1088]
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r4
-	ldr	r4, [sp, #1080]
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #12] @ 4-byte Reload
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #16] @ 4-byte Reload
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #20] @ 4-byte Reload
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #24] @ 4-byte Reload
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #28] @ 4-byte Reload
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #52] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adc	r0, r0, r1
-	str	r0, [sp, #56] @ 4-byte Spill
-	adds	r0, r2, r4
-	mul	r1, r0, r5
-	str	r0, [sp, #124] @ 4-byte Spill
-	ldr	r0, [sp, #1148]
-	str	r1, [sp, #48] @ 4-byte Spill
-	str	r0, [sp, #52] @ 4-byte Spill
-	ldr	r0, [sp, #1144]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #1140]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #1136]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldm	r10, {r4, r5, r9, r10}
-	ldr	r11, [sp, #120] @ 4-byte Reload
-	ldr	r0, [sp, #1096]
-	ldr	r1, [sp, #1100]
-	ldr	r2, [sp, #1104]
-	ldr	r3, [sp, #1108]
-	ldr	r12, [sp, #1112]
-	ldr	lr, [sp, #1116]
-	adcs	r7, r11, r7
-	str	r7, [sp, #120] @ 4-byte Spill
-	ldr	r7, [sp, #116] @ 4-byte Reload
-	adcs	r6, r7, r6
-	str	r6, [sp, #116] @ 4-byte Spill
-	ldr	r6, [sp, #112] @ 4-byte Reload
-	adcs	r6, r6, r8
-	str	r6, [sp, #112] @ 4-byte Spill
-	ldr	r6, [sp, #108] @ 4-byte Reload
-	adcs	r0, r6, r0
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, r2
-	ldr	r2, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r3
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r12
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, lr
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #76] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r9
-	str	r0, [sp, #72] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r10
-	str	r0, [sp, #68] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #64] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #60] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #140] @ 4-byte Reload
-	str	r0, [sp, #56] @ 4-byte Spill
-	ldr	r0, [sp, #52] @ 4-byte Reload
-	adc	r0, r0, #0
-	str	r0, [sp, #52] @ 4-byte Spill
-	add	r0, sp, #1008
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #1076]
-	add	r11, sp, #1008
-	ldr	r4, [sp, #1036]
-	ldr	r5, [sp, #1032]
-	ldr	r6, [sp, #1028]
-	ldr	r7, [sp, #1024]
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldr	r0, [sp, #1072]
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #1068]
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #1064]
-	str	r0, [sp, #28] @ 4-byte Spill
-	ldr	r0, [sp, #1060]
-	str	r0, [sp, #32] @ 4-byte Spill
-	ldr	r0, [sp, #1056]
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldr	r0, [sp, #1052]
-	str	r0, [sp, #24] @ 4-byte Spill
-	ldr	r0, [sp, #1048]
-	str	r0, [sp, #20] @ 4-byte Spill
-	ldr	r0, [sp, #1044]
-	str	r0, [sp, #16] @ 4-byte Spill
-	ldr	r0, [sp, #1040]
-	str	r0, [sp, #12] @ 4-byte Spill
-	ldm	r11, {r8, r9, r10, r11}
-	ldr	r0, [sp, #136] @ 4-byte Reload
-	ldr	r1, [sp, #132] @ 4-byte Reload
-	ldr	r2, [r0, #44]
-	add	r0, sp, #936
-	bl	.LmulPv544x32(PLT)
-	ldr	r0, [sp, #124] @ 4-byte Reload
-	ldr	r1, [sp, #12] @ 4-byte Reload
-	add	lr, sp, #952
-	adds	r0, r0, r8
-	add	r8, sp, #936
-	ldr	r0, [sp, #120] @ 4-byte Reload
-	adcs	r2, r0, r9
-	ldr	r0, [sp, #116] @ 4-byte Reload
-	adcs	r0, r0, r10
-	add	r10, sp, #976
-	str	r0, [sp, #120] @ 4-byte Spill
-	ldr	r0, [sp, #112] @ 4-byte Reload
-	adcs	r0, r0, r11
-	str	r0, [sp, #116] @ 4-byte Spill
-	ldr	r0, [sp, #108] @ 4-byte Reload
-	adcs	r0, r0, r7
-	str	r0, [sp, #112] @ 4-byte Spill
-	ldr	r0, [sp, #104] @ 4-byte Reload
-	adcs	r0, r0, r6
-	str	r0, [sp, #108] @ 4-byte Spill
-	ldr	r0, [sp, #100] @ 4-byte Reload
-	adcs	r0, r0, r5
-	str	r0, [sp, #104] @ 4-byte Spill
-	ldr	r0, [sp, #96] @ 4-byte Reload
-	adcs	r0, r0, r4
-	str	r0, [sp, #100] @ 4-byte Spill
-	ldr	r0, [sp, #88] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #16] @ 4-byte Reload
-	str	r0, [sp, #96] @ 4-byte Spill
-	ldr	r0, [sp, #84] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #20] @ 4-byte Reload
-	str	r0, [sp, #88] @ 4-byte Spill
-	ldr	r0, [sp, #80] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #24] @ 4-byte Reload
-	str	r0, [sp, #84] @ 4-byte Spill
-	ldr	r0, [sp, #76] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #36] @ 4-byte Reload
-	str	r0, [sp, #80] @ 4-byte Spill
-	ldr	r0, [sp, #72] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #32] @ 4-byte Reload
-	str	r0, [sp, #36] @ 4-byte Spill
-	ldr	r0, [sp, #68] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #28] @ 4-byte Reload
-	str	r0, [sp, #32] @ 4-byte Spill
-	ldr	r0, [sp, #64] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #40] @ 4-byte Reload
-	str	r0, [sp, #28] @ 4-byte Spill
-	ldr	r0, [sp, #60] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #44] @ 4-byte Reload
-	str	r0, [sp, #40] @ 4-byte Spill
-	ldr	r0, [sp, #56] @ 4-byte Reload
-	adcs	r0, r0, r1
-	ldr	r1, [sp, #48] @ 4-byte Reload
-	str	r0, [sp, #44] @ 4-byte Spill
-	ldr	r0, [sp, #52] @ 4-byte Reload
-	adc	r0, r0, r1
-	str	r0, [sp, #48] @ 4-byte Spill
-	ldm	r8, {r4, r6, r7, r8}
-	ldr	r0, [sp, #128] @ 4-byte Reload
-	adds	r1, r2, r4
-	mul	r2, r1, r0
-	ldr	r0, [sp, #1004]
-	str	r1, [sp, #124] @ 4-byte Spill
-	str	r2, [sp, #24] @ 4-byte Spill
-	str	r0, [sp, #20] @ 4-byte Spill
-	ldr	r0, [sp, #1000]
-	str	r0, [sp, #16] @ 4-byte Spill
-	ldr	r0, [sp, #996]
-	str	r0, [sp, #12] @ 4-byte Spill
-	ldr	r0, [sp, #992]
-	str	r0, [sp, #8] @ 4-byte Spill
-	ldm	r10, {r4, r5, r9, r10}
-	ldm	lr, {r0, r1, r2, r3, r12, lr}
-	ldr	r11, [sp, #120] @ 4-byte Reload
-	adcs	r6,
r11, r6 - str r6, [sp, #76] @ 4-byte Spill - ldr r6, [sp, #116] @ 4-byte Reload - adcs r6, r6, r7 - str r6, [sp, #72] @ 4-byte Spill - ldr r6, [sp, #112] @ 4-byte Reload - adcs r6, r6, r8 - str r6, [sp, #68] @ 4-byte Spill - ldr r6, [sp, #108] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #8] @ 4-byte Reload - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #24] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #36] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #32] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #28] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #12] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #20] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #80] @ 4-byte Spill - add r0, sp, #864 - bl .LmulPv544x32(PLT) - ldr r1, [sp, #932] - ldr r5, [sp, #892] - ldr r7, [sp, #888] - ldr r4, [sp, #884] - ldr r9, [sp, #880] - ldr r8, [sp, #864] - ldr r11, [sp, #868] - ldr r10, [sp, #872] - ldr r6, [sp, #876] - add r0, sp, #792 - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #928] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #924] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #920] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #916] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #912] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #908] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #904] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #900] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #896] - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp, #136] @ 4-byte Reload - ldr r2, [r1, #48] - ldr r1, [sp, #132] @ 4-byte Reload - bl .LmulPv544x32(PLT) - ldr r0, [sp, #124] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #796 - adds r0, r0, r8 - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r10 - add r10, sp, #820 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, 
[sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #860] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #856] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #852] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #848] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #844] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #840] - str r0, [sp, #24] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r7, [sp, #792] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #124] @ 4-byte Reload - ldr r6, [sp, #76] @ 4-byte Reload - adds r7, r11, r7 - adcs r0, r6, r0 - str r7, [sp, #28] @ 4-byte Spill - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #48] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #720 - bl .LmulPv544x32(PLT) - ldr r1, [sp, #788] - add r11, sp, #728 - ldr r5, [sp, #748] - ldr r9, [sp, #744] - ldr r10, [sp, #720] - ldr r6, [sp, #724] - add r0, sp, #648 - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, 
#784] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #780] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #776] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #772] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #768] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #764] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #760] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #756] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #752] - str r1, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r7, r8, r11} - ldr r1, [sp, #136] @ 4-byte Reload - ldr r2, [r1, #52] - ldr r1, [sp, #132] @ 4-byte Reload - bl .LmulPv544x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #652 - adds r0, r0, r10 - add r10, sp, #676 - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #716] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #712] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #708] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #704] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #700] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #696] - str r0, [sp, #32] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r7, [sp, #648] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #80] @ 4-byte Reload - ldr r6, [sp, #76] @ 4-byte Reload - adds r7, r11, r7 - adcs r0, r6, r0 - str r7, [sp, #28] @ 4-byte Spill - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte 
Reload - adcs r0, r0, r12 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #576 - bl .LmulPv544x32(PLT) - ldr r1, [sp, #644] - add r11, sp, #584 - ldr r5, [sp, #604] - ldr r9, [sp, #600] - ldr r10, [sp, #576] - ldr r6, [sp, #580] - add r0, sp, #504 - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #640] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #636] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #632] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #628] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #624] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #620] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #616] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #612] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #608] - str r1, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r7, r8, r11} - ldr r1, [sp, #136] @ 4-byte Reload - ldr r2, [r1, #56] - ldr r1, [sp, #132] @ 4-byte Reload - bl .LmulPv544x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #508 - adds r0, r0, r10 - add r10, sp, #532 - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - 
adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #572] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #568] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #564] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #560] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #556] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #552] - str r0, [sp, #32] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r7, [sp, #504] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #80] @ 4-byte Reload - ldr r6, [sp, #76] @ 4-byte Reload - adds r7, r11, r7 - adcs r0, r6, r0 - str r7, [sp, #28] @ 4-byte Spill - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #432 - bl .LmulPv544x32(PLT) - ldr r1, [sp, #500] - add r11, sp, #440 - ldr r5, [sp, #460] - ldr r9, [sp, #456] - ldr r10, [sp, #432] - ldr r6, [sp, #436] - add r0, sp, #360 - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #496] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #492] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #488] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #484] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #480] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #476] - str r1, [sp, #24] @ 4-byte Spill 
- ldr r1, [sp, #472] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #468] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #464] - str r1, [sp, #12] @ 4-byte Spill - ldm r11, {r4, r7, r8, r11} - ldr r1, [sp, #136] @ 4-byte Reload - ldr r2, [r1, #60] - ldr r1, [sp, #132] @ 4-byte Reload - bl .LmulPv544x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #364 - adds r0, r0, r10 - add r10, sp, #388 - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #428] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #424] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #420] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #416] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #412] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #408] - str r0, [sp, #32] @ 4-byte Spill - ldm r10, {r4, r5, r8, r9, r10} - ldr r7, [sp, #360] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #80] @ 4-byte Reload - ldr r6, [sp, #76] @ 4-byte Reload - adds r7, r11, r7 - adcs r0, r6, r0 - str r7, [sp, #28] @ 4-byte Spill - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #124] @ 
4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - mul r2, r7, r0 - add r0, sp, #288 - bl .LmulPv544x32(PLT) - ldr r1, [sp, #356] - add r8, sp, #288 - ldr r9, [sp, #316] - ldr r10, [sp, #312] - ldr r11, [sp, #308] - ldr r6, [sp, #304] - add r0, sp, #216 - str r1, [sp, #52] @ 4-byte Spill - ldr r1, [sp, #352] - str r1, [sp, #48] @ 4-byte Spill - ldr r1, [sp, #348] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #344] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #340] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #336] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #332] - str r1, [sp, #24] @ 4-byte Spill - ldr r1, [sp, #328] - str r1, [sp, #20] @ 4-byte Spill - ldr r1, [sp, #324] - str r1, [sp, #16] @ 4-byte Spill - ldr r1, [sp, #320] - str r1, [sp, #12] @ 4-byte Spill - ldm r8, {r4, r5, r8} - ldr r1, [sp, #136] @ 4-byte Reload - ldr r7, [sp, #300] - ldr r2, [r1, #64] - ldr r1, [sp, #132] @ 4-byte Reload - bl .LmulPv544x32(PLT) - ldr r0, [sp, #28] @ 4-byte Reload - ldr r1, [sp, #12] @ 4-byte Reload - add lr, sp, #232 - adds r0, r0, r4 - ldr r0, [sp, #80] @ 4-byte Reload - adcs r2, r0, r5 - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #136] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r11 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, r9 - add r9, sp, #216 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte 
Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adc r0, r0, r1 - str r0, [sp, #84] @ 4-byte Spill - ldm r9, {r4, r8, r9} - ldr r0, [sp, #128] @ 4-byte Reload - ldr r7, [sp, #228] - ldr r5, [sp, #260] - adds r11, r2, r4 - ldr r4, [sp, #256] - mul r1, r11, r0 - ldr r0, [sp, #284] - str r1, [sp, #64] @ 4-byte Spill - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #280] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #276] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #272] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #268] - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #264] - str r0, [sp, #32] @ 4-byte Spill - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r10, [sp, #136] @ 4-byte Reload - ldr r6, [sp, #132] @ 4-byte Reload - adcs r8, r10, r8 - ldr r10, [sp, #140] @ 4-byte Reload - adcs r9, r6, r9 - ldr r6, [sp, #80] @ 4-byte Reload - adcs r7, r6, r7 - ldr r6, [sp, #76] @ 4-byte Reload - adcs r0, r6, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #64] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #128] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r6, r0, r1 - ldr r0, [sp, #88] @ 4-byte Reload - ldr r1, [sp, #40] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r10 - str r0, [sp, #136] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #88] @ 4-byte Spill - add r0, sp, #144 - bl .LmulPv544x32(PLT) - add r3, sp, #144 - ldm r3, {r0, r1, r2, r3} - adds r0, r11, r0 - adcs r4, r8, r1 - ldr r0, [sp, #160] - ldr r1, [sp, #44] @ 4-byte Reload - adcs r8, r9, r2 - str r4, [sp, #52] @ 4-byte Spill - adcs r9, r7, r3 - mov r3, r10 - str r8, [sp, #60] @ 4-byte Spill - str r9, [sp, #64] @ 4-byte Spill - adcs r5, r1, r0 - ldr r0, [sp, #164] - ldr r1, [sp, #48] @ 4-byte Reload - str r5, [sp, #68] @ 4-byte Spill - adcs r0, r1, r0 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #168] - adcs lr, r1, r0 - ldr r0, [sp, #172] - ldr r1, [sp, #72] @ 4-byte Reload - str lr, [sp, #48] @ 4-byte Spill - adcs r0, r1, r0 - ldr r1, [sp, #76] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #176] - adcs r0, r1, r0 - ldr r1, [sp, #80] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #180] - adcs r0, r1, r0 - ldr r1, [sp, #116] @ 4-byte Reload - str r0, [sp, #108] @ 4-byte Spill - 
ldr r0, [sp, #184] - adcs r0, r1, r0 - ldr r1, [sp, #120] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #188] - adcs r0, r1, r0 - ldr r1, [sp, #124] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #192] - adcs r0, r1, r0 - ldr r1, [sp, #128] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #196] - adcs r0, r1, r0 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #200] - adcs r0, r6, r0 - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #204] - adcs r0, r1, r0 - ldr r1, [sp, #136] @ 4-byte Reload - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #208] - adcs r0, r1, r0 - ldr r1, [sp, #88] @ 4-byte Reload - str r0, [sp, #136] @ 4-byte Spill - ldr r0, [sp, #212] - adc r1, r1, r0 - str r1, [sp, #88] @ 4-byte Spill - ldm r3, {r0, r2, r7} - ldr r6, [r3, #12] - ldr r11, [r3, #36] - ldr r10, [r3, #32] - subs r12, r4, r0 - ldr r0, [r3, #64] - sbcs r4, r8, r2 - ldr r2, [sp, #96] @ 4-byte Reload - sbcs r8, r9, r7 - ldr r7, [r3, #20] - sbcs r9, r5, r6 - ldr r6, [r3, #24] - ldr r5, [r3, #28] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [r3, #40] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [r3, #44] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [r3, #48] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [r3, #52] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [r3, #56] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [r3, #60] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r3, #16] - sbcs r2, r2, r0 - ldr r0, [sp, #100] @ 4-byte Reload - sbcs r3, lr, r7 - ldr r7, [sp, #56] @ 4-byte Reload - sbcs lr, r0, r6 - ldr r0, [sp, #104] @ 4-byte Reload - sbcs r5, r0, r5 - ldr r0, [sp, #108] @ 4-byte Reload - sbcs r6, r0, r10 - ldr r0, [sp, #112] @ 4-byte Reload - sbcs r11, r0, r11 - ldr r0, [sp, #116] @ 4-byte Reload - sbcs r0, r0, r7 - ldr r7, [sp, #72] @ 4-byte Reload - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - sbcs r0, r0, r7 - ldr r7, [sp, #76] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - sbcs r0, r0, r7 - ldr r7, [sp, #80] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - sbcs r0, r0, r7 - ldr r7, [sp, #84] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - sbcs r0, r0, r7 - ldr r7, [sp, #40] @ 4-byte Reload - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - sbcs r0, r0, r7 - ldr r7, [sp, #60] @ 4-byte Reload - str r0, [sp, #140] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - sbc r10, r1, r0 - ldr r0, [sp, #52] @ 4-byte Reload - asr r1, r10, #31 - cmp r1, #0 - movlt r4, r7 - movlt r12, r0 - ldr r0, [sp, #92] @ 4-byte Reload - str r12, [r0] - str r4, [r0, #4] - ldr r4, [sp, #64] @ 4-byte Reload - movlt r8, r4 - ldr r4, [sp, #68] @ 4-byte Reload - cmp r1, #0 - str r8, [r0, #8] - movlt r9, r4 - ldr r4, [sp, #96] @ 4-byte Reload - str r9, [r0, #12] - movlt r2, r4 - str r2, [r0, #16] - ldr r2, [sp, #48] @ 4-byte Reload - movlt r3, r2 - ldr r2, [sp, #100] @ 4-byte Reload - cmp r1, #0 - str r3, [r0, #20] - ldr r3, [sp, #56] @ 4-byte Reload - movlt lr, r2 - ldr r2, [sp, #104] @ 4-byte Reload - str lr, [r0, #24] - movlt r5, r2 - ldr r2, [sp, #108] @ 4-byte Reload - str r5, [r0, #28] - movlt r6, r2 - ldr r2, [sp, #112] @ 4-byte Reload - cmp r1, #0 - str r6, [r0, #32] - movlt r11, r2 - ldr r2, [sp, #116] @ 4-byte Reload - str r11, [r0, #36] - movlt r3, r2 - ldr r2, [sp, #120] @ 4-byte Reload - str r3, [r0, #40] - ldr r3, [sp, #72] @ 4-byte Reload - movlt 
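@ ---------------------------------------------------------------------
@ Illustrative sketch, not part of the generated file: the elided
@ rounds above combine a 544x32-bit product from .LmulPv544x32 with an
@ adcs accumulation chain; fused, each word performs
@ T[i] += q * p[i] + carry. Register roles here are hypothetical,
@ chosen only for the example: r0 = q, r1 -> p[i], r2 -> T[i],
@ r4 = carry in, r5 = carry out.
	ldr	r3, [r1]                @ p[i]
	umull	r6, r5, r0, r3          @ r5:r6 = q * p[i]
	adds	r6, r6, r4              @ add carry-in to the low word
	adc	r5, r5, #0              @ propagate into the high word
	ldr	r3, [r2]                @ T[i]
	adds	r6, r6, r3              @ add the accumulator word
	adc	r5, r5, #0              @ carry-out feeds the next word
	str	r6, [r2]                @ write T[i] back
@ ---------------------------------------------------------------------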
	add	sp, sp, #548
	add	sp, sp, #2048
	pop	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	mov	pc, lr
.Lfunc_end259:
	.size	mcl_fp_montNF17L, .Lfunc_end259-mcl_fp_montNF17L
	.cantunwind
	.fnend

	.globl	mcl_fp_montRed17L
	.align	2
	.type	mcl_fp_montRed17L,%function
mcl_fp_montRed17L:                      @ @mcl_fp_montRed17L
	.fnstart
@ BB#0:
	.save	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	push	{r4, r5, r6, r7, r8, r9, r10, r11, lr}
	.pad	#444
	sub	sp, sp, #444
	.pad	#1024
	sub	sp, sp, #1024
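@ ---------------------------------------------------------------------
@ What follows computes a 17-limb Montgomery reduction. A hedged
@ summary (constant names are descriptive, not taken from the source):
@ given the 34-word product T at [r1] and the modulus p in r2 (copied
@ to r3), with n0 = -p^(-1) mod 2^32 loaded from [r3, #-4] during
@ setup, each of the 17 rounds performs
@	q = T[0] * n0 (mod 2^32)	@ the "mul r2, r7, r0" in the
@					@ (elided) setup below
@	T = (T + q * p) >> 32		@ product via .LmulPv544x32(PLT)
@ so the bottom word cancels each time; after the rounds the result
@ equals T * 2^(-544) mod p, up to one conditional subtraction of p.
@ ---------------------------------------------------------------------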
@ [elided: the generated mcl_fp_montRed17L body, likewise collapsed
@  into run-on text by extraction. The setup spills the 34 input words
@  at [r1, #0..#132], the 17 modulus words at [r3, #0..#64], and n0
@  from [r3, #-4] to 4-byte stack slots; the reduction rounds then
@  repeat the quotient / .LmulPv544x32(PLT) / adcs-accumulate pattern,
@  one round per limb, and the listing continues in this pattern
@  below.]
r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r7 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #136] @ 4-byte Spill - ldr r0, [sp, #140] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #140] @ 4-byte Spill - ldr r0, [sp, #216] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #216] @ 4-byte Spill - ldr r0, [sp, #220] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #220] @ 4-byte Spill - ldr r0, [sp, #224] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #224] @ 4-byte Spill - ldr r0, [sp, #228] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #228] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - add r0, sp, #816 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #884] - add lr, sp, #840 - add r12, sp, #820 - ldr r8, [sp, #856] - ldr r6, [sp, #852] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #880] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #876] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #872] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #868] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #864] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #860] - str r0, [sp, #24] @ 4-byte Spill - ldm lr, {r10, r11, lr} - ldr r4, [sp, #816] - ldm r12, {r0, r1, r2, r3, r12} - ldr r9, [sp, #28] @ 4-byte Reload - adds r4, r9, r4 - ldr r4, [sp, #116] @ 4-byte Reload - adcs r4, r4, r0 - ldr r0, [sp, #112] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r4, r5 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r11 - mov r11, r4 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte 
Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r7 - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #136] @ 4-byte Spill - ldr r0, [sp, #140] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #140] @ 4-byte Spill - ldr r0, [sp, #216] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #216] @ 4-byte Spill - ldr r0, [sp, #220] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #220] @ 4-byte Spill - ldr r0, [sp, #224] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #224] @ 4-byte Spill - ldr r0, [sp, #228] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #228] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - add r0, sp, #744 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #812] - add r10, sp, #768 - add lr, sp, #744 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #808] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #804] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #800] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #796] - str r0, [sp, #40] @ 4-byte Spill - ldm r10, {r4, r5, r6, r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r11, r0 - ldr r0, [sp, #116] @ 4-byte Reload - adcs r11, r0, r1 - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #40] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #232] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - mul r2, r11, r5 - adcs r0, r0, r6 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #236] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #136] @ 4-byte Spill - ldr r0, [sp, #140] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #140] @ 4-byte Spill - ldr r0, [sp, #216] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #216] @ 4-byte Spill - ldr r0, [sp, #220] @ 4-byte Reload 
- adcs r0, r0, #0 - str r0, [sp, #220] @ 4-byte Spill - ldr r0, [sp, #224] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #224] @ 4-byte Spill - ldr r0, [sp, #228] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #228] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #64] @ 4-byte Spill - add r0, sp, #672 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #740] - add r9, sp, #704 - add r12, sp, #676 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #736] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #732] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #728] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #724] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #720] - str r0, [sp, #40] @ 4-byte Spill - ldm r9, {r6, r7, r8, r9} - ldr r4, [sp, #672] - ldr lr, [sp, #700] - ldr r10, [sp, #696] - ldm r12, {r0, r1, r2, r3, r12} - adds r4, r11, r4 - ldr r4, [sp, #116] @ 4-byte Reload - adcs r11, r4, r0 - ldr r0, [sp, #112] @ 4-byte Reload - ldr r4, [sp, #236] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r5 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #44] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #60] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r4 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #136] @ 4-byte Spill - ldr r0, [sp, #140] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #140] @ 4-byte Spill - ldr r0, [sp, #216] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #216] @ 4-byte Spill - ldr r0, [sp, #220] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #220] @ 4-byte Spill - ldr r0, [sp, #224] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #224] @ 4-byte Spill - ldr r0, [sp, #228] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #228] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #68] @ 4-byte Spill - add r0, sp, #600 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #668] - add r10, sp, #624 - add lr, sp, #600 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #664] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #660] - str r0, [sp, #56] @ 4-byte Spill - 
ldr r0, [sp, #656] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #652] - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #648] - str r0, [sp, #44] @ 4-byte Spill - ldm r10, {r5, r6, r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r11, r0 - ldr r0, [sp, #116] @ 4-byte Reload - adcs r11, r0, r1 - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #44] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r9 - ldr r9, [sp, #232] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - mul r2, r11, r9 - adcs r0, r0, r10 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #60] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #64] @ 4-byte Reload - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r4 - str r0, [sp, #136] @ 4-byte Spill - ldr r0, [sp, #140] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #140] @ 4-byte Spill - ldr r0, [sp, #216] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #216] @ 4-byte Spill - ldr r0, [sp, #220] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #220] @ 4-byte Spill - ldr r0, [sp, #224] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #224] @ 4-byte Spill - ldr r0, [sp, #228] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #228] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #72] @ 4-byte Spill - add r0, sp, #528 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #596] - add r8, sp, #560 - add r12, sp, #532 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #592] - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #588] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #584] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #580] - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #576] - str r0, [sp, #48] @ 4-byte Spill - ldm r8, {r5, r6, r7, r8} - ldr r4, [sp, #528] - ldr lr, [sp, #556] - ldr r10, [sp, #552] - ldm r12, {r0, r1, r2, r3, r12} - adds r4, r11, r4 - ldr r4, [sp, #116] @ 4-byte Reload - adcs r11, r4, r0 - ldr r0, [sp, #112] @ 4-byte Reload - mov r4, r9 - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, 
[sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #52] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #60] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #64] @ 4-byte Reload - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #68] @ 4-byte Reload - str r0, [sp, #136] @ 4-byte Spill - ldr r0, [sp, #140] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #236] @ 4-byte Reload - str r0, [sp, #140] @ 4-byte Spill - ldr r0, [sp, #216] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #216] @ 4-byte Spill - ldr r0, [sp, #220] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #220] @ 4-byte Spill - ldr r0, [sp, #224] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #224] @ 4-byte Spill - ldr r0, [sp, #228] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #228] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - add r0, sp, #456 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #524] - add r10, sp, #480 - add lr, sp, #456 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #520] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #516] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #512] - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #508] - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #504] - str r0, [sp, #52] @ 4-byte Spill - ldm r10, {r5, r6, r7, r8, r9, r10} - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r0, r11, r0 - ldr r0, [sp, #116] @ 4-byte Reload - adcs r11, r0, r1 - ldr r0, [sp, #112] @ 4-byte Reload - ldr r1, [sp, #52] @ 4-byte Reload - adcs r0, r0, r2 - mul r2, r11, r4 - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #108] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r6 - ldr r6, [sp, #236] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r7 - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #56] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte 
Reload - adcs r0, r0, r1 - ldr r1, [sp, #60] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #72] @ 4-byte Reload - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #68] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #140] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #64] @ 4-byte Reload - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #216] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r6 - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #220] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #60] @ 4-byte Spill - ldr r0, [sp, #224] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #56] @ 4-byte Spill - ldr r0, [sp, #228] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #52] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #76] @ 4-byte Spill - add r0, sp, #384 - bl .LmulPv544x32(PLT) - ldr r0, [sp, #452] - add r10, sp, #412 - add lr, sp, #388 - str r0, [sp, #48] @ 4-byte Spill - ldr r0, [sp, #448] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [sp, #444] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #440] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #436] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #432] - str r0, [sp, #24] @ 4-byte Spill - ldm r10, {r5, r7, r8, r9, r10} - ldr r4, [sp, #384] - ldm lr, {r0, r1, r2, r3, r12, lr} - adds r4, r11, r4 - ldr r4, [sp, #116] @ 4-byte Reload - adcs r4, r4, r0 - ldr r0, [sp, #112] @ 4-byte Reload - adcs r11, r0, r1 - ldr r0, [sp, #108] @ 4-byte Reload - ldr r1, [sp, #24] @ 4-byte Reload - adcs r0, r0, r2 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #228] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #224] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r5 - ldr r5, [sp, #232] @ 4-byte Reload - str r0, [sp, #220] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - mul r2, r4, r5 - adcs r0, r0, r7 - str r0, [sp, #216] @ 4-byte Spill - ldr r0, [sp, #104] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #140] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #136] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r10 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #128] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #124] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #120] @ 4-byte Spill - ldr r0, [sp, #68] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #116] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #48] @ 4-byte Reload - str r0, [sp, #112] @ 4-byte Spill - ldr r0, [sp, #60] @ 4-byte Reload - adcs r0, r0, r1 - mov r1, r6 - str r0, [sp, #108] @ 4-byte Spill - ldr r0, [sp, #56] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #104] @ 4-byte Spill - ldr r0, [sp, #52] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #96] @ 4-byte Spill - add r0, sp, #312 - bl .LmulPv544x32(PLT) - add r6, sp, #312 - add r10, 
sp, #356 - add lr, sp, #328 - ldm r6, {r0, r1, r3, r6} - adds r0, r4, r0 - adcs r7, r11, r1 - mul r0, r7, r5 - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #380] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adcs r0, r0, r3 - str r0, [sp, #232] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #376] - str r0, [sp, #64] @ 4-byte Spill - ldm r10, {r4, r5, r6, r8, r10} - ldr r9, [sp, #352] - ldm lr, {r0, r1, r2, r3, r12, lr} - ldr r11, [sp, #228] @ 4-byte Reload - adcs r0, r11, r0 - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #224] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #64] @ 4-byte Reload - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #220] @ 4-byte Reload - adcs r0, r0, r2 - ldr r2, [sp, #88] @ 4-byte Reload - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #216] @ 4-byte Reload - adcs r11, r0, r3 - ldr r0, [sp, #140] @ 4-byte Reload - adcs r0, r0, r12 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #136] @ 4-byte Reload - adcs r0, r0, lr - str r0, [sp, #224] @ 4-byte Spill - ldr r0, [sp, #132] @ 4-byte Reload - adcs r0, r0, r9 - str r0, [sp, #220] @ 4-byte Spill - ldr r0, [sp, #128] @ 4-byte Reload - adcs r0, r0, r4 - str r0, [sp, #216] @ 4-byte Spill - ldr r0, [sp, #124] @ 4-byte Reload - adcs r0, r0, r5 - str r0, [sp, #136] @ 4-byte Spill - ldr r0, [sp, #120] @ 4-byte Reload - adcs r0, r0, r6 - str r0, [sp, #228] @ 4-byte Spill - ldr r0, [sp, #116] @ 4-byte Reload - adcs r0, r0, r8 - str r0, [sp, #140] @ 4-byte Spill - ldr r0, [sp, #112] @ 4-byte Reload - adcs r10, r0, r10 - ldr r0, [sp, #108] @ 4-byte Reload - adcs r8, r0, r1 - ldr r0, [sp, #104] @ 4-byte Reload - ldr r1, [sp, #80] @ 4-byte Reload - adcs r6, r0, r1 - ldr r0, [sp, #100] @ 4-byte Reload - ldr r1, [sp, #236] @ 4-byte Reload - adcs r0, r0, #0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #128] @ 4-byte Spill - add r0, sp, #240 - bl .LmulPv544x32(PLT) - add r3, sp, #240 - ldm r3, {r0, r1, r2, r3} - adds r0, r7, r0 - ldr r0, [sp, #232] @ 4-byte Reload - adcs r9, r0, r1 - ldr r0, [sp, #92] @ 4-byte Reload - ldr r1, [sp, #72] @ 4-byte Reload - str r9, [sp, #100] @ 4-byte Spill - adcs r12, r0, r2 - ldr r0, [sp, #68] @ 4-byte Reload - str r12, [sp, #104] @ 4-byte Spill - adcs lr, r0, r3 - ldr r0, [sp, #256] - str lr, [sp, #108] @ 4-byte Spill - adcs r4, r1, r0 - ldr r0, [sp, #260] - ldr r1, [sp, #76] @ 4-byte Reload - str r4, [sp, #112] @ 4-byte Spill - adcs r5, r1, r0 - ldr r0, [sp, #264] - ldr r1, [sp, #84] @ 4-byte Reload - str r5, [sp, #116] @ 4-byte Spill - adcs r11, r11, r0 - ldr r0, [sp, #268] - str r11, [sp, #120] @ 4-byte Spill - adcs r7, r1, r0 - ldr r0, [sp, #272] - ldr r1, [sp, #224] @ 4-byte Reload - str r7, [sp, #124] @ 4-byte Spill - adcs r0, r1, r0 - ldr r1, [sp, #220] @ 4-byte Reload - str r0, [sp, #224] @ 4-byte Spill - ldr r0, [sp, #276] - adcs r0, r1, r0 - ldr r1, [sp, #216] @ 4-byte Reload - str r0, [sp, #220] @ 4-byte Spill - ldr r0, [sp, #280] - adcs r0, r1, r0 - ldr r1, [sp, #136] @ 4-byte Reload - str r0, [sp, #216] @ 4-byte Spill - ldr r0, [sp, #284] - adcs r0, r1, r0 - ldr r1, [sp, #228] @ 4-byte Reload - str r0, [sp, #232] @ 4-byte Spill - ldr r0, [sp, #288] - adcs r0, r1, r0 - ldr r1, [sp, #140] @ 4-byte Reload - str r0, [sp, #228] @ 4-byte Spill - ldr r0, [sp, #292] - adcs r0, r1, r0 - ldr r1, [sp, #132] @ 4-byte Reload - str r0, [sp, #236] @ 4-byte Spill - ldr r0, [sp, #296] - adcs r10, r10, r0 - ldr r0, [sp, #300] - str r10, 
[sp, #136] @ 4-byte Spill - adcs r8, r8, r0 - ldr r0, [sp, #304] - str r8, [sp, #140] @ 4-byte Spill - adcs r6, r6, r0 - ldr r0, [sp, #308] - adcs r2, r1, r0 - ldr r0, [sp, #128] @ 4-byte Reload - adc r0, r0, #0 - str r0, [sp, #132] @ 4-byte Spill - ldr r0, [sp, #200] @ 4-byte Reload - subs r1, r9, r0 - ldr r0, [sp, #196] @ 4-byte Reload - sbcs r3, r12, r0 - ldr r0, [sp, #192] @ 4-byte Reload - sbcs r12, lr, r0 - ldr r0, [sp, #176] @ 4-byte Reload - sbcs lr, r4, r0 - ldr r0, [sp, #180] @ 4-byte Reload - sbcs r4, r5, r0 - ldr r0, [sp, #184] @ 4-byte Reload - sbcs r5, r11, r0 - ldr r0, [sp, #188] @ 4-byte Reload - ldr r11, [sp, #224] @ 4-byte Reload - sbcs r9, r7, r0 - ldr r0, [sp, #148] @ 4-byte Reload - ldr r7, [sp, #220] @ 4-byte Reload - sbcs r0, r11, r0 - ldr r11, [sp, #232] @ 4-byte Reload - str r0, [sp, #176] @ 4-byte Spill - ldr r0, [sp, #144] @ 4-byte Reload - sbcs r0, r7, r0 - ldr r7, [sp, #216] @ 4-byte Reload - str r0, [sp, #180] @ 4-byte Spill - ldr r0, [sp, #152] @ 4-byte Reload - sbcs r0, r7, r0 - ldr r7, [sp, #228] @ 4-byte Reload - str r0, [sp, #184] @ 4-byte Spill - ldr r0, [sp, #156] @ 4-byte Reload - sbcs r0, r11, r0 - ldr r11, [sp, #236] @ 4-byte Reload - str r0, [sp, #188] @ 4-byte Spill - ldr r0, [sp, #160] @ 4-byte Reload - sbcs r0, r7, r0 - str r0, [sp, #192] @ 4-byte Spill - ldr r0, [sp, #164] @ 4-byte Reload - sbcs r0, r11, r0 - str r0, [sp, #196] @ 4-byte Spill - ldr r0, [sp, #168] @ 4-byte Reload - sbcs r0, r10, r0 - mov r10, r6 - str r0, [sp, #200] @ 4-byte Spill - ldr r0, [sp, #172] @ 4-byte Reload - sbcs r7, r8, r0 - ldr r0, [sp, #204] @ 4-byte Reload - mov r8, r2 - sbcs r11, r6, r0 - ldr r0, [sp, #208] @ 4-byte Reload - sbcs r6, r2, r0 - ldr r0, [sp, #132] @ 4-byte Reload - sbc r2, r0, #0 - ldr r0, [sp, #100] @ 4-byte Reload - ands r2, r2, #1 - movne r1, r0 - ldr r0, [sp, #212] @ 4-byte Reload - str r1, [r0] - ldr r1, [sp, #104] @ 4-byte Reload - movne r3, r1 - ldr r1, [sp, #108] @ 4-byte Reload - str r3, [r0, #4] - ldr r3, [sp, #176] @ 4-byte Reload - movne r12, r1 - ldr r1, [sp, #112] @ 4-byte Reload - cmp r2, #0 - str r12, [r0, #8] - movne lr, r1 - ldr r1, [sp, #116] @ 4-byte Reload - str lr, [r0, #12] - movne r4, r1 - ldr r1, [sp, #120] @ 4-byte Reload - str r4, [r0, #16] - movne r5, r1 - ldr r1, [sp, #124] @ 4-byte Reload - cmp r2, #0 - str r5, [r0, #20] - movne r9, r1 - ldr r1, [sp, #224] @ 4-byte Reload - str r9, [r0, #24] - movne r3, r1 - ldr r1, [sp, #220] @ 4-byte Reload - str r3, [r0, #28] - ldr r3, [sp, #180] @ 4-byte Reload - movne r3, r1 - ldr r1, [sp, #216] @ 4-byte Reload - cmp r2, #0 - str r3, [r0, #32] - ldr r3, [sp, #184] @ 4-byte Reload - movne r3, r1 - ldr r1, [sp, #232] @ 4-byte Reload - str r3, [r0, #36] - ldr r3, [sp, #188] @ 4-byte Reload - movne r3, r1 - ldr r1, [sp, #228] @ 4-byte Reload - str r3, [r0, #40] - ldr r3, [sp, #192] @ 4-byte Reload - movne r3, r1 - ldr r1, [sp, #236] @ 4-byte Reload - cmp r2, #0 - str r3, [r0, #44] - ldr r3, [sp, #196] @ 4-byte Reload - movne r3, r1 - ldr r1, [sp, #200] @ 4-byte Reload - str r3, [r0, #48] - ldr r3, [sp, #136] @ 4-byte Reload - movne r1, r3 - str r1, [r0, #52] - ldr r1, [sp, #140] @ 4-byte Reload - movne r7, r1 - cmp r2, #0 - movne r11, r10 - movne r6, r8 - str r7, [r0, #56] - str r11, [r0, #60] - str r6, [r0, #64] - add sp, sp, #444 - add sp, sp, #1024 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end260: - .size mcl_fp_montRed17L, .Lfunc_end260-mcl_fp_montRed17L - .cantunwind - .fnend - - .globl mcl_fp_addPre17L - .align 2 - .type mcl_fp_addPre17L,%function 
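@ The function below is the compiler-generated raw (non-modular) addition of
@ two 544-bit numbers held as 17 little-endian 32-bit limbs: one "adds"
@ followed by sixteen "adcs" propagates the carry across the limbs, and the
@ closing "mov r0, #0 / adc r0, r0, #0" returns the final carry in r0.
@ A minimal C sketch of the same operation; the name and signature here are
@ illustrative, not taken from this repository:
@
@   #include <stdint.h>
@   /* r = a + b over 17 32-bit limbs; returns the carry out (0 or 1). */
@   static uint32_t addPre17(uint32_t r[17],
@                            const uint32_t a[17], const uint32_t b[17]) {
@       uint64_t t = 0;                   /* bit 32 holds the carry-in */
@       for (int i = 0; i < 17; i++) {
@           t += (uint64_t)a[i] + b[i];   /* limb sum plus carry-in */
@           r[i] = (uint32_t)t;           /* low word is the result limb */
@           t >>= 32;                     /* keep only the carry-out */
@       }
@       return (uint32_t)t;
@   }
@
@ The assembly fully unrolls this loop and keeps the carry in the CPSR C
@ flag instead of a 64-bit accumulator; the many spill/reload pairs are
@ register pressure from holding 17 limbs in an 8-register working set.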
-mcl_fp_addPre17L: @ @mcl_fp_addPre17L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #76 - sub sp, sp, #76 - ldm r1, {r3, lr} - ldr r8, [r1, #8] - ldr r5, [r1, #12] - ldm r2, {r6, r7, r12} - ldr r4, [r2, #12] - add r10, r2, #16 - adds r3, r6, r3 - str r3, [sp, #32] @ 4-byte Spill - ldr r3, [r2, #64] - str r3, [sp, #72] @ 4-byte Spill - adcs r3, r7, lr - add lr, r1, #16 - str r3, [sp, #28] @ 4-byte Spill - ldr r3, [r2, #32] - adcs r6, r12, r8 - adcs r8, r4, r5 - str r3, [sp, #36] @ 4-byte Spill - ldr r3, [r2, #36] - str r3, [sp, #40] @ 4-byte Spill - ldr r3, [r2, #40] - str r3, [sp, #44] @ 4-byte Spill - ldr r3, [r2, #44] - str r3, [sp, #48] @ 4-byte Spill - ldr r3, [r2, #48] - str r3, [sp, #52] @ 4-byte Spill - ldr r3, [r2, #52] - str r3, [sp, #56] @ 4-byte Spill - ldr r3, [r2, #56] - str r3, [sp, #60] @ 4-byte Spill - ldr r3, [r2, #60] - str r3, [sp, #64] @ 4-byte Spill - ldr r3, [r2, #28] - str r3, [sp, #24] @ 4-byte Spill - ldm r10, {r4, r5, r10} - ldr r2, [r1, #64] - ldr r11, [r1, #60] - str r2, [sp, #68] @ 4-byte Spill - ldr r2, [r1, #36] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #40] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #44] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #48] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r1, #52] - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [r1, #56] - str r2, [sp, #20] @ 4-byte Spill - ldm lr, {r1, r2, r3, r12, lr} - ldr r9, [sp, #32] @ 4-byte Reload - ldr r7, [sp, #28] @ 4-byte Reload - adcs r1, r4, r1 - str r9, [r0] - str r7, [r0, #4] - str r6, [r0, #8] - str r8, [r0, #12] - ldr r7, [sp, #8] @ 4-byte Reload - ldr r6, [sp, #12] @ 4-byte Reload - ldr r4, [sp, #20] @ 4-byte Reload - adcs r2, r5, r2 - str r1, [r0, #16] - ldr r5, [sp, #16] @ 4-byte Reload - adcs r1, r10, r3 - str r2, [r0, #20] - ldr r2, [sp, #24] @ 4-byte Reload - ldr r3, [sp, #4] @ 4-byte Reload - str r1, [r0, #24] - ldr r1, [sp, #36] @ 4-byte Reload - adcs r2, r2, r12 - adcs r12, r1, lr - str r2, [r0, #28] - ldr r1, [sp, #40] @ 4-byte Reload - ldr r2, [sp] @ 4-byte Reload - str r12, [r0, #32] - add r12, r0, #36 - adcs r2, r1, r2 - ldr r1, [sp, #44] @ 4-byte Reload - adcs r3, r1, r3 - ldr r1, [sp, #48] @ 4-byte Reload - adcs r7, r1, r7 - ldr r1, [sp, #52] @ 4-byte Reload - adcs r6, r1, r6 - ldr r1, [sp, #56] @ 4-byte Reload - adcs r5, r1, r5 - ldr r1, [sp, #60] @ 4-byte Reload - adcs r4, r1, r4 - ldr r1, [sp, #64] @ 4-byte Reload - stm r12, {r2, r3, r7} - str r6, [r0, #48] - str r5, [r0, #52] - str r4, [r0, #56] - ldr r2, [sp, #68] @ 4-byte Reload - adcs r1, r1, r11 - str r1, [r0, #60] - ldr r1, [sp, #72] @ 4-byte Reload - adcs r1, r1, r2 - str r1, [r0, #64] - mov r0, #0 - adc r0, r0, #0 - add sp, sp, #76 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end261: - .size mcl_fp_addPre17L, .Lfunc_end261-mcl_fp_addPre17L - .cantunwind - .fnend - - .globl mcl_fp_subPre17L - .align 2 - .type mcl_fp_subPre17L,%function -mcl_fp_subPre17L: @ @mcl_fp_subPre17L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #76 - sub sp, sp, #76 - ldm r2, {r3, lr} - ldr r8, [r2, #8] - ldr r5, [r2, #12] - ldm r1, {r6, r7, r12} - ldr r4, [r1, #12] - add r10, r2, #16 - subs r3, r6, r3 - str r3, [sp, #32] @ 4-byte Spill - ldr r3, [r2, #64] - str r3, [sp, #72] @ 4-byte Spill - sbcs r3, r7, lr - add lr, r1, #16 - str r3, [sp, #28] @ 4-byte Spill - ldr r3, [r2, #32] - sbcs r6, r12, r8 - sbcs r8, r4, r5 - str r3, [sp, #36] @ 4-byte Spill - 
ldr r3, [r2, #36] - str r3, [sp, #40] @ 4-byte Spill - ldr r3, [r2, #40] - str r3, [sp, #44] @ 4-byte Spill - ldr r3, [r2, #44] - str r3, [sp, #48] @ 4-byte Spill - ldr r3, [r2, #48] - str r3, [sp, #52] @ 4-byte Spill - ldr r3, [r2, #52] - str r3, [sp, #56] @ 4-byte Spill - ldr r3, [r2, #56] - str r3, [sp, #60] @ 4-byte Spill - ldr r3, [r2, #60] - str r3, [sp, #64] @ 4-byte Spill - ldr r3, [r2, #28] - str r3, [sp, #24] @ 4-byte Spill - ldm r10, {r4, r5, r10} - ldr r2, [r1, #64] - ldr r11, [r1, #60] - str r2, [sp, #68] @ 4-byte Spill - ldr r2, [r1, #36] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #40] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r1, #44] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #48] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r1, #52] - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [r1, #56] - str r2, [sp, #20] @ 4-byte Spill - ldm lr, {r1, r2, r3, r12, lr} - ldr r9, [sp, #32] @ 4-byte Reload - ldr r7, [sp, #28] @ 4-byte Reload - sbcs r1, r1, r4 - str r9, [r0] - str r7, [r0, #4] - str r6, [r0, #8] - str r8, [r0, #12] - ldr r7, [sp, #8] @ 4-byte Reload - ldr r6, [sp, #12] @ 4-byte Reload - ldr r4, [sp, #20] @ 4-byte Reload - sbcs r2, r2, r5 - str r1, [r0, #16] - ldr r5, [sp, #16] @ 4-byte Reload - sbcs r1, r3, r10 - str r2, [r0, #20] - ldr r2, [sp, #24] @ 4-byte Reload - ldr r3, [sp, #4] @ 4-byte Reload - str r1, [r0, #24] - ldr r1, [sp, #36] @ 4-byte Reload - sbcs r2, r12, r2 - sbcs r12, lr, r1 - str r2, [r0, #28] - ldr r1, [sp, #40] @ 4-byte Reload - ldr r2, [sp] @ 4-byte Reload - str r12, [r0, #32] - add r12, r0, #36 - sbcs r2, r2, r1 - ldr r1, [sp, #44] @ 4-byte Reload - sbcs r3, r3, r1 - ldr r1, [sp, #48] @ 4-byte Reload - sbcs r7, r7, r1 - ldr r1, [sp, #52] @ 4-byte Reload - sbcs r6, r6, r1 - ldr r1, [sp, #56] @ 4-byte Reload - sbcs r5, r5, r1 - ldr r1, [sp, #60] @ 4-byte Reload - sbcs r4, r4, r1 - ldr r1, [sp, #64] @ 4-byte Reload - stm r12, {r2, r3, r7} - str r6, [r0, #48] - str r5, [r0, #52] - str r4, [r0, #56] - ldr r2, [sp, #68] @ 4-byte Reload - sbcs r1, r11, r1 - str r1, [r0, #60] - ldr r1, [sp, #72] @ 4-byte Reload - sbcs r1, r2, r1 - str r1, [r0, #64] - mov r0, #0 - sbc r0, r0, #0 - and r0, r0, #1 - add sp, sp, #76 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end262: - .size mcl_fp_subPre17L, .Lfunc_end262-mcl_fp_subPre17L - .cantunwind - .fnend - - .globl mcl_fp_shr1_17L - .align 2 - .type mcl_fp_shr1_17L,%function -mcl_fp_shr1_17L: @ @mcl_fp_shr1_17L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #20 - sub sp, sp, #20 - ldr r4, [r1, #4] - ldr r3, [r1, #8] - add r9, r1, #32 - ldr r2, [r1, #12] - ldr r11, [r1] - lsr r7, r4, #1 - lsr lr, r2, #1 - lsrs r2, r2, #1 - orr r10, r7, r3, lsl #31 - ldr r7, [r1, #64] - rrx r12, r3 - lsrs r3, r4, #1 - add r4, r1, #16 - rrx r11, r11 - str r7, [sp, #16] @ 4-byte Spill - ldm r9, {r5, r7, r9} - ldr r6, [r1, #48] - ldr r8, [r1, #44] - str r6, [sp] @ 4-byte Spill - ldr r6, [r1, #52] - str r6, [sp, #4] @ 4-byte Spill - ldr r6, [r1, #56] - str r6, [sp, #8] @ 4-byte Spill - ldr r6, [r1, #60] - str r6, [sp, #12] @ 4-byte Spill - ldm r4, {r1, r2, r3, r4} - str r11, [r0] - stmib r0, {r10, r12} - orr r6, lr, r1, lsl #31 - str r6, [r0, #12] - lsrs r6, r2, #1 - rrx r1, r1 - str r1, [r0, #16] - lsr r1, r2, #1 - ldr r2, [sp, #4] @ 4-byte Reload - orr r1, r1, r3, lsl #31 - str r1, [r0, #20] - lsrs r1, r4, #1 - rrx r1, r3 - ldr r3, [sp] @ 4-byte Reload - str r1, [r0, #24] - lsr r1, r4, #1 - orr r1, r1, r5, lsl #31 - str r1, [r0, #28] - 
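@ Cross-limb shift idiom used throughout mcl_fp_shr1_17L: "lsrs rX, high, #1"
@ drops bit 0 of the next-higher limb into the carry flag, then "rrx rD, low"
@ rotates that carry into bit 31 of low >> 1; the flag-free alternative
@ "orr rD, rD, high, lsl #31" combines (low >> 1) with (high << 31) directly.
@ Either way each output limb is (limb[i] >> 1) | (limb[i+1] << 31), shifting
@ the whole 17-limb value right by one bit.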
lsrs r1, r7, #1 - rrx r1, r5 - str r1, [r0, #32] - lsr r1, r7, #1 - orr r1, r1, r9, lsl #31 - str r1, [r0, #36] - lsrs r1, r8, #1 - rrx r1, r9 - str r1, [r0, #40] - lsr r1, r8, #1 - orr r1, r1, r3, lsl #31 - str r1, [r0, #44] - lsrs r1, r2, #1 - rrx r1, r3 - ldr r3, [sp, #8] @ 4-byte Reload - str r1, [r0, #48] - lsr r1, r2, #1 - ldr r2, [sp, #12] @ 4-byte Reload - orr r1, r1, r3, lsl #31 - str r1, [r0, #52] - lsrs r1, r2, #1 - rrx r1, r3 - str r1, [r0, #56] - lsr r1, r2, #1 - ldr r2, [sp, #16] @ 4-byte Reload - orr r1, r1, r2, lsl #31 - str r1, [r0, #60] - lsr r1, r2, #1 - str r1, [r0, #64] - add sp, sp, #20 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end263: - .size mcl_fp_shr1_17L, .Lfunc_end263-mcl_fp_shr1_17L - .cantunwind - .fnend - - .globl mcl_fp_add17L - .align 2 - .type mcl_fp_add17L,%function -mcl_fp_add17L: @ @mcl_fp_add17L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #64 - sub sp, sp, #64 - ldr r9, [r1] - ldmib r1, {r8, lr} - ldr r12, [r1, #12] - ldm r2, {r4, r5, r6, r7} - adds r9, r4, r9 - ldr r4, [r1, #24] - adcs r5, r5, r8 - mov r8, r9 - adcs r6, r6, lr - str r5, [sp, #32] @ 4-byte Spill - ldr r5, [r1, #20] - str r8, [r0] - adcs r7, r7, r12 - str r6, [sp, #28] @ 4-byte Spill - ldr r6, [r1, #16] - ldr lr, [sp, #32] @ 4-byte Reload - str r7, [sp, #24] @ 4-byte Spill - ldr r7, [r2, #16] - str lr, [r0, #4] - adcs r10, r7, r6 - ldr r7, [r2, #20] - ldr r6, [r2, #28] - str r10, [sp, #4] @ 4-byte Spill - adcs r7, r7, r5 - ldr r5, [r2, #44] - str r7, [sp, #20] @ 4-byte Spill - ldr r7, [r2, #24] - ldr r9, [sp, #20] @ 4-byte Reload - adcs r7, r7, r4 - ldr r4, [r2, #48] - str r7, [sp, #60] @ 4-byte Spill - ldr r7, [r1, #28] - adcs r7, r6, r7 - ldr r6, [r2, #32] - str r7, [sp, #12] @ 4-byte Spill - ldr r7, [r1, #32] - adcs r7, r6, r7 - ldr r6, [r2, #36] - str r7, [sp, #56] @ 4-byte Spill - ldr r7, [r1, #36] - adcs r7, r6, r7 - ldr r6, [r2, #40] - str r7, [sp, #40] @ 4-byte Spill - ldr r7, [r1, #40] - adcs r7, r6, r7 - ldr r6, [r1, #44] - str r7, [sp, #52] @ 4-byte Spill - adcs r7, r5, r6 - ldr r5, [r1, #48] - ldr r6, [r2, #56] - str r7, [sp, #48] @ 4-byte Spill - ldr r7, [r2, #52] - adcs r11, r4, r5 - ldr r4, [r1, #52] - ldr r5, [sp, #24] @ 4-byte Reload - str r11, [sp, #8] @ 4-byte Spill - adcs r7, r7, r4 - ldr r4, [sp, #28] @ 4-byte Reload - str r7, [sp, #44] @ 4-byte Spill - ldr r7, [r1, #56] - str r4, [r0, #8] - str r5, [r0, #12] - str r10, [r0, #16] - str r9, [r0, #20] - ldr r10, [sp, #12] @ 4-byte Reload - adcs r12, r6, r7 - ldr r7, [r1, #60] - ldr r6, [r2, #60] - ldr r1, [r1, #64] - ldr r2, [r2, #64] - adcs r6, r6, r7 - adcs r2, r2, r1 - ldr r1, [sp, #60] @ 4-byte Reload - str r2, [sp, #36] @ 4-byte Spill - str r1, [r0, #24] - ldr r1, [sp, #56] @ 4-byte Reload - str r10, [r0, #28] - str r2, [r0, #64] - mov r2, #0 - str r1, [r0, #32] - ldr r1, [sp, #40] @ 4-byte Reload - str r1, [r0, #36] - ldr r1, [sp, #52] @ 4-byte Reload - str r1, [r0, #40] - ldr r1, [sp, #48] @ 4-byte Reload - str r1, [r0, #44] - ldr r1, [sp, #44] @ 4-byte Reload - str r11, [r0, #48] - mov r11, r12 - str r1, [r0, #52] - adc r1, r2, #0 - str r12, [r0, #56] - str r6, [r0, #60] - mov r12, r6 - str r1, [sp, #16] @ 4-byte Spill - ldm r3, {r6, r7} - ldr r1, [r3, #8] - ldr r2, [r3, #12] - subs r6, r8, r6 - sbcs r7, lr, r7 - str r6, [sp] @ 4-byte Spill - sbcs r1, r4, r1 - str r7, [sp, #32] @ 4-byte Spill - str r1, [sp, #28] @ 4-byte Spill - sbcs r1, r5, r2 - ldr r2, [sp, #4] @ 4-byte Reload - str r1, [sp, #24] @ 4-byte 
Spill - ldr r1, [r3, #16] - sbcs r1, r2, r1 - ldr r2, [sp, #60] @ 4-byte Reload - str r1, [sp, #4] @ 4-byte Spill - ldr r1, [r3, #20] - sbcs r9, r9, r1 - ldr r1, [r3, #24] - sbcs r1, r2, r1 - ldr r2, [sp, #56] @ 4-byte Reload - str r1, [sp, #60] @ 4-byte Spill - ldr r1, [r3, #28] - sbcs r10, r10, r1 - ldr r1, [r3, #32] - sbcs r1, r2, r1 - ldr r2, [sp, #40] @ 4-byte Reload - str r1, [sp, #56] @ 4-byte Spill - ldr r1, [r3, #36] - sbcs r1, r2, r1 - ldr r2, [sp, #52] @ 4-byte Reload - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [r3, #40] - sbcs lr, r2, r1 - ldr r1, [r3, #44] - ldr r2, [sp, #48] @ 4-byte Reload - sbcs r8, r2, r1 - ldr r1, [r3, #48] - ldr r2, [sp, #8] @ 4-byte Reload - sbcs r4, r2, r1 - ldr r1, [r3, #52] - ldr r2, [sp, #44] @ 4-byte Reload - sbcs r5, r2, r1 - ldr r1, [r3, #56] - ldr r2, [sp, #36] @ 4-byte Reload - sbcs r7, r11, r1 - ldr r1, [r3, #60] - sbcs r6, r12, r1 - ldr r1, [r3, #64] - sbcs r1, r2, r1 - ldr r2, [sp, #16] @ 4-byte Reload - sbc r2, r2, #0 - tst r2, #1 - bne .LBB264_2 -@ BB#1: @ %nocarry - ldr r2, [sp] @ 4-byte Reload - str r2, [r0] - ldr r2, [sp, #32] @ 4-byte Reload - str r2, [r0, #4] - ldr r2, [sp, #28] @ 4-byte Reload - str r2, [r0, #8] - ldr r2, [sp, #24] @ 4-byte Reload - str r2, [r0, #12] - ldr r2, [sp, #4] @ 4-byte Reload - str r2, [r0, #16] - ldr r2, [sp, #60] @ 4-byte Reload - str r9, [r0, #20] - str r2, [r0, #24] - str r10, [r0, #28] - str r1, [r0, #64] - ldr r1, [sp, #56] @ 4-byte Reload - str r1, [r0, #32] - ldr r1, [sp, #40] @ 4-byte Reload - str r1, [r0, #36] - add r1, r0, #48 - str lr, [r0, #40] - str r8, [r0, #44] - stm r1, {r4, r5, r7} - str r6, [r0, #60] -.LBB264_2: @ %carry - add sp, sp, #64 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end264: - .size mcl_fp_add17L, .Lfunc_end264-mcl_fp_add17L - .cantunwind - .fnend - - .globl mcl_fp_addNF17L - .align 2 - .type mcl_fp_addNF17L,%function -mcl_fp_addNF17L: @ @mcl_fp_addNF17L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #96 - sub sp, sp, #96 - ldr r9, [r1] - ldmib r1, {r8, lr} - ldr r12, [r1, #12] - ldm r2, {r4, r5, r6, r7} - adds r11, r4, r9 - ldr r4, [r1, #24] - adcs r10, r5, r8 - ldr r5, [r1, #20] - str r11, [sp, #8] @ 4-byte Spill - adcs r8, r6, lr - ldr r6, [r1, #16] - str r10, [sp, #16] @ 4-byte Spill - adcs r9, r7, r12 - ldr r7, [r2, #16] - str r8, [sp, #20] @ 4-byte Spill - str r9, [sp, #24] @ 4-byte Spill - adcs r7, r7, r6 - ldr r6, [r2, #28] - str r7, [sp, #48] @ 4-byte Spill - ldr r7, [r2, #20] - adcs lr, r7, r5 - ldr r7, [r2, #24] - str lr, [sp, #4] @ 4-byte Spill - adcs r7, r7, r4 - str r7, [sp, #60] @ 4-byte Spill - ldr r7, [r1, #28] - adcs r7, r6, r7 - ldr r6, [r2, #32] - str r7, [sp, #56] @ 4-byte Spill - ldr r7, [r1, #32] - adcs r7, r6, r7 - ldr r6, [r2, #36] - str r7, [sp, #52] @ 4-byte Spill - ldr r7, [r1, #36] - adcs r7, r6, r7 - ldr r6, [r2, #40] - str r7, [sp, #72] @ 4-byte Spill - ldr r7, [r1, #40] - adcs r7, r6, r7 - ldr r6, [r2, #44] - str r7, [sp, #68] @ 4-byte Spill - ldr r7, [r1, #44] - adcs r7, r6, r7 - ldr r6, [r2, #48] - str r7, [sp, #64] @ 4-byte Spill - ldr r7, [r1, #48] - adcs r7, r6, r7 - ldr r6, [r2, #52] - str r7, [sp, #84] @ 4-byte Spill - ldr r7, [r1, #52] - adcs r7, r6, r7 - ldr r6, [r2, #56] - str r7, [sp, #80] @ 4-byte Spill - ldr r7, [r1, #56] - adcs r7, r6, r7 - ldr r6, [r2, #60] - ldr r2, [r2, #64] - str r7, [sp, #76] @ 4-byte Spill - ldr r7, [r1, #60] - ldr r1, [r1, #64] - adcs r7, r6, r7 - adc r1, r2, r1 - str r7, [sp, #92] @ 4-byte Spill - str r1, [sp, 
#88] @ 4-byte Spill - ldm r3, {r1, r7} - ldr r6, [r3, #8] - ldr r5, [r3, #12] - ldr r2, [sp, #48] @ 4-byte Reload - subs r12, r11, r1 - ldr r1, [r3, #64] - ldr r11, [r3, #36] - sbcs r4, r10, r7 - ldr r10, [r3, #32] - ldr r7, [r3, #24] - sbcs r6, r8, r6 - sbcs r9, r9, r5 - ldr r5, [r3, #28] - str r1, [sp] @ 4-byte Spill - ldr r1, [r3, #40] - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [r3, #44] - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [r3, #48] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [r3, #52] - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [r3, #56] - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [r3, #60] - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [r3, #20] - ldr r3, [r3, #16] - sbcs r2, r2, r3 - sbcs r3, lr, r1 - ldr r1, [sp, #60] @ 4-byte Reload - sbcs lr, r1, r7 - ldr r1, [sp, #56] @ 4-byte Reload - ldr r7, [sp, #12] @ 4-byte Reload - sbcs r5, r1, r5 - ldr r1, [sp, #52] @ 4-byte Reload - sbcs r8, r1, r10 - ldr r1, [sp, #72] @ 4-byte Reload - sbcs r11, r1, r11 - ldr r1, [sp, #68] @ 4-byte Reload - sbcs r1, r1, r7 - ldr r7, [sp, #28] @ 4-byte Reload - str r1, [sp, #12] @ 4-byte Spill - ldr r1, [sp, #64] @ 4-byte Reload - sbcs r1, r1, r7 - ldr r7, [sp, #32] @ 4-byte Reload - str r1, [sp, #28] @ 4-byte Spill - ldr r1, [sp, #84] @ 4-byte Reload - sbcs r1, r1, r7 - ldr r7, [sp, #36] @ 4-byte Reload - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [sp, #80] @ 4-byte Reload - sbcs r1, r1, r7 - ldr r7, [sp, #40] @ 4-byte Reload - str r1, [sp, #36] @ 4-byte Spill - ldr r1, [sp, #76] @ 4-byte Reload - sbcs r1, r1, r7 - ldr r7, [sp, #44] @ 4-byte Reload - str r1, [sp, #40] @ 4-byte Spill - ldr r1, [sp, #92] @ 4-byte Reload - sbcs r1, r1, r7 - ldr r7, [sp] @ 4-byte Reload - str r1, [sp, #44] @ 4-byte Spill - ldr r1, [sp, #88] @ 4-byte Reload - sbc r10, r1, r7 - ldr r7, [sp, #8] @ 4-byte Reload - asr r1, r10, #31 - cmp r1, #0 - movlt r12, r7 - ldr r7, [sp, #16] @ 4-byte Reload - str r12, [r0] - movlt r4, r7 - str r4, [r0, #4] - ldr r4, [sp, #20] @ 4-byte Reload - movlt r6, r4 - cmp r1, #0 - str r6, [r0, #8] - ldr r6, [sp, #24] @ 4-byte Reload - movlt r9, r6 - ldr r6, [sp, #48] @ 4-byte Reload - str r9, [r0, #12] - movlt r2, r6 - str r2, [r0, #16] - ldr r2, [sp, #4] @ 4-byte Reload - movlt r3, r2 - ldr r2, [sp, #60] @ 4-byte Reload - cmp r1, #0 - str r3, [r0, #20] - ldr r3, [sp, #12] @ 4-byte Reload - movlt lr, r2 - ldr r2, [sp, #56] @ 4-byte Reload - str lr, [r0, #24] - movlt r5, r2 - ldr r2, [sp, #52] @ 4-byte Reload - str r5, [r0, #28] - movlt r8, r2 - ldr r2, [sp, #72] @ 4-byte Reload - cmp r1, #0 - str r8, [r0, #32] - movlt r11, r2 - ldr r2, [sp, #68] @ 4-byte Reload - str r11, [r0, #36] - movlt r3, r2 - ldr r2, [sp, #64] @ 4-byte Reload - str r3, [r0, #40] - ldr r3, [sp, #28] @ 4-byte Reload - movlt r3, r2 - ldr r2, [sp, #84] @ 4-byte Reload - cmp r1, #0 - str r3, [r0, #44] - ldr r3, [sp, #32] @ 4-byte Reload - movlt r3, r2 - ldr r2, [sp, #80] @ 4-byte Reload - str r3, [r0, #48] - ldr r3, [sp, #36] @ 4-byte Reload - movlt r3, r2 - ldr r2, [sp, #76] @ 4-byte Reload - str r3, [r0, #52] - ldr r3, [sp, #40] @ 4-byte Reload - movlt r3, r2 - cmp r1, #0 - ldr r1, [sp, #92] @ 4-byte Reload - ldr r2, [sp, #44] @ 4-byte Reload - str r3, [r0, #56] - movlt r2, r1 - ldr r1, [sp, #88] @ 4-byte Reload - str r2, [r0, #60] - movlt r10, r1 - str r10, [r0, #64] - add sp, sp, #96 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end265: - .size mcl_fp_addNF17L, .Lfunc_end265-mcl_fp_addNF17L - .cantunwind - .fnend - - .globl mcl_fp_sub17L - .align 2 - .type mcl_fp_sub17L,%function -mcl_fp_sub17L: @ 
@mcl_fp_sub17L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #92 - sub sp, sp, #92 - ldm r2, {r8, r9, lr} - ldr r12, [r2, #12] - ldm r1, {r4, r5, r6, r7} - subs r4, r4, r8 - sbcs r5, r5, r9 - str r4, [sp, #68] @ 4-byte Spill - ldr r4, [r2, #24] - sbcs r6, r6, lr - str r5, [sp, #88] @ 4-byte Spill - ldr r5, [r2, #20] - sbcs r7, r7, r12 - str r6, [sp, #84] @ 4-byte Spill - ldr r6, [r2, #16] - str r7, [sp, #80] @ 4-byte Spill - ldr r7, [r1, #16] - sbcs r7, r7, r6 - ldr r6, [r1, #28] - str r7, [sp, #76] @ 4-byte Spill - ldr r7, [r1, #20] - sbcs r7, r7, r5 - ldr r5, [r1, #44] - str r7, [sp, #72] @ 4-byte Spill - ldr r7, [r1, #24] - sbcs r11, r7, r4 - ldr r7, [r2, #28] - ldr r4, [r2, #52] - sbcs r10, r6, r7 - ldr r7, [r2, #32] - ldr r6, [r1, #32] - str r10, [sp, #60] @ 4-byte Spill - sbcs r9, r6, r7 - ldr r7, [r2, #36] - ldr r6, [r1, #36] - str r9, [sp, #56] @ 4-byte Spill - sbcs r7, r6, r7 - ldr r6, [r1, #40] - str r7, [sp, #64] @ 4-byte Spill - ldr r7, [r2, #40] - sbcs r8, r6, r7 - ldr r7, [r2, #44] - str r8, [sp, #52] @ 4-byte Spill - sbcs lr, r5, r7 - ldr r7, [r2, #48] - ldr r5, [r1, #48] - str lr, [sp, #48] @ 4-byte Spill - sbcs r6, r5, r7 - ldr r5, [r1, #52] - sbcs r7, r5, r4 - ldr r4, [r2, #56] - ldr r5, [r1, #56] - str r7, [sp, #44] @ 4-byte Spill - sbcs r12, r5, r4 - ldr r4, [r2, #60] - ldr r5, [r1, #60] - ldr r2, [r2, #64] - ldr r1, [r1, #64] - str r12, [sp, #40] @ 4-byte Spill - sbcs r4, r5, r4 - ldr r5, [sp, #64] @ 4-byte Reload - sbcs r1, r1, r2 - ldr r2, [sp, #68] @ 4-byte Reload - str r2, [r0] - ldr r2, [sp, #88] @ 4-byte Reload - str r2, [r0, #4] - ldr r2, [sp, #84] @ 4-byte Reload - str r2, [r0, #8] - ldr r2, [sp, #80] @ 4-byte Reload - str r2, [r0, #12] - ldr r2, [sp, #76] @ 4-byte Reload - str r2, [r0, #16] - ldr r2, [sp, #72] @ 4-byte Reload - str r2, [r0, #20] - add r2, r0, #36 - str r11, [r0, #24] - str r10, [r0, #28] - str r1, [r0, #64] - str r9, [r0, #32] - stm r2, {r5, r8, lr} - add r2, r0, #48 - stm r2, {r6, r7, r12} - mov r2, #0 - str r4, [r0, #60] - sbc r2, r2, #0 - tst r2, #1 - beq .LBB266_2 -@ BB#1: @ %carry - ldr r2, [r3, #64] - mov r9, r4 - str r2, [sp, #36] @ 4-byte Spill - ldm r3, {r4, r12} - ldr r2, [sp, #68] @ 4-byte Reload - str r6, [sp, #28] @ 4-byte Spill - ldr r7, [r3, #8] - str r1, [sp, #32] @ 4-byte Spill - ldr r1, [r3, #12] - ldr lr, [r3, #20] - adds r8, r4, r2 - ldr r2, [r3, #32] - str r8, [r0] - str r2, [sp] @ 4-byte Spill - ldr r2, [r3, #36] - str r2, [sp, #4] @ 4-byte Spill - ldr r2, [r3, #40] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r3, #44] - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r3, #48] - str r2, [sp, #16] @ 4-byte Spill - ldr r2, [r3, #52] - str r2, [sp, #20] @ 4-byte Spill - ldr r2, [r3, #56] - str r2, [sp, #24] @ 4-byte Spill - ldr r2, [r3, #60] - str r2, [sp, #68] @ 4-byte Spill - ldr r2, [sp, #88] @ 4-byte Reload - adcs r6, r12, r2 - ldr r2, [sp, #84] @ 4-byte Reload - adcs r7, r7, r2 - ldr r2, [sp, #80] @ 4-byte Reload - adcs r4, r1, r2 - ldr r2, [r3, #28] - ldr r1, [r3, #24] - ldr r3, [r3, #16] - stmib r0, {r6, r7} - ldr r7, [sp, #76] @ 4-byte Reload - str r4, [r0, #12] - ldr r6, [sp, #16] @ 4-byte Reload - ldr r4, [sp, #24] @ 4-byte Reload - adcs r3, r3, r7 - ldr r7, [sp, #72] @ 4-byte Reload - str r3, [r0, #16] - ldr r3, [sp, #60] @ 4-byte Reload - adcs r7, lr, r7 - adcs r1, r1, r11 - str r7, [r0, #20] - ldr r7, [sp, #12] @ 4-byte Reload - adcs r3, r2, r3 - str r1, [r0, #24] - ldr r1, [sp, #56] @ 4-byte Reload - ldr r2, [sp] @ 4-byte Reload - str r3, 
[r0, #28] - ldr r3, [sp, #8] @ 4-byte Reload - adcs r12, r2, r1 - ldr r1, [sp, #4] @ 4-byte Reload - str r12, [r0, #32] - add r12, r0, #36 - adcs r2, r1, r5 - ldr r1, [sp, #52] @ 4-byte Reload - ldr r5, [sp, #20] @ 4-byte Reload - adcs r3, r3, r1 - ldr r1, [sp, #48] @ 4-byte Reload - adcs r7, r7, r1 - ldr r1, [sp, #28] @ 4-byte Reload - adcs r6, r6, r1 - ldr r1, [sp, #44] @ 4-byte Reload - adcs r5, r5, r1 - ldr r1, [sp, #40] @ 4-byte Reload - adcs r4, r4, r1 - ldr r1, [sp, #68] @ 4-byte Reload - stm r12, {r2, r3, r7} - str r6, [r0, #48] - str r5, [r0, #52] - str r4, [r0, #56] - ldr r2, [sp, #32] @ 4-byte Reload - adcs r1, r1, r9 - str r1, [r0, #60] - ldr r1, [sp, #36] @ 4-byte Reload - adc r1, r1, r2 - str r1, [r0, #64] -.LBB266_2: @ %nocarry - add sp, sp, #92 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end266: - .size mcl_fp_sub17L, .Lfunc_end266-mcl_fp_sub17L - .cantunwind - .fnend - - .globl mcl_fp_subNF17L - .align 2 - .type mcl_fp_subNF17L,%function -mcl_fp_subNF17L: @ @mcl_fp_subNF17L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #104 - sub sp, sp, #104 - mov r12, r0 - ldr r0, [r2, #64] - ldr r11, [r2] - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [r1, #64] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r2, #32] - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [r2, #36] - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [r2, #40] - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [r2, #44] - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [r2, #48] - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [r2, #52] - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [r2, #56] - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [r2, #60] - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [r1, #60] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [r1, #56] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [r1, #52] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [r1, #48] - str r0, [sp, #24] @ 4-byte Spill - ldmib r2, {r5, r6, r7, r8, r9, r10} - ldr r0, [r2, #28] - ldr r2, [r1] - str r0, [sp, #64] @ 4-byte Spill - ldmib r1, {r0, lr} - ldr r4, [r1, #12] - subs r2, r2, r11 - add r11, r3, #8 - str r2, [sp, #12] @ 4-byte Spill - ldr r2, [r1, #44] - sbcs r0, r0, r5 - ldr r5, [r1, #40] - str r0, [sp, #8] @ 4-byte Spill - sbcs r0, lr, r6 - ldr r6, [r1, #36] - str r0, [sp, #48] @ 4-byte Spill - sbcs r0, r4, r7 - ldr r7, [r1, #16] - str r0, [sp, #52] @ 4-byte Spill - sbcs r0, r7, r8 - ldr r7, [r1, #20] - str r0, [sp, #56] @ 4-byte Spill - sbcs r0, r7, r9 - ldr r7, [r1, #24] - str r0, [sp, #60] @ 4-byte Spill - sbcs r0, r7, r10 - ldr r7, [r1, #32] - ldr r1, [r1, #28] - str r0, [sp, #68] @ 4-byte Spill - ldr r0, [sp, #64] @ 4-byte Reload - sbcs r0, r1, r0 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #64] @ 4-byte Spill - ldr r0, [sp, #72] @ 4-byte Reload - sbcs r0, r7, r0 - str r0, [sp, #72] @ 4-byte Spill - ldr r0, [sp, #76] @ 4-byte Reload - sbcs r0, r6, r0 - str r0, [sp, #76] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - sbcs r0, r5, r0 - str r0, [sp, #80] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - sbcs r0, r2, r0 - str r0, [sp, #84] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - sbcs r0, r1, r0 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #88] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - sbcs r0, r1, r0 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #92] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - sbcs r0, r1, r0 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #96] @ 4-byte Spill - ldr r0, [sp, #100] @ 
4-byte Reload - sbcs r0, r1, r0 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #100] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - sbc r0, r1, r0 - str r0, [sp, #44] @ 4-byte Spill - ldr r0, [r3, #64] - str r0, [sp, #40] @ 4-byte Spill - ldr r0, [r3, #36] - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [r3, #40] - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [r3, #44] - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [r3, #48] - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [r3, #52] - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [r3, #56] - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [r3, #60] - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [r3, #32] - str r0, [sp] @ 4-byte Spill - ldm r3, {r2, r7} - ldm r11, {r1, r4, r5, r6, r11} - ldr r8, [sp, #12] @ 4-byte Reload - ldr r10, [sp, #8] @ 4-byte Reload - ldr r0, [r3, #28] - adds r2, r8, r2 - adcs r3, r10, r7 - ldr r7, [sp, #48] @ 4-byte Reload - adcs lr, r7, r1 - ldr r1, [sp, #52] @ 4-byte Reload - adcs r4, r1, r4 - ldr r1, [sp, #56] @ 4-byte Reload - adcs r5, r1, r5 - ldr r1, [sp, #60] @ 4-byte Reload - adcs r6, r1, r6 - ldr r1, [sp, #68] @ 4-byte Reload - adcs r7, r1, r11 - ldr r1, [sp, #64] @ 4-byte Reload - adcs r9, r1, r0 - ldr r0, [sp, #72] @ 4-byte Reload - ldr r1, [sp] @ 4-byte Reload - adcs r11, r0, r1 - ldr r0, [sp, #76] @ 4-byte Reload - ldr r1, [sp, #4] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #16] @ 4-byte Reload - str r0, [sp, #4] @ 4-byte Spill - ldr r0, [sp, #80] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #20] @ 4-byte Reload - str r0, [sp, #16] @ 4-byte Spill - ldr r0, [sp, #84] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #24] @ 4-byte Reload - str r0, [sp, #20] @ 4-byte Spill - ldr r0, [sp, #88] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #28] @ 4-byte Reload - str r0, [sp, #24] @ 4-byte Spill - ldr r0, [sp, #92] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #32] @ 4-byte Reload - str r0, [sp, #28] @ 4-byte Spill - ldr r0, [sp, #96] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #36] @ 4-byte Reload - str r0, [sp, #32] @ 4-byte Spill - ldr r0, [sp, #100] @ 4-byte Reload - adcs r0, r0, r1 - ldr r1, [sp, #40] @ 4-byte Reload - str r0, [sp, #36] @ 4-byte Spill - ldr r0, [sp, #44] @ 4-byte Reload - adc r1, r0, r1 - str r1, [sp, #40] @ 4-byte Spill - asr r1, r0, #31 - ldr r0, [sp, #48] @ 4-byte Reload - cmp r1, #0 - movge r2, r8 - movge r3, r10 - str r2, [r12] - ldr r2, [sp, #4] @ 4-byte Reload - str r3, [r12, #4] - movge lr, r0 - ldr r0, [sp, #52] @ 4-byte Reload - cmp r1, #0 - str lr, [r12, #8] - movge r4, r0 - ldr r0, [sp, #56] @ 4-byte Reload - str r4, [r12, #12] - movge r5, r0 - ldr r0, [sp, #60] @ 4-byte Reload - str r5, [r12, #16] - movge r6, r0 - ldr r0, [sp, #68] @ 4-byte Reload - cmp r1, #0 - str r6, [r12, #20] - movge r7, r0 - ldr r0, [sp, #64] @ 4-byte Reload - str r7, [r12, #24] - movge r9, r0 - ldr r0, [sp, #72] @ 4-byte Reload - str r9, [r12, #28] - movge r11, r0 - ldr r0, [sp, #76] @ 4-byte Reload - cmp r1, #0 - str r11, [r12, #32] - movge r2, r0 - ldr r0, [sp, #80] @ 4-byte Reload - str r2, [r12, #36] - ldr r2, [sp, #16] @ 4-byte Reload - movge r2, r0 - ldr r0, [sp, #84] @ 4-byte Reload - str r2, [r12, #40] - ldr r2, [sp, #20] @ 4-byte Reload - movge r2, r0 - ldr r0, [sp, #88] @ 4-byte Reload - cmp r1, #0 - str r2, [r12, #44] - ldr r2, [sp, #24] @ 4-byte Reload - movge r2, r0 - ldr r0, [sp, #92] @ 4-byte Reload - str r2, [r12, #48] - ldr r2, [sp, #28] @ 4-byte Reload - movge r2, r0 - ldr r0, [sp, #96] @ 4-byte Reload - str r2, [r12, #52] - ldr r2, [sp, #32] @ 4-byte Reload - movge r2, r0 - 
ldr r0, [sp, #100] @ 4-byte Reload - cmp r1, #0 - ldr r1, [sp, #36] @ 4-byte Reload - str r2, [r12, #56] - movge r1, r0 - ldr r0, [sp, #40] @ 4-byte Reload - str r1, [r12, #60] - ldr r1, [sp, #44] @ 4-byte Reload - movge r0, r1 - str r0, [r12, #64] - add sp, sp, #104 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end267: - .size mcl_fp_subNF17L, .Lfunc_end267-mcl_fp_subNF17L - .cantunwind - .fnend - - .globl mcl_fpDbl_add17L - .align 2 - .type mcl_fpDbl_add17L,%function -mcl_fpDbl_add17L: @ @mcl_fpDbl_add17L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #216 - sub sp, sp, #216 - ldm r1, {r7, r8, lr} - ldr r12, [r1, #12] - ldm r2, {r4, r5, r6, r9} - add r10, r1, #32 - adds r4, r4, r7 - str r4, [sp, #104] @ 4-byte Spill - ldr r4, [r2, #128] - str r4, [sp, #208] @ 4-byte Spill - ldr r4, [r2, #132] - str r4, [sp, #212] @ 4-byte Spill - adcs r4, r5, r8 - adcs r7, r6, lr - str r4, [sp, #100] @ 4-byte Spill - add lr, r1, #16 - str r7, [sp, #96] @ 4-byte Spill - ldr r7, [r2, #96] - str r7, [sp, #176] @ 4-byte Spill - ldr r7, [r2, #100] - str r7, [sp, #180] @ 4-byte Spill - ldr r7, [r2, #104] - str r7, [sp, #184] @ 4-byte Spill - ldr r7, [r2, #108] - str r7, [sp, #188] @ 4-byte Spill - ldr r7, [r2, #112] - str r7, [sp, #192] @ 4-byte Spill - ldr r7, [r2, #116] - str r7, [sp, #196] @ 4-byte Spill - ldr r7, [r2, #120] - str r7, [sp, #200] @ 4-byte Spill - ldr r7, [r2, #124] - str r7, [sp, #204] @ 4-byte Spill - adcs r7, r9, r12 - str r7, [sp, #68] @ 4-byte Spill - ldr r7, [r2, #64] - str r7, [sp, #144] @ 4-byte Spill - ldr r7, [r2, #68] - str r7, [sp, #148] @ 4-byte Spill - ldr r7, [r2, #72] - str r7, [sp, #152] @ 4-byte Spill - ldr r7, [r2, #76] - str r7, [sp, #156] @ 4-byte Spill - ldr r7, [r2, #80] - str r7, [sp, #160] @ 4-byte Spill - ldr r7, [r2, #84] - str r7, [sp, #168] @ 4-byte Spill - ldr r7, [r2, #88] - str r7, [sp, #164] @ 4-byte Spill - ldr r7, [r2, #92] - str r7, [sp, #172] @ 4-byte Spill - ldr r7, [r2, #32] - str r7, [sp, #56] @ 4-byte Spill - ldr r7, [r2, #36] - str r7, [sp, #60] @ 4-byte Spill - ldr r7, [r2, #40] - str r7, [sp, #64] @ 4-byte Spill - ldr r7, [r2, #44] - str r7, [sp, #72] @ 4-byte Spill - ldr r7, [r2, #48] - str r7, [sp, #76] @ 4-byte Spill - ldr r7, [r2, #52] - str r7, [sp, #80] @ 4-byte Spill - ldr r7, [r2, #56] - str r7, [sp, #88] @ 4-byte Spill - ldr r7, [r2, #60] - str r7, [sp, #92] @ 4-byte Spill - ldr r7, [r2, #28] - str r7, [sp, #20] @ 4-byte Spill - ldr r7, [r2, #24] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r2, #20] - ldr r2, [r2, #16] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #128] - str r7, [sp, #12] @ 4-byte Spill - str r2, [sp, #136] @ 4-byte Spill - ldr r2, [r1, #132] - str r2, [sp, #140] @ 4-byte Spill - ldr r2, [r1, #96] - str r2, [sp, #108] @ 4-byte Spill - ldr r2, [r1, #104] - str r2, [sp, #112] @ 4-byte Spill - ldr r2, [r1, #108] - str r2, [sp, #116] @ 4-byte Spill - ldr r2, [r1, #112] - str r2, [sp, #120] @ 4-byte Spill - ldr r2, [r1, #116] - str r2, [sp, #124] @ 4-byte Spill - ldr r2, [r1, #120] - str r2, [sp, #128] @ 4-byte Spill - ldr r2, [r1, #124] - str r2, [sp, #132] @ 4-byte Spill - ldr r2, [r1, #100] - str r2, [sp, #84] @ 4-byte Spill - ldr r2, [r1, #64] - str r2, [sp, #24] @ 4-byte Spill - ldr r2, [r1, #68] - str r2, [sp, #28] @ 4-byte Spill - ldr r2, [r1, #72] - str r2, [sp, #32] @ 4-byte Spill - ldr r2, [r1, #76] - str r2, [sp, #36] @ 4-byte Spill - ldr r2, [r1, #80] - str r2, [sp, #40] @ 4-byte Spill - ldr r2, [r1, #84] - str r2, [sp, 
#44] @ 4-byte Spill - ldr r2, [r1, #88] - str r2, [sp, #48] @ 4-byte Spill - ldr r2, [r1, #92] - str r2, [sp, #52] @ 4-byte Spill - ldm r10, {r4, r5, r6, r8, r9, r10} - ldr r2, [r1, #56] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #4] @ 4-byte Spill - ldm lr, {r1, r2, r12, lr} - ldr r11, [sp, #104] @ 4-byte Reload - ldr r7, [sp, #100] @ 4-byte Reload - str r11, [r0] - str r7, [r0, #4] - ldr r7, [sp, #96] @ 4-byte Reload - str r7, [r0, #8] - ldr r7, [sp, #8] @ 4-byte Reload - adcs r1, r7, r1 - ldr r7, [sp, #68] @ 4-byte Reload - str r7, [r0, #12] - ldr r7, [sp, #12] @ 4-byte Reload - str r1, [r0, #16] - ldr r1, [sp, #16] @ 4-byte Reload - adcs r2, r7, r2 - ldr r7, [sp] @ 4-byte Reload - str r2, [r0, #20] - adcs r1, r1, r12 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [r0, #24] - ldr r1, [sp, #56] @ 4-byte Reload - adcs r2, r2, lr - str r2, [r0, #28] - adcs r1, r1, r4 - ldr r2, [sp, #60] @ 4-byte Reload - str r1, [r0, #32] - ldr r1, [sp, #64] @ 4-byte Reload - adcs r2, r2, r5 - str r2, [r0, #36] - adcs r1, r1, r6 - ldr r2, [sp, #72] @ 4-byte Reload - str r1, [r0, #40] - ldr r1, [sp, #76] @ 4-byte Reload - adcs r2, r2, r8 - str r2, [r0, #44] - adcs r1, r1, r9 - ldr r2, [sp, #80] @ 4-byte Reload - str r1, [r0, #48] - ldr r1, [sp, #88] @ 4-byte Reload - adcs r2, r2, r10 - adcs r1, r1, r7 - str r2, [r0, #52] - ldr r2, [sp, #92] @ 4-byte Reload - ldr r7, [sp, #4] @ 4-byte Reload - str r1, [r0, #56] - ldr r1, [sp, #144] @ 4-byte Reload - adcs r2, r2, r7 - ldr r7, [sp, #24] @ 4-byte Reload - str r2, [r0, #60] - ldr r2, [sp, #148] @ 4-byte Reload - adcs r1, r1, r7 - ldr r7, [sp, #28] @ 4-byte Reload - str r1, [r0, #64] - ldr r1, [sp, #152] @ 4-byte Reload - adcs r12, r2, r7 - ldr r2, [sp, #32] @ 4-byte Reload - str r12, [sp, #96] @ 4-byte Spill - adcs r9, r1, r2 - ldr r1, [sp, #156] @ 4-byte Reload - ldr r2, [sp, #36] @ 4-byte Reload - str r9, [sp, #100] @ 4-byte Spill - adcs r8, r1, r2 - ldr r1, [sp, #160] @ 4-byte Reload - ldr r2, [sp, #40] @ 4-byte Reload - str r8, [sp, #104] @ 4-byte Spill - adcs r4, r1, r2 - ldr r1, [sp, #168] @ 4-byte Reload - ldr r2, [sp, #44] @ 4-byte Reload - str r4, [sp, #144] @ 4-byte Spill - adcs r1, r1, r2 - ldr r2, [sp, #48] @ 4-byte Reload - str r1, [sp, #168] @ 4-byte Spill - ldr r1, [sp, #164] @ 4-byte Reload - adcs lr, r1, r2 - ldr r1, [sp, #172] @ 4-byte Reload - ldr r2, [sp, #52] @ 4-byte Reload - str lr, [sp, #92] @ 4-byte Spill - adcs r1, r1, r2 - ldr r2, [sp, #108] @ 4-byte Reload - str r1, [sp, #172] @ 4-byte Spill - ldr r1, [sp, #176] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #84] @ 4-byte Reload - str r1, [sp, #176] @ 4-byte Spill - ldr r1, [sp, #180] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #112] @ 4-byte Reload - str r1, [sp, #180] @ 4-byte Spill - ldr r1, [sp, #184] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #116] @ 4-byte Reload - str r1, [sp, #184] @ 4-byte Spill - ldr r1, [sp, #188] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #120] @ 4-byte Reload - str r1, [sp, #188] @ 4-byte Spill - ldr r1, [sp, #192] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #124] @ 4-byte Reload - str r1, [sp, #192] @ 4-byte Spill - ldr r1, [sp, #196] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #128] @ 4-byte Reload - str r1, [sp, #196] @ 4-byte Spill - ldr r1, [sp, #200] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #132] @ 4-byte Reload - str r1, [sp, #200] @ 4-byte Spill - ldr r1, [sp, #204] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #136] @ 4-byte Reload - str r1, [sp, #204] @ 4-byte Spill - ldr r1, [sp, 
#208] @ 4-byte Reload - adcs r1, r1, r2 - ldr r2, [sp, #140] @ 4-byte Reload - str r1, [sp, #208] @ 4-byte Spill - ldr r1, [sp, #212] @ 4-byte Reload - adcs r1, r1, r2 - str r1, [sp, #212] @ 4-byte Spill - mov r1, #0 - adc r1, r1, #0 - str r1, [sp, #140] @ 4-byte Spill - ldm r3, {r2, r7} - ldr r1, [r3, #64] - ldr r6, [r3, #8] - ldr r5, [r3, #12] - ldr r10, [r3, #36] - ldr r11, [r3, #40] - str r1, [sp, #164] @ 4-byte Spill - ldr r1, [r3, #44] - subs r12, r12, r2 - sbcs r7, r9, r7 - sbcs r6, r8, r6 - add r8, r3, #20 - sbcs r9, r4, r5 - str r1, [sp, #136] @ 4-byte Spill - ldr r1, [r3, #48] - str r1, [sp, #148] @ 4-byte Spill - ldr r1, [r3, #52] - str r1, [sp, #152] @ 4-byte Spill - ldr r1, [r3, #56] - str r1, [sp, #156] @ 4-byte Spill - ldr r1, [r3, #60] - str r1, [sp, #160] @ 4-byte Spill - ldm r8, {r1, r4, r5, r8} - ldr r3, [r3, #16] - ldr r2, [sp, #168] @ 4-byte Reload - sbcs r2, r2, r3 - sbcs r3, lr, r1 - ldr r1, [sp, #172] @ 4-byte Reload - sbcs lr, r1, r4 - ldr r1, [sp, #176] @ 4-byte Reload - sbcs r4, r1, r5 - ldr r1, [sp, #180] @ 4-byte Reload - ldr r5, [sp, #136] @ 4-byte Reload - sbcs r8, r1, r8 - ldr r1, [sp, #184] @ 4-byte Reload - sbcs r10, r1, r10 - ldr r1, [sp, #188] @ 4-byte Reload - sbcs r11, r1, r11 - ldr r1, [sp, #192] @ 4-byte Reload - sbcs r1, r1, r5 - ldr r5, [sp, #148] @ 4-byte Reload - str r1, [sp, #136] @ 4-byte Spill - ldr r1, [sp, #196] @ 4-byte Reload - sbcs r1, r1, r5 - ldr r5, [sp, #152] @ 4-byte Reload - str r1, [sp, #148] @ 4-byte Spill - ldr r1, [sp, #200] @ 4-byte Reload - sbcs r1, r1, r5 - ldr r5, [sp, #156] @ 4-byte Reload - str r1, [sp, #152] @ 4-byte Spill - ldr r1, [sp, #204] @ 4-byte Reload - sbcs r1, r1, r5 - ldr r5, [sp, #160] @ 4-byte Reload - str r1, [sp, #156] @ 4-byte Spill - ldr r1, [sp, #208] @ 4-byte Reload - sbcs r1, r1, r5 - ldr r5, [sp, #164] @ 4-byte Reload - str r1, [sp, #160] @ 4-byte Spill - ldr r1, [sp, #212] @ 4-byte Reload - sbcs r1, r1, r5 - ldr r5, [sp, #96] @ 4-byte Reload - str r1, [sp, #164] @ 4-byte Spill - ldr r1, [sp, #140] @ 4-byte Reload - sbc r1, r1, #0 - ands r1, r1, #1 - movne r12, r5 - ldr r5, [sp, #100] @ 4-byte Reload - str r12, [r0, #68] - movne r7, r5 - str r7, [r0, #72] - ldr r7, [sp, #104] @ 4-byte Reload - movne r6, r7 - ldr r7, [sp, #144] @ 4-byte Reload - cmp r1, #0 - str r6, [r0, #76] - movne r9, r7 - ldr r7, [sp, #168] @ 4-byte Reload - str r9, [r0, #80] - movne r2, r7 - str r2, [r0, #84] - ldr r2, [sp, #92] @ 4-byte Reload - movne r3, r2 - ldr r2, [sp, #172] @ 4-byte Reload - cmp r1, #0 - str r3, [r0, #88] - ldr r3, [sp, #136] @ 4-byte Reload - movne lr, r2 - ldr r2, [sp, #176] @ 4-byte Reload - str lr, [r0, #92] - movne r4, r2 - ldr r2, [sp, #180] @ 4-byte Reload - str r4, [r0, #96] - movne r8, r2 - ldr r2, [sp, #184] @ 4-byte Reload - cmp r1, #0 - str r8, [r0, #100] - movne r10, r2 - ldr r2, [sp, #188] @ 4-byte Reload - str r10, [r0, #104] - movne r11, r2 - ldr r2, [sp, #192] @ 4-byte Reload - str r11, [r0, #108] - movne r3, r2 - ldr r2, [sp, #196] @ 4-byte Reload - cmp r1, #0 - str r3, [r0, #112] - ldr r3, [sp, #148] @ 4-byte Reload - movne r3, r2 - ldr r2, [sp, #200] @ 4-byte Reload - str r3, [r0, #116] - ldr r3, [sp, #152] @ 4-byte Reload - movne r3, r2 - ldr r2, [sp, #204] @ 4-byte Reload - str r3, [r0, #120] - ldr r3, [sp, #156] @ 4-byte Reload - movne r3, r2 - cmp r1, #0 - ldr r1, [sp, #208] @ 4-byte Reload - ldr r2, [sp, #160] @ 4-byte Reload - str r3, [r0, #124] - ldr r3, [sp, #164] @ 4-byte Reload - movne r2, r1 - ldr r1, [sp, #212] @ 4-byte Reload - str r2, [r0, #128] - movne r3, r1 - str r3, [r0, 
#132] - add sp, sp, #216 - pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} - mov pc, lr -.Lfunc_end268: - .size mcl_fpDbl_add17L, .Lfunc_end268-mcl_fpDbl_add17L - .cantunwind - .fnend - - .globl mcl_fpDbl_sub17L - .align 2 - .type mcl_fpDbl_sub17L,%function -mcl_fpDbl_sub17L: @ @mcl_fpDbl_sub17L - .fnstart -@ BB#0: - .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - .pad #216 - sub sp, sp, #216 - ldr r7, [r2, #128] - add r10, r1, #32 - str r7, [sp, #208] @ 4-byte Spill - ldr r7, [r2, #132] - str r7, [sp, #212] @ 4-byte Spill - ldr r7, [r2, #96] - str r7, [sp, #188] @ 4-byte Spill - ldr r7, [r2, #104] - str r7, [sp, #164] @ 4-byte Spill - ldr r7, [r2, #108] - str r7, [sp, #168] @ 4-byte Spill - ldr r7, [r2, #112] - str r7, [sp, #192] @ 4-byte Spill - ldr r7, [r2, #116] - str r7, [sp, #196] @ 4-byte Spill - ldr r7, [r2, #120] - str r7, [sp, #200] @ 4-byte Spill - ldr r7, [r2, #124] - str r7, [sp, #204] @ 4-byte Spill - ldr r7, [r2, #100] - str r7, [sp, #156] @ 4-byte Spill - ldr r7, [r2, #64] - str r7, [sp, #144] @ 4-byte Spill - ldr r7, [r2, #68] - str r7, [sp, #148] @ 4-byte Spill - ldr r7, [r2, #72] - str r7, [sp, #152] @ 4-byte Spill - ldr r7, [r2, #76] - str r7, [sp, #160] @ 4-byte Spill - ldr r7, [r2, #80] - str r7, [sp, #172] @ 4-byte Spill - ldr r7, [r2, #84] - str r7, [sp, #176] @ 4-byte Spill - ldr r7, [r2, #88] - str r7, [sp, #180] @ 4-byte Spill - ldr r7, [r2, #92] - str r7, [sp, #184] @ 4-byte Spill - ldr r7, [r2, #60] - str r7, [sp, #140] @ 4-byte Spill - ldm r2, {r6, r8, r12, lr} - ldm r1, {r4, r5, r7, r9} - subs r4, r4, r6 - str r4, [sp, #36] @ 4-byte Spill - ldr r4, [r2, #56] - str r4, [sp, #128] @ 4-byte Spill - sbcs r4, r5, r8 - sbcs r7, r7, r12 - str r4, [sp, #32] @ 4-byte Spill - ldr r4, [r2, #52] - str r7, [sp, #28] @ 4-byte Spill - ldr r7, [r2, #48] - str r4, [sp, #96] @ 4-byte Spill - str r7, [sp, #88] @ 4-byte Spill - sbcs r7, r9, lr - add lr, r1, #16 - str r7, [sp, #24] @ 4-byte Spill - ldr r7, [r2, #44] - str r7, [sp, #84] @ 4-byte Spill - ldr r7, [r2, #40] - str r7, [sp, #80] @ 4-byte Spill - ldr r7, [r2, #36] - str r7, [sp, #76] @ 4-byte Spill - ldr r7, [r2, #32] - str r7, [sp, #40] @ 4-byte Spill - ldr r7, [r2, #28] - str r7, [sp, #20] @ 4-byte Spill - ldr r7, [r2, #24] - str r7, [sp, #16] @ 4-byte Spill - ldr r7, [r2, #20] - ldr r2, [r2, #16] - str r2, [sp, #8] @ 4-byte Spill - ldr r2, [r1, #128] - str r7, [sp, #12] @ 4-byte Spill - str r2, [sp, #132] @ 4-byte Spill - ldr r2, [r1, #132] - str r2, [sp, #136] @ 4-byte Spill - ldr r2, [r1, #96] - str r2, [sp, #100] @ 4-byte Spill - ldr r2, [r1, #104] - str r2, [sp, #104] @ 4-byte Spill - ldr r2, [r1, #108] - str r2, [sp, #108] @ 4-byte Spill - ldr r2, [r1, #112] - str r2, [sp, #112] @ 4-byte Spill - ldr r2, [r1, #116] - str r2, [sp, #116] @ 4-byte Spill - ldr r2, [r1, #120] - str r2, [sp, #120] @ 4-byte Spill - ldr r2, [r1, #124] - str r2, [sp, #124] @ 4-byte Spill - ldr r2, [r1, #100] - str r2, [sp, #92] @ 4-byte Spill - ldr r2, [r1, #64] - str r2, [sp, #44] @ 4-byte Spill - ldr r2, [r1, #68] - str r2, [sp, #48] @ 4-byte Spill - ldr r2, [r1, #72] - str r2, [sp, #52] @ 4-byte Spill - ldr r2, [r1, #76] - str r2, [sp, #56] @ 4-byte Spill - ldr r2, [r1, #80] - str r2, [sp, #60] @ 4-byte Spill - ldr r2, [r1, #84] - str r2, [sp, #64] @ 4-byte Spill - ldr r2, [r1, #88] - str r2, [sp, #68] @ 4-byte Spill - ldr r2, [r1, #92] - str r2, [sp, #72] @ 4-byte Spill - ldm r10, {r4, r5, r6, r8, r9, r10} - ldr r2, [r1, #56] - str r2, [sp] @ 4-byte Spill - ldr r2, [r1, #60] - str r2, [sp, #4] @ 
4-byte Spill - ldm lr, {r1, r2, r12, lr} - ldr r11, [sp, #36] @ 4-byte Reload - ldr r7, [sp, #32] @ 4-byte Reload - str r11, [r0] - str r7, [r0, #4] - ldr r7, [sp, #28] @ 4-byte Reload - str r7, [r0, #8] - ldr r7, [sp, #8] @ 4-byte Reload - sbcs r1, r1, r7 - ldr r7, [sp, #24] @ 4-byte Reload - str r7, [r0, #12] - ldr r7, [sp, #12] @ 4-byte Reload - str r1, [r0, #16] - ldr r1, [sp, #16] @ 4-byte Reload - sbcs r2, r2, r7 - ldr r7, [sp] @ 4-byte Reload - str r2, [r0, #20] - sbcs r1, r12, r1 - ldr r2, [sp, #20] @ 4-byte Reload - str r1, [r0, #24] - ldr r1, [sp, #40] @ 4-byte Reload - sbcs r2, lr, r2 - add lr, r3, #8 - str r2, [r0, #28] - sbcs r1, r4, r1 - ldr r2, [sp, #76] @ 4-byte Reload - str r1, [r0, #32] - ldr r1, [sp, #80] @ 4-byte Reload - sbcs r2, r5, r2 - str r2, [r0, #36] - sbcs r1, r6, r1 - ldr r2, [sp, #84] @ 4-byte Reload - str r1, [r0, #40] - ldr r1, [sp, #88] @ 4-byte Reload - sbcs r2, r8, r2 - sbcs r1, r9, r1 - str r2, [r0, #44] - ldr r2, [sp, #96] @ 4-byte Reload - add r9, r3, #20 - str r1, [r0, #48] - ldr r1, [sp, #128] @ 4-byte Reload - sbcs r2, r10, r2 - sbcs r1, r7, r1 - str r2, [r0, #52] - ldr r2, [sp, #140] @ 4-byte Reload - ldr r7, [sp, #4] @ 4-byte Reload - str r1, [r0, #56] - ldr r1, [sp, #144] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #44] @ 4-byte Reload - str r2, [r0, #60] - ldr r2, [sp, #148] @ 4-byte Reload - sbcs r1, r7, r1 - ldr r7, [sp, #48] @ 4-byte Reload - str r1, [r0, #64] - ldr r1, [sp, #152] @ 4-byte Reload - sbcs r5, r7, r2 - ldr r2, [sp, #52] @ 4-byte Reload - ldr r7, [sp, #100] @ 4-byte Reload - sbcs r10, r2, r1 - ldr r1, [sp, #160] @ 4-byte Reload - ldr r2, [sp, #56] @ 4-byte Reload - str r10, [sp, #96] @ 4-byte Spill - sbcs r1, r2, r1 - ldr r2, [sp, #60] @ 4-byte Reload - str r1, [sp, #160] @ 4-byte Spill - ldr r1, [sp, #172] @ 4-byte Reload - sbcs r1, r2, r1 - ldr r2, [sp, #64] @ 4-byte Reload - str r1, [sp, #172] @ 4-byte Spill - ldr r1, [sp, #176] @ 4-byte Reload - sbcs r1, r2, r1 - ldr r2, [sp, #68] @ 4-byte Reload - str r1, [sp, #176] @ 4-byte Spill - ldr r1, [sp, #180] @ 4-byte Reload - sbcs r1, r2, r1 - ldr r2, [sp, #72] @ 4-byte Reload - str r1, [sp, #180] @ 4-byte Spill - ldr r1, [sp, #184] @ 4-byte Reload - sbcs r1, r2, r1 - ldr r2, [sp, #188] @ 4-byte Reload - str r1, [sp, #184] @ 4-byte Spill - mov r1, #0 - sbcs r2, r7, r2 - ldr r7, [sp, #92] @ 4-byte Reload - str r2, [sp, #188] @ 4-byte Spill - ldr r2, [sp, #156] @ 4-byte Reload - sbcs r11, r7, r2 - ldr r2, [sp, #164] @ 4-byte Reload - ldr r7, [sp, #104] @ 4-byte Reload - str r11, [sp, #128] @ 4-byte Spill - sbcs r2, r7, r2 - ldr r7, [sp, #108] @ 4-byte Reload - str r2, [sp, #164] @ 4-byte Spill - ldr r2, [sp, #168] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #112] @ 4-byte Reload - str r2, [sp, #168] @ 4-byte Spill - ldr r2, [sp, #192] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #116] @ 4-byte Reload - str r2, [sp, #192] @ 4-byte Spill - ldr r2, [sp, #196] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #120] @ 4-byte Reload - str r2, [sp, #196] @ 4-byte Spill - ldr r2, [sp, #200] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #124] @ 4-byte Reload - str r2, [sp, #200] @ 4-byte Spill - ldr r2, [sp, #204] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #132] @ 4-byte Reload - str r2, [sp, #204] @ 4-byte Spill - ldr r2, [sp, #208] @ 4-byte Reload - sbcs r2, r7, r2 - ldr r7, [sp, #136] @ 4-byte Reload - str r2, [sp, #208] @ 4-byte Spill - ldr r2, [sp, #212] @ 4-byte Reload - sbcs r2, r7, r2 - sbc r1, r1, #0 - str r2, [sp, #212] @ 4-byte Spill - str r1, [sp, #124] @ 
4-byte Spill - ldr r1, [r3, #64] - str r1, [sp, #156] @ 4-byte Spill - ldr r1, [r3, #36] - str r1, [sp, #120] @ 4-byte Spill - ldr r1, [r3, #40] - str r1, [sp, #132] @ 4-byte Spill - ldr r1, [r3, #44] - str r1, [sp, #136] @ 4-byte Spill - ldr r1, [r3, #48] - str r1, [sp, #140] @ 4-byte Spill - ldr r1, [r3, #52] - str r1, [sp, #144] @ 4-byte Spill - ldr r1, [r3, #56] - str r1, [sp, #148] @ 4-byte Spill - ldr r1, [r3, #60] - str r1, [sp, #152] @ 4-byte Spill - ldr r1, [r3, #32] - str r1, [sp, #116] @ 4-byte Spill - ldm r3, {r2, r7} - ldm lr, {r6, r12, lr} - ldm r9, {r4, r8, r9} - ldr r3, [sp, #160] @ 4-byte Reload - adds r1, r5, r2 - adcs r2, r10, r7 - ldr r7, [sp, #164] @ 4-byte Reload - adcs r3, r3, r6 - ldr r6, [sp, #172] @ 4-byte Reload - adcs r12, r6, r12 - ldr r6, [sp, #176] @ 4-byte Reload - adcs lr, r6, lr - ldr r6, [sp, #180] @ 4-byte Reload - adcs r4, r6, r4 - ldr r6, [sp, #184] @ 4-byte Reload - adcs r8, r6, r8 - ldr r6, [sp, #188] @ 4-byte Reload - adcs r9, r6, r9 - ldr r6, [sp, #116] @ 4-byte Reload - adcs r10, r11, r6 - ldr r6, [sp, #120] @ 4-byte Reload - ldr r11, [sp, #156] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #132] @ 4-byte Reload - str r7, [sp, #120] @ 4-byte Spill - ldr r7, [sp, #168] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #136] @ 4-byte Reload - str r7, [sp, #132] @ 4-byte Spill - ldr r7, [sp, #192] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #140] @ 4-byte Reload - str r7, [sp, #136] @ 4-byte Spill - ldr r7, [sp, #196] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #144] @ 4-byte Reload - str r7, [sp, #140] @ 4-byte Spill - ldr r7, [sp, #200] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #148] @ 4-byte Reload - str r7, [sp, #144] @ 4-byte Spill - ldr r7, [sp, #204] @ 4-byte Reload - adcs r7, r7, r6 - ldr r6, [sp, #152] @ 4-byte Reload - str r7, [sp, #148] @ 4-byte Spill - ldr r7, [sp, #208] @ 4-byte Reload - adcs r7, r7, r6 - str r7, [sp, #152] @ 4-byte Spill - ldr r7, [sp, #212] @ 4-byte Reload - adc r7, r7, r11 - str r7, [sp, #156] @ 4-byte Spill - ldr r7, [sp, #124] @ 4-byte Reload - ands r7, r7, #1 - moveq r1, r5 - str r1, [r0, #68] - ldr r1, [sp, #96] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #160] @ 4-byte Reload - str r2, [r0, #72] - ldr r2, [sp, #120] @ 4-byte Reload - moveq r3, r1 - ldr r1, [sp, #172] @ 4-byte Reload - cmp r7, #0 - str r3, [r0, #76] - ldr r3, [sp, #156] @ 4-byte Reload - moveq r12, r1 - ldr r1, [sp, #176] @ 4-byte Reload - str r12, [r0, #80] - moveq lr, r1 - ldr r1, [sp, #180] @ 4-byte Reload - str lr, [r0, #84] - moveq r4, r1 - ldr r1, [sp, #184] @ 4-byte Reload - cmp r7, #0 - str r4, [r0, #88] - moveq r8, r1 - ldr r1, [sp, #188] @ 4-byte Reload - str r8, [r0, #92] - moveq r9, r1 - ldr r1, [sp, #128] @ 4-byte Reload - str r9, [r0, #96] - moveq r10, r1 - ldr r1, [sp, #164] @ 4-byte Reload - cmp r7, #0 - str r10, [r0, #100] - moveq r2, r1 - ldr r1, [sp, #168] @ 4-byte Reload - str r2, [r0, #104] - ldr r2, [sp, #132] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #192] @ 4-byte Reload - str r2, [r0, #108] - ldr r2, [sp, #136] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #196] @ 4-byte Reload - cmp r7, #0 - str r2, [r0, #112] - ldr r2, [sp, #140] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #200] @ 4-byte Reload - str r2, [r0, #116] - ldr r2, [sp, #144] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #204] @ 4-byte Reload - str r2, [r0, #120] - ldr r2, [sp, #148] @ 4-byte Reload - moveq r2, r1 - ldr r1, [sp, #208] @ 4-byte Reload - cmp r7, #0 - str r2, [r0, #124] - ldr r2, [sp, #152] @ 4-byte Reload - moveq r2, r1 
- ldr r1, [sp, #212] @ 4-byte Reload
- str r2, [r0, #128]
- moveq r3, r1
- str r3, [r0, #132]
- add sp, sp, #216
- pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
- mov pc, lr
-.Lfunc_end269:
- .size mcl_fpDbl_sub17L, .Lfunc_end269-mcl_fpDbl_sub17L
- .cantunwind
- .fnend
-
-
- .section ".note.GNU-stack","",%progbits
- .eabi_attribute 30, 2 @ Tag_ABI_optimization_goals
diff --git a/vendor/github.com/dexon-foundation/mcl/src/asm/low_arm.s b/vendor/github.com/dexon-foundation/mcl/src/asm/low_arm.s
deleted file mode 100644
index 1ed2a1233..000000000
--- a/vendor/github.com/dexon-foundation/mcl/src/asm/low_arm.s
+++ /dev/null
@@ -1,154 +0,0 @@
- .arch armv7-a
-
- .align 2
- .global mcl_fp_addPre64
-mcl_fp_addPre64:
- ldm r1, {r3, r12}
- ldm r2, {r1, r2}
- adds r1, r1, r3
- adc r2, r2, r12
- stm r0, {r1, r2}
- bx lr
-
-
- .align 2
- .global mcl_fp_addPre96
-mcl_fp_addPre96:
- push {r4, lr}
- ldm r1, {r1, r3, r12}
- ldm r2, {r2, r4, lr}
- adds r1, r1, r2
- adcs r3, r3, r4
- adc r12, r12, lr
- stm r0, {r1, r3, r12}
- pop {r4, lr}
- bx lr
-
-# slower
- .align 2
- .global mcl_fp_addPre96_2
-mcl_fp_addPre96_2:
- ldr r3, [r1], #4
- ldr r12, [r2], #4
- adds r3, r3, r12
- str r3, [r0], #4
-
- ldm r1, {r1, r3}
- ldm r2, {r2, r12}
- adcs r1, r1, r2
- adcs r3, r3, r12
- stm r0, {r1, r3}
- bx lr
-
- .globl mcl_fp_addPre128
- .align 2
-mcl_fp_addPre128:
- push {r4, lr}
- ldm r1!, {r3, r4}
- ldm r2!, {r12, lr}
- adds r3, r3, r12
- adcs r4, r4, lr
- stm r0!, {r3, r4}
- ldm r1, {r3, r4}
- ldm r2, {r12, lr}
- adcs r3, r3, r12
- adcs r4, r4, lr
- stm r0, {r3, r4}
- pop {r4, lr}
- bx lr
-
- # almost same
- .globl mcl_fp_addPre128_2
- .align 2
-mcl_fp_addPre128_2:
- push {r4, r5, r6, lr}
- ldm r1, {r1, r3, r4, r5}
- ldm r2, {r2, r6, r12, lr}
- adds r1, r1, r2
- adcs r3, r3, r6
- adcs r4, r4, r12
- adcs r5, r5, lr
- stm r0, {r1, r3, r4, r5}
- pop {r4, r5, r6, lr}
- bx lr
-
- .globl mcl_fp_addPre160
- .align 2
-mcl_fp_addPre160:
- push {r4, lr}
- ldm r1!, {r3, r4}
- ldm r2!, {r12, lr}
- adds r3, r3, r12
- adcs r4, r4, lr
- stm r0!, {r3, r4}
- ldm r1, {r1, r3, r4}
- ldm r2, {r2, r12, lr}
- adcs r1, r1, r2
- adcs r3, r3, r12
- adcs r4, r4, lr
- stm r0, {r1, r3, r4}
- pop {r4, lr}
- bx lr
-
- .globl mcl_fp_addPre192
- .align 2
-mcl_fp_addPre192:
- push {r4, r5, r6, lr}
- ldm r1!, {r3, r4, r5}
- ldm r2!, {r6, r12, lr}
- adds r3, r3, r6
- adcs r4, r4, r12
- adcs r5, r5, lr
- stm r0!, {r3, r4, r5}
-
- ldm r1, {r3, r4, r5}
- ldm r2, {r6, r12, lr}
- adcs r3, r3, r6
- adcs r4, r4, r12
- adcs r5, r5, lr
- stm r0, {r3, r4, r5}
- pop {r4, r5, r6, lr}
- bx lr
-
- .globl mcl_fp_addPre224
- .align 2
-mcl_fp_addPre224:
- push {r4, r5, r6, lr}
- ldm r1!, {r3, r4, r5}
- ldm r2!, {r6, r12, lr}
- adds r3, r3, r6
- adcs r4, r4, r12
- adcs r5, r5, lr
- stm r0!, {r3, r4, r5}
-
- ldm r1, {r1, r3, r4, r5}
- ldm r2, {r2, r6, r12, lr}
- adcs r1, r1, r2
- adcs r3, r3, r6
- adcs r4, r4, r12
- adcs r5, r5, lr
- stm r0, {r1, r3, r4, r5}
- pop {r4, r5, r6, lr}
- bx lr
-
- .globl mcl_fp_addPre256
- .align 2
-mcl_fp_addPre256:
- push {r4, r5, r6, r7, r8, lr}
- ldm r1!, {r3, r4, r5, r6}
- ldm r2!, {r7, r8, r12, lr}
- adds r3, r3, r7
- adcs r4, r4, r8
- adcs r5, r5, r12
- adcs r6, r6, lr
- stm r0!, {r3, r4, r5, r6}
-
- ldm r1, {r3, r4, r5, r6}
- ldm r2, {r7, r8, r12, lr}
- adcs r3, r3, r7
- adcs r4, r4, r8
- adcs r5, r5, r12
- adcs r6, r6, lr
- stm r0, {r3, r4, r5, r6}
- pop {r4, r5, r6, r7, r8, lr}
- bx lr
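The hand-written low_arm.s deleted above, and the low_x86-64.asm deleted next, implement the same primitive: a fixed-width add with no modular reduction, rippling the carry through every limb (adds then a chain of adcs on ARM, add then adc on x86-64). A minimal C model of that carry chain, written as a loop over 32-bit limbs to match the ARM code; the name add_pre and the loop form are illustrative only, since the assembly unrolls one instruction per limb and the ARM variants do not return the carry:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative model of the addPre carry chain: z = x + y over n
     * 32-bit limbs, with no reduction modulo p.  The deleted assembly
     * unrolls this loop once per limb for each fixed width (64, 96,
     * 128, ... bits); the x86-64 macro version below also returns the
     * final carry through setc/movzx. */
    static uint32_t add_pre(uint32_t *z, const uint32_t *x,
                            const uint32_t *y, size_t n)
    {
        uint32_t carry = 0;
        for (size_t i = 0; i < n; i++) {
            uint64_t t = (uint64_t)x[i] + y[i] + carry; /* adds/adcs step */
            z[i] = (uint32_t)t;                         /* low 32 bits   */
            carry = (uint32_t)(t >> 32);                /* carry out     */
        }
        return carry;
    }

The subNC macro in the x86-64 file is the same chain with sub/sbb, returning the final borrow instead of the carry.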
diff --git a/vendor/github.com/dexon-foundation/mcl/src/asm/low_x86-64.asm b/vendor/github.com/dexon-foundation/mcl/src/asm/low_x86-64.asm
deleted file mode 100644
index b09b9dcd3..000000000
--- a/vendor/github.com/dexon-foundation/mcl/src/asm/low_x86-64.asm
+++ /dev/null
@@ -1,153 +0,0 @@
-
-; Linux rdi rsi rdx rcx
-; Win rcx rdx r8 r9
-
-%ifdef _WIN64
- %define p1org rcx
- %define p2org rdx
- %define p3org r8
- %define p4org r9
-%else
- %define p1org rdi
- %define p2org rsi
- %define p3org rdx
- %define p4org rcx
-%endif
-
-%imacro proc 1
-global %1
-%1:
-%endmacro
-
-segment .text
-
-%imacro addPre 1
- mov rax, [p2org]
- add rax, [p3org]
- mov [p1org], rax
-%assign i 1
-%rep %1
- mov rax, [p2org + i * 8]
- adc rax, [p3org + i * 8]
- mov [p1org + i * 8], rax
-%assign i (i+1)
-%endrep
- setc al
- movzx eax, al
- ret
-%endmacro
-
-%imacro subNC 1
- mov rax, [p2org]
- sub rax, [p3org]
- mov [p1org], rax
-%assign i 1
-%rep %1
- mov rax, [p2org + i * 8]
- sbb rax, [p3org + i * 8]
- mov [p1org + i * 8], rax
-%assign i (i+1)
-%endrep
- setc al
- movzx eax, al
- ret
-%endmacro
-
-proc mcl_fp_addPre64
- addPre 0
-proc mcl_fp_addPre128
- addPre 1
-proc mcl_fp_addPre192
- addPre 2
-proc mcl_fp_addPre256
- addPre 3
-proc mcl_fp_addPre320
- addPre 4
-proc mcl_fp_addPre384
- addPre 5
-proc mcl_fp_addPre448
- addPre 6
-proc mcl_fp_addPre512
- addPre 7
-proc mcl_fp_addPre576
- addPre 8
-proc mcl_fp_addPre640
- addPre 9
-proc mcl_fp_addPre704
- addPre 10
-proc mcl_fp_addPre768
- addPre 11
-proc mcl_fp_addPre832
- addPre 12
-proc mcl_fp_addPre896
- addPre 13
-proc mcl_fp_addPre960
- addPre 14
-proc mcl_fp_addPre1024
- addPre 15
-proc mcl_fp_addPre1088
- addPre 16
-proc mcl_fp_addPre1152
- addPre 17
-proc mcl_fp_addPre1216
- addPre 18
-proc mcl_fp_addPre1280
- addPre 19
-proc mcl_fp_addPre1344
- addPre 20
-proc mcl_fp_addPre1408
- addPre 21
-proc mcl_fp_addPre1472
- addPre 22
-proc mcl_fp_addPre1536
- addPre 23
-
-proc mcl_fp_subNC64
- subNC 0
-proc mcl_fp_subNC128
- subNC 1
-proc mcl_fp_subNC192
- subNC 2
-proc mcl_fp_subNC256
- subNC 3
-proc mcl_fp_subNC320
- subNC 4
-proc mcl_fp_subNC384
- subNC 5
-proc mcl_fp_subNC448
- subNC 6
-proc mcl_fp_subNC512
- subNC 7
-proc mcl_fp_subNC576
- subNC 8
-proc mcl_fp_subNC640
- subNC 9
-proc mcl_fp_subNC704
- subNC 10
-proc mcl_fp_subNC768
- subNC 11
-proc mcl_fp_subNC832
- subNC 12
-proc mcl_fp_subNC896
- subNC 13
-proc mcl_fp_subNC960
- subNC 14
-proc mcl_fp_subNC1024
- subNC 15
-proc mcl_fp_subNC1088
- subNC 16
-proc mcl_fp_subNC1152
- subNC 17
-proc mcl_fp_subNC1216
- subNC 18
-proc mcl_fp_subNC1280
- subNC 19
-proc mcl_fp_subNC1344
- subNC 20
-proc mcl_fp_subNC1408
- subNC 21
-proc mcl_fp_subNC1472
- subNC 22
-proc mcl_fp_subNC1536
- subNC 23
-
diff --git a/vendor/github.com/dexon-foundation/mcl/src/asm/low_x86.asm b/vendor/github.com/dexon-foundation/mcl/src/asm/low_x86.asm
deleted file mode 100644
index e69de29bb..000000000
diff --git a/vendor/github.com/dexon-foundation/mcl/src/asm/x86-64.bmi2.s b/vendor/github.com/dexon-foundation/mcl/src/asm/x86-64.bmi2.s
deleted file mode 100644
index e12174ac6..000000000
--- a/vendor/github.com/dexon-foundation/mcl/src/asm/x86-64.bmi2.s
+++ /dev/null
@@ -1,14155 +0,0 @@
- .text
- .file ""
- .globl makeNIST_P192Lbmi2
- .align 16, 0x90
- .type makeNIST_P192Lbmi2,@function
-makeNIST_P192Lbmi2: # @makeNIST_P192Lbmi2
-# BB#0:
- movq $-1, %rax
- movq $-2, %rdx
- movq $-1, %rcx
- retq
-.Lfunc_end0:
- .size makeNIST_P192Lbmi2, .Lfunc_end0-makeNIST_P192Lbmi2
-
- .globl mcl_fpDbl_mod_NIST_P192Lbmi2
- .align 16, 0x90
- .type mcl_fpDbl_mod_NIST_P192Lbmi2,@function
-mcl_fpDbl_mod_NIST_P192Lbmi2: # @mcl_fpDbl_mod_NIST_P192Lbmi2
-# BB#0:
- pushq %r14
- pushq %rbx
- movq 16(%rsi), %r10
- movq 24(%rsi), %r8 - movq 40(%rsi), %r9 - movq 8(%rsi), %rax - addq %r9, %rax - adcq $0, %r10 - sbbq %rcx, %rcx - andl $1, %ecx - movq 32(%rsi), %r11 - movq (%rsi), %r14 - addq %r8, %r14 - adcq %r11, %rax - adcq %r9, %r10 - adcq $0, %rcx - addq %r9, %r14 - adcq %r8, %rax - adcq %r11, %r10 - adcq $0, %rcx - addq %rcx, %r14 - adcq %rax, %rcx - adcq $0, %r10 - sbbq %rax, %rax - andl $1, %eax - movq %r14, %rsi - addq $1, %rsi - movq %rcx, %rdx - adcq $1, %rdx - movq %r10, %rbx - adcq $0, %rbx - adcq $-1, %rax - andl $1, %eax - cmovneq %r14, %rsi - movq %rsi, (%rdi) - testb %al, %al - cmovneq %rcx, %rdx - movq %rdx, 8(%rdi) - cmovneq %r10, %rbx - movq %rbx, 16(%rdi) - popq %rbx - popq %r14 - retq -.Lfunc_end1: - .size mcl_fpDbl_mod_NIST_P192Lbmi2, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192Lbmi2 - - .globl mcl_fp_sqr_NIST_P192Lbmi2 - .align 16, 0x90 - .type mcl_fp_sqr_NIST_P192Lbmi2,@function -mcl_fp_sqr_NIST_P192Lbmi2: # @mcl_fp_sqr_NIST_P192Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, -8(%rsp) # 8-byte Spill - movq 16(%rsi), %r8 - movq (%rsi), %rcx - movq 8(%rsi), %rsi - movq %r8, %rdx - mulxq %rsi, %r14, %rbx - movq %rbx, -16(%rsp) # 8-byte Spill - movq %rsi, %rdx - mulxq %rsi, %r13, %r15 - movq %rsi, %rdx - mulxq %rcx, %r12, %rsi - addq %rsi, %r13 - adcq %r14, %r15 - adcq $0, %rbx - movq %rcx, %rdx - mulxq %rcx, %r9, %rax - addq %r12, %rax - movq %r8, %rdx - mulxq %rcx, %rbp, %r11 - adcq %rbp, %rsi - movq %r11, %r10 - adcq $0, %r10 - addq %r12, %rax - adcq %r13, %rsi - adcq %r15, %r10 - adcq $0, %rbx - movq %r8, %rdx - mulxq %r8, %rcx, %rdi - addq %r14, %r11 - adcq -16(%rsp), %rcx # 8-byte Folded Reload - adcq $0, %rdi - addq %rbp, %rsi - adcq %r10, %r11 - adcq %rbx, %rcx - adcq $0, %rdi - addq %rdi, %rax - adcq $0, %rsi - sbbq %rdx, %rdx - andl $1, %edx - addq %r11, %r9 - adcq %rcx, %rax - adcq %rdi, %rsi - adcq $0, %rdx - addq %rdi, %r9 - adcq %r11, %rax - adcq %rcx, %rsi - adcq $0, %rdx - addq %rdx, %r9 - adcq %rax, %rdx - adcq $0, %rsi - sbbq %rax, %rax - andl $1, %eax - movq %r9, %rcx - addq $1, %rcx - movq %rdx, %rdi - adcq $1, %rdi - movq %rsi, %rbp - adcq $0, %rbp - adcq $-1, %rax - andl $1, %eax - cmovneq %r9, %rcx - movq -8(%rsp), %rbx # 8-byte Reload - movq %rcx, (%rbx) - testb %al, %al - cmovneq %rdx, %rdi - movq %rdi, 8(%rbx) - cmovneq %rsi, %rbp - movq %rbp, 16(%rbx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end2: - .size mcl_fp_sqr_NIST_P192Lbmi2, .Lfunc_end2-mcl_fp_sqr_NIST_P192Lbmi2 - - .globl mcl_fp_mulNIST_P192Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulNIST_P192Lbmi2,@function -mcl_fp_mulNIST_P192Lbmi2: # @mcl_fp_mulNIST_P192Lbmi2 -# BB#0: - pushq %r14 - pushq %rbx - subq $56, %rsp - movq %rdi, %r14 - leaq 8(%rsp), %rdi - callq mcl_fpDbl_mulPre3Lbmi2@PLT - movq 24(%rsp), %r9 - movq 32(%rsp), %r8 - movq 48(%rsp), %rdi - movq 16(%rsp), %rbx - addq %rdi, %rbx - adcq $0, %r9 - sbbq %rcx, %rcx - andl $1, %ecx - movq 40(%rsp), %rsi - movq 8(%rsp), %rdx - addq %r8, %rdx - adcq %rsi, %rbx - adcq %rdi, %r9 - adcq $0, %rcx - addq %rdi, %rdx - adcq %r8, %rbx - adcq %rsi, %r9 - adcq $0, %rcx - addq %rcx, %rdx - adcq %rbx, %rcx - adcq $0, %r9 - sbbq %rsi, %rsi - andl $1, %esi - movq %rdx, %rdi - addq $1, %rdi - movq %rcx, %rbx - adcq $1, %rbx - movq %r9, %rax - adcq $0, %rax - adcq $-1, %rsi - andl $1, %esi - cmovneq %rdx, %rdi - movq %rdi, (%r14) - testb %sil, %sil - cmovneq %rcx, %rbx - movq %rbx, 8(%r14) - cmovneq %r9, %rax - movq %rax, 16(%r14) - addq $56, %rsp - popq %rbx - popq 
%r14 - retq -.Lfunc_end3: - .size mcl_fp_mulNIST_P192Lbmi2, .Lfunc_end3-mcl_fp_mulNIST_P192Lbmi2 - - .globl mcl_fpDbl_mod_NIST_P521Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mod_NIST_P521Lbmi2,@function -mcl_fpDbl_mod_NIST_P521Lbmi2: # @mcl_fpDbl_mod_NIST_P521Lbmi2 -# BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 120(%rsi), %r9 - movq 128(%rsi), %r14 - movq %r14, %r8 - shldq $55, %r9, %r8 - movq 112(%rsi), %r10 - shldq $55, %r10, %r9 - movq 104(%rsi), %r11 - shldq $55, %r11, %r10 - movq 96(%rsi), %r15 - shldq $55, %r15, %r11 - movq 88(%rsi), %r12 - shldq $55, %r12, %r15 - movq 80(%rsi), %rcx - shldq $55, %rcx, %r12 - movq 64(%rsi), %rbx - movq 72(%rsi), %rax - shldq $55, %rax, %rcx - shrq $9, %r14 - shldq $55, %rbx, %rax - andl $511, %ebx # imm = 0x1FF - addq (%rsi), %rax - adcq 8(%rsi), %rcx - adcq 16(%rsi), %r12 - adcq 24(%rsi), %r15 - adcq 32(%rsi), %r11 - adcq 40(%rsi), %r10 - adcq 48(%rsi), %r9 - adcq 56(%rsi), %r8 - adcq %r14, %rbx - movq %rbx, %rsi - shrq $9, %rsi - andl $1, %esi - addq %rax, %rsi - adcq $0, %rcx - adcq $0, %r12 - adcq $0, %r15 - adcq $0, %r11 - adcq $0, %r10 - adcq $0, %r9 - adcq $0, %r8 - adcq $0, %rbx - movq %rsi, %rax - andq %r12, %rax - andq %r15, %rax - andq %r11, %rax - andq %r10, %rax - andq %r9, %rax - andq %r8, %rax - movq %rbx, %rdx - orq $-512, %rdx # imm = 0xFFFFFFFFFFFFFE00 - andq %rax, %rdx - andq %rcx, %rdx - cmpq $-1, %rdx - je .LBB4_1 -# BB#3: # %nonzero - movq %rsi, (%rdi) - movq %rcx, 8(%rdi) - movq %r12, 16(%rdi) - movq %r15, 24(%rdi) - movq %r11, 32(%rdi) - movq %r10, 40(%rdi) - movq %r9, 48(%rdi) - movq %r8, 56(%rdi) - andl $511, %ebx # imm = 0x1FF - movq %rbx, 64(%rdi) - jmp .LBB4_2 -.LBB4_1: # %zero - movq $0, 64(%rdi) - movq $0, 56(%rdi) - movq $0, 48(%rdi) - movq $0, 40(%rdi) - movq $0, 32(%rdi) - movq $0, 24(%rdi) - movq $0, 16(%rdi) - movq $0, 8(%rdi) - movq $0, (%rdi) -.LBB4_2: # %zero - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq -.Lfunc_end4: - .size mcl_fpDbl_mod_NIST_P521Lbmi2, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521Lbmi2 - - .globl mcl_fp_mulUnitPre1Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre1Lbmi2,@function -mcl_fp_mulUnitPre1Lbmi2: # @mcl_fp_mulUnitPre1Lbmi2 -# BB#0: - mulxq (%rsi), %rcx, %rax - movq %rcx, (%rdi) - movq %rax, 8(%rdi) - retq -.Lfunc_end5: - .size mcl_fp_mulUnitPre1Lbmi2, .Lfunc_end5-mcl_fp_mulUnitPre1Lbmi2 - - .globl mcl_fpDbl_mulPre1Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre1Lbmi2,@function -mcl_fpDbl_mulPre1Lbmi2: # @mcl_fpDbl_mulPre1Lbmi2 -# BB#0: - movq (%rdx), %rdx - mulxq (%rsi), %rcx, %rax - movq %rcx, (%rdi) - movq %rax, 8(%rdi) - retq -.Lfunc_end6: - .size mcl_fpDbl_mulPre1Lbmi2, .Lfunc_end6-mcl_fpDbl_mulPre1Lbmi2 - - .globl mcl_fpDbl_sqrPre1Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre1Lbmi2,@function -mcl_fpDbl_sqrPre1Lbmi2: # @mcl_fpDbl_sqrPre1Lbmi2 -# BB#0: - movq (%rsi), %rdx - mulxq %rdx, %rcx, %rax - movq %rcx, (%rdi) - movq %rax, 8(%rdi) - retq -.Lfunc_end7: - .size mcl_fpDbl_sqrPre1Lbmi2, .Lfunc_end7-mcl_fpDbl_sqrPre1Lbmi2 - - .globl mcl_fp_mont1Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont1Lbmi2,@function -mcl_fp_mont1Lbmi2: # @mcl_fp_mont1Lbmi2 -# BB#0: - movq %rdx, %rax - movq (%rsi), %rdx - mulxq (%rax), %rsi, %r8 - movq -8(%rcx), %rdx - imulq %rsi, %rdx - movq (%rcx), %rcx - mulxq %rcx, %rdx, %rax - addq %rsi, %rdx - adcq %r8, %rax - sbbq %rdx, %rdx - andl $1, %edx - movq %rax, %rsi - subq %rcx, %rsi - sbbq $0, %rdx - testb $1, %dl - cmovneq %rax, %rsi - movq %rsi, (%rdi) - retq -.Lfunc_end8: - .size mcl_fp_mont1Lbmi2, .Lfunc_end8-mcl_fp_mont1Lbmi2 - - 
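The mcl_fp_mont1Lbmi2 body that ends just above is a single-limb Montgomery multiplication: take the 128-bit product, add m*p with m chosen so the low 64 bits cancel, shift right by one limb, then conditionally subtract p. A C sketch of the same arithmetic under the assumption of a single 64-bit limb; mont_mul1 is an illustrative name, not mcl's API, and pinv stands for the precomputed -p^-1 mod 2^64 that the assembly loads from -8(%rcx):

    #include <stdint.h>

    /* Illustrative model of mcl_fp_mont1Lbmi2: r = x*y*R^-1 mod p with
     * R = 2^64, using the unsigned __int128 extension of GCC/Clang.
     * pinv = -p^-1 mod 2^64. */
    static uint64_t mont_mul1(uint64_t x, uint64_t y, uint64_t p, uint64_t pinv)
    {
        unsigned __int128 t  = (unsigned __int128)x * y;  /* full product */
        uint64_t m           = (uint64_t)t * pinv;        /* kills low limb */
        unsigned __int128 mp = (unsigned __int128)m * p;
        /* t + m*p: the low limbs sum to 0 mod 2^64 by choice of m, so
         * only the carry out matters (the addq/adcq/sbbq sequence). */
        uint64_t lo    = (uint64_t)t + (uint64_t)mp;
        uint64_t carry = lo < (uint64_t)t;
        unsigned __int128 hi = (t >> 64) + (mp >> 64) + carry;
        /* hi < 2p, so one conditional subtract completes the reduction;
         * the assembly does this branchlessly with subq/sbbq/cmovneq. */
        return hi >= p ? (uint64_t)(hi - p) : (uint64_t)hi;
    }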
.globl mcl_fp_montNF1Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF1Lbmi2,@function -mcl_fp_montNF1Lbmi2: # @mcl_fp_montNF1Lbmi2 -# BB#0: - movq %rdx, %rax - movq (%rsi), %rdx - mulxq (%rax), %rsi, %r8 - movq -8(%rcx), %rdx - imulq %rsi, %rdx - movq (%rcx), %rcx - mulxq %rcx, %rdx, %rax - addq %rsi, %rdx - adcq %r8, %rax - movq %rax, %rdx - subq %rcx, %rdx - cmovsq %rax, %rdx - movq %rdx, (%rdi) - retq -.Lfunc_end9: - .size mcl_fp_montNF1Lbmi2, .Lfunc_end9-mcl_fp_montNF1Lbmi2 - - .globl mcl_fp_montRed1Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed1Lbmi2,@function -mcl_fp_montRed1Lbmi2: # @mcl_fp_montRed1Lbmi2 -# BB#0: - movq (%rsi), %rcx - movq -8(%rdx), %rax - imulq %rcx, %rax - movq (%rdx), %r8 - movq %rax, %rdx - mulxq %r8, %rax, %rdx - addq %rcx, %rax - adcq 8(%rsi), %rdx - sbbq %rax, %rax - andl $1, %eax - movq %rdx, %rcx - subq %r8, %rcx - sbbq $0, %rax - testb $1, %al - cmovneq %rdx, %rcx - movq %rcx, (%rdi) - retq -.Lfunc_end10: - .size mcl_fp_montRed1Lbmi2, .Lfunc_end10-mcl_fp_montRed1Lbmi2 - - .globl mcl_fp_addPre1Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre1Lbmi2,@function -mcl_fp_addPre1Lbmi2: # @mcl_fp_addPre1Lbmi2 -# BB#0: - movq (%rdx), %rax - addq (%rsi), %rax - movq %rax, (%rdi) - sbbq %rax, %rax - andl $1, %eax - retq -.Lfunc_end11: - .size mcl_fp_addPre1Lbmi2, .Lfunc_end11-mcl_fp_addPre1Lbmi2 - - .globl mcl_fp_subPre1Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre1Lbmi2,@function -mcl_fp_subPre1Lbmi2: # @mcl_fp_subPre1Lbmi2 -# BB#0: - movq (%rsi), %rcx - xorl %eax, %eax - subq (%rdx), %rcx - movq %rcx, (%rdi) - sbbq $0, %rax - andl $1, %eax - retq -.Lfunc_end12: - .size mcl_fp_subPre1Lbmi2, .Lfunc_end12-mcl_fp_subPre1Lbmi2 - - .globl mcl_fp_shr1_1Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_1Lbmi2,@function -mcl_fp_shr1_1Lbmi2: # @mcl_fp_shr1_1Lbmi2 -# BB#0: - movq (%rsi), %rax - shrq %rax - movq %rax, (%rdi) - retq -.Lfunc_end13: - .size mcl_fp_shr1_1Lbmi2, .Lfunc_end13-mcl_fp_shr1_1Lbmi2 - - .globl mcl_fp_add1Lbmi2 - .align 16, 0x90 - .type mcl_fp_add1Lbmi2,@function -mcl_fp_add1Lbmi2: # @mcl_fp_add1Lbmi2 -# BB#0: - movq (%rdx), %rax - addq (%rsi), %rax - movq %rax, (%rdi) - sbbq %rdx, %rdx - andl $1, %edx - subq (%rcx), %rax - sbbq $0, %rdx - testb $1, %dl - jne .LBB14_2 -# BB#1: # %nocarry - movq %rax, (%rdi) -.LBB14_2: # %carry - retq -.Lfunc_end14: - .size mcl_fp_add1Lbmi2, .Lfunc_end14-mcl_fp_add1Lbmi2 - - .globl mcl_fp_addNF1Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF1Lbmi2,@function -mcl_fp_addNF1Lbmi2: # @mcl_fp_addNF1Lbmi2 -# BB#0: - movq (%rdx), %rax - addq (%rsi), %rax - movq %rax, %rdx - subq (%rcx), %rdx - cmovsq %rax, %rdx - movq %rdx, (%rdi) - retq -.Lfunc_end15: - .size mcl_fp_addNF1Lbmi2, .Lfunc_end15-mcl_fp_addNF1Lbmi2 - - .globl mcl_fp_sub1Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub1Lbmi2,@function -mcl_fp_sub1Lbmi2: # @mcl_fp_sub1Lbmi2 -# BB#0: - movq (%rsi), %rax - xorl %esi, %esi - subq (%rdx), %rax - movq %rax, (%rdi) - sbbq $0, %rsi - testb $1, %sil - jne .LBB16_2 -# BB#1: # %nocarry - retq -.LBB16_2: # %carry - addq (%rcx), %rax - movq %rax, (%rdi) - retq -.Lfunc_end16: - .size mcl_fp_sub1Lbmi2, .Lfunc_end16-mcl_fp_sub1Lbmi2 - - .globl mcl_fp_subNF1Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF1Lbmi2,@function -mcl_fp_subNF1Lbmi2: # @mcl_fp_subNF1Lbmi2 -# BB#0: - movq (%rsi), %rax - subq (%rdx), %rax - movq %rax, %rdx - sarq $63, %rdx - andq (%rcx), %rdx - addq %rax, %rdx - movq %rdx, (%rdi) - retq -.Lfunc_end17: - .size mcl_fp_subNF1Lbmi2, .Lfunc_end17-mcl_fp_subNF1Lbmi2 - - .globl mcl_fpDbl_add1Lbmi2 - .align 16, 0x90 - .type 
mcl_fpDbl_add1Lbmi2,@function -mcl_fpDbl_add1Lbmi2: # @mcl_fpDbl_add1Lbmi2 -# BB#0: - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - movq %rax, (%rdi) - sbbq %rax, %rax - andl $1, %eax - movq %rdx, %rsi - subq (%rcx), %rsi - sbbq $0, %rax - testb $1, %al - cmovneq %rdx, %rsi - movq %rsi, 8(%rdi) - retq -.Lfunc_end18: - .size mcl_fpDbl_add1Lbmi2, .Lfunc_end18-mcl_fpDbl_add1Lbmi2 - - .globl mcl_fpDbl_sub1Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub1Lbmi2,@function -mcl_fpDbl_sub1Lbmi2: # @mcl_fpDbl_sub1Lbmi2 -# BB#0: - movq (%rsi), %rax - movq 8(%rsi), %r8 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r8 - movq %rax, (%rdi) - movl $0, %eax - sbbq $0, %rax - testb $1, %al - cmovneq (%rcx), %rsi - addq %r8, %rsi - movq %rsi, 8(%rdi) - retq -.Lfunc_end19: - .size mcl_fpDbl_sub1Lbmi2, .Lfunc_end19-mcl_fpDbl_sub1Lbmi2 - - .globl mcl_fp_mulUnitPre2Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre2Lbmi2,@function -mcl_fp_mulUnitPre2Lbmi2: # @mcl_fp_mulUnitPre2Lbmi2 -# BB#0: - mulxq 8(%rsi), %rax, %rcx - mulxq (%rsi), %rdx, %rsi - movq %rdx, (%rdi) - addq %rax, %rsi - movq %rsi, 8(%rdi) - adcq $0, %rcx - movq %rcx, 16(%rdi) - retq -.Lfunc_end20: - .size mcl_fp_mulUnitPre2Lbmi2, .Lfunc_end20-mcl_fp_mulUnitPre2Lbmi2 - - .globl mcl_fpDbl_mulPre2Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre2Lbmi2,@function -mcl_fpDbl_mulPre2Lbmi2: # @mcl_fpDbl_mulPre2Lbmi2 -# BB#0: - movq %rdx, %r10 - movq (%rsi), %r11 - movq 8(%rsi), %r8 - movq (%r10), %rsi - movq %r11, %rdx - mulxq %rsi, %rdx, %r9 - movq %rdx, (%rdi) - movq %r8, %rdx - mulxq %rsi, %rsi, %rax - addq %r9, %rsi - adcq $0, %rax - movq 8(%r10), %rcx - movq %r11, %rdx - mulxq %rcx, %rdx, %r9 - addq %rsi, %rdx - movq %rdx, 8(%rdi) - movq %r8, %rdx - mulxq %rcx, %rdx, %rcx - adcq %rax, %rdx - sbbq %rax, %rax - andl $1, %eax - addq %r9, %rdx - movq %rdx, 16(%rdi) - adcq %rcx, %rax - movq %rax, 24(%rdi) - retq -.Lfunc_end21: - .size mcl_fpDbl_mulPre2Lbmi2, .Lfunc_end21-mcl_fpDbl_mulPre2Lbmi2 - - .globl mcl_fpDbl_sqrPre2Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre2Lbmi2,@function -mcl_fpDbl_sqrPre2Lbmi2: # @mcl_fpDbl_sqrPre2Lbmi2 -# BB#0: - movq (%rsi), %rax - movq 8(%rsi), %rcx - movq %rax, %rdx - mulxq %rax, %rdx, %rsi - movq %rdx, (%rdi) - movq %rcx, %rdx - mulxq %rax, %rdx, %r8 - addq %rdx, %rsi - movq %r8, %rax - adcq $0, %rax - addq %rdx, %rsi - movq %rsi, 8(%rdi) - movq %rcx, %rdx - mulxq %rcx, %rdx, %rcx - adcq %rax, %rdx - sbbq %rax, %rax - andl $1, %eax - addq %r8, %rdx - movq %rdx, 16(%rdi) - adcq %rcx, %rax - movq %rax, 24(%rdi) - retq -.Lfunc_end22: - .size mcl_fpDbl_sqrPre2Lbmi2, .Lfunc_end22-mcl_fpDbl_sqrPre2Lbmi2 - - .globl mcl_fp_mont2Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont2Lbmi2,@function -mcl_fp_mont2Lbmi2: # @mcl_fp_mont2Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq (%rsi), %r8 - movq 8(%rsi), %r9 - movq (%rdx), %rax - movq 8(%rdx), %r11 - movq %r9, %rdx - mulxq %rax, %r10, %r13 - movq %r8, %rdx - mulxq %rax, %r14, %rsi - addq %r10, %rsi - adcq $0, %r13 - movq -8(%rcx), %rbp - movq (%rcx), %r10 - movq %r14, %rdx - imulq %rbp, %rdx - movq 8(%rcx), %r15 - mulxq %r15, %r12, %rcx - mulxq %r10, %rdx, %rbx - addq %r12, %rbx - adcq $0, %rcx - addq %r14, %rdx - adcq %rsi, %rbx - adcq %r13, %rcx - sbbq %rsi, %rsi - andl $1, %esi - movq %r11, %rdx - mulxq %r9, %r9, %r14 - movq %r11, %rdx - mulxq %r8, %r8, %rax - addq %r9, %rax - adcq $0, %r14 - addq %rbx, %r8 - adcq %rcx, %rax - adcq %rsi, %r14 - sbbq %rsi, %rsi - andl $1, %esi - imulq %r8, 
%rbp - movq %rbp, %rdx - mulxq %r15, %rcx, %rbx - movq %rbp, %rdx - mulxq %r10, %rdx, %rbp - addq %rcx, %rbp - adcq $0, %rbx - addq %r8, %rdx - adcq %rax, %rbp - adcq %r14, %rbx - adcq $0, %rsi - movq %rbp, %rax - subq %r10, %rax - movq %rbx, %rcx - sbbq %r15, %rcx - sbbq $0, %rsi - andl $1, %esi - cmovneq %rbx, %rcx - testb %sil, %sil - cmovneq %rbp, %rax - movq %rax, (%rdi) - movq %rcx, 8(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end23: - .size mcl_fp_mont2Lbmi2, .Lfunc_end23-mcl_fp_mont2Lbmi2 - - .globl mcl_fp_montNF2Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF2Lbmi2,@function -mcl_fp_montNF2Lbmi2: # @mcl_fp_montNF2Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq (%rsi), %r8 - movq 8(%rsi), %r9 - movq (%rdx), %rax - movq 8(%rdx), %r11 - movq %r9, %rdx - mulxq %rax, %r10, %rsi - movq %r8, %rdx - mulxq %rax, %r15, %r13 - addq %r10, %r13 - adcq $0, %rsi - movq -8(%rcx), %rbp - movq (%rcx), %r10 - movq %r15, %rdx - imulq %rbp, %rdx - movq 8(%rcx), %r14 - mulxq %r10, %rcx, %r12 - addq %r15, %rcx - mulxq %r14, %rbx, %rcx - adcq %r13, %rbx - adcq $0, %rsi - addq %r12, %rbx - adcq %rcx, %rsi - movq %r11, %rdx - mulxq %r9, %r9, %rcx - movq %r11, %rdx - mulxq %r8, %r8, %rax - addq %r9, %rax - adcq $0, %rcx - addq %rbx, %r8 - adcq %rsi, %rax - adcq $0, %rcx - imulq %r8, %rbp - movq %rbp, %rdx - mulxq %r14, %rbx, %rsi - movq %rbp, %rdx - mulxq %r10, %rbp, %rdx - addq %r8, %rbp - adcq %rax, %rbx - adcq $0, %rcx - addq %rdx, %rbx - adcq %rsi, %rcx - movq %rbx, %rax - subq %r10, %rax - movq %rcx, %rdx - sbbq %r14, %rdx - cmovsq %rbx, %rax - movq %rax, (%rdi) - cmovsq %rcx, %rdx - movq %rdx, 8(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end24: - .size mcl_fp_montNF2Lbmi2, .Lfunc_end24-mcl_fp_montNF2Lbmi2 - - .globl mcl_fp_montRed2Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed2Lbmi2,@function -mcl_fp_montRed2Lbmi2: # @mcl_fp_montRed2Lbmi2 -# BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq -8(%rdx), %r15 - movq (%rdx), %r8 - movq (%rsi), %r10 - movq %r10, %rcx - imulq %r15, %rcx - movq 8(%rdx), %r9 - movq %rcx, %rdx - mulxq %r9, %r11, %r14 - movq %rcx, %rdx - mulxq %r8, %rcx, %rax - addq %r11, %rax - adcq $0, %r14 - movq 24(%rsi), %r11 - addq %r10, %rcx - adcq 8(%rsi), %rax - adcq 16(%rsi), %r14 - adcq $0, %r11 - sbbq %rcx, %rcx - andl $1, %ecx - imulq %rax, %r15 - movq %r15, %rdx - mulxq %r9, %r10, %rbx - movq %r15, %rdx - mulxq %r8, %rsi, %rdx - addq %r10, %rdx - adcq $0, %rbx - addq %rax, %rsi - adcq %r14, %rdx - adcq %r11, %rbx - adcq $0, %rcx - movq %rdx, %rax - subq %r8, %rax - movq %rbx, %rsi - sbbq %r9, %rsi - sbbq $0, %rcx - andl $1, %ecx - cmovneq %rbx, %rsi - testb %cl, %cl - cmovneq %rdx, %rax - movq %rax, (%rdi) - movq %rsi, 8(%rdi) - popq %rbx - popq %r14 - popq %r15 - retq -.Lfunc_end25: - .size mcl_fp_montRed2Lbmi2, .Lfunc_end25-mcl_fp_montRed2Lbmi2 - - .globl mcl_fp_addPre2Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre2Lbmi2,@function -mcl_fp_addPre2Lbmi2: # @mcl_fp_addPre2Lbmi2 -# BB#0: - movq (%rdx), %rax - movq 8(%rdx), %rcx - addq (%rsi), %rax - adcq 8(%rsi), %rcx - movq %rax, (%rdi) - movq %rcx, 8(%rdi) - sbbq %rax, %rax - andl $1, %eax - retq -.Lfunc_end26: - .size mcl_fp_addPre2Lbmi2, .Lfunc_end26-mcl_fp_addPre2Lbmi2 - - .globl mcl_fp_subPre2Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre2Lbmi2,@function -mcl_fp_subPre2Lbmi2: # @mcl_fp_subPre2Lbmi2 -# BB#0: - movq (%rsi), %rcx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), 
%rcx - sbbq 8(%rdx), %rsi - movq %rcx, (%rdi) - movq %rsi, 8(%rdi) - sbbq $0, %rax - andl $1, %eax - retq -.Lfunc_end27: - .size mcl_fp_subPre2Lbmi2, .Lfunc_end27-mcl_fp_subPre2Lbmi2 - - .globl mcl_fp_shr1_2Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_2Lbmi2,@function -mcl_fp_shr1_2Lbmi2: # @mcl_fp_shr1_2Lbmi2 -# BB#0: - movq (%rsi), %rax - movq 8(%rsi), %rcx - shrdq $1, %rcx, %rax - movq %rax, (%rdi) - shrq %rcx - movq %rcx, 8(%rdi) - retq -.Lfunc_end28: - .size mcl_fp_shr1_2Lbmi2, .Lfunc_end28-mcl_fp_shr1_2Lbmi2 - - .globl mcl_fp_add2Lbmi2 - .align 16, 0x90 - .type mcl_fp_add2Lbmi2,@function -mcl_fp_add2Lbmi2: # @mcl_fp_add2Lbmi2 -# BB#0: - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq $0, %rsi - testb $1, %sil - jne .LBB29_2 -# BB#1: # %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) -.LBB29_2: # %carry - retq -.Lfunc_end29: - .size mcl_fp_add2Lbmi2, .Lfunc_end29-mcl_fp_add2Lbmi2 - - .globl mcl_fp_addNF2Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF2Lbmi2,@function -mcl_fp_addNF2Lbmi2: # @mcl_fp_addNF2Lbmi2 -# BB#0: - movq (%rdx), %rax - movq 8(%rdx), %r8 - addq (%rsi), %rax - adcq 8(%rsi), %r8 - movq %rax, %rsi - subq (%rcx), %rsi - movq %r8, %rdx - sbbq 8(%rcx), %rdx - testq %rdx, %rdx - cmovsq %rax, %rsi - movq %rsi, (%rdi) - cmovsq %r8, %rdx - movq %rdx, 8(%rdi) - retq -.Lfunc_end30: - .size mcl_fp_addNF2Lbmi2, .Lfunc_end30-mcl_fp_addNF2Lbmi2 - - .globl mcl_fp_sub2Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub2Lbmi2,@function -mcl_fp_sub2Lbmi2: # @mcl_fp_sub2Lbmi2 -# BB#0: - movq (%rsi), %rax - movq 8(%rsi), %r8 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r8 - movq %rax, (%rdi) - movq %r8, 8(%rdi) - sbbq $0, %rsi - testb $1, %sil - jne .LBB31_2 -# BB#1: # %nocarry - retq -.LBB31_2: # %carry - movq 8(%rcx), %rdx - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %r8, %rdx - movq %rdx, 8(%rdi) - retq -.Lfunc_end31: - .size mcl_fp_sub2Lbmi2, .Lfunc_end31-mcl_fp_sub2Lbmi2 - - .globl mcl_fp_subNF2Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF2Lbmi2,@function -mcl_fp_subNF2Lbmi2: # @mcl_fp_subNF2Lbmi2 -# BB#0: - movq (%rsi), %r8 - movq 8(%rsi), %rsi - subq (%rdx), %r8 - sbbq 8(%rdx), %rsi - movq %rsi, %rdx - sarq $63, %rdx - movq 8(%rcx), %rax - andq %rdx, %rax - andq (%rcx), %rdx - addq %r8, %rdx - movq %rdx, (%rdi) - adcq %rsi, %rax - movq %rax, 8(%rdi) - retq -.Lfunc_end32: - .size mcl_fp_subNF2Lbmi2, .Lfunc_end32-mcl_fp_subNF2Lbmi2 - - .globl mcl_fpDbl_add2Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add2Lbmi2,@function -mcl_fpDbl_add2Lbmi2: # @mcl_fpDbl_add2Lbmi2 -# BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rdx), %r10 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r10 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - adcq %r8, %r9 - sbbq %rax, %rax - andl $1, %eax - movq %r10, %rdx - subq (%rcx), %rdx - movq %r9, %rsi - sbbq 8(%rcx), %rsi - sbbq $0, %rax - andl $1, %eax - cmovneq %r10, %rdx - movq %rdx, 16(%rdi) - testb %al, %al - cmovneq %r9, %rsi - movq %rsi, 24(%rdi) - retq -.Lfunc_end33: - .size mcl_fpDbl_add2Lbmi2, .Lfunc_end33-mcl_fpDbl_add2Lbmi2 - - .globl mcl_fpDbl_sub2Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub2Lbmi2,@function -mcl_fpDbl_sub2Lbmi2: # @mcl_fpDbl_sub2Lbmi2 -# BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rsi), %r10 - movq (%rsi), %r11 - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %r11 - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), 
%r10 - movq %r11, (%rdi) - movq %rsi, 8(%rdi) - sbbq %r8, %r9 - movl $0, %edx - sbbq $0, %rdx - andl $1, %edx - movq (%rcx), %rsi - cmoveq %rax, %rsi - testb %dl, %dl - cmovneq 8(%rcx), %rax - addq %r10, %rsi - movq %rsi, 16(%rdi) - adcq %r9, %rax - movq %rax, 24(%rdi) - retq -.Lfunc_end34: - .size mcl_fpDbl_sub2Lbmi2, .Lfunc_end34-mcl_fpDbl_sub2Lbmi2 - - .globl mcl_fp_mulUnitPre3Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre3Lbmi2,@function -mcl_fp_mulUnitPre3Lbmi2: # @mcl_fp_mulUnitPre3Lbmi2 -# BB#0: - mulxq 16(%rsi), %r8, %rcx - mulxq 8(%rsi), %r9, %rax - mulxq (%rsi), %rdx, %rsi - movq %rdx, (%rdi) - addq %r9, %rsi - movq %rsi, 8(%rdi) - adcq %r8, %rax - movq %rax, 16(%rdi) - adcq $0, %rcx - movq %rcx, 24(%rdi) - retq -.Lfunc_end35: - .size mcl_fp_mulUnitPre3Lbmi2, .Lfunc_end35-mcl_fp_mulUnitPre3Lbmi2 - - .globl mcl_fpDbl_mulPre3Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre3Lbmi2,@function -mcl_fpDbl_mulPre3Lbmi2: # @mcl_fpDbl_mulPre3Lbmi2 -# BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq %rdx, %r9 - movq (%rsi), %r10 - movq 8(%rsi), %r8 - movq (%r9), %rax - movq %r10, %rdx - mulxq %rax, %rdx, %r14 - movq 16(%rsi), %r11 - movq %rdx, (%rdi) - movq %r11, %rdx - mulxq %rax, %rsi, %rbx - movq %r8, %rdx - mulxq %rax, %rax, %rcx - addq %r14, %rax - adcq %rsi, %rcx - adcq $0, %rbx - movq 8(%r9), %rsi - movq %r10, %rdx - mulxq %rsi, %rdx, %r14 - addq %rax, %rdx - movq %rdx, 8(%rdi) - movq %r11, %rdx - mulxq %rsi, %rax, %r15 - movq %r8, %rdx - mulxq %rsi, %rsi, %rdx - adcq %rcx, %rsi - adcq %rbx, %rax - sbbq %rcx, %rcx - andl $1, %ecx - addq %r14, %rsi - adcq %rdx, %rax - adcq %r15, %rcx - movq 16(%r9), %rbx - movq %r10, %rdx - mulxq %rbx, %rdx, %r9 - addq %rsi, %rdx - movq %rdx, 16(%rdi) - movq %r11, %rdx - mulxq %rbx, %rsi, %r10 - movq %r8, %rdx - mulxq %rbx, %rbx, %rdx - adcq %rax, %rbx - adcq %rcx, %rsi - sbbq %rax, %rax - andl $1, %eax - addq %r9, %rbx - movq %rbx, 24(%rdi) - adcq %rdx, %rsi - movq %rsi, 32(%rdi) - adcq %r10, %rax - movq %rax, 40(%rdi) - popq %rbx - popq %r14 - popq %r15 - retq -.Lfunc_end36: - .size mcl_fpDbl_mulPre3Lbmi2, .Lfunc_end36-mcl_fpDbl_mulPre3Lbmi2 - - .globl mcl_fpDbl_sqrPre3Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre3Lbmi2,@function -mcl_fpDbl_sqrPre3Lbmi2: # @mcl_fpDbl_sqrPre3Lbmi2 -# BB#0: - pushq %r14 - pushq %rbx - movq 16(%rsi), %r10 - movq (%rsi), %rcx - movq 8(%rsi), %rsi - movq %rcx, %rdx - mulxq %rcx, %rdx, %rax - movq %rdx, (%rdi) - movq %r10, %rdx - mulxq %rcx, %r11, %r8 - movq %rsi, %rdx - mulxq %rcx, %rdx, %r14 - addq %rdx, %rax - movq %r14, %rbx - adcq %r11, %rbx - movq %r8, %rcx - adcq $0, %rcx - addq %rdx, %rax - movq %rax, 8(%rdi) - movq %r10, %rdx - mulxq %rsi, %rax, %r9 - movq %rsi, %rdx - mulxq %rsi, %rsi, %rdx - adcq %rbx, %rsi - adcq %rax, %rcx - sbbq %rbx, %rbx - andl $1, %ebx - addq %r14, %rsi - adcq %rdx, %rcx - adcq %r9, %rbx - addq %r11, %rsi - movq %rsi, 16(%rdi) - movq %r10, %rdx - mulxq %r10, %rsi, %rdx - adcq %rax, %rcx - adcq %rbx, %rsi - sbbq %rax, %rax - andl $1, %eax - addq %r8, %rcx - movq %rcx, 24(%rdi) - adcq %r9, %rsi - movq %rsi, 32(%rdi) - adcq %rdx, %rax - movq %rax, 40(%rdi) - popq %rbx - popq %r14 - retq -.Lfunc_end37: - .size mcl_fpDbl_sqrPre3Lbmi2, .Lfunc_end37-mcl_fpDbl_sqrPre3Lbmi2 - - .globl mcl_fp_mont3Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont3Lbmi2,@function -mcl_fp_mont3Lbmi2: # @mcl_fp_mont3Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %r15 - movq %r15, -32(%rsp) # 8-byte Spill - movq %rdi, -24(%rsp) # 8-byte Spill - movq 16(%rsi), 
%rdi - movq %rdi, -56(%rsp) # 8-byte Spill - movq (%r15), %rax - movq %rdi, %rdx - mulxq %rax, %r14, %r11 - movq (%rsi), %r12 - movq %r12, -48(%rsp) # 8-byte Spill - movq 8(%rsi), %rdx - movq %rdx, -16(%rsp) # 8-byte Spill - mulxq %rax, %rbx, %r8 - movq %r12, %rdx - mulxq %rax, %r9, %rdi - addq %rbx, %rdi - adcq %r14, %r8 - adcq $0, %r11 - movq -8(%rcx), %r13 - movq (%rcx), %rbx - movq %rbx, -8(%rsp) # 8-byte Spill - movq %r9, %rdx - imulq %r13, %rdx - movq 8(%rcx), %rax - movq %rax, -40(%rsp) # 8-byte Spill - mulxq %rax, %rax, %r10 - mulxq %rbx, %rsi, %rbx - addq %rax, %rbx - movq 16(%rcx), %rbp - mulxq %rbp, %rcx, %rax - movq %rbp, %r14 - adcq %r10, %rcx - adcq $0, %rax - addq %r9, %rsi - adcq %rdi, %rbx - movq 8(%r15), %rdx - adcq %r8, %rcx - adcq %r11, %rax - sbbq %r9, %r9 - andl $1, %r9d - movq -56(%rsp), %r15 # 8-byte Reload - mulxq %r15, %r11, %rdi - mulxq -16(%rsp), %r10, %rsi # 8-byte Folded Reload - mulxq %r12, %r8, %rbp - addq %r10, %rbp - adcq %r11, %rsi - adcq $0, %rdi - addq %rbx, %r8 - adcq %rcx, %rbp - adcq %rax, %rsi - adcq %r9, %rdi - sbbq %r11, %r11 - andl $1, %r11d - movq %r8, %rdx - imulq %r13, %rdx - mulxq %r14, %r9, %rcx - movq %r14, %r12 - movq -40(%rsp), %r14 # 8-byte Reload - mulxq %r14, %r10, %rax - mulxq -8(%rsp), %rdx, %rbx # 8-byte Folded Reload - addq %r10, %rbx - adcq %r9, %rax - adcq $0, %rcx - addq %r8, %rdx - adcq %rbp, %rbx - adcq %rsi, %rax - adcq %rdi, %rcx - adcq $0, %r11 - movq -32(%rsp), %rdx # 8-byte Reload - movq 16(%rdx), %rdx - mulxq %r15, %r9, %rsi - mulxq -16(%rsp), %r10, %r15 # 8-byte Folded Reload - mulxq -48(%rsp), %r8, %rdi # 8-byte Folded Reload - addq %r10, %rdi - adcq %r9, %r15 - adcq $0, %rsi - addq %rbx, %r8 - adcq %rax, %rdi - adcq %rcx, %r15 - adcq %r11, %rsi - sbbq %rbx, %rbx - andl $1, %ebx - imulq %r8, %r13 - movq %r13, %rdx - mulxq %r12, %r9, %rbp - movq %r13, %rdx - mulxq %r14, %r10, %rax - movq %r13, %rdx - movq -8(%rsp), %rcx # 8-byte Reload - mulxq %rcx, %r11, %rdx - addq %r10, %rdx - adcq %r9, %rax - adcq $0, %rbp - addq %r8, %r11 - adcq %rdi, %rdx - adcq %r15, %rax - adcq %rsi, %rbp - adcq $0, %rbx - movq %rdx, %rsi - subq %rcx, %rsi - movq %rax, %rdi - sbbq %r14, %rdi - movq %rbp, %rcx - sbbq %r12, %rcx - sbbq $0, %rbx - andl $1, %ebx - cmovneq %rbp, %rcx - testb %bl, %bl - cmovneq %rdx, %rsi - movq -24(%rsp), %rdx # 8-byte Reload - movq %rsi, (%rdx) - cmovneq %rax, %rdi - movq %rdi, 8(%rdx) - movq %rcx, 16(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end38: - .size mcl_fp_mont3Lbmi2, .Lfunc_end38-mcl_fp_mont3Lbmi2 - - .globl mcl_fp_montNF3Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF3Lbmi2,@function -mcl_fp_montNF3Lbmi2: # @mcl_fp_montNF3Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq %rdx, %r10 - movq %r10, -16(%rsp) # 8-byte Spill - movq %rdi, -8(%rsp) # 8-byte Spill - movq (%rsi), %rcx - movq %rcx, -24(%rsp) # 8-byte Spill - movq 8(%rsi), %rdi - movq %rdi, -32(%rsp) # 8-byte Spill - movq (%r10), %rax - movq %rdi, %rdx - mulxq %rax, %rbx, %r14 - movq %rcx, %rdx - mulxq %rax, %r15, %r12 - movq 16(%rsi), %r11 - addq %rbx, %r12 - movq %r11, %rdx - mulxq %rax, %rsi, %rbx - adcq %r14, %rsi - adcq $0, %rbx - movq -8(%r8), %r9 - movq (%r8), %r14 - movq %r15, %rdx - imulq %r9, %rdx - mulxq %r14, %rbp, %r13 - addq %r15, %rbp - movq 8(%r8), %r15 - mulxq %r15, %rdi, %rbp - adcq %r12, %rdi - movq 16(%r8), %r12 - mulxq %r12, %rax, %r8 - adcq %rsi, %rax - adcq $0, %rbx - addq %r13, %rdi - movq 8(%r10), %rdx - adcq 
%rbp, %rax - adcq %r8, %rbx - movq -32(%rsp), %r10 # 8-byte Reload - mulxq %r10, %rsi, %r8 - mulxq %rcx, %r13, %rbp - addq %rsi, %rbp - mulxq %r11, %rcx, %rsi - adcq %r8, %rcx - adcq $0, %rsi - addq %rdi, %r13 - adcq %rax, %rbp - adcq %rbx, %rcx - adcq $0, %rsi - movq %r13, %rdx - imulq %r9, %rdx - mulxq %r14, %rdi, %rbx - addq %r13, %rdi - mulxq %r15, %rax, %rdi - adcq %rbp, %rax - mulxq %r12, %rbp, %rdx - adcq %rcx, %rbp - adcq $0, %rsi - addq %rbx, %rax - adcq %rdi, %rbp - adcq %rdx, %rsi - movq -16(%rsp), %rcx # 8-byte Reload - movq 16(%rcx), %rdx - mulxq %r10, %rbx, %r8 - mulxq -24(%rsp), %r10, %rdi # 8-byte Folded Reload - addq %rbx, %rdi - mulxq %r11, %rcx, %rbx - adcq %r8, %rcx - adcq $0, %rbx - addq %rax, %r10 - adcq %rbp, %rdi - adcq %rsi, %rcx - adcq $0, %rbx - imulq %r10, %r9 - movq %r9, %rdx - mulxq %r14, %rdx, %r8 - addq %r10, %rdx - movq %r9, %rdx - mulxq %r12, %rbp, %rsi - movq %r9, %rdx - mulxq %r15, %rax, %rdx - adcq %rdi, %rax - adcq %rcx, %rbp - adcq $0, %rbx - addq %r8, %rax - adcq %rdx, %rbp - adcq %rsi, %rbx - movq %rax, %rcx - subq %r14, %rcx - movq %rbp, %rdx - sbbq %r15, %rdx - movq %rbx, %rsi - sbbq %r12, %rsi - movq %rsi, %rdi - sarq $63, %rdi - cmovsq %rax, %rcx - movq -8(%rsp), %rax # 8-byte Reload - movq %rcx, (%rax) - cmovsq %rbp, %rdx - movq %rdx, 8(%rax) - cmovsq %rbx, %rsi - movq %rsi, 16(%rax) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end39: - .size mcl_fp_montNF3Lbmi2, .Lfunc_end39-mcl_fp_montNF3Lbmi2 - - .globl mcl_fp_montRed3Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed3Lbmi2,@function -mcl_fp_montRed3Lbmi2: # @mcl_fp_montRed3Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rdi, -8(%rsp) # 8-byte Spill - movq -8(%rcx), %r15 - movq (%rcx), %r9 - movq (%rsi), %rbx - movq %rbx, %rdx - imulq %r15, %rdx - movq 16(%rcx), %rax - movq %rax, -16(%rsp) # 8-byte Spill - mulxq %rax, %r14, %r11 - movq %rax, %rbp - movq 8(%rcx), %r10 - mulxq %r10, %rax, %r13 - mulxq %r9, %rdx, %rcx - addq %rax, %rcx - adcq %r14, %r13 - adcq $0, %r11 - movq 40(%rsi), %r14 - movq 32(%rsi), %r12 - addq %rbx, %rdx - adcq 8(%rsi), %rcx - adcq 16(%rsi), %r13 - adcq 24(%rsi), %r11 - adcq $0, %r12 - adcq $0, %r14 - sbbq %rsi, %rsi - andl $1, %esi - movq %rcx, %rdx - imulq %r15, %rdx - mulxq %rbp, %rbp, %rdi - mulxq %r10, %r8, %rbx - mulxq %r9, %rdx, %rax - addq %r8, %rax - adcq %rbp, %rbx - adcq $0, %rdi - addq %rcx, %rdx - adcq %r13, %rax - adcq %r11, %rbx - adcq %r12, %rdi - adcq $0, %r14 - adcq $0, %rsi - imulq %rax, %r15 - movq %r15, %rdx - movq -16(%rsp), %r13 # 8-byte Reload - mulxq %r13, %r8, %rcx - movq %r15, %rdx - mulxq %r10, %r11, %r12 - movq %r15, %rdx - mulxq %r9, %r15, %rdx - addq %r11, %rdx - adcq %r8, %r12 - adcq $0, %rcx - addq %rax, %r15 - adcq %rbx, %rdx - adcq %rdi, %r12 - adcq %r14, %rcx - adcq $0, %rsi - movq %rdx, %rax - subq %r9, %rax - movq %r12, %rdi - sbbq %r10, %rdi - movq %rcx, %rbp - sbbq %r13, %rbp - sbbq $0, %rsi - andl $1, %esi - cmovneq %rcx, %rbp - testb %sil, %sil - cmovneq %rdx, %rax - movq -8(%rsp), %rcx # 8-byte Reload - movq %rax, (%rcx) - cmovneq %r12, %rdi - movq %rdi, 8(%rcx) - movq %rbp, 16(%rcx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end40: - .size mcl_fp_montRed3Lbmi2, .Lfunc_end40-mcl_fp_montRed3Lbmi2 - - .globl mcl_fp_addPre3Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre3Lbmi2,@function -mcl_fp_addPre3Lbmi2: # @mcl_fp_addPre3Lbmi2 -# BB#0: - movq 16(%rdx), %rax - movq (%rdx), %rcx - 
movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rax - movq %rcx, (%rdi) - movq %rdx, 8(%rdi) - movq %rax, 16(%rdi) - sbbq %rax, %rax - andl $1, %eax - retq -.Lfunc_end41: - .size mcl_fp_addPre3Lbmi2, .Lfunc_end41-mcl_fp_addPre3Lbmi2 - - .globl mcl_fp_subPre3Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre3Lbmi2,@function -mcl_fp_subPre3Lbmi2: # @mcl_fp_subPre3Lbmi2 -# BB#0: - movq 16(%rsi), %r8 - movq (%rsi), %rcx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rcx - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %r8 - movq %rcx, (%rdi) - movq %rsi, 8(%rdi) - movq %r8, 16(%rdi) - sbbq $0, %rax - andl $1, %eax - retq -.Lfunc_end42: - .size mcl_fp_subPre3Lbmi2, .Lfunc_end42-mcl_fp_subPre3Lbmi2 - - .globl mcl_fp_shr1_3Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_3Lbmi2,@function -mcl_fp_shr1_3Lbmi2: # @mcl_fp_shr1_3Lbmi2 -# BB#0: - movq 16(%rsi), %rax - movq (%rsi), %rcx - movq 8(%rsi), %rdx - shrdq $1, %rdx, %rcx - movq %rcx, (%rdi) - shrdq $1, %rax, %rdx - movq %rdx, 8(%rdi) - shrq %rax - movq %rax, 16(%rdi) - retq -.Lfunc_end43: - .size mcl_fp_shr1_3Lbmi2, .Lfunc_end43-mcl_fp_shr1_3Lbmi2 - - .globl mcl_fp_add3Lbmi2 - .align 16, 0x90 - .type mcl_fp_add3Lbmi2,@function -mcl_fp_add3Lbmi2: # @mcl_fp_add3Lbmi2 -# BB#0: - movq 16(%rdx), %r8 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r8 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r8, 16(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq 16(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne .LBB44_2 -# BB#1: # %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r8, 16(%rdi) -.LBB44_2: # %carry - retq -.Lfunc_end44: - .size mcl_fp_add3Lbmi2, .Lfunc_end44-mcl_fp_add3Lbmi2 - - .globl mcl_fp_addNF3Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF3Lbmi2,@function -mcl_fp_addNF3Lbmi2: # @mcl_fp_addNF3Lbmi2 -# BB#0: - movq 16(%rdx), %r8 - movq (%rdx), %r10 - movq 8(%rdx), %r9 - addq (%rsi), %r10 - adcq 8(%rsi), %r9 - adcq 16(%rsi), %r8 - movq %r10, %rsi - subq (%rcx), %rsi - movq %r9, %rdx - sbbq 8(%rcx), %rdx - movq %r8, %rax - sbbq 16(%rcx), %rax - movq %rax, %rcx - sarq $63, %rcx - cmovsq %r10, %rsi - movq %rsi, (%rdi) - cmovsq %r9, %rdx - movq %rdx, 8(%rdi) - cmovsq %r8, %rax - movq %rax, 16(%rdi) - retq -.Lfunc_end45: - .size mcl_fp_addNF3Lbmi2, .Lfunc_end45-mcl_fp_addNF3Lbmi2 - - .globl mcl_fp_sub3Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub3Lbmi2,@function -mcl_fp_sub3Lbmi2: # @mcl_fp_sub3Lbmi2 -# BB#0: - movq 16(%rsi), %r8 - movq (%rsi), %rax - movq 8(%rsi), %r9 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r9 - sbbq 16(%rdx), %r8 - movq %rax, (%rdi) - movq %r9, 8(%rdi) - movq %r8, 16(%rdi) - sbbq $0, %rsi - testb $1, %sil - jne .LBB46_2 -# BB#1: # %nocarry - retq -.LBB46_2: # %carry - movq 8(%rcx), %rdx - movq 16(%rcx), %rsi - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %r9, %rdx - movq %rdx, 8(%rdi) - adcq %r8, %rsi - movq %rsi, 16(%rdi) - retq -.Lfunc_end46: - .size mcl_fp_sub3Lbmi2, .Lfunc_end46-mcl_fp_sub3Lbmi2 - - .globl mcl_fp_subNF3Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF3Lbmi2,@function -mcl_fp_subNF3Lbmi2: # @mcl_fp_subNF3Lbmi2 -# BB#0: - movq 16(%rsi), %r10 - movq (%rsi), %r8 - movq 8(%rsi), %r9 - subq (%rdx), %r8 - sbbq 8(%rdx), %r9 - sbbq 16(%rdx), %r10 - movq %r10, %rdx - sarq $63, %rdx - movq %rdx, %rsi - shldq $1, %r10, %rsi - andq (%rcx), %rsi - movq 16(%rcx), %rax - andq %rdx, %rax - andq 8(%rcx), %rdx - addq %r8, %rsi - movq %rsi, (%rdi) - adcq %r9, %rdx - movq %rdx, 8(%rdi) - adcq %r10, %rax - 
movq %rax, 16(%rdi) - retq -.Lfunc_end47: - .size mcl_fp_subNF3Lbmi2, .Lfunc_end47-mcl_fp_subNF3Lbmi2 - - .globl mcl_fpDbl_add3Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add3Lbmi2,@function -mcl_fpDbl_add3Lbmi2: # @mcl_fpDbl_add3Lbmi2 -# BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq 40(%rdx), %r10 - movq 40(%rsi), %r8 - movq 32(%rdx), %r11 - movq 24(%rdx), %r14 - movq 24(%rsi), %r15 - movq 32(%rsi), %r9 - movq 16(%rdx), %rbx - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rbx - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %rbx, 16(%rdi) - adcq %r14, %r15 - adcq %r11, %r9 - adcq %r10, %r8 - sbbq %rax, %rax - andl $1, %eax - movq %r15, %rdx - subq (%rcx), %rdx - movq %r9, %rsi - sbbq 8(%rcx), %rsi - movq %r8, %rbx - sbbq 16(%rcx), %rbx - sbbq $0, %rax - andl $1, %eax - cmovneq %r15, %rdx - movq %rdx, 24(%rdi) - testb %al, %al - cmovneq %r9, %rsi - movq %rsi, 32(%rdi) - cmovneq %r8, %rbx - movq %rbx, 40(%rdi) - popq %rbx - popq %r14 - popq %r15 - retq -.Lfunc_end48: - .size mcl_fpDbl_add3Lbmi2, .Lfunc_end48-mcl_fpDbl_add3Lbmi2 - - .globl mcl_fpDbl_sub3Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub3Lbmi2,@function -mcl_fpDbl_sub3Lbmi2: # @mcl_fpDbl_sub3Lbmi2 -# BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 40(%rdx), %r10 - movq 40(%rsi), %r8 - movq 32(%rsi), %r9 - movq 24(%rsi), %r11 - movq 16(%rsi), %r14 - movq (%rsi), %rbx - movq 8(%rsi), %rax - xorl %esi, %esi - subq (%rdx), %rbx - sbbq 8(%rdx), %rax - movq 24(%rdx), %r15 - movq 32(%rdx), %r12 - sbbq 16(%rdx), %r14 - movq %rbx, (%rdi) - movq %rax, 8(%rdi) - movq %r14, 16(%rdi) - sbbq %r15, %r11 - sbbq %r12, %r9 - sbbq %r10, %r8 - movl $0, %eax - sbbq $0, %rax - andl $1, %eax - movq (%rcx), %rdx - cmoveq %rsi, %rdx - testb %al, %al - movq 16(%rcx), %rax - cmoveq %rsi, %rax - cmovneq 8(%rcx), %rsi - addq %r11, %rdx - movq %rdx, 24(%rdi) - adcq %r9, %rsi - movq %rsi, 32(%rdi) - adcq %r8, %rax - movq %rax, 40(%rdi) - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq -.Lfunc_end49: - .size mcl_fpDbl_sub3Lbmi2, .Lfunc_end49-mcl_fpDbl_sub3Lbmi2 - - .globl mcl_fp_mulUnitPre4Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre4Lbmi2,@function -mcl_fp_mulUnitPre4Lbmi2: # @mcl_fp_mulUnitPre4Lbmi2 -# BB#0: - mulxq 24(%rsi), %r8, %r11 - mulxq 16(%rsi), %r9, %rax - mulxq 8(%rsi), %r10, %rcx - mulxq (%rsi), %rdx, %rsi - movq %rdx, (%rdi) - addq %r10, %rsi - movq %rsi, 8(%rdi) - adcq %r9, %rcx - movq %rcx, 16(%rdi) - adcq %r8, %rax - movq %rax, 24(%rdi) - adcq $0, %r11 - movq %r11, 32(%rdi) - retq -.Lfunc_end50: - .size mcl_fp_mulUnitPre4Lbmi2, .Lfunc_end50-mcl_fp_mulUnitPre4Lbmi2 - - .globl mcl_fpDbl_mulPre4Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre4Lbmi2,@function -mcl_fpDbl_mulPre4Lbmi2: # @mcl_fpDbl_mulPre4Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq (%rsi), %r14 - movq 8(%rsi), %r10 - movq (%rdx), %rcx - movq %rdx, %rbp - movq %r14, %rdx - mulxq %rcx, %rdx, %r15 - movq 24(%rsi), %r11 - movq 16(%rsi), %r9 - movq %rdx, (%rdi) - movq %r10, %rdx - mulxq %rcx, %rbx, %r12 - addq %r15, %rbx - movq %r9, %rdx - mulxq %rcx, %r13, %r15 - adcq %r12, %r13 - movq %r11, %rdx - mulxq %rcx, %rcx, %r12 - adcq %r15, %rcx - adcq $0, %r12 - movq 8(%rbp), %rax - movq %r14, %rdx - mulxq %rax, %r8, %rdx - movq %rdx, -8(%rsp) # 8-byte Spill - addq %rbx, %r8 - movq %r10, %rdx - mulxq %rax, %r15, %rdx - movq %rdx, -16(%rsp) # 8-byte Spill - adcq %r13, %r15 - movq %r9, %rdx - mulxq %rax, %rbx, %r13 - adcq %rcx, %rbx - movq %r11, %rdx - mulxq 
%rax, %rcx, %rax - adcq %r12, %rcx - sbbq %r12, %r12 - andl $1, %r12d - addq -8(%rsp), %r15 # 8-byte Folded Reload - adcq -16(%rsp), %rbx # 8-byte Folded Reload - adcq %r13, %rcx - movq %r8, 8(%rdi) - adcq %rax, %r12 - movq %rbp, %r13 - movq 16(%r13), %rax - movq %r14, %rdx - mulxq %rax, %rdx, %r8 - addq %r15, %rdx - movq %rdx, 16(%rdi) - movq %r10, %rdx - mulxq %rax, %rbp, %r10 - adcq %rbx, %rbp - movq %r11, %rdx - mulxq %rax, %r14, %r11 - movq %r9, %rdx - mulxq %rax, %r15, %rdx - adcq %rcx, %r15 - adcq %r12, %r14 - sbbq %rcx, %rcx - andl $1, %ecx - addq %r8, %rbp - adcq %r10, %r15 - adcq %rdx, %r14 - adcq %r11, %rcx - movq 24(%r13), %rdx - mulxq 24(%rsi), %rbx, %r8 - mulxq (%rsi), %rax, %r9 - addq %rbp, %rax - mulxq 16(%rsi), %rbp, %r10 - mulxq 8(%rsi), %rsi, %rdx - movq %rax, 24(%rdi) - adcq %r15, %rsi - adcq %r14, %rbp - adcq %rcx, %rbx - sbbq %rax, %rax - andl $1, %eax - addq %r9, %rsi - movq %rsi, 32(%rdi) - adcq %rdx, %rbp - movq %rbp, 40(%rdi) - adcq %r10, %rbx - movq %rbx, 48(%rdi) - adcq %r8, %rax - movq %rax, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end51: - .size mcl_fpDbl_mulPre4Lbmi2, .Lfunc_end51-mcl_fpDbl_mulPre4Lbmi2 - - .globl mcl_fpDbl_sqrPre4Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre4Lbmi2,@function -mcl_fpDbl_sqrPre4Lbmi2: # @mcl_fpDbl_sqrPre4Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 24(%rsi), %r8 - movq 16(%rsi), %r9 - movq (%rsi), %rcx - movq 8(%rsi), %rax - movq %rcx, %rdx - mulxq %rcx, %rdx, %r11 - movq %rdx, (%rdi) - movq %r9, %rdx - mulxq %rcx, %rbp, %r10 - movq %rbp, -16(%rsp) # 8-byte Spill - movq %r10, -8(%rsp) # 8-byte Spill - movq %rax, %rdx - mulxq %rcx, %r12, %r15 - addq %r12, %r11 - movq %r15, %rbx - adcq %rbp, %rbx - movq %r8, %rdx - mulxq %rcx, %rcx, %r13 - adcq %r10, %rcx - adcq $0, %r13 - addq %r12, %r11 - movq %rax, %rdx - mulxq %rax, %rbp, %r12 - adcq %rbx, %rbp - movq %r8, %rdx - mulxq %rax, %r10, %rbx - movq %r9, %rdx - mulxq %rax, %r14, %rdx - adcq %r14, %rcx - adcq %r13, %r10 - sbbq %rax, %rax - andl $1, %eax - addq %r15, %rbp - adcq %r12, %rcx - adcq %rdx, %r10 - movq %rdx, %r12 - adcq %rbx, %rax - movq %r11, 8(%rdi) - addq -16(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 16(%rdi) - movq %r8, %rdx - mulxq %r9, %r11, %r8 - movq %r9, %rdx - mulxq %r9, %r15, %rdx - adcq %r14, %rcx - adcq %r10, %r15 - adcq %rax, %r11 - sbbq %rax, %rax - andl $1, %eax - addq -8(%rsp), %rcx # 8-byte Folded Reload - adcq %r12, %r15 - adcq %rdx, %r11 - adcq %r8, %rax - movq 24(%rsi), %rdx - mulxq 16(%rsi), %rbx, %r8 - mulxq 8(%rsi), %rbp, %r9 - mulxq (%rsi), %rsi, %r10 - addq %rcx, %rsi - movq %rsi, 24(%rdi) - adcq %r15, %rbp - adcq %r11, %rbx - mulxq %rdx, %rdx, %rcx - adcq %rax, %rdx - sbbq %rax, %rax - andl $1, %eax - addq %r10, %rbp - movq %rbp, 32(%rdi) - adcq %r9, %rbx - movq %rbx, 40(%rdi) - adcq %r8, %rdx - movq %rdx, 48(%rdi) - adcq %rcx, %rax - movq %rax, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end52: - .size mcl_fpDbl_sqrPre4Lbmi2, .Lfunc_end52-mcl_fpDbl_sqrPre4Lbmi2 - - .globl mcl_fp_mont4Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont4Lbmi2,@function -mcl_fp_mont4Lbmi2: # @mcl_fp_mont4Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rdi, -88(%rsp) # 8-byte Spill - movq 24(%rsi), %rdi - movq %rdi, -40(%rsp) # 8-byte Spill - movq (%rdx), %rax - movq %rdi, %rdx - mulxq %rax, %r10, %r15 - movq 
16(%rsi), %rdx - movq %rdx, -48(%rsp) # 8-byte Spill - mulxq %rax, %rbx, %r11 - movq (%rsi), %rdi - movq %rdi, -56(%rsp) # 8-byte Spill - movq 8(%rsi), %rdx - movq %rdx, -64(%rsp) # 8-byte Spill - mulxq %rax, %rbp, %r14 - movq %rdi, %rdx - mulxq %rax, %r13, %r12 - addq %rbp, %r12 - adcq %rbx, %r14 - adcq %r10, %r11 - adcq $0, %r15 - movq -8(%rcx), %rax - movq %rax, -16(%rsp) # 8-byte Spill - movq (%rcx), %rdi - movq %rdi, -24(%rsp) # 8-byte Spill - movq %r13, %rdx - imulq %rax, %rdx - movq 24(%rcx), %rsi - movq %rsi, -72(%rsp) # 8-byte Spill - movq 16(%rcx), %rbp - movq %rbp, -8(%rsp) # 8-byte Spill - movq 8(%rcx), %rax - movq %rax, -80(%rsp) # 8-byte Spill - mulxq %rsi, %r10, %r8 - mulxq %rbp, %r9, %rbx - mulxq %rax, %rsi, %rcx - mulxq %rdi, %rdx, %rbp - addq %rsi, %rbp - adcq %r9, %rcx - adcq %r10, %rbx - adcq $0, %r8 - addq %r13, %rdx - adcq %r12, %rbp - adcq %r14, %rcx - adcq %r11, %rbx - adcq %r15, %r8 - sbbq %rax, %rax - andl $1, %eax - movq -32(%rsp), %rdx # 8-byte Reload - movq 8(%rdx), %rdx - mulxq -40(%rsp), %r12, %r14 # 8-byte Folded Reload - mulxq -48(%rsp), %r15, %r11 # 8-byte Folded Reload - mulxq -64(%rsp), %r9, %rdi # 8-byte Folded Reload - mulxq -56(%rsp), %r10, %rsi # 8-byte Folded Reload - addq %r9, %rsi - adcq %r15, %rdi - adcq %r12, %r11 - adcq $0, %r14 - addq %rbp, %r10 - adcq %rcx, %rsi - adcq %rbx, %rdi - adcq %r8, %r11 - adcq %rax, %r14 - sbbq %rbx, %rbx - andl $1, %ebx - movq %r10, %rdx - imulq -16(%rsp), %rdx # 8-byte Folded Reload - mulxq -72(%rsp), %r15, %r9 # 8-byte Folded Reload - mulxq -8(%rsp), %r12, %r8 # 8-byte Folded Reload - movq -80(%rsp), %r13 # 8-byte Reload - mulxq %r13, %rbp, %rcx - mulxq -24(%rsp), %rdx, %rax # 8-byte Folded Reload - addq %rbp, %rax - adcq %r12, %rcx - adcq %r15, %r8 - adcq $0, %r9 - addq %r10, %rdx - adcq %rsi, %rax - adcq %rdi, %rcx - adcq %r11, %r8 - adcq %r14, %r9 - adcq $0, %rbx - movq -32(%rsp), %rdx # 8-byte Reload - movq 16(%rdx), %rdx - mulxq -40(%rsp), %r15, %r11 # 8-byte Folded Reload - mulxq -48(%rsp), %r12, %r14 # 8-byte Folded Reload - mulxq -64(%rsp), %rsi, %rbp # 8-byte Folded Reload - mulxq -56(%rsp), %r10, %rdi # 8-byte Folded Reload - addq %rsi, %rdi - adcq %r12, %rbp - adcq %r15, %r14 - adcq $0, %r11 - addq %rax, %r10 - adcq %rcx, %rdi - adcq %r8, %rbp - adcq %r9, %r14 - adcq %rbx, %r11 - sbbq %rbx, %rbx - movq %r10, %rdx - imulq -16(%rsp), %rdx # 8-byte Folded Reload - mulxq %r13, %rcx, %rsi - mulxq -24(%rsp), %r8, %rax # 8-byte Folded Reload - addq %rcx, %rax - mulxq -8(%rsp), %rcx, %r15 # 8-byte Folded Reload - adcq %rsi, %rcx - movq -72(%rsp), %r13 # 8-byte Reload - mulxq %r13, %r9, %rsi - adcq %r15, %r9 - adcq $0, %rsi - andl $1, %ebx - addq %r10, %r8 - adcq %rdi, %rax - adcq %rbp, %rcx - adcq %r14, %r9 - adcq %r11, %rsi - adcq $0, %rbx - movq -32(%rsp), %rdx # 8-byte Reload - movq 24(%rdx), %rdx - mulxq -40(%rsp), %r11, %r8 # 8-byte Folded Reload - mulxq -48(%rsp), %r15, %rdi # 8-byte Folded Reload - mulxq -64(%rsp), %r12, %r14 # 8-byte Folded Reload - mulxq -56(%rsp), %r10, %rbp # 8-byte Folded Reload - addq %r12, %rbp - adcq %r15, %r14 - adcq %r11, %rdi - adcq $0, %r8 - addq %rax, %r10 - adcq %rcx, %rbp - adcq %r9, %r14 - adcq %rsi, %rdi - adcq %rbx, %r8 - sbbq %rax, %rax - andl $1, %eax - movq -16(%rsp), %rdx # 8-byte Reload - imulq %r10, %rdx - mulxq %r13, %rcx, %rsi - movq %rcx, -16(%rsp) # 8-byte Spill - mulxq -8(%rsp), %r11, %rbx # 8-byte Folded Reload - mulxq -80(%rsp), %r15, %rcx # 8-byte Folded Reload - movq -24(%rsp), %r9 # 8-byte Reload - mulxq %r9, %r12, %r13 - addq %r15, %r13 - adcq %r11, 
%rcx - adcq -16(%rsp), %rbx # 8-byte Folded Reload - adcq $0, %rsi - addq %r10, %r12 - adcq %rbp, %r13 - adcq %r14, %rcx - adcq %rdi, %rbx - adcq %r8, %rsi - adcq $0, %rax - movq %r13, %rdi - subq %r9, %rdi - movq %rcx, %rbp - sbbq -80(%rsp), %rbp # 8-byte Folded Reload - movq %rbx, %r8 - sbbq -8(%rsp), %r8 # 8-byte Folded Reload - movq %rsi, %rdx - sbbq -72(%rsp), %rdx # 8-byte Folded Reload - sbbq $0, %rax - andl $1, %eax - cmovneq %rsi, %rdx - testb %al, %al - cmovneq %r13, %rdi - movq -88(%rsp), %rax # 8-byte Reload - movq %rdi, (%rax) - cmovneq %rcx, %rbp - movq %rbp, 8(%rax) - cmovneq %rbx, %r8 - movq %r8, 16(%rax) - movq %rdx, 24(%rax) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end53: - .size mcl_fp_mont4Lbmi2, .Lfunc_end53-mcl_fp_mont4Lbmi2 - - .globl mcl_fp_montNF4Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF4Lbmi2,@function -mcl_fp_montNF4Lbmi2: # @mcl_fp_montNF4Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rdi, -80(%rsp) # 8-byte Spill - movq (%rsi), %rdi - movq %rdi, -64(%rsp) # 8-byte Spill - movq 8(%rsi), %rbp - movq %rbp, -72(%rsp) # 8-byte Spill - movq (%rdx), %rax - movq %rdx, %r15 - movq %rbp, %rdx - mulxq %rax, %rbp, %r9 - movq %rdi, %rdx - mulxq %rax, %r12, %rbx - movq 16(%rsi), %rdx - movq %rdx, -48(%rsp) # 8-byte Spill - addq %rbp, %rbx - mulxq %rax, %r14, %rbp - adcq %r9, %r14 - movq 24(%rsi), %rdx - movq %rdx, -16(%rsp) # 8-byte Spill - mulxq %rax, %r8, %rdi - adcq %rbp, %r8 - adcq $0, %rdi - movq -8(%rcx), %r13 - movq (%rcx), %rax - movq %rax, -56(%rsp) # 8-byte Spill - movq %r12, %rdx - imulq %r13, %rdx - mulxq %rax, %rax, %r11 - addq %r12, %rax - movq 8(%rcx), %rax - movq %rax, -24(%rsp) # 8-byte Spill - mulxq %rax, %rbp, %r10 - adcq %rbx, %rbp - movq 16(%rcx), %rax - movq %rax, -40(%rsp) # 8-byte Spill - mulxq %rax, %rsi, %rbx - adcq %r14, %rsi - movq 24(%rcx), %rax - movq %rax, -8(%rsp) # 8-byte Spill - mulxq %rax, %rcx, %rdx - adcq %r8, %rcx - adcq $0, %rdi - addq %r11, %rbp - adcq %r10, %rsi - adcq %rbx, %rcx - adcq %rdx, %rdi - movq 8(%r15), %rdx - movq -72(%rsp), %r12 # 8-byte Reload - mulxq %r12, %rbx, %r9 - movq -64(%rsp), %r15 # 8-byte Reload - mulxq %r15, %r10, %r11 - addq %rbx, %r11 - mulxq -48(%rsp), %rax, %r8 # 8-byte Folded Reload - adcq %r9, %rax - mulxq -16(%rsp), %r9, %rbx # 8-byte Folded Reload - adcq %r8, %r9 - adcq $0, %rbx - addq %rbp, %r10 - adcq %rsi, %r11 - adcq %rcx, %rax - adcq %rdi, %r9 - adcq $0, %rbx - movq %r10, %rdx - imulq %r13, %rdx - movq -56(%rsp), %r14 # 8-byte Reload - mulxq %r14, %rcx, %r8 - addq %r10, %rcx - mulxq -24(%rsp), %r10, %rdi # 8-byte Folded Reload - adcq %r11, %r10 - mulxq -40(%rsp), %rcx, %rsi # 8-byte Folded Reload - adcq %rax, %rcx - mulxq -8(%rsp), %rax, %rdx # 8-byte Folded Reload - adcq %r9, %rax - adcq $0, %rbx - addq %r8, %r10 - adcq %rdi, %rcx - adcq %rsi, %rax - adcq %rdx, %rbx - movq -32(%rsp), %rdx # 8-byte Reload - movq 16(%rdx), %rdx - mulxq %r12, %rsi, %r8 - mulxq %r15, %r11, %rbp - addq %rsi, %rbp - movq -48(%rsp), %r12 # 8-byte Reload - mulxq %r12, %rdi, %r9 - adcq %r8, %rdi - mulxq -16(%rsp), %r8, %rsi # 8-byte Folded Reload - adcq %r9, %r8 - adcq $0, %rsi - addq %r10, %r11 - adcq %rcx, %rbp - adcq %rax, %rdi - adcq %rbx, %r8 - adcq $0, %rsi - movq %r11, %rdx - imulq %r13, %rdx - mulxq %r14, %rax, %r10 - addq %r11, %rax - movq -24(%rsp), %r14 # 8-byte Reload - mulxq %r14, %r9, %rbx - adcq %rbp, %r9 - movq -40(%rsp), %r15 # 8-byte Reload - mulxq %r15, %rax, %rbp 
- adcq %rdi, %rax - mulxq -8(%rsp), %rcx, %rdx # 8-byte Folded Reload - adcq %r8, %rcx - adcq $0, %rsi - addq %r10, %r9 - adcq %rbx, %rax - adcq %rbp, %rcx - adcq %rdx, %rsi - movq -32(%rsp), %rdx # 8-byte Reload - movq 24(%rdx), %rdx - mulxq -72(%rsp), %rbx, %r8 # 8-byte Folded Reload - mulxq -64(%rsp), %r11, %rbp # 8-byte Folded Reload - addq %rbx, %rbp - mulxq %r12, %rdi, %r10 - adcq %r8, %rdi - mulxq -16(%rsp), %r8, %rbx # 8-byte Folded Reload - adcq %r10, %r8 - adcq $0, %rbx - addq %r9, %r11 - adcq %rax, %rbp - adcq %rcx, %rdi - adcq %rsi, %r8 - adcq $0, %rbx - imulq %r11, %r13 - movq %r13, %rdx - movq -56(%rsp), %r12 # 8-byte Reload - mulxq %r12, %rcx, %r9 - addq %r11, %rcx - movq %r13, %rdx - mulxq %r14, %r11, %r10 - adcq %rbp, %r11 - movq %r13, %rdx - movq %r15, %rsi - mulxq %rsi, %rax, %rcx - adcq %rdi, %rax - movq %r13, %rdx - movq -8(%rsp), %rbp # 8-byte Reload - mulxq %rbp, %r15, %rdx - adcq %r8, %r15 - adcq $0, %rbx - addq %r9, %r11 - adcq %r10, %rax - adcq %rcx, %r15 - adcq %rdx, %rbx - movq %r11, %rcx - subq %r12, %rcx - movq %rax, %rdx - sbbq %r14, %rdx - movq %r15, %rdi - sbbq %rsi, %rdi - movq %rbx, %rsi - sbbq %rbp, %rsi - cmovsq %r11, %rcx - movq -80(%rsp), %rbp # 8-byte Reload - movq %rcx, (%rbp) - cmovsq %rax, %rdx - movq %rdx, 8(%rbp) - cmovsq %r15, %rdi - movq %rdi, 16(%rbp) - cmovsq %rbx, %rsi - movq %rsi, 24(%rbp) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end54: - .size mcl_fp_montNF4Lbmi2, .Lfunc_end54-mcl_fp_montNF4Lbmi2 - - .globl mcl_fp_montRed4Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed4Lbmi2,@function -mcl_fp_montRed4Lbmi2: # @mcl_fp_montRed4Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rdi, -56(%rsp) # 8-byte Spill - movq -8(%rcx), %r13 - movq (%rcx), %rax - movq %rax, -24(%rsp) # 8-byte Spill - movq (%rsi), %r10 - movq %r10, %rdx - imulq %r13, %rdx - movq 24(%rcx), %rdi - movq %rdi, -48(%rsp) # 8-byte Spill - mulxq %rdi, %r9, %r15 - movq %rdi, %r14 - movq 16(%rcx), %rdi - movq %rdi, -8(%rsp) # 8-byte Spill - mulxq %rdi, %rdi, %rbx - movq 8(%rcx), %rcx - movq %rcx, -16(%rsp) # 8-byte Spill - mulxq %rcx, %rcx, %r8 - mulxq %rax, %rdx, %rbp - addq %rcx, %rbp - adcq %rdi, %r8 - adcq %r9, %rbx - adcq $0, %r15 - movq 56(%rsi), %r11 - movq 48(%rsi), %rcx - addq %r10, %rdx - movq 40(%rsi), %r12 - adcq 8(%rsi), %rbp - adcq 16(%rsi), %r8 - adcq 24(%rsi), %rbx - adcq 32(%rsi), %r15 - adcq $0, %r12 - adcq $0, %rcx - movq %rcx, -32(%rsp) # 8-byte Spill - adcq $0, %r11 - sbbq %rsi, %rsi - andl $1, %esi - movq %rbp, %rdx - imulq %r13, %rdx - mulxq %r14, %rax, %r9 - movq %rax, -40(%rsp) # 8-byte Spill - mulxq -8(%rsp), %r14, %rdi # 8-byte Folded Reload - mulxq -16(%rsp), %r10, %rcx # 8-byte Folded Reload - mulxq -24(%rsp), %rdx, %rax # 8-byte Folded Reload - addq %r10, %rax - adcq %r14, %rcx - adcq -40(%rsp), %rdi # 8-byte Folded Reload - adcq $0, %r9 - addq %rbp, %rdx - adcq %r8, %rax - adcq %rbx, %rcx - adcq %r15, %rdi - adcq %r12, %r9 - adcq $0, -32(%rsp) # 8-byte Folded Spill - adcq $0, %r11 - movq %r11, -40(%rsp) # 8-byte Spill - adcq $0, %rsi - movq %rax, %rdx - imulq %r13, %rdx - movq -48(%rsp), %r15 # 8-byte Reload - mulxq %r15, %rbp, %r8 - movq %rbp, -64(%rsp) # 8-byte Spill - movq -8(%rsp), %r11 # 8-byte Reload - mulxq %r11, %rbx, %r10 - movq %rbx, -72(%rsp) # 8-byte Spill - mulxq -16(%rsp), %r12, %rbp # 8-byte Folded Reload - movq -24(%rsp), %r14 # 8-byte Reload - mulxq %r14, %rdx, %rbx - addq %r12, %rbx - adcq -72(%rsp), %rbp # 8-byte Folded 
Reload - adcq -64(%rsp), %r10 # 8-byte Folded Reload - adcq $0, %r8 - addq %rax, %rdx - adcq %rcx, %rbx - adcq %rdi, %rbp - adcq %r9, %r10 - adcq -32(%rsp), %r8 # 8-byte Folded Reload - adcq $0, -40(%rsp) # 8-byte Folded Spill - adcq $0, %rsi - imulq %rbx, %r13 - movq %r13, %rdx - mulxq %r15, %rax, %rdi - movq %rax, -32(%rsp) # 8-byte Spill - movq %r13, %rdx - mulxq %r11, %r9, %rax - movq %r13, %rdx - movq -16(%rsp), %r11 # 8-byte Reload - mulxq %r11, %r12, %rcx - movq %r13, %rdx - mulxq %r14, %r15, %r13 - addq %r12, %r13 - adcq %r9, %rcx - adcq -32(%rsp), %rax # 8-byte Folded Reload - adcq $0, %rdi - addq %rbx, %r15 - adcq %rbp, %r13 - adcq %r10, %rcx - adcq %r8, %rax - adcq -40(%rsp), %rdi # 8-byte Folded Reload - adcq $0, %rsi - movq %r13, %rdx - subq %r14, %rdx - movq %rcx, %rbp - sbbq %r11, %rbp - movq %rax, %r8 - sbbq -8(%rsp), %r8 # 8-byte Folded Reload - movq %rdi, %rbx - sbbq -48(%rsp), %rbx # 8-byte Folded Reload - sbbq $0, %rsi - andl $1, %esi - cmovneq %rdi, %rbx - testb %sil, %sil - cmovneq %r13, %rdx - movq -56(%rsp), %rsi # 8-byte Reload - movq %rdx, (%rsi) - cmovneq %rcx, %rbp - movq %rbp, 8(%rsi) - cmovneq %rax, %r8 - movq %r8, 16(%rsi) - movq %rbx, 24(%rsi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end55: - .size mcl_fp_montRed4Lbmi2, .Lfunc_end55-mcl_fp_montRed4Lbmi2 - - .globl mcl_fp_addPre4Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre4Lbmi2,@function -mcl_fp_addPre4Lbmi2: # @mcl_fp_addPre4Lbmi2 -# BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rdx), %rax - movq (%rdx), %rcx - movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rax - movq %rcx, (%rdi) - movq %rdx, 8(%rdi) - movq %rax, 16(%rdi) - adcq %r8, %r9 - movq %r9, 24(%rdi) - sbbq %rax, %rax - andl $1, %eax - retq -.Lfunc_end56: - .size mcl_fp_addPre4Lbmi2, .Lfunc_end56-mcl_fp_addPre4Lbmi2 - - .globl mcl_fp_subPre4Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre4Lbmi2,@function -mcl_fp_subPre4Lbmi2: # @mcl_fp_subPre4Lbmi2 -# BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rsi), %r10 - movq (%rsi), %rcx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rcx - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %r10 - movq %rcx, (%rdi) - movq %rsi, 8(%rdi) - movq %r10, 16(%rdi) - sbbq %r8, %r9 - movq %r9, 24(%rdi) - sbbq $0, %rax - andl $1, %eax - retq -.Lfunc_end57: - .size mcl_fp_subPre4Lbmi2, .Lfunc_end57-mcl_fp_subPre4Lbmi2 - - .globl mcl_fp_shr1_4Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_4Lbmi2,@function -mcl_fp_shr1_4Lbmi2: # @mcl_fp_shr1_4Lbmi2 -# BB#0: - movq 24(%rsi), %rax - movq 16(%rsi), %rcx - movq (%rsi), %rdx - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rdx - movq %rdx, (%rdi) - shrdq $1, %rcx, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rax, %rcx - movq %rcx, 16(%rdi) - shrq %rax - movq %rax, 24(%rdi) - retq -.Lfunc_end58: - .size mcl_fp_shr1_4Lbmi2, .Lfunc_end58-mcl_fp_shr1_4Lbmi2 - - .globl mcl_fp_add4Lbmi2 - .align 16, 0x90 - .type mcl_fp_add4Lbmi2,@function -mcl_fp_add4Lbmi2: # @mcl_fp_add4Lbmi2 -# BB#0: - movq 24(%rdx), %r10 - movq 24(%rsi), %r8 - movq 16(%rdx), %r9 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r9 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r9, 16(%rdi) - adcq %r10, %r8 - movq %r8, 24(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq 16(%rcx), %r9 - sbbq 24(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne .LBB59_2 -# BB#1: # %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r9, 16(%rdi) - movq %r8, 
24(%rdi) -.LBB59_2: # %carry - retq -.Lfunc_end59: - .size mcl_fp_add4Lbmi2, .Lfunc_end59-mcl_fp_add4Lbmi2 - - .globl mcl_fp_addNF4Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF4Lbmi2,@function -mcl_fp_addNF4Lbmi2: # @mcl_fp_addNF4Lbmi2 -# BB#0: - pushq %rbx - movq 24(%rdx), %r8 - movq 16(%rdx), %r9 - movq (%rdx), %r11 - movq 8(%rdx), %r10 - addq (%rsi), %r11 - adcq 8(%rsi), %r10 - adcq 16(%rsi), %r9 - adcq 24(%rsi), %r8 - movq %r11, %rsi - subq (%rcx), %rsi - movq %r10, %rdx - sbbq 8(%rcx), %rdx - movq %r9, %rax - sbbq 16(%rcx), %rax - movq %r8, %rbx - sbbq 24(%rcx), %rbx - testq %rbx, %rbx - cmovsq %r11, %rsi - movq %rsi, (%rdi) - cmovsq %r10, %rdx - movq %rdx, 8(%rdi) - cmovsq %r9, %rax - movq %rax, 16(%rdi) - cmovsq %r8, %rbx - movq %rbx, 24(%rdi) - popq %rbx - retq -.Lfunc_end60: - .size mcl_fp_addNF4Lbmi2, .Lfunc_end60-mcl_fp_addNF4Lbmi2 - - .globl mcl_fp_sub4Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub4Lbmi2,@function -mcl_fp_sub4Lbmi2: # @mcl_fp_sub4Lbmi2 -# BB#0: - movq 24(%rdx), %r10 - movq 24(%rsi), %r8 - movq 16(%rsi), %r9 - movq (%rsi), %rax - movq 8(%rsi), %r11 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r11 - sbbq 16(%rdx), %r9 - movq %rax, (%rdi) - movq %r11, 8(%rdi) - movq %r9, 16(%rdi) - sbbq %r10, %r8 - movq %r8, 24(%rdi) - sbbq $0, %rsi - testb $1, %sil - jne .LBB61_2 -# BB#1: # %nocarry - retq -.LBB61_2: # %carry - movq 24(%rcx), %r10 - movq 8(%rcx), %rsi - movq 16(%rcx), %rdx - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %r11, %rsi - movq %rsi, 8(%rdi) - adcq %r9, %rdx - movq %rdx, 16(%rdi) - adcq %r8, %r10 - movq %r10, 24(%rdi) - retq -.Lfunc_end61: - .size mcl_fp_sub4Lbmi2, .Lfunc_end61-mcl_fp_sub4Lbmi2 - - .globl mcl_fp_subNF4Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF4Lbmi2,@function -mcl_fp_subNF4Lbmi2: # @mcl_fp_subNF4Lbmi2 -# BB#0: - pushq %rbx - movq 24(%rsi), %r11 - movq 16(%rsi), %r8 - movq (%rsi), %r9 - movq 8(%rsi), %r10 - subq (%rdx), %r9 - sbbq 8(%rdx), %r10 - sbbq 16(%rdx), %r8 - sbbq 24(%rdx), %r11 - movq %r11, %rdx - sarq $63, %rdx - movq 24(%rcx), %rsi - andq %rdx, %rsi - movq 16(%rcx), %rax - andq %rdx, %rax - movq 8(%rcx), %rbx - andq %rdx, %rbx - andq (%rcx), %rdx - addq %r9, %rdx - movq %rdx, (%rdi) - adcq %r10, %rbx - movq %rbx, 8(%rdi) - adcq %r8, %rax - movq %rax, 16(%rdi) - adcq %r11, %rsi - movq %rsi, 24(%rdi) - popq %rbx - retq -.Lfunc_end62: - .size mcl_fp_subNF4Lbmi2, .Lfunc_end62-mcl_fp_subNF4Lbmi2 - - .globl mcl_fpDbl_add4Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add4Lbmi2,@function -mcl_fpDbl_add4Lbmi2: # @mcl_fpDbl_add4Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r9 - movq 56(%rsi), %r8 - movq 48(%rdx), %r10 - movq 48(%rsi), %r12 - movq 40(%rdx), %r11 - movq 32(%rdx), %r14 - movq 24(%rdx), %r15 - movq 16(%rdx), %rbx - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rbx - movq 40(%rsi), %r13 - movq 24(%rsi), %rbp - movq 32(%rsi), %rsi - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %rbx, 16(%rdi) - adcq %r15, %rbp - movq %rbp, 24(%rdi) - adcq %r14, %rsi - adcq %r11, %r13 - adcq %r10, %r12 - adcq %r9, %r8 - sbbq %rax, %rax - andl $1, %eax - movq %rsi, %rdx - subq (%rcx), %rdx - movq %r13, %rbp - sbbq 8(%rcx), %rbp - movq %r12, %rbx - sbbq 16(%rcx), %rbx - movq %r8, %r9 - sbbq 24(%rcx), %r9 - sbbq $0, %rax - andl $1, %eax - cmovneq %rsi, %rdx - movq %rdx, 32(%rdi) - testb %al, %al - cmovneq %r13, %rbp - movq %rbp, 40(%rdi) - cmovneq %r12, %rbx - movq %rbx, 48(%rdi) - cmovneq %r8, %r9 - movq %r9, 56(%rdi) - popq 
%rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end63: - .size mcl_fpDbl_add4Lbmi2, .Lfunc_end63-mcl_fpDbl_add4Lbmi2 - - .globl mcl_fpDbl_sub4Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub4Lbmi2,@function -mcl_fpDbl_sub4Lbmi2: # @mcl_fpDbl_sub4Lbmi2 -# BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r9 - movq 56(%rsi), %r8 - movq 48(%rdx), %r10 - movq 24(%rdx), %r11 - movq (%rsi), %rbx - xorl %eax, %eax - subq (%rdx), %rbx - movq %rbx, (%rdi) - movq 8(%rsi), %rbx - sbbq 8(%rdx), %rbx - movq %rbx, 8(%rdi) - movq 16(%rsi), %rbx - sbbq 16(%rdx), %rbx - movq %rbx, 16(%rdi) - movq 24(%rsi), %rbx - sbbq %r11, %rbx - movq 40(%rdx), %r11 - movq 32(%rdx), %rdx - movq %rbx, 24(%rdi) - movq 32(%rsi), %r12 - sbbq %rdx, %r12 - movq 48(%rsi), %r14 - movq 40(%rsi), %r15 - sbbq %r11, %r15 - sbbq %r10, %r14 - sbbq %r9, %r8 - movl $0, %edx - sbbq $0, %rdx - andl $1, %edx - movq (%rcx), %rsi - cmoveq %rax, %rsi - testb %dl, %dl - movq 16(%rcx), %rdx - cmoveq %rax, %rdx - movq 24(%rcx), %rbx - cmoveq %rax, %rbx - cmovneq 8(%rcx), %rax - addq %r12, %rsi - movq %rsi, 32(%rdi) - adcq %r15, %rax - movq %rax, 40(%rdi) - adcq %r14, %rdx - movq %rdx, 48(%rdi) - adcq %r8, %rbx - movq %rbx, 56(%rdi) - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq -.Lfunc_end64: - .size mcl_fpDbl_sub4Lbmi2, .Lfunc_end64-mcl_fpDbl_sub4Lbmi2 - - .globl mcl_fp_mulUnitPre5Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre5Lbmi2,@function -mcl_fp_mulUnitPre5Lbmi2: # @mcl_fp_mulUnitPre5Lbmi2 -# BB#0: - pushq %r14 - pushq %rbx - mulxq 32(%rsi), %r8, %r11 - mulxq 24(%rsi), %r9, %rax - mulxq 16(%rsi), %r10, %rcx - mulxq 8(%rsi), %r14, %rbx - mulxq (%rsi), %rdx, %rsi - movq %rdx, (%rdi) - addq %r14, %rsi - movq %rsi, 8(%rdi) - adcq %r10, %rbx - movq %rbx, 16(%rdi) - adcq %r9, %rcx - movq %rcx, 24(%rdi) - adcq %r8, %rax - movq %rax, 32(%rdi) - adcq $0, %r11 - movq %r11, 40(%rdi) - popq %rbx - popq %r14 - retq -.Lfunc_end65: - .size mcl_fp_mulUnitPre5Lbmi2, .Lfunc_end65-mcl_fp_mulUnitPre5Lbmi2 - - .globl mcl_fpDbl_mulPre5Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre5Lbmi2,@function -mcl_fpDbl_mulPre5Lbmi2: # @mcl_fpDbl_mulPre5Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rdi, -8(%rsp) # 8-byte Spill - movq (%rsi), %r11 - movq 8(%rsi), %r10 - movq (%rdx), %rcx - movq %r10, %rdx - mulxq %rcx, %rax, %r14 - movq %r11, %rdx - mulxq %rcx, %rdx, %rbx - movq %rdx, -24(%rsp) # 8-byte Spill - movq 24(%rsi), %rbp - movq %rbp, -16(%rsp) # 8-byte Spill - movq 16(%rsi), %r15 - addq %rax, %rbx - movq %r15, %rdx - mulxq %rcx, %rax, %r13 - adcq %r14, %rax - movq %rbp, %rdx - mulxq %rcx, %r8, %r12 - adcq %r13, %r8 - movq 32(%rsi), %r14 - movq %r14, %rdx - mulxq %rcx, %r9, %r13 - adcq %r12, %r9 - movq -24(%rsp), %rcx # 8-byte Reload - movq %rcx, (%rdi) - adcq $0, %r13 - movq -48(%rsp), %rdi # 8-byte Reload - movq 8(%rdi), %rbp - movq %r11, %rdx - mulxq %rbp, %r12, %r11 - addq %rbx, %r12 - movq %r10, %rdx - mulxq %rbp, %rbx, %rcx - movq %rcx, -24(%rsp) # 8-byte Spill - adcq %rax, %rbx - movq %r15, %rdx - mulxq %rbp, %rcx, %r10 - adcq %r8, %rcx - movq -16(%rsp), %rdx # 8-byte Reload - mulxq %rbp, %rax, %r8 - adcq %r9, %rax - movq %r14, %rdx - mulxq %rbp, %r15, %rdx - adcq %r13, %r15 - sbbq %r14, %r14 - andl $1, %r14d - addq %r11, %rbx - movq -8(%rsp), %rbp # 8-byte Reload - movq %r12, 8(%rbp) - adcq -24(%rsp), %rcx # 8-byte Folded Reload - adcq %r10, %rax - adcq %r8, %r15 - adcq %rdx, %r14 - movq (%rsi), %rdx 
- movq %rdx, -24(%rsp) # 8-byte Spill - movq 8(%rsi), %r8 - movq %r8, -16(%rsp) # 8-byte Spill - movq 16(%rdi), %rbp - mulxq %rbp, %r12, %rdx - movq %rdx, -32(%rsp) # 8-byte Spill - addq %rbx, %r12 - movq %r8, %rdx - mulxq %rbp, %rbx, %rdx - movq %rdx, -40(%rsp) # 8-byte Spill - adcq %rcx, %rbx - movq 16(%rsi), %r11 - movq %r11, %rdx - mulxq %rbp, %rcx, %rdx - movq %rdx, -56(%rsp) # 8-byte Spill - adcq %rax, %rcx - movq 24(%rsi), %r13 - movq %r13, %rdx - mulxq %rbp, %r9, %r10 - adcq %r15, %r9 - movq 32(%rsi), %r15 - movq %r15, %rdx - mulxq %rbp, %r8, %rdx - adcq %r14, %r8 - sbbq %r14, %r14 - andl $1, %r14d - addq -32(%rsp), %rbx # 8-byte Folded Reload - adcq -40(%rsp), %rcx # 8-byte Folded Reload - adcq -56(%rsp), %r9 # 8-byte Folded Reload - adcq %r10, %r8 - adcq %rdx, %r14 - movq -8(%rsp), %r10 # 8-byte Reload - movq %r12, 16(%r10) - movq %rdi, %rbp - movq 24(%rbp), %rax - movq -24(%rsp), %rdx # 8-byte Reload - mulxq %rax, %r12, %rdi - addq %rbx, %r12 - movq -16(%rsp), %rdx # 8-byte Reload - mulxq %rax, %rbx, %rdx - movq %rdx, -16(%rsp) # 8-byte Spill - adcq %rcx, %rbx - movq %r11, %rdx - mulxq %rax, %rcx, %r11 - adcq %r9, %rcx - movq %r13, %rdx - mulxq %rax, %r13, %r9 - adcq %r8, %r13 - movq %r15, %rdx - mulxq %rax, %r8, %rdx - adcq %r14, %r8 - sbbq %r14, %r14 - andl $1, %r14d - addq %rdi, %rbx - movq %r12, 24(%r10) - movq %r10, %rdi - adcq -16(%rsp), %rcx # 8-byte Folded Reload - adcq %r11, %r13 - adcq %r9, %r8 - adcq %rdx, %r14 - movq 32(%rbp), %rdx - mulxq 8(%rsi), %rax, %r9 - mulxq (%rsi), %rbp, %r10 - addq %rbx, %rbp - adcq %rcx, %rax - mulxq 16(%rsi), %rbx, %r15 - adcq %r13, %rbx - mulxq 32(%rsi), %rcx, %r11 - mulxq 24(%rsi), %rsi, %rdx - movq %rbp, 32(%rdi) - adcq %r8, %rsi - adcq %r14, %rcx - sbbq %rbp, %rbp - andl $1, %ebp - addq %r10, %rax - movq %rax, 40(%rdi) - adcq %r9, %rbx - movq %rbx, 48(%rdi) - adcq %r15, %rsi - movq %rsi, 56(%rdi) - adcq %rdx, %rcx - movq %rcx, 64(%rdi) - adcq %r11, %rbp - movq %rbp, 72(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end66: - .size mcl_fpDbl_mulPre5Lbmi2, .Lfunc_end66-mcl_fpDbl_mulPre5Lbmi2 - - .globl mcl_fpDbl_sqrPre5Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre5Lbmi2,@function -mcl_fpDbl_sqrPre5Lbmi2: # @mcl_fpDbl_sqrPre5Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 16(%rsi), %r11 - movq (%rsi), %rax - movq 8(%rsi), %rcx - movq %r11, %rdx - mulxq %rax, %rbx, %r15 - movq 32(%rsi), %r9 - movq %r9, -8(%rsp) # 8-byte Spill - movq 24(%rsi), %r13 - movq %rcx, %rdx - mulxq %rax, %r12, %rbp - movq %rbp, -16(%rsp) # 8-byte Spill - movq %rax, %rdx - mulxq %rax, %rdx, %r14 - movq %rdx, -24(%rsp) # 8-byte Spill - addq %r12, %r14 - adcq %rbp, %rbx - movq %r13, %rdx - mulxq %rax, %r8, %r10 - adcq %r15, %r8 - movq %r9, %rdx - mulxq %rax, %rbp, %r15 - adcq %r10, %rbp - movq -24(%rsp), %rax # 8-byte Reload - movq %rax, (%rdi) - adcq $0, %r15 - addq %r12, %r14 - movq %rcx, %rdx - mulxq %rcx, %rax, %rdx - movq %rdx, -24(%rsp) # 8-byte Spill - adcq %rbx, %rax - movq %r11, %rdx - mulxq %rcx, %rbx, %r10 - adcq %r8, %rbx - movq %r13, %rdx - mulxq %rcx, %r13, %r8 - adcq %rbp, %r13 - movq %r9, %rdx - mulxq %rcx, %r12, %rcx - adcq %r15, %r12 - sbbq %r15, %r15 - andl $1, %r15d - addq -16(%rsp), %rax # 8-byte Folded Reload - movq %r14, 8(%rdi) - adcq -24(%rsp), %rbx # 8-byte Folded Reload - adcq %r10, %r13 - adcq %r8, %r12 - adcq %rcx, %r15 - movq (%rsi), %r9 - movq 8(%rsi), %r10 - movq %r9, %rdx - mulxq %r11, %rbp, %rcx - movq %rcx, -16(%rsp) # 8-byte Spill - 
addq %rax, %rbp - movq %r10, %rdx - mulxq %r11, %rax, %r8 - adcq %rbx, %rax - movq %r11, %rdx - mulxq %r11, %r14, %rcx - movq %rcx, -24(%rsp) # 8-byte Spill - adcq %r13, %r14 - movq 24(%rsi), %rcx - movq %rcx, %rdx - mulxq %r11, %rbx, %r13 - adcq %r12, %rbx - movq -8(%rsp), %rdx # 8-byte Reload - mulxq %r11, %r12, %rdx - adcq %r15, %r12 - sbbq %r15, %r15 - andl $1, %r15d - addq -16(%rsp), %rax # 8-byte Folded Reload - adcq %r8, %r14 - movq %rbp, 16(%rdi) - adcq -24(%rsp), %rbx # 8-byte Folded Reload - adcq %r13, %r12 - adcq %rdx, %r15 - movq %r10, %rdx - mulxq %rcx, %r10, %rdx - movq %rdx, -8(%rsp) # 8-byte Spill - movq %r9, %rdx - mulxq %rcx, %r13, %rdx - movq %rdx, -16(%rsp) # 8-byte Spill - addq %rax, %r13 - movq 16(%rsi), %r8 - movq 32(%rsi), %rax - adcq %r14, %r10 - movq %r8, %rdx - mulxq %rcx, %r9, %r14 - adcq %rbx, %r9 - movq %rcx, %rdx - mulxq %rcx, %r11, %rbp - adcq %r12, %r11 - movq %rax, %rdx - mulxq %rcx, %r12, %rdx - adcq %r15, %r12 - sbbq %rbx, %rbx - andl $1, %ebx - addq -16(%rsp), %r10 # 8-byte Folded Reload - movq %r13, 24(%rdi) - adcq -8(%rsp), %r9 # 8-byte Folded Reload - adcq %r14, %r11 - adcq %rbp, %r12 - adcq %rdx, %rbx - movq %rax, %rdx - mulxq 24(%rsi), %rbp, %r14 - movq %rax, %rdx - mulxq (%rsi), %rcx, %r15 - addq %r10, %rcx - movq %rax, %rdx - mulxq 8(%rsi), %rsi, %r10 - movq %rcx, 32(%rdi) - adcq %r9, %rsi - movq %r8, %rdx - mulxq %rax, %rcx, %r8 - adcq %r11, %rcx - adcq %r12, %rbp - movq %rax, %rdx - mulxq %rax, %rdx, %rax - adcq %rbx, %rdx - sbbq %rbx, %rbx - andl $1, %ebx - addq %r15, %rsi - movq %rsi, 40(%rdi) - adcq %r10, %rcx - movq %rcx, 48(%rdi) - adcq %r8, %rbp - movq %rbp, 56(%rdi) - adcq %r14, %rdx - movq %rdx, 64(%rdi) - adcq %rax, %rbx - movq %rbx, 72(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end67: - .size mcl_fpDbl_sqrPre5Lbmi2, .Lfunc_end67-mcl_fpDbl_sqrPre5Lbmi2 - - .globl mcl_fp_mont5Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont5Lbmi2,@function -mcl_fp_mont5Lbmi2: # @mcl_fp_mont5Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rdi, -112(%rsp) # 8-byte Spill - movq 32(%rsi), %rdi - movq %rdi, -64(%rsp) # 8-byte Spill - movq (%rdx), %rax - movq %rdi, %rdx - mulxq %rax, %r10, %rbx - movq 24(%rsi), %rdx - movq %rdx, -72(%rsp) # 8-byte Spill - mulxq %rax, %r12, %r14 - movq 16(%rsi), %rdx - movq %rdx, -80(%rsp) # 8-byte Spill - mulxq %rax, %r13, %r11 - movq (%rsi), %rbp - movq %rbp, -88(%rsp) # 8-byte Spill - movq 8(%rsi), %rdx - movq %rdx, -96(%rsp) # 8-byte Spill - mulxq %rax, %rdi, %r9 - movq %rbp, %rdx - mulxq %rax, %r15, %r8 - addq %rdi, %r8 - adcq %r13, %r9 - adcq %r12, %r11 - adcq %r10, %r14 - adcq $0, %rbx - movq %rbx, -104(%rsp) # 8-byte Spill - movq -8(%rcx), %rax - movq %rax, -48(%rsp) # 8-byte Spill - movq %r15, %rdx - imulq %rax, %rdx - movq (%rcx), %rsi - movq %rsi, -32(%rsp) # 8-byte Spill - movq 32(%rcx), %rax - movq %rax, -8(%rsp) # 8-byte Spill - mulxq %rax, %rax, %r12 - movq %rax, -120(%rsp) # 8-byte Spill - movq 24(%rcx), %rax - movq %rax, -16(%rsp) # 8-byte Spill - mulxq %rax, %r13, %r10 - movq 8(%rcx), %rax - movq %rax, -24(%rsp) # 8-byte Spill - mulxq %rax, %rdi, %rbp - mulxq %rsi, %rax, %rbx - addq %rdi, %rbx - movq 16(%rcx), %rcx - movq %rcx, -40(%rsp) # 8-byte Spill - mulxq %rcx, %rdi, %rcx - adcq %rbp, %rdi - adcq %r13, %rcx - adcq -120(%rsp), %r10 # 8-byte Folded Reload - adcq $0, %r12 - addq %r15, %rax - adcq %r8, %rbx - adcq %r9, %rdi - adcq %r11, %rcx - adcq %r14, %r10 - adcq 
-104(%rsp), %r12 # 8-byte Folded Reload - sbbq %rbp, %rbp - andl $1, %ebp - movq -56(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rdx - mulxq -64(%rsp), %rax, %r14 # 8-byte Folded Reload - movq %rax, -104(%rsp) # 8-byte Spill - mulxq -72(%rsp), %rax, %r15 # 8-byte Folded Reload - movq %rax, -120(%rsp) # 8-byte Spill - mulxq -80(%rsp), %r13, %r9 # 8-byte Folded Reload - mulxq -96(%rsp), %r8, %rsi # 8-byte Folded Reload - mulxq -88(%rsp), %r11, %rax # 8-byte Folded Reload - addq %r8, %rax - adcq %r13, %rsi - adcq -120(%rsp), %r9 # 8-byte Folded Reload - adcq -104(%rsp), %r15 # 8-byte Folded Reload - adcq $0, %r14 - addq %rbx, %r11 - adcq %rdi, %rax - adcq %rcx, %rsi - adcq %r10, %r9 - adcq %r12, %r15 - adcq %rbp, %r14 - sbbq %r12, %r12 - andl $1, %r12d - movq %r11, %rdx - imulq -48(%rsp), %rdx # 8-byte Folded Reload - mulxq -8(%rsp), %rcx, %r10 # 8-byte Folded Reload - movq %rcx, -104(%rsp) # 8-byte Spill - mulxq -16(%rsp), %rcx, %rdi # 8-byte Folded Reload - movq %rcx, -120(%rsp) # 8-byte Spill - mulxq -40(%rsp), %r13, %rcx # 8-byte Folded Reload - mulxq -24(%rsp), %r8, %rbx # 8-byte Folded Reload - mulxq -32(%rsp), %rdx, %rbp # 8-byte Folded Reload - addq %r8, %rbp - adcq %r13, %rbx - adcq -120(%rsp), %rcx # 8-byte Folded Reload - adcq -104(%rsp), %rdi # 8-byte Folded Reload - adcq $0, %r10 - addq %r11, %rdx - adcq %rax, %rbp - adcq %rsi, %rbx - adcq %r9, %rcx - adcq %r15, %rdi - adcq %r14, %r10 - adcq $0, %r12 - movq -56(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rdx - mulxq -64(%rsp), %rax, %r15 # 8-byte Folded Reload - movq %rax, -104(%rsp) # 8-byte Spill - mulxq -72(%rsp), %rax, %r11 # 8-byte Folded Reload - movq %rax, -120(%rsp) # 8-byte Spill - mulxq -80(%rsp), %r13, %r9 # 8-byte Folded Reload - mulxq -96(%rsp), %rsi, %r8 # 8-byte Folded Reload - mulxq -88(%rsp), %r14, %rax # 8-byte Folded Reload - addq %rsi, %rax - adcq %r13, %r8 - adcq -120(%rsp), %r9 # 8-byte Folded Reload - adcq -104(%rsp), %r11 # 8-byte Folded Reload - adcq $0, %r15 - addq %rbp, %r14 - adcq %rbx, %rax - adcq %rcx, %r8 - adcq %rdi, %r9 - adcq %r10, %r11 - adcq %r12, %r15 - sbbq %r13, %r13 - andl $1, %r13d - movq %r14, %rdx - imulq -48(%rsp), %rdx # 8-byte Folded Reload - mulxq -8(%rsp), %rcx, %r12 # 8-byte Folded Reload - movq %rcx, -104(%rsp) # 8-byte Spill - mulxq -16(%rsp), %rcx, %r10 # 8-byte Folded Reload - movq %rcx, -120(%rsp) # 8-byte Spill - mulxq -40(%rsp), %rdi, %rsi # 8-byte Folded Reload - mulxq -24(%rsp), %rcx, %rbx # 8-byte Folded Reload - mulxq -32(%rsp), %rdx, %rbp # 8-byte Folded Reload - addq %rcx, %rbp - adcq %rdi, %rbx - adcq -120(%rsp), %rsi # 8-byte Folded Reload - adcq -104(%rsp), %r10 # 8-byte Folded Reload - adcq $0, %r12 - addq %r14, %rdx - adcq %rax, %rbp - adcq %r8, %rbx - adcq %r9, %rsi - adcq %r11, %r10 - adcq %r15, %r12 - adcq $0, %r13 - movq -56(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rdx - mulxq -64(%rsp), %rcx, %rax # 8-byte Folded Reload - movq %rcx, -120(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - mulxq -72(%rsp), %r11, %r14 # 8-byte Folded Reload - mulxq -80(%rsp), %r8, %r9 # 8-byte Folded Reload - mulxq -96(%rsp), %rax, %rdi # 8-byte Folded Reload - mulxq -88(%rsp), %r15, %rcx # 8-byte Folded Reload - addq %rax, %rcx - adcq %r8, %rdi - adcq %r11, %r9 - adcq -120(%rsp), %r14 # 8-byte Folded Reload - movq -104(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %rbp, %r15 - adcq %rbx, %rcx - adcq %rsi, %rdi - adcq %r10, %r9 - adcq %r12, %r14 - adcq %r13, %rax - movq %rax, -104(%rsp) # 8-byte Spill - sbbq %r12, %r12 - andl $1, %r12d - movq %r15, %rdx - 
imulq -48(%rsp), %rdx # 8-byte Folded Reload - mulxq -8(%rsp), %rax, %rbp # 8-byte Folded Reload - movq %rax, -120(%rsp) # 8-byte Spill - mulxq -16(%rsp), %r13, %r10 # 8-byte Folded Reload - mulxq -40(%rsp), %rbx, %r8 # 8-byte Folded Reload - mulxq -24(%rsp), %rsi, %r11 # 8-byte Folded Reload - mulxq -32(%rsp), %rdx, %rax # 8-byte Folded Reload - addq %rsi, %rax - adcq %rbx, %r11 - adcq %r13, %r8 - adcq -120(%rsp), %r10 # 8-byte Folded Reload - adcq $0, %rbp - addq %r15, %rdx - adcq %rcx, %rax - adcq %rdi, %r11 - adcq %r9, %r8 - adcq %r14, %r10 - adcq -104(%rsp), %rbp # 8-byte Folded Reload - adcq $0, %r12 - movq -56(%rsp), %rcx # 8-byte Reload - movq 32(%rcx), %rdx - mulxq -64(%rsp), %rcx, %r14 # 8-byte Folded Reload - movq %rcx, -56(%rsp) # 8-byte Spill - mulxq -72(%rsp), %rcx, %rbx # 8-byte Folded Reload - movq %rcx, -64(%rsp) # 8-byte Spill - mulxq -80(%rsp), %rsi, %r15 # 8-byte Folded Reload - mulxq -96(%rsp), %rcx, %r9 # 8-byte Folded Reload - mulxq -88(%rsp), %r13, %rdi # 8-byte Folded Reload - addq %rcx, %rdi - adcq %rsi, %r9 - adcq -64(%rsp), %r15 # 8-byte Folded Reload - adcq -56(%rsp), %rbx # 8-byte Folded Reload - adcq $0, %r14 - addq %rax, %r13 - adcq %r11, %rdi - adcq %r8, %r9 - adcq %r10, %r15 - adcq %rbp, %rbx - adcq %r12, %r14 - sbbq %rax, %rax - movq -48(%rsp), %rdx # 8-byte Reload - imulq %r13, %rdx - mulxq -32(%rsp), %r10, %rcx # 8-byte Folded Reload - mulxq -24(%rsp), %r8, %rsi # 8-byte Folded Reload - addq %rcx, %r8 - mulxq -40(%rsp), %rbp, %r11 # 8-byte Folded Reload - adcq %rsi, %rbp - mulxq -16(%rsp), %rcx, %r12 # 8-byte Folded Reload - adcq %r11, %rcx - mulxq -8(%rsp), %rsi, %r11 # 8-byte Folded Reload - adcq %r12, %rsi - adcq $0, %r11 - andl $1, %eax - addq %r13, %r10 - adcq %rdi, %r8 - adcq %r9, %rbp - adcq %r15, %rcx - adcq %rbx, %rsi - adcq %r14, %r11 - adcq $0, %rax - movq %r8, %rdi - subq -32(%rsp), %rdi # 8-byte Folded Reload - movq %rbp, %rbx - sbbq -24(%rsp), %rbx # 8-byte Folded Reload - movq %rcx, %r9 - sbbq -40(%rsp), %r9 # 8-byte Folded Reload - movq %rsi, %rdx - sbbq -16(%rsp), %rdx # 8-byte Folded Reload - movq %r11, %r10 - sbbq -8(%rsp), %r10 # 8-byte Folded Reload - sbbq $0, %rax - andl $1, %eax - cmovneq %rsi, %rdx - testb %al, %al - cmovneq %r8, %rdi - movq -112(%rsp), %rax # 8-byte Reload - movq %rdi, (%rax) - cmovneq %rbp, %rbx - movq %rbx, 8(%rax) - cmovneq %rcx, %r9 - movq %r9, 16(%rax) - movq %rdx, 24(%rax) - cmovneq %r11, %r10 - movq %r10, 32(%rax) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end68: - .size mcl_fp_mont5Lbmi2, .Lfunc_end68-mcl_fp_mont5Lbmi2 - - .globl mcl_fp_montNF5Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF5Lbmi2,@function -mcl_fp_montNF5Lbmi2: # @mcl_fp_montNF5Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rdi, -104(%rsp) # 8-byte Spill - movq (%rsi), %r13 - movq %r13, -64(%rsp) # 8-byte Spill - movq 8(%rsi), %rbp - movq %rbp, -24(%rsp) # 8-byte Spill - movq (%rdx), %rax - movq %rbp, %rdx - mulxq %rax, %rbp, %r9 - movq %r13, %rdx - mulxq %rax, %r8, %r10 - movq 16(%rsi), %rdx - movq %rdx, -32(%rsp) # 8-byte Spill - addq %rbp, %r10 - mulxq %rax, %rbp, %rbx - adcq %r9, %rbp - movq 24(%rsi), %rdx - movq %rdx, -40(%rsp) # 8-byte Spill - mulxq %rax, %r15, %r9 - adcq %rbx, %r15 - movq 32(%rsi), %rdx - movq %rdx, -48(%rsp) # 8-byte Spill - mulxq %rax, %rax, %r11 - adcq %r9, %rax - adcq $0, %r11 - movq -8(%rcx), %rsi - movq %rsi, -72(%rsp) # 8-byte Spill - movq %r8, %rdx - imulq %rsi, %rdx - movq 
(%rcx), %rsi - movq %rsi, -88(%rsp) # 8-byte Spill - mulxq %rsi, %rbx, %r14 - addq %r8, %rbx - movq 8(%rcx), %rsi - movq %rsi, -80(%rsp) # 8-byte Spill - mulxq %rsi, %rbx, %r12 - adcq %r10, %rbx - movq 16(%rcx), %rsi - movq %rsi, -96(%rsp) # 8-byte Spill - mulxq %rsi, %r10, %rdi - adcq %rbp, %r10 - movq 24(%rcx), %rsi - movq %rsi, -56(%rsp) # 8-byte Spill - mulxq %rsi, %r9, %rbp - adcq %r15, %r9 - movq 32(%rcx), %rcx - movq %rcx, -8(%rsp) # 8-byte Spill - mulxq %rcx, %r8, %rcx - adcq %rax, %r8 - adcq $0, %r11 - addq %r14, %rbx - adcq %r12, %r10 - adcq %rdi, %r9 - adcq %rbp, %r8 - adcq %rcx, %r11 - movq -16(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rdx - mulxq -24(%rsp), %rcx, %rsi # 8-byte Folded Reload - mulxq %r13, %r14, %rax - addq %rcx, %rax - mulxq -32(%rsp), %rcx, %rdi # 8-byte Folded Reload - adcq %rsi, %rcx - mulxq -40(%rsp), %rsi, %r15 # 8-byte Folded Reload - adcq %rdi, %rsi - mulxq -48(%rsp), %rdi, %rbp # 8-byte Folded Reload - adcq %r15, %rdi - adcq $0, %rbp - addq %rbx, %r14 - adcq %r10, %rax - adcq %r9, %rcx - adcq %r8, %rsi - adcq %r11, %rdi - adcq $0, %rbp - movq %r14, %rdx - movq -72(%rsp), %r12 # 8-byte Reload - imulq %r12, %rdx - mulxq -88(%rsp), %rbx, %r15 # 8-byte Folded Reload - addq %r14, %rbx - movq -80(%rsp), %r13 # 8-byte Reload - mulxq %r13, %r8, %rbx - adcq %rax, %r8 - mulxq -96(%rsp), %r9, %rax # 8-byte Folded Reload - adcq %rcx, %r9 - mulxq -56(%rsp), %r10, %rcx # 8-byte Folded Reload - adcq %rsi, %r10 - mulxq -8(%rsp), %r11, %rdx # 8-byte Folded Reload - adcq %rdi, %r11 - adcq $0, %rbp - addq %r15, %r8 - adcq %rbx, %r9 - adcq %rax, %r10 - adcq %rcx, %r11 - adcq %rdx, %rbp - movq -16(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rdx - mulxq -24(%rsp), %rcx, %rax # 8-byte Folded Reload - mulxq -64(%rsp), %r14, %rsi # 8-byte Folded Reload - addq %rcx, %rsi - mulxq -32(%rsp), %rbx, %rcx # 8-byte Folded Reload - adcq %rax, %rbx - mulxq -40(%rsp), %rdi, %r15 # 8-byte Folded Reload - adcq %rcx, %rdi - mulxq -48(%rsp), %rcx, %rax # 8-byte Folded Reload - adcq %r15, %rcx - adcq $0, %rax - addq %r8, %r14 - adcq %r9, %rsi - adcq %r10, %rbx - adcq %r11, %rdi - adcq %rbp, %rcx - adcq $0, %rax - movq %r14, %rdx - imulq %r12, %rdx - movq -88(%rsp), %r12 # 8-byte Reload - mulxq %r12, %rbp, %r15 - addq %r14, %rbp - mulxq %r13, %r8, %rbp - adcq %rsi, %r8 - movq -96(%rsp), %r13 # 8-byte Reload - mulxq %r13, %r9, %rsi - adcq %rbx, %r9 - mulxq -56(%rsp), %r10, %rbx # 8-byte Folded Reload - adcq %rdi, %r10 - mulxq -8(%rsp), %r11, %rdx # 8-byte Folded Reload - adcq %rcx, %r11 - adcq $0, %rax - addq %r15, %r8 - adcq %rbp, %r9 - adcq %rsi, %r10 - adcq %rbx, %r11 - adcq %rdx, %rax - movq -16(%rsp), %rcx # 8-byte Reload - movq 24(%rcx), %rdx - mulxq -24(%rsp), %rdi, %rsi # 8-byte Folded Reload - mulxq -64(%rsp), %r14, %rcx # 8-byte Folded Reload - addq %rdi, %rcx - mulxq -32(%rsp), %rbx, %rdi # 8-byte Folded Reload - adcq %rsi, %rbx - mulxq -40(%rsp), %rsi, %r15 # 8-byte Folded Reload - adcq %rdi, %rsi - mulxq -48(%rsp), %rdi, %rbp # 8-byte Folded Reload - adcq %r15, %rdi - adcq $0, %rbp - addq %r8, %r14 - adcq %r9, %rcx - adcq %r10, %rbx - adcq %r11, %rsi - adcq %rax, %rdi - adcq $0, %rbp - movq %r14, %rdx - imulq -72(%rsp), %rdx # 8-byte Folded Reload - mulxq %r12, %rax, %r11 - addq %r14, %rax - mulxq -80(%rsp), %r8, %r14 # 8-byte Folded Reload - adcq %rcx, %r8 - mulxq %r13, %r9, %rax - adcq %rbx, %r9 - movq -56(%rsp), %r12 # 8-byte Reload - mulxq %r12, %r10, %rbx - adcq %rsi, %r10 - mulxq -8(%rsp), %rcx, %rdx # 8-byte Folded Reload - adcq %rdi, %rcx - adcq $0, %rbp - addq %r11, 
%r8 - adcq %r14, %r9 - adcq %rax, %r10 - adcq %rbx, %rcx - adcq %rdx, %rbp - movq -16(%rsp), %rax # 8-byte Reload - movq 32(%rax), %rdx - mulxq -24(%rsp), %rdi, %rbx # 8-byte Folded Reload - mulxq -64(%rsp), %r14, %rsi # 8-byte Folded Reload - addq %rdi, %rsi - mulxq -32(%rsp), %rdi, %rax # 8-byte Folded Reload - adcq %rbx, %rdi - mulxq -40(%rsp), %rbx, %r15 # 8-byte Folded Reload - adcq %rax, %rbx - mulxq -48(%rsp), %r11, %rax # 8-byte Folded Reload - adcq %r15, %r11 - adcq $0, %rax - addq %r8, %r14 - adcq %r9, %rsi - adcq %r10, %rdi - adcq %rcx, %rbx - adcq %rbp, %r11 - adcq $0, %rax - movq -72(%rsp), %rdx # 8-byte Reload - imulq %r14, %rdx - movq -88(%rsp), %r10 # 8-byte Reload - mulxq %r10, %rcx, %rbp - movq %rbp, -16(%rsp) # 8-byte Spill - addq %r14, %rcx - movq -80(%rsp), %r9 # 8-byte Reload - mulxq %r9, %r14, %rcx - movq %rcx, -24(%rsp) # 8-byte Spill - adcq %rsi, %r14 - movq %r13, %r8 - mulxq %r8, %r15, %r13 - adcq %rdi, %r15 - mulxq %r12, %rbp, %rcx - adcq %rbx, %rbp - movq -8(%rsp), %rbx # 8-byte Reload - mulxq %rbx, %r12, %rdx - adcq %r11, %r12 - adcq $0, %rax - addq -16(%rsp), %r14 # 8-byte Folded Reload - adcq -24(%rsp), %r15 # 8-byte Folded Reload - adcq %r13, %rbp - adcq %rcx, %r12 - adcq %rdx, %rax - movq %r14, %rcx - subq %r10, %rcx - movq %r15, %rsi - sbbq %r9, %rsi - movq %rbp, %rdi - sbbq %r8, %rdi - movq %r12, %r8 - sbbq -56(%rsp), %r8 # 8-byte Folded Reload - movq %rax, %rdx - sbbq %rbx, %rdx - movq %rdx, %rbx - sarq $63, %rbx - cmovsq %r14, %rcx - movq -104(%rsp), %rbx # 8-byte Reload - movq %rcx, (%rbx) - cmovsq %r15, %rsi - movq %rsi, 8(%rbx) - cmovsq %rbp, %rdi - movq %rdi, 16(%rbx) - cmovsq %r12, %r8 - movq %r8, 24(%rbx) - cmovsq %rax, %rdx - movq %rdx, 32(%rbx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end69: - .size mcl_fp_montNF5Lbmi2, .Lfunc_end69-mcl_fp_montNF5Lbmi2 - - .globl mcl_fp_montRed5Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed5Lbmi2,@function -mcl_fp_montRed5Lbmi2: # @mcl_fp_montRed5Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rdi, -80(%rsp) # 8-byte Spill - movq -8(%rcx), %rax - movq %rax, -64(%rsp) # 8-byte Spill - movq (%rcx), %rbx - movq %rbx, -8(%rsp) # 8-byte Spill - movq (%rsi), %r9 - movq %r9, %rdx - imulq %rax, %rdx - movq %rax, %r15 - movq 32(%rcx), %rax - movq %rax, -40(%rsp) # 8-byte Spill - mulxq %rax, %r8, %r13 - movq 24(%rcx), %r12 - movq %r12, -32(%rsp) # 8-byte Spill - mulxq %r12, %r10, %r14 - movq 16(%rcx), %rax - movq %rax, -48(%rsp) # 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -16(%rsp) # 8-byte Spill - mulxq %rax, %rdi, %rbp - mulxq %rcx, %rax, %r11 - mulxq %rbx, %rdx, %rcx - addq %rax, %rcx - adcq %rdi, %r11 - adcq %r10, %rbp - adcq %r8, %r14 - adcq $0, %r13 - addq %r9, %rdx - movq 72(%rsi), %rax - movq 64(%rsi), %rdx - adcq 8(%rsi), %rcx - adcq 16(%rsi), %r11 - adcq 24(%rsi), %rbp - adcq 32(%rsi), %r14 - adcq 40(%rsi), %r13 - movq 56(%rsi), %rdi - movq 48(%rsi), %rsi - adcq $0, %rsi - movq %rsi, -88(%rsp) # 8-byte Spill - adcq $0, %rdi - movq %rdi, -72(%rsp) # 8-byte Spill - adcq $0, %rdx - movq %rdx, -56(%rsp) # 8-byte Spill - adcq $0, %rax - movq %rax, -24(%rsp) # 8-byte Spill - sbbq %rsi, %rsi - andl $1, %esi - movq %rcx, %rdx - imulq %r15, %rdx - mulxq -40(%rsp), %rax, %r15 # 8-byte Folded Reload - movq %rax, -96(%rsp) # 8-byte Spill - mulxq %r12, %rax, %r10 - movq %rax, -104(%rsp) # 8-byte Spill - movq -48(%rsp), %r12 # 8-byte Reload - mulxq %r12, %rbx, %r8 - mulxq -16(%rsp), %r9, %rdi # 8-byte 
[elided: generated x86-64 BMI2 assembly deleted with the old vendor path — the tail of mcl_fp_montRed5Lbmi2 (carry folding and the final conditional subtraction of the 5-limb modulus via sbbq/cmovneq), then the complete bodies of mcl_fp_addPre5Lbmi2 and mcl_fp_subPre5Lbmi2 (raw adcq/sbbq carry chains that return the carry/borrow), mcl_fp_shr1_5Lbmi2 (a shrdq cascade shifting five limbs right by one bit), mcl_fp_add5Lbmi2 (add, trial-subtract the modulus, keep the reduced value on the nocarry path), mcl_fp_addNF5Lbmi2 (branch-free variant selecting with sarq/cmovsq), mcl_fp_sub5Lbmi2 (subtract with a conditional add-back of the modulus on the carry path), mcl_fp_subNF5Lbmi2 (mask-based branch-free variant), mcl_fpDbl_add5Lbmi2 (10-limb add that reduces the upper half), and the directives introducing mcl_fpDbl_sub5Lbmi2.]
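For readers skimming the deleted assembly, the addPre/subPre helpers are plain multi-word carry chains. A minimal Go sketch of the 5-limb pattern (hypothetical names and package, not the mcl Go binding under ffi/go), using math/bits where the assembly threads the flags register:

    package fp

    import "math/bits"

    // addPre5 computes z = x + y over five 64-bit limbs and returns the
    // carry out -- the adcq chain of mcl_fp_addPre5Lbmi2.
    func addPre5(z, x, y *[5]uint64) (carry uint64) {
        for i := 0; i < 5; i++ {
            z[i], carry = bits.Add64(x[i], y[i], carry)
        }
        return
    }

    // subPre5 computes z = x - y and returns the borrow out (sbbq chain).
    func subPre5(z, x, y *[5]uint64) (borrow uint64) {
        for i := 0; i < 5; i++ {
            z[i], borrow = bits.Sub64(x[i], y[i], borrow)
        }
        return
    }

The generated assembly fully unrolls these loops and keeps every limb in a register; the carry that bits.Add64 passes explicitly lives in the CPU flags.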
[elided: the body of mcl_fpDbl_sub5Lbmi2 (10-limb subtract with a masked add-back of the modulus into the upper half), mcl_fp_mulUnitPre6Lbmi2 (six mulxq partial products chained with adcq), mcl_fpDbl_mulPre6Lbmi2 and mcl_fpDbl_sqrPre6Lbmi2 (schoolbook 6x6-limb multiply and square producing a 12-limb result, one row of mulxq/adcq per word of the multiplier, the square being the same shape specialized to equal operands), and the prologue of mcl_fp_mont6Lbmi2.]
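mulUnitPre is the building block of the schoolbook multiplies above: one 64-bit word times an n-limb operand, widening to n+1 limbs. A hedged sketch of the 6-limb case, in the same illustrative package as the previous fragment:

    package fp

    import "math/bits"

    // mulUnitPre6 computes z = x * w, widening six limbs to seven --
    // the mulxq/adcq ladder of mcl_fp_mulUnitPre6Lbmi2.
    func mulUnitPre6(z *[7]uint64, x *[6]uint64, w uint64) {
        var carry uint64
        for i := 0; i < 6; i++ {
            hi, lo := bits.Mul64(x[i], w)
            var c uint64
            z[i], c = bits.Add64(lo, carry, 0)
            carry = hi + c // hi <= 2^64-2, so hi+c never wraps
        }
        z[6] = carry
    }

mcl_fpDbl_mulPre6Lbmi2 is six such rows, each shifted down one limb and accumulated into the 12-limb result.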
[elided: the remainder of mcl_fp_mont6Lbmi2 and all of mcl_fp_montNF6Lbmi2 — word-serial (CIOS-style) Montgomery multiplication over six 64-bit limbs. For each word of the multiplier a row of mulxq partial products is accumulated, m = t[0] * (-p^-1 mod 2^64) is formed with imulq from the precomputed constant at -8(%rcx), m*p is added so the low word cancels, and the window shifts down one limb; mont6 finishes with a trial subtraction of p selected by cmovneq, while the NF variant selects by sign with sarq/cmovsq. Also the prologue of mcl_fp_montRed6Lbmi2.]
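The montRed routines are the reduction half of that interleaving, run on an already-formed double-width product. A hedged Go sketch of the 6-limb case under the standard preconditions (p odd, nInv = -p^(-1) mod 2^64, input T < p * 2^384); names are illustrative, not mcl's API:

    package fp

    import "math/bits"

    // addMul adds m*pj + carry into *t and returns the outgoing carry.
    // m*pj + carry + *t always fits in 128 bits, so no step wraps.
    func addMul(t *uint64, m, pj, carry uint64) uint64 {
        hi, lo := bits.Mul64(m, pj)
        var c uint64
        lo, c = bits.Add64(lo, carry, 0)
        hi += c
        *t, c = bits.Add64(*t, lo, 0)
        return hi + c
    }

    // montRed6 computes z = T * 2^(-384) mod p for a 12-limb T < p*2^384,
    // the operation mcl_fp_montRed6Lbmi2 performs with unrolled mulxq rows.
    func montRed6(z *[6]uint64, T *[12]uint64, p *[6]uint64, nInv uint64) {
        t := *T
        var top uint64 // carry that spills past t[11]
        for i := 0; i < 6; i++ {
            m := t[i] * nInv // t[i] + m*p[0] == 0 (mod 2^64)
            var carry uint64
            for j := 0; j < 6; j++ {
                carry = addMul(&t[i+j], m, p[j], carry)
            }
            var c uint64
            t[i+6], c = bits.Add64(t[i+6], carry, 0)
            for k := i + 7; c != 0 && k < 12; k++ {
                t[k], c = bits.Add64(t[k], 0, c)
            }
            top += c
        }
        copy(z[:], t[6:])
        // The shifted value is < 2p, so one conditional subtraction
        // suffices -- the sbbq/cmovneq epilogue in the assembly.
        var tmp [6]uint64
        var borrow uint64
        for j := 0; j < 6; j++ {
            tmp[j], borrow = bits.Sub64(z[j], p[j], borrow)
        }
        if top != 0 || borrow == 0 {
            *z = tmp
        }
    }

mcl_fp_mont6Lbmi2 runs the same word-step interleaved with the multiplication itself rather than after it, which is what keeps the working set inside the register file.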
[elided: the rest of mcl_fp_montRed6Lbmi2 (six reduction word-steps followed by the final conditional subtraction), then mcl_fp_addPre6Lbmi2, mcl_fp_subPre6Lbmi2, mcl_fp_shr1_6Lbmi2, mcl_fp_add6Lbmi2, and most of mcl_fp_addNF6Lbmi2 — the 6-limb counterparts of the 5-limb helpers summarized above.]
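Unlike the Pre helpers, the bare add/sub routines reduce modulo p. mcl_fp_add6Lbmi2's nocarry/carry split corresponds to this shape (illustrative sketch, same package as above):

    package fp

    import "math/bits"

    // fpAdd6 computes z = x + y mod p, assuming x, y < p: add, then
    // trial-subtract p, keeping the unreduced sum only when the
    // subtraction borrowed and the add produced no carry -- the
    // nocarry/carry branch of mcl_fp_add6Lbmi2.
    func fpAdd6(z, x, y, p *[6]uint64) {
        var sum [6]uint64
        var carry uint64
        for i := 0; i < 6; i++ {
            sum[i], carry = bits.Add64(x[i], y[i], carry)
        }
        var borrow uint64
        for i := 0; i < 6; i++ {
            z[i], borrow = bits.Sub64(sum[i], p[i], borrow)
        }
        if carry == 0 && borrow != 0 {
            *z = sum // x + y < p: no reduction needed
        }
    }

The addNF/subNF variants are branch-free instead, picking between the two candidates with cmovsq on the sign of the trial subtraction, as is visible in the assembly above.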
[elided: the tail of mcl_fp_addNF6Lbmi2, mcl_fp_sub6Lbmi2 and mcl_fp_subNF6Lbmi2, mcl_fpDbl_add6Lbmi2 and mcl_fpDbl_sub6Lbmi2 (12-limb add/subtract with reduction of the upper half), mcl_fp_mulUnitPre7Lbmi2, and the opening rows of mcl_fpDbl_mulPre7Lbmi2; the deleted hunk continues from here into the 7-limb routines.]
# 8-byte Reload - movq 24(%rax), %rdi - movq -32(%rsp), %rdx # 8-byte Reload - mulxq %rdi, %r9, %rax - movq %rax, -32(%rsp) # 8-byte Spill - addq %r14, %r9 - movq -24(%rsp), %rdx # 8-byte Reload - mulxq %rdi, %rax, %rdx - movq %rdx, -24(%rsp) # 8-byte Spill - adcq %rsi, %rax - movq -40(%rsp), %rdx # 8-byte Reload - mulxq %rdi, %r14, %rdx - movq %rdx, -40(%rsp) # 8-byte Spill - adcq %rbp, %r14 - movq -48(%rsp), %rdx # 8-byte Reload - mulxq %rdi, %r10, %rdx - movq %rdx, -48(%rsp) # 8-byte Spill - adcq %r11, %r10 - movq -56(%rsp), %rdx # 8-byte Reload - mulxq %rdi, %rbp, %rsi - adcq %r15, %rbp - movq -64(%rsp), %rdx # 8-byte Reload - mulxq %rdi, %r11, %r15 - adcq %rcx, %r11 - movq %r13, %rdx - mulxq %rdi, %r13, %rcx - adcq %rbx, %r13 - sbbq %r12, %r12 - andl $1, %r12d - addq -32(%rsp), %rax # 8-byte Folded Reload - adcq -24(%rsp), %r14 # 8-byte Folded Reload - adcq -40(%rsp), %r10 # 8-byte Folded Reload - adcq -48(%rsp), %rbp # 8-byte Folded Reload - adcq %rsi, %r11 - movq -16(%rsp), %rdi # 8-byte Reload - movq %r9, 24(%rdi) - adcq %r15, %r13 - adcq %rcx, %r12 - movq (%r8), %rdx - movq %rdx, -32(%rsp) # 8-byte Spill - movq 8(%r8), %rbx - movq %rbx, -24(%rsp) # 8-byte Spill - movq -8(%rsp), %rcx # 8-byte Reload - movq 32(%rcx), %rcx - mulxq %rcx, %rsi, %rdx - movq %rdx, -80(%rsp) # 8-byte Spill - addq %rax, %rsi - movq %rbx, %rdx - mulxq %rcx, %r9, %rax - movq %rax, -88(%rsp) # 8-byte Spill - adcq %r14, %r9 - movq 16(%r8), %rdx - movq %rdx, -40(%rsp) # 8-byte Spill - mulxq %rcx, %rax, %rdx - movq %rdx, -96(%rsp) # 8-byte Spill - adcq %r10, %rax - movq 24(%r8), %rdx - movq %rdx, -48(%rsp) # 8-byte Spill - mulxq %rcx, %r15, %rdx - movq %rdx, -104(%rsp) # 8-byte Spill - adcq %rbp, %r15 - movq 32(%r8), %rdx - movq %rdx, -56(%rsp) # 8-byte Spill - mulxq %rcx, %r10, %rbp - adcq %r11, %r10 - movq 40(%r8), %rdx - movq %rdx, -64(%rsp) # 8-byte Spill - mulxq %rcx, %r11, %rbx - adcq %r13, %r11 - movq 48(%r8), %rdx - movq %rdx, -72(%rsp) # 8-byte Spill - mulxq %rcx, %r14, %rcx - adcq %r12, %r14 - sbbq %r12, %r12 - andl $1, %r12d - addq -80(%rsp), %r9 # 8-byte Folded Reload - adcq -88(%rsp), %rax # 8-byte Folded Reload - adcq -96(%rsp), %r15 # 8-byte Folded Reload - adcq -104(%rsp), %r10 # 8-byte Folded Reload - adcq %rbp, %r11 - adcq %rbx, %r14 - adcq %rcx, %r12 - movq %rsi, 32(%rdi) - movq -8(%rsp), %rsi # 8-byte Reload - movq 40(%rsi), %rdi - movq -32(%rsp), %rdx # 8-byte Reload - mulxq %rdi, %r13, %rcx - movq %rcx, -32(%rsp) # 8-byte Spill - addq %r9, %r13 - movq -24(%rsp), %rdx # 8-byte Reload - mulxq %rdi, %rcx, %rdx - movq %rdx, -24(%rsp) # 8-byte Spill - adcq %rax, %rcx - movq -40(%rsp), %rdx # 8-byte Reload - mulxq %rdi, %rax, %rdx - movq %rdx, -40(%rsp) # 8-byte Spill - adcq %r15, %rax - movq -48(%rsp), %rdx # 8-byte Reload - mulxq %rdi, %rbx, %rdx - movq %rdx, -48(%rsp) # 8-byte Spill - adcq %r10, %rbx - movq -56(%rsp), %rdx # 8-byte Reload - mulxq %rdi, %rbp, %r15 - adcq %r11, %rbp - movq -64(%rsp), %rdx # 8-byte Reload - mulxq %rdi, %r9, %r11 - adcq %r14, %r9 - movq -72(%rsp), %rdx # 8-byte Reload - mulxq %rdi, %r10, %rdx - adcq %r12, %r10 - sbbq %rdi, %rdi - andl $1, %edi - addq -32(%rsp), %rcx # 8-byte Folded Reload - adcq -24(%rsp), %rax # 8-byte Folded Reload - adcq -40(%rsp), %rbx # 8-byte Folded Reload - adcq -48(%rsp), %rbp # 8-byte Folded Reload - adcq %r15, %r9 - movq -16(%rsp), %r14 # 8-byte Reload - movq %r13, 40(%r14) - adcq %r11, %r10 - adcq %rdx, %rdi - movq 48(%rsi), %rdx - mulxq (%r8), %r11, %rsi - movq %rsi, -8(%rsp) # 8-byte Spill - addq %rcx, %r11 - mulxq 8(%r8), %rsi, %r15 
- adcq %rax, %rsi - mulxq 16(%r8), %rcx, %rax - movq %rax, -24(%rsp) # 8-byte Spill - adcq %rbx, %rcx - mulxq 24(%r8), %rbx, %r12 - adcq %rbp, %rbx - mulxq 32(%r8), %rbp, %r13 - adcq %r9, %rbp - mulxq 40(%r8), %rax, %r9 - adcq %r10, %rax - mulxq 48(%r8), %rdx, %r8 - adcq %rdi, %rdx - sbbq %r10, %r10 - andl $1, %r10d - addq -8(%rsp), %rsi # 8-byte Folded Reload - adcq %r15, %rcx - movq %r11, 48(%r14) - movq %rsi, 56(%r14) - movq %rcx, 64(%r14) - adcq -24(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, 72(%r14) - adcq %r12, %rbp - movq %rbp, 80(%r14) - adcq %r13, %rax - movq %rax, 88(%r14) - adcq %r9, %rdx - movq %rdx, 96(%r14) - adcq %r8, %r10 - movq %r10, 104(%r14) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end96: - .size mcl_fpDbl_mulPre7Lbmi2, .Lfunc_end96-mcl_fpDbl_mulPre7Lbmi2 - - .globl mcl_fpDbl_sqrPre7Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre7Lbmi2,@function -mcl_fpDbl_sqrPre7Lbmi2: # @mcl_fpDbl_sqrPre7Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, -8(%rsp) # 8-byte Spill - movq 16(%rsi), %rdx - movq %rdx, -24(%rsp) # 8-byte Spill - movq (%rsi), %rcx - movq 8(%rsi), %rax - mulxq %rcx, %r8, %r10 - movq 24(%rsi), %rbx - movq %rbx, -32(%rsp) # 8-byte Spill - movq %rax, %rdx - mulxq %rcx, %r12, %rbp - movq %rbp, -16(%rsp) # 8-byte Spill - movq %rcx, %rdx - mulxq %rcx, %rdx, %rdi - movq %rdx, -48(%rsp) # 8-byte Spill - addq %r12, %rdi - adcq %rbp, %r8 - movq %rbx, %rdx - mulxq %rcx, %rbp, %r9 - adcq %r10, %rbp - movq 32(%rsi), %rdx - movq %rdx, -40(%rsp) # 8-byte Spill - mulxq %rcx, %r11, %r14 - adcq %r9, %r11 - movq 40(%rsi), %rdx - movq %rdx, -56(%rsp) # 8-byte Spill - mulxq %rcx, %r10, %r15 - adcq %r14, %r10 - movq 48(%rsi), %r14 - movq %r14, %rdx - mulxq %rcx, %rcx, %r13 - adcq %r15, %rcx - movq -8(%rsp), %rdx # 8-byte Reload - movq -48(%rsp), %rbx # 8-byte Reload - movq %rbx, (%rdx) - adcq $0, %r13 - addq %r12, %rdi - movq %rax, %rdx - mulxq %rax, %r12, %rdx - movq %rdx, -48(%rsp) # 8-byte Spill - adcq %r8, %r12 - movq -24(%rsp), %rdx # 8-byte Reload - mulxq %rax, %r8, %rdx - movq %rdx, -24(%rsp) # 8-byte Spill - adcq %rbp, %r8 - movq -32(%rsp), %rdx # 8-byte Reload - mulxq %rax, %r9, %rbp - adcq %r11, %r9 - movq -40(%rsp), %rdx # 8-byte Reload - mulxq %rax, %r15, %rdx - movq %rdx, -32(%rsp) # 8-byte Spill - adcq %r10, %r15 - movq -56(%rsp), %rdx # 8-byte Reload - mulxq %rax, %r11, %rbx - adcq %rcx, %r11 - movq %r14, %rdx - mulxq %rax, %r14, %rax - adcq %r13, %r14 - sbbq %r13, %r13 - andl $1, %r13d - addq -16(%rsp), %r12 # 8-byte Folded Reload - adcq -48(%rsp), %r8 # 8-byte Folded Reload - adcq -24(%rsp), %r9 # 8-byte Folded Reload - adcq %rbp, %r15 - movq -8(%rsp), %rcx # 8-byte Reload - movq %rdi, 8(%rcx) - adcq -32(%rsp), %r11 # 8-byte Folded Reload - adcq %rbx, %r14 - adcq %rax, %r13 - movq (%rsi), %rdx - movq %rdx, -32(%rsp) # 8-byte Spill - movq 8(%rsi), %rcx - movq %rcx, -24(%rsp) # 8-byte Spill - movq 16(%rsi), %rbx - mulxq %rbx, %rax, %rdx - movq %rdx, -64(%rsp) # 8-byte Spill - addq %r12, %rax - movq %rax, -16(%rsp) # 8-byte Spill - movq %rcx, %rdx - mulxq %rbx, %r10, %rax - movq %rax, -72(%rsp) # 8-byte Spill - adcq %r8, %r10 - movq %rbx, %rdx - mulxq %rbx, %r12, %rax - movq %rax, -80(%rsp) # 8-byte Spill - adcq %r9, %r12 - movq 24(%rsi), %rax - movq %rax, %rdx - mulxq %rbx, %r8, %rdi - movq %rdi, -56(%rsp) # 8-byte Spill - adcq %r8, %r15 - movq 32(%rsi), %rdx - movq %rdx, -40(%rsp) # 8-byte Spill - mulxq %rbx, %rcx, %rdx - movq %rdx, -88(%rsp) # 8-byte Spill - 
adcq %r11, %rcx - movq 40(%rsi), %rdx - movq %rdx, -48(%rsp) # 8-byte Spill - mulxq %rbx, %rbp, %r11 - adcq %r14, %rbp - movq 48(%rsi), %r14 - movq %r14, %rdx - mulxq %rbx, %r9, %rdx - adcq %r13, %r9 - sbbq %rbx, %rbx - andl $1, %ebx - addq -64(%rsp), %r10 # 8-byte Folded Reload - adcq -72(%rsp), %r12 # 8-byte Folded Reload - adcq -80(%rsp), %r15 # 8-byte Folded Reload - adcq %rdi, %rcx - adcq -88(%rsp), %rbp # 8-byte Folded Reload - adcq %r11, %r9 - adcq %rdx, %rbx - movq -32(%rsp), %rdx # 8-byte Reload - mulxq %rax, %rdi, %rdx - movq %rdx, -32(%rsp) # 8-byte Spill - addq %r10, %rdi - movq -24(%rsp), %rdx # 8-byte Reload - mulxq %rax, %r11, %rdx - movq %rdx, -24(%rsp) # 8-byte Spill - adcq %r12, %r11 - adcq %r8, %r15 - movq %rax, %rdx - mulxq %rax, %r8, %rdx - movq %rdx, -64(%rsp) # 8-byte Spill - adcq %rcx, %r8 - movq -40(%rsp), %rdx # 8-byte Reload - mulxq %rax, %r13, %rcx - movq %rcx, -40(%rsp) # 8-byte Spill - adcq %rbp, %r13 - movq -48(%rsp), %rdx # 8-byte Reload - mulxq %rax, %r12, %rbp - adcq %r9, %r12 - movq %r14, %rdx - mulxq %rax, %rcx, %rax - adcq %rbx, %rcx - sbbq %r10, %r10 - andl $1, %r10d - addq -32(%rsp), %r11 # 8-byte Folded Reload - adcq -24(%rsp), %r15 # 8-byte Folded Reload - adcq -56(%rsp), %r8 # 8-byte Folded Reload - adcq -64(%rsp), %r13 # 8-byte Folded Reload - movq -8(%rsp), %rdx # 8-byte Reload - movq -16(%rsp), %rbx # 8-byte Reload - movq %rbx, 16(%rdx) - movq %rdi, 24(%rdx) - adcq -40(%rsp), %r12 # 8-byte Folded Reload - adcq %rbp, %rcx - adcq %rax, %r10 - movq (%rsi), %rdx - movq %rdx, -32(%rsp) # 8-byte Spill - movq 8(%rsi), %rdi - movq %rdi, -24(%rsp) # 8-byte Spill - movq 32(%rsi), %rbx - mulxq %rbx, %rax, %rdx - movq %rdx, -80(%rsp) # 8-byte Spill - addq %r11, %rax - movq %rax, -16(%rsp) # 8-byte Spill - movq %rdi, %rdx - mulxq %rbx, %r9, %rax - movq %rax, -88(%rsp) # 8-byte Spill - adcq %r15, %r9 - movq 16(%rsi), %rdx - movq %rdx, -40(%rsp) # 8-byte Spill - mulxq %rbx, %r15, %rax - movq %rax, -96(%rsp) # 8-byte Spill - adcq %r8, %r15 - movq 24(%rsi), %rdx - movq %rdx, -48(%rsp) # 8-byte Spill - mulxq %rbx, %r8, %rbp - adcq %r13, %r8 - movq %rbx, %rdx - mulxq %rbx, %r13, %r14 - adcq %r12, %r13 - movq 40(%rsi), %rax - movq %rax, %rdx - mulxq %rbx, %rdx, %rdi - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rdi, -56(%rsp) # 8-byte Spill - adcq %rdx, %rcx - movq 48(%rsi), %rdx - movq %rdx, -64(%rsp) # 8-byte Spill - mulxq %rbx, %r11, %rdx - adcq %r10, %r11 - sbbq %r12, %r12 - andl $1, %r12d - addq -80(%rsp), %r9 # 8-byte Folded Reload - adcq -88(%rsp), %r15 # 8-byte Folded Reload - adcq -96(%rsp), %r8 # 8-byte Folded Reload - adcq %rbp, %r13 - adcq %r14, %rcx - adcq %rdi, %r11 - adcq %rdx, %r12 - movq -32(%rsp), %rdx # 8-byte Reload - mulxq %rax, %r14, %rdi - addq %r9, %r14 - movq -24(%rsp), %rdx # 8-byte Reload - mulxq %rax, %rbx, %rdx - movq %rdx, -24(%rsp) # 8-byte Spill - adcq %r15, %rbx - movq -40(%rsp), %rdx # 8-byte Reload - mulxq %rax, %rbp, %rdx - movq %rdx, -32(%rsp) # 8-byte Spill - adcq %r8, %rbp - movq -48(%rsp), %rdx # 8-byte Reload - mulxq %rax, %r10, %r15 - adcq %r13, %r10 - adcq -72(%rsp), %rcx # 8-byte Folded Reload - movq %rax, %rdx - mulxq %rax, %r9, %r13 - adcq %r11, %r9 - movq -64(%rsp), %rdx # 8-byte Reload - mulxq %rax, %rax, %r11 - adcq %r12, %rax - sbbq %r8, %r8 - andl $1, %r8d - addq %rdi, %rbx - adcq -24(%rsp), %rbp # 8-byte Folded Reload - adcq -32(%rsp), %r10 # 8-byte Folded Reload - adcq %r15, %rcx - movq -8(%rsp), %rdi # 8-byte Reload - movq -16(%rsp), %rdx # 8-byte Reload - movq %rdx, 32(%rdi) - movq %r14, 40(%rdi) - adcq 
-56(%rsp), %r9 # 8-byte Folded Reload - adcq %r13, %rax - adcq %r11, %r8 - movq 48(%rsi), %rdx - mulxq (%rsi), %r12, %r11 - addq %rbx, %r12 - mulxq 8(%rsi), %rbx, %r14 - adcq %rbp, %rbx - mulxq 16(%rsi), %rbp, %r15 - adcq %r10, %rbp - mulxq 24(%rsi), %rdi, %r10 - adcq %rcx, %rdi - mulxq 32(%rsi), %rcx, %r13 - adcq %r9, %rcx - mulxq 40(%rsi), %rsi, %r9 - adcq %rax, %rsi - mulxq %rdx, %rdx, %rax - adcq %r8, %rdx - sbbq %r8, %r8 - andl $1, %r8d - addq %r11, %rbx - adcq %r14, %rbp - movq -8(%rsp), %r11 # 8-byte Reload - movq %r12, 48(%r11) - movq %rbx, 56(%r11) - movq %rbp, 64(%r11) - adcq %r15, %rdi - movq %rdi, 72(%r11) - adcq %r10, %rcx - movq %rcx, 80(%r11) - adcq %r13, %rsi - movq %rsi, 88(%r11) - adcq %r9, %rdx - movq %rdx, 96(%r11) - adcq %rax, %r8 - movq %r8, 104(%r11) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end97: - .size mcl_fpDbl_sqrPre7Lbmi2, .Lfunc_end97-mcl_fpDbl_sqrPre7Lbmi2 - - .globl mcl_fp_mont7Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont7Lbmi2,@function -mcl_fp_mont7Lbmi2: # @mcl_fp_mont7Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $56, %rsp - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rdi, -120(%rsp) # 8-byte Spill - movq 48(%rsi), %rdi - movq %rdi, -24(%rsp) # 8-byte Spill - movq (%rdx), %rax - movq %rdi, %rdx - mulxq %rax, %rdx, %r8 - movq %rdx, 48(%rsp) # 8-byte Spill - movq 40(%rsi), %rdx - movq %rdx, -32(%rsp) # 8-byte Spill - mulxq %rax, %rdx, %r9 - movq %rdx, 40(%rsp) # 8-byte Spill - movq 32(%rsi), %rdx - movq %rdx, -72(%rsp) # 8-byte Spill - movq 24(%rsi), %r11 - movq %r11, -64(%rsp) # 8-byte Spill - movq 16(%rsi), %r10 - movq %r10, -56(%rsp) # 8-byte Spill - movq (%rsi), %r15 - movq %r15, -40(%rsp) # 8-byte Spill - movq 8(%rsi), %rsi - movq %rsi, -48(%rsp) # 8-byte Spill - mulxq %rax, %r13, %rdi - movq %r11, %rdx - mulxq %rax, %r14, %rbp - movq %r10, %rdx - mulxq %rax, %r12, %rbx - movq %rsi, %rdx - mulxq %rax, %r10, %rsi - movq %r15, %rdx - mulxq %rax, %r15, %r11 - addq %r10, %r11 - adcq %r12, %rsi - movq %rsi, -112(%rsp) # 8-byte Spill - adcq %r14, %rbx - movq %rbx, -104(%rsp) # 8-byte Spill - adcq %r13, %rbp - movq %rbp, -96(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, -88(%rsp) # 8-byte Spill - adcq 48(%rsp), %r9 # 8-byte Folded Reload - movq %r9, -80(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, %r13 - movq -8(%rcx), %rax - movq %rax, -8(%rsp) # 8-byte Spill - movq %r15, %rdx - imulq %rax, %rdx - movq (%rcx), %rdi - movq %rdi, 24(%rsp) # 8-byte Spill - movq 32(%rcx), %rax - movq %rax, 48(%rsp) # 8-byte Spill - mulxq %rax, %rbx, %r9 - movq 16(%rcx), %rsi - movq %rsi, 40(%rsp) # 8-byte Spill - mulxq %rsi, %r14, %rbp - movq 8(%rcx), %rax - movq %rax, 32(%rsp) # 8-byte Spill - mulxq %rax, %rsi, %rax - mulxq %rdi, %r8, %r12 - addq %rsi, %r12 - adcq %r14, %rax - movq %rax, %rsi - movq 24(%rcx), %rax - movq %rax, 16(%rsp) # 8-byte Spill - mulxq %rax, %r10, %r14 - adcq %rbp, %r10 - adcq %rbx, %r14 - movq 40(%rcx), %rax - movq %rax, 8(%rsp) # 8-byte Spill - mulxq %rax, %rbp, %rdi - adcq %r9, %rbp - movq 48(%rcx), %rax - movq %rax, (%rsp) # 8-byte Spill - mulxq %rax, %rax, %rbx - adcq %rdi, %rax - adcq $0, %rbx - addq %r15, %r8 - adcq %r11, %r12 - adcq -112(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -112(%rsp) # 8-byte Spill - adcq -104(%rsp), %r10 # 8-byte Folded Reload - adcq -96(%rsp), %r14 # 8-byte Folded Reload - adcq -88(%rsp), %rbp # 8-byte Folded Reload - adcq -80(%rsp), %rax # 8-byte Folded Reload - adcq %r13, %rbx - 
sbbq %r11, %r11 - andl $1, %r11d - movq -16(%rsp), %rcx # 8-byte Reload - movq 8(%rcx), %rdx - mulxq -24(%rsp), %rdi, %rcx # 8-byte Folded Reload - movq %rdi, -96(%rsp) # 8-byte Spill - movq %rcx, -80(%rsp) # 8-byte Spill - mulxq -32(%rsp), %rsi, %rcx # 8-byte Folded Reload - movq %rsi, -128(%rsp) # 8-byte Spill - movq %rcx, -88(%rsp) # 8-byte Spill - mulxq -48(%rsp), %r9, %r8 # 8-byte Folded Reload - mulxq -40(%rsp), %rsi, %rcx # 8-byte Folded Reload - movq %rsi, -104(%rsp) # 8-byte Spill - addq %r9, %rcx - movq %rcx, %rdi - mulxq -56(%rsp), %rcx, %r9 # 8-byte Folded Reload - adcq %r8, %rcx - movq %rcx, %rsi - mulxq -64(%rsp), %r13, %rcx # 8-byte Folded Reload - adcq %r9, %r13 - mulxq -72(%rsp), %r8, %r15 # 8-byte Folded Reload - adcq %rcx, %r8 - adcq -128(%rsp), %r15 # 8-byte Folded Reload - movq -88(%rsp), %rdx # 8-byte Reload - adcq -96(%rsp), %rdx # 8-byte Folded Reload - movq -80(%rsp), %rcx # 8-byte Reload - adcq $0, %rcx - movq -104(%rsp), %r9 # 8-byte Reload - addq %r12, %r9 - movq %r9, -104(%rsp) # 8-byte Spill - adcq -112(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, %r12 - adcq %r10, %rsi - movq %rsi, -128(%rsp) # 8-byte Spill - adcq %r14, %r13 - adcq %rbp, %r8 - adcq %rax, %r15 - adcq %rbx, %rdx - movq %rdx, -88(%rsp) # 8-byte Spill - adcq %r11, %rcx - movq %rcx, -80(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -112(%rsp) # 8-byte Spill - movq %r9, %rdx - imulq -8(%rsp), %rdx # 8-byte Folded Reload - mulxq (%rsp), %r10, %rax # 8-byte Folded Reload - movq %rax, -96(%rsp) # 8-byte Spill - mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload - mulxq 32(%rsp), %rdi, %rbx # 8-byte Folded Reload - mulxq 24(%rsp), %r14, %r9 # 8-byte Folded Reload - addq %rdi, %r9 - mulxq 40(%rsp), %rbp, %r11 # 8-byte Folded Reload - adcq %rbx, %rbp - adcq %rcx, %r11 - mulxq 48(%rsp), %rbx, %rsi # 8-byte Folded Reload - adcq %rax, %rbx - mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload - adcq %rsi, %rax - adcq %r10, %rcx - movq -96(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq -104(%rsp), %r14 # 8-byte Folded Reload - adcq %r12, %r9 - adcq -128(%rsp), %rbp # 8-byte Folded Reload - adcq %r13, %r11 - adcq %r8, %rbx - adcq %r15, %rax - adcq -88(%rsp), %rcx # 8-byte Folded Reload - adcq -80(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -96(%rsp) # 8-byte Spill - adcq $0, -112(%rsp) # 8-byte Folded Spill - movq -16(%rsp), %rdx # 8-byte Reload - movq 16(%rdx), %rdx - mulxq -24(%rsp), %rdi, %rsi # 8-byte Folded Reload - movq %rdi, -104(%rsp) # 8-byte Spill - movq %rsi, -80(%rsp) # 8-byte Spill - mulxq -32(%rsp), %rdi, %rsi # 8-byte Folded Reload - movq %rdi, -128(%rsp) # 8-byte Spill - movq %rsi, -88(%rsp) # 8-byte Spill - mulxq -56(%rsp), %rdi, %r10 # 8-byte Folded Reload - mulxq -48(%rsp), %rsi, %r13 # 8-byte Folded Reload - mulxq -40(%rsp), %r8, %r15 # 8-byte Folded Reload - addq %rsi, %r15 - adcq %rdi, %r13 - mulxq -64(%rsp), %r12, %rsi # 8-byte Folded Reload - adcq %r10, %r12 - mulxq -72(%rsp), %r10, %r14 # 8-byte Folded Reload - adcq %rsi, %r10 - adcq -128(%rsp), %r14 # 8-byte Folded Reload - movq -88(%rsp), %rsi # 8-byte Reload - adcq -104(%rsp), %rsi # 8-byte Folded Reload - movq -80(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %r9, %r8 - movq %r8, -104(%rsp) # 8-byte Spill - adcq %rbp, %r15 - adcq %r11, %r13 - adcq %rbx, %r12 - adcq %rax, %r10 - adcq %rcx, %r14 - adcq -96(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -88(%rsp) # 8-byte Spill - adcq -112(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - 
movq %rax, %rbx - movq %r8, %rdx - imulq -8(%rsp), %rdx # 8-byte Folded Reload - mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload - movq %rcx, -112(%rsp) # 8-byte Spill - movq %rax, -96(%rsp) # 8-byte Spill - mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload - mulxq 32(%rsp), %rbp, %rsi # 8-byte Folded Reload - mulxq 24(%rsp), %r11, %r8 # 8-byte Folded Reload - addq %rbp, %r8 - mulxq 40(%rsp), %rbp, %r9 # 8-byte Folded Reload - adcq %rsi, %rbp - adcq %rcx, %r9 - mulxq 48(%rsp), %rsi, %rdi # 8-byte Folded Reload - adcq %rax, %rsi - mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload - adcq %rdi, %rax - adcq -112(%rsp), %rcx # 8-byte Folded Reload - movq -96(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq -104(%rsp), %r11 # 8-byte Folded Reload - adcq %r15, %r8 - adcq %r13, %rbp - adcq %r12, %r9 - adcq %r10, %rsi - adcq %r14, %rax - adcq -88(%rsp), %rcx # 8-byte Folded Reload - adcq -80(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -96(%rsp) # 8-byte Spill - adcq $0, %rbx - movq %rbx, -88(%rsp) # 8-byte Spill - movq -16(%rsp), %rdx # 8-byte Reload - movq 24(%rdx), %rdx - mulxq -24(%rsp), %rbx, %rdi # 8-byte Folded Reload - movq %rbx, -112(%rsp) # 8-byte Spill - movq %rdi, -80(%rsp) # 8-byte Spill - mulxq -32(%rsp), %rdi, %r13 # 8-byte Folded Reload - movq %rdi, -128(%rsp) # 8-byte Spill - mulxq -56(%rsp), %r10, %r11 # 8-byte Folded Reload - mulxq -48(%rsp), %rdi, %r15 # 8-byte Folded Reload - mulxq -40(%rsp), %rbx, %r12 # 8-byte Folded Reload - movq %rbx, -104(%rsp) # 8-byte Spill - addq %rdi, %r12 - adcq %r10, %r15 - mulxq -64(%rsp), %rbx, %rdi # 8-byte Folded Reload - adcq %r11, %rbx - mulxq -72(%rsp), %r10, %r14 # 8-byte Folded Reload - adcq %rdi, %r10 - adcq -128(%rsp), %r14 # 8-byte Folded Reload - adcq -112(%rsp), %r13 # 8-byte Folded Reload - movq -80(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - movq -104(%rsp), %rdi # 8-byte Reload - addq %r8, %rdi - movq %rdi, -104(%rsp) # 8-byte Spill - adcq %rbp, %r12 - adcq %r9, %r15 - adcq %rsi, %rbx - adcq %rax, %r10 - adcq %rcx, %r14 - adcq -96(%rsp), %r13 # 8-byte Folded Reload - adcq -88(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -96(%rsp) # 8-byte Spill - movq %rdi, %rdx - imulq -8(%rsp), %rdx # 8-byte Folded Reload - mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload - movq %rcx, -112(%rsp) # 8-byte Spill - movq %rax, -88(%rsp) # 8-byte Spill - mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload - mulxq 32(%rsp), %rbp, %rsi # 8-byte Folded Reload - mulxq 24(%rsp), %r11, %r8 # 8-byte Folded Reload - addq %rbp, %r8 - mulxq 40(%rsp), %rbp, %r9 # 8-byte Folded Reload - adcq %rsi, %rbp - adcq %rcx, %r9 - mulxq 48(%rsp), %rsi, %rdi # 8-byte Folded Reload - adcq %rax, %rsi - mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload - adcq %rdi, %rax - adcq -112(%rsp), %rcx # 8-byte Folded Reload - movq -88(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq -104(%rsp), %r11 # 8-byte Folded Reload - adcq %r12, %r8 - adcq %r15, %rbp - adcq %rbx, %r9 - adcq %r10, %rsi - adcq %r14, %rax - adcq %r13, %rcx - adcq -80(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -88(%rsp) # 8-byte Spill - adcq $0, -96(%rsp) # 8-byte Folded Spill - movq -16(%rsp), %rdx # 8-byte Reload - movq 32(%rdx), %rdx - mulxq -24(%rsp), %rbx, %rdi # 8-byte Folded Reload - movq %rbx, -104(%rsp) # 8-byte Spill - movq %rdi, -80(%rsp) # 8-byte Spill - mulxq -32(%rsp), %rdi, %r11 # 8-byte Folded Reload - movq %rdi, -112(%rsp) # 8-byte Spill - mulxq -56(%rsp), %r10, %r13 # 8-byte Folded Reload - mulxq -48(%rsp), %rdi, %r15 
# 8-byte Folded Reload - mulxq -40(%rsp), %rbx, %r12 # 8-byte Folded Reload - addq %rdi, %r12 - adcq %r10, %r15 - mulxq -64(%rsp), %r10, %rdi # 8-byte Folded Reload - adcq %r13, %r10 - mulxq -72(%rsp), %r13, %r14 # 8-byte Folded Reload - adcq %rdi, %r13 - adcq -112(%rsp), %r14 # 8-byte Folded Reload - adcq -104(%rsp), %r11 # 8-byte Folded Reload - movq -80(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %r8, %rbx - movq %rbx, -112(%rsp) # 8-byte Spill - adcq %rbp, %r12 - adcq %r9, %r15 - adcq %rsi, %r10 - adcq %rax, %r13 - adcq %rcx, %r14 - adcq -88(%rsp), %r11 # 8-byte Folded Reload - movq %r11, -128(%rsp) # 8-byte Spill - adcq -96(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -104(%rsp) # 8-byte Spill - movq %rbx, %rdx - imulq -8(%rsp), %rdx # 8-byte Folded Reload - mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload - movq %rcx, -88(%rsp) # 8-byte Spill - movq %rax, -96(%rsp) # 8-byte Spill - mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload - mulxq 32(%rsp), %rbp, %rsi # 8-byte Folded Reload - mulxq 24(%rsp), %r9, %r11 # 8-byte Folded Reload - addq %rbp, %r11 - mulxq 40(%rsp), %rbp, %r8 # 8-byte Folded Reload - adcq %rsi, %rbp - adcq %rcx, %r8 - mulxq 48(%rsp), %rsi, %rdi # 8-byte Folded Reload - adcq %rax, %rsi - mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload - adcq %rdi, %rax - adcq -88(%rsp), %rcx # 8-byte Folded Reload - movq -96(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq -112(%rsp), %r9 # 8-byte Folded Reload - adcq %r12, %r11 - adcq %r15, %rbp - adcq %r10, %r8 - adcq %r13, %rsi - adcq %r14, %rax - adcq -128(%rsp), %rcx # 8-byte Folded Reload - adcq -80(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -96(%rsp) # 8-byte Spill - adcq $0, -104(%rsp) # 8-byte Folded Spill - movq -16(%rsp), %rdx # 8-byte Reload - movq 40(%rdx), %rdx - mulxq -24(%rsp), %rbx, %rdi # 8-byte Folded Reload - movq %rbx, -112(%rsp) # 8-byte Spill - movq %rdi, -80(%rsp) # 8-byte Spill - mulxq -32(%rsp), %rbx, %rdi # 8-byte Folded Reload - movq %rbx, -128(%rsp) # 8-byte Spill - movq %rdi, -88(%rsp) # 8-byte Spill - mulxq -56(%rsp), %rbx, %r10 # 8-byte Folded Reload - mulxq -48(%rsp), %rdi, %r13 # 8-byte Folded Reload - mulxq -40(%rsp), %r9, %r12 # 8-byte Folded Reload - addq %rdi, %r12 - adcq %rbx, %r13 - mulxq -64(%rsp), %r15, %rdi # 8-byte Folded Reload - adcq %r10, %r15 - mulxq -72(%rsp), %r10, %r14 # 8-byte Folded Reload - adcq %rdi, %r10 - adcq -128(%rsp), %r14 # 8-byte Folded Reload - movq -88(%rsp), %rdi # 8-byte Reload - adcq -112(%rsp), %rdi # 8-byte Folded Reload - movq -80(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %r11, %r9 - movq %r9, -112(%rsp) # 8-byte Spill - adcq %rbp, %r12 - adcq %r8, %r13 - adcq %rsi, %r15 - adcq %rax, %r10 - adcq %rcx, %r14 - adcq -96(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, -88(%rsp) # 8-byte Spill - adcq -104(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -104(%rsp) # 8-byte Spill - movq %r9, %rdx - imulq -8(%rsp), %rdx # 8-byte Folded Reload - mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload - movq %rcx, -128(%rsp) # 8-byte Spill - movq %rax, -96(%rsp) # 8-byte Spill - mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload - mulxq 32(%rsp), %rdi, %rsi # 8-byte Folded Reload - mulxq 24(%rsp), %r11, %rbx # 8-byte Folded Reload - addq %rdi, %rbx - mulxq 40(%rsp), %r8, %r9 # 8-byte Folded Reload - adcq %rsi, %r8 - adcq %rcx, %r9 - mulxq 48(%rsp), %rdi, %rbp # 8-byte Folded Reload - adcq %rax, %rdi - mulxq 8(%rsp), 
%rcx, %rsi # 8-byte Folded Reload - adcq %rbp, %rcx - adcq -128(%rsp), %rsi # 8-byte Folded Reload - movq -96(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq -112(%rsp), %r11 # 8-byte Folded Reload - adcq %r12, %rbx - adcq %r13, %r8 - adcq %r15, %r9 - adcq %r10, %rdi - adcq %r14, %rcx - adcq -88(%rsp), %rsi # 8-byte Folded Reload - adcq -80(%rsp), %rax # 8-byte Folded Reload - movq %rax, -96(%rsp) # 8-byte Spill - movq -104(%rsp), %r12 # 8-byte Reload - adcq $0, %r12 - movq -16(%rsp), %rax # 8-byte Reload - movq 48(%rax), %rdx - mulxq -24(%rsp), %rbp, %rax # 8-byte Folded Reload - movq %rbp, -80(%rsp) # 8-byte Spill - movq %rax, -16(%rsp) # 8-byte Spill - mulxq -32(%rsp), %rbp, %rax # 8-byte Folded Reload - movq %rbp, -88(%rsp) # 8-byte Spill - movq %rax, -24(%rsp) # 8-byte Spill - mulxq -72(%rsp), %rbp, %rax # 8-byte Folded Reload - movq %rbp, -72(%rsp) # 8-byte Spill - movq %rax, -32(%rsp) # 8-byte Spill - mulxq -64(%rsp), %r13, %rbp # 8-byte Folded Reload - mulxq -56(%rsp), %r14, %r15 # 8-byte Folded Reload - mulxq -48(%rsp), %rax, %r11 # 8-byte Folded Reload - mulxq -40(%rsp), %rdx, %r10 # 8-byte Folded Reload - movq %rdx, -40(%rsp) # 8-byte Spill - addq %rax, %r10 - adcq %r14, %r11 - adcq %r13, %r15 - adcq -72(%rsp), %rbp # 8-byte Folded Reload - movq -32(%rsp), %r14 # 8-byte Reload - adcq -88(%rsp), %r14 # 8-byte Folded Reload - movq -24(%rsp), %rdx # 8-byte Reload - adcq -80(%rsp), %rdx # 8-byte Folded Reload - movq -16(%rsp), %rax # 8-byte Reload - adcq $0, %rax - movq -40(%rsp), %r13 # 8-byte Reload - addq %rbx, %r13 - movq %r13, -40(%rsp) # 8-byte Spill - adcq %r8, %r10 - adcq %r9, %r11 - adcq %rdi, %r15 - adcq %rcx, %rbp - movq %rbp, -48(%rsp) # 8-byte Spill - adcq %rsi, %r14 - movq %r14, -32(%rsp) # 8-byte Spill - adcq -96(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -24(%rsp) # 8-byte Spill - adcq %r12, %rax - movq %rax, -16(%rsp) # 8-byte Spill - sbbq %rdi, %rdi - movq -8(%rsp), %rdx # 8-byte Reload - imulq %r13, %rdx - mulxq 16(%rsp), %rbp, %rsi # 8-byte Folded Reload - mulxq 32(%rsp), %rcx, %rbx # 8-byte Folded Reload - mulxq 24(%rsp), %r13, %rax # 8-byte Folded Reload - addq %rcx, %rax - mulxq 40(%rsp), %rcx, %r9 # 8-byte Folded Reload - adcq %rbx, %rcx - adcq %rbp, %r9 - mulxq 48(%rsp), %rbp, %rbx # 8-byte Folded Reload - adcq %rsi, %rbp - mulxq 8(%rsp), %rsi, %r14 # 8-byte Folded Reload - adcq %rbx, %rsi - mulxq (%rsp), %rdx, %rbx # 8-byte Folded Reload - adcq %r14, %rdx - adcq $0, %rbx - andl $1, %edi - addq -40(%rsp), %r13 # 8-byte Folded Reload - adcq %r10, %rax - adcq %r11, %rcx - adcq %r15, %r9 - adcq -48(%rsp), %rbp # 8-byte Folded Reload - adcq -32(%rsp), %rsi # 8-byte Folded Reload - adcq -24(%rsp), %rdx # 8-byte Folded Reload - adcq -16(%rsp), %rbx # 8-byte Folded Reload - adcq $0, %rdi - movq %rax, %r8 - subq 24(%rsp), %r8 # 8-byte Folded Reload - movq %rcx, %r10 - sbbq 32(%rsp), %r10 # 8-byte Folded Reload - movq %r9, %r11 - sbbq 40(%rsp), %r11 # 8-byte Folded Reload - movq %rbp, %r14 - sbbq 16(%rsp), %r14 # 8-byte Folded Reload - movq %rsi, %r15 - sbbq 48(%rsp), %r15 # 8-byte Folded Reload - movq %rdx, %r12 - sbbq 8(%rsp), %r12 # 8-byte Folded Reload - movq %rbx, %r13 - sbbq (%rsp), %r13 # 8-byte Folded Reload - sbbq $0, %rdi - andl $1, %edi - cmovneq %rbx, %r13 - testb %dil, %dil - cmovneq %rax, %r8 - movq -120(%rsp), %rax # 8-byte Reload - movq %r8, (%rax) - cmovneq %rcx, %r10 - movq %r10, 8(%rax) - cmovneq %r9, %r11 - movq %r11, 16(%rax) - cmovneq %rbp, %r14 - movq %r14, 24(%rax) - cmovneq %rsi, %r15 - movq %r15, 32(%rax) - cmovneq %rdx, %r12 - movq 
%r12, 40(%rax) - movq %r13, 48(%rax) - addq $56, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end98: - .size mcl_fp_mont7Lbmi2, .Lfunc_end98-mcl_fp_mont7Lbmi2 - - .globl mcl_fp_montNF7Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF7Lbmi2,@function -mcl_fp_montNF7Lbmi2: # @mcl_fp_montNF7Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $40, %rsp - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rdi, -104(%rsp) # 8-byte Spill - movq (%rsi), %rax - movq %rax, -32(%rsp) # 8-byte Spill - movq 8(%rsi), %rdi - movq %rdi, -48(%rsp) # 8-byte Spill - movq (%rdx), %rbp - movq %rdi, %rdx - mulxq %rbp, %rdi, %rbx - movq %rax, %rdx - mulxq %rbp, %r8, %r14 - movq 16(%rsi), %rdx - movq %rdx, -56(%rsp) # 8-byte Spill - addq %rdi, %r14 - mulxq %rbp, %r15, %rax - adcq %rbx, %r15 - movq 24(%rsi), %rdx - movq %rdx, -64(%rsp) # 8-byte Spill - mulxq %rbp, %rbx, %rdi - adcq %rax, %rbx - movq 32(%rsi), %rdx - movq %rdx, -72(%rsp) # 8-byte Spill - mulxq %rbp, %r11, %rax - adcq %rdi, %r11 - movq 40(%rsi), %rdx - movq %rdx, -80(%rsp) # 8-byte Spill - mulxq %rbp, %r9, %rdi - adcq %rax, %r9 - movq 48(%rsi), %rdx - movq %rdx, -88(%rsp) # 8-byte Spill - mulxq %rbp, %r10, %rbp - adcq %rdi, %r10 - adcq $0, %rbp - movq -8(%rcx), %rax - movq %rax, 8(%rsp) # 8-byte Spill - movq %r8, %rdx - imulq %rax, %rdx - movq (%rcx), %rax - movq %rax, 24(%rsp) # 8-byte Spill - mulxq %rax, %rax, %rsi - movq %rsi, -96(%rsp) # 8-byte Spill - addq %r8, %rax - movq 8(%rcx), %rax - movq %rax, 16(%rsp) # 8-byte Spill - mulxq %rax, %r8, %rsi - movq %rsi, -112(%rsp) # 8-byte Spill - adcq %r14, %r8 - movq 16(%rcx), %rax - movq %rax, (%rsp) # 8-byte Spill - mulxq %rax, %rsi, %r13 - adcq %r15, %rsi - movq 24(%rcx), %rax - movq %rax, -8(%rsp) # 8-byte Spill - mulxq %rax, %r12, %rax - adcq %rbx, %r12 - movq 32(%rcx), %rdi - movq %rdi, -16(%rsp) # 8-byte Spill - mulxq %rdi, %r15, %rbx - adcq %r11, %r15 - movq 40(%rcx), %rdi - movq %rdi, -24(%rsp) # 8-byte Spill - mulxq %rdi, %r14, %rdi - adcq %r9, %r14 - movq 48(%rcx), %rcx - movq %rcx, 32(%rsp) # 8-byte Spill - mulxq %rcx, %r11, %rcx - adcq %r10, %r11 - adcq $0, %rbp - addq -96(%rsp), %r8 # 8-byte Folded Reload - movq %r8, -96(%rsp) # 8-byte Spill - adcq -112(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -112(%rsp) # 8-byte Spill - adcq %r13, %r12 - adcq %rax, %r15 - adcq %rbx, %r14 - adcq %rdi, %r11 - adcq %rcx, %rbp - movq -40(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rdx - mulxq -48(%rsp), %rcx, %rsi # 8-byte Folded Reload - mulxq -32(%rsp), %r13, %rax # 8-byte Folded Reload - addq %rcx, %rax - mulxq -56(%rsp), %rcx, %rdi # 8-byte Folded Reload - adcq %rsi, %rcx - mulxq -64(%rsp), %rsi, %r8 # 8-byte Folded Reload - adcq %rdi, %rsi - mulxq -72(%rsp), %rdi, %r9 # 8-byte Folded Reload - adcq %r8, %rdi - mulxq -80(%rsp), %r8, %rbx # 8-byte Folded Reload - adcq %r9, %r8 - mulxq -88(%rsp), %r9, %r10 # 8-byte Folded Reload - adcq %rbx, %r9 - adcq $0, %r10 - addq -96(%rsp), %r13 # 8-byte Folded Reload - adcq -112(%rsp), %rax # 8-byte Folded Reload - adcq %r12, %rcx - adcq %r15, %rsi - adcq %r14, %rdi - adcq %r11, %r8 - adcq %rbp, %r9 - adcq $0, %r10 - movq %r13, %rdx - imulq 8(%rsp), %rdx # 8-byte Folded Reload - mulxq 24(%rsp), %rbp, %rbx # 8-byte Folded Reload - movq %rbx, -96(%rsp) # 8-byte Spill - addq %r13, %rbp - mulxq 16(%rsp), %rbp, %r14 # 8-byte Folded Reload - adcq %rax, %rbp - mulxq (%rsp), %rax, %r11 # 8-byte Folded Reload - adcq %rcx, %rax - mulxq -8(%rsp), %r12, %rcx # 8-byte Folded Reload - adcq 
%rsi, %r12 - mulxq -16(%rsp), %r15, %rbx # 8-byte Folded Reload - adcq %rdi, %r15 - mulxq -24(%rsp), %r13, %rdi # 8-byte Folded Reload - adcq %r8, %r13 - mulxq 32(%rsp), %rsi, %rdx # 8-byte Folded Reload - adcq %r9, %rsi - adcq $0, %r10 - addq -96(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, -96(%rsp) # 8-byte Spill - adcq %r14, %rax - movq %rax, -112(%rsp) # 8-byte Spill - adcq %r11, %r12 - adcq %rcx, %r15 - adcq %rbx, %r13 - adcq %rdi, %rsi - adcq %rdx, %r10 - movq -40(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rdx - mulxq -48(%rsp), %rcx, %rax # 8-byte Folded Reload - mulxq -32(%rsp), %r14, %rdi # 8-byte Folded Reload - addq %rcx, %rdi - mulxq -56(%rsp), %rbp, %rcx # 8-byte Folded Reload - adcq %rax, %rbp - mulxq -64(%rsp), %rbx, %r8 # 8-byte Folded Reload - adcq %rcx, %rbx - mulxq -72(%rsp), %rax, %r9 # 8-byte Folded Reload - adcq %r8, %rax - mulxq -80(%rsp), %r8, %rcx # 8-byte Folded Reload - movq %rcx, -120(%rsp) # 8-byte Spill - adcq %r9, %r8 - mulxq -88(%rsp), %r9, %r11 # 8-byte Folded Reload - adcq -120(%rsp), %r9 # 8-byte Folded Reload - adcq $0, %r11 - addq -96(%rsp), %r14 # 8-byte Folded Reload - adcq -112(%rsp), %rdi # 8-byte Folded Reload - adcq %r12, %rbp - adcq %r15, %rbx - adcq %r13, %rax - adcq %rsi, %r8 - adcq %r10, %r9 - adcq $0, %r11 - movq %r14, %rdx - imulq 8(%rsp), %rdx # 8-byte Folded Reload - mulxq 24(%rsp), %rsi, %rcx # 8-byte Folded Reload - movq %rcx, -96(%rsp) # 8-byte Spill - addq %r14, %rsi - mulxq 16(%rsp), %rsi, %r13 # 8-byte Folded Reload - adcq %rdi, %rsi - mulxq (%rsp), %rdi, %r15 # 8-byte Folded Reload - adcq %rbp, %rdi - mulxq -8(%rsp), %rcx, %rbp # 8-byte Folded Reload - adcq %rbx, %rcx - mulxq -16(%rsp), %r14, %rbx # 8-byte Folded Reload - adcq %rax, %r14 - mulxq -24(%rsp), %r12, %rax # 8-byte Folded Reload - adcq %r8, %r12 - mulxq 32(%rsp), %r10, %rdx # 8-byte Folded Reload - adcq %r9, %r10 - adcq $0, %r11 - addq -96(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -96(%rsp) # 8-byte Spill - adcq %r13, %rdi - movq %rdi, -112(%rsp) # 8-byte Spill - adcq %r15, %rcx - adcq %rbp, %r14 - adcq %rbx, %r12 - adcq %rax, %r10 - adcq %rdx, %r11 - movq -40(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rdx - mulxq -48(%rsp), %rsi, %rax # 8-byte Folded Reload - mulxq -32(%rsp), %r15, %rbp # 8-byte Folded Reload - addq %rsi, %rbp - mulxq -56(%rsp), %rbx, %rdi # 8-byte Folded Reload - adcq %rax, %rbx - mulxq -64(%rsp), %rsi, %rax # 8-byte Folded Reload - adcq %rdi, %rsi - mulxq -72(%rsp), %rdi, %r9 # 8-byte Folded Reload - adcq %rax, %rdi - mulxq -80(%rsp), %r8, %rax # 8-byte Folded Reload - adcq %r9, %r8 - mulxq -88(%rsp), %r9, %r13 # 8-byte Folded Reload - adcq %rax, %r9 - adcq $0, %r13 - addq -96(%rsp), %r15 # 8-byte Folded Reload - adcq -112(%rsp), %rbp # 8-byte Folded Reload - adcq %rcx, %rbx - adcq %r14, %rsi - adcq %r12, %rdi - adcq %r10, %r8 - adcq %r11, %r9 - adcq $0, %r13 - movq %r15, %rdx - imulq 8(%rsp), %rdx # 8-byte Folded Reload - mulxq 24(%rsp), %rcx, %rax # 8-byte Folded Reload - movq %rax, -96(%rsp) # 8-byte Spill - addq %r15, %rcx - mulxq 16(%rsp), %rcx, %r11 # 8-byte Folded Reload - adcq %rbp, %rcx - mulxq (%rsp), %rbp, %r10 # 8-byte Folded Reload - adcq %rbx, %rbp - mulxq -8(%rsp), %rax, %rbx # 8-byte Folded Reload - adcq %rsi, %rax - mulxq -16(%rsp), %r14, %rsi # 8-byte Folded Reload - adcq %rdi, %r14 - mulxq -24(%rsp), %r15, %rdi # 8-byte Folded Reload - adcq %r8, %r15 - mulxq 32(%rsp), %r12, %rdx # 8-byte Folded Reload - adcq %r9, %r12 - adcq $0, %r13 - addq -96(%rsp), %rcx # 8-byte Folded Reload - adcq %r11, %rbp - movq %rbp, 
-96(%rsp) # 8-byte Spill - adcq %r10, %rax - movq %rax, -112(%rsp) # 8-byte Spill - adcq %rbx, %r14 - adcq %rsi, %r15 - adcq %rdi, %r12 - adcq %rdx, %r13 - movq -40(%rsp), %rax # 8-byte Reload - movq 32(%rax), %rdx - mulxq -48(%rsp), %rsi, %rdi # 8-byte Folded Reload - mulxq -32(%rsp), %r11, %r8 # 8-byte Folded Reload - addq %rsi, %r8 - mulxq -56(%rsp), %rbx, %rsi # 8-byte Folded Reload - adcq %rdi, %rbx - mulxq -64(%rsp), %rbp, %rdi # 8-byte Folded Reload - adcq %rsi, %rbp - mulxq -72(%rsp), %rsi, %r9 # 8-byte Folded Reload - adcq %rdi, %rsi - mulxq -80(%rsp), %rdi, %rax # 8-byte Folded Reload - adcq %r9, %rdi - mulxq -88(%rsp), %r9, %r10 # 8-byte Folded Reload - adcq %rax, %r9 - adcq $0, %r10 - addq %rcx, %r11 - adcq -96(%rsp), %r8 # 8-byte Folded Reload - adcq -112(%rsp), %rbx # 8-byte Folded Reload - adcq %r14, %rbp - adcq %r15, %rsi - adcq %r12, %rdi - adcq %r13, %r9 - adcq $0, %r10 - movq %r11, %rdx - imulq 8(%rsp), %rdx # 8-byte Folded Reload - mulxq 24(%rsp), %rcx, %rax # 8-byte Folded Reload - movq %rax, -96(%rsp) # 8-byte Spill - addq %r11, %rcx - mulxq 16(%rsp), %rcx, %r13 # 8-byte Folded Reload - adcq %r8, %rcx - mulxq (%rsp), %rax, %r8 # 8-byte Folded Reload - adcq %rbx, %rax - mulxq -8(%rsp), %rbx, %r11 # 8-byte Folded Reload - adcq %rbp, %rbx - mulxq -16(%rsp), %r14, %rbp # 8-byte Folded Reload - adcq %rsi, %r14 - mulxq -24(%rsp), %r15, %rsi # 8-byte Folded Reload - adcq %rdi, %r15 - mulxq 32(%rsp), %r12, %rdx # 8-byte Folded Reload - adcq %r9, %r12 - adcq $0, %r10 - addq -96(%rsp), %rcx # 8-byte Folded Reload - adcq %r13, %rax - movq %rax, -96(%rsp) # 8-byte Spill - adcq %r8, %rbx - movq %rbx, -112(%rsp) # 8-byte Spill - adcq %r11, %r14 - adcq %rbp, %r15 - adcq %rsi, %r12 - adcq %rdx, %r10 - movq -40(%rsp), %rax # 8-byte Reload - movq 40(%rax), %rdx - mulxq -48(%rsp), %rsi, %rax # 8-byte Folded Reload - mulxq -32(%rsp), %r11, %rbp # 8-byte Folded Reload - addq %rsi, %rbp - mulxq -56(%rsp), %rbx, %rdi # 8-byte Folded Reload - adcq %rax, %rbx - mulxq -64(%rsp), %rsi, %rax # 8-byte Folded Reload - adcq %rdi, %rsi - mulxq -72(%rsp), %rdi, %r9 # 8-byte Folded Reload - adcq %rax, %rdi - mulxq -80(%rsp), %r8, %rax # 8-byte Folded Reload - adcq %r9, %r8 - mulxq -88(%rsp), %r9, %r13 # 8-byte Folded Reload - adcq %rax, %r9 - adcq $0, %r13 - addq %rcx, %r11 - adcq -96(%rsp), %rbp # 8-byte Folded Reload - adcq -112(%rsp), %rbx # 8-byte Folded Reload - adcq %r14, %rsi - adcq %r15, %rdi - adcq %r12, %r8 - adcq %r10, %r9 - adcq $0, %r13 - movq %r11, %rdx - imulq 8(%rsp), %rdx # 8-byte Folded Reload - mulxq 24(%rsp), %rcx, %rax # 8-byte Folded Reload - movq %rax, -112(%rsp) # 8-byte Spill - addq %r11, %rcx - mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload - movq %rax, -120(%rsp) # 8-byte Spill - adcq %rbp, %rcx - mulxq (%rsp), %rax, %rbp # 8-byte Folded Reload - movq %rbp, -128(%rsp) # 8-byte Spill - adcq %rbx, %rax - movq %rax, -96(%rsp) # 8-byte Spill - mulxq -8(%rsp), %r14, %rbp # 8-byte Folded Reload - adcq %rsi, %r14 - mulxq -16(%rsp), %r11, %r12 # 8-byte Folded Reload - adcq %rdi, %r11 - mulxq -24(%rsp), %r10, %rbx # 8-byte Folded Reload - adcq %r8, %r10 - mulxq 32(%rsp), %rdi, %rax # 8-byte Folded Reload - adcq %r9, %rdi - adcq $0, %r13 - addq -112(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -112(%rsp) # 8-byte Spill - movq -96(%rsp), %rcx # 8-byte Reload - adcq -120(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -96(%rsp) # 8-byte Spill - adcq -128(%rsp), %r14 # 8-byte Folded Reload - adcq %rbp, %r11 - adcq %r12, %r10 - adcq %rbx, %rdi - adcq %rax, %r13 - movq -40(%rsp), 
%rax # 8-byte Reload - movq 48(%rax), %rdx - mulxq -48(%rsp), %rbp, %r9 # 8-byte Folded Reload - mulxq -32(%rsp), %r8, %rax # 8-byte Folded Reload - addq %rbp, %rax - mulxq -56(%rsp), %rbx, %rcx # 8-byte Folded Reload - adcq %r9, %rbx - mulxq -64(%rsp), %rbp, %r9 # 8-byte Folded Reload - adcq %rcx, %rbp - mulxq -72(%rsp), %rcx, %r12 # 8-byte Folded Reload - adcq %r9, %rcx - mulxq -80(%rsp), %r15, %rsi # 8-byte Folded Reload - movq %rsi, -32(%rsp) # 8-byte Spill - adcq %r12, %r15 - mulxq -88(%rsp), %r12, %r9 # 8-byte Folded Reload - adcq -32(%rsp), %r12 # 8-byte Folded Reload - adcq $0, %r9 - addq -112(%rsp), %r8 # 8-byte Folded Reload - adcq -96(%rsp), %rax # 8-byte Folded Reload - adcq %r14, %rbx - adcq %r11, %rbp - adcq %r10, %rcx - adcq %rdi, %r15 - adcq %r13, %r12 - adcq $0, %r9 - movq 8(%rsp), %rdx # 8-byte Reload - imulq %r8, %rdx - mulxq 24(%rsp), %rdi, %rsi # 8-byte Folded Reload - movq %rsi, 8(%rsp) # 8-byte Spill - addq %r8, %rdi - mulxq 16(%rsp), %r8, %rsi # 8-byte Folded Reload - movq %rsi, -32(%rsp) # 8-byte Spill - adcq %rax, %r8 - movq (%rsp), %r11 # 8-byte Reload - mulxq %r11, %rsi, %rax - movq %rax, -40(%rsp) # 8-byte Spill - adcq %rbx, %rsi - movq -8(%rsp), %r14 # 8-byte Reload - mulxq %r14, %rdi, %rax - movq %rax, -48(%rsp) # 8-byte Spill - adcq %rbp, %rdi - movq -16(%rsp), %rbp # 8-byte Reload - mulxq %rbp, %rax, %rbx - movq %rbx, -56(%rsp) # 8-byte Spill - adcq %rcx, %rax - movq -24(%rsp), %rbx # 8-byte Reload - mulxq %rbx, %rcx, %r13 - adcq %r15, %rcx - mulxq 32(%rsp), %rdx, %r15 # 8-byte Folded Reload - adcq %r12, %rdx - adcq $0, %r9 - addq 8(%rsp), %r8 # 8-byte Folded Reload - adcq -32(%rsp), %rsi # 8-byte Folded Reload - adcq -40(%rsp), %rdi # 8-byte Folded Reload - adcq -48(%rsp), %rax # 8-byte Folded Reload - adcq -56(%rsp), %rcx # 8-byte Folded Reload - adcq %r13, %rdx - adcq %r15, %r9 - movq %r8, %r13 - subq 24(%rsp), %r13 # 8-byte Folded Reload - movq %rsi, %r12 - sbbq 16(%rsp), %r12 # 8-byte Folded Reload - movq %rdi, %r10 - sbbq %r11, %r10 - movq %rax, %r11 - sbbq %r14, %r11 - movq %rcx, %r14 - sbbq %rbp, %r14 - movq %rdx, %r15 - sbbq %rbx, %r15 - movq %r9, %rbp - sbbq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, %rbx - sarq $63, %rbx - cmovsq %r8, %r13 - movq -104(%rsp), %rbx # 8-byte Reload - movq %r13, (%rbx) - cmovsq %rsi, %r12 - movq %r12, 8(%rbx) - cmovsq %rdi, %r10 - movq %r10, 16(%rbx) - cmovsq %rax, %r11 - movq %r11, 24(%rbx) - cmovsq %rcx, %r14 - movq %r14, 32(%rbx) - cmovsq %rdx, %r15 - movq %r15, 40(%rbx) - cmovsq %r9, %rbp - movq %rbp, 48(%rbx) - addq $40, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end99: - .size mcl_fp_montNF7Lbmi2, .Lfunc_end99-mcl_fp_montNF7Lbmi2 - - .globl mcl_fp_montRed7Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed7Lbmi2,@function -mcl_fp_montRed7Lbmi2: # @mcl_fp_montRed7Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $64, %rsp - movq %rdx, %rcx - movq %rdi, -88(%rsp) # 8-byte Spill - movq -8(%rcx), %rax - movq %rax, -24(%rsp) # 8-byte Spill - movq (%rcx), %rbx - movq %rbx, 32(%rsp) # 8-byte Spill - movq (%rsi), %rdx - movq %rdx, 48(%rsp) # 8-byte Spill - imulq %rax, %rdx - movq 48(%rcx), %rax - movq %rax, 16(%rsp) # 8-byte Spill - mulxq %rax, %rdi, %rax - movq %rdi, 40(%rsp) # 8-byte Spill - movq %rax, -48(%rsp) # 8-byte Spill - movq 40(%rcx), %r8 - movq %r8, (%rsp) # 8-byte Spill - movq 32(%rcx), %r9 - movq %r9, 24(%rsp) # 8-byte Spill - movq 24(%rcx), %rbp - movq %rbp, 8(%rsp) # 8-byte Spill - movq 16(%rcx), %rdi - 
movq %rdi, 56(%rsp) # 8-byte Spill - movq 8(%rcx), %rax - movq %rax, -16(%rsp) # 8-byte Spill - mulxq %r8, %r10, %r11 - mulxq %r9, %r14, %r9 - mulxq %rbp, %r8, %r13 - mulxq %rdi, %rcx, %r12 - mulxq %rax, %rbp, %r15 - mulxq %rbx, %rdx, %rdi - addq %rbp, %rdi - adcq %rcx, %r15 - adcq %r8, %r12 - adcq %r14, %r13 - adcq %r10, %r9 - adcq 40(%rsp), %r11 # 8-byte Folded Reload - movq -48(%rsp), %rcx # 8-byte Reload - adcq $0, %rcx - addq 48(%rsp), %rdx # 8-byte Folded Reload - adcq 8(%rsi), %rdi - adcq 16(%rsi), %r15 - adcq 24(%rsi), %r12 - adcq 32(%rsi), %r13 - adcq 40(%rsi), %r9 - movq %r9, -96(%rsp) # 8-byte Spill - adcq 48(%rsi), %r11 - movq %r11, -72(%rsp) # 8-byte Spill - adcq 56(%rsi), %rcx - movq %rcx, -48(%rsp) # 8-byte Spill - movq 104(%rsi), %r8 - movq 96(%rsi), %rdx - movq 88(%rsi), %rbp - movq 80(%rsi), %rbx - movq 72(%rsi), %rcx - movq 64(%rsi), %rsi - adcq $0, %rsi - movq %rsi, -104(%rsp) # 8-byte Spill - adcq $0, %rcx - movq %rcx, -80(%rsp) # 8-byte Spill - adcq $0, %rbx - movq %rbx, -40(%rsp) # 8-byte Spill - adcq $0, %rbp - movq %rbp, -32(%rsp) # 8-byte Spill - adcq $0, %rdx - movq %rdx, -8(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 48(%rsp) # 8-byte Spill - sbbq %rcx, %rcx - andl $1, %ecx - movq %rcx, 40(%rsp) # 8-byte Spill - movq %rdi, %rdx - movq -24(%rsp), %r9 # 8-byte Reload - imulq %r9, %rdx - mulxq 16(%rsp), %rsi, %rcx # 8-byte Folded Reload - movq %rsi, -112(%rsp) # 8-byte Spill - movq %rcx, -56(%rsp) # 8-byte Spill - mulxq (%rsp), %rsi, %rcx # 8-byte Folded Reload - movq %rsi, -120(%rsp) # 8-byte Spill - movq %rcx, -64(%rsp) # 8-byte Spill - movq 24(%rsp), %rbx # 8-byte Reload - mulxq %rbx, %rcx, %rbp - movq %rcx, -128(%rsp) # 8-byte Spill - mulxq 8(%rsp), %r10, %r14 # 8-byte Folded Reload - mulxq 56(%rsp), %rsi, %r11 # 8-byte Folded Reload - mulxq %rax, %rcx, %r8 - mulxq 32(%rsp), %rdx, %rax # 8-byte Folded Reload - addq %rcx, %rax - adcq %rsi, %r8 - adcq %r10, %r11 - adcq -128(%rsp), %r14 # 8-byte Folded Reload - adcq -120(%rsp), %rbp # 8-byte Folded Reload - movq -64(%rsp), %rsi # 8-byte Reload - adcq -112(%rsp), %rsi # 8-byte Folded Reload - movq -56(%rsp), %rcx # 8-byte Reload - adcq $0, %rcx - addq %rdi, %rdx - adcq %r15, %rax - adcq %r12, %r8 - adcq %r13, %r11 - adcq -96(%rsp), %r14 # 8-byte Folded Reload - adcq -72(%rsp), %rbp # 8-byte Folded Reload - adcq -48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -64(%rsp) # 8-byte Spill - adcq -104(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -56(%rsp) # 8-byte Spill - adcq $0, -80(%rsp) # 8-byte Folded Spill - adcq $0, -40(%rsp) # 8-byte Folded Spill - adcq $0, -32(%rsp) # 8-byte Folded Spill - adcq $0, -8(%rsp) # 8-byte Folded Spill - adcq $0, 48(%rsp) # 8-byte Folded Spill - adcq $0, 40(%rsp) # 8-byte Folded Spill - movq %rax, %rdx - imulq %r9, %rdx - mulxq 16(%rsp), %rsi, %rcx # 8-byte Folded Reload - movq %rsi, -96(%rsp) # 8-byte Spill - movq %rcx, -48(%rsp) # 8-byte Spill - movq (%rsp), %r15 # 8-byte Reload - mulxq %r15, %rsi, %rcx - movq %rsi, -104(%rsp) # 8-byte Spill - movq %rcx, -72(%rsp) # 8-byte Spill - mulxq %rbx, %rcx, %r13 - movq %rcx, -112(%rsp) # 8-byte Spill - mulxq 8(%rsp), %rbx, %r12 # 8-byte Folded Reload - mulxq 56(%rsp), %rdi, %r9 # 8-byte Folded Reload - mulxq -16(%rsp), %rsi, %r10 # 8-byte Folded Reload - mulxq 32(%rsp), %rdx, %rcx # 8-byte Folded Reload - addq %rsi, %rcx - adcq %rdi, %r10 - adcq %rbx, %r9 - adcq -112(%rsp), %r12 # 8-byte Folded Reload - adcq -104(%rsp), %r13 # 8-byte Folded Reload - movq -72(%rsp), %rdi # 8-byte Reload - adcq -96(%rsp), %rdi # 8-byte Folded Reload - 
movq -48(%rsp), %rsi # 8-byte Reload - adcq $0, %rsi - addq %rax, %rdx - adcq %r8, %rcx - adcq %r11, %r10 - adcq %r14, %r9 - adcq %rbp, %r12 - adcq -64(%rsp), %r13 # 8-byte Folded Reload - adcq -56(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, -72(%rsp) # 8-byte Spill - adcq -80(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -48(%rsp) # 8-byte Spill - adcq $0, -40(%rsp) # 8-byte Folded Spill - adcq $0, -32(%rsp) # 8-byte Folded Spill - adcq $0, -8(%rsp) # 8-byte Folded Spill - adcq $0, 48(%rsp) # 8-byte Folded Spill - adcq $0, 40(%rsp) # 8-byte Folded Spill - movq %rcx, %rdx - imulq -24(%rsp), %rdx # 8-byte Folded Reload - mulxq 16(%rsp), %rsi, %rax # 8-byte Folded Reload - movq %rsi, -80(%rsp) # 8-byte Spill - movq %rax, -56(%rsp) # 8-byte Spill - mulxq %r15, %rsi, %rax - movq %rsi, -96(%rsp) # 8-byte Spill - movq %rax, -64(%rsp) # 8-byte Spill - mulxq 24(%rsp), %r8, %r15 # 8-byte Folded Reload - mulxq 8(%rsp), %r14, %rbp # 8-byte Folded Reload - mulxq 56(%rsp), %rdi, %rbx # 8-byte Folded Reload - mulxq -16(%rsp), %rsi, %r11 # 8-byte Folded Reload - mulxq 32(%rsp), %rdx, %rax # 8-byte Folded Reload - addq %rsi, %rax - adcq %rdi, %r11 - adcq %r14, %rbx - adcq %r8, %rbp - adcq -96(%rsp), %r15 # 8-byte Folded Reload - movq -64(%rsp), %rdi # 8-byte Reload - adcq -80(%rsp), %rdi # 8-byte Folded Reload - movq -56(%rsp), %rsi # 8-byte Reload - adcq $0, %rsi - addq %rcx, %rdx - adcq %r10, %rax - adcq %r9, %r11 - adcq %r12, %rbx - adcq %r13, %rbp - adcq -72(%rsp), %r15 # 8-byte Folded Reload - adcq -48(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, -64(%rsp) # 8-byte Spill - adcq -40(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -56(%rsp) # 8-byte Spill - adcq $0, -32(%rsp) # 8-byte Folded Spill - adcq $0, -8(%rsp) # 8-byte Folded Spill - adcq $0, 48(%rsp) # 8-byte Folded Spill - adcq $0, 40(%rsp) # 8-byte Folded Spill - movq %rax, %rdx - imulq -24(%rsp), %rdx # 8-byte Folded Reload - movq 16(%rsp), %r10 # 8-byte Reload - mulxq %r10, %rsi, %rcx - movq %rsi, -72(%rsp) # 8-byte Spill - movq %rcx, -40(%rsp) # 8-byte Spill - mulxq (%rsp), %rsi, %rcx # 8-byte Folded Reload - movq %rsi, -80(%rsp) # 8-byte Spill - movq %rcx, -48(%rsp) # 8-byte Spill - mulxq 24(%rsp), %rsi, %rcx # 8-byte Folded Reload - movq %rsi, -96(%rsp) # 8-byte Spill - mulxq 8(%rsp), %r12, %r13 # 8-byte Folded Reload - mulxq 56(%rsp), %r8, %r14 # 8-byte Folded Reload - mulxq -16(%rsp), %rsi, %r9 # 8-byte Folded Reload - mulxq 32(%rsp), %rdx, %rdi # 8-byte Folded Reload - addq %rsi, %rdi - adcq %r8, %r9 - adcq %r12, %r14 - adcq -96(%rsp), %r13 # 8-byte Folded Reload - adcq -80(%rsp), %rcx # 8-byte Folded Reload - movq -48(%rsp), %r8 # 8-byte Reload - adcq -72(%rsp), %r8 # 8-byte Folded Reload - movq -40(%rsp), %rsi # 8-byte Reload - adcq $0, %rsi - addq %rax, %rdx - adcq %r11, %rdi - adcq %rbx, %r9 - adcq %rbp, %r14 - adcq %r15, %r13 - adcq -64(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -64(%rsp) # 8-byte Spill - adcq -56(%rsp), %r8 # 8-byte Folded Reload - movq %r8, -48(%rsp) # 8-byte Spill - adcq -32(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -40(%rsp) # 8-byte Spill - adcq $0, -8(%rsp) # 8-byte Folded Spill - adcq $0, 48(%rsp) # 8-byte Folded Spill - adcq $0, 40(%rsp) # 8-byte Folded Spill - movq %rdi, %rdx - imulq -24(%rsp), %rdx # 8-byte Folded Reload - mulxq %r10, %rcx, %rax - movq %rcx, -72(%rsp) # 8-byte Spill - movq %rax, -32(%rsp) # 8-byte Spill - mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload - movq %rcx, -80(%rsp) # 8-byte Spill - movq %rax, -56(%rsp) # 8-byte Spill - mulxq 24(%rsp), %rax, %rcx # 8-byte 
Folded Reload - movq %rax, -96(%rsp) # 8-byte Spill - movq 8(%rsp), %r12 # 8-byte Reload - mulxq %r12, %rax, %r15 - movq %rax, -104(%rsp) # 8-byte Spill - mulxq 56(%rsp), %rsi, %r11 # 8-byte Folded Reload - movq -16(%rsp), %r10 # 8-byte Reload - mulxq %r10, %rax, %rbp - movq 32(%rsp), %rbx # 8-byte Reload - mulxq %rbx, %rdx, %r8 - addq %rax, %r8 - adcq %rsi, %rbp - adcq -104(%rsp), %r11 # 8-byte Folded Reload - adcq -96(%rsp), %r15 # 8-byte Folded Reload - adcq -80(%rsp), %rcx # 8-byte Folded Reload - movq -56(%rsp), %rsi # 8-byte Reload - adcq -72(%rsp), %rsi # 8-byte Folded Reload - movq -32(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %rdi, %rdx - adcq %r9, %r8 - adcq %r14, %rbp - adcq %r13, %r11 - adcq -64(%rsp), %r15 # 8-byte Folded Reload - adcq -48(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -48(%rsp) # 8-byte Spill - adcq -40(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -56(%rsp) # 8-byte Spill - adcq -8(%rsp), %rax # 8-byte Folded Reload - movq %rax, -32(%rsp) # 8-byte Spill - adcq $0, 48(%rsp) # 8-byte Folded Spill - adcq $0, 40(%rsp) # 8-byte Folded Spill - movq -24(%rsp), %rdx # 8-byte Reload - imulq %r8, %rdx - mulxq %r12, %rax, %r13 - mulxq %r10, %rcx, %rdi - mulxq %rbx, %r12, %r14 - addq %rcx, %r14 - mulxq 56(%rsp), %rcx, %r10 # 8-byte Folded Reload - adcq %rdi, %rcx - adcq %rax, %r10 - mulxq 24(%rsp), %rax, %r9 # 8-byte Folded Reload - adcq %r13, %rax - mulxq (%rsp), %rdi, %r13 # 8-byte Folded Reload - adcq %r9, %rdi - mulxq 16(%rsp), %rdx, %rsi # 8-byte Folded Reload - adcq %r13, %rdx - adcq $0, %rsi - addq %r8, %r12 - adcq %rbp, %r14 - adcq %r11, %rcx - adcq %r15, %r10 - adcq -48(%rsp), %rax # 8-byte Folded Reload - adcq -56(%rsp), %rdi # 8-byte Folded Reload - adcq -32(%rsp), %rdx # 8-byte Folded Reload - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq 40(%rsp), %rbx # 8-byte Reload - adcq $0, %rbx - movq %r14, %rbp - subq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rcx, %r13 - sbbq -16(%rsp), %r13 # 8-byte Folded Reload - movq %r10, %r8 - sbbq 56(%rsp), %r8 # 8-byte Folded Reload - movq %rax, %r9 - sbbq 8(%rsp), %r9 # 8-byte Folded Reload - movq %rdi, %r11 - sbbq 24(%rsp), %r11 # 8-byte Folded Reload - movq %rdx, %r15 - sbbq (%rsp), %r15 # 8-byte Folded Reload - movq %rsi, %r12 - sbbq 16(%rsp), %r12 # 8-byte Folded Reload - sbbq $0, %rbx - andl $1, %ebx - cmovneq %rsi, %r12 - testb %bl, %bl - cmovneq %r14, %rbp - movq -88(%rsp), %rsi # 8-byte Reload - movq %rbp, (%rsi) - cmovneq %rcx, %r13 - movq %r13, 8(%rsi) - cmovneq %r10, %r8 - movq %r8, 16(%rsi) - cmovneq %rax, %r9 - movq %r9, 24(%rsi) - cmovneq %rdi, %r11 - movq %r11, 32(%rsi) - cmovneq %rdx, %r15 - movq %r15, 40(%rsi) - movq %r12, 48(%rsi) - addq $64, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end100: - .size mcl_fp_montRed7Lbmi2, .Lfunc_end100-mcl_fp_montRed7Lbmi2 - - .globl mcl_fp_addPre7Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre7Lbmi2,@function -mcl_fp_addPre7Lbmi2: # @mcl_fp_addPre7Lbmi2 -# BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 48(%rdx), %r8 - movq 48(%rsi), %r14 - movq 40(%rdx), %r9 - movq 40(%rsi), %r15 - movq 32(%rdx), %r10 - movq 24(%rdx), %r11 - movq 16(%rdx), %r12 - movq (%rdx), %rcx - movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - movq 24(%rsi), %rax - movq 32(%rsi), %rbx - adcq 16(%rsi), %r12 - movq %rcx, (%rdi) - movq %rdx, 8(%rdi) - movq %r12, 16(%rdi) - adcq %r11, %rax - movq %rax, 24(%rdi) - adcq %r10, %rbx - movq %rbx, 32(%rdi) - adcq %r9, %r15 - movq %r15, 40(%rdi) - adcq %r8, %r14 - 
movq %r14, 48(%rdi) - sbbq %rax, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq -.Lfunc_end101: - .size mcl_fp_addPre7Lbmi2, .Lfunc_end101-mcl_fp_addPre7Lbmi2 - - .globl mcl_fp_subPre7Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre7Lbmi2,@function -mcl_fp_subPre7Lbmi2: # @mcl_fp_subPre7Lbmi2 -# BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 48(%rdx), %r8 - movq 48(%rsi), %r10 - movq 40(%rdx), %r9 - movq 40(%rsi), %r15 - movq 24(%rdx), %r11 - movq 32(%rdx), %r14 - movq (%rsi), %rbx - movq 8(%rsi), %r12 - xorl %eax, %eax - subq (%rdx), %rbx - sbbq 8(%rdx), %r12 - movq 16(%rsi), %rcx - sbbq 16(%rdx), %rcx - movq 32(%rsi), %rdx - movq 24(%rsi), %rsi - movq %rbx, (%rdi) - movq %r12, 8(%rdi) - movq %rcx, 16(%rdi) - sbbq %r11, %rsi - movq %rsi, 24(%rdi) - sbbq %r14, %rdx - movq %rdx, 32(%rdi) - sbbq %r9, %r15 - movq %r15, 40(%rdi) - sbbq %r8, %r10 - movq %r10, 48(%rdi) - sbbq $0, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq -.Lfunc_end102: - .size mcl_fp_subPre7Lbmi2, .Lfunc_end102-mcl_fp_subPre7Lbmi2 - - .globl mcl_fp_shr1_7Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_7Lbmi2,@function -mcl_fp_shr1_7Lbmi2: # @mcl_fp_shr1_7Lbmi2 -# BB#0: - movq 48(%rsi), %r8 - movq 40(%rsi), %r9 - movq 32(%rsi), %r10 - movq 24(%rsi), %rax - movq 16(%rsi), %rcx - movq (%rsi), %rdx - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rdx - movq %rdx, (%rdi) - shrdq $1, %rcx, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rax, %rcx - movq %rcx, 16(%rdi) - shrdq $1, %r10, %rax - movq %rax, 24(%rdi) - shrdq $1, %r9, %r10 - movq %r10, 32(%rdi) - shrdq $1, %r8, %r9 - movq %r9, 40(%rdi) - shrq %r8 - movq %r8, 48(%rdi) - retq -.Lfunc_end103: - .size mcl_fp_shr1_7Lbmi2, .Lfunc_end103-mcl_fp_shr1_7Lbmi2 - - .globl mcl_fp_add7Lbmi2 - .align 16, 0x90 - .type mcl_fp_add7Lbmi2,@function -mcl_fp_add7Lbmi2: # @mcl_fp_add7Lbmi2 -# BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 48(%rdx), %r14 - movq 48(%rsi), %r8 - movq 40(%rdx), %r15 - movq 40(%rsi), %r9 - movq 32(%rdx), %r12 - movq 24(%rdx), %r13 - movq 16(%rdx), %r10 - movq (%rdx), %r11 - movq 8(%rdx), %rdx - addq (%rsi), %r11 - adcq 8(%rsi), %rdx - movq 24(%rsi), %rax - movq 32(%rsi), %rbx - adcq 16(%rsi), %r10 - movq %r11, (%rdi) - movq %rdx, 8(%rdi) - movq %r10, 16(%rdi) - adcq %r13, %rax - movq %rax, 24(%rdi) - adcq %r12, %rbx - movq %rbx, 32(%rdi) - adcq %r15, %r9 - movq %r9, 40(%rdi) - adcq %r14, %r8 - movq %r8, 48(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %r11 - sbbq 8(%rcx), %rdx - sbbq 16(%rcx), %r10 - sbbq 24(%rcx), %rax - sbbq 32(%rcx), %rbx - sbbq 40(%rcx), %r9 - sbbq 48(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne .LBB104_2 -# BB#1: # %nocarry - movq %r11, (%rdi) - movq %rdx, 8(%rdi) - movq %r10, 16(%rdi) - movq %rax, 24(%rdi) - movq %rbx, 32(%rdi) - movq %r9, 40(%rdi) - movq %r8, 48(%rdi) -.LBB104_2: # %carry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq -.Lfunc_end104: - .size mcl_fp_add7Lbmi2, .Lfunc_end104-mcl_fp_add7Lbmi2 - - .globl mcl_fp_addNF7Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF7Lbmi2,@function -mcl_fp_addNF7Lbmi2: # @mcl_fp_addNF7Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 48(%rdx), %r9 - movq 40(%rdx), %rbp - movq 32(%rdx), %r10 - movq 24(%rdx), %r11 - movq 16(%rdx), %r14 - movq (%rdx), %r12 - movq 8(%rdx), %r15 - addq (%rsi), %r12 - adcq 8(%rsi), %r15 - adcq 16(%rsi), %r14 - adcq 24(%rsi), %r11 - adcq 32(%rsi), %r10 - adcq 40(%rsi), %rbp - movq %rbp, -8(%rsp) # 8-byte 
-	.globl	mcl_fp_addNF7Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_addNF7Lbmi2,@function
-mcl_fp_addNF7Lbmi2:             # @mcl_fp_addNF7Lbmi2
-# BB#0:
-	[... ~60 lines elided: branch-free 7-limb modular add; after the
-	 trial subtraction, cmovsq on the sign of the top word selects the
-	 reduced or unreduced limbs ...]
-.Lfunc_end105:
-	.size	mcl_fp_addNF7Lbmi2, .Lfunc_end105-mcl_fp_addNF7Lbmi2
-
-	.globl	mcl_fp_sub7Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_sub7Lbmi2,@function
-mcl_fp_sub7Lbmi2:               # @mcl_fp_sub7Lbmi2
-# BB#0:
-	[... ~60 lines elided: 7-limb subtract; on borrow the fall-through
-	 %carry block adds the modulus back before .LBB106_2 (%nocarry) ...]
-.Lfunc_end106:
-	.size	mcl_fp_sub7Lbmi2, .Lfunc_end106-mcl_fp_sub7Lbmi2
-
-	.globl	mcl_fp_subNF7Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_subNF7Lbmi2,@function
-mcl_fp_subNF7Lbmi2:             # @mcl_fp_subNF7Lbmi2
-# BB#0:
-	[... ~55 lines elided: branch-free subtract; sarq $63 of the top
-	 borrow word builds an all-ones/zero mask that selects the modulus
-	 words to add back ...]
-.Lfunc_end107:
-	.size	mcl_fp_subNF7Lbmi2, .Lfunc_end107-mcl_fp_subNF7Lbmi2
-
-	.globl	mcl_fpDbl_add7Lbmi2
-	.align	16, 0x90
-	.type	mcl_fpDbl_add7Lbmi2,@function
-mcl_fpDbl_add7Lbmi2:            # @mcl_fpDbl_add7Lbmi2
-# BB#0:
-	[... ~95 lines elided: 14-limb add of double-width operands; the
-	 high seven limbs are conditionally reduced modulo p with the usual
-	 sbbq/cmovneq pattern ...]
-.Lfunc_end108:
-	.size	mcl_fpDbl_add7Lbmi2, .Lfunc_end108-mcl_fpDbl_add7Lbmi2
-
-	.globl	mcl_fpDbl_sub7Lbmi2
-	.align	16, 0x90
-	.type	mcl_fpDbl_sub7Lbmi2,@function
-mcl_fpDbl_sub7Lbmi2:            # @mcl_fpDbl_sub7Lbmi2
-# BB#0:
-	[... ~95 lines elided: 14-limb subtract; cmoveq against the borrow
-	 flag builds p-or-zero, which is added back into the high half ...]
-.Lfunc_end109:
-	.size	mcl_fpDbl_sub7Lbmi2, .Lfunc_end109-mcl_fpDbl_sub7Lbmi2
-
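mcl_fp_add7Lbmi2 and the NF variants all implement modular addition as add-then-trial-subtract; only the selection differs (a branch versus a cmovsq mask). A hedged C sketch of the branching form, for x, y < p:

```c
#include <stdint.h>

#define LIMBS 7

/* z = (x + y) mod p.  Add with carry out, trial-subtract p with
 * borrow out; x + y >= p exactly when the add carried or the
 * subtraction did not borrow.  Illustrative only. */
static void fp_add(uint64_t z[LIMBS], const uint64_t x[LIMBS],
                   const uint64_t y[LIMBS], const uint64_t p[LIMBS]) {
    uint64_t s[LIMBS], t[LIMBS];
    unsigned __int128 acc = 0;
    for (int i = 0; i < LIMBS; i++) {            /* s = x + y */
        acc += (unsigned __int128)x[i] + y[i];
        s[i] = (uint64_t)acc;
        acc >>= 64;
    }
    uint64_t carry = (uint64_t)acc;
    uint64_t borrow = 0;
    for (int i = 0; i < LIMBS; i++) {            /* t = s - p */
        unsigned __int128 d = (unsigned __int128)s[i] - p[i] - borrow;
        t[i] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;
    }
    int reduced = carry || !borrow;              /* x + y >= p */
    for (int i = 0; i < LIMBS; i++)
        z[i] = reduced ? t[i] : s[i];
}
```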
-	.align	16, 0x90
-	.type	.LmulPv512x64,@function
-.LmulPv512x64:                  # @mulPv512x64
-# BB#0:
-	mulxq	(%rsi), %rcx, %rax
-	movq	%rcx, (%rdi)
-	mulxq	8(%rsi), %rcx, %r8
-	addq	%rax, %rcx
-	movq	%rcx, 8(%rdi)
-	mulxq	16(%rsi), %rcx, %r9
-	adcq	%r8, %rcx
-	movq	%rcx, 16(%rdi)
-	mulxq	24(%rsi), %rax, %rcx
-	adcq	%r9, %rax
-	movq	%rax, 24(%rdi)
-	mulxq	32(%rsi), %rax, %r8
-	adcq	%rcx, %rax
-	movq	%rax, 32(%rdi)
-	mulxq	40(%rsi), %rcx, %r9
-	adcq	%r8, %rcx
-	movq	%rcx, 40(%rdi)
-	mulxq	48(%rsi), %rax, %rcx
-	adcq	%r9, %rax
-	movq	%rax, 48(%rdi)
-	mulxq	56(%rsi), %rax, %rdx
-	adcq	%rcx, %rax
-	movq	%rax, 56(%rdi)
-	adcq	$0, %rdx
-	movq	%rdx, 64(%rdi)
-	movq	%rdi, %rax
-	retq
-.Lfunc_end110:
-	.size	.LmulPv512x64, .Lfunc_end110-.LmulPv512x64
-
-	.globl	mcl_fp_mulUnitPre8Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_mulUnitPre8Lbmi2,@function
-mcl_fp_mulUnitPre8Lbmi2:        # @mcl_fp_mulUnitPre8Lbmi2
-# BB#0:
-	[... ~30 lines elided: set up a stack buffer, callq .LmulPv512x64,
-	 then copy the nine result words to the destination ...]
-.Lfunc_end111:
-	.size	mcl_fp_mulUnitPre8Lbmi2, .Lfunc_end111-mcl_fp_mulUnitPre8Lbmi2
-
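Everything from mcl_fp_mulUnitPre8Lbmi2 through the Montgomery routines below is built on .LmulPv512x64, which multiplies an 8-word operand by a single 64-bit word (kept in %rdx so each mulxq yields a 128-bit partial product) into a 9-word result. What the mulxq/adcq chain computes, as a C sketch with illustrative names:

```c
#include <stdint.h>

/* z[0..8] = x[0..7] * y.  Each step adds the high half of the
 * previous partial product into the low half of the next one,
 * exactly the adcq chain in .LmulPv512x64. */
static void mul_pv(uint64_t z[9], const uint64_t x[8], uint64_t y) {
    uint64_t carry = 0;
    for (int i = 0; i < 8; i++) {
        unsigned __int128 p = (unsigned __int128)x[i] * y + carry;
        z[i]  = (uint64_t)p;          /* mulxq's low output, stored */
        carry = (uint64_t)(p >> 64);  /* high output, into next adcq */
    }
    z[8] = carry;                     /* the final "adcq $0" word */
}
```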
-	.globl	mcl_fpDbl_mulPre8Lbmi2
-	.align	16, 0x90
-	.type	mcl_fpDbl_mulPre8Lbmi2,@function
-mcl_fpDbl_mulPre8Lbmi2:         # @mcl_fpDbl_mulPre8Lbmi2
-# BB#0:
-	[... ~170 lines elided: one Karatsuba level; three
-	 mcl_fpDbl_mulPre4Lbmi2@PLT calls on the low halves, the high
-	 halves and the folded half-sums, pushfq/popfq to preserve the
-	 fold carries, then the signed recombination of the three partial
-	 products ...]
-.Lfunc_end112:
-	.size	mcl_fpDbl_mulPre8Lbmi2, .Lfunc_end112-mcl_fpDbl_mulPre8Lbmi2
-
-	.globl	mcl_fpDbl_sqrPre8Lbmi2
-	.align	16, 0x90
-	.type	mcl_fpDbl_sqrPre8Lbmi2,@function
-mcl_fpDbl_sqrPre8Lbmi2:         # @mcl_fpDbl_sqrPre8Lbmi2
-# BB#0:
-	[... ~160 lines elided: the squaring analogue of the above, passing
-	 the same operand as both multiplicands to the three
-	 mcl_fpDbl_mulPre4Lbmi2@PLT calls ...]
-.Lfunc_end113:
-	.size	mcl_fpDbl_sqrPre8Lbmi2, .Lfunc_end113-mcl_fpDbl_sqrPre8Lbmi2
-
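mcl_fpDbl_mulPre8Lbmi2 is one Karatsuba level over 4-limb halves: with B = 2^256, it computes xl·yl + [(xl+xh)(yl+yh) − xl·yl − xh·yh]·B + xh·yh·B². A schoolbook model of the 4-limb primitive it calls (hypothetical names; carries via `__int128`):

```c
#include <stdint.h>
#include <string.h>

/* Full 4x4-limb product, z[0..7] = x * y -- what each
 * mcl_fpDbl_mulPre4Lbmi2 call produces. */
static void mul4(uint64_t z[8], const uint64_t x[4], const uint64_t y[4]) {
    memset(z, 0, 8 * sizeof *z);
    for (int i = 0; i < 4; i++) {
        uint64_t carry = 0;
        for (int j = 0; j < 4; j++) {
            unsigned __int128 t = (unsigned __int128)x[i] * y[j]
                                + z[i + j] + carry;
            z[i + j] = (uint64_t)t;
            carry    = (uint64_t)(t >> 64);
        }
        z[i + 4] = carry;             /* column overflow into the next word */
    }
}
```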
-	.globl	mcl_fp_mont8Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_mont8Lbmi2,@function
-mcl_fp_mont8Lbmi2:              # @mcl_fp_mont8Lbmi2
-# BB#0:
-	[... ~490 lines elided: interleaved (CIOS-style) Montgomery
-	 multiplication over eight 64-bit words; each of the eight rounds
-	 pairs one .LmulPv512x64 call on the operand with one on the
-	 modulus, with the round multiplier m formed by imulq against the
-	 spilled inverse word loaded from -8 off the modulus pointer; the
-	 tail does the trial subtraction with sbbq/cmovneq selection ...]
-.Lfunc_end114:
-	.size	mcl_fp_mont8Lbmi2, .Lfunc_end114-mcl_fp_mont8Lbmi2
-
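A C sketch of that loop structure, under the usual assumptions (p odd and below 2^512, inputs below p, n0 = -p^{-1} mod 2^64); this mirrors, but is not, the vendored code:

```c
#include <stdint.h>

#define NW 8   /* 8 x 64 bits = 512-bit field elements */

/* Word-by-word Montgomery multiply: z = x * y * 2^{-512} mod p. */
static void mont_mul(uint64_t z[NW], const uint64_t x[NW], const uint64_t y[NW],
                     const uint64_t p[NW], uint64_t n0) {
    uint64_t t[NW + 1] = {0};
    uint64_t t_hi = 0;                       /* extra top bits */
    for (int i = 0; i < NW; i++) {
        uint64_t c = 0;
        for (int j = 0; j < NW; j++) {       /* t += x * y[i] */
            unsigned __int128 s = (unsigned __int128)x[j] * y[i] + t[j] + c;
            t[j] = (uint64_t)s;
            c = (uint64_t)(s >> 64);
        }
        unsigned __int128 s = (unsigned __int128)t[NW] + c;
        t[NW] = (uint64_t)s;
        t_hi = (uint64_t)(s >> 64);

        uint64_t m = t[0] * n0;              /* makes t divisible by 2^64 */
        c = 0;
        for (int j = 0; j < NW; j++) {       /* t += m * p */
            unsigned __int128 u = (unsigned __int128)m * p[j] + t[j] + c;
            t[j] = (uint64_t)u;
            c = (uint64_t)(u >> 64);
        }
        s = (unsigned __int128)t[NW] + c;
        t[NW] = (uint64_t)s;
        t_hi += (uint64_t)(s >> 64);

        for (int j = 0; j < NW; j++)         /* t >>= 64 (t[0] is now 0) */
            t[j] = t[j + 1];
        t[NW] = t_hi;
    }
    uint64_t borrow = 0, r[NW];              /* conditional subtract of p */
    for (int i = 0; i < NW; i++) {
        unsigned __int128 d = (unsigned __int128)t[i] - p[i] - borrow;
        r[i] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;
    }
    int keep = borrow && !t[NW];             /* t < p: keep unsubtracted */
    for (int i = 0; i < NW; i++)
        z[i] = keep ? t[i] : r[i];
}
```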
-	.globl	mcl_fp_montNF8Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_montNF8Lbmi2,@function
-mcl_fp_montNF8Lbmi2:            # @mcl_fp_montNF8Lbmi2
-# BB#0:
-	[... ~460 lines elided: the same eight-round .LmulPv512x64
-	 structure as mcl_fp_mont8Lbmi2, without the per-round borrow
-	 bookkeeping; the final selection uses cmovsq on the sign of the
-	 trial subtraction instead of a carry bit ...]
-.Lfunc_end115:
-	.size	mcl_fp_montNF8Lbmi2, .Lfunc_end115-mcl_fp_montNF8Lbmi2
-
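mcl_fp_montRed8Lbmi2, next, is the standalone reduction: eight rounds, each zeroing one low word of a 16-word input by adding m·p (one .LmulPv512x64 call per round, with m from imulq against the -8(%rdx) inverse word). Sketch under the same assumptions as above:

```c
#include <stdint.h>

#define NW 8

/* Reduce a 2*NW-word value xy (assumed < p * 2^512) to
 * xy * 2^{-512} mod p.  Illustrative, not mcl's exact code. */
static void mont_red(uint64_t z[NW], const uint64_t xy[2 * NW],
                     const uint64_t p[NW], uint64_t n0) {
    uint64_t t[2 * NW + 1];
    for (int i = 0; i < 2 * NW; i++) t[i] = xy[i];
    t[2 * NW] = 0;
    for (int i = 0; i < NW; i++) {
        uint64_t m = t[i] * n0;          /* t[i] + m*p[0] == 0 mod 2^64 */
        uint64_t c = 0;
        for (int j = 0; j < NW; j++) {
            unsigned __int128 u = (unsigned __int128)m * p[j] + t[i + j] + c;
            t[i + j] = (uint64_t)u;
            c = (uint64_t)(u >> 64);
        }
        for (int j = i + NW; c != 0 && j <= 2 * NW; j++) {
            unsigned __int128 u = (unsigned __int128)t[j] + c;
            t[j] = (uint64_t)u;          /* ripple the carry upward */
            c = (uint64_t)(u >> 64);
        }
    }
    /* result sits in t[NW..2NW]; one conditional subtract finishes */
    uint64_t borrow = 0, r[NW];
    for (int i = 0; i < NW; i++) {
        unsigned __int128 d = (unsigned __int128)t[NW + i] - p[i] - borrow;
        r[i] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;
    }
    int keep = borrow && !t[2 * NW];
    for (int i = 0; i < NW; i++)
        z[i] = keep ? t[NW + i] : r[i];
}
```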
-	.globl	mcl_fp_montRed8Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_montRed8Lbmi2,@function
-mcl_fp_montRed8Lbmi2:           # @mcl_fp_montRed8Lbmi2
-# BB#0:
-	[... ~320 lines elided: Montgomery reduction of a 16-word input;
-	 eight rounds of imulq 128(%rsp) to form m followed by a
-	 .LmulPv512x64 call on the modulus, then the conditional final
-	 subtraction via sbbq/cmovneq ...]
-.Lfunc_end116:
-	.size	mcl_fp_montRed8Lbmi2, .Lfunc_end116-mcl_fp_montRed8Lbmi2
-
-	.globl	mcl_fp_addPre8Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_addPre8Lbmi2,@function
-mcl_fp_addPre8Lbmi2:            # @mcl_fp_addPre8Lbmi2
-# BB#0:
-	[... ~40 lines elided: 8-limb addq/adcq carry chain, carry out
-	 returned in %rax ...]
-.Lfunc_end117:
-	.size	mcl_fp_addPre8Lbmi2, .Lfunc_end117-mcl_fp_addPre8Lbmi2
-
-	.globl	mcl_fp_subPre8Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_subPre8Lbmi2,@function
-mcl_fp_subPre8Lbmi2:            # @mcl_fp_subPre8Lbmi2
-# BB#0:
-	[... ~40 lines elided: 8-limb subq/sbbq borrow chain, borrow out
-	 returned in %rax ...]
-.Lfunc_end118:
-	.size	mcl_fp_subPre8Lbmi2, .Lfunc_end118-mcl_fp_subPre8Lbmi2
-
-	.globl	mcl_fp_shr1_8Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_shr1_8Lbmi2,@function
-mcl_fp_shr1_8Lbmi2:             # @mcl_fp_shr1_8Lbmi2
-# BB#0:
-	[... ~25 lines elided: seven shrdq $1 double-word shifts plus a
-	 final shrq, a one-bit right shift across the eight limbs ...]
-.Lfunc_end119:
-	.size	mcl_fp_shr1_8Lbmi2, .Lfunc_end119-mcl_fp_shr1_8Lbmi2
-
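mcl_fp_shr1_8Lbmi2 halves an element: each shrdq funnels the low bit of the next-higher limb in as the new top bit. In C (names illustrative):

```c
#include <stdint.h>

/* z = x >> 1 across 8 little-endian 64-bit limbs. */
static void shr1(uint64_t z[8], const uint64_t x[8]) {
    for (int i = 0; i < 7; i++)
        z[i] = (x[i] >> 1) | (x[i + 1] << 63);   /* shrdq $1 */
    z[7] = x[7] >> 1;                             /* plain shrq */
}
```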
-	.globl	mcl_fp_add8Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_add8Lbmi2,@function
-mcl_fp_add8Lbmi2:               # @mcl_fp_add8Lbmi2
-# BB#0:
-	[... ~60 lines elided: 8-limb add, trial subtraction of the
-	 modulus, branch to .LBB120_2 (%carry) keeps the unreduced sum on
-	 borrow ...]
-.Lfunc_end120:
-	.size	mcl_fp_add8Lbmi2, .Lfunc_end120-mcl_fp_add8Lbmi2
-
-	.globl	mcl_fp_addNF8Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_addNF8Lbmi2,@function
-mcl_fp_addNF8Lbmi2:             # @mcl_fp_addNF8Lbmi2
-# BB#0:
-	[... ~65 lines elided: branch-free 8-limb modular add with cmovsq
-	 selection on the sign of the trial subtraction ...]
-.Lfunc_end121:
-	.size	mcl_fp_addNF8Lbmi2, .Lfunc_end121-mcl_fp_addNF8Lbmi2
-
-	.globl	mcl_fp_sub8Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_sub8Lbmi2,@function
-mcl_fp_sub8Lbmi2:               # @mcl_fp_sub8Lbmi2
-# BB#0:
-	[... ~65 lines elided: 8-limb subtract; on borrow the %carry block
-	 adds the modulus back before .LBB122_2 (%nocarry) ...]
-.Lfunc_end122:
-	.size	mcl_fp_sub8Lbmi2, .Lfunc_end122-mcl_fp_sub8Lbmi2
-
-	.globl	mcl_fp_subNF8Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_subNF8Lbmi2,@function
-mcl_fp_subNF8Lbmi2:             # @mcl_fp_subNF8Lbmi2
-# BB#0:
-	[... ~60 lines elided: branch-free subtract; sarq $63 of the top
-	 borrow word masks the modulus words to add back ...]
-.Lfunc_end123:
-	.size	mcl_fp_subNF8Lbmi2, .Lfunc_end123-mcl_fp_subNF8Lbmi2
-
-	.globl	mcl_fpDbl_add8Lbmi2
-	.align	16, 0x90
-	.type	mcl_fpDbl_add8Lbmi2,@function
-mcl_fpDbl_add8Lbmi2:            # @mcl_fpDbl_add8Lbmi2
-# BB#0:
-	[... ~110 lines elided: 16-limb add of double-width operands; the
-	 high eight limbs are conditionally reduced modulo p with
-	 sbbq/cmovneq ...]
-.Lfunc_end124:
-	.size	mcl_fpDbl_add8Lbmi2, .Lfunc_end124-mcl_fpDbl_add8Lbmi2
-
-	.globl	mcl_fpDbl_sub8Lbmi2
-	.align	16, 0x90
-	.type	mcl_fpDbl_sub8Lbmi2,@function
-mcl_fpDbl_sub8Lbmi2:            # @mcl_fpDbl_sub8Lbmi2
-# BB#0:
-	[... ~110 lines elided: 16-limb subtract; cmoveq builds p-or-zero
-	 from the borrow flag and adds it back into the high half ...]
-.Lfunc_end125:
-	.size	mcl_fpDbl_sub8Lbmi2, .Lfunc_end125-mcl_fpDbl_sub8Lbmi2
-
%rdx - cmoveq %r8, %rdx - movq 40(%r15), %rcx - cmoveq %r8, %rcx - movq 32(%r15), %rax - cmoveq %r8, %rax - cmovneq 24(%r15), %r8 - addq %rbx, %r11 - adcq %r9, %rsi - movq %r11, 64(%rdi) - adcq %r13, %rbp - movq %rsi, 72(%rdi) - movq %rbp, 80(%rdi) - adcq %r12, %r8 - movq %r8, 88(%rdi) - adcq %r14, %rax - movq %rax, 96(%rdi) - adcq -24(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 104(%rdi) - adcq -16(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 112(%rdi) - adcq -8(%rsp), %r10 # 8-byte Folded Reload - movq %r10, 120(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end125: - .size mcl_fpDbl_sub8Lbmi2, .Lfunc_end125-mcl_fpDbl_sub8Lbmi2 - - .align 16, 0x90 - .type .LmulPv576x64,@function -.LmulPv576x64: # @mulPv576x64 -# BB#0: - mulxq (%rsi), %rcx, %rax - movq %rcx, (%rdi) - mulxq 8(%rsi), %rcx, %r8 - addq %rax, %rcx - movq %rcx, 8(%rdi) - mulxq 16(%rsi), %rcx, %r9 - adcq %r8, %rcx - movq %rcx, 16(%rdi) - mulxq 24(%rsi), %rax, %rcx - adcq %r9, %rax - movq %rax, 24(%rdi) - mulxq 32(%rsi), %rax, %r8 - adcq %rcx, %rax - movq %rax, 32(%rdi) - mulxq 40(%rsi), %rcx, %r9 - adcq %r8, %rcx - movq %rcx, 40(%rdi) - mulxq 48(%rsi), %rax, %rcx - adcq %r9, %rax - movq %rax, 48(%rdi) - mulxq 56(%rsi), %rax, %r8 - adcq %rcx, %rax - movq %rax, 56(%rdi) - mulxq 64(%rsi), %rax, %rcx - adcq %r8, %rax - movq %rax, 64(%rdi) - adcq $0, %rcx - movq %rcx, 72(%rdi) - movq %rdi, %rax - retq -.Lfunc_end126: - .size .LmulPv576x64, .Lfunc_end126-.LmulPv576x64 - - .globl mcl_fp_mulUnitPre9Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre9Lbmi2,@function -mcl_fp_mulUnitPre9Lbmi2: # @mcl_fp_mulUnitPre9Lbmi2 -# BB#0: - pushq %r14 - pushq %rbx - subq $88, %rsp - movq %rdi, %rbx - leaq 8(%rsp), %rdi - callq .LmulPv576x64 - movq 80(%rsp), %r8 - movq 72(%rsp), %r9 - movq 64(%rsp), %r10 - movq 56(%rsp), %r11 - movq 48(%rsp), %r14 - movq 40(%rsp), %rax - movq 32(%rsp), %rcx - movq 24(%rsp), %rdx - movq 8(%rsp), %rsi - movq 16(%rsp), %rdi - movq %rsi, (%rbx) - movq %rdi, 8(%rbx) - movq %rdx, 16(%rbx) - movq %rcx, 24(%rbx) - movq %rax, 32(%rbx) - movq %r14, 40(%rbx) - movq %r11, 48(%rbx) - movq %r10, 56(%rbx) - movq %r9, 64(%rbx) - movq %r8, 72(%rbx) - addq $88, %rsp - popq %rbx - popq %r14 - retq -.Lfunc_end127: - .size mcl_fp_mulUnitPre9Lbmi2, .Lfunc_end127-mcl_fp_mulUnitPre9Lbmi2 - - .globl mcl_fpDbl_mulPre9Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre9Lbmi2,@function -mcl_fpDbl_mulPre9Lbmi2: # @mcl_fpDbl_mulPre9Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $808, %rsp # imm = 0x328 - movq %rdx, %rax - movq %rax, 64(%rsp) # 8-byte Spill - movq %rsi, 72(%rsp) # 8-byte Spill - movq %rdi, %r12 - movq %r12, 80(%rsp) # 8-byte Spill - movq (%rax), %rdx - movq %rax, %rbx - leaq 728(%rsp), %rdi - movq %rsi, %rbp - callq .LmulPv576x64 - movq 800(%rsp), %r13 - movq 792(%rsp), %rax - movq %rax, 56(%rsp) # 8-byte Spill - movq 784(%rsp), %rax - movq %rax, 48(%rsp) # 8-byte Spill - movq 776(%rsp), %rax - movq %rax, 40(%rsp) # 8-byte Spill - movq 768(%rsp), %rax - movq %rax, 32(%rsp) # 8-byte Spill - movq 760(%rsp), %rax - movq %rax, 24(%rsp) # 8-byte Spill - movq 752(%rsp), %rax - movq %rax, 16(%rsp) # 8-byte Spill - movq 744(%rsp), %rax - movq %rax, 8(%rsp) # 8-byte Spill - movq 728(%rsp), %rax - movq 736(%rsp), %r14 - movq %rax, (%r12) - movq 8(%rbx), %rdx - leaq 648(%rsp), %rdi - movq %rbp, %rsi - callq .LmulPv576x64 - movq 720(%rsp), %r8 - movq 712(%rsp), %rcx - movq 704(%rsp), %rdx - movq 696(%rsp), %rsi - movq 688(%rsp), %rdi - movq 
680(%rsp), %rbp - addq 648(%rsp), %r14 - movq 672(%rsp), %rax - movq 656(%rsp), %rbx - movq 664(%rsp), %r15 - movq %r14, 8(%r12) - adcq 8(%rsp), %rbx # 8-byte Folded Reload - adcq 16(%rsp), %r15 # 8-byte Folded Reload - adcq 24(%rsp), %rax # 8-byte Folded Reload - movq %rax, %r14 - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 16(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 24(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 32(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 40(%rsp) # 8-byte Spill - adcq %r13, %rcx - movq %rcx, 48(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 56(%rsp) # 8-byte Spill - movq 64(%rsp), %r13 # 8-byte Reload - movq 16(%r13), %rdx - leaq 568(%rsp), %rdi - movq 72(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 640(%rsp), %r8 - movq 632(%rsp), %r9 - movq 624(%rsp), %r10 - movq 616(%rsp), %rdi - movq 608(%rsp), %rbp - movq 600(%rsp), %rcx - addq 568(%rsp), %rbx - movq 592(%rsp), %rdx - movq 576(%rsp), %r12 - movq 584(%rsp), %rsi - movq 80(%rsp), %rax # 8-byte Reload - movq %rbx, 16(%rax) - adcq %r15, %r12 - adcq %r14, %rsi - movq %rsi, (%rsp) # 8-byte Spill - adcq 16(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 8(%rsp) # 8-byte Spill - adcq 24(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %r10 # 8-byte Folded Reload - movq %r10, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %r9 # 8-byte Folded Reload - movq %r9, 48(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 56(%rsp) # 8-byte Spill - movq 24(%r13), %rdx - leaq 488(%rsp), %rdi - movq 72(%rsp), %r15 # 8-byte Reload - movq %r15, %rsi - callq .LmulPv576x64 - movq 560(%rsp), %r8 - movq 552(%rsp), %rcx - movq 544(%rsp), %rdx - movq 536(%rsp), %rsi - movq 528(%rsp), %rdi - movq 520(%rsp), %rbp - addq 488(%rsp), %r12 - movq 512(%rsp), %rax - movq 496(%rsp), %rbx - movq 504(%rsp), %r13 - movq 80(%rsp), %r14 # 8-byte Reload - movq %r12, 24(%r14) - adcq (%rsp), %rbx # 8-byte Folded Reload - adcq 8(%rsp), %r13 # 8-byte Folded Reload - adcq 16(%rsp), %rax # 8-byte Folded Reload - movq %rax, 8(%rsp) # 8-byte Spill - adcq 24(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 48(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 56(%rsp) # 8-byte Spill - movq 64(%rsp), %r12 # 8-byte Reload - movq 32(%r12), %rdx - leaq 408(%rsp), %rdi - movq %r15, %rsi - callq .LmulPv576x64 - movq 480(%rsp), %r8 - movq 472(%rsp), %r9 - movq 464(%rsp), %rdx - movq 456(%rsp), %rsi - movq 448(%rsp), %rdi - movq 440(%rsp), %rbp - addq 408(%rsp), %rbx - movq 432(%rsp), %rax - movq 416(%rsp), %r15 - movq 424(%rsp), %rcx - movq %rbx, 32(%r14) - adcq %r13, %r15 - adcq 8(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, (%rsp) # 8-byte Spill - adcq 16(%rsp), %rax # 8-byte Folded Reload - movq %rax, 8(%rsp) # 8-byte Spill - adcq 24(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rsi # 8-byte Folded 
Reload - movq %rsi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %r9 # 8-byte Folded Reload - movq %r9, 48(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 56(%rsp) # 8-byte Spill - movq %r12, %r14 - movq 40(%r14), %rdx - leaq 328(%rsp), %rdi - movq 72(%rsp), %r13 # 8-byte Reload - movq %r13, %rsi - callq .LmulPv576x64 - movq 400(%rsp), %r8 - movq 392(%rsp), %r9 - movq 384(%rsp), %rsi - movq 376(%rsp), %rdi - movq 368(%rsp), %rbx - movq 360(%rsp), %rbp - addq 328(%rsp), %r15 - movq 352(%rsp), %rcx - movq 336(%rsp), %r12 - movq 344(%rsp), %rdx - movq 80(%rsp), %rax # 8-byte Reload - movq %r15, 40(%rax) - adcq (%rsp), %r12 # 8-byte Folded Reload - adcq 8(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, (%rsp) # 8-byte Spill - adcq 16(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 8(%rsp) # 8-byte Spill - adcq 24(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %r9 # 8-byte Folded Reload - movq %r9, 48(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 56(%rsp) # 8-byte Spill - movq 48(%r14), %rdx - leaq 248(%rsp), %rdi - movq %r13, %rsi - movq %r13, %r15 - callq .LmulPv576x64 - movq 320(%rsp), %r8 - movq 312(%rsp), %r9 - movq 304(%rsp), %rsi - movq 296(%rsp), %rdi - movq 288(%rsp), %rbx - movq 280(%rsp), %rbp - addq 248(%rsp), %r12 - movq 272(%rsp), %rcx - movq 256(%rsp), %r13 - movq 264(%rsp), %rdx - movq 80(%rsp), %rax # 8-byte Reload - movq %r12, 48(%rax) - adcq (%rsp), %r13 # 8-byte Folded Reload - adcq 8(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, (%rsp) # 8-byte Spill - adcq 16(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 8(%rsp) # 8-byte Spill - adcq 24(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %r9 # 8-byte Folded Reload - movq %r9, 48(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 56(%rsp) # 8-byte Spill - movq 56(%r14), %rdx - leaq 168(%rsp), %rdi - movq %r15, %rsi - callq .LmulPv576x64 - movq 240(%rsp), %rcx - movq 232(%rsp), %rdx - movq 224(%rsp), %rsi - movq 216(%rsp), %rdi - movq 208(%rsp), %rbx - addq 168(%rsp), %r13 - movq 200(%rsp), %r12 - movq 192(%rsp), %rbp - movq 176(%rsp), %r14 - movq 184(%rsp), %r15 - movq 80(%rsp), %rax # 8-byte Reload - movq %r13, 56(%rax) - adcq (%rsp), %r14 # 8-byte Folded Reload - adcq 8(%rsp), %r15 # 8-byte Folded Reload - adcq 16(%rsp), %rbp # 8-byte Folded Reload - adcq 24(%rsp), %r12 # 8-byte Folded Reload - adcq 32(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, %r13 - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 48(%rsp) # 8-byte Spill - adcq $0, %rcx - movq %rcx, 56(%rsp) # 8-byte Spill - movq 64(%rsp), %rax # 8-byte Reload - movq 64(%rax), %rdx - leaq 88(%rsp), %rdi - movq 72(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 88(%rsp), %r14 - adcq 96(%rsp), %r15 - movq 160(%rsp), %r8 - adcq 104(%rsp), %rbp - movq 
152(%rsp), %r9 - movq 144(%rsp), %rdx - movq 136(%rsp), %rsi - movq 128(%rsp), %rdi - movq 120(%rsp), %rbx - movq 112(%rsp), %rax - movq 80(%rsp), %rcx # 8-byte Reload - movq %r14, 64(%rcx) - movq %r15, 72(%rcx) - adcq %r12, %rax - movq %rbp, 80(%rcx) - movq %rax, 88(%rcx) - adcq %r13, %rbx - movq %rbx, 96(%rcx) - adcq 32(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 104(%rcx) - adcq 40(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 112(%rcx) - adcq 48(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 120(%rcx) - adcq 56(%rsp), %r9 # 8-byte Folded Reload - movq %r9, 128(%rcx) - adcq $0, %r8 - movq %r8, 136(%rcx) - addq $808, %rsp # imm = 0x328 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end128: - .size mcl_fpDbl_mulPre9Lbmi2, .Lfunc_end128-mcl_fpDbl_mulPre9Lbmi2 - - .globl mcl_fpDbl_sqrPre9Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre9Lbmi2,@function -mcl_fpDbl_sqrPre9Lbmi2: # @mcl_fpDbl_sqrPre9Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $808, %rsp # imm = 0x328 - movq %rsi, %r15 - movq %r15, 80(%rsp) # 8-byte Spill - movq %rdi, %r14 - movq %r14, 72(%rsp) # 8-byte Spill - movq (%r15), %rdx - leaq 728(%rsp), %rdi - callq .LmulPv576x64 - movq 800(%rsp), %rax - movq %rax, 64(%rsp) # 8-byte Spill - movq 792(%rsp), %rax - movq %rax, 56(%rsp) # 8-byte Spill - movq 784(%rsp), %rax - movq %rax, 48(%rsp) # 8-byte Spill - movq 776(%rsp), %rax - movq %rax, 40(%rsp) # 8-byte Spill - movq 768(%rsp), %rax - movq %rax, 32(%rsp) # 8-byte Spill - movq 760(%rsp), %rax - movq %rax, 24(%rsp) # 8-byte Spill - movq 752(%rsp), %rax - movq %rax, 16(%rsp) # 8-byte Spill - movq 744(%rsp), %rax - movq %rax, 8(%rsp) # 8-byte Spill - movq 728(%rsp), %rax - movq 736(%rsp), %r12 - movq %rax, (%r14) - movq 8(%r15), %rdx - leaq 648(%rsp), %rdi - movq %r15, %rsi - callq .LmulPv576x64 - movq 720(%rsp), %r8 - movq 712(%rsp), %rcx - movq 704(%rsp), %rdx - movq 696(%rsp), %rsi - movq 688(%rsp), %rdi - movq 680(%rsp), %rbp - addq 648(%rsp), %r12 - movq 672(%rsp), %rax - movq 656(%rsp), %rbx - movq 664(%rsp), %r13 - movq %r12, 8(%r14) - adcq 8(%rsp), %rbx # 8-byte Folded Reload - adcq 16(%rsp), %r13 # 8-byte Folded Reload - adcq 24(%rsp), %rax # 8-byte Folded Reload - movq %rax, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 48(%rsp) # 8-byte Spill - adcq 64(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 56(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 64(%rsp) # 8-byte Spill - movq 16(%r15), %rdx - leaq 568(%rsp), %rdi - movq %r15, %rsi - callq .LmulPv576x64 - movq 640(%rsp), %r8 - movq 632(%rsp), %rcx - movq 624(%rsp), %rdx - movq 616(%rsp), %rsi - movq 608(%rsp), %rdi - movq 600(%rsp), %rbp - addq 568(%rsp), %rbx - movq 592(%rsp), %rax - movq 576(%rsp), %r14 - movq 584(%rsp), %r12 - movq 72(%rsp), %r15 # 8-byte Reload - movq %rbx, 16(%r15) - adcq %r13, %r14 - adcq 16(%rsp), %r12 # 8-byte Folded Reload - adcq 24(%rsp), %rax # 8-byte Folded Reload - movq %rax, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 
8-byte Folded Reload - movq %rdx, 48(%rsp) # 8-byte Spill - adcq 64(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 56(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 64(%rsp) # 8-byte Spill - movq 80(%rsp), %rsi # 8-byte Reload - movq 24(%rsi), %rdx - leaq 488(%rsp), %rdi - callq .LmulPv576x64 - movq 560(%rsp), %r8 - movq 552(%rsp), %rcx - movq 544(%rsp), %rdx - movq 536(%rsp), %rsi - movq 528(%rsp), %rdi - movq 520(%rsp), %rbp - addq 488(%rsp), %r14 - movq 512(%rsp), %rax - movq 496(%rsp), %rbx - movq 504(%rsp), %r13 - movq %r14, 24(%r15) - adcq %r12, %rbx - adcq 16(%rsp), %r13 # 8-byte Folded Reload - adcq 24(%rsp), %rax # 8-byte Folded Reload - movq %rax, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 48(%rsp) # 8-byte Spill - adcq 64(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 56(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 64(%rsp) # 8-byte Spill - movq 80(%rsp), %rsi # 8-byte Reload - movq 32(%rsi), %rdx - leaq 408(%rsp), %rdi - callq .LmulPv576x64 - movq 480(%rsp), %r8 - movq 472(%rsp), %rcx - movq 464(%rsp), %rdx - movq 456(%rsp), %rsi - movq 448(%rsp), %rdi - movq 440(%rsp), %rbp - addq 408(%rsp), %rbx - movq 432(%rsp), %rax - movq 416(%rsp), %r14 - movq 424(%rsp), %r12 - movq %rbx, 32(%r15) - adcq %r13, %r14 - adcq 16(%rsp), %r12 # 8-byte Folded Reload - adcq 24(%rsp), %rax # 8-byte Folded Reload - movq %rax, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 48(%rsp) # 8-byte Spill - adcq 64(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 56(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 64(%rsp) # 8-byte Spill - movq 80(%rsp), %rsi # 8-byte Reload - movq 40(%rsi), %rdx - leaq 328(%rsp), %rdi - callq .LmulPv576x64 - movq 400(%rsp), %r8 - movq 392(%rsp), %rcx - movq 384(%rsp), %rdx - movq 376(%rsp), %rsi - movq 368(%rsp), %rdi - movq 360(%rsp), %rbp - addq 328(%rsp), %r14 - movq 352(%rsp), %rax - movq 336(%rsp), %rbx - movq 344(%rsp), %r13 - movq %r14, 40(%r15) - adcq %r12, %rbx - adcq 16(%rsp), %r13 # 8-byte Folded Reload - adcq 24(%rsp), %rax # 8-byte Folded Reload - movq %rax, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 48(%rsp) # 8-byte Spill - adcq 64(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 56(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 64(%rsp) # 8-byte Spill - movq 80(%rsp), %rsi # 8-byte Reload - movq 48(%rsi), %rdx - leaq 248(%rsp), %rdi - callq .LmulPv576x64 - movq 320(%rsp), %r8 - movq 312(%rsp), %rcx - movq 304(%rsp), %rdx - movq 296(%rsp), %rsi - movq 288(%rsp), %rdi - movq 280(%rsp), %rbp - addq 248(%rsp), %rbx - movq 272(%rsp), %rax - movq 256(%rsp), %r12 - movq 264(%rsp), %r14 - movq %rbx, 48(%r15) - adcq %r13, %r12 - adcq 16(%rsp), %r14 # 8-byte Folded Reload - adcq 24(%rsp), %rax # 8-byte Folded Reload - movq %rax, 
16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 48(%rsp) # 8-byte Spill - adcq 64(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 56(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 64(%rsp) # 8-byte Spill - movq 80(%rsp), %rsi # 8-byte Reload - movq 56(%rsi), %rdx - leaq 168(%rsp), %rdi - callq .LmulPv576x64 - movq 240(%rsp), %r8 - movq 232(%rsp), %rdx - movq 224(%rsp), %rsi - movq 216(%rsp), %rdi - movq 208(%rsp), %rbx - movq 200(%rsp), %rcx - addq 168(%rsp), %r12 - movq 192(%rsp), %r15 - movq 176(%rsp), %r13 - movq 184(%rsp), %rbp - movq 72(%rsp), %rax # 8-byte Reload - movq %r12, 56(%rax) - adcq %r14, %r13 - adcq 16(%rsp), %rbp # 8-byte Folded Reload - adcq 24(%rsp), %r15 # 8-byte Folded Reload - adcq 32(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, %r12 - adcq 40(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, %r14 - adcq 48(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 48(%rsp) # 8-byte Spill - adcq 64(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 56(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 64(%rsp) # 8-byte Spill - movq 80(%rsp), %rsi # 8-byte Reload - movq 64(%rsi), %rdx - leaq 88(%rsp), %rdi - callq .LmulPv576x64 - addq 88(%rsp), %r13 - adcq 96(%rsp), %rbp - movq 160(%rsp), %r8 - adcq 104(%rsp), %r15 - movq 152(%rsp), %r9 - movq 144(%rsp), %rdx - movq 136(%rsp), %rsi - movq 128(%rsp), %rdi - movq 120(%rsp), %rbx - movq 112(%rsp), %rax - movq 72(%rsp), %rcx # 8-byte Reload - movq %r13, 64(%rcx) - movq %rbp, 72(%rcx) - adcq %r12, %rax - movq %r15, 80(%rcx) - movq %rax, 88(%rcx) - adcq %r14, %rbx - movq %rbx, 96(%rcx) - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 104(%rcx) - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 112(%rcx) - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 120(%rcx) - adcq 64(%rsp), %r9 # 8-byte Folded Reload - movq %r9, 128(%rcx) - adcq $0, %r8 - movq %r8, 136(%rcx) - addq $808, %rsp # imm = 0x328 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end129: - .size mcl_fpDbl_sqrPre9Lbmi2, .Lfunc_end129-mcl_fpDbl_sqrPre9Lbmi2 - - .globl mcl_fp_mont9Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont9Lbmi2,@function -mcl_fp_mont9Lbmi2: # @mcl_fp_mont9Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $1560, %rsp # imm = 0x618 - movq %rcx, 56(%rsp) # 8-byte Spill - movq %rdx, 32(%rsp) # 8-byte Spill - movq %rsi, 24(%rsp) # 8-byte Spill - movq %rdi, (%rsp) # 8-byte Spill - movq -8(%rcx), %rbx - movq %rbx, 16(%rsp) # 8-byte Spill - movq (%rdx), %rdx - leaq 1480(%rsp), %rdi - callq .LmulPv576x64 - movq 1480(%rsp), %r14 - movq 1488(%rsp), %r15 - movq %r14, %rdx - imulq %rbx, %rdx - movq 1552(%rsp), %rax - movq %rax, 112(%rsp) # 8-byte Spill - movq 1544(%rsp), %rax - movq %rax, 104(%rsp) # 8-byte Spill - movq 1536(%rsp), %rax - movq %rax, 88(%rsp) # 8-byte Spill - movq 1528(%rsp), %r12 - movq 1520(%rsp), %r13 - movq 1512(%rsp), %rbx - movq 1504(%rsp), %rax - movq %rax, 80(%rsp) # 8-byte Spill - movq 1496(%rsp), %rbp - leaq 1400(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 1400(%rsp), %r14 - adcq 1408(%rsp), %r15 - adcq 1416(%rsp), %rbp - movq %rbp, 96(%rsp) # 
8-byte Spill - movq 80(%rsp), %rax # 8-byte Reload - adcq 1424(%rsp), %rax - movq %rax, 80(%rsp) # 8-byte Spill - adcq 1432(%rsp), %rbx - movq %rbx, 40(%rsp) # 8-byte Spill - adcq 1440(%rsp), %r13 - movq %r13, 64(%rsp) # 8-byte Spill - adcq 1448(%rsp), %r12 - movq %r12, 48(%rsp) # 8-byte Spill - movq 88(%rsp), %rbx # 8-byte Reload - adcq 1456(%rsp), %rbx - movq 104(%rsp), %r14 # 8-byte Reload - adcq 1464(%rsp), %r14 - movq 112(%rsp), %r13 # 8-byte Reload - adcq 1472(%rsp), %r13 - sbbq %rbp, %rbp - movq 32(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rdx - leaq 1320(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %ebp - addq 1320(%rsp), %r15 - movq 96(%rsp), %rax # 8-byte Reload - adcq 1328(%rsp), %rax - movq %rax, 96(%rsp) # 8-byte Spill - movq 80(%rsp), %rax # 8-byte Reload - adcq 1336(%rsp), %rax - movq %rax, 80(%rsp) # 8-byte Spill - movq 40(%rsp), %r12 # 8-byte Reload - adcq 1344(%rsp), %r12 - movq 64(%rsp), %rax # 8-byte Reload - adcq 1352(%rsp), %rax - movq %rax, 64(%rsp) # 8-byte Spill - movq 48(%rsp), %rax # 8-byte Reload - adcq 1360(%rsp), %rax - movq %rax, 48(%rsp) # 8-byte Spill - adcq 1368(%rsp), %rbx - adcq 1376(%rsp), %r14 - movq %r14, 104(%rsp) # 8-byte Spill - adcq 1384(%rsp), %r13 - movq %r13, 112(%rsp) # 8-byte Spill - adcq 1392(%rsp), %rbp - sbbq %r14, %r14 - movq %r15, %rdx - imulq 16(%rsp), %rdx # 8-byte Folded Reload - leaq 1240(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq %r14, %rax - andl $1, %eax - addq 1240(%rsp), %r15 - movq 96(%rsp), %rcx # 8-byte Reload - adcq 1248(%rsp), %rcx - movq %rcx, 96(%rsp) # 8-byte Spill - movq 80(%rsp), %r14 # 8-byte Reload - adcq 1256(%rsp), %r14 - adcq 1264(%rsp), %r12 - movq %r12, 40(%rsp) # 8-byte Spill - movq 64(%rsp), %r12 # 8-byte Reload - adcq 1272(%rsp), %r12 - movq 48(%rsp), %r13 # 8-byte Reload - adcq 1280(%rsp), %r13 - adcq 1288(%rsp), %rbx - movq %rbx, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %r15 # 8-byte Reload - adcq 1296(%rsp), %r15 - movq 112(%rsp), %rbx # 8-byte Reload - adcq 1304(%rsp), %rbx - adcq 1312(%rsp), %rbp - adcq $0, %rax - movq %rax, 72(%rsp) # 8-byte Spill - movq 32(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rdx - leaq 1160(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 96(%rsp), %rax # 8-byte Reload - addq 1160(%rsp), %rax - adcq 1168(%rsp), %r14 - movq %r14, 80(%rsp) # 8-byte Spill - movq 40(%rsp), %r14 # 8-byte Reload - adcq 1176(%rsp), %r14 - adcq 1184(%rsp), %r12 - movq %r12, 64(%rsp) # 8-byte Spill - movq %r13, %r12 - adcq 1192(%rsp), %r12 - movq 88(%rsp), %rcx # 8-byte Reload - adcq 1200(%rsp), %rcx - movq %rcx, 88(%rsp) # 8-byte Spill - adcq 1208(%rsp), %r15 - movq %r15, %r13 - adcq 1216(%rsp), %rbx - movq %rbx, 112(%rsp) # 8-byte Spill - adcq 1224(%rsp), %rbp - movq 72(%rsp), %rcx # 8-byte Reload - adcq 1232(%rsp), %rcx - movq %rcx, 72(%rsp) # 8-byte Spill - sbbq %r15, %r15 - movq %rax, %rdx - movq %rax, %rbx - imulq 16(%rsp), %rdx # 8-byte Folded Reload - leaq 1080(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq %r15, %rax - andl $1, %eax - addq 1080(%rsp), %rbx - movq 80(%rsp), %rcx # 8-byte Reload - adcq 1088(%rsp), %rcx - movq %rcx, 80(%rsp) # 8-byte Spill - movq %r14, %r15 - adcq 1096(%rsp), %r15 - movq 64(%rsp), %r14 # 8-byte Reload - adcq 1104(%rsp), %r14 - movq %r12, %rbx - adcq 1112(%rsp), %rbx - movq 88(%rsp), %rcx # 8-byte Reload - adcq 1120(%rsp), %rcx - movq %rcx, 88(%rsp) # 8-byte Spill - adcq 1128(%rsp), %r13 - movq %r13, 104(%rsp) # 8-byte Spill 
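The mcl_fp_mont9Lbmi2 body being removed here is a fully unrolled word-serial Montgomery multiplication over nine 64-bit limbs: each round multiplies one operand by one word of the other through .LmulPv576x64, derives m from the low limb and the precomputed -p^-1 mod 2^64 word loaded from -8(%rcx), accumulates m*p, and drops one limb; a final conditional subtraction of p completes the reduction. A compact Go sketch in the CIOS formulation, which organizes the carries differently but computes the same z = x*y*R^-1 mod p (fpMont, muladd2, and pInv are illustrative names, in the same hypothetical package as the sketch above):

    // muladd2 returns the low and high words of a*b + c + d
    // (the full sum always fits in 128 bits).
    func muladd2(a, b, c, d uint64) (lo, hi uint64) {
        hi, lo = bits.Mul64(a, b)
        var carry uint64
        lo, carry = bits.Add64(lo, c, 0)
        hi += carry
        lo, carry = bits.Add64(lo, d, 0)
        hi += carry
        return
    }

    // fpMont computes z = x*y*R^-1 mod p, R = 2^576, over nine 64-bit
    // limbs; pInv = -p^-1 mod 2^64, the word the assembly reads from
    // -8(%rcx). One multiplier word and one reduction step per round.
    func fpMont(z, x, y, p *[9]uint64, pInv uint64) {
        const s = 9
        var t [s + 2]uint64
        for i := 0; i < s; i++ {
            // t += x * y[i]
            var c uint64
            for j := 0; j < s; j++ {
                t[j], c = muladd2(x[j], y[i], t[j], c)
            }
            t[s], c = bits.Add64(t[s], c, 0)
            t[s+1] = c
            // pick m so the low limb of t + m*p is zero, then
            // add m*p and shift t down one limb
            m := t[0] * pInv
            _, c = muladd2(m, p[0], t[0], 0)
            for j := 1; j < s; j++ {
                t[j-1], c = muladd2(m, p[j], t[j], c)
            }
            t[s-1], c = bits.Add64(t[s], c, 0)
            t[s] = t[s+1] + c
        }
        // final conditional subtraction; t < 2p at this point
        var b uint64
        var u [s]uint64
        for j := 0; j < s; j++ {
            u[j], b = bits.Sub64(t[j], p[j], b)
        }
        if t[s] != 0 || b == 0 {
            copy(z[:], u[:])
        } else {
            copy(z[:], t[:s])
        }
    }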
- movq 112(%rsp), %r13 # 8-byte Reload - adcq 1136(%rsp), %r13 - adcq 1144(%rsp), %rbp - movq 72(%rsp), %r12 # 8-byte Reload - adcq 1152(%rsp), %r12 - adcq $0, %rax - movq %rax, 96(%rsp) # 8-byte Spill - movq 32(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rdx - leaq 1000(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 80(%rsp), %rax # 8-byte Reload - addq 1000(%rsp), %rax - adcq 1008(%rsp), %r15 - movq %r15, 40(%rsp) # 8-byte Spill - adcq 1016(%rsp), %r14 - movq %r14, %r15 - adcq 1024(%rsp), %rbx - movq %rbx, 48(%rsp) # 8-byte Spill - movq 88(%rsp), %r14 # 8-byte Reload - adcq 1032(%rsp), %r14 - movq 104(%rsp), %rcx # 8-byte Reload - adcq 1040(%rsp), %rcx - movq %rcx, 104(%rsp) # 8-byte Spill - adcq 1048(%rsp), %r13 - movq %r13, 112(%rsp) # 8-byte Spill - adcq 1056(%rsp), %rbp - adcq 1064(%rsp), %r12 - movq 96(%rsp), %rcx # 8-byte Reload - adcq 1072(%rsp), %rcx - movq %rcx, 96(%rsp) # 8-byte Spill - sbbq %rbx, %rbx - movq %rax, %rdx - movq %rax, %r13 - imulq 16(%rsp), %rdx # 8-byte Folded Reload - leaq 920(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %ebx - movq %rbx, %rax - addq 920(%rsp), %r13 - movq 40(%rsp), %rcx # 8-byte Reload - adcq 928(%rsp), %rcx - movq %rcx, 40(%rsp) # 8-byte Spill - adcq 936(%rsp), %r15 - movq %r15, 64(%rsp) # 8-byte Spill - movq 48(%rsp), %r15 # 8-byte Reload - adcq 944(%rsp), %r15 - movq %r14, %r13 - adcq 952(%rsp), %r13 - movq 104(%rsp), %r14 # 8-byte Reload - adcq 960(%rsp), %r14 - movq 112(%rsp), %rbx # 8-byte Reload - adcq 968(%rsp), %rbx - adcq 976(%rsp), %rbp - adcq 984(%rsp), %r12 - movq 96(%rsp), %rcx # 8-byte Reload - adcq 992(%rsp), %rcx - movq %rcx, 96(%rsp) # 8-byte Spill - adcq $0, %rax - movq %rax, 80(%rsp) # 8-byte Spill - movq 32(%rsp), %rax # 8-byte Reload - movq 32(%rax), %rdx - leaq 840(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 40(%rsp), %rax # 8-byte Reload - addq 840(%rsp), %rax - movq 64(%rsp), %rcx # 8-byte Reload - adcq 848(%rsp), %rcx - movq %rcx, 64(%rsp) # 8-byte Spill - adcq 856(%rsp), %r15 - adcq 864(%rsp), %r13 - movq %r13, 88(%rsp) # 8-byte Spill - adcq 872(%rsp), %r14 - movq %r14, 104(%rsp) # 8-byte Spill - adcq 880(%rsp), %rbx - movq %rbx, 112(%rsp) # 8-byte Spill - adcq 888(%rsp), %rbp - adcq 896(%rsp), %r12 - movq 96(%rsp), %r13 # 8-byte Reload - adcq 904(%rsp), %r13 - movq 80(%rsp), %rcx # 8-byte Reload - adcq 912(%rsp), %rcx - movq %rcx, 80(%rsp) # 8-byte Spill - sbbq %rbx, %rbx - movq %rax, %rdx - movq %rax, %r14 - imulq 16(%rsp), %rdx # 8-byte Folded Reload - leaq 760(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %ebx - movq %rbx, %rax - addq 760(%rsp), %r14 - movq 64(%rsp), %rcx # 8-byte Reload - adcq 768(%rsp), %rcx - movq %rcx, 64(%rsp) # 8-byte Spill - adcq 776(%rsp), %r15 - movq 88(%rsp), %r14 # 8-byte Reload - adcq 784(%rsp), %r14 - movq 104(%rsp), %rcx # 8-byte Reload - adcq 792(%rsp), %rcx - movq %rcx, 104(%rsp) # 8-byte Spill - movq 112(%rsp), %rcx # 8-byte Reload - adcq 800(%rsp), %rcx - movq %rcx, 112(%rsp) # 8-byte Spill - adcq 808(%rsp), %rbp - movq %r12, %rbx - adcq 816(%rsp), %rbx - movq %r13, %r12 - adcq 824(%rsp), %r12 - movq 80(%rsp), %r13 # 8-byte Reload - adcq 832(%rsp), %r13 - adcq $0, %rax - movq %rax, 40(%rsp) # 8-byte Spill - movq 32(%rsp), %rax # 8-byte Reload - movq 40(%rax), %rdx - leaq 680(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 64(%rsp), %rax # 8-byte Reload - addq 680(%rsp), %rax - adcq 688(%rsp), %r15 - movq 
%r15, 48(%rsp) # 8-byte Spill - adcq 696(%rsp), %r14 - movq %r14, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %rcx # 8-byte Reload - adcq 704(%rsp), %rcx - movq %rcx, 104(%rsp) # 8-byte Spill - movq 112(%rsp), %r15 # 8-byte Reload - adcq 712(%rsp), %r15 - adcq 720(%rsp), %rbp - adcq 728(%rsp), %rbx - movq %rbx, 72(%rsp) # 8-byte Spill - adcq 736(%rsp), %r12 - movq %r12, 96(%rsp) # 8-byte Spill - adcq 744(%rsp), %r13 - movq %r13, 80(%rsp) # 8-byte Spill - movq 40(%rsp), %r13 # 8-byte Reload - adcq 752(%rsp), %r13 - sbbq %r14, %r14 - movq %rax, %rdx - movq %rax, %rbx - imulq 16(%rsp), %rdx # 8-byte Folded Reload - leaq 600(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %r14d - addq 600(%rsp), %rbx - movq 48(%rsp), %rax # 8-byte Reload - adcq 608(%rsp), %rax - movq %rax, 48(%rsp) # 8-byte Spill - movq 88(%rsp), %rax # 8-byte Reload - adcq 616(%rsp), %rax - movq %rax, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %rbx # 8-byte Reload - adcq 624(%rsp), %rbx - adcq 632(%rsp), %r15 - movq %r15, 112(%rsp) # 8-byte Spill - adcq 640(%rsp), %rbp - movq 72(%rsp), %r12 # 8-byte Reload - adcq 648(%rsp), %r12 - movq 96(%rsp), %rax # 8-byte Reload - adcq 656(%rsp), %rax - movq %rax, 96(%rsp) # 8-byte Spill - movq 80(%rsp), %r15 # 8-byte Reload - adcq 664(%rsp), %r15 - adcq 672(%rsp), %r13 - adcq $0, %r14 - movq %r14, 64(%rsp) # 8-byte Spill - movq 32(%rsp), %rax # 8-byte Reload - movq 48(%rax), %rdx - leaq 520(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 48(%rsp), %rax # 8-byte Reload - addq 520(%rsp), %rax - movq 88(%rsp), %r14 # 8-byte Reload - adcq 528(%rsp), %r14 - adcq 536(%rsp), %rbx - movq %rbx, 104(%rsp) # 8-byte Spill - movq 112(%rsp), %rcx # 8-byte Reload - adcq 544(%rsp), %rcx - movq %rcx, 112(%rsp) # 8-byte Spill - adcq 552(%rsp), %rbp - adcq 560(%rsp), %r12 - movq %r12, 72(%rsp) # 8-byte Spill - movq 96(%rsp), %r12 # 8-byte Reload - adcq 568(%rsp), %r12 - adcq 576(%rsp), %r15 - movq %r15, 80(%rsp) # 8-byte Spill - adcq 584(%rsp), %r13 - movq %r13, 40(%rsp) # 8-byte Spill - movq 64(%rsp), %r15 # 8-byte Reload - adcq 592(%rsp), %r15 - sbbq %rbx, %rbx - movq %rax, %rdx - movq %rax, %r13 - imulq 16(%rsp), %rdx # 8-byte Folded Reload - leaq 440(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %ebx - movq %rbx, %rax - addq 440(%rsp), %r13 - adcq 448(%rsp), %r14 - movq %r14, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %r14 # 8-byte Reload - adcq 456(%rsp), %r14 - movq 112(%rsp), %rbx # 8-byte Reload - adcq 464(%rsp), %rbx - adcq 472(%rsp), %rbp - movq %rbp, 8(%rsp) # 8-byte Spill - movq 72(%rsp), %rcx # 8-byte Reload - adcq 480(%rsp), %rcx - movq %rcx, 72(%rsp) # 8-byte Spill - adcq 488(%rsp), %r12 - movq %r12, 96(%rsp) # 8-byte Spill - movq 80(%rsp), %rbp # 8-byte Reload - adcq 496(%rsp), %rbp - movq 40(%rsp), %r12 # 8-byte Reload - adcq 504(%rsp), %r12 - adcq 512(%rsp), %r15 - movq %r15, %r13 - adcq $0, %rax - movq %rax, 48(%rsp) # 8-byte Spill - movq 32(%rsp), %rax # 8-byte Reload - movq 56(%rax), %rdx - leaq 360(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 88(%rsp), %rax # 8-byte Reload - addq 360(%rsp), %rax - adcq 368(%rsp), %r14 - adcq 376(%rsp), %rbx - movq %rbx, 112(%rsp) # 8-byte Spill - movq 8(%rsp), %rcx # 8-byte Reload - adcq 384(%rsp), %rcx - movq %rcx, 8(%rsp) # 8-byte Spill - movq 72(%rsp), %rbx # 8-byte Reload - adcq 392(%rsp), %rbx - movq 96(%rsp), %r15 # 8-byte Reload - adcq 400(%rsp), %r15 - adcq 408(%rsp), %rbp - movq %rbp, 80(%rsp) # 8-byte Spill - 
adcq 416(%rsp), %r12 - movq %r12, %rbp - adcq 424(%rsp), %r13 - movq %r13, 64(%rsp) # 8-byte Spill - movq 48(%rsp), %rcx # 8-byte Reload - adcq 432(%rsp), %rcx - movq %rcx, 48(%rsp) # 8-byte Spill - sbbq %r13, %r13 - movq %rax, %rdx - movq %rax, %r12 - imulq 16(%rsp), %rdx # 8-byte Folded Reload - leaq 280(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %r13d - addq 280(%rsp), %r12 - adcq 288(%rsp), %r14 - movq %r14, 104(%rsp) # 8-byte Spill - movq 112(%rsp), %rax # 8-byte Reload - adcq 296(%rsp), %rax - movq %rax, 112(%rsp) # 8-byte Spill - movq 8(%rsp), %r14 # 8-byte Reload - adcq 304(%rsp), %r14 - adcq 312(%rsp), %rbx - movq %rbx, 72(%rsp) # 8-byte Spill - adcq 320(%rsp), %r15 - movq %r15, 96(%rsp) # 8-byte Spill - movq 80(%rsp), %rbx # 8-byte Reload - adcq 328(%rsp), %rbx - adcq 336(%rsp), %rbp - movq %rbp, 40(%rsp) # 8-byte Spill - movq 64(%rsp), %r12 # 8-byte Reload - adcq 344(%rsp), %r12 - movq 48(%rsp), %rbp # 8-byte Reload - adcq 352(%rsp), %rbp - adcq $0, %r13 - movq 32(%rsp), %rax # 8-byte Reload - movq 64(%rax), %rdx - leaq 200(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 104(%rsp), %rax # 8-byte Reload - addq 200(%rsp), %rax - movq 112(%rsp), %r15 # 8-byte Reload - adcq 208(%rsp), %r15 - adcq 216(%rsp), %r14 - movq %r14, 8(%rsp) # 8-byte Spill - movq 72(%rsp), %r14 # 8-byte Reload - adcq 224(%rsp), %r14 - movq 96(%rsp), %rcx # 8-byte Reload - adcq 232(%rsp), %rcx - movq %rcx, 96(%rsp) # 8-byte Spill - adcq 240(%rsp), %rbx - movq %rbx, 80(%rsp) # 8-byte Spill - movq 40(%rsp), %rcx # 8-byte Reload - adcq 248(%rsp), %rcx - movq %rcx, 40(%rsp) # 8-byte Spill - adcq 256(%rsp), %r12 - movq %r12, 64(%rsp) # 8-byte Spill - adcq 264(%rsp), %rbp - movq %rbp, 48(%rsp) # 8-byte Spill - adcq 272(%rsp), %r13 - sbbq %rbx, %rbx - movq 16(%rsp), %rdx # 8-byte Reload - imulq %rax, %rdx - movq %rax, %r12 - leaq 120(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %ebx - addq 120(%rsp), %r12 - adcq 128(%rsp), %r15 - movq 8(%rsp), %rbp # 8-byte Reload - adcq 136(%rsp), %rbp - movq %r14, %rcx - adcq 144(%rsp), %rcx - movq %rcx, 72(%rsp) # 8-byte Spill - movq 96(%rsp), %r8 # 8-byte Reload - adcq 152(%rsp), %r8 - movq %r8, 96(%rsp) # 8-byte Spill - movq 80(%rsp), %r9 # 8-byte Reload - adcq 160(%rsp), %r9 - movq %r9, 80(%rsp) # 8-byte Spill - movq 40(%rsp), %r10 # 8-byte Reload - adcq 168(%rsp), %r10 - movq %r10, 40(%rsp) # 8-byte Spill - movq 64(%rsp), %rdi # 8-byte Reload - adcq 176(%rsp), %rdi - movq %rdi, 64(%rsp) # 8-byte Spill - movq 48(%rsp), %r14 # 8-byte Reload - adcq 184(%rsp), %r14 - adcq 192(%rsp), %r13 - adcq $0, %rbx - movq %r15, %rsi - movq %r15, %r12 - movq 56(%rsp), %rdx # 8-byte Reload - subq (%rdx), %rsi - movq %rbp, %rax - movq %rbp, %r15 - sbbq 8(%rdx), %rax - movq %rcx, %rbp - sbbq 16(%rdx), %rbp - movq %r8, %rcx - sbbq 24(%rdx), %rcx - movq %r9, %r8 - sbbq 32(%rdx), %r8 - movq %r10, %r11 - sbbq 40(%rdx), %r11 - movq %rdi, %r10 - sbbq 48(%rdx), %r10 - movq %r14, %rdi - sbbq 56(%rdx), %rdi - movq %r13, %r9 - sbbq 64(%rdx), %r9 - sbbq $0, %rbx - andl $1, %ebx - cmovneq %r13, %r9 - testb %bl, %bl - cmovneq %r12, %rsi - movq (%rsp), %rbx # 8-byte Reload - movq %rsi, (%rbx) - cmovneq %r15, %rax - movq %rax, 8(%rbx) - cmovneq 72(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 16(%rbx) - cmovneq 96(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 24(%rbx) - cmovneq 80(%rsp), %r8 # 8-byte Folded Reload - movq %r8, 32(%rbx) - cmovneq 40(%rsp), %r11 # 8-byte Folded Reload - movq %r11, 
40(%rbx) - cmovneq 64(%rsp), %r10 # 8-byte Folded Reload - movq %r10, 48(%rbx) - cmovneq %r14, %rdi - movq %rdi, 56(%rbx) - movq %r9, 64(%rbx) - addq $1560, %rsp # imm = 0x618 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end130: - .size mcl_fp_mont9Lbmi2, .Lfunc_end130-mcl_fp_mont9Lbmi2 - - .globl mcl_fp_montNF9Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF9Lbmi2,@function -mcl_fp_montNF9Lbmi2: # @mcl_fp_montNF9Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $1560, %rsp # imm = 0x618 - movq %rcx, 64(%rsp) # 8-byte Spill - movq %rdx, 16(%rsp) # 8-byte Spill - movq %rsi, 24(%rsp) # 8-byte Spill - movq %rdi, (%rsp) # 8-byte Spill - movq -8(%rcx), %rbx - movq %rbx, 32(%rsp) # 8-byte Spill - movq (%rdx), %rdx - leaq 1480(%rsp), %rdi - callq .LmulPv576x64 - movq 1480(%rsp), %r12 - movq 1488(%rsp), %rax - movq %rax, 88(%rsp) # 8-byte Spill - movq %r12, %rdx - imulq %rbx, %rdx - movq 1552(%rsp), %rax - movq %rax, 112(%rsp) # 8-byte Spill - movq 1544(%rsp), %r13 - movq 1536(%rsp), %rax - movq %rax, 72(%rsp) # 8-byte Spill - movq 1528(%rsp), %rax - movq %rax, 48(%rsp) # 8-byte Spill - movq 1520(%rsp), %r14 - movq 1512(%rsp), %r15 - movq 1504(%rsp), %rbx - movq 1496(%rsp), %rbp - leaq 1400(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 1400(%rsp), %r12 - movq 88(%rsp), %rax # 8-byte Reload - adcq 1408(%rsp), %rax - movq %rax, 88(%rsp) # 8-byte Spill - adcq 1416(%rsp), %rbp - movq %rbp, 8(%rsp) # 8-byte Spill - adcq 1424(%rsp), %rbx - movq %rbx, 104(%rsp) # 8-byte Spill - adcq 1432(%rsp), %r15 - movq %r15, 56(%rsp) # 8-byte Spill - adcq 1440(%rsp), %r14 - movq %r14, 40(%rsp) # 8-byte Spill - movq 48(%rsp), %rbx # 8-byte Reload - adcq 1448(%rsp), %rbx - movq 72(%rsp), %r12 # 8-byte Reload - adcq 1456(%rsp), %r12 - adcq 1464(%rsp), %r13 - movq %r13, 96(%rsp) # 8-byte Spill - movq 112(%rsp), %rbp # 8-byte Reload - adcq 1472(%rsp), %rbp - movq 16(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rdx - leaq 1320(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 1392(%rsp), %rax - movq 88(%rsp), %rcx # 8-byte Reload - addq 1320(%rsp), %rcx - movq 8(%rsp), %r15 # 8-byte Reload - adcq 1328(%rsp), %r15 - movq 104(%rsp), %r14 # 8-byte Reload - adcq 1336(%rsp), %r14 - movq 56(%rsp), %rdx # 8-byte Reload - adcq 1344(%rsp), %rdx - movq %rdx, 56(%rsp) # 8-byte Spill - movq 40(%rsp), %r13 # 8-byte Reload - adcq 1352(%rsp), %r13 - adcq 1360(%rsp), %rbx - movq %rbx, 48(%rsp) # 8-byte Spill - adcq 1368(%rsp), %r12 - movq %r12, 72(%rsp) # 8-byte Spill - movq 96(%rsp), %rdx # 8-byte Reload - adcq 1376(%rsp), %rdx - movq %rdx, 96(%rsp) # 8-byte Spill - adcq 1384(%rsp), %rbp - movq %rbp, 112(%rsp) # 8-byte Spill - adcq $0, %rax - movq %rax, %rbp - movq %rcx, %rdx - movq %rcx, %rbx - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 1240(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 1240(%rsp), %rbx - adcq 1248(%rsp), %r15 - movq %r15, 8(%rsp) # 8-byte Spill - adcq 1256(%rsp), %r14 - movq %r14, 104(%rsp) # 8-byte Spill - movq 56(%rsp), %r12 # 8-byte Reload - adcq 1264(%rsp), %r12 - adcq 1272(%rsp), %r13 - movq %r13, %r14 - movq 48(%rsp), %r13 # 8-byte Reload - adcq 1280(%rsp), %r13 - movq 72(%rsp), %rbx # 8-byte Reload - adcq 1288(%rsp), %rbx - movq 96(%rsp), %r15 # 8-byte Reload - adcq 1296(%rsp), %r15 - movq 112(%rsp), %rax # 8-byte Reload - adcq 1304(%rsp), %rax - movq %rax, 112(%rsp) # 8-byte Spill - adcq 1312(%rsp), %rbp - movq %rbp, 
80(%rsp) # 8-byte Spill - movq 16(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rdx - leaq 1160(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 1232(%rsp), %rax - movq 8(%rsp), %rcx # 8-byte Reload - addq 1160(%rsp), %rcx - movq 104(%rsp), %rbp # 8-byte Reload - adcq 1168(%rsp), %rbp - adcq 1176(%rsp), %r12 - movq %r12, 56(%rsp) # 8-byte Spill - adcq 1184(%rsp), %r14 - adcq 1192(%rsp), %r13 - movq %r13, %r12 - adcq 1200(%rsp), %rbx - movq %rbx, 72(%rsp) # 8-byte Spill - adcq 1208(%rsp), %r15 - movq %r15, 96(%rsp) # 8-byte Spill - movq 112(%rsp), %rbx # 8-byte Reload - adcq 1216(%rsp), %rbx - movq 80(%rsp), %rdx # 8-byte Reload - adcq 1224(%rsp), %rdx - movq %rdx, 80(%rsp) # 8-byte Spill - movq %rax, %r15 - adcq $0, %r15 - movq %rcx, %rdx - movq %rcx, %r13 - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 1080(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 1080(%rsp), %r13 - adcq 1088(%rsp), %rbp - movq %rbp, 104(%rsp) # 8-byte Spill - movq 56(%rsp), %r13 # 8-byte Reload - adcq 1096(%rsp), %r13 - adcq 1104(%rsp), %r14 - adcq 1112(%rsp), %r12 - movq %r12, 48(%rsp) # 8-byte Spill - movq 72(%rsp), %r12 # 8-byte Reload - adcq 1120(%rsp), %r12 - movq 96(%rsp), %rbp # 8-byte Reload - adcq 1128(%rsp), %rbp - adcq 1136(%rsp), %rbx - movq %rbx, 112(%rsp) # 8-byte Spill - movq 80(%rsp), %rbx # 8-byte Reload - adcq 1144(%rsp), %rbx - adcq 1152(%rsp), %r15 - movq 16(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rdx - leaq 1000(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 1072(%rsp), %rax - movq 104(%rsp), %rcx # 8-byte Reload - addq 1000(%rsp), %rcx - adcq 1008(%rsp), %r13 - movq %r13, 56(%rsp) # 8-byte Spill - adcq 1016(%rsp), %r14 - movq %r14, 40(%rsp) # 8-byte Spill - movq 48(%rsp), %r14 # 8-byte Reload - adcq 1024(%rsp), %r14 - adcq 1032(%rsp), %r12 - adcq 1040(%rsp), %rbp - movq %rbp, 96(%rsp) # 8-byte Spill - movq 112(%rsp), %r13 # 8-byte Reload - adcq 1048(%rsp), %r13 - adcq 1056(%rsp), %rbx - movq %rbx, 80(%rsp) # 8-byte Spill - adcq 1064(%rsp), %r15 - movq %r15, 88(%rsp) # 8-byte Spill - adcq $0, %rax - movq %rax, 104(%rsp) # 8-byte Spill - movq %rcx, %rdx - movq %rcx, %rbx - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 920(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 920(%rsp), %rbx - movq 56(%rsp), %rax # 8-byte Reload - adcq 928(%rsp), %rax - movq %rax, 56(%rsp) # 8-byte Spill - movq 40(%rsp), %rbp # 8-byte Reload - adcq 936(%rsp), %rbp - movq %r14, %rbx - adcq 944(%rsp), %rbx - adcq 952(%rsp), %r12 - movq 96(%rsp), %rax # 8-byte Reload - adcq 960(%rsp), %rax - movq %rax, 96(%rsp) # 8-byte Spill - adcq 968(%rsp), %r13 - movq %r13, %r15 - movq 80(%rsp), %r13 # 8-byte Reload - adcq 976(%rsp), %r13 - movq 88(%rsp), %r14 # 8-byte Reload - adcq 984(%rsp), %r14 - movq 104(%rsp), %rax # 8-byte Reload - adcq 992(%rsp), %rax - movq %rax, 104(%rsp) # 8-byte Spill - movq 16(%rsp), %rax # 8-byte Reload - movq 32(%rax), %rdx - leaq 840(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 912(%rsp), %rax - movq 56(%rsp), %rcx # 8-byte Reload - addq 840(%rsp), %rcx - adcq 848(%rsp), %rbp - movq %rbp, 40(%rsp) # 8-byte Spill - adcq 856(%rsp), %rbx - movq %rbx, 48(%rsp) # 8-byte Spill - adcq 864(%rsp), %r12 - movq 96(%rsp), %rbp # 8-byte Reload - adcq 872(%rsp), %rbp - adcq 880(%rsp), %r15 - movq %r15, 112(%rsp) # 8-byte Spill - adcq 888(%rsp), %r13 - adcq 896(%rsp), %r14 - movq %r14, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %rdx # 8-byte 
Reload - adcq 904(%rsp), %rdx - movq %rdx, 104(%rsp) # 8-byte Spill - adcq $0, %rax - movq %rax, %r14 - movq %rcx, %rdx - movq %rcx, %rbx - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 760(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 760(%rsp), %rbx - movq 40(%rsp), %rax # 8-byte Reload - adcq 768(%rsp), %rax - movq %rax, 40(%rsp) # 8-byte Spill - movq 48(%rsp), %r15 # 8-byte Reload - adcq 776(%rsp), %r15 - adcq 784(%rsp), %r12 - movq %r12, 72(%rsp) # 8-byte Spill - movq %rbp, %rbx - adcq 792(%rsp), %rbx - movq 112(%rsp), %rbp # 8-byte Reload - adcq 800(%rsp), %rbp - adcq 808(%rsp), %r13 - movq 88(%rsp), %rax # 8-byte Reload - adcq 816(%rsp), %rax - movq %rax, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %r12 # 8-byte Reload - adcq 824(%rsp), %r12 - adcq 832(%rsp), %r14 - movq 16(%rsp), %rax # 8-byte Reload - movq 40(%rax), %rdx - leaq 680(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 752(%rsp), %rcx - movq 40(%rsp), %rax # 8-byte Reload - addq 680(%rsp), %rax - adcq 688(%rsp), %r15 - movq %r15, 48(%rsp) # 8-byte Spill - movq 72(%rsp), %rdx # 8-byte Reload - adcq 696(%rsp), %rdx - movq %rdx, 72(%rsp) # 8-byte Spill - adcq 704(%rsp), %rbx - movq %rbx, 96(%rsp) # 8-byte Spill - adcq 712(%rsp), %rbp - movq %rbp, 112(%rsp) # 8-byte Spill - adcq 720(%rsp), %r13 - movq %r13, %r15 - movq 88(%rsp), %rbx # 8-byte Reload - adcq 728(%rsp), %rbx - adcq 736(%rsp), %r12 - movq %r12, 104(%rsp) # 8-byte Spill - adcq 744(%rsp), %r14 - movq %r14, 40(%rsp) # 8-byte Spill - adcq $0, %rcx - movq %rcx, 56(%rsp) # 8-byte Spill - movq %rax, %rdx - movq %rax, %r13 - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 600(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 600(%rsp), %r13 - movq 48(%rsp), %r13 # 8-byte Reload - adcq 608(%rsp), %r13 - movq 72(%rsp), %r12 # 8-byte Reload - adcq 616(%rsp), %r12 - movq 96(%rsp), %rbp # 8-byte Reload - adcq 624(%rsp), %rbp - movq 112(%rsp), %rax # 8-byte Reload - adcq 632(%rsp), %rax - movq %rax, 112(%rsp) # 8-byte Spill - adcq 640(%rsp), %r15 - movq %r15, 80(%rsp) # 8-byte Spill - adcq 648(%rsp), %rbx - movq %rbx, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %r14 # 8-byte Reload - adcq 656(%rsp), %r14 - movq 40(%rsp), %rbx # 8-byte Reload - adcq 664(%rsp), %rbx - movq 56(%rsp), %r15 # 8-byte Reload - adcq 672(%rsp), %r15 - movq 16(%rsp), %rax # 8-byte Reload - movq 48(%rax), %rdx - leaq 520(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 592(%rsp), %rcx - movq %r13, %rax - addq 520(%rsp), %rax - adcq 528(%rsp), %r12 - movq %r12, 72(%rsp) # 8-byte Spill - movq %rbp, %r12 - adcq 536(%rsp), %r12 - movq 112(%rsp), %rbp # 8-byte Reload - adcq 544(%rsp), %rbp - movq 80(%rsp), %rdx # 8-byte Reload - adcq 552(%rsp), %rdx - movq %rdx, 80(%rsp) # 8-byte Spill - movq 88(%rsp), %rdx # 8-byte Reload - adcq 560(%rsp), %rdx - movq %rdx, 88(%rsp) # 8-byte Spill - adcq 568(%rsp), %r14 - movq %r14, 104(%rsp) # 8-byte Spill - adcq 576(%rsp), %rbx - movq %rbx, 40(%rsp) # 8-byte Spill - adcq 584(%rsp), %r15 - movq %r15, 56(%rsp) # 8-byte Spill - adcq $0, %rcx - movq %rcx, %r13 - movq %rax, %rdx - movq %rax, %r14 - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 440(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 440(%rsp), %r14 - movq 72(%rsp), %rax # 8-byte Reload - adcq 448(%rsp), %rax - movq %rax, 72(%rsp) # 8-byte Spill - adcq 456(%rsp), %r12 - adcq 464(%rsp), %rbp - movq %rbp, 112(%rsp) # 8-byte Spill - movq 80(%rsp), %r14 # 
8-byte Reload - adcq 472(%rsp), %r14 - movq 88(%rsp), %r15 # 8-byte Reload - adcq 480(%rsp), %r15 - movq 104(%rsp), %rbp # 8-byte Reload - adcq 488(%rsp), %rbp - movq 40(%rsp), %rbx # 8-byte Reload - adcq 496(%rsp), %rbx - movq 56(%rsp), %rax # 8-byte Reload - adcq 504(%rsp), %rax - movq %rax, 56(%rsp) # 8-byte Spill - adcq 512(%rsp), %r13 - movq 16(%rsp), %rax # 8-byte Reload - movq 56(%rax), %rdx - leaq 360(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 432(%rsp), %rcx - movq 72(%rsp), %rax # 8-byte Reload - addq 360(%rsp), %rax - adcq 368(%rsp), %r12 - movq %r12, 96(%rsp) # 8-byte Spill - movq 112(%rsp), %rdx # 8-byte Reload - adcq 376(%rsp), %rdx - movq %rdx, 112(%rsp) # 8-byte Spill - adcq 384(%rsp), %r14 - movq %r14, 80(%rsp) # 8-byte Spill - adcq 392(%rsp), %r15 - movq %r15, 88(%rsp) # 8-byte Spill - adcq 400(%rsp), %rbp - movq %rbp, 104(%rsp) # 8-byte Spill - adcq 408(%rsp), %rbx - movq %rbx, 40(%rsp) # 8-byte Spill - movq 56(%rsp), %r14 # 8-byte Reload - adcq 416(%rsp), %r14 - adcq 424(%rsp), %r13 - movq %r13, %r15 - adcq $0, %rcx - movq %rcx, 72(%rsp) # 8-byte Spill - movq %rax, %rdx - movq %rax, %r12 - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 280(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 280(%rsp), %r12 - movq 96(%rsp), %rax # 8-byte Reload - adcq 288(%rsp), %rax - movq %rax, 96(%rsp) # 8-byte Spill - movq 112(%rsp), %rbp # 8-byte Reload - adcq 296(%rsp), %rbp - movq 80(%rsp), %rax # 8-byte Reload - adcq 304(%rsp), %rax - movq %rax, 80(%rsp) # 8-byte Spill - movq 88(%rsp), %r13 # 8-byte Reload - adcq 312(%rsp), %r13 - movq 104(%rsp), %r12 # 8-byte Reload - adcq 320(%rsp), %r12 - movq 40(%rsp), %rbx # 8-byte Reload - adcq 328(%rsp), %rbx - adcq 336(%rsp), %r14 - movq %r14, 56(%rsp) # 8-byte Spill - adcq 344(%rsp), %r15 - movq %r15, 48(%rsp) # 8-byte Spill - movq 72(%rsp), %r14 # 8-byte Reload - adcq 352(%rsp), %r14 - movq 16(%rsp), %rax # 8-byte Reload - movq 64(%rax), %rdx - leaq 200(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 272(%rsp), %rcx - movq 96(%rsp), %rax # 8-byte Reload - addq 200(%rsp), %rax - adcq 208(%rsp), %rbp - movq %rbp, 112(%rsp) # 8-byte Spill - movq 80(%rsp), %rbp # 8-byte Reload - adcq 216(%rsp), %rbp - adcq 224(%rsp), %r13 - movq %r13, 88(%rsp) # 8-byte Spill - adcq 232(%rsp), %r12 - movq %r12, 104(%rsp) # 8-byte Spill - adcq 240(%rsp), %rbx - movq %rbx, 40(%rsp) # 8-byte Spill - movq 56(%rsp), %r15 # 8-byte Reload - adcq 248(%rsp), %r15 - movq 48(%rsp), %r12 # 8-byte Reload - adcq 256(%rsp), %r12 - adcq 264(%rsp), %r14 - adcq $0, %rcx - movq %rcx, 96(%rsp) # 8-byte Spill - movq 32(%rsp), %rdx # 8-byte Reload - imulq %rax, %rdx - movq %rax, %rbx - leaq 120(%rsp), %rdi - movq 64(%rsp), %r13 # 8-byte Reload - movq %r13, %rsi - callq .LmulPv576x64 - addq 120(%rsp), %rbx - movq 112(%rsp), %rcx # 8-byte Reload - adcq 128(%rsp), %rcx - movq %rbp, %rdx - adcq 136(%rsp), %rdx - movq 88(%rsp), %rsi # 8-byte Reload - adcq 144(%rsp), %rsi - movq %rsi, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %rdi # 8-byte Reload - adcq 152(%rsp), %rdi - movq %rdi, 104(%rsp) # 8-byte Spill - movq 40(%rsp), %rbx # 8-byte Reload - adcq 160(%rsp), %rbx - movq %rbx, 40(%rsp) # 8-byte Spill - movq %r15, %r8 - adcq 168(%rsp), %r8 - movq %r8, 56(%rsp) # 8-byte Spill - movq %r12, %r15 - adcq 176(%rsp), %r15 - adcq 184(%rsp), %r14 - movq 96(%rsp), %r9 # 8-byte Reload - adcq 192(%rsp), %r9 - movq %rcx, %rax - movq %rcx, %r11 - movq %r13, %rbp - subq (%rbp), %rax - movq %rdx, %rcx - 
movq %rdx, %r12 - sbbq 8(%rbp), %rcx - movq %rsi, %rdx - sbbq 16(%rbp), %rdx - movq %rdi, %rsi - sbbq 24(%rbp), %rsi - movq %rbx, %rdi - sbbq 32(%rbp), %rdi - movq %r8, %r10 - sbbq 40(%rbp), %r10 - movq %r15, %r13 - sbbq 48(%rbp), %r13 - movq %r14, %r8 - sbbq 56(%rbp), %r8 - movq %rbp, %rbx - movq %r9, %rbp - sbbq 64(%rbx), %rbp - movq %rbp, %rbx - sarq $63, %rbx - cmovsq %r11, %rax - movq (%rsp), %rbx # 8-byte Reload - movq %rax, (%rbx) - cmovsq %r12, %rcx - movq %rcx, 8(%rbx) - cmovsq 88(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 16(%rbx) - cmovsq 104(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 24(%rbx) - cmovsq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rbx) - cmovsq 56(%rsp), %r10 # 8-byte Folded Reload - movq %r10, 40(%rbx) - cmovsq %r15, %r13 - movq %r13, 48(%rbx) - cmovsq %r14, %r8 - movq %r8, 56(%rbx) - cmovsq %r9, %rbp - movq %rbp, 64(%rbx) - addq $1560, %rsp # imm = 0x618 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end131: - .size mcl_fp_montNF9Lbmi2, .Lfunc_end131-mcl_fp_montNF9Lbmi2 - - .globl mcl_fp_montRed9Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed9Lbmi2,@function -mcl_fp_montRed9Lbmi2: # @mcl_fp_montRed9Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $936, %rsp # imm = 0x3A8 - movq %rdx, %rax - movq %rax, 128(%rsp) # 8-byte Spill - movq %rdi, 80(%rsp) # 8-byte Spill - movq -8(%rax), %rcx - movq %rcx, 120(%rsp) # 8-byte Spill - movq (%rsi), %r14 - movq 8(%rsi), %rdx - movq %rdx, 192(%rsp) # 8-byte Spill - movq %r14, %rdx - imulq %rcx, %rdx - movq 136(%rsi), %rcx - movq %rcx, 112(%rsp) # 8-byte Spill - movq 128(%rsi), %rcx - movq %rcx, 152(%rsp) # 8-byte Spill - movq 120(%rsi), %rcx - movq %rcx, 104(%rsp) # 8-byte Spill - movq 112(%rsi), %rcx - movq %rcx, 144(%rsp) # 8-byte Spill - movq 104(%rsi), %rcx - movq %rcx, 184(%rsp) # 8-byte Spill - movq 96(%rsi), %rcx - movq %rcx, 208(%rsp) # 8-byte Spill - movq 88(%rsi), %rcx - movq %rcx, 200(%rsp) # 8-byte Spill - movq 80(%rsi), %rcx - movq %rcx, 160(%rsp) # 8-byte Spill - movq 72(%rsi), %r12 - movq 64(%rsi), %rcx - movq %rcx, 176(%rsp) # 8-byte Spill - movq 56(%rsi), %rcx - movq %rcx, 168(%rsp) # 8-byte Spill - movq 48(%rsi), %rcx - movq %rcx, 136(%rsp) # 8-byte Spill - movq 40(%rsi), %rbp - movq 32(%rsi), %rbx - movq 24(%rsi), %r13 - movq 16(%rsi), %r15 - movq %rax, %rcx - movq (%rcx), %rax - movq %rax, 16(%rsp) # 8-byte Spill - movq 64(%rcx), %rax - movq %rax, 72(%rsp) # 8-byte Spill - movq 56(%rcx), %rax - movq %rax, 64(%rsp) # 8-byte Spill - movq 48(%rcx), %rax - movq %rax, 56(%rsp) # 8-byte Spill - movq 40(%rcx), %rax - movq %rax, 48(%rsp) # 8-byte Spill - movq 32(%rcx), %rax - movq %rax, 40(%rsp) # 8-byte Spill - movq 24(%rcx), %rax - movq %rax, 32(%rsp) # 8-byte Spill - movq 16(%rcx), %rax - movq %rax, 24(%rsp) # 8-byte Spill - movq 8(%rcx), %rax - movq %rax, 8(%rsp) # 8-byte Spill - movq %rcx, %rsi - leaq 856(%rsp), %rdi - callq .LmulPv576x64 - addq 856(%rsp), %r14 - movq 192(%rsp), %rcx # 8-byte Reload - adcq 864(%rsp), %rcx - adcq 872(%rsp), %r15 - adcq 880(%rsp), %r13 - adcq 888(%rsp), %rbx - movq %rbx, 88(%rsp) # 8-byte Spill - adcq 896(%rsp), %rbp - movq %rbp, 96(%rsp) # 8-byte Spill - movq 136(%rsp), %rax # 8-byte Reload - adcq 904(%rsp), %rax - movq %rax, 136(%rsp) # 8-byte Spill - movq 168(%rsp), %rax # 8-byte Reload - adcq 912(%rsp), %rax - movq %rax, 168(%rsp) # 8-byte Spill - movq 176(%rsp), %rax # 8-byte Reload - adcq 920(%rsp), %rax - movq %rax, 176(%rsp) # 8-byte Spill - adcq 928(%rsp), %r12 
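mcl_fp_montRed9Lbmi2, whose deletion begins just above, is the standalone Montgomery reduction: given an 18-limb input T < p*R (for example a plain double-width product from mcl_fpDbl_mulPre9Lbmi2), nine rounds of "zero the lowest limb with m = T[i]*pInv, then add m*p" leave T*R^-1 mod p in the top half, up to one final conditional subtraction. A Go sketch under the same assumptions, reusing the muladd2 helper from the multiplication sketch above (signature and names again illustrative):

    // fpMontRed computes z = T * R^-1 mod p for an 18-limb T < p*R,
    // mirroring the nine reduction rounds of mcl_fp_montRed9Lbmi2.
    func fpMontRed(z *[9]uint64, T *[18]uint64, p *[9]uint64, pInv uint64) {
        var hi uint64 // overflow above T[17]; at most 1
        for i := 0; i < 9; i++ {
            m := T[i] * pInv // makes limb i of T + m*p*2^(64*i) zero
            var c uint64
            for j := 0; j < 9; j++ {
                T[i+j], c = muladd2(m, p[j], T[i+j], c)
            }
            for k := i + 9; c != 0 && k < 18; k++ { // ripple the carry up
                T[k], c = bits.Add64(T[k], c, 0)
            }
            hi += c
        }
        // the reduced value sits in T[9..17] (plus hi); subtract p once if needed
        var b uint64
        var u [9]uint64
        for j := 0; j < 9; j++ {
            u[j], b = bits.Sub64(T[9+j], p[j], b)
        }
        if hi != 0 || b == 0 {
            copy(z[:], u[:])
        } else {
            copy(z[:], T[9:])
        }
    }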
- movq %r12, 192(%rsp) # 8-byte Spill - movq 160(%rsp), %rbp # 8-byte Reload - adcq $0, %rbp - adcq $0, 200(%rsp) # 8-byte Folded Spill - adcq $0, 208(%rsp) # 8-byte Folded Spill - adcq $0, 184(%rsp) # 8-byte Folded Spill - adcq $0, 144(%rsp) # 8-byte Folded Spill - adcq $0, 104(%rsp) # 8-byte Folded Spill - adcq $0, 152(%rsp) # 8-byte Folded Spill - movq 112(%rsp), %r14 # 8-byte Reload - adcq $0, %r14 - sbbq %r12, %r12 - movq %rcx, %rdx - movq %rcx, %rbx - imulq 120(%rsp), %rdx # 8-byte Folded Reload - leaq 776(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %r12d - addq 776(%rsp), %rbx - adcq 784(%rsp), %r15 - adcq 792(%rsp), %r13 - movq %r13, (%rsp) # 8-byte Spill - movq 88(%rsp), %rax # 8-byte Reload - adcq 800(%rsp), %rax - movq %rax, 88(%rsp) # 8-byte Spill - movq 96(%rsp), %rax # 8-byte Reload - adcq 808(%rsp), %rax - movq %rax, 96(%rsp) # 8-byte Spill - movq 136(%rsp), %rax # 8-byte Reload - adcq 816(%rsp), %rax - movq %rax, 136(%rsp) # 8-byte Spill - movq 168(%rsp), %rax # 8-byte Reload - adcq 824(%rsp), %rax - movq %rax, 168(%rsp) # 8-byte Spill - movq 176(%rsp), %rax # 8-byte Reload - adcq 832(%rsp), %rax - movq %rax, 176(%rsp) # 8-byte Spill - movq 192(%rsp), %rax # 8-byte Reload - adcq 840(%rsp), %rax - movq %rax, 192(%rsp) # 8-byte Spill - adcq 848(%rsp), %rbp - movq %rbp, 160(%rsp) # 8-byte Spill - movq 200(%rsp), %r13 # 8-byte Reload - adcq $0, %r13 - adcq $0, 208(%rsp) # 8-byte Folded Spill - adcq $0, 184(%rsp) # 8-byte Folded Spill - adcq $0, 144(%rsp) # 8-byte Folded Spill - adcq $0, 104(%rsp) # 8-byte Folded Spill - movq 152(%rsp), %rbx # 8-byte Reload - adcq $0, %rbx - adcq $0, %r14 - movq %r14, 112(%rsp) # 8-byte Spill - adcq $0, %r12 - movq %r15, %rdx - imulq 120(%rsp), %rdx # 8-byte Folded Reload - leaq 696(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 696(%rsp), %r15 - movq (%rsp), %rcx # 8-byte Reload - adcq 704(%rsp), %rcx - movq 88(%rsp), %rax # 8-byte Reload - adcq 712(%rsp), %rax - movq %rax, 88(%rsp) # 8-byte Spill - movq 96(%rsp), %rax # 8-byte Reload - adcq 720(%rsp), %rax - movq %rax, 96(%rsp) # 8-byte Spill - movq 136(%rsp), %rbp # 8-byte Reload - adcq 728(%rsp), %rbp - movq 168(%rsp), %r14 # 8-byte Reload - adcq 736(%rsp), %r14 - movq 176(%rsp), %r15 # 8-byte Reload - adcq 744(%rsp), %r15 - movq 192(%rsp), %rax # 8-byte Reload - adcq 752(%rsp), %rax - movq %rax, 192(%rsp) # 8-byte Spill - movq 160(%rsp), %rax # 8-byte Reload - adcq 760(%rsp), %rax - movq %rax, 160(%rsp) # 8-byte Spill - adcq 768(%rsp), %r13 - movq %r13, 200(%rsp) # 8-byte Spill - adcq $0, 208(%rsp) # 8-byte Folded Spill - movq 184(%rsp), %r13 # 8-byte Reload - adcq $0, %r13 - adcq $0, 144(%rsp) # 8-byte Folded Spill - adcq $0, 104(%rsp) # 8-byte Folded Spill - adcq $0, %rbx - movq %rbx, 152(%rsp) # 8-byte Spill - adcq $0, 112(%rsp) # 8-byte Folded Spill - adcq $0, %r12 - movq %rcx, %rbx - movq %rbx, %rdx - imulq 120(%rsp), %rdx # 8-byte Folded Reload - leaq 616(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 616(%rsp), %rbx - movq 88(%rsp), %rax # 8-byte Reload - adcq 624(%rsp), %rax - movq 96(%rsp), %rcx # 8-byte Reload - adcq 632(%rsp), %rcx - movq %rcx, 96(%rsp) # 8-byte Spill - adcq 640(%rsp), %rbp - movq %rbp, 136(%rsp) # 8-byte Spill - adcq 648(%rsp), %r14 - movq %r14, 168(%rsp) # 8-byte Spill - adcq 656(%rsp), %r15 - movq 192(%rsp), %r14 # 8-byte Reload - adcq 664(%rsp), %r14 - movq 160(%rsp), %rbp # 8-byte Reload - adcq 672(%rsp), %rbp - movq 200(%rsp), %rcx # 8-byte Reload - 
adcq 680(%rsp), %rcx - movq %rcx, 200(%rsp) # 8-byte Spill - movq 208(%rsp), %rcx # 8-byte Reload - adcq 688(%rsp), %rcx - movq %rcx, 208(%rsp) # 8-byte Spill - adcq $0, %r13 - movq %r13, 184(%rsp) # 8-byte Spill - adcq $0, 144(%rsp) # 8-byte Folded Spill - adcq $0, 104(%rsp) # 8-byte Folded Spill - adcq $0, 152(%rsp) # 8-byte Folded Spill - adcq $0, 112(%rsp) # 8-byte Folded Spill - adcq $0, %r12 - movq %rax, %rbx - movq %rbx, %rdx - imulq 120(%rsp), %rdx # 8-byte Folded Reload - leaq 536(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 536(%rsp), %rbx - movq 96(%rsp), %rax # 8-byte Reload - adcq 544(%rsp), %rax - movq 136(%rsp), %rcx # 8-byte Reload - adcq 552(%rsp), %rcx - movq %rcx, 136(%rsp) # 8-byte Spill - movq 168(%rsp), %rcx # 8-byte Reload - adcq 560(%rsp), %rcx - movq %rcx, 168(%rsp) # 8-byte Spill - adcq 568(%rsp), %r15 - movq %r15, 176(%rsp) # 8-byte Spill - adcq 576(%rsp), %r14 - movq %r14, 192(%rsp) # 8-byte Spill - adcq 584(%rsp), %rbp - movq %rbp, 160(%rsp) # 8-byte Spill - movq 200(%rsp), %r13 # 8-byte Reload - adcq 592(%rsp), %r13 - movq 208(%rsp), %r15 # 8-byte Reload - adcq 600(%rsp), %r15 - movq 184(%rsp), %rbp # 8-byte Reload - adcq 608(%rsp), %rbp - movq 144(%rsp), %rbx # 8-byte Reload - adcq $0, %rbx - adcq $0, 104(%rsp) # 8-byte Folded Spill - adcq $0, 152(%rsp) # 8-byte Folded Spill - adcq $0, 112(%rsp) # 8-byte Folded Spill - adcq $0, %r12 - movq %rax, %rdx - movq %rax, %r14 - imulq 120(%rsp), %rdx # 8-byte Folded Reload - leaq 456(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 456(%rsp), %r14 - movq 136(%rsp), %rax # 8-byte Reload - adcq 464(%rsp), %rax - movq 168(%rsp), %rcx # 8-byte Reload - adcq 472(%rsp), %rcx - movq %rcx, 168(%rsp) # 8-byte Spill - movq 176(%rsp), %rcx # 8-byte Reload - adcq 480(%rsp), %rcx - movq %rcx, 176(%rsp) # 8-byte Spill - movq 192(%rsp), %rcx # 8-byte Reload - adcq 488(%rsp), %rcx - movq %rcx, 192(%rsp) # 8-byte Spill - movq 160(%rsp), %rcx # 8-byte Reload - adcq 496(%rsp), %rcx - movq %rcx, 160(%rsp) # 8-byte Spill - adcq 504(%rsp), %r13 - movq %r13, 200(%rsp) # 8-byte Spill - adcq 512(%rsp), %r15 - movq %r15, 208(%rsp) # 8-byte Spill - adcq 520(%rsp), %rbp - movq %rbp, 184(%rsp) # 8-byte Spill - adcq 528(%rsp), %rbx - movq %rbx, 144(%rsp) # 8-byte Spill - movq 104(%rsp), %r14 # 8-byte Reload - adcq $0, %r14 - movq 152(%rsp), %r13 # 8-byte Reload - adcq $0, %r13 - movq 112(%rsp), %rbx # 8-byte Reload - adcq $0, %rbx - adcq $0, %r12 - movq %rax, %rdx - movq %rax, %r15 - imulq 120(%rsp), %rdx # 8-byte Folded Reload - leaq 376(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 376(%rsp), %r15 - movq 168(%rsp), %rax # 8-byte Reload - adcq 384(%rsp), %rax - movq 176(%rsp), %rcx # 8-byte Reload - adcq 392(%rsp), %rcx - movq %rcx, 176(%rsp) # 8-byte Spill - movq 192(%rsp), %rcx # 8-byte Reload - adcq 400(%rsp), %rcx - movq %rcx, 192(%rsp) # 8-byte Spill - movq 160(%rsp), %rbp # 8-byte Reload - adcq 408(%rsp), %rbp - movq 200(%rsp), %rcx # 8-byte Reload - adcq 416(%rsp), %rcx - movq %rcx, 200(%rsp) # 8-byte Spill - movq 208(%rsp), %rcx # 8-byte Reload - adcq 424(%rsp), %rcx - movq %rcx, 208(%rsp) # 8-byte Spill - movq 184(%rsp), %rcx # 8-byte Reload - adcq 432(%rsp), %rcx - movq %rcx, 184(%rsp) # 8-byte Spill - movq 144(%rsp), %r15 # 8-byte Reload - adcq 440(%rsp), %r15 - adcq 448(%rsp), %r14 - movq %r14, 104(%rsp) # 8-byte Spill - adcq $0, %r13 - movq %r13, %r14 - adcq $0, %rbx - movq %rbx, 112(%rsp) # 8-byte Spill - adcq $0, %r12 - movq %rax, 
%rbx - movq %rbx, %rdx - imulq 120(%rsp), %rdx # 8-byte Folded Reload - leaq 296(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 296(%rsp), %rbx - movq 176(%rsp), %rax # 8-byte Reload - adcq 304(%rsp), %rax - movq 192(%rsp), %r13 # 8-byte Reload - adcq 312(%rsp), %r13 - adcq 320(%rsp), %rbp - movq 200(%rsp), %rcx # 8-byte Reload - adcq 328(%rsp), %rcx - movq %rcx, 200(%rsp) # 8-byte Spill - movq 208(%rsp), %rcx # 8-byte Reload - adcq 336(%rsp), %rcx - movq %rcx, 208(%rsp) # 8-byte Spill - movq 184(%rsp), %rcx # 8-byte Reload - adcq 344(%rsp), %rcx - movq %rcx, 184(%rsp) # 8-byte Spill - adcq 352(%rsp), %r15 - movq %r15, 144(%rsp) # 8-byte Spill - movq 104(%rsp), %r15 # 8-byte Reload - adcq 360(%rsp), %r15 - adcq 368(%rsp), %r14 - movq %r14, 152(%rsp) # 8-byte Spill - movq 112(%rsp), %r14 # 8-byte Reload - adcq $0, %r14 - adcq $0, %r12 - movq 120(%rsp), %rdx # 8-byte Reload - imulq %rax, %rdx - movq %rax, %rbx - leaq 216(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 216(%rsp), %rbx - movq %r13, %rsi - adcq 224(%rsp), %rsi - movq %rsi, 192(%rsp) # 8-byte Spill - adcq 232(%rsp), %rbp - movq %rbp, 160(%rsp) # 8-byte Spill - movq 200(%rsp), %r9 # 8-byte Reload - adcq 240(%rsp), %r9 - movq %r9, 200(%rsp) # 8-byte Spill - movq 208(%rsp), %r8 # 8-byte Reload - adcq 248(%rsp), %r8 - movq %r8, 208(%rsp) # 8-byte Spill - movq 184(%rsp), %rbx # 8-byte Reload - adcq 256(%rsp), %rbx - movq 144(%rsp), %rax # 8-byte Reload - adcq 264(%rsp), %rax - movq %r15, %rcx - adcq 272(%rsp), %rcx - movq 152(%rsp), %rdx # 8-byte Reload - adcq 280(%rsp), %rdx - movq %rdx, 152(%rsp) # 8-byte Spill - adcq 288(%rsp), %r14 - movq %r14, %r11 - adcq $0, %r12 - subq 16(%rsp), %rsi # 8-byte Folded Reload - movq %rbp, %rdi - sbbq 8(%rsp), %rdi # 8-byte Folded Reload - movq %r9, %rbp - sbbq 24(%rsp), %rbp # 8-byte Folded Reload - movq %r8, %r13 - sbbq 32(%rsp), %r13 # 8-byte Folded Reload - movq %rbx, %r15 - sbbq 40(%rsp), %r15 # 8-byte Folded Reload - movq %rax, %r14 - sbbq 48(%rsp), %r14 # 8-byte Folded Reload - movq %rcx, %r10 - sbbq 56(%rsp), %r10 # 8-byte Folded Reload - movq %rdx, %r8 - sbbq 64(%rsp), %r8 # 8-byte Folded Reload - movq %r11, %r9 - sbbq 72(%rsp), %r9 # 8-byte Folded Reload - sbbq $0, %r12 - andl $1, %r12d - cmovneq %r11, %r9 - testb %r12b, %r12b - cmovneq 192(%rsp), %rsi # 8-byte Folded Reload - movq 80(%rsp), %rdx # 8-byte Reload - movq %rsi, (%rdx) - cmovneq 160(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 8(%rdx) - cmovneq 200(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 16(%rdx) - cmovneq 208(%rsp), %r13 # 8-byte Folded Reload - movq %r13, 24(%rdx) - cmovneq %rbx, %r15 - movq %r15, 32(%rdx) - cmovneq %rax, %r14 - movq %r14, 40(%rdx) - cmovneq %rcx, %r10 - movq %r10, 48(%rdx) - cmovneq 152(%rsp), %r8 # 8-byte Folded Reload - movq %r8, 56(%rdx) - movq %r9, 64(%rdx) - addq $936, %rsp # imm = 0x3A8 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end132: - .size mcl_fp_montRed9Lbmi2, .Lfunc_end132-mcl_fp_montRed9Lbmi2 - - .globl mcl_fp_addPre9Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre9Lbmi2,@function -mcl_fp_addPre9Lbmi2: # @mcl_fp_addPre9Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 64(%rdx), %r8 - movq 64(%rsi), %r15 - movq 56(%rsi), %r9 - movq 48(%rsi), %r10 - movq 40(%rsi), %r11 - movq 24(%rsi), %r12 - movq 32(%rsi), %r14 - movq (%rdx), %rbx - movq 8(%rdx), %rcx - addq (%rsi), %rbx - adcq 8(%rsi), %rcx - movq 16(%rdx), %rax - adcq 
16(%rsi), %rax - adcq 24(%rdx), %r12 - movq 56(%rdx), %r13 - movq 48(%rdx), %rsi - movq 40(%rdx), %rbp - movq 32(%rdx), %rdx - movq %rbx, (%rdi) - movq %rcx, 8(%rdi) - movq %rax, 16(%rdi) - movq %r12, 24(%rdi) - adcq %r14, %rdx - movq %rdx, 32(%rdi) - adcq %r11, %rbp - movq %rbp, 40(%rdi) - adcq %r10, %rsi - movq %rsi, 48(%rdi) - adcq %r9, %r13 - movq %r13, 56(%rdi) - adcq %r8, %r15 - movq %r15, 64(%rdi) - sbbq %rax, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end133: - .size mcl_fp_addPre9Lbmi2, .Lfunc_end133-mcl_fp_addPre9Lbmi2 - - .globl mcl_fp_subPre9Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre9Lbmi2,@function -mcl_fp_subPre9Lbmi2: # @mcl_fp_subPre9Lbmi2 -# BB#0: - movq 32(%rdx), %r8 - movq (%rsi), %rcx - xorl %eax, %eax - subq (%rdx), %rcx - movq %rcx, (%rdi) - movq 8(%rsi), %rcx - sbbq 8(%rdx), %rcx - movq %rcx, 8(%rdi) - movq 16(%rsi), %rcx - sbbq 16(%rdx), %rcx - movq %rcx, 16(%rdi) - movq 24(%rsi), %rcx - sbbq 24(%rdx), %rcx - movq %rcx, 24(%rdi) - movq 32(%rsi), %rcx - sbbq %r8, %rcx - movq 40(%rdx), %r8 - movq %rcx, 32(%rdi) - movq 40(%rsi), %rcx - sbbq %r8, %rcx - movq 48(%rdx), %r8 - movq %rcx, 40(%rdi) - movq 48(%rsi), %rcx - sbbq %r8, %rcx - movq 56(%rdx), %r8 - movq %rcx, 48(%rdi) - movq 56(%rsi), %rcx - sbbq %r8, %rcx - movq %rcx, 56(%rdi) - movq 64(%rdx), %rcx - movq 64(%rsi), %rdx - sbbq %rcx, %rdx - movq %rdx, 64(%rdi) - sbbq $0, %rax - andl $1, %eax - retq -.Lfunc_end134: - .size mcl_fp_subPre9Lbmi2, .Lfunc_end134-mcl_fp_subPre9Lbmi2 - - .globl mcl_fp_shr1_9Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_9Lbmi2,@function -mcl_fp_shr1_9Lbmi2: # @mcl_fp_shr1_9Lbmi2 -# BB#0: - pushq %rbx - movq 64(%rsi), %r8 - movq 56(%rsi), %r9 - movq 48(%rsi), %r10 - movq 40(%rsi), %r11 - movq 32(%rsi), %rcx - movq 24(%rsi), %rdx - movq 16(%rsi), %rax - movq (%rsi), %rbx - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rbx - movq %rbx, (%rdi) - shrdq $1, %rax, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rdx, %rax - movq %rax, 16(%rdi) - shrdq $1, %rcx, %rdx - movq %rdx, 24(%rdi) - shrdq $1, %r11, %rcx - movq %rcx, 32(%rdi) - shrdq $1, %r10, %r11 - movq %r11, 40(%rdi) - shrdq $1, %r9, %r10 - movq %r10, 48(%rdi) - shrdq $1, %r8, %r9 - movq %r9, 56(%rdi) - shrq %r8 - movq %r8, 64(%rdi) - popq %rbx - retq -.Lfunc_end135: - .size mcl_fp_shr1_9Lbmi2, .Lfunc_end135-mcl_fp_shr1_9Lbmi2 - - .globl mcl_fp_add9Lbmi2 - .align 16, 0x90 - .type mcl_fp_add9Lbmi2,@function -mcl_fp_add9Lbmi2: # @mcl_fp_add9Lbmi2 -# BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 64(%rdx), %r12 - movq 64(%rsi), %r8 - movq 56(%rsi), %r13 - movq 48(%rsi), %r9 - movq 40(%rsi), %r10 - movq 24(%rsi), %r14 - movq 32(%rsi), %r11 - movq (%rdx), %rbx - movq 8(%rdx), %r15 - addq (%rsi), %rbx - adcq 8(%rsi), %r15 - movq 16(%rdx), %rax - adcq 16(%rsi), %rax - adcq 24(%rdx), %r14 - adcq 32(%rdx), %r11 - adcq 40(%rdx), %r10 - movq 56(%rdx), %rsi - adcq 48(%rdx), %r9 - movq %rbx, (%rdi) - movq %r15, 8(%rdi) - movq %rax, 16(%rdi) - movq %r14, 24(%rdi) - movq %r11, 32(%rdi) - movq %r10, 40(%rdi) - movq %r9, 48(%rdi) - adcq %r13, %rsi - movq %rsi, 56(%rdi) - adcq %r12, %r8 - movq %r8, 64(%rdi) - sbbq %rdx, %rdx - andl $1, %edx - subq (%rcx), %rbx - sbbq 8(%rcx), %r15 - sbbq 16(%rcx), %rax - sbbq 24(%rcx), %r14 - sbbq 32(%rcx), %r11 - sbbq 40(%rcx), %r10 - sbbq 48(%rcx), %r9 - sbbq 56(%rcx), %rsi - sbbq 64(%rcx), %r8 - sbbq $0, %rdx - testb $1, %dl - jne .LBB136_2 -# BB#1: # %nocarry - movq %rbx, (%rdi) - movq %r15, 8(%rdi) - movq %rax, 16(%rdi) - movq %r14, 
24(%rdi) - movq %r11, 32(%rdi) - movq %r10, 40(%rdi) - movq %r9, 48(%rdi) - movq %rsi, 56(%rdi) - movq %r8, 64(%rdi) -.LBB136_2: # %carry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq -.Lfunc_end136: - .size mcl_fp_add9Lbmi2, .Lfunc_end136-mcl_fp_add9Lbmi2 - - .globl mcl_fp_addNF9Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF9Lbmi2,@function -mcl_fp_addNF9Lbmi2: # @mcl_fp_addNF9Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, %r8 - movq 64(%rdx), %r10 - movq 56(%rdx), %r11 - movq 48(%rdx), %r9 - movq 40(%rdx), %rax - movq 32(%rdx), %rdi - movq 24(%rdx), %rbp - movq 16(%rdx), %r15 - movq (%rdx), %rbx - movq 8(%rdx), %r13 - addq (%rsi), %rbx - adcq 8(%rsi), %r13 - adcq 16(%rsi), %r15 - adcq 24(%rsi), %rbp - movq %rbp, -40(%rsp) # 8-byte Spill - adcq 32(%rsi), %rdi - movq %rdi, -16(%rsp) # 8-byte Spill - adcq 40(%rsi), %rax - movq %rax, -8(%rsp) # 8-byte Spill - adcq 48(%rsi), %r9 - movq %r9, -32(%rsp) # 8-byte Spill - movq %r9, %rdi - adcq 56(%rsi), %r11 - movq %r11, -24(%rsp) # 8-byte Spill - movq %r11, %rax - adcq 64(%rsi), %r10 - movq %r10, %r9 - movq %rbx, %rsi - subq (%rcx), %rsi - movq %r13, %rdx - sbbq 8(%rcx), %rdx - movq %r15, %r12 - sbbq 16(%rcx), %r12 - sbbq 24(%rcx), %rbp - movq -16(%rsp), %r14 # 8-byte Reload - sbbq 32(%rcx), %r14 - movq -8(%rsp), %r11 # 8-byte Reload - sbbq 40(%rcx), %r11 - movq %rdi, %r10 - sbbq 48(%rcx), %r10 - movq %rax, %rdi - sbbq 56(%rcx), %rdi - movq %r9, %rax - sbbq 64(%rcx), %rax - movq %rax, %rcx - sarq $63, %rcx - cmovsq %rbx, %rsi - movq %rsi, (%r8) - cmovsq %r13, %rdx - movq %rdx, 8(%r8) - cmovsq %r15, %r12 - movq %r12, 16(%r8) - cmovsq -40(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%r8) - cmovsq -16(%rsp), %r14 # 8-byte Folded Reload - movq %r14, 32(%r8) - cmovsq -8(%rsp), %r11 # 8-byte Folded Reload - movq %r11, 40(%r8) - cmovsq -32(%rsp), %r10 # 8-byte Folded Reload - movq %r10, 48(%r8) - cmovsq -24(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 56(%r8) - cmovsq %r9, %rax - movq %rax, 64(%r8) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end137: - .size mcl_fp_addNF9Lbmi2, .Lfunc_end137-mcl_fp_addNF9Lbmi2 - - .globl mcl_fp_sub9Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub9Lbmi2,@function -mcl_fp_sub9Lbmi2: # @mcl_fp_sub9Lbmi2 -# BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 64(%rdx), %r13 - movq (%rsi), %rax - movq 8(%rsi), %r9 - xorl %ebx, %ebx - subq (%rdx), %rax - sbbq 8(%rdx), %r9 - movq 16(%rsi), %r10 - sbbq 16(%rdx), %r10 - movq 24(%rsi), %r11 - sbbq 24(%rdx), %r11 - movq 32(%rsi), %r12 - sbbq 32(%rdx), %r12 - movq 40(%rsi), %r14 - sbbq 40(%rdx), %r14 - movq 48(%rsi), %r15 - sbbq 48(%rdx), %r15 - movq 64(%rsi), %r8 - movq 56(%rsi), %rsi - sbbq 56(%rdx), %rsi - movq %rax, (%rdi) - movq %r9, 8(%rdi) - movq %r10, 16(%rdi) - movq %r11, 24(%rdi) - movq %r12, 32(%rdi) - movq %r14, 40(%rdi) - movq %r15, 48(%rdi) - movq %rsi, 56(%rdi) - sbbq %r13, %r8 - movq %r8, 64(%rdi) - sbbq $0, %rbx - testb $1, %bl - je .LBB138_2 -# BB#1: # %carry - addq (%rcx), %rax - movq %rax, (%rdi) - movq 8(%rcx), %rax - adcq %r9, %rax - movq %rax, 8(%rdi) - movq 16(%rcx), %rax - adcq %r10, %rax - movq %rax, 16(%rdi) - movq 24(%rcx), %rax - adcq %r11, %rax - movq %rax, 24(%rdi) - movq 32(%rcx), %rax - adcq %r12, %rax - movq %rax, 32(%rdi) - movq 40(%rcx), %rax - adcq %r14, %rax - movq %rax, 40(%rdi) - movq 48(%rcx), %rax - adcq %r15, %rax - movq %rax, 48(%rdi) - movq 56(%rcx), %rax - adcq %rsi, %rax - movq %rax, 
56(%rdi) - movq 64(%rcx), %rax - adcq %r8, %rax - movq %rax, 64(%rdi) -.LBB138_2: # %nocarry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq -.Lfunc_end138: - .size mcl_fp_sub9Lbmi2, .Lfunc_end138-mcl_fp_sub9Lbmi2 - - .globl mcl_fp_subNF9Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF9Lbmi2,@function -mcl_fp_subNF9Lbmi2: # @mcl_fp_subNF9Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq %rdi, %r10 - movq 64(%rsi), %r14 - movq 56(%rsi), %rax - movq 48(%rsi), %rcx - movq 40(%rsi), %rdi - movq 32(%rsi), %rbp - movq 24(%rsi), %rbx - movq 16(%rsi), %r15 - movq (%rsi), %r13 - movq 8(%rsi), %r12 - subq (%rdx), %r13 - sbbq 8(%rdx), %r12 - sbbq 16(%rdx), %r15 - sbbq 24(%rdx), %rbx - movq %rbx, -40(%rsp) # 8-byte Spill - sbbq 32(%rdx), %rbp - movq %rbp, -32(%rsp) # 8-byte Spill - sbbq 40(%rdx), %rdi - movq %rdi, -24(%rsp) # 8-byte Spill - sbbq 48(%rdx), %rcx - movq %rcx, -16(%rsp) # 8-byte Spill - sbbq 56(%rdx), %rax - movq %rax, -8(%rsp) # 8-byte Spill - sbbq 64(%rdx), %r14 - movq %r14, %rdx - sarq $63, %rdx - movq %rdx, %rbp - shldq $1, %r14, %rbp - movq 24(%r8), %rbx - andq %rbp, %rbx - movq 8(%r8), %rdi - andq %rbp, %rdi - andq (%r8), %rbp - movq 64(%r8), %r11 - andq %rdx, %r11 - rorxq $63, %rdx, %rax - andq 56(%r8), %rdx - movq 48(%r8), %r9 - andq %rax, %r9 - movq 40(%r8), %rsi - andq %rax, %rsi - movq 32(%r8), %rcx - andq %rax, %rcx - andq 16(%r8), %rax - addq %r13, %rbp - adcq %r12, %rdi - movq %rbp, (%r10) - adcq %r15, %rax - movq %rdi, 8(%r10) - adcq -40(%rsp), %rbx # 8-byte Folded Reload - movq %rax, 16(%r10) - movq %rbx, 24(%r10) - adcq -32(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 32(%r10) - adcq -24(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%r10) - adcq -16(%rsp), %r9 # 8-byte Folded Reload - movq %r9, 48(%r10) - adcq -8(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 56(%r10) - adcq %r14, %r11 - movq %r11, 64(%r10) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end139: - .size mcl_fp_subNF9Lbmi2, .Lfunc_end139-mcl_fp_subNF9Lbmi2 - - .globl mcl_fpDbl_add9Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add9Lbmi2,@function -mcl_fpDbl_add9Lbmi2: # @mcl_fpDbl_add9Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r15 - movq 136(%rdx), %rax - movq %rax, -8(%rsp) # 8-byte Spill - movq 128(%rdx), %rax - movq %rax, -40(%rsp) # 8-byte Spill - movq 120(%rdx), %r10 - movq 112(%rdx), %r11 - movq 24(%rsi), %rcx - movq 32(%rsi), %r14 - movq 16(%rdx), %rbp - movq (%rdx), %rax - movq 8(%rdx), %rbx - addq (%rsi), %rax - adcq 8(%rsi), %rbx - adcq 16(%rsi), %rbp - adcq 24(%rdx), %rcx - adcq 32(%rdx), %r14 - movq 104(%rdx), %r9 - movq 96(%rdx), %r13 - movq %rax, (%rdi) - movq 88(%rdx), %r8 - movq %rbx, 8(%rdi) - movq 80(%rdx), %r12 - movq %rbp, 16(%rdi) - movq 40(%rdx), %rax - movq %rcx, 24(%rdi) - movq 40(%rsi), %rbp - adcq %rax, %rbp - movq 48(%rdx), %rcx - movq %r14, 32(%rdi) - movq 48(%rsi), %rax - adcq %rcx, %rax - movq 56(%rdx), %r14 - movq %rbp, 40(%rdi) - movq 56(%rsi), %rbp - adcq %r14, %rbp - movq 72(%rdx), %rcx - movq 64(%rdx), %rdx - movq %rax, 48(%rdi) - movq 64(%rsi), %rax - adcq %rdx, %rax - movq 136(%rsi), %rbx - movq %rbp, 56(%rdi) - movq 72(%rsi), %rbp - adcq %rcx, %rbp - movq 128(%rsi), %rcx - movq %rax, 64(%rdi) - movq 80(%rsi), %rdx - adcq %r12, %rdx - movq 88(%rsi), %r12 - adcq %r8, %r12 - movq 96(%rsi), %r14 - adcq %r13, %r14 - movq %r14, -48(%rsp) # 8-byte Spill - movq 104(%rsi), %rax - adcq %r9, 
%rax - movq %rax, -32(%rsp) # 8-byte Spill - movq 120(%rsi), %rax - movq 112(%rsi), %rsi - adcq %r11, %rsi - movq %rsi, -24(%rsp) # 8-byte Spill - adcq %r10, %rax - movq %rax, -16(%rsp) # 8-byte Spill - adcq -40(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -40(%rsp) # 8-byte Spill - adcq -8(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, -8(%rsp) # 8-byte Spill - sbbq %r9, %r9 - andl $1, %r9d - movq %rbp, %r10 - subq (%r15), %r10 - movq %rdx, %r11 - sbbq 8(%r15), %r11 - movq %r12, %rbx - sbbq 16(%r15), %rbx - sbbq 24(%r15), %r14 - movq -32(%rsp), %r13 # 8-byte Reload - sbbq 32(%r15), %r13 - movq -24(%rsp), %rsi # 8-byte Reload - sbbq 40(%r15), %rsi - movq -16(%rsp), %rax # 8-byte Reload - sbbq 48(%r15), %rax - sbbq 56(%r15), %rcx - movq -8(%rsp), %r8 # 8-byte Reload - sbbq 64(%r15), %r8 - sbbq $0, %r9 - andl $1, %r9d - cmovneq %rbp, %r10 - movq %r10, 72(%rdi) - testb %r9b, %r9b - cmovneq %rdx, %r11 - movq %r11, 80(%rdi) - cmovneq %r12, %rbx - movq %rbx, 88(%rdi) - cmovneq -48(%rsp), %r14 # 8-byte Folded Reload - movq %r14, 96(%rdi) - cmovneq -32(%rsp), %r13 # 8-byte Folded Reload - movq %r13, 104(%rdi) - cmovneq -24(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 112(%rdi) - cmovneq -16(%rsp), %rax # 8-byte Folded Reload - movq %rax, 120(%rdi) - cmovneq -40(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 128(%rdi) - cmovneq -8(%rsp), %r8 # 8-byte Folded Reload - movq %r8, 136(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end140: - .size mcl_fpDbl_add9Lbmi2, .Lfunc_end140-mcl_fpDbl_add9Lbmi2 - - .globl mcl_fpDbl_sub9Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub9Lbmi2,@function -mcl_fpDbl_sub9Lbmi2: # @mcl_fpDbl_sub9Lbmi2 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r14 - movq 136(%rdx), %rax - movq %rax, -8(%rsp) # 8-byte Spill - movq 128(%rdx), %rax - movq %rax, -16(%rsp) # 8-byte Spill - movq 120(%rdx), %rax - movq %rax, -24(%rsp) # 8-byte Spill - movq 16(%rsi), %r11 - movq (%rsi), %r12 - movq 8(%rsi), %r13 - xorl %r9d, %r9d - subq (%rdx), %r12 - sbbq 8(%rdx), %r13 - sbbq 16(%rdx), %r11 - movq 24(%rsi), %rbx - sbbq 24(%rdx), %rbx - movq 32(%rsi), %rbp - sbbq 32(%rdx), %rbp - movq 112(%rdx), %r10 - movq 104(%rdx), %rcx - movq %r12, (%rdi) - movq 96(%rdx), %rax - movq %r13, 8(%rdi) - movq 88(%rdx), %r13 - movq %r11, 16(%rdi) - movq 40(%rdx), %r11 - movq %rbx, 24(%rdi) - movq 40(%rsi), %rbx - sbbq %r11, %rbx - movq 48(%rdx), %r11 - movq %rbp, 32(%rdi) - movq 48(%rsi), %rbp - sbbq %r11, %rbp - movq 56(%rdx), %r11 - movq %rbx, 40(%rdi) - movq 56(%rsi), %rbx - sbbq %r11, %rbx - movq 64(%rdx), %r11 - movq %rbp, 48(%rdi) - movq 64(%rsi), %rbp - sbbq %r11, %rbp - movq 80(%rdx), %r8 - movq 72(%rdx), %r11 - movq %rbx, 56(%rdi) - movq 72(%rsi), %r15 - sbbq %r11, %r15 - movq 136(%rsi), %rdx - movq %rbp, 64(%rdi) - movq 80(%rsi), %rbp - sbbq %r8, %rbp - movq 88(%rsi), %r12 - sbbq %r13, %r12 - movq 96(%rsi), %r13 - sbbq %rax, %r13 - movq 104(%rsi), %rax - sbbq %rcx, %rax - movq %rax, -40(%rsp) # 8-byte Spill - movq 112(%rsi), %rax - sbbq %r10, %rax - movq %rax, -32(%rsp) # 8-byte Spill - movq 128(%rsi), %rax - movq 120(%rsi), %rcx - sbbq -24(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -24(%rsp) # 8-byte Spill - sbbq -16(%rsp), %rax # 8-byte Folded Reload - movq %rax, -16(%rsp) # 8-byte Spill - sbbq -8(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -8(%rsp) # 8-byte Spill - movl $0, %r8d - sbbq $0, %r8 - andl $1, %r8d - movq (%r14), %r10 - cmoveq %r9, %r10 - testb %r8b, %r8b - movq 16(%r14), %r8 
- cmoveq %r9, %r8 - movq 8(%r14), %rdx - cmoveq %r9, %rdx - movq 64(%r14), %rbx - cmoveq %r9, %rbx - movq 56(%r14), %r11 - cmoveq %r9, %r11 - movq 48(%r14), %rsi - cmoveq %r9, %rsi - movq 40(%r14), %rcx - cmoveq %r9, %rcx - movq 32(%r14), %rax - cmoveq %r9, %rax - cmovneq 24(%r14), %r9 - addq %r15, %r10 - adcq %rbp, %rdx - movq %r10, 72(%rdi) - adcq %r12, %r8 - movq %rdx, 80(%rdi) - adcq %r13, %r9 - movq %r8, 88(%rdi) - movq %r9, 96(%rdi) - adcq -40(%rsp), %rax # 8-byte Folded Reload - movq %rax, 104(%rdi) - adcq -32(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 112(%rdi) - adcq -24(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 120(%rdi) - adcq -16(%rsp), %r11 # 8-byte Folded Reload - movq %r11, 128(%rdi) - adcq -8(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, 136(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end141: - .size mcl_fpDbl_sub9Lbmi2, .Lfunc_end141-mcl_fpDbl_sub9Lbmi2 - - - .section ".note.GNU-stack","",@progbits diff --git a/vendor/github.com/dexon-foundation/mcl/src/asm/x86-64.s b/vendor/github.com/dexon-foundation/mcl/src/asm/x86-64.s deleted file mode 100644 index aa677d2ea..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/asm/x86-64.s +++ /dev/null @@ -1,16652 +0,0 @@ - .text - .file "" - .globl makeNIST_P192L - .align 16, 0x90 - .type makeNIST_P192L,@function -makeNIST_P192L: # @makeNIST_P192L -# BB#0: - movq $-1, %rax - movq $-2, %rdx - movq $-1, %rcx - retq -.Lfunc_end0: - .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L - - .globl mcl_fpDbl_mod_NIST_P192L - .align 16, 0x90 - .type mcl_fpDbl_mod_NIST_P192L,@function -mcl_fpDbl_mod_NIST_P192L: # @mcl_fpDbl_mod_NIST_P192L -# BB#0: - pushq %r14 - pushq %rbx - movq 16(%rsi), %r10 - movq 24(%rsi), %r8 - movq 40(%rsi), %r9 - movq 8(%rsi), %rax - addq %r9, %rax - adcq $0, %r10 - sbbq %rcx, %rcx - andl $1, %ecx - movq 32(%rsi), %r11 - movq (%rsi), %r14 - addq %r8, %r14 - adcq %r11, %rax - adcq %r9, %r10 - adcq $0, %rcx - addq %r9, %r14 - adcq %r8, %rax - adcq %r11, %r10 - adcq $0, %rcx - addq %rcx, %r14 - adcq %rax, %rcx - adcq $0, %r10 - sbbq %rax, %rax - andl $1, %eax - movq %r14, %rsi - addq $1, %rsi - movq %rcx, %rdx - adcq $1, %rdx - movq %r10, %rbx - adcq $0, %rbx - adcq $-1, %rax - andl $1, %eax - cmovneq %r14, %rsi - movq %rsi, (%rdi) - testb %al, %al - cmovneq %rcx, %rdx - movq %rdx, 8(%rdi) - cmovneq %r10, %rbx - movq %rbx, 16(%rdi) - popq %rbx - popq %r14 - retq -.Lfunc_end1: - .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L - - .globl mcl_fp_sqr_NIST_P192L - .align 16, 0x90 - .type mcl_fp_sqr_NIST_P192L,@function -mcl_fp_sqr_NIST_P192L: # @mcl_fp_sqr_NIST_P192L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, -8(%rsp) # 8-byte Spill - movq 16(%rsi), %r11 - movq (%rsi), %rbx - movq 8(%rsi), %rcx - movq %r11, %rax - mulq %rcx - movq %rdx, %rdi - movq %rax, %r14 - movq %rcx, %rax - mulq %rcx - movq %rdx, %r15 - movq %rax, %r12 - movq %rcx, %rax - mulq %rbx - movq %rax, %r13 - movq %rdx, %rcx - addq %rcx, %r12 - adcq %r14, %r15 - movq %rdi, %r10 - adcq $0, %r10 - movq %r11, %rax - mulq %rbx - movq %rdx, %r9 - movq %rax, %rbp - movq %rbx, %rax - mulq %rbx - movq %rax, %r8 - movq %rdx, %rsi - addq %r13, %rsi - adcq %rbp, %rcx - movq %r9, %rbx - adcq $0, %rbx - addq %r13, %rsi - adcq %r12, %rcx - adcq %r15, %rbx - adcq $0, %r10 - movq %r11, %rax - mulq %r11 - addq %r14, %r9 - adcq %rdi, %rax - adcq $0, %rdx - addq %rbp, %rcx - adcq %rbx, %r9 - adcq %r10, %rax - adcq $0, %rdx - addq %rdx, 
%rsi - adcq $0, %rcx - sbbq %rbp, %rbp - andl $1, %ebp - addq %r9, %r8 - adcq %rax, %rsi - adcq %rdx, %rcx - adcq $0, %rbp - addq %rdx, %r8 - adcq %r9, %rsi - adcq %rax, %rcx - adcq $0, %rbp - addq %rbp, %r8 - adcq %rsi, %rbp - adcq $0, %rcx - sbbq %rax, %rax - andl $1, %eax - movq %r8, %rdx - addq $1, %rdx - movq %rbp, %rsi - adcq $1, %rsi - movq %rcx, %rdi - adcq $0, %rdi - adcq $-1, %rax - andl $1, %eax - cmovneq %r8, %rdx - movq -8(%rsp), %rbx # 8-byte Reload - movq %rdx, (%rbx) - testb %al, %al - cmovneq %rbp, %rsi - movq %rsi, 8(%rbx) - cmovneq %rcx, %rdi - movq %rdi, 16(%rbx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end2: - .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L - - .globl mcl_fp_mulNIST_P192L - .align 16, 0x90 - .type mcl_fp_mulNIST_P192L,@function -mcl_fp_mulNIST_P192L: # @mcl_fp_mulNIST_P192L -# BB#0: - pushq %r14 - pushq %rbx - subq $56, %rsp - movq %rdi, %r14 - leaq 8(%rsp), %rdi - callq mcl_fpDbl_mulPre3L@PLT - movq 24(%rsp), %r9 - movq 32(%rsp), %r8 - movq 48(%rsp), %rdi - movq 16(%rsp), %rbx - addq %rdi, %rbx - adcq $0, %r9 - sbbq %rcx, %rcx - andl $1, %ecx - movq 40(%rsp), %rsi - movq 8(%rsp), %rdx - addq %r8, %rdx - adcq %rsi, %rbx - adcq %rdi, %r9 - adcq $0, %rcx - addq %rdi, %rdx - adcq %r8, %rbx - adcq %rsi, %r9 - adcq $0, %rcx - addq %rcx, %rdx - adcq %rbx, %rcx - adcq $0, %r9 - sbbq %rsi, %rsi - andl $1, %esi - movq %rdx, %rdi - addq $1, %rdi - movq %rcx, %rbx - adcq $1, %rbx - movq %r9, %rax - adcq $0, %rax - adcq $-1, %rsi - andl $1, %esi - cmovneq %rdx, %rdi - movq %rdi, (%r14) - testb %sil, %sil - cmovneq %rcx, %rbx - movq %rbx, 8(%r14) - cmovneq %r9, %rax - movq %rax, 16(%r14) - addq $56, %rsp - popq %rbx - popq %r14 - retq -.Lfunc_end3: - .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L - - .globl mcl_fpDbl_mod_NIST_P521L - .align 16, 0x90 - .type mcl_fpDbl_mod_NIST_P521L,@function -mcl_fpDbl_mod_NIST_P521L: # @mcl_fpDbl_mod_NIST_P521L -# BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 120(%rsi), %r9 - movq 128(%rsi), %r14 - movq %r14, %r8 - shldq $55, %r9, %r8 - movq 112(%rsi), %r10 - shldq $55, %r10, %r9 - movq 104(%rsi), %r11 - shldq $55, %r11, %r10 - movq 96(%rsi), %r15 - shldq $55, %r15, %r11 - movq 88(%rsi), %r12 - shldq $55, %r12, %r15 - movq 80(%rsi), %rcx - shldq $55, %rcx, %r12 - movq 64(%rsi), %rbx - movq 72(%rsi), %rax - shldq $55, %rax, %rcx - shrq $9, %r14 - shldq $55, %rbx, %rax - andl $511, %ebx # imm = 0x1FF - addq (%rsi), %rax - adcq 8(%rsi), %rcx - adcq 16(%rsi), %r12 - adcq 24(%rsi), %r15 - adcq 32(%rsi), %r11 - adcq 40(%rsi), %r10 - adcq 48(%rsi), %r9 - adcq 56(%rsi), %r8 - adcq %r14, %rbx - movq %rbx, %rsi - shrq $9, %rsi - andl $1, %esi - addq %rax, %rsi - adcq $0, %rcx - adcq $0, %r12 - adcq $0, %r15 - adcq $0, %r11 - adcq $0, %r10 - adcq $0, %r9 - adcq $0, %r8 - adcq $0, %rbx - movq %rsi, %rax - andq %r12, %rax - andq %r15, %rax - andq %r11, %rax - andq %r10, %rax - andq %r9, %rax - andq %r8, %rax - movq %rbx, %rdx - orq $-512, %rdx # imm = 0xFFFFFFFFFFFFFE00 - andq %rax, %rdx - andq %rcx, %rdx - cmpq $-1, %rdx - je .LBB4_1 -# BB#3: # %nonzero - movq %rsi, (%rdi) - movq %rcx, 8(%rdi) - movq %r12, 16(%rdi) - movq %r15, 24(%rdi) - movq %r11, 32(%rdi) - movq %r10, 40(%rdi) - movq %r9, 48(%rdi) - movq %r8, 56(%rdi) - andl $511, %ebx # imm = 0x1FF - movq %rbx, 64(%rdi) - jmp .LBB4_2 -.LBB4_1: # %zero - movq $0, 64(%rdi) - movq $0, 56(%rdi) - movq $0, 48(%rdi) - movq $0, 40(%rdi) - movq $0, 32(%rdi) - movq $0, 24(%rdi) - movq $0, 16(%rdi) - movq 
$0, 8(%rdi) - movq $0, (%rdi) -.LBB4_2: # %zero - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq -.Lfunc_end4: - .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L - - .globl mcl_fp_mulUnitPre1L - .align 16, 0x90 - .type mcl_fp_mulUnitPre1L,@function -mcl_fp_mulUnitPre1L: # @mcl_fp_mulUnitPre1L -# BB#0: - movq %rdx, %rax - mulq (%rsi) - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - retq -.Lfunc_end5: - .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L - - .globl mcl_fpDbl_mulPre1L - .align 16, 0x90 - .type mcl_fpDbl_mulPre1L,@function -mcl_fpDbl_mulPre1L: # @mcl_fpDbl_mulPre1L -# BB#0: - movq (%rdx), %rax - mulq (%rsi) - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - retq -.Lfunc_end6: - .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L - - .globl mcl_fpDbl_sqrPre1L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre1L,@function -mcl_fpDbl_sqrPre1L: # @mcl_fpDbl_sqrPre1L -# BB#0: - movq (%rsi), %rax - mulq %rax - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - retq -.Lfunc_end7: - .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L - - .globl mcl_fp_mont1L - .align 16, 0x90 - .type mcl_fp_mont1L,@function -mcl_fp_mont1L: # @mcl_fp_mont1L -# BB#0: - movq (%rsi), %rax - mulq (%rdx) - movq %rax, %rsi - movq %rdx, %r8 - movq -8(%rcx), %rax - imulq %rsi, %rax - movq (%rcx), %rcx - mulq %rcx - addq %rsi, %rax - adcq %r8, %rdx - sbbq %rax, %rax - andl $1, %eax - movq %rdx, %rsi - subq %rcx, %rsi - sbbq $0, %rax - testb $1, %al - cmovneq %rdx, %rsi - movq %rsi, (%rdi) - retq -.Lfunc_end8: - .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L - - .globl mcl_fp_montNF1L - .align 16, 0x90 - .type mcl_fp_montNF1L,@function -mcl_fp_montNF1L: # @mcl_fp_montNF1L -# BB#0: - movq (%rsi), %rax - mulq (%rdx) - movq %rax, %rsi - movq %rdx, %r8 - movq -8(%rcx), %rax - imulq %rsi, %rax - movq (%rcx), %rcx - mulq %rcx - addq %rsi, %rax - adcq %r8, %rdx - movq %rdx, %rax - subq %rcx, %rax - cmovsq %rdx, %rax - movq %rax, (%rdi) - retq -.Lfunc_end9: - .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L - - .globl mcl_fp_montRed1L - .align 16, 0x90 - .type mcl_fp_montRed1L,@function -mcl_fp_montRed1L: # @mcl_fp_montRed1L -# BB#0: - movq (%rsi), %rcx - movq -8(%rdx), %rax - imulq %rcx, %rax - movq (%rdx), %r8 - mulq %r8 - addq %rcx, %rax - adcq 8(%rsi), %rdx - sbbq %rax, %rax - andl $1, %eax - movq %rdx, %rcx - subq %r8, %rcx - sbbq $0, %rax - testb $1, %al - cmovneq %rdx, %rcx - movq %rcx, (%rdi) - retq -.Lfunc_end10: - .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L - - .globl mcl_fp_addPre1L - .align 16, 0x90 - .type mcl_fp_addPre1L,@function -mcl_fp_addPre1L: # @mcl_fp_addPre1L -# BB#0: - movq (%rdx), %rax - addq (%rsi), %rax - movq %rax, (%rdi) - sbbq %rax, %rax - andl $1, %eax - retq -.Lfunc_end11: - .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L - - .globl mcl_fp_subPre1L - .align 16, 0x90 - .type mcl_fp_subPre1L,@function -mcl_fp_subPre1L: # @mcl_fp_subPre1L -# BB#0: - movq (%rsi), %rcx - xorl %eax, %eax - subq (%rdx), %rcx - movq %rcx, (%rdi) - sbbq $0, %rax - andl $1, %eax - retq -.Lfunc_end12: - .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L - - .globl mcl_fp_shr1_1L - .align 16, 0x90 - .type mcl_fp_shr1_1L,@function -mcl_fp_shr1_1L: # @mcl_fp_shr1_1L -# BB#0: - movq (%rsi), %rax - shrq %rax - movq %rax, (%rdi) - retq -.Lfunc_end13: - .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L - - .globl mcl_fp_add1L - .align 16, 0x90 - .type mcl_fp_add1L,@function -mcl_fp_add1L: # @mcl_fp_add1L -# BB#0: - movq (%rdx), %rax - addq (%rsi), %rax - movq %rax, (%rdi) - sbbq %rdx, %rdx - andl 
$1, %edx - subq (%rcx), %rax - sbbq $0, %rdx - testb $1, %dl - jne .LBB14_2 -# BB#1: # %nocarry - movq %rax, (%rdi) -.LBB14_2: # %carry - retq -.Lfunc_end14: - .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L - - .globl mcl_fp_addNF1L - .align 16, 0x90 - .type mcl_fp_addNF1L,@function -mcl_fp_addNF1L: # @mcl_fp_addNF1L -# BB#0: - movq (%rdx), %rax - addq (%rsi), %rax - movq %rax, %rdx - subq (%rcx), %rdx - cmovsq %rax, %rdx - movq %rdx, (%rdi) - retq -.Lfunc_end15: - .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L - - .globl mcl_fp_sub1L - .align 16, 0x90 - .type mcl_fp_sub1L,@function -mcl_fp_sub1L: # @mcl_fp_sub1L -# BB#0: - movq (%rsi), %rax - xorl %esi, %esi - subq (%rdx), %rax - movq %rax, (%rdi) - sbbq $0, %rsi - testb $1, %sil - jne .LBB16_2 -# BB#1: # %nocarry - retq -.LBB16_2: # %carry - addq (%rcx), %rax - movq %rax, (%rdi) - retq -.Lfunc_end16: - .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L - - .globl mcl_fp_subNF1L - .align 16, 0x90 - .type mcl_fp_subNF1L,@function -mcl_fp_subNF1L: # @mcl_fp_subNF1L -# BB#0: - movq (%rsi), %rax - subq (%rdx), %rax - movq %rax, %rdx - sarq $63, %rdx - andq (%rcx), %rdx - addq %rax, %rdx - movq %rdx, (%rdi) - retq -.Lfunc_end17: - .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L - - .globl mcl_fpDbl_add1L - .align 16, 0x90 - .type mcl_fpDbl_add1L,@function -mcl_fpDbl_add1L: # @mcl_fpDbl_add1L -# BB#0: - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - movq %rax, (%rdi) - sbbq %rax, %rax - andl $1, %eax - movq %rdx, %rsi - subq (%rcx), %rsi - sbbq $0, %rax - testb $1, %al - cmovneq %rdx, %rsi - movq %rsi, 8(%rdi) - retq -.Lfunc_end18: - .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L - - .globl mcl_fpDbl_sub1L - .align 16, 0x90 - .type mcl_fpDbl_sub1L,@function -mcl_fpDbl_sub1L: # @mcl_fpDbl_sub1L -# BB#0: - movq (%rsi), %rax - movq 8(%rsi), %r8 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r8 - movq %rax, (%rdi) - movl $0, %eax - sbbq $0, %rax - testb $1, %al - cmovneq (%rcx), %rsi - addq %r8, %rsi - movq %rsi, 8(%rdi) - retq -.Lfunc_end19: - .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L - - .globl mcl_fp_mulUnitPre2L - .align 16, 0x90 - .type mcl_fp_mulUnitPre2L,@function -mcl_fp_mulUnitPre2L: # @mcl_fp_mulUnitPre2L -# BB#0: - movq %rdx, %r8 - movq %r8, %rax - mulq 8(%rsi) - movq %rdx, %rcx - movq %rax, %r9 - movq %r8, %rax - mulq (%rsi) - movq %rax, (%rdi) - addq %r9, %rdx - movq %rdx, 8(%rdi) - adcq $0, %rcx - movq %rcx, 16(%rdi) - retq -.Lfunc_end20: - .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L - - .globl mcl_fpDbl_mulPre2L - .align 16, 0x90 - .type mcl_fpDbl_mulPre2L,@function -mcl_fpDbl_mulPre2L: # @mcl_fpDbl_mulPre2L -# BB#0: - pushq %r14 - pushq %rbx - movq %rdx, %r10 - movq (%rsi), %r8 - movq 8(%rsi), %r11 - movq (%r10), %rcx - movq %r8, %rax - mulq %rcx - movq %rdx, %r9 - movq %rax, (%rdi) - movq %r11, %rax - mulq %rcx - movq %rdx, %r14 - movq %rax, %rsi - addq %r9, %rsi - adcq $0, %r14 - movq 8(%r10), %rbx - movq %r11, %rax - mulq %rbx - movq %rdx, %r9 - movq %rax, %rcx - movq %r8, %rax - mulq %rbx - addq %rsi, %rax - movq %rax, 8(%rdi) - adcq %r14, %rcx - sbbq %rax, %rax - andl $1, %eax - addq %rdx, %rcx - movq %rcx, 16(%rdi) - adcq %r9, %rax - movq %rax, 24(%rdi) - popq %rbx - popq %r14 - retq -.Lfunc_end21: - .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L - - .globl mcl_fpDbl_sqrPre2L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre2L,@function -mcl_fpDbl_sqrPre2L: # @mcl_fpDbl_sqrPre2L -# BB#0: - movq (%rsi), %rcx - movq 8(%rsi), %r8 - movq %rcx, %rax - mulq 
%rcx - movq %rdx, %rsi - movq %rax, (%rdi) - movq %r8, %rax - mulq %rcx - movq %rdx, %r9 - movq %rax, %r10 - addq %r10, %rsi - movq %r9, %rcx - adcq $0, %rcx - movq %r8, %rax - mulq %r8 - addq %r10, %rsi - movq %rsi, 8(%rdi) - adcq %rcx, %rax - sbbq %rcx, %rcx - andl $1, %ecx - addq %r9, %rax - movq %rax, 16(%rdi) - adcq %rdx, %rcx - movq %rcx, 24(%rdi) - retq -.Lfunc_end22: - .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L - - .globl mcl_fp_mont2L - .align 16, 0x90 - .type mcl_fp_mont2L,@function -mcl_fp_mont2L: # @mcl_fp_mont2L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, -8(%rsp) # 8-byte Spill - movq (%rsi), %r8 - movq 8(%rsi), %r11 - movq (%rdx), %rsi - movq 8(%rdx), %r9 - movq %r11, %rax - mulq %rsi - movq %rdx, %r15 - movq %rax, %r10 - movq %r8, %rax - mulq %rsi - movq %rax, %r14 - movq %rdx, %r13 - addq %r10, %r13 - adcq $0, %r15 - movq -8(%rcx), %r10 - movq (%rcx), %rbp - movq %r14, %rsi - imulq %r10, %rsi - movq 8(%rcx), %rdi - movq %rsi, %rax - mulq %rdi - movq %rdx, %rcx - movq %rax, %r12 - movq %rsi, %rax - mulq %rbp - movq %rdx, %rbx - addq %r12, %rbx - adcq $0, %rcx - addq %r14, %rax - adcq %r13, %rbx - adcq %r15, %rcx - sbbq %r15, %r15 - andl $1, %r15d - movq %r9, %rax - mulq %r11 - movq %rdx, %r14 - movq %rax, %r11 - movq %r9, %rax - mulq %r8 - movq %rax, %r8 - movq %rdx, %rsi - addq %r11, %rsi - adcq $0, %r14 - addq %rbx, %r8 - adcq %rcx, %rsi - adcq %r15, %r14 - sbbq %rbx, %rbx - andl $1, %ebx - imulq %r8, %r10 - movq %r10, %rax - mulq %rdi - movq %rdx, %rcx - movq %rax, %r9 - movq %r10, %rax - mulq %rbp - addq %r9, %rdx - adcq $0, %rcx - addq %r8, %rax - adcq %rsi, %rdx - adcq %r14, %rcx - adcq $0, %rbx - movq %rdx, %rax - subq %rbp, %rax - movq %rcx, %rsi - sbbq %rdi, %rsi - sbbq $0, %rbx - andl $1, %ebx - cmovneq %rcx, %rsi - testb %bl, %bl - cmovneq %rdx, %rax - movq -8(%rsp), %rcx # 8-byte Reload - movq %rax, (%rcx) - movq %rsi, 8(%rcx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end23: - .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L - - .globl mcl_fp_montNF2L - .align 16, 0x90 - .type mcl_fp_montNF2L,@function -mcl_fp_montNF2L: # @mcl_fp_montNF2L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, -8(%rsp) # 8-byte Spill - movq (%rsi), %r8 - movq 8(%rsi), %r11 - movq (%rdx), %rbp - movq 8(%rdx), %r9 - movq %r8, %rax - mulq %rbp - movq %rax, %rsi - movq %rdx, %r14 - movq -8(%rcx), %r10 - movq (%rcx), %r15 - movq %rsi, %rbx - imulq %r10, %rbx - movq 8(%rcx), %rdi - movq %rbx, %rax - mulq %rdi - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rax, %r13 - movq %rbx, %rax - mulq %r15 - movq %rdx, %r12 - movq %rax, %rbx - movq %r11, %rax - mulq %rbp - movq %rdx, %rcx - movq %rax, %rbp - addq %r14, %rbp - adcq $0, %rcx - addq %rsi, %rbx - adcq %r13, %rbp - adcq $0, %rcx - addq %r12, %rbp - adcq -16(%rsp), %rcx # 8-byte Folded Reload - movq %r9, %rax - mulq %r11 - movq %rdx, %rsi - movq %rax, %r11 - movq %r9, %rax - mulq %r8 - movq %rax, %r8 - movq %rdx, %rbx - addq %r11, %rbx - adcq $0, %rsi - addq %rbp, %r8 - adcq %rcx, %rbx - adcq $0, %rsi - imulq %r8, %r10 - movq %r10, %rax - mulq %rdi - movq %rdx, %rcx - movq %rax, %rbp - movq %r10, %rax - mulq %r15 - addq %r8, %rax - adcq %rbx, %rbp - adcq $0, %rsi - addq %rdx, %rbp - adcq %rcx, %rsi - movq %rbp, %rax - subq %r15, %rax - movq %rsi, %rcx - sbbq %rdi, %rcx - cmovsq %rbp, %rax - movq -8(%rsp), %rdx # 8-byte Reload - movq %rax, (%rdx) - cmovsq %rsi, %rcx - movq %rcx, 
8(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end24: - .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L - - .globl mcl_fp_montRed2L - .align 16, 0x90 - .type mcl_fp_montRed2L,@function -mcl_fp_montRed2L: # @mcl_fp_montRed2L -# BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq -8(%rdx), %r9 - movq (%rdx), %r11 - movq (%rsi), %rbx - movq %rbx, %rcx - imulq %r9, %rcx - movq 8(%rdx), %r14 - movq %rcx, %rax - mulq %r14 - movq %rdx, %r8 - movq %rax, %r10 - movq %rcx, %rax - mulq %r11 - movq %rdx, %rcx - addq %r10, %rcx - adcq $0, %r8 - movq 24(%rsi), %r15 - addq %rbx, %rax - adcq 8(%rsi), %rcx - adcq 16(%rsi), %r8 - adcq $0, %r15 - sbbq %rbx, %rbx - andl $1, %ebx - imulq %rcx, %r9 - movq %r9, %rax - mulq %r14 - movq %rdx, %rsi - movq %rax, %r10 - movq %r9, %rax - mulq %r11 - addq %r10, %rdx - adcq $0, %rsi - addq %rcx, %rax - adcq %r8, %rdx - adcq %r15, %rsi - adcq $0, %rbx - movq %rdx, %rax - subq %r11, %rax - movq %rsi, %rcx - sbbq %r14, %rcx - sbbq $0, %rbx - andl $1, %ebx - cmovneq %rsi, %rcx - testb %bl, %bl - cmovneq %rdx, %rax - movq %rax, (%rdi) - movq %rcx, 8(%rdi) - popq %rbx - popq %r14 - popq %r15 - retq -.Lfunc_end25: - .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L - - .globl mcl_fp_addPre2L - .align 16, 0x90 - .type mcl_fp_addPre2L,@function -mcl_fp_addPre2L: # @mcl_fp_addPre2L -# BB#0: - movq (%rdx), %rax - movq 8(%rdx), %rcx - addq (%rsi), %rax - adcq 8(%rsi), %rcx - movq %rax, (%rdi) - movq %rcx, 8(%rdi) - sbbq %rax, %rax - andl $1, %eax - retq -.Lfunc_end26: - .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L - - .globl mcl_fp_subPre2L - .align 16, 0x90 - .type mcl_fp_subPre2L,@function -mcl_fp_subPre2L: # @mcl_fp_subPre2L -# BB#0: - movq (%rsi), %rcx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rcx - sbbq 8(%rdx), %rsi - movq %rcx, (%rdi) - movq %rsi, 8(%rdi) - sbbq $0, %rax - andl $1, %eax - retq -.Lfunc_end27: - .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L - - .globl mcl_fp_shr1_2L - .align 16, 0x90 - .type mcl_fp_shr1_2L,@function -mcl_fp_shr1_2L: # @mcl_fp_shr1_2L -# BB#0: - movq (%rsi), %rax - movq 8(%rsi), %rcx - shrdq $1, %rcx, %rax - movq %rax, (%rdi) - shrq %rcx - movq %rcx, 8(%rdi) - retq -.Lfunc_end28: - .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L - - .globl mcl_fp_add2L - .align 16, 0x90 - .type mcl_fp_add2L,@function -mcl_fp_add2L: # @mcl_fp_add2L -# BB#0: - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq $0, %rsi - testb $1, %sil - jne .LBB29_2 -# BB#1: # %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) -.LBB29_2: # %carry - retq -.Lfunc_end29: - .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L - - .globl mcl_fp_addNF2L - .align 16, 0x90 - .type mcl_fp_addNF2L,@function -mcl_fp_addNF2L: # @mcl_fp_addNF2L -# BB#0: - movq (%rdx), %rax - movq 8(%rdx), %r8 - addq (%rsi), %rax - adcq 8(%rsi), %r8 - movq %rax, %rsi - subq (%rcx), %rsi - movq %r8, %rdx - sbbq 8(%rcx), %rdx - testq %rdx, %rdx - cmovsq %rax, %rsi - movq %rsi, (%rdi) - cmovsq %r8, %rdx - movq %rdx, 8(%rdi) - retq -.Lfunc_end30: - .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L - - .globl mcl_fp_sub2L - .align 16, 0x90 - .type mcl_fp_sub2L,@function -mcl_fp_sub2L: # @mcl_fp_sub2L -# BB#0: - movq (%rsi), %rax - movq 8(%rsi), %r8 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r8 - movq %rax, (%rdi) - movq %r8, 8(%rdi) - sbbq $0, %rsi - testb $1, %sil - jne 
.LBB31_2 -# BB#1: # %nocarry - retq -.LBB31_2: # %carry - movq 8(%rcx), %rdx - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %r8, %rdx - movq %rdx, 8(%rdi) - retq -.Lfunc_end31: - .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L - - .globl mcl_fp_subNF2L - .align 16, 0x90 - .type mcl_fp_subNF2L,@function -mcl_fp_subNF2L: # @mcl_fp_subNF2L -# BB#0: - movq (%rsi), %r8 - movq 8(%rsi), %rsi - subq (%rdx), %r8 - sbbq 8(%rdx), %rsi - movq %rsi, %rdx - sarq $63, %rdx - movq 8(%rcx), %rax - andq %rdx, %rax - andq (%rcx), %rdx - addq %r8, %rdx - movq %rdx, (%rdi) - adcq %rsi, %rax - movq %rax, 8(%rdi) - retq -.Lfunc_end32: - .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L - - .globl mcl_fpDbl_add2L - .align 16, 0x90 - .type mcl_fpDbl_add2L,@function -mcl_fpDbl_add2L: # @mcl_fpDbl_add2L -# BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rdx), %r10 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r10 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - adcq %r8, %r9 - sbbq %rax, %rax - andl $1, %eax - movq %r10, %rdx - subq (%rcx), %rdx - movq %r9, %rsi - sbbq 8(%rcx), %rsi - sbbq $0, %rax - andl $1, %eax - cmovneq %r10, %rdx - movq %rdx, 16(%rdi) - testb %al, %al - cmovneq %r9, %rsi - movq %rsi, 24(%rdi) - retq -.Lfunc_end33: - .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L - - .globl mcl_fpDbl_sub2L - .align 16, 0x90 - .type mcl_fpDbl_sub2L,@function -mcl_fpDbl_sub2L: # @mcl_fpDbl_sub2L -# BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rsi), %r10 - movq (%rsi), %r11 - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %r11 - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %r10 - movq %r11, (%rdi) - movq %rsi, 8(%rdi) - sbbq %r8, %r9 - movl $0, %edx - sbbq $0, %rdx - andl $1, %edx - movq (%rcx), %rsi - cmoveq %rax, %rsi - testb %dl, %dl - cmovneq 8(%rcx), %rax - addq %r10, %rsi - movq %rsi, 16(%rdi) - adcq %r9, %rax - movq %rax, 24(%rdi) - retq -.Lfunc_end34: - .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L - - .globl mcl_fp_mulUnitPre3L - .align 16, 0x90 - .type mcl_fp_mulUnitPre3L,@function -mcl_fp_mulUnitPre3L: # @mcl_fp_mulUnitPre3L -# BB#0: - movq %rdx, %rcx - movq %rcx, %rax - mulq 16(%rsi) - movq %rdx, %r8 - movq %rax, %r9 - movq %rcx, %rax - mulq 8(%rsi) - movq %rdx, %r10 - movq %rax, %r11 - movq %rcx, %rax - mulq (%rsi) - movq %rax, (%rdi) - addq %r11, %rdx - movq %rdx, 8(%rdi) - adcq %r9, %r10 - movq %r10, 16(%rdi) - adcq $0, %r8 - movq %r8, 24(%rdi) - retq -.Lfunc_end35: - .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L - - .globl mcl_fpDbl_mulPre3L - .align 16, 0x90 - .type mcl_fpDbl_mulPre3L,@function -mcl_fpDbl_mulPre3L: # @mcl_fpDbl_mulPre3L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %r10 - movq (%rsi), %r8 - movq 8(%rsi), %r9 - movq (%r10), %rbx - movq %r8, %rax - mulq %rbx - movq %rdx, %rcx - movq 16(%rsi), %r11 - movq %rax, (%rdi) - movq %r11, %rax - mulq %rbx - movq %rdx, %r14 - movq %rax, %rsi - movq %r9, %rax - mulq %rbx - movq %rdx, %r15 - movq %rax, %rbx - addq %rcx, %rbx - adcq %rsi, %r15 - adcq $0, %r14 - movq 8(%r10), %rcx - movq %r11, %rax - mulq %rcx - movq %rdx, %r12 - movq %rax, %rbp - movq %r9, %rax - mulq %rcx - movq %rdx, %r13 - movq %rax, %rsi - movq %r8, %rax - mulq %rcx - addq %rbx, %rax - movq %rax, 8(%rdi) - adcq %r15, %rsi - adcq %r14, %rbp - sbbq %r14, %r14 - andl $1, %r14d - addq %rdx, %rsi - adcq %r13, %rbp - adcq %r12, %r14 - movq 16(%r10), %r15 - movq %r11, %rax - mulq %r15 - movq %rdx, %r10 - movq %rax, %rbx - movq %r9, %rax - 
mulq %r15 - movq %rdx, %r9 - movq %rax, %rcx - movq %r8, %rax - mulq %r15 - addq %rsi, %rax - movq %rax, 16(%rdi) - adcq %rbp, %rcx - adcq %r14, %rbx - sbbq %rax, %rax - andl $1, %eax - addq %rdx, %rcx - movq %rcx, 24(%rdi) - adcq %r9, %rbx - movq %rbx, 32(%rdi) - adcq %r10, %rax - movq %rax, 40(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end36: - .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L - - .globl mcl_fpDbl_sqrPre3L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre3L,@function -mcl_fpDbl_sqrPre3L: # @mcl_fpDbl_sqrPre3L -# BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 16(%rsi), %r10 - movq (%rsi), %rcx - movq 8(%rsi), %rsi - movq %rcx, %rax - mulq %rcx - movq %rdx, %rbx - movq %rax, (%rdi) - movq %r10, %rax - mulq %rcx - movq %rdx, %r8 - movq %rax, %r11 - movq %rsi, %rax - mulq %rcx - movq %rdx, %r14 - movq %rax, %r12 - addq %r12, %rbx - movq %r14, %r13 - adcq %r11, %r13 - movq %r8, %rcx - adcq $0, %rcx - movq %r10, %rax - mulq %rsi - movq %rdx, %r9 - movq %rax, %r15 - movq %rsi, %rax - mulq %rsi - movq %rax, %rsi - addq %r12, %rbx - movq %rbx, 8(%rdi) - adcq %r13, %rsi - adcq %r15, %rcx - sbbq %rbx, %rbx - andl $1, %ebx - addq %r14, %rsi - adcq %rdx, %rcx - adcq %r9, %rbx - movq %r10, %rax - mulq %r10 - addq %r11, %rsi - movq %rsi, 16(%rdi) - adcq %r15, %rcx - adcq %rbx, %rax - sbbq %rsi, %rsi - andl $1, %esi - addq %r8, %rcx - movq %rcx, 24(%rdi) - adcq %r9, %rax - movq %rax, 32(%rdi) - adcq %rdx, %rsi - movq %rsi, 40(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq -.Lfunc_end37: - .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L - - .globl mcl_fp_mont3L - .align 16, 0x90 - .type mcl_fp_mont3L,@function -mcl_fp_mont3L: # @mcl_fp_mont3L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %r10 - movq %r10, -56(%rsp) # 8-byte Spill - movq %rdi, -48(%rsp) # 8-byte Spill - movq 16(%rsi), %rax - movq %rax, -64(%rsp) # 8-byte Spill - movq (%r10), %rdi - mulq %rdi - movq %rax, %rbp - movq %rdx, %r8 - movq (%rsi), %rbx - movq %rbx, -32(%rsp) # 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -40(%rsp) # 8-byte Spill - mulq %rdi - movq %rdx, %r15 - movq %rax, %rsi - movq %rbx, %rax - mulq %rdi - movq %rax, %r12 - movq %rdx, %r11 - addq %rsi, %r11 - adcq %rbp, %r15 - adcq $0, %r8 - movq -8(%rcx), %r14 - movq (%rcx), %rdi - movq %rdi, -24(%rsp) # 8-byte Spill - movq %r12, %rbp - imulq %r14, %rbp - movq 16(%rcx), %rdx - movq %rdx, -16(%rsp) # 8-byte Spill - movq 8(%rcx), %rbx - movq %rbx, -8(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq %rdx - movq %rdx, %rcx - movq %rax, %r13 - movq %rbp, %rax - mulq %rbx - movq %rdx, %rsi - movq %rax, %r9 - movq %rbp, %rax - mulq %rdi - movq %rdx, %rbp - addq %r9, %rbp - adcq %r13, %rsi - adcq $0, %rcx - addq %r12, %rax - adcq %r11, %rbp - movq 8(%r10), %rbx - adcq %r15, %rsi - adcq %r8, %rcx - sbbq %rdi, %rdi - andl $1, %edi - movq %rbx, %rax - movq -64(%rsp), %r10 # 8-byte Reload - mulq %r10 - movq %rdx, %r15 - movq %rax, %r9 - movq %rbx, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %r11 - movq %rbx, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rax, %r8 - movq %rdx, %rbx - addq %r11, %rbx - adcq %r9, %r12 - adcq $0, %r15 - addq %rbp, %r8 - adcq %rsi, %rbx - adcq %rcx, %r12 - adcq %rdi, %r15 - sbbq %r11, %r11 - andl $1, %r11d - movq %r8, %rcx - imulq %r14, %rcx - movq %rcx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %r9 - 
movq %rcx, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %rdi - movq %rcx, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - addq %rdi, %rbp - adcq %r9, %rsi - adcq $0, %r13 - addq %r8, %rax - adcq %rbx, %rbp - adcq %r12, %rsi - adcq %r15, %r13 - adcq $0, %r11 - movq -56(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rcx - movq %rcx, %rax - mulq %r10 - movq %rdx, %r8 - movq %rax, %r10 - movq %rcx, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %rdi - movq %rcx, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rax, %r9 - movq %rdx, %rcx - addq %rdi, %rcx - adcq %r10, %r15 - adcq $0, %r8 - addq %rbp, %r9 - adcq %rsi, %rcx - adcq %r13, %r15 - adcq %r11, %r8 - sbbq %rdi, %rdi - andl $1, %edi - imulq %r9, %r14 - movq %r14, %rax - movq -16(%rsp), %r12 # 8-byte Reload - mulq %r12 - movq %rdx, %rbx - movq %rax, %r10 - movq %r14, %rax - movq -8(%rsp), %r13 # 8-byte Reload - mulq %r13 - movq %rdx, %rsi - movq %rax, %r11 - movq %r14, %rax - movq -24(%rsp), %rbp # 8-byte Reload - mulq %rbp - addq %r11, %rdx - adcq %r10, %rsi - adcq $0, %rbx - addq %r9, %rax - adcq %rcx, %rdx - adcq %r15, %rsi - adcq %r8, %rbx - adcq $0, %rdi - movq %rdx, %rax - subq %rbp, %rax - movq %rsi, %rcx - sbbq %r13, %rcx - movq %rbx, %rbp - sbbq %r12, %rbp - sbbq $0, %rdi - andl $1, %edi - cmovneq %rbx, %rbp - testb %dil, %dil - cmovneq %rdx, %rax - movq -48(%rsp), %rdx # 8-byte Reload - movq %rax, (%rdx) - cmovneq %rsi, %rcx - movq %rcx, 8(%rdx) - movq %rbp, 16(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end38: - .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L - - .globl mcl_fp_montNF3L - .align 16, 0x90 - .type mcl_fp_montNF3L,@function -mcl_fp_montNF3L: # @mcl_fp_montNF3L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -8(%rsp) # 8-byte Spill - movq %rdi, -32(%rsp) # 8-byte Spill - movq 16(%rsi), %r10 - movq %r10, -40(%rsp) # 8-byte Spill - movq (%rdx), %rbp - movq %r10, %rax - mulq %rbp - movq %rax, %r14 - movq %rdx, %r15 - movq (%rsi), %rbx - movq %rbx, -64(%rsp) # 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -24(%rsp) # 8-byte Spill - mulq %rbp - movq %rdx, %rdi - movq %rax, %rsi - movq %rbx, %rax - mulq %rbp - movq %rax, %r13 - movq %rdx, %rbp - addq %rsi, %rbp - adcq %r14, %rdi - adcq $0, %r15 - movq -8(%rcx), %r14 - movq (%rcx), %r11 - movq %r11, -48(%rsp) # 8-byte Spill - movq %r13, %rbx - imulq %r14, %rbx - movq 16(%rcx), %rdx - movq %rdx, -16(%rsp) # 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -56(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq %rdx - movq %rdx, %r8 - movq %rax, %r12 - movq %rbx, %rax - mulq %rcx - movq %rdx, %r9 - movq %rax, %rcx - movq %rbx, %rax - mulq %r11 - addq %r13, %rax - adcq %rbp, %rcx - adcq %rdi, %r12 - adcq $0, %r15 - addq %rdx, %rcx - movq -8(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rbp - adcq %r9, %r12 - adcq %r8, %r15 - movq %rbp, %rax - mulq %r10 - movq %rdx, %rsi - movq %rax, %r8 - movq %rbp, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r9 - movq %rbp, %rax - movq -64(%rsp), %r10 # 8-byte Reload - mulq %r10 - movq %rax, %r13 - movq %rdx, %rbp - addq %r9, %rbp - adcq %r8, %rbx - adcq $0, %rsi - addq %rcx, %r13 - adcq %r12, %rbp - adcq %r15, %rbx - adcq $0, %rsi - movq %r13, %rcx - imulq %r14, %rcx - movq %rcx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r15 - movq %rcx, %rax - movq -56(%rsp), %rdi # 8-byte Reload - mulq %rdi - movq %rdx, 
%r9 - movq %rax, %r12 - movq %rcx, %rax - mulq %r11 - addq %r13, %rax - adcq %rbp, %r12 - adcq %rbx, %r15 - adcq $0, %rsi - addq %rdx, %r12 - adcq %r9, %r15 - adcq %r8, %rsi - movq -8(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rbx - movq %rbx, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r8 - movq %rbx, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r9 - movq %rbx, %rax - mulq %r10 - movq %rax, %r10 - movq %rdx, %rbx - addq %r9, %rbx - adcq %r8, %rcx - adcq $0, %rbp - addq %r12, %r10 - adcq %r15, %rbx - adcq %rsi, %rcx - adcq $0, %rbp - imulq %r10, %r14 - movq %r14, %rax - movq -16(%rsp), %r15 # 8-byte Reload - mulq %r15 - movq %rdx, %r8 - movq %rax, %rsi - movq %r14, %rax - movq %rdi, %r11 - mulq %r11 - movq %rdx, %r9 - movq %rax, %rdi - movq %r14, %rax - movq -48(%rsp), %r14 # 8-byte Reload - mulq %r14 - addq %r10, %rax - adcq %rbx, %rdi - adcq %rcx, %rsi - adcq $0, %rbp - addq %rdx, %rdi - adcq %r9, %rsi - adcq %r8, %rbp - movq %rdi, %rax - subq %r14, %rax - movq %rsi, %rcx - sbbq %r11, %rcx - movq %rbp, %rbx - sbbq %r15, %rbx - movq %rbx, %rdx - sarq $63, %rdx - cmovsq %rdi, %rax - movq -32(%rsp), %rdx # 8-byte Reload - movq %rax, (%rdx) - cmovsq %rsi, %rcx - movq %rcx, 8(%rdx) - cmovsq %rbp, %rbx - movq %rbx, 16(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end39: - .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L - - .globl mcl_fp_montRed3L - .align 16, 0x90 - .type mcl_fp_montRed3L,@function -mcl_fp_montRed3L: # @mcl_fp_montRed3L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rdi, -8(%rsp) # 8-byte Spill - movq -8(%rcx), %r9 - movq (%rcx), %rdi - movq %rdi, -16(%rsp) # 8-byte Spill - movq (%rsi), %r15 - movq %r15, %rbx - imulq %r9, %rbx - movq 16(%rcx), %rbp - movq %rbp, -24(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq %rbp - movq %rax, %r11 - movq %rdx, %r8 - movq 8(%rcx), %rcx - movq %rcx, -32(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq %rcx - movq %rcx, %r12 - movq %rdx, %r10 - movq %rax, %r14 - movq %rbx, %rax - mulq %rdi - movq %rdi, %rbx - movq %rdx, %rcx - addq %r14, %rcx - adcq %r11, %r10 - adcq $0, %r8 - movq 40(%rsi), %rdi - movq 32(%rsi), %r13 - addq %r15, %rax - adcq 8(%rsi), %rcx - adcq 16(%rsi), %r10 - adcq 24(%rsi), %r8 - adcq $0, %r13 - adcq $0, %rdi - sbbq %r15, %r15 - andl $1, %r15d - movq %rcx, %rsi - imulq %r9, %rsi - movq %rsi, %rax - mulq %rbp - movq %rdx, %r11 - movq %rax, %rbp - movq %rsi, %rax - mulq %r12 - movq %rdx, %r14 - movq %rax, %r12 - movq %rsi, %rax - mulq %rbx - movq %rdx, %rbx - addq %r12, %rbx - adcq %rbp, %r14 - adcq $0, %r11 - addq %rcx, %rax - adcq %r10, %rbx - adcq %r8, %r14 - adcq %r13, %r11 - adcq $0, %rdi - adcq $0, %r15 - imulq %rbx, %r9 - movq %r9, %rax - movq -24(%rsp), %r12 # 8-byte Reload - mulq %r12 - movq %rdx, %rbp - movq %rax, %r8 - movq %r9, %rax - movq -32(%rsp), %r13 # 8-byte Reload - mulq %r13 - movq %rdx, %rsi - movq %rax, %r10 - movq %r9, %rax - movq -16(%rsp), %rcx # 8-byte Reload - mulq %rcx - addq %r10, %rdx - adcq %r8, %rsi - adcq $0, %rbp - addq %rbx, %rax - adcq %r14, %rdx - adcq %r11, %rsi - adcq %rdi, %rbp - adcq $0, %r15 - movq %rdx, %rax - subq %rcx, %rax - movq %rsi, %rdi - sbbq %r13, %rdi - movq %rbp, %rcx - sbbq %r12, %rcx - sbbq $0, %r15 - andl $1, %r15d - cmovneq %rbp, %rcx - testb %r15b, %r15b - cmovneq %rdx, %rax - movq -8(%rsp), %rdx # 8-byte Reload - movq %rax, (%rdx) - cmovneq %rsi, %rdi - movq %rdi, 8(%rdx) - movq %rcx, 
16(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end40: - .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L - - .globl mcl_fp_addPre3L - .align 16, 0x90 - .type mcl_fp_addPre3L,@function -mcl_fp_addPre3L: # @mcl_fp_addPre3L -# BB#0: - movq 16(%rdx), %rax - movq (%rdx), %rcx - movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rax - movq %rcx, (%rdi) - movq %rdx, 8(%rdi) - movq %rax, 16(%rdi) - sbbq %rax, %rax - andl $1, %eax - retq -.Lfunc_end41: - .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L - - .globl mcl_fp_subPre3L - .align 16, 0x90 - .type mcl_fp_subPre3L,@function -mcl_fp_subPre3L: # @mcl_fp_subPre3L -# BB#0: - movq 16(%rsi), %r8 - movq (%rsi), %rcx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rcx - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %r8 - movq %rcx, (%rdi) - movq %rsi, 8(%rdi) - movq %r8, 16(%rdi) - sbbq $0, %rax - andl $1, %eax - retq -.Lfunc_end42: - .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L - - .globl mcl_fp_shr1_3L - .align 16, 0x90 - .type mcl_fp_shr1_3L,@function -mcl_fp_shr1_3L: # @mcl_fp_shr1_3L -# BB#0: - movq 16(%rsi), %rax - movq (%rsi), %rcx - movq 8(%rsi), %rdx - shrdq $1, %rdx, %rcx - movq %rcx, (%rdi) - shrdq $1, %rax, %rdx - movq %rdx, 8(%rdi) - shrq %rax - movq %rax, 16(%rdi) - retq -.Lfunc_end43: - .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L - - .globl mcl_fp_add3L - .align 16, 0x90 - .type mcl_fp_add3L,@function -mcl_fp_add3L: # @mcl_fp_add3L -# BB#0: - movq 16(%rdx), %r8 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r8 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r8, 16(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq 16(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne .LBB44_2 -# BB#1: # %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r8, 16(%rdi) -.LBB44_2: # %carry - retq -.Lfunc_end44: - .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L - - .globl mcl_fp_addNF3L - .align 16, 0x90 - .type mcl_fp_addNF3L,@function -mcl_fp_addNF3L: # @mcl_fp_addNF3L -# BB#0: - movq 16(%rdx), %r8 - movq (%rdx), %r10 - movq 8(%rdx), %r9 - addq (%rsi), %r10 - adcq 8(%rsi), %r9 - adcq 16(%rsi), %r8 - movq %r10, %rsi - subq (%rcx), %rsi - movq %r9, %rdx - sbbq 8(%rcx), %rdx - movq %r8, %rax - sbbq 16(%rcx), %rax - movq %rax, %rcx - sarq $63, %rcx - cmovsq %r10, %rsi - movq %rsi, (%rdi) - cmovsq %r9, %rdx - movq %rdx, 8(%rdi) - cmovsq %r8, %rax - movq %rax, 16(%rdi) - retq -.Lfunc_end45: - .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L - - .globl mcl_fp_sub3L - .align 16, 0x90 - .type mcl_fp_sub3L,@function -mcl_fp_sub3L: # @mcl_fp_sub3L -# BB#0: - movq 16(%rsi), %r8 - movq (%rsi), %rax - movq 8(%rsi), %r9 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r9 - sbbq 16(%rdx), %r8 - movq %rax, (%rdi) - movq %r9, 8(%rdi) - movq %r8, 16(%rdi) - sbbq $0, %rsi - testb $1, %sil - jne .LBB46_2 -# BB#1: # %nocarry - retq -.LBB46_2: # %carry - movq 8(%rcx), %rdx - movq 16(%rcx), %rsi - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %r9, %rdx - movq %rdx, 8(%rdi) - adcq %r8, %rsi - movq %rsi, 16(%rdi) - retq -.Lfunc_end46: - .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L - - .globl mcl_fp_subNF3L - .align 16, 0x90 - .type mcl_fp_subNF3L,@function -mcl_fp_subNF3L: # @mcl_fp_subNF3L -# BB#0: - movq 16(%rsi), %r10 - movq (%rsi), %r8 - movq 8(%rsi), %r9 - subq (%rdx), %r8 - sbbq 8(%rdx), %r9 - sbbq 16(%rdx), %r10 - movq %r10, %rdx - sarq $63, %rdx - movq %rdx, %rsi - shldq $1, %r10, 
%rsi - andq (%rcx), %rsi - movq 16(%rcx), %rax - andq %rdx, %rax - andq 8(%rcx), %rdx - addq %r8, %rsi - movq %rsi, (%rdi) - adcq %r9, %rdx - movq %rdx, 8(%rdi) - adcq %r10, %rax - movq %rax, 16(%rdi) - retq -.Lfunc_end47: - .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L - - .globl mcl_fpDbl_add3L - .align 16, 0x90 - .type mcl_fpDbl_add3L,@function -mcl_fpDbl_add3L: # @mcl_fpDbl_add3L -# BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq 40(%rdx), %r10 - movq 40(%rsi), %r8 - movq 32(%rdx), %r11 - movq 24(%rdx), %r14 - movq 24(%rsi), %r15 - movq 32(%rsi), %r9 - movq 16(%rdx), %rbx - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rbx - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %rbx, 16(%rdi) - adcq %r14, %r15 - adcq %r11, %r9 - adcq %r10, %r8 - sbbq %rax, %rax - andl $1, %eax - movq %r15, %rdx - subq (%rcx), %rdx - movq %r9, %rsi - sbbq 8(%rcx), %rsi - movq %r8, %rbx - sbbq 16(%rcx), %rbx - sbbq $0, %rax - andl $1, %eax - cmovneq %r15, %rdx - movq %rdx, 24(%rdi) - testb %al, %al - cmovneq %r9, %rsi - movq %rsi, 32(%rdi) - cmovneq %r8, %rbx - movq %rbx, 40(%rdi) - popq %rbx - popq %r14 - popq %r15 - retq -.Lfunc_end48: - .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L - - .globl mcl_fpDbl_sub3L - .align 16, 0x90 - .type mcl_fpDbl_sub3L,@function -mcl_fpDbl_sub3L: # @mcl_fpDbl_sub3L -# BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 40(%rdx), %r10 - movq 40(%rsi), %r8 - movq 32(%rsi), %r9 - movq 24(%rsi), %r11 - movq 16(%rsi), %r14 - movq (%rsi), %rbx - movq 8(%rsi), %rax - xorl %esi, %esi - subq (%rdx), %rbx - sbbq 8(%rdx), %rax - movq 24(%rdx), %r15 - movq 32(%rdx), %r12 - sbbq 16(%rdx), %r14 - movq %rbx, (%rdi) - movq %rax, 8(%rdi) - movq %r14, 16(%rdi) - sbbq %r15, %r11 - sbbq %r12, %r9 - sbbq %r10, %r8 - movl $0, %eax - sbbq $0, %rax - andl $1, %eax - movq (%rcx), %rdx - cmoveq %rsi, %rdx - testb %al, %al - movq 16(%rcx), %rax - cmoveq %rsi, %rax - cmovneq 8(%rcx), %rsi - addq %r11, %rdx - movq %rdx, 24(%rdi) - adcq %r9, %rsi - movq %rsi, 32(%rdi) - adcq %r8, %rax - movq %rax, 40(%rdi) - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq -.Lfunc_end49: - .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L - - .globl mcl_fp_mulUnitPre4L - .align 16, 0x90 - .type mcl_fp_mulUnitPre4L,@function -mcl_fp_mulUnitPre4L: # @mcl_fp_mulUnitPre4L -# BB#0: - pushq %r14 - pushq %rbx - movq %rdx, %rcx - movq %rcx, %rax - mulq 24(%rsi) - movq %rdx, %r8 - movq %rax, %r9 - movq %rcx, %rax - mulq 16(%rsi) - movq %rdx, %r10 - movq %rax, %r11 - movq %rcx, %rax - mulq 8(%rsi) - movq %rdx, %rbx - movq %rax, %r14 - movq %rcx, %rax - mulq (%rsi) - movq %rax, (%rdi) - addq %r14, %rdx - movq %rdx, 8(%rdi) - adcq %r11, %rbx - movq %rbx, 16(%rdi) - adcq %r9, %r10 - movq %r10, 24(%rdi) - adcq $0, %r8 - movq %r8, 32(%rdi) - popq %rbx - popq %r14 - retq -.Lfunc_end50: - .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L - - .globl mcl_fpDbl_mulPre4L - .align 16, 0x90 - .type mcl_fpDbl_mulPre4L,@function -mcl_fpDbl_mulPre4L: # @mcl_fpDbl_mulPre4L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -16(%rsp) # 8-byte Spill - movq (%rsi), %rax - movq %rax, -8(%rsp) # 8-byte Spill - movq 8(%rsi), %r8 - movq %r8, -64(%rsp) # 8-byte Spill - movq (%rdx), %rbx - movq %rdx, %rbp - mulq %rbx - movq %rdx, %r15 - movq 16(%rsi), %rcx - movq %rcx, -24(%rsp) # 8-byte Spill - movq 24(%rsi), %r11 - movq %rax, (%rdi) - movq %r11, %rax - mulq %rbx - movq %rdx, %r12 - movq %rax, %r14 - movq %rcx, %rax - 
mulq %rbx - movq %rdx, %r10 - movq %rax, %r9 - movq %r8, %rax - mulq %rbx - movq %rdx, %r13 - movq %rax, %r8 - addq %r15, %r8 - adcq %r9, %r13 - adcq %r14, %r10 - adcq $0, %r12 - movq %rbp, %r9 - movq 8(%r9), %rbp - movq %r11, %rax - mulq %rbp - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, %r15 - movq %rcx, %rax - mulq %rbp - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, %rcx - movq -64(%rsp), %r14 # 8-byte Reload - movq %r14, %rax - mulq %rbp - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, %rbx - movq -8(%rsp), %rax # 8-byte Reload - mulq %rbp - movq %rdx, -56(%rsp) # 8-byte Spill - addq %r8, %rax - movq %rax, 8(%rdi) - adcq %r13, %rbx - adcq %r10, %rcx - adcq %r12, %r15 - sbbq %r13, %r13 - andl $1, %r13d - movq 16(%r9), %rbp - movq %r14, %rax - mulq %rbp - movq %rax, %r12 - movq %rdx, %r8 - addq -56(%rsp), %rbx # 8-byte Folded Reload - adcq -48(%rsp), %rcx # 8-byte Folded Reload - adcq -40(%rsp), %r15 # 8-byte Folded Reload - adcq -32(%rsp), %r13 # 8-byte Folded Reload - movq %r11, %rax - mulq %rbp - movq %rdx, %r9 - movq %rax, %r11 - movq -24(%rsp), %rax # 8-byte Reload - mulq %rbp - movq %rdx, %r14 - movq %rax, %r10 - movq -8(%rsp), %rax # 8-byte Reload - mulq %rbp - addq %rbx, %rax - movq %rax, 16(%rdi) - adcq %r12, %rcx - adcq %r15, %r10 - adcq %r13, %r11 - sbbq %r13, %r13 - andl $1, %r13d - addq %rdx, %rcx - adcq %r8, %r10 - adcq %r14, %r11 - adcq %r9, %r13 - movq -16(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rbx - movq %rbx, %rax - mulq 24(%rsi) - movq %rdx, %r8 - movq %rax, %r14 - movq %rbx, %rax - mulq 16(%rsi) - movq %rdx, %r9 - movq %rax, %r12 - movq %rbx, %rax - mulq 8(%rsi) - movq %rdx, %r15 - movq %rax, %rbp - movq %rbx, %rax - mulq (%rsi) - addq %rcx, %rax - movq %rax, 24(%rdi) - adcq %r10, %rbp - adcq %r11, %r12 - adcq %r13, %r14 - sbbq %rax, %rax - andl $1, %eax - addq %rdx, %rbp - movq %rbp, 32(%rdi) - adcq %r15, %r12 - movq %r12, 40(%rdi) - adcq %r9, %r14 - movq %r14, 48(%rdi) - adcq %r8, %rax - movq %rax, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end51: - .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L - - .globl mcl_fpDbl_sqrPre4L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre4L,@function -mcl_fpDbl_sqrPre4L: # @mcl_fpDbl_sqrPre4L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rsi, %r10 - movq 16(%r10), %r9 - movq 24(%r10), %r11 - movq (%r10), %r15 - movq 8(%r10), %r8 - movq %r15, %rax - mulq %r15 - movq %rdx, %rbp - movq %rax, (%rdi) - movq %r11, %rax - mulq %r8 - movq %rdx, -8(%rsp) # 8-byte Spill - movq %rax, -32(%rsp) # 8-byte Spill - movq %r9, %rax - mulq %r8 - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, -40(%rsp) # 8-byte Spill - movq %r11, %rax - mulq %r15 - movq %rdx, %rbx - movq %rax, %rcx - movq %r9, %rax - mulq %r15 - movq %rdx, %rsi - movq %rsi, -16(%rsp) # 8-byte Spill - movq %rax, %r12 - movq %r8, %rax - mulq %r8 - movq %rdx, %r13 - movq %rax, %r14 - movq %r8, %rax - mulq %r15 - addq %rax, %rbp - movq %rdx, %r8 - adcq %r12, %r8 - adcq %rsi, %rcx - adcq $0, %rbx - addq %rax, %rbp - movq %rbp, 8(%rdi) - adcq %r14, %r8 - movq -40(%rsp), %rsi # 8-byte Reload - adcq %rsi, %rcx - adcq -32(%rsp), %rbx # 8-byte Folded Reload - sbbq %rbp, %rbp - andl $1, %ebp - addq %rdx, %r8 - adcq %r13, %rcx - movq -24(%rsp), %r15 # 8-byte Reload - adcq %r15, %rbx - adcq -8(%rsp), %rbp # 8-byte Folded Reload - movq %r11, %rax - mulq %r9 - movq %rdx, %r14 - movq %rax, %r11 - movq %r9, %rax - mulq %r9 - movq %rax, %r9 - addq %r12, %r8 - movq %r8, 16(%rdi) 
- adcq %rsi, %rcx - adcq %rbx, %r9 - adcq %rbp, %r11 - sbbq %r12, %r12 - andl $1, %r12d - addq -16(%rsp), %rcx # 8-byte Folded Reload - adcq %r15, %r9 - adcq %rdx, %r11 - adcq %r14, %r12 - movq 24(%r10), %rbp - movq %rbp, %rax - mulq 16(%r10) - movq %rdx, %r8 - movq %rax, %r14 - movq %rbp, %rax - mulq 8(%r10) - movq %rdx, %r13 - movq %rax, %rbx - movq %rbp, %rax - mulq (%r10) - movq %rdx, %r15 - movq %rax, %rsi - movq %rbp, %rax - mulq %rbp - addq %rcx, %rsi - movq %rsi, 24(%rdi) - adcq %r9, %rbx - adcq %r11, %r14 - adcq %r12, %rax - sbbq %rcx, %rcx - andl $1, %ecx - addq %r15, %rbx - movq %rbx, 32(%rdi) - adcq %r13, %r14 - movq %r14, 40(%rdi) - adcq %r8, %rax - movq %rax, 48(%rdi) - adcq %rdx, %rcx - movq %rcx, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end52: - .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L - - .globl mcl_fp_mont4L - .align 16, 0x90 - .type mcl_fp_mont4L,@function -mcl_fp_mont4L: # @mcl_fp_mont4L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rdi, -88(%rsp) # 8-byte Spill - movq 24(%rsi), %rax - movq %rax, -56(%rsp) # 8-byte Spill - movq (%rdx), %rdi - mulq %rdi - movq %rax, %r9 - movq %rdx, %rbp - movq 16(%rsi), %rax - movq %rax, -64(%rsp) # 8-byte Spill - mulq %rdi - movq %rax, %r8 - movq %rdx, %r10 - movq (%rsi), %rbx - movq %rbx, -72(%rsp) # 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -80(%rsp) # 8-byte Spill - mulq %rdi - movq %rdx, %r14 - movq %rax, %rsi - movq %rbx, %rax - mulq %rdi - movq %rax, %r11 - movq %rdx, %r13 - addq %rsi, %r13 - adcq %r8, %r14 - adcq %r9, %r10 - adcq $0, %rbp - movq %rbp, -96(%rsp) # 8-byte Spill - movq -8(%rcx), %rax - movq %rax, -24(%rsp) # 8-byte Spill - movq (%rcx), %r8 - movq %r8, -32(%rsp) # 8-byte Spill - movq %r11, %rdi - imulq %rax, %rdi - movq 24(%rcx), %rdx - movq %rdx, -8(%rsp) # 8-byte Spill - movq 16(%rcx), %rsi - movq %rsi, -16(%rsp) # 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -40(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq %rdx - movq %rdx, %r9 - movq %rax, %r12 - movq %rdi, %rax - mulq %rsi - movq %rdx, %rbp - movq %rax, %rbx - movq %rdi, %rax - mulq %rcx - movq %rdx, %rsi - movq %rax, %r15 - movq %rdi, %rax - mulq %r8 - movq %rdx, %rcx - addq %r15, %rcx - adcq %rbx, %rsi - adcq %r12, %rbp - adcq $0, %r9 - addq %r11, %rax - adcq %r13, %rcx - adcq %r14, %rsi - adcq %r10, %rbp - adcq -96(%rsp), %r9 # 8-byte Folded Reload - sbbq %r13, %r13 - andl $1, %r13d - movq -48(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rdi - movq %rdi, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %r11 - movq %rdi, %rax - mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %r14 - movq %rdi, %rax - mulq -80(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r15 - movq %rdi, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rax, %r8 - movq %rdx, %rdi - addq %r15, %rdi - adcq %r14, %rbx - adcq %r11, %r10 - adcq $0, %r12 - addq %rcx, %r8 - adcq %rsi, %rdi - adcq %rbp, %rbx - adcq %r9, %r10 - adcq %r13, %r12 - sbbq %r15, %r15 - andl $1, %r15d - movq %r8, %rsi - imulq -24(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -96(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r14 - movq %rsi, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, %rbp - movq %rsi, %rax - mulq -32(%rsp) # 
8-byte Folded Reload - movq %rdx, %r11 - addq %rbp, %r11 - adcq %r14, %r9 - adcq -96(%rsp), %rcx # 8-byte Folded Reload - adcq $0, %r13 - addq %r8, %rax - adcq %rdi, %r11 - adcq %rbx, %r9 - adcq %r10, %rcx - adcq %r12, %r13 - adcq $0, %r15 - movq -48(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rsi - movq %rsi, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, -96(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %rbx - movq %rsi, %rax - mulq -80(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, %rdi - movq %rsi, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rax, %r8 - movq %rdx, %rbp - addq %rdi, %rbp - adcq %rbx, %r14 - adcq -96(%rsp), %r10 # 8-byte Folded Reload - adcq $0, %r12 - addq %r11, %r8 - adcq %r9, %rbp - adcq %rcx, %r14 - adcq %r13, %r10 - adcq %r15, %r12 - sbbq %r13, %r13 - movq %r8, %rsi - imulq -24(%rsp), %rsi # 8-byte Folded Reload - andl $1, %r13d - movq %rsi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r9 - movq %rsi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r11 - movq %rsi, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r15 - movq %rsi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - addq %r15, %rsi - adcq %r11, %rbx - adcq %r9, %rcx - adcq $0, %rdi - addq %r8, %rax - adcq %rbp, %rsi - adcq %r14, %rbx - adcq %r10, %rcx - adcq %r12, %rdi - adcq $0, %r13 - movq -48(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rbp - movq %rbp, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r14 - movq %rbp, %rax - mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %r15 - movq %rbp, %rax - mulq -80(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, %r12 - movq %rbp, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rax, %r9 - movq %rdx, %rbp - addq %r12, %rbp - adcq %r15, %r11 - adcq %r14, %r10 - adcq $0, %r8 - addq %rsi, %r9 - adcq %rbx, %rbp - adcq %rcx, %r11 - adcq %rdi, %r10 - adcq %r13, %r8 - sbbq %rsi, %rsi - andl $1, %esi - movq -24(%rsp), %rcx # 8-byte Reload - imulq %r9, %rcx - movq %rcx, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, -24(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %r15 - movq %rcx, %rax - movq -40(%rsp), %r14 # 8-byte Reload - mulq %r14 - movq %rdx, %rdi - movq %rax, %r12 - movq %rcx, %rax - movq -32(%rsp), %rcx # 8-byte Reload - mulq %rcx - addq %r12, %rdx - adcq %r15, %rdi - adcq -24(%rsp), %r13 # 8-byte Folded Reload - adcq $0, %rbx - addq %r9, %rax - adcq %rbp, %rdx - adcq %r11, %rdi - adcq %r10, %r13 - adcq %r8, %rbx - adcq $0, %rsi - movq %rdx, %rax - subq %rcx, %rax - movq %rdi, %rcx - sbbq %r14, %rcx - movq %r13, %r8 - sbbq -16(%rsp), %r8 # 8-byte Folded Reload - movq %rbx, %rbp - sbbq -8(%rsp), %rbp # 8-byte Folded Reload - sbbq $0, %rsi - andl $1, %esi - cmovneq %rbx, %rbp - testb %sil, %sil - cmovneq %rdx, %rax - movq -88(%rsp), %rdx # 8-byte Reload - movq %rax, (%rdx) - cmovneq %rdi, %rcx - movq %rcx, 8(%rdx) - cmovneq %r13, %r8 - movq %r8, 16(%rdx) - movq %rbp, 24(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end53: - .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L - - .globl mcl_fp_montNF4L - .align 16, 0x90 - .type mcl_fp_montNF4L,@function -mcl_fp_montNF4L: # @mcl_fp_montNF4L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - 
pushq %r12 - pushq %rbx - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rdi, -88(%rsp) # 8-byte Spill - movq 24(%rsi), %rax - movq %rax, -24(%rsp) # 8-byte Spill - movq (%rdx), %rdi - mulq %rdi - movq %rax, %r15 - movq %rdx, %r12 - movq 16(%rsi), %rax - movq %rax, -32(%rsp) # 8-byte Spill - mulq %rdi - movq %rax, %r8 - movq %rdx, %r9 - movq (%rsi), %rbp - movq %rbp, -40(%rsp) # 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -48(%rsp) # 8-byte Spill - mulq %rdi - movq %rdx, %rbx - movq %rax, %rsi - movq %rbp, %rax - mulq %rdi - movq %rax, %r11 - movq %rdx, %rdi - addq %rsi, %rdi - adcq %r8, %rbx - adcq %r15, %r9 - adcq $0, %r12 - movq -8(%rcx), %rax - movq %rax, -8(%rsp) # 8-byte Spill - movq (%rcx), %r8 - movq %r8, -64(%rsp) # 8-byte Spill - movq %r11, %rsi - imulq %rax, %rsi - movq 24(%rcx), %rdx - movq %rdx, -56(%rsp) # 8-byte Spill - movq 16(%rcx), %rbp - movq %rbp, -72(%rsp) # 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -80(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq %rdx - movq %rdx, %r15 - movq %rax, %r13 - movq %rsi, %rax - mulq %rbp - movq %rdx, %r10 - movq %rax, %rbp - movq %rsi, %rax - mulq %rcx - movq %rdx, %r14 - movq %rax, %rcx - movq %rsi, %rax - mulq %r8 - addq %r11, %rax - adcq %rdi, %rcx - adcq %rbx, %rbp - adcq %r9, %r13 - adcq $0, %r12 - addq %rdx, %rcx - adcq %r14, %rbp - adcq %r10, %r13 - adcq %r15, %r12 - movq -16(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rdi - movq %rdi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r10 - movq %rdi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r11 - movq %rdi, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r14 - movq %rdi, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rax, %rdi - movq %rdx, %r9 - addq %r14, %r9 - adcq %r11, %r8 - adcq %r10, %rsi - adcq $0, %rbx - addq %rcx, %rdi - adcq %rbp, %r9 - adcq %r13, %r8 - adcq %r12, %rsi - adcq $0, %rbx - movq %rdi, %rcx - imulq -8(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %r12 - movq %rcx, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, %r13 - movq %rcx, %rax - movq -80(%rsp), %r15 # 8-byte Reload - mulq %r15 - movq %rdx, %r14 - movq %rax, %rbp - movq %rcx, %rax - mulq -64(%rsp) # 8-byte Folded Reload - addq %rdi, %rax - adcq %r9, %rbp - adcq %r8, %r13 - adcq %rsi, %r12 - adcq $0, %rbx - addq %rdx, %rbp - adcq %r14, %r13 - adcq %r11, %r12 - adcq %r10, %rbx - movq -16(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rdi - movq %rdi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r10 - movq %rdi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r11 - movq %rdi, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r14 - movq %rdi, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rax, %r9 - movq %rdx, %rdi - addq %r14, %rdi - adcq %r11, %rcx - adcq %r10, %r8 - adcq $0, %rsi - addq %rbp, %r9 - adcq %r13, %rdi - adcq %r12, %rcx - adcq %rbx, %r8 - adcq $0, %rsi - movq %r9, %rbx - imulq -8(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %r12 - movq %rbx, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, %r13 - movq %rbx, %rax - mulq %r15 - movq %rdx, %r14 - movq %rax, %rbp - movq %rbx, %rax - mulq -64(%rsp) # 8-byte Folded Reload - addq %r9, %rax - adcq %rdi, %rbp - adcq %rcx, %r13 - adcq %r8, %r12 - adcq 
$0, %rsi - addq %rdx, %rbp - adcq %r14, %r13 - adcq %r11, %r12 - adcq %r10, %rsi - movq -16(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rdi - movq %rdi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %rcx - movq %rdi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r11 - movq %rdi, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %r14 - movq %rdi, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rax, %r9 - movq %rdx, %rdi - addq %r14, %rdi - adcq %r11, %r10 - adcq %rcx, %r8 - adcq $0, %rbx - addq %rbp, %r9 - adcq %r13, %rdi - adcq %r12, %r10 - adcq %rsi, %r8 - adcq $0, %rbx - movq -8(%rsp), %rsi # 8-byte Reload - imulq %r9, %rsi - movq %rsi, %rax - movq -56(%rsp), %r12 # 8-byte Reload - mulq %r12 - movq %rdx, -8(%rsp) # 8-byte Spill - movq %rax, %r13 - movq %rsi, %rax - movq -72(%rsp), %r14 # 8-byte Reload - mulq %r14 - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rax, %rbp - movq %rsi, %rax - movq -64(%rsp), %r11 # 8-byte Reload - mulq %r11 - movq %rdx, %r15 - movq %rax, %rcx - movq %rsi, %rax - movq -80(%rsp), %rsi # 8-byte Reload - mulq %rsi - addq %r9, %rcx - adcq %rdi, %rax - adcq %r10, %rbp - adcq %r8, %r13 - adcq $0, %rbx - addq %r15, %rax - adcq %rdx, %rbp - adcq -16(%rsp), %r13 # 8-byte Folded Reload - adcq -8(%rsp), %rbx # 8-byte Folded Reload - movq %rax, %rcx - subq %r11, %rcx - movq %rbp, %rdx - sbbq %rsi, %rdx - movq %r13, %rdi - sbbq %r14, %rdi - movq %rbx, %rsi - sbbq %r12, %rsi - cmovsq %rax, %rcx - movq -88(%rsp), %rax # 8-byte Reload - movq %rcx, (%rax) - cmovsq %rbp, %rdx - movq %rdx, 8(%rax) - cmovsq %r13, %rdi - movq %rdi, 16(%rax) - cmovsq %rbx, %rsi - movq %rsi, 24(%rax) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end54: - .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L - - .globl mcl_fp_montRed4L - .align 16, 0x90 - .type mcl_fp_montRed4L,@function -mcl_fp_montRed4L: # @mcl_fp_montRed4L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rdi, -56(%rsp) # 8-byte Spill - movq -8(%rcx), %rax - movq %rax, -40(%rsp) # 8-byte Spill - movq (%rcx), %rdi - movq %rdi, -48(%rsp) # 8-byte Spill - movq (%rsi), %r12 - movq %r12, %rbx - imulq %rax, %rbx - movq %rax, %r9 - movq 24(%rcx), %rdx - movq %rdx, -8(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq %rdx - movq %rax, %r11 - movq %rdx, %r8 - movq 16(%rcx), %rbp - movq %rbp, -32(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq %rbp - movq %rbp, %r13 - movq %rax, %r14 - movq %rdx, %r10 - movq 8(%rcx), %rcx - movq %rcx, -24(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq %rcx - movq %rcx, %rbp - movq %rdx, %r15 - movq %rax, %rcx - movq %rbx, %rax - mulq %rdi - movq %rdx, %rbx - addq %rcx, %rbx - adcq %r14, %r15 - adcq %r11, %r10 - adcq $0, %r8 - movq 56(%rsi), %rcx - movq 48(%rsi), %rdx - addq %r12, %rax - movq 40(%rsi), %rax - adcq 8(%rsi), %rbx - adcq 16(%rsi), %r15 - adcq 24(%rsi), %r10 - adcq 32(%rsi), %r8 - adcq $0, %rax - movq %rax, -64(%rsp) # 8-byte Spill - adcq $0, %rdx - movq %rdx, %r12 - adcq $0, %rcx - movq %rcx, -16(%rsp) # 8-byte Spill - sbbq %rdi, %rdi - andl $1, %edi - movq %rbx, %rsi - imulq %r9, %rsi - movq %rsi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -72(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq %r13 - movq %rdx, %r14 - movq %rax, %r9 - movq %rsi, %rax - mulq %rbp - movq %rdx, %rcx - movq %rax, %rbp - movq %rsi, %rax - movq -48(%rsp), %r13 # 8-byte Reload - mulq %r13 - movq %rdx, %rsi 
- addq %rbp, %rsi - adcq %r9, %rcx - adcq -72(%rsp), %r14 # 8-byte Folded Reload - adcq $0, %r11 - addq %rbx, %rax - adcq %r15, %rsi - adcq %r10, %rcx - adcq %r8, %r14 - adcq -64(%rsp), %r11 # 8-byte Folded Reload - adcq $0, %r12 - movq %r12, -64(%rsp) # 8-byte Spill - movq -16(%rsp), %rbp # 8-byte Reload - adcq $0, %rbp - adcq $0, %rdi - movq %rsi, %rbx - imulq -40(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, %rax - movq -8(%rsp), %r12 # 8-byte Reload - mulq %r12 - movq %rdx, %r8 - movq %rax, -16(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, -72(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %r9 - movq %rbx, %rax - mulq %r13 - movq %rdx, %rbx - addq %r9, %rbx - adcq -72(%rsp), %r15 # 8-byte Folded Reload - adcq -16(%rsp), %r10 # 8-byte Folded Reload - adcq $0, %r8 - addq %rsi, %rax - adcq %rcx, %rbx - adcq %r14, %r15 - adcq %r11, %r10 - adcq -64(%rsp), %r8 # 8-byte Folded Reload - adcq $0, %rbp - movq %rbp, -16(%rsp) # 8-byte Spill - adcq $0, %rdi - movq -40(%rsp), %rcx # 8-byte Reload - imulq %rbx, %rcx - movq %rcx, %rax - mulq %r12 - movq %rdx, %r13 - movq %rax, -40(%rsp) # 8-byte Spill - movq %rcx, %rax - movq -32(%rsp), %r14 # 8-byte Reload - mulq %r14 - movq %rdx, %r11 - movq %rax, %r12 - movq %rcx, %rax - movq %rcx, %r9 - movq -24(%rsp), %rsi # 8-byte Reload - mulq %rsi - movq %rdx, %rbp - movq %rax, %rcx - movq %r9, %rax - movq -48(%rsp), %r9 # 8-byte Reload - mulq %r9 - addq %rcx, %rdx - adcq %r12, %rbp - adcq -40(%rsp), %r11 # 8-byte Folded Reload - adcq $0, %r13 - addq %rbx, %rax - adcq %r15, %rdx - adcq %r10, %rbp - adcq %r8, %r11 - adcq -16(%rsp), %r13 # 8-byte Folded Reload - adcq $0, %rdi - movq %rdx, %rax - subq %r9, %rax - movq %rbp, %rcx - sbbq %rsi, %rcx - movq %r11, %rbx - sbbq %r14, %rbx - movq %r13, %rsi - sbbq -8(%rsp), %rsi # 8-byte Folded Reload - sbbq $0, %rdi - andl $1, %edi - cmovneq %r13, %rsi - testb %dil, %dil - cmovneq %rdx, %rax - movq -56(%rsp), %rdx # 8-byte Reload - movq %rax, (%rdx) - cmovneq %rbp, %rcx - movq %rcx, 8(%rdx) - cmovneq %r11, %rbx - movq %rbx, 16(%rdx) - movq %rsi, 24(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end55: - .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L - - .globl mcl_fp_addPre4L - .align 16, 0x90 - .type mcl_fp_addPre4L,@function -mcl_fp_addPre4L: # @mcl_fp_addPre4L -# BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rdx), %rax - movq (%rdx), %rcx - movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rax - movq %rcx, (%rdi) - movq %rdx, 8(%rdi) - movq %rax, 16(%rdi) - adcq %r8, %r9 - movq %r9, 24(%rdi) - sbbq %rax, %rax - andl $1, %eax - retq -.Lfunc_end56: - .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L - - .globl mcl_fp_subPre4L - .align 16, 0x90 - .type mcl_fp_subPre4L,@function -mcl_fp_subPre4L: # @mcl_fp_subPre4L -# BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rsi), %r10 - movq (%rsi), %rcx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rcx - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %r10 - movq %rcx, (%rdi) - movq %rsi, 8(%rdi) - movq %r10, 16(%rdi) - sbbq %r8, %r9 - movq %r9, 24(%rdi) - sbbq $0, %rax - andl $1, %eax - retq -.Lfunc_end57: - .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L - - .globl mcl_fp_shr1_4L - .align 16, 0x90 - .type mcl_fp_shr1_4L,@function -mcl_fp_shr1_4L: # @mcl_fp_shr1_4L -# BB#0: - movq 24(%rsi), %rax - movq 16(%rsi), %rcx - movq (%rsi), %rdx - movq 
8(%rsi), %rsi - shrdq $1, %rsi, %rdx - movq %rdx, (%rdi) - shrdq $1, %rcx, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rax, %rcx - movq %rcx, 16(%rdi) - shrq %rax - movq %rax, 24(%rdi) - retq -.Lfunc_end58: - .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L - - .globl mcl_fp_add4L - .align 16, 0x90 - .type mcl_fp_add4L,@function -mcl_fp_add4L: # @mcl_fp_add4L -# BB#0: - movq 24(%rdx), %r10 - movq 24(%rsi), %r8 - movq 16(%rdx), %r9 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r9 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r9, 16(%rdi) - adcq %r10, %r8 - movq %r8, 24(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq 16(%rcx), %r9 - sbbq 24(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne .LBB59_2 -# BB#1: # %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r9, 16(%rdi) - movq %r8, 24(%rdi) -.LBB59_2: # %carry - retq -.Lfunc_end59: - .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L - - .globl mcl_fp_addNF4L - .align 16, 0x90 - .type mcl_fp_addNF4L,@function -mcl_fp_addNF4L: # @mcl_fp_addNF4L -# BB#0: - pushq %rbx - movq 24(%rdx), %r8 - movq 16(%rdx), %r9 - movq (%rdx), %r11 - movq 8(%rdx), %r10 - addq (%rsi), %r11 - adcq 8(%rsi), %r10 - adcq 16(%rsi), %r9 - adcq 24(%rsi), %r8 - movq %r11, %rsi - subq (%rcx), %rsi - movq %r10, %rdx - sbbq 8(%rcx), %rdx - movq %r9, %rax - sbbq 16(%rcx), %rax - movq %r8, %rbx - sbbq 24(%rcx), %rbx - testq %rbx, %rbx - cmovsq %r11, %rsi - movq %rsi, (%rdi) - cmovsq %r10, %rdx - movq %rdx, 8(%rdi) - cmovsq %r9, %rax - movq %rax, 16(%rdi) - cmovsq %r8, %rbx - movq %rbx, 24(%rdi) - popq %rbx - retq -.Lfunc_end60: - .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L - - .globl mcl_fp_sub4L - .align 16, 0x90 - .type mcl_fp_sub4L,@function -mcl_fp_sub4L: # @mcl_fp_sub4L -# BB#0: - movq 24(%rdx), %r10 - movq 24(%rsi), %r8 - movq 16(%rsi), %r9 - movq (%rsi), %rax - movq 8(%rsi), %r11 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r11 - sbbq 16(%rdx), %r9 - movq %rax, (%rdi) - movq %r11, 8(%rdi) - movq %r9, 16(%rdi) - sbbq %r10, %r8 - movq %r8, 24(%rdi) - sbbq $0, %rsi - testb $1, %sil - jne .LBB61_2 -# BB#1: # %nocarry - retq -.LBB61_2: # %carry - movq 24(%rcx), %r10 - movq 8(%rcx), %rsi - movq 16(%rcx), %rdx - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %r11, %rsi - movq %rsi, 8(%rdi) - adcq %r9, %rdx - movq %rdx, 16(%rdi) - adcq %r8, %r10 - movq %r10, 24(%rdi) - retq -.Lfunc_end61: - .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L - - .globl mcl_fp_subNF4L - .align 16, 0x90 - .type mcl_fp_subNF4L,@function -mcl_fp_subNF4L: # @mcl_fp_subNF4L -# BB#0: - pushq %rbx - movq 24(%rsi), %r11 - movq 16(%rsi), %r8 - movq (%rsi), %r9 - movq 8(%rsi), %r10 - subq (%rdx), %r9 - sbbq 8(%rdx), %r10 - sbbq 16(%rdx), %r8 - sbbq 24(%rdx), %r11 - movq %r11, %rdx - sarq $63, %rdx - movq 24(%rcx), %rsi - andq %rdx, %rsi - movq 16(%rcx), %rax - andq %rdx, %rax - movq 8(%rcx), %rbx - andq %rdx, %rbx - andq (%rcx), %rdx - addq %r9, %rdx - movq %rdx, (%rdi) - adcq %r10, %rbx - movq %rbx, 8(%rdi) - adcq %r8, %rax - movq %rax, 16(%rdi) - adcq %r11, %rsi - movq %rsi, 24(%rdi) - popq %rbx - retq -.Lfunc_end62: - .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L - - .globl mcl_fpDbl_add4L - .align 16, 0x90 - .type mcl_fpDbl_add4L,@function -mcl_fpDbl_add4L: # @mcl_fpDbl_add4L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r9 - movq 56(%rsi), %r8 - movq 48(%rdx), %r10 - movq 48(%rsi), %r12 - movq 40(%rdx), %r11 - movq 32(%rdx), %r14 - movq 
24(%rdx), %r15 - movq 16(%rdx), %rbx - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rbx - movq 40(%rsi), %r13 - movq 24(%rsi), %rbp - movq 32(%rsi), %rsi - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %rbx, 16(%rdi) - adcq %r15, %rbp - movq %rbp, 24(%rdi) - adcq %r14, %rsi - adcq %r11, %r13 - adcq %r10, %r12 - adcq %r9, %r8 - sbbq %rax, %rax - andl $1, %eax - movq %rsi, %rdx - subq (%rcx), %rdx - movq %r13, %rbp - sbbq 8(%rcx), %rbp - movq %r12, %rbx - sbbq 16(%rcx), %rbx - movq %r8, %r9 - sbbq 24(%rcx), %r9 - sbbq $0, %rax - andl $1, %eax - cmovneq %rsi, %rdx - movq %rdx, 32(%rdi) - testb %al, %al - cmovneq %r13, %rbp - movq %rbp, 40(%rdi) - cmovneq %r12, %rbx - movq %rbx, 48(%rdi) - cmovneq %r8, %r9 - movq %r9, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end63: - .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L - - .globl mcl_fpDbl_sub4L - .align 16, 0x90 - .type mcl_fpDbl_sub4L,@function -mcl_fpDbl_sub4L: # @mcl_fpDbl_sub4L -# BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r9 - movq 56(%rsi), %r8 - movq 48(%rdx), %r10 - movq 24(%rdx), %r11 - movq (%rsi), %rbx - xorl %eax, %eax - subq (%rdx), %rbx - movq %rbx, (%rdi) - movq 8(%rsi), %rbx - sbbq 8(%rdx), %rbx - movq %rbx, 8(%rdi) - movq 16(%rsi), %rbx - sbbq 16(%rdx), %rbx - movq %rbx, 16(%rdi) - movq 24(%rsi), %rbx - sbbq %r11, %rbx - movq 40(%rdx), %r11 - movq 32(%rdx), %rdx - movq %rbx, 24(%rdi) - movq 32(%rsi), %r12 - sbbq %rdx, %r12 - movq 48(%rsi), %r14 - movq 40(%rsi), %r15 - sbbq %r11, %r15 - sbbq %r10, %r14 - sbbq %r9, %r8 - movl $0, %edx - sbbq $0, %rdx - andl $1, %edx - movq (%rcx), %rsi - cmoveq %rax, %rsi - testb %dl, %dl - movq 16(%rcx), %rdx - cmoveq %rax, %rdx - movq 24(%rcx), %rbx - cmoveq %rax, %rbx - cmovneq 8(%rcx), %rax - addq %r12, %rsi - movq %rsi, 32(%rdi) - adcq %r15, %rax - movq %rax, 40(%rdi) - adcq %r14, %rdx - movq %rdx, 48(%rdi) - adcq %r8, %rbx - movq %rbx, 56(%rdi) - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq -.Lfunc_end64: - .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L - - .globl mcl_fp_mulUnitPre5L - .align 16, 0x90 - .type mcl_fp_mulUnitPre5L,@function -mcl_fp_mulUnitPre5L: # @mcl_fp_mulUnitPre5L -# BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rcx, %rax - mulq 32(%rsi) - movq %rdx, %r8 - movq %rax, %r9 - movq %rcx, %rax - mulq 24(%rsi) - movq %rdx, %r10 - movq %rax, %r11 - movq %rcx, %rax - mulq 16(%rsi) - movq %rdx, %r15 - movq %rax, %r14 - movq %rcx, %rax - mulq 8(%rsi) - movq %rdx, %rbx - movq %rax, %r12 - movq %rcx, %rax - mulq (%rsi) - movq %rax, (%rdi) - addq %r12, %rdx - movq %rdx, 8(%rdi) - adcq %r14, %rbx - movq %rbx, 16(%rdi) - adcq %r11, %r15 - movq %r15, 24(%rdi) - adcq %r9, %r10 - movq %r10, 32(%rdi) - adcq $0, %r8 - movq %r8, 40(%rdi) - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq -.Lfunc_end65: - .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L - - .globl mcl_fpDbl_mulPre5L - .align 16, 0x90 - .type mcl_fpDbl_mulPre5L,@function -mcl_fpDbl_mulPre5L: # @mcl_fpDbl_mulPre5L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rsi, %r9 - movq %rdi, -8(%rsp) # 8-byte Spill - movq (%r9), %rax - movq %rax, -24(%rsp) # 8-byte Spill - movq 8(%r9), %rbx - movq %rbx, -48(%rsp) # 8-byte Spill - movq (%rdx), %rbp - movq %rdx, %r8 - mulq %rbp - movq %rdx, -32(%rsp) # 8-byte Spill - movq 16(%r9), %r13 - movq 24(%r9), 
%r14 - movq 32(%r9), %r15 - movq %rax, (%rdi) - movq %r15, %rax - mulq %rbp - movq %rdx, %r10 - movq %rax, -40(%rsp) # 8-byte Spill - movq %r14, %rax - mulq %rbp - movq %rdx, %r12 - movq %rax, %r11 - movq %r13, %rax - mulq %rbp - movq %rdx, %rcx - movq %rax, %rsi - movq %rbx, %rax - mulq %rbp - movq %rdx, %rbp - movq %rax, %rdi - addq -32(%rsp), %rdi # 8-byte Folded Reload - adcq %rsi, %rbp - adcq %r11, %rcx - adcq -40(%rsp), %r12 # 8-byte Folded Reload - adcq $0, %r10 - movq 8(%r8), %r11 - movq %r15, %rax - mulq %r11 - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, %rsi - movq %r14, %rax - mulq %r11 - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, %r15 - movq %r13, %rax - mulq %r11 - movq %rdx, %r8 - movq %rax, %r13 - movq -48(%rsp), %rax # 8-byte Reload - mulq %r11 - movq %rdx, %r14 - movq %rax, %rbx - movq -24(%rsp), %rax # 8-byte Reload - mulq %r11 - addq %rdi, %rax - movq -8(%rsp), %rdi # 8-byte Reload - movq %rax, 8(%rdi) - adcq %rbp, %rbx - adcq %rcx, %r13 - adcq %r12, %r15 - adcq %r10, %rsi - sbbq %rcx, %rcx - andl $1, %ecx - addq %rdx, %rbx - adcq %r14, %r13 - adcq %r8, %r15 - adcq -40(%rsp), %rsi # 8-byte Folded Reload - adcq -32(%rsp), %rcx # 8-byte Folded Reload - movq 32(%r9), %rax - movq %rax, -40(%rsp) # 8-byte Spill - movq -16(%rsp), %rdi # 8-byte Reload - movq 16(%rdi), %r12 - mulq %r12 - movq %rax, %r11 - movq %rdx, -24(%rsp) # 8-byte Spill - movq 24(%r9), %rax - movq %rax, -72(%rsp) # 8-byte Spill - mulq %r12 - movq %rax, %r10 - movq %rdx, -32(%rsp) # 8-byte Spill - movq 16(%r9), %rax - movq %rax, -80(%rsp) # 8-byte Spill - mulq %r12 - movq %rax, %r8 - movq %rdx, -48(%rsp) # 8-byte Spill - movq (%r9), %r14 - movq 8(%r9), %rax - movq %rax, -56(%rsp) # 8-byte Spill - mulq %r12 - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, %rbp - movq %r14, %rax - mulq %r12 - movq %rdx, -88(%rsp) # 8-byte Spill - addq %rbx, %rax - movq -8(%rsp), %rbx # 8-byte Reload - movq %rax, 16(%rbx) - adcq %r13, %rbp - adcq %r15, %r8 - adcq %rsi, %r10 - adcq %rcx, %r11 - sbbq %rcx, %rcx - movq 24(%rdi), %rsi - movq -40(%rsp), %rax # 8-byte Reload - mulq %rsi - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, %r13 - movq -56(%rsp), %rax # 8-byte Reload - mulq %rsi - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, %r12 - movq %r14, %rax - mulq %rsi - movq %rdx, %r15 - movq %rax, %rdi - movq -72(%rsp), %rax # 8-byte Reload - mulq %rsi - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, %r14 - movq -80(%rsp), %rax # 8-byte Reload - mulq %rsi - andl $1, %ecx - addq -88(%rsp), %rbp # 8-byte Folded Reload - adcq -64(%rsp), %r8 # 8-byte Folded Reload - adcq -48(%rsp), %r10 # 8-byte Folded Reload - adcq -32(%rsp), %r11 # 8-byte Folded Reload - adcq -24(%rsp), %rcx # 8-byte Folded Reload - addq %rdi, %rbp - movq %rbp, 24(%rbx) - adcq %r12, %r8 - adcq %rax, %r10 - adcq %r14, %r11 - adcq %r13, %rcx - sbbq %rsi, %rsi - andl $1, %esi - addq %r15, %r8 - adcq -56(%rsp), %r10 # 8-byte Folded Reload - adcq %rdx, %r11 - adcq -72(%rsp), %rcx # 8-byte Folded Reload - adcq -40(%rsp), %rsi # 8-byte Folded Reload - movq -16(%rsp), %rax # 8-byte Reload - movq 32(%rax), %rdi - movq %rdi, %rax - mulq 32(%r9) - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rax, %r15 - movq %rdi, %rax - mulq 24(%r9) - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, %r13 - movq %rdi, %rax - mulq 16(%r9) - movq %rdx, %r14 - movq %rax, %rbx - movq %rdi, %rax - mulq 8(%r9) - movq %rdx, %r12 - movq %rax, %rbp - movq %rdi, %rax - mulq (%r9) - addq %r8, %rax - movq -8(%rsp), %rdi # 8-byte Reload - movq %rax, 32(%rdi) - adcq %r10, %rbp - adcq 
%r11, %rbx - adcq %rcx, %r13 - adcq %rsi, %r15 - sbbq %rax, %rax - andl $1, %eax - addq %rdx, %rbp - movq %rbp, 40(%rdi) - adcq %r12, %rbx - movq %rbx, 48(%rdi) - adcq %r14, %r13 - movq %r13, 56(%rdi) - adcq -24(%rsp), %r15 # 8-byte Folded Reload - movq %r15, 64(%rdi) - adcq -16(%rsp), %rax # 8-byte Folded Reload - movq %rax, 72(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end66: - .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L - - .globl mcl_fpDbl_sqrPre5L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre5L,@function -mcl_fpDbl_sqrPre5L: # @mcl_fpDbl_sqrPre5L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, -8(%rsp) # 8-byte Spill - movq 32(%rsi), %r11 - movq (%rsi), %r13 - movq 8(%rsi), %rbx - movq %r11, %rax - mulq %rbx - movq %rax, -40(%rsp) # 8-byte Spill - movq %rdx, -16(%rsp) # 8-byte Spill - movq 24(%rsi), %rbp - movq %rbp, %rax - mulq %rbx - movq %rax, -48(%rsp) # 8-byte Spill - movq %rdx, -24(%rsp) # 8-byte Spill - movq 16(%rsi), %rcx - movq %rcx, %rax - mulq %rbx - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, -56(%rsp) # 8-byte Spill - movq %r11, %rax - mulq %r13 - movq %rdx, %r8 - movq %rax, -72(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq %r13 - movq %rdx, %r9 - movq %rax, %rbp - movq %rcx, %rax - mulq %r13 - movq %rdx, %r10 - movq %rax, %r12 - movq %rbx, %rax - mulq %rbx - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, %r15 - movq %rbx, %rax - mulq %r13 - movq %rdx, %rbx - movq %rax, %r14 - movq %r13, %rax - mulq %r13 - movq %rax, (%rdi) - addq %r14, %rdx - adcq %rbx, %r12 - adcq %rbp, %r10 - adcq -72(%rsp), %r9 # 8-byte Folded Reload - adcq $0, %r8 - addq %r14, %rdx - movq %rdx, 8(%rdi) - adcq %r15, %r12 - adcq -56(%rsp), %r10 # 8-byte Folded Reload - adcq -48(%rsp), %r9 # 8-byte Folded Reload - adcq -40(%rsp), %r8 # 8-byte Folded Reload - sbbq %rdi, %rdi - andl $1, %edi - addq %rbx, %r12 - adcq -64(%rsp), %r10 # 8-byte Folded Reload - adcq -32(%rsp), %r9 # 8-byte Folded Reload - adcq -24(%rsp), %r8 # 8-byte Folded Reload - adcq -16(%rsp), %rdi # 8-byte Folded Reload - movq %r11, %rax - mulq %rcx - movq %rax, %r11 - movq %rdx, -16(%rsp) # 8-byte Spill - movq 24(%rsi), %rbx - movq %rbx, %rax - mulq %rcx - movq %rax, %r14 - movq %rdx, -24(%rsp) # 8-byte Spill - movq (%rsi), %rbp - movq %rbp, -32(%rsp) # 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -40(%rsp) # 8-byte Spill - mulq %rcx - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, %r15 - movq %rbp, %rax - mulq %rcx - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, %rbp - movq %rcx, %rax - mulq %rcx - movq %rax, %r13 - addq %r12, %rbp - movq -8(%rsp), %rax # 8-byte Reload - movq %rbp, 16(%rax) - adcq %r10, %r15 - adcq %r9, %r13 - adcq %r8, %r14 - adcq %rdi, %r11 - sbbq %r10, %r10 - andl $1, %r10d - addq -56(%rsp), %r15 # 8-byte Folded Reload - adcq -48(%rsp), %r13 # 8-byte Folded Reload - adcq %rdx, %r14 - adcq -24(%rsp), %r11 # 8-byte Folded Reload - adcq -16(%rsp), %r10 # 8-byte Folded Reload - movq -40(%rsp), %rax # 8-byte Reload - mulq %rbx - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, %r8 - movq -32(%rsp), %rax # 8-byte Reload - mulq %rbx - movq %rax, %rdi - movq %rdx, -40(%rsp) # 8-byte Spill - movq 16(%rsi), %rbp - movq %rbp, -16(%rsp) # 8-byte Spill - movq 32(%rsi), %rcx - movq %rcx, %rax - mulq %rbx - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, %r9 - movq %rbp, %rax - mulq %rbx - movq %rdx, %rbp - movq %rax, %r12 - movq %rbx, %rax - mulq %rbx - movq %rax, %rbx - addq %r15, %rdi - movq -8(%rsp), 
%r15 # 8-byte Reload - movq %rdi, 24(%r15) - adcq %r13, %r8 - adcq %r14, %r12 - adcq %r11, %rbx - adcq %r10, %r9 - sbbq %r10, %r10 - andl $1, %r10d - addq -40(%rsp), %r8 # 8-byte Folded Reload - adcq -24(%rsp), %r12 # 8-byte Folded Reload - adcq %rbp, %rbx - adcq %rdx, %r9 - adcq -32(%rsp), %r10 # 8-byte Folded Reload - movq %rcx, %rax - mulq 24(%rsi) - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, %rbp - movq %rcx, %rax - mulq 8(%rsi) - movq %rdx, %r14 - movq %rax, %rdi - movq %rcx, %rax - mulq (%rsi) - movq %rdx, %r13 - movq %rax, %rsi - movq %rcx, %rax - mulq %rcx - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, %r11 - movq -16(%rsp), %rax # 8-byte Reload - mulq %rcx - addq %r8, %rsi - movq %rsi, 32(%r15) - adcq %r12, %rdi - adcq %rbx, %rax - adcq %r9, %rbp - adcq %r10, %r11 - sbbq %rcx, %rcx - andl $1, %ecx - addq %r13, %rdi - movq %rdi, 40(%r15) - adcq %r14, %rax - movq %rax, 48(%r15) - adcq %rdx, %rbp - movq %rbp, 56(%r15) - adcq -24(%rsp), %r11 # 8-byte Folded Reload - movq %r11, 64(%r15) - adcq -32(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 72(%r15) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end67: - .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L - - .globl mcl_fp_mont5L - .align 16, 0x90 - .type mcl_fp_mont5L,@function -mcl_fp_mont5L: # @mcl_fp_mont5L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - pushq %rax - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rdi, -104(%rsp) # 8-byte Spill - movq 32(%rsi), %rax - movq %rax, -56(%rsp) # 8-byte Spill - movq (%rdx), %rdi - mulq %rdi - movq %rax, %r8 - movq %rdx, %r14 - movq 24(%rsi), %rax - movq %rax, -64(%rsp) # 8-byte Spill - mulq %rdi - movq %rax, %r9 - movq %rdx, %r12 - movq 16(%rsi), %rax - movq %rax, -72(%rsp) # 8-byte Spill - mulq %rdi - movq %rax, %r10 - movq %rdx, %rbp - movq (%rsi), %rbx - movq %rbx, -80(%rsp) # 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -88(%rsp) # 8-byte Spill - mulq %rdi - movq %rdx, %r11 - movq %rax, %rsi - movq %rbx, %rax - mulq %rdi - movq %rax, -128(%rsp) # 8-byte Spill - movq %rdx, %r15 - addq %rsi, %r15 - adcq %r10, %r11 - adcq %r9, %rbp - movq %rbp, -96(%rsp) # 8-byte Spill - adcq %r8, %r12 - movq %r12, -112(%rsp) # 8-byte Spill - adcq $0, %r14 - movq %r14, -120(%rsp) # 8-byte Spill - movq -8(%rcx), %rdx - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, %rbp - imulq %rdx, %rbp - movq (%rcx), %r9 - movq %r9, -32(%rsp) # 8-byte Spill - movq 32(%rcx), %rdx - movq %rdx, (%rsp) # 8-byte Spill - movq 24(%rcx), %rsi - movq %rsi, -8(%rsp) # 8-byte Spill - movq 16(%rcx), %rbx - movq %rbx, -16(%rsp) # 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -24(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq %rdx - movq %rdx, %r14 - movq %rax, %r13 - movq %rbp, %rax - mulq %rsi - movq %rdx, %rdi - movq %rax, %r10 - movq %rbp, %rax - mulq %rbx - movq %rdx, %rbx - movq %rax, %r8 - movq %rbp, %rax - mulq %rcx - movq %rdx, %rsi - movq %rax, %r12 - movq %rbp, %rax - mulq %r9 - movq %rdx, %rbp - addq %r12, %rbp - adcq %r8, %rsi - adcq %r10, %rbx - adcq %r13, %rdi - adcq $0, %r14 - addq -128(%rsp), %rax # 8-byte Folded Reload - adcq %r15, %rbp - adcq %r11, %rsi - adcq -96(%rsp), %rbx # 8-byte Folded Reload - adcq -112(%rsp), %rdi # 8-byte Folded Reload - adcq -120(%rsp), %r14 # 8-byte Folded Reload - sbbq %r9, %r9 - andl $1, %r9d - movq -48(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rcx - movq %rcx, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -96(%rsp) # 8-byte Spill - movq %rcx, %rax - 
mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -88(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r10 - movq %rcx, %rax - mulq -80(%rsp) # 8-byte Folded Reload - movq %rax, %r15 - movq %rdx, %rcx - addq %r10, %rcx - adcq -120(%rsp), %r8 # 8-byte Folded Reload - adcq -112(%rsp), %r12 # 8-byte Folded Reload - adcq -96(%rsp), %r11 # 8-byte Folded Reload - adcq $0, %r13 - addq %rbp, %r15 - adcq %rsi, %rcx - adcq %rbx, %r8 - adcq %rdi, %r12 - adcq %r14, %r11 - adcq %r9, %r13 - sbbq %rax, %rax - andl $1, %eax - movq %rax, -96(%rsp) # 8-byte Spill - movq %r15, %rsi - imulq -40(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, -120(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -128(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %rdi - movq %rsi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - addq %rdi, %rbx - adcq -128(%rsp), %r10 # 8-byte Folded Reload - adcq -120(%rsp), %r9 # 8-byte Folded Reload - adcq -112(%rsp), %rbp # 8-byte Folded Reload - adcq $0, %r14 - addq %r15, %rax - adcq %rcx, %rbx - adcq %r8, %r10 - adcq %r12, %r9 - adcq %r11, %rbp - adcq %r13, %r14 - adcq $0, -96(%rsp) # 8-byte Folded Spill - movq -48(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rcx - movq %rcx, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, -128(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -88(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r8 - movq %rcx, %rax - mulq -80(%rsp) # 8-byte Folded Reload - movq %rax, %r12 - movq %rdx, %r15 - addq %r8, %r15 - adcq -128(%rsp), %rdi # 8-byte Folded Reload - adcq -120(%rsp), %rsi # 8-byte Folded Reload - adcq -112(%rsp), %r11 # 8-byte Folded Reload - adcq $0, %r13 - addq %rbx, %r12 - adcq %r10, %r15 - adcq %r9, %rdi - adcq %rbp, %rsi - adcq %r14, %r11 - adcq -96(%rsp), %r13 # 8-byte Folded Reload - sbbq %rax, %rax - andl $1, %eax - movq %rax, -96(%rsp) # 8-byte Spill - movq %r12, %rbp - imulq -40(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r10 - movq %rbp, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r14 - movq %rbp, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - addq %r14, %rbp - adcq %r10, %rbx - adcq -120(%rsp), %rcx # 8-byte Folded Reload - adcq -112(%rsp), %r9 # 8-byte Folded Reload - adcq $0, %r8 - addq %r12, %rax - adcq %r15, %rbp - adcq %rdi, %rbx - adcq %rsi, %rcx - adcq %r11, %r9 - adcq %r13, %r8 - adcq $0, -96(%rsp) # 8-byte Folded Spill - movq -48(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rsi - movq %rsi, %rax - mulq 
-56(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, %r15 - movq %rsi, %rax - mulq -88(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r12 - movq %rsi, %rax - mulq -80(%rsp) # 8-byte Folded Reload - movq %rax, %r14 - movq %rdx, %rsi - addq %r12, %rsi - adcq %r15, %rdi - adcq -120(%rsp), %r11 # 8-byte Folded Reload - adcq -112(%rsp), %r10 # 8-byte Folded Reload - adcq $0, %r13 - addq %rbp, %r14 - adcq %rbx, %rsi - adcq %rcx, %rdi - adcq %r9, %r11 - adcq %r8, %r10 - adcq -96(%rsp), %r13 # 8-byte Folded Reload - sbbq %rax, %rax - andl $1, %eax - movq %rax, -96(%rsp) # 8-byte Spill - movq %r14, %rbp - imulq -40(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r8 - movq %rbp, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r12 - movq %rbp, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - addq %r12, %rbp - adcq %r8, %rbx - adcq -120(%rsp), %rcx # 8-byte Folded Reload - adcq -112(%rsp), %r15 # 8-byte Folded Reload - adcq $0, %r9 - addq %r14, %rax - adcq %rsi, %rbp - adcq %rdi, %rbx - adcq %r11, %rcx - adcq %r10, %r15 - adcq %r13, %r9 - movq -96(%rsp), %r14 # 8-byte Reload - adcq $0, %r14 - movq -48(%rsp), %rax # 8-byte Reload - movq 32(%rax), %rsi - movq %rsi, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, -56(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -64(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -72(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -88(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %rdi - movq %rsi, %rax - mulq -80(%rsp) # 8-byte Folded Reload - movq %rax, %r10 - movq %rdx, %r8 - addq %rdi, %r8 - adcq -72(%rsp), %r12 # 8-byte Folded Reload - adcq -64(%rsp), %r11 # 8-byte Folded Reload - adcq -56(%rsp), %r13 # 8-byte Folded Reload - movq -48(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %rbp, %r10 - adcq %rbx, %r8 - adcq %rcx, %r12 - adcq %r15, %r11 - adcq %r9, %r13 - adcq %r14, %rax - movq %rax, -48(%rsp) # 8-byte Spill - sbbq %rcx, %rcx - movq -40(%rsp), %rsi # 8-byte Reload - imulq %r10, %rsi - movq %rsi, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -40(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, -56(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r15 - movq %rsi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r9 - movq %rsi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - addq %r9, %rdx - adcq %r15, %rdi - adcq -56(%rsp), %rbp # 8-byte Folded Reload - adcq -40(%rsp), %rbx # 8-byte Folded Reload - adcq $0, %r14 - andl $1, %ecx - addq %r10, %rax - adcq %r8, %rdx - adcq %r12, %rdi - adcq %r11, %rbp - adcq %r13, %rbx - adcq -48(%rsp), %r14 # 8-byte Folded Reload - adcq $0, %rcx - movq %rdx, %rax - subq -32(%rsp), 
%rax # 8-byte Folded Reload - movq %rdi, %r8 - sbbq -24(%rsp), %r8 # 8-byte Folded Reload - movq %rbp, %r9 - sbbq -16(%rsp), %r9 # 8-byte Folded Reload - movq %rbx, %r10 - sbbq -8(%rsp), %r10 # 8-byte Folded Reload - movq %r14, %r11 - sbbq (%rsp), %r11 # 8-byte Folded Reload - sbbq $0, %rcx - andl $1, %ecx - cmovneq %rbx, %r10 - testb %cl, %cl - cmovneq %rdx, %rax - movq -104(%rsp), %rcx # 8-byte Reload - movq %rax, (%rcx) - cmovneq %rdi, %r8 - movq %r8, 8(%rcx) - cmovneq %rbp, %r9 - movq %r9, 16(%rcx) - movq %r10, 24(%rcx) - cmovneq %r14, %r11 - movq %r11, 32(%rcx) - addq $8, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end68: - .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L - - .globl mcl_fp_montNF5L - .align 16, 0x90 - .type mcl_fp_montNF5L,@function -mcl_fp_montNF5L: # @mcl_fp_montNF5L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rdi, -104(%rsp) # 8-byte Spill - movq 32(%rsi), %rax - movq %rax, -48(%rsp) # 8-byte Spill - movq (%rdx), %rbx - mulq %rbx - movq %rax, %r15 - movq %rdx, %r10 - movq 24(%rsi), %rax - movq %rax, -56(%rsp) # 8-byte Spill - mulq %rbx - movq %rax, %r13 - movq %rdx, %r14 - movq 16(%rsi), %rax - movq %rax, -64(%rsp) # 8-byte Spill - mulq %rbx - movq %rax, %r8 - movq %rdx, %r9 - movq (%rsi), %rbp - movq %rbp, -80(%rsp) # 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -88(%rsp) # 8-byte Spill - mulq %rbx - movq %rdx, %r11 - movq %rax, %rdi - movq %rbp, %rax - mulq %rbx - movq %rax, -112(%rsp) # 8-byte Spill - movq %rdx, %r12 - addq %rdi, %r12 - adcq %r8, %r11 - adcq %r13, %r9 - adcq %r15, %r14 - adcq $0, %r10 - movq -8(%rcx), %rdx - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, %rsi - imulq %rdx, %rsi - movq (%rcx), %r8 - movq %r8, -96(%rsp) # 8-byte Spill - movq 32(%rcx), %rdx - movq %rdx, -8(%rsp) # 8-byte Spill - movq 24(%rcx), %rdi - movq %rdi, -16(%rsp) # 8-byte Spill - movq 16(%rcx), %rbx - movq %rbx, -24(%rsp) # 8-byte Spill - movq 8(%rcx), %rbp - movq %rbp, -72(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq %rdx - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %rcx - movq %rsi, %rax - mulq %rdi - movq %rdx, -128(%rsp) # 8-byte Spill - movq %rax, %rdi - movq %rsi, %rax - mulq %rbx - movq %rdx, %r15 - movq %rax, %rbx - movq %rsi, %rax - mulq %rbp - movq %rdx, %r13 - movq %rax, %rbp - movq %rsi, %rax - mulq %r8 - addq -112(%rsp), %rax # 8-byte Folded Reload - adcq %r12, %rbp - adcq %r11, %rbx - adcq %r9, %rdi - adcq %r14, %rcx - adcq $0, %r10 - addq %rdx, %rbp - adcq %r13, %rbx - adcq %r15, %rdi - adcq -128(%rsp), %rcx # 8-byte Folded Reload - adcq -120(%rsp), %r10 # 8-byte Folded Reload - movq -40(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rsi - movq %rsi, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, %r8 - movq %rsi, %rax - mulq -88(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, %r14 - movq %rsi, %rax - mulq -80(%rsp) # 8-byte Folded Reload - movq %rax, %rsi - movq %rdx, %r12 - addq %r14, %r12 - adcq %r8, %r11 - adcq -120(%rsp), %r9 # 8-byte Folded Reload - adcq -112(%rsp), %r15 # 8-byte Folded Reload - adcq $0, %r13 - addq %rbp, %rsi - adcq %rbx, %r12 - adcq %rdi, %r11 - adcq %rcx, %r9 - adcq %r10, %r15 - adcq $0, %r13 - movq %rsi, %rdi - imulq 
-32(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, -112(%rsp) # 8-byte Spill - movq %rax, %rbp - movq %rdi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %r14 - movq %rdi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r8 - movq %rdi, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r10 - movq %rdi, %rax - mulq -96(%rsp) # 8-byte Folded Reload - addq %rsi, %rax - adcq %r12, %r10 - adcq %r11, %r8 - adcq %r9, %r14 - adcq %r15, %rbp - adcq $0, %r13 - addq %rdx, %r10 - adcq %rbx, %r8 - adcq %rcx, %r14 - adcq -120(%rsp), %rbp # 8-byte Folded Reload - adcq -112(%rsp), %r13 # 8-byte Folded Reload - movq -40(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rsi - movq %rsi, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %rbx - movq %rsi, %rax - mulq -88(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r12 - movq %rsi, %rax - mulq -80(%rsp) # 8-byte Folded Reload - movq %rax, %r11 - movq %rdx, %rsi - addq %r12, %rsi - adcq %rbx, %rcx - adcq -120(%rsp), %rdi # 8-byte Folded Reload - adcq -112(%rsp), %r9 # 8-byte Folded Reload - adcq $0, %r15 - addq %r10, %r11 - adcq %r8, %rsi - adcq %r14, %rcx - adcq %rbp, %rdi - adcq %r13, %r9 - adcq $0, %r15 - movq %r11, %rbx - imulq -32(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, -112(%rsp) # 8-byte Spill - movq %rax, %r13 - movq %rbx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %r8 - movq %rbx, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, %r10 - movq %rbx, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %rbp - movq %rbx, %rax - mulq -96(%rsp) # 8-byte Folded Reload - addq %r11, %rax - adcq %rsi, %rbp - adcq %rcx, %r10 - adcq %rdi, %r8 - adcq %r9, %r13 - adcq $0, %r15 - addq %rdx, %rbp - adcq %r12, %r10 - adcq %r14, %r8 - adcq -120(%rsp), %r13 # 8-byte Folded Reload - adcq -112(%rsp), %r15 # 8-byte Folded Reload - movq -40(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rsi - movq %rsi, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %rbx - movq %rsi, %rax - mulq -88(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r12 - movq %rsi, %rax - mulq -80(%rsp) # 8-byte Folded Reload - movq %rax, %r14 - movq %rdx, %rsi - addq %r12, %rsi - adcq %rbx, %rcx - adcq -120(%rsp), %rdi # 8-byte Folded Reload - adcq -112(%rsp), %r9 # 8-byte Folded Reload - adcq $0, %r11 - addq %rbp, %r14 - adcq %r10, %rsi - adcq %r8, %rcx - adcq %r13, %rdi - adcq %r15, %r9 - adcq $0, %r11 - movq %r14, %rbx - imulq -32(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, -112(%rsp) # 8-byte Spill - movq %rax, %r13 - movq %rbx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %r8 - movq %rbx, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq 
%rdx, %r15 - movq %rax, %r10 - movq %rbx, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %rbp - movq %rbx, %rax - mulq -96(%rsp) # 8-byte Folded Reload - addq %r14, %rax - adcq %rsi, %rbp - adcq %rcx, %r10 - adcq %rdi, %r8 - adcq %r9, %r13 - adcq $0, %r11 - addq %rdx, %rbp - adcq %r12, %r10 - adcq %r15, %r8 - adcq -120(%rsp), %r13 # 8-byte Folded Reload - adcq -112(%rsp), %r11 # 8-byte Folded Reload - movq -40(%rsp), %rax # 8-byte Reload - movq 32(%rax), %rcx - movq %rcx, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, -40(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -48(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -56(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -88(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %rsi - movq %rcx, %rax - mulq -80(%rsp) # 8-byte Folded Reload - movq %rax, %r12 - movq %rdx, %rdi - addq %rsi, %rdi - adcq -56(%rsp), %r15 # 8-byte Folded Reload - adcq -48(%rsp), %r14 # 8-byte Folded Reload - adcq -40(%rsp), %r9 # 8-byte Folded Reload - adcq $0, %rbx - addq %rbp, %r12 - adcq %r10, %rdi - adcq %r8, %r15 - adcq %r13, %r14 - adcq %r11, %r9 - adcq $0, %rbx - movq -32(%rsp), %r8 # 8-byte Reload - imulq %r12, %r8 - movq %r8, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, %rcx - movq %r8, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, %rbp - movq %r8, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, %rsi - movq %r8, %rax - movq %r8, %r13 - movq -96(%rsp), %r10 # 8-byte Reload - mulq %r10 - movq %rdx, %r11 - movq %rax, %r8 - movq %r13, %rax - movq -72(%rsp), %r13 # 8-byte Reload - mulq %r13 - addq %r12, %r8 - adcq %rdi, %rax - adcq %r15, %rsi - adcq %r14, %rbp - adcq %r9, %rcx - adcq $0, %rbx - addq %r11, %rax - adcq %rdx, %rsi - adcq -48(%rsp), %rbp # 8-byte Folded Reload - adcq -40(%rsp), %rcx # 8-byte Folded Reload - adcq -32(%rsp), %rbx # 8-byte Folded Reload - movq %rax, %r11 - subq %r10, %r11 - movq %rsi, %r10 - sbbq %r13, %r10 - movq %rbp, %r8 - sbbq -24(%rsp), %r8 # 8-byte Folded Reload - movq %rcx, %r9 - sbbq -16(%rsp), %r9 # 8-byte Folded Reload - movq %rbx, %rdx - sbbq -8(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, %rdi - sarq $63, %rdi - cmovsq %rax, %r11 - movq -104(%rsp), %rax # 8-byte Reload - movq %r11, (%rax) - cmovsq %rsi, %r10 - movq %r10, 8(%rax) - cmovsq %rbp, %r8 - movq %r8, 16(%rax) - cmovsq %rcx, %r9 - movq %r9, 24(%rax) - cmovsq %rbx, %rdx - movq %rdx, 32(%rax) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end69: - .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L - - .globl mcl_fp_montRed5L - .align 16, 0x90 - .type mcl_fp_montRed5L,@function -mcl_fp_montRed5L: # @mcl_fp_montRed5L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rdi, -80(%rsp) # 8-byte Spill - movq -8(%rcx), %rax - movq %rax, -64(%rsp) # 8-byte Spill - movq (%rcx), %rdi - movq %rdi, -24(%rsp) # 8-byte Spill - movq (%rsi), %r9 - movq %r9, %rbp - imulq %rax, %rbp - movq 32(%rcx), %rdx - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq %rdx - movq %rax, %r10 - movq %rdx, %r13 - movq 24(%rcx), %rdx - movq %rdx, -8(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq %rdx - movq %rax, %r14 - movq %rdx, %r11 - movq 16(%rcx), %rdx - 
movq %rdx, -16(%rsp) # 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -32(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq %rdx - movq %rdx, %r15 - movq %rax, %r12 - movq %rbp, %rax - mulq %rcx - movq %rdx, %r8 - movq %rax, %rbx - movq %rbp, %rax - mulq %rdi - movq %rdx, %rcx - addq %rbx, %rcx - adcq %r12, %r8 - adcq %r14, %r15 - adcq %r10, %r11 - adcq $0, %r13 - addq %r9, %rax - movq 72(%rsi), %rax - movq 64(%rsi), %rdx - adcq 8(%rsi), %rcx - adcq 16(%rsi), %r8 - adcq 24(%rsi), %r15 - adcq 32(%rsi), %r11 - adcq 40(%rsi), %r13 - movq %r13, -88(%rsp) # 8-byte Spill - movq 56(%rsi), %rdi - movq 48(%rsi), %rsi - adcq $0, %rsi - movq %rsi, -96(%rsp) # 8-byte Spill - adcq $0, %rdi - movq %rdi, -72(%rsp) # 8-byte Spill - adcq $0, %rdx - movq %rdx, -56(%rsp) # 8-byte Spill - adcq $0, %rax - movq %rax, -40(%rsp) # 8-byte Spill - sbbq %rdi, %rdi - andl $1, %edi - movq %rcx, %rsi - movq -64(%rsp), %r9 # 8-byte Reload - imulq %r9, %rsi - movq %rsi, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, -104(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %rbx - movq %rsi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %rbp - movq %rsi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - addq %rbp, %rsi - adcq %rbx, %r13 - adcq -112(%rsp), %r12 # 8-byte Folded Reload - adcq -104(%rsp), %r14 # 8-byte Folded Reload - adcq $0, %r10 - addq %rcx, %rax - adcq %r8, %rsi - adcq %r15, %r13 - adcq %r11, %r12 - adcq -88(%rsp), %r14 # 8-byte Folded Reload - adcq -96(%rsp), %r10 # 8-byte Folded Reload - adcq $0, -72(%rsp) # 8-byte Folded Spill - adcq $0, -56(%rsp) # 8-byte Folded Spill - adcq $0, -40(%rsp) # 8-byte Folded Spill - adcq $0, %rdi - movq %rsi, %rcx - imulq %r9, %rcx - movq %rcx, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -88(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -96(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, -104(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r8 - movq %rcx, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - addq %r8, %rbp - adcq -104(%rsp), %rbx # 8-byte Folded Reload - adcq -96(%rsp), %r15 # 8-byte Folded Reload - adcq -88(%rsp), %r11 # 8-byte Folded Reload - adcq $0, %r9 - addq %rsi, %rax - adcq %r13, %rbp - adcq %r12, %rbx - adcq %r14, %r15 - adcq %r10, %r11 - adcq -72(%rsp), %r9 # 8-byte Folded Reload - adcq $0, -56(%rsp) # 8-byte Folded Spill - adcq $0, -40(%rsp) # 8-byte Folded Spill - adcq $0, %rdi - movq %rbp, %rcx - imulq -64(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, %rax - movq -48(%rsp), %rsi # 8-byte Reload - mulq %rsi - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, -88(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -96(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %r10 - movq %rcx, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %r8 - movq %rcx, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - addq %r8, %rcx - adcq %r10, %r13 - adcq -96(%rsp), %r12 # 8-byte Folded Reload - adcq -88(%rsp), %r14 # 8-byte Folded Reload - 
movq -72(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %rbp, %rax - adcq %rbx, %rcx - adcq %r15, %r13 - adcq %r11, %r12 - adcq %r9, %r14 - adcq -56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - adcq $0, -40(%rsp) # 8-byte Folded Spill - adcq $0, %rdi - movq -64(%rsp), %rbx # 8-byte Reload - imulq %rcx, %rbx - movq %rbx, %rax - mulq %rsi - movq %rdx, %rsi - movq %rax, -56(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -64(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r15 - movq %rbx, %rax - movq %rbx, %r10 - movq -32(%rsp), %r11 # 8-byte Reload - mulq %r11 - movq %rdx, %rbx - movq %rax, %r8 - movq %r10, %rax - movq -24(%rsp), %r10 # 8-byte Reload - mulq %r10 - addq %r8, %rdx - adcq %r15, %rbx - adcq -64(%rsp), %rbp # 8-byte Folded Reload - adcq -56(%rsp), %r9 # 8-byte Folded Reload - adcq $0, %rsi - addq %rcx, %rax - adcq %r13, %rdx - adcq %r12, %rbx - adcq %r14, %rbp - adcq -72(%rsp), %r9 # 8-byte Folded Reload - adcq -40(%rsp), %rsi # 8-byte Folded Reload - adcq $0, %rdi - movq %rdx, %rax - subq %r10, %rax - movq %rbx, %rcx - sbbq %r11, %rcx - movq %rbp, %r8 - sbbq -16(%rsp), %r8 # 8-byte Folded Reload - movq %r9, %r10 - sbbq -8(%rsp), %r10 # 8-byte Folded Reload - movq %rsi, %r11 - sbbq -48(%rsp), %r11 # 8-byte Folded Reload - sbbq $0, %rdi - andl $1, %edi - cmovneq %rsi, %r11 - testb %dil, %dil - cmovneq %rdx, %rax - movq -80(%rsp), %rdx # 8-byte Reload - movq %rax, (%rdx) - cmovneq %rbx, %rcx - movq %rcx, 8(%rdx) - cmovneq %rbp, %r8 - movq %r8, 16(%rdx) - cmovneq %r9, %r10 - movq %r10, 24(%rdx) - movq %r11, 32(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end70: - .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L - - .globl mcl_fp_addPre5L - .align 16, 0x90 - .type mcl_fp_addPre5L,@function -mcl_fp_addPre5L: # @mcl_fp_addPre5L -# BB#0: - movq 32(%rdx), %r8 - movq 24(%rdx), %r9 - movq 24(%rsi), %r11 - movq 32(%rsi), %r10 - movq 16(%rdx), %rcx - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rcx - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %rcx, 16(%rdi) - adcq %r9, %r11 - movq %r11, 24(%rdi) - adcq %r8, %r10 - movq %r10, 32(%rdi) - sbbq %rax, %rax - andl $1, %eax - retq -.Lfunc_end71: - .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L - - .globl mcl_fp_subPre5L - .align 16, 0x90 - .type mcl_fp_subPre5L,@function -mcl_fp_subPre5L: # @mcl_fp_subPre5L -# BB#0: - pushq %rbx - movq 32(%rsi), %r10 - movq 24(%rdx), %r8 - movq 32(%rdx), %r9 - movq 24(%rsi), %r11 - movq 16(%rsi), %rcx - movq (%rsi), %rbx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rbx - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %rcx - movq %rbx, (%rdi) - movq %rsi, 8(%rdi) - movq %rcx, 16(%rdi) - sbbq %r8, %r11 - movq %r11, 24(%rdi) - sbbq %r9, %r10 - movq %r10, 32(%rdi) - sbbq $0, %rax - andl $1, %eax - popq %rbx - retq -.Lfunc_end72: - .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L - - .globl mcl_fp_shr1_5L - .align 16, 0x90 - .type mcl_fp_shr1_5L,@function -mcl_fp_shr1_5L: # @mcl_fp_shr1_5L -# BB#0: - movq 32(%rsi), %r8 - movq 24(%rsi), %rcx - movq 16(%rsi), %rdx - movq (%rsi), %rax - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rax - movq %rax, (%rdi) - shrdq $1, %rdx, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rcx, %rdx - movq %rdx, 16(%rdi) - shrdq $1, %r8, %rcx - movq %rcx, 24(%rdi) - shrq %r8 - movq %r8, 32(%rdi) - retq -.Lfunc_end73: - .size mcl_fp_shr1_5L, 
.Lfunc_end73-mcl_fp_shr1_5L - - .globl mcl_fp_add5L - .align 16, 0x90 - .type mcl_fp_add5L,@function -mcl_fp_add5L: # @mcl_fp_add5L -# BB#0: - pushq %rbx - movq 32(%rdx), %r11 - movq 24(%rdx), %rbx - movq 24(%rsi), %r9 - movq 32(%rsi), %r8 - movq 16(%rdx), %r10 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r10 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r10, 16(%rdi) - adcq %rbx, %r9 - movq %r9, 24(%rdi) - adcq %r11, %r8 - movq %r8, 32(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq 16(%rcx), %r10 - sbbq 24(%rcx), %r9 - sbbq 32(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne .LBB74_2 -# BB#1: # %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r10, 16(%rdi) - movq %r9, 24(%rdi) - movq %r8, 32(%rdi) -.LBB74_2: # %carry - popq %rbx - retq -.Lfunc_end74: - .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L - - .globl mcl_fp_addNF5L - .align 16, 0x90 - .type mcl_fp_addNF5L,@function -mcl_fp_addNF5L: # @mcl_fp_addNF5L -# BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq 32(%rdx), %r8 - movq 24(%rdx), %r9 - movq 16(%rdx), %r10 - movq (%rdx), %r14 - movq 8(%rdx), %r11 - addq (%rsi), %r14 - adcq 8(%rsi), %r11 - adcq 16(%rsi), %r10 - adcq 24(%rsi), %r9 - adcq 32(%rsi), %r8 - movq %r14, %rsi - subq (%rcx), %rsi - movq %r11, %rdx - sbbq 8(%rcx), %rdx - movq %r10, %rbx - sbbq 16(%rcx), %rbx - movq %r9, %r15 - sbbq 24(%rcx), %r15 - movq %r8, %rax - sbbq 32(%rcx), %rax - movq %rax, %rcx - sarq $63, %rcx - cmovsq %r14, %rsi - movq %rsi, (%rdi) - cmovsq %r11, %rdx - movq %rdx, 8(%rdi) - cmovsq %r10, %rbx - movq %rbx, 16(%rdi) - cmovsq %r9, %r15 - movq %r15, 24(%rdi) - cmovsq %r8, %rax - movq %rax, 32(%rdi) - popq %rbx - popq %r14 - popq %r15 - retq -.Lfunc_end75: - .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L - - .globl mcl_fp_sub5L - .align 16, 0x90 - .type mcl_fp_sub5L,@function -mcl_fp_sub5L: # @mcl_fp_sub5L -# BB#0: - pushq %r14 - pushq %rbx - movq 32(%rsi), %r8 - movq 24(%rdx), %r11 - movq 32(%rdx), %r14 - movq 24(%rsi), %r9 - movq 16(%rsi), %r10 - movq (%rsi), %rax - movq 8(%rsi), %rsi - xorl %ebx, %ebx - subq (%rdx), %rax - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %r10 - movq %rax, (%rdi) - movq %rsi, 8(%rdi) - movq %r10, 16(%rdi) - sbbq %r11, %r9 - movq %r9, 24(%rdi) - sbbq %r14, %r8 - movq %r8, 32(%rdi) - sbbq $0, %rbx - testb $1, %bl - je .LBB76_2 -# BB#1: # %carry - movq 32(%rcx), %r11 - movq 24(%rcx), %r14 - movq 8(%rcx), %rdx - movq 16(%rcx), %rbx - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %rsi, %rdx - movq %rdx, 8(%rdi) - adcq %r10, %rbx - movq %rbx, 16(%rdi) - adcq %r9, %r14 - movq %r14, 24(%rdi) - adcq %r8, %r11 - movq %r11, 32(%rdi) -.LBB76_2: # %nocarry - popq %rbx - popq %r14 - retq -.Lfunc_end76: - .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L - - .globl mcl_fp_subNF5L - .align 16, 0x90 - .type mcl_fp_subNF5L,@function -mcl_fp_subNF5L: # @mcl_fp_subNF5L -# BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq 32(%rsi), %r14 - movq 24(%rsi), %r8 - movq 16(%rsi), %r9 - movq (%rsi), %r10 - movq 8(%rsi), %r11 - subq (%rdx), %r10 - sbbq 8(%rdx), %r11 - sbbq 16(%rdx), %r9 - sbbq 24(%rdx), %r8 - sbbq 32(%rdx), %r14 - movq %r14, %rdx - sarq $63, %rdx - movq %rdx, %rsi - shldq $1, %r14, %rsi - movq 8(%rcx), %rbx - andq %rsi, %rbx - andq (%rcx), %rsi - movq 32(%rcx), %r15 - andq %rdx, %r15 - movq 24(%rcx), %rax - andq %rdx, %rax - rolq %rdx - andq 16(%rcx), %rdx - addq %r10, %rsi - movq %rsi, (%rdi) - adcq %r11, %rbx - movq %rbx, 8(%rdi) - adcq %r9, %rdx - movq %rdx, 16(%rdi) - adcq %r8, 
%rax - movq %rax, 24(%rdi) - adcq %r14, %r15 - movq %r15, 32(%rdi) - popq %rbx - popq %r14 - popq %r15 - retq -.Lfunc_end77: - .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L - - .globl mcl_fpDbl_add5L - .align 16, 0x90 - .type mcl_fpDbl_add5L,@function -mcl_fpDbl_add5L: # @mcl_fpDbl_add5L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 72(%rdx), %rax - movq %rax, -8(%rsp) # 8-byte Spill - movq 64(%rdx), %r11 - movq 56(%rdx), %r14 - movq 48(%rdx), %r15 - movq 24(%rsi), %rbp - movq 32(%rsi), %r13 - movq 16(%rdx), %r12 - movq (%rdx), %rbx - movq 8(%rdx), %rax - addq (%rsi), %rbx - adcq 8(%rsi), %rax - adcq 16(%rsi), %r12 - adcq 24(%rdx), %rbp - adcq 32(%rdx), %r13 - movq 40(%rdx), %r9 - movq %rbx, (%rdi) - movq 72(%rsi), %r8 - movq %rax, 8(%rdi) - movq 64(%rsi), %r10 - movq %r12, 16(%rdi) - movq 56(%rsi), %r12 - movq %rbp, 24(%rdi) - movq 48(%rsi), %rbp - movq 40(%rsi), %rbx - movq %r13, 32(%rdi) - adcq %r9, %rbx - adcq %r15, %rbp - adcq %r14, %r12 - adcq %r11, %r10 - adcq -8(%rsp), %r8 # 8-byte Folded Reload - sbbq %rsi, %rsi - andl $1, %esi - movq %rbx, %rax - subq (%rcx), %rax - movq %rbp, %rdx - sbbq 8(%rcx), %rdx - movq %r12, %r9 - sbbq 16(%rcx), %r9 - movq %r10, %r11 - sbbq 24(%rcx), %r11 - movq %r8, %r14 - sbbq 32(%rcx), %r14 - sbbq $0, %rsi - andl $1, %esi - cmovneq %rbx, %rax - movq %rax, 40(%rdi) - testb %sil, %sil - cmovneq %rbp, %rdx - movq %rdx, 48(%rdi) - cmovneq %r12, %r9 - movq %r9, 56(%rdi) - cmovneq %r10, %r11 - movq %r11, 64(%rdi) - cmovneq %r8, %r14 - movq %r14, 72(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end78: - .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L - - .globl mcl_fpDbl_sub5L - .align 16, 0x90 - .type mcl_fpDbl_sub5L,@function -mcl_fpDbl_sub5L: # @mcl_fpDbl_sub5L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 72(%rdx), %r9 - movq 64(%rdx), %r10 - movq 56(%rdx), %r14 - movq 16(%rsi), %r8 - movq (%rsi), %r15 - movq 8(%rsi), %r11 - xorl %eax, %eax - subq (%rdx), %r15 - sbbq 8(%rdx), %r11 - sbbq 16(%rdx), %r8 - movq 24(%rsi), %r12 - sbbq 24(%rdx), %r12 - movq %r15, (%rdi) - movq 32(%rsi), %rbx - sbbq 32(%rdx), %rbx - movq %r11, 8(%rdi) - movq 48(%rdx), %r15 - movq 40(%rdx), %rdx - movq %r8, 16(%rdi) - movq 72(%rsi), %r8 - movq %r12, 24(%rdi) - movq 64(%rsi), %r11 - movq %rbx, 32(%rdi) - movq 40(%rsi), %rbp - sbbq %rdx, %rbp - movq 56(%rsi), %r12 - movq 48(%rsi), %r13 - sbbq %r15, %r13 - sbbq %r14, %r12 - sbbq %r10, %r11 - sbbq %r9, %r8 - movl $0, %edx - sbbq $0, %rdx - andl $1, %edx - movq (%rcx), %rsi - cmoveq %rax, %rsi - testb %dl, %dl - movq 16(%rcx), %rdx - cmoveq %rax, %rdx - movq 8(%rcx), %rbx - cmoveq %rax, %rbx - movq 32(%rcx), %r9 - cmoveq %rax, %r9 - cmovneq 24(%rcx), %rax - addq %rbp, %rsi - movq %rsi, 40(%rdi) - adcq %r13, %rbx - movq %rbx, 48(%rdi) - adcq %r12, %rdx - movq %rdx, 56(%rdi) - adcq %r11, %rax - movq %rax, 64(%rdi) - adcq %r8, %r9 - movq %r9, 72(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end79: - .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L - - .globl mcl_fp_mulUnitPre6L - .align 16, 0x90 - .type mcl_fp_mulUnitPre6L,@function -mcl_fp_mulUnitPre6L: # @mcl_fp_mulUnitPre6L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rcx, %rax - mulq 40(%rsi) - movq %rdx, %r9 - movq %rax, %r8 - movq %rcx, %rax - mulq 32(%rsi) - movq %rdx, %r10 - movq %rax, %r11 - movq %rcx, %rax - mulq 
24(%rsi) - movq %rdx, %r15 - movq %rax, %r14 - movq %rcx, %rax - mulq 16(%rsi) - movq %rdx, %r13 - movq %rax, %r12 - movq %rcx, %rax - mulq 8(%rsi) - movq %rdx, %rbx - movq %rax, %rbp - movq %rcx, %rax - mulq (%rsi) - movq %rax, (%rdi) - addq %rbp, %rdx - movq %rdx, 8(%rdi) - adcq %r12, %rbx - movq %rbx, 16(%rdi) - adcq %r14, %r13 - movq %r13, 24(%rdi) - adcq %r11, %r15 - movq %r15, 32(%rdi) - adcq %r8, %r10 - movq %r10, 40(%rdi) - adcq $0, %r9 - movq %r9, 48(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end80: - .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L - - .globl mcl_fpDbl_mulPre6L - .align 16, 0x90 - .type mcl_fpDbl_mulPre6L,@function -mcl_fpDbl_mulPre6L: # @mcl_fpDbl_mulPre6L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rsi, %r8 - movq %rdi, -8(%rsp) # 8-byte Spill - movq (%r8), %rax - movq %rax, -24(%rsp) # 8-byte Spill - movq 8(%r8), %r13 - movq %r13, -72(%rsp) # 8-byte Spill - movq (%rdx), %rbx - mulq %rbx - movq %rdx, -32(%rsp) # 8-byte Spill - movq 16(%r8), %rbp - movq %rbp, -64(%rsp) # 8-byte Spill - movq 24(%r8), %rsi - movq %rsi, -48(%rsp) # 8-byte Spill - movq 32(%r8), %r10 - movq 40(%r8), %r11 - movq %rax, (%rdi) - movq %r11, %rax - mulq %rbx - movq %rdx, %rcx - movq %rax, -40(%rsp) # 8-byte Spill - movq %r10, %rax - mulq %rbx - movq %rdx, %r12 - movq %rax, %rdi - movq %rsi, %rax - mulq %rbx - movq %rdx, %r9 - movq %rax, %r14 - movq %rbp, %rax - mulq %rbx - movq %rdx, %rbp - movq %rax, %r15 - movq %r13, %rax - mulq %rbx - movq %rdx, %r13 - movq %rax, %rsi - addq -32(%rsp), %rsi # 8-byte Folded Reload - adcq %r15, %r13 - adcq %r14, %rbp - adcq %rdi, %r9 - adcq -40(%rsp), %r12 # 8-byte Folded Reload - movq %r12, %rdi - adcq $0, %rcx - movq %rcx, -56(%rsp) # 8-byte Spill - movq -16(%rsp), %r15 # 8-byte Reload - movq 8(%r15), %rcx - movq %r11, %rax - mulq %rcx - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, %r11 - movq %r10, %rax - mulq %rcx - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, %r12 - movq -48(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, %r14 - movq -64(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, %rbx - movq -72(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, %r10 - movq -24(%rsp), %rax # 8-byte Reload - mulq %rcx - addq %rsi, %rax - movq -8(%rsp), %rcx # 8-byte Reload - movq %rax, 8(%rcx) - adcq %r13, %r10 - adcq %rbp, %rbx - adcq %r9, %r14 - adcq %rdi, %r12 - adcq -56(%rsp), %r11 # 8-byte Folded Reload - sbbq %rdi, %rdi - andl $1, %edi - addq %rdx, %r10 - adcq -72(%rsp), %rbx # 8-byte Folded Reload - adcq -64(%rsp), %r14 # 8-byte Folded Reload - adcq -48(%rsp), %r12 # 8-byte Folded Reload - adcq -40(%rsp), %r11 # 8-byte Folded Reload - movq %r11, -96(%rsp) # 8-byte Spill - adcq -32(%rsp), %rdi # 8-byte Folded Reload - movq 40(%r8), %rax - movq %rax, -24(%rsp) # 8-byte Spill - movq 16(%r15), %rcx - mulq %rcx - movq %rax, -40(%rsp) # 8-byte Spill - movq %rdx, -32(%rsp) # 8-byte Spill - movq 32(%r8), %rax - movq %rax, -48(%rsp) # 8-byte Spill - mulq %rcx - movq %rax, %r15 - movq %rdx, -88(%rsp) # 8-byte Spill - movq 24(%r8), %rax - movq %rax, -56(%rsp) # 8-byte Spill - mulq %rcx - movq %rax, %r11 - movq %rdx, -104(%rsp) # 8-byte Spill - movq 16(%r8), %rax - movq %rax, -64(%rsp) # 8-byte Spill - mulq %rcx - movq %rax, %rbp - movq %rdx, -112(%rsp) # 8-byte Spill - movq (%r8), %rsi - 
movq %rsi, -72(%rsp) # 8-byte Spill - movq 8(%r8), %rax - movq %rax, -80(%rsp) # 8-byte Spill - mulq %rcx - movq %rdx, %r13 - movq %rax, %r9 - movq %rsi, %rax - mulq %rcx - addq %r10, %rax - movq -8(%rsp), %r10 # 8-byte Reload - movq %rax, 16(%r10) - adcq %rbx, %r9 - adcq %r14, %rbp - adcq %r12, %r11 - adcq -96(%rsp), %r15 # 8-byte Folded Reload - movq -40(%rsp), %rax # 8-byte Reload - adcq %rdi, %rax - sbbq %rcx, %rcx - andl $1, %ecx - addq %rdx, %r9 - adcq %r13, %rbp - adcq -112(%rsp), %r11 # 8-byte Folded Reload - adcq -104(%rsp), %r15 # 8-byte Folded Reload - adcq -88(%rsp), %rax # 8-byte Folded Reload - movq %rax, -40(%rsp) # 8-byte Spill - adcq -32(%rsp), %rcx # 8-byte Folded Reload - movq -16(%rsp), %rdi # 8-byte Reload - movq 24(%rdi), %rbx - movq -24(%rsp), %rax # 8-byte Reload - mulq %rbx - movq %rdx, -88(%rsp) # 8-byte Spill - movq %rax, -32(%rsp) # 8-byte Spill - movq -48(%rsp), %rax # 8-byte Reload - mulq %rbx - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, -24(%rsp) # 8-byte Spill - movq -56(%rsp), %rax # 8-byte Reload - mulq %rbx - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, %r14 - movq -64(%rsp), %rax # 8-byte Reload - mulq %rbx - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, %r12 - movq -80(%rsp), %rax # 8-byte Reload - mulq %rbx - movq %rdx, %rsi - movq %rax, %r13 - movq -72(%rsp), %rax # 8-byte Reload - mulq %rbx - addq %r9, %rax - movq %rax, 24(%r10) - adcq %rbp, %r13 - adcq %r11, %r12 - adcq %r15, %r14 - movq -24(%rsp), %rbp # 8-byte Reload - adcq -40(%rsp), %rbp # 8-byte Folded Reload - movq -32(%rsp), %rax # 8-byte Reload - adcq %rcx, %rax - sbbq %r10, %r10 - andl $1, %r10d - addq %rdx, %r13 - adcq %rsi, %r12 - adcq -64(%rsp), %r14 # 8-byte Folded Reload - adcq -56(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, -24(%rsp) # 8-byte Spill - adcq -48(%rsp), %rax # 8-byte Folded Reload - movq %rax, -32(%rsp) # 8-byte Spill - adcq -88(%rsp), %r10 # 8-byte Folded Reload - movq 40(%r8), %rax - movq %rax, -72(%rsp) # 8-byte Spill - movq 32(%rdi), %rcx - movq 32(%r8), %rbx - movq %rbx, -112(%rsp) # 8-byte Spill - movq 24(%r8), %rsi - movq %rsi, -64(%rsp) # 8-byte Spill - movq 16(%r8), %rdi - movq %rdi, -104(%rsp) # 8-byte Spill - movq (%r8), %r15 - movq 8(%r8), %r9 - mulq %rcx - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, %r11 - movq %rbx, %rax - mulq %rcx - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, %r8 - movq %rsi, %rax - mulq %rcx - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, %rbx - movq %rdi, %rax - mulq %rcx - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, %rdi - movq %r9, %rax - mulq %rcx - movq %rdx, -88(%rsp) # 8-byte Spill - movq %rax, %rbp - movq %r15, %rax - mulq %rcx - movq %rdx, -96(%rsp) # 8-byte Spill - addq %r13, %rax - movq -8(%rsp), %r13 # 8-byte Reload - movq %rax, 32(%r13) - adcq %r12, %rbp - adcq %r14, %rdi - adcq -24(%rsp), %rbx # 8-byte Folded Reload - adcq -32(%rsp), %r8 # 8-byte Folded Reload - adcq %r10, %r11 - movq -16(%rsp), %rax # 8-byte Reload - movq 40(%rax), %rcx - sbbq %rsi, %rsi - movq -72(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rax, -72(%rsp) # 8-byte Spill - movq -112(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, %r14 - movq %r9, %rax - mulq %rcx - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, %r10 - movq %r15, %rax - mulq %rcx - movq %rdx, %r12 - movq %rax, %r9 - movq -64(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, %r15 - movq -104(%rsp), %rax # 8-byte Reload - mulq %rcx - 
andl $1, %esi - addq -96(%rsp), %rbp # 8-byte Folded Reload - adcq -88(%rsp), %rdi # 8-byte Folded Reload - adcq -80(%rsp), %rbx # 8-byte Folded Reload - adcq -56(%rsp), %r8 # 8-byte Folded Reload - adcq -48(%rsp), %r11 # 8-byte Folded Reload - adcq -40(%rsp), %rsi # 8-byte Folded Reload - addq %r9, %rbp - movq %rbp, 40(%r13) - adcq %r10, %rdi - adcq %rax, %rbx - adcq %r15, %r8 - adcq %r14, %r11 - adcq -72(%rsp), %rsi # 8-byte Folded Reload - sbbq %rax, %rax - andl $1, %eax - addq %r12, %rdi - movq %rdi, 48(%r13) - adcq -32(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, 56(%r13) - adcq %rdx, %r8 - movq %r8, 64(%r13) - adcq -64(%rsp), %r11 # 8-byte Folded Reload - movq %r11, 72(%r13) - adcq -24(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 80(%r13) - adcq -16(%rsp), %rax # 8-byte Folded Reload - movq %rax, 88(%r13) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end81: - .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L - - .globl mcl_fpDbl_sqrPre6L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre6L,@function -mcl_fpDbl_sqrPre6L: # @mcl_fpDbl_sqrPre6L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, -8(%rsp) # 8-byte Spill - movq 16(%rsi), %r8 - movq %r8, -56(%rsp) # 8-byte Spill - movq 24(%rsi), %r10 - movq %r10, -40(%rsp) # 8-byte Spill - movq 32(%rsi), %r9 - movq %r9, -32(%rsp) # 8-byte Spill - movq 40(%rsi), %r11 - movq (%rsi), %rcx - movq 8(%rsi), %rbx - movq %rcx, %rax - mulq %rcx - movq %rdx, %rbp - movq %rax, (%rdi) - movq %r11, %rax - mulq %rcx - movq %rdx, %rdi - movq %rax, -16(%rsp) # 8-byte Spill - movq %r9, %rax - mulq %rcx - movq %rdx, %r14 - movq %rax, %r9 - movq %r10, %rax - mulq %rcx - movq %rdx, %r12 - movq %rax, %r10 - movq %r8, %rax - mulq %rcx - movq %rdx, %r13 - movq %rax, %r15 - movq %rbx, %rax - mulq %rcx - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, %r8 - addq %r8, %rbp - adcq %rdx, %r15 - adcq %r10, %r13 - adcq %r9, %r12 - adcq -16(%rsp), %r14 # 8-byte Folded Reload - adcq $0, %rdi - movq %rdi, -48(%rsp) # 8-byte Spill - movq %r11, %rax - mulq %rbx - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rax, %rcx - movq -32(%rsp), %rax # 8-byte Reload - mulq %rbx - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, %r9 - movq -40(%rsp), %rax # 8-byte Reload - mulq %rbx - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, %r10 - movq -56(%rsp), %rax # 8-byte Reload - mulq %rbx - movq %rdx, %rdi - movq %rax, %r11 - movq %rbx, %rax - mulq %rbx - movq %rax, %rbx - addq %r8, %rbp - movq -8(%rsp), %rax # 8-byte Reload - movq %rbp, 8(%rax) - adcq %r15, %rbx - adcq %r13, %r11 - adcq %r12, %r10 - adcq %r14, %r9 - movq %rcx, %rax - adcq -48(%rsp), %rax # 8-byte Folded Reload - sbbq %rcx, %rcx - andl $1, %ecx - addq -24(%rsp), %rbx # 8-byte Folded Reload - adcq %rdx, %r11 - adcq %rdi, %r10 - adcq -40(%rsp), %r9 # 8-byte Folded Reload - adcq -32(%rsp), %rax # 8-byte Folded Reload - movq %rax, -72(%rsp) # 8-byte Spill - adcq -16(%rsp), %rcx # 8-byte Folded Reload - movq 40(%rsi), %rax - movq %rax, -16(%rsp) # 8-byte Spill - movq 16(%rsi), %rdi - mulq %rdi - movq %rax, -80(%rsp) # 8-byte Spill - movq %rdx, -40(%rsp) # 8-byte Spill - movq 32(%rsi), %rax - movq %rax, -32(%rsp) # 8-byte Spill - mulq %rdi - movq %rax, %r12 - movq %rdx, -56(%rsp) # 8-byte Spill - movq 24(%rsi), %rbp - movq %rbp, %rax - mulq %rdi - movq %rax, %r14 - movq %r14, -96(%rsp) # 8-byte Spill - movq %rdx, -24(%rsp) # 8-byte Spill - movq (%rsi), %r15 - movq %r15, -48(%rsp) # 8-byte Spill - movq 8(%rsi), %rax - movq %rax, 
-64(%rsp) # 8-byte Spill - mulq %rdi - movq %rdx, -88(%rsp) # 8-byte Spill - movq %rax, %r8 - movq %r15, %rax - mulq %rdi - movq %rdx, -104(%rsp) # 8-byte Spill - movq %rax, %r15 - movq %rdi, %rax - mulq %rdi - movq %rax, %r13 - addq %rbx, %r15 - movq -8(%rsp), %rbx # 8-byte Reload - movq %r15, 16(%rbx) - adcq %r11, %r8 - adcq %r10, %r13 - adcq %r14, %r9 - adcq -72(%rsp), %r12 # 8-byte Folded Reload - movq -80(%rsp), %r14 # 8-byte Reload - adcq %rcx, %r14 - sbbq %rcx, %rcx - andl $1, %ecx - addq -104(%rsp), %r8 # 8-byte Folded Reload - adcq -88(%rsp), %r13 # 8-byte Folded Reload - adcq %rdx, %r9 - adcq -24(%rsp), %r12 # 8-byte Folded Reload - adcq -56(%rsp), %r14 # 8-byte Folded Reload - adcq -40(%rsp), %rcx # 8-byte Folded Reload - movq -16(%rsp), %rax # 8-byte Reload - mulq %rbp - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, -16(%rsp) # 8-byte Spill - movq -32(%rsp), %rax # 8-byte Reload - mulq %rbp - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, %r10 - movq -64(%rsp), %rax # 8-byte Reload - mulq %rbp - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, %r11 - movq -48(%rsp), %rax # 8-byte Reload - mulq %rbp - movq %rdx, -88(%rsp) # 8-byte Spill - movq %rax, %rdi - movq %rbp, %rax - mulq %rbp - movq %rax, %r15 - movq %rdx, -104(%rsp) # 8-byte Spill - addq %r8, %rdi - movq %rdi, 24(%rbx) - adcq %r13, %r11 - adcq -96(%rsp), %r9 # 8-byte Folded Reload - adcq %r12, %r15 - adcq %r14, %r10 - movq -16(%rsp), %r12 # 8-byte Reload - adcq %rcx, %r12 - sbbq %rcx, %rcx - movq (%rsi), %r8 - andl $1, %ecx - movq 8(%rsi), %rbx - movq 40(%rsi), %rdi - movq %rbx, %rax - mulq %rdi - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, -48(%rsp) # 8-byte Spill - movq %r8, %rax - mulq %rdi - movq %rax, -64(%rsp) # 8-byte Spill - movq %rdx, -40(%rsp) # 8-byte Spill - movq 32(%rsi), %rbp - movq %rbx, %rax - mulq %rbp - movq %rdx, -96(%rsp) # 8-byte Spill - movq %rax, -120(%rsp) # 8-byte Spill - movq %r8, %rax - mulq %rbp - movq %rax, %r14 - movq %rdx, -112(%rsp) # 8-byte Spill - addq -88(%rsp), %r11 # 8-byte Folded Reload - adcq -80(%rsp), %r9 # 8-byte Folded Reload - adcq -24(%rsp), %r15 # 8-byte Folded Reload - adcq -104(%rsp), %r10 # 8-byte Folded Reload - adcq -72(%rsp), %r12 # 8-byte Folded Reload - movq %r12, -16(%rsp) # 8-byte Spill - adcq -56(%rsp), %rcx # 8-byte Folded Reload - movq 24(%rsi), %rbx - movq 16(%rsi), %r8 - movq %rbx, %rax - mulq %rdi - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, -80(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq %rbp - movq %rdx, -104(%rsp) # 8-byte Spill - movq %rax, %rsi - movq %r8, %rax - mulq %rdi - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, -88(%rsp) # 8-byte Spill - movq %r8, %rax - mulq %rbp - movq %rdx, %rbx - movq %rax, %r13 - movq %rdi, %rax - mulq %rbp - movq %rdx, %r12 - movq %rax, %r8 - movq %rdi, %rax - mulq %rdi - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, %rdi - movq %rbp, %rax - mulq %rbp - addq %r14, %r11 - movq -8(%rsp), %r14 # 8-byte Reload - movq %r11, 32(%r14) - adcq -120(%rsp), %r9 # 8-byte Folded Reload - adcq %r15, %r13 - adcq %r10, %rsi - adcq -16(%rsp), %rax # 8-byte Folded Reload - adcq %r8, %rcx - sbbq %rbp, %rbp - andl $1, %ebp - addq -112(%rsp), %r9 # 8-byte Folded Reload - adcq -96(%rsp), %r13 # 8-byte Folded Reload - adcq %rbx, %rsi - adcq -104(%rsp), %rax # 8-byte Folded Reload - adcq %rdx, %rcx - adcq %r12, %rbp - addq -64(%rsp), %r9 # 8-byte Folded Reload - movq %r14, %rbx - movq %r9, 40(%rbx) - adcq -48(%rsp), %r13 # 8-byte Folded Reload - adcq -88(%rsp), %rsi # 8-byte Folded Reload - adcq -80(%rsp), %rax # 8-byte 
Folded Reload - adcq %r8, %rcx - adcq %rdi, %rbp - sbbq %rdx, %rdx - andl $1, %edx - addq -40(%rsp), %r13 # 8-byte Folded Reload - movq %r13, 48(%rbx) - adcq -32(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 56(%rbx) - adcq -72(%rsp), %rax # 8-byte Folded Reload - movq %rax, 64(%rbx) - adcq -24(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 72(%rbx) - adcq %r12, %rbp - movq %rbp, 80(%rbx) - adcq -56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 88(%rbx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end82: - .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L - - .globl mcl_fp_mont6L - .align 16, 0x90 - .type mcl_fp_mont6L,@function -mcl_fp_mont6L: # @mcl_fp_mont6L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $56, %rsp - movq %rdx, -8(%rsp) # 8-byte Spill - movq %rdi, -96(%rsp) # 8-byte Spill - movq 40(%rsi), %rax - movq %rax, -16(%rsp) # 8-byte Spill - movq (%rdx), %rbx - mulq %rbx - movq %rax, %r8 - movq %rdx, %r14 - movq 32(%rsi), %rax - movq %rax, -24(%rsp) # 8-byte Spill - mulq %rbx - movq %rax, %r9 - movq %rdx, %r15 - movq 24(%rsi), %rax - movq %rax, -56(%rsp) # 8-byte Spill - movq 16(%rsi), %rbp - movq %rbp, -48(%rsp) # 8-byte Spill - movq (%rsi), %r12 - movq %r12, -32(%rsp) # 8-byte Spill - movq 8(%rsi), %rsi - movq %rsi, -40(%rsp) # 8-byte Spill - mulq %rbx - movq %rdx, %rdi - movq %rax, %r10 - movq %rbp, %rax - mulq %rbx - movq %rdx, %rbp - movq %rax, %r11 - movq %rsi, %rax - mulq %rbx - movq %rdx, %rsi - movq %rax, %r13 - movq %r12, %rax - mulq %rbx - movq %rax, -120(%rsp) # 8-byte Spill - addq %r13, %rdx - movq %rdx, -112(%rsp) # 8-byte Spill - adcq %r11, %rsi - movq %rsi, -104(%rsp) # 8-byte Spill - adcq %r10, %rbp - movq %rbp, -88(%rsp) # 8-byte Spill - adcq %r9, %rdi - movq %rdi, -80(%rsp) # 8-byte Spill - adcq %r8, %r15 - movq %r15, -72(%rsp) # 8-byte Spill - adcq $0, %r14 - movq %r14, -64(%rsp) # 8-byte Spill - movq -8(%rcx), %rdx - movq %rdx, (%rsp) # 8-byte Spill - movq %rax, %rdi - imulq %rdx, %rdi - movq (%rcx), %r9 - movq %r9, 8(%rsp) # 8-byte Spill - movq 40(%rcx), %rdx - movq %rdx, 48(%rsp) # 8-byte Spill - movq 32(%rcx), %rbp - movq %rbp, 32(%rsp) # 8-byte Spill - movq 24(%rcx), %rbx - movq %rbx, 40(%rsp) # 8-byte Spill - movq 16(%rcx), %rsi - movq %rsi, 16(%rsp) # 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, 24(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq %rdx - movq %rdx, %r11 - movq %rax, -128(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq %rbp - movq %rdx, %r13 - movq %rax, %r10 - movq %rdi, %rax - mulq %rbx - movq %rdx, %rbp - movq %rax, %r14 - movq %rdi, %rax - mulq %rsi - movq %rdx, %rbx - movq %rax, %r12 - movq %rdi, %rax - mulq %rcx - movq %rdx, %r8 - movq %rax, %r15 - movq %rdi, %rax - mulq %r9 - movq %rdx, %r9 - addq %r15, %r9 - adcq %r12, %r8 - adcq %r14, %rbx - adcq %r10, %rbp - adcq -128(%rsp), %r13 # 8-byte Folded Reload - adcq $0, %r11 - addq -120(%rsp), %rax # 8-byte Folded Reload - adcq -112(%rsp), %r9 # 8-byte Folded Reload - adcq -104(%rsp), %r8 # 8-byte Folded Reload - adcq -88(%rsp), %rbx # 8-byte Folded Reload - adcq -80(%rsp), %rbp # 8-byte Folded Reload - adcq -72(%rsp), %r13 # 8-byte Folded Reload - adcq -64(%rsp), %r11 # 8-byte Folded Reload - sbbq %rax, %rax - andl $1, %eax - movq %rax, -80(%rsp) # 8-byte Spill - movq -8(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rdi - movq %rdi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, -88(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -24(%rsp) # 
8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %rcx - movq %rdi, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r10 - movq %rdi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rax, %r12 - movq %rdx, %rdi - addq %r10, %rdi - adcq %rcx, %rsi - adcq -112(%rsp), %r15 # 8-byte Folded Reload - adcq -104(%rsp), %r14 # 8-byte Folded Reload - movq -72(%rsp), %rcx # 8-byte Reload - adcq -88(%rsp), %rcx # 8-byte Folded Reload - movq -64(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %r9, %r12 - adcq %r8, %rdi - adcq %rbx, %rsi - adcq %rbp, %r15 - adcq %r13, %r14 - adcq %r11, %rcx - movq %rcx, -72(%rsp) # 8-byte Spill - adcq -80(%rsp), %rax # 8-byte Folded Reload - movq %rax, -64(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -88(%rsp) # 8-byte Spill - movq %r12, %rbx - imulq (%rsp), %rbx # 8-byte Folded Reload - movq %rbx, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq 32(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r13 - movq %rbx, %rax - mulq 24(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r11 - movq %rbx, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - addq %r11, %r9 - adcq %r13, %rbp - adcq -120(%rsp), %rcx # 8-byte Folded Reload - adcq -112(%rsp), %r10 # 8-byte Folded Reload - adcq -104(%rsp), %r8 # 8-byte Folded Reload - movq -80(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %r12, %rax - adcq %rdi, %r9 - adcq %rsi, %rbp - adcq %r15, %rcx - adcq %r14, %r10 - adcq -72(%rsp), %r8 # 8-byte Folded Reload - adcq -64(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - adcq $0, -88(%rsp) # 8-byte Folded Spill - movq -8(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rdi - movq %rdi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, -112(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, %rbx - movq %rdi, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %r11 - movq %rdi, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r15 - movq %rdi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rax, %r13 - movq %rdx, %rdi - addq %r15, %rdi - adcq %r11, %rsi - adcq %rbx, %r12 - adcq -112(%rsp), %r14 # 8-byte Folded Reload - movq -72(%rsp), %rdx # 8-byte Reload - adcq -104(%rsp), %rdx # 8-byte Folded Reload - movq -64(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %r9, %r13 - adcq %rbp, %rdi - adcq %rcx, %rsi - adcq %r10, %r12 - adcq %r8, %r14 - adcq -80(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - adcq -88(%rsp), %rax # 8-byte Folded Reload - movq %rax, -64(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -88(%rsp) # 8-byte Spill - movq %r13, %rbp - imulq (%rsp), %rbp # 
8-byte Folded Reload - movq %rbp, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 32(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r10 - movq %rbp, %rax - mulq 24(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r8 - movq %rbp, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - addq %r8, %r9 - adcq %r10, %rcx - adcq -120(%rsp), %rbx # 8-byte Folded Reload - adcq -112(%rsp), %r15 # 8-byte Folded Reload - adcq -104(%rsp), %r11 # 8-byte Folded Reload - movq -80(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %r13, %rax - adcq %rdi, %r9 - adcq %rsi, %rcx - adcq %r12, %rbx - adcq %r14, %r15 - adcq -72(%rsp), %r11 # 8-byte Folded Reload - adcq -64(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq -88(%rsp), %rbp # 8-byte Reload - adcq $0, %rbp - movq -8(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rdi - movq %rdi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, -88(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %r8 - movq %rdi, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r13 - movq %rdi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rax, %r12 - movq %rdx, %rdi - addq %r13, %rdi - adcq %r8, %rsi - adcq -112(%rsp), %r10 # 8-byte Folded Reload - adcq -104(%rsp), %r14 # 8-byte Folded Reload - movq -72(%rsp), %rdx # 8-byte Reload - adcq -88(%rsp), %rdx # 8-byte Folded Reload - movq -64(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %r9, %r12 - adcq %rcx, %rdi - adcq %rbx, %rsi - adcq %r15, %r10 - adcq %r11, %r14 - adcq -80(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - adcq %rbp, %rax - movq %rax, -64(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -88(%rsp) # 8-byte Spill - movq %r12, %rbp - imulq (%rsp), %rbp # 8-byte Folded Reload - movq %rbp, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 32(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r13 - movq %rbp, %rax - mulq 24(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r9 - movq %rbp, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - addq %r9, %r8 - adcq %r13, %rcx - adcq -120(%rsp), %rbx # 8-byte Folded Reload - adcq -112(%rsp), %r15 # 8-byte Folded Reload - adcq -104(%rsp), %r11 # 8-byte Folded Reload - movq -80(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %r12, %rax - adcq %rdi, %r8 - adcq %rsi, %rcx - adcq %r10, %rbx - adcq %r14, %r15 - adcq -72(%rsp), %r11 # 8-byte Folded Reload - adcq -64(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 
-80(%rsp) # 8-byte Spill - adcq $0, -88(%rsp) # 8-byte Folded Spill - movq -8(%rsp), %rax # 8-byte Reload - movq 32(%rax), %rsi - movq %rsi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, -112(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, -120(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r12 - movq %rsi, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, %r10 - movq %rsi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rax, %r9 - movq %rdx, %r13 - addq %r10, %r13 - adcq %r12, %r14 - adcq -120(%rsp), %rdi # 8-byte Folded Reload - adcq -112(%rsp), %rbp # 8-byte Folded Reload - movq -72(%rsp), %rdx # 8-byte Reload - adcq -104(%rsp), %rdx # 8-byte Folded Reload - movq -64(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %r8, %r9 - adcq %rcx, %r13 - adcq %rbx, %r14 - adcq %r15, %rdi - adcq %r11, %rbp - adcq -80(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - adcq -88(%rsp), %rax # 8-byte Folded Reload - movq %rax, -64(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -88(%rsp) # 8-byte Spill - movq %r9, %rsi - imulq (%rsp), %rsi # 8-byte Folded Reload - movq %rsi, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 32(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %rbx - movq %rsi, %rax - mulq 24(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %rcx - movq %rsi, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - addq %rcx, %r8 - adcq %rbx, %r12 - adcq -120(%rsp), %r15 # 8-byte Folded Reload - adcq -112(%rsp), %r11 # 8-byte Folded Reload - adcq -104(%rsp), %r10 # 8-byte Folded Reload - movq -80(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %r9, %rax - adcq %r13, %r8 - adcq %r14, %r12 - adcq %rdi, %r15 - adcq %rbp, %r11 - adcq -72(%rsp), %r10 # 8-byte Folded Reload - movq %r10, -72(%rsp) # 8-byte Spill - adcq -64(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq -88(%rsp), %rbx # 8-byte Reload - adcq $0, %rbx - movq -8(%rsp), %rax # 8-byte Reload - movq 40(%rax), %rcx - movq %rcx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, -88(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rax, -24(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, -8(%rsp) # 8-byte Spill - movq %rax, %r9 - movq %rcx, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %rsi - movq %rcx, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, %rdi - movq %rcx, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rax, %r13 - movq %rdx, %rbp - addq %rdi, %rbp - adcq %rsi, %r14 - adcq %r9, %r10 - movq -8(%rsp), %rax # 8-byte Reload - adcq -24(%rsp), %rax # 8-byte Folded Reload - movq -16(%rsp), %rdx # 8-byte Reload - adcq -88(%rsp), %rdx # 8-byte Folded 
Reload - movq -64(%rsp), %rsi # 8-byte Reload - adcq $0, %rsi - addq %r8, %r13 - movq %r13, -40(%rsp) # 8-byte Spill - adcq %r12, %rbp - adcq %r15, %r14 - movq %r14, -24(%rsp) # 8-byte Spill - adcq %r11, %r10 - movq %r10, -32(%rsp) # 8-byte Spill - adcq -72(%rsp), %rax # 8-byte Folded Reload - movq %rax, -8(%rsp) # 8-byte Spill - adcq -80(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -16(%rsp) # 8-byte Spill - adcq %rbx, %rsi - movq %rsi, -64(%rsp) # 8-byte Spill - sbbq %rcx, %rcx - movq (%rsp), %r9 # 8-byte Reload - imulq %r13, %r9 - andl $1, %ecx - movq %r9, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, (%rsp) # 8-byte Spill - movq %r9, %rax - mulq 32(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, -48(%rsp) # 8-byte Spill - movq %r9, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, -56(%rsp) # 8-byte Spill - movq %r9, %rax - movq 8(%rsp), %r13 # 8-byte Reload - mulq %r13 - movq %rdx, %r15 - movq %rax, %r12 - movq %r9, %rax - movq 16(%rsp), %r14 # 8-byte Reload - mulq %r14 - movq %rdx, %rsi - movq %rax, %r11 - movq %r9, %rax - movq 24(%rsp), %r10 # 8-byte Reload - mulq %r10 - addq %r15, %rax - adcq %r11, %rdx - adcq -56(%rsp), %rsi # 8-byte Folded Reload - adcq -48(%rsp), %rdi # 8-byte Folded Reload - adcq (%rsp), %rbx # 8-byte Folded Reload - adcq $0, %r8 - addq -40(%rsp), %r12 # 8-byte Folded Reload - adcq %rbp, %rax - adcq -24(%rsp), %rdx # 8-byte Folded Reload - adcq -32(%rsp), %rsi # 8-byte Folded Reload - adcq -8(%rsp), %rdi # 8-byte Folded Reload - adcq -16(%rsp), %rbx # 8-byte Folded Reload - adcq -64(%rsp), %r8 # 8-byte Folded Reload - adcq $0, %rcx - movq %rax, %rbp - subq %r13, %rbp - movq %rdx, %r9 - sbbq %r10, %r9 - movq %rsi, %r10 - sbbq %r14, %r10 - movq %rdi, %r11 - sbbq 40(%rsp), %r11 # 8-byte Folded Reload - movq %rbx, %r14 - sbbq 32(%rsp), %r14 # 8-byte Folded Reload - movq %r8, %r15 - sbbq 48(%rsp), %r15 # 8-byte Folded Reload - sbbq $0, %rcx - andl $1, %ecx - cmovneq %rdi, %r11 - testb %cl, %cl - cmovneq %rax, %rbp - movq -96(%rsp), %rax # 8-byte Reload - movq %rbp, (%rax) - cmovneq %rdx, %r9 - movq %r9, 8(%rax) - cmovneq %rsi, %r10 - movq %r10, 16(%rax) - movq %r11, 24(%rax) - cmovneq %rbx, %r14 - movq %r14, 32(%rax) - cmovneq %r8, %r15 - movq %r15, 40(%rax) - addq $56, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end83: - .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L - - .globl mcl_fp_montNF6L - .align 16, 0x90 - .type mcl_fp_montNF6L,@function -mcl_fp_montNF6L: # @mcl_fp_montNF6L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $40, %rsp - movq %rdx, -8(%rsp) # 8-byte Spill - movq %rdi, -88(%rsp) # 8-byte Spill - movq 40(%rsi), %rax - movq %rax, -16(%rsp) # 8-byte Spill - movq (%rdx), %rbx - mulq %rbx - movq %rax, 32(%rsp) # 8-byte Spill - movq %rdx, %r13 - movq 32(%rsi), %rax - movq %rax, -24(%rsp) # 8-byte Spill - mulq %rbx - movq %rax, %r10 - movq %rdx, %r9 - movq 24(%rsi), %rax - movq %rax, -72(%rsp) # 8-byte Spill - movq 16(%rsi), %rbp - movq %rbp, -64(%rsp) # 8-byte Spill - movq (%rsi), %rdi - movq %rdi, -48(%rsp) # 8-byte Spill - movq 8(%rsi), %rsi - movq %rsi, -56(%rsp) # 8-byte Spill - mulq %rbx - movq %rdx, %r11 - movq %rax, %r8 - movq %rbp, %rax - mulq %rbx - movq %rdx, %r14 - movq %rax, %r15 - movq %rsi, %rax - mulq %rbx - movq %rdx, %r12 - movq %rax, %rbp - movq %rdi, %rax - mulq %rbx - movq %rax, -104(%rsp) # 8-byte Spill - movq %rdx, %rbx - addq %rbp, %rbx - adcq %r15, %r12 - adcq %r8, %r14 
- adcq %r10, %r11 - adcq 32(%rsp), %r9 # 8-byte Folded Reload - movq %r9, -96(%rsp) # 8-byte Spill - adcq $0, %r13 - movq %r13, -80(%rsp) # 8-byte Spill - movq -8(%rcx), %rdx - movq %rdx, (%rsp) # 8-byte Spill - movq %rax, %r9 - imulq %rdx, %r9 - movq (%rcx), %r8 - movq %r8, 8(%rsp) # 8-byte Spill - movq 40(%rcx), %rdx - movq %rdx, 32(%rsp) # 8-byte Spill - movq 32(%rcx), %rsi - movq %rsi, 24(%rsp) # 8-byte Spill - movq 24(%rcx), %rbp - movq %rbp, 16(%rsp) # 8-byte Spill - movq 16(%rcx), %rdi - movq %rdi, -40(%rsp) # 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -32(%rsp) # 8-byte Spill - movq %r9, %rax - mulq %rdx - movq %rdx, -112(%rsp) # 8-byte Spill - movq %rax, %r10 - movq %r9, %rax - mulq %rsi - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %r15 - movq %r9, %rax - mulq %rbp - movq %rdx, -128(%rsp) # 8-byte Spill - movq %rax, %rsi - movq %r9, %rax - mulq %rdi - movq %rdx, %r13 - movq %rax, %rdi - movq %r9, %rax - mulq %rcx - movq %rdx, %rcx - movq %rax, %rbp - movq %r9, %rax - mulq %r8 - addq -104(%rsp), %rax # 8-byte Folded Reload - adcq %rbx, %rbp - adcq %r12, %rdi - adcq %r14, %rsi - adcq %r11, %r15 - adcq -96(%rsp), %r10 # 8-byte Folded Reload - movq -80(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %rdx, %rbp - adcq %rcx, %rdi - adcq %r13, %rsi - adcq -128(%rsp), %r15 # 8-byte Folded Reload - adcq -120(%rsp), %r10 # 8-byte Folded Reload - adcq -112(%rsp), %rax # 8-byte Folded Reload - movq %rax, -80(%rsp) # 8-byte Spill - movq -8(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rcx - movq %rcx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -96(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -104(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r8 - movq %rcx, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rax, %r13 - movq %rdx, %rcx - addq %r8, %rcx - adcq -120(%rsp), %rbx # 8-byte Folded Reload - adcq -112(%rsp), %r12 # 8-byte Folded Reload - adcq -104(%rsp), %r11 # 8-byte Folded Reload - adcq -96(%rsp), %r9 # 8-byte Folded Reload - adcq $0, %r14 - addq %rbp, %r13 - adcq %rdi, %rcx - adcq %rsi, %rbx - adcq %r15, %r12 - adcq %r10, %r11 - adcq -80(%rsp), %r9 # 8-byte Folded Reload - adcq $0, %r14 - movq %r13, %rsi - imulq (%rsp), %rsi # 8-byte Folded Reload - movq %rsi, %rax - mulq 32(%rsp) # 8-byte Folded Reload - movq %rdx, -96(%rsp) # 8-byte Spill - movq %rax, %rdi - movq %rsi, %rax - mulq 24(%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, %rbp - movq %rsi, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, -104(%rsp) # 8-byte Spill - movq %rax, %r8 - movq %rsi, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, -112(%rsp) # 8-byte Spill - movq %rax, %r10 - movq %rsi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %r15 - movq %rsi, %rax - mulq 8(%rsp) # 8-byte Folded Reload - addq %r13, %rax - adcq %rcx, %r15 - adcq %rbx, %r10 - adcq %r12, %r8 - adcq %r11, %rbp - adcq %r9, %rdi - adcq $0, %r14 - addq %rdx, %r15 - adcq -120(%rsp), %r10 # 8-byte Folded Reload - adcq -112(%rsp), %r8 # 8-byte Folded Reload - adcq -104(%rsp), %rbp # 8-byte Folded Reload - adcq -80(%rsp), %rdi # 8-byte Folded Reload - 
movq %rdi, -80(%rsp) # 8-byte Spill - adcq -96(%rsp), %r14 # 8-byte Folded Reload - movq -8(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rsi - movq %rsi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -96(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -104(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, -112(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %rdi - movq %rsi, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rax, %r9 - movq %rdx, %rsi - addq %rdi, %rsi - adcq -120(%rsp), %rbx # 8-byte Folded Reload - adcq -112(%rsp), %r12 # 8-byte Folded Reload - adcq -104(%rsp), %rcx # 8-byte Folded Reload - adcq -96(%rsp), %r11 # 8-byte Folded Reload - adcq $0, %r13 - addq %r15, %r9 - adcq %r10, %rsi - adcq %r8, %rbx - adcq %rbp, %r12 - adcq -80(%rsp), %rcx # 8-byte Folded Reload - adcq %r14, %r11 - adcq $0, %r13 - movq %r9, %r8 - imulq (%rsp), %r8 # 8-byte Folded Reload - movq %r8, %rax - mulq 32(%rsp) # 8-byte Folded Reload - movq %rdx, -104(%rsp) # 8-byte Spill - movq %rax, -80(%rsp) # 8-byte Spill - movq %r8, %rax - mulq 24(%rsp) # 8-byte Folded Reload - movq %rdx, -112(%rsp) # 8-byte Spill - movq %rax, %r15 - movq %r8, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, -96(%rsp) # 8-byte Spill - movq %rax, %r10 - movq %r8, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %r14 - movq %r8, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %rdi - movq %r8, %rax - mulq 8(%rsp) # 8-byte Folded Reload - addq %r9, %rax - adcq %rsi, %rdi - adcq %rbx, %r14 - adcq %r12, %r10 - adcq %rcx, %r15 - movq -80(%rsp), %rax # 8-byte Reload - adcq %r11, %rax - adcq $0, %r13 - addq %rdx, %rdi - adcq %rbp, %r14 - adcq -120(%rsp), %r10 # 8-byte Folded Reload - adcq -96(%rsp), %r15 # 8-byte Folded Reload - movq %r15, -96(%rsp) # 8-byte Spill - adcq -112(%rsp), %rax # 8-byte Folded Reload - movq %rax, -80(%rsp) # 8-byte Spill - adcq -104(%rsp), %r13 # 8-byte Folded Reload - movq -8(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rbp - movq %rbp, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, -104(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, -120(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r12 - movq %rbp, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r9 - movq %rbp, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rax, %r8 - movq %rdx, %rbp - addq %r9, %rbp - adcq %r12, %rbx - adcq -120(%rsp), %rsi # 8-byte Folded Reload - adcq -112(%rsp), %rcx # 8-byte Folded Reload - adcq -104(%rsp), %r11 # 8-byte Folded Reload - adcq $0, %r15 - addq %rdi, %r8 - adcq %r14, %rbp - adcq %r10, %rbx - adcq -96(%rsp), %rsi # 8-byte Folded Reload - adcq -80(%rsp), %rcx # 8-byte Folded Reload - adcq %r13, %r11 - adcq $0, %r15 - movq %r8, %r14 - imulq (%rsp), %r14 # 8-byte Folded Reload - movq %r14, %rax - mulq 32(%rsp) # 8-byte Folded Reload - movq %rdx, -104(%rsp) # 8-byte Spill - movq %rax, %r9 - movq %r14, %rax - mulq 
24(%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, %r13 - movq %r14, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, -96(%rsp) # 8-byte Spill - movq %rax, %r10 - movq %r14, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, -112(%rsp) # 8-byte Spill - movq %rax, %r12 - movq %r14, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %rdi - movq %r14, %rax - mulq 8(%rsp) # 8-byte Folded Reload - addq %r8, %rax - adcq %rbp, %rdi - adcq %rbx, %r12 - adcq %rsi, %r10 - adcq %rcx, %r13 - adcq %r11, %r9 - adcq $0, %r15 - addq %rdx, %rdi - adcq -120(%rsp), %r12 # 8-byte Folded Reload - adcq -112(%rsp), %r10 # 8-byte Folded Reload - adcq -96(%rsp), %r13 # 8-byte Folded Reload - movq %r13, -96(%rsp) # 8-byte Spill - adcq -80(%rsp), %r9 # 8-byte Folded Reload - movq %r9, -80(%rsp) # 8-byte Spill - adcq -104(%rsp), %r15 # 8-byte Folded Reload - movq -8(%rsp), %rax # 8-byte Reload - movq 32(%rax), %rcx - movq %rcx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -104(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -72(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, -128(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r13 - movq %rcx, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rax, %r11 - movq %rdx, %rbp - addq %r13, %rbp - adcq -128(%rsp), %rbx # 8-byte Folded Reload - adcq -120(%rsp), %rsi # 8-byte Folded Reload - adcq -112(%rsp), %r8 # 8-byte Folded Reload - adcq -104(%rsp), %r9 # 8-byte Folded Reload - adcq $0, %r14 - addq %rdi, %r11 - adcq %r12, %rbp - adcq %r10, %rbx - adcq -96(%rsp), %rsi # 8-byte Folded Reload - adcq -80(%rsp), %r8 # 8-byte Folded Reload - adcq %r15, %r9 - adcq $0, %r14 - movq %r11, %rcx - imulq (%rsp), %rcx # 8-byte Folded Reload - movq %rcx, %rax - mulq 32(%rsp) # 8-byte Folded Reload - movq %rdx, -112(%rsp) # 8-byte Spill - movq %rax, -80(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq 24(%rsp) # 8-byte Folded Reload - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %r12 - movq %rcx, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, -96(%rsp) # 8-byte Spill - movq %rax, %r10 - movq %rcx, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, -104(%rsp) # 8-byte Spill - movq %rax, %r15 - movq %rcx, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %rdi - movq %rcx, %rax - mulq 8(%rsp) # 8-byte Folded Reload - addq %r11, %rax - adcq %rbp, %rdi - adcq %rbx, %r15 - adcq %rsi, %r10 - adcq %r8, %r12 - movq -80(%rsp), %rcx # 8-byte Reload - adcq %r9, %rcx - adcq $0, %r14 - addq %rdx, %rdi - adcq %r13, %r15 - adcq -104(%rsp), %r10 # 8-byte Folded Reload - movq %r10, -104(%rsp) # 8-byte Spill - adcq -96(%rsp), %r12 # 8-byte Folded Reload - movq %r12, -96(%rsp) # 8-byte Spill - adcq -120(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -80(%rsp) # 8-byte Spill - adcq -112(%rsp), %r14 # 8-byte Folded Reload - movq -8(%rsp), %rax # 8-byte Reload - movq 40(%rax), %rcx - movq %rcx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, -8(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -16(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -72(%rsp) # 8-byte 
Folded Reload - movq %rdx, %r12 - movq %rax, -24(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -64(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %rbp - movq %rcx, %rax - mulq -56(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %rsi - movq %rcx, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rax, %r11 - movq %rdx, %r8 - addq %rsi, %r8 - adcq %rbp, %r10 - adcq -24(%rsp), %r13 # 8-byte Folded Reload - adcq -16(%rsp), %r12 # 8-byte Folded Reload - adcq -8(%rsp), %r9 # 8-byte Folded Reload - adcq $0, %rbx - addq %rdi, %r11 - adcq %r15, %r8 - adcq -104(%rsp), %r10 # 8-byte Folded Reload - adcq -96(%rsp), %r13 # 8-byte Folded Reload - adcq -80(%rsp), %r12 # 8-byte Folded Reload - adcq %r14, %r9 - movq %r9, -16(%rsp) # 8-byte Spill - adcq $0, %rbx - movq (%rsp), %r9 # 8-byte Reload - imulq %r11, %r9 - movq %r9, %rax - mulq 32(%rsp) # 8-byte Folded Reload - movq %rdx, (%rsp) # 8-byte Spill - movq %rax, %rsi - movq %r9, %rax - mulq 24(%rsp) # 8-byte Folded Reload - movq %rdx, -8(%rsp) # 8-byte Spill - movq %rax, %rdi - movq %r9, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, %rbp - movq %r9, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, %r14 - movq %r9, %rax - movq -40(%rsp), %r15 # 8-byte Reload - mulq %r15 - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, %rcx - movq %r9, %rax - movq -32(%rsp), %r9 # 8-byte Reload - mulq %r9 - addq %r11, %r14 - adcq %r8, %rax - adcq %r10, %rcx - adcq %r13, %rbp - adcq %r12, %rdi - adcq -16(%rsp), %rsi # 8-byte Folded Reload - adcq $0, %rbx - addq -48(%rsp), %rax # 8-byte Folded Reload - adcq %rdx, %rcx - adcq -56(%rsp), %rbp # 8-byte Folded Reload - adcq -24(%rsp), %rdi # 8-byte Folded Reload - adcq -8(%rsp), %rsi # 8-byte Folded Reload - adcq (%rsp), %rbx # 8-byte Folded Reload - movq %rax, %r14 - subq 8(%rsp), %r14 # 8-byte Folded Reload - movq %rcx, %r8 - sbbq %r9, %r8 - movq %rbp, %r9 - sbbq %r15, %r9 - movq %rdi, %r10 - sbbq 16(%rsp), %r10 # 8-byte Folded Reload - movq %rsi, %r11 - sbbq 24(%rsp), %r11 # 8-byte Folded Reload - movq %rbx, %r15 - sbbq 32(%rsp), %r15 # 8-byte Folded Reload - movq %r15, %rdx - sarq $63, %rdx - cmovsq %rax, %r14 - movq -88(%rsp), %rax # 8-byte Reload - movq %r14, (%rax) - cmovsq %rcx, %r8 - movq %r8, 8(%rax) - cmovsq %rbp, %r9 - movq %r9, 16(%rax) - cmovsq %rdi, %r10 - movq %r10, 24(%rax) - cmovsq %rsi, %r11 - movq %r11, 32(%rax) - cmovsq %rbx, %r15 - movq %r15, 40(%rax) - addq $40, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end84: - .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L - - .globl mcl_fp_montRed6L - .align 16, 0x90 - .type mcl_fp_montRed6L,@function -mcl_fp_montRed6L: # @mcl_fp_montRed6L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $16, %rsp - movq %rdx, %rcx - movq %rdi, -104(%rsp) # 8-byte Spill - movq -8(%rcx), %rax - movq %rax, -32(%rsp) # 8-byte Spill - movq (%rcx), %r11 - movq %r11, -24(%rsp) # 8-byte Spill - movq (%rsi), %r9 - movq %r9, %rbp - imulq %rax, %rbp - movq 40(%rcx), %rdx - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq %rdx - movq %rax, %r12 - movq %rdx, -72(%rsp) # 8-byte Spill - movq 32(%rcx), %rdx - movq %rdx, 8(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq %rdx - movq %rax, %r15 - movq %rdx, %r8 - movq 24(%rcx), %rdx - movq %rdx, (%rsp) # 8-byte Spill - movq 16(%rcx), %rdi - movq %rdi, -8(%rsp) # 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -16(%rsp) # 
8-byte Spill - movq %rbp, %rax - mulq %rdx - movq %rdx, %r10 - movq %rax, %r14 - movq %rbp, %rax - mulq %rdi - movq %rdx, %r13 - movq %rax, %rbx - movq %rbp, %rax - mulq %rcx - movq %rdx, %rcx - movq %rax, %rdi - movq %rbp, %rax - mulq %r11 - movq %rdx, %rbp - addq %rdi, %rbp - adcq %rbx, %rcx - adcq %r14, %r13 - adcq %r15, %r10 - adcq %r12, %r8 - movq -72(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %r9, %rax - adcq 8(%rsi), %rbp - adcq 16(%rsi), %rcx - adcq 24(%rsi), %r13 - adcq 32(%rsi), %r10 - adcq 40(%rsi), %r8 - movq %r8, -112(%rsp) # 8-byte Spill - adcq 48(%rsi), %rdx - movq %rdx, -72(%rsp) # 8-byte Spill - movq 88(%rsi), %rax - movq 80(%rsi), %rdx - movq 72(%rsi), %rdi - movq 64(%rsi), %rbx - movq 56(%rsi), %r15 - adcq $0, %r15 - adcq $0, %rbx - movq %rbx, -96(%rsp) # 8-byte Spill - adcq $0, %rdi - movq %rdi, -64(%rsp) # 8-byte Spill - adcq $0, %rdx - movq %rdx, -56(%rsp) # 8-byte Spill - adcq $0, %rax - movq %rax, -48(%rsp) # 8-byte Spill - sbbq %r12, %r12 - andl $1, %r12d - movq %rbp, %rdi - imulq -32(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, -120(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, -88(%rsp) # 8-byte Spill - movq %rax, -128(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, %r8 - movq %rdi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r9 - movq %rdi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r11 - movq %rdi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - addq %r11, %rdi - adcq %r9, %rsi - adcq %r8, %rbx - adcq -128(%rsp), %r14 # 8-byte Folded Reload - movq -88(%rsp), %r8 # 8-byte Reload - adcq -120(%rsp), %r8 # 8-byte Folded Reload - movq -80(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %rbp, %rax - adcq %rcx, %rdi - adcq %r13, %rsi - adcq %r10, %rbx - adcq -112(%rsp), %r14 # 8-byte Folded Reload - adcq -72(%rsp), %r8 # 8-byte Folded Reload - movq %r8, -88(%rsp) # 8-byte Spill - adcq %r15, %rdx - movq %rdx, -80(%rsp) # 8-byte Spill - adcq $0, -96(%rsp) # 8-byte Folded Spill - adcq $0, -64(%rsp) # 8-byte Folded Spill - adcq $0, -56(%rsp) # 8-byte Folded Spill - adcq $0, -48(%rsp) # 8-byte Folded Spill - adcq $0, %r12 - movq %rdi, %rcx - imulq -32(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, -112(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -128(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %r8 - movq %rcx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r10 - movq %rcx, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - addq %r10, %r9 - adcq %r8, %rbp - adcq -128(%rsp), %r13 # 8-byte Folded Reload - adcq -120(%rsp), %r11 # 8-byte Folded Reload - adcq -112(%rsp), %r15 # 8-byte Folded Reload - movq -72(%rsp), %rcx # 8-byte Reload - adcq $0, %rcx - addq %rdi, %rax - adcq %rsi, %r9 - adcq %rbx, %rbp - adcq %r14, %r13 - adcq -88(%rsp), %r11 # 8-byte Folded Reload - adcq -80(%rsp), %r15 # 8-byte Folded Reload - adcq -96(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -72(%rsp) # 8-byte Spill - adcq $0, -64(%rsp) 
# 8-byte Folded Spill - adcq $0, -56(%rsp) # 8-byte Folded Spill - adcq $0, -48(%rsp) # 8-byte Folded Spill - adcq $0, %r12 - movq %r9, %rsi - imulq -32(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, -96(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, -88(%rsp) # 8-byte Spill - movq %rax, -112(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r10 - movq %rsi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %rbx - movq %rsi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - addq %rbx, %rdi - adcq %r10, %rcx - adcq -120(%rsp), %r8 # 8-byte Folded Reload - adcq -112(%rsp), %r14 # 8-byte Folded Reload - movq -88(%rsp), %rsi # 8-byte Reload - adcq -96(%rsp), %rsi # 8-byte Folded Reload - movq -80(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %r9, %rax - adcq %rbp, %rdi - adcq %r13, %rcx - adcq %r11, %r8 - adcq %r15, %r14 - adcq -72(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -88(%rsp) # 8-byte Spill - adcq -64(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - adcq $0, -56(%rsp) # 8-byte Folded Spill - movq -48(%rsp), %rbp # 8-byte Reload - adcq $0, %rbp - adcq $0, %r12 - movq %rdi, %rsi - imulq -32(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, %rax - movq -40(%rsp), %r11 # 8-byte Reload - mulq %r11 - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, -48(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, -96(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %r15 - movq %rsi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %rbx - movq %rsi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - addq %rbx, %rsi - adcq %r15, %r10 - adcq -112(%rsp), %r13 # 8-byte Folded Reload - adcq -96(%rsp), %r9 # 8-byte Folded Reload - movq -72(%rsp), %rbx # 8-byte Reload - adcq -48(%rsp), %rbx # 8-byte Folded Reload - movq -64(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %rdi, %rax - adcq %rcx, %rsi - adcq %r8, %r10 - adcq %r14, %r13 - adcq -88(%rsp), %r9 # 8-byte Folded Reload - adcq -80(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, -72(%rsp) # 8-byte Spill - adcq -56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -64(%rsp) # 8-byte Spill - adcq $0, %rbp - movq %rbp, -48(%rsp) # 8-byte Spill - adcq $0, %r12 - movq -32(%rsp), %r8 # 8-byte Reload - imulq %rsi, %r8 - movq %r8, %rax - mulq %r11 - movq %rdx, %rdi - movq %rax, -32(%rsp) # 8-byte Spill - movq %r8, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, -56(%rsp) # 8-byte Spill - movq %r8, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, -80(%rsp) # 8-byte Spill - movq %r8, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r11 - movq %r8, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r14 - movq %r8, %rax - movq -24(%rsp), %r8 # 8-byte Reload - mulq %r8 - addq %r14, %rdx - adcq %r11, %rbp - adcq -80(%rsp), %rbx # 8-byte Folded Reload - adcq -56(%rsp), %rcx # 8-byte Folded Reload - adcq 
-32(%rsp), %r15 # 8-byte Folded Reload - adcq $0, %rdi - addq %rsi, %rax - adcq %r10, %rdx - adcq %r13, %rbp - adcq %r9, %rbx - adcq -72(%rsp), %rcx # 8-byte Folded Reload - adcq -64(%rsp), %r15 # 8-byte Folded Reload - adcq -48(%rsp), %rdi # 8-byte Folded Reload - adcq $0, %r12 - movq %rdx, %rax - subq %r8, %rax - movq %rbp, %rsi - sbbq -16(%rsp), %rsi # 8-byte Folded Reload - movq %rbx, %r9 - sbbq -8(%rsp), %r9 # 8-byte Folded Reload - movq %rcx, %r10 - sbbq (%rsp), %r10 # 8-byte Folded Reload - movq %r15, %r11 - sbbq 8(%rsp), %r11 # 8-byte Folded Reload - movq %rdi, %r14 - sbbq -40(%rsp), %r14 # 8-byte Folded Reload - sbbq $0, %r12 - andl $1, %r12d - cmovneq %rdi, %r14 - testb %r12b, %r12b - cmovneq %rdx, %rax - movq -104(%rsp), %rdx # 8-byte Reload - movq %rax, (%rdx) - cmovneq %rbp, %rsi - movq %rsi, 8(%rdx) - cmovneq %rbx, %r9 - movq %r9, 16(%rdx) - cmovneq %rcx, %r10 - movq %r10, 24(%rdx) - cmovneq %r15, %r11 - movq %r11, 32(%rdx) - movq %r14, 40(%rdx) - addq $16, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end85: - .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L - - .globl mcl_fp_addPre6L - .align 16, 0x90 - .type mcl_fp_addPre6L,@function -mcl_fp_addPre6L: # @mcl_fp_addPre6L -# BB#0: - pushq %r14 - pushq %rbx - movq 40(%rdx), %r8 - movq 40(%rsi), %r11 - movq 32(%rdx), %r9 - movq 24(%rdx), %r10 - movq 24(%rsi), %rax - movq 32(%rsi), %r14 - movq 16(%rdx), %rbx - movq (%rdx), %rcx - movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rbx - movq %rcx, (%rdi) - movq %rdx, 8(%rdi) - movq %rbx, 16(%rdi) - adcq %r10, %rax - movq %rax, 24(%rdi) - adcq %r9, %r14 - movq %r14, 32(%rdi) - adcq %r8, %r11 - movq %r11, 40(%rdi) - sbbq %rax, %rax - andl $1, %eax - popq %rbx - popq %r14 - retq -.Lfunc_end86: - .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L - - .globl mcl_fp_subPre6L - .align 16, 0x90 - .type mcl_fp_subPre6L,@function -mcl_fp_subPre6L: # @mcl_fp_subPre6L -# BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq 40(%rdx), %r8 - movq 40(%rsi), %r9 - movq 32(%rsi), %r10 - movq 24(%rsi), %r11 - movq 16(%rsi), %rcx - movq (%rsi), %rbx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rbx - sbbq 8(%rdx), %rsi - movq 24(%rdx), %r14 - movq 32(%rdx), %r15 - sbbq 16(%rdx), %rcx - movq %rbx, (%rdi) - movq %rsi, 8(%rdi) - movq %rcx, 16(%rdi) - sbbq %r14, %r11 - movq %r11, 24(%rdi) - sbbq %r15, %r10 - movq %r10, 32(%rdi) - sbbq %r8, %r9 - movq %r9, 40(%rdi) - sbbq $0, %rax - andl $1, %eax - popq %rbx - popq %r14 - popq %r15 - retq -.Lfunc_end87: - .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L - - .globl mcl_fp_shr1_6L - .align 16, 0x90 - .type mcl_fp_shr1_6L,@function -mcl_fp_shr1_6L: # @mcl_fp_shr1_6L -# BB#0: - movq 40(%rsi), %r8 - movq 32(%rsi), %r9 - movq 24(%rsi), %rdx - movq 16(%rsi), %rax - movq (%rsi), %rcx - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rcx - movq %rcx, (%rdi) - shrdq $1, %rax, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rdx, %rax - movq %rax, 16(%rdi) - shrdq $1, %r9, %rdx - movq %rdx, 24(%rdi) - shrdq $1, %r8, %r9 - movq %r9, 32(%rdi) - shrq %r8 - movq %r8, 40(%rdi) - retq -.Lfunc_end88: - .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L - - .globl mcl_fp_add6L - .align 16, 0x90 - .type mcl_fp_add6L,@function -mcl_fp_add6L: # @mcl_fp_add6L -# BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq 40(%rdx), %r14 - movq 40(%rsi), %r8 - movq 32(%rdx), %r15 - movq 24(%rdx), %rbx - movq 24(%rsi), %r10 - movq 32(%rsi), %r9 - movq 16(%rdx), %r11 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq 
(%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r11 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r11, 16(%rdi) - adcq %rbx, %r10 - movq %r10, 24(%rdi) - adcq %r15, %r9 - movq %r9, 32(%rdi) - adcq %r14, %r8 - movq %r8, 40(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq 16(%rcx), %r11 - sbbq 24(%rcx), %r10 - sbbq 32(%rcx), %r9 - sbbq 40(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne .LBB89_2 -# BB#1: # %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r11, 16(%rdi) - movq %r10, 24(%rdi) - movq %r9, 32(%rdi) - movq %r8, 40(%rdi) -.LBB89_2: # %carry - popq %rbx - popq %r14 - popq %r15 - retq -.Lfunc_end89: - .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L - - .globl mcl_fp_addNF6L - .align 16, 0x90 - .type mcl_fp_addNF6L,@function -mcl_fp_addNF6L: # @mcl_fp_addNF6L -# BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 40(%rdx), %r8 - movq 32(%rdx), %r9 - movq 24(%rdx), %r10 - movq 16(%rdx), %r11 - movq (%rdx), %r15 - movq 8(%rdx), %r14 - addq (%rsi), %r15 - adcq 8(%rsi), %r14 - adcq 16(%rsi), %r11 - adcq 24(%rsi), %r10 - adcq 32(%rsi), %r9 - adcq 40(%rsi), %r8 - movq %r15, %rsi - subq (%rcx), %rsi - movq %r14, %rbx - sbbq 8(%rcx), %rbx - movq %r11, %rdx - sbbq 16(%rcx), %rdx - movq %r10, %r13 - sbbq 24(%rcx), %r13 - movq %r9, %r12 - sbbq 32(%rcx), %r12 - movq %r8, %rax - sbbq 40(%rcx), %rax - movq %rax, %rcx - sarq $63, %rcx - cmovsq %r15, %rsi - movq %rsi, (%rdi) - cmovsq %r14, %rbx - movq %rbx, 8(%rdi) - cmovsq %r11, %rdx - movq %rdx, 16(%rdi) - cmovsq %r10, %r13 - movq %r13, 24(%rdi) - cmovsq %r9, %r12 - movq %r12, 32(%rdi) - cmovsq %r8, %rax - movq %rax, 40(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq -.Lfunc_end90: - .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L - - .globl mcl_fp_sub6L - .align 16, 0x90 - .type mcl_fp_sub6L,@function -mcl_fp_sub6L: # @mcl_fp_sub6L -# BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 40(%rdx), %r14 - movq 40(%rsi), %r8 - movq 32(%rsi), %r9 - movq 24(%rsi), %r10 - movq 16(%rsi), %r11 - movq (%rsi), %rax - movq 8(%rsi), %rsi - xorl %ebx, %ebx - subq (%rdx), %rax - sbbq 8(%rdx), %rsi - movq 24(%rdx), %r15 - movq 32(%rdx), %r12 - sbbq 16(%rdx), %r11 - movq %rax, (%rdi) - movq %rsi, 8(%rdi) - movq %r11, 16(%rdi) - sbbq %r15, %r10 - movq %r10, 24(%rdi) - sbbq %r12, %r9 - movq %r9, 32(%rdi) - sbbq %r14, %r8 - movq %r8, 40(%rdi) - sbbq $0, %rbx - testb $1, %bl - je .LBB91_2 -# BB#1: # %carry - movq 40(%rcx), %r14 - movq 32(%rcx), %r15 - movq 24(%rcx), %r12 - movq 8(%rcx), %rbx - movq 16(%rcx), %rdx - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %rsi, %rbx - movq %rbx, 8(%rdi) - adcq %r11, %rdx - movq %rdx, 16(%rdi) - adcq %r10, %r12 - movq %r12, 24(%rdi) - adcq %r9, %r15 - movq %r15, 32(%rdi) - adcq %r8, %r14 - movq %r14, 40(%rdi) -.LBB91_2: # %nocarry - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq -.Lfunc_end91: - .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L - - .globl mcl_fp_subNF6L - .align 16, 0x90 - .type mcl_fp_subNF6L,@function -mcl_fp_subNF6L: # @mcl_fp_subNF6L -# BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 40(%rsi), %r15 - movq 32(%rsi), %r8 - movq 24(%rsi), %r9 - movq 16(%rsi), %r10 - movq (%rsi), %r11 - movq 8(%rsi), %r14 - subq (%rdx), %r11 - sbbq 8(%rdx), %r14 - sbbq 16(%rdx), %r10 - sbbq 24(%rdx), %r9 - sbbq 32(%rdx), %r8 - sbbq 40(%rdx), %r15 - movq %r15, %rdx - sarq $63, %rdx - movq %rdx, %rbx - addq %rbx, %rbx - movq %rdx, %rsi - adcq %rsi, %rsi - andq 8(%rcx), %rsi - movq 
%r15, %rax - shrq $63, %rax - orq %rbx, %rax - andq (%rcx), %rax - movq 40(%rcx), %r12 - andq %rdx, %r12 - movq 32(%rcx), %r13 - andq %rdx, %r13 - movq 24(%rcx), %rbx - andq %rdx, %rbx - andq 16(%rcx), %rdx - addq %r11, %rax - movq %rax, (%rdi) - adcq %r14, %rsi - movq %rsi, 8(%rdi) - adcq %r10, %rdx - movq %rdx, 16(%rdi) - adcq %r9, %rbx - movq %rbx, 24(%rdi) - adcq %r8, %r13 - movq %r13, 32(%rdi) - adcq %r15, %r12 - movq %r12, 40(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq -.Lfunc_end92: - .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L - - .globl mcl_fpDbl_add6L - .align 16, 0x90 - .type mcl_fpDbl_add6L,@function -mcl_fpDbl_add6L: # @mcl_fpDbl_add6L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 88(%rdx), %rax - movq %rax, -8(%rsp) # 8-byte Spill - movq 80(%rdx), %rax - movq %rax, -16(%rsp) # 8-byte Spill - movq 72(%rdx), %r14 - movq 64(%rdx), %r15 - movq 24(%rsi), %rbp - movq 32(%rsi), %r13 - movq 16(%rdx), %r12 - movq (%rdx), %rbx - movq 8(%rdx), %rax - addq (%rsi), %rbx - adcq 8(%rsi), %rax - adcq 16(%rsi), %r12 - adcq 24(%rdx), %rbp - adcq 32(%rdx), %r13 - movq 56(%rdx), %r11 - movq 48(%rdx), %r9 - movq 40(%rdx), %rdx - movq %rbx, (%rdi) - movq 88(%rsi), %r8 - movq %rax, 8(%rdi) - movq 80(%rsi), %r10 - movq %r12, 16(%rdi) - movq 72(%rsi), %r12 - movq %rbp, 24(%rdi) - movq 40(%rsi), %rax - adcq %rdx, %rax - movq 64(%rsi), %rdx - movq %r13, 32(%rdi) - movq 56(%rsi), %r13 - movq 48(%rsi), %rbp - adcq %r9, %rbp - movq %rax, 40(%rdi) - adcq %r11, %r13 - adcq %r15, %rdx - adcq %r14, %r12 - adcq -16(%rsp), %r10 # 8-byte Folded Reload - adcq -8(%rsp), %r8 # 8-byte Folded Reload - sbbq %rax, %rax - andl $1, %eax - movq %rbp, %rsi - subq (%rcx), %rsi - movq %r13, %rbx - sbbq 8(%rcx), %rbx - movq %rdx, %r9 - sbbq 16(%rcx), %r9 - movq %r12, %r11 - sbbq 24(%rcx), %r11 - movq %r10, %r14 - sbbq 32(%rcx), %r14 - movq %r8, %r15 - sbbq 40(%rcx), %r15 - sbbq $0, %rax - andl $1, %eax - cmovneq %rbp, %rsi - movq %rsi, 48(%rdi) - testb %al, %al - cmovneq %r13, %rbx - movq %rbx, 56(%rdi) - cmovneq %rdx, %r9 - movq %r9, 64(%rdi) - cmovneq %r12, %r11 - movq %r11, 72(%rdi) - cmovneq %r10, %r14 - movq %r14, 80(%rdi) - cmovneq %r8, %r15 - movq %r15, 88(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end93: - .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L - - .globl mcl_fpDbl_sub6L - .align 16, 0x90 - .type mcl_fpDbl_sub6L,@function -mcl_fpDbl_sub6L: # @mcl_fpDbl_sub6L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 88(%rdx), %r9 - movq 80(%rdx), %r10 - movq 72(%rdx), %r14 - movq 16(%rsi), %r8 - movq (%rsi), %r15 - movq 8(%rsi), %r11 - xorl %eax, %eax - subq (%rdx), %r15 - sbbq 8(%rdx), %r11 - sbbq 16(%rdx), %r8 - movq 24(%rsi), %rbx - sbbq 24(%rdx), %rbx - movq 32(%rsi), %r12 - sbbq 32(%rdx), %r12 - movq 64(%rdx), %r13 - movq %r15, (%rdi) - movq 56(%rdx), %rbp - movq %r11, 8(%rdi) - movq 48(%rdx), %r15 - movq 40(%rdx), %rdx - movq %r8, 16(%rdi) - movq 88(%rsi), %r8 - movq %rbx, 24(%rdi) - movq 40(%rsi), %rbx - sbbq %rdx, %rbx - movq 80(%rsi), %r11 - movq %r12, 32(%rdi) - movq 48(%rsi), %rdx - sbbq %r15, %rdx - movq 72(%rsi), %r15 - movq %rbx, 40(%rdi) - movq 64(%rsi), %r12 - movq 56(%rsi), %rsi - sbbq %rbp, %rsi - sbbq %r13, %r12 - sbbq %r14, %r15 - sbbq %r10, %r11 - sbbq %r9, %r8 - movl $0, %ebp - sbbq $0, %rbp - andl $1, %ebp - movq (%rcx), %r14 - cmoveq %rax, %r14 - testb %bpl, %bpl - movq 16(%rcx), %r9 - cmoveq %rax, %r9 - movq 
8(%rcx), %rbp - cmoveq %rax, %rbp - movq 40(%rcx), %r10 - cmoveq %rax, %r10 - movq 32(%rcx), %rbx - cmoveq %rax, %rbx - cmovneq 24(%rcx), %rax - addq %rdx, %r14 - movq %r14, 48(%rdi) - adcq %rsi, %rbp - movq %rbp, 56(%rdi) - adcq %r12, %r9 - movq %r9, 64(%rdi) - adcq %r15, %rax - movq %rax, 72(%rdi) - adcq %r11, %rbx - movq %rbx, 80(%rdi) - adcq %r8, %r10 - movq %r10, 88(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end94: - .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L - - .globl mcl_fp_mulUnitPre7L - .align 16, 0x90 - .type mcl_fp_mulUnitPre7L,@function -mcl_fp_mulUnitPre7L: # @mcl_fp_mulUnitPre7L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rcx, %rax - mulq 48(%rsi) - movq %rdx, %r10 - movq %rax, -8(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq 40(%rsi) - movq %rdx, %r11 - movq %rax, -16(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq 32(%rsi) - movq %rdx, %r15 - movq %rax, %r14 - movq %rcx, %rax - mulq 24(%rsi) - movq %rdx, %r13 - movq %rax, %r12 - movq %rcx, %rax - mulq 16(%rsi) - movq %rdx, %rbx - movq %rax, %rbp - movq %rcx, %rax - mulq 8(%rsi) - movq %rdx, %r8 - movq %rax, %r9 - movq %rcx, %rax - mulq (%rsi) - movq %rax, (%rdi) - addq %r9, %rdx - movq %rdx, 8(%rdi) - adcq %rbp, %r8 - movq %r8, 16(%rdi) - adcq %r12, %rbx - movq %rbx, 24(%rdi) - adcq %r14, %r13 - movq %r13, 32(%rdi) - adcq -16(%rsp), %r15 # 8-byte Folded Reload - movq %r15, 40(%rdi) - adcq -8(%rsp), %r11 # 8-byte Folded Reload - movq %r11, 48(%rdi) - adcq $0, %r10 - movq %r10, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end95: - .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L - - .globl mcl_fpDbl_mulPre7L - .align 16, 0x90 - .type mcl_fpDbl_mulPre7L,@function -mcl_fpDbl_mulPre7L: # @mcl_fpDbl_mulPre7L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $24, %rsp - movq %rdx, 8(%rsp) # 8-byte Spill - movq %rsi, %r9 - movq %rdi, 16(%rsp) # 8-byte Spill - movq (%r9), %rax - movq %rax, -8(%rsp) # 8-byte Spill - movq 8(%r9), %r10 - movq %r10, -64(%rsp) # 8-byte Spill - movq (%rdx), %rsi - mulq %rsi - movq %rdx, -32(%rsp) # 8-byte Spill - movq 16(%r9), %r11 - movq %r11, -72(%rsp) # 8-byte Spill - movq 24(%r9), %rbx - movq %rbx, -56(%rsp) # 8-byte Spill - movq 32(%r9), %rbp - movq %rbp, -24(%rsp) # 8-byte Spill - movq 40(%r9), %rcx - movq %rcx, -16(%rsp) # 8-byte Spill - movq 48(%r9), %r14 - movq %rax, (%rdi) - movq %r14, %rax - mulq %rsi - movq %rdx, %rdi - movq %rax, (%rsp) # 8-byte Spill - movq %rcx, %rax - mulq %rsi - movq %rdx, %rcx - movq %rax, -40(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq %rsi - movq %rdx, %rbp - movq %rax, %r15 - movq %rbx, %rax - mulq %rsi - movq %rdx, %rbx - movq %rax, %r8 - movq %r11, %rax - mulq %rsi - movq %rdx, %r12 - movq %rax, %r13 - movq %r10, %rax - mulq %rsi - movq %rdx, %rsi - movq %rax, %r10 - addq -32(%rsp), %r10 # 8-byte Folded Reload - adcq %r13, %rsi - adcq %r8, %r12 - adcq %r15, %rbx - adcq -40(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, -48(%rsp) # 8-byte Spill - adcq (%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -40(%rsp) # 8-byte Spill - adcq $0, %rdi - movq %rdi, -32(%rsp) # 8-byte Spill - movq 8(%rsp), %r11 # 8-byte Reload - movq 8(%r11), %rcx - movq %r14, %rax - mulq %rcx - movq %rdx, %r14 - movq %rax, (%rsp) # 8-byte Spill - movq -16(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rax, %r8 - movq 
-24(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, %r13 - movq -56(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, %r15 - movq -72(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, %rbp - movq -64(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, %rdi - movq -8(%rsp), %rax # 8-byte Reload - mulq %rcx - addq %r10, %rax - movq 16(%rsp), %r10 # 8-byte Reload - movq %rax, 8(%r10) - adcq %rsi, %rdi - adcq %r12, %rbp - adcq %rbx, %r15 - adcq -48(%rsp), %r13 # 8-byte Folded Reload - movq %r8, %rcx - adcq -40(%rsp), %rcx # 8-byte Folded Reload - movq (%rsp), %rax # 8-byte Reload - adcq -32(%rsp), %rax # 8-byte Folded Reload - sbbq %r8, %r8 - andl $1, %r8d - addq %rdx, %rdi - adcq -64(%rsp), %rbp # 8-byte Folded Reload - adcq -72(%rsp), %r15 # 8-byte Folded Reload - adcq -56(%rsp), %r13 # 8-byte Folded Reload - adcq -24(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -88(%rsp) # 8-byte Spill - adcq -16(%rsp), %rax # 8-byte Folded Reload - movq %rax, (%rsp) # 8-byte Spill - adcq %r14, %r8 - movq 48(%r9), %rax - movq %rax, -8(%rsp) # 8-byte Spill - movq 16(%r11), %rcx - mulq %rcx - movq %rax, -96(%rsp) # 8-byte Spill - movq %rdx, -40(%rsp) # 8-byte Spill - movq 40(%r9), %rax - movq %rax, -24(%rsp) # 8-byte Spill - mulq %rcx - movq %rax, -16(%rsp) # 8-byte Spill - movq %rdx, -72(%rsp) # 8-byte Spill - movq 32(%r9), %rax - movq %rax, -32(%rsp) # 8-byte Spill - mulq %rcx - movq %rax, %r12 - movq %rdx, -80(%rsp) # 8-byte Spill - movq 24(%r9), %rax - movq %rax, -48(%rsp) # 8-byte Spill - mulq %rcx - movq %rax, %r14 - movq %rdx, -104(%rsp) # 8-byte Spill - movq 16(%r9), %rax - movq %rax, -56(%rsp) # 8-byte Spill - mulq %rcx - movq %rax, %rbx - movq %rdx, -112(%rsp) # 8-byte Spill - movq (%r9), %rsi - movq 8(%r9), %rax - movq %rax, -64(%rsp) # 8-byte Spill - mulq %rcx - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %r11 - movq %rsi, %rax - mulq %rcx - addq %rdi, %rax - movq %rax, 16(%r10) - adcq %rbp, %r11 - adcq %r15, %rbx - adcq %r13, %r14 - adcq -88(%rsp), %r12 # 8-byte Folded Reload - movq -16(%rsp), %rdi # 8-byte Reload - adcq (%rsp), %rdi # 8-byte Folded Reload - movq -96(%rsp), %rax # 8-byte Reload - adcq %r8, %rax - sbbq %rcx, %rcx - andl $1, %ecx - addq %rdx, %r11 - adcq -120(%rsp), %rbx # 8-byte Folded Reload - adcq -112(%rsp), %r14 # 8-byte Folded Reload - adcq -104(%rsp), %r12 # 8-byte Folded Reload - adcq -80(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, -16(%rsp) # 8-byte Spill - adcq -72(%rsp), %rax # 8-byte Folded Reload - movq %rax, %rdi - adcq -40(%rsp), %rcx # 8-byte Folded Reload - movq 8(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rbp - movq -8(%rsp), %rax # 8-byte Reload - mulq %rbp - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, (%rsp) # 8-byte Spill - movq -24(%rsp), %rax # 8-byte Reload - mulq %rbp - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, -8(%rsp) # 8-byte Spill - movq -32(%rsp), %rax # 8-byte Reload - mulq %rbp - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, %r13 - movq -48(%rsp), %rax # 8-byte Reload - mulq %rbp - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, %r15 - movq -56(%rsp), %rax # 8-byte Reload - mulq %rbp - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, %r8 - movq -64(%rsp), %rax # 8-byte Reload - mulq %rbp - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, %r10 - movq %rsi, %rax - mulq %rbp - addq %r11, %rax - movq 16(%rsp), %rsi # 8-byte Reload - movq %rax, 24(%rsi) - adcq 
%rbx, %r10 - adcq %r14, %r8 - adcq %r12, %r15 - adcq -16(%rsp), %r13 # 8-byte Folded Reload - movq -8(%rsp), %rsi # 8-byte Reload - adcq %rdi, %rsi - movq (%rsp), %rax # 8-byte Reload - adcq %rcx, %rax - sbbq %rdi, %rdi - andl $1, %edi - addq %rdx, %r10 - adcq -64(%rsp), %r8 # 8-byte Folded Reload - adcq -56(%rsp), %r15 # 8-byte Folded Reload - adcq -48(%rsp), %r13 # 8-byte Folded Reload - adcq -32(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -8(%rsp) # 8-byte Spill - adcq -24(%rsp), %rax # 8-byte Folded Reload - movq %rax, (%rsp) # 8-byte Spill - adcq -40(%rsp), %rdi # 8-byte Folded Reload - movq 48(%r9), %rax - movq %rax, -32(%rsp) # 8-byte Spill - movq 8(%rsp), %rbx # 8-byte Reload - movq 32(%rbx), %rcx - mulq %rcx - movq %rax, -16(%rsp) # 8-byte Spill - movq %rdx, -56(%rsp) # 8-byte Spill - movq 40(%r9), %rax - movq %rax, -40(%rsp) # 8-byte Spill - mulq %rcx - movq %rax, -24(%rsp) # 8-byte Spill - movq %rdx, -96(%rsp) # 8-byte Spill - movq 32(%r9), %rax - movq %rax, -48(%rsp) # 8-byte Spill - mulq %rcx - movq %rax, %r12 - movq %rdx, -104(%rsp) # 8-byte Spill - movq 24(%r9), %rax - movq %rax, -64(%rsp) # 8-byte Spill - mulq %rcx - movq %rax, %rbp - movq %rdx, -112(%rsp) # 8-byte Spill - movq 16(%r9), %rax - movq %rax, -72(%rsp) # 8-byte Spill - mulq %rcx - movq %rax, %r14 - movq %rdx, -120(%rsp) # 8-byte Spill - movq (%r9), %rsi - movq %rsi, -80(%rsp) # 8-byte Spill - movq 8(%r9), %rax - movq %rax, -88(%rsp) # 8-byte Spill - mulq %rcx - movq %rdx, -128(%rsp) # 8-byte Spill - movq %rax, %r11 - movq %rsi, %rax - mulq %rcx - addq %r10, %rax - movq 16(%rsp), %rcx # 8-byte Reload - movq %rax, 32(%rcx) - adcq %r8, %r11 - adcq %r15, %r14 - adcq %r13, %rbp - adcq -8(%rsp), %r12 # 8-byte Folded Reload - movq -24(%rsp), %rcx # 8-byte Reload - adcq (%rsp), %rcx # 8-byte Folded Reload - movq -16(%rsp), %rax # 8-byte Reload - adcq %rdi, %rax - sbbq %r13, %r13 - andl $1, %r13d - addq %rdx, %r11 - adcq -128(%rsp), %r14 # 8-byte Folded Reload - adcq -120(%rsp), %rbp # 8-byte Folded Reload - adcq -112(%rsp), %r12 # 8-byte Folded Reload - adcq -104(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -24(%rsp) # 8-byte Spill - adcq -96(%rsp), %rax # 8-byte Folded Reload - movq %rax, -16(%rsp) # 8-byte Spill - adcq -56(%rsp), %r13 # 8-byte Folded Reload - movq 40(%rbx), %rcx - movq -32(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, (%rsp) # 8-byte Spill - movq %rax, %rdi - movq -40(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -8(%rsp) # 8-byte Spill - movq %rax, %r10 - movq -48(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, %r15 - movq -64(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, %rbx - movq -72(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, %rsi - movq -88(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, %r8 - movq -80(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -96(%rsp) # 8-byte Spill - addq %r11, %rax - movq 16(%rsp), %rcx # 8-byte Reload - movq %rax, 40(%rcx) - adcq %r14, %r8 - adcq %rbp, %rsi - adcq %r12, %rbx - adcq -24(%rsp), %r15 # 8-byte Folded Reload - adcq -16(%rsp), %r10 # 8-byte Folded Reload - adcq %r13, %rdi - movq 8(%rsp), %rax # 8-byte Reload - movq 48(%rax), %r11 - sbbq %rcx, %rcx - movq %r11, %rax - mulq 48(%r9) - movq %rdx, 8(%rsp) # 8-byte Spill - movq %rax, -64(%rsp) # 8-byte Spill - movq %r11, %rax - mulq 40(%r9) - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rax, -88(%rsp) # 8-byte 
Spill - movq %r11, %rax - mulq 32(%r9) - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, %r13 - movq %r11, %rax - mulq 24(%r9) - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, %rbp - movq %r11, %rax - mulq 16(%r9) - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, %r14 - movq %r11, %rax - mulq 8(%r9) - movq %rdx, -104(%rsp) # 8-byte Spill - movq %rax, %r12 - movq %r11, %rax - mulq (%r9) - andl $1, %ecx - addq -96(%rsp), %r8 # 8-byte Folded Reload - adcq -72(%rsp), %rsi # 8-byte Folded Reload - adcq -48(%rsp), %rbx # 8-byte Folded Reload - adcq -40(%rsp), %r15 # 8-byte Folded Reload - adcq -32(%rsp), %r10 # 8-byte Folded Reload - adcq -8(%rsp), %rdi # 8-byte Folded Reload - adcq (%rsp), %rcx # 8-byte Folded Reload - addq %rax, %r8 - movq 16(%rsp), %r9 # 8-byte Reload - movq %r8, 48(%r9) - adcq %r12, %rsi - adcq %r14, %rbx - adcq %rbp, %r15 - adcq %r13, %r10 - adcq -88(%rsp), %rdi # 8-byte Folded Reload - adcq -64(%rsp), %rcx # 8-byte Folded Reload - sbbq %rax, %rax - andl $1, %eax - addq %rdx, %rsi - adcq -104(%rsp), %rbx # 8-byte Folded Reload - movq %r9, %rdx - movq %rsi, 56(%rdx) - movq %rbx, 64(%rdx) - adcq -80(%rsp), %r15 # 8-byte Folded Reload - movq %r15, 72(%rdx) - adcq -56(%rsp), %r10 # 8-byte Folded Reload - movq %r10, 80(%rdx) - adcq -24(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 88(%rdx) - adcq -16(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 96(%rdx) - adcq 8(%rsp), %rax # 8-byte Folded Reload - movq %rax, 104(%rdx) - addq $24, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end96: - .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L - - .globl mcl_fpDbl_sqrPre7L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre7L,@function -mcl_fpDbl_sqrPre7L: # @mcl_fpDbl_sqrPre7L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $16, %rsp - movq %rdi, 8(%rsp) # 8-byte Spill - movq 16(%rsi), %r11 - movq %r11, -64(%rsp) # 8-byte Spill - movq 24(%rsi), %r14 - movq %r14, -48(%rsp) # 8-byte Spill - movq 32(%rsi), %r9 - movq %r9, -24(%rsp) # 8-byte Spill - movq 40(%rsi), %r10 - movq %r10, -16(%rsp) # 8-byte Spill - movq 48(%rsi), %r8 - movq (%rsi), %rbp - movq 8(%rsi), %rbx - movq %rbp, %rax - mulq %rbp - movq %rdx, %rcx - movq %rax, (%rdi) - movq %r8, %rax - mulq %rbp - movq %rdx, %r15 - movq %rax, (%rsp) # 8-byte Spill - movq %r10, %rax - mulq %rbp - movq %rdx, %rdi - movq %rax, -32(%rsp) # 8-byte Spill - movq %r9, %rax - mulq %rbp - movq %rdx, %r9 - movq %rax, %r10 - movq %r14, %rax - mulq %rbp - movq %rdx, %r13 - movq %rax, %r14 - movq %r11, %rax - mulq %rbp - movq %rdx, %r12 - movq %rax, %r11 - movq %rbx, %rax - mulq %rbp - movq %rdx, -8(%rsp) # 8-byte Spill - movq %rax, -56(%rsp) # 8-byte Spill - addq %rax, %rcx - adcq %rdx, %r11 - adcq %r14, %r12 - adcq %r10, %r13 - adcq -32(%rsp), %r9 # 8-byte Folded Reload - adcq (%rsp), %rdi # 8-byte Folded Reload - movq %rdi, -40(%rsp) # 8-byte Spill - adcq $0, %r15 - movq %r15, -32(%rsp) # 8-byte Spill - movq %r8, %rax - mulq %rbx - movq %rdx, (%rsp) # 8-byte Spill - movq %rax, %rdi - movq -16(%rsp), %rax # 8-byte Reload - mulq %rbx - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rax, %r15 - movq -24(%rsp), %rax # 8-byte Reload - mulq %rbx - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, %r10 - movq -48(%rsp), %rax # 8-byte Reload - mulq %rbx - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, %r14 - movq -64(%rsp), %rax # 8-byte Reload - mulq %rbx - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, %rbp - movq %rbx, %rax - mulq %rbx - movq %rax, %rbx - 
addq -56(%rsp), %rcx # 8-byte Folded Reload - movq 8(%rsp), %rax # 8-byte Reload - movq %rcx, 8(%rax) - adcq %r11, %rbx - adcq %r12, %rbp - adcq %r13, %r14 - adcq %r9, %r10 - adcq -40(%rsp), %r15 # 8-byte Folded Reload - adcq -32(%rsp), %rdi # 8-byte Folded Reload - sbbq %r8, %r8 - andl $1, %r8d - addq -8(%rsp), %rbx # 8-byte Folded Reload - adcq %rdx, %rbp - adcq -64(%rsp), %r14 # 8-byte Folded Reload - adcq -48(%rsp), %r10 # 8-byte Folded Reload - adcq -24(%rsp), %r15 # 8-byte Folded Reload - adcq -16(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, -72(%rsp) # 8-byte Spill - adcq (%rsp), %r8 # 8-byte Folded Reload - movq 48(%rsi), %rax - movq %rax, -8(%rsp) # 8-byte Spill - movq 16(%rsi), %rdi - mulq %rdi - movq %rax, (%rsp) # 8-byte Spill - movq %rdx, -40(%rsp) # 8-byte Spill - movq 40(%rsi), %rax - movq %rax, -24(%rsp) # 8-byte Spill - mulq %rdi - movq %rax, -88(%rsp) # 8-byte Spill - movq %rdx, -56(%rsp) # 8-byte Spill - movq 32(%rsi), %rax - movq %rax, -32(%rsp) # 8-byte Spill - mulq %rdi - movq %rax, %r13 - movq %rdx, -64(%rsp) # 8-byte Spill - movq 24(%rsi), %rcx - movq %rcx, %rax - mulq %rdi - movq %rax, %r9 - movq %r9, -104(%rsp) # 8-byte Spill - movq %rdx, -16(%rsp) # 8-byte Spill - movq (%rsi), %r12 - movq %r12, -48(%rsp) # 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -80(%rsp) # 8-byte Spill - mulq %rdi - movq %rdx, -96(%rsp) # 8-byte Spill - movq %rax, %r11 - movq %r12, %rax - mulq %rdi - movq %rdx, -112(%rsp) # 8-byte Spill - movq %rax, %r12 - movq %rdi, %rax - mulq %rdi - movq %rax, %rdi - addq %rbx, %r12 - movq 8(%rsp), %rax # 8-byte Reload - movq %r12, 16(%rax) - adcq %rbp, %r11 - adcq %r14, %rdi - adcq %r9, %r10 - adcq %r15, %r13 - movq -88(%rsp), %r14 # 8-byte Reload - adcq -72(%rsp), %r14 # 8-byte Folded Reload - movq (%rsp), %rax # 8-byte Reload - adcq %r8, %rax - sbbq %rbx, %rbx - andl $1, %ebx - addq -112(%rsp), %r11 # 8-byte Folded Reload - adcq -96(%rsp), %rdi # 8-byte Folded Reload - adcq %rdx, %r10 - adcq -16(%rsp), %r13 # 8-byte Folded Reload - adcq -64(%rsp), %r14 # 8-byte Folded Reload - adcq -56(%rsp), %rax # 8-byte Folded Reload - movq %rax, (%rsp) # 8-byte Spill - adcq -40(%rsp), %rbx # 8-byte Folded Reload - movq -8(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, %r8 - movq -24(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, -8(%rsp) # 8-byte Spill - movq -32(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, %r9 - movq -80(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, %r15 - movq -48(%rsp), %rax # 8-byte Reload - mulq %rcx - movq %rdx, -88(%rsp) # 8-byte Spill - movq %rax, %rbp - movq %rcx, %rax - mulq %rcx - movq %rax, %r12 - movq %rdx, -96(%rsp) # 8-byte Spill - addq %r11, %rbp - movq 8(%rsp), %rax # 8-byte Reload - movq %rbp, 24(%rax) - adcq %rdi, %r15 - adcq -104(%rsp), %r10 # 8-byte Folded Reload - adcq %r13, %r12 - movq %r9, %rcx - adcq %r14, %rcx - movq -8(%rsp), %rdi # 8-byte Reload - adcq (%rsp), %rdi # 8-byte Folded Reload - adcq %rbx, %r8 - sbbq %r14, %r14 - andl $1, %r14d - movq (%rsi), %r9 - movq 8(%rsi), %rbp - movq 40(%rsi), %r11 - movq %rbp, %rax - mulq %r11 - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, -40(%rsp) # 8-byte Spill - movq %r9, %rax - mulq %r11 - movq %rax, -48(%rsp) # 8-byte Spill - movq %rdx, -32(%rsp) # 8-byte Spill - movq 32(%rsi), %rbx - movq %rbp, %rax - mulq %rbx - movq %rdx, (%rsp) # 8-byte Spill - movq %rax, -112(%rsp) # 8-byte Spill - movq 
%r9, %rax - mulq %rbx - movq %rax, -120(%rsp) # 8-byte Spill - movq %rdx, -104(%rsp) # 8-byte Spill - addq -88(%rsp), %r15 # 8-byte Folded Reload - adcq -80(%rsp), %r10 # 8-byte Folded Reload - adcq -16(%rsp), %r12 # 8-byte Folded Reload - adcq -96(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -96(%rsp) # 8-byte Spill - adcq -72(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, -8(%rsp) # 8-byte Spill - adcq -64(%rsp), %r8 # 8-byte Folded Reload - adcq -56(%rsp), %r14 # 8-byte Folded Reload - movq 48(%rsi), %rax - movq %rax, -56(%rsp) # 8-byte Spill - mulq %rbx - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, %rcx - movq %r11, %rax - mulq %rbx - movq %rax, %rbp - movq %rdx, -16(%rsp) # 8-byte Spill - movq 24(%rsi), %rax - movq %rax, -72(%rsp) # 8-byte Spill - mulq %rbx - movq %rax, %rdi - movq %rdx, -88(%rsp) # 8-byte Spill - movq 16(%rsi), %rax - movq %rax, -80(%rsp) # 8-byte Spill - mulq %rbx - movq %rdx, -128(%rsp) # 8-byte Spill - movq %rax, %r9 - movq %rbx, %rax - mulq %rbx - movq %rax, %r13 - addq -120(%rsp), %r15 # 8-byte Folded Reload - movq 8(%rsp), %rax # 8-byte Reload - movq %r15, 32(%rax) - adcq -112(%rsp), %r10 # 8-byte Folded Reload - adcq %r12, %r9 - adcq -96(%rsp), %rdi # 8-byte Folded Reload - adcq -8(%rsp), %r13 # 8-byte Folded Reload - adcq %rbp, %r8 - adcq %r14, %rcx - sbbq %rbx, %rbx - andl $1, %ebx - addq -104(%rsp), %r10 # 8-byte Folded Reload - adcq (%rsp), %r9 # 8-byte Folded Reload - adcq -128(%rsp), %rdi # 8-byte Folded Reload - adcq -88(%rsp), %r13 # 8-byte Folded Reload - adcq %rdx, %r8 - adcq -16(%rsp), %rcx # 8-byte Folded Reload - adcq -64(%rsp), %rbx # 8-byte Folded Reload - movq -56(%rsp), %rax # 8-byte Reload - mulq %r11 - movq %rdx, -8(%rsp) # 8-byte Spill - movq %rax, (%rsp) # 8-byte Spill - movq -72(%rsp), %rax # 8-byte Reload - mulq %r11 - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, %r15 - movq -80(%rsp), %rax # 8-byte Reload - mulq %r11 - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, %r14 - movq %r11, %rax - mulq %r11 - movq %rax, %r12 - addq -48(%rsp), %r10 # 8-byte Folded Reload - movq 8(%rsp), %rax # 8-byte Reload - movq %r10, 40(%rax) - adcq -40(%rsp), %r9 # 8-byte Folded Reload - adcq %rdi, %r14 - adcq %r13, %r15 - adcq %rbp, %r8 - adcq %rcx, %r12 - movq (%rsp), %rax # 8-byte Reload - adcq %rbx, %rax - sbbq %r11, %r11 - andl $1, %r11d - addq -32(%rsp), %r9 # 8-byte Folded Reload - adcq -24(%rsp), %r14 # 8-byte Folded Reload - adcq -64(%rsp), %r15 # 8-byte Folded Reload - adcq -56(%rsp), %r8 # 8-byte Folded Reload - movq %r8, -32(%rsp) # 8-byte Spill - adcq -16(%rsp), %r12 # 8-byte Folded Reload - adcq %rdx, %rax - movq %rax, (%rsp) # 8-byte Spill - adcq -8(%rsp), %r11 # 8-byte Folded Reload - movq 48(%rsi), %rcx - movq %rcx, %rax - mulq 40(%rsi) - movq %rdx, -8(%rsp) # 8-byte Spill - movq %rax, %r8 - movq %rcx, %rax - mulq 32(%rsi) - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rax, %rbx - movq %rcx, %rax - mulq 24(%rsi) - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, %rbp - movq %rcx, %rax - mulq 16(%rsi) - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, %r10 - movq %rcx, %rax - mulq 8(%rsi) - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, %rdi - movq %rcx, %rax - mulq (%rsi) - movq %rdx, %r13 - movq %rax, %rsi - movq %rcx, %rax - mulq %rcx - addq %r9, %rsi - movq 8(%rsp), %r9 # 8-byte Reload - movq %rsi, 48(%r9) - adcq %r14, %rdi - adcq %r15, %r10 - adcq -32(%rsp), %rbp # 8-byte Folded Reload - adcq %r12, %rbx - adcq (%rsp), %r8 # 8-byte Folded Reload - adcq %r11, %rax - sbbq %rcx, %rcx - andl $1, %ecx - addq %r13, %rdi - adcq 
-48(%rsp), %r10 # 8-byte Folded Reload - movq %r9, %rsi - movq %rdi, 56(%rsi) - movq %r10, 64(%rsi) - adcq -40(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 72(%rsi) - adcq -24(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, 80(%rsi) - adcq -16(%rsp), %r8 # 8-byte Folded Reload - movq %r8, 88(%rsi) - adcq -8(%rsp), %rax # 8-byte Folded Reload - movq %rax, 96(%rsi) - adcq %rdx, %rcx - movq %rcx, 104(%rsi) - addq $16, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end97: - .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L - - .globl mcl_fp_mont7L - .align 16, 0x90 - .type mcl_fp_mont7L,@function -mcl_fp_mont7L: # @mcl_fp_mont7L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $96, %rsp - movq %rdx, 24(%rsp) # 8-byte Spill - movq %rdi, -96(%rsp) # 8-byte Spill - movq 48(%rsi), %rax - movq %rax, 16(%rsp) # 8-byte Spill - movq (%rdx), %rbx - mulq %rbx - movq %rax, 88(%rsp) # 8-byte Spill - movq %rdx, %r15 - movq 40(%rsi), %rax - movq %rax, 8(%rsp) # 8-byte Spill - mulq %rbx - movq %rax, 80(%rsp) # 8-byte Spill - movq %rdx, %r12 - movq 32(%rsi), %rax - movq %rax, -32(%rsp) # 8-byte Spill - movq 24(%rsi), %r9 - movq %r9, -24(%rsp) # 8-byte Spill - movq 16(%rsi), %r10 - movq %r10, -16(%rsp) # 8-byte Spill - movq (%rsi), %r13 - movq %r13, (%rsp) # 8-byte Spill - movq 8(%rsi), %rsi - movq %rsi, -8(%rsp) # 8-byte Spill - mulq %rbx - movq %rdx, %r14 - movq %rax, %r8 - movq %r9, %rax - mulq %rbx - movq %rdx, %rdi - movq %rax, %r9 - movq %r10, %rax - mulq %rbx - movq %rdx, %rbp - movq %rax, %r10 - movq %rsi, %rax - mulq %rbx - movq %rdx, %rsi - movq %rax, %r11 - movq %r13, %rax - mulq %rbx - movq %rax, -112(%rsp) # 8-byte Spill - addq %r11, %rdx - movq %rdx, -104(%rsp) # 8-byte Spill - adcq %r10, %rsi - movq %rsi, -88(%rsp) # 8-byte Spill - adcq %r9, %rbp - movq %rbp, -80(%rsp) # 8-byte Spill - adcq %r8, %rdi - movq %rdi, -72(%rsp) # 8-byte Spill - adcq 80(%rsp), %r14 # 8-byte Folded Reload - movq %r14, -64(%rsp) # 8-byte Spill - adcq 88(%rsp), %r12 # 8-byte Folded Reload - movq %r12, -48(%rsp) # 8-byte Spill - adcq $0, %r15 - movq %r15, -40(%rsp) # 8-byte Spill - movq -8(%rcx), %rdx - movq %rdx, 32(%rsp) # 8-byte Spill - movq %rax, %rdi - imulq %rdx, %rdi - movq (%rcx), %r12 - movq %r12, 40(%rsp) # 8-byte Spill - movq 48(%rcx), %rdx - movq %rdx, 64(%rsp) # 8-byte Spill - movq 40(%rcx), %r9 - movq %r9, 88(%rsp) # 8-byte Spill - movq 32(%rcx), %rbx - movq %rbx, 80(%rsp) # 8-byte Spill - movq 24(%rcx), %rsi - movq %rsi, 72(%rsp) # 8-byte Spill - movq 16(%rcx), %rbp - movq %rbp, 48(%rsp) # 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, 56(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq %rdx - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, -120(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq %r9 - movq %rdx, %r14 - movq %rax, -128(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq %rbx - movq %rdx, %r11 - movq %rax, %r15 - movq %rdi, %rax - mulq %rsi - movq %rdx, %rbx - movq %rax, %r10 - movq %rdi, %rax - mulq %rbp - movq %rdx, %r8 - movq %rax, %r13 - movq %rdi, %rax - mulq %rcx - movq %rdx, %rbp - movq %rax, %r9 - movq %rdi, %rax - mulq %r12 - movq %rdx, %r12 - addq %r9, %r12 - adcq %r13, %rbp - adcq %r10, %r8 - adcq %r15, %rbx - adcq -128(%rsp), %r11 # 8-byte Folded Reload - adcq -120(%rsp), %r14 # 8-byte Folded Reload - movq -56(%rsp), %rcx # 8-byte Reload - adcq $0, %rcx - addq -112(%rsp), %rax # 8-byte Folded Reload - adcq -104(%rsp), %r12 # 8-byte Folded Reload - adcq -88(%rsp), %rbp # 8-byte Folded Reload - 
adcq -80(%rsp), %r8 # 8-byte Folded Reload - adcq -72(%rsp), %rbx # 8-byte Folded Reload - adcq -64(%rsp), %r11 # 8-byte Folded Reload - adcq -48(%rsp), %r14 # 8-byte Folded Reload - adcq -40(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -56(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -72(%rsp) # 8-byte Spill - movq 24(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rdi - movq %rdi, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, -80(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, -88(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, -104(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, -112(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, %r13 - movq %rdi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %r10 - movq %rdi, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rax, -64(%rsp) # 8-byte Spill - movq %rdx, %rdi - addq %r10, %rdi - adcq %r13, %r15 - adcq -112(%rsp), %r9 # 8-byte Folded Reload - movq %rcx, %rdx - adcq -104(%rsp), %rdx # 8-byte Folded Reload - adcq -88(%rsp), %rsi # 8-byte Folded Reload - movq -48(%rsp), %rax # 8-byte Reload - adcq -80(%rsp), %rax # 8-byte Folded Reload - movq -40(%rsp), %rcx # 8-byte Reload - adcq $0, %rcx - movq -64(%rsp), %r10 # 8-byte Reload - addq %r12, %r10 - movq %r10, -64(%rsp) # 8-byte Spill - adcq %rbp, %rdi - adcq %r8, %r15 - adcq %rbx, %r9 - adcq %r11, %rdx - movq %rdx, -88(%rsp) # 8-byte Spill - adcq %r14, %rsi - movq %rsi, -80(%rsp) # 8-byte Spill - adcq -56(%rsp), %rax # 8-byte Folded Reload - movq %rax, -48(%rsp) # 8-byte Spill - adcq -72(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -40(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -56(%rsp) # 8-byte Spill - movq %r10, %rbp - imulq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, %rax - mulq 64(%rsp) # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 88(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 80(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 72(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %rbx - movq %rbp, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r10 - movq %rbp, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r14 - movq %rbp, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - addq %r14, %r11 - adcq %r10, %rsi - adcq %rbx, %rcx - adcq -120(%rsp), %r13 # 8-byte Folded Reload - adcq -112(%rsp), %r12 # 8-byte Folded Reload - adcq -104(%rsp), %r8 # 8-byte Folded Reload - movq -72(%rsp), %rbp # 8-byte Reload - adcq $0, %rbp - addq -64(%rsp), %rax # 8-byte Folded Reload - adcq %rdi, %r11 - adcq %r15, %rsi - adcq %r9, %rcx - adcq -88(%rsp), %r13 # 8-byte Folded Reload - adcq -80(%rsp), %r12 # 8-byte Folded Reload - adcq -48(%rsp), %r8 # 8-byte Folded Reload - movq %r8, -80(%rsp) # 8-byte Spill - adcq -40(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, -72(%rsp) # 8-byte Spill - movq -56(%rsp), %rbp # 8-byte Reload - adcq $0, %rbp - movq 24(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rdi - movq %rdi, %rax - mulq 16(%rsp) 
# 8-byte Folded Reload - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, -88(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, -112(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, -120(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %r9 - movq %rdi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r14 - movq %rdi, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rax, %r10 - movq %rdx, %r8 - addq %r14, %r8 - adcq %r9, %rbx - adcq -120(%rsp), %r15 # 8-byte Folded Reload - movq -64(%rsp), %r9 # 8-byte Reload - adcq -112(%rsp), %r9 # 8-byte Folded Reload - movq -56(%rsp), %rdi # 8-byte Reload - adcq -104(%rsp), %rdi # 8-byte Folded Reload - movq -48(%rsp), %rdx # 8-byte Reload - adcq -88(%rsp), %rdx # 8-byte Folded Reload - movq -40(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %r11, %r10 - adcq %rsi, %r8 - adcq %rcx, %rbx - adcq %r13, %r15 - adcq %r12, %r9 - movq %r9, -64(%rsp) # 8-byte Spill - adcq -80(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, -56(%rsp) # 8-byte Spill - adcq -72(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -48(%rsp) # 8-byte Spill - adcq %rbp, %rax - movq %rax, -40(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -88(%rsp) # 8-byte Spill - movq %r10, %rbp - imulq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, %rax - mulq 64(%rsp) # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 88(%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, -112(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 80(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 72(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, %rcx - movq %rbp, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r13 - movq %rbp, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r12 - movq %rbp, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - addq %r12, %r14 - adcq %r13, %rsi - adcq %rcx, %rdi - adcq -120(%rsp), %r9 # 8-byte Folded Reload - adcq -112(%rsp), %r11 # 8-byte Folded Reload - movq -80(%rsp), %rdx # 8-byte Reload - adcq -104(%rsp), %rdx # 8-byte Folded Reload - movq -72(%rsp), %rcx # 8-byte Reload - adcq $0, %rcx - addq %r10, %rax - adcq %r8, %r14 - adcq %rbx, %rsi - adcq %r15, %rdi - adcq -64(%rsp), %r9 # 8-byte Folded Reload - adcq -56(%rsp), %r11 # 8-byte Folded Reload - adcq -48(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - adcq -40(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -72(%rsp) # 8-byte Spill - adcq $0, -88(%rsp) # 8-byte Folded Spill - movq 24(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rcx - movq %rcx, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, -64(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, -112(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -24(%rsp) # 8-byte 
Folded Reload - movq %rdx, %r15 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r8 - movq %rcx, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r12 - movq %rcx, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rax, %r10 - movq %rdx, %r13 - addq %r12, %r13 - adcq %r8, %rbp - adcq -120(%rsp), %rbx # 8-byte Folded Reload - adcq -112(%rsp), %r15 # 8-byte Folded Reload - movq -56(%rsp), %rdx # 8-byte Reload - adcq -104(%rsp), %rdx # 8-byte Folded Reload - movq -48(%rsp), %rcx # 8-byte Reload - adcq -64(%rsp), %rcx # 8-byte Folded Reload - movq -40(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %r14, %r10 - adcq %rsi, %r13 - adcq %rdi, %rbp - adcq %r9, %rbx - adcq %r11, %r15 - adcq -80(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -56(%rsp) # 8-byte Spill - adcq -72(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -48(%rsp) # 8-byte Spill - adcq -88(%rsp), %rax # 8-byte Folded Reload - movq %rax, -40(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -64(%rsp) # 8-byte Spill - movq %r10, %rsi - imulq 32(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, %rax - mulq 64(%rsp) # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, -88(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 88(%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 80(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 72(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, -120(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r9 - movq %rsi, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r11 - movq %rsi, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - addq %r11, %r14 - adcq %r9, %r8 - adcq -120(%rsp), %rcx # 8-byte Folded Reload - adcq -112(%rsp), %rdi # 8-byte Folded Reload - adcq -104(%rsp), %r12 # 8-byte Folded Reload - movq -80(%rsp), %rsi # 8-byte Reload - adcq -88(%rsp), %rsi # 8-byte Folded Reload - movq -72(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %r10, %rax - adcq %r13, %r14 - adcq %rbp, %r8 - adcq %rbx, %rcx - adcq %r15, %rdi - adcq -56(%rsp), %r12 # 8-byte Folded Reload - adcq -48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -80(%rsp) # 8-byte Spill - adcq -40(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - movq -64(%rsp), %r11 # 8-byte Reload - adcq $0, %r11 - movq 24(%rsp), %rax # 8-byte Reload - movq 32(%rax), %rbp - movq %rbp, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, -88(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, -112(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, -120(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r9 - movq %rbp, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %r15 - movq %rbp, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rax, %rsi - movq %rdx, %r10 - addq %r15, %r10 - adcq %r9, %r13 - adcq -120(%rsp), %rbx # 8-byte Folded 
Reload - movq -64(%rsp), %r15 # 8-byte Reload - adcq -112(%rsp), %r15 # 8-byte Folded Reload - movq -56(%rsp), %rbp # 8-byte Reload - adcq -104(%rsp), %rbp # 8-byte Folded Reload - movq -48(%rsp), %rdx # 8-byte Reload - adcq -88(%rsp), %rdx # 8-byte Folded Reload - movq -40(%rsp), %rax # 8-byte Reload - adcq $0, %rax - movq %rsi, %r9 - addq %r14, %r9 - adcq %r8, %r10 - adcq %rcx, %r13 - adcq %rdi, %rbx - adcq %r12, %r15 - movq %r15, -64(%rsp) # 8-byte Spill - adcq -80(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, -56(%rsp) # 8-byte Spill - adcq -72(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -48(%rsp) # 8-byte Spill - adcq %r11, %rax - movq %rax, -40(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -88(%rsp) # 8-byte Spill - movq %r9, %rsi - movq %r9, %r11 - imulq 32(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, %rax - mulq 64(%rsp) # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 88(%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, -112(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 80(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 72(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, %rdi - movq %rsi, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r12 - movq %rsi, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r15 - movq %rsi, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - addq %r15, %r14 - adcq %r12, %rcx - adcq %rdi, %rbp - adcq -120(%rsp), %r9 # 8-byte Folded Reload - adcq -112(%rsp), %r8 # 8-byte Folded Reload - movq -80(%rsp), %rsi # 8-byte Reload - adcq -104(%rsp), %rsi # 8-byte Folded Reload - movq -72(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %r11, %rax - adcq %r10, %r14 - adcq %r13, %rcx - adcq %rbx, %rbp - adcq -64(%rsp), %r9 # 8-byte Folded Reload - adcq -56(%rsp), %r8 # 8-byte Folded Reload - adcq -48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -80(%rsp) # 8-byte Spill - adcq -40(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - adcq $0, -88(%rsp) # 8-byte Folded Spill - movq 24(%rsp), %rax # 8-byte Reload - movq 40(%rax), %rdi - movq %rdi, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, -64(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, -112(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %r10 - movq %rdi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r15 - movq %rdi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r13 - movq %rdi, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rax, %rdi - movq %rdx, %r11 - addq %r13, %r11 - adcq %r15, %rsi - adcq %r10, %rbx - adcq -112(%rsp), %r12 # 8-byte Folded Reload - movq -56(%rsp), %r10 # 8-byte Reload - adcq -104(%rsp), %r10 # 8-byte Folded Reload - movq -48(%rsp), %rdx # 8-byte Reload - adcq -64(%rsp), %rdx # 8-byte Folded Reload - movq -40(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %r14, %rdi - adcq %rcx, %r11 - adcq %rbp, %rsi - adcq %r9, %rbx - adcq %r8, %r12 - adcq -80(%rsp), %r10 # 8-byte Folded Reload - movq %r10, -56(%rsp) # 
8-byte Spill - adcq -72(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -48(%rsp) # 8-byte Spill - adcq -88(%rsp), %rax # 8-byte Folded Reload - movq %rax, -40(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -80(%rsp) # 8-byte Spill - movq %rdi, %rbp - imulq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, %rax - mulq 64(%rsp) # 8-byte Folded Reload - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, -88(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 88(%rsp) # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 80(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 72(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, -120(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %r8 - movq %rbp, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %r9 - movq %rbp, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - addq %r9, %r15 - adcq %r8, %r13 - adcq -120(%rsp), %r10 # 8-byte Folded Reload - adcq -112(%rsp), %rcx # 8-byte Folded Reload - adcq -104(%rsp), %r14 # 8-byte Folded Reload - movq -72(%rsp), %rdx # 8-byte Reload - adcq -88(%rsp), %rdx # 8-byte Folded Reload - movq -64(%rsp), %r8 # 8-byte Reload - adcq $0, %r8 - addq %rdi, %rax - adcq %r11, %r15 - adcq %rsi, %r13 - adcq %rbx, %r10 - adcq %r12, %rcx - adcq -56(%rsp), %r14 # 8-byte Folded Reload - movq %r14, -56(%rsp) # 8-byte Spill - adcq -48(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - adcq -40(%rsp), %r8 # 8-byte Folded Reload - movq %r8, -64(%rsp) # 8-byte Spill - movq -80(%rsp), %rsi # 8-byte Reload - adcq $0, %rsi - movq 24(%rsp), %rax # 8-byte Reload - movq 48(%rax), %rdi - movq %rdi, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, 16(%rsp) # 8-byte Spill - movq %rax, -40(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, 24(%rsp) # 8-byte Spill - movq %rax, 8(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -32(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r12 - movq %rdi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, %rbx - movq %rdi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, %rbp - movq %rdi, %rax - mulq (%rsp) # 8-byte Folded Reload - addq %rbp, %rdx - movq %rdx, %rbp - adcq %rbx, %r9 - adcq %r12, %r14 - movq %r8, %rdi - adcq -32(%rsp), %rdi # 8-byte Folded Reload - adcq 8(%rsp), %r11 # 8-byte Folded Reload - movq 24(%rsp), %rbx # 8-byte Reload - adcq -40(%rsp), %rbx # 8-byte Folded Reload - movq 16(%rsp), %r8 # 8-byte Reload - adcq $0, %r8 - addq %r15, %rax - movq %rax, -32(%rsp) # 8-byte Spill - adcq %r13, %rbp - movq %rbp, 8(%rsp) # 8-byte Spill - adcq %r10, %r9 - movq %r9, (%rsp) # 8-byte Spill - adcq %rcx, %r14 - movq %r14, -8(%rsp) # 8-byte Spill - adcq -56(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, -16(%rsp) # 8-byte Spill - adcq -72(%rsp), %r11 # 8-byte Folded Reload - movq %r11, -24(%rsp) # 8-byte Spill - adcq -64(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, 24(%rsp) # 8-byte Spill - adcq %rsi, %r8 - movq %r8, 16(%rsp) # 8-byte Spill - sbbq %rcx, %rcx - movq 32(%rsp), %r10 # 8-byte Reload - imulq %rax, %r10 - andl $1, %ecx - movq %r10, %rax - mulq 64(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq 
%rax, 32(%rsp) # 8-byte Spill - movq %r10, %rax - mulq 88(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -40(%rsp) # 8-byte Spill - movq %r10, %rax - mulq 80(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, -48(%rsp) # 8-byte Spill - movq %r10, %rax - mulq 72(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, -56(%rsp) # 8-byte Spill - movq %r10, %rax - movq 48(%rsp), %r13 # 8-byte Reload - mulq %r13 - movq %rdx, %rbp - movq %rax, %r12 - movq %r10, %rax - movq 40(%rsp), %r15 # 8-byte Reload - mulq %r15 - movq %rdx, %r11 - movq %rax, %r8 - movq %r10, %rax - movq 56(%rsp), %r14 # 8-byte Reload - mulq %r14 - addq %r11, %rax - adcq %r12, %rdx - adcq -56(%rsp), %rbp # 8-byte Folded Reload - adcq -48(%rsp), %rsi # 8-byte Folded Reload - adcq -40(%rsp), %rdi # 8-byte Folded Reload - adcq 32(%rsp), %r9 # 8-byte Folded Reload - adcq $0, %rbx - addq -32(%rsp), %r8 # 8-byte Folded Reload - adcq 8(%rsp), %rax # 8-byte Folded Reload - adcq (%rsp), %rdx # 8-byte Folded Reload - adcq -8(%rsp), %rbp # 8-byte Folded Reload - adcq -16(%rsp), %rsi # 8-byte Folded Reload - adcq -24(%rsp), %rdi # 8-byte Folded Reload - adcq 24(%rsp), %r9 # 8-byte Folded Reload - adcq 16(%rsp), %rbx # 8-byte Folded Reload - adcq $0, %rcx - movq %rax, %r8 - subq %r15, %r8 - movq %rdx, %r10 - sbbq %r14, %r10 - movq %rbp, %r11 - sbbq %r13, %r11 - movq %rsi, %r14 - sbbq 72(%rsp), %r14 # 8-byte Folded Reload - movq %rdi, %r15 - sbbq 80(%rsp), %r15 # 8-byte Folded Reload - movq %r9, %r12 - sbbq 88(%rsp), %r12 # 8-byte Folded Reload - movq %rbx, %r13 - sbbq 64(%rsp), %r13 # 8-byte Folded Reload - sbbq $0, %rcx - andl $1, %ecx - cmovneq %rbx, %r13 - testb %cl, %cl - cmovneq %rax, %r8 - movq -96(%rsp), %rax # 8-byte Reload - movq %r8, (%rax) - cmovneq %rdx, %r10 - movq %r10, 8(%rax) - cmovneq %rbp, %r11 - movq %r11, 16(%rax) - cmovneq %rsi, %r14 - movq %r14, 24(%rax) - cmovneq %rdi, %r15 - movq %r15, 32(%rax) - cmovneq %r9, %r12 - movq %r12, 40(%rax) - movq %r13, 48(%rax) - addq $96, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end98: - .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L - - .globl mcl_fp_montNF7L - .align 16, 0x90 - .type mcl_fp_montNF7L,@function -mcl_fp_montNF7L: # @mcl_fp_montNF7L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $80, %rsp - movq %rdx, 16(%rsp) # 8-byte Spill - movq %rdi, -96(%rsp) # 8-byte Spill - movq 48(%rsi), %rax - movq %rax, 8(%rsp) # 8-byte Spill - movq (%rdx), %rbp - mulq %rbp - movq %rax, 72(%rsp) # 8-byte Spill - movq %rdx, %r9 - movq 40(%rsi), %rax - movq %rax, (%rsp) # 8-byte Spill - mulq %rbp - movq %rax, 64(%rsp) # 8-byte Spill - movq %rdx, %r11 - movq 32(%rsi), %rax - movq %rax, -48(%rsp) # 8-byte Spill - movq 24(%rsi), %r8 - movq %r8, -40(%rsp) # 8-byte Spill - movq 16(%rsi), %rbx - movq %rbx, -32(%rsp) # 8-byte Spill - movq (%rsi), %r10 - movq %r10, -16(%rsp) # 8-byte Spill - movq 8(%rsi), %rsi - movq %rsi, -24(%rsp) # 8-byte Spill - mulq %rbp - movq %rdx, %rdi - movq %rax, 56(%rsp) # 8-byte Spill - movq %r8, %rax - mulq %rbp - movq %rdx, %r14 - movq %rax, %r15 - movq %rbx, %rax - mulq %rbp - movq %rdx, %rbx - movq %rax, %r13 - movq %rsi, %rax - mulq %rbp - movq %rdx, %rsi - movq %rax, %r12 - movq %r10, %rax - mulq %rbp - movq %rdx, %r8 - addq %r12, %r8 - adcq %r13, %rsi - movq %rsi, -104(%rsp) # 8-byte Spill - adcq %r15, %rbx - movq %rbx, -88(%rsp) # 8-byte Spill - adcq 56(%rsp), %r14 # 8-byte Folded Reload - movq %r14, %r12 - adcq 64(%rsp), %rdi # 8-byte Folded 
Reload - movq %rdi, -80(%rsp) # 8-byte Spill - adcq 72(%rsp), %r11 # 8-byte Folded Reload - movq %r11, -56(%rsp) # 8-byte Spill - adcq $0, %r9 - movq %r9, -64(%rsp) # 8-byte Spill - movq -8(%rcx), %rdx - movq %rdx, 24(%rsp) # 8-byte Spill - movq %rax, %r9 - movq %rax, %r14 - imulq %rdx, %r9 - movq (%rcx), %r11 - movq %r11, 32(%rsp) # 8-byte Spill - movq 48(%rcx), %rdx - movq %rdx, 72(%rsp) # 8-byte Spill - movq 40(%rcx), %r10 - movq %r10, 64(%rsp) # 8-byte Spill - movq 32(%rcx), %rbp - movq %rbp, 56(%rsp) # 8-byte Spill - movq 24(%rcx), %rbx - movq %rbx, 48(%rsp) # 8-byte Spill - movq 16(%rcx), %rdi - movq %rdi, 40(%rsp) # 8-byte Spill - movq 8(%rcx), %rsi - movq %rsi, -8(%rsp) # 8-byte Spill - movq %r9, %rax - mulq %rdx - movq %rdx, -112(%rsp) # 8-byte Spill - movq %rax, -72(%rsp) # 8-byte Spill - movq %r9, %rax - mulq %r10 - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %r15 - movq %r9, %rax - mulq %rbp - movq %rdx, -128(%rsp) # 8-byte Spill - movq %rax, %r13 - movq %r9, %rax - mulq %rbx - movq %rdx, %rbx - movq %rax, %rbp - movq %r9, %rax - mulq %rdi - movq %rdx, %rcx - movq %rax, %rdi - movq %r9, %rax - mulq %rsi - movq %rdx, %r10 - movq %rax, %rsi - movq %r9, %rax - mulq %r11 - addq %r14, %rax - adcq %r8, %rsi - adcq -104(%rsp), %rdi # 8-byte Folded Reload - adcq -88(%rsp), %rbp # 8-byte Folded Reload - adcq %r12, %r13 - adcq -80(%rsp), %r15 # 8-byte Folded Reload - movq -72(%rsp), %r8 # 8-byte Reload - adcq -56(%rsp), %r8 # 8-byte Folded Reload - movq -64(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %rdx, %rsi - adcq %r10, %rdi - adcq %rcx, %rbp - adcq %rbx, %r13 - adcq -128(%rsp), %r15 # 8-byte Folded Reload - adcq -120(%rsp), %r8 # 8-byte Folded Reload - movq %r8, -72(%rsp) # 8-byte Spill - adcq -112(%rsp), %rax # 8-byte Folded Reload - movq %rax, -64(%rsp) # 8-byte Spill - movq 16(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rbx - movq %rbx, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, -88(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -128(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r14 - movq %rbx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rax, %r10 - movq %rdx, %r12 - addq %r14, %r12 - adcq -128(%rsp), %rcx # 8-byte Folded Reload - adcq -120(%rsp), %r9 # 8-byte Folded Reload - adcq -112(%rsp), %r8 # 8-byte Folded Reload - adcq -104(%rsp), %r11 # 8-byte Folded Reload - movq -80(%rsp), %rdx # 8-byte Reload - adcq -88(%rsp), %rdx # 8-byte Folded Reload - movq -56(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %rsi, %r10 - adcq %rdi, %r12 - adcq %rbp, %rcx - adcq %r13, %r9 - adcq %r15, %r8 - adcq -72(%rsp), %r11 # 8-byte Folded Reload - adcq -64(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - adcq $0, %rax - movq %rax, -56(%rsp) # 8-byte Spill - movq %r10, %rbx - imulq 24(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, %rax - mulq 72(%rsp) # 8-byte Folded Reload - movq %rdx, -104(%rsp) # 8-byte Spill - movq %rax, -64(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq 64(%rsp) # 8-byte Folded Reload - movq 
%rdx, -112(%rsp) # 8-byte Spill - movq %rax, -72(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %r14 - movq %rbx, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, -128(%rsp) # 8-byte Spill - movq %rax, %rdi - movq %rbx, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %rbp - movq %rbx, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %rsi - movq %rbx, %rax - mulq 32(%rsp) # 8-byte Folded Reload - addq %r10, %rax - adcq %r12, %rsi - adcq %rcx, %rbp - adcq %r9, %rdi - adcq %r8, %r14 - movq -72(%rsp), %rcx # 8-byte Reload - adcq %r11, %rcx - movq -64(%rsp), %r8 # 8-byte Reload - adcq -80(%rsp), %r8 # 8-byte Folded Reload - movq -56(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %rdx, %rsi - adcq %r13, %rbp - adcq %r15, %rdi - movq %rdi, -88(%rsp) # 8-byte Spill - adcq -128(%rsp), %r14 # 8-byte Folded Reload - movq %r14, -80(%rsp) # 8-byte Spill - adcq -120(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -72(%rsp) # 8-byte Spill - adcq -112(%rsp), %r8 # 8-byte Folded Reload - movq %r8, -64(%rsp) # 8-byte Spill - adcq -104(%rsp), %rax # 8-byte Folded Reload - movq %rax, -56(%rsp) # 8-byte Spill - movq 16(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rcx - movq %rcx, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, -104(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r12 - movq %rcx, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r9 - movq %rcx, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %r11 - movq %rcx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rax, %r13 - movq %rdx, %rcx - addq %r11, %rcx - adcq %r9, %r15 - adcq %r12, %rbx - adcq -120(%rsp), %rdi # 8-byte Folded Reload - adcq -112(%rsp), %r14 # 8-byte Folded Reload - adcq -104(%rsp), %r10 # 8-byte Folded Reload - adcq $0, %r8 - addq %rsi, %r13 - adcq %rbp, %rcx - adcq -88(%rsp), %r15 # 8-byte Folded Reload - adcq -80(%rsp), %rbx # 8-byte Folded Reload - adcq -72(%rsp), %rdi # 8-byte Folded Reload - adcq -64(%rsp), %r14 # 8-byte Folded Reload - adcq -56(%rsp), %r10 # 8-byte Folded Reload - adcq $0, %r8 - movq %r13, %r9 - imulq 24(%rsp), %r9 # 8-byte Folded Reload - movq %r9, %rax - mulq 72(%rsp) # 8-byte Folded Reload - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, -64(%rsp) # 8-byte Spill - movq %r9, %rax - mulq 64(%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, -72(%rsp) # 8-byte Spill - movq %r9, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, -88(%rsp) # 8-byte Spill - movq %rax, %rbp - movq %r9, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, -104(%rsp) # 8-byte Spill - movq %rax, %r11 - movq %r9, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, -112(%rsp) # 8-byte Spill - movq %rax, %r12 - movq %r9, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %rsi - movq %r9, %rax - mulq 32(%rsp) # 8-byte Folded Reload - addq %r13, %rax - adcq %rcx, %rsi - adcq %r15, %r12 - adcq %rbx, %r11 - adcq %rdi, %rbp - movq -72(%rsp), %rcx # 8-byte Reload - adcq %r14, %rcx - movq -64(%rsp), %rax # 8-byte Reload - adcq %r10, %rax 
- adcq $0, %r8 - addq %rdx, %rsi - adcq -120(%rsp), %r12 # 8-byte Folded Reload - adcq -112(%rsp), %r11 # 8-byte Folded Reload - adcq -104(%rsp), %rbp # 8-byte Folded Reload - adcq -88(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -72(%rsp) # 8-byte Spill - adcq -80(%rsp), %rax # 8-byte Folded Reload - movq %rax, -64(%rsp) # 8-byte Spill - adcq -56(%rsp), %r8 # 8-byte Folded Reload - movq 16(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rdi - movq %rdi, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, -80(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, -88(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, -112(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %rbx - movq %rdi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r9 - movq %rdi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rax, %r14 - movq %rdx, %rdi - addq %r9, %rdi - adcq %rbx, %rcx - adcq -120(%rsp), %r10 # 8-byte Folded Reload - adcq -112(%rsp), %r13 # 8-byte Folded Reload - adcq -104(%rsp), %r15 # 8-byte Folded Reload - movq -88(%rsp), %rdx # 8-byte Reload - adcq -80(%rsp), %rdx # 8-byte Folded Reload - movq -56(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %rsi, %r14 - adcq %r12, %rdi - adcq %r11, %rcx - adcq %rbp, %r10 - adcq -72(%rsp), %r13 # 8-byte Folded Reload - adcq -64(%rsp), %r15 # 8-byte Folded Reload - adcq %r8, %rdx - movq %rdx, -88(%rsp) # 8-byte Spill - adcq $0, %rax - movq %rax, -56(%rsp) # 8-byte Spill - movq %r14, %rsi - imulq 24(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, %rax - mulq 72(%rsp) # 8-byte Folded Reload - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, -72(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 64(%rsp) # 8-byte Folded Reload - movq %rdx, -104(%rsp) # 8-byte Spill - movq %rax, -80(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, -112(%rsp) # 8-byte Spill - movq %rax, %r12 - movq %rsi, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %r8 - movq %rsi, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, %rbp - movq %rsi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, %rbx - movq %rsi, %rax - mulq 32(%rsp) # 8-byte Folded Reload - addq %r14, %rax - adcq %rdi, %rbx - adcq %rcx, %rbp - adcq %r10, %r8 - adcq %r13, %r12 - movq -80(%rsp), %rsi # 8-byte Reload - adcq %r15, %rsi - movq -72(%rsp), %rcx # 8-byte Reload - adcq -88(%rsp), %rcx # 8-byte Folded Reload - movq -56(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %rdx, %rbx - adcq %r9, %rbp - adcq %r11, %r8 - adcq -120(%rsp), %r12 # 8-byte Folded Reload - adcq -112(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -80(%rsp) # 8-byte Spill - adcq -104(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -72(%rsp) # 8-byte Spill - adcq -64(%rsp), %rax # 8-byte Folded Reload - movq %rax, -56(%rsp) # 8-byte Spill - movq 16(%rsp), %rax # 8-byte Reload - movq 32(%rax), %rsi - movq %rsi, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, -104(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, -88(%rsp) # 8-byte Spill - movq %rax, 
-112(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -128(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r14 - movq %rsi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r15 - movq %rsi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rax, %r11 - movq %rdx, %r10 - addq %r15, %r10 - adcq %r14, %rdi - adcq -128(%rsp), %rcx # 8-byte Folded Reload - adcq -120(%rsp), %r9 # 8-byte Folded Reload - adcq -112(%rsp), %r13 # 8-byte Folded Reload - movq -88(%rsp), %rdx # 8-byte Reload - adcq -104(%rsp), %rdx # 8-byte Folded Reload - movq -64(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %rbx, %r11 - adcq %rbp, %r10 - adcq %r8, %rdi - adcq %r12, %rcx - adcq -80(%rsp), %r9 # 8-byte Folded Reload - adcq -72(%rsp), %r13 # 8-byte Folded Reload - adcq -56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -88(%rsp) # 8-byte Spill - adcq $0, %rax - movq %rax, -64(%rsp) # 8-byte Spill - movq %r11, %rsi - imulq 24(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, %rax - mulq 72(%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, -56(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 64(%rsp) # 8-byte Folded Reload - movq %rdx, -104(%rsp) # 8-byte Spill - movq %rax, -72(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, -112(%rsp) # 8-byte Spill - movq %rax, %r14 - movq %rsi, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %r12 - movq %rsi, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %rbp - movq %rsi, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %rbx - movq %rsi, %rax - mulq 32(%rsp) # 8-byte Folded Reload - addq %r11, %rax - adcq %r10, %rbx - adcq %rdi, %rbp - adcq %rcx, %r12 - adcq %r9, %r14 - movq -72(%rsp), %rdi # 8-byte Reload - adcq %r13, %rdi - movq -56(%rsp), %rcx # 8-byte Reload - adcq -88(%rsp), %rcx # 8-byte Folded Reload - movq -64(%rsp), %rax # 8-byte Reload - adcq $0, %rax - addq %rdx, %rbx - adcq %r8, %rbp - adcq %r15, %r12 - adcq -120(%rsp), %r14 # 8-byte Folded Reload - movq %r14, -88(%rsp) # 8-byte Spill - adcq -112(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, -72(%rsp) # 8-byte Spill - adcq -104(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -56(%rsp) # 8-byte Spill - adcq -80(%rsp), %rax # 8-byte Folded Reload - movq %rax, -64(%rsp) # 8-byte Spill - movq 16(%rsp), %rax # 8-byte Reload - movq 40(%rax), %rcx - movq %rcx, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -104(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, -112(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -120(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, -128(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r15 - movq %rcx, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r14 - movq %rcx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rax, %r11 - movq %rdx, %r10 - addq %r14, %r10 - adcq %r15, %r8 - adcq -128(%rsp), %rdi # 8-byte Folded Reload - adcq -120(%rsp), %rsi 
# 8-byte Folded Reload - adcq -112(%rsp), %r13 # 8-byte Folded Reload - movq -80(%rsp), %rax # 8-byte Reload - adcq -104(%rsp), %rax # 8-byte Folded Reload - adcq $0, %r9 - addq %rbx, %r11 - adcq %rbp, %r10 - adcq %r12, %r8 - adcq -88(%rsp), %rdi # 8-byte Folded Reload - adcq -72(%rsp), %rsi # 8-byte Folded Reload - adcq -56(%rsp), %r13 # 8-byte Folded Reload - adcq -64(%rsp), %rax # 8-byte Folded Reload - movq %rax, -80(%rsp) # 8-byte Spill - adcq $0, %r9 - movq %r11, %rbx - imulq 24(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, %rax - mulq 72(%rsp) # 8-byte Folded Reload - movq %rdx, -88(%rsp) # 8-byte Spill - movq %rax, -56(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq 64(%rsp) # 8-byte Folded Reload - movq %rdx, -104(%rsp) # 8-byte Spill - movq %rax, -64(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, -112(%rsp) # 8-byte Spill - movq %rax, %r12 - movq %rbx, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, %r15 - movq %rbx, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, -120(%rsp) # 8-byte Spill - movq %rax, %rbp - movq %rbx, %rax - mulq -8(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, %rcx - movq %rbx, %rax - mulq 32(%rsp) # 8-byte Folded Reload - addq %r11, %rax - adcq %r10, %rcx - adcq %r8, %rbp - adcq %rdi, %r15 - adcq %rsi, %r12 - movq -64(%rsp), %rsi # 8-byte Reload - adcq %r13, %rsi - movq -56(%rsp), %rax # 8-byte Reload - adcq -80(%rsp), %rax # 8-byte Folded Reload - adcq $0, %r9 - addq %rdx, %rcx - adcq %r14, %rbp - adcq -120(%rsp), %r15 # 8-byte Folded Reload - adcq -72(%rsp), %r12 # 8-byte Folded Reload - movq %r12, -72(%rsp) # 8-byte Spill - adcq -112(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -64(%rsp) # 8-byte Spill - adcq -104(%rsp), %rax # 8-byte Folded Reload - movq %rax, -56(%rsp) # 8-byte Spill - adcq -88(%rsp), %r9 # 8-byte Folded Reload - movq 16(%rsp), %rax # 8-byte Reload - movq 48(%rax), %rdi - movq %rdi, %rax - mulq 8(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -80(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, 16(%rsp) # 8-byte Spill - movq %rax, (%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -48(%rsp) # 8-byte Folded Reload - movq %rdx, 8(%rsp) # 8-byte Spill - movq %rax, -48(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -40(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -40(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq -32(%rsp) # 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, %rsi - movq %rdi, %rax - mulq -24(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %rbx - movq %rdi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rax, %r12 - movq %rdx, %r8 - addq %rbx, %r8 - adcq %rsi, %r10 - adcq -40(%rsp), %r11 # 8-byte Folded Reload - adcq -48(%rsp), %r13 # 8-byte Folded Reload - movq 8(%rsp), %rdx # 8-byte Reload - adcq (%rsp), %rdx # 8-byte Folded Reload - movq 16(%rsp), %rax # 8-byte Reload - adcq -80(%rsp), %rax # 8-byte Folded Reload - adcq $0, %r14 - addq %rcx, %r12 - adcq %rbp, %r8 - adcq %r15, %r10 - adcq -72(%rsp), %r11 # 8-byte Folded Reload - adcq -64(%rsp), %r13 # 8-byte Folded Reload - adcq -56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 8(%rsp) # 8-byte Spill - adcq %r9, %rax - movq %rax, 16(%rsp) # 8-byte Spill - adcq $0, %r14 - movq 24(%rsp), %rdi # 8-byte Reload - imulq %r12, %rdi - movq %rdi, %rax - mulq 72(%rsp) # 8-byte Folded Reload - movq %rdx, 24(%rsp) # 8-byte Spill - movq %rax, %r9 - movq %rdi, %rax - mulq 64(%rsp) # 8-byte 
Folded Reload - movq %rdx, (%rsp) # 8-byte Spill - movq %rax, %rbp - movq %rdi, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rax, %rsi - movq %rdi, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, %rcx - movq %rdi, %rax - mulq 32(%rsp) # 8-byte Folded Reload - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, %r15 - movq %rdi, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, %rbx - movq %rdi, %rax - movq -8(%rsp), %rdi # 8-byte Reload - mulq %rdi - addq %r12, %r15 - adcq %r8, %rax - adcq %r10, %rbx - adcq %r11, %rcx - adcq %r13, %rsi - adcq 8(%rsp), %rbp # 8-byte Folded Reload - adcq 16(%rsp), %r9 # 8-byte Folded Reload - adcq $0, %r14 - addq -32(%rsp), %rax # 8-byte Folded Reload - adcq %rdx, %rbx - adcq -40(%rsp), %rcx # 8-byte Folded Reload - adcq -24(%rsp), %rsi # 8-byte Folded Reload - adcq -16(%rsp), %rbp # 8-byte Folded Reload - adcq (%rsp), %r9 # 8-byte Folded Reload - adcq 24(%rsp), %r14 # 8-byte Folded Reload - movq %rax, %r13 - subq 32(%rsp), %r13 # 8-byte Folded Reload - movq %rbx, %r12 - sbbq %rdi, %r12 - movq %rcx, %r8 - sbbq 40(%rsp), %r8 # 8-byte Folded Reload - movq %rsi, %r10 - sbbq 48(%rsp), %r10 # 8-byte Folded Reload - movq %rbp, %r11 - sbbq 56(%rsp), %r11 # 8-byte Folded Reload - movq %r9, %r15 - sbbq 64(%rsp), %r15 # 8-byte Folded Reload - movq %r14, %rdx - sbbq 72(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, %rdi - sarq $63, %rdi - cmovsq %rax, %r13 - movq -96(%rsp), %rax # 8-byte Reload - movq %r13, (%rax) - cmovsq %rbx, %r12 - movq %r12, 8(%rax) - cmovsq %rcx, %r8 - movq %r8, 16(%rax) - cmovsq %rsi, %r10 - movq %r10, 24(%rax) - cmovsq %rbp, %r11 - movq %r11, 32(%rax) - cmovsq %r9, %r15 - movq %r15, 40(%rax) - cmovsq %r14, %rdx - movq %rdx, 48(%rax) - addq $80, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end99: - .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L - - .globl mcl_fp_montRed7L - .align 16, 0x90 - .type mcl_fp_montRed7L,@function -mcl_fp_montRed7L: # @mcl_fp_montRed7L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $64, %rsp - movq %rdx, %rcx - movq %rdi, -104(%rsp) # 8-byte Spill - movq -8(%rcx), %rax - movq %rax, 8(%rsp) # 8-byte Spill - movq (%rcx), %rdx - movq %rdx, 32(%rsp) # 8-byte Spill - movq (%rsi), %rbp - movq %rbp, 24(%rsp) # 8-byte Spill - imulq %rax, %rbp - movq 48(%rcx), %rdx - movq %rdx, -16(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq %rdx - movq %rax, -8(%rsp) # 8-byte Spill - movq %rdx, -48(%rsp) # 8-byte Spill - movq 40(%rcx), %rdx - movq %rdx, (%rsp) # 8-byte Spill - movq 32(%rcx), %r10 - movq %r10, 56(%rsp) # 8-byte Spill - movq 24(%rcx), %rdi - movq %rdi, 48(%rsp) # 8-byte Spill - movq 16(%rcx), %rbx - movq %rbx, 40(%rsp) # 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, 16(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq %rdx - movq %rdx, %r13 - movq %rax, %r9 - movq %rbp, %rax - mulq %r10 - movq %rdx, %r15 - movq %rax, %r11 - movq %rbp, %rax - mulq %rdi - movq %rdx, %r10 - movq %rax, %r8 - movq %rbp, %rax - mulq %rbx - movq %rdx, %r14 - movq %rax, %rbx - movq %rbp, %rax - mulq %rcx - movq %rdx, %r12 - movq %rax, %rdi - movq %rbp, %rax - mulq 32(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - addq %rdi, %rbp - adcq %rbx, %r12 - adcq %r8, %r14 - adcq %r11, %r10 - adcq %r9, %r15 - adcq -8(%rsp), %r13 # 8-byte Folded Reload - movq -48(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq 24(%rsp), %rax # 
8-byte Folded Reload - adcq 8(%rsi), %rbp - adcq 16(%rsi), %r12 - adcq 24(%rsi), %r14 - adcq 32(%rsi), %r10 - adcq 40(%rsi), %r15 - adcq 48(%rsi), %r13 - movq %r13, -80(%rsp) # 8-byte Spill - adcq 56(%rsi), %rdx - movq %rdx, -48(%rsp) # 8-byte Spill - movq 104(%rsi), %r8 - movq 96(%rsi), %rdx - movq 88(%rsi), %rdi - movq 80(%rsi), %rbx - movq 72(%rsi), %rax - movq 64(%rsi), %rsi - adcq $0, %rsi - movq %rsi, -88(%rsp) # 8-byte Spill - adcq $0, %rax - movq %rax, -96(%rsp) # 8-byte Spill - adcq $0, %rbx - movq %rbx, -40(%rsp) # 8-byte Spill - adcq $0, %rdi - movq %rdi, -32(%rsp) # 8-byte Spill - adcq $0, %rdx - movq %rdx, -24(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, -8(%rsp) # 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, 24(%rsp) # 8-byte Spill - movq %rbp, %rdi - imulq 8(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, -112(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, -120(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, -128(%rsp) # 8-byte Spill - movq %rdi, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %rbx - movq %rdi, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r11 - movq %rdi, %rax - mulq %rcx - movq %rdx, %r8 - movq %rax, %rcx - movq %rdi, %rax - mulq 32(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - addq %rcx, %r9 - adcq %r11, %r8 - adcq %rbx, %rsi - adcq -128(%rsp), %r13 # 8-byte Folded Reload - movq -72(%rsp), %rdi # 8-byte Reload - adcq -120(%rsp), %rdi # 8-byte Folded Reload - movq -64(%rsp), %rdx # 8-byte Reload - adcq -112(%rsp), %rdx # 8-byte Folded Reload - movq -56(%rsp), %rcx # 8-byte Reload - adcq $0, %rcx - addq %rbp, %rax - adcq %r12, %r9 - adcq %r14, %r8 - adcq %r10, %rsi - adcq %r15, %r13 - adcq -80(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, -72(%rsp) # 8-byte Spill - adcq -48(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -64(%rsp) # 8-byte Spill - adcq -88(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -56(%rsp) # 8-byte Spill - adcq $0, -96(%rsp) # 8-byte Folded Spill - adcq $0, -40(%rsp) # 8-byte Folded Spill - adcq $0, -32(%rsp) # 8-byte Folded Spill - adcq $0, -24(%rsp) # 8-byte Folded Spill - movq -8(%rsp), %rbx # 8-byte Reload - adcq $0, %rbx - adcq $0, 24(%rsp) # 8-byte Folded Spill - movq %r9, %rcx - imulq 8(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, -8(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - movq %rax, -112(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, -88(%rsp) # 8-byte Spill - movq %rax, -120(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %r11 - movq %rcx, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r12 - movq %rcx, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r14 - movq %rcx, %rax - mulq 32(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - addq %r14, %r10 - adcq %r12, %rdi - adcq %r11, %rbp - adcq -120(%rsp), %r15 # 8-byte Folded Reload - movq -88(%rsp), %r11 # 8-byte Reload - adcq -112(%rsp), %r11 # 8-byte Folded Reload - movq -80(%rsp), %rdx # 8-byte Reload - adcq -8(%rsp), 
%rdx # 8-byte Folded Reload - movq -48(%rsp), %rcx # 8-byte Reload - adcq $0, %rcx - addq %r9, %rax - adcq %r8, %r10 - adcq %rsi, %rdi - adcq %r13, %rbp - adcq -72(%rsp), %r15 # 8-byte Folded Reload - adcq -64(%rsp), %r11 # 8-byte Folded Reload - movq %r11, -88(%rsp) # 8-byte Spill - adcq -56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -80(%rsp) # 8-byte Spill - adcq -96(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -48(%rsp) # 8-byte Spill - adcq $0, -40(%rsp) # 8-byte Folded Spill - adcq $0, -32(%rsp) # 8-byte Folded Spill - adcq $0, -24(%rsp) # 8-byte Folded Spill - adcq $0, %rbx - movq %rbx, -8(%rsp) # 8-byte Spill - adcq $0, 24(%rsp) # 8-byte Folded Spill - movq %r10, %rbx - imulq 8(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, %rax - movq -16(%rsp), %r12 # 8-byte Reload - mulq %r12 - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, -96(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, -112(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, -72(%rsp) # 8-byte Spill - movq %rax, -120(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r14 - movq %rbx, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r13 - movq %rbx, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r11 - movq %rbx, %rax - mulq 32(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - addq %r11, %r9 - adcq %r13, %rcx - adcq %r14, %rsi - adcq -120(%rsp), %r8 # 8-byte Folded Reload - movq -72(%rsp), %r11 # 8-byte Reload - adcq -112(%rsp), %r11 # 8-byte Folded Reload - movq -64(%rsp), %rbx # 8-byte Reload - adcq -96(%rsp), %rbx # 8-byte Folded Reload - movq -56(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %r10, %rax - adcq %rdi, %r9 - adcq %rbp, %rcx - adcq %r15, %rsi - adcq -88(%rsp), %r8 # 8-byte Folded Reload - adcq -80(%rsp), %r11 # 8-byte Folded Reload - movq %r11, -72(%rsp) # 8-byte Spill - adcq -48(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, -64(%rsp) # 8-byte Spill - adcq -40(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -56(%rsp) # 8-byte Spill - adcq $0, -32(%rsp) # 8-byte Folded Spill - adcq $0, -24(%rsp) # 8-byte Folded Spill - adcq $0, -8(%rsp) # 8-byte Folded Spill - adcq $0, 24(%rsp) # 8-byte Folded Spill - movq %r9, %rbp - imulq 8(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, %rax - mulq %r12 - movq %rdx, -40(%rsp) # 8-byte Spill - movq %rax, -80(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq (%rsp) # 8-byte Folded Reload - movq %rdx, -48(%rsp) # 8-byte Spill - movq %rax, -88(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, -96(%rsp) # 8-byte Spill - movq %rbp, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %r14 - movq %rbp, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r11 - movq %rbp, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r12 - movq %rbp, %rax - movq 32(%rsp), %rbp # 8-byte Reload - mulq %rbp - movq %rdx, %r10 - addq %r12, %r10 - adcq %r11, %rbx - adcq %r14, %rdi - adcq -96(%rsp), %r13 # 8-byte Folded Reload - adcq -88(%rsp), %r15 # 8-byte Folded Reload - movq -48(%rsp), %r11 # 8-byte Reload - adcq -80(%rsp), %r11 # 8-byte Folded Reload - movq -40(%rsp), %rdx # 8-byte Reload - adcq $0, %rdx - addq %r9, %rax - adcq %rcx, %r10 - adcq %rsi, %rbx - adcq %r8, %rdi - adcq -72(%rsp), %r13 # 8-byte Folded Reload - adcq 
-64(%rsp), %r15 # 8-byte Folded Reload - adcq -56(%rsp), %r11 # 8-byte Folded Reload - movq %r11, -48(%rsp) # 8-byte Spill - adcq -32(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -40(%rsp) # 8-byte Spill - adcq $0, -24(%rsp) # 8-byte Folded Spill - adcq $0, -8(%rsp) # 8-byte Folded Spill - adcq $0, 24(%rsp) # 8-byte Folded Spill - movq %r10, %rsi - imulq 8(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, -72(%rsp) # 8-byte Spill - movq %rsi, %rax - movq (%rsp), %r8 # 8-byte Reload - mulq %r8 - movq %rdx, -56(%rsp) # 8-byte Spill - movq %rax, -80(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, -64(%rsp) # 8-byte Spill - movq %rax, -88(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, -96(%rsp) # 8-byte Spill - movq %rsi, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, %r11 - movq %rsi, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, %rcx - movq %rsi, %rax - mulq %rbp - movq %rdx, %rbp - addq %rcx, %rbp - adcq %r11, %r14 - adcq -96(%rsp), %r9 # 8-byte Folded Reload - adcq -88(%rsp), %r12 # 8-byte Folded Reload - movq -64(%rsp), %rsi # 8-byte Reload - adcq -80(%rsp), %rsi # 8-byte Folded Reload - movq -56(%rsp), %rdx # 8-byte Reload - adcq -72(%rsp), %rdx # 8-byte Folded Reload - movq -32(%rsp), %rcx # 8-byte Reload - adcq $0, %rcx - addq %r10, %rax - adcq %rbx, %rbp - adcq %rdi, %r14 - adcq %r13, %r9 - adcq %r15, %r12 - adcq -48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, -64(%rsp) # 8-byte Spill - adcq -40(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -56(%rsp) # 8-byte Spill - adcq -24(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -32(%rsp) # 8-byte Spill - adcq $0, -8(%rsp) # 8-byte Folded Spill - adcq $0, 24(%rsp) # 8-byte Folded Spill - movq 8(%rsp), %rcx # 8-byte Reload - imulq %rbp, %rcx - movq %rcx, %rax - mulq -16(%rsp) # 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, 8(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq %r8 - movq %rdx, %r13 - movq %rax, -24(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq 56(%rsp) # 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, -40(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq 48(%rsp) # 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, -48(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq 40(%rsp) # 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r8 - movq %rcx, %rax - mulq 16(%rsp) # 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r11 - movq %rcx, %rax - mulq 32(%rsp) # 8-byte Folded Reload - addq %r11, %rdx - adcq %r8, %rbx - adcq -48(%rsp), %rdi # 8-byte Folded Reload - adcq -40(%rsp), %r10 # 8-byte Folded Reload - adcq -24(%rsp), %r15 # 8-byte Folded Reload - adcq 8(%rsp), %r13 # 8-byte Folded Reload - adcq $0, %rsi - addq %rbp, %rax - adcq %r14, %rdx - adcq %r9, %rbx - adcq %r12, %rdi - adcq -64(%rsp), %r10 # 8-byte Folded Reload - adcq -56(%rsp), %r15 # 8-byte Folded Reload - adcq -32(%rsp), %r13 # 8-byte Folded Reload - adcq -8(%rsp), %rsi # 8-byte Folded Reload - movq 24(%rsp), %rcx # 8-byte Reload - adcq $0, %rcx - movq %rdx, %rax - subq 32(%rsp), %rax # 8-byte Folded Reload - movq %rbx, %rbp - sbbq 16(%rsp), %rbp # 8-byte Folded Reload - movq %rdi, %r8 - sbbq 40(%rsp), %r8 # 8-byte Folded Reload - movq %r10, %r9 - sbbq 48(%rsp), %r9 # 8-byte Folded Reload - movq %r15, %r11 - sbbq 56(%rsp), %r11 # 8-byte Folded Reload - movq %r13, %r14 - sbbq (%rsp), %r14 # 8-byte Folded 
Reload - movq %rsi, %r12 - sbbq -16(%rsp), %r12 # 8-byte Folded Reload - sbbq $0, %rcx - andl $1, %ecx - cmovneq %rsi, %r12 - testb %cl, %cl - cmovneq %rdx, %rax - movq -104(%rsp), %rcx # 8-byte Reload - movq %rax, (%rcx) - cmovneq %rbx, %rbp - movq %rbp, 8(%rcx) - cmovneq %rdi, %r8 - movq %r8, 16(%rcx) - cmovneq %r10, %r9 - movq %r9, 24(%rcx) - cmovneq %r15, %r11 - movq %r11, 32(%rcx) - cmovneq %r13, %r14 - movq %r14, 40(%rcx) - movq %r12, 48(%rcx) - addq $64, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end100: - .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L - - .globl mcl_fp_addPre7L - .align 16, 0x90 - .type mcl_fp_addPre7L,@function -mcl_fp_addPre7L: # @mcl_fp_addPre7L -# BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 48(%rdx), %r8 - movq 48(%rsi), %r14 - movq 40(%rdx), %r9 - movq 40(%rsi), %r15 - movq 32(%rdx), %r10 - movq 24(%rdx), %r11 - movq 16(%rdx), %r12 - movq (%rdx), %rcx - movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - movq 24(%rsi), %rax - movq 32(%rsi), %rbx - adcq 16(%rsi), %r12 - movq %rcx, (%rdi) - movq %rdx, 8(%rdi) - movq %r12, 16(%rdi) - adcq %r11, %rax - movq %rax, 24(%rdi) - adcq %r10, %rbx - movq %rbx, 32(%rdi) - adcq %r9, %r15 - movq %r15, 40(%rdi) - adcq %r8, %r14 - movq %r14, 48(%rdi) - sbbq %rax, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq -.Lfunc_end101: - .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L - - .globl mcl_fp_subPre7L - .align 16, 0x90 - .type mcl_fp_subPre7L,@function -mcl_fp_subPre7L: # @mcl_fp_subPre7L -# BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 48(%rdx), %r8 - movq 48(%rsi), %r10 - movq 40(%rdx), %r9 - movq 40(%rsi), %r15 - movq 24(%rdx), %r11 - movq 32(%rdx), %r14 - movq (%rsi), %rbx - movq 8(%rsi), %r12 - xorl %eax, %eax - subq (%rdx), %rbx - sbbq 8(%rdx), %r12 - movq 16(%rsi), %rcx - sbbq 16(%rdx), %rcx - movq 32(%rsi), %rdx - movq 24(%rsi), %rsi - movq %rbx, (%rdi) - movq %r12, 8(%rdi) - movq %rcx, 16(%rdi) - sbbq %r11, %rsi - movq %rsi, 24(%rdi) - sbbq %r14, %rdx - movq %rdx, 32(%rdi) - sbbq %r9, %r15 - movq %r15, 40(%rdi) - sbbq %r8, %r10 - movq %r10, 48(%rdi) - sbbq $0, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq -.Lfunc_end102: - .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L - - .globl mcl_fp_shr1_7L - .align 16, 0x90 - .type mcl_fp_shr1_7L,@function -mcl_fp_shr1_7L: # @mcl_fp_shr1_7L -# BB#0: - movq 48(%rsi), %r8 - movq 40(%rsi), %r9 - movq 32(%rsi), %r10 - movq 24(%rsi), %rax - movq 16(%rsi), %rcx - movq (%rsi), %rdx - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rdx - movq %rdx, (%rdi) - shrdq $1, %rcx, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rax, %rcx - movq %rcx, 16(%rdi) - shrdq $1, %r10, %rax - movq %rax, 24(%rdi) - shrdq $1, %r9, %r10 - movq %r10, 32(%rdi) - shrdq $1, %r8, %r9 - movq %r9, 40(%rdi) - shrq %r8 - movq %r8, 48(%rdi) - retq -.Lfunc_end103: - .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L - - .globl mcl_fp_add7L - .align 16, 0x90 - .type mcl_fp_add7L,@function -mcl_fp_add7L: # @mcl_fp_add7L -# BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 48(%rdx), %r14 - movq 48(%rsi), %r8 - movq 40(%rdx), %r15 - movq 40(%rsi), %r9 - movq 32(%rdx), %r12 - movq 24(%rdx), %r13 - movq 16(%rdx), %r10 - movq (%rdx), %r11 - movq 8(%rdx), %rdx - addq (%rsi), %r11 - adcq 8(%rsi), %rdx - movq 24(%rsi), %rax - movq 32(%rsi), %rbx - adcq 16(%rsi), %r10 - movq %r11, (%rdi) - movq %rdx, 8(%rdi) - movq %r10, 16(%rdi) - adcq %r13, %rax 
- movq %rax, 24(%rdi) - adcq %r12, %rbx - movq %rbx, 32(%rdi) - adcq %r15, %r9 - movq %r9, 40(%rdi) - adcq %r14, %r8 - movq %r8, 48(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %r11 - sbbq 8(%rcx), %rdx - sbbq 16(%rcx), %r10 - sbbq 24(%rcx), %rax - sbbq 32(%rcx), %rbx - sbbq 40(%rcx), %r9 - sbbq 48(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne .LBB104_2 -# BB#1: # %nocarry - movq %r11, (%rdi) - movq %rdx, 8(%rdi) - movq %r10, 16(%rdi) - movq %rax, 24(%rdi) - movq %rbx, 32(%rdi) - movq %r9, 40(%rdi) - movq %r8, 48(%rdi) -.LBB104_2: # %carry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq -.Lfunc_end104: - .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L - - .globl mcl_fp_addNF7L - .align 16, 0x90 - .type mcl_fp_addNF7L,@function -mcl_fp_addNF7L: # @mcl_fp_addNF7L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 48(%rdx), %r9 - movq 40(%rdx), %rbp - movq 32(%rdx), %r10 - movq 24(%rdx), %r11 - movq 16(%rdx), %r14 - movq (%rdx), %r12 - movq 8(%rdx), %r15 - addq (%rsi), %r12 - adcq 8(%rsi), %r15 - adcq 16(%rsi), %r14 - adcq 24(%rsi), %r11 - adcq 32(%rsi), %r10 - adcq 40(%rsi), %rbp - movq %rbp, -8(%rsp) # 8-byte Spill - adcq 48(%rsi), %r9 - movq %r12, %rsi - subq (%rcx), %rsi - movq %r15, %rdx - sbbq 8(%rcx), %rdx - movq %r14, %rax - sbbq 16(%rcx), %rax - movq %r11, %rbx - sbbq 24(%rcx), %rbx - movq %r10, %r13 - sbbq 32(%rcx), %r13 - sbbq 40(%rcx), %rbp - movq %r9, %r8 - sbbq 48(%rcx), %r8 - movq %r8, %rcx - sarq $63, %rcx - cmovsq %r12, %rsi - movq %rsi, (%rdi) - cmovsq %r15, %rdx - movq %rdx, 8(%rdi) - cmovsq %r14, %rax - movq %rax, 16(%rdi) - cmovsq %r11, %rbx - movq %rbx, 24(%rdi) - cmovsq %r10, %r13 - movq %r13, 32(%rdi) - cmovsq -8(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 40(%rdi) - cmovsq %r9, %r8 - movq %r8, 48(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end105: - .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L - - .globl mcl_fp_sub7L - .align 16, 0x90 - .type mcl_fp_sub7L,@function -mcl_fp_sub7L: # @mcl_fp_sub7L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 48(%rdx), %r14 - movq 48(%rsi), %r8 - movq 40(%rdx), %r15 - movq 40(%rsi), %r9 - movq 32(%rdx), %r12 - movq (%rsi), %rax - movq 8(%rsi), %r11 - xorl %ebx, %ebx - subq (%rdx), %rax - sbbq 8(%rdx), %r11 - movq 16(%rsi), %r13 - sbbq 16(%rdx), %r13 - movq 32(%rsi), %r10 - movq 24(%rsi), %rsi - sbbq 24(%rdx), %rsi - movq %rax, (%rdi) - movq %r11, 8(%rdi) - movq %r13, 16(%rdi) - movq %rsi, 24(%rdi) - sbbq %r12, %r10 - movq %r10, 32(%rdi) - sbbq %r15, %r9 - movq %r9, 40(%rdi) - sbbq %r14, %r8 - movq %r8, 48(%rdi) - sbbq $0, %rbx - testb $1, %bl - je .LBB106_2 -# BB#1: # %carry - movq 48(%rcx), %r14 - movq 40(%rcx), %r15 - movq 32(%rcx), %r12 - movq 24(%rcx), %rbx - movq 8(%rcx), %rdx - movq 16(%rcx), %rbp - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %r11, %rdx - movq %rdx, 8(%rdi) - adcq %r13, %rbp - movq %rbp, 16(%rdi) - adcq %rsi, %rbx - movq %rbx, 24(%rdi) - adcq %r10, %r12 - movq %r12, 32(%rdi) - adcq %r9, %r15 - movq %r15, 40(%rdi) - adcq %r8, %r14 - movq %r14, 48(%rdi) -.LBB106_2: # %nocarry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end106: - .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L - - .globl mcl_fp_subNF7L - .align 16, 0x90 - .type mcl_fp_subNF7L,@function -mcl_fp_subNF7L: # @mcl_fp_subNF7L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq 
48(%rsi), %r12 - movq 40(%rsi), %rax - movq 32(%rsi), %r9 - movq 24(%rsi), %r10 - movq 16(%rsi), %r11 - movq (%rsi), %r14 - movq 8(%rsi), %r15 - subq (%rdx), %r14 - sbbq 8(%rdx), %r15 - sbbq 16(%rdx), %r11 - sbbq 24(%rdx), %r10 - sbbq 32(%rdx), %r9 - sbbq 40(%rdx), %rax - movq %rax, -8(%rsp) # 8-byte Spill - sbbq 48(%rdx), %r12 - movq %r12, %rax - sarq $63, %rax - movq %rax, %rsi - shldq $1, %r12, %rsi - andq (%r8), %rsi - movq 48(%r8), %r13 - andq %rax, %r13 - movq 40(%r8), %rbx - andq %rax, %rbx - movq 32(%r8), %rdx - andq %rax, %rdx - movq 24(%r8), %rbp - andq %rax, %rbp - movq 16(%r8), %rcx - andq %rax, %rcx - andq 8(%r8), %rax - addq %r14, %rsi - adcq %r15, %rax - movq %rsi, (%rdi) - movq %rax, 8(%rdi) - adcq %r11, %rcx - movq %rcx, 16(%rdi) - adcq %r10, %rbp - movq %rbp, 24(%rdi) - adcq %r9, %rdx - movq %rdx, 32(%rdi) - adcq -8(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, 40(%rdi) - adcq %r12, %r13 - movq %r13, 48(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end107: - .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L - - .globl mcl_fpDbl_add7L - .align 16, 0x90 - .type mcl_fpDbl_add7L,@function -mcl_fpDbl_add7L: # @mcl_fpDbl_add7L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq 104(%rdx), %rax - movq %rax, -16(%rsp) # 8-byte Spill - movq 96(%rdx), %rax - movq %rax, -24(%rsp) # 8-byte Spill - movq 88(%rdx), %r11 - movq 80(%rdx), %r14 - movq 24(%rsi), %r15 - movq 32(%rsi), %r12 - movq 16(%rdx), %r9 - movq (%rdx), %rax - movq 8(%rdx), %rbx - addq (%rsi), %rax - adcq 8(%rsi), %rbx - adcq 16(%rsi), %r9 - adcq 24(%rdx), %r15 - adcq 32(%rdx), %r12 - movq 72(%rdx), %r13 - movq 64(%rdx), %rbp - movq %rax, (%rdi) - movq 56(%rdx), %r10 - movq %rbx, 8(%rdi) - movq 48(%rdx), %rcx - movq 40(%rdx), %rdx - movq %r9, 16(%rdi) - movq 104(%rsi), %r9 - movq %r15, 24(%rdi) - movq 40(%rsi), %rbx - adcq %rdx, %rbx - movq 96(%rsi), %r15 - movq %r12, 32(%rdi) - movq 48(%rsi), %rdx - adcq %rcx, %rdx - movq 88(%rsi), %rax - movq %rbx, 40(%rdi) - movq 56(%rsi), %rcx - adcq %r10, %rcx - movq 80(%rsi), %r12 - movq %rdx, 48(%rdi) - movq 72(%rsi), %rdx - movq 64(%rsi), %rsi - adcq %rbp, %rsi - adcq %r13, %rdx - adcq %r14, %r12 - adcq %r11, %rax - movq %rax, -8(%rsp) # 8-byte Spill - adcq -24(%rsp), %r15 # 8-byte Folded Reload - movq %r15, -24(%rsp) # 8-byte Spill - adcq -16(%rsp), %r9 # 8-byte Folded Reload - sbbq %rbp, %rbp - andl $1, %ebp - movq %rcx, %rbx - subq (%r8), %rbx - movq %rsi, %r10 - sbbq 8(%r8), %r10 - movq %rdx, %r11 - sbbq 16(%r8), %r11 - movq %r12, %r14 - sbbq 24(%r8), %r14 - movq -8(%rsp), %r13 # 8-byte Reload - sbbq 32(%r8), %r13 - sbbq 40(%r8), %r15 - movq %r9, %rax - sbbq 48(%r8), %rax - sbbq $0, %rbp - andl $1, %ebp - cmovneq %rcx, %rbx - movq %rbx, 56(%rdi) - testb %bpl, %bpl - cmovneq %rsi, %r10 - movq %r10, 64(%rdi) - cmovneq %rdx, %r11 - movq %r11, 72(%rdi) - cmovneq %r12, %r14 - movq %r14, 80(%rdi) - cmovneq -8(%rsp), %r13 # 8-byte Folded Reload - movq %r13, 88(%rdi) - cmovneq -24(%rsp), %r15 # 8-byte Folded Reload - movq %r15, 96(%rdi) - cmovneq %r9, %rax - movq %rax, 104(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end108: - .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L - - .globl mcl_fpDbl_sub7L - .align 16, 0x90 - .type mcl_fpDbl_sub7L,@function -mcl_fpDbl_sub7L: # @mcl_fpDbl_sub7L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq 104(%rdx), %rax - movq 
%rax, -8(%rsp) # 8-byte Spill - movq 96(%rdx), %r10 - movq 88(%rdx), %r14 - movq 16(%rsi), %rax - movq (%rsi), %r15 - movq 8(%rsi), %r11 - xorl %ecx, %ecx - subq (%rdx), %r15 - sbbq 8(%rdx), %r11 - sbbq 16(%rdx), %rax - movq 24(%rsi), %rbx - sbbq 24(%rdx), %rbx - movq 32(%rsi), %r12 - sbbq 32(%rdx), %r12 - movq 80(%rdx), %r13 - movq 72(%rdx), %rbp - movq %r15, (%rdi) - movq 64(%rdx), %r9 - movq %r11, 8(%rdi) - movq 56(%rdx), %r15 - movq %rax, 16(%rdi) - movq 48(%rdx), %r11 - movq 40(%rdx), %rdx - movq %rbx, 24(%rdi) - movq 40(%rsi), %rbx - sbbq %rdx, %rbx - movq 104(%rsi), %rax - movq %r12, 32(%rdi) - movq 48(%rsi), %r12 - sbbq %r11, %r12 - movq 96(%rsi), %r11 - movq %rbx, 40(%rdi) - movq 56(%rsi), %rdx - sbbq %r15, %rdx - movq 88(%rsi), %r15 - movq %r12, 48(%rdi) - movq 64(%rsi), %rbx - sbbq %r9, %rbx - movq 80(%rsi), %r12 - movq 72(%rsi), %r9 - sbbq %rbp, %r9 - sbbq %r13, %r12 - sbbq %r14, %r15 - sbbq %r10, %r11 - sbbq -8(%rsp), %rax # 8-byte Folded Reload - movq %rax, -8(%rsp) # 8-byte Spill - movl $0, %ebp - sbbq $0, %rbp - andl $1, %ebp - movq (%r8), %r10 - cmoveq %rcx, %r10 - testb %bpl, %bpl - movq 16(%r8), %rbp - cmoveq %rcx, %rbp - movq 8(%r8), %rsi - cmoveq %rcx, %rsi - movq 48(%r8), %r14 - cmoveq %rcx, %r14 - movq 40(%r8), %r13 - cmoveq %rcx, %r13 - movq 32(%r8), %rax - cmoveq %rcx, %rax - cmovneq 24(%r8), %rcx - addq %rdx, %r10 - adcq %rbx, %rsi - movq %r10, 56(%rdi) - movq %rsi, 64(%rdi) - adcq %r9, %rbp - movq %rbp, 72(%rdi) - adcq %r12, %rcx - movq %rcx, 80(%rdi) - adcq %r15, %rax - movq %rax, 88(%rdi) - adcq %r11, %r13 - movq %r13, 96(%rdi) - adcq -8(%rsp), %r14 # 8-byte Folded Reload - movq %r14, 104(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end109: - .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L - - .align 16, 0x90 - .type .LmulPv512x64,@function -.LmulPv512x64: # @mulPv512x64 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rcx, %rax - mulq (%rsi) - movq %rdx, -24(%rsp) # 8-byte Spill - movq %rax, (%rdi) - movq %rcx, %rax - mulq 56(%rsi) - movq %rdx, %r10 - movq %rax, -8(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq 48(%rsi) - movq %rdx, %r11 - movq %rax, -16(%rsp) # 8-byte Spill - movq %rcx, %rax - mulq 40(%rsi) - movq %rdx, %r12 - movq %rax, %r15 - movq %rcx, %rax - mulq 32(%rsi) - movq %rdx, %rbx - movq %rax, %r13 - movq %rcx, %rax - mulq 24(%rsi) - movq %rdx, %rbp - movq %rax, %r8 - movq %rcx, %rax - mulq 16(%rsi) - movq %rdx, %r9 - movq %rax, %r14 - movq %rcx, %rax - mulq 8(%rsi) - addq -24(%rsp), %rax # 8-byte Folded Reload - movq %rax, 8(%rdi) - adcq %r14, %rdx - movq %rdx, 16(%rdi) - adcq %r8, %r9 - movq %r9, 24(%rdi) - adcq %r13, %rbp - movq %rbp, 32(%rdi) - adcq %r15, %rbx - movq %rbx, 40(%rdi) - adcq -16(%rsp), %r12 # 8-byte Folded Reload - movq %r12, 48(%rdi) - adcq -8(%rsp), %r11 # 8-byte Folded Reload - movq %r11, 56(%rdi) - adcq $0, %r10 - movq %r10, 64(%rdi) - movq %rdi, %rax - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end110: - .size .LmulPv512x64, .Lfunc_end110-.LmulPv512x64 - - .globl mcl_fp_mulUnitPre8L - .align 16, 0x90 - .type mcl_fp_mulUnitPre8L,@function -mcl_fp_mulUnitPre8L: # @mcl_fp_mulUnitPre8L -# BB#0: - pushq %rbx - subq $80, %rsp - movq %rdi, %rbx - leaq 8(%rsp), %rdi - callq .LmulPv512x64 - movq 72(%rsp), %r8 - movq 64(%rsp), %r9 - movq 56(%rsp), %r10 - movq 48(%rsp), %r11 - movq 40(%rsp), %rdi - movq 32(%rsp), %rax - movq 24(%rsp), %rcx - movq 8(%rsp), %rdx - movq 16(%rsp), 
%rsi - movq %rdx, (%rbx) - movq %rsi, 8(%rbx) - movq %rcx, 16(%rbx) - movq %rax, 24(%rbx) - movq %rdi, 32(%rbx) - movq %r11, 40(%rbx) - movq %r10, 48(%rbx) - movq %r9, 56(%rbx) - movq %r8, 64(%rbx) - addq $80, %rsp - popq %rbx - retq -.Lfunc_end111: - .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L - - .globl mcl_fpDbl_mulPre8L - .align 16, 0x90 - .type mcl_fpDbl_mulPre8L,@function -mcl_fpDbl_mulPre8L: # @mcl_fpDbl_mulPre8L -# BB#0: - pushq %rbp - movq %rsp, %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $200, %rsp - movq %rdx, %rbx - movq %rsi, %r15 - movq %rdi, %r14 - callq mcl_fpDbl_mulPre4L@PLT - leaq 64(%r14), %rdi - leaq 32(%r15), %rsi - leaq 32(%rbx), %rdx - callq mcl_fpDbl_mulPre4L@PLT - movq 56(%rbx), %r10 - movq 48(%rbx), %rcx - movq (%rbx), %rdx - movq 8(%rbx), %rsi - addq 32(%rbx), %rdx - adcq 40(%rbx), %rsi - adcq 16(%rbx), %rcx - adcq 24(%rbx), %r10 - pushfq - popq %r8 - xorl %r9d, %r9d - movq 56(%r15), %rdi - movq 48(%r15), %r13 - movq (%r15), %r12 - movq 8(%r15), %rbx - addq 32(%r15), %r12 - adcq 40(%r15), %rbx - adcq 16(%r15), %r13 - adcq 24(%r15), %rdi - movl $0, %eax - cmovbq %r10, %rax - movq %rax, -176(%rbp) # 8-byte Spill - movl $0, %eax - cmovbq %rcx, %rax - movq %rax, -184(%rbp) # 8-byte Spill - movl $0, %eax - cmovbq %rsi, %rax - movq %rax, -192(%rbp) # 8-byte Spill - movl $0, %eax - cmovbq %rdx, %rax - movq %rax, -200(%rbp) # 8-byte Spill - sbbq %r15, %r15 - movq %r12, -136(%rbp) - movq %rbx, -128(%rbp) - movq %r13, -120(%rbp) - movq %rdi, -112(%rbp) - movq %rdx, -168(%rbp) - movq %rsi, -160(%rbp) - movq %rcx, -152(%rbp) - movq %r10, -144(%rbp) - pushq %r8 - popfq - cmovaeq %r9, %rdi - movq %rdi, -216(%rbp) # 8-byte Spill - cmovaeq %r9, %r13 - cmovaeq %r9, %rbx - cmovaeq %r9, %r12 - sbbq %rax, %rax - movq %rax, -208(%rbp) # 8-byte Spill - leaq -104(%rbp), %rdi - leaq -136(%rbp), %rsi - leaq -168(%rbp), %rdx - callq mcl_fpDbl_mulPre4L@PLT - addq -200(%rbp), %r12 # 8-byte Folded Reload - adcq -192(%rbp), %rbx # 8-byte Folded Reload - adcq -184(%rbp), %r13 # 8-byte Folded Reload - movq -216(%rbp), %r10 # 8-byte Reload - adcq -176(%rbp), %r10 # 8-byte Folded Reload - sbbq %rax, %rax - andl $1, %eax - movq -208(%rbp), %rdx # 8-byte Reload - andl %edx, %r15d - andl $1, %r15d - addq -72(%rbp), %r12 - adcq -64(%rbp), %rbx - adcq -56(%rbp), %r13 - adcq -48(%rbp), %r10 - adcq %rax, %r15 - movq -80(%rbp), %rax - movq -88(%rbp), %rcx - movq -104(%rbp), %rsi - movq -96(%rbp), %rdx - subq (%r14), %rsi - sbbq 8(%r14), %rdx - sbbq 16(%r14), %rcx - sbbq 24(%r14), %rax - movq 32(%r14), %rdi - movq %rdi, -184(%rbp) # 8-byte Spill - movq 40(%r14), %r8 - movq %r8, -176(%rbp) # 8-byte Spill - sbbq %rdi, %r12 - sbbq %r8, %rbx - movq 48(%r14), %rdi - movq %rdi, -192(%rbp) # 8-byte Spill - sbbq %rdi, %r13 - movq 56(%r14), %rdi - movq %rdi, -200(%rbp) # 8-byte Spill - sbbq %rdi, %r10 - sbbq $0, %r15 - movq 64(%r14), %r11 - subq %r11, %rsi - movq 72(%r14), %rdi - movq %rdi, -208(%rbp) # 8-byte Spill - sbbq %rdi, %rdx - movq 80(%r14), %rdi - movq %rdi, -216(%rbp) # 8-byte Spill - sbbq %rdi, %rcx - movq 88(%r14), %rdi - movq %rdi, -224(%rbp) # 8-byte Spill - sbbq %rdi, %rax - movq 96(%r14), %rdi - movq %rdi, -232(%rbp) # 8-byte Spill - sbbq %rdi, %r12 - movq 104(%r14), %rdi - sbbq %rdi, %rbx - movq 112(%r14), %r8 - sbbq %r8, %r13 - movq 120(%r14), %r9 - sbbq %r9, %r10 - sbbq $0, %r15 - addq -184(%rbp), %rsi # 8-byte Folded Reload - adcq -176(%rbp), %rdx # 8-byte Folded Reload - movq %rsi, 32(%r14) - adcq -192(%rbp), %rcx # 8-byte Folded Reload - movq 
%rdx, 40(%r14) - adcq -200(%rbp), %rax # 8-byte Folded Reload - movq %rcx, 48(%r14) - adcq %r11, %r12 - movq %rax, 56(%r14) - movq %r12, 64(%r14) - adcq -208(%rbp), %rbx # 8-byte Folded Reload - movq %rbx, 72(%r14) - adcq -216(%rbp), %r13 # 8-byte Folded Reload - movq %r13, 80(%r14) - adcq -224(%rbp), %r10 # 8-byte Folded Reload - movq %r10, 88(%r14) - adcq -232(%rbp), %r15 # 8-byte Folded Reload - movq %r15, 96(%r14) - adcq $0, %rdi - movq %rdi, 104(%r14) - adcq $0, %r8 - movq %r8, 112(%r14) - adcq $0, %r9 - movq %r9, 120(%r14) - addq $200, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end112: - .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L - - .globl mcl_fpDbl_sqrPre8L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre8L,@function -mcl_fpDbl_sqrPre8L: # @mcl_fpDbl_sqrPre8L -# BB#0: - pushq %rbp - movq %rsp, %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $200, %rsp - movq %rsi, %r14 - movq %rdi, %rbx - movq %r14, %rdx - callq mcl_fpDbl_mulPre4L@PLT - leaq 64(%rbx), %rdi - leaq 32(%r14), %rsi - movq %rsi, %rdx - callq mcl_fpDbl_mulPre4L@PLT - movq (%r14), %r12 - movq 8(%r14), %r15 - addq 32(%r14), %r12 - adcq 40(%r14), %r15 - pushfq - popq %rax - movq %r12, -136(%rbp) - movq %r12, -168(%rbp) - addq %r12, %r12 - movq %r15, -128(%rbp) - movq %r15, -160(%rbp) - adcq %r15, %r15 - pushfq - popq %rcx - movq 56(%r14), %r13 - movq 48(%r14), %rdx - pushq %rax - popfq - adcq 16(%r14), %rdx - adcq 24(%r14), %r13 - pushfq - popq %r8 - pushfq - popq %rsi - pushfq - popq %rdi - sbbq %rax, %rax - movq %rax, -184(%rbp) # 8-byte Spill - xorl %eax, %eax - pushq %rdi - popfq - cmovaeq %rax, %r15 - movq %r15, -176(%rbp) # 8-byte Spill - cmovaeq %rax, %r12 - movq %rdx, -120(%rbp) - movq %rdx, -152(%rbp) - movq %rdx, %r15 - pushq %rcx - popfq - adcq %r15, %r15 - movq %r13, %r14 - movq %r13, -112(%rbp) - movq %r13, -144(%rbp) - adcq %r13, %r13 - pushq %rsi - popfq - cmovaeq %rax, %r13 - cmovaeq %rax, %r15 - shrq $63, %r14 - pushq %r8 - popfq - cmovaeq %rax, %r14 - leaq -104(%rbp), %rdi - leaq -136(%rbp), %rsi - leaq -168(%rbp), %rdx - callq mcl_fpDbl_mulPre4L@PLT - movq -184(%rbp), %rax # 8-byte Reload - andl $1, %eax - addq -72(%rbp), %r12 - movq -176(%rbp), %r8 # 8-byte Reload - adcq -64(%rbp), %r8 - adcq -56(%rbp), %r15 - adcq -48(%rbp), %r13 - adcq %r14, %rax - movq %rax, %rdi - movq -80(%rbp), %rax - movq -88(%rbp), %rcx - movq -104(%rbp), %rsi - movq -96(%rbp), %rdx - subq (%rbx), %rsi - sbbq 8(%rbx), %rdx - sbbq 16(%rbx), %rcx - sbbq 24(%rbx), %rax - movq 32(%rbx), %r10 - movq %r10, -184(%rbp) # 8-byte Spill - movq 40(%rbx), %r9 - movq %r9, -176(%rbp) # 8-byte Spill - sbbq %r10, %r12 - sbbq %r9, %r8 - movq %r8, %r10 - movq 48(%rbx), %r8 - movq %r8, -192(%rbp) # 8-byte Spill - sbbq %r8, %r15 - movq 56(%rbx), %r8 - movq %r8, -200(%rbp) # 8-byte Spill - sbbq %r8, %r13 - sbbq $0, %rdi - movq 64(%rbx), %r11 - subq %r11, %rsi - movq 72(%rbx), %r8 - movq %r8, -208(%rbp) # 8-byte Spill - sbbq %r8, %rdx - movq 80(%rbx), %r8 - movq %r8, -216(%rbp) # 8-byte Spill - sbbq %r8, %rcx - movq 88(%rbx), %r8 - movq %r8, -224(%rbp) # 8-byte Spill - sbbq %r8, %rax - movq 96(%rbx), %r8 - movq %r8, -232(%rbp) # 8-byte Spill - sbbq %r8, %r12 - movq 104(%rbx), %r14 - sbbq %r14, %r10 - movq 112(%rbx), %r8 - sbbq %r8, %r15 - movq 120(%rbx), %r9 - sbbq %r9, %r13 - sbbq $0, %rdi - addq -184(%rbp), %rsi # 8-byte Folded Reload - adcq -176(%rbp), %rdx # 8-byte Folded Reload - movq %rsi, 32(%rbx) - adcq -192(%rbp), %rcx # 8-byte Folded Reload - movq %rdx, 
40(%rbx)
- adcq -200(%rbp), %rax # 8-byte Folded Reload
- movq %rcx, 48(%rbx)
- adcq %r11, %r12
- movq %rax, 56(%rbx)
- movq %r12, 64(%rbx)
- adcq -208(%rbp), %r10 # 8-byte Folded Reload
- movq %r10, 72(%rbx)
- adcq -216(%rbp), %r15 # 8-byte Folded Reload
- movq %r15, 80(%rbx)
- adcq -224(%rbp), %r13 # 8-byte Folded Reload
- movq %r13, 88(%rbx)
- adcq -232(%rbp), %rdi # 8-byte Folded Reload
- movq %rdi, 96(%rbx)
- adcq $0, %r14
- movq %r14, 104(%rbx)
- adcq $0, %r8
- movq %r8, 112(%rbx)
- adcq $0, %r9
- movq %r9, 120(%rbx)
- addq $200, %rsp
- popq %rbx
- popq %r12
- popq %r13
- popq %r14
- popq %r15
- popq %rbp
- retq
-.Lfunc_end113:
- .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L
-
- .globl mcl_fp_mont8L
- .align 16, 0x90
- .type mcl_fp_mont8L,@function
-mcl_fp_mont8L: # @mcl_fp_mont8L
-# BB#0:
- pushq %rbp
- pushq %r15
- pushq %r14
- pushq %r13
- pushq %r12
- pushq %rbx
- subq $1256, %rsp # imm = 0x4E8
- movq %rcx, %r13
- movq %r13, 40(%rsp) # 8-byte Spill
- movq %rdx, 16(%rsp) # 8-byte Spill
- movq %rsi, 24(%rsp) # 8-byte Spill
- movq %rdi, (%rsp) # 8-byte Spill
- movq -8(%r13), %rbx
- movq %rbx, 32(%rsp) # 8-byte Spill
- movq (%rdx), %rdx
- leaq 1184(%rsp), %rdi
- callq .LmulPv512x64
- movq 1184(%rsp), %r15
- movq 1192(%rsp), %r14
- movq %r15, %rdx
- imulq %rbx, %rdx
- movq 1248(%rsp), %rax
- movq %rax, 96(%rsp) # 8-byte Spill
- movq 1240(%rsp), %rax
- movq %rax, 80(%rsp) # 8-byte Spill
- movq 1232(%rsp), %rax
- movq %rax, 88(%rsp) # 8-byte Spill
- movq 1224(%rsp), %rax
- movq %rax, 72(%rsp) # 8-byte Spill
- movq 1216(%rsp), %r12
- movq 1208(%rsp), %rbx
- movq 1200(%rsp), %rbp
- leaq 1112(%rsp), %rdi
- movq %r13, %rsi
- callq .LmulPv512x64
- addq 1112(%rsp), %r15
- adcq 1120(%rsp), %r14
- adcq 1128(%rsp), %rbp
- movq %rbp, 8(%rsp) # 8-byte Spill
- adcq 1136(%rsp), %rbx
- movq %rbx, 48(%rsp) # 8-byte Spill
- adcq 1144(%rsp), %r12
- movq %r12, 64(%rsp) # 8-byte Spill
- movq 72(%rsp), %r13 # 8-byte Reload
- adcq 1152(%rsp), %r13
- movq 88(%rsp), %rbx # 8-byte Reload
- adcq 1160(%rsp), %rbx
- movq 80(%rsp), %rbp # 8-byte Reload
- adcq 1168(%rsp), %rbp
- movq 96(%rsp), %rax # 8-byte Reload
- adcq 1176(%rsp), %rax
- movq %rax, 96(%rsp) # 8-byte Spill
- sbbq %r15, %r15
- movq 16(%rsp), %rax # 8-byte Reload
- movq 8(%rax), %rdx
- leaq 1040(%rsp), %rdi
- movq 24(%rsp), %rsi # 8-byte Reload
- callq .LmulPv512x64
- andl $1, %r15d
- addq 1040(%rsp), %r14
- movq 8(%rsp), %rax # 8-byte Reload
- adcq 1048(%rsp), %rax
- movq %rax, 8(%rsp) # 8-byte Spill
- movq 48(%rsp), %rax # 8-byte Reload
- adcq 1056(%rsp), %rax
- movq %rax, %r12
- movq 64(%rsp), %rax # 8-byte Reload
- adcq 1064(%rsp), %rax
- movq %rax, 64(%rsp) # 8-byte Spill
- adcq 1072(%rsp), %r13
- movq %r13, 72(%rsp) # 8-byte Spill
- adcq 1080(%rsp), %rbx
- movq %rbx, 88(%rsp) # 8-byte Spill
- adcq 1088(%rsp), %rbp
- movq 96(%rsp), %rax # 8-byte Reload
- adcq 1096(%rsp), %rax
- movq %rax, 96(%rsp) # 8-byte Spill
- adcq 1104(%rsp), %r15
- movq %r15, 56(%rsp) # 8-byte Spill
- sbbq %r15, %r15
- movq %r14, %rdx
- imulq 32(%rsp), %rdx # 8-byte Folded Reload
- leaq 968(%rsp), %rdi
- movq 40(%rsp), %rsi # 8-byte Reload
- callq .LmulPv512x64
- andl $1, %r15d
- addq 968(%rsp), %r14
- movq 8(%rsp), %r13 # 8-byte Reload
- adcq 976(%rsp), %r13
- adcq 984(%rsp), %r12
- movq %r12, 48(%rsp) # 8-byte Spill
- movq 64(%rsp), %r14 # 8-byte Reload
- adcq 992(%rsp), %r14
- movq 72(%rsp), %rbx # 8-byte Reload
- adcq 1000(%rsp), %rbx
- movq 88(%rsp), %rax # 8-byte Reload
- adcq 1008(%rsp), %rax
- movq %rax, 88(%rsp) # 8-byte Spill
- adcq 1016(%rsp), %rbp
- movq %rbp, %r12
- movq 96(%rsp), %rbp # 8-byte Reload
- adcq 1024(%rsp), %rbp
- movq 56(%rsp), %rax # 8-byte Reload
- adcq 1032(%rsp), %rax
- movq %rax, 56(%rsp) # 8-byte Spill
- adcq $0, %r15
- movq 16(%rsp), %rax # 8-byte Reload
- movq 16(%rax), %rdx
- leaq 896(%rsp), %rdi
- movq 24(%rsp), %rsi # 8-byte Reload
- callq .LmulPv512x64
- movq %r13, %rcx
- addq 896(%rsp), %rcx
- movq 48(%rsp), %r13 # 8-byte Reload
- adcq 904(%rsp), %r13
- adcq 912(%rsp), %r14
- adcq 920(%rsp), %rbx
- movq %rbx, 72(%rsp) # 8-byte Spill
- movq 88(%rsp), %rax # 8-byte Reload
- adcq 928(%rsp), %rax
- movq %rax, 88(%rsp) # 8-byte Spill
- adcq 936(%rsp), %r12
- movq %r12, 80(%rsp) # 8-byte Spill
- adcq 944(%rsp), %rbp
- movq %rbp, 96(%rsp) # 8-byte Spill
- movq 56(%rsp), %r12 # 8-byte Reload
- adcq 952(%rsp), %r12
- adcq 960(%rsp), %r15
- sbbq %rbx, %rbx
- movq %rcx, %rdx
- movq %rcx, %rbp
- imulq 32(%rsp), %rdx # 8-byte Folded Reload
- leaq 824(%rsp), %rdi
- movq 40(%rsp), %rsi # 8-byte Reload
- callq .LmulPv512x64
- andl $1, %ebx
- addq 824(%rsp), %rbp
- adcq 832(%rsp), %r13
- movq %r13, 48(%rsp) # 8-byte Spill
- adcq 840(%rsp), %r14
- movq %r14, 64(%rsp) # 8-byte Spill
- movq 72(%rsp), %r13 # 8-byte Reload
- adcq 848(%rsp), %r13
- movq 88(%rsp), %rbp # 8-byte Reload
- adcq 856(%rsp), %rbp
- movq 80(%rsp), %r14 # 8-byte Reload
- adcq 864(%rsp), %r14
- movq 96(%rsp), %rax # 8-byte Reload
- adcq 872(%rsp), %rax
- movq %rax, 96(%rsp) # 8-byte Spill
- adcq 880(%rsp), %r12
- adcq 888(%rsp), %r15
- adcq $0, %rbx
- movq 16(%rsp), %rax # 8-byte Reload
- movq 24(%rax), %rdx
- leaq 752(%rsp), %rdi
- movq 24(%rsp), %rsi # 8-byte Reload
- callq .LmulPv512x64
- movq 48(%rsp), %rax # 8-byte Reload
- addq 752(%rsp), %rax
- movq 64(%rsp), %rcx # 8-byte Reload
- adcq 760(%rsp), %rcx
- movq %rcx, 64(%rsp) # 8-byte Spill
- adcq 768(%rsp), %r13
- movq %r13, 72(%rsp) # 8-byte Spill
- adcq 776(%rsp), %rbp
- movq %rbp, 88(%rsp) # 8-byte Spill
- adcq 784(%rsp), %r14
- movq %r14, 80(%rsp) # 8-byte Spill
- movq 96(%rsp), %rbp # 8-byte Reload
- adcq 792(%rsp), %rbp
- adcq 800(%rsp), %r12
- adcq 808(%rsp), %r15
- adcq 816(%rsp), %rbx
- movq %rbx, 48(%rsp) # 8-byte Spill
- sbbq %r13, %r13
- movq %rax, %rdx
- movq %rax, %rbx
- imulq 32(%rsp), %rdx # 8-byte Folded Reload
- leaq 680(%rsp), %rdi
- movq 40(%rsp), %rsi # 8-byte Reload
- callq .LmulPv512x64
- movq %r13, %rax
- andl $1, %eax
- addq 680(%rsp), %rbx
- movq 64(%rsp), %r14 # 8-byte Reload
- adcq 688(%rsp), %r14
- movq 72(%rsp), %rcx # 8-byte Reload
- adcq 696(%rsp), %rcx
- movq %rcx, 72(%rsp) # 8-byte Spill
- movq 88(%rsp), %r13 # 8-byte Reload
- adcq 704(%rsp), %r13
- movq 80(%rsp), %rbx # 8-byte Reload
- adcq 712(%rsp), %rbx
- adcq 720(%rsp), %rbp
- movq %rbp, 96(%rsp) # 8-byte Spill
- movq %r12, %rbp
- adcq 728(%rsp), %rbp
- adcq 736(%rsp), %r15
- movq 48(%rsp), %r12 # 8-byte Reload
- adcq 744(%rsp), %r12
- adcq $0, %rax
- movq %rax, 64(%rsp) # 8-byte Spill
- movq 16(%rsp), %rax # 8-byte Reload
- movq 32(%rax), %rdx
- leaq 608(%rsp), %rdi
- movq 24(%rsp), %rsi # 8-byte Reload
- callq .LmulPv512x64
- movq %r14, %rax
- addq 608(%rsp), %rax
- movq 72(%rsp), %r14 # 8-byte Reload
- adcq 616(%rsp), %r14
- adcq 624(%rsp), %r13
- movq %r13, 88(%rsp) # 8-byte Spill
- adcq 632(%rsp), %rbx
- movq %rbx, %r13
- movq 96(%rsp), %rcx # 8-byte Reload
- adcq 640(%rsp), %rcx
- movq %rcx, 96(%rsp) # 8-byte Spill
- adcq 648(%rsp), %rbp
- movq %rbp, 56(%rsp) # 8-byte Spill
- adcq 656(%rsp), %r15
- adcq 664(%rsp), %r12
- movq %r12, 48(%rsp) # 8-byte Spill
- movq 64(%rsp), %rcx # 8-byte Reload
- adcq 672(%rsp), %rcx
- movq %rcx, 64(%rsp) # 8-byte Spill
- sbbq %rbp, %rbp
- movq %rax, %rdx
- movq %rax, %rbx
- imulq 32(%rsp), %rdx # 8-byte Folded Reload
- leaq 536(%rsp), %rdi
- movq 40(%rsp), %rsi # 8-byte Reload
- callq .LmulPv512x64
- movq %rbp, %rax
- andl $1, %eax
- addq 536(%rsp), %rbx
- adcq 544(%rsp), %r14
- movq %r14, 72(%rsp) # 8-byte Spill
- movq 88(%rsp), %rbx # 8-byte Reload
- adcq 552(%rsp), %rbx
- adcq 560(%rsp), %r13
- movq 96(%rsp), %rbp # 8-byte Reload
- adcq 568(%rsp), %rbp
- movq 56(%rsp), %r12 # 8-byte Reload
- adcq 576(%rsp), %r12
- adcq 584(%rsp), %r15
- movq 48(%rsp), %rcx # 8-byte Reload
- adcq 592(%rsp), %rcx
- movq %rcx, 48(%rsp) # 8-byte Spill
- movq 64(%rsp), %r14 # 8-byte Reload
- adcq 600(%rsp), %r14
- adcq $0, %rax
- movq %rax, 88(%rsp) # 8-byte Spill
- movq 16(%rsp), %rax # 8-byte Reload
- movq 40(%rax), %rdx
- leaq 464(%rsp), %rdi
- movq 24(%rsp), %rsi # 8-byte Reload
- callq .LmulPv512x64
- movq 72(%rsp), %rax # 8-byte Reload
- addq 464(%rsp), %rax
- adcq 472(%rsp), %rbx
- adcq 480(%rsp), %r13
- movq %r13, 80(%rsp) # 8-byte Spill
- adcq 488(%rsp), %rbp
- movq %rbp, 96(%rsp) # 8-byte Spill
- adcq 496(%rsp), %r12
- adcq 504(%rsp), %r15
- movq %r15, 72(%rsp) # 8-byte Spill
- movq 48(%rsp), %r15 # 8-byte Reload
- adcq 512(%rsp), %r15
- adcq 520(%rsp), %r14
- movq %r14, 64(%rsp) # 8-byte Spill
- movq 88(%rsp), %r14 # 8-byte Reload
- adcq 528(%rsp), %r14
- sbbq %r13, %r13
- movq %rax, %rdx
- movq %rax, %rbp
- imulq 32(%rsp), %rdx # 8-byte Folded Reload
- leaq 392(%rsp), %rdi
- movq 40(%rsp), %rsi # 8-byte Reload
- callq .LmulPv512x64
- movq %r13, %rax
- andl $1, %eax
- addq 392(%rsp), %rbp
- adcq 400(%rsp), %rbx
- movq %rbx, 88(%rsp) # 8-byte Spill
- movq 80(%rsp), %rbp # 8-byte Reload
- adcq 408(%rsp), %rbp
- movq 96(%rsp), %rbx # 8-byte Reload
- adcq 416(%rsp), %rbx
- adcq 424(%rsp), %r12
- movq 72(%rsp), %r13 # 8-byte Reload
- adcq 432(%rsp), %r13
- adcq 440(%rsp), %r15
- movq %r15, 48(%rsp) # 8-byte Spill
- movq 64(%rsp), %r15 # 8-byte Reload
- adcq 448(%rsp), %r15
- adcq 456(%rsp), %r14
- adcq $0, %rax
- movq %rax, 72(%rsp) # 8-byte Spill
- movq 16(%rsp), %rax # 8-byte Reload
- movq 48(%rax), %rdx
- leaq 320(%rsp), %rdi
- movq 24(%rsp), %rsi # 8-byte Reload
- callq .LmulPv512x64
- movq 88(%rsp), %rax # 8-byte Reload
- addq 320(%rsp), %rax
- adcq 328(%rsp), %rbp
- movq %rbp, 80(%rsp) # 8-byte Spill
- adcq 336(%rsp), %rbx
- movq %rbx, 96(%rsp) # 8-byte Spill
- movq %r12, %rbp
- adcq 344(%rsp), %rbp
- adcq 352(%rsp), %r13
- movq 48(%rsp), %r12 # 8-byte Reload
- adcq 360(%rsp), %r12
- adcq 368(%rsp), %r15
- movq %r15, 64(%rsp) # 8-byte Spill
- adcq 376(%rsp), %r14
- movq %r14, 88(%rsp) # 8-byte Spill
- movq 72(%rsp), %rcx # 8-byte Reload
- adcq 384(%rsp), %rcx
- movq %rcx, 72(%rsp) # 8-byte Spill
- sbbq %r15, %r15
- movq %rax, %rdx
- movq %rax, %rbx
- imulq 32(%rsp), %rdx # 8-byte Folded Reload
- leaq 248(%rsp), %rdi
- movq 40(%rsp), %rsi # 8-byte Reload
- callq .LmulPv512x64
- andl $1, %r15d
- addq 248(%rsp), %rbx
- movq 80(%rsp), %rax # 8-byte Reload
- adcq 256(%rsp), %rax
- movq %rax, 80(%rsp) # 8-byte Spill
- movq 96(%rsp), %r14 # 8-byte Reload
- adcq 264(%rsp), %r14
- adcq 272(%rsp), %rbp
- movq %rbp, 56(%rsp) # 8-byte Spill
- movq %r13, %rbx
- adcq 280(%rsp), %rbx
- movq %r12, %rbp
- adcq 288(%rsp), %rbp
- movq 64(%rsp), %r13 # 8-byte Reload
- adcq 296(%rsp), %r13
- movq 88(%rsp), %rax # 8-byte Reload
- adcq 304(%rsp), %rax
- movq %rax, 88(%rsp) # 8-byte Spill
- movq 72(%rsp), %r12 # 8-byte Reload
- adcq 312(%rsp), %r12
- adcq $0, %r15
- movq 16(%rsp), %rax # 8-byte Reload
- movq 56(%rax), %rdx
- leaq 176(%rsp), %rdi
- movq 24(%rsp), %rsi # 8-byte Reload
- callq .LmulPv512x64
- movq 80(%rsp), %rax # 8-byte Reload
- addq 176(%rsp), %rax
- adcq 184(%rsp), %r14
- movq %r14, 96(%rsp) # 8-byte Spill
- movq 56(%rsp), %rcx # 8-byte Reload
- adcq 192(%rsp), %rcx
- movq %rcx, 56(%rsp) # 8-byte Spill
- adcq 200(%rsp), %rbx
- movq %rbx, 72(%rsp) # 8-byte Spill
- adcq 208(%rsp), %rbp
- adcq 216(%rsp), %r13
- movq %r13, 64(%rsp) # 8-byte Spill
- movq 88(%rsp), %r14 # 8-byte Reload
- adcq 224(%rsp), %r14
- adcq 232(%rsp), %r12
- adcq 240(%rsp), %r15
- sbbq %rbx, %rbx
- movq 32(%rsp), %rdx # 8-byte Reload
- imulq %rax, %rdx
- movq %rax, %r13
- leaq 104(%rsp), %rdi
- movq 40(%rsp), %rsi # 8-byte Reload
- callq .LmulPv512x64
- andl $1, %ebx
- addq 104(%rsp), %r13
- movq 96(%rsp), %rcx # 8-byte Reload
- adcq 112(%rsp), %rcx
- movq 56(%rsp), %rdx # 8-byte Reload
- adcq 120(%rsp), %rdx
- movq 72(%rsp), %rsi # 8-byte Reload
- adcq 128(%rsp), %rsi
- movq %rbp, %rdi
- adcq 136(%rsp), %rdi
- movq %rdi, 48(%rsp) # 8-byte Spill
- movq 64(%rsp), %r8 # 8-byte Reload
- adcq 144(%rsp), %r8
- movq %r8, 64(%rsp) # 8-byte Spill
- movq %r14, %r9
- adcq 152(%rsp), %r9
- movq %r9, 88(%rsp) # 8-byte Spill
- adcq 160(%rsp), %r12
- adcq 168(%rsp), %r15
- adcq $0, %rbx
- movq %rcx, %rax
- movq %rcx, %r11
- movq 40(%rsp), %rbp # 8-byte Reload
- subq (%rbp), %rax
- movq %rdx, %rcx
- movq %rdx, %r14
- sbbq 8(%rbp), %rcx
- movq %rsi, %rdx
- movq %rsi, %r13
- sbbq 16(%rbp), %rdx
- movq %rdi, %rsi
- sbbq 24(%rbp), %rsi
- movq %r8, %rdi
- sbbq 32(%rbp), %rdi
- movq %r9, %r10
- sbbq 40(%rbp), %r10
- movq %r12, %r8
- sbbq 48(%rbp), %r8
- movq %r15, %r9
- sbbq 56(%rbp), %r9
- sbbq $0, %rbx
- andl $1, %ebx
- cmovneq %r15, %r9
- testb %bl, %bl
- cmovneq %r11, %rax
- movq (%rsp), %rbx # 8-byte Reload
- movq %rax, (%rbx)
- cmovneq %r14, %rcx
- movq %rcx, 8(%rbx)
- cmovneq %r13, %rdx
- movq %rdx, 16(%rbx)
- cmovneq 48(%rsp), %rsi # 8-byte Folded Reload
- movq %rsi, 24(%rbx)
- cmovneq 64(%rsp), %rdi # 8-byte Folded Reload
- movq %rdi, 32(%rbx)
- cmovneq 88(%rsp), %r10 # 8-byte Folded Reload
- movq %r10, 40(%rbx)
- cmovneq %r12, %r8
- movq %r8, 48(%rbx)
- movq %r9, 56(%rbx)
- addq $1256, %rsp # imm = 0x4E8
- popq %rbx
- popq %r12
- popq %r13
- popq %r14
- popq %r15
- popq %rbp
- retq
-.Lfunc_end114:
- .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L
-
- .globl mcl_fp_montNF8L
- .align 16, 0x90
- .type mcl_fp_montNF8L,@function
-mcl_fp_montNF8L: # @mcl_fp_montNF8L
-# BB#0:
- pushq %rbp
- pushq %r15
- pushq %r14
- pushq %r13
- pushq %r12
- pushq %rbx
- subq $1240, %rsp # imm = 0x4D8
- movq %rcx, 56(%rsp) # 8-byte Spill
- movq %rdx, 16(%rsp) # 8-byte Spill
- movq %rsi, 24(%rsp) # 8-byte Spill
- movq %rdi, (%rsp) # 8-byte Spill
- movq -8(%rcx), %rbx
- movq %rbx, 32(%rsp) # 8-byte Spill
- movq (%rdx), %rdx
- leaq 1168(%rsp), %rdi
- callq .LmulPv512x64
- movq 1168(%rsp), %r15
- movq 1176(%rsp), %r12
- movq %r15, %rdx
- imulq %rbx, %rdx
- movq 1232(%rsp), %rax
- movq %rax, 48(%rsp) # 8-byte Spill
- movq 1224(%rsp), %rax
- movq %rax, 80(%rsp) # 8-byte Spill
- movq 1216(%rsp), %r13
- movq 1208(%rsp), %rax
- movq %rax, 72(%rsp) # 8-byte Spill
- movq 1200(%rsp), %r14
- movq 1192(%rsp), %rbp
- movq 1184(%rsp), %rbx
- leaq 1096(%rsp), %rdi
- movq 56(%rsp), %rsi # 8-byte Reload
- callq .LmulPv512x64
- addq 1096(%rsp), %r15
- adcq 1104(%rsp), %r12
- movq %r12, 64(%rsp) # 8-byte Spill
- adcq 1112(%rsp), %rbx
- adcq 1120(%rsp), %rbp
- adcq 1128(%rsp), %r14
- movq %r14, %r12
- movq 72(%rsp), %r14 # 8-byte
Reload - adcq 1136(%rsp), %r14 - adcq 1144(%rsp), %r13 - movq 80(%rsp), %rax # 8-byte Reload - adcq 1152(%rsp), %rax - movq %rax, 80(%rsp) # 8-byte Spill - movq 48(%rsp), %rax # 8-byte Reload - adcq 1160(%rsp), %rax - movq %rax, 48(%rsp) # 8-byte Spill - movq 16(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rdx - leaq 1024(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - movq 1088(%rsp), %r15 - movq 64(%rsp), %rax # 8-byte Reload - addq 1024(%rsp), %rax - adcq 1032(%rsp), %rbx - movq %rbx, 8(%rsp) # 8-byte Spill - movq %rbp, %rbx - adcq 1040(%rsp), %rbx - adcq 1048(%rsp), %r12 - adcq 1056(%rsp), %r14 - movq %r14, 72(%rsp) # 8-byte Spill - movq %r13, %rbp - adcq 1064(%rsp), %rbp - movq 80(%rsp), %rcx # 8-byte Reload - adcq 1072(%rsp), %rcx - movq %rcx, 80(%rsp) # 8-byte Spill - movq 48(%rsp), %r14 # 8-byte Reload - adcq 1080(%rsp), %r14 - adcq $0, %r15 - movq %rax, %rdx - movq %rax, %r13 - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 952(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - addq 952(%rsp), %r13 - movq 8(%rsp), %rax # 8-byte Reload - adcq 960(%rsp), %rax - movq %rax, 8(%rsp) # 8-byte Spill - adcq 968(%rsp), %rbx - movq %rbx, 64(%rsp) # 8-byte Spill - movq %r12, %rbx - adcq 976(%rsp), %rbx - movq 72(%rsp), %r12 # 8-byte Reload - adcq 984(%rsp), %r12 - adcq 992(%rsp), %rbp - movq %rbp, 40(%rsp) # 8-byte Spill - movq 80(%rsp), %r13 # 8-byte Reload - adcq 1000(%rsp), %r13 - movq %r14, %rbp - adcq 1008(%rsp), %rbp - adcq 1016(%rsp), %r15 - movq 16(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rdx - leaq 880(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - movq 944(%rsp), %r14 - movq 8(%rsp), %rax # 8-byte Reload - addq 880(%rsp), %rax - movq 64(%rsp), %rcx # 8-byte Reload - adcq 888(%rsp), %rcx - movq %rcx, 64(%rsp) # 8-byte Spill - adcq 896(%rsp), %rbx - adcq 904(%rsp), %r12 - movq %r12, 72(%rsp) # 8-byte Spill - movq 40(%rsp), %rcx # 8-byte Reload - adcq 912(%rsp), %rcx - movq %rcx, 40(%rsp) # 8-byte Spill - adcq 920(%rsp), %r13 - movq %r13, 80(%rsp) # 8-byte Spill - adcq 928(%rsp), %rbp - movq %rbp, 48(%rsp) # 8-byte Spill - adcq 936(%rsp), %r15 - adcq $0, %r14 - movq %rax, %rdx - movq %rax, %rbp - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 808(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - addq 808(%rsp), %rbp - movq 64(%rsp), %r13 # 8-byte Reload - adcq 816(%rsp), %r13 - movq %rbx, %r12 - adcq 824(%rsp), %r12 - movq 72(%rsp), %rbx # 8-byte Reload - adcq 832(%rsp), %rbx - movq 40(%rsp), %rbp # 8-byte Reload - adcq 840(%rsp), %rbp - movq 80(%rsp), %rax # 8-byte Reload - adcq 848(%rsp), %rax - movq %rax, 80(%rsp) # 8-byte Spill - movq 48(%rsp), %rax # 8-byte Reload - adcq 856(%rsp), %rax - movq %rax, 48(%rsp) # 8-byte Spill - adcq 864(%rsp), %r15 - adcq 872(%rsp), %r14 - movq 16(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rdx - leaq 736(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - movq 800(%rsp), %rax - movq %r13, %rcx - addq 736(%rsp), %rcx - adcq 744(%rsp), %r12 - movq %r12, 40(%rsp) # 8-byte Spill - adcq 752(%rsp), %rbx - movq %rbx, 72(%rsp) # 8-byte Spill - adcq 760(%rsp), %rbp - movq %rbp, %r13 - movq 80(%rsp), %rbp # 8-byte Reload - adcq 768(%rsp), %rbp - movq 48(%rsp), %rbx # 8-byte Reload - adcq 776(%rsp), %rbx - adcq 784(%rsp), %r15 - adcq 792(%rsp), %r14 - adcq $0, %rax - movq %rax, 64(%rsp) # 8-byte Spill - movq %rcx, %rdx - movq %rcx, %r12 - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 664(%rsp), %rdi - movq 56(%rsp), 
%rsi # 8-byte Reload - callq .LmulPv512x64 - addq 664(%rsp), %r12 - movq 40(%rsp), %rax # 8-byte Reload - adcq 672(%rsp), %rax - movq %rax, 40(%rsp) # 8-byte Spill - movq 72(%rsp), %rax # 8-byte Reload - adcq 680(%rsp), %rax - movq %rax, 72(%rsp) # 8-byte Spill - adcq 688(%rsp), %r13 - adcq 696(%rsp), %rbp - movq %rbp, 80(%rsp) # 8-byte Spill - adcq 704(%rsp), %rbx - adcq 712(%rsp), %r15 - adcq 720(%rsp), %r14 - movq 64(%rsp), %r12 # 8-byte Reload - adcq 728(%rsp), %r12 - movq 16(%rsp), %rax # 8-byte Reload - movq 32(%rax), %rdx - leaq 592(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - movq 656(%rsp), %rcx - movq 40(%rsp), %rax # 8-byte Reload - addq 592(%rsp), %rax - movq 72(%rsp), %rbp # 8-byte Reload - adcq 600(%rsp), %rbp - adcq 608(%rsp), %r13 - movq %r13, 40(%rsp) # 8-byte Spill - movq 80(%rsp), %r13 # 8-byte Reload - adcq 616(%rsp), %r13 - adcq 624(%rsp), %rbx - adcq 632(%rsp), %r15 - adcq 640(%rsp), %r14 - adcq 648(%rsp), %r12 - movq %r12, 64(%rsp) # 8-byte Spill - adcq $0, %rcx - movq %rcx, 80(%rsp) # 8-byte Spill - movq %rax, %rdx - movq %rax, %r12 - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 520(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - addq 520(%rsp), %r12 - adcq 528(%rsp), %rbp - movq %rbp, 72(%rsp) # 8-byte Spill - movq 40(%rsp), %r12 # 8-byte Reload - adcq 536(%rsp), %r12 - movq %r13, %rbp - adcq 544(%rsp), %rbp - adcq 552(%rsp), %rbx - adcq 560(%rsp), %r15 - adcq 568(%rsp), %r14 - movq 64(%rsp), %r13 # 8-byte Reload - adcq 576(%rsp), %r13 - movq 80(%rsp), %rax # 8-byte Reload - adcq 584(%rsp), %rax - movq %rax, 80(%rsp) # 8-byte Spill - movq 16(%rsp), %rax # 8-byte Reload - movq 40(%rax), %rdx - leaq 448(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - movq 512(%rsp), %rcx - movq 72(%rsp), %rax # 8-byte Reload - addq 448(%rsp), %rax - adcq 456(%rsp), %r12 - movq %r12, 40(%rsp) # 8-byte Spill - adcq 464(%rsp), %rbp - adcq 472(%rsp), %rbx - adcq 480(%rsp), %r15 - adcq 488(%rsp), %r14 - adcq 496(%rsp), %r13 - movq %r13, 64(%rsp) # 8-byte Spill - movq 80(%rsp), %r13 # 8-byte Reload - adcq 504(%rsp), %r13 - adcq $0, %rcx - movq %rcx, 72(%rsp) # 8-byte Spill - movq %rax, %rdx - movq %rax, %r12 - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 376(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - addq 376(%rsp), %r12 - movq 40(%rsp), %rax # 8-byte Reload - adcq 384(%rsp), %rax - movq %rax, 40(%rsp) # 8-byte Spill - adcq 392(%rsp), %rbp - adcq 400(%rsp), %rbx - adcq 408(%rsp), %r15 - adcq 416(%rsp), %r14 - movq 64(%rsp), %r12 # 8-byte Reload - adcq 424(%rsp), %r12 - adcq 432(%rsp), %r13 - movq 72(%rsp), %rax # 8-byte Reload - adcq 440(%rsp), %rax - movq %rax, 72(%rsp) # 8-byte Spill - movq 16(%rsp), %rax # 8-byte Reload - movq 48(%rax), %rdx - leaq 304(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - movq 368(%rsp), %rcx - movq 40(%rsp), %rax # 8-byte Reload - addq 304(%rsp), %rax - adcq 312(%rsp), %rbp - movq %rbp, 80(%rsp) # 8-byte Spill - adcq 320(%rsp), %rbx - adcq 328(%rsp), %r15 - adcq 336(%rsp), %r14 - adcq 344(%rsp), %r12 - movq %r12, 64(%rsp) # 8-byte Spill - adcq 352(%rsp), %r13 - movq 72(%rsp), %rbp # 8-byte Reload - adcq 360(%rsp), %rbp - adcq $0, %rcx - movq %rcx, 48(%rsp) # 8-byte Spill - movq %rax, %rdx - movq %rax, %r12 - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 232(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - addq 232(%rsp), %r12 - movq 80(%rsp), %rax # 8-byte Reload - adcq 240(%rsp), 
%rax - movq %rax, 80(%rsp) # 8-byte Spill - adcq 248(%rsp), %rbx - adcq 256(%rsp), %r15 - adcq 264(%rsp), %r14 - movq 64(%rsp), %r12 # 8-byte Reload - adcq 272(%rsp), %r12 - adcq 280(%rsp), %r13 - adcq 288(%rsp), %rbp - movq %rbp, 72(%rsp) # 8-byte Spill - movq 48(%rsp), %rbp # 8-byte Reload - adcq 296(%rsp), %rbp - movq 16(%rsp), %rax # 8-byte Reload - movq 56(%rax), %rdx - leaq 160(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - movq 224(%rsp), %rcx - movq 80(%rsp), %rax # 8-byte Reload - addq 160(%rsp), %rax - adcq 168(%rsp), %rbx - movq %rbx, 48(%rsp) # 8-byte Spill - adcq 176(%rsp), %r15 - adcq 184(%rsp), %r14 - adcq 192(%rsp), %r12 - movq %r12, 64(%rsp) # 8-byte Spill - adcq 200(%rsp), %r13 - movq 72(%rsp), %rbx # 8-byte Reload - adcq 208(%rsp), %rbx - adcq 216(%rsp), %rbp - movq %rbp, %r12 - adcq $0, %rcx - movq %rcx, 80(%rsp) # 8-byte Spill - movq 32(%rsp), %rdx # 8-byte Reload - imulq %rax, %rdx - movq %rax, %rbp - leaq 88(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - addq 88(%rsp), %rbp - movq 48(%rsp), %r11 # 8-byte Reload - adcq 96(%rsp), %r11 - adcq 104(%rsp), %r15 - adcq 112(%rsp), %r14 - movq 64(%rsp), %rsi # 8-byte Reload - adcq 120(%rsp), %rsi - movq %rsi, 64(%rsp) # 8-byte Spill - adcq 128(%rsp), %r13 - adcq 136(%rsp), %rbx - movq %rbx, 72(%rsp) # 8-byte Spill - adcq 144(%rsp), %r12 - movq 80(%rsp), %r8 # 8-byte Reload - adcq 152(%rsp), %r8 - movq %r11, %rax - movq 56(%rsp), %rbp # 8-byte Reload - subq (%rbp), %rax - movq %r15, %rcx - sbbq 8(%rbp), %rcx - movq %r14, %rdx - sbbq 16(%rbp), %rdx - sbbq 24(%rbp), %rsi - movq %r13, %rdi - sbbq 32(%rbp), %rdi - movq %rbx, %r9 - sbbq 40(%rbp), %r9 - movq %r12, %r10 - sbbq 48(%rbp), %r10 - movq %rbp, %rbx - movq %r8, %rbp - sbbq 56(%rbx), %rbp - testq %rbp, %rbp - cmovsq %r11, %rax - movq (%rsp), %rbx # 8-byte Reload - movq %rax, (%rbx) - cmovsq %r15, %rcx - movq %rcx, 8(%rbx) - cmovsq %r14, %rdx - movq %rdx, 16(%rbx) - cmovsq 64(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 24(%rbx) - cmovsq %r13, %rdi - movq %rdi, 32(%rbx) - cmovsq 72(%rsp), %r9 # 8-byte Folded Reload - movq %r9, 40(%rbx) - cmovsq %r12, %r10 - movq %r10, 48(%rbx) - cmovsq %r8, %rbp - movq %rbp, 56(%rbx) - addq $1240, %rsp # imm = 0x4D8 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end115: - .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L - - .globl mcl_fp_montRed8L - .align 16, 0x90 - .type mcl_fp_montRed8L,@function -mcl_fp_montRed8L: # @mcl_fp_montRed8L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $776, %rsp # imm = 0x308 - movq %rdx, %rax - movq %rax, 112(%rsp) # 8-byte Spill - movq %rdi, 72(%rsp) # 8-byte Spill - movq -8(%rax), %rcx - movq %rcx, 128(%rsp) # 8-byte Spill - movq (%rsi), %r15 - movq 8(%rsi), %rdx - movq %rdx, 184(%rsp) # 8-byte Spill - movq %r15, %rdx - imulq %rcx, %rdx - movq 120(%rsi), %rcx - movq %rcx, 96(%rsp) # 8-byte Spill - movq 112(%rsi), %rcx - movq %rcx, 136(%rsp) # 8-byte Spill - movq 104(%rsi), %rcx - movq %rcx, 120(%rsp) # 8-byte Spill - movq 96(%rsi), %rcx - movq %rcx, 168(%rsp) # 8-byte Spill - movq 88(%rsi), %rcx - movq %rcx, 176(%rsp) # 8-byte Spill - movq 80(%rsi), %rcx - movq %rcx, 160(%rsp) # 8-byte Spill - movq 72(%rsi), %rcx - movq %rcx, 192(%rsp) # 8-byte Spill - movq 64(%rsi), %r13 - movq 56(%rsi), %rcx - movq %rcx, 144(%rsp) # 8-byte Spill - movq 48(%rsi), %r14 - movq 40(%rsi), %rcx - movq %rcx, 152(%rsp) # 8-byte Spill - movq 32(%rsi), %r12 - movq 24(%rsi), %rbx - movq 
16(%rsi), %rbp - movq %rax, %rcx - movq (%rcx), %rax - movq %rax, 16(%rsp) # 8-byte Spill - movq 56(%rcx), %rax - movq %rax, 64(%rsp) # 8-byte Spill - movq 48(%rcx), %rax - movq %rax, 56(%rsp) # 8-byte Spill - movq 40(%rcx), %rax - movq %rax, 48(%rsp) # 8-byte Spill - movq 32(%rcx), %rax - movq %rax, 40(%rsp) # 8-byte Spill - movq 24(%rcx), %rax - movq %rax, 32(%rsp) # 8-byte Spill - movq 16(%rcx), %rax - movq %rax, 24(%rsp) # 8-byte Spill - movq 8(%rcx), %rax - movq %rax, 8(%rsp) # 8-byte Spill - movq %rcx, %rsi - leaq 704(%rsp), %rdi - callq .LmulPv512x64 - addq 704(%rsp), %r15 - movq 184(%rsp), %rcx # 8-byte Reload - adcq 712(%rsp), %rcx - adcq 720(%rsp), %rbp - movq %rbp, 80(%rsp) # 8-byte Spill - adcq 728(%rsp), %rbx - movq %rbx, 88(%rsp) # 8-byte Spill - adcq 736(%rsp), %r12 - movq %r12, 104(%rsp) # 8-byte Spill - movq 152(%rsp), %rax # 8-byte Reload - adcq 744(%rsp), %rax - movq %rax, 152(%rsp) # 8-byte Spill - adcq 752(%rsp), %r14 - movq %r14, %r12 - movq 144(%rsp), %rax # 8-byte Reload - adcq 760(%rsp), %rax - movq %rax, 144(%rsp) # 8-byte Spill - adcq 768(%rsp), %r13 - movq %r13, 184(%rsp) # 8-byte Spill - adcq $0, 192(%rsp) # 8-byte Folded Spill - movq 160(%rsp), %r15 # 8-byte Reload - adcq $0, %r15 - adcq $0, 176(%rsp) # 8-byte Folded Spill - adcq $0, 168(%rsp) # 8-byte Folded Spill - adcq $0, 120(%rsp) # 8-byte Folded Spill - movq 136(%rsp), %r13 # 8-byte Reload - adcq $0, %r13 - movq 96(%rsp), %r14 # 8-byte Reload - adcq $0, %r14 - sbbq %rbx, %rbx - movq %rcx, %rbp - movq %rbp, %rdx - imulq 128(%rsp), %rdx # 8-byte Folded Reload - leaq 632(%rsp), %rdi - movq 112(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - andl $1, %ebx - movq %rbx, %rax - addq 632(%rsp), %rbp - movq 80(%rsp), %rsi # 8-byte Reload - adcq 640(%rsp), %rsi - movq 88(%rsp), %rcx # 8-byte Reload - adcq 648(%rsp), %rcx - movq %rcx, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %rcx # 8-byte Reload - adcq 656(%rsp), %rcx - movq %rcx, 104(%rsp) # 8-byte Spill - movq 152(%rsp), %rcx # 8-byte Reload - adcq 664(%rsp), %rcx - movq %rcx, 152(%rsp) # 8-byte Spill - adcq 672(%rsp), %r12 - movq 144(%rsp), %rcx # 8-byte Reload - adcq 680(%rsp), %rcx - movq %rcx, 144(%rsp) # 8-byte Spill - movq 184(%rsp), %rcx # 8-byte Reload - adcq 688(%rsp), %rcx - movq %rcx, 184(%rsp) # 8-byte Spill - movq 192(%rsp), %rcx # 8-byte Reload - adcq 696(%rsp), %rcx - movq %rcx, 192(%rsp) # 8-byte Spill - adcq $0, %r15 - movq %r15, 160(%rsp) # 8-byte Spill - movq 176(%rsp), %rbx # 8-byte Reload - adcq $0, %rbx - movq 168(%rsp), %r15 # 8-byte Reload - adcq $0, %r15 - adcq $0, 120(%rsp) # 8-byte Folded Spill - adcq $0, %r13 - movq %r13, 136(%rsp) # 8-byte Spill - adcq $0, %r14 - movq %r14, 96(%rsp) # 8-byte Spill - movq %rax, %rbp - adcq $0, %rbp - movq %rsi, %rdx - movq %rsi, %r14 - imulq 128(%rsp), %rdx # 8-byte Folded Reload - leaq 560(%rsp), %rdi - movq 112(%rsp), %r13 # 8-byte Reload - movq %r13, %rsi - callq .LmulPv512x64 - addq 560(%rsp), %r14 - movq 88(%rsp), %rcx # 8-byte Reload - adcq 568(%rsp), %rcx - movq 104(%rsp), %rax # 8-byte Reload - adcq 576(%rsp), %rax - movq %rax, 104(%rsp) # 8-byte Spill - movq 152(%rsp), %rax # 8-byte Reload - adcq 584(%rsp), %rax - movq %rax, 152(%rsp) # 8-byte Spill - adcq 592(%rsp), %r12 - movq %r12, 88(%rsp) # 8-byte Spill - movq 144(%rsp), %r14 # 8-byte Reload - adcq 600(%rsp), %r14 - movq 184(%rsp), %rax # 8-byte Reload - adcq 608(%rsp), %rax - movq %rax, 184(%rsp) # 8-byte Spill - movq 192(%rsp), %rax # 8-byte Reload - adcq 616(%rsp), %rax - movq %rax, 192(%rsp) # 8-byte Spill - movq 160(%rsp), %rax 
# 8-byte Reload - adcq 624(%rsp), %rax - movq %rax, 160(%rsp) # 8-byte Spill - adcq $0, %rbx - movq %rbx, 176(%rsp) # 8-byte Spill - adcq $0, %r15 - movq %r15, 168(%rsp) # 8-byte Spill - movq 120(%rsp), %rbx # 8-byte Reload - adcq $0, %rbx - movq 136(%rsp), %r15 # 8-byte Reload - adcq $0, %r15 - adcq $0, 96(%rsp) # 8-byte Folded Spill - adcq $0, %rbp - movq %rbp, 80(%rsp) # 8-byte Spill - movq %rcx, %rbp - movq %rbp, %rdx - movq 128(%rsp), %r12 # 8-byte Reload - imulq %r12, %rdx - leaq 488(%rsp), %rdi - movq %r13, %rsi - callq .LmulPv512x64 - addq 488(%rsp), %rbp - movq 104(%rsp), %rax # 8-byte Reload - adcq 496(%rsp), %rax - movq 152(%rsp), %rbp # 8-byte Reload - adcq 504(%rsp), %rbp - movq 88(%rsp), %rcx # 8-byte Reload - adcq 512(%rsp), %rcx - movq %rcx, 88(%rsp) # 8-byte Spill - adcq 520(%rsp), %r14 - movq 184(%rsp), %rcx # 8-byte Reload - adcq 528(%rsp), %rcx - movq %rcx, 184(%rsp) # 8-byte Spill - movq 192(%rsp), %rcx # 8-byte Reload - adcq 536(%rsp), %rcx - movq %rcx, 192(%rsp) # 8-byte Spill - movq 160(%rsp), %r13 # 8-byte Reload - adcq 544(%rsp), %r13 - movq 176(%rsp), %rcx # 8-byte Reload - adcq 552(%rsp), %rcx - movq %rcx, 176(%rsp) # 8-byte Spill - adcq $0, 168(%rsp) # 8-byte Folded Spill - adcq $0, %rbx - movq %rbx, 120(%rsp) # 8-byte Spill - movq %r15, %rbx - adcq $0, %rbx - adcq $0, 96(%rsp) # 8-byte Folded Spill - adcq $0, 80(%rsp) # 8-byte Folded Spill - movq %rax, %rdx - movq %rax, %r15 - imulq %r12, %rdx - leaq 416(%rsp), %rdi - movq 112(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - addq 416(%rsp), %r15 - adcq 424(%rsp), %rbp - movq %rbp, %rax - movq 88(%rsp), %rcx # 8-byte Reload - adcq 432(%rsp), %rcx - movq %rcx, 88(%rsp) # 8-byte Spill - movq %r14, %r12 - adcq 440(%rsp), %r12 - movq 184(%rsp), %r14 # 8-byte Reload - adcq 448(%rsp), %r14 - movq 192(%rsp), %rbp # 8-byte Reload - adcq 456(%rsp), %rbp - adcq 464(%rsp), %r13 - movq 176(%rsp), %rcx # 8-byte Reload - adcq 472(%rsp), %rcx - movq %rcx, 176(%rsp) # 8-byte Spill - movq 168(%rsp), %rcx # 8-byte Reload - adcq 480(%rsp), %rcx - movq %rcx, 168(%rsp) # 8-byte Spill - adcq $0, 120(%rsp) # 8-byte Folded Spill - adcq $0, %rbx - movq %rbx, 136(%rsp) # 8-byte Spill - movq 96(%rsp), %r15 # 8-byte Reload - adcq $0, %r15 - adcq $0, 80(%rsp) # 8-byte Folded Spill - movq %rax, %rbx - movq %rbx, %rdx - imulq 128(%rsp), %rdx # 8-byte Folded Reload - leaq 344(%rsp), %rdi - movq 112(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - addq 344(%rsp), %rbx - movq 88(%rsp), %rax # 8-byte Reload - adcq 352(%rsp), %rax - adcq 360(%rsp), %r12 - movq %r12, 144(%rsp) # 8-byte Spill - adcq 368(%rsp), %r14 - movq %r14, 184(%rsp) # 8-byte Spill - adcq 376(%rsp), %rbp - movq %rbp, 192(%rsp) # 8-byte Spill - adcq 384(%rsp), %r13 - movq %r13, 160(%rsp) # 8-byte Spill - movq 176(%rsp), %r13 # 8-byte Reload - adcq 392(%rsp), %r13 - movq 168(%rsp), %r12 # 8-byte Reload - adcq 400(%rsp), %r12 - movq 120(%rsp), %r14 # 8-byte Reload - adcq 408(%rsp), %r14 - movq 136(%rsp), %rbp # 8-byte Reload - adcq $0, %rbp - movq %r15, %rbx - adcq $0, %rbx - adcq $0, 80(%rsp) # 8-byte Folded Spill - movq %rax, %rdx - movq %rax, %r15 - imulq 128(%rsp), %rdx # 8-byte Folded Reload - leaq 272(%rsp), %rdi - movq 112(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - addq 272(%rsp), %r15 - movq 144(%rsp), %rcx # 8-byte Reload - adcq 280(%rsp), %rcx - movq 184(%rsp), %rax # 8-byte Reload - adcq 288(%rsp), %rax - movq %rax, 184(%rsp) # 8-byte Spill - movq 192(%rsp), %rax # 8-byte Reload - adcq 296(%rsp), %rax - movq %rax, 192(%rsp) # 8-byte Spill - movq 
160(%rsp), %rax # 8-byte Reload - adcq 304(%rsp), %rax - movq %rax, 160(%rsp) # 8-byte Spill - adcq 312(%rsp), %r13 - movq %r13, 176(%rsp) # 8-byte Spill - adcq 320(%rsp), %r12 - movq %r12, 168(%rsp) # 8-byte Spill - adcq 328(%rsp), %r14 - movq %r14, %r13 - adcq 336(%rsp), %rbp - movq %rbp, %r12 - adcq $0, %rbx - movq %rbx, %r14 - movq 80(%rsp), %r15 # 8-byte Reload - adcq $0, %r15 - movq 128(%rsp), %rdx # 8-byte Reload - movq %rcx, %rbx - imulq %rbx, %rdx - leaq 200(%rsp), %rdi - movq 112(%rsp), %rsi # 8-byte Reload - callq .LmulPv512x64 - addq 200(%rsp), %rbx - movq 184(%rsp), %rax # 8-byte Reload - adcq 208(%rsp), %rax - movq %rax, 184(%rsp) # 8-byte Spill - movq 192(%rsp), %r8 # 8-byte Reload - adcq 216(%rsp), %r8 - movq %r8, 192(%rsp) # 8-byte Spill - movq 160(%rsp), %rdx # 8-byte Reload - adcq 224(%rsp), %rdx - movq 176(%rsp), %rsi # 8-byte Reload - adcq 232(%rsp), %rsi - movq 168(%rsp), %rdi # 8-byte Reload - adcq 240(%rsp), %rdi - movq %r13, %rbp - adcq 248(%rsp), %rbp - movq %r12, %rbx - adcq 256(%rsp), %rbx - movq %rbx, 136(%rsp) # 8-byte Spill - movq %r14, %r9 - adcq 264(%rsp), %r9 - adcq $0, %r15 - movq %r15, %r10 - subq 16(%rsp), %rax # 8-byte Folded Reload - movq %r8, %rcx - sbbq 8(%rsp), %rcx # 8-byte Folded Reload - movq %rdx, %r13 - sbbq 24(%rsp), %r13 # 8-byte Folded Reload - movq %rsi, %r12 - sbbq 32(%rsp), %r12 # 8-byte Folded Reload - movq %rdi, %r14 - sbbq 40(%rsp), %r14 # 8-byte Folded Reload - movq %rbp, %r11 - sbbq 48(%rsp), %r11 # 8-byte Folded Reload - movq %rbx, %r8 - sbbq 56(%rsp), %r8 # 8-byte Folded Reload - movq %r9, %r15 - sbbq 64(%rsp), %r9 # 8-byte Folded Reload - sbbq $0, %r10 - andl $1, %r10d - cmovneq %r15, %r9 - testb %r10b, %r10b - cmovneq 184(%rsp), %rax # 8-byte Folded Reload - movq 72(%rsp), %rbx # 8-byte Reload - movq %rax, (%rbx) - cmovneq 192(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 8(%rbx) - cmovneq %rdx, %r13 - movq %r13, 16(%rbx) - cmovneq %rsi, %r12 - movq %r12, 24(%rbx) - cmovneq %rdi, %r14 - movq %r14, 32(%rbx) - cmovneq %rbp, %r11 - movq %r11, 40(%rbx) - cmovneq 136(%rsp), %r8 # 8-byte Folded Reload - movq %r8, 48(%rbx) - movq %r9, 56(%rbx) - addq $776, %rsp # imm = 0x308 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end116: - .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L - - .globl mcl_fp_addPre8L - .align 16, 0x90 - .type mcl_fp_addPre8L,@function -mcl_fp_addPre8L: # @mcl_fp_addPre8L -# BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r8 - movq 56(%rsi), %r15 - movq 48(%rdx), %r9 - movq 48(%rsi), %r12 - movq 40(%rdx), %r10 - movq 32(%rdx), %r11 - movq 24(%rdx), %r14 - movq 16(%rdx), %rbx - movq (%rdx), %rcx - movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rbx - movq 40(%rsi), %r13 - movq 24(%rsi), %rax - movq 32(%rsi), %rsi - movq %rcx, (%rdi) - movq %rdx, 8(%rdi) - movq %rbx, 16(%rdi) - adcq %r14, %rax - movq %rax, 24(%rdi) - adcq %r11, %rsi - movq %rsi, 32(%rdi) - adcq %r10, %r13 - movq %r13, 40(%rdi) - adcq %r9, %r12 - movq %r12, 48(%rdi) - adcq %r8, %r15 - movq %r15, 56(%rdi) - sbbq %rax, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq -.Lfunc_end117: - .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L - - .globl mcl_fp_subPre8L - .align 16, 0x90 - .type mcl_fp_subPre8L,@function -mcl_fp_subPre8L: # @mcl_fp_subPre8L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r8 - movq 56(%rsi), %r15 - movq 48(%rdx), %r9 
- movq 40(%rdx), %r10 - movq 24(%rdx), %r11 - movq 32(%rdx), %r14 - movq (%rsi), %rbx - movq 8(%rsi), %r12 - xorl %eax, %eax - subq (%rdx), %rbx - sbbq 8(%rdx), %r12 - movq 16(%rsi), %rcx - sbbq 16(%rdx), %rcx - movq 48(%rsi), %r13 - movq 40(%rsi), %rdx - movq 32(%rsi), %rbp - movq 24(%rsi), %rsi - movq %rbx, (%rdi) - movq %r12, 8(%rdi) - movq %rcx, 16(%rdi) - sbbq %r11, %rsi - movq %rsi, 24(%rdi) - sbbq %r14, %rbp - movq %rbp, 32(%rdi) - sbbq %r10, %rdx - movq %rdx, 40(%rdi) - sbbq %r9, %r13 - movq %r13, 48(%rdi) - sbbq %r8, %r15 - movq %r15, 56(%rdi) - sbbq $0, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end118: - .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L - - .globl mcl_fp_shr1_8L - .align 16, 0x90 - .type mcl_fp_shr1_8L,@function -mcl_fp_shr1_8L: # @mcl_fp_shr1_8L -# BB#0: - movq 56(%rsi), %r8 - movq 48(%rsi), %r9 - movq 40(%rsi), %r10 - movq 32(%rsi), %r11 - movq 24(%rsi), %rcx - movq 16(%rsi), %rdx - movq (%rsi), %rax - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rax - movq %rax, (%rdi) - shrdq $1, %rdx, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rcx, %rdx - movq %rdx, 16(%rdi) - shrdq $1, %r11, %rcx - movq %rcx, 24(%rdi) - shrdq $1, %r10, %r11 - movq %r11, 32(%rdi) - shrdq $1, %r9, %r10 - movq %r10, 40(%rdi) - shrdq $1, %r8, %r9 - movq %r9, 48(%rdi) - shrq %r8 - movq %r8, 56(%rdi) - retq -.Lfunc_end119: - .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L - - .globl mcl_fp_add8L - .align 16, 0x90 - .type mcl_fp_add8L,@function -mcl_fp_add8L: # @mcl_fp_add8L -# BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r15 - movq 56(%rsi), %r8 - movq 48(%rdx), %r12 - movq 48(%rsi), %r9 - movq 40(%rsi), %r13 - movq 24(%rsi), %r11 - movq 32(%rsi), %r10 - movq (%rdx), %r14 - movq 8(%rdx), %rbx - addq (%rsi), %r14 - adcq 8(%rsi), %rbx - movq 16(%rdx), %rax - adcq 16(%rsi), %rax - adcq 24(%rdx), %r11 - movq 40(%rdx), %rsi - adcq 32(%rdx), %r10 - movq %r14, (%rdi) - movq %rbx, 8(%rdi) - movq %rax, 16(%rdi) - movq %r11, 24(%rdi) - movq %r10, 32(%rdi) - adcq %r13, %rsi - movq %rsi, 40(%rdi) - adcq %r12, %r9 - movq %r9, 48(%rdi) - adcq %r15, %r8 - movq %r8, 56(%rdi) - sbbq %rdx, %rdx - andl $1, %edx - subq (%rcx), %r14 - sbbq 8(%rcx), %rbx - sbbq 16(%rcx), %rax - sbbq 24(%rcx), %r11 - sbbq 32(%rcx), %r10 - sbbq 40(%rcx), %rsi - sbbq 48(%rcx), %r9 - sbbq 56(%rcx), %r8 - sbbq $0, %rdx - testb $1, %dl - jne .LBB120_2 -# BB#1: # %nocarry - movq %r14, (%rdi) - movq %rbx, 8(%rdi) - movq %rax, 16(%rdi) - movq %r11, 24(%rdi) - movq %r10, 32(%rdi) - movq %rsi, 40(%rdi) - movq %r9, 48(%rdi) - movq %r8, 56(%rdi) -.LBB120_2: # %carry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq -.Lfunc_end120: - .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L - - .globl mcl_fp_addNF8L - .align 16, 0x90 - .type mcl_fp_addNF8L,@function -mcl_fp_addNF8L: # @mcl_fp_addNF8L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r8 - movq 48(%rdx), %rbp - movq 40(%rdx), %rbx - movq 32(%rdx), %rax - movq 24(%rdx), %r11 - movq 16(%rdx), %r15 - movq (%rdx), %r13 - movq 8(%rdx), %r12 - addq (%rsi), %r13 - adcq 8(%rsi), %r12 - adcq 16(%rsi), %r15 - adcq 24(%rsi), %r11 - adcq 32(%rsi), %rax - movq %rax, -24(%rsp) # 8-byte Spill - movq %rax, %r10 - adcq 40(%rsi), %rbx - movq %rbx, -16(%rsp) # 8-byte Spill - movq %rbx, %r9 - adcq 48(%rsi), %rbp - movq %rbp, -8(%rsp) # 8-byte Spill - movq %rbp, %rax - adcq 56(%rsi), %r8 - movq %r13, %rsi - subq (%rcx), %rsi - movq %r12, 
%rdx - sbbq 8(%rcx), %rdx - movq %r15, %rbx - sbbq 16(%rcx), %rbx - movq %r11, %r14 - sbbq 24(%rcx), %r14 - movq %r10, %rbp - sbbq 32(%rcx), %rbp - movq %r9, %r10 - sbbq 40(%rcx), %r10 - movq %rax, %r9 - sbbq 48(%rcx), %r9 - movq %r8, %rax - sbbq 56(%rcx), %rax - testq %rax, %rax - cmovsq %r13, %rsi - movq %rsi, (%rdi) - cmovsq %r12, %rdx - movq %rdx, 8(%rdi) - cmovsq %r15, %rbx - movq %rbx, 16(%rdi) - cmovsq %r11, %r14 - movq %r14, 24(%rdi) - cmovsq -24(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 32(%rdi) - cmovsq -16(%rsp), %r10 # 8-byte Folded Reload - movq %r10, 40(%rdi) - cmovsq -8(%rsp), %r9 # 8-byte Folded Reload - movq %r9, 48(%rdi) - cmovsq %r8, %rax - movq %rax, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end121: - .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L - - .globl mcl_fp_sub8L - .align 16, 0x90 - .type mcl_fp_sub8L,@function -mcl_fp_sub8L: # @mcl_fp_sub8L -# BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r12 - movq 56(%rsi), %r8 - movq 48(%rdx), %r13 - movq (%rsi), %rax - movq 8(%rsi), %r10 - xorl %ebx, %ebx - subq (%rdx), %rax - sbbq 8(%rdx), %r10 - movq 16(%rsi), %r11 - sbbq 16(%rdx), %r11 - movq 24(%rsi), %r15 - sbbq 24(%rdx), %r15 - movq 32(%rsi), %r14 - sbbq 32(%rdx), %r14 - movq 48(%rsi), %r9 - movq 40(%rsi), %rsi - sbbq 40(%rdx), %rsi - movq %rax, (%rdi) - movq %r10, 8(%rdi) - movq %r11, 16(%rdi) - movq %r15, 24(%rdi) - movq %r14, 32(%rdi) - movq %rsi, 40(%rdi) - sbbq %r13, %r9 - movq %r9, 48(%rdi) - sbbq %r12, %r8 - movq %r8, 56(%rdi) - sbbq $0, %rbx - testb $1, %bl - je .LBB122_2 -# BB#1: # %carry - addq (%rcx), %rax - movq %rax, (%rdi) - movq 8(%rcx), %rax - adcq %r10, %rax - movq %rax, 8(%rdi) - movq 16(%rcx), %rax - adcq %r11, %rax - movq %rax, 16(%rdi) - movq 24(%rcx), %rax - adcq %r15, %rax - movq %rax, 24(%rdi) - movq 32(%rcx), %rax - adcq %r14, %rax - movq %rax, 32(%rdi) - movq 40(%rcx), %rax - adcq %rsi, %rax - movq %rax, 40(%rdi) - movq 48(%rcx), %rax - adcq %r9, %rax - movq %rax, 48(%rdi) - movq 56(%rcx), %rax - adcq %r8, %rax - movq %rax, 56(%rdi) -.LBB122_2: # %nocarry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq -.Lfunc_end122: - .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L - - .globl mcl_fp_subNF8L - .align 16, 0x90 - .type mcl_fp_subNF8L,@function -mcl_fp_subNF8L: # @mcl_fp_subNF8L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq %rdi, %r9 - movq 56(%rsi), %r14 - movq 48(%rsi), %rax - movq 40(%rsi), %rcx - movq 32(%rsi), %rdi - movq 24(%rsi), %r11 - movq 16(%rsi), %r15 - movq (%rsi), %r13 - movq 8(%rsi), %r12 - subq (%rdx), %r13 - sbbq 8(%rdx), %r12 - sbbq 16(%rdx), %r15 - sbbq 24(%rdx), %r11 - sbbq 32(%rdx), %rdi - movq %rdi, -24(%rsp) # 8-byte Spill - sbbq 40(%rdx), %rcx - movq %rcx, -16(%rsp) # 8-byte Spill - sbbq 48(%rdx), %rax - movq %rax, -8(%rsp) # 8-byte Spill - sbbq 56(%rdx), %r14 - movq %r14, %rsi - sarq $63, %rsi - movq 56(%r8), %r10 - andq %rsi, %r10 - movq 48(%r8), %rbx - andq %rsi, %rbx - movq 40(%r8), %rdi - andq %rsi, %rdi - movq 32(%r8), %rbp - andq %rsi, %rbp - movq 24(%r8), %rdx - andq %rsi, %rdx - movq 16(%r8), %rcx - andq %rsi, %rcx - movq 8(%r8), %rax - andq %rsi, %rax - andq (%r8), %rsi - addq %r13, %rsi - adcq %r12, %rax - movq %rsi, (%r9) - adcq %r15, %rcx - movq %rax, 8(%r9) - movq %rcx, 16(%r9) - adcq %r11, %rdx - movq %rdx, 24(%r9) - adcq -24(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 32(%r9) - adcq -16(%rsp), %rdi # 8-byte Folded 
Reload - movq %rdi, 40(%r9) - adcq -8(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, 48(%r9) - adcq %r14, %r10 - movq %r10, 56(%r9) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end123: - .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L - - .globl mcl_fpDbl_add8L - .align 16, 0x90 - .type mcl_fpDbl_add8L,@function -mcl_fpDbl_add8L: # @mcl_fpDbl_add8L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq 120(%rdx), %rax - movq %rax, -8(%rsp) # 8-byte Spill - movq 112(%rdx), %rax - movq %rax, -24(%rsp) # 8-byte Spill - movq 104(%rdx), %rax - movq %rax, -32(%rsp) # 8-byte Spill - movq 96(%rdx), %r14 - movq 24(%rsi), %r15 - movq 32(%rsi), %r11 - movq 16(%rdx), %r12 - movq (%rdx), %rbx - movq 8(%rdx), %rax - addq (%rsi), %rbx - adcq 8(%rsi), %rax - adcq 16(%rsi), %r12 - adcq 24(%rdx), %r15 - adcq 32(%rdx), %r11 - movq 88(%rdx), %rbp - movq 80(%rdx), %r13 - movq %rbx, (%rdi) - movq 72(%rdx), %r10 - movq %rax, 8(%rdi) - movq 64(%rdx), %r9 - movq %r12, 16(%rdi) - movq 40(%rdx), %r12 - movq %r15, 24(%rdi) - movq 40(%rsi), %rbx - adcq %r12, %rbx - movq 56(%rdx), %r15 - movq 48(%rdx), %r12 - movq %r11, 32(%rdi) - movq 48(%rsi), %rdx - adcq %r12, %rdx - movq 120(%rsi), %r12 - movq %rbx, 40(%rdi) - movq 56(%rsi), %rax - adcq %r15, %rax - movq 112(%rsi), %rcx - movq %rdx, 48(%rdi) - movq 64(%rsi), %rbx - adcq %r9, %rbx - movq 104(%rsi), %rdx - movq %rax, 56(%rdi) - movq 72(%rsi), %r9 - adcq %r10, %r9 - movq 80(%rsi), %r11 - adcq %r13, %r11 - movq 96(%rsi), %rax - movq 88(%rsi), %r15 - adcq %rbp, %r15 - adcq %r14, %rax - movq %rax, -16(%rsp) # 8-byte Spill - movq %rdx, %rax - adcq -32(%rsp), %rax # 8-byte Folded Reload - movq %rax, -32(%rsp) # 8-byte Spill - adcq -24(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -24(%rsp) # 8-byte Spill - adcq -8(%rsp), %r12 # 8-byte Folded Reload - movq %r12, -8(%rsp) # 8-byte Spill - sbbq %rbp, %rbp - andl $1, %ebp - movq %rbx, %rsi - subq (%r8), %rsi - movq %r9, %rdx - sbbq 8(%r8), %rdx - movq %r11, %r10 - sbbq 16(%r8), %r10 - movq %r15, %r14 - sbbq 24(%r8), %r14 - movq -16(%rsp), %r13 # 8-byte Reload - sbbq 32(%r8), %r13 - movq %rax, %r12 - sbbq 40(%r8), %r12 - movq %rcx, %rax - sbbq 48(%r8), %rax - movq -8(%rsp), %rcx # 8-byte Reload - sbbq 56(%r8), %rcx - sbbq $0, %rbp - andl $1, %ebp - cmovneq %rbx, %rsi - movq %rsi, 64(%rdi) - testb %bpl, %bpl - cmovneq %r9, %rdx - movq %rdx, 72(%rdi) - cmovneq %r11, %r10 - movq %r10, 80(%rdi) - cmovneq %r15, %r14 - movq %r14, 88(%rdi) - cmovneq -16(%rsp), %r13 # 8-byte Folded Reload - movq %r13, 96(%rdi) - cmovneq -32(%rsp), %r12 # 8-byte Folded Reload - movq %r12, 104(%rdi) - cmovneq -24(%rsp), %rax # 8-byte Folded Reload - movq %rax, 112(%rdi) - cmovneq -8(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 120(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end124: - .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L - - .globl mcl_fpDbl_sub8L - .align 16, 0x90 - .type mcl_fpDbl_sub8L,@function -mcl_fpDbl_sub8L: # @mcl_fpDbl_sub8L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r15 - movq 120(%rdx), %rax - movq %rax, -8(%rsp) # 8-byte Spill - movq 112(%rdx), %rax - movq %rax, -16(%rsp) # 8-byte Spill - movq 104(%rdx), %rax - movq %rax, -24(%rsp) # 8-byte Spill - movq 16(%rsi), %r9 - movq (%rsi), %r12 - movq 8(%rsi), %r14 - xorl %r8d, %r8d - subq (%rdx), %r12 - sbbq 8(%rdx), %r14 - sbbq 16(%rdx), %r9 - movq 24(%rsi), %rbx - sbbq 
24(%rdx), %rbx - movq 32(%rsi), %r13 - sbbq 32(%rdx), %r13 - movq 96(%rdx), %rbp - movq 88(%rdx), %r11 - movq %r12, (%rdi) - movq 80(%rdx), %r12 - movq %r14, 8(%rdi) - movq 72(%rdx), %r10 - movq %r9, 16(%rdi) - movq 40(%rdx), %r9 - movq %rbx, 24(%rdi) - movq 40(%rsi), %rbx - sbbq %r9, %rbx - movq 48(%rdx), %r9 - movq %r13, 32(%rdi) - movq 48(%rsi), %r14 - sbbq %r9, %r14 - movq 64(%rdx), %r13 - movq 56(%rdx), %r9 - movq %rbx, 40(%rdi) - movq 56(%rsi), %rdx - sbbq %r9, %rdx - movq 120(%rsi), %rcx - movq %r14, 48(%rdi) - movq 64(%rsi), %rbx - sbbq %r13, %rbx - movq 112(%rsi), %rax - movq %rdx, 56(%rdi) - movq 72(%rsi), %r9 - sbbq %r10, %r9 - movq 80(%rsi), %r13 - sbbq %r12, %r13 - movq 88(%rsi), %r12 - sbbq %r11, %r12 - movq 104(%rsi), %rdx - movq 96(%rsi), %r14 - sbbq %rbp, %r14 - sbbq -24(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, -24(%rsp) # 8-byte Spill - sbbq -16(%rsp), %rax # 8-byte Folded Reload - movq %rax, -16(%rsp) # 8-byte Spill - sbbq -8(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, -8(%rsp) # 8-byte Spill - movl $0, %ebp - sbbq $0, %rbp - andl $1, %ebp - movq (%r15), %r11 - cmoveq %r8, %r11 - testb %bpl, %bpl - movq 16(%r15), %rbp - cmoveq %r8, %rbp - movq 8(%r15), %rsi - cmoveq %r8, %rsi - movq 56(%r15), %r10 - cmoveq %r8, %r10 - movq 48(%r15), %rdx - cmoveq %r8, %rdx - movq 40(%r15), %rcx - cmoveq %r8, %rcx - movq 32(%r15), %rax - cmoveq %r8, %rax - cmovneq 24(%r15), %r8 - addq %rbx, %r11 - adcq %r9, %rsi - movq %r11, 64(%rdi) - adcq %r13, %rbp - movq %rsi, 72(%rdi) - movq %rbp, 80(%rdi) - adcq %r12, %r8 - movq %r8, 88(%rdi) - adcq %r14, %rax - movq %rax, 96(%rdi) - adcq -24(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 104(%rdi) - adcq -16(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 112(%rdi) - adcq -8(%rsp), %r10 # 8-byte Folded Reload - movq %r10, 120(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end125: - .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L - - .align 16, 0x90 - .type .LmulPv576x64,@function -.LmulPv576x64: # @mulPv576x64 -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rbx - movq %rbx, %rax - mulq (%rsi) - movq %rdx, -32(%rsp) # 8-byte Spill - movq %rax, (%rdi) - movq %rbx, %rax - mulq 64(%rsi) - movq %rdx, %r10 - movq %rax, -8(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq 56(%rsi) - movq %rdx, %r14 - movq %rax, -16(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq 48(%rsi) - movq %rdx, %r12 - movq %rax, -24(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq 40(%rsi) - movq %rdx, %rcx - movq %rax, -40(%rsp) # 8-byte Spill - movq %rbx, %rax - mulq 32(%rsi) - movq %rdx, %rbp - movq %rax, %r8 - movq %rbx, %rax - mulq 24(%rsi) - movq %rdx, %r9 - movq %rax, %r11 - movq %rbx, %rax - mulq 16(%rsi) - movq %rdx, %r15 - movq %rax, %r13 - movq %rbx, %rax - mulq 8(%rsi) - addq -32(%rsp), %rax # 8-byte Folded Reload - movq %rax, 8(%rdi) - adcq %r13, %rdx - movq %rdx, 16(%rdi) - adcq %r11, %r15 - movq %r15, 24(%rdi) - adcq %r8, %r9 - movq %r9, 32(%rdi) - adcq -40(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 40(%rdi) - adcq -24(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 48(%rdi) - adcq -16(%rsp), %r12 # 8-byte Folded Reload - movq %r12, 56(%rdi) - adcq -8(%rsp), %r14 # 8-byte Folded Reload - movq %r14, 64(%rdi) - adcq $0, %r10 - movq %r10, 72(%rdi) - movq %rdi, %rax - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end126: - .size .LmulPv576x64, .Lfunc_end126-.LmulPv576x64 - - .globl mcl_fp_mulUnitPre9L - .align 16, 0x90 - 
.type mcl_fp_mulUnitPre9L,@function -mcl_fp_mulUnitPre9L: # @mcl_fp_mulUnitPre9L -# BB#0: - pushq %r14 - pushq %rbx - subq $88, %rsp - movq %rdi, %rbx - leaq 8(%rsp), %rdi - callq .LmulPv576x64 - movq 80(%rsp), %r8 - movq 72(%rsp), %r9 - movq 64(%rsp), %r10 - movq 56(%rsp), %r11 - movq 48(%rsp), %r14 - movq 40(%rsp), %rax - movq 32(%rsp), %rcx - movq 24(%rsp), %rdx - movq 8(%rsp), %rsi - movq 16(%rsp), %rdi - movq %rsi, (%rbx) - movq %rdi, 8(%rbx) - movq %rdx, 16(%rbx) - movq %rcx, 24(%rbx) - movq %rax, 32(%rbx) - movq %r14, 40(%rbx) - movq %r11, 48(%rbx) - movq %r10, 56(%rbx) - movq %r9, 64(%rbx) - movq %r8, 72(%rbx) - addq $88, %rsp - popq %rbx - popq %r14 - retq -.Lfunc_end127: - .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L - - .globl mcl_fpDbl_mulPre9L - .align 16, 0x90 - .type mcl_fpDbl_mulPre9L,@function -mcl_fpDbl_mulPre9L: # @mcl_fpDbl_mulPre9L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $808, %rsp # imm = 0x328 - movq %rdx, %rax - movq %rax, 64(%rsp) # 8-byte Spill - movq %rsi, 72(%rsp) # 8-byte Spill - movq %rdi, %r12 - movq %r12, 80(%rsp) # 8-byte Spill - movq (%rax), %rdx - movq %rax, %rbx - leaq 728(%rsp), %rdi - movq %rsi, %rbp - callq .LmulPv576x64 - movq 800(%rsp), %r13 - movq 792(%rsp), %rax - movq %rax, 56(%rsp) # 8-byte Spill - movq 784(%rsp), %rax - movq %rax, 48(%rsp) # 8-byte Spill - movq 776(%rsp), %rax - movq %rax, 40(%rsp) # 8-byte Spill - movq 768(%rsp), %rax - movq %rax, 32(%rsp) # 8-byte Spill - movq 760(%rsp), %rax - movq %rax, 24(%rsp) # 8-byte Spill - movq 752(%rsp), %rax - movq %rax, 16(%rsp) # 8-byte Spill - movq 744(%rsp), %rax - movq %rax, 8(%rsp) # 8-byte Spill - movq 728(%rsp), %rax - movq 736(%rsp), %r14 - movq %rax, (%r12) - movq 8(%rbx), %rdx - leaq 648(%rsp), %rdi - movq %rbp, %rsi - callq .LmulPv576x64 - movq 720(%rsp), %r8 - movq 712(%rsp), %rcx - movq 704(%rsp), %rdx - movq 696(%rsp), %rsi - movq 688(%rsp), %rdi - movq 680(%rsp), %rbp - addq 648(%rsp), %r14 - movq 672(%rsp), %rax - movq 656(%rsp), %rbx - movq 664(%rsp), %r15 - movq %r14, 8(%r12) - adcq 8(%rsp), %rbx # 8-byte Folded Reload - adcq 16(%rsp), %r15 # 8-byte Folded Reload - adcq 24(%rsp), %rax # 8-byte Folded Reload - movq %rax, %r14 - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 16(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 24(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 32(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 40(%rsp) # 8-byte Spill - adcq %r13, %rcx - movq %rcx, 48(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 56(%rsp) # 8-byte Spill - movq 64(%rsp), %r13 # 8-byte Reload - movq 16(%r13), %rdx - leaq 568(%rsp), %rdi - movq 72(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 640(%rsp), %r8 - movq 632(%rsp), %r9 - movq 624(%rsp), %r10 - movq 616(%rsp), %rdi - movq 608(%rsp), %rbp - movq 600(%rsp), %rcx - addq 568(%rsp), %rbx - movq 592(%rsp), %rdx - movq 576(%rsp), %r12 - movq 584(%rsp), %rsi - movq 80(%rsp), %rax # 8-byte Reload - movq %rbx, 16(%rax) - adcq %r15, %r12 - adcq %r14, %rsi - movq %rsi, (%rsp) # 8-byte Spill - adcq 16(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 8(%rsp) # 8-byte Spill - adcq 24(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %r10 # 8-byte Folded Reload - 
movq %r10, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %r9 # 8-byte Folded Reload - movq %r9, 48(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 56(%rsp) # 8-byte Spill - movq 24(%r13), %rdx - leaq 488(%rsp), %rdi - movq 72(%rsp), %r15 # 8-byte Reload - movq %r15, %rsi - callq .LmulPv576x64 - movq 560(%rsp), %r8 - movq 552(%rsp), %rcx - movq 544(%rsp), %rdx - movq 536(%rsp), %rsi - movq 528(%rsp), %rdi - movq 520(%rsp), %rbp - addq 488(%rsp), %r12 - movq 512(%rsp), %rax - movq 496(%rsp), %rbx - movq 504(%rsp), %r13 - movq 80(%rsp), %r14 # 8-byte Reload - movq %r12, 24(%r14) - adcq (%rsp), %rbx # 8-byte Folded Reload - adcq 8(%rsp), %r13 # 8-byte Folded Reload - adcq 16(%rsp), %rax # 8-byte Folded Reload - movq %rax, 8(%rsp) # 8-byte Spill - adcq 24(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 48(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 56(%rsp) # 8-byte Spill - movq 64(%rsp), %r12 # 8-byte Reload - movq 32(%r12), %rdx - leaq 408(%rsp), %rdi - movq %r15, %rsi - callq .LmulPv576x64 - movq 480(%rsp), %r8 - movq 472(%rsp), %r9 - movq 464(%rsp), %rdx - movq 456(%rsp), %rsi - movq 448(%rsp), %rdi - movq 440(%rsp), %rbp - addq 408(%rsp), %rbx - movq 432(%rsp), %rax - movq 416(%rsp), %r15 - movq 424(%rsp), %rcx - movq %rbx, 32(%r14) - adcq %r13, %r15 - adcq 8(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, (%rsp) # 8-byte Spill - adcq 16(%rsp), %rax # 8-byte Folded Reload - movq %rax, 8(%rsp) # 8-byte Spill - adcq 24(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %r9 # 8-byte Folded Reload - movq %r9, 48(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 56(%rsp) # 8-byte Spill - movq %r12, %r14 - movq 40(%r14), %rdx - leaq 328(%rsp), %rdi - movq 72(%rsp), %r13 # 8-byte Reload - movq %r13, %rsi - callq .LmulPv576x64 - movq 400(%rsp), %r8 - movq 392(%rsp), %r9 - movq 384(%rsp), %rsi - movq 376(%rsp), %rdi - movq 368(%rsp), %rbx - movq 360(%rsp), %rbp - addq 328(%rsp), %r15 - movq 352(%rsp), %rcx - movq 336(%rsp), %r12 - movq 344(%rsp), %rdx - movq 80(%rsp), %rax # 8-byte Reload - movq %r15, 40(%rax) - adcq (%rsp), %r12 # 8-byte Folded Reload - adcq 8(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, (%rsp) # 8-byte Spill - adcq 16(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 8(%rsp) # 8-byte Spill - adcq 24(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %r9 # 8-byte Folded Reload - movq %r9, 48(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 56(%rsp) # 8-byte Spill - movq 48(%r14), %rdx - leaq 248(%rsp), %rdi - movq %r13, %rsi - movq %r13, %r15 - callq .LmulPv576x64 - movq 320(%rsp), %r8 - movq 312(%rsp), %r9 - movq 304(%rsp), %rsi - movq 296(%rsp), %rdi - movq 288(%rsp), %rbx - movq 280(%rsp), %rbp - addq 
248(%rsp), %r12 - movq 272(%rsp), %rcx - movq 256(%rsp), %r13 - movq 264(%rsp), %rdx - movq 80(%rsp), %rax # 8-byte Reload - movq %r12, 48(%rax) - adcq (%rsp), %r13 # 8-byte Folded Reload - adcq 8(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, (%rsp) # 8-byte Spill - adcq 16(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 8(%rsp) # 8-byte Spill - adcq 24(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %r9 # 8-byte Folded Reload - movq %r9, 48(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 56(%rsp) # 8-byte Spill - movq 56(%r14), %rdx - leaq 168(%rsp), %rdi - movq %r15, %rsi - callq .LmulPv576x64 - movq 240(%rsp), %rcx - movq 232(%rsp), %rdx - movq 224(%rsp), %rsi - movq 216(%rsp), %rdi - movq 208(%rsp), %rbx - addq 168(%rsp), %r13 - movq 200(%rsp), %r12 - movq 192(%rsp), %rbp - movq 176(%rsp), %r14 - movq 184(%rsp), %r15 - movq 80(%rsp), %rax # 8-byte Reload - movq %r13, 56(%rax) - adcq (%rsp), %r14 # 8-byte Folded Reload - adcq 8(%rsp), %r15 # 8-byte Folded Reload - adcq 16(%rsp), %rbp # 8-byte Folded Reload - adcq 24(%rsp), %r12 # 8-byte Folded Reload - adcq 32(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, %r13 - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 48(%rsp) # 8-byte Spill - adcq $0, %rcx - movq %rcx, 56(%rsp) # 8-byte Spill - movq 64(%rsp), %rax # 8-byte Reload - movq 64(%rax), %rdx - leaq 88(%rsp), %rdi - movq 72(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 88(%rsp), %r14 - adcq 96(%rsp), %r15 - movq 160(%rsp), %r8 - adcq 104(%rsp), %rbp - movq 152(%rsp), %r9 - movq 144(%rsp), %rdx - movq 136(%rsp), %rsi - movq 128(%rsp), %rdi - movq 120(%rsp), %rbx - movq 112(%rsp), %rax - movq 80(%rsp), %rcx # 8-byte Reload - movq %r14, 64(%rcx) - movq %r15, 72(%rcx) - adcq %r12, %rax - movq %rbp, 80(%rcx) - movq %rax, 88(%rcx) - adcq %r13, %rbx - movq %rbx, 96(%rcx) - adcq 32(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 104(%rcx) - adcq 40(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 112(%rcx) - adcq 48(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 120(%rcx) - adcq 56(%rsp), %r9 # 8-byte Folded Reload - movq %r9, 128(%rcx) - adcq $0, %r8 - movq %r8, 136(%rcx) - addq $808, %rsp # imm = 0x328 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end128: - .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L - - .globl mcl_fpDbl_sqrPre9L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre9L,@function -mcl_fpDbl_sqrPre9L: # @mcl_fpDbl_sqrPre9L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $808, %rsp # imm = 0x328 - movq %rsi, %r15 - movq %r15, 80(%rsp) # 8-byte Spill - movq %rdi, %r14 - movq %r14, 72(%rsp) # 8-byte Spill - movq (%r15), %rdx - leaq 728(%rsp), %rdi - callq .LmulPv576x64 - movq 800(%rsp), %rax - movq %rax, 64(%rsp) # 8-byte Spill - movq 792(%rsp), %rax - movq %rax, 56(%rsp) # 8-byte Spill - movq 784(%rsp), %rax - movq %rax, 48(%rsp) # 8-byte Spill - movq 776(%rsp), %rax - movq %rax, 40(%rsp) # 8-byte Spill - movq 768(%rsp), %rax - movq %rax, 32(%rsp) # 8-byte Spill - movq 760(%rsp), %rax - movq %rax, 24(%rsp) # 8-byte Spill - 
movq 752(%rsp), %rax - movq %rax, 16(%rsp) # 8-byte Spill - movq 744(%rsp), %rax - movq %rax, 8(%rsp) # 8-byte Spill - movq 728(%rsp), %rax - movq 736(%rsp), %r12 - movq %rax, (%r14) - movq 8(%r15), %rdx - leaq 648(%rsp), %rdi - movq %r15, %rsi - callq .LmulPv576x64 - movq 720(%rsp), %r8 - movq 712(%rsp), %rcx - movq 704(%rsp), %rdx - movq 696(%rsp), %rsi - movq 688(%rsp), %rdi - movq 680(%rsp), %rbp - addq 648(%rsp), %r12 - movq 672(%rsp), %rax - movq 656(%rsp), %rbx - movq 664(%rsp), %r13 - movq %r12, 8(%r14) - adcq 8(%rsp), %rbx # 8-byte Folded Reload - adcq 16(%rsp), %r13 # 8-byte Folded Reload - adcq 24(%rsp), %rax # 8-byte Folded Reload - movq %rax, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 48(%rsp) # 8-byte Spill - adcq 64(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 56(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 64(%rsp) # 8-byte Spill - movq 16(%r15), %rdx - leaq 568(%rsp), %rdi - movq %r15, %rsi - callq .LmulPv576x64 - movq 640(%rsp), %r8 - movq 632(%rsp), %rcx - movq 624(%rsp), %rdx - movq 616(%rsp), %rsi - movq 608(%rsp), %rdi - movq 600(%rsp), %rbp - addq 568(%rsp), %rbx - movq 592(%rsp), %rax - movq 576(%rsp), %r14 - movq 584(%rsp), %r12 - movq 72(%rsp), %r15 # 8-byte Reload - movq %rbx, 16(%r15) - adcq %r13, %r14 - adcq 16(%rsp), %r12 # 8-byte Folded Reload - adcq 24(%rsp), %rax # 8-byte Folded Reload - movq %rax, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 48(%rsp) # 8-byte Spill - adcq 64(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 56(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 64(%rsp) # 8-byte Spill - movq 80(%rsp), %rsi # 8-byte Reload - movq 24(%rsi), %rdx - leaq 488(%rsp), %rdi - callq .LmulPv576x64 - movq 560(%rsp), %r8 - movq 552(%rsp), %rcx - movq 544(%rsp), %rdx - movq 536(%rsp), %rsi - movq 528(%rsp), %rdi - movq 520(%rsp), %rbp - addq 488(%rsp), %r14 - movq 512(%rsp), %rax - movq 496(%rsp), %rbx - movq 504(%rsp), %r13 - movq %r14, 24(%r15) - adcq %r12, %rbx - adcq 16(%rsp), %r13 # 8-byte Folded Reload - adcq 24(%rsp), %rax # 8-byte Folded Reload - movq %rax, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 48(%rsp) # 8-byte Spill - adcq 64(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 56(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 64(%rsp) # 8-byte Spill - movq 80(%rsp), %rsi # 8-byte Reload - movq 32(%rsi), %rdx - leaq 408(%rsp), %rdi - callq .LmulPv576x64 - movq 480(%rsp), %r8 - movq 472(%rsp), %rcx - movq 464(%rsp), %rdx - movq 456(%rsp), %rsi - movq 448(%rsp), %rdi - movq 440(%rsp), %rbp - addq 408(%rsp), %rbx - movq 432(%rsp), %rax - movq 416(%rsp), %r14 - movq 424(%rsp), %r12 - movq %rbx, 32(%r15) - adcq %r13, %r14 - adcq 16(%rsp), %r12 # 8-byte Folded Reload - adcq 24(%rsp), %rax # 8-byte Folded Reload 
- movq %rax, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 48(%rsp) # 8-byte Spill - adcq 64(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 56(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 64(%rsp) # 8-byte Spill - movq 80(%rsp), %rsi # 8-byte Reload - movq 40(%rsi), %rdx - leaq 328(%rsp), %rdi - callq .LmulPv576x64 - movq 400(%rsp), %r8 - movq 392(%rsp), %rcx - movq 384(%rsp), %rdx - movq 376(%rsp), %rsi - movq 368(%rsp), %rdi - movq 360(%rsp), %rbp - addq 328(%rsp), %r14 - movq 352(%rsp), %rax - movq 336(%rsp), %rbx - movq 344(%rsp), %r13 - movq %r14, 40(%r15) - adcq %r12, %rbx - adcq 16(%rsp), %r13 # 8-byte Folded Reload - adcq 24(%rsp), %rax # 8-byte Folded Reload - movq %rax, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 48(%rsp) # 8-byte Spill - adcq 64(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 56(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 64(%rsp) # 8-byte Spill - movq 80(%rsp), %rsi # 8-byte Reload - movq 48(%rsi), %rdx - leaq 248(%rsp), %rdi - callq .LmulPv576x64 - movq 320(%rsp), %r8 - movq 312(%rsp), %rcx - movq 304(%rsp), %rdx - movq 296(%rsp), %rsi - movq 288(%rsp), %rdi - movq 280(%rsp), %rbp - addq 248(%rsp), %rbx - movq 272(%rsp), %rax - movq 256(%rsp), %r12 - movq 264(%rsp), %r14 - movq %rbx, 48(%r15) - adcq %r13, %r12 - adcq 16(%rsp), %r14 # 8-byte Folded Reload - adcq 24(%rsp), %rax # 8-byte Folded Reload - movq %rax, 16(%rsp) # 8-byte Spill - adcq 32(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%rsp) # 8-byte Spill - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rsp) # 8-byte Spill - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 48(%rsp) # 8-byte Spill - adcq 64(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 56(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 64(%rsp) # 8-byte Spill - movq 80(%rsp), %rsi # 8-byte Reload - movq 56(%rsi), %rdx - leaq 168(%rsp), %rdi - callq .LmulPv576x64 - movq 240(%rsp), %r8 - movq 232(%rsp), %rdx - movq 224(%rsp), %rsi - movq 216(%rsp), %rdi - movq 208(%rsp), %rbx - movq 200(%rsp), %rcx - addq 168(%rsp), %r12 - movq 192(%rsp), %r15 - movq 176(%rsp), %r13 - movq 184(%rsp), %rbp - movq 72(%rsp), %rax # 8-byte Reload - movq %r12, 56(%rax) - adcq %r14, %r13 - adcq 16(%rsp), %rbp # 8-byte Folded Reload - adcq 24(%rsp), %r15 # 8-byte Folded Reload - adcq 32(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, %r12 - adcq 40(%rsp), %rbx # 8-byte Folded Reload - movq %rbx, %r14 - adcq 48(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 40(%rsp) # 8-byte Spill - adcq 56(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 48(%rsp) # 8-byte Spill - adcq 64(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 56(%rsp) # 8-byte Spill - adcq $0, %r8 - movq %r8, 64(%rsp) # 8-byte Spill - movq 80(%rsp), %rsi # 8-byte Reload - movq 64(%rsi), %rdx - leaq 88(%rsp), %rdi - callq .LmulPv576x64 - addq 88(%rsp), %r13 - adcq 96(%rsp), %rbp - movq 160(%rsp), %r8 - adcq 104(%rsp), %r15 - movq 
152(%rsp), %r9 - movq 144(%rsp), %rdx - movq 136(%rsp), %rsi - movq 128(%rsp), %rdi - movq 120(%rsp), %rbx - movq 112(%rsp), %rax - movq 72(%rsp), %rcx # 8-byte Reload - movq %r13, 64(%rcx) - movq %rbp, 72(%rcx) - adcq %r12, %rax - movq %r15, 80(%rcx) - movq %rax, 88(%rcx) - adcq %r14, %rbx - movq %rbx, 96(%rcx) - adcq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 104(%rcx) - adcq 48(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 112(%rcx) - adcq 56(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 120(%rcx) - adcq 64(%rsp), %r9 # 8-byte Folded Reload - movq %r9, 128(%rcx) - adcq $0, %r8 - movq %r8, 136(%rcx) - addq $808, %rsp # imm = 0x328 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end129: - .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L - - .globl mcl_fp_mont9L - .align 16, 0x90 - .type mcl_fp_mont9L,@function -mcl_fp_mont9L: # @mcl_fp_mont9L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $1560, %rsp # imm = 0x618 - movq %rcx, 56(%rsp) # 8-byte Spill - movq %rdx, 32(%rsp) # 8-byte Spill - movq %rsi, 24(%rsp) # 8-byte Spill - movq %rdi, (%rsp) # 8-byte Spill - movq -8(%rcx), %rbx - movq %rbx, 16(%rsp) # 8-byte Spill - movq (%rdx), %rdx - leaq 1480(%rsp), %rdi - callq .LmulPv576x64 - movq 1480(%rsp), %r14 - movq 1488(%rsp), %r15 - movq %r14, %rdx - imulq %rbx, %rdx - movq 1552(%rsp), %rax - movq %rax, 112(%rsp) # 8-byte Spill - movq 1544(%rsp), %rax - movq %rax, 104(%rsp) # 8-byte Spill - movq 1536(%rsp), %rax - movq %rax, 88(%rsp) # 8-byte Spill - movq 1528(%rsp), %r12 - movq 1520(%rsp), %r13 - movq 1512(%rsp), %rbx - movq 1504(%rsp), %rax - movq %rax, 80(%rsp) # 8-byte Spill - movq 1496(%rsp), %rbp - leaq 1400(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 1400(%rsp), %r14 - adcq 1408(%rsp), %r15 - adcq 1416(%rsp), %rbp - movq %rbp, 96(%rsp) # 8-byte Spill - movq 80(%rsp), %rax # 8-byte Reload - adcq 1424(%rsp), %rax - movq %rax, 80(%rsp) # 8-byte Spill - adcq 1432(%rsp), %rbx - movq %rbx, 40(%rsp) # 8-byte Spill - adcq 1440(%rsp), %r13 - movq %r13, 64(%rsp) # 8-byte Spill - adcq 1448(%rsp), %r12 - movq %r12, 48(%rsp) # 8-byte Spill - movq 88(%rsp), %rbx # 8-byte Reload - adcq 1456(%rsp), %rbx - movq 104(%rsp), %r14 # 8-byte Reload - adcq 1464(%rsp), %r14 - movq 112(%rsp), %r13 # 8-byte Reload - adcq 1472(%rsp), %r13 - sbbq %rbp, %rbp - movq 32(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rdx - leaq 1320(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %ebp - addq 1320(%rsp), %r15 - movq 96(%rsp), %rax # 8-byte Reload - adcq 1328(%rsp), %rax - movq %rax, 96(%rsp) # 8-byte Spill - movq 80(%rsp), %rax # 8-byte Reload - adcq 1336(%rsp), %rax - movq %rax, 80(%rsp) # 8-byte Spill - movq 40(%rsp), %r12 # 8-byte Reload - adcq 1344(%rsp), %r12 - movq 64(%rsp), %rax # 8-byte Reload - adcq 1352(%rsp), %rax - movq %rax, 64(%rsp) # 8-byte Spill - movq 48(%rsp), %rax # 8-byte Reload - adcq 1360(%rsp), %rax - movq %rax, 48(%rsp) # 8-byte Spill - adcq 1368(%rsp), %rbx - adcq 1376(%rsp), %r14 - movq %r14, 104(%rsp) # 8-byte Spill - adcq 1384(%rsp), %r13 - movq %r13, 112(%rsp) # 8-byte Spill - adcq 1392(%rsp), %rbp - sbbq %r14, %r14 - movq %r15, %rdx - imulq 16(%rsp), %rdx # 8-byte Folded Reload - leaq 1240(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq %r14, %rax - andl $1, %eax - addq 1240(%rsp), %r15 - movq 96(%rsp), %rcx # 8-byte Reload - adcq 1248(%rsp), %rcx - movq %rcx, 96(%rsp) # 8-byte Spill - 
movq 80(%rsp), %r14 # 8-byte Reload - adcq 1256(%rsp), %r14 - adcq 1264(%rsp), %r12 - movq %r12, 40(%rsp) # 8-byte Spill - movq 64(%rsp), %r12 # 8-byte Reload - adcq 1272(%rsp), %r12 - movq 48(%rsp), %r13 # 8-byte Reload - adcq 1280(%rsp), %r13 - adcq 1288(%rsp), %rbx - movq %rbx, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %r15 # 8-byte Reload - adcq 1296(%rsp), %r15 - movq 112(%rsp), %rbx # 8-byte Reload - adcq 1304(%rsp), %rbx - adcq 1312(%rsp), %rbp - adcq $0, %rax - movq %rax, 72(%rsp) # 8-byte Spill - movq 32(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rdx - leaq 1160(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 96(%rsp), %rax # 8-byte Reload - addq 1160(%rsp), %rax - adcq 1168(%rsp), %r14 - movq %r14, 80(%rsp) # 8-byte Spill - movq 40(%rsp), %r14 # 8-byte Reload - adcq 1176(%rsp), %r14 - adcq 1184(%rsp), %r12 - movq %r12, 64(%rsp) # 8-byte Spill - movq %r13, %r12 - adcq 1192(%rsp), %r12 - movq 88(%rsp), %rcx # 8-byte Reload - adcq 1200(%rsp), %rcx - movq %rcx, 88(%rsp) # 8-byte Spill - adcq 1208(%rsp), %r15 - movq %r15, %r13 - adcq 1216(%rsp), %rbx - movq %rbx, 112(%rsp) # 8-byte Spill - adcq 1224(%rsp), %rbp - movq 72(%rsp), %rcx # 8-byte Reload - adcq 1232(%rsp), %rcx - movq %rcx, 72(%rsp) # 8-byte Spill - sbbq %r15, %r15 - movq %rax, %rdx - movq %rax, %rbx - imulq 16(%rsp), %rdx # 8-byte Folded Reload - leaq 1080(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq %r15, %rax - andl $1, %eax - addq 1080(%rsp), %rbx - movq 80(%rsp), %rcx # 8-byte Reload - adcq 1088(%rsp), %rcx - movq %rcx, 80(%rsp) # 8-byte Spill - movq %r14, %r15 - adcq 1096(%rsp), %r15 - movq 64(%rsp), %r14 # 8-byte Reload - adcq 1104(%rsp), %r14 - movq %r12, %rbx - adcq 1112(%rsp), %rbx - movq 88(%rsp), %rcx # 8-byte Reload - adcq 1120(%rsp), %rcx - movq %rcx, 88(%rsp) # 8-byte Spill - adcq 1128(%rsp), %r13 - movq %r13, 104(%rsp) # 8-byte Spill - movq 112(%rsp), %r13 # 8-byte Reload - adcq 1136(%rsp), %r13 - adcq 1144(%rsp), %rbp - movq 72(%rsp), %r12 # 8-byte Reload - adcq 1152(%rsp), %r12 - adcq $0, %rax - movq %rax, 96(%rsp) # 8-byte Spill - movq 32(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rdx - leaq 1000(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 80(%rsp), %rax # 8-byte Reload - addq 1000(%rsp), %rax - adcq 1008(%rsp), %r15 - movq %r15, 40(%rsp) # 8-byte Spill - adcq 1016(%rsp), %r14 - movq %r14, %r15 - adcq 1024(%rsp), %rbx - movq %rbx, 48(%rsp) # 8-byte Spill - movq 88(%rsp), %r14 # 8-byte Reload - adcq 1032(%rsp), %r14 - movq 104(%rsp), %rcx # 8-byte Reload - adcq 1040(%rsp), %rcx - movq %rcx, 104(%rsp) # 8-byte Spill - adcq 1048(%rsp), %r13 - movq %r13, 112(%rsp) # 8-byte Spill - adcq 1056(%rsp), %rbp - adcq 1064(%rsp), %r12 - movq 96(%rsp), %rcx # 8-byte Reload - adcq 1072(%rsp), %rcx - movq %rcx, 96(%rsp) # 8-byte Spill - sbbq %rbx, %rbx - movq %rax, %rdx - movq %rax, %r13 - imulq 16(%rsp), %rdx # 8-byte Folded Reload - leaq 920(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %ebx - movq %rbx, %rax - addq 920(%rsp), %r13 - movq 40(%rsp), %rcx # 8-byte Reload - adcq 928(%rsp), %rcx - movq %rcx, 40(%rsp) # 8-byte Spill - adcq 936(%rsp), %r15 - movq %r15, 64(%rsp) # 8-byte Spill - movq 48(%rsp), %r15 # 8-byte Reload - adcq 944(%rsp), %r15 - movq %r14, %r13 - adcq 952(%rsp), %r13 - movq 104(%rsp), %r14 # 8-byte Reload - adcq 960(%rsp), %r14 - movq 112(%rsp), %rbx # 8-byte Reload - adcq 968(%rsp), %rbx - adcq 976(%rsp), %rbp - adcq 984(%rsp), %r12 - movq 96(%rsp), %rcx # 
8-byte Reload - adcq 992(%rsp), %rcx - movq %rcx, 96(%rsp) # 8-byte Spill - adcq $0, %rax - movq %rax, 80(%rsp) # 8-byte Spill - movq 32(%rsp), %rax # 8-byte Reload - movq 32(%rax), %rdx - leaq 840(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 40(%rsp), %rax # 8-byte Reload - addq 840(%rsp), %rax - movq 64(%rsp), %rcx # 8-byte Reload - adcq 848(%rsp), %rcx - movq %rcx, 64(%rsp) # 8-byte Spill - adcq 856(%rsp), %r15 - adcq 864(%rsp), %r13 - movq %r13, 88(%rsp) # 8-byte Spill - adcq 872(%rsp), %r14 - movq %r14, 104(%rsp) # 8-byte Spill - adcq 880(%rsp), %rbx - movq %rbx, 112(%rsp) # 8-byte Spill - adcq 888(%rsp), %rbp - adcq 896(%rsp), %r12 - movq 96(%rsp), %r13 # 8-byte Reload - adcq 904(%rsp), %r13 - movq 80(%rsp), %rcx # 8-byte Reload - adcq 912(%rsp), %rcx - movq %rcx, 80(%rsp) # 8-byte Spill - sbbq %rbx, %rbx - movq %rax, %rdx - movq %rax, %r14 - imulq 16(%rsp), %rdx # 8-byte Folded Reload - leaq 760(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %ebx - movq %rbx, %rax - addq 760(%rsp), %r14 - movq 64(%rsp), %rcx # 8-byte Reload - adcq 768(%rsp), %rcx - movq %rcx, 64(%rsp) # 8-byte Spill - adcq 776(%rsp), %r15 - movq 88(%rsp), %r14 # 8-byte Reload - adcq 784(%rsp), %r14 - movq 104(%rsp), %rcx # 8-byte Reload - adcq 792(%rsp), %rcx - movq %rcx, 104(%rsp) # 8-byte Spill - movq 112(%rsp), %rcx # 8-byte Reload - adcq 800(%rsp), %rcx - movq %rcx, 112(%rsp) # 8-byte Spill - adcq 808(%rsp), %rbp - movq %r12, %rbx - adcq 816(%rsp), %rbx - movq %r13, %r12 - adcq 824(%rsp), %r12 - movq 80(%rsp), %r13 # 8-byte Reload - adcq 832(%rsp), %r13 - adcq $0, %rax - movq %rax, 40(%rsp) # 8-byte Spill - movq 32(%rsp), %rax # 8-byte Reload - movq 40(%rax), %rdx - leaq 680(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 64(%rsp), %rax # 8-byte Reload - addq 680(%rsp), %rax - adcq 688(%rsp), %r15 - movq %r15, 48(%rsp) # 8-byte Spill - adcq 696(%rsp), %r14 - movq %r14, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %rcx # 8-byte Reload - adcq 704(%rsp), %rcx - movq %rcx, 104(%rsp) # 8-byte Spill - movq 112(%rsp), %r15 # 8-byte Reload - adcq 712(%rsp), %r15 - adcq 720(%rsp), %rbp - adcq 728(%rsp), %rbx - movq %rbx, 72(%rsp) # 8-byte Spill - adcq 736(%rsp), %r12 - movq %r12, 96(%rsp) # 8-byte Spill - adcq 744(%rsp), %r13 - movq %r13, 80(%rsp) # 8-byte Spill - movq 40(%rsp), %r13 # 8-byte Reload - adcq 752(%rsp), %r13 - sbbq %r14, %r14 - movq %rax, %rdx - movq %rax, %rbx - imulq 16(%rsp), %rdx # 8-byte Folded Reload - leaq 600(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %r14d - addq 600(%rsp), %rbx - movq 48(%rsp), %rax # 8-byte Reload - adcq 608(%rsp), %rax - movq %rax, 48(%rsp) # 8-byte Spill - movq 88(%rsp), %rax # 8-byte Reload - adcq 616(%rsp), %rax - movq %rax, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %rbx # 8-byte Reload - adcq 624(%rsp), %rbx - adcq 632(%rsp), %r15 - movq %r15, 112(%rsp) # 8-byte Spill - adcq 640(%rsp), %rbp - movq 72(%rsp), %r12 # 8-byte Reload - adcq 648(%rsp), %r12 - movq 96(%rsp), %rax # 8-byte Reload - adcq 656(%rsp), %rax - movq %rax, 96(%rsp) # 8-byte Spill - movq 80(%rsp), %r15 # 8-byte Reload - adcq 664(%rsp), %r15 - adcq 672(%rsp), %r13 - adcq $0, %r14 - movq %r14, 64(%rsp) # 8-byte Spill - movq 32(%rsp), %rax # 8-byte Reload - movq 48(%rax), %rdx - leaq 520(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 48(%rsp), %rax # 8-byte Reload - addq 520(%rsp), %rax - movq 88(%rsp), %r14 # 8-byte Reload - adcq 528(%rsp), 
%r14 - adcq 536(%rsp), %rbx - movq %rbx, 104(%rsp) # 8-byte Spill - movq 112(%rsp), %rcx # 8-byte Reload - adcq 544(%rsp), %rcx - movq %rcx, 112(%rsp) # 8-byte Spill - adcq 552(%rsp), %rbp - adcq 560(%rsp), %r12 - movq %r12, 72(%rsp) # 8-byte Spill - movq 96(%rsp), %r12 # 8-byte Reload - adcq 568(%rsp), %r12 - adcq 576(%rsp), %r15 - movq %r15, 80(%rsp) # 8-byte Spill - adcq 584(%rsp), %r13 - movq %r13, 40(%rsp) # 8-byte Spill - movq 64(%rsp), %r15 # 8-byte Reload - adcq 592(%rsp), %r15 - sbbq %rbx, %rbx - movq %rax, %rdx - movq %rax, %r13 - imulq 16(%rsp), %rdx # 8-byte Folded Reload - leaq 440(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %ebx - movq %rbx, %rax - addq 440(%rsp), %r13 - adcq 448(%rsp), %r14 - movq %r14, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %r14 # 8-byte Reload - adcq 456(%rsp), %r14 - movq 112(%rsp), %rbx # 8-byte Reload - adcq 464(%rsp), %rbx - adcq 472(%rsp), %rbp - movq %rbp, 8(%rsp) # 8-byte Spill - movq 72(%rsp), %rcx # 8-byte Reload - adcq 480(%rsp), %rcx - movq %rcx, 72(%rsp) # 8-byte Spill - adcq 488(%rsp), %r12 - movq %r12, 96(%rsp) # 8-byte Spill - movq 80(%rsp), %rbp # 8-byte Reload - adcq 496(%rsp), %rbp - movq 40(%rsp), %r12 # 8-byte Reload - adcq 504(%rsp), %r12 - adcq 512(%rsp), %r15 - movq %r15, %r13 - adcq $0, %rax - movq %rax, 48(%rsp) # 8-byte Spill - movq 32(%rsp), %rax # 8-byte Reload - movq 56(%rax), %rdx - leaq 360(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 88(%rsp), %rax # 8-byte Reload - addq 360(%rsp), %rax - adcq 368(%rsp), %r14 - adcq 376(%rsp), %rbx - movq %rbx, 112(%rsp) # 8-byte Spill - movq 8(%rsp), %rcx # 8-byte Reload - adcq 384(%rsp), %rcx - movq %rcx, 8(%rsp) # 8-byte Spill - movq 72(%rsp), %rbx # 8-byte Reload - adcq 392(%rsp), %rbx - movq 96(%rsp), %r15 # 8-byte Reload - adcq 400(%rsp), %r15 - adcq 408(%rsp), %rbp - movq %rbp, 80(%rsp) # 8-byte Spill - adcq 416(%rsp), %r12 - movq %r12, %rbp - adcq 424(%rsp), %r13 - movq %r13, 64(%rsp) # 8-byte Spill - movq 48(%rsp), %rcx # 8-byte Reload - adcq 432(%rsp), %rcx - movq %rcx, 48(%rsp) # 8-byte Spill - sbbq %r13, %r13 - movq %rax, %rdx - movq %rax, %r12 - imulq 16(%rsp), %rdx # 8-byte Folded Reload - leaq 280(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %r13d - addq 280(%rsp), %r12 - adcq 288(%rsp), %r14 - movq %r14, 104(%rsp) # 8-byte Spill - movq 112(%rsp), %rax # 8-byte Reload - adcq 296(%rsp), %rax - movq %rax, 112(%rsp) # 8-byte Spill - movq 8(%rsp), %r14 # 8-byte Reload - adcq 304(%rsp), %r14 - adcq 312(%rsp), %rbx - movq %rbx, 72(%rsp) # 8-byte Spill - adcq 320(%rsp), %r15 - movq %r15, 96(%rsp) # 8-byte Spill - movq 80(%rsp), %rbx # 8-byte Reload - adcq 328(%rsp), %rbx - adcq 336(%rsp), %rbp - movq %rbp, 40(%rsp) # 8-byte Spill - movq 64(%rsp), %r12 # 8-byte Reload - adcq 344(%rsp), %r12 - movq 48(%rsp), %rbp # 8-byte Reload - adcq 352(%rsp), %rbp - adcq $0, %r13 - movq 32(%rsp), %rax # 8-byte Reload - movq 64(%rax), %rdx - leaq 200(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 104(%rsp), %rax # 8-byte Reload - addq 200(%rsp), %rax - movq 112(%rsp), %r15 # 8-byte Reload - adcq 208(%rsp), %r15 - adcq 216(%rsp), %r14 - movq %r14, 8(%rsp) # 8-byte Spill - movq 72(%rsp), %r14 # 8-byte Reload - adcq 224(%rsp), %r14 - movq 96(%rsp), %rcx # 8-byte Reload - adcq 232(%rsp), %rcx - movq %rcx, 96(%rsp) # 8-byte Spill - adcq 240(%rsp), %rbx - movq %rbx, 80(%rsp) # 8-byte Spill - movq 40(%rsp), %rcx # 8-byte Reload - adcq 248(%rsp), %rcx - movq %rcx, 
40(%rsp) # 8-byte Spill - adcq 256(%rsp), %r12 - movq %r12, 64(%rsp) # 8-byte Spill - adcq 264(%rsp), %rbp - movq %rbp, 48(%rsp) # 8-byte Spill - adcq 272(%rsp), %r13 - sbbq %rbx, %rbx - movq 16(%rsp), %rdx # 8-byte Reload - imulq %rax, %rdx - movq %rax, %r12 - leaq 120(%rsp), %rdi - movq 56(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %ebx - addq 120(%rsp), %r12 - adcq 128(%rsp), %r15 - movq 8(%rsp), %rbp # 8-byte Reload - adcq 136(%rsp), %rbp - movq %r14, %rcx - adcq 144(%rsp), %rcx - movq %rcx, 72(%rsp) # 8-byte Spill - movq 96(%rsp), %r8 # 8-byte Reload - adcq 152(%rsp), %r8 - movq %r8, 96(%rsp) # 8-byte Spill - movq 80(%rsp), %r9 # 8-byte Reload - adcq 160(%rsp), %r9 - movq %r9, 80(%rsp) # 8-byte Spill - movq 40(%rsp), %r10 # 8-byte Reload - adcq 168(%rsp), %r10 - movq %r10, 40(%rsp) # 8-byte Spill - movq 64(%rsp), %rdi # 8-byte Reload - adcq 176(%rsp), %rdi - movq %rdi, 64(%rsp) # 8-byte Spill - movq 48(%rsp), %r14 # 8-byte Reload - adcq 184(%rsp), %r14 - adcq 192(%rsp), %r13 - adcq $0, %rbx - movq %r15, %rsi - movq %r15, %r12 - movq 56(%rsp), %rdx # 8-byte Reload - subq (%rdx), %rsi - movq %rbp, %rax - movq %rbp, %r15 - sbbq 8(%rdx), %rax - movq %rcx, %rbp - sbbq 16(%rdx), %rbp - movq %r8, %rcx - sbbq 24(%rdx), %rcx - movq %r9, %r8 - sbbq 32(%rdx), %r8 - movq %r10, %r11 - sbbq 40(%rdx), %r11 - movq %rdi, %r10 - sbbq 48(%rdx), %r10 - movq %r14, %rdi - sbbq 56(%rdx), %rdi - movq %r13, %r9 - sbbq 64(%rdx), %r9 - sbbq $0, %rbx - andl $1, %ebx - cmovneq %r13, %r9 - testb %bl, %bl - cmovneq %r12, %rsi - movq (%rsp), %rbx # 8-byte Reload - movq %rsi, (%rbx) - cmovneq %r15, %rax - movq %rax, 8(%rbx) - cmovneq 72(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 16(%rbx) - cmovneq 96(%rsp), %rcx # 8-byte Folded Reload - movq %rcx, 24(%rbx) - cmovneq 80(%rsp), %r8 # 8-byte Folded Reload - movq %r8, 32(%rbx) - cmovneq 40(%rsp), %r11 # 8-byte Folded Reload - movq %r11, 40(%rbx) - cmovneq 64(%rsp), %r10 # 8-byte Folded Reload - movq %r10, 48(%rbx) - cmovneq %r14, %rdi - movq %rdi, 56(%rbx) - movq %r9, 64(%rbx) - addq $1560, %rsp # imm = 0x618 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end130: - .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L - - .globl mcl_fp_montNF9L - .align 16, 0x90 - .type mcl_fp_montNF9L,@function -mcl_fp_montNF9L: # @mcl_fp_montNF9L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $1560, %rsp # imm = 0x618 - movq %rcx, 64(%rsp) # 8-byte Spill - movq %rdx, 16(%rsp) # 8-byte Spill - movq %rsi, 24(%rsp) # 8-byte Spill - movq %rdi, (%rsp) # 8-byte Spill - movq -8(%rcx), %rbx - movq %rbx, 32(%rsp) # 8-byte Spill - movq (%rdx), %rdx - leaq 1480(%rsp), %rdi - callq .LmulPv576x64 - movq 1480(%rsp), %r12 - movq 1488(%rsp), %rax - movq %rax, 88(%rsp) # 8-byte Spill - movq %r12, %rdx - imulq %rbx, %rdx - movq 1552(%rsp), %rax - movq %rax, 112(%rsp) # 8-byte Spill - movq 1544(%rsp), %r13 - movq 1536(%rsp), %rax - movq %rax, 72(%rsp) # 8-byte Spill - movq 1528(%rsp), %rax - movq %rax, 48(%rsp) # 8-byte Spill - movq 1520(%rsp), %r14 - movq 1512(%rsp), %r15 - movq 1504(%rsp), %rbx - movq 1496(%rsp), %rbp - leaq 1400(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 1400(%rsp), %r12 - movq 88(%rsp), %rax # 8-byte Reload - adcq 1408(%rsp), %rax - movq %rax, 88(%rsp) # 8-byte Spill - adcq 1416(%rsp), %rbp - movq %rbp, 8(%rsp) # 8-byte Spill - adcq 1424(%rsp), %rbx - movq %rbx, 104(%rsp) # 8-byte Spill - adcq 1432(%rsp), %r15 - movq %r15, 56(%rsp) # 8-byte 
Spill - adcq 1440(%rsp), %r14 - movq %r14, 40(%rsp) # 8-byte Spill - movq 48(%rsp), %rbx # 8-byte Reload - adcq 1448(%rsp), %rbx - movq 72(%rsp), %r12 # 8-byte Reload - adcq 1456(%rsp), %r12 - adcq 1464(%rsp), %r13 - movq %r13, 96(%rsp) # 8-byte Spill - movq 112(%rsp), %rbp # 8-byte Reload - adcq 1472(%rsp), %rbp - movq 16(%rsp), %rax # 8-byte Reload - movq 8(%rax), %rdx - leaq 1320(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 1392(%rsp), %rax - movq 88(%rsp), %rcx # 8-byte Reload - addq 1320(%rsp), %rcx - movq 8(%rsp), %r15 # 8-byte Reload - adcq 1328(%rsp), %r15 - movq 104(%rsp), %r14 # 8-byte Reload - adcq 1336(%rsp), %r14 - movq 56(%rsp), %rdx # 8-byte Reload - adcq 1344(%rsp), %rdx - movq %rdx, 56(%rsp) # 8-byte Spill - movq 40(%rsp), %r13 # 8-byte Reload - adcq 1352(%rsp), %r13 - adcq 1360(%rsp), %rbx - movq %rbx, 48(%rsp) # 8-byte Spill - adcq 1368(%rsp), %r12 - movq %r12, 72(%rsp) # 8-byte Spill - movq 96(%rsp), %rdx # 8-byte Reload - adcq 1376(%rsp), %rdx - movq %rdx, 96(%rsp) # 8-byte Spill - adcq 1384(%rsp), %rbp - movq %rbp, 112(%rsp) # 8-byte Spill - adcq $0, %rax - movq %rax, %rbp - movq %rcx, %rdx - movq %rcx, %rbx - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 1240(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 1240(%rsp), %rbx - adcq 1248(%rsp), %r15 - movq %r15, 8(%rsp) # 8-byte Spill - adcq 1256(%rsp), %r14 - movq %r14, 104(%rsp) # 8-byte Spill - movq 56(%rsp), %r12 # 8-byte Reload - adcq 1264(%rsp), %r12 - adcq 1272(%rsp), %r13 - movq %r13, %r14 - movq 48(%rsp), %r13 # 8-byte Reload - adcq 1280(%rsp), %r13 - movq 72(%rsp), %rbx # 8-byte Reload - adcq 1288(%rsp), %rbx - movq 96(%rsp), %r15 # 8-byte Reload - adcq 1296(%rsp), %r15 - movq 112(%rsp), %rax # 8-byte Reload - adcq 1304(%rsp), %rax - movq %rax, 112(%rsp) # 8-byte Spill - adcq 1312(%rsp), %rbp - movq %rbp, 80(%rsp) # 8-byte Spill - movq 16(%rsp), %rax # 8-byte Reload - movq 16(%rax), %rdx - leaq 1160(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 1232(%rsp), %rax - movq 8(%rsp), %rcx # 8-byte Reload - addq 1160(%rsp), %rcx - movq 104(%rsp), %rbp # 8-byte Reload - adcq 1168(%rsp), %rbp - adcq 1176(%rsp), %r12 - movq %r12, 56(%rsp) # 8-byte Spill - adcq 1184(%rsp), %r14 - adcq 1192(%rsp), %r13 - movq %r13, %r12 - adcq 1200(%rsp), %rbx - movq %rbx, 72(%rsp) # 8-byte Spill - adcq 1208(%rsp), %r15 - movq %r15, 96(%rsp) # 8-byte Spill - movq 112(%rsp), %rbx # 8-byte Reload - adcq 1216(%rsp), %rbx - movq 80(%rsp), %rdx # 8-byte Reload - adcq 1224(%rsp), %rdx - movq %rdx, 80(%rsp) # 8-byte Spill - movq %rax, %r15 - adcq $0, %r15 - movq %rcx, %rdx - movq %rcx, %r13 - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 1080(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 1080(%rsp), %r13 - adcq 1088(%rsp), %rbp - movq %rbp, 104(%rsp) # 8-byte Spill - movq 56(%rsp), %r13 # 8-byte Reload - adcq 1096(%rsp), %r13 - adcq 1104(%rsp), %r14 - adcq 1112(%rsp), %r12 - movq %r12, 48(%rsp) # 8-byte Spill - movq 72(%rsp), %r12 # 8-byte Reload - adcq 1120(%rsp), %r12 - movq 96(%rsp), %rbp # 8-byte Reload - adcq 1128(%rsp), %rbp - adcq 1136(%rsp), %rbx - movq %rbx, 112(%rsp) # 8-byte Spill - movq 80(%rsp), %rbx # 8-byte Reload - adcq 1144(%rsp), %rbx - adcq 1152(%rsp), %r15 - movq 16(%rsp), %rax # 8-byte Reload - movq 24(%rax), %rdx - leaq 1000(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 1072(%rsp), %rax - movq 104(%rsp), %rcx # 8-byte Reload - addq 1000(%rsp), %rcx - 
adcq 1008(%rsp), %r13 - movq %r13, 56(%rsp) # 8-byte Spill - adcq 1016(%rsp), %r14 - movq %r14, 40(%rsp) # 8-byte Spill - movq 48(%rsp), %r14 # 8-byte Reload - adcq 1024(%rsp), %r14 - adcq 1032(%rsp), %r12 - adcq 1040(%rsp), %rbp - movq %rbp, 96(%rsp) # 8-byte Spill - movq 112(%rsp), %r13 # 8-byte Reload - adcq 1048(%rsp), %r13 - adcq 1056(%rsp), %rbx - movq %rbx, 80(%rsp) # 8-byte Spill - adcq 1064(%rsp), %r15 - movq %r15, 88(%rsp) # 8-byte Spill - adcq $0, %rax - movq %rax, 104(%rsp) # 8-byte Spill - movq %rcx, %rdx - movq %rcx, %rbx - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 920(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 920(%rsp), %rbx - movq 56(%rsp), %rax # 8-byte Reload - adcq 928(%rsp), %rax - movq %rax, 56(%rsp) # 8-byte Spill - movq 40(%rsp), %rbp # 8-byte Reload - adcq 936(%rsp), %rbp - movq %r14, %rbx - adcq 944(%rsp), %rbx - adcq 952(%rsp), %r12 - movq 96(%rsp), %rax # 8-byte Reload - adcq 960(%rsp), %rax - movq %rax, 96(%rsp) # 8-byte Spill - adcq 968(%rsp), %r13 - movq %r13, %r15 - movq 80(%rsp), %r13 # 8-byte Reload - adcq 976(%rsp), %r13 - movq 88(%rsp), %r14 # 8-byte Reload - adcq 984(%rsp), %r14 - movq 104(%rsp), %rax # 8-byte Reload - adcq 992(%rsp), %rax - movq %rax, 104(%rsp) # 8-byte Spill - movq 16(%rsp), %rax # 8-byte Reload - movq 32(%rax), %rdx - leaq 840(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 912(%rsp), %rax - movq 56(%rsp), %rcx # 8-byte Reload - addq 840(%rsp), %rcx - adcq 848(%rsp), %rbp - movq %rbp, 40(%rsp) # 8-byte Spill - adcq 856(%rsp), %rbx - movq %rbx, 48(%rsp) # 8-byte Spill - adcq 864(%rsp), %r12 - movq 96(%rsp), %rbp # 8-byte Reload - adcq 872(%rsp), %rbp - adcq 880(%rsp), %r15 - movq %r15, 112(%rsp) # 8-byte Spill - adcq 888(%rsp), %r13 - adcq 896(%rsp), %r14 - movq %r14, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %rdx # 8-byte Reload - adcq 904(%rsp), %rdx - movq %rdx, 104(%rsp) # 8-byte Spill - adcq $0, %rax - movq %rax, %r14 - movq %rcx, %rdx - movq %rcx, %rbx - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 760(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 760(%rsp), %rbx - movq 40(%rsp), %rax # 8-byte Reload - adcq 768(%rsp), %rax - movq %rax, 40(%rsp) # 8-byte Spill - movq 48(%rsp), %r15 # 8-byte Reload - adcq 776(%rsp), %r15 - adcq 784(%rsp), %r12 - movq %r12, 72(%rsp) # 8-byte Spill - movq %rbp, %rbx - adcq 792(%rsp), %rbx - movq 112(%rsp), %rbp # 8-byte Reload - adcq 800(%rsp), %rbp - adcq 808(%rsp), %r13 - movq 88(%rsp), %rax # 8-byte Reload - adcq 816(%rsp), %rax - movq %rax, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %r12 # 8-byte Reload - adcq 824(%rsp), %r12 - adcq 832(%rsp), %r14 - movq 16(%rsp), %rax # 8-byte Reload - movq 40(%rax), %rdx - leaq 680(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 752(%rsp), %rcx - movq 40(%rsp), %rax # 8-byte Reload - addq 680(%rsp), %rax - adcq 688(%rsp), %r15 - movq %r15, 48(%rsp) # 8-byte Spill - movq 72(%rsp), %rdx # 8-byte Reload - adcq 696(%rsp), %rdx - movq %rdx, 72(%rsp) # 8-byte Spill - adcq 704(%rsp), %rbx - movq %rbx, 96(%rsp) # 8-byte Spill - adcq 712(%rsp), %rbp - movq %rbp, 112(%rsp) # 8-byte Spill - adcq 720(%rsp), %r13 - movq %r13, %r15 - movq 88(%rsp), %rbx # 8-byte Reload - adcq 728(%rsp), %rbx - adcq 736(%rsp), %r12 - movq %r12, 104(%rsp) # 8-byte Spill - adcq 744(%rsp), %r14 - movq %r14, 40(%rsp) # 8-byte Spill - adcq $0, %rcx - movq %rcx, 56(%rsp) # 8-byte Spill - movq %rax, %rdx - movq %rax, %r13 - imulq 32(%rsp), %rdx # 8-byte 
Folded Reload - leaq 600(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 600(%rsp), %r13 - movq 48(%rsp), %r13 # 8-byte Reload - adcq 608(%rsp), %r13 - movq 72(%rsp), %r12 # 8-byte Reload - adcq 616(%rsp), %r12 - movq 96(%rsp), %rbp # 8-byte Reload - adcq 624(%rsp), %rbp - movq 112(%rsp), %rax # 8-byte Reload - adcq 632(%rsp), %rax - movq %rax, 112(%rsp) # 8-byte Spill - adcq 640(%rsp), %r15 - movq %r15, 80(%rsp) # 8-byte Spill - adcq 648(%rsp), %rbx - movq %rbx, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %r14 # 8-byte Reload - adcq 656(%rsp), %r14 - movq 40(%rsp), %rbx # 8-byte Reload - adcq 664(%rsp), %rbx - movq 56(%rsp), %r15 # 8-byte Reload - adcq 672(%rsp), %r15 - movq 16(%rsp), %rax # 8-byte Reload - movq 48(%rax), %rdx - leaq 520(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 592(%rsp), %rcx - movq %r13, %rax - addq 520(%rsp), %rax - adcq 528(%rsp), %r12 - movq %r12, 72(%rsp) # 8-byte Spill - movq %rbp, %r12 - adcq 536(%rsp), %r12 - movq 112(%rsp), %rbp # 8-byte Reload - adcq 544(%rsp), %rbp - movq 80(%rsp), %rdx # 8-byte Reload - adcq 552(%rsp), %rdx - movq %rdx, 80(%rsp) # 8-byte Spill - movq 88(%rsp), %rdx # 8-byte Reload - adcq 560(%rsp), %rdx - movq %rdx, 88(%rsp) # 8-byte Spill - adcq 568(%rsp), %r14 - movq %r14, 104(%rsp) # 8-byte Spill - adcq 576(%rsp), %rbx - movq %rbx, 40(%rsp) # 8-byte Spill - adcq 584(%rsp), %r15 - movq %r15, 56(%rsp) # 8-byte Spill - adcq $0, %rcx - movq %rcx, %r13 - movq %rax, %rdx - movq %rax, %r14 - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 440(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 440(%rsp), %r14 - movq 72(%rsp), %rax # 8-byte Reload - adcq 448(%rsp), %rax - movq %rax, 72(%rsp) # 8-byte Spill - adcq 456(%rsp), %r12 - adcq 464(%rsp), %rbp - movq %rbp, 112(%rsp) # 8-byte Spill - movq 80(%rsp), %r14 # 8-byte Reload - adcq 472(%rsp), %r14 - movq 88(%rsp), %r15 # 8-byte Reload - adcq 480(%rsp), %r15 - movq 104(%rsp), %rbp # 8-byte Reload - adcq 488(%rsp), %rbp - movq 40(%rsp), %rbx # 8-byte Reload - adcq 496(%rsp), %rbx - movq 56(%rsp), %rax # 8-byte Reload - adcq 504(%rsp), %rax - movq %rax, 56(%rsp) # 8-byte Spill - adcq 512(%rsp), %r13 - movq 16(%rsp), %rax # 8-byte Reload - movq 56(%rax), %rdx - leaq 360(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 432(%rsp), %rcx - movq 72(%rsp), %rax # 8-byte Reload - addq 360(%rsp), %rax - adcq 368(%rsp), %r12 - movq %r12, 96(%rsp) # 8-byte Spill - movq 112(%rsp), %rdx # 8-byte Reload - adcq 376(%rsp), %rdx - movq %rdx, 112(%rsp) # 8-byte Spill - adcq 384(%rsp), %r14 - movq %r14, 80(%rsp) # 8-byte Spill - adcq 392(%rsp), %r15 - movq %r15, 88(%rsp) # 8-byte Spill - adcq 400(%rsp), %rbp - movq %rbp, 104(%rsp) # 8-byte Spill - adcq 408(%rsp), %rbx - movq %rbx, 40(%rsp) # 8-byte Spill - movq 56(%rsp), %r14 # 8-byte Reload - adcq 416(%rsp), %r14 - adcq 424(%rsp), %r13 - movq %r13, %r15 - adcq $0, %rcx - movq %rcx, 72(%rsp) # 8-byte Spill - movq %rax, %rdx - movq %rax, %r12 - imulq 32(%rsp), %rdx # 8-byte Folded Reload - leaq 280(%rsp), %rdi - movq 64(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 280(%rsp), %r12 - movq 96(%rsp), %rax # 8-byte Reload - adcq 288(%rsp), %rax - movq %rax, 96(%rsp) # 8-byte Spill - movq 112(%rsp), %rbp # 8-byte Reload - adcq 296(%rsp), %rbp - movq 80(%rsp), %rax # 8-byte Reload - adcq 304(%rsp), %rax - movq %rax, 80(%rsp) # 8-byte Spill - movq 88(%rsp), %r13 # 8-byte Reload - adcq 312(%rsp), %r13 - movq 104(%rsp), %r12 # 8-byte Reload - 
adcq 320(%rsp), %r12 - movq 40(%rsp), %rbx # 8-byte Reload - adcq 328(%rsp), %rbx - adcq 336(%rsp), %r14 - movq %r14, 56(%rsp) # 8-byte Spill - adcq 344(%rsp), %r15 - movq %r15, 48(%rsp) # 8-byte Spill - movq 72(%rsp), %r14 # 8-byte Reload - adcq 352(%rsp), %r14 - movq 16(%rsp), %rax # 8-byte Reload - movq 64(%rax), %rdx - leaq 200(%rsp), %rdi - movq 24(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - movq 272(%rsp), %rcx - movq 96(%rsp), %rax # 8-byte Reload - addq 200(%rsp), %rax - adcq 208(%rsp), %rbp - movq %rbp, 112(%rsp) # 8-byte Spill - movq 80(%rsp), %rbp # 8-byte Reload - adcq 216(%rsp), %rbp - adcq 224(%rsp), %r13 - movq %r13, 88(%rsp) # 8-byte Spill - adcq 232(%rsp), %r12 - movq %r12, 104(%rsp) # 8-byte Spill - adcq 240(%rsp), %rbx - movq %rbx, 40(%rsp) # 8-byte Spill - movq 56(%rsp), %r15 # 8-byte Reload - adcq 248(%rsp), %r15 - movq 48(%rsp), %r12 # 8-byte Reload - adcq 256(%rsp), %r12 - adcq 264(%rsp), %r14 - adcq $0, %rcx - movq %rcx, 96(%rsp) # 8-byte Spill - movq 32(%rsp), %rdx # 8-byte Reload - imulq %rax, %rdx - movq %rax, %rbx - leaq 120(%rsp), %rdi - movq 64(%rsp), %r13 # 8-byte Reload - movq %r13, %rsi - callq .LmulPv576x64 - addq 120(%rsp), %rbx - movq 112(%rsp), %rcx # 8-byte Reload - adcq 128(%rsp), %rcx - movq %rbp, %rdx - adcq 136(%rsp), %rdx - movq 88(%rsp), %rsi # 8-byte Reload - adcq 144(%rsp), %rsi - movq %rsi, 88(%rsp) # 8-byte Spill - movq 104(%rsp), %rdi # 8-byte Reload - adcq 152(%rsp), %rdi - movq %rdi, 104(%rsp) # 8-byte Spill - movq 40(%rsp), %rbx # 8-byte Reload - adcq 160(%rsp), %rbx - movq %rbx, 40(%rsp) # 8-byte Spill - movq %r15, %r8 - adcq 168(%rsp), %r8 - movq %r8, 56(%rsp) # 8-byte Spill - movq %r12, %r15 - adcq 176(%rsp), %r15 - adcq 184(%rsp), %r14 - movq 96(%rsp), %r9 # 8-byte Reload - adcq 192(%rsp), %r9 - movq %rcx, %rax - movq %rcx, %r11 - movq %r13, %rbp - subq (%rbp), %rax - movq %rdx, %rcx - movq %rdx, %r12 - sbbq 8(%rbp), %rcx - movq %rsi, %rdx - sbbq 16(%rbp), %rdx - movq %rdi, %rsi - sbbq 24(%rbp), %rsi - movq %rbx, %rdi - sbbq 32(%rbp), %rdi - movq %r8, %r10 - sbbq 40(%rbp), %r10 - movq %r15, %r13 - sbbq 48(%rbp), %r13 - movq %r14, %r8 - sbbq 56(%rbp), %r8 - movq %rbp, %rbx - movq %r9, %rbp - sbbq 64(%rbx), %rbp - movq %rbp, %rbx - sarq $63, %rbx - cmovsq %r11, %rax - movq (%rsp), %rbx # 8-byte Reload - movq %rax, (%rbx) - cmovsq %r12, %rcx - movq %rcx, 8(%rbx) - cmovsq 88(%rsp), %rdx # 8-byte Folded Reload - movq %rdx, 16(%rbx) - cmovsq 104(%rsp), %rsi # 8-byte Folded Reload - movq %rsi, 24(%rbx) - cmovsq 40(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 32(%rbx) - cmovsq 56(%rsp), %r10 # 8-byte Folded Reload - movq %r10, 40(%rbx) - cmovsq %r15, %r13 - movq %r13, 48(%rbx) - cmovsq %r14, %r8 - movq %r8, 56(%rbx) - cmovsq %r9, %rbp - movq %rbp, 64(%rbx) - addq $1560, %rsp # imm = 0x618 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end131: - .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L - - .globl mcl_fp_montRed9L - .align 16, 0x90 - .type mcl_fp_montRed9L,@function -mcl_fp_montRed9L: # @mcl_fp_montRed9L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $936, %rsp # imm = 0x3A8 - movq %rdx, %rax - movq %rax, 128(%rsp) # 8-byte Spill - movq %rdi, 80(%rsp) # 8-byte Spill - movq -8(%rax), %rcx - movq %rcx, 120(%rsp) # 8-byte Spill - movq (%rsi), %r14 - movq 8(%rsi), %rdx - movq %rdx, 192(%rsp) # 8-byte Spill - movq %r14, %rdx - imulq %rcx, %rdx - movq 136(%rsi), %rcx - movq %rcx, 112(%rsp) # 8-byte Spill - movq 128(%rsi), %rcx - movq %rcx, 
152(%rsp) # 8-byte Spill - movq 120(%rsi), %rcx - movq %rcx, 104(%rsp) # 8-byte Spill - movq 112(%rsi), %rcx - movq %rcx, 144(%rsp) # 8-byte Spill - movq 104(%rsi), %rcx - movq %rcx, 184(%rsp) # 8-byte Spill - movq 96(%rsi), %rcx - movq %rcx, 208(%rsp) # 8-byte Spill - movq 88(%rsi), %rcx - movq %rcx, 200(%rsp) # 8-byte Spill - movq 80(%rsi), %rcx - movq %rcx, 160(%rsp) # 8-byte Spill - movq 72(%rsi), %r12 - movq 64(%rsi), %rcx - movq %rcx, 176(%rsp) # 8-byte Spill - movq 56(%rsi), %rcx - movq %rcx, 168(%rsp) # 8-byte Spill - movq 48(%rsi), %rcx - movq %rcx, 136(%rsp) # 8-byte Spill - movq 40(%rsi), %rbp - movq 32(%rsi), %rbx - movq 24(%rsi), %r13 - movq 16(%rsi), %r15 - movq %rax, %rcx - movq (%rcx), %rax - movq %rax, 16(%rsp) # 8-byte Spill - movq 64(%rcx), %rax - movq %rax, 72(%rsp) # 8-byte Spill - movq 56(%rcx), %rax - movq %rax, 64(%rsp) # 8-byte Spill - movq 48(%rcx), %rax - movq %rax, 56(%rsp) # 8-byte Spill - movq 40(%rcx), %rax - movq %rax, 48(%rsp) # 8-byte Spill - movq 32(%rcx), %rax - movq %rax, 40(%rsp) # 8-byte Spill - movq 24(%rcx), %rax - movq %rax, 32(%rsp) # 8-byte Spill - movq 16(%rcx), %rax - movq %rax, 24(%rsp) # 8-byte Spill - movq 8(%rcx), %rax - movq %rax, 8(%rsp) # 8-byte Spill - movq %rcx, %rsi - leaq 856(%rsp), %rdi - callq .LmulPv576x64 - addq 856(%rsp), %r14 - movq 192(%rsp), %rcx # 8-byte Reload - adcq 864(%rsp), %rcx - adcq 872(%rsp), %r15 - adcq 880(%rsp), %r13 - adcq 888(%rsp), %rbx - movq %rbx, 88(%rsp) # 8-byte Spill - adcq 896(%rsp), %rbp - movq %rbp, 96(%rsp) # 8-byte Spill - movq 136(%rsp), %rax # 8-byte Reload - adcq 904(%rsp), %rax - movq %rax, 136(%rsp) # 8-byte Spill - movq 168(%rsp), %rax # 8-byte Reload - adcq 912(%rsp), %rax - movq %rax, 168(%rsp) # 8-byte Spill - movq 176(%rsp), %rax # 8-byte Reload - adcq 920(%rsp), %rax - movq %rax, 176(%rsp) # 8-byte Spill - adcq 928(%rsp), %r12 - movq %r12, 192(%rsp) # 8-byte Spill - movq 160(%rsp), %rbp # 8-byte Reload - adcq $0, %rbp - adcq $0, 200(%rsp) # 8-byte Folded Spill - adcq $0, 208(%rsp) # 8-byte Folded Spill - adcq $0, 184(%rsp) # 8-byte Folded Spill - adcq $0, 144(%rsp) # 8-byte Folded Spill - adcq $0, 104(%rsp) # 8-byte Folded Spill - adcq $0, 152(%rsp) # 8-byte Folded Spill - movq 112(%rsp), %r14 # 8-byte Reload - adcq $0, %r14 - sbbq %r12, %r12 - movq %rcx, %rdx - movq %rcx, %rbx - imulq 120(%rsp), %rdx # 8-byte Folded Reload - leaq 776(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - andl $1, %r12d - addq 776(%rsp), %rbx - adcq 784(%rsp), %r15 - adcq 792(%rsp), %r13 - movq %r13, (%rsp) # 8-byte Spill - movq 88(%rsp), %rax # 8-byte Reload - adcq 800(%rsp), %rax - movq %rax, 88(%rsp) # 8-byte Spill - movq 96(%rsp), %rax # 8-byte Reload - adcq 808(%rsp), %rax - movq %rax, 96(%rsp) # 8-byte Spill - movq 136(%rsp), %rax # 8-byte Reload - adcq 816(%rsp), %rax - movq %rax, 136(%rsp) # 8-byte Spill - movq 168(%rsp), %rax # 8-byte Reload - adcq 824(%rsp), %rax - movq %rax, 168(%rsp) # 8-byte Spill - movq 176(%rsp), %rax # 8-byte Reload - adcq 832(%rsp), %rax - movq %rax, 176(%rsp) # 8-byte Spill - movq 192(%rsp), %rax # 8-byte Reload - adcq 840(%rsp), %rax - movq %rax, 192(%rsp) # 8-byte Spill - adcq 848(%rsp), %rbp - movq %rbp, 160(%rsp) # 8-byte Spill - movq 200(%rsp), %r13 # 8-byte Reload - adcq $0, %r13 - adcq $0, 208(%rsp) # 8-byte Folded Spill - adcq $0, 184(%rsp) # 8-byte Folded Spill - adcq $0, 144(%rsp) # 8-byte Folded Spill - adcq $0, 104(%rsp) # 8-byte Folded Spill - movq 152(%rsp), %rbx # 8-byte Reload - adcq $0, %rbx - adcq $0, %r14 - movq %r14, 112(%rsp) # 
8-byte Spill - adcq $0, %r12 - movq %r15, %rdx - imulq 120(%rsp), %rdx # 8-byte Folded Reload - leaq 696(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 696(%rsp), %r15 - movq (%rsp), %rcx # 8-byte Reload - adcq 704(%rsp), %rcx - movq 88(%rsp), %rax # 8-byte Reload - adcq 712(%rsp), %rax - movq %rax, 88(%rsp) # 8-byte Spill - movq 96(%rsp), %rax # 8-byte Reload - adcq 720(%rsp), %rax - movq %rax, 96(%rsp) # 8-byte Spill - movq 136(%rsp), %rbp # 8-byte Reload - adcq 728(%rsp), %rbp - movq 168(%rsp), %r14 # 8-byte Reload - adcq 736(%rsp), %r14 - movq 176(%rsp), %r15 # 8-byte Reload - adcq 744(%rsp), %r15 - movq 192(%rsp), %rax # 8-byte Reload - adcq 752(%rsp), %rax - movq %rax, 192(%rsp) # 8-byte Spill - movq 160(%rsp), %rax # 8-byte Reload - adcq 760(%rsp), %rax - movq %rax, 160(%rsp) # 8-byte Spill - adcq 768(%rsp), %r13 - movq %r13, 200(%rsp) # 8-byte Spill - adcq $0, 208(%rsp) # 8-byte Folded Spill - movq 184(%rsp), %r13 # 8-byte Reload - adcq $0, %r13 - adcq $0, 144(%rsp) # 8-byte Folded Spill - adcq $0, 104(%rsp) # 8-byte Folded Spill - adcq $0, %rbx - movq %rbx, 152(%rsp) # 8-byte Spill - adcq $0, 112(%rsp) # 8-byte Folded Spill - adcq $0, %r12 - movq %rcx, %rbx - movq %rbx, %rdx - imulq 120(%rsp), %rdx # 8-byte Folded Reload - leaq 616(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 616(%rsp), %rbx - movq 88(%rsp), %rax # 8-byte Reload - adcq 624(%rsp), %rax - movq 96(%rsp), %rcx # 8-byte Reload - adcq 632(%rsp), %rcx - movq %rcx, 96(%rsp) # 8-byte Spill - adcq 640(%rsp), %rbp - movq %rbp, 136(%rsp) # 8-byte Spill - adcq 648(%rsp), %r14 - movq %r14, 168(%rsp) # 8-byte Spill - adcq 656(%rsp), %r15 - movq 192(%rsp), %r14 # 8-byte Reload - adcq 664(%rsp), %r14 - movq 160(%rsp), %rbp # 8-byte Reload - adcq 672(%rsp), %rbp - movq 200(%rsp), %rcx # 8-byte Reload - adcq 680(%rsp), %rcx - movq %rcx, 200(%rsp) # 8-byte Spill - movq 208(%rsp), %rcx # 8-byte Reload - adcq 688(%rsp), %rcx - movq %rcx, 208(%rsp) # 8-byte Spill - adcq $0, %r13 - movq %r13, 184(%rsp) # 8-byte Spill - adcq $0, 144(%rsp) # 8-byte Folded Spill - adcq $0, 104(%rsp) # 8-byte Folded Spill - adcq $0, 152(%rsp) # 8-byte Folded Spill - adcq $0, 112(%rsp) # 8-byte Folded Spill - adcq $0, %r12 - movq %rax, %rbx - movq %rbx, %rdx - imulq 120(%rsp), %rdx # 8-byte Folded Reload - leaq 536(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 536(%rsp), %rbx - movq 96(%rsp), %rax # 8-byte Reload - adcq 544(%rsp), %rax - movq 136(%rsp), %rcx # 8-byte Reload - adcq 552(%rsp), %rcx - movq %rcx, 136(%rsp) # 8-byte Spill - movq 168(%rsp), %rcx # 8-byte Reload - adcq 560(%rsp), %rcx - movq %rcx, 168(%rsp) # 8-byte Spill - adcq 568(%rsp), %r15 - movq %r15, 176(%rsp) # 8-byte Spill - adcq 576(%rsp), %r14 - movq %r14, 192(%rsp) # 8-byte Spill - adcq 584(%rsp), %rbp - movq %rbp, 160(%rsp) # 8-byte Spill - movq 200(%rsp), %r13 # 8-byte Reload - adcq 592(%rsp), %r13 - movq 208(%rsp), %r15 # 8-byte Reload - adcq 600(%rsp), %r15 - movq 184(%rsp), %rbp # 8-byte Reload - adcq 608(%rsp), %rbp - movq 144(%rsp), %rbx # 8-byte Reload - adcq $0, %rbx - adcq $0, 104(%rsp) # 8-byte Folded Spill - adcq $0, 152(%rsp) # 8-byte Folded Spill - adcq $0, 112(%rsp) # 8-byte Folded Spill - adcq $0, %r12 - movq %rax, %rdx - movq %rax, %r14 - imulq 120(%rsp), %rdx # 8-byte Folded Reload - leaq 456(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 456(%rsp), %r14 - movq 136(%rsp), %rax # 8-byte Reload - adcq 464(%rsp), %rax - movq 168(%rsp), %rcx 
# 8-byte Reload - adcq 472(%rsp), %rcx - movq %rcx, 168(%rsp) # 8-byte Spill - movq 176(%rsp), %rcx # 8-byte Reload - adcq 480(%rsp), %rcx - movq %rcx, 176(%rsp) # 8-byte Spill - movq 192(%rsp), %rcx # 8-byte Reload - adcq 488(%rsp), %rcx - movq %rcx, 192(%rsp) # 8-byte Spill - movq 160(%rsp), %rcx # 8-byte Reload - adcq 496(%rsp), %rcx - movq %rcx, 160(%rsp) # 8-byte Spill - adcq 504(%rsp), %r13 - movq %r13, 200(%rsp) # 8-byte Spill - adcq 512(%rsp), %r15 - movq %r15, 208(%rsp) # 8-byte Spill - adcq 520(%rsp), %rbp - movq %rbp, 184(%rsp) # 8-byte Spill - adcq 528(%rsp), %rbx - movq %rbx, 144(%rsp) # 8-byte Spill - movq 104(%rsp), %r14 # 8-byte Reload - adcq $0, %r14 - movq 152(%rsp), %r13 # 8-byte Reload - adcq $0, %r13 - movq 112(%rsp), %rbx # 8-byte Reload - adcq $0, %rbx - adcq $0, %r12 - movq %rax, %rdx - movq %rax, %r15 - imulq 120(%rsp), %rdx # 8-byte Folded Reload - leaq 376(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 376(%rsp), %r15 - movq 168(%rsp), %rax # 8-byte Reload - adcq 384(%rsp), %rax - movq 176(%rsp), %rcx # 8-byte Reload - adcq 392(%rsp), %rcx - movq %rcx, 176(%rsp) # 8-byte Spill - movq 192(%rsp), %rcx # 8-byte Reload - adcq 400(%rsp), %rcx - movq %rcx, 192(%rsp) # 8-byte Spill - movq 160(%rsp), %rbp # 8-byte Reload - adcq 408(%rsp), %rbp - movq 200(%rsp), %rcx # 8-byte Reload - adcq 416(%rsp), %rcx - movq %rcx, 200(%rsp) # 8-byte Spill - movq 208(%rsp), %rcx # 8-byte Reload - adcq 424(%rsp), %rcx - movq %rcx, 208(%rsp) # 8-byte Spill - movq 184(%rsp), %rcx # 8-byte Reload - adcq 432(%rsp), %rcx - movq %rcx, 184(%rsp) # 8-byte Spill - movq 144(%rsp), %r15 # 8-byte Reload - adcq 440(%rsp), %r15 - adcq 448(%rsp), %r14 - movq %r14, 104(%rsp) # 8-byte Spill - adcq $0, %r13 - movq %r13, %r14 - adcq $0, %rbx - movq %rbx, 112(%rsp) # 8-byte Spill - adcq $0, %r12 - movq %rax, %rbx - movq %rbx, %rdx - imulq 120(%rsp), %rdx # 8-byte Folded Reload - leaq 296(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 296(%rsp), %rbx - movq 176(%rsp), %rax # 8-byte Reload - adcq 304(%rsp), %rax - movq 192(%rsp), %r13 # 8-byte Reload - adcq 312(%rsp), %r13 - adcq 320(%rsp), %rbp - movq 200(%rsp), %rcx # 8-byte Reload - adcq 328(%rsp), %rcx - movq %rcx, 200(%rsp) # 8-byte Spill - movq 208(%rsp), %rcx # 8-byte Reload - adcq 336(%rsp), %rcx - movq %rcx, 208(%rsp) # 8-byte Spill - movq 184(%rsp), %rcx # 8-byte Reload - adcq 344(%rsp), %rcx - movq %rcx, 184(%rsp) # 8-byte Spill - adcq 352(%rsp), %r15 - movq %r15, 144(%rsp) # 8-byte Spill - movq 104(%rsp), %r15 # 8-byte Reload - adcq 360(%rsp), %r15 - adcq 368(%rsp), %r14 - movq %r14, 152(%rsp) # 8-byte Spill - movq 112(%rsp), %r14 # 8-byte Reload - adcq $0, %r14 - adcq $0, %r12 - movq 120(%rsp), %rdx # 8-byte Reload - imulq %rax, %rdx - movq %rax, %rbx - leaq 216(%rsp), %rdi - movq 128(%rsp), %rsi # 8-byte Reload - callq .LmulPv576x64 - addq 216(%rsp), %rbx - movq %r13, %rsi - adcq 224(%rsp), %rsi - movq %rsi, 192(%rsp) # 8-byte Spill - adcq 232(%rsp), %rbp - movq %rbp, 160(%rsp) # 8-byte Spill - movq 200(%rsp), %r9 # 8-byte Reload - adcq 240(%rsp), %r9 - movq %r9, 200(%rsp) # 8-byte Spill - movq 208(%rsp), %r8 # 8-byte Reload - adcq 248(%rsp), %r8 - movq %r8, 208(%rsp) # 8-byte Spill - movq 184(%rsp), %rbx # 8-byte Reload - adcq 256(%rsp), %rbx - movq 144(%rsp), %rax # 8-byte Reload - adcq 264(%rsp), %rax - movq %r15, %rcx - adcq 272(%rsp), %rcx - movq 152(%rsp), %rdx # 8-byte Reload - adcq 280(%rsp), %rdx - movq %rdx, 152(%rsp) # 8-byte Spill - adcq 288(%rsp), %r14 - movq %r14, 
%r11 - adcq $0, %r12 - subq 16(%rsp), %rsi # 8-byte Folded Reload - movq %rbp, %rdi - sbbq 8(%rsp), %rdi # 8-byte Folded Reload - movq %r9, %rbp - sbbq 24(%rsp), %rbp # 8-byte Folded Reload - movq %r8, %r13 - sbbq 32(%rsp), %r13 # 8-byte Folded Reload - movq %rbx, %r15 - sbbq 40(%rsp), %r15 # 8-byte Folded Reload - movq %rax, %r14 - sbbq 48(%rsp), %r14 # 8-byte Folded Reload - movq %rcx, %r10 - sbbq 56(%rsp), %r10 # 8-byte Folded Reload - movq %rdx, %r8 - sbbq 64(%rsp), %r8 # 8-byte Folded Reload - movq %r11, %r9 - sbbq 72(%rsp), %r9 # 8-byte Folded Reload - sbbq $0, %r12 - andl $1, %r12d - cmovneq %r11, %r9 - testb %r12b, %r12b - cmovneq 192(%rsp), %rsi # 8-byte Folded Reload - movq 80(%rsp), %rdx # 8-byte Reload - movq %rsi, (%rdx) - cmovneq 160(%rsp), %rdi # 8-byte Folded Reload - movq %rdi, 8(%rdx) - cmovneq 200(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 16(%rdx) - cmovneq 208(%rsp), %r13 # 8-byte Folded Reload - movq %r13, 24(%rdx) - cmovneq %rbx, %r15 - movq %r15, 32(%rdx) - cmovneq %rax, %r14 - movq %r14, 40(%rdx) - cmovneq %rcx, %r10 - movq %r10, 48(%rdx) - cmovneq 152(%rsp), %r8 # 8-byte Folded Reload - movq %r8, 56(%rdx) - movq %r9, 64(%rdx) - addq $936, %rsp # imm = 0x3A8 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end132: - .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L - - .globl mcl_fp_addPre9L - .align 16, 0x90 - .type mcl_fp_addPre9L,@function -mcl_fp_addPre9L: # @mcl_fp_addPre9L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 64(%rdx), %r8 - movq 64(%rsi), %r15 - movq 56(%rsi), %r9 - movq 48(%rsi), %r10 - movq 40(%rsi), %r11 - movq 24(%rsi), %r12 - movq 32(%rsi), %r14 - movq (%rdx), %rbx - movq 8(%rdx), %rcx - addq (%rsi), %rbx - adcq 8(%rsi), %rcx - movq 16(%rdx), %rax - adcq 16(%rsi), %rax - adcq 24(%rdx), %r12 - movq 56(%rdx), %r13 - movq 48(%rdx), %rsi - movq 40(%rdx), %rbp - movq 32(%rdx), %rdx - movq %rbx, (%rdi) - movq %rcx, 8(%rdi) - movq %rax, 16(%rdi) - movq %r12, 24(%rdi) - adcq %r14, %rdx - movq %rdx, 32(%rdi) - adcq %r11, %rbp - movq %rbp, 40(%rdi) - adcq %r10, %rsi - movq %rsi, 48(%rdi) - adcq %r9, %r13 - movq %r13, 56(%rdi) - adcq %r8, %r15 - movq %r15, 64(%rdi) - sbbq %rax, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq -.Lfunc_end133: - .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L - - .globl mcl_fp_subPre9L - .align 16, 0x90 - .type mcl_fp_subPre9L,@function -mcl_fp_subPre9L: # @mcl_fp_subPre9L -# BB#0: - movq 32(%rdx), %r8 - movq (%rsi), %rcx - xorl %eax, %eax - subq (%rdx), %rcx - movq %rcx, (%rdi) - movq 8(%rsi), %rcx - sbbq 8(%rdx), %rcx - movq %rcx, 8(%rdi) - movq 16(%rsi), %rcx - sbbq 16(%rdx), %rcx - movq %rcx, 16(%rdi) - movq 24(%rsi), %rcx - sbbq 24(%rdx), %rcx - movq %rcx, 24(%rdi) - movq 32(%rsi), %rcx - sbbq %r8, %rcx - movq 40(%rdx), %r8 - movq %rcx, 32(%rdi) - movq 40(%rsi), %rcx - sbbq %r8, %rcx - movq 48(%rdx), %r8 - movq %rcx, 40(%rdi) - movq 48(%rsi), %rcx - sbbq %r8, %rcx - movq 56(%rdx), %r8 - movq %rcx, 48(%rdi) - movq 56(%rsi), %rcx - sbbq %r8, %rcx - movq %rcx, 56(%rdi) - movq 64(%rdx), %rcx - movq 64(%rsi), %rdx - sbbq %rcx, %rdx - movq %rdx, 64(%rdi) - sbbq $0, %rax - andl $1, %eax - retq -.Lfunc_end134: - .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L - - .globl mcl_fp_shr1_9L - .align 16, 0x90 - .type mcl_fp_shr1_9L,@function -mcl_fp_shr1_9L: # @mcl_fp_shr1_9L -# BB#0: - pushq %rbx - movq 64(%rsi), %r8 - movq 56(%rsi), %r9 - movq 48(%rsi), %r10 - movq 40(%rsi), 
%r11 - movq 32(%rsi), %rcx - movq 24(%rsi), %rdx - movq 16(%rsi), %rax - movq (%rsi), %rbx - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rbx - movq %rbx, (%rdi) - shrdq $1, %rax, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rdx, %rax - movq %rax, 16(%rdi) - shrdq $1, %rcx, %rdx - movq %rdx, 24(%rdi) - shrdq $1, %r11, %rcx - movq %rcx, 32(%rdi) - shrdq $1, %r10, %r11 - movq %r11, 40(%rdi) - shrdq $1, %r9, %r10 - movq %r10, 48(%rdi) - shrdq $1, %r8, %r9 - movq %r9, 56(%rdi) - shrq %r8 - movq %r8, 64(%rdi) - popq %rbx - retq -.Lfunc_end135: - .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L - - .globl mcl_fp_add9L - .align 16, 0x90 - .type mcl_fp_add9L,@function -mcl_fp_add9L: # @mcl_fp_add9L -# BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 64(%rdx), %r12 - movq 64(%rsi), %r8 - movq 56(%rsi), %r13 - movq 48(%rsi), %r9 - movq 40(%rsi), %r10 - movq 24(%rsi), %r14 - movq 32(%rsi), %r11 - movq (%rdx), %rbx - movq 8(%rdx), %r15 - addq (%rsi), %rbx - adcq 8(%rsi), %r15 - movq 16(%rdx), %rax - adcq 16(%rsi), %rax - adcq 24(%rdx), %r14 - adcq 32(%rdx), %r11 - adcq 40(%rdx), %r10 - movq 56(%rdx), %rsi - adcq 48(%rdx), %r9 - movq %rbx, (%rdi) - movq %r15, 8(%rdi) - movq %rax, 16(%rdi) - movq %r14, 24(%rdi) - movq %r11, 32(%rdi) - movq %r10, 40(%rdi) - movq %r9, 48(%rdi) - adcq %r13, %rsi - movq %rsi, 56(%rdi) - adcq %r12, %r8 - movq %r8, 64(%rdi) - sbbq %rdx, %rdx - andl $1, %edx - subq (%rcx), %rbx - sbbq 8(%rcx), %r15 - sbbq 16(%rcx), %rax - sbbq 24(%rcx), %r14 - sbbq 32(%rcx), %r11 - sbbq 40(%rcx), %r10 - sbbq 48(%rcx), %r9 - sbbq 56(%rcx), %rsi - sbbq 64(%rcx), %r8 - sbbq $0, %rdx - testb $1, %dl - jne .LBB136_2 -# BB#1: # %nocarry - movq %rbx, (%rdi) - movq %r15, 8(%rdi) - movq %rax, 16(%rdi) - movq %r14, 24(%rdi) - movq %r11, 32(%rdi) - movq %r10, 40(%rdi) - movq %r9, 48(%rdi) - movq %rsi, 56(%rdi) - movq %r8, 64(%rdi) -.LBB136_2: # %carry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq -.Lfunc_end136: - .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L - - .globl mcl_fp_addNF9L - .align 16, 0x90 - .type mcl_fp_addNF9L,@function -mcl_fp_addNF9L: # @mcl_fp_addNF9L -# BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, %r8 - movq 64(%rdx), %r10 - movq 56(%rdx), %r11 - movq 48(%rdx), %r9 - movq 40(%rdx), %rax - movq 32(%rdx), %rdi - movq 24(%rdx), %rbp - movq 16(%rdx), %r15 - movq (%rdx), %rbx - movq 8(%rdx), %r13 - addq (%rsi), %rbx - adcq 8(%rsi), %r13 - adcq 16(%rsi), %r15 - adcq 24(%rsi), %rbp - movq %rbp, -40(%rsp) # 8-byte Spill - adcq 32(%rsi), %rdi - movq %rdi, -16(%rsp) # 8-byte Spill - adcq 40(%rsi), %rax - movq %rax, -8(%rsp) # 8-byte Spill - adcq 48(%rsi), %r9 - movq %r9, -32(%rsp) # 8-byte Spill - movq %r9, %rdi - adcq 56(%rsi), %r11 - movq %r11, -24(%rsp) # 8-byte Spill - movq %r11, %rax - adcq 64(%rsi), %r10 - movq %r10, %r9 - movq %rbx, %rsi - subq (%rcx), %rsi - movq %r13, %rdx - sbbq 8(%rcx), %rdx - movq %r15, %r12 - sbbq 16(%rcx), %r12 - sbbq 24(%rcx), %rbp - movq -16(%rsp), %r14 # 8-byte Reload - sbbq 32(%rcx), %r14 - movq -8(%rsp), %r11 # 8-byte Reload - sbbq 40(%rcx), %r11 - movq %rdi, %r10 - sbbq 48(%rcx), %r10 - movq %rax, %rdi - sbbq 56(%rcx), %rdi - movq %r9, %rax - sbbq 64(%rcx), %rax - movq %rax, %rcx - sarq $63, %rcx - cmovsq %rbx, %rsi - movq %rsi, (%r8) - cmovsq %r13, %rdx - movq %rdx, 8(%r8) - cmovsq %r15, %r12 - movq %r12, 16(%r8) - cmovsq -40(%rsp), %rbp # 8-byte Folded Reload - movq %rbp, 24(%r8) - cmovsq -16(%rsp), %r14 # 8-byte Folded Reload - movq %r14, 32(%r8) - cmovsq -8(%rsp), 
%r11 # 8-byte Folded Reload
- movq %r11, 40(%r8)
- cmovsq -32(%rsp), %r10 # 8-byte Folded Reload
- movq %r10, 48(%r8)
- cmovsq -24(%rsp), %rdi # 8-byte Folded Reload
- movq %rdi, 56(%r8)
- cmovsq %r9, %rax
- movq %rax, 64(%r8)
- popq %rbx
- popq %r12
- popq %r13
- popq %r14
- popq %r15
- popq %rbp
- retq
-.Lfunc_end137:
- .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L
-
- .globl mcl_fp_sub9L
- .align 16, 0x90
- .type mcl_fp_sub9L,@function
-mcl_fp_sub9L: # @mcl_fp_sub9L
-# BB#0:
- pushq %r15
- pushq %r14
- pushq %r13
- pushq %r12
- pushq %rbx
- movq 64(%rdx), %r13
- movq (%rsi), %rax
- movq 8(%rsi), %r9
- xorl %ebx, %ebx
- subq (%rdx), %rax
- sbbq 8(%rdx), %r9
- movq 16(%rsi), %r10
- sbbq 16(%rdx), %r10
- movq 24(%rsi), %r11
- sbbq 24(%rdx), %r11
- movq 32(%rsi), %r12
- sbbq 32(%rdx), %r12
- movq 40(%rsi), %r14
- sbbq 40(%rdx), %r14
- movq 48(%rsi), %r15
- sbbq 48(%rdx), %r15
- movq 64(%rsi), %r8
- movq 56(%rsi), %rsi
- sbbq 56(%rdx), %rsi
- movq %rax, (%rdi)
- movq %r9, 8(%rdi)
- movq %r10, 16(%rdi)
- movq %r11, 24(%rdi)
- movq %r12, 32(%rdi)
- movq %r14, 40(%rdi)
- movq %r15, 48(%rdi)
- movq %rsi, 56(%rdi)
- sbbq %r13, %r8
- movq %r8, 64(%rdi)
- sbbq $0, %rbx
- testb $1, %bl
- je .LBB138_2
-# BB#1: # %carry
- addq (%rcx), %rax
- movq %rax, (%rdi)
- movq 8(%rcx), %rax
- adcq %r9, %rax
- movq %rax, 8(%rdi)
- movq 16(%rcx), %rax
- adcq %r10, %rax
- movq %rax, 16(%rdi)
- movq 24(%rcx), %rax
- adcq %r11, %rax
- movq %rax, 24(%rdi)
- movq 32(%rcx), %rax
- adcq %r12, %rax
- movq %rax, 32(%rdi)
- movq 40(%rcx), %rax
- adcq %r14, %rax
- movq %rax, 40(%rdi)
- movq 48(%rcx), %rax
- adcq %r15, %rax
- movq %rax, 48(%rdi)
- movq 56(%rcx), %rax
- adcq %rsi, %rax
- movq %rax, 56(%rdi)
- movq 64(%rcx), %rax
- adcq %r8, %rax
- movq %rax, 64(%rdi)
-.LBB138_2: # %nocarry
- popq %rbx
- popq %r12
- popq %r13
- popq %r14
- popq %r15
- retq
-.Lfunc_end138:
- .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L
-
- .globl mcl_fp_subNF9L
- .align 16, 0x90
- .type mcl_fp_subNF9L,@function
-mcl_fp_subNF9L: # @mcl_fp_subNF9L
-# BB#0:
- pushq %rbp
- pushq %r15
- pushq %r14
- pushq %r13
- pushq %r12
- pushq %rbx
- movq %rcx, %r8
- movq %rdi, %r11
- movq 64(%rsi), %r14
- movq 56(%rsi), %rax
- movq 48(%rsi), %rcx
- movq 40(%rsi), %rdi
- movq 32(%rsi), %rbp
- movq 24(%rsi), %rbx
- movq 16(%rsi), %r15
- movq (%rsi), %r12
- movq 8(%rsi), %r13
- subq (%rdx), %r12
- sbbq 8(%rdx), %r13
- sbbq 16(%rdx), %r15
- sbbq 24(%rdx), %rbx
- movq %rbx, -40(%rsp) # 8-byte Spill
- sbbq 32(%rdx), %rbp
- movq %rbp, -32(%rsp) # 8-byte Spill
- sbbq 40(%rdx), %rdi
- movq %rdi, -24(%rsp) # 8-byte Spill
- sbbq 48(%rdx), %rcx
- movq %rcx, -16(%rsp) # 8-byte Spill
- sbbq 56(%rdx), %rax
- movq %rax, -8(%rsp) # 8-byte Spill
- sbbq 64(%rdx), %r14
- movq %r14, %rax
- sarq $63, %rax
- movq %rax, %rcx
- shldq $1, %r14, %rcx
- movq 24(%r8), %rbp
- andq %rcx, %rbp
- movq 8(%r8), %rdi
- andq %rcx, %rdi
- andq (%r8), %rcx
- movq 64(%r8), %rbx
- andq %rax, %rbx
- movq 56(%r8), %r10
- andq %rax, %r10
- rolq %rax
- movq 48(%r8), %r9
- andq %rax, %r9
- movq 40(%r8), %rsi
- andq %rax, %rsi
- movq 32(%r8), %rdx
- andq %rax, %rdx
- andq 16(%r8), %rax
- addq %r12, %rcx
- adcq %r13, %rdi
- movq %rcx, (%r11)
- adcq %r15, %rax
- movq %rdi, 8(%r11)
- adcq -40(%rsp), %rbp # 8-byte Folded Reload
- movq %rax, 16(%r11)
- movq %rbp, 24(%r11)
- adcq -32(%rsp), %rdx # 8-byte Folded Reload
- movq %rdx, 32(%r11)
- adcq -24(%rsp), %rsi # 8-byte Folded Reload
- movq %rsi, 40(%r11)
- adcq -16(%rsp), %r9 # 8-byte Folded Reload
- movq %r9, 48(%r11)
- adcq -8(%rsp), %r10 # 8-byte Folded Reload
- movq %r10, 56(%r11)
- adcq %r14, %rbx
- movq %rbx, 64(%r11)
- popq %rbx
- popq %r12
- popq %r13
- popq %r14
- popq %r15
- popq %rbp
- retq
-.Lfunc_end139:
- .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L
-
- .globl mcl_fpDbl_add9L
- .align 16, 0x90
- .type mcl_fpDbl_add9L,@function
-mcl_fpDbl_add9L: # @mcl_fpDbl_add9L
-# BB#0:
- pushq %rbp
- pushq %r15
- pushq %r14
- pushq %r13
- pushq %r12
- pushq %rbx
- movq %rcx, %r15
- movq 136(%rdx), %rax
- movq %rax, -8(%rsp) # 8-byte Spill
- movq 128(%rdx), %rax
- movq %rax, -40(%rsp) # 8-byte Spill
- movq 120(%rdx), %r10
- movq 112(%rdx), %r11
- movq 24(%rsi), %rcx
- movq 32(%rsi), %r14
- movq 16(%rdx), %rbp
- movq (%rdx), %rax
- movq 8(%rdx), %rbx
- addq (%rsi), %rax
- adcq 8(%rsi), %rbx
- adcq 16(%rsi), %rbp
- adcq 24(%rdx), %rcx
- adcq 32(%rdx), %r14
- movq 104(%rdx), %r9
- movq 96(%rdx), %r13
- movq %rax, (%rdi)
- movq 88(%rdx), %r8
- movq %rbx, 8(%rdi)
- movq 80(%rdx), %r12
- movq %rbp, 16(%rdi)
- movq 40(%rdx), %rax
- movq %rcx, 24(%rdi)
- movq 40(%rsi), %rbp
- adcq %rax, %rbp
- movq 48(%rdx), %rcx
- movq %r14, 32(%rdi)
- movq 48(%rsi), %rax
- adcq %rcx, %rax
- movq 56(%rdx), %r14
- movq %rbp, 40(%rdi)
- movq 56(%rsi), %rbp
- adcq %r14, %rbp
- movq 72(%rdx), %rcx
- movq 64(%rdx), %rdx
- movq %rax, 48(%rdi)
- movq 64(%rsi), %rax
- adcq %rdx, %rax
- movq 136(%rsi), %rbx
- movq %rbp, 56(%rdi)
- movq 72(%rsi), %rbp
- adcq %rcx, %rbp
- movq 128(%rsi), %rcx
- movq %rax, 64(%rdi)
- movq 80(%rsi), %rdx
- adcq %r12, %rdx
- movq 88(%rsi), %r12
- adcq %r8, %r12
- movq 96(%rsi), %r14
- adcq %r13, %r14
- movq %r14, -48(%rsp) # 8-byte Spill
- movq 104(%rsi), %rax
- adcq %r9, %rax
- movq %rax, -32(%rsp) # 8-byte Spill
- movq 120(%rsi), %rax
- movq 112(%rsi), %rsi
- adcq %r11, %rsi
- movq %rsi, -24(%rsp) # 8-byte Spill
- adcq %r10, %rax
- movq %rax, -16(%rsp) # 8-byte Spill
- adcq -40(%rsp), %rcx # 8-byte Folded Reload
- movq %rcx, -40(%rsp) # 8-byte Spill
- adcq -8(%rsp), %rbx # 8-byte Folded Reload
- movq %rbx, -8(%rsp) # 8-byte Spill
- sbbq %r9, %r9
- andl $1, %r9d
- movq %rbp, %r10
- subq (%r15), %r10
- movq %rdx, %r11
- sbbq 8(%r15), %r11
- movq %r12, %rbx
- sbbq 16(%r15), %rbx
- sbbq 24(%r15), %r14
- movq -32(%rsp), %r13 # 8-byte Reload
- sbbq 32(%r15), %r13
- movq -24(%rsp), %rsi # 8-byte Reload
- sbbq 40(%r15), %rsi
- movq -16(%rsp), %rax # 8-byte Reload
- sbbq 48(%r15), %rax
- sbbq 56(%r15), %rcx
- movq -8(%rsp), %r8 # 8-byte Reload
- sbbq 64(%r15), %r8
- sbbq $0, %r9
- andl $1, %r9d
- cmovneq %rbp, %r10
- movq %r10, 72(%rdi)
- testb %r9b, %r9b
- cmovneq %rdx, %r11
- movq %r11, 80(%rdi)
- cmovneq %r12, %rbx
- movq %rbx, 88(%rdi)
- cmovneq -48(%rsp), %r14 # 8-byte Folded Reload
- movq %r14, 96(%rdi)
- cmovneq -32(%rsp), %r13 # 8-byte Folded Reload
- movq %r13, 104(%rdi)
- cmovneq -24(%rsp), %rsi # 8-byte Folded Reload
- movq %rsi, 112(%rdi)
- cmovneq -16(%rsp), %rax # 8-byte Folded Reload
- movq %rax, 120(%rdi)
- cmovneq -40(%rsp), %rcx # 8-byte Folded Reload
- movq %rcx, 128(%rdi)
- cmovneq -8(%rsp), %r8 # 8-byte Folded Reload
- movq %r8, 136(%rdi)
- popq %rbx
- popq %r12
- popq %r13
- popq %r14
- popq %r15
- popq %rbp
- retq
-.Lfunc_end140:
- .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L
-
- .globl mcl_fpDbl_sub9L
- .align 16, 0x90
- .type mcl_fpDbl_sub9L,@function
-mcl_fpDbl_sub9L: # @mcl_fpDbl_sub9L
-# BB#0:
- pushq %rbp
- pushq %r15
- pushq %r14
- pushq %r13
- pushq %r12
- pushq %rbx
- movq %rcx, %r14
- movq 136(%rdx), %rax
- movq %rax, -8(%rsp) # 8-byte Spill
- movq 128(%rdx), %rax
- movq %rax, -16(%rsp) # 8-byte Spill
- movq 120(%rdx), %rax
- movq %rax, -24(%rsp) # 8-byte Spill
- movq 16(%rsi), %r11
- movq (%rsi), %r12
- movq 8(%rsi), %r13
- xorl %r9d, %r9d
- subq (%rdx), %r12
- sbbq 8(%rdx), %r13
- sbbq 16(%rdx), %r11
- movq 24(%rsi), %rbx
- sbbq 24(%rdx), %rbx
- movq 32(%rsi), %rbp
- sbbq 32(%rdx), %rbp
- movq 112(%rdx), %r10
- movq 104(%rdx), %rcx
- movq %r12, (%rdi)
- movq 96(%rdx), %rax
- movq %r13, 8(%rdi)
- movq 88(%rdx), %r13
- movq %r11, 16(%rdi)
- movq 40(%rdx), %r11
- movq %rbx, 24(%rdi)
- movq 40(%rsi), %rbx
- sbbq %r11, %rbx
- movq 48(%rdx), %r11
- movq %rbp, 32(%rdi)
- movq 48(%rsi), %rbp
- sbbq %r11, %rbp
- movq 56(%rdx), %r11
- movq %rbx, 40(%rdi)
- movq 56(%rsi), %rbx
- sbbq %r11, %rbx
- movq 64(%rdx), %r11
- movq %rbp, 48(%rdi)
- movq 64(%rsi), %rbp
- sbbq %r11, %rbp
- movq 80(%rdx), %r8
- movq 72(%rdx), %r11
- movq %rbx, 56(%rdi)
- movq 72(%rsi), %r15
- sbbq %r11, %r15
- movq 136(%rsi), %rdx
- movq %rbp, 64(%rdi)
- movq 80(%rsi), %rbp
- sbbq %r8, %rbp
- movq 88(%rsi), %r12
- sbbq %r13, %r12
- movq 96(%rsi), %r13
- sbbq %rax, %r13
- movq 104(%rsi), %rax
- sbbq %rcx, %rax
- movq %rax, -40(%rsp) # 8-byte Spill
- movq 112(%rsi), %rax
- sbbq %r10, %rax
- movq %rax, -32(%rsp) # 8-byte Spill
- movq 128(%rsi), %rax
- movq 120(%rsi), %rcx
- sbbq -24(%rsp), %rcx # 8-byte Folded Reload
- movq %rcx, -24(%rsp) # 8-byte Spill
- sbbq -16(%rsp), %rax # 8-byte Folded Reload
- movq %rax, -16(%rsp) # 8-byte Spill
- sbbq -8(%rsp), %rdx # 8-byte Folded Reload
- movq %rdx, -8(%rsp) # 8-byte Spill
- movl $0, %r8d
- sbbq $0, %r8
- andl $1, %r8d
- movq (%r14), %r10
- cmoveq %r9, %r10
- testb %r8b, %r8b
- movq 16(%r14), %r8
- cmoveq %r9, %r8
- movq 8(%r14), %rdx
- cmoveq %r9, %rdx
- movq 64(%r14), %rbx
- cmoveq %r9, %rbx
- movq 56(%r14), %r11
- cmoveq %r9, %r11
- movq 48(%r14), %rsi
- cmoveq %r9, %rsi
- movq 40(%r14), %rcx
- cmoveq %r9, %rcx
- movq 32(%r14), %rax
- cmoveq %r9, %rax
- cmovneq 24(%r14), %r9
- addq %r15, %r10
- adcq %rbp, %rdx
- movq %r10, 72(%rdi)
- adcq %r12, %r8
- movq %rdx, 80(%rdi)
- adcq %r13, %r9
- movq %r8, 88(%rdi)
- movq %r9, 96(%rdi)
- adcq -40(%rsp), %rax # 8-byte Folded Reload
- movq %rax, 104(%rdi)
- adcq -32(%rsp), %rcx # 8-byte Folded Reload
- movq %rcx, 112(%rdi)
- adcq -24(%rsp), %rsi # 8-byte Folded Reload
- movq %rsi, 120(%rdi)
- adcq -16(%rsp), %r11 # 8-byte Folded Reload
- movq %r11, 128(%rdi)
- adcq -8(%rsp), %rbx # 8-byte Folded Reload
- movq %rbx, 136(%rdi)
- popq %rbx
- popq %r12
- popq %r13
- popq %r14
- popq %r15
- popq %rbp
- retq
-.Lfunc_end141:
- .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L
-
-
- .section ".note.GNU-stack","",@progbits
diff --git a/vendor/github.com/dexon-foundation/mcl/src/asm/x86-64mac.bmi2.s b/vendor/github.com/dexon-foundation/mcl/src/asm/x86-64mac.bmi2.s
deleted file mode 100644
index 849c66649..000000000
--- a/vendor/github.com/dexon-foundation/mcl/src/asm/x86-64mac.bmi2.s
+++ /dev/null
@@ -1,13830 +0,0 @@
- .section __TEXT,__text,regular,pure_instructions
- .macosx_version_min 10, 12
- .globl _makeNIST_P192Lbmi2
- .p2align 4, 0x90
-_makeNIST_P192Lbmi2: ## @makeNIST_P192Lbmi2
-## BB#0:
- movq $-1, %rax
- movq $-2, %rdx
- movq $-1, %rcx
- retq
-
- .globl _mcl_fpDbl_mod_NIST_P192Lbmi2
- .p2align 4, 0x90
-_mcl_fpDbl_mod_NIST_P192Lbmi2: ## @mcl_fpDbl_mod_NIST_P192Lbmi2
-## BB#0:
- pushq %r14
- pushq %rbx
- movq 16(%rsi), %r10
- movq 24(%rsi), %r8
- movq 40(%rsi), %r9
- movq 8(%rsi), %rax
- addq %r9, %rax
- adcq $0, %r10
- sbbq %rcx, %rcx
- andl $1, %ecx
- movq 32(%rsi), %r11
- movq (%rsi), %r14
- addq %r8, %r14
- adcq %r11, %rax
-
adcq %r9, %r10 - adcq $0, %rcx - addq %r9, %r14 - adcq %r8, %rax - adcq %r11, %r10 - adcq $0, %rcx - addq %rcx, %r14 - adcq %rax, %rcx - adcq $0, %r10 - sbbq %rax, %rax - andl $1, %eax - movq %r14, %rsi - addq $1, %rsi - movq %rcx, %rdx - adcq $1, %rdx - movq %r10, %rbx - adcq $0, %rbx - adcq $-1, %rax - andl $1, %eax - cmovneq %r14, %rsi - movq %rsi, (%rdi) - testb %al, %al - cmovneq %rcx, %rdx - movq %rdx, 8(%rdi) - cmovneq %r10, %rbx - movq %rbx, 16(%rdi) - popq %rbx - popq %r14 - retq - - .globl _mcl_fp_sqr_NIST_P192Lbmi2 - .p2align 4, 0x90 -_mcl_fp_sqr_NIST_P192Lbmi2: ## @mcl_fp_sqr_NIST_P192Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, -8(%rsp) ## 8-byte Spill - movq 16(%rsi), %r8 - movq (%rsi), %rcx - movq 8(%rsi), %rsi - movq %r8, %rdx - mulxq %rsi, %r14, %rbx - movq %rbx, -16(%rsp) ## 8-byte Spill - movq %rsi, %rdx - mulxq %rsi, %r13, %r15 - mulxq %rcx, %r12, %rsi - addq %rsi, %r13 - adcq %r14, %r15 - adcq $0, %rbx - movq %rcx, %rdx - mulxq %rcx, %r9, %rax - addq %r12, %rax - movq %r8, %rdx - mulxq %rcx, %rbp, %r11 - adcq %rbp, %rsi - movq %r11, %r10 - adcq $0, %r10 - addq %r12, %rax - adcq %r13, %rsi - adcq %r15, %r10 - adcq $0, %rbx - mulxq %r8, %rcx, %rdi - addq %r14, %r11 - adcq -16(%rsp), %rcx ## 8-byte Folded Reload - adcq $0, %rdi - addq %rbp, %rsi - adcq %r10, %r11 - adcq %rbx, %rcx - adcq $0, %rdi - addq %rdi, %rax - adcq $0, %rsi - sbbq %rdx, %rdx - andl $1, %edx - addq %r11, %r9 - adcq %rcx, %rax - adcq %rdi, %rsi - adcq $0, %rdx - addq %rdi, %r9 - adcq %r11, %rax - adcq %rcx, %rsi - adcq $0, %rdx - addq %rdx, %r9 - adcq %rax, %rdx - adcq $0, %rsi - sbbq %rax, %rax - andl $1, %eax - movq %r9, %rcx - addq $1, %rcx - movq %rdx, %rdi - adcq $1, %rdi - movq %rsi, %rbp - adcq $0, %rbp - adcq $-1, %rax - andl $1, %eax - cmovneq %r9, %rcx - movq -8(%rsp), %rbx ## 8-byte Reload - movq %rcx, (%rbx) - testb %al, %al - cmovneq %rdx, %rdi - movq %rdi, 8(%rbx) - cmovneq %rsi, %rbp - movq %rbp, 16(%rbx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mulNIST_P192Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mulNIST_P192Lbmi2: ## @mcl_fp_mulNIST_P192Lbmi2 -## BB#0: - pushq %r14 - pushq %rbx - subq $56, %rsp - movq %rdi, %r14 - leaq 8(%rsp), %rdi - callq _mcl_fpDbl_mulPre3Lbmi2 - movq 24(%rsp), %r9 - movq 32(%rsp), %r8 - movq 48(%rsp), %rdi - movq 16(%rsp), %rbx - addq %rdi, %rbx - adcq $0, %r9 - sbbq %rcx, %rcx - andl $1, %ecx - movq 40(%rsp), %rsi - movq 8(%rsp), %rdx - addq %r8, %rdx - adcq %rsi, %rbx - adcq %rdi, %r9 - adcq $0, %rcx - addq %rdi, %rdx - adcq %r8, %rbx - adcq %rsi, %r9 - adcq $0, %rcx - addq %rcx, %rdx - adcq %rbx, %rcx - adcq $0, %r9 - sbbq %rsi, %rsi - andl $1, %esi - movq %rdx, %rdi - addq $1, %rdi - movq %rcx, %rbx - adcq $1, %rbx - movq %r9, %rax - adcq $0, %rax - adcq $-1, %rsi - andl $1, %esi - cmovneq %rdx, %rdi - movq %rdi, (%r14) - testb %sil, %sil - cmovneq %rcx, %rbx - movq %rbx, 8(%r14) - cmovneq %r9, %rax - movq %rax, 16(%r14) - addq $56, %rsp - popq %rbx - popq %r14 - retq - - .globl _mcl_fpDbl_mod_NIST_P521Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_mod_NIST_P521Lbmi2: ## @mcl_fpDbl_mod_NIST_P521Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 120(%rsi), %r9 - movq 128(%rsi), %r14 - movq %r14, %r8 - shldq $55, %r9, %r8 - movq 112(%rsi), %r10 - shldq $55, %r10, %r9 - movq 104(%rsi), %r11 - shldq $55, %r11, %r10 - movq 96(%rsi), %r15 - shldq $55, %r15, %r11 - movq 88(%rsi), %r12 - shldq $55, %r12, %r15 - movq 80(%rsi), %rcx - shldq $55, 
%rcx, %r12 - movq 64(%rsi), %rbx - movq 72(%rsi), %rax - shldq $55, %rax, %rcx - shrq $9, %r14 - shldq $55, %rbx, %rax - ## kill: %EBX %EBX %RBX %RBX - andl $511, %ebx ## imm = 0x1FF - addq (%rsi), %rax - adcq 8(%rsi), %rcx - adcq 16(%rsi), %r12 - adcq 24(%rsi), %r15 - adcq 32(%rsi), %r11 - adcq 40(%rsi), %r10 - adcq 48(%rsi), %r9 - adcq 56(%rsi), %r8 - adcq %r14, %rbx - movl %ebx, %esi - shrl $9, %esi - andl $1, %esi - addq %rax, %rsi - adcq $0, %rcx - adcq $0, %r12 - adcq $0, %r15 - adcq $0, %r11 - adcq $0, %r10 - adcq $0, %r9 - adcq $0, %r8 - adcq $0, %rbx - movq %rsi, %rax - andq %r12, %rax - andq %r15, %rax - andq %r11, %rax - andq %r10, %rax - andq %r9, %rax - andq %r8, %rax - movq %rbx, %rdx - orq $-512, %rdx ## imm = 0xFE00 - andq %rax, %rdx - andq %rcx, %rdx - cmpq $-1, %rdx - je LBB4_1 -## BB#3: ## %nonzero - movq %rsi, (%rdi) - movq %rcx, 8(%rdi) - movq %r12, 16(%rdi) - movq %r15, 24(%rdi) - movq %r11, 32(%rdi) - movq %r10, 40(%rdi) - movq %r9, 48(%rdi) - movq %r8, 56(%rdi) - andl $511, %ebx ## imm = 0x1FF - movq %rbx, 64(%rdi) - jmp LBB4_2 -LBB4_1: ## %zero - movq $0, 64(%rdi) - movq $0, 56(%rdi) - movq $0, 48(%rdi) - movq $0, 40(%rdi) - movq $0, 32(%rdi) - movq $0, 24(%rdi) - movq $0, 16(%rdi) - movq $0, 8(%rdi) - movq $0, (%rdi) -LBB4_2: ## %zero - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_mulUnitPre1Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mulUnitPre1Lbmi2: ## @mcl_fp_mulUnitPre1Lbmi2 -## BB#0: - mulxq (%rsi), %rcx, %rax - movq %rcx, (%rdi) - movq %rax, 8(%rdi) - retq - - .globl _mcl_fpDbl_mulPre1Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_mulPre1Lbmi2: ## @mcl_fpDbl_mulPre1Lbmi2 -## BB#0: - movq (%rdx), %rdx - mulxq (%rsi), %rcx, %rax - movq %rcx, (%rdi) - movq %rax, 8(%rdi) - retq - - .globl _mcl_fpDbl_sqrPre1Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre1Lbmi2: ## @mcl_fpDbl_sqrPre1Lbmi2 -## BB#0: - movq (%rsi), %rdx - mulxq %rdx, %rcx, %rax - movq %rcx, (%rdi) - movq %rax, 8(%rdi) - retq - - .globl _mcl_fp_mont1Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mont1Lbmi2: ## @mcl_fp_mont1Lbmi2 -## BB#0: - movq %rdx, %rax - movq (%rsi), %rdx - mulxq (%rax), %rsi, %r8 - movq -8(%rcx), %rdx - imulq %rsi, %rdx - movq (%rcx), %rcx - mulxq %rcx, %rdx, %rax - addq %rsi, %rdx - adcq %r8, %rax - sbbq %rdx, %rdx - andl $1, %edx - movq %rax, %rsi - subq %rcx, %rsi - sbbq $0, %rdx - testb $1, %dl - cmovneq %rax, %rsi - movq %rsi, (%rdi) - retq - - .globl _mcl_fp_montNF1Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montNF1Lbmi2: ## @mcl_fp_montNF1Lbmi2 -## BB#0: - movq %rdx, %rax - movq (%rsi), %rdx - mulxq (%rax), %rsi, %r8 - movq -8(%rcx), %rdx - imulq %rsi, %rdx - movq (%rcx), %rcx - mulxq %rcx, %rdx, %rax - addq %rsi, %rdx - adcq %r8, %rax - movq %rax, %rdx - subq %rcx, %rdx - cmovsq %rax, %rdx - movq %rdx, (%rdi) - retq - - .globl _mcl_fp_montRed1Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montRed1Lbmi2: ## @mcl_fp_montRed1Lbmi2 -## BB#0: - movq (%rsi), %rcx - movq -8(%rdx), %rax - imulq %rcx, %rax - movq (%rdx), %r8 - movq %rax, %rdx - mulxq %r8, %rax, %rdx - addq %rcx, %rax - adcq 8(%rsi), %rdx - sbbq %rax, %rax - andl $1, %eax - movq %rdx, %rcx - subq %r8, %rcx - sbbq $0, %rax - testb $1, %al - cmovneq %rdx, %rcx - movq %rcx, (%rdi) - retq - - .globl _mcl_fp_addPre1Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addPre1Lbmi2: ## @mcl_fp_addPre1Lbmi2 -## BB#0: - movq (%rdx), %rax - addq (%rsi), %rax - movq %rax, (%rdi) - sbbq %rax, %rax - andl $1, %eax - retq - - .globl _mcl_fp_subPre1Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subPre1Lbmi2: ## @mcl_fp_subPre1Lbmi2 -## BB#0: - movq (%rsi), %rcx - xorl %eax, %eax - subq 
(%rdx), %rcx - movq %rcx, (%rdi) - sbbq $0, %rax - andl $1, %eax - retq - - .globl _mcl_fp_shr1_1Lbmi2 - .p2align 4, 0x90 -_mcl_fp_shr1_1Lbmi2: ## @mcl_fp_shr1_1Lbmi2 -## BB#0: - movq (%rsi), %rax - shrq %rax - movq %rax, (%rdi) - retq - - .globl _mcl_fp_add1Lbmi2 - .p2align 4, 0x90 -_mcl_fp_add1Lbmi2: ## @mcl_fp_add1Lbmi2 -## BB#0: - movq (%rdx), %rax - addq (%rsi), %rax - movq %rax, (%rdi) - sbbq %rdx, %rdx - andl $1, %edx - subq (%rcx), %rax - sbbq $0, %rdx - testb $1, %dl - jne LBB14_2 -## BB#1: ## %nocarry - movq %rax, (%rdi) -LBB14_2: ## %carry - retq - - .globl _mcl_fp_addNF1Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addNF1Lbmi2: ## @mcl_fp_addNF1Lbmi2 -## BB#0: - movq (%rdx), %rax - addq (%rsi), %rax - movq %rax, %rdx - subq (%rcx), %rdx - cmovsq %rax, %rdx - movq %rdx, (%rdi) - retq - - .globl _mcl_fp_sub1Lbmi2 - .p2align 4, 0x90 -_mcl_fp_sub1Lbmi2: ## @mcl_fp_sub1Lbmi2 -## BB#0: - movq (%rsi), %rax - xorl %esi, %esi - subq (%rdx), %rax - movq %rax, (%rdi) - sbbq $0, %rsi - testb $1, %sil - jne LBB16_2 -## BB#1: ## %nocarry - retq -LBB16_2: ## %carry - addq (%rcx), %rax - movq %rax, (%rdi) - retq - - .globl _mcl_fp_subNF1Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subNF1Lbmi2: ## @mcl_fp_subNF1Lbmi2 -## BB#0: - movq (%rsi), %rax - subq (%rdx), %rax - movq %rax, %rdx - sarq $63, %rdx - andq (%rcx), %rdx - addq %rax, %rdx - movq %rdx, (%rdi) - retq - - .globl _mcl_fpDbl_add1Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_add1Lbmi2: ## @mcl_fpDbl_add1Lbmi2 -## BB#0: - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - movq %rax, (%rdi) - sbbq %rax, %rax - andl $1, %eax - movq %rdx, %rsi - subq (%rcx), %rsi - sbbq $0, %rax - testb $1, %al - cmovneq %rdx, %rsi - movq %rsi, 8(%rdi) - retq - - .globl _mcl_fpDbl_sub1Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sub1Lbmi2: ## @mcl_fpDbl_sub1Lbmi2 -## BB#0: - movq (%rsi), %rax - movq 8(%rsi), %r8 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r8 - movq %rax, (%rdi) - movl $0, %eax - sbbq $0, %rax - testb $1, %al - cmovneq (%rcx), %rsi - addq %r8, %rsi - movq %rsi, 8(%rdi) - retq - - .globl _mcl_fp_mulUnitPre2Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mulUnitPre2Lbmi2: ## @mcl_fp_mulUnitPre2Lbmi2 -## BB#0: - mulxq 8(%rsi), %rax, %rcx - mulxq (%rsi), %rdx, %rsi - movq %rdx, (%rdi) - addq %rax, %rsi - movq %rsi, 8(%rdi) - adcq $0, %rcx - movq %rcx, 16(%rdi) - retq - - .globl _mcl_fpDbl_mulPre2Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_mulPre2Lbmi2: ## @mcl_fpDbl_mulPre2Lbmi2 -## BB#0: - movq %rdx, %r10 - movq (%rsi), %r11 - movq 8(%rsi), %r8 - movq (%r10), %rsi - movq %r11, %rdx - mulxq %rsi, %rdx, %r9 - movq %rdx, (%rdi) - movq %r8, %rdx - mulxq %rsi, %rsi, %rax - addq %r9, %rsi - adcq $0, %rax - movq 8(%r10), %rcx - movq %r11, %rdx - mulxq %rcx, %rdx, %r9 - addq %rsi, %rdx - movq %rdx, 8(%rdi) - movq %r8, %rdx - mulxq %rcx, %rdx, %rcx - adcq %rax, %rdx - sbbq %rax, %rax - andl $1, %eax - addq %r9, %rdx - movq %rdx, 16(%rdi) - adcq %rcx, %rax - movq %rax, 24(%rdi) - retq - - .globl _mcl_fpDbl_sqrPre2Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre2Lbmi2: ## @mcl_fpDbl_sqrPre2Lbmi2 -## BB#0: - movq (%rsi), %rax - movq 8(%rsi), %rcx - movq %rax, %rdx - mulxq %rax, %rdx, %rsi - movq %rdx, (%rdi) - movq %rcx, %rdx - mulxq %rax, %rdx, %r8 - addq %rdx, %rsi - movq %r8, %rax - adcq $0, %rax - addq %rdx, %rsi - movq %rsi, 8(%rdi) - movq %rcx, %rdx - mulxq %rcx, %rdx, %rcx - adcq %rax, %rdx - sbbq %rax, %rax - andl $1, %eax - addq %r8, %rdx - movq %rdx, 16(%rdi) - adcq %rcx, %rax - movq %rax, 24(%rdi) - retq - - .globl _mcl_fp_mont2Lbmi2 - .p2align 4, 0x90 
-_mcl_fp_mont2Lbmi2: ## @mcl_fp_mont2Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq (%rsi), %r8 - movq 8(%rsi), %r9 - movq (%rdx), %rax - movq 8(%rdx), %r11 - movq %r9, %rdx - mulxq %rax, %r10, %r13 - movq %r8, %rdx - mulxq %rax, %r14, %rsi - addq %r10, %rsi - adcq $0, %r13 - movq -8(%rcx), %rbp - movq (%rcx), %r10 - movq %r14, %rdx - imulq %rbp, %rdx - movq 8(%rcx), %r15 - mulxq %r15, %r12, %rcx - mulxq %r10, %rdx, %rbx - addq %r12, %rbx - adcq $0, %rcx - addq %r14, %rdx - adcq %rsi, %rbx - adcq %r13, %rcx - sbbq %rsi, %rsi - andl $1, %esi - movq %r11, %rdx - mulxq %r9, %r9, %r14 - movq %r11, %rdx - mulxq %r8, %r8, %rax - addq %r9, %rax - adcq $0, %r14 - addq %rbx, %r8 - adcq %rcx, %rax - adcq %rsi, %r14 - sbbq %rsi, %rsi - andl $1, %esi - imulq %r8, %rbp - movq %rbp, %rdx - mulxq %r15, %rcx, %rbx - mulxq %r10, %rdx, %rbp - addq %rcx, %rbp - adcq $0, %rbx - addq %r8, %rdx - adcq %rax, %rbp - adcq %r14, %rbx - adcq $0, %rsi - movq %rbp, %rax - subq %r10, %rax - movq %rbx, %rcx - sbbq %r15, %rcx - sbbq $0, %rsi - andl $1, %esi - cmovneq %rbx, %rcx - testb %sil, %sil - cmovneq %rbp, %rax - movq %rax, (%rdi) - movq %rcx, 8(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montNF2Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montNF2Lbmi2: ## @mcl_fp_montNF2Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq (%rsi), %r8 - movq 8(%rsi), %r9 - movq (%rdx), %rax - movq 8(%rdx), %r11 - movq %r9, %rdx - mulxq %rax, %r10, %rsi - movq %r8, %rdx - mulxq %rax, %r15, %r13 - addq %r10, %r13 - adcq $0, %rsi - movq -8(%rcx), %rbp - movq (%rcx), %r10 - movq %r15, %rdx - imulq %rbp, %rdx - movq 8(%rcx), %r14 - mulxq %r10, %rcx, %r12 - addq %r15, %rcx - mulxq %r14, %rbx, %rcx - adcq %r13, %rbx - adcq $0, %rsi - addq %r12, %rbx - adcq %rcx, %rsi - movq %r11, %rdx - mulxq %r9, %r9, %rcx - movq %r11, %rdx - mulxq %r8, %r8, %rax - addq %r9, %rax - adcq $0, %rcx - addq %rbx, %r8 - adcq %rsi, %rax - adcq $0, %rcx - imulq %r8, %rbp - movq %rbp, %rdx - mulxq %r14, %rbx, %rsi - mulxq %r10, %rbp, %rdx - addq %r8, %rbp - adcq %rax, %rbx - adcq $0, %rcx - addq %rdx, %rbx - adcq %rsi, %rcx - movq %rbx, %rax - subq %r10, %rax - movq %rcx, %rdx - sbbq %r14, %rdx - cmovsq %rbx, %rax - movq %rax, (%rdi) - cmovsq %rcx, %rdx - movq %rdx, 8(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montRed2Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montRed2Lbmi2: ## @mcl_fp_montRed2Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq -8(%rdx), %r15 - movq (%rdx), %r8 - movq (%rsi), %r10 - movq %r10, %rcx - imulq %r15, %rcx - movq 8(%rdx), %r9 - movq %rcx, %rdx - mulxq %r9, %r11, %r14 - mulxq %r8, %rcx, %rax - addq %r11, %rax - adcq $0, %r14 - movq 24(%rsi), %r11 - addq %r10, %rcx - adcq 8(%rsi), %rax - adcq 16(%rsi), %r14 - adcq $0, %r11 - sbbq %rcx, %rcx - andl $1, %ecx - imulq %rax, %r15 - movq %r15, %rdx - mulxq %r9, %r10, %rbx - mulxq %r8, %rsi, %rdx - addq %r10, %rdx - adcq $0, %rbx - addq %rax, %rsi - adcq %r14, %rdx - adcq %r11, %rbx - adcq $0, %rcx - movq %rdx, %rax - subq %r8, %rax - movq %rbx, %rsi - sbbq %r9, %rsi - sbbq $0, %rcx - andl $1, %ecx - cmovneq %rbx, %rsi - testb %cl, %cl - cmovneq %rdx, %rax - movq %rax, (%rdi) - movq %rsi, 8(%rdi) - popq %rbx - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_addPre2Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addPre2Lbmi2: ## @mcl_fp_addPre2Lbmi2 -## BB#0: - movq (%rdx), %rax - movq 8(%rdx), %rcx 
- addq (%rsi), %rax - adcq 8(%rsi), %rcx - movq %rax, (%rdi) - movq %rcx, 8(%rdi) - sbbq %rax, %rax - andl $1, %eax - retq - - .globl _mcl_fp_subPre2Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subPre2Lbmi2: ## @mcl_fp_subPre2Lbmi2 -## BB#0: - movq (%rsi), %rcx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rcx - sbbq 8(%rdx), %rsi - movq %rcx, (%rdi) - movq %rsi, 8(%rdi) - sbbq $0, %rax - andl $1, %eax - retq - - .globl _mcl_fp_shr1_2Lbmi2 - .p2align 4, 0x90 -_mcl_fp_shr1_2Lbmi2: ## @mcl_fp_shr1_2Lbmi2 -## BB#0: - movq (%rsi), %rax - movq 8(%rsi), %rcx - shrdq $1, %rcx, %rax - movq %rax, (%rdi) - shrq %rcx - movq %rcx, 8(%rdi) - retq - - .globl _mcl_fp_add2Lbmi2 - .p2align 4, 0x90 -_mcl_fp_add2Lbmi2: ## @mcl_fp_add2Lbmi2 -## BB#0: - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq $0, %rsi - testb $1, %sil - jne LBB29_2 -## BB#1: ## %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) -LBB29_2: ## %carry - retq - - .globl _mcl_fp_addNF2Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addNF2Lbmi2: ## @mcl_fp_addNF2Lbmi2 -## BB#0: - movq (%rdx), %rax - movq 8(%rdx), %r8 - addq (%rsi), %rax - adcq 8(%rsi), %r8 - movq %rax, %rsi - subq (%rcx), %rsi - movq %r8, %rdx - sbbq 8(%rcx), %rdx - testq %rdx, %rdx - cmovsq %rax, %rsi - movq %rsi, (%rdi) - cmovsq %r8, %rdx - movq %rdx, 8(%rdi) - retq - - .globl _mcl_fp_sub2Lbmi2 - .p2align 4, 0x90 -_mcl_fp_sub2Lbmi2: ## @mcl_fp_sub2Lbmi2 -## BB#0: - movq (%rsi), %rax - movq 8(%rsi), %r8 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r8 - movq %rax, (%rdi) - movq %r8, 8(%rdi) - sbbq $0, %rsi - testb $1, %sil - jne LBB31_2 -## BB#1: ## %nocarry - retq -LBB31_2: ## %carry - movq 8(%rcx), %rdx - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %r8, %rdx - movq %rdx, 8(%rdi) - retq - - .globl _mcl_fp_subNF2Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subNF2Lbmi2: ## @mcl_fp_subNF2Lbmi2 -## BB#0: - movq (%rsi), %r8 - movq 8(%rsi), %rsi - subq (%rdx), %r8 - sbbq 8(%rdx), %rsi - movq %rsi, %rdx - sarq $63, %rdx - movq 8(%rcx), %rax - andq %rdx, %rax - andq (%rcx), %rdx - addq %r8, %rdx - movq %rdx, (%rdi) - adcq %rsi, %rax - movq %rax, 8(%rdi) - retq - - .globl _mcl_fpDbl_add2Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_add2Lbmi2: ## @mcl_fpDbl_add2Lbmi2 -## BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rdx), %r10 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r10 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - adcq %r8, %r9 - sbbq %rax, %rax - andl $1, %eax - movq %r10, %rdx - subq (%rcx), %rdx - movq %r9, %rsi - sbbq 8(%rcx), %rsi - sbbq $0, %rax - andl $1, %eax - cmovneq %r10, %rdx - movq %rdx, 16(%rdi) - testb %al, %al - cmovneq %r9, %rsi - movq %rsi, 24(%rdi) - retq - - .globl _mcl_fpDbl_sub2Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sub2Lbmi2: ## @mcl_fpDbl_sub2Lbmi2 -## BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rsi), %r10 - movq (%rsi), %r11 - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %r11 - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %r10 - movq %r11, (%rdi) - movq %rsi, 8(%rdi) - sbbq %r8, %r9 - movl $0, %edx - sbbq $0, %rdx - andl $1, %edx - movq (%rcx), %rsi - cmoveq %rax, %rsi - testb %dl, %dl - cmovneq 8(%rcx), %rax - addq %r10, %rsi - movq %rsi, 16(%rdi) - adcq %r9, %rax - movq %rax, 24(%rdi) - retq - - .globl _mcl_fp_mulUnitPre3Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mulUnitPre3Lbmi2: ## @mcl_fp_mulUnitPre3Lbmi2 -## BB#0: - mulxq 16(%rsi), %r8, %rcx - mulxq 
8(%rsi), %r9, %rax - mulxq (%rsi), %rdx, %rsi - movq %rdx, (%rdi) - addq %r9, %rsi - movq %rsi, 8(%rdi) - adcq %r8, %rax - movq %rax, 16(%rdi) - adcq $0, %rcx - movq %rcx, 24(%rdi) - retq - - .globl _mcl_fpDbl_mulPre3Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_mulPre3Lbmi2: ## @mcl_fpDbl_mulPre3Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq %rdx, %r9 - movq (%rsi), %r10 - movq 8(%rsi), %r8 - movq (%r9), %rax - movq %r10, %rdx - mulxq %rax, %rdx, %r14 - movq 16(%rsi), %r11 - movq %rdx, (%rdi) - movq %r11, %rdx - mulxq %rax, %rsi, %rbx - movq %r8, %rdx - mulxq %rax, %rax, %rcx - addq %r14, %rax - adcq %rsi, %rcx - adcq $0, %rbx - movq 8(%r9), %rsi - movq %r10, %rdx - mulxq %rsi, %rdx, %r14 - addq %rax, %rdx - movq %rdx, 8(%rdi) - movq %r11, %rdx - mulxq %rsi, %rax, %r15 - movq %r8, %rdx - mulxq %rsi, %rsi, %rdx - adcq %rcx, %rsi - adcq %rbx, %rax - sbbq %rcx, %rcx - andl $1, %ecx - addq %r14, %rsi - adcq %rdx, %rax - adcq %r15, %rcx - movq 16(%r9), %rbx - movq %r10, %rdx - mulxq %rbx, %rdx, %r9 - addq %rsi, %rdx - movq %rdx, 16(%rdi) - movq %r11, %rdx - mulxq %rbx, %rsi, %r10 - movq %r8, %rdx - mulxq %rbx, %rbx, %rdx - adcq %rax, %rbx - adcq %rcx, %rsi - sbbq %rax, %rax - andl $1, %eax - addq %r9, %rbx - movq %rbx, 24(%rdi) - adcq %rdx, %rsi - movq %rsi, 32(%rdi) - adcq %r10, %rax - movq %rax, 40(%rdi) - popq %rbx - popq %r14 - popq %r15 - retq - - .globl _mcl_fpDbl_sqrPre3Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre3Lbmi2: ## @mcl_fpDbl_sqrPre3Lbmi2 -## BB#0: - pushq %r14 - pushq %rbx - movq 16(%rsi), %r10 - movq (%rsi), %rcx - movq 8(%rsi), %rsi - movq %rcx, %rdx - mulxq %rcx, %rdx, %rax - movq %rdx, (%rdi) - movq %r10, %rdx - mulxq %rcx, %r11, %r8 - movq %rsi, %rdx - mulxq %rcx, %rdx, %r14 - addq %rdx, %rax - movq %r14, %rbx - adcq %r11, %rbx - movq %r8, %rcx - adcq $0, %rcx - addq %rdx, %rax - movq %rax, 8(%rdi) - movq %r10, %rdx - mulxq %rsi, %rax, %r9 - movq %rsi, %rdx - mulxq %rsi, %rsi, %rdx - adcq %rbx, %rsi - adcq %rax, %rcx - sbbq %rbx, %rbx - andl $1, %ebx - addq %r14, %rsi - adcq %rdx, %rcx - adcq %r9, %rbx - addq %r11, %rsi - movq %rsi, 16(%rdi) - movq %r10, %rdx - mulxq %r10, %rsi, %rdx - adcq %rax, %rcx - adcq %rbx, %rsi - sbbq %rax, %rax - andl $1, %eax - addq %r8, %rcx - movq %rcx, 24(%rdi) - adcq %r9, %rsi - movq %rsi, 32(%rdi) - adcq %rdx, %rax - movq %rax, 40(%rdi) - popq %rbx - popq %r14 - retq - - .globl _mcl_fp_mont3Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mont3Lbmi2: ## @mcl_fp_mont3Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %r14 - movq %rdi, -8(%rsp) ## 8-byte Spill - movq 16(%rsi), %r12 - movq (%r14), %rax - movq %r14, -16(%rsp) ## 8-byte Spill - movq %r12, %rdx - movq %r12, -24(%rsp) ## 8-byte Spill - mulxq %rax, %r11, %rbp - movq (%rsi), %r15 - movq 8(%rsi), %rdx - movq %rdx, -48(%rsp) ## 8-byte Spill - mulxq %rax, %rbx, %r8 - movq %r15, %rdx - movq %r15, -32(%rsp) ## 8-byte Spill - mulxq %rax, %r9, %rdi - addq %rbx, %rdi - adcq %r11, %r8 - adcq $0, %rbp - movq -8(%rcx), %r13 - movq %r9, %rdx - imulq %r13, %rdx - movq 8(%rcx), %rax - movq %rax, -56(%rsp) ## 8-byte Spill - mulxq %rax, %r11, %r10 - movq (%rcx), %rax - movq %rax, -64(%rsp) ## 8-byte Spill - mulxq %rax, %rsi, %rbx - addq %r11, %rbx - movq 16(%rcx), %rax - movq %rax, -40(%rsp) ## 8-byte Spill - mulxq %rax, %rcx, %rax - adcq %r10, %rcx - adcq $0, %rax - addq %r9, %rsi - adcq %rdi, %rbx - movq 8(%r14), %rdx - adcq %r8, %rcx - adcq %rbp, %rax - sbbq %r9, %r9 - andl $1, %r9d - mulxq %r12, %r11, %rdi - movq -48(%rsp), %r12 ## 8-byte Reload - 
mulxq %r12, %r10, %rsi - mulxq %r15, %r8, %rbp - addq %r10, %rbp - adcq %r11, %rsi - adcq $0, %rdi - addq %rbx, %r8 - adcq %rcx, %rbp - adcq %rax, %rsi - adcq %r9, %rdi - sbbq %r11, %r11 - andl $1, %r11d - movq %r8, %rdx - imulq %r13, %rdx - movq -40(%rsp), %r14 ## 8-byte Reload - mulxq %r14, %r9, %rcx - mulxq -56(%rsp), %r10, %rax ## 8-byte Folded Reload - mulxq -64(%rsp), %rdx, %rbx ## 8-byte Folded Reload - addq %r10, %rbx - adcq %r9, %rax - adcq $0, %rcx - addq %r8, %rdx - adcq %rbp, %rbx - adcq %rsi, %rax - adcq %rdi, %rcx - adcq $0, %r11 - movq -16(%rsp), %rdx ## 8-byte Reload - movq 16(%rdx), %rdx - mulxq -24(%rsp), %r9, %rsi ## 8-byte Folded Reload - mulxq %r12, %r10, %r15 - mulxq -32(%rsp), %r8, %rdi ## 8-byte Folded Reload - addq %r10, %rdi - adcq %r9, %r15 - adcq $0, %rsi - addq %rbx, %r8 - adcq %rax, %rdi - adcq %rcx, %r15 - adcq %r11, %rsi - sbbq %rbx, %rbx - andl $1, %ebx - imulq %r8, %r13 - movq %r13, %rdx - mulxq %r14, %r9, %rbp - movq %r14, %r12 - movq -56(%rsp), %r14 ## 8-byte Reload - mulxq %r14, %r10, %rax - movq -64(%rsp), %rcx ## 8-byte Reload - mulxq %rcx, %r11, %rdx - addq %r10, %rdx - adcq %r9, %rax - adcq $0, %rbp - addq %r8, %r11 - adcq %rdi, %rdx - adcq %r15, %rax - adcq %rsi, %rbp - adcq $0, %rbx - movq %rdx, %rsi - subq %rcx, %rsi - movq %rax, %rdi - sbbq %r14, %rdi - movq %rbp, %rcx - sbbq %r12, %rcx - sbbq $0, %rbx - andl $1, %ebx - cmovneq %rbp, %rcx - testb %bl, %bl - cmovneq %rdx, %rsi - movq -8(%rsp), %rdx ## 8-byte Reload - movq %rsi, (%rdx) - cmovneq %rax, %rdi - movq %rdi, 8(%rdx) - movq %rcx, 16(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montNF3Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montNF3Lbmi2: ## @mcl_fp_montNF3Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq %rdx, %r10 - movq %rdi, -8(%rsp) ## 8-byte Spill - movq (%rsi), %rcx - movq 8(%rsi), %rdi - movq %rdi, -32(%rsp) ## 8-byte Spill - movq (%r10), %rax - movq %r10, -16(%rsp) ## 8-byte Spill - movq %rdi, %rdx - mulxq %rax, %rbx, %r14 - movq %rcx, %rdx - movq %rcx, -24(%rsp) ## 8-byte Spill - mulxq %rax, %r15, %r12 - movq 16(%rsi), %r11 - addq %rbx, %r12 - movq %r11, %rdx - mulxq %rax, %rsi, %rbx - adcq %r14, %rsi - adcq $0, %rbx - movq -8(%r8), %r9 - movq (%r8), %r14 - movq %r15, %rdx - imulq %r9, %rdx - mulxq %r14, %rbp, %r13 - addq %r15, %rbp - movq 8(%r8), %r15 - mulxq %r15, %rdi, %rbp - adcq %r12, %rdi - movq 16(%r8), %r12 - mulxq %r12, %rax, %r8 - adcq %rsi, %rax - adcq $0, %rbx - addq %r13, %rdi - movq 8(%r10), %rdx - adcq %rbp, %rax - adcq %r8, %rbx - movq -32(%rsp), %r10 ## 8-byte Reload - mulxq %r10, %rsi, %r8 - mulxq %rcx, %r13, %rbp - addq %rsi, %rbp - mulxq %r11, %rcx, %rsi - adcq %r8, %rcx - adcq $0, %rsi - addq %rdi, %r13 - adcq %rax, %rbp - adcq %rbx, %rcx - adcq $0, %rsi - movq %r13, %rdx - imulq %r9, %rdx - mulxq %r14, %rdi, %rbx - addq %r13, %rdi - mulxq %r15, %rax, %rdi - adcq %rbp, %rax - mulxq %r12, %rbp, %rdx - adcq %rcx, %rbp - adcq $0, %rsi - addq %rbx, %rax - adcq %rdi, %rbp - adcq %rdx, %rsi - movq -16(%rsp), %rcx ## 8-byte Reload - movq 16(%rcx), %rdx - mulxq %r10, %rbx, %r8 - mulxq -24(%rsp), %r10, %rdi ## 8-byte Folded Reload - addq %rbx, %rdi - mulxq %r11, %rcx, %rbx - adcq %r8, %rcx - adcq $0, %rbx - addq %rax, %r10 - adcq %rbp, %rdi - adcq %rsi, %rcx - adcq $0, %rbx - imulq %r10, %r9 - movq %r9, %rdx - mulxq %r14, %rdx, %r8 - addq %r10, %rdx - movq %r9, %rdx - mulxq %r12, %rbp, %rsi - mulxq %r15, %rax, %rdx - adcq %rdi, %rax - adcq %rcx, 
%rbp - adcq $0, %rbx - addq %r8, %rax - adcq %rdx, %rbp - adcq %rsi, %rbx - movq %rax, %rcx - subq %r14, %rcx - movq %rbp, %rdx - sbbq %r15, %rdx - movq %rbx, %rsi - sbbq %r12, %rsi - movq %rsi, %rdi - sarq $63, %rdi - cmovsq %rax, %rcx - movq -8(%rsp), %rax ## 8-byte Reload - movq %rcx, (%rax) - cmovsq %rbp, %rdx - movq %rdx, 8(%rax) - cmovsq %rbx, %rsi - movq %rsi, 16(%rax) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montRed3Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montRed3Lbmi2: ## @mcl_fp_montRed3Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rdi, -8(%rsp) ## 8-byte Spill - movq -8(%rcx), %r15 - movq (%rcx), %r9 - movq (%rsi), %rbx - movq %rbx, %rdx - imulq %r15, %rdx - movq 16(%rcx), %rax - mulxq %rax, %r14, %r11 - movq %rax, %rbp - movq %rbp, -16(%rsp) ## 8-byte Spill - movq 8(%rcx), %r10 - mulxq %r10, %rax, %r13 - mulxq %r9, %rdx, %rcx - addq %rax, %rcx - adcq %r14, %r13 - adcq $0, %r11 - movq 40(%rsi), %r14 - movq 32(%rsi), %r12 - addq %rbx, %rdx - adcq 8(%rsi), %rcx - adcq 16(%rsi), %r13 - adcq 24(%rsi), %r11 - adcq $0, %r12 - adcq $0, %r14 - sbbq %rsi, %rsi - andl $1, %esi - movq %rcx, %rdx - imulq %r15, %rdx - mulxq %rbp, %rbp, %rdi - mulxq %r10, %r8, %rbx - mulxq %r9, %rdx, %rax - addq %r8, %rax - adcq %rbp, %rbx - adcq $0, %rdi - addq %rcx, %rdx - adcq %r13, %rax - adcq %r11, %rbx - adcq %r12, %rdi - adcq $0, %r14 - adcq $0, %rsi - imulq %rax, %r15 - movq %r15, %rdx - movq -16(%rsp), %r13 ## 8-byte Reload - mulxq %r13, %r8, %rcx - movq %r15, %rdx - mulxq %r10, %r11, %r12 - mulxq %r9, %r15, %rdx - addq %r11, %rdx - adcq %r8, %r12 - adcq $0, %rcx - addq %rax, %r15 - adcq %rbx, %rdx - adcq %rdi, %r12 - adcq %r14, %rcx - adcq $0, %rsi - movq %rdx, %rax - subq %r9, %rax - movq %r12, %rdi - sbbq %r10, %rdi - movq %rcx, %rbp - sbbq %r13, %rbp - sbbq $0, %rsi - andl $1, %esi - cmovneq %rcx, %rbp - testb %sil, %sil - cmovneq %rdx, %rax - movq -8(%rsp), %rcx ## 8-byte Reload - movq %rax, (%rcx) - cmovneq %r12, %rdi - movq %rdi, 8(%rcx) - movq %rbp, 16(%rcx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_addPre3Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addPre3Lbmi2: ## @mcl_fp_addPre3Lbmi2 -## BB#0: - movq 16(%rdx), %rax - movq (%rdx), %rcx - movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rax - movq %rcx, (%rdi) - movq %rdx, 8(%rdi) - movq %rax, 16(%rdi) - sbbq %rax, %rax - andl $1, %eax - retq - - .globl _mcl_fp_subPre3Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subPre3Lbmi2: ## @mcl_fp_subPre3Lbmi2 -## BB#0: - movq 16(%rsi), %r8 - movq (%rsi), %rcx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rcx - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %r8 - movq %rcx, (%rdi) - movq %rsi, 8(%rdi) - movq %r8, 16(%rdi) - sbbq $0, %rax - andl $1, %eax - retq - - .globl _mcl_fp_shr1_3Lbmi2 - .p2align 4, 0x90 -_mcl_fp_shr1_3Lbmi2: ## @mcl_fp_shr1_3Lbmi2 -## BB#0: - movq 16(%rsi), %rax - movq (%rsi), %rcx - movq 8(%rsi), %rdx - shrdq $1, %rdx, %rcx - movq %rcx, (%rdi) - shrdq $1, %rax, %rdx - movq %rdx, 8(%rdi) - shrq %rax - movq %rax, 16(%rdi) - retq - - .globl _mcl_fp_add3Lbmi2 - .p2align 4, 0x90 -_mcl_fp_add3Lbmi2: ## @mcl_fp_add3Lbmi2 -## BB#0: - movq 16(%rdx), %r8 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r8 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r8, 16(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq 
16(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne LBB44_2 -## BB#1: ## %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r8, 16(%rdi) -LBB44_2: ## %carry - retq - - .globl _mcl_fp_addNF3Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addNF3Lbmi2: ## @mcl_fp_addNF3Lbmi2 -## BB#0: - movq 16(%rdx), %r8 - movq (%rdx), %r10 - movq 8(%rdx), %r9 - addq (%rsi), %r10 - adcq 8(%rsi), %r9 - adcq 16(%rsi), %r8 - movq %r10, %rsi - subq (%rcx), %rsi - movq %r9, %rdx - sbbq 8(%rcx), %rdx - movq %r8, %rax - sbbq 16(%rcx), %rax - movq %rax, %rcx - sarq $63, %rcx - cmovsq %r10, %rsi - movq %rsi, (%rdi) - cmovsq %r9, %rdx - movq %rdx, 8(%rdi) - cmovsq %r8, %rax - movq %rax, 16(%rdi) - retq - - .globl _mcl_fp_sub3Lbmi2 - .p2align 4, 0x90 -_mcl_fp_sub3Lbmi2: ## @mcl_fp_sub3Lbmi2 -## BB#0: - movq 16(%rsi), %r8 - movq (%rsi), %rax - movq 8(%rsi), %r9 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r9 - sbbq 16(%rdx), %r8 - movq %rax, (%rdi) - movq %r9, 8(%rdi) - movq %r8, 16(%rdi) - sbbq $0, %rsi - testb $1, %sil - jne LBB46_2 -## BB#1: ## %nocarry - retq -LBB46_2: ## %carry - movq 8(%rcx), %rdx - movq 16(%rcx), %rsi - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %r9, %rdx - movq %rdx, 8(%rdi) - adcq %r8, %rsi - movq %rsi, 16(%rdi) - retq - - .globl _mcl_fp_subNF3Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subNF3Lbmi2: ## @mcl_fp_subNF3Lbmi2 -## BB#0: - movq 16(%rsi), %r10 - movq (%rsi), %r8 - movq 8(%rsi), %r9 - subq (%rdx), %r8 - sbbq 8(%rdx), %r9 - sbbq 16(%rdx), %r10 - movq %r10, %rdx - sarq $63, %rdx - movq %rdx, %rsi - shldq $1, %r10, %rsi - andq (%rcx), %rsi - movq 16(%rcx), %rax - andq %rdx, %rax - andq 8(%rcx), %rdx - addq %r8, %rsi - movq %rsi, (%rdi) - adcq %r9, %rdx - movq %rdx, 8(%rdi) - adcq %r10, %rax - movq %rax, 16(%rdi) - retq - - .globl _mcl_fpDbl_add3Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_add3Lbmi2: ## @mcl_fpDbl_add3Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq 40(%rdx), %r10 - movq 40(%rsi), %r8 - movq 32(%rdx), %r11 - movq 24(%rdx), %r14 - movq 24(%rsi), %r15 - movq 32(%rsi), %r9 - movq 16(%rdx), %rbx - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rbx - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %rbx, 16(%rdi) - adcq %r14, %r15 - adcq %r11, %r9 - adcq %r10, %r8 - sbbq %rax, %rax - andl $1, %eax - movq %r15, %rdx - subq (%rcx), %rdx - movq %r9, %rsi - sbbq 8(%rcx), %rsi - movq %r8, %rbx - sbbq 16(%rcx), %rbx - sbbq $0, %rax - andl $1, %eax - cmovneq %r15, %rdx - movq %rdx, 24(%rdi) - testb %al, %al - cmovneq %r9, %rsi - movq %rsi, 32(%rdi) - cmovneq %r8, %rbx - movq %rbx, 40(%rdi) - popq %rbx - popq %r14 - popq %r15 - retq - - .globl _mcl_fpDbl_sub3Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sub3Lbmi2: ## @mcl_fpDbl_sub3Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 40(%rdx), %r10 - movq 40(%rsi), %r8 - movq 32(%rsi), %r9 - movq 24(%rsi), %r11 - movq 16(%rsi), %r14 - movq (%rsi), %rbx - movq 8(%rsi), %rax - xorl %esi, %esi - subq (%rdx), %rbx - sbbq 8(%rdx), %rax - movq 24(%rdx), %r15 - movq 32(%rdx), %r12 - sbbq 16(%rdx), %r14 - movq %rbx, (%rdi) - movq %rax, 8(%rdi) - movq %r14, 16(%rdi) - sbbq %r15, %r11 - sbbq %r12, %r9 - sbbq %r10, %r8 - movl $0, %eax - sbbq $0, %rax - andl $1, %eax - movq (%rcx), %rdx - cmoveq %rsi, %rdx - testb %al, %al - movq 16(%rcx), %rax - cmoveq %rsi, %rax - cmovneq 8(%rcx), %rsi - addq %r11, %rdx - movq %rdx, 24(%rdi) - adcq %r9, %rsi - movq %rsi, 32(%rdi) - adcq %r8, %rax - movq %rax, 40(%rdi) - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq - - .globl 
_mcl_fp_mulUnitPre4Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mulUnitPre4Lbmi2: ## @mcl_fp_mulUnitPre4Lbmi2 -## BB#0: - mulxq 24(%rsi), %r8, %r11 - mulxq 16(%rsi), %r9, %rax - mulxq 8(%rsi), %r10, %rcx - mulxq (%rsi), %rdx, %rsi - movq %rdx, (%rdi) - addq %r10, %rsi - movq %rsi, 8(%rdi) - adcq %r9, %rcx - movq %rcx, 16(%rdi) - adcq %r8, %rax - movq %rax, 24(%rdi) - adcq $0, %r11 - movq %r11, 32(%rdi) - retq - - .globl _mcl_fpDbl_mulPre4Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_mulPre4Lbmi2: ## @mcl_fpDbl_mulPre4Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq (%rsi), %r14 - movq 8(%rsi), %r10 - movq (%rdx), %rcx - movq %rdx, %rbp - movq %r14, %rdx - mulxq %rcx, %rdx, %r15 - movq 24(%rsi), %r11 - movq 16(%rsi), %r9 - movq %rdx, (%rdi) - movq %r10, %rdx - mulxq %rcx, %rbx, %r12 - addq %r15, %rbx - movq %r9, %rdx - mulxq %rcx, %r13, %r15 - adcq %r12, %r13 - movq %r11, %rdx - mulxq %rcx, %rcx, %r12 - adcq %r15, %rcx - adcq $0, %r12 - movq 8(%rbp), %rax - movq %r14, %rdx - mulxq %rax, %r8, %rdx - movq %rdx, -8(%rsp) ## 8-byte Spill - addq %rbx, %r8 - movq %r10, %rdx - mulxq %rax, %r15, %rdx - movq %rdx, -16(%rsp) ## 8-byte Spill - adcq %r13, %r15 - movq %r9, %rdx - mulxq %rax, %rbx, %r13 - adcq %rcx, %rbx - movq %r11, %rdx - mulxq %rax, %rcx, %rax - adcq %r12, %rcx - sbbq %r12, %r12 - andl $1, %r12d - addq -8(%rsp), %r15 ## 8-byte Folded Reload - adcq -16(%rsp), %rbx ## 8-byte Folded Reload - adcq %r13, %rcx - movq %r8, 8(%rdi) - adcq %rax, %r12 - movq %rbp, %r13 - movq 16(%r13), %rax - movq %r14, %rdx - mulxq %rax, %rdx, %r8 - addq %r15, %rdx - movq %rdx, 16(%rdi) - movq %r10, %rdx - mulxq %rax, %rbp, %r10 - adcq %rbx, %rbp - movq %r11, %rdx - mulxq %rax, %r14, %r11 - movq %r9, %rdx - mulxq %rax, %r15, %rdx - adcq %rcx, %r15 - adcq %r12, %r14 - sbbq %rcx, %rcx - andl $1, %ecx - addq %r8, %rbp - adcq %r10, %r15 - adcq %rdx, %r14 - adcq %r11, %rcx - movq 24(%r13), %rdx - mulxq 24(%rsi), %rbx, %r8 - mulxq (%rsi), %rax, %r9 - addq %rbp, %rax - movq %rax, 24(%rdi) - mulxq 16(%rsi), %rbp, %rax - mulxq 8(%rsi), %rsi, %rdx - adcq %r15, %rsi - adcq %r14, %rbp - adcq %rcx, %rbx - sbbq %rcx, %rcx - andl $1, %ecx - addq %r9, %rsi - movq %rsi, 32(%rdi) - adcq %rdx, %rbp - movq %rbp, 40(%rdi) - adcq %rax, %rbx - movq %rbx, 48(%rdi) - adcq %r8, %rcx - movq %rcx, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sqrPre4Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre4Lbmi2: ## @mcl_fpDbl_sqrPre4Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 24(%rsi), %r8 - movq 16(%rsi), %r9 - movq (%rsi), %rcx - movq 8(%rsi), %rax - movq %rcx, %rdx - mulxq %rcx, %rdx, %r11 - movq %rdx, (%rdi) - movq %r9, %rdx - mulxq %rcx, %rbp, %r10 - movq %rbp, -16(%rsp) ## 8-byte Spill - movq %r10, -8(%rsp) ## 8-byte Spill - movq %rax, %rdx - mulxq %rcx, %r12, %r15 - addq %r12, %r11 - movq %r15, %rbx - adcq %rbp, %rbx - movq %r8, %rdx - mulxq %rcx, %rcx, %r13 - adcq %r10, %rcx - adcq $0, %r13 - addq %r12, %r11 - movq %rax, %rdx - mulxq %rax, %rbp, %r12 - adcq %rbx, %rbp - movq %r8, %rdx - mulxq %rax, %r10, %rbx - movq %r9, %rdx - mulxq %rax, %r14, %rdx - adcq %r14, %rcx - adcq %r13, %r10 - sbbq %rax, %rax - andl $1, %eax - addq %r15, %rbp - adcq %r12, %rcx - adcq %rdx, %r10 - movq %rdx, %r12 - adcq %rbx, %rax - movq %r11, 8(%rdi) - addq -16(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 16(%rdi) - movq %r8, %rdx - mulxq %r9, %r11, %r8 - movq %r9, %rdx - mulxq %r9, %r15, %rdx - adcq %r14, %rcx 
- adcq %r10, %r15 - adcq %rax, %r11 - sbbq %rax, %rax - andl $1, %eax - addq -8(%rsp), %rcx ## 8-byte Folded Reload - adcq %r12, %r15 - adcq %rdx, %r11 - adcq %r8, %rax - movq 24(%rsi), %rdx - mulxq 16(%rsi), %rbx, %r8 - mulxq 8(%rsi), %rbp, %r9 - mulxq (%rsi), %rsi, %r10 - addq %rcx, %rsi - movq %rsi, 24(%rdi) - adcq %r15, %rbp - adcq %r11, %rbx - mulxq %rdx, %rdx, %rcx - adcq %rax, %rdx - sbbq %rax, %rax - andl $1, %eax - addq %r10, %rbp - movq %rbp, 32(%rdi) - adcq %r9, %rbx - movq %rbx, 40(%rdi) - adcq %r8, %rdx - movq %rdx, 48(%rdi) - adcq %rcx, %rax - movq %rax, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mont4Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mont4Lbmi2: ## @mcl_fp_mont4Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %r13 - movq %rdi, -8(%rsp) ## 8-byte Spill - movq 24(%rsi), %rdi - movq %rdi, -32(%rsp) ## 8-byte Spill - movq (%r13), %rax - movq %r13, -16(%rsp) ## 8-byte Spill - movq %rdi, %rdx - mulxq %rax, %rdi, %r11 - movq 16(%rsi), %rdx - movq %rdx, -40(%rsp) ## 8-byte Spill - mulxq %rax, %rbx, %r10 - movq (%rsi), %rbp - movq %rbp, -48(%rsp) ## 8-byte Spill - movq 8(%rsi), %rdx - movq %rdx, -56(%rsp) ## 8-byte Spill - mulxq %rax, %rsi, %r12 - movq %rbp, %rdx - mulxq %rax, %r14, %r8 - addq %rsi, %r8 - adcq %rbx, %r12 - adcq %rdi, %r10 - adcq $0, %r11 - movq -8(%rcx), %rax - movq %rax, -88(%rsp) ## 8-byte Spill - movq %r14, %rdx - imulq %rax, %rdx - movq 24(%rcx), %rax - movq %rax, -64(%rsp) ## 8-byte Spill - mulxq %rax, %r15, %rax - movq 16(%rcx), %rsi - movq %rsi, -80(%rsp) ## 8-byte Spill - mulxq %rsi, %r9, %rsi - movq (%rcx), %rbp - movq %rbp, -24(%rsp) ## 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -72(%rsp) ## 8-byte Spill - mulxq %rcx, %rdi, %rcx - mulxq %rbp, %rdx, %rbx - addq %rdi, %rbx - adcq %r9, %rcx - adcq %r15, %rsi - adcq $0, %rax - addq %r14, %rdx - adcq %r8, %rbx - adcq %r12, %rcx - adcq %r10, %rsi - adcq %r11, %rax - sbbq %rdi, %rdi - andl $1, %edi - movq 8(%r13), %rdx - mulxq -32(%rsp), %r12, %r10 ## 8-byte Folded Reload - mulxq -40(%rsp), %r15, %r11 ## 8-byte Folded Reload - mulxq -56(%rsp), %r14, %rbp ## 8-byte Folded Reload - mulxq -48(%rsp), %r8, %r9 ## 8-byte Folded Reload - addq %r14, %r9 - adcq %r15, %rbp - adcq %r12, %r11 - adcq $0, %r10 - addq %rbx, %r8 - adcq %rcx, %r9 - adcq %rsi, %rbp - adcq %rax, %r11 - adcq %rdi, %r10 - sbbq %rbx, %rbx - andl $1, %ebx - movq %r8, %rdx - imulq -88(%rsp), %rdx ## 8-byte Folded Reload - mulxq -64(%rsp), %r14, %rcx ## 8-byte Folded Reload - mulxq -80(%rsp), %r15, %rsi ## 8-byte Folded Reload - mulxq -72(%rsp), %r12, %rax ## 8-byte Folded Reload - movq -24(%rsp), %r13 ## 8-byte Reload - mulxq %r13, %rdx, %rdi - addq %r12, %rdi - adcq %r15, %rax - adcq %r14, %rsi - adcq $0, %rcx - addq %r8, %rdx - adcq %r9, %rdi - adcq %rbp, %rax - adcq %r11, %rsi - adcq %r10, %rcx - adcq $0, %rbx - movq -16(%rsp), %rdx ## 8-byte Reload - movq 16(%rdx), %rdx - mulxq -32(%rsp), %r14, %r11 ## 8-byte Folded Reload - mulxq -40(%rsp), %r15, %rbp ## 8-byte Folded Reload - mulxq -56(%rsp), %r12, %r8 ## 8-byte Folded Reload - mulxq -48(%rsp), %r9, %r10 ## 8-byte Folded Reload - addq %r12, %r10 - adcq %r15, %r8 - adcq %r14, %rbp - adcq $0, %r11 - addq %rdi, %r9 - adcq %rax, %r10 - adcq %rsi, %r8 - adcq %rcx, %rbp - adcq %rbx, %r11 - sbbq %rax, %rax - movq %r9, %rdx - imulq -88(%rsp), %rdx ## 8-byte Folded Reload - mulxq -72(%rsp), %rcx, %rsi ## 8-byte Folded Reload - mulxq %r13, %r14, %rdi - addq %rcx, %rdi - mulxq 
-80(%rsp), %rcx, %r15 ## 8-byte Folded Reload - adcq %rsi, %rcx - movq -64(%rsp), %r13 ## 8-byte Reload - mulxq %r13, %rbx, %rsi - adcq %r15, %rbx - adcq $0, %rsi - andl $1, %eax - addq %r9, %r14 - adcq %r10, %rdi - adcq %r8, %rcx - adcq %rbp, %rbx - adcq %r11, %rsi - adcq $0, %rax - movq -16(%rsp), %rdx ## 8-byte Reload - movq 24(%rdx), %rdx - mulxq -32(%rsp), %r11, %r8 ## 8-byte Folded Reload - mulxq -40(%rsp), %r15, %r9 ## 8-byte Folded Reload - mulxq -56(%rsp), %r12, %r14 ## 8-byte Folded Reload - mulxq -48(%rsp), %r10, %rbp ## 8-byte Folded Reload - addq %r12, %rbp - adcq %r15, %r14 - adcq %r11, %r9 - adcq $0, %r8 - addq %rdi, %r10 - adcq %rcx, %rbp - adcq %rbx, %r14 - adcq %rsi, %r9 - adcq %rax, %r8 - sbbq %rax, %rax - andl $1, %eax - movq -88(%rsp), %rdx ## 8-byte Reload - imulq %r10, %rdx - mulxq %r13, %rcx, %rdi - movq %rcx, -88(%rsp) ## 8-byte Spill - mulxq -80(%rsp), %r15, %rsi ## 8-byte Folded Reload - movq -72(%rsp), %rbx ## 8-byte Reload - mulxq %rbx, %r12, %rcx - movq -24(%rsp), %r11 ## 8-byte Reload - mulxq %r11, %rdx, %r13 - addq %r12, %r13 - adcq %r15, %rcx - adcq -88(%rsp), %rsi ## 8-byte Folded Reload - adcq $0, %rdi - addq %r10, %rdx - adcq %rbp, %r13 - adcq %r14, %rcx - adcq %r9, %rsi - adcq %r8, %rdi - adcq $0, %rax - movq %r13, %rdx - subq %r11, %rdx - movq %rcx, %rbp - sbbq %rbx, %rbp - movq %rsi, %r8 - sbbq -80(%rsp), %r8 ## 8-byte Folded Reload - movq %rdi, %rbx - sbbq -64(%rsp), %rbx ## 8-byte Folded Reload - sbbq $0, %rax - andl $1, %eax - cmovneq %rdi, %rbx - testb %al, %al - cmovneq %r13, %rdx - movq -8(%rsp), %rax ## 8-byte Reload - movq %rdx, (%rax) - cmovneq %rcx, %rbp - movq %rbp, 8(%rax) - cmovneq %rsi, %r8 - movq %r8, 16(%rax) - movq %rbx, 24(%rax) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montNF4Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montNF4Lbmi2: ## @mcl_fp_montNF4Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, -8(%rsp) ## 8-byte Spill - movq (%rsi), %rdi - movq %rdi, -56(%rsp) ## 8-byte Spill - movq 8(%rsi), %rbp - movq %rbp, -64(%rsp) ## 8-byte Spill - movq (%rdx), %rax - movq %rdx, %r15 - movq %r15, -24(%rsp) ## 8-byte Spill - movq %rbp, %rdx - mulxq %rax, %rbp, %r9 - movq %rdi, %rdx - mulxq %rax, %r12, %rbx - movq 16(%rsi), %rdx - movq %rdx, -40(%rsp) ## 8-byte Spill - addq %rbp, %rbx - mulxq %rax, %r14, %rbp - adcq %r9, %r14 - movq 24(%rsi), %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - mulxq %rax, %r8, %rdi - adcq %rbp, %r8 - adcq $0, %rdi - movq -8(%rcx), %r13 - movq (%rcx), %rax - movq %rax, -48(%rsp) ## 8-byte Spill - movq %r12, %rdx - imulq %r13, %rdx - mulxq %rax, %rax, %r11 - addq %r12, %rax - movq 8(%rcx), %rax - movq %rax, -16(%rsp) ## 8-byte Spill - mulxq %rax, %rbp, %r10 - adcq %rbx, %rbp - movq 16(%rcx), %rax - movq %rax, -32(%rsp) ## 8-byte Spill - mulxq %rax, %rsi, %rbx - adcq %r14, %rsi - movq 24(%rcx), %rax - movq %rax, -72(%rsp) ## 8-byte Spill - mulxq %rax, %rcx, %rdx - adcq %r8, %rcx - adcq $0, %rdi - addq %r11, %rbp - adcq %r10, %rsi - adcq %rbx, %rcx - adcq %rdx, %rdi - movq 8(%r15), %rdx - movq -64(%rsp), %r12 ## 8-byte Reload - mulxq %r12, %rbx, %r9 - movq -56(%rsp), %r15 ## 8-byte Reload - mulxq %r15, %r10, %r11 - addq %rbx, %r11 - mulxq -40(%rsp), %rax, %r8 ## 8-byte Folded Reload - adcq %r9, %rax - mulxq -80(%rsp), %r9, %rbx ## 8-byte Folded Reload - adcq %r8, %r9 - adcq $0, %rbx - addq %rbp, %r10 - adcq %rsi, %r11 - adcq %rcx, %rax - adcq %rdi, %r9 - adcq $0, %rbx - movq %r10, %rdx - imulq %r13, %rdx - movq 
-48(%rsp), %r14 ## 8-byte Reload - mulxq %r14, %rcx, %r8 - addq %r10, %rcx - mulxq -16(%rsp), %r10, %rdi ## 8-byte Folded Reload - adcq %r11, %r10 - mulxq -32(%rsp), %rcx, %rsi ## 8-byte Folded Reload - adcq %rax, %rcx - mulxq -72(%rsp), %rax, %rdx ## 8-byte Folded Reload - adcq %r9, %rax - adcq $0, %rbx - addq %r8, %r10 - adcq %rdi, %rcx - adcq %rsi, %rax - adcq %rdx, %rbx - movq -24(%rsp), %rdx ## 8-byte Reload - movq 16(%rdx), %rdx - mulxq %r12, %rsi, %r8 - mulxq %r15, %r11, %rbp - addq %rsi, %rbp - movq -40(%rsp), %r12 ## 8-byte Reload - mulxq %r12, %rdi, %r9 - adcq %r8, %rdi - mulxq -80(%rsp), %r8, %rsi ## 8-byte Folded Reload - adcq %r9, %r8 - adcq $0, %rsi - addq %r10, %r11 - adcq %rcx, %rbp - adcq %rax, %rdi - adcq %rbx, %r8 - adcq $0, %rsi - movq %r11, %rdx - imulq %r13, %rdx - mulxq %r14, %rax, %r10 - addq %r11, %rax - movq -16(%rsp), %r14 ## 8-byte Reload - mulxq %r14, %r9, %rbx - adcq %rbp, %r9 - movq -32(%rsp), %r15 ## 8-byte Reload - mulxq %r15, %rax, %rbp - adcq %rdi, %rax - mulxq -72(%rsp), %rcx, %rdx ## 8-byte Folded Reload - adcq %r8, %rcx - adcq $0, %rsi - addq %r10, %r9 - adcq %rbx, %rax - adcq %rbp, %rcx - adcq %rdx, %rsi - movq -24(%rsp), %rdx ## 8-byte Reload - movq 24(%rdx), %rdx - mulxq -64(%rsp), %rbx, %r8 ## 8-byte Folded Reload - mulxq -56(%rsp), %r11, %rbp ## 8-byte Folded Reload - addq %rbx, %rbp - mulxq %r12, %rdi, %r10 - adcq %r8, %rdi - mulxq -80(%rsp), %r8, %rbx ## 8-byte Folded Reload - adcq %r10, %r8 - adcq $0, %rbx - addq %r9, %r11 - adcq %rax, %rbp - adcq %rcx, %rdi - adcq %rsi, %r8 - adcq $0, %rbx - imulq %r11, %r13 - movq %r13, %rdx - movq -48(%rsp), %r12 ## 8-byte Reload - mulxq %r12, %rcx, %r9 - addq %r11, %rcx - mulxq %r14, %r11, %r10 - adcq %rbp, %r11 - movq %r15, %rsi - mulxq %rsi, %rax, %rcx - adcq %rdi, %rax - movq -72(%rsp), %rbp ## 8-byte Reload - mulxq %rbp, %r15, %rdx - adcq %r8, %r15 - adcq $0, %rbx - addq %r9, %r11 - adcq %r10, %rax - adcq %rcx, %r15 - adcq %rdx, %rbx - movq %r11, %rcx - subq %r12, %rcx - movq %rax, %rdx - sbbq %r14, %rdx - movq %r15, %rdi - sbbq %rsi, %rdi - movq %rbx, %rsi - sbbq %rbp, %rsi - cmovsq %r11, %rcx - movq -8(%rsp), %rbp ## 8-byte Reload - movq %rcx, (%rbp) - cmovsq %rax, %rdx - movq %rdx, 8(%rbp) - cmovsq %r15, %rdi - movq %rdi, 16(%rbp) - cmovsq %rbx, %rsi - movq %rsi, 24(%rbp) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montRed4Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montRed4Lbmi2: ## @mcl_fp_montRed4Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rdi, -8(%rsp) ## 8-byte Spill - movq -8(%rcx), %r13 - movq (%rcx), %rax - movq %rax, -32(%rsp) ## 8-byte Spill - movq (%rsi), %r10 - movq %r10, %rdx - imulq %r13, %rdx - movq 24(%rcx), %rdi - mulxq %rdi, %r9, %r15 - movq %rdi, %r14 - movq %r14, -40(%rsp) ## 8-byte Spill - movq 16(%rcx), %rdi - movq %rdi, -48(%rsp) ## 8-byte Spill - mulxq %rdi, %rdi, %rbx - movq 8(%rcx), %rcx - movq %rcx, -56(%rsp) ## 8-byte Spill - mulxq %rcx, %rcx, %r8 - mulxq %rax, %rdx, %rbp - addq %rcx, %rbp - adcq %rdi, %r8 - adcq %r9, %rbx - adcq $0, %r15 - movq 56(%rsi), %r11 - movq 48(%rsi), %rcx - addq %r10, %rdx - movq 40(%rsi), %r12 - adcq 8(%rsi), %rbp - adcq 16(%rsi), %r8 - adcq 24(%rsi), %rbx - adcq 32(%rsi), %r15 - adcq $0, %r12 - adcq $0, %rcx - movq %rcx, -64(%rsp) ## 8-byte Spill - adcq $0, %r11 - sbbq %rsi, %rsi - andl $1, %esi - movq %rbp, %rdx - imulq %r13, %rdx - mulxq %r14, %rax, %r9 - movq %rax, -72(%rsp) ## 8-byte Spill - mulxq -48(%rsp), %r14, %rdi ## 
8-byte Folded Reload - mulxq -56(%rsp), %r10, %rcx ## 8-byte Folded Reload - mulxq -32(%rsp), %rdx, %rax ## 8-byte Folded Reload - addq %r10, %rax - adcq %r14, %rcx - adcq -72(%rsp), %rdi ## 8-byte Folded Reload - adcq $0, %r9 - addq %rbp, %rdx - adcq %r8, %rax - adcq %rbx, %rcx - adcq %r15, %rdi - adcq %r12, %r9 - adcq $0, -64(%rsp) ## 8-byte Folded Spill - adcq $0, %r11 - movq %r11, -72(%rsp) ## 8-byte Spill - adcq $0, %rsi - movq %rax, %rdx - imulq %r13, %rdx - movq -40(%rsp), %r15 ## 8-byte Reload - mulxq %r15, %rbp, %r8 - movq %rbp, -16(%rsp) ## 8-byte Spill - movq -48(%rsp), %r11 ## 8-byte Reload - mulxq %r11, %rbx, %r10 - movq %rbx, -24(%rsp) ## 8-byte Spill - mulxq -56(%rsp), %r12, %rbp ## 8-byte Folded Reload - movq -32(%rsp), %r14 ## 8-byte Reload - mulxq %r14, %rdx, %rbx - addq %r12, %rbx - adcq -24(%rsp), %rbp ## 8-byte Folded Reload - adcq -16(%rsp), %r10 ## 8-byte Folded Reload - adcq $0, %r8 - addq %rax, %rdx - adcq %rcx, %rbx - adcq %rdi, %rbp - adcq %r9, %r10 - adcq -64(%rsp), %r8 ## 8-byte Folded Reload - adcq $0, -72(%rsp) ## 8-byte Folded Spill - adcq $0, %rsi - imulq %rbx, %r13 - movq %r13, %rdx - mulxq %r15, %rax, %rdi - movq %rax, -64(%rsp) ## 8-byte Spill - movq %r13, %rdx - mulxq %r11, %r9, %rax - movq -56(%rsp), %r11 ## 8-byte Reload - mulxq %r11, %r12, %rcx - mulxq %r14, %r15, %r13 - addq %r12, %r13 - adcq %r9, %rcx - adcq -64(%rsp), %rax ## 8-byte Folded Reload - adcq $0, %rdi - addq %rbx, %r15 - adcq %rbp, %r13 - adcq %r10, %rcx - adcq %r8, %rax - adcq -72(%rsp), %rdi ## 8-byte Folded Reload - adcq $0, %rsi - movq %r13, %rdx - subq %r14, %rdx - movq %rcx, %rbp - sbbq %r11, %rbp - movq %rax, %r8 - sbbq -48(%rsp), %r8 ## 8-byte Folded Reload - movq %rdi, %rbx - sbbq -40(%rsp), %rbx ## 8-byte Folded Reload - sbbq $0, %rsi - andl $1, %esi - cmovneq %rdi, %rbx - testb %sil, %sil - cmovneq %r13, %rdx - movq -8(%rsp), %rsi ## 8-byte Reload - movq %rdx, (%rsi) - cmovneq %rcx, %rbp - movq %rbp, 8(%rsi) - cmovneq %rax, %r8 - movq %r8, 16(%rsi) - movq %rbx, 24(%rsi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_addPre4Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addPre4Lbmi2: ## @mcl_fp_addPre4Lbmi2 -## BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rdx), %rax - movq (%rdx), %rcx - movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rax - movq %rcx, (%rdi) - movq %rdx, 8(%rdi) - movq %rax, 16(%rdi) - adcq %r8, %r9 - movq %r9, 24(%rdi) - sbbq %rax, %rax - andl $1, %eax - retq - - .globl _mcl_fp_subPre4Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subPre4Lbmi2: ## @mcl_fp_subPre4Lbmi2 -## BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rsi), %r10 - movq (%rsi), %rcx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rcx - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %r10 - movq %rcx, (%rdi) - movq %rsi, 8(%rdi) - movq %r10, 16(%rdi) - sbbq %r8, %r9 - movq %r9, 24(%rdi) - sbbq $0, %rax - andl $1, %eax - retq - - .globl _mcl_fp_shr1_4Lbmi2 - .p2align 4, 0x90 -_mcl_fp_shr1_4Lbmi2: ## @mcl_fp_shr1_4Lbmi2 -## BB#0: - movq 24(%rsi), %rax - movq 16(%rsi), %rcx - movq (%rsi), %rdx - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rdx - movq %rdx, (%rdi) - shrdq $1, %rcx, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rax, %rcx - movq %rcx, 16(%rdi) - shrq %rax - movq %rax, 24(%rdi) - retq - - .globl _mcl_fp_add4Lbmi2 - .p2align 4, 0x90 -_mcl_fp_add4Lbmi2: ## @mcl_fp_add4Lbmi2 -## BB#0: - movq 24(%rdx), %r10 - movq 24(%rsi), %r8 - movq 16(%rdx), %r9 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), 
%rdx - adcq 16(%rsi), %r9 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r9, 16(%rdi) - adcq %r10, %r8 - movq %r8, 24(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq 16(%rcx), %r9 - sbbq 24(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne LBB59_2 -## BB#1: ## %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r9, 16(%rdi) - movq %r8, 24(%rdi) -LBB59_2: ## %carry - retq - - .globl _mcl_fp_addNF4Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addNF4Lbmi2: ## @mcl_fp_addNF4Lbmi2 -## BB#0: - pushq %rbx - movq 24(%rdx), %r8 - movq 16(%rdx), %r9 - movq (%rdx), %r11 - movq 8(%rdx), %r10 - addq (%rsi), %r11 - adcq 8(%rsi), %r10 - adcq 16(%rsi), %r9 - adcq 24(%rsi), %r8 - movq %r11, %rsi - subq (%rcx), %rsi - movq %r10, %rdx - sbbq 8(%rcx), %rdx - movq %r9, %rax - sbbq 16(%rcx), %rax - movq %r8, %rbx - sbbq 24(%rcx), %rbx - testq %rbx, %rbx - cmovsq %r11, %rsi - movq %rsi, (%rdi) - cmovsq %r10, %rdx - movq %rdx, 8(%rdi) - cmovsq %r9, %rax - movq %rax, 16(%rdi) - cmovsq %r8, %rbx - movq %rbx, 24(%rdi) - popq %rbx - retq - - .globl _mcl_fp_sub4Lbmi2 - .p2align 4, 0x90 -_mcl_fp_sub4Lbmi2: ## @mcl_fp_sub4Lbmi2 -## BB#0: - movq 24(%rdx), %r10 - movq 24(%rsi), %r8 - movq 16(%rsi), %r9 - movq (%rsi), %rax - movq 8(%rsi), %r11 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r11 - sbbq 16(%rdx), %r9 - movq %rax, (%rdi) - movq %r11, 8(%rdi) - movq %r9, 16(%rdi) - sbbq %r10, %r8 - movq %r8, 24(%rdi) - sbbq $0, %rsi - testb $1, %sil - jne LBB61_2 -## BB#1: ## %nocarry - retq -LBB61_2: ## %carry - movq 24(%rcx), %r10 - movq 8(%rcx), %rsi - movq 16(%rcx), %rdx - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %r11, %rsi - movq %rsi, 8(%rdi) - adcq %r9, %rdx - movq %rdx, 16(%rdi) - adcq %r8, %r10 - movq %r10, 24(%rdi) - retq - - .globl _mcl_fp_subNF4Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subNF4Lbmi2: ## @mcl_fp_subNF4Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movdqu (%rdx), %xmm0 - movdqu 16(%rdx), %xmm1 - pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] - movd %xmm2, %r8 - movdqu (%rsi), %xmm2 - movdqu 16(%rsi), %xmm3 - pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1] - movd %xmm4, %r15 - movd %xmm1, %r9 - movd %xmm3, %r11 - pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] - movd %xmm1, %r10 - pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1] - movd %xmm1, %r14 - movd %xmm0, %rdx - movd %xmm2, %r12 - subq %rdx, %r12 - sbbq %r10, %r14 - sbbq %r9, %r11 - sbbq %r8, %r15 - movq %r15, %rdx - sarq $63, %rdx - movq 24(%rcx), %rsi - andq %rdx, %rsi - movq 16(%rcx), %rax - andq %rdx, %rax - movq 8(%rcx), %rbx - andq %rdx, %rbx - andq (%rcx), %rdx - addq %r12, %rdx - movq %rdx, (%rdi) - adcq %r14, %rbx - movq %rbx, 8(%rdi) - adcq %r11, %rax - movq %rax, 16(%rdi) - adcq %r15, %rsi - movq %rsi, 24(%rdi) - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq - - .globl _mcl_fpDbl_add4Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_add4Lbmi2: ## @mcl_fpDbl_add4Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r9 - movq 56(%rsi), %r8 - movq 48(%rdx), %r10 - movq 48(%rsi), %r12 - movq 40(%rdx), %r11 - movq 32(%rdx), %r14 - movq 24(%rdx), %r15 - movq 16(%rdx), %rbx - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rbx - movq 40(%rsi), %r13 - movq 24(%rsi), %rbp - movq 32(%rsi), %rsi - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %rbx, 16(%rdi) - adcq %r15, %rbp - movq %rbp, 24(%rdi) - adcq %r14, %rsi - adcq %r11, %r13 - adcq %r10, %r12 - adcq %r9, %r8 - sbbq %rax, %rax 
- andl $1, %eax - movq %rsi, %rdx - subq (%rcx), %rdx - movq %r13, %rbp - sbbq 8(%rcx), %rbp - movq %r12, %rbx - sbbq 16(%rcx), %rbx - movq %r8, %r9 - sbbq 24(%rcx), %r9 - sbbq $0, %rax - andl $1, %eax - cmovneq %rsi, %rdx - movq %rdx, 32(%rdi) - testb %al, %al - cmovneq %r13, %rbp - movq %rbp, 40(%rdi) - cmovneq %r12, %rbx - movq %rbx, 48(%rdi) - cmovneq %r8, %r9 - movq %r9, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sub4Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sub4Lbmi2: ## @mcl_fpDbl_sub4Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r9 - movq 56(%rsi), %r8 - movq 48(%rdx), %r10 - movq 24(%rdx), %r11 - movq (%rsi), %rbx - xorl %eax, %eax - subq (%rdx), %rbx - movq %rbx, (%rdi) - movq 8(%rsi), %rbx - sbbq 8(%rdx), %rbx - movq %rbx, 8(%rdi) - movq 16(%rsi), %rbx - sbbq 16(%rdx), %rbx - movq %rbx, 16(%rdi) - movq 24(%rsi), %rbx - sbbq %r11, %rbx - movq 40(%rdx), %r11 - movq 32(%rdx), %rdx - movq %rbx, 24(%rdi) - movq 32(%rsi), %r12 - sbbq %rdx, %r12 - movq 48(%rsi), %r14 - movq 40(%rsi), %r15 - sbbq %r11, %r15 - sbbq %r10, %r14 - sbbq %r9, %r8 - movl $0, %edx - sbbq $0, %rdx - andl $1, %edx - movq (%rcx), %rsi - cmoveq %rax, %rsi - testb %dl, %dl - movq 16(%rcx), %rdx - cmoveq %rax, %rdx - movq 24(%rcx), %rbx - cmoveq %rax, %rbx - cmovneq 8(%rcx), %rax - addq %r12, %rsi - movq %rsi, 32(%rdi) - adcq %r15, %rax - movq %rax, 40(%rdi) - adcq %r14, %rdx - movq %rdx, 48(%rdi) - adcq %r8, %rbx - movq %rbx, 56(%rdi) - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_mulUnitPre5Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mulUnitPre5Lbmi2: ## @mcl_fp_mulUnitPre5Lbmi2 -## BB#0: - pushq %r14 - pushq %rbx - mulxq 32(%rsi), %r8, %r11 - mulxq 24(%rsi), %r9, %rax - mulxq 16(%rsi), %r10, %rcx - mulxq 8(%rsi), %r14, %rbx - mulxq (%rsi), %rdx, %rsi - movq %rdx, (%rdi) - addq %r14, %rsi - movq %rsi, 8(%rdi) - adcq %r10, %rbx - movq %rbx, 16(%rdi) - adcq %r9, %rcx - movq %rcx, 24(%rdi) - adcq %r8, %rax - movq %rax, 32(%rdi) - adcq $0, %r11 - movq %r11, 40(%rdi) - popq %rbx - popq %r14 - retq - - .globl _mcl_fpDbl_mulPre5Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_mulPre5Lbmi2: ## @mcl_fpDbl_mulPre5Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -24(%rsp) ## 8-byte Spill - movq %rdi, -40(%rsp) ## 8-byte Spill - movq (%rsi), %r11 - movq 8(%rsi), %r10 - movq (%rdx), %rcx - movq %r10, %rdx - mulxq %rcx, %rax, %r14 - movq %r11, %rdx - mulxq %rcx, %rdx, %rbx - movq %rdx, -56(%rsp) ## 8-byte Spill - movq 24(%rsi), %rbp - movq %rbp, -48(%rsp) ## 8-byte Spill - movq 16(%rsi), %r15 - addq %rax, %rbx - movq %r15, %rdx - mulxq %rcx, %rax, %r13 - adcq %r14, %rax - movq %rbp, %rdx - mulxq %rcx, %r8, %r12 - adcq %r13, %r8 - movq 32(%rsi), %r14 - movq %r14, %rdx - mulxq %rcx, %r9, %r13 - adcq %r12, %r9 - movq -56(%rsp), %rcx ## 8-byte Reload - movq %rcx, (%rdi) - adcq $0, %r13 - movq -24(%rsp), %rdi ## 8-byte Reload - movq 8(%rdi), %rbp - movq %r11, %rdx - mulxq %rbp, %r12, %r11 - addq %rbx, %r12 - movq %r10, %rdx - mulxq %rbp, %rbx, %rcx - movq %rcx, -56(%rsp) ## 8-byte Spill - adcq %rax, %rbx - movq %r15, %rdx - mulxq %rbp, %rcx, %r10 - adcq %r8, %rcx - movq -48(%rsp), %rdx ## 8-byte Reload - mulxq %rbp, %rax, %r8 - adcq %r9, %rax - movq %r14, %rdx - mulxq %rbp, %r15, %rdx - adcq %r13, %r15 - sbbq %r14, %r14 - andl $1, %r14d - addq %r11, %rbx - movq -40(%rsp), %rbp ## 8-byte Reload - movq %r12, 8(%rbp) - adcq -56(%rsp), %rcx ## 8-byte Folded Reload - adcq 
%r10, %rax - adcq %r8, %r15 - adcq %rdx, %r14 - movq (%rsi), %rdx - movq %rdx, -56(%rsp) ## 8-byte Spill - movq 8(%rsi), %r8 - movq %r8, -48(%rsp) ## 8-byte Spill - movq 16(%rdi), %rbp - mulxq %rbp, %r12, %rdx - movq %rdx, -8(%rsp) ## 8-byte Spill - addq %rbx, %r12 - movq %r8, %rdx - mulxq %rbp, %rbx, %rdx - movq %rdx, -16(%rsp) ## 8-byte Spill - adcq %rcx, %rbx - movq 16(%rsi), %r11 - movq %r11, %rdx - mulxq %rbp, %rcx, %rdx - movq %rdx, -32(%rsp) ## 8-byte Spill - adcq %rax, %rcx - movq 24(%rsi), %r13 - movq %r13, %rdx - mulxq %rbp, %r9, %r10 - adcq %r15, %r9 - movq 32(%rsi), %r15 - movq %r15, %rdx - mulxq %rbp, %r8, %rdx - adcq %r14, %r8 - sbbq %r14, %r14 - andl $1, %r14d - addq -8(%rsp), %rbx ## 8-byte Folded Reload - adcq -16(%rsp), %rcx ## 8-byte Folded Reload - adcq -32(%rsp), %r9 ## 8-byte Folded Reload - adcq %r10, %r8 - adcq %rdx, %r14 - movq -40(%rsp), %r10 ## 8-byte Reload - movq %r12, 16(%r10) - movq %rdi, %rbp - movq 24(%rbp), %rax - movq -56(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %r12, %rdi - addq %rbx, %r12 - movq -48(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %rbx, %rdx - movq %rdx, -48(%rsp) ## 8-byte Spill - adcq %rcx, %rbx - movq %r11, %rdx - mulxq %rax, %rcx, %r11 - adcq %r9, %rcx - movq %r13, %rdx - mulxq %rax, %r13, %r9 - adcq %r8, %r13 - movq %r15, %rdx - mulxq %rax, %r8, %rdx - adcq %r14, %r8 - sbbq %r14, %r14 - andl $1, %r14d - addq %rdi, %rbx - movq %r12, 24(%r10) - movq %r10, %rdi - adcq -48(%rsp), %rcx ## 8-byte Folded Reload - adcq %r11, %r13 - adcq %r9, %r8 - adcq %rdx, %r14 - movq 32(%rbp), %rdx - mulxq 8(%rsi), %rax, %r9 - mulxq (%rsi), %rbp, %r10 - addq %rbx, %rbp - adcq %rcx, %rax - mulxq 16(%rsi), %rbx, %r11 - adcq %r13, %rbx - movq %rbp, 32(%rdi) - mulxq 32(%rsi), %rcx, %r15 - mulxq 24(%rsi), %rsi, %rdx - adcq %r8, %rsi - adcq %r14, %rcx - sbbq %rbp, %rbp - andl $1, %ebp - addq %r10, %rax - movq %rax, 40(%rdi) - adcq %r9, %rbx - movq %rbx, 48(%rdi) - adcq %r11, %rsi - movq %rsi, 56(%rdi) - adcq %rdx, %rcx - movq %rcx, 64(%rdi) - adcq %r15, %rbp - movq %rbp, 72(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sqrPre5Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre5Lbmi2: ## @mcl_fpDbl_sqrPre5Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 16(%rsi), %r11 - movq (%rsi), %rax - movq 8(%rsi), %rcx - movq %r11, %rdx - mulxq %rax, %rbx, %r15 - movq 32(%rsi), %r9 - movq 24(%rsi), %r13 - movq %rcx, %rdx - mulxq %rax, %r12, %rbp - movq %rbp, -16(%rsp) ## 8-byte Spill - movq %rax, %rdx - mulxq %rax, %rdx, %r14 - movq %rdx, -24(%rsp) ## 8-byte Spill - addq %r12, %r14 - adcq %rbp, %rbx - movq %r13, %rdx - mulxq %rax, %r8, %r10 - adcq %r15, %r8 - movq %r9, %rdx - movq %r9, -8(%rsp) ## 8-byte Spill - mulxq %rax, %rbp, %r15 - adcq %r10, %rbp - movq -24(%rsp), %rax ## 8-byte Reload - movq %rax, (%rdi) - adcq $0, %r15 - addq %r12, %r14 - movq %rcx, %rdx - mulxq %rcx, %rax, %rdx - movq %rdx, -24(%rsp) ## 8-byte Spill - adcq %rbx, %rax - movq %r11, %rdx - mulxq %rcx, %rbx, %r10 - adcq %r8, %rbx - movq %r13, %rdx - mulxq %rcx, %r13, %r8 - adcq %rbp, %r13 - movq %r9, %rdx - mulxq %rcx, %r12, %rcx - adcq %r15, %r12 - sbbq %r15, %r15 - andl $1, %r15d - addq -16(%rsp), %rax ## 8-byte Folded Reload - movq %r14, 8(%rdi) - adcq -24(%rsp), %rbx ## 8-byte Folded Reload - adcq %r10, %r13 - adcq %r8, %r12 - adcq %rcx, %r15 - movq (%rsi), %r9 - movq 8(%rsi), %r10 - movq %r9, %rdx - mulxq %r11, %rbp, %rcx - movq %rcx, -16(%rsp) ## 8-byte Spill - addq %rax, %rbp - movq %r10, %rdx 
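## mcl_fpDbl_mulPre5L computes the full 10-word product by operand scanning:
## for each multiplier word it forms five mulxq partials, folds them into the
## running accumulator window with one carry chain, and retires the lowest
## window word to memory before moving to the next column.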
- mulxq %r11, %rax, %r8 - adcq %rbx, %rax - movq %r11, %rdx - mulxq %r11, %r14, %rcx - movq %rcx, -24(%rsp) ## 8-byte Spill - adcq %r13, %r14 - movq 24(%rsi), %rcx - movq %rcx, %rdx - mulxq %r11, %rbx, %r13 - adcq %r12, %rbx - movq -8(%rsp), %rdx ## 8-byte Reload - mulxq %r11, %r12, %rdx - adcq %r15, %r12 - sbbq %r15, %r15 - andl $1, %r15d - addq -16(%rsp), %rax ## 8-byte Folded Reload - adcq %r8, %r14 - movq %rbp, 16(%rdi) - adcq -24(%rsp), %rbx ## 8-byte Folded Reload - adcq %r13, %r12 - adcq %rdx, %r15 - movq %r10, %rdx - mulxq %rcx, %r10, %rdx - movq %rdx, -8(%rsp) ## 8-byte Spill - movq %r9, %rdx - mulxq %rcx, %r13, %rdx - movq %rdx, -16(%rsp) ## 8-byte Spill - addq %rax, %r13 - movq 16(%rsi), %r8 - movq 32(%rsi), %rax - adcq %r14, %r10 - movq %r8, %rdx - mulxq %rcx, %r9, %r14 - adcq %rbx, %r9 - movq %rcx, %rdx - mulxq %rcx, %r11, %rbp - adcq %r12, %r11 - movq %rax, %rdx - mulxq %rcx, %r12, %rdx - adcq %r15, %r12 - sbbq %rbx, %rbx - andl $1, %ebx - addq -16(%rsp), %r10 ## 8-byte Folded Reload - movq %r13, 24(%rdi) - adcq -8(%rsp), %r9 ## 8-byte Folded Reload - adcq %r14, %r11 - adcq %rbp, %r12 - adcq %rdx, %rbx - movq %rax, %rdx - mulxq 24(%rsi), %rbp, %r14 - mulxq (%rsi), %rdx, %r15 - addq %r10, %rdx - movq %rdx, 32(%rdi) - movq %rax, %rdx - mulxq 8(%rsi), %rsi, %r10 - adcq %r9, %rsi - movq %r8, %rdx - mulxq %rax, %rcx, %r8 - adcq %r11, %rcx - adcq %r12, %rbp - movq %rax, %rdx - mulxq %rax, %rdx, %rax - adcq %rbx, %rdx - sbbq %rbx, %rbx - andl $1, %ebx - addq %r15, %rsi - movq %rsi, 40(%rdi) - adcq %r10, %rcx - movq %rcx, 48(%rdi) - adcq %r8, %rbp - movq %rbp, 56(%rdi) - adcq %r14, %rdx - movq %rdx, 64(%rdi) - adcq %rax, %rbx - movq %rbx, 72(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mont5Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mont5Lbmi2: ## @mcl_fp_mont5Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -96(%rsp) ## 8-byte Spill - movq %rdi, -8(%rsp) ## 8-byte Spill - movq 32(%rsi), %rdi - movq %rdi, -104(%rsp) ## 8-byte Spill - movq (%rdx), %rax - movq %rdi, %rdx - mulxq %rax, %r10, %rbx - movq 24(%rsi), %rdx - movq %rdx, -24(%rsp) ## 8-byte Spill - mulxq %rax, %r12, %r14 - movq 16(%rsi), %rdx - movq %rdx, -32(%rsp) ## 8-byte Spill - mulxq %rax, %r13, %r11 - movq (%rsi), %rbp - movq %rbp, -40(%rsp) ## 8-byte Spill - movq 8(%rsi), %rdx - movq %rdx, -48(%rsp) ## 8-byte Spill - mulxq %rax, %rdi, %r9 - movq %rbp, %rdx - mulxq %rax, %r15, %r8 - addq %rdi, %r8 - adcq %r13, %r9 - adcq %r12, %r11 - adcq %r10, %r14 - adcq $0, %rbx - movq %rbx, -112(%rsp) ## 8-byte Spill - movq -8(%rcx), %rax - movq %rax, -16(%rsp) ## 8-byte Spill - movq %r15, %rdx - imulq %rax, %rdx - movq 32(%rcx), %rax - movq %rax, -56(%rsp) ## 8-byte Spill - mulxq %rax, %rax, %r12 - movq %rax, -120(%rsp) ## 8-byte Spill - movq 24(%rcx), %rax - movq %rax, -64(%rsp) ## 8-byte Spill - mulxq %rax, %r13, %r10 - movq 8(%rcx), %rax - movq %rax, -72(%rsp) ## 8-byte Spill - mulxq %rax, %rdi, %rbp - movq (%rcx), %rax - movq %rax, -80(%rsp) ## 8-byte Spill - mulxq %rax, %rsi, %rbx - addq %rdi, %rbx - movq 16(%rcx), %rax - movq %rax, -88(%rsp) ## 8-byte Spill - mulxq %rax, %rdi, %rcx - adcq %rbp, %rdi - adcq %r13, %rcx - adcq -120(%rsp), %r10 ## 8-byte Folded Reload - adcq $0, %r12 - addq %r15, %rsi - adcq %r8, %rbx - adcq %r9, %rdi - adcq %r11, %rcx - adcq %r14, %r10 - adcq -112(%rsp), %r12 ## 8-byte Folded Reload - sbbq %rbp, %rbp - andl $1, %ebp - movq -96(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rdx - mulxq 
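## mcl_fp_mont5L, beginning above, interleaves multiplication with Montgomery
## reduction one multiplier word at a time (CIOS style). A minimal sketch of
## one round, assuming the constant the code loads from -8(%rcx) is
## inv = -p[0]^-1 mod 2^64:
##     t += a * b[i];        // the mulxq/adcq chains
##     m  = t[0] * inv;      // the imulq
##     t += m * p;           // forces t[0] to zero
##     t >>= 64;             // drop the zero word
## After the fifth round the result is at most one modulus too large, so a
## single conditional subtraction finishes the job.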
-104(%rsp), %rax, %r14 ## 8-byte Folded Reload - movq %rax, -112(%rsp) ## 8-byte Spill - mulxq -24(%rsp), %rax, %r15 ## 8-byte Folded Reload - movq %rax, -120(%rsp) ## 8-byte Spill - mulxq -32(%rsp), %r13, %r9 ## 8-byte Folded Reload - mulxq -48(%rsp), %r8, %rsi ## 8-byte Folded Reload - mulxq -40(%rsp), %r11, %rax ## 8-byte Folded Reload - addq %r8, %rax - adcq %r13, %rsi - adcq -120(%rsp), %r9 ## 8-byte Folded Reload - adcq -112(%rsp), %r15 ## 8-byte Folded Reload - adcq $0, %r14 - addq %rbx, %r11 - adcq %rdi, %rax - adcq %rcx, %rsi - adcq %r10, %r9 - adcq %r12, %r15 - adcq %rbp, %r14 - sbbq %r12, %r12 - andl $1, %r12d - movq %r11, %rdx - imulq -16(%rsp), %rdx ## 8-byte Folded Reload - mulxq -56(%rsp), %rcx, %r10 ## 8-byte Folded Reload - movq %rcx, -112(%rsp) ## 8-byte Spill - mulxq -64(%rsp), %rcx, %rdi ## 8-byte Folded Reload - movq %rcx, -120(%rsp) ## 8-byte Spill - mulxq -88(%rsp), %r13, %rcx ## 8-byte Folded Reload - mulxq -72(%rsp), %r8, %rbx ## 8-byte Folded Reload - mulxq -80(%rsp), %rdx, %rbp ## 8-byte Folded Reload - addq %r8, %rbp - adcq %r13, %rbx - adcq -120(%rsp), %rcx ## 8-byte Folded Reload - adcq -112(%rsp), %rdi ## 8-byte Folded Reload - adcq $0, %r10 - addq %r11, %rdx - adcq %rax, %rbp - adcq %rsi, %rbx - adcq %r9, %rcx - adcq %r15, %rdi - adcq %r14, %r10 - adcq $0, %r12 - movq -96(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rdx - mulxq -104(%rsp), %rax, %r15 ## 8-byte Folded Reload - movq %rax, -112(%rsp) ## 8-byte Spill - mulxq -24(%rsp), %rax, %r11 ## 8-byte Folded Reload - movq %rax, -120(%rsp) ## 8-byte Spill - mulxq -32(%rsp), %r13, %r9 ## 8-byte Folded Reload - mulxq -48(%rsp), %rsi, %r8 ## 8-byte Folded Reload - mulxq -40(%rsp), %r14, %rax ## 8-byte Folded Reload - addq %rsi, %rax - adcq %r13, %r8 - adcq -120(%rsp), %r9 ## 8-byte Folded Reload - adcq -112(%rsp), %r11 ## 8-byte Folded Reload - adcq $0, %r15 - addq %rbp, %r14 - adcq %rbx, %rax - adcq %rcx, %r8 - adcq %rdi, %r9 - adcq %r10, %r11 - adcq %r12, %r15 - sbbq %r13, %r13 - andl $1, %r13d - movq %r14, %rdx - imulq -16(%rsp), %rdx ## 8-byte Folded Reload - mulxq -56(%rsp), %rcx, %r12 ## 8-byte Folded Reload - movq %rcx, -112(%rsp) ## 8-byte Spill - mulxq -64(%rsp), %rcx, %r10 ## 8-byte Folded Reload - movq %rcx, -120(%rsp) ## 8-byte Spill - mulxq -88(%rsp), %rdi, %rsi ## 8-byte Folded Reload - mulxq -72(%rsp), %rcx, %rbx ## 8-byte Folded Reload - mulxq -80(%rsp), %rdx, %rbp ## 8-byte Folded Reload - addq %rcx, %rbp - adcq %rdi, %rbx - adcq -120(%rsp), %rsi ## 8-byte Folded Reload - adcq -112(%rsp), %r10 ## 8-byte Folded Reload - adcq $0, %r12 - addq %r14, %rdx - adcq %rax, %rbp - adcq %r8, %rbx - adcq %r9, %rsi - adcq %r11, %r10 - adcq %r15, %r12 - adcq $0, %r13 - movq -96(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rdx - mulxq -104(%rsp), %rcx, %rax ## 8-byte Folded Reload - movq %rcx, -120(%rsp) ## 8-byte Spill - movq %rax, -112(%rsp) ## 8-byte Spill - mulxq -24(%rsp), %r11, %r14 ## 8-byte Folded Reload - mulxq -32(%rsp), %r8, %r9 ## 8-byte Folded Reload - mulxq -48(%rsp), %rax, %rdi ## 8-byte Folded Reload - mulxq -40(%rsp), %r15, %rcx ## 8-byte Folded Reload - addq %rax, %rcx - adcq %r8, %rdi - adcq %r11, %r9 - adcq -120(%rsp), %r14 ## 8-byte Folded Reload - movq -112(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - addq %rbp, %r15 - adcq %rbx, %rcx - adcq %rsi, %rdi - adcq %r10, %r9 - adcq %r12, %r14 - adcq %r13, %rax - movq %rax, -112(%rsp) ## 8-byte Spill - sbbq %r12, %r12 - andl $1, %r12d - movq %r15, %rdx - imulq -16(%rsp), %rdx ## 8-byte Folded Reload - mulxq -56(%rsp), %rax, %rbp ## 8-byte 
Folded Reload - movq %rax, -120(%rsp) ## 8-byte Spill - mulxq -64(%rsp), %r13, %r10 ## 8-byte Folded Reload - mulxq -88(%rsp), %rbx, %r8 ## 8-byte Folded Reload - mulxq -72(%rsp), %rsi, %r11 ## 8-byte Folded Reload - mulxq -80(%rsp), %rdx, %rax ## 8-byte Folded Reload - addq %rsi, %rax - adcq %rbx, %r11 - adcq %r13, %r8 - adcq -120(%rsp), %r10 ## 8-byte Folded Reload - adcq $0, %rbp - addq %r15, %rdx - adcq %rcx, %rax - adcq %rdi, %r11 - adcq %r9, %r8 - adcq %r14, %r10 - adcq -112(%rsp), %rbp ## 8-byte Folded Reload - adcq $0, %r12 - movq -96(%rsp), %rcx ## 8-byte Reload - movq 32(%rcx), %rdx - mulxq -104(%rsp), %rcx, %r14 ## 8-byte Folded Reload - movq %rcx, -96(%rsp) ## 8-byte Spill - mulxq -24(%rsp), %rcx, %rbx ## 8-byte Folded Reload - movq %rcx, -104(%rsp) ## 8-byte Spill - mulxq -32(%rsp), %rsi, %r15 ## 8-byte Folded Reload - mulxq -48(%rsp), %rcx, %r9 ## 8-byte Folded Reload - mulxq -40(%rsp), %r13, %rdi ## 8-byte Folded Reload - addq %rcx, %rdi - adcq %rsi, %r9 - adcq -104(%rsp), %r15 ## 8-byte Folded Reload - adcq -96(%rsp), %rbx ## 8-byte Folded Reload - adcq $0, %r14 - addq %rax, %r13 - adcq %r11, %rdi - adcq %r8, %r9 - adcq %r10, %r15 - adcq %rbp, %rbx - adcq %r12, %r14 - sbbq %rax, %rax - movq -16(%rsp), %rdx ## 8-byte Reload - imulq %r13, %rdx - mulxq -80(%rsp), %r10, %rcx ## 8-byte Folded Reload - mulxq -72(%rsp), %r8, %rsi ## 8-byte Folded Reload - addq %rcx, %r8 - mulxq -88(%rsp), %rbp, %r11 ## 8-byte Folded Reload - adcq %rsi, %rbp - mulxq -64(%rsp), %rcx, %r12 ## 8-byte Folded Reload - adcq %r11, %rcx - mulxq -56(%rsp), %rsi, %r11 ## 8-byte Folded Reload - adcq %r12, %rsi - adcq $0, %r11 - andl $1, %eax - addq %r13, %r10 - adcq %rdi, %r8 - adcq %r9, %rbp - adcq %r15, %rcx - adcq %rbx, %rsi - adcq %r14, %r11 - adcq $0, %rax - movq %r8, %rdi - subq -80(%rsp), %rdi ## 8-byte Folded Reload - movq %rbp, %rbx - sbbq -72(%rsp), %rbx ## 8-byte Folded Reload - movq %rcx, %r9 - sbbq -88(%rsp), %r9 ## 8-byte Folded Reload - movq %rsi, %rdx - sbbq -64(%rsp), %rdx ## 8-byte Folded Reload - movq %r11, %r10 - sbbq -56(%rsp), %r10 ## 8-byte Folded Reload - sbbq $0, %rax - andl $1, %eax - cmovneq %rsi, %rdx - testb %al, %al - cmovneq %r8, %rdi - movq -8(%rsp), %rax ## 8-byte Reload - movq %rdi, (%rax) - cmovneq %rbp, %rbx - movq %rbx, 8(%rax) - cmovneq %rcx, %r9 - movq %r9, 16(%rax) - movq %rdx, 24(%rax) - cmovneq %r11, %r10 - movq %r10, 32(%rax) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montNF5Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montNF5Lbmi2: ## @mcl_fp_montNF5Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -96(%rsp) ## 8-byte Spill - movq %rdi, -8(%rsp) ## 8-byte Spill - movq (%rsi), %r13 - movq 8(%rsi), %rbp - movq %rbp, -104(%rsp) ## 8-byte Spill - movq (%rdx), %rax - movq %rbp, %rdx - mulxq %rax, %rbp, %r9 - movq %r13, %rdx - movq %r13, -24(%rsp) ## 8-byte Spill - mulxq %rax, %r8, %r10 - movq 16(%rsi), %rdx - movq %rdx, -64(%rsp) ## 8-byte Spill - addq %rbp, %r10 - mulxq %rax, %rbp, %rbx - adcq %r9, %rbp - movq 24(%rsi), %rdx - movq %rdx, -72(%rsp) ## 8-byte Spill - mulxq %rax, %r15, %r9 - adcq %rbx, %r15 - movq 32(%rsi), %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - mulxq %rax, %rax, %r11 - adcq %r9, %rax - adcq $0, %r11 - movq -8(%rcx), %rsi - movq %rsi, -32(%rsp) ## 8-byte Spill - movq %r8, %rdx - imulq %rsi, %rdx - movq (%rcx), %rsi - movq %rsi, -48(%rsp) ## 8-byte Spill - mulxq %rsi, %rbx, %r14 - addq %r8, %rbx - movq 8(%rcx), %rsi - movq %rsi, -40(%rsp) ## 8-byte 
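## mcl_fp_mont5L ends just above with the standard finale: subtract each word
## of p from a copy of the accumulator and let cmovneq, keyed on the final
## borrow, select t or t - p. mcl_fp_montNF5L, starting here, skips the
## per-round carry-word bookkeeping (no sbbq/andl $1) and finishes with a
## signed select (sarq $63 / cmovsq) instead; that shortcut relies on the
## modulus leaving the top bit of its highest limb clear, so the accumulator
## can never carry out.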
Spill - mulxq %rsi, %rbx, %r12 - adcq %r10, %rbx - movq 16(%rcx), %rsi - movq %rsi, -16(%rsp) ## 8-byte Spill - mulxq %rsi, %r10, %rdi - adcq %rbp, %r10 - movq 24(%rcx), %rsi - movq %rsi, -88(%rsp) ## 8-byte Spill - mulxq %rsi, %r9, %rbp - adcq %r15, %r9 - movq 32(%rcx), %rcx - movq %rcx, -56(%rsp) ## 8-byte Spill - mulxq %rcx, %r8, %rcx - adcq %rax, %r8 - adcq $0, %r11 - addq %r14, %rbx - adcq %r12, %r10 - adcq %rdi, %r9 - adcq %rbp, %r8 - adcq %rcx, %r11 - movq -96(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rdx - mulxq -104(%rsp), %rcx, %rsi ## 8-byte Folded Reload - mulxq %r13, %r14, %rax - addq %rcx, %rax - mulxq -64(%rsp), %rcx, %rdi ## 8-byte Folded Reload - adcq %rsi, %rcx - mulxq -72(%rsp), %rsi, %r15 ## 8-byte Folded Reload - adcq %rdi, %rsi - mulxq -80(%rsp), %rdi, %rbp ## 8-byte Folded Reload - adcq %r15, %rdi - adcq $0, %rbp - addq %rbx, %r14 - adcq %r10, %rax - adcq %r9, %rcx - adcq %r8, %rsi - adcq %r11, %rdi - adcq $0, %rbp - movq %r14, %rdx - movq -32(%rsp), %r12 ## 8-byte Reload - imulq %r12, %rdx - mulxq -48(%rsp), %rbx, %r15 ## 8-byte Folded Reload - addq %r14, %rbx - movq -40(%rsp), %r13 ## 8-byte Reload - mulxq %r13, %r8, %rbx - adcq %rax, %r8 - mulxq -16(%rsp), %r9, %rax ## 8-byte Folded Reload - adcq %rcx, %r9 - mulxq -88(%rsp), %r10, %rcx ## 8-byte Folded Reload - adcq %rsi, %r10 - mulxq -56(%rsp), %r11, %rdx ## 8-byte Folded Reload - adcq %rdi, %r11 - adcq $0, %rbp - addq %r15, %r8 - adcq %rbx, %r9 - adcq %rax, %r10 - adcq %rcx, %r11 - adcq %rdx, %rbp - movq -96(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rdx - mulxq -104(%rsp), %rcx, %rax ## 8-byte Folded Reload - mulxq -24(%rsp), %r14, %rsi ## 8-byte Folded Reload - addq %rcx, %rsi - mulxq -64(%rsp), %rbx, %rcx ## 8-byte Folded Reload - adcq %rax, %rbx - mulxq -72(%rsp), %rdi, %r15 ## 8-byte Folded Reload - adcq %rcx, %rdi - mulxq -80(%rsp), %rcx, %rax ## 8-byte Folded Reload - adcq %r15, %rcx - adcq $0, %rax - addq %r8, %r14 - adcq %r9, %rsi - adcq %r10, %rbx - adcq %r11, %rdi - adcq %rbp, %rcx - adcq $0, %rax - movq %r14, %rdx - imulq %r12, %rdx - movq -48(%rsp), %r12 ## 8-byte Reload - mulxq %r12, %rbp, %r15 - addq %r14, %rbp - mulxq %r13, %r8, %rbp - adcq %rsi, %r8 - movq -16(%rsp), %r13 ## 8-byte Reload - mulxq %r13, %r9, %rsi - adcq %rbx, %r9 - mulxq -88(%rsp), %r10, %rbx ## 8-byte Folded Reload - adcq %rdi, %r10 - mulxq -56(%rsp), %r11, %rdx ## 8-byte Folded Reload - adcq %rcx, %r11 - adcq $0, %rax - addq %r15, %r8 - adcq %rbp, %r9 - adcq %rsi, %r10 - adcq %rbx, %r11 - adcq %rdx, %rax - movq -96(%rsp), %rcx ## 8-byte Reload - movq 24(%rcx), %rdx - mulxq -104(%rsp), %rdi, %rsi ## 8-byte Folded Reload - mulxq -24(%rsp), %r14, %rcx ## 8-byte Folded Reload - addq %rdi, %rcx - mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload - adcq %rsi, %rbx - mulxq -72(%rsp), %rsi, %r15 ## 8-byte Folded Reload - adcq %rdi, %rsi - mulxq -80(%rsp), %rdi, %rbp ## 8-byte Folded Reload - adcq %r15, %rdi - adcq $0, %rbp - addq %r8, %r14 - adcq %r9, %rcx - adcq %r10, %rbx - adcq %r11, %rsi - adcq %rax, %rdi - adcq $0, %rbp - movq %r14, %rdx - imulq -32(%rsp), %rdx ## 8-byte Folded Reload - mulxq %r12, %rax, %r11 - addq %r14, %rax - mulxq -40(%rsp), %r8, %r14 ## 8-byte Folded Reload - adcq %rcx, %r8 - mulxq %r13, %r9, %rax - adcq %rbx, %r9 - movq -88(%rsp), %r12 ## 8-byte Reload - mulxq %r12, %r10, %rbx - adcq %rsi, %r10 - mulxq -56(%rsp), %rcx, %rdx ## 8-byte Folded Reload - adcq %rdi, %rcx - adcq $0, %rbp - addq %r11, %r8 - adcq %r14, %r9 - adcq %rax, %r10 - adcq %rbx, %rcx - adcq %rdx, %rbp - movq -96(%rsp), %rax ## 
8-byte Reload - movq 32(%rax), %rdx - mulxq -104(%rsp), %rdi, %rbx ## 8-byte Folded Reload - mulxq -24(%rsp), %r14, %rsi ## 8-byte Folded Reload - addq %rdi, %rsi - mulxq -64(%rsp), %rdi, %rax ## 8-byte Folded Reload - adcq %rbx, %rdi - mulxq -72(%rsp), %rbx, %r15 ## 8-byte Folded Reload - adcq %rax, %rbx - mulxq -80(%rsp), %r11, %rax ## 8-byte Folded Reload - adcq %r15, %r11 - adcq $0, %rax - addq %r8, %r14 - adcq %r9, %rsi - adcq %r10, %rdi - adcq %rcx, %rbx - adcq %rbp, %r11 - adcq $0, %rax - movq -32(%rsp), %rdx ## 8-byte Reload - imulq %r14, %rdx - movq -48(%rsp), %r10 ## 8-byte Reload - mulxq %r10, %rcx, %rbp - movq %rbp, -96(%rsp) ## 8-byte Spill - addq %r14, %rcx - movq -40(%rsp), %r9 ## 8-byte Reload - mulxq %r9, %r14, %rcx - movq %rcx, -104(%rsp) ## 8-byte Spill - adcq %rsi, %r14 - movq %r13, %r8 - mulxq %r8, %r15, %r13 - adcq %rdi, %r15 - mulxq %r12, %rbp, %rcx - adcq %rbx, %rbp - movq -56(%rsp), %rbx ## 8-byte Reload - mulxq %rbx, %r12, %rdx - adcq %r11, %r12 - adcq $0, %rax - addq -96(%rsp), %r14 ## 8-byte Folded Reload - adcq -104(%rsp), %r15 ## 8-byte Folded Reload - adcq %r13, %rbp - adcq %rcx, %r12 - adcq %rdx, %rax - movq %r14, %rcx - subq %r10, %rcx - movq %r15, %rsi - sbbq %r9, %rsi - movq %rbp, %rdi - sbbq %r8, %rdi - movq %r12, %r8 - sbbq -88(%rsp), %r8 ## 8-byte Folded Reload - movq %rax, %rdx - sbbq %rbx, %rdx - movq %rdx, %rbx - sarq $63, %rbx - cmovsq %r14, %rcx - movq -8(%rsp), %rbx ## 8-byte Reload - movq %rcx, (%rbx) - cmovsq %r15, %rsi - movq %rsi, 8(%rbx) - cmovsq %rbp, %rdi - movq %rdi, 16(%rbx) - cmovsq %r12, %r8 - movq %r8, 24(%rbx) - cmovsq %rax, %rdx - movq %rdx, 32(%rbx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montRed5Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montRed5Lbmi2: ## @mcl_fp_montRed5Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rdi, -8(%rsp) ## 8-byte Spill - movq -8(%rcx), %rax - movq %rax, -104(%rsp) ## 8-byte Spill - movq (%rsi), %r15 - movq %r15, %rdx - imulq %rax, %rdx - movq 32(%rcx), %rax - movq %rax, -72(%rsp) ## 8-byte Spill - mulxq %rax, %r8, %r14 - movq 24(%rcx), %r12 - mulxq %r12, %r10, %r13 - movq %r12, -56(%rsp) ## 8-byte Spill - movq 16(%rcx), %r9 - mulxq %r9, %rdi, %rbp - movq %r9, -64(%rsp) ## 8-byte Spill - movq (%rcx), %rbx - movq %rbx, -40(%rsp) ## 8-byte Spill - movq 8(%rcx), %rax - movq %rax, -80(%rsp) ## 8-byte Spill - mulxq %rax, %rax, %r11 - mulxq %rbx, %rdx, %rcx - addq %rax, %rcx - adcq %rdi, %r11 - adcq %r10, %rbp - adcq %r8, %r13 - adcq $0, %r14 - addq %r15, %rdx - movq 72(%rsi), %rax - movq 64(%rsi), %rdx - adcq 8(%rsi), %rcx - adcq 16(%rsi), %r11 - adcq 24(%rsi), %rbp - adcq 32(%rsi), %r13 - adcq 40(%rsi), %r14 - movq %r14, -112(%rsp) ## 8-byte Spill - movq 56(%rsi), %rdi - movq 48(%rsi), %rsi - adcq $0, %rsi - movq %rsi, -32(%rsp) ## 8-byte Spill - adcq $0, %rdi - movq %rdi, -88(%rsp) ## 8-byte Spill - adcq $0, %rdx - movq %rdx, -96(%rsp) ## 8-byte Spill - adcq $0, %rax - movq %rax, -48(%rsp) ## 8-byte Spill - sbbq %rsi, %rsi - andl $1, %esi - movq %rcx, %rdx - movq -104(%rsp), %r14 ## 8-byte Reload - imulq %r14, %rdx - mulxq -72(%rsp), %rax, %r15 ## 8-byte Folded Reload - movq %rax, -16(%rsp) ## 8-byte Spill - mulxq %r12, %rax, %r10 - movq %rax, -24(%rsp) ## 8-byte Spill - mulxq %r9, %rbx, %r8 - movq -80(%rsp), %r12 ## 8-byte Reload - mulxq %r12, %r9, %rdi - mulxq -40(%rsp), %rdx, %rax ## 8-byte Folded Reload - addq %r9, %rax - adcq %rbx, %rdi - adcq -24(%rsp), %r8 ## 8-byte Folded Reload 
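## mcl_fp_montRed5L, entered above, is the standalone Montgomery reduction:
## it takes the 10-word value produced by mulPre/sqrPre and runs five rounds
## of m = t[0]*inv; t += m*p; t >>= 64, absorbing the high input words as it
## shifts, then applies the usual conditional subtraction to land in [0, p).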
- adcq -16(%rsp), %r10 ## 8-byte Folded Reload - adcq $0, %r15 - addq %rcx, %rdx - adcq %r11, %rax - adcq %rbp, %rdi - adcq %r13, %r8 - adcq -112(%rsp), %r10 ## 8-byte Folded Reload - adcq -32(%rsp), %r15 ## 8-byte Folded Reload - adcq $0, -88(%rsp) ## 8-byte Folded Spill - adcq $0, -96(%rsp) ## 8-byte Folded Spill - adcq $0, -48(%rsp) ## 8-byte Folded Spill - adcq $0, %rsi - movq %rax, %rdx - imulq %r14, %rdx - mulxq -72(%rsp), %rcx, %r13 ## 8-byte Folded Reload - movq %rcx, -112(%rsp) ## 8-byte Spill - mulxq -56(%rsp), %rcx, %r14 ## 8-byte Folded Reload - movq %rcx, -32(%rsp) ## 8-byte Spill - mulxq -64(%rsp), %r11, %rbx ## 8-byte Folded Reload - mulxq %r12, %r9, %rbp - mulxq -40(%rsp), %rdx, %rcx ## 8-byte Folded Reload - addq %r9, %rcx - adcq %r11, %rbp - adcq -32(%rsp), %rbx ## 8-byte Folded Reload - adcq -112(%rsp), %r14 ## 8-byte Folded Reload - adcq $0, %r13 - addq %rax, %rdx - adcq %rdi, %rcx - adcq %r8, %rbp - adcq %r10, %rbx - adcq %r15, %r14 - adcq -88(%rsp), %r13 ## 8-byte Folded Reload - adcq $0, -96(%rsp) ## 8-byte Folded Spill - adcq $0, -48(%rsp) ## 8-byte Folded Spill - adcq $0, %rsi - movq %rcx, %rdx - imulq -104(%rsp), %rdx ## 8-byte Folded Reload - movq -72(%rsp), %r9 ## 8-byte Reload - mulxq %r9, %rax, %r12 - movq %rax, -88(%rsp) ## 8-byte Spill - mulxq -56(%rsp), %rax, %r10 ## 8-byte Folded Reload - movq %rax, -112(%rsp) ## 8-byte Spill - mulxq -64(%rsp), %r8, %r11 ## 8-byte Folded Reload - mulxq -80(%rsp), %rdi, %r15 ## 8-byte Folded Reload - mulxq -40(%rsp), %rdx, %rax ## 8-byte Folded Reload - addq %rdi, %rax - adcq %r8, %r15 - adcq -112(%rsp), %r11 ## 8-byte Folded Reload - adcq -88(%rsp), %r10 ## 8-byte Folded Reload - adcq $0, %r12 - addq %rcx, %rdx - adcq %rbp, %rax - adcq %rbx, %r15 - adcq %r14, %r11 - adcq %r13, %r10 - adcq -96(%rsp), %r12 ## 8-byte Folded Reload - adcq $0, -48(%rsp) ## 8-byte Folded Spill - adcq $0, %rsi - movq -104(%rsp), %rdx ## 8-byte Reload - imulq %rax, %rdx - mulxq %r9, %rdi, %rcx - movq %rdi, -96(%rsp) ## 8-byte Spill - mulxq -56(%rsp), %rbp, %rdi ## 8-byte Folded Reload - movq %rbp, -104(%rsp) ## 8-byte Spill - mulxq -64(%rsp), %r13, %rbp ## 8-byte Folded Reload - movq -40(%rsp), %r14 ## 8-byte Reload - mulxq %r14, %r8, %r9 - mulxq -80(%rsp), %rbx, %rdx ## 8-byte Folded Reload - addq %r9, %rbx - adcq %r13, %rdx - adcq -104(%rsp), %rbp ## 8-byte Folded Reload - adcq -96(%rsp), %rdi ## 8-byte Folded Reload - adcq $0, %rcx - addq %rax, %r8 - adcq %r15, %rbx - adcq %r11, %rdx - adcq %r10, %rbp - adcq %r12, %rdi - adcq -48(%rsp), %rcx ## 8-byte Folded Reload - adcq $0, %rsi - movq %rbx, %rax - subq %r14, %rax - movq %rdx, %r8 - sbbq -80(%rsp), %r8 ## 8-byte Folded Reload - movq %rbp, %r9 - sbbq -64(%rsp), %r9 ## 8-byte Folded Reload - movq %rdi, %r10 - sbbq -56(%rsp), %r10 ## 8-byte Folded Reload - movq %rcx, %r11 - sbbq -72(%rsp), %r11 ## 8-byte Folded Reload - sbbq $0, %rsi - andl $1, %esi - cmovneq %rcx, %r11 - testb %sil, %sil - cmovneq %rbx, %rax - movq -8(%rsp), %rcx ## 8-byte Reload - movq %rax, (%rcx) - cmovneq %rdx, %r8 - movq %r8, 8(%rcx) - cmovneq %rbp, %r9 - movq %r9, 16(%rcx) - cmovneq %rdi, %r10 - movq %r10, 24(%rcx) - movq %r11, 32(%rcx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_addPre5Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addPre5Lbmi2: ## @mcl_fp_addPre5Lbmi2 -## BB#0: - movq 32(%rdx), %r8 - movq 24(%rdx), %r9 - movq 24(%rsi), %r11 - movq 32(%rsi), %r10 - movq 16(%rdx), %rcx - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 
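## mcl_fp_addPre5L here and mcl_fp_subPre5L just below are the reduction-free
## primitives: a plain five-word add/subtract whose closing
## sbbq %rax, %rax / andl $1, %eax hands the carry (or borrow) bit back to
## the caller in %eax.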
16(%rsi), %rcx - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %rcx, 16(%rdi) - adcq %r9, %r11 - movq %r11, 24(%rdi) - adcq %r8, %r10 - movq %r10, 32(%rdi) - sbbq %rax, %rax - andl $1, %eax - retq - - .globl _mcl_fp_subPre5Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subPre5Lbmi2: ## @mcl_fp_subPre5Lbmi2 -## BB#0: - pushq %rbx - movq 32(%rsi), %r10 - movq 24(%rdx), %r8 - movq 32(%rdx), %r9 - movq 24(%rsi), %r11 - movq 16(%rsi), %rcx - movq (%rsi), %rbx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rbx - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %rcx - movq %rbx, (%rdi) - movq %rsi, 8(%rdi) - movq %rcx, 16(%rdi) - sbbq %r8, %r11 - movq %r11, 24(%rdi) - sbbq %r9, %r10 - movq %r10, 32(%rdi) - sbbq $0, %rax - andl $1, %eax - popq %rbx - retq - - .globl _mcl_fp_shr1_5Lbmi2 - .p2align 4, 0x90 -_mcl_fp_shr1_5Lbmi2: ## @mcl_fp_shr1_5Lbmi2 -## BB#0: - movq 32(%rsi), %r8 - movq 24(%rsi), %rcx - movq 16(%rsi), %rdx - movq (%rsi), %rax - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rax - movq %rax, (%rdi) - shrdq $1, %rdx, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rcx, %rdx - movq %rdx, 16(%rdi) - shrdq $1, %r8, %rcx - movq %rcx, 24(%rdi) - shrq %r8 - movq %r8, 32(%rdi) - retq - - .globl _mcl_fp_add5Lbmi2 - .p2align 4, 0x90 -_mcl_fp_add5Lbmi2: ## @mcl_fp_add5Lbmi2 -## BB#0: - pushq %rbx - movq 32(%rdx), %r11 - movq 24(%rdx), %rbx - movq 24(%rsi), %r9 - movq 32(%rsi), %r8 - movq 16(%rdx), %r10 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r10 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r10, 16(%rdi) - adcq %rbx, %r9 - movq %r9, 24(%rdi) - adcq %r11, %r8 - movq %r8, 32(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq 16(%rcx), %r10 - sbbq 24(%rcx), %r9 - sbbq 32(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne LBB74_2 -## BB#1: ## %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r10, 16(%rdi) - movq %r9, 24(%rdi) - movq %r8, 32(%rdi) -LBB74_2: ## %carry - popq %rbx - retq - - .globl _mcl_fp_addNF5Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addNF5Lbmi2: ## @mcl_fp_addNF5Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq 32(%rdx), %r8 - movq 24(%rdx), %r9 - movq 16(%rdx), %r10 - movq (%rdx), %r14 - movq 8(%rdx), %r11 - addq (%rsi), %r14 - adcq 8(%rsi), %r11 - adcq 16(%rsi), %r10 - adcq 24(%rsi), %r9 - adcq 32(%rsi), %r8 - movq %r14, %rsi - subq (%rcx), %rsi - movq %r11, %rdx - sbbq 8(%rcx), %rdx - movq %r10, %rbx - sbbq 16(%rcx), %rbx - movq %r9, %r15 - sbbq 24(%rcx), %r15 - movq %r8, %rax - sbbq 32(%rcx), %rax - movq %rax, %rcx - sarq $63, %rcx - cmovsq %r14, %rsi - movq %rsi, (%rdi) - cmovsq %r11, %rdx - movq %rdx, 8(%rdi) - cmovsq %r10, %rbx - movq %rbx, 16(%rdi) - cmovsq %r9, %r15 - movq %r15, 24(%rdi) - cmovsq %r8, %rax - movq %rax, 32(%rdi) - popq %rbx - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_sub5Lbmi2 - .p2align 4, 0x90 -_mcl_fp_sub5Lbmi2: ## @mcl_fp_sub5Lbmi2 -## BB#0: - pushq %r14 - pushq %rbx - movq 32(%rsi), %r8 - movq 24(%rdx), %r11 - movq 32(%rdx), %r14 - movq 24(%rsi), %r9 - movq 16(%rsi), %r10 - movq (%rsi), %rax - movq 8(%rsi), %rsi - xorl %ebx, %ebx - subq (%rdx), %rax - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %r10 - movq %rax, (%rdi) - movq %rsi, 8(%rdi) - movq %r10, 16(%rdi) - sbbq %r11, %r9 - movq %r9, 24(%rdi) - sbbq %r14, %r8 - movq %r8, 32(%rdi) - sbbq $0, %rbx - testb $1, %bl - je LBB76_2 -## BB#1: ## %carry - movq 32(%rcx), %r11 - movq 24(%rcx), %r14 - movq 8(%rcx), %rdx - movq 16(%rcx), %rbx - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %rsi, %rdx - movq %rdx, 8(%rdi) - adcq %r10, 
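## mcl_fp_shr1_5L above halves a 5-limb value with a chain of shrdq $1 funnel
## shifts, each pulling one bit down from the next-higher limb;
## mcl_fp_add5L / mcl_fp_sub5L repeat the 4-limb branch-on-carry reduction
## pattern one limb wider.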
%rbx - movq %rbx, 16(%rdi) - adcq %r9, %r14 - movq %r14, 24(%rdi) - adcq %r8, %r11 - movq %r11, 32(%rdi) -LBB76_2: ## %nocarry - popq %rbx - popq %r14 - retq - - .globl _mcl_fp_subNF5Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subNF5Lbmi2: ## @mcl_fp_subNF5Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 32(%rsi), %r12 - movdqu (%rdx), %xmm0 - movdqu 16(%rdx), %xmm1 - pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] - movd %xmm2, %r9 - movdqu (%rsi), %xmm2 - movdqu 16(%rsi), %xmm3 - pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1] - movd %xmm4, %r8 - movd %xmm1, %r10 - movd %xmm3, %r14 - pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] - movd %xmm1, %r11 - pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1] - movd %xmm1, %r15 - movd %xmm0, %rsi - movd %xmm2, %r13 - subq %rsi, %r13 - sbbq %r11, %r15 - sbbq %r10, %r14 - sbbq %r9, %r8 - sbbq 32(%rdx), %r12 - movq %r12, %rdx - sarq $63, %rdx - movq %rdx, %rsi - shldq $1, %r12, %rsi - movq 8(%rcx), %rax - andq %rsi, %rax - andq (%rcx), %rsi - movq 32(%rcx), %r9 - andq %rdx, %r9 - rorxq $63, %rdx, %rbx - andq 24(%rcx), %rdx - andq 16(%rcx), %rbx - addq %r13, %rsi - movq %rsi, (%rdi) - adcq %r15, %rax - movq %rax, 8(%rdi) - adcq %r14, %rbx - movq %rbx, 16(%rdi) - adcq %r8, %rdx - movq %rdx, 24(%rdi) - adcq %r12, %r9 - movq %r9, 32(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fpDbl_add5Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_add5Lbmi2: ## @mcl_fpDbl_add5Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 72(%rdx), %rax - movq %rax, -8(%rsp) ## 8-byte Spill - movq 64(%rdx), %r11 - movq 56(%rdx), %r14 - movq 48(%rdx), %r15 - movq 24(%rsi), %rbp - movq 32(%rsi), %r13 - movq 16(%rdx), %r12 - movq (%rdx), %rbx - movq 8(%rdx), %rax - addq (%rsi), %rbx - adcq 8(%rsi), %rax - adcq 16(%rsi), %r12 - adcq 24(%rdx), %rbp - adcq 32(%rdx), %r13 - movq 40(%rdx), %r9 - movq %rbx, (%rdi) - movq 72(%rsi), %r8 - movq %rax, 8(%rdi) - movq 64(%rsi), %r10 - movq %r12, 16(%rdi) - movq 56(%rsi), %r12 - movq %rbp, 24(%rdi) - movq 48(%rsi), %rbp - movq 40(%rsi), %rbx - movq %r13, 32(%rdi) - adcq %r9, %rbx - adcq %r15, %rbp - adcq %r14, %r12 - adcq %r11, %r10 - adcq -8(%rsp), %r8 ## 8-byte Folded Reload - sbbq %rsi, %rsi - andl $1, %esi - movq %rbx, %rax - subq (%rcx), %rax - movq %rbp, %rdx - sbbq 8(%rcx), %rdx - movq %r12, %r9 - sbbq 16(%rcx), %r9 - movq %r10, %r11 - sbbq 24(%rcx), %r11 - movq %r8, %r14 - sbbq 32(%rcx), %r14 - sbbq $0, %rsi - andl $1, %esi - cmovneq %rbx, %rax - movq %rax, 40(%rdi) - testb %sil, %sil - cmovneq %rbp, %rdx - movq %rdx, 48(%rdi) - cmovneq %r12, %r9 - movq %r9, 56(%rdi) - cmovneq %r10, %r11 - movq %r11, 64(%rdi) - cmovneq %r8, %r14 - movq %r14, 72(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sub5Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sub5Lbmi2: ## @mcl_fpDbl_sub5Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 72(%rdx), %r9 - movq 64(%rdx), %r10 - movq 56(%rdx), %r14 - movq 16(%rsi), %r8 - movq (%rsi), %r15 - movq 8(%rsi), %r11 - xorl %eax, %eax - subq (%rdx), %r15 - sbbq 8(%rdx), %r11 - sbbq 16(%rdx), %r8 - movq 24(%rsi), %r12 - sbbq 24(%rdx), %r12 - movq %r15, (%rdi) - movq 32(%rsi), %rbx - sbbq 32(%rdx), %rbx - movq %r11, 8(%rdi) - movq 48(%rdx), %r15 - movq 40(%rdx), %rdx - movq %r8, 16(%rdi) - movq 72(%rsi), %r8 - movq %r12, 24(%rdi) - movq 64(%rsi), %r11 - movq %rbx, 32(%rdi) - movq 40(%rsi), %rbp - sbbq %rdx, 
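## mcl_fp_subNF5L above builds its borrow mask with sarq $63 on the top limb
## and then fans copies of it out through shldq/rorxq (apparently to shorten
## the dependency chain) before ANDing the five modulus words and adding
## them back in.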
%rbp - movq 56(%rsi), %r12 - movq 48(%rsi), %r13 - sbbq %r15, %r13 - sbbq %r14, %r12 - sbbq %r10, %r11 - sbbq %r9, %r8 - movl $0, %edx - sbbq $0, %rdx - andl $1, %edx - movq (%rcx), %rsi - cmoveq %rax, %rsi - testb %dl, %dl - movq 16(%rcx), %rdx - cmoveq %rax, %rdx - movq 8(%rcx), %rbx - cmoveq %rax, %rbx - movq 32(%rcx), %r9 - cmoveq %rax, %r9 - cmovneq 24(%rcx), %rax - addq %rbp, %rsi - movq %rsi, 40(%rdi) - adcq %r13, %rbx - movq %rbx, 48(%rdi) - adcq %r12, %rdx - movq %rdx, 56(%rdi) - adcq %r11, %rax - movq %rax, 64(%rdi) - adcq %r8, %r9 - movq %r9, 72(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mulUnitPre6Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mulUnitPre6Lbmi2: ## @mcl_fp_mulUnitPre6Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - mulxq 40(%rsi), %r8, %r11 - mulxq 32(%rsi), %r9, %r12 - mulxq 24(%rsi), %r10, %rcx - mulxq 16(%rsi), %r14, %rbx - mulxq 8(%rsi), %r15, %rax - mulxq (%rsi), %rdx, %rsi - movq %rdx, (%rdi) - addq %r15, %rsi - movq %rsi, 8(%rdi) - adcq %r14, %rax - movq %rax, 16(%rdi) - adcq %r10, %rbx - movq %rbx, 24(%rdi) - adcq %r9, %rcx - movq %rcx, 32(%rdi) - adcq %r8, %r12 - movq %r12, 40(%rdi) - adcq $0, %r11 - movq %r11, 48(%rdi) - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq - - .globl _mcl_fpDbl_mulPre6Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_mulPre6Lbmi2: ## @mcl_fpDbl_mulPre6Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %r11 - movq %rdi, -48(%rsp) ## 8-byte Spill - movq (%rsi), %r15 - movq 8(%rsi), %rcx - movq %rcx, -80(%rsp) ## 8-byte Spill - movq (%r11), %rax - movq %r11, -56(%rsp) ## 8-byte Spill - movq %rcx, %rdx - mulxq %rax, %rcx, %r14 - movq %r15, %rdx - mulxq %rax, %rdx, %rbp - movq %rdx, -72(%rsp) ## 8-byte Spill - movq 24(%rsi), %rbx - movq %rbx, -88(%rsp) ## 8-byte Spill - movq 16(%rsi), %rdx - movq %rdx, -64(%rsp) ## 8-byte Spill - addq %rcx, %rbp - mulxq %rax, %rcx, %r12 - adcq %r14, %rcx - movq %rbx, %rdx - mulxq %rax, %rbx, %r14 - adcq %r12, %rbx - movq 32(%rsi), %r12 - movq %r12, %rdx - mulxq %rax, %r8, %r13 - adcq %r14, %r8 - movq 40(%rsi), %r14 - movq %r14, %rdx - mulxq %rax, %r9, %r10 - adcq %r13, %r9 - movq -72(%rsp), %rax ## 8-byte Reload - movq %rax, (%rdi) - adcq $0, %r10 - movq 8(%r11), %rdi - movq %r15, %rdx - mulxq %rdi, %r13, %rax - movq %rax, -72(%rsp) ## 8-byte Spill - addq %rbp, %r13 - movq -80(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %rbp, %rax - movq %rax, -80(%rsp) ## 8-byte Spill - adcq %rcx, %rbp - movq -64(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %rax, %r11 - adcq %rbx, %rax - movq -88(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %rbx, %rcx - movq %rcx, -88(%rsp) ## 8-byte Spill - adcq %r8, %rbx - movq %r12, %rdx - mulxq %rdi, %rcx, %r8 - adcq %r9, %rcx - movq %r14, %rdx - mulxq %rdi, %r12, %rdx - adcq %r10, %r12 - sbbq %r15, %r15 - andl $1, %r15d - addq -72(%rsp), %rbp ## 8-byte Folded Reload - adcq -80(%rsp), %rax ## 8-byte Folded Reload - adcq %r11, %rbx - movq -48(%rsp), %rdi ## 8-byte Reload - movq %r13, 8(%rdi) - adcq -88(%rsp), %rcx ## 8-byte Folded Reload - adcq %r8, %r12 - adcq %rdx, %r15 - movq (%rsi), %rdx - movq %rdx, -88(%rsp) ## 8-byte Spill - movq 8(%rsi), %r8 - movq %r8, -80(%rsp) ## 8-byte Spill - movq -56(%rsp), %r14 ## 8-byte Reload - movq 16(%r14), %rdi - mulxq %rdi, %r13, %rdx - movq %rdx, -8(%rsp) ## 8-byte Spill - addq %rbp, %r13 - movq %r8, %rdx - mulxq %rdi, %r8, %rdx - movq %rdx, -16(%rsp) ## 8-byte Spill - adcq %rax, %r8 - movq 16(%rsi), %rdx - movq %rdx, 
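## The fpDbl_add5L/fpDbl_sub5L pair above operates on 10-word double-width
## values: the low five words are added or subtracted and stored as-is, and
## only the high half is reduced against p (conditional subtract on carry
## for the add, masked add-back of p on borrow for the sub), which keeps the
## result a valid input for montRed.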
-64(%rsp) ## 8-byte Spill - mulxq %rdi, %r11, %rax - movq %rax, -24(%rsp) ## 8-byte Spill - adcq %rbx, %r11 - movq 24(%rsi), %rdx - movq %rdx, -72(%rsp) ## 8-byte Spill - mulxq %rdi, %rax, %rbx - adcq %rcx, %rax - movq 32(%rsi), %rdx - movq %rdx, -32(%rsp) ## 8-byte Spill - mulxq %rdi, %r10, %rcx - adcq %r12, %r10 - movq 40(%rsi), %rdx - movq %rdx, -40(%rsp) ## 8-byte Spill - mulxq %rdi, %r9, %rdx - adcq %r15, %r9 - sbbq %rbp, %rbp - andl $1, %ebp - addq -8(%rsp), %r8 ## 8-byte Folded Reload - adcq -16(%rsp), %r11 ## 8-byte Folded Reload - adcq -24(%rsp), %rax ## 8-byte Folded Reload - adcq %rbx, %r10 - adcq %rcx, %r9 - adcq %rdx, %rbp - movq -48(%rsp), %rcx ## 8-byte Reload - movq %r13, 16(%rcx) - movq 24(%r14), %rdi - movq -88(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %r12, %rcx - movq %rcx, -88(%rsp) ## 8-byte Spill - addq %r8, %r12 - movq -80(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %rbx, %rcx - movq %rcx, -80(%rsp) ## 8-byte Spill - adcq %r11, %rbx - movq -64(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %rcx, %r11 - adcq %rax, %rcx - movq -72(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %r14, %rax - movq %rax, -64(%rsp) ## 8-byte Spill - adcq %r10, %r14 - movq -32(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %r8, %rax - adcq %r9, %r8 - movq -40(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %r13, %rdx - adcq %rbp, %r13 - sbbq %r15, %r15 - andl $1, %r15d - addq -88(%rsp), %rbx ## 8-byte Folded Reload - adcq -80(%rsp), %rcx ## 8-byte Folded Reload - adcq %r11, %r14 - movq -48(%rsp), %rdi ## 8-byte Reload - movq %r12, 24(%rdi) - adcq -64(%rsp), %r8 ## 8-byte Folded Reload - adcq %rax, %r13 - adcq %rdx, %r15 - movq (%rsi), %rdx - movq %rdx, -88(%rsp) ## 8-byte Spill - movq 8(%rsi), %rbp - movq %rbp, -80(%rsp) ## 8-byte Spill - movq -56(%rsp), %rax ## 8-byte Reload - movq 32(%rax), %rdi - mulxq %rdi, %r12, %rax - movq %rax, -64(%rsp) ## 8-byte Spill - addq %rbx, %r12 - movq %rbp, %rdx - mulxq %rdi, %rbx, %rax - movq %rax, -72(%rsp) ## 8-byte Spill - adcq %rcx, %rbx - movq 16(%rsi), %r11 - movq %r11, %rdx - mulxq %rdi, %rax, %rcx - movq %rcx, -32(%rsp) ## 8-byte Spill - adcq %r14, %rax - movq 24(%rsi), %r14 - movq %r14, %rdx - mulxq %rdi, %rbp, %rcx - movq %rcx, -40(%rsp) ## 8-byte Spill - adcq %r8, %rbp - movq 32(%rsi), %r8 - movq %r8, %rdx - mulxq %rdi, %rcx, %r10 - adcq %r13, %rcx - movq 40(%rsi), %r13 - movq %r13, %rdx - mulxq %rdi, %r9, %rdx - adcq %r15, %r9 - sbbq %rsi, %rsi - andl $1, %esi - addq -64(%rsp), %rbx ## 8-byte Folded Reload - adcq -72(%rsp), %rax ## 8-byte Folded Reload - adcq -32(%rsp), %rbp ## 8-byte Folded Reload - adcq -40(%rsp), %rcx ## 8-byte Folded Reload - adcq %r10, %r9 - adcq %rdx, %rsi - movq -48(%rsp), %r10 ## 8-byte Reload - movq %r12, 32(%r10) - movq -56(%rsp), %rdx ## 8-byte Reload - movq 40(%rdx), %rdi - movq -88(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %r15, %rdx - movq %rdx, -56(%rsp) ## 8-byte Spill - addq %rbx, %r15 - movq -80(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %rbx, %r12 - adcq %rax, %rbx - movq %r11, %rdx - mulxq %rdi, %rax, %r11 - adcq %rbp, %rax - movq %r14, %rdx - mulxq %rdi, %rbp, %r14 - adcq %rcx, %rbp - movq %r8, %rdx - mulxq %rdi, %rcx, %r8 - adcq %r9, %rcx - movq %r13, %rdx - mulxq %rdi, %rdi, %r9 - adcq %rsi, %rdi - sbbq %rsi, %rsi - andl $1, %esi - addq -56(%rsp), %rbx ## 8-byte Folded Reload - movq %r15, 40(%r10) - movq %rbx, 48(%r10) - adcq %r12, %rax - movq %rax, 56(%r10) - adcq %r11, %rbp - movq %rbp, 64(%r10) - adcq %r14, %rcx - movq %rcx, 72(%r10) - adcq %r8, %rdi - movq %rdi, 80(%r10) - adcq %r9, %rsi - movq %rsi, 88(%r10) - popq 
%rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sqrPre6Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre6Lbmi2: ## @mcl_fpDbl_sqrPre6Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, %r9 - movq 16(%rsi), %rdx - movq %rdx, -64(%rsp) ## 8-byte Spill - movq (%rsi), %rcx - movq 8(%rsi), %rax - mulxq %rcx, %r10, %r8 - movq 24(%rsi), %rbp - movq %rbp, -48(%rsp) ## 8-byte Spill - movq %rax, %rdx - mulxq %rcx, %r11, %rbx - movq %rbx, -40(%rsp) ## 8-byte Spill - movq %rcx, %rdx - mulxq %rcx, %rdx, %r14 - movq %rdx, -56(%rsp) ## 8-byte Spill - addq %r11, %r14 - adcq %rbx, %r10 - movq %rbp, %rdx - mulxq %rcx, %r15, %rbp - adcq %r8, %r15 - movq 32(%rsi), %rbx - movq %rbx, %rdx - mulxq %rcx, %r8, %r13 - adcq %rbp, %r8 - movq 40(%rsi), %rdi - movq %rdi, %rdx - mulxq %rcx, %rcx, %r12 - adcq %r13, %rcx - movq %r9, -24(%rsp) ## 8-byte Spill - movq -56(%rsp), %rdx ## 8-byte Reload - movq %rdx, (%r9) - adcq $0, %r12 - addq %r11, %r14 - movq %rax, %rdx - mulxq %rax, %rbp, %rdx - movq %rdx, -56(%rsp) ## 8-byte Spill - adcq %r10, %rbp - movq -64(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %r13, %r10 - adcq %r15, %r13 - movq -48(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %r15, %rdx - movq %rdx, -64(%rsp) ## 8-byte Spill - adcq %r8, %r15 - movq %rbx, %rdx - mulxq %rax, %rbx, %r8 - adcq %rcx, %rbx - movq %rdi, %rdx - mulxq %rax, %r11, %rax - adcq %r12, %r11 - sbbq %r12, %r12 - andl $1, %r12d - addq -40(%rsp), %rbp ## 8-byte Folded Reload - adcq -56(%rsp), %r13 ## 8-byte Folded Reload - movq %r14, 8(%r9) - adcq %r10, %r15 - adcq -64(%rsp), %rbx ## 8-byte Folded Reload - adcq %r8, %r11 - adcq %rax, %r12 - movq (%rsi), %rdx - movq %rdx, -48(%rsp) ## 8-byte Spill - movq 8(%rsi), %rdi - movq %rdi, -64(%rsp) ## 8-byte Spill - movq 16(%rsi), %rcx - mulxq %rcx, %rax, %rdx - movq %rdx, -32(%rsp) ## 8-byte Spill - addq %rbp, %rax - movq %rax, -40(%rsp) ## 8-byte Spill - movq %rdi, %rdx - mulxq %rcx, %rbp, %rax - movq %rax, -8(%rsp) ## 8-byte Spill - adcq %r13, %rbp - movq %rcx, %rdx - mulxq %rcx, %r13, %rax - movq %rax, -16(%rsp) ## 8-byte Spill - adcq %r15, %r13 - movq 24(%rsi), %rax - movq %rax, %rdx - mulxq %rcx, %r8, %rdi - movq %rdi, -56(%rsp) ## 8-byte Spill - adcq %r8, %rbx - movq 32(%rsi), %r10 - movq %r10, %rdx - mulxq %rcx, %r14, %r15 - adcq %r11, %r14 - movq 40(%rsi), %r11 - movq %r11, %rdx - mulxq %rcx, %r9, %rdx - adcq %r12, %r9 - sbbq %rcx, %rcx - andl $1, %ecx - addq -32(%rsp), %rbp ## 8-byte Folded Reload - adcq -8(%rsp), %r13 ## 8-byte Folded Reload - adcq -16(%rsp), %rbx ## 8-byte Folded Reload - adcq %rdi, %r14 - adcq %r15, %r9 - adcq %rdx, %rcx - movq -48(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %rdi, %rdx - movq %rdx, -48(%rsp) ## 8-byte Spill - addq %rbp, %rdi - movq -64(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %r15, %rbp - adcq %r13, %r15 - adcq %r8, %rbx - movq %rax, %rdx - mulxq %rax, %r8, %rdx - movq %rdx, -64(%rsp) ## 8-byte Spill - adcq %r14, %r8 - movq %r10, %rdx - mulxq %rax, %r12, %r10 - adcq %r9, %r12 - movq %r11, %rdx - mulxq %rax, %r13, %rax - adcq %rcx, %r13 - sbbq %r9, %r9 - andl $1, %r9d - addq -48(%rsp), %r15 ## 8-byte Folded Reload - adcq %rbp, %rbx - movq -24(%rsp), %rdx ## 8-byte Reload - movq -40(%rsp), %rbp ## 8-byte Reload - movq %rbp, 16(%rdx) - movq %rdi, 24(%rdx) - adcq -56(%rsp), %r8 ## 8-byte Folded Reload - adcq -64(%rsp), %r12 ## 8-byte Folded Reload - adcq %r10, %r13 - adcq %rax, %r9 - movq (%rsi), %rcx - movq 8(%rsi), %rdi - movq %rdi, -64(%rsp) ## 8-byte Spill - movq 
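## The sqrPre kernels (the 5-limb one earlier, the 6-limb one starting here)
## exploit symmetry: each cross product a[i]*a[j] is computed once and added
## into the accumulator twice (note the repeated addq %r11, %r14) rather
## than being recomputed for the mirrored column.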
32(%rsi), %rax - movq %rcx, %rdx - mulxq %rax, %rdx, %rbp - movq %rbp, -56(%rsp) ## 8-byte Spill - addq %r15, %rdx - movq %rdx, -40(%rsp) ## 8-byte Spill - movq %rdi, %rdx - mulxq %rax, %r15, %rdx - movq %rdx, -32(%rsp) ## 8-byte Spill - adcq %rbx, %r15 - movq 16(%rsi), %r10 - movq %r10, %rdx - mulxq %rax, %r14, %rbx - adcq %r8, %r14 - movq 24(%rsi), %r8 - movq %r8, %rdx - mulxq %rax, %rbp, %rdi - adcq %r12, %rbp - movq %rax, %rdx - mulxq %rax, %r11, %r12 - adcq %r13, %r11 - movq 40(%rsi), %rsi - movq %rsi, %rdx - mulxq %rax, %r13, %rdx - movq %rdx, -48(%rsp) ## 8-byte Spill - adcq %r13, %r9 - sbbq %rax, %rax - andl $1, %eax - addq -56(%rsp), %r15 ## 8-byte Folded Reload - adcq -32(%rsp), %r14 ## 8-byte Folded Reload - adcq %rbx, %rbp - adcq %rdi, %r11 - adcq %r12, %r9 - adcq %rdx, %rax - movq %rcx, %rdx - mulxq %rsi, %r12, %rcx - addq %r15, %r12 - movq -64(%rsp), %rdx ## 8-byte Reload - mulxq %rsi, %rdi, %r15 - adcq %r14, %rdi - movq %r10, %rdx - mulxq %rsi, %rbx, %r10 - adcq %rbp, %rbx - movq %r8, %rdx - mulxq %rsi, %rbp, %r8 - adcq %r11, %rbp - adcq %r13, %r9 - movq %rsi, %rdx - mulxq %rsi, %rsi, %r11 - adcq %rax, %rsi - sbbq %rax, %rax - andl $1, %eax - addq %rcx, %rdi - movq -24(%rsp), %rdx ## 8-byte Reload - movq -40(%rsp), %rcx ## 8-byte Reload - movq %rcx, 32(%rdx) - movq %r12, 40(%rdx) - movq %rdi, 48(%rdx) - adcq %r15, %rbx - movq %rbx, 56(%rdx) - adcq %r10, %rbp - movq %rbp, 64(%rdx) - adcq %r8, %r9 - movq %r9, 72(%rdx) - adcq -48(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 80(%rdx) - adcq %r11, %rax - movq %rax, 88(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mont6Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mont6Lbmi2: ## @mcl_fp_mont6Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $32, %rsp - movq %rdx, -88(%rsp) ## 8-byte Spill - movq %rdi, 24(%rsp) ## 8-byte Spill - movq 40(%rsi), %rdi - movq %rdi, -96(%rsp) ## 8-byte Spill - movq (%rdx), %rax - movq %rdi, %rdx - mulxq %rax, %r11, %rbx - movq 32(%rsi), %rdx - movq %rdx, (%rsp) ## 8-byte Spill - mulxq %rax, %r14, %r12 - movq 24(%rsi), %rdx - movq %rdx, -8(%rsp) ## 8-byte Spill - mulxq %rax, %r15, %r13 - movq 16(%rsi), %rdx - movq %rdx, -16(%rsp) ## 8-byte Spill - mulxq %rax, %r8, %r10 - movq (%rsi), %rbp - movq %rbp, -24(%rsp) ## 8-byte Spill - movq 8(%rsi), %rdx - movq %rdx, -32(%rsp) ## 8-byte Spill - mulxq %rax, %rdi, %r9 - movq %rbp, %rdx - mulxq %rax, %rdx, %rbp - movq %rdx, -128(%rsp) ## 8-byte Spill - addq %rdi, %rbp - adcq %r8, %r9 - adcq %r15, %r10 - adcq %r14, %r13 - adcq %r11, %r12 - adcq $0, %rbx - movq %rbx, -120(%rsp) ## 8-byte Spill - movq -8(%rcx), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - imulq %rax, %rdx - movq 40(%rcx), %rax - movq %rax, -40(%rsp) ## 8-byte Spill - mulxq %rax, %rax, %r15 - movq %rax, -112(%rsp) ## 8-byte Spill - movq 16(%rcx), %rax - movq %rax, -48(%rsp) ## 8-byte Spill - mulxq %rax, %r8, %rax - movq 8(%rcx), %rsi - movq %rsi, -56(%rsp) ## 8-byte Spill - mulxq %rsi, %rbx, %r11 - movq (%rcx), %rsi - movq %rsi, -64(%rsp) ## 8-byte Spill - mulxq %rsi, %rsi, %r14 - addq %rbx, %r14 - adcq %r8, %r11 - movq 24(%rcx), %rdi - movq %rdi, -72(%rsp) ## 8-byte Spill - mulxq %rdi, %rdi, %r8 - adcq %rax, %rdi - movq 32(%rcx), %rax - movq %rax, -80(%rsp) ## 8-byte Spill - mulxq %rax, %rbx, %rax - adcq %r8, %rbx - adcq -112(%rsp), %rax ## 8-byte Folded Reload - adcq $0, %r15 - addq -128(%rsp), %rsi ## 8-byte Folded Reload - adcq %rbp, %r14 - adcq %r9, %r11 - adcq %r10, %rdi - adcq %r13, %rbx - adcq 
%r12, %rax - adcq -120(%rsp), %r15 ## 8-byte Folded Reload - sbbq %r10, %r10 - andl $1, %r10d - movq -88(%rsp), %rcx ## 8-byte Reload - movq 8(%rcx), %rdx - mulxq -96(%rsp), %rsi, %rcx ## 8-byte Folded Reload - movq %rsi, -112(%rsp) ## 8-byte Spill - movq %rcx, -120(%rsp) ## 8-byte Spill - mulxq (%rsp), %rcx, %r13 ## 8-byte Folded Reload - movq %rcx, -104(%rsp) ## 8-byte Spill - mulxq -8(%rsp), %r12, %rcx ## 8-byte Folded Reload - movq %rcx, -128(%rsp) ## 8-byte Spill - mulxq -32(%rsp), %rbp, %rcx ## 8-byte Folded Reload - mulxq -24(%rsp), %rsi, %r9 ## 8-byte Folded Reload - addq %rbp, %r9 - mulxq -16(%rsp), %rbp, %r8 ## 8-byte Folded Reload - adcq %rcx, %rbp - adcq %r12, %r8 - movq -128(%rsp), %rdx ## 8-byte Reload - adcq -104(%rsp), %rdx ## 8-byte Folded Reload - adcq -112(%rsp), %r13 ## 8-byte Folded Reload - movq -120(%rsp), %rcx ## 8-byte Reload - adcq $0, %rcx - addq %r14, %rsi - adcq %r11, %r9 - adcq %rdi, %rbp - adcq %rbx, %r8 - adcq %rax, %rdx - movq %rdx, -128(%rsp) ## 8-byte Spill - adcq %r15, %r13 - adcq %r10, %rcx - movq %rcx, -120(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -112(%rsp) ## 8-byte Spill - movq %rsi, %rbx - movq %rbx, %rdx - imulq 8(%rsp), %rdx ## 8-byte Folded Reload - mulxq -40(%rsp), %rax, %r12 ## 8-byte Folded Reload - movq %rax, -104(%rsp) ## 8-byte Spill - mulxq -80(%rsp), %r14, %r11 ## 8-byte Folded Reload - mulxq -56(%rsp), %rcx, %rax ## 8-byte Folded Reload - mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload - addq %rcx, %rsi - mulxq -48(%rsp), %rcx, %r10 ## 8-byte Folded Reload - adcq %rax, %rcx - mulxq -72(%rsp), %rax, %r15 ## 8-byte Folded Reload - adcq %r10, %rax - adcq %r14, %r15 - adcq -104(%rsp), %r11 ## 8-byte Folded Reload - adcq $0, %r12 - addq %rbx, %rdi - adcq %r9, %rsi - adcq %rbp, %rcx - adcq %r8, %rax - adcq -128(%rsp), %r15 ## 8-byte Folded Reload - adcq %r13, %r11 - adcq -120(%rsp), %r12 ## 8-byte Folded Reload - movq -112(%rsp), %r10 ## 8-byte Reload - adcq $0, %r10 - movq -88(%rsp), %rdx ## 8-byte Reload - movq 16(%rdx), %rdx - mulxq -96(%rsp), %rbp, %rdi ## 8-byte Folded Reload - movq %rbp, -112(%rsp) ## 8-byte Spill - movq %rdi, -120(%rsp) ## 8-byte Spill - mulxq (%rsp), %rdi, %rbp ## 8-byte Folded Reload - movq %rdi, -104(%rsp) ## 8-byte Spill - mulxq -8(%rsp), %rdi, %r13 ## 8-byte Folded Reload - movq %rdi, 16(%rsp) ## 8-byte Spill - mulxq -32(%rsp), %rdi, %r14 ## 8-byte Folded Reload - mulxq -24(%rsp), %rbx, %r9 ## 8-byte Folded Reload - movq %rbx, -128(%rsp) ## 8-byte Spill - addq %rdi, %r9 - mulxq -16(%rsp), %rbx, %r8 ## 8-byte Folded Reload - adcq %r14, %rbx - adcq 16(%rsp), %r8 ## 8-byte Folded Reload - adcq -104(%rsp), %r13 ## 8-byte Folded Reload - adcq -112(%rsp), %rbp ## 8-byte Folded Reload - movq -120(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - movq -128(%rsp), %rdi ## 8-byte Reload - addq %rsi, %rdi - movq %rdi, -128(%rsp) ## 8-byte Spill - adcq %rcx, %r9 - adcq %rax, %rbx - adcq %r15, %r8 - adcq %r11, %r13 - adcq %r12, %rbp - adcq %r10, %rdx - movq %rdx, -120(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -112(%rsp) ## 8-byte Spill - movq %rdi, %rdx - imulq 8(%rsp), %rdx ## 8-byte Folded Reload - mulxq -40(%rsp), %rax, %r11 ## 8-byte Folded Reload - movq %rax, -104(%rsp) ## 8-byte Spill - mulxq -80(%rsp), %r15, %r12 ## 8-byte Folded Reload - mulxq -56(%rsp), %rax, %rcx ## 8-byte Folded Reload - mulxq -64(%rsp), %rdi, %r14 ## 8-byte Folded Reload - addq %rax, %r14 - mulxq -48(%rsp), %rax, %r10 ## 8-byte Folded Reload - adcq %rcx, %rax - mulxq -72(%rsp), %rsi, %rcx ## 
8-byte Folded Reload - adcq %r10, %rsi - adcq %r15, %rcx - adcq -104(%rsp), %r12 ## 8-byte Folded Reload - adcq $0, %r11 - addq -128(%rsp), %rdi ## 8-byte Folded Reload - adcq %r9, %r14 - adcq %rbx, %rax - adcq %r8, %rsi - adcq %r13, %rcx - adcq %rbp, %r12 - adcq -120(%rsp), %r11 ## 8-byte Folded Reload - adcq $0, -112(%rsp) ## 8-byte Folded Spill - movq -88(%rsp), %rdx ## 8-byte Reload - movq 24(%rdx), %rdx - mulxq -96(%rsp), %rbp, %rdi ## 8-byte Folded Reload - movq %rbp, -128(%rsp) ## 8-byte Spill - movq %rdi, -120(%rsp) ## 8-byte Spill - mulxq (%rsp), %rdi, %r15 ## 8-byte Folded Reload - movq %rdi, -104(%rsp) ## 8-byte Spill - mulxq -8(%rsp), %r10, %rbp ## 8-byte Folded Reload - mulxq -32(%rsp), %rbx, %r9 ## 8-byte Folded Reload - mulxq -24(%rsp), %r13, %rdi ## 8-byte Folded Reload - addq %rbx, %rdi - mulxq -16(%rsp), %rbx, %r8 ## 8-byte Folded Reload - adcq %r9, %rbx - adcq %r10, %r8 - adcq -104(%rsp), %rbp ## 8-byte Folded Reload - adcq -128(%rsp), %r15 ## 8-byte Folded Reload - movq -120(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq %r14, %r13 - adcq %rax, %rdi - adcq %rsi, %rbx - adcq %rcx, %r8 - adcq %r12, %rbp - adcq %r11, %r15 - adcq -112(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -128(%rsp) ## 8-byte Spill - movq %r13, %rdx - imulq 8(%rsp), %rdx ## 8-byte Folded Reload - mulxq -40(%rsp), %rax, %r10 ## 8-byte Folded Reload - movq %rax, -112(%rsp) ## 8-byte Spill - mulxq -80(%rsp), %rax, %r12 ## 8-byte Folded Reload - movq %rax, -104(%rsp) ## 8-byte Spill - mulxq -56(%rsp), %rax, %r11 ## 8-byte Folded Reload - mulxq -64(%rsp), %rcx, %rsi ## 8-byte Folded Reload - addq %rax, %rsi - mulxq -48(%rsp), %r14, %r9 ## 8-byte Folded Reload - adcq %r11, %r14 - mulxq -72(%rsp), %rax, %r11 ## 8-byte Folded Reload - adcq %r9, %rax - adcq -104(%rsp), %r11 ## 8-byte Folded Reload - adcq -112(%rsp), %r12 ## 8-byte Folded Reload - adcq $0, %r10 - addq %r13, %rcx - adcq %rdi, %rsi - adcq %rbx, %r14 - adcq %r8, %rax - adcq %rbp, %r11 - adcq %r15, %r12 - adcq -120(%rsp), %r10 ## 8-byte Folded Reload - movq -128(%rsp), %r15 ## 8-byte Reload - adcq $0, %r15 - movq -88(%rsp), %rcx ## 8-byte Reload - movq 32(%rcx), %rdx - mulxq -96(%rsp), %rdi, %rcx ## 8-byte Folded Reload - movq %rdi, -112(%rsp) ## 8-byte Spill - movq %rcx, -120(%rsp) ## 8-byte Spill - mulxq (%rsp), %rdi, %rcx ## 8-byte Folded Reload - movq %rdi, 16(%rsp) ## 8-byte Spill - movq %rcx, -128(%rsp) ## 8-byte Spill - mulxq -8(%rsp), %r13, %rbp ## 8-byte Folded Reload - mulxq -32(%rsp), %rdi, %rcx ## 8-byte Folded Reload - mulxq -24(%rsp), %rbx, %r8 ## 8-byte Folded Reload - movq %rbx, -104(%rsp) ## 8-byte Spill - addq %rdi, %r8 - mulxq -16(%rsp), %rbx, %r9 ## 8-byte Folded Reload - adcq %rcx, %rbx - adcq %r13, %r9 - adcq 16(%rsp), %rbp ## 8-byte Folded Reload - movq -128(%rsp), %rdx ## 8-byte Reload - adcq -112(%rsp), %rdx ## 8-byte Folded Reload - movq -120(%rsp), %rcx ## 8-byte Reload - adcq $0, %rcx - movq -104(%rsp), %rdi ## 8-byte Reload - addq %rsi, %rdi - movq %rdi, -104(%rsp) ## 8-byte Spill - adcq %r14, %r8 - adcq %rax, %rbx - adcq %r11, %r9 - adcq %r12, %rbp - adcq %r10, %rdx - movq %rdx, -128(%rsp) ## 8-byte Spill - adcq %r15, %rcx - movq %rcx, -120(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, %r13 - movq %rdi, %rdx - imulq 8(%rsp), %rdx ## 8-byte Folded Reload - mulxq -40(%rsp), %r14, %rax ## 8-byte Folded Reload - movq %rax, -112(%rsp) ## 8-byte Spill - mulxq -80(%rsp), %r12, %r15 ## 8-byte Folded Reload - mulxq 
-56(%rsp), %rcx, %rax ## 8-byte Folded Reload - mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload - addq %rcx, %rsi - mulxq -48(%rsp), %r11, %r10 ## 8-byte Folded Reload - adcq %rax, %r11 - mulxq -72(%rsp), %rax, %rcx ## 8-byte Folded Reload - adcq %r10, %rax - adcq %r12, %rcx - adcq %r14, %r15 - movq -112(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq -104(%rsp), %rdi ## 8-byte Folded Reload - adcq %r8, %rsi - adcq %rbx, %r11 - adcq %r9, %rax - adcq %rbp, %rcx - adcq -128(%rsp), %r15 ## 8-byte Folded Reload - adcq -120(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -112(%rsp) ## 8-byte Spill - adcq $0, %r13 - movq %r13, -120(%rsp) ## 8-byte Spill - movq -88(%rsp), %rdx ## 8-byte Reload - movq 40(%rdx), %rdx - mulxq -96(%rsp), %rbp, %rdi ## 8-byte Folded Reload - movq %rbp, -128(%rsp) ## 8-byte Spill - movq %rdi, -88(%rsp) ## 8-byte Spill - mulxq (%rsp), %rbx, %rdi ## 8-byte Folded Reload - movq %rdi, -96(%rsp) ## 8-byte Spill - mulxq -8(%rsp), %r10, %rbp ## 8-byte Folded Reload - mulxq -16(%rsp), %r8, %r12 ## 8-byte Folded Reload - mulxq -32(%rsp), %rdi, %r14 ## 8-byte Folded Reload - mulxq -24(%rsp), %r13, %r9 ## 8-byte Folded Reload - addq %rdi, %r9 - adcq %r8, %r14 - adcq %r10, %r12 - adcq %rbx, %rbp - movq -96(%rsp), %rdi ## 8-byte Reload - adcq -128(%rsp), %rdi ## 8-byte Folded Reload - movq -88(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq %rsi, %r13 - adcq %r11, %r9 - adcq %rax, %r14 - adcq %rcx, %r12 - adcq %r15, %rbp - adcq -112(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, -96(%rsp) ## 8-byte Spill - adcq -120(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -88(%rsp) ## 8-byte Spill - sbbq %rcx, %rcx - movq 8(%rsp), %rdx ## 8-byte Reload - imulq %r13, %rdx - mulxq -64(%rsp), %r8, %rax ## 8-byte Folded Reload - mulxq -56(%rsp), %r10, %rdi ## 8-byte Folded Reload - addq %rax, %r10 - mulxq -48(%rsp), %rsi, %rax ## 8-byte Folded Reload - adcq %rdi, %rsi - mulxq -72(%rsp), %rbx, %r11 ## 8-byte Folded Reload - adcq %rax, %rbx - mulxq -80(%rsp), %rdi, %r15 ## 8-byte Folded Reload - adcq %r11, %rdi - mulxq -40(%rsp), %rax, %r11 ## 8-byte Folded Reload - adcq %r15, %rax - adcq $0, %r11 - andl $1, %ecx - addq %r13, %r8 - adcq %r9, %r10 - adcq %r14, %rsi - adcq %r12, %rbx - adcq %rbp, %rdi - adcq -96(%rsp), %rax ## 8-byte Folded Reload - adcq -88(%rsp), %r11 ## 8-byte Folded Reload - adcq $0, %rcx - movq %r10, %rbp - subq -64(%rsp), %rbp ## 8-byte Folded Reload - movq %rsi, %rdx - sbbq -56(%rsp), %rdx ## 8-byte Folded Reload - movq %rbx, %r8 - sbbq -48(%rsp), %r8 ## 8-byte Folded Reload - movq %rdi, %r9 - sbbq -72(%rsp), %r9 ## 8-byte Folded Reload - movq %rax, %r14 - sbbq -80(%rsp), %r14 ## 8-byte Folded Reload - movq %r11, %r15 - sbbq -40(%rsp), %r15 ## 8-byte Folded Reload - sbbq $0, %rcx - andl $1, %ecx - cmovneq %rdi, %r9 - testb %cl, %cl - cmovneq %r10, %rbp - movq 24(%rsp), %rcx ## 8-byte Reload - movq %rbp, (%rcx) - cmovneq %rsi, %rdx - movq %rdx, 8(%rcx) - cmovneq %rbx, %r8 - movq %r8, 16(%rcx) - movq %r9, 24(%rcx) - cmovneq %rax, %r14 - movq %r14, 32(%rcx) - cmovneq %r11, %r15 - movq %r15, 40(%rcx) - addq $32, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montNF6Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montNF6Lbmi2: ## @mcl_fp_montNF6Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -120(%rsp) ## 8-byte Spill - movq %rdi, -8(%rsp) ## 8-byte Spill - movq (%rsi), %rax - movq %rax, -112(%rsp) ## 8-byte Spill - movq 8(%rsi), %rdi - movq %rdi, -128(%rsp) ## 
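## With 6 limbs the working set outgrows the register file: mcl_fp_mont6L
## reserves stack (subq $32, %rsp) and the listing fills with
## "## 8-byte Spill"/"## 8-byte Reload" traffic, but the round structure and
## final conditional subtraction are exactly the 5-limb algorithm.
## mcl_fp_montNF6L, starting here, likewise mirrors its 5-limb twin.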
8-byte Spill - movq (%rdx), %rbp - movq %rdi, %rdx - mulxq %rbp, %rdi, %rbx - movq %rax, %rdx - mulxq %rbp, %r9, %r14 - movq 16(%rsi), %rdx - movq %rdx, -56(%rsp) ## 8-byte Spill - addq %rdi, %r14 - mulxq %rbp, %rdi, %r8 - adcq %rbx, %rdi - movq 24(%rsi), %rdx - movq %rdx, -64(%rsp) ## 8-byte Spill - mulxq %rbp, %rbx, %r10 - adcq %r8, %rbx - movq 32(%rsi), %rdx - movq %rdx, -72(%rsp) ## 8-byte Spill - mulxq %rbp, %r8, %r11 - adcq %r10, %r8 - movq 40(%rsi), %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - mulxq %rbp, %rsi, %r15 - adcq %r11, %rsi - adcq $0, %r15 - movq -8(%rcx), %rax - movq %rax, -104(%rsp) ## 8-byte Spill - movq %r9, %rdx - imulq %rax, %rdx - movq (%rcx), %rax - movq %rax, -96(%rsp) ## 8-byte Spill - mulxq %rax, %rbp, %rax - movq %rax, -88(%rsp) ## 8-byte Spill - addq %r9, %rbp - movq 8(%rcx), %rax - movq %rax, -16(%rsp) ## 8-byte Spill - mulxq %rax, %r12, %r9 - adcq %r14, %r12 - movq 16(%rcx), %rax - movq %rax, -24(%rsp) ## 8-byte Spill - mulxq %rax, %r14, %rax - adcq %rdi, %r14 - movq 24(%rcx), %rdi - movq %rdi, -32(%rsp) ## 8-byte Spill - mulxq %rdi, %r13, %rdi - adcq %rbx, %r13 - movq 32(%rcx), %rbp - movq %rbp, -40(%rsp) ## 8-byte Spill - mulxq %rbp, %r11, %rbx - adcq %r8, %r11 - movq 40(%rcx), %rcx - movq %rcx, -48(%rsp) ## 8-byte Spill - mulxq %rcx, %r10, %rcx - adcq %rsi, %r10 - adcq $0, %r15 - addq -88(%rsp), %r12 ## 8-byte Folded Reload - adcq %r9, %r14 - adcq %rax, %r13 - adcq %rdi, %r11 - adcq %rbx, %r10 - adcq %rcx, %r15 - movq -120(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rdx - mulxq -128(%rsp), %rcx, %rsi ## 8-byte Folded Reload - mulxq -112(%rsp), %rbx, %rax ## 8-byte Folded Reload - addq %rcx, %rax - mulxq -56(%rsp), %rcx, %rdi ## 8-byte Folded Reload - adcq %rsi, %rcx - mulxq -64(%rsp), %rsi, %r8 ## 8-byte Folded Reload - adcq %rdi, %rsi - mulxq -72(%rsp), %rdi, %rbp ## 8-byte Folded Reload - movq %rbp, -88(%rsp) ## 8-byte Spill - adcq %r8, %rdi - mulxq -80(%rsp), %r8, %r9 ## 8-byte Folded Reload - adcq -88(%rsp), %r8 ## 8-byte Folded Reload - adcq $0, %r9 - addq %r12, %rbx - adcq %r14, %rax - adcq %r13, %rcx - adcq %r11, %rsi - adcq %r10, %rdi - adcq %r15, %r8 - adcq $0, %r9 - movq %rbx, %rdx - imulq -104(%rsp), %rdx ## 8-byte Folded Reload - mulxq -96(%rsp), %rbp, %r13 ## 8-byte Folded Reload - addq %rbx, %rbp - mulxq -16(%rsp), %r11, %rbx ## 8-byte Folded Reload - adcq %rax, %r11 - mulxq -24(%rsp), %r14, %rax ## 8-byte Folded Reload - adcq %rcx, %r14 - mulxq -32(%rsp), %r10, %rcx ## 8-byte Folded Reload - adcq %rsi, %r10 - mulxq -40(%rsp), %r15, %rsi ## 8-byte Folded Reload - adcq %rdi, %r15 - mulxq -48(%rsp), %r12, %rdx ## 8-byte Folded Reload - adcq %r8, %r12 - adcq $0, %r9 - addq %r13, %r11 - adcq %rbx, %r14 - adcq %rax, %r10 - adcq %rcx, %r15 - adcq %rsi, %r12 - adcq %rdx, %r9 - movq -120(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rdx - mulxq -128(%rsp), %rcx, %rax ## 8-byte Folded Reload - mulxq -112(%rsp), %r13, %rdi ## 8-byte Folded Reload - addq %rcx, %rdi - mulxq -56(%rsp), %rbx, %rcx ## 8-byte Folded Reload - adcq %rax, %rbx - mulxq -64(%rsp), %rsi, %rbp ## 8-byte Folded Reload - adcq %rcx, %rsi - mulxq -72(%rsp), %rax, %rcx ## 8-byte Folded Reload - movq %rcx, -88(%rsp) ## 8-byte Spill - adcq %rbp, %rax - mulxq -80(%rsp), %r8, %rcx ## 8-byte Folded Reload - adcq -88(%rsp), %r8 ## 8-byte Folded Reload - adcq $0, %rcx - addq %r11, %r13 - adcq %r14, %rdi - adcq %r10, %rbx - adcq %r15, %rsi - adcq %r12, %rax - adcq %r9, %r8 - adcq $0, %rcx - movq %r13, %rdx - imulq -104(%rsp), %rdx ## 8-byte Folded Reload - mulxq -96(%rsp), %rbp, %r12 ## 
8-byte Folded Reload - addq %r13, %rbp - mulxq -16(%rsp), %r11, %rbp ## 8-byte Folded Reload - adcq %rdi, %r11 - mulxq -24(%rsp), %r9, %rdi ## 8-byte Folded Reload - adcq %rbx, %r9 - mulxq -32(%rsp), %r10, %rbx ## 8-byte Folded Reload - adcq %rsi, %r10 - mulxq -40(%rsp), %r14, %rsi ## 8-byte Folded Reload - adcq %rax, %r14 - mulxq -48(%rsp), %r15, %rax ## 8-byte Folded Reload - adcq %r8, %r15 - adcq $0, %rcx - addq %r12, %r11 - adcq %rbp, %r9 - adcq %rdi, %r10 - adcq %rbx, %r14 - adcq %rsi, %r15 - adcq %rax, %rcx - movq -120(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rdx - mulxq -128(%rsp), %rsi, %rax ## 8-byte Folded Reload - mulxq -112(%rsp), %r13, %rbx ## 8-byte Folded Reload - addq %rsi, %rbx - mulxq -56(%rsp), %rdi, %rbp ## 8-byte Folded Reload - adcq %rax, %rdi - mulxq -64(%rsp), %rsi, %r8 ## 8-byte Folded Reload - adcq %rbp, %rsi - mulxq -72(%rsp), %rax, %rbp ## 8-byte Folded Reload - adcq %r8, %rax - mulxq -80(%rsp), %r8, %r12 ## 8-byte Folded Reload - adcq %rbp, %r8 - adcq $0, %r12 - addq %r11, %r13 - adcq %r9, %rbx - adcq %r10, %rdi - adcq %r14, %rsi - adcq %r15, %rax - adcq %rcx, %r8 - adcq $0, %r12 - movq %r13, %rdx - imulq -104(%rsp), %rdx ## 8-byte Folded Reload - mulxq -96(%rsp), %rbp, %rcx ## 8-byte Folded Reload - addq %r13, %rbp - mulxq -16(%rsp), %r11, %rbp ## 8-byte Folded Reload - adcq %rbx, %r11 - mulxq -24(%rsp), %r9, %rbx ## 8-byte Folded Reload - adcq %rdi, %r9 - mulxq -32(%rsp), %r10, %rdi ## 8-byte Folded Reload - adcq %rsi, %r10 - mulxq -40(%rsp), %r14, %rsi ## 8-byte Folded Reload - adcq %rax, %r14 - mulxq -48(%rsp), %r15, %rax ## 8-byte Folded Reload - adcq %r8, %r15 - adcq $0, %r12 - addq %rcx, %r11 - adcq %rbp, %r9 - adcq %rbx, %r10 - adcq %rdi, %r14 - adcq %rsi, %r15 - adcq %rax, %r12 - movq -120(%rsp), %rax ## 8-byte Reload - movq 32(%rax), %rdx - mulxq -128(%rsp), %rsi, %rcx ## 8-byte Folded Reload - mulxq -112(%rsp), %r13, %rax ## 8-byte Folded Reload - addq %rsi, %rax - mulxq -56(%rsp), %rbx, %rsi ## 8-byte Folded Reload - adcq %rcx, %rbx - mulxq -64(%rsp), %rdi, %rcx ## 8-byte Folded Reload - adcq %rsi, %rdi - mulxq -72(%rsp), %rsi, %rbp ## 8-byte Folded Reload - adcq %rcx, %rsi - mulxq -80(%rsp), %r8, %rcx ## 8-byte Folded Reload - adcq %rbp, %r8 - adcq $0, %rcx - addq %r11, %r13 - adcq %r9, %rax - adcq %r10, %rbx - adcq %r14, %rdi - adcq %r15, %rsi - adcq %r12, %r8 - adcq $0, %rcx - movq %r13, %rdx - imulq -104(%rsp), %rdx ## 8-byte Folded Reload - mulxq -96(%rsp), %rbp, %r9 ## 8-byte Folded Reload - addq %r13, %rbp - mulxq -16(%rsp), %r13, %rbp ## 8-byte Folded Reload - adcq %rax, %r13 - mulxq -24(%rsp), %r11, %rax ## 8-byte Folded Reload - adcq %rbx, %r11 - mulxq -32(%rsp), %r10, %rbx ## 8-byte Folded Reload - adcq %rdi, %r10 - mulxq -40(%rsp), %r14, %rdi ## 8-byte Folded Reload - adcq %rsi, %r14 - mulxq -48(%rsp), %rsi, %rdx ## 8-byte Folded Reload - adcq %r8, %rsi - adcq $0, %rcx - addq %r9, %r13 - adcq %rbp, %r11 - adcq %rax, %r10 - adcq %rbx, %r14 - adcq %rdi, %rsi - adcq %rdx, %rcx - movq -120(%rsp), %rax ## 8-byte Reload - movq 40(%rax), %rdx - mulxq -128(%rsp), %rdi, %rax ## 8-byte Folded Reload - mulxq -112(%rsp), %r8, %rbx ## 8-byte Folded Reload - addq %rdi, %rbx - mulxq -56(%rsp), %rdi, %rbp ## 8-byte Folded Reload - adcq %rax, %rdi - mulxq -64(%rsp), %r15, %rax ## 8-byte Folded Reload - adcq %rbp, %r15 - mulxq -72(%rsp), %r12, %rbp ## 8-byte Folded Reload - adcq %rax, %r12 - mulxq -80(%rsp), %r9, %rax ## 8-byte Folded Reload - adcq %rbp, %r9 - adcq $0, %rax - addq %r13, %r8 - adcq %r11, %rbx - adcq %r10, %rdi - adcq %r14, %r15 - 
adcq %rsi, %r12 - adcq %rcx, %r9 - adcq $0, %rax - movq -104(%rsp), %rdx ## 8-byte Reload - imulq %r8, %rdx - mulxq -96(%rsp), %rcx, %rsi ## 8-byte Folded Reload - movq %rsi, -104(%rsp) ## 8-byte Spill - addq %r8, %rcx - movq -16(%rsp), %r11 ## 8-byte Reload - mulxq %r11, %r8, %rcx - movq %rcx, -112(%rsp) ## 8-byte Spill - adcq %rbx, %r8 - movq -24(%rsp), %r10 ## 8-byte Reload - mulxq %r10, %rsi, %rcx - movq %rcx, -120(%rsp) ## 8-byte Spill - adcq %rdi, %rsi - movq -32(%rsp), %r13 ## 8-byte Reload - mulxq %r13, %rdi, %rcx - movq %rcx, -128(%rsp) ## 8-byte Spill - adcq %r15, %rdi - movq -40(%rsp), %rcx ## 8-byte Reload - mulxq %rcx, %r15, %rbx - adcq %r12, %r15 - movq -48(%rsp), %r14 ## 8-byte Reload - mulxq %r14, %r12, %rbp - adcq %r9, %r12 - adcq $0, %rax - addq -104(%rsp), %r8 ## 8-byte Folded Reload - adcq -112(%rsp), %rsi ## 8-byte Folded Reload - adcq -120(%rsp), %rdi ## 8-byte Folded Reload - adcq -128(%rsp), %r15 ## 8-byte Folded Reload - adcq %rbx, %r12 - adcq %rbp, %rax - movq %r8, %rbp - subq -96(%rsp), %rbp ## 8-byte Folded Reload - movq %rsi, %rbx - sbbq %r11, %rbx - movq %rdi, %r11 - sbbq %r10, %r11 - movq %r15, %r10 - sbbq %r13, %r10 - movq %r12, %r9 - sbbq %rcx, %r9 - movq %rax, %rcx - sbbq %r14, %rcx - movq %rcx, %rdx - sarq $63, %rdx - cmovsq %r8, %rbp - movq -8(%rsp), %rdx ## 8-byte Reload - movq %rbp, (%rdx) - cmovsq %rsi, %rbx - movq %rbx, 8(%rdx) - cmovsq %rdi, %r11 - movq %r11, 16(%rdx) - cmovsq %r15, %r10 - movq %r10, 24(%rdx) - cmovsq %r12, %r9 - movq %r9, 32(%rdx) - cmovsq %rax, %rcx - movq %rcx, 40(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montRed6Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montRed6Lbmi2: ## @mcl_fp_montRed6Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - pushq %rax - movq %rdx, %rcx - movq %rdi, (%rsp) ## 8-byte Spill - movq -8(%rcx), %rax - movq %rax, -8(%rsp) ## 8-byte Spill - movq (%rsi), %r9 - movq %r9, %rdx - imulq %rax, %rdx - movq 40(%rcx), %rax - movq %rax, -72(%rsp) ## 8-byte Spill - mulxq %rax, %r12, %r13 - movq 32(%rcx), %rax - movq %rax, -16(%rsp) ## 8-byte Spill - mulxq %rax, %r10, %r8 - movq 24(%rcx), %rax - movq %rax, -48(%rsp) ## 8-byte Spill - mulxq %rax, %r14, %r15 - movq 16(%rcx), %rax - movq %rax, -32(%rsp) ## 8-byte Spill - mulxq %rax, %rbp, %r11 - movq (%rcx), %rdi - movq %rdi, -40(%rsp) ## 8-byte Spill - movq 8(%rcx), %rax - movq %rax, -64(%rsp) ## 8-byte Spill - mulxq %rax, %rax, %rbx - mulxq %rdi, %rdx, %rcx - addq %rax, %rcx - adcq %rbp, %rbx - adcq %r14, %r11 - adcq %r10, %r15 - adcq %r12, %r8 - adcq $0, %r13 - addq %r9, %rdx - adcq 8(%rsi), %rcx - adcq 16(%rsi), %rbx - adcq 24(%rsi), %r11 - adcq 32(%rsi), %r15 - adcq 40(%rsi), %r8 - movq %r8, -112(%rsp) ## 8-byte Spill - adcq 48(%rsi), %r13 - movq %r13, -104(%rsp) ## 8-byte Spill - movq 88(%rsi), %r8 - movq 80(%rsi), %rdx - movq 72(%rsi), %rdi - movq 64(%rsi), %rax - movq 56(%rsi), %r14 - adcq $0, %r14 - adcq $0, %rax - movq %rax, -88(%rsp) ## 8-byte Spill - adcq $0, %rdi - movq %rdi, -96(%rsp) ## 8-byte Spill - adcq $0, %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, -24(%rsp) ## 8-byte Spill - sbbq %r12, %r12 - andl $1, %r12d - movq %rcx, %rdx - imulq -8(%rsp), %rdx ## 8-byte Folded Reload - mulxq -72(%rsp), %rsi, %rax ## 8-byte Folded Reload - movq %rsi, -120(%rsp) ## 8-byte Spill - movq %rax, -128(%rsp) ## 8-byte Spill - mulxq -16(%rsp), %rax, %r13 ## 8-byte Folded Reload - movq %rax, -56(%rsp) ## 8-byte Spill - mulxq -48(%rsp), %rbp, %r10 ## 
8-byte Folded Reload - mulxq -32(%rsp), %r9, %r8 ## 8-byte Folded Reload - mulxq -64(%rsp), %rsi, %rdi ## 8-byte Folded Reload - mulxq -40(%rsp), %rdx, %rax ## 8-byte Folded Reload - addq %rsi, %rax - adcq %r9, %rdi - adcq %rbp, %r8 - adcq -56(%rsp), %r10 ## 8-byte Folded Reload - adcq -120(%rsp), %r13 ## 8-byte Folded Reload - movq -128(%rsp), %rsi ## 8-byte Reload - adcq $0, %rsi - addq %rcx, %rdx - adcq %rbx, %rax - adcq %r11, %rdi - adcq %r15, %r8 - adcq -112(%rsp), %r10 ## 8-byte Folded Reload - adcq -104(%rsp), %r13 ## 8-byte Folded Reload - adcq %r14, %rsi - movq %rsi, -128(%rsp) ## 8-byte Spill - adcq $0, -88(%rsp) ## 8-byte Folded Spill - adcq $0, -96(%rsp) ## 8-byte Folded Spill - adcq $0, -80(%rsp) ## 8-byte Folded Spill - adcq $0, -24(%rsp) ## 8-byte Folded Spill - adcq $0, %r12 - movq %rax, %rdx - imulq -8(%rsp), %rdx ## 8-byte Folded Reload - mulxq -72(%rsp), %rsi, %rcx ## 8-byte Folded Reload - movq %rsi, -112(%rsp) ## 8-byte Spill - movq %rcx, -104(%rsp) ## 8-byte Spill - movq -16(%rsp), %rbx ## 8-byte Reload - mulxq %rbx, %rcx, %r14 - movq %rcx, -120(%rsp) ## 8-byte Spill - mulxq -48(%rsp), %rcx, %r15 ## 8-byte Folded Reload - movq %rcx, -56(%rsp) ## 8-byte Spill - mulxq -32(%rsp), %r11, %rbp ## 8-byte Folded Reload - mulxq -64(%rsp), %rsi, %r9 ## 8-byte Folded Reload - mulxq -40(%rsp), %rdx, %rcx ## 8-byte Folded Reload - addq %rsi, %rcx - adcq %r11, %r9 - adcq -56(%rsp), %rbp ## 8-byte Folded Reload - adcq -120(%rsp), %r15 ## 8-byte Folded Reload - adcq -112(%rsp), %r14 ## 8-byte Folded Reload - movq -104(%rsp), %rsi ## 8-byte Reload - adcq $0, %rsi - addq %rax, %rdx - adcq %rdi, %rcx - adcq %r8, %r9 - adcq %r10, %rbp - adcq %r13, %r15 - adcq -128(%rsp), %r14 ## 8-byte Folded Reload - adcq -88(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, -104(%rsp) ## 8-byte Spill - adcq $0, -96(%rsp) ## 8-byte Folded Spill - adcq $0, -80(%rsp) ## 8-byte Folded Spill - adcq $0, -24(%rsp) ## 8-byte Folded Spill - adcq $0, %r12 - movq %rcx, %rdx - movq -8(%rsp), %r13 ## 8-byte Reload - imulq %r13, %rdx - mulxq -72(%rsp), %rsi, %rax ## 8-byte Folded Reload - movq %rsi, -112(%rsp) ## 8-byte Spill - movq %rax, -128(%rsp) ## 8-byte Spill - mulxq %rbx, %rsi, %rax - movq %rsi, -120(%rsp) ## 8-byte Spill - movq %rax, -88(%rsp) ## 8-byte Spill - movq -48(%rsp), %r11 ## 8-byte Reload - mulxq %r11, %rax, %rbx - movq %rax, -56(%rsp) ## 8-byte Spill - mulxq -32(%rsp), %r10, %r8 ## 8-byte Folded Reload - mulxq -64(%rsp), %rsi, %rdi ## 8-byte Folded Reload - mulxq -40(%rsp), %rdx, %rax ## 8-byte Folded Reload - addq %rsi, %rax - adcq %r10, %rdi - adcq -56(%rsp), %r8 ## 8-byte Folded Reload - adcq -120(%rsp), %rbx ## 8-byte Folded Reload - movq -88(%rsp), %r10 ## 8-byte Reload - adcq -112(%rsp), %r10 ## 8-byte Folded Reload - movq -128(%rsp), %rsi ## 8-byte Reload - adcq $0, %rsi - addq %rcx, %rdx - adcq %r9, %rax - adcq %rbp, %rdi - adcq %r15, %r8 - adcq %r14, %rbx - adcq -104(%rsp), %r10 ## 8-byte Folded Reload - movq %r10, -88(%rsp) ## 8-byte Spill - adcq -96(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, -128(%rsp) ## 8-byte Spill - adcq $0, -80(%rsp) ## 8-byte Folded Spill - adcq $0, -24(%rsp) ## 8-byte Folded Spill - adcq $0, %r12 - movq %rax, %rdx - imulq %r13, %rdx - mulxq -72(%rsp), %rsi, %rcx ## 8-byte Folded Reload - movq %rsi, -104(%rsp) ## 8-byte Spill - movq %rcx, -96(%rsp) ## 8-byte Spill - mulxq -16(%rsp), %rsi, %rcx ## 8-byte Folded Reload - movq %rsi, -112(%rsp) ## 8-byte Spill - mulxq %r11, %rsi, %r13 - movq %rsi, -120(%rsp) ## 8-byte Spill - movq -32(%rsp), %r10 ## 8-byte Reload 
- mulxq %r10, %r15, %r14 - mulxq -64(%rsp), %rsi, %r9 ## 8-byte Folded Reload - movq -40(%rsp), %r11 ## 8-byte Reload - mulxq %r11, %rdx, %rbp - addq %rsi, %rbp - adcq %r15, %r9 - adcq -120(%rsp), %r14 ## 8-byte Folded Reload - adcq -112(%rsp), %r13 ## 8-byte Folded Reload - adcq -104(%rsp), %rcx ## 8-byte Folded Reload - movq -96(%rsp), %rsi ## 8-byte Reload - adcq $0, %rsi - addq %rax, %rdx - adcq %rdi, %rbp - adcq %r8, %r9 - adcq %rbx, %r14 - adcq -88(%rsp), %r13 ## 8-byte Folded Reload - adcq -128(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -128(%rsp) ## 8-byte Spill - adcq -80(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, -96(%rsp) ## 8-byte Spill - adcq $0, -24(%rsp) ## 8-byte Folded Spill - adcq $0, %r12 - movq -8(%rsp), %rdx ## 8-byte Reload - imulq %rbp, %rdx - mulxq -72(%rsp), %rax, %rsi ## 8-byte Folded Reload - movq %rax, -80(%rsp) ## 8-byte Spill - mulxq %r10, %rax, %r15 - mulxq %r11, %r10, %rdi - mulxq -64(%rsp), %rbx, %r8 ## 8-byte Folded Reload - addq %rdi, %rbx - adcq %rax, %r8 - mulxq -48(%rsp), %rax, %rdi ## 8-byte Folded Reload - adcq %r15, %rax - movq -16(%rsp), %r15 ## 8-byte Reload - mulxq %r15, %rdx, %r11 - adcq %rdi, %rdx - adcq -80(%rsp), %r11 ## 8-byte Folded Reload - adcq $0, %rsi - addq %rbp, %r10 - adcq %r9, %rbx - adcq %r14, %r8 - adcq %r13, %rax - adcq -128(%rsp), %rdx ## 8-byte Folded Reload - adcq -96(%rsp), %r11 ## 8-byte Folded Reload - adcq -24(%rsp), %rsi ## 8-byte Folded Reload - adcq $0, %r12 - movq %rbx, %rcx - subq -40(%rsp), %rcx ## 8-byte Folded Reload - movq %r8, %rdi - sbbq -64(%rsp), %rdi ## 8-byte Folded Reload - movq %rax, %rbp - sbbq -32(%rsp), %rbp ## 8-byte Folded Reload - movq %rdx, %r9 - sbbq -48(%rsp), %r9 ## 8-byte Folded Reload - movq %r11, %r10 - sbbq %r15, %r10 - movq %rsi, %r15 - sbbq -72(%rsp), %r15 ## 8-byte Folded Reload - sbbq $0, %r12 - andl $1, %r12d - cmovneq %rsi, %r15 - testb %r12b, %r12b - cmovneq %rbx, %rcx - movq (%rsp), %rsi ## 8-byte Reload - movq %rcx, (%rsi) - cmovneq %r8, %rdi - movq %rdi, 8(%rsi) - cmovneq %rax, %rbp - movq %rbp, 16(%rsi) - cmovneq %rdx, %r9 - movq %r9, 24(%rsi) - cmovneq %r11, %r10 - movq %r10, 32(%rsi) - movq %r15, 40(%rsi) - addq $8, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_addPre6Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addPre6Lbmi2: ## @mcl_fp_addPre6Lbmi2 -## BB#0: - pushq %r14 - pushq %rbx - movq 40(%rdx), %r8 - movq 40(%rsi), %r11 - movq 32(%rdx), %r9 - movq 24(%rdx), %r10 - movq 24(%rsi), %rax - movq 32(%rsi), %r14 - movq 16(%rdx), %rbx - movq (%rdx), %rcx - movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rbx - movq %rcx, (%rdi) - movq %rdx, 8(%rdi) - movq %rbx, 16(%rdi) - adcq %r10, %rax - movq %rax, 24(%rdi) - adcq %r9, %r14 - movq %r14, 32(%rdi) - adcq %r8, %r11 - movq %r11, 40(%rdi) - sbbq %rax, %rax - andl $1, %eax - popq %rbx - popq %r14 - retq - - .globl _mcl_fp_subPre6Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subPre6Lbmi2: ## @mcl_fp_subPre6Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq 40(%rdx), %r8 - movq 40(%rsi), %r9 - movq 32(%rsi), %r10 - movq 24(%rsi), %r11 - movq 16(%rsi), %rcx - movq (%rsi), %rbx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rbx - sbbq 8(%rdx), %rsi - movq 24(%rdx), %r14 - movq 32(%rdx), %r15 - sbbq 16(%rdx), %rcx - movq %rbx, (%rdi) - movq %rsi, 8(%rdi) - movq %rcx, 16(%rdi) - sbbq %r14, %r11 - movq %r11, 24(%rdi) - sbbq %r15, %r10 - movq %r10, 32(%rdi) - sbbq %r8, %r9 - movq %r9, 40(%rdi) - sbbq $0, %rax - andl $1, %eax - popq %rbx - popq %r14 - 
popq %r15 - retq - - .globl _mcl_fp_shr1_6Lbmi2 - .p2align 4, 0x90 -_mcl_fp_shr1_6Lbmi2: ## @mcl_fp_shr1_6Lbmi2 -## BB#0: - movq 40(%rsi), %r8 - movq 32(%rsi), %r9 - movq 24(%rsi), %rdx - movq 16(%rsi), %rax - movq (%rsi), %rcx - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rcx - movq %rcx, (%rdi) - shrdq $1, %rax, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rdx, %rax - movq %rax, 16(%rdi) - shrdq $1, %r9, %rdx - movq %rdx, 24(%rdi) - shrdq $1, %r8, %r9 - movq %r9, 32(%rdi) - shrq %r8 - movq %r8, 40(%rdi) - retq - - .globl _mcl_fp_add6Lbmi2 - .p2align 4, 0x90 -_mcl_fp_add6Lbmi2: ## @mcl_fp_add6Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq 40(%rdx), %r14 - movq 40(%rsi), %r8 - movq 32(%rdx), %r15 - movq 24(%rdx), %rbx - movq 24(%rsi), %r10 - movq 32(%rsi), %r9 - movq 16(%rdx), %r11 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r11 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r11, 16(%rdi) - adcq %rbx, %r10 - movq %r10, 24(%rdi) - adcq %r15, %r9 - movq %r9, 32(%rdi) - adcq %r14, %r8 - movq %r8, 40(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq 16(%rcx), %r11 - sbbq 24(%rcx), %r10 - sbbq 32(%rcx), %r9 - sbbq 40(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne LBB89_2 -## BB#1: ## %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r11, 16(%rdi) - movq %r10, 24(%rdi) - movq %r9, 32(%rdi) - movq %r8, 40(%rdi) -LBB89_2: ## %carry - popq %rbx - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_addNF6Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addNF6Lbmi2: ## @mcl_fp_addNF6Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 40(%rdx), %r8 - movq 32(%rdx), %r9 - movq 24(%rdx), %r10 - movq 16(%rdx), %r11 - movq (%rdx), %r15 - movq 8(%rdx), %r14 - addq (%rsi), %r15 - adcq 8(%rsi), %r14 - adcq 16(%rsi), %r11 - adcq 24(%rsi), %r10 - adcq 32(%rsi), %r9 - adcq 40(%rsi), %r8 - movq %r15, %rsi - subq (%rcx), %rsi - movq %r14, %rbx - sbbq 8(%rcx), %rbx - movq %r11, %rdx - sbbq 16(%rcx), %rdx - movq %r10, %r13 - sbbq 24(%rcx), %r13 - movq %r9, %r12 - sbbq 32(%rcx), %r12 - movq %r8, %rax - sbbq 40(%rcx), %rax - movq %rax, %rcx - sarq $63, %rcx - cmovsq %r15, %rsi - movq %rsi, (%rdi) - cmovsq %r14, %rbx - movq %rbx, 8(%rdi) - cmovsq %r11, %rdx - movq %rdx, 16(%rdi) - cmovsq %r10, %r13 - movq %r13, 24(%rdi) - cmovsq %r9, %r12 - movq %r12, 32(%rdi) - cmovsq %r8, %rax - movq %rax, 40(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_sub6Lbmi2 - .p2align 4, 0x90 -_mcl_fp_sub6Lbmi2: ## @mcl_fp_sub6Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 40(%rdx), %r14 - movq 40(%rsi), %r8 - movq 32(%rsi), %r9 - movq 24(%rsi), %r10 - movq 16(%rsi), %r11 - movq (%rsi), %rax - movq 8(%rsi), %rsi - xorl %ebx, %ebx - subq (%rdx), %rax - sbbq 8(%rdx), %rsi - movq 24(%rdx), %r15 - movq 32(%rdx), %r12 - sbbq 16(%rdx), %r11 - movq %rax, (%rdi) - movq %rsi, 8(%rdi) - movq %r11, 16(%rdi) - sbbq %r15, %r10 - movq %r10, 24(%rdi) - sbbq %r12, %r9 - movq %r9, 32(%rdi) - sbbq %r14, %r8 - movq %r8, 40(%rdi) - sbbq $0, %rbx - testb $1, %bl - je LBB91_2 -## BB#1: ## %carry - movq 40(%rcx), %r14 - movq 32(%rcx), %r15 - movq 24(%rcx), %r12 - movq 8(%rcx), %rbx - movq 16(%rcx), %rdx - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %rsi, %rbx - movq %rbx, 8(%rdi) - adcq %r11, %rdx - movq %rdx, 16(%rdi) - adcq %r10, %r12 - movq %r12, 24(%rdi) - adcq %r9, %r15 - movq %r15, 32(%rdi) - adcq %r8, %r14 - movq %r14, 40(%rdi) -LBB91_2: ## %nocarry - popq 
%rbx - popq %r12 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_subNF6Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subNF6Lbmi2: ## @mcl_fp_subNF6Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movdqu (%rdx), %xmm0 - movdqu 16(%rdx), %xmm1 - movdqu 32(%rdx), %xmm2 - pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1] - movd %xmm3, %r10 - movdqu (%rsi), %xmm3 - movdqu 16(%rsi), %xmm4 - movdqu 32(%rsi), %xmm5 - pshufd $78, %xmm5, %xmm6 ## xmm6 = xmm5[2,3,0,1] - movd %xmm6, %rax - movd %xmm2, %r11 - movd %xmm5, %r8 - pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] - movd %xmm2, %r14 - pshufd $78, %xmm4, %xmm2 ## xmm2 = xmm4[2,3,0,1] - movd %xmm2, %r9 - movd %xmm1, %r15 - movd %xmm4, %r12 - pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] - movd %xmm1, %r13 - pshufd $78, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,0,1] - movd %xmm1, %rbp - movd %xmm0, %rdx - movd %xmm3, %rbx - subq %rdx, %rbx - sbbq %r13, %rbp - sbbq %r15, %r12 - sbbq %r14, %r9 - sbbq %r11, %r8 - sbbq %r10, %rax - movq %rax, %rdx - sarq $63, %rdx - movq %rdx, %rsi - shldq $1, %rax, %rsi - andq (%rcx), %rsi - movq 40(%rcx), %r10 - andq %rdx, %r10 - movq 32(%rcx), %r11 - andq %rdx, %r11 - movq 24(%rcx), %r14 - andq %rdx, %r14 - rorxq $63, %rdx, %r15 - andq 16(%rcx), %rdx - andq 8(%rcx), %r15 - addq %rbx, %rsi - movq %rsi, (%rdi) - adcq %rbp, %r15 - movq %r15, 8(%rdi) - adcq %r12, %rdx - movq %rdx, 16(%rdi) - adcq %r9, %r14 - movq %r14, 24(%rdi) - adcq %r8, %r11 - movq %r11, 32(%rdi) - adcq %rax, %r10 - movq %r10, 40(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_add6Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_add6Lbmi2: ## @mcl_fpDbl_add6Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 88(%rdx), %rax - movq %rax, -8(%rsp) ## 8-byte Spill - movq 80(%rdx), %rax - movq %rax, -16(%rsp) ## 8-byte Spill - movq 72(%rdx), %r14 - movq 64(%rdx), %r15 - movq 24(%rsi), %rbp - movq 32(%rsi), %r13 - movq 16(%rdx), %r12 - movq (%rdx), %rbx - movq 8(%rdx), %rax - addq (%rsi), %rbx - adcq 8(%rsi), %rax - adcq 16(%rsi), %r12 - adcq 24(%rdx), %rbp - adcq 32(%rdx), %r13 - movq 56(%rdx), %r11 - movq 48(%rdx), %r9 - movq 40(%rdx), %rdx - movq %rbx, (%rdi) - movq 88(%rsi), %r8 - movq %rax, 8(%rdi) - movq 80(%rsi), %r10 - movq %r12, 16(%rdi) - movq 72(%rsi), %r12 - movq %rbp, 24(%rdi) - movq 40(%rsi), %rax - adcq %rdx, %rax - movq 64(%rsi), %rdx - movq %r13, 32(%rdi) - movq 56(%rsi), %r13 - movq 48(%rsi), %rbp - adcq %r9, %rbp - movq %rax, 40(%rdi) - adcq %r11, %r13 - adcq %r15, %rdx - adcq %r14, %r12 - adcq -16(%rsp), %r10 ## 8-byte Folded Reload - adcq -8(%rsp), %r8 ## 8-byte Folded Reload - sbbq %rax, %rax - andl $1, %eax - movq %rbp, %rsi - subq (%rcx), %rsi - movq %r13, %rbx - sbbq 8(%rcx), %rbx - movq %rdx, %r9 - sbbq 16(%rcx), %r9 - movq %r12, %r11 - sbbq 24(%rcx), %r11 - movq %r10, %r14 - sbbq 32(%rcx), %r14 - movq %r8, %r15 - sbbq 40(%rcx), %r15 - sbbq $0, %rax - andl $1, %eax - cmovneq %rbp, %rsi - movq %rsi, 48(%rdi) - testb %al, %al - cmovneq %r13, %rbx - movq %rbx, 56(%rdi) - cmovneq %rdx, %r9 - movq %r9, 64(%rdi) - cmovneq %r12, %r11 - movq %r11, 72(%rdi) - cmovneq %r10, %r14 - movq %r14, 80(%rdi) - cmovneq %r8, %r15 - movq %r15, 88(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sub6Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sub6Lbmi2: ## @mcl_fpDbl_sub6Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 
88(%rdx), %r9 - movq 80(%rdx), %r10 - movq 72(%rdx), %r14 - movq 16(%rsi), %r8 - movq (%rsi), %r15 - movq 8(%rsi), %r11 - xorl %eax, %eax - subq (%rdx), %r15 - sbbq 8(%rdx), %r11 - sbbq 16(%rdx), %r8 - movq 24(%rsi), %rbx - sbbq 24(%rdx), %rbx - movq 32(%rsi), %r12 - sbbq 32(%rdx), %r12 - movq 64(%rdx), %r13 - movq %r15, (%rdi) - movq 56(%rdx), %rbp - movq %r11, 8(%rdi) - movq 48(%rdx), %r15 - movq 40(%rdx), %rdx - movq %r8, 16(%rdi) - movq 88(%rsi), %r8 - movq %rbx, 24(%rdi) - movq 40(%rsi), %rbx - sbbq %rdx, %rbx - movq 80(%rsi), %r11 - movq %r12, 32(%rdi) - movq 48(%rsi), %rdx - sbbq %r15, %rdx - movq 72(%rsi), %r15 - movq %rbx, 40(%rdi) - movq 64(%rsi), %r12 - movq 56(%rsi), %rsi - sbbq %rbp, %rsi - sbbq %r13, %r12 - sbbq %r14, %r15 - sbbq %r10, %r11 - sbbq %r9, %r8 - movl $0, %ebp - sbbq $0, %rbp - andl $1, %ebp - movq (%rcx), %r14 - cmoveq %rax, %r14 - testb %bpl, %bpl - movq 16(%rcx), %r9 - cmoveq %rax, %r9 - movq 8(%rcx), %rbp - cmoveq %rax, %rbp - movq 40(%rcx), %r10 - cmoveq %rax, %r10 - movq 32(%rcx), %rbx - cmoveq %rax, %rbx - cmovneq 24(%rcx), %rax - addq %rdx, %r14 - movq %r14, 48(%rdi) - adcq %rsi, %rbp - movq %rbp, 56(%rdi) - adcq %r12, %r9 - movq %r9, 64(%rdi) - adcq %r15, %rax - movq %rax, 72(%rdi) - adcq %r11, %rbx - movq %rbx, 80(%rdi) - adcq %r8, %r10 - movq %r10, 88(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mulUnitPre7Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mulUnitPre7Lbmi2: ## @mcl_fp_mulUnitPre7Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - mulxq 48(%rsi), %r8, %r11 - mulxq 40(%rsi), %r9, %r13 - mulxq 32(%rsi), %r10, %rcx - mulxq 8(%rsi), %r12, %r14 - mulxq (%rsi), %r15, %rbx - addq %r12, %rbx - mulxq 24(%rsi), %r12, %rax - mulxq 16(%rsi), %rdx, %rsi - movq %r15, (%rdi) - movq %rbx, 8(%rdi) - adcq %r14, %rdx - movq %rdx, 16(%rdi) - adcq %r12, %rsi - movq %rsi, 24(%rdi) - adcq %r10, %rax - movq %rax, 32(%rdi) - adcq %r9, %rcx - movq %rcx, 40(%rdi) - adcq %r8, %r13 - movq %r13, 48(%rdi) - adcq $0, %r11 - movq %r11, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fpDbl_mulPre7Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_mulPre7Lbmi2: ## @mcl_fpDbl_mulPre7Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %r14 - movq %rsi, %r8 - movq %rdi, %r13 - movq %r13, -48(%rsp) ## 8-byte Spill - movq (%r8), %rcx - movq %rcx, -72(%rsp) ## 8-byte Spill - movq 8(%r8), %rax - movq %rax, -104(%rsp) ## 8-byte Spill - movq (%r14), %rsi - movq %r14, -64(%rsp) ## 8-byte Spill - movq %rax, %rdx - mulxq %rsi, %rbp, %rax - movq %rcx, %rdx - mulxq %rsi, %rdx, %rcx - movq %rdx, -56(%rsp) ## 8-byte Spill - movq 24(%r8), %rdi - movq %rdi, -88(%rsp) ## 8-byte Spill - movq 16(%r8), %rdx - movq %rdx, -96(%rsp) ## 8-byte Spill - addq %rbp, %rcx - mulxq %rsi, %rbx, %rbp - adcq %rax, %rbx - movq %rdi, %rdx - mulxq %rsi, %r12, %rax - adcq %rbp, %r12 - movq 32(%r8), %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - mulxq %rsi, %r9, %rbp - adcq %rax, %r9 - movq 40(%r8), %rdi - movq %rdi, %rdx - mulxq %rsi, %r10, %rax - adcq %rbp, %r10 - movq 48(%r8), %r15 - movq %r15, %rdx - mulxq %rsi, %rsi, %r11 - adcq %rax, %rsi - movq -56(%rsp), %rax ## 8-byte Reload - movq %rax, (%r13) - adcq $0, %r11 - movq 8(%r14), %r13 - movq -72(%rsp), %rdx ## 8-byte Reload - mulxq %r13, %r14, %rax - movq %rax, -72(%rsp) ## 8-byte Spill - addq %rcx, %r14 - movq -104(%rsp), %rdx ## 8-byte Reload - mulxq %r13, %rcx, %rax - movq %rax, -104(%rsp) ## 8-byte 
Spill - adcq %rbx, %rcx - movq -96(%rsp), %rdx ## 8-byte Reload - mulxq %r13, %rbx, %rax - movq %rax, -96(%rsp) ## 8-byte Spill - adcq %r12, %rbx - movq -88(%rsp), %rdx ## 8-byte Reload - mulxq %r13, %rbp, %rax - movq %rax, -88(%rsp) ## 8-byte Spill - adcq %r9, %rbp - movq -80(%rsp), %rdx ## 8-byte Reload - mulxq %r13, %rax, %r9 - adcq %r10, %rax - movq %rdi, %rdx - mulxq %r13, %r10, %rdi - adcq %rsi, %r10 - movq %r15, %rdx - mulxq %r13, %r13, %rdx - adcq %r11, %r13 - sbbq %r12, %r12 - andl $1, %r12d - addq -72(%rsp), %rcx ## 8-byte Folded Reload - adcq -104(%rsp), %rbx ## 8-byte Folded Reload - adcq -96(%rsp), %rbp ## 8-byte Folded Reload - adcq -88(%rsp), %rax ## 8-byte Folded Reload - adcq %r9, %r10 - movq -48(%rsp), %rsi ## 8-byte Reload - movq %r14, 8(%rsi) - adcq %rdi, %r13 - adcq %rdx, %r12 - movq (%r8), %rsi - movq %rsi, -88(%rsp) ## 8-byte Spill - movq 8(%r8), %r11 - movq %r11, -104(%rsp) ## 8-byte Spill - movq -64(%rsp), %rdx ## 8-byte Reload - movq 16(%rdx), %rdi - movq %rsi, %rdx - mulxq %rdi, %r9, %rdx - movq %rdx, -8(%rsp) ## 8-byte Spill - addq %rcx, %r9 - movq %r11, %rdx - mulxq %rdi, %r14, %rcx - movq %rcx, -16(%rsp) ## 8-byte Spill - adcq %rbx, %r14 - movq 16(%r8), %rdx - movq %rdx, -96(%rsp) ## 8-byte Spill - mulxq %rdi, %rsi, %rcx - movq %rcx, -24(%rsp) ## 8-byte Spill - adcq %rbp, %rsi - movq 24(%r8), %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - mulxq %rdi, %rbp, %rcx - movq %rcx, -32(%rsp) ## 8-byte Spill - adcq %rax, %rbp - movq 32(%r8), %rdx - movq %rdx, -72(%rsp) ## 8-byte Spill - mulxq %rdi, %r11, %rax - movq %rax, -40(%rsp) ## 8-byte Spill - adcq %r10, %r11 - movq 40(%r8), %rdx - movq %rdx, -56(%rsp) ## 8-byte Spill - mulxq %rdi, %r15, %rax - adcq %r13, %r15 - movq 48(%r8), %r13 - movq %r13, %rdx - mulxq %rdi, %rcx, %rdx - adcq %r12, %rcx - sbbq %rbx, %rbx - andl $1, %ebx - addq -8(%rsp), %r14 ## 8-byte Folded Reload - adcq -16(%rsp), %rsi ## 8-byte Folded Reload - adcq -24(%rsp), %rbp ## 8-byte Folded Reload - adcq -32(%rsp), %r11 ## 8-byte Folded Reload - adcq -40(%rsp), %r15 ## 8-byte Folded Reload - adcq %rax, %rcx - adcq %rdx, %rbx - movq -48(%rsp), %rax ## 8-byte Reload - movq %r9, 16(%rax) - movq -64(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rdi - movq -88(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %r9, %rax - movq %rax, -88(%rsp) ## 8-byte Spill - addq %r14, %r9 - movq -104(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %rax, %rdx - movq %rdx, -104(%rsp) ## 8-byte Spill - adcq %rsi, %rax - movq -96(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %r14, %rdx - movq %rdx, -96(%rsp) ## 8-byte Spill - adcq %rbp, %r14 - movq -80(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %r10, %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - adcq %r11, %r10 - movq -72(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %rbp, %rsi - adcq %r15, %rbp - movq -56(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %r11, %r15 - adcq %rcx, %r11 - movq %r13, %rdx - mulxq %rdi, %r13, %rcx - adcq %rbx, %r13 - sbbq %r12, %r12 - andl $1, %r12d - addq -88(%rsp), %rax ## 8-byte Folded Reload - adcq -104(%rsp), %r14 ## 8-byte Folded Reload - adcq -96(%rsp), %r10 ## 8-byte Folded Reload - adcq -80(%rsp), %rbp ## 8-byte Folded Reload - adcq %rsi, %r11 - movq -48(%rsp), %rdi ## 8-byte Reload - movq %r9, 24(%rdi) - adcq %r15, %r13 - adcq %rcx, %r12 - movq (%r8), %rdx - movq %rdx, -88(%rsp) ## 8-byte Spill - movq 8(%r8), %rbx - movq %rbx, -104(%rsp) ## 8-byte Spill - movq -64(%rsp), %rcx ## 8-byte Reload - movq 32(%rcx), %rcx - mulxq %rcx, %rsi, %rdx - movq %rdx, -16(%rsp) ## 8-byte Spill - addq %rax, %rsi - movq %rbx, 
%rdx - mulxq %rcx, %r9, %rax - movq %rax, -24(%rsp) ## 8-byte Spill - adcq %r14, %r9 - movq 16(%r8), %rdx - movq %rdx, -96(%rsp) ## 8-byte Spill - mulxq %rcx, %rax, %rdx - movq %rdx, -32(%rsp) ## 8-byte Spill - adcq %r10, %rax - movq 24(%r8), %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - mulxq %rcx, %r15, %rdx - movq %rdx, -40(%rsp) ## 8-byte Spill - adcq %rbp, %r15 - movq 32(%r8), %rdx - movq %rdx, -72(%rsp) ## 8-byte Spill - mulxq %rcx, %r10, %rbp - adcq %r11, %r10 - movq 40(%r8), %rdx - movq %rdx, -56(%rsp) ## 8-byte Spill - mulxq %rcx, %r11, %rbx - adcq %r13, %r11 - movq 48(%r8), %rdx - movq %rdx, -8(%rsp) ## 8-byte Spill - mulxq %rcx, %r14, %rcx - adcq %r12, %r14 - sbbq %r12, %r12 - andl $1, %r12d - addq -16(%rsp), %r9 ## 8-byte Folded Reload - adcq -24(%rsp), %rax ## 8-byte Folded Reload - adcq -32(%rsp), %r15 ## 8-byte Folded Reload - adcq -40(%rsp), %r10 ## 8-byte Folded Reload - adcq %rbp, %r11 - adcq %rbx, %r14 - adcq %rcx, %r12 - movq %rsi, 32(%rdi) - movq -64(%rsp), %rsi ## 8-byte Reload - movq 40(%rsi), %rdi - movq -88(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %r13, %rcx - movq %rcx, -88(%rsp) ## 8-byte Spill - addq %r9, %r13 - movq -104(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %rcx, %rdx - movq %rdx, -104(%rsp) ## 8-byte Spill - adcq %rax, %rcx - movq -96(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %rax, %rdx - movq %rdx, -96(%rsp) ## 8-byte Spill - adcq %r15, %rax - movq -80(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %rbx, %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - adcq %r10, %rbx - movq -72(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %rbp, %r15 - adcq %r11, %rbp - movq -56(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %r9, %r11 - adcq %r14, %r9 - movq -8(%rsp), %rdx ## 8-byte Reload - mulxq %rdi, %r10, %rdx - adcq %r12, %r10 - sbbq %rdi, %rdi - andl $1, %edi - addq -88(%rsp), %rcx ## 8-byte Folded Reload - adcq -104(%rsp), %rax ## 8-byte Folded Reload - adcq -96(%rsp), %rbx ## 8-byte Folded Reload - adcq -80(%rsp), %rbp ## 8-byte Folded Reload - adcq %r15, %r9 - movq -48(%rsp), %r14 ## 8-byte Reload - movq %r13, 40(%r14) - adcq %r11, %r10 - adcq %rdx, %rdi - movq 48(%rsi), %rdx - mulxq (%r8), %r11, %rsi - movq %rsi, -64(%rsp) ## 8-byte Spill - addq %rcx, %r11 - mulxq 8(%r8), %rsi, %r15 - adcq %rax, %rsi - mulxq 16(%r8), %rcx, %rax - movq %rax, -104(%rsp) ## 8-byte Spill - adcq %rbx, %rcx - mulxq 24(%r8), %rbx, %r12 - adcq %rbp, %rbx - mulxq 32(%r8), %rbp, %r13 - adcq %r9, %rbp - mulxq 40(%r8), %rax, %r9 - adcq %r10, %rax - mulxq 48(%r8), %rdx, %r8 - adcq %rdi, %rdx - sbbq %r10, %r10 - andl $1, %r10d - addq -64(%rsp), %rsi ## 8-byte Folded Reload - adcq %r15, %rcx - movq %r11, 48(%r14) - movq %rsi, 56(%r14) - movq %rcx, 64(%r14) - adcq -104(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, 72(%r14) - adcq %r12, %rbp - movq %rbp, 80(%r14) - adcq %r13, %rax - movq %rax, 88(%r14) - adcq %r9, %rdx - movq %rdx, 96(%r14) - adcq %r8, %r10 - movq %r10, 104(%r14) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sqrPre7Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre7Lbmi2: ## @mcl_fpDbl_sqrPre7Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, -40(%rsp) ## 8-byte Spill - movq 16(%rsi), %rdx - movq %rdx, -88(%rsp) ## 8-byte Spill - movq (%rsi), %rcx - movq 8(%rsi), %rax - mulxq %rcx, %r8, %r10 - movq 24(%rsi), %rbx - movq %rbx, -96(%rsp) ## 8-byte Spill - movq %rax, %rdx - mulxq %rcx, %r12, %rbp - movq %rbp, -48(%rsp) ## 8-byte Spill - movq %rcx, %rdx - mulxq %rcx, %rdx, %rdi - movq 
%rdx, -80(%rsp) ## 8-byte Spill - addq %r12, %rdi - adcq %rbp, %r8 - movq %rbx, %rdx - mulxq %rcx, %rbp, %r9 - adcq %r10, %rbp - movq 32(%rsi), %rdx - movq %rdx, -72(%rsp) ## 8-byte Spill - mulxq %rcx, %r11, %r14 - adcq %r9, %r11 - movq 40(%rsi), %rdx - movq %rdx, -56(%rsp) ## 8-byte Spill - mulxq %rcx, %r10, %r15 - adcq %r14, %r10 - movq 48(%rsi), %r14 - movq %r14, %rdx - mulxq %rcx, %rcx, %r13 - adcq %r15, %rcx - movq -40(%rsp), %rdx ## 8-byte Reload - movq -80(%rsp), %rbx ## 8-byte Reload - movq %rbx, (%rdx) - adcq $0, %r13 - addq %r12, %rdi - movq %rax, %rdx - mulxq %rax, %r12, %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - adcq %r8, %r12 - movq -88(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %r8, %rdx - movq %rdx, -88(%rsp) ## 8-byte Spill - adcq %rbp, %r8 - movq -96(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %r9, %rbp - adcq %r11, %r9 - movq -72(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %r15, %rdx - movq %rdx, -96(%rsp) ## 8-byte Spill - adcq %r10, %r15 - movq -56(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %r11, %rbx - adcq %rcx, %r11 - movq %r14, %rdx - mulxq %rax, %r14, %rax - adcq %r13, %r14 - sbbq %r13, %r13 - andl $1, %r13d - addq -48(%rsp), %r12 ## 8-byte Folded Reload - adcq -80(%rsp), %r8 ## 8-byte Folded Reload - adcq -88(%rsp), %r9 ## 8-byte Folded Reload - adcq %rbp, %r15 - movq -40(%rsp), %rcx ## 8-byte Reload - movq %rdi, 8(%rcx) - adcq -96(%rsp), %r11 ## 8-byte Folded Reload - adcq %rbx, %r14 - adcq %rax, %r13 - movq (%rsi), %rdx - movq %rdx, -96(%rsp) ## 8-byte Spill - movq 8(%rsi), %rcx - movq %rcx, -88(%rsp) ## 8-byte Spill - movq 16(%rsi), %rbx - mulxq %rbx, %rax, %rdx - movq %rdx, -64(%rsp) ## 8-byte Spill - addq %r12, %rax - movq %rax, -48(%rsp) ## 8-byte Spill - movq %rcx, %rdx - mulxq %rbx, %r10, %rax - movq %rax, -16(%rsp) ## 8-byte Spill - adcq %r8, %r10 - movq %rbx, %rdx - mulxq %rbx, %r12, %rax - movq %rax, -24(%rsp) ## 8-byte Spill - adcq %r9, %r12 - movq 24(%rsi), %rax - movq %rax, %rdx - mulxq %rbx, %r8, %rdi - movq %rdi, -56(%rsp) ## 8-byte Spill - adcq %r8, %r15 - movq 32(%rsi), %rdx - movq %rdx, -72(%rsp) ## 8-byte Spill - mulxq %rbx, %rcx, %rdx - movq %rdx, -32(%rsp) ## 8-byte Spill - adcq %r11, %rcx - movq 40(%rsi), %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - mulxq %rbx, %rbp, %r11 - adcq %r14, %rbp - movq 48(%rsi), %r14 - movq %r14, %rdx - mulxq %rbx, %r9, %rdx - adcq %r13, %r9 - sbbq %rbx, %rbx - andl $1, %ebx - addq -64(%rsp), %r10 ## 8-byte Folded Reload - adcq -16(%rsp), %r12 ## 8-byte Folded Reload - adcq -24(%rsp), %r15 ## 8-byte Folded Reload - adcq %rdi, %rcx - adcq -32(%rsp), %rbp ## 8-byte Folded Reload - adcq %r11, %r9 - adcq %rdx, %rbx - movq -96(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %rdi, %rdx - movq %rdx, -96(%rsp) ## 8-byte Spill - addq %r10, %rdi - movq -88(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %r11, %rdx - movq %rdx, -88(%rsp) ## 8-byte Spill - adcq %r12, %r11 - adcq %r8, %r15 - movq %rax, %rdx - mulxq %rax, %r8, %rdx - movq %rdx, -64(%rsp) ## 8-byte Spill - adcq %rcx, %r8 - movq -72(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %r13, %rcx - movq %rcx, -72(%rsp) ## 8-byte Spill - adcq %rbp, %r13 - movq -80(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %r12, %rbp - adcq %r9, %r12 - movq %r14, %rdx - mulxq %rax, %rcx, %rax - adcq %rbx, %rcx - sbbq %r10, %r10 - andl $1, %r10d - addq -96(%rsp), %r11 ## 8-byte Folded Reload - adcq -88(%rsp), %r15 ## 8-byte Folded Reload - adcq -56(%rsp), %r8 ## 8-byte Folded Reload - adcq -64(%rsp), %r13 ## 8-byte Folded Reload - movq -40(%rsp), %rdx ## 8-byte Reload - movq -48(%rsp), %rbx ## 8-byte 
Reload - movq %rbx, 16(%rdx) - movq %rdi, 24(%rdx) - adcq -72(%rsp), %r12 ## 8-byte Folded Reload - adcq %rbp, %rcx - adcq %rax, %r10 - movq (%rsi), %rdx - movq %rdx, -96(%rsp) ## 8-byte Spill - movq 8(%rsi), %rdi - movq %rdi, -88(%rsp) ## 8-byte Spill - movq 32(%rsi), %rbx - mulxq %rbx, %rax, %rdx - movq %rdx, -24(%rsp) ## 8-byte Spill - addq %r11, %rax - movq %rax, -48(%rsp) ## 8-byte Spill - movq %rdi, %rdx - mulxq %rbx, %r9, %rax - movq %rax, -32(%rsp) ## 8-byte Spill - adcq %r15, %r9 - movq 16(%rsi), %rdx - movq %rdx, -72(%rsp) ## 8-byte Spill - mulxq %rbx, %r15, %rax - movq %rax, -8(%rsp) ## 8-byte Spill - adcq %r8, %r15 - movq 24(%rsi), %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - mulxq %rbx, %r8, %rbp - adcq %r13, %r8 - movq %rbx, %rdx - mulxq %rbx, %r13, %r14 - adcq %r12, %r13 - movq 40(%rsi), %rax - movq %rax, %rdx - mulxq %rbx, %rdx, %rdi - movq %rdx, -16(%rsp) ## 8-byte Spill - movq %rdi, -56(%rsp) ## 8-byte Spill - adcq %rdx, %rcx - movq 48(%rsi), %rdx - movq %rdx, -64(%rsp) ## 8-byte Spill - mulxq %rbx, %r11, %rdx - adcq %r10, %r11 - sbbq %r12, %r12 - andl $1, %r12d - addq -24(%rsp), %r9 ## 8-byte Folded Reload - adcq -32(%rsp), %r15 ## 8-byte Folded Reload - adcq -8(%rsp), %r8 ## 8-byte Folded Reload - adcq %rbp, %r13 - adcq %r14, %rcx - adcq %rdi, %r11 - adcq %rdx, %r12 - movq -96(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %r14, %rdi - addq %r9, %r14 - movq -88(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %rbx, %rdx - movq %rdx, -88(%rsp) ## 8-byte Spill - adcq %r15, %rbx - movq -72(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %rbp, %rdx - movq %rdx, -96(%rsp) ## 8-byte Spill - adcq %r8, %rbp - movq -80(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %r10, %r15 - adcq %r13, %r10 - adcq -16(%rsp), %rcx ## 8-byte Folded Reload - movq %rax, %rdx - mulxq %rax, %r9, %r13 - adcq %r11, %r9 - movq -64(%rsp), %rdx ## 8-byte Reload - mulxq %rax, %rax, %r11 - adcq %r12, %rax - sbbq %r8, %r8 - andl $1, %r8d - addq %rdi, %rbx - adcq -88(%rsp), %rbp ## 8-byte Folded Reload - adcq -96(%rsp), %r10 ## 8-byte Folded Reload - adcq %r15, %rcx - movq -40(%rsp), %rdi ## 8-byte Reload - movq -48(%rsp), %rdx ## 8-byte Reload - movq %rdx, 32(%rdi) - movq %r14, 40(%rdi) - adcq -56(%rsp), %r9 ## 8-byte Folded Reload - adcq %r13, %rax - adcq %r11, %r8 - movq 48(%rsi), %rdx - mulxq (%rsi), %r12, %r11 - addq %rbx, %r12 - mulxq 8(%rsi), %rbx, %r14 - adcq %rbp, %rbx - mulxq 16(%rsi), %rbp, %r15 - adcq %r10, %rbp - mulxq 24(%rsi), %rdi, %r10 - adcq %rcx, %rdi - mulxq 32(%rsi), %rcx, %r13 - adcq %r9, %rcx - mulxq 40(%rsi), %rsi, %r9 - adcq %rax, %rsi - mulxq %rdx, %rdx, %rax - adcq %r8, %rdx - sbbq %r8, %r8 - andl $1, %r8d - addq %r11, %rbx - adcq %r14, %rbp - movq -40(%rsp), %r11 ## 8-byte Reload - movq %r12, 48(%r11) - movq %rbx, 56(%r11) - movq %rbp, 64(%r11) - adcq %r15, %rdi - movq %rdi, 72(%r11) - adcq %r10, %rcx - movq %rcx, 80(%r11) - adcq %r13, %rsi - movq %rsi, 88(%r11) - adcq %r9, %rdx - movq %rdx, 96(%r11) - adcq %rax, %r8 - movq %r8, 104(%r11) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mont7Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mont7Lbmi2: ## @mcl_fp_mont7Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $56, %rsp - movq %rdx, -56(%rsp) ## 8-byte Spill - movq %rdi, 48(%rsp) ## 8-byte Spill - movq 48(%rsi), %rdi - movq %rdi, -64(%rsp) ## 8-byte Spill - movq (%rdx), %rax - movq %rdi, %rdx - mulxq %rax, %rdx, %r13 - movq %rdx, -40(%rsp) ## 8-byte Spill - movq 40(%rsi), %rdx - movq %rdx, -72(%rsp) ## 
8-byte Spill - mulxq %rax, %rdx, %r8 - movq %rdx, -48(%rsp) ## 8-byte Spill - movq 32(%rsi), %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - mulxq %rax, %r10, %rdi - movq 24(%rsi), %rdx - movq %rdx, -32(%rsp) ## 8-byte Spill - mulxq %rax, %r14, %rbp - movq 16(%rsi), %rdx - movq %rdx, 32(%rsp) ## 8-byte Spill - mulxq %rax, %r12, %r15 - movq (%rsi), %rbx - movq %rbx, 24(%rsp) ## 8-byte Spill - movq 8(%rsi), %rdx - movq %rdx, 16(%rsp) ## 8-byte Spill - mulxq %rax, %rsi, %r11 - movq %rbx, %rdx - mulxq %rax, %rdx, %r9 - movq %rdx, -96(%rsp) ## 8-byte Spill - addq %rsi, %r9 - adcq %r12, %r11 - adcq %r14, %r15 - adcq %r10, %rbp - movq %rbp, -112(%rsp) ## 8-byte Spill - adcq -48(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, -104(%rsp) ## 8-byte Spill - adcq -40(%rsp), %r8 ## 8-byte Folded Reload - movq %r8, -128(%rsp) ## 8-byte Spill - adcq $0, %r13 - movq %r13, -120(%rsp) ## 8-byte Spill - movq -8(%rcx), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - imulq %rax, %rdx - movq 32(%rcx), %rax - movq %rax, -40(%rsp) ## 8-byte Spill - mulxq %rax, %rbx, %r13 - movq 16(%rcx), %rsi - movq %rsi, -48(%rsp) ## 8-byte Spill - mulxq %rsi, %r14, %rbp - movq 8(%rcx), %rsi - movq %rsi, 8(%rsp) ## 8-byte Spill - mulxq %rsi, %rsi, %rax - movq (%rcx), %rdi - movq %rdi, (%rsp) ## 8-byte Spill - mulxq %rdi, %r8, %r12 - addq %rsi, %r12 - adcq %r14, %rax - movq %rax, %rdi - movq 24(%rcx), %rsi - movq %rsi, -8(%rsp) ## 8-byte Spill - mulxq %rsi, %r10, %r14 - adcq %rbp, %r10 - adcq %rbx, %r14 - movq 40(%rcx), %rsi - movq %rsi, -16(%rsp) ## 8-byte Spill - mulxq %rsi, %rbp, %rsi - adcq %r13, %rbp - movq 48(%rcx), %rax - movq %rax, -24(%rsp) ## 8-byte Spill - mulxq %rax, %rax, %rbx - adcq %rsi, %rax - adcq $0, %rbx - addq -96(%rsp), %r8 ## 8-byte Folded Reload - adcq %r9, %r12 - adcq %r11, %rdi - movq %rdi, -96(%rsp) ## 8-byte Spill - adcq %r15, %r10 - adcq -112(%rsp), %r14 ## 8-byte Folded Reload - adcq -104(%rsp), %rbp ## 8-byte Folded Reload - adcq -128(%rsp), %rax ## 8-byte Folded Reload - adcq -120(%rsp), %rbx ## 8-byte Folded Reload - sbbq %rsi, %rsi - andl $1, %esi - movq -56(%rsp), %rcx ## 8-byte Reload - movq 8(%rcx), %rdx - mulxq -64(%rsp), %rdi, %rcx ## 8-byte Folded Reload - movq %rdi, -104(%rsp) ## 8-byte Spill - movq %rcx, -120(%rsp) ## 8-byte Spill - mulxq -72(%rsp), %rdi, %rcx ## 8-byte Folded Reload - movq %rdi, -88(%rsp) ## 8-byte Spill - movq %rcx, -128(%rsp) ## 8-byte Spill - mulxq 16(%rsp), %r9, %r8 ## 8-byte Folded Reload - mulxq 24(%rsp), %rdi, %r11 ## 8-byte Folded Reload - movq %rdi, -112(%rsp) ## 8-byte Spill - addq %r9, %r11 - mulxq 32(%rsp), %rcx, %r9 ## 8-byte Folded Reload - adcq %r8, %rcx - movq %rcx, %rdi - mulxq -32(%rsp), %r13, %rcx ## 8-byte Folded Reload - adcq %r9, %r13 - mulxq -80(%rsp), %r8, %r15 ## 8-byte Folded Reload - adcq %rcx, %r8 - adcq -88(%rsp), %r15 ## 8-byte Folded Reload - movq -128(%rsp), %rdx ## 8-byte Reload - adcq -104(%rsp), %rdx ## 8-byte Folded Reload - movq -120(%rsp), %rcx ## 8-byte Reload - adcq $0, %rcx - movq -112(%rsp), %r9 ## 8-byte Reload - addq %r12, %r9 - movq %r9, -112(%rsp) ## 8-byte Spill - movq %r11, %r12 - adcq -96(%rsp), %r12 ## 8-byte Folded Reload - adcq %r10, %rdi - movq %rdi, -88(%rsp) ## 8-byte Spill - adcq %r14, %r13 - adcq %rbp, %r8 - adcq %rax, %r15 - adcq %rbx, %rdx - movq %rdx, -128(%rsp) ## 8-byte Spill - adcq %rsi, %rcx - movq %rcx, -120(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -96(%rsp) ## 8-byte Spill - movq %r9, %rdx - imulq 40(%rsp), %rdx ## 8-byte Folded Reload - mulxq -24(%rsp), %r10, %rax ## 8-byte Folded 
Reload - movq %rax, -104(%rsp) ## 8-byte Spill - mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload - mulxq 8(%rsp), %rdi, %rbx ## 8-byte Folded Reload - mulxq (%rsp), %r14, %r9 ## 8-byte Folded Reload - addq %rdi, %r9 - mulxq -48(%rsp), %rbp, %r11 ## 8-byte Folded Reload - adcq %rbx, %rbp - adcq %rcx, %r11 - mulxq -40(%rsp), %rbx, %rsi ## 8-byte Folded Reload - adcq %rax, %rbx - mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded Reload - adcq %rsi, %rax - adcq %r10, %rcx - movq -104(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq -112(%rsp), %r14 ## 8-byte Folded Reload - adcq %r12, %r9 - adcq -88(%rsp), %rbp ## 8-byte Folded Reload - adcq %r13, %r11 - adcq %r8, %rbx - adcq %r15, %rax - adcq -128(%rsp), %rcx ## 8-byte Folded Reload - adcq -120(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -104(%rsp) ## 8-byte Spill - adcq $0, -96(%rsp) ## 8-byte Folded Spill - movq -56(%rsp), %rdx ## 8-byte Reload - movq 16(%rdx), %rdx - mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload - movq %rdi, -112(%rsp) ## 8-byte Spill - movq %rsi, -120(%rsp) ## 8-byte Spill - mulxq -72(%rsp), %rdi, %rsi ## 8-byte Folded Reload - movq %rdi, -88(%rsp) ## 8-byte Spill - movq %rsi, -128(%rsp) ## 8-byte Spill - mulxq 32(%rsp), %rdi, %r10 ## 8-byte Folded Reload - mulxq 16(%rsp), %rsi, %r13 ## 8-byte Folded Reload - mulxq 24(%rsp), %r8, %r15 ## 8-byte Folded Reload - addq %rsi, %r15 - adcq %rdi, %r13 - mulxq -32(%rsp), %r12, %rsi ## 8-byte Folded Reload - adcq %r10, %r12 - mulxq -80(%rsp), %r10, %r14 ## 8-byte Folded Reload - adcq %rsi, %r10 - adcq -88(%rsp), %r14 ## 8-byte Folded Reload - movq -128(%rsp), %rsi ## 8-byte Reload - adcq -112(%rsp), %rsi ## 8-byte Folded Reload - movq -120(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq %r9, %r8 - movq %r8, -112(%rsp) ## 8-byte Spill - adcq %rbp, %r15 - adcq %r11, %r13 - adcq %rbx, %r12 - adcq %rax, %r10 - adcq %rcx, %r14 - adcq -104(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, -128(%rsp) ## 8-byte Spill - adcq -96(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, %rbx - movq %r8, %rdx - imulq 40(%rsp), %rdx ## 8-byte Folded Reload - mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload - movq %rcx, -96(%rsp) ## 8-byte Spill - movq %rax, -104(%rsp) ## 8-byte Spill - mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload - mulxq 8(%rsp), %rbp, %rsi ## 8-byte Folded Reload - mulxq (%rsp), %r11, %r8 ## 8-byte Folded Reload - addq %rbp, %r8 - mulxq -48(%rsp), %rbp, %r9 ## 8-byte Folded Reload - adcq %rsi, %rbp - adcq %rcx, %r9 - mulxq -40(%rsp), %rsi, %rdi ## 8-byte Folded Reload - adcq %rax, %rsi - mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded Reload - adcq %rdi, %rax - adcq -96(%rsp), %rcx ## 8-byte Folded Reload - movq -104(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq -112(%rsp), %r11 ## 8-byte Folded Reload - adcq %r15, %r8 - adcq %r13, %rbp - adcq %r12, %r9 - adcq %r10, %rsi - adcq %r14, %rax - adcq -128(%rsp), %rcx ## 8-byte Folded Reload - adcq -120(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -104(%rsp) ## 8-byte Spill - adcq $0, %rbx - movq %rbx, -128(%rsp) ## 8-byte Spill - movq -56(%rsp), %rdx ## 8-byte Reload - movq 24(%rdx), %rdx - mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload - movq %rbx, -96(%rsp) ## 8-byte Spill - movq %rdi, -120(%rsp) ## 8-byte Spill - mulxq -72(%rsp), %rdi, %r13 ## 8-byte Folded Reload - movq %rdi, -88(%rsp) ## 8-byte Spill - mulxq 32(%rsp), %r10, %r11 ## 8-byte Folded Reload - mulxq 16(%rsp), %rdi, %r15 ## 8-byte Folded Reload - mulxq 24(%rsp), 
%rbx, %r12 ## 8-byte Folded Reload - movq %rbx, -112(%rsp) ## 8-byte Spill - addq %rdi, %r12 - adcq %r10, %r15 - mulxq -32(%rsp), %rbx, %rdi ## 8-byte Folded Reload - adcq %r11, %rbx - mulxq -80(%rsp), %r10, %r14 ## 8-byte Folded Reload - adcq %rdi, %r10 - adcq -88(%rsp), %r14 ## 8-byte Folded Reload - adcq -96(%rsp), %r13 ## 8-byte Folded Reload - movq -120(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - movq -112(%rsp), %rdi ## 8-byte Reload - addq %r8, %rdi - movq %rdi, -112(%rsp) ## 8-byte Spill - adcq %rbp, %r12 - adcq %r9, %r15 - adcq %rsi, %rbx - adcq %rax, %r10 - adcq %rcx, %r14 - adcq -104(%rsp), %r13 ## 8-byte Folded Reload - adcq -128(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -104(%rsp) ## 8-byte Spill - movq %rdi, %rdx - imulq 40(%rsp), %rdx ## 8-byte Folded Reload - mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload - movq %rcx, -96(%rsp) ## 8-byte Spill - movq %rax, -128(%rsp) ## 8-byte Spill - mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload - mulxq 8(%rsp), %rbp, %rsi ## 8-byte Folded Reload - mulxq (%rsp), %r11, %r8 ## 8-byte Folded Reload - addq %rbp, %r8 - mulxq -48(%rsp), %rbp, %r9 ## 8-byte Folded Reload - adcq %rsi, %rbp - adcq %rcx, %r9 - mulxq -40(%rsp), %rsi, %rdi ## 8-byte Folded Reload - adcq %rax, %rsi - mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded Reload - adcq %rdi, %rax - adcq -96(%rsp), %rcx ## 8-byte Folded Reload - movq -128(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq -112(%rsp), %r11 ## 8-byte Folded Reload - adcq %r12, %r8 - adcq %r15, %rbp - adcq %rbx, %r9 - adcq %r10, %rsi - adcq %r14, %rax - adcq %r13, %rcx - adcq -120(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - adcq $0, -104(%rsp) ## 8-byte Folded Spill - movq -56(%rsp), %rdx ## 8-byte Reload - movq 32(%rdx), %rdx - mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload - movq %rbx, -112(%rsp) ## 8-byte Spill - movq %rdi, -120(%rsp) ## 8-byte Spill - mulxq -72(%rsp), %rdi, %r11 ## 8-byte Folded Reload - movq %rdi, -96(%rsp) ## 8-byte Spill - mulxq 32(%rsp), %r10, %r13 ## 8-byte Folded Reload - mulxq 16(%rsp), %rdi, %r15 ## 8-byte Folded Reload - mulxq 24(%rsp), %rbx, %r12 ## 8-byte Folded Reload - addq %rdi, %r12 - adcq %r10, %r15 - mulxq -32(%rsp), %r10, %rdi ## 8-byte Folded Reload - adcq %r13, %r10 - mulxq -80(%rsp), %r13, %r14 ## 8-byte Folded Reload - adcq %rdi, %r13 - adcq -96(%rsp), %r14 ## 8-byte Folded Reload - adcq -112(%rsp), %r11 ## 8-byte Folded Reload - movq -120(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq %r8, %rbx - movq %rbx, -96(%rsp) ## 8-byte Spill - adcq %rbp, %r12 - adcq %r9, %r15 - adcq %rsi, %r10 - adcq %rax, %r13 - adcq %rcx, %r14 - adcq -128(%rsp), %r11 ## 8-byte Folded Reload - movq %r11, -88(%rsp) ## 8-byte Spill - adcq -104(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -112(%rsp) ## 8-byte Spill - movq %rbx, %rdx - imulq 40(%rsp), %rdx ## 8-byte Folded Reload - mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload - movq %rcx, -128(%rsp) ## 8-byte Spill - movq %rax, -104(%rsp) ## 8-byte Spill - mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload - mulxq 8(%rsp), %rbp, %rsi ## 8-byte Folded Reload - mulxq (%rsp), %r9, %r11 ## 8-byte Folded Reload - addq %rbp, %r11 - mulxq -48(%rsp), %rbp, %r8 ## 8-byte Folded Reload - adcq %rsi, %rbp - adcq %rcx, %r8 - mulxq -40(%rsp), %rsi, %rdi ## 8-byte Folded Reload - adcq %rax, %rsi - mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded 
Reload - adcq %rdi, %rax - adcq -128(%rsp), %rcx ## 8-byte Folded Reload - movq -104(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq -96(%rsp), %r9 ## 8-byte Folded Reload - adcq %r12, %r11 - adcq %r15, %rbp - adcq %r10, %r8 - adcq %r13, %rsi - adcq %r14, %rax - adcq -88(%rsp), %rcx ## 8-byte Folded Reload - adcq -120(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -104(%rsp) ## 8-byte Spill - adcq $0, -112(%rsp) ## 8-byte Folded Spill - movq -56(%rsp), %rdx ## 8-byte Reload - movq 40(%rdx), %rdx - mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload - movq %rbx, -96(%rsp) ## 8-byte Spill - movq %rdi, -120(%rsp) ## 8-byte Spill - mulxq -72(%rsp), %rbx, %rdi ## 8-byte Folded Reload - movq %rbx, -88(%rsp) ## 8-byte Spill - movq %rdi, -128(%rsp) ## 8-byte Spill - mulxq 32(%rsp), %rbx, %r10 ## 8-byte Folded Reload - mulxq 16(%rsp), %rdi, %r13 ## 8-byte Folded Reload - mulxq 24(%rsp), %r9, %r12 ## 8-byte Folded Reload - addq %rdi, %r12 - adcq %rbx, %r13 - mulxq -32(%rsp), %r15, %rdi ## 8-byte Folded Reload - adcq %r10, %r15 - mulxq -80(%rsp), %r10, %r14 ## 8-byte Folded Reload - adcq %rdi, %r10 - adcq -88(%rsp), %r14 ## 8-byte Folded Reload - movq -128(%rsp), %rdi ## 8-byte Reload - adcq -96(%rsp), %rdi ## 8-byte Folded Reload - movq -120(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq %r11, %r9 - movq %r9, -96(%rsp) ## 8-byte Spill - adcq %rbp, %r12 - adcq %r8, %r13 - adcq %rsi, %r15 - adcq %rax, %r10 - adcq %rcx, %r14 - adcq -104(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, -128(%rsp) ## 8-byte Spill - adcq -112(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -112(%rsp) ## 8-byte Spill - movq %r9, %rdx - imulq 40(%rsp), %rdx ## 8-byte Folded Reload - mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload - movq %rcx, -88(%rsp) ## 8-byte Spill - movq %rax, -104(%rsp) ## 8-byte Spill - mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload - mulxq 8(%rsp), %rdi, %rsi ## 8-byte Folded Reload - mulxq (%rsp), %r11, %rbx ## 8-byte Folded Reload - addq %rdi, %rbx - mulxq -48(%rsp), %r8, %r9 ## 8-byte Folded Reload - adcq %rsi, %r8 - adcq %rcx, %r9 - mulxq -40(%rsp), %rdi, %rbp ## 8-byte Folded Reload - adcq %rax, %rdi - mulxq -16(%rsp), %rcx, %rsi ## 8-byte Folded Reload - adcq %rbp, %rcx - adcq -88(%rsp), %rsi ## 8-byte Folded Reload - movq -104(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - addq -96(%rsp), %r11 ## 8-byte Folded Reload - adcq %r12, %rbx - adcq %r13, %r8 - adcq %r15, %r9 - adcq %r10, %rdi - adcq %r14, %rcx - adcq -128(%rsp), %rsi ## 8-byte Folded Reload - adcq -120(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -104(%rsp) ## 8-byte Spill - movq -112(%rsp), %r12 ## 8-byte Reload - adcq $0, %r12 - movq -56(%rsp), %rax ## 8-byte Reload - movq 48(%rax), %rdx - mulxq -64(%rsp), %rbp, %rax ## 8-byte Folded Reload - movq %rbp, -120(%rsp) ## 8-byte Spill - movq %rax, -56(%rsp) ## 8-byte Spill - mulxq -72(%rsp), %rbp, %rax ## 8-byte Folded Reload - movq %rbp, -128(%rsp) ## 8-byte Spill - movq %rax, -64(%rsp) ## 8-byte Spill - mulxq -80(%rsp), %rbp, %rax ## 8-byte Folded Reload - movq %rbp, -112(%rsp) ## 8-byte Spill - movq %rax, -72(%rsp) ## 8-byte Spill - mulxq -32(%rsp), %r13, %rbp ## 8-byte Folded Reload - mulxq 32(%rsp), %r14, %r15 ## 8-byte Folded Reload - mulxq 16(%rsp), %rax, %r11 ## 8-byte Folded Reload - mulxq 24(%rsp), %rdx, %r10 ## 8-byte Folded Reload - movq %rdx, -80(%rsp) ## 8-byte Spill - addq %rax, %r10 - adcq %r14, %r11 - adcq %r13, %r15 - adcq -112(%rsp), %rbp ## 8-byte Folded Reload - movq 
-72(%rsp), %r14 ## 8-byte Reload - adcq -128(%rsp), %r14 ## 8-byte Folded Reload - movq -64(%rsp), %rdx ## 8-byte Reload - adcq -120(%rsp), %rdx ## 8-byte Folded Reload - movq -56(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - movq -80(%rsp), %r13 ## 8-byte Reload - addq %rbx, %r13 - movq %r13, -80(%rsp) ## 8-byte Spill - adcq %r8, %r10 - adcq %r9, %r11 - adcq %rdi, %r15 - adcq %rcx, %rbp - movq %rbp, -32(%rsp) ## 8-byte Spill - adcq %rsi, %r14 - movq %r14, -72(%rsp) ## 8-byte Spill - adcq -104(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -64(%rsp) ## 8-byte Spill - adcq %r12, %rax - movq %rax, -56(%rsp) ## 8-byte Spill - sbbq %rdi, %rdi - movq 40(%rsp), %rdx ## 8-byte Reload - imulq %r13, %rdx - mulxq -8(%rsp), %rbp, %rsi ## 8-byte Folded Reload - mulxq 8(%rsp), %rcx, %rbx ## 8-byte Folded Reload - mulxq (%rsp), %r13, %rax ## 8-byte Folded Reload - addq %rcx, %rax - mulxq -48(%rsp), %rcx, %r9 ## 8-byte Folded Reload - adcq %rbx, %rcx - adcq %rbp, %r9 - mulxq -40(%rsp), %rbp, %rbx ## 8-byte Folded Reload - adcq %rsi, %rbp - mulxq -16(%rsp), %rsi, %r14 ## 8-byte Folded Reload - adcq %rbx, %rsi - mulxq -24(%rsp), %rdx, %rbx ## 8-byte Folded Reload - adcq %r14, %rdx - adcq $0, %rbx - andl $1, %edi - addq -80(%rsp), %r13 ## 8-byte Folded Reload - adcq %r10, %rax - adcq %r11, %rcx - adcq %r15, %r9 - adcq -32(%rsp), %rbp ## 8-byte Folded Reload - adcq -72(%rsp), %rsi ## 8-byte Folded Reload - adcq -64(%rsp), %rdx ## 8-byte Folded Reload - adcq -56(%rsp), %rbx ## 8-byte Folded Reload - adcq $0, %rdi - movq %rax, %r8 - subq (%rsp), %r8 ## 8-byte Folded Reload - movq %rcx, %r10 - sbbq 8(%rsp), %r10 ## 8-byte Folded Reload - movq %r9, %r11 - sbbq -48(%rsp), %r11 ## 8-byte Folded Reload - movq %rbp, %r14 - sbbq -8(%rsp), %r14 ## 8-byte Folded Reload - movq %rsi, %r15 - sbbq -40(%rsp), %r15 ## 8-byte Folded Reload - movq %rdx, %r12 - sbbq -16(%rsp), %r12 ## 8-byte Folded Reload - movq %rbx, %r13 - sbbq -24(%rsp), %r13 ## 8-byte Folded Reload - sbbq $0, %rdi - andl $1, %edi - cmovneq %rbx, %r13 - testb %dil, %dil - cmovneq %rax, %r8 - movq 48(%rsp), %rax ## 8-byte Reload - movq %r8, (%rax) - cmovneq %rcx, %r10 - movq %r10, 8(%rax) - cmovneq %r9, %r11 - movq %r11, 16(%rax) - cmovneq %rbp, %r14 - movq %r14, 24(%rax) - cmovneq %rsi, %r15 - movq %r15, 32(%rax) - cmovneq %rdx, %r12 - movq %r12, 40(%rax) - movq %r13, 48(%rax) - addq $56, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montNF7Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montNF7Lbmi2: ## @mcl_fp_montNF7Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $40, %rsp - movq %rdx, -88(%rsp) ## 8-byte Spill - movq %rdi, 32(%rsp) ## 8-byte Spill - movq (%rsi), %rax - movq %rax, -112(%rsp) ## 8-byte Spill - movq 8(%rsi), %rdi - movq %rdi, -96(%rsp) ## 8-byte Spill - movq (%rdx), %rbp - movq %rdi, %rdx - mulxq %rbp, %rdi, %rbx - movq %rax, %rdx - mulxq %rbp, %r8, %r14 - movq 16(%rsi), %rdx - movq %rdx, -104(%rsp) ## 8-byte Spill - addq %rdi, %r14 - mulxq %rbp, %r15, %rax - adcq %rbx, %r15 - movq 24(%rsi), %rdx - movq %rdx, -24(%rsp) ## 8-byte Spill - mulxq %rbp, %rbx, %rdi - adcq %rax, %rbx - movq 32(%rsi), %rdx - movq %rdx, -32(%rsp) ## 8-byte Spill - mulxq %rbp, %r11, %rax - adcq %rdi, %r11 - movq 40(%rsi), %rdx - movq %rdx, -40(%rsp) ## 8-byte Spill - mulxq %rbp, %r9, %rdi - adcq %rax, %r9 - movq 48(%rsi), %rdx - movq %rdx, -48(%rsp) ## 8-byte Spill - mulxq %rbp, %r10, %rbp - adcq %rdi, %r10 - adcq $0, %rbp - movq -8(%rcx), %rax - movq %rax, -80(%rsp) ## 
8-byte Spill - movq %r8, %rdx - imulq %rax, %rdx - movq (%rcx), %rax - movq %rax, -64(%rsp) ## 8-byte Spill - mulxq %rax, %rax, %rsi - movq %rsi, -128(%rsp) ## 8-byte Spill - addq %r8, %rax - movq 8(%rcx), %rax - movq %rax, -72(%rsp) ## 8-byte Spill - mulxq %rax, %r8, %rsi - movq %rsi, -120(%rsp) ## 8-byte Spill - adcq %r14, %r8 - movq 16(%rcx), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - mulxq %rax, %rsi, %r13 - adcq %r15, %rsi - movq 24(%rcx), %rax - movq %rax, (%rsp) ## 8-byte Spill - mulxq %rax, %r12, %rax - adcq %rbx, %r12 - movq 32(%rcx), %rdi - movq %rdi, -8(%rsp) ## 8-byte Spill - mulxq %rdi, %r15, %rbx - adcq %r11, %r15 - movq 40(%rcx), %rdi - movq %rdi, -16(%rsp) ## 8-byte Spill - mulxq %rdi, %r14, %rdi - adcq %r9, %r14 - movq 48(%rcx), %rcx - movq %rcx, -56(%rsp) ## 8-byte Spill - mulxq %rcx, %r11, %rcx - adcq %r10, %r11 - adcq $0, %rbp - addq -128(%rsp), %r8 ## 8-byte Folded Reload - movq %r8, -128(%rsp) ## 8-byte Spill - adcq -120(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, -120(%rsp) ## 8-byte Spill - adcq %r13, %r12 - adcq %rax, %r15 - adcq %rbx, %r14 - adcq %rdi, %r11 - adcq %rcx, %rbp - movq -88(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rdx - mulxq -96(%rsp), %rcx, %rsi ## 8-byte Folded Reload - mulxq -112(%rsp), %r13, %rax ## 8-byte Folded Reload - addq %rcx, %rax - mulxq -104(%rsp), %rcx, %rdi ## 8-byte Folded Reload - adcq %rsi, %rcx - mulxq -24(%rsp), %rsi, %r8 ## 8-byte Folded Reload - adcq %rdi, %rsi - mulxq -32(%rsp), %rdi, %r9 ## 8-byte Folded Reload - adcq %r8, %rdi - mulxq -40(%rsp), %r8, %rbx ## 8-byte Folded Reload - adcq %r9, %r8 - mulxq -48(%rsp), %r9, %r10 ## 8-byte Folded Reload - adcq %rbx, %r9 - adcq $0, %r10 - addq -128(%rsp), %r13 ## 8-byte Folded Reload - adcq -120(%rsp), %rax ## 8-byte Folded Reload - adcq %r12, %rcx - adcq %r15, %rsi - adcq %r14, %rdi - adcq %r11, %r8 - adcq %rbp, %r9 - adcq $0, %r10 - movq %r13, %rdx - imulq -80(%rsp), %rdx ## 8-byte Folded Reload - mulxq -64(%rsp), %rbp, %rbx ## 8-byte Folded Reload - movq %rbx, -128(%rsp) ## 8-byte Spill - addq %r13, %rbp - mulxq -72(%rsp), %rbp, %r14 ## 8-byte Folded Reload - adcq %rax, %rbp - mulxq 8(%rsp), %rax, %r11 ## 8-byte Folded Reload - adcq %rcx, %rax - mulxq (%rsp), %r12, %rcx ## 8-byte Folded Reload - adcq %rsi, %r12 - mulxq -8(%rsp), %r15, %rbx ## 8-byte Folded Reload - adcq %rdi, %r15 - mulxq -16(%rsp), %r13, %rdi ## 8-byte Folded Reload - adcq %r8, %r13 - mulxq -56(%rsp), %rsi, %rdx ## 8-byte Folded Reload - adcq %r9, %rsi - adcq $0, %r10 - addq -128(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, -128(%rsp) ## 8-byte Spill - adcq %r14, %rax - movq %rax, -120(%rsp) ## 8-byte Spill - adcq %r11, %r12 - adcq %rcx, %r15 - adcq %rbx, %r13 - adcq %rdi, %rsi - adcq %rdx, %r10 - movq -88(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rdx - mulxq -96(%rsp), %rcx, %rax ## 8-byte Folded Reload - mulxq -112(%rsp), %r14, %rdi ## 8-byte Folded Reload - addq %rcx, %rdi - mulxq -104(%rsp), %rbp, %rcx ## 8-byte Folded Reload - adcq %rax, %rbp - mulxq -24(%rsp), %rbx, %r8 ## 8-byte Folded Reload - adcq %rcx, %rbx - mulxq -32(%rsp), %rax, %r9 ## 8-byte Folded Reload - adcq %r8, %rax - mulxq -40(%rsp), %r8, %rcx ## 8-byte Folded Reload - movq %rcx, 16(%rsp) ## 8-byte Spill - adcq %r9, %r8 - mulxq -48(%rsp), %r9, %r11 ## 8-byte Folded Reload - adcq 16(%rsp), %r9 ## 8-byte Folded Reload - adcq $0, %r11 - addq -128(%rsp), %r14 ## 8-byte Folded Reload - adcq -120(%rsp), %rdi ## 8-byte Folded Reload - adcq %r12, %rbp - adcq %r15, %rbx - adcq %r13, %rax - adcq %rsi, %r8 - adcq %r10, %r9 - adcq $0, 
%r11 - movq %r14, %rdx - imulq -80(%rsp), %rdx ## 8-byte Folded Reload - mulxq -64(%rsp), %rsi, %rcx ## 8-byte Folded Reload - movq %rcx, -128(%rsp) ## 8-byte Spill - addq %r14, %rsi - mulxq -72(%rsp), %rsi, %r13 ## 8-byte Folded Reload - adcq %rdi, %rsi - mulxq 8(%rsp), %rdi, %r15 ## 8-byte Folded Reload - adcq %rbp, %rdi - mulxq (%rsp), %rcx, %rbp ## 8-byte Folded Reload - adcq %rbx, %rcx - mulxq -8(%rsp), %r14, %rbx ## 8-byte Folded Reload - adcq %rax, %r14 - mulxq -16(%rsp), %r12, %rax ## 8-byte Folded Reload - adcq %r8, %r12 - mulxq -56(%rsp), %r10, %rdx ## 8-byte Folded Reload - adcq %r9, %r10 - adcq $0, %r11 - addq -128(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, -128(%rsp) ## 8-byte Spill - adcq %r13, %rdi - movq %rdi, -120(%rsp) ## 8-byte Spill - adcq %r15, %rcx - adcq %rbp, %r14 - adcq %rbx, %r12 - adcq %rax, %r10 - adcq %rdx, %r11 - movq -88(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rdx - mulxq -96(%rsp), %rsi, %rax ## 8-byte Folded Reload - mulxq -112(%rsp), %r15, %rbp ## 8-byte Folded Reload - addq %rsi, %rbp - mulxq -104(%rsp), %rbx, %rdi ## 8-byte Folded Reload - adcq %rax, %rbx - mulxq -24(%rsp), %rsi, %rax ## 8-byte Folded Reload - adcq %rdi, %rsi - mulxq -32(%rsp), %rdi, %r9 ## 8-byte Folded Reload - adcq %rax, %rdi - mulxq -40(%rsp), %r8, %rax ## 8-byte Folded Reload - adcq %r9, %r8 - mulxq -48(%rsp), %r9, %r13 ## 8-byte Folded Reload - adcq %rax, %r9 - adcq $0, %r13 - addq -128(%rsp), %r15 ## 8-byte Folded Reload - adcq -120(%rsp), %rbp ## 8-byte Folded Reload - adcq %rcx, %rbx - adcq %r14, %rsi - adcq %r12, %rdi - adcq %r10, %r8 - adcq %r11, %r9 - adcq $0, %r13 - movq %r15, %rdx - imulq -80(%rsp), %rdx ## 8-byte Folded Reload - mulxq -64(%rsp), %rcx, %rax ## 8-byte Folded Reload - movq %rax, -128(%rsp) ## 8-byte Spill - addq %r15, %rcx - mulxq -72(%rsp), %rcx, %r11 ## 8-byte Folded Reload - adcq %rbp, %rcx - mulxq 8(%rsp), %rbp, %r10 ## 8-byte Folded Reload - adcq %rbx, %rbp - mulxq (%rsp), %rax, %rbx ## 8-byte Folded Reload - adcq %rsi, %rax - mulxq -8(%rsp), %r14, %rsi ## 8-byte Folded Reload - adcq %rdi, %r14 - mulxq -16(%rsp), %r15, %rdi ## 8-byte Folded Reload - adcq %r8, %r15 - mulxq -56(%rsp), %r12, %rdx ## 8-byte Folded Reload - adcq %r9, %r12 - adcq $0, %r13 - addq -128(%rsp), %rcx ## 8-byte Folded Reload - adcq %r11, %rbp - movq %rbp, -128(%rsp) ## 8-byte Spill - adcq %r10, %rax - movq %rax, -120(%rsp) ## 8-byte Spill - adcq %rbx, %r14 - adcq %rsi, %r15 - adcq %rdi, %r12 - adcq %rdx, %r13 - movq -88(%rsp), %rax ## 8-byte Reload - movq 32(%rax), %rdx - mulxq -96(%rsp), %rsi, %rdi ## 8-byte Folded Reload - mulxq -112(%rsp), %r11, %r8 ## 8-byte Folded Reload - addq %rsi, %r8 - mulxq -104(%rsp), %rbx, %rsi ## 8-byte Folded Reload - adcq %rdi, %rbx - mulxq -24(%rsp), %rbp, %rdi ## 8-byte Folded Reload - adcq %rsi, %rbp - mulxq -32(%rsp), %rsi, %r9 ## 8-byte Folded Reload - adcq %rdi, %rsi - mulxq -40(%rsp), %rdi, %rax ## 8-byte Folded Reload - adcq %r9, %rdi - mulxq -48(%rsp), %r9, %r10 ## 8-byte Folded Reload - adcq %rax, %r9 - adcq $0, %r10 - addq %rcx, %r11 - adcq -128(%rsp), %r8 ## 8-byte Folded Reload - adcq -120(%rsp), %rbx ## 8-byte Folded Reload - adcq %r14, %rbp - adcq %r15, %rsi - adcq %r12, %rdi - adcq %r13, %r9 - adcq $0, %r10 - movq %r11, %rdx - imulq -80(%rsp), %rdx ## 8-byte Folded Reload - mulxq -64(%rsp), %rcx, %rax ## 8-byte Folded Reload - movq %rax, -128(%rsp) ## 8-byte Spill - addq %r11, %rcx - mulxq -72(%rsp), %rcx, %r13 ## 8-byte Folded Reload - adcq %r8, %rcx - mulxq 8(%rsp), %rax, %r8 ## 8-byte Folded Reload - adcq %rbx, %rax - 
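## (descriptive note: one pass per multiplier limb; the mulx/adc chains accumulate a*b[i], then q = t0*n' mod 2^64 is formed with imulq and q*N is folded in to clear the low word) -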
mulxq (%rsp), %rbx, %r11 ## 8-byte Folded Reload - adcq %rbp, %rbx - mulxq -8(%rsp), %r14, %rbp ## 8-byte Folded Reload - adcq %rsi, %r14 - mulxq -16(%rsp), %r15, %rsi ## 8-byte Folded Reload - adcq %rdi, %r15 - mulxq -56(%rsp), %r12, %rdx ## 8-byte Folded Reload - adcq %r9, %r12 - adcq $0, %r10 - addq -128(%rsp), %rcx ## 8-byte Folded Reload - adcq %r13, %rax - movq %rax, -128(%rsp) ## 8-byte Spill - adcq %r8, %rbx - movq %rbx, -120(%rsp) ## 8-byte Spill - adcq %r11, %r14 - adcq %rbp, %r15 - adcq %rsi, %r12 - adcq %rdx, %r10 - movq -88(%rsp), %rax ## 8-byte Reload - movq 40(%rax), %rdx - mulxq -96(%rsp), %rsi, %rax ## 8-byte Folded Reload - mulxq -112(%rsp), %r11, %rbp ## 8-byte Folded Reload - addq %rsi, %rbp - mulxq -104(%rsp), %rbx, %rdi ## 8-byte Folded Reload - adcq %rax, %rbx - mulxq -24(%rsp), %rsi, %rax ## 8-byte Folded Reload - adcq %rdi, %rsi - mulxq -32(%rsp), %rdi, %r9 ## 8-byte Folded Reload - adcq %rax, %rdi - mulxq -40(%rsp), %r8, %rax ## 8-byte Folded Reload - adcq %r9, %r8 - mulxq -48(%rsp), %r9, %r13 ## 8-byte Folded Reload - adcq %rax, %r9 - adcq $0, %r13 - addq %rcx, %r11 - adcq -128(%rsp), %rbp ## 8-byte Folded Reload - adcq -120(%rsp), %rbx ## 8-byte Folded Reload - adcq %r14, %rsi - adcq %r15, %rdi - adcq %r12, %r8 - adcq %r10, %r9 - adcq $0, %r13 - movq %r11, %rdx - imulq -80(%rsp), %rdx ## 8-byte Folded Reload - mulxq -64(%rsp), %rcx, %rax ## 8-byte Folded Reload - movq %rax, -120(%rsp) ## 8-byte Spill - addq %r11, %rcx - mulxq -72(%rsp), %rcx, %rax ## 8-byte Folded Reload - movq %rax, 16(%rsp) ## 8-byte Spill - adcq %rbp, %rcx - mulxq 8(%rsp), %rax, %rbp ## 8-byte Folded Reload - movq %rbp, 24(%rsp) ## 8-byte Spill - adcq %rbx, %rax - movq %rax, -128(%rsp) ## 8-byte Spill - mulxq (%rsp), %r14, %rbp ## 8-byte Folded Reload - adcq %rsi, %r14 - mulxq -8(%rsp), %r11, %r12 ## 8-byte Folded Reload - adcq %rdi, %r11 - mulxq -16(%rsp), %r10, %rbx ## 8-byte Folded Reload - adcq %r8, %r10 - mulxq -56(%rsp), %rdi, %rax ## 8-byte Folded Reload - adcq %r9, %rdi - adcq $0, %r13 - addq -120(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -120(%rsp) ## 8-byte Spill - movq -128(%rsp), %rcx ## 8-byte Reload - adcq 16(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -128(%rsp) ## 8-byte Spill - adcq 24(%rsp), %r14 ## 8-byte Folded Reload - adcq %rbp, %r11 - adcq %r12, %r10 - adcq %rbx, %rdi - adcq %rax, %r13 - movq -88(%rsp), %rax ## 8-byte Reload - movq 48(%rax), %rdx - mulxq -96(%rsp), %rbp, %r9 ## 8-byte Folded Reload - mulxq -112(%rsp), %r8, %rax ## 8-byte Folded Reload - addq %rbp, %rax - mulxq -104(%rsp), %rbx, %rcx ## 8-byte Folded Reload - adcq %r9, %rbx - mulxq -24(%rsp), %rbp, %r9 ## 8-byte Folded Reload - adcq %rcx, %rbp - mulxq -32(%rsp), %rcx, %r12 ## 8-byte Folded Reload - adcq %r9, %rcx - mulxq -40(%rsp), %r15, %rsi ## 8-byte Folded Reload - movq %rsi, -112(%rsp) ## 8-byte Spill - adcq %r12, %r15 - mulxq -48(%rsp), %r12, %r9 ## 8-byte Folded Reload - adcq -112(%rsp), %r12 ## 8-byte Folded Reload - adcq $0, %r9 - addq -120(%rsp), %r8 ## 8-byte Folded Reload - adcq -128(%rsp), %rax ## 8-byte Folded Reload - adcq %r14, %rbx - adcq %r11, %rbp - adcq %r10, %rcx - adcq %rdi, %r15 - adcq %r13, %r12 - adcq $0, %r9 - movq -80(%rsp), %rdx ## 8-byte Reload - imulq %r8, %rdx - mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload - movq %rsi, -80(%rsp) ## 8-byte Spill - addq %r8, %rdi - mulxq -72(%rsp), %r8, %rsi ## 8-byte Folded Reload - movq %rsi, -112(%rsp) ## 8-byte Spill - adcq %rax, %r8 - movq 8(%rsp), %r11 ## 8-byte Reload - mulxq %r11, %rsi, %rax - movq %rax, -88(%rsp) ## 
8-byte Spill - adcq %rbx, %rsi - movq (%rsp), %r14 ## 8-byte Reload - mulxq %r14, %rdi, %rax - movq %rax, -96(%rsp) ## 8-byte Spill - adcq %rbp, %rdi - movq -8(%rsp), %rbp ## 8-byte Reload - mulxq %rbp, %rax, %rbx - movq %rbx, -104(%rsp) ## 8-byte Spill - adcq %rcx, %rax - movq -16(%rsp), %rbx ## 8-byte Reload - mulxq %rbx, %rcx, %r13 - adcq %r15, %rcx - mulxq -56(%rsp), %rdx, %r15 ## 8-byte Folded Reload - adcq %r12, %rdx - adcq $0, %r9 - addq -80(%rsp), %r8 ## 8-byte Folded Reload - adcq -112(%rsp), %rsi ## 8-byte Folded Reload - adcq -88(%rsp), %rdi ## 8-byte Folded Reload - adcq -96(%rsp), %rax ## 8-byte Folded Reload - adcq -104(%rsp), %rcx ## 8-byte Folded Reload - adcq %r13, %rdx - adcq %r15, %r9 - movq %r8, %r13 - subq -64(%rsp), %r13 ## 8-byte Folded Reload - movq %rsi, %r12 - sbbq -72(%rsp), %r12 ## 8-byte Folded Reload - movq %rdi, %r10 - sbbq %r11, %r10 - movq %rax, %r11 - sbbq %r14, %r11 - movq %rcx, %r14 - sbbq %rbp, %r14 - movq %rdx, %r15 - sbbq %rbx, %r15 - movq %r9, %rbp - sbbq -56(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, %rbx - sarq $63, %rbx - cmovsq %r8, %r13 - movq 32(%rsp), %rbx ## 8-byte Reload - movq %r13, (%rbx) - cmovsq %rsi, %r12 - movq %r12, 8(%rbx) - cmovsq %rdi, %r10 - movq %r10, 16(%rbx) - cmovsq %rax, %r11 - movq %r11, 24(%rbx) - cmovsq %rcx, %r14 - movq %r14, 32(%rbx) - cmovsq %rdx, %r15 - movq %r15, 40(%rbx) - cmovsq %r9, %rbp - movq %rbp, 48(%rbx) - addq $40, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montRed7Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montRed7Lbmi2: ## @mcl_fp_montRed7Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $56, %rsp - movq %rdx, %rcx - movq %rdi, 48(%rsp) ## 8-byte Spill - movq -8(%rcx), %rax - movq %rax, -72(%rsp) ## 8-byte Spill - movq (%rsi), %r13 - movq %r13, %rdx - imulq %rax, %rdx - movq 48(%rcx), %rax - movq %rax, -16(%rsp) ## 8-byte Spill - mulxq %rax, %rdi, %rax - movq %rdi, -64(%rsp) ## 8-byte Spill - movq %rax, -120(%rsp) ## 8-byte Spill - movq 40(%rcx), %rax - movq %rax, -24(%rsp) ## 8-byte Spill - mulxq %rax, %r10, %rax - movq %rax, -128(%rsp) ## 8-byte Spill - movq 32(%rcx), %rax - movq %rax, -32(%rsp) ## 8-byte Spill - mulxq %rax, %r14, %r8 - movq 24(%rcx), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - mulxq %rax, %r12, %r15 - movq 16(%rcx), %rax - movq %rax, -40(%rsp) ## 8-byte Spill - mulxq %rax, %rbp, %rbx - movq (%rcx), %rdi - movq %rdi, -48(%rsp) ## 8-byte Spill - movq 8(%rcx), %rax - movq %rax, -8(%rsp) ## 8-byte Spill - mulxq %rax, %rax, %r11 - mulxq %rdi, %rdx, %r9 - addq %rax, %r9 - adcq %rbp, %r11 - adcq %r12, %rbx - adcq %r14, %r15 - adcq %r10, %r8 - movq -128(%rsp), %rcx ## 8-byte Reload - adcq -64(%rsp), %rcx ## 8-byte Folded Reload - movq -120(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - addq %r13, %rdx - adcq 8(%rsi), %r9 - adcq 16(%rsi), %r11 - adcq 24(%rsi), %rbx - adcq 32(%rsi), %r15 - adcq 40(%rsi), %r8 - movq %r8, -112(%rsp) ## 8-byte Spill - adcq 48(%rsi), %rcx - movq %rcx, -128(%rsp) ## 8-byte Spill - adcq 56(%rsi), %rax - movq %rax, -120(%rsp) ## 8-byte Spill - movq 104(%rsi), %r8 - movq 96(%rsi), %rdx - movq 88(%rsi), %rdi - movq 80(%rsi), %rbp - movq 72(%rsi), %rax - movq 64(%rsi), %rcx - adcq $0, %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - adcq $0, %rax - movq %rax, -88(%rsp) ## 8-byte Spill - adcq $0, %rbp - movq %rbp, -56(%rsp) ## 8-byte Spill - adcq $0, %rdi - movq %rdi, -80(%rsp) ## 8-byte Spill - adcq $0, %rdx - movq %rdx, 24(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 
-64(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, (%rsp) ## 8-byte Spill - movq %r9, %rdx - imulq -72(%rsp), %rdx ## 8-byte Folded Reload - movq -16(%rsp), %r13 ## 8-byte Reload - mulxq %r13, %rcx, %rax - movq %rcx, 32(%rsp) ## 8-byte Spill - movq %rax, -96(%rsp) ## 8-byte Spill - mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload - movq %rcx, 40(%rsp) ## 8-byte Spill - movq %rax, -104(%rsp) ## 8-byte Spill - mulxq -32(%rsp), %r14, %r12 ## 8-byte Folded Reload - mulxq 16(%rsp), %r8, %rax ## 8-byte Folded Reload - mulxq -40(%rsp), %rsi, %r10 ## 8-byte Folded Reload - mulxq -8(%rsp), %rcx, %rdi ## 8-byte Folded Reload - mulxq -48(%rsp), %rdx, %rbp ## 8-byte Folded Reload - addq %rcx, %rbp - adcq %rsi, %rdi - adcq %r8, %r10 - adcq %r14, %rax - movq %rax, %rcx - adcq 40(%rsp), %r12 ## 8-byte Folded Reload - movq -104(%rsp), %rsi ## 8-byte Reload - adcq 32(%rsp), %rsi ## 8-byte Folded Reload - movq -96(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - addq %r9, %rdx - adcq %r11, %rbp - adcq %rbx, %rdi - adcq %r15, %r10 - adcq -112(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -112(%rsp) ## 8-byte Spill - adcq -128(%rsp), %r12 ## 8-byte Folded Reload - adcq -120(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, -104(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -96(%rsp) ## 8-byte Spill - adcq $0, -88(%rsp) ## 8-byte Folded Spill - adcq $0, -56(%rsp) ## 8-byte Folded Spill - adcq $0, -80(%rsp) ## 8-byte Folded Spill - adcq $0, 24(%rsp) ## 8-byte Folded Spill - adcq $0, -64(%rsp) ## 8-byte Folded Spill - adcq $0, (%rsp) ## 8-byte Folded Spill - movq %rbp, %rdx - imulq -72(%rsp), %rdx ## 8-byte Folded Reload - mulxq %r13, %rcx, %rax - movq %rcx, 8(%rsp) ## 8-byte Spill - movq %rax, -120(%rsp) ## 8-byte Spill - movq -24(%rsp), %r15 ## 8-byte Reload - mulxq %r15, %rcx, %rax - movq %rcx, 32(%rsp) ## 8-byte Spill - movq %rax, -128(%rsp) ## 8-byte Spill - mulxq -32(%rsp), %r11, %r13 ## 8-byte Folded Reload - mulxq 16(%rsp), %r9, %r14 ## 8-byte Folded Reload - mulxq -40(%rsp), %rsi, %r8 ## 8-byte Folded Reload - mulxq -8(%rsp), %rax, %rbx ## 8-byte Folded Reload - mulxq -48(%rsp), %rdx, %rcx ## 8-byte Folded Reload - addq %rax, %rcx - adcq %rsi, %rbx - adcq %r9, %r8 - adcq %r11, %r14 - adcq 32(%rsp), %r13 ## 8-byte Folded Reload - movq -128(%rsp), %rsi ## 8-byte Reload - adcq 8(%rsp), %rsi ## 8-byte Folded Reload - movq -120(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - addq %rbp, %rdx - adcq %rdi, %rcx - adcq %r10, %rbx - adcq -112(%rsp), %r8 ## 8-byte Folded Reload - adcq %r12, %r14 - adcq -104(%rsp), %r13 ## 8-byte Folded Reload - adcq -96(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, -128(%rsp) ## 8-byte Spill - adcq -88(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -120(%rsp) ## 8-byte Spill - adcq $0, -56(%rsp) ## 8-byte Folded Spill - adcq $0, -80(%rsp) ## 8-byte Folded Spill - adcq $0, 24(%rsp) ## 8-byte Folded Spill - adcq $0, -64(%rsp) ## 8-byte Folded Spill - adcq $0, (%rsp) ## 8-byte Folded Spill - movq %rcx, %rdx - imulq -72(%rsp), %rdx ## 8-byte Folded Reload - mulxq -16(%rsp), %rsi, %rax ## 8-byte Folded Reload - movq %rsi, -88(%rsp) ## 8-byte Spill - movq %rax, -96(%rsp) ## 8-byte Spill - mulxq %r15, %rsi, %rax - movq %rsi, -112(%rsp) ## 8-byte Spill - movq %rax, -104(%rsp) ## 8-byte Spill - movq -32(%rsp), %r15 ## 8-byte Reload - mulxq %r15, %rax, %r12 - movq %rax, 8(%rsp) ## 8-byte Spill - mulxq 16(%rsp), %r9, %rbp ## 8-byte Folded Reload - mulxq -40(%rsp), %rdi, %r10 ## 8-byte Folded Reload - mulxq -8(%rsp), %rsi, %r11 ## 
8-byte Folded Reload - mulxq -48(%rsp), %rdx, %rax ## 8-byte Folded Reload - addq %rsi, %rax - adcq %rdi, %r11 - adcq %r9, %r10 - adcq 8(%rsp), %rbp ## 8-byte Folded Reload - adcq -112(%rsp), %r12 ## 8-byte Folded Reload - movq -104(%rsp), %rdi ## 8-byte Reload - adcq -88(%rsp), %rdi ## 8-byte Folded Reload - movq -96(%rsp), %rsi ## 8-byte Reload - adcq $0, %rsi - addq %rcx, %rdx - adcq %rbx, %rax - adcq %r8, %r11 - adcq %r14, %r10 - adcq %r13, %rbp - adcq -128(%rsp), %r12 ## 8-byte Folded Reload - adcq -120(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, -104(%rsp) ## 8-byte Spill - adcq -56(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, -96(%rsp) ## 8-byte Spill - adcq $0, -80(%rsp) ## 8-byte Folded Spill - adcq $0, 24(%rsp) ## 8-byte Folded Spill - adcq $0, -64(%rsp) ## 8-byte Folded Spill - adcq $0, (%rsp) ## 8-byte Folded Spill - movq %rax, %rdx - imulq -72(%rsp), %rdx ## 8-byte Folded Reload - mulxq -16(%rsp), %rsi, %rcx ## 8-byte Folded Reload - movq %rsi, -128(%rsp) ## 8-byte Spill - movq %rcx, -56(%rsp) ## 8-byte Spill - mulxq -24(%rsp), %rsi, %rcx ## 8-byte Folded Reload - movq %rsi, -88(%rsp) ## 8-byte Spill - movq %rcx, -120(%rsp) ## 8-byte Spill - mulxq %r15, %rcx, %r13 - movq %rcx, -112(%rsp) ## 8-byte Spill - movq 16(%rsp), %r15 ## 8-byte Reload - mulxq %r15, %r9, %r14 - mulxq -40(%rsp), %rdi, %rbx ## 8-byte Folded Reload - mulxq -8(%rsp), %rsi, %r8 ## 8-byte Folded Reload - mulxq -48(%rsp), %rdx, %rcx ## 8-byte Folded Reload - addq %rsi, %rcx - adcq %rdi, %r8 - adcq %r9, %rbx - adcq -112(%rsp), %r14 ## 8-byte Folded Reload - adcq -88(%rsp), %r13 ## 8-byte Folded Reload - movq -120(%rsp), %rdi ## 8-byte Reload - adcq -128(%rsp), %rdi ## 8-byte Folded Reload - movq -56(%rsp), %rsi ## 8-byte Reload - adcq $0, %rsi - addq %rax, %rdx - adcq %r11, %rcx - adcq %r10, %r8 - adcq %rbp, %rbx - adcq %r12, %r14 - adcq -104(%rsp), %r13 ## 8-byte Folded Reload - adcq -96(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, -120(%rsp) ## 8-byte Spill - adcq -80(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, -56(%rsp) ## 8-byte Spill - adcq $0, 24(%rsp) ## 8-byte Folded Spill - adcq $0, -64(%rsp) ## 8-byte Folded Spill - adcq $0, (%rsp) ## 8-byte Folded Spill - movq %rcx, %rdx - imulq -72(%rsp), %rdx ## 8-byte Folded Reload - mulxq -16(%rsp), %rsi, %rax ## 8-byte Folded Reload - movq %rsi, -96(%rsp) ## 8-byte Spill - movq %rax, -80(%rsp) ## 8-byte Spill - mulxq -24(%rsp), %rsi, %rax ## 8-byte Folded Reload - movq %rsi, -104(%rsp) ## 8-byte Spill - movq %rax, -128(%rsp) ## 8-byte Spill - mulxq -32(%rsp), %rax, %r12 ## 8-byte Folded Reload - movq %rax, -88(%rsp) ## 8-byte Spill - movq %r15, %r11 - mulxq %r11, %rax, %r15 - movq %rax, -112(%rsp) ## 8-byte Spill - mulxq -40(%rsp), %rdi, %rbp ## 8-byte Folded Reload - movq -8(%rsp), %r9 ## 8-byte Reload - mulxq %r9, %rax, %r10 - mulxq -48(%rsp), %rdx, %rsi ## 8-byte Folded Reload - addq %rax, %rsi - adcq %rdi, %r10 - adcq -112(%rsp), %rbp ## 8-byte Folded Reload - adcq -88(%rsp), %r15 ## 8-byte Folded Reload - adcq -104(%rsp), %r12 ## 8-byte Folded Reload - movq -128(%rsp), %rdi ## 8-byte Reload - adcq -96(%rsp), %rdi ## 8-byte Folded Reload - movq -80(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - addq %rcx, %rdx - adcq %r8, %rsi - adcq %rbx, %r10 - adcq %r14, %rbp - adcq %r13, %r15 - adcq -120(%rsp), %r12 ## 8-byte Folded Reload - adcq -56(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, -128(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -80(%rsp) ## 8-byte Spill - adcq $0, -64(%rsp) ## 8-byte Folded Spill - 
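## (descriptive note: montRed7L makes one such pass per limb; each pass computes q = t[0]*n' mod 2^64, adds q*N so the low limb becomes zero, and shifts the accumulator down one word) -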
adcq $0, (%rsp) ## 8-byte Folded Spill - movq -72(%rsp), %rdx ## 8-byte Reload - imulq %rsi, %rdx - mulxq %r11, %rcx, %rax - movq %rax, -72(%rsp) ## 8-byte Spill - mulxq %r9, %rbx, %rdi - mulxq -48(%rsp), %r11, %r14 ## 8-byte Folded Reload - addq %rbx, %r14 - mulxq -40(%rsp), %rbx, %r13 ## 8-byte Folded Reload - adcq %rdi, %rbx - adcq %rcx, %r13 - mulxq -32(%rsp), %r8, %rdi ## 8-byte Folded Reload - adcq -72(%rsp), %r8 ## 8-byte Folded Reload - mulxq -24(%rsp), %rcx, %r9 ## 8-byte Folded Reload - adcq %rdi, %rcx - mulxq -16(%rsp), %rdx, %rdi ## 8-byte Folded Reload - adcq %r9, %rdx - adcq $0, %rdi - addq %rsi, %r11 - adcq %r10, %r14 - adcq %rbp, %rbx - adcq %r15, %r13 - adcq %r12, %r8 - adcq -128(%rsp), %rcx ## 8-byte Folded Reload - adcq -80(%rsp), %rdx ## 8-byte Folded Reload - adcq -64(%rsp), %rdi ## 8-byte Folded Reload - movq (%rsp), %rax ## 8-byte Reload - adcq $0, %rax - movq %r14, %rsi - subq -48(%rsp), %rsi ## 8-byte Folded Reload - movq %rbx, %rbp - sbbq -8(%rsp), %rbp ## 8-byte Folded Reload - movq %r13, %r9 - sbbq -40(%rsp), %r9 ## 8-byte Folded Reload - movq %r8, %r10 - sbbq 16(%rsp), %r10 ## 8-byte Folded Reload - movq %rcx, %r11 - sbbq -32(%rsp), %r11 ## 8-byte Folded Reload - movq %rdx, %r15 - sbbq -24(%rsp), %r15 ## 8-byte Folded Reload - movq %rdi, %r12 - sbbq -16(%rsp), %r12 ## 8-byte Folded Reload - sbbq $0, %rax - andl $1, %eax - cmovneq %rdi, %r12 - testb %al, %al - cmovneq %r14, %rsi - movq 48(%rsp), %rdi ## 8-byte Reload - movq %rsi, (%rdi) - cmovneq %rbx, %rbp - movq %rbp, 8(%rdi) - cmovneq %r13, %r9 - movq %r9, 16(%rdi) - cmovneq %r8, %r10 - movq %r10, 24(%rdi) - cmovneq %rcx, %r11 - movq %r11, 32(%rdi) - cmovneq %rdx, %r15 - movq %r15, 40(%rdi) - movq %r12, 48(%rdi) - addq $56, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_addPre7Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addPre7Lbmi2: ## @mcl_fp_addPre7Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 48(%rdx), %r8 - movq 48(%rsi), %r14 - movq 40(%rdx), %r9 - movq 40(%rsi), %r15 - movq 32(%rdx), %r10 - movq 24(%rdx), %r11 - movq 16(%rdx), %r12 - movq (%rdx), %rcx - movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - movq 24(%rsi), %rax - movq 32(%rsi), %rbx - adcq 16(%rsi), %r12 - movq %rcx, (%rdi) - movq %rdx, 8(%rdi) - movq %r12, 16(%rdi) - adcq %r11, %rax - movq %rax, 24(%rdi) - adcq %r10, %rbx - movq %rbx, 32(%rdi) - adcq %r9, %r15 - movq %r15, 40(%rdi) - adcq %r8, %r14 - movq %r14, 48(%rdi) - sbbq %rax, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_subPre7Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subPre7Lbmi2: ## @mcl_fp_subPre7Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 48(%rdx), %r8 - movq 48(%rsi), %r10 - movq 40(%rdx), %r9 - movq 40(%rsi), %r15 - movq 24(%rdx), %r11 - movq 32(%rdx), %r14 - movq (%rsi), %rbx - movq 8(%rsi), %r12 - xorl %eax, %eax - subq (%rdx), %rbx - sbbq 8(%rdx), %r12 - movq 16(%rsi), %rcx - sbbq 16(%rdx), %rcx - movq 32(%rsi), %rdx - movq 24(%rsi), %rsi - movq %rbx, (%rdi) - movq %r12, 8(%rdi) - movq %rcx, 16(%rdi) - sbbq %r11, %rsi - movq %rsi, 24(%rdi) - sbbq %r14, %rdx - movq %rdx, 32(%rdi) - sbbq %r9, %r15 - movq %r15, 40(%rdi) - sbbq %r8, %r10 - movq %r10, 48(%rdi) - sbbq $0, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_shr1_7Lbmi2 - .p2align 4, 0x90 -_mcl_fp_shr1_7Lbmi2: ## @mcl_fp_shr1_7Lbmi2 -## BB#0: - movq 48(%rsi), %r8 - movq 40(%rsi), %r9 - movq 32(%rsi), %r10 - movq 
24(%rsi), %rax - movq 16(%rsi), %rcx - movq (%rsi), %rdx - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rdx - movq %rdx, (%rdi) - shrdq $1, %rcx, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rax, %rcx - movq %rcx, 16(%rdi) - shrdq $1, %r10, %rax - movq %rax, 24(%rdi) - shrdq $1, %r9, %r10 - movq %r10, 32(%rdi) - shrdq $1, %r8, %r9 - movq %r9, 40(%rdi) - shrq %r8 - movq %r8, 48(%rdi) - retq - - .globl _mcl_fp_add7Lbmi2 - .p2align 4, 0x90 -_mcl_fp_add7Lbmi2: ## @mcl_fp_add7Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 48(%rdx), %r14 - movq 48(%rsi), %r8 - movq 40(%rdx), %r15 - movq 40(%rsi), %r9 - movq 32(%rdx), %r12 - movq 24(%rdx), %r13 - movq 16(%rdx), %r10 - movq (%rdx), %r11 - movq 8(%rdx), %rdx - addq (%rsi), %r11 - adcq 8(%rsi), %rdx - movq 24(%rsi), %rax - movq 32(%rsi), %rbx - adcq 16(%rsi), %r10 - movq %r11, (%rdi) - movq %rdx, 8(%rdi) - movq %r10, 16(%rdi) - adcq %r13, %rax - movq %rax, 24(%rdi) - adcq %r12, %rbx - movq %rbx, 32(%rdi) - adcq %r15, %r9 - movq %r9, 40(%rdi) - adcq %r14, %r8 - movq %r8, 48(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %r11 - sbbq 8(%rcx), %rdx - sbbq 16(%rcx), %r10 - sbbq 24(%rcx), %rax - sbbq 32(%rcx), %rbx - sbbq 40(%rcx), %r9 - sbbq 48(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne LBB104_2 -## BB#1: ## %nocarry - movq %r11, (%rdi) - movq %rdx, 8(%rdi) - movq %r10, 16(%rdi) - movq %rax, 24(%rdi) - movq %rbx, 32(%rdi) - movq %r9, 40(%rdi) - movq %r8, 48(%rdi) -LBB104_2: ## %carry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_addNF7Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addNF7Lbmi2: ## @mcl_fp_addNF7Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 48(%rdx), %r9 - movq 40(%rdx), %rbp - movq 32(%rdx), %r10 - movq 24(%rdx), %r11 - movq 16(%rdx), %r14 - movq (%rdx), %r12 - movq 8(%rdx), %r15 - addq (%rsi), %r12 - adcq 8(%rsi), %r15 - adcq 16(%rsi), %r14 - adcq 24(%rsi), %r11 - adcq 32(%rsi), %r10 - adcq 40(%rsi), %rbp - movq %rbp, -8(%rsp) ## 8-byte Spill - adcq 48(%rsi), %r9 - movq %r12, %rsi - subq (%rcx), %rsi - movq %r15, %rdx - sbbq 8(%rcx), %rdx - movq %r14, %rax - sbbq 16(%rcx), %rax - movq %r11, %rbx - sbbq 24(%rcx), %rbx - movq %r10, %r13 - sbbq 32(%rcx), %r13 - sbbq 40(%rcx), %rbp - movq %r9, %r8 - sbbq 48(%rcx), %r8 - movq %r8, %rcx - sarq $63, %rcx - cmovsq %r12, %rsi - movq %rsi, (%rdi) - cmovsq %r15, %rdx - movq %rdx, 8(%rdi) - cmovsq %r14, %rax - movq %rax, 16(%rdi) - cmovsq %r11, %rbx - movq %rbx, 24(%rdi) - cmovsq %r10, %r13 - movq %r13, 32(%rdi) - cmovsq -8(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 40(%rdi) - cmovsq %r9, %r8 - movq %r8, 48(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_sub7Lbmi2 - .p2align 4, 0x90 -_mcl_fp_sub7Lbmi2: ## @mcl_fp_sub7Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 48(%rdx), %r14 - movq 48(%rsi), %r8 - movq 40(%rdx), %r15 - movq 40(%rsi), %r9 - movq 32(%rdx), %r12 - movq (%rsi), %rax - movq 8(%rsi), %r11 - xorl %ebx, %ebx - subq (%rdx), %rax - sbbq 8(%rdx), %r11 - movq 16(%rsi), %r13 - sbbq 16(%rdx), %r13 - movq 32(%rsi), %r10 - movq 24(%rsi), %rsi - sbbq 24(%rdx), %rsi - movq %rax, (%rdi) - movq %r11, 8(%rdi) - movq %r13, 16(%rdi) - movq %rsi, 24(%rdi) - sbbq %r12, %r10 - movq %r10, 32(%rdi) - sbbq %r15, %r9 - movq %r9, 40(%rdi) - sbbq %r14, %r8 - movq %r8, 48(%rdi) - sbbq $0, %rbx - testb $1, %bl - je LBB106_2 -## BB#1: ## %carry - movq 48(%rcx), %r14 - movq 
40(%rcx), %r15 - movq 32(%rcx), %r12 - movq 24(%rcx), %rbx - movq 8(%rcx), %rdx - movq 16(%rcx), %rbp - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %r11, %rdx - movq %rdx, 8(%rdi) - adcq %r13, %rbp - movq %rbp, 16(%rdi) - adcq %rsi, %rbx - movq %rbx, 24(%rdi) - adcq %r10, %r12 - movq %r12, 32(%rdi) - adcq %r9, %r15 - movq %r15, 40(%rdi) - adcq %r8, %r14 - movq %r14, 48(%rdi) -LBB106_2: ## %nocarry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_subNF7Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subNF7Lbmi2: ## @mcl_fp_subNF7Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq 48(%rsi), %r11 - movdqu (%rdx), %xmm0 - movdqu 16(%rdx), %xmm1 - movdqu 32(%rdx), %xmm2 - pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1] - movd %xmm3, %r14 - movdqu (%rsi), %xmm3 - movdqu 16(%rsi), %xmm4 - movdqu 32(%rsi), %xmm5 - pshufd $78, %xmm5, %xmm6 ## xmm6 = xmm5[2,3,0,1] - movd %xmm6, %rcx - movd %xmm2, %r15 - movd %xmm5, %r9 - pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] - movd %xmm2, %r12 - pshufd $78, %xmm4, %xmm2 ## xmm2 = xmm4[2,3,0,1] - movd %xmm2, %r10 - movd %xmm1, %r13 - pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] - movd %xmm1, %rax - pshufd $78, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,0,1] - movd %xmm0, %rbx - movd %xmm3, %rsi - subq %rbx, %rsi - movd %xmm1, %rbx - sbbq %rax, %rbx - movd %xmm4, %rbp - sbbq %r13, %rbp - sbbq %r12, %r10 - sbbq %r15, %r9 - sbbq %r14, %rcx - movq %rcx, -8(%rsp) ## 8-byte Spill - sbbq 48(%rdx), %r11 - movq %r11, %rax - sarq $63, %rax - movq %rax, %rdx - shldq $1, %r11, %rdx - andq (%r8), %rdx - movq 48(%r8), %r14 - andq %rax, %r14 - movq 40(%r8), %r15 - andq %rax, %r15 - movq 32(%r8), %r12 - andq %rax, %r12 - movq 24(%r8), %r13 - andq %rax, %r13 - movq 16(%r8), %rcx - andq %rax, %rcx - andq 8(%r8), %rax - addq %rsi, %rdx - adcq %rbx, %rax - movq %rdx, (%rdi) - movq %rax, 8(%rdi) - adcq %rbp, %rcx - movq %rcx, 16(%rdi) - adcq %r10, %r13 - movq %r13, 24(%rdi) - adcq %r9, %r12 - movq %r12, 32(%rdi) - adcq -8(%rsp), %r15 ## 8-byte Folded Reload - movq %r15, 40(%rdi) - adcq %r11, %r14 - movq %r14, 48(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_add7Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_add7Lbmi2: ## @mcl_fpDbl_add7Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq 104(%rdx), %rax - movq %rax, -8(%rsp) ## 8-byte Spill - movq 96(%rdx), %rax - movq %rax, -24(%rsp) ## 8-byte Spill - movq 88(%rdx), %r11 - movq 80(%rdx), %r14 - movq 24(%rsi), %r15 - movq 32(%rsi), %r12 - movq 16(%rdx), %r9 - movq (%rdx), %rax - movq 8(%rdx), %rbx - addq (%rsi), %rax - adcq 8(%rsi), %rbx - adcq 16(%rsi), %r9 - adcq 24(%rdx), %r15 - adcq 32(%rdx), %r12 - movq 72(%rdx), %r13 - movq 64(%rdx), %rbp - movq %rax, (%rdi) - movq 56(%rdx), %r10 - movq %rbx, 8(%rdi) - movq 48(%rdx), %rcx - movq 40(%rdx), %rdx - movq %r9, 16(%rdi) - movq 104(%rsi), %r9 - movq %r15, 24(%rdi) - movq 40(%rsi), %rbx - adcq %rdx, %rbx - movq 96(%rsi), %r15 - movq %r12, 32(%rdi) - movq 48(%rsi), %rdx - adcq %rcx, %rdx - movq 88(%rsi), %rax - movq %rbx, 40(%rdi) - movq 56(%rsi), %rcx - adcq %r10, %rcx - movq 80(%rsi), %r12 - movq %rdx, 48(%rdi) - movq 72(%rsi), %rdx - movq 64(%rsi), %rsi - adcq %rbp, %rsi - adcq %r13, %rdx - adcq %r14, %r12 - adcq %r11, %rax - movq %rax, -16(%rsp) ## 8-byte Spill - adcq -24(%rsp), %r15 ## 8-byte Folded Reload - movq %r15, -24(%rsp) ## 8-byte Spill - adcq -8(%rsp), %r9 
## 8-byte Folded Reload - sbbq %rbp, %rbp - andl $1, %ebp - movq %rcx, %rbx - subq (%r8), %rbx - movq %rsi, %r10 - sbbq 8(%r8), %r10 - movq %rdx, %r11 - sbbq 16(%r8), %r11 - movq %r12, %r14 - sbbq 24(%r8), %r14 - movq -16(%rsp), %r13 ## 8-byte Reload - sbbq 32(%r8), %r13 - sbbq 40(%r8), %r15 - movq %r9, %rax - sbbq 48(%r8), %rax - sbbq $0, %rbp - andl $1, %ebp - cmovneq %rcx, %rbx - movq %rbx, 56(%rdi) - testb %bpl, %bpl - cmovneq %rsi, %r10 - movq %r10, 64(%rdi) - cmovneq %rdx, %r11 - movq %r11, 72(%rdi) - cmovneq %r12, %r14 - movq %r14, 80(%rdi) - cmovneq -16(%rsp), %r13 ## 8-byte Folded Reload - movq %r13, 88(%rdi) - cmovneq -24(%rsp), %r15 ## 8-byte Folded Reload - movq %r15, 96(%rdi) - cmovneq %r9, %rax - movq %rax, 104(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sub7Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sub7Lbmi2: ## @mcl_fpDbl_sub7Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq 104(%rdx), %rax - movq %rax, -8(%rsp) ## 8-byte Spill - movq 96(%rdx), %r10 - movq 88(%rdx), %r14 - movq 16(%rsi), %rax - movq (%rsi), %r15 - movq 8(%rsi), %r11 - xorl %ecx, %ecx - subq (%rdx), %r15 - sbbq 8(%rdx), %r11 - sbbq 16(%rdx), %rax - movq 24(%rsi), %rbx - sbbq 24(%rdx), %rbx - movq 32(%rsi), %r12 - sbbq 32(%rdx), %r12 - movq 80(%rdx), %r13 - movq 72(%rdx), %rbp - movq %r15, (%rdi) - movq 64(%rdx), %r9 - movq %r11, 8(%rdi) - movq 56(%rdx), %r15 - movq %rax, 16(%rdi) - movq 48(%rdx), %r11 - movq 40(%rdx), %rdx - movq %rbx, 24(%rdi) - movq 40(%rsi), %rbx - sbbq %rdx, %rbx - movq 104(%rsi), %rax - movq %r12, 32(%rdi) - movq 48(%rsi), %r12 - sbbq %r11, %r12 - movq 96(%rsi), %r11 - movq %rbx, 40(%rdi) - movq 56(%rsi), %rdx - sbbq %r15, %rdx - movq 88(%rsi), %r15 - movq %r12, 48(%rdi) - movq 64(%rsi), %rbx - sbbq %r9, %rbx - movq 80(%rsi), %r12 - movq 72(%rsi), %r9 - sbbq %rbp, %r9 - sbbq %r13, %r12 - sbbq %r14, %r15 - sbbq %r10, %r11 - sbbq -8(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -8(%rsp) ## 8-byte Spill - movl $0, %ebp - sbbq $0, %rbp - andl $1, %ebp - movq (%r8), %r10 - cmoveq %rcx, %r10 - testb %bpl, %bpl - movq 16(%r8), %rbp - cmoveq %rcx, %rbp - movq 8(%r8), %rsi - cmoveq %rcx, %rsi - movq 48(%r8), %r14 - cmoveq %rcx, %r14 - movq 40(%r8), %r13 - cmoveq %rcx, %r13 - movq 32(%r8), %rax - cmoveq %rcx, %rax - cmovneq 24(%r8), %rcx - addq %rdx, %r10 - adcq %rbx, %rsi - movq %r10, 56(%rdi) - movq %rsi, 64(%rdi) - adcq %r9, %rbp - movq %rbp, 72(%rdi) - adcq %r12, %rcx - movq %rcx, 80(%rdi) - adcq %r15, %rax - movq %rax, 88(%rdi) - adcq %r11, %r13 - movq %r13, 96(%rdi) - adcq -8(%rsp), %r14 ## 8-byte Folded Reload - movq %r14, 104(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .p2align 4, 0x90 -l_mulPv512x64: ## @mulPv512x64 -## BB#0: - mulxq (%rsi), %rcx, %rax - movq %rcx, (%rdi) - mulxq 8(%rsi), %rcx, %r8 - addq %rax, %rcx - movq %rcx, 8(%rdi) - mulxq 16(%rsi), %rcx, %r9 - adcq %r8, %rcx - movq %rcx, 16(%rdi) - mulxq 24(%rsi), %rax, %rcx - adcq %r9, %rax - movq %rax, 24(%rdi) - mulxq 32(%rsi), %rax, %r8 - adcq %rcx, %rax - movq %rax, 32(%rdi) - mulxq 40(%rsi), %rcx, %r9 - adcq %r8, %rcx - movq %rcx, 40(%rdi) - mulxq 48(%rsi), %rax, %rcx - adcq %r9, %rax - movq %rax, 48(%rdi) - mulxq 56(%rsi), %rax, %rdx - adcq %rcx, %rax - movq %rax, 56(%rdi) - adcq $0, %rdx - movq %rdx, 64(%rdi) - movq %rdi, %rax - retq - - .globl _mcl_fp_mulUnitPre8Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mulUnitPre8Lbmi2: ## @mcl_fp_mulUnitPre8Lbmi2 -## BB#0: - 
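## multiply the 512-bit operand by a single 64-bit word: call l_mulPv512x64 into a stack buffer, then copy the nine result words to the destination -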
pushq %rbx - subq $80, %rsp - movq %rdi, %rbx - leaq 8(%rsp), %rdi - callq l_mulPv512x64 - movq 72(%rsp), %r8 - movq 64(%rsp), %r9 - movq 56(%rsp), %r10 - movq 48(%rsp), %r11 - movq 40(%rsp), %rdi - movq 32(%rsp), %rax - movq 24(%rsp), %rcx - movq 8(%rsp), %rdx - movq 16(%rsp), %rsi - movq %rdx, (%rbx) - movq %rsi, 8(%rbx) - movq %rcx, 16(%rbx) - movq %rax, 24(%rbx) - movq %rdi, 32(%rbx) - movq %r11, 40(%rbx) - movq %r10, 48(%rbx) - movq %r9, 56(%rbx) - movq %r8, 64(%rbx) - addq $80, %rsp - popq %rbx - retq - - .globl _mcl_fpDbl_mulPre8Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_mulPre8Lbmi2: ## @mcl_fpDbl_mulPre8Lbmi2 -## BB#0: - pushq %rbp - movq %rsp, %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $200, %rsp - movq %rdx, %r15 - movq %rsi, %rbx - movq %rdi, %r14 - callq _mcl_fpDbl_mulPre4Lbmi2 - leaq 64(%r14), %rdi - leaq 32(%rbx), %rsi - leaq 32(%r15), %rdx - callq _mcl_fpDbl_mulPre4Lbmi2 - movq 56(%rbx), %r10 - movq 48(%rbx), %rdx - movq (%rbx), %rsi - movq 8(%rbx), %rdi - addq 32(%rbx), %rsi - adcq 40(%rbx), %rdi - adcq 16(%rbx), %rdx - adcq 24(%rbx), %r10 - pushfq - popq %r8 - xorl %r9d, %r9d - movq 56(%r15), %rcx - movq 48(%r15), %r13 - movq (%r15), %r12 - movq 8(%r15), %rbx - addq 32(%r15), %r12 - adcq 40(%r15), %rbx - adcq 16(%r15), %r13 - adcq 24(%r15), %rcx - movl $0, %eax - cmovbq %r10, %rax - movq %rax, -88(%rbp) ## 8-byte Spill - movl $0, %eax - cmovbq %rdx, %rax - movq %rax, -80(%rbp) ## 8-byte Spill - movl $0, %eax - cmovbq %rdi, %rax - movq %rax, -72(%rbp) ## 8-byte Spill - movl $0, %eax - cmovbq %rsi, %rax - movq %rax, -64(%rbp) ## 8-byte Spill - sbbq %r15, %r15 - movq %rsi, -168(%rbp) - movq %rdi, -160(%rbp) - movq %rdx, -152(%rbp) - movq %r10, -144(%rbp) - movq %r12, -136(%rbp) - movq %rbx, -128(%rbp) - movq %r13, -120(%rbp) - movq %rcx, -112(%rbp) - pushq %r8 - popfq - cmovaeq %r9, %rcx - movq %rcx, -48(%rbp) ## 8-byte Spill - cmovaeq %r9, %r13 - cmovaeq %r9, %rbx - cmovaeq %r9, %r12 - sbbq %rax, %rax - movq %rax, -56(%rbp) ## 8-byte Spill - leaq -232(%rbp), %rdi - leaq -168(%rbp), %rsi - leaq -136(%rbp), %rdx - callq _mcl_fpDbl_mulPre4Lbmi2 - addq -64(%rbp), %r12 ## 8-byte Folded Reload - adcq -72(%rbp), %rbx ## 8-byte Folded Reload - adcq -80(%rbp), %r13 ## 8-byte Folded Reload - movq -48(%rbp), %r10 ## 8-byte Reload - adcq -88(%rbp), %r10 ## 8-byte Folded Reload - sbbq %rax, %rax - andl $1, %eax - movq -56(%rbp), %rdx ## 8-byte Reload - andl %edx, %r15d - andl $1, %r15d - addq -200(%rbp), %r12 - adcq -192(%rbp), %rbx - adcq -184(%rbp), %r13 - adcq -176(%rbp), %r10 - adcq %rax, %r15 - movq -208(%rbp), %rax - movq -216(%rbp), %rcx - movq -232(%rbp), %rsi - movq -224(%rbp), %rdx - subq (%r14), %rsi - sbbq 8(%r14), %rdx - sbbq 16(%r14), %rcx - sbbq 24(%r14), %rax - movq 32(%r14), %rdi - movq %rdi, -80(%rbp) ## 8-byte Spill - movq 40(%r14), %r8 - movq %r8, -88(%rbp) ## 8-byte Spill - sbbq %rdi, %r12 - sbbq %r8, %rbx - movq 48(%r14), %rdi - movq %rdi, -72(%rbp) ## 8-byte Spill - sbbq %rdi, %r13 - movq 56(%r14), %rdi - movq %rdi, -64(%rbp) ## 8-byte Spill - sbbq %rdi, %r10 - sbbq $0, %r15 - movq 64(%r14), %r11 - subq %r11, %rsi - movq 72(%r14), %rdi - movq %rdi, -56(%rbp) ## 8-byte Spill - sbbq %rdi, %rdx - movq 80(%r14), %rdi - movq %rdi, -48(%rbp) ## 8-byte Spill - sbbq %rdi, %rcx - movq 88(%r14), %rdi - movq %rdi, -104(%rbp) ## 8-byte Spill - sbbq %rdi, %rax - movq 96(%r14), %rdi - movq %rdi, -96(%rbp) ## 8-byte Spill - sbbq %rdi, %r12 - movq 104(%r14), %rdi - sbbq %rdi, %rbx - movq 112(%r14), %r8 - sbbq %r8, %r13 - movq 120(%r14), %r9 - sbbq %r9, 
%r10 - sbbq $0, %r15 - addq -80(%rbp), %rsi ## 8-byte Folded Reload - adcq -88(%rbp), %rdx ## 8-byte Folded Reload - movq %rsi, 32(%r14) - adcq -72(%rbp), %rcx ## 8-byte Folded Reload - movq %rdx, 40(%r14) - adcq -64(%rbp), %rax ## 8-byte Folded Reload - movq %rcx, 48(%r14) - adcq %r11, %r12 - movq %rax, 56(%r14) - movq %r12, 64(%r14) - adcq -56(%rbp), %rbx ## 8-byte Folded Reload - movq %rbx, 72(%r14) - adcq -48(%rbp), %r13 ## 8-byte Folded Reload - movq %r13, 80(%r14) - adcq -104(%rbp), %r10 ## 8-byte Folded Reload - movq %r10, 88(%r14) - adcq -96(%rbp), %r15 ## 8-byte Folded Reload - movq %r15, 96(%r14) - adcq $0, %rdi - movq %rdi, 104(%r14) - adcq $0, %r8 - movq %r8, 112(%r14) - adcq $0, %r9 - movq %r9, 120(%r14) - addq $200, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sqrPre8Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre8Lbmi2: ## @mcl_fpDbl_sqrPre8Lbmi2 -## BB#0: - pushq %rbp - movq %rsp, %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $200, %rsp - movq %rsi, %rbx - movq %rdi, %r14 - movq %rbx, %rdx - callq _mcl_fpDbl_mulPre4Lbmi2 - leaq 64(%r14), %rdi - leaq 32(%rbx), %rsi - movq %rsi, %rdx - callq _mcl_fpDbl_mulPre4Lbmi2 - movq 56(%rbx), %r15 - movq 48(%rbx), %rax - movq (%rbx), %rcx - movq 8(%rbx), %rdx - addq 32(%rbx), %rcx - adcq 40(%rbx), %rdx - adcq 16(%rbx), %rax - adcq 24(%rbx), %r15 - pushfq - popq %r8 - pushfq - popq %r9 - pushfq - popq %r10 - pushfq - popq %rdi - pushfq - popq %rbx - sbbq %rsi, %rsi - movq %rsi, -56(%rbp) ## 8-byte Spill - leaq (%rcx,%rcx), %rsi - xorl %r11d, %r11d - pushq %rbx - popfq - cmovaeq %r11, %rsi - movq %rsi, -48(%rbp) ## 8-byte Spill - movq %rdx, %r13 - shldq $1, %rcx, %r13 - pushq %rdi - popfq - cmovaeq %r11, %r13 - movq %rax, %r12 - shldq $1, %rdx, %r12 - pushq %r10 - popfq - cmovaeq %r11, %r12 - movq %r15, %rbx - movq %rcx, -168(%rbp) - movq %rdx, -160(%rbp) - movq %rax, -152(%rbp) - movq %r15, -144(%rbp) - movq %rcx, -136(%rbp) - movq %rdx, -128(%rbp) - movq %rax, -120(%rbp) - movq %r15, -112(%rbp) - shldq $1, %rax, %r15 - pushq %r9 - popfq - cmovaeq %r11, %r15 - shrq $63, %rbx - pushq %r8 - popfq - cmovaeq %r11, %rbx - leaq -232(%rbp), %rdi - leaq -168(%rbp), %rsi - leaq -136(%rbp), %rdx - callq _mcl_fpDbl_mulPre4Lbmi2 - movq -56(%rbp), %rax ## 8-byte Reload - andl $1, %eax - movq -48(%rbp), %r10 ## 8-byte Reload - addq -200(%rbp), %r10 - adcq -192(%rbp), %r13 - adcq -184(%rbp), %r12 - adcq -176(%rbp), %r15 - adcq %rbx, %rax - movq %rax, %rbx - movq -208(%rbp), %rax - movq -216(%rbp), %rcx - movq -232(%rbp), %rsi - movq -224(%rbp), %rdx - subq (%r14), %rsi - sbbq 8(%r14), %rdx - sbbq 16(%r14), %rcx - sbbq 24(%r14), %rax - movq 32(%r14), %r9 - movq %r9, -56(%rbp) ## 8-byte Spill - movq 40(%r14), %r8 - movq %r8, -48(%rbp) ## 8-byte Spill - sbbq %r9, %r10 - sbbq %r8, %r13 - movq 48(%r14), %rdi - movq %rdi, -104(%rbp) ## 8-byte Spill - sbbq %rdi, %r12 - movq 56(%r14), %rdi - movq %rdi, -96(%rbp) ## 8-byte Spill - sbbq %rdi, %r15 - sbbq $0, %rbx - movq 64(%r14), %r11 - subq %r11, %rsi - movq 72(%r14), %rdi - movq %rdi, -88(%rbp) ## 8-byte Spill - sbbq %rdi, %rdx - movq 80(%r14), %rdi - movq %rdi, -80(%rbp) ## 8-byte Spill - sbbq %rdi, %rcx - movq 88(%r14), %rdi - movq %rdi, -72(%rbp) ## 8-byte Spill - sbbq %rdi, %rax - movq 96(%r14), %rdi - movq %rdi, -64(%rbp) ## 8-byte Spill - sbbq %rdi, %r10 - movq 104(%r14), %rdi - sbbq %rdi, %r13 - movq 112(%r14), %r8 - sbbq %r8, %r12 - movq 120(%r14), %r9 - sbbq %r9, %r15 - sbbq $0, %rbx - addq -56(%rbp), %rsi ## 
8-byte Folded Reload - adcq -48(%rbp), %rdx ## 8-byte Folded Reload - movq %rsi, 32(%r14) - adcq -104(%rbp), %rcx ## 8-byte Folded Reload - movq %rdx, 40(%r14) - adcq -96(%rbp), %rax ## 8-byte Folded Reload - movq %rcx, 48(%r14) - adcq %r11, %r10 - movq %rax, 56(%r14) - movq %r10, 64(%r14) - adcq -88(%rbp), %r13 ## 8-byte Folded Reload - movq %r13, 72(%r14) - adcq -80(%rbp), %r12 ## 8-byte Folded Reload - movq %r12, 80(%r14) - adcq -72(%rbp), %r15 ## 8-byte Folded Reload - movq %r15, 88(%r14) - movq %rbx, %rax - adcq -64(%rbp), %rax ## 8-byte Folded Reload - movq %rax, 96(%r14) - adcq $0, %rdi - movq %rdi, 104(%r14) - adcq $0, %r8 - movq %r8, 112(%r14) - adcq $0, %r9 - movq %r9, 120(%r14) - addq $200, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mont8Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mont8Lbmi2: ## @mcl_fp_mont8Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $1256, %rsp ## imm = 0x4E8 - movq %rcx, %r13 - movq %rdx, 64(%rsp) ## 8-byte Spill - movq %rsi, 72(%rsp) ## 8-byte Spill - movq %rdi, 96(%rsp) ## 8-byte Spill - movq -8(%r13), %rbx - movq %rbx, 80(%rsp) ## 8-byte Spill - movq %r13, 56(%rsp) ## 8-byte Spill - movq (%rdx), %rdx - leaq 1184(%rsp), %rdi - callq l_mulPv512x64 - movq 1184(%rsp), %r15 - movq 1192(%rsp), %r14 - movq %r15, %rdx - imulq %rbx, %rdx - movq 1248(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - movq 1240(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - movq 1232(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 1224(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq 1216(%rsp), %r12 - movq 1208(%rsp), %rbx - movq 1200(%rsp), %rbp - leaq 1112(%rsp), %rdi - movq %r13, %rsi - callq l_mulPv512x64 - addq 1112(%rsp), %r15 - adcq 1120(%rsp), %r14 - adcq 1128(%rsp), %rbp - movq %rbp, 88(%rsp) ## 8-byte Spill - adcq 1136(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - adcq 1144(%rsp), %r12 - movq %r12, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %r13 ## 8-byte Reload - adcq 1152(%rsp), %r13 - movq (%rsp), %rbx ## 8-byte Reload - adcq 1160(%rsp), %rbx - movq 40(%rsp), %rbp ## 8-byte Reload - adcq 1168(%rsp), %rbp - movq 24(%rsp), %rax ## 8-byte Reload - adcq 1176(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - sbbq %r15, %r15 - movq 64(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rdx - leaq 1040(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - andl $1, %r15d - addq 1040(%rsp), %r14 - movq 88(%rsp), %rax ## 8-byte Reload - adcq 1048(%rsp), %rax - movq %rax, 88(%rsp) ## 8-byte Spill - movq 32(%rsp), %rax ## 8-byte Reload - adcq 1056(%rsp), %rax - movq %rax, %r12 - movq 8(%rsp), %rax ## 8-byte Reload - adcq 1064(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - adcq 1072(%rsp), %r13 - movq %r13, 16(%rsp) ## 8-byte Spill - adcq 1080(%rsp), %rbx - movq %rbx, (%rsp) ## 8-byte Spill - adcq 1088(%rsp), %rbp - movq 24(%rsp), %rax ## 8-byte Reload - adcq 1096(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 1104(%rsp), %r15 - movq %r15, 48(%rsp) ## 8-byte Spill - sbbq %r15, %r15 - movq %r14, %rdx - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 968(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - andl $1, %r15d - addq 968(%rsp), %r14 - movq 88(%rsp), %r13 ## 8-byte Reload - adcq 976(%rsp), %r13 - adcq 984(%rsp), %r12 - movq %r12, 32(%rsp) ## 8-byte Spill - movq 8(%rsp), %r14 ## 8-byte Reload - adcq 992(%rsp), %r14 - movq 16(%rsp), %rbx ## 8-byte Reload - adcq 1000(%rsp), %rbx - movq 
(%rsp), %rax ## 8-byte Reload - adcq 1008(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - adcq 1016(%rsp), %rbp - movq %rbp, %r12 - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 1024(%rsp), %rbp - movq 48(%rsp), %rax ## 8-byte Reload - adcq 1032(%rsp), %rax - movq %rax, 48(%rsp) ## 8-byte Spill - adcq $0, %r15 - movq 64(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rdx - leaq 896(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq %r13, %rcx - addq 896(%rsp), %rcx - movq 32(%rsp), %r13 ## 8-byte Reload - adcq 904(%rsp), %r13 - adcq 912(%rsp), %r14 - adcq 920(%rsp), %rbx - movq %rbx, 16(%rsp) ## 8-byte Spill - movq (%rsp), %rax ## 8-byte Reload - adcq 928(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - adcq 936(%rsp), %r12 - movq %r12, 40(%rsp) ## 8-byte Spill - adcq 944(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq 48(%rsp), %r12 ## 8-byte Reload - adcq 952(%rsp), %r12 - adcq 960(%rsp), %r15 - sbbq %rbx, %rbx - movq %rcx, %rdx - movq %rcx, %rbp - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 824(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - andl $1, %ebx - addq 824(%rsp), %rbp - adcq 832(%rsp), %r13 - movq %r13, 32(%rsp) ## 8-byte Spill - adcq 840(%rsp), %r14 - movq %r14, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %r13 ## 8-byte Reload - adcq 848(%rsp), %r13 - movq (%rsp), %rbp ## 8-byte Reload - adcq 856(%rsp), %rbp - movq 40(%rsp), %r14 ## 8-byte Reload - adcq 864(%rsp), %r14 - movq 24(%rsp), %rax ## 8-byte Reload - adcq 872(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 880(%rsp), %r12 - adcq 888(%rsp), %r15 - adcq $0, %rbx - movq 64(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rdx - leaq 752(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 32(%rsp), %rax ## 8-byte Reload - addq 752(%rsp), %rax - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 760(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - adcq 768(%rsp), %r13 - movq %r13, 16(%rsp) ## 8-byte Spill - adcq 776(%rsp), %rbp - movq %rbp, (%rsp) ## 8-byte Spill - adcq 784(%rsp), %r14 - movq %r14, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 792(%rsp), %rbp - adcq 800(%rsp), %r12 - adcq 808(%rsp), %r15 - adcq 816(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - sbbq %r13, %r13 - movq %rax, %rdx - movq %rax, %rbx - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 680(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq %r13, %rax - andl $1, %eax - addq 680(%rsp), %rbx - movq 8(%rsp), %r14 ## 8-byte Reload - adcq 688(%rsp), %r14 - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 696(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - movq (%rsp), %r13 ## 8-byte Reload - adcq 704(%rsp), %r13 - movq 40(%rsp), %rbx ## 8-byte Reload - adcq 712(%rsp), %rbx - adcq 720(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq %r12, %rbp - adcq 728(%rsp), %rbp - adcq 736(%rsp), %r15 - movq 32(%rsp), %r12 ## 8-byte Reload - adcq 744(%rsp), %r12 - adcq $0, %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 64(%rsp), %rax ## 8-byte Reload - movq 32(%rax), %rdx - leaq 608(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq %r14, %rax - addq 608(%rsp), %rax - movq 16(%rsp), %r14 ## 8-byte Reload - adcq 616(%rsp), %r14 - adcq 624(%rsp), %r13 - movq %r13, (%rsp) ## 8-byte Spill - adcq 632(%rsp), %rbx - movq %rbx, %r13 - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 640(%rsp), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 648(%rsp), %rbp - movq %rbp, 48(%rsp) ## 
8-byte Spill - adcq 656(%rsp), %r15 - adcq 664(%rsp), %r12 - movq %r12, 32(%rsp) ## 8-byte Spill - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 672(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - sbbq %rbp, %rbp - movq %rax, %rdx - movq %rax, %rbx - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 536(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq %rbp, %rax - andl $1, %eax - addq 536(%rsp), %rbx - adcq 544(%rsp), %r14 - movq %r14, 16(%rsp) ## 8-byte Spill - movq (%rsp), %rbx ## 8-byte Reload - adcq 552(%rsp), %rbx - adcq 560(%rsp), %r13 - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 568(%rsp), %rbp - movq 48(%rsp), %r12 ## 8-byte Reload - adcq 576(%rsp), %r12 - adcq 584(%rsp), %r15 - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 592(%rsp), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - movq 8(%rsp), %r14 ## 8-byte Reload - adcq 600(%rsp), %r14 - adcq $0, %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 64(%rsp), %rax ## 8-byte Reload - movq 40(%rax), %rdx - leaq 464(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 16(%rsp), %rax ## 8-byte Reload - addq 464(%rsp), %rax - adcq 472(%rsp), %rbx - adcq 480(%rsp), %r13 - movq %r13, 40(%rsp) ## 8-byte Spill - adcq 488(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - adcq 496(%rsp), %r12 - adcq 504(%rsp), %r15 - movq %r15, 16(%rsp) ## 8-byte Spill - movq 32(%rsp), %r15 ## 8-byte Reload - adcq 512(%rsp), %r15 - adcq 520(%rsp), %r14 - movq %r14, 8(%rsp) ## 8-byte Spill - movq (%rsp), %r14 ## 8-byte Reload - adcq 528(%rsp), %r14 - sbbq %r13, %r13 - movq %rax, %rdx - movq %rax, %rbp - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 392(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq %r13, %rax - andl $1, %eax - addq 392(%rsp), %rbp - adcq 400(%rsp), %rbx - movq %rbx, (%rsp) ## 8-byte Spill - movq 40(%rsp), %rbp ## 8-byte Reload - adcq 408(%rsp), %rbp - movq 24(%rsp), %rbx ## 8-byte Reload - adcq 416(%rsp), %rbx - adcq 424(%rsp), %r12 - movq 16(%rsp), %r13 ## 8-byte Reload - adcq 432(%rsp), %r13 - adcq 440(%rsp), %r15 - movq %r15, 32(%rsp) ## 8-byte Spill - movq 8(%rsp), %r15 ## 8-byte Reload - adcq 448(%rsp), %r15 - adcq 456(%rsp), %r14 - adcq $0, %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq 64(%rsp), %rax ## 8-byte Reload - movq 48(%rax), %rdx - leaq 320(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq (%rsp), %rax ## 8-byte Reload - addq 320(%rsp), %rax - adcq 328(%rsp), %rbp - movq %rbp, 40(%rsp) ## 8-byte Spill - adcq 336(%rsp), %rbx - movq %rbx, 24(%rsp) ## 8-byte Spill - movq %r12, %rbp - adcq 344(%rsp), %rbp - adcq 352(%rsp), %r13 - movq 32(%rsp), %r12 ## 8-byte Reload - adcq 360(%rsp), %r12 - adcq 368(%rsp), %r15 - movq %r15, 8(%rsp) ## 8-byte Spill - adcq 376(%rsp), %r14 - movq %r14, (%rsp) ## 8-byte Spill - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 384(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - sbbq %r15, %r15 - movq %rax, %rdx - movq %rax, %rbx - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 248(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - andl $1, %r15d - addq 248(%rsp), %rbx - movq 40(%rsp), %rax ## 8-byte Reload - adcq 256(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %r14 ## 8-byte Reload - adcq 264(%rsp), %r14 - adcq 272(%rsp), %rbp - movq %rbp, 48(%rsp) ## 8-byte Spill - movq %r13, %rbx - adcq 280(%rsp), %rbx - movq %r12, %rbp - adcq 288(%rsp), %rbp - movq 8(%rsp), %r13 ## 8-byte Reload - adcq 296(%rsp), %r13 - movq (%rsp), %rax ## 
8-byte Reload - adcq 304(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 16(%rsp), %r12 ## 8-byte Reload - adcq 312(%rsp), %r12 - adcq $0, %r15 - movq 64(%rsp), %rax ## 8-byte Reload - movq 56(%rax), %rdx - leaq 176(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 40(%rsp), %rax ## 8-byte Reload - addq 176(%rsp), %rax - adcq 184(%rsp), %r14 - movq %r14, 24(%rsp) ## 8-byte Spill - movq 48(%rsp), %rcx ## 8-byte Reload - adcq 192(%rsp), %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - adcq 200(%rsp), %rbx - movq %rbx, 16(%rsp) ## 8-byte Spill - adcq 208(%rsp), %rbp - adcq 216(%rsp), %r13 - movq %r13, 8(%rsp) ## 8-byte Spill - movq (%rsp), %r14 ## 8-byte Reload - adcq 224(%rsp), %r14 - adcq 232(%rsp), %r12 - adcq 240(%rsp), %r15 - sbbq %rbx, %rbx - movq 80(%rsp), %rdx ## 8-byte Reload - imulq %rax, %rdx - movq %rax, %r13 - leaq 104(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - andl $1, %ebx - addq 104(%rsp), %r13 - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 112(%rsp), %rcx - movq 48(%rsp), %rdx ## 8-byte Reload - adcq 120(%rsp), %rdx - movq 16(%rsp), %rsi ## 8-byte Reload - adcq 128(%rsp), %rsi - movq %rbp, %rdi - adcq 136(%rsp), %rdi - movq %rdi, 32(%rsp) ## 8-byte Spill - movq 8(%rsp), %r8 ## 8-byte Reload - adcq 144(%rsp), %r8 - movq %r8, 8(%rsp) ## 8-byte Spill - movq %r14, %r9 - adcq 152(%rsp), %r9 - movq %r9, (%rsp) ## 8-byte Spill - adcq 160(%rsp), %r12 - adcq 168(%rsp), %r15 - adcq $0, %rbx - movq %rcx, %rax - movq %rcx, %r11 - movq 56(%rsp), %rbp ## 8-byte Reload - subq (%rbp), %rax - movq %rdx, %rcx - movq %rdx, %r14 - sbbq 8(%rbp), %rcx - movq %rsi, %rdx - movq %rsi, %r13 - sbbq 16(%rbp), %rdx - movq %rdi, %rsi - sbbq 24(%rbp), %rsi - movq %r8, %rdi - sbbq 32(%rbp), %rdi - movq %r9, %r10 - sbbq 40(%rbp), %r10 - movq %r12, %r8 - sbbq 48(%rbp), %r8 - movq %r15, %r9 - sbbq 56(%rbp), %r9 - sbbq $0, %rbx - andl $1, %ebx - cmovneq %r15, %r9 - testb %bl, %bl - cmovneq %r11, %rax - movq 96(%rsp), %rbx ## 8-byte Reload - movq %rax, (%rbx) - cmovneq %r14, %rcx - movq %rcx, 8(%rbx) - cmovneq %r13, %rdx - movq %rdx, 16(%rbx) - cmovneq 32(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 24(%rbx) - cmovneq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 32(%rbx) - cmovneq (%rsp), %r10 ## 8-byte Folded Reload - movq %r10, 40(%rbx) - cmovneq %r12, %r8 - movq %r8, 48(%rbx) - movq %r9, 56(%rbx) - addq $1256, %rsp ## imm = 0x4E8 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montNF8Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montNF8Lbmi2: ## @mcl_fp_montNF8Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $1240, %rsp ## imm = 0x4D8 - movq %rcx, 40(%rsp) ## 8-byte Spill - movq %rdx, 48(%rsp) ## 8-byte Spill - movq %rsi, 56(%rsp) ## 8-byte Spill - movq %rdi, 80(%rsp) ## 8-byte Spill - movq -8(%rcx), %rbx - movq %rbx, 64(%rsp) ## 8-byte Spill - movq (%rdx), %rdx - leaq 1168(%rsp), %rdi - callq l_mulPv512x64 - movq 1168(%rsp), %r15 - movq 1176(%rsp), %r12 - movq %r15, %rdx - imulq %rbx, %rdx - movq 1232(%rsp), %rax - movq %rax, 32(%rsp) ## 8-byte Spill - movq 1224(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 1216(%rsp), %r13 - movq 1208(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 1200(%rsp), %r14 - movq 1192(%rsp), %rbp - movq 1184(%rsp), %rbx - leaq 1096(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 1096(%rsp), %r15 - adcq 1104(%rsp), %r12 - movq %r12, 16(%rsp) ## 8-byte Spill - adcq 
1112(%rsp), %rbx - adcq 1120(%rsp), %rbp - adcq 1128(%rsp), %r14 - movq %r14, %r12 - movq 8(%rsp), %r14 ## 8-byte Reload - adcq 1136(%rsp), %r14 - adcq 1144(%rsp), %r13 - movq (%rsp), %rax ## 8-byte Reload - adcq 1152(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 32(%rsp), %rax ## 8-byte Reload - adcq 1160(%rsp), %rax - movq %rax, 32(%rsp) ## 8-byte Spill - movq 48(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rdx - leaq 1024(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 1088(%rsp), %r15 - movq 16(%rsp), %rax ## 8-byte Reload - addq 1024(%rsp), %rax - adcq 1032(%rsp), %rbx - movq %rbx, 72(%rsp) ## 8-byte Spill - movq %rbp, %rbx - adcq 1040(%rsp), %rbx - adcq 1048(%rsp), %r12 - adcq 1056(%rsp), %r14 - movq %r14, 8(%rsp) ## 8-byte Spill - movq %r13, %rbp - adcq 1064(%rsp), %rbp - movq (%rsp), %rcx ## 8-byte Reload - adcq 1072(%rsp), %rcx - movq %rcx, (%rsp) ## 8-byte Spill - movq 32(%rsp), %r14 ## 8-byte Reload - adcq 1080(%rsp), %r14 - adcq $0, %r15 - movq %rax, %rdx - movq %rax, %r13 - imulq 64(%rsp), %rdx ## 8-byte Folded Reload - leaq 952(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 952(%rsp), %r13 - movq 72(%rsp), %rax ## 8-byte Reload - adcq 960(%rsp), %rax - movq %rax, 72(%rsp) ## 8-byte Spill - adcq 968(%rsp), %rbx - movq %rbx, 16(%rsp) ## 8-byte Spill - movq %r12, %rbx - adcq 976(%rsp), %rbx - movq 8(%rsp), %r12 ## 8-byte Reload - adcq 984(%rsp), %r12 - adcq 992(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq (%rsp), %r13 ## 8-byte Reload - adcq 1000(%rsp), %r13 - movq %r14, %rbp - adcq 1008(%rsp), %rbp - adcq 1016(%rsp), %r15 - movq 48(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rdx - leaq 880(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 944(%rsp), %r14 - movq 72(%rsp), %rax ## 8-byte Reload - addq 880(%rsp), %rax - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 888(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - adcq 896(%rsp), %rbx - adcq 904(%rsp), %r12 - movq %r12, 8(%rsp) ## 8-byte Spill - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 912(%rsp), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 920(%rsp), %r13 - movq %r13, (%rsp) ## 8-byte Spill - adcq 928(%rsp), %rbp - movq %rbp, 32(%rsp) ## 8-byte Spill - adcq 936(%rsp), %r15 - adcq $0, %r14 - movq %rax, %rdx - movq %rax, %rbp - imulq 64(%rsp), %rdx ## 8-byte Folded Reload - leaq 808(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 808(%rsp), %rbp - movq 16(%rsp), %r13 ## 8-byte Reload - adcq 816(%rsp), %r13 - movq %rbx, %r12 - adcq 824(%rsp), %r12 - movq 8(%rsp), %rbx ## 8-byte Reload - adcq 832(%rsp), %rbx - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 840(%rsp), %rbp - movq (%rsp), %rax ## 8-byte Reload - adcq 848(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 32(%rsp), %rax ## 8-byte Reload - adcq 856(%rsp), %rax - movq %rax, 32(%rsp) ## 8-byte Spill - adcq 864(%rsp), %r15 - adcq 872(%rsp), %r14 - movq 48(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rdx - leaq 736(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 800(%rsp), %rax - movq %r13, %rcx - addq 736(%rsp), %rcx - adcq 744(%rsp), %r12 - movq %r12, 24(%rsp) ## 8-byte Spill - adcq 752(%rsp), %rbx - movq %rbx, 8(%rsp) ## 8-byte Spill - adcq 760(%rsp), %rbp - movq %rbp, %r13 - movq (%rsp), %rbp ## 8-byte Reload - adcq 768(%rsp), %rbp - movq 32(%rsp), %rbx ## 8-byte Reload - adcq 776(%rsp), %rbx - adcq 784(%rsp), %r15 - adcq 792(%rsp), %r14 - adcq $0, %rax - movq %rax, 16(%rsp) 
## 8-byte Spill - movq %rcx, %rdx - movq %rcx, %r12 - imulq 64(%rsp), %rdx ## 8-byte Folded Reload - leaq 664(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 664(%rsp), %r12 - movq 24(%rsp), %rax ## 8-byte Reload - adcq 672(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - movq 8(%rsp), %rax ## 8-byte Reload - adcq 680(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - adcq 688(%rsp), %r13 - adcq 696(%rsp), %rbp - movq %rbp, (%rsp) ## 8-byte Spill - adcq 704(%rsp), %rbx - adcq 712(%rsp), %r15 - adcq 720(%rsp), %r14 - movq 16(%rsp), %r12 ## 8-byte Reload - adcq 728(%rsp), %r12 - movq 48(%rsp), %rax ## 8-byte Reload - movq 32(%rax), %rdx - leaq 592(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 656(%rsp), %rcx - movq 24(%rsp), %rax ## 8-byte Reload - addq 592(%rsp), %rax - movq 8(%rsp), %rbp ## 8-byte Reload - adcq 600(%rsp), %rbp - adcq 608(%rsp), %r13 - movq %r13, 24(%rsp) ## 8-byte Spill - movq (%rsp), %r13 ## 8-byte Reload - adcq 616(%rsp), %r13 - adcq 624(%rsp), %rbx - adcq 632(%rsp), %r15 - adcq 640(%rsp), %r14 - adcq 648(%rsp), %r12 - movq %r12, 16(%rsp) ## 8-byte Spill - adcq $0, %rcx - movq %rcx, (%rsp) ## 8-byte Spill - movq %rax, %rdx - movq %rax, %r12 - imulq 64(%rsp), %rdx ## 8-byte Folded Reload - leaq 520(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 520(%rsp), %r12 - adcq 528(%rsp), %rbp - movq %rbp, 8(%rsp) ## 8-byte Spill - movq 24(%rsp), %r12 ## 8-byte Reload - adcq 536(%rsp), %r12 - movq %r13, %rbp - adcq 544(%rsp), %rbp - adcq 552(%rsp), %rbx - adcq 560(%rsp), %r15 - adcq 568(%rsp), %r14 - movq 16(%rsp), %r13 ## 8-byte Reload - adcq 576(%rsp), %r13 - movq (%rsp), %rax ## 8-byte Reload - adcq 584(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 48(%rsp), %rax ## 8-byte Reload - movq 40(%rax), %rdx - leaq 448(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 512(%rsp), %rcx - movq 8(%rsp), %rax ## 8-byte Reload - addq 448(%rsp), %rax - adcq 456(%rsp), %r12 - movq %r12, 24(%rsp) ## 8-byte Spill - adcq 464(%rsp), %rbp - adcq 472(%rsp), %rbx - adcq 480(%rsp), %r15 - adcq 488(%rsp), %r14 - adcq 496(%rsp), %r13 - movq %r13, 16(%rsp) ## 8-byte Spill - movq (%rsp), %r13 ## 8-byte Reload - adcq 504(%rsp), %r13 - adcq $0, %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq %rax, %rdx - movq %rax, %r12 - imulq 64(%rsp), %rdx ## 8-byte Folded Reload - leaq 376(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 376(%rsp), %r12 - movq 24(%rsp), %rax ## 8-byte Reload - adcq 384(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 392(%rsp), %rbp - adcq 400(%rsp), %rbx - adcq 408(%rsp), %r15 - adcq 416(%rsp), %r14 - movq 16(%rsp), %r12 ## 8-byte Reload - adcq 424(%rsp), %r12 - adcq 432(%rsp), %r13 - movq 8(%rsp), %rax ## 8-byte Reload - adcq 440(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 48(%rsp), %rax ## 8-byte Reload - movq 48(%rax), %rdx - leaq 304(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 368(%rsp), %rcx - movq 24(%rsp), %rax ## 8-byte Reload - addq 304(%rsp), %rax - adcq 312(%rsp), %rbp - movq %rbp, (%rsp) ## 8-byte Spill - adcq 320(%rsp), %rbx - adcq 328(%rsp), %r15 - adcq 336(%rsp), %r14 - adcq 344(%rsp), %r12 - movq %r12, 16(%rsp) ## 8-byte Spill - adcq 352(%rsp), %r13 - movq 8(%rsp), %rbp ## 8-byte Reload - adcq 360(%rsp), %rbp - adcq $0, %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - movq %rax, %rdx - movq %rax, %r12 - imulq 64(%rsp), %rdx ## 8-byte Folded Reload - 
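## q = t0 * n' mod 2^64 is now in %rdx; the l_mulPv512x64 call below forms q*N for the Montgomery folding step -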
leaq 232(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 232(%rsp), %r12 - movq (%rsp), %rax ## 8-byte Reload - adcq 240(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - adcq 248(%rsp), %rbx - adcq 256(%rsp), %r15 - adcq 264(%rsp), %r14 - movq 16(%rsp), %r12 ## 8-byte Reload - adcq 272(%rsp), %r12 - adcq 280(%rsp), %r13 - adcq 288(%rsp), %rbp - movq %rbp, 8(%rsp) ## 8-byte Spill - movq 32(%rsp), %rbp ## 8-byte Reload - adcq 296(%rsp), %rbp - movq 48(%rsp), %rax ## 8-byte Reload - movq 56(%rax), %rdx - leaq 160(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 224(%rsp), %rcx - movq (%rsp), %rax ## 8-byte Reload - addq 160(%rsp), %rax - adcq 168(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - adcq 176(%rsp), %r15 - adcq 184(%rsp), %r14 - adcq 192(%rsp), %r12 - movq %r12, 16(%rsp) ## 8-byte Spill - adcq 200(%rsp), %r13 - movq 8(%rsp), %rbx ## 8-byte Reload - adcq 208(%rsp), %rbx - adcq 216(%rsp), %rbp - movq %rbp, %r12 - adcq $0, %rcx - movq %rcx, (%rsp) ## 8-byte Spill - movq 64(%rsp), %rdx ## 8-byte Reload - imulq %rax, %rdx - movq %rax, %rbp - leaq 88(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 88(%rsp), %rbp - movq 32(%rsp), %r11 ## 8-byte Reload - adcq 96(%rsp), %r11 - adcq 104(%rsp), %r15 - adcq 112(%rsp), %r14 - movq 16(%rsp), %rsi ## 8-byte Reload - adcq 120(%rsp), %rsi - movq %rsi, 16(%rsp) ## 8-byte Spill - adcq 128(%rsp), %r13 - adcq 136(%rsp), %rbx - movq %rbx, 8(%rsp) ## 8-byte Spill - adcq 144(%rsp), %r12 - movq (%rsp), %r8 ## 8-byte Reload - adcq 152(%rsp), %r8 - movq %r11, %rax - movq 40(%rsp), %rbp ## 8-byte Reload - subq (%rbp), %rax - movq %r15, %rcx - sbbq 8(%rbp), %rcx - movq %r14, %rdx - sbbq 16(%rbp), %rdx - sbbq 24(%rbp), %rsi - movq %r13, %rdi - sbbq 32(%rbp), %rdi - movq %rbx, %r9 - sbbq 40(%rbp), %r9 - movq %r12, %r10 - sbbq 48(%rbp), %r10 - movq %rbp, %rbx - movq %r8, %rbp - sbbq 56(%rbx), %rbp - testq %rbp, %rbp - cmovsq %r11, %rax - movq 80(%rsp), %rbx ## 8-byte Reload - movq %rax, (%rbx) - cmovsq %r15, %rcx - movq %rcx, 8(%rbx) - cmovsq %r14, %rdx - movq %rdx, 16(%rbx) - cmovsq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 24(%rbx) - cmovsq %r13, %rdi - movq %rdi, 32(%rbx) - cmovsq 8(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 40(%rbx) - cmovsq %r12, %r10 - movq %r10, 48(%rbx) - cmovsq %r8, %rbp - movq %rbp, 56(%rbx) - addq $1240, %rsp ## imm = 0x4D8 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montRed8Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montRed8Lbmi2: ## @mcl_fp_montRed8Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $776, %rsp ## imm = 0x308 - movq %rdx, %rax - movq %rdi, 192(%rsp) ## 8-byte Spill - movq -8(%rax), %rcx - movq %rcx, 104(%rsp) ## 8-byte Spill - movq (%rsi), %r15 - movq 8(%rsi), %rdx - movq %rdx, 8(%rsp) ## 8-byte Spill - movq %r15, %rdx - imulq %rcx, %rdx - movq 120(%rsi), %rcx - movq %rcx, 112(%rsp) ## 8-byte Spill - movq 112(%rsi), %rcx - movq %rcx, 56(%rsp) ## 8-byte Spill - movq 104(%rsi), %rcx - movq %rcx, 96(%rsp) ## 8-byte Spill - movq 96(%rsi), %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - movq 88(%rsi), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - movq 80(%rsi), %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - movq 72(%rsi), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - movq 64(%rsi), %r13 - movq 56(%rsi), %rcx - movq %rcx, 64(%rsp) ## 8-byte Spill - movq 48(%rsi), %r14 - movq 40(%rsi), %rcx - movq %rcx, 72(%rsp) ## 8-byte Spill 
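## NOTE (reader annotation): prologue of mcl_fp_montRed8Lbmi2. The 16-limb
## double-width Montgomery product is loaded from (%rsi) and spilled to the
## stack, and the eight limbs of the modulus p are cached at fixed slots
## 136..184(%rsp) just below, ready for the final conditional subtraction.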
- movq 32(%rsi), %r12 - movq 24(%rsi), %rbx - movq 16(%rsi), %rbp - movq %rax, %rcx - movq (%rcx), %rax - movq %rax, 136(%rsp) ## 8-byte Spill - movq 56(%rcx), %rax - movq %rax, 184(%rsp) ## 8-byte Spill - movq 48(%rcx), %rax - movq %rax, 176(%rsp) ## 8-byte Spill - movq 40(%rcx), %rax - movq %rax, 168(%rsp) ## 8-byte Spill - movq 32(%rcx), %rax - movq %rax, 160(%rsp) ## 8-byte Spill - movq 24(%rcx), %rax - movq %rax, 152(%rsp) ## 8-byte Spill - movq 16(%rcx), %rax - movq %rax, 144(%rsp) ## 8-byte Spill - movq 8(%rcx), %rax - movq %rax, 128(%rsp) ## 8-byte Spill - movq %rcx, %rsi - movq %rsi, 88(%rsp) ## 8-byte Spill - leaq 704(%rsp), %rdi - callq l_mulPv512x64 - addq 704(%rsp), %r15 - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 712(%rsp), %rcx - adcq 720(%rsp), %rbp - movq %rbp, 80(%rsp) ## 8-byte Spill - adcq 728(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - adcq 736(%rsp), %r12 - movq %r12, 120(%rsp) ## 8-byte Spill - movq 72(%rsp), %rax ## 8-byte Reload - adcq 744(%rsp), %rax - movq %rax, 72(%rsp) ## 8-byte Spill - adcq 752(%rsp), %r14 - movq %r14, %r12 - movq 64(%rsp), %rax ## 8-byte Reload - adcq 760(%rsp), %rax - movq %rax, 64(%rsp) ## 8-byte Spill - adcq 768(%rsp), %r13 - movq %r13, 8(%rsp) ## 8-byte Spill - adcq $0, 16(%rsp) ## 8-byte Folded Spill - movq 40(%rsp), %r15 ## 8-byte Reload - adcq $0, %r15 - adcq $0, 24(%rsp) ## 8-byte Folded Spill - adcq $0, 48(%rsp) ## 8-byte Folded Spill - adcq $0, 96(%rsp) ## 8-byte Folded Spill - movq 56(%rsp), %r13 ## 8-byte Reload - adcq $0, %r13 - movq 112(%rsp), %r14 ## 8-byte Reload - adcq $0, %r14 - sbbq %rbx, %rbx - movq %rcx, %rbp - movq %rbp, %rdx - imulq 104(%rsp), %rdx ## 8-byte Folded Reload - leaq 632(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - andl $1, %ebx - movq %rbx, %rax - addq 632(%rsp), %rbp - movq 80(%rsp), %rsi ## 8-byte Reload - adcq 640(%rsp), %rsi - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 648(%rsp), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - movq 120(%rsp), %rcx ## 8-byte Reload - adcq 656(%rsp), %rcx - movq %rcx, 120(%rsp) ## 8-byte Spill - movq 72(%rsp), %rcx ## 8-byte Reload - adcq 664(%rsp), %rcx - movq %rcx, 72(%rsp) ## 8-byte Spill - adcq 672(%rsp), %r12 - movq 64(%rsp), %rcx ## 8-byte Reload - adcq 680(%rsp), %rcx - movq %rcx, 64(%rsp) ## 8-byte Spill - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 688(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 696(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - adcq $0, %r15 - movq %r15, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rbx ## 8-byte Reload - adcq $0, %rbx - movq 48(%rsp), %r15 ## 8-byte Reload - adcq $0, %r15 - adcq $0, 96(%rsp) ## 8-byte Folded Spill - adcq $0, %r13 - movq %r13, 56(%rsp) ## 8-byte Spill - adcq $0, %r14 - movq %r14, 112(%rsp) ## 8-byte Spill - movq %rax, %rbp - adcq $0, %rbp - movq %rsi, %rdx - movq %rsi, %r14 - imulq 104(%rsp), %rdx ## 8-byte Folded Reload - leaq 560(%rsp), %rdi - movq 88(%rsp), %r13 ## 8-byte Reload - movq %r13, %rsi - callq l_mulPv512x64 - addq 560(%rsp), %r14 - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 568(%rsp), %rcx - movq 120(%rsp), %rax ## 8-byte Reload - adcq 576(%rsp), %rax - movq %rax, 120(%rsp) ## 8-byte Spill - movq 72(%rsp), %rax ## 8-byte Reload - adcq 584(%rsp), %rax - movq %rax, 72(%rsp) ## 8-byte Spill - adcq 592(%rsp), %r12 - movq %r12, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %r14 ## 8-byte Reload - adcq 600(%rsp), %r14 - movq 8(%rsp), %rax ## 8-byte Reload - adcq 608(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - 
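## NOTE (reader annotation): each round of this reduction loop forms
## m = t0 * n' (n' is cached at 104(%rsp)), calls l_mulPv512x64 to obtain
## m * p, and adds it so the low limb of the accumulator vanishes. After eight
## rounds the result is nine limbs plus a carry word, and the subtract/cmovneq
## epilogue brings it into [0, p).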
movq 16(%rsp), %rax ## 8-byte Reload - adcq 616(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq 40(%rsp), %rax ## 8-byte Reload - adcq 624(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - adcq $0, %rbx - movq %rbx, 24(%rsp) ## 8-byte Spill - adcq $0, %r15 - movq %r15, 48(%rsp) ## 8-byte Spill - movq 96(%rsp), %rbx ## 8-byte Reload - adcq $0, %rbx - movq 56(%rsp), %r15 ## 8-byte Reload - adcq $0, %r15 - adcq $0, 112(%rsp) ## 8-byte Folded Spill - adcq $0, %rbp - movq %rbp, 80(%rsp) ## 8-byte Spill - movq %rcx, %rbp - movq %rbp, %rdx - movq 104(%rsp), %r12 ## 8-byte Reload - imulq %r12, %rdx - leaq 488(%rsp), %rdi - movq %r13, %rsi - callq l_mulPv512x64 - addq 488(%rsp), %rbp - movq 120(%rsp), %rax ## 8-byte Reload - adcq 496(%rsp), %rax - movq 72(%rsp), %rbp ## 8-byte Reload - adcq 504(%rsp), %rbp - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 512(%rsp), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - adcq 520(%rsp), %r14 - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 528(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 536(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - movq 40(%rsp), %r13 ## 8-byte Reload - adcq 544(%rsp), %r13 - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 552(%rsp), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq $0, 48(%rsp) ## 8-byte Folded Spill - adcq $0, %rbx - movq %rbx, 96(%rsp) ## 8-byte Spill - movq %r15, %rbx - adcq $0, %rbx - adcq $0, 112(%rsp) ## 8-byte Folded Spill - adcq $0, 80(%rsp) ## 8-byte Folded Spill - movq %rax, %rdx - movq %rax, %r15 - imulq %r12, %rdx - leaq 416(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 416(%rsp), %r15 - adcq 424(%rsp), %rbp - movq %rbp, %rax - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 432(%rsp), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - movq %r14, %r12 - adcq 440(%rsp), %r12 - movq 8(%rsp), %r14 ## 8-byte Reload - adcq 448(%rsp), %r14 - movq 16(%rsp), %rbp ## 8-byte Reload - adcq 456(%rsp), %rbp - adcq 464(%rsp), %r13 - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 472(%rsp), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - movq 48(%rsp), %rcx ## 8-byte Reload - adcq 480(%rsp), %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - adcq $0, 96(%rsp) ## 8-byte Folded Spill - adcq $0, %rbx - movq %rbx, 56(%rsp) ## 8-byte Spill - movq 112(%rsp), %r15 ## 8-byte Reload - adcq $0, %r15 - adcq $0, 80(%rsp) ## 8-byte Folded Spill - movq %rax, %rbx - movq %rbx, %rdx - imulq 104(%rsp), %rdx ## 8-byte Folded Reload - leaq 344(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 344(%rsp), %rbx - movq 32(%rsp), %rax ## 8-byte Reload - adcq 352(%rsp), %rax - adcq 360(%rsp), %r12 - movq %r12, 64(%rsp) ## 8-byte Spill - adcq 368(%rsp), %r14 - movq %r14, 8(%rsp) ## 8-byte Spill - adcq 376(%rsp), %rbp - movq %rbp, 16(%rsp) ## 8-byte Spill - adcq 384(%rsp), %r13 - movq %r13, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %r13 ## 8-byte Reload - adcq 392(%rsp), %r13 - movq 48(%rsp), %r12 ## 8-byte Reload - adcq 400(%rsp), %r12 - movq 96(%rsp), %r14 ## 8-byte Reload - adcq 408(%rsp), %r14 - movq 56(%rsp), %rbp ## 8-byte Reload - adcq $0, %rbp - movq %r15, %rbx - adcq $0, %rbx - adcq $0, 80(%rsp) ## 8-byte Folded Spill - movq %rax, %rdx - movq %rax, %r15 - imulq 104(%rsp), %rdx ## 8-byte Folded Reload - leaq 272(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 272(%rsp), %r15 - movq 64(%rsp), %rcx ## 8-byte Reload - adcq 280(%rsp), %rcx - movq 8(%rsp), %rax ## 8-byte Reload - adcq 288(%rsp), %rax - movq %rax, 
8(%rsp) ## 8-byte Spill - movq 16(%rsp), %rax ## 8-byte Reload - adcq 296(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq 40(%rsp), %rax ## 8-byte Reload - adcq 304(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 312(%rsp), %r13 - movq %r13, 24(%rsp) ## 8-byte Spill - adcq 320(%rsp), %r12 - movq %r12, 48(%rsp) ## 8-byte Spill - adcq 328(%rsp), %r14 - movq %r14, %r13 - adcq 336(%rsp), %rbp - movq %rbp, %r12 - adcq $0, %rbx - movq %rbx, %r14 - movq 80(%rsp), %r15 ## 8-byte Reload - adcq $0, %r15 - movq 104(%rsp), %rdx ## 8-byte Reload - movq %rcx, %rbx - imulq %rbx, %rdx - leaq 200(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 200(%rsp), %rbx - movq 8(%rsp), %rax ## 8-byte Reload - adcq 208(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %r8 ## 8-byte Reload - adcq 216(%rsp), %r8 - movq %r8, 16(%rsp) ## 8-byte Spill - movq 40(%rsp), %rdx ## 8-byte Reload - adcq 224(%rsp), %rdx - movq 24(%rsp), %rsi ## 8-byte Reload - adcq 232(%rsp), %rsi - movq 48(%rsp), %rdi ## 8-byte Reload - adcq 240(%rsp), %rdi - movq %r13, %rbp - adcq 248(%rsp), %rbp - movq %r12, %rbx - adcq 256(%rsp), %rbx - movq %rbx, 56(%rsp) ## 8-byte Spill - movq %r14, %r9 - adcq 264(%rsp), %r9 - adcq $0, %r15 - movq %r15, %r10 - subq 136(%rsp), %rax ## 8-byte Folded Reload - movq %r8, %rcx - sbbq 128(%rsp), %rcx ## 8-byte Folded Reload - movq %rdx, %r13 - sbbq 144(%rsp), %r13 ## 8-byte Folded Reload - movq %rsi, %r12 - sbbq 152(%rsp), %r12 ## 8-byte Folded Reload - movq %rdi, %r14 - sbbq 160(%rsp), %r14 ## 8-byte Folded Reload - movq %rbp, %r11 - sbbq 168(%rsp), %r11 ## 8-byte Folded Reload - movq %rbx, %r8 - sbbq 176(%rsp), %r8 ## 8-byte Folded Reload - movq %r9, %r15 - sbbq 184(%rsp), %r9 ## 8-byte Folded Reload - sbbq $0, %r10 - andl $1, %r10d - cmovneq %r15, %r9 - testb %r10b, %r10b - cmovneq 8(%rsp), %rax ## 8-byte Folded Reload - movq 192(%rsp), %rbx ## 8-byte Reload - movq %rax, (%rbx) - cmovneq 16(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 8(%rbx) - cmovneq %rdx, %r13 - movq %r13, 16(%rbx) - cmovneq %rsi, %r12 - movq %r12, 24(%rbx) - cmovneq %rdi, %r14 - movq %r14, 32(%rbx) - cmovneq %rbp, %r11 - movq %r11, 40(%rbx) - cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload - movq %r8, 48(%rbx) - movq %r9, 56(%rbx) - addq $776, %rsp ## imm = 0x308 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_addPre8Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addPre8Lbmi2: ## @mcl_fp_addPre8Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r8 - movq 56(%rsi), %r15 - movq 48(%rdx), %r9 - movq 48(%rsi), %r12 - movq 40(%rdx), %r10 - movq 32(%rdx), %r11 - movq 24(%rdx), %r14 - movq 16(%rdx), %rbx - movq (%rdx), %rcx - movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rbx - movq 40(%rsi), %r13 - movq 24(%rsi), %rax - movq 32(%rsi), %rsi - movq %rcx, (%rdi) - movq %rdx, 8(%rdi) - movq %rbx, 16(%rdi) - adcq %r14, %rax - movq %rax, 24(%rdi) - adcq %r11, %rsi - movq %rsi, 32(%rdi) - adcq %r10, %r13 - movq %r13, 40(%rdi) - adcq %r9, %r12 - movq %r12, 48(%rdi) - adcq %r8, %r15 - movq %r15, 56(%rdi) - sbbq %rax, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_subPre8Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subPre8Lbmi2: ## @mcl_fp_subPre8Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r8 - movq 56(%rsi), %r15 - movq 48(%rdx), %r9 - movq 40(%rdx), %r10 - 
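## NOTE (reader annotation): mcl_fp_addPre8Lbmi2 above and mcl_fp_subPre8Lbmi2
## here are plain 8x64-bit add/sub chains with no modular correction; the
## closing sbbq/andl $1 sequence leaves the carry-out (resp. borrow) in %rax
## for the caller to consume.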
movq 24(%rdx), %r11 - movq 32(%rdx), %r14 - movq (%rsi), %rbx - movq 8(%rsi), %r12 - xorl %eax, %eax - subq (%rdx), %rbx - sbbq 8(%rdx), %r12 - movq 16(%rsi), %rcx - sbbq 16(%rdx), %rcx - movq 48(%rsi), %r13 - movq 40(%rsi), %rdx - movq 32(%rsi), %rbp - movq 24(%rsi), %rsi - movq %rbx, (%rdi) - movq %r12, 8(%rdi) - movq %rcx, 16(%rdi) - sbbq %r11, %rsi - movq %rsi, 24(%rdi) - sbbq %r14, %rbp - movq %rbp, 32(%rdi) - sbbq %r10, %rdx - movq %rdx, 40(%rdi) - sbbq %r9, %r13 - movq %r13, 48(%rdi) - sbbq %r8, %r15 - movq %r15, 56(%rdi) - sbbq $0, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_shr1_8Lbmi2 - .p2align 4, 0x90 -_mcl_fp_shr1_8Lbmi2: ## @mcl_fp_shr1_8Lbmi2 -## BB#0: - movq 56(%rsi), %r8 - movq 48(%rsi), %r9 - movq 40(%rsi), %r10 - movq 32(%rsi), %r11 - movq 24(%rsi), %rcx - movq 16(%rsi), %rdx - movq (%rsi), %rax - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rax - movq %rax, (%rdi) - shrdq $1, %rdx, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rcx, %rdx - movq %rdx, 16(%rdi) - shrdq $1, %r11, %rcx - movq %rcx, 24(%rdi) - shrdq $1, %r10, %r11 - movq %r11, 32(%rdi) - shrdq $1, %r9, %r10 - movq %r10, 40(%rdi) - shrdq $1, %r8, %r9 - movq %r9, 48(%rdi) - shrq %r8 - movq %r8, 56(%rdi) - retq - - .globl _mcl_fp_add8Lbmi2 - .p2align 4, 0x90 -_mcl_fp_add8Lbmi2: ## @mcl_fp_add8Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r15 - movq 56(%rsi), %r8 - movq 48(%rdx), %r12 - movq 48(%rsi), %r9 - movq 40(%rsi), %r13 - movq 24(%rsi), %r11 - movq 32(%rsi), %r10 - movq (%rdx), %r14 - movq 8(%rdx), %rbx - addq (%rsi), %r14 - adcq 8(%rsi), %rbx - movq 16(%rdx), %rax - adcq 16(%rsi), %rax - adcq 24(%rdx), %r11 - movq 40(%rdx), %rsi - adcq 32(%rdx), %r10 - movq %r14, (%rdi) - movq %rbx, 8(%rdi) - movq %rax, 16(%rdi) - movq %r11, 24(%rdi) - movq %r10, 32(%rdi) - adcq %r13, %rsi - movq %rsi, 40(%rdi) - adcq %r12, %r9 - movq %r9, 48(%rdi) - adcq %r15, %r8 - movq %r8, 56(%rdi) - sbbq %rdx, %rdx - andl $1, %edx - subq (%rcx), %r14 - sbbq 8(%rcx), %rbx - sbbq 16(%rcx), %rax - sbbq 24(%rcx), %r11 - sbbq 32(%rcx), %r10 - sbbq 40(%rcx), %rsi - sbbq 48(%rcx), %r9 - sbbq 56(%rcx), %r8 - sbbq $0, %rdx - testb $1, %dl - jne LBB120_2 -## BB#1: ## %nocarry - movq %r14, (%rdi) - movq %rbx, 8(%rdi) - movq %rax, 16(%rdi) - movq %r11, 24(%rdi) - movq %r10, 32(%rdi) - movq %rsi, 40(%rdi) - movq %r9, 48(%rdi) - movq %r8, 56(%rdi) -LBB120_2: ## %carry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_addNF8Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addNF8Lbmi2: ## @mcl_fp_addNF8Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r8 - movq 48(%rdx), %rbp - movq 40(%rdx), %rbx - movq 32(%rdx), %rax - movq 24(%rdx), %r11 - movq 16(%rdx), %r15 - movq (%rdx), %r13 - movq 8(%rdx), %r12 - addq (%rsi), %r13 - adcq 8(%rsi), %r12 - adcq 16(%rsi), %r15 - adcq 24(%rsi), %r11 - adcq 32(%rsi), %rax - movq %rax, %r10 - movq %r10, -24(%rsp) ## 8-byte Spill - adcq 40(%rsi), %rbx - movq %rbx, %r9 - movq %r9, -16(%rsp) ## 8-byte Spill - adcq 48(%rsi), %rbp - movq %rbp, %rax - movq %rax, -8(%rsp) ## 8-byte Spill - adcq 56(%rsi), %r8 - movq %r13, %rsi - subq (%rcx), %rsi - movq %r12, %rdx - sbbq 8(%rcx), %rdx - movq %r15, %rbx - sbbq 16(%rcx), %rbx - movq %r11, %r14 - sbbq 24(%rcx), %r14 - movq %r10, %rbp - sbbq 32(%rcx), %rbp - movq %r9, %r10 - sbbq 40(%rcx), %r10 - movq %rax, %r9 - sbbq 48(%rcx), %r9 - movq %r8, %rax - sbbq 56(%rcx), %rax - testq %rax, 
%rax - cmovsq %r13, %rsi - movq %rsi, (%rdi) - cmovsq %r12, %rdx - movq %rdx, 8(%rdi) - cmovsq %r15, %rbx - movq %rbx, 16(%rdi) - cmovsq %r11, %r14 - movq %r14, 24(%rdi) - cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 32(%rdi) - cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload - movq %r10, 40(%rdi) - cmovsq -8(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 48(%rdi) - cmovsq %r8, %rax - movq %rax, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_sub8Lbmi2 - .p2align 4, 0x90 -_mcl_fp_sub8Lbmi2: ## @mcl_fp_sub8Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r12 - movq 56(%rsi), %r8 - movq 48(%rdx), %r13 - movq (%rsi), %rax - movq 8(%rsi), %r10 - xorl %ebx, %ebx - subq (%rdx), %rax - sbbq 8(%rdx), %r10 - movq 16(%rsi), %r11 - sbbq 16(%rdx), %r11 - movq 24(%rsi), %r15 - sbbq 24(%rdx), %r15 - movq 32(%rsi), %r14 - sbbq 32(%rdx), %r14 - movq 48(%rsi), %r9 - movq 40(%rsi), %rsi - sbbq 40(%rdx), %rsi - movq %rax, (%rdi) - movq %r10, 8(%rdi) - movq %r11, 16(%rdi) - movq %r15, 24(%rdi) - movq %r14, 32(%rdi) - movq %rsi, 40(%rdi) - sbbq %r13, %r9 - movq %r9, 48(%rdi) - sbbq %r12, %r8 - movq %r8, 56(%rdi) - sbbq $0, %rbx - testb $1, %bl - je LBB122_2 -## BB#1: ## %carry - addq (%rcx), %rax - movq %rax, (%rdi) - movq 8(%rcx), %rax - adcq %r10, %rax - movq %rax, 8(%rdi) - movq 16(%rcx), %rax - adcq %r11, %rax - movq %rax, 16(%rdi) - movq 24(%rcx), %rax - adcq %r15, %rax - movq %rax, 24(%rdi) - movq 32(%rcx), %rax - adcq %r14, %rax - movq %rax, 32(%rdi) - movq 40(%rcx), %rax - adcq %rsi, %rax - movq %rax, 40(%rdi) - movq 48(%rcx), %rax - adcq %r9, %rax - movq %rax, 48(%rdi) - movq 56(%rcx), %rax - adcq %r8, %rax - movq %rax, 56(%rdi) -LBB122_2: ## %nocarry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_subNF8Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subNF8Lbmi2: ## @mcl_fp_subNF8Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq %rdi, %r9 - movdqu (%rdx), %xmm0 - movdqu 16(%rdx), %xmm1 - movdqu 32(%rdx), %xmm2 - movdqu 48(%rdx), %xmm3 - pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1] - movd %xmm4, %r12 - movdqu (%rsi), %xmm4 - movdqu 16(%rsi), %xmm5 - movdqu 32(%rsi), %xmm8 - movdqu 48(%rsi), %xmm7 - pshufd $78, %xmm7, %xmm6 ## xmm6 = xmm7[2,3,0,1] - movd %xmm6, %rcx - movd %xmm3, %r13 - movd %xmm7, %rdi - pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1] - movd %xmm3, %rbp - pshufd $78, %xmm8, %xmm3 ## xmm3 = xmm8[2,3,0,1] - movd %xmm3, %rdx - movd %xmm2, %rsi - pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] - movd %xmm2, %r11 - pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1] - movd %xmm1, %r15 - pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] - movd %xmm1, %rbx - pshufd $78, %xmm4, %xmm1 ## xmm1 = xmm4[2,3,0,1] - movd %xmm0, %rax - movd %xmm4, %r14 - subq %rax, %r14 - movd %xmm1, %r10 - sbbq %rbx, %r10 - movd %xmm5, %rbx - sbbq %r15, %rbx - movd %xmm2, %r15 - sbbq %r11, %r15 - movd %xmm8, %r11 - sbbq %rsi, %r11 - sbbq %rbp, %rdx - movq %rdx, -24(%rsp) ## 8-byte Spill - sbbq %r13, %rdi - movq %rdi, -16(%rsp) ## 8-byte Spill - sbbq %r12, %rcx - movq %rcx, -8(%rsp) ## 8-byte Spill - movq %rcx, %rbp - sarq $63, %rbp - movq 56(%r8), %r12 - andq %rbp, %r12 - movq 48(%r8), %r13 - andq %rbp, %r13 - movq 40(%r8), %rdi - andq %rbp, %rdi - movq 32(%r8), %rsi - andq %rbp, %rsi - movq 24(%r8), %rdx - andq %rbp, %rdx - movq 16(%r8), %rcx - andq %rbp, %rcx - movq 8(%r8), %rax - andq %rbp, %rax - 
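## NOTE (reader annotation): branch-free correction in mcl_fp_subNF8Lbmi2.
## %rbp is the arithmetic right shift (sarq $63) of the top borrow word, i.e.
## an all-ones mask exactly when x - y went negative; each limb of p is ANDed
## with it, and the addq/adcq chain that follows adds the masked p back, so
## the result lands in [0, p) without a conditional jump.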
andq (%r8), %rbp - addq %r14, %rbp - adcq %r10, %rax - movq %rbp, (%r9) - adcq %rbx, %rcx - movq %rax, 8(%r9) - movq %rcx, 16(%r9) - adcq %r15, %rdx - movq %rdx, 24(%r9) - adcq %r11, %rsi - movq %rsi, 32(%r9) - adcq -24(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 40(%r9) - adcq -16(%rsp), %r13 ## 8-byte Folded Reload - movq %r13, 48(%r9) - adcq -8(%rsp), %r12 ## 8-byte Folded Reload - movq %r12, 56(%r9) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_add8Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_add8Lbmi2: ## @mcl_fpDbl_add8Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq 120(%rdx), %rax - movq %rax, -32(%rsp) ## 8-byte Spill - movq 112(%rdx), %rax - movq %rax, -16(%rsp) ## 8-byte Spill - movq 104(%rdx), %rax - movq %rax, -24(%rsp) ## 8-byte Spill - movq 96(%rdx), %r14 - movq 24(%rsi), %r15 - movq 32(%rsi), %r11 - movq 16(%rdx), %r12 - movq (%rdx), %rbx - movq 8(%rdx), %rax - addq (%rsi), %rbx - adcq 8(%rsi), %rax - adcq 16(%rsi), %r12 - adcq 24(%rdx), %r15 - adcq 32(%rdx), %r11 - movq 88(%rdx), %rbp - movq 80(%rdx), %r13 - movq %rbx, (%rdi) - movq 72(%rdx), %r10 - movq %rax, 8(%rdi) - movq 64(%rdx), %r9 - movq %r12, 16(%rdi) - movq 40(%rdx), %r12 - movq %r15, 24(%rdi) - movq 40(%rsi), %rbx - adcq %r12, %rbx - movq 56(%rdx), %r15 - movq 48(%rdx), %r12 - movq %r11, 32(%rdi) - movq 48(%rsi), %rdx - adcq %r12, %rdx - movq 120(%rsi), %r12 - movq %rbx, 40(%rdi) - movq 56(%rsi), %rax - adcq %r15, %rax - movq 112(%rsi), %rcx - movq %rdx, 48(%rdi) - movq 64(%rsi), %rbx - adcq %r9, %rbx - movq 104(%rsi), %rdx - movq %rax, 56(%rdi) - movq 72(%rsi), %r9 - adcq %r10, %r9 - movq 80(%rsi), %r11 - adcq %r13, %r11 - movq 96(%rsi), %rax - movq 88(%rsi), %r15 - adcq %rbp, %r15 - adcq %r14, %rax - movq %rax, -8(%rsp) ## 8-byte Spill - movq %rdx, %rax - adcq -24(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -24(%rsp) ## 8-byte Spill - adcq -16(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -16(%rsp) ## 8-byte Spill - adcq -32(%rsp), %r12 ## 8-byte Folded Reload - movq %r12, -32(%rsp) ## 8-byte Spill - sbbq %rbp, %rbp - andl $1, %ebp - movq %rbx, %rsi - subq (%r8), %rsi - movq %r9, %rdx - sbbq 8(%r8), %rdx - movq %r11, %r10 - sbbq 16(%r8), %r10 - movq %r15, %r14 - sbbq 24(%r8), %r14 - movq -8(%rsp), %r13 ## 8-byte Reload - sbbq 32(%r8), %r13 - movq %rax, %r12 - sbbq 40(%r8), %r12 - movq %rcx, %rax - sbbq 48(%r8), %rax - movq -32(%rsp), %rcx ## 8-byte Reload - sbbq 56(%r8), %rcx - sbbq $0, %rbp - andl $1, %ebp - cmovneq %rbx, %rsi - movq %rsi, 64(%rdi) - testb %bpl, %bpl - cmovneq %r9, %rdx - movq %rdx, 72(%rdi) - cmovneq %r11, %r10 - movq %r10, 80(%rdi) - cmovneq %r15, %r14 - movq %r14, 88(%rdi) - cmovneq -8(%rsp), %r13 ## 8-byte Folded Reload - movq %r13, 96(%rdi) - cmovneq -24(%rsp), %r12 ## 8-byte Folded Reload - movq %r12, 104(%rdi) - cmovneq -16(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 112(%rdi) - cmovneq -32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 120(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sub8Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sub8Lbmi2: ## @mcl_fpDbl_sub8Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r15 - movq 120(%rdx), %rax - movq %rax, -8(%rsp) ## 8-byte Spill - movq 112(%rdx), %rax - movq %rax, -16(%rsp) ## 8-byte Spill - movq 104(%rdx), %rax - movq %rax, -24(%rsp) ## 8-byte Spill - movq 16(%rsi), %r9 - movq (%rsi), %r12 - movq 
8(%rsi), %r14 - xorl %r8d, %r8d - subq (%rdx), %r12 - sbbq 8(%rdx), %r14 - sbbq 16(%rdx), %r9 - movq 24(%rsi), %rbx - sbbq 24(%rdx), %rbx - movq 32(%rsi), %r13 - sbbq 32(%rdx), %r13 - movq 96(%rdx), %rbp - movq 88(%rdx), %r11 - movq %r12, (%rdi) - movq 80(%rdx), %r12 - movq %r14, 8(%rdi) - movq 72(%rdx), %r10 - movq %r9, 16(%rdi) - movq 40(%rdx), %r9 - movq %rbx, 24(%rdi) - movq 40(%rsi), %rbx - sbbq %r9, %rbx - movq 48(%rdx), %r9 - movq %r13, 32(%rdi) - movq 48(%rsi), %r14 - sbbq %r9, %r14 - movq 64(%rdx), %r13 - movq 56(%rdx), %r9 - movq %rbx, 40(%rdi) - movq 56(%rsi), %rdx - sbbq %r9, %rdx - movq 120(%rsi), %rcx - movq %r14, 48(%rdi) - movq 64(%rsi), %rbx - sbbq %r13, %rbx - movq 112(%rsi), %rax - movq %rdx, 56(%rdi) - movq 72(%rsi), %r9 - sbbq %r10, %r9 - movq 80(%rsi), %r13 - sbbq %r12, %r13 - movq 88(%rsi), %r12 - sbbq %r11, %r12 - movq 104(%rsi), %rdx - movq 96(%rsi), %r14 - sbbq %rbp, %r14 - sbbq -24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -24(%rsp) ## 8-byte Spill - sbbq -16(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -16(%rsp) ## 8-byte Spill - sbbq -8(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -8(%rsp) ## 8-byte Spill - movl $0, %ebp - sbbq $0, %rbp - andl $1, %ebp - movq (%r15), %r11 - cmoveq %r8, %r11 - testb %bpl, %bpl - movq 16(%r15), %rbp - cmoveq %r8, %rbp - movq 8(%r15), %rsi - cmoveq %r8, %rsi - movq 56(%r15), %r10 - cmoveq %r8, %r10 - movq 48(%r15), %rdx - cmoveq %r8, %rdx - movq 40(%r15), %rcx - cmoveq %r8, %rcx - movq 32(%r15), %rax - cmoveq %r8, %rax - cmovneq 24(%r15), %r8 - addq %rbx, %r11 - adcq %r9, %rsi - movq %r11, 64(%rdi) - adcq %r13, %rbp - movq %rsi, 72(%rdi) - movq %rbp, 80(%rdi) - adcq %r12, %r8 - movq %r8, 88(%rdi) - adcq %r14, %rax - movq %rax, 96(%rdi) - adcq -24(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 104(%rdi) - adcq -16(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 112(%rdi) - adcq -8(%rsp), %r10 ## 8-byte Folded Reload - movq %r10, 120(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .p2align 4, 0x90 -l_mulPv576x64: ## @mulPv576x64 -## BB#0: - mulxq (%rsi), %rcx, %rax - movq %rcx, (%rdi) - mulxq 8(%rsi), %rcx, %r8 - addq %rax, %rcx - movq %rcx, 8(%rdi) - mulxq 16(%rsi), %rcx, %r9 - adcq %r8, %rcx - movq %rcx, 16(%rdi) - mulxq 24(%rsi), %rax, %rcx - adcq %r9, %rax - movq %rax, 24(%rdi) - mulxq 32(%rsi), %rax, %r8 - adcq %rcx, %rax - movq %rax, 32(%rdi) - mulxq 40(%rsi), %rcx, %r9 - adcq %r8, %rcx - movq %rcx, 40(%rdi) - mulxq 48(%rsi), %rax, %rcx - adcq %r9, %rax - movq %rax, 48(%rdi) - mulxq 56(%rsi), %rax, %r8 - adcq %rcx, %rax - movq %rax, 56(%rdi) - mulxq 64(%rsi), %rax, %rcx - adcq %r8, %rax - movq %rax, 64(%rdi) - adcq $0, %rcx - movq %rcx, 72(%rdi) - movq %rdi, %rax - retq - - .globl _mcl_fp_mulUnitPre9Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mulUnitPre9Lbmi2: ## @mcl_fp_mulUnitPre9Lbmi2 -## BB#0: - pushq %r14 - pushq %rbx - subq $88, %rsp - movq %rdi, %rbx - leaq 8(%rsp), %rdi - callq l_mulPv576x64 - movq 80(%rsp), %r8 - movq 72(%rsp), %r9 - movq 64(%rsp), %r10 - movq 56(%rsp), %r11 - movq 48(%rsp), %r14 - movq 40(%rsp), %rax - movq 32(%rsp), %rcx - movq 24(%rsp), %rdx - movq 8(%rsp), %rsi - movq 16(%rsp), %rdi - movq %rsi, (%rbx) - movq %rdi, 8(%rbx) - movq %rdx, 16(%rbx) - movq %rcx, 24(%rbx) - movq %rax, 32(%rbx) - movq %r14, 40(%rbx) - movq %r11, 48(%rbx) - movq %r10, 56(%rbx) - movq %r9, 64(%rbx) - movq %r8, 72(%rbx) - addq $88, %rsp - popq %rbx - popq %r14 - retq - - .globl _mcl_fpDbl_mulPre9Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_mulPre9Lbmi2: ## @mcl_fpDbl_mulPre9Lbmi2 -## 
BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $808, %rsp ## imm = 0x328 - movq %rdx, %rax - movq %rdi, %r12 - movq (%rax), %rdx - movq %rax, %rbx - movq %rbx, 80(%rsp) ## 8-byte Spill - leaq 728(%rsp), %rdi - movq %rsi, %rbp - movq %rbp, 72(%rsp) ## 8-byte Spill - callq l_mulPv576x64 - movq 800(%rsp), %r13 - movq 792(%rsp), %rax - movq %rax, 48(%rsp) ## 8-byte Spill - movq 784(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq 776(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 768(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 760(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - movq 752(%rsp), %rax - movq %rax, 32(%rsp) ## 8-byte Spill - movq 744(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - movq 728(%rsp), %rax - movq 736(%rsp), %r14 - movq %rax, (%r12) - movq %r12, 64(%rsp) ## 8-byte Spill - movq 8(%rbx), %rdx - leaq 648(%rsp), %rdi - movq %rbp, %rsi - callq l_mulPv576x64 - movq 720(%rsp), %r8 - movq 712(%rsp), %rcx - movq 704(%rsp), %rdx - movq 696(%rsp), %rsi - movq 688(%rsp), %rdi - movq 680(%rsp), %rbp - addq 648(%rsp), %r14 - movq 672(%rsp), %rax - movq 656(%rsp), %rbx - movq 664(%rsp), %r15 - movq %r14, 8(%r12) - adcq 24(%rsp), %rbx ## 8-byte Folded Reload - adcq 32(%rsp), %r15 ## 8-byte Folded Reload - adcq 40(%rsp), %rax ## 8-byte Folded Reload - movq %rax, %r14 - adcq (%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 24(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 32(%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 40(%rsp) ## 8-byte Spill - adcq 48(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, (%rsp) ## 8-byte Spill - adcq %r13, %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 16(%rsp) ## 8-byte Spill - movq 80(%rsp), %r13 ## 8-byte Reload - movq 16(%r13), %rdx - leaq 568(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 640(%rsp), %r8 - movq 632(%rsp), %r9 - movq 624(%rsp), %r10 - movq 616(%rsp), %rdi - movq 608(%rsp), %rbp - movq 600(%rsp), %rcx - addq 568(%rsp), %rbx - movq 592(%rsp), %rdx - movq 576(%rsp), %r12 - movq 584(%rsp), %rsi - movq 64(%rsp), %rax ## 8-byte Reload - movq %rbx, 16(%rax) - adcq %r15, %r12 - adcq %r14, %rsi - movq %rsi, 48(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 56(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 40(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 32(%rsp) ## 8-byte Spill - adcq (%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 40(%rsp) ## 8-byte Spill - adcq 8(%rsp), %r10 ## 8-byte Folded Reload - movq %r10, (%rsp) ## 8-byte Spill - adcq 16(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 8(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 16(%rsp) ## 8-byte Spill - movq 24(%r13), %rdx - leaq 488(%rsp), %rdi - movq 72(%rsp), %r15 ## 8-byte Reload - movq %r15, %rsi - callq l_mulPv576x64 - movq 560(%rsp), %r8 - movq 552(%rsp), %rcx - movq 544(%rsp), %rdx - movq 536(%rsp), %rsi - movq 528(%rsp), %rdi - movq 520(%rsp), %rbp - addq 488(%rsp), %r12 - movq 512(%rsp), %rax - movq 496(%rsp), %rbx - movq 504(%rsp), %r13 - movq 64(%rsp), %r14 ## 8-byte Reload - movq %r12, 24(%r14) - adcq 48(%rsp), %rbx ## 8-byte Folded Reload - adcq 56(%rsp), %r13 ## 8-byte Folded Reload - adcq 24(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 32(%rsp) ## 8-byte Spill - adcq 
40(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 40(%rsp) ## 8-byte Spill - adcq (%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, (%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 8(%rsp) ## 8-byte Spill - adcq 16(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 16(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 48(%rsp) ## 8-byte Spill - movq 80(%rsp), %r12 ## 8-byte Reload - movq 32(%r12), %rdx - leaq 408(%rsp), %rdi - movq %r15, %rsi - callq l_mulPv576x64 - movq 480(%rsp), %r8 - movq 472(%rsp), %r9 - movq 464(%rsp), %rdx - movq 456(%rsp), %rsi - movq 448(%rsp), %rdi - movq 440(%rsp), %rbp - addq 408(%rsp), %rbx - movq 432(%rsp), %rax - movq 416(%rsp), %r15 - movq 424(%rsp), %rcx - movq %rbx, 32(%r14) - adcq %r13, %r15 - adcq 24(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 56(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 40(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 32(%rsp) ## 8-byte Spill - adcq (%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 40(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, (%rsp) ## 8-byte Spill - adcq 16(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 8(%rsp) ## 8-byte Spill - adcq 48(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 16(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 48(%rsp) ## 8-byte Spill - movq %r12, %r14 - movq 40(%r14), %rdx - leaq 328(%rsp), %rdi - movq 72(%rsp), %r13 ## 8-byte Reload - movq %r13, %rsi - callq l_mulPv576x64 - movq 400(%rsp), %r8 - movq 392(%rsp), %r9 - movq 384(%rsp), %rsi - movq 376(%rsp), %rdi - movq 368(%rsp), %rbx - movq 360(%rsp), %rbp - addq 328(%rsp), %r15 - movq 352(%rsp), %rcx - movq 336(%rsp), %r12 - movq 344(%rsp), %rdx - movq 64(%rsp), %rax ## 8-byte Reload - movq %r15, 40(%rax) - adcq 56(%rsp), %r12 ## 8-byte Folded Reload - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 56(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 40(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 32(%rsp) ## 8-byte Spill - adcq (%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, 40(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, (%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 48(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 16(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 48(%rsp) ## 8-byte Spill - movq 48(%r14), %rdx - leaq 248(%rsp), %rdi - movq %r13, %rsi - movq %r13, %r15 - callq l_mulPv576x64 - movq 320(%rsp), %r8 - movq 312(%rsp), %r9 - movq 304(%rsp), %rsi - movq 296(%rsp), %rdi - movq 288(%rsp), %rbx - movq 280(%rsp), %rbp - addq 248(%rsp), %r12 - movq 272(%rsp), %rcx - movq 256(%rsp), %r13 - movq 264(%rsp), %rdx - movq 64(%rsp), %rax ## 8-byte Reload - movq %r12, 48(%rax) - adcq 56(%rsp), %r13 ## 8-byte Folded Reload - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 56(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 40(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 32(%rsp) ## 8-byte Spill - adcq (%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, 40(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, (%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 48(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 16(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 
48(%rsp) ## 8-byte Spill - movq 56(%r14), %rdx - leaq 168(%rsp), %rdi - movq %r15, %rsi - callq l_mulPv576x64 - movq 240(%rsp), %rcx - movq 232(%rsp), %rdx - movq 224(%rsp), %rsi - movq 216(%rsp), %rdi - movq 208(%rsp), %rbx - addq 168(%rsp), %r13 - movq 200(%rsp), %r12 - movq 192(%rsp), %rbp - movq 176(%rsp), %r14 - movq 184(%rsp), %r15 - movq 64(%rsp), %rax ## 8-byte Reload - movq %r13, 56(%rax) - adcq 56(%rsp), %r14 ## 8-byte Folded Reload - adcq 24(%rsp), %r15 ## 8-byte Folded Reload - adcq 32(%rsp), %rbp ## 8-byte Folded Reload - adcq 40(%rsp), %r12 ## 8-byte Folded Reload - adcq (%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, %r13 - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, (%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 48(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq $0, %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - movq 80(%rsp), %rax ## 8-byte Reload - movq 64(%rax), %rdx - leaq 88(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 88(%rsp), %r14 - adcq 96(%rsp), %r15 - movq 160(%rsp), %r8 - adcq 104(%rsp), %rbp - movq 152(%rsp), %r9 - movq 144(%rsp), %rdx - movq 136(%rsp), %rsi - movq 128(%rsp), %rdi - movq 120(%rsp), %rbx - movq 112(%rsp), %rax - movq 64(%rsp), %rcx ## 8-byte Reload - movq %r14, 64(%rcx) - movq %r15, 72(%rcx) - adcq %r12, %rax - movq %rbp, 80(%rcx) - movq %rax, 88(%rcx) - adcq %r13, %rbx - movq %rbx, 96(%rcx) - adcq (%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 104(%rcx) - adcq 8(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 112(%rcx) - adcq 16(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 120(%rcx) - adcq 48(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 128(%rcx) - adcq $0, %r8 - movq %r8, 136(%rcx) - addq $808, %rsp ## imm = 0x328 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sqrPre9Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre9Lbmi2: ## @mcl_fpDbl_sqrPre9Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $808, %rsp ## imm = 0x328 - movq %rsi, %r15 - movq %rdi, %r14 - movq (%r15), %rdx - leaq 728(%rsp), %rdi - callq l_mulPv576x64 - movq 800(%rsp), %rax - movq %rax, 32(%rsp) ## 8-byte Spill - movq 792(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - movq 784(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq 776(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 768(%rsp), %rax - movq %rax, 56(%rsp) ## 8-byte Spill - movq 760(%rsp), %rax - movq %rax, 48(%rsp) ## 8-byte Spill - movq 752(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - movq 744(%rsp), %rax - movq %rax, 80(%rsp) ## 8-byte Spill - movq 728(%rsp), %rax - movq 736(%rsp), %r12 - movq %rax, (%r14) - movq %r14, 72(%rsp) ## 8-byte Spill - movq 8(%r15), %rdx - leaq 648(%rsp), %rdi - movq %r15, %rsi - callq l_mulPv576x64 - movq 720(%rsp), %r8 - movq 712(%rsp), %rcx - movq 704(%rsp), %rdx - movq 696(%rsp), %rsi - movq 688(%rsp), %rdi - movq 680(%rsp), %rbp - addq 648(%rsp), %r12 - movq 672(%rsp), %rax - movq 656(%rsp), %rbx - movq 664(%rsp), %r13 - movq %r12, 8(%r14) - adcq 80(%rsp), %rbx ## 8-byte Folded Reload - adcq 40(%rsp), %r13 ## 8-byte Folded Reload - adcq 48(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 56(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 56(%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte 
Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 32(%rsp) ## 8-byte Spill - movq %r15, 64(%rsp) ## 8-byte Spill - movq 16(%r15), %rdx - leaq 568(%rsp), %rdi - movq %r15, %rsi - callq l_mulPv576x64 - movq 640(%rsp), %r8 - movq 632(%rsp), %rcx - movq 624(%rsp), %rdx - movq 616(%rsp), %rsi - movq 608(%rsp), %rdi - movq 600(%rsp), %rbp - addq 568(%rsp), %rbx - movq 592(%rsp), %rax - movq 576(%rsp), %r14 - movq 584(%rsp), %r12 - movq 72(%rsp), %r15 ## 8-byte Reload - movq %rbx, 16(%r15) - adcq %r13, %r14 - adcq 40(%rsp), %r12 ## 8-byte Folded Reload - adcq 48(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 56(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 56(%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %rsi ## 8-byte Reload - movq 24(%rsi), %rdx - leaq 488(%rsp), %rdi - callq l_mulPv576x64 - movq 560(%rsp), %r8 - movq 552(%rsp), %rcx - movq 544(%rsp), %rdx - movq 536(%rsp), %rsi - movq 528(%rsp), %rdi - movq 520(%rsp), %rbp - addq 488(%rsp), %r14 - movq 512(%rsp), %rax - movq 496(%rsp), %rbx - movq 504(%rsp), %r13 - movq %r14, 24(%r15) - adcq %r12, %rbx - adcq 40(%rsp), %r13 ## 8-byte Folded Reload - adcq 48(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 56(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 56(%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %rsi ## 8-byte Reload - movq 32(%rsi), %rdx - leaq 408(%rsp), %rdi - callq l_mulPv576x64 - movq 480(%rsp), %r8 - movq 472(%rsp), %rcx - movq 464(%rsp), %rdx - movq 456(%rsp), %rsi - movq 448(%rsp), %rdi - movq 440(%rsp), %rbp - addq 408(%rsp), %rbx - movq 432(%rsp), %rax - movq 416(%rsp), %r14 - movq 424(%rsp), %r12 - movq %rbx, 32(%r15) - adcq %r13, %r14 - adcq 40(%rsp), %r12 ## 8-byte Folded Reload - adcq 48(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 56(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 56(%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %rsi ## 8-byte Reload - movq 40(%rsi), %rdx - leaq 328(%rsp), %rdi - callq l_mulPv576x64 - movq 400(%rsp), %r8 - movq 392(%rsp), %rcx - movq 384(%rsp), %rdx - movq 376(%rsp), %rsi - movq 368(%rsp), %rdi - movq 360(%rsp), %rbp - addq 328(%rsp), %r14 - movq 352(%rsp), %rax - movq 
336(%rsp), %rbx - movq 344(%rsp), %r13 - movq %r14, 40(%r15) - adcq %r12, %rbx - adcq 40(%rsp), %r13 ## 8-byte Folded Reload - adcq 48(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 56(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 56(%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %rsi ## 8-byte Reload - movq 48(%rsi), %rdx - leaq 248(%rsp), %rdi - callq l_mulPv576x64 - movq 320(%rsp), %r8 - movq 312(%rsp), %rcx - movq 304(%rsp), %rdx - movq 296(%rsp), %rsi - movq 288(%rsp), %rdi - movq 280(%rsp), %rbp - addq 248(%rsp), %rbx - movq 272(%rsp), %rax - movq 256(%rsp), %r12 - movq 264(%rsp), %r14 - movq %rbx, 48(%r15) - adcq %r13, %r12 - adcq 40(%rsp), %r14 ## 8-byte Folded Reload - adcq 48(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 56(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 56(%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %rsi ## 8-byte Reload - movq 56(%rsi), %rdx - leaq 168(%rsp), %rdi - callq l_mulPv576x64 - movq 240(%rsp), %r8 - movq 232(%rsp), %rdx - movq 224(%rsp), %rsi - movq 216(%rsp), %rdi - movq 208(%rsp), %rbx - movq 200(%rsp), %rcx - addq 168(%rsp), %r12 - movq 192(%rsp), %r15 - movq 176(%rsp), %r13 - movq 184(%rsp), %rbp - movq 72(%rsp), %rax ## 8-byte Reload - movq %r12, 56(%rax) - adcq %r14, %r13 - adcq 40(%rsp), %rbp ## 8-byte Folded Reload - adcq 48(%rsp), %r15 ## 8-byte Folded Reload - adcq 56(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, %r12 - adcq 8(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, %r14 - adcq 16(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 8(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 16(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 24(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %rsi ## 8-byte Reload - movq 64(%rsi), %rdx - leaq 88(%rsp), %rdi - callq l_mulPv576x64 - addq 88(%rsp), %r13 - adcq 96(%rsp), %rbp - movq 160(%rsp), %r8 - adcq 104(%rsp), %r15 - movq 152(%rsp), %r9 - movq 144(%rsp), %rdx - movq 136(%rsp), %rsi - movq 128(%rsp), %rdi - movq 120(%rsp), %rbx - movq 112(%rsp), %rax - movq 72(%rsp), %rcx ## 8-byte Reload - movq %r13, 64(%rcx) - movq %rbp, 72(%rcx) - adcq %r12, %rax - movq %r15, 80(%rcx) - movq %rax, 88(%rcx) - adcq %r14, %rbx - movq %rbx, 96(%rcx) - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 104(%rcx) - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 112(%rcx) - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 120(%rcx) - adcq 32(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 128(%rcx) - adcq $0, %r8 - movq %r8, 136(%rcx) - addq $808, %rsp ## imm = 0x328 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl 
_mcl_fp_mont9Lbmi2 - .p2align 4, 0x90 -_mcl_fp_mont9Lbmi2: ## @mcl_fp_mont9Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $1560, %rsp ## imm = 0x618 - movq %rcx, 72(%rsp) ## 8-byte Spill - movq %rdx, 96(%rsp) ## 8-byte Spill - movq %rsi, 88(%rsp) ## 8-byte Spill - movq %rdi, 112(%rsp) ## 8-byte Spill - movq -8(%rcx), %rbx - movq %rbx, 80(%rsp) ## 8-byte Spill - movq (%rdx), %rdx - leaq 1480(%rsp), %rdi - callq l_mulPv576x64 - movq 1480(%rsp), %r14 - movq 1488(%rsp), %r15 - movq %r14, %rdx - imulq %rbx, %rdx - movq 1552(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - movq 1544(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - movq 1536(%rsp), %rax - movq %rax, 56(%rsp) ## 8-byte Spill - movq 1528(%rsp), %r12 - movq 1520(%rsp), %r13 - movq 1512(%rsp), %rbx - movq 1504(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 1496(%rsp), %rbp - leaq 1400(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 1400(%rsp), %r14 - adcq 1408(%rsp), %r15 - adcq 1416(%rsp), %rbp - movq %rbp, 8(%rsp) ## 8-byte Spill - movq (%rsp), %rax ## 8-byte Reload - adcq 1424(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - adcq 1432(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - adcq 1440(%rsp), %r13 - movq %r13, 16(%rsp) ## 8-byte Spill - adcq 1448(%rsp), %r12 - movq %r12, 48(%rsp) ## 8-byte Spill - movq 56(%rsp), %rbx ## 8-byte Reload - adcq 1456(%rsp), %rbx - movq 40(%rsp), %r14 ## 8-byte Reload - adcq 1464(%rsp), %r14 - movq 24(%rsp), %r13 ## 8-byte Reload - adcq 1472(%rsp), %r13 - sbbq %rbp, %rbp - movq 96(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rdx - leaq 1320(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %ebp - addq 1320(%rsp), %r15 - movq 8(%rsp), %rax ## 8-byte Reload - adcq 1328(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq (%rsp), %rax ## 8-byte Reload - adcq 1336(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 32(%rsp), %r12 ## 8-byte Reload - adcq 1344(%rsp), %r12 - movq 16(%rsp), %rax ## 8-byte Reload - adcq 1352(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq 48(%rsp), %rax ## 8-byte Reload - adcq 1360(%rsp), %rax - movq %rax, 48(%rsp) ## 8-byte Spill - adcq 1368(%rsp), %rbx - adcq 1376(%rsp), %r14 - movq %r14, 40(%rsp) ## 8-byte Spill - adcq 1384(%rsp), %r13 - movq %r13, 24(%rsp) ## 8-byte Spill - adcq 1392(%rsp), %rbp - sbbq %r14, %r14 - movq %r15, %rdx - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 1240(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq %r14, %rax - andl $1, %eax - addq 1240(%rsp), %r15 - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 1248(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq (%rsp), %r14 ## 8-byte Reload - adcq 1256(%rsp), %r14 - adcq 1264(%rsp), %r12 - movq %r12, 32(%rsp) ## 8-byte Spill - movq 16(%rsp), %r12 ## 8-byte Reload - adcq 1272(%rsp), %r12 - movq 48(%rsp), %r13 ## 8-byte Reload - adcq 1280(%rsp), %r13 - adcq 1288(%rsp), %rbx - movq %rbx, 56(%rsp) ## 8-byte Spill - movq 40(%rsp), %r15 ## 8-byte Reload - adcq 1296(%rsp), %r15 - movq 24(%rsp), %rbx ## 8-byte Reload - adcq 1304(%rsp), %rbx - adcq 1312(%rsp), %rbp - adcq $0, %rax - movq %rax, 64(%rsp) ## 8-byte Spill - movq 96(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rdx - leaq 1160(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 8(%rsp), %rax ## 8-byte Reload - addq 1160(%rsp), %rax - adcq 1168(%rsp), %r14 - movq %r14, (%rsp) ## 8-byte Spill - movq 32(%rsp), %r14 ## 8-byte 
Reload - adcq 1176(%rsp), %r14 - adcq 1184(%rsp), %r12 - movq %r12, 16(%rsp) ## 8-byte Spill - movq %r13, %r12 - adcq 1192(%rsp), %r12 - movq 56(%rsp), %rcx ## 8-byte Reload - adcq 1200(%rsp), %rcx - movq %rcx, 56(%rsp) ## 8-byte Spill - adcq 1208(%rsp), %r15 - movq %r15, %r13 - adcq 1216(%rsp), %rbx - movq %rbx, 24(%rsp) ## 8-byte Spill - adcq 1224(%rsp), %rbp - movq 64(%rsp), %rcx ## 8-byte Reload - adcq 1232(%rsp), %rcx - movq %rcx, 64(%rsp) ## 8-byte Spill - sbbq %r15, %r15 - movq %rax, %rdx - movq %rax, %rbx - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 1080(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq %r15, %rax - andl $1, %eax - addq 1080(%rsp), %rbx - movq (%rsp), %rcx ## 8-byte Reload - adcq 1088(%rsp), %rcx - movq %rcx, (%rsp) ## 8-byte Spill - movq %r14, %r15 - adcq 1096(%rsp), %r15 - movq 16(%rsp), %r14 ## 8-byte Reload - adcq 1104(%rsp), %r14 - movq %r12, %rbx - adcq 1112(%rsp), %rbx - movq 56(%rsp), %rcx ## 8-byte Reload - adcq 1120(%rsp), %rcx - movq %rcx, 56(%rsp) ## 8-byte Spill - adcq 1128(%rsp), %r13 - movq %r13, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %r13 ## 8-byte Reload - adcq 1136(%rsp), %r13 - adcq 1144(%rsp), %rbp - movq 64(%rsp), %r12 ## 8-byte Reload - adcq 1152(%rsp), %r12 - adcq $0, %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 96(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rdx - leaq 1000(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq (%rsp), %rax ## 8-byte Reload - addq 1000(%rsp), %rax - adcq 1008(%rsp), %r15 - movq %r15, 32(%rsp) ## 8-byte Spill - adcq 1016(%rsp), %r14 - movq %r14, %r15 - adcq 1024(%rsp), %rbx - movq %rbx, 48(%rsp) ## 8-byte Spill - movq 56(%rsp), %r14 ## 8-byte Reload - adcq 1032(%rsp), %r14 - movq 40(%rsp), %rcx ## 8-byte Reload - adcq 1040(%rsp), %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - adcq 1048(%rsp), %r13 - movq %r13, 24(%rsp) ## 8-byte Spill - adcq 1056(%rsp), %rbp - adcq 1064(%rsp), %r12 - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 1072(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - sbbq %rbx, %rbx - movq %rax, %rdx - movq %rax, %r13 - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 920(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %ebx - movq %rbx, %rax - addq 920(%rsp), %r13 - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 928(%rsp), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - adcq 936(%rsp), %r15 - movq %r15, 16(%rsp) ## 8-byte Spill - movq 48(%rsp), %r15 ## 8-byte Reload - adcq 944(%rsp), %r15 - movq %r14, %r13 - adcq 952(%rsp), %r13 - movq 40(%rsp), %r14 ## 8-byte Reload - adcq 960(%rsp), %r14 - movq 24(%rsp), %rbx ## 8-byte Reload - adcq 968(%rsp), %rbx - adcq 976(%rsp), %rbp - adcq 984(%rsp), %r12 - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 992(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - adcq $0, %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 96(%rsp), %rax ## 8-byte Reload - movq 32(%rax), %rdx - leaq 840(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 32(%rsp), %rax ## 8-byte Reload - addq 840(%rsp), %rax - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 848(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - adcq 856(%rsp), %r15 - adcq 864(%rsp), %r13 - movq %r13, 56(%rsp) ## 8-byte Spill - adcq 872(%rsp), %r14 - movq %r14, 40(%rsp) ## 8-byte Spill - adcq 880(%rsp), %rbx - movq %rbx, 24(%rsp) ## 8-byte Spill - adcq 888(%rsp), %rbp - adcq 896(%rsp), %r12 - movq 8(%rsp), %r13 ## 8-byte Reload - adcq 904(%rsp), %r13 - movq (%rsp), %rcx ## 8-byte Reload - adcq 
912(%rsp), %rcx - movq %rcx, (%rsp) ## 8-byte Spill - sbbq %rbx, %rbx - movq %rax, %rdx - movq %rax, %r14 - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 760(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %ebx - movq %rbx, %rax - addq 760(%rsp), %r14 - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 768(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - adcq 776(%rsp), %r15 - movq 56(%rsp), %r14 ## 8-byte Reload - adcq 784(%rsp), %r14 - movq 40(%rsp), %rcx ## 8-byte Reload - adcq 792(%rsp), %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 800(%rsp), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 808(%rsp), %rbp - movq %r12, %rbx - adcq 816(%rsp), %rbx - movq %r13, %r12 - adcq 824(%rsp), %r12 - movq (%rsp), %r13 ## 8-byte Reload - adcq 832(%rsp), %r13 - adcq $0, %rax - movq %rax, 32(%rsp) ## 8-byte Spill - movq 96(%rsp), %rax ## 8-byte Reload - movq 40(%rax), %rdx - leaq 680(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 16(%rsp), %rax ## 8-byte Reload - addq 680(%rsp), %rax - adcq 688(%rsp), %r15 - movq %r15, 48(%rsp) ## 8-byte Spill - adcq 696(%rsp), %r14 - movq %r14, 56(%rsp) ## 8-byte Spill - movq 40(%rsp), %rcx ## 8-byte Reload - adcq 704(%rsp), %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %r15 ## 8-byte Reload - adcq 712(%rsp), %r15 - adcq 720(%rsp), %rbp - adcq 728(%rsp), %rbx - movq %rbx, 64(%rsp) ## 8-byte Spill - adcq 736(%rsp), %r12 - movq %r12, 8(%rsp) ## 8-byte Spill - adcq 744(%rsp), %r13 - movq %r13, (%rsp) ## 8-byte Spill - movq 32(%rsp), %r13 ## 8-byte Reload - adcq 752(%rsp), %r13 - sbbq %r14, %r14 - movq %rax, %rdx - movq %rax, %rbx - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 600(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %r14d - addq 600(%rsp), %rbx - movq 48(%rsp), %rax ## 8-byte Reload - adcq 608(%rsp), %rax - movq %rax, 48(%rsp) ## 8-byte Spill - movq 56(%rsp), %rax ## 8-byte Reload - adcq 616(%rsp), %rax - movq %rax, 56(%rsp) ## 8-byte Spill - movq 40(%rsp), %rbx ## 8-byte Reload - adcq 624(%rsp), %rbx - adcq 632(%rsp), %r15 - movq %r15, 24(%rsp) ## 8-byte Spill - adcq 640(%rsp), %rbp - movq 64(%rsp), %r12 ## 8-byte Reload - adcq 648(%rsp), %r12 - movq 8(%rsp), %rax ## 8-byte Reload - adcq 656(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq (%rsp), %r15 ## 8-byte Reload - adcq 664(%rsp), %r15 - adcq 672(%rsp), %r13 - adcq $0, %r14 - movq %r14, 16(%rsp) ## 8-byte Spill - movq 96(%rsp), %rax ## 8-byte Reload - movq 48(%rax), %rdx - leaq 520(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 48(%rsp), %rax ## 8-byte Reload - addq 520(%rsp), %rax - movq 56(%rsp), %r14 ## 8-byte Reload - adcq 528(%rsp), %r14 - adcq 536(%rsp), %rbx - movq %rbx, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 544(%rsp), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 552(%rsp), %rbp - adcq 560(%rsp), %r12 - movq %r12, 64(%rsp) ## 8-byte Spill - movq 8(%rsp), %r12 ## 8-byte Reload - adcq 568(%rsp), %r12 - adcq 576(%rsp), %r15 - movq %r15, (%rsp) ## 8-byte Spill - adcq 584(%rsp), %r13 - movq %r13, 32(%rsp) ## 8-byte Spill - movq 16(%rsp), %r15 ## 8-byte Reload - adcq 592(%rsp), %r15 - sbbq %rbx, %rbx - movq %rax, %rdx - movq %rax, %r13 - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 440(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %ebx - movq %rbx, %rax - addq 440(%rsp), %r13 - adcq 448(%rsp), %r14 - movq 
%r14, 56(%rsp) ## 8-byte Spill - movq 40(%rsp), %r14 ## 8-byte Reload - adcq 456(%rsp), %r14 - movq 24(%rsp), %rbx ## 8-byte Reload - adcq 464(%rsp), %rbx - adcq 472(%rsp), %rbp - movq %rbp, 104(%rsp) ## 8-byte Spill - movq 64(%rsp), %rcx ## 8-byte Reload - adcq 480(%rsp), %rcx - movq %rcx, 64(%rsp) ## 8-byte Spill - adcq 488(%rsp), %r12 - movq %r12, 8(%rsp) ## 8-byte Spill - movq (%rsp), %rbp ## 8-byte Reload - adcq 496(%rsp), %rbp - movq 32(%rsp), %r12 ## 8-byte Reload - adcq 504(%rsp), %r12 - adcq 512(%rsp), %r15 - movq %r15, %r13 - adcq $0, %rax - movq %rax, 48(%rsp) ## 8-byte Spill - movq 96(%rsp), %rax ## 8-byte Reload - movq 56(%rax), %rdx - leaq 360(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 56(%rsp), %rax ## 8-byte Reload - addq 360(%rsp), %rax - adcq 368(%rsp), %r14 - adcq 376(%rsp), %rbx - movq %rbx, 24(%rsp) ## 8-byte Spill - movq 104(%rsp), %rcx ## 8-byte Reload - adcq 384(%rsp), %rcx - movq %rcx, 104(%rsp) ## 8-byte Spill - movq 64(%rsp), %rbx ## 8-byte Reload - adcq 392(%rsp), %rbx - movq 8(%rsp), %r15 ## 8-byte Reload - adcq 400(%rsp), %r15 - adcq 408(%rsp), %rbp - movq %rbp, (%rsp) ## 8-byte Spill - adcq 416(%rsp), %r12 - movq %r12, %rbp - adcq 424(%rsp), %r13 - movq %r13, 16(%rsp) ## 8-byte Spill - movq 48(%rsp), %rcx ## 8-byte Reload - adcq 432(%rsp), %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - sbbq %r13, %r13 - movq %rax, %rdx - movq %rax, %r12 - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 280(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %r13d - addq 280(%rsp), %r12 - adcq 288(%rsp), %r14 - movq %r14, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rax ## 8-byte Reload - adcq 296(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - movq 104(%rsp), %r14 ## 8-byte Reload - adcq 304(%rsp), %r14 - adcq 312(%rsp), %rbx - movq %rbx, 64(%rsp) ## 8-byte Spill - adcq 320(%rsp), %r15 - movq %r15, 8(%rsp) ## 8-byte Spill - movq (%rsp), %rbx ## 8-byte Reload - adcq 328(%rsp), %rbx - adcq 336(%rsp), %rbp - movq %rbp, 32(%rsp) ## 8-byte Spill - movq 16(%rsp), %r12 ## 8-byte Reload - adcq 344(%rsp), %r12 - movq 48(%rsp), %rbp ## 8-byte Reload - adcq 352(%rsp), %rbp - adcq $0, %r13 - movq 96(%rsp), %rax ## 8-byte Reload - movq 64(%rax), %rdx - leaq 200(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 40(%rsp), %rax ## 8-byte Reload - addq 200(%rsp), %rax - movq 24(%rsp), %r15 ## 8-byte Reload - adcq 208(%rsp), %r15 - adcq 216(%rsp), %r14 - movq %r14, 104(%rsp) ## 8-byte Spill - movq 64(%rsp), %r14 ## 8-byte Reload - adcq 224(%rsp), %r14 - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 232(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - adcq 240(%rsp), %rbx - movq %rbx, (%rsp) ## 8-byte Spill - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 248(%rsp), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - adcq 256(%rsp), %r12 - movq %r12, 16(%rsp) ## 8-byte Spill - adcq 264(%rsp), %rbp - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 272(%rsp), %r13 - sbbq %rbx, %rbx - movq 80(%rsp), %rdx ## 8-byte Reload - imulq %rax, %rdx - movq %rax, %r12 - leaq 120(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %ebx - addq 120(%rsp), %r12 - adcq 128(%rsp), %r15 - movq 104(%rsp), %rbp ## 8-byte Reload - adcq 136(%rsp), %rbp - movq %r14, %rcx - adcq 144(%rsp), %rcx - movq %rcx, 64(%rsp) ## 8-byte Spill - movq 8(%rsp), %r8 ## 8-byte Reload - adcq 152(%rsp), %r8 - movq %r8, 8(%rsp) ## 8-byte Spill - movq (%rsp), %r9 ## 8-byte Reload - adcq 160(%rsp), %r9 - movq %r9, (%rsp) ## 
8-byte Spill - movq 32(%rsp), %r10 ## 8-byte Reload - adcq 168(%rsp), %r10 - movq %r10, 32(%rsp) ## 8-byte Spill - movq 16(%rsp), %rdi ## 8-byte Reload - adcq 176(%rsp), %rdi - movq %rdi, 16(%rsp) ## 8-byte Spill - movq 48(%rsp), %r14 ## 8-byte Reload - adcq 184(%rsp), %r14 - adcq 192(%rsp), %r13 - adcq $0, %rbx - movq %r15, %rsi - movq %r15, %r12 - movq 72(%rsp), %rdx ## 8-byte Reload - subq (%rdx), %rsi - movq %rbp, %rax - movq %rbp, %r15 - sbbq 8(%rdx), %rax - movq %rcx, %rbp - sbbq 16(%rdx), %rbp - movq %r8, %rcx - sbbq 24(%rdx), %rcx - movq %r9, %r8 - sbbq 32(%rdx), %r8 - movq %r10, %r11 - sbbq 40(%rdx), %r11 - movq %rdi, %r10 - sbbq 48(%rdx), %r10 - movq %r14, %rdi - sbbq 56(%rdx), %rdi - movq %r13, %r9 - sbbq 64(%rdx), %r9 - sbbq $0, %rbx - andl $1, %ebx - cmovneq %r13, %r9 - testb %bl, %bl - cmovneq %r12, %rsi - movq 112(%rsp), %rbx ## 8-byte Reload - movq %rsi, (%rbx) - cmovneq %r15, %rax - movq %rax, 8(%rbx) - cmovneq 64(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 16(%rbx) - cmovneq 8(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rbx) - cmovneq (%rsp), %r8 ## 8-byte Folded Reload - movq %r8, 32(%rbx) - cmovneq 32(%rsp), %r11 ## 8-byte Folded Reload - movq %r11, 40(%rbx) - cmovneq 16(%rsp), %r10 ## 8-byte Folded Reload - movq %r10, 48(%rbx) - cmovneq %r14, %rdi - movq %rdi, 56(%rbx) - movq %r9, 64(%rbx) - addq $1560, %rsp ## imm = 0x618 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montNF9Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montNF9Lbmi2: ## @mcl_fp_montNF9Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $1560, %rsp ## imm = 0x618 - movq %rcx, 72(%rsp) ## 8-byte Spill - movq %rdx, 80(%rsp) ## 8-byte Spill - movq %rsi, 88(%rsp) ## 8-byte Spill - movq %rdi, 112(%rsp) ## 8-byte Spill - movq -8(%rcx), %rbx - movq %rbx, 96(%rsp) ## 8-byte Spill - movq (%rdx), %rdx - leaq 1480(%rsp), %rdi - callq l_mulPv576x64 - movq 1480(%rsp), %r12 - movq 1488(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq %r12, %rdx - imulq %rbx, %rdx - movq 1552(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - movq 1544(%rsp), %r13 - movq 1536(%rsp), %rax - movq %rax, 48(%rsp) ## 8-byte Spill - movq 1528(%rsp), %rax - movq %rax, 64(%rsp) ## 8-byte Spill - movq 1520(%rsp), %r14 - movq 1512(%rsp), %r15 - movq 1504(%rsp), %rbx - movq 1496(%rsp), %rbp - leaq 1400(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 1400(%rsp), %r12 - movq 16(%rsp), %rax ## 8-byte Reload - adcq 1408(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - adcq 1416(%rsp), %rbp - movq %rbp, 104(%rsp) ## 8-byte Spill - adcq 1424(%rsp), %rbx - movq %rbx, (%rsp) ## 8-byte Spill - adcq 1432(%rsp), %r15 - movq %r15, 8(%rsp) ## 8-byte Spill - adcq 1440(%rsp), %r14 - movq %r14, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %rbx ## 8-byte Reload - adcq 1448(%rsp), %rbx - movq 48(%rsp), %r12 ## 8-byte Reload - adcq 1456(%rsp), %r12 - adcq 1464(%rsp), %r13 - movq %r13, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 1472(%rsp), %rbp - movq 80(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rdx - leaq 1320(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 1392(%rsp), %rax - movq 16(%rsp), %rcx ## 8-byte Reload - addq 1320(%rsp), %rcx - movq 104(%rsp), %r15 ## 8-byte Reload - adcq 1328(%rsp), %r15 - movq (%rsp), %r14 ## 8-byte Reload - adcq 1336(%rsp), %r14 - movq 8(%rsp), %rdx ## 8-byte Reload - adcq 1344(%rsp), %rdx - movq %rdx, 8(%rsp) ## 8-byte Spill - 
movq 32(%rsp), %r13 ## 8-byte Reload - adcq 1352(%rsp), %r13 - adcq 1360(%rsp), %rbx - movq %rbx, 64(%rsp) ## 8-byte Spill - adcq 1368(%rsp), %r12 - movq %r12, 48(%rsp) ## 8-byte Spill - movq 40(%rsp), %rdx ## 8-byte Reload - adcq 1376(%rsp), %rdx - movq %rdx, 40(%rsp) ## 8-byte Spill - adcq 1384(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - adcq $0, %rax - movq %rax, %rbp - movq %rcx, %rdx - movq %rcx, %rbx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 1240(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 1240(%rsp), %rbx - adcq 1248(%rsp), %r15 - movq %r15, 104(%rsp) ## 8-byte Spill - adcq 1256(%rsp), %r14 - movq %r14, (%rsp) ## 8-byte Spill - movq 8(%rsp), %r12 ## 8-byte Reload - adcq 1264(%rsp), %r12 - adcq 1272(%rsp), %r13 - movq %r13, %r14 - movq 64(%rsp), %r13 ## 8-byte Reload - adcq 1280(%rsp), %r13 - movq 48(%rsp), %rbx ## 8-byte Reload - adcq 1288(%rsp), %rbx - movq 40(%rsp), %r15 ## 8-byte Reload - adcq 1296(%rsp), %r15 - movq 24(%rsp), %rax ## 8-byte Reload - adcq 1304(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 1312(%rsp), %rbp - movq %rbp, 56(%rsp) ## 8-byte Spill - movq 80(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rdx - leaq 1160(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 1232(%rsp), %rax - movq 104(%rsp), %rcx ## 8-byte Reload - addq 1160(%rsp), %rcx - movq (%rsp), %rbp ## 8-byte Reload - adcq 1168(%rsp), %rbp - adcq 1176(%rsp), %r12 - movq %r12, 8(%rsp) ## 8-byte Spill - adcq 1184(%rsp), %r14 - adcq 1192(%rsp), %r13 - movq %r13, %r12 - adcq 1200(%rsp), %rbx - movq %rbx, 48(%rsp) ## 8-byte Spill - adcq 1208(%rsp), %r15 - movq %r15, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rbx ## 8-byte Reload - adcq 1216(%rsp), %rbx - movq 56(%rsp), %rdx ## 8-byte Reload - adcq 1224(%rsp), %rdx - movq %rdx, 56(%rsp) ## 8-byte Spill - movq %rax, %r15 - adcq $0, %r15 - movq %rcx, %rdx - movq %rcx, %r13 - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 1080(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 1080(%rsp), %r13 - adcq 1088(%rsp), %rbp - movq %rbp, (%rsp) ## 8-byte Spill - movq 8(%rsp), %r13 ## 8-byte Reload - adcq 1096(%rsp), %r13 - adcq 1104(%rsp), %r14 - adcq 1112(%rsp), %r12 - movq %r12, 64(%rsp) ## 8-byte Spill - movq 48(%rsp), %r12 ## 8-byte Reload - adcq 1120(%rsp), %r12 - movq 40(%rsp), %rbp ## 8-byte Reload - adcq 1128(%rsp), %rbp - adcq 1136(%rsp), %rbx - movq %rbx, 24(%rsp) ## 8-byte Spill - movq 56(%rsp), %rbx ## 8-byte Reload - adcq 1144(%rsp), %rbx - adcq 1152(%rsp), %r15 - movq 80(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rdx - leaq 1000(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 1072(%rsp), %rax - movq (%rsp), %rcx ## 8-byte Reload - addq 1000(%rsp), %rcx - adcq 1008(%rsp), %r13 - movq %r13, 8(%rsp) ## 8-byte Spill - adcq 1016(%rsp), %r14 - movq %r14, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %r14 ## 8-byte Reload - adcq 1024(%rsp), %r14 - adcq 1032(%rsp), %r12 - adcq 1040(%rsp), %rbp - movq %rbp, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %r13 ## 8-byte Reload - adcq 1048(%rsp), %r13 - adcq 1056(%rsp), %rbx - movq %rbx, 56(%rsp) ## 8-byte Spill - adcq 1064(%rsp), %r15 - movq %r15, 16(%rsp) ## 8-byte Spill - adcq $0, %rax - movq %rax, (%rsp) ## 8-byte Spill - movq %rcx, %rdx - movq %rcx, %rbx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 920(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 920(%rsp), %rbx - movq 8(%rsp), %rax ## 8-byte Reload - adcq 
928(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 32(%rsp), %rbp ## 8-byte Reload - adcq 936(%rsp), %rbp - movq %r14, %rbx - adcq 944(%rsp), %rbx - adcq 952(%rsp), %r12 - movq 40(%rsp), %rax ## 8-byte Reload - adcq 960(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 968(%rsp), %r13 - movq %r13, %r15 - movq 56(%rsp), %r13 ## 8-byte Reload - adcq 976(%rsp), %r13 - movq 16(%rsp), %r14 ## 8-byte Reload - adcq 984(%rsp), %r14 - movq (%rsp), %rax ## 8-byte Reload - adcq 992(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 80(%rsp), %rax ## 8-byte Reload - movq 32(%rax), %rdx - leaq 840(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 912(%rsp), %rax - movq 8(%rsp), %rcx ## 8-byte Reload - addq 840(%rsp), %rcx - adcq 848(%rsp), %rbp - movq %rbp, 32(%rsp) ## 8-byte Spill - adcq 856(%rsp), %rbx - movq %rbx, 64(%rsp) ## 8-byte Spill - adcq 864(%rsp), %r12 - movq 40(%rsp), %rbp ## 8-byte Reload - adcq 872(%rsp), %rbp - adcq 880(%rsp), %r15 - movq %r15, 24(%rsp) ## 8-byte Spill - adcq 888(%rsp), %r13 - adcq 896(%rsp), %r14 - movq %r14, 16(%rsp) ## 8-byte Spill - movq (%rsp), %rdx ## 8-byte Reload - adcq 904(%rsp), %rdx - movq %rdx, (%rsp) ## 8-byte Spill - adcq $0, %rax - movq %rax, %r14 - movq %rcx, %rdx - movq %rcx, %rbx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 760(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 760(%rsp), %rbx - movq 32(%rsp), %rax ## 8-byte Reload - adcq 768(%rsp), %rax - movq %rax, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %r15 ## 8-byte Reload - adcq 776(%rsp), %r15 - adcq 784(%rsp), %r12 - movq %r12, 48(%rsp) ## 8-byte Spill - movq %rbp, %rbx - adcq 792(%rsp), %rbx - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 800(%rsp), %rbp - adcq 808(%rsp), %r13 - movq 16(%rsp), %rax ## 8-byte Reload - adcq 816(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq (%rsp), %r12 ## 8-byte Reload - adcq 824(%rsp), %r12 - adcq 832(%rsp), %r14 - movq 80(%rsp), %rax ## 8-byte Reload - movq 40(%rax), %rdx - leaq 680(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 752(%rsp), %rcx - movq 32(%rsp), %rax ## 8-byte Reload - addq 680(%rsp), %rax - adcq 688(%rsp), %r15 - movq %r15, 64(%rsp) ## 8-byte Spill - movq 48(%rsp), %rdx ## 8-byte Reload - adcq 696(%rsp), %rdx - movq %rdx, 48(%rsp) ## 8-byte Spill - adcq 704(%rsp), %rbx - movq %rbx, 40(%rsp) ## 8-byte Spill - adcq 712(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - adcq 720(%rsp), %r13 - movq %r13, %r15 - movq 16(%rsp), %rbx ## 8-byte Reload - adcq 728(%rsp), %rbx - adcq 736(%rsp), %r12 - movq %r12, (%rsp) ## 8-byte Spill - adcq 744(%rsp), %r14 - movq %r14, 32(%rsp) ## 8-byte Spill - adcq $0, %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq %rax, %rdx - movq %rax, %r13 - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 600(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 600(%rsp), %r13 - movq 64(%rsp), %r13 ## 8-byte Reload - adcq 608(%rsp), %r13 - movq 48(%rsp), %r12 ## 8-byte Reload - adcq 616(%rsp), %r12 - movq 40(%rsp), %rbp ## 8-byte Reload - adcq 624(%rsp), %rbp - movq 24(%rsp), %rax ## 8-byte Reload - adcq 632(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 640(%rsp), %r15 - movq %r15, 56(%rsp) ## 8-byte Spill - adcq 648(%rsp), %rbx - movq %rbx, 16(%rsp) ## 8-byte Spill - movq (%rsp), %r14 ## 8-byte Reload - adcq 656(%rsp), %r14 - movq 32(%rsp), %rbx ## 8-byte Reload - adcq 664(%rsp), %rbx - movq 8(%rsp), %r15 ## 8-byte Reload - adcq 672(%rsp), %r15 - movq 
80(%rsp), %rax ## 8-byte Reload - movq 48(%rax), %rdx - leaq 520(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 592(%rsp), %rcx - movq %r13, %rax - addq 520(%rsp), %rax - adcq 528(%rsp), %r12 - movq %r12, 48(%rsp) ## 8-byte Spill - movq %rbp, %r12 - adcq 536(%rsp), %r12 - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 544(%rsp), %rbp - movq 56(%rsp), %rdx ## 8-byte Reload - adcq 552(%rsp), %rdx - movq %rdx, 56(%rsp) ## 8-byte Spill - movq 16(%rsp), %rdx ## 8-byte Reload - adcq 560(%rsp), %rdx - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq 568(%rsp), %r14 - movq %r14, (%rsp) ## 8-byte Spill - adcq 576(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - adcq 584(%rsp), %r15 - movq %r15, 8(%rsp) ## 8-byte Spill - adcq $0, %rcx - movq %rcx, %r13 - movq %rax, %rdx - movq %rax, %r14 - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 440(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 440(%rsp), %r14 - movq 48(%rsp), %rax ## 8-byte Reload - adcq 448(%rsp), %rax - movq %rax, 48(%rsp) ## 8-byte Spill - adcq 456(%rsp), %r12 - adcq 464(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq 56(%rsp), %r14 ## 8-byte Reload - adcq 472(%rsp), %r14 - movq 16(%rsp), %r15 ## 8-byte Reload - adcq 480(%rsp), %r15 - movq (%rsp), %rbp ## 8-byte Reload - adcq 488(%rsp), %rbp - movq 32(%rsp), %rbx ## 8-byte Reload - adcq 496(%rsp), %rbx - movq 8(%rsp), %rax ## 8-byte Reload - adcq 504(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - adcq 512(%rsp), %r13 - movq 80(%rsp), %rax ## 8-byte Reload - movq 56(%rax), %rdx - leaq 360(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 432(%rsp), %rcx - movq 48(%rsp), %rax ## 8-byte Reload - addq 360(%rsp), %rax - adcq 368(%rsp), %r12 - movq %r12, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rdx ## 8-byte Reload - adcq 376(%rsp), %rdx - movq %rdx, 24(%rsp) ## 8-byte Spill - adcq 384(%rsp), %r14 - movq %r14, 56(%rsp) ## 8-byte Spill - adcq 392(%rsp), %r15 - movq %r15, 16(%rsp) ## 8-byte Spill - adcq 400(%rsp), %rbp - movq %rbp, (%rsp) ## 8-byte Spill - adcq 408(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - movq 8(%rsp), %r14 ## 8-byte Reload - adcq 416(%rsp), %r14 - adcq 424(%rsp), %r13 - movq %r13, %r15 - adcq $0, %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - movq %rax, %rdx - movq %rax, %r12 - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 280(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 280(%rsp), %r12 - movq 40(%rsp), %rax ## 8-byte Reload - adcq 288(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 296(%rsp), %rbp - movq 56(%rsp), %rax ## 8-byte Reload - adcq 304(%rsp), %rax - movq %rax, 56(%rsp) ## 8-byte Spill - movq 16(%rsp), %r13 ## 8-byte Reload - adcq 312(%rsp), %r13 - movq (%rsp), %r12 ## 8-byte Reload - adcq 320(%rsp), %r12 - movq 32(%rsp), %rbx ## 8-byte Reload - adcq 328(%rsp), %rbx - adcq 336(%rsp), %r14 - movq %r14, 8(%rsp) ## 8-byte Spill - adcq 344(%rsp), %r15 - movq %r15, 64(%rsp) ## 8-byte Spill - movq 48(%rsp), %r14 ## 8-byte Reload - adcq 352(%rsp), %r14 - movq 80(%rsp), %rax ## 8-byte Reload - movq 64(%rax), %rdx - leaq 200(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 272(%rsp), %rcx - movq 40(%rsp), %rax ## 8-byte Reload - addq 200(%rsp), %rax - adcq 208(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq 56(%rsp), %rbp ## 8-byte Reload - adcq 216(%rsp), %rbp - adcq 224(%rsp), %r13 - movq %r13, 16(%rsp) ## 8-byte Spill - adcq 
232(%rsp), %r12 - movq %r12, (%rsp) ## 8-byte Spill - adcq 240(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - movq 8(%rsp), %r15 ## 8-byte Reload - adcq 248(%rsp), %r15 - movq 64(%rsp), %r12 ## 8-byte Reload - adcq 256(%rsp), %r12 - adcq 264(%rsp), %r14 - adcq $0, %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - movq 96(%rsp), %rdx ## 8-byte Reload - imulq %rax, %rdx - movq %rax, %rbx - leaq 120(%rsp), %rdi - movq 72(%rsp), %r13 ## 8-byte Reload - movq %r13, %rsi - callq l_mulPv576x64 - addq 120(%rsp), %rbx - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 128(%rsp), %rcx - movq %rbp, %rdx - adcq 136(%rsp), %rdx - movq 16(%rsp), %rsi ## 8-byte Reload - adcq 144(%rsp), %rsi - movq %rsi, 16(%rsp) ## 8-byte Spill - movq (%rsp), %rdi ## 8-byte Reload - adcq 152(%rsp), %rdi - movq %rdi, (%rsp) ## 8-byte Spill - movq 32(%rsp), %rbx ## 8-byte Reload - adcq 160(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - movq %r15, %r8 - adcq 168(%rsp), %r8 - movq %r8, 8(%rsp) ## 8-byte Spill - movq %r12, %r15 - adcq 176(%rsp), %r15 - adcq 184(%rsp), %r14 - movq 40(%rsp), %r9 ## 8-byte Reload - adcq 192(%rsp), %r9 - movq %rcx, %rax - movq %rcx, %r11 - movq %r13, %rbp - subq (%rbp), %rax - movq %rdx, %rcx - movq %rdx, %r12 - sbbq 8(%rbp), %rcx - movq %rsi, %rdx - sbbq 16(%rbp), %rdx - movq %rdi, %rsi - sbbq 24(%rbp), %rsi - movq %rbx, %rdi - sbbq 32(%rbp), %rdi - movq %r8, %r10 - sbbq 40(%rbp), %r10 - movq %r15, %r13 - sbbq 48(%rbp), %r13 - movq %r14, %r8 - sbbq 56(%rbp), %r8 - movq %rbp, %rbx - movq %r9, %rbp - sbbq 64(%rbx), %rbp - movq %rbp, %rbx - sarq $63, %rbx - cmovsq %r11, %rax - movq 112(%rsp), %rbx ## 8-byte Reload - movq %rax, (%rbx) - cmovsq %r12, %rcx - movq %rcx, 8(%rbx) - cmovsq 16(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rbx) - cmovsq (%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 24(%rbx) - cmovsq 32(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 32(%rbx) - cmovsq 8(%rsp), %r10 ## 8-byte Folded Reload - movq %r10, 40(%rbx) - cmovsq %r15, %r13 - movq %r13, 48(%rbx) - cmovsq %r14, %r8 - movq %r8, 56(%rbx) - cmovsq %r9, %rbp - movq %rbp, 64(%rbx) - addq $1560, %rsp ## imm = 0x618 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montRed9Lbmi2 - .p2align 4, 0x90 -_mcl_fp_montRed9Lbmi2: ## @mcl_fp_montRed9Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $936, %rsp ## imm = 0x3A8 - movq %rdx, %rax - movq %rdi, 208(%rsp) ## 8-byte Spill - movq -8(%rax), %rcx - movq %rcx, 96(%rsp) ## 8-byte Spill - movq (%rsi), %r14 - movq 8(%rsi), %rdx - movq %rdx, (%rsp) ## 8-byte Spill - movq %r14, %rdx - imulq %rcx, %rdx - movq 136(%rsi), %rcx - movq %rcx, 88(%rsp) ## 8-byte Spill - movq 128(%rsi), %rcx - movq %rcx, 56(%rsp) ## 8-byte Spill - movq 120(%rsi), %rcx - movq %rcx, 80(%rsp) ## 8-byte Spill - movq 112(%rsi), %rcx - movq %rcx, 72(%rsp) ## 8-byte Spill - movq 104(%rsi), %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - movq 96(%rsi), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - movq 88(%rsi), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq 80(%rsi), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - movq 72(%rsi), %r12 - movq 64(%rsi), %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - movq 56(%rsi), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - movq 48(%rsi), %rcx - movq %rcx, 64(%rsp) ## 8-byte Spill - movq 40(%rsi), %rbp - movq 32(%rsi), %rbx - movq 24(%rsi), %r13 - movq 16(%rsi), %r15 - movq %rax, %rcx - movq (%rcx), %rax - movq %rax, 144(%rsp) ## 8-byte Spill - movq 64(%rcx), %rax - movq %rax, 200(%rsp) 
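(Note on the routines being removed here: mcl_fp_mont9Lbmi2 and mcl_fp_montNF9Lbmi2 above are fully unrolled 9-limb (576-bit) Montgomery multiplications; each callq l_mulPv576x64 forms one word-by-vector partial product, and each imulq by the cached -p^{-1} mod 2^64 derives the next Montgomery quotient. mcl_fp_montRed9Lbmi2, in progress below, applies the same quotient folding to an already-formed double-width value. A minimal portable sketch of the word-serial (CIOS) loop these routines unroll; the names, the fixed N, and the use of GCC/Clang unsigned __int128 are illustrative assumptions, not mcl's API:)

#include <stdint.h>

enum { N = 9 };                        /* 9 limbs = 576 bits, as in the 9L code */

/* z = x*y*2^(-64*N) mod p, with inv = -p^(-1) mod 2^64 (illustrative sketch) */
static void mont_mul(uint64_t z[N], const uint64_t x[N], const uint64_t y[N],
                     const uint64_t p[N], uint64_t inv)
{
    uint64_t t[N + 2] = {0};
    for (int i = 0; i < N; i++) {
        uint64_t carry = 0;
        for (int j = 0; j < N; j++) {  /* t += x * y[i] (one mulPv-style row) */
            unsigned __int128 acc = (unsigned __int128)x[j] * y[i] + t[j] + carry;
            t[j] = (uint64_t)acc;
            carry = (uint64_t)(acc >> 64);
        }
        unsigned __int128 top = (unsigned __int128)t[N] + carry;
        t[N] = (uint64_t)top;
        t[N + 1] = (uint64_t)(top >> 64);
        uint64_t q = t[0] * inv;       /* makes t + q*p divisible by 2^64 */
        carry = 0;
        for (int j = 0; j < N; j++) {  /* fold in q*p, cancelling the low word */
            unsigned __int128 acc = (unsigned __int128)q * p[j] + t[j] + carry;
            t[j] = (uint64_t)acc;
            carry = (uint64_t)(acc >> 64);
        }
        top = (unsigned __int128)t[N] + carry;
        for (int j = 0; j + 1 < N; j++)  /* drop the zero word: divide by 2^64 */
            t[j] = t[j + 1];
        t[N - 1] = (uint64_t)top;
        t[N] = t[N + 1] + (uint64_t)(top >> 64);
        t[N + 1] = 0;
    }
    /* conditional final subtraction, the cmovne/cmovs epilogue in the asm */
    uint64_t s[N], borrow = 0;
    for (int j = 0; j < N; j++) {
        unsigned __int128 d = (unsigned __int128)t[j] - p[j] - borrow;
        s[j] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;
    }
    for (int j = 0; j < N; j++)
        z[j] = (t[N] || !borrow) ? s[j] : t[j];
}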
## 8-byte Spill - movq 56(%rcx), %rax - movq %rax, 192(%rsp) ## 8-byte Spill - movq 48(%rcx), %rax - movq %rax, 184(%rsp) ## 8-byte Spill - movq 40(%rcx), %rax - movq %rax, 176(%rsp) ## 8-byte Spill - movq 32(%rcx), %rax - movq %rax, 168(%rsp) ## 8-byte Spill - movq 24(%rcx), %rax - movq %rax, 160(%rsp) ## 8-byte Spill - movq 16(%rcx), %rax - movq %rax, 152(%rsp) ## 8-byte Spill - movq 8(%rcx), %rax - movq %rax, 136(%rsp) ## 8-byte Spill - movq %rcx, %rsi - movq %rsi, 104(%rsp) ## 8-byte Spill - leaq 856(%rsp), %rdi - callq l_mulPv576x64 - addq 856(%rsp), %r14 - movq (%rsp), %rcx ## 8-byte Reload - adcq 864(%rsp), %rcx - adcq 872(%rsp), %r15 - adcq 880(%rsp), %r13 - adcq 888(%rsp), %rbx - movq %rbx, 120(%rsp) ## 8-byte Spill - adcq 896(%rsp), %rbp - movq %rbp, 112(%rsp) ## 8-byte Spill - movq 64(%rsp), %rax ## 8-byte Reload - adcq 904(%rsp), %rax - movq %rax, 64(%rsp) ## 8-byte Spill - movq 32(%rsp), %rax ## 8-byte Reload - adcq 912(%rsp), %rax - movq %rax, 32(%rsp) ## 8-byte Spill - movq 40(%rsp), %rax ## 8-byte Reload - adcq 920(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 928(%rsp), %r12 - movq %r12, (%rsp) ## 8-byte Spill - movq 24(%rsp), %rbp ## 8-byte Reload - adcq $0, %rbp - adcq $0, 8(%rsp) ## 8-byte Folded Spill - adcq $0, 16(%rsp) ## 8-byte Folded Spill - adcq $0, 48(%rsp) ## 8-byte Folded Spill - adcq $0, 72(%rsp) ## 8-byte Folded Spill - adcq $0, 80(%rsp) ## 8-byte Folded Spill - adcq $0, 56(%rsp) ## 8-byte Folded Spill - movq 88(%rsp), %r14 ## 8-byte Reload - adcq $0, %r14 - sbbq %r12, %r12 - movq %rcx, %rdx - movq %rcx, %rbx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 776(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %r12d - addq 776(%rsp), %rbx - adcq 784(%rsp), %r15 - adcq 792(%rsp), %r13 - movq %r13, 128(%rsp) ## 8-byte Spill - movq 120(%rsp), %rax ## 8-byte Reload - adcq 800(%rsp), %rax - movq %rax, 120(%rsp) ## 8-byte Spill - movq 112(%rsp), %rax ## 8-byte Reload - adcq 808(%rsp), %rax - movq %rax, 112(%rsp) ## 8-byte Spill - movq 64(%rsp), %rax ## 8-byte Reload - adcq 816(%rsp), %rax - movq %rax, 64(%rsp) ## 8-byte Spill - movq 32(%rsp), %rax ## 8-byte Reload - adcq 824(%rsp), %rax - movq %rax, 32(%rsp) ## 8-byte Spill - movq 40(%rsp), %rax ## 8-byte Reload - adcq 832(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - movq (%rsp), %rax ## 8-byte Reload - adcq 840(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - adcq 848(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq 8(%rsp), %r13 ## 8-byte Reload - adcq $0, %r13 - adcq $0, 16(%rsp) ## 8-byte Folded Spill - adcq $0, 48(%rsp) ## 8-byte Folded Spill - adcq $0, 72(%rsp) ## 8-byte Folded Spill - adcq $0, 80(%rsp) ## 8-byte Folded Spill - movq 56(%rsp), %rbx ## 8-byte Reload - adcq $0, %rbx - adcq $0, %r14 - movq %r14, 88(%rsp) ## 8-byte Spill - adcq $0, %r12 - movq %r15, %rdx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 696(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 696(%rsp), %r15 - movq 128(%rsp), %rcx ## 8-byte Reload - adcq 704(%rsp), %rcx - movq 120(%rsp), %rax ## 8-byte Reload - adcq 712(%rsp), %rax - movq %rax, 120(%rsp) ## 8-byte Spill - movq 112(%rsp), %rax ## 8-byte Reload - adcq 720(%rsp), %rax - movq %rax, 112(%rsp) ## 8-byte Spill - movq 64(%rsp), %rbp ## 8-byte Reload - adcq 728(%rsp), %rbp - movq 32(%rsp), %r14 ## 8-byte Reload - adcq 736(%rsp), %r14 - movq 40(%rsp), %r15 ## 8-byte Reload - adcq 744(%rsp), %r15 - movq (%rsp), %rax ## 8-byte Reload - adcq 752(%rsp), %rax - movq %rax, (%rsp) ## 
8-byte Spill - movq 24(%rsp), %rax ## 8-byte Reload - adcq 760(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 768(%rsp), %r13 - movq %r13, 8(%rsp) ## 8-byte Spill - adcq $0, 16(%rsp) ## 8-byte Folded Spill - movq 48(%rsp), %r13 ## 8-byte Reload - adcq $0, %r13 - adcq $0, 72(%rsp) ## 8-byte Folded Spill - adcq $0, 80(%rsp) ## 8-byte Folded Spill - adcq $0, %rbx - movq %rbx, 56(%rsp) ## 8-byte Spill - adcq $0, 88(%rsp) ## 8-byte Folded Spill - adcq $0, %r12 - movq %rcx, %rbx - movq %rbx, %rdx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 616(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 616(%rsp), %rbx - movq 120(%rsp), %rax ## 8-byte Reload - adcq 624(%rsp), %rax - movq 112(%rsp), %rcx ## 8-byte Reload - adcq 632(%rsp), %rcx - movq %rcx, 112(%rsp) ## 8-byte Spill - adcq 640(%rsp), %rbp - movq %rbp, 64(%rsp) ## 8-byte Spill - adcq 648(%rsp), %r14 - movq %r14, 32(%rsp) ## 8-byte Spill - adcq 656(%rsp), %r15 - movq (%rsp), %r14 ## 8-byte Reload - adcq 664(%rsp), %r14 - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 672(%rsp), %rbp - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 680(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 688(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - adcq $0, %r13 - movq %r13, 48(%rsp) ## 8-byte Spill - adcq $0, 72(%rsp) ## 8-byte Folded Spill - adcq $0, 80(%rsp) ## 8-byte Folded Spill - adcq $0, 56(%rsp) ## 8-byte Folded Spill - adcq $0, 88(%rsp) ## 8-byte Folded Spill - adcq $0, %r12 - movq %rax, %rbx - movq %rbx, %rdx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 536(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 536(%rsp), %rbx - movq 112(%rsp), %rax ## 8-byte Reload - adcq 544(%rsp), %rax - movq 64(%rsp), %rcx ## 8-byte Reload - adcq 552(%rsp), %rcx - movq %rcx, 64(%rsp) ## 8-byte Spill - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 560(%rsp), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - adcq 568(%rsp), %r15 - movq %r15, 40(%rsp) ## 8-byte Spill - adcq 576(%rsp), %r14 - movq %r14, (%rsp) ## 8-byte Spill - adcq 584(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq 8(%rsp), %r13 ## 8-byte Reload - adcq 592(%rsp), %r13 - movq 16(%rsp), %r15 ## 8-byte Reload - adcq 600(%rsp), %r15 - movq 48(%rsp), %rbp ## 8-byte Reload - adcq 608(%rsp), %rbp - movq 72(%rsp), %rbx ## 8-byte Reload - adcq $0, %rbx - adcq $0, 80(%rsp) ## 8-byte Folded Spill - adcq $0, 56(%rsp) ## 8-byte Folded Spill - adcq $0, 88(%rsp) ## 8-byte Folded Spill - adcq $0, %r12 - movq %rax, %rdx - movq %rax, %r14 - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 456(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 456(%rsp), %r14 - movq 64(%rsp), %rax ## 8-byte Reload - adcq 464(%rsp), %rax - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 472(%rsp), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - movq 40(%rsp), %rcx ## 8-byte Reload - adcq 480(%rsp), %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - movq (%rsp), %rcx ## 8-byte Reload - adcq 488(%rsp), %rcx - movq %rcx, (%rsp) ## 8-byte Spill - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 496(%rsp), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 504(%rsp), %r13 - movq %r13, 8(%rsp) ## 8-byte Spill - adcq 512(%rsp), %r15 - movq %r15, 16(%rsp) ## 8-byte Spill - adcq 520(%rsp), %rbp - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 528(%rsp), %rbx - movq %rbx, 72(%rsp) ## 8-byte Spill - movq 80(%rsp), %r14 ## 8-byte Reload - adcq $0, %r14 - movq 56(%rsp), %r13 ## 8-byte Reload - adcq $0, %r13 
- movq 88(%rsp), %rbx ## 8-byte Reload - adcq $0, %rbx - adcq $0, %r12 - movq %rax, %rdx - movq %rax, %r15 - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 376(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 376(%rsp), %r15 - movq 32(%rsp), %rax ## 8-byte Reload - adcq 384(%rsp), %rax - movq 40(%rsp), %rcx ## 8-byte Reload - adcq 392(%rsp), %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - movq (%rsp), %rcx ## 8-byte Reload - adcq 400(%rsp), %rcx - movq %rcx, (%rsp) ## 8-byte Spill - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 408(%rsp), %rbp - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 416(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 424(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - movq 48(%rsp), %rcx ## 8-byte Reload - adcq 432(%rsp), %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - movq 72(%rsp), %r15 ## 8-byte Reload - adcq 440(%rsp), %r15 - adcq 448(%rsp), %r14 - movq %r14, 80(%rsp) ## 8-byte Spill - adcq $0, %r13 - movq %r13, %r14 - adcq $0, %rbx - movq %rbx, 88(%rsp) ## 8-byte Spill - adcq $0, %r12 - movq %rax, %rbx - movq %rbx, %rdx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 296(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 296(%rsp), %rbx - movq 40(%rsp), %rax ## 8-byte Reload - adcq 304(%rsp), %rax - movq (%rsp), %r13 ## 8-byte Reload - adcq 312(%rsp), %r13 - adcq 320(%rsp), %rbp - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 328(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 336(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - movq 48(%rsp), %rcx ## 8-byte Reload - adcq 344(%rsp), %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - adcq 352(%rsp), %r15 - movq %r15, 72(%rsp) ## 8-byte Spill - movq 80(%rsp), %r15 ## 8-byte Reload - adcq 360(%rsp), %r15 - adcq 368(%rsp), %r14 - movq %r14, 56(%rsp) ## 8-byte Spill - movq 88(%rsp), %r14 ## 8-byte Reload - adcq $0, %r14 - adcq $0, %r12 - movq 96(%rsp), %rdx ## 8-byte Reload - imulq %rax, %rdx - movq %rax, %rbx - leaq 216(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 216(%rsp), %rbx - movq %r13, %rsi - adcq 224(%rsp), %rsi - movq %rsi, (%rsp) ## 8-byte Spill - adcq 232(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq 8(%rsp), %r9 ## 8-byte Reload - adcq 240(%rsp), %r9 - movq %r9, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %r8 ## 8-byte Reload - adcq 248(%rsp), %r8 - movq %r8, 16(%rsp) ## 8-byte Spill - movq 48(%rsp), %rbx ## 8-byte Reload - adcq 256(%rsp), %rbx - movq 72(%rsp), %rax ## 8-byte Reload - adcq 264(%rsp), %rax - movq %r15, %rcx - adcq 272(%rsp), %rcx - movq 56(%rsp), %rdx ## 8-byte Reload - adcq 280(%rsp), %rdx - movq %rdx, 56(%rsp) ## 8-byte Spill - adcq 288(%rsp), %r14 - movq %r14, %r11 - adcq $0, %r12 - subq 144(%rsp), %rsi ## 8-byte Folded Reload - movq %rbp, %rdi - sbbq 136(%rsp), %rdi ## 8-byte Folded Reload - movq %r9, %rbp - sbbq 152(%rsp), %rbp ## 8-byte Folded Reload - movq %r8, %r13 - sbbq 160(%rsp), %r13 ## 8-byte Folded Reload - movq %rbx, %r15 - sbbq 168(%rsp), %r15 ## 8-byte Folded Reload - movq %rax, %r14 - sbbq 176(%rsp), %r14 ## 8-byte Folded Reload - movq %rcx, %r10 - sbbq 184(%rsp), %r10 ## 8-byte Folded Reload - movq %rdx, %r8 - sbbq 192(%rsp), %r8 ## 8-byte Folded Reload - movq %r11, %r9 - sbbq 200(%rsp), %r9 ## 8-byte Folded Reload - sbbq $0, %r12 - andl $1, %r12d - cmovneq %r11, %r9 - testb %r12b, %r12b - cmovneq (%rsp), %rsi ## 8-byte Folded Reload - movq 208(%rsp), %rdx ## 8-byte Reload 
- movq %rsi, (%rdx) - cmovneq 24(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 8(%rdx) - cmovneq 8(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 16(%rdx) - cmovneq 16(%rsp), %r13 ## 8-byte Folded Reload - movq %r13, 24(%rdx) - cmovneq %rbx, %r15 - movq %r15, 32(%rdx) - cmovneq %rax, %r14 - movq %r14, 40(%rdx) - cmovneq %rcx, %r10 - movq %r10, 48(%rdx) - cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload - movq %r8, 56(%rdx) - movq %r9, 64(%rdx) - addq $936, %rsp ## imm = 0x3A8 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_addPre9Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addPre9Lbmi2: ## @mcl_fp_addPre9Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 64(%rdx), %r8 - movq 64(%rsi), %r15 - movq 56(%rsi), %r9 - movq 48(%rsi), %r10 - movq 40(%rsi), %r11 - movq 24(%rsi), %r12 - movq 32(%rsi), %r14 - movq (%rdx), %rbx - movq 8(%rdx), %rcx - addq (%rsi), %rbx - adcq 8(%rsi), %rcx - movq 16(%rdx), %rax - adcq 16(%rsi), %rax - adcq 24(%rdx), %r12 - movq 56(%rdx), %r13 - movq 48(%rdx), %rsi - movq 40(%rdx), %rbp - movq 32(%rdx), %rdx - movq %rbx, (%rdi) - movq %rcx, 8(%rdi) - movq %rax, 16(%rdi) - movq %r12, 24(%rdi) - adcq %r14, %rdx - movq %rdx, 32(%rdi) - adcq %r11, %rbp - movq %rbp, 40(%rdi) - adcq %r10, %rsi - movq %rsi, 48(%rdi) - adcq %r9, %r13 - movq %r13, 56(%rdi) - adcq %r8, %r15 - movq %r15, 64(%rdi) - sbbq %rax, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_subPre9Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subPre9Lbmi2: ## @mcl_fp_subPre9Lbmi2 -## BB#0: - movq 32(%rdx), %r8 - movq (%rsi), %rcx - xorl %eax, %eax - subq (%rdx), %rcx - movq %rcx, (%rdi) - movq 8(%rsi), %rcx - sbbq 8(%rdx), %rcx - movq %rcx, 8(%rdi) - movq 16(%rsi), %rcx - sbbq 16(%rdx), %rcx - movq %rcx, 16(%rdi) - movq 24(%rsi), %rcx - sbbq 24(%rdx), %rcx - movq %rcx, 24(%rdi) - movq 32(%rsi), %rcx - sbbq %r8, %rcx - movq 40(%rdx), %r8 - movq %rcx, 32(%rdi) - movq 40(%rsi), %rcx - sbbq %r8, %rcx - movq 48(%rdx), %r8 - movq %rcx, 40(%rdi) - movq 48(%rsi), %rcx - sbbq %r8, %rcx - movq 56(%rdx), %r8 - movq %rcx, 48(%rdi) - movq 56(%rsi), %rcx - sbbq %r8, %rcx - movq %rcx, 56(%rdi) - movq 64(%rdx), %rcx - movq 64(%rsi), %rdx - sbbq %rcx, %rdx - movq %rdx, 64(%rdi) - sbbq $0, %rax - andl $1, %eax - retq - - .globl _mcl_fp_shr1_9Lbmi2 - .p2align 4, 0x90 -_mcl_fp_shr1_9Lbmi2: ## @mcl_fp_shr1_9Lbmi2 -## BB#0: - pushq %rbx - movq 64(%rsi), %r8 - movq 56(%rsi), %r9 - movq 48(%rsi), %r10 - movq 40(%rsi), %r11 - movq 32(%rsi), %rcx - movq 24(%rsi), %rdx - movq 16(%rsi), %rax - movq (%rsi), %rbx - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rbx - movq %rbx, (%rdi) - shrdq $1, %rax, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rdx, %rax - movq %rax, 16(%rdi) - shrdq $1, %rcx, %rdx - movq %rdx, 24(%rdi) - shrdq $1, %r11, %rcx - movq %rcx, 32(%rdi) - shrdq $1, %r10, %r11 - movq %r11, 40(%rdi) - shrdq $1, %r9, %r10 - movq %r10, 48(%rdi) - shrdq $1, %r8, %r9 - movq %r9, 56(%rdi) - shrq %r8 - movq %r8, 64(%rdi) - popq %rbx - retq - - .globl _mcl_fp_add9Lbmi2 - .p2align 4, 0x90 -_mcl_fp_add9Lbmi2: ## @mcl_fp_add9Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 64(%rdx), %r12 - movq 64(%rsi), %r8 - movq 56(%rsi), %r13 - movq 48(%rsi), %r9 - movq 40(%rsi), %r10 - movq 24(%rsi), %r14 - movq 32(%rsi), %r11 - movq (%rdx), %rbx - movq 8(%rdx), %r15 - addq (%rsi), %rbx - adcq 8(%rsi), %r15 - movq 16(%rdx), %rax - adcq 16(%rsi), %rax - adcq 24(%rdx), %r14 - adcq 
32(%rdx), %r11 - adcq 40(%rdx), %r10 - movq 56(%rdx), %rsi - adcq 48(%rdx), %r9 - movq %rbx, (%rdi) - movq %r15, 8(%rdi) - movq %rax, 16(%rdi) - movq %r14, 24(%rdi) - movq %r11, 32(%rdi) - movq %r10, 40(%rdi) - movq %r9, 48(%rdi) - adcq %r13, %rsi - movq %rsi, 56(%rdi) - adcq %r12, %r8 - movq %r8, 64(%rdi) - sbbq %rdx, %rdx - andl $1, %edx - subq (%rcx), %rbx - sbbq 8(%rcx), %r15 - sbbq 16(%rcx), %rax - sbbq 24(%rcx), %r14 - sbbq 32(%rcx), %r11 - sbbq 40(%rcx), %r10 - sbbq 48(%rcx), %r9 - sbbq 56(%rcx), %rsi - sbbq 64(%rcx), %r8 - sbbq $0, %rdx - testb $1, %dl - jne LBB136_2 -## BB#1: ## %nocarry - movq %rbx, (%rdi) - movq %r15, 8(%rdi) - movq %rax, 16(%rdi) - movq %r14, 24(%rdi) - movq %r11, 32(%rdi) - movq %r10, 40(%rdi) - movq %r9, 48(%rdi) - movq %rsi, 56(%rdi) - movq %r8, 64(%rdi) -LBB136_2: ## %carry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_addNF9Lbmi2 - .p2align 4, 0x90 -_mcl_fp_addNF9Lbmi2: ## @mcl_fp_addNF9Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, %r8 - movq 64(%rdx), %r10 - movq 56(%rdx), %r11 - movq 48(%rdx), %r9 - movq 40(%rdx), %rax - movq 32(%rdx), %rdi - movq 24(%rdx), %rbp - movq 16(%rdx), %r15 - movq (%rdx), %rbx - movq 8(%rdx), %r13 - addq (%rsi), %rbx - adcq 8(%rsi), %r13 - adcq 16(%rsi), %r15 - adcq 24(%rsi), %rbp - movq %rbp, -24(%rsp) ## 8-byte Spill - adcq 32(%rsi), %rdi - movq %rdi, -40(%rsp) ## 8-byte Spill - adcq 40(%rsi), %rax - movq %rax, -32(%rsp) ## 8-byte Spill - adcq 48(%rsi), %r9 - movq %r9, %rdi - movq %rdi, -16(%rsp) ## 8-byte Spill - adcq 56(%rsi), %r11 - movq %r11, %rax - movq %rax, -8(%rsp) ## 8-byte Spill - adcq 64(%rsi), %r10 - movq %r10, %r9 - movq %rbx, %rsi - subq (%rcx), %rsi - movq %r13, %rdx - sbbq 8(%rcx), %rdx - movq %r15, %r12 - sbbq 16(%rcx), %r12 - sbbq 24(%rcx), %rbp - movq -40(%rsp), %r14 ## 8-byte Reload - sbbq 32(%rcx), %r14 - movq -32(%rsp), %r11 ## 8-byte Reload - sbbq 40(%rcx), %r11 - movq %rdi, %r10 - sbbq 48(%rcx), %r10 - movq %rax, %rdi - sbbq 56(%rcx), %rdi - movq %r9, %rax - sbbq 64(%rcx), %rax - movq %rax, %rcx - sarq $63, %rcx - cmovsq %rbx, %rsi - movq %rsi, (%r8) - cmovsq %r13, %rdx - movq %rdx, 8(%r8) - cmovsq %r15, %r12 - movq %r12, 16(%r8) - cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 24(%r8) - cmovsq -40(%rsp), %r14 ## 8-byte Folded Reload - movq %r14, 32(%r8) - cmovsq -32(%rsp), %r11 ## 8-byte Folded Reload - movq %r11, 40(%r8) - cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload - movq %r10, 48(%r8) - cmovsq -8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 56(%r8) - cmovsq %r9, %rax - movq %rax, 64(%r8) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_sub9Lbmi2 - .p2align 4, 0x90 -_mcl_fp_sub9Lbmi2: ## @mcl_fp_sub9Lbmi2 -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 64(%rdx), %r13 - movq (%rsi), %rax - movq 8(%rsi), %r9 - xorl %ebx, %ebx - subq (%rdx), %rax - sbbq 8(%rdx), %r9 - movq 16(%rsi), %r10 - sbbq 16(%rdx), %r10 - movq 24(%rsi), %r11 - sbbq 24(%rdx), %r11 - movq 32(%rsi), %r12 - sbbq 32(%rdx), %r12 - movq 40(%rsi), %r14 - sbbq 40(%rdx), %r14 - movq 48(%rsi), %r15 - sbbq 48(%rdx), %r15 - movq 64(%rsi), %r8 - movq 56(%rsi), %rsi - sbbq 56(%rdx), %rsi - movq %rax, (%rdi) - movq %r9, 8(%rdi) - movq %r10, 16(%rdi) - movq %r11, 24(%rdi) - movq %r12, 32(%rdi) - movq %r14, 40(%rdi) - movq %r15, 48(%rdi) - movq %rsi, 56(%rdi) - sbbq %r13, %r8 - movq %r8, 64(%rdi) - sbbq $0, %rbx - testb $1, %bl - je LBB138_2 -## 
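(mcl_fp_add9Lbmi2 and mcl_fp_sub9Lbmi2 above, whose %carry tail continues just below, implement modular add/sub as a full-width operation followed by one conditional correction by p, selected through the sbbq $0 / testb flag sequence; the addPre/subPre variants earlier in this hunk are the same loops with the raw carry returned instead. A hedged portable sketch of the pattern, under the same caveats as the earlier sketch (illustrative names, fixed N, GCC/Clang unsigned __int128):)

#include <stdint.h>

enum { N = 9 };

/* z = x + y mod p: add, then subtract p unless that would borrow past the
 * carry out of the addition */
static void fp_add(uint64_t z[N], const uint64_t x[N], const uint64_t y[N],
                   const uint64_t p[N])
{
    uint64_t carry = 0, s[N], borrow = 0;
    for (int i = 0; i < N; i++) {
        unsigned __int128 acc = (unsigned __int128)x[i] + y[i] + carry;
        z[i] = (uint64_t)acc;
        carry = (uint64_t)(acc >> 64);
    }
    for (int i = 0; i < N; i++) {
        unsigned __int128 d = (unsigned __int128)z[i] - p[i] - borrow;
        s[i] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;
    }
    if (carry || !borrow)              /* x + y >= p: keep the reduced value */
        for (int i = 0; i < N; i++) z[i] = s[i];
}

/* z = x - y mod p: subtract, then add p back only if the subtraction
 * borrowed (the %carry block in the assembly) */
static void fp_sub(uint64_t z[N], const uint64_t x[N], const uint64_t y[N],
                   const uint64_t p[N])
{
    uint64_t borrow = 0;
    for (int i = 0; i < N; i++) {
        unsigned __int128 d = (unsigned __int128)x[i] - y[i] - borrow;
        z[i] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;
    }
    if (borrow) {
        uint64_t carry = 0;
        for (int i = 0; i < N; i++) {
            unsigned __int128 acc = (unsigned __int128)z[i] + p[i] + carry;
            z[i] = (uint64_t)acc;
            carry = (uint64_t)(acc >> 64);
        }
    }
}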
BB#1: ## %carry - addq (%rcx), %rax - movq %rax, (%rdi) - movq 8(%rcx), %rax - adcq %r9, %rax - movq %rax, 8(%rdi) - movq 16(%rcx), %rax - adcq %r10, %rax - movq %rax, 16(%rdi) - movq 24(%rcx), %rax - adcq %r11, %rax - movq %rax, 24(%rdi) - movq 32(%rcx), %rax - adcq %r12, %rax - movq %rax, 32(%rdi) - movq 40(%rcx), %rax - adcq %r14, %rax - movq %rax, 40(%rdi) - movq 48(%rcx), %rax - adcq %r15, %rax - movq %rax, 48(%rdi) - movq 56(%rcx), %rax - adcq %rsi, %rax - movq %rax, 56(%rdi) - movq 64(%rcx), %rax - adcq %r8, %rax - movq %rax, 64(%rdi) -LBB138_2: ## %nocarry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_subNF9Lbmi2 - .p2align 4, 0x90 -_mcl_fp_subNF9Lbmi2: ## @mcl_fp_subNF9Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r10 - movq %rdi, %rbx - movq 64(%rsi), %r11 - movdqu (%rdx), %xmm1 - movdqu 16(%rdx), %xmm2 - movdqu 32(%rdx), %xmm3 - movdqu 48(%rdx), %xmm4 - pshufd $78, %xmm4, %xmm0 ## xmm0 = xmm4[2,3,0,1] - movd %xmm0, %r8 - movdqu (%rsi), %xmm5 - movdqu 16(%rsi), %xmm6 - movdqu 32(%rsi), %xmm7 - movdqu 48(%rsi), %xmm8 - pshufd $78, %xmm8, %xmm0 ## xmm0 = xmm8[2,3,0,1] - movd %xmm0, %rax - movd %xmm4, %r9 - pshufd $78, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,0,1] - movd %xmm0, %rdi - pshufd $78, %xmm7, %xmm0 ## xmm0 = xmm7[2,3,0,1] - movd %xmm3, %rcx - pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1] - movd %xmm3, %rbp - pshufd $78, %xmm6, %xmm3 ## xmm3 = xmm6[2,3,0,1] - movd %xmm2, %r13 - pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] - movd %xmm2, %r12 - pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1] - movd %xmm1, %rsi - movd %xmm5, %r15 - subq %rsi, %r15 - movd %xmm2, %r14 - sbbq %r12, %r14 - movd %xmm6, %r12 - sbbq %r13, %r12 - movd %xmm3, %r13 - sbbq %rbp, %r13 - movd %xmm7, %rsi - sbbq %rcx, %rsi - movq %rsi, -16(%rsp) ## 8-byte Spill - movd %xmm0, %rcx - sbbq %rdi, %rcx - movq %rcx, -24(%rsp) ## 8-byte Spill - movd %xmm8, %rcx - sbbq %r9, %rcx - movq %rcx, -32(%rsp) ## 8-byte Spill - sbbq %r8, %rax - movq %rax, -8(%rsp) ## 8-byte Spill - sbbq 64(%rdx), %r11 - movq %r11, -40(%rsp) ## 8-byte Spill - movq %r11, %rdx - sarq $63, %rdx - movq %rdx, %rbp - shldq $1, %r11, %rbp - movq 24(%r10), %r9 - andq %rbp, %r9 - movq 8(%r10), %rdi - andq %rbp, %rdi - andq (%r10), %rbp - movq 64(%r10), %r11 - andq %rdx, %r11 - rorxq $63, %rdx, %rax - andq 56(%r10), %rdx - movq 48(%r10), %r8 - andq %rax, %r8 - movq 40(%r10), %rsi - andq %rax, %rsi - movq 32(%r10), %rcx - andq %rax, %rcx - andq 16(%r10), %rax - addq %r15, %rbp - adcq %r14, %rdi - movq %rbp, (%rbx) - adcq %r12, %rax - movq %rdi, 8(%rbx) - adcq %r13, %r9 - movq %rax, 16(%rbx) - movq %r9, 24(%rbx) - adcq -16(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 32(%rbx) - adcq -24(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 40(%rbx) - adcq -32(%rsp), %r8 ## 8-byte Folded Reload - movq %r8, 48(%rbx) - adcq -8(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 56(%rbx) - adcq -40(%rsp), %r11 ## 8-byte Folded Reload - movq %r11, 64(%rbx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_add9Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_add9Lbmi2: ## @mcl_fpDbl_add9Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r15 - movq 136(%rdx), %rax - movq %rax, -48(%rsp) ## 8-byte Spill - movq 128(%rdx), %rax - movq %rax, -40(%rsp) ## 8-byte Spill - movq 120(%rdx), %r10 - movq 112(%rdx), %r11 - movq 24(%rsi), %rcx - movq 32(%rsi), %r14 - movq 16(%rdx), 
%rbp - movq (%rdx), %rax - movq 8(%rdx), %rbx - addq (%rsi), %rax - adcq 8(%rsi), %rbx - adcq 16(%rsi), %rbp - adcq 24(%rdx), %rcx - adcq 32(%rdx), %r14 - movq 104(%rdx), %r9 - movq 96(%rdx), %r13 - movq %rax, (%rdi) - movq 88(%rdx), %r8 - movq %rbx, 8(%rdi) - movq 80(%rdx), %r12 - movq %rbp, 16(%rdi) - movq 40(%rdx), %rax - movq %rcx, 24(%rdi) - movq 40(%rsi), %rbp - adcq %rax, %rbp - movq 48(%rdx), %rcx - movq %r14, 32(%rdi) - movq 48(%rsi), %rax - adcq %rcx, %rax - movq 56(%rdx), %r14 - movq %rbp, 40(%rdi) - movq 56(%rsi), %rbp - adcq %r14, %rbp - movq 72(%rdx), %rcx - movq 64(%rdx), %rdx - movq %rax, 48(%rdi) - movq 64(%rsi), %rax - adcq %rdx, %rax - movq 136(%rsi), %rbx - movq %rbp, 56(%rdi) - movq 72(%rsi), %rbp - adcq %rcx, %rbp - movq 128(%rsi), %rcx - movq %rax, 64(%rdi) - movq 80(%rsi), %rdx - adcq %r12, %rdx - movq 88(%rsi), %r12 - adcq %r8, %r12 - movq 96(%rsi), %r14 - adcq %r13, %r14 - movq %r14, -8(%rsp) ## 8-byte Spill - movq 104(%rsi), %rax - adcq %r9, %rax - movq %rax, -32(%rsp) ## 8-byte Spill - movq 120(%rsi), %rax - movq 112(%rsi), %rsi - adcq %r11, %rsi - movq %rsi, -24(%rsp) ## 8-byte Spill - adcq %r10, %rax - movq %rax, -16(%rsp) ## 8-byte Spill - adcq -40(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -40(%rsp) ## 8-byte Spill - adcq -48(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, -48(%rsp) ## 8-byte Spill - sbbq %r9, %r9 - andl $1, %r9d - movq %rbp, %r10 - subq (%r15), %r10 - movq %rdx, %r11 - sbbq 8(%r15), %r11 - movq %r12, %rbx - sbbq 16(%r15), %rbx - sbbq 24(%r15), %r14 - movq -32(%rsp), %r13 ## 8-byte Reload - sbbq 32(%r15), %r13 - movq -24(%rsp), %rsi ## 8-byte Reload - sbbq 40(%r15), %rsi - movq -16(%rsp), %rax ## 8-byte Reload - sbbq 48(%r15), %rax - sbbq 56(%r15), %rcx - movq -48(%rsp), %r8 ## 8-byte Reload - sbbq 64(%r15), %r8 - sbbq $0, %r9 - andl $1, %r9d - cmovneq %rbp, %r10 - movq %r10, 72(%rdi) - testb %r9b, %r9b - cmovneq %rdx, %r11 - movq %r11, 80(%rdi) - cmovneq %r12, %rbx - movq %rbx, 88(%rdi) - cmovneq -8(%rsp), %r14 ## 8-byte Folded Reload - movq %r14, 96(%rdi) - cmovneq -32(%rsp), %r13 ## 8-byte Folded Reload - movq %r13, 104(%rdi) - cmovneq -24(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 112(%rdi) - cmovneq -16(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 120(%rdi) - cmovneq -40(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 128(%rdi) - cmovneq -48(%rsp), %r8 ## 8-byte Folded Reload - movq %r8, 136(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sub9Lbmi2 - .p2align 4, 0x90 -_mcl_fpDbl_sub9Lbmi2: ## @mcl_fpDbl_sub9Lbmi2 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r14 - movq 136(%rdx), %rax - movq %rax, -24(%rsp) ## 8-byte Spill - movq 128(%rdx), %rax - movq %rax, -32(%rsp) ## 8-byte Spill - movq 120(%rdx), %rax - movq %rax, -40(%rsp) ## 8-byte Spill - movq 16(%rsi), %r11 - movq (%rsi), %r12 - movq 8(%rsi), %r13 - xorl %r9d, %r9d - subq (%rdx), %r12 - sbbq 8(%rdx), %r13 - sbbq 16(%rdx), %r11 - movq 24(%rsi), %rbx - sbbq 24(%rdx), %rbx - movq 32(%rsi), %rbp - sbbq 32(%rdx), %rbp - movq 112(%rdx), %r10 - movq 104(%rdx), %rcx - movq %r12, (%rdi) - movq 96(%rdx), %rax - movq %r13, 8(%rdi) - movq 88(%rdx), %r13 - movq %r11, 16(%rdi) - movq 40(%rdx), %r11 - movq %rbx, 24(%rdi) - movq 40(%rsi), %rbx - sbbq %r11, %rbx - movq 48(%rdx), %r11 - movq %rbp, 32(%rdi) - movq 48(%rsi), %rbp - sbbq %r11, %rbp - movq 56(%rdx), %r11 - movq %rbx, 40(%rdi) - movq 56(%rsi), %rbx - sbbq %r11, %rbx - movq 64(%rdx), %r11 - movq %rbp, 
48(%rdi) - movq 64(%rsi), %rbp - sbbq %r11, %rbp - movq 80(%rdx), %r8 - movq 72(%rdx), %r11 - movq %rbx, 56(%rdi) - movq 72(%rsi), %r15 - sbbq %r11, %r15 - movq 136(%rsi), %rdx - movq %rbp, 64(%rdi) - movq 80(%rsi), %rbp - sbbq %r8, %rbp - movq 88(%rsi), %r12 - sbbq %r13, %r12 - movq 96(%rsi), %r13 - sbbq %rax, %r13 - movq 104(%rsi), %rax - sbbq %rcx, %rax - movq %rax, -16(%rsp) ## 8-byte Spill - movq 112(%rsi), %rax - sbbq %r10, %rax - movq %rax, -8(%rsp) ## 8-byte Spill - movq 128(%rsi), %rax - movq 120(%rsi), %rcx - sbbq -40(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -40(%rsp) ## 8-byte Spill - sbbq -32(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -32(%rsp) ## 8-byte Spill - sbbq -24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -24(%rsp) ## 8-byte Spill - movl $0, %r8d - sbbq $0, %r8 - andl $1, %r8d - movq (%r14), %r10 - cmoveq %r9, %r10 - testb %r8b, %r8b - movq 16(%r14), %r8 - cmoveq %r9, %r8 - movq 8(%r14), %rdx - cmoveq %r9, %rdx - movq 64(%r14), %rbx - cmoveq %r9, %rbx - movq 56(%r14), %r11 - cmoveq %r9, %r11 - movq 48(%r14), %rsi - cmoveq %r9, %rsi - movq 40(%r14), %rcx - cmoveq %r9, %rcx - movq 32(%r14), %rax - cmoveq %r9, %rax - cmovneq 24(%r14), %r9 - addq %r15, %r10 - adcq %rbp, %rdx - movq %r10, 72(%rdi) - adcq %r12, %r8 - movq %rdx, 80(%rdi) - adcq %r13, %r9 - movq %r8, 88(%rdi) - movq %r9, 96(%rdi) - adcq -16(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 104(%rdi) - adcq -8(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 112(%rdi) - adcq -40(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 120(%rdi) - adcq -32(%rsp), %r11 ## 8-byte Folded Reload - movq %r11, 128(%rdi) - adcq -24(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, 136(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - -.subsections_via_symbols diff --git a/vendor/github.com/dexon-foundation/mcl/src/asm/x86-64mac.s b/vendor/github.com/dexon-foundation/mcl/src/asm/x86-64mac.s deleted file mode 100644 index 0dc7014a3..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/asm/x86-64mac.s +++ /dev/null @@ -1,16313 +0,0 @@ - .section __TEXT,__text,regular,pure_instructions - .macosx_version_min 10, 12 - .globl _makeNIST_P192L - .p2align 4, 0x90 -_makeNIST_P192L: ## @makeNIST_P192L -## BB#0: - movq $-1, %rax - movq $-2, %rdx - movq $-1, %rcx - retq - - .globl _mcl_fpDbl_mod_NIST_P192L - .p2align 4, 0x90 -_mcl_fpDbl_mod_NIST_P192L: ## @mcl_fpDbl_mod_NIST_P192L -## BB#0: - pushq %r14 - pushq %rbx - movq 16(%rsi), %r10 - movq 24(%rsi), %r8 - movq 40(%rsi), %r9 - movq 8(%rsi), %rax - addq %r9, %rax - adcq $0, %r10 - sbbq %rcx, %rcx - andl $1, %ecx - movq 32(%rsi), %r11 - movq (%rsi), %r14 - addq %r8, %r14 - adcq %r11, %rax - adcq %r9, %r10 - adcq $0, %rcx - addq %r9, %r14 - adcq %r8, %rax - adcq %r11, %r10 - adcq $0, %rcx - addq %rcx, %r14 - adcq %rax, %rcx - adcq $0, %r10 - sbbq %rax, %rax - andl $1, %eax - movq %r14, %rsi - addq $1, %rsi - movq %rcx, %rdx - adcq $1, %rdx - movq %r10, %rbx - adcq $0, %rbx - adcq $-1, %rax - andl $1, %eax - cmovneq %r14, %rsi - movq %rsi, (%rdi) - testb %al, %al - cmovneq %rcx, %rdx - movq %rdx, 8(%rdi) - cmovneq %r10, %rbx - movq %rbx, 16(%rdi) - popq %rbx - popq %r14 - retq - - .globl _mcl_fp_sqr_NIST_P192L - .p2align 4, 0x90 -_mcl_fp_sqr_NIST_P192L: ## @mcl_fp_sqr_NIST_P192L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, -8(%rsp) ## 8-byte Spill - movq 16(%rsi), %r11 - movq (%rsi), %rbx - movq 8(%rsi), %rcx - movq %r11, %rax - mulq %rcx - movq %rdx, %rdi - movq %rax, %r14 
- movq %rcx, %rax - mulq %rcx - movq %rdx, %r15 - movq %rax, %r12 - movq %rcx, %rax - mulq %rbx - movq %rax, %r13 - movq %rdx, %rcx - addq %rcx, %r12 - adcq %r14, %r15 - movq %rdi, %r10 - adcq $0, %r10 - movq %r11, %rax - mulq %rbx - movq %rdx, %r9 - movq %rax, %rbp - movq %rbx, %rax - mulq %rbx - movq %rax, %r8 - movq %rdx, %rsi - addq %r13, %rsi - adcq %rbp, %rcx - movq %r9, %rbx - adcq $0, %rbx - addq %r13, %rsi - adcq %r12, %rcx - adcq %r15, %rbx - adcq $0, %r10 - movq %r11, %rax - mulq %r11 - addq %r14, %r9 - adcq %rdi, %rax - adcq $0, %rdx - addq %rbp, %rcx - adcq %rbx, %r9 - adcq %r10, %rax - adcq $0, %rdx - addq %rdx, %rsi - adcq $0, %rcx - sbbq %rbp, %rbp - andl $1, %ebp - addq %r9, %r8 - adcq %rax, %rsi - adcq %rdx, %rcx - adcq $0, %rbp - addq %rdx, %r8 - adcq %r9, %rsi - adcq %rax, %rcx - adcq $0, %rbp - addq %rbp, %r8 - adcq %rsi, %rbp - adcq $0, %rcx - sbbq %rax, %rax - andl $1, %eax - movq %r8, %rdx - addq $1, %rdx - movq %rbp, %rsi - adcq $1, %rsi - movq %rcx, %rdi - adcq $0, %rdi - adcq $-1, %rax - andl $1, %eax - cmovneq %r8, %rdx - movq -8(%rsp), %rbx ## 8-byte Reload - movq %rdx, (%rbx) - testb %al, %al - cmovneq %rbp, %rsi - movq %rsi, 8(%rbx) - cmovneq %rcx, %rdi - movq %rdi, 16(%rbx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mulNIST_P192L - .p2align 4, 0x90 -_mcl_fp_mulNIST_P192L: ## @mcl_fp_mulNIST_P192L -## BB#0: - pushq %r14 - pushq %rbx - subq $56, %rsp - movq %rdi, %r14 - leaq 8(%rsp), %rdi - callq _mcl_fpDbl_mulPre3L - movq 24(%rsp), %r9 - movq 32(%rsp), %r8 - movq 48(%rsp), %rdi - movq 16(%rsp), %rbx - addq %rdi, %rbx - adcq $0, %r9 - sbbq %rcx, %rcx - andl $1, %ecx - movq 40(%rsp), %rsi - movq 8(%rsp), %rdx - addq %r8, %rdx - adcq %rsi, %rbx - adcq %rdi, %r9 - adcq $0, %rcx - addq %rdi, %rdx - adcq %r8, %rbx - adcq %rsi, %r9 - adcq $0, %rcx - addq %rcx, %rdx - adcq %rbx, %rcx - adcq $0, %r9 - sbbq %rsi, %rsi - andl $1, %esi - movq %rdx, %rdi - addq $1, %rdi - movq %rcx, %rbx - adcq $1, %rbx - movq %r9, %rax - adcq $0, %rax - adcq $-1, %rsi - andl $1, %esi - cmovneq %rdx, %rdi - movq %rdi, (%r14) - testb %sil, %sil - cmovneq %rcx, %rbx - movq %rbx, 8(%r14) - cmovneq %r9, %rax - movq %rax, 16(%r14) - addq $56, %rsp - popq %rbx - popq %r14 - retq - - .globl _mcl_fpDbl_mod_NIST_P521L - .p2align 4, 0x90 -_mcl_fpDbl_mod_NIST_P521L: ## @mcl_fpDbl_mod_NIST_P521L -## BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 120(%rsi), %r9 - movq 128(%rsi), %r14 - movq %r14, %r8 - shldq $55, %r9, %r8 - movq 112(%rsi), %r10 - shldq $55, %r10, %r9 - movq 104(%rsi), %r11 - shldq $55, %r11, %r10 - movq 96(%rsi), %r15 - shldq $55, %r15, %r11 - movq 88(%rsi), %r12 - shldq $55, %r12, %r15 - movq 80(%rsi), %rcx - shldq $55, %rcx, %r12 - movq 64(%rsi), %rbx - movq 72(%rsi), %rax - shldq $55, %rax, %rcx - shrq $9, %r14 - shldq $55, %rbx, %rax - ## kill: %EBX %EBX %RBX %RBX - andl $511, %ebx ## imm = 0x1FF - addq (%rsi), %rax - adcq 8(%rsi), %rcx - adcq 16(%rsi), %r12 - adcq 24(%rsi), %r15 - adcq 32(%rsi), %r11 - adcq 40(%rsi), %r10 - adcq 48(%rsi), %r9 - adcq 56(%rsi), %r8 - adcq %r14, %rbx - movl %ebx, %esi - shrl $9, %esi - andl $1, %esi - addq %rax, %rsi - adcq $0, %rcx - adcq $0, %r12 - adcq $0, %r15 - adcq $0, %r11 - adcq $0, %r10 - adcq $0, %r9 - adcq $0, %r8 - adcq $0, %rbx - movq %rsi, %rax - andq %r12, %rax - andq %r15, %rax - andq %r11, %rax - andq %r10, %rax - andq %r9, %rax - andq %r8, %rax - movq %rbx, %rdx - orq $-512, %rdx ## imm = 0xFE00 - andq %rax, %rdx - andq %rcx, %rdx - cmpq $-1, %rdx - je LBB4_1 
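(The NIST routines above avoid Montgomery form entirely and exploit the shape of the prime: makeNIST_P192L returns p192 = 2^192 - 2^64 - 1, so 2^192 == 2^64 + 1 (mod p192), and a 384-bit product is reduced by folding the three high limbs back in and conditionally subtracting p once; that is exactly what the addq/adcq chains in mcl_fpDbl_mod_NIST_P192L compute. mcl_fpDbl_mod_NIST_P521L, completed just below, plays the same trick with 2^521 == 1 via the shldq $55 sequence. A portable sketch of the P-192 fold; illustrative only, GCC/Clang unsigned __int128 assumed:)

#include <stdint.h>

/* r = c mod (2^192 - 2^64 - 1); c is a little-endian 6-limb 384-bit value.
 * Using 2^192 == 2^64 + 1, the high limbs fold as
 * (c5,c4,c3)*2^192 == (c5,c5,c5) + (c4,c4,0) + (0,c3,c3). */
static void mod_p192(uint64_t r[3], const uint64_t c[6])
{
    unsigned __int128 t0, t1, t2;
    t0 = (unsigned __int128)c[0] + c[3] + c[5];
    t1 = (unsigned __int128)c[1] + c[3] + c[4] + c[5] + (uint64_t)(t0 >> 64);
    t2 = (unsigned __int128)c[2] + c[4] + c[5] + (uint64_t)(t1 >> 64);
    r[0] = (uint64_t)t0; r[1] = (uint64_t)t1; r[2] = (uint64_t)t2;
    uint64_t carry = (uint64_t)(t2 >> 64);
    while (carry) {                    /* carry*2^192 folds to carry*(2^64+1) */
        t0 = (unsigned __int128)r[0] + carry;
        t1 = (unsigned __int128)r[1] + carry + (uint64_t)(t0 >> 64);
        t2 = (unsigned __int128)r[2] + (uint64_t)(t1 >> 64);
        r[0] = (uint64_t)t0; r[1] = (uint64_t)t1; r[2] = (uint64_t)t2;
        carry = (uint64_t)(t2 >> 64);
    }
    /* r >= p iff r + (2^64 + 1) carries out of 192 bits, since
     * 2^192 - p = 2^64 + 1; this is the addq $1 / adcq $1 / adcq $0 test */
    t0 = (unsigned __int128)r[0] + 1;
    t1 = (unsigned __int128)r[1] + 1 + (uint64_t)(t0 >> 64);
    t2 = (unsigned __int128)r[2] + (uint64_t)(t1 >> 64);
    if ((uint64_t)(t2 >> 64)) {
        r[0] = (uint64_t)t0; r[1] = (uint64_t)t1; r[2] = (uint64_t)t2;
    }
}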
-## BB#3: ## %nonzero - movq %rsi, (%rdi) - movq %rcx, 8(%rdi) - movq %r12, 16(%rdi) - movq %r15, 24(%rdi) - movq %r11, 32(%rdi) - movq %r10, 40(%rdi) - movq %r9, 48(%rdi) - movq %r8, 56(%rdi) - andl $511, %ebx ## imm = 0x1FF - movq %rbx, 64(%rdi) - jmp LBB4_2 -LBB4_1: ## %zero - movq $0, 64(%rdi) - movq $0, 56(%rdi) - movq $0, 48(%rdi) - movq $0, 40(%rdi) - movq $0, 32(%rdi) - movq $0, 24(%rdi) - movq $0, 16(%rdi) - movq $0, 8(%rdi) - movq $0, (%rdi) -LBB4_2: ## %zero - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_mulUnitPre1L - .p2align 4, 0x90 -_mcl_fp_mulUnitPre1L: ## @mcl_fp_mulUnitPre1L -## BB#0: - movq %rdx, %rax - mulq (%rsi) - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - retq - - .globl _mcl_fpDbl_mulPre1L - .p2align 4, 0x90 -_mcl_fpDbl_mulPre1L: ## @mcl_fpDbl_mulPre1L -## BB#0: - movq (%rdx), %rax - mulq (%rsi) - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - retq - - .globl _mcl_fpDbl_sqrPre1L - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre1L: ## @mcl_fpDbl_sqrPre1L -## BB#0: - movq (%rsi), %rax - mulq %rax - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - retq - - .globl _mcl_fp_mont1L - .p2align 4, 0x90 -_mcl_fp_mont1L: ## @mcl_fp_mont1L -## BB#0: - movq (%rsi), %rax - mulq (%rdx) - movq %rax, %rsi - movq %rdx, %r8 - movq -8(%rcx), %rax - imulq %rsi, %rax - movq (%rcx), %rcx - mulq %rcx - addq %rsi, %rax - adcq %r8, %rdx - sbbq %rax, %rax - andl $1, %eax - movq %rdx, %rsi - subq %rcx, %rsi - sbbq $0, %rax - testb $1, %al - cmovneq %rdx, %rsi - movq %rsi, (%rdi) - retq - - .globl _mcl_fp_montNF1L - .p2align 4, 0x90 -_mcl_fp_montNF1L: ## @mcl_fp_montNF1L -## BB#0: - movq (%rsi), %rax - mulq (%rdx) - movq %rax, %rsi - movq %rdx, %r8 - movq -8(%rcx), %rax - imulq %rsi, %rax - movq (%rcx), %rcx - mulq %rcx - addq %rsi, %rax - adcq %r8, %rdx - movq %rdx, %rax - subq %rcx, %rax - cmovsq %rdx, %rax - movq %rax, (%rdi) - retq - - .globl _mcl_fp_montRed1L - .p2align 4, 0x90 -_mcl_fp_montRed1L: ## @mcl_fp_montRed1L -## BB#0: - movq (%rsi), %rcx - movq -8(%rdx), %rax - imulq %rcx, %rax - movq (%rdx), %r8 - mulq %r8 - addq %rcx, %rax - adcq 8(%rsi), %rdx - sbbq %rax, %rax - andl $1, %eax - movq %rdx, %rcx - subq %r8, %rcx - sbbq $0, %rax - testb $1, %al - cmovneq %rdx, %rcx - movq %rcx, (%rdi) - retq - - .globl _mcl_fp_addPre1L - .p2align 4, 0x90 -_mcl_fp_addPre1L: ## @mcl_fp_addPre1L -## BB#0: - movq (%rdx), %rax - addq (%rsi), %rax - movq %rax, (%rdi) - sbbq %rax, %rax - andl $1, %eax - retq - - .globl _mcl_fp_subPre1L - .p2align 4, 0x90 -_mcl_fp_subPre1L: ## @mcl_fp_subPre1L -## BB#0: - movq (%rsi), %rcx - xorl %eax, %eax - subq (%rdx), %rcx - movq %rcx, (%rdi) - sbbq $0, %rax - andl $1, %eax - retq - - .globl _mcl_fp_shr1_1L - .p2align 4, 0x90 -_mcl_fp_shr1_1L: ## @mcl_fp_shr1_1L -## BB#0: - movq (%rsi), %rax - shrq %rax - movq %rax, (%rdi) - retq - - .globl _mcl_fp_add1L - .p2align 4, 0x90 -_mcl_fp_add1L: ## @mcl_fp_add1L -## BB#0: - movq (%rdx), %rax - addq (%rsi), %rax - movq %rax, (%rdi) - sbbq %rdx, %rdx - andl $1, %edx - subq (%rcx), %rax - sbbq $0, %rdx - testb $1, %dl - jne LBB14_2 -## BB#1: ## %nocarry - movq %rax, (%rdi) -LBB14_2: ## %carry - retq - - .globl _mcl_fp_addNF1L - .p2align 4, 0x90 -_mcl_fp_addNF1L: ## @mcl_fp_addNF1L -## BB#0: - movq (%rdx), %rax - addq (%rsi), %rax - movq %rax, %rdx - subq (%rcx), %rdx - cmovsq %rax, %rdx - movq %rdx, (%rdi) - retq - - .globl _mcl_fp_sub1L - .p2align 4, 0x90 -_mcl_fp_sub1L: ## @mcl_fp_sub1L -## BB#0: - movq (%rsi), %rax - xorl %esi, %esi - subq (%rdx), %rax - movq %rax, (%rdi) - sbbq $0, %rsi - testb $1, %sil - jne LBB16_2 -## 
BB#1: ## %nocarry - retq -LBB16_2: ## %carry - addq (%rcx), %rax - movq %rax, (%rdi) - retq - - .globl _mcl_fp_subNF1L - .p2align 4, 0x90 -_mcl_fp_subNF1L: ## @mcl_fp_subNF1L -## BB#0: - movq (%rsi), %rax - subq (%rdx), %rax - movq %rax, %rdx - sarq $63, %rdx - andq (%rcx), %rdx - addq %rax, %rdx - movq %rdx, (%rdi) - retq - - .globl _mcl_fpDbl_add1L - .p2align 4, 0x90 -_mcl_fpDbl_add1L: ## @mcl_fpDbl_add1L -## BB#0: - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - movq %rax, (%rdi) - sbbq %rax, %rax - andl $1, %eax - movq %rdx, %rsi - subq (%rcx), %rsi - sbbq $0, %rax - testb $1, %al - cmovneq %rdx, %rsi - movq %rsi, 8(%rdi) - retq - - .globl _mcl_fpDbl_sub1L - .p2align 4, 0x90 -_mcl_fpDbl_sub1L: ## @mcl_fpDbl_sub1L -## BB#0: - movq (%rsi), %rax - movq 8(%rsi), %r8 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r8 - movq %rax, (%rdi) - movl $0, %eax - sbbq $0, %rax - testb $1, %al - cmovneq (%rcx), %rsi - addq %r8, %rsi - movq %rsi, 8(%rdi) - retq - - .globl _mcl_fp_mulUnitPre2L - .p2align 4, 0x90 -_mcl_fp_mulUnitPre2L: ## @mcl_fp_mulUnitPre2L -## BB#0: - movq %rdx, %r8 - movq %r8, %rax - mulq 8(%rsi) - movq %rdx, %rcx - movq %rax, %r9 - movq %r8, %rax - mulq (%rsi) - movq %rax, (%rdi) - addq %r9, %rdx - movq %rdx, 8(%rdi) - adcq $0, %rcx - movq %rcx, 16(%rdi) - retq - - .globl _mcl_fpDbl_mulPre2L - .p2align 4, 0x90 -_mcl_fpDbl_mulPre2L: ## @mcl_fpDbl_mulPre2L -## BB#0: - pushq %r14 - pushq %rbx - movq %rdx, %r10 - movq (%rsi), %r8 - movq 8(%rsi), %r11 - movq (%r10), %rcx - movq %r8, %rax - mulq %rcx - movq %rdx, %r9 - movq %rax, (%rdi) - movq %r11, %rax - mulq %rcx - movq %rdx, %r14 - movq %rax, %rsi - addq %r9, %rsi - adcq $0, %r14 - movq 8(%r10), %rbx - movq %r11, %rax - mulq %rbx - movq %rdx, %r9 - movq %rax, %rcx - movq %r8, %rax - mulq %rbx - addq %rsi, %rax - movq %rax, 8(%rdi) - adcq %r14, %rcx - sbbq %rax, %rax - andl $1, %eax - addq %rdx, %rcx - movq %rcx, 16(%rdi) - adcq %r9, %rax - movq %rax, 24(%rdi) - popq %rbx - popq %r14 - retq - - .globl _mcl_fpDbl_sqrPre2L - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre2L: ## @mcl_fpDbl_sqrPre2L -## BB#0: - movq (%rsi), %rcx - movq 8(%rsi), %r8 - movq %rcx, %rax - mulq %rcx - movq %rdx, %rsi - movq %rax, (%rdi) - movq %r8, %rax - mulq %rcx - movq %rdx, %r9 - movq %rax, %r10 - addq %r10, %rsi - movq %r9, %rcx - adcq $0, %rcx - movq %r8, %rax - mulq %r8 - addq %r10, %rsi - movq %rsi, 8(%rdi) - adcq %rcx, %rax - sbbq %rcx, %rcx - andl $1, %ecx - addq %r9, %rax - movq %rax, 16(%rdi) - adcq %rdx, %rcx - movq %rcx, 24(%rdi) - retq - - .globl _mcl_fp_mont2L - .p2align 4, 0x90 -_mcl_fp_mont2L: ## @mcl_fp_mont2L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, -8(%rsp) ## 8-byte Spill - movq (%rsi), %r8 - movq 8(%rsi), %r11 - movq (%rdx), %rsi - movq 8(%rdx), %r9 - movq %r11, %rax - mulq %rsi - movq %rdx, %r15 - movq %rax, %r10 - movq %r8, %rax - mulq %rsi - movq %rax, %r14 - movq %rdx, %r13 - addq %r10, %r13 - adcq $0, %r15 - movq -8(%rcx), %r10 - movq (%rcx), %rbp - movq %r14, %rsi - imulq %r10, %rsi - movq 8(%rcx), %rdi - movq %rsi, %rax - mulq %rdi - movq %rdx, %rcx - movq %rax, %r12 - movq %rsi, %rax - mulq %rbp - movq %rdx, %rbx - addq %r12, %rbx - adcq $0, %rcx - addq %r14, %rax - adcq %r13, %rbx - adcq %r15, %rcx - sbbq %r15, %r15 - andl $1, %r15d - movq %r9, %rax - mulq %r11 - movq %rdx, %r14 - movq %rax, %r11 - movq %r9, %rax - mulq %r8 - movq %rax, %r8 - movq %rdx, %rsi - addq %r11, %rsi - adcq $0, %r14 - addq %rbx, %r8 - adcq %rcx, %rsi - adcq %r15, 
%r14 - sbbq %rbx, %rbx - andl $1, %ebx - imulq %r8, %r10 - movq %r10, %rax - mulq %rdi - movq %rdx, %rcx - movq %rax, %r9 - movq %r10, %rax - mulq %rbp - addq %r9, %rdx - adcq $0, %rcx - addq %r8, %rax - adcq %rsi, %rdx - adcq %r14, %rcx - adcq $0, %rbx - movq %rdx, %rax - subq %rbp, %rax - movq %rcx, %rsi - sbbq %rdi, %rsi - sbbq $0, %rbx - andl $1, %ebx - cmovneq %rcx, %rsi - testb %bl, %bl - cmovneq %rdx, %rax - movq -8(%rsp), %rcx ## 8-byte Reload - movq %rax, (%rcx) - movq %rsi, 8(%rcx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montNF2L - .p2align 4, 0x90 -_mcl_fp_montNF2L: ## @mcl_fp_montNF2L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, -8(%rsp) ## 8-byte Spill - movq (%rsi), %r8 - movq 8(%rsi), %r11 - movq (%rdx), %rbp - movq 8(%rdx), %r9 - movq %r8, %rax - mulq %rbp - movq %rax, %rsi - movq %rdx, %r14 - movq -8(%rcx), %r10 - movq (%rcx), %r15 - movq %rsi, %rbx - imulq %r10, %rbx - movq 8(%rcx), %rdi - movq %rbx, %rax - mulq %rdi - movq %rdx, -16(%rsp) ## 8-byte Spill - movq %rax, %r13 - movq %rbx, %rax - mulq %r15 - movq %rdx, %r12 - movq %rax, %rbx - movq %r11, %rax - mulq %rbp - movq %rdx, %rcx - movq %rax, %rbp - addq %r14, %rbp - adcq $0, %rcx - addq %rsi, %rbx - adcq %r13, %rbp - adcq $0, %rcx - addq %r12, %rbp - adcq -16(%rsp), %rcx ## 8-byte Folded Reload - movq %r9, %rax - mulq %r11 - movq %rdx, %rsi - movq %rax, %r11 - movq %r9, %rax - mulq %r8 - movq %rax, %r8 - movq %rdx, %rbx - addq %r11, %rbx - adcq $0, %rsi - addq %rbp, %r8 - adcq %rcx, %rbx - adcq $0, %rsi - imulq %r8, %r10 - movq %r10, %rax - mulq %rdi - movq %rdx, %rcx - movq %rax, %rbp - movq %r10, %rax - mulq %r15 - addq %r8, %rax - adcq %rbx, %rbp - adcq $0, %rsi - addq %rdx, %rbp - adcq %rcx, %rsi - movq %rbp, %rax - subq %r15, %rax - movq %rsi, %rcx - sbbq %rdi, %rcx - cmovsq %rbp, %rax - movq -8(%rsp), %rdx ## 8-byte Reload - movq %rax, (%rdx) - cmovsq %rsi, %rcx - movq %rcx, 8(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montRed2L - .p2align 4, 0x90 -_mcl_fp_montRed2L: ## @mcl_fp_montRed2L -## BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq -8(%rdx), %r9 - movq (%rdx), %r11 - movq (%rsi), %rbx - movq %rbx, %rcx - imulq %r9, %rcx - movq 8(%rdx), %r14 - movq %rcx, %rax - mulq %r14 - movq %rdx, %r8 - movq %rax, %r10 - movq %rcx, %rax - mulq %r11 - movq %rdx, %rcx - addq %r10, %rcx - adcq $0, %r8 - movq 24(%rsi), %r15 - addq %rbx, %rax - adcq 8(%rsi), %rcx - adcq 16(%rsi), %r8 - adcq $0, %r15 - sbbq %rbx, %rbx - andl $1, %ebx - imulq %rcx, %r9 - movq %r9, %rax - mulq %r14 - movq %rdx, %rsi - movq %rax, %r10 - movq %r9, %rax - mulq %r11 - addq %r10, %rdx - adcq $0, %rsi - addq %rcx, %rax - adcq %r8, %rdx - adcq %r15, %rsi - adcq $0, %rbx - movq %rdx, %rax - subq %r11, %rax - movq %rsi, %rcx - sbbq %r14, %rcx - sbbq $0, %rbx - andl $1, %ebx - cmovneq %rsi, %rcx - testb %bl, %bl - cmovneq %rdx, %rax - movq %rax, (%rdi) - movq %rcx, 8(%rdi) - popq %rbx - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_addPre2L - .p2align 4, 0x90 -_mcl_fp_addPre2L: ## @mcl_fp_addPre2L -## BB#0: - movq (%rdx), %rax - movq 8(%rdx), %rcx - addq (%rsi), %rax - adcq 8(%rsi), %rcx - movq %rax, (%rdi) - movq %rcx, 8(%rdi) - sbbq %rax, %rax - andl $1, %eax - retq - - .globl _mcl_fp_subPre2L - .p2align 4, 0x90 -_mcl_fp_subPre2L: ## @mcl_fp_subPre2L -## BB#0: - movq (%rsi), %rcx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rcx - sbbq 8(%rdx), %rsi - movq %rcx, 
(%rdi) - movq %rsi, 8(%rdi) - sbbq $0, %rax - andl $1, %eax - retq - - .globl _mcl_fp_shr1_2L - .p2align 4, 0x90 -_mcl_fp_shr1_2L: ## @mcl_fp_shr1_2L -## BB#0: - movq (%rsi), %rax - movq 8(%rsi), %rcx - shrdq $1, %rcx, %rax - movq %rax, (%rdi) - shrq %rcx - movq %rcx, 8(%rdi) - retq - - .globl _mcl_fp_add2L - .p2align 4, 0x90 -_mcl_fp_add2L: ## @mcl_fp_add2L -## BB#0: - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq $0, %rsi - testb $1, %sil - jne LBB29_2 -## BB#1: ## %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) -LBB29_2: ## %carry - retq - - .globl _mcl_fp_addNF2L - .p2align 4, 0x90 -_mcl_fp_addNF2L: ## @mcl_fp_addNF2L -## BB#0: - movq (%rdx), %rax - movq 8(%rdx), %r8 - addq (%rsi), %rax - adcq 8(%rsi), %r8 - movq %rax, %rsi - subq (%rcx), %rsi - movq %r8, %rdx - sbbq 8(%rcx), %rdx - testq %rdx, %rdx - cmovsq %rax, %rsi - movq %rsi, (%rdi) - cmovsq %r8, %rdx - movq %rdx, 8(%rdi) - retq - - .globl _mcl_fp_sub2L - .p2align 4, 0x90 -_mcl_fp_sub2L: ## @mcl_fp_sub2L -## BB#0: - movq (%rsi), %rax - movq 8(%rsi), %r8 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r8 - movq %rax, (%rdi) - movq %r8, 8(%rdi) - sbbq $0, %rsi - testb $1, %sil - jne LBB31_2 -## BB#1: ## %nocarry - retq -LBB31_2: ## %carry - movq 8(%rcx), %rdx - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %r8, %rdx - movq %rdx, 8(%rdi) - retq - - .globl _mcl_fp_subNF2L - .p2align 4, 0x90 -_mcl_fp_subNF2L: ## @mcl_fp_subNF2L -## BB#0: - movq (%rsi), %r8 - movq 8(%rsi), %rsi - subq (%rdx), %r8 - sbbq 8(%rdx), %rsi - movq %rsi, %rdx - sarq $63, %rdx - movq 8(%rcx), %rax - andq %rdx, %rax - andq (%rcx), %rdx - addq %r8, %rdx - movq %rdx, (%rdi) - adcq %rsi, %rax - movq %rax, 8(%rdi) - retq - - .globl _mcl_fpDbl_add2L - .p2align 4, 0x90 -_mcl_fpDbl_add2L: ## @mcl_fpDbl_add2L -## BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rdx), %r10 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r10 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - adcq %r8, %r9 - sbbq %rax, %rax - andl $1, %eax - movq %r10, %rdx - subq (%rcx), %rdx - movq %r9, %rsi - sbbq 8(%rcx), %rsi - sbbq $0, %rax - andl $1, %eax - cmovneq %r10, %rdx - movq %rdx, 16(%rdi) - testb %al, %al - cmovneq %r9, %rsi - movq %rsi, 24(%rdi) - retq - - .globl _mcl_fpDbl_sub2L - .p2align 4, 0x90 -_mcl_fpDbl_sub2L: ## @mcl_fpDbl_sub2L -## BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rsi), %r10 - movq (%rsi), %r11 - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %r11 - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %r10 - movq %r11, (%rdi) - movq %rsi, 8(%rdi) - sbbq %r8, %r9 - movl $0, %edx - sbbq $0, %rdx - andl $1, %edx - movq (%rcx), %rsi - cmoveq %rax, %rsi - testb %dl, %dl - cmovneq 8(%rcx), %rax - addq %r10, %rsi - movq %rsi, 16(%rdi) - adcq %r9, %rax - movq %rax, 24(%rdi) - retq - - .globl _mcl_fp_mulUnitPre3L - .p2align 4, 0x90 -_mcl_fp_mulUnitPre3L: ## @mcl_fp_mulUnitPre3L -## BB#0: - movq %rdx, %rcx - movq %rcx, %rax - mulq 16(%rsi) - movq %rdx, %r8 - movq %rax, %r9 - movq %rcx, %rax - mulq 8(%rsi) - movq %rdx, %r10 - movq %rax, %r11 - movq %rcx, %rax - mulq (%rsi) - movq %rax, (%rdi) - addq %r11, %rdx - movq %rdx, 8(%rdi) - adcq %r9, %r10 - movq %r10, 16(%rdi) - adcq $0, %r8 - movq %r8, 24(%rdi) - retq - - .globl _mcl_fpDbl_mulPre3L - .p2align 4, 0x90 -_mcl_fpDbl_mulPre3L: ## @mcl_fpDbl_mulPre3L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq 
%r13 - pushq %r12 - pushq %rbx - movq %rdx, %r10 - movq (%rsi), %r8 - movq 8(%rsi), %r9 - movq (%r10), %rbx - movq %r8, %rax - mulq %rbx - movq %rdx, %rcx - movq 16(%rsi), %r11 - movq %rax, (%rdi) - movq %r11, %rax - mulq %rbx - movq %rdx, %r14 - movq %rax, %rsi - movq %r9, %rax - mulq %rbx - movq %rdx, %r15 - movq %rax, %rbx - addq %rcx, %rbx - adcq %rsi, %r15 - adcq $0, %r14 - movq 8(%r10), %rcx - movq %r11, %rax - mulq %rcx - movq %rdx, %r12 - movq %rax, %rbp - movq %r9, %rax - mulq %rcx - movq %rdx, %r13 - movq %rax, %rsi - movq %r8, %rax - mulq %rcx - addq %rbx, %rax - movq %rax, 8(%rdi) - adcq %r15, %rsi - adcq %r14, %rbp - sbbq %r14, %r14 - andl $1, %r14d - addq %rdx, %rsi - adcq %r13, %rbp - adcq %r12, %r14 - movq 16(%r10), %r15 - movq %r11, %rax - mulq %r15 - movq %rdx, %r10 - movq %rax, %rbx - movq %r9, %rax - mulq %r15 - movq %rdx, %r9 - movq %rax, %rcx - movq %r8, %rax - mulq %r15 - addq %rsi, %rax - movq %rax, 16(%rdi) - adcq %rbp, %rcx - adcq %r14, %rbx - sbbq %rax, %rax - andl $1, %eax - addq %rdx, %rcx - movq %rcx, 24(%rdi) - adcq %r9, %rbx - movq %rbx, 32(%rdi) - adcq %r10, %rax - movq %rax, 40(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sqrPre3L - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre3L: ## @mcl_fpDbl_sqrPre3L -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 16(%rsi), %r10 - movq (%rsi), %rcx - movq 8(%rsi), %rsi - movq %rcx, %rax - mulq %rcx - movq %rdx, %rbx - movq %rax, (%rdi) - movq %r10, %rax - mulq %rcx - movq %rdx, %r8 - movq %rax, %r11 - movq %rsi, %rax - mulq %rcx - movq %rdx, %r14 - movq %rax, %r12 - addq %r12, %rbx - movq %r14, %r13 - adcq %r11, %r13 - movq %r8, %rcx - adcq $0, %rcx - movq %r10, %rax - mulq %rsi - movq %rdx, %r9 - movq %rax, %r15 - movq %rsi, %rax - mulq %rsi - movq %rax, %rsi - addq %r12, %rbx - movq %rbx, 8(%rdi) - adcq %r13, %rsi - adcq %r15, %rcx - sbbq %rbx, %rbx - andl $1, %ebx - addq %r14, %rsi - adcq %rdx, %rcx - adcq %r9, %rbx - movq %r10, %rax - mulq %r10 - addq %r11, %rsi - movq %rsi, 16(%rdi) - adcq %r15, %rcx - adcq %rbx, %rax - sbbq %rsi, %rsi - andl $1, %esi - addq %r8, %rcx - movq %rcx, 24(%rdi) - adcq %r9, %rax - movq %rax, 32(%rdi) - adcq %rdx, %rsi - movq %rsi, 40(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_mont3L - .p2align 4, 0x90 -_mcl_fp_mont3L: ## @mcl_fp_mont3L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, -8(%rsp) ## 8-byte Spill - movq 16(%rsi), %r10 - movq (%rdx), %rdi - movq %rdx, %r11 - movq %r11, -16(%rsp) ## 8-byte Spill - movq %r10, %rax - movq %r10, -24(%rsp) ## 8-byte Spill - mulq %rdi - movq %rax, %rbx - movq %rdx, %r15 - movq (%rsi), %rbp - movq %rbp, -64(%rsp) ## 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -32(%rsp) ## 8-byte Spill - mulq %rdi - movq %rdx, %r12 - movq %rax, %rsi - movq %rbp, %rax - mulq %rdi - movq %rax, %r8 - movq %rdx, %r13 - addq %rsi, %r13 - adcq %rbx, %r12 - adcq $0, %r15 - movq -8(%rcx), %r14 - movq %r8, %rbp - imulq %r14, %rbp - movq 16(%rcx), %rdx - movq %rdx, -56(%rsp) ## 8-byte Spill - movq %rbp, %rax - mulq %rdx - movq %rax, %r9 - movq %rdx, %rbx - movq (%rcx), %rdi - movq %rdi, -40(%rsp) ## 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -48(%rsp) ## 8-byte Spill - movq %rbp, %rax - mulq %rcx - movq %rdx, %rsi - movq %rax, %rcx - movq %rbp, %rax - mulq %rdi - movq %rdx, %rbp - addq %rcx, %rbp - adcq %r9, %rsi - adcq $0, %rbx - addq %r8, %rax - adcq %r13, %rbp - movq 8(%r11), %rcx - 
adcq %r12, %rsi - adcq %r15, %rbx - sbbq %rdi, %rdi - andl $1, %edi - movq %rcx, %rax - mulq %r10 - movq %rdx, %r15 - movq %rax, %r8 - movq %rcx, %rax - movq -32(%rsp), %r10 ## 8-byte Reload - mulq %r10 - movq %rdx, %r12 - movq %rax, %r9 - movq %rcx, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rax, %r13 - movq %rdx, %rcx - addq %r9, %rcx - adcq %r8, %r12 - adcq $0, %r15 - addq %rbp, %r13 - adcq %rsi, %rcx - adcq %rbx, %r12 - adcq %rdi, %r15 - sbbq %r11, %r11 - andl $1, %r11d - movq %r13, %rdi - imulq %r14, %rdi - movq %rdi, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r8 - movq %rdi, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r9 - movq %rdi, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - addq %r9, %rbp - adcq %r8, %rsi - adcq $0, %rbx - addq %r13, %rax - adcq %rcx, %rbp - adcq %r12, %rsi - adcq %r15, %rbx - adcq $0, %r11 - movq -16(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rcx - movq %rcx, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r15 - movq %rcx, %rax - mulq %r10 - movq %rdx, %r10 - movq %rax, %rdi - movq %rcx, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rax, %r9 - movq %rdx, %rcx - addq %rdi, %rcx - adcq %r15, %r10 - adcq $0, %r8 - addq %rbp, %r9 - adcq %rsi, %rcx - adcq %rbx, %r10 - adcq %r11, %r8 - sbbq %rdi, %rdi - andl $1, %edi - imulq %r9, %r14 - movq %r14, %rax - movq -56(%rsp), %r15 ## 8-byte Reload - mulq %r15 - movq %rdx, %rbx - movq %rax, %r11 - movq %r14, %rax - movq -48(%rsp), %r12 ## 8-byte Reload - mulq %r12 - movq %rdx, %rsi - movq %rax, %r13 - movq %r14, %rax - movq -40(%rsp), %rbp ## 8-byte Reload - mulq %rbp - addq %r13, %rdx - adcq %r11, %rsi - adcq $0, %rbx - addq %r9, %rax - adcq %rcx, %rdx - adcq %r10, %rsi - adcq %r8, %rbx - adcq $0, %rdi - movq %rdx, %rax - subq %rbp, %rax - movq %rsi, %rcx - sbbq %r12, %rcx - movq %rbx, %rbp - sbbq %r15, %rbp - sbbq $0, %rdi - andl $1, %edi - cmovneq %rbx, %rbp - testb %dil, %dil - cmovneq %rdx, %rax - movq -8(%rsp), %rdx ## 8-byte Reload - movq %rax, (%rdx) - cmovneq %rsi, %rcx - movq %rcx, 8(%rdx) - movq %rbp, 16(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montNF3L - .p2align 4, 0x90 -_mcl_fp_montNF3L: ## @mcl_fp_montNF3L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %r10 - movq %rdi, -8(%rsp) ## 8-byte Spill - movq 16(%rsi), %r11 - movq (%r10), %rbp - movq %r10, -16(%rsp) ## 8-byte Spill - movq %r11, %rax - movq %r11, -24(%rsp) ## 8-byte Spill - mulq %rbp - movq %rax, %r14 - movq %rdx, %r15 - movq (%rsi), %rbx - movq %rbx, -48(%rsp) ## 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -64(%rsp) ## 8-byte Spill - mulq %rbp - movq %rdx, %rdi - movq %rax, %r8 - movq %rbx, %rax - mulq %rbp - movq %rax, %r13 - movq %rdx, %rbp - addq %r8, %rbp - adcq %r14, %rdi - adcq $0, %r15 - movq -8(%rcx), %r14 - movq %r13, %rbx - imulq %r14, %rbx - movq 16(%rcx), %rdx - movq %rdx, -56(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq %rdx - movq %rax, %r12 - movq %rdx, %r8 - movq (%rcx), %rsi - movq %rsi, -32(%rsp) ## 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -40(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq %rcx - movq %rdx, %r9 - movq %rax, %rcx - movq %rbx, %rax - mulq %rsi - addq %r13, %rax - adcq %rbp, %rcx - adcq %rdi, %r12 - adcq $0, %r15 - addq %rdx, %rcx - movq 8(%r10), %rbp - adcq %r9, %r12 - adcq %r8, %r15 - movq %rbp, %rax - mulq %r11 - movq %rdx, %rsi - movq %rax, %r8 - movq 
%rbp, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r9 - movq %rbp, %rax - movq -48(%rsp), %r10 ## 8-byte Reload - mulq %r10 - movq %rax, %r13 - movq %rdx, %rbp - addq %r9, %rbp - adcq %r8, %rbx - adcq $0, %rsi - addq %rcx, %r13 - adcq %r12, %rbp - adcq %r15, %rbx - adcq $0, %rsi - movq %r13, %rcx - imulq %r14, %rcx - movq %rcx, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r15 - movq %rcx, %rax - movq -40(%rsp), %rdi ## 8-byte Reload - mulq %rdi - movq %rdx, %r9 - movq %rax, %r12 - movq %rcx, %rax - movq -32(%rsp), %r11 ## 8-byte Reload - mulq %r11 - addq %r13, %rax - adcq %rbp, %r12 - adcq %rbx, %r15 - adcq $0, %rsi - addq %rdx, %r12 - adcq %r9, %r15 - adcq %r8, %rsi - movq -16(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rbx - movq %rbx, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r8 - movq %rbx, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r9 - movq %rbx, %rax - mulq %r10 - movq %rax, %r10 - movq %rdx, %rbx - addq %r9, %rbx - adcq %r8, %rcx - adcq $0, %rbp - addq %r12, %r10 - adcq %r15, %rbx - adcq %rsi, %rcx - adcq $0, %rbp - imulq %r10, %r14 - movq %r14, %rax - movq -56(%rsp), %r15 ## 8-byte Reload - mulq %r15 - movq %rdx, %r8 - movq %rax, %rsi - movq %r14, %rax - movq %rdi, %r12 - mulq %r12 - movq %rdx, %r9 - movq %rax, %rdi - movq %r14, %rax - mulq %r11 - addq %r10, %rax - adcq %rbx, %rdi - adcq %rcx, %rsi - adcq $0, %rbp - addq %rdx, %rdi - adcq %r9, %rsi - adcq %r8, %rbp - movq %rdi, %rax - subq %r11, %rax - movq %rsi, %rcx - sbbq %r12, %rcx - movq %rbp, %rbx - sbbq %r15, %rbx - movq %rbx, %rdx - sarq $63, %rdx - cmovsq %rdi, %rax - movq -8(%rsp), %rdx ## 8-byte Reload - movq %rax, (%rdx) - cmovsq %rsi, %rcx - movq %rcx, 8(%rdx) - cmovsq %rbp, %rbx - movq %rbx, 16(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montRed3L - .p2align 4, 0x90 -_mcl_fp_montRed3L: ## @mcl_fp_montRed3L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rdi, -8(%rsp) ## 8-byte Spill - movq -8(%rcx), %r9 - movq (%rcx), %rdi - movq (%rsi), %r15 - movq %r15, %rbx - imulq %r9, %rbx - movq 16(%rcx), %rbp - movq %rbx, %rax - mulq %rbp - movq %rbp, -24(%rsp) ## 8-byte Spill - movq %rax, %r11 - movq %rdx, %r8 - movq 8(%rcx), %rcx - movq %rbx, %rax - mulq %rcx - movq %rcx, %r12 - movq %r12, -32(%rsp) ## 8-byte Spill - movq %rdx, %r10 - movq %rax, %r14 - movq %rbx, %rax - mulq %rdi - movq %rdi, %rbx - movq %rbx, -16(%rsp) ## 8-byte Spill - movq %rdx, %rcx - addq %r14, %rcx - adcq %r11, %r10 - adcq $0, %r8 - movq 40(%rsi), %rdi - movq 32(%rsi), %r13 - addq %r15, %rax - adcq 8(%rsi), %rcx - adcq 16(%rsi), %r10 - adcq 24(%rsi), %r8 - adcq $0, %r13 - adcq $0, %rdi - sbbq %r15, %r15 - andl $1, %r15d - movq %rcx, %rsi - imulq %r9, %rsi - movq %rsi, %rax - mulq %rbp - movq %rdx, %r11 - movq %rax, %rbp - movq %rsi, %rax - mulq %r12 - movq %rdx, %r14 - movq %rax, %r12 - movq %rsi, %rax - mulq %rbx - movq %rdx, %rbx - addq %r12, %rbx - adcq %rbp, %r14 - adcq $0, %r11 - addq %rcx, %rax - adcq %r10, %rbx - adcq %r8, %r14 - adcq %r13, %r11 - adcq $0, %rdi - adcq $0, %r15 - imulq %rbx, %r9 - movq %r9, %rax - movq -24(%rsp), %r12 ## 8-byte Reload - mulq %r12 - movq %rdx, %rbp - movq %rax, %r8 - movq %r9, %rax - movq -32(%rsp), %r13 ## 8-byte Reload - mulq %r13 - movq %rdx, %rsi - movq %rax, %r10 - movq %r9, %rax - movq -16(%rsp), %rcx ## 8-byte Reload - mulq %rcx - addq %r10, %rdx 
- adcq %r8, %rsi - adcq $0, %rbp - addq %rbx, %rax - adcq %r14, %rdx - adcq %r11, %rsi - adcq %rdi, %rbp - adcq $0, %r15 - movq %rdx, %rax - subq %rcx, %rax - movq %rsi, %rdi - sbbq %r13, %rdi - movq %rbp, %rcx - sbbq %r12, %rcx - sbbq $0, %r15 - andl $1, %r15d - cmovneq %rbp, %rcx - testb %r15b, %r15b - cmovneq %rdx, %rax - movq -8(%rsp), %rdx ## 8-byte Reload - movq %rax, (%rdx) - cmovneq %rsi, %rdi - movq %rdi, 8(%rdx) - movq %rcx, 16(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_addPre3L - .p2align 4, 0x90 -_mcl_fp_addPre3L: ## @mcl_fp_addPre3L -## BB#0: - movq 16(%rdx), %rax - movq (%rdx), %rcx - movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rax - movq %rcx, (%rdi) - movq %rdx, 8(%rdi) - movq %rax, 16(%rdi) - sbbq %rax, %rax - andl $1, %eax - retq - - .globl _mcl_fp_subPre3L - .p2align 4, 0x90 -_mcl_fp_subPre3L: ## @mcl_fp_subPre3L -## BB#0: - movq 16(%rsi), %r8 - movq (%rsi), %rcx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rcx - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %r8 - movq %rcx, (%rdi) - movq %rsi, 8(%rdi) - movq %r8, 16(%rdi) - sbbq $0, %rax - andl $1, %eax - retq - - .globl _mcl_fp_shr1_3L - .p2align 4, 0x90 -_mcl_fp_shr1_3L: ## @mcl_fp_shr1_3L -## BB#0: - movq 16(%rsi), %rax - movq (%rsi), %rcx - movq 8(%rsi), %rdx - shrdq $1, %rdx, %rcx - movq %rcx, (%rdi) - shrdq $1, %rax, %rdx - movq %rdx, 8(%rdi) - shrq %rax - movq %rax, 16(%rdi) - retq - - .globl _mcl_fp_add3L - .p2align 4, 0x90 -_mcl_fp_add3L: ## @mcl_fp_add3L -## BB#0: - movq 16(%rdx), %r8 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r8 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r8, 16(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq 16(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne LBB44_2 -## BB#1: ## %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r8, 16(%rdi) -LBB44_2: ## %carry - retq - - .globl _mcl_fp_addNF3L - .p2align 4, 0x90 -_mcl_fp_addNF3L: ## @mcl_fp_addNF3L -## BB#0: - movq 16(%rdx), %r8 - movq (%rdx), %r10 - movq 8(%rdx), %r9 - addq (%rsi), %r10 - adcq 8(%rsi), %r9 - adcq 16(%rsi), %r8 - movq %r10, %rsi - subq (%rcx), %rsi - movq %r9, %rdx - sbbq 8(%rcx), %rdx - movq %r8, %rax - sbbq 16(%rcx), %rax - movq %rax, %rcx - sarq $63, %rcx - cmovsq %r10, %rsi - movq %rsi, (%rdi) - cmovsq %r9, %rdx - movq %rdx, 8(%rdi) - cmovsq %r8, %rax - movq %rax, 16(%rdi) - retq - - .globl _mcl_fp_sub3L - .p2align 4, 0x90 -_mcl_fp_sub3L: ## @mcl_fp_sub3L -## BB#0: - movq 16(%rsi), %r8 - movq (%rsi), %rax - movq 8(%rsi), %r9 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r9 - sbbq 16(%rdx), %r8 - movq %rax, (%rdi) - movq %r9, 8(%rdi) - movq %r8, 16(%rdi) - sbbq $0, %rsi - testb $1, %sil - jne LBB46_2 -## BB#1: ## %nocarry - retq -LBB46_2: ## %carry - movq 8(%rcx), %rdx - movq 16(%rcx), %rsi - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %r9, %rdx - movq %rdx, 8(%rdi) - adcq %r8, %rsi - movq %rsi, 16(%rdi) - retq - - .globl _mcl_fp_subNF3L - .p2align 4, 0x90 -_mcl_fp_subNF3L: ## @mcl_fp_subNF3L -## BB#0: - movq 16(%rsi), %r10 - movq (%rsi), %r8 - movq 8(%rsi), %r9 - subq (%rdx), %r8 - sbbq 8(%rdx), %r9 - sbbq 16(%rdx), %r10 - movq %r10, %rdx - sarq $63, %rdx - movq %rdx, %rsi - shldq $1, %r10, %rsi - andq (%rcx), %rsi - movq 16(%rcx), %rax - andq %rdx, %rax - andq 8(%rcx), %rdx - addq %r8, %rsi - movq %rsi, (%rdi) - adcq %r9, %rdx - movq %rdx, 8(%rdi) - adcq %r10, %rax - movq %rax, 16(%rdi) - retq - - .globl 
_mcl_fpDbl_add3L - .p2align 4, 0x90 -_mcl_fpDbl_add3L: ## @mcl_fpDbl_add3L -## BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq 40(%rdx), %r10 - movq 40(%rsi), %r8 - movq 32(%rdx), %r11 - movq 24(%rdx), %r14 - movq 24(%rsi), %r15 - movq 32(%rsi), %r9 - movq 16(%rdx), %rbx - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rbx - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %rbx, 16(%rdi) - adcq %r14, %r15 - adcq %r11, %r9 - adcq %r10, %r8 - sbbq %rax, %rax - andl $1, %eax - movq %r15, %rdx - subq (%rcx), %rdx - movq %r9, %rsi - sbbq 8(%rcx), %rsi - movq %r8, %rbx - sbbq 16(%rcx), %rbx - sbbq $0, %rax - andl $1, %eax - cmovneq %r15, %rdx - movq %rdx, 24(%rdi) - testb %al, %al - cmovneq %r9, %rsi - movq %rsi, 32(%rdi) - cmovneq %r8, %rbx - movq %rbx, 40(%rdi) - popq %rbx - popq %r14 - popq %r15 - retq - - .globl _mcl_fpDbl_sub3L - .p2align 4, 0x90 -_mcl_fpDbl_sub3L: ## @mcl_fpDbl_sub3L -## BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 40(%rdx), %r10 - movq 40(%rsi), %r8 - movq 32(%rsi), %r9 - movq 24(%rsi), %r11 - movq 16(%rsi), %r14 - movq (%rsi), %rbx - movq 8(%rsi), %rax - xorl %esi, %esi - subq (%rdx), %rbx - sbbq 8(%rdx), %rax - movq 24(%rdx), %r15 - movq 32(%rdx), %r12 - sbbq 16(%rdx), %r14 - movq %rbx, (%rdi) - movq %rax, 8(%rdi) - movq %r14, 16(%rdi) - sbbq %r15, %r11 - sbbq %r12, %r9 - sbbq %r10, %r8 - movl $0, %eax - sbbq $0, %rax - andl $1, %eax - movq (%rcx), %rdx - cmoveq %rsi, %rdx - testb %al, %al - movq 16(%rcx), %rax - cmoveq %rsi, %rax - cmovneq 8(%rcx), %rsi - addq %r11, %rdx - movq %rdx, 24(%rdi) - adcq %r9, %rsi - movq %rsi, 32(%rdi) - adcq %r8, %rax - movq %rax, 40(%rdi) - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_mulUnitPre4L - .p2align 4, 0x90 -_mcl_fp_mulUnitPre4L: ## @mcl_fp_mulUnitPre4L -## BB#0: - pushq %r14 - pushq %rbx - movq %rdx, %rcx - movq %rcx, %rax - mulq 24(%rsi) - movq %rdx, %r8 - movq %rax, %r9 - movq %rcx, %rax - mulq 16(%rsi) - movq %rdx, %r10 - movq %rax, %r11 - movq %rcx, %rax - mulq 8(%rsi) - movq %rdx, %rbx - movq %rax, %r14 - movq %rcx, %rax - mulq (%rsi) - movq %rax, (%rdi) - addq %r14, %rdx - movq %rdx, 8(%rdi) - adcq %r11, %rbx - movq %rbx, 16(%rdi) - adcq %r9, %r10 - movq %r10, 24(%rdi) - adcq $0, %r8 - movq %r8, 32(%rdi) - popq %rbx - popq %r14 - retq - - .globl _mcl_fpDbl_mulPre4L - .p2align 4, 0x90 -_mcl_fpDbl_mulPre4L: ## @mcl_fpDbl_mulPre4L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq (%rsi), %rax - movq %rax, -64(%rsp) ## 8-byte Spill - movq 8(%rsi), %r8 - movq %r8, -56(%rsp) ## 8-byte Spill - movq (%rdx), %rbx - movq %rdx, %rbp - mulq %rbx - movq %rdx, %r15 - movq 16(%rsi), %rcx - movq 24(%rsi), %r11 - movq %rax, (%rdi) - movq %r11, %rax - mulq %rbx - movq %rdx, %r12 - movq %rax, %r14 - movq %rcx, %rax - movq %rcx, -16(%rsp) ## 8-byte Spill - mulq %rbx - movq %rdx, %r10 - movq %rax, %r9 - movq %r8, %rax - mulq %rbx - movq %rdx, %r13 - movq %rax, %r8 - addq %r15, %r8 - adcq %r9, %r13 - adcq %r14, %r10 - adcq $0, %r12 - movq %rbp, %r9 - movq %r9, -8(%rsp) ## 8-byte Spill - movq 8(%r9), %rbp - movq %r11, %rax - mulq %rbp - movq %rdx, -24(%rsp) ## 8-byte Spill - movq %rax, %r15 - movq %rcx, %rax - mulq %rbp - movq %rdx, -32(%rsp) ## 8-byte Spill - movq %rax, %rcx - movq -56(%rsp), %r14 ## 8-byte Reload - movq %r14, %rax - mulq %rbp - movq %rdx, -40(%rsp) ## 8-byte Spill - movq %rax, %rbx - movq -64(%rsp), %rax ## 8-byte Reload - mulq %rbp - movq %rdx, -48(%rsp) ## 8-byte Spill - addq %r8, 
%rax - movq %rax, 8(%rdi) - adcq %r13, %rbx - adcq %r10, %rcx - adcq %r12, %r15 - sbbq %r13, %r13 - movq 16(%r9), %rbp - movq %r14, %rax - mulq %rbp - movq %rax, %r12 - movq %rdx, %r14 - andl $1, %r13d - addq -48(%rsp), %rbx ## 8-byte Folded Reload - adcq -40(%rsp), %rcx ## 8-byte Folded Reload - adcq -32(%rsp), %r15 ## 8-byte Folded Reload - adcq -24(%rsp), %r13 ## 8-byte Folded Reload - movq %r11, %rax - mulq %rbp - movq %rdx, %r8 - movq %rax, %r11 - movq -16(%rsp), %rax ## 8-byte Reload - mulq %rbp - movq %rdx, %r9 - movq %rax, %r10 - movq -64(%rsp), %rax ## 8-byte Reload - mulq %rbp - addq %rbx, %rax - movq %rax, 16(%rdi) - adcq %r12, %rcx - adcq %r15, %r10 - adcq %r13, %r11 - sbbq %r13, %r13 - andl $1, %r13d - addq %rdx, %rcx - adcq %r14, %r10 - adcq %r9, %r11 - adcq %r8, %r13 - movq -8(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rbx - movq %rbx, %rax - mulq 24(%rsi) - movq %rdx, %r8 - movq %rax, %r14 - movq %rbx, %rax - mulq 16(%rsi) - movq %rdx, %r9 - movq %rax, %r12 - movq %rbx, %rax - mulq 8(%rsi) - movq %rdx, %r15 - movq %rax, %rbp - movq %rbx, %rax - mulq (%rsi) - addq %rcx, %rax - movq %rax, 24(%rdi) - adcq %r10, %rbp - adcq %r11, %r12 - adcq %r13, %r14 - sbbq %rax, %rax - andl $1, %eax - addq %rdx, %rbp - movq %rbp, 32(%rdi) - adcq %r15, %r12 - movq %r12, 40(%rdi) - adcq %r9, %r14 - movq %r14, 48(%rdi) - adcq %r8, %rax - movq %rax, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sqrPre4L - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre4L: ## @mcl_fpDbl_sqrPre4L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rsi, %r10 - movq 16(%r10), %r9 - movq 24(%r10), %r11 - movq (%r10), %r15 - movq 8(%r10), %r8 - movq %r15, %rax - mulq %r15 - movq %rdx, %rbp - movq %rax, (%rdi) - movq %r11, %rax - mulq %r8 - movq %rdx, -8(%rsp) ## 8-byte Spill - movq %rax, -32(%rsp) ## 8-byte Spill - movq %r9, %rax - mulq %r8 - movq %rdx, -24(%rsp) ## 8-byte Spill - movq %rax, -40(%rsp) ## 8-byte Spill - movq %r11, %rax - mulq %r15 - movq %rdx, %rbx - movq %rax, %rcx - movq %r9, %rax - mulq %r15 - movq %rdx, %rsi - movq %rsi, -16(%rsp) ## 8-byte Spill - movq %rax, %r12 - movq %r8, %rax - mulq %r8 - movq %rdx, %r13 - movq %rax, %r14 - movq %r8, %rax - mulq %r15 - addq %rax, %rbp - movq %rdx, %r8 - adcq %r12, %r8 - adcq %rsi, %rcx - adcq $0, %rbx - addq %rax, %rbp - movq %rbp, 8(%rdi) - adcq %r14, %r8 - movq -40(%rsp), %rsi ## 8-byte Reload - adcq %rsi, %rcx - adcq -32(%rsp), %rbx ## 8-byte Folded Reload - sbbq %rbp, %rbp - andl $1, %ebp - addq %rdx, %r8 - adcq %r13, %rcx - movq -24(%rsp), %r15 ## 8-byte Reload - adcq %r15, %rbx - adcq -8(%rsp), %rbp ## 8-byte Folded Reload - movq %r11, %rax - mulq %r9 - movq %rdx, %r14 - movq %rax, %r11 - movq %r9, %rax - mulq %r9 - movq %rax, %r9 - addq %r12, %r8 - movq %r8, 16(%rdi) - adcq %rsi, %rcx - adcq %rbx, %r9 - adcq %rbp, %r11 - sbbq %r12, %r12 - andl $1, %r12d - addq -16(%rsp), %rcx ## 8-byte Folded Reload - adcq %r15, %r9 - adcq %rdx, %r11 - adcq %r14, %r12 - movq 24(%r10), %rbp - movq %rbp, %rax - mulq 16(%r10) - movq %rdx, %r8 - movq %rax, %r14 - movq %rbp, %rax - mulq 8(%r10) - movq %rdx, %r13 - movq %rax, %rbx - movq %rbp, %rax - mulq (%r10) - movq %rdx, %r15 - movq %rax, %rsi - movq %rbp, %rax - mulq %rbp - addq %rcx, %rsi - movq %rsi, 24(%rdi) - adcq %r9, %rbx - adcq %r11, %r14 - adcq %r12, %rax - sbbq %rcx, %rcx - andl $1, %ecx - addq %r15, %rbx - movq %rbx, 32(%rdi) - adcq %r13, %r14 - movq %r14, 40(%rdi) - adcq %r8, %rax - movq %rax, 48(%rdi) - adcq 
%rdx, %rcx - movq %rcx, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mont4L - .p2align 4, 0x90 -_mcl_fp_mont4L: ## @mcl_fp_mont4L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -96(%rsp) ## 8-byte Spill - movq %rdi, -8(%rsp) ## 8-byte Spill - movq 24(%rsi), %rax - movq %rax, -40(%rsp) ## 8-byte Spill - movq (%rdx), %rbp - mulq %rbp - movq %rax, %r9 - movq %rdx, %r8 - movq 16(%rsi), %rax - movq %rax, -48(%rsp) ## 8-byte Spill - mulq %rbp - movq %rax, %rbx - movq %rdx, %r11 - movq (%rsi), %rdi - movq %rdi, -56(%rsp) ## 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -64(%rsp) ## 8-byte Spill - mulq %rbp - movq %rdx, %r12 - movq %rax, %rsi - movq %rdi, %rax - mulq %rbp - movq %rax, %r13 - movq %rdx, %r15 - addq %rsi, %r15 - adcq %rbx, %r12 - adcq %r9, %r11 - adcq $0, %r8 - movq -8(%rcx), %rax - movq %rax, -88(%rsp) ## 8-byte Spill - movq %r13, %rsi - imulq %rax, %rsi - movq 24(%rcx), %rdx - movq %rdx, -72(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq %rdx - movq %rax, %r10 - movq %rdx, %r9 - movq 16(%rcx), %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq %rdx - movq %rax, %r14 - movq %rdx, %rbx - movq (%rcx), %rbp - movq %rbp, -24(%rsp) ## 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -32(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq %rcx - movq %rdx, %rdi - movq %rax, %rcx - movq %rsi, %rax - mulq %rbp - movq %rdx, %rsi - addq %rcx, %rsi - adcq %r14, %rdi - adcq %r10, %rbx - adcq $0, %r9 - addq %r13, %rax - adcq %r15, %rsi - adcq %r12, %rdi - adcq %r11, %rbx - adcq %r8, %r9 - sbbq %r15, %r15 - andl $1, %r15d - movq -96(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rbp - movq %rbp, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %r10 - movq %rbp, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %r11 - movq %rbp, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r14 - movq %rbp, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rax, %r8 - movq %rdx, %rbp - addq %r14, %rbp - adcq %r11, %rcx - adcq %r10, %r13 - adcq $0, %r12 - addq %rsi, %r8 - adcq %rdi, %rbp - adcq %rbx, %rcx - adcq %r9, %r13 - adcq %r15, %r12 - sbbq %r15, %r15 - andl $1, %r15d - movq %r8, %rsi - imulq -88(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, %r10 - movq %rsi, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r11 - movq %rsi, %rax - mulq -32(%rsp) ## 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r14 - movq %rsi, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - addq %r14, %rsi - adcq %r11, %rdi - adcq %r10, %rbx - adcq $0, %r9 - addq %r8, %rax - adcq %rbp, %rsi - adcq %rcx, %rdi - adcq %r13, %rbx - adcq %r12, %r9 - adcq $0, %r15 - movq -96(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rbp - movq %rbp, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %r10 - movq %rbp, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %r11 - movq %rbp, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r14 - movq %rbp, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rax, %rbp - movq %rdx, %r8 - addq %r14, %r8 - adcq %r11, %rcx - adcq %r10, %r13 - adcq $0, %r12 - addq %rsi, %rbp - adcq %rdi, %r8 - adcq %rbx, %rcx - adcq %r9, %r13 - adcq %r15, %r12 - sbbq %r14, %r14 - movq %rbp, %rsi - imulq -88(%rsp), 
%rsi ## 8-byte Folded Reload - movq %rsi, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, -16(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r10 - movq %rsi, %rax - mulq -32(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, %r15 - movq %rsi, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - andl $1, %r14d - addq %r15, %r11 - adcq %r10, %r9 - adcq -16(%rsp), %rbx ## 8-byte Folded Reload - adcq $0, %rdi - addq %rbp, %rax - adcq %r8, %r11 - adcq %rcx, %r9 - adcq %r13, %rbx - adcq %r12, %rdi - adcq $0, %r14 - movq -96(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rcx - movq %rcx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %r15 - movq %rcx, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r13 - movq %rcx, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rax, %r10 - movq %rdx, %rbp - addq %r13, %rbp - adcq %r15, %rsi - adcq -96(%rsp), %r12 ## 8-byte Folded Reload - adcq $0, %r8 - addq %r11, %r10 - adcq %r9, %rbp - adcq %rbx, %rsi - adcq %rdi, %r12 - adcq %r14, %r8 - sbbq %rdi, %rdi - andl $1, %edi - movq -88(%rsp), %rcx ## 8-byte Reload - imulq %r10, %rcx - movq %rcx, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -88(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %rbx - movq %rcx, %rax - movq %rcx, %r9 - movq -32(%rsp), %r11 ## 8-byte Reload - mulq %r11 - movq %rdx, %rcx - movq %rax, %r14 - movq %r9, %rax - movq -24(%rsp), %r9 ## 8-byte Reload - mulq %r9 - addq %r14, %rdx - adcq %rbx, %rcx - adcq -88(%rsp), %r15 ## 8-byte Folded Reload - adcq $0, %r13 - addq %r10, %rax - adcq %rbp, %rdx - adcq %rsi, %rcx - adcq %r12, %r15 - adcq %r8, %r13 - adcq $0, %rdi - movq %rdx, %rax - subq %r9, %rax - movq %rcx, %rsi - sbbq %r11, %rsi - movq %r15, %rbp - sbbq -80(%rsp), %rbp ## 8-byte Folded Reload - movq %r13, %rbx - sbbq -72(%rsp), %rbx ## 8-byte Folded Reload - sbbq $0, %rdi - andl $1, %edi - cmovneq %r13, %rbx - testb %dil, %dil - cmovneq %rdx, %rax - movq -8(%rsp), %rdx ## 8-byte Reload - movq %rax, (%rdx) - cmovneq %rcx, %rsi - movq %rsi, 8(%rdx) - cmovneq %r15, %rbp - movq %rbp, 16(%rdx) - movq %rbx, 24(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montNF4L - .p2align 4, 0x90 -_mcl_fp_montNF4L: ## @mcl_fp_montNF4L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %r15 - movq %rdi, -8(%rsp) ## 8-byte Spill - movq 24(%rsi), %rax - movq %rax, -88(%rsp) ## 8-byte Spill - movq (%r15), %rdi - movq %r15, -24(%rsp) ## 8-byte Spill - mulq %rdi - movq %rax, %r8 - movq %rdx, %r12 - movq 16(%rsi), %rax - movq %rax, -96(%rsp) ## 8-byte Spill - mulq %rdi - movq %rax, %r14 - movq %rdx, %r10 - movq (%rsi), %rbp - movq %rbp, -56(%rsp) ## 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -64(%rsp) ## 8-byte Spill - mulq %rdi - movq %rdx, %rbx - movq %rax, %rsi - movq %rbp, %rax - mulq %rdi - movq %rax, %r11 - movq %rdx, %r9 - addq %rsi, %r9 - adcq %r14, %rbx - adcq %r8, %r10 - adcq $0, %r12 - movq -8(%rcx), %rax - movq %rax, -80(%rsp) ## 8-byte Spill - movq %r11, %rsi - imulq %rax, %rsi - movq 24(%rcx), %rdx - movq %rdx, -40(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq %rdx - movq %rax, %r13 - movq %rdx, -16(%rsp) ## 
8-byte Spill - movq 16(%rcx), %rdx - movq %rdx, -48(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq %rdx - movq %rax, %r8 - movq %rdx, %r14 - movq (%rcx), %rdi - movq %rdi, -72(%rsp) ## 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -32(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq %rcx - movq %rdx, %rcx - movq %rax, %rbp - movq %rsi, %rax - mulq %rdi - addq %r11, %rax - adcq %r9, %rbp - adcq %rbx, %r8 - adcq %r10, %r13 - adcq $0, %r12 - addq %rdx, %rbp - adcq %rcx, %r8 - adcq %r14, %r13 - adcq -16(%rsp), %r12 ## 8-byte Folded Reload - movq 8(%r15), %rdi - movq %rdi, %rax - mulq -88(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %rsi - movq %rdi, %rax - mulq -96(%rsp) ## 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %r11 - movq %rdi, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r14 - movq %rdi, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rax, %rdi - movq %rdx, %r9 - addq %r14, %r9 - adcq %r11, %rcx - adcq %rsi, %r10 - adcq $0, %rbx - addq %rbp, %rdi - adcq %r8, %r9 - adcq %r13, %rcx - adcq %r12, %r10 - adcq $0, %rbx - movq %rdi, %rsi - imulq -80(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r12 - movq %rsi, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, %r13 - movq %rsi, %rax - movq -32(%rsp), %r15 ## 8-byte Reload - mulq %r15 - movq %rdx, %r14 - movq %rax, %rbp - movq %rsi, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - addq %rdi, %rax - adcq %r9, %rbp - adcq %rcx, %r13 - adcq %r10, %r12 - adcq $0, %rbx - addq %rdx, %rbp - adcq %r14, %r13 - adcq %r11, %r12 - adcq %r8, %rbx - movq -24(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rdi - movq %rdi, %rax - mulq -88(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r10 - movq %rdi, %rax - mulq -96(%rsp) ## 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r11 - movq %rdi, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r14 - movq %rdi, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rax, %r9 - movq %rdx, %rdi - addq %r14, %rdi - adcq %r11, %rcx - adcq %r10, %r8 - adcq $0, %rsi - addq %rbp, %r9 - adcq %r13, %rdi - adcq %r12, %rcx - adcq %rbx, %r8 - adcq $0, %rsi - movq %r9, %rbx - imulq -80(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %r12 - movq %rbx, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, %r13 - movq %rbx, %rax - mulq %r15 - movq %rdx, %r14 - movq %rax, %rbp - movq %rbx, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - addq %r9, %rax - adcq %rdi, %rbp - adcq %rcx, %r13 - adcq %r8, %r12 - adcq $0, %rsi - addq %rdx, %rbp - adcq %r14, %r13 - adcq %r11, %r12 - adcq %r10, %rsi - movq -24(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rdi - movq %rdi, %rax - mulq -88(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %rcx - movq %rdi, %rax - mulq -96(%rsp) ## 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r11 - movq %rdi, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %r14 - movq %rdi, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rax, %r9 - movq %rdx, %rdi - addq %r14, %rdi - adcq %r11, %r10 - adcq %rcx, %r8 - adcq $0, %rbx - addq %rbp, %r9 - adcq %r13, %rdi - adcq %r12, %r10 - adcq %rsi, %r8 - adcq $0, %rbx - movq -80(%rsp), %rcx ## 8-byte Reload - imulq %r9, %rcx - movq %rcx, %rax - movq -40(%rsp), %r12 ## 8-byte Reload - mulq %r12 - movq %rdx, -88(%rsp) ## 
8-byte Spill - movq %rax, %r13 - movq %rcx, %rax - movq -48(%rsp), %r11 ## 8-byte Reload - mulq %r11 - movq %rdx, -96(%rsp) ## 8-byte Spill - movq %rax, %rbp - movq %rcx, %rax - movq %rcx, %r15 - movq -72(%rsp), %rsi ## 8-byte Reload - mulq %rsi - movq %rdx, %r14 - movq %rax, %rcx - movq %r15, %rax - movq -32(%rsp), %r15 ## 8-byte Reload - mulq %r15 - addq %r9, %rcx - adcq %rdi, %rax - adcq %r10, %rbp - adcq %r8, %r13 - adcq $0, %rbx - addq %r14, %rax - adcq %rdx, %rbp - adcq -96(%rsp), %r13 ## 8-byte Folded Reload - adcq -88(%rsp), %rbx ## 8-byte Folded Reload - movq %rax, %rcx - subq %rsi, %rcx - movq %rbp, %rdx - sbbq %r15, %rdx - movq %r13, %rdi - sbbq %r11, %rdi - movq %rbx, %rsi - sbbq %r12, %rsi - cmovsq %rax, %rcx - movq -8(%rsp), %rax ## 8-byte Reload - movq %rcx, (%rax) - cmovsq %rbp, %rdx - movq %rdx, 8(%rax) - cmovsq %r13, %rdi - movq %rdi, 16(%rax) - cmovsq %rbx, %rsi - movq %rsi, 24(%rax) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montRed4L - .p2align 4, 0x90 -_mcl_fp_montRed4L: ## @mcl_fp_montRed4L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rdi, -8(%rsp) ## 8-byte Spill - movq -8(%rcx), %rax - movq (%rcx), %rdi - movq %rdi, -32(%rsp) ## 8-byte Spill - movq (%rsi), %r12 - movq %r12, %rbx - imulq %rax, %rbx - movq %rax, %r9 - movq %r9, -64(%rsp) ## 8-byte Spill - movq 24(%rcx), %rdx - movq %rdx, -40(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq %rdx - movq %rax, %r11 - movq %rdx, %r8 - movq 16(%rcx), %rbp - movq %rbx, %rax - mulq %rbp - movq %rbp, %r13 - movq %r13, -24(%rsp) ## 8-byte Spill - movq %rax, %r14 - movq %rdx, %r10 - movq 8(%rcx), %rcx - movq %rbx, %rax - mulq %rcx - movq %rcx, %rbp - movq %rbp, -16(%rsp) ## 8-byte Spill - movq %rdx, %r15 - movq %rax, %rcx - movq %rbx, %rax - mulq %rdi - movq %rdx, %rbx - addq %rcx, %rbx - adcq %r14, %r15 - adcq %r11, %r10 - adcq $0, %r8 - movq 56(%rsi), %rcx - movq 48(%rsi), %rdx - addq %r12, %rax - movq 40(%rsi), %rax - adcq 8(%rsi), %rbx - adcq 16(%rsi), %r15 - adcq 24(%rsi), %r10 - adcq 32(%rsi), %r8 - adcq $0, %rax - movq %rax, -48(%rsp) ## 8-byte Spill - adcq $0, %rdx - movq %rdx, %r12 - adcq $0, %rcx - movq %rcx, -72(%rsp) ## 8-byte Spill - sbbq %rdi, %rdi - andl $1, %edi - movq %rbx, %rsi - imulq %r9, %rsi - movq %rsi, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -56(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq %r13 - movq %rdx, %r14 - movq %rax, %r9 - movq %rsi, %rax - mulq %rbp - movq %rdx, %rcx - movq %rax, %rbp - movq %rsi, %rax - movq -32(%rsp), %r13 ## 8-byte Reload - mulq %r13 - movq %rdx, %rsi - addq %rbp, %rsi - adcq %r9, %rcx - adcq -56(%rsp), %r14 ## 8-byte Folded Reload - adcq $0, %r11 - addq %rbx, %rax - adcq %r15, %rsi - adcq %r10, %rcx - adcq %r8, %r14 - adcq -48(%rsp), %r11 ## 8-byte Folded Reload - adcq $0, %r12 - movq %r12, -48(%rsp) ## 8-byte Spill - movq -72(%rsp), %rbp ## 8-byte Reload - adcq $0, %rbp - adcq $0, %rdi - movq %rsi, %rbx - imulq -64(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, %rax - movq -40(%rsp), %r12 ## 8-byte Reload - mulq %r12 - movq %rdx, %r8 - movq %rax, -72(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, -56(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -16(%rsp) ## 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %r9 - movq %rbx, %rax - mulq %r13 - movq %rdx, %rbx - addq %r9, %rbx - adcq -56(%rsp), %r15 ## 8-byte Folded Reload - adcq -72(%rsp), %r10 ## 
8-byte Folded Reload - adcq $0, %r8 - addq %rsi, %rax - adcq %rcx, %rbx - adcq %r14, %r15 - adcq %r11, %r10 - adcq -48(%rsp), %r8 ## 8-byte Folded Reload - adcq $0, %rbp - movq %rbp, -72(%rsp) ## 8-byte Spill - adcq $0, %rdi - movq -64(%rsp), %rcx ## 8-byte Reload - imulq %rbx, %rcx - movq %rcx, %rax - mulq %r12 - movq %rdx, %r13 - movq %rax, -64(%rsp) ## 8-byte Spill - movq %rcx, %rax - movq -24(%rsp), %r14 ## 8-byte Reload - mulq %r14 - movq %rdx, %r11 - movq %rax, %r12 - movq %rcx, %rax - movq %rcx, %r9 - movq -16(%rsp), %rsi ## 8-byte Reload - mulq %rsi - movq %rdx, %rbp - movq %rax, %rcx - movq %r9, %rax - movq -32(%rsp), %r9 ## 8-byte Reload - mulq %r9 - addq %rcx, %rdx - adcq %r12, %rbp - adcq -64(%rsp), %r11 ## 8-byte Folded Reload - adcq $0, %r13 - addq %rbx, %rax - adcq %r15, %rdx - adcq %r10, %rbp - adcq %r8, %r11 - adcq -72(%rsp), %r13 ## 8-byte Folded Reload - adcq $0, %rdi - movq %rdx, %rax - subq %r9, %rax - movq %rbp, %rcx - sbbq %rsi, %rcx - movq %r11, %rbx - sbbq %r14, %rbx - movq %r13, %rsi - sbbq -40(%rsp), %rsi ## 8-byte Folded Reload - sbbq $0, %rdi - andl $1, %edi - cmovneq %r13, %rsi - testb %dil, %dil - cmovneq %rdx, %rax - movq -8(%rsp), %rdx ## 8-byte Reload - movq %rax, (%rdx) - cmovneq %rbp, %rcx - movq %rcx, 8(%rdx) - cmovneq %r11, %rbx - movq %rbx, 16(%rdx) - movq %rsi, 24(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_addPre4L - .p2align 4, 0x90 -_mcl_fp_addPre4L: ## @mcl_fp_addPre4L -## BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rdx), %rax - movq (%rdx), %rcx - movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rax - movq %rcx, (%rdi) - movq %rdx, 8(%rdi) - movq %rax, 16(%rdi) - adcq %r8, %r9 - movq %r9, 24(%rdi) - sbbq %rax, %rax - andl $1, %eax - retq - - .globl _mcl_fp_subPre4L - .p2align 4, 0x90 -_mcl_fp_subPre4L: ## @mcl_fp_subPre4L -## BB#0: - movq 24(%rdx), %r8 - movq 24(%rsi), %r9 - movq 16(%rsi), %r10 - movq (%rsi), %rcx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rcx - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %r10 - movq %rcx, (%rdi) - movq %rsi, 8(%rdi) - movq %r10, 16(%rdi) - sbbq %r8, %r9 - movq %r9, 24(%rdi) - sbbq $0, %rax - andl $1, %eax - retq - - .globl _mcl_fp_shr1_4L - .p2align 4, 0x90 -_mcl_fp_shr1_4L: ## @mcl_fp_shr1_4L -## BB#0: - movq 24(%rsi), %rax - movq 16(%rsi), %rcx - movq (%rsi), %rdx - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rdx - movq %rdx, (%rdi) - shrdq $1, %rcx, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rax, %rcx - movq %rcx, 16(%rdi) - shrq %rax - movq %rax, 24(%rdi) - retq - - .globl _mcl_fp_add4L - .p2align 4, 0x90 -_mcl_fp_add4L: ## @mcl_fp_add4L -## BB#0: - movq 24(%rdx), %r10 - movq 24(%rsi), %r8 - movq 16(%rdx), %r9 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r9 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r9, 16(%rdi) - adcq %r10, %r8 - movq %r8, 24(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq 16(%rcx), %r9 - sbbq 24(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne LBB59_2 -## BB#1: ## %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r9, 16(%rdi) - movq %r8, 24(%rdi) -LBB59_2: ## %carry - retq - - .globl _mcl_fp_addNF4L - .p2align 4, 0x90 -_mcl_fp_addNF4L: ## @mcl_fp_addNF4L -## BB#0: - pushq %rbx - movq 24(%rdx), %r8 - movq 16(%rdx), %r9 - movq (%rdx), %r11 - movq 8(%rdx), %r10 - addq (%rsi), %r11 - adcq 8(%rsi), %r10 - adcq 16(%rsi), %r9 - adcq 24(%rsi), %r8 - movq %r11, %rsi - subq (%rcx), %rsi - movq 
%r10, %rdx - sbbq 8(%rcx), %rdx - movq %r9, %rax - sbbq 16(%rcx), %rax - movq %r8, %rbx - sbbq 24(%rcx), %rbx - testq %rbx, %rbx - cmovsq %r11, %rsi - movq %rsi, (%rdi) - cmovsq %r10, %rdx - movq %rdx, 8(%rdi) - cmovsq %r9, %rax - movq %rax, 16(%rdi) - cmovsq %r8, %rbx - movq %rbx, 24(%rdi) - popq %rbx - retq - - .globl _mcl_fp_sub4L - .p2align 4, 0x90 -_mcl_fp_sub4L: ## @mcl_fp_sub4L -## BB#0: - movq 24(%rdx), %r10 - movq 24(%rsi), %r8 - movq 16(%rsi), %r9 - movq (%rsi), %rax - movq 8(%rsi), %r11 - xorl %esi, %esi - subq (%rdx), %rax - sbbq 8(%rdx), %r11 - sbbq 16(%rdx), %r9 - movq %rax, (%rdi) - movq %r11, 8(%rdi) - movq %r9, 16(%rdi) - sbbq %r10, %r8 - movq %r8, 24(%rdi) - sbbq $0, %rsi - testb $1, %sil - jne LBB61_2 -## BB#1: ## %nocarry - retq -LBB61_2: ## %carry - movq 24(%rcx), %r10 - movq 8(%rcx), %rsi - movq 16(%rcx), %rdx - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %r11, %rsi - movq %rsi, 8(%rdi) - adcq %r9, %rdx - movq %rdx, 16(%rdi) - adcq %r8, %r10 - movq %r10, 24(%rdi) - retq - - .globl _mcl_fp_subNF4L - .p2align 4, 0x90 -_mcl_fp_subNF4L: ## @mcl_fp_subNF4L -## BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movdqu (%rdx), %xmm0 - movdqu 16(%rdx), %xmm1 - pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] - movd %xmm2, %r8 - movdqu (%rsi), %xmm2 - movdqu 16(%rsi), %xmm3 - pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1] - movd %xmm4, %r15 - movd %xmm1, %r9 - movd %xmm3, %r11 - pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] - movd %xmm1, %r10 - pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1] - movd %xmm1, %r14 - movd %xmm0, %rdx - movd %xmm2, %r12 - subq %rdx, %r12 - sbbq %r10, %r14 - sbbq %r9, %r11 - sbbq %r8, %r15 - movq %r15, %rdx - sarq $63, %rdx - movq 24(%rcx), %rsi - andq %rdx, %rsi - movq 16(%rcx), %rax - andq %rdx, %rax - movq 8(%rcx), %rbx - andq %rdx, %rbx - andq (%rcx), %rdx - addq %r12, %rdx - movq %rdx, (%rdi) - adcq %r14, %rbx - movq %rbx, 8(%rdi) - adcq %r11, %rax - movq %rax, 16(%rdi) - adcq %r15, %rsi - movq %rsi, 24(%rdi) - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq - - .globl _mcl_fpDbl_add4L - .p2align 4, 0x90 -_mcl_fpDbl_add4L: ## @mcl_fpDbl_add4L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r9 - movq 56(%rsi), %r8 - movq 48(%rdx), %r10 - movq 48(%rsi), %r12 - movq 40(%rdx), %r11 - movq 32(%rdx), %r14 - movq 24(%rdx), %r15 - movq 16(%rdx), %rbx - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rbx - movq 40(%rsi), %r13 - movq 24(%rsi), %rbp - movq 32(%rsi), %rsi - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %rbx, 16(%rdi) - adcq %r15, %rbp - movq %rbp, 24(%rdi) - adcq %r14, %rsi - adcq %r11, %r13 - adcq %r10, %r12 - adcq %r9, %r8 - sbbq %rax, %rax - andl $1, %eax - movq %rsi, %rdx - subq (%rcx), %rdx - movq %r13, %rbp - sbbq 8(%rcx), %rbp - movq %r12, %rbx - sbbq 16(%rcx), %rbx - movq %r8, %r9 - sbbq 24(%rcx), %r9 - sbbq $0, %rax - andl $1, %eax - cmovneq %rsi, %rdx - movq %rdx, 32(%rdi) - testb %al, %al - cmovneq %r13, %rbp - movq %rbp, 40(%rdi) - cmovneq %r12, %rbx - movq %rbx, 48(%rdi) - cmovneq %r8, %r9 - movq %r9, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sub4L - .p2align 4, 0x90 -_mcl_fpDbl_sub4L: ## @mcl_fpDbl_sub4L -## BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r9 - movq 56(%rsi), %r8 - movq 48(%rdx), %r10 - movq 24(%rdx), %r11 - movq (%rsi), %rbx - xorl %eax, %eax - subq (%rdx), %rbx - movq %rbx, (%rdi) - movq 
8(%rsi), %rbx - sbbq 8(%rdx), %rbx - movq %rbx, 8(%rdi) - movq 16(%rsi), %rbx - sbbq 16(%rdx), %rbx - movq %rbx, 16(%rdi) - movq 24(%rsi), %rbx - sbbq %r11, %rbx - movq 40(%rdx), %r11 - movq 32(%rdx), %rdx - movq %rbx, 24(%rdi) - movq 32(%rsi), %r12 - sbbq %rdx, %r12 - movq 48(%rsi), %r14 - movq 40(%rsi), %r15 - sbbq %r11, %r15 - sbbq %r10, %r14 - sbbq %r9, %r8 - movl $0, %edx - sbbq $0, %rdx - andl $1, %edx - movq (%rcx), %rsi - cmoveq %rax, %rsi - testb %dl, %dl - movq 16(%rcx), %rdx - cmoveq %rax, %rdx - movq 24(%rcx), %rbx - cmoveq %rax, %rbx - cmovneq 8(%rcx), %rax - addq %r12, %rsi - movq %rsi, 32(%rdi) - adcq %r15, %rax - movq %rax, 40(%rdi) - adcq %r14, %rdx - movq %rdx, 48(%rdi) - adcq %r8, %rbx - movq %rbx, 56(%rdi) - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_mulUnitPre5L - .p2align 4, 0x90 -_mcl_fp_mulUnitPre5L: ## @mcl_fp_mulUnitPre5L -## BB#0: - pushq %r15 - pushq %r14 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rcx, %rax - mulq 32(%rsi) - movq %rdx, %r8 - movq %rax, %r9 - movq %rcx, %rax - mulq 24(%rsi) - movq %rdx, %r10 - movq %rax, %r11 - movq %rcx, %rax - mulq 16(%rsi) - movq %rdx, %r15 - movq %rax, %r14 - movq %rcx, %rax - mulq 8(%rsi) - movq %rdx, %rbx - movq %rax, %r12 - movq %rcx, %rax - mulq (%rsi) - movq %rax, (%rdi) - addq %r12, %rdx - movq %rdx, 8(%rdi) - adcq %r14, %rbx - movq %rbx, 16(%rdi) - adcq %r11, %r15 - movq %r15, 24(%rdi) - adcq %r9, %r10 - movq %r10, 32(%rdi) - adcq $0, %r8 - movq %r8, 40(%rdi) - popq %rbx - popq %r12 - popq %r14 - popq %r15 - retq - - .globl _mcl_fpDbl_mulPre5L - .p2align 4, 0x90 -_mcl_fpDbl_mulPre5L: ## @mcl_fpDbl_mulPre5L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -72(%rsp) ## 8-byte Spill - movq %rsi, %r9 - movq %rdi, -48(%rsp) ## 8-byte Spill - movq (%r9), %rax - movq %rax, -80(%rsp) ## 8-byte Spill - movq (%rdx), %rbp - mulq %rbp - movq %rdx, -88(%rsp) ## 8-byte Spill - movq 16(%r9), %r13 - movq 24(%r9), %r15 - movq 32(%r9), %rbx - movq %rax, (%rdi) - movq %rbx, %rax - mulq %rbp - movq %rdx, %r11 - movq %rax, %r10 - movq %r15, %rax - mulq %rbp - movq %rdx, %r14 - movq %rax, %rdi - movq %r13, %rax - mulq %rbp - movq %rax, %rsi - movq %rdx, %rcx - movq 8(%r9), %r8 - movq %r8, %rax - mulq %rbp - movq %rdx, %rbp - movq %rax, %r12 - addq -88(%rsp), %r12 ## 8-byte Folded Reload - adcq %rsi, %rbp - adcq %rdi, %rcx - adcq %r10, %r14 - adcq $0, %r11 - movq -72(%rsp), %r10 ## 8-byte Reload - movq 8(%r10), %rdi - movq %rbx, %rax - mulq %rdi - movq %rdx, -88(%rsp) ## 8-byte Spill - movq %rax, %rsi - movq %r15, %rax - mulq %rdi - movq %rdx, -96(%rsp) ## 8-byte Spill - movq %rax, %r15 - movq %r13, %rax - mulq %rdi - movq %rdx, -56(%rsp) ## 8-byte Spill - movq %rax, %r13 - movq %r8, %rax - mulq %rdi - movq %rdx, %r8 - movq %rax, %rbx - movq -80(%rsp), %rax ## 8-byte Reload - mulq %rdi - addq %r12, %rax - movq -48(%rsp), %rdi ## 8-byte Reload - movq %rax, 8(%rdi) - adcq %rbp, %rbx - adcq %rcx, %r13 - adcq %r14, %r15 - adcq %r11, %rsi - sbbq %rcx, %rcx - andl $1, %ecx - addq %rdx, %rbx - adcq %r8, %r13 - adcq -56(%rsp), %r15 ## 8-byte Folded Reload - adcq -96(%rsp), %rsi ## 8-byte Folded Reload - adcq -88(%rsp), %rcx ## 8-byte Folded Reload - movq 32(%r9), %rax - movq %rax, -96(%rsp) ## 8-byte Spill - movq 16(%r10), %r12 - mulq %r12 - movq %rax, %r11 - movq %rdx, -80(%rsp) ## 8-byte Spill - movq 24(%r9), %rax - movq %rax, -64(%rsp) ## 8-byte Spill - mulq %r12 - movq %rax, %r10 - movq %rdx, -88(%rsp) ## 8-byte Spill - movq 16(%r9), %rax - movq %rax, 
-32(%rsp) ## 8-byte Spill - mulq %r12 - movq %rax, %r8 - movq %rdx, -56(%rsp) ## 8-byte Spill - movq 8(%r9), %rdi - movq %rdi, %rax - mulq %r12 - movq %rax, %rbp - movq %rdx, -16(%rsp) ## 8-byte Spill - movq (%r9), %r14 - movq %r14, %rax - mulq %r12 - movq %rdx, -40(%rsp) ## 8-byte Spill - addq %rbx, %rax - movq -48(%rsp), %rbx ## 8-byte Reload - movq %rax, 16(%rbx) - adcq %r13, %rbp - adcq %r15, %r8 - adcq %rsi, %r10 - adcq %rcx, %r11 - sbbq %rsi, %rsi - movq -72(%rsp), %r12 ## 8-byte Reload - movq 24(%r12), %rcx - movq -96(%rsp), %rax ## 8-byte Reload - mulq %rcx - movq %rdx, -96(%rsp) ## 8-byte Spill - movq %rax, -24(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq %rcx - movq %rdx, -8(%rsp) ## 8-byte Spill - movq %rax, %r15 - movq %r14, %rax - mulq %rcx - movq %rdx, %r13 - movq %rax, %rdi - movq -64(%rsp), %rax ## 8-byte Reload - mulq %rcx - movq %rdx, -64(%rsp) ## 8-byte Spill - movq %rax, %r14 - movq -32(%rsp), %rax ## 8-byte Reload - mulq %rcx - andl $1, %esi - addq -40(%rsp), %rbp ## 8-byte Folded Reload - adcq -16(%rsp), %r8 ## 8-byte Folded Reload - adcq -56(%rsp), %r10 ## 8-byte Folded Reload - adcq -88(%rsp), %r11 ## 8-byte Folded Reload - adcq -80(%rsp), %rsi ## 8-byte Folded Reload - addq %rdi, %rbp - movq %rbp, 24(%rbx) - adcq %r15, %r8 - adcq %rax, %r10 - adcq %r14, %r11 - adcq -24(%rsp), %rsi ## 8-byte Folded Reload - sbbq %rcx, %rcx - andl $1, %ecx - addq %r13, %r8 - adcq -8(%rsp), %r10 ## 8-byte Folded Reload - adcq %rdx, %r11 - adcq -64(%rsp), %rsi ## 8-byte Folded Reload - adcq -96(%rsp), %rcx ## 8-byte Folded Reload - movq 32(%r12), %rdi - movq %rdi, %rax - mulq 32(%r9) - movq %rdx, -72(%rsp) ## 8-byte Spill - movq %rax, %r15 - movq %rdi, %rax - mulq 24(%r9) - movq %rdx, -80(%rsp) ## 8-byte Spill - movq %rax, %r13 - movq %rdi, %rax - mulq 16(%r9) - movq %rdx, %r14 - movq %rax, %rbx - movq %rdi, %rax - mulq 8(%r9) - movq %rdx, %r12 - movq %rax, %rbp - movq %rdi, %rax - mulq (%r9) - addq %r8, %rax - movq -48(%rsp), %rdi ## 8-byte Reload - movq %rax, 32(%rdi) - adcq %r10, %rbp - adcq %r11, %rbx - adcq %rsi, %r13 - adcq %rcx, %r15 - sbbq %rax, %rax - andl $1, %eax - addq %rdx, %rbp - movq %rbp, 40(%rdi) - adcq %r12, %rbx - movq %rbx, 48(%rdi) - adcq %r14, %r13 - movq %r13, 56(%rdi) - adcq -80(%rsp), %r15 ## 8-byte Folded Reload - movq %r15, 64(%rdi) - adcq -72(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 72(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sqrPre5L - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre5L: ## @mcl_fpDbl_sqrPre5L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 32(%rsi), %r11 - movq (%rsi), %rbp - movq 8(%rsi), %r13 - movq %r11, %rax - mulq %r13 - movq %rax, -56(%rsp) ## 8-byte Spill - movq %rdx, -40(%rsp) ## 8-byte Spill - movq 24(%rsi), %rbx - movq %rbx, %rax - mulq %r13 - movq %rax, -64(%rsp) ## 8-byte Spill - movq %rdx, -72(%rsp) ## 8-byte Spill - movq 16(%rsi), %rcx - movq %rcx, %rax - mulq %r13 - movq %rdx, -48(%rsp) ## 8-byte Spill - movq %rax, -32(%rsp) ## 8-byte Spill - movq %r11, %rax - mulq %rbp - movq %rdx, %r8 - movq %rax, -16(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq %rbp - movq %rdx, %r9 - movq %rax, %r15 - movq %rcx, %rax - mulq %rbp - movq %rdx, %r10 - movq %rax, %r12 - movq %r13, %rax - mulq %r13 - movq %rdx, -8(%rsp) ## 8-byte Spill - movq %rax, %r14 - movq %r13, %rax - mulq %rbp - movq %rdx, %r13 - movq %rax, %rbx - movq %rbp, %rax - mulq %rbp - movq %rdi, -24(%rsp) ## 8-byte Spill - movq %rax, (%rdi) - addq %rbx, %rdx - adcq 
%r13, %r12 - adcq %r15, %r10 - adcq -16(%rsp), %r9 ## 8-byte Folded Reload - adcq $0, %r8 - addq %rbx, %rdx - movq %rdx, 8(%rdi) - adcq %r14, %r12 - adcq -32(%rsp), %r10 ## 8-byte Folded Reload - adcq -64(%rsp), %r9 ## 8-byte Folded Reload - adcq -56(%rsp), %r8 ## 8-byte Folded Reload - sbbq %rbp, %rbp - andl $1, %ebp - addq %r13, %r12 - adcq -8(%rsp), %r10 ## 8-byte Folded Reload - adcq -48(%rsp), %r9 ## 8-byte Folded Reload - adcq -72(%rsp), %r8 ## 8-byte Folded Reload - adcq -40(%rsp), %rbp ## 8-byte Folded Reload - movq %r11, %rax - mulq %rcx - movq %rax, %r11 - movq %rdx, -40(%rsp) ## 8-byte Spill - movq 24(%rsi), %rbx - movq %rbx, %rax - mulq %rcx - movq %rax, %r14 - movq %rdx, -72(%rsp) ## 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -56(%rsp) ## 8-byte Spill - mulq %rcx - movq %rax, %r15 - movq %rdx, -64(%rsp) ## 8-byte Spill - movq (%rsi), %rax - movq %rax, -48(%rsp) ## 8-byte Spill - mulq %rcx - movq %rdx, -32(%rsp) ## 8-byte Spill - movq %rax, %rdi - movq %rcx, %rax - mulq %rcx - movq %rax, %r13 - addq %r12, %rdi - movq -24(%rsp), %r12 ## 8-byte Reload - movq %rdi, 16(%r12) - adcq %r10, %r15 - adcq %r9, %r13 - adcq %r8, %r14 - adcq %rbp, %r11 - sbbq %rdi, %rdi - andl $1, %edi - addq -32(%rsp), %r15 ## 8-byte Folded Reload - adcq -64(%rsp), %r13 ## 8-byte Folded Reload - adcq %rdx, %r14 - adcq -72(%rsp), %r11 ## 8-byte Folded Reload - adcq -40(%rsp), %rdi ## 8-byte Folded Reload - movq -56(%rsp), %rax ## 8-byte Reload - mulq %rbx - movq %rdx, -72(%rsp) ## 8-byte Spill - movq %rax, %r8 - movq -48(%rsp), %rax ## 8-byte Reload - mulq %rbx - movq %rax, %rbp - movq %rdx, -56(%rsp) ## 8-byte Spill - movq 32(%rsi), %rcx - movq %rcx, %rax - mulq %rbx - movq %rax, %r9 - movq %rdx, -48(%rsp) ## 8-byte Spill - movq 16(%rsi), %rax - movq %rax, -40(%rsp) ## 8-byte Spill - mulq %rbx - movq %rdx, -64(%rsp) ## 8-byte Spill - movq %rax, %r10 - movq %rbx, %rax - mulq %rbx - movq %rax, %rbx - addq %r15, %rbp - movq %rbp, 24(%r12) - adcq %r13, %r8 - adcq %r14, %r10 - adcq %r11, %rbx - adcq %rdi, %r9 - sbbq %r12, %r12 - andl $1, %r12d - addq -56(%rsp), %r8 ## 8-byte Folded Reload - adcq -72(%rsp), %r10 ## 8-byte Folded Reload - adcq -64(%rsp), %rbx ## 8-byte Folded Reload - adcq %rdx, %r9 - adcq -48(%rsp), %r12 ## 8-byte Folded Reload - movq %rcx, %rax - mulq 24(%rsi) - movq %rdx, -72(%rsp) ## 8-byte Spill - movq %rax, %rbp - movq %rcx, %rax - mulq 8(%rsi) - movq %rdx, %r14 - movq %rax, %rdi - movq %rcx, %rax - mulq (%rsi) - movq %rdx, %r13 - movq %rax, %rsi - movq %rcx, %rax - mulq %rcx - movq %rdx, %r15 - movq %rax, %r11 - movq -40(%rsp), %rax ## 8-byte Reload - mulq %rcx - addq %r8, %rsi - movq -24(%rsp), %r8 ## 8-byte Reload - movq %rsi, 32(%r8) - adcq %r10, %rdi - adcq %rbx, %rax - adcq %r9, %rbp - adcq %r12, %r11 - sbbq %rcx, %rcx - andl $1, %ecx - addq %r13, %rdi - movq %r8, %rsi - movq %rdi, 40(%rsi) - adcq %r14, %rax - movq %rax, 48(%rsi) - adcq %rdx, %rbp - movq %rbp, 56(%rsi) - adcq -72(%rsp), %r11 ## 8-byte Folded Reload - movq %r11, 64(%rsi) - adcq %r15, %rcx - movq %rcx, 72(%rsi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mont5L - .p2align 4, 0x90 -_mcl_fp_mont5L: ## @mcl_fp_mont5L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - pushq %rax - movq %rdx, -96(%rsp) ## 8-byte Spill - movq %rdi, (%rsp) ## 8-byte Spill - movq 32(%rsi), %rax - movq %rax, -104(%rsp) ## 8-byte Spill - movq (%rdx), %rdi - mulq %rdi - movq %rax, %r8 - movq %rdx, %r15 - movq 24(%rsi), %rax - movq %rax, -80(%rsp) ## 
8-byte Spill - mulq %rdi - movq %rax, %r10 - movq %rdx, %rbx - movq 16(%rsi), %rax - movq %rax, -88(%rsp) ## 8-byte Spill - mulq %rdi - movq %rax, %r11 - movq %rdx, %r14 - movq (%rsi), %rbp - movq %rbp, -24(%rsp) ## 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -32(%rsp) ## 8-byte Spill - mulq %rdi - movq %rdx, %r12 - movq %rax, %rsi - movq %rbp, %rax - mulq %rdi - movq %rax, -128(%rsp) ## 8-byte Spill - movq %rdx, %r9 - addq %rsi, %r9 - adcq %r11, %r12 - adcq %r10, %r14 - adcq %r8, %rbx - movq %rbx, -120(%rsp) ## 8-byte Spill - adcq $0, %r15 - movq %r15, -112(%rsp) ## 8-byte Spill - movq -8(%rcx), %rdx - movq %rdx, -72(%rsp) ## 8-byte Spill - movq %rax, %rbp - imulq %rdx, %rbp - movq 32(%rcx), %rdx - movq %rdx, -40(%rsp) ## 8-byte Spill - movq %rbp, %rax - mulq %rdx - movq %rax, -8(%rsp) ## 8-byte Spill - movq %rdx, %r8 - movq 24(%rcx), %rdx - movq %rdx, -48(%rsp) ## 8-byte Spill - movq %rbp, %rax - mulq %rdx - movq %rax, %r13 - movq %rdx, %rsi - movq 16(%rcx), %rdx - movq %rdx, -56(%rsp) ## 8-byte Spill - movq %rbp, %rax - mulq %rdx - movq %rax, %r11 - movq %rdx, %rbx - movq (%rcx), %rdi - movq %rdi, -16(%rsp) ## 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -64(%rsp) ## 8-byte Spill - movq %rbp, %rax - mulq %rcx - movq %rdx, %r10 - movq %rax, %r15 - movq %rbp, %rax - mulq %rdi - movq %rdx, %rcx - addq %r15, %rcx - adcq %r11, %r10 - adcq %r13, %rbx - adcq -8(%rsp), %rsi ## 8-byte Folded Reload - adcq $0, %r8 - addq -128(%rsp), %rax ## 8-byte Folded Reload - adcq %r9, %rcx - adcq %r12, %r10 - adcq %r14, %rbx - adcq -120(%rsp), %rsi ## 8-byte Folded Reload - adcq -112(%rsp), %r8 ## 8-byte Folded Reload - sbbq %r15, %r15 - andl $1, %r15d - movq -96(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rdi - movq %rdi, %rax - mulq -104(%rsp) ## 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -112(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -120(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq -88(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, -128(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq -32(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, %r12 - movq %rdi, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rax, %rdi - movq %rdx, %r11 - addq %r12, %r11 - adcq -128(%rsp), %r9 ## 8-byte Folded Reload - adcq -120(%rsp), %rbp ## 8-byte Folded Reload - adcq -112(%rsp), %r14 ## 8-byte Folded Reload - adcq $0, %r13 - addq %rcx, %rdi - adcq %r10, %r11 - adcq %rbx, %r9 - adcq %rsi, %rbp - adcq %r8, %r14 - adcq %r15, %r13 - sbbq %rax, %rax - andl $1, %eax - movq %rax, -112(%rsp) ## 8-byte Spill - movq %rdi, %rbx - imulq -72(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, -120(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, -128(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r15 - movq %rbx, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r12 - movq %rbx, %rax - mulq -16(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - addq %r12, %rbx - adcq %r15, %rcx - adcq -128(%rsp), %rsi ## 8-byte Folded Reload - adcq -120(%rsp), %r8 ## 8-byte Folded Reload - adcq $0, %r10 - addq %rdi, %rax - adcq %r11, %rbx - adcq %r9, %rcx - adcq %rbp, %rsi - adcq %r14, %r8 - adcq %r13, %r10 - adcq $0, -112(%rsp) ## 8-byte Folded Spill - movq -96(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rbp - 
movq %rbp, %rax - mulq -104(%rsp) ## 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -120(%rsp) ## 8-byte Spill - movq %rbp, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -128(%rsp) ## 8-byte Spill - movq %rbp, %rax - mulq -88(%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, %r14 - movq %rbp, %rax - mulq -32(%rsp) ## 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r12 - movq %rbp, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rax, %r15 - movq %rdx, %rbp - addq %r12, %rbp - adcq %r14, %rdi - adcq -128(%rsp), %r11 ## 8-byte Folded Reload - adcq -120(%rsp), %r9 ## 8-byte Folded Reload - adcq $0, %r13 - addq %rbx, %r15 - adcq %rcx, %rbp - adcq %rsi, %rdi - adcq %r8, %r11 - adcq %r10, %r9 - adcq -112(%rsp), %r13 ## 8-byte Folded Reload - sbbq %rax, %rax - andl $1, %eax - movq %rax, -112(%rsp) ## 8-byte Spill - movq %r15, %rsi - imulq -72(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, -120(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -128(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, -8(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r8 - movq %rsi, %rax - mulq -16(%rsp) ## 8-byte Folded Reload - movq %rdx, %r12 - addq %r8, %r12 - adcq -8(%rsp), %rbx ## 8-byte Folded Reload - adcq -128(%rsp), %rcx ## 8-byte Folded Reload - adcq -120(%rsp), %r14 ## 8-byte Folded Reload - adcq $0, %r10 - addq %r15, %rax - adcq %rbp, %r12 - adcq %rdi, %rbx - adcq %r11, %rcx - adcq %r9, %r14 - adcq %r13, %r10 - adcq $0, -112(%rsp) ## 8-byte Folded Spill - movq -96(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rsi - movq %rsi, %rax - mulq -104(%rsp) ## 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, -120(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -128(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -88(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r15 - movq %rsi, %rax - mulq -32(%rsp) ## 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r13 - movq %rsi, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rax, %r11 - movq %rdx, %rsi - addq %r13, %rsi - adcq %r15, %rdi - adcq -128(%rsp), %rbp ## 8-byte Folded Reload - adcq -120(%rsp), %r9 ## 8-byte Folded Reload - adcq $0, %r8 - addq %r12, %r11 - adcq %rbx, %rsi - adcq %rcx, %rdi - adcq %r14, %rbp - adcq %r10, %r9 - adcq -112(%rsp), %r8 ## 8-byte Folded Reload - sbbq %rax, %rax - andl $1, %eax - movq %rax, -112(%rsp) ## 8-byte Spill - movq %r11, %rbx - imulq -72(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -120(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, -128(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %r14 - movq %rbx, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r12 - movq %rbx, %rax - mulq -16(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - addq %r12, %rbx - adcq %r14, %rcx - adcq -128(%rsp), %r15 ## 8-byte Folded Reload - adcq -120(%rsp), %r10 ## 8-byte Folded Reload - adcq $0, %r13 - addq %r11, %rax - adcq %rsi, %rbx - adcq %rdi, %rcx - adcq %rbp, %r15 - adcq %r9, %r10 - adcq %r8, %r13 - movq 
-112(%rsp), %r8 ## 8-byte Reload - adcq $0, %r8 - movq -96(%rsp), %rax ## 8-byte Reload - movq 32(%rax), %rsi - movq %rsi, %rax - mulq -104(%rsp) ## 8-byte Folded Reload - movq %rdx, -96(%rsp) ## 8-byte Spill - movq %rax, -104(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -80(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -88(%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -88(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -32(%rsp) ## 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %rdi - movq %rsi, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rax, %r14 - movq %rdx, %rbp - addq %rdi, %rbp - adcq -88(%rsp), %r12 ## 8-byte Folded Reload - adcq -80(%rsp), %r11 ## 8-byte Folded Reload - adcq -104(%rsp), %r9 ## 8-byte Folded Reload - movq -96(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - addq %rbx, %r14 - adcq %rcx, %rbp - adcq %r15, %r12 - adcq %r10, %r11 - adcq %r13, %r9 - adcq %r8, %rax - movq %rax, -96(%rsp) ## 8-byte Spill - sbbq %rcx, %rcx - movq -72(%rsp), %rdi ## 8-byte Reload - imulq %r14, %rdi - movq %rdi, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, -72(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, -104(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r13 - movq %rdi, %rax - movq %rdi, %r15 - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r10 - movq %r15, %rax - movq -16(%rsp), %r15 ## 8-byte Reload - mulq %r15 - addq %r10, %rdx - adcq %r13, %rdi - adcq -104(%rsp), %rsi ## 8-byte Folded Reload - adcq -72(%rsp), %rbx ## 8-byte Folded Reload - adcq $0, %r8 - andl $1, %ecx - addq %r14, %rax - adcq %rbp, %rdx - adcq %r12, %rdi - adcq %r11, %rsi - adcq %r9, %rbx - adcq -96(%rsp), %r8 ## 8-byte Folded Reload - adcq $0, %rcx - movq %rdx, %rax - subq %r15, %rax - movq %rdi, %rbp - sbbq -64(%rsp), %rbp ## 8-byte Folded Reload - movq %rsi, %r9 - sbbq -56(%rsp), %r9 ## 8-byte Folded Reload - movq %rbx, %r10 - sbbq -48(%rsp), %r10 ## 8-byte Folded Reload - movq %r8, %r11 - sbbq -40(%rsp), %r11 ## 8-byte Folded Reload - sbbq $0, %rcx - andl $1, %ecx - cmovneq %rbx, %r10 - testb %cl, %cl - cmovneq %rdx, %rax - movq (%rsp), %rcx ## 8-byte Reload - movq %rax, (%rcx) - cmovneq %rdi, %rbp - movq %rbp, 8(%rcx) - cmovneq %rsi, %r9 - movq %r9, 16(%rcx) - movq %r10, 24(%rcx) - cmovneq %r8, %r11 - movq %r11, 32(%rcx) - addq $8, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montNF5L - .p2align 4, 0x90 -_mcl_fp_montNF5L: ## @mcl_fp_montNF5L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -104(%rsp) ## 8-byte Spill - movq %rdi, -8(%rsp) ## 8-byte Spill - movq 32(%rsi), %rax - movq %rax, -112(%rsp) ## 8-byte Spill - movq (%rdx), %rbp - mulq %rbp - movq %rax, %r8 - movq %rdx, %r13 - movq 24(%rsi), %rax - movq %rax, -96(%rsp) ## 8-byte Spill - mulq %rbp - movq %rax, %r10 - movq %rdx, %r11 - movq 16(%rsi), %rax - movq %rax, -32(%rsp) ## 8-byte Spill - mulq %rbp - movq %rax, %r15 - movq %rdx, %r9 - movq (%rsi), %rdi - movq %rdi, -48(%rsp) ## 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -56(%rsp) ## 8-byte Spill - mulq %rbp - movq %rdx, %r12 - movq %rax, %rbx - movq %rdi, %rax - mulq %rbp - movq %rax, %r14 - movq %rdx, %rbp - addq %rbx, %rbp - adcq %r15, %r12 - adcq %r10, %r9 - adcq %r8, %r11 - adcq $0, %r13 - movq -8(%rcx), %rax - 
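## montNF5L reduction setup: -8(%rcx) is the Montgomery constant -p^{-1} mod 2^64, kept one word below the modulus; each round multiplies the running sum's low word by it so that adding q*p cancels that word (the NF variant then skips the final conditional subtraction in favor of a signed select)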
movq %rax, -88(%rsp) ## 8-byte Spill - movq %r14, %rsi - imulq %rax, %rsi - movq 32(%rcx), %rdx - movq %rdx, -64(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq %rdx - movq %rax, %r10 - movq %rdx, -120(%rsp) ## 8-byte Spill - movq 24(%rcx), %rdx - movq %rdx, -72(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq %rdx - movq %rax, %rbx - movq %rdx, -128(%rsp) ## 8-byte Spill - movq 16(%rcx), %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq %rdx - movq %rax, %r8 - movq %rdx, -16(%rsp) ## 8-byte Spill - movq (%rcx), %rdi - movq %rdi, -40(%rsp) ## 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -24(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq %rcx - movq %rdx, %r15 - movq %rax, %rcx - movq %rsi, %rax - mulq %rdi - addq %r14, %rax - adcq %rbp, %rcx - adcq %r12, %r8 - adcq %r9, %rbx - adcq %r11, %r10 - adcq $0, %r13 - addq %rdx, %rcx - adcq %r15, %r8 - adcq -16(%rsp), %rbx ## 8-byte Folded Reload - adcq -128(%rsp), %r10 ## 8-byte Folded Reload - adcq -120(%rsp), %r13 ## 8-byte Folded Reload - movq -104(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rsi - movq %rsi, %rax - mulq -112(%rsp) ## 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, -120(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -96(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, -128(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -32(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, %rdi - movq %rsi, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, %r14 - movq %rsi, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rax, %rsi - movq %rdx, %r15 - addq %r14, %r15 - adcq %rdi, %r11 - adcq -128(%rsp), %r9 ## 8-byte Folded Reload - adcq -120(%rsp), %rbp ## 8-byte Folded Reload - adcq $0, %r12 - addq %rcx, %rsi - adcq %r8, %r15 - adcq %rbx, %r11 - adcq %r10, %r9 - adcq %r13, %rbp - adcq $0, %r12 - movq %rsi, %rdi - imulq -88(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - movq %rax, %r13 - movq %rdi, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - movq %rax, %r8 - movq %rdi, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r14 - movq %rdi, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r10 - movq %rdi, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - addq %rsi, %rax - adcq %r15, %r10 - adcq %r11, %r14 - adcq %r9, %r8 - adcq %rbp, %r13 - adcq $0, %r12 - addq %rdx, %r10 - adcq %rbx, %r14 - adcq %rcx, %r8 - adcq -128(%rsp), %r13 ## 8-byte Folded Reload - adcq -120(%rsp), %r12 ## 8-byte Folded Reload - movq -104(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rsi - movq %rsi, %rax - mulq -112(%rsp) ## 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, -120(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -96(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -128(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -32(%rsp) ## 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %rbx - movq %rsi, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %rbp - movq %rsi, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rax, %r11 - movq %rdx, %rsi - addq %rbp, %rsi - adcq %rbx, %rcx - adcq -128(%rsp), %rdi ## 8-byte Folded Reload - adcq -120(%rsp), %r9 ## 8-byte Folded Reload - adcq $0, %r15 - addq %r10, %r11 - adcq %r14, %rsi - adcq %r8, %rcx - adcq %r13, %rdi - adcq %r12, %r9 - adcq $0, %r15 - movq %r11, %rbx - imulq -88(%rsp), %rbx ## 8-byte Folded Reload - movq 
%rbx, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - movq %rax, %r13 - movq %rbx, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - movq %rax, %r8 - movq %rbx, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, %r10 - movq %rbx, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %rbp - movq %rbx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - addq %r11, %rax - adcq %rsi, %rbp - adcq %rcx, %r10 - adcq %rdi, %r8 - adcq %r9, %r13 - adcq $0, %r15 - addq %rdx, %rbp - adcq %r12, %r10 - adcq %r14, %r8 - adcq -128(%rsp), %r13 ## 8-byte Folded Reload - adcq -120(%rsp), %r15 ## 8-byte Folded Reload - movq -104(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rsi - movq %rsi, %rax - mulq -112(%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -120(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -96(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -128(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -32(%rsp) ## 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %rbx - movq %rsi, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r12 - movq %rsi, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rax, %r14 - movq %rdx, %rsi - addq %r12, %rsi - adcq %rbx, %rcx - adcq -128(%rsp), %rdi ## 8-byte Folded Reload - adcq -120(%rsp), %r9 ## 8-byte Folded Reload - adcq $0, %r11 - addq %rbp, %r14 - adcq %r10, %rsi - adcq %r8, %rcx - adcq %r13, %rdi - adcq %r15, %r9 - adcq $0, %r11 - movq %r14, %rbx - imulq -88(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - movq %rax, %r13 - movq %rbx, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - movq %rax, %r8 - movq %rbx, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %r10 - movq %rbx, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %rbp - movq %rbx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - addq %r14, %rax - adcq %rsi, %rbp - adcq %rcx, %r10 - adcq %rdi, %r8 - adcq %r9, %r13 - adcq $0, %r11 - addq %rdx, %rbp - adcq %r12, %r10 - adcq %r15, %r8 - adcq -128(%rsp), %r13 ## 8-byte Folded Reload - adcq -120(%rsp), %r11 ## 8-byte Folded Reload - movq -104(%rsp), %rax ## 8-byte Reload - movq 32(%rax), %rcx - movq %rcx, %rax - mulq -112(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, -104(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -96(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -112(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -32(%rsp) ## 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %rsi - movq %rcx, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rax, %r12 - movq %rdx, %rdi - addq %rsi, %rdi - adcq -96(%rsp), %r15 ## 8-byte Folded Reload - adcq -112(%rsp), %r14 ## 8-byte Folded Reload - adcq -104(%rsp), %r9 ## 8-byte Folded Reload - adcq $0, %rbx - addq %rbp, %r12 - adcq %r10, %rdi - adcq %r8, %r15 - adcq %r13, %r14 - adcq %r11, %r9 - adcq $0, %rbx - movq -88(%rsp), %r8 ## 8-byte Reload - imulq %r12, %r8 - movq %r8, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, -88(%rsp) ## 8-byte Spill - movq %rax, %rcx - movq %r8, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, -104(%rsp) ## 8-byte Spill - movq %rax, %rbp - movq %r8, %rax - 
mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, -112(%rsp) ## 8-byte Spill - movq %rax, %rsi - movq %r8, %rax - movq %r8, %r13 - movq -40(%rsp), %r10 ## 8-byte Reload - mulq %r10 - movq %rdx, %r11 - movq %rax, %r8 - movq %r13, %rax - movq -24(%rsp), %r13 ## 8-byte Reload - mulq %r13 - addq %r12, %r8 - adcq %rdi, %rax - adcq %r15, %rsi - adcq %r14, %rbp - adcq %r9, %rcx - adcq $0, %rbx - addq %r11, %rax - adcq %rdx, %rsi - adcq -112(%rsp), %rbp ## 8-byte Folded Reload - adcq -104(%rsp), %rcx ## 8-byte Folded Reload - adcq -88(%rsp), %rbx ## 8-byte Folded Reload - movq %rax, %r11 - subq %r10, %r11 - movq %rsi, %r10 - sbbq %r13, %r10 - movq %rbp, %r8 - sbbq -80(%rsp), %r8 ## 8-byte Folded Reload - movq %rcx, %r9 - sbbq -72(%rsp), %r9 ## 8-byte Folded Reload - movq %rbx, %rdx - sbbq -64(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, %rdi - sarq $63, %rdi - cmovsq %rax, %r11 - movq -8(%rsp), %rax ## 8-byte Reload - movq %r11, (%rax) - cmovsq %rsi, %r10 - movq %r10, 8(%rax) - cmovsq %rbp, %r8 - movq %r8, 16(%rax) - cmovsq %rcx, %r9 - movq %r9, 24(%rax) - cmovsq %rbx, %rdx - movq %rdx, 32(%rax) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montRed5L - .p2align 4, 0x90 -_mcl_fp_montRed5L: ## @mcl_fp_montRed5L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rdi, -8(%rsp) ## 8-byte Spill - movq -8(%rcx), %rax - movq %rax, -104(%rsp) ## 8-byte Spill - movq (%rsi), %r9 - movq %r9, %rdi - imulq %rax, %rdi - movq 32(%rcx), %rdx - movq %rdx, -80(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq %rdx - movq %rax, %r8 - movq %rdx, %r13 - movq 24(%rcx), %rdx - movq %rdx, -88(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq %rdx - movq %rax, %r11 - movq %rdx, %r10 - movq 16(%rcx), %rdx - movq %rdx, -56(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq %rdx - movq %rax, %r14 - movq %rdx, %r15 - movq (%rcx), %rbp - movq %rbp, -40(%rsp) ## 8-byte Spill - movq 8(%rcx), %rcx - movq %rcx, -72(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq %rcx - movq %rdx, %r12 - movq %rax, %rbx - movq %rdi, %rax - mulq %rbp - movq %rdx, %rcx - addq %rbx, %rcx - adcq %r14, %r12 - adcq %r11, %r15 - adcq %r8, %r10 - adcq $0, %r13 - addq %r9, %rax - movq 72(%rsi), %rax - movq 64(%rsi), %rdx - adcq 8(%rsi), %rcx - adcq 16(%rsi), %r12 - adcq 24(%rsi), %r15 - adcq 32(%rsi), %r10 - adcq 40(%rsi), %r13 - movq %r13, -112(%rsp) ## 8-byte Spill - movq 56(%rsi), %rdi - movq 48(%rsi), %rsi - adcq $0, %rsi - movq %rsi, -24(%rsp) ## 8-byte Spill - adcq $0, %rdi - movq %rdi, -64(%rsp) ## 8-byte Spill - adcq $0, %rdx - movq %rdx, -96(%rsp) ## 8-byte Spill - adcq $0, %rax - movq %rax, -48(%rsp) ## 8-byte Spill - sbbq %r8, %r8 - andl $1, %r8d - movq %rcx, %rsi - movq -104(%rsp), %r9 ## 8-byte Reload - imulq %r9, %rsi - movq %rsi, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -32(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -88(%rsp) ## 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -16(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %rdi - movq %rsi, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %rbp - movq %rsi, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - addq %rbp, %rsi - adcq %rdi, %rbx - adcq -16(%rsp), %r13 ## 8-byte Folded Reload - adcq -32(%rsp), %r14 ## 8-byte Folded Reload - adcq $0, %r11 - addq %rcx, %rax - adcq %r12, %rsi - adcq %r15, %rbx - adcq %r10, %r13 - adcq 
-112(%rsp), %r14 ## 8-byte Folded Reload - adcq -24(%rsp), %r11 ## 8-byte Folded Reload - adcq $0, -64(%rsp) ## 8-byte Folded Spill - adcq $0, -96(%rsp) ## 8-byte Folded Spill - adcq $0, -48(%rsp) ## 8-byte Folded Spill - adcq $0, %r8 - movq %rsi, %rcx - imulq %r9, %rcx - movq %rcx, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, -112(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -88(%rsp) ## 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, -24(%rsp) ## 8-byte Spill - movq %rcx, %rax - movq -56(%rsp), %r9 ## 8-byte Reload - mulq %r9 - movq %rdx, %r15 - movq %rax, -32(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %rdi - movq %rcx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - addq %rdi, %rcx - adcq -32(%rsp), %r12 ## 8-byte Folded Reload - adcq -24(%rsp), %r15 ## 8-byte Folded Reload - adcq -112(%rsp), %r10 ## 8-byte Folded Reload - adcq $0, %rbp - addq %rsi, %rax - adcq %rbx, %rcx - adcq %r13, %r12 - adcq %r14, %r15 - adcq %r11, %r10 - adcq -64(%rsp), %rbp ## 8-byte Folded Reload - adcq $0, -96(%rsp) ## 8-byte Folded Spill - adcq $0, -48(%rsp) ## 8-byte Folded Spill - adcq $0, %r8 - movq %rcx, %rsi - imulq -104(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -64(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq -88(%rsp) ## 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -112(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq %r9 - movq %rdx, %r13 - movq %rax, %rbx - movq %rsi, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, %rdi - movq %rsi, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - addq %rdi, %rsi - adcq %rbx, %r9 - adcq -112(%rsp), %r13 ## 8-byte Folded Reload - adcq -64(%rsp), %r14 ## 8-byte Folded Reload - adcq $0, %r11 - addq %rcx, %rax - adcq %r12, %rsi - adcq %r15, %r9 - adcq %r10, %r13 - adcq %rbp, %r14 - adcq -96(%rsp), %r11 ## 8-byte Folded Reload - adcq $0, -48(%rsp) ## 8-byte Folded Spill - adcq $0, %r8 - movq -104(%rsp), %rdi ## 8-byte Reload - imulq %rsi, %rdi - movq %rdi, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq -88(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, -104(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq -56(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r15 - movq %rdi, %rax - movq %rdi, %r10 - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r12 - movq %r10, %rax - movq -40(%rsp), %r10 ## 8-byte Reload - mulq %r10 - addq %r12, %rdx - adcq %r15, %rdi - adcq -104(%rsp), %rbx ## 8-byte Folded Reload - adcq -96(%rsp), %rcx ## 8-byte Folded Reload - adcq $0, %rbp - addq %rsi, %rax - adcq %r9, %rdx - adcq %r13, %rdi - adcq %r14, %rbx - adcq %r11, %rcx - adcq -48(%rsp), %rbp ## 8-byte Folded Reload - adcq $0, %r8 - movq %rdx, %rax - subq %r10, %rax - movq %rdi, %rsi - sbbq -72(%rsp), %rsi ## 8-byte Folded Reload - movq %rbx, %r9 - sbbq -56(%rsp), %r9 ## 8-byte Folded Reload - movq %rcx, %r10 - sbbq -88(%rsp), %r10 ## 8-byte Folded Reload - movq %rbp, %r11 - sbbq -80(%rsp), %r11 ## 8-byte Folded Reload - sbbq $0, %r8 - andl $1, %r8d - cmovneq %rbp, %r11 - testb %r8b, %r8b - cmovneq %rdx, %rax - movq -8(%rsp), %rdx ## 8-byte Reload - movq %rax, (%rdx) - cmovneq %rdi, %rsi - movq %rsi, 8(%rdx) - cmovneq %rbx, %r9 - movq %r9, 16(%rdx) - cmovneq %rcx, %r10 - movq %r10, 24(%rdx) - movq %r11, 
32(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_addPre5L - .p2align 4, 0x90 -_mcl_fp_addPre5L: ## @mcl_fp_addPre5L -## BB#0: - movq 32(%rdx), %r8 - movq 24(%rdx), %r9 - movq 24(%rsi), %r11 - movq 32(%rsi), %r10 - movq 16(%rdx), %rcx - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rcx - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %rcx, 16(%rdi) - adcq %r9, %r11 - movq %r11, 24(%rdi) - adcq %r8, %r10 - movq %r10, 32(%rdi) - sbbq %rax, %rax - andl $1, %eax - retq - - .globl _mcl_fp_subPre5L - .p2align 4, 0x90 -_mcl_fp_subPre5L: ## @mcl_fp_subPre5L -## BB#0: - pushq %rbx - movq 32(%rsi), %r10 - movq 24(%rdx), %r8 - movq 32(%rdx), %r9 - movq 24(%rsi), %r11 - movq 16(%rsi), %rcx - movq (%rsi), %rbx - movq 8(%rsi), %rsi - xorl %eax, %eax - subq (%rdx), %rbx - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %rcx - movq %rbx, (%rdi) - movq %rsi, 8(%rdi) - movq %rcx, 16(%rdi) - sbbq %r8, %r11 - movq %r11, 24(%rdi) - sbbq %r9, %r10 - movq %r10, 32(%rdi) - sbbq $0, %rax - andl $1, %eax - popq %rbx - retq - - .globl _mcl_fp_shr1_5L - .p2align 4, 0x90 -_mcl_fp_shr1_5L: ## @mcl_fp_shr1_5L -## BB#0: - movq 32(%rsi), %r8 - movq 24(%rsi), %rcx - movq 16(%rsi), %rdx - movq (%rsi), %rax - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rax - movq %rax, (%rdi) - shrdq $1, %rdx, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rcx, %rdx - movq %rdx, 16(%rdi) - shrdq $1, %r8, %rcx - movq %rcx, 24(%rdi) - shrq %r8 - movq %r8, 32(%rdi) - retq - - .globl _mcl_fp_add5L - .p2align 4, 0x90 -_mcl_fp_add5L: ## @mcl_fp_add5L -## BB#0: - pushq %rbx - movq 32(%rdx), %r11 - movq 24(%rdx), %rbx - movq 24(%rsi), %r9 - movq 32(%rsi), %r8 - movq 16(%rdx), %r10 - movq (%rdx), %rax - movq 8(%rdx), %rdx - addq (%rsi), %rax - adcq 8(%rsi), %rdx - adcq 16(%rsi), %r10 - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r10, 16(%rdi) - adcq %rbx, %r9 - movq %r9, 24(%rdi) - adcq %r11, %r8 - movq %r8, 32(%rdi) - sbbq %rsi, %rsi - andl $1, %esi - subq (%rcx), %rax - sbbq 8(%rcx), %rdx - sbbq 16(%rcx), %r10 - sbbq 24(%rcx), %r9 - sbbq 32(%rcx), %r8 - sbbq $0, %rsi - testb $1, %sil - jne LBB74_2 -## BB#1: ## %nocarry - movq %rax, (%rdi) - movq %rdx, 8(%rdi) - movq %r10, 16(%rdi) - movq %r9, 24(%rdi) - movq %r8, 32(%rdi) -LBB74_2: ## %carry - popq %rbx - retq - - .globl _mcl_fp_addNF5L - .p2align 4, 0x90 -_mcl_fp_addNF5L: ## @mcl_fp_addNF5L -## BB#0: - pushq %r15 - pushq %r14 - pushq %rbx - movq 32(%rdx), %r8 - movq 24(%rdx), %r9 - movq 16(%rdx), %r10 - movq (%rdx), %r14 - movq 8(%rdx), %r11 - addq (%rsi), %r14 - adcq 8(%rsi), %r11 - adcq 16(%rsi), %r10 - adcq 24(%rsi), %r9 - adcq 32(%rsi), %r8 - movq %r14, %rsi - subq (%rcx), %rsi - movq %r11, %rdx - sbbq 8(%rcx), %rdx - movq %r10, %rbx - sbbq 16(%rcx), %rbx - movq %r9, %r15 - sbbq 24(%rcx), %r15 - movq %r8, %rax - sbbq 32(%rcx), %rax - movq %rax, %rcx - sarq $63, %rcx - cmovsq %r14, %rsi - movq %rsi, (%rdi) - cmovsq %r11, %rdx - movq %rdx, 8(%rdi) - cmovsq %r10, %rbx - movq %rbx, 16(%rdi) - cmovsq %r9, %r15 - movq %r15, 24(%rdi) - cmovsq %r8, %rax - movq %rax, 32(%rdi) - popq %rbx - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_sub5L - .p2align 4, 0x90 -_mcl_fp_sub5L: ## @mcl_fp_sub5L -## BB#0: - pushq %r14 - pushq %rbx - movq 32(%rsi), %r8 - movq 24(%rdx), %r11 - movq 32(%rdx), %r14 - movq 24(%rsi), %r9 - movq 16(%rsi), %r10 - movq (%rsi), %rax - movq 8(%rsi), %rsi - xorl %ebx, %ebx - subq (%rdx), %rax - sbbq 8(%rdx), %rsi - sbbq 16(%rdx), %r10 - movq %rax, (%rdi) - movq %rsi, 8(%rdi) - movq %r10, 16(%rdi) - 
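## mcl_fp_sub5L, continued: the borrow propagates through limbs 3 and 4; if the final borrow is set, the %carry block adds the modulus back before returning, otherwise je LBB76_2 skips straight to %nocarry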
sbbq %r11, %r9 - movq %r9, 24(%rdi) - sbbq %r14, %r8 - movq %r8, 32(%rdi) - sbbq $0, %rbx - testb $1, %bl - je LBB76_2 -## BB#1: ## %carry - movq 32(%rcx), %r11 - movq 24(%rcx), %r14 - movq 8(%rcx), %rdx - movq 16(%rcx), %rbx - addq (%rcx), %rax - movq %rax, (%rdi) - adcq %rsi, %rdx - movq %rdx, 8(%rdi) - adcq %r10, %rbx - movq %rbx, 16(%rdi) - adcq %r9, %r14 - movq %r14, 24(%rdi) - adcq %r8, %r11 - movq %r11, 32(%rdi) -LBB76_2: ## %nocarry - popq %rbx - popq %r14 - retq - - .globl _mcl_fp_subNF5L - .p2align 4, 0x90 -_mcl_fp_subNF5L: ## @mcl_fp_subNF5L -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 32(%rsi), %r13 - movdqu (%rdx), %xmm0 - movdqu 16(%rdx), %xmm1 - pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] - movd %xmm2, %r10 - movdqu (%rsi), %xmm2 - movdqu 16(%rsi), %xmm3 - pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1] - movd %xmm4, %r8 - movd %xmm1, %r11 - movd %xmm3, %r9 - pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] - movd %xmm1, %r14 - pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1] - movd %xmm1, %r15 - movd %xmm0, %rbx - movd %xmm2, %r12 - subq %rbx, %r12 - sbbq %r14, %r15 - sbbq %r11, %r9 - sbbq %r10, %r8 - sbbq 32(%rdx), %r13 - movq %r13, %rdx - sarq $63, %rdx - movq %rdx, %rbx - shldq $1, %r13, %rbx - movq 8(%rcx), %rsi - andq %rbx, %rsi - andq (%rcx), %rbx - movq 32(%rcx), %r10 - andq %rdx, %r10 - movq 24(%rcx), %rax - andq %rdx, %rax - rolq %rdx - andq 16(%rcx), %rdx - addq %r12, %rbx - movq %rbx, (%rdi) - adcq %r15, %rsi - movq %rsi, 8(%rdi) - adcq %r9, %rdx - movq %rdx, 16(%rdi) - adcq %r8, %rax - movq %rax, 24(%rdi) - adcq %r13, %r10 - movq %r10, 32(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fpDbl_add5L - .p2align 4, 0x90 -_mcl_fpDbl_add5L: ## @mcl_fpDbl_add5L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 72(%rdx), %rax - movq %rax, -8(%rsp) ## 8-byte Spill - movq 64(%rdx), %r11 - movq 56(%rdx), %r14 - movq 48(%rdx), %r15 - movq 24(%rsi), %rbp - movq 32(%rsi), %r13 - movq 16(%rdx), %r12 - movq (%rdx), %rbx - movq 8(%rdx), %rax - addq (%rsi), %rbx - adcq 8(%rsi), %rax - adcq 16(%rsi), %r12 - adcq 24(%rdx), %rbp - adcq 32(%rdx), %r13 - movq 40(%rdx), %r9 - movq %rbx, (%rdi) - movq 72(%rsi), %r8 - movq %rax, 8(%rdi) - movq 64(%rsi), %r10 - movq %r12, 16(%rdi) - movq 56(%rsi), %r12 - movq %rbp, 24(%rdi) - movq 48(%rsi), %rbp - movq 40(%rsi), %rbx - movq %r13, 32(%rdi) - adcq %r9, %rbx - adcq %r15, %rbp - adcq %r14, %r12 - adcq %r11, %r10 - adcq -8(%rsp), %r8 ## 8-byte Folded Reload - sbbq %rsi, %rsi - andl $1, %esi - movq %rbx, %rax - subq (%rcx), %rax - movq %rbp, %rdx - sbbq 8(%rcx), %rdx - movq %r12, %r9 - sbbq 16(%rcx), %r9 - movq %r10, %r11 - sbbq 24(%rcx), %r11 - movq %r8, %r14 - sbbq 32(%rcx), %r14 - sbbq $0, %rsi - andl $1, %esi - cmovneq %rbx, %rax - movq %rax, 40(%rdi) - testb %sil, %sil - cmovneq %rbp, %rdx - movq %rdx, 48(%rdi) - cmovneq %r12, %r9 - movq %r9, 56(%rdi) - cmovneq %r10, %r11 - movq %r11, 64(%rdi) - cmovneq %r8, %r14 - movq %r14, 72(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sub5L - .p2align 4, 0x90 -_mcl_fpDbl_sub5L: ## @mcl_fpDbl_sub5L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 72(%rdx), %r9 - movq 64(%rdx), %r10 - movq 56(%rdx), %r14 - movq 16(%rsi), %r8 - movq (%rsi), %r15 - movq 8(%rsi), %r11 - xorl %eax, %eax - subq (%rdx), %r15 - sbbq 8(%rdx), %r11 - sbbq 16(%rdx), %r8 - movq 24(%rsi), %r12 - 
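## mcl_fpDbl_sub5L, continued: the low five words of the 10-word difference are stored unconditionally; the final borrow is reduced to a flag that selects (cmoveq) either 0 or the modulus limbs for the additions into the high five words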
sbbq 24(%rdx), %r12 - movq %r15, (%rdi) - movq 32(%rsi), %rbx - sbbq 32(%rdx), %rbx - movq %r11, 8(%rdi) - movq 48(%rdx), %r15 - movq 40(%rdx), %rdx - movq %r8, 16(%rdi) - movq 72(%rsi), %r8 - movq %r12, 24(%rdi) - movq 64(%rsi), %r11 - movq %rbx, 32(%rdi) - movq 40(%rsi), %rbp - sbbq %rdx, %rbp - movq 56(%rsi), %r12 - movq 48(%rsi), %r13 - sbbq %r15, %r13 - sbbq %r14, %r12 - sbbq %r10, %r11 - sbbq %r9, %r8 - movl $0, %edx - sbbq $0, %rdx - andl $1, %edx - movq (%rcx), %rsi - cmoveq %rax, %rsi - testb %dl, %dl - movq 16(%rcx), %rdx - cmoveq %rax, %rdx - movq 8(%rcx), %rbx - cmoveq %rax, %rbx - movq 32(%rcx), %r9 - cmoveq %rax, %r9 - cmovneq 24(%rcx), %rax - addq %rbp, %rsi - movq %rsi, 40(%rdi) - adcq %r13, %rbx - movq %rbx, 48(%rdi) - adcq %r12, %rdx - movq %rdx, 56(%rdi) - adcq %r11, %rax - movq %rax, 64(%rdi) - adcq %r8, %r9 - movq %r9, 72(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mulUnitPre6L - .p2align 4, 0x90 -_mcl_fp_mulUnitPre6L: ## @mcl_fp_mulUnitPre6L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rcx, %rax - mulq 40(%rsi) - movq %rdx, %r9 - movq %rax, %r8 - movq %rcx, %rax - mulq 32(%rsi) - movq %rdx, %r10 - movq %rax, %r11 - movq %rcx, %rax - mulq 24(%rsi) - movq %rdx, %r15 - movq %rax, %r14 - movq %rcx, %rax - mulq 16(%rsi) - movq %rdx, %r13 - movq %rax, %r12 - movq %rcx, %rax - mulq 8(%rsi) - movq %rdx, %rbx - movq %rax, %rbp - movq %rcx, %rax - mulq (%rsi) - movq %rax, (%rdi) - addq %rbp, %rdx - movq %rdx, 8(%rdi) - adcq %r12, %rbx - movq %rbx, 16(%rdi) - adcq %r14, %r13 - movq %r13, 24(%rdi) - adcq %r11, %r15 - movq %r15, 32(%rdi) - adcq %r8, %r10 - movq %r10, 40(%rdi) - adcq $0, %r9 - movq %r9, 48(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_mulPre6L - .p2align 4, 0x90 -_mcl_fpDbl_mulPre6L: ## @mcl_fpDbl_mulPre6L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, -64(%rsp) ## 8-byte Spill - movq %rsi, %r12 - movq %rdi, -16(%rsp) ## 8-byte Spill - movq (%r12), %rax - movq %rax, -72(%rsp) ## 8-byte Spill - movq (%rdx), %rsi - mulq %rsi - movq %rdx, -88(%rsp) ## 8-byte Spill - movq 24(%r12), %rbp - movq %rbp, -104(%rsp) ## 8-byte Spill - movq 32(%r12), %rbx - movq 40(%r12), %r11 - movq %rax, (%rdi) - movq %r11, %rax - mulq %rsi - movq %rdx, -120(%rsp) ## 8-byte Spill - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq %rsi - movq %rdx, %rcx - movq %rax, -112(%rsp) ## 8-byte Spill - movq %rbp, %rax - mulq %rsi - movq %rax, %r9 - movq %rdx, %rdi - movq 16(%r12), %r8 - movq %r8, %rax - mulq %rsi - movq %rax, %r14 - movq %rdx, %rbp - movq 8(%r12), %r10 - movq %r10, %rax - mulq %rsi - movq %rdx, %r15 - movq %rax, %r13 - addq -88(%rsp), %r13 ## 8-byte Folded Reload - adcq %r14, %r15 - adcq %r9, %rbp - adcq -112(%rsp), %rdi ## 8-byte Folded Reload - adcq -96(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -112(%rsp) ## 8-byte Spill - movq -120(%rsp), %rsi ## 8-byte Reload - adcq $0, %rsi - movq -64(%rsp), %r9 ## 8-byte Reload - movq 8(%r9), %rcx - movq %r11, %rax - mulq %rcx - movq %rdx, -88(%rsp) ## 8-byte Spill - movq %rax, -120(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq %rcx - movq %rdx, -96(%rsp) ## 8-byte Spill - movq %rax, %r11 - movq -104(%rsp), %rax ## 8-byte Reload - mulq %rcx - movq %rdx, -104(%rsp) ## 8-byte Spill - movq %rax, %r14 - movq %r8, %rax - mulq %rcx - movq %rdx, -80(%rsp) ## 8-byte Spill - movq %rax, %r8 - 
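## still in the b[1] column of mcl_fpDbl_mulPre6L: the a[1]*b[1] and a[0]*b[1] products below complete the column, which is then folded into the running b[0] row (word 0 of the product was already stored at (%rdi))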
movq %r10, %rax - mulq %rcx - movq %rdx, %r10 - movq %rax, %rbx - movq -72(%rsp), %rax ## 8-byte Reload - mulq %rcx - addq %r13, %rax - movq -16(%rsp), %r13 ## 8-byte Reload - movq %rax, 8(%r13) - adcq %r15, %rbx - adcq %rbp, %r8 - adcq %rdi, %r14 - adcq -112(%rsp), %r11 ## 8-byte Folded Reload - movq -120(%rsp), %rax ## 8-byte Reload - adcq %rsi, %rax - sbbq %rsi, %rsi - andl $1, %esi - addq %rdx, %rbx - adcq %r10, %r8 - adcq -80(%rsp), %r14 ## 8-byte Folded Reload - adcq -104(%rsp), %r11 ## 8-byte Folded Reload - adcq -96(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -120(%rsp) ## 8-byte Spill - adcq -88(%rsp), %rsi ## 8-byte Folded Reload - movq 40(%r12), %rax - movq %rax, -88(%rsp) ## 8-byte Spill - movq 16(%r9), %rcx - mulq %rcx - movq %rax, -72(%rsp) ## 8-byte Spill - movq %rdx, -112(%rsp) ## 8-byte Spill - movq 32(%r12), %rax - movq %rax, -96(%rsp) ## 8-byte Spill - mulq %rcx - movq %rax, %r10 - movq %rdx, -40(%rsp) ## 8-byte Spill - movq 24(%r12), %rax - movq %rax, -104(%rsp) ## 8-byte Spill - mulq %rcx - movq %rax, %r9 - movq %rdx, -48(%rsp) ## 8-byte Spill - movq 16(%r12), %rax - movq %rax, -80(%rsp) ## 8-byte Spill - mulq %rcx - movq %rax, %rbp - movq %rdx, -56(%rsp) ## 8-byte Spill - movq 8(%r12), %rax - movq %rax, -32(%rsp) ## 8-byte Spill - mulq %rcx - movq %rax, %rdi - movq %rdx, %r15 - movq (%r12), %rax - movq %rax, -24(%rsp) ## 8-byte Spill - mulq %rcx - addq %rbx, %rax - movq %rax, 16(%r13) - adcq %r8, %rdi - adcq %r14, %rbp - adcq %r11, %r9 - adcq -120(%rsp), %r10 ## 8-byte Folded Reload - movq -72(%rsp), %rax ## 8-byte Reload - adcq %rsi, %rax - sbbq %rcx, %rcx - andl $1, %ecx - addq %rdx, %rdi - adcq %r15, %rbp - adcq -56(%rsp), %r9 ## 8-byte Folded Reload - adcq -48(%rsp), %r10 ## 8-byte Folded Reload - adcq -40(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -72(%rsp) ## 8-byte Spill - adcq -112(%rsp), %rcx ## 8-byte Folded Reload - movq -64(%rsp), %rbx ## 8-byte Reload - movq 24(%rbx), %rsi - movq -88(%rsp), %rax ## 8-byte Reload - mulq %rsi - movq %rdx, -88(%rsp) ## 8-byte Spill - movq %rax, -120(%rsp) ## 8-byte Spill - movq -96(%rsp), %rax ## 8-byte Reload - mulq %rsi - movq %rdx, -96(%rsp) ## 8-byte Spill - movq %rax, %r14 - movq -104(%rsp), %rax ## 8-byte Reload - mulq %rsi - movq %rdx, -104(%rsp) ## 8-byte Spill - movq %rax, %r15 - movq -80(%rsp), %rax ## 8-byte Reload - mulq %rsi - movq %rdx, -112(%rsp) ## 8-byte Spill - movq %rax, %r13 - movq -32(%rsp), %rax ## 8-byte Reload - mulq %rsi - movq %rdx, %r8 - movq %rax, %r11 - movq -24(%rsp), %rax ## 8-byte Reload - mulq %rsi - addq %rdi, %rax - movq -16(%rsp), %rsi ## 8-byte Reload - movq %rax, 24(%rsi) - adcq %rbp, %r11 - adcq %r9, %r13 - adcq %r10, %r15 - adcq -72(%rsp), %r14 ## 8-byte Folded Reload - movq -120(%rsp), %rax ## 8-byte Reload - adcq %rcx, %rax - sbbq %rcx, %rcx - andl $1, %ecx - addq %rdx, %r11 - adcq %r8, %r13 - adcq -112(%rsp), %r15 ## 8-byte Folded Reload - adcq -104(%rsp), %r14 ## 8-byte Folded Reload - adcq -96(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -120(%rsp) ## 8-byte Spill - adcq -88(%rsp), %rcx ## 8-byte Folded Reload - movq 40(%r12), %rax - movq %rax, -80(%rsp) ## 8-byte Spill - movq 32(%rbx), %rdi - mulq %rdi - movq %rax, %r9 - movq %rdx, -72(%rsp) ## 8-byte Spill - movq 32(%r12), %rax - movq %rax, -56(%rsp) ## 8-byte Spill - mulq %rdi - movq %rax, %r10 - movq %rdx, -88(%rsp) ## 8-byte Spill - movq 24(%r12), %rax - movq %rax, -32(%rsp) ## 8-byte Spill - mulq %rdi - movq %rax, %r8 - movq %rdx, -96(%rsp) ## 8-byte Spill - movq 16(%r12), %rax - movq %rax, -40(%rsp) ## 8-byte 
Spill - mulq %rdi - movq %rax, %rbx - movq %rdx, -104(%rsp) ## 8-byte Spill - movq (%r12), %rbp - movq 8(%r12), %rax - movq %rax, -8(%rsp) ## 8-byte Spill - mulq %rdi - movq %rdx, -112(%rsp) ## 8-byte Spill - movq %rax, %r12 - movq %rbp, %rax - mulq %rdi - movq %rdx, -48(%rsp) ## 8-byte Spill - addq %r11, %rax - movq %rax, 32(%rsi) - adcq %r13, %r12 - adcq %r15, %rbx - adcq %r14, %r8 - adcq -120(%rsp), %r10 ## 8-byte Folded Reload - adcq %rcx, %r9 - movq -64(%rsp), %rax ## 8-byte Reload - movq 40(%rax), %rcx - sbbq %rsi, %rsi - movq -80(%rsp), %rax ## 8-byte Reload - mulq %rcx - movq %rdx, -64(%rsp) ## 8-byte Spill - movq %rax, -24(%rsp) ## 8-byte Spill - movq -56(%rsp), %rax ## 8-byte Reload - mulq %rcx - movq %rdx, -120(%rsp) ## 8-byte Spill - movq %rax, %r15 - movq -8(%rsp), %rax ## 8-byte Reload - mulq %rcx - movq %rdx, -80(%rsp) ## 8-byte Spill - movq %rax, %r11 - movq %rbp, %rax - mulq %rcx - movq %rdx, %rbp - movq %rax, %rdi - movq -32(%rsp), %rax ## 8-byte Reload - mulq %rcx - movq %rdx, %r13 - movq %rax, %r14 - movq -40(%rsp), %rax ## 8-byte Reload - mulq %rcx - andl $1, %esi - addq -48(%rsp), %r12 ## 8-byte Folded Reload - adcq -112(%rsp), %rbx ## 8-byte Folded Reload - adcq -104(%rsp), %r8 ## 8-byte Folded Reload - adcq -96(%rsp), %r10 ## 8-byte Folded Reload - adcq -88(%rsp), %r9 ## 8-byte Folded Reload - adcq -72(%rsp), %rsi ## 8-byte Folded Reload - addq %rdi, %r12 - movq -16(%rsp), %rcx ## 8-byte Reload - movq %r12, 40(%rcx) - adcq %r11, %rbx - adcq %rax, %r8 - adcq %r14, %r10 - adcq %r15, %r9 - adcq -24(%rsp), %rsi ## 8-byte Folded Reload - sbbq %rax, %rax - andl $1, %eax - addq %rbp, %rbx - movq %rbx, 48(%rcx) - adcq -80(%rsp), %r8 ## 8-byte Folded Reload - movq %r8, 56(%rcx) - adcq %rdx, %r10 - movq %r10, 64(%rcx) - adcq %r13, %r9 - movq %r9, 72(%rcx) - adcq -120(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 80(%rcx) - adcq -64(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 88(%rcx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sqrPre6L - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre6L: ## @mcl_fpDbl_sqrPre6L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, -48(%rsp) ## 8-byte Spill - movq 16(%rsi), %r8 - movq %r8, -120(%rsp) ## 8-byte Spill - movq 24(%rsi), %r11 - movq %r11, -112(%rsp) ## 8-byte Spill - movq 32(%rsi), %r12 - movq 40(%rsi), %r9 - movq (%rsi), %rcx - movq %rcx, %rax - mulq %rcx - movq %rdx, %rbp - movq %rax, (%rdi) - movq %r9, %rax - mulq %rcx - movq %rdx, %rbx - movq %rax, -128(%rsp) ## 8-byte Spill - movq %r12, %rax - mulq %rcx - movq %rdx, %r10 - movq %rax, %r13 - movq %r11, %rax - mulq %rcx - movq %rdx, %rdi - movq %rax, %r15 - movq %r8, %rax - mulq %rcx - movq %rax, %r11 - movq %rdx, %r14 - movq 8(%rsi), %r8 - movq %r8, %rax - mulq %rcx - movq %rdx, -56(%rsp) ## 8-byte Spill - movq %rax, %rcx - addq %rcx, %rbp - adcq %rdx, %r11 - adcq %r15, %r14 - adcq %r13, %rdi - adcq -128(%rsp), %r10 ## 8-byte Folded Reload - adcq $0, %rbx - movq %rbx, -72(%rsp) ## 8-byte Spill - movq %r9, %rax - mulq %r8 - movq %rdx, -128(%rsp) ## 8-byte Spill - movq %rax, %r13 - movq %r12, %rax - mulq %r8 - movq %rdx, -64(%rsp) ## 8-byte Spill - movq %rax, %r9 - movq -112(%rsp), %rax ## 8-byte Reload - mulq %r8 - movq %rdx, -112(%rsp) ## 8-byte Spill - movq %rax, %r15 - movq -120(%rsp), %rax ## 8-byte Reload - mulq %r8 - movq %rdx, -120(%rsp) ## 8-byte Spill - movq %rax, %r12 - movq %r8, %rax - mulq %r8 - movq %rax, %rbx - addq %rcx, %rbp - movq -48(%rsp), %rax ## 8-byte Reload - 
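## mcl_fpDbl_sqrPre6L: word 1 of the square is hi(a0*a0) + 2*lo(a0*a1); the doubling comes from adding the lo(a0*a1) cross product once in each of the two symmetric passes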
movq %rbp, 8(%rax) - adcq %r11, %rbx - adcq %r14, %r12 - adcq %rdi, %r15 - adcq %r10, %r9 - movq %r13, %rax - adcq -72(%rsp), %rax ## 8-byte Folded Reload - sbbq %r13, %r13 - andl $1, %r13d - addq -56(%rsp), %rbx ## 8-byte Folded Reload - adcq %rdx, %r12 - adcq -120(%rsp), %r15 ## 8-byte Folded Reload - adcq -112(%rsp), %r9 ## 8-byte Folded Reload - adcq -64(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -88(%rsp) ## 8-byte Spill - adcq -128(%rsp), %r13 ## 8-byte Folded Reload - movq 40(%rsi), %rax - movq %rax, -56(%rsp) ## 8-byte Spill - movq 16(%rsi), %rdi - mulq %rdi - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rdx, -112(%rsp) ## 8-byte Spill - movq 32(%rsi), %rax - movq %rax, -64(%rsp) ## 8-byte Spill - mulq %rdi - movq %rax, %r11 - movq %rdx, -80(%rsp) ## 8-byte Spill - movq 24(%rsi), %rbp - movq %rbp, %rax - mulq %rdi - movq %rax, %r8 - movq %r8, -24(%rsp) ## 8-byte Spill - movq %rdx, -128(%rsp) ## 8-byte Spill - movq 8(%rsi), %rax - movq %rax, -120(%rsp) ## 8-byte Spill - mulq %rdi - movq %rax, %r10 - movq %rdx, -32(%rsp) ## 8-byte Spill - movq (%rsi), %rax - movq %rax, -72(%rsp) ## 8-byte Spill - mulq %rdi - movq %rdx, -104(%rsp) ## 8-byte Spill - movq %rax, %r14 - movq %rdi, %rax - mulq %rdi - movq %rax, %rcx - addq %rbx, %r14 - movq -48(%rsp), %rax ## 8-byte Reload - movq %r14, 16(%rax) - adcq %r12, %r10 - adcq %r15, %rcx - adcq %r8, %r9 - adcq -88(%rsp), %r11 ## 8-byte Folded Reload - movq -96(%rsp), %r8 ## 8-byte Reload - adcq %r13, %r8 - sbbq %rdi, %rdi - andl $1, %edi - addq -104(%rsp), %r10 ## 8-byte Folded Reload - adcq -32(%rsp), %rcx ## 8-byte Folded Reload - adcq %rdx, %r9 - adcq -128(%rsp), %r11 ## 8-byte Folded Reload - adcq -80(%rsp), %r8 ## 8-byte Folded Reload - adcq -112(%rsp), %rdi ## 8-byte Folded Reload - movq -56(%rsp), %rax ## 8-byte Reload - mulq %rbp - movq %rdx, -80(%rsp) ## 8-byte Spill - movq %rax, %r12 - movq -64(%rsp), %rax ## 8-byte Reload - mulq %rbp - movq %rdx, -88(%rsp) ## 8-byte Spill - movq %rax, %r15 - movq -120(%rsp), %rax ## 8-byte Reload - mulq %rbp - movq %rdx, -96(%rsp) ## 8-byte Spill - movq %rax, %r14 - movq -72(%rsp), %rax ## 8-byte Reload - mulq %rbp - movq %rdx, -40(%rsp) ## 8-byte Spill - movq %rax, %rbx - movq %rbp, %rax - mulq %rbp - movq %rax, %r13 - movq %rdx, -104(%rsp) ## 8-byte Spill - addq %r10, %rbx - movq -48(%rsp), %rax ## 8-byte Reload - movq %rbx, 24(%rax) - adcq %rcx, %r14 - adcq -24(%rsp), %r9 ## 8-byte Folded Reload - adcq %r11, %r13 - adcq %r8, %r15 - adcq %rdi, %r12 - sbbq %rcx, %rcx - movq 8(%rsi), %rbp - movq 40(%rsi), %rbx - movq %rbp, %rax - mulq %rbx - movq %rax, -112(%rsp) ## 8-byte Spill - movq %rdx, -56(%rsp) ## 8-byte Spill - movq (%rsi), %rdi - movq %rdi, %rax - mulq %rbx - movq %rax, -72(%rsp) ## 8-byte Spill - movq %rdx, -64(%rsp) ## 8-byte Spill - movq 32(%rsi), %r10 - movq %rbp, %rax - mulq %r10 - movq %rdx, -120(%rsp) ## 8-byte Spill - movq %rax, -32(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq %r10 - movq %rax, -8(%rsp) ## 8-byte Spill - movq %rdx, -24(%rsp) ## 8-byte Spill - andl $1, %ecx - addq -40(%rsp), %r14 ## 8-byte Folded Reload - adcq -96(%rsp), %r9 ## 8-byte Folded Reload - adcq -128(%rsp), %r13 ## 8-byte Folded Reload - adcq -104(%rsp), %r15 ## 8-byte Folded Reload - adcq -88(%rsp), %r12 ## 8-byte Folded Reload - adcq -80(%rsp), %rcx ## 8-byte Folded Reload - movq 24(%rsi), %rdi - movq %rdi, %rax - mulq %rbx - movq %rdx, -128(%rsp) ## 8-byte Spill - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq %r10 - movq %rax, %rbp - movq %rdx, -40(%rsp) ## 8-byte Spill - movq 
16(%rsi), %rsi - movq %rsi, %rax - mulq %rbx - movq %rdx, -80(%rsp) ## 8-byte Spill - movq %rax, -104(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq %r10 - movq %rdx, %r11 - movq %rax, %rsi - movq %rbx, %rax - mulq %r10 - movq %rdx, -16(%rsp) ## 8-byte Spill - movq %rax, %rdi - movq %rbx, %rax - mulq %rbx - movq %rdx, -88(%rsp) ## 8-byte Spill - movq %rax, %rbx - movq %r10, %rax - mulq %r10 - movq %rdx, %r8 - addq -8(%rsp), %r14 ## 8-byte Folded Reload - movq -48(%rsp), %rdx ## 8-byte Reload - movq %r14, 32(%rdx) - adcq -32(%rsp), %r9 ## 8-byte Folded Reload - adcq %r13, %rsi - adcq %r15, %rbp - adcq %r12, %rax - adcq %rdi, %rcx - sbbq %r10, %r10 - andl $1, %r10d - addq -24(%rsp), %r9 ## 8-byte Folded Reload - adcq -120(%rsp), %rsi ## 8-byte Folded Reload - adcq %r11, %rbp - adcq -40(%rsp), %rax ## 8-byte Folded Reload - adcq %r8, %rcx - movq -16(%rsp), %r8 ## 8-byte Reload - adcq %r8, %r10 - addq -72(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 40(%rdx) - adcq -112(%rsp), %rsi ## 8-byte Folded Reload - adcq -104(%rsp), %rbp ## 8-byte Folded Reload - adcq -96(%rsp), %rax ## 8-byte Folded Reload - adcq %rdi, %rcx - adcq %rbx, %r10 - sbbq %rdi, %rdi - andl $1, %edi - addq -64(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 48(%rdx) - adcq -56(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 56(%rdx) - adcq -80(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 64(%rdx) - adcq -128(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 72(%rdx) - adcq %r8, %r10 - movq %r10, 80(%rdx) - adcq -88(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 88(%rdx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mont6L - .p2align 4, 0x90 -_mcl_fp_mont6L: ## @mcl_fp_mont6L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $48, %rsp - movq %rdx, -56(%rsp) ## 8-byte Spill - movq %rdi, 40(%rsp) ## 8-byte Spill - movq 40(%rsi), %rax - movq %rax, -64(%rsp) ## 8-byte Spill - movq (%rdx), %rdi - mulq %rdi - movq %rax, %r10 - movq %rdx, %r11 - movq 32(%rsi), %rax - movq %rax, -72(%rsp) ## 8-byte Spill - mulq %rdi - movq %rax, %r14 - movq %rdx, %r15 - movq 24(%rsi), %rax - movq %rax, -80(%rsp) ## 8-byte Spill - mulq %rdi - movq %rax, %r8 - movq %rdx, %rbx - movq 16(%rsi), %rax - movq %rax, -40(%rsp) ## 8-byte Spill - mulq %rdi - movq %rax, %r9 - movq %rdx, %r12 - movq (%rsi), %rbp - movq %rbp, 32(%rsp) ## 8-byte Spill - movq 8(%rsi), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - mulq %rdi - movq %rdx, %r13 - movq %rax, %rsi - movq %rbp, %rax - mulq %rdi - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rdx, %rdi - addq %rsi, %rdi - adcq %r9, %r13 - adcq %r8, %r12 - adcq %r14, %rbx - movq %rbx, -88(%rsp) ## 8-byte Spill - adcq %r10, %r15 - movq %r15, -120(%rsp) ## 8-byte Spill - adcq $0, %r11 - movq %r11, -112(%rsp) ## 8-byte Spill - movq -8(%rcx), %rdx - movq %rdx, -32(%rsp) ## 8-byte Spill - movq %rax, %rbx - imulq %rdx, %rbx - movq 40(%rcx), %rdx - movq %rdx, 16(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq %rdx - movq %rax, -104(%rsp) ## 8-byte Spill - movq %rdx, -128(%rsp) ## 8-byte Spill - movq 32(%rcx), %rdx - movq %rdx, 8(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq %rdx - movq %rax, %r9 - movq %rdx, %r14 - movq 24(%rcx), %rdx - movq %rdx, (%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq %rdx - movq %rax, %r8 - movq %rdx, %r15 - movq 16(%rcx), %rdx - movq %rdx, -8(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq %rdx - movq %rax, %r10 - movq %rdx, %r11 - movq (%rcx), %rsi - movq %rsi, -24(%rsp) ## 8-byte Spill - movq 8(%rcx), 
%rcx - movq %rcx, -16(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq %rcx - movq %rdx, %rbp - movq %rax, %rcx - movq %rbx, %rax - mulq %rsi - movq %rdx, %rbx - addq %rcx, %rbx - adcq %r10, %rbp - adcq %r8, %r11 - adcq %r9, %r15 - adcq -104(%rsp), %r14 ## 8-byte Folded Reload - movq -128(%rsp), %rcx ## 8-byte Reload - adcq $0, %rcx - addq -96(%rsp), %rax ## 8-byte Folded Reload - adcq %rdi, %rbx - adcq %r13, %rbp - adcq %r12, %r11 - adcq -88(%rsp), %r15 ## 8-byte Folded Reload - adcq -120(%rsp), %r14 ## 8-byte Folded Reload - adcq -112(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -128(%rsp) ## 8-byte Spill - sbbq %rsi, %rsi - andl $1, %esi - movq -56(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rdi - movq %rdi, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, -112(%rsp) ## 8-byte Spill - movq %rax, -88(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, -104(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %r9 - movq %rdi, %rax - mulq 24(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, %r10 - movq %rdi, %rax - mulq 32(%rsp) ## 8-byte Folded Reload - movq %rax, %r12 - movq %rdx, %rdi - addq %r10, %rdi - adcq %r9, %rcx - adcq -104(%rsp), %r13 ## 8-byte Folded Reload - adcq -96(%rsp), %r8 ## 8-byte Folded Reload - movq -120(%rsp), %rdx ## 8-byte Reload - adcq -88(%rsp), %rdx ## 8-byte Folded Reload - movq -112(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - addq %rbx, %r12 - adcq %rbp, %rdi - adcq %r11, %rcx - adcq %r15, %r13 - adcq %r14, %r8 - adcq -128(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - adcq %rsi, %rax - movq %rax, -112(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -88(%rsp) ## 8-byte Spill - movq %r12, %rbx - imulq -32(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, %rax - mulq 16(%rsp) ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq 8(%rsp) ## 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -104(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq (%rsp) ## 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, -48(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -8(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r10 - movq %rbx, %rax - mulq -16(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r11 - movq %rbx, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - addq %r11, %r9 - adcq %r10, %rbp - adcq -48(%rsp), %rsi ## 8-byte Folded Reload - adcq -104(%rsp), %r15 ## 8-byte Folded Reload - adcq -96(%rsp), %r14 ## 8-byte Folded Reload - movq -128(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq %r12, %rax - adcq %rdi, %r9 - adcq %rcx, %rbp - adcq %r13, %rsi - adcq %r8, %r15 - adcq -120(%rsp), %r14 ## 8-byte Folded Reload - adcq -112(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - adcq $0, -88(%rsp) ## 8-byte Folded Spill - movq -56(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rcx - movq %rcx, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, -112(%rsp) ## 8-byte Spill - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - movq %rax, -104(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -80(%rsp) ## 8-byte 
Folded Reload - movq %rdx, %r11 - movq %rax, %rdi - movq %rcx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %r8 - movq %rcx, %rax - mulq 24(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r10 - movq %rcx, %rax - mulq 32(%rsp) ## 8-byte Folded Reload - movq %rax, %r13 - movq %rdx, %rcx - addq %r10, %rcx - adcq %r8, %rbx - adcq %rdi, %r12 - adcq -104(%rsp), %r11 ## 8-byte Folded Reload - movq -120(%rsp), %rdx ## 8-byte Reload - adcq -96(%rsp), %rdx ## 8-byte Folded Reload - movq -112(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - addq %r9, %r13 - adcq %rbp, %rcx - adcq %rsi, %rbx - adcq %r15, %r12 - adcq %r14, %r11 - adcq -128(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - adcq -88(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -112(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -88(%rsp) ## 8-byte Spill - movq %r13, %rdi - imulq -32(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, %rax - mulq 16(%rsp) ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq 8(%rsp) ## 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -104(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq (%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -48(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq -8(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r15 - movq %rdi, %rax - mulq -16(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r10 - movq %rdi, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, %r8 - addq %r10, %r8 - adcq %r15, %rbp - adcq -48(%rsp), %rsi ## 8-byte Folded Reload - adcq -104(%rsp), %r9 ## 8-byte Folded Reload - adcq -96(%rsp), %r14 ## 8-byte Folded Reload - movq -128(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq %r13, %rax - adcq %rcx, %r8 - adcq %rbx, %rbp - adcq %r12, %rsi - adcq %r11, %r9 - adcq -120(%rsp), %r14 ## 8-byte Folded Reload - adcq -112(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - adcq $0, -88(%rsp) ## 8-byte Folded Spill - movq -56(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rcx - movq %rcx, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, -112(%rsp) ## 8-byte Spill - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - movq %rax, -104(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, %rdi - movq %rcx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %r10 - movq %rcx, %rax - mulq 24(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r12 - movq %rcx, %rax - mulq 32(%rsp) ## 8-byte Folded Reload - movq %rax, %r13 - movq %rdx, %rcx - addq %r12, %rcx - adcq %r10, %rbx - adcq %rdi, %r15 - adcq -104(%rsp), %r11 ## 8-byte Folded Reload - movq -120(%rsp), %rdx ## 8-byte Reload - adcq -96(%rsp), %rdx ## 8-byte Folded Reload - movq -112(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - addq %r8, %r13 - adcq %rbp, %rcx - adcq %rsi, %rbx - adcq %r9, %r15 - adcq %r14, %r11 - adcq -128(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - adcq -88(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -112(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -88(%rsp) ## 8-byte Spill - movq %r13, %rsi - imulq -32(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, %rax - mulq 16(%rsp) ## 8-byte Folded Reload - movq %rdx, 
-128(%rsp) ## 8-byte Spill - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq 8(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, -104(%rsp) ## 8-byte Spill - movq %rsi, %rax - mulq (%rsp) ## 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r10 - movq %rsi, %rax - mulq -8(%rsp) ## 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, %r8 - movq %rsi, %rax - mulq -16(%rsp) ## 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, %r9 - movq %rsi, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - addq %r9, %rsi - adcq %r8, %r12 - adcq %r10, %r14 - adcq -104(%rsp), %rdi ## 8-byte Folded Reload - adcq -96(%rsp), %rbp ## 8-byte Folded Reload - movq -128(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq %r13, %rax - adcq %rcx, %rsi - adcq %rbx, %r12 - adcq %r15, %r14 - adcq %r11, %rdi - adcq -120(%rsp), %rbp ## 8-byte Folded Reload - adcq -112(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - adcq $0, -88(%rsp) ## 8-byte Folded Spill - movq -56(%rsp), %rax ## 8-byte Reload - movq 32(%rax), %rcx - movq %rcx, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, -112(%rsp) ## 8-byte Spill - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - movq %rax, -104(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, -48(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %r11 - movq %rcx, %rax - mulq 24(%rsp) ## 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %r9 - movq %rcx, %rax - mulq 32(%rsp) ## 8-byte Folded Reload - movq %rax, %r8 - movq %rdx, %r13 - addq %r9, %r13 - adcq %r11, %r15 - adcq -48(%rsp), %r10 ## 8-byte Folded Reload - adcq -104(%rsp), %rbx ## 8-byte Folded Reload - movq -120(%rsp), %rcx ## 8-byte Reload - adcq -96(%rsp), %rcx ## 8-byte Folded Reload - movq -112(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - addq %rsi, %r8 - adcq %r12, %r13 - adcq %r14, %r15 - adcq %rdi, %r10 - adcq %rbp, %rbx - adcq -128(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -120(%rsp) ## 8-byte Spill - adcq -88(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -112(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -88(%rsp) ## 8-byte Spill - movq %r8, %rcx - imulq -32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, %rax - mulq 16(%rsp) ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq 8(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -104(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq (%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - movq %rax, -48(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -8(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %rdi - movq %rcx, %rax - mulq -16(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r12 - movq %rcx, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, %r14 - addq %r12, %r14 - adcq %rdi, %rbp - adcq -48(%rsp), %rsi ## 8-byte Folded Reload - adcq -104(%rsp), %r11 ## 8-byte Folded Reload - adcq -96(%rsp), %r9 ## 8-byte Folded Reload - movq -128(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq %r8, %rax - adcq %r13, %r14 - adcq %r15, %rbp - adcq %r10, %rsi - adcq %rbx, %r11 - adcq -120(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, -120(%rsp) ## 8-byte Spill - adcq -112(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - 
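## mcl_fp_mont6L, final round: the saved carry word is folded in, the last multiplier word b[5] is processed, and the closing reduction ends with the usual conditional subtraction of p via the cmovne chain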
movq -88(%rsp), %rdi ## 8-byte Reload - adcq $0, %rdi - movq -56(%rsp), %rax ## 8-byte Reload - movq 40(%rax), %rcx - movq %rcx, %rax - mulq -64(%rsp) ## 8-byte Folded Reload - movq %rdx, -56(%rsp) ## 8-byte Spill - movq %rax, -112(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -72(%rsp) ## 8-byte Folded Reload - movq %rdx, -64(%rsp) ## 8-byte Spill - movq %rax, -72(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -80(%rsp) ## 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, -80(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %rbx - movq %rcx, %rax - mulq 24(%rsp) ## 8-byte Folded Reload - movq %rdx, %r10 - movq %rax, %r9 - movq %rcx, %rax - mulq 32(%rsp) ## 8-byte Folded Reload - movq %rax, %r15 - movq %rdx, %r8 - addq %r9, %r8 - adcq %rbx, %r10 - adcq -80(%rsp), %r13 ## 8-byte Folded Reload - adcq -72(%rsp), %r12 ## 8-byte Folded Reload - movq -64(%rsp), %rax ## 8-byte Reload - adcq -112(%rsp), %rax ## 8-byte Folded Reload - movq -56(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq %r14, %r15 - adcq %rbp, %r8 - adcq %rsi, %r10 - adcq %r11, %r13 - adcq -120(%rsp), %r12 ## 8-byte Folded Reload - movq %r12, -72(%rsp) ## 8-byte Spill - adcq -128(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -64(%rsp) ## 8-byte Spill - adcq %rdi, %rdx - movq %rdx, -56(%rsp) ## 8-byte Spill - sbbq %rcx, %rcx - movq -32(%rsp), %rdi ## 8-byte Reload - imulq %r15, %rdi - movq %rdi, %rax - mulq 16(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, -32(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq 8(%rsp) ## 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, -80(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq (%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, -40(%rsp) ## 8-byte Spill - movq %rdi, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, %r9 - movq %rdi, %rax - mulq -8(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r11 - movq %rdi, %rax - mulq -16(%rsp) ## 8-byte Folded Reload - andl $1, %ecx - addq %r14, %rax - adcq %r11, %rdx - adcq -40(%rsp), %rbx ## 8-byte Folded Reload - adcq -80(%rsp), %rsi ## 8-byte Folded Reload - adcq -32(%rsp), %r12 ## 8-byte Folded Reload - adcq $0, %rbp - addq %r15, %r9 - adcq %r8, %rax - adcq %r10, %rdx - adcq %r13, %rbx - adcq -72(%rsp), %rsi ## 8-byte Folded Reload - adcq -64(%rsp), %r12 ## 8-byte Folded Reload - adcq -56(%rsp), %rbp ## 8-byte Folded Reload - adcq $0, %rcx - movq %rax, %r8 - subq -24(%rsp), %r8 ## 8-byte Folded Reload - movq %rdx, %r9 - sbbq -16(%rsp), %r9 ## 8-byte Folded Reload - movq %rbx, %r10 - sbbq -8(%rsp), %r10 ## 8-byte Folded Reload - movq %rsi, %r11 - sbbq (%rsp), %r11 ## 8-byte Folded Reload - movq %r12, %r14 - sbbq 8(%rsp), %r14 ## 8-byte Folded Reload - movq %rbp, %r15 - sbbq 16(%rsp), %r15 ## 8-byte Folded Reload - sbbq $0, %rcx - andl $1, %ecx - cmovneq %rsi, %r11 - testb %cl, %cl - cmovneq %rax, %r8 - movq 40(%rsp), %rax ## 8-byte Reload - movq %r8, (%rax) - cmovneq %rdx, %r9 - movq %r9, 8(%rax) - cmovneq %rbx, %r10 - movq %r10, 16(%rax) - movq %r11, 24(%rax) - cmovneq %r12, %r14 - movq %r14, 32(%rax) - cmovneq %rbp, %r15 - movq %r15, 40(%rax) - addq $48, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montNF6L - .p2align 4, 0x90 -_mcl_fp_montNF6L: ## @mcl_fp_montNF6L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $40, %rsp - movq %rdx, -72(%rsp) ## 8-byte Spill - movq %rdi, 32(%rsp) ## 8-byte Spill - movq 
-	.globl _mcl_fp_montNF6L
-	.p2align 4, 0x90
-_mcl_fp_montNF6L: ## @mcl_fp_montNF6L
-## BB#0:
-	pushq %rbp
-	pushq %r15
-	pushq %r14
-	pushq %r13
-	pushq %r12
-	pushq %rbx
-	subq $40, %rsp
-	movq %rdx, -72(%rsp) ## 8-byte Spill
-	movq %rdi, 32(%rsp) ## 8-byte Spill
-	movq 40(%rsi), %rax
-	movq %rax, -80(%rsp) ## 8-byte Spill
-	movq (%rdx), %rdi
-	mulq %rdi
-	movq %rax, -64(%rsp) ## 8-byte Spill
-	movq %rdx, %r12
-	movq 32(%rsi), %rax
-	movq %rax, -88(%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rax, %r14
-	movq %rdx, %r10
-	movq 24(%rsi), %rax
-	movq %rax, -56(%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rax, %r15
-	movq %rdx, %r9
-	movq 16(%rsi), %rax
-	movq %rax, 16(%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rax, %r11
-	movq %rdx, %r8
-	movq (%rsi), %rbx
-	movq %rbx, 8(%rsp) ## 8-byte Spill
-	movq 8(%rsi), %rax
-	movq %rax, (%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rdx, %rbp
-	movq %rax, %rsi
-	movq %rbx, %rax
-	mulq %rdi
-	movq %rax, %r13
-	movq %rdx, %rdi
-	addq %rsi, %rdi
-	adcq %r11, %rbp
-	adcq %r15, %r8
-	adcq %r14, %r9
-	adcq -64(%rsp), %r10 ## 8-byte Folded Reload
-	movq %r10, -128(%rsp) ## 8-byte Spill
-	adcq $0, %r12
-	movq %r12, -112(%rsp) ## 8-byte Spill
-	movq -8(%rcx), %rax
-	movq %rax, -48(%rsp) ## 8-byte Spill
-	movq %r13, %rbx
-	imulq %rax, %rbx
-	movq 40(%rcx), %rdx
-	movq %rdx, -64(%rsp) ## 8-byte Spill
-	movq %rbx, %rax
-	mulq %rdx
-	movq %rax, %r14
-	movq %rdx, -120(%rsp) ## 8-byte Spill
-	movq 32(%rcx), %rdx
-	movq %rdx, -16(%rsp) ## 8-byte Spill
-	movq %rbx, %rax
-	mulq %rdx
-	movq %rax, %r15
-	movq %rdx, -96(%rsp) ## 8-byte Spill
-	movq 24(%rcx), %rdx
-	movq %rdx, -24(%rsp) ## 8-byte Spill
-	movq %rbx, %rax
-	mulq %rdx
-	movq %rax, %r12
-	movq %rdx, -104(%rsp) ## 8-byte Spill
-	movq 16(%rcx), %rdx
-	movq %rdx, -40(%rsp) ## 8-byte Spill
-	movq %rbx, %rax
-	mulq %rdx
-	movq %rax, %r10
-	movq %rdx, -8(%rsp) ## 8-byte Spill
-	movq (%rcx), %rsi
-	movq %rsi, -32(%rsp) ## 8-byte Spill
-	movq 8(%rcx), %rcx
-	movq %rcx, 24(%rsp) ## 8-byte Spill
-	movq %rbx, %rax
-	mulq %rcx
-	movq %rdx, %r11
-	movq %rax, %rcx
-	movq %rbx, %rax
-	mulq %rsi
-	addq %r13, %rax
-	adcq %rdi, %rcx
-	adcq %rbp, %r10
-	adcq %r8, %r12
-	adcq %r9, %r15
-	adcq -128(%rsp), %r14 ## 8-byte Folded Reload
-	movq -112(%rsp), %rax ## 8-byte Reload
-	adcq $0, %rax
-	addq %rdx, %rcx
-	adcq %r11, %r10
-	adcq -8(%rsp), %r12 ## 8-byte Folded Reload
-	adcq -104(%rsp), %r15 ## 8-byte Folded Reload
-	adcq -96(%rsp), %r14 ## 8-byte Folded Reload
-	movq %r14, -128(%rsp) ## 8-byte Spill
-	adcq -120(%rsp), %rax ## 8-byte Folded Reload
-	movq %rax, -112(%rsp) ## 8-byte Spill
-	movq -72(%rsp), %rax ## 8-byte Reload
-	movq 8(%rax), %rdi
-	movq %rdi, %rax
-	mulq -80(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r8
-	movq %rax, -120(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq -88(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r9
-	movq %rax, -96(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq -56(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r13
-	movq %rax, -104(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq 16(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rsi
-	movq %rax, %r14
-	movq %rdi, %rax
-	mulq (%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rbx
-	movq %rax, %r11
-	movq %rdi, %rax
-	mulq 8(%rsp) ## 8-byte Folded Reload
-	movq %rax, %rdi
-	movq %rdx, %rbp
-	addq %r11, %rbp
-	adcq %r14, %rbx
-	adcq -104(%rsp), %rsi ## 8-byte Folded Reload
-	adcq -96(%rsp), %r13 ## 8-byte Folded Reload
-	adcq -120(%rsp), %r9 ## 8-byte Folded Reload
-	adcq $0, %r8
-	addq %rcx, %rdi
-	adcq %r10, %rbp
-	adcq %r12, %rbx
-	adcq %r15, %rsi
-	adcq -128(%rsp), %r13 ## 8-byte Folded Reload
-	adcq -112(%rsp), %r9 ## 8-byte Folded Reload
-	adcq $0, %r8
-	movq %rdi, %r11
-	imulq -48(%rsp), %r11 ## 8-byte Folded Reload
-	movq %r11, %rax
-	mulq -64(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq %rax, -112(%rsp) ## 8-byte Spill
-	movq %r11, %rax
-	mulq -16(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -96(%rsp) ## 8-byte Spill
-	movq %rax, %r15
-	movq %r11, %rax
-	mulq -24(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -120(%rsp) ## 8-byte Spill
-	movq %rax, %rcx
-	movq %r11, %rax
-	mulq -40(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -104(%rsp) ## 8-byte Spill
-	movq %rax, %r10
-	movq %r11, %rax
-	mulq 24(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r12
-	movq %rax, %r14
-	movq %r11, %rax
-	mulq -32(%rsp) ## 8-byte Folded Reload
-	addq %rdi, %rax
-	adcq %rbp, %r14
-	adcq %rbx, %r10
-	adcq %rsi, %rcx
-	adcq %r13, %r15
-	movq -112(%rsp), %rax ## 8-byte Reload
-	adcq %r9, %rax
-	adcq $0, %r8
-	addq %rdx, %r14
-	adcq %r12, %r10
-	adcq -104(%rsp), %rcx ## 8-byte Folded Reload
-	adcq -120(%rsp), %r15 ## 8-byte Folded Reload
-	movq %r15, -120(%rsp) ## 8-byte Spill
-	adcq -96(%rsp), %rax ## 8-byte Folded Reload
-	movq %rax, -112(%rsp) ## 8-byte Spill
-	adcq -128(%rsp), %r8 ## 8-byte Folded Reload
-	movq -72(%rsp), %rax ## 8-byte Reload
-	movq 16(%rax), %rdi
-	movq %rdi, %rax
-	mulq -80(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r13
-	movq %rax, -128(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq -88(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r15
-	movq %rax, -96(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq -56(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r11
-	movq %rax, -104(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq 16(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r12
-	movq %rax, -8(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq (%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rsi
-	movq %rax, %r9
-	movq %rdi, %rax
-	mulq 8(%rsp) ## 8-byte Folded Reload
-	movq %rax, %rbp
-	movq %rdx, %rbx
-	addq %r9, %rbx
-	adcq -8(%rsp), %rsi ## 8-byte Folded Reload
-	adcq -104(%rsp), %r12 ## 8-byte Folded Reload
-	adcq -96(%rsp), %r11 ## 8-byte Folded Reload
-	adcq -128(%rsp), %r15 ## 8-byte Folded Reload
-	adcq $0, %r13
-	addq %r14, %rbp
-	adcq %r10, %rbx
-	adcq %rcx, %rsi
-	adcq -120(%rsp), %r12 ## 8-byte Folded Reload
-	adcq -112(%rsp), %r11 ## 8-byte Folded Reload
-	adcq %r8, %r15
-	adcq $0, %r13
-	movq %rbp, %rcx
-	imulq -48(%rsp), %rcx ## 8-byte Folded Reload
-	movq %rcx, %rax
-	mulq -64(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -120(%rsp) ## 8-byte Spill
-	movq %rax, -112(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq -16(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq %rax, %r9
-	movq %rcx, %rax
-	mulq -24(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -96(%rsp) ## 8-byte Spill
-	movq %rax, %r10
-	movq %rcx, %rax
-	mulq -40(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -104(%rsp) ## 8-byte Spill
-	movq %rax, %r14
-	movq %rcx, %rax
-	mulq 24(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r8
-	movq %rax, %rdi
-	movq %rcx, %rax
-	mulq -32(%rsp) ## 8-byte Folded Reload
-	addq %rbp, %rax
-	adcq %rbx, %rdi
-	adcq %rsi, %r14
-	adcq %r12, %r10
-	adcq %r11, %r9
-	movq -112(%rsp), %rax ## 8-byte Reload
-	adcq %r15, %rax
-	adcq $0, %r13
-	addq %rdx, %rdi
-	adcq %r8, %r14
-	adcq -104(%rsp), %r10 ## 8-byte Folded Reload
-	adcq -96(%rsp), %r9 ## 8-byte Folded Reload
-	adcq -128(%rsp), %rax ## 8-byte Folded Reload
-	movq %rax, -112(%rsp) ## 8-byte Spill
-	adcq -120(%rsp), %r13 ## 8-byte Folded Reload
-	movq -72(%rsp), %rax ## 8-byte Reload
-	movq 24(%rax), %rbp
-	movq %rbp, %rax
-	mulq -80(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r15
-	movq %rax, -120(%rsp) ## 8-byte Spill
-	movq %rbp, %rax
-	mulq -88(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r11
-	movq %rax, -128(%rsp) ## 8-byte Spill
-	movq %rbp, %rax
-	mulq -56(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rcx
-	movq %rax, -96(%rsp) ## 8-byte Spill
-	movq %rbp, %rax
-	mulq 16(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rsi
-	movq %rax, -104(%rsp) ## 8-byte Spill
-	movq %rbp, %rax
-	mulq (%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rbx
-	movq %rax, %r12
-	movq %rbp, %rax
-	mulq 8(%rsp) ## 8-byte Folded Reload
-	movq %rax, %r8
-	movq %rdx, %rbp
-	addq %r12, %rbp
-	adcq -104(%rsp), %rbx ## 8-byte Folded Reload
-	adcq -96(%rsp), %rsi ## 8-byte Folded Reload
-	adcq -128(%rsp), %rcx ## 8-byte Folded Reload
-	adcq -120(%rsp), %r11 ## 8-byte Folded Reload
-	adcq $0, %r15
-	addq %rdi, %r8
-	adcq %r14, %rbp
-	adcq %r10, %rbx
-	adcq %r9, %rsi
-	adcq -112(%rsp), %rcx ## 8-byte Folded Reload
-	adcq %r13, %r11
-	adcq $0, %r15
-	movq %r8, %r14
-	imulq -48(%rsp), %r14 ## 8-byte Folded Reload
-	movq %r14, %rax
-	mulq -64(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq %rax, %r9
-	movq %r14, %rax
-	mulq -16(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -112(%rsp) ## 8-byte Spill
-	movq %rax, %r13
-	movq %r14, %rax
-	mulq -24(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -120(%rsp) ## 8-byte Spill
-	movq %rax, %r10
-	movq %r14, %rax
-	mulq -40(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -96(%rsp) ## 8-byte Spill
-	movq %rax, %r12
-	movq %r14, %rax
-	mulq 24(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -104(%rsp) ## 8-byte Spill
-	movq %rax, %rdi
-	movq %r14, %rax
-	mulq -32(%rsp) ## 8-byte Folded Reload
-	addq %r8, %rax
-	adcq %rbp, %rdi
-	adcq %rbx, %r12
-	adcq %rsi, %r10
-	adcq %rcx, %r13
-	adcq %r11, %r9
-	adcq $0, %r15
-	addq %rdx, %rdi
-	adcq -104(%rsp), %r12 ## 8-byte Folded Reload
-	adcq -96(%rsp), %r10 ## 8-byte Folded Reload
-	adcq -120(%rsp), %r13 ## 8-byte Folded Reload
-	movq %r13, -120(%rsp) ## 8-byte Spill
-	adcq -112(%rsp), %r9 ## 8-byte Folded Reload
-	movq %r9, -112(%rsp) ## 8-byte Spill
-	adcq -128(%rsp), %r15 ## 8-byte Folded Reload
-	movq -72(%rsp), %rax ## 8-byte Reload
-	movq 32(%rax), %rcx
-	movq %rcx, %rax
-	mulq -80(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r14
-	movq %rax, -128(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq -88(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r9
-	movq %rax, -96(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq -56(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r8
-	movq %rax, -104(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq 16(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rsi
-	movq %rax, -8(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq (%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rbx
-	movq %rax, %r13
-	movq %rcx, %rax
-	mulq 8(%rsp) ## 8-byte Folded Reload
-	movq %rax, %r11
-	movq %rdx, %rbp
-	addq %r13, %rbp
-	adcq -8(%rsp), %rbx ## 8-byte Folded Reload
-	adcq -104(%rsp), %rsi ## 8-byte Folded Reload
-	adcq -96(%rsp), %r8 ## 8-byte Folded Reload
-	adcq -128(%rsp), %r9 ## 8-byte Folded Reload
-	adcq $0, %r14
-	addq %rdi, %r11
-	adcq %r12, %rbp
-	adcq %r10, %rbx
-	adcq -120(%rsp), %rsi ## 8-byte Folded Reload
-	adcq -112(%rsp), %r8 ## 8-byte Folded Reload
-	adcq %r15, %r9
-	adcq $0, %r14
-	movq %r11, %rcx
-	imulq -48(%rsp), %rcx ## 8-byte Folded Reload
-	movq %rcx, %rax
-	mulq -64(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -96(%rsp) ## 8-byte Spill
-	movq %rax, -112(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq -16(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -104(%rsp) ## 8-byte Spill
-	movq %rax, %r12
-	movq %rcx, %rax
-	mulq -24(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -120(%rsp) ## 8-byte Spill
-	movq %rax, %r10
-	movq %rcx, %rax
-	mulq -40(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq %rax, %r15
-	movq %rcx, %rax
-	mulq 24(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r13
-	movq %rax, %rdi
-	movq %rcx, %rax
-	mulq -32(%rsp) ## 8-byte Folded Reload
-	addq %r11, %rax
-	adcq %rbp, %rdi
-	adcq %rbx, %r15
-	adcq %rsi, %r10
-	adcq %r8, %r12
-	movq -112(%rsp), %rcx ## 8-byte Reload
-	adcq %r9, %rcx
-	adcq $0, %r14
-	addq %rdx, %rdi
-	adcq %r13, %r15
-	adcq -128(%rsp), %r10 ## 8-byte Folded Reload
-	movq %r10, -128(%rsp) ## 8-byte Spill
-	adcq -120(%rsp), %r12 ## 8-byte Folded Reload
-	movq %r12, -120(%rsp) ## 8-byte Spill
-	adcq -104(%rsp), %rcx ## 8-byte Folded Reload
-	movq %rcx, -112(%rsp) ## 8-byte Spill
-	adcq -96(%rsp), %r14 ## 8-byte Folded Reload
-	movq -72(%rsp), %rax ## 8-byte Reload
-	movq 40(%rax), %rcx
-	movq %rcx, %rax
-	mulq -80(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rbx
-	movq %rax, -72(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq -88(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r11
-	movq %rax, -80(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq -56(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r12
-	movq %rax, -88(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq 16(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r13
-	movq %rax, %rbp
-	movq %rcx, %rax
-	mulq (%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r10
-	movq %rax, %rsi
-	movq %rcx, %rax
-	mulq 8(%rsp) ## 8-byte Folded Reload
-	movq %rax, %r9
-	movq %rdx, %r8
-	addq %rsi, %r8
-	adcq %rbp, %r10
-	adcq -88(%rsp), %r13 ## 8-byte Folded Reload
-	adcq -80(%rsp), %r12 ## 8-byte Folded Reload
-	adcq -72(%rsp), %r11 ## 8-byte Folded Reload
-	adcq $0, %rbx
-	addq %rdi, %r9
-	adcq %r15, %r8
-	adcq -128(%rsp), %r10 ## 8-byte Folded Reload
-	adcq -120(%rsp), %r13 ## 8-byte Folded Reload
-	adcq -112(%rsp), %r12 ## 8-byte Folded Reload
-	adcq %r14, %r11
-	adcq $0, %rbx
-	movq -48(%rsp), %rcx ## 8-byte Reload
-	imulq %r9, %rcx
-	movq %rcx, %rax
-	mulq -64(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -48(%rsp) ## 8-byte Spill
-	movq %rax, %rsi
-	movq %rcx, %rax
-	mulq -16(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -72(%rsp) ## 8-byte Spill
-	movq %rax, %rdi
-	movq %rcx, %rax
-	mulq -24(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -80(%rsp) ## 8-byte Spill
-	movq %rax, %rbp
-	movq %rcx, %rax
-	mulq -32(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -88(%rsp) ## 8-byte Spill
-	movq %rax, %r14
-	movq %rcx, %rax
-	movq %rcx, %r15
-	mulq -40(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -56(%rsp) ## 8-byte Spill
-	movq %rax, %rcx
-	movq %r15, %rax
-	movq 24(%rsp), %r15 ## 8-byte Reload
-	mulq %r15
-	addq %r9, %r14
-	adcq %r8, %rax
-	adcq %r10, %rcx
-	adcq %r13, %rbp
-	adcq %r12, %rdi
-	adcq %r11, %rsi
-	adcq $0, %rbx
-	addq -88(%rsp), %rax ## 8-byte Folded Reload
-	adcq %rdx, %rcx
-	adcq -56(%rsp), %rbp ## 8-byte Folded Reload
-	adcq -80(%rsp), %rdi ## 8-byte Folded Reload
-	adcq -72(%rsp), %rsi ## 8-byte Folded Reload
-	adcq -48(%rsp), %rbx ## 8-byte Folded Reload
-	movq %rax, %r14
-	subq -32(%rsp), %r14 ## 8-byte Folded Reload
-	movq %rcx, %r8
-	sbbq %r15, %r8
-	movq %rbp, %r9
-	sbbq -40(%rsp), %r9 ## 8-byte Folded Reload
-	movq %rdi, %r10
-	sbbq -24(%rsp), %r10 ## 8-byte Folded Reload
-	movq %rsi, %r11
-	sbbq -16(%rsp), %r11 ## 8-byte Folded Reload
-	movq %rbx, %r15
-	sbbq -64(%rsp), %r15 ## 8-byte Folded Reload
-	movq %r15, %rdx
-	sarq $63, %rdx
-	cmovsq %rax, %r14
-	movq 32(%rsp), %rax ## 8-byte Reload
-	movq %r14, (%rax)
-	cmovsq %rcx, %r8
-	movq %r8, 8(%rax)
-	cmovsq %rbp, %r9
-	movq %r9, 16(%rax)
-	cmovsq %rdi, %r10
-	movq %r10, 24(%rax)
-	cmovsq %rsi, %r11
-	movq %r11, 32(%rax)
-	cmovsq %rbx, %r15
-	movq %r15, 40(%rax)
-	addq $40, %rsp
-	popq %rbx
-	popq %r12
-	popq %r13
-	popq %r14
-	popq %r15
-	popq %rbp
-	retq
-
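The `NF` variant that just ended differs from the plain `mont` routine only in its final correction: instead of saving a borrow bit and selecting with `cmovneq`, it subtracts the modulus speculatively and keeps the difference unless it went negative, deriving the select mask from the sign of the top word (`sarq $63` followed by `cmovsq`). A branch-free sketch of that select in Go, with illustrative names:

```go
package main

import "math/bits"

// reduceOnce returns t mod p for t < 2p. It subtracts p unconditionally and
// then selects between t and t-p on the final borrow, mirroring the
// sarq $63 / cmovsq tail above; no data-dependent branch is taken.
func reduceOnce(t, p []uint64) []uint64 {
	n := len(p)
	d := make([]uint64, n)
	var borrow uint64
	for i := 0; i < n; i++ {
		d[i], borrow = bits.Sub64(t[i], p[i], borrow)
	}
	mask := -borrow // all ones if t < p (keep t), all zeros otherwise
	for i := 0; i < n; i++ {
		d[i] = (t[i] & mask) | (d[i] &^ mask)
	}
	return d
}
```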
-	.globl _mcl_fp_montRed6L
-	.p2align 4, 0x90
-_mcl_fp_montRed6L: ## @mcl_fp_montRed6L
-## BB#0:
-	pushq %rbp
-	pushq %r15
-	pushq %r14
-	pushq %r13
-	pushq %r12
-	pushq %rbx
-	subq $24, %rsp
-	movq %rdx, %rbp
-	movq %rdi, 16(%rsp) ## 8-byte Spill
-	movq -8(%rbp), %rax
-	movq %rax, -80(%rsp) ## 8-byte Spill
-	movq (%rsi), %r10
-	movq %r10, %rdi
-	imulq %rax, %rdi
-	movq 40(%rbp), %rcx
-	movq %rcx, -24(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq %rcx
-	movq %rax, %r14
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq 32(%rbp), %rcx
-	movq %rcx, -40(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq %rcx
-	movq %rax, %r15
-	movq %rdx, %r9
-	movq 24(%rbp), %rcx
-	movq %rcx, -48(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq %rcx
-	movq %rax, %r12
-	movq %rdx, %r11
-	movq 16(%rbp), %rcx
-	movq %rcx, -56(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq %rcx
-	movq %rax, %rcx
-	movq %rdx, %r13
-	movq (%rbp), %rbx
-	movq 8(%rbp), %rdx
-	movq %rdx, -16(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq %rdx
-	movq %rdx, %r8
-	movq %rax, %rbp
-	movq %rdi, %rax
-	mulq %rbx
-	movq %rbx, %rdi
-	movq %rdi, -8(%rsp) ## 8-byte Spill
-	movq %rdx, %rbx
-	addq %rbp, %rbx
-	adcq %rcx, %r8
-	adcq %r12, %r13
-	adcq %r15, %r11
-	adcq %r14, %r9
-	movq -128(%rsp), %rcx ## 8-byte Reload
-	adcq $0, %rcx
-	addq %r10, %rax
-	adcq 8(%rsi), %rbx
-	adcq 16(%rsi), %r8
-	adcq 24(%rsi), %r13
-	adcq 32(%rsi), %r11
-	adcq 40(%rsi), %r9
-	movq %r9, -120(%rsp) ## 8-byte Spill
-	adcq 48(%rsi), %rcx
-	movq %rcx, -128(%rsp) ## 8-byte Spill
-	movq 88(%rsi), %rax
-	movq 80(%rsi), %rcx
-	movq 72(%rsi), %rdx
-	movq 64(%rsi), %rbp
-	movq 56(%rsi), %rsi
-	adcq $0, %rsi
-	movq %rsi, -104(%rsp) ## 8-byte Spill
-	adcq $0, %rbp
-	movq %rbp, -72(%rsp) ## 8-byte Spill
-	adcq $0, %rdx
-	movq %rdx, -96(%rsp) ## 8-byte Spill
-	adcq $0, %rcx
-	movq %rcx, -64(%rsp) ## 8-byte Spill
-	adcq $0, %rax
-	movq %rax, -88(%rsp) ## 8-byte Spill
-	sbbq %r14, %r14
-	andl $1, %r14d
-	movq %rbx, %rsi
-	imulq -80(%rsp), %rsi ## 8-byte Folded Reload
-	movq %rsi, %rax
-	mulq -24(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -112(%rsp) ## 8-byte Spill
-	movq %rax, -32(%rsp) ## 8-byte Spill
-	movq %rsi, %rax
-	mulq -40(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r15
-	movq %rax, (%rsp) ## 8-byte Spill
-	movq %rsi, %rax
-	mulq -48(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r12
-	movq %rax, 8(%rsp) ## 8-byte Spill
-	movq %rsi, %rax
-	mulq -56(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rcx
-	movq %rax, %r10
-	movq %rsi, %rax
-	mulq -16(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rbp
-	movq %rax, %r9
-	movq %rsi, %rax
-	mulq %rdi
-	movq %rdx, %rdi
-	addq %r9, %rdi
-	adcq %r10, %rbp
-	adcq 8(%rsp), %rcx ## 8-byte Folded Reload
-	adcq (%rsp), %r12 ## 8-byte Folded Reload
-	adcq -32(%rsp), %r15 ## 8-byte Folded Reload
-	movq -112(%rsp), %rdx ## 8-byte Reload
-	adcq $0, %rdx
-	addq %rbx, %rax
-	adcq %r8, %rdi
-	adcq %r13, %rbp
-	adcq %r11, %rcx
-	adcq -120(%rsp), %r12 ## 8-byte Folded Reload
-	adcq -128(%rsp), %r15 ## 8-byte Folded Reload
-	adcq -104(%rsp), %rdx ## 8-byte Folded Reload
-	movq %rdx, -112(%rsp) ## 8-byte Spill
-	adcq $0, -72(%rsp) ## 8-byte Folded Spill
-	adcq $0, -96(%rsp) ## 8-byte Folded Spill
-	adcq $0, -64(%rsp) ## 8-byte Folded Spill
-	adcq $0, -88(%rsp) ## 8-byte Folded Spill
-	adcq $0, %r14
-	movq %rdi, %rbx
-	imulq -80(%rsp), %rbx ## 8-byte Folded Reload
-	movq %rbx, %rax
-	mulq -24(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq %rax, -104(%rsp) ## 8-byte Spill
-	movq %rbx, %rax
-	mulq -40(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -120(%rsp) ## 8-byte Spill
-	movq %rax, -32(%rsp) ## 8-byte Spill
-	movq %rbx, %rax
-	mulq -48(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r13
-	movq %rax, (%rsp) ## 8-byte Spill
-	movq %rbx, %rax
-	mulq -56(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rsi
-	movq %rax, %r9
-	movq %rbx, %rax
-	mulq -16(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r8
-	movq %rax, %r11
-	movq %rbx, %rax
-	mulq -8(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r10
-	addq %r11, %r10
-	adcq %r9, %r8
-	adcq (%rsp), %rsi ## 8-byte Folded Reload
-	adcq -32(%rsp), %r13 ## 8-byte Folded Reload
-	movq -120(%rsp), %rbx ## 8-byte Reload
-	adcq -104(%rsp), %rbx ## 8-byte Folded Reload
-	movq -128(%rsp), %rdx ## 8-byte Reload
-	adcq $0, %rdx
-	addq %rdi, %rax
-	adcq %rbp, %r10
-	adcq %rcx, %r8
-	adcq %r12, %rsi
-	adcq %r15, %r13
-	adcq -112(%rsp), %rbx ## 8-byte Folded Reload
-	movq %rbx, -120(%rsp) ## 8-byte Spill
-	adcq -72(%rsp), %rdx ## 8-byte Folded Reload
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	adcq $0, -96(%rsp) ## 8-byte Folded Spill
-	adcq $0, -64(%rsp) ## 8-byte Folded Spill
-	adcq $0, -88(%rsp) ## 8-byte Folded Spill
-	adcq $0, %r14
-	movq %r10, %rcx
-	imulq -80(%rsp), %rcx ## 8-byte Folded Reload
-	movq %rcx, %rax
-	movq -24(%rsp), %rbp ## 8-byte Reload
-	mulq %rbp
-	movq %rdx, -112(%rsp) ## 8-byte Spill
-	movq %rax, -72(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq -40(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r11
-	movq %rax, -104(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq -48(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r15
-	movq %rax, -32(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq -56(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r12
-	movq %rax, %rbx
-	movq %rcx, %rax
-	mulq -16(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rdi
-	movq %rax, %r9
-	movq %rcx, %rax
-	mulq -8(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rcx
-	addq %r9, %rcx
-	adcq %rbx, %rdi
-	adcq -32(%rsp), %r12 ## 8-byte Folded Reload
-	adcq -104(%rsp), %r15 ## 8-byte Folded Reload
-	adcq -72(%rsp), %r11 ## 8-byte Folded Reload
-	movq -112(%rsp), %rdx ## 8-byte Reload
-	adcq $0, %rdx
-	addq %r10, %rax
-	adcq %r8, %rcx
-	adcq %rsi, %rdi
-	adcq %r13, %r12
-	adcq -120(%rsp), %r15 ## 8-byte Folded Reload
-	adcq -128(%rsp), %r11 ## 8-byte Folded Reload
-	adcq -96(%rsp), %rdx ## 8-byte Folded Reload
-	movq %rdx, -112(%rsp) ## 8-byte Spill
-	adcq $0, -64(%rsp) ## 8-byte Folded Spill
-	movq -88(%rsp), %r8 ## 8-byte Reload
-	adcq $0, %r8
-	adcq $0, %r14
-	movq %rcx, %rsi
-	imulq -80(%rsp), %rsi ## 8-byte Folded Reload
-	movq %rsi, %rax
-	mulq %rbp
-	movq %rdx, -96(%rsp) ## 8-byte Spill
-	movq %rax, -88(%rsp) ## 8-byte Spill
-	movq %rsi, %rax
-	mulq -40(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq %rax, -72(%rsp) ## 8-byte Spill
-	movq %rsi, %rax
-	mulq -48(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -120(%rsp) ## 8-byte Spill
-	movq %rax, -104(%rsp) ## 8-byte Spill
-	movq %rsi, %rax
-	mulq -56(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r13
-	movq %rax, %rbp
-	movq %rsi, %rax
-	mulq -16(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r9
-	movq %rax, %r10
-	movq %rsi, %rax
-	mulq -8(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rbx
-	addq %r10, %rbx
-	adcq %rbp, %r9
-	adcq -104(%rsp), %r13 ## 8-byte Folded Reload
-	movq -120(%rsp), %rbp ## 8-byte Reload
-	adcq -72(%rsp), %rbp ## 8-byte Folded Reload
-	movq -128(%rsp), %rsi ## 8-byte Reload
-	adcq -88(%rsp), %rsi ## 8-byte Folded Reload
-	movq -96(%rsp), %rdx ## 8-byte Reload
-	adcq $0, %rdx
-	addq %rcx, %rax
-	adcq %rdi, %rbx
-	adcq %r12, %r9
-	adcq %r15, %r13
-	adcq %r11, %rbp
-	movq %rbp, -120(%rsp) ## 8-byte Spill
-	adcq -112(%rsp), %rsi ## 8-byte Folded Reload
-	movq %rsi, -128(%rsp) ## 8-byte Spill
-	adcq -64(%rsp), %rdx ## 8-byte Folded Reload
-	movq %rdx, -96(%rsp) ## 8-byte Spill
-	adcq $0, %r8
-	movq %r8, -88(%rsp) ## 8-byte Spill
-	adcq $0, %r14
-	movq -80(%rsp), %r8 ## 8-byte Reload
-	imulq %rbx, %r8
-	movq %r8, %rax
-	mulq -24(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rbp
-	movq %rax, -80(%rsp) ## 8-byte Spill
-	movq %r8, %rax
-	mulq -40(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r11
-	movq %rax, -64(%rsp) ## 8-byte Spill
-	movq %r8, %rax
-	mulq -48(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rdi
-	movq %rax, -112(%rsp) ## 8-byte Spill
-	movq %r8, %rax
-	mulq -56(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rsi
-	movq %rax, %r10
-	movq %r8, %rax
-	movq -16(%rsp), %r12 ## 8-byte Reload
-	mulq %r12
-	movq %rdx, %rcx
-	movq %rax, %r15
-	movq %r8, %rax
-	movq -8(%rsp), %r8 ## 8-byte Reload
-	mulq %r8
-	addq %r15, %rdx
-	adcq %r10, %rcx
-	adcq -112(%rsp), %rsi ## 8-byte Folded Reload
-	adcq -64(%rsp), %rdi ## 8-byte Folded Reload
-	adcq -80(%rsp), %r11 ## 8-byte Folded Reload
-	adcq $0, %rbp
-	addq %rbx, %rax
-	adcq %r9, %rdx
-	adcq %r13, %rcx
-	adcq -120(%rsp), %rsi ## 8-byte Folded Reload
-	adcq -128(%rsp), %rdi ## 8-byte Folded Reload
-	adcq -96(%rsp), %r11 ## 8-byte Folded Reload
-	adcq -88(%rsp), %rbp ## 8-byte Folded Reload
-	adcq $0, %r14
-	movq %rdx, %rax
-	subq %r8, %rax
-	movq %rcx, %rbx
-	sbbq %r12, %rbx
-	movq %rsi, %r8
-	sbbq -56(%rsp), %r8 ## 8-byte Folded Reload
-	movq %rdi, %r9
-	sbbq -48(%rsp), %r9 ## 8-byte Folded Reload
-	movq %r11, %r10
-	sbbq -40(%rsp), %r10 ## 8-byte Folded Reload
-	movq %rbp, %r15
-	sbbq -24(%rsp), %r15 ## 8-byte Folded Reload
-	sbbq $0, %r14
-	andl $1, %r14d
-	cmovneq %rbp, %r15
-	testb %r14b, %r14b
-	cmovneq %rdx, %rax
-	movq 16(%rsp), %rdx ## 8-byte Reload
-	movq %rax, (%rdx)
-	cmovneq %rcx, %rbx
-	movq %rbx, 8(%rdx)
-	cmovneq %rsi, %r8
-	movq %r8, 16(%rdx)
-	cmovneq %rdi, %r9
-	movq %r9, 24(%rdx)
-	cmovneq %r11, %r10
-	movq %r10, 32(%rdx)
-	movq %r15, 40(%rdx)
-	addq $24, %rsp
-	popq %rbx
-	popq %r12
-	popq %r13
-	popq %r14
-	popq %r15
-	popq %rbp
-	retq
-
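`_mcl_fp_montRed6L`, which just ended, is standalone Montgomery reduction: it takes a 12-word product and, one word at a time, adds the multiple of p that zeroes the lowest live word, then shifts attention one word up. A loop-form sketch in Go (the `madd` helper and the `pending`-carry bookkeeping are illustrative; the assembly fully unrolls the six rounds and keeps everything in registers):

```go
package main

import "math/bits"

// madd: carry word and low word of t + x*y + c, as in the earlier sketch.
func madd(t, x, y, c uint64) (uint64, uint64) {
	hi, lo := bits.Mul64(x, y)
	var cc uint64
	lo, cc = bits.Add64(lo, t, 0)
	hi, _ = bits.Add64(hi, 0, cc)
	lo, cc = bits.Add64(lo, c, 0)
	hi, _ = bits.Add64(hi, 0, cc)
	return hi, lo
}

// montRed reduces a 2n-word value x (assumed < p*R) to x*R^-1 mod p.
// Each round's carry out of column i+n belongs one column higher, so it is
// saved and folded in on the next round, exactly what the unrolled adcq
// chains above accomplish with the carry flag.
func montRed(x, p []uint64, pInv uint64) []uint64 {
	n := len(p)
	t := append([]uint64(nil), x...)
	var pending uint64
	for i := 0; i < n; i++ {
		m := t[i] * pInv // makes t[i] vanish mod 2^64
		var c uint64
		for j := 0; j < n; j++ {
			c, t[i+j] = madd(t[i+j], m, p[j], c)
		}
		var c1, c2 uint64
		t[i+n], c1 = bits.Add64(t[i+n], c, 0)
		t[i+n], c2 = bits.Add64(t[i+n], pending, 0)
		pending = c1 + c2
	}
	// One conditional subtraction, as in the cmovneq tail of the assembly.
	r := make([]uint64, n)
	var b uint64
	for j := 0; j < n; j++ {
		r[j], b = bits.Sub64(t[n+j], p[j], b)
	}
	if pending == 0 && b == 1 { // result already < p
		copy(r, t[n:])
	}
	return r
}
```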
-	.globl _mcl_fp_addPre6L
-	.p2align 4, 0x90
-_mcl_fp_addPre6L: ## @mcl_fp_addPre6L
-## BB#0:
-	pushq %r14
-	pushq %rbx
-	movq 40(%rdx), %r8
-	movq 40(%rsi), %r11
-	movq 32(%rdx), %r9
-	movq 24(%rdx), %r10
-	movq 24(%rsi), %rax
-	movq 32(%rsi), %r14
-	movq 16(%rdx), %rbx
-	movq (%rdx), %rcx
-	movq 8(%rdx), %rdx
-	addq (%rsi), %rcx
-	adcq 8(%rsi), %rdx
-	adcq 16(%rsi), %rbx
-	movq %rcx, (%rdi)
-	movq %rdx, 8(%rdi)
-	movq %rbx, 16(%rdi)
-	adcq %r10, %rax
-	movq %rax, 24(%rdi)
-	adcq %r9, %r14
-	movq %r14, 32(%rdi)
-	adcq %r8, %r11
-	movq %r11, 40(%rdi)
-	sbbq %rax, %rax
-	andl $1, %eax
-	popq %rbx
-	popq %r14
-	retq
-
-	.globl _mcl_fp_subPre6L
-	.p2align 4, 0x90
-_mcl_fp_subPre6L: ## @mcl_fp_subPre6L
-## BB#0:
-	pushq %r15
-	pushq %r14
-	pushq %rbx
-	movq 40(%rdx), %r8
-	movq 40(%rsi), %r9
-	movq 32(%rsi), %r10
-	movq 24(%rsi), %r11
-	movq 16(%rsi), %rcx
-	movq (%rsi), %rbx
-	movq 8(%rsi), %rsi
-	xorl %eax, %eax
-	subq (%rdx), %rbx
-	sbbq 8(%rdx), %rsi
-	movq 24(%rdx), %r14
-	movq 32(%rdx), %r15
-	sbbq 16(%rdx), %rcx
-	movq %rbx, (%rdi)
-	movq %rsi, 8(%rdi)
-	movq %rcx, 16(%rdi)
-	sbbq %r14, %r11
-	movq %r11, 24(%rdi)
-	sbbq %r15, %r10
-	movq %r10, 32(%rdi)
-	sbbq %r8, %r9
-	movq %r9, 40(%rdi)
-	sbbq $0, %rax
-	andl $1, %eax
-	popq %rbx
-	popq %r14
-	popq %r15
-	retq
-
-	.globl _mcl_fp_shr1_6L
-	.p2align 4, 0x90
-_mcl_fp_shr1_6L: ## @mcl_fp_shr1_6L
-## BB#0:
-	movq 40(%rsi), %r8
-	movq 32(%rsi), %r9
-	movq 24(%rsi), %rdx
-	movq 16(%rsi), %rax
-	movq (%rsi), %rcx
-	movq 8(%rsi), %rsi
-	shrdq $1, %rsi, %rcx
-	movq %rcx, (%rdi)
-	shrdq $1, %rax, %rsi
-	movq %rsi, 8(%rdi)
-	shrdq $1, %rdx, %rax
-	movq %rax, 16(%rdi)
-	shrdq $1, %r9, %rdx
-	movq %rdx, 24(%rdi)
-	shrdq $1, %r8, %r9
-	movq %r9, 32(%rdi)
-	shrq %r8
-	movq %r8, 40(%rdi)
-	retq
-
-	.globl _mcl_fp_add6L
-	.p2align 4, 0x90
-_mcl_fp_add6L: ## @mcl_fp_add6L
-## BB#0:
-	pushq %r15
-	pushq %r14
-	pushq %rbx
-	movq 40(%rdx), %r14
-	movq 40(%rsi), %r8
-	movq 32(%rdx), %r15
-	movq 24(%rdx), %rbx
-	movq 24(%rsi), %r10
-	movq 32(%rsi), %r9
-	movq 16(%rdx), %r11
-	movq (%rdx), %rax
-	movq 8(%rdx), %rdx
-	addq (%rsi), %rax
-	adcq 8(%rsi), %rdx
-	adcq 16(%rsi), %r11
-	movq %rax, (%rdi)
-	movq %rdx, 8(%rdi)
-	movq %r11, 16(%rdi)
-	adcq %rbx, %r10
-	movq %r10, 24(%rdi)
-	adcq %r15, %r9
-	movq %r9, 32(%rdi)
-	adcq %r14, %r8
-	movq %r8, 40(%rdi)
-	sbbq %rsi, %rsi
-	andl $1, %esi
-	subq (%rcx), %rax
-	sbbq 8(%rcx), %rdx
-	sbbq 16(%rcx), %r11
-	sbbq 24(%rcx), %r10
-	sbbq 32(%rcx), %r9
-	sbbq 40(%rcx), %r8
-	sbbq $0, %rsi
-	testb $1, %sil
-	jne LBB89_2
-## BB#1: ## %nocarry
-	movq %rax, (%rdi)
-	movq %rdx, 8(%rdi)
-	movq %r11, 16(%rdi)
-	movq %r10, 24(%rdi)
-	movq %r9, 32(%rdi)
-	movq %r8, 40(%rdi)
-LBB89_2: ## %carry
-	popq %rbx
-	popq %r14
-	popq %r15
-	retq
-
-	.globl _mcl_fp_addNF6L
-	.p2align 4, 0x90
-_mcl_fp_addNF6L: ## @mcl_fp_addNF6L
-## BB#0:
-	pushq %r15
-	pushq %r14
-	pushq %r13
-	pushq %r12
-	pushq %rbx
-	movq 40(%rdx), %r8
-	movq 32(%rdx), %r9
-	movq 24(%rdx), %r10
-	movq 16(%rdx), %r11
-	movq (%rdx), %r15
-	movq 8(%rdx), %r14
-	addq (%rsi), %r15
-	adcq 8(%rsi), %r14
-	adcq 16(%rsi), %r11
-	adcq 24(%rsi), %r10
-	adcq 32(%rsi), %r9
-	adcq 40(%rsi), %r8
-	movq %r15, %rsi
-	subq (%rcx), %rsi
-	movq %r14, %rbx
-	sbbq 8(%rcx), %rbx
-	movq %r11, %rdx
-	sbbq 16(%rcx), %rdx
-	movq %r10, %r13
-	sbbq 24(%rcx), %r13
-	movq %r9, %r12
-	sbbq 32(%rcx), %r12
-	movq %r8, %rax
-	sbbq 40(%rcx), %rax
-	movq %rax, %rcx
-	sarq $63, %rcx
-	cmovsq %r15, %rsi
-	movq %rsi, (%rdi)
-	cmovsq %r14, %rbx
-	movq %rbx, 8(%rdi)
-	cmovsq %r11, %rdx
-	movq %rdx, 16(%rdi)
-	cmovsq %r10, %r13
-	movq %r13, 24(%rdi)
-	cmovsq %r9, %r12
-	movq %r12, 32(%rdi)
-	cmovsq %r8, %rax
-	movq %rax, 40(%rdi)
-	popq %rbx
-	popq %r12
-	popq %r13
-	popq %r14
-	popq %r15
-	retq
-
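Among the helpers above, `_mcl_fp_shr1_6L` halves a field element with a chain of `shrdq $1` instructions, each pulling the next word's low bit into the top of the current word. The equivalent word loop in Go, with an illustrative name:

```go
package main

// shr1 shifts a little-endian multi-word integer right by one bit,
// mirroring the shrdq $1 chain of _mcl_fp_shr1_6L: each word takes the
// low bit of its more significant neighbour as its new top bit.
func shr1(x []uint64) []uint64 {
	n := len(x)
	r := make([]uint64, n)
	for i := 0; i < n-1; i++ {
		r[i] = x[i]>>1 | x[i+1]<<63 // shrdq $1, x[i+1], x[i]
	}
	r[n-1] = x[n-1] >> 1 // plain shrq on the top word
	return r
}
```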
-	.globl _mcl_fp_sub6L
-	.p2align 4, 0x90
-_mcl_fp_sub6L: ## @mcl_fp_sub6L
-## BB#0:
-	pushq %r15
-	pushq %r14
-	pushq %r12
-	pushq %rbx
-	movq 40(%rdx), %r14
-	movq 40(%rsi), %r8
-	movq 32(%rsi), %r9
-	movq 24(%rsi), %r10
-	movq 16(%rsi), %r11
-	movq (%rsi), %rax
-	movq 8(%rsi), %rsi
-	xorl %ebx, %ebx
-	subq (%rdx), %rax
-	sbbq 8(%rdx), %rsi
-	movq 24(%rdx), %r15
-	movq 32(%rdx), %r12
-	sbbq 16(%rdx), %r11
-	movq %rax, (%rdi)
-	movq %rsi, 8(%rdi)
-	movq %r11, 16(%rdi)
-	sbbq %r15, %r10
-	movq %r10, 24(%rdi)
-	sbbq %r12, %r9
-	movq %r9, 32(%rdi)
-	sbbq %r14, %r8
-	movq %r8, 40(%rdi)
-	sbbq $0, %rbx
-	testb $1, %bl
-	je LBB91_2
-## BB#1: ## %carry
-	movq 40(%rcx), %r14
-	movq 32(%rcx), %r15
-	movq 24(%rcx), %r12
-	movq 8(%rcx), %rbx
-	movq 16(%rcx), %rdx
-	addq (%rcx), %rax
-	movq %rax, (%rdi)
-	adcq %rsi, %rbx
-	movq %rbx, 8(%rdi)
-	adcq %r11, %rdx
-	movq %rdx, 16(%rdi)
-	adcq %r10, %r12
-	movq %r12, 24(%rdi)
-	adcq %r9, %r15
-	movq %r15, 32(%rdi)
-	adcq %r8, %r14
-	movq %r14, 40(%rdi)
-LBB91_2: ## %nocarry
-	popq %rbx
-	popq %r12
-	popq %r14
-	popq %r15
-	retq
-
-	.globl _mcl_fp_subNF6L
-	.p2align 4, 0x90
-_mcl_fp_subNF6L: ## @mcl_fp_subNF6L
-## BB#0:
-	pushq %rbp
-	pushq %r15
-	pushq %r14
-	pushq %r13
-	pushq %r12
-	pushq %rbx
-	movdqu (%rdx), %xmm0
-	movdqu 16(%rdx), %xmm1
-	movdqu 32(%rdx), %xmm2
-	pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
-	movd %xmm3, %r11
-	movdqu (%rsi), %xmm3
-	movdqu 16(%rsi), %xmm4
-	movdqu 32(%rsi), %xmm5
-	pshufd $78, %xmm5, %xmm6 ## xmm6 = xmm5[2,3,0,1]
-	movd %xmm6, %rax
-	movd %xmm2, %r14
-	movd %xmm5, %r8
-	pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
-	movd %xmm2, %r15
-	pshufd $78, %xmm4, %xmm2 ## xmm2 = xmm4[2,3,0,1]
-	movd %xmm2, %r9
-	movd %xmm1, %r12
-	movd %xmm4, %r10
-	pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
-	movd %xmm1, %rbx
-	pshufd $78, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,0,1]
-	movd %xmm1, %r13
-	movd %xmm0, %rsi
-	movd %xmm3, %rbp
-	subq %rsi, %rbp
-	sbbq %rbx, %r13
-	sbbq %r12, %r10
-	sbbq %r15, %r9
-	sbbq %r14, %r8
-	sbbq %r11, %rax
-	movq %rax, %rsi
-	sarq $63, %rsi
-	movq %rsi, %rbx
-	shldq $1, %rax, %rbx
-	andq (%rcx), %rbx
-	movq 40(%rcx), %r11
-	andq %rsi, %r11
-	movq 32(%rcx), %r14
-	andq %rsi, %r14
-	movq 24(%rcx), %r15
-	andq %rsi, %r15
-	movq 16(%rcx), %rdx
-	andq %rsi, %rdx
-	rolq %rsi
-	andq 8(%rcx), %rsi
-	addq %rbp, %rbx
-	movq %rbx, (%rdi)
-	adcq %r13, %rsi
-	movq %rsi, 8(%rdi)
-	adcq %r10, %rdx
-	movq %rdx, 16(%rdi)
-	adcq %r9, %r15
-	movq %r15, 24(%rdi)
-	adcq %r8, %r14
-	movq %r14, 32(%rdi)
-	adcq %rax, %r11
-	movq %r11, 40(%rdi)
-	popq %rbx
-	popq %r12
-	popq %r13
-	popq %r14
-	popq %r15
-	popq %rbp
-	retq
-
-	.globl _mcl_fpDbl_add6L
-	.p2align 4, 0x90
-_mcl_fpDbl_add6L: ## @mcl_fpDbl_add6L
-## BB#0:
-	pushq %rbp
-	pushq %r15
-	pushq %r14
-	pushq %r13
-	pushq %r12
-	pushq %rbx
-	movq 88(%rdx), %rax
-	movq %rax, -8(%rsp) ## 8-byte Spill
-	movq 80(%rdx), %rax
-	movq %rax, -16(%rsp) ## 8-byte Spill
-	movq 72(%rdx), %r14
-	movq 64(%rdx), %r15
-	movq 24(%rsi), %rbp
-	movq 32(%rsi), %r13
-	movq 16(%rdx), %r12
-	movq (%rdx), %rbx
-	movq 8(%rdx), %rax
-	addq (%rsi), %rbx
-	adcq 8(%rsi), %rax
-	adcq 16(%rsi), %r12
-	adcq 24(%rdx), %rbp
-	adcq 32(%rdx), %r13
-	movq 56(%rdx), %r11
-	movq 48(%rdx), %r9
-	movq 40(%rdx), %rdx
-	movq %rbx, (%rdi)
-	movq 88(%rsi), %r8
-	movq %rax, 8(%rdi)
-	movq 80(%rsi), %r10
-	movq %r12, 16(%rdi)
-	movq 72(%rsi), %r12
-	movq %rbp, 24(%rdi)
-	movq 40(%rsi), %rax
-	adcq %rdx, %rax
-	movq 64(%rsi), %rdx
-	movq %r13, 32(%rdi)
-	movq 56(%rsi), %r13
-	movq 48(%rsi), %rbp
-	adcq %r9, %rbp
-	movq %rax, 40(%rdi)
-	adcq %r11, %r13
-	adcq %r15, %rdx
-	adcq %r14, %r12
-	adcq -16(%rsp), %r10 ## 8-byte Folded Reload
-	adcq -8(%rsp), %r8 ## 8-byte Folded Reload
-	sbbq %rax, %rax
-	andl $1, %eax
-	movq %rbp, %rsi
-	subq (%rcx), %rsi
-	movq %r13, %rbx
-	sbbq 8(%rcx), %rbx
-	movq %rdx, %r9
-	sbbq 16(%rcx), %r9
-	movq %r12, %r11
-	sbbq 24(%rcx), %r11
-	movq %r10, %r14
-	sbbq 32(%rcx), %r14
-	movq %r8, %r15
-	sbbq 40(%rcx), %r15
-	sbbq $0, %rax
-	andl $1, %eax
-	cmovneq %rbp, %rsi
-	movq %rsi, 48(%rdi)
-	testb %al, %al
-	cmovneq %r13, %rbx
-	movq %rbx, 56(%rdi)
-	cmovneq %rdx, %r9
-	movq %r9, 64(%rdi)
-	cmovneq %r12, %r11
-	movq %r11, 72(%rdi)
-	cmovneq %r10, %r14
-	movq %r14, 80(%rdi)
-	cmovneq %r8, %r15
-	movq %r15, 88(%rdi)
-	popq %rbx
-	popq %r12
-	popq %r13
-	popq %r14
-	popq %r15
-	popq %rbp
-	retq
-
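`_mcl_fpDbl_add6L`, which just ended, shows the double-width convention: the sum's low six words are stored unreduced, and only the high six words get one `cmovneq`-selected subtraction of the modulus. A loop-form sketch in Go, assuming both inputs are valid double-width values (less than p times 2^384), so the high half plus carry stays below 2p:

```go
package main

import "math/bits"

// fpDblAdd adds two 2n-word values and reduces only the upper half mod p,
// matching _mcl_fpDbl_add6L's store pattern. Names are illustrative.
func fpDblAdd(x, y, p []uint64) []uint64 {
	n := len(p)
	s := make([]uint64, 2*n)
	var c uint64
	for i := 0; i < 2*n; i++ { // plain 2n-word carry chain
		s[i], c = bits.Add64(x[i], y[i], c)
	}
	// High half (plus the final carry) is below 2p: one subtraction decides.
	d := make([]uint64, n)
	var b uint64
	for i := 0; i < n; i++ {
		d[i], b = bits.Sub64(s[n+i], p[i], b)
	}
	if c == 0 && b == 1 { // high half already < p: keep it
		copy(d, s[n:])
	}
	copy(s[n:], d)
	return s
}
```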
-	.globl _mcl_fpDbl_sub6L
-	.p2align 4, 0x90
-_mcl_fpDbl_sub6L: ## @mcl_fpDbl_sub6L
-## BB#0:
-	pushq %rbp
-	pushq %r15
-	pushq %r14
-	pushq %r13
-	pushq %r12
-	pushq %rbx
-	movq 88(%rdx), %r9
-	movq 80(%rdx), %r10
-	movq 72(%rdx), %r14
-	movq 16(%rsi), %r8
-	movq (%rsi), %r15
-	movq 8(%rsi), %r11
-	xorl %eax, %eax
-	subq (%rdx), %r15
-	sbbq 8(%rdx), %r11
-	sbbq 16(%rdx), %r8
-	movq 24(%rsi), %rbx
-	sbbq 24(%rdx), %rbx
-	movq 32(%rsi), %r12
-	sbbq 32(%rdx), %r12
-	movq 64(%rdx), %r13
-	movq %r15, (%rdi)
-	movq 56(%rdx), %rbp
-	movq %r11, 8(%rdi)
-	movq 48(%rdx), %r15
-	movq 40(%rdx), %rdx
-	movq %r8, 16(%rdi)
-	movq 88(%rsi), %r8
-	movq %rbx, 24(%rdi)
-	movq 40(%rsi), %rbx
-	sbbq %rdx, %rbx
-	movq 80(%rsi), %r11
-	movq %r12, 32(%rdi)
-	movq 48(%rsi), %rdx
-	sbbq %r15, %rdx
-	movq 72(%rsi), %r15
-	movq %rbx, 40(%rdi)
-	movq 64(%rsi), %r12
-	movq 56(%rsi), %rsi
-	sbbq %rbp, %rsi
-	sbbq %r13, %r12
-	sbbq %r14, %r15
-	sbbq %r10, %r11
-	sbbq %r9, %r8
-	movl $0, %ebp
-	sbbq $0, %rbp
-	andl $1, %ebp
-	movq (%rcx), %r14
-	cmoveq %rax, %r14
-	testb %bpl, %bpl
-	movq 16(%rcx), %r9
-	cmoveq %rax, %r9
-	movq 8(%rcx), %rbp
-	cmoveq %rax, %rbp
-	movq 40(%rcx), %r10
-	cmoveq %rax, %r10
-	movq 32(%rcx), %rbx
-	cmoveq %rax, %rbx
-	cmovneq 24(%rcx), %rax
-	addq %rdx, %r14
-	movq %r14, 48(%rdi)
-	adcq %rsi, %rbp
-	movq %rbp, 56(%rdi)
-	adcq %r12, %r9
-	movq %r9, 64(%rdi)
-	adcq %r15, %rax
-	movq %rax, 72(%rdi)
-	adcq %r11, %rbx
-	movq %rbx, 80(%rdi)
-	adcq %r8, %r10
-	movq %r10, 88(%rdi)
-	popq %rbx
-	popq %r12
-	popq %r13
-	popq %r14
-	popq %r15
-	popq %rbp
-	retq
-
-	.globl _mcl_fp_mulUnitPre7L
-	.p2align 4, 0x90
-_mcl_fp_mulUnitPre7L: ## @mcl_fp_mulUnitPre7L
-## BB#0:
-	pushq %rbp
-	pushq %r15
-	pushq %r14
-	pushq %r13
-	pushq %r12
-	pushq %rbx
-	movq %rdx, %rcx
-	movq %rcx, %rax
-	mulq 48(%rsi)
-	movq %rdx, %r10
-	movq %rax, -8(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq 40(%rsi)
-	movq %rdx, %r11
-	movq %rax, -16(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq 32(%rsi)
-	movq %rdx, %r15
-	movq %rax, %r14
-	movq %rcx, %rax
-	mulq 24(%rsi)
-	movq %rdx, %r13
-	movq %rax, %r12
-	movq %rcx, %rax
-	mulq 16(%rsi)
-	movq %rdx, %rbx
-	movq %rax, %rbp
-	movq %rcx, %rax
-	mulq 8(%rsi)
-	movq %rdx, %r8
-	movq %rax, %r9
-	movq %rcx, %rax
-	mulq (%rsi)
-	movq %rax, (%rdi)
-	addq %r9, %rdx
-	movq %rdx, 8(%rdi)
-	adcq %rbp, %r8
-	movq %r8, 16(%rdi)
-	adcq %r12, %rbx
-	movq %rbx, 24(%rdi)
-	adcq %r14, %r13
-	movq %r13, 32(%rdi)
-	adcq -16(%rsp), %r15 ## 8-byte Folded Reload
-	movq %r15, 40(%rdi)
-	adcq -8(%rsp), %r11 ## 8-byte Folded Reload
-	movq %r11, 48(%rdi)
-	adcq $0, %r10
-	movq %r10, 56(%rdi)
-	popq %rbx
-	popq %r12
-	popq %r13
-	popq %r14
-	popq %r15
-	popq %rbp
-	retq
-
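`_mcl_fp_mulUnitPre7L`, which just ended, multiplies a 7-word integer by a single 64-bit word: seven `mulq`s, then one `addq`/`adcq` chain stitching the high halves to the next low halves. The same column walk as a loop in Go (names illustrative):

```go
package main

import "math/bits"

// mulUnitPre multiplies an n-word integer by a 64-bit word and returns
// n+1 words, the plain mulq/addq/adcq pattern of _mcl_fp_mulUnitPre7L.
func mulUnitPre(x []uint64, y uint64) []uint64 {
	r := make([]uint64, len(x)+1)
	var carry uint64
	for i, xi := range x {
		hi, lo := bits.Mul64(xi, y)
		var c uint64
		r[i], c = bits.Add64(lo, carry, 0)
		carry = hi + c // hi is at most 2^64-2, so this cannot wrap
	}
	r[len(x)] = carry
	return r
}
```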
-	.globl _mcl_fpDbl_mulPre7L
-	.p2align 4, 0x90
-_mcl_fpDbl_mulPre7L: ## @mcl_fpDbl_mulPre7L
-## BB#0:
-	pushq %rbp
-	pushq %r15
-	pushq %r14
-	pushq %r13
-	pushq %r12
-	pushq %rbx
-	subq $16, %rsp
-	movq %rdx, -56(%rsp) ## 8-byte Spill
-	movq %rsi, %r9
-	movq %rdi, -8(%rsp) ## 8-byte Spill
-	movq (%r9), %rax
-	movq %rax, -64(%rsp) ## 8-byte Spill
-	movq (%rdx), %rsi
-	mulq %rsi
-	movq %rdx, -120(%rsp) ## 8-byte Spill
-	movq 32(%r9), %rbp
-	movq %rbp, -88(%rsp) ## 8-byte Spill
-	movq 40(%r9), %rcx
-	movq %rcx, -128(%rsp) ## 8-byte Spill
-	movq 48(%r9), %r14
-	movq %rax, (%rdi)
-	movq %r14, %rax
-	mulq %rsi
-	movq %rdx, %rdi
-	movq %rax, -80(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq %rsi
-	movq %rdx, %rcx
-	movq %rax, -96(%rsp) ## 8-byte Spill
-	movq %rbp, %rax
-	mulq %rsi
-	movq %rax, -104(%rsp) ## 8-byte Spill
-	movq %rdx, %rbp
-	movq 24(%r9), %r8
-	movq %r8, %rax
-	mulq %rsi
-	movq %rax, %r15
-	movq %rdx, %rbx
-	movq 16(%r9), %rax
-	movq %rax, -112(%rsp) ## 8-byte Spill
-	mulq %rsi
-	movq %rax, %r13
-	movq %rdx, %r12
-	movq 8(%r9), %r11
-	movq %r11, %rax
-	mulq %rsi
-	movq %rdx, %rsi
-	movq %rax, %r10
-	addq -120(%rsp), %r10 ## 8-byte Folded Reload
-	adcq %r13, %rsi
-	adcq %r15, %r12
-	adcq -104(%rsp), %rbx ## 8-byte Folded Reload
-	adcq -96(%rsp), %rbp ## 8-byte Folded Reload
-	movq %rbp, -72(%rsp) ## 8-byte Spill
-	adcq -80(%rsp), %rcx ## 8-byte Folded Reload
-	movq %rcx, -104(%rsp) ## 8-byte Spill
-	adcq $0, %rdi
-	movq %rdi, -96(%rsp) ## 8-byte Spill
-	movq -56(%rsp), %rax ## 8-byte Reload
-	movq 8(%rax), %rcx
-	movq %r14, %rax
-	mulq %rcx
-	movq %rdx, -120(%rsp) ## 8-byte Spill
-	movq %rax, -80(%rsp) ## 8-byte Spill
-	movq -128(%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq %rax, %r13
-	movq -88(%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	movq %rdx, -88(%rsp) ## 8-byte Spill
-	movq %rax, %r15
-	movq %r8, %rax
-	mulq %rcx
-	movq %rdx, %r8
-	movq %rax, %r14
-	movq -112(%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	movq %rdx, -112(%rsp) ## 8-byte Spill
-	movq %rax, %rbp
-	movq %r11, %rax
-	mulq %rcx
-	movq %rdx, %r11
-	movq %rax, %rdi
-	movq -64(%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	addq %r10, %rax
-	movq -8(%rsp), %r10 ## 8-byte Reload
-	movq %rax, 8(%r10)
-	adcq %rsi, %rdi
-	adcq %r12, %rbp
-	adcq %rbx, %r14
-	adcq -72(%rsp), %r15 ## 8-byte Folded Reload
-	adcq -104(%rsp), %r13 ## 8-byte Folded Reload
-	movq -80(%rsp), %rax ## 8-byte Reload
-	adcq -96(%rsp), %rax ## 8-byte Folded Reload
-	sbbq %rsi, %rsi
-	andl $1, %esi
-	addq %rdx, %rdi
-	adcq %r11, %rbp
-	adcq -112(%rsp), %r14 ## 8-byte Folded Reload
-	adcq %r8, %r15
-	adcq -88(%rsp), %r13 ## 8-byte Folded Reload
-	adcq -128(%rsp), %rax ## 8-byte Folded Reload
-	movq %rax, -80(%rsp) ## 8-byte Spill
-	adcq -120(%rsp), %rsi ## 8-byte Folded Reload
-	movq 48(%r9), %rdx
-	movq %rdx, -64(%rsp) ## 8-byte Spill
-	movq -56(%rsp), %rax ## 8-byte Reload
-	movq 16(%rax), %rcx
-	movq %rdx, %rax
-	mulq %rcx
-	movq %rax, -120(%rsp) ## 8-byte Spill
-	movq %rdx, -104(%rsp) ## 8-byte Spill
-	movq 40(%r9), %rax
-	movq %rax, -88(%rsp) ## 8-byte Spill
-	mulq %rcx
-	movq %rax, -128(%rsp) ## 8-byte Spill
-	movq %rdx, -32(%rsp) ## 8-byte Spill
-	movq 32(%r9), %rax
-	movq %rax, -96(%rsp) ## 8-byte Spill
-	mulq %rcx
-	movq %rax, %r12
-	movq %rdx, -40(%rsp) ## 8-byte Spill
-	movq 24(%r9), %rax
-	movq %rax, -72(%rsp) ## 8-byte Spill
-	mulq %rcx
-	movq %rax, %rbx
-	movq %rdx, -48(%rsp) ## 8-byte Spill
-	movq 16(%r9), %rax
-	movq %rax, -112(%rsp) ## 8-byte Spill
-	mulq %rcx
-	movq %rax, %r8
-	movq %rdx, 8(%rsp) ## 8-byte Spill
-	movq 8(%r9), %rax
-	movq %rax, -24(%rsp) ## 8-byte Spill
-	mulq %rcx
-	movq %rax, %r11
-	movq %rdx, (%rsp) ## 8-byte Spill
-	movq (%r9), %rax
-	movq %rax, -16(%rsp) ## 8-byte Spill
-	mulq %rcx
-	addq %rdi, %rax
-	movq %rax, 16(%r10)
-	adcq %rbp, %r11
-	adcq %r14, %r8
-	adcq %r15, %rbx
-	adcq %r13, %r12
-	movq -128(%rsp), %rdi ## 8-byte Reload
-	adcq -80(%rsp), %rdi ## 8-byte Folded Reload
-	movq -120(%rsp), %rax ## 8-byte Reload
-	adcq %rsi, %rax
-	sbbq %rcx, %rcx
-	andl $1, %ecx
-	addq %rdx, %r11
-	adcq (%rsp), %r8 ## 8-byte Folded Reload
-	adcq 8(%rsp), %rbx ## 8-byte Folded Reload
-	adcq -48(%rsp), %r12 ## 8-byte Folded Reload
-	adcq -40(%rsp), %rdi ## 8-byte Folded Reload
-	movq %rdi, -128(%rsp) ## 8-byte Spill
-	adcq -32(%rsp), %rax ## 8-byte Folded Reload
-	movq %rax, -120(%rsp) ## 8-byte Spill
-	adcq -104(%rsp), %rcx ## 8-byte Folded Reload
-	movq -56(%rsp), %rax ## 8-byte Reload
-	movq 24(%rax), %rbp
-	movq -64(%rsp), %rax ## 8-byte Reload
-	mulq %rbp
-	movq %rdx, -104(%rsp) ## 8-byte Spill
-	movq %rax, -80(%rsp) ## 8-byte Spill
-	movq -88(%rsp), %rax ## 8-byte Reload
-	mulq %rbp
-	movq %rdx, -88(%rsp) ## 8-byte Spill
-	movq %rax, -64(%rsp) ## 8-byte Spill
-	movq -96(%rsp), %rax ## 8-byte Reload
-	mulq %rbp
-	movq %rdx, -96(%rsp) ## 8-byte Spill
-	movq %rax, %r13
-	movq -72(%rsp), %rax ## 8-byte Reload
-	mulq %rbp
-	movq %rdx, -72(%rsp) ## 8-byte Spill
-	movq %rax, %r15
-	movq -112(%rsp), %rax ## 8-byte Reload
-	mulq %rbp
-	movq %rdx, -112(%rsp) ## 8-byte Spill
-	movq %rax, %rdi
-	movq -24(%rsp), %rax ## 8-byte Reload
-	mulq %rbp
-	movq %rdx, %r14
-	movq %rax, %r10
-	movq -16(%rsp), %rax ## 8-byte Reload
-	mulq %rbp
-	addq %r11, %rax
-	movq -8(%rsp), %rsi ## 8-byte Reload
-	movq %rax, 24(%rsi)
-	adcq %r8, %r10
-	adcq %rbx, %rdi
-	adcq %r12, %r15
-	adcq -128(%rsp), %r13 ## 8-byte Folded Reload
-	movq -64(%rsp), %rbp ## 8-byte Reload
-	adcq -120(%rsp), %rbp ## 8-byte Folded Reload
-	movq -80(%rsp), %rax ## 8-byte Reload
-	adcq %rcx, %rax
-	sbbq %rsi, %rsi
-	andl $1, %esi
-	addq %rdx, %r10
-	adcq %r14, %rdi
-	adcq -112(%rsp), %r15 ## 8-byte Folded Reload
-	adcq -72(%rsp), %r13 ## 8-byte Folded Reload
-	adcq -96(%rsp), %rbp ## 8-byte Folded Reload
-	movq %rbp, -64(%rsp) ## 8-byte Spill
-	adcq -88(%rsp), %rax ## 8-byte Folded Reload
-	movq %rax, -80(%rsp) ## 8-byte Spill
-	adcq -104(%rsp), %rsi ## 8-byte Folded Reload
-	movq 48(%r9), %rax
-	movq %rax, -88(%rsp) ## 8-byte Spill
-	movq -56(%rsp), %rbx ## 8-byte Reload
-	movq 32(%rbx), %rcx
-	mulq %rcx
-	movq %rax, -120(%rsp) ## 8-byte Spill
-	movq %rdx, -72(%rsp) ## 8-byte Spill
-	movq 40(%r9), %rax
-	movq %rax, -96(%rsp) ## 8-byte Spill
-	mulq %rcx
-	movq %rax, -128(%rsp) ## 8-byte Spill
-	movq %rdx, -40(%rsp) ## 8-byte Spill
-	movq 32(%r9), %rax
-	movq %rax, -104(%rsp) ## 8-byte Spill
-	mulq %rcx
-	movq %rax, %r12
-	movq %rdx, -48(%rsp) ## 8-byte Spill
-	movq 24(%r9), %rax
-	movq %rax, -112(%rsp) ## 8-byte Spill
-	mulq %rcx
-	movq %rax, %rbp
-	movq %rdx, 8(%rsp) ## 8-byte Spill
-	movq 16(%r9), %rax
-	movq %rax, -16(%rsp) ## 8-byte Spill
-	mulq %rcx
-	movq %rax, %r14
-	movq %rdx, (%rsp) ## 8-byte Spill
-	movq 8(%r9), %rax
-	movq %rax, -32(%rsp) ## 8-byte Spill
-	mulq %rcx
-	movq %rax, %r11
-	movq %rdx, %r8
-	movq (%r9), %rax
-	movq %rax, -24(%rsp) ## 8-byte Spill
-	mulq %rcx
-	addq %r10, %rax
-	movq -8(%rsp), %rcx ## 8-byte Reload
-	movq %rax, 32(%rcx)
-	adcq %rdi, %r11
-	adcq %r15, %r14
-	adcq %r13, %rbp
-	adcq -64(%rsp), %r12 ## 8-byte Folded Reload
-	movq -128(%rsp), %rcx ## 8-byte Reload
-	adcq -80(%rsp), %rcx ## 8-byte Folded Reload
-	movq -120(%rsp), %rax ## 8-byte Reload
-	adcq %rsi, %rax
-	sbbq %r13, %r13
-	andl $1, %r13d
-	addq %rdx, %r11
-	adcq %r8, %r14
-	adcq (%rsp), %rbp ## 8-byte Folded Reload
-	adcq 8(%rsp), %r12 ## 8-byte Folded Reload
-	adcq -48(%rsp), %rcx ## 8-byte Folded Reload
-	movq %rcx, -128(%rsp) ## 8-byte Spill
-	adcq -40(%rsp), %rax ## 8-byte Folded Reload
-	movq %rax, -120(%rsp) ## 8-byte Spill
-	adcq -72(%rsp), %r13 ## 8-byte Folded Reload
-	movq 40(%rbx), %rcx
-	movq -88(%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	movq %rdx, -80(%rsp) ## 8-byte Spill
-	movq %rax, %rdi
-	movq -96(%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	movq %rdx, -64(%rsp) ## 8-byte Spill
-	movq %rax, %r10
-	movq -104(%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	movq %rdx, -88(%rsp) ## 8-byte Spill
-	movq %rax, %r15
-	movq -112(%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	movq %rdx, -96(%rsp) ## 8-byte Spill
-	movq %rax, %rbx
-	movq -16(%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	movq %rdx, -104(%rsp) ## 8-byte Spill
-	movq %rax, %rsi
-	movq -32(%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	movq %rdx, -16(%rsp) ## 8-byte Spill
-	movq %rax, %r8
-	movq -24(%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	movq %rdx, -40(%rsp) ## 8-byte Spill
-	addq %r11, %rax
-	movq -8(%rsp), %rcx ## 8-byte Reload
-	movq %rax, 40(%rcx)
-	adcq %r14, %r8
-	adcq %rbp, %rsi
-	adcq %r12, %rbx
-	adcq -128(%rsp), %r15 ## 8-byte Folded Reload
-	adcq -120(%rsp), %r10 ## 8-byte Folded Reload
-	adcq %r13, %rdi
-	movq -56(%rsp), %rax ## 8-byte Reload
-	movq 48(%rax), %r11
-	sbbq %rcx, %rcx
-	movq %r11, %rax
-	mulq 48(%r9)
-	movq %rdx, -56(%rsp) ## 8-byte Spill
-	movq %rax, -112(%rsp) ## 8-byte Spill
-	movq %r11, %rax
-	mulq 40(%r9)
-	movq %rdx, -120(%rsp) ## 8-byte Spill
-	movq %rax, -32(%rsp) ## 8-byte Spill
-	movq %r11, %rax
-	mulq 32(%r9)
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq %rax, %r13
-	movq %r11, %rax
-	mulq 24(%r9)
-	movq %rdx, -72(%rsp) ## 8-byte Spill
-	movq %rax, %rbp
-	movq %r11, %rax
-	mulq 16(%r9)
-	movq %rdx, -24(%rsp) ## 8-byte Spill
-	movq %rax, %r14
-	movq %r11, %rax
-	mulq 8(%r9)
-	movq %rdx, -48(%rsp) ## 8-byte Spill
-	movq %rax, %r12
-	movq %r11, %rax
-	mulq (%r9)
-	andl $1, %ecx
-	addq -40(%rsp), %r8 ## 8-byte Folded Reload
-	adcq -16(%rsp), %rsi ## 8-byte Folded Reload
-	adcq -104(%rsp), %rbx ## 8-byte Folded Reload
-	adcq -96(%rsp), %r15 ## 8-byte Folded Reload
-	adcq -88(%rsp), %r10 ## 8-byte Folded Reload
-	adcq -64(%rsp), %rdi ## 8-byte Folded Reload
-	adcq -80(%rsp), %rcx ## 8-byte Folded Reload
-	addq %rax, %r8
-	movq -8(%rsp), %r9 ## 8-byte Reload
-	movq %r8, 48(%r9)
-	adcq %r12, %rsi
-	adcq %r14, %rbx
-	adcq %rbp, %r15
-	adcq %r13, %r10
-	adcq -32(%rsp), %rdi ## 8-byte Folded Reload
-	adcq -112(%rsp), %rcx ## 8-byte Folded Reload
-	sbbq %rax, %rax
-	andl $1, %eax
-	addq %rdx, %rsi
-	adcq -48(%rsp), %rbx ## 8-byte Folded Reload
-	movq %r9, %rdx
-	movq %rsi, 56(%rdx)
-	movq %rbx, 64(%rdx)
-	adcq -24(%rsp), %r15 ## 8-byte Folded Reload
-	movq %r15, 72(%rdx)
-	adcq -72(%rsp), %r10 ## 8-byte Folded Reload
-	movq %r10, 80(%rdx)
-	adcq -128(%rsp), %rdi ## 8-byte Folded Reload
-	movq %rdi, 88(%rdx)
-	adcq -120(%rsp), %rcx ## 8-byte Folded Reload
-	movq %rcx, 96(%rdx)
-	adcq -56(%rsp), %rax ## 8-byte Folded Reload
-	movq %rax, 104(%rdx)
-	addq $16, %rsp
-	popq %rbx
-	popq %r12
-	popq %r13
-	popq %r14
-	popq %r15
-	popq %rbp
-	retq
-
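`_mcl_fpDbl_mulPre7L`, which just ended, computes the full 14-word product the schoolbook way: for each of the multiplier's seven words it runs one `mulq`/`adcq` row over the multiplicand and folds the row into the accumulator. The same structure as a loop in Go, with names and the carry-bound reasoning of the earlier `madd` sketch assumed:

```go
package main

import "math/bits"

// mulPre computes the plain 2n-word product of two n-word integers,
// one row per multiplier word, as _mcl_fpDbl_mulPre7L does unrolled.
func mulPre(x, y []uint64) []uint64 {
	n := len(x)
	r := make([]uint64, 2*n)
	for i := 0; i < n; i++ {
		var carry uint64
		for j := 0; j < n; j++ {
			hi, lo := bits.Mul64(x[j], y[i])
			var c1, c2 uint64
			r[i+j], c1 = bits.Add64(r[i+j], lo, 0)
			r[i+j], c2 = bits.Add64(r[i+j], carry, 0)
			carry = hi + c1 + c2 // bounded: hi <= 2^64-2
		}
		r[i+n] = carry // top word of this row, untouched so far
	}
	return r
}
```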
-	.globl _mcl_fpDbl_sqrPre7L
-	.p2align 4, 0x90
-_mcl_fpDbl_sqrPre7L: ## @mcl_fpDbl_sqrPre7L
-## BB#0:
-	pushq %rbp
-	pushq %r15
-	pushq %r14
-	pushq %r13
-	pushq %r12
-	pushq %rbx
-	subq $16, %rsp
-	movq %rsi, %r9
-	movq %rdi, -24(%rsp) ## 8-byte Spill
-	movq 24(%r9), %r10
-	movq %r10, -128(%rsp) ## 8-byte Spill
-	movq 32(%r9), %r14
-	movq %r14, -88(%rsp) ## 8-byte Spill
-	movq 40(%r9), %rsi
-	movq %rsi, -80(%rsp) ## 8-byte Spill
-	movq 48(%r9), %rbp
-	movq %rbp, -120(%rsp) ## 8-byte Spill
-	movq (%r9), %rbx
-	movq %rbx, %rax
-	mulq %rbx
-	movq %rdx, %rcx
-	movq %rax, (%rdi)
-	movq %rbp, %rax
-	mulq %rbx
-	movq %rdx, %r11
-	movq %rax, -96(%rsp) ## 8-byte Spill
-	movq %rsi, %rax
-	mulq %rbx
-	movq %rdx, %r8
-	movq %rax, -104(%rsp) ## 8-byte Spill
-	movq %r14, %rax
-	mulq %rbx
-	movq %rdx, %r13
-	movq %rax, %rsi
-	movq %r10, %rax
-	mulq %rbx
-	movq %rax, %r14
-	movq %rdx, %rdi
-	movq 16(%r9), %r15
-	movq %r15, %rax
-	mulq %rbx
-	movq %rax, %r10
-	movq %rdx, %r12
-	movq 8(%r9), %rbp
-	movq %rbp, %rax
-	mulq %rbx
-	movq %rdx, -112(%rsp) ## 8-byte Spill
-	movq %rax, -72(%rsp) ## 8-byte Spill
-	addq %rax, %rcx
-	adcq %rdx, %r10
-	adcq %r14, %r12
-	adcq %rsi, %rdi
-	adcq -104(%rsp), %r13 ## 8-byte Folded Reload
-	adcq -96(%rsp), %r8 ## 8-byte Folded Reload
-	movq %r8, -104(%rsp) ## 8-byte Spill
-	adcq $0, %r11
-	movq %r11, -96(%rsp) ## 8-byte Spill
-	movq -120(%rsp), %rax ## 8-byte Reload
-	mulq %rbp
-	movq %rdx, -120(%rsp) ## 8-byte Spill
-	movq %rax, %r8
-	movq -80(%rsp), %rax ## 8-byte Reload
-	mulq %rbp
-	movq %rdx, -80(%rsp) ## 8-byte Spill
-	movq %rax, %rsi
-	movq -88(%rsp), %rax ## 8-byte Reload
-	mulq %rbp
-	movq %rdx, -88(%rsp) ## 8-byte Spill
-	movq %rax, %r11
-	movq -128(%rsp), %rax ## 8-byte Reload
-	mulq %rbp
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq %rax, %r14
-	movq %r15, %rax
-	mulq %rbp
-	movq %rdx, %r15
-	movq %rax, %rbx
-	movq %rbp, %rax
-	mulq %rbp
-	movq %rax, %rbp
-	addq -72(%rsp), %rcx ## 8-byte Folded Reload
-	movq -24(%rsp), %rax ## 8-byte Reload
-	movq %rcx, 8(%rax)
-	adcq %r10, %rbp
-	adcq %r12, %rbx
-	adcq %rdi, %r14
-	adcq %r13, %r11
-	movq %rsi, %rax
-	adcq -104(%rsp), %rax ## 8-byte Folded Reload
-	adcq -96(%rsp), %r8 ## 8-byte Folded Reload
-	sbbq %rsi, %rsi
-	andl $1, %esi
-	addq -112(%rsp), %rbp ## 8-byte Folded Reload
-	adcq %rdx, %rbx
-	adcq %r15, %r14
-	adcq -128(%rsp), %r11 ## 8-byte Folded Reload
-	adcq -88(%rsp), %rax ## 8-byte Folded Reload
-	movq %rax, -48(%rsp) ## 8-byte Spill
-	adcq -80(%rsp), %r8 ## 8-byte Folded Reload
-	movq %r8, -40(%rsp) ## 8-byte Spill
-	adcq -120(%rsp), %rsi ## 8-byte Folded Reload
-	movq 48(%r9), %rax
-	movq %rax, -112(%rsp) ## 8-byte Spill
-	movq 16(%r9), %rdi
-	mulq %rdi
-	movq %rax, -120(%rsp) ## 8-byte Spill
-	movq %rdx, -96(%rsp) ## 8-byte Spill
-	movq 40(%r9), %rax
-	movq %rax, -80(%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rax, -56(%rsp) ## 8-byte Spill
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq 32(%r9), %rax
-	movq %rax, -88(%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rax, %r13
-	movq %rdx, -32(%rsp) ## 8-byte Spill
-	movq 24(%r9), %rcx
-	movq %rcx, %rax
-	mulq %rdi
-	movq %rax, %r10
-	movq %r10, -8(%rsp) ## 8-byte Spill
-	movq %rdx, %r12
-	movq %r12, -72(%rsp) ## 8-byte Spill
-	movq 8(%r9), %rax
-	movq %rax, (%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rax, %r15
-	movq %rdx, -64(%rsp) ## 8-byte Spill
-	movq (%r9), %rax
-	movq %rax, -104(%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rdx, -16(%rsp) ## 8-byte Spill
-	movq %rax, %r8
-	movq %rdi, %rax
-	mulq %rdi
-	movq %rax, %rdi
-	addq %rbp, %r8
-	movq -24(%rsp), %rax ## 8-byte Reload
-	movq %r8, 16(%rax)
-	adcq %rbx, %r15
-	adcq %r14, %rdi
-	adcq %r10, %r11
-	adcq -48(%rsp), %r13 ## 8-byte Folded Reload
-	movq -56(%rsp), %r10 ## 8-byte Reload
-	adcq -40(%rsp), %r10 ## 8-byte Folded Reload
-	movq -120(%rsp), %rax ## 8-byte Reload
-	adcq %rsi, %rax
-	sbbq %rbp, %rbp
-	andl $1, %ebp
-	addq -16(%rsp), %r15 ## 8-byte Folded Reload
-	adcq -64(%rsp), %rdi ## 8-byte Folded Reload
-	adcq %rdx, %r11
-	adcq %r12, %r13
-	adcq -32(%rsp), %r10 ## 8-byte Folded Reload
-	adcq -128(%rsp), %rax ## 8-byte Folded Reload
-	movq %rax, -120(%rsp) ## 8-byte Spill
-	adcq -96(%rsp), %rbp ## 8-byte Folded Reload
-	movq -112(%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq %rax, %r14
-	movq -80(%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	movq %rdx, -32(%rsp) ## 8-byte Spill
-	movq %rax, -112(%rsp) ## 8-byte Spill
-	movq -88(%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	movq %rdx, -40(%rsp) ## 8-byte Spill
-	movq %rax, %r8
-	movq (%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	movq %rdx, -48(%rsp) ## 8-byte Spill
-	movq %rax, %r12
-	movq -104(%rsp), %rax ## 8-byte Reload
-	mulq %rcx
-	movq %rdx, -64(%rsp) ## 8-byte Spill
-	movq %rax, %rbx
-	movq %rcx, %rax
-	mulq %rcx
-	movq %rdx, -56(%rsp) ## 8-byte Spill
-	addq %r15, %rbx
-	movq -24(%rsp), %rcx ## 8-byte Reload
-	movq %rbx, 24(%rcx)
-	adcq %rdi, %r12
-	adcq -8(%rsp), %r11 ## 8-byte Folded Reload
-	adcq %r13, %rax
-	movq %rax, %r15
-	movq %r8, %rsi
-	adcq %r10, %rsi
-	movq -112(%rsp), %rbx ## 8-byte Reload
-	adcq -120(%rsp), %rbx ## 8-byte Folded Reload
-	adcq %rbp, %r14
-	sbbq %r8, %r8
-	movq 8(%r9), %rcx
-	movq 40(%r9), %r13
-	movq %rcx, %rax
-	mulq %r13
-	movq %rax, -96(%rsp) ## 8-byte Spill
-	movq %rdx, -80(%rsp) ## 8-byte Spill
-	movq (%r9), %rbp
-	movq %rbp, %rax
-	mulq %r13
-	movq %rax, -104(%rsp) ## 8-byte Spill
-	movq %rdx, -88(%rsp) ## 8-byte Spill
-	movq 32(%r9), %rdi
-	movq %rcx, %rax
-	mulq %rdi
-	movq %rdx, -120(%rsp) ## 8-byte Spill
-	movq %rax, -8(%rsp) ## 8-byte Spill
-	movq %rbp, %rax
-	mulq %rdi
-	movq %rax, %rbp
-	movq %rdx, (%rsp) ## 8-byte Spill
-	andl $1, %r8d
-	addq -64(%rsp), %r12 ## 8-byte Folded Reload
-	adcq -48(%rsp), %r11 ## 8-byte Folded Reload
-	adcq -72(%rsp), %r15 ## 8-byte Folded Reload
-	movq %r15, -64(%rsp) ## 8-byte Spill
-	adcq -56(%rsp), %rsi ## 8-byte Folded Reload
-	movq %rsi, -56(%rsp) ## 8-byte Spill
-	adcq -40(%rsp), %rbx ## 8-byte Folded Reload
-	movq %rbx, -112(%rsp) ## 8-byte Spill
-	adcq -32(%rsp), %r14 ## 8-byte Folded Reload
-	adcq -128(%rsp), %r8 ## 8-byte Folded Reload
-	movq 48(%r9), %rax
-	movq %rax, -128(%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rdx, -72(%rsp) ## 8-byte Spill
-	movq %rax, %rcx
-	movq %r13, %rax
-	mulq %rdi
-	movq %rax, %rsi
-	movq %rsi, -48(%rsp) ## 8-byte Spill
-	movq %rdx, %rbx
-	movq 24(%r9), %rax
-	movq %rax, -32(%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rax, %r15
-	movq %rdx, -16(%rsp) ## 8-byte Spill
-	movq 16(%r9), %rax
-	movq %rax, -40(%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rdx, 8(%rsp) ## 8-byte Spill
-	movq %rax, %r10
-	movq %rdi, %rax
-	mulq %rdi
-	movq %rax, %rdi
-	addq %rbp, %r12
-	movq -24(%rsp), %rbp ## 8-byte Reload
-	movq %r12, 32(%rbp)
-	adcq -8(%rsp), %r11 ## 8-byte Folded Reload
-	adcq -64(%rsp), %r10 ## 8-byte Folded Reload
-	adcq -56(%rsp), %r15 ## 8-byte Folded Reload
-	adcq -112(%rsp), %rdi ## 8-byte Folded Reload
-	adcq %rsi, %r14
-	adcq %r8, %rcx
-	sbbq %rsi, %rsi
-	andl $1, %esi
-	addq (%rsp), %r11 ## 8-byte Folded Reload
-	adcq -120(%rsp), %r10 ## 8-byte Folded Reload
-	adcq 8(%rsp), %r15 ## 8-byte Folded Reload
-	adcq -16(%rsp), %rdi ## 8-byte Folded Reload
-	adcq %rdx, %r14
-	adcq %rbx, %rcx
-	adcq -72(%rsp), %rsi ## 8-byte Folded Reload
-	movq -128(%rsp), %rax ## 8-byte Reload
-	mulq %r13
-	movq %rdx, -112(%rsp) ## 8-byte Spill
-	movq %rax, -120(%rsp) ## 8-byte Spill
-	movq -32(%rsp), %rax ## 8-byte Reload
-	mulq %r13
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq %rax, %r8
-	movq -40(%rsp), %rax ## 8-byte Reload
-	mulq %r13
-	movq %rdx, -72(%rsp) ## 8-byte Spill
-	movq %rax, %r12
-	movq %r13, %rax
-	mulq %r13
-	movq %rax, %r13
-	addq -104(%rsp), %r11 ## 8-byte Folded Reload
-	movq %r11, 40(%rbp)
-	adcq -96(%rsp), %r10 ## 8-byte Folded Reload
-	adcq %r15, %r12
-	adcq %rdi, %r8
-	movq %r14, %rax
-	adcq -48(%rsp), %rax ## 8-byte Folded Reload
-	adcq %rcx, %r13
-	movq -120(%rsp), %rcx ## 8-byte Reload
-	adcq %rsi, %rcx
-	sbbq %r14, %r14
-	andl $1, %r14d
-	addq -88(%rsp), %r10 ## 8-byte Folded Reload
-	adcq -80(%rsp), %r12 ## 8-byte Folded Reload
-	adcq -72(%rsp), %r8 ## 8-byte Folded Reload
-	movq %r8, -104(%rsp) ## 8-byte Spill
-	adcq -128(%rsp), %rax ## 8-byte Folded Reload
-	movq %rax, -96(%rsp) ## 8-byte Spill
-	adcq %rbx, %r13
-	adcq %rdx, %rcx
-	movq %rcx, -120(%rsp) ## 8-byte Spill
-	adcq -112(%rsp), %r14 ## 8-byte Folded Reload
-	movq 48(%r9), %rcx
-	movq %rcx, %rax
-	mulq 40(%r9)
-	movq %rdx, -112(%rsp) ## 8-byte Spill
-	movq %rax, %r8
-	movq %rcx, %rax
-	mulq 32(%r9)
-	movq %rdx, -80(%rsp) ## 8-byte Spill
-	movq %rax, %rbx
-	movq %rcx, %rax
-	mulq 24(%r9)
-	movq %rdx, -88(%rsp) ## 8-byte Spill
-	movq %rax, %rbp
-	movq %rcx, %rax
-	mulq 16(%r9)
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq %rax, %r11
-	movq %rcx, %rax
-	mulq 8(%r9)
-	movq %rdx, %r15
-	movq %rax, %rdi
-	movq %rcx, %rax
-	mulq (%r9)
-	movq %rdx, %r9
-	movq %rax, %rsi
-	movq %rcx, %rax
-	mulq %rcx
-	addq %r10, %rsi
-	movq -24(%rsp), %r10 ## 8-byte Reload
-	movq %rsi, 48(%r10)
-	adcq %r12, %rdi
-	adcq -104(%rsp), %r11 ## 8-byte Folded Reload
-	adcq -96(%rsp), %rbp ## 8-byte Folded Reload
-	adcq %r13, %rbx
-	adcq -120(%rsp), %r8 ## 8-byte Folded Reload
-	adcq %r14, %rax
-	sbbq %rcx, %rcx
-	andl $1, %ecx
-	addq %r9, %rdi
-	adcq %r15, %r11
-	movq %r10, %rsi
-	movq %rdi, 56(%rsi)
-	movq %r11, 64(%rsi)
-	adcq -128(%rsp), %rbp ## 8-byte Folded Reload
-	movq %rbp, 72(%rsi)
-	adcq -88(%rsp), %rbx ## 8-byte Folded Reload
-	movq %rbx, 80(%rsi)
-	adcq -80(%rsp), %r8 ## 8-byte Folded Reload
-	movq %r8, 88(%rsi)
-	adcq -112(%rsp), %rax ## 8-byte Folded Reload
-	movq %rax, 96(%rsi)
-	adcq %rdx, %rcx
-	movq %rcx, 104(%rsi)
-	addq $16, %rsp
-	popq %rbx
-	popq %r12
-	popq %r13
-	popq %r14
-	popq %r15
-	popq %rbp
-	retq
-
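`_mcl_fpDbl_sqrPre7L`, which just ended, exploits the symmetry of squaring: each cross term x[i]*x[j] with i < j needs only one `mulq`, so a squaring costs roughly half the word multiplies of a general product. The generated code interleaves the cross products, the doubling, and the diagonal squares in one scheduled pass; the three-pass sketch below in Go shows the arithmetic idea only, under the usual carry bounds, and is not the code's actual ordering:

```go
package main

import "math/bits"

// sqrPre squares an n-word integer: accumulate each cross product once,
// double the whole accumulator by one left shift, then add the diagonal
// squares x[i]^2 at the even word positions.
func sqrPre(x []uint64) []uint64 {
	n := len(x)
	r := make([]uint64, 2*n)
	for i := 0; i < n; i++ { // cross terms, each unordered pair once
		var carry uint64
		for j := i + 1; j < n; j++ {
			hi, lo := bits.Mul64(x[i], x[j])
			var c1, c2 uint64
			r[i+j], c1 = bits.Add64(r[i+j], lo, 0)
			r[i+j], c2 = bits.Add64(r[i+j], carry, 0)
			carry = hi + c1 + c2
		}
		r[i+n] = carry
	}
	for i := 2*n - 1; i > 0; i-- { // double the cross-term sum
		r[i] = r[i]<<1 | r[i-1]>>63
	}
	r[0] <<= 1
	var c uint64
	for i := 0; i < n; i++ { // add the diagonal squares
		hi, lo := bits.Mul64(x[i], x[i])
		var c1 uint64
		r[2*i], c1 = bits.Add64(r[2*i], lo, c)
		r[2*i+1], c = bits.Add64(r[2*i+1], hi, c1)
	}
	return r
}
```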
-	.globl _mcl_fp_mont7L
-	.p2align 4, 0x90
-_mcl_fp_mont7L: ## @mcl_fp_mont7L
-## BB#0:
-	pushq %rbp
-	pushq %r15
-	pushq %r14
-	pushq %r13
-	pushq %r12
-	pushq %rbx
-	subq $88, %rsp
-	movq %rdx, -16(%rsp) ## 8-byte Spill
-	movq %rdi, 80(%rsp) ## 8-byte Spill
-	movq 48(%rsi), %rax
-	movq %rax, -24(%rsp) ## 8-byte Spill
-	movq (%rdx), %rdi
-	mulq %rdi
-	movq %rax, 8(%rsp) ## 8-byte Spill
-	movq %rdx, %r12
-	movq 40(%rsi), %rax
-	movq %rax, -32(%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rax, (%rsp) ## 8-byte Spill
-	movq %rdx, %r8
-	movq 32(%rsi), %rax
-	movq %rax, -40(%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rax, -8(%rsp) ## 8-byte Spill
-	movq %rdx, %r9
-	movq 24(%rsi), %rax
-	movq %rax, -48(%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rax, %r14
-	movq %rdx, %r11
-	movq 16(%rsi), %rax
-	movq %rax, 32(%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rax, %r15
-	movq %rdx, %rbx
-	movq (%rsi), %rbp
-	movq %rbp, 24(%rsp) ## 8-byte Spill
-	movq 8(%rsi), %rax
-	movq %rax, 16(%rsp) ## 8-byte Spill
-	mulq %rdi
-	movq %rdx, %r13
-	movq %rax, %rsi
-	movq %rbp, %rax
-	mulq %rdi
-	movq %rax, -80(%rsp) ## 8-byte Spill
-	movq %rdx, %r10
-	addq %rsi, %r10
-	adcq %r15, %r13
-	adcq %r14, %rbx
-	movq %rbx, -72(%rsp) ## 8-byte Spill
-	adcq -8(%rsp), %r11 ## 8-byte Folded Reload
-	movq %r11, -56(%rsp) ## 8-byte Spill
-	adcq (%rsp), %r9 ## 8-byte Folded Reload
-	movq %r9, -112(%rsp) ## 8-byte Spill
-	adcq 8(%rsp), %r8 ## 8-byte Folded Reload
-	movq %r8, -104(%rsp) ## 8-byte Spill
-	adcq $0, %r12
-	movq %r12, -96(%rsp) ## 8-byte Spill
-	movq -8(%rcx), %rdx
-	movq %rdx, 40(%rsp) ## 8-byte Spill
-	movq %rax, %rdi
-	imulq %rdx, %rdi
-	movq 48(%rcx), %rdx
-	movq %rdx, 8(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq %rdx
-	movq %rax, -88(%rsp) ## 8-byte Spill
-	movq %rdx, -120(%rsp) ## 8-byte Spill
-	movq 40(%rcx), %rdx
-	movq %rdx, (%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq %rdx
-	movq %rax, -64(%rsp) ## 8-byte Spill
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq 32(%rcx), %rdx
-	movq %rdx, -8(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq %rdx
-	movq %rax, %r14
-	movq %rdx, %r9
-	movq 24(%rcx), %rdx
-	movq %rdx, 64(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq %rdx
-	movq %rax, %r8
-	movq %rdx, %rbx
-	movq 16(%rcx), %rdx
-	movq %rdx, 56(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq %rdx
-	movq %rax, %r15
-	movq %rdx, %rbp
-	movq (%rcx), %rsi
-	movq %rsi, 48(%rsp) ## 8-byte Spill
-	movq 8(%rcx), %rcx
-	movq %rcx, 72(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq %rcx
-	movq %rdx, %rcx
-	movq %rax, %r12
-	movq %rdi, %rax
-	mulq %rsi
-	movq %rdx, %r11
-	addq %r12, %r11
-	adcq %r15, %rcx
-	adcq %r8, %rbp
-	adcq %r14, %rbx
-	adcq -64(%rsp), %r9 ## 8-byte Folded Reload
-	movq -128(%rsp), %rdx ## 8-byte Reload
-	adcq -88(%rsp), %rdx ## 8-byte Folded Reload
-	movq -120(%rsp), %rdi ## 8-byte Reload
-	adcq $0, %rdi
-	addq -80(%rsp), %rax ## 8-byte Folded Reload
-	adcq %r10, %r11
-	adcq %r13, %rcx
-	adcq -72(%rsp), %rbp ## 8-byte Folded Reload
-	adcq -56(%rsp), %rbx ## 8-byte Folded Reload
-	adcq -112(%rsp), %r9 ## 8-byte Folded Reload
-	movq %r9, -56(%rsp) ## 8-byte Spill
-	adcq -104(%rsp), %rdx ## 8-byte Folded Reload
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	adcq -96(%rsp), %rdi ## 8-byte Folded Reload
-	movq %rdi, -120(%rsp) ## 8-byte Spill
-	sbbq %rsi, %rsi
-	andl $1, %esi
-	movq -16(%rsp), %rax ## 8-byte Reload
-	movq 8(%rax), %rdi
-	movq %rdi, %rax
-	mulq -24(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -96(%rsp) ## 8-byte Spill
-	movq %rax, -72(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq -32(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -104(%rsp) ## 8-byte Spill
-	movq %rax, -80(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq -40(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -112(%rsp) ## 8-byte Spill
-	movq %rax, -88(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq -48(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r10
-	movq %rax, -64(%rsp) ## 8-byte Spill
-	movq %rdi, %rax
-	mulq 32(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r15
-	movq %rax, %r12
-	movq %rdi, %rax
-	mulq 16(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r13
-	movq %rax, %r9
-	movq %rdi, %rax
-	mulq 24(%rsp) ## 8-byte Folded Reload
-	movq %rax, %r8
-	movq %rdx, %r14
-	addq %r9, %r14
-	adcq %r12, %r13
-	adcq -64(%rsp), %r15 ## 8-byte Folded Reload
-	adcq -88(%rsp), %r10 ## 8-byte Folded Reload
-	movq -112(%rsp), %rdi ## 8-byte Reload
-	adcq -80(%rsp), %rdi ## 8-byte Folded Reload
-	movq -104(%rsp), %rdx ## 8-byte Reload
-	adcq -72(%rsp), %rdx ## 8-byte Folded Reload
-	movq -96(%rsp), %rax ## 8-byte Reload
-	adcq $0, %rax
-	addq %r11, %r8
-	adcq %rcx, %r14
-	adcq %rbp, %r13
-	adcq %rbx, %r15
-	adcq -56(%rsp), %r10 ## 8-byte Folded Reload
-	adcq -128(%rsp), %rdi ## 8-byte Folded Reload
-	movq %rdi, -112(%rsp) ## 8-byte Spill
-	adcq -120(%rsp), %rdx ## 8-byte Folded Reload
-	movq %rdx, -104(%rsp) ## 8-byte Spill
-	adcq %rsi, %rax
-	movq %rax, -96(%rsp) ## 8-byte Spill
-	sbbq %rax, %rax
-	andl $1, %eax
-	movq %rax, -56(%rsp) ## 8-byte Spill
-	movq %r8, %rcx
-	imulq 40(%rsp), %rcx ## 8-byte Folded Reload
-	movq %rcx, %rax
-	mulq 8(%rsp) ## 8-byte Folded Reload
-	movq %rdx, -120(%rsp) ## 8-byte Spill
-	movq %rax, -72(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq (%rsp) ## 8-byte Folded Reload
-	movq %rdx, -128(%rsp) ## 8-byte Spill
-	movq %rax, -80(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq -8(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r11
-	movq %rax, -88(%rsp) ## 8-byte Spill
-	movq %rcx, %rax
-	mulq 64(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r12
-	movq %rax, %rbx
-	movq %rcx, %rax
-	mulq 56(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %r9
-	movq %rax, %rdi
-	movq %rcx, %rax
-	mulq 72(%rsp) ## 8-byte Folded Reload
-	movq %rdx, %rsi
-	movq %rax, %rbp
-	movq %rcx, %rax
-	mulq 48(%rsp) ## 8-byte Folded Reload
Reload - movq %rdx, %rcx - addq %rbp, %rcx - adcq %rdi, %rsi - adcq %rbx, %r9 - adcq -88(%rsp), %r12 ## 8-byte Folded Reload - adcq -80(%rsp), %r11 ## 8-byte Folded Reload - movq -128(%rsp), %rdi ## 8-byte Reload - adcq -72(%rsp), %rdi ## 8-byte Folded Reload - movq -120(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq %r8, %rax - adcq %r14, %rcx - adcq %r13, %rsi - adcq %r15, %r9 - adcq %r10, %r12 - adcq -112(%rsp), %r11 ## 8-byte Folded Reload - adcq -104(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, -128(%rsp) ## 8-byte Spill - adcq -96(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - adcq $0, -56(%rsp) ## 8-byte Folded Spill - movq -16(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rbx - movq %rbx, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, -96(%rsp) ## 8-byte Spill - movq %rax, -72(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -32(%rsp) ## 8-byte Folded Reload - movq %rdx, -104(%rsp) ## 8-byte Spill - movq %rax, -80(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, -112(%rsp) ## 8-byte Spill - movq %rax, -88(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -64(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq 32(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r8 - movq %rbx, %rax - mulq 16(%rsp) ## 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r15 - movq %rbx, %rax - mulq 24(%rsp) ## 8-byte Folded Reload - movq %rax, %r14 - movq %rdx, %r10 - addq %r15, %r10 - adcq %r8, %rdi - adcq -64(%rsp), %rbp ## 8-byte Folded Reload - adcq -88(%rsp), %r13 ## 8-byte Folded Reload - movq -112(%rsp), %rbx ## 8-byte Reload - adcq -80(%rsp), %rbx ## 8-byte Folded Reload - movq -104(%rsp), %rdx ## 8-byte Reload - adcq -72(%rsp), %rdx ## 8-byte Folded Reload - movq -96(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - addq %rcx, %r14 - adcq %rsi, %r10 - adcq %r9, %rdi - adcq %r12, %rbp - adcq %r11, %r13 - adcq -128(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, -112(%rsp) ## 8-byte Spill - adcq -120(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -104(%rsp) ## 8-byte Spill - adcq -56(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -96(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -56(%rsp) ## 8-byte Spill - movq %r14, %rbx - imulq 40(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, %rax - mulq 8(%rsp) ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - movq %rax, -72(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq (%rsp) ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - movq %rax, -80(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -8(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -88(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq 64(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, -64(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq 56(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r15 - movq %rbx, %rax - mulq 72(%rsp) ## 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r12 - movq %rbx, %rax - mulq 48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - addq %r12, %r11 - adcq %r15, %r8 - adcq -64(%rsp), %rsi ## 8-byte Folded Reload - adcq -88(%rsp), %rcx ## 8-byte Folded Reload - adcq -80(%rsp), %r9 ## 8-byte Folded Reload - movq -128(%rsp), %rbx ## 8-byte Reload - adcq -72(%rsp), %rbx ## 8-byte Folded Reload - movq -120(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq %r14, %rax - adcq %r10, %r11 - adcq %rdi, %r8 - adcq %rbp, %rsi - adcq 
%r13, %rcx - adcq -112(%rsp), %r9 ## 8-byte Folded Reload - adcq -104(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, -128(%rsp) ## 8-byte Spill - adcq -96(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - adcq $0, -56(%rsp) ## 8-byte Folded Spill - movq -16(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rbx - movq %rbx, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, -96(%rsp) ## 8-byte Spill - movq %rax, -72(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -32(%rsp) ## 8-byte Folded Reload - movq %rdx, -104(%rsp) ## 8-byte Spill - movq %rax, -80(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, -112(%rsp) ## 8-byte Spill - movq %rax, -88(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, -64(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq 32(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r14 - movq %rbx, %rax - mulq 16(%rsp) ## 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r15 - movq %rbx, %rax - mulq 24(%rsp) ## 8-byte Folded Reload - movq %rax, %r10 - movq %rdx, %r13 - addq %r15, %r13 - adcq %r14, %rdi - adcq -64(%rsp), %rbp ## 8-byte Folded Reload - adcq -88(%rsp), %r12 ## 8-byte Folded Reload - movq -112(%rsp), %rbx ## 8-byte Reload - adcq -80(%rsp), %rbx ## 8-byte Folded Reload - movq -104(%rsp), %rdx ## 8-byte Reload - adcq -72(%rsp), %rdx ## 8-byte Folded Reload - movq -96(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - addq %r11, %r10 - adcq %r8, %r13 - adcq %rsi, %rdi - adcq %rcx, %rbp - adcq %r9, %r12 - adcq -128(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, -112(%rsp) ## 8-byte Spill - adcq -120(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -104(%rsp) ## 8-byte Spill - adcq -56(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -96(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -56(%rsp) ## 8-byte Spill - movq %r10, %rbx - imulq 40(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, %rax - mulq 8(%rsp) ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - movq %rax, -72(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq (%rsp) ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - movq %rax, -80(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -8(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -88(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq 64(%rsp) ## 8-byte Folded Reload - movq %rdx, %rcx - movq %rax, -64(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq 56(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r14 - movq %rbx, %rax - mulq 72(%rsp) ## 8-byte Folded Reload - movq %rdx, %r8 - movq %rax, %r15 - movq %rbx, %rax - mulq 48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - addq %r15, %r11 - adcq %r14, %r8 - adcq -64(%rsp), %rsi ## 8-byte Folded Reload - adcq -88(%rsp), %rcx ## 8-byte Folded Reload - adcq -80(%rsp), %r9 ## 8-byte Folded Reload - movq -128(%rsp), %rbx ## 8-byte Reload - adcq -72(%rsp), %rbx ## 8-byte Folded Reload - movq -120(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq %r10, %rax - adcq %r13, %r11 - adcq %rdi, %r8 - adcq %rbp, %rsi - adcq %r12, %rcx - adcq -112(%rsp), %r9 ## 8-byte Folded Reload - adcq -104(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, -128(%rsp) ## 8-byte Spill - adcq -96(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - adcq $0, -56(%rsp) ## 8-byte Folded Spill - movq -16(%rsp), %rax ## 8-byte Reload - movq 32(%rax), %rbx - movq %rbx, %rax - mulq -24(%rsp) ## 8-byte Folded 
Reload - movq %rdx, -96(%rsp) ## 8-byte Spill - movq %rax, -72(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -32(%rsp) ## 8-byte Folded Reload - movq %rdx, -104(%rsp) ## 8-byte Spill - movq %rax, -80(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, -112(%rsp) ## 8-byte Spill - movq %rax, -88(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, -64(%rsp) ## 8-byte Spill - movq %rbx, %rax - mulq 32(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r13 - movq %rbx, %rax - mulq 16(%rsp) ## 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r14 - movq %rbx, %rax - mulq 24(%rsp) ## 8-byte Folded Reload - movq %rax, %r10 - movq %rdx, %r12 - addq %r14, %r12 - adcq %r13, %rdi - adcq -64(%rsp), %rbp ## 8-byte Folded Reload - adcq -88(%rsp), %r15 ## 8-byte Folded Reload - movq -112(%rsp), %rbx ## 8-byte Reload - adcq -80(%rsp), %rbx ## 8-byte Folded Reload - movq -104(%rsp), %rdx ## 8-byte Reload - adcq -72(%rsp), %rdx ## 8-byte Folded Reload - movq -96(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - addq %r11, %r10 - adcq %r8, %r12 - adcq %rsi, %rdi - adcq %rcx, %rbp - adcq %r9, %r15 - adcq -128(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, -112(%rsp) ## 8-byte Spill - adcq -120(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -104(%rsp) ## 8-byte Spill - adcq -56(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -96(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -120(%rsp) ## 8-byte Spill - movq %r10, %rcx - imulq 40(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, %rax - mulq 8(%rsp) ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - movq %rax, -72(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq (%rsp) ## 8-byte Folded Reload - movq %rdx, -56(%rsp) ## 8-byte Spill - movq %rax, -80(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -8(%rsp) ## 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, -88(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq 64(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -64(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq 56(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, %r14 - movq %rcx, %rax - mulq 72(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r8 - movq %rcx, %rax - mulq 48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - addq %r8, %r11 - adcq %r14, %rbx - adcq -64(%rsp), %rsi ## 8-byte Folded Reload - adcq -88(%rsp), %r9 ## 8-byte Folded Reload - adcq -80(%rsp), %r13 ## 8-byte Folded Reload - movq -56(%rsp), %rdx ## 8-byte Reload - adcq -72(%rsp), %rdx ## 8-byte Folded Reload - movq -128(%rsp), %rcx ## 8-byte Reload - adcq $0, %rcx - addq %r10, %rax - adcq %r12, %r11 - adcq %rdi, %rbx - adcq %rbp, %rsi - adcq %r15, %r9 - adcq -112(%rsp), %r13 ## 8-byte Folded Reload - adcq -104(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -56(%rsp) ## 8-byte Spill - adcq -96(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -128(%rsp) ## 8-byte Spill - movq -120(%rsp), %r15 ## 8-byte Reload - adcq $0, %r15 - movq -16(%rsp), %rax ## 8-byte Reload - movq 40(%rax), %rcx - movq %rcx, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, -96(%rsp) ## 8-byte Spill - movq %rax, -72(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -32(%rsp) ## 8-byte Folded Reload - movq %rdx, -104(%rsp) ## 8-byte Spill - movq %rax, -80(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, -112(%rsp) ## 8-byte Spill - movq %rax, -88(%rsp) ## 8-byte Spill - movq %rcx, %rax - 
mulq -48(%rsp) ## 8-byte Folded Reload - movq %rdx, -120(%rsp) ## 8-byte Spill - movq %rax, -64(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq 32(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbp - movq %rax, %r12 - movq %rcx, %rax - mulq 16(%rsp) ## 8-byte Folded Reload - movq %rdx, %rdi - movq %rax, %r14 - movq %rcx, %rax - mulq 24(%rsp) ## 8-byte Folded Reload - movq %rax, %r10 - movq %rdx, %r8 - addq %r14, %r8 - adcq %r12, %rdi - adcq -64(%rsp), %rbp ## 8-byte Folded Reload - movq -120(%rsp), %r14 ## 8-byte Reload - adcq -88(%rsp), %r14 ## 8-byte Folded Reload - movq -112(%rsp), %rdx ## 8-byte Reload - adcq -80(%rsp), %rdx ## 8-byte Folded Reload - movq -104(%rsp), %rcx ## 8-byte Reload - adcq -72(%rsp), %rcx ## 8-byte Folded Reload - movq -96(%rsp), %rax ## 8-byte Reload - adcq $0, %rax - addq %r11, %r10 - adcq %rbx, %r8 - adcq %rsi, %rdi - adcq %r9, %rbp - adcq %r13, %r14 - movq %r14, -120(%rsp) ## 8-byte Spill - adcq -56(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -112(%rsp) ## 8-byte Spill - adcq -128(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -104(%rsp) ## 8-byte Spill - adcq %r15, %rax - movq %rax, -96(%rsp) ## 8-byte Spill - sbbq %rax, %rax - andl $1, %eax - movq %rax, -56(%rsp) ## 8-byte Spill - movq %r10, %rcx - imulq 40(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, %rax - mulq 8(%rsp) ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - movq %rax, -72(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq (%rsp) ## 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, -80(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -8(%rsp) ## 8-byte Folded Reload - movq %rdx, %rsi - movq %rax, -88(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq 64(%rsp) ## 8-byte Folded Reload - movq %rdx, %r12 - movq %rax, -64(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq 56(%rsp) ## 8-byte Folded Reload - movq %rdx, %r15 - movq %rax, %r13 - movq %rcx, %rax - mulq 72(%rsp) ## 8-byte Folded Reload - movq %rdx, %rbx - movq %rax, %r9 - movq %rcx, %rax - mulq 48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r11 - addq %r9, %r11 - adcq %r13, %rbx - adcq -64(%rsp), %r15 ## 8-byte Folded Reload - adcq -88(%rsp), %r12 ## 8-byte Folded Reload - adcq -80(%rsp), %rsi ## 8-byte Folded Reload - adcq -72(%rsp), %r14 ## 8-byte Folded Reload - movq -128(%rsp), %rdx ## 8-byte Reload - adcq $0, %rdx - addq %r10, %rax - adcq %r8, %r11 - adcq %rdi, %rbx - adcq %rbp, %r15 - adcq -120(%rsp), %r12 ## 8-byte Folded Reload - adcq -112(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, -112(%rsp) ## 8-byte Spill - adcq -104(%rsp), %r14 ## 8-byte Folded Reload - movq %r14, -104(%rsp) ## 8-byte Spill - adcq -96(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -128(%rsp) ## 8-byte Spill - movq -56(%rsp), %r8 ## 8-byte Reload - adcq $0, %r8 - movq -16(%rsp), %rax ## 8-byte Reload - movq 48(%rax), %rcx - movq %rcx, %rax - mulq -24(%rsp) ## 8-byte Folded Reload - movq %rdx, -16(%rsp) ## 8-byte Spill - movq %rax, -96(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -32(%rsp) ## 8-byte Folded Reload - movq %rdx, -24(%rsp) ## 8-byte Spill - movq %rax, -120(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -40(%rsp) ## 8-byte Folded Reload - movq %rdx, -32(%rsp) ## 8-byte Spill - movq %rax, -40(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq -48(%rsp) ## 8-byte Folded Reload - movq %rdx, %r9 - movq %rax, -48(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq 32(%rsp) ## 8-byte Folded Reload - movq %rdx, %r13 - movq %rax, %rdi - movq %rcx, %rax - mulq 16(%rsp) ## 8-byte Folded Reload - movq %rdx, %r14 - movq %rax, %rbp - movq %rcx, %rax - 
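## Annotation: a hedged sketch (illustrative names, not from this file) of the
## word-by-word CIOS Montgomery step that the unrolled code above implements.
##
##   // t: (N+1)-limb accumulator; a, b: operands; p: modulus;
##   // p_inv = -p^{-1} mod 2^64, the constant kept at 40(%rsp) above.
##   for (i = 0; i < 7; i++) {
##     t += a * b[i];           // mulq chain plus adcq carry folding
##     m  = t[0] * p_inv;       // the imulq 40(%rsp) step
##     t  = (t + m * p) >> 64;  // second mulq chain; t[0] becomes 0 and drops
##   }
##   if (t >= p) t -= p;        // the final subq/sbbq with cmovneq selection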
-
-	.globl	_mcl_fp_montNF7L
-	.p2align	4, 0x90
-_mcl_fp_montNF7L:                       ## @mcl_fp_montNF7L
-## BB#0:
-	pushq	%rbp
-	pushq	%r15
-	pushq	%r14
-	pushq	%r13
-	pushq	%r12
-	pushq	%rbx
-	subq	$80, %rsp
	[… several hundred deleted lines: the same unrolled 7-limb Montgomery
	 multiplication in its "NF" form; the inverse constant is kept at
	 16(%rsp), and the final correction is branch-free: the modulus is
	 subtracted into scratch registers, sarq $63 takes the sign of the top
	 limb, and cmovsq restores the unsubtracted limbs when the subtraction
	 underflowed …]
-	cmovsq	%r14, %rdx
-	movq	%rdx, 48(%rax)
-	addq	$80, %rsp
-	popq	%rbx
-	popq	%r12
-	popq	%r13
-	popq	%r14
-	popq	%r15
-	popq	%rbp
-	retq
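## Annotation: hedged note on the "NF" (no final branch) variant above.  Where
## mcl_fp_mont7L keeps an explicit carry word and a cmovneq-selected subtract,
## this version computes r - p unconditionally and lets the sign of the top
## limb pick the answer:
##
##   s = r - p;                  // subq/sbbq chain into scratch registers
##   result = (s < 0) ? r : s;   // sarq $63 on the top limb, then cmovsq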
-
-	.globl	_mcl_fp_montRed7L
-	.p2align	4, 0x90
-_mcl_fp_montRed7L:                      ## @mcl_fp_montRed7L
-## BB#0:
-	pushq	%rbp
-	pushq	%r15
-	pushq	%r14
-	pushq	%r13
-	pushq	%r12
-	pushq	%rbx
-	subq	$72, %rsp
	[… several hundred deleted lines: 7-limb Montgomery reduction of a
	 14-limb input; seven rounds of (imulq of the current low limb by the
	 inverse constant at -56(%rsp), a mulq chain over the modulus, adcq
	 folding of the next input limb), then the usual conditional subtraction
	 of the modulus via subq/sbbq and cmovneq before the stores …]
-	movq	%r12, 48(%rcx)
-	addq	$72, %rsp
-	popq	%rbx
-	popq	%r12
-	popq	%r13
-	popq	%r14
-	popq	%r15
-	popq	%rbp
-	retq
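## Annotation: a hedged sketch (illustrative names) of the reduction unrolled
## above; given T of 14 limbs with T < p * 2^448, it returns T * 2^-448 mod p:
##
##   for (i = 0; i < 7; i++) {
##     m = T[i] * p_inv;         // imulq by -p^{-1} mod 2^64 (at -56(%rsp))
##     T += (m * p) << (64*i);   // mulq chain + adcq folding; clears T[i]
##   }
##   r = T >> 448;               // the surviving high limbs
##   if (r >= p) r -= p;         // subq/sbbq, then cmovneq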
-
-	.globl	_mcl_fp_addPre7L
-	.p2align	4, 0x90
-_mcl_fp_addPre7L:                       ## @mcl_fp_addPre7L
-## BB#0:
-	pushq	%r15
-	pushq	%r14
-	pushq	%r12
-	pushq	%rbx
-	movq	48(%rdx), %r8
-	movq	48(%rsi), %r14
-	movq	40(%rdx), %r9
-	movq	40(%rsi), %r15
-	movq	32(%rdx), %r10
-	movq	24(%rdx), %r11
-	movq	16(%rdx), %r12
-	movq	(%rdx), %rcx
-	movq	8(%rdx), %rdx
-	addq	(%rsi), %rcx
-	adcq	8(%rsi), %rdx
-	movq	24(%rsi), %rax
-	movq	32(%rsi), %rbx
-	adcq	16(%rsi), %r12
-	movq	%rcx, (%rdi)
-	movq	%rdx, 8(%rdi)
-	movq	%r12, 16(%rdi)
-	adcq	%r11, %rax
-	movq	%rax, 24(%rdi)
-	adcq	%r10, %rbx
-	movq	%rbx, 32(%rdi)
-	adcq	%r9, %r15
-	movq	%r15, 40(%rdi)
-	adcq	%r8, %r14
-	movq	%r14, 48(%rdi)
-	sbbq	%rax, %rax
-	andl	$1, %eax
-	popq	%rbx
-	popq	%r12
-	popq	%r14
-	popq	%r15
-	retq
-
-	.globl	_mcl_fp_subPre7L
-	.p2align	4, 0x90
-_mcl_fp_subPre7L:                       ## @mcl_fp_subPre7L
-## BB#0:
-	pushq	%r15
-	pushq	%r14
-	pushq	%r12
-	pushq	%rbx
-	movq	48(%rdx), %r8
-	movq	48(%rsi), %r10
-	movq	40(%rdx), %r9
-	movq	40(%rsi), %r15
-	movq	24(%rdx), %r11
-	movq	32(%rdx), %r14
-	movq	(%rsi), %rbx
-	movq	8(%rsi), %r12
-	xorl	%eax, %eax
-	subq	(%rdx), %rbx
-	sbbq	8(%rdx), %r12
-	movq	16(%rsi), %rcx
-	sbbq	16(%rdx), %rcx
-	movq	32(%rsi), %rdx
-	movq	24(%rsi), %rsi
-	movq	%rbx, (%rdi)
-	movq	%r12, 8(%rdi)
-	movq	%rcx, 16(%rdi)
-	sbbq	%r11, %rsi
-	movq	%rsi, 24(%rdi)
-	sbbq	%r14, %rdx
-	movq	%rdx, 32(%rdi)
-	sbbq	%r9, %r15
-	movq	%r15, 40(%rdi)
-	sbbq	%r8, %r10
-	movq	%r10, 48(%rdi)
-	sbbq	$0, %rax
-	andl	$1, %eax
-	popq	%rbx
-	popq	%r12
-	popq	%r14
-	popq	%r15
-	retq
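## Annotation: hedged sketch (illustrative names) of the "Pre" helpers above,
## plain 7-limb add/sub with no modular correction, returning the final
## carry or borrow (the sbbq %rax,%rax / andl $1 idiom materializes CF as
## 0 or 1):
##
##   carry = 0;
##   for (i = 0; i < 7; i++)
##     z[i] = addc(x[i], y[i], &carry);   // addq, then adcq chain
##   return carry;                        // sbbq %rax,%rax; andl $1,%eax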
-
-	.globl	_mcl_fp_shr1_7L
-	.p2align	4, 0x90
-_mcl_fp_shr1_7L:                        ## @mcl_fp_shr1_7L
-## BB#0:
-	movq	48(%rsi), %r8
-	movq	40(%rsi), %r9
-	movq	32(%rsi), %r10
-	movq	24(%rsi), %rax
-	movq	16(%rsi), %rcx
-	movq	(%rsi), %rdx
-	movq	8(%rsi), %rsi
-	shrdq	$1, %rsi, %rdx
-	movq	%rdx, (%rdi)
-	shrdq	$1, %rcx, %rsi
-	movq	%rsi, 8(%rdi)
-	shrdq	$1, %rax, %rcx
-	movq	%rcx, 16(%rdi)
-	shrdq	$1, %r10, %rax
-	movq	%rax, 24(%rdi)
-	shrdq	$1, %r9, %r10
-	movq	%r10, 32(%rdi)
-	shrdq	$1, %r8, %r9
-	movq	%r9, 40(%rdi)
-	shrq	%r8
-	movq	%r8, 48(%rdi)
-	retq
-
-	.globl	_mcl_fp_add7L
-	.p2align	4, 0x90
-_mcl_fp_add7L:                          ## @mcl_fp_add7L
-## BB#0:
-	pushq	%r15
-	pushq	%r14
-	pushq	%r13
-	pushq	%r12
-	pushq	%rbx
-	movq	48(%rdx), %r14
-	movq	48(%rsi), %r8
-	movq	40(%rdx), %r15
-	movq	40(%rsi), %r9
-	movq	32(%rdx), %r12
-	movq	24(%rdx), %r13
-	movq	16(%rdx), %r10
-	movq	(%rdx), %r11
-	movq	8(%rdx), %rdx
-	addq	(%rsi), %r11
-	adcq	8(%rsi), %rdx
-	movq	24(%rsi), %rax
-	movq	32(%rsi), %rbx
-	adcq	16(%rsi), %r10
-	movq	%r11, (%rdi)
-	movq	%rdx, 8(%rdi)
-	movq	%r10, 16(%rdi)
-	adcq	%r13, %rax
-	movq	%rax, 24(%rdi)
-	adcq	%r12, %rbx
-	movq	%rbx, 32(%rdi)
-	adcq	%r15, %r9
-	movq	%r9, 40(%rdi)
-	adcq	%r14, %r8
-	movq	%r8, 48(%rdi)
-	sbbq	%rsi, %rsi
-	andl	$1, %esi
-	subq	(%rcx), %r11
-	sbbq	8(%rcx), %rdx
-	sbbq	16(%rcx), %r10
-	sbbq	24(%rcx), %rax
-	sbbq	32(%rcx), %rbx
-	sbbq	40(%rcx), %r9
-	sbbq	48(%rcx), %r8
-	sbbq	$0, %rsi
-	testb	$1, %sil
-	jne	LBB104_2
-## BB#1:                                ## %nocarry
-	movq	%r11, (%rdi)
-	movq	%rdx, 8(%rdi)
-	movq	%r10, 16(%rdi)
-	movq	%rax, 24(%rdi)
-	movq	%rbx, 32(%rdi)
-	movq	%r9, 40(%rdi)
-	movq	%r8, 48(%rdi)
-LBB104_2:                               ## %carry
-	popq	%rbx
-	popq	%r12
-	popq	%r13
-	popq	%r14
-	popq	%r15
-	retq
-
-	.globl	_mcl_fp_addNF7L
-	.p2align	4, 0x90
-_mcl_fp_addNF7L:                        ## @mcl_fp_addNF7L
-## BB#0:
-	pushq	%rbp
-	pushq	%r15
-	pushq	%r14
-	pushq	%r13
-	pushq	%r12
-	pushq	%rbx
	[… several dozen deleted lines: branch-free modular addition; the seven
	 limb pairs are added with addq/adcq, the modulus is subtracted into
	 scratch registers with subq/sbbq, sarq $63 takes the sign of the top
	 difference limb, and cmovsq restores the unreduced limbs when the
	 subtraction underflowed, before the stores to (%rdi) …]
-	popq	%rbx
-	popq	%r12
-	popq	%r13
-	popq	%r14
-	popq	%r15
-	popq	%rbp
-	retq
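## Annotation: hedged summary of how the two full modular adds above apply
## their correction (names illustrative):
##
##   r = x + y;    // addq/adcq chain, limbs stored eagerly
##   s = r - p;    // subq/sbbq chain against the modulus
##   mcl_fp_add7L:   if the subtraction did not borrow (testb/jne), the
##                   stores are overwritten with s; otherwise r stands
##   mcl_fp_addNF7L: selects between r and s limb by limb with cmovsq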
-
-	.globl	_mcl_fp_sub7L
-	.p2align	4, 0x90
-_mcl_fp_sub7L:                          ## @mcl_fp_sub7L
-## BB#0:
-	pushq	%rbp
-	pushq	%r15
-	pushq	%r14
-	pushq	%r13
-	pushq	%r12
-	pushq	%rbx
	[… several dozen deleted lines: the seven limb differences are formed
	 with subq/sbbq and stored; sbbq $0 / testb $1 checks the final borrow,
	 je LBB106_2 skips the correction when there is none, and otherwise the
	 "carry" block adds the modulus back limb by limb with addq/adcq and
	 rewrites the stores …]
-LBB106_2:                               ## %nocarry
-	popq	%rbx
-	popq	%r12
-	popq	%r13
-	popq	%r14
-	popq	%r15
-	popq	%rbp
-	retq
-
-	.globl	_mcl_fp_subNF7L
-	.p2align	4, 0x90
-_mcl_fp_subNF7L:                        ## @mcl_fp_subNF7L
-## BB#0:
-	pushq	%rbp
-	pushq	%r15
-	pushq	%r14
-	pushq	%r13
-	pushq	%r12
-	pushq	%rbx
	[… several dozen deleted lines: branch-free modular subtraction; the
	 operands are loaded with movdqu/pshufd (SSE2) into general registers,
	 subtracted with subq/sbbq, sarq $63 spreads the borrow into a mask that
	 is ANDed with each modulus limb, and the masked modulus is added back
	 with addq/adcq before the stores …]
-	popq	%rbx
-	popq	%r12
-	popq	%r13
-	popq	%r14
-	popq	%r15
-	popq	%rbp
-	retq
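## Annotation: hedged sketch of the branch-free correction in mcl_fp_subNF7L
## above (subtract, then add p back under an all-ones mask only if the
## subtraction underflowed):
##
##   s = x - y;                       // subq/sbbq chain (SSE2 loads feed it)
##   mask = (int64_t)s.top >> 63;     // sarq $63: 0 or all ones
##   r = s + (p & mask);              // andq each modulus limb, addq/adcq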
-16(%rsp) ## 8-byte Spill - adcq -24(%rsp), %r15 ## 8-byte Folded Reload - movq %r15, -24(%rsp) ## 8-byte Spill - adcq -8(%rsp), %r9 ## 8-byte Folded Reload - sbbq %rbp, %rbp - andl $1, %ebp - movq %rcx, %rbx - subq (%r8), %rbx - movq %rsi, %r10 - sbbq 8(%r8), %r10 - movq %rdx, %r11 - sbbq 16(%r8), %r11 - movq %r12, %r14 - sbbq 24(%r8), %r14 - movq -16(%rsp), %r13 ## 8-byte Reload - sbbq 32(%r8), %r13 - sbbq 40(%r8), %r15 - movq %r9, %rax - sbbq 48(%r8), %rax - sbbq $0, %rbp - andl $1, %ebp - cmovneq %rcx, %rbx - movq %rbx, 56(%rdi) - testb %bpl, %bpl - cmovneq %rsi, %r10 - movq %r10, 64(%rdi) - cmovneq %rdx, %r11 - movq %r11, 72(%rdi) - cmovneq %r12, %r14 - movq %r14, 80(%rdi) - cmovneq -16(%rsp), %r13 ## 8-byte Folded Reload - movq %r13, 88(%rdi) - cmovneq -24(%rsp), %r15 ## 8-byte Folded Reload - movq %r15, 96(%rdi) - cmovneq %r9, %rax - movq %rax, 104(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sub7L - .p2align 4, 0x90 -_mcl_fpDbl_sub7L: ## @mcl_fpDbl_sub7L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq 104(%rdx), %rax - movq %rax, -8(%rsp) ## 8-byte Spill - movq 96(%rdx), %r10 - movq 88(%rdx), %r14 - movq 16(%rsi), %rax - movq (%rsi), %r15 - movq 8(%rsi), %r11 - xorl %ecx, %ecx - subq (%rdx), %r15 - sbbq 8(%rdx), %r11 - sbbq 16(%rdx), %rax - movq 24(%rsi), %rbx - sbbq 24(%rdx), %rbx - movq 32(%rsi), %r12 - sbbq 32(%rdx), %r12 - movq 80(%rdx), %r13 - movq 72(%rdx), %rbp - movq %r15, (%rdi) - movq 64(%rdx), %r9 - movq %r11, 8(%rdi) - movq 56(%rdx), %r15 - movq %rax, 16(%rdi) - movq 48(%rdx), %r11 - movq 40(%rdx), %rdx - movq %rbx, 24(%rdi) - movq 40(%rsi), %rbx - sbbq %rdx, %rbx - movq 104(%rsi), %rax - movq %r12, 32(%rdi) - movq 48(%rsi), %r12 - sbbq %r11, %r12 - movq 96(%rsi), %r11 - movq %rbx, 40(%rdi) - movq 56(%rsi), %rdx - sbbq %r15, %rdx - movq 88(%rsi), %r15 - movq %r12, 48(%rdi) - movq 64(%rsi), %rbx - sbbq %r9, %rbx - movq 80(%rsi), %r12 - movq 72(%rsi), %r9 - sbbq %rbp, %r9 - sbbq %r13, %r12 - sbbq %r14, %r15 - sbbq %r10, %r11 - sbbq -8(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -8(%rsp) ## 8-byte Spill - movl $0, %ebp - sbbq $0, %rbp - andl $1, %ebp - movq (%r8), %r10 - cmoveq %rcx, %r10 - testb %bpl, %bpl - movq 16(%r8), %rbp - cmoveq %rcx, %rbp - movq 8(%r8), %rsi - cmoveq %rcx, %rsi - movq 48(%r8), %r14 - cmoveq %rcx, %r14 - movq 40(%r8), %r13 - cmoveq %rcx, %r13 - movq 32(%r8), %rax - cmoveq %rcx, %rax - cmovneq 24(%r8), %rcx - addq %rdx, %r10 - adcq %rbx, %rsi - movq %r10, 56(%rdi) - movq %rsi, 64(%rdi) - adcq %r9, %rbp - movq %rbp, 72(%rdi) - adcq %r12, %rcx - movq %rcx, 80(%rdi) - adcq %r15, %rax - movq %rax, 88(%rdi) - adcq %r11, %r13 - movq %r13, 96(%rdi) - adcq -8(%rsp), %r14 ## 8-byte Folded Reload - movq %r14, 104(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .p2align 4, 0x90 -l_mulPv512x64: ## @mulPv512x64 -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdx, %rcx - movq %rcx, %rax - mulq (%rsi) - movq %rdx, -24(%rsp) ## 8-byte Spill - movq %rax, (%rdi) - movq %rcx, %rax - mulq 56(%rsi) - movq %rdx, %r10 - movq %rax, -8(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq 48(%rsi) - movq %rdx, %r11 - movq %rax, -16(%rsp) ## 8-byte Spill - movq %rcx, %rax - mulq 40(%rsi) - movq %rdx, %r12 - movq %rax, %r15 - movq %rcx, %rax - mulq 32(%rsi) - movq %rdx, %rbx - movq %rax, %r13 - movq %rcx, %rax - mulq 24(%rsi) - movq %rdx, %rbp - movq %rax, %r8 - movq 
%rcx, %rax - mulq 16(%rsi) - movq %rdx, %r9 - movq %rax, %r14 - movq %rcx, %rax - mulq 8(%rsi) - addq -24(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 8(%rdi) - adcq %r14, %rdx - movq %rdx, 16(%rdi) - adcq %r8, %r9 - movq %r9, 24(%rdi) - adcq %r13, %rbp - movq %rbp, 32(%rdi) - adcq %r15, %rbx - movq %rbx, 40(%rdi) - adcq -16(%rsp), %r12 ## 8-byte Folded Reload - movq %r12, 48(%rdi) - adcq -8(%rsp), %r11 ## 8-byte Folded Reload - movq %r11, 56(%rdi) - adcq $0, %r10 - movq %r10, 64(%rdi) - movq %rdi, %rax - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mulUnitPre8L - .p2align 4, 0x90 -_mcl_fp_mulUnitPre8L: ## @mcl_fp_mulUnitPre8L -## BB#0: - pushq %rbx - subq $80, %rsp - movq %rdi, %rbx - leaq 8(%rsp), %rdi - callq l_mulPv512x64 - movq 72(%rsp), %r8 - movq 64(%rsp), %r9 - movq 56(%rsp), %r10 - movq 48(%rsp), %r11 - movq 40(%rsp), %rdi - movq 32(%rsp), %rax - movq 24(%rsp), %rcx - movq 8(%rsp), %rdx - movq 16(%rsp), %rsi - movq %rdx, (%rbx) - movq %rsi, 8(%rbx) - movq %rcx, 16(%rbx) - movq %rax, 24(%rbx) - movq %rdi, 32(%rbx) - movq %r11, 40(%rbx) - movq %r10, 48(%rbx) - movq %r9, 56(%rbx) - movq %r8, 64(%rbx) - addq $80, %rsp - popq %rbx - retq - - .globl _mcl_fpDbl_mulPre8L - .p2align 4, 0x90 -_mcl_fpDbl_mulPre8L: ## @mcl_fpDbl_mulPre8L -## BB#0: - pushq %rbp - movq %rsp, %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $200, %rsp - movq %rdx, %r15 - movq %rsi, %rbx - movq %rdi, %r14 - callq _mcl_fpDbl_mulPre4L - leaq 64(%r14), %rdi - leaq 32(%rbx), %rsi - leaq 32(%r15), %rdx - callq _mcl_fpDbl_mulPre4L - movq 56(%rbx), %r10 - movq 48(%rbx), %rdx - movq (%rbx), %rsi - movq 8(%rbx), %rdi - addq 32(%rbx), %rsi - adcq 40(%rbx), %rdi - adcq 16(%rbx), %rdx - adcq 24(%rbx), %r10 - pushfq - popq %r8 - xorl %r9d, %r9d - movq 56(%r15), %rcx - movq 48(%r15), %r13 - movq (%r15), %r12 - movq 8(%r15), %rbx - addq 32(%r15), %r12 - adcq 40(%r15), %rbx - adcq 16(%r15), %r13 - adcq 24(%r15), %rcx - movl $0, %eax - cmovbq %r10, %rax - movq %rax, -88(%rbp) ## 8-byte Spill - movl $0, %eax - cmovbq %rdx, %rax - movq %rax, -80(%rbp) ## 8-byte Spill - movl $0, %eax - cmovbq %rdi, %rax - movq %rax, -72(%rbp) ## 8-byte Spill - movl $0, %eax - cmovbq %rsi, %rax - movq %rax, -64(%rbp) ## 8-byte Spill - sbbq %r15, %r15 - movq %rsi, -168(%rbp) - movq %rdi, -160(%rbp) - movq %rdx, -152(%rbp) - movq %r10, -144(%rbp) - movq %r12, -136(%rbp) - movq %rbx, -128(%rbp) - movq %r13, -120(%rbp) - movq %rcx, -112(%rbp) - pushq %r8 - popfq - cmovaeq %r9, %rcx - movq %rcx, -48(%rbp) ## 8-byte Spill - cmovaeq %r9, %r13 - cmovaeq %r9, %rbx - cmovaeq %r9, %r12 - sbbq %rax, %rax - movq %rax, -56(%rbp) ## 8-byte Spill - leaq -232(%rbp), %rdi - leaq -168(%rbp), %rsi - leaq -136(%rbp), %rdx - callq _mcl_fpDbl_mulPre4L - addq -64(%rbp), %r12 ## 8-byte Folded Reload - adcq -72(%rbp), %rbx ## 8-byte Folded Reload - adcq -80(%rbp), %r13 ## 8-byte Folded Reload - movq -48(%rbp), %r10 ## 8-byte Reload - adcq -88(%rbp), %r10 ## 8-byte Folded Reload - sbbq %rax, %rax - andl $1, %eax - movq -56(%rbp), %rdx ## 8-byte Reload - andl %edx, %r15d - andl $1, %r15d - addq -200(%rbp), %r12 - adcq -192(%rbp), %rbx - adcq -184(%rbp), %r13 - adcq -176(%rbp), %r10 - adcq %rax, %r15 - movq -208(%rbp), %rax - movq -216(%rbp), %rcx - movq -232(%rbp), %rsi - movq -224(%rbp), %rdx - subq (%r14), %rsi - sbbq 8(%r14), %rdx - sbbq 16(%r14), %rcx - sbbq 24(%r14), %rax - movq 32(%r14), %rdi - movq %rdi, -80(%rbp) ## 8-byte Spill - movq 40(%r14), %r8 - movq %r8, -88(%rbp) ## 8-byte Spill - 
sbbq %rdi, %r12 - sbbq %r8, %rbx - movq 48(%r14), %rdi - movq %rdi, -72(%rbp) ## 8-byte Spill - sbbq %rdi, %r13 - movq 56(%r14), %rdi - movq %rdi, -64(%rbp) ## 8-byte Spill - sbbq %rdi, %r10 - sbbq $0, %r15 - movq 64(%r14), %r11 - subq %r11, %rsi - movq 72(%r14), %rdi - movq %rdi, -56(%rbp) ## 8-byte Spill - sbbq %rdi, %rdx - movq 80(%r14), %rdi - movq %rdi, -48(%rbp) ## 8-byte Spill - sbbq %rdi, %rcx - movq 88(%r14), %rdi - movq %rdi, -104(%rbp) ## 8-byte Spill - sbbq %rdi, %rax - movq 96(%r14), %rdi - movq %rdi, -96(%rbp) ## 8-byte Spill - sbbq %rdi, %r12 - movq 104(%r14), %rdi - sbbq %rdi, %rbx - movq 112(%r14), %r8 - sbbq %r8, %r13 - movq 120(%r14), %r9 - sbbq %r9, %r10 - sbbq $0, %r15 - addq -80(%rbp), %rsi ## 8-byte Folded Reload - adcq -88(%rbp), %rdx ## 8-byte Folded Reload - movq %rsi, 32(%r14) - adcq -72(%rbp), %rcx ## 8-byte Folded Reload - movq %rdx, 40(%r14) - adcq -64(%rbp), %rax ## 8-byte Folded Reload - movq %rcx, 48(%r14) - adcq %r11, %r12 - movq %rax, 56(%r14) - movq %r12, 64(%r14) - adcq -56(%rbp), %rbx ## 8-byte Folded Reload - movq %rbx, 72(%r14) - adcq -48(%rbp), %r13 ## 8-byte Folded Reload - movq %r13, 80(%r14) - adcq -104(%rbp), %r10 ## 8-byte Folded Reload - movq %r10, 88(%r14) - adcq -96(%rbp), %r15 ## 8-byte Folded Reload - movq %r15, 96(%r14) - adcq $0, %rdi - movq %rdi, 104(%r14) - adcq $0, %r8 - movq %r8, 112(%r14) - adcq $0, %r9 - movq %r9, 120(%r14) - addq $200, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sqrPre8L - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre8L: ## @mcl_fpDbl_sqrPre8L -## BB#0: - pushq %rbp - movq %rsp, %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $200, %rsp - movq %rsi, %rbx - movq %rdi, %r14 - movq %rbx, %rdx - callq _mcl_fpDbl_mulPre4L - leaq 64(%r14), %rdi - leaq 32(%rbx), %rsi - movq %rsi, %rdx - callq _mcl_fpDbl_mulPre4L - movq 56(%rbx), %r15 - movq 48(%rbx), %rax - movq (%rbx), %rcx - movq 8(%rbx), %rdx - addq 32(%rbx), %rcx - adcq 40(%rbx), %rdx - adcq 16(%rbx), %rax - adcq 24(%rbx), %r15 - pushfq - popq %r8 - pushfq - popq %r9 - pushfq - popq %r10 - pushfq - popq %rdi - pushfq - popq %rbx - sbbq %rsi, %rsi - movq %rsi, -56(%rbp) ## 8-byte Spill - leaq (%rcx,%rcx), %rsi - xorl %r11d, %r11d - pushq %rbx - popfq - cmovaeq %r11, %rsi - movq %rsi, -48(%rbp) ## 8-byte Spill - movq %rdx, %r13 - shldq $1, %rcx, %r13 - pushq %rdi - popfq - cmovaeq %r11, %r13 - movq %rax, %r12 - shldq $1, %rdx, %r12 - pushq %r10 - popfq - cmovaeq %r11, %r12 - movq %r15, %rbx - movq %rcx, -168(%rbp) - movq %rdx, -160(%rbp) - movq %rax, -152(%rbp) - movq %r15, -144(%rbp) - movq %rcx, -136(%rbp) - movq %rdx, -128(%rbp) - movq %rax, -120(%rbp) - movq %r15, -112(%rbp) - shldq $1, %rax, %r15 - pushq %r9 - popfq - cmovaeq %r11, %r15 - shrq $63, %rbx - pushq %r8 - popfq - cmovaeq %r11, %rbx - leaq -232(%rbp), %rdi - leaq -168(%rbp), %rsi - leaq -136(%rbp), %rdx - callq _mcl_fpDbl_mulPre4L - movq -56(%rbp), %rax ## 8-byte Reload - andl $1, %eax - movq -48(%rbp), %r10 ## 8-byte Reload - addq -200(%rbp), %r10 - adcq -192(%rbp), %r13 - adcq -184(%rbp), %r12 - adcq -176(%rbp), %r15 - adcq %rbx, %rax - movq %rax, %rbx - movq -208(%rbp), %rax - movq -216(%rbp), %rcx - movq -232(%rbp), %rsi - movq -224(%rbp), %rdx - subq (%r14), %rsi - sbbq 8(%r14), %rdx - sbbq 16(%r14), %rcx - sbbq 24(%r14), %rax - movq 32(%r14), %r9 - movq %r9, -56(%rbp) ## 8-byte Spill - movq 40(%r14), %r8 - movq %r8, -48(%rbp) ## 8-byte Spill - sbbq %r9, %r10 - sbbq %r8, %r13 - movq 48(%r14), %rdi - movq %rdi, 
-104(%rbp) ## 8-byte Spill - sbbq %rdi, %r12 - movq 56(%r14), %rdi - movq %rdi, -96(%rbp) ## 8-byte Spill - sbbq %rdi, %r15 - sbbq $0, %rbx - movq 64(%r14), %r11 - subq %r11, %rsi - movq 72(%r14), %rdi - movq %rdi, -88(%rbp) ## 8-byte Spill - sbbq %rdi, %rdx - movq 80(%r14), %rdi - movq %rdi, -80(%rbp) ## 8-byte Spill - sbbq %rdi, %rcx - movq 88(%r14), %rdi - movq %rdi, -72(%rbp) ## 8-byte Spill - sbbq %rdi, %rax - movq 96(%r14), %rdi - movq %rdi, -64(%rbp) ## 8-byte Spill - sbbq %rdi, %r10 - movq 104(%r14), %rdi - sbbq %rdi, %r13 - movq 112(%r14), %r8 - sbbq %r8, %r12 - movq 120(%r14), %r9 - sbbq %r9, %r15 - sbbq $0, %rbx - addq -56(%rbp), %rsi ## 8-byte Folded Reload - adcq -48(%rbp), %rdx ## 8-byte Folded Reload - movq %rsi, 32(%r14) - adcq -104(%rbp), %rcx ## 8-byte Folded Reload - movq %rdx, 40(%r14) - adcq -96(%rbp), %rax ## 8-byte Folded Reload - movq %rcx, 48(%r14) - adcq %r11, %r10 - movq %rax, 56(%r14) - movq %r10, 64(%r14) - adcq -88(%rbp), %r13 ## 8-byte Folded Reload - movq %r13, 72(%r14) - adcq -80(%rbp), %r12 ## 8-byte Folded Reload - movq %r12, 80(%r14) - adcq -72(%rbp), %r15 ## 8-byte Folded Reload - movq %r15, 88(%r14) - movq %rbx, %rax - adcq -64(%rbp), %rax ## 8-byte Folded Reload - movq %rax, 96(%r14) - adcq $0, %rdi - movq %rdi, 104(%r14) - adcq $0, %r8 - movq %r8, 112(%r14) - adcq $0, %r9 - movq %r9, 120(%r14) - addq $200, %rsp - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mont8L - .p2align 4, 0x90 -_mcl_fp_mont8L: ## @mcl_fp_mont8L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $1256, %rsp ## imm = 0x4E8 - movq %rcx, %r13 - movq %rdx, 64(%rsp) ## 8-byte Spill - movq %rsi, 72(%rsp) ## 8-byte Spill - movq %rdi, 96(%rsp) ## 8-byte Spill - movq -8(%r13), %rbx - movq %rbx, 80(%rsp) ## 8-byte Spill - movq %r13, 56(%rsp) ## 8-byte Spill - movq (%rdx), %rdx - leaq 1184(%rsp), %rdi - callq l_mulPv512x64 - movq 1184(%rsp), %r15 - movq 1192(%rsp), %r14 - movq %r15, %rdx - imulq %rbx, %rdx - movq 1248(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - movq 1240(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - movq 1232(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 1224(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq 1216(%rsp), %r12 - movq 1208(%rsp), %rbx - movq 1200(%rsp), %rbp - leaq 1112(%rsp), %rdi - movq %r13, %rsi - callq l_mulPv512x64 - addq 1112(%rsp), %r15 - adcq 1120(%rsp), %r14 - adcq 1128(%rsp), %rbp - movq %rbp, 88(%rsp) ## 8-byte Spill - adcq 1136(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - adcq 1144(%rsp), %r12 - movq %r12, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %r13 ## 8-byte Reload - adcq 1152(%rsp), %r13 - movq (%rsp), %rbx ## 8-byte Reload - adcq 1160(%rsp), %rbx - movq 40(%rsp), %rbp ## 8-byte Reload - adcq 1168(%rsp), %rbp - movq 24(%rsp), %rax ## 8-byte Reload - adcq 1176(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - sbbq %r15, %r15 - movq 64(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rdx - leaq 1040(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - andl $1, %r15d - addq 1040(%rsp), %r14 - movq 88(%rsp), %rax ## 8-byte Reload - adcq 1048(%rsp), %rax - movq %rax, 88(%rsp) ## 8-byte Spill - movq 32(%rsp), %rax ## 8-byte Reload - adcq 1056(%rsp), %rax - movq %rax, %r12 - movq 8(%rsp), %rax ## 8-byte Reload - adcq 1064(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - adcq 1072(%rsp), %r13 - movq %r13, 16(%rsp) ## 8-byte Spill - adcq 1080(%rsp), %rbx - movq %rbx, (%rsp) ## 8-byte Spill - adcq 
1088(%rsp), %rbp - movq 24(%rsp), %rax ## 8-byte Reload - adcq 1096(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 1104(%rsp), %r15 - movq %r15, 48(%rsp) ## 8-byte Spill - sbbq %r15, %r15 - movq %r14, %rdx - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 968(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - andl $1, %r15d - addq 968(%rsp), %r14 - movq 88(%rsp), %r13 ## 8-byte Reload - adcq 976(%rsp), %r13 - adcq 984(%rsp), %r12 - movq %r12, 32(%rsp) ## 8-byte Spill - movq 8(%rsp), %r14 ## 8-byte Reload - adcq 992(%rsp), %r14 - movq 16(%rsp), %rbx ## 8-byte Reload - adcq 1000(%rsp), %rbx - movq (%rsp), %rax ## 8-byte Reload - adcq 1008(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - adcq 1016(%rsp), %rbp - movq %rbp, %r12 - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 1024(%rsp), %rbp - movq 48(%rsp), %rax ## 8-byte Reload - adcq 1032(%rsp), %rax - movq %rax, 48(%rsp) ## 8-byte Spill - adcq $0, %r15 - movq 64(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rdx - leaq 896(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq %r13, %rcx - addq 896(%rsp), %rcx - movq 32(%rsp), %r13 ## 8-byte Reload - adcq 904(%rsp), %r13 - adcq 912(%rsp), %r14 - adcq 920(%rsp), %rbx - movq %rbx, 16(%rsp) ## 8-byte Spill - movq (%rsp), %rax ## 8-byte Reload - adcq 928(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - adcq 936(%rsp), %r12 - movq %r12, 40(%rsp) ## 8-byte Spill - adcq 944(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq 48(%rsp), %r12 ## 8-byte Reload - adcq 952(%rsp), %r12 - adcq 960(%rsp), %r15 - sbbq %rbx, %rbx - movq %rcx, %rdx - movq %rcx, %rbp - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 824(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - andl $1, %ebx - addq 824(%rsp), %rbp - adcq 832(%rsp), %r13 - movq %r13, 32(%rsp) ## 8-byte Spill - adcq 840(%rsp), %r14 - movq %r14, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %r13 ## 8-byte Reload - adcq 848(%rsp), %r13 - movq (%rsp), %rbp ## 8-byte Reload - adcq 856(%rsp), %rbp - movq 40(%rsp), %r14 ## 8-byte Reload - adcq 864(%rsp), %r14 - movq 24(%rsp), %rax ## 8-byte Reload - adcq 872(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 880(%rsp), %r12 - adcq 888(%rsp), %r15 - adcq $0, %rbx - movq 64(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rdx - leaq 752(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 32(%rsp), %rax ## 8-byte Reload - addq 752(%rsp), %rax - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 760(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - adcq 768(%rsp), %r13 - movq %r13, 16(%rsp) ## 8-byte Spill - adcq 776(%rsp), %rbp - movq %rbp, (%rsp) ## 8-byte Spill - adcq 784(%rsp), %r14 - movq %r14, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 792(%rsp), %rbp - adcq 800(%rsp), %r12 - adcq 808(%rsp), %r15 - adcq 816(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - sbbq %r13, %r13 - movq %rax, %rdx - movq %rax, %rbx - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 680(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq %r13, %rax - andl $1, %eax - addq 680(%rsp), %rbx - movq 8(%rsp), %r14 ## 8-byte Reload - adcq 688(%rsp), %r14 - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 696(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - movq (%rsp), %r13 ## 8-byte Reload - adcq 704(%rsp), %r13 - movq 40(%rsp), %rbx ## 8-byte Reload - adcq 712(%rsp), %rbx - adcq 720(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq %r12, %rbp - adcq 
728(%rsp), %rbp - adcq 736(%rsp), %r15 - movq 32(%rsp), %r12 ## 8-byte Reload - adcq 744(%rsp), %r12 - adcq $0, %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 64(%rsp), %rax ## 8-byte Reload - movq 32(%rax), %rdx - leaq 608(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq %r14, %rax - addq 608(%rsp), %rax - movq 16(%rsp), %r14 ## 8-byte Reload - adcq 616(%rsp), %r14 - adcq 624(%rsp), %r13 - movq %r13, (%rsp) ## 8-byte Spill - adcq 632(%rsp), %rbx - movq %rbx, %r13 - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 640(%rsp), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 648(%rsp), %rbp - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 656(%rsp), %r15 - adcq 664(%rsp), %r12 - movq %r12, 32(%rsp) ## 8-byte Spill - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 672(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - sbbq %rbp, %rbp - movq %rax, %rdx - movq %rax, %rbx - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 536(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq %rbp, %rax - andl $1, %eax - addq 536(%rsp), %rbx - adcq 544(%rsp), %r14 - movq %r14, 16(%rsp) ## 8-byte Spill - movq (%rsp), %rbx ## 8-byte Reload - adcq 552(%rsp), %rbx - adcq 560(%rsp), %r13 - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 568(%rsp), %rbp - movq 48(%rsp), %r12 ## 8-byte Reload - adcq 576(%rsp), %r12 - adcq 584(%rsp), %r15 - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 592(%rsp), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - movq 8(%rsp), %r14 ## 8-byte Reload - adcq 600(%rsp), %r14 - adcq $0, %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 64(%rsp), %rax ## 8-byte Reload - movq 40(%rax), %rdx - leaq 464(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 16(%rsp), %rax ## 8-byte Reload - addq 464(%rsp), %rax - adcq 472(%rsp), %rbx - adcq 480(%rsp), %r13 - movq %r13, 40(%rsp) ## 8-byte Spill - adcq 488(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - adcq 496(%rsp), %r12 - adcq 504(%rsp), %r15 - movq %r15, 16(%rsp) ## 8-byte Spill - movq 32(%rsp), %r15 ## 8-byte Reload - adcq 512(%rsp), %r15 - adcq 520(%rsp), %r14 - movq %r14, 8(%rsp) ## 8-byte Spill - movq (%rsp), %r14 ## 8-byte Reload - adcq 528(%rsp), %r14 - sbbq %r13, %r13 - movq %rax, %rdx - movq %rax, %rbp - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 392(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq %r13, %rax - andl $1, %eax - addq 392(%rsp), %rbp - adcq 400(%rsp), %rbx - movq %rbx, (%rsp) ## 8-byte Spill - movq 40(%rsp), %rbp ## 8-byte Reload - adcq 408(%rsp), %rbp - movq 24(%rsp), %rbx ## 8-byte Reload - adcq 416(%rsp), %rbx - adcq 424(%rsp), %r12 - movq 16(%rsp), %r13 ## 8-byte Reload - adcq 432(%rsp), %r13 - adcq 440(%rsp), %r15 - movq %r15, 32(%rsp) ## 8-byte Spill - movq 8(%rsp), %r15 ## 8-byte Reload - adcq 448(%rsp), %r15 - adcq 456(%rsp), %r14 - adcq $0, %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq 64(%rsp), %rax ## 8-byte Reload - movq 48(%rax), %rdx - leaq 320(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq (%rsp), %rax ## 8-byte Reload - addq 320(%rsp), %rax - adcq 328(%rsp), %rbp - movq %rbp, 40(%rsp) ## 8-byte Spill - adcq 336(%rsp), %rbx - movq %rbx, 24(%rsp) ## 8-byte Spill - movq %r12, %rbp - adcq 344(%rsp), %rbp - adcq 352(%rsp), %r13 - movq 32(%rsp), %r12 ## 8-byte Reload - adcq 360(%rsp), %r12 - adcq 368(%rsp), %r15 - movq %r15, 8(%rsp) ## 8-byte Spill - adcq 376(%rsp), %r14 - movq %r14, (%rsp) ## 8-byte Spill - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 384(%rsp), %rcx - 
movq %rcx, 16(%rsp) ## 8-byte Spill - sbbq %r15, %r15 - movq %rax, %rdx - movq %rax, %rbx - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 248(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - andl $1, %r15d - addq 248(%rsp), %rbx - movq 40(%rsp), %rax ## 8-byte Reload - adcq 256(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %r14 ## 8-byte Reload - adcq 264(%rsp), %r14 - adcq 272(%rsp), %rbp - movq %rbp, 48(%rsp) ## 8-byte Spill - movq %r13, %rbx - adcq 280(%rsp), %rbx - movq %r12, %rbp - adcq 288(%rsp), %rbp - movq 8(%rsp), %r13 ## 8-byte Reload - adcq 296(%rsp), %r13 - movq (%rsp), %rax ## 8-byte Reload - adcq 304(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 16(%rsp), %r12 ## 8-byte Reload - adcq 312(%rsp), %r12 - adcq $0, %r15 - movq 64(%rsp), %rax ## 8-byte Reload - movq 56(%rax), %rdx - leaq 176(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 40(%rsp), %rax ## 8-byte Reload - addq 176(%rsp), %rax - adcq 184(%rsp), %r14 - movq %r14, 24(%rsp) ## 8-byte Spill - movq 48(%rsp), %rcx ## 8-byte Reload - adcq 192(%rsp), %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - adcq 200(%rsp), %rbx - movq %rbx, 16(%rsp) ## 8-byte Spill - adcq 208(%rsp), %rbp - adcq 216(%rsp), %r13 - movq %r13, 8(%rsp) ## 8-byte Spill - movq (%rsp), %r14 ## 8-byte Reload - adcq 224(%rsp), %r14 - adcq 232(%rsp), %r12 - adcq 240(%rsp), %r15 - sbbq %rbx, %rbx - movq 80(%rsp), %rdx ## 8-byte Reload - imulq %rax, %rdx - movq %rax, %r13 - leaq 104(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - andl $1, %ebx - addq 104(%rsp), %r13 - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 112(%rsp), %rcx - movq 48(%rsp), %rdx ## 8-byte Reload - adcq 120(%rsp), %rdx - movq 16(%rsp), %rsi ## 8-byte Reload - adcq 128(%rsp), %rsi - movq %rbp, %rdi - adcq 136(%rsp), %rdi - movq %rdi, 32(%rsp) ## 8-byte Spill - movq 8(%rsp), %r8 ## 8-byte Reload - adcq 144(%rsp), %r8 - movq %r8, 8(%rsp) ## 8-byte Spill - movq %r14, %r9 - adcq 152(%rsp), %r9 - movq %r9, (%rsp) ## 8-byte Spill - adcq 160(%rsp), %r12 - adcq 168(%rsp), %r15 - adcq $0, %rbx - movq %rcx, %rax - movq %rcx, %r11 - movq 56(%rsp), %rbp ## 8-byte Reload - subq (%rbp), %rax - movq %rdx, %rcx - movq %rdx, %r14 - sbbq 8(%rbp), %rcx - movq %rsi, %rdx - movq %rsi, %r13 - sbbq 16(%rbp), %rdx - movq %rdi, %rsi - sbbq 24(%rbp), %rsi - movq %r8, %rdi - sbbq 32(%rbp), %rdi - movq %r9, %r10 - sbbq 40(%rbp), %r10 - movq %r12, %r8 - sbbq 48(%rbp), %r8 - movq %r15, %r9 - sbbq 56(%rbp), %r9 - sbbq $0, %rbx - andl $1, %ebx - cmovneq %r15, %r9 - testb %bl, %bl - cmovneq %r11, %rax - movq 96(%rsp), %rbx ## 8-byte Reload - movq %rax, (%rbx) - cmovneq %r14, %rcx - movq %rcx, 8(%rbx) - cmovneq %r13, %rdx - movq %rdx, 16(%rbx) - cmovneq 32(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 24(%rbx) - cmovneq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 32(%rbx) - cmovneq (%rsp), %r10 ## 8-byte Folded Reload - movq %r10, 40(%rbx) - cmovneq %r12, %r8 - movq %r8, 48(%rbx) - movq %r9, 56(%rbx) - addq $1256, %rsp ## imm = 0x4E8 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montNF8L - .p2align 4, 0x90 -_mcl_fp_montNF8L: ## @mcl_fp_montNF8L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $1240, %rsp ## imm = 0x4D8 - movq %rcx, 40(%rsp) ## 8-byte Spill - movq %rdx, 48(%rsp) ## 8-byte Spill - movq %rsi, 56(%rsp) ## 8-byte Spill - movq %rdi, 80(%rsp) ## 8-byte Spill - movq -8(%rcx), %rbx - movq %rbx, 
64(%rsp) ## 8-byte Spill - movq (%rdx), %rdx - leaq 1168(%rsp), %rdi - callq l_mulPv512x64 - movq 1168(%rsp), %r15 - movq 1176(%rsp), %r12 - movq %r15, %rdx - imulq %rbx, %rdx - movq 1232(%rsp), %rax - movq %rax, 32(%rsp) ## 8-byte Spill - movq 1224(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 1216(%rsp), %r13 - movq 1208(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 1200(%rsp), %r14 - movq 1192(%rsp), %rbp - movq 1184(%rsp), %rbx - leaq 1096(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 1096(%rsp), %r15 - adcq 1104(%rsp), %r12 - movq %r12, 16(%rsp) ## 8-byte Spill - adcq 1112(%rsp), %rbx - adcq 1120(%rsp), %rbp - adcq 1128(%rsp), %r14 - movq %r14, %r12 - movq 8(%rsp), %r14 ## 8-byte Reload - adcq 1136(%rsp), %r14 - adcq 1144(%rsp), %r13 - movq (%rsp), %rax ## 8-byte Reload - adcq 1152(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 32(%rsp), %rax ## 8-byte Reload - adcq 1160(%rsp), %rax - movq %rax, 32(%rsp) ## 8-byte Spill - movq 48(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rdx - leaq 1024(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 1088(%rsp), %r15 - movq 16(%rsp), %rax ## 8-byte Reload - addq 1024(%rsp), %rax - adcq 1032(%rsp), %rbx - movq %rbx, 72(%rsp) ## 8-byte Spill - movq %rbp, %rbx - adcq 1040(%rsp), %rbx - adcq 1048(%rsp), %r12 - adcq 1056(%rsp), %r14 - movq %r14, 8(%rsp) ## 8-byte Spill - movq %r13, %rbp - adcq 1064(%rsp), %rbp - movq (%rsp), %rcx ## 8-byte Reload - adcq 1072(%rsp), %rcx - movq %rcx, (%rsp) ## 8-byte Spill - movq 32(%rsp), %r14 ## 8-byte Reload - adcq 1080(%rsp), %r14 - adcq $0, %r15 - movq %rax, %rdx - movq %rax, %r13 - imulq 64(%rsp), %rdx ## 8-byte Folded Reload - leaq 952(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 952(%rsp), %r13 - movq 72(%rsp), %rax ## 8-byte Reload - adcq 960(%rsp), %rax - movq %rax, 72(%rsp) ## 8-byte Spill - adcq 968(%rsp), %rbx - movq %rbx, 16(%rsp) ## 8-byte Spill - movq %r12, %rbx - adcq 976(%rsp), %rbx - movq 8(%rsp), %r12 ## 8-byte Reload - adcq 984(%rsp), %r12 - adcq 992(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq (%rsp), %r13 ## 8-byte Reload - adcq 1000(%rsp), %r13 - movq %r14, %rbp - adcq 1008(%rsp), %rbp - adcq 1016(%rsp), %r15 - movq 48(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rdx - leaq 880(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 944(%rsp), %r14 - movq 72(%rsp), %rax ## 8-byte Reload - addq 880(%rsp), %rax - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 888(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - adcq 896(%rsp), %rbx - adcq 904(%rsp), %r12 - movq %r12, 8(%rsp) ## 8-byte Spill - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 912(%rsp), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 920(%rsp), %r13 - movq %r13, (%rsp) ## 8-byte Spill - adcq 928(%rsp), %rbp - movq %rbp, 32(%rsp) ## 8-byte Spill - adcq 936(%rsp), %r15 - adcq $0, %r14 - movq %rax, %rdx - movq %rax, %rbp - imulq 64(%rsp), %rdx ## 8-byte Folded Reload - leaq 808(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 808(%rsp), %rbp - movq 16(%rsp), %r13 ## 8-byte Reload - adcq 816(%rsp), %r13 - movq %rbx, %r12 - adcq 824(%rsp), %r12 - movq 8(%rsp), %rbx ## 8-byte Reload - adcq 832(%rsp), %rbx - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 840(%rsp), %rbp - movq (%rsp), %rax ## 8-byte Reload - adcq 848(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 32(%rsp), %rax ## 8-byte Reload - adcq 856(%rsp), %rax - movq %rax, 32(%rsp) ## 
8-byte Spill - adcq 864(%rsp), %r15 - adcq 872(%rsp), %r14 - movq 48(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rdx - leaq 736(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 800(%rsp), %rax - movq %r13, %rcx - addq 736(%rsp), %rcx - adcq 744(%rsp), %r12 - movq %r12, 24(%rsp) ## 8-byte Spill - adcq 752(%rsp), %rbx - movq %rbx, 8(%rsp) ## 8-byte Spill - adcq 760(%rsp), %rbp - movq %rbp, %r13 - movq (%rsp), %rbp ## 8-byte Reload - adcq 768(%rsp), %rbp - movq 32(%rsp), %rbx ## 8-byte Reload - adcq 776(%rsp), %rbx - adcq 784(%rsp), %r15 - adcq 792(%rsp), %r14 - adcq $0, %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq %rcx, %rdx - movq %rcx, %r12 - imulq 64(%rsp), %rdx ## 8-byte Folded Reload - leaq 664(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 664(%rsp), %r12 - movq 24(%rsp), %rax ## 8-byte Reload - adcq 672(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - movq 8(%rsp), %rax ## 8-byte Reload - adcq 680(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - adcq 688(%rsp), %r13 - adcq 696(%rsp), %rbp - movq %rbp, (%rsp) ## 8-byte Spill - adcq 704(%rsp), %rbx - adcq 712(%rsp), %r15 - adcq 720(%rsp), %r14 - movq 16(%rsp), %r12 ## 8-byte Reload - adcq 728(%rsp), %r12 - movq 48(%rsp), %rax ## 8-byte Reload - movq 32(%rax), %rdx - leaq 592(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 656(%rsp), %rcx - movq 24(%rsp), %rax ## 8-byte Reload - addq 592(%rsp), %rax - movq 8(%rsp), %rbp ## 8-byte Reload - adcq 600(%rsp), %rbp - adcq 608(%rsp), %r13 - movq %r13, 24(%rsp) ## 8-byte Spill - movq (%rsp), %r13 ## 8-byte Reload - adcq 616(%rsp), %r13 - adcq 624(%rsp), %rbx - adcq 632(%rsp), %r15 - adcq 640(%rsp), %r14 - adcq 648(%rsp), %r12 - movq %r12, 16(%rsp) ## 8-byte Spill - adcq $0, %rcx - movq %rcx, (%rsp) ## 8-byte Spill - movq %rax, %rdx - movq %rax, %r12 - imulq 64(%rsp), %rdx ## 8-byte Folded Reload - leaq 520(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 520(%rsp), %r12 - adcq 528(%rsp), %rbp - movq %rbp, 8(%rsp) ## 8-byte Spill - movq 24(%rsp), %r12 ## 8-byte Reload - adcq 536(%rsp), %r12 - movq %r13, %rbp - adcq 544(%rsp), %rbp - adcq 552(%rsp), %rbx - adcq 560(%rsp), %r15 - adcq 568(%rsp), %r14 - movq 16(%rsp), %r13 ## 8-byte Reload - adcq 576(%rsp), %r13 - movq (%rsp), %rax ## 8-byte Reload - adcq 584(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 48(%rsp), %rax ## 8-byte Reload - movq 40(%rax), %rdx - leaq 448(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 512(%rsp), %rcx - movq 8(%rsp), %rax ## 8-byte Reload - addq 448(%rsp), %rax - adcq 456(%rsp), %r12 - movq %r12, 24(%rsp) ## 8-byte Spill - adcq 464(%rsp), %rbp - adcq 472(%rsp), %rbx - adcq 480(%rsp), %r15 - adcq 488(%rsp), %r14 - adcq 496(%rsp), %r13 - movq %r13, 16(%rsp) ## 8-byte Spill - movq (%rsp), %r13 ## 8-byte Reload - adcq 504(%rsp), %r13 - adcq $0, %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq %rax, %rdx - movq %rax, %r12 - imulq 64(%rsp), %rdx ## 8-byte Folded Reload - leaq 376(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 376(%rsp), %r12 - movq 24(%rsp), %rax ## 8-byte Reload - adcq 384(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 392(%rsp), %rbp - adcq 400(%rsp), %rbx - adcq 408(%rsp), %r15 - adcq 416(%rsp), %r14 - movq 16(%rsp), %r12 ## 8-byte Reload - adcq 424(%rsp), %r12 - adcq 432(%rsp), %r13 - movq 8(%rsp), %rax ## 8-byte Reload - adcq 440(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 
48(%rsp), %rax ## 8-byte Reload - movq 48(%rax), %rdx - leaq 304(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 368(%rsp), %rcx - movq 24(%rsp), %rax ## 8-byte Reload - addq 304(%rsp), %rax - adcq 312(%rsp), %rbp - movq %rbp, (%rsp) ## 8-byte Spill - adcq 320(%rsp), %rbx - adcq 328(%rsp), %r15 - adcq 336(%rsp), %r14 - adcq 344(%rsp), %r12 - movq %r12, 16(%rsp) ## 8-byte Spill - adcq 352(%rsp), %r13 - movq 8(%rsp), %rbp ## 8-byte Reload - adcq 360(%rsp), %rbp - adcq $0, %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - movq %rax, %rdx - movq %rax, %r12 - imulq 64(%rsp), %rdx ## 8-byte Folded Reload - leaq 232(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 232(%rsp), %r12 - movq (%rsp), %rax ## 8-byte Reload - adcq 240(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - adcq 248(%rsp), %rbx - adcq 256(%rsp), %r15 - adcq 264(%rsp), %r14 - movq 16(%rsp), %r12 ## 8-byte Reload - adcq 272(%rsp), %r12 - adcq 280(%rsp), %r13 - adcq 288(%rsp), %rbp - movq %rbp, 8(%rsp) ## 8-byte Spill - movq 32(%rsp), %rbp ## 8-byte Reload - adcq 296(%rsp), %rbp - movq 48(%rsp), %rax ## 8-byte Reload - movq 56(%rax), %rdx - leaq 160(%rsp), %rdi - movq 56(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - movq 224(%rsp), %rcx - movq (%rsp), %rax ## 8-byte Reload - addq 160(%rsp), %rax - adcq 168(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - adcq 176(%rsp), %r15 - adcq 184(%rsp), %r14 - adcq 192(%rsp), %r12 - movq %r12, 16(%rsp) ## 8-byte Spill - adcq 200(%rsp), %r13 - movq 8(%rsp), %rbx ## 8-byte Reload - adcq 208(%rsp), %rbx - adcq 216(%rsp), %rbp - movq %rbp, %r12 - adcq $0, %rcx - movq %rcx, (%rsp) ## 8-byte Spill - movq 64(%rsp), %rdx ## 8-byte Reload - imulq %rax, %rdx - movq %rax, %rbp - leaq 88(%rsp), %rdi - movq 40(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 88(%rsp), %rbp - movq 32(%rsp), %r11 ## 8-byte Reload - adcq 96(%rsp), %r11 - adcq 104(%rsp), %r15 - adcq 112(%rsp), %r14 - movq 16(%rsp), %rsi ## 8-byte Reload - adcq 120(%rsp), %rsi - movq %rsi, 16(%rsp) ## 8-byte Spill - adcq 128(%rsp), %r13 - adcq 136(%rsp), %rbx - movq %rbx, 8(%rsp) ## 8-byte Spill - adcq 144(%rsp), %r12 - movq (%rsp), %r8 ## 8-byte Reload - adcq 152(%rsp), %r8 - movq %r11, %rax - movq 40(%rsp), %rbp ## 8-byte Reload - subq (%rbp), %rax - movq %r15, %rcx - sbbq 8(%rbp), %rcx - movq %r14, %rdx - sbbq 16(%rbp), %rdx - sbbq 24(%rbp), %rsi - movq %r13, %rdi - sbbq 32(%rbp), %rdi - movq %rbx, %r9 - sbbq 40(%rbp), %r9 - movq %r12, %r10 - sbbq 48(%rbp), %r10 - movq %rbp, %rbx - movq %r8, %rbp - sbbq 56(%rbx), %rbp - testq %rbp, %rbp - cmovsq %r11, %rax - movq 80(%rsp), %rbx ## 8-byte Reload - movq %rax, (%rbx) - cmovsq %r15, %rcx - movq %rcx, 8(%rbx) - cmovsq %r14, %rdx - movq %rdx, 16(%rbx) - cmovsq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 24(%rbx) - cmovsq %r13, %rdi - movq %rdi, 32(%rbx) - cmovsq 8(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 40(%rbx) - cmovsq %r12, %r10 - movq %r10, 48(%rbx) - cmovsq %r8, %rbp - movq %rbp, 56(%rbx) - addq $1240, %rsp ## imm = 0x4D8 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montRed8L - .p2align 4, 0x90 -_mcl_fp_montRed8L: ## @mcl_fp_montRed8L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $776, %rsp ## imm = 0x308 - movq %rdx, %rax - movq %rdi, 192(%rsp) ## 8-byte Spill - movq -8(%rax), %rcx - movq %rcx, 104(%rsp) ## 8-byte Spill - movq (%rsi), %r15 - movq 8(%rsi), %rdx - movq %rdx, 8(%rsp) ## 8-byte Spill - 
movq %r15, %rdx - imulq %rcx, %rdx - movq 120(%rsi), %rcx - movq %rcx, 112(%rsp) ## 8-byte Spill - movq 112(%rsi), %rcx - movq %rcx, 56(%rsp) ## 8-byte Spill - movq 104(%rsi), %rcx - movq %rcx, 96(%rsp) ## 8-byte Spill - movq 96(%rsi), %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - movq 88(%rsi), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - movq 80(%rsi), %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - movq 72(%rsi), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - movq 64(%rsi), %r13 - movq 56(%rsi), %rcx - movq %rcx, 64(%rsp) ## 8-byte Spill - movq 48(%rsi), %r14 - movq 40(%rsi), %rcx - movq %rcx, 72(%rsp) ## 8-byte Spill - movq 32(%rsi), %r12 - movq 24(%rsi), %rbx - movq 16(%rsi), %rbp - movq %rax, %rcx - movq (%rcx), %rax - movq %rax, 136(%rsp) ## 8-byte Spill - movq 56(%rcx), %rax - movq %rax, 184(%rsp) ## 8-byte Spill - movq 48(%rcx), %rax - movq %rax, 176(%rsp) ## 8-byte Spill - movq 40(%rcx), %rax - movq %rax, 168(%rsp) ## 8-byte Spill - movq 32(%rcx), %rax - movq %rax, 160(%rsp) ## 8-byte Spill - movq 24(%rcx), %rax - movq %rax, 152(%rsp) ## 8-byte Spill - movq 16(%rcx), %rax - movq %rax, 144(%rsp) ## 8-byte Spill - movq 8(%rcx), %rax - movq %rax, 128(%rsp) ## 8-byte Spill - movq %rcx, %rsi - movq %rsi, 88(%rsp) ## 8-byte Spill - leaq 704(%rsp), %rdi - callq l_mulPv512x64 - addq 704(%rsp), %r15 - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 712(%rsp), %rcx - adcq 720(%rsp), %rbp - movq %rbp, 80(%rsp) ## 8-byte Spill - adcq 728(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - adcq 736(%rsp), %r12 - movq %r12, 120(%rsp) ## 8-byte Spill - movq 72(%rsp), %rax ## 8-byte Reload - adcq 744(%rsp), %rax - movq %rax, 72(%rsp) ## 8-byte Spill - adcq 752(%rsp), %r14 - movq %r14, %r12 - movq 64(%rsp), %rax ## 8-byte Reload - adcq 760(%rsp), %rax - movq %rax, 64(%rsp) ## 8-byte Spill - adcq 768(%rsp), %r13 - movq %r13, 8(%rsp) ## 8-byte Spill - adcq $0, 16(%rsp) ## 8-byte Folded Spill - movq 40(%rsp), %r15 ## 8-byte Reload - adcq $0, %r15 - adcq $0, 24(%rsp) ## 8-byte Folded Spill - adcq $0, 48(%rsp) ## 8-byte Folded Spill - adcq $0, 96(%rsp) ## 8-byte Folded Spill - movq 56(%rsp), %r13 ## 8-byte Reload - adcq $0, %r13 - movq 112(%rsp), %r14 ## 8-byte Reload - adcq $0, %r14 - sbbq %rbx, %rbx - movq %rcx, %rbp - movq %rbp, %rdx - imulq 104(%rsp), %rdx ## 8-byte Folded Reload - leaq 632(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - andl $1, %ebx - movq %rbx, %rax - addq 632(%rsp), %rbp - movq 80(%rsp), %rsi ## 8-byte Reload - adcq 640(%rsp), %rsi - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 648(%rsp), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - movq 120(%rsp), %rcx ## 8-byte Reload - adcq 656(%rsp), %rcx - movq %rcx, 120(%rsp) ## 8-byte Spill - movq 72(%rsp), %rcx ## 8-byte Reload - adcq 664(%rsp), %rcx - movq %rcx, 72(%rsp) ## 8-byte Spill - adcq 672(%rsp), %r12 - movq 64(%rsp), %rcx ## 8-byte Reload - adcq 680(%rsp), %rcx - movq %rcx, 64(%rsp) ## 8-byte Spill - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 688(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 696(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - adcq $0, %r15 - movq %r15, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rbx ## 8-byte Reload - adcq $0, %rbx - movq 48(%rsp), %r15 ## 8-byte Reload - adcq $0, %r15 - adcq $0, 96(%rsp) ## 8-byte Folded Spill - adcq $0, %r13 - movq %r13, 56(%rsp) ## 8-byte Spill - adcq $0, %r14 - movq %r14, 112(%rsp) ## 8-byte Spill - movq %rax, %rbp - adcq $0, %rbp - movq %rsi, %rdx - movq %rsi, %r14 - imulq 104(%rsp), %rdx ## 8-byte Folded 
Reload - leaq 560(%rsp), %rdi - movq 88(%rsp), %r13 ## 8-byte Reload - movq %r13, %rsi - callq l_mulPv512x64 - addq 560(%rsp), %r14 - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 568(%rsp), %rcx - movq 120(%rsp), %rax ## 8-byte Reload - adcq 576(%rsp), %rax - movq %rax, 120(%rsp) ## 8-byte Spill - movq 72(%rsp), %rax ## 8-byte Reload - adcq 584(%rsp), %rax - movq %rax, 72(%rsp) ## 8-byte Spill - adcq 592(%rsp), %r12 - movq %r12, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %r14 ## 8-byte Reload - adcq 600(%rsp), %r14 - movq 8(%rsp), %rax ## 8-byte Reload - adcq 608(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %rax ## 8-byte Reload - adcq 616(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq 40(%rsp), %rax ## 8-byte Reload - adcq 624(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - adcq $0, %rbx - movq %rbx, 24(%rsp) ## 8-byte Spill - adcq $0, %r15 - movq %r15, 48(%rsp) ## 8-byte Spill - movq 96(%rsp), %rbx ## 8-byte Reload - adcq $0, %rbx - movq 56(%rsp), %r15 ## 8-byte Reload - adcq $0, %r15 - adcq $0, 112(%rsp) ## 8-byte Folded Spill - adcq $0, %rbp - movq %rbp, 80(%rsp) ## 8-byte Spill - movq %rcx, %rbp - movq %rbp, %rdx - movq 104(%rsp), %r12 ## 8-byte Reload - imulq %r12, %rdx - leaq 488(%rsp), %rdi - movq %r13, %rsi - callq l_mulPv512x64 - addq 488(%rsp), %rbp - movq 120(%rsp), %rax ## 8-byte Reload - adcq 496(%rsp), %rax - movq 72(%rsp), %rbp ## 8-byte Reload - adcq 504(%rsp), %rbp - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 512(%rsp), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - adcq 520(%rsp), %r14 - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 528(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 536(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - movq 40(%rsp), %r13 ## 8-byte Reload - adcq 544(%rsp), %r13 - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 552(%rsp), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq $0, 48(%rsp) ## 8-byte Folded Spill - adcq $0, %rbx - movq %rbx, 96(%rsp) ## 8-byte Spill - movq %r15, %rbx - adcq $0, %rbx - adcq $0, 112(%rsp) ## 8-byte Folded Spill - adcq $0, 80(%rsp) ## 8-byte Folded Spill - movq %rax, %rdx - movq %rax, %r15 - imulq %r12, %rdx - leaq 416(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 416(%rsp), %r15 - adcq 424(%rsp), %rbp - movq %rbp, %rax - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 432(%rsp), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - movq %r14, %r12 - adcq 440(%rsp), %r12 - movq 8(%rsp), %r14 ## 8-byte Reload - adcq 448(%rsp), %r14 - movq 16(%rsp), %rbp ## 8-byte Reload - adcq 456(%rsp), %rbp - adcq 464(%rsp), %r13 - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 472(%rsp), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - movq 48(%rsp), %rcx ## 8-byte Reload - adcq 480(%rsp), %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - adcq $0, 96(%rsp) ## 8-byte Folded Spill - adcq $0, %rbx - movq %rbx, 56(%rsp) ## 8-byte Spill - movq 112(%rsp), %r15 ## 8-byte Reload - adcq $0, %r15 - adcq $0, 80(%rsp) ## 8-byte Folded Spill - movq %rax, %rbx - movq %rbx, %rdx - imulq 104(%rsp), %rdx ## 8-byte Folded Reload - leaq 344(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 344(%rsp), %rbx - movq 32(%rsp), %rax ## 8-byte Reload - adcq 352(%rsp), %rax - adcq 360(%rsp), %r12 - movq %r12, 64(%rsp) ## 8-byte Spill - adcq 368(%rsp), %r14 - movq %r14, 8(%rsp) ## 8-byte Spill - adcq 376(%rsp), %rbp - movq %rbp, 16(%rsp) ## 8-byte Spill - adcq 384(%rsp), %r13 - movq %r13, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %r13 ## 8-byte 
Reload - adcq 392(%rsp), %r13 - movq 48(%rsp), %r12 ## 8-byte Reload - adcq 400(%rsp), %r12 - movq 96(%rsp), %r14 ## 8-byte Reload - adcq 408(%rsp), %r14 - movq 56(%rsp), %rbp ## 8-byte Reload - adcq $0, %rbp - movq %r15, %rbx - adcq $0, %rbx - adcq $0, 80(%rsp) ## 8-byte Folded Spill - movq %rax, %rdx - movq %rax, %r15 - imulq 104(%rsp), %rdx ## 8-byte Folded Reload - leaq 272(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 272(%rsp), %r15 - movq 64(%rsp), %rcx ## 8-byte Reload - adcq 280(%rsp), %rcx - movq 8(%rsp), %rax ## 8-byte Reload - adcq 288(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %rax ## 8-byte Reload - adcq 296(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq 40(%rsp), %rax ## 8-byte Reload - adcq 304(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 312(%rsp), %r13 - movq %r13, 24(%rsp) ## 8-byte Spill - adcq 320(%rsp), %r12 - movq %r12, 48(%rsp) ## 8-byte Spill - adcq 328(%rsp), %r14 - movq %r14, %r13 - adcq 336(%rsp), %rbp - movq %rbp, %r12 - adcq $0, %rbx - movq %rbx, %r14 - movq 80(%rsp), %r15 ## 8-byte Reload - adcq $0, %r15 - movq 104(%rsp), %rdx ## 8-byte Reload - movq %rcx, %rbx - imulq %rbx, %rdx - leaq 200(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv512x64 - addq 200(%rsp), %rbx - movq 8(%rsp), %rax ## 8-byte Reload - adcq 208(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %r8 ## 8-byte Reload - adcq 216(%rsp), %r8 - movq %r8, 16(%rsp) ## 8-byte Spill - movq 40(%rsp), %rdx ## 8-byte Reload - adcq 224(%rsp), %rdx - movq 24(%rsp), %rsi ## 8-byte Reload - adcq 232(%rsp), %rsi - movq 48(%rsp), %rdi ## 8-byte Reload - adcq 240(%rsp), %rdi - movq %r13, %rbp - adcq 248(%rsp), %rbp - movq %r12, %rbx - adcq 256(%rsp), %rbx - movq %rbx, 56(%rsp) ## 8-byte Spill - movq %r14, %r9 - adcq 264(%rsp), %r9 - adcq $0, %r15 - movq %r15, %r10 - subq 136(%rsp), %rax ## 8-byte Folded Reload - movq %r8, %rcx - sbbq 128(%rsp), %rcx ## 8-byte Folded Reload - movq %rdx, %r13 - sbbq 144(%rsp), %r13 ## 8-byte Folded Reload - movq %rsi, %r12 - sbbq 152(%rsp), %r12 ## 8-byte Folded Reload - movq %rdi, %r14 - sbbq 160(%rsp), %r14 ## 8-byte Folded Reload - movq %rbp, %r11 - sbbq 168(%rsp), %r11 ## 8-byte Folded Reload - movq %rbx, %r8 - sbbq 176(%rsp), %r8 ## 8-byte Folded Reload - movq %r9, %r15 - sbbq 184(%rsp), %r9 ## 8-byte Folded Reload - sbbq $0, %r10 - andl $1, %r10d - cmovneq %r15, %r9 - testb %r10b, %r10b - cmovneq 8(%rsp), %rax ## 8-byte Folded Reload - movq 192(%rsp), %rbx ## 8-byte Reload - movq %rax, (%rbx) - cmovneq 16(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 8(%rbx) - cmovneq %rdx, %r13 - movq %r13, 16(%rbx) - cmovneq %rsi, %r12 - movq %r12, 24(%rbx) - cmovneq %rdi, %r14 - movq %r14, 32(%rbx) - cmovneq %rbp, %r11 - movq %r11, 40(%rbx) - cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload - movq %r8, 48(%rbx) - movq %r9, 56(%rbx) - addq $776, %rsp ## imm = 0x308 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_addPre8L - .p2align 4, 0x90 -_mcl_fp_addPre8L: ## @mcl_fp_addPre8L -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r8 - movq 56(%rsi), %r15 - movq 48(%rdx), %r9 - movq 48(%rsi), %r12 - movq 40(%rdx), %r10 - movq 32(%rdx), %r11 - movq 24(%rdx), %r14 - movq 16(%rdx), %rbx - movq (%rdx), %rcx - movq 8(%rdx), %rdx - addq (%rsi), %rcx - adcq 8(%rsi), %rdx - adcq 16(%rsi), %rbx - movq 40(%rsi), %r13 - movq 24(%rsi), %rax - movq 32(%rsi), %rsi - movq %rcx, (%rdi) - movq %rdx, 
8(%rdi) - movq %rbx, 16(%rdi) - adcq %r14, %rax - movq %rax, 24(%rdi) - adcq %r11, %rsi - movq %rsi, 32(%rdi) - adcq %r10, %r13 - movq %r13, 40(%rdi) - adcq %r9, %r12 - movq %r12, 48(%rdi) - adcq %r8, %r15 - movq %r15, 56(%rdi) - sbbq %rax, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_subPre8L - .p2align 4, 0x90 -_mcl_fp_subPre8L: ## @mcl_fp_subPre8L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r8 - movq 56(%rsi), %r15 - movq 48(%rdx), %r9 - movq 40(%rdx), %r10 - movq 24(%rdx), %r11 - movq 32(%rdx), %r14 - movq (%rsi), %rbx - movq 8(%rsi), %r12 - xorl %eax, %eax - subq (%rdx), %rbx - sbbq 8(%rdx), %r12 - movq 16(%rsi), %rcx - sbbq 16(%rdx), %rcx - movq 48(%rsi), %r13 - movq 40(%rsi), %rdx - movq 32(%rsi), %rbp - movq 24(%rsi), %rsi - movq %rbx, (%rdi) - movq %r12, 8(%rdi) - movq %rcx, 16(%rdi) - sbbq %r11, %rsi - movq %rsi, 24(%rdi) - sbbq %r14, %rbp - movq %rbp, 32(%rdi) - sbbq %r10, %rdx - movq %rdx, 40(%rdi) - sbbq %r9, %r13 - movq %r13, 48(%rdi) - sbbq %r8, %r15 - movq %r15, 56(%rdi) - sbbq $0, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_shr1_8L - .p2align 4, 0x90 -_mcl_fp_shr1_8L: ## @mcl_fp_shr1_8L -## BB#0: - movq 56(%rsi), %r8 - movq 48(%rsi), %r9 - movq 40(%rsi), %r10 - movq 32(%rsi), %r11 - movq 24(%rsi), %rcx - movq 16(%rsi), %rdx - movq (%rsi), %rax - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rax - movq %rax, (%rdi) - shrdq $1, %rdx, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rcx, %rdx - movq %rdx, 16(%rdi) - shrdq $1, %r11, %rcx - movq %rcx, 24(%rdi) - shrdq $1, %r10, %r11 - movq %r11, 32(%rdi) - shrdq $1, %r9, %r10 - movq %r10, 40(%rdi) - shrdq $1, %r8, %r9 - movq %r9, 48(%rdi) - shrq %r8 - movq %r8, 56(%rdi) - retq - - .globl _mcl_fp_add8L - .p2align 4, 0x90 -_mcl_fp_add8L: ## @mcl_fp_add8L -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r15 - movq 56(%rsi), %r8 - movq 48(%rdx), %r12 - movq 48(%rsi), %r9 - movq 40(%rsi), %r13 - movq 24(%rsi), %r11 - movq 32(%rsi), %r10 - movq (%rdx), %r14 - movq 8(%rdx), %rbx - addq (%rsi), %r14 - adcq 8(%rsi), %rbx - movq 16(%rdx), %rax - adcq 16(%rsi), %rax - adcq 24(%rdx), %r11 - movq 40(%rdx), %rsi - adcq 32(%rdx), %r10 - movq %r14, (%rdi) - movq %rbx, 8(%rdi) - movq %rax, 16(%rdi) - movq %r11, 24(%rdi) - movq %r10, 32(%rdi) - adcq %r13, %rsi - movq %rsi, 40(%rdi) - adcq %r12, %r9 - movq %r9, 48(%rdi) - adcq %r15, %r8 - movq %r8, 56(%rdi) - sbbq %rdx, %rdx - andl $1, %edx - subq (%rcx), %r14 - sbbq 8(%rcx), %rbx - sbbq 16(%rcx), %rax - sbbq 24(%rcx), %r11 - sbbq 32(%rcx), %r10 - sbbq 40(%rcx), %rsi - sbbq 48(%rcx), %r9 - sbbq 56(%rcx), %r8 - sbbq $0, %rdx - testb $1, %dl - jne LBB120_2 -## BB#1: ## %nocarry - movq %r14, (%rdi) - movq %rbx, 8(%rdi) - movq %rax, 16(%rdi) - movq %r11, 24(%rdi) - movq %r10, 32(%rdi) - movq %rsi, 40(%rdi) - movq %r9, 48(%rdi) - movq %r8, 56(%rdi) -LBB120_2: ## %carry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_addNF8L - .p2align 4, 0x90 -_mcl_fp_addNF8L: ## @mcl_fp_addNF8L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r8 - movq 48(%rdx), %rbp - movq 40(%rdx), %rbx - movq 32(%rdx), %rax - movq 24(%rdx), %r11 - movq 16(%rdx), %r15 - movq (%rdx), %r13 - movq 8(%rdx), %r12 - addq (%rsi), %r13 - adcq 8(%rsi), %r12 - adcq 16(%rsi), %r15 - adcq 24(%rsi), %r11 - adcq 32(%rsi), %rax - 
movq %rax, %r10 - movq %r10, -24(%rsp) ## 8-byte Spill - adcq 40(%rsi), %rbx - movq %rbx, %r9 - movq %r9, -16(%rsp) ## 8-byte Spill - adcq 48(%rsi), %rbp - movq %rbp, %rax - movq %rax, -8(%rsp) ## 8-byte Spill - adcq 56(%rsi), %r8 - movq %r13, %rsi - subq (%rcx), %rsi - movq %r12, %rdx - sbbq 8(%rcx), %rdx - movq %r15, %rbx - sbbq 16(%rcx), %rbx - movq %r11, %r14 - sbbq 24(%rcx), %r14 - movq %r10, %rbp - sbbq 32(%rcx), %rbp - movq %r9, %r10 - sbbq 40(%rcx), %r10 - movq %rax, %r9 - sbbq 48(%rcx), %r9 - movq %r8, %rax - sbbq 56(%rcx), %rax - testq %rax, %rax - cmovsq %r13, %rsi - movq %rsi, (%rdi) - cmovsq %r12, %rdx - movq %rdx, 8(%rdi) - cmovsq %r15, %rbx - movq %rbx, 16(%rdi) - cmovsq %r11, %r14 - movq %r14, 24(%rdi) - cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 32(%rdi) - cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload - movq %r10, 40(%rdi) - cmovsq -8(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 48(%rdi) - cmovsq %r8, %rax - movq %rax, 56(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_sub8L - .p2align 4, 0x90 -_mcl_fp_sub8L: ## @mcl_fp_sub8L -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 56(%rdx), %r12 - movq 56(%rsi), %r8 - movq 48(%rdx), %r13 - movq (%rsi), %rax - movq 8(%rsi), %r10 - xorl %ebx, %ebx - subq (%rdx), %rax - sbbq 8(%rdx), %r10 - movq 16(%rsi), %r11 - sbbq 16(%rdx), %r11 - movq 24(%rsi), %r15 - sbbq 24(%rdx), %r15 - movq 32(%rsi), %r14 - sbbq 32(%rdx), %r14 - movq 48(%rsi), %r9 - movq 40(%rsi), %rsi - sbbq 40(%rdx), %rsi - movq %rax, (%rdi) - movq %r10, 8(%rdi) - movq %r11, 16(%rdi) - movq %r15, 24(%rdi) - movq %r14, 32(%rdi) - movq %rsi, 40(%rdi) - sbbq %r13, %r9 - movq %r9, 48(%rdi) - sbbq %r12, %r8 - movq %r8, 56(%rdi) - sbbq $0, %rbx - testb $1, %bl - je LBB122_2 -## BB#1: ## %carry - addq (%rcx), %rax - movq %rax, (%rdi) - movq 8(%rcx), %rax - adcq %r10, %rax - movq %rax, 8(%rdi) - movq 16(%rcx), %rax - adcq %r11, %rax - movq %rax, 16(%rdi) - movq 24(%rcx), %rax - adcq %r15, %rax - movq %rax, 24(%rdi) - movq 32(%rcx), %rax - adcq %r14, %rax - movq %rax, 32(%rdi) - movq 40(%rcx), %rax - adcq %rsi, %rax - movq %rax, 40(%rdi) - movq 48(%rcx), %rax - adcq %r9, %rax - movq %rax, 48(%rdi) - movq 56(%rcx), %rax - adcq %r8, %rax - movq %rax, 56(%rdi) -LBB122_2: ## %nocarry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_subNF8L - .p2align 4, 0x90 -_mcl_fp_subNF8L: ## @mcl_fp_subNF8L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq %rdi, %r9 - movdqu (%rdx), %xmm0 - movdqu 16(%rdx), %xmm1 - movdqu 32(%rdx), %xmm2 - movdqu 48(%rdx), %xmm3 - pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1] - movd %xmm4, %r12 - movdqu (%rsi), %xmm4 - movdqu 16(%rsi), %xmm5 - movdqu 32(%rsi), %xmm8 - movdqu 48(%rsi), %xmm7 - pshufd $78, %xmm7, %xmm6 ## xmm6 = xmm7[2,3,0,1] - movd %xmm6, %rcx - movd %xmm3, %r13 - movd %xmm7, %rdi - pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1] - movd %xmm3, %rbp - pshufd $78, %xmm8, %xmm3 ## xmm3 = xmm8[2,3,0,1] - movd %xmm3, %rdx - movd %xmm2, %rsi - pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] - movd %xmm2, %r11 - pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1] - movd %xmm1, %r15 - pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1] - movd %xmm1, %rbx - pshufd $78, %xmm4, %xmm1 ## xmm1 = xmm4[2,3,0,1] - movd %xmm0, %rax - movd %xmm4, %r14 - subq %rax, %r14 - movd %xmm1, %r10 - sbbq %rbx, %r10 - movd %xmm5, %rbx - sbbq %r15, %rbx - movd %xmm2, %r15 - 
sbbq %r11, %r15 - movd %xmm8, %r11 - sbbq %rsi, %r11 - sbbq %rbp, %rdx - movq %rdx, -24(%rsp) ## 8-byte Spill - sbbq %r13, %rdi - movq %rdi, -16(%rsp) ## 8-byte Spill - sbbq %r12, %rcx - movq %rcx, -8(%rsp) ## 8-byte Spill - movq %rcx, %rbp - sarq $63, %rbp - movq 56(%r8), %r12 - andq %rbp, %r12 - movq 48(%r8), %r13 - andq %rbp, %r13 - movq 40(%r8), %rdi - andq %rbp, %rdi - movq 32(%r8), %rsi - andq %rbp, %rsi - movq 24(%r8), %rdx - andq %rbp, %rdx - movq 16(%r8), %rcx - andq %rbp, %rcx - movq 8(%r8), %rax - andq %rbp, %rax - andq (%r8), %rbp - addq %r14, %rbp - adcq %r10, %rax - movq %rbp, (%r9) - adcq %rbx, %rcx - movq %rax, 8(%r9) - movq %rcx, 16(%r9) - adcq %r15, %rdx - movq %rdx, 24(%r9) - adcq %r11, %rsi - movq %rsi, 32(%r9) - adcq -24(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 40(%r9) - adcq -16(%rsp), %r13 ## 8-byte Folded Reload - movq %r13, 48(%r9) - adcq -8(%rsp), %r12 ## 8-byte Folded Reload - movq %r12, 56(%r9) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_add8L - .p2align 4, 0x90 -_mcl_fpDbl_add8L: ## @mcl_fpDbl_add8L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r8 - movq 120(%rdx), %rax - movq %rax, -32(%rsp) ## 8-byte Spill - movq 112(%rdx), %rax - movq %rax, -16(%rsp) ## 8-byte Spill - movq 104(%rdx), %rax - movq %rax, -24(%rsp) ## 8-byte Spill - movq 96(%rdx), %r14 - movq 24(%rsi), %r15 - movq 32(%rsi), %r11 - movq 16(%rdx), %r12 - movq (%rdx), %rbx - movq 8(%rdx), %rax - addq (%rsi), %rbx - adcq 8(%rsi), %rax - adcq 16(%rsi), %r12 - adcq 24(%rdx), %r15 - adcq 32(%rdx), %r11 - movq 88(%rdx), %rbp - movq 80(%rdx), %r13 - movq %rbx, (%rdi) - movq 72(%rdx), %r10 - movq %rax, 8(%rdi) - movq 64(%rdx), %r9 - movq %r12, 16(%rdi) - movq 40(%rdx), %r12 - movq %r15, 24(%rdi) - movq 40(%rsi), %rbx - adcq %r12, %rbx - movq 56(%rdx), %r15 - movq 48(%rdx), %r12 - movq %r11, 32(%rdi) - movq 48(%rsi), %rdx - adcq %r12, %rdx - movq 120(%rsi), %r12 - movq %rbx, 40(%rdi) - movq 56(%rsi), %rax - adcq %r15, %rax - movq 112(%rsi), %rcx - movq %rdx, 48(%rdi) - movq 64(%rsi), %rbx - adcq %r9, %rbx - movq 104(%rsi), %rdx - movq %rax, 56(%rdi) - movq 72(%rsi), %r9 - adcq %r10, %r9 - movq 80(%rsi), %r11 - adcq %r13, %r11 - movq 96(%rsi), %rax - movq 88(%rsi), %r15 - adcq %rbp, %r15 - adcq %r14, %rax - movq %rax, -8(%rsp) ## 8-byte Spill - movq %rdx, %rax - adcq -24(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -24(%rsp) ## 8-byte Spill - adcq -16(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -16(%rsp) ## 8-byte Spill - adcq -32(%rsp), %r12 ## 8-byte Folded Reload - movq %r12, -32(%rsp) ## 8-byte Spill - sbbq %rbp, %rbp - andl $1, %ebp - movq %rbx, %rsi - subq (%r8), %rsi - movq %r9, %rdx - sbbq 8(%r8), %rdx - movq %r11, %r10 - sbbq 16(%r8), %r10 - movq %r15, %r14 - sbbq 24(%r8), %r14 - movq -8(%rsp), %r13 ## 8-byte Reload - sbbq 32(%r8), %r13 - movq %rax, %r12 - sbbq 40(%r8), %r12 - movq %rcx, %rax - sbbq 48(%r8), %rax - movq -32(%rsp), %rcx ## 8-byte Reload - sbbq 56(%r8), %rcx - sbbq $0, %rbp - andl $1, %ebp - cmovneq %rbx, %rsi - movq %rsi, 64(%rdi) - testb %bpl, %bpl - cmovneq %r9, %rdx - movq %rdx, 72(%rdi) - cmovneq %r11, %r10 - movq %r10, 80(%rdi) - cmovneq %r15, %r14 - movq %r14, 88(%rdi) - cmovneq -8(%rsp), %r13 ## 8-byte Folded Reload - movq %r13, 96(%rdi) - cmovneq -24(%rsp), %r12 ## 8-byte Folded Reload - movq %r12, 104(%rdi) - cmovneq -16(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 112(%rdi) - cmovneq -32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 
120(%rdi)
- popq %rbx
- popq %r12
- popq %r13
- popq %r14
- popq %r15
- popq %rbp
- retq
-
- .globl _mcl_fpDbl_sub8L
- .p2align 4, 0x90
-_mcl_fpDbl_sub8L: ## @mcl_fpDbl_sub8L
-## BB#0:
- pushq %rbp
- pushq %r15
- pushq %r14
- pushq %r13
- pushq %r12
- pushq %rbx
- movq %rcx, %r15
- movq 120(%rdx), %rax
- movq %rax, -8(%rsp) ## 8-byte Spill
- movq 112(%rdx), %rax
- movq %rax, -16(%rsp) ## 8-byte Spill
- movq 104(%rdx), %rax
- movq %rax, -24(%rsp) ## 8-byte Spill
- movq 16(%rsi), %r9
- movq (%rsi), %r12
- movq 8(%rsi), %r14
- xorl %r8d, %r8d
- subq (%rdx), %r12
- sbbq 8(%rdx), %r14
- sbbq 16(%rdx), %r9
- movq 24(%rsi), %rbx
- sbbq 24(%rdx), %rbx
- movq 32(%rsi), %r13
- sbbq 32(%rdx), %r13
- movq 96(%rdx), %rbp
- movq 88(%rdx), %r11
- movq %r12, (%rdi)
- movq 80(%rdx), %r12
- movq %r14, 8(%rdi)
- movq 72(%rdx), %r10
- movq %r9, 16(%rdi)
- movq 40(%rdx), %r9
- movq %rbx, 24(%rdi)
- movq 40(%rsi), %rbx
- sbbq %r9, %rbx
- movq 48(%rdx), %r9
- movq %r13, 32(%rdi)
- movq 48(%rsi), %r14
- sbbq %r9, %r14
- movq 64(%rdx), %r13
- movq 56(%rdx), %r9
- movq %rbx, 40(%rdi)
- movq 56(%rsi), %rdx
- sbbq %r9, %rdx
- movq 120(%rsi), %rcx
- movq %r14, 48(%rdi)
- movq 64(%rsi), %rbx
- sbbq %r13, %rbx
- movq 112(%rsi), %rax
- movq %rdx, 56(%rdi)
- movq 72(%rsi), %r9
- sbbq %r10, %r9
- movq 80(%rsi), %r13
- sbbq %r12, %r13
- movq 88(%rsi), %r12
- sbbq %r11, %r12
- movq 104(%rsi), %rdx
- movq 96(%rsi), %r14
- sbbq %rbp, %r14
- sbbq -24(%rsp), %rdx ## 8-byte Folded Reload
- movq %rdx, -24(%rsp) ## 8-byte Spill
- sbbq -16(%rsp), %rax ## 8-byte Folded Reload
- movq %rax, -16(%rsp) ## 8-byte Spill
- sbbq -8(%rsp), %rcx ## 8-byte Folded Reload
- movq %rcx, -8(%rsp) ## 8-byte Spill
- movl $0, %ebp
- sbbq $0, %rbp
- andl $1, %ebp
- movq (%r15), %r11
- cmoveq %r8, %r11
- testb %bpl, %bpl
- movq 16(%r15), %rbp
- cmoveq %r8, %rbp
- movq 8(%r15), %rsi
- cmoveq %r8, %rsi
- movq 56(%r15), %r10
- cmoveq %r8, %r10
- movq 48(%r15), %rdx
- cmoveq %r8, %rdx
- movq 40(%r15), %rcx
- cmoveq %r8, %rcx
- movq 32(%r15), %rax
- cmoveq %r8, %rax
- cmovneq 24(%r15), %r8
- addq %rbx, %r11
- adcq %r9, %rsi
- movq %r11, 64(%rdi)
- adcq %r13, %rbp
- movq %rsi, 72(%rdi)
- movq %rbp, 80(%rdi)
- adcq %r12, %r8
- movq %r8, 88(%rdi)
- adcq %r14, %rax
- movq %rax, 96(%rdi)
- adcq -24(%rsp), %rcx ## 8-byte Folded Reload
- movq %rcx, 104(%rdi)
- adcq -16(%rsp), %rdx ## 8-byte Folded Reload
- movq %rdx, 112(%rdi)
- adcq -8(%rsp), %r10 ## 8-byte Folded Reload
- movq %r10, 120(%rdi)
- popq %rbx
- popq %r12
- popq %r13
- popq %r14
- popq %r15
- popq %rbp
- retq
-
- .p2align 4, 0x90
-l_mulPv576x64: ## @mulPv576x64
-## BB#0:
- pushq %rbp
- pushq %r15
- pushq %r14
- pushq %r13
- pushq %r12
- pushq %rbx
- movq %rdx, %rbx
- movq %rbx, %rax
- mulq (%rsi)
- movq %rdx, -32(%rsp) ## 8-byte Spill
- movq %rax, (%rdi)
- movq %rbx, %rax
- mulq 64(%rsi)
- movq %rdx, %r10
- movq %rax, -8(%rsp) ## 8-byte Spill
- movq %rbx, %rax
- mulq 56(%rsi)
- movq %rdx, %r14
- movq %rax, -16(%rsp) ## 8-byte Spill
- movq %rbx, %rax
- mulq 48(%rsi)
- movq %rdx, %r12
- movq %rax, -24(%rsp) ## 8-byte Spill
- movq %rbx, %rax
- mulq 40(%rsi)
- movq %rdx, %rcx
- movq %rax, -40(%rsp) ## 8-byte Spill
- movq %rbx, %rax
- mulq 32(%rsi)
- movq %rdx, %rbp
- movq %rax, %r8
- movq %rbx, %rax
- mulq 24(%rsi)
- movq %rdx, %r9
- movq %rax, %r11
- movq %rbx, %rax
- mulq 16(%rsi)
- movq %rdx, %r15
- movq %rax, %r13
- movq %rbx, %rax
- mulq 8(%rsi)
- addq -32(%rsp), %rax ## 8-byte Folded Reload
- movq %rax, 8(%rdi)
- adcq %r13, %rdx
- movq %rdx, 16(%rdi)
- adcq %r11, %r15
- movq %r15, 24(%rdi)
- adcq %r8, %r9
- movq %r9, 32(%rdi)
- adcq -40(%rsp), %rbp ## 8-byte Folded Reload
- movq %rbp, 40(%rdi)
- adcq -24(%rsp), %rcx ## 8-byte Folded Reload
- movq %rcx, 48(%rdi)
- adcq -16(%rsp), %r12 ## 8-byte Folded Reload
- movq %r12, 56(%rdi)
- adcq -8(%rsp), %r14 ## 8-byte Folded Reload
- movq %r14, 64(%rdi)
- adcq $0, %r10
- movq %r10, 72(%rdi)
- movq %rdi, %rax
- popq %rbx
- popq %r12
- popq %r13
- popq %r14
- popq %r15
- popq %rbp
- retq
-
- .globl _mcl_fp_mulUnitPre9L
- .p2align 4, 0x90
-_mcl_fp_mulUnitPre9L: ## @mcl_fp_mulUnitPre9L
-## BB#0:
- pushq %r14
- pushq %rbx
- subq $88, %rsp
- movq %rdi, %rbx
- leaq 8(%rsp), %rdi
- callq l_mulPv576x64
- movq 80(%rsp), %r8
- movq 72(%rsp), %r9
- movq 64(%rsp), %r10
- movq 56(%rsp), %r11
- movq 48(%rsp), %r14
- movq 40(%rsp), %rax
- movq 32(%rsp), %rcx
- movq 24(%rsp), %rdx
- movq 8(%rsp), %rsi
- movq 16(%rsp), %rdi
- movq %rsi, (%rbx)
- movq %rdi, 8(%rbx)
- movq %rdx, 16(%rbx)
- movq %rcx, 24(%rbx)
- movq %rax, 32(%rbx)
- movq %r14, 40(%rbx)
- movq %r11, 48(%rbx)
- movq %r10, 56(%rbx)
- movq %r9, 64(%rbx)
- movq %r8, 72(%rbx)
- addq $88, %rsp
- popq %rbx
- popq %r14
- retq
-
- .globl _mcl_fpDbl_mulPre9L
- .p2align 4, 0x90
-_mcl_fpDbl_mulPre9L: ## @mcl_fpDbl_mulPre9L
-## BB#0:
- pushq %rbp
- pushq %r15
- pushq %r14
- pushq %r13
- pushq %r12
- pushq %rbx
- subq $808, %rsp ## imm = 0x328
- movq %rdx, %rax
- movq %rdi, %r12
- movq (%rax), %rdx
- movq %rax, %rbx
- movq %rbx, 80(%rsp) ## 8-byte Spill
- leaq 728(%rsp), %rdi
- movq %rsi, %rbp
- movq %rbp, 72(%rsp) ## 8-byte Spill
- callq l_mulPv576x64
- movq 800(%rsp), %r13
- movq 792(%rsp), %rax
- movq %rax, 48(%rsp) ## 8-byte Spill
- movq 784(%rsp), %rax
- movq %rax, 16(%rsp) ## 8-byte Spill
- movq 776(%rsp), %rax
- movq %rax, 8(%rsp) ## 8-byte Spill
- movq 768(%rsp), %rax
- movq %rax, (%rsp) ## 8-byte Spill
- movq 760(%rsp), %rax
- movq %rax, 40(%rsp) ## 8-byte Spill
- movq 752(%rsp), %rax
- movq %rax, 32(%rsp) ## 8-byte Spill
- movq 744(%rsp), %rax
- movq %rax, 24(%rsp) ## 8-byte Spill
- movq 728(%rsp), %rax
- movq 736(%rsp), %r14
- movq %rax, (%r12)
- movq %r12, 64(%rsp) ## 8-byte Spill
- movq 8(%rbx), %rdx
- leaq 648(%rsp), %rdi
- movq %rbp, %rsi
- callq l_mulPv576x64
- movq 720(%rsp), %r8
- movq 712(%rsp), %rcx
- movq 704(%rsp), %rdx
- movq 696(%rsp), %rsi
- movq 688(%rsp), %rdi
- movq 680(%rsp), %rbp
- addq 648(%rsp), %r14
- movq 672(%rsp), %rax
- movq 656(%rsp), %rbx
- movq 664(%rsp), %r15
- movq %r14, 8(%r12)
- adcq 24(%rsp), %rbx ## 8-byte Folded Reload
- adcq 32(%rsp), %r15 ## 8-byte Folded Reload
- adcq 40(%rsp), %rax ## 8-byte Folded Reload
- movq %rax, %r14
- adcq (%rsp), %rbp ## 8-byte Folded Reload
- movq %rbp, 24(%rsp) ## 8-byte Spill
- adcq 8(%rsp), %rdi ## 8-byte Folded Reload
- movq %rdi, 32(%rsp) ## 8-byte Spill
- adcq 16(%rsp), %rsi ## 8-byte Folded Reload
- movq %rsi, 40(%rsp) ## 8-byte Spill
- adcq 48(%rsp), %rdx ## 8-byte Folded Reload
- movq %rdx, (%rsp) ## 8-byte Spill
- adcq %r13, %rcx
- movq %rcx, 8(%rsp) ## 8-byte Spill
- adcq $0, %r8
- movq %r8, 16(%rsp) ## 8-byte Spill
- movq 80(%rsp), %r13 ## 8-byte Reload
- movq 16(%r13), %rdx
- leaq 568(%rsp), %rdi
- movq 72(%rsp), %rsi ## 8-byte Reload
- callq l_mulPv576x64
- movq 640(%rsp), %r8
- movq 632(%rsp), %r9
- movq 624(%rsp), %r10
- movq 616(%rsp), %rdi
- movq 608(%rsp), %rbp
- movq 600(%rsp), %rcx
- addq 568(%rsp), %rbx
- movq 592(%rsp), %rdx
- movq 576(%rsp), %r12
- movq 584(%rsp), %rsi
- movq 64(%rsp), %rax ## 8-byte Reload
- movq %rbx, 16(%rax)
- adcq %r15, %r12
- adcq %r14, %rsi
- movq %rsi, 48(%rsp) ## 8-byte Spill
- adcq
24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 56(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 40(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 32(%rsp) ## 8-byte Spill - adcq (%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 40(%rsp) ## 8-byte Spill - adcq 8(%rsp), %r10 ## 8-byte Folded Reload - movq %r10, (%rsp) ## 8-byte Spill - adcq 16(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 8(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 16(%rsp) ## 8-byte Spill - movq 24(%r13), %rdx - leaq 488(%rsp), %rdi - movq 72(%rsp), %r15 ## 8-byte Reload - movq %r15, %rsi - callq l_mulPv576x64 - movq 560(%rsp), %r8 - movq 552(%rsp), %rcx - movq 544(%rsp), %rdx - movq 536(%rsp), %rsi - movq 528(%rsp), %rdi - movq 520(%rsp), %rbp - addq 488(%rsp), %r12 - movq 512(%rsp), %rax - movq 496(%rsp), %rbx - movq 504(%rsp), %r13 - movq 64(%rsp), %r14 ## 8-byte Reload - movq %r12, 24(%r14) - adcq 48(%rsp), %rbx ## 8-byte Folded Reload - adcq 56(%rsp), %r13 ## 8-byte Folded Reload - adcq 24(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 32(%rsp) ## 8-byte Spill - adcq 40(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 40(%rsp) ## 8-byte Spill - adcq (%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, (%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 8(%rsp) ## 8-byte Spill - adcq 16(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 16(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 48(%rsp) ## 8-byte Spill - movq 80(%rsp), %r12 ## 8-byte Reload - movq 32(%r12), %rdx - leaq 408(%rsp), %rdi - movq %r15, %rsi - callq l_mulPv576x64 - movq 480(%rsp), %r8 - movq 472(%rsp), %r9 - movq 464(%rsp), %rdx - movq 456(%rsp), %rsi - movq 448(%rsp), %rdi - movq 440(%rsp), %rbp - addq 408(%rsp), %rbx - movq 432(%rsp), %rax - movq 416(%rsp), %r15 - movq 424(%rsp), %rcx - movq %rbx, 32(%r14) - adcq %r13, %r15 - adcq 24(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 56(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 40(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 32(%rsp) ## 8-byte Spill - adcq (%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 40(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, (%rsp) ## 8-byte Spill - adcq 16(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 8(%rsp) ## 8-byte Spill - adcq 48(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 16(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 48(%rsp) ## 8-byte Spill - movq %r12, %r14 - movq 40(%r14), %rdx - leaq 328(%rsp), %rdi - movq 72(%rsp), %r13 ## 8-byte Reload - movq %r13, %rsi - callq l_mulPv576x64 - movq 400(%rsp), %r8 - movq 392(%rsp), %r9 - movq 384(%rsp), %rsi - movq 376(%rsp), %rdi - movq 368(%rsp), %rbx - movq 360(%rsp), %rbp - addq 328(%rsp), %r15 - movq 352(%rsp), %rcx - movq 336(%rsp), %r12 - movq 344(%rsp), %rdx - movq 64(%rsp), %rax ## 8-byte Reload - movq %r15, 40(%rax) - adcq 56(%rsp), %r12 ## 8-byte Folded Reload - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 56(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 40(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 32(%rsp) ## 8-byte Spill - adcq (%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, 40(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, (%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded 
Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 48(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 16(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 48(%rsp) ## 8-byte Spill - movq 48(%r14), %rdx - leaq 248(%rsp), %rdi - movq %r13, %rsi - movq %r13, %r15 - callq l_mulPv576x64 - movq 320(%rsp), %r8 - movq 312(%rsp), %r9 - movq 304(%rsp), %rsi - movq 296(%rsp), %rdi - movq 288(%rsp), %rbx - movq 280(%rsp), %rbp - addq 248(%rsp), %r12 - movq 272(%rsp), %rcx - movq 256(%rsp), %r13 - movq 264(%rsp), %rdx - movq 64(%rsp), %rax ## 8-byte Reload - movq %r12, 48(%rax) - adcq 56(%rsp), %r13 ## 8-byte Folded Reload - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 56(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 40(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 32(%rsp) ## 8-byte Spill - adcq (%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, 40(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, (%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 48(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 16(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 48(%rsp) ## 8-byte Spill - movq 56(%r14), %rdx - leaq 168(%rsp), %rdi - movq %r15, %rsi - callq l_mulPv576x64 - movq 240(%rsp), %rcx - movq 232(%rsp), %rdx - movq 224(%rsp), %rsi - movq 216(%rsp), %rdi - movq 208(%rsp), %rbx - addq 168(%rsp), %r13 - movq 200(%rsp), %r12 - movq 192(%rsp), %rbp - movq 176(%rsp), %r14 - movq 184(%rsp), %r15 - movq 64(%rsp), %rax ## 8-byte Reload - movq %r13, 56(%rax) - adcq 56(%rsp), %r14 ## 8-byte Folded Reload - adcq 24(%rsp), %r15 ## 8-byte Folded Reload - adcq 32(%rsp), %rbp ## 8-byte Folded Reload - adcq 40(%rsp), %r12 ## 8-byte Folded Reload - adcq (%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, %r13 - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, (%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 48(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq $0, %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - movq 80(%rsp), %rax ## 8-byte Reload - movq 64(%rax), %rdx - leaq 88(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 88(%rsp), %r14 - adcq 96(%rsp), %r15 - movq 160(%rsp), %r8 - adcq 104(%rsp), %rbp - movq 152(%rsp), %r9 - movq 144(%rsp), %rdx - movq 136(%rsp), %rsi - movq 128(%rsp), %rdi - movq 120(%rsp), %rbx - movq 112(%rsp), %rax - movq 64(%rsp), %rcx ## 8-byte Reload - movq %r14, 64(%rcx) - movq %r15, 72(%rcx) - adcq %r12, %rax - movq %rbp, 80(%rcx) - movq %rax, 88(%rcx) - adcq %r13, %rbx - movq %rbx, 96(%rcx) - adcq (%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 104(%rcx) - adcq 8(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 112(%rcx) - adcq 16(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 120(%rcx) - adcq 48(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 128(%rcx) - adcq $0, %r8 - movq %r8, 136(%rcx) - addq $808, %rsp ## imm = 0x328 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sqrPre9L - .p2align 4, 0x90 -_mcl_fpDbl_sqrPre9L: ## @mcl_fpDbl_sqrPre9L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $808, %rsp ## imm = 0x328 - movq %rsi, %r15 - movq %rdi, %r14 - movq (%r15), %rdx - leaq 728(%rsp), %rdi - callq l_mulPv576x64 - movq 800(%rsp), %rax - movq %rax, 32(%rsp) ## 8-byte Spill - movq 792(%rsp), %rax - movq 
%rax, 24(%rsp) ## 8-byte Spill - movq 784(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq 776(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 768(%rsp), %rax - movq %rax, 56(%rsp) ## 8-byte Spill - movq 760(%rsp), %rax - movq %rax, 48(%rsp) ## 8-byte Spill - movq 752(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - movq 744(%rsp), %rax - movq %rax, 80(%rsp) ## 8-byte Spill - movq 728(%rsp), %rax - movq 736(%rsp), %r12 - movq %rax, (%r14) - movq %r14, 72(%rsp) ## 8-byte Spill - movq 8(%r15), %rdx - leaq 648(%rsp), %rdi - movq %r15, %rsi - callq l_mulPv576x64 - movq 720(%rsp), %r8 - movq 712(%rsp), %rcx - movq 704(%rsp), %rdx - movq 696(%rsp), %rsi - movq 688(%rsp), %rdi - movq 680(%rsp), %rbp - addq 648(%rsp), %r12 - movq 672(%rsp), %rax - movq 656(%rsp), %rbx - movq 664(%rsp), %r13 - movq %r12, 8(%r14) - adcq 80(%rsp), %rbx ## 8-byte Folded Reload - adcq 40(%rsp), %r13 ## 8-byte Folded Reload - adcq 48(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 56(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 56(%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 32(%rsp) ## 8-byte Spill - movq %r15, 64(%rsp) ## 8-byte Spill - movq 16(%r15), %rdx - leaq 568(%rsp), %rdi - movq %r15, %rsi - callq l_mulPv576x64 - movq 640(%rsp), %r8 - movq 632(%rsp), %rcx - movq 624(%rsp), %rdx - movq 616(%rsp), %rsi - movq 608(%rsp), %rdi - movq 600(%rsp), %rbp - addq 568(%rsp), %rbx - movq 592(%rsp), %rax - movq 576(%rsp), %r14 - movq 584(%rsp), %r12 - movq 72(%rsp), %r15 ## 8-byte Reload - movq %rbx, 16(%r15) - adcq %r13, %r14 - adcq 40(%rsp), %r12 ## 8-byte Folded Reload - adcq 48(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 56(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 56(%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %rsi ## 8-byte Reload - movq 24(%rsi), %rdx - leaq 488(%rsp), %rdi - callq l_mulPv576x64 - movq 560(%rsp), %r8 - movq 552(%rsp), %rcx - movq 544(%rsp), %rdx - movq 536(%rsp), %rsi - movq 528(%rsp), %rdi - movq 520(%rsp), %rbp - addq 488(%rsp), %r14 - movq 512(%rsp), %rax - movq 496(%rsp), %rbx - movq 504(%rsp), %r13 - movq %r14, 24(%r15) - adcq %r12, %rbx - adcq 40(%rsp), %r13 ## 8-byte Folded Reload - adcq 48(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 56(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 56(%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %rsi ## 8-byte Reload - movq 32(%rsi), %rdx - leaq 
408(%rsp), %rdi - callq l_mulPv576x64 - movq 480(%rsp), %r8 - movq 472(%rsp), %rcx - movq 464(%rsp), %rdx - movq 456(%rsp), %rsi - movq 448(%rsp), %rdi - movq 440(%rsp), %rbp - addq 408(%rsp), %rbx - movq 432(%rsp), %rax - movq 416(%rsp), %r14 - movq 424(%rsp), %r12 - movq %rbx, 32(%r15) - adcq %r13, %r14 - adcq 40(%rsp), %r12 ## 8-byte Folded Reload - adcq 48(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 56(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 56(%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %rsi ## 8-byte Reload - movq 40(%rsi), %rdx - leaq 328(%rsp), %rdi - callq l_mulPv576x64 - movq 400(%rsp), %r8 - movq 392(%rsp), %rcx - movq 384(%rsp), %rdx - movq 376(%rsp), %rsi - movq 368(%rsp), %rdi - movq 360(%rsp), %rbp - addq 328(%rsp), %r14 - movq 352(%rsp), %rax - movq 336(%rsp), %rbx - movq 344(%rsp), %r13 - movq %r14, 40(%r15) - adcq %r12, %rbx - adcq 40(%rsp), %r13 ## 8-byte Folded Reload - adcq 48(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 56(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 56(%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %rsi ## 8-byte Reload - movq 48(%rsi), %rdx - leaq 248(%rsp), %rdi - callq l_mulPv576x64 - movq 320(%rsp), %r8 - movq 312(%rsp), %rcx - movq 304(%rsp), %rdx - movq 296(%rsp), %rsi - movq 288(%rsp), %rdi - movq 280(%rsp), %rbp - addq 248(%rsp), %rbx - movq 272(%rsp), %rax - movq 256(%rsp), %r12 - movq 264(%rsp), %r14 - movq %rbx, 48(%r15) - adcq %r13, %r12 - adcq 40(%rsp), %r14 ## 8-byte Folded Reload - adcq 48(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 56(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 56(%rsp) ## 8-byte Spill - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 8(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %rsi ## 8-byte Reload - movq 56(%rsi), %rdx - leaq 168(%rsp), %rdi - callq l_mulPv576x64 - movq 240(%rsp), %r8 - movq 232(%rsp), %rdx - movq 224(%rsp), %rsi - movq 216(%rsp), %rdi - movq 208(%rsp), %rbx - movq 200(%rsp), %rcx - addq 168(%rsp), %r12 - movq 192(%rsp), %r15 - movq 176(%rsp), %r13 - movq 184(%rsp), %rbp - movq 72(%rsp), %rax ## 8-byte Reload - movq %r12, 56(%rax) - adcq %r14, %r13 - adcq 40(%rsp), %rbp ## 8-byte Folded Reload - adcq 48(%rsp), %r15 ## 8-byte Folded Reload - adcq 56(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, %r12 - adcq 8(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, %r14 - adcq 16(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 
8(%rsp) ## 8-byte Spill - adcq 24(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 16(%rsp) ## 8-byte Spill - adcq 32(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 24(%rsp) ## 8-byte Spill - adcq $0, %r8 - movq %r8, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %rsi ## 8-byte Reload - movq 64(%rsi), %rdx - leaq 88(%rsp), %rdi - callq l_mulPv576x64 - addq 88(%rsp), %r13 - adcq 96(%rsp), %rbp - movq 160(%rsp), %r8 - adcq 104(%rsp), %r15 - movq 152(%rsp), %r9 - movq 144(%rsp), %rdx - movq 136(%rsp), %rsi - movq 128(%rsp), %rdi - movq 120(%rsp), %rbx - movq 112(%rsp), %rax - movq 72(%rsp), %rcx ## 8-byte Reload - movq %r13, 64(%rcx) - movq %rbp, 72(%rcx) - adcq %r12, %rax - movq %r15, 80(%rcx) - movq %rax, 88(%rcx) - adcq %r14, %rbx - movq %rbx, 96(%rcx) - adcq 8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 104(%rcx) - adcq 16(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 112(%rcx) - adcq 24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 120(%rcx) - adcq 32(%rsp), %r9 ## 8-byte Folded Reload - movq %r9, 128(%rcx) - adcq $0, %r8 - movq %r8, 136(%rcx) - addq $808, %rsp ## imm = 0x328 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_mont9L - .p2align 4, 0x90 -_mcl_fp_mont9L: ## @mcl_fp_mont9L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $1560, %rsp ## imm = 0x618 - movq %rcx, 72(%rsp) ## 8-byte Spill - movq %rdx, 96(%rsp) ## 8-byte Spill - movq %rsi, 88(%rsp) ## 8-byte Spill - movq %rdi, 112(%rsp) ## 8-byte Spill - movq -8(%rcx), %rbx - movq %rbx, 80(%rsp) ## 8-byte Spill - movq (%rdx), %rdx - leaq 1480(%rsp), %rdi - callq l_mulPv576x64 - movq 1480(%rsp), %r14 - movq 1488(%rsp), %r15 - movq %r14, %rdx - imulq %rbx, %rdx - movq 1552(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - movq 1544(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - movq 1536(%rsp), %rax - movq %rax, 56(%rsp) ## 8-byte Spill - movq 1528(%rsp), %r12 - movq 1520(%rsp), %r13 - movq 1512(%rsp), %rbx - movq 1504(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 1496(%rsp), %rbp - leaq 1400(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 1400(%rsp), %r14 - adcq 1408(%rsp), %r15 - adcq 1416(%rsp), %rbp - movq %rbp, 8(%rsp) ## 8-byte Spill - movq (%rsp), %rax ## 8-byte Reload - adcq 1424(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - adcq 1432(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - adcq 1440(%rsp), %r13 - movq %r13, 16(%rsp) ## 8-byte Spill - adcq 1448(%rsp), %r12 - movq %r12, 48(%rsp) ## 8-byte Spill - movq 56(%rsp), %rbx ## 8-byte Reload - adcq 1456(%rsp), %rbx - movq 40(%rsp), %r14 ## 8-byte Reload - adcq 1464(%rsp), %r14 - movq 24(%rsp), %r13 ## 8-byte Reload - adcq 1472(%rsp), %r13 - sbbq %rbp, %rbp - movq 96(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rdx - leaq 1320(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %ebp - addq 1320(%rsp), %r15 - movq 8(%rsp), %rax ## 8-byte Reload - adcq 1328(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq (%rsp), %rax ## 8-byte Reload - adcq 1336(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 32(%rsp), %r12 ## 8-byte Reload - adcq 1344(%rsp), %r12 - movq 16(%rsp), %rax ## 8-byte Reload - adcq 1352(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq 48(%rsp), %rax ## 8-byte Reload - adcq 1360(%rsp), %rax - movq %rax, 48(%rsp) ## 8-byte Spill - adcq 1368(%rsp), %rbx - adcq 1376(%rsp), %r14 - movq %r14, 40(%rsp) ## 8-byte Spill - adcq 1384(%rsp), %r13 - movq %r13, 24(%rsp) ## 
8-byte Spill - adcq 1392(%rsp), %rbp - sbbq %r14, %r14 - movq %r15, %rdx - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 1240(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq %r14, %rax - andl $1, %eax - addq 1240(%rsp), %r15 - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 1248(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq (%rsp), %r14 ## 8-byte Reload - adcq 1256(%rsp), %r14 - adcq 1264(%rsp), %r12 - movq %r12, 32(%rsp) ## 8-byte Spill - movq 16(%rsp), %r12 ## 8-byte Reload - adcq 1272(%rsp), %r12 - movq 48(%rsp), %r13 ## 8-byte Reload - adcq 1280(%rsp), %r13 - adcq 1288(%rsp), %rbx - movq %rbx, 56(%rsp) ## 8-byte Spill - movq 40(%rsp), %r15 ## 8-byte Reload - adcq 1296(%rsp), %r15 - movq 24(%rsp), %rbx ## 8-byte Reload - adcq 1304(%rsp), %rbx - adcq 1312(%rsp), %rbp - adcq $0, %rax - movq %rax, 64(%rsp) ## 8-byte Spill - movq 96(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rdx - leaq 1160(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 8(%rsp), %rax ## 8-byte Reload - addq 1160(%rsp), %rax - adcq 1168(%rsp), %r14 - movq %r14, (%rsp) ## 8-byte Spill - movq 32(%rsp), %r14 ## 8-byte Reload - adcq 1176(%rsp), %r14 - adcq 1184(%rsp), %r12 - movq %r12, 16(%rsp) ## 8-byte Spill - movq %r13, %r12 - adcq 1192(%rsp), %r12 - movq 56(%rsp), %rcx ## 8-byte Reload - adcq 1200(%rsp), %rcx - movq %rcx, 56(%rsp) ## 8-byte Spill - adcq 1208(%rsp), %r15 - movq %r15, %r13 - adcq 1216(%rsp), %rbx - movq %rbx, 24(%rsp) ## 8-byte Spill - adcq 1224(%rsp), %rbp - movq 64(%rsp), %rcx ## 8-byte Reload - adcq 1232(%rsp), %rcx - movq %rcx, 64(%rsp) ## 8-byte Spill - sbbq %r15, %r15 - movq %rax, %rdx - movq %rax, %rbx - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 1080(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq %r15, %rax - andl $1, %eax - addq 1080(%rsp), %rbx - movq (%rsp), %rcx ## 8-byte Reload - adcq 1088(%rsp), %rcx - movq %rcx, (%rsp) ## 8-byte Spill - movq %r14, %r15 - adcq 1096(%rsp), %r15 - movq 16(%rsp), %r14 ## 8-byte Reload - adcq 1104(%rsp), %r14 - movq %r12, %rbx - adcq 1112(%rsp), %rbx - movq 56(%rsp), %rcx ## 8-byte Reload - adcq 1120(%rsp), %rcx - movq %rcx, 56(%rsp) ## 8-byte Spill - adcq 1128(%rsp), %r13 - movq %r13, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %r13 ## 8-byte Reload - adcq 1136(%rsp), %r13 - adcq 1144(%rsp), %rbp - movq 64(%rsp), %r12 ## 8-byte Reload - adcq 1152(%rsp), %r12 - adcq $0, %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 96(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rdx - leaq 1000(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq (%rsp), %rax ## 8-byte Reload - addq 1000(%rsp), %rax - adcq 1008(%rsp), %r15 - movq %r15, 32(%rsp) ## 8-byte Spill - adcq 1016(%rsp), %r14 - movq %r14, %r15 - adcq 1024(%rsp), %rbx - movq %rbx, 48(%rsp) ## 8-byte Spill - movq 56(%rsp), %r14 ## 8-byte Reload - adcq 1032(%rsp), %r14 - movq 40(%rsp), %rcx ## 8-byte Reload - adcq 1040(%rsp), %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - adcq 1048(%rsp), %r13 - movq %r13, 24(%rsp) ## 8-byte Spill - adcq 1056(%rsp), %rbp - adcq 1064(%rsp), %r12 - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 1072(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - sbbq %rbx, %rbx - movq %rax, %rdx - movq %rax, %r13 - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 920(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %ebx - movq %rbx, %rax - addq 920(%rsp), %r13 - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 928(%rsp), %rcx - movq 
%rcx, 32(%rsp) ## 8-byte Spill - adcq 936(%rsp), %r15 - movq %r15, 16(%rsp) ## 8-byte Spill - movq 48(%rsp), %r15 ## 8-byte Reload - adcq 944(%rsp), %r15 - movq %r14, %r13 - adcq 952(%rsp), %r13 - movq 40(%rsp), %r14 ## 8-byte Reload - adcq 960(%rsp), %r14 - movq 24(%rsp), %rbx ## 8-byte Reload - adcq 968(%rsp), %rbx - adcq 976(%rsp), %rbp - adcq 984(%rsp), %r12 - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 992(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - adcq $0, %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 96(%rsp), %rax ## 8-byte Reload - movq 32(%rax), %rdx - leaq 840(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 32(%rsp), %rax ## 8-byte Reload - addq 840(%rsp), %rax - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 848(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - adcq 856(%rsp), %r15 - adcq 864(%rsp), %r13 - movq %r13, 56(%rsp) ## 8-byte Spill - adcq 872(%rsp), %r14 - movq %r14, 40(%rsp) ## 8-byte Spill - adcq 880(%rsp), %rbx - movq %rbx, 24(%rsp) ## 8-byte Spill - adcq 888(%rsp), %rbp - adcq 896(%rsp), %r12 - movq 8(%rsp), %r13 ## 8-byte Reload - adcq 904(%rsp), %r13 - movq (%rsp), %rcx ## 8-byte Reload - adcq 912(%rsp), %rcx - movq %rcx, (%rsp) ## 8-byte Spill - sbbq %rbx, %rbx - movq %rax, %rdx - movq %rax, %r14 - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 760(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %ebx - movq %rbx, %rax - addq 760(%rsp), %r14 - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 768(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - adcq 776(%rsp), %r15 - movq 56(%rsp), %r14 ## 8-byte Reload - adcq 784(%rsp), %r14 - movq 40(%rsp), %rcx ## 8-byte Reload - adcq 792(%rsp), %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 800(%rsp), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 808(%rsp), %rbp - movq %r12, %rbx - adcq 816(%rsp), %rbx - movq %r13, %r12 - adcq 824(%rsp), %r12 - movq (%rsp), %r13 ## 8-byte Reload - adcq 832(%rsp), %r13 - adcq $0, %rax - movq %rax, 32(%rsp) ## 8-byte Spill - movq 96(%rsp), %rax ## 8-byte Reload - movq 40(%rax), %rdx - leaq 680(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 16(%rsp), %rax ## 8-byte Reload - addq 680(%rsp), %rax - adcq 688(%rsp), %r15 - movq %r15, 48(%rsp) ## 8-byte Spill - adcq 696(%rsp), %r14 - movq %r14, 56(%rsp) ## 8-byte Spill - movq 40(%rsp), %rcx ## 8-byte Reload - adcq 704(%rsp), %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %r15 ## 8-byte Reload - adcq 712(%rsp), %r15 - adcq 720(%rsp), %rbp - adcq 728(%rsp), %rbx - movq %rbx, 64(%rsp) ## 8-byte Spill - adcq 736(%rsp), %r12 - movq %r12, 8(%rsp) ## 8-byte Spill - adcq 744(%rsp), %r13 - movq %r13, (%rsp) ## 8-byte Spill - movq 32(%rsp), %r13 ## 8-byte Reload - adcq 752(%rsp), %r13 - sbbq %r14, %r14 - movq %rax, %rdx - movq %rax, %rbx - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 600(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %r14d - addq 600(%rsp), %rbx - movq 48(%rsp), %rax ## 8-byte Reload - adcq 608(%rsp), %rax - movq %rax, 48(%rsp) ## 8-byte Spill - movq 56(%rsp), %rax ## 8-byte Reload - adcq 616(%rsp), %rax - movq %rax, 56(%rsp) ## 8-byte Spill - movq 40(%rsp), %rbx ## 8-byte Reload - adcq 624(%rsp), %rbx - adcq 632(%rsp), %r15 - movq %r15, 24(%rsp) ## 8-byte Spill - adcq 640(%rsp), %rbp - movq 64(%rsp), %r12 ## 8-byte Reload - adcq 648(%rsp), %r12 - movq 8(%rsp), %rax ## 8-byte Reload - adcq 656(%rsp), %rax - movq %rax, 8(%rsp) ## 
8-byte Spill - movq (%rsp), %r15 ## 8-byte Reload - adcq 664(%rsp), %r15 - adcq 672(%rsp), %r13 - adcq $0, %r14 - movq %r14, 16(%rsp) ## 8-byte Spill - movq 96(%rsp), %rax ## 8-byte Reload - movq 48(%rax), %rdx - leaq 520(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 48(%rsp), %rax ## 8-byte Reload - addq 520(%rsp), %rax - movq 56(%rsp), %r14 ## 8-byte Reload - adcq 528(%rsp), %r14 - adcq 536(%rsp), %rbx - movq %rbx, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 544(%rsp), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 552(%rsp), %rbp - adcq 560(%rsp), %r12 - movq %r12, 64(%rsp) ## 8-byte Spill - movq 8(%rsp), %r12 ## 8-byte Reload - adcq 568(%rsp), %r12 - adcq 576(%rsp), %r15 - movq %r15, (%rsp) ## 8-byte Spill - adcq 584(%rsp), %r13 - movq %r13, 32(%rsp) ## 8-byte Spill - movq 16(%rsp), %r15 ## 8-byte Reload - adcq 592(%rsp), %r15 - sbbq %rbx, %rbx - movq %rax, %rdx - movq %rax, %r13 - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 440(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %ebx - movq %rbx, %rax - addq 440(%rsp), %r13 - adcq 448(%rsp), %r14 - movq %r14, 56(%rsp) ## 8-byte Spill - movq 40(%rsp), %r14 ## 8-byte Reload - adcq 456(%rsp), %r14 - movq 24(%rsp), %rbx ## 8-byte Reload - adcq 464(%rsp), %rbx - adcq 472(%rsp), %rbp - movq %rbp, 104(%rsp) ## 8-byte Spill - movq 64(%rsp), %rcx ## 8-byte Reload - adcq 480(%rsp), %rcx - movq %rcx, 64(%rsp) ## 8-byte Spill - adcq 488(%rsp), %r12 - movq %r12, 8(%rsp) ## 8-byte Spill - movq (%rsp), %rbp ## 8-byte Reload - adcq 496(%rsp), %rbp - movq 32(%rsp), %r12 ## 8-byte Reload - adcq 504(%rsp), %r12 - adcq 512(%rsp), %r15 - movq %r15, %r13 - adcq $0, %rax - movq %rax, 48(%rsp) ## 8-byte Spill - movq 96(%rsp), %rax ## 8-byte Reload - movq 56(%rax), %rdx - leaq 360(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 56(%rsp), %rax ## 8-byte Reload - addq 360(%rsp), %rax - adcq 368(%rsp), %r14 - adcq 376(%rsp), %rbx - movq %rbx, 24(%rsp) ## 8-byte Spill - movq 104(%rsp), %rcx ## 8-byte Reload - adcq 384(%rsp), %rcx - movq %rcx, 104(%rsp) ## 8-byte Spill - movq 64(%rsp), %rbx ## 8-byte Reload - adcq 392(%rsp), %rbx - movq 8(%rsp), %r15 ## 8-byte Reload - adcq 400(%rsp), %r15 - adcq 408(%rsp), %rbp - movq %rbp, (%rsp) ## 8-byte Spill - adcq 416(%rsp), %r12 - movq %r12, %rbp - adcq 424(%rsp), %r13 - movq %r13, 16(%rsp) ## 8-byte Spill - movq 48(%rsp), %rcx ## 8-byte Reload - adcq 432(%rsp), %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - sbbq %r13, %r13 - movq %rax, %rdx - movq %rax, %r12 - imulq 80(%rsp), %rdx ## 8-byte Folded Reload - leaq 280(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %r13d - addq 280(%rsp), %r12 - adcq 288(%rsp), %r14 - movq %r14, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rax ## 8-byte Reload - adcq 296(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - movq 104(%rsp), %r14 ## 8-byte Reload - adcq 304(%rsp), %r14 - adcq 312(%rsp), %rbx - movq %rbx, 64(%rsp) ## 8-byte Spill - adcq 320(%rsp), %r15 - movq %r15, 8(%rsp) ## 8-byte Spill - movq (%rsp), %rbx ## 8-byte Reload - adcq 328(%rsp), %rbx - adcq 336(%rsp), %rbp - movq %rbp, 32(%rsp) ## 8-byte Spill - movq 16(%rsp), %r12 ## 8-byte Reload - adcq 344(%rsp), %r12 - movq 48(%rsp), %rbp ## 8-byte Reload - adcq 352(%rsp), %rbp - adcq $0, %r13 - movq 96(%rsp), %rax ## 8-byte Reload - movq 64(%rax), %rdx - leaq 200(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 40(%rsp), %rax ## 
8-byte Reload - addq 200(%rsp), %rax - movq 24(%rsp), %r15 ## 8-byte Reload - adcq 208(%rsp), %r15 - adcq 216(%rsp), %r14 - movq %r14, 104(%rsp) ## 8-byte Spill - movq 64(%rsp), %r14 ## 8-byte Reload - adcq 224(%rsp), %r14 - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 232(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - adcq 240(%rsp), %rbx - movq %rbx, (%rsp) ## 8-byte Spill - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 248(%rsp), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - adcq 256(%rsp), %r12 - movq %r12, 16(%rsp) ## 8-byte Spill - adcq 264(%rsp), %rbp - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 272(%rsp), %r13 - sbbq %rbx, %rbx - movq 80(%rsp), %rdx ## 8-byte Reload - imulq %rax, %rdx - movq %rax, %r12 - leaq 120(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %ebx - addq 120(%rsp), %r12 - adcq 128(%rsp), %r15 - movq 104(%rsp), %rbp ## 8-byte Reload - adcq 136(%rsp), %rbp - movq %r14, %rcx - adcq 144(%rsp), %rcx - movq %rcx, 64(%rsp) ## 8-byte Spill - movq 8(%rsp), %r8 ## 8-byte Reload - adcq 152(%rsp), %r8 - movq %r8, 8(%rsp) ## 8-byte Spill - movq (%rsp), %r9 ## 8-byte Reload - adcq 160(%rsp), %r9 - movq %r9, (%rsp) ## 8-byte Spill - movq 32(%rsp), %r10 ## 8-byte Reload - adcq 168(%rsp), %r10 - movq %r10, 32(%rsp) ## 8-byte Spill - movq 16(%rsp), %rdi ## 8-byte Reload - adcq 176(%rsp), %rdi - movq %rdi, 16(%rsp) ## 8-byte Spill - movq 48(%rsp), %r14 ## 8-byte Reload - adcq 184(%rsp), %r14 - adcq 192(%rsp), %r13 - adcq $0, %rbx - movq %r15, %rsi - movq %r15, %r12 - movq 72(%rsp), %rdx ## 8-byte Reload - subq (%rdx), %rsi - movq %rbp, %rax - movq %rbp, %r15 - sbbq 8(%rdx), %rax - movq %rcx, %rbp - sbbq 16(%rdx), %rbp - movq %r8, %rcx - sbbq 24(%rdx), %rcx - movq %r9, %r8 - sbbq 32(%rdx), %r8 - movq %r10, %r11 - sbbq 40(%rdx), %r11 - movq %rdi, %r10 - sbbq 48(%rdx), %r10 - movq %r14, %rdi - sbbq 56(%rdx), %rdi - movq %r13, %r9 - sbbq 64(%rdx), %r9 - sbbq $0, %rbx - andl $1, %ebx - cmovneq %r13, %r9 - testb %bl, %bl - cmovneq %r12, %rsi - movq 112(%rsp), %rbx ## 8-byte Reload - movq %rsi, (%rbx) - cmovneq %r15, %rax - movq %rax, 8(%rbx) - cmovneq 64(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 16(%rbx) - cmovneq 8(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 24(%rbx) - cmovneq (%rsp), %r8 ## 8-byte Folded Reload - movq %r8, 32(%rbx) - cmovneq 32(%rsp), %r11 ## 8-byte Folded Reload - movq %r11, 40(%rbx) - cmovneq 16(%rsp), %r10 ## 8-byte Folded Reload - movq %r10, 48(%rbx) - cmovneq %r14, %rdi - movq %rdi, 56(%rbx) - movq %r9, 64(%rbx) - addq $1560, %rsp ## imm = 0x618 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montNF9L - .p2align 4, 0x90 -_mcl_fp_montNF9L: ## @mcl_fp_montNF9L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $1560, %rsp ## imm = 0x618 - movq %rcx, 72(%rsp) ## 8-byte Spill - movq %rdx, 80(%rsp) ## 8-byte Spill - movq %rsi, 88(%rsp) ## 8-byte Spill - movq %rdi, 112(%rsp) ## 8-byte Spill - movq -8(%rcx), %rbx - movq %rbx, 96(%rsp) ## 8-byte Spill - movq (%rdx), %rdx - leaq 1480(%rsp), %rdi - callq l_mulPv576x64 - movq 1480(%rsp), %r12 - movq 1488(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq %r12, %rdx - imulq %rbx, %rdx - movq 1552(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - movq 1544(%rsp), %r13 - movq 1536(%rsp), %rax - movq %rax, 48(%rsp) ## 8-byte Spill - movq 1528(%rsp), %rax - movq %rax, 64(%rsp) ## 8-byte Spill - movq 1520(%rsp), %r14 - movq 1512(%rsp), %r15 - movq 1504(%rsp), %rbx - movq 1496(%rsp), 
%rbp - leaq 1400(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 1400(%rsp), %r12 - movq 16(%rsp), %rax ## 8-byte Reload - adcq 1408(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - adcq 1416(%rsp), %rbp - movq %rbp, 104(%rsp) ## 8-byte Spill - adcq 1424(%rsp), %rbx - movq %rbx, (%rsp) ## 8-byte Spill - adcq 1432(%rsp), %r15 - movq %r15, 8(%rsp) ## 8-byte Spill - adcq 1440(%rsp), %r14 - movq %r14, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %rbx ## 8-byte Reload - adcq 1448(%rsp), %rbx - movq 48(%rsp), %r12 ## 8-byte Reload - adcq 1456(%rsp), %r12 - adcq 1464(%rsp), %r13 - movq %r13, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 1472(%rsp), %rbp - movq 80(%rsp), %rax ## 8-byte Reload - movq 8(%rax), %rdx - leaq 1320(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 1392(%rsp), %rax - movq 16(%rsp), %rcx ## 8-byte Reload - addq 1320(%rsp), %rcx - movq 104(%rsp), %r15 ## 8-byte Reload - adcq 1328(%rsp), %r15 - movq (%rsp), %r14 ## 8-byte Reload - adcq 1336(%rsp), %r14 - movq 8(%rsp), %rdx ## 8-byte Reload - adcq 1344(%rsp), %rdx - movq %rdx, 8(%rsp) ## 8-byte Spill - movq 32(%rsp), %r13 ## 8-byte Reload - adcq 1352(%rsp), %r13 - adcq 1360(%rsp), %rbx - movq %rbx, 64(%rsp) ## 8-byte Spill - adcq 1368(%rsp), %r12 - movq %r12, 48(%rsp) ## 8-byte Spill - movq 40(%rsp), %rdx ## 8-byte Reload - adcq 1376(%rsp), %rdx - movq %rdx, 40(%rsp) ## 8-byte Spill - adcq 1384(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - adcq $0, %rax - movq %rax, %rbp - movq %rcx, %rdx - movq %rcx, %rbx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 1240(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 1240(%rsp), %rbx - adcq 1248(%rsp), %r15 - movq %r15, 104(%rsp) ## 8-byte Spill - adcq 1256(%rsp), %r14 - movq %r14, (%rsp) ## 8-byte Spill - movq 8(%rsp), %r12 ## 8-byte Reload - adcq 1264(%rsp), %r12 - adcq 1272(%rsp), %r13 - movq %r13, %r14 - movq 64(%rsp), %r13 ## 8-byte Reload - adcq 1280(%rsp), %r13 - movq 48(%rsp), %rbx ## 8-byte Reload - adcq 1288(%rsp), %rbx - movq 40(%rsp), %r15 ## 8-byte Reload - adcq 1296(%rsp), %r15 - movq 24(%rsp), %rax ## 8-byte Reload - adcq 1304(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 1312(%rsp), %rbp - movq %rbp, 56(%rsp) ## 8-byte Spill - movq 80(%rsp), %rax ## 8-byte Reload - movq 16(%rax), %rdx - leaq 1160(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 1232(%rsp), %rax - movq 104(%rsp), %rcx ## 8-byte Reload - addq 1160(%rsp), %rcx - movq (%rsp), %rbp ## 8-byte Reload - adcq 1168(%rsp), %rbp - adcq 1176(%rsp), %r12 - movq %r12, 8(%rsp) ## 8-byte Spill - adcq 1184(%rsp), %r14 - adcq 1192(%rsp), %r13 - movq %r13, %r12 - adcq 1200(%rsp), %rbx - movq %rbx, 48(%rsp) ## 8-byte Spill - adcq 1208(%rsp), %r15 - movq %r15, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rbx ## 8-byte Reload - adcq 1216(%rsp), %rbx - movq 56(%rsp), %rdx ## 8-byte Reload - adcq 1224(%rsp), %rdx - movq %rdx, 56(%rsp) ## 8-byte Spill - movq %rax, %r15 - adcq $0, %r15 - movq %rcx, %rdx - movq %rcx, %r13 - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 1080(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 1080(%rsp), %r13 - adcq 1088(%rsp), %rbp - movq %rbp, (%rsp) ## 8-byte Spill - movq 8(%rsp), %r13 ## 8-byte Reload - adcq 1096(%rsp), %r13 - adcq 1104(%rsp), %r14 - adcq 1112(%rsp), %r12 - movq %r12, 64(%rsp) ## 8-byte Spill - movq 48(%rsp), %r12 ## 8-byte Reload - adcq 1120(%rsp), %r12 - movq 40(%rsp), %rbp ## 
8-byte Reload - adcq 1128(%rsp), %rbp - adcq 1136(%rsp), %rbx - movq %rbx, 24(%rsp) ## 8-byte Spill - movq 56(%rsp), %rbx ## 8-byte Reload - adcq 1144(%rsp), %rbx - adcq 1152(%rsp), %r15 - movq 80(%rsp), %rax ## 8-byte Reload - movq 24(%rax), %rdx - leaq 1000(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 1072(%rsp), %rax - movq (%rsp), %rcx ## 8-byte Reload - addq 1000(%rsp), %rcx - adcq 1008(%rsp), %r13 - movq %r13, 8(%rsp) ## 8-byte Spill - adcq 1016(%rsp), %r14 - movq %r14, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %r14 ## 8-byte Reload - adcq 1024(%rsp), %r14 - adcq 1032(%rsp), %r12 - adcq 1040(%rsp), %rbp - movq %rbp, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %r13 ## 8-byte Reload - adcq 1048(%rsp), %r13 - adcq 1056(%rsp), %rbx - movq %rbx, 56(%rsp) ## 8-byte Spill - adcq 1064(%rsp), %r15 - movq %r15, 16(%rsp) ## 8-byte Spill - adcq $0, %rax - movq %rax, (%rsp) ## 8-byte Spill - movq %rcx, %rdx - movq %rcx, %rbx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 920(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 920(%rsp), %rbx - movq 8(%rsp), %rax ## 8-byte Reload - adcq 928(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - movq 32(%rsp), %rbp ## 8-byte Reload - adcq 936(%rsp), %rbp - movq %r14, %rbx - adcq 944(%rsp), %rbx - adcq 952(%rsp), %r12 - movq 40(%rsp), %rax ## 8-byte Reload - adcq 960(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 968(%rsp), %r13 - movq %r13, %r15 - movq 56(%rsp), %r13 ## 8-byte Reload - adcq 976(%rsp), %r13 - movq 16(%rsp), %r14 ## 8-byte Reload - adcq 984(%rsp), %r14 - movq (%rsp), %rax ## 8-byte Reload - adcq 992(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 80(%rsp), %rax ## 8-byte Reload - movq 32(%rax), %rdx - leaq 840(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 912(%rsp), %rax - movq 8(%rsp), %rcx ## 8-byte Reload - addq 840(%rsp), %rcx - adcq 848(%rsp), %rbp - movq %rbp, 32(%rsp) ## 8-byte Spill - adcq 856(%rsp), %rbx - movq %rbx, 64(%rsp) ## 8-byte Spill - adcq 864(%rsp), %r12 - movq 40(%rsp), %rbp ## 8-byte Reload - adcq 872(%rsp), %rbp - adcq 880(%rsp), %r15 - movq %r15, 24(%rsp) ## 8-byte Spill - adcq 888(%rsp), %r13 - adcq 896(%rsp), %r14 - movq %r14, 16(%rsp) ## 8-byte Spill - movq (%rsp), %rdx ## 8-byte Reload - adcq 904(%rsp), %rdx - movq %rdx, (%rsp) ## 8-byte Spill - adcq $0, %rax - movq %rax, %r14 - movq %rcx, %rdx - movq %rcx, %rbx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 760(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 760(%rsp), %rbx - movq 32(%rsp), %rax ## 8-byte Reload - adcq 768(%rsp), %rax - movq %rax, 32(%rsp) ## 8-byte Spill - movq 64(%rsp), %r15 ## 8-byte Reload - adcq 776(%rsp), %r15 - adcq 784(%rsp), %r12 - movq %r12, 48(%rsp) ## 8-byte Spill - movq %rbp, %rbx - adcq 792(%rsp), %rbx - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 800(%rsp), %rbp - adcq 808(%rsp), %r13 - movq 16(%rsp), %rax ## 8-byte Reload - adcq 816(%rsp), %rax - movq %rax, 16(%rsp) ## 8-byte Spill - movq (%rsp), %r12 ## 8-byte Reload - adcq 824(%rsp), %r12 - adcq 832(%rsp), %r14 - movq 80(%rsp), %rax ## 8-byte Reload - movq 40(%rax), %rdx - leaq 680(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 752(%rsp), %rcx - movq 32(%rsp), %rax ## 8-byte Reload - addq 680(%rsp), %rax - adcq 688(%rsp), %r15 - movq %r15, 64(%rsp) ## 8-byte Spill - movq 48(%rsp), %rdx ## 8-byte Reload - adcq 696(%rsp), %rdx - movq %rdx, 48(%rsp) ## 8-byte Spill - adcq 704(%rsp), %rbx - 
movq %rbx, 40(%rsp) ## 8-byte Spill - adcq 712(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - adcq 720(%rsp), %r13 - movq %r13, %r15 - movq 16(%rsp), %rbx ## 8-byte Reload - adcq 728(%rsp), %rbx - adcq 736(%rsp), %r12 - movq %r12, (%rsp) ## 8-byte Spill - adcq 744(%rsp), %r14 - movq %r14, 32(%rsp) ## 8-byte Spill - adcq $0, %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq %rax, %rdx - movq %rax, %r13 - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 600(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 600(%rsp), %r13 - movq 64(%rsp), %r13 ## 8-byte Reload - adcq 608(%rsp), %r13 - movq 48(%rsp), %r12 ## 8-byte Reload - adcq 616(%rsp), %r12 - movq 40(%rsp), %rbp ## 8-byte Reload - adcq 624(%rsp), %rbp - movq 24(%rsp), %rax ## 8-byte Reload - adcq 632(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 640(%rsp), %r15 - movq %r15, 56(%rsp) ## 8-byte Spill - adcq 648(%rsp), %rbx - movq %rbx, 16(%rsp) ## 8-byte Spill - movq (%rsp), %r14 ## 8-byte Reload - adcq 656(%rsp), %r14 - movq 32(%rsp), %rbx ## 8-byte Reload - adcq 664(%rsp), %rbx - movq 8(%rsp), %r15 ## 8-byte Reload - adcq 672(%rsp), %r15 - movq 80(%rsp), %rax ## 8-byte Reload - movq 48(%rax), %rdx - leaq 520(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 592(%rsp), %rcx - movq %r13, %rax - addq 520(%rsp), %rax - adcq 528(%rsp), %r12 - movq %r12, 48(%rsp) ## 8-byte Spill - movq %rbp, %r12 - adcq 536(%rsp), %r12 - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 544(%rsp), %rbp - movq 56(%rsp), %rdx ## 8-byte Reload - adcq 552(%rsp), %rdx - movq %rdx, 56(%rsp) ## 8-byte Spill - movq 16(%rsp), %rdx ## 8-byte Reload - adcq 560(%rsp), %rdx - movq %rdx, 16(%rsp) ## 8-byte Spill - adcq 568(%rsp), %r14 - movq %r14, (%rsp) ## 8-byte Spill - adcq 576(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - adcq 584(%rsp), %r15 - movq %r15, 8(%rsp) ## 8-byte Spill - adcq $0, %rcx - movq %rcx, %r13 - movq %rax, %rdx - movq %rax, %r14 - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 440(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 440(%rsp), %r14 - movq 48(%rsp), %rax ## 8-byte Reload - adcq 448(%rsp), %rax - movq %rax, 48(%rsp) ## 8-byte Spill - adcq 456(%rsp), %r12 - adcq 464(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq 56(%rsp), %r14 ## 8-byte Reload - adcq 472(%rsp), %r14 - movq 16(%rsp), %r15 ## 8-byte Reload - adcq 480(%rsp), %r15 - movq (%rsp), %rbp ## 8-byte Reload - adcq 488(%rsp), %rbp - movq 32(%rsp), %rbx ## 8-byte Reload - adcq 496(%rsp), %rbx - movq 8(%rsp), %rax ## 8-byte Reload - adcq 504(%rsp), %rax - movq %rax, 8(%rsp) ## 8-byte Spill - adcq 512(%rsp), %r13 - movq 80(%rsp), %rax ## 8-byte Reload - movq 56(%rax), %rdx - leaq 360(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 432(%rsp), %rcx - movq 48(%rsp), %rax ## 8-byte Reload - addq 360(%rsp), %rax - adcq 368(%rsp), %r12 - movq %r12, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rdx ## 8-byte Reload - adcq 376(%rsp), %rdx - movq %rdx, 24(%rsp) ## 8-byte Spill - adcq 384(%rsp), %r14 - movq %r14, 56(%rsp) ## 8-byte Spill - adcq 392(%rsp), %r15 - movq %r15, 16(%rsp) ## 8-byte Spill - adcq 400(%rsp), %rbp - movq %rbp, (%rsp) ## 8-byte Spill - adcq 408(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - movq 8(%rsp), %r14 ## 8-byte Reload - adcq 416(%rsp), %r14 - adcq 424(%rsp), %r13 - movq %r13, %r15 - adcq $0, %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - movq %rax, %rdx - movq %rax, %r12 - imulq 96(%rsp), %rdx ## 8-byte Folded Reload 
- leaq 280(%rsp), %rdi - movq 72(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 280(%rsp), %r12 - movq 40(%rsp), %rax ## 8-byte Reload - adcq 288(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 296(%rsp), %rbp - movq 56(%rsp), %rax ## 8-byte Reload - adcq 304(%rsp), %rax - movq %rax, 56(%rsp) ## 8-byte Spill - movq 16(%rsp), %r13 ## 8-byte Reload - adcq 312(%rsp), %r13 - movq (%rsp), %r12 ## 8-byte Reload - adcq 320(%rsp), %r12 - movq 32(%rsp), %rbx ## 8-byte Reload - adcq 328(%rsp), %rbx - adcq 336(%rsp), %r14 - movq %r14, 8(%rsp) ## 8-byte Spill - adcq 344(%rsp), %r15 - movq %r15, 64(%rsp) ## 8-byte Spill - movq 48(%rsp), %r14 ## 8-byte Reload - adcq 352(%rsp), %r14 - movq 80(%rsp), %rax ## 8-byte Reload - movq 64(%rax), %rdx - leaq 200(%rsp), %rdi - movq 88(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - movq 272(%rsp), %rcx - movq 40(%rsp), %rax ## 8-byte Reload - addq 200(%rsp), %rax - adcq 208(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq 56(%rsp), %rbp ## 8-byte Reload - adcq 216(%rsp), %rbp - adcq 224(%rsp), %r13 - movq %r13, 16(%rsp) ## 8-byte Spill - adcq 232(%rsp), %r12 - movq %r12, (%rsp) ## 8-byte Spill - adcq 240(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - movq 8(%rsp), %r15 ## 8-byte Reload - adcq 248(%rsp), %r15 - movq 64(%rsp), %r12 ## 8-byte Reload - adcq 256(%rsp), %r12 - adcq 264(%rsp), %r14 - adcq $0, %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - movq 96(%rsp), %rdx ## 8-byte Reload - imulq %rax, %rdx - movq %rax, %rbx - leaq 120(%rsp), %rdi - movq 72(%rsp), %r13 ## 8-byte Reload - movq %r13, %rsi - callq l_mulPv576x64 - addq 120(%rsp), %rbx - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 128(%rsp), %rcx - movq %rbp, %rdx - adcq 136(%rsp), %rdx - movq 16(%rsp), %rsi ## 8-byte Reload - adcq 144(%rsp), %rsi - movq %rsi, 16(%rsp) ## 8-byte Spill - movq (%rsp), %rdi ## 8-byte Reload - adcq 152(%rsp), %rdi - movq %rdi, (%rsp) ## 8-byte Spill - movq 32(%rsp), %rbx ## 8-byte Reload - adcq 160(%rsp), %rbx - movq %rbx, 32(%rsp) ## 8-byte Spill - movq %r15, %r8 - adcq 168(%rsp), %r8 - movq %r8, 8(%rsp) ## 8-byte Spill - movq %r12, %r15 - adcq 176(%rsp), %r15 - adcq 184(%rsp), %r14 - movq 40(%rsp), %r9 ## 8-byte Reload - adcq 192(%rsp), %r9 - movq %rcx, %rax - movq %rcx, %r11 - movq %r13, %rbp - subq (%rbp), %rax - movq %rdx, %rcx - movq %rdx, %r12 - sbbq 8(%rbp), %rcx - movq %rsi, %rdx - sbbq 16(%rbp), %rdx - movq %rdi, %rsi - sbbq 24(%rbp), %rsi - movq %rbx, %rdi - sbbq 32(%rbp), %rdi - movq %r8, %r10 - sbbq 40(%rbp), %r10 - movq %r15, %r13 - sbbq 48(%rbp), %r13 - movq %r14, %r8 - sbbq 56(%rbp), %r8 - movq %rbp, %rbx - movq %r9, %rbp - sbbq 64(%rbx), %rbp - movq %rbp, %rbx - sarq $63, %rbx - cmovsq %r11, %rax - movq 112(%rsp), %rbx ## 8-byte Reload - movq %rax, (%rbx) - cmovsq %r12, %rcx - movq %rcx, 8(%rbx) - cmovsq 16(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 16(%rbx) - cmovsq (%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 24(%rbx) - cmovsq 32(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 32(%rbx) - cmovsq 8(%rsp), %r10 ## 8-byte Folded Reload - movq %r10, 40(%rbx) - cmovsq %r15, %r13 - movq %r13, 48(%rbx) - cmovsq %r14, %r8 - movq %r8, 56(%rbx) - cmovsq %r9, %rbp - movq %rbp, 64(%rbx) - addq $1560, %rsp ## imm = 0x618 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_montRed9L - .p2align 4, 0x90 -_mcl_fp_montRed9L: ## @mcl_fp_montRed9L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - subq $936, 
%rsp ## imm = 0x3A8 - movq %rdx, %rax - movq %rdi, 208(%rsp) ## 8-byte Spill - movq -8(%rax), %rcx - movq %rcx, 96(%rsp) ## 8-byte Spill - movq (%rsi), %r14 - movq 8(%rsi), %rdx - movq %rdx, (%rsp) ## 8-byte Spill - movq %r14, %rdx - imulq %rcx, %rdx - movq 136(%rsi), %rcx - movq %rcx, 88(%rsp) ## 8-byte Spill - movq 128(%rsi), %rcx - movq %rcx, 56(%rsp) ## 8-byte Spill - movq 120(%rsi), %rcx - movq %rcx, 80(%rsp) ## 8-byte Spill - movq 112(%rsi), %rcx - movq %rcx, 72(%rsp) ## 8-byte Spill - movq 104(%rsi), %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - movq 96(%rsi), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - movq 88(%rsi), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq 80(%rsi), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - movq 72(%rsi), %r12 - movq 64(%rsi), %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - movq 56(%rsi), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - movq 48(%rsi), %rcx - movq %rcx, 64(%rsp) ## 8-byte Spill - movq 40(%rsi), %rbp - movq 32(%rsi), %rbx - movq 24(%rsi), %r13 - movq 16(%rsi), %r15 - movq %rax, %rcx - movq (%rcx), %rax - movq %rax, 144(%rsp) ## 8-byte Spill - movq 64(%rcx), %rax - movq %rax, 200(%rsp) ## 8-byte Spill - movq 56(%rcx), %rax - movq %rax, 192(%rsp) ## 8-byte Spill - movq 48(%rcx), %rax - movq %rax, 184(%rsp) ## 8-byte Spill - movq 40(%rcx), %rax - movq %rax, 176(%rsp) ## 8-byte Spill - movq 32(%rcx), %rax - movq %rax, 168(%rsp) ## 8-byte Spill - movq 24(%rcx), %rax - movq %rax, 160(%rsp) ## 8-byte Spill - movq 16(%rcx), %rax - movq %rax, 152(%rsp) ## 8-byte Spill - movq 8(%rcx), %rax - movq %rax, 136(%rsp) ## 8-byte Spill - movq %rcx, %rsi - movq %rsi, 104(%rsp) ## 8-byte Spill - leaq 856(%rsp), %rdi - callq l_mulPv576x64 - addq 856(%rsp), %r14 - movq (%rsp), %rcx ## 8-byte Reload - adcq 864(%rsp), %rcx - adcq 872(%rsp), %r15 - adcq 880(%rsp), %r13 - adcq 888(%rsp), %rbx - movq %rbx, 120(%rsp) ## 8-byte Spill - adcq 896(%rsp), %rbp - movq %rbp, 112(%rsp) ## 8-byte Spill - movq 64(%rsp), %rax ## 8-byte Reload - adcq 904(%rsp), %rax - movq %rax, 64(%rsp) ## 8-byte Spill - movq 32(%rsp), %rax ## 8-byte Reload - adcq 912(%rsp), %rax - movq %rax, 32(%rsp) ## 8-byte Spill - movq 40(%rsp), %rax ## 8-byte Reload - adcq 920(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - adcq 928(%rsp), %r12 - movq %r12, (%rsp) ## 8-byte Spill - movq 24(%rsp), %rbp ## 8-byte Reload - adcq $0, %rbp - adcq $0, 8(%rsp) ## 8-byte Folded Spill - adcq $0, 16(%rsp) ## 8-byte Folded Spill - adcq $0, 48(%rsp) ## 8-byte Folded Spill - adcq $0, 72(%rsp) ## 8-byte Folded Spill - adcq $0, 80(%rsp) ## 8-byte Folded Spill - adcq $0, 56(%rsp) ## 8-byte Folded Spill - movq 88(%rsp), %r14 ## 8-byte Reload - adcq $0, %r14 - sbbq %r12, %r12 - movq %rcx, %rdx - movq %rcx, %rbx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 776(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - andl $1, %r12d - addq 776(%rsp), %rbx - adcq 784(%rsp), %r15 - adcq 792(%rsp), %r13 - movq %r13, 128(%rsp) ## 8-byte Spill - movq 120(%rsp), %rax ## 8-byte Reload - adcq 800(%rsp), %rax - movq %rax, 120(%rsp) ## 8-byte Spill - movq 112(%rsp), %rax ## 8-byte Reload - adcq 808(%rsp), %rax - movq %rax, 112(%rsp) ## 8-byte Spill - movq 64(%rsp), %rax ## 8-byte Reload - adcq 816(%rsp), %rax - movq %rax, 64(%rsp) ## 8-byte Spill - movq 32(%rsp), %rax ## 8-byte Reload - adcq 824(%rsp), %rax - movq %rax, 32(%rsp) ## 8-byte Spill - movq 40(%rsp), %rax ## 8-byte Reload - adcq 832(%rsp), %rax - movq %rax, 40(%rsp) ## 8-byte Spill - movq (%rsp), %rax ## 8-byte Reload - adcq 840(%rsp), %rax - movq 
%rax, (%rsp) ## 8-byte Spill - adcq 848(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq 8(%rsp), %r13 ## 8-byte Reload - adcq $0, %r13 - adcq $0, 16(%rsp) ## 8-byte Folded Spill - adcq $0, 48(%rsp) ## 8-byte Folded Spill - adcq $0, 72(%rsp) ## 8-byte Folded Spill - adcq $0, 80(%rsp) ## 8-byte Folded Spill - movq 56(%rsp), %rbx ## 8-byte Reload - adcq $0, %rbx - adcq $0, %r14 - movq %r14, 88(%rsp) ## 8-byte Spill - adcq $0, %r12 - movq %r15, %rdx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 696(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 696(%rsp), %r15 - movq 128(%rsp), %rcx ## 8-byte Reload - adcq 704(%rsp), %rcx - movq 120(%rsp), %rax ## 8-byte Reload - adcq 712(%rsp), %rax - movq %rax, 120(%rsp) ## 8-byte Spill - movq 112(%rsp), %rax ## 8-byte Reload - adcq 720(%rsp), %rax - movq %rax, 112(%rsp) ## 8-byte Spill - movq 64(%rsp), %rbp ## 8-byte Reload - adcq 728(%rsp), %rbp - movq 32(%rsp), %r14 ## 8-byte Reload - adcq 736(%rsp), %r14 - movq 40(%rsp), %r15 ## 8-byte Reload - adcq 744(%rsp), %r15 - movq (%rsp), %rax ## 8-byte Reload - adcq 752(%rsp), %rax - movq %rax, (%rsp) ## 8-byte Spill - movq 24(%rsp), %rax ## 8-byte Reload - adcq 760(%rsp), %rax - movq %rax, 24(%rsp) ## 8-byte Spill - adcq 768(%rsp), %r13 - movq %r13, 8(%rsp) ## 8-byte Spill - adcq $0, 16(%rsp) ## 8-byte Folded Spill - movq 48(%rsp), %r13 ## 8-byte Reload - adcq $0, %r13 - adcq $0, 72(%rsp) ## 8-byte Folded Spill - adcq $0, 80(%rsp) ## 8-byte Folded Spill - adcq $0, %rbx - movq %rbx, 56(%rsp) ## 8-byte Spill - adcq $0, 88(%rsp) ## 8-byte Folded Spill - adcq $0, %r12 - movq %rcx, %rbx - movq %rbx, %rdx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 616(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 616(%rsp), %rbx - movq 120(%rsp), %rax ## 8-byte Reload - adcq 624(%rsp), %rax - movq 112(%rsp), %rcx ## 8-byte Reload - adcq 632(%rsp), %rcx - movq %rcx, 112(%rsp) ## 8-byte Spill - adcq 640(%rsp), %rbp - movq %rbp, 64(%rsp) ## 8-byte Spill - adcq 648(%rsp), %r14 - movq %r14, 32(%rsp) ## 8-byte Spill - adcq 656(%rsp), %r15 - movq (%rsp), %r14 ## 8-byte Reload - adcq 664(%rsp), %r14 - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 672(%rsp), %rbp - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 680(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 688(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - adcq $0, %r13 - movq %r13, 48(%rsp) ## 8-byte Spill - adcq $0, 72(%rsp) ## 8-byte Folded Spill - adcq $0, 80(%rsp) ## 8-byte Folded Spill - adcq $0, 56(%rsp) ## 8-byte Folded Spill - adcq $0, 88(%rsp) ## 8-byte Folded Spill - adcq $0, %r12 - movq %rax, %rbx - movq %rbx, %rdx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 536(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 536(%rsp), %rbx - movq 112(%rsp), %rax ## 8-byte Reload - adcq 544(%rsp), %rax - movq 64(%rsp), %rcx ## 8-byte Reload - adcq 552(%rsp), %rcx - movq %rcx, 64(%rsp) ## 8-byte Spill - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 560(%rsp), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - adcq 568(%rsp), %r15 - movq %r15, 40(%rsp) ## 8-byte Spill - adcq 576(%rsp), %r14 - movq %r14, (%rsp) ## 8-byte Spill - adcq 584(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq 8(%rsp), %r13 ## 8-byte Reload - adcq 592(%rsp), %r13 - movq 16(%rsp), %r15 ## 8-byte Reload - adcq 600(%rsp), %r15 - movq 48(%rsp), %rbp ## 8-byte Reload - adcq 608(%rsp), %rbp - movq 72(%rsp), %rbx ## 8-byte Reload - adcq $0, 
%rbx - adcq $0, 80(%rsp) ## 8-byte Folded Spill - adcq $0, 56(%rsp) ## 8-byte Folded Spill - adcq $0, 88(%rsp) ## 8-byte Folded Spill - adcq $0, %r12 - movq %rax, %rdx - movq %rax, %r14 - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 456(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 456(%rsp), %r14 - movq 64(%rsp), %rax ## 8-byte Reload - adcq 464(%rsp), %rax - movq 32(%rsp), %rcx ## 8-byte Reload - adcq 472(%rsp), %rcx - movq %rcx, 32(%rsp) ## 8-byte Spill - movq 40(%rsp), %rcx ## 8-byte Reload - adcq 480(%rsp), %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - movq (%rsp), %rcx ## 8-byte Reload - adcq 488(%rsp), %rcx - movq %rcx, (%rsp) ## 8-byte Spill - movq 24(%rsp), %rcx ## 8-byte Reload - adcq 496(%rsp), %rcx - movq %rcx, 24(%rsp) ## 8-byte Spill - adcq 504(%rsp), %r13 - movq %r13, 8(%rsp) ## 8-byte Spill - adcq 512(%rsp), %r15 - movq %r15, 16(%rsp) ## 8-byte Spill - adcq 520(%rsp), %rbp - movq %rbp, 48(%rsp) ## 8-byte Spill - adcq 528(%rsp), %rbx - movq %rbx, 72(%rsp) ## 8-byte Spill - movq 80(%rsp), %r14 ## 8-byte Reload - adcq $0, %r14 - movq 56(%rsp), %r13 ## 8-byte Reload - adcq $0, %r13 - movq 88(%rsp), %rbx ## 8-byte Reload - adcq $0, %rbx - adcq $0, %r12 - movq %rax, %rdx - movq %rax, %r15 - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 376(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 376(%rsp), %r15 - movq 32(%rsp), %rax ## 8-byte Reload - adcq 384(%rsp), %rax - movq 40(%rsp), %rcx ## 8-byte Reload - adcq 392(%rsp), %rcx - movq %rcx, 40(%rsp) ## 8-byte Spill - movq (%rsp), %rcx ## 8-byte Reload - adcq 400(%rsp), %rcx - movq %rcx, (%rsp) ## 8-byte Spill - movq 24(%rsp), %rbp ## 8-byte Reload - adcq 408(%rsp), %rbp - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 416(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 424(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - movq 48(%rsp), %rcx ## 8-byte Reload - adcq 432(%rsp), %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - movq 72(%rsp), %r15 ## 8-byte Reload - adcq 440(%rsp), %r15 - adcq 448(%rsp), %r14 - movq %r14, 80(%rsp) ## 8-byte Spill - adcq $0, %r13 - movq %r13, %r14 - adcq $0, %rbx - movq %rbx, 88(%rsp) ## 8-byte Spill - adcq $0, %r12 - movq %rax, %rbx - movq %rbx, %rdx - imulq 96(%rsp), %rdx ## 8-byte Folded Reload - leaq 296(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 296(%rsp), %rbx - movq 40(%rsp), %rax ## 8-byte Reload - adcq 304(%rsp), %rax - movq (%rsp), %r13 ## 8-byte Reload - adcq 312(%rsp), %r13 - adcq 320(%rsp), %rbp - movq 8(%rsp), %rcx ## 8-byte Reload - adcq 328(%rsp), %rcx - movq %rcx, 8(%rsp) ## 8-byte Spill - movq 16(%rsp), %rcx ## 8-byte Reload - adcq 336(%rsp), %rcx - movq %rcx, 16(%rsp) ## 8-byte Spill - movq 48(%rsp), %rcx ## 8-byte Reload - adcq 344(%rsp), %rcx - movq %rcx, 48(%rsp) ## 8-byte Spill - adcq 352(%rsp), %r15 - movq %r15, 72(%rsp) ## 8-byte Spill - movq 80(%rsp), %r15 ## 8-byte Reload - adcq 360(%rsp), %r15 - adcq 368(%rsp), %r14 - movq %r14, 56(%rsp) ## 8-byte Spill - movq 88(%rsp), %r14 ## 8-byte Reload - adcq $0, %r14 - adcq $0, %r12 - movq 96(%rsp), %rdx ## 8-byte Reload - imulq %rax, %rdx - movq %rax, %rbx - leaq 216(%rsp), %rdi - movq 104(%rsp), %rsi ## 8-byte Reload - callq l_mulPv576x64 - addq 216(%rsp), %rbx - movq %r13, %rsi - adcq 224(%rsp), %rsi - movq %rsi, (%rsp) ## 8-byte Spill - adcq 232(%rsp), %rbp - movq %rbp, 24(%rsp) ## 8-byte Spill - movq 8(%rsp), %r9 ## 8-byte Reload - adcq 240(%rsp), %r9 - movq %r9, 8(%rsp) ## 
8-byte Spill - movq 16(%rsp), %r8 ## 8-byte Reload - adcq 248(%rsp), %r8 - movq %r8, 16(%rsp) ## 8-byte Spill - movq 48(%rsp), %rbx ## 8-byte Reload - adcq 256(%rsp), %rbx - movq 72(%rsp), %rax ## 8-byte Reload - adcq 264(%rsp), %rax - movq %r15, %rcx - adcq 272(%rsp), %rcx - movq 56(%rsp), %rdx ## 8-byte Reload - adcq 280(%rsp), %rdx - movq %rdx, 56(%rsp) ## 8-byte Spill - adcq 288(%rsp), %r14 - movq %r14, %r11 - adcq $0, %r12 - subq 144(%rsp), %rsi ## 8-byte Folded Reload - movq %rbp, %rdi - sbbq 136(%rsp), %rdi ## 8-byte Folded Reload - movq %r9, %rbp - sbbq 152(%rsp), %rbp ## 8-byte Folded Reload - movq %r8, %r13 - sbbq 160(%rsp), %r13 ## 8-byte Folded Reload - movq %rbx, %r15 - sbbq 168(%rsp), %r15 ## 8-byte Folded Reload - movq %rax, %r14 - sbbq 176(%rsp), %r14 ## 8-byte Folded Reload - movq %rcx, %r10 - sbbq 184(%rsp), %r10 ## 8-byte Folded Reload - movq %rdx, %r8 - sbbq 192(%rsp), %r8 ## 8-byte Folded Reload - movq %r11, %r9 - sbbq 200(%rsp), %r9 ## 8-byte Folded Reload - sbbq $0, %r12 - andl $1, %r12d - cmovneq %r11, %r9 - testb %r12b, %r12b - cmovneq (%rsp), %rsi ## 8-byte Folded Reload - movq 208(%rsp), %rdx ## 8-byte Reload - movq %rsi, (%rdx) - cmovneq 24(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 8(%rdx) - cmovneq 8(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 16(%rdx) - cmovneq 16(%rsp), %r13 ## 8-byte Folded Reload - movq %r13, 24(%rdx) - cmovneq %rbx, %r15 - movq %r15, 32(%rdx) - cmovneq %rax, %r14 - movq %r14, 40(%rdx) - cmovneq %rcx, %r10 - movq %r10, 48(%rdx) - cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload - movq %r8, 56(%rdx) - movq %r9, 64(%rdx) - addq $936, %rsp ## imm = 0x3A8 - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_addPre9L - .p2align 4, 0x90 -_mcl_fp_addPre9L: ## @mcl_fp_addPre9L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 64(%rdx), %r8 - movq 64(%rsi), %r15 - movq 56(%rsi), %r9 - movq 48(%rsi), %r10 - movq 40(%rsi), %r11 - movq 24(%rsi), %r12 - movq 32(%rsi), %r14 - movq (%rdx), %rbx - movq 8(%rdx), %rcx - addq (%rsi), %rbx - adcq 8(%rsi), %rcx - movq 16(%rdx), %rax - adcq 16(%rsi), %rax - adcq 24(%rdx), %r12 - movq 56(%rdx), %r13 - movq 48(%rdx), %rsi - movq 40(%rdx), %rbp - movq 32(%rdx), %rdx - movq %rbx, (%rdi) - movq %rcx, 8(%rdi) - movq %rax, 16(%rdi) - movq %r12, 24(%rdi) - adcq %r14, %rdx - movq %rdx, 32(%rdi) - adcq %r11, %rbp - movq %rbp, 40(%rdi) - adcq %r10, %rsi - movq %rsi, 48(%rdi) - adcq %r9, %r13 - movq %r13, 56(%rdi) - adcq %r8, %r15 - movq %r15, 64(%rdi) - sbbq %rax, %rax - andl $1, %eax - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_subPre9L - .p2align 4, 0x90 -_mcl_fp_subPre9L: ## @mcl_fp_subPre9L -## BB#0: - movq 32(%rdx), %r8 - movq (%rsi), %rcx - xorl %eax, %eax - subq (%rdx), %rcx - movq %rcx, (%rdi) - movq 8(%rsi), %rcx - sbbq 8(%rdx), %rcx - movq %rcx, 8(%rdi) - movq 16(%rsi), %rcx - sbbq 16(%rdx), %rcx - movq %rcx, 16(%rdi) - movq 24(%rsi), %rcx - sbbq 24(%rdx), %rcx - movq %rcx, 24(%rdi) - movq 32(%rsi), %rcx - sbbq %r8, %rcx - movq 40(%rdx), %r8 - movq %rcx, 32(%rdi) - movq 40(%rsi), %rcx - sbbq %r8, %rcx - movq 48(%rdx), %r8 - movq %rcx, 40(%rdi) - movq 48(%rsi), %rcx - sbbq %r8, %rcx - movq 56(%rdx), %r8 - movq %rcx, 48(%rdi) - movq 56(%rsi), %rcx - sbbq %r8, %rcx - movq %rcx, 56(%rdi) - movq 64(%rdx), %rcx - movq 64(%rsi), %rdx - sbbq %rcx, %rdx - movq %rdx, 64(%rdi) - sbbq $0, %rax - andl $1, %eax - retq - - .globl _mcl_fp_shr1_9L - .p2align 4, 0x90 
-_mcl_fp_shr1_9L: ## @mcl_fp_shr1_9L -## BB#0: - pushq %rbx - movq 64(%rsi), %r8 - movq 56(%rsi), %r9 - movq 48(%rsi), %r10 - movq 40(%rsi), %r11 - movq 32(%rsi), %rcx - movq 24(%rsi), %rdx - movq 16(%rsi), %rax - movq (%rsi), %rbx - movq 8(%rsi), %rsi - shrdq $1, %rsi, %rbx - movq %rbx, (%rdi) - shrdq $1, %rax, %rsi - movq %rsi, 8(%rdi) - shrdq $1, %rdx, %rax - movq %rax, 16(%rdi) - shrdq $1, %rcx, %rdx - movq %rdx, 24(%rdi) - shrdq $1, %r11, %rcx - movq %rcx, 32(%rdi) - shrdq $1, %r10, %r11 - movq %r11, 40(%rdi) - shrdq $1, %r9, %r10 - movq %r10, 48(%rdi) - shrdq $1, %r8, %r9 - movq %r9, 56(%rdi) - shrq %r8 - movq %r8, 64(%rdi) - popq %rbx - retq - - .globl _mcl_fp_add9L - .p2align 4, 0x90 -_mcl_fp_add9L: ## @mcl_fp_add9L -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 64(%rdx), %r12 - movq 64(%rsi), %r8 - movq 56(%rsi), %r13 - movq 48(%rsi), %r9 - movq 40(%rsi), %r10 - movq 24(%rsi), %r14 - movq 32(%rsi), %r11 - movq (%rdx), %rbx - movq 8(%rdx), %r15 - addq (%rsi), %rbx - adcq 8(%rsi), %r15 - movq 16(%rdx), %rax - adcq 16(%rsi), %rax - adcq 24(%rdx), %r14 - adcq 32(%rdx), %r11 - adcq 40(%rdx), %r10 - movq 56(%rdx), %rsi - adcq 48(%rdx), %r9 - movq %rbx, (%rdi) - movq %r15, 8(%rdi) - movq %rax, 16(%rdi) - movq %r14, 24(%rdi) - movq %r11, 32(%rdi) - movq %r10, 40(%rdi) - movq %r9, 48(%rdi) - adcq %r13, %rsi - movq %rsi, 56(%rdi) - adcq %r12, %r8 - movq %r8, 64(%rdi) - sbbq %rdx, %rdx - andl $1, %edx - subq (%rcx), %rbx - sbbq 8(%rcx), %r15 - sbbq 16(%rcx), %rax - sbbq 24(%rcx), %r14 - sbbq 32(%rcx), %r11 - sbbq 40(%rcx), %r10 - sbbq 48(%rcx), %r9 - sbbq 56(%rcx), %rsi - sbbq 64(%rcx), %r8 - sbbq $0, %rdx - testb $1, %dl - jne LBB136_2 -## BB#1: ## %nocarry - movq %rbx, (%rdi) - movq %r15, 8(%rdi) - movq %rax, 16(%rdi) - movq %r14, 24(%rdi) - movq %r11, 32(%rdi) - movq %r10, 40(%rdi) - movq %r9, 48(%rdi) - movq %rsi, 56(%rdi) - movq %r8, 64(%rdi) -LBB136_2: ## %carry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_addNF9L - .p2align 4, 0x90 -_mcl_fp_addNF9L: ## @mcl_fp_addNF9L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rdi, %r8 - movq 64(%rdx), %r10 - movq 56(%rdx), %r11 - movq 48(%rdx), %r9 - movq 40(%rdx), %rax - movq 32(%rdx), %rdi - movq 24(%rdx), %rbp - movq 16(%rdx), %r15 - movq (%rdx), %rbx - movq 8(%rdx), %r13 - addq (%rsi), %rbx - adcq 8(%rsi), %r13 - adcq 16(%rsi), %r15 - adcq 24(%rsi), %rbp - movq %rbp, -24(%rsp) ## 8-byte Spill - adcq 32(%rsi), %rdi - movq %rdi, -40(%rsp) ## 8-byte Spill - adcq 40(%rsi), %rax - movq %rax, -32(%rsp) ## 8-byte Spill - adcq 48(%rsi), %r9 - movq %r9, %rdi - movq %rdi, -16(%rsp) ## 8-byte Spill - adcq 56(%rsi), %r11 - movq %r11, %rax - movq %rax, -8(%rsp) ## 8-byte Spill - adcq 64(%rsi), %r10 - movq %r10, %r9 - movq %rbx, %rsi - subq (%rcx), %rsi - movq %r13, %rdx - sbbq 8(%rcx), %rdx - movq %r15, %r12 - sbbq 16(%rcx), %r12 - sbbq 24(%rcx), %rbp - movq -40(%rsp), %r14 ## 8-byte Reload - sbbq 32(%rcx), %r14 - movq -32(%rsp), %r11 ## 8-byte Reload - sbbq 40(%rcx), %r11 - movq %rdi, %r10 - sbbq 48(%rcx), %r10 - movq %rax, %rdi - sbbq 56(%rcx), %rdi - movq %r9, %rax - sbbq 64(%rcx), %rax - movq %rax, %rcx - sarq $63, %rcx - cmovsq %rbx, %rsi - movq %rsi, (%r8) - cmovsq %r13, %rdx - movq %rdx, 8(%r8) - cmovsq %r15, %r12 - movq %r12, 16(%r8) - cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload - movq %rbp, 24(%r8) - cmovsq -40(%rsp), %r14 ## 8-byte Folded Reload - movq %r14, 32(%r8) - cmovsq -32(%rsp), %r11 ## 8-byte Folded Reload - 
movq %r11, 40(%r8) - cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload - movq %r10, 48(%r8) - cmovsq -8(%rsp), %rdi ## 8-byte Folded Reload - movq %rdi, 56(%r8) - cmovsq %r9, %rax - movq %rax, 64(%r8) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fp_sub9L - .p2align 4, 0x90 -_mcl_fp_sub9L: ## @mcl_fp_sub9L -## BB#0: - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq 64(%rdx), %r13 - movq (%rsi), %rax - movq 8(%rsi), %r9 - xorl %ebx, %ebx - subq (%rdx), %rax - sbbq 8(%rdx), %r9 - movq 16(%rsi), %r10 - sbbq 16(%rdx), %r10 - movq 24(%rsi), %r11 - sbbq 24(%rdx), %r11 - movq 32(%rsi), %r12 - sbbq 32(%rdx), %r12 - movq 40(%rsi), %r14 - sbbq 40(%rdx), %r14 - movq 48(%rsi), %r15 - sbbq 48(%rdx), %r15 - movq 64(%rsi), %r8 - movq 56(%rsi), %rsi - sbbq 56(%rdx), %rsi - movq %rax, (%rdi) - movq %r9, 8(%rdi) - movq %r10, 16(%rdi) - movq %r11, 24(%rdi) - movq %r12, 32(%rdi) - movq %r14, 40(%rdi) - movq %r15, 48(%rdi) - movq %rsi, 56(%rdi) - sbbq %r13, %r8 - movq %r8, 64(%rdi) - sbbq $0, %rbx - testb $1, %bl - je LBB138_2 -## BB#1: ## %carry - addq (%rcx), %rax - movq %rax, (%rdi) - movq 8(%rcx), %rax - adcq %r9, %rax - movq %rax, 8(%rdi) - movq 16(%rcx), %rax - adcq %r10, %rax - movq %rax, 16(%rdi) - movq 24(%rcx), %rax - adcq %r11, %rax - movq %rax, 24(%rdi) - movq 32(%rcx), %rax - adcq %r12, %rax - movq %rax, 32(%rdi) - movq 40(%rcx), %rax - adcq %r14, %rax - movq %rax, 40(%rdi) - movq 48(%rcx), %rax - adcq %r15, %rax - movq %rax, 48(%rdi) - movq 56(%rcx), %rax - adcq %rsi, %rax - movq %rax, 56(%rdi) - movq 64(%rcx), %rax - adcq %r8, %rax - movq %rax, 64(%rdi) -LBB138_2: ## %nocarry - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - retq - - .globl _mcl_fp_subNF9L - .p2align 4, 0x90 -_mcl_fp_subNF9L: ## @mcl_fp_subNF9L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r11 - movq %rdi, %rbx - movq 64(%rsi), %rax - movq %rax, -40(%rsp) ## 8-byte Spill - movdqu (%rdx), %xmm1 - movdqu 16(%rdx), %xmm2 - movdqu 32(%rdx), %xmm3 - movdqu 48(%rdx), %xmm4 - pshufd $78, %xmm4, %xmm0 ## xmm0 = xmm4[2,3,0,1] - movd %xmm0, %r12 - movdqu (%rsi), %xmm5 - movdqu 16(%rsi), %xmm6 - movdqu 32(%rsi), %xmm7 - movdqu 48(%rsi), %xmm8 - pshufd $78, %xmm8, %xmm0 ## xmm0 = xmm8[2,3,0,1] - movd %xmm0, %rax - movd %xmm4, %r10 - pshufd $78, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,0,1] - movd %xmm0, %r9 - pshufd $78, %xmm7, %xmm0 ## xmm0 = xmm7[2,3,0,1] - movd %xmm3, %r8 - pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1] - movd %xmm3, %rcx - pshufd $78, %xmm6, %xmm3 ## xmm3 = xmm6[2,3,0,1] - movd %xmm2, %rbp - pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1] - movd %xmm2, %rsi - pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1] - movd %xmm1, %rdi - movd %xmm5, %r15 - subq %rdi, %r15 - movd %xmm2, %r14 - sbbq %rsi, %r14 - movd %xmm6, %r13 - sbbq %rbp, %r13 - movd %xmm3, %rbp - sbbq %rcx, %rbp - movd %xmm7, %rcx - sbbq %r8, %rcx - movq %rcx, -16(%rsp) ## 8-byte Spill - movd %xmm0, %rcx - sbbq %r9, %rcx - movq %rcx, -24(%rsp) ## 8-byte Spill - movd %xmm8, %rcx - sbbq %r10, %rcx - movq %rcx, -32(%rsp) ## 8-byte Spill - sbbq %r12, %rax - movq %rax, -8(%rsp) ## 8-byte Spill - movq -40(%rsp), %rsi ## 8-byte Reload - sbbq 64(%rdx), %rsi - movq %rsi, -40(%rsp) ## 8-byte Spill - movq %rsi, %rax - sarq $63, %rax - movq %rax, %rcx - shldq $1, %rsi, %rcx - movq 24(%r11), %r9 - andq %rcx, %r9 - movq 8(%r11), %rdi - andq %rcx, %rdi - andq (%r11), %rcx - movq 64(%r11), %r12 - andq %rax, %r12 - movq 56(%r11), %r10 - andq %rax, 
%r10 - rolq %rax - movq 48(%r11), %r8 - andq %rax, %r8 - movq 40(%r11), %rsi - andq %rax, %rsi - movq 32(%r11), %rdx - andq %rax, %rdx - andq 16(%r11), %rax - addq %r15, %rcx - adcq %r14, %rdi - movq %rcx, (%rbx) - adcq %r13, %rax - movq %rdi, 8(%rbx) - adcq %rbp, %r9 - movq %rax, 16(%rbx) - movq %r9, 24(%rbx) - adcq -16(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, 32(%rbx) - adcq -24(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 40(%rbx) - adcq -32(%rsp), %r8 ## 8-byte Folded Reload - movq %r8, 48(%rbx) - adcq -8(%rsp), %r10 ## 8-byte Folded Reload - movq %r10, 56(%rbx) - adcq -40(%rsp), %r12 ## 8-byte Folded Reload - movq %r12, 64(%rbx) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_add9L - .p2align 4, 0x90 -_mcl_fpDbl_add9L: ## @mcl_fpDbl_add9L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r15 - movq 136(%rdx), %rax - movq %rax, -48(%rsp) ## 8-byte Spill - movq 128(%rdx), %rax - movq %rax, -40(%rsp) ## 8-byte Spill - movq 120(%rdx), %r10 - movq 112(%rdx), %r11 - movq 24(%rsi), %rcx - movq 32(%rsi), %r14 - movq 16(%rdx), %rbp - movq (%rdx), %rax - movq 8(%rdx), %rbx - addq (%rsi), %rax - adcq 8(%rsi), %rbx - adcq 16(%rsi), %rbp - adcq 24(%rdx), %rcx - adcq 32(%rdx), %r14 - movq 104(%rdx), %r9 - movq 96(%rdx), %r13 - movq %rax, (%rdi) - movq 88(%rdx), %r8 - movq %rbx, 8(%rdi) - movq 80(%rdx), %r12 - movq %rbp, 16(%rdi) - movq 40(%rdx), %rax - movq %rcx, 24(%rdi) - movq 40(%rsi), %rbp - adcq %rax, %rbp - movq 48(%rdx), %rcx - movq %r14, 32(%rdi) - movq 48(%rsi), %rax - adcq %rcx, %rax - movq 56(%rdx), %r14 - movq %rbp, 40(%rdi) - movq 56(%rsi), %rbp - adcq %r14, %rbp - movq 72(%rdx), %rcx - movq 64(%rdx), %rdx - movq %rax, 48(%rdi) - movq 64(%rsi), %rax - adcq %rdx, %rax - movq 136(%rsi), %rbx - movq %rbp, 56(%rdi) - movq 72(%rsi), %rbp - adcq %rcx, %rbp - movq 128(%rsi), %rcx - movq %rax, 64(%rdi) - movq 80(%rsi), %rdx - adcq %r12, %rdx - movq 88(%rsi), %r12 - adcq %r8, %r12 - movq 96(%rsi), %r14 - adcq %r13, %r14 - movq %r14, -8(%rsp) ## 8-byte Spill - movq 104(%rsi), %rax - adcq %r9, %rax - movq %rax, -32(%rsp) ## 8-byte Spill - movq 120(%rsi), %rax - movq 112(%rsi), %rsi - adcq %r11, %rsi - movq %rsi, -24(%rsp) ## 8-byte Spill - adcq %r10, %rax - movq %rax, -16(%rsp) ## 8-byte Spill - adcq -40(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -40(%rsp) ## 8-byte Spill - adcq -48(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, -48(%rsp) ## 8-byte Spill - sbbq %r9, %r9 - andl $1, %r9d - movq %rbp, %r10 - subq (%r15), %r10 - movq %rdx, %r11 - sbbq 8(%r15), %r11 - movq %r12, %rbx - sbbq 16(%r15), %rbx - sbbq 24(%r15), %r14 - movq -32(%rsp), %r13 ## 8-byte Reload - sbbq 32(%r15), %r13 - movq -24(%rsp), %rsi ## 8-byte Reload - sbbq 40(%r15), %rsi - movq -16(%rsp), %rax ## 8-byte Reload - sbbq 48(%r15), %rax - sbbq 56(%r15), %rcx - movq -48(%rsp), %r8 ## 8-byte Reload - sbbq 64(%r15), %r8 - sbbq $0, %r9 - andl $1, %r9d - cmovneq %rbp, %r10 - movq %r10, 72(%rdi) - testb %r9b, %r9b - cmovneq %rdx, %r11 - movq %r11, 80(%rdi) - cmovneq %r12, %rbx - movq %rbx, 88(%rdi) - cmovneq -8(%rsp), %r14 ## 8-byte Folded Reload - movq %r14, 96(%rdi) - cmovneq -32(%rsp), %r13 ## 8-byte Folded Reload - movq %r13, 104(%rdi) - cmovneq -24(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 112(%rdi) - cmovneq -16(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 120(%rdi) - cmovneq -40(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 128(%rdi) - cmovneq -48(%rsp), %r8 ## 8-byte Folded Reload - movq 
%r8, 136(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - .globl _mcl_fpDbl_sub9L - .p2align 4, 0x90 -_mcl_fpDbl_sub9L: ## @mcl_fpDbl_sub9L -## BB#0: - pushq %rbp - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %rbx - movq %rcx, %r14 - movq 136(%rdx), %rax - movq %rax, -24(%rsp) ## 8-byte Spill - movq 128(%rdx), %rax - movq %rax, -32(%rsp) ## 8-byte Spill - movq 120(%rdx), %rax - movq %rax, -40(%rsp) ## 8-byte Spill - movq 16(%rsi), %r11 - movq (%rsi), %r12 - movq 8(%rsi), %r13 - xorl %r9d, %r9d - subq (%rdx), %r12 - sbbq 8(%rdx), %r13 - sbbq 16(%rdx), %r11 - movq 24(%rsi), %rbx - sbbq 24(%rdx), %rbx - movq 32(%rsi), %rbp - sbbq 32(%rdx), %rbp - movq 112(%rdx), %r10 - movq 104(%rdx), %rcx - movq %r12, (%rdi) - movq 96(%rdx), %rax - movq %r13, 8(%rdi) - movq 88(%rdx), %r13 - movq %r11, 16(%rdi) - movq 40(%rdx), %r11 - movq %rbx, 24(%rdi) - movq 40(%rsi), %rbx - sbbq %r11, %rbx - movq 48(%rdx), %r11 - movq %rbp, 32(%rdi) - movq 48(%rsi), %rbp - sbbq %r11, %rbp - movq 56(%rdx), %r11 - movq %rbx, 40(%rdi) - movq 56(%rsi), %rbx - sbbq %r11, %rbx - movq 64(%rdx), %r11 - movq %rbp, 48(%rdi) - movq 64(%rsi), %rbp - sbbq %r11, %rbp - movq 80(%rdx), %r8 - movq 72(%rdx), %r11 - movq %rbx, 56(%rdi) - movq 72(%rsi), %r15 - sbbq %r11, %r15 - movq 136(%rsi), %rdx - movq %rbp, 64(%rdi) - movq 80(%rsi), %rbp - sbbq %r8, %rbp - movq 88(%rsi), %r12 - sbbq %r13, %r12 - movq 96(%rsi), %r13 - sbbq %rax, %r13 - movq 104(%rsi), %rax - sbbq %rcx, %rax - movq %rax, -16(%rsp) ## 8-byte Spill - movq 112(%rsi), %rax - sbbq %r10, %rax - movq %rax, -8(%rsp) ## 8-byte Spill - movq 128(%rsi), %rax - movq 120(%rsi), %rcx - sbbq -40(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, -40(%rsp) ## 8-byte Spill - sbbq -32(%rsp), %rax ## 8-byte Folded Reload - movq %rax, -32(%rsp) ## 8-byte Spill - sbbq -24(%rsp), %rdx ## 8-byte Folded Reload - movq %rdx, -24(%rsp) ## 8-byte Spill - movl $0, %r8d - sbbq $0, %r8 - andl $1, %r8d - movq (%r14), %r10 - cmoveq %r9, %r10 - testb %r8b, %r8b - movq 16(%r14), %r8 - cmoveq %r9, %r8 - movq 8(%r14), %rdx - cmoveq %r9, %rdx - movq 64(%r14), %rbx - cmoveq %r9, %rbx - movq 56(%r14), %r11 - cmoveq %r9, %r11 - movq 48(%r14), %rsi - cmoveq %r9, %rsi - movq 40(%r14), %rcx - cmoveq %r9, %rcx - movq 32(%r14), %rax - cmoveq %r9, %rax - cmovneq 24(%r14), %r9 - addq %r15, %r10 - adcq %rbp, %rdx - movq %r10, 72(%rdi) - adcq %r12, %r8 - movq %rdx, 80(%rdi) - adcq %r13, %r9 - movq %r8, 88(%rdi) - movq %r9, 96(%rdi) - adcq -16(%rsp), %rax ## 8-byte Folded Reload - movq %rax, 104(%rdi) - adcq -8(%rsp), %rcx ## 8-byte Folded Reload - movq %rcx, 112(%rdi) - adcq -40(%rsp), %rsi ## 8-byte Folded Reload - movq %rsi, 120(%rdi) - adcq -32(%rsp), %r11 ## 8-byte Folded Reload - movq %r11, 128(%rdi) - adcq -24(%rsp), %rbx ## 8-byte Folded Reload - movq %rbx, 136(%rdi) - popq %rbx - popq %r12 - popq %r13 - popq %r14 - popq %r15 - popq %rbp - retq - - -.subsections_via_symbols diff --git a/vendor/github.com/dexon-foundation/mcl/src/asm/x86.bmi2.s b/vendor/github.com/dexon-foundation/mcl/src/asm/x86.bmi2.s deleted file mode 100644 index 77729c530..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/asm/x86.bmi2.s +++ /dev/null @@ -1,71547 +0,0 @@ - .text - .file "" - .globl makeNIST_P192Lbmi2 - .align 16, 0x90 - .type makeNIST_P192Lbmi2,@function -makeNIST_P192Lbmi2: # @makeNIST_P192Lbmi2 -# BB#0: - movl 4(%esp), %eax - movl $-1, 20(%eax) - movl $-1, 16(%eax) - movl $-1, 12(%eax) - movl $-2, 8(%eax) - movl $-1, 4(%eax) - movl $-1, (%eax) - retl $4 -.Lfunc_end0: 
- .size makeNIST_P192Lbmi2, .Lfunc_end0-makeNIST_P192Lbmi2 - - .globl mcl_fpDbl_mod_NIST_P192Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mod_NIST_P192Lbmi2,@function -mcl_fpDbl_mod_NIST_P192Lbmi2: # @mcl_fpDbl_mod_NIST_P192Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $32, %esp - movl 56(%esp), %eax - movl 32(%eax), %esi - movl %esi, 12(%esp) # 4-byte Spill - movl 24(%eax), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 28(%eax), %edi - movl %edi, 16(%esp) # 4-byte Spill - xorl %edx, %edx - movl (%eax), %ebx - addl %ecx, %ebx - movl %ebx, 24(%esp) # 4-byte Spill - movl 4(%eax), %ecx - adcl %edi, %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 8(%eax), %ebp - adcl %esi, %ebp - movl 36(%eax), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 12(%eax), %esi - adcl %ecx, %esi - movl 40(%eax), %ebx - movl %ebx, 4(%esp) # 4-byte Spill - movl 16(%eax), %ecx - adcl %ebx, %ecx - movl 44(%eax), %edi - movl %edi, (%esp) # 4-byte Spill - movl 20(%eax), %eax - adcl %edi, %eax - adcl $0, %edx - sbbl %edi, %edi - andl $1, %edi - addl %ebx, 24(%esp) # 4-byte Folded Spill - movl (%esp), %ebx # 4-byte Reload - adcl %ebx, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ebp # 4-byte Folded Reload - adcl 16(%esp), %esi # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - adcl 8(%esp), %eax # 4-byte Folded Reload - adcl $0, %edx - adcl $0, %edi - addl 4(%esp), %ebp # 4-byte Folded Reload - adcl %ebx, %esi - adcl $0, %ecx - adcl $0, %eax - adcl $0, %edx - adcl $0, %edi - addl %edx, 24(%esp) # 4-byte Folded Spill - adcl %edi, 28(%esp) # 4-byte Folded Spill - adcl %ebp, %edx - movl %edx, 20(%esp) # 4-byte Spill - adcl %esi, %edi - adcl $0, %ecx - adcl $0, %eax - sbbl %ebx, %ebx - andl $1, %ebx - movl 24(%esp), %esi # 4-byte Reload - addl $1, %esi - movl 28(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $1, %edx - movl %edx, 8(%esp) # 4-byte Spill - movl %edi, %edx - adcl $0, %edx - movl %edx, 12(%esp) # 4-byte Spill - movl %ecx, %edx - adcl $0, %edx - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, %edx - adcl $0, %edx - adcl $-1, %ebx - andl $1, %ebx - jne .LBB1_2 -# BB#1: - movl %edx, %eax -.LBB1_2: - testb %bl, %bl - movl 24(%esp), %edx # 4-byte Reload - jne .LBB1_4 -# BB#3: - movl %esi, %edx -.LBB1_4: - movl 52(%esp), %esi - movl %edx, (%esi) - movl 20(%esp), %edx # 4-byte Reload - movl 28(%esp), %ebx # 4-byte Reload - jne .LBB1_6 -# BB#5: - movl %ebp, %ebx -.LBB1_6: - movl %ebx, 4(%esi) - jne .LBB1_8 -# BB#7: - movl 8(%esp), %edx # 4-byte Reload -.LBB1_8: - movl %edx, 8(%esi) - jne .LBB1_10 -# BB#9: - movl 12(%esp), %edi # 4-byte Reload -.LBB1_10: - movl %edi, 12(%esi) - jne .LBB1_12 -# BB#11: - movl 16(%esp), %ecx # 4-byte Reload -.LBB1_12: - movl %ecx, 16(%esi) - movl %eax, 20(%esi) - addl $32, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end1: - .size mcl_fpDbl_mod_NIST_P192Lbmi2, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192Lbmi2 - - .globl mcl_fp_sqr_NIST_P192Lbmi2 - .align 16, 0x90 - .type mcl_fp_sqr_NIST_P192Lbmi2,@function -mcl_fp_sqr_NIST_P192Lbmi2: # @mcl_fp_sqr_NIST_P192Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $92, %esp - calll .L2$pb -.L2$pb: - popl %ebx -.Ltmp0: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L2$pb), %ebx - movl 116(%esp), %eax - movl %eax, 4(%esp) - leal 44(%esp), %eax - movl %eax, (%esp) - calll mcl_fpDbl_sqrPre6Lbmi2@PLT - xorl %edi, %edi - movl 76(%esp), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 68(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 72(%esp), %edx - movl %edx, 
28(%esp) # 4-byte Spill - movl 44(%esp), %esi - addl %eax, %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax - adcl %edx, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 52(%esp), %ebp - adcl %ecx, %ebp - movl 80(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 56(%esp), %esi - adcl %eax, %esi - movl 84(%esp), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 60(%esp), %ecx - adcl %ebx, %ecx - movl 88(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 64(%esp), %edx - adcl %eax, %edx - adcl $0, %edi - sbbl %eax, %eax - andl $1, %eax - addl %ebx, 36(%esp) # 4-byte Folded Spill - movl 12(%esp), %ebx # 4-byte Reload - adcl %ebx, 40(%esp) # 4-byte Folded Spill - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %esi # 4-byte Folded Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - adcl 20(%esp), %edx # 4-byte Folded Reload - adcl $0, %edi - adcl $0, %eax - addl 16(%esp), %ebp # 4-byte Folded Reload - adcl %ebx, %esi - adcl $0, %ecx - adcl $0, %edx - adcl $0, %edi - adcl $0, %eax - addl %edi, 36(%esp) # 4-byte Folded Spill - adcl %eax, 40(%esp) # 4-byte Folded Spill - adcl %ebp, %edi - adcl %esi, %eax - adcl $0, %ecx - adcl $0, %edx - sbbl %ebx, %ebx - andl $1, %ebx - movl 36(%esp), %esi # 4-byte Reload - addl $1, %esi - movl 40(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl %edi, %ebp - adcl $1, %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl %eax, %ebp - adcl $0, %ebp - movl %ebp, 28(%esp) # 4-byte Spill - movl %ecx, %ebp - adcl $0, %ebp - movl %ebp, 32(%esp) # 4-byte Spill - movl %edx, %ebp - adcl $0, %ebp - adcl $-1, %ebx - andl $1, %ebx - jne .LBB2_2 -# BB#1: - movl %ebp, %edx -.LBB2_2: - testb %bl, %bl - movl 36(%esp), %ebx # 4-byte Reload - jne .LBB2_4 -# BB#3: - movl %esi, %ebx -.LBB2_4: - movl 112(%esp), %esi - movl %ebx, (%esi) - movl 40(%esp), %ebx # 4-byte Reload - jne .LBB2_6 -# BB#5: - movl 20(%esp), %ebx # 4-byte Reload -.LBB2_6: - movl %ebx, 4(%esi) - jne .LBB2_8 -# BB#7: - movl 24(%esp), %edi # 4-byte Reload -.LBB2_8: - movl %edi, 8(%esi) - jne .LBB2_10 -# BB#9: - movl 28(%esp), %eax # 4-byte Reload -.LBB2_10: - movl %eax, 12(%esi) - jne .LBB2_12 -# BB#11: - movl 32(%esp), %ecx # 4-byte Reload -.LBB2_12: - movl %ecx, 16(%esi) - movl %edx, 20(%esi) - addl $92, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end2: - .size mcl_fp_sqr_NIST_P192Lbmi2, .Lfunc_end2-mcl_fp_sqr_NIST_P192Lbmi2 - - .globl mcl_fp_mulNIST_P192Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulNIST_P192Lbmi2,@function -mcl_fp_mulNIST_P192Lbmi2: # @mcl_fp_mulNIST_P192Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $92, %esp - calll .L3$pb -.L3$pb: - popl %ebx -.Ltmp1: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L3$pb), %ebx - movl 120(%esp), %eax - movl %eax, 8(%esp) - movl 116(%esp), %eax - movl %eax, 4(%esp) - leal 44(%esp), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre6Lbmi2@PLT - xorl %edi, %edi - movl 76(%esp), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 68(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 72(%esp), %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 44(%esp), %esi - addl %eax, %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax - adcl %edx, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 52(%esp), %ebp - adcl %ecx, %ebp - movl 80(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 56(%esp), %esi - adcl %eax, %esi - movl 84(%esp), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 60(%esp), %ecx - adcl %ebx, %ecx - movl 88(%esp), %eax - movl %eax, 
12(%esp) # 4-byte Spill - movl 64(%esp), %edx - adcl %eax, %edx - adcl $0, %edi - sbbl %eax, %eax - andl $1, %eax - addl %ebx, 36(%esp) # 4-byte Folded Spill - movl 12(%esp), %ebx # 4-byte Reload - adcl %ebx, 40(%esp) # 4-byte Folded Spill - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %esi # 4-byte Folded Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - adcl 20(%esp), %edx # 4-byte Folded Reload - adcl $0, %edi - adcl $0, %eax - addl 16(%esp), %ebp # 4-byte Folded Reload - adcl %ebx, %esi - adcl $0, %ecx - adcl $0, %edx - adcl $0, %edi - adcl $0, %eax - addl %edi, 36(%esp) # 4-byte Folded Spill - adcl %eax, 40(%esp) # 4-byte Folded Spill - adcl %ebp, %edi - adcl %esi, %eax - adcl $0, %ecx - adcl $0, %edx - sbbl %ebx, %ebx - andl $1, %ebx - movl 36(%esp), %esi # 4-byte Reload - addl $1, %esi - movl 40(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl %edi, %ebp - adcl $1, %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl %eax, %ebp - adcl $0, %ebp - movl %ebp, 28(%esp) # 4-byte Spill - movl %ecx, %ebp - adcl $0, %ebp - movl %ebp, 32(%esp) # 4-byte Spill - movl %edx, %ebp - adcl $0, %ebp - adcl $-1, %ebx - andl $1, %ebx - jne .LBB3_2 -# BB#1: - movl %ebp, %edx -.LBB3_2: - testb %bl, %bl - movl 36(%esp), %ebx # 4-byte Reload - jne .LBB3_4 -# BB#3: - movl %esi, %ebx -.LBB3_4: - movl 112(%esp), %esi - movl %ebx, (%esi) - movl 40(%esp), %ebx # 4-byte Reload - jne .LBB3_6 -# BB#5: - movl 20(%esp), %ebx # 4-byte Reload -.LBB3_6: - movl %ebx, 4(%esi) - jne .LBB3_8 -# BB#7: - movl 24(%esp), %edi # 4-byte Reload -.LBB3_8: - movl %edi, 8(%esi) - jne .LBB3_10 -# BB#9: - movl 28(%esp), %eax # 4-byte Reload -.LBB3_10: - movl %eax, 12(%esi) - jne .LBB3_12 -# BB#11: - movl 32(%esp), %ecx # 4-byte Reload -.LBB3_12: - movl %ecx, 16(%esi) - movl %edx, 20(%esi) - addl $92, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end3: - .size mcl_fp_mulNIST_P192Lbmi2, .Lfunc_end3-mcl_fp_mulNIST_P192Lbmi2 - - .globl mcl_fpDbl_mod_NIST_P521Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mod_NIST_P521Lbmi2,@function -mcl_fpDbl_mod_NIST_P521Lbmi2: # @mcl_fpDbl_mod_NIST_P521Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $60, %esp - movl 84(%esp), %ecx - movl 124(%ecx), %edx - movl 128(%ecx), %esi - movl %esi, %eax - shldl $23, %edx, %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 120(%ecx), %eax - shldl $23, %eax, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 116(%ecx), %edx - shldl $23, %edx, %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 112(%ecx), %eax - shldl $23, %eax, %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 108(%ecx), %edx - shldl $23, %edx, %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 104(%ecx), %eax - shldl $23, %eax, %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 100(%ecx), %edx - shldl $23, %edx, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 96(%ecx), %eax - shldl $23, %eax, %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 92(%ecx), %edx - shldl $23, %edx, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 88(%ecx), %eax - shldl $23, %eax, %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 84(%ecx), %edi - shldl $23, %edi, %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 80(%ecx), %edx - shldl $23, %edx, %edi - movl 76(%ecx), %eax - shldl $23, %eax, %edx - movl 72(%ecx), %ebx - shldl $23, %ebx, %eax - movl 68(%ecx), %ebp - shldl $23, %ebp, %ebx - shrl $9, %esi - movl %esi, 8(%esp) # 4-byte Spill - movl 64(%ecx), %esi - shldl $23, %esi, %ebp - andl $511, %esi # imm = 0x1FF - addl (%ecx), %ebp - 
adcl 4(%ecx), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - adcl 8(%ecx), %eax - adcl 12(%ecx), %edx - adcl 16(%ecx), %edi - movl 28(%esp), %ebx # 4-byte Reload - adcl 20(%ecx), %ebx - movl %ebx, 28(%esp) # 4-byte Spill - movl 32(%esp), %ebx # 4-byte Reload - adcl 24(%ecx), %ebx - movl %ebx, 32(%esp) # 4-byte Spill - movl 36(%esp), %ebx # 4-byte Reload - adcl 28(%ecx), %ebx - movl %ebx, 36(%esp) # 4-byte Spill - movl 40(%esp), %ebx # 4-byte Reload - adcl 32(%ecx), %ebx - movl %ebx, 40(%esp) # 4-byte Spill - movl 44(%esp), %ebx # 4-byte Reload - adcl 36(%ecx), %ebx - movl %ebx, 44(%esp) # 4-byte Spill - movl 48(%esp), %ebx # 4-byte Reload - adcl 40(%ecx), %ebx - movl %ebx, 48(%esp) # 4-byte Spill - movl 24(%esp), %ebx # 4-byte Reload - adcl 44(%ecx), %ebx - movl %ebx, 24(%esp) # 4-byte Spill - movl 52(%esp), %ebx # 4-byte Reload - adcl 48(%ecx), %ebx - movl %ebx, 52(%esp) # 4-byte Spill - movl 20(%esp), %ebx # 4-byte Reload - adcl 52(%ecx), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - movl 56(%esp), %ebx # 4-byte Reload - adcl 56(%ecx), %ebx - movl %ebx, 56(%esp) # 4-byte Spill - movl 12(%esp), %ebx # 4-byte Reload - adcl 60(%ecx), %ebx - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - movl %esi, %ecx - shrl $9, %ecx - andl $1, %ecx - addl %ebp, %ecx - adcl $0, 16(%esp) # 4-byte Folded Spill - adcl $0, %eax - movl %eax, (%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 4(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 8(%esp) # 4-byte Spill - movl %edi, %esi - adcl $0, 28(%esp) # 4-byte Folded Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - adcl $0, 48(%esp) # 4-byte Folded Spill - adcl $0, 24(%esp) # 4-byte Folded Spill - adcl $0, 52(%esp) # 4-byte Folded Spill - adcl $0, 20(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl %ebx, %ebp - adcl $0, %ebp - movl 12(%esp), %ebx # 4-byte Reload - adcl $0, %ebx - movl %ecx, %edi - andl %eax, %edi - andl %edx, %edi - andl %esi, %edi - andl 28(%esp), %edi # 4-byte Folded Reload - andl 32(%esp), %edi # 4-byte Folded Reload - andl 36(%esp), %edi # 4-byte Folded Reload - andl 40(%esp), %edi # 4-byte Folded Reload - andl 44(%esp), %edi # 4-byte Folded Reload - andl 48(%esp), %edi # 4-byte Folded Reload - andl 24(%esp), %edi # 4-byte Folded Reload - andl 52(%esp), %edi # 4-byte Folded Reload - movl 20(%esp), %esi # 4-byte Reload - andl %esi, %edi - andl 56(%esp), %edi # 4-byte Folded Reload - movl %ebx, %edx - movl 16(%esp), %ebx # 4-byte Reload - andl %ebp, %edi - movl %ebp, %eax - movl %edx, %ebp - orl $-512, %ebp # imm = 0xFFFFFFFFFFFFFE00 - andl %edi, %ebp - andl %ebx, %ebp - cmpl $-1, %ebp - movl 80(%esp), %edi - je .LBB4_1 -# BB#3: # %nonzero - movl %ecx, (%edi) - movl %ebx, 4(%edi) - movl (%esp), %ecx # 4-byte Reload - movl %ecx, 8(%edi) - movl 4(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%edi) - movl 8(%esp), %ecx # 4-byte Reload - movl %ecx, 16(%edi) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 20(%edi) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%edi) - movl 36(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%edi) - movl 40(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%edi) - movl 44(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%edi) - movl 48(%esp), %ecx # 4-byte Reload - movl %ecx, 40(%edi) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 44(%edi) - movl 52(%esp), %ecx # 4-byte Reload - movl %ecx, 48(%edi) - movl %esi, 52(%edi) - movl 56(%esp), 
%ecx # 4-byte Reload - movl %ecx, 56(%edi) - movl %eax, 60(%edi) - andl $511, %edx # imm = 0x1FF - movl %edx, 64(%edi) - jmp .LBB4_2 -.LBB4_1: # %zero - xorl %eax, %eax - movl $17, %ecx - rep;stosl -.LBB4_2: # %zero - addl $60, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end4: - .size mcl_fpDbl_mod_NIST_P521Lbmi2, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521Lbmi2 - - .globl mcl_fp_mulUnitPre1Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre1Lbmi2,@function -mcl_fp_mulUnitPre1Lbmi2: # @mcl_fp_mulUnitPre1Lbmi2 -# BB#0: - movl 8(%esp), %eax - movl (%eax), %edx - mulxl 12(%esp), %ecx, %eax - movl 4(%esp), %edx - movl %ecx, (%edx) - movl %eax, 4(%edx) - retl -.Lfunc_end5: - .size mcl_fp_mulUnitPre1Lbmi2, .Lfunc_end5-mcl_fp_mulUnitPre1Lbmi2 - - .globl mcl_fpDbl_mulPre1Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre1Lbmi2,@function -mcl_fpDbl_mulPre1Lbmi2: # @mcl_fpDbl_mulPre1Lbmi2 -# BB#0: - movl 12(%esp), %eax - movl (%eax), %edx - movl 8(%esp), %eax - mulxl (%eax), %ecx, %eax - movl 4(%esp), %edx - movl %ecx, (%edx) - movl %eax, 4(%edx) - retl -.Lfunc_end6: - .size mcl_fpDbl_mulPre1Lbmi2, .Lfunc_end6-mcl_fpDbl_mulPre1Lbmi2 - - .globl mcl_fpDbl_sqrPre1Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre1Lbmi2,@function -mcl_fpDbl_sqrPre1Lbmi2: # @mcl_fpDbl_sqrPre1Lbmi2 -# BB#0: - movl 8(%esp), %eax - movl (%eax), %edx - mulxl %edx, %ecx, %eax - movl 4(%esp), %edx - movl %ecx, (%edx) - movl %eax, 4(%edx) - retl -.Lfunc_end7: - .size mcl_fpDbl_sqrPre1Lbmi2, .Lfunc_end7-mcl_fpDbl_sqrPre1Lbmi2 - - .globl mcl_fp_mont1Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont1Lbmi2,@function -mcl_fp_mont1Lbmi2: # @mcl_fp_mont1Lbmi2 -# BB#0: - pushl %edi - pushl %esi - movl 16(%esp), %eax - movl (%eax), %edx - movl 20(%esp), %eax - mulxl (%eax), %esi, %ecx - movl 24(%esp), %eax - movl -4(%eax), %edx - imull %esi, %edx - movl (%eax), %edi - mulxl %edi, %edx, %eax - addl %esi, %edx - adcl %ecx, %eax - sbbl %edx, %edx - andl $1, %edx - movl %eax, %ecx - subl %edi, %ecx - sbbl $0, %edx - testb $1, %dl - jne .LBB8_2 -# BB#1: - movl %ecx, %eax -.LBB8_2: - movl 12(%esp), %ecx - movl %eax, (%ecx) - popl %esi - popl %edi - retl -.Lfunc_end8: - .size mcl_fp_mont1Lbmi2, .Lfunc_end8-mcl_fp_mont1Lbmi2 - - .globl mcl_fp_montNF1Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF1Lbmi2,@function -mcl_fp_montNF1Lbmi2: # @mcl_fp_montNF1Lbmi2 -# BB#0: - pushl %edi - pushl %esi - movl 16(%esp), %eax - movl (%eax), %edx - movl 20(%esp), %eax - mulxl (%eax), %esi, %ecx - movl 24(%esp), %eax - movl -4(%eax), %edx - imull %esi, %edx - movl (%eax), %edi - mulxl %edi, %edx, %eax - addl %esi, %edx - adcl %ecx, %eax - movl %eax, %ecx - subl %edi, %ecx - js .LBB9_2 -# BB#1: - movl %ecx, %eax -.LBB9_2: - movl 12(%esp), %ecx - movl %eax, (%ecx) - popl %esi - popl %edi - retl -.Lfunc_end9: - .size mcl_fp_montNF1Lbmi2, .Lfunc_end9-mcl_fp_montNF1Lbmi2 - - .globl mcl_fp_montRed1Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed1Lbmi2,@function -mcl_fp_montRed1Lbmi2: # @mcl_fp_montRed1Lbmi2 -# BB#0: - pushl %edi - pushl %esi - movl 16(%esp), %ecx - movl (%ecx), %esi - movl 20(%esp), %eax - movl -4(%eax), %edx - imull %esi, %edx - movl (%eax), %edi - mulxl %edi, %edx, %eax - addl %esi, %edx - adcl 4(%ecx), %eax - sbbl %edx, %edx - andl $1, %edx - movl %eax, %ecx - subl %edi, %ecx - sbbl $0, %edx - testb $1, %dl - jne .LBB10_2 -# BB#1: - movl %ecx, %eax -.LBB10_2: - movl 12(%esp), %ecx - movl %eax, (%ecx) - popl %esi - popl %edi - retl -.Lfunc_end10: - .size mcl_fp_montRed1Lbmi2, .Lfunc_end10-mcl_fp_montRed1Lbmi2 - - .globl mcl_fp_addPre1Lbmi2 - 
.align 16, 0x90 - .type mcl_fp_addPre1Lbmi2,@function -mcl_fp_addPre1Lbmi2: # @mcl_fp_addPre1Lbmi2 -# BB#0: - movl 12(%esp), %eax - movl (%eax), %eax - movl 4(%esp), %ecx - movl 8(%esp), %edx - addl (%edx), %eax - movl %eax, (%ecx) - sbbl %eax, %eax - andl $1, %eax - retl -.Lfunc_end11: - .size mcl_fp_addPre1Lbmi2, .Lfunc_end11-mcl_fp_addPre1Lbmi2 - - .globl mcl_fp_subPre1Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre1Lbmi2,@function -mcl_fp_subPre1Lbmi2: # @mcl_fp_subPre1Lbmi2 -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %ecx - xorl %eax, %eax - movl 8(%esp), %edx - movl 16(%esp), %esi - subl (%esi), %ecx - movl %ecx, (%edx) - sbbl $0, %eax - andl $1, %eax - popl %esi - retl -.Lfunc_end12: - .size mcl_fp_subPre1Lbmi2, .Lfunc_end12-mcl_fp_subPre1Lbmi2 - - .globl mcl_fp_shr1_1Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_1Lbmi2,@function -mcl_fp_shr1_1Lbmi2: # @mcl_fp_shr1_1Lbmi2 -# BB#0: - movl 8(%esp), %eax - movl (%eax), %eax - shrl %eax - movl 4(%esp), %ecx - movl %eax, (%ecx) - retl -.Lfunc_end13: - .size mcl_fp_shr1_1Lbmi2, .Lfunc_end13-mcl_fp_shr1_1Lbmi2 - - .globl mcl_fp_add1Lbmi2 - .align 16, 0x90 - .type mcl_fp_add1Lbmi2,@function -mcl_fp_add1Lbmi2: # @mcl_fp_add1Lbmi2 -# BB#0: - pushl %esi - movl 16(%esp), %eax - movl (%eax), %eax - movl 8(%esp), %ecx - movl 12(%esp), %edx - addl (%edx), %eax - movl %eax, (%ecx) - sbbl %edx, %edx - andl $1, %edx - movl 20(%esp), %esi - subl (%esi), %eax - sbbl $0, %edx - testb $1, %dl - jne .LBB14_2 -# BB#1: # %nocarry - movl %eax, (%ecx) -.LBB14_2: # %carry - popl %esi - retl -.Lfunc_end14: - .size mcl_fp_add1Lbmi2, .Lfunc_end14-mcl_fp_add1Lbmi2 - - .globl mcl_fp_addNF1Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF1Lbmi2,@function -mcl_fp_addNF1Lbmi2: # @mcl_fp_addNF1Lbmi2 -# BB#0: - movl 12(%esp), %eax - movl (%eax), %eax - movl 8(%esp), %ecx - addl (%ecx), %eax - movl 16(%esp), %edx - movl %eax, %ecx - subl (%edx), %ecx - js .LBB15_2 -# BB#1: - movl %ecx, %eax -.LBB15_2: - movl 4(%esp), %ecx - movl %eax, (%ecx) - retl -.Lfunc_end15: - .size mcl_fp_addNF1Lbmi2, .Lfunc_end15-mcl_fp_addNF1Lbmi2 - - .globl mcl_fp_sub1Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub1Lbmi2,@function -mcl_fp_sub1Lbmi2: # @mcl_fp_sub1Lbmi2 -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %eax - xorl %edx, %edx - movl 8(%esp), %ecx - movl 16(%esp), %esi - subl (%esi), %eax - movl %eax, (%ecx) - sbbl $0, %edx - testb $1, %dl - jne .LBB16_2 -# BB#1: # %nocarry - popl %esi - retl -.LBB16_2: # %carry - movl 20(%esp), %edx - addl (%edx), %eax - movl %eax, (%ecx) - popl %esi - retl -.Lfunc_end16: - .size mcl_fp_sub1Lbmi2, .Lfunc_end16-mcl_fp_sub1Lbmi2 - - .globl mcl_fp_subNF1Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF1Lbmi2,@function -mcl_fp_subNF1Lbmi2: # @mcl_fp_subNF1Lbmi2 -# BB#0: - movl 8(%esp), %eax - movl (%eax), %eax - movl 12(%esp), %ecx - subl (%ecx), %eax - movl %eax, %ecx - sarl $31, %ecx - movl 16(%esp), %edx - andl (%edx), %ecx - addl %eax, %ecx - movl 4(%esp), %eax - movl %ecx, (%eax) - retl -.Lfunc_end17: - .size mcl_fp_subNF1Lbmi2, .Lfunc_end17-mcl_fp_subNF1Lbmi2 - - .globl mcl_fpDbl_add1Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add1Lbmi2,@function -mcl_fpDbl_add1Lbmi2: # @mcl_fpDbl_add1Lbmi2 -# BB#0: - pushl %ebx - pushl %esi - movl 20(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %eax - movl 16(%esp), %esi - addl (%esi), %edx - movl 12(%esp), %ecx - adcl 4(%esi), %eax - movl %edx, (%ecx) - sbbl %ebx, %ebx - andl $1, %ebx - movl 24(%esp), %esi - movl %eax, %edx - subl (%esi), %edx - sbbl $0, %ebx - testb $1, %bl - jne .LBB18_2 -# BB#1: - 
movl %edx, %eax -.LBB18_2: - movl %eax, 4(%ecx) - popl %esi - popl %ebx - retl -.Lfunc_end18: - .size mcl_fpDbl_add1Lbmi2, .Lfunc_end18-mcl_fpDbl_add1Lbmi2 - - .globl mcl_fpDbl_sub1Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub1Lbmi2,@function -mcl_fpDbl_sub1Lbmi2: # @mcl_fpDbl_sub1Lbmi2 -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %esi - movl 4(%eax), %eax - xorl %ecx, %ecx - movl 16(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %eax - movl 8(%esp), %edx - movl %esi, (%edx) - sbbl $0, %ecx - andl $1, %ecx - je .LBB19_2 -# BB#1: - movl 20(%esp), %ecx - movl (%ecx), %ecx -.LBB19_2: - addl %eax, %ecx - movl %ecx, 4(%edx) - popl %esi - retl -.Lfunc_end19: - .size mcl_fpDbl_sub1Lbmi2, .Lfunc_end19-mcl_fpDbl_sub1Lbmi2 - - .globl mcl_fp_mulUnitPre2Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre2Lbmi2,@function -mcl_fp_mulUnitPre2Lbmi2: # @mcl_fp_mulUnitPre2Lbmi2 -# BB#0: - pushl %edi - pushl %esi - movl 20(%esp), %edx - movl 16(%esp), %eax - mulxl 4(%eax), %ecx, %esi - mulxl (%eax), %eax, %edx - movl 12(%esp), %edi - movl %eax, (%edi) - addl %ecx, %edx - movl %edx, 4(%edi) - adcl $0, %esi - movl %esi, 8(%edi) - popl %esi - popl %edi - retl -.Lfunc_end20: - .size mcl_fp_mulUnitPre2Lbmi2, .Lfunc_end20-mcl_fp_mulUnitPre2Lbmi2 - - .globl mcl_fpDbl_mulPre2Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre2Lbmi2,@function -mcl_fpDbl_mulPre2Lbmi2: # @mcl_fpDbl_mulPre2Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %eax - movl 4(%ecx), %ecx - movl 28(%esp), %esi - movl (%esi), %edi - movl %ecx, %edx - mulxl %edi, %ebx, %ebp - movl %eax, %edx - mulxl %edi, %edx, %edi - addl %ebx, %edi - movl 20(%esp), %ebx - movl %edx, (%ebx) - adcl $0, %ebp - movl 4(%esi), %esi - movl %eax, %edx - mulxl %esi, %eax, %ebx - addl %edi, %eax - movl %ecx, %edx - mulxl %esi, %edx, %ecx - adcl %ebp, %edx - sbbl %esi, %esi - andl $1, %esi - addl %ebx, %edx - movl 20(%esp), %edi - movl %eax, 4(%edi) - movl %edx, 8(%edi) - adcl %ecx, %esi - movl %esi, 12(%edi) - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end21: - .size mcl_fpDbl_mulPre2Lbmi2, .Lfunc_end21-mcl_fpDbl_mulPre2Lbmi2 - - .globl mcl_fpDbl_sqrPre2Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre2Lbmi2,@function -mcl_fpDbl_sqrPre2Lbmi2: # @mcl_fpDbl_sqrPre2Lbmi2 -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 20(%esp), %ecx - movl (%ecx), %eax - movl 4(%ecx), %ecx - movl 16(%esp), %esi - movl %eax, %edx - mulxl %eax, %edx, %edi - movl %edx, (%esi) - movl %ecx, %edx - mulxl %eax, %edx, %eax - addl %edx, %edi - movl %eax, %ebx - adcl $0, %ebx - addl %edx, %edi - movl %ecx, %edx - mulxl %ecx, %edx, %ecx - adcl %ebx, %edx - sbbl %ebx, %ebx - andl $1, %ebx - addl %eax, %edx - movl %edi, 4(%esi) - movl %edx, 8(%esi) - adcl %ecx, %ebx - movl %ebx, 12(%esi) - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end22: - .size mcl_fpDbl_sqrPre2Lbmi2, .Lfunc_end22-mcl_fpDbl_sqrPre2Lbmi2 - - .globl mcl_fp_mont2Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont2Lbmi2,@function -mcl_fp_mont2Lbmi2: # @mcl_fp_mont2Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $24, %esp - movl 48(%esp), %eax - movl (%eax), %edi - movl %edi, 8(%esp) # 4-byte Spill - movl 4(%eax), %edx - movl %edx, 4(%esp) # 4-byte Spill - movl 52(%esp), %eax - movl (%eax), %eax - mulxl %eax, %ecx, %esi - movl %edi, %edx - mulxl %eax, %edx, %edi - movl %edx, (%esp) # 4-byte Spill - addl %ecx, %edi - adcl $0, %esi - movl 56(%esp), %eax - movl -4(%eax), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - imull %ecx, %edx - 
movl (%eax), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 4(%eax), %eax - movl %eax, 20(%esp) # 4-byte Spill - mulxl %eax, %ebp, %ecx - mulxl %ebx, %edx, %eax - addl %ebp, %eax - adcl $0, %ecx - addl (%esp), %edx # 4-byte Folded Reload - adcl %edi, %eax - adcl %esi, %ecx - movl 52(%esp), %edx - movl 4(%edx), %edx - sbbl %ebx, %ebx - andl $1, %ebx - mulxl 4(%esp), %esi, %ebp # 4-byte Folded Reload - movl %esi, 4(%esp) # 4-byte Spill - mulxl 8(%esp), %edi, %esi # 4-byte Folded Reload - addl 4(%esp), %esi # 4-byte Folded Reload - adcl $0, %ebp - addl %eax, %edi - adcl %ecx, %esi - adcl %ebx, %ebp - sbbl %ecx, %ecx - movl 12(%esp), %edx # 4-byte Reload - imull %edi, %edx - movl %edx, %eax - mulxl 16(%esp), %ebx, %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl %eax, %edx - mulxl 20(%esp), %edx, %eax # 4-byte Folded Reload - addl 12(%esp), %edx # 4-byte Folded Reload - adcl $0, %eax - andl $1, %ecx - addl %edi, %ebx - adcl %esi, %edx - adcl %ebp, %eax - adcl $0, %ecx - movl %edx, %ebp - subl 16(%esp), %ebp # 4-byte Folded Reload - movl %eax, %esi - sbbl 20(%esp), %esi # 4-byte Folded Reload - sbbl $0, %ecx - andl $1, %ecx - jne .LBB23_2 -# BB#1: - movl %ebp, %edx -.LBB23_2: - movl 44(%esp), %edi - movl %edx, (%edi) - testb %cl, %cl - jne .LBB23_4 -# BB#3: - movl %esi, %eax -.LBB23_4: - movl %eax, 4(%edi) - addl $24, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end23: - .size mcl_fp_mont2Lbmi2, .Lfunc_end23-mcl_fp_mont2Lbmi2 - - .globl mcl_fp_montNF2Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF2Lbmi2,@function -mcl_fp_montNF2Lbmi2: # @mcl_fp_montNF2Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $20, %esp - movl 44(%esp), %eax - movl (%eax), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 4(%eax), %edx - movl %edx, 4(%esp) # 4-byte Spill - movl 48(%esp), %eax - movl (%eax), %eax - mulxl %eax, %edi, %ebp - movl %ecx, %edx - mulxl %eax, %ecx, %esi - addl %edi, %esi - adcl $0, %ebp - movl 52(%esp), %eax - movl -4(%eax), %ebx - movl %ecx, %edx - imull %ebx, %edx - movl (%eax), %eax - movl %eax, 16(%esp) # 4-byte Spill - mulxl %eax, %edi, %eax - movl %eax, (%esp) # 4-byte Spill - addl %ecx, %edi - movl 52(%esp), %eax - movl 4(%eax), %eax - movl %eax, 12(%esp) # 4-byte Spill - mulxl %eax, %edi, %edx - adcl %esi, %edi - adcl $0, %ebp - addl (%esp), %edi # 4-byte Folded Reload - adcl %edx, %ebp - movl 48(%esp), %eax - movl 4(%eax), %edx - mulxl 4(%esp), %eax, %esi # 4-byte Folded Reload - movl %eax, 4(%esp) # 4-byte Spill - mulxl 8(%esp), %eax, %ecx # 4-byte Folded Reload - addl 4(%esp), %ecx # 4-byte Folded Reload - adcl $0, %esi - addl %edi, %eax - adcl %ebp, %ecx - adcl $0, %esi - imull %eax, %ebx - movl %ebx, %edx - movl 16(%esp), %ebp # 4-byte Reload - mulxl %ebp, %edx, %edi - addl %eax, %edx - movl %ebx, %edx - movl 12(%esp), %ebx # 4-byte Reload - mulxl %ebx, %eax, %edx - adcl %ecx, %eax - adcl $0, %esi - addl %edi, %eax - adcl %edx, %esi - movl %eax, %edx - subl %ebp, %edx - movl %esi, %ecx - sbbl %ebx, %ecx - testl %ecx, %ecx - js .LBB24_2 -# BB#1: - movl %edx, %eax -.LBB24_2: - movl 40(%esp), %edx - movl %eax, (%edx) - js .LBB24_4 -# BB#3: - movl %ecx, %esi -.LBB24_4: - movl %esi, 4(%edx) - addl $20, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end24: - .size mcl_fp_montNF2Lbmi2, .Lfunc_end24-mcl_fp_montNF2Lbmi2 - - .globl mcl_fp_montRed2Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed2Lbmi2,@function -mcl_fp_montRed2Lbmi2: # @mcl_fp_montRed2Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - 
pushl %esi - subl $16, %esp - movl 44(%esp), %esi - movl -4(%esi), %ecx - movl (%esi), %edi - movl %edi, 8(%esp) # 4-byte Spill - movl 40(%esp), %eax - movl (%eax), %ebx - movl %ebx, %edx - imull %ecx, %edx - movl 4(%esi), %eax - movl %eax, 12(%esp) # 4-byte Spill - mulxl %eax, %ebp, %esi - mulxl %edi, %edx, %eax - addl %ebp, %eax - adcl $0, %esi - addl %ebx, %edx - movl 40(%esp), %edi - movl 12(%edi), %edx - adcl 4(%edi), %eax - adcl 8(%edi), %esi - adcl $0, %edx - movl %edx, 4(%esp) # 4-byte Spill - sbbl %ebx, %ebx - imull %eax, %ecx - movl %ecx, %edx - mulxl 8(%esp), %edi, %edx # 4-byte Folded Reload - movl %edx, (%esp) # 4-byte Spill - movl %ecx, %edx - mulxl 12(%esp), %edx, %ebp # 4-byte Folded Reload - addl (%esp), %edx # 4-byte Folded Reload - adcl $0, %ebp - andl $1, %ebx - addl %eax, %edi - adcl %esi, %edx - adcl 4(%esp), %ebp # 4-byte Folded Reload - adcl $0, %ebx - movl %edx, %edi - subl 8(%esp), %edi # 4-byte Folded Reload - movl %ebp, %ecx - sbbl 12(%esp), %ecx # 4-byte Folded Reload - sbbl $0, %ebx - andl $1, %ebx - jne .LBB25_2 -# BB#1: - movl %edi, %edx -.LBB25_2: - movl 36(%esp), %esi - movl %edx, (%esi) - testb %bl, %bl - jne .LBB25_4 -# BB#3: - movl %ecx, %ebp -.LBB25_4: - movl %ebp, 4(%esi) - addl $16, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end25: - .size mcl_fp_montRed2Lbmi2, .Lfunc_end25-mcl_fp_montRed2Lbmi2 - - .globl mcl_fp_addPre2Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre2Lbmi2,@function -mcl_fp_addPre2Lbmi2: # @mcl_fp_addPre2Lbmi2 -# BB#0: - pushl %esi - movl 16(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %eax - movl 12(%esp), %edx - addl (%edx), %ecx - movl 8(%esp), %esi - adcl 4(%edx), %eax - movl %ecx, (%esi) - movl %eax, 4(%esi) - sbbl %eax, %eax - andl $1, %eax - popl %esi - retl -.Lfunc_end26: - .size mcl_fp_addPre2Lbmi2, .Lfunc_end26-mcl_fp_addPre2Lbmi2 - - .globl mcl_fp_subPre2Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre2Lbmi2,@function -mcl_fp_subPre2Lbmi2: # @mcl_fp_subPre2Lbmi2 -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - xorl %eax, %eax - movl 16(%esp), %esi - subl (%esi), %ecx - sbbl 4(%esi), %edx - movl 8(%esp), %esi - movl %ecx, (%esi) - movl %edx, 4(%esi) - sbbl $0, %eax - andl $1, %eax - popl %esi - retl -.Lfunc_end27: - .size mcl_fp_subPre2Lbmi2, .Lfunc_end27-mcl_fp_subPre2Lbmi2 - - .globl mcl_fp_shr1_2Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_2Lbmi2,@function -mcl_fp_shr1_2Lbmi2: # @mcl_fp_shr1_2Lbmi2 -# BB#0: - movl 8(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %eax - shrdl $1, %eax, %ecx - movl 4(%esp), %edx - movl %ecx, (%edx) - shrl %eax - movl %eax, 4(%edx) - retl -.Lfunc_end28: - .size mcl_fp_shr1_2Lbmi2, .Lfunc_end28-mcl_fp_shr1_2Lbmi2 - - .globl mcl_fp_add2Lbmi2 - .align 16, 0x90 - .type mcl_fp_add2Lbmi2,@function -mcl_fp_add2Lbmi2: # @mcl_fp_add2Lbmi2 -# BB#0: - pushl %ebx - pushl %esi - movl 20(%esp), %ecx - movl (%ecx), %eax - movl 4(%ecx), %ecx - movl 16(%esp), %esi - addl (%esi), %eax - movl 12(%esp), %edx - adcl 4(%esi), %ecx - movl %eax, (%edx) - movl %ecx, 4(%edx) - sbbl %ebx, %ebx - andl $1, %ebx - movl 24(%esp), %esi - subl (%esi), %eax - sbbl 4(%esi), %ecx - sbbl $0, %ebx - testb $1, %bl - jne .LBB29_2 -# BB#1: # %nocarry - movl %eax, (%edx) - movl %ecx, 4(%edx) -.LBB29_2: # %carry - popl %esi - popl %ebx - retl -.Lfunc_end29: - .size mcl_fp_add2Lbmi2, .Lfunc_end29-mcl_fp_add2Lbmi2 - - .globl mcl_fp_addNF2Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF2Lbmi2,@function -mcl_fp_addNF2Lbmi2: # @mcl_fp_addNF2Lbmi2 -# BB#0: - pushl %edi - pushl %esi - 
movl 20(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %eax - movl 16(%esp), %edx - addl (%edx), %ecx - adcl 4(%edx), %eax - movl 24(%esp), %edi - movl %ecx, %esi - subl (%edi), %esi - movl %eax, %edx - sbbl 4(%edi), %edx - testl %edx, %edx - js .LBB30_2 -# BB#1: - movl %esi, %ecx -.LBB30_2: - movl 12(%esp), %esi - movl %ecx, (%esi) - js .LBB30_4 -# BB#3: - movl %edx, %eax -.LBB30_4: - movl %eax, 4(%esi) - popl %esi - popl %edi - retl -.Lfunc_end30: - .size mcl_fp_addNF2Lbmi2, .Lfunc_end30-mcl_fp_addNF2Lbmi2 - - .globl mcl_fp_sub2Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub2Lbmi2,@function -mcl_fp_sub2Lbmi2: # @mcl_fp_sub2Lbmi2 -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 20(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %eax - xorl %ebx, %ebx - movl 24(%esp), %edx - subl (%edx), %ecx - sbbl 4(%edx), %eax - movl 16(%esp), %edx - movl %ecx, (%edx) - movl %eax, 4(%edx) - sbbl $0, %ebx - testb $1, %bl - je .LBB31_2 -# BB#1: # %carry - movl 28(%esp), %esi - movl 4(%esi), %edi - addl (%esi), %ecx - movl %ecx, (%edx) - adcl %eax, %edi - movl %edi, 4(%edx) -.LBB31_2: # %nocarry - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end31: - .size mcl_fp_sub2Lbmi2, .Lfunc_end31-mcl_fp_sub2Lbmi2 - - .globl mcl_fp_subNF2Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF2Lbmi2,@function -mcl_fp_subNF2Lbmi2: # @mcl_fp_subNF2Lbmi2 -# BB#0: - pushl %edi - pushl %esi - movl 16(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %eax - movl 20(%esp), %edx - subl (%edx), %ecx - sbbl 4(%edx), %eax - movl %eax, %edx - sarl $31, %edx - movl 24(%esp), %esi - movl 4(%esi), %edi - andl %edx, %edi - andl (%esi), %edx - addl %ecx, %edx - movl 12(%esp), %ecx - movl %edx, (%ecx) - adcl %eax, %edi - movl %edi, 4(%ecx) - popl %esi - popl %edi - retl -.Lfunc_end32: - .size mcl_fp_subNF2Lbmi2, .Lfunc_end32-mcl_fp_subNF2Lbmi2 - - .globl mcl_fpDbl_add2Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add2Lbmi2,@function -mcl_fpDbl_add2Lbmi2: # @mcl_fpDbl_add2Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 28(%esp), %edx - movl 12(%edx), %esi - movl 24(%esp), %edi - movl 12(%edi), %eax - movl 8(%edx), %ecx - movl (%edx), %ebx - movl 4(%edx), %ebp - addl (%edi), %ebx - adcl 4(%edi), %ebp - movl 20(%esp), %edx - adcl 8(%edi), %ecx - movl %ebx, (%edx) - movl %ebp, 4(%edx) - adcl %esi, %eax - sbbl %ebx, %ebx - andl $1, %ebx - movl 32(%esp), %ebp - movl %ecx, %esi - subl (%ebp), %esi - movl %eax, %edi - sbbl 4(%ebp), %edi - sbbl $0, %ebx - andl $1, %ebx - jne .LBB33_2 -# BB#1: - movl %edi, %eax -.LBB33_2: - testb %bl, %bl - jne .LBB33_4 -# BB#3: - movl %esi, %ecx -.LBB33_4: - movl %ecx, 8(%edx) - movl %eax, 12(%edx) - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end33: - .size mcl_fpDbl_add2Lbmi2, .Lfunc_end33-mcl_fpDbl_add2Lbmi2 - - .globl mcl_fpDbl_sub2Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub2Lbmi2,@function -mcl_fpDbl_sub2Lbmi2: # @mcl_fpDbl_sub2Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edi - xorl %ebx, %ebx - movl 28(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%ecx), %eax - sbbl 8(%edx), %eax - movl 12(%edx), %ebp - movl 12(%ecx), %edx - movl 20(%esp), %ecx - movl %esi, (%ecx) - movl %edi, 4(%ecx) - sbbl %ebp, %edx - movl 32(%esp), %edi - movl (%edi), %esi - sbbl $0, %ebx - andl $1, %ebx - jne .LBB34_1 -# BB#2: - xorl %edi, %edi - jmp .LBB34_3 -.LBB34_1: - movl 4(%edi), %edi -.LBB34_3: - testb %bl, %bl - jne .LBB34_5 -# BB#4: - xorl %esi, %esi -.LBB34_5: - addl %eax, %esi - movl %esi, 
8(%ecx) - adcl %edx, %edi - movl %edi, 12(%ecx) - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end34: - .size mcl_fpDbl_sub2Lbmi2, .Lfunc_end34-mcl_fpDbl_sub2Lbmi2 - - .globl mcl_fp_mulUnitPre3Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre3Lbmi2,@function -mcl_fp_mulUnitPre3Lbmi2: # @mcl_fp_mulUnitPre3Lbmi2 -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %edx - movl 20(%esp), %eax - mulxl 4(%eax), %esi, %ecx - mulxl (%eax), %edi, %ebx - addl %esi, %ebx - mulxl 8(%eax), %eax, %edx - movl 16(%esp), %esi - movl %edi, (%esi) - movl %ebx, 4(%esi) - adcl %ecx, %eax - movl %eax, 8(%esi) - adcl $0, %edx - movl %edx, 12(%esi) - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end35: - .size mcl_fp_mulUnitPre3Lbmi2, .Lfunc_end35-mcl_fp_mulUnitPre3Lbmi2 - - .globl mcl_fpDbl_mulPre3Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre3Lbmi2,@function -mcl_fpDbl_mulPre3Lbmi2: # @mcl_fpDbl_mulPre3Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $16, %esp - movl 40(%esp), %ecx - movl (%ecx), %eax - movl 4(%ecx), %edx - movl %edx, 12(%esp) # 4-byte Spill - movl 44(%esp), %esi - movl (%esi), %edi - mulxl %edi, %ebx, %ebp - movl %eax, %edx - movl %eax, %esi - mulxl %edi, %edx, %eax - movl %edx, 4(%esp) # 4-byte Spill - addl %ebx, %eax - movl 8(%ecx), %edx - movl %edx, 8(%esp) # 4-byte Spill - mulxl %edi, %ebx, %edi - adcl %ebp, %ebx - movl 36(%esp), %ecx - movl 4(%esp), %edx # 4-byte Reload - movl %edx, (%ecx) - adcl $0, %edi - movl 44(%esp), %ecx - movl 4(%ecx), %ebp - movl %esi, %edx - mulxl %ebp, %ecx, %edx - movl %edx, 4(%esp) # 4-byte Spill - addl %eax, %ecx - movl 12(%esp), %edx # 4-byte Reload - mulxl %ebp, %eax, %edx - movl %edx, (%esp) # 4-byte Spill - adcl %ebx, %eax - movl 8(%esp), %edx # 4-byte Reload - mulxl %ebp, %ebx, %edx - adcl %edi, %ebx - sbbl %edi, %edi - andl $1, %edi - addl 4(%esp), %eax # 4-byte Folded Reload - adcl (%esp), %ebx # 4-byte Folded Reload - adcl %edx, %edi - movl 36(%esp), %edx - movl %ecx, 4(%edx) - movl 44(%esp), %ecx - movl 8(%ecx), %ecx - movl %esi, %edx - mulxl %ecx, %ebp, %edx - movl %edx, 4(%esp) # 4-byte Spill - addl %eax, %ebp - movl 12(%esp), %edx # 4-byte Reload - mulxl %ecx, %eax, %edx - movl %edx, 12(%esp) # 4-byte Spill - adcl %ebx, %eax - movl 8(%esp), %edx # 4-byte Reload - mulxl %ecx, %edx, %ecx - adcl %edi, %edx - sbbl %esi, %esi - andl $1, %esi - addl 4(%esp), %eax # 4-byte Folded Reload - adcl 12(%esp), %edx # 4-byte Folded Reload - movl 36(%esp), %edi - movl %ebp, 8(%edi) - movl %eax, 12(%edi) - movl %edx, 16(%edi) - adcl %ecx, %esi - movl %esi, 20(%edi) - addl $16, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end36: - .size mcl_fpDbl_mulPre3Lbmi2, .Lfunc_end36-mcl_fpDbl_mulPre3Lbmi2 - - .globl mcl_fpDbl_sqrPre3Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre3Lbmi2,@function -mcl_fpDbl_sqrPre3Lbmi2: # @mcl_fpDbl_sqrPre3Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $20, %esp - movl 44(%esp), %edx - movl 8(%edx), %edi - movl %edi, (%esp) # 4-byte Spill - movl (%edx), %ecx - movl 4(%edx), %esi - movl 40(%esp), %eax - movl %ecx, %edx - mulxl %ecx, %edx, %ebx - movl %edx, (%eax) - movl %esi, %edx - mulxl %ecx, %ebp, %eax - movl %eax, 8(%esp) # 4-byte Spill - addl %ebp, %ebx - movl %edi, %edx - mulxl %ecx, %edx, %ecx - movl %edx, 12(%esp) # 4-byte Spill - movl %ecx, 16(%esp) # 4-byte Spill - movl %eax, %edi - adcl %edx, %edi - adcl $0, %ecx - addl %ebp, %ebx - movl %esi, %edx - mulxl %esi, %ebp, %eax - movl %eax, 4(%esp) # 4-byte Spill - adcl %edi, 
%ebp
- movl (%esp), %eax # 4-byte Reload
- movl %eax, %edx
- mulxl %esi, %edx, %esi
- adcl %edx, %ecx
- sbbl %edi, %edi
- andl $1, %edi
- addl 8(%esp), %ebp # 4-byte Folded Reload
- adcl 4(%esp), %ecx # 4-byte Folded Reload
- adcl %esi, %edi
- addl 12(%esp), %ebp # 4-byte Folded Reload
- adcl %edx, %ecx
- movl %eax, %edx
- mulxl %eax, %edx, %eax
- adcl %edi, %edx
- sbbl %edi, %edi
- andl $1, %edi
- addl 16(%esp), %ecx # 4-byte Folded Reload
- adcl %esi, %edx
- movl 40(%esp), %esi
- movl %ebx, 4(%esi)
- movl %ebp, 8(%esi)
- movl %ecx, 12(%esi)
- movl %edx, 16(%esi)
- adcl %eax, %edi
- movl %edi, 20(%esi)
- addl $20, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end37:
- .size mcl_fpDbl_sqrPre3Lbmi2, .Lfunc_end37-mcl_fpDbl_sqrPre3Lbmi2
-
- .globl mcl_fp_mont3Lbmi2
- .align 16, 0x90
- .type mcl_fp_mont3Lbmi2,@function
-mcl_fp_mont3Lbmi2: # @mcl_fp_mont3Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $44, %esp
- movl 68(%esp), %eax
- movl 8(%eax), %edx
- movl %edx, 16(%esp) # 4-byte Spill
- movl 72(%esp), %ecx
- movl (%ecx), %ecx
- mulxl %ecx, %edx, %edi
- movl %edx, 40(%esp) # 4-byte Spill
- movl (%eax), %esi
- movl %esi, 12(%esp) # 4-byte Spill
- movl 4(%eax), %edx
- movl %edx, 8(%esp) # 4-byte Spill
- mulxl %ecx, %eax, %ebp
- movl %esi, %edx
- mulxl %ecx, %edx, %ebx
- movl %edx, 4(%esp) # 4-byte Spill
- addl %eax, %ebx
- adcl 40(%esp), %ebp # 4-byte Folded Reload
- movl %ebp, 24(%esp) # 4-byte Spill
- adcl $0, %edi
- movl %edi, 28(%esp) # 4-byte Spill
- movl 76(%esp), %esi
- movl -4(%esi), %eax
- movl %eax, 20(%esp) # 4-byte Spill
- imull %eax, %edx
- movl (%esi), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 4(%esi), %ecx
- movl %ecx, 40(%esp) # 4-byte Spill
- mulxl %ecx, %ecx, %edi
- movl %edi, (%esp) # 4-byte Spill
- mulxl %eax, %ebp, %edi
- addl %ecx, %edi
- movl 8(%esi), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- mulxl %eax, %ecx, %esi
- adcl (%esp), %ecx # 4-byte Folded Reload
- adcl $0, %esi
- addl 4(%esp), %ebp # 4-byte Folded Reload
- adcl %ebx, %edi
- adcl 24(%esp), %ecx # 4-byte Folded Reload
- adcl 28(%esp), %esi # 4-byte Folded Reload
- sbbl %eax, %eax
- andl $1, %eax
- movl %eax, 24(%esp) # 4-byte Spill
- movl 72(%esp), %eax
- movl 4(%eax), %edx
- mulxl 16(%esp), %ebx, %eax # 4-byte Folded Reload
- movl %ebx, 4(%esp) # 4-byte Spill
- movl %eax, 28(%esp) # 4-byte Spill
- mulxl 8(%esp), %ebx, %eax # 4-byte Folded Reload
- movl %ebx, (%esp) # 4-byte Spill
- mulxl 12(%esp), %ebx, %ebp # 4-byte Folded Reload
- addl (%esp), %ebp # 4-byte Folded Reload
- movl %eax, %edx
- adcl 4(%esp), %edx # 4-byte Folded Reload
- movl 28(%esp), %eax # 4-byte Reload
- adcl $0, %eax
- addl %edi, %ebx
- adcl %ecx, %ebp
- adcl %esi, %edx
- movl %edx, 4(%esp) # 4-byte Spill
- adcl 24(%esp), %eax # 4-byte Folded Reload
- movl %eax, 28(%esp) # 4-byte Spill
- sbbl %eax, %eax
- movl %eax, 24(%esp) # 4-byte Spill
- movl %ebx, %eax
- movl %eax, %edx
- imull 20(%esp), %edx # 4-byte Folded Reload
- mulxl 40(%esp), %ecx, %esi # 4-byte Folded Reload
- movl %esi, (%esp) # 4-byte Spill
- mulxl 36(%esp), %esi, %ebx # 4-byte Folded Reload
- addl %ecx, %ebx
- mulxl 32(%esp), %ecx, %edi # 4-byte Folded Reload
- adcl (%esp), %ecx # 4-byte Folded Reload
- adcl $0, %edi
- movl 24(%esp), %edx # 4-byte Reload
- andl $1, %edx
- addl %eax, %esi
- adcl %ebp, %ebx
- adcl 4(%esp), %ecx # 4-byte Folded Reload
- adcl 28(%esp), %edi # 4-byte Folded Reload
- adcl $0, %edx
- movl %edx, 24(%esp) # 4-byte Spill
- movl 72(%esp), %edx
- movl 8(%edx), %edx
- mulxl 16(%esp), %esi, %eax # 4-byte Folded Reload
- movl %esi, 16(%esp) # 4-byte Spill
- movl %eax, 28(%esp) # 4-byte Spill
- mulxl 8(%esp), %eax, %ebp # 4-byte Folded Reload
- movl %eax, 8(%esp) # 4-byte Spill
- mulxl 12(%esp), %eax, %esi # 4-byte Folded Reload
- addl 8(%esp), %esi # 4-byte Folded Reload
- adcl 16(%esp), %ebp # 4-byte Folded Reload
- movl 28(%esp), %edx # 4-byte Reload
- adcl $0, %edx
- addl %ebx, %eax
- movl %eax, 16(%esp) # 4-byte Spill
- adcl %ecx, %esi
- adcl %edi, %ebp
- adcl 24(%esp), %edx # 4-byte Folded Reload
- movl %edx, 28(%esp) # 4-byte Spill
- sbbl %ebx, %ebx
- movl 20(%esp), %edx # 4-byte Reload
- imull %eax, %edx
- mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload
- movl %eax, 20(%esp) # 4-byte Spill
- movl %edx, %eax
- mulxl 40(%esp), %edi, %edx # 4-byte Folded Reload
- movl %edx, 24(%esp) # 4-byte Spill
- addl 20(%esp), %edi # 4-byte Folded Reload
- movl %eax, %edx
- mulxl 32(%esp), %edx, %eax # 4-byte Folded Reload
- adcl 24(%esp), %edx # 4-byte Folded Reload
- adcl $0, %eax
- andl $1, %ebx
- addl 16(%esp), %ecx # 4-byte Folded Reload
- adcl %esi, %edi
- adcl %ebp, %edx
- adcl 28(%esp), %eax # 4-byte Folded Reload
- adcl $0, %ebx
- movl %edi, %ebp
- subl 36(%esp), %ebp # 4-byte Folded Reload
- movl %edx, %esi
- sbbl 40(%esp), %esi # 4-byte Folded Reload
- movl %eax, %ecx
- sbbl 32(%esp), %ecx # 4-byte Folded Reload
- sbbl $0, %ebx
- andl $1, %ebx
- jne .LBB38_2
-# BB#1:
- movl %ebp, %edi
-.LBB38_2:
- movl 64(%esp), %ebp
- movl %edi, (%ebp)
- testb %bl, %bl
- jne .LBB38_4
-# BB#3:
- movl %esi, %edx
-.LBB38_4:
- movl %edx, 4(%ebp)
- jne .LBB38_6
-# BB#5:
- movl %ecx, %eax
-.LBB38_6:
- movl %eax, 8(%ebp)
- addl $44, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end38:
- .size mcl_fp_mont3Lbmi2, .Lfunc_end38-mcl_fp_mont3Lbmi2
-
- .globl mcl_fp_montNF3Lbmi2
- .align 16, 0x90
- .type mcl_fp_montNF3Lbmi2,@function
-mcl_fp_montNF3Lbmi2: # @mcl_fp_montNF3Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $36, %esp
- movl 60(%esp), %eax
- movl (%eax), %edi
- movl %edi, 16(%esp) # 4-byte Spill
- movl 4(%eax), %edx
- movl %edx, 12(%esp) # 4-byte Spill
- movl 64(%esp), %ecx
- movl (%ecx), %ecx
- mulxl %ecx, %esi, %edx
- movl %edx, 32(%esp) # 4-byte Spill
- movl %edi, %edx
- mulxl %ecx, %edi, %ebp
- addl %esi, %ebp
- movl 8(%eax), %edx
- movl %edx, 8(%esp) # 4-byte Spill
- mulxl %ecx, %eax, %ebx
- adcl 32(%esp), %eax # 4-byte Folded Reload
- adcl $0, %ebx
- movl 68(%esp), %esi
- movl -4(%esi), %ecx
- movl %ecx, 20(%esp) # 4-byte Spill
- movl %edi, %edx
- imull %ecx, %edx
- movl (%esi), %ecx
- movl %ecx, 32(%esp) # 4-byte Spill
- mulxl %ecx, %esi, %ecx
- movl %ecx, 4(%esp) # 4-byte Spill
- addl %edi, %esi
- movl 68(%esp), %esi
- movl 4(%esi), %ecx
- movl %ecx, 28(%esp) # 4-byte Spill
- mulxl %ecx, %edi, %ecx
- adcl %ebp, %edi
- movl 8(%esi), %esi
- movl %esi, 24(%esp) # 4-byte Spill
- mulxl %esi, %ebp, %edx
- adcl %eax, %ebp
- adcl $0, %ebx
- addl 4(%esp), %edi # 4-byte Folded Reload
- adcl %ecx, %ebp
- adcl %edx, %ebx
- movl 64(%esp), %eax
- movl 4(%eax), %edx
- mulxl 12(%esp), %eax, %ecx # 4-byte Folded Reload
- movl %ecx, (%esp) # 4-byte Spill
- mulxl 16(%esp), %esi, %ecx # 4-byte Folded Reload
- movl %esi, 4(%esp) # 4-byte Spill
- addl %eax, %ecx
- mulxl 8(%esp), %esi, %eax # 4-byte Folded Reload
- adcl (%esp), %esi # 4-byte Folded Reload
- adcl $0, %eax
- movl 4(%esp), %edx # 4-byte Reload
- addl %edi, %edx
- adcl %ebp, %ecx
- adcl %ebx, %esi
- adcl $0, %eax
- movl %edx, %ebp
- imull 20(%esp), %edx # 4-byte Folded Reload
- mulxl 32(%esp), %ebx, %edi # 4-byte Folded Reload
- addl %ebp, %ebx
- mulxl 28(%esp), %ebp, %ebx # 4-byte Folded Reload
- adcl %ecx, %ebp
- mulxl 24(%esp), %ecx, %edx # 4-byte Folded Reload
- adcl %esi, %ecx
- adcl $0, %eax
- addl %edi, %ebp
- adcl %ebx, %ecx
- adcl %edx, %eax
- movl 64(%esp), %edx
- movl 8(%edx), %edx
- mulxl 12(%esp), %esi, %edi # 4-byte Folded Reload
- movl %edi, 12(%esp) # 4-byte Spill
- mulxl 16(%esp), %ebx, %edi # 4-byte Folded Reload
- movl %ebx, 16(%esp) # 4-byte Spill
- addl %esi, %edi
- mulxl 8(%esp), %ebx, %esi # 4-byte Folded Reload
- adcl 12(%esp), %ebx # 4-byte Folded Reload
- adcl $0, %esi
- addl %ebp, 16(%esp) # 4-byte Folded Spill
- adcl %ecx, %edi
- adcl %eax, %ebx
- adcl $0, %esi
- movl 20(%esp), %edx # 4-byte Reload
- movl 16(%esp), %ecx # 4-byte Reload
- imull %ecx, %edx
- mulxl 32(%esp), %eax, %ebp # 4-byte Folded Reload
- movl %ebp, 20(%esp) # 4-byte Spill
- addl %ecx, %eax
- movl %edx, %eax
- mulxl 28(%esp), %ecx, %ebp # 4-byte Folded Reload
- adcl %edi, %ecx
- mulxl 24(%esp), %eax, %edx # 4-byte Folded Reload
- adcl %ebx, %eax
- adcl $0, %esi
- addl 20(%esp), %ecx # 4-byte Folded Reload
- adcl %ebp, %eax
- adcl %edx, %esi
- movl %ecx, %ebp
- subl 32(%esp), %ebp # 4-byte Folded Reload
- movl %eax, %edi
- sbbl 28(%esp), %edi # 4-byte Folded Reload
- movl %esi, %edx
- sbbl 24(%esp), %edx # 4-byte Folded Reload
- movl %edx, %ebx
- sarl $31, %ebx
- testl %ebx, %ebx
- js .LBB39_2
-# BB#1:
- movl %ebp, %ecx
-.LBB39_2:
- movl 56(%esp), %ebx
- movl %ecx, (%ebx)
- js .LBB39_4
-# BB#3:
- movl %edi, %eax
-.LBB39_4:
- movl %eax, 4(%ebx)
- js .LBB39_6
-# BB#5:
- movl %edx, %esi
-.LBB39_6:
- movl %esi, 8(%ebx)
- addl $36, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end39:
- .size mcl_fp_montNF3Lbmi2, .Lfunc_end39-mcl_fp_montNF3Lbmi2
-
- .globl mcl_fp_montRed3Lbmi2
- .align 16, 0x90
- .type mcl_fp_montRed3Lbmi2,@function
-mcl_fp_montRed3Lbmi2: # @mcl_fp_montRed3Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $36, %esp
- movl 64(%esp), %ecx
- movl -4(%ecx), %edi
- movl %edi, 20(%esp) # 4-byte Spill
- movl (%ecx), %esi
- movl %esi, 24(%esp) # 4-byte Spill
- movl 60(%esp), %eax
- movl (%eax), %ebx
- movl %ebx, %edx
- imull %edi, %edx
- movl 8(%ecx), %edi
- movl %edi, 32(%esp) # 4-byte Spill
- movl 4(%ecx), %ecx
- movl %ecx, 28(%esp) # 4-byte Spill
- mulxl %edi, %edi, %eax
- movl %edi, 16(%esp) # 4-byte Spill
- mulxl %ecx, %ebp, %edi
- mulxl %esi, %edx, %ecx
- addl %ebp, %ecx
- adcl 16(%esp), %edi # 4-byte Folded Reload
- adcl $0, %eax
- addl %ebx, %edx
- movl 60(%esp), %edx
- adcl 4(%edx), %ecx
- adcl 8(%edx), %edi
- adcl 12(%edx), %eax
- movl %eax, 12(%esp) # 4-byte Spill
- movl 20(%edx), %eax
- movl 16(%edx), %edx
- adcl $0, %edx
- movl %edx, 8(%esp) # 4-byte Spill
- adcl $0, %eax
- movl %eax, 16(%esp) # 4-byte Spill
- sbbl %ebx, %ebx
- andl $1, %ebx
- movl %ecx, %edx
- imull 20(%esp), %edx # 4-byte Folded Reload
- mulxl 28(%esp), %esi, %eax # 4-byte Folded Reload
- movl %eax, (%esp) # 4-byte Spill
- mulxl 24(%esp), %ebp, %eax # 4-byte Folded Reload
- movl %ebp, 4(%esp) # 4-byte Spill
- addl %esi, %eax
- mulxl 32(%esp), %esi, %ebp # 4-byte Folded Reload
- adcl (%esp), %esi # 4-byte Folded Reload
- adcl $0, %ebp
- addl %ecx, 4(%esp) # 4-byte Folded Spill
- adcl %edi, %eax
- adcl 12(%esp), %esi # 4-byte Folded Reload
- adcl 8(%esp), %ebp # 4-byte Folded Reload
- adcl $0, 16(%esp) # 4-byte Folded Spill
- adcl $0, %ebx
- movl 20(%esp), %edx # 4-byte Reload
- imull %eax, %edx
- mulxl 24(%esp), %ecx, %edi # 4-byte Folded Reload
- movl %edi, 8(%esp) # 4-byte Spill
- movl %ecx, 20(%esp) # 4-byte Spill
- movl %edx, %ecx
- mulxl 28(%esp), %edi, %edx # 4-byte Folded Reload
- movl %edx, 12(%esp) # 4-byte Spill
- addl 8(%esp), %edi # 4-byte Folded Reload
- movl %ecx, %edx
- mulxl 32(%esp), %ecx, %edx # 4-byte Folded Reload
- adcl 12(%esp), %ecx # 4-byte Folded Reload
- adcl $0, %edx
- addl %eax, 20(%esp) # 4-byte Folded Spill
- adcl %esi, %edi
- adcl %ebp, %ecx
- adcl 16(%esp), %edx # 4-byte Folded Reload
- adcl $0, %ebx
- movl %edi, %ebp
- subl 24(%esp), %ebp # 4-byte Folded Reload
- movl %ecx, %esi
- sbbl 28(%esp), %esi # 4-byte Folded Reload
- movl %edx, %eax
- sbbl 32(%esp), %eax # 4-byte Folded Reload
- sbbl $0, %ebx
- andl $1, %ebx
- jne .LBB40_2
-# BB#1:
- movl %ebp, %edi
-.LBB40_2:
- movl 56(%esp), %ebp
- movl %edi, (%ebp)
- testb %bl, %bl
- jne .LBB40_4
-# BB#3:
- movl %esi, %ecx
-.LBB40_4:
- movl %ecx, 4(%ebp)
- jne .LBB40_6
-# BB#5:
- movl %eax, %edx
-.LBB40_6:
- movl %edx, 8(%ebp)
- addl $36, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end40:
- .size mcl_fp_montRed3Lbmi2, .Lfunc_end40-mcl_fp_montRed3Lbmi2
-
- .globl mcl_fp_addPre3Lbmi2
- .align 16, 0x90
- .type mcl_fp_addPre3Lbmi2,@function
-mcl_fp_addPre3Lbmi2: # @mcl_fp_addPre3Lbmi2
-# BB#0:
- pushl %esi
- movl 16(%esp), %eax
- movl (%eax), %ecx
- movl 4(%eax), %edx
- movl 12(%esp), %esi
- addl (%esi), %ecx
- adcl 4(%esi), %edx
- movl 8(%eax), %eax
- adcl 8(%esi), %eax
- movl 8(%esp), %esi
- movl %ecx, (%esi)
- movl %edx, 4(%esi)
- movl %eax, 8(%esi)
- sbbl %eax, %eax
- andl $1, %eax
- popl %esi
- retl
-.Lfunc_end41:
- .size mcl_fp_addPre3Lbmi2, .Lfunc_end41-mcl_fp_addPre3Lbmi2
-
- .globl mcl_fp_subPre3Lbmi2
- .align 16, 0x90
- .type mcl_fp_subPre3Lbmi2,@function
-mcl_fp_subPre3Lbmi2: # @mcl_fp_subPre3Lbmi2
-# BB#0:
- pushl %edi
- pushl %esi
- movl 16(%esp), %ecx
- movl (%ecx), %edx
- movl 4(%ecx), %esi
- xorl %eax, %eax
- movl 20(%esp), %edi
- subl (%edi), %edx
- sbbl 4(%edi), %esi
- movl 8(%ecx), %ecx
- sbbl 8(%edi), %ecx
- movl 12(%esp), %edi
- movl %edx, (%edi)
- movl %esi, 4(%edi)
- movl %ecx, 8(%edi)
- sbbl $0, %eax
- andl $1, %eax
- popl %esi
- popl %edi
- retl
-.Lfunc_end42:
- .size mcl_fp_subPre3Lbmi2, .Lfunc_end42-mcl_fp_subPre3Lbmi2
-
- .globl mcl_fp_shr1_3Lbmi2
- .align 16, 0x90
- .type mcl_fp_shr1_3Lbmi2,@function
-mcl_fp_shr1_3Lbmi2: # @mcl_fp_shr1_3Lbmi2
-# BB#0:
- pushl %esi
- movl 12(%esp), %eax
- movl 8(%eax), %ecx
- movl (%eax), %edx
- movl 4(%eax), %eax
- shrdl $1, %eax, %edx
- movl 8(%esp), %esi
- movl %edx, (%esi)
- shrdl $1, %ecx, %eax
- movl %eax, 4(%esi)
- shrl %ecx
- movl %ecx, 8(%esi)
- popl %esi
- retl
-.Lfunc_end43:
- .size mcl_fp_shr1_3Lbmi2, .Lfunc_end43-mcl_fp_shr1_3Lbmi2
-
- .globl mcl_fp_add3Lbmi2
- .align 16, 0x90
- .type mcl_fp_add3Lbmi2,@function
-mcl_fp_add3Lbmi2: # @mcl_fp_add3Lbmi2
-# BB#0:
- pushl %ebx
- pushl %edi
- pushl %esi
- movl 24(%esp), %edx
- movl (%edx), %eax
- movl 4(%edx), %ecx
- movl 20(%esp), %esi
- addl (%esi), %eax
- adcl 4(%esi), %ecx
- movl 8(%edx), %edx
- adcl 8(%esi), %edx
- movl 16(%esp), %esi
- movl %eax, (%esi)
- movl %ecx, 4(%esi)
- movl %edx, 8(%esi)
- sbbl %ebx, %ebx
- andl $1, %ebx
- movl 28(%esp), %edi
- subl (%edi), %eax
- sbbl 4(%edi), %ecx
- sbbl 8(%edi), %edx
- sbbl $0, %ebx
- testb $1, %bl
- jne .LBB44_2
-# BB#1: # %nocarry
- movl %eax, (%esi)
- movl %ecx, 4(%esi)
- movl %edx, 8(%esi)
-.LBB44_2: # %carry
- popl %esi
- popl %edi
- popl %ebx
- retl
-.Lfunc_end44:
- .size mcl_fp_add3Lbmi2, .Lfunc_end44-mcl_fp_add3Lbmi2
-
- .globl mcl_fp_addNF3Lbmi2
- .align 16, 0x90
- .type mcl_fp_addNF3Lbmi2,@function
-mcl_fp_addNF3Lbmi2: # @mcl_fp_addNF3Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- movl 28(%esp), %eax
- movl (%eax), %edx
- movl 4(%eax), %ecx
- movl 24(%esp), %esi
- addl (%esi), %edx
- adcl 4(%esi), %ecx
- movl 8(%eax), %eax
- adcl 8(%esi), %eax
- movl 32(%esp), %ebp
- movl %edx, %ebx
- subl (%ebp), %ebx
- movl %ecx, %edi
- sbbl 4(%ebp), %edi
- movl %eax, %esi
- sbbl 8(%ebp), %esi
- movl %esi, %ebp
- sarl $31, %ebp
- testl %ebp, %ebp
- js .LBB45_2
-# BB#1:
- movl %ebx, %edx
-.LBB45_2:
- movl 20(%esp), %ebx
- movl %edx, (%ebx)
- js .LBB45_4
-# BB#3:
- movl %edi, %ecx
-.LBB45_4:
- movl %ecx, 4(%ebx)
- js .LBB45_6
-# BB#5:
- movl %esi, %eax
-.LBB45_6:
- movl %eax, 8(%ebx)
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end45:
- .size mcl_fp_addNF3Lbmi2, .Lfunc_end45-mcl_fp_addNF3Lbmi2
-
- .globl mcl_fp_sub3Lbmi2
- .align 16, 0x90
- .type mcl_fp_sub3Lbmi2,@function
-mcl_fp_sub3Lbmi2: # @mcl_fp_sub3Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- movl 24(%esp), %edx
- movl (%edx), %ecx
- movl 4(%edx), %eax
- xorl %ebx, %ebx
- movl 28(%esp), %esi
- subl (%esi), %ecx
- sbbl 4(%esi), %eax
- movl 8(%edx), %edx
- sbbl 8(%esi), %edx
- movl 20(%esp), %esi
- movl %ecx, (%esi)
- movl %eax, 4(%esi)
- movl %edx, 8(%esi)
- sbbl $0, %ebx
- testb $1, %bl
- je .LBB46_2
-# BB#1: # %carry
- movl 32(%esp), %edi
- movl 4(%edi), %ebx
- movl 8(%edi), %ebp
- addl (%edi), %ecx
- movl %ecx, (%esi)
- adcl %eax, %ebx
- movl %ebx, 4(%esi)
- adcl %edx, %ebp
- movl %ebp, 8(%esi)
-.LBB46_2: # %nocarry
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end46:
- .size mcl_fp_sub3Lbmi2, .Lfunc_end46-mcl_fp_sub3Lbmi2
-
- .globl mcl_fp_subNF3Lbmi2
- .align 16, 0x90
- .type mcl_fp_subNF3Lbmi2,@function
-mcl_fp_subNF3Lbmi2: # @mcl_fp_subNF3Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- movl 24(%esp), %eax
- movl (%eax), %ecx
- movl 4(%eax), %edx
- movl 28(%esp), %esi
- subl (%esi), %ecx
- sbbl 4(%esi), %edx
- movl 8(%eax), %eax
- sbbl 8(%esi), %eax
- movl %eax, %esi
- sarl $31, %esi
- movl %esi, %edi
- shldl $1, %eax, %edi
- movl 32(%esp), %ebx
- andl (%ebx), %edi
- movl 8(%ebx), %ebp
- andl %esi, %ebp
- andl 4(%ebx), %esi
- addl %ecx, %edi
- adcl %edx, %esi
- movl 20(%esp), %ecx
- movl %edi, (%ecx)
- movl %esi, 4(%ecx)
- adcl %eax, %ebp
- movl %ebp, 8(%ecx)
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end47:
- .size mcl_fp_subNF3Lbmi2, .Lfunc_end47-mcl_fp_subNF3Lbmi2
-
- .globl mcl_fpDbl_add3Lbmi2
- .align 16, 0x90
- .type mcl_fpDbl_add3Lbmi2,@function
-mcl_fpDbl_add3Lbmi2: # @mcl_fpDbl_add3Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- pushl %eax
- movl 32(%esp), %esi
- movl 20(%esi), %eax
- movl %eax, (%esp) # 4-byte Spill
- movl 16(%esi), %edi
- movl 12(%esi), %ebx
- movl (%esi), %edx
- movl 28(%esp), %eax
- addl (%eax), %edx
- movl 24(%esp), %ecx
- movl %edx, (%ecx)
- movl 8(%esi), %edx
- movl 4(%esi), %esi
- adcl 4(%eax), %esi
- adcl 8(%eax), %edx
- movl %esi, 4(%ecx)
- movl 20(%eax), %ebp
- movl %edx, 8(%ecx)
- movl 12(%eax), %esi
- movl 16(%eax), %edx
- adcl %ebx, %esi
- adcl %edi, %edx
- adcl (%esp), %ebp # 4-byte Folded Reload
- sbbl %eax, %eax
- andl $1, %eax
- movl 36(%esp), %ecx
- movl %esi, %ebx
- subl (%ecx), %ebx
- movl %edx, %edi
- sbbl 4(%ecx), %edi
- movl %edi, (%esp) # 4-byte Spill
- movl %ebp, %ecx
- movl 36(%esp), %edi
- sbbl 8(%edi), %ecx
- sbbl $0, %eax
- andl $1, %eax
- jne .LBB48_2
-# BB#1:
- movl %ecx, %ebp
-.LBB48_2:
- testb %al, %al
- jne .LBB48_4
-# BB#3:
- movl %ebx, %esi
-.LBB48_4:
- movl 24(%esp), %eax
- movl %esi, 12(%eax)
- jne .LBB48_6
-# BB#5:
- movl (%esp), %edx # 4-byte Reload
-.LBB48_6:
- movl %edx, 16(%eax)
- movl %ebp, 20(%eax)
- addl $4, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end48:
- .size mcl_fpDbl_add3Lbmi2, .Lfunc_end48-mcl_fpDbl_add3Lbmi2
-
- .globl mcl_fpDbl_sub3Lbmi2
- .align 16, 0x90
- .type mcl_fpDbl_sub3Lbmi2,@function
-mcl_fpDbl_sub3Lbmi2: # @mcl_fpDbl_sub3Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- movl 24(%esp), %ecx
- movl (%ecx), %edx
- movl 4(%ecx), %esi
- movl 28(%esp), %ebx
- subl (%ebx), %edx
- sbbl 4(%ebx), %esi
- movl 8(%ecx), %ebp
- sbbl 8(%ebx), %ebp
- movl 20(%esp), %eax
- movl %edx, (%eax)
- movl 12(%ecx), %edi
- sbbl 12(%ebx), %edi
- movl %esi, 4(%eax)
- movl 16(%ecx), %esi
- sbbl 16(%ebx), %esi
- movl 20(%ebx), %ebx
- movl 20(%ecx), %edx
- movl %ebp, 8(%eax)
- sbbl %ebx, %edx
- movl $0, %ecx
- sbbl $0, %ecx
- andl $1, %ecx
- movl 32(%esp), %ebp
- jne .LBB49_1
-# BB#2:
- xorl %ebx, %ebx
- jmp .LBB49_3
-.LBB49_1:
- movl 8(%ebp), %ebx
-.LBB49_3:
- testb %cl, %cl
- movl $0, %eax
- jne .LBB49_4
-# BB#5:
- xorl %ecx, %ecx
- jmp .LBB49_6
-.LBB49_4:
- movl (%ebp), %ecx
- movl 4(%ebp), %eax
-.LBB49_6:
- addl %edi, %ecx
- adcl %esi, %eax
- movl 20(%esp), %esi
- movl %ecx, 12(%esi)
- movl %eax, 16(%esi)
- adcl %edx, %ebx
- movl %ebx, 20(%esi)
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end49:
- .size mcl_fpDbl_sub3Lbmi2, .Lfunc_end49-mcl_fpDbl_sub3Lbmi2
-
- .globl mcl_fp_mulUnitPre4Lbmi2
- .align 16, 0x90
- .type mcl_fp_mulUnitPre4Lbmi2,@function
-mcl_fp_mulUnitPre4Lbmi2: # @mcl_fp_mulUnitPre4Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- movl 28(%esp), %edx
- movl 24(%esp), %eax
- mulxl 4(%eax), %esi, %ecx
- mulxl (%eax), %edi, %ebx
- addl %esi, %ebx
- mulxl 8(%eax), %ebp, %esi
- adcl %ecx, %ebp
- mulxl 12(%eax), %eax, %ecx
- movl 20(%esp), %edx
- movl %edi, (%edx)
- movl %ebx, 4(%edx)
- movl %ebp, 8(%edx)
- adcl %esi, %eax
- movl %eax, 12(%edx)
- adcl $0, %ecx
- movl %ecx, 16(%edx)
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end50:
- .size mcl_fp_mulUnitPre4Lbmi2, .Lfunc_end50-mcl_fp_mulUnitPre4Lbmi2
-
- .globl mcl_fpDbl_mulPre4Lbmi2
- .align 16, 0x90
- .type mcl_fpDbl_mulPre4Lbmi2,@function
-mcl_fpDbl_mulPre4Lbmi2: # @mcl_fpDbl_mulPre4Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $28, %esp
- movl 52(%esp), %eax
- movl (%eax), %ebx
- movl %ebx, 12(%esp) # 4-byte Spill
- movl 4(%eax), %edx
- movl %edx, 24(%esp) # 4-byte Spill
- movl 56(%esp), %ecx
- movl (%ecx), %ebp
- mulxl %ebp, %esi, %ecx
- movl %ecx, 16(%esp) # 4-byte Spill
- movl %ebx, %edx
- movl %ebx, %ecx
- mulxl %ebp, %edx, %ebx
- movl %edx, 8(%esp) # 4-byte Spill
- addl %esi, %ebx
- movl 8(%eax), %edx
- movl %edx, 20(%esp) # 4-byte Spill
- movl %eax, %esi
- mulxl %ebp, %eax, %edi
- adcl 16(%esp), %eax # 4-byte Folded Reload
- movl 12(%esi), %edx
- movl %edx, 16(%esp) # 4-byte Spill
- mulxl %ebp, %ebp, %esi
- adcl %edi, %ebp
- movl 48(%esp), %edx
- movl 8(%esp), %edi # 4-byte Reload
- movl %edi, (%edx)
- adcl $0, %esi
- movl 56(%esp), %edx
- movl 4(%edx), %edi
- movl %ecx, %edx
- mulxl %edi, %ecx, %edx
- movl %edx, 8(%esp) # 4-byte Spill
- addl %ebx, %ecx
- movl 24(%esp), %edx # 4-byte Reload
- mulxl %edi, %ebx, %edx
- movl %edx, 4(%esp) # 4-byte Spill
- adcl %eax, %ebx
- movl 20(%esp), %edx # 4-byte Reload
- mulxl %edi, %eax, %edx
- movl %edx, (%esp) # 4-byte Spill
- adcl %ebp, %eax
- movl 16(%esp), %edx # 4-byte Reload
- mulxl %edi, %edi, %edx
- adcl %esi, %edi
- sbbl %ebp, %ebp
- andl $1, %ebp
- addl 8(%esp), %ebx # 4-byte Folded Reload
- adcl 4(%esp), %eax # 4-byte Folded Reload
- adcl (%esp), %edi # 4-byte Folded Reload
- adcl %edx, %ebp
- movl 48(%esp), %edx
- movl %ecx, 4(%edx)
- movl 56(%esp), %ecx
- movl 8(%ecx), %ecx
- movl 12(%esp), %edx # 4-byte Reload
- mulxl %ecx, %edx, %esi
- movl %esi, 8(%esp) # 4-byte Spill
- addl %ebx, %edx
- movl %edx, 12(%esp) # 4-byte Spill
- movl 24(%esp), %edx # 4-byte Reload
- mulxl %ecx, %ebx, %edx
- movl %edx, 24(%esp) # 4-byte Spill
- adcl %eax, %ebx
- movl 20(%esp), %edx # 4-byte Reload
- mulxl %ecx, %esi, %eax
- movl %eax, 20(%esp) # 4-byte Spill
- adcl %edi, %esi
- movl 16(%esp), %edx # 4-byte Reload
- mulxl %ecx, %edi, %eax
- adcl %ebp, %edi
- sbbl %ebp, %ebp
- andl $1, %ebp
- addl 8(%esp), %ebx # 4-byte Folded Reload
- adcl 24(%esp), %esi # 4-byte Folded Reload
- adcl 20(%esp), %edi # 4-byte Folded Reload
- adcl %eax, %ebp
- movl 48(%esp), %eax
- movl 12(%esp), %ecx # 4-byte Reload
- movl %ecx, 8(%eax)
- movl 56(%esp), %eax
- movl 12(%eax), %edx
- movl 52(%esp), %eax
- mulxl (%eax), %ecx, %eax
- movl %eax, 20(%esp) # 4-byte Spill
- addl %ebx, %ecx
- movl %ecx, 24(%esp) # 4-byte Spill
- movl 52(%esp), %ebx
- mulxl 4(%ebx), %ecx, %eax
- movl %eax, 16(%esp) # 4-byte Spill
- adcl %esi, %ecx
- mulxl 8(%ebx), %eax, %esi
- adcl %edi, %eax
- mulxl 12(%ebx), %edi, %edx
- adcl %ebp, %edi
- sbbl %ebp, %ebp
- andl $1, %ebp
- addl 20(%esp), %ecx # 4-byte Folded Reload
- adcl 16(%esp), %eax # 4-byte Folded Reload
- adcl %esi, %edi
- movl 48(%esp), %esi
- movl 24(%esp), %ebx # 4-byte Reload
- movl %ebx, 12(%esi)
- movl %ecx, 16(%esi)
- movl %eax, 20(%esi)
- movl %edi, 24(%esi)
- adcl %edx, %ebp
- movl %ebp, 28(%esi)
- addl $28, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end51:
- .size mcl_fpDbl_mulPre4Lbmi2, .Lfunc_end51-mcl_fpDbl_mulPre4Lbmi2
-
- .globl mcl_fpDbl_sqrPre4Lbmi2
- .align 16, 0x90
- .type mcl_fpDbl_sqrPre4Lbmi2,@function
-mcl_fpDbl_sqrPre4Lbmi2: # @mcl_fpDbl_sqrPre4Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $36, %esp
- movl 60(%esp), %eax
- movl (%eax), %esi
- movl 4(%eax), %edi
- movl 56(%esp), %ebx
- movl %esi, %edx
- mulxl %esi, %eax, %ebp
- movl %eax, (%ebx)
- movl %edi, %edx
- mulxl %esi, %edx, %ecx
- movl %edx, 28(%esp) # 4-byte Spill
- movl %ecx, 16(%esp) # 4-byte Spill
- movl %ebp, %eax
- addl %edx, %eax
- movl 60(%esp), %edx
- movl 8(%edx), %edx
- movl %edx, (%esp) # 4-byte Spill
- mulxl %esi, %edx, %ebx
- movl %edx, 20(%esp) # 4-byte Spill
- movl %ebx, 24(%esp) # 4-byte Spill
- movl %ecx, %ebp
- adcl %edx, %ebp
- movl 60(%esp), %ecx
- movl 12(%ecx), %edx
- movl %edx, 32(%esp) # 4-byte Spill
- mulxl %esi, %esi, %ecx
- adcl %ebx, %esi
- adcl $0, %ecx
- addl 28(%esp), %eax # 4-byte Folded Reload
- movl %eax, 28(%esp) # 4-byte Spill
- movl %edi, %edx
- mulxl %edi, %ebx, %eax
- movl %eax, 8(%esp) # 4-byte Spill
- adcl %ebp, %ebx
- movl 32(%esp), %edx # 4-byte Reload
- mulxl %edi, %ebp, %eax
- movl %eax, 4(%esp) # 4-byte Spill
- movl (%esp), %edx # 4-byte Reload
- mulxl %edi, %edi, %eax
- movl %eax, 12(%esp) # 4-byte Spill
- adcl %edi, %esi
- adcl %ecx, %ebp
- sbbl %ecx, %ecx
- andl $1, %ecx
- addl 16(%esp), %ebx # 4-byte Folded Reload
- adcl 8(%esp), %esi # 4-byte Folded Reload
- adcl %eax, %ebp
- adcl 4(%esp), %ecx # 4-byte Folded Reload
- addl 20(%esp), %ebx # 4-byte Folded Reload
- adcl %edi, %esi
- mulxl %edx, %edi, %eax
- movl %eax, 20(%esp) # 4-byte Spill
- movl %edx, %eax
- adcl %ebp, %edi
- movl 32(%esp), %edx # 4-byte Reload
- mulxl %eax, %ebp, %edx
- adcl %ecx, %ebp
- sbbl %eax, %eax
- andl $1, %eax
- addl 24(%esp), %esi # 4-byte Folded Reload
- adcl 12(%esp), %edi # 4-byte Folded Reload
- adcl 20(%esp), %ebp # 4-byte Folded Reload
- adcl %edx, %eax
- movl %eax, 24(%esp) # 4-byte Spill
- movl 56(%esp), %eax
- movl 28(%esp), %ecx # 4-byte Reload
- movl %ecx, 4(%eax)
- movl %ebx, 8(%eax)
- movl 60(%esp), %eax
- movl 12(%eax), %edx
- mulxl (%eax), %ebx, %ecx
- movl %ecx, 32(%esp) # 4-byte Spill
- addl %esi, %ebx
- mulxl 4(%eax), %esi, %ecx
- movl %ecx, 28(%esp) # 4-byte Spill
- adcl %edi, %esi
- mulxl 8(%eax), %ecx, %edi
- adcl %ebp, %ecx
- mulxl %edx, %ebp, %edx
- adcl 24(%esp), %ebp # 4-byte Folded Reload
- sbbl %eax, %eax
- andl $1, %eax
- addl 32(%esp), %esi # 4-byte Folded Reload
- adcl 28(%esp), %ecx # 4-byte Folded Reload
- adcl %edi, %ebp
- movl 56(%esp), %edi
- movl %ebx, 12(%edi)
- movl %esi, 16(%edi)
- movl %ecx, 20(%edi)
- movl %ebp, 24(%edi)
- adcl %edx, %eax
- movl %eax, 28(%edi)
- addl $36, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end52:
- .size mcl_fpDbl_sqrPre4Lbmi2, .Lfunc_end52-mcl_fpDbl_sqrPre4Lbmi2
-
- .globl mcl_fp_mont4Lbmi2
- .align 16, 0x90
- .type mcl_fp_mont4Lbmi2,@function
-mcl_fp_mont4Lbmi2: # @mcl_fp_mont4Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $64, %esp
- movl 88(%esp), %eax
- movl 12(%eax), %edx
- movl %edx, 40(%esp) # 4-byte Spill
- movl 92(%esp), %ecx
- movl (%ecx), %ecx
- movl 8(%eax), %esi
- movl %esi, 36(%esp) # 4-byte Spill
- movl (%eax), %edi
- movl %edi, 32(%esp) # 4-byte Spill
- movl 4(%eax), %ebx
- movl %ebx, 28(%esp) # 4-byte Spill
- mulxl %ecx, %eax, %ebp
- movl %eax, 60(%esp) # 4-byte Spill
- movl %esi, %edx
- mulxl %ecx, %edx, %eax
- movl %edx, 56(%esp) # 4-byte Spill
- movl %ebx, %edx
- mulxl %ecx, %ebx, %esi
- movl %edi, %edx
- mulxl %ecx, %edx, %ecx
- movl %edx, 8(%esp) # 4-byte Spill
- addl %ebx, %ecx
- movl %ecx, 12(%esp) # 4-byte Spill
- adcl 56(%esp), %esi # 4-byte Folded Reload
- movl %esi, 16(%esp) # 4-byte Spill
- adcl 60(%esp), %eax # 4-byte Folded Reload
- movl %eax, 20(%esp) # 4-byte Spill
- adcl $0, %ebp
- movl %ebp, 24(%esp) # 4-byte Spill
- movl 96(%esp), %ebx
- movl -4(%ebx), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- imull %eax, %edx
- movl (%ebx), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 4(%ebx), %ecx
- movl %ecx, 60(%esp) # 4-byte Spill
- mulxl %ecx, %esi, %ecx
- movl %ecx, 48(%esp) # 4-byte Spill
- mulxl %eax, %ecx, %eax
- addl %esi, %eax
- movl %eax, %ebp
- movl 8(%ebx), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- mulxl %eax, %edi, %eax
- movl %eax, 4(%esp) # 4-byte Spill
- adcl 48(%esp), %edi # 4-byte Folded Reload
- movl 12(%ebx), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- mulxl %eax, %esi, %ebx
- adcl 4(%esp), %esi # 4-byte Folded Reload
- adcl $0, %ebx
- addl 8(%esp), %ecx # 4-byte Folded Reload
- adcl 12(%esp), %ebp # 4-byte Folded Reload
- movl %ebp, 8(%esp) # 4-byte Spill
- adcl 16(%esp), %edi # 4-byte Folded Reload
- adcl 20(%esp), %esi # 4-byte Folded Reload
- adcl 24(%esp), %ebx # 4-byte Folded Reload
- sbbl %eax, %eax
- andl $1, %eax
- movl %eax, 16(%esp) # 4-byte Spill
- movl 92(%esp), %eax
- movl 4(%eax), %edx
- mulxl 40(%esp), %ecx, %eax # 4-byte Folded Reload
- movl %ecx, 4(%esp) # 4-byte Spill
- movl %eax, 24(%esp) # 4-byte Spill
- mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload
- movl %ecx, 12(%esp) # 4-byte Spill
- movl %eax, 20(%esp) # 4-byte Spill
- mulxl 28(%esp), %ecx, %eax # 4-byte Folded Reload
- movl %ecx, (%esp) # 4-byte Spill
- mulxl 32(%esp), %ecx, %ebp # 4-byte Folded Reload
- addl (%esp), %ebp # 4-byte Folded Reload
- adcl 12(%esp), %eax # 4-byte Folded Reload
- movl %eax, 12(%esp) # 4-byte Spill
- movl 20(%esp), %edx # 4-byte Reload
- adcl 4(%esp), %edx # 4-byte Folded Reload
- movl 24(%esp), %eax # 4-byte Reload
- adcl $0, %eax
- addl 8(%esp), %ecx # 4-byte Folded Reload
- movl %ecx, 8(%esp) # 4-byte Spill
- adcl %edi, %ebp
- adcl %esi, 12(%esp) # 4-byte Folded Spill
- adcl %ebx, %edx
- movl %edx, 20(%esp) # 4-byte Spill
- adcl 16(%esp), %eax # 4-byte Folded Reload
- movl %eax, 24(%esp) # 4-byte Spill
- sbbl %eax, %eax
- movl %eax, 16(%esp) # 4-byte Spill
- movl %ecx, %edx
- imull 44(%esp), %edx # 4-byte Folded Reload
- mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload
- mulxl 56(%esp), %edi, %esi # 4-byte Folded Reload
- movl %edi, 4(%esp) # 4-byte Spill
- addl %ecx, %esi
- mulxl 52(%esp), %ebx, %ecx # 4-byte Folded Reload
- adcl %eax, %ebx
- mulxl 48(%esp), %edi, %eax # 4-byte Folded Reload
- adcl %ecx, %edi
- adcl $0, %eax
- movl 16(%esp), %ecx # 4-byte Reload
- andl $1, %ecx
- movl 4(%esp), %edx # 4-byte Reload
- addl 8(%esp), %edx # 4-byte Folded Reload
- adcl %ebp, %esi
- movl %esi, 8(%esp) # 4-byte Spill
- adcl 12(%esp), %ebx # 4-byte Folded Reload
- adcl 20(%esp), %edi # 4-byte Folded Reload
- adcl 24(%esp), %eax # 4-byte Folded Reload
- movl %eax, 4(%esp) # 4-byte Spill
- adcl $0, %ecx
- movl %ecx, 16(%esp) # 4-byte Spill
- movl 92(%esp), %edx
- movl 8(%edx), %edx
- mulxl 40(%esp), %ecx, %eax # 4-byte Folded Reload
- movl %ecx, 12(%esp) # 4-byte Spill
- movl %eax, 24(%esp) # 4-byte Spill
- mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload
- movl %ecx, (%esp) # 4-byte Spill
- movl %eax, 20(%esp) # 4-byte Spill
- mulxl 28(%esp), %esi, %eax # 4-byte Folded Reload
- mulxl 32(%esp), %ebp, %ecx # 4-byte Folded Reload
- addl %esi, %ecx
- movl %ecx, %esi
- adcl (%esp), %eax # 4-byte Folded Reload
- movl %eax, %ecx
- movl 20(%esp), %eax # 4-byte Reload
- adcl 12(%esp), %eax # 4-byte Folded Reload
- movl 24(%esp), %edx # 4-byte Reload
- adcl $0, %edx
- addl 8(%esp), %ebp # 4-byte Folded Reload
- adcl %ebx, %esi
- movl %esi, 12(%esp) # 4-byte Spill
- adcl %edi, %ecx
- movl %ecx, %edi
- adcl 4(%esp), %eax # 4-byte Folded Reload
- movl %eax, 20(%esp) # 4-byte Spill
- adcl 16(%esp), %edx # 4-byte Folded Reload
- movl %edx, 24(%esp) # 4-byte Spill
- sbbl %eax, %eax
- movl %eax, 16(%esp) # 4-byte Spill
- movl %ebp, %edx
- imull 44(%esp), %edx # 4-byte Folded Reload
- mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload
- mulxl 56(%esp), %esi, %ebx # 4-byte Folded Reload
- movl %esi, 8(%esp) # 4-byte Spill
- addl %ecx, %ebx
- mulxl 52(%esp), %ecx, %esi # 4-byte Folded Reload
- movl %esi, 4(%esp) # 4-byte Spill
- adcl %eax, %ecx
- mulxl 48(%esp), %eax, %esi # 4-byte Folded Reload
- adcl 4(%esp), %eax # 4-byte Folded Reload
- adcl $0, %esi
- movl 16(%esp), %edx # 4-byte Reload
- andl $1, %edx
- addl %ebp, 8(%esp) # 4-byte Folded Spill
- adcl 12(%esp), %ebx # 4-byte Folded Reload
- adcl %edi, %ecx
- adcl 20(%esp), %eax # 4-byte Folded Reload
- adcl 24(%esp), %esi # 4-byte Folded Reload
- adcl $0, %edx
- movl %edx, 16(%esp) # 4-byte Spill
- movl 92(%esp), %edx
- movl 12(%edx), %edx
- mulxl 28(%esp), %ebp, %edi # 4-byte Folded Reload
- movl %ebp, 28(%esp) # 4-byte Spill
- movl %edi, 24(%esp) # 4-byte Spill
- mulxl 32(%esp), %edi, %ebp # 4-byte Folded Reload
- movl %edi, 32(%esp) # 4-byte Spill
- addl 28(%esp), %ebp # 4-byte Folded Reload
- movl %ebp, 28(%esp) # 4-byte Spill
- mulxl 40(%esp), %ebp, %edi # 4-byte Folded Reload
- movl %edi, 40(%esp) # 4-byte Spill
- mulxl 36(%esp), %edi, %edx # 4-byte Folded Reload
- adcl 24(%esp), %edi # 4-byte Folded Reload
- adcl %ebp, %edx
- movl %edx, 36(%esp) # 4-byte Spill
- movl 40(%esp), %edx # 4-byte Reload
- adcl $0, %edx
- movl 32(%esp), %ebp # 4-byte Reload
- addl %ebx, %ebp
- movl %ebp, 32(%esp) # 4-byte Spill
- adcl %ecx, 28(%esp) # 4-byte Folded Spill
- adcl %eax, %edi
- adcl %esi, 36(%esp) # 4-byte Folded Spill
- adcl 16(%esp), %edx # 4-byte Folded Reload
- movl %edx, 40(%esp) # 4-byte Spill
- sbbl %ecx, %ecx
- movl 44(%esp), %edx # 4-byte Reload
- imull %ebp, %edx
- mulxl 56(%esp), %eax, %esi # 4-byte Folded Reload
- movl %eax, 44(%esp) # 4-byte Spill
- movl %edx, %ebx
- mulxl 60(%esp), %ebp, %eax # 4-byte Folded Reload
- addl %esi, %ebp
- mulxl 52(%esp), %esi, %edx # 4-byte Folded Reload
- movl %edx, 24(%esp) # 4-byte Spill
- adcl %eax, %esi
- movl %ebx, %edx
- mulxl 48(%esp), %edx, %eax # 4-byte Folded Reload
- adcl 24(%esp), %edx # 4-byte Folded Reload
- adcl $0, %eax
- andl $1, %ecx
- movl 44(%esp), %ebx # 4-byte Reload
- addl 32(%esp), %ebx # 4-byte Folded Reload
- adcl 28(%esp), %ebp # 4-byte Folded Reload
- adcl %edi, %esi
- adcl 36(%esp), %edx # 4-byte Folded Reload
- adcl 40(%esp), %eax # 4-byte Folded Reload
- adcl $0, %ecx
- movl %ebp, %edi
- subl 56(%esp), %edi # 4-byte Folded Reload
- movl %esi, %ebx
- sbbl 60(%esp), %ebx # 4-byte Folded Reload
- movl %ebx, 56(%esp) # 4-byte Spill
- movl %edx, %ebx
- sbbl 52(%esp), %ebx # 4-byte Folded Reload
- movl %ebx, 60(%esp) # 4-byte Spill
- movl %eax, %ebx
- sbbl 48(%esp), %ebx # 4-byte Folded Reload
- sbbl $0, %ecx
- andl $1, %ecx
- jne .LBB53_2
-# BB#1:
- movl %edi, %ebp
-.LBB53_2:
- movl 84(%esp), %edi
- movl %ebp, (%edi)
- testb %cl, %cl
- jne .LBB53_4
-# BB#3:
- movl 56(%esp), %esi # 4-byte Reload
-.LBB53_4:
- movl %esi, 4(%edi)
- jne .LBB53_6
-# BB#5:
- movl 60(%esp), %edx # 4-byte Reload
-.LBB53_6:
- movl %edx, 8(%edi)
- jne .LBB53_8
-# BB#7:
- movl %ebx, %eax
-.LBB53_8:
- movl %eax, 12(%edi)
- addl $64, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end53:
- .size mcl_fp_mont4Lbmi2, .Lfunc_end53-mcl_fp_mont4Lbmi2
-
- .globl mcl_fp_montNF4Lbmi2
- .align 16, 0x90
- .type mcl_fp_montNF4Lbmi2,@function
-mcl_fp_montNF4Lbmi2: # @mcl_fp_montNF4Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $52, %esp
- movl 76(%esp), %esi
- movl (%esi), %eax
- movl %eax, 28(%esp) # 4-byte Spill
- movl 4(%esi), %edx
- movl %edx, 24(%esp) # 4-byte Spill
- movl 80(%esp), %ecx
- movl (%ecx), %ecx
- mulxl %ecx, %edi, %edx
- movl %edx, 48(%esp) # 4-byte Spill
- movl %eax, %edx
- mulxl %ecx, %ebp, %eax
- movl %ebp, 40(%esp) # 4-byte Spill
- addl %edi, %eax
- movl 8(%esi), %edx
- movl %edx, 16(%esp) # 4-byte Spill
- mulxl %ecx, %ebx, %edx
- movl %edx, 44(%esp) # 4-byte Spill
- adcl 48(%esp), %ebx # 4-byte Folded Reload
- movl 12(%esi), %edx
- movl %edx, 20(%esp) # 4-byte Spill
- mulxl %ecx, %esi, %edi
- adcl 44(%esp), %esi # 4-byte Folded Reload
- adcl $0, %edi
- movl 84(%esp), %ecx
- movl -4(%ecx), %ecx
- movl %ecx, 48(%esp) # 4-byte Spill
- movl %ebp, %edx
- imull %ecx, %edx
- movl 84(%esp), %ecx
- movl (%ecx), %ecx
- movl %ecx, 44(%esp) # 4-byte Spill
- mulxl %ecx, %ecx, %ebp
- movl %ebp, 8(%esp) # 4-byte Spill
- addl 40(%esp), %ecx # 4-byte Folded Reload
- movl 84(%esp), %ecx
- movl 4(%ecx), %ecx
- movl %ecx, 40(%esp) # 4-byte Spill
- mulxl %ecx, %ecx, %ebp
- movl %ebp, 4(%esp) # 4-byte Spill
- adcl %eax, %ecx
- movl %ecx, 12(%esp) # 4-byte Spill
- movl 84(%esp), %eax
- movl 8(%eax), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- mulxl %eax, %ecx, %eax
- adcl %ebx, %ecx
- movl %ecx, %ebp
- movl 84(%esp), %ecx
- movl 12(%ecx), %ecx
- movl %ecx, 32(%esp) # 4-byte Spill
- mulxl %ecx, %ebx, %edx
- adcl %esi, %ebx
- adcl $0, %edi
- movl 8(%esp), %ecx # 4-byte Reload
- addl %ecx, 12(%esp) # 4-byte Folded Spill
- adcl 4(%esp), %ebp # 4-byte Folded Reload
- movl %ebp, 4(%esp) # 4-byte Spill
- adcl %eax, %ebx
- adcl %edx, %edi
- movl %edi, 8(%esp) # 4-byte Spill
- movl 80(%esp), %eax
- movl 4(%eax), %edx
- mulxl 24(%esp), %esi, %edi # 4-byte Folded Reload
- mulxl 28(%esp), %ecx, %eax # 4-byte Folded Reload
- addl %esi, %eax
- mulxl 16(%esp), %ebp, %esi # 4-byte Folded Reload
- movl %esi, (%esp) # 4-byte Spill
- adcl %edi, %ebp
- mulxl 20(%esp), %edi, %esi # 4-byte Folded Reload
- adcl (%esp), %edi # 4-byte Folded Reload
- adcl $0, %esi
- addl 12(%esp), %ecx # 4-byte Folded Reload
- movl %ecx, (%esp) # 4-byte Spill
- adcl 4(%esp), %eax # 4-byte Folded Reload
- adcl %ebx, %ebp
- adcl 8(%esp), %edi # 4-byte Folded Reload
- adcl $0, %esi
- movl %ecx, %edx
- imull 48(%esp), %edx # 4-byte Folded Reload
- mulxl 44(%esp), %ebx, %ecx # 4-byte Folded Reload
- movl %ecx, 12(%esp) # 4-byte Spill
- addl (%esp), %ebx # 4-byte Folded Reload
- mulxl 40(%esp), %ebx, %ecx # 4-byte Folded Reload
- movl %ecx, 8(%esp) # 4-byte Spill
- adcl %eax, %ebx
- movl %ebx, %eax
- mulxl 36(%esp), %ebx, %ecx # 4-byte Folded Reload
- adcl %ebp, %ebx
- movl %ebx, %ebp
- mulxl 32(%esp), %ebx, %edx # 4-byte Folded Reload
- adcl %edi, %ebx
- adcl $0, %esi
- addl 12(%esp), %eax # 4-byte Folded Reload
- movl %eax, 12(%esp) # 4-byte Spill
- adcl 8(%esp), %ebp # 4-byte Folded Reload
- movl %ebp, 4(%esp) # 4-byte Spill
- adcl %ecx, %ebx
- movl %ebx, 8(%esp) # 4-byte Spill
- adcl %edx, %esi
- movl 80(%esp), %ecx
- movl 8(%ecx), %edx
- mulxl 24(%esp), %ecx, %ebx # 4-byte Folded Reload
- mulxl 28(%esp), %eax, %ebp # 4-byte Folded Reload
- addl %ecx, %ebp
- mulxl 16(%esp), %edi, %ecx # 4-byte Folded Reload
- movl %ecx, (%esp) # 4-byte Spill
- adcl %ebx, %edi
- mulxl 20(%esp), %ebx, %ecx # 4-byte Folded Reload
- adcl (%esp), %ebx # 4-byte Folded Reload
- adcl $0, %ecx
- addl 12(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp) # 4-byte Spill
- adcl 4(%esp), %ebp # 4-byte Folded Reload
- adcl 8(%esp), %edi # 4-byte Folded Reload
- adcl %esi, %ebx
- adcl $0, %ecx
- movl %eax, %edx
- imull 48(%esp), %edx # 4-byte Folded Reload
- mulxl 44(%esp), %eax, %esi # 4-byte Folded Reload
- movl %esi, 12(%esp) # 4-byte Spill
- addl (%esp), %eax # 4-byte Folded Reload
- mulxl 40(%esp), %eax, %esi # 4-byte Folded Reload
- movl %esi, 4(%esp) # 4-byte Spill
- adcl %ebp, %eax
- mulxl 36(%esp), %ebp, %esi # 4-byte Folded Reload
- movl %esi, (%esp) # 4-byte Spill
- adcl %edi, %ebp
- mulxl 32(%esp), %esi, %edx # 4-byte Folded Reload
- adcl %ebx, %esi
- adcl $0, %ecx
- addl 12(%esp), %eax # 4-byte Folded Reload
- movl %eax, 8(%esp) # 4-byte Spill
- adcl 4(%esp), %ebp # 4-byte Folded Reload
- movl %ebp, 12(%esp) # 4-byte Spill
- adcl (%esp), %esi # 4-byte Folded Reload
- adcl %edx, %ecx
- movl 80(%esp), %eax
- movl 12(%eax), %edx
- mulxl 24(%esp), %ebx, %ebp # 4-byte Folded Reload
- mulxl 28(%esp), %edi, %eax # 4-byte Folded Reload
- movl %edi, 28(%esp) # 4-byte Spill
- addl %ebx, %eax
- mulxl 16(%esp), %edi, %ebx # 4-byte Folded Reload
- movl %ebx, 24(%esp) # 4-byte Spill
- adcl %ebp, %edi
- mulxl 20(%esp), %ebp, %ebx # 4-byte Folded Reload
- adcl 24(%esp), %ebp # 4-byte Folded Reload
- adcl $0, %ebx
- movl 28(%esp), %edx # 4-byte Reload
- addl 8(%esp), %edx # 4-byte Folded Reload
- movl %edx, 28(%esp) # 4-byte Spill
- adcl 12(%esp), %eax # 4-byte Folded Reload
- adcl %esi, %edi
- adcl %ecx, %ebp
- adcl $0, %ebx
- movl 48(%esp), %edx # 4-byte Reload
- imull 28(%esp), %edx # 4-byte Folded Reload
- movl %edx, 48(%esp) # 4-byte Spill
- mulxl 44(%esp), %ecx, %esi # 4-byte Folded Reload
- movl %esi, 24(%esp) # 4-byte Spill
- addl 28(%esp), %ecx # 4-byte Folded Reload
- mulxl 40(%esp), %esi, %ecx # 4-byte Folded Reload
- movl %ecx, 28(%esp) # 4-byte Spill
- adcl %eax, %esi
- movl 48(%esp), %edx # 4-byte Reload
- mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload
- movl %eax, 20(%esp) # 4-byte Spill
- adcl %edi, %ecx
- movl 48(%esp), %edx # 4-byte Reload
- mulxl 32(%esp), %eax, %edx # 4-byte Folded Reload
- adcl %ebp, %eax
- adcl $0, %ebx
- addl 24(%esp), %esi # 4-byte Folded Reload
- adcl 28(%esp), %ecx # 4-byte Folded Reload
- adcl 20(%esp), %eax # 4-byte Folded Reload
- adcl %edx, %ebx
- movl %esi, %edi
- subl 44(%esp), %edi # 4-byte Folded Reload
- movl %ecx, %ebp
- sbbl 40(%esp), %ebp # 4-byte Folded Reload
- movl %eax, %edx
- sbbl 36(%esp), %edx # 4-byte Folded Reload
- movl %edx, 48(%esp) # 4-byte Spill
- movl %ebx, %edx
- sbbl 32(%esp), %edx # 4-byte Folded Reload
- testl %edx, %edx
- js .LBB54_2
-# BB#1:
- movl %edi, %esi
-.LBB54_2:
- movl 72(%esp), %edi
- movl %esi, (%edi)
- js .LBB54_4
-# BB#3:
- movl %ebp, %ecx
-.LBB54_4:
- movl %ecx, 4(%edi)
- js .LBB54_6
-# BB#5:
- movl 48(%esp), %eax # 4-byte Reload
-.LBB54_6:
- movl %eax, 8(%edi)
- js .LBB54_8
-# BB#7:
- movl %edx, %ebx
-.LBB54_8:
- movl %ebx, 12(%edi)
- addl $52, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end54:
- .size mcl_fp_montNF4Lbmi2, .Lfunc_end54-mcl_fp_montNF4Lbmi2
-
- .globl mcl_fp_montRed4Lbmi2
- .align 16, 0x90
- .type mcl_fp_montRed4Lbmi2,@function
-mcl_fp_montRed4Lbmi2: # @mcl_fp_montRed4Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $52, %esp
- movl 80(%esp), %ecx
- movl -4(%ecx), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl (%ecx), %edi
- movl %edi, 44(%esp) # 4-byte Spill
- movl 76(%esp), %ebp
- movl (%ebp), %edx
- movl %edx, 36(%esp) # 4-byte Spill
- imull %eax, %edx
- movl 12(%ecx), %esi
- movl %esi, 28(%esp) # 4-byte Spill
- movl 8(%ecx), %ebx
- movl %ebx, 48(%esp) # 4-byte Spill
- movl 4(%ecx), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- mulxl %esi, %esi, %ecx
- movl %esi, 16(%esp) # 4-byte Spill
- movl %ecx, 24(%esp) # 4-byte Spill
- mulxl %ebx, %esi, %ecx
- movl %esi, 12(%esp) # 4-byte Spill
- movl %ecx, 20(%esp) # 4-byte Spill
- mulxl %eax, %ebx, %ecx
- mulxl %edi, %edx, %esi
- addl %ebx, %esi
- movl %ecx, %edi
- adcl 12(%esp), %edi # 4-byte Folded Reload
- movl 20(%esp), %ebx # 4-byte Reload
- adcl 16(%esp), %ebx # 4-byte Folded Reload
- movl 24(%esp), %ecx # 4-byte Reload
- adcl $0, %ecx
- addl 36(%esp), %edx # 4-byte Folded Reload
- adcl 4(%ebp), %esi
- adcl 8(%ebp), %edi
- movl %edi, 12(%esp) # 4-byte Spill
- adcl 12(%ebp), %ebx
- movl %ebx, 20(%esp) # 4-byte Spill
- adcl 16(%ebp), %ecx
- movl %ecx, 24(%esp) # 4-byte Spill
- movl 28(%ebp), %ecx
- movl 24(%ebp), %edx
- movl 20(%ebp), %edi
- adcl $0, %edi
- movl %edi, 8(%esp) # 4-byte Spill
- adcl $0, %edx
- movl %edx, 16(%esp) # 4-byte Spill
- adcl $0, %ecx
- movl %ecx, 36(%esp) # 4-byte Spill
- sbbl %ebx, %ebx
- andl $1, %ebx
- movl %esi, %edx
- imull 40(%esp), %edx # 4-byte Folded Reload
- mulxl %eax, %ebp, %edi
- mulxl 44(%esp), %ecx, %eax # 4-byte Folded Reload
- movl %ecx, 4(%esp) # 4-byte Spill
- addl %ebp, %eax
- mulxl 48(%esp), %ebp, %ecx # 4-byte Folded Reload
- movl %ecx, (%esp) # 4-byte Spill
- adcl %edi, %ebp
- movl 28(%esp), %ecx # 4-byte Reload
- mulxl %ecx, %edi, %edx
- adcl (%esp), %edi # 4-byte Folded Reload
- adcl $0, %edx
- addl %esi, 4(%esp) # 4-byte Folded Spill
- adcl 12(%esp), %eax # 4-byte Folded Reload
- adcl 20(%esp), %ebp # 4-byte Folded Reload
- adcl 24(%esp), %edi # 4-byte Folded Reload
- adcl 8(%esp), %edx # 4-byte Folded Reload
- movl %edx, 12(%esp) # 4-byte Spill
- adcl $0, 16(%esp) # 4-byte Folded Spill
- adcl $0, 36(%esp) # 4-byte Folded Spill
- adcl $0, %ebx
- movl %eax, %edx
- imull 40(%esp), %edx # 4-byte Folded Reload
- mulxl %ecx, %esi, %ecx
- movl %esi, 20(%esp) # 4-byte Spill
- movl %ecx, 24(%esp) # 4-byte Spill
- mulxl 32(%esp), %esi, %ecx # 4-byte Folded Reload
- movl %esi, (%esp) # 4-byte Spill
- movl %ecx, 4(%esp) # 4-byte Spill
- mulxl 44(%esp), %esi, %ecx # 4-byte Folded Reload
- movl %esi, 8(%esp) # 4-byte Spill
- addl (%esp), %ecx # 4-byte Folded Reload
- mulxl 48(%esp), %esi, %edx # 4-byte Folded Reload
- adcl 4(%esp), %esi # 4-byte Folded Reload
- adcl 20(%esp), %edx # 4-byte Folded Reload
- movl %edx, 20(%esp) # 4-byte Spill
- movl 24(%esp), %edx # 4-byte Reload
- adcl $0, %edx
- addl %eax, 8(%esp) # 4-byte Folded Spill
- adcl %ebp, %ecx
- adcl %edi, %esi
- movl %esi, 8(%esp) # 4-byte Spill
- movl 12(%esp), %eax # 4-byte Reload
- adcl %eax, 20(%esp) # 4-byte Folded Spill
- adcl 16(%esp), %edx # 4-byte Folded Reload
- movl %edx, 24(%esp) # 4-byte Spill
- adcl $0, 36(%esp) # 4-byte Folded Spill
- adcl $0, %ebx
- movl 40(%esp), %edx # 4-byte Reload
- imull %ecx, %edx
- mulxl 44(%esp), %esi, %eax # 4-byte Folded Reload
- movl %esi, 40(%esp) # 4-byte Spill
- mulxl 32(%esp), %ebp, %esi # 4-byte Folded Reload
- addl %eax, %ebp
- movl %edx, %eax
- mulxl 48(%esp), %edi, %edx # 4-byte Folded Reload
- movl %edx, 16(%esp) # 4-byte Spill
- adcl %esi, %edi
- movl %eax, %edx
- mulxl 28(%esp), %edx, %esi # 4-byte Folded Reload
- adcl 16(%esp), %edx # 4-byte Folded Reload
- adcl $0, %esi
- addl %ecx, 40(%esp) # 4-byte Folded Spill
- adcl 8(%esp), %ebp # 4-byte Folded Reload
- adcl 20(%esp), %edi # 4-byte Folded Reload
- adcl 24(%esp), %edx # 4-byte Folded Reload
- adcl 36(%esp), %esi # 4-byte Folded Reload
- adcl $0, %ebx
- movl %ebp, %ecx
- subl 44(%esp), %ecx # 4-byte Folded Reload
- movl %edi, %eax
- sbbl 32(%esp), %eax # 4-byte Folded Reload
- movl %eax, 44(%esp) # 4-byte Spill
- movl %edx, %eax
- sbbl 48(%esp), %eax # 4-byte Folded Reload
- movl %eax, 48(%esp) # 4-byte Spill
- movl %esi, %eax
- sbbl 28(%esp), %eax # 4-byte Folded Reload
- sbbl $0, %ebx
- andl $1, %ebx
- jne .LBB55_2
-# BB#1:
- movl %ecx, %ebp
-.LBB55_2:
- movl 72(%esp), %ecx
- movl %ebp, (%ecx)
- testb %bl, %bl
- jne .LBB55_4
-# BB#3:
- movl 44(%esp), %edi # 4-byte Reload
-.LBB55_4:
- movl %edi, 4(%ecx)
- jne .LBB55_6
-# BB#5:
- movl 48(%esp), %edx # 4-byte Reload
-.LBB55_6:
- movl %edx, 8(%ecx)
- jne .LBB55_8
-# BB#7:
- movl %eax, %esi
-.LBB55_8:
- movl %esi, 12(%ecx)
- addl $52, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end55:
- .size mcl_fp_montRed4Lbmi2, .Lfunc_end55-mcl_fp_montRed4Lbmi2
-
- .globl mcl_fp_addPre4Lbmi2
- .align 16, 0x90
- .type mcl_fp_addPre4Lbmi2,@function
-mcl_fp_addPre4Lbmi2: # @mcl_fp_addPre4Lbmi2
-# BB#0:
- pushl %ebx
- pushl %edi
- pushl %esi
- movl 24(%esp), %eax
- movl (%eax), %ecx
- movl 4(%eax), %edx
- movl 20(%esp), %esi
- addl (%esi), %ecx
- adcl 4(%esi), %edx
- movl 12(%eax), %edi
- movl 8(%eax), %eax
- adcl 8(%esi), %eax
- movl 12(%esi), %esi
- movl 16(%esp), %ebx
- movl %ecx, (%ebx)
- movl %edx, 4(%ebx)
- movl %eax, 8(%ebx)
- adcl %edi, %esi
- movl %esi, 12(%ebx)
- sbbl %eax, %eax
- andl $1, %eax
- popl %esi
- popl %edi
- popl %ebx
- retl
-.Lfunc_end56:
- .size mcl_fp_addPre4Lbmi2, .Lfunc_end56-mcl_fp_addPre4Lbmi2
-
- .globl mcl_fp_subPre4Lbmi2
- .align 16, 0x90
- .type mcl_fp_subPre4Lbmi2,@function
-mcl_fp_subPre4Lbmi2: # @mcl_fp_subPre4Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- movl 24(%esp), %ecx
- movl (%ecx), %edx
- movl 4(%ecx), %esi
- xorl %eax, %eax
- movl 28(%esp), %edi
- subl (%edi), %edx
- sbbl 4(%edi), %esi
- movl 8(%ecx), %ebx
- sbbl 8(%edi), %ebx
- movl 12(%edi), %edi
- movl 12(%ecx), %ecx
- movl 20(%esp), %ebp
- movl %edx, (%ebp)
- movl %esi, 4(%ebp)
- movl %ebx, 8(%ebp)
- sbbl %edi, %ecx
- movl %ecx, 12(%ebp)
- sbbl $0, %eax
- andl $1, %eax
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end57:
- .size mcl_fp_subPre4Lbmi2, .Lfunc_end57-mcl_fp_subPre4Lbmi2
-
- .globl mcl_fp_shr1_4Lbmi2
- .align 16, 0x90
- .type mcl_fp_shr1_4Lbmi2,@function
-mcl_fp_shr1_4Lbmi2: # @mcl_fp_shr1_4Lbmi2
-# BB#0:
- pushl %edi
- pushl %esi
- movl 16(%esp), %eax
- movl 12(%eax), %ecx
- movl 8(%eax), %edx
- movl (%eax), %esi
- movl 4(%eax), %eax
- shrdl $1, %eax, %esi
- movl 12(%esp), %edi
- movl %esi, (%edi)
- shrdl $1, %edx, %eax
- movl %eax, 4(%edi)
- shrdl $1, %ecx, %edx
- movl %edx, 8(%edi)
- shrl %ecx
- movl %ecx, 12(%edi)
- popl %esi
- popl %edi
- retl
-.Lfunc_end58:
- .size mcl_fp_shr1_4Lbmi2, .Lfunc_end58-mcl_fp_shr1_4Lbmi2
-
- .globl mcl_fp_add4Lbmi2
- .align 16, 0x90
- .type mcl_fp_add4Lbmi2,@function
-mcl_fp_add4Lbmi2: # @mcl_fp_add4Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- movl 28(%esp), %edi
- movl (%edi), %eax
- movl 4(%edi), %ecx
- movl 24(%esp), %esi
- addl (%esi), %eax
- adcl 4(%esi), %ecx
- movl 8(%edi), %edx
- adcl 8(%esi), %edx
- movl 12(%esi), %esi
- adcl 12(%edi), %esi
- movl 20(%esp), %edi
- movl %eax, (%edi)
- movl %ecx, 4(%edi)
- movl %edx, 8(%edi)
- movl %esi, 12(%edi)
- sbbl %ebx, %ebx
- andl $1, %ebx
- movl 32(%esp), %ebp
- subl (%ebp), %eax
- sbbl 4(%ebp), %ecx
- sbbl 8(%ebp), %edx
- sbbl 12(%ebp), %esi
- sbbl $0, %ebx
- testb $1, %bl
- jne .LBB59_2
-# BB#1: # %nocarry
- movl %eax, (%edi)
- movl %ecx, 4(%edi)
- movl %edx, 8(%edi)
- movl %esi, 12(%edi)
-.LBB59_2: # %carry
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end59:
- .size mcl_fp_add4Lbmi2, .Lfunc_end59-mcl_fp_add4Lbmi2
-
- .globl mcl_fp_addNF4Lbmi2
- .align 16, 0x90
- .type mcl_fp_addNF4Lbmi2,@function
-mcl_fp_addNF4Lbmi2: # @mcl_fp_addNF4Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $8, %esp
- movl 36(%esp), %edx
- movl (%edx), %esi
- movl 4(%edx), %ecx
- movl 32(%esp), %edi
- addl (%edi), %esi
- adcl 4(%edi), %ecx
- movl 12(%edx), %ebp
- movl 8(%edx), %edx
- adcl 8(%edi), %edx
- adcl 12(%edi), %ebp
- movl 40(%esp), %eax
- movl %esi, %ebx
- subl (%eax), %ebx
- movl %ecx, %edi
- sbbl 4(%eax), %edi
- movl %edi, (%esp) # 4-byte Spill
- movl %edx, %edi
- movl 40(%esp), %eax
- sbbl 8(%eax), %edi
- movl %edi, 4(%esp) # 4-byte Spill
- movl %ebp, %edi
- movl 40(%esp), %eax
- sbbl 12(%eax), %edi
- testl %edi, %edi
- js .LBB60_2
-# BB#1:
- movl %ebx, %esi
-.LBB60_2:
- movl 28(%esp), %ebx
- movl %esi, (%ebx)
- js .LBB60_4
-# BB#3:
- movl (%esp), %ecx # 4-byte Reload
-.LBB60_4:
- movl %ecx, 4(%ebx)
- js .LBB60_6
-# BB#5:
- movl 4(%esp), %edx # 4-byte Reload
-.LBB60_6:
- movl %edx, 8(%ebx)
- js .LBB60_8
-# BB#7:
- movl %edi, %ebp
-.LBB60_8:
- movl %ebp, 12(%ebx)
- addl $8, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end60:
- .size mcl_fp_addNF4Lbmi2, .Lfunc_end60-mcl_fp_addNF4Lbmi2
-
- .globl mcl_fp_sub4Lbmi2
- .align 16, 0x90
- .type mcl_fp_sub4Lbmi2,@function
-mcl_fp_sub4Lbmi2: # @mcl_fp_sub4Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- movl 24(%esp), %esi
- movl (%esi), %eax
- movl 4(%esi), %ecx
- xorl %ebx, %ebx
- movl 28(%esp), %edi
- subl (%edi), %eax
- sbbl 4(%edi), %ecx
- movl 8(%esi), %edx
- sbbl 8(%edi), %edx
- movl 12(%esi), %esi
- sbbl 12(%edi), %esi
- movl 20(%esp), %edi
- movl %eax, (%edi)
- movl %ecx, 4(%edi)
- movl %edx, 8(%edi)
- movl %esi, 12(%edi)
- sbbl $0, %ebx
- testb $1, %bl
- je .LBB61_2
-# BB#1: # %carry
- movl 32(%esp), %ebx
- addl (%ebx), %eax
- movl 8(%ebx), %ebp
- adcl 4(%ebx), %ecx
- movl 12(%ebx), %ebx
- movl %eax, (%edi)
- movl %ecx, 4(%edi)
- adcl %edx, %ebp
- movl %ebp, 8(%edi)
- adcl %esi, %ebx
- movl %ebx, 12(%edi)
-.LBB61_2: # %nocarry
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end61:
- .size mcl_fp_sub4Lbmi2, .Lfunc_end61-mcl_fp_sub4Lbmi2
-
- .globl mcl_fp_subNF4Lbmi2
- .align 16, 0x90
- .type mcl_fp_subNF4Lbmi2,@function
-mcl_fp_subNF4Lbmi2: # @mcl_fp_subNF4Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $8, %esp
- movl 32(%esp), %eax
- movl (%eax), %edx
- movl 4(%eax), %ecx
- movl 36(%esp), %esi
- subl (%esi), %edx
- movl %edx, (%esp) # 4-byte Spill
- sbbl 4(%esi), %ecx
- movl %ecx, 4(%esp) # 4-byte Spill
- movl 12(%eax), %edi
- movl 8(%eax), %edx
- sbbl 8(%esi), %edx
- sbbl 12(%esi), %edi
- movl %edi, %esi
- sarl $31, %esi
- movl 40(%esp), %eax
- movl 12(%eax), %ebp
- andl %esi, %ebp
- movl 8(%eax), %ecx
- andl %esi, %ecx
- movl 40(%esp), %eax
- movl 4(%eax), %eax
- andl %esi, %eax
- movl 40(%esp), %ebx
- andl (%ebx), %esi
- addl (%esp), %esi # 4-byte Folded Reload
- adcl 4(%esp), %eax # 4-byte Folded Reload
- movl 28(%esp), %ebx
- movl %esi, (%ebx)
- adcl %edx, %ecx
- movl %eax, 4(%ebx)
- movl %ecx, 8(%ebx)
- adcl %edi, %ebp
- movl %ebp, 12(%ebx)
- addl $8, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end62:
- .size mcl_fp_subNF4Lbmi2, .Lfunc_end62-mcl_fp_subNF4Lbmi2
-
- .globl mcl_fpDbl_add4Lbmi2
- .align 16, 0x90
- .type mcl_fpDbl_add4Lbmi2,@function
-mcl_fpDbl_add4Lbmi2: # @mcl_fpDbl_add4Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $12, %esp
- movl 40(%esp), %eax
- movl (%eax), %edi
- movl 4(%eax), %edx
- movl 36(%esp), %esi
- addl (%esi), %edi
- adcl 4(%esi), %edx
- movl 8(%eax), %ebx
- adcl 8(%esi), %ebx
- movl 12(%esi), %ebp
- movl 32(%esp), %ecx
- movl %edi, (%ecx)
- movl 16(%esi), %edi
- adcl 12(%eax), %ebp
- adcl 16(%eax), %edi
- movl %edx, 4(%ecx)
- movl 28(%eax), %edx
- movl %edx, 8(%esp) # 4-byte Spill
- movl %ebx, 8(%ecx)
- movl 24(%eax), %ebx
- movl 20(%eax), %eax
- movl %ebp, 12(%ecx)
- movl 20(%esi), %edx
- adcl %eax, %edx
- movl 28(%esi), %ecx
- movl 24(%esi), %ebp
- adcl %ebx, %ebp
- adcl 8(%esp), %ecx # 4-byte Folded Reload
- movl %ecx, 8(%esp) # 4-byte Spill
- sbbl %ebx, %ebx
- andl $1, %ebx
- movl 44(%esp), %eax
- movl %edi, %esi
- subl (%eax), %esi
- movl %esi, (%esp) # 4-byte Spill
- movl %edx, %esi
- sbbl 4(%eax), %esi
- movl %esi, 4(%esp) # 4-byte Spill
- movl %ebp, %esi
- sbbl 8(%eax), %esi
- sbbl 12(%eax), %ecx
- sbbl $0, %ebx
- andl $1, %ebx
- jne .LBB63_2
-# BB#1:
- movl %esi, %ebp
-.LBB63_2:
- testb %bl, %bl
- jne .LBB63_4
-# BB#3:
- movl (%esp), %edi # 4-byte Reload
-.LBB63_4:
- movl 32(%esp), %eax
- movl %edi, 16(%eax)
- jne .LBB63_6
-# BB#5:
- movl 4(%esp), %edx # 4-byte Reload
-.LBB63_6:
- movl %edx, 20(%eax)
- movl %ebp, 24(%eax)
- movl 8(%esp), %edx # 4-byte Reload
- jne .LBB63_8
-# BB#7:
- movl %ecx, %edx
-.LBB63_8:
- movl %edx, 28(%eax)
- addl $12, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end63:
- .size mcl_fpDbl_add4Lbmi2, .Lfunc_end63-mcl_fpDbl_add4Lbmi2
-
- .globl mcl_fpDbl_sub4Lbmi2
- .align 16, 0x90
- .type mcl_fpDbl_sub4Lbmi2,@function
-mcl_fpDbl_sub4Lbmi2: # @mcl_fpDbl_sub4Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- pushl %eax
- movl 28(%esp), %eax
- movl (%eax), %edx
- movl 4(%eax), %esi
- movl 32(%esp), %ebp
- subl (%ebp), %edx
- sbbl 4(%ebp), %esi
- movl 8(%eax), %ebx
- sbbl 8(%ebp), %ebx
- movl 24(%esp), %ecx
- movl %edx, (%ecx)
- movl 12(%eax), %edx
- sbbl 12(%ebp), %edx
- movl %esi, 4(%ecx)
- movl 16(%eax), %edi
- sbbl 16(%ebp), %edi
- movl %ebx, 8(%ecx)
- movl 20(%ebp), %esi
- movl %edx, 12(%ecx)
- movl 20(%eax), %ebx
- sbbl %esi, %ebx
- movl 24(%ebp), %edx
- movl 24(%eax), %esi
- sbbl %edx, %esi
- movl 28(%ebp), %edx
- movl 28(%eax), %eax
- sbbl %edx, %eax
- movl %eax, (%esp) # 4-byte Spill
- movl $0, %edx
- sbbl $0, %edx
- andl $1, %edx
- movl 36(%esp), %ecx
- movl (%ecx), %eax
- jne .LBB64_1
-# BB#2:
- xorl %ebp, %ebp
- jmp .LBB64_3
-.LBB64_1:
- movl 4(%ecx), %ebp
-.LBB64_3:
- testb %dl, %dl
- jne .LBB64_5
-# BB#4:
- movl $0, %eax
-.LBB64_5:
- jne .LBB64_6
-# BB#7:
- movl $0, %edx
- jmp .LBB64_8
-.LBB64_6:
- movl 12(%ecx), %edx
-.LBB64_8:
- jne .LBB64_9
-# BB#10:
- xorl %ecx, %ecx
- jmp .LBB64_11
-.LBB64_9:
- movl 8(%ecx), %ecx
-.LBB64_11:
- addl %edi, %eax
- adcl %ebx, %ebp
- movl 24(%esp), %edi
- movl %eax, 16(%edi)
- adcl %esi, %ecx
- movl %ebp, 20(%edi)
- movl %ecx, 24(%edi)
- adcl (%esp), %edx # 4-byte Folded Reload
- movl %edx, 28(%edi)
- addl $4, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end64:
- .size mcl_fpDbl_sub4Lbmi2, .Lfunc_end64-mcl_fpDbl_sub4Lbmi2
-
- .globl mcl_fp_mulUnitPre5Lbmi2
- .align 16, 0x90
- .type mcl_fp_mulUnitPre5Lbmi2,@function
-mcl_fp_mulUnitPre5Lbmi2: # @mcl_fp_mulUnitPre5Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $8, %esp
- movl 36(%esp), %edx
- movl 32(%esp), %ecx
- mulxl 4(%ecx), %esi, %eax
- mulxl (%ecx), %edi, %ebx
- movl %edi, 4(%esp) # 4-byte Spill
- addl %esi, %ebx
- mulxl 8(%ecx), %ebp, %esi
- adcl %eax, %ebp
- mulxl 12(%ecx), %eax, %edi
- movl %edi, (%esp) # 4-byte Spill
- adcl %esi, %eax
- mulxl 16(%ecx), %ecx, %edx
- movl 28(%esp), %esi
- movl 4(%esp), %edi # 4-byte Reload
- movl %edi, (%esi)
- movl %ebx, 4(%esi)
- movl %ebp, 8(%esi)
- movl %eax, 12(%esi)
- adcl (%esp), %ecx # 4-byte Folded Reload
- movl %ecx, 16(%esi)
- adcl $0, %edx
- movl %edx, 20(%esi)
- addl $8, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end65:
- .size mcl_fp_mulUnitPre5Lbmi2, .Lfunc_end65-mcl_fp_mulUnitPre5Lbmi2
-
- .globl mcl_fpDbl_mulPre5Lbmi2
- .align 16, 0x90
- .type mcl_fpDbl_mulPre5Lbmi2,@function
-mcl_fpDbl_mulPre5Lbmi2: # @mcl_fpDbl_mulPre5Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $44, %esp
- movl 68(%esp), %eax
- movl (%eax), %ebx
- movl %ebx, 36(%esp) # 4-byte Spill
- movl 4(%eax), %edx
- movl %edx, 40(%esp) # 4-byte Spill
- movl %eax, %ecx
- movl 72(%esp), %eax
- movl (%eax), %ebp
- mulxl %ebp, %esi, %edi
- movl %ebx, %edx
- mulxl %ebp, %edx, %eax
- movl %edx, 20(%esp) # 4-byte Spill
- addl %esi, %eax
- movl 8(%ecx), %edx
- movl %edx, 32(%esp) # 4-byte Spill
- mulxl %ebp, %esi, %ebx
- adcl %edi, %esi
- movl 12(%ecx), %edx
- movl %edx, 28(%esp) # 4-byte Spill
- mulxl %ebp, %edi, %ecx
- adcl %ebx, %edi
- movl 68(%esp), %edx
- movl 16(%edx), %edx
- movl %edx, 24(%esp) # 4-byte Spill
- mulxl %ebp, %ebp, %edx
- adcl %ecx, %ebp
- movl 64(%esp), %ecx
- movl 20(%esp), %ebx # 4-byte Reload
- movl %ebx, (%ecx)
- adcl $0, %edx
- movl %edx, 16(%esp) # 4-byte Spill
- movl 72(%esp), %ecx
- movl 4(%ecx), %ebx
- movl 36(%esp), %edx # 4-byte Reload
- mulxl %ebx, %ecx, %edx
- movl %edx, 20(%esp) # 4-byte Spill
- addl %eax, %ecx
- movl %ecx, 36(%esp) # 4-byte Spill
- movl 40(%esp), %edx # 4-byte Reload
- mulxl %ebx, %ecx, %eax
- movl %eax, 40(%esp) # 4-byte Spill
- adcl %esi, %ecx
- movl 32(%esp), %edx # 4-byte Reload
- mulxl %ebx, %esi, %eax
- movl %eax, 32(%esp) # 4-byte Spill
- adcl %edi, %esi
- movl 28(%esp), %edx # 4-byte Reload
- mulxl %ebx, %edi, %eax
- movl %eax, 28(%esp) # 4-byte Spill
- adcl %ebp, %edi
- movl 24(%esp), %edx # 4-byte Reload
- mulxl %ebx, %eax, %edx
- adcl 16(%esp), %eax # 4-byte Folded Reload
- movl %eax, %ebx
- sbbl %eax, %eax
- andl $1, %eax
- addl 20(%esp), %ecx # 4-byte Folded Reload
- adcl 40(%esp), %esi # 4-byte Folded Reload
- adcl 32(%esp), %edi # 4-byte Folded Reload
- adcl 28(%esp), %ebx # 4-byte Folded Reload
- movl %ebx, 24(%esp) # 4-byte Spill
- adcl %edx, %eax
- movl %eax, 16(%esp) # 4-byte Spill
- movl 64(%esp), %eax
- movl 36(%esp), %edx # 4-byte Reload
- movl %edx, 4(%eax)
- movl 68(%esp), %ebx
- movl (%ebx), %edx
- movl %edx, 40(%esp) # 4-byte Spill
- movl 72(%esp), %eax
- movl 8(%eax), %eax
- mulxl %eax, %edx, %ebp
- movl %ebp, 12(%esp) # 4-byte Spill
- addl %ecx, %edx
- movl %edx, 20(%esp) # 4-byte Spill
- movl 4(%ebx), %edx
- movl %edx, 36(%esp) # 4-byte Spill
- mulxl %eax, %edx, %ecx
- movl %ecx, 8(%esp) # 4-byte Spill
- adcl %esi, %edx
- movl %edx, %ebp
- movl 8(%ebx), %edx
- movl %edx, 32(%esp) # 4-byte Spill
- mulxl %eax, %ecx, %edx
- movl %edx, 4(%esp) # 4-byte Spill
- adcl %edi, %ecx
- movl 12(%ebx), %edx
- movl %edx, 28(%esp) # 4-byte Spill
- mulxl %eax, %esi, %edx
- movl %edx, (%esp) # 4-byte Spill
- adcl 24(%esp), %esi # 4-byte Folded Reload
- movl 16(%ebx), %edx
- movl %edx, 24(%esp) # 4-byte Spill
- mulxl %eax, %edi, %edx
- adcl 16(%esp), %edi # 4-byte Folded Reload
- sbbl %ebx, %ebx
- andl $1, %ebx
- addl 12(%esp), %ebp # 4-byte Folded Reload
- movl %ebp, 16(%esp) # 4-byte Spill
- adcl 8(%esp), %ecx # 4-byte Folded Reload
- adcl 4(%esp), %esi # 4-byte Folded Reload
- adcl (%esp), %edi # 4-byte Folded Reload
- adcl %edx, %ebx
- movl 64(%esp), %eax
- movl 20(%esp), %edx # 4-byte Reload
- movl %edx, 8(%eax)
- movl 72(%esp), %eax
- movl 12(%eax), %eax
- movl 40(%esp), %edx # 4-byte Reload
- mulxl %eax, %edx, %ebp
- movl %ebp, 20(%esp) # 4-byte Spill
- addl 16(%esp), %edx # 4-byte Folded Reload
- movl %edx, 40(%esp) # 4-byte Spill
- movl 36(%esp), %edx # 4-byte Reload
- mulxl %eax, %ebp, %edx
- movl %edx, 36(%esp) # 4-byte Spill
- adcl %ecx, %ebp
- movl 32(%esp), %edx # 4-byte Reload
- mulxl %eax, %ecx, %edx
- movl %edx, 32(%esp) # 4-byte Spill
- adcl %esi, %ecx
- movl 28(%esp), %edx # 4-byte Reload
- mulxl %eax, %edx, %esi
- movl %esi, 28(%esp) # 4-byte Spill
- adcl %edi, %edx
- movl %edx, %esi
- movl 24(%esp), %edx # 4-byte Reload
- mulxl %eax, %edi, %edx
- adcl %ebx, %edi
- sbbl %eax, %eax
- andl $1, %eax
- addl 20(%esp), %ebp # 4-byte Folded Reload
- adcl 36(%esp), %ecx # 4-byte Folded Reload
- adcl 32(%esp), %esi # 4-byte Folded Reload
- movl %esi, 24(%esp) # 4-byte Spill
- adcl 28(%esp), %edi # 4-byte Folded Reload
- adcl %edx, %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 64(%esp), %eax
- movl 40(%esp), %edx # 4-byte Reload
- movl %edx, 12(%eax)
- movl 72(%esp), %eax
- movl 16(%eax), %edx
- movl 68(%esp), %eax
- mulxl (%eax), %esi, %ebx
- movl %ebx, 36(%esp) # 4-byte Spill
- addl %ebp, %esi
- movl %esi, 40(%esp) # 4-byte Spill
- mulxl 4(%eax), %ebx, %esi
- movl %esi, 28(%esp) # 4-byte Spill
- adcl %ecx, %ebx
- mulxl 8(%eax), %esi, %ecx
- movl %ecx, 20(%esp) # 4-byte Spill
- adcl 24(%esp), %esi # 4-byte Folded Reload
- mulxl 12(%eax), %ecx, %ebp
- adcl %edi, %ecx
- mulxl 16(%eax), %edi, %eax
- movl %eax, 24(%esp) # 4-byte Spill
- adcl 32(%esp), %edi # 4-byte Folded Reload
- sbbl %eax, %eax
- andl $1, %eax
- addl 36(%esp), %ebx # 4-byte Folded Reload
- adcl 28(%esp), %esi # 4-byte Folded Reload
- adcl 20(%esp), %ecx # 4-byte Folded Reload
- adcl %ebp, %edi
- movl 64(%esp), %ebp
- movl 40(%esp), %edx # 4-byte Reload
- movl %edx, 16(%ebp)
- movl %ebx, 20(%ebp)
- movl %esi, 24(%ebp)
- movl %ecx, 28(%ebp)
- movl %edi, 32(%ebp)
- adcl 24(%esp), %eax # 4-byte Folded Reload
- movl %eax, 36(%ebp)
- addl $44, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end66:
- .size mcl_fpDbl_mulPre5Lbmi2, .Lfunc_end66-mcl_fpDbl_mulPre5Lbmi2
-
- .globl mcl_fpDbl_sqrPre5Lbmi2
- .align 16, 0x90
- .type mcl_fpDbl_sqrPre5Lbmi2,@function
-mcl_fpDbl_sqrPre5Lbmi2: # @mcl_fpDbl_sqrPre5Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $40, %esp
- movl 64(%esp), %ecx
- movl (%ecx), %edi
- movl 4(%ecx), %esi
- movl %esi, %edx
- mulxl %edi, %ebp, %ebx
- movl %ebp, 24(%esp) # 4-byte Spill
- movl %ebx, 28(%esp) # 4-byte Spill
- movl %edi, %edx
- mulxl %edi, %edx, %eax
- movl %edx, 16(%esp) # 4-byte Spill
- addl %ebp, %eax
- movl 8(%ecx), %edx
- movl %edx, 36(%esp) # 4-byte Spill
- mulxl %edi, %ebp, %edx
- movl %edx, 32(%esp) # 4-byte Spill
- adcl %ebx, %ebp
- movl %ebp, 12(%esp) # 4-byte Spill
- movl 12(%ecx), %edx
- movl %edx, 20(%esp) # 4-byte Spill
- mulxl %edi, %ecx, %ebx
- adcl 32(%esp), %ecx # 4-byte Folded Reload
- movl 64(%esp), %edx
- movl 16(%edx), %edx
- movl %edx, 32(%esp) # 4-byte Spill
- mulxl %edi, %edi, %edx
- adcl %ebx, %edi
- movl 16(%esp), %ebx # 4-byte Reload
- movl 60(%esp), %ebp
- movl %ebx, (%ebp)
- adcl $0, %edx
- movl %edx, 8(%esp) # 4-byte Spill
- addl 24(%esp), %eax # 4-byte Folded Reload
- movl %eax, 24(%esp) # 4-byte Spill
- movl %esi, %edx
- mulxl %esi, %ebx, %eax
- movl %eax, 16(%esp) # 4-byte Spill
- adcl 12(%esp), %ebx # 4-byte Folded Reload
- movl 36(%esp), %edx # 4-byte Reload
- mulxl %esi, %ebp, %eax
- movl %eax, 12(%esp) # 4-byte Spill
- adcl %ecx, %ebp
- movl 20(%esp), %edx # 4-byte Reload
- mulxl %esi, %ecx, %eax
- movl %eax, 20(%esp) # 4-byte Spill
- adcl %edi, %ecx
- movl 32(%esp), %edx # 4-byte Reload
- mulxl %esi, %edi, %edx
- adcl 8(%esp), %edi # 4-byte Folded Reload
- sbbl %eax, %eax
- andl $1, %eax
- addl 28(%esp), %ebx # 4-byte Folded Reload
- adcl 16(%esp), %ebp # 4-byte Folded Reload
- adcl 12(%esp), %ecx # 4-byte Folded Reload
- adcl 20(%esp), %edi # 4-byte Folded Reload
- adcl %edx, %eax
- movl %eax, 12(%esp) # 4-byte Spill
- movl 60(%esp), %eax
- movl 24(%esp), %edx # 4-byte Reload
- movl %edx, 4(%eax)
- movl 64(%esp), %eax
- movl (%eax), %edx
- movl %edx, 24(%esp) # 4-byte Spill
- movl 36(%esp), %esi # 4-byte Reload
- mulxl %esi, %edx, %eax
- movl %eax, 16(%esp) # 4-byte Spill
- addl %ebx, %edx
- movl %edx, 28(%esp) # 4-byte Spill
- movl 64(%esp), %eax
- movl 4(%eax), %edx
- movl %edx, 20(%esp) # 4-byte Spill
- mulxl %esi, %ebx, %eax
- movl %eax, 8(%esp) # 4-byte Spill
- adcl %ebp, %ebx
- movl %esi, %edx
- mulxl %esi, %ebp, %edx
- movl %edx, 4(%esp) # 4-byte Spill
- movl %esi, %eax
- adcl %ecx, %ebp
- movl 64(%esp), %ecx
- movl 12(%ecx), %esi
- movl %esi, %edx
- mulxl %eax, %eax, %ecx
- movl %ecx, (%esp) # 4-byte Spill
- adcl %edi, %eax
- movl 32(%esp), %edx # 4-byte Reload
- mulxl 36(%esp), %ecx, %edx # 4-byte Folded Reload
- adcl 12(%esp), %ecx # 4-byte Folded Reload
- sbbl %edi, %edi
- andl $1, %edi
- addl 16(%esp), %ebx # 4-byte Folded Reload
- adcl 8(%esp), %ebp # 4-byte Folded Reload
- adcl 4(%esp), %eax # 4-byte Folded Reload
- movl %eax, 12(%esp) # 4-byte Spill
- adcl (%esp), %ecx # 4-byte Folded Reload
- adcl %edx, %edi
- movl %edi, 16(%esp) # 4-byte Spill
- movl 24(%esp), %edx # 4-byte Reload
- mulxl %esi, %edx, %edi
- movl %edi, 24(%esp) # 4-byte Spill
- addl %ebx, %edx
- movl %edx, 32(%esp) # 4-byte Spill
- movl 20(%esp), %edx # 4-byte Reload
- mulxl %esi, %edx, %edi
- movl %edi, 20(%esp) # 4-byte Spill
- adcl %ebp, %edx
- movl %edx, %edi
- movl 60(%esp), %eax
- movl 28(%esp), %edx # 4-byte Reload
- movl %edx, 8(%eax)
- movl 64(%esp), %eax
- movl 8(%eax), %edx
- movl %edx, 36(%esp) # 4-byte Spill
- mulxl %esi, %ebx, %edx
- movl %edx, 28(%esp) # 4-byte Spill
- adcl 12(%esp), %ebx # 4-byte Folded Reload
- movl %esi, %edx
- mulxl %esi, %ebp, %edx
- movl %edx, 12(%esp) # 4-byte Spill
- adcl %ecx, %ebp
- movl 16(%eax), %ecx
- movl %ecx, %edx
- mulxl %esi, %esi, %edx
- adcl 16(%esp), %esi # 4-byte Folded Reload
- sbbl %eax, %eax
- andl $1, %eax
- addl 24(%esp), %edi # 4-byte Folded Reload
- adcl 20(%esp), %ebx # 4-byte Folded Reload
- adcl 28(%esp), %ebp # 4-byte Folded Reload
- adcl 12(%esp), %esi # 4-byte Folded Reload
- adcl %edx, %eax
- movl %eax, 20(%esp) # 4-byte Spill
- movl 60(%esp), %edx
- movl 32(%esp), %eax # 4-byte Reload
- movl %eax, 12(%edx)
- movl %ecx, %edx
- movl 64(%esp), %eax
- mulxl (%eax), %edx, %eax
- movl %eax, 28(%esp) # 4-byte Spill
- addl %edi, %edx
- movl %edx, 32(%esp) # 4-byte Spill
- movl %ecx, %edx
- movl 64(%esp), %eax
- mulxl 4(%eax), %edi, %edx
- movl %edx, 24(%esp) # 4-byte Spill
- adcl %ebx, %edi
- movl 36(%esp), %edx # 4-byte Reload
- mulxl %ecx, %ebx, %edx
- movl %edx, 16(%esp) # 4-byte Spill
- adcl %ebp, %ebx
- movl %ecx, %edx
- mulxl 12(%eax), %ebp, %eax
- movl %eax, 12(%esp) # 4-byte Spill
- adcl %esi, %ebp
- movl %ecx, %edx
- mulxl %ecx, %edx, %eax
- movl %eax, 36(%esp) # 4-byte Spill
- adcl 20(%esp), %edx # 4-byte Folded Reload
- sbbl %eax, %eax
- andl $1, %eax
- addl 28(%esp), %edi # 4-byte Folded Reload
- adcl 24(%esp), %ebx # 4-byte Folded Reload
- adcl 16(%esp), %ebp # 4-byte Folded Reload
- adcl 12(%esp), %edx # 4-byte Folded Reload
- movl 60(%esp), %esi
- movl 32(%esp), %ecx # 4-byte Reload
- movl %ecx, 16(%esi)
- movl %edi, 20(%esi)
- movl %ebx, 24(%esi)
- movl %ebp, 28(%esi)
- movl %edx, 32(%esi)
- adcl 36(%esp), %eax # 4-byte Folded Reload
- movl %eax, 36(%esi)
- addl $40, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end67:
- .size mcl_fpDbl_sqrPre5Lbmi2, .Lfunc_end67-mcl_fpDbl_sqrPre5Lbmi2
-
- .globl mcl_fp_mont5Lbmi2
- .align 16, 0x90
- .type mcl_fp_mont5Lbmi2,@function
-mcl_fp_mont5Lbmi2: # @mcl_fp_mont5Lbmi2
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $84, %esp
- movl 108(%esp), %eax
- movl 16(%eax), %edx
- movl %edx, 48(%esp) # 4-byte Spill
- movl 112(%esp), %ecx
- movl (%ecx), %ecx
- movl 12(%eax), %esi
- movl %esi, 52(%esp) # 4-byte Spill
- movl 8(%eax), %edi
- movl %edi, 56(%esp) # 4-byte Spill
-
movl (%eax), %ebx - movl %ebx, 44(%esp) # 4-byte Spill - movl 4(%eax), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - mulxl %ecx, %edx, %eax - movl %edx, 80(%esp) # 4-byte Spill - movl %esi, %edx - mulxl %ecx, %edx, %esi - movl %edx, 76(%esp) # 4-byte Spill - movl %edi, %edx - mulxl %ecx, %edx, %edi - movl %edx, 72(%esp) # 4-byte Spill - movl %ebp, %edx - mulxl %ecx, %edx, %ebp - movl %edx, 68(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl %ecx, %edx, %ecx - movl %edx, 16(%esp) # 4-byte Spill - addl 68(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 72(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 24(%esp) # 4-byte Spill - adcl 76(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 80(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 116(%esp), %ebp - movl -4(%ebp), %eax - movl %eax, 60(%esp) # 4-byte Spill - imull %eax, %edx - movl (%ebp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 4(%ebp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - mulxl %ecx, %ecx, %ebx - mulxl %eax, %esi, %edi - movl %esi, 12(%esp) # 4-byte Spill - addl %ecx, %edi - movl 8(%ebp), %eax - movl %eax, 72(%esp) # 4-byte Spill - mulxl %eax, %esi, %ecx - adcl %ebx, %esi - movl 12(%ebp), %eax - movl %eax, 68(%esp) # 4-byte Spill - mulxl %eax, %eax, %ebx - movl %ebx, 8(%esp) # 4-byte Spill - adcl %ecx, %eax - movl %eax, %ecx - movl 16(%ebp), %eax - movl %eax, 64(%esp) # 4-byte Spill - mulxl %eax, %ebx, %eax - adcl 8(%esp), %ebx # 4-byte Folded Reload - adcl $0, %eax - movl 12(%esp), %edx # 4-byte Reload - addl 16(%esp), %edx # 4-byte Folded Reload - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 28(%esp) # 4-byte Spill - adcl 32(%esp), %ebx # 4-byte Folded Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - sbbl %edi, %edi - andl $1, %edi - movl 112(%esp), %edx - movl 4(%edx), %edx - mulxl 48(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - mulxl 40(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - mulxl 44(%esp), %esi, %ebp # 4-byte Folded Reload - addl %eax, %ebp - mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, %ecx - movl 32(%esp), %eax # 4-byte Reload - adcl 8(%esp), %eax # 4-byte Folded Reload - movl 36(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 8(%esp) # 4-byte Spill - adcl 16(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 20(%esp) # 4-byte Spill - movl 24(%esp), %ebp # 4-byte Reload - adcl 28(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 24(%esp) # 4-byte Spill - adcl %ebx, %ecx - movl %ecx, 16(%esp) # 4-byte Spill - adcl 12(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - adcl %edi, %edx - movl %edx, 36(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 28(%esp) # 4-byte Spill - movl %esi, %edx - imull 60(%esp), %edx # 4-byte Folded Reload - mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 76(%esp), %edi, %esi # 
4-byte Folded Reload - movl %edi, 12(%esp) # 4-byte Spill - addl %ecx, %esi - movl %esi, %edi - mulxl 72(%esp), %esi, %ebx # 4-byte Folded Reload - adcl %eax, %esi - mulxl 68(%esp), %ecx, %ebp # 4-byte Folded Reload - adcl %ebx, %ecx - mulxl 64(%esp), %edx, %eax # 4-byte Folded Reload - adcl %ebp, %edx - movl %edx, %ebx - adcl $0, %eax - movl 28(%esp), %edx # 4-byte Reload - andl $1, %edx - movl 12(%esp), %ebp # 4-byte Reload - addl 8(%esp), %ebp # 4-byte Folded Reload - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %esi - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 112(%esp), %edx - movl 8(%edx), %edx - mulxl 48(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, (%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - mulxl 40(%esp), %edi, %ebx # 4-byte Folded Reload - mulxl 44(%esp), %eax, %ecx # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - addl %edi, %ecx - mulxl 56(%esp), %eax, %edx # 4-byte Folded Reload - adcl %ebx, %eax - movl %eax, %edi - adcl (%esp), %edx # 4-byte Folded Reload - movl %edx, %eax - movl 32(%esp), %ebx # 4-byte Reload - adcl 4(%esp), %ebx # 4-byte Folded Reload - movl 36(%esp), %edx # 4-byte Reload - adcl $0, %edx - movl 24(%esp), %ebp # 4-byte Reload - addl 20(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 24(%esp) # 4-byte Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl %esi, %edi - movl %edi, 4(%esp) # 4-byte Spill - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 16(%esp) # 4-byte Spill - adcl 8(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 32(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 28(%esp) # 4-byte Spill - movl %ebp, %edx - imull 60(%esp), %edx # 4-byte Folded Reload - mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 76(%esp), %edi, %esi # 4-byte Folded Reload - movl %edi, 12(%esp) # 4-byte Spill - addl %ecx, %esi - mulxl 72(%esp), %ecx, %ebp # 4-byte Folded Reload - adcl %eax, %ecx - mulxl 68(%esp), %eax, %edi # 4-byte Folded Reload - adcl %ebp, %eax - mulxl 64(%esp), %ebx, %ebp # 4-byte Folded Reload - adcl %edi, %ebx - adcl $0, %ebp - movl 28(%esp), %edx # 4-byte Reload - andl $1, %edx - movl 12(%esp), %edi # 4-byte Reload - addl 24(%esp), %edi # 4-byte Folded Reload - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 4(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 16(%esp) # 4-byte Spill - adcl 32(%esp), %ebx # 4-byte Folded Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - adcl $0, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 112(%esp), %edx - movl 12(%edx), %edx - mulxl 48(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - mulxl 40(%esp), %eax, %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - mulxl 44(%esp), 
%esi, %edi # 4-byte Folded Reload - addl %eax, %edi - mulxl 56(%esp), %eax, %edx # 4-byte Folded Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - adcl %ecx, %edx - movl %edx, %ecx - movl 32(%esp), %eax # 4-byte Reload - adcl 8(%esp), %eax # 4-byte Folded Reload - movl 36(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 8(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - movl 24(%esp), %edi # 4-byte Reload - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - adcl %ebx, %ecx - movl %ecx, 16(%esp) # 4-byte Spill - adcl %ebp, %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 28(%esp) # 4-byte Spill - movl %esi, %edx - imull 60(%esp), %edx # 4-byte Folded Reload - mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 76(%esp), %edi, %esi # 4-byte Folded Reload - movl %edi, 12(%esp) # 4-byte Spill - addl %ecx, %esi - movl %esi, %ebp - mulxl 72(%esp), %esi, %ecx # 4-byte Folded Reload - adcl %eax, %esi - movl %esi, %eax - mulxl 68(%esp), %ebx, %edi # 4-byte Folded Reload - adcl %ecx, %ebx - mulxl 64(%esp), %ecx, %esi # 4-byte Folded Reload - adcl %edi, %ecx - adcl $0, %esi - movl 28(%esp), %edx # 4-byte Reload - andl $1, %edx - movl 12(%esp), %edi # 4-byte Reload - addl 8(%esp), %edi # 4-byte Folded Reload - adcl 20(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 20(%esp) # 4-byte Spill - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - adcl 16(%esp), %ebx # 4-byte Folded Reload - adcl 32(%esp), %ecx # 4-byte Folded Reload - adcl 36(%esp), %esi # 4-byte Folded Reload - adcl $0, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 112(%esp), %edx - movl 16(%edx), %edx - mulxl 40(%esp), %ebp, %eax # 4-byte Folded Reload - movl %eax, 40(%esp) # 4-byte Spill - mulxl 44(%esp), %eax, %edi # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - addl %ebp, %edi - mulxl 48(%esp), %ebp, %eax # 4-byte Folded Reload - movl %ebp, 36(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - mulxl 52(%esp), %ebp, %eax # 4-byte Folded Reload - movl %ebp, 32(%esp) # 4-byte Spill - mulxl 56(%esp), %ebp, %edx # 4-byte Folded Reload - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 52(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, %ebp - movl 48(%esp), %edx # 4-byte Reload - adcl $0, %edx - movl 44(%esp), %eax # 4-byte Reload - addl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 40(%esp) # 4-byte Spill - adcl %ebx, 52(%esp) # 4-byte Folded Spill - adcl %ecx, 56(%esp) # 4-byte Folded Spill - adcl %esi, %ebp - movl %ebp, 36(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - sbbl %ebx, %ebx - movl 60(%esp), %edx # 4-byte Reload - imull %eax, %edx - mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 60(%esp) # 4-byte Spill - mulxl 80(%esp), %ebp, %ecx # 4-byte Folded Reload - addl %eax, %ebp - mulxl 72(%esp), %edi, %eax # 4-byte Folded Reload - adcl %ecx, %edi - movl %edx, %ecx - mulxl 68(%esp), %esi, %edx # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - adcl %eax, %esi - movl %ecx, %edx - mulxl 64(%esp), %edx, %ecx # 
4-byte Folded Reload - adcl 32(%esp), %edx # 4-byte Folded Reload - adcl $0, %ecx - andl $1, %ebx - movl 60(%esp), %eax # 4-byte Reload - addl 44(%esp), %eax # 4-byte Folded Reload - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 44(%esp) # 4-byte Spill - adcl 52(%esp), %edi # 4-byte Folded Reload - adcl 56(%esp), %esi # 4-byte Folded Reload - adcl 36(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl 48(%esp), %ecx # 4-byte Folded Reload - adcl $0, %ebx - subl 76(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 76(%esp) # 4-byte Spill - movl %edi, %eax - sbbl 80(%esp), %eax # 4-byte Folded Reload - movl %esi, %ebp - sbbl 72(%esp), %ebp # 4-byte Folded Reload - sbbl 68(%esp), %edx # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - movl %ecx, %edx - sbbl 64(%esp), %edx # 4-byte Folded Reload - sbbl $0, %ebx - andl $1, %ebx - jne .LBB68_2 -# BB#1: - movl %eax, %edi -.LBB68_2: - testb %bl, %bl - movl 44(%esp), %ebx # 4-byte Reload - jne .LBB68_4 -# BB#3: - movl 76(%esp), %ebx # 4-byte Reload -.LBB68_4: - movl 104(%esp), %eax - movl %ebx, (%eax) - movl %edi, 4(%eax) - jne .LBB68_6 -# BB#5: - movl %ebp, %esi -.LBB68_6: - movl %esi, 8(%eax) - movl 60(%esp), %esi # 4-byte Reload - jne .LBB68_8 -# BB#7: - movl 80(%esp), %esi # 4-byte Reload -.LBB68_8: - movl %esi, 12(%eax) - jne .LBB68_10 -# BB#9: - movl %edx, %ecx -.LBB68_10: - movl %ecx, 16(%eax) - addl $84, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end68: - .size mcl_fp_mont5Lbmi2, .Lfunc_end68-mcl_fp_mont5Lbmi2 - - .globl mcl_fp_montNF5Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF5Lbmi2,@function -mcl_fp_montNF5Lbmi2: # @mcl_fp_montNF5Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $68, %esp - movl 92(%esp), %edi - movl (%edi), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 4(%edi), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 96(%esp), %ecx - movl (%ecx), %ebx - mulxl %ebx, %ecx, %esi - movl %eax, %edx - mulxl %ebx, %edx, %eax - movl %edx, 60(%esp) # 4-byte Spill - addl %ecx, %eax - movl %eax, %ecx - movl 8(%edi), %edx - movl %edx, 32(%esp) # 4-byte Spill - mulxl %ebx, %eax, %ebp - adcl %esi, %eax - movl %eax, %esi - movl 12(%edi), %edx - movl %edx, 28(%esp) # 4-byte Spill - mulxl %ebx, %eax, %edx - movl %edx, 64(%esp) # 4-byte Spill - adcl %ebp, %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 16(%edi), %edx - movl %edx, 24(%esp) # 4-byte Spill - mulxl %ebx, %edx, %eax - adcl 64(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 100(%esp), %ebx - movl -4(%ebx), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - movl %edi, %edx - imull %eax, %edx - movl (%ebx), %eax - movl %eax, 64(%esp) # 4-byte Spill - mulxl %eax, %ebp, %eax - movl %eax, 12(%esp) # 4-byte Spill - addl %edi, %ebp - movl 4(%ebx), %eax - movl %eax, 60(%esp) # 4-byte Spill - mulxl %eax, %eax, %edi - movl %edi, 8(%esp) # 4-byte Spill - adcl %ecx, %eax - movl %eax, %edi - movl 8(%ebx), %eax - movl %eax, 56(%esp) # 4-byte Spill - mulxl %eax, %eax, %ecx - movl %ecx, 4(%esp) # 4-byte Spill - adcl %esi, %eax - movl %eax, %esi - movl 12(%ebx), %eax - movl %eax, 52(%esp) # 4-byte Spill - mulxl %eax, %ecx, %ebp - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl 16(%ebx), %eax - movl %eax, 48(%esp) # 4-byte Spill - mulxl %eax, %ebx, %edx - adcl 16(%esp), %ebx # 4-byte Folded Reload - movl 20(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl 12(%esp), %edi # 4-byte Folded 
Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 4(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl %ebp, %ebx - adcl %edx, %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 96(%esp), %eax - movl 4(%eax), %edx - mulxl 36(%esp), %ecx, %esi # 4-byte Folded Reload - mulxl 40(%esp), %edi, %eax # 4-byte Folded Reload - addl %ecx, %eax - mulxl 32(%esp), %ebp, %ecx # 4-byte Folded Reload - movl %ecx, (%esp) # 4-byte Spill - adcl %esi, %ebp - mulxl 28(%esp), %esi, %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl (%esp), %esi # 4-byte Folded Reload - mulxl 24(%esp), %edx, %ecx # 4-byte Folded Reload - adcl 4(%esp), %edx # 4-byte Folded Reload - adcl $0, %ecx - addl 16(%esp), %edi # 4-byte Folded Reload - adcl 12(%esp), %eax # 4-byte Folded Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl %ebx, %esi - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl %edi, %edx - imull 44(%esp), %edx # 4-byte Folded Reload - mulxl 64(%esp), %ebx, %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - addl %edi, %ebx - mulxl 60(%esp), %edi, %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl %eax, %edi - mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload - movl %eax, 4(%esp) # 4-byte Spill - adcl %ebp, %ecx - mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload - adcl %esi, %eax - mulxl 48(%esp), %ebx, %edx # 4-byte Folded Reload - adcl 16(%esp), %ebx # 4-byte Folded Reload - movl 20(%esp), %esi # 4-byte Reload - adcl $0, %esi - addl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, 4(%esp) # 4-byte Spill - adcl %ebp, %ebx - movl %ebx, 8(%esp) # 4-byte Spill - adcl %edx, %esi - movl %esi, 20(%esp) # 4-byte Spill - movl 96(%esp), %eax - movl 8(%eax), %edx - mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 40(%esp), %ebp, %esi # 4-byte Folded Reload - addl %ecx, %esi - mulxl 32(%esp), %edi, %ecx # 4-byte Folded Reload - adcl %eax, %edi - mulxl 28(%esp), %ebx, %eax # 4-byte Folded Reload - movl %eax, (%esp) # 4-byte Spill - adcl %ecx, %ebx - mulxl 24(%esp), %ecx, %eax # 4-byte Folded Reload - adcl (%esp), %ecx # 4-byte Folded Reload - adcl $0, %eax - addl 16(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %esi # 4-byte Folded Reload - adcl 4(%esp), %edi # 4-byte Folded Reload - adcl 8(%esp), %ebx # 4-byte Folded Reload - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 20(%esp) # 4-byte Spill - movl %ebp, %eax - movl %eax, %edx - imull 44(%esp), %edx # 4-byte Folded Reload - mulxl 64(%esp), %ebp, %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - addl %eax, %ebp - mulxl 60(%esp), %ebp, %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - adcl %esi, %ebp - movl %ebp, %esi - mulxl 56(%esp), %ebp, %eax # 4-byte Folded Reload - movl %eax, 4(%esp) # 4-byte Spill - adcl %edi, %ebp - movl %ebp, %eax - mulxl 52(%esp), %ebp, %edi # 4-byte Folded Reload - adcl %ebx, %ebp - movl %ebp, %ebx - mulxl 48(%esp), %ebp, %edx # 4-byte Folded Reload - adcl 12(%esp), %ebp # 4-byte Folded Reload - movl 20(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte 
Spill - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - adcl 4(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 4(%esp) # 4-byte Spill - adcl %edi, %ebp - movl %ebp, 8(%esp) # 4-byte Spill - adcl %edx, %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 96(%esp), %eax - movl 12(%eax), %edx - mulxl 36(%esp), %ecx, %esi # 4-byte Folded Reload - mulxl 40(%esp), %ebx, %ebp # 4-byte Folded Reload - addl %ecx, %ebp - mulxl 32(%esp), %ecx, %edi # 4-byte Folded Reload - adcl %esi, %ecx - mulxl 28(%esp), %esi, %eax # 4-byte Folded Reload - movl %eax, (%esp) # 4-byte Spill - adcl %edi, %esi - mulxl 24(%esp), %edi, %eax # 4-byte Folded Reload - adcl (%esp), %edi # 4-byte Folded Reload - adcl $0, %eax - addl 16(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - adcl 12(%esp), %ebp # 4-byte Folded Reload - adcl 4(%esp), %ecx # 4-byte Folded Reload - adcl 8(%esp), %esi # 4-byte Folded Reload - adcl 20(%esp), %edi # 4-byte Folded Reload - adcl $0, %eax - movl %eax, 20(%esp) # 4-byte Spill - movl %ebx, %edx - imull 44(%esp), %edx # 4-byte Folded Reload - mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - addl 16(%esp), %ebx # 4-byte Folded Reload - mulxl 60(%esp), %ebx, %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - adcl %ebp, %ebx - movl %ebx, 16(%esp) # 4-byte Spill - mulxl 56(%esp), %eax, %ebx # 4-byte Folded Reload - adcl %ecx, %eax - mulxl 52(%esp), %ecx, %ebp # 4-byte Folded Reload - movl %ebp, 4(%esp) # 4-byte Spill - adcl %esi, %ecx - movl %ecx, %esi - mulxl 48(%esp), %ecx, %edx # 4-byte Folded Reload - adcl %edi, %ecx - movl 20(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl 12(%esp), %edi # 4-byte Reload - addl %edi, 16(%esp) # 4-byte Folded Spill - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - adcl %ebx, %esi - movl %esi, 8(%esp) # 4-byte Spill - adcl 4(%esp), %ecx # 4-byte Folded Reload - adcl %edx, %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl 96(%esp), %eax - movl 16(%eax), %edx - mulxl 36(%esp), %eax, %ebp # 4-byte Folded Reload - mulxl 40(%esp), %edi, %ebx # 4-byte Folded Reload - addl %eax, %ebx - mulxl 32(%esp), %eax, %esi # 4-byte Folded Reload - movl %esi, 36(%esp) # 4-byte Spill - adcl %ebp, %eax - mulxl 28(%esp), %ebp, %esi # 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - adcl 36(%esp), %ebp # 4-byte Folded Reload - mulxl 24(%esp), %edx, %esi # 4-byte Folded Reload - adcl 40(%esp), %edx # 4-byte Folded Reload - adcl $0, %esi - addl 16(%esp), %edi # 4-byte Folded Reload - adcl 12(%esp), %ebx # 4-byte Folded Reload - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - adcl %ecx, %ebp - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - adcl $0, %esi - movl 44(%esp), %edx # 4-byte Reload - imull %edi, %edx - mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - addl %edi, %ecx - mulxl 60(%esp), %edi, %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - adcl %ebx, %edi - movl %edx, %eax - mulxl 56(%esp), %ebx, %ecx # 4-byte Folded Reload - movl %ecx, 28(%esp) # 4-byte Spill - adcl 32(%esp), %ebx # 4-byte Folded Reload - mulxl 52(%esp), %ecx, %edx # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - adcl %ebp, %ecx - movl %eax, %edx - mulxl 48(%esp), %ebp, %edx # 4-byte Folded Reload - adcl 40(%esp), %ebp # 4-byte Folded Reload - adcl $0, %esi - addl 44(%esp), %edi # 4-byte Folded Reload - adcl 36(%esp), %ebx # 
4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl %edx, %esi - movl %edi, %eax - subl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%esp) # 4-byte Spill - movl %ebx, %eax - sbbl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - sbbl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl %ebp, %eax - sbbl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl %esi, %edx - sbbl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - sarl $31, %edx - testl %edx, %edx - js .LBB69_2 -# BB#1: - movl 40(%esp), %edi # 4-byte Reload -.LBB69_2: - movl 88(%esp), %edx - movl %edi, (%edx) - js .LBB69_4 -# BB#3: - movl 44(%esp), %ebx # 4-byte Reload -.LBB69_4: - movl %ebx, 4(%edx) - js .LBB69_6 -# BB#5: - movl 56(%esp), %ecx # 4-byte Reload -.LBB69_6: - movl %ecx, 8(%edx) - js .LBB69_8 -# BB#7: - movl 60(%esp), %ebp # 4-byte Reload -.LBB69_8: - movl %ebp, 12(%edx) - js .LBB69_10 -# BB#9: - movl 64(%esp), %esi # 4-byte Reload -.LBB69_10: - movl %esi, 16(%edx) - addl $68, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end69: - .size mcl_fp_montNF5Lbmi2, .Lfunc_end69-mcl_fp_montNF5Lbmi2 - - .globl mcl_fp_montRed5Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed5Lbmi2,@function -mcl_fp_montRed5Lbmi2: # @mcl_fp_montRed5Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $68, %esp - movl 96(%esp), %eax - movl -4(%eax), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl (%eax), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 92(%esp), %ecx - movl (%ecx), %edx - movl %edx, 44(%esp) # 4-byte Spill - imull %esi, %edx - movl 16(%eax), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 12(%eax), %ebx - movl %ebx, 64(%esp) # 4-byte Spill - movl 8(%eax), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 4(%eax), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - mulxl %esi, %esi, %eax - movl %esi, 36(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - mulxl %ebx, %esi, %eax - movl %esi, 24(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - mulxl %ebp, %ebp, %eax - mulxl %ecx, %esi, %ecx - mulxl %edi, %edx, %ebx - addl %esi, %ebx - adcl %ebp, %ecx - adcl 24(%esp), %eax # 4-byte Folded Reload - movl 28(%esp), %edi # 4-byte Reload - adcl 36(%esp), %edi # 4-byte Folded Reload - movl 32(%esp), %esi # 4-byte Reload - adcl $0, %esi - addl 44(%esp), %edx # 4-byte Folded Reload - movl 92(%esp), %ebp - adcl 4(%ebp), %ebx - adcl 8(%ebp), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - adcl 12(%ebp), %eax - movl %eax, 16(%esp) # 4-byte Spill - adcl 16(%ebp), %edi - movl %edi, 28(%esp) # 4-byte Spill - adcl 20(%ebp), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 36(%ebp), %edx - movl 32(%ebp), %esi - movl 28(%ebp), %edi - movl 24(%ebp), %eax - adcl $0, %eax - movl %eax, 8(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 24(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 36(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 44(%esp) # 4-byte Spill - sbbl %eax, %eax - andl $1, %eax - movl %eax, %esi - movl %ebx, %edx - imull 48(%esp), %edx # 4-byte Folded Reload - mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, (%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - mulxl 56(%esp), %ebp, %eax # 4-byte Folded Reload - mulxl 52(%esp), %edi, %ecx # 4-byte Folded Reload - movl %edi, 4(%esp) # 4-byte Spill - addl %ebp, %ecx - adcl (%esp), %eax # 4-byte Folded Reload - movl %eax, 
%ebp - mulxl 64(%esp), %eax, %edi # 4-byte Folded Reload - movl %edi, (%esp) # 4-byte Spill - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - mulxl %eax, %edi, %edx - adcl (%esp), %edi # 4-byte Folded Reload - adcl $0, %edx - addl %ebx, 4(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - adcl 16(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 16(%esp) # 4-byte Spill - movl 28(%esp), %ebx # 4-byte Reload - adcl %ebx, 20(%esp) # 4-byte Folded Spill - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 12(%esp) # 4-byte Spill - adcl 8(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl $0, 24(%esp) # 4-byte Folded Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %ecx, %edx - imull 48(%esp), %edx # 4-byte Folded Reload - mulxl %eax, %edi, %eax - movl %edi, 4(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - mulxl 56(%esp), %edi, %eax # 4-byte Folded Reload - movl %eax, (%esp) # 4-byte Spill - mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - addl %edi, %ebp - mulxl 60(%esp), %ebx, %eax # 4-byte Folded Reload - adcl (%esp), %ebx # 4-byte Folded Reload - mulxl 64(%esp), %edi, %edx # 4-byte Folded Reload - adcl %eax, %edi - adcl 4(%esp), %edx # 4-byte Folded Reload - movl %edx, %eax - movl 32(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl %ecx, 8(%esp) # 4-byte Folded Spill - adcl 16(%esp), %ebp # 4-byte Folded Reload - adcl 20(%esp), %ebx # 4-byte Folded Reload - adcl 12(%esp), %edi # 4-byte Folded Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 16(%esp) # 4-byte Spill - movl %ebp, %edx - imull 48(%esp), %edx # 4-byte Folded Reload - mulxl 40(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - mulxl 52(%esp), %eax, %esi # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - addl %ecx, %esi - mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - mulxl 64(%esp), %edx, %ecx # 4-byte Folded Reload - adcl %eax, %edx - movl %edx, %eax - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl 28(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl %ebp, 12(%esp) # 4-byte Folded Spill - adcl %ebx, %esi - adcl %edi, 24(%esp) # 4-byte Folded Spill - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%esp) # 4-byte Spill - adcl 32(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 32(%esp) # 4-byte Spill - adcl 36(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 16(%esp), %ebx # 4-byte Reload - adcl $0, %ebx - movl 48(%esp), %edx # 4-byte Reload - imull %esi, %edx - mulxl 52(%esp), %eax, %ecx # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - mulxl 56(%esp), %edi, %eax # 4-byte Folded Reload - addl %ecx, %edi - movl %edx, %ebp - mulxl 60(%esp), %ecx, %edx # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - adcl %eax, %ecx - movl %ebp, %edx - mulxl 64(%esp), %eax, %edx # 4-byte Folded Reload - movl %edx, 16(%esp) 
# 4-byte Spill - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %ebp, %edx - mulxl 40(%esp), %ebp, %edx # 4-byte Folded Reload - adcl 16(%esp), %ebp # 4-byte Folded Reload - adcl $0, %edx - addl %esi, 48(%esp) # 4-byte Folded Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - adcl 20(%esp), %ecx # 4-byte Folded Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - adcl 28(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 48(%esp) # 4-byte Spill - adcl 44(%esp), %edx # 4-byte Folded Reload - movl %ebx, %esi - adcl $0, %esi - movl %edi, %ebx - subl 52(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 52(%esp) # 4-byte Spill - movl %ecx, %ebx - sbbl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 56(%esp) # 4-byte Spill - sbbl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, %ebx - sbbl 64(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 64(%esp) # 4-byte Spill - movl %edx, %ebp - sbbl 40(%esp), %ebp # 4-byte Folded Reload - sbbl $0, %esi - andl $1, %esi - jne .LBB70_2 -# BB#1: - movl 56(%esp), %ecx # 4-byte Reload -.LBB70_2: - movl %esi, %eax - testb %al, %al - jne .LBB70_4 -# BB#3: - movl 52(%esp), %edi # 4-byte Reload -.LBB70_4: - movl 88(%esp), %esi - movl %edi, (%esi) - movl %ecx, 4(%esi) - movl 48(%esp), %eax # 4-byte Reload - movl 36(%esp), %ecx # 4-byte Reload - jne .LBB70_6 -# BB#5: - movl %ebx, %ecx -.LBB70_6: - movl %ecx, 8(%esi) - jne .LBB70_8 -# BB#7: - movl 64(%esp), %eax # 4-byte Reload -.LBB70_8: - movl %eax, 12(%esi) - jne .LBB70_10 -# BB#9: - movl %ebp, %edx -.LBB70_10: - movl %edx, 16(%esi) - addl $68, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end70: - .size mcl_fp_montRed5Lbmi2, .Lfunc_end70-mcl_fp_montRed5Lbmi2 - - .globl mcl_fp_addPre5Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre5Lbmi2,@function -mcl_fp_addPre5Lbmi2: # @mcl_fp_addPre5Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 28(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - movl 24(%esp), %esi - addl (%esi), %ecx - adcl 4(%esi), %edx - movl 8(%eax), %edi - adcl 8(%esi), %edi - movl 12(%esi), %ebx - movl 16(%esi), %esi - adcl 12(%eax), %ebx - movl 16(%eax), %eax - movl 20(%esp), %ebp - movl %ecx, (%ebp) - movl %edx, 4(%ebp) - movl %edi, 8(%ebp) - movl %ebx, 12(%ebp) - adcl %esi, %eax - movl %eax, 16(%ebp) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end71: - .size mcl_fp_addPre5Lbmi2, .Lfunc_end71-mcl_fp_addPre5Lbmi2 - - .globl mcl_fp_subPre5Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre5Lbmi2,@function -mcl_fp_subPre5Lbmi2: # @mcl_fp_subPre5Lbmi2 -# BB#0: - pushl %edi - pushl %esi - movl 16(%esp), %ecx - movl (%ecx), %edx - xorl %eax, %eax - movl 20(%esp), %esi - subl (%esi), %edx - movl 12(%esp), %edi - movl %edx, (%edi) - movl 4(%ecx), %edx - sbbl 4(%esi), %edx - movl %edx, 4(%edi) - movl 8(%ecx), %edx - sbbl 8(%esi), %edx - movl %edx, 8(%edi) - movl 12(%ecx), %edx - sbbl 12(%esi), %edx - movl %edx, 12(%edi) - movl 16(%esi), %edx - movl 16(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 16(%edi) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - retl -.Lfunc_end72: - .size mcl_fp_subPre5Lbmi2, .Lfunc_end72-mcl_fp_subPre5Lbmi2 - - .globl mcl_fp_shr1_5Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_5Lbmi2,@function -mcl_fp_shr1_5Lbmi2: # @mcl_fp_shr1_5Lbmi2 -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 20(%esp), %eax - movl 16(%eax), %ecx - movl 12(%eax), %edx - movl 8(%eax), %esi - movl (%eax), %edi - movl 4(%eax), %eax - shrdl $1, %eax, %edi - movl 
16(%esp), %ebx - movl %edi, (%ebx) - shrdl $1, %esi, %eax - movl %eax, 4(%ebx) - shrdl $1, %edx, %esi - movl %esi, 8(%ebx) - shrdl $1, %ecx, %edx - movl %edx, 12(%ebx) - shrl %ecx - movl %ecx, 16(%ebx) - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end73: - .size mcl_fp_shr1_5Lbmi2, .Lfunc_end73-mcl_fp_shr1_5Lbmi2 - - .globl mcl_fp_add5Lbmi2 - .align 16, 0x90 - .type mcl_fp_add5Lbmi2,@function -mcl_fp_add5Lbmi2: # @mcl_fp_add5Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 28(%esp), %ebx - movl (%ebx), %eax - movl 4(%ebx), %ecx - movl 24(%esp), %edi - addl (%edi), %eax - adcl 4(%edi), %ecx - movl 8(%ebx), %edx - adcl 8(%edi), %edx - movl 12(%edi), %esi - movl 16(%edi), %edi - adcl 12(%ebx), %esi - adcl 16(%ebx), %edi - movl 20(%esp), %ebx - movl %eax, (%ebx) - movl %ecx, 4(%ebx) - movl %edx, 8(%ebx) - movl %esi, 12(%ebx) - movl %edi, 16(%ebx) - sbbl %ebx, %ebx - andl $1, %ebx - movl 32(%esp), %ebp - subl (%ebp), %eax - sbbl 4(%ebp), %ecx - sbbl 8(%ebp), %edx - sbbl 12(%ebp), %esi - sbbl 16(%ebp), %edi - sbbl $0, %ebx - testb $1, %bl - jne .LBB74_2 -# BB#1: # %nocarry - movl 20(%esp), %ebx - movl %eax, (%ebx) - movl %ecx, 4(%ebx) - movl %edx, 8(%ebx) - movl %esi, 12(%ebx) - movl %edi, 16(%ebx) -.LBB74_2: # %carry - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end74: - .size mcl_fp_add5Lbmi2, .Lfunc_end74-mcl_fp_add5Lbmi2 - - .globl mcl_fp_addNF5Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF5Lbmi2,@function -mcl_fp_addNF5Lbmi2: # @mcl_fp_addNF5Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $20, %esp - movl 48(%esp), %esi - movl (%esi), %ebx - movl 4(%esi), %eax - movl 44(%esp), %edi - addl (%edi), %ebx - adcl 4(%edi), %eax - movl 16(%esi), %ecx - movl 12(%esi), %edx - movl 8(%esi), %ebp - adcl 8(%edi), %ebp - adcl 12(%edi), %edx - movl %edx, 12(%esp) # 4-byte Spill - adcl 16(%edi), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 52(%esp), %edi - movl %ebx, %esi - subl (%edi), %esi - movl %esi, (%esp) # 4-byte Spill - movl %eax, %esi - sbbl 4(%edi), %esi - movl %esi, 4(%esp) # 4-byte Spill - movl %ebp, %esi - sbbl 8(%edi), %esi - sbbl 12(%edi), %edx - movl %edx, 8(%esp) # 4-byte Spill - movl %ecx, %edx - sbbl 16(%edi), %edx - movl %edx, %edi - sarl $31, %edi - testl %edi, %edi - js .LBB75_2 -# BB#1: - movl (%esp), %ebx # 4-byte Reload -.LBB75_2: - movl 40(%esp), %edi - movl %ebx, (%edi) - js .LBB75_4 -# BB#3: - movl 4(%esp), %eax # 4-byte Reload -.LBB75_4: - movl %eax, 4(%edi) - movl 12(%esp), %ecx # 4-byte Reload - js .LBB75_6 -# BB#5: - movl %esi, %ebp -.LBB75_6: - movl %ebp, 8(%edi) - movl 16(%esp), %eax # 4-byte Reload - js .LBB75_8 -# BB#7: - movl 8(%esp), %ecx # 4-byte Reload -.LBB75_8: - movl %ecx, 12(%edi) - js .LBB75_10 -# BB#9: - movl %edx, %eax -.LBB75_10: - movl %eax, 16(%edi) - addl $20, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end75: - .size mcl_fp_addNF5Lbmi2, .Lfunc_end75-mcl_fp_addNF5Lbmi2 - - .globl mcl_fp_sub5Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub5Lbmi2,@function -mcl_fp_sub5Lbmi2: # @mcl_fp_sub5Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %edi - movl (%edi), %eax - movl 4(%edi), %ecx - xorl %ebx, %ebx - movl 28(%esp), %ebp - subl (%ebp), %eax - sbbl 4(%ebp), %ecx - movl 8(%edi), %edx - sbbl 8(%ebp), %edx - movl 12(%edi), %esi - sbbl 12(%ebp), %esi - movl 16(%edi), %edi - sbbl 16(%ebp), %edi - movl 20(%esp), %ebp - movl %eax, (%ebp) - movl %ecx, 4(%ebp) - movl %edx, 8(%ebp) - movl %esi, 12(%ebp) - movl %edi, 16(%ebp) - sbbl $0, %ebx - 
testb $1, %bl - je .LBB76_2 -# BB#1: # %carry - movl 32(%esp), %ebx - addl (%ebx), %eax - movl %eax, (%ebp) - adcl 4(%ebx), %ecx - movl %ecx, 4(%ebp) - adcl 8(%ebx), %edx - movl %edx, 8(%ebp) - movl 12(%ebx), %eax - adcl %esi, %eax - movl %eax, 12(%ebp) - movl 16(%ebx), %eax - adcl %edi, %eax - movl %eax, 16(%ebp) -.LBB76_2: # %nocarry - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end76: - .size mcl_fp_sub5Lbmi2, .Lfunc_end76-mcl_fp_sub5Lbmi2 - - .globl mcl_fp_subNF5Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF5Lbmi2,@function -mcl_fp_subNF5Lbmi2: # @mcl_fp_subNF5Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $16, %esp - movl 40(%esp), %edi - movl (%edi), %ecx - movl 4(%edi), %eax - movl 44(%esp), %ebx - subl (%ebx), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - sbbl 4(%ebx), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 16(%edi), %esi - movl 12(%edi), %eax - movl 8(%edi), %ecx - sbbl 8(%ebx), %ecx - movl %ecx, (%esp) # 4-byte Spill - sbbl 12(%ebx), %eax - movl %eax, 8(%esp) # 4-byte Spill - sbbl 16(%ebx), %esi - movl %esi, %ebx - sarl $31, %ebx - movl %ebx, %ebp - shldl $1, %esi, %ebp - movl 48(%esp), %edi - movl 4(%edi), %ecx - andl %ebp, %ecx - andl (%edi), %ebp - movl 16(%edi), %edx - andl %ebx, %edx - rorxl $31, %ebx, %eax - andl 12(%edi), %ebx - andl 8(%edi), %eax - addl 4(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl 36(%esp), %edi - movl %ebp, (%edi) - adcl (%esp), %eax # 4-byte Folded Reload - movl %ecx, 4(%edi) - adcl 8(%esp), %ebx # 4-byte Folded Reload - movl %eax, 8(%edi) - movl %ebx, 12(%edi) - adcl %esi, %edx - movl %edx, 16(%edi) - addl $16, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end77: - .size mcl_fp_subNF5Lbmi2, .Lfunc_end77-mcl_fp_subNF5Lbmi2 - - .globl mcl_fpDbl_add5Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add5Lbmi2,@function -mcl_fpDbl_add5Lbmi2: # @mcl_fpDbl_add5Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $28, %esp - movl 56(%esp), %edx - movl 52(%esp), %ecx - movl 12(%ecx), %ebx - movl 16(%ecx), %ebp - movl 8(%edx), %esi - movl (%edx), %edi - addl (%ecx), %edi - movl 48(%esp), %eax - movl %edi, (%eax) - movl 4(%edx), %edi - adcl 4(%ecx), %edi - adcl 8(%ecx), %esi - adcl 12(%edx), %ebx - adcl 16(%edx), %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl %edi, 4(%eax) - movl 28(%edx), %edi - movl %edi, 24(%esp) # 4-byte Spill - movl %esi, 8(%eax) - movl 20(%edx), %esi - movl %ebx, 12(%eax) - movl 20(%ecx), %ebp - adcl %esi, %ebp - movl %ebp, 12(%esp) # 4-byte Spill - movl 24(%edx), %esi - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 16(%eax) - movl 24(%ecx), %ebx - adcl %esi, %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 28(%ecx), %edi - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - movl 32(%edx), %eax - movl 32(%ecx), %esi - adcl %eax, %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 36(%edx), %eax - movl 36(%ecx), %edx - adcl %eax, %edx - sbbl %eax, %eax - andl $1, %eax - movl %ebp, %ecx - movl 60(%esp), %ebp - subl (%ebp), %ecx - movl %ecx, (%esp) # 4-byte Spill - sbbl 4(%ebp), %ebx - movl %ebx, 4(%esp) # 4-byte Spill - sbbl 8(%ebp), %edi - movl %edi, 8(%esp) # 4-byte Spill - movl %esi, %ebx - movl %edx, %esi - sbbl 12(%ebp), %ebx - sbbl 16(%ebp), %edx - sbbl $0, %eax - andl $1, %eax - jne .LBB78_2 -# BB#1: - movl %edx, %esi -.LBB78_2: - testb %al, %al - movl 12(%esp), %ebp # 4-byte Reload - jne .LBB78_4 -# BB#3: - movl (%esp), %ebp # 4-byte Reload -.LBB78_4: - movl 48(%esp), %eax - 
movl %ebp, 20(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl 20(%esp), %edx # 4-byte Reload - movl 16(%esp), %edi # 4-byte Reload - jne .LBB78_6 -# BB#5: - movl 4(%esp), %edi # 4-byte Reload -.LBB78_6: - movl %edi, 24(%eax) - jne .LBB78_8 -# BB#7: - movl 8(%esp), %edx # 4-byte Reload -.LBB78_8: - movl %edx, 28(%eax) - jne .LBB78_10 -# BB#9: - movl %ebx, %ecx -.LBB78_10: - movl %ecx, 32(%eax) - movl %esi, 36(%eax) - addl $28, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end78: - .size mcl_fpDbl_add5Lbmi2, .Lfunc_end78-mcl_fpDbl_add5Lbmi2 - - .globl mcl_fpDbl_sub5Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub5Lbmi2,@function -mcl_fpDbl_sub5Lbmi2: # @mcl_fpDbl_sub5Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $16, %esp - movl 40(%esp), %eax - movl (%eax), %esi - movl 4(%eax), %edi - movl 44(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%eax), %ebx - sbbl 8(%edx), %ebx - movl 36(%esp), %ecx - movl %esi, (%ecx) - movl 12(%eax), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ecx) - movl 16(%eax), %edi - sbbl 16(%edx), %edi - movl %ebx, 8(%ecx) - movl 20(%edx), %ebx - movl %esi, 12(%ecx) - movl 20(%eax), %esi - sbbl %ebx, %esi - movl %esi, 4(%esp) # 4-byte Spill - movl 24(%edx), %esi - movl %edi, 16(%ecx) - movl 24(%eax), %ebp - sbbl %esi, %ebp - movl 28(%edx), %esi - movl 28(%eax), %edi - sbbl %esi, %edi - movl %edi, (%esp) # 4-byte Spill - movl 32(%edx), %esi - movl 32(%eax), %edi - sbbl %esi, %edi - movl %edi, 8(%esp) # 4-byte Spill - movl 36(%edx), %edx - movl 36(%eax), %eax - sbbl %edx, %eax - movl %eax, 12(%esp) # 4-byte Spill - movl $0, %edx - sbbl $0, %edx - andl $1, %edx - movl 48(%esp), %ebx - jne .LBB79_1 -# BB#2: - xorl %eax, %eax - jmp .LBB79_3 -.LBB79_1: - movl 16(%ebx), %eax -.LBB79_3: - testb %dl, %dl - jne .LBB79_4 -# BB#5: - movl $0, %edx - movl $0, %esi - jmp .LBB79_6 -.LBB79_4: - movl (%ebx), %esi - movl 4(%ebx), %edx -.LBB79_6: - jne .LBB79_7 -# BB#8: - movl $0, %edi - jmp .LBB79_9 -.LBB79_7: - movl 12(%ebx), %edi -.LBB79_9: - jne .LBB79_10 -# BB#11: - xorl %ebx, %ebx - jmp .LBB79_12 -.LBB79_10: - movl 8(%ebx), %ebx -.LBB79_12: - addl 4(%esp), %esi # 4-byte Folded Reload - adcl %ebp, %edx - movl %esi, 20(%ecx) - adcl (%esp), %ebx # 4-byte Folded Reload - movl %edx, 24(%ecx) - adcl 8(%esp), %edi # 4-byte Folded Reload - movl %ebx, 28(%ecx) - movl %edi, 32(%ecx) - adcl 12(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%ecx) - addl $16, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end79: - .size mcl_fpDbl_sub5Lbmi2, .Lfunc_end79-mcl_fpDbl_sub5Lbmi2 - - .globl mcl_fp_mulUnitPre6Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre6Lbmi2,@function -mcl_fp_mulUnitPre6Lbmi2: # @mcl_fp_mulUnitPre6Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $12, %esp - movl 40(%esp), %edx - movl 36(%esp), %esi - mulxl 4(%esi), %ecx, %edi - mulxl (%esi), %eax, %ebx - movl %eax, 8(%esp) # 4-byte Spill - addl %ecx, %ebx - movl %ebx, 4(%esp) # 4-byte Spill - mulxl 8(%esi), %ebp, %eax - adcl %edi, %ebp - mulxl 12(%esi), %ecx, %edi - adcl %eax, %ecx - mulxl 16(%esi), %eax, %ebx - movl %ebx, (%esp) # 4-byte Spill - adcl %edi, %eax - mulxl 20(%esi), %edx, %esi - movl 32(%esp), %edi - movl 8(%esp), %ebx # 4-byte Reload - movl %ebx, (%edi) - movl 4(%esp), %ebx # 4-byte Reload - movl %ebx, 4(%edi) - movl %ebp, 8(%edi) - movl %ecx, 12(%edi) - movl %eax, 16(%edi) - adcl (%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%edi) - adcl $0, %esi - movl %esi, 24(%edi) - addl $12, %esp - popl %esi - 
popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end80: - .size mcl_fp_mulUnitPre6Lbmi2, .Lfunc_end80-mcl_fp_mulUnitPre6Lbmi2 - - .globl mcl_fpDbl_mulPre6Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre6Lbmi2,@function -mcl_fpDbl_mulPre6Lbmi2: # @mcl_fpDbl_mulPre6Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $56, %esp - movl 80(%esp), %ebp - movl (%ebp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 4(%ebp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 84(%esp), %eax - movl (%eax), %eax - mulxl %eax, %esi, %edi - movl %ecx, %edx - mulxl %eax, %edx, %ecx - movl %edx, 28(%esp) # 4-byte Spill - addl %esi, %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 8(%ebp), %edx - movl %edx, 44(%esp) # 4-byte Spill - mulxl %eax, %esi, %ebx - adcl %edi, %esi - movl 12(%ebp), %edx - movl %edx, 40(%esp) # 4-byte Spill - movl %ebp, %ecx - mulxl %eax, %edi, %ebp - adcl %ebx, %edi - movl 16(%ecx), %edx - movl %edx, 36(%esp) # 4-byte Spill - mulxl %eax, %ebx, %edx - movl %edx, 20(%esp) # 4-byte Spill - adcl %ebp, %ebx - movl %ecx, %edx - movl 20(%edx), %edx - movl %edx, 32(%esp) # 4-byte Spill - mulxl %eax, %eax, %ecx - adcl 20(%esp), %eax # 4-byte Folded Reload - movl 76(%esp), %edx - movl 28(%esp), %ebp # 4-byte Reload - movl %ebp, (%edx) - adcl $0, %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 84(%esp), %edx - movl 4(%edx), %ebp - movl 52(%esp), %edx # 4-byte Reload - mulxl %ebp, %edx, %ecx - movl %ecx, 28(%esp) # 4-byte Spill - addl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - mulxl %ebp, %ecx, %edx - movl %edx, 48(%esp) # 4-byte Spill - adcl %esi, %ecx - movl 44(%esp), %edx # 4-byte Reload - mulxl %ebp, %esi, %edx - movl %edx, 44(%esp) # 4-byte Spill - adcl %edi, %esi - movl 40(%esp), %edx # 4-byte Reload - mulxl %ebp, %edi, %edx - movl %edx, 40(%esp) # 4-byte Spill - adcl %ebx, %edi - movl 36(%esp), %edx # 4-byte Reload - mulxl %ebp, %ebx, %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl %eax, %ebx - movl 32(%esp), %edx # 4-byte Reload - mulxl %ebp, %eax, %edx - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, %ebp - sbbl %eax, %eax - andl $1, %eax - addl 28(%esp), %ecx # 4-byte Folded Reload - adcl 48(%esp), %esi # 4-byte Folded Reload - adcl 44(%esp), %edi # 4-byte Folded Reload - adcl 40(%esp), %ebx # 4-byte Folded Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 28(%esp) # 4-byte Spill - adcl %edx, %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 76(%esp), %eax - movl 52(%esp), %edx # 4-byte Reload - movl %edx, 4(%eax) - movl 80(%esp), %eax - movl (%eax), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 84(%esp), %eax - movl 8(%eax), %eax - mulxl %eax, %edx, %ebp - movl %ebp, 16(%esp) # 4-byte Spill - addl %ecx, %edx - movl %edx, 24(%esp) # 4-byte Spill - movl 80(%esp), %ebp - movl 4(%ebp), %edx - movl %edx, 44(%esp) # 4-byte Spill - mulxl %eax, %ecx, %edx - movl %edx, 12(%esp) # 4-byte Spill - adcl %esi, %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 8(%ebp), %edx - movl %edx, 40(%esp) # 4-byte Spill - mulxl %eax, %ecx, %edx - movl %edx, 8(%esp) # 4-byte Spill - adcl %edi, %ecx - movl %ecx, %esi - movl 12(%ebp), %edx - movl %edx, 36(%esp) # 4-byte Spill - mulxl %eax, %edi, %ecx - movl %ecx, 4(%esp) # 4-byte Spill - adcl %ebx, %edi - movl 16(%ebp), %edx - movl %edx, 32(%esp) # 4-byte Spill - mulxl %eax, %ebx, %ecx - movl %ecx, (%esp) # 4-byte Spill - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl 20(%ebp), %edx - movl %edx, 28(%esp) # 4-byte Spill - mulxl %eax, %ebp, 
%edx - adcl 20(%esp), %ebp # 4-byte Folded Reload - sbbl %ecx, %ecx - andl $1, %ecx - movl 16(%esp), %eax # 4-byte Reload - addl %eax, 52(%esp) # 4-byte Folded Spill - adcl 12(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 8(%esp), %edi # 4-byte Folded Reload - adcl 4(%esp), %ebx # 4-byte Folded Reload - adcl (%esp), %ebp # 4-byte Folded Reload - adcl %edx, %ecx - movl 76(%esp), %eax - movl 24(%esp), %edx # 4-byte Reload - movl %edx, 8(%eax) - movl 84(%esp), %eax - movl 12(%eax), %eax - movl 48(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %esi - movl %esi, 24(%esp) # 4-byte Spill - addl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %esi - movl %esi, 44(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - mulxl %eax, %esi, %edx - movl %edx, 40(%esp) # 4-byte Spill - adcl %edi, %esi - movl 36(%esp), %edx # 4-byte Reload - mulxl %eax, %edi, %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl %ebx, %edi - movl 32(%esp), %edx # 4-byte Reload - mulxl %eax, %ebx, %edx - movl %edx, 32(%esp) # 4-byte Spill - adcl %ebp, %ebx - movl 28(%esp), %edx # 4-byte Reload - mulxl %eax, %ebp, %edx - adcl %ecx, %ebp - sbbl %eax, %eax - andl $1, %eax - movl 24(%esp), %ecx # 4-byte Reload - addl %ecx, 52(%esp) # 4-byte Folded Spill - adcl 44(%esp), %esi # 4-byte Folded Reload - adcl 40(%esp), %edi # 4-byte Folded Reload - adcl 36(%esp), %ebx # 4-byte Folded Reload - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl %edx, %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 76(%esp), %eax - movl 48(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%eax) - movl 80(%esp), %ecx - movl (%ecx), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 84(%esp), %eax - movl 16(%eax), %eax - mulxl %eax, %edx, %ecx - movl %ecx, 20(%esp) # 4-byte Spill - addl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl 80(%esp), %ecx - movl 4(%ecx), %edx - movl %edx, 52(%esp) # 4-byte Spill - mulxl %eax, %ecx, %edx - movl %edx, 16(%esp) # 4-byte Spill - adcl %esi, %ecx - movl 80(%esp), %edx - movl 8(%edx), %edx - movl %edx, 40(%esp) # 4-byte Spill - mulxl %eax, %esi, %edx - movl %edx, 12(%esp) # 4-byte Spill - adcl %edi, %esi - movl %esi, %edi - movl 80(%esp), %esi - movl %esi, %edx - movl 12(%edx), %edx - movl %edx, 44(%esp) # 4-byte Spill - mulxl %eax, %esi, %edx - movl %edx, 8(%esp) # 4-byte Spill - adcl %ebx, %esi - movl 80(%esp), %edx - movl 16(%edx), %edx - movl %edx, 36(%esp) # 4-byte Spill - mulxl %eax, %ebx, %edx - movl %edx, 4(%esp) # 4-byte Spill - adcl %ebp, %ebx - movl 80(%esp), %edx - movl 20(%edx), %edx - movl %edx, 32(%esp) # 4-byte Spill - mulxl %eax, %ebp, %edx - adcl 24(%esp), %ebp # 4-byte Folded Reload - sbbl %eax, %eax - andl $1, %eax - addl 20(%esp), %ecx # 4-byte Folded Reload - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 12(%esp), %esi # 4-byte Folded Reload - adcl 8(%esp), %ebx # 4-byte Folded Reload - adcl 4(%esp), %ebp # 4-byte Folded Reload - adcl %edx, %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 76(%esp), %eax - movl 28(%esp), %edx # 4-byte Reload - movl %edx, 16(%eax) - movl 84(%esp), %eax - movl 20(%eax), %eax - movl 48(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %edi - movl %edi, 28(%esp) # 4-byte Spill - addl %ecx, %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - mulxl %eax, %ecx, %edx - movl %edx, 
52(%esp) # 4-byte Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %edi - movl 40(%esp), %edx # 4-byte Reload - mulxl %eax, %ecx, %edx - movl %edx, 40(%esp) # 4-byte Spill - adcl %esi, %ecx - movl 44(%esp), %edx # 4-byte Reload - mulxl %eax, %esi, %edx - movl %edx, 20(%esp) # 4-byte Spill - adcl %ebx, %esi - movl 36(%esp), %edx # 4-byte Reload - mulxl %eax, %ebx, %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl %ebp, %ebx - movl 32(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - sbbl %ebp, %ebp - andl $1, %ebp - addl 28(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 52(%esp), %ecx # 4-byte Folded Reload - adcl 40(%esp), %esi # 4-byte Folded Reload - adcl 20(%esp), %ebx # 4-byte Folded Reload - adcl 36(%esp), %edx # 4-byte Folded Reload - movl 76(%esp), %eax - movl 48(%esp), %edi # 4-byte Reload - movl %edi, 20(%eax) - movl 32(%esp), %edi # 4-byte Reload - movl %edi, 24(%eax) - movl %ecx, 28(%eax) - movl %esi, 32(%eax) - movl %ebx, 36(%eax) - movl %edx, 40(%eax) - adcl 44(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 44(%eax) - addl $56, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end81: - .size mcl_fpDbl_mulPre6Lbmi2, .Lfunc_end81-mcl_fpDbl_mulPre6Lbmi2 - - .globl mcl_fpDbl_sqrPre6Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre6Lbmi2,@function -mcl_fpDbl_sqrPre6Lbmi2: # @mcl_fpDbl_sqrPre6Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $60, %esp - movl 84(%esp), %ebp - movl (%ebp), %ecx - movl 4(%ebp), %eax - movl %eax, %edx - mulxl %ecx, %edi, %esi - movl %edi, 36(%esp) # 4-byte Spill - movl %esi, 52(%esp) # 4-byte Spill - movl %ecx, %edx - mulxl %ecx, %ebx, %edx - movl %ebx, 28(%esp) # 4-byte Spill - addl %edi, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 8(%ebp), %edx - movl %edx, 48(%esp) # 4-byte Spill - mulxl %ecx, %edi, %edx - movl %edx, 40(%esp) # 4-byte Spill - adcl %esi, %edi - movl 12(%ebp), %edx - movl %edx, 44(%esp) # 4-byte Spill - mulxl %ecx, %ebx, %edx - movl %edx, 32(%esp) # 4-byte Spill - adcl 40(%esp), %ebx # 4-byte Folded Reload - movl 16(%ebp), %edx - movl %edx, 40(%esp) # 4-byte Spill - mulxl %ecx, %edx, %esi - movl %esi, 20(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl 20(%ebp), %edx - movl %edx, 32(%esp) # 4-byte Spill - mulxl %ecx, %ebp, %edx - adcl 20(%esp), %ebp # 4-byte Folded Reload - movl 80(%esp), %ecx - movl 28(%esp), %esi # 4-byte Reload - movl %esi, (%ecx) - adcl $0, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - addl %ecx, 56(%esp) # 4-byte Folded Spill - movl %eax, %edx - mulxl %eax, %esi, %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl %edi, %esi - movl 48(%esp), %edx # 4-byte Reload - mulxl %eax, %ecx, %edx - movl %edx, 48(%esp) # 4-byte Spill - adcl %ebx, %ecx - movl 44(%esp), %edx # 4-byte Reload - mulxl %eax, %ebx, %edx - movl %edx, 44(%esp) # 4-byte Spill - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl 40(%esp), %edx # 4-byte Reload - mulxl %eax, %edi, %edx - movl %edx, 40(%esp) # 4-byte Spill - adcl %ebp, %edi - movl 32(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %eax - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, %ebp - sbbl %edx, %edx - andl $1, %edx - addl 52(%esp), %esi # 4-byte Folded Reload - adcl 36(%esp), %ecx # 4-byte Folded Reload - adcl 48(%esp), %ebx # 4-byte Folded Reload - adcl 44(%esp), %edi # 4-byte Folded Reload - adcl 
40(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 32(%esp) # 4-byte Spill - adcl %eax, %edx - movl %edx, 24(%esp) # 4-byte Spill - movl 80(%esp), %eax - movl 56(%esp), %edx # 4-byte Reload - movl %edx, 4(%eax) - movl 84(%esp), %eax - movl (%eax), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 8(%eax), %ebp - mulxl %ebp, %edx, %eax - movl %eax, 20(%esp) # 4-byte Spill - addl %esi, %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 84(%esp), %eax - movl 4(%eax), %edx - movl %edx, 40(%esp) # 4-byte Spill - mulxl %ebp, %edx, %esi - movl %esi, 16(%esp) # 4-byte Spill - adcl %ecx, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl %ebp, %edx - mulxl %ebp, %ecx, %edx - movl %edx, 12(%esp) # 4-byte Spill - adcl %ebx, %ecx - movl %eax, %esi - movl 12(%esi), %eax - movl %eax, %edx - mulxl %ebp, %ebx, %edx - movl %ebx, 28(%esp) # 4-byte Spill - movl %edx, 52(%esp) # 4-byte Spill - adcl %ebx, %edi - movl 16(%esi), %edx - movl %edx, 36(%esp) # 4-byte Spill - mulxl %ebp, %ebx, %edx - movl %edx, 8(%esp) # 4-byte Spill - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl 20(%esi), %edx - movl %edx, 32(%esp) # 4-byte Spill - mulxl %ebp, %esi, %edx - movl %edx, 4(%esp) # 4-byte Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - sbbl %ebp, %ebp - andl $1, %ebp - movl 20(%esp), %edx # 4-byte Reload - addl %edx, 56(%esp) # 4-byte Folded Spill - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - adcl 52(%esp), %ebx # 4-byte Folded Reload - adcl 8(%esp), %esi # 4-byte Folded Reload - adcl 4(%esp), %ebp # 4-byte Folded Reload - movl 48(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %ecx - movl %ecx, 20(%esp) # 4-byte Spill - addl 56(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %ecx - movl %ecx, 40(%esp) # 4-byte Spill - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - adcl 28(%esp), %edi # 4-byte Folded Reload - movl %eax, %edx - mulxl %eax, %ecx, %edx - movl %edx, 28(%esp) # 4-byte Spill - adcl %ebx, %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 36(%esp), %edx # 4-byte Reload - mulxl %eax, %ecx, %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl %esi, %ecx - movl %ecx, %esi - movl 32(%esp), %edx # 4-byte Reload - mulxl %eax, %ebx, %edx - adcl %ebp, %ebx - sbbl %ecx, %ecx - andl $1, %ecx - movl 20(%esp), %eax # 4-byte Reload - addl %eax, 48(%esp) # 4-byte Folded Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl 56(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 52(%esp) # 4-byte Spill - adcl 36(%esp), %ebx # 4-byte Folded Reload - adcl %edx, %ecx - movl 80(%esp), %eax - movl 44(%esp), %edx # 4-byte Reload - movl %edx, 8(%eax) - movl 24(%esp), %edx # 4-byte Reload - movl %edx, 12(%eax) - movl 84(%esp), %esi - movl (%esi), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 16(%esi), %ebp - mulxl %ebp, %edx, %eax - movl %eax, 24(%esp) # 4-byte Spill - addl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl 4(%esi), %edx - movl %edx, 40(%esp) # 4-byte Spill - mulxl %ebp, %eax, %edx - movl %edx, 16(%esp) # 4-byte Spill - adcl %edi, %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 8(%esi), %edx - movl %edx, 36(%esp) # 4-byte Spill - mulxl %ebp, %eax, %edx - movl %edx, 12(%esp) # 4-byte Spill - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - 
movl 12(%esi), %edx - movl %edx, 32(%esp) # 4-byte Spill - mulxl %ebp, %edi, %eax - movl %eax, 4(%esp) # 4-byte Spill - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %ebp, %edx - mulxl %ebp, %eax, %edx - movl %edx, (%esp) # 4-byte Spill - adcl %ebx, %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 20(%esi), %ebx - movl %ebx, %edx - mulxl %ebp, %edx, %eax - movl %eax, 20(%esp) # 4-byte Spill - movl %edx, 28(%esp) # 4-byte Spill - adcl %edx, %ecx - sbbl %ebp, %ebp - andl $1, %ebp - movl 8(%esp), %esi # 4-byte Reload - addl 24(%esp), %esi # 4-byte Folded Reload - movl 16(%esp), %edx # 4-byte Reload - adcl %edx, 56(%esp) # 4-byte Folded Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl 4(%esp), %edx # 4-byte Reload - adcl %edx, 52(%esp) # 4-byte Folded Spill - adcl (%esp), %ecx # 4-byte Folded Reload - adcl %eax, %ebp - movl 44(%esp), %edx # 4-byte Reload - mulxl %ebx, %edx, %eax - movl %eax, 24(%esp) # 4-byte Spill - addl %esi, %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - mulxl %ebx, %edx, %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %edx, %eax - movl 36(%esp), %edx # 4-byte Reload - mulxl %ebx, %esi, %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl %edi, %esi - movl 32(%esp), %edx # 4-byte Reload - mulxl %ebx, %edi, %edx - movl %edx, 32(%esp) # 4-byte Spill - adcl 52(%esp), %edi # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ebx, %edx - mulxl %ebx, %ebx, %edx - movl %edx, 56(%esp) # 4-byte Spill - adcl %ebp, %ebx - sbbl %ebp, %ebp - andl $1, %ebp - addl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - adcl 40(%esp), %esi # 4-byte Folded Reload - adcl 36(%esp), %edi # 4-byte Folded Reload - adcl 32(%esp), %ecx # 4-byte Folded Reload - adcl 20(%esp), %ebx # 4-byte Folded Reload - movl 80(%esp), %eax - movl 48(%esp), %edx # 4-byte Reload - movl %edx, 16(%eax) - movl 44(%esp), %edx # 4-byte Reload - movl %edx, 20(%eax) - movl 52(%esp), %edx # 4-byte Reload - movl %edx, 24(%eax) - movl %esi, 28(%eax) - movl %edi, 32(%eax) - movl %ecx, 36(%eax) - movl %ebx, 40(%eax) - adcl 56(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 44(%eax) - addl $60, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end82: - .size mcl_fpDbl_sqrPre6Lbmi2, .Lfunc_end82-mcl_fpDbl_sqrPre6Lbmi2 - - .globl mcl_fp_mont6Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont6Lbmi2,@function -mcl_fp_mont6Lbmi2: # @mcl_fp_mont6Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $100, %esp - movl 124(%esp), %eax - movl 20(%eax), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 128(%esp), %ecx - movl (%ecx), %ecx - mulxl %ecx, %edx, %ebp - movl %edx, 96(%esp) # 4-byte Spill - movl 16(%eax), %edx - movl %edx, 72(%esp) # 4-byte Spill - mulxl %ecx, %edx, %edi - movl %edx, 92(%esp) # 4-byte Spill - movl 12(%eax), %edx - movl %edx, 68(%esp) # 4-byte Spill - mulxl %ecx, %edx, %esi - movl %edx, 88(%esp) # 4-byte Spill - movl (%eax), %ebx - movl %ebx, 52(%esp) # 4-byte Spill - movl 4(%eax), %edx - movl %edx, 48(%esp) # 4-byte Spill - mulxl %ecx, %eax, %edx - movl %eax, 80(%esp) # 4-byte Spill - movl %edx, 84(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl %ecx, %ebx, %edx - movl %ebx, 16(%esp) # 4-byte Spill - addl 80(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - movl 124(%esp), %eax - movl 8(%eax), %edx - movl %edx, 56(%esp) # 4-byte Spill - mulxl %ecx, %ecx, %eax - adcl 84(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 
4-byte Spill - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - adcl 92(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 96(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 132(%esp), %edi - movl -4(%edi), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl %ebx, %edx - imull %eax, %edx - movl (%edi), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 4(%edi), %esi - movl %esi, 96(%esp) # 4-byte Spill - mulxl %esi, %esi, %ebp - mulxl %eax, %ecx, %eax - movl %ecx, 12(%esp) # 4-byte Spill - addl %esi, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 8(%edi), %eax - movl %eax, 88(%esp) # 4-byte Spill - mulxl %eax, %ecx, %esi - adcl %ebp, %ecx - movl 12(%edi), %eax - movl %eax, 84(%esp) # 4-byte Spill - mulxl %eax, %ebx, %eax - adcl %esi, %ebx - movl 16(%edi), %esi - movl %esi, 80(%esp) # 4-byte Spill - mulxl %esi, %esi, %ebp - adcl %eax, %esi - movl 20(%edi), %eax - movl %eax, 76(%esp) # 4-byte Spill - mulxl %eax, %edi, %eax - adcl %ebp, %edi - adcl $0, %eax - movl 12(%esp), %edx # 4-byte Reload - addl 16(%esp), %edx # 4-byte Folded Reload - movl 44(%esp), %edx # 4-byte Reload - adcl %edx, 36(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 32(%esp), %edi # 4-byte Folded Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - sbbl %eax, %eax - andl $1, %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 128(%esp), %edx - movl 4(%edx), %edx - mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, (%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload - mulxl 52(%esp), %ebx, %ebp # 4-byte Folded Reload - addl %eax, %ebp - movl %ebp, 28(%esp) # 4-byte Spill - mulxl 56(%esp), %esi, %ebp # 4-byte Folded Reload - adcl %ecx, %esi - mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload - adcl %ebp, %ecx - movl %ecx, 32(%esp) # 4-byte Spill - adcl (%esp), %eax # 4-byte Folded Reload - movl %eax, %ecx - movl 40(%esp), %eax # 4-byte Reload - adcl 4(%esp), %eax # 4-byte Folded Reload - movl 44(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 4(%esp) # 4-byte Spill - movl 20(%esp), %ebp # 4-byte Reload - adcl %ebp, 28(%esp) # 4-byte Folded Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - movl 32(%esp), %esi # 4-byte Reload - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl %edi, %ecx - movl %ecx, 16(%esp) # 4-byte Spill - adcl 12(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%esp) # 4-byte Spill - adcl 8(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl %ebx, %edx - imull 60(%esp), %edx # 4-byte Folded Reload - mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload - movl %edi, 12(%esp) # 4-byte Spill - addl %ecx, %esi - movl %esi, %edi - mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload - adcl %eax, %esi - movl %esi, %eax - mulxl 84(%esp), %esi, %ebx # 4-byte 
Folded Reload - adcl %ecx, %esi - mulxl 80(%esp), %ecx, %ebp # 4-byte Folded Reload - adcl %ebx, %ecx - movl %ecx, %ebx - mulxl 76(%esp), %ecx, %edx # 4-byte Folded Reload - adcl %ebp, %ecx - movl %ecx, 24(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, %ecx - movl 36(%esp), %edx # 4-byte Reload - andl $1, %edx - movl 12(%esp), %ebp # 4-byte Reload - addl 4(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%esp) # 4-byte Spill - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 16(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl %eax, 24(%esp) # 4-byte Folded Spill - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 128(%esp), %edx - movl 8(%edx), %edx - mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, (%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - mulxl 48(%esp), %ebx, %edi # 4-byte Folded Reload - mulxl 52(%esp), %eax, %esi # 4-byte Folded Reload - movl %eax, 4(%esp) # 4-byte Spill - addl %ebx, %esi - mulxl 56(%esp), %ecx, %ebp # 4-byte Folded Reload - adcl %edi, %ecx - mulxl 68(%esp), %edi, %eax # 4-byte Folded Reload - adcl %ebp, %edi - adcl (%esp), %eax # 4-byte Folded Reload - movl 40(%esp), %ebp # 4-byte Reload - adcl 16(%esp), %ebp # 4-byte Folded Reload - movl 44(%esp), %edx # 4-byte Reload - adcl $0, %edx - movl 4(%esp), %ebx # 4-byte Reload - addl 28(%esp), %ebx # 4-byte Folded Reload - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 32(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 40(%esp) # 4-byte Spill - adcl 36(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 32(%esp) # 4-byte Spill - movl %ebx, %edx - imull 60(%esp), %edx # 4-byte Folded Reload - mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 92(%esp), %ebp, %esi # 4-byte Folded Reload - movl %ebp, 36(%esp) # 4-byte Spill - addl %ecx, %esi - movl %esi, 28(%esp) # 4-byte Spill - mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload - adcl %eax, %esi - movl %esi, 24(%esp) # 4-byte Spill - mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload - adcl %ecx, %esi - movl %esi, %ecx - mulxl 80(%esp), %esi, %ebp # 4-byte Folded Reload - movl %ebp, 8(%esp) # 4-byte Spill - adcl %eax, %esi - mulxl 76(%esp), %ebp, %eax # 4-byte Folded Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl $0, %eax - movl 32(%esp), %edx # 4-byte Reload - andl $1, %edx - addl %ebx, 36(%esp) # 4-byte Folded Spill - movl 20(%esp), %ebx # 4-byte Reload - adcl %ebx, 28(%esp) # 4-byte Folded Spill - movl 16(%esp), %ebx # 4-byte Reload - adcl %ebx, 24(%esp) # 4-byte Folded Spill - adcl %edi, %ecx - movl %ecx, 16(%esp) # 4-byte Spill - adcl 12(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 40(%esp), %ebp # 4-byte Folded Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 32(%esp) # 4-byte Spill 
- movl 128(%esp), %edx - movl 12(%edx), %edx - mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - mulxl 72(%esp), %esi, %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - mulxl 48(%esp), %eax, %ebx # 4-byte Folded Reload - mulxl 52(%esp), %ecx, %edi # 4-byte Folded Reload - addl %eax, %edi - movl %edi, 44(%esp) # 4-byte Spill - mulxl 56(%esp), %eax, %edi # 4-byte Folded Reload - adcl %ebx, %eax - movl %eax, 20(%esp) # 4-byte Spill - mulxl 68(%esp), %ebx, %eax # 4-byte Folded Reload - adcl %edi, %ebx - adcl %esi, %eax - movl %eax, %esi - movl 36(%esp), %eax # 4-byte Reload - adcl 4(%esp), %eax # 4-byte Folded Reload - movl 40(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 44(%esp) # 4-byte Spill - movl 16(%esp), %edi # 4-byte Reload - adcl %edi, 20(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - adcl %ebp, %esi - movl %esi, 28(%esp) # 4-byte Spill - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %edx - imull 60(%esp), %edx # 4-byte Folded Reload - mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - addl %ecx, %esi - movl %esi, %ebx - mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload - adcl %eax, %esi - movl %esi, %edi - mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload - adcl %ecx, %esi - movl %esi, %ecx - mulxl 80(%esp), %esi, %ebp # 4-byte Folded Reload - movl %ebp, 12(%esp) # 4-byte Spill - adcl %eax, %esi - movl %esi, %ebp - mulxl 76(%esp), %edx, %eax # 4-byte Folded Reload - adcl 12(%esp), %edx # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill - adcl $0, %eax - movl 32(%esp), %edx # 4-byte Reload - andl $1, %edx - movl 16(%esp), %esi # 4-byte Reload - addl 4(%esp), %esi # 4-byte Folded Reload - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 12(%esp) # 4-byte Spill - movl 8(%esp), %esi # 4-byte Reload - adcl 36(%esp), %esi # 4-byte Folded Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 128(%esp), %edx - movl 16(%edx), %edx - mulxl 64(%esp), %eax, %ebp # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - mulxl 48(%esp), %ebx, %eax # 4-byte Folded Reload - mulxl 52(%esp), %ecx, %edi # 4-byte Folded Reload - addl %ebx, %edi - movl %edi, 28(%esp) # 4-byte Spill - mulxl 56(%esp), %edi, %ebx # 4-byte Folded Reload - movl %ebx, 4(%esp) # 4-byte Spill - adcl %eax, %edi - movl %edi, %ebx - mulxl 68(%esp), %edx, %eax # 4-byte Folded Reload - adcl 4(%esp), %edx # 4-byte Folded Reload - movl %edx, %edi - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, 4(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - 
adcl 36(%esp), %eax # 4-byte Folded Reload - movl %ebp, %edx - adcl $0, %edx - addl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - movl 20(%esp), %ebp # 4-byte Reload - adcl %ebp, 28(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - movl 4(%esp), %ebx # 4-byte Reload - adcl %esi, %ebx - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %edx - imull 60(%esp), %edx # 4-byte Folded Reload - mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload - movl %edi, 12(%esp) # 4-byte Spill - addl %ecx, %esi - movl %esi, 36(%esp) # 4-byte Spill - mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload - adcl %eax, %esi - movl %esi, 32(%esp) # 4-byte Spill - mulxl 84(%esp), %edi, %eax # 4-byte Folded Reload - adcl %ecx, %edi - mulxl 80(%esp), %esi, %ebp # 4-byte Folded Reload - adcl %eax, %esi - mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload - adcl %ebp, %ecx - adcl $0, %eax - movl %eax, %ebp - movl 40(%esp), %edx # 4-byte Reload - andl $1, %edx - movl 12(%esp), %eax # 4-byte Reload - addl 8(%esp), %eax # 4-byte Folded Reload - movl 28(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl %ebx, %esi - movl %esi, 20(%esp) # 4-byte Spill - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 44(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 128(%esp), %edx - movl 20(%edx), %edx - mulxl 48(%esp), %eax, %esi # 4-byte Folded Reload - mulxl 52(%esp), %ecx, %ebp # 4-byte Folded Reload - movl %ecx, 52(%esp) # 4-byte Spill - addl %eax, %ebp - mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 48(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte Spill - mulxl 56(%esp), %ebx, %ecx # 4-byte Folded Reload - adcl %esi, %ebx - mulxl 72(%esp), %esi, %eax # 4-byte Folded Reload - movl %esi, 72(%esp) # 4-byte Spill - mulxl 68(%esp), %esi, %edx # 4-byte Folded Reload - adcl %ecx, %esi - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %edx, %ecx - adcl 48(%esp), %eax # 4-byte Folded Reload - movl 64(%esp), %edx # 4-byte Reload - adcl $0, %edx - movl 52(%esp), %edi # 4-byte Reload - addl 36(%esp), %edi # 4-byte Folded Reload - movl %edi, 52(%esp) # 4-byte Spill - adcl 32(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 68(%esp) # 4-byte Spill - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 48(%esp) # 4-byte Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 36(%esp) # 4-byte Spill - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - imull %edi, %edx - mulxl 92(%esp), %eax, %edi # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload - addl %edi, %ecx - mulxl 
88(%esp), %edi, %ebx # 4-byte Folded Reload - adcl %esi, %edi - movl %edx, %esi - mulxl 84(%esp), %ebp, %eax # 4-byte Folded Reload - adcl %ebx, %ebp - movl %esi, %edx - mulxl 80(%esp), %ebx, %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - adcl %eax, %ebx - movl %esi, %edx - mulxl 76(%esp), %esi, %edx # 4-byte Folded Reload - adcl 56(%esp), %esi # 4-byte Folded Reload - adcl $0, %edx - andl $1, 72(%esp) # 4-byte Folded Spill - movl 60(%esp), %eax # 4-byte Reload - addl 52(%esp), %eax # 4-byte Folded Reload - movl 72(%esp), %eax # 4-byte Reload - adcl 68(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 56(%esp) # 4-byte Spill - adcl 48(%esp), %edi # 4-byte Folded Reload - movl %edi, 68(%esp) # 4-byte Spill - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 60(%esp) # 4-byte Spill - adcl 44(%esp), %esi # 4-byte Folded Reload - adcl 64(%esp), %edx # 4-byte Folded Reload - adcl $0, %eax - subl 92(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 72(%esp) # 4-byte Spill - movl %edi, %ecx - sbbl 96(%esp), %ecx # 4-byte Folded Reload - movl %ebp, %edi - sbbl 88(%esp), %edi # 4-byte Folded Reload - movl %edi, 92(%esp) # 4-byte Spill - movl %esi, %edi - sbbl 84(%esp), %ebx # 4-byte Folded Reload - sbbl 80(%esp), %esi # 4-byte Folded Reload - movl %esi, 96(%esp) # 4-byte Spill - movl %edx, %esi - sbbl 76(%esp), %esi # 4-byte Folded Reload - sbbl $0, %eax - andl $1, %eax - jne .LBB83_2 -# BB#1: - movl %ecx, 68(%esp) # 4-byte Spill -.LBB83_2: - testb %al, %al - movl 56(%esp), %ecx # 4-byte Reload - jne .LBB83_4 -# BB#3: - movl 72(%esp), %ecx # 4-byte Reload -.LBB83_4: - movl 120(%esp), %eax - movl %ecx, (%eax) - movl 68(%esp), %ecx # 4-byte Reload - movl %ecx, 4(%eax) - jne .LBB83_6 -# BB#5: - movl 92(%esp), %ebp # 4-byte Reload -.LBB83_6: - movl %ebp, 8(%eax) - movl 60(%esp), %ecx # 4-byte Reload - jne .LBB83_8 -# BB#7: - movl %ebx, %ecx -.LBB83_8: - movl %ecx, 12(%eax) - jne .LBB83_10 -# BB#9: - movl 96(%esp), %edi # 4-byte Reload -.LBB83_10: - movl %edi, 16(%eax) - jne .LBB83_12 -# BB#11: - movl %esi, %edx -.LBB83_12: - movl %edx, 20(%eax) - addl $100, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end83: - .size mcl_fp_mont6Lbmi2, .Lfunc_end83-mcl_fp_mont6Lbmi2 - - .globl mcl_fp_montNF6Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF6Lbmi2,@function -mcl_fp_montNF6Lbmi2: # @mcl_fp_montNF6Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $84, %esp - movl 108(%esp), %ebx - movl (%ebx), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 4(%ebx), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 112(%esp), %eax - movl (%eax), %eax - mulxl %eax, %ecx, %esi - movl %edi, %edx - mulxl %eax, %edx, %ebp - movl %edx, 76(%esp) # 4-byte Spill - addl %ecx, %ebp - movl 8(%ebx), %edx - movl %edx, 44(%esp) # 4-byte Spill - mulxl %eax, %ecx, %edi - adcl %esi, %ecx - movl %ecx, %esi - movl 12(%ebx), %edx - movl %edx, 40(%esp) # 4-byte Spill - mulxl %eax, %ecx, %edx - movl %edx, 80(%esp) # 4-byte Spill - adcl %edi, %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 16(%ebx), %edx - movl %edx, 36(%esp) # 4-byte Spill - mulxl %eax, %ecx, %edi - adcl 80(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 60(%esp) # 4-byte Spill - movl 20(%ebx), %edx - movl %edx, 32(%esp) # 4-byte Spill - mulxl %eax, %ecx, %eax - adcl %edi, %ecx - movl %ecx, 20(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 116(%esp), %ebx - movl -4(%ebx), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 76(%esp), %edi # 
4-byte Reload - movl %edi, %edx - imull %eax, %edx - movl (%ebx), %eax - movl %eax, 80(%esp) # 4-byte Spill - mulxl %eax, %ecx, %eax - movl %eax, 16(%esp) # 4-byte Spill - addl %edi, %ecx - movl 4(%ebx), %eax - movl %eax, 76(%esp) # 4-byte Spill - mulxl %eax, %edi, %ecx - movl %ecx, 12(%esp) # 4-byte Spill - adcl %ebp, %edi - movl 8(%ebx), %eax - movl %eax, 72(%esp) # 4-byte Spill - mulxl %eax, %eax, %ecx - movl %ecx, 8(%esp) # 4-byte Spill - adcl %esi, %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 12(%ebx), %eax - movl %eax, 68(%esp) # 4-byte Spill - mulxl %eax, %esi, %eax - movl %eax, 4(%esp) # 4-byte Spill - adcl 64(%esp), %esi # 4-byte Folded Reload - movl 16(%ebx), %eax - movl %eax, 64(%esp) # 4-byte Spill - mulxl %eax, %ecx, %eax - movl %eax, (%esp) # 4-byte Spill - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl 20(%ebx), %eax - movl %eax, 60(%esp) # 4-byte Spill - mulxl %eax, %ebp, %eax - adcl 20(%esp), %ebp # 4-byte Folded Reload - movl 28(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - movl 12(%esp), %edi # 4-byte Reload - adcl %edi, 24(%esp) # 4-byte Folded Spill - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 4(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl (%esp), %ebp # 4-byte Folded Reload - movl %ebp, 8(%esp) # 4-byte Spill - adcl %eax, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 112(%esp), %eax - movl 4(%eax), %edx - mulxl 48(%esp), %ecx, %esi # 4-byte Folded Reload - mulxl 52(%esp), %ebp, %eax # 4-byte Folded Reload - addl %ecx, %eax - movl %eax, 4(%esp) # 4-byte Spill - mulxl 44(%esp), %ecx, %edi # 4-byte Folded Reload - adcl %esi, %ecx - movl %ecx, %esi - mulxl 40(%esp), %eax, %ebx # 4-byte Folded Reload - adcl %edi, %eax - mulxl 36(%esp), %ecx, %edi # 4-byte Folded Reload - movl %edi, (%esp) # 4-byte Spill - adcl %ebx, %ecx - movl %ecx, %edi - mulxl 32(%esp), %ebx, %ecx # 4-byte Folded Reload - adcl (%esp), %ebx # 4-byte Folded Reload - adcl $0, %ecx - movl %ecx, %edx - addl 20(%esp), %ebp # 4-byte Folded Reload - movl 4(%esp), %ecx # 4-byte Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 4(%esp) # 4-byte Spill - adcl 12(%esp), %eax # 4-byte Folded Reload - adcl 8(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 28(%esp), %ebx # 4-byte Folded Reload - adcl $0, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl %ebp, %esi - movl %esi, %edx - imull 56(%esp), %edx # 4-byte Folded Reload - mulxl 80(%esp), %ebp, %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - addl %esi, %ebp - mulxl 76(%esp), %ebp, %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl %ecx, %ebp - movl %ebp, %esi - mulxl 72(%esp), %ebp, %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl 4(%esp), %ebp # 4-byte Folded Reload - movl %ebp, %ecx - mulxl 68(%esp), %ebp, %edi # 4-byte Folded Reload - movl %edi, 8(%esp) # 4-byte Spill - adcl %eax, %ebp - movl %ebp, %eax - mulxl 64(%esp), %ebp, %edi # 4-byte Folded Reload - movl %edi, 4(%esp) # 4-byte Spill - adcl 20(%esp), %ebp # 4-byte Folded Reload - mulxl 60(%esp), %edi, %edx # 4-byte Folded Reload - adcl %ebx, %edi - movl %edi, %ebx - movl 28(%esp), %edi # 4-byte Reload - adcl $0, %edi - addl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 
12(%esp), %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 8(%esp) # 4-byte Spill - adcl 4(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 4(%esp) # 4-byte Spill - adcl %edx, %edi - movl %edi, 28(%esp) # 4-byte Spill - movl 112(%esp), %eax - movl 8(%eax), %edx - mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload - mulxl 52(%esp), %esi, %edi # 4-byte Folded Reload - movl %esi, (%esp) # 4-byte Spill - addl %eax, %edi - mulxl 44(%esp), %eax, %esi # 4-byte Folded Reload - adcl %ecx, %eax - movl %eax, %ecx - mulxl 40(%esp), %eax, %ebx # 4-byte Folded Reload - adcl %esi, %eax - movl %eax, 24(%esp) # 4-byte Spill - mulxl 36(%esp), %eax, %ebp # 4-byte Folded Reload - adcl %ebx, %eax - movl %eax, %esi - mulxl 32(%esp), %ebx, %eax # 4-byte Folded Reload - adcl %ebp, %ebx - adcl $0, %eax - movl %eax, %edx - movl (%esp), %ebp # 4-byte Reload - addl 20(%esp), %ebp # 4-byte Folded Reload - adcl 16(%esp), %edi # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - adcl 4(%esp), %esi # 4-byte Folded Reload - movl %esi, (%esp) # 4-byte Spill - adcl 28(%esp), %ebx # 4-byte Folded Reload - adcl $0, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl %ebp, %edx - movl %ebp, %eax - imull 56(%esp), %edx # 4-byte Folded Reload - mulxl 80(%esp), %ebp, %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - addl %eax, %ebp - mulxl 76(%esp), %ebp, %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl %edi, %ebp - movl %ebp, %edi - mulxl 72(%esp), %ebp, %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 20(%esp), %ebp # 4-byte Folded Reload - movl %ebp, %ecx - mulxl 68(%esp), %ebp, %eax # 4-byte Folded Reload - movl %eax, 4(%esp) # 4-byte Spill - adcl 24(%esp), %ebp # 4-byte Folded Reload - movl %ebp, %eax - mulxl 64(%esp), %ebp, %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl (%esp), %ebp # 4-byte Folded Reload - movl %ebp, 20(%esp) # 4-byte Spill - mulxl 60(%esp), %ebp, %edx # 4-byte Folded Reload - adcl %ebx, %ebp - movl 28(%esp), %esi # 4-byte Reload - adcl $0, %esi - addl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - movl 4(%esp), %eax # 4-byte Reload - adcl %eax, 20(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 4(%esp) # 4-byte Spill - adcl %edx, %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 112(%esp), %eax - movl 12(%eax), %edx - mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload - mulxl 52(%esp), %ebp, %esi # 4-byte Folded Reload - addl %eax, %esi - mulxl 44(%esp), %eax, %edi # 4-byte Folded Reload - adcl %ecx, %eax - mulxl 40(%esp), %ecx, %ebx # 4-byte Folded Reload - adcl %edi, %ecx - movl %ecx, 24(%esp) # 4-byte Spill - mulxl 36(%esp), %ecx, %edi # 4-byte Folded Reload - movl %edi, (%esp) # 4-byte Spill - adcl %ebx, %ecx - movl %ecx, %edi - mulxl 32(%esp), %ebx, %ecx # 4-byte Folded Reload - adcl (%esp), %ebx # 4-byte Folded Reload - adcl $0, %ecx - addl 16(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %esi # 4-byte Folded Reload - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 20(%esp), 
%eax # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - adcl 4(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 28(%esp), %ebx # 4-byte Folded Reload - adcl $0, %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl %ebp, %ecx - movl %ecx, %edx - imull 56(%esp), %edx # 4-byte Folded Reload - mulxl 80(%esp), %ebp, %eax # 4-byte Folded Reload - movl %eax, 16(%esp) # 4-byte Spill - addl %ecx, %ebp - mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - adcl %esi, %ecx - mulxl 72(%esp), %ebp, %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - adcl (%esp), %ebp # 4-byte Folded Reload - mulxl 68(%esp), %esi, %eax # 4-byte Folded Reload - movl %eax, 4(%esp) # 4-byte Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, %eax - mulxl 64(%esp), %esi, %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - mulxl 60(%esp), %esi, %edx # 4-byte Folded Reload - adcl %ebx, %esi - movl 28(%esp), %edi # 4-byte Reload - adcl $0, %edi - addl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 12(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 12(%esp) # 4-byte Spill - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - movl 4(%esp), %eax # 4-byte Reload - adcl %eax, 20(%esp) # 4-byte Folded Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 4(%esp) # 4-byte Spill - adcl %edx, %edi - movl %edi, 28(%esp) # 4-byte Spill - movl 112(%esp), %eax - movl 16(%eax), %edx - mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload - mulxl 52(%esp), %esi, %edi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - addl %eax, %edi - mulxl 44(%esp), %eax, %esi # 4-byte Folded Reload - adcl %ecx, %eax - mulxl 40(%esp), %ecx, %ebx # 4-byte Folded Reload - adcl %esi, %ecx - mulxl 36(%esp), %esi, %ebp # 4-byte Folded Reload - adcl %ebx, %esi - mulxl 32(%esp), %ebx, %edx # 4-byte Folded Reload - adcl %ebp, %ebx - adcl $0, %edx - movl 24(%esp), %ebp # 4-byte Reload - addl 16(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 24(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - adcl 4(%esp), %esi # 4-byte Folded Reload - adcl 28(%esp), %ebx # 4-byte Folded Reload - adcl $0, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl %ebp, %edx - imull 56(%esp), %edx # 4-byte Folded Reload - mulxl 80(%esp), %ebp, %eax # 4-byte Folded Reload - movl %eax, 20(%esp) # 4-byte Spill - addl 24(%esp), %ebp # 4-byte Folded Reload - mulxl 76(%esp), %ebp, %eax # 4-byte Folded Reload - movl %eax, 16(%esp) # 4-byte Spill - adcl %edi, %ebp - mulxl 72(%esp), %edi, %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - adcl 8(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - mulxl 68(%esp), %eax, %edi # 4-byte Folded Reload - movl %edi, 8(%esp) # 4-byte Spill - adcl %ecx, %eax - mulxl 64(%esp), %ecx, %edi # 4-byte Folded Reload - movl %edi, 4(%esp) # 4-byte Spill - adcl %esi, %ecx - movl %ecx, %edi - mulxl 60(%esp), %ecx, %edx # 4-byte Folded Reload - adcl %ebx, %ecx - movl 28(%esp), %esi # 4-byte Reload - adcl $0, %esi - addl 20(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 20(%esp) # 4-byte Spill - movl 16(%esp), %ebx # 4-byte Reload - adcl %ebx, 24(%esp) # 4-byte Folded Spill - adcl 12(%esp), %eax # 4-byte Folded Reload - movl 
%eax, 16(%esp) # 4-byte Spill - adcl 8(%esp), %edi # 4-byte Folded Reload - movl %edi, 12(%esp) # 4-byte Spill - adcl 4(%esp), %ecx # 4-byte Folded Reload - adcl %edx, %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 112(%esp), %eax - movl 20(%eax), %edx - mulxl 48(%esp), %ebx, %eax # 4-byte Folded Reload - mulxl 52(%esp), %edi, %esi # 4-byte Folded Reload - movl %edi, 52(%esp) # 4-byte Spill - addl %ebx, %esi - mulxl 44(%esp), %ebx, %ebp # 4-byte Folded Reload - adcl %eax, %ebx - mulxl 40(%esp), %eax, %edi # 4-byte Folded Reload - movl %edi, 48(%esp) # 4-byte Spill - adcl %ebp, %eax - movl %eax, %ebp - mulxl 36(%esp), %eax, %edi # 4-byte Folded Reload - movl %edi, 44(%esp) # 4-byte Spill - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - mulxl 32(%esp), %edx, %eax # 4-byte Folded Reload - adcl 44(%esp), %edx # 4-byte Folded Reload - adcl $0, %eax - movl 52(%esp), %edi # 4-byte Reload - addl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 52(%esp) # 4-byte Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - adcl 16(%esp), %ebx # 4-byte Folded Reload - adcl 12(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 40(%esp) # 4-byte Spill - adcl %ecx, 48(%esp) # 4-byte Folded Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - adcl $0, %eax - movl 56(%esp), %edx # 4-byte Reload - movl 52(%esp), %ebp # 4-byte Reload - imull %ebp, %edx - mulxl 80(%esp), %ecx, %edi # 4-byte Folded Reload - movl %edi, 56(%esp) # 4-byte Spill - addl %ebp, %ecx - mulxl 76(%esp), %ebp, %ecx # 4-byte Folded Reload - movl %ecx, 52(%esp) # 4-byte Spill - adcl %esi, %ebp - mulxl 72(%esp), %ecx, %esi # 4-byte Folded Reload - movl %esi, 36(%esp) # 4-byte Spill - adcl %ebx, %ecx - movl %edx, %ebx - mulxl 68(%esp), %esi, %edx # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %ebx, %edx - mulxl 64(%esp), %edi, %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - adcl 48(%esp), %edi # 4-byte Folded Reload - movl %ebx, %edx - mulxl 60(%esp), %ebx, %edx # 4-byte Folded Reload - adcl 44(%esp), %ebx # 4-byte Folded Reload - adcl $0, %eax - addl 56(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 56(%esp) # 4-byte Spill - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 44(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 48(%esp) # 4-byte Spill - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 52(%esp) # 4-byte Spill - adcl 40(%esp), %ebx # 4-byte Folded Reload - adcl %edx, %eax - movl %ebp, %edx - subl 80(%esp), %edx # 4-byte Folded Reload - sbbl 76(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - movl %esi, %ebp - movl %ebx, %esi - sbbl 72(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 76(%esp) # 4-byte Spill - sbbl 68(%esp), %edi # 4-byte Folded Reload - movl %edi, 80(%esp) # 4-byte Spill - movl %esi, %ebx - sbbl 64(%esp), %ebx # 4-byte Folded Reload - movl %eax, %edi - sbbl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, %ebp - sarl $31, %ebp - testl %ebp, %ebp - js .LBB84_2 -# BB#1: - movl %edx, 56(%esp) # 4-byte Spill -.LBB84_2: - movl 104(%esp), %ebp - movl 56(%esp), %ecx # 4-byte Reload - movl %ecx, (%ebp) - movl 44(%esp), %ecx # 4-byte Reload - js .LBB84_4 -# BB#3: - movl 40(%esp), %ecx # 4-byte Reload -.LBB84_4: - movl %ecx, 4(%ebp) - movl 52(%esp), %ecx # 4-byte Reload - movl 48(%esp), %edx # 4-byte Reload - js .LBB84_6 -# BB#5: - movl 76(%esp), %edx # 4-byte Reload -.LBB84_6: - movl %edx, 8(%ebp) - js 
.LBB84_8 -# BB#7: - movl 80(%esp), %ecx # 4-byte Reload -.LBB84_8: - movl %ecx, 12(%ebp) - js .LBB84_10 -# BB#9: - movl %ebx, %esi -.LBB84_10: - movl %esi, 16(%ebp) - js .LBB84_12 -# BB#11: - movl %edi, %eax -.LBB84_12: - movl %eax, 20(%ebp) - addl $84, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end84: - .size mcl_fp_montNF6Lbmi2, .Lfunc_end84-mcl_fp_montNF6Lbmi2 - - .globl mcl_fp_montRed6Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed6Lbmi2,@function -mcl_fp_montRed6Lbmi2: # @mcl_fp_montRed6Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $88, %esp - movl 116(%esp), %edi - movl -4(%edi), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl (%edi), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 112(%esp), %ecx - movl (%ecx), %edx - movl %edx, 84(%esp) # 4-byte Spill - imull %eax, %edx - movl 20(%edi), %eax - movl %eax, 52(%esp) # 4-byte Spill - mulxl %eax, %ebx, %eax - movl %ebx, 56(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl 16(%edi), %eax - movl %eax, 80(%esp) # 4-byte Spill - mulxl %eax, %ecx, %eax - movl %ecx, 44(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl 12(%edi), %eax - movl %eax, 76(%esp) # 4-byte Spill - mulxl %eax, %ecx, %eax - movl %ecx, 28(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl 4(%edi), %eax - movl %eax, 72(%esp) # 4-byte Spill - mulxl %eax, %ebx, %eax - movl %eax, 24(%esp) # 4-byte Spill - mulxl %esi, %ecx, %eax - movl %ecx, 48(%esp) # 4-byte Spill - addl %ebx, %eax - movl %eax, %ebp - movl 8(%edi), %esi - movl %esi, 64(%esp) # 4-byte Spill - mulxl %esi, %eax, %edx - adcl 24(%esp), %eax # 4-byte Folded Reload - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, %ebx - movl 32(%esp), %edi # 4-byte Reload - adcl 44(%esp), %edi # 4-byte Folded Reload - movl 36(%esp), %esi # 4-byte Reload - adcl 56(%esp), %esi # 4-byte Folded Reload - movl 40(%esp), %edx # 4-byte Reload - adcl $0, %edx - movl 48(%esp), %ecx # 4-byte Reload - addl 84(%esp), %ecx # 4-byte Folded Reload - movl 112(%esp), %ecx - adcl 4(%ecx), %ebp - adcl 8(%ecx), %eax - movl %eax, 16(%esp) # 4-byte Spill - adcl 12(%ecx), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - adcl 16(%ecx), %edi - movl %edi, 32(%esp) # 4-byte Spill - adcl 20(%ecx), %esi - movl %esi, 36(%esp) # 4-byte Spill - adcl 24(%ecx), %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 44(%ecx), %edx - movl 40(%ecx), %esi - movl 36(%ecx), %edi - movl 32(%ecx), %ebx - movl 28(%ecx), %eax - adcl $0, %eax - movl %eax, 12(%esp) # 4-byte Spill - adcl $0, %ebx - movl %ebx, 28(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 44(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 48(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 56(%esp) # 4-byte Spill - sbbl %ecx, %ecx - andl $1, %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl %ebp, %ebx - movl %ebx, %edx - imull 60(%esp), %edx # 4-byte Folded Reload - mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 68(%esp), %esi, %ebp # 4-byte Folded Reload - movl %esi, 8(%esp) # 4-byte Spill - addl %ecx, %ebp - mulxl 64(%esp), %edi, %ecx # 4-byte Folded Reload - adcl %eax, %edi - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - mulxl 80(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, (%esp) # 4-byte Spill - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, %ecx - movl 52(%esp), %esi # 4-byte Reload - mulxl %esi, %edx, %eax - adcl 
(%esp), %edx # 4-byte Folded Reload - adcl $0, %eax - addl %ebx, 8(%esp) # 4-byte Folded Spill - adcl 16(%esp), %ebp # 4-byte Folded Reload - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - movl 32(%esp), %edi # 4-byte Reload - adcl %edi, 24(%esp) # 4-byte Folded Spill - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 36(%esp) # 4-byte Spill - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 12(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - adcl $0, 28(%esp) # 4-byte Folded Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - adcl $0, 48(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ebp, %edx - imull 60(%esp), %edx # 4-byte Folded Reload - mulxl %esi, %ecx, %eax - movl %ecx, 12(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - mulxl 64(%esp), %esi, %eax # 4-byte Folded Reload - movl %eax, 4(%esp) # 4-byte Spill - mulxl 72(%esp), %edi, %eax # 4-byte Folded Reload - mulxl 68(%esp), %ecx, %ebx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - addl %edi, %ebx - adcl %esi, %eax - movl %eax, %esi - mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload - adcl 4(%esp), %ecx # 4-byte Folded Reload - mulxl 80(%esp), %edi, %edx # 4-byte Folded Reload - adcl %eax, %edi - movl %edi, %eax - adcl 12(%esp), %edx # 4-byte Folded Reload - movl %edx, %edi - movl 40(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl %ebp, 8(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ebx # 4-byte Folded Reload - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - adcl $0, 48(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ebx, %edx - imull 60(%esp), %edx # 4-byte Folded Reload - mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 28(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - mulxl 64(%esp), %esi, %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 68(%esp), %edi, %ebp # 4-byte Folded Reload - addl %ecx, %ebp - adcl %esi, %eax - movl %eax, 32(%esp) # 4-byte Spill - mulxl 76(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, %esi - mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload - adcl 4(%esp), %ecx # 4-byte Folded Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl 36(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl %ebx, %edi - adcl 24(%esp), %ebp # 4-byte Folded Reload - movl 20(%esp), %edi # 4-byte Reload - adcl %edi, 32(%esp) # 4-byte Folded Spill - adcl 12(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - adcl 44(%esp), %edx # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - adcl $0, 48(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 
4-byte Folded Spill - movl %ebp, %edx - imull 60(%esp), %edx # 4-byte Folded Reload - mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - mulxl 64(%esp), %edi, %esi # 4-byte Folded Reload - mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - mulxl 68(%esp), %ebx, %ecx # 4-byte Folded Reload - addl 40(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - adcl %edi, %eax - movl %eax, %edi - mulxl 76(%esp), %eax, %ecx # 4-byte Folded Reload - adcl %esi, %eax - movl %eax, %esi - mulxl 80(%esp), %edx, %eax # 4-byte Folded Reload - adcl %ecx, %edx - movl %edx, %ecx - adcl 16(%esp), %eax # 4-byte Folded Reload - movl 44(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl %ebp, %ebx - movl 40(%esp), %ebx # 4-byte Reload - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 40(%esp) # 4-byte Spill - adcl 28(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - adcl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl 60(%esp), %edx # 4-byte Reload - imull %ebx, %edx - mulxl 68(%esp), %eax, %ecx # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - mulxl 72(%esp), %eax, %edi # 4-byte Folded Reload - addl %ecx, %eax - mulxl 64(%esp), %ebp, %ecx # 4-byte Folded Reload - adcl %edi, %ebp - movl %edx, %edi - mulxl 76(%esp), %esi, %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - adcl %ecx, %esi - movl %edi, %edx - mulxl 80(%esp), %ebx, %ecx # 4-byte Folded Reload - movl %ecx, 36(%esp) # 4-byte Spill - adcl 48(%esp), %ebx # 4-byte Folded Reload - movl %edi, %edx - mulxl 52(%esp), %ecx, %edi # 4-byte Folded Reload - adcl 36(%esp), %ecx # 4-byte Folded Reload - adcl $0, %edi - movl 60(%esp), %edx # 4-byte Reload - addl 40(%esp), %edx # 4-byte Folded Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - adcl 28(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 60(%esp) # 4-byte Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 40(%esp) # 4-byte Spill - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 48(%esp) # 4-byte Spill - adcl 56(%esp), %edi # 4-byte Folded Reload - movl %edi, 56(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl $0, %edx - subl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl %ebp, %eax - sbbl 72(%esp), %eax # 4-byte Folded Reload - movl %esi, %ebp - sbbl 64(%esp), %ebp # 4-byte Folded Reload - sbbl 76(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 76(%esp) # 4-byte Spill - movl %ecx, %ebx - sbbl 80(%esp), %ebx # 4-byte Folded Reload - sbbl 52(%esp), %edi # 4-byte Folded Reload - sbbl $0, %edx - andl $1, %edx - movl %edx, 84(%esp) # 4-byte Spill - jne .LBB85_2 -# BB#1: - movl %eax, 60(%esp) # 4-byte Spill -.LBB85_2: - movl 84(%esp), %eax # 4-byte Reload - testb %al, %al - movl 36(%esp), %ecx # 4-byte Reload - jne .LBB85_4 -# BB#3: - movl 68(%esp), %ecx # 4-byte Reload -.LBB85_4: - movl 108(%esp), %eax - movl %ecx, (%eax) - movl 60(%esp), %ecx # 4-byte Reload - movl %ecx, 4(%eax) - movl 40(%esp), %ecx # 4-byte 
Reload - jne .LBB85_6 -# BB#5: - movl %ebp, %esi -.LBB85_6: - movl %esi, 8(%eax) - jne .LBB85_8 -# BB#7: - movl 76(%esp), %ecx # 4-byte Reload -.LBB85_8: - movl %ecx, 12(%eax) - movl 48(%esp), %ecx # 4-byte Reload - jne .LBB85_10 -# BB#9: - movl %ebx, %ecx -.LBB85_10: - movl %ecx, 16(%eax) - movl 56(%esp), %ecx # 4-byte Reload - jne .LBB85_12 -# BB#11: - movl %edi, %ecx -.LBB85_12: - movl %ecx, 20(%eax) - addl $88, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end85: - .size mcl_fp_montRed6Lbmi2, .Lfunc_end85-mcl_fp_montRed6Lbmi2 - - .globl mcl_fp_addPre6Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre6Lbmi2,@function -mcl_fp_addPre6Lbmi2: # @mcl_fp_addPre6Lbmi2 -# BB#0: - pushl %esi - movl 16(%esp), %eax - movl (%eax), %ecx - movl 12(%esp), %edx - addl (%edx), %ecx - movl 8(%esp), %esi - movl %ecx, (%esi) - movl 4(%eax), %ecx - adcl 4(%edx), %ecx - movl %ecx, 4(%esi) - movl 8(%eax), %ecx - adcl 8(%edx), %ecx - movl %ecx, 8(%esi) - movl 12(%edx), %ecx - adcl 12(%eax), %ecx - movl %ecx, 12(%esi) - movl 16(%edx), %ecx - adcl 16(%eax), %ecx - movl %ecx, 16(%esi) - movl 20(%eax), %eax - movl 20(%edx), %ecx - adcl %eax, %ecx - movl %ecx, 20(%esi) - sbbl %eax, %eax - andl $1, %eax - popl %esi - retl -.Lfunc_end86: - .size mcl_fp_addPre6Lbmi2, .Lfunc_end86-mcl_fp_addPre6Lbmi2 - - .globl mcl_fp_subPre6Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre6Lbmi2,@function -mcl_fp_subPre6Lbmi2: # @mcl_fp_subPre6Lbmi2 -# BB#0: - pushl %edi - pushl %esi - movl 16(%esp), %ecx - movl (%ecx), %edx - xorl %eax, %eax - movl 20(%esp), %esi - subl (%esi), %edx - movl 12(%esp), %edi - movl %edx, (%edi) - movl 4(%ecx), %edx - sbbl 4(%esi), %edx - movl %edx, 4(%edi) - movl 8(%ecx), %edx - sbbl 8(%esi), %edx - movl %edx, 8(%edi) - movl 12(%ecx), %edx - sbbl 12(%esi), %edx - movl %edx, 12(%edi) - movl 16(%ecx), %edx - sbbl 16(%esi), %edx - movl %edx, 16(%edi) - movl 20(%esi), %edx - movl 20(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 20(%edi) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - retl -.Lfunc_end87: - .size mcl_fp_subPre6Lbmi2, .Lfunc_end87-mcl_fp_subPre6Lbmi2 - - .globl mcl_fp_shr1_6Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_6Lbmi2,@function -mcl_fp_shr1_6Lbmi2: # @mcl_fp_shr1_6Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl 20(%eax), %ecx - movl 16(%eax), %edx - movl 12(%eax), %esi - movl 8(%eax), %edi - movl (%eax), %ebx - movl 4(%eax), %eax - shrdl $1, %eax, %ebx - movl 20(%esp), %ebp - movl %ebx, (%ebp) - shrdl $1, %edi, %eax - movl %eax, 4(%ebp) - shrdl $1, %esi, %edi - movl %edi, 8(%ebp) - shrdl $1, %edx, %esi - movl %esi, 12(%ebp) - shrdl $1, %ecx, %edx - movl %edx, 16(%ebp) - shrl %ecx - movl %ecx, 20(%ebp) - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end88: - .size mcl_fp_shr1_6Lbmi2, .Lfunc_end88-mcl_fp_shr1_6Lbmi2 - - .globl mcl_fp_add6Lbmi2 - .align 16, 0x90 - .type mcl_fp_add6Lbmi2,@function -mcl_fp_add6Lbmi2: # @mcl_fp_add6Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $12, %esp - movl 40(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %ebp - movl 36(%esp), %ebx - addl (%ebx), %edx - adcl 4(%ebx), %ebp - movl 8(%eax), %ecx - adcl 8(%ebx), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl %ecx, %esi - movl 12(%ebx), %ecx - movl 16(%ebx), %edi - adcl 12(%eax), %ecx - adcl 16(%eax), %edi - movl 20(%ebx), %ebx - adcl 20(%eax), %ebx - movl 32(%esp), %eax - movl %edx, (%eax) - movl %ebp, 4(%eax) - movl %esi, 8(%eax) - movl %ecx, 12(%eax) - movl %edi, 16(%eax) - movl %ebx, 20(%eax) - 
sbbl %eax, %eax - andl $1, %eax - movl 44(%esp), %esi - subl (%esi), %edx - movl %edx, (%esp) # 4-byte Spill - movl 8(%esp), %edx # 4-byte Reload - movl 44(%esp), %esi - sbbl 4(%esi), %ebp - movl %ebp, 4(%esp) # 4-byte Spill - movl %ecx, %ebp - sbbl 8(%esi), %edx - sbbl 12(%esi), %ebp - sbbl 16(%esi), %edi - sbbl 20(%esi), %ebx - sbbl $0, %eax - testb $1, %al - jne .LBB89_2 -# BB#1: # %nocarry - movl (%esp), %eax # 4-byte Reload - movl 32(%esp), %ecx - movl %eax, (%ecx) - movl 4(%esp), %eax # 4-byte Reload - movl %eax, 4(%ecx) - movl %edx, 8(%ecx) - movl %ebp, 12(%ecx) - movl %edi, 16(%ecx) - movl %ebx, 20(%ecx) -.LBB89_2: # %carry - addl $12, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end89: - .size mcl_fp_add6Lbmi2, .Lfunc_end89-mcl_fp_add6Lbmi2 - - .globl mcl_fp_addNF6Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF6Lbmi2,@function -mcl_fp_addNF6Lbmi2: # @mcl_fp_addNF6Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $40, %esp - movl 68(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %ecx - movl 64(%esp), %ebp - addl (%ebp), %edx - movl %edx, 16(%esp) # 4-byte Spill - movl %edx, %ebx - adcl 4(%ebp), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 20(%eax), %edx - movl 16(%eax), %esi - movl 12(%eax), %edi - movl 8(%eax), %eax - adcl 8(%ebp), %eax - movl %eax, 24(%esp) # 4-byte Spill - adcl 12(%ebp), %edi - movl %edi, 28(%esp) # 4-byte Spill - adcl 16(%ebp), %esi - movl %esi, 32(%esp) # 4-byte Spill - adcl 20(%ebp), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl %ebx, %ebp - movl 72(%esp), %ebx - subl (%ebx), %ebp - movl %ebp, (%esp) # 4-byte Spill - movl %ecx, %ebp - movl 72(%esp), %ecx - sbbl 4(%ecx), %ebp - movl %ebp, 4(%esp) # 4-byte Spill - sbbl 8(%ecx), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - sbbl 12(%ecx), %edi - movl %edi, 12(%esp) # 4-byte Spill - movl %esi, %edi - sbbl 16(%ecx), %edi - movl %edx, %esi - sbbl 20(%ecx), %esi - movl %esi, %ebx - sarl $31, %ebx - testl %ebx, %ebx - js .LBB90_2 -# BB#1: - movl (%esp), %eax # 4-byte Reload -.LBB90_2: - movl 60(%esp), %ebx - movl %eax, (%ebx) - movl 20(%esp), %ecx # 4-byte Reload - js .LBB90_4 -# BB#3: - movl 4(%esp), %ecx # 4-byte Reload -.LBB90_4: - movl %ecx, 4(%ebx) - movl 36(%esp), %eax # 4-byte Reload - movl 28(%esp), %edx # 4-byte Reload - movl 24(%esp), %ecx # 4-byte Reload - js .LBB90_6 -# BB#5: - movl 8(%esp), %ecx # 4-byte Reload -.LBB90_6: - movl %ecx, 8(%ebx) - movl 32(%esp), %ecx # 4-byte Reload - js .LBB90_8 -# BB#7: - movl 12(%esp), %edx # 4-byte Reload -.LBB90_8: - movl %edx, 12(%ebx) - js .LBB90_10 -# BB#9: - movl %edi, %ecx -.LBB90_10: - movl %ecx, 16(%ebx) - js .LBB90_12 -# BB#11: - movl %esi, %eax -.LBB90_12: - movl %eax, 20(%ebx) - addl $40, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end90: - .size mcl_fp_addNF6Lbmi2, .Lfunc_end90-mcl_fp_addNF6Lbmi2 - - .globl mcl_fp_sub6Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub6Lbmi2,@function -mcl_fp_sub6Lbmi2: # @mcl_fp_sub6Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $16, %esp - movl 40(%esp), %ebx - movl (%ebx), %esi - movl 4(%ebx), %edi - movl 44(%esp), %ecx - subl (%ecx), %esi - sbbl 4(%ecx), %edi - movl %edi, (%esp) # 4-byte Spill - movl 8(%ebx), %eax - sbbl 8(%ecx), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 12(%ebx), %eax - sbbl 12(%ecx), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 16(%ebx), %ebp - sbbl 16(%ecx), %ebp - movl %ebp, 8(%esp) # 4-byte Spill - movl 20(%ebx), %edx - sbbl 20(%ecx), %edx - movl $0, %ecx - sbbl 
$0, %ecx - testb $1, %cl - movl 36(%esp), %ebx - movl %esi, (%ebx) - movl %edi, 4(%ebx) - movl 12(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl %eax, 12(%ebx) - movl %ebp, 16(%ebx) - movl %edx, 20(%ebx) - je .LBB91_2 -# BB#1: # %carry - movl 48(%esp), %ecx - addl (%ecx), %esi - movl %esi, (%ebx) - movl (%esp), %eax # 4-byte Reload - adcl 4(%ecx), %eax - adcl 8(%ecx), %edi - movl %eax, 4(%ebx) - movl 12(%ecx), %eax - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %edi, 8(%ebx) - movl %eax, 12(%ebx) - movl 16(%ecx), %eax - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, 16(%ebx) - movl 20(%ecx), %eax - adcl %edx, %eax - movl %eax, 20(%ebx) -.LBB91_2: # %nocarry - addl $16, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end91: - .size mcl_fp_sub6Lbmi2, .Lfunc_end91-mcl_fp_sub6Lbmi2 - - .globl mcl_fp_subNF6Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF6Lbmi2,@function -mcl_fp_subNF6Lbmi2: # @mcl_fp_subNF6Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $24, %esp - movl 48(%esp), %ebx - movl 20(%ebx), %esi - movl (%ebx), %ecx - movl 4(%ebx), %eax - movl 52(%esp), %ebp - subl (%ebp), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - sbbl 4(%ebp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 16(%ebx), %eax - movl 12(%ebx), %ecx - movl 8(%ebx), %edx - sbbl 8(%ebp), %edx - movl %edx, 4(%esp) # 4-byte Spill - sbbl 12(%ebp), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - sbbl 16(%ebp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl %esi, %edx - sbbl 20(%ebp), %edx - movl %edx, (%esp) # 4-byte Spill - movl %edx, %ebp - sarl $31, %ebp - movl %ebp, %ecx - addl %ecx, %ecx - movl %ebp, %eax - adcl %eax, %eax - shrl $31, %edx - orl %ecx, %edx - movl 56(%esp), %ebx - andl 4(%ebx), %eax - andl (%ebx), %edx - movl 20(%ebx), %edi - andl %ebp, %edi - movl 16(%ebx), %esi - andl %ebp, %esi - movl 12(%ebx), %ecx - andl %ebp, %ecx - andl 8(%ebx), %ebp - addl 8(%esp), %edx # 4-byte Folded Reload - adcl 16(%esp), %eax # 4-byte Folded Reload - movl 44(%esp), %ebx - movl %edx, (%ebx) - adcl 4(%esp), %ebp # 4-byte Folded Reload - movl %eax, 4(%ebx) - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ebp, 8(%ebx) - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %ecx, 12(%ebx) - movl %esi, 16(%ebx) - adcl (%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%ebx) - addl $24, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end92: - .size mcl_fp_subNF6Lbmi2, .Lfunc_end92-mcl_fp_subNF6Lbmi2 - - .globl mcl_fpDbl_add6Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add6Lbmi2,@function -mcl_fpDbl_add6Lbmi2: # @mcl_fpDbl_add6Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $36, %esp - movl 64(%esp), %edx - movl 60(%esp), %ecx - movl 12(%ecx), %esi - movl 16(%ecx), %eax - movl 8(%edx), %edi - movl (%edx), %ebx - addl (%ecx), %ebx - movl 56(%esp), %ebp - movl %ebx, (%ebp) - movl 4(%edx), %ebx - adcl 4(%ecx), %ebx - adcl 8(%ecx), %edi - adcl 12(%edx), %esi - adcl 16(%edx), %eax - movl %ebx, 4(%ebp) - movl %edx, %ebx - movl 32(%ebx), %edx - movl %edx, 32(%esp) # 4-byte Spill - movl %edi, 8(%ebp) - movl 20(%ebx), %edi - movl %esi, 12(%ebp) - movl 20(%ecx), %esi - adcl %edi, %esi - movl 24(%ebx), %edi - movl %eax, 16(%ebp) - movl 24(%ecx), %edx - adcl %edi, %edx - movl %edx, 20(%esp) # 4-byte Spill - movl 28(%ebx), %edi - movl %esi, 20(%ebp) - movl 28(%ecx), %eax - adcl %edi, %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 32(%ecx), %ebp - adcl 32(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 24(%esp) # 4-byte Spill - movl 36(%ebx), 
%esi - movl %ebx, %edi - movl 36(%ecx), %ebx - adcl %esi, %ebx - movl %ebx, 28(%esp) # 4-byte Spill - movl 40(%edi), %esi - movl 40(%ecx), %edi - adcl %esi, %edi - movl %edi, 32(%esp) # 4-byte Spill - movl 64(%esp), %esi - movl 44(%esi), %esi - movl 44(%ecx), %ecx - adcl %esi, %ecx - movl %ecx, 12(%esp) # 4-byte Spill - sbbl %ecx, %ecx - andl $1, %ecx - movl 68(%esp), %esi - subl (%esi), %edx - movl %edx, 4(%esp) # 4-byte Spill - movl 68(%esp), %edx - sbbl 4(%edx), %eax - movl %eax, (%esp) # 4-byte Spill - sbbl 8(%edx), %ebp - movl %ebp, 8(%esp) # 4-byte Spill - movl %ebx, %ebp - sbbl 12(%edx), %ebp - movl %edi, %ebx - movl 12(%esp), %edi # 4-byte Reload - sbbl 16(%edx), %ebx - movl %edi, %eax - sbbl 20(%edx), %eax - sbbl $0, %ecx - andl $1, %ecx - jne .LBB93_2 -# BB#1: - movl %eax, %edi -.LBB93_2: - testb %cl, %cl - movl 20(%esp), %ecx # 4-byte Reload - movl 16(%esp), %edx # 4-byte Reload - jne .LBB93_4 -# BB#3: - movl (%esp), %edx # 4-byte Reload - movl 4(%esp), %ecx # 4-byte Reload -.LBB93_4: - movl 56(%esp), %eax - movl %ecx, 24(%eax) - movl %edx, 28(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl 24(%esp), %edx # 4-byte Reload - jne .LBB93_6 -# BB#5: - movl 8(%esp), %edx # 4-byte Reload -.LBB93_6: - movl %edx, 32(%eax) - movl 28(%esp), %edx # 4-byte Reload - jne .LBB93_8 -# BB#7: - movl %ebp, %edx -.LBB93_8: - movl %edx, 36(%eax) - jne .LBB93_10 -# BB#9: - movl %ebx, %ecx -.LBB93_10: - movl %ecx, 40(%eax) - movl %edi, 44(%eax) - addl $36, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end93: - .size mcl_fpDbl_add6Lbmi2, .Lfunc_end93-mcl_fpDbl_add6Lbmi2 - - .globl mcl_fpDbl_sub6Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub6Lbmi2,@function -mcl_fpDbl_sub6Lbmi2: # @mcl_fpDbl_sub6Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $24, %esp - movl 48(%esp), %edx - movl (%edx), %eax - movl 4(%edx), %edi - movl 52(%esp), %esi - subl (%esi), %eax - sbbl 4(%esi), %edi - movl 8(%edx), %ebx - sbbl 8(%esi), %ebx - movl 44(%esp), %ecx - movl %eax, (%ecx) - movl 12(%edx), %eax - sbbl 12(%esi), %eax - movl %edi, 4(%ecx) - movl 16(%edx), %edi - sbbl 16(%esi), %edi - movl %ebx, 8(%ecx) - movl 20(%esi), %ebx - movl %eax, 12(%ecx) - movl 20(%edx), %eax - sbbl %ebx, %eax - movl 24(%esi), %ebx - movl %edi, 16(%ecx) - movl 24(%edx), %edi - sbbl %ebx, %edi - movl %edi, 8(%esp) # 4-byte Spill - movl 28(%esi), %edi - movl %eax, 20(%ecx) - movl 28(%edx), %eax - sbbl %edi, %eax - movl %eax, (%esp) # 4-byte Spill - movl 32(%esi), %edi - movl 32(%edx), %eax - sbbl %edi, %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 36(%esi), %edi - movl 36(%edx), %eax - sbbl %edi, %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 40(%esi), %edi - movl 40(%edx), %eax - sbbl %edi, %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 44(%esi), %esi - movl 44(%edx), %eax - sbbl %esi, %eax - movl %eax, 20(%esp) # 4-byte Spill - movl $0, %ebx - sbbl $0, %ebx - andl $1, %ebx - movl 56(%esp), %eax - jne .LBB94_1 -# BB#2: - xorl %edx, %edx - jmp .LBB94_3 -.LBB94_1: - movl 20(%eax), %edx -.LBB94_3: - testb %bl, %bl - jne .LBB94_4 -# BB#5: - movl $0, %esi - movl $0, %edi - jmp .LBB94_6 -.LBB94_4: - movl (%eax), %edi - movl 4(%eax), %esi -.LBB94_6: - jne .LBB94_7 -# BB#8: - movl $0, %ebx - jmp .LBB94_9 -.LBB94_7: - movl 16(%eax), %ebx -.LBB94_9: - jne .LBB94_10 -# BB#11: - movl $0, %ebp - jmp .LBB94_12 -.LBB94_10: - movl 12(%eax), %ebp -.LBB94_12: - jne .LBB94_13 -# BB#14: - xorl %eax, %eax - jmp .LBB94_15 -.LBB94_13: - movl 8(%eax), %eax -.LBB94_15: - addl 8(%esp), %edi # 4-byte Folded Reload - 
adcl (%esp), %esi # 4-byte Folded Reload - movl %edi, 24(%ecx) - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %esi, 28(%ecx) - adcl 12(%esp), %ebp # 4-byte Folded Reload - movl %eax, 32(%ecx) - adcl 16(%esp), %ebx # 4-byte Folded Reload - movl %ebp, 36(%ecx) - movl %ebx, 40(%ecx) - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%ecx) - addl $24, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end94: - .size mcl_fpDbl_sub6Lbmi2, .Lfunc_end94-mcl_fpDbl_sub6Lbmi2 - - .globl mcl_fp_mulUnitPre7Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre7Lbmi2,@function -mcl_fp_mulUnitPre7Lbmi2: # @mcl_fp_mulUnitPre7Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $16, %esp - movl 44(%esp), %edx - movl 40(%esp), %edi - mulxl 4(%edi), %ecx, %esi - mulxl (%edi), %ebx, %eax - movl %ebx, 12(%esp) # 4-byte Spill - addl %ecx, %eax - movl %eax, 8(%esp) # 4-byte Spill - mulxl 8(%edi), %ecx, %eax - adcl %esi, %ecx - movl %ecx, 4(%esp) # 4-byte Spill - mulxl 12(%edi), %ebx, %ecx - adcl %eax, %ebx - mulxl 16(%edi), %esi, %ebp - adcl %ecx, %esi - mulxl 20(%edi), %ecx, %eax - movl %eax, (%esp) # 4-byte Spill - adcl %ebp, %ecx - mulxl 24(%edi), %edx, %edi - movl 36(%esp), %eax - movl 12(%esp), %ebp # 4-byte Reload - movl %ebp, (%eax) - movl 8(%esp), %ebp # 4-byte Reload - movl %ebp, 4(%eax) - movl 4(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%eax) - movl %ebx, 12(%eax) - movl %esi, 16(%eax) - movl %ecx, 20(%eax) - adcl (%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%eax) - adcl $0, %edi - movl %edi, 28(%eax) - addl $16, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end95: - .size mcl_fp_mulUnitPre7Lbmi2, .Lfunc_end95-mcl_fp_mulUnitPre7Lbmi2 - - .globl mcl_fpDbl_mulPre7Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre7Lbmi2,@function -mcl_fpDbl_mulPre7Lbmi2: # @mcl_fpDbl_mulPre7Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $72, %esp - movl 96(%esp), %eax - movl (%eax), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 4(%eax), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, %edi - movl 100(%esp), %eax - movl (%eax), %ebp - mulxl %ebp, %ecx, %eax - movl %esi, %edx - mulxl %ebp, %edx, %esi - movl %edx, 40(%esp) # 4-byte Spill - addl %ecx, %esi - movl 8(%edi), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl %edi, %ebx - mulxl %ebp, %edi, %ecx - adcl %eax, %edi - movl 12(%ebx), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl %ebx, %eax - mulxl %ebp, %ebx, %edx - movl %edx, 48(%esp) # 4-byte Spill - adcl %ecx, %ebx - movl 16(%eax), %edx - movl %edx, 52(%esp) # 4-byte Spill - mulxl %ebp, %ecx, %edx - movl %edx, 44(%esp) # 4-byte Spill - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl 20(%eax), %edx - movl %edx, 48(%esp) # 4-byte Spill - mulxl %ebp, %edx, %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl 44(%esp), %edx # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl 96(%esp), %eax - movl 24(%eax), %edx - movl %edx, 44(%esp) # 4-byte Spill - mulxl %ebp, %eax, %edx - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - movl 92(%esp), %eax - movl 40(%esp), %ebp # 4-byte Reload - movl %ebp, (%eax) - adcl $0, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 100(%esp), %eax - movl 4(%eax), %eax - movl 68(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %ebp - movl %ebp, 40(%esp) # 4-byte Spill - addl %esi, %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - mulxl %eax, %esi, %edx - movl %edx, 32(%esp) # 4-byte Spill - adcl %edi, %esi - 
movl 60(%esp), %edx # 4-byte Reload - mulxl %eax, %edi, %edx - movl %edx, 60(%esp) # 4-byte Spill - adcl %ebx, %edi - movl 56(%esp), %edx # 4-byte Reload - mulxl %eax, %ebx, %edx - movl %edx, 56(%esp) # 4-byte Spill - adcl %ecx, %ebx - movl 52(%esp), %edx # 4-byte Reload - mulxl %eax, %ecx, %edx - movl %edx, 52(%esp) # 4-byte Spill - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %ebp - movl 48(%esp), %edx # 4-byte Reload - mulxl %eax, %ecx, %edx - movl %edx, 48(%esp) # 4-byte Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 64(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - mulxl %eax, %ecx, %edx - adcl 28(%esp), %ecx # 4-byte Folded Reload - sbbl %eax, %eax - andl $1, %eax - addl 40(%esp), %esi # 4-byte Folded Reload - adcl 32(%esp), %edi # 4-byte Folded Reload - adcl 60(%esp), %ebx # 4-byte Folded Reload - adcl 56(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 60(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl %ebp, 64(%esp) # 4-byte Folded Spill - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 28(%esp) # 4-byte Spill - adcl %edx, %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 92(%esp), %eax - movl 68(%esp), %edx # 4-byte Reload - movl %edx, 4(%eax) - movl 96(%esp), %ecx - movl (%ecx), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 100(%esp), %eax - movl 8(%eax), %eax - mulxl %eax, %edx, %ebp - movl %ebp, 20(%esp) # 4-byte Spill - addl %esi, %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 4(%ecx), %edx - movl %edx, 52(%esp) # 4-byte Spill - mulxl %eax, %edx, %esi - movl %esi, 16(%esp) # 4-byte Spill - adcl %edi, %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 8(%ecx), %edx - movl %edx, 48(%esp) # 4-byte Spill - mulxl %eax, %esi, %edx - movl %edx, 12(%esp) # 4-byte Spill - adcl %ebx, %esi - movl %esi, %edi - movl 12(%ecx), %edx - movl %edx, 44(%esp) # 4-byte Spill - mulxl %eax, %edx, %esi - movl %esi, 8(%esp) # 4-byte Spill - adcl 60(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl 16(%ecx), %edx - movl %edx, 40(%esp) # 4-byte Spill - mulxl %eax, %ebx, %edx - movl %edx, 4(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl 20(%ecx), %edx - movl %edx, 36(%esp) # 4-byte Spill - mulxl %eax, %ebp, %edx - movl %edx, 64(%esp) # 4-byte Spill - adcl 28(%esp), %ebp # 4-byte Folded Reload - movl 24(%ecx), %edx - movl %edx, 28(%esp) # 4-byte Spill - mulxl %eax, %ecx, %edx - adcl 24(%esp), %ecx # 4-byte Folded Reload - sbbl %esi, %esi - andl $1, %esi - movl 20(%esp), %eax # 4-byte Reload - addl %eax, 68(%esp) # 4-byte Folded Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - adcl %eax, 60(%esp) # 4-byte Folded Spill - adcl 8(%esp), %ebx # 4-byte Folded Reload - adcl 4(%esp), %ebp # 4-byte Folded Reload - adcl 64(%esp), %ecx # 4-byte Folded Reload - adcl %edx, %esi - movl 92(%esp), %eax - movl 32(%esp), %edx # 4-byte Reload - movl %edx, 8(%eax) - movl 100(%esp), %eax - movl 12(%eax), %eax - movl 56(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %edi - movl %edi, 32(%esp) # 4-byte Spill - addl 68(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %edi - movl %edi, 52(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - mulxl %eax, %edi, %edx - movl %edx, 48(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 
24(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %edi - movl %edi, 60(%esp) # 4-byte Spill - adcl %ebx, %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - mulxl %eax, %ebx, %edx - movl %edx, 44(%esp) # 4-byte Spill - adcl %ebp, %ebx - movl 36(%esp), %edx # 4-byte Reload - mulxl %eax, %ebp, %edx - movl %edx, 40(%esp) # 4-byte Spill - adcl %ecx, %ebp - movl 28(%esp), %edx # 4-byte Reload - mulxl %eax, %ecx, %eax - adcl %esi, %ecx - movl %ecx, %edx - sbbl %ecx, %ecx - andl $1, %ecx - movl 32(%esp), %esi # 4-byte Reload - addl %esi, 64(%esp) # 4-byte Folded Spill - movl 24(%esp), %edi # 4-byte Reload - adcl 52(%esp), %edi # 4-byte Folded Reload - movl 48(%esp), %esi # 4-byte Reload - adcl %esi, 68(%esp) # 4-byte Folded Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - adcl 44(%esp), %ebp # 4-byte Folded Reload - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - adcl %eax, %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 92(%esp), %eax - movl 56(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%eax) - movl 96(%esp), %ecx - movl (%ecx), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 100(%esp), %eax - movl 16(%eax), %esi - mulxl %esi, %eax, %edx - movl %edx, 28(%esp) # 4-byte Spill - addl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - movl 4(%ecx), %edx - movl %edx, 64(%esp) # 4-byte Spill - mulxl %esi, %eax, %edx - movl %edx, 24(%esp) # 4-byte Spill - adcl %edi, %eax - movl %eax, %edi - movl 8(%ecx), %edx - movl %edx, 56(%esp) # 4-byte Spill - mulxl %esi, %eax, %edx - movl %edx, 20(%esp) # 4-byte Spill - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - movl 12(%ecx), %edx - movl %edx, 52(%esp) # 4-byte Spill - mulxl %esi, %eax, %edx - movl %edx, 16(%esp) # 4-byte Spill - adcl %ebx, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 16(%ecx), %edx - movl %edx, 48(%esp) # 4-byte Spill - mulxl %esi, %ebx, %edx - movl %edx, 8(%esp) # 4-byte Spill - adcl %ebp, %ebx - movl 20(%ecx), %edx - movl %edx, 44(%esp) # 4-byte Spill - mulxl %esi, %edx, %eax - movl %eax, 4(%esp) # 4-byte Spill - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, %eax - movl 24(%ecx), %edx - movl %edx, 40(%esp) # 4-byte Spill - mulxl %esi, %ebp, %ecx - movl %ecx, (%esp) # 4-byte Spill - adcl 32(%esp), %ebp # 4-byte Folded Reload - sbbl %ecx, %ecx - andl $1, %ecx - movl %edi, %esi - addl 28(%esp), %esi # 4-byte Folded Reload - movl 12(%esp), %edi # 4-byte Reload - adcl 24(%esp), %edi # 4-byte Folded Reload - movl 20(%esp), %edx # 4-byte Reload - adcl %edx, 68(%esp) # 4-byte Folded Spill - adcl 16(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%esp) # 4-byte Spill - adcl 4(%esp), %ebp # 4-byte Folded Reload - adcl (%esp), %ecx # 4-byte Folded Reload - movl %ecx, 32(%esp) # 4-byte Spill - movl 92(%esp), %eax - movl 36(%esp), %edx # 4-byte Reload - movl %edx, 16(%eax) - movl 100(%esp), %eax - movl 20(%eax), %eax - movl 60(%esp), %edx # 4-byte Reload - mulxl %eax, %ecx, %edx - movl %edx, 36(%esp) # 4-byte Spill - addl %esi, %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - mulxl %eax, %esi, %ecx - movl %ecx, 64(%esp) # 4-byte Spill - adcl %edi, %esi - movl 56(%esp), %edx # 4-byte Reload - mulxl %eax, %ecx, %edx - movl %edx, 56(%esp) # 4-byte Spill - adcl 68(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte 
Reload - mulxl %eax, %ebx, %edx - movl %edx, 68(%esp) # 4-byte Spill - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl 48(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %edi - movl %edi, 52(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, %edi - movl 44(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %ecx - movl %ecx, 48(%esp) # 4-byte Spill - adcl %ebp, %edx - movl %edx, %ebp - movl 40(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, %eax - sbbl %edx, %edx - andl $1, %edx - addl 36(%esp), %esi # 4-byte Folded Reload - movl 20(%esp), %ecx # 4-byte Reload - adcl 64(%esp), %ecx # 4-byte Folded Reload - adcl 56(%esp), %ebx # 4-byte Folded Reload - adcl 68(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 52(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 40(%esp) # 4-byte Spill - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - adcl 44(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl 92(%esp), %eax - movl 60(%esp), %edx # 4-byte Reload - movl %edx, 20(%eax) - movl 100(%esp), %eax - movl 24(%eax), %edx - movl 96(%esp), %eax - mulxl (%eax), %ebp, %edi - movl %edi, 60(%esp) # 4-byte Spill - addl %esi, %ebp - movl %ebp, 64(%esp) # 4-byte Spill - mulxl 4(%eax), %esi, %edi - movl %edi, 52(%esp) # 4-byte Spill - adcl %ecx, %esi - movl %esi, %ebp - mulxl 8(%eax), %ecx, %esi - movl %esi, 44(%esp) # 4-byte Spill - adcl %ebx, %ecx - movl %ecx, 68(%esp) # 4-byte Spill - mulxl 12(%eax), %ebx, %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl 32(%esp), %ebx # 4-byte Folded Reload - mulxl 16(%eax), %edi, %ecx - movl %ecx, 32(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - mulxl 20(%eax), %esi, %ecx - movl %ecx, 40(%esp) # 4-byte Spill - adcl 48(%esp), %esi # 4-byte Folded Reload - mulxl 24(%eax), %edx, %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 56(%esp), %edx # 4-byte Folded Reload - sbbl %ecx, %ecx - andl $1, %ecx - addl 60(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl %eax, 68(%esp) # 4-byte Folded Spill - adcl 44(%esp), %ebx # 4-byte Folded Reload - adcl 36(%esp), %edi # 4-byte Folded Reload - adcl 32(%esp), %esi # 4-byte Folded Reload - adcl 40(%esp), %edx # 4-byte Folded Reload - movl 92(%esp), %eax - movl 64(%esp), %ebp # 4-byte Reload - movl %ebp, 24(%eax) - movl 60(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%eax) - movl 68(%esp), %ebp # 4-byte Reload - movl %ebp, 32(%eax) - movl %ebx, 36(%eax) - movl %edi, 40(%eax) - movl %esi, 44(%eax) - movl %edx, 48(%eax) - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%eax) - addl $72, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end96: - .size mcl_fpDbl_mulPre7Lbmi2, .Lfunc_end96-mcl_fpDbl_mulPre7Lbmi2 - - .globl mcl_fpDbl_sqrPre7Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre7Lbmi2,@function -mcl_fpDbl_sqrPre7Lbmi2: # @mcl_fpDbl_sqrPre7Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $80, %esp - movl 104(%esp), %ecx - movl (%ecx), %ebx - movl 4(%ecx), %eax - movl %eax, %edx - mulxl %ebx, %esi, %edi - movl %esi, 56(%esp) # 4-byte Spill - movl %edi, 76(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl %ebx, %ebp, %edx - movl %ebp, 44(%esp) # 4-byte Spill - addl %esi, %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 8(%ecx), %edx - movl %edx, 68(%esp) # 4-byte Spill - mulxl %ebx, %edx, %esi 
- adcl %edi, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 12(%ecx), %edx - movl %edx, 64(%esp) # 4-byte Spill - mulxl %ebx, %edi, %edx - movl %edx, 52(%esp) # 4-byte Spill - adcl %esi, %edi - movl 16(%ecx), %edx - movl %edx, 60(%esp) # 4-byte Spill - mulxl %ebx, %esi, %edx - movl %edx, 48(%esp) # 4-byte Spill - adcl 52(%esp), %esi # 4-byte Folded Reload - movl 20(%ecx), %edx - movl %edx, 52(%esp) # 4-byte Spill - mulxl %ebx, %edx, %ebp - movl %ebp, 36(%esp) # 4-byte Spill - adcl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - movl 24(%ecx), %edx - movl %edx, 48(%esp) # 4-byte Spill - mulxl %ebx, %ecx, %ebx - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 32(%esp) # 4-byte Spill - movl 100(%esp), %ecx - movl 44(%esp), %edx # 4-byte Reload - movl %edx, (%ecx) - adcl $0, %ebx - movl %ebx, 36(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - addl %edx, 72(%esp) # 4-byte Folded Spill - movl %eax, %edx - mulxl %eax, %ebx, %edx - movl %edx, 56(%esp) # 4-byte Spill - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl 68(%esp), %edx # 4-byte Reload - mulxl %eax, %ebp, %edx - movl %edx, 44(%esp) # 4-byte Spill - adcl %edi, %ebp - movl 64(%esp), %edx # 4-byte Reload - mulxl %eax, %edi, %edx - movl %edx, 64(%esp) # 4-byte Spill - adcl %esi, %edi - movl 60(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %esi - movl %esi, 60(%esp) # 4-byte Spill - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, %esi - movl 52(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - mulxl %eax, %ecx, %eax - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %edx - sbbl %ecx, %ecx - andl $1, %ecx - addl 76(%esp), %ebx # 4-byte Folded Reload - adcl 56(%esp), %ebp # 4-byte Folded Reload - adcl 44(%esp), %edi # 4-byte Folded Reload - adcl 64(%esp), %esi # 4-byte Folded Reload - movl %esi, 76(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - adcl %esi, 68(%esp) # 4-byte Folded Spill - adcl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - adcl %eax, %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 100(%esp), %eax - movl 72(%esp), %ecx # 4-byte Reload - movl %ecx, 4(%eax) - movl 104(%esp), %esi - movl (%esi), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 8(%esi), %ecx - mulxl %ecx, %edx, %eax - movl %eax, 24(%esp) # 4-byte Spill - addl %ebx, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 4(%esi), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl %esi, %ebx - mulxl %ecx, %eax, %edx - movl %edx, 20(%esp) # 4-byte Spill - adcl %ebp, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl %ecx, %edx - mulxl %ecx, %eax, %edx - movl %edx, 16(%esp) # 4-byte Spill - adcl %edi, %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 12(%ebx), %eax - movl %eax, %edx - mulxl %ecx, %edi, %edx - movl %edi, 32(%esp) # 4-byte Spill - movl %edx, 72(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl %edi, %edx - movl %edx, %esi - movl 16(%ebx), %edx - movl %edx, 44(%esp) # 4-byte Spill - mulxl %ecx, %edx, %edi - movl %edi, 76(%esp) # 4-byte Spill - adcl 68(%esp), %edx # 4-byte Folded Reload - movl %edx, %edi - movl 20(%ebx), %edx - movl %edx, 40(%esp) # 4-byte Spill - movl %ebx, %ebp - mulxl %ecx, %ebx, %edx - movl %edx, 68(%esp) # 4-byte Spill - adcl 36(%esp), %ebx # 4-byte Folded Reload - movl 24(%ebp), %edx - movl %edx, 36(%esp) # 4-byte Spill - mulxl %ecx, %ecx, %edx - movl %edx, 
12(%esp) # 4-byte Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - sbbl %ebp, %ebp - andl $1, %ebp - movl 24(%esp), %edx # 4-byte Reload - addl %edx, 64(%esp) # 4-byte Folded Spill - movl 20(%esp), %edx # 4-byte Reload - adcl %edx, 60(%esp) # 4-byte Folded Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - adcl 72(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 76(%esp), %ebx # 4-byte Folded Reload - adcl 68(%esp), %ecx # 4-byte Folded Reload - adcl 12(%esp), %ebp # 4-byte Folded Reload - movl 52(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %edi - movl %edi, 52(%esp) # 4-byte Spill - addl 64(%esp), %edx # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %edi - movl %edi, 48(%esp) # 4-byte Spill - adcl 60(%esp), %edx # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 76(%esp) # 4-byte Spill - movl %eax, %edx - mulxl %eax, %edx, %esi - movl %esi, 60(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %esi - movl %esi, 44(%esp) # 4-byte Spill - adcl %ebx, %edx - movl %edx, 24(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - mulxl %eax, %edx, %esi - movl %esi, 32(%esp) # 4-byte Spill - adcl %ecx, %edx - movl %edx, %esi - movl 36(%esp), %edx # 4-byte Reload - mulxl %eax, %edi, %eax - adcl %ebp, %edi - movl %edi, %edx - sbbl %ecx, %ecx - andl $1, %ecx - movl 52(%esp), %edi # 4-byte Reload - addl %edi, 68(%esp) # 4-byte Folded Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 48(%esp), %edi # 4-byte Folded Reload - movl 28(%esp), %ebx # 4-byte Reload - adcl 72(%esp), %ebx # 4-byte Folded Reload - movl 24(%esp), %ebp # 4-byte Reload - adcl 60(%esp), %ebp # 4-byte Folded Reload - adcl 44(%esp), %esi # 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - adcl %eax, %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 100(%esp), %eax - movl 56(%esp), %ecx # 4-byte Reload - movl %ecx, 8(%eax) - movl 64(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%eax) - movl 104(%esp), %ecx - movl (%ecx), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 16(%ecx), %eax - mulxl %eax, %edx, %esi - movl %esi, 24(%esp) # 4-byte Spill - addl 68(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl 4(%ecx), %edx - movl %edx, 52(%esp) # 4-byte Spill - mulxl %eax, %edx, %esi - movl %esi, 20(%esp) # 4-byte Spill - adcl %edi, %edx - movl %edx, 12(%esp) # 4-byte Spill - movl 8(%ecx), %edx - movl %edx, 48(%esp) # 4-byte Spill - mulxl %eax, %edx, %esi - movl %esi, 16(%esp) # 4-byte Spill - adcl %ebx, %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 12(%ecx), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl %ecx, %esi - mulxl %eax, %ecx, %edx - movl %edx, 8(%esp) # 4-byte Spill - adcl %ebp, %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl %eax, %edx - mulxl %eax, %ecx, %edx - movl %edx, 4(%esp) # 4-byte Spill - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - movl 20(%esi), %ecx - movl %ecx, %edx - mulxl %eax, %edx, %ebp - movl %edx, 32(%esp) # 4-byte Spill - movl %ebp, 36(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl %edx, %edi - movl 24(%esi), %edx - movl %edx, 40(%esp) # 4-byte Spill - mulxl %eax, %esi, %eax - movl %eax, (%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded 
Reload - sbbl %ebx, %ebx - andl $1, %ebx - movl 12(%esp), %eax # 4-byte Reload - addl 24(%esp), %eax # 4-byte Folded Reload - movl 20(%esp), %edx # 4-byte Reload - adcl %edx, 72(%esp) # 4-byte Folded Spill - movl 16(%esp), %edx # 4-byte Reload - adcl %edx, 64(%esp) # 4-byte Folded Spill - movl 8(%esp), %edx # 4-byte Reload - adcl %edx, 68(%esp) # 4-byte Folded Spill - adcl 4(%esp), %edi # 4-byte Folded Reload - movl %edi, 76(%esp) # 4-byte Spill - adcl %ebp, %esi - adcl (%esp), %ebx # 4-byte Folded Reload - movl 56(%esp), %edx # 4-byte Reload - mulxl %ecx, %edx, %edi - movl %edi, 28(%esp) # 4-byte Spill - addl %eax, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - mulxl %ecx, %ebp, %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 72(%esp), %ebp # 4-byte Folded Reload - movl 48(%esp), %edx # 4-byte Reload - mulxl %ecx, %eax, %edx - movl %edx, 48(%esp) # 4-byte Spill - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - mulxl %ecx, %edi, %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 68(%esp), %edi # 4-byte Folded Reload - movl 32(%esp), %eax # 4-byte Reload - adcl %eax, 76(%esp) # 4-byte Folded Spill - movl %ecx, %edx - mulxl %ecx, %edx, %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl %esi, %edx - movl %edx, %eax - movl 40(%esp), %edx # 4-byte Reload - mulxl %ecx, %ecx, %edx - adcl %ebx, %ecx - movl %ecx, %ebx - sbbl %ecx, %ecx - andl $1, %ecx - addl 28(%esp), %ebp # 4-byte Folded Reload - movl 52(%esp), %esi # 4-byte Reload - adcl %esi, 72(%esp) # 4-byte Folded Spill - adcl 48(%esp), %edi # 4-byte Folded Reload - movl 76(%esp), %esi # 4-byte Reload - adcl 64(%esp), %esi # 4-byte Folded Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - adcl 68(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 48(%esp) # 4-byte Spill - adcl %edx, %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 100(%esp), %eax - movl 60(%esp), %ecx # 4-byte Reload - movl %ecx, 16(%eax) - movl 56(%esp), %ecx # 4-byte Reload - movl %ecx, 20(%eax) - movl 104(%esp), %eax - movl 24(%eax), %edx - mulxl (%eax), %ecx, %ebx - movl %ebx, 64(%esp) # 4-byte Spill - addl %ebp, %ecx - movl %ecx, 68(%esp) # 4-byte Spill - mulxl 4(%eax), %ecx, %ebx - movl %ebx, 60(%esp) # 4-byte Spill - adcl 72(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %ebp - mulxl 8(%eax), %ecx, %ebx - movl %ebx, 72(%esp) # 4-byte Spill - adcl %edi, %ecx - movl %ecx, 76(%esp) # 4-byte Spill - mulxl 12(%eax), %ebx, %ecx - movl %ecx, 40(%esp) # 4-byte Spill - adcl %esi, %ebx - mulxl 16(%eax), %edi, %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl 44(%esp), %edi # 4-byte Folded Reload - mulxl 20(%eax), %esi, %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 48(%esp), %esi # 4-byte Folded Reload - mulxl %edx, %edx, %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 52(%esp), %edx # 4-byte Folded Reload - sbbl %ecx, %ecx - andl $1, %ecx - addl 64(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl %eax, 76(%esp) # 4-byte Folded Spill - adcl 72(%esp), %ebx # 4-byte Folded Reload - adcl 40(%esp), %edi # 4-byte Folded Reload - adcl 36(%esp), %esi # 4-byte Folded Reload - adcl 44(%esp), %edx # 4-byte Folded Reload - movl 100(%esp), %eax - movl 68(%esp), %ebp # 4-byte Reload - movl %ebp, 24(%eax) - movl 64(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%eax) - movl 76(%esp), %ebp # 4-byte Reload - movl %ebp, 32(%eax) - movl %ebx, 36(%eax) - movl %edi, 40(%eax) - movl %esi, 
44(%eax) - movl %edx, 48(%eax) - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%eax) - addl $80, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end97: - .size mcl_fpDbl_sqrPre7Lbmi2, .Lfunc_end97-mcl_fpDbl_sqrPre7Lbmi2 - - .globl mcl_fp_mont7Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont7Lbmi2,@function -mcl_fp_mont7Lbmi2: # @mcl_fp_mont7Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $116, %esp - movl 140(%esp), %eax - movl 24(%eax), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 144(%esp), %ecx - movl (%ecx), %ecx - mulxl %ecx, %edx, %esi - movl %edx, 112(%esp) # 4-byte Spill - movl 20(%eax), %edx - movl %edx, 84(%esp) # 4-byte Spill - mulxl %ecx, %edi, %edx - movl %edi, 108(%esp) # 4-byte Spill - movl %edx, 52(%esp) # 4-byte Spill - movl 16(%eax), %edx - movl %edx, 80(%esp) # 4-byte Spill - mulxl %ecx, %edx, %ebx - movl %edx, 104(%esp) # 4-byte Spill - movl 8(%eax), %edx - movl %edx, 68(%esp) # 4-byte Spill - mulxl %ecx, %edi, %edx - movl %edi, 96(%esp) # 4-byte Spill - movl %edx, 100(%esp) # 4-byte Spill - movl (%eax), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 4(%eax), %edx - movl %edx, 56(%esp) # 4-byte Spill - mulxl %ecx, %edi, %edx - movl %edx, 48(%esp) # 4-byte Spill - movl %ebp, %edx - mulxl %ecx, %ebp, %edx - addl %edi, %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 96(%esp), %edx # 4-byte Reload - adcl %edx, 48(%esp) # 4-byte Folded Spill - movl 12(%eax), %edx - movl %edx, 64(%esp) # 4-byte Spill - mulxl %ecx, %ecx, %eax - adcl 100(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 16(%esp) # 4-byte Spill - adcl 108(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 148(%esp), %ebx - movl -4(%ebx), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl %ebp, %edx - imull %eax, %edx - movl (%ebx), %edi - movl %edi, 108(%esp) # 4-byte Spill - movl 4(%ebx), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - mulxl %ecx, %esi, %ecx - mulxl %edi, %edi, %eax - movl %edi, 8(%esp) # 4-byte Spill - addl %esi, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 8(%ebx), %esi - movl %esi, 104(%esp) # 4-byte Spill - mulxl %esi, %eax, %esi - adcl %ecx, %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 12(%ebx), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - mulxl %ecx, %eax, %ecx - adcl %esi, %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 16(%ebx), %esi - movl %esi, 96(%esp) # 4-byte Spill - mulxl %esi, %eax, %esi - adcl %ecx, %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 20(%ebx), %eax - movl %eax, 92(%esp) # 4-byte Spill - mulxl %eax, %eax, %edi - adcl %esi, %eax - movl %eax, %ecx - movl 24(%ebx), %eax - movl %eax, 88(%esp) # 4-byte Spill - mulxl %eax, %edx, %eax - adcl %edi, %edx - adcl $0, %eax - addl %ebp, 8(%esp) # 4-byte Folded Spill - movl 44(%esp), %esi # 4-byte Reload - adcl %esi, 40(%esp) # 4-byte Folded Spill - movl 48(%esp), %esi # 4-byte Reload - adcl %esi, 32(%esp) # 4-byte Folded Spill - movl 12(%esp), %esi # 4-byte Reload - adcl %esi, 28(%esp) # 4-byte Folded Spill - movl 16(%esp), %esi # 4-byte Reload - adcl %esi, 24(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 4(%esp) # 4-byte Spill - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte 
Spill - sbbl %eax, %eax - andl $1, %eax - movl %eax, (%esp) # 4-byte Spill - movl 144(%esp), %edx - movl 4(%edx), %edx - mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - mulxl 84(%esp), %ecx, %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - mulxl 56(%esp), %ebx, %esi # 4-byte Folded Reload - mulxl 60(%esp), %eax, %edi # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - addl %ebx, %edi - movl %edi, 52(%esp) # 4-byte Spill - mulxl 68(%esp), %edi, %eax # 4-byte Folded Reload - adcl %esi, %edi - movl %edi, 48(%esp) # 4-byte Spill - mulxl 64(%esp), %ebp, %ebx # 4-byte Folded Reload - adcl %eax, %ebp - mulxl 80(%esp), %esi, %eax # 4-byte Folded Reload - adcl %ebx, %esi - adcl %ecx, %eax - movl %eax, %ecx - movl 44(%esp), %ebx # 4-byte Reload - adcl 12(%esp), %ebx # 4-byte Folded Reload - movl 20(%esp), %edx # 4-byte Reload - adcl $0, %edx - movl 36(%esp), %eax # 4-byte Reload - addl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 52(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - adcl 28(%esp), %edi # 4-byte Folded Reload - movl %edi, 48(%esp) # 4-byte Spill - adcl 24(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 20(%esp) # 4-byte Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 4(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 8(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 44(%esp) # 4-byte Spill - adcl (%esp), %edx # 4-byte Folded Reload - movl %edx, %ebx - sbbl %ecx, %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl %eax, %edx - imull 76(%esp), %edx # 4-byte Folded Reload - mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload - movl %edi, 8(%esp) # 4-byte Spill - addl %ecx, %esi - movl %esi, 32(%esp) # 4-byte Spill - mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload - adcl %eax, %esi - movl %esi, %edi - mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload - adcl %ecx, %esi - movl %esi, 28(%esp) # 4-byte Spill - mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload - adcl %eax, %ecx - movl %ecx, 24(%esp) # 4-byte Spill - mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload - adcl %esi, %ecx - movl %ecx, %esi - mulxl 88(%esp), %ebp, %ecx # 4-byte Folded Reload - adcl %eax, %ebp - adcl $0, %ecx - movl %ecx, %edx - movl 40(%esp), %eax # 4-byte Reload - andl $1, %eax - movl 8(%esp), %ecx # 4-byte Reload - addl 36(%esp), %ecx # 4-byte Folded Reload - movl 52(%esp), %ecx # 4-byte Reload - adcl %ecx, 32(%esp) # 4-byte Folded Spill - adcl 48(%esp), %edi # 4-byte Folded Reload - movl %edi, 8(%esp) # 4-byte Spill - movl 20(%esp), %ecx # 4-byte Reload - adcl %ecx, 28(%esp) # 4-byte Folded Spill - movl 12(%esp), %ecx # 4-byte Reload - adcl %ecx, 24(%esp) # 4-byte Folded Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 44(%esp), %ebp # 4-byte Folded Reload - adcl %ebx, %edx - movl %edx, 4(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 144(%esp), %eax - movl 8(%eax), %edx - mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - movl %eax, 52(%esp) # 4-byte Spill - mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 36(%esp) # 4-byte Spill 
- movl %eax, 12(%esp) # 4-byte Spill - mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload - movl %ebx, 44(%esp) # 4-byte Spill - addl %ecx, %edi - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload - adcl 12(%esp), %ebx # 4-byte Folded Reload - mulxl 80(%esp), %edx, %ecx # 4-byte Folded Reload - adcl %eax, %edx - movl %edx, 12(%esp) # 4-byte Spill - adcl %esi, %ecx - movl %ecx, %esi - movl 48(%esp), %edx # 4-byte Reload - adcl 16(%esp), %edx # 4-byte Folded Reload - movl 52(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 44(%esp), %ecx # 4-byte Reload - addl 32(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 44(%esp) # 4-byte Spill - adcl 8(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - movl 28(%esp), %edi # 4-byte Reload - adcl %edi, 36(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 12(%esp), %ebx # 4-byte Reload - adcl 20(%esp), %ebx # 4-byte Folded Reload - adcl %ebp, %esi - movl %esi, 12(%esp) # 4-byte Spill - adcl 4(%esp), %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %edx - imull 76(%esp), %edx # 4-byte Folded Reload - mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload - movl %edi, 8(%esp) # 4-byte Spill - addl %ecx, %esi - movl %esi, 32(%esp) # 4-byte Spill - mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload - adcl %eax, %esi - movl %esi, 28(%esp) # 4-byte Spill - mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload - adcl %ecx, %esi - movl %esi, %edi - mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload - adcl %eax, %ecx - movl %ecx, 20(%esp) # 4-byte Spill - mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload - adcl %esi, %ecx - movl %ecx, %esi - mulxl 88(%esp), %ebp, %ecx # 4-byte Folded Reload - adcl %eax, %ebp - adcl $0, %ecx - movl %ecx, %edx - movl 40(%esp), %eax # 4-byte Reload - andl $1, %eax - movl 8(%esp), %ecx # 4-byte Reload - addl 44(%esp), %ecx # 4-byte Folded Reload - movl 16(%esp), %ecx # 4-byte Reload - adcl %ecx, 32(%esp) # 4-byte Folded Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl %ecx, 28(%esp) # 4-byte Folded Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl %ebx, 20(%esp) # 4-byte Folded Spill - adcl 12(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 48(%esp), %ebp # 4-byte Folded Reload - adcl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 144(%esp), %eax - movl 12(%eax), %edx - mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - movl %eax, (%esp) # 4-byte Spill - mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload - movl %ebx, 36(%esp) # 4-byte Spill - addl %ecx, %edi - movl %edi, 52(%esp) # 4-byte Spill - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload - adcl (%esp), %ebx # 
4-byte Folded Reload - mulxl 80(%esp), %edx, %ecx # 4-byte Folded Reload - adcl %eax, %edx - movl %edx, (%esp) # 4-byte Spill - adcl %esi, %ecx - movl %ecx, %esi - movl 44(%esp), %edx # 4-byte Reload - adcl 4(%esp), %edx # 4-byte Folded Reload - movl 48(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 36(%esp), %ecx # 4-byte Reload - addl 32(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 36(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 28(%esp), %edi # 4-byte Folded Reload - movl %edi, 52(%esp) # 4-byte Spill - movl 16(%esp), %edi # 4-byte Reload - adcl %edi, 24(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - movl (%esp), %ebx # 4-byte Reload - adcl 12(%esp), %ebx # 4-byte Folded Reload - adcl %ebp, %esi - movl %esi, 12(%esp) # 4-byte Spill - adcl 8(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %edx - imull 76(%esp), %edx # 4-byte Folded Reload - mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload - movl %edi, 8(%esp) # 4-byte Spill - addl %ecx, %esi - movl %esi, 32(%esp) # 4-byte Spill - mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload - adcl %eax, %esi - movl %esi, 28(%esp) # 4-byte Spill - mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload - adcl %ecx, %esi - movl %esi, %edi - mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload - adcl %eax, %ecx - movl %ecx, 20(%esp) # 4-byte Spill - mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload - adcl %esi, %ecx - movl %ecx, %esi - mulxl 88(%esp), %ebp, %ecx # 4-byte Folded Reload - adcl %eax, %ebp - adcl $0, %ecx - movl %ecx, %edx - movl 40(%esp), %eax # 4-byte Reload - andl $1, %eax - movl 8(%esp), %ecx # 4-byte Reload - addl 36(%esp), %ecx # 4-byte Folded Reload - movl 52(%esp), %ecx # 4-byte Reload - adcl %ecx, 32(%esp) # 4-byte Folded Spill - movl 24(%esp), %ecx # 4-byte Reload - adcl %ecx, 28(%esp) # 4-byte Folded Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl %ebx, 20(%esp) # 4-byte Folded Spill - adcl 12(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 44(%esp), %ebp # 4-byte Folded Reload - adcl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 144(%esp), %eax - movl 16(%eax), %edx - mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - movl %eax, (%esp) # 4-byte Spill - mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload - movl %ebx, 36(%esp) # 4-byte Spill - addl %ecx, %edi - movl %edi, 52(%esp) # 4-byte Spill - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload - adcl (%esp), %ebx # 4-byte Folded Reload - mulxl 80(%esp), %edx, %ecx # 4-byte Folded Reload - adcl %eax, %edx - movl %edx, (%esp) # 4-byte Spill - adcl %esi, %ecx - movl %ecx, %esi - movl 44(%esp), %edx # 4-byte Reload - adcl 4(%esp), %edx # 4-byte Folded Reload - movl 48(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 36(%esp), %ecx 
# 4-byte Reload - addl 32(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 36(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 28(%esp), %edi # 4-byte Folded Reload - movl %edi, 52(%esp) # 4-byte Spill - movl 16(%esp), %edi # 4-byte Reload - adcl %edi, 24(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - movl (%esp), %ebx # 4-byte Reload - adcl 12(%esp), %ebx # 4-byte Folded Reload - adcl %ebp, %esi - movl %esi, 12(%esp) # 4-byte Spill - adcl 8(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %edx - imull 76(%esp), %edx # 4-byte Folded Reload - mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload - movl %edi, 8(%esp) # 4-byte Spill - addl %ecx, %esi - movl %esi, 32(%esp) # 4-byte Spill - mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload - adcl %eax, %esi - movl %esi, 28(%esp) # 4-byte Spill - mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload - adcl %ecx, %esi - movl %esi, %edi - mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload - adcl %eax, %ecx - movl %ecx, 20(%esp) # 4-byte Spill - mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload - adcl %esi, %ecx - movl %ecx, %esi - mulxl 88(%esp), %edx, %ecx # 4-byte Folded Reload - adcl %eax, %edx - movl %edx, %ebp - adcl $0, %ecx - movl %ecx, %edx - movl 40(%esp), %eax # 4-byte Reload - andl $1, %eax - movl 8(%esp), %ecx # 4-byte Reload - addl 36(%esp), %ecx # 4-byte Folded Reload - movl 52(%esp), %ecx # 4-byte Reload - adcl %ecx, 32(%esp) # 4-byte Folded Spill - movl 24(%esp), %ecx # 4-byte Reload - adcl %ecx, 28(%esp) # 4-byte Folded Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl %ebx, 20(%esp) # 4-byte Folded Spill - adcl 12(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 44(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 8(%esp) # 4-byte Spill - adcl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 144(%esp), %eax - movl 20(%eax), %edx - mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, (%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - mulxl 68(%esp), %eax, %ebp # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload - movl %ebx, 36(%esp) # 4-byte Spill - addl %ecx, %edi - movl %edi, 52(%esp) # 4-byte Spill - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload - adcl %ebp, %ecx - movl %ecx, %ebp - mulxl 80(%esp), %ebx, %ecx # 4-byte Folded Reload - adcl %eax, %ebx - adcl %esi, %ecx - movl %ecx, %esi - movl 48(%esp), %edx # 4-byte Reload - adcl (%esp), %edx # 4-byte Folded Reload - movl 4(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 36(%esp), %ecx # 4-byte Reload - addl 32(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 36(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 28(%esp), %edi # 4-byte Folded Reload - movl %edi, 52(%esp) # 4-byte Spill - movl 16(%esp), %edi # 4-byte Reload - adcl %edi, 24(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ebp # 4-byte Folded Reload 
- movl %ebp, 20(%esp) # 4-byte Spill - adcl 12(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 44(%esp), %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, %ebp - sbbl %eax, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %edx - imull 76(%esp), %edx # 4-byte Folded Reload - mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload - movl %edi, 8(%esp) # 4-byte Spill - addl %ecx, %esi - movl %esi, 40(%esp) # 4-byte Spill - mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload - adcl %eax, %esi - movl %esi, 32(%esp) # 4-byte Spill - mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload - adcl %ecx, %esi - movl %esi, %edi - mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload - adcl %eax, %ecx - movl %ecx, 28(%esp) # 4-byte Spill - mulxl 92(%esp), %eax, %ebx # 4-byte Folded Reload - adcl %esi, %eax - movl %eax, %esi - mulxl 88(%esp), %ecx, %eax # 4-byte Folded Reload - adcl %ebx, %ecx - movl %ecx, %ebx - adcl $0, %eax - movl %eax, %ecx - movl 44(%esp), %edx # 4-byte Reload - andl $1, %edx - movl 8(%esp), %eax # 4-byte Reload - addl 36(%esp), %eax # 4-byte Folded Reload - movl 52(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 24(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 52(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 36(%esp) # 4-byte Spill - adcl 48(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - adcl %ebp, %ecx - movl %ecx, 48(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 144(%esp), %edx - movl 24(%edx), %edx - mulxl 56(%esp), %ebx, %esi # 4-byte Folded Reload - mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 60(%esp) # 4-byte Spill - addl %ebx, %eax - movl %eax, 56(%esp) # 4-byte Spill - mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - mulxl 68(%esp), %ebp, %edi # 4-byte Folded Reload - adcl %esi, %ebp - mulxl 64(%esp), %eax, %esi # 4-byte Folded Reload - adcl %edi, %eax - movl %eax, 68(%esp) # 4-byte Spill - mulxl 84(%esp), %edi, %eax # 4-byte Folded Reload - movl %edi, 84(%esp) # 4-byte Spill - mulxl 80(%esp), %ebx, %edx # 4-byte Folded Reload - adcl %esi, %ebx - adcl 84(%esp), %edx # 4-byte Folded Reload - movl %edx, %esi - adcl %ecx, %eax - movl %eax, %ecx - movl 72(%esp), %edx # 4-byte Reload - adcl $0, %edx - movl 60(%esp), %edi # 4-byte Reload - addl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - adcl 52(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 52(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl %eax, 68(%esp) # 4-byte Folded Spill - adcl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 36(%esp) # 4-byte Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 48(%esp) # 4-byte Spill - adcl 44(%esp), %edx # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - imull %edi, %edx - mulxl 108(%esp), 
%ecx, %eax # 4-byte Folded Reload - movl %ecx, 76(%esp) # 4-byte Spill - mulxl 112(%esp), %ecx, %esi # 4-byte Folded Reload - addl %eax, %ecx - movl %ecx, 80(%esp) # 4-byte Spill - mulxl 104(%esp), %eax, %edi # 4-byte Folded Reload - adcl %esi, %eax - movl %eax, 84(%esp) # 4-byte Spill - mulxl 100(%esp), %ecx, %eax # 4-byte Folded Reload - adcl %edi, %ecx - movl %edx, %edi - mulxl 96(%esp), %ebx, %ebp # 4-byte Folded Reload - adcl %eax, %ebx - mulxl 92(%esp), %esi, %eax # 4-byte Folded Reload - adcl %ebp, %esi - movl %edi, %edx - mulxl 88(%esp), %edi, %ebp # 4-byte Folded Reload - adcl %eax, %edi - adcl $0, %ebp - andl $1, 64(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - addl 60(%esp), %eax # 4-byte Folded Reload - movl 80(%esp), %edx # 4-byte Reload - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - adcl 68(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 76(%esp) # 4-byte Spill - adcl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 68(%esp) # 4-byte Spill - adcl 40(%esp), %esi # 4-byte Folded Reload - adcl 48(%esp), %edi # 4-byte Folded Reload - adcl 72(%esp), %ebp # 4-byte Folded Reload - movl 64(%esp), %eax # 4-byte Reload - adcl $0, %eax - subl 108(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - sbbl 112(%esp), %edx # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - sbbl 104(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 72(%esp) # 4-byte Spill - sbbl 100(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 100(%esp) # 4-byte Spill - movl %esi, %ebx - sbbl 96(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 104(%esp) # 4-byte Spill - movl %edi, %ebx - sbbl 92(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 108(%esp) # 4-byte Spill - movl %ebp, %ebx - sbbl 88(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 112(%esp) # 4-byte Spill - sbbl $0, %eax - andl $1, %eax - movl %eax, %ecx - jne .LBB98_2 -# BB#1: - movl 60(%esp), %eax # 4-byte Reload - movl %eax, 80(%esp) # 4-byte Spill -.LBB98_2: - movl 136(%esp), %ebx - movl 80(%esp), %edx # 4-byte Reload - movl %edx, (%ebx) - movl %ebx, %edx - testb %cl, %cl - movl 84(%esp), %ebx # 4-byte Reload - jne .LBB98_4 -# BB#3: - movl 64(%esp), %ebx # 4-byte Reload -.LBB98_4: - movl %ebx, 4(%edx) - movl 76(%esp), %ecx # 4-byte Reload - jne .LBB98_6 -# BB#5: - movl 72(%esp), %ecx # 4-byte Reload -.LBB98_6: - movl %ecx, 8(%edx) - movl 68(%esp), %eax # 4-byte Reload - jne .LBB98_8 -# BB#7: - movl 100(%esp), %eax # 4-byte Reload -.LBB98_8: - movl %eax, 12(%edx) - jne .LBB98_10 -# BB#9: - movl 104(%esp), %esi # 4-byte Reload -.LBB98_10: - movl %esi, 16(%edx) - jne .LBB98_12 -# BB#11: - movl 108(%esp), %edi # 4-byte Reload -.LBB98_12: - movl %edi, 20(%edx) - jne .LBB98_14 -# BB#13: - movl 112(%esp), %ebp # 4-byte Reload -.LBB98_14: - movl %ebp, 24(%edx) - addl $116, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end98: - .size mcl_fp_mont7Lbmi2, .Lfunc_end98-mcl_fp_mont7Lbmi2 - - .globl mcl_fp_montNF7Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF7Lbmi2,@function -mcl_fp_montNF7Lbmi2: # @mcl_fp_montNF7Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $104, %esp - movl 128(%esp), %eax - movl (%eax), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 4(%eax), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 132(%esp), %ecx - movl (%ecx), %ebp - mulxl %ebp, %ecx, %esi - movl 
%edi, %edx - mulxl %ebp, %edi, %edx - movl %edi, 96(%esp) # 4-byte Spill - addl %ecx, %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 8(%eax), %edx - movl %edx, 60(%esp) # 4-byte Spill - mulxl %ebp, %ecx, %edi - adcl %esi, %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 12(%eax), %edx - movl %edx, 56(%esp) # 4-byte Spill - mulxl %ebp, %ecx, %ebx - adcl %edi, %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 16(%eax), %edx - movl %edx, 52(%esp) # 4-byte Spill - mulxl %ebp, %edx, %ecx - adcl %ebx, %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 20(%eax), %edx - movl %edx, 48(%esp) # 4-byte Spill - mulxl %ebp, %edx, %esi - adcl %ecx, %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 24(%eax), %edx - movl %edx, 44(%esp) # 4-byte Spill - mulxl %ebp, %ebp, %eax - adcl %esi, %ebp - adcl $0, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 136(%esp), %edi - movl -4(%edi), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 96(%esp), %esi # 4-byte Reload - movl %esi, %edx - imull %eax, %edx - movl (%edi), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - mulxl %ecx, %ecx, %eax - movl %eax, 28(%esp) # 4-byte Spill - addl %esi, %ecx - movl 4(%edi), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - mulxl %ecx, %eax, %ecx - movl %ecx, 24(%esp) # 4-byte Spill - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - movl 8(%edi), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - mulxl %ecx, %eax, %ecx - movl %ecx, 20(%esp) # 4-byte Spill - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - movl 12(%edi), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - mulxl %ecx, %esi, %ecx - movl %ecx, 16(%esp) # 4-byte Spill - adcl 84(%esp), %esi # 4-byte Folded Reload - movl 16(%edi), %eax - movl %eax, 84(%esp) # 4-byte Spill - mulxl %eax, %eax, %ecx - movl %ecx, 12(%esp) # 4-byte Spill - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, %ecx - movl 20(%edi), %eax - movl %eax, 80(%esp) # 4-byte Spill - mulxl %eax, %eax, %ebx - movl %ebx, 8(%esp) # 4-byte Spill - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, %ebx - movl 24(%edi), %eax - movl %eax, 76(%esp) # 4-byte Spill - mulxl %eax, %edx, %eax - adcl %ebp, %edx - movl %edx, %edi - movl 40(%esp), %edx # 4-byte Reload - adcl $0, %edx - movl 28(%esp), %ebp # 4-byte Reload - addl %ebp, 36(%esp) # 4-byte Folded Spill - movl 24(%esp), %ebp # 4-byte Reload - adcl %ebp, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 12(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - adcl 8(%esp), %edi # 4-byte Folded Reload - movl %edi, 12(%esp) # 4-byte Spill - adcl %eax, %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 132(%esp), %eax - movl 4(%eax), %edx - mulxl 64(%esp), %ecx, %esi # 4-byte Folded Reload - mulxl 68(%esp), %edi, %eax # 4-byte Folded Reload - movl %edi, 8(%esp) # 4-byte Spill - addl %ecx, %eax - movl %eax, 4(%esp) # 4-byte Spill - mulxl 60(%esp), %eax, %edi # 4-byte Folded Reload - adcl %esi, %eax - movl %eax, %ecx - mulxl 56(%esp), %esi, %ebx # 4-byte Folded Reload - adcl %edi, %esi - mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload - adcl %ebx, %eax - movl %eax, 28(%esp) # 4-byte Spill - mulxl 48(%esp), %eax, %edi # 4-byte Folded Reload - adcl %ebp, %eax - movl %eax, %ebx - mulxl 44(%esp), %ebp, %eax # 4-byte Folded Reload - adcl %edi, %ebp - adcl $0, %eax - movl %eax, %edx - movl 8(%esp), %eax # 4-byte Reload - addl 36(%esp), %eax # 4-byte 
Folded Reload - movl 4(%esp), %edi # 4-byte Reload - adcl 32(%esp), %edi # 4-byte Folded Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl 16(%esp), %ecx # 4-byte Reload - adcl %ecx, 28(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 4(%esp) # 4-byte Spill - adcl 40(%esp), %ebp # 4-byte Folded Reload - adcl $0, %edx - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, %edx - movl %eax, %ebx - imull 72(%esp), %edx # 4-byte Folded Reload - mulxl 100(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 36(%esp) # 4-byte Spill - addl %ebx, %eax - mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl %edi, %eax - movl %eax, 32(%esp) # 4-byte Spill - mulxl 92(%esp), %edi, %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 8(%esp), %edi # 4-byte Folded Reload - mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl %esi, %eax - movl %eax, %esi - mulxl 84(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, %ecx - mulxl 80(%esp), %eax, %ebx # 4-byte Folded Reload - movl %ebx, 8(%esp) # 4-byte Spill - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%esp) # 4-byte Spill - mulxl 76(%esp), %eax, %edx # 4-byte Folded Reload - adcl %ebp, %eax - movl 40(%esp), %ebx # 4-byte Reload - adcl $0, %ebx - movl 36(%esp), %ebp # 4-byte Reload - addl %ebp, 32(%esp) # 4-byte Folded Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - movl 12(%esp), %ecx # 4-byte Reload - adcl %ecx, 28(%esp) # 4-byte Folded Spill - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - adcl %edx, %ebx - movl %ebx, 40(%esp) # 4-byte Spill - movl 132(%esp), %eax - movl 8(%eax), %edx - mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 68(%esp), %esi, %edi # 4-byte Folded Reload - movl %esi, 36(%esp) # 4-byte Spill - addl %ecx, %edi - mulxl 60(%esp), %ecx, %ebx # 4-byte Folded Reload - adcl %eax, %ecx - movl %ecx, 8(%esp) # 4-byte Spill - mulxl 56(%esp), %esi, %eax # 4-byte Folded Reload - adcl %ebx, %esi - mulxl 52(%esp), %ebx, %ecx # 4-byte Folded Reload - adcl %eax, %ebx - mulxl 48(%esp), %eax, %ebp # 4-byte Folded Reload - movl %ebp, 4(%esp) # 4-byte Spill - adcl %ecx, %eax - movl %eax, %ebp - mulxl 44(%esp), %edx, %eax # 4-byte Folded Reload - adcl 4(%esp), %edx # 4-byte Folded Reload - adcl $0, %eax - movl 36(%esp), %ecx # 4-byte Reload - addl 32(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 36(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl 8(%esp), %ecx # 4-byte Reload - adcl 20(%esp), %ecx # 4-byte Folded Reload - adcl 16(%esp), %esi # 4-byte Folded Reload - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - adcl 12(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 24(%esp) # 4-byte Spill - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %ebx # 4-byte Reload - movl %ebx, %edx - imull 72(%esp), %edx # 4-byte Folded Reload - mulxl 100(%esp), %eax, %ebp # 4-byte Folded Reload - movl %ebp, 28(%esp) # 4-byte Spill - addl %ebx, %eax - 
mulxl 96(%esp), %ebp, %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - adcl %edi, %ebp - mulxl 92(%esp), %ebx, %edi # 4-byte Folded Reload - movl %edi, 12(%esp) # 4-byte Spill - adcl %ecx, %ebx - mulxl 88(%esp), %edi, %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl %esi, %edi - mulxl 84(%esp), %esi, %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - mulxl 80(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, (%esp) # 4-byte Spill - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, %ecx - mulxl 76(%esp), %edx, %eax # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, %eax - movl 40(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 28(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 36(%esp) # 4-byte Spill - adcl 20(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 28(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 4(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl (%esp), %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - movl 132(%esp), %eax - movl 12(%eax), %edx - mulxl 64(%esp), %esi, %eax # 4-byte Folded Reload - mulxl 68(%esp), %edi, %ecx # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - addl %esi, %ecx - mulxl 60(%esp), %esi, %edi # 4-byte Folded Reload - adcl %eax, %esi - mulxl 56(%esp), %eax, %ebx # 4-byte Folded Reload - adcl %edi, %eax - movl %eax, %edi - mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload - adcl %ebx, %eax - movl %eax, 24(%esp) # 4-byte Spill - mulxl 48(%esp), %eax, %ebx # 4-byte Folded Reload - movl %ebx, 4(%esp) # 4-byte Spill - adcl %ebp, %eax - movl %eax, %ebx - mulxl 44(%esp), %ebp, %eax # 4-byte Folded Reload - adcl 4(%esp), %ebp # 4-byte Folded Reload - adcl $0, %eax - movl 32(%esp), %edx # 4-byte Reload - addl 36(%esp), %edx # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - movl 12(%esp), %edx # 4-byte Reload - adcl %edx, 24(%esp) # 4-byte Folded Spill - adcl 8(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - adcl 40(%esp), %ebp # 4-byte Folded Reload - adcl $0, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%esp), %edi # 4-byte Reload - movl %edi, %edx - imull 72(%esp), %edx # 4-byte Folded Reload - mulxl 100(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - addl %edi, %eax - mulxl 96(%esp), %edi, %ecx # 4-byte Folded Reload - movl %ecx, 32(%esp) # 4-byte Spill - adcl 4(%esp), %edi # 4-byte Folded Reload - mulxl 92(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl %esi, %eax - movl %eax, %esi - mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%esp) # 4-byte Spill - mulxl 84(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, %ecx - mulxl 80(%esp), %eax, %ebx # 4-byte Folded Reload - movl %ebx, (%esp) # 4-byte Spill - adcl 
20(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - mulxl 76(%esp), %eax, %edx # 4-byte Folded Reload - adcl %ebp, %eax - movl 36(%esp), %ebx # 4-byte Reload - adcl $0, %ebx - addl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - movl 12(%esp), %esi # 4-byte Reload - adcl %esi, 28(%esp) # 4-byte Folded Spill - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - movl 4(%esp), %ecx # 4-byte Reload - adcl %ecx, 24(%esp) # 4-byte Folded Spill - adcl (%esp), %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - adcl %edx, %ebx - movl %ebx, 36(%esp) # 4-byte Spill - movl 132(%esp), %eax - movl 16(%eax), %edx - mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 68(%esp), %esi, %edi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - addl %ecx, %edi - mulxl 60(%esp), %ecx, %ebx # 4-byte Folded Reload - adcl %eax, %ecx - mulxl 56(%esp), %eax, %esi # 4-byte Folded Reload - adcl %ebx, %eax - movl %eax, %ebx - mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload - adcl %esi, %eax - movl %eax, 40(%esp) # 4-byte Spill - mulxl 48(%esp), %eax, %esi # 4-byte Folded Reload - movl %esi, 4(%esp) # 4-byte Spill - adcl %ebp, %eax - movl %eax, %esi - mulxl 44(%esp), %ebp, %eax # 4-byte Folded Reload - adcl 4(%esp), %ebp # 4-byte Folded Reload - adcl $0, %eax - movl %eax, %edx - movl 32(%esp), %eax # 4-byte Reload - addl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 12(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 28(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%esp) # 4-byte Spill - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 36(%esp), %ebp # 4-byte Folded Reload - adcl $0, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 32(%esp), %ebx # 4-byte Reload - movl %ebx, %edx - imull 72(%esp), %edx # 4-byte Folded Reload - mulxl 100(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - addl %ebx, %eax - mulxl 96(%esp), %ebx, %ecx # 4-byte Folded Reload - movl %ecx, 32(%esp) # 4-byte Spill - adcl %edi, %ebx - mulxl 92(%esp), %edi, %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, %ecx - mulxl 84(%esp), %eax, %esi # 4-byte Folded Reload - movl %esi, 4(%esp) # 4-byte Spill - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%esp) # 4-byte Spill - mulxl 80(%esp), %eax, %esi # 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - mulxl 76(%esp), %eax, %edx # 4-byte Folded Reload - adcl %ebp, %eax - movl 36(%esp), %esi # 4-byte Reload - adcl $0, %esi - addl 20(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - movl 8(%esp), %ecx # 4-byte Reload - adcl %ecx, 28(%esp) # 4-byte Folded Spill - movl 4(%esp), %ecx # 4-byte Reload - adcl %ecx, 24(%esp) # 4-byte 
Folded Spill - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - adcl %edx, %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 132(%esp), %eax - movl 20(%eax), %edx - mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 68(%esp), %edi, %esi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - addl %ecx, %esi - mulxl 60(%esp), %ebp, %ecx # 4-byte Folded Reload - adcl %eax, %ebp - mulxl 56(%esp), %eax, %edi # 4-byte Folded Reload - adcl %ecx, %eax - movl %eax, 40(%esp) # 4-byte Spill - mulxl 52(%esp), %ecx, %ebx # 4-byte Folded Reload - adcl %edi, %ecx - mulxl 48(%esp), %eax, %edi # 4-byte Folded Reload - movl %edi, 4(%esp) # 4-byte Spill - adcl %ebx, %eax - movl %eax, %edi - mulxl 44(%esp), %ebx, %eax # 4-byte Folded Reload - adcl 4(%esp), %ebx # 4-byte Folded Reload - adcl $0, %eax - movl %eax, %edx - movl 32(%esp), %eax # 4-byte Reload - addl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 12(%esp), %ebp # 4-byte Folded Reload - movl 40(%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%esp) # 4-byte Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 8(%esp), %edi # 4-byte Folded Reload - movl %edi, (%esp) # 4-byte Spill - adcl 36(%esp), %ebx # 4-byte Folded Reload - adcl $0, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, %edx - imull 72(%esp), %edx # 4-byte Folded Reload - mulxl 100(%esp), %eax, %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - addl %ecx, %eax - mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - mulxl 92(%esp), %esi, %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl %ebp, %esi - mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, %ecx - mulxl 84(%esp), %eax, %ebp # 4-byte Folded Reload - movl %ebp, 8(%esp) # 4-byte Spill - adcl 20(%esp), %eax # 4-byte Folded Reload - mulxl 80(%esp), %ebp, %edi # 4-byte Folded Reload - movl %edi, 4(%esp) # 4-byte Spill - adcl (%esp), %ebp # 4-byte Folded Reload - movl %ebp, 40(%esp) # 4-byte Spill - mulxl 76(%esp), %edi, %edx # 4-byte Folded Reload - adcl %ebx, %edi - movl %edi, %ebx - movl 36(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl 28(%esp), %ebp # 4-byte Reload - addl %ebp, 32(%esp) # 4-byte Folded Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 12(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%esp) # 4-byte Spill - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - adcl 4(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - adcl %edx, %edi - movl %edi, 36(%esp) # 4-byte Spill - movl 132(%esp), %eax - movl 24(%eax), %edx - mulxl 64(%esp), %edi, %ebx # 4-byte Folded Reload - mulxl 68(%esp), %eax, %ebp # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - addl %edi, %ebp - mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload - adcl %ebx, %ecx - movl %ecx, 68(%esp) # 4-byte Spill - mulxl 56(%esp), %ebx, %ecx # 4-byte Folded Reload - adcl %eax, %ebx - mulxl 52(%esp), %esi, %edi # 4-byte Folded Reload - adcl %ecx, 
%esi - mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 60(%esp) # 4-byte Spill - adcl %edi, %eax - movl %eax, %ecx - mulxl 44(%esp), %edx, %eax # 4-byte Folded Reload - adcl 60(%esp), %edx # 4-byte Folded Reload - adcl $0, %eax - movl 64(%esp), %edi # 4-byte Reload - addl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 64(%esp) # 4-byte Spill - adcl 28(%esp), %ebp # 4-byte Folded Reload - movl 68(%esp), %edi # 4-byte Reload - adcl 24(%esp), %edi # 4-byte Folded Reload - adcl 20(%esp), %ebx # 4-byte Folded Reload - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 48(%esp) # 4-byte Spill - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%esp) # 4-byte Spill - adcl 36(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - movl 64(%esp), %eax # 4-byte Reload - imull %eax, %edx - mulxl 100(%esp), %esi, %ecx # 4-byte Folded Reload - movl %ecx, 56(%esp) # 4-byte Spill - addl %eax, %esi - mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload - movl %ecx, 64(%esp) # 4-byte Spill - adcl %ebp, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl %edx, %ecx - mulxl 92(%esp), %eax, %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - adcl %edi, %eax - movl %eax, 72(%esp) # 4-byte Spill - movl %ecx, %edx - mulxl 88(%esp), %ebp, %eax # 4-byte Folded Reload - movl %eax, 40(%esp) # 4-byte Spill - adcl %ebx, %ebp - movl %ecx, %edx - mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - adcl 48(%esp), %esi # 4-byte Folded Reload - movl %ecx, %edx - mulxl 80(%esp), %edi, %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %ecx, %edx - mulxl 76(%esp), %ebx, %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl 68(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - movl 36(%esp), %edx # 4-byte Reload - addl 56(%esp), %edx # 4-byte Folded Reload - movl 72(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - adcl 44(%esp), %ebp # 4-byte Folded Reload - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 56(%esp) # 4-byte Spill - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - adcl 48(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - movl %edx, %ecx - subl 100(%esp), %ecx # 4-byte Folded Reload - sbbl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - movl %ebp, %eax - sbbl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - sbbl 88(%esp), %esi # 4-byte Folded Reload - movl %esi, 96(%esp) # 4-byte Spill - sbbl 84(%esp), %edi # 4-byte Folded Reload - movl %edi, 100(%esp) # 4-byte Spill - sbbl 80(%esp), %ebx # 4-byte Folded Reload - movl 68(%esp), %edi # 4-byte Reload - sbbl 76(%esp), %edi # 4-byte Folded Reload - movl %edi, %eax - sarl $31, %eax - testl %eax, %eax - js .LBB99_2 -# BB#1: - movl %ecx, %edx -.LBB99_2: - movl 124(%esp), %esi - movl %edx, (%esi) - movl 72(%esp), %eax # 4-byte Reload - js .LBB99_4 -# BB#3: - movl 52(%esp), %eax # 4-byte Reload -.LBB99_4: - movl %eax, 4(%esi) - movl 68(%esp), %eax # 4-byte Reload - movl 64(%esp), %ecx # 4-byte Reload - movl 60(%esp), %edx # 4-byte Reload - js .LBB99_6 -# BB#5: - movl 92(%esp), %ebp # 4-byte Reload 
-.LBB99_6: - movl %ebp, 8(%esi) - movl %esi, %ebp - movl 56(%esp), %esi # 4-byte Reload - js .LBB99_8 -# BB#7: - movl 96(%esp), %esi # 4-byte Reload -.LBB99_8: - movl %esi, 12(%ebp) - js .LBB99_10 -# BB#9: - movl 100(%esp), %edx # 4-byte Reload -.LBB99_10: - movl %edx, 16(%ebp) - js .LBB99_12 -# BB#11: - movl %ebx, %ecx -.LBB99_12: - movl %ecx, 20(%ebp) - js .LBB99_14 -# BB#13: - movl %edi, %eax -.LBB99_14: - movl %eax, 24(%ebp) - addl $104, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end99: - .size mcl_fp_montNF7Lbmi2, .Lfunc_end99-mcl_fp_montNF7Lbmi2 - - .globl mcl_fp_montRed7Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed7Lbmi2,@function -mcl_fp_montRed7Lbmi2: # @mcl_fp_montRed7Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $108, %esp - movl 136(%esp), %edi - movl -4(%edi), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl (%edi), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 132(%esp), %eax - movl (%eax), %edx - movl %edx, 72(%esp) # 4-byte Spill - imull %ecx, %edx - movl 24(%edi), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - mulxl %ecx, %ebx, %ecx - movl %ebx, 68(%esp) # 4-byte Spill - movl %ecx, 44(%esp) # 4-byte Spill - movl 20(%edi), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - mulxl %ecx, %ebx, %ecx - movl %ebx, 60(%esp) # 4-byte Spill - movl %ecx, 40(%esp) # 4-byte Spill - movl 16(%edi), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - mulxl %ecx, %ebx, %ecx - movl %ebx, 56(%esp) # 4-byte Spill - movl %ecx, 36(%esp) # 4-byte Spill - movl 4(%edi), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - mulxl %ecx, %ecx, %ebp - mulxl %esi, %ebx, %esi - movl %ebx, 64(%esp) # 4-byte Spill - addl %ecx, %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 8(%edi), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - mulxl %ecx, %esi, %ecx - adcl %ebp, %esi - movl %esi, %ebp - movl 12(%edi), %esi - movl %esi, 84(%esp) # 4-byte Spill - mulxl %esi, %esi, %edx - adcl %ecx, %esi - movl %esi, 52(%esp) # 4-byte Spill - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %edx, %edi - movl 36(%esp), %esi # 4-byte Reload - adcl 60(%esp), %esi # 4-byte Folded Reload - movl 40(%esp), %edx # 4-byte Reload - adcl 68(%esp), %edx # 4-byte Folded Reload - movl 44(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - movl 64(%esp), %ebx # 4-byte Reload - addl 72(%esp), %ebx # 4-byte Folded Reload - movl 28(%esp), %ebx # 4-byte Reload - adcl 4(%eax), %ebx - movl %ebx, 28(%esp) # 4-byte Spill - adcl 8(%eax), %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 12(%eax), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - adcl 16(%eax), %edi - movl %edi, 16(%esp) # 4-byte Spill - adcl 20(%eax), %esi - movl %esi, 36(%esp) # 4-byte Spill - adcl 24(%eax), %edx - movl %edx, 40(%esp) # 4-byte Spill - adcl 28(%eax), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 52(%eax), %ecx - movl 48(%eax), %edx - movl 44(%eax), %esi - movl 40(%eax), %edi - movl 36(%eax), %ebp - movl 32(%eax), %eax - adcl $0, %eax - movl %eax, 12(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 24(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 56(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 60(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 64(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 72(%esp) # 4-byte Spill - sbbl %eax, %eax - andl $1, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %ebx, %edx - imull 76(%esp), %edx # 4-byte Folded Reload - mulxl 100(%esp), %eax, %ebx # 4-byte Folded Reload - movl %eax, 4(%esp) # 4-byte Spill - mulxl 88(%esp), %ebp, %eax # 4-byte Folded Reload - 
movl %eax, (%esp) # 4-byte Spill - mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload - mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload - movl %edi, 8(%esp) # 4-byte Spill - addl %ecx, %esi - movl %esi, 32(%esp) # 4-byte Spill - adcl %ebp, %eax - movl %eax, 48(%esp) # 4-byte Spill - mulxl 84(%esp), %esi, %ebp # 4-byte Folded Reload - adcl (%esp), %esi # 4-byte Folded Reload - adcl 4(%esp), %ebp # 4-byte Folded Reload - mulxl 104(%esp), %ecx, %edi # 4-byte Folded Reload - adcl %ebx, %ecx - mulxl 80(%esp), %ebx, %edx # 4-byte Folded Reload - adcl %edi, %ebx - adcl $0, %edx - movl 8(%esp), %eax # 4-byte Reload - addl 28(%esp), %eax # 4-byte Folded Reload - movl 32(%esp), %edi # 4-byte Reload - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 36(%esp) # 4-byte Spill - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - adcl 12(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - adcl $0, 24(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - adcl $0, 60(%esp) # 4-byte Folded Spill - adcl $0, 64(%esp) # 4-byte Folded Spill - adcl $0, 72(%esp) # 4-byte Folded Spill - adcl $0, 68(%esp) # 4-byte Folded Spill - movl %edi, %edx - imull 76(%esp), %edx # 4-byte Folded Reload - mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - movl %eax, 52(%esp) # 4-byte Spill - mulxl 84(%esp), %ebx, %ebp # 4-byte Folded Reload - mulxl 96(%esp), %eax, %esi # 4-byte Folded Reload - mulxl 92(%esp), %ecx, %edi # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - addl %eax, %edi - movl %edi, 8(%esp) # 4-byte Spill - mulxl 88(%esp), %edi, %eax # 4-byte Folded Reload - adcl %esi, %edi - adcl %ebx, %eax - movl %eax, %ebx - mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload - adcl %ebp, %esi - mulxl 104(%esp), %edx, %ecx # 4-byte Folded Reload - adcl %eax, %edx - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl 52(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 12(%esp), %ebp # 4-byte Reload - addl 32(%esp), %ebp # 4-byte Folded Reload - movl 8(%esp), %ebp # 4-byte Reload - adcl 48(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 36(%esp) # 4-byte Spill - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 44(%esp) # 4-byte Spill - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - adcl $0, 60(%esp) # 4-byte Folded Spill - adcl $0, 64(%esp) # 4-byte Folded Spill - adcl $0, 72(%esp) # 4-byte Folded Spill - adcl $0, 68(%esp) # 4-byte Folded Spill - movl %ebp, %edx - imull 76(%esp), %edx # 4-byte Folded Reload - mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - mulxl 84(%esp), %edi, %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - mulxl 96(%esp), %eax, %ebx # 4-byte Folded Reload - mulxl 
92(%esp), %esi, %ecx # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - addl %eax, %ecx - movl %ecx, 16(%esp) # 4-byte Spill - mulxl 88(%esp), %esi, %eax # 4-byte Folded Reload - adcl %ebx, %esi - movl %esi, %ebx - adcl %edi, %eax - movl %eax, %edi - mulxl 100(%esp), %esi, %ecx # 4-byte Folded Reload - adcl 12(%esp), %esi # 4-byte Folded Reload - mulxl 104(%esp), %edx, %eax # 4-byte Folded Reload - adcl %ecx, %edx - movl %edx, %ecx - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, %edx - movl 48(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl %ebp, 20(%esp) # 4-byte Folded Spill - movl 16(%esp), %ebp # 4-byte Reload - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 36(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 40(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 28(%esp) # 4-byte Spill - adcl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - adcl $0, 60(%esp) # 4-byte Folded Spill - adcl $0, 64(%esp) # 4-byte Folded Spill - adcl $0, 72(%esp) # 4-byte Folded Spill - adcl $0, 68(%esp) # 4-byte Folded Spill - movl %ebp, %edx - movl %ebp, %edi - imull 76(%esp), %edx # 4-byte Folded Reload - mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - mulxl 84(%esp), %eax, %ebx # 4-byte Folded Reload - movl %eax, 16(%esp) # 4-byte Spill - mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload - mulxl 92(%esp), %esi, %ebp # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - addl %eax, %ebp - mulxl 88(%esp), %esi, %eax # 4-byte Folded Reload - adcl %ecx, %esi - movl %esi, 44(%esp) # 4-byte Spill - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, %ecx - mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload - adcl %ebx, %esi - mulxl 104(%esp), %ebx, %edx # 4-byte Folded Reload - adcl %eax, %ebx - adcl 24(%esp), %edx # 4-byte Folded Reload - movl 56(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl %edi, 20(%esp) # 4-byte Folded Spill - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 40(%esp), %edi # 4-byte Reload - adcl %edi, 44(%esp) # 4-byte Folded Spill - adcl 32(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 36(%esp) # 4-byte Spill - adcl 52(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 28(%esp) # 4-byte Spill - adcl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, 64(%esp) # 4-byte Folded Spill - adcl $0, 72(%esp) # 4-byte Folded Spill - adcl $0, 68(%esp) # 4-byte Folded Spill - movl %ebp, %edx - imull 76(%esp), %edx # 4-byte Folded Reload - mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - movl %eax, 60(%esp) # 4-byte Spill - mulxl 84(%esp), %ebx, %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - mulxl 96(%esp), %ecx, %edi # 4-byte Folded Reload - mulxl 92(%esp), %esi, %eax # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - addl %ecx, %eax - movl %eax, 52(%esp) # 4-byte Spill - mulxl 88(%esp), %ecx, %eax # 4-byte Folded Reload - adcl %edi, %ecx - movl %ecx, %edi - adcl %ebx, %eax - movl %eax, %ebx - mulxl 100(%esp), %esi, %ecx 
# 4-byte Folded Reload - adcl 48(%esp), %esi # 4-byte Folded Reload - mulxl 104(%esp), %edx, %eax # 4-byte Folded Reload - adcl %ecx, %edx - movl %edx, 48(%esp) # 4-byte Spill - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, %edx - movl 60(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl %ebp, 20(%esp) # 4-byte Folded Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 40(%esp) # 4-byte Spill - adcl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 44(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 36(%esp) # 4-byte Spill - movl 32(%esp), %esi # 4-byte Reload - adcl %esi, 48(%esp) # 4-byte Folded Spill - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - adcl 64(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 60(%esp) # 4-byte Spill - adcl $0, 72(%esp) # 4-byte Folded Spill - adcl $0, 68(%esp) # 4-byte Folded Spill - movl 76(%esp), %edx # 4-byte Reload - imull %eax, %edx - mulxl 92(%esp), %eax, %ecx # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - mulxl 96(%esp), %eax, %esi # 4-byte Folded Reload - addl %ecx, %eax - movl %eax, 56(%esp) # 4-byte Spill - mulxl 88(%esp), %eax, %edi # 4-byte Folded Reload - adcl %esi, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl %edx, %esi - mulxl 84(%esp), %ebp, %eax # 4-byte Folded Reload - adcl %edi, %ebp - mulxl 100(%esp), %ecx, %edi # 4-byte Folded Reload - adcl %eax, %ecx - mulxl 104(%esp), %ebx, %eax # 4-byte Folded Reload - movl %eax, 28(%esp) # 4-byte Spill - adcl %edi, %ebx - mulxl 80(%esp), %edi, %eax # 4-byte Folded Reload - adcl 28(%esp), %edi # 4-byte Folded Reload - adcl $0, %eax - movl 64(%esp), %edx # 4-byte Reload - addl 52(%esp), %edx # 4-byte Folded Reload - movl 56(%esp), %edx # 4-byte Reload - adcl 40(%esp), %edx # 4-byte Folded Reload - movl 76(%esp), %esi # 4-byte Reload - adcl 44(%esp), %esi # 4-byte Folded Reload - movl %esi, 76(%esp) # 4-byte Spill - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 64(%esp) # 4-byte Spill - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 56(%esp) # 4-byte Spill - adcl 32(%esp), %ebx # 4-byte Folded Reload - adcl 60(%esp), %edi # 4-byte Folded Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl 68(%esp), %esi # 4-byte Reload - adcl $0, %esi - movl %edx, %ebp - subl 92(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 68(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - sbbl 96(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 72(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - sbbl 88(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 88(%esp) # 4-byte Spill - movl %edx, %ebp - sbbl 84(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 92(%esp) # 4-byte Spill - movl %ebx, %ecx - sbbl 100(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 96(%esp) # 4-byte Spill - movl %edi, %ecx - sbbl 104(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 104(%esp) # 4-byte Spill - movl %eax, %edx - movl %eax, %ecx - sbbl 80(%esp), %edx # 4-byte Folded Reload - movl %edx, 100(%esp) # 4-byte Spill - sbbl $0, %esi - andl $1, %esi - jne .LBB100_2 -# BB#1: - movl 68(%esp), %ebp # 4-byte Reload -.LBB100_2: - movl 128(%esp), %edx - movl %ebp, (%edx) - movl %esi, %eax - testb %al, %al - movl 76(%esp), %ebp # 4-byte Reload - jne .LBB100_4 -# BB#3: - movl 72(%esp), %ebp # 4-byte Reload -.LBB100_4: - movl %ebp, 4(%edx) - movl %ecx, %eax - movl 64(%esp), %ecx # 4-byte Reload - jne 
.LBB100_6 -# BB#5: - movl 88(%esp), %ecx # 4-byte Reload -.LBB100_6: - movl %ecx, 8(%edx) - movl 56(%esp), %ecx # 4-byte Reload - jne .LBB100_8 -# BB#7: - movl 92(%esp), %ecx # 4-byte Reload -.LBB100_8: - movl %ecx, 12(%edx) - jne .LBB100_10 -# BB#9: - movl 96(%esp), %ebx # 4-byte Reload -.LBB100_10: - movl %ebx, 16(%edx) - jne .LBB100_12 -# BB#11: - movl 104(%esp), %edi # 4-byte Reload -.LBB100_12: - movl %edi, 20(%edx) - jne .LBB100_14 -# BB#13: - movl 100(%esp), %eax # 4-byte Reload -.LBB100_14: - movl %eax, 24(%edx) - addl $108, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end100: - .size mcl_fp_montRed7Lbmi2, .Lfunc_end100-mcl_fp_montRed7Lbmi2 - - .globl mcl_fp_addPre7Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre7Lbmi2,@function -mcl_fp_addPre7Lbmi2: # @mcl_fp_addPre7Lbmi2 -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - movl 20(%esp), %esi - addl (%esi), %ecx - adcl 4(%esi), %edx - movl 8(%eax), %edi - adcl 8(%esi), %edi - movl 16(%esp), %ebx - movl %ecx, (%ebx) - movl 12(%esi), %ecx - movl %edx, 4(%ebx) - movl 16(%esi), %edx - adcl 12(%eax), %ecx - adcl 16(%eax), %edx - movl %edi, 8(%ebx) - movl 20(%eax), %edi - movl %ecx, 12(%ebx) - movl 20(%esi), %ecx - adcl %edi, %ecx - movl %edx, 16(%ebx) - movl %ecx, 20(%ebx) - movl 24(%eax), %eax - movl 24(%esi), %ecx - adcl %eax, %ecx - movl %ecx, 24(%ebx) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end101: - .size mcl_fp_addPre7Lbmi2, .Lfunc_end101-mcl_fp_addPre7Lbmi2 - - .globl mcl_fp_subPre7Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre7Lbmi2,@function -mcl_fp_subPre7Lbmi2: # @mcl_fp_subPre7Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %edx - movl 4(%ecx), %esi - xorl %eax, %eax - movl 28(%esp), %edi - subl (%edi), %edx - sbbl 4(%edi), %esi - movl 8(%ecx), %ebx - sbbl 8(%edi), %ebx - movl 20(%esp), %ebp - movl %edx, (%ebp) - movl 12(%ecx), %edx - sbbl 12(%edi), %edx - movl %esi, 4(%ebp) - movl 16(%ecx), %esi - sbbl 16(%edi), %esi - movl %ebx, 8(%ebp) - movl 20(%edi), %ebx - movl %edx, 12(%ebp) - movl 20(%ecx), %edx - sbbl %ebx, %edx - movl %esi, 16(%ebp) - movl %edx, 20(%ebp) - movl 24(%edi), %edx - movl 24(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 24(%ebp) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end102: - .size mcl_fp_subPre7Lbmi2, .Lfunc_end102-mcl_fp_subPre7Lbmi2 - - .globl mcl_fp_shr1_7Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_7Lbmi2,@function -mcl_fp_shr1_7Lbmi2: # @mcl_fp_shr1_7Lbmi2 -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - shrdl $1, %edx, %ecx - movl 8(%esp), %esi - movl %ecx, (%esi) - movl 8(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 4(%esi) - movl 12(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 8(%esi) - movl 16(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 12(%esi) - movl 20(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 16(%esi) - movl 24(%eax), %eax - shrdl $1, %eax, %edx - movl %edx, 20(%esi) - shrl %eax - movl %eax, 24(%esi) - popl %esi - retl -.Lfunc_end103: - .size mcl_fp_shr1_7Lbmi2, .Lfunc_end103-mcl_fp_shr1_7Lbmi2 - - .globl mcl_fp_add7Lbmi2 - .align 16, 0x90 - .type mcl_fp_add7Lbmi2,@function -mcl_fp_add7Lbmi2: # @mcl_fp_add7Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $20, %esp - movl 48(%esp), %ebp - movl (%ebp), %eax - movl 4(%ebp), %edi - movl 44(%esp), %ecx - addl (%ecx), %eax - adcl 4(%ecx), 
%edi - movl 8(%ebp), %esi - adcl 8(%ecx), %esi - movl 12(%ecx), %edx - movl 16(%ecx), %ebx - adcl 12(%ebp), %edx - movl %edx, 16(%esp) # 4-byte Spill - adcl 16(%ebp), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - movl %ebp, %ebx - movl 20(%ecx), %ebp - adcl 20(%ebx), %ebp - movl 24(%ecx), %edx - adcl 24(%ebx), %edx - movl 40(%esp), %ecx - movl %eax, (%ecx) - movl %edi, 4(%ecx) - movl %esi, 8(%ecx) - movl 16(%esp), %ebx # 4-byte Reload - movl %ebx, 12(%ecx) - movl 12(%esp), %ebx # 4-byte Reload - movl %ebx, 16(%ecx) - movl %ebp, 20(%ecx) - movl %edx, 24(%ecx) - sbbl %ebx, %ebx - andl $1, %ebx - movl 52(%esp), %ecx - subl (%ecx), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 12(%esp), %ecx # 4-byte Reload - movl 52(%esp), %eax - sbbl 4(%eax), %edi - movl %edi, 4(%esp) # 4-byte Spill - movl %eax, %edi - sbbl 8(%edi), %esi - movl %esi, (%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - sbbl 12(%edi), %eax - movl %eax, 16(%esp) # 4-byte Spill - sbbl 16(%edi), %ecx - movl %ecx, %esi - sbbl 20(%edi), %ebp - sbbl 24(%edi), %edx - sbbl $0, %ebx - testb $1, %bl - jne .LBB104_2 -# BB#1: # %nocarry - movl 8(%esp), %ecx # 4-byte Reload - movl 40(%esp), %eax - movl %eax, %ebx - movl %ecx, (%ebx) - movl 4(%esp), %ecx # 4-byte Reload - movl %ecx, 4(%ebx) - movl (%esp), %eax # 4-byte Reload - movl %eax, 8(%ebx) - movl 16(%esp), %eax # 4-byte Reload - movl %eax, 12(%ebx) - movl %esi, 16(%ebx) - movl %ebp, 20(%ebx) - movl %edx, 24(%ebx) -.LBB104_2: # %carry - addl $20, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end104: - .size mcl_fp_add7Lbmi2, .Lfunc_end104-mcl_fp_add7Lbmi2 - - .globl mcl_fp_addNF7Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF7Lbmi2,@function -mcl_fp_addNF7Lbmi2: # @mcl_fp_addNF7Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $52, %esp - movl 80(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - movl 76(%esp), %esi - addl (%esi), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - adcl 4(%esi), %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 24(%eax), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 20(%eax), %ebx - movl 16(%eax), %edi - movl 12(%eax), %ebp - movl 8(%eax), %ecx - adcl 8(%esi), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - adcl 12(%esi), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - adcl 16(%esi), %edi - movl %edi, 36(%esp) # 4-byte Spill - adcl 20(%esi), %ebx - movl %ebx, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 24(%esi), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 84(%esp), %eax - movl 44(%esp), %esi # 4-byte Reload - subl (%eax), %esi - movl %esi, (%esp) # 4-byte Spill - sbbl 4(%eax), %edx - movl %edx, 4(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - sbbl 8(%eax), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - sbbl 12(%eax), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - sbbl 16(%eax), %edi - movl %edi, 20(%esp) # 4-byte Spill - sbbl 20(%eax), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - sbbl 24(%eax), %edi - movl %edi, %ecx - sarl $31, %ecx - testl %ecx, %ecx - js .LBB105_2 -# BB#1: - movl (%esp), %esi # 4-byte Reload -.LBB105_2: - movl 72(%esp), %ecx - movl %esi, (%ecx) - movl 28(%esp), %eax # 4-byte Reload - js .LBB105_4 -# BB#3: - movl 4(%esp), %eax # 4-byte Reload -.LBB105_4: - movl %eax, 4(%ecx) - movl 48(%esp), %ebp # 4-byte Reload - movl 40(%esp), %ecx # 4-byte Reload - movl 36(%esp), %edx # 4-byte Reload - movl 32(%esp), %esi # 4-byte Reload - movl 24(%esp), %ebx # 4-byte Reload - js .LBB105_6 -# BB#5: - movl 8(%esp), %ebx # 4-byte 
Reload -.LBB105_6: - movl 72(%esp), %eax - movl %ebx, 8(%eax) - movl %eax, %ebx - js .LBB105_8 -# BB#7: - movl 16(%esp), %esi # 4-byte Reload -.LBB105_8: - movl %esi, 12(%ebx) - js .LBB105_10 -# BB#9: - movl 20(%esp), %edx # 4-byte Reload -.LBB105_10: - movl %edx, 16(%ebx) - js .LBB105_12 -# BB#11: - movl 12(%esp), %ecx # 4-byte Reload -.LBB105_12: - movl %ecx, 20(%ebx) - js .LBB105_14 -# BB#13: - movl %edi, %ebp -.LBB105_14: - movl %ebp, 24(%ebx) - addl $52, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end105: - .size mcl_fp_addNF7Lbmi2, .Lfunc_end105-mcl_fp_addNF7Lbmi2 - - .globl mcl_fp_sub7Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub7Lbmi2,@function -mcl_fp_sub7Lbmi2: # @mcl_fp_sub7Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $24, %esp - movl 48(%esp), %edi - movl (%edi), %eax - movl 4(%edi), %ecx - xorl %ebx, %ebx - movl 52(%esp), %esi - subl (%esi), %eax - movl %eax, 16(%esp) # 4-byte Spill - sbbl 4(%esi), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 8(%edi), %edx - sbbl 8(%esi), %edx - movl %edx, 4(%esp) # 4-byte Spill - movl 12(%edi), %ecx - sbbl 12(%esi), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 16(%edi), %eax - sbbl 16(%esi), %eax - movl %eax, (%esp) # 4-byte Spill - movl 20(%edi), %ebp - sbbl 20(%esi), %ebp - movl %ebp, 12(%esp) # 4-byte Spill - movl 24(%edi), %edi - sbbl 24(%esi), %edi - sbbl $0, %ebx - testb $1, %bl - movl 44(%esp), %ebx - movl 16(%esp), %esi # 4-byte Reload - movl %esi, (%ebx) - movl 20(%esp), %esi # 4-byte Reload - movl %esi, 4(%ebx) - movl %edx, 8(%ebx) - movl %ecx, 12(%ebx) - movl %eax, 16(%ebx) - movl %ebp, 20(%ebx) - movl %edi, 24(%ebx) - je .LBB106_2 -# BB#1: # %carry - movl 56(%esp), %ebp - movl 16(%esp), %ecx # 4-byte Reload - addl (%ebp), %ecx - movl %ecx, (%ebx) - movl 20(%esp), %edx # 4-byte Reload - adcl 4(%ebp), %edx - movl %edx, 4(%ebx) - movl 4(%esp), %ecx # 4-byte Reload - adcl 8(%ebp), %ecx - movl 12(%ebp), %eax - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %ecx, 8(%ebx) - movl 16(%ebp), %ecx - adcl (%esp), %ecx # 4-byte Folded Reload - movl %eax, 12(%ebx) - movl %ecx, 16(%ebx) - movl 20(%ebp), %eax - adcl 12(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%ebx) - movl 24(%ebp), %eax - adcl %edi, %eax - movl %eax, 24(%ebx) -.LBB106_2: # %nocarry - addl $24, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end106: - .size mcl_fp_sub7Lbmi2, .Lfunc_end106-mcl_fp_sub7Lbmi2 - - .globl mcl_fp_subNF7Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF7Lbmi2,@function -mcl_fp_subNF7Lbmi2: # @mcl_fp_subNF7Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $32, %esp - movl 56(%esp), %eax - movl (%eax), %esi - movl 4(%eax), %edx - movl 60(%esp), %ecx - subl (%ecx), %esi - movl %esi, 20(%esp) # 4-byte Spill - sbbl 4(%ecx), %edx - movl %edx, 24(%esp) # 4-byte Spill - movl 24(%eax), %edx - movl 20(%eax), %esi - movl 16(%eax), %edi - movl 12(%eax), %ebx - movl 8(%eax), %eax - sbbl 8(%ecx), %eax - movl %eax, 4(%esp) # 4-byte Spill - sbbl 12(%ecx), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - sbbl 16(%ecx), %edi - movl %edi, 16(%esp) # 4-byte Spill - sbbl 20(%ecx), %esi - movl %esi, 28(%esp) # 4-byte Spill - sbbl 24(%ecx), %edx - movl %edx, 8(%esp) # 4-byte Spill - movl %edx, %ecx - sarl $31, %ecx - movl %ecx, %eax - shldl $1, %edx, %eax - movl 64(%esp), %edx - andl (%edx), %eax - movl 24(%edx), %esi - andl %ecx, %esi - movl %esi, (%esp) # 4-byte Spill - movl 20(%edx), %ebx - andl %ecx, %ebx - movl 16(%edx), %edi - andl %ecx, %edi - movl 12(%edx), %esi - andl 
%ecx, %esi - movl 64(%esp), %edx - movl 8(%edx), %edx - andl %ecx, %edx - movl 64(%esp), %ebp - andl 4(%ebp), %ecx - addl 20(%esp), %eax # 4-byte Folded Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl 52(%esp), %ebp - movl %eax, (%ebp) - adcl 4(%esp), %edx # 4-byte Folded Reload - movl %ebp, %eax - movl %ecx, 4(%eax) - adcl 12(%esp), %esi # 4-byte Folded Reload - movl %edx, 8(%eax) - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %esi, 12(%eax) - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl %edi, 16(%eax) - movl %ebx, 20(%eax) - movl (%esp), %ecx # 4-byte Reload - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%eax) - addl $32, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end107: - .size mcl_fp_subNF7Lbmi2, .Lfunc_end107-mcl_fp_subNF7Lbmi2 - - .globl mcl_fpDbl_add7Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add7Lbmi2,@function -mcl_fpDbl_add7Lbmi2: # @mcl_fpDbl_add7Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $44, %esp - movl 72(%esp), %esi - movl 68(%esp), %edx - movl 12(%edx), %edi - movl 16(%edx), %ecx - movl 8(%esi), %eax - movl (%esi), %ebx - addl (%edx), %ebx - movl 64(%esp), %ebp - movl %ebx, (%ebp) - movl 4(%esi), %ebx - adcl 4(%edx), %ebx - adcl 8(%edx), %eax - adcl 12(%esi), %edi - adcl 16(%esi), %ecx - movl %ebx, 4(%ebp) - movl %esi, %ebx - movl 36(%ebx), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl %eax, 8(%ebp) - movl 20(%ebx), %eax - movl %edi, 12(%ebp) - movl 20(%edx), %edi - adcl %eax, %edi - movl 24(%ebx), %eax - movl %ecx, 16(%ebp) - movl 24(%edx), %ecx - adcl %eax, %ecx - movl 28(%ebx), %eax - movl %edi, 20(%ebp) - movl 28(%edx), %edi - adcl %eax, %edi - movl %edi, 20(%esp) # 4-byte Spill - movl 32(%ebx), %eax - movl %ecx, 24(%ebp) - movl 32(%edx), %ecx - adcl %eax, %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%edx), %esi - adcl 36(%esp), %esi # 4-byte Folded Reload - movl 40(%ebx), %ecx - movl 40(%edx), %eax - adcl %ecx, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%ebx), %ebp - movl 44(%edx), %ecx - adcl %ebp, %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 48(%ebx), %ebp - movl %ebx, %eax - movl 48(%edx), %ebx - adcl %ebp, %ebx - movl %ebx, 28(%esp) # 4-byte Spill - movl 52(%eax), %eax - movl 52(%edx), %ebp - adcl %eax, %ebp - movl %ebp, 32(%esp) # 4-byte Spill - sbbl %edx, %edx - andl $1, %edx - movl 76(%esp), %eax - subl (%eax), %edi - movl %edi, 8(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - sbbl 4(%eax), %edi - movl %edi, 4(%esp) # 4-byte Spill - movl %esi, %eax - movl 76(%esp), %edi - sbbl 8(%edi), %eax - movl %eax, (%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - sbbl 12(%edi), %eax - movl %eax, 12(%esp) # 4-byte Spill - sbbl 16(%edi), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - sbbl 20(%edi), %ebx - sbbl 24(%edi), %ebp - sbbl $0, %edx - andl $1, %edx - jne .LBB108_2 -# BB#1: - movl %ebp, 32(%esp) # 4-byte Spill -.LBB108_2: - testb %dl, %dl - movl 20(%esp), %ecx # 4-byte Reload - jne .LBB108_4 -# BB#3: - movl (%esp), %esi # 4-byte Reload - movl 4(%esp), %eax # 4-byte Reload - movl %eax, 40(%esp) # 4-byte Spill - movl 8(%esp), %ecx # 4-byte Reload -.LBB108_4: - movl 64(%esp), %eax - movl %ecx, 28(%eax) - movl 40(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl %esi, 36(%eax) - movl 24(%esp), %edx # 4-byte Reload - movl 36(%esp), %ecx # 4-byte Reload - jne .LBB108_6 -# BB#5: - movl 12(%esp), %ecx # 4-byte Reload -.LBB108_6: - movl %ecx, 40(%eax) - movl 28(%esp), %ecx # 4-byte Reload - jne .LBB108_8 -# BB#7: - movl 16(%esp), %edx # 
4-byte Reload -.LBB108_8: - movl %edx, 44(%eax) - jne .LBB108_10 -# BB#9: - movl %ebx, %ecx -.LBB108_10: - movl %ecx, 48(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 52(%eax) - addl $44, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end108: - .size mcl_fpDbl_add7Lbmi2, .Lfunc_end108-mcl_fpDbl_add7Lbmi2 - - .globl mcl_fpDbl_sub7Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub7Lbmi2,@function -mcl_fpDbl_sub7Lbmi2: # @mcl_fpDbl_sub7Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $32, %esp - movl 56(%esp), %esi - movl (%esi), %eax - movl 4(%esi), %edx - movl 60(%esp), %edi - subl (%edi), %eax - sbbl 4(%edi), %edx - movl 8(%esi), %ebx - sbbl 8(%edi), %ebx - movl 52(%esp), %ecx - movl %eax, (%ecx) - movl 12(%esi), %eax - sbbl 12(%edi), %eax - movl %edx, 4(%ecx) - movl 16(%esi), %edx - sbbl 16(%edi), %edx - movl %ebx, 8(%ecx) - movl 20(%edi), %ebx - movl %eax, 12(%ecx) - movl 20(%esi), %eax - sbbl %ebx, %eax - movl 24(%edi), %ebx - movl %edx, 16(%ecx) - movl 24(%esi), %edx - sbbl %ebx, %edx - movl 28(%edi), %ebx - movl %eax, 20(%ecx) - movl 28(%esi), %eax - sbbl %ebx, %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 32(%edi), %eax - movl %edx, 24(%ecx) - movl 32(%esi), %edx - sbbl %eax, %edx - movl %edx, 4(%esp) # 4-byte Spill - movl 36(%edi), %eax - movl 36(%esi), %edx - sbbl %eax, %edx - movl %edx, 8(%esp) # 4-byte Spill - movl 40(%edi), %eax - movl 40(%esi), %edx - sbbl %eax, %edx - movl %edx, 16(%esp) # 4-byte Spill - movl 44(%edi), %eax - movl 44(%esi), %edx - sbbl %eax, %edx - movl %edx, 20(%esp) # 4-byte Spill - movl 48(%edi), %eax - movl 48(%esi), %edx - sbbl %eax, %edx - movl %edx, 24(%esp) # 4-byte Spill - movl 52(%edi), %eax - movl 52(%esi), %edx - sbbl %eax, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 64(%esp), %esi - jne .LBB109_1 -# BB#2: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB109_3 -.LBB109_1: - movl 24(%esi), %edx - movl %edx, (%esp) # 4-byte Spill -.LBB109_3: - testb %al, %al - jne .LBB109_4 -# BB#5: - movl $0, %edi - movl $0, %eax - jmp .LBB109_6 -.LBB109_4: - movl (%esi), %eax - movl 4(%esi), %edi -.LBB109_6: - jne .LBB109_7 -# BB#8: - movl $0, %ebx - jmp .LBB109_9 -.LBB109_7: - movl 20(%esi), %ebx -.LBB109_9: - jne .LBB109_10 -# BB#11: - movl $0, %ebp - jmp .LBB109_12 -.LBB109_10: - movl 16(%esi), %ebp -.LBB109_12: - jne .LBB109_13 -# BB#14: - movl $0, %edx - jmp .LBB109_15 -.LBB109_13: - movl 12(%esi), %edx -.LBB109_15: - jne .LBB109_16 -# BB#17: - xorl %esi, %esi - jmp .LBB109_18 -.LBB109_16: - movl 8(%esi), %esi -.LBB109_18: - addl 12(%esp), %eax # 4-byte Folded Reload - adcl 4(%esp), %edi # 4-byte Folded Reload - movl %eax, 28(%ecx) - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %edi, 32(%ecx) - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %esi, 36(%ecx) - adcl 20(%esp), %ebp # 4-byte Folded Reload - movl %edx, 40(%ecx) - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl %ebp, 44(%ecx) - movl %ebx, 48(%ecx) - movl (%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%ecx) - addl $32, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end109: - .size mcl_fpDbl_sub7Lbmi2, .Lfunc_end109-mcl_fpDbl_sub7Lbmi2 - - .align 16, 0x90 - .type .LmulPv256x32,@function -.LmulPv256x32: # @mulPv256x32 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $20, %esp - movl %edx, %eax - movl 40(%esp), %edx - mulxl 4(%eax), %edi, %esi - mulxl (%eax), %ebp, %ebx - movl %ebp, 16(%esp) # 4-byte Spill - 
addl %edi, %ebx - movl %ebx, 12(%esp) # 4-byte Spill - mulxl 8(%eax), %edi, %ebx - movl %ebx, 4(%esp) # 4-byte Spill - adcl %esi, %edi - movl %edi, 8(%esp) # 4-byte Spill - movl %edx, %ebp - mulxl 12(%eax), %ebx, %esi - adcl 4(%esp), %ebx # 4-byte Folded Reload - mulxl 16(%eax), %edi, %edx - movl %edx, 4(%esp) # 4-byte Spill - adcl %esi, %edi - movl %ebp, %edx - mulxl 20(%eax), %esi, %edx - movl %edx, (%esp) # 4-byte Spill - adcl 4(%esp), %esi # 4-byte Folded Reload - movl %ebp, %edx - mulxl 24(%eax), %edx, %ebp - movl %ebp, 4(%esp) # 4-byte Spill - adcl (%esp), %edx # 4-byte Folded Reload - movl 16(%esp), %ebp # 4-byte Reload - movl %ebp, (%ecx) - movl 12(%esp), %ebp # 4-byte Reload - movl %ebp, 4(%ecx) - movl 8(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%ecx) - movl %ebx, 12(%ecx) - movl %edi, 16(%ecx) - movl %esi, 20(%ecx) - movl %edx, 24(%ecx) - movl 40(%esp), %edx - mulxl 28(%eax), %eax, %edx - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%ecx) - adcl $0, %edx - movl %edx, 32(%ecx) - movl %ecx, %eax - addl $20, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end110: - .size .LmulPv256x32, .Lfunc_end110-.LmulPv256x32 - - .globl mcl_fp_mulUnitPre8Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre8Lbmi2,@function -mcl_fp_mulUnitPre8Lbmi2: # @mcl_fp_mulUnitPre8Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $60, %esp - calll .L111$pb -.L111$pb: - popl %ebx -.Ltmp2: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.L111$pb), %ebx - movl 88(%esp), %eax - movl %eax, (%esp) - leal 24(%esp), %ecx - movl 84(%esp), %edx - calll .LmulPv256x32 - movl 56(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 52(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 48(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 44(%esp), %esi - movl 40(%esp), %edi - movl 36(%esp), %ebx - movl 32(%esp), %ebp - movl 24(%esp), %edx - movl 28(%esp), %ecx - movl 80(%esp), %eax - movl %edx, (%eax) - movl %ecx, 4(%eax) - movl %ebp, 8(%eax) - movl %ebx, 12(%eax) - movl %edi, 16(%eax) - movl %esi, 20(%eax) - movl 12(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - addl $60, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end111: - .size mcl_fp_mulUnitPre8Lbmi2, .Lfunc_end111-mcl_fp_mulUnitPre8Lbmi2 - - .globl mcl_fpDbl_mulPre8Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre8Lbmi2,@function -mcl_fpDbl_mulPre8Lbmi2: # @mcl_fpDbl_mulPre8Lbmi2 -# BB#0: - pushl %ebp - movl %esp, %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $156, %esp - calll .L112$pb -.L112$pb: - popl %ebx -.Ltmp3: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp3-.L112$pb), %ebx - movl %ebx, -96(%ebp) # 4-byte Spill - movl 16(%ebp), %esi - movl %esi, 8(%esp) - movl 12(%ebp), %edi - movl %edi, 4(%esp) - movl 8(%ebp), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre4Lbmi2@PLT - leal 16(%esi), %eax - movl %eax, 8(%esp) - leal 16(%edi), %eax - movl %eax, 4(%esp) - movl 8(%ebp), %eax - leal 32(%eax), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre4Lbmi2@PLT - movl 24(%edi), %esi - movl (%edi), %ebx - movl 4(%edi), %eax - addl 16(%edi), %ebx - movl %ebx, -120(%ebp) # 4-byte Spill - adcl 20(%edi), %eax - movl %eax, -100(%ebp) # 4-byte Spill - adcl 8(%edi), %esi - movl %esi, -108(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %eax - movl %eax, -80(%ebp) # 4-byte Spill - movl 16(%ebp), %edi - movl (%edi), %eax - movl 4(%edi), %ecx - addl 16(%edi), %eax - adcl 
20(%edi), %ecx - movl %ecx, -124(%ebp) # 4-byte Spill - movl 24(%edi), %edx - adcl 8(%edi), %edx - movl 28(%edi), %ecx - adcl 12(%edi), %ecx - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -128(%ebp) # 4-byte Spill - jb .LBB112_2 -# BB#1: - xorl %esi, %esi - xorl %ebx, %ebx -.LBB112_2: - movl %ebx, -112(%ebp) # 4-byte Spill - movl %esi, -104(%ebp) # 4-byte Spill - movl 12(%ebp), %esi - movl 28(%esi), %edi - movl -80(%ebp), %ebx # 4-byte Reload - pushl %eax - movl %ebx, %eax - addb $127, %al - sahf - popl %eax - adcl 12(%esi), %edi - movl %edi, -116(%ebp) # 4-byte Spill - movl %ecx, -84(%ebp) # 4-byte Spill - movl %edx, %edi - movl -124(%ebp), %ebx # 4-byte Reload - movl %ebx, -80(%ebp) # 4-byte Spill - movl %eax, -92(%ebp) # 4-byte Spill - jb .LBB112_4 -# BB#3: - movl $0, -84(%ebp) # 4-byte Folded Spill - movl $0, %edi - movl $0, -80(%ebp) # 4-byte Folded Spill - movl $0, -92(%ebp) # 4-byte Folded Spill -.LBB112_4: - movl %edi, -88(%ebp) # 4-byte Spill - movl -120(%ebp), %esi # 4-byte Reload - movl %esi, -60(%ebp) - movl -100(%ebp), %edi # 4-byte Reload - movl %edi, -56(%ebp) - movl -108(%ebp), %esi # 4-byte Reload - movl %esi, -52(%ebp) - movl %eax, -76(%ebp) - movl %ebx, -72(%ebp) - movl %edx, -68(%ebp) - movl %ecx, -64(%ebp) - sbbl %edx, %edx - movl -116(%ebp), %esi # 4-byte Reload - movl %esi, -48(%ebp) - movl -128(%ebp), %eax # 4-byte Reload - movl %eax, %eax - addb $127, %al - sahf - jb .LBB112_6 -# BB#5: - movl $0, %esi - movl $0, %edi -.LBB112_6: - sbbl %eax, %eax - leal -76(%ebp), %ecx - movl %ecx, 8(%esp) - leal -60(%ebp), %ecx - movl %ecx, 4(%esp) - leal -44(%ebp), %ecx - movl %ecx, (%esp) - andl %eax, %edx - movl %edi, %eax - movl -92(%ebp), %edi # 4-byte Reload - addl -112(%ebp), %edi # 4-byte Folded Reload - adcl %eax, -80(%ebp) # 4-byte Folded Spill - movl -104(%ebp), %eax # 4-byte Reload - adcl %eax, -88(%ebp) # 4-byte Folded Spill - adcl %esi, -84(%ebp) # 4-byte Folded Spill - sbbl %esi, %esi - andl $1, %esi - andl $1, %edx - movl %edx, -92(%ebp) # 4-byte Spill - movl -96(%ebp), %ebx # 4-byte Reload - calll mcl_fpDbl_mulPre4Lbmi2@PLT - addl -28(%ebp), %edi - movl -80(%ebp), %eax # 4-byte Reload - adcl -24(%ebp), %eax - movl %eax, -80(%ebp) # 4-byte Spill - movl -88(%ebp), %eax # 4-byte Reload - adcl -20(%ebp), %eax - movl %eax, -88(%ebp) # 4-byte Spill - movl -84(%ebp), %eax # 4-byte Reload - adcl -16(%ebp), %eax - movl %eax, -84(%ebp) # 4-byte Spill - adcl %esi, -92(%ebp) # 4-byte Folded Spill - movl -44(%ebp), %eax - movl 8(%ebp), %esi - subl (%esi), %eax - movl %eax, -116(%ebp) # 4-byte Spill - movl -40(%ebp), %ebx - sbbl 4(%esi), %ebx - movl -36(%ebp), %eax - sbbl 8(%esi), %eax - movl %eax, -96(%ebp) # 4-byte Spill - movl -32(%ebp), %edx - sbbl 12(%esi), %edx - movl 16(%esi), %eax - movl %eax, -100(%ebp) # 4-byte Spill - sbbl %eax, %edi - movl 20(%esi), %eax - movl %eax, -112(%ebp) # 4-byte Spill - sbbl %eax, -80(%ebp) # 4-byte Folded Spill - movl 24(%esi), %eax - movl %eax, -104(%ebp) # 4-byte Spill - sbbl %eax, -88(%ebp) # 4-byte Folded Spill - movl 28(%esi), %eax - movl %eax, -108(%ebp) # 4-byte Spill - sbbl %eax, -84(%ebp) # 4-byte Folded Spill - sbbl $0, -92(%ebp) # 4-byte Folded Spill - movl 32(%esi), %ecx - movl %ecx, -132(%ebp) # 4-byte Spill - movl -116(%ebp), %eax # 4-byte Reload - subl %ecx, %eax - movl 36(%esi), %ecx - movl %ecx, -136(%ebp) # 4-byte Spill - sbbl %ecx, %ebx - movl 40(%esi), %ecx - movl %ecx, -128(%ebp) # 4-byte Spill - sbbl %ecx, -96(%ebp) # 4-byte Folded Spill - movl 44(%esi), %ecx - movl %ecx, -140(%ebp) # 4-byte 
Spill - sbbl %ecx, %edx - movl 48(%esi), %ecx - movl %ecx, -144(%ebp) # 4-byte Spill - sbbl %ecx, %edi - movl 52(%esi), %ecx - movl %ecx, -116(%ebp) # 4-byte Spill - sbbl %ecx, -80(%ebp) # 4-byte Folded Spill - movl 56(%esi), %ecx - movl %ecx, -120(%ebp) # 4-byte Spill - sbbl %ecx, -88(%ebp) # 4-byte Folded Spill - movl 60(%esi), %ecx - movl %ecx, -124(%ebp) # 4-byte Spill - sbbl %ecx, -84(%ebp) # 4-byte Folded Spill - sbbl $0, -92(%ebp) # 4-byte Folded Spill - addl -100(%ebp), %eax # 4-byte Folded Reload - adcl -112(%ebp), %ebx # 4-byte Folded Reload - movl %eax, 16(%esi) - movl -96(%ebp), %eax # 4-byte Reload - adcl -104(%ebp), %eax # 4-byte Folded Reload - movl %ebx, 20(%esi) - adcl -108(%ebp), %edx # 4-byte Folded Reload - movl %eax, 24(%esi) - adcl -132(%ebp), %edi # 4-byte Folded Reload - movl %edx, 28(%esi) - movl -80(%ebp), %eax # 4-byte Reload - adcl -136(%ebp), %eax # 4-byte Folded Reload - movl %edi, 32(%esi) - movl -88(%ebp), %ecx # 4-byte Reload - adcl -128(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 36(%esi) - movl -84(%ebp), %eax # 4-byte Reload - adcl -140(%ebp), %eax # 4-byte Folded Reload - movl %ecx, 40(%esi) - movl -92(%ebp), %ecx # 4-byte Reload - adcl -144(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 44(%esi) - movl %ecx, 48(%esi) - movl -116(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 52(%esi) - movl -120(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 56(%esi) - movl -124(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 60(%esi) - addl $156, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end112: - .size mcl_fpDbl_mulPre8Lbmi2, .Lfunc_end112-mcl_fpDbl_mulPre8Lbmi2 - - .globl mcl_fpDbl_sqrPre8Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre8Lbmi2,@function -mcl_fpDbl_sqrPre8Lbmi2: # @mcl_fpDbl_sqrPre8Lbmi2 -# BB#0: - pushl %ebp - movl %esp, %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $156, %esp - calll .L113$pb -.L113$pb: - popl %ebx -.Ltmp4: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp4-.L113$pb), %ebx - movl %ebx, -96(%ebp) # 4-byte Spill - movl 12(%ebp), %edi - movl %edi, 8(%esp) - movl %edi, 4(%esp) - movl 8(%ebp), %esi - movl %esi, (%esp) - calll mcl_fpDbl_mulPre4Lbmi2@PLT - leal 16(%edi), %eax - movl %eax, 8(%esp) - movl %eax, 4(%esp) - leal 32(%esi), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre4Lbmi2@PLT - movl (%edi), %esi - movl 4(%edi), %ecx - addl 16(%edi), %esi - movl %esi, -108(%ebp) # 4-byte Spill - adcl 20(%edi), %ecx - seto %al - lahf - movl %eax, %edx - addl %esi, %esi - movl %esi, -84(%ebp) # 4-byte Spill - movl %ecx, %esi - adcl %esi, %esi - movl %esi, -80(%ebp) # 4-byte Spill - pushl %eax - seto %al - lahf - movl %eax, %esi - popl %eax - movl %esi, -88(%ebp) # 4-byte Spill - movl 24(%edi), %esi - pushl %eax - movl %edx, %eax - addb $127, %al - sahf - popl %eax - adcl 8(%edi), %esi - movl 28(%edi), %edx - adcl 12(%edi), %edx - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -100(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %eax - movl %eax, -104(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %ebx - sbbl %edi, %edi - movl %edi, -92(%ebp) # 4-byte Spill - pushl %eax - movl %ebx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB113_2 -# BB#1: - movl $0, -80(%ebp) # 4-byte Folded Spill - movl $0, -84(%ebp) # 4-byte Folded Spill -.LBB113_2: - movl %esi, %ebx - movl -88(%ebp), %edi # 4-byte Reload - movl %edi, %eax - addb $127, %al - sahf - adcl %ebx, %ebx - movl %edx, %edi - adcl %edi, %edi - movl -104(%ebp), %eax # 4-byte Reload - movl %eax, %eax - addb 
$127, %al - sahf - jb .LBB113_4 -# BB#3: - xorl %edi, %edi - xorl %ebx, %ebx -.LBB113_4: - movl %ebx, -88(%ebp) # 4-byte Spill - movl -108(%ebp), %eax # 4-byte Reload - movl %eax, -60(%ebp) - movl %ecx, -56(%ebp) - movl %esi, -52(%ebp) - movl %edx, -48(%ebp) - movl %eax, -76(%ebp) - movl %ecx, -72(%ebp) - movl %esi, -68(%ebp) - movl %edx, -64(%ebp) - movl -100(%ebp), %eax # 4-byte Reload - movl %eax, %eax - addb $127, %al - sahf - jb .LBB113_5 -# BB#6: - movl $0, -100(%ebp) # 4-byte Folded Spill - jmp .LBB113_7 -.LBB113_5: - shrl $31, %edx - movl %edx, -100(%ebp) # 4-byte Spill -.LBB113_7: - leal -76(%ebp), %eax - movl %eax, 8(%esp) - leal -60(%ebp), %eax - movl %eax, 4(%esp) - leal -44(%ebp), %eax - movl %eax, (%esp) - movl -92(%ebp), %esi # 4-byte Reload - andl $1, %esi - movl -96(%ebp), %ebx # 4-byte Reload - calll mcl_fpDbl_mulPre4Lbmi2@PLT - movl -84(%ebp), %eax # 4-byte Reload - addl -28(%ebp), %eax - movl %eax, -84(%ebp) # 4-byte Spill - movl -80(%ebp), %eax # 4-byte Reload - adcl -24(%ebp), %eax - movl %eax, -80(%ebp) # 4-byte Spill - movl -88(%ebp), %eax # 4-byte Reload - adcl -20(%ebp), %eax - movl %eax, -88(%ebp) # 4-byte Spill - adcl -16(%ebp), %edi - movl %edi, -92(%ebp) # 4-byte Spill - adcl -100(%ebp), %esi # 4-byte Folded Reload - movl -44(%ebp), %eax - movl 8(%ebp), %edi - subl (%edi), %eax - movl %eax, -116(%ebp) # 4-byte Spill - movl -40(%ebp), %ebx - sbbl 4(%edi), %ebx - movl -36(%ebp), %eax - sbbl 8(%edi), %eax - movl %eax, -96(%ebp) # 4-byte Spill - movl -32(%ebp), %edx - sbbl 12(%edi), %edx - movl 16(%edi), %eax - movl %eax, -100(%ebp) # 4-byte Spill - sbbl %eax, -84(%ebp) # 4-byte Folded Spill - movl 20(%edi), %eax - movl %eax, -112(%ebp) # 4-byte Spill - sbbl %eax, -80(%ebp) # 4-byte Folded Spill - movl 24(%edi), %eax - movl %eax, -104(%ebp) # 4-byte Spill - sbbl %eax, -88(%ebp) # 4-byte Folded Spill - movl 28(%edi), %eax - movl %eax, -108(%ebp) # 4-byte Spill - sbbl %eax, -92(%ebp) # 4-byte Folded Spill - sbbl $0, %esi - movl 32(%edi), %ecx - movl %ecx, -132(%ebp) # 4-byte Spill - movl -116(%ebp), %eax # 4-byte Reload - subl %ecx, %eax - movl 36(%edi), %ecx - movl %ecx, -136(%ebp) # 4-byte Spill - sbbl %ecx, %ebx - movl 40(%edi), %ecx - movl %ecx, -128(%ebp) # 4-byte Spill - sbbl %ecx, -96(%ebp) # 4-byte Folded Spill - movl 44(%edi), %ecx - movl %ecx, -140(%ebp) # 4-byte Spill - sbbl %ecx, %edx - movl 48(%edi), %ecx - movl %ecx, -144(%ebp) # 4-byte Spill - sbbl %ecx, -84(%ebp) # 4-byte Folded Spill - movl 52(%edi), %ecx - movl %ecx, -116(%ebp) # 4-byte Spill - sbbl %ecx, -80(%ebp) # 4-byte Folded Spill - movl 56(%edi), %ecx - movl %ecx, -120(%ebp) # 4-byte Spill - sbbl %ecx, -88(%ebp) # 4-byte Folded Spill - movl 60(%edi), %ecx - movl %ecx, -124(%ebp) # 4-byte Spill - sbbl %ecx, -92(%ebp) # 4-byte Folded Spill - sbbl $0, %esi - addl -100(%ebp), %eax # 4-byte Folded Reload - adcl -112(%ebp), %ebx # 4-byte Folded Reload - movl %eax, 16(%edi) - movl -96(%ebp), %eax # 4-byte Reload - adcl -104(%ebp), %eax # 4-byte Folded Reload - movl %ebx, 20(%edi) - adcl -108(%ebp), %edx # 4-byte Folded Reload - movl %eax, 24(%edi) - movl -84(%ebp), %eax # 4-byte Reload - adcl -132(%ebp), %eax # 4-byte Folded Reload - movl %edx, 28(%edi) - movl -80(%ebp), %ecx # 4-byte Reload - adcl -136(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 32(%edi) - movl -88(%ebp), %eax # 4-byte Reload - adcl -128(%ebp), %eax # 4-byte Folded Reload - movl %ecx, 36(%edi) - movl -92(%ebp), %ecx # 4-byte Reload - adcl -140(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 40(%edi) - adcl -144(%ebp), %esi # 
4-byte Folded Reload - movl %ecx, 44(%edi) - movl %esi, 48(%edi) - movl -116(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 52(%edi) - movl -120(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 56(%edi) - movl -124(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 60(%edi) - addl $156, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end113: - .size mcl_fpDbl_sqrPre8Lbmi2, .Lfunc_end113-mcl_fpDbl_sqrPre8Lbmi2 - - .globl mcl_fp_mont8Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont8Lbmi2,@function -mcl_fp_mont8Lbmi2: # @mcl_fp_mont8Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $700, %esp # imm = 0x2BC - calll .L114$pb -.L114$pb: - popl %ebx -.Ltmp5: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.L114$pb), %ebx - movl 732(%esp), %eax - movl -4(%eax), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 728(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 664(%esp), %ecx - movl 724(%esp), %edx - calll .LmulPv256x32 - movl 664(%esp), %ebp - movl 668(%esp), %edi - movl %ebp, %eax - imull %esi, %eax - movl 696(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 692(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 688(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 684(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 680(%esp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 676(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 672(%esp), %esi - movl %eax, (%esp) - leal 624(%esp), %ecx - movl 732(%esp), %edx - calll .LmulPv256x32 - addl 624(%esp), %ebp - adcl 628(%esp), %edi - adcl 632(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 636(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 656(%esp), %ebp - sbbl %eax, %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 728(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 584(%esp), %ecx - movl 724(%esp), %edx - calll .LmulPv256x32 - movl 60(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 584(%esp), %edi - adcl 588(%esp), %esi - movl 40(%esp), %ecx # 4-byte Reload - adcl 592(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 28(%esp), %ecx # 4-byte Reload - adcl 596(%esp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 600(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 604(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 608(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl 612(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - adcl 616(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %edi, %eax - imull 24(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 544(%esp), %ecx - movl 732(%esp), %eax - movl %eax, %edx - calll .LmulPv256x32 - andl $1, %ebp - addl 544(%esp), %edi - adcl 548(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 
560(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 568(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl $0, %ebp - movl 728(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 504(%esp), %ecx - movl 724(%esp), %edx - calll .LmulPv256x32 - addl 504(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 524(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 536(%esp), %ebp - sbbl %edi, %edi - movl %esi, %eax - imull 24(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 464(%esp), %ecx - movl 732(%esp), %edx - calll .LmulPv256x32 - movl %edi, %eax - andl $1, %eax - addl 464(%esp), %esi - movl 40(%esp), %ecx # 4-byte Reload - adcl 468(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 28(%esp), %ecx # 4-byte Reload - adcl 472(%esp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 476(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - adcl 480(%esp), %edi - movl 52(%esp), %ecx # 4-byte Reload - adcl 484(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - adcl 488(%esp), %esi - movl 60(%esp), %ecx # 4-byte Reload - adcl 492(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl 496(%esp), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 728(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 424(%esp), %ecx - movl 724(%esp), %eax - movl %eax, %edx - calll .LmulPv256x32 - movl 40(%esp), %ecx # 4-byte Reload - addl 424(%esp), %ecx - movl 28(%esp), %ebp # 4-byte Reload - adcl 428(%esp), %ebp - movl 36(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 436(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 440(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 444(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 448(%esp), %edi - movl 32(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 24(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 384(%esp), %ecx - movl 732(%esp), %edx - calll .LmulPv256x32 - movl 40(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 384(%esp), %esi - adcl 388(%esp), %ebp - movl %ebp, 28(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - adcl 392(%esp), %esi - movl 48(%esp), %ecx # 4-byte Reload - adcl 396(%esp), %ecx - movl %ecx, 48(%esp) 
# 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 400(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 404(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl 408(%esp), %edi - movl 32(%esp), %ecx # 4-byte Reload - adcl 412(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 44(%esp), %ebp # 4-byte Reload - adcl 416(%esp), %ebp - adcl $0, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 728(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 344(%esp), %ecx - movl 724(%esp), %edx - calll .LmulPv256x32 - movl 28(%esp), %ecx # 4-byte Reload - addl 344(%esp), %ecx - adcl 348(%esp), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - adcl 352(%esp), %esi - movl 52(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 364(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl 372(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %eax - movl %ecx, %edi - imull 24(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 304(%esp), %ecx - movl 732(%esp), %edx - calll .LmulPv256x32 - movl %ebp, %eax - andl $1, %eax - addl 304(%esp), %edi - movl 36(%esp), %ecx # 4-byte Reload - adcl 308(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl 312(%esp), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 316(%esp), %ebp - movl 56(%esp), %ecx # 4-byte Reload - adcl 320(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 324(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 32(%esp), %esi # 4-byte Reload - adcl 328(%esp), %esi - movl 44(%esp), %ecx # 4-byte Reload - adcl 332(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - adcl 336(%esp), %edi - adcl $0, %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 728(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 264(%esp), %ecx - movl 724(%esp), %edx - calll .LmulPv256x32 - movl 36(%esp), %ecx # 4-byte Reload - addl 264(%esp), %ecx - movl 48(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 272(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 276(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 284(%esp), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 292(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 24(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 224(%esp), %ecx - movl 732(%esp), %edx - calll .LmulPv256x32 - movl %edi, %eax - andl $1, %eax - addl 224(%esp), %esi - movl 48(%esp), %ecx # 4-byte Reload - adcl 228(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - adcl 232(%esp), %esi - adcl 236(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 240(%esp), 
%edi - movl 32(%esp), %ecx # 4-byte Reload - adcl 244(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 44(%esp), %ebp # 4-byte Reload - adcl 248(%esp), %ebp - movl 40(%esp), %ecx # 4-byte Reload - adcl 252(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 28(%esp), %ecx # 4-byte Reload - adcl 256(%esp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 728(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 184(%esp), %ecx - movl 724(%esp), %edx - calll .LmulPv256x32 - movl 48(%esp), %ecx # 4-byte Reload - addl 184(%esp), %ecx - adcl 188(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 196(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl 204(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 28(%esp), %ebp # 4-byte Reload - adcl 212(%esp), %ebp - movl 36(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 24(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 144(%esp), %ecx - movl 732(%esp), %edx - calll .LmulPv256x32 - movl %edi, %ecx - andl $1, %ecx - addl 144(%esp), %esi - movl 52(%esp), %eax # 4-byte Reload - adcl 148(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - adcl 152(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 156(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 32(%esp), %edi # 4-byte Reload - adcl 160(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 164(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 168(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 172(%esp), %ebp - movl %ebp, 28(%esp) # 4-byte Spill - movl 36(%esp), %ebp # 4-byte Reload - adcl 176(%esp), %ebp - adcl $0, %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 728(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 104(%esp), %ecx - movl 724(%esp), %edx - calll .LmulPv256x32 - movl 52(%esp), %ecx # 4-byte Reload - addl 104(%esp), %ecx - adcl 108(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 116(%esp), %edi - movl %edi, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 120(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 124(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 28(%esp), %edi # 4-byte Reload - adcl 128(%esp), %edi - adcl 132(%esp), %ebp - movl %ebp, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 136(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - sbbl %esi, %esi - movl 24(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %ebp - movl %eax, (%esp) - leal 64(%esp), %ecx - movl 732(%esp), %edx - calll .LmulPv256x32 - andl $1, %esi - addl 64(%esp), %ebp - movl 32(%esp), %ebx # 4-byte Reload - movl 56(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 72(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl 76(%esp), %ebx - movl 44(%esp), %edx # 4-byte Reload - adcl 80(%esp), %edx - movl %edx, 44(%esp) 
# 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - adcl 84(%esp), %edx - movl %edx, 40(%esp) # 4-byte Spill - adcl 88(%esp), %edi - movl %edi, 28(%esp) # 4-byte Spill - movl 36(%esp), %edx # 4-byte Reload - adcl 92(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - adcl 96(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - adcl $0, %esi - movl %eax, %edx - movl 732(%esp), %ebp - subl (%ebp), %edx - movl %ecx, %eax - sbbl 4(%ebp), %eax - movl %ebx, %ecx - sbbl 8(%ebp), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - sbbl 12(%ebp), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - sbbl 16(%ebp), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - sbbl 20(%ebp), %edi - movl %edi, 24(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - sbbl 24(%ebp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - sbbl 28(%ebp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - sbbl $0, %esi - andl $1, %esi - movl %esi, %ecx - jne .LBB114_2 -# BB#1: - movl %edx, %ebp -.LBB114_2: - movl 720(%esp), %edx - movl %ebp, (%edx) - testb %cl, %cl - movl 60(%esp), %ebp # 4-byte Reload - jne .LBB114_4 -# BB#3: - movl %eax, %ebp -.LBB114_4: - movl %ebp, 4(%edx) - jne .LBB114_6 -# BB#5: - movl 12(%esp), %ebx # 4-byte Reload -.LBB114_6: - movl %ebx, 8(%edx) - movl 28(%esp), %eax # 4-byte Reload - jne .LBB114_8 -# BB#7: - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 44(%esp) # 4-byte Spill -.LBB114_8: - movl 44(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%edx) - movl 40(%esp), %edi # 4-byte Reload - jne .LBB114_10 -# BB#9: - movl 20(%esp), %edi # 4-byte Reload -.LBB114_10: - movl %edi, 16(%edx) - jne .LBB114_12 -# BB#11: - movl 24(%esp), %eax # 4-byte Reload -.LBB114_12: - movl %eax, 20(%edx) - movl 36(%esp), %eax # 4-byte Reload - jne .LBB114_14 -# BB#13: - movl 32(%esp), %eax # 4-byte Reload -.LBB114_14: - movl %eax, 24(%edx) - movl 48(%esp), %eax # 4-byte Reload - jne .LBB114_16 -# BB#15: - movl 52(%esp), %eax # 4-byte Reload -.LBB114_16: - movl %eax, 28(%edx) - addl $700, %esp # imm = 0x2BC - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end114: - .size mcl_fp_mont8Lbmi2, .Lfunc_end114-mcl_fp_mont8Lbmi2 - - .globl mcl_fp_montNF8Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF8Lbmi2,@function -mcl_fp_montNF8Lbmi2: # @mcl_fp_montNF8Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $700, %esp # imm = 0x2BC - calll .L115$pb -.L115$pb: - popl %ebx -.Ltmp6: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp6-.L115$pb), %ebx - movl 732(%esp), %eax - movl -4(%eax), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 728(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 664(%esp), %ecx - movl 724(%esp), %edx - calll .LmulPv256x32 - movl 664(%esp), %ebp - movl 668(%esp), %edi - movl %ebp, %eax - imull %esi, %eax - movl 696(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 692(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 688(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 684(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 680(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 676(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 672(%esp), %esi - movl %eax, (%esp) - leal 624(%esp), %ecx - movl 732(%esp), %edx - calll .LmulPv256x32 - addl 624(%esp), %ebp - adcl 628(%esp), %edi - adcl 632(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 636(%esp), %eax - movl %eax, 44(%esp) # 
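mcl_fp_mont8Lbmi2, which ends above, is an unrolled word-serial (CIOS-style) Montgomery multiplication: for each 32-bit digit of the multiplier it accumulates a partial product via .LmulPv256x32, picks m with the imull of the value loaded from -4 off the modulus pointer (presumably -p^{-1} mod 2^32) so that adding m*p cancels the low word, shifts down one word, and finishes with a single conditional subtraction of p (the sbbl/andl/jne block). A compact reference sketch of that flow, assuming 8 little-endian 32-bit limbs; montMul, n, and inv are illustrative names, not mcl's API:

```go
package mont

import "math/bits"

const n = 8 // 8 x 32-bit limbs = 256 bits

// montMul returns a*b*R^{-1} mod p with R = 2^(32n), following the
// word-serial flow the unrolled assembly above implements.
func montMul(a, b, p [n]uint32, inv uint32) [n]uint32 {
	var t [n + 2]uint32
	for i := 0; i < n; i++ {
		var c uint32
		for j := 0; j < n; j++ { // t += a * b[i]
			hi, lo := bits.Mul32(a[j], b[i])
			s, c1 := bits.Add32(t[j], lo, 0)
			s, c2 := bits.Add32(s, c, 0)
			t[j], c = s, hi+c1+c2 // hi <= 2^32-2, so no overflow
		}
		t[n], c = bits.Add32(t[n], c, 0)
		t[n+1] += c

		m := t[0] * inv // makes t + m*p divisible by 2^32
		c = 0
		for j := 0; j < n; j++ { // t += m * p
			hi, lo := bits.Mul32(m, p[j])
			s, c1 := bits.Add32(t[j], lo, 0)
			s, c2 := bits.Add32(s, c, 0)
			t[j], c = s, hi+c1+c2
		}
		t[n], c = bits.Add32(t[n], c, 0)
		t[n+1] += c

		copy(t[:], t[1:]) // t[0] is now zero: shift down one word
		t[n+1] = 0
	}
	// final conditional subtraction, as in the jne/testb block above
	var r [n]uint32
	var borrow uint32
	for j := 0; j < n; j++ {
		r[j], borrow = bits.Sub32(t[j], p[j], borrow)
	}
	if t[n] != 0 || borrow == 0 { // t >= p: keep the reduced copy
		return r
	}
	copy(r[:], t[:n])
	return r
}
```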
4-byte Spill - movl 36(%esp), %ebp # 4-byte Reload - adcl 640(%esp), %ebp - movl 40(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 728(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 584(%esp), %ecx - movl 724(%esp), %edx - calll .LmulPv256x32 - movl 616(%esp), %ecx - addl 584(%esp), %edi - adcl 588(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 596(%esp), %ebp - movl %ebp, 36(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 604(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl %edi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 544(%esp), %ecx - movl 732(%esp), %eax - movl %eax, %edx - calll .LmulPv256x32 - addl 544(%esp), %edi - adcl 548(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 564(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - adcl 572(%esp), %edi - movl 52(%esp), %ebp # 4-byte Reload - adcl 576(%esp), %ebp - movl 728(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 504(%esp), %ecx - movl 724(%esp), %edx - calll .LmulPv256x32 - movl 536(%esp), %ecx - addl 504(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 528(%esp), %edi - adcl 532(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl %esi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 464(%esp), %ecx - movl 732(%esp), %edx - calll .LmulPv256x32 - addl 464(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 36(%esp), %ebp # 4-byte Reload - adcl 472(%esp), %ebp - movl 40(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - adcl 484(%esp), %esi - adcl 488(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 
52(%esp) # 4-byte Spill - movl 32(%esp), %edi # 4-byte Reload - adcl 496(%esp), %edi - movl 728(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 424(%esp), %ecx - movl 724(%esp), %edx - calll .LmulPv256x32 - movl 456(%esp), %eax - movl 44(%esp), %edx # 4-byte Reload - addl 424(%esp), %edx - adcl 428(%esp), %ebp - movl %ebp, 36(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 432(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 436(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - adcl 440(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 444(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 448(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl 452(%esp), %edi - movl %edi, %ebp - movl %eax, %edi - adcl $0, %edi - movl %edx, %eax - movl %edx, %esi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 384(%esp), %ecx - movl 732(%esp), %edx - calll .LmulPv256x32 - addl 384(%esp), %esi - movl 36(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - adcl 396(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 412(%esp), %ebp - adcl 416(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 728(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 344(%esp), %ecx - movl 724(%esp), %edx - calll .LmulPv256x32 - movl 376(%esp), %edx - movl 36(%esp), %ecx # 4-byte Reload - addl 344(%esp), %ecx - movl 40(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 352(%esp), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - adcl 360(%esp), %esi - movl 52(%esp), %edi # 4-byte Reload - adcl 364(%esp), %edi - adcl 368(%esp), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 304(%esp), %ecx - movl 732(%esp), %edx - calll .LmulPv256x32 - addl 304(%esp), %ebp - movl 40(%esp), %ebp # 4-byte Reload - adcl 308(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 320(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - adcl 324(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 32(%esp), %esi # 4-byte Reload - adcl 328(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 36(%esp), %edi # 4-byte Reload - adcl 336(%esp), %edi - movl 728(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 264(%esp), %ecx - movl 724(%esp), %eax - movl %eax, %edx - calll .LmulPv256x32 - movl 296(%esp), %edx - movl %ebp, %ecx - addl 264(%esp), %ecx - movl 48(%esp), %eax # 
4-byte Reload - adcl 268(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 280(%esp), %ebp - adcl 284(%esp), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 292(%esp), %edi - movl %edi, 36(%esp) # 4-byte Spill - movl %edx, %edi - adcl $0, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 224(%esp), %ecx - movl 732(%esp), %edx - calll .LmulPv256x32 - addl 224(%esp), %esi - movl 48(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - adcl 236(%esp), %esi - adcl 240(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 256(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 728(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 184(%esp), %ecx - movl 724(%esp), %edx - calll .LmulPv256x32 - movl 216(%esp), %ebp - movl 48(%esp), %ecx # 4-byte Reload - addl 184(%esp), %ecx - movl 60(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 192(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 196(%esp), %edi - movl 32(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ecx, %eax - movl %ecx, %esi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 144(%esp), %ecx - movl 732(%esp), %edx - calll .LmulPv256x32 - addl 144(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 148(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 152(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 156(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 32(%esp), %esi # 4-byte Reload - adcl 160(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 164(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 168(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 176(%esp), %ebp - movl 728(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 104(%esp), %ecx - movl 724(%esp), %edx - calll .LmulPv256x32 - movl 136(%esp), %edi - movl 60(%esp), %ecx # 4-byte Reload - addl 104(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 116(%esp), %esi 
- movl %esi, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 120(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 124(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 128(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 132(%esp), %ebp - adcl $0, %edi - movl 28(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 64(%esp), %ecx - movl 732(%esp), %edx - calll .LmulPv256x32 - addl 64(%esp), %esi - movl 32(%esp), %esi # 4-byte Reload - movl 56(%esp), %eax # 4-byte Reload - movl 44(%esp), %ebx # 4-byte Reload - adcl 68(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 72(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl 76(%esp), %esi - movl %esi, 32(%esp) # 4-byte Spill - adcl 80(%esp), %ebx - movl %ebx, 44(%esp) # 4-byte Spill - movl 36(%esp), %edx # 4-byte Reload - adcl 84(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - adcl 88(%esp), %edx - movl %edx, 40(%esp) # 4-byte Spill - adcl 92(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - adcl 96(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - movl %eax, %edx - movl 732(%esp), %eax - subl (%eax), %edx - sbbl 4(%eax), %ecx - sbbl 8(%eax), %esi - sbbl 12(%eax), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - movl 36(%esp), %ebx # 4-byte Reload - sbbl 16(%eax), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 40(%esp), %ebx # 4-byte Reload - sbbl 20(%eax), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - sbbl 24(%eax), %ebp - movl %ebp, 24(%esp) # 4-byte Spill - sbbl 28(%eax), %edi - movl %edi, 28(%esp) # 4-byte Spill - testl %edi, %edi - js .LBB115_2 -# BB#1: - movl %edx, 56(%esp) # 4-byte Spill -.LBB115_2: - movl 720(%esp), %edx - movl 56(%esp), %eax # 4-byte Reload - movl %eax, (%edx) - movl 52(%esp), %eax # 4-byte Reload - js .LBB115_4 -# BB#3: - movl %ecx, %eax -.LBB115_4: - movl %eax, 4(%edx) - js .LBB115_6 -# BB#5: - movl %esi, 32(%esp) # 4-byte Spill -.LBB115_6: - movl 32(%esp), %eax # 4-byte Reload - movl %eax, 8(%edx) - movl 36(%esp), %edi # 4-byte Reload - movl 40(%esp), %ebp # 4-byte Reload - movl 60(%esp), %eax # 4-byte Reload - movl 48(%esp), %ecx # 4-byte Reload - js .LBB115_8 -# BB#7: - movl 12(%esp), %esi # 4-byte Reload - movl %esi, 44(%esp) # 4-byte Spill -.LBB115_8: - movl 44(%esp), %esi # 4-byte Reload - movl %esi, 12(%edx) - js .LBB115_10 -# BB#9: - movl 16(%esp), %edi # 4-byte Reload -.LBB115_10: - movl %edi, 16(%edx) - js .LBB115_12 -# BB#11: - movl 20(%esp), %ebp # 4-byte Reload -.LBB115_12: - movl %ebp, 20(%edx) - js .LBB115_14 -# BB#13: - movl 24(%esp), %eax # 4-byte Reload -.LBB115_14: - movl %eax, 24(%edx) - js .LBB115_16 -# BB#15: - movl 28(%esp), %ecx # 4-byte Reload -.LBB115_16: - movl %ecx, 28(%edx) - addl $700, %esp # imm = 0x2BC - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end115: - .size mcl_fp_montNF8Lbmi2, .Lfunc_end115-mcl_fp_montNF8Lbmi2 - - .globl mcl_fp_montRed8Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed8Lbmi2,@function -mcl_fp_montRed8Lbmi2: # @mcl_fp_montRed8Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $428, %esp # imm = 0x1AC - calll .L116$pb -.L116$pb: - popl %ebx -.Ltmp7: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp7-.L116$pb), %ebx - movl 456(%esp), %edx - movl -4(%edx), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 452(%esp), %eax - movl (%eax), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 4(%eax), %ecx - movl 
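mcl_fp_montNF8Lbmi2, ending above, runs the same interleaved multiplication skeleton (the alternating .LmulPv256x32 calls and imull of the stashed inverse), but its epilogue differs: instead of tracking an explicit spill carry it subtracts p once and selects limb by limb on the sign of the top difference (the js branches). A sketch of just that final selection, assuming the running value t already fits in 8 limbs and lies below 2p; finalSelect is an illustrative name:

```go
package montnf

import "math/bits"

const n = 8

// finalSelect returns t mod p for t < 2p: compute d = t - p and keep
// d unless the subtraction borrowed, which is what the js-based
// limb-by-limb selection above decides.
func finalSelect(t, p [n]uint32) [n]uint32 {
	var d [n]uint32
	var borrow uint32
	for i := 0; i < n; i++ {
		d[i], borrow = bits.Sub32(t[i], p[i], borrow)
	}
	if borrow != 0 { // t < p: the result is t itself
		return t
	}
	return d
}
```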
%ecx, 64(%esp) # 4-byte Spill - movl %esi, %ecx - imull %edi, %ecx - movl 60(%eax), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 56(%eax), %edi - movl %edi, 88(%esp) # 4-byte Spill - movl 52(%eax), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 48(%eax), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 44(%eax), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 40(%eax), %edi - movl %edi, 104(%esp) # 4-byte Spill - movl 36(%eax), %edi - movl %edi, 108(%esp) # 4-byte Spill - movl 32(%eax), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 28(%eax), %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 24(%eax), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 20(%eax), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 16(%eax), %ebp - movl 12(%eax), %edi - movl 8(%eax), %esi - movl (%edx), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 28(%edx), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 24(%edx), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 20(%edx), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 16(%edx), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 12(%edx), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 8(%edx), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 4(%edx), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl %ecx, (%esp) - leal 392(%esp), %ecx - calll .LmulPv256x32 - movl 56(%esp), %eax # 4-byte Reload - addl 392(%esp), %eax - movl 64(%esp), %ecx # 4-byte Reload - adcl 396(%esp), %ecx - adcl 400(%esp), %esi - movl %esi, 16(%esp) # 4-byte Spill - adcl 404(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - adcl 408(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - movl 76(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 100(%esp) # 4-byte Folded Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 88(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - sbbl %eax, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl %ecx, %edi - movl %edi, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 352(%esp), %ecx - movl 456(%esp), %edx - calll .LmulPv256x32 - movl 64(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 352(%esp), %edi - movl 16(%esp), %edx # 4-byte Reload - adcl 356(%esp), %edx - movl 52(%esp), %ecx # 4-byte Reload - adcl 360(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 364(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 368(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 372(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 376(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 380(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 384(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, %esi - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 80(%esp) # 4-byte Spill - adcl $0, 
88(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl %edx, %edi - movl %edi, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 312(%esp), %ecx - movl 456(%esp), %edx - calll .LmulPv256x32 - addl 312(%esp), %edi - movl 52(%esp), %edi # 4-byte Reload - adcl 316(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 100(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl 80(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 88(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, 64(%esp) # 4-byte Folded Spill - movl %edi, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 272(%esp), %ecx - movl 456(%esp), %edx - calll .LmulPv256x32 - addl 272(%esp), %edi - movl 56(%esp), %ecx # 4-byte Reload - adcl 276(%esp), %ecx - movl 68(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 100(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 88(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl 84(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 64(%esp) # 4-byte Folded Spill - movl %ecx, %ebp - movl %ebp, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 232(%esp), %ecx - movl 456(%esp), %edx - calll .LmulPv256x32 - addl 232(%esp), %ebp - movl 68(%esp), %ecx # 4-byte Reload - adcl 236(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 108(%esp), %ebp # 4-byte Reload - adcl 252(%esp), %ebp - movl 104(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 88(%esp) # 4-byte Spill - adcl $0, 
%esi - movl %esi, 84(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl $0, %esi - movl %ecx, %edi - movl %edi, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 192(%esp), %ecx - movl 456(%esp), %edx - calll .LmulPv256x32 - addl 192(%esp), %edi - movl 72(%esp), %ecx # 4-byte Reload - adcl 196(%esp), %ecx - movl 96(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %edi # 4-byte Reload - adcl 204(%esp), %edi - adcl 208(%esp), %ebp - movl %ebp, 108(%esp) # 4-byte Spill - movl 104(%esp), %ebp # 4-byte Reload - adcl 212(%esp), %ebp - movl 76(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, 88(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 64(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 152(%esp), %ecx - movl 456(%esp), %edx - calll .LmulPv256x32 - addl 152(%esp), %esi - movl 96(%esp), %ecx # 4-byte Reload - adcl 156(%esp), %ecx - adcl 160(%esp), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 164(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 168(%esp), %ebp - movl %ebp, 104(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 172(%esp), %edi - movl 100(%esp), %eax # 4-byte Reload - adcl 176(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 180(%esp), %ebp - movl 88(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, 64(%esp) # 4-byte Folded Spill - movl 60(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 112(%esp), %ecx - movl 456(%esp), %edx - calll .LmulPv256x32 - addl 112(%esp), %esi - movl 92(%esp), %ecx # 4-byte Reload - adcl 116(%esp), %ecx - movl 108(%esp), %eax # 4-byte Reload - adcl 120(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 124(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 128(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl %edi, %ebx - movl 100(%esp), %eax # 4-byte Reload - adcl 132(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl %eax, %esi - adcl 136(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 140(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 144(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl %ecx, %edx - subl 24(%esp), %edx # 4-byte Folded Reload - movl 108(%esp), %eax # 4-byte Reload - sbbl 20(%esp), %eax # 4-byte Folded Reload - movl 104(%esp), %ebp # 4-byte Reload - sbbl 28(%esp), %ebp # 4-byte Folded Reload - sbbl 32(%esp), %ebx # 4-byte Folded Reload - sbbl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 68(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - sbbl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 72(%esp) # 4-byte Spill - movl 88(%esp), %esi # 4-byte Reload - sbbl 44(%esp), %esi # 4-byte Folded Reload - movl %esi, 92(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - sbbl 48(%esp), %esi 
# 4-byte Folded Reload - movl %esi, 96(%esp) # 4-byte Spill - sbbl $0, %edi - andl $1, %edi - jne .LBB116_2 -# BB#1: - movl %edx, %ecx -.LBB116_2: - movl 448(%esp), %edx - movl %ecx, (%edx) - movl %edi, %ecx - testb %cl, %cl - jne .LBB116_4 -# BB#3: - movl %eax, 108(%esp) # 4-byte Spill -.LBB116_4: - movl 108(%esp), %eax # 4-byte Reload - movl %eax, 4(%edx) - movl 104(%esp), %eax # 4-byte Reload - jne .LBB116_6 -# BB#5: - movl %ebp, %eax -.LBB116_6: - movl %eax, 8(%edx) - movl 84(%esp), %eax # 4-byte Reload - movl 76(%esp), %ebp # 4-byte Reload - jne .LBB116_8 -# BB#7: - movl %ebx, %ebp -.LBB116_8: - movl %ebp, 12(%edx) - movl 100(%esp), %ebx # 4-byte Reload - jne .LBB116_10 -# BB#9: - movl 68(%esp), %ebx # 4-byte Reload -.LBB116_10: - movl %ebx, 16(%edx) - movl 80(%esp), %edi # 4-byte Reload - jne .LBB116_12 -# BB#11: - movl 72(%esp), %edi # 4-byte Reload -.LBB116_12: - movl %edi, 20(%edx) - movl 88(%esp), %esi # 4-byte Reload - jne .LBB116_14 -# BB#13: - movl 92(%esp), %esi # 4-byte Reload -.LBB116_14: - movl %esi, 24(%edx) - jne .LBB116_16 -# BB#15: - movl 96(%esp), %eax # 4-byte Reload -.LBB116_16: - movl %eax, 28(%edx) - addl $428, %esp # imm = 0x1AC - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end116: - .size mcl_fp_montRed8Lbmi2, .Lfunc_end116-mcl_fp_montRed8Lbmi2 - - .globl mcl_fp_addPre8Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre8Lbmi2,@function -mcl_fp_addPre8Lbmi2: # @mcl_fp_addPre8Lbmi2 -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - movl 20(%esp), %esi - addl (%esi), %ecx - adcl 4(%esi), %edx - movl 8(%eax), %edi - adcl 8(%esi), %edi - movl 16(%esp), %ebx - movl %ecx, (%ebx) - movl 12(%esi), %ecx - movl %edx, 4(%ebx) - movl 16(%esi), %edx - adcl 12(%eax), %ecx - adcl 16(%eax), %edx - movl %edi, 8(%ebx) - movl 20(%eax), %edi - movl %ecx, 12(%ebx) - movl 20(%esi), %ecx - adcl %edi, %ecx - movl 24(%eax), %edi - movl %edx, 16(%ebx) - movl 24(%esi), %edx - adcl %edi, %edx - movl %ecx, 20(%ebx) - movl %edx, 24(%ebx) - movl 28(%eax), %eax - movl 28(%esi), %ecx - adcl %eax, %ecx - movl %ecx, 28(%ebx) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end117: - .size mcl_fp_addPre8Lbmi2, .Lfunc_end117-mcl_fp_addPre8Lbmi2 - - .globl mcl_fp_subPre8Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre8Lbmi2,@function -mcl_fp_subPre8Lbmi2: # @mcl_fp_subPre8Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %edx - movl 4(%ecx), %esi - xorl %eax, %eax - movl 28(%esp), %edi - subl (%edi), %edx - sbbl 4(%edi), %esi - movl 8(%ecx), %ebx - sbbl 8(%edi), %ebx - movl 20(%esp), %ebp - movl %edx, (%ebp) - movl 12(%ecx), %edx - sbbl 12(%edi), %edx - movl %esi, 4(%ebp) - movl 16(%ecx), %esi - sbbl 16(%edi), %esi - movl %ebx, 8(%ebp) - movl 20(%edi), %ebx - movl %edx, 12(%ebp) - movl 20(%ecx), %edx - sbbl %ebx, %edx - movl 24(%edi), %ebx - movl %esi, 16(%ebp) - movl 24(%ecx), %esi - sbbl %ebx, %esi - movl %edx, 20(%ebp) - movl %esi, 24(%ebp) - movl 28(%edi), %edx - movl 28(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 28(%ebp) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end118: - .size mcl_fp_subPre8Lbmi2, .Lfunc_end118-mcl_fp_subPre8Lbmi2 - - .globl mcl_fp_shr1_8Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_8Lbmi2,@function -mcl_fp_shr1_8Lbmi2: # @mcl_fp_shr1_8Lbmi2 -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - shrdl $1, %edx, %ecx - movl 8(%esp), %esi - 
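mcl_fp_montRed8Lbmi2 above is the standalone Montgomery reduction: it takes a 16-limb product, cancels one low word per round by adding m*p with m derived from the inverse stored at -4 off the modulus pointer, and conditionally subtracts p from the upper half at the end. The two helpers after it, mcl_fp_addPre8Lbmi2 and mcl_fp_subPre8Lbmi2, are plain 8-limb carry and borrow chains that return the final carry or borrow in %eax (the sbbl/andl $1 tails). A reference sketch of the reduction; montRed, inv, and the limb layout are assumptions:

```go
package montred

import "math/bits"

const n = 8

// montRed folds a 2n-limb value x down to x*R^{-1} mod p, R = 2^(32n):
// each round cancels the current low limb with m = x[i]*inv mod 2^32,
// then one conditional subtraction of p fixes the upper half.
func montRed(x [2 * n]uint32, p [n]uint32, inv uint32) [n]uint32 {
	var top uint32 // rolling carry along the upper limbs
	for i := 0; i < n; i++ {
		m := x[i] * inv
		var c uint32
		for j := 0; j < n; j++ { // x += m*p at position i
			hi, lo := bits.Mul32(m, p[j])
			s, c1 := bits.Add32(x[i+j], lo, 0)
			s, c2 := bits.Add32(s, c, 0)
			x[i+j], c = s, hi+c1+c2
		}
		s, c3 := bits.Add32(x[i+n], c, 0)
		s, c4 := bits.Add32(s, top, 0)
		x[i+n], top = s, c3+c4
	}
	var r [n]uint32
	var borrow uint32
	for j := 0; j < n; j++ {
		r[j], borrow = bits.Sub32(x[n+j], p[j], borrow)
	}
	if top != 0 || borrow == 0 { // upper half >= p
		return r
	}
	copy(r[:], x[n:])
	return r
}
```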
movl %ecx, (%esi) - movl 8(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 4(%esi) - movl 12(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 8(%esi) - movl 16(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 12(%esi) - movl 20(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 16(%esi) - movl 24(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 20(%esi) - movl 28(%eax), %eax - shrdl $1, %eax, %ecx - movl %ecx, 24(%esi) - shrl %eax - movl %eax, 28(%esi) - popl %esi - retl -.Lfunc_end119: - .size mcl_fp_shr1_8Lbmi2, .Lfunc_end119-mcl_fp_shr1_8Lbmi2 - - .globl mcl_fp_add8Lbmi2 - .align 16, 0x90 - .type mcl_fp_add8Lbmi2,@function -mcl_fp_add8Lbmi2: # @mcl_fp_add8Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $20, %esp - movl 48(%esp), %edi - movl (%edi), %ecx - movl 4(%edi), %eax - movl 44(%esp), %edx - addl (%edx), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl %ecx, %ebp - adcl 4(%edx), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 8(%edi), %eax - adcl 8(%edx), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 12(%edx), %esi - movl 16(%edx), %eax - adcl 12(%edi), %esi - adcl 16(%edi), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 20(%edx), %ecx - adcl 20(%edi), %ecx - movl 24(%edx), %ebx - adcl 24(%edi), %ebx - movl 28(%edx), %edi - movl 48(%esp), %edx - adcl 28(%edx), %edi - movl 40(%esp), %edx - movl %ebp, (%edx) - movl 16(%esp), %ebp # 4-byte Reload - movl %ebp, 4(%edx) - movl 12(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%edx) - movl %esi, 12(%edx) - movl %eax, 16(%edx) - movl %ecx, 20(%edx) - movl %ebx, 24(%edx) - movl %edi, 28(%edx) - sbbl %eax, %eax - andl $1, %eax - movl 52(%esp), %edx - movl 8(%esp), %ebp # 4-byte Reload - subl (%edx), %ebp - movl %ebp, 8(%esp) # 4-byte Spill - movl 16(%esp), %ebp # 4-byte Reload - movl 52(%esp), %edx - sbbl 4(%edx), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 12(%esp), %ebp # 4-byte Reload - movl 52(%esp), %edx - sbbl 8(%edx), %ebp - movl %ebp, 12(%esp) # 4-byte Spill - movl 52(%esp), %ebp - sbbl 12(%ebp), %esi - movl %esi, (%esp) # 4-byte Spill - movl 4(%esp), %edx # 4-byte Reload - sbbl 16(%ebp), %edx - movl %edx, %esi - sbbl 20(%ebp), %ecx - sbbl 24(%ebp), %ebx - sbbl 28(%ebp), %edi - sbbl $0, %eax - testb $1, %al - jne .LBB120_2 -# BB#1: # %nocarry - movl 8(%esp), %edx # 4-byte Reload - movl 40(%esp), %ebp - movl %edx, (%ebp) - movl 16(%esp), %edx # 4-byte Reload - movl %edx, 4(%ebp) - movl 12(%esp), %edx # 4-byte Reload - movl %edx, 8(%ebp) - movl (%esp), %eax # 4-byte Reload - movl %eax, 12(%ebp) - movl %esi, 16(%ebp) - movl %ecx, 20(%ebp) - movl %ebx, 24(%ebp) - movl %edi, 28(%ebp) -.LBB120_2: # %carry - addl $20, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end120: - .size mcl_fp_add8Lbmi2, .Lfunc_end120-mcl_fp_add8Lbmi2 - - .globl mcl_fp_addNF8Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF8Lbmi2,@function -mcl_fp_addNF8Lbmi2: # @mcl_fp_addNF8Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $56, %esp - movl 84(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edi - movl 80(%esp), %ebx - addl (%ebx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - adcl 4(%ebx), %edi - movl %edi, 32(%esp) # 4-byte Spill - movl 28(%eax), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 24(%eax), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 20(%eax), %ebp - movl 16(%eax), %esi - movl 12(%eax), %edx - movl 8(%eax), %ecx - adcl 8(%ebx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - adcl 12(%ebx), %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl 16(%ebx), %esi - movl %esi, 40(%esp) # 4-byte 
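mcl_fp_shr1_8Lbmi2 above halves an 8-limb value: a cascade of shrdl $1 instructions funnels each limb's low bit into the previous limb's top bit, and the last limb gets a plain shrl. The equivalent in Go, assuming little-endian 32-bit limbs:

```go
package shr

const n = 8

// shr1 shifts an n-limb value right by one bit: each limb is shifted
// down and receives the low bit of the next limb, exactly what the
// shrdl $1 chain above does; the top limb gets a plain shift.
func shr1(a [n]uint32) [n]uint32 {
	var r [n]uint32
	for i := 0; i < n-1; i++ {
		r[i] = a[i]>>1 | a[i+1]<<31
	}
	r[n-1] = a[n-1] >> 1
	return r
}
```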
Spill - adcl 20(%ebx), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 24(%ebx), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 28(%ebx), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 88(%esp), %ebx - movl 24(%esp), %ebp # 4-byte Reload - movl %ebp, %eax - subl (%ebx), %eax - movl %eax, (%esp) # 4-byte Spill - sbbl 4(%ebx), %edi - movl %edi, 4(%esp) # 4-byte Spill - movl %ebp, %eax - sbbl 8(%ebx), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - sbbl 12(%ebx), %edx - movl %edx, 12(%esp) # 4-byte Spill - sbbl 16(%ebx), %esi - movl %esi, 16(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - sbbl 20(%ebx), %esi - movl %esi, 20(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - sbbl 24(%ebx), %ebp - movl 48(%esp), %esi # 4-byte Reload - sbbl 28(%ebx), %esi - testl %esi, %esi - js .LBB121_2 -# BB#1: - movl (%esp), %eax # 4-byte Reload -.LBB121_2: - movl 76(%esp), %ebx - movl %eax, (%ebx) - movl 32(%esp), %eax # 4-byte Reload - js .LBB121_4 -# BB#3: - movl 4(%esp), %eax # 4-byte Reload -.LBB121_4: - movl %eax, 4(%ebx) - movl 40(%esp), %edx # 4-byte Reload - movl 28(%esp), %edi # 4-byte Reload - js .LBB121_6 -# BB#5: - movl 8(%esp), %edi # 4-byte Reload -.LBB121_6: - movl %edi, 8(%ebx) - movl 44(%esp), %ecx # 4-byte Reload - movl 36(%esp), %eax # 4-byte Reload - js .LBB121_8 -# BB#7: - movl 12(%esp), %eax # 4-byte Reload -.LBB121_8: - movl %eax, 12(%ebx) - movl 48(%esp), %edi # 4-byte Reload - movl 52(%esp), %eax # 4-byte Reload - js .LBB121_10 -# BB#9: - movl 16(%esp), %edx # 4-byte Reload -.LBB121_10: - movl %edx, 16(%ebx) - js .LBB121_12 -# BB#11: - movl 20(%esp), %ecx # 4-byte Reload -.LBB121_12: - movl %ecx, 20(%ebx) - js .LBB121_14 -# BB#13: - movl %ebp, %eax -.LBB121_14: - movl %eax, 24(%ebx) - js .LBB121_16 -# BB#15: - movl %esi, %edi -.LBB121_16: - movl %edi, 28(%ebx) - addl $56, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end121: - .size mcl_fp_addNF8Lbmi2, .Lfunc_end121-mcl_fp_addNF8Lbmi2 - - .globl mcl_fp_sub8Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub8Lbmi2,@function -mcl_fp_sub8Lbmi2: # @mcl_fp_sub8Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $28, %esp - movl 52(%esp), %esi - movl (%esi), %ecx - movl 4(%esi), %eax - xorl %ebx, %ebx - movl 56(%esp), %ebp - subl (%ebp), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - sbbl 4(%ebp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 8(%esi), %edx - sbbl 8(%ebp), %edx - movl %edx, 8(%esp) # 4-byte Spill - movl 12(%esi), %eax - sbbl 12(%ebp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 16(%esi), %ecx - sbbl 16(%ebp), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 20(%esi), %eax - sbbl 20(%ebp), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 24(%esi), %edi - sbbl 24(%ebp), %edi - movl 28(%esi), %esi - sbbl 28(%ebp), %esi - sbbl $0, %ebx - testb $1, %bl - movl 48(%esp), %ebx - movl 16(%esp), %ebp # 4-byte Reload - movl %ebp, (%ebx) - movl 20(%esp), %ebp # 4-byte Reload - movl %ebp, 4(%ebx) - movl %edx, 8(%ebx) - movl 24(%esp), %edx # 4-byte Reload - movl %edx, 12(%ebx) - movl %ecx, 16(%ebx) - movl %eax, 20(%ebx) - movl %edi, 24(%ebx) - movl %esi, 28(%ebx) - je .LBB122_2 -# BB#1: # %carry - movl %esi, (%esp) # 4-byte Spill - movl 60(%esp), %esi - movl 16(%esp), %ecx # 4-byte Reload - addl (%esi), %ecx - movl %ecx, (%ebx) - movl 20(%esp), %edx # 4-byte Reload - adcl 4(%esi), %edx - movl %edx, 4(%ebx) - movl 8(%esp), %ebp # 4-byte Reload - adcl 8(%esi), %ebp - movl 12(%esi), %eax - adcl 24(%esp), 
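mcl_fp_add8Lbmi2 and mcl_fp_addNF8Lbmi2, ending above, are the two flavors of modular addition: both add with a full carry chain and then trial-subtract p; add8 keeps the difference when the subtraction's borrow is absorbed by the addition's carry (its carry/nocarry labels), while addNF selects limb by limb on the sign of the top difference (the js branches). A sketch of the shared logic for reduced inputs; fpAdd and the limb layout are assumptions:

```go
package fpadd

import "math/bits"

const n = 8

// fpAdd computes (a + b) mod p for reduced inputs: full carry chain,
// trial subtraction of p, then keep whichever copy is correct.
func fpAdd(a, b, p [n]uint32) [n]uint32 {
	var s [n]uint32
	var carry uint32
	for i := 0; i < n; i++ {
		s[i], carry = bits.Add32(a[i], b[i], carry)
	}
	var d [n]uint32
	var borrow uint32
	for i := 0; i < n; i++ {
		d[i], borrow = bits.Sub32(s[i], p[i], borrow)
	}
	if carry != 0 || borrow == 0 { // a+b >= p: the reduced copy wins
		return d
	}
	return s
}
```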
%eax # 4-byte Folded Reload - movl %ebp, 8(%ebx) - movl 16(%esi), %ecx - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %eax, 12(%ebx) - movl 20(%esi), %eax - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %ecx, 16(%ebx) - movl %eax, 20(%ebx) - movl 24(%esi), %eax - adcl %edi, %eax - movl %eax, 24(%ebx) - movl 28(%esi), %eax - adcl (%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%ebx) -.LBB122_2: # %nocarry - addl $28, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end122: - .size mcl_fp_sub8Lbmi2, .Lfunc_end122-mcl_fp_sub8Lbmi2 - - .globl mcl_fp_subNF8Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF8Lbmi2,@function -mcl_fp_subNF8Lbmi2: # @mcl_fp_subNF8Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $40, %esp - movl 64(%esp), %eax - movl (%eax), %esi - movl 4(%eax), %edx - movl 68(%esp), %ecx - subl (%ecx), %esi - movl %esi, 24(%esp) # 4-byte Spill - sbbl 4(%ecx), %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 28(%eax), %edx - movl 24(%eax), %esi - movl 20(%eax), %edi - movl 16(%eax), %ebx - movl 12(%eax), %ebp - movl 8(%eax), %eax - sbbl 8(%ecx), %eax - movl %eax, 8(%esp) # 4-byte Spill - sbbl 12(%ecx), %ebp - movl %ebp, 12(%esp) # 4-byte Spill - sbbl 16(%ecx), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - sbbl 20(%ecx), %edi - movl %edi, 20(%esp) # 4-byte Spill - sbbl 24(%ecx), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl %edx, %edi - sbbl 28(%ecx), %edi - movl %edi, 36(%esp) # 4-byte Spill - sarl $31, %edi - movl 72(%esp), %ebp - movl 28(%ebp), %eax - andl %edi, %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 24(%ebp), %eax - andl %edi, %eax - movl %eax, (%esp) # 4-byte Spill - movl 20(%ebp), %ebx - andl %edi, %ebx - movl 16(%ebp), %esi - andl %edi, %esi - movl 12(%ebp), %edx - andl %edi, %edx - movl 8(%ebp), %ecx - andl %edi, %ecx - movl 4(%ebp), %eax - andl %edi, %eax - andl (%ebp), %edi - addl 24(%esp), %edi # 4-byte Folded Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl 60(%esp), %ebp - movl %edi, (%ebp) - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %eax, 4(%ebp) - adcl 12(%esp), %edx # 4-byte Folded Reload - movl %ecx, 8(%ebp) - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %edx, 12(%ebp) - adcl 20(%esp), %ebx # 4-byte Folded Reload - movl %esi, 16(%ebp) - movl (%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %ebx, 20(%ebp) - movl %eax, 24(%ebp) - movl 4(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%ebp) - addl $40, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end123: - .size mcl_fp_subNF8Lbmi2, .Lfunc_end123-mcl_fp_subNF8Lbmi2 - - .globl mcl_fpDbl_add8Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add8Lbmi2,@function -mcl_fpDbl_add8Lbmi2: # @mcl_fpDbl_add8Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $56, %esp - movl 84(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edx - movl 80(%esp), %ebp - addl (%ebp), %esi - adcl 4(%ebp), %edx - movl 8(%ecx), %edi - adcl 8(%ebp), %edi - movl 12(%ebp), %ebx - movl 76(%esp), %eax - movl %esi, (%eax) - movl 16(%ebp), %esi - adcl 12(%ecx), %ebx - adcl 16(%ecx), %esi - movl %edx, 4(%eax) - movl 40(%ecx), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl %edi, 8(%eax) - movl 20(%ecx), %edx - movl %ebx, 12(%eax) - movl 20(%ebp), %edi - adcl %edx, %edi - movl 24(%ecx), %edx - movl %esi, 16(%eax) - movl 24(%ebp), %esi - adcl %edx, %esi - movl 28(%ecx), %edx - movl %edi, 20(%eax) - movl 28(%ebp), %ebx - adcl %edx, %ebx - movl 32(%ecx), %edx - movl %esi, 
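mcl_fp_sub8Lbmi2 and mcl_fp_subNF8Lbmi2 above subtract mod p: sub8 branches and adds p back when the borrow chain underflows (its carry label), while subNF is branchless, smearing the sign of the top difference into a mask with sarl $31 and ANDing that mask against each limb of p before the add-back. A sketch of the branchless variant, using the borrow bit for the mask since the inputs are reduced; fpSubNF is an illustrative name:

```go
package fpsub

import "math/bits"

const n = 8

// fpSubNF computes (a - b) mod p without branching: after the borrow
// chain, borrow is 0 or 1; negating it yields an all-zero or all-one
// mask (the assembly derives the same mask via sarl $31 on the top
// difference), which gates the add-back of p.
func fpSubNF(a, b, p [n]uint32) [n]uint32 {
	var d [n]uint32
	var borrow uint32
	for i := 0; i < n; i++ {
		d[i], borrow = bits.Sub32(a[i], b[i], borrow)
	}
	mask := -borrow // 0x00000000 or 0xffffffff
	var carry uint32
	for i := 0; i < n; i++ {
		d[i], carry = bits.Add32(d[i], p[i]&mask, carry)
	}
	return d
}
```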
24(%eax) - movl 32(%ebp), %esi - adcl %edx, %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 36(%ecx), %edx - movl %ebx, 28(%eax) - movl 36(%ebp), %ebx - adcl %edx, %ebx - movl %ebx, 28(%esp) # 4-byte Spill - movl 40(%ebp), %eax - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%ecx), %edx - movl 44(%ebp), %edi - adcl %edx, %edi - movl %edi, 32(%esp) # 4-byte Spill - movl 48(%ecx), %edx - movl 48(%ebp), %eax - adcl %edx, %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 52(%ecx), %edx - movl 52(%ebp), %esi - adcl %edx, %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 56(%ecx), %edx - movl 56(%ebp), %eax - adcl %edx, %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%ecx), %ecx - movl 60(%ebp), %ebp - adcl %ecx, %ebp - movl %ebp, 40(%esp) # 4-byte Spill - sbbl %ecx, %ecx - andl $1, %ecx - movl 44(%esp), %eax # 4-byte Reload - movl 88(%esp), %edx - subl (%edx), %eax - movl %eax, (%esp) # 4-byte Spill - movl 88(%esp), %eax - sbbl 4(%eax), %ebx - movl %eax, %edx - movl %ebx, 4(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - movl %edx, %ebx - sbbl 8(%ebx), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl %edi, %eax - movl 24(%esp), %edi # 4-byte Reload - sbbl 12(%ebx), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl %edi, %eax - sbbl 16(%ebx), %eax - sbbl 20(%ebx), %esi - movl %esi, 16(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - sbbl 24(%ebx), %edx - movl %edx, 20(%esp) # 4-byte Spill - sbbl 28(%ebx), %ebp - sbbl $0, %ecx - andl $1, %ecx - jne .LBB124_2 -# BB#1: - movl %eax, %edi -.LBB124_2: - testb %cl, %cl - movl 44(%esp), %ecx # 4-byte Reload - jne .LBB124_4 -# BB#3: - movl (%esp), %ecx # 4-byte Reload -.LBB124_4: - movl 76(%esp), %eax - movl %ecx, 32(%eax) - movl 40(%esp), %ecx # 4-byte Reload - movl 32(%esp), %edx # 4-byte Reload - movl 48(%esp), %esi # 4-byte Reload - movl 28(%esp), %ebx # 4-byte Reload - jne .LBB124_6 -# BB#5: - movl 4(%esp), %ebx # 4-byte Reload -.LBB124_6: - movl %ebx, 36(%eax) - jne .LBB124_8 -# BB#7: - movl 8(%esp), %esi # 4-byte Reload -.LBB124_8: - movl %esi, 40(%eax) - movl 36(%esp), %esi # 4-byte Reload - jne .LBB124_10 -# BB#9: - movl 12(%esp), %edx # 4-byte Reload -.LBB124_10: - movl %edx, 44(%eax) - movl %edi, 48(%eax) - movl 52(%esp), %edx # 4-byte Reload - jne .LBB124_12 -# BB#11: - movl 16(%esp), %esi # 4-byte Reload -.LBB124_12: - movl %esi, 52(%eax) - jne .LBB124_14 -# BB#13: - movl 20(%esp), %edx # 4-byte Reload -.LBB124_14: - movl %edx, 56(%eax) - jne .LBB124_16 -# BB#15: - movl %ebp, %ecx -.LBB124_16: - movl %ecx, 60(%eax) - addl $56, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end124: - .size mcl_fpDbl_add8Lbmi2, .Lfunc_end124-mcl_fpDbl_add8Lbmi2 - - .globl mcl_fpDbl_sub8Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub8Lbmi2,@function -mcl_fpDbl_sub8Lbmi2: # @mcl_fpDbl_sub8Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $40, %esp - movl 64(%esp), %edi - movl (%edi), %eax - movl 4(%edi), %edx - movl 68(%esp), %ebx - subl (%ebx), %eax - sbbl 4(%ebx), %edx - movl 8(%edi), %esi - sbbl 8(%ebx), %esi - movl 60(%esp), %ecx - movl %eax, (%ecx) - movl 12(%edi), %eax - sbbl 12(%ebx), %eax - movl %edx, 4(%ecx) - movl 16(%edi), %edx - sbbl 16(%ebx), %edx - movl %esi, 8(%ecx) - movl 20(%ebx), %esi - movl %eax, 12(%ecx) - movl 20(%edi), %eax - sbbl %esi, %eax - movl 24(%ebx), %esi - movl %edx, 16(%ecx) - movl 24(%edi), %edx - sbbl %esi, %edx - movl 28(%ebx), %esi - movl %eax, 20(%ecx) - movl 28(%edi), %eax - sbbl %esi, %eax - movl 32(%ebx), %esi - 
movl %edx, 24(%ecx) - movl 32(%edi), %edx - sbbl %esi, %edx - movl %edx, 16(%esp) # 4-byte Spill - movl 36(%ebx), %edx - movl %eax, 28(%ecx) - movl 36(%edi), %eax - sbbl %edx, %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 40(%ebx), %eax - movl 40(%edi), %edx - sbbl %eax, %edx - movl %edx, 12(%esp) # 4-byte Spill - movl 44(%ebx), %eax - movl 44(%edi), %edx - sbbl %eax, %edx - movl %edx, 20(%esp) # 4-byte Spill - movl 48(%ebx), %eax - movl 48(%edi), %edx - sbbl %eax, %edx - movl %edx, 24(%esp) # 4-byte Spill - movl 52(%ebx), %eax - movl 52(%edi), %edx - sbbl %eax, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 56(%ebx), %eax - movl 56(%edi), %edx - sbbl %eax, %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 60(%ebx), %eax - movl 60(%edi), %edx - sbbl %eax, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 72(%esp), %ebx - jne .LBB125_1 -# BB#2: - movl $0, 4(%esp) # 4-byte Folded Spill - jmp .LBB125_3 -.LBB125_1: - movl 28(%ebx), %edx - movl %edx, 4(%esp) # 4-byte Spill -.LBB125_3: - testb %al, %al - jne .LBB125_4 -# BB#5: - movl $0, %ebp - movl $0, %eax - jmp .LBB125_6 -.LBB125_4: - movl (%ebx), %eax - movl 4(%ebx), %ebp -.LBB125_6: - jne .LBB125_7 -# BB#8: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB125_9 -.LBB125_7: - movl 24(%ebx), %edx - movl %edx, (%esp) # 4-byte Spill -.LBB125_9: - jne .LBB125_10 -# BB#11: - movl $0, %edx - jmp .LBB125_12 -.LBB125_10: - movl 20(%ebx), %edx -.LBB125_12: - jne .LBB125_13 -# BB#14: - movl $0, %esi - jmp .LBB125_15 -.LBB125_13: - movl 16(%ebx), %esi -.LBB125_15: - jne .LBB125_16 -# BB#17: - movl $0, %edi - jmp .LBB125_18 -.LBB125_16: - movl 12(%ebx), %edi -.LBB125_18: - jne .LBB125_19 -# BB#20: - xorl %ebx, %ebx - jmp .LBB125_21 -.LBB125_19: - movl 8(%ebx), %ebx -.LBB125_21: - addl 16(%esp), %eax # 4-byte Folded Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl %eax, 32(%ecx) - adcl 12(%esp), %ebx # 4-byte Folded Reload - movl %ebp, 36(%ecx) - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %ebx, 40(%ecx) - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %edi, 44(%ecx) - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %esi, 48(%ecx) - movl (%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %edx, 52(%ecx) - movl %eax, 56(%ecx) - movl 4(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%ecx) - addl $40, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end125: - .size mcl_fpDbl_sub8Lbmi2, .Lfunc_end125-mcl_fpDbl_sub8Lbmi2 - - .align 16, 0x90 - .type .LmulPv288x32,@function -.LmulPv288x32: # @mulPv288x32 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $24, %esp - movl %edx, %eax - movl 44(%esp), %edx - mulxl 4(%eax), %edi, %esi - mulxl (%eax), %ebp, %ebx - movl %ebp, 20(%esp) # 4-byte Spill - addl %edi, %ebx - movl %ebx, 16(%esp) # 4-byte Spill - mulxl 8(%eax), %edi, %ebx - adcl %esi, %edi - movl %edi, 12(%esp) # 4-byte Spill - mulxl 12(%eax), %esi, %edi - adcl %ebx, %esi - movl %esi, 8(%esp) # 4-byte Spill - movl %edx, %ebp - mulxl 16(%eax), %ebx, %esi - adcl %edi, %ebx - mulxl 20(%eax), %edi, %edx - movl %edx, 4(%esp) # 4-byte Spill - adcl %esi, %edi - movl %ebp, %edx - mulxl 24(%eax), %esi, %edx - movl %edx, (%esp) # 4-byte Spill - adcl 4(%esp), %esi # 4-byte Folded Reload - movl %ebp, %edx - mulxl 28(%eax), %edx, %ebp - movl %ebp, 4(%esp) # 4-byte Spill - adcl (%esp), %edx # 4-byte Folded Reload - movl 20(%esp), %ebp # 4-byte Reload - movl %ebp, (%ecx) - movl 16(%esp), %ebp # 
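mcl_fpDbl_add8Lbmi2 and mcl_fpDbl_sub8Lbmi2 above handle the 16-limb double-width values produced by mulPre/sqrPre: the carry (or borrow) chain runs across all 16 limbs, but only the upper 8 are conditionally adjusted by p, which keeps the value a valid double-width residue. A sketch of the add case; fpDblAdd and the layout are assumptions:

```go
package fpdbl

import "math/bits"

const n = 8

// fpDblAdd adds two 2n-limb double-width values; the carry chain
// covers all limbs, but only the upper half is trial-subtracted by p,
// matching the structure of the routine above.
func fpDblAdd(a, b [2 * n]uint32, p [n]uint32) [2 * n]uint32 {
	var r [2 * n]uint32
	var carry uint32
	for i := 0; i < 2*n; i++ {
		r[i], carry = bits.Add32(a[i], b[i], carry)
	}
	var d [n]uint32
	var borrow uint32
	for i := 0; i < n; i++ {
		d[i], borrow = bits.Sub32(r[n+i], p[i], borrow)
	}
	if carry != 0 || borrow == 0 { // upper half >= p: keep it reduced
		copy(r[n:], d[:])
	}
	return r
}
```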
4-byte Reload - movl %ebp, 4(%ecx) - movl 12(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%ecx) - movl 8(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%ecx) - movl %ebx, 16(%ecx) - movl %edi, 20(%ecx) - movl %esi, 24(%ecx) - movl %edx, 28(%ecx) - movl 44(%esp), %edx - mulxl 32(%eax), %eax, %edx - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%ecx) - adcl $0, %edx - movl %edx, 36(%ecx) - movl %ecx, %eax - addl $24, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end126: - .size .LmulPv288x32, .Lfunc_end126-.LmulPv288x32 - - .globl mcl_fp_mulUnitPre9Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre9Lbmi2,@function -mcl_fp_mulUnitPre9Lbmi2: # @mcl_fp_mulUnitPre9Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $76, %esp - calll .L127$pb -.L127$pb: - popl %ebx -.Ltmp8: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp8-.L127$pb), %ebx - movl 104(%esp), %eax - movl %eax, (%esp) - leal 32(%esp), %ecx - movl 100(%esp), %edx - calll .LmulPv288x32 - movl 68(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 64(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 60(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 56(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 52(%esp), %edi - movl 48(%esp), %ebx - movl 44(%esp), %ebp - movl 40(%esp), %esi - movl 32(%esp), %edx - movl 36(%esp), %ecx - movl 96(%esp), %eax - movl %edx, (%eax) - movl %ecx, 4(%eax) - movl %esi, 8(%eax) - movl %ebp, 12(%eax) - movl %ebx, 16(%eax) - movl %edi, 20(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - addl $76, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end127: - .size mcl_fp_mulUnitPre9Lbmi2, .Lfunc_end127-mcl_fp_mulUnitPre9Lbmi2 - - .globl mcl_fpDbl_mulPre9Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre9Lbmi2,@function -mcl_fpDbl_mulPre9Lbmi2: # @mcl_fpDbl_mulPre9Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $428, %esp # imm = 0x1AC - calll .L128$pb -.L128$pb: - popl %esi -.Ltmp9: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp9-.L128$pb), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 456(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 384(%esp), %ecx - movl 452(%esp), %edx - movl %edx, %ebp - movl %esi, %ebx - calll .LmulPv288x32 - movl 420(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 416(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 412(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 408(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 404(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 400(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 396(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 392(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 384(%esp), %eax - movl 388(%esp), %edi - movl 448(%esp), %ecx - movl %eax, (%ecx) - movl 456(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 344(%esp), %ecx - movl %ebp, %edx - movl %esi, %ebx - calll .LmulPv288x32 - addl 344(%esp), %edi - movl %edi, 8(%esp) # 4-byte Spill - movl 380(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 376(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 372(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 368(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 364(%esp), %ebx - movl 360(%esp), %edi - movl 356(%esp), %esi - movl 348(%esp), 
%ecx - movl 352(%esp), %edx - movl 448(%esp), %eax - movl 8(%esp), %ebp # 4-byte Reload - movl %ebp, 4(%eax) - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 12(%esp) # 4-byte Spill - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 8(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, 28(%esp) # 4-byte Folded Spill - movl 456(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 304(%esp), %ecx - movl 452(%esp), %edx - movl 60(%esp), %ebx # 4-byte Reload - calll .LmulPv288x32 - movl 40(%esp), %eax # 4-byte Reload - addl 304(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 340(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 336(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 332(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 328(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 324(%esp), %edi - movl 320(%esp), %ebp - movl 316(%esp), %esi - movl 308(%esp), %ecx - movl 312(%esp), %edx - movl 448(%esp), %eax - movl 40(%esp), %ebx # 4-byte Reload - movl %ebx, 8(%eax) - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 12(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 12(%esp) # 4-byte Spill - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 8(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - adcl $0, 24(%esp) # 4-byte Folded Spill - movl 456(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 264(%esp), %ecx - movl 452(%esp), %edx - movl 60(%esp), %ebx # 4-byte Reload - calll .LmulPv288x32 - movl 40(%esp), %eax # 4-byte Reload - addl 264(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 300(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 296(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 292(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 288(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 284(%esp), %ebx - movl 280(%esp), %edi - movl 276(%esp), %esi - movl 268(%esp), %ecx - movl 272(%esp), %edx - movl 448(%esp), %eax - movl 40(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%eax) - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %ebp - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 12(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 8(%esp), %edi # 4-byte Folded Reload - movl %edi, 8(%esp) # 4-byte Spill - adcl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 4(%esp) # 4-byte Spill - movl 32(%esp), %eax # 
4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, 28(%esp) # 4-byte Folded Spill - movl 456(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 224(%esp), %ecx - movl 452(%esp), %edx - movl 60(%esp), %ebx # 4-byte Reload - calll .LmulPv288x32 - addl 224(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 260(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 256(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 252(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 248(%esp), %ebx - movl 244(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 240(%esp), %edi - movl 236(%esp), %ebp - movl 228(%esp), %ecx - movl 232(%esp), %edx - movl 448(%esp), %eax - movl 44(%esp), %esi # 4-byte Reload - movl %esi, 16(%eax) - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 44(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 16(%esp) # 4-byte Spill - adcl 4(%esp), %edi # 4-byte Folded Reload - movl %edi, 8(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 48(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 4(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - adcl $0, 24(%esp) # 4-byte Folded Spill - movl 456(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 184(%esp), %ecx - movl 452(%esp), %edx - movl 60(%esp), %ebx # 4-byte Reload - calll .LmulPv288x32 - movl 44(%esp), %eax # 4-byte Reload - addl 184(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 220(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 216(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 212(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 208(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 204(%esp), %edi - movl 200(%esp), %ebx - movl 196(%esp), %esi - movl 188(%esp), %ecx - movl 192(%esp), %edx - movl 448(%esp), %eax - movl 44(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%eax) - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %ebp - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 8(%esp) # 4-byte Spill - adcl 4(%esp), %edi # 4-byte Folded Reload - movl %edi, 4(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl %eax, 20(%esp) # 4-byte Folded Spill - movl 52(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - adcl $0, 28(%esp) # 4-byte Folded Spill - movl 456(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 144(%esp), %ecx - movl 452(%esp), %edx - movl 60(%esp), %ebx # 4-byte Reload - calll .LmulPv288x32 - addl 144(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 180(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 176(%esp), %eax - 
movl %eax, 56(%esp) # 4-byte Spill - movl 172(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 168(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 164(%esp), %ebx - movl 160(%esp), %edi - movl 156(%esp), %esi - movl 148(%esp), %ecx - movl 152(%esp), %edx - movl 448(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 24(%eax) - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %esi, 52(%esp) # 4-byte Spill - adcl 4(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 20(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 456(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 104(%esp), %ecx - movl 452(%esp), %edx - movl 60(%esp), %ebx # 4-byte Reload - calll .LmulPv288x32 - movl 12(%esp), %esi # 4-byte Reload - addl 104(%esp), %esi - movl 140(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 136(%esp), %ebp - movl 132(%esp), %edi - movl 128(%esp), %ebx - movl 124(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 120(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 116(%esp), %edx - movl 108(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 112(%esp), %ecx - movl 448(%esp), %eax - movl %esi, 28(%eax) - movl 12(%esp), %esi # 4-byte Reload - adcl 24(%esp), %esi # 4-byte Folded Reload - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - movl 40(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 40(%esp) # 4-byte Spill - adcl 56(%esp), %edi # 4-byte Folded Reload - movl %edi, 44(%esp) # 4-byte Spill - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 56(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 456(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 64(%esp), %ecx - movl 452(%esp), %edx - movl 60(%esp), %ebx # 4-byte Reload - calll .LmulPv288x32 - movl %esi, %ebp - addl 64(%esp), %ebp - movl 24(%esp), %edx # 4-byte Reload - adcl 68(%esp), %edx - movl 52(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 100(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 96(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 92(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 88(%esp), %edi - movl 84(%esp), %ebx - movl 80(%esp), %esi - movl 76(%esp), %eax - movl 448(%esp), %ecx - movl %ebp, 32(%ecx) - movl %edx, 36(%ecx) - adcl 28(%esp), %eax # 4-byte Folded Reload - movl 52(%esp), %edx # 4-byte Reload - movl %edx, 40(%ecx) - adcl 48(%esp), %esi # 4-byte Folded Reload - movl %eax, 44(%ecx) - adcl 40(%esp), %ebx # 4-byte Folded Reload - movl %esi, 48(%ecx) - adcl 44(%esp), %edi # 4-byte Folded Reload - movl %ebx, 52(%ecx) - movl 24(%esp), %eax # 4-byte Reload - adcl 56(%esp), 
%eax # 4-byte Folded Reload - movl %edi, 56(%ecx) - movl %eax, 60(%ecx) - movl 36(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%ecx) - movl 60(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 68(%ecx) - addl $428, %esp # imm = 0x1AC - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end128: - .size mcl_fpDbl_mulPre9Lbmi2, .Lfunc_end128-mcl_fpDbl_mulPre9Lbmi2 - - .globl mcl_fpDbl_sqrPre9Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre9Lbmi2,@function -mcl_fpDbl_sqrPre9Lbmi2: # @mcl_fpDbl_sqrPre9Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $428, %esp # imm = 0x1AC - calll .L129$pb -.L129$pb: - popl %ebx -.Ltmp10: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp10-.L129$pb), %ebx - movl %ebx, 60(%esp) # 4-byte Spill - movl 452(%esp), %edx - movl (%edx), %eax - movl %eax, (%esp) - leal 384(%esp), %ecx - movl %edx, %esi - movl %ebx, %edi - calll .LmulPv288x32 - movl 420(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 416(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 412(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 408(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 404(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 400(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 396(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 392(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 384(%esp), %eax - movl 388(%esp), %ebp - movl 448(%esp), %ecx - movl %eax, (%ecx) - movl 4(%esi), %eax - movl %eax, (%esp) - leal 344(%esp), %ecx - movl %esi, %edx - movl %edi, %ebx - calll .LmulPv288x32 - addl 344(%esp), %ebp - movl %ebp, 8(%esp) # 4-byte Spill - movl 380(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 376(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 372(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 368(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 364(%esp), %ebx - movl 360(%esp), %edi - movl 356(%esp), %esi - movl 348(%esp), %ecx - movl 352(%esp), %edx - movl 448(%esp), %eax - movl 8(%esp), %ebp # 4-byte Reload - movl %ebp, 4(%eax) - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 8(%esp) # 4-byte Spill - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 4(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, 28(%esp) # 4-byte Folded Spill - movl 452(%esp), %edx - movl 8(%edx), %eax - movl %eax, (%esp) - leal 304(%esp), %ecx - movl 60(%esp), %ebx # 4-byte Reload - calll .LmulPv288x32 - movl 40(%esp), %eax # 4-byte Reload - addl 304(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 340(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 336(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 332(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 328(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 324(%esp), %edi - movl 320(%esp), %ebp - movl 316(%esp), %esi - movl 308(%esp), %ecx - movl 312(%esp), %edx - movl 
448(%esp), %eax - movl 40(%esp), %ebx # 4-byte Reload - movl %ebx, 8(%eax) - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 4(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 8(%esp) # 4-byte Spill - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 4(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - adcl $0, 24(%esp) # 4-byte Folded Spill - movl 452(%esp), %edx - movl 12(%edx), %eax - movl %eax, (%esp) - leal 264(%esp), %ecx - movl 60(%esp), %ebx # 4-byte Reload - calll .LmulPv288x32 - movl 40(%esp), %eax # 4-byte Reload - addl 264(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 300(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 296(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 292(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 288(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 284(%esp), %ebx - movl 280(%esp), %edi - movl 276(%esp), %esi - movl 268(%esp), %ecx - movl 272(%esp), %edx - movl 448(%esp), %eax - movl 40(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%eax) - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %ebp - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 4(%esp), %edi # 4-byte Folded Reload - movl %edi, 8(%esp) # 4-byte Spill - adcl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 36(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, 20(%esp) # 4-byte Folded Spill - movl 452(%esp), %edx - movl 16(%edx), %eax - movl %eax, (%esp) - leal 224(%esp), %ecx - movl 60(%esp), %ebx # 4-byte Reload - calll .LmulPv288x32 - addl 224(%esp), %ebp - movl %ebp, 12(%esp) # 4-byte Spill - movl 260(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 256(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 252(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 248(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 244(%esp), %edi - movl 240(%esp), %ebp - movl 236(%esp), %esi - movl 228(%esp), %ecx - movl 232(%esp), %edx - movl 448(%esp), %eax - movl 12(%esp), %ebx # 4-byte Reload - movl %ebx, 16(%eax) - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 8(%esp) # 4-byte Spill - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 4(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 
4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - adcl $0, 24(%esp) # 4-byte Folded Spill - movl 452(%esp), %edx - movl 20(%edx), %eax - movl %eax, (%esp) - leal 184(%esp), %ecx - movl 60(%esp), %ebx # 4-byte Reload - calll .LmulPv288x32 - movl 40(%esp), %eax # 4-byte Reload - addl 184(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 220(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 216(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 212(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 208(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 204(%esp), %ebx - movl 200(%esp), %edi - movl 196(%esp), %esi - movl 188(%esp), %ecx - movl 192(%esp), %edx - movl 448(%esp), %eax - movl 40(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%eax) - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %ebp - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 4(%esp), %edi # 4-byte Folded Reload - movl %edi, 12(%esp) # 4-byte Spill - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 8(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 452(%esp), %edx - movl 24(%edx), %eax - movl %eax, (%esp) - leal 144(%esp), %ecx - movl 60(%esp), %ebx # 4-byte Reload - calll .LmulPv288x32 - addl 144(%esp), %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl 180(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 176(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 172(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 168(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 164(%esp), %edi - movl 160(%esp), %ebp - movl 156(%esp), %esi - movl 148(%esp), %ecx - movl 152(%esp), %edx - movl 448(%esp), %eax - movl 24(%esp), %ebx # 4-byte Reload - movl %ebx, 24(%eax) - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 12(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 16(%esp) # 4-byte Spill - adcl 36(%esp), %edi # 4-byte Folded Reload - movl %edi, 12(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - adcl $0, 28(%esp) # 4-byte Folded Spill - movl 452(%esp), %edx - movl 28(%edx), %eax - movl %eax, (%esp) - leal 104(%esp), %ecx - movl 60(%esp), %ebx # 4-byte Reload - calll .LmulPv288x32 - movl 4(%esp), %esi # 4-byte Reload - addl 104(%esp), %esi - movl 140(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 136(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 132(%esp), %ebp - movl 
128(%esp), %ebx - movl 124(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 120(%esp), %edi - movl 116(%esp), %edx - movl 108(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 112(%esp), %ecx - movl 448(%esp), %eax - movl %esi, 28(%eax) - movl 48(%esp), %esi # 4-byte Reload - adcl 20(%esp), %esi # 4-byte Folded Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 44(%esp) # 4-byte Spill - adcl 52(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 452(%esp), %edx - movl 32(%edx), %eax - movl %eax, (%esp) - leal 64(%esp), %ecx - movl 60(%esp), %ebx # 4-byte Reload - calll .LmulPv288x32 - movl %esi, %ebp - addl 64(%esp), %ebp - movl 20(%esp), %edx # 4-byte Reload - adcl 68(%esp), %edx - movl 48(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 100(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 96(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 92(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 88(%esp), %edi - movl 84(%esp), %ebx - movl 80(%esp), %esi - movl 76(%esp), %eax - movl 448(%esp), %ecx - movl %ebp, 32(%ecx) - movl %edx, 36(%ecx) - adcl 24(%esp), %eax # 4-byte Folded Reload - movl 48(%esp), %edx # 4-byte Reload - movl %edx, 40(%ecx) - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %eax, 44(%ecx) - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %esi, 48(%ecx) - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %ebx, 52(%ecx) - movl 28(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %edi, 56(%ecx) - movl %eax, 60(%ecx) - movl 40(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%ecx) - movl 60(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 68(%ecx) - addl $428, %esp # imm = 0x1AC - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end129: - .size mcl_fpDbl_sqrPre9Lbmi2, .Lfunc_end129-mcl_fpDbl_sqrPre9Lbmi2 - - .globl mcl_fp_mont9Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont9Lbmi2,@function -mcl_fp_mont9Lbmi2: # @mcl_fp_mont9Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $796, %esp # imm = 0x31C - calll .L130$pb -.L130$pb: - popl %ebx -.Ltmp11: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp11-.L130$pb), %ebx - movl 828(%esp), %eax - movl -4(%eax), %edi - movl %edi, 28(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 752(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - movl 752(%esp), %ebp - movl 756(%esp), %esi - movl %ebp, %eax - imull %edi, %eax - movl 788(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 784(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 780(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 776(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 772(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 768(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 764(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 760(%esp), %edi - movl %eax, (%esp) - leal 712(%esp), %ecx 
- movl 828(%esp), %edx - calll .LmulPv288x32 - addl 712(%esp), %ebp - adcl 716(%esp), %esi - adcl 720(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 728(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 732(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 736(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 748(%esp), %ebp - sbbl %eax, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 672(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - movl 64(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 672(%esp), %esi - adcl 676(%esp), %edi - movl 44(%esp), %ecx # 4-byte Reload - adcl 680(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - adcl 684(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 688(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 692(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 696(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 700(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl 704(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - adcl 708(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %esi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 632(%esp), %ecx - movl 828(%esp), %eax - movl %eax, %edx - calll .LmulPv288x32 - andl $1, %ebp - addl 632(%esp), %esi - adcl 636(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - adcl 660(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl $0, %ebp - movl 824(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 592(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - addl 592(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 616(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 
68(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 624(%esp), %esi - adcl 628(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %edi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 552(%esp), %ecx - movl 828(%esp), %edx - calll .LmulPv288x32 - andl $1, %ebp - addl 552(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - adcl 580(%esp), %edi - adcl 584(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl $0, %ebp - movl 824(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 512(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - movl 44(%esp), %ecx # 4-byte Reload - addl 512(%esp), %ecx - movl 32(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - adcl 524(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 536(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 544(%esp), %edi - adcl 548(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 472(%esp), %ecx - movl 828(%esp), %edx - calll .LmulPv288x32 - movl 40(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 472(%esp), %ebp - movl 32(%esp), %ecx # 4-byte Reload - adcl 476(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 480(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl 484(%esp), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 488(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 492(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 496(%esp), %ebp - movl 64(%esp), %esi # 4-byte Reload - adcl 500(%esp), %esi - adcl 504(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 508(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 432(%esp), %ecx - movl 820(%esp), %eax - movl %eax, %edx - calll .LmulPv288x32 - movl 32(%esp), %ecx # 4-byte Reload - addl 432(%esp), %ecx - movl 36(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 
440(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 444(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 452(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - adcl 456(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - movl %ecx, %ebp - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 392(%esp), %ecx - movl 828(%esp), %edx - calll .LmulPv288x32 - movl %esi, %eax - andl $1, %eax - addl 392(%esp), %ebp - movl 36(%esp), %esi # 4-byte Reload - adcl 396(%esp), %esi - movl 48(%esp), %ecx # 4-byte Reload - adcl 400(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - adcl 404(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 408(%esp), %ebp - movl 68(%esp), %ecx # 4-byte Reload - adcl 412(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 416(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 420(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 424(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - adcl 428(%esp), %edi - adcl $0, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 352(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - addl 352(%esp), %esi - movl 48(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 364(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 372(%esp), %ebp - movl 52(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 384(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %esi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 312(%esp), %ecx - movl 828(%esp), %edx - calll .LmulPv288x32 - movl %edi, %eax - andl $1, %eax - addl 312(%esp), %esi - movl 48(%esp), %ecx # 4-byte Reload - adcl 316(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 320(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 324(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 328(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - adcl 332(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - adcl 336(%esp), %esi - movl 44(%esp), %edi # 4-byte Reload - adcl 340(%esp), %edi - movl 40(%esp), %ecx # 4-byte Reload - adcl 344(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 348(%esp), %ecx - movl %ecx, 
36(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, %ebp - movl 824(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 272(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - movl 48(%esp), %ecx # 4-byte Reload - addl 272(%esp), %ecx - movl 60(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 292(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - adcl 296(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 308(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 232(%esp), %ecx - movl 828(%esp), %edx - calll .LmulPv288x32 - movl %edi, %ecx - andl $1, %ecx - addl 232(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - adcl 240(%esp), %esi - movl 68(%esp), %edi # 4-byte Reload - adcl 244(%esp), %edi - movl 64(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - adcl 260(%esp), %ebp - movl 36(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 192(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - movl 60(%esp), %ecx # 4-byte Reload - addl 192(%esp), %ecx - adcl 196(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - adcl 200(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - adcl 212(%esp), %esi - adcl 216(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %eax - movl %ecx, %edi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 152(%esp), %ecx - movl 828(%esp), %edx - calll .LmulPv288x32 - andl $1, %ebp - addl 152(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 156(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl 164(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 168(%esp), 
%eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 172(%esp), %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 176(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - adcl 180(%esp), %esi - movl 48(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 112(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - movl 56(%esp), %ecx # 4-byte Reload - addl 112(%esp), %ecx - movl 68(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 120(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - movl 52(%esp), %eax # 4-byte Reload - adcl 124(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 128(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 132(%esp), %ebp - adcl 136(%esp), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 140(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 144(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 148(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - sbbl %edi, %edi - movl 28(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 72(%esp), %ecx - movl 828(%esp), %edx - calll .LmulPv288x32 - andl $1, %edi - addl 72(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 80(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 84(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl %edx, %esi - movl 44(%esp), %edx # 4-byte Reload - adcl 88(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - adcl 92(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 36(%esp), %edx # 4-byte Reload - adcl 96(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - adcl 100(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 32(%esp), %edx # 4-byte Reload - adcl 104(%esp), %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 108(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - adcl $0, %edi - movl 828(%esp), %ebx - subl (%ebx), %eax - movl %ecx, %edx - sbbl 4(%ebx), %edx - movl %esi, %ecx - sbbl 8(%ebx), %ecx - movl 44(%esp), %esi # 4-byte Reload - sbbl 12(%ebx), %esi - movl %esi, 16(%esp) # 4-byte Spill - sbbl 16(%ebx), %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - sbbl 20(%ebx), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - sbbl 24(%ebx), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 32(%esp), %esi # 4-byte Reload - sbbl 28(%ebx), %esi - movl 60(%esp), %ebp # 4-byte Reload - sbbl 32(%ebx), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - sbbl $0, %edi - andl $1, %edi - movl %edi, %ebx - jne .LBB130_2 -# BB#1: - movl %esi, 32(%esp) # 4-byte Spill -.LBB130_2: - testb %bl, %bl - movl 68(%esp), %esi # 4-byte Reload - jne .LBB130_4 -# BB#3: - movl %eax, %esi -.LBB130_4: - movl 816(%esp), %ebp - movl %esi, (%ebp) - movl 64(%esp), %eax # 4-byte Reload - jne .LBB130_6 -# BB#5: - movl %edx, %eax 
-.LBB130_6: - movl %eax, 4(%ebp) - movl 52(%esp), %eax # 4-byte Reload - jne .LBB130_8 -# BB#7: - movl %ecx, %eax -.LBB130_8: - movl %eax, 8(%ebp) - movl 44(%esp), %eax # 4-byte Reload - jne .LBB130_10 -# BB#9: - movl 16(%esp), %eax # 4-byte Reload -.LBB130_10: - movl %eax, 12(%ebp) - jne .LBB130_12 -# BB#11: - movl 20(%esp), %eax # 4-byte Reload - movl %eax, 40(%esp) # 4-byte Spill -.LBB130_12: - movl 40(%esp), %eax # 4-byte Reload - movl %eax, 16(%ebp) - movl 36(%esp), %eax # 4-byte Reload - jne .LBB130_14 -# BB#13: - movl 24(%esp), %eax # 4-byte Reload -.LBB130_14: - movl %eax, 20(%ebp) - movl 48(%esp), %eax # 4-byte Reload - jne .LBB130_16 -# BB#15: - movl 28(%esp), %eax # 4-byte Reload -.LBB130_16: - movl %eax, 24(%ebp) - movl 32(%esp), %eax # 4-byte Reload - movl %eax, 28(%ebp) - movl 60(%esp), %eax # 4-byte Reload - jne .LBB130_18 -# BB#17: - movl 56(%esp), %eax # 4-byte Reload -.LBB130_18: - movl %eax, 32(%ebp) - addl $796, %esp # imm = 0x31C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end130: - .size mcl_fp_mont9Lbmi2, .Lfunc_end130-mcl_fp_mont9Lbmi2 - - .globl mcl_fp_montNF9Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF9Lbmi2,@function -mcl_fp_montNF9Lbmi2: # @mcl_fp_montNF9Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $796, %esp # imm = 0x31C - calll .L131$pb -.L131$pb: - popl %ebx -.Ltmp12: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp12-.L131$pb), %ebx - movl 828(%esp), %eax - movl -4(%eax), %edi - movl %edi, 28(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 752(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - movl 752(%esp), %esi - movl 756(%esp), %ebp - movl %esi, %eax - imull %edi, %eax - movl 788(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 784(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 780(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 776(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 772(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 768(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 764(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 760(%esp), %edi - movl %eax, (%esp) - leal 712(%esp), %ecx - movl 828(%esp), %edx - calll .LmulPv288x32 - addl 712(%esp), %esi - adcl 716(%esp), %ebp - adcl 720(%esp), %edi - movl 32(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 728(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 732(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 736(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - adcl 740(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 672(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - movl 708(%esp), %eax - addl 672(%esp), %ebp - adcl 676(%esp), %edi - movl 32(%esp), %ecx # 4-byte Reload - adcl 680(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 684(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 688(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 692(%esp), %ecx - movl %ecx, 56(%esp) # 
4-byte Spill - adcl 696(%esp), %esi - movl 60(%esp), %ecx # 4-byte Reload - adcl 700(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 704(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl %ebp, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 632(%esp), %ecx - movl 828(%esp), %edx - calll .LmulPv288x32 - addl 632(%esp), %ebp - adcl 636(%esp), %edi - movl 32(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 656(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 664(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 592(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - movl 628(%esp), %eax - addl 592(%esp), %edi - movl 32(%esp), %ecx # 4-byte Reload - adcl 596(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 600(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 604(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 608(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl 612(%esp), %esi - movl 60(%esp), %ecx # 4-byte Reload - adcl 616(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl 620(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 624(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl %eax, %ebp - adcl $0, %ebp - movl %edi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 552(%esp), %ecx - movl 828(%esp), %eax - movl %eax, %edx - calll .LmulPv288x32 - addl 552(%esp), %edi - movl 32(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 572(%esp), %esi - movl 60(%esp), %edi # 4-byte Reload - adcl 576(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 588(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 512(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - movl 548(%esp), %eax - movl 32(%esp), %edx # 4-byte Reload - addl 512(%esp), %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 516(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 520(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 524(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl 528(%esp), %esi - movl %esi, 
36(%esp) # 4-byte Spill - adcl 532(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 536(%esp), %ebp - movl 64(%esp), %edi # 4-byte Reload - adcl 540(%esp), %edi - movl 44(%esp), %esi # 4-byte Reload - adcl 544(%esp), %esi - adcl $0, %eax - movl %eax, 52(%esp) # 4-byte Spill - movl %edx, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 472(%esp), %ecx - movl 828(%esp), %edx - calll .LmulPv288x32 - movl 32(%esp), %eax # 4-byte Reload - addl 472(%esp), %eax - movl 40(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 496(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - adcl 500(%esp), %edi - movl %edi, %ebp - adcl 504(%esp), %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 432(%esp), %ecx - movl 820(%esp), %eax - movl %eax, %edx - calll .LmulPv288x32 - movl 468(%esp), %eax - movl 40(%esp), %ecx # 4-byte Reload - addl 432(%esp), %ecx - movl 48(%esp), %esi # 4-byte Reload - adcl 436(%esp), %esi - movl 56(%esp), %edi # 4-byte Reload - adcl 440(%esp), %edi - movl 36(%esp), %edx # 4-byte Reload - adcl 444(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 448(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 452(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - adcl 456(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - adcl 460(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 464(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 392(%esp), %ecx - movl 828(%esp), %edx - calll .LmulPv288x32 - addl 392(%esp), %ebp - adcl 396(%esp), %esi - movl %esi, 48(%esp) # 4-byte Spill - adcl 400(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 412(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 424(%esp), %edi - movl 40(%esp), %esi # 4-byte Reload - adcl 428(%esp), %esi - movl 824(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 352(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - movl 388(%esp), %eax - movl 48(%esp), %ecx # 4-byte Reload - addl 352(%esp), %ecx - movl 56(%esp), %edx # 4-byte Reload - adcl 356(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 36(%esp), %edx # 4-byte Reload - adcl 360(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 
60(%esp), %edx # 4-byte Reload - adcl 364(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - adcl 368(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 372(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - adcl 376(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - adcl 380(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - adcl 384(%esp), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl %eax, %ebp - adcl $0, %ebp - movl %ecx, %eax - movl %ecx, %edi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 312(%esp), %ecx - movl 828(%esp), %edx - calll .LmulPv288x32 - addl 312(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 324(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - adcl 340(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 348(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 272(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - movl 308(%esp), %edx - movl 56(%esp), %ecx # 4-byte Reload - addl 272(%esp), %ecx - movl 36(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 280(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 44(%esp), %ebp # 4-byte Reload - adcl 292(%esp), %ebp - adcl 296(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 232(%esp), %ecx - movl 828(%esp), %edx - calll .LmulPv288x32 - addl 232(%esp), %edi - movl 36(%esp), %esi # 4-byte Reload - adcl 236(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl %ebp, %edi - adcl 252(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 192(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - movl 228(%esp), %ebp - movl %esi, 
%ecx - addl 192(%esp), %ecx - movl 60(%esp), %esi # 4-byte Reload - adcl 196(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 208(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ecx, %eax - movl %ecx, %edi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 152(%esp), %ecx - movl 828(%esp), %edx - calll .LmulPv288x32 - addl 152(%esp), %edi - adcl 156(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - adcl 160(%esp), %edi - movl 64(%esp), %esi # 4-byte Reload - adcl 164(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 168(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 176(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 188(%esp), %ebp - movl %ebp, 36(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 112(%esp), %ecx - movl 820(%esp), %edx - calll .LmulPv288x32 - movl 148(%esp), %ebp - movl 60(%esp), %ecx # 4-byte Reload - addl 112(%esp), %ecx - adcl 116(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - adcl 120(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 124(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 128(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 40(%esp), %esi # 4-byte Reload - adcl 132(%esp), %esi - movl 48(%esp), %eax # 4-byte Reload - adcl 136(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 140(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 144(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl $0, %ebp - movl 28(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %edi - movl %eax, (%esp) - leal 72(%esp), %ecx - movl 828(%esp), %eax - movl %eax, %edx - calll .LmulPv288x32 - addl 72(%esp), %edi - movl 44(%esp), %edi # 4-byte Reload - movl 68(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %ebx # 4-byte Reload - adcl 80(%esp), %ebx - movl %ebx, 64(%esp) # 4-byte Spill - adcl 84(%esp), %edi - movl 52(%esp), %edx # 4-byte Reload - adcl 88(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - adcl 92(%esp), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - adcl 96(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 100(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 36(%esp), %edx # 4-byte Reload - adcl 104(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl 108(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 
%eax, %edx - movl 828(%esp), %eax - subl (%eax), %edx - sbbl 4(%eax), %ebx - movl %edi, %ecx - sbbl 8(%eax), %ecx - movl 52(%esp), %esi # 4-byte Reload - sbbl 12(%eax), %esi - movl %esi, 16(%esp) # 4-byte Spill - movl 40(%esp), %esi # 4-byte Reload - sbbl 16(%eax), %esi - movl %esi, 20(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - sbbl 20(%eax), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - sbbl 24(%eax), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - sbbl 28(%eax), %esi - movl %esi, 32(%esp) # 4-byte Spill - sbbl 32(%eax), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - sarl $31, %ebp - testl %ebp, %ebp - movl 68(%esp), %eax # 4-byte Reload - js .LBB131_2 -# BB#1: - movl %edx, %eax -.LBB131_2: - movl 816(%esp), %edx - movl %eax, (%edx) - movl 64(%esp), %esi # 4-byte Reload - js .LBB131_4 -# BB#3: - movl %ebx, %esi -.LBB131_4: - movl %esi, 4(%edx) - movl 52(%esp), %ebp # 4-byte Reload - movl 40(%esp), %eax # 4-byte Reload - js .LBB131_6 -# BB#5: - movl %ecx, %edi -.LBB131_6: - movl %edi, 8(%edx) - js .LBB131_8 -# BB#7: - movl 16(%esp), %ebp # 4-byte Reload -.LBB131_8: - movl %ebp, 12(%edx) - js .LBB131_10 -# BB#9: - movl 20(%esp), %eax # 4-byte Reload -.LBB131_10: - movl %eax, 16(%edx) - movl 48(%esp), %eax # 4-byte Reload - js .LBB131_12 -# BB#11: - movl 24(%esp), %eax # 4-byte Reload -.LBB131_12: - movl %eax, 20(%edx) - movl 56(%esp), %eax # 4-byte Reload - js .LBB131_14 -# BB#13: - movl 28(%esp), %eax # 4-byte Reload -.LBB131_14: - movl %eax, 24(%edx) - movl 36(%esp), %eax # 4-byte Reload - js .LBB131_16 -# BB#15: - movl 32(%esp), %eax # 4-byte Reload -.LBB131_16: - movl %eax, 28(%edx) - movl 60(%esp), %eax # 4-byte Reload - js .LBB131_18 -# BB#17: - movl 44(%esp), %eax # 4-byte Reload -.LBB131_18: - movl %eax, 32(%edx) - addl $796, %esp # imm = 0x31C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end131: - .size mcl_fp_montNF9Lbmi2, .Lfunc_end131-mcl_fp_montNF9Lbmi2 - - .globl mcl_fp_montRed9Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed9Lbmi2,@function -mcl_fp_montRed9Lbmi2: # @mcl_fp_montRed9Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $492, %esp # imm = 0x1EC - calll .L132$pb -.L132$pb: - popl %ebx -.Ltmp13: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp13-.L132$pb), %ebx - movl 520(%esp), %edx - movl -4(%edx), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 516(%esp), %eax - movl (%eax), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 4(%eax), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl %esi, %ecx - imull %edi, %ecx - movl 68(%eax), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 64(%eax), %edi - movl %edi, 88(%esp) # 4-byte Spill - movl 60(%eax), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 56(%eax), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 52(%eax), %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 48(%eax), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 44(%eax), %esi - movl %esi, 96(%esp) # 4-byte Spill - movl 40(%eax), %edi - movl %edi, 124(%esp) # 4-byte Spill - movl 36(%eax), %edi - movl %edi, 120(%esp) # 4-byte Spill - movl 32(%eax), %edi - movl %edi, 104(%esp) # 4-byte Spill - movl 28(%eax), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 24(%eax), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 20(%eax), %ebp - movl 16(%eax), %edi - movl 12(%eax), %esi - movl 8(%eax), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl (%edx), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 32(%edx), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 
28(%edx), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%edx), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 20(%edx), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 16(%edx), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 12(%edx), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 8(%edx), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 4(%edx), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl %ecx, (%esp) - leal 448(%esp), %ecx - calll .LmulPv288x32 - movl 76(%esp), %eax # 4-byte Reload - addl 448(%esp), %eax - movl 52(%esp), %ecx # 4-byte Reload - adcl 452(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 460(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - adcl 464(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - adcl 468(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - movl 96(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 116(%esp) # 4-byte Folded Spill - movl 108(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 88(%esp) # 4-byte Folded Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - sbbl %eax, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl %ecx, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 408(%esp), %ecx - movl 520(%esp), %edx - calll .LmulPv288x32 - movl 76(%esp), %eax # 4-byte Reload - andl $1, %eax - movl 52(%esp), %ecx # 4-byte Reload - addl 408(%esp), %ecx - movl 56(%esp), %edx # 4-byte Reload - adcl 412(%esp), %edx - movl 60(%esp), %ecx # 4-byte Reload - adcl 416(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 420(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 424(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 428(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 432(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 436(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 120(%esp), %ecx # 4-byte Reload - adcl 440(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 444(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - adcl $0, %esi - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 108(%esp) # 4-byte Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 88(%esp) # 4-byte Folded Spill - adcl $0, %ebp - adcl $0, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl %edx, %edi - movl %edi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 368(%esp), %ecx - movl 520(%esp), %edx - calll .LmulPv288x32 - addl 368(%esp), %edi - movl 60(%esp), %ecx # 4-byte Reload - adcl 372(%esp), %ecx - movl 64(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - 
adcl 380(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - adcl 404(%esp), %esi - movl %esi, 96(%esp) # 4-byte Spill - movl 116(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl 108(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 88(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 80(%esp) # 4-byte Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 328(%esp), %ecx - movl 520(%esp), %edx - calll .LmulPv288x32 - addl 328(%esp), %ebp - movl 64(%esp), %ecx # 4-byte Reload - adcl 332(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 364(%esp), %edi - movl %edi, 116(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl 100(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 88(%esp) # 4-byte Folded Spill - movl 80(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 76(%esp) # 4-byte Folded Spill - movl %ecx, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 288(%esp), %ecx - movl 520(%esp), %edx - calll .LmulPv288x32 - movl 64(%esp), %eax # 4-byte Reload - addl 288(%esp), %eax - movl 68(%esp), %ecx # 4-byte Reload - adcl 292(%esp), %ecx - movl 84(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 112(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 88(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, %esi - movl %esi, 
80(%esp) # 4-byte Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - movl %ecx, %esi - movl %esi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 248(%esp), %ecx - movl 520(%esp), %eax - movl %eax, %edx - calll .LmulPv288x32 - addl 248(%esp), %esi - movl 84(%esp), %ecx # 4-byte Reload - adcl 252(%esp), %ecx - movl 92(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 120(%esp), %ebp # 4-byte Reload - adcl 264(%esp), %ebp - movl 124(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - movl %edi, %esi - adcl $0, %esi - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %edi - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 208(%esp), %ecx - movl 520(%esp), %edx - calll .LmulPv288x32 - addl 208(%esp), %edi - movl 92(%esp), %ecx # 4-byte Reload - adcl 212(%esp), %ecx - movl 104(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 220(%esp), %ebp - movl %ebp, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 88(%esp) # 4-byte Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl 76(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl %ecx, %eax - movl %ecx, %ebp - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 168(%esp), %ecx - movl 520(%esp), %eax - movl %eax, %edx - calll .LmulPv288x32 - addl 168(%esp), %ebp - movl 104(%esp), %ecx # 4-byte Reload - adcl 172(%esp), %ecx - movl 120(%esp), %eax # 4-byte Reload - adcl 176(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %ebp # 4-byte Reload - adcl 180(%esp), %ebp - movl 96(%esp), %esi # 4-byte Reload - adcl 184(%esp), %esi - movl 116(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - imull 
%ecx, %eax - movl %ecx, %edi - movl %eax, (%esp) - leal 128(%esp), %ecx - movl 520(%esp), %edx - calll .LmulPv288x32 - addl 128(%esp), %edi - movl 120(%esp), %eax # 4-byte Reload - adcl 132(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl %eax, %edi - adcl 136(%esp), %ebp - movl %ebp, 124(%esp) # 4-byte Spill - adcl 140(%esp), %esi - movl %esi, 96(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 144(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 148(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl %eax, %ebx - movl 112(%esp), %eax # 4-byte Reload - adcl 152(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 156(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 164(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - subl 20(%esp), %edi # 4-byte Folded Reload - movl 124(%esp), %eax # 4-byte Reload - sbbl 16(%esp), %eax # 4-byte Folded Reload - sbbl 24(%esp), %esi # 4-byte Folded Reload - sbbl 28(%esp), %ecx # 4-byte Folded Reload - sbbl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 72(%esp) # 4-byte Spill - movl 112(%esp), %ebx # 4-byte Reload - sbbl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 76(%esp) # 4-byte Spill - movl 100(%esp), %ebx # 4-byte Reload - sbbl 40(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 84(%esp) # 4-byte Spill - movl 88(%esp), %ebx # 4-byte Reload - sbbl 44(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 92(%esp) # 4-byte Spill - movl %edx, %ebx - movl %ebp, %edx - sbbl 48(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 104(%esp) # 4-byte Spill - sbbl $0, %edx - andl $1, %edx - jne .LBB132_2 -# BB#1: - movl %ecx, 116(%esp) # 4-byte Spill -.LBB132_2: - testb %dl, %dl - movl 120(%esp), %ecx # 4-byte Reload - jne .LBB132_4 -# BB#3: - movl %edi, %ecx -.LBB132_4: - movl 512(%esp), %edi - movl %ecx, (%edi) - movl 88(%esp), %ecx # 4-byte Reload - jne .LBB132_6 -# BB#5: - movl %eax, 124(%esp) # 4-byte Spill -.LBB132_6: - movl 124(%esp), %eax # 4-byte Reload - movl %eax, 4(%edi) - movl 96(%esp), %eax # 4-byte Reload - jne .LBB132_8 -# BB#7: - movl %esi, %eax -.LBB132_8: - movl %eax, 8(%edi) - movl 116(%esp), %eax # 4-byte Reload - movl %eax, 12(%edi) - movl 80(%esp), %eax # 4-byte Reload - movl 108(%esp), %ebp # 4-byte Reload - jne .LBB132_10 -# BB#9: - movl 72(%esp), %ebp # 4-byte Reload -.LBB132_10: - movl %ebp, 16(%edi) - movl 112(%esp), %ebx # 4-byte Reload - jne .LBB132_12 -# BB#11: - movl 76(%esp), %ebx # 4-byte Reload -.LBB132_12: - movl %ebx, 20(%edi) - movl 100(%esp), %esi # 4-byte Reload - jne .LBB132_14 -# BB#13: - movl 84(%esp), %esi # 4-byte Reload -.LBB132_14: - movl %esi, 24(%edi) - jne .LBB132_16 -# BB#15: - movl 92(%esp), %ecx # 4-byte Reload -.LBB132_16: - movl %ecx, 28(%edi) - jne .LBB132_18 -# BB#17: - movl 104(%esp), %eax # 4-byte Reload -.LBB132_18: - movl %eax, 32(%edi) - addl $492, %esp # imm = 0x1EC - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end132: - .size mcl_fp_montRed9Lbmi2, .Lfunc_end132-mcl_fp_montRed9Lbmi2 - - .globl mcl_fp_addPre9Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre9Lbmi2,@function -mcl_fp_addPre9Lbmi2: # @mcl_fp_addPre9Lbmi2 -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - movl 20(%esp), %ecx - addl (%ecx), %edx - adcl 
4(%ecx), %esi - movl 8(%eax), %edi - adcl 8(%ecx), %edi - movl 16(%esp), %ebx - movl %edx, (%ebx) - movl 12(%ecx), %edx - movl %esi, 4(%ebx) - movl 16(%ecx), %esi - adcl 12(%eax), %edx - adcl 16(%eax), %esi - movl %edi, 8(%ebx) - movl 20(%eax), %edi - movl %edx, 12(%ebx) - movl 20(%ecx), %edx - adcl %edi, %edx - movl 24(%eax), %edi - movl %esi, 16(%ebx) - movl 24(%ecx), %esi - adcl %edi, %esi - movl 28(%eax), %edi - movl %edx, 20(%ebx) - movl 28(%ecx), %edx - adcl %edi, %edx - movl %esi, 24(%ebx) - movl %edx, 28(%ebx) - movl 32(%eax), %eax - movl 32(%ecx), %ecx - adcl %eax, %ecx - movl %ecx, 32(%ebx) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end133: - .size mcl_fp_addPre9Lbmi2, .Lfunc_end133-mcl_fp_addPre9Lbmi2 - - .globl mcl_fp_subPre9Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre9Lbmi2,@function -mcl_fp_subPre9Lbmi2: # @mcl_fp_subPre9Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edi - xorl %eax, %eax - movl 28(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%ecx), %ebx - sbbl 8(%edx), %ebx - movl 20(%esp), %ebp - movl %esi, (%ebp) - movl 12(%ecx), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ebp) - movl 16(%ecx), %edi - sbbl 16(%edx), %edi - movl %ebx, 8(%ebp) - movl 20(%edx), %ebx - movl %esi, 12(%ebp) - movl 20(%ecx), %esi - sbbl %ebx, %esi - movl 24(%edx), %ebx - movl %edi, 16(%ebp) - movl 24(%ecx), %edi - sbbl %ebx, %edi - movl 28(%edx), %ebx - movl %esi, 20(%ebp) - movl 28(%ecx), %esi - sbbl %ebx, %esi - movl %edi, 24(%ebp) - movl %esi, 28(%ebp) - movl 32(%edx), %edx - movl 32(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 32(%ebp) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end134: - .size mcl_fp_subPre9Lbmi2, .Lfunc_end134-mcl_fp_subPre9Lbmi2 - - .globl mcl_fp_shr1_9Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_9Lbmi2,@function -mcl_fp_shr1_9Lbmi2: # @mcl_fp_shr1_9Lbmi2 -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - shrdl $1, %edx, %ecx - movl 8(%esp), %esi - movl %ecx, (%esi) - movl 8(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 4(%esi) - movl 12(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 8(%esi) - movl 16(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 12(%esi) - movl 20(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 16(%esi) - movl 24(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 20(%esi) - movl 28(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 24(%esi) - movl 32(%eax), %eax - shrdl $1, %eax, %edx - movl %edx, 28(%esi) - shrl %eax - movl %eax, 32(%esi) - popl %esi - retl -.Lfunc_end135: - .size mcl_fp_shr1_9Lbmi2, .Lfunc_end135-mcl_fp_shr1_9Lbmi2 - - .globl mcl_fp_add9Lbmi2 - .align 16, 0x90 - .type mcl_fp_add9Lbmi2,@function -mcl_fp_add9Lbmi2: # @mcl_fp_add9Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $20, %esp - movl 48(%esp), %edi - movl (%edi), %ecx - movl 4(%edi), %eax - movl 44(%esp), %ebx - addl (%ebx), %ecx - movl %ecx, %ebp - adcl 4(%ebx), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 8(%edi), %eax - adcl 8(%ebx), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 12(%ebx), %ecx - movl 16(%ebx), %eax - adcl 12(%edi), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - adcl 16(%edi), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 20(%ebx), %esi - adcl 20(%edi), %esi - movl 24(%ebx), %edx - adcl 24(%edi), %edx - movl 28(%ebx), %ecx - adcl 28(%edi), %ecx - movl 32(%ebx), %eax - adcl 32(%edi), %eax - movl 40(%esp), %edi - 
movl %ebp, (%edi) - movl 16(%esp), %ebx # 4-byte Reload - movl %ebx, 4(%edi) - movl 12(%esp), %ebx # 4-byte Reload - movl %ebx, 8(%edi) - movl 8(%esp), %ebx # 4-byte Reload - movl %ebx, 12(%edi) - movl 4(%esp), %ebx # 4-byte Reload - movl %ebx, 16(%edi) - movl %esi, 20(%edi) - movl %edx, 24(%edi) - movl %ecx, 28(%edi) - movl %eax, 32(%edi) - sbbl %ebx, %ebx - andl $1, %ebx - movl 52(%esp), %edi - subl (%edi), %ebp - movl %ebp, (%esp) # 4-byte Spill - movl 16(%esp), %ebp # 4-byte Reload - sbbl 4(%edi), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 12(%esp), %ebp # 4-byte Reload - sbbl 8(%edi), %ebp - movl %ebp, 12(%esp) # 4-byte Spill - movl 8(%esp), %ebp # 4-byte Reload - sbbl 12(%edi), %ebp - movl %ebp, 8(%esp) # 4-byte Spill - movl 4(%esp), %ebp # 4-byte Reload - sbbl 16(%edi), %ebp - sbbl 20(%edi), %esi - sbbl 24(%edi), %edx - sbbl 28(%edi), %ecx - sbbl 32(%edi), %eax - sbbl $0, %ebx - testb $1, %bl - jne .LBB136_2 -# BB#1: # %nocarry - movl (%esp), %edi # 4-byte Reload - movl 40(%esp), %ebx - movl %edi, (%ebx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 4(%ebx) - movl 12(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl 8(%esp), %edi # 4-byte Reload - movl %edi, 12(%ebx) - movl %ebp, 16(%ebx) - movl %esi, 20(%ebx) - movl %edx, 24(%ebx) - movl %ecx, 28(%ebx) - movl %eax, 32(%ebx) -.LBB136_2: # %carry - addl $20, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end136: - .size mcl_fp_add9Lbmi2, .Lfunc_end136-mcl_fp_add9Lbmi2 - - .globl mcl_fp_addNF9Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF9Lbmi2,@function -mcl_fp_addNF9Lbmi2: # @mcl_fp_addNF9Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $72, %esp - movl 100(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edi - movl 96(%esp), %esi - addl (%esi), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl 4(%esi), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 32(%eax), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 28(%eax), %ebp - movl 24(%eax), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 20(%eax), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 16(%eax), %ebx - movl 12(%eax), %edx - movl 8(%eax), %ecx - adcl 8(%esi), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - adcl 12(%esi), %edx - movl %edx, 48(%esp) # 4-byte Spill - adcl 16(%esi), %ebx - movl %ebx, 52(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 20(%esi), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 24(%esi), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 28(%esi), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 32(%esi), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 104(%esp), %esi - movl 36(%esp), %eax # 4-byte Reload - movl %eax, %ebp - subl (%esi), %ebp - movl %ebp, (%esp) # 4-byte Spill - sbbl 4(%esi), %edi - movl %edi, 4(%esp) # 4-byte Spill - sbbl 8(%esi), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - sbbl 12(%esi), %edx - movl %edx, 12(%esp) # 4-byte Spill - sbbl 16(%esi), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 64(%esp), %ebx # 4-byte Reload - sbbl 20(%esi), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - sbbl 24(%esi), %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - sbbl 28(%esi), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - movl %ecx, %edx - movl %ecx, %ebp - sbbl 32(%esi), %edx - movl %edx, 32(%esp) # 4-byte Spill - movl %edx, %esi - sarl $31, %esi - testl %esi, %esi - js .LBB137_2 -# BB#1: - 
movl (%esp), %eax # 4-byte Reload -.LBB137_2: - movl 92(%esp), %ecx - movl %eax, (%ecx) - movl 44(%esp), %eax # 4-byte Reload - js .LBB137_4 -# BB#3: - movl 4(%esp), %eax # 4-byte Reload -.LBB137_4: - movl %eax, 4(%ecx) - movl 68(%esp), %esi # 4-byte Reload - movl 64(%esp), %edi # 4-byte Reload - movl 52(%esp), %ebx # 4-byte Reload - movl 48(%esp), %edx # 4-byte Reload - movl 40(%esp), %eax # 4-byte Reload - js .LBB137_6 -# BB#5: - movl 8(%esp), %eax # 4-byte Reload -.LBB137_6: - movl %eax, 8(%ecx) - movl %ebp, %eax - js .LBB137_8 -# BB#7: - movl 12(%esp), %edx # 4-byte Reload -.LBB137_8: - movl %edx, 12(%ecx) - movl 56(%esp), %edx # 4-byte Reload - js .LBB137_10 -# BB#9: - movl 16(%esp), %ebx # 4-byte Reload -.LBB137_10: - movl %ebx, 16(%ecx) - js .LBB137_12 -# BB#11: - movl 20(%esp), %edi # 4-byte Reload -.LBB137_12: - movl %edi, 20(%ecx) - js .LBB137_14 -# BB#13: - movl 24(%esp), %esi # 4-byte Reload -.LBB137_14: - movl %esi, 24(%ecx) - js .LBB137_16 -# BB#15: - movl 28(%esp), %edx # 4-byte Reload -.LBB137_16: - movl %edx, 28(%ecx) - js .LBB137_18 -# BB#17: - movl 32(%esp), %eax # 4-byte Reload -.LBB137_18: - movl %eax, 32(%ecx) - addl $72, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end137: - .size mcl_fp_addNF9Lbmi2, .Lfunc_end137-mcl_fp_addNF9Lbmi2 - - .globl mcl_fp_sub9Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub9Lbmi2,@function -mcl_fp_sub9Lbmi2: # @mcl_fp_sub9Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $28, %esp - movl 52(%esp), %esi - movl (%esi), %eax - movl 4(%esi), %ecx - xorl %ebx, %ebx - movl 56(%esp), %edi - subl (%edi), %eax - movl %eax, 12(%esp) # 4-byte Spill - sbbl 4(%edi), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 8(%esi), %eax - sbbl 8(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 12(%esi), %eax - sbbl 12(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 16(%esi), %edx - sbbl 16(%edi), %edx - movl %edx, 8(%esp) # 4-byte Spill - movl 20(%esi), %ecx - sbbl 20(%edi), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 24(%esi), %eax - sbbl 24(%edi), %eax - movl %eax, (%esp) # 4-byte Spill - movl 28(%esi), %ebp - sbbl 28(%edi), %ebp - movl 32(%esi), %esi - sbbl 32(%edi), %esi - sbbl $0, %ebx - testb $1, %bl - movl 48(%esp), %ebx - movl 12(%esp), %edi # 4-byte Reload - movl %edi, (%ebx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 4(%ebx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 12(%ebx) - movl %edx, 16(%ebx) - movl %ecx, 20(%ebx) - movl %eax, 24(%ebx) - movl %ebp, 28(%ebx) - movl %esi, 32(%ebx) - je .LBB138_2 -# BB#1: # %carry - movl %esi, %edi - movl 60(%esp), %esi - movl 12(%esp), %ecx # 4-byte Reload - addl (%esi), %ecx - movl %ecx, (%ebx) - movl 16(%esp), %edx # 4-byte Reload - adcl 4(%esi), %edx - movl %edx, 4(%ebx) - movl 20(%esp), %ecx # 4-byte Reload - adcl 8(%esi), %ecx - movl 12(%esi), %eax - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %ecx, 8(%ebx) - movl 16(%esi), %ecx - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %eax, 12(%ebx) - movl 20(%esi), %eax - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %ecx, 16(%ebx) - movl 24(%esi), %ecx - adcl (%esp), %ecx # 4-byte Folded Reload - movl %eax, 20(%ebx) - movl %ecx, 24(%ebx) - movl 28(%esi), %eax - adcl %ebp, %eax - movl %eax, 28(%ebx) - movl 32(%esi), %eax - adcl %edi, %eax - movl %eax, 32(%ebx) -.LBB138_2: # %nocarry - addl $28, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end138: - .size mcl_fp_sub9Lbmi2, 
.Lfunc_end138-mcl_fp_sub9Lbmi2 - - .globl mcl_fp_subNF9Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF9Lbmi2,@function -mcl_fp_subNF9Lbmi2: # @mcl_fp_subNF9Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $48, %esp - movl 72(%esp), %edx - movl (%edx), %ecx - movl 4(%edx), %eax - movl 76(%esp), %esi - subl (%esi), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - sbbl 4(%esi), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 32(%edx), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 28(%edx), %ebp - movl 24(%edx), %edi - movl 20(%edx), %ebx - movl 16(%edx), %ecx - movl 12(%edx), %eax - movl 8(%edx), %edx - sbbl 8(%esi), %edx - movl %edx, 12(%esp) # 4-byte Spill - sbbl 12(%esi), %eax - movl %eax, 16(%esp) # 4-byte Spill - sbbl 16(%esi), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - sbbl 20(%esi), %ebx - movl %ebx, 24(%esp) # 4-byte Spill - sbbl 24(%esi), %edi - movl %edi, 28(%esp) # 4-byte Spill - sbbl 28(%esi), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - sbbl 32(%esi), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl %eax, %ecx - sarl $31, %ecx - movl %ecx, %edi - shldl $1, %eax, %edi - movl 80(%esp), %ebp - movl 12(%ebp), %eax - andl %edi, %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 4(%ebp), %ebx - andl %edi, %ebx - andl (%ebp), %edi - movl 32(%ebp), %eax - andl %ecx, %eax - movl %eax, 8(%esp) # 4-byte Spill - rorxl $31, %ecx, %eax - andl 28(%ebp), %ecx - movl 24(%ebp), %edx - andl %eax, %edx - movl %edx, (%esp) # 4-byte Spill - movl 20(%ebp), %esi - andl %eax, %esi - movl 16(%ebp), %edx - andl %eax, %edx - andl 8(%ebp), %eax - addl 36(%esp), %edi # 4-byte Folded Reload - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl 68(%esp), %ebp - movl %edi, (%ebp) - adcl 12(%esp), %eax # 4-byte Folded Reload - movl %ebx, 4(%ebp) - movl 4(%esp), %edi # 4-byte Reload - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %eax, 8(%ebp) - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edi, 12(%ebp) - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %edx, 16(%ebp) - movl (%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %esi, 20(%ebp) - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl %eax, 24(%ebp) - movl %ecx, 28(%ebp) - movl 8(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%ebp) - addl $48, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end139: - .size mcl_fp_subNF9Lbmi2, .Lfunc_end139-mcl_fp_subNF9Lbmi2 - - .globl mcl_fpDbl_add9Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add9Lbmi2,@function -mcl_fpDbl_add9Lbmi2: # @mcl_fpDbl_add9Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $68, %esp - movl 96(%esp), %edx - movl 92(%esp), %edi - movl 12(%edi), %esi - movl 16(%edi), %ecx - movl 8(%edx), %ebx - movl (%edx), %ebp - addl (%edi), %ebp - movl 88(%esp), %eax - movl %ebp, (%eax) - movl 4(%edx), %ebp - adcl 4(%edi), %ebp - adcl 8(%edi), %ebx - adcl 12(%edx), %esi - adcl 16(%edx), %ecx - movl %ebp, 4(%eax) - movl 44(%edx), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl %ebx, 8(%eax) - movl 20(%edx), %ebx - movl %esi, 12(%eax) - movl 20(%edi), %esi - adcl %ebx, %esi - movl 24(%edx), %ebx - movl %ecx, 16(%eax) - movl 24(%edi), %ecx - adcl %ebx, %ecx - movl 28(%edx), %ebx - movl %esi, 20(%eax) - movl 28(%edi), %esi - adcl %ebx, %esi - movl 32(%edx), %ebx - movl %ecx, 24(%eax) - movl 32(%edi), %ecx - adcl %ebx, %ecx - movl 36(%edx), %ebp - movl %esi, 28(%eax) - movl 36(%edi), %esi - adcl %ebp, %esi - movl %esi, 60(%esp) # 4-byte Spill 
- movl 40(%edx), %esi - movl %ecx, 32(%eax) - movl 40(%edi), %eax - adcl %esi, %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%edi), %eax - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%edx), %ecx - movl 48(%edi), %ebx - adcl %ecx, %ebx - movl %ebx, 36(%esp) # 4-byte Spill - movl 52(%edx), %eax - movl 52(%edi), %ecx - adcl %eax, %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 56(%edx), %esi - movl 56(%edi), %eax - adcl %esi, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%edx), %ebp - movl 60(%edi), %esi - adcl %ebp, %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 64(%edx), %eax - movl 64(%edi), %ebp - adcl %eax, %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 68(%edx), %edx - movl 68(%edi), %eax - adcl %edx, %eax - movl %eax, 32(%esp) # 4-byte Spill - sbbl %edx, %edx - andl $1, %edx - movl 100(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - subl (%edi), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - sbbl 4(%edi), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - sbbl 8(%edi), %eax - movl %eax, 8(%esp) # 4-byte Spill - sbbl 12(%edi), %ebx - movl %ebx, 4(%esp) # 4-byte Spill - sbbl 16(%edi), %ecx - movl %ecx, (%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - sbbl 20(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - sbbl 24(%edi), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl %ebp, %eax - movl 32(%esp), %ebp # 4-byte Reload - sbbl 28(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl %ebp, %ebx - sbbl 32(%edi), %ebx - sbbl $0, %edx - andl $1, %edx - jne .LBB140_2 -# BB#1: - movl %ebx, %ebp -.LBB140_2: - testb %dl, %dl - movl 60(%esp), %edx # 4-byte Reload - movl 40(%esp), %ecx # 4-byte Reload - movl 36(%esp), %esi # 4-byte Reload - movl 56(%esp), %edi # 4-byte Reload - movl 52(%esp), %ebx # 4-byte Reload - jne .LBB140_4 -# BB#3: - movl (%esp), %ecx # 4-byte Reload - movl 4(%esp), %esi # 4-byte Reload - movl 8(%esp), %edi # 4-byte Reload - movl 12(%esp), %ebx # 4-byte Reload - movl 16(%esp), %edx # 4-byte Reload -.LBB140_4: - movl 88(%esp), %eax - movl %edx, 36(%eax) - movl %ebx, 40(%eax) - movl %edi, 44(%eax) - movl %esi, 48(%eax) - movl %ecx, 52(%eax) - movl 44(%esp), %edx # 4-byte Reload - movl 64(%esp), %ecx # 4-byte Reload - jne .LBB140_6 -# BB#5: - movl 20(%esp), %ecx # 4-byte Reload -.LBB140_6: - movl %ecx, 56(%eax) - movl 48(%esp), %ecx # 4-byte Reload - jne .LBB140_8 -# BB#7: - movl 24(%esp), %edx # 4-byte Reload -.LBB140_8: - movl %edx, 60(%eax) - jne .LBB140_10 -# BB#9: - movl 28(%esp), %ecx # 4-byte Reload -.LBB140_10: - movl %ecx, 64(%eax) - movl %ebp, 68(%eax) - addl $68, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end140: - .size mcl_fpDbl_add9Lbmi2, .Lfunc_end140-mcl_fpDbl_add9Lbmi2 - - .globl mcl_fpDbl_sub9Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub9Lbmi2,@function -mcl_fpDbl_sub9Lbmi2: # @mcl_fpDbl_sub9Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $52, %esp - movl 76(%esp), %ebx - movl (%ebx), %eax - movl 4(%ebx), %edx - movl 80(%esp), %ebp - subl (%ebp), %eax - sbbl 4(%ebp), %edx - movl 8(%ebx), %esi - sbbl 8(%ebp), %esi - movl 72(%esp), %ecx - movl %eax, (%ecx) - movl 12(%ebx), %eax - sbbl 12(%ebp), %eax - movl %edx, 4(%ecx) - movl 16(%ebx), %edx - sbbl 16(%ebp), %edx - movl %esi, 8(%ecx) - movl 20(%ebp), %esi - movl %eax, 12(%ecx) - movl 20(%ebx), %eax - sbbl %esi, %eax - movl 24(%ebp), %esi - movl %edx, 16(%ecx) - movl 24(%ebx), %edx - sbbl %esi, %edx - movl 28(%ebp), 
%esi - movl %eax, 20(%ecx) - movl 28(%ebx), %eax - sbbl %esi, %eax - movl 32(%ebp), %esi - movl %edx, 24(%ecx) - movl 32(%ebx), %edx - sbbl %esi, %edx - movl 36(%ebp), %esi - movl %eax, 28(%ecx) - movl 36(%ebx), %eax - sbbl %esi, %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%ebp), %eax - movl %edx, 32(%ecx) - movl 40(%ebx), %edx - sbbl %eax, %edx - movl %edx, 16(%esp) # 4-byte Spill - movl 44(%ebp), %eax - movl 44(%ebx), %edx - sbbl %eax, %edx - movl %edx, 20(%esp) # 4-byte Spill - movl 48(%ebp), %eax - movl 48(%ebx), %edx - sbbl %eax, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 52(%ebp), %eax - movl 52(%ebx), %edx - sbbl %eax, %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 56(%ebp), %eax - movl 56(%ebx), %edx - sbbl %eax, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 60(%ebp), %eax - movl 60(%ebx), %edx - sbbl %eax, %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 64(%ebp), %eax - movl 64(%ebx), %edx - sbbl %eax, %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 68(%ebp), %eax - movl 68(%ebx), %edx - sbbl %eax, %edx - movl %edx, 48(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 84(%esp), %ebp - jne .LBB141_1 -# BB#2: - movl $0, 12(%esp) # 4-byte Folded Spill - jmp .LBB141_3 -.LBB141_1: - movl 32(%ebp), %edx - movl %edx, 12(%esp) # 4-byte Spill -.LBB141_3: - testb %al, %al - jne .LBB141_4 -# BB#5: - movl $0, 4(%esp) # 4-byte Folded Spill - movl $0, %esi - jmp .LBB141_6 -.LBB141_4: - movl (%ebp), %esi - movl 4(%ebp), %eax - movl %eax, 4(%esp) # 4-byte Spill -.LBB141_6: - jne .LBB141_7 -# BB#8: - movl $0, 8(%esp) # 4-byte Folded Spill - jmp .LBB141_9 -.LBB141_7: - movl 28(%ebp), %eax - movl %eax, 8(%esp) # 4-byte Spill -.LBB141_9: - jne .LBB141_10 -# BB#11: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB141_12 -.LBB141_10: - movl 24(%ebp), %eax - movl %eax, (%esp) # 4-byte Spill -.LBB141_12: - jne .LBB141_13 -# BB#14: - movl $0, %edi - jmp .LBB141_15 -.LBB141_13: - movl 20(%ebp), %edi -.LBB141_15: - jne .LBB141_16 -# BB#17: - movl $0, %ebx - jmp .LBB141_18 -.LBB141_16: - movl 16(%ebp), %ebx -.LBB141_18: - jne .LBB141_19 -# BB#20: - movl %ebp, %eax - movl $0, %ebp - jmp .LBB141_21 -.LBB141_19: - movl %ebp, %eax - movl 12(%eax), %ebp -.LBB141_21: - jne .LBB141_22 -# BB#23: - xorl %eax, %eax - jmp .LBB141_24 -.LBB141_22: - movl 8(%eax), %eax -.LBB141_24: - addl 24(%esp), %esi # 4-byte Folded Reload - movl 4(%esp), %edx # 4-byte Reload - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %esi, 36(%ecx) - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %edx, 40(%ecx) - adcl 28(%esp), %ebp # 4-byte Folded Reload - movl %eax, 44(%ecx) - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebp, 48(%ecx) - adcl 36(%esp), %edi # 4-byte Folded Reload - movl %ebx, 52(%ecx) - movl (%esp), %edx # 4-byte Reload - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %edi, 56(%ecx) - movl 8(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %edx, 60(%ecx) - movl %eax, 64(%ecx) - movl 12(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%ecx) - addl $52, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end141: - .size mcl_fpDbl_sub9Lbmi2, .Lfunc_end141-mcl_fpDbl_sub9Lbmi2 - - .align 16, 0x90 - .type .LmulPv320x32,@function -.LmulPv320x32: # @mulPv320x32 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $28, %esp - movl %edx, %eax - movl 48(%esp), %edx - mulxl 4(%eax), %edi, %esi - mulxl (%eax), %ebp, %ebx - movl %ebp, 24(%esp) # 4-byte Spill - addl %edi, %ebx - 
movl %ebx, 20(%esp) # 4-byte Spill - mulxl 8(%eax), %edi, %ebx - adcl %esi, %edi - movl %edi, 16(%esp) # 4-byte Spill - mulxl 12(%eax), %esi, %edi - adcl %ebx, %esi - movl %esi, 12(%esp) # 4-byte Spill - mulxl 16(%eax), %esi, %ebx - movl %ebx, 4(%esp) # 4-byte Spill - adcl %edi, %esi - movl %esi, 8(%esp) # 4-byte Spill - movl %edx, %ebp - mulxl 20(%eax), %ebx, %esi - adcl 4(%esp), %ebx # 4-byte Folded Reload - mulxl 24(%eax), %edi, %edx - movl %edx, 4(%esp) # 4-byte Spill - adcl %esi, %edi - movl %ebp, %edx - mulxl 28(%eax), %esi, %edx - movl %edx, (%esp) # 4-byte Spill - adcl 4(%esp), %esi # 4-byte Folded Reload - movl %ebp, %edx - mulxl 32(%eax), %edx, %ebp - movl %ebp, 4(%esp) # 4-byte Spill - adcl (%esp), %edx # 4-byte Folded Reload - movl 24(%esp), %ebp # 4-byte Reload - movl %ebp, (%ecx) - movl 20(%esp), %ebp # 4-byte Reload - movl %ebp, 4(%ecx) - movl 16(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%ecx) - movl 12(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%ecx) - movl 8(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%ecx) - movl %ebx, 20(%ecx) - movl %edi, 24(%ecx) - movl %esi, 28(%ecx) - movl %edx, 32(%ecx) - movl 48(%esp), %edx - mulxl 36(%eax), %eax, %edx - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%ecx) - adcl $0, %edx - movl %edx, 40(%ecx) - movl %ecx, %eax - addl $28, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end142: - .size .LmulPv320x32, .Lfunc_end142-.LmulPv320x32 - - .globl mcl_fp_mulUnitPre10Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre10Lbmi2,@function -mcl_fp_mulUnitPre10Lbmi2: # @mcl_fp_mulUnitPre10Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $76, %esp - calll .L143$pb -.L143$pb: - popl %ebx -.Ltmp14: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp14-.L143$pb), %ebx - movl 104(%esp), %eax - movl %eax, (%esp) - leal 32(%esp), %ecx - movl 100(%esp), %edx - calll .LmulPv320x32 - movl 72(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 68(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 64(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 60(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 56(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 52(%esp), %ebx - movl 48(%esp), %ebp - movl 44(%esp), %edi - movl 40(%esp), %esi - movl 32(%esp), %edx - movl 36(%esp), %ecx - movl 96(%esp), %eax - movl %edx, (%eax) - movl %ecx, 4(%eax) - movl %esi, 8(%eax) - movl %edi, 12(%eax) - movl %ebp, 16(%eax) - movl %ebx, 20(%eax) - movl 12(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 40(%eax) - addl $76, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end143: - .size mcl_fp_mulUnitPre10Lbmi2, .Lfunc_end143-mcl_fp_mulUnitPre10Lbmi2 - - .globl mcl_fpDbl_mulPre10Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre10Lbmi2,@function -mcl_fpDbl_mulPre10Lbmi2: # @mcl_fpDbl_mulPre10Lbmi2 -# BB#0: - pushl %ebp - movl %esp, %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $188, %esp - calll .L144$pb -.L144$pb: - popl %ebx -.Ltmp15: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp15-.L144$pb), %ebx - movl %ebx, -128(%ebp) # 4-byte Spill - movl 16(%ebp), %edi - movl %edi, 8(%esp) - movl 12(%ebp), %esi - movl %esi, 4(%esp) - movl 8(%ebp), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre5Lbmi2@PLT - leal 20(%edi), %eax - movl %eax, 8(%esp) - leal 20(%esi), %eax - movl %eax, 
4(%esp) - movl 8(%ebp), %eax - leal 40(%eax), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre5Lbmi2@PLT - movl 28(%esi), %edi - movl (%esi), %ebx - movl 4(%esi), %eax - addl 20(%esi), %ebx - movl %ebx, -148(%ebp) # 4-byte Spill - adcl 24(%esi), %eax - movl %eax, -132(%ebp) # 4-byte Spill - adcl 8(%esi), %edi - movl %edi, -140(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %eax - movl %eax, -96(%ebp) # 4-byte Spill - movl 16(%ebp), %esi - movl (%esi), %eax - movl 4(%esi), %ecx - addl 20(%esi), %eax - movl %eax, -152(%ebp) # 4-byte Spill - adcl 24(%esi), %ecx - movl %ecx, -120(%ebp) # 4-byte Spill - movl 28(%esi), %eax - adcl 8(%esi), %eax - movl %eax, -160(%ebp) # 4-byte Spill - movl 32(%esi), %eax - adcl 12(%esi), %eax - movl 36(%esi), %ecx - adcl 16(%esi), %ecx - pushl %eax - seto %al - lahf - movl %eax, %esi - popl %eax - movl %esi, -156(%ebp) # 4-byte Spill - movl %ebx, -124(%ebp) # 4-byte Spill - jb .LBB144_2 -# BB#1: - xorl %edi, %edi - movl $0, -124(%ebp) # 4-byte Folded Spill -.LBB144_2: - movl %edi, -136(%ebp) # 4-byte Spill - movl 12(%ebp), %esi - movl %esi, %ebx - movl 36(%ebx), %esi - movl 32(%ebx), %edi - movl -96(%ebp), %edx # 4-byte Reload - pushl %eax - movl %edx, %eax - addb $127, %al - sahf - popl %eax - adcl 12(%ebx), %edi - movl %edi, -116(%ebp) # 4-byte Spill - adcl 16(%ebx), %esi - movl %esi, -144(%ebp) # 4-byte Spill - movl %ecx, -112(%ebp) # 4-byte Spill - movl %eax, -104(%ebp) # 4-byte Spill - movl -160(%ebp), %edx # 4-byte Reload - movl %edx, -108(%ebp) # 4-byte Spill - movl -120(%ebp), %esi # 4-byte Reload - movl %esi, -96(%ebp) # 4-byte Spill - movl -152(%ebp), %ebx # 4-byte Reload - movl %ebx, -100(%ebp) # 4-byte Spill - jb .LBB144_4 -# BB#3: - movl $0, -112(%ebp) # 4-byte Folded Spill - movl $0, -104(%ebp) # 4-byte Folded Spill - movl $0, -108(%ebp) # 4-byte Folded Spill - movl $0, -96(%ebp) # 4-byte Folded Spill - movl $0, -100(%ebp) # 4-byte Folded Spill -.LBB144_4: - movl -148(%ebp), %esi # 4-byte Reload - movl %esi, -72(%ebp) - movl -132(%ebp), %edi # 4-byte Reload - movl %edi, -68(%ebp) - movl -140(%ebp), %esi # 4-byte Reload - movl %esi, -64(%ebp) - movl %ebx, -92(%ebp) - movl -120(%ebp), %esi # 4-byte Reload - movl %esi, -88(%ebp) - movl %edx, -84(%ebp) - movl %eax, -80(%ebp) - movl %ecx, -76(%ebp) - sbbl %edx, %edx - movl -116(%ebp), %eax # 4-byte Reload - movl %eax, -60(%ebp) - movl -144(%ebp), %ebx # 4-byte Reload - movl %ebx, -56(%ebp) - movl -156(%ebp), %ecx # 4-byte Reload - pushl %eax - movl %ecx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB144_6 -# BB#5: - movl $0, %ebx - movl $0, %eax - movl $0, %edi -.LBB144_6: - movl %eax, -116(%ebp) # 4-byte Spill - sbbl %eax, %eax - leal -92(%ebp), %ecx - movl %ecx, 8(%esp) - leal -72(%ebp), %ecx - movl %ecx, 4(%esp) - leal -52(%ebp), %ecx - movl %ecx, (%esp) - andl %eax, %edx - movl -124(%ebp), %eax # 4-byte Reload - addl %eax, -100(%ebp) # 4-byte Folded Spill - adcl %edi, -96(%ebp) # 4-byte Folded Spill - movl -108(%ebp), %esi # 4-byte Reload - adcl -136(%ebp), %esi # 4-byte Folded Reload - movl -116(%ebp), %eax # 4-byte Reload - adcl %eax, -104(%ebp) # 4-byte Folded Spill - movl -112(%ebp), %edi # 4-byte Reload - adcl %ebx, %edi - sbbl %eax, %eax - andl $1, %eax - movl %eax, -120(%ebp) # 4-byte Spill - andl $1, %edx - movl %edx, -116(%ebp) # 4-byte Spill - movl -128(%ebp), %ebx # 4-byte Reload - calll mcl_fpDbl_mulPre5Lbmi2@PLT - movl -100(%ebp), %eax # 4-byte Reload - addl -32(%ebp), %eax - movl %eax, -100(%ebp) # 4-byte Spill - movl -96(%ebp), %eax # 4-byte Reload - adcl -28(%ebp), %eax - 
movl %eax, -96(%ebp) # 4-byte Spill - adcl -24(%ebp), %esi - movl %esi, -108(%ebp) # 4-byte Spill - movl -104(%ebp), %eax # 4-byte Reload - adcl -20(%ebp), %eax - movl %eax, -104(%ebp) # 4-byte Spill - adcl -16(%ebp), %edi - movl %edi, -112(%ebp) # 4-byte Spill - movl -120(%ebp), %eax # 4-byte Reload - adcl %eax, -116(%ebp) # 4-byte Folded Spill - movl -52(%ebp), %ecx - movl 8(%ebp), %esi - subl (%esi), %ecx - movl -48(%ebp), %ebx - sbbl 4(%esi), %ebx - movl -44(%ebp), %eax - sbbl 8(%esi), %eax - movl %eax, -120(%ebp) # 4-byte Spill - movl -40(%ebp), %edx - sbbl 12(%esi), %edx - movl -36(%ebp), %edi - sbbl 16(%esi), %edi - movl 20(%esi), %eax - movl %eax, -124(%ebp) # 4-byte Spill - sbbl %eax, -100(%ebp) # 4-byte Folded Spill - movl 24(%esi), %eax - movl %eax, -128(%ebp) # 4-byte Spill - sbbl %eax, -96(%ebp) # 4-byte Folded Spill - movl 28(%esi), %eax - movl %eax, -132(%ebp) # 4-byte Spill - sbbl %eax, -108(%ebp) # 4-byte Folded Spill - movl 32(%esi), %eax - movl %eax, -136(%ebp) # 4-byte Spill - sbbl %eax, -104(%ebp) # 4-byte Folded Spill - movl 36(%esi), %eax - movl %eax, -140(%ebp) # 4-byte Spill - sbbl %eax, -112(%ebp) # 4-byte Folded Spill - sbbl $0, -116(%ebp) # 4-byte Folded Spill - movl 40(%esi), %eax - movl %eax, -160(%ebp) # 4-byte Spill - subl %eax, %ecx - movl 44(%esi), %eax - movl %eax, -164(%ebp) # 4-byte Spill - sbbl %eax, %ebx - movl 48(%esi), %eax - movl %eax, -168(%ebp) # 4-byte Spill - sbbl %eax, -120(%ebp) # 4-byte Folded Spill - movl 52(%esi), %eax - movl %eax, -172(%ebp) # 4-byte Spill - sbbl %eax, %edx - movl 56(%esi), %eax - movl %eax, -176(%ebp) # 4-byte Spill - sbbl %eax, %edi - movl 60(%esi), %eax - movl %eax, -180(%ebp) # 4-byte Spill - sbbl %eax, -100(%ebp) # 4-byte Folded Spill - movl 64(%esi), %eax - movl %eax, -144(%ebp) # 4-byte Spill - sbbl %eax, -96(%ebp) # 4-byte Folded Spill - movl 68(%esi), %eax - movl %eax, -148(%ebp) # 4-byte Spill - sbbl %eax, -108(%ebp) # 4-byte Folded Spill - movl 72(%esi), %eax - movl %eax, -152(%ebp) # 4-byte Spill - sbbl %eax, -104(%ebp) # 4-byte Folded Spill - movl 76(%esi), %eax - movl %eax, -156(%ebp) # 4-byte Spill - sbbl %eax, -112(%ebp) # 4-byte Folded Spill - sbbl $0, -116(%ebp) # 4-byte Folded Spill - addl -124(%ebp), %ecx # 4-byte Folded Reload - adcl -128(%ebp), %ebx # 4-byte Folded Reload - movl %ecx, 20(%esi) - movl -120(%ebp), %eax # 4-byte Reload - adcl -132(%ebp), %eax # 4-byte Folded Reload - movl %ebx, 24(%esi) - adcl -136(%ebp), %edx # 4-byte Folded Reload - movl %eax, 28(%esi) - adcl -140(%ebp), %edi # 4-byte Folded Reload - movl %edx, 32(%esi) - movl -100(%ebp), %eax # 4-byte Reload - adcl -160(%ebp), %eax # 4-byte Folded Reload - movl %edi, 36(%esi) - movl -96(%ebp), %ecx # 4-byte Reload - adcl -164(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 40(%esi) - movl -108(%ebp), %eax # 4-byte Reload - adcl -168(%ebp), %eax # 4-byte Folded Reload - movl %ecx, 44(%esi) - movl -104(%ebp), %ecx # 4-byte Reload - adcl -172(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 48(%esi) - movl -112(%ebp), %edx # 4-byte Reload - adcl -176(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 52(%esi) - movl -116(%ebp), %eax # 4-byte Reload - adcl -180(%ebp), %eax # 4-byte Folded Reload - movl %edx, 56(%esi) - movl %eax, 60(%esi) - movl -144(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 64(%esi) - movl -148(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 68(%esi) - movl -152(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 72(%esi) - movl -156(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 
76(%esi) - addl $188, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end144: - .size mcl_fpDbl_mulPre10Lbmi2, .Lfunc_end144-mcl_fpDbl_mulPre10Lbmi2 - - .globl mcl_fpDbl_sqrPre10Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre10Lbmi2,@function -mcl_fpDbl_sqrPre10Lbmi2: # @mcl_fpDbl_sqrPre10Lbmi2 -# BB#0: - pushl %ebp - movl %esp, %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $188, %esp - calll .L145$pb -.L145$pb: - popl %ebx -.Ltmp16: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp16-.L145$pb), %ebx - movl %ebx, -120(%ebp) # 4-byte Spill - movl 12(%ebp), %edi - movl %edi, 8(%esp) - movl %edi, 4(%esp) - movl 8(%ebp), %esi - movl %esi, (%esp) - calll mcl_fpDbl_mulPre5Lbmi2@PLT - leal 20(%edi), %eax - movl %eax, 8(%esp) - movl %eax, 4(%esp) - leal 40(%esi), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre5Lbmi2@PLT - movl 36(%edi), %eax - movl 32(%edi), %ebx - movl 28(%edi), %esi - movl (%edi), %ecx - movl 4(%edi), %edx - addl 20(%edi), %ecx - adcl 24(%edi), %edx - adcl 8(%edi), %esi - adcl 12(%edi), %ebx - movl %ebx, -124(%ebp) # 4-byte Spill - adcl 16(%edi), %eax - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -128(%ebp) # 4-byte Spill - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -108(%ebp) # 4-byte Spill - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -104(%ebp) # 4-byte Spill - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -100(%ebp) # 4-byte Spill - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -96(%ebp) # 4-byte Spill - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - sbbl %ebx, %ebx - movl %ebx, -116(%ebp) # 4-byte Spill - pushl %eax - movl %edi, %eax - addb $127, %al - sahf - popl %eax - jb .LBB145_1 -# BB#2: - movl $0, -112(%ebp) # 4-byte Folded Spill - jmp .LBB145_3 -.LBB145_1: - leal (%ecx,%ecx), %edi - movl %edi, -112(%ebp) # 4-byte Spill -.LBB145_3: - movl -96(%ebp), %edi # 4-byte Reload - pushl %eax - movl %edi, %eax - addb $127, %al - sahf - popl %eax - movl -124(%ebp), %edi # 4-byte Reload - jb .LBB145_4 -# BB#5: - movl $0, -96(%ebp) # 4-byte Folded Spill - jmp .LBB145_6 -.LBB145_4: - movl %edx, %ebx - shldl $1, %ecx, %ebx - movl %ebx, -96(%ebp) # 4-byte Spill -.LBB145_6: - movl -100(%ebp), %ebx # 4-byte Reload - pushl %eax - movl %ebx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB145_7 -# BB#8: - movl $0, -100(%ebp) # 4-byte Folded Spill - jmp .LBB145_9 -.LBB145_7: - movl %esi, %ebx - shldl $1, %edx, %ebx - movl %ebx, -100(%ebp) # 4-byte Spill -.LBB145_9: - movl -104(%ebp), %ebx # 4-byte Reload - pushl %eax - movl %ebx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB145_10 -# BB#11: - movl $0, -104(%ebp) # 4-byte Folded Spill - jmp .LBB145_12 -.LBB145_10: - movl %edi, %ebx - shldl $1, %esi, %ebx - movl %ebx, -104(%ebp) # 4-byte Spill -.LBB145_12: - movl -108(%ebp), %ebx # 4-byte Reload - pushl %eax - movl %ebx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB145_13 -# BB#14: - movl $0, -108(%ebp) # 4-byte Folded Spill - jmp .LBB145_15 -.LBB145_13: - movl %eax, %ebx - shldl $1, %edi, %ebx - movl %ebx, -108(%ebp) # 4-byte Spill -.LBB145_15: - movl %ecx, -72(%ebp) - movl %edx, -68(%ebp) - movl %esi, -64(%ebp) - movl %edi, -60(%ebp) - movl %eax, -56(%ebp) - movl %ecx, -92(%ebp) - movl %edx, -88(%ebp) - movl %esi, -84(%ebp) - movl %edi, -80(%ebp) - movl %eax, -76(%ebp) - movl -128(%ebp), %ecx # 4-byte Reload - pushl %eax - movl %ecx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB145_16 -# BB#17: - movl $0, 
-124(%ebp) # 4-byte Folded Spill - jmp .LBB145_18 -.LBB145_16: - shrl $31, %eax - movl %eax, -124(%ebp) # 4-byte Spill -.LBB145_18: - leal -52(%ebp), %eax - movl %eax, (%esp) - leal -72(%ebp), %eax - movl %eax, 4(%esp) - leal -92(%ebp), %eax - movl %eax, 8(%esp) - movl -116(%ebp), %esi # 4-byte Reload - andl $1, %esi - movl -120(%ebp), %ebx # 4-byte Reload - calll mcl_fpDbl_mulPre5Lbmi2@PLT - movl -112(%ebp), %edi # 4-byte Reload - addl -32(%ebp), %edi - movl -96(%ebp), %eax # 4-byte Reload - adcl -28(%ebp), %eax - movl %eax, -96(%ebp) # 4-byte Spill - movl -100(%ebp), %eax # 4-byte Reload - adcl -24(%ebp), %eax - movl %eax, -100(%ebp) # 4-byte Spill - movl -104(%ebp), %eax # 4-byte Reload - adcl -20(%ebp), %eax - movl %eax, -104(%ebp) # 4-byte Spill - movl -108(%ebp), %eax # 4-byte Reload - adcl -16(%ebp), %eax - movl %eax, -108(%ebp) # 4-byte Spill - adcl -124(%ebp), %esi # 4-byte Folded Reload - movl -52(%ebp), %edx - movl 8(%ebp), %eax - subl (%eax), %edx - movl -48(%ebp), %ebx - sbbl 4(%eax), %ebx - movl -44(%ebp), %ecx - sbbl 8(%eax), %ecx - movl %ecx, -116(%ebp) # 4-byte Spill - movl -40(%ebp), %ecx - sbbl 12(%eax), %ecx - movl %ecx, -144(%ebp) # 4-byte Spill - movl -36(%ebp), %ecx - sbbl 16(%eax), %ecx - movl %ecx, -120(%ebp) # 4-byte Spill - movl 20(%eax), %ecx - movl %ecx, -124(%ebp) # 4-byte Spill - sbbl %ecx, %edi - movl %edi, -112(%ebp) # 4-byte Spill - movl 24(%eax), %ecx - movl %ecx, -128(%ebp) # 4-byte Spill - sbbl %ecx, -96(%ebp) # 4-byte Folded Spill - movl 28(%eax), %ecx - movl %ecx, -132(%ebp) # 4-byte Spill - sbbl %ecx, -100(%ebp) # 4-byte Folded Spill - movl 32(%eax), %ecx - movl %ecx, -136(%ebp) # 4-byte Spill - sbbl %ecx, -104(%ebp) # 4-byte Folded Spill - movl 36(%eax), %ecx - movl %ecx, -140(%ebp) # 4-byte Spill - sbbl %ecx, -108(%ebp) # 4-byte Folded Spill - sbbl $0, %esi - movl 40(%eax), %ecx - movl %ecx, -160(%ebp) # 4-byte Spill - subl %ecx, %edx - movl 44(%eax), %ecx - movl %ecx, -164(%ebp) # 4-byte Spill - sbbl %ecx, %ebx - movl 48(%eax), %ecx - movl %ecx, -168(%ebp) # 4-byte Spill - sbbl %ecx, -116(%ebp) # 4-byte Folded Spill - movl 52(%eax), %ecx - movl %ecx, -172(%ebp) # 4-byte Spill - movl -144(%ebp), %edi # 4-byte Reload - sbbl %ecx, %edi - movl 56(%eax), %ecx - movl %ecx, -176(%ebp) # 4-byte Spill - sbbl %ecx, -120(%ebp) # 4-byte Folded Spill - movl 60(%eax), %ecx - movl %ecx, -180(%ebp) # 4-byte Spill - sbbl %ecx, -112(%ebp) # 4-byte Folded Spill - movl 64(%eax), %ecx - movl %ecx, -144(%ebp) # 4-byte Spill - sbbl %ecx, -96(%ebp) # 4-byte Folded Spill - movl 68(%eax), %ecx - movl %ecx, -148(%ebp) # 4-byte Spill - sbbl %ecx, -100(%ebp) # 4-byte Folded Spill - movl 72(%eax), %ecx - movl %ecx, -152(%ebp) # 4-byte Spill - sbbl %ecx, -104(%ebp) # 4-byte Folded Spill - movl 76(%eax), %ecx - movl %ecx, -156(%ebp) # 4-byte Spill - sbbl %ecx, -108(%ebp) # 4-byte Folded Spill - sbbl $0, %esi - addl -124(%ebp), %edx # 4-byte Folded Reload - adcl -128(%ebp), %ebx # 4-byte Folded Reload - movl %edx, 20(%eax) - movl -116(%ebp), %ecx # 4-byte Reload - adcl -132(%ebp), %ecx # 4-byte Folded Reload - movl %ebx, 24(%eax) - adcl -136(%ebp), %edi # 4-byte Folded Reload - movl %ecx, 28(%eax) - movl -120(%ebp), %edx # 4-byte Reload - adcl -140(%ebp), %edx # 4-byte Folded Reload - movl %edi, 32(%eax) - movl -112(%ebp), %ecx # 4-byte Reload - adcl -160(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 36(%eax) - movl -96(%ebp), %edx # 4-byte Reload - adcl -164(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 40(%eax) - movl -100(%ebp), %ecx # 4-byte Reload - adcl -168(%ebp), %ecx 
# 4-byte Folded Reload - movl %edx, 44(%eax) - movl -104(%ebp), %edx # 4-byte Reload - adcl -172(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 48(%eax) - movl -108(%ebp), %ecx # 4-byte Reload - adcl -176(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 52(%eax) - adcl -180(%ebp), %esi # 4-byte Folded Reload - movl %ecx, 56(%eax) - movl %esi, 60(%eax) - movl -144(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 64(%eax) - movl -148(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 68(%eax) - movl -152(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 72(%eax) - movl -156(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 76(%eax) - addl $188, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end145: - .size mcl_fpDbl_sqrPre10Lbmi2, .Lfunc_end145-mcl_fpDbl_sqrPre10Lbmi2 - - .globl mcl_fp_mont10Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont10Lbmi2,@function -mcl_fp_mont10Lbmi2: # @mcl_fp_mont10Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1036, %esp # imm = 0x40C - calll .L146$pb -.L146$pb: - popl %ebx -.Ltmp17: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp17-.L146$pb), %ebx - movl 1068(%esp), %eax - movl -4(%eax), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 992(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - movl 992(%esp), %edi - movl 996(%esp), %ebp - movl %edi, %eax - imull %esi, %eax - movl 1032(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 1028(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 1024(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 1020(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 1016(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1012(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 1008(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 1004(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1000(%esp), %esi - movl %eax, (%esp) - leal 944(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - addl 944(%esp), %edi - adcl 948(%esp), %ebp - adcl 952(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 956(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 960(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 976(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 980(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 984(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - sbbl %edi, %edi - movl 1064(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 896(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - andl $1, %edi - addl 896(%esp), %ebp - adcl 900(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 912(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - 
adcl 920(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 924(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 928(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 932(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 936(%esp), %edi - sbbl %eax, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl %ebp, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 848(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - movl 64(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 848(%esp), %ebp - adcl 852(%esp), %esi - movl 44(%esp), %ecx # 4-byte Reload - adcl 856(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 860(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 864(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 868(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 872(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 876(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 880(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl 884(%esp), %ebp - adcl 888(%esp), %edi - adcl $0, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 800(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - addl 800(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 828(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 832(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - adcl 836(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %esi, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 752(%esp), %ecx - movl 1068(%esp), %eax - movl %eax, %edx - calll .LmulPv320x32 - andl $1, %edi - movl %edi, %ecx - addl 752(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 756(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 768(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 780(%esp), %esi - movl 76(%esp), %edi # 4-byte Reload - adcl 784(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl 
%eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 704(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - movl 44(%esp), %ecx # 4-byte Reload - addl 704(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 712(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 716(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 720(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 728(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - adcl 732(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 736(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 744(%esp), %edi - sbbl %esi, %esi - movl %ecx, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 656(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - andl $1, %esi - movl %esi, %ecx - movl 44(%esp), %eax # 4-byte Reload - addl 656(%esp), %eax - movl 40(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 676(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 688(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 696(%esp), %edi - adcl $0, %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 608(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - movl 40(%esp), %ecx # 4-byte Reload - addl 608(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 624(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 636(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 640(%esp), %esi - adcl 644(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 648(%esp), %edi - sbbl %ebp, %ebp - movl %ecx, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 560(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - andl $1, %ebp - movl %ebp, %ecx - movl 
40(%esp), %eax # 4-byte Reload - addl 560(%esp), %eax - movl 36(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 572(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 592(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 600(%esp), %edi - adcl $0, %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 512(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - movl 36(%esp), %ecx # 4-byte Reload - addl 512(%esp), %ecx - movl 48(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl %ebp, %esi - adcl 520(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 548(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %eax - movl %ecx, %edi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 464(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - andl $1, %ebp - movl %ebp, %eax - addl 464(%esp), %edi - movl 48(%esp), %ecx # 4-byte Reload - adcl 468(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - adcl 472(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 476(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 480(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 484(%esp), %esi - movl 60(%esp), %ecx # 4-byte Reload - adcl 488(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 492(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 496(%esp), %ebp - movl 44(%esp), %ecx # 4-byte Reload - adcl 500(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - adcl 504(%esp), %edi - adcl $0, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 416(%esp), %ecx - movl 1060(%esp), %eax - movl %eax, %edx - calll .LmulPv320x32 - movl 48(%esp), %ecx # 4-byte Reload - addl 416(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 68(%esp) # 4-byte 
Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 432(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 440(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 444(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 452(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 368(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - andl $1, %edi - movl %edi, %ecx - addl 368(%esp), %esi - movl 56(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 380(%esp), %esi - movl 76(%esp), %ebp # 4-byte Reload - adcl 384(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 400(%esp), %edi - movl 40(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 320(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - movl 56(%esp), %ecx # 4-byte Reload - addl 320(%esp), %ecx - movl 68(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 328(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - adcl 332(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 348(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 360(%esp), %ebp - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 272(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - andl $1, %edi - movl %edi, %ecx - addl 272(%esp), %esi - movl 68(%esp), %edi # 4-byte Reload - adcl 276(%esp), %edi - movl 72(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - adcl 288(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 52(%esp), %eax # 
4-byte Reload - adcl 296(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 312(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl %ecx, %ebp - adcl $0, %ebp - movl 1064(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 224(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - movl %edi, %ecx - addl 224(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 236(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl 240(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 264(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %eax - movl %ecx, %esi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 176(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - andl $1, %ebp - addl 176(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl %edi, %esi - adcl 192(%esp), %esi - movl 52(%esp), %edi # 4-byte Reload - adcl 196(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl $0, %ebp - movl 1064(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 128(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - movl 72(%esp), %ecx # 4-byte Reload - addl 128(%esp), %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 132(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 136(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 140(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - adcl 144(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 148(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 152(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 156(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 
68(%esp), %eax # 4-byte Reload - adcl 164(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 168(%esp), %ebp - sbbl %esi, %esi - movl 32(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %edi - movl %eax, (%esp) - leal 80(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - andl $1, %esi - addl 80(%esp), %edi - movl 76(%esp), %eax # 4-byte Reload - movl 64(%esp), %ebx # 4-byte Reload - adcl 84(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 88(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl 92(%esp), %ebx - movl 52(%esp), %edx # 4-byte Reload - adcl 96(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - adcl 100(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - adcl 104(%esp), %edx - movl %edx, 40(%esp) # 4-byte Spill - movl %edx, %edi - movl 36(%esp), %edx # 4-byte Reload - adcl 108(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - adcl 112(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 116(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - adcl 120(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - adcl $0, %esi - movl 1068(%esp), %edx - subl (%edx), %eax - sbbl 4(%edx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl %ebx, %ecx - sbbl 8(%edx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - sbbl 12(%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - sbbl 16(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - sbbl 20(%edx), %edi - movl %edi, 32(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - sbbl 24(%edx), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - sbbl 28(%edx), %ecx - movl 68(%esp), %edi # 4-byte Reload - sbbl 32(%edx), %edi - movl %edi, 64(%esp) # 4-byte Spill - sbbl 36(%edx), %ebp - movl %ebp, %edx - sbbl $0, %esi - andl $1, %esi - jne .LBB146_2 -# BB#1: - movl %ecx, 48(%esp) # 4-byte Spill -.LBB146_2: - movl %esi, %ecx - testb %cl, %cl - movl 76(%esp), %esi # 4-byte Reload - jne .LBB146_4 -# BB#3: - movl %eax, %esi -.LBB146_4: - movl 1056(%esp), %eax - movl %esi, (%eax) - movl 60(%esp), %edi # 4-byte Reload - jne .LBB146_6 -# BB#5: - movl 16(%esp), %edi # 4-byte Reload -.LBB146_6: - movl %edi, 4(%eax) - jne .LBB146_8 -# BB#7: - movl 20(%esp), %ebx # 4-byte Reload -.LBB146_8: - movl %ebx, 8(%eax) - movl 52(%esp), %ebp # 4-byte Reload - movl 44(%esp), %ecx # 4-byte Reload - jne .LBB146_10 -# BB#9: - movl 24(%esp), %ebp # 4-byte Reload -.LBB146_10: - movl %ebp, 12(%eax) - jne .LBB146_12 -# BB#11: - movl 28(%esp), %ecx # 4-byte Reload -.LBB146_12: - movl %ecx, 16(%eax) - movl 40(%esp), %ecx # 4-byte Reload - jne .LBB146_14 -# BB#13: - movl 32(%esp), %ecx # 4-byte Reload -.LBB146_14: - movl %ecx, 20(%eax) - movl 36(%esp), %ecx # 4-byte Reload - jne .LBB146_16 -# BB#15: - movl 56(%esp), %ecx # 4-byte Reload -.LBB146_16: - movl %ecx, 24(%eax) - movl 48(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 68(%esp), %ecx # 4-byte Reload - jne .LBB146_18 -# BB#17: - movl 64(%esp), %ecx # 4-byte Reload -.LBB146_18: - movl %ecx, 32(%eax) - movl 72(%esp), %ecx # 4-byte Reload - jne .LBB146_20 -# BB#19: - movl %edx, %ecx -.LBB146_20: - movl %ecx, 36(%eax) - addl $1036, %esp # imm = 0x40C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end146: - .size mcl_fp_mont10Lbmi2, .Lfunc_end146-mcl_fp_mont10Lbmi2 - - .globl 
mcl_fp_montNF10Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF10Lbmi2,@function -mcl_fp_montNF10Lbmi2: # @mcl_fp_montNF10Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1020, %esp # imm = 0x3FC - calll .L147$pb -.L147$pb: - popl %ebx -.Ltmp18: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp18-.L147$pb), %ebx - movl 1052(%esp), %eax - movl -4(%eax), %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 976(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 - movl 976(%esp), %edi - movl 980(%esp), %esi - movl %edi, %eax - imull %ebp, %eax - movl 1016(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 1012(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 1008(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1004(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 1000(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 996(%esp), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 992(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 988(%esp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 984(%esp), %ebp - movl %eax, (%esp) - leal 928(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 928(%esp), %edi - adcl 932(%esp), %esi - adcl 936(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 940(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 944(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 948(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - adcl 952(%esp), %edi - movl 36(%esp), %eax # 4-byte Reload - adcl 956(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 960(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 880(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 - movl 920(%esp), %ecx - addl 880(%esp), %esi - adcl 884(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 888(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - adcl 900(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 912(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl %esi, %eax - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 832(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 832(%esp), %esi - adcl 836(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %edi # 4-byte Reload - adcl 848(%esp), %edi - movl 40(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax 
- movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 856(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - adcl 872(%esp), %esi - movl 1048(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 784(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 - movl 824(%esp), %ecx - addl 784(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 796(%esp), %edi - movl %edi, 24(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - adcl 808(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 820(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl %ebp, %eax - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 736(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 736(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 752(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 756(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 760(%esp), %edi - movl 56(%esp), %ebp # 4-byte Reload - adcl 764(%esp), %ebp - movl 60(%esp), %esi # 4-byte Reload - adcl 768(%esp), %esi - movl 52(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 688(%esp), %ecx - movl 1044(%esp), %eax - movl %eax, %edx - calll .LmulPv320x32 - movl 728(%esp), %eax - movl 28(%esp), %edx # 4-byte Reload - addl 688(%esp), %edx - movl 44(%esp), %ecx # 4-byte Reload - adcl 692(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 24(%esp), %ecx # 4-byte Reload - adcl 696(%esp), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 700(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 704(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl 708(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - adcl 712(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - adcl 716(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 720(%esp), %ebp - movl 32(%esp), %esi # 4-byte Reload - adcl 724(%esp), %esi - adcl $0, %eax - movl %eax, 28(%esp) # 4-byte Spill - movl %edx, %eax - movl %edx, %edi - imull 20(%esp), %eax # 4-byte 
Folded Reload - movl %eax, (%esp) - leal 640(%esp), %ecx - movl 1052(%esp), %eax - movl %eax, %edx - calll .LmulPv320x32 - addl 640(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %edi # 4-byte Reload - adcl 656(%esp), %edi - movl 48(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 672(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - adcl 676(%esp), %esi - movl %esi, %ebp - movl 28(%esp), %esi # 4-byte Reload - adcl 680(%esp), %esi - movl 1048(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 592(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 - movl 632(%esp), %edx - movl 44(%esp), %ecx # 4-byte Reload - addl 592(%esp), %ecx - movl 24(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 604(%esp), %edi - movl %edi, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 624(%esp), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - adcl 628(%esp), %esi - movl %esi, 28(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 44(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 544(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 544(%esp), %esi - movl 24(%esp), %edi # 4-byte Reload - adcl 548(%esp), %edi - movl 40(%esp), %esi # 4-byte Reload - adcl 552(%esp), %esi - movl 36(%esp), %ebp # 4-byte Reload - adcl 556(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 496(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 - movl 536(%esp), %edx - addl 496(%esp), %edi - adcl 500(%esp), %esi - movl %esi, 40(%esp) # 4-byte Spill - adcl 504(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte 
Reload - adcl 516(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 28(%esp), %esi # 4-byte Reload - adcl 528(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 24(%esp) # 4-byte Spill - movl %edi, %eax - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 448(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 448(%esp), %edi - movl 40(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 456(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - adcl 464(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl 480(%esp), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %esi # 4-byte Reload - adcl 488(%esp), %esi - movl 1048(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 400(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 - movl 440(%esp), %eax - movl 40(%esp), %ecx # 4-byte Reload - addl 400(%esp), %ecx - adcl 404(%esp), %ebp - movl 48(%esp), %edx # 4-byte Reload - adcl 408(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - adcl 412(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 416(%esp), %edi - movl 52(%esp), %edx # 4-byte Reload - adcl 420(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 32(%esp), %edx # 4-byte Reload - adcl 424(%esp), %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 28(%esp), %edx # 4-byte Reload - adcl 428(%esp), %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - adcl 432(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - adcl 436(%esp), %esi - movl %esi, 24(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 352(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 352(%esp), %esi - adcl 356(%esp), %ebp - movl %ebp, 36(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 360(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl %edi, %esi - adcl 368(%esp), %esi - movl 52(%esp), %edi # 4-byte Reload - adcl 372(%esp), %edi - movl 32(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 304(%esp), %ecx - movl 1044(%esp), %eax - movl %eax, %edx - calll 
.LmulPv320x32 - movl 344(%esp), %edx - movl 36(%esp), %ecx # 4-byte Reload - addl 304(%esp), %ecx - adcl 308(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 316(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - adcl 320(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 32(%esp), %ebp # 4-byte Reload - adcl 324(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - adcl 332(%esp), %esi - movl 24(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 256(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 256(%esp), %edi - movl 48(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 272(%esp), %edi - adcl 276(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - adcl 284(%esp), %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 24(%esp), %esi # 4-byte Reload - adcl 288(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 208(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 - movl 248(%esp), %edx - movl 48(%esp), %ecx # 4-byte Reload - addl 208(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 220(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - adcl 224(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 236(%esp), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 48(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 160(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 160(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 164(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 168(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl %ebp, %edi - adcl 176(%esp), %edi - movl 28(%esp), %esi # 4-byte Reload - adcl 180(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte 
Reload - adcl 188(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - adcl 192(%esp), %ebp - movl 36(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 112(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 - movl 152(%esp), %edx - movl 56(%esp), %ecx # 4-byte Reload - addl 112(%esp), %ecx - movl 60(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 120(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 124(%esp), %edi - movl %edi, 32(%esp) # 4-byte Spill - adcl 128(%esp), %esi - movl %esi, %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 132(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 136(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - adcl 140(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - adcl 144(%esp), %esi - movl 48(%esp), %eax # 4-byte Reload - adcl 148(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %ebp - movl %eax, (%esp) - leal 64(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 64(%esp), %ebp - movl %edi, %ebp - movl 60(%esp), %eax # 4-byte Reload - movl 32(%esp), %ebx # 4-byte Reload - adcl 68(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 72(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl 76(%esp), %ebx - adcl 80(%esp), %ebp - movl 44(%esp), %edx # 4-byte Reload - adcl 84(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 24(%esp), %edx # 4-byte Reload - adcl 88(%esp), %edx - movl %edx, 24(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - adcl 92(%esp), %edx - movl %edx, 40(%esp) # 4-byte Spill - adcl 96(%esp), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - adcl 100(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 104(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, %edx - movl 1052(%esp), %edi - subl (%edi), %edx - sbbl 4(%edi), %ecx - movl %ebx, %eax - sbbl 8(%edi), %eax - movl %ebp, %esi - sbbl 12(%edi), %esi - movl %esi, 4(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - sbbl 16(%edi), %esi - movl %esi, 8(%esp) # 4-byte Spill - movl 24(%esp), %esi # 4-byte Reload - sbbl 20(%edi), %esi - movl %esi, 12(%esp) # 4-byte Spill - movl 40(%esp), %esi # 4-byte Reload - sbbl 24(%edi), %esi - movl %esi, 16(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - sbbl 28(%edi), %esi - movl %esi, 20(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - sbbl 32(%edi), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - sbbl 36(%edi), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl %esi, %edi - sarl $31, %edi - testl %edi, %edi - movl 60(%esp), %edi # 4-byte Reload - js .LBB147_2 -# BB#1: - movl %edx, %edi -.LBB147_2: - movl 1040(%esp), %edx - movl %edi, (%edx) - movl 52(%esp), %edi # 4-byte Reload - js .LBB147_4 -# BB#3: - movl %ecx, %edi -.LBB147_4: - movl %edi, 4(%edx) - js .LBB147_6 -# BB#5: - movl %eax, %ebx -.LBB147_6: - movl %ebx, 8(%edx) - js .LBB147_8 -# BB#7: - movl 4(%esp), %ebp # 4-byte Reload 
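# The js/.LBB147_* blocks around this point are the montNF write-back: the
# subtraction t - p was computed once above, the sign of its top borrow word
# was extracted with sarl $31, and each output limb is then selected from the
# reduced or unreduced value on that single condition, instead of threading
# an extra carry word through the sbbl/testb/jne tail that mcl_fp_mont10Lbmi2
# uses. A high-level sketch of the whole algorithm follows the last hunk.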
-.LBB147_8: - movl %ebp, 12(%edx) - movl 44(%esp), %esi # 4-byte Reload - movl 24(%esp), %eax # 4-byte Reload - js .LBB147_10 -# BB#9: - movl 8(%esp), %esi # 4-byte Reload -.LBB147_10: - movl %esi, 16(%edx) - js .LBB147_12 -# BB#11: - movl 12(%esp), %eax # 4-byte Reload -.LBB147_12: - movl %eax, 20(%edx) - movl 40(%esp), %eax # 4-byte Reload - js .LBB147_14 -# BB#13: - movl 16(%esp), %eax # 4-byte Reload -.LBB147_14: - movl %eax, 24(%edx) - movl 36(%esp), %eax # 4-byte Reload - js .LBB147_16 -# BB#15: - movl 20(%esp), %eax # 4-byte Reload -.LBB147_16: - movl %eax, 28(%edx) - movl 48(%esp), %eax # 4-byte Reload - js .LBB147_18 -# BB#17: - movl 28(%esp), %eax # 4-byte Reload -.LBB147_18: - movl %eax, 32(%edx) - movl 56(%esp), %eax # 4-byte Reload - js .LBB147_20 -# BB#19: - movl 32(%esp), %eax # 4-byte Reload -.LBB147_20: - movl %eax, 36(%edx) - addl $1020, %esp # imm = 0x3FC - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end147: - .size mcl_fp_montNF10Lbmi2, .Lfunc_end147-mcl_fp_montNF10Lbmi2 - - .globl mcl_fp_montRed10Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed10Lbmi2,@function -mcl_fp_montRed10Lbmi2: # @mcl_fp_montRed10Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $604, %esp # imm = 0x25C - calll .L148$pb -.L148$pb: - popl %eax -.Ltmp19: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp19-.L148$pb), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 632(%esp), %edx - movl -4(%edx), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 628(%esp), %ecx - movl (%ecx), %ebx - movl %ebx, 56(%esp) # 4-byte Spill - movl 4(%ecx), %edi - movl %edi, 68(%esp) # 4-byte Spill - imull %esi, %ebx - movl 76(%ecx), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 72(%ecx), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%ecx), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 64(%ecx), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 60(%ecx), %esi - movl %esi, 104(%esp) # 4-byte Spill - movl 56(%ecx), %esi - movl %esi, 120(%esp) # 4-byte Spill - movl 52(%ecx), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 48(%ecx), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 44(%ecx), %esi - movl %esi, 124(%esp) # 4-byte Spill - movl 40(%ecx), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 36(%ecx), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 32(%ecx), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 28(%ecx), %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 24(%ecx), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 20(%ecx), %ebp - movl 16(%ecx), %edi - movl 12(%ecx), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 8(%ecx), %esi - movl (%edx), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 36(%edx), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 32(%edx), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 28(%edx), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 24(%edx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 20(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 16(%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 12(%edx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 8(%edx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 4(%edx), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl %ebx, (%esp) - leal 560(%esp), %ecx - movl 64(%esp), %ebx # 4-byte Reload - calll .LmulPv320x32 - movl 56(%esp), %eax # 4-byte Reload - addl 560(%esp), %eax - movl 68(%esp), %ecx # 4-byte Reload - adcl 564(%esp), %ecx - adcl 568(%esp), %esi - movl %esi, 4(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 52(%esp) # 4-byte 
Spill - adcl 576(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - adcl 580(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 108(%esp) # 4-byte Folded Spill - movl 76(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 84(%esp) # 4-byte Folded Spill - sbbl %eax, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 512(%esp), %ecx - movl 632(%esp), %edx - movl 64(%esp), %ebx # 4-byte Reload - calll .LmulPv320x32 - movl 68(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 512(%esp), %esi - movl 4(%esp), %edx # 4-byte Reload - adcl 516(%esp), %edx - movl 52(%esp), %ecx # 4-byte Reload - adcl 520(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 524(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 528(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 532(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 536(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 540(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 544(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 548(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 552(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 88(%esp) # 4-byte Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, %edi - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %edx, %esi - movl %esi, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 464(%esp), %ecx - movl 632(%esp), %edx - movl 64(%esp), %ebx # 4-byte Reload - calll .LmulPv320x32 - addl 464(%esp), %esi - movl 52(%esp), %ecx # 4-byte Reload - adcl 468(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - 
adcl 492(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 496(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl 120(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 88(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, %edi - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, 68(%esp) # 4-byte Folded Spill - movl %ecx, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 416(%esp), %ecx - movl 632(%esp), %edx - movl 64(%esp), %ebx # 4-byte Reload - calll .LmulPv320x32 - movl 52(%esp), %eax # 4-byte Reload - addl 416(%esp), %eax - movl 48(%esp), %ecx # 4-byte Reload - adcl 420(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 440(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 444(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 120(%esp) # 4-byte Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 88(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 68(%esp) # 4-byte Folded Spill - movl %ecx, %ebp - movl %ebp, %eax - movl 60(%esp), %edi # 4-byte Reload - imull %edi, %eax - movl %eax, (%esp) - leal 368(%esp), %ecx - movl 632(%esp), %eax - movl %eax, %edx - movl 64(%esp), %ebx # 4-byte Reload - calll .LmulPv320x32 - addl 368(%esp), %ebp - movl 56(%esp), %ecx # 4-byte Reload - adcl 372(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 404(%esp), %ebp - movl 120(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 88(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 84(%esp) # 4-byte Spill - adcl $0, 
68(%esp) # 4-byte Folded Spill - movl %ecx, %esi - movl %esi, %eax - imull %edi, %eax - movl %eax, (%esp) - leal 320(%esp), %ecx - movl 632(%esp), %edx - movl 64(%esp), %ebx # 4-byte Reload - calll .LmulPv320x32 - addl 320(%esp), %esi - movl 72(%esp), %ecx # 4-byte Reload - adcl 324(%esp), %ecx - movl 96(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %edi # 4-byte Reload - adcl 344(%esp), %edi - movl 116(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 352(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 104(%esp), %esi # 4-byte Reload - adcl 360(%esp), %esi - adcl $0, 88(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, 68(%esp) # 4-byte Folded Spill - movl %ecx, %ebp - movl %ebp, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 272(%esp), %ecx - movl 632(%esp), %edx - movl 64(%esp), %ebx # 4-byte Reload - calll .LmulPv320x32 - addl 272(%esp), %ebp - movl 96(%esp), %ecx # 4-byte Reload - adcl 276(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl 288(%esp), %ebp - adcl 292(%esp), %edi - movl %edi, 124(%esp) # 4-byte Spill - movl 116(%esp), %edi # 4-byte Reload - adcl 296(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl 308(%esp), %esi - movl %esi, 104(%esp) # 4-byte Spill - movl 88(%esp), %esi # 4-byte Reload - adcl 312(%esp), %esi - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, 68(%esp) # 4-byte Folded Spill - movl %ecx, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 224(%esp), %ecx - movl 632(%esp), %edx - movl 64(%esp), %ebx # 4-byte Reload - calll .LmulPv320x32 - movl 96(%esp), %eax # 4-byte Reload - addl 224(%esp), %eax - movl 100(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl 92(%esp), %ecx # 4-byte Reload - adcl 232(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl 236(%esp), %ebp - movl %ebp, 112(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 240(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - adcl 244(%esp), %edi - movl %edi, 116(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 248(%esp), %ebp - movl 120(%esp), %ecx # 4-byte Reload - adcl 252(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 256(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - adcl 260(%esp), %esi - movl %esi, 88(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 264(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - adcl $0, 
76(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl 68(%esp), %esi # 4-byte Reload - adcl $0, %esi - movl %eax, %edi - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 176(%esp), %ecx - movl 632(%esp), %edx - movl 64(%esp), %ebx # 4-byte Reload - calll .LmulPv320x32 - addl 176(%esp), %edi - movl 92(%esp), %ecx # 4-byte Reload - adcl 180(%esp), %ecx - movl 112(%esp), %edi # 4-byte Reload - adcl 184(%esp), %edi - movl 124(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 196(%esp), %ebp - movl 120(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 128(%esp), %ecx - movl 632(%esp), %edx - movl 64(%esp), %ebx # 4-byte Reload - calll .LmulPv320x32 - addl 128(%esp), %esi - movl %edi, %eax - adcl 132(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl %eax, %edi - movl 124(%esp), %ecx # 4-byte Reload - adcl 136(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 116(%esp), %esi # 4-byte Reload - adcl 140(%esp), %esi - movl %esi, 116(%esp) # 4-byte Spill - adcl 144(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl %ebp, %edx - movl 120(%esp), %eax # 4-byte Reload - adcl 148(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 152(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 156(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 76(%esp), %ebx # 4-byte Reload - adcl 164(%esp), %ebx - movl %ebx, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 168(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - subl 12(%esp), %edi # 4-byte Folded Reload - sbbl 8(%esp), %ecx # 4-byte Folded Reload - sbbl 16(%esp), %esi # 4-byte Folded Reload - sbbl 20(%esp), %edx # 4-byte Folded Reload - movl 120(%esp), %eax # 4-byte Reload - sbbl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - sbbl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - sbbl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - sbbl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl %ebp, %eax - sbbl 40(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 100(%esp) # 4-byte Spill - movl 84(%esp), %ebx # 4-byte Reload - sbbl 44(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 96(%esp) # 4-byte Spill - sbbl $0, %eax - andl $1, %eax - jne .LBB148_2 -# BB#1: - movl %edx, 80(%esp) # 4-byte Spill -.LBB148_2: - testb %al, %al - movl 112(%esp), %edx # 
4-byte Reload - jne .LBB148_4 -# BB#3: - movl %edi, %edx -.LBB148_4: - movl 624(%esp), %edi - movl %edx, (%edi) - movl 108(%esp), %edx # 4-byte Reload - jne .LBB148_6 -# BB#5: - movl %ecx, 124(%esp) # 4-byte Spill -.LBB148_6: - movl 124(%esp), %ecx # 4-byte Reload - movl %ecx, 4(%edi) - movl 116(%esp), %ecx # 4-byte Reload - jne .LBB148_8 -# BB#7: - movl %esi, %ecx -.LBB148_8: - movl %ecx, 8(%edi) - movl 80(%esp), %eax # 4-byte Reload - movl %eax, 12(%edi) - movl 76(%esp), %ecx # 4-byte Reload - movl 120(%esp), %eax # 4-byte Reload - jne .LBB148_10 -# BB#9: - movl 64(%esp), %eax # 4-byte Reload -.LBB148_10: - movl %eax, 16(%edi) - movl 84(%esp), %eax # 4-byte Reload - movl 104(%esp), %ebp # 4-byte Reload - jne .LBB148_12 -# BB#11: - movl 68(%esp), %ebp # 4-byte Reload -.LBB148_12: - movl %ebp, 20(%edi) - movl 88(%esp), %ebx # 4-byte Reload - jne .LBB148_14 -# BB#13: - movl 72(%esp), %ebx # 4-byte Reload -.LBB148_14: - movl %ebx, 24(%edi) - jne .LBB148_16 -# BB#15: - movl 92(%esp), %edx # 4-byte Reload -.LBB148_16: - movl %edx, 28(%edi) - jne .LBB148_18 -# BB#17: - movl 100(%esp), %ecx # 4-byte Reload -.LBB148_18: - movl %ecx, 32(%edi) - jne .LBB148_20 -# BB#19: - movl 96(%esp), %eax # 4-byte Reload -.LBB148_20: - movl %eax, 36(%edi) - addl $604, %esp # imm = 0x25C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end148: - .size mcl_fp_montRed10Lbmi2, .Lfunc_end148-mcl_fp_montRed10Lbmi2 - - .globl mcl_fp_addPre10Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre10Lbmi2,@function -mcl_fp_addPre10Lbmi2: # @mcl_fp_addPre10Lbmi2 -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - movl 20(%esp), %ecx - addl (%ecx), %edx - adcl 4(%ecx), %esi - movl 8(%eax), %edi - adcl 8(%ecx), %edi - movl 16(%esp), %ebx - movl %edx, (%ebx) - movl 12(%ecx), %edx - movl %esi, 4(%ebx) - movl 16(%ecx), %esi - adcl 12(%eax), %edx - adcl 16(%eax), %esi - movl %edi, 8(%ebx) - movl 20(%eax), %edi - movl %edx, 12(%ebx) - movl 20(%ecx), %edx - adcl %edi, %edx - movl 24(%eax), %edi - movl %esi, 16(%ebx) - movl 24(%ecx), %esi - adcl %edi, %esi - movl 28(%eax), %edi - movl %edx, 20(%ebx) - movl 28(%ecx), %edx - adcl %edi, %edx - movl 32(%eax), %edi - movl %esi, 24(%ebx) - movl 32(%ecx), %esi - adcl %edi, %esi - movl %edx, 28(%ebx) - movl %esi, 32(%ebx) - movl 36(%eax), %eax - movl 36(%ecx), %ecx - adcl %eax, %ecx - movl %ecx, 36(%ebx) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end149: - .size mcl_fp_addPre10Lbmi2, .Lfunc_end149-mcl_fp_addPre10Lbmi2 - - .globl mcl_fp_subPre10Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre10Lbmi2,@function -mcl_fp_subPre10Lbmi2: # @mcl_fp_subPre10Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edi - xorl %eax, %eax - movl 28(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%ecx), %ebx - sbbl 8(%edx), %ebx - movl 20(%esp), %ebp - movl %esi, (%ebp) - movl 12(%ecx), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ebp) - movl 16(%ecx), %edi - sbbl 16(%edx), %edi - movl %ebx, 8(%ebp) - movl 20(%edx), %ebx - movl %esi, 12(%ebp) - movl 20(%ecx), %esi - sbbl %ebx, %esi - movl 24(%edx), %ebx - movl %edi, 16(%ebp) - movl 24(%ecx), %edi - sbbl %ebx, %edi - movl 28(%edx), %ebx - movl %esi, 20(%ebp) - movl 28(%ecx), %esi - sbbl %ebx, %esi - movl 32(%edx), %ebx - movl %edi, 24(%ebp) - movl 32(%ecx), %edi - sbbl %ebx, %edi - movl %esi, 28(%ebp) - movl %edi, 32(%ebp) - movl 36(%edx), %edx - movl 36(%ecx), %ecx - sbbl 
%edx, %ecx - movl %ecx, 36(%ebp) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end150: - .size mcl_fp_subPre10Lbmi2, .Lfunc_end150-mcl_fp_subPre10Lbmi2 - - .globl mcl_fp_shr1_10Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_10Lbmi2,@function -mcl_fp_shr1_10Lbmi2: # @mcl_fp_shr1_10Lbmi2 -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - shrdl $1, %edx, %ecx - movl 8(%esp), %esi - movl %ecx, (%esi) - movl 8(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 4(%esi) - movl 12(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 8(%esi) - movl 16(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 12(%esi) - movl 20(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 16(%esi) - movl 24(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 20(%esi) - movl 28(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 24(%esi) - movl 32(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 28(%esi) - movl 36(%eax), %eax - shrdl $1, %eax, %ecx - movl %ecx, 32(%esi) - shrl %eax - movl %eax, 36(%esi) - popl %esi - retl -.Lfunc_end151: - .size mcl_fp_shr1_10Lbmi2, .Lfunc_end151-mcl_fp_shr1_10Lbmi2 - - .globl mcl_fp_add10Lbmi2 - .align 16, 0x90 - .type mcl_fp_add10Lbmi2,@function -mcl_fp_add10Lbmi2: # @mcl_fp_add10Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $24, %esp - movl 52(%esp), %edi - movl (%edi), %ecx - movl 4(%edi), %eax - movl 48(%esp), %ebx - addl (%ebx), %ecx - movl %ecx, (%esp) # 4-byte Spill - adcl 4(%ebx), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 8(%edi), %eax - adcl 8(%ebx), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 12(%ebx), %ecx - movl 16(%ebx), %eax - adcl 12(%edi), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - adcl 16(%edi), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 20(%ebx), %eax - adcl 20(%edi), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 24(%ebx), %esi - adcl 24(%edi), %esi - movl 28(%ebx), %ebp - adcl 28(%edi), %ebp - movl 32(%ebx), %edx - adcl 32(%edi), %edx - movl 36(%ebx), %ecx - adcl 36(%edi), %ecx - movl 44(%esp), %edi - movl (%esp), %ebx # 4-byte Reload - movl %ebx, (%edi) - movl 20(%esp), %eax # 4-byte Reload - movl %eax, 4(%edi) - movl 16(%esp), %eax # 4-byte Reload - movl %eax, 8(%edi) - movl 12(%esp), %eax # 4-byte Reload - movl %eax, 12(%edi) - movl 8(%esp), %eax # 4-byte Reload - movl %eax, 16(%edi) - movl 4(%esp), %eax # 4-byte Reload - movl %eax, 20(%edi) - movl %esi, 24(%edi) - movl %ebp, 28(%edi) - movl %edx, 32(%edi) - movl %ecx, 36(%edi) - sbbl %eax, %eax - andl $1, %eax - movl 56(%esp), %edi - subl (%edi), %ebx - movl %ebx, (%esp) # 4-byte Spill - movl 20(%esp), %ebx # 4-byte Reload - sbbl 4(%edi), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - movl 16(%esp), %ebx # 4-byte Reload - sbbl 8(%edi), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 12(%esp), %ebx # 4-byte Reload - sbbl 12(%edi), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - movl 8(%esp), %ebx # 4-byte Reload - sbbl 16(%edi), %ebx - movl %ebx, 8(%esp) # 4-byte Spill - movl 4(%esp), %ebx # 4-byte Reload - sbbl 20(%edi), %ebx - movl %ebx, 4(%esp) # 4-byte Spill - sbbl 24(%edi), %esi - sbbl 28(%edi), %ebp - sbbl 32(%edi), %edx - sbbl 36(%edi), %ecx - sbbl $0, %eax - testb $1, %al - jne .LBB152_2 -# BB#1: # %nocarry - movl (%esp), %edi # 4-byte Reload - movl 44(%esp), %ebx - movl %edi, (%ebx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 4(%ebx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl 12(%esp), %edi # 4-byte Reload - movl %edi, 12(%ebx) - movl 8(%esp), %edi # 4-byte Reload - movl 
%edi, 16(%ebx) - movl 4(%esp), %eax # 4-byte Reload - movl %eax, 20(%ebx) - movl %esi, 24(%ebx) - movl %ebp, 28(%ebx) - movl %edx, 32(%ebx) - movl %ecx, 36(%ebx) -.LBB152_2: # %carry - addl $24, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end152: - .size mcl_fp_add10Lbmi2, .Lfunc_end152-mcl_fp_add10Lbmi2 - - .globl mcl_fp_addNF10Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF10Lbmi2,@function -mcl_fp_addNF10Lbmi2: # @mcl_fp_addNF10Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $72, %esp - movl 100(%esp), %ecx - movl (%ecx), %eax - movl 4(%ecx), %esi - movl 96(%esp), %edx - addl (%edx), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 4(%edx), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 36(%ecx), %edi - movl 32(%ecx), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 28(%ecx), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 24(%ecx), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 20(%ecx), %ebp - movl 16(%ecx), %ebx - movl 12(%ecx), %eax - movl 8(%ecx), %esi - adcl 8(%edx), %esi - adcl 12(%edx), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 16(%edx), %ebx - movl %ebx, 44(%esp) # 4-byte Spill - adcl 20(%edx), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 24(%edx), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 28(%edx), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 32(%edx), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl %esi, %ecx - adcl 36(%edx), %edi - movl %edi, 36(%esp) # 4-byte Spill - movl 104(%esp), %edi - movl 52(%esp), %edx # 4-byte Reload - subl (%edi), %edx - movl 56(%esp), %esi # 4-byte Reload - sbbl 4(%edi), %esi - movl %esi, (%esp) # 4-byte Spill - movl %ecx, %esi - sbbl 8(%edi), %esi - movl %esi, 4(%esp) # 4-byte Spill - sbbl 12(%edi), %eax - movl %eax, 8(%esp) # 4-byte Spill - sbbl 16(%edi), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - sbbl 20(%edi), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - movl %esi, %eax - movl %esi, %ebp - sbbl 24(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - sbbl 28(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - movl %esi, %eax - movl %esi, %ebx - sbbl 32(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - movl %eax, %esi - sbbl 36(%edi), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl %esi, %edi - movl 52(%esp), %esi # 4-byte Reload - sarl $31, %edi - testl %edi, %edi - js .LBB153_2 -# BB#1: - movl %edx, %esi -.LBB153_2: - movl 92(%esp), %edx - movl %esi, (%edx) - movl 56(%esp), %esi # 4-byte Reload - js .LBB153_4 -# BB#3: - movl (%esp), %esi # 4-byte Reload -.LBB153_4: - movl %esi, 4(%edx) - movl %ebp, %edi - movl 40(%esp), %esi # 4-byte Reload - js .LBB153_6 -# BB#5: - movl 4(%esp), %ecx # 4-byte Reload -.LBB153_6: - movl %ecx, 8(%edx) - movl %ebx, %ecx - movl 44(%esp), %ebp # 4-byte Reload - js .LBB153_8 -# BB#7: - movl 8(%esp), %esi # 4-byte Reload -.LBB153_8: - movl %esi, 12(%edx) - movl 68(%esp), %esi # 4-byte Reload - movl 48(%esp), %ebx # 4-byte Reload - js .LBB153_10 -# BB#9: - movl 12(%esp), %ebp # 4-byte Reload -.LBB153_10: - movl %ebp, 16(%edx) - js .LBB153_12 -# BB#11: - movl 16(%esp), %ebx # 4-byte Reload -.LBB153_12: - movl %ebx, 20(%edx) - js .LBB153_14 -# BB#13: - movl 20(%esp), %edi # 4-byte Reload -.LBB153_14: - movl %edi, 24(%edx) - js .LBB153_16 -# BB#15: - movl 24(%esp), %esi # 4-byte Reload 
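The hunks above (mcl_fp_montRed10Lbmi2 through mcl_fp_add10Lbmi2) unroll two patterns: word-serial Montgomery reduction — multiply the lowest limb by n' = -p^-1 mod 2^32, add that multiple of p so the limb cancels, repeat once per limb — and a final conditional subtraction of the modulus driven by the saved carry (the sbbl $0 / andl $1 / jne tail). The Go sketch below shows the same reduction over generic 32-bit limbs; it is a minimal illustration with assumed names (montRed, nprime, little-endian limb order), not mcl's ABI.

package fp

import "math/bits"

// montRed performs word-serial Montgomery reduction: given t < p*R with
// R = 2^(32*len(p)), it returns t*R^-1 mod p. This is the loop that the
// montRed hunks above unroll for len(p) = 10; nprime is -p^-1 mod 2^32.
func montRed(t, p []uint32, nprime uint32) []uint32 {
	n := len(p)
	var top uint32 // carries that run past the 2n-limb window
	for i := 0; i < n; i++ {
		q := t[i] * nprime // chosen so t[i] + q*p[0] == 0 mod 2^32
		var carry uint32
		for j := 0; j < n; j++ {
			hi, lo := bits.Mul32(q, p[j])
			var c uint32
			lo, c = bits.Add32(lo, carry, 0)
			hi += c // hi <= 2^32-2, so this cannot wrap
			t[i+j], c = bits.Add32(t[i+j], lo, 0)
			carry = hi + c
		}
		for k := i + n; carry != 0; k++ { // ripple the carry upward
			if k < 2*n {
				t[k], carry = bits.Add32(t[k], carry, 0)
			} else {
				top, carry = top+carry, 0
			}
		}
	}
	// The reduced value now sits in the high half and is < 2p, so one
	// conditional subtraction finishes, like the sbbl/branch tail above.
	r := make([]uint32, n)
	s := make([]uint32, n)
	var borrow uint32
	for j := 0; j < n; j++ {
		s[j], borrow = bits.Sub32(t[n+j], p[j], borrow)
	}
	if top != 0 || borrow == 0 { // value >= p: keep the subtracted copy
		copy(r, s)
	} else {
		copy(r, t[n:])
	}
	return r
}

mcl_fp_add10Lbmi2 is the same final step applied to a plain adcl chain: add limb-wise, subtract p speculatively, and commit the subtracted copy only on the nocarry path.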
-.LBB153_16: - movl %esi, 28(%edx) - js .LBB153_18 -# BB#17: - movl 28(%esp), %ecx # 4-byte Reload -.LBB153_18: - movl %ecx, 32(%edx) - js .LBB153_20 -# BB#19: - movl 32(%esp), %eax # 4-byte Reload -.LBB153_20: - movl %eax, 36(%edx) - addl $72, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end153: - .size mcl_fp_addNF10Lbmi2, .Lfunc_end153-mcl_fp_addNF10Lbmi2 - - .globl mcl_fp_sub10Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub10Lbmi2,@function -mcl_fp_sub10Lbmi2: # @mcl_fp_sub10Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $32, %esp - movl 56(%esp), %esi - movl (%esi), %ecx - movl 4(%esi), %eax - xorl %ebx, %ebx - movl 60(%esp), %edi - subl (%edi), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - sbbl 4(%edi), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 8(%esi), %eax - sbbl 8(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 12(%esi), %eax - sbbl 12(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 16(%esi), %eax - sbbl 16(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 20(%esi), %edx - sbbl 20(%edi), %edx - movl %edx, 8(%esp) # 4-byte Spill - movl 24(%esi), %ecx - sbbl 24(%edi), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 28(%esi), %eax - sbbl 28(%edi), %eax - movl %eax, (%esp) # 4-byte Spill - movl 32(%esi), %ebp - sbbl 32(%edi), %ebp - movl 36(%esi), %esi - sbbl 36(%edi), %esi - sbbl $0, %ebx - testb $1, %bl - movl 52(%esp), %ebx - movl 12(%esp), %edi # 4-byte Reload - movl %edi, (%ebx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 4(%ebx) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl 28(%esp), %edi # 4-byte Reload - movl %edi, 12(%ebx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 16(%ebx) - movl %edx, 20(%ebx) - movl %ecx, 24(%ebx) - movl %eax, 28(%ebx) - movl %ebp, 32(%ebx) - movl %esi, 36(%ebx) - je .LBB154_2 -# BB#1: # %carry - movl %esi, %edi - movl 64(%esp), %esi - movl 12(%esp), %ecx # 4-byte Reload - addl (%esi), %ecx - movl %ecx, (%ebx) - movl 16(%esp), %edx # 4-byte Reload - adcl 4(%esi), %edx - movl %edx, 4(%ebx) - movl 24(%esp), %ecx # 4-byte Reload - adcl 8(%esi), %ecx - movl 12(%esi), %eax - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %ecx, 8(%ebx) - movl 16(%esi), %ecx - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %eax, 12(%ebx) - movl 20(%esi), %eax - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %ecx, 16(%ebx) - movl 24(%esi), %ecx - adcl 4(%esp), %ecx # 4-byte Folded Reload - movl %eax, 20(%ebx) - movl 28(%esi), %eax - adcl (%esp), %eax # 4-byte Folded Reload - movl %ecx, 24(%ebx) - movl %eax, 28(%ebx) - movl 32(%esi), %eax - adcl %ebp, %eax - movl %eax, 32(%ebx) - movl 36(%esi), %eax - adcl %edi, %eax - movl %eax, 36(%ebx) -.LBB154_2: # %nocarry - addl $32, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end154: - .size mcl_fp_sub10Lbmi2, .Lfunc_end154-mcl_fp_sub10Lbmi2 - - .globl mcl_fp_subNF10Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF10Lbmi2,@function -mcl_fp_subNF10Lbmi2: # @mcl_fp_subNF10Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $56, %esp - movl 80(%esp), %eax - movl 36(%eax), %esi - movl (%eax), %edi - movl 4(%eax), %edx - movl 84(%esp), %ecx - subl (%ecx), %edi - movl %edi, 36(%esp) # 4-byte Spill - sbbl 4(%ecx), %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 32(%eax), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 28(%eax), %edi - movl 24(%eax), %ebx - movl 20(%eax), %ebp - movl 16(%eax), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 12(%eax), %edx - movl 8(%eax), %eax - sbbl 8(%ecx), 
%eax - movl %eax, 16(%esp) # 4-byte Spill - sbbl 12(%ecx), %edx - movl %edx, 24(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - sbbl 16(%ecx), %eax - movl %eax, 52(%esp) # 4-byte Spill - sbbl 20(%ecx), %ebp - movl %ebp, 28(%esp) # 4-byte Spill - sbbl 24(%ecx), %ebx - movl %ebx, 32(%esp) # 4-byte Spill - sbbl 28(%ecx), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - sbbl 32(%ecx), %eax - movl %eax, 48(%esp) # 4-byte Spill - sbbl 36(%ecx), %esi - movl %esi, 20(%esp) # 4-byte Spill - movl %esi, %eax - sarl $31, %eax - movl %eax, %edx - addl %edx, %edx - movl %eax, %ecx - adcl %ecx, %ecx - movl %esi, %ebx - shrl $31, %ebx - orl %edx, %ebx - movl 88(%esp), %edi - movl 20(%edi), %edx - andl %ecx, %edx - movl %edx, 12(%esp) # 4-byte Spill - movl 12(%edi), %edx - andl %ecx, %edx - movl %edx, 4(%esp) # 4-byte Spill - andl 4(%edi), %ecx - movl 16(%edi), %edx - andl %ebx, %edx - movl %edx, (%esp) # 4-byte Spill - movl 8(%edi), %edx - andl %ebx, %edx - andl (%edi), %ebx - movl 36(%edi), %esi - andl %eax, %esi - movl %esi, 8(%esp) # 4-byte Spill - movl 32(%edi), %ebp - andl %eax, %ebp - movl 28(%edi), %esi - andl %eax, %esi - andl 24(%edi), %eax - addl 36(%esp), %ebx # 4-byte Folded Reload - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl 76(%esp), %edi - movl %ebx, (%edi) - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %ecx, 4(%edi) - movl 4(%esp), %ecx # 4-byte Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %edx, 8(%edi) - movl (%esp), %edx # 4-byte Reload - adcl 52(%esp), %edx # 4-byte Folded Reload - movl %ecx, 12(%edi) - movl 12(%esp), %ecx # 4-byte Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %edx, 16(%edi) - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %ecx, 20(%edi) - adcl 44(%esp), %esi # 4-byte Folded Reload - movl %eax, 24(%edi) - adcl 48(%esp), %ebp # 4-byte Folded Reload - movl %esi, 28(%edi) - movl %ebp, 32(%edi) - movl 8(%esp), %eax # 4-byte Reload - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%edi) - addl $56, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end155: - .size mcl_fp_subNF10Lbmi2, .Lfunc_end155-mcl_fp_subNF10Lbmi2 - - .globl mcl_fpDbl_add10Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add10Lbmi2,@function -mcl_fpDbl_add10Lbmi2: # @mcl_fpDbl_add10Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $72, %esp - movl 100(%esp), %edx - movl 96(%esp), %edi - movl 12(%edi), %esi - movl 16(%edi), %ecx - movl 8(%edx), %ebx - movl (%edx), %ebp - addl (%edi), %ebp - movl 92(%esp), %eax - movl %ebp, (%eax) - movl 4(%edx), %ebp - adcl 4(%edi), %ebp - adcl 8(%edi), %ebx - adcl 12(%edx), %esi - adcl 16(%edx), %ecx - movl %ebp, 4(%eax) - movl 48(%edx), %ebp - movl %ebx, 8(%eax) - movl 20(%edx), %ebx - movl %esi, 12(%eax) - movl 20(%edi), %esi - adcl %ebx, %esi - movl 24(%edx), %ebx - movl %ecx, 16(%eax) - movl 24(%edi), %ecx - adcl %ebx, %ecx - movl 28(%edx), %ebx - movl %esi, 20(%eax) - movl 28(%edi), %esi - adcl %ebx, %esi - movl 32(%edx), %ebx - movl %ecx, 24(%eax) - movl 32(%edi), %ecx - adcl %ebx, %ecx - movl 36(%edx), %ebx - movl %esi, 28(%eax) - movl 36(%edi), %esi - adcl %ebx, %esi - movl 40(%edx), %ebx - movl %ecx, 32(%eax) - movl 40(%edi), %ecx - adcl %ebx, %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 44(%edx), %ebx - movl %esi, 36(%eax) - movl 44(%edi), %eax - adcl %ebx, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 48(%edi), %eax - adcl %ebp, %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 52(%edx), %eax - movl 52(%edi), %ecx - adcl 
%eax, %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 56(%edx), %eax - movl 56(%edi), %ecx - adcl %eax, %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 60(%edx), %eax - movl 60(%edi), %ecx - adcl %eax, %ecx - movl 64(%edx), %esi - movl 64(%edi), %eax - adcl %esi, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 68(%edx), %ebx - movl 68(%edi), %esi - adcl %ebx, %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 72(%edx), %ebx - movl 72(%edi), %ebp - adcl %ebx, %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 76(%edx), %edx - movl 76(%edi), %edi - adcl %edx, %edi - movl %edi, 36(%esp) # 4-byte Spill - sbbl %edx, %edx - andl $1, %edx - movl 104(%esp), %ebx - movl 64(%esp), %edi # 4-byte Reload - subl (%ebx), %edi - movl %edi, 20(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - sbbl 4(%ebx), %edi - movl %edi, 16(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - sbbl 8(%ebx), %edi - movl %edi, 12(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - sbbl 12(%ebx), %edi - movl %edi, 8(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - sbbl 16(%ebx), %edi - movl %edi, 4(%esp) # 4-byte Spill - movl %ecx, %edi - sbbl 20(%ebx), %edi - movl %edi, (%esp) # 4-byte Spill - sbbl 24(%ebx), %eax - movl %eax, 24(%esp) # 4-byte Spill - sbbl 28(%ebx), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl %ebp, %eax - movl 36(%esp), %ebp # 4-byte Reload - sbbl 32(%ebx), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl %ebp, %edi - sbbl 36(%ebx), %edi - sbbl $0, %edx - andl $1, %edx - jne .LBB156_2 -# BB#1: - movl %edi, %ebp -.LBB156_2: - testb %dl, %dl - movl 64(%esp), %edx # 4-byte Reload - movl 60(%esp), %esi # 4-byte Reload - movl 56(%esp), %edi # 4-byte Reload - movl 52(%esp), %ebx # 4-byte Reload - jne .LBB156_4 -# BB#3: - movl (%esp), %ecx # 4-byte Reload - movl 4(%esp), %esi # 4-byte Reload - movl 8(%esp), %edi # 4-byte Reload - movl 12(%esp), %ebx # 4-byte Reload - movl 16(%esp), %eax # 4-byte Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 20(%esp), %edx # 4-byte Reload -.LBB156_4: - movl 92(%esp), %eax - movl %edx, 40(%eax) - movl 68(%esp), %edx # 4-byte Reload - movl %edx, 44(%eax) - movl %ebx, 48(%eax) - movl %edi, 52(%eax) - movl %esi, 56(%eax) - movl %ecx, 60(%eax) - movl 44(%esp), %edx # 4-byte Reload - movl 40(%esp), %ecx # 4-byte Reload - jne .LBB156_6 -# BB#5: - movl 24(%esp), %ecx # 4-byte Reload -.LBB156_6: - movl %ecx, 64(%eax) - movl 48(%esp), %ecx # 4-byte Reload - jne .LBB156_8 -# BB#7: - movl 28(%esp), %edx # 4-byte Reload -.LBB156_8: - movl %edx, 68(%eax) - jne .LBB156_10 -# BB#9: - movl 32(%esp), %ecx # 4-byte Reload -.LBB156_10: - movl %ecx, 72(%eax) - movl %ebp, 76(%eax) - addl $72, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end156: - .size mcl_fpDbl_add10Lbmi2, .Lfunc_end156-mcl_fpDbl_add10Lbmi2 - - .globl mcl_fpDbl_sub10Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub10Lbmi2,@function -mcl_fpDbl_sub10Lbmi2: # @mcl_fpDbl_sub10Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $60, %esp - movl 84(%esp), %ebp - movl (%ebp), %edx - movl 4(%ebp), %esi - movl 88(%esp), %eax - subl (%eax), %edx - sbbl 4(%eax), %esi - movl 8(%ebp), %edi - sbbl 8(%eax), %edi - movl 80(%esp), %ecx - movl %edx, (%ecx) - movl 12(%ebp), %edx - sbbl 12(%eax), %edx - movl %esi, 4(%ecx) - movl 16(%ebp), %esi - sbbl 16(%eax), %esi - movl %edi, 8(%ecx) - movl 20(%eax), %edi - movl %edx, 12(%ecx) - movl 20(%ebp), %edx - sbbl %edi, %edx - movl 24(%eax), %edi - movl %esi, 16(%ecx) - movl 24(%ebp), %esi - sbbl %edi, %esi - movl 
28(%eax), %edi - movl %edx, 20(%ecx) - movl 28(%ebp), %edx - sbbl %edi, %edx - movl 32(%eax), %edi - movl %esi, 24(%ecx) - movl 32(%ebp), %esi - sbbl %edi, %esi - movl 36(%eax), %edi - movl %edx, 28(%ecx) - movl 36(%ebp), %edx - sbbl %edi, %edx - movl 40(%eax), %edi - movl %esi, 32(%ecx) - movl 40(%ebp), %esi - sbbl %edi, %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 44(%eax), %esi - movl %edx, 36(%ecx) - movl 44(%ebp), %edx - sbbl %esi, %edx - movl %edx, 20(%esp) # 4-byte Spill - movl 48(%eax), %edx - movl 48(%ebp), %esi - sbbl %edx, %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 52(%eax), %edx - movl 52(%ebp), %esi - sbbl %edx, %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 56(%eax), %edx - movl 56(%ebp), %esi - sbbl %edx, %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 60(%eax), %edx - movl 60(%ebp), %esi - sbbl %edx, %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 64(%eax), %edx - movl 64(%ebp), %esi - sbbl %edx, %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 68(%eax), %edx - movl 68(%ebp), %esi - sbbl %edx, %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 72(%eax), %edx - movl 72(%ebp), %esi - sbbl %edx, %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 76(%eax), %eax - movl 76(%ebp), %edx - sbbl %eax, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 92(%esp), %esi - jne .LBB157_1 -# BB#2: - movl $0, 16(%esp) # 4-byte Folded Spill - jmp .LBB157_3 -.LBB157_1: - movl 36(%esi), %edx - movl %edx, 16(%esp) # 4-byte Spill -.LBB157_3: - testb %al, %al - jne .LBB157_4 -# BB#5: - movl $0, 8(%esp) # 4-byte Folded Spill - movl $0, %ebx - jmp .LBB157_6 -.LBB157_4: - movl (%esi), %ebx - movl 4(%esi), %eax - movl %eax, 8(%esp) # 4-byte Spill -.LBB157_6: - jne .LBB157_7 -# BB#8: - movl $0, 12(%esp) # 4-byte Folded Spill - jmp .LBB157_9 -.LBB157_7: - movl 32(%esi), %eax - movl %eax, 12(%esp) # 4-byte Spill -.LBB157_9: - jne .LBB157_10 -# BB#11: - movl $0, 4(%esp) # 4-byte Folded Spill - jmp .LBB157_12 -.LBB157_10: - movl 28(%esi), %eax - movl %eax, 4(%esp) # 4-byte Spill -.LBB157_12: - jne .LBB157_13 -# BB#14: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB157_15 -.LBB157_13: - movl 24(%esi), %eax - movl %eax, (%esp) # 4-byte Spill -.LBB157_15: - jne .LBB157_16 -# BB#17: - movl $0, %ebp - jmp .LBB157_18 -.LBB157_16: - movl 20(%esi), %ebp -.LBB157_18: - jne .LBB157_19 -# BB#20: - movl $0, %eax - jmp .LBB157_21 -.LBB157_19: - movl 16(%esi), %eax -.LBB157_21: - jne .LBB157_22 -# BB#23: - movl $0, %edx - jmp .LBB157_24 -.LBB157_22: - movl 12(%esi), %edx -.LBB157_24: - jne .LBB157_25 -# BB#26: - xorl %esi, %esi - jmp .LBB157_27 -.LBB157_25: - movl 8(%esi), %esi -.LBB157_27: - addl 28(%esp), %ebx # 4-byte Folded Reload - movl 8(%esp), %edi # 4-byte Reload - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %ebx, 40(%ecx) - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %edi, 44(%ecx) - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %esi, 48(%ecx) - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %edx, 52(%ecx) - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl %eax, 56(%ecx) - movl (%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %ebp, 60(%ecx) - movl 4(%esp), %edx # 4-byte Reload - adcl 48(%esp), %edx # 4-byte Folded Reload - movl %eax, 64(%ecx) - movl 12(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %edx, 68(%ecx) - movl %eax, 72(%ecx) - movl 16(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%ecx) - addl $60, %esp - popl %esi - 
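mcl_fp_subPre10Lbmi2, mcl_fp_subNF10Lbmi2, and the mcl_fpDbl_sub10Lbmi2 body just shown share one shape: an sbbl borrow chain, then a per-limb selection of p[j] or 0 (the .LBB157_* ladder, or the sarl $31 sign mask in subNF) added back when the subtraction underflowed. For the double-width variant only the high half is corrected. Below is a branchless Go sketch of that shape, assuming little-endian uint32 limbs; the names are illustrative, not mcl's.

package fp

import "math/bits"

// fpDblSub computes the 2n-limb difference x - y and, when it borrows,
// adds the modulus p back into the high n limbs only, mirroring the
// mcl_fpDbl_sub* hunk above.
func fpDblSub(z, x, y, p []uint32) {
	n := len(p)
	var borrow uint32
	for i := 0; i < 2*n; i++ { // plain sbbl chain over both halves
		z[i], borrow = bits.Sub32(x[i], y[i], borrow)
	}
	mask := -borrow // 0x00000000, or 0xffffffff on underflow
	var carry uint32
	for j := 0; j < n; j++ { // add p (or 0) back into the high half
		z[n+j], carry = bits.Add32(z[n+j], p[j]&mask, carry)
	}
}

The single-width mcl_fp_sub10Lbmi2 is the same routine with the add-back applied to all n limbs, and subNF replaces the branch ladder with the sign-mask AND shown in its hunk.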
popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end157: - .size mcl_fpDbl_sub10Lbmi2, .Lfunc_end157-mcl_fpDbl_sub10Lbmi2 - - .align 16, 0x90 - .type .LmulPv352x32,@function -.LmulPv352x32: # @mulPv352x32 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $32, %esp - movl %edx, %eax - movl 52(%esp), %edx - mulxl 4(%eax), %ebx, %esi - mulxl (%eax), %edi, %ebp - movl %edi, 28(%esp) # 4-byte Spill - addl %ebx, %ebp - movl %ebp, 24(%esp) # 4-byte Spill - mulxl 8(%eax), %edi, %ebx - adcl %esi, %edi - movl %edi, 20(%esp) # 4-byte Spill - mulxl 12(%eax), %esi, %edi - adcl %ebx, %esi - movl %esi, 16(%esp) # 4-byte Spill - mulxl 16(%eax), %esi, %ebx - adcl %edi, %esi - movl %esi, 12(%esp) # 4-byte Spill - mulxl 20(%eax), %esi, %edi - adcl %ebx, %esi - movl %esi, 8(%esp) # 4-byte Spill - mulxl 24(%eax), %ebx, %esi - adcl %edi, %ebx - mulxl 28(%eax), %edi, %ebp - movl %ebp, 4(%esp) # 4-byte Spill - adcl %esi, %edi - mulxl 32(%eax), %esi, %ebp - movl %ebp, (%esp) # 4-byte Spill - adcl 4(%esp), %esi # 4-byte Folded Reload - mulxl 36(%eax), %edx, %ebp - movl %ebp, 4(%esp) # 4-byte Spill - adcl (%esp), %edx # 4-byte Folded Reload - movl 28(%esp), %ebp # 4-byte Reload - movl %ebp, (%ecx) - movl 24(%esp), %ebp # 4-byte Reload - movl %ebp, 4(%ecx) - movl 20(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%ecx) - movl 16(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%ecx) - movl 12(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%ecx) - movl 8(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%ecx) - movl %ebx, 24(%ecx) - movl %edi, 28(%ecx) - movl %esi, 32(%ecx) - movl %edx, 36(%ecx) - movl 52(%esp), %edx - mulxl 40(%eax), %eax, %edx - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%ecx) - adcl $0, %edx - movl %edx, 44(%ecx) - movl %ecx, %eax - addl $32, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end158: - .size .LmulPv352x32, .Lfunc_end158-.LmulPv352x32 - - .globl mcl_fp_mulUnitPre11Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre11Lbmi2,@function -mcl_fp_mulUnitPre11Lbmi2: # @mcl_fp_mulUnitPre11Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $92, %esp - calll .L159$pb -.L159$pb: - popl %ebx -.Ltmp20: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp20-.L159$pb), %ebx - movl 120(%esp), %eax - movl %eax, (%esp) - leal 40(%esp), %ecx - movl 116(%esp), %edx - calll .LmulPv352x32 - movl 84(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 80(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 76(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 72(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 68(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 64(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 60(%esp), %ebp - movl 56(%esp), %ebx - movl 52(%esp), %edi - movl 48(%esp), %esi - movl 40(%esp), %edx - movl 44(%esp), %ecx - movl 112(%esp), %eax - movl %edx, (%eax) - movl %ecx, 4(%eax) - movl %esi, 8(%eax) - movl %edi, 12(%eax) - movl %ebx, 16(%eax) - movl %ebp, 20(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 40(%eax) - movl 36(%esp), %ecx # 4-byte Reload - movl %ecx, 44(%eax) - addl $92, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end159: - .size mcl_fp_mulUnitPre11Lbmi2, .Lfunc_end159-mcl_fp_mulUnitPre11Lbmi2 - - .globl mcl_fpDbl_mulPre11Lbmi2 - .align 
16, 0x90 - .type mcl_fpDbl_mulPre11Lbmi2,@function -mcl_fpDbl_mulPre11Lbmi2: # @mcl_fpDbl_mulPre11Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $620, %esp # imm = 0x26C - calll .L160$pb -.L160$pb: - popl %eax -.Ltmp21: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp21-.L160$pb), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl %eax, %ebx - movl 648(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 568(%esp), %ecx - movl 644(%esp), %edx - movl %edx, %ebp - movl %ebx, %edi - calll .LmulPv352x32 - movl 612(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 608(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 604(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 600(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 596(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 592(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 588(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 584(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 580(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 576(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 568(%esp), %eax - movl 572(%esp), %esi - movl 640(%esp), %ecx - movl %eax, (%ecx) - movl 648(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 520(%esp), %ecx - movl %ebp, %edx - movl %edi, %ebx - calll .LmulPv352x32 - addl 520(%esp), %esi - movl %esi, 16(%esp) # 4-byte Spill - movl 564(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 560(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 556(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 552(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 548(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 544(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 540(%esp), %ebx - movl 536(%esp), %edi - movl 532(%esp), %esi - movl 524(%esp), %ecx - movl 528(%esp), %edx - movl 640(%esp), %eax - movl 16(%esp), %ebp # 4-byte Reload - movl %ebp, 4(%eax) - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 56(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 28(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 648(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 472(%esp), %ecx - movl 644(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - movl 56(%esp), %eax # 4-byte Reload - addl 472(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 516(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 512(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 508(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 504(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 500(%esp), %eax - movl %eax, 52(%esp) # 4-byte 
Spill - movl 496(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 492(%esp), %ebp - movl 488(%esp), %edi - movl 484(%esp), %esi - movl 476(%esp), %ecx - movl 480(%esp), %edx - movl 640(%esp), %eax - movl 56(%esp), %ebx # 4-byte Reload - movl %ebx, 8(%eax) - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 56(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - adcl 44(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 44(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 648(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 424(%esp), %ecx - movl 644(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - movl 56(%esp), %eax # 4-byte Reload - addl 424(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 468(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 464(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 460(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 456(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 452(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 448(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 444(%esp), %ebx - movl 440(%esp), %edi - movl 436(%esp), %esi - movl 428(%esp), %ecx - movl 432(%esp), %edx - movl 640(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%eax) - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %ebp - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 44(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - adcl 40(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, 28(%esp) # 4-byte Folded Spill - movl 648(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 376(%esp), %ecx - movl 644(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 376(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 420(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 416(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 412(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 408(%esp), %eax - movl %eax, 60(%esp) 
# 4-byte Spill - movl 404(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 400(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 396(%esp), %ebp - movl 392(%esp), %edi - movl 388(%esp), %esi - movl 380(%esp), %ecx - movl 384(%esp), %edx - movl 640(%esp), %eax - movl 52(%esp), %ebx # 4-byte Reload - movl %ebx, 16(%eax) - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 56(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 12(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 648(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 328(%esp), %ecx - movl 644(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - movl 56(%esp), %eax # 4-byte Reload - addl 328(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 372(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 368(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 364(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 360(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 356(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 352(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 348(%esp), %ebx - movl 344(%esp), %edi - movl 340(%esp), %esi - movl 332(%esp), %ecx - movl 336(%esp), %edx - movl 640(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%eax) - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %ebp - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - adcl 40(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 40(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, 28(%esp) # 4-byte Folded Spill - movl 648(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 280(%esp), %ecx - movl 644(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 280(%esp), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 324(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 320(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 316(%esp), %eax - movl 
%eax, 68(%esp) # 4-byte Spill - movl 312(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 308(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 304(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 300(%esp), %ebp - movl 296(%esp), %edi - movl 292(%esp), %esi - movl 284(%esp), %ecx - movl 288(%esp), %edx - movl 640(%esp), %eax - movl 16(%esp), %ebx # 4-byte Reload - movl %ebx, 24(%eax) - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 56(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 36(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 648(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 232(%esp), %ecx - movl 644(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - movl 56(%esp), %eax # 4-byte Reload - addl 232(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 276(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 272(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 268(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 264(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 260(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 256(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 252(%esp), %ebx - movl 248(%esp), %edi - movl 244(%esp), %esi - movl 236(%esp), %ecx - movl 240(%esp), %edx - movl 640(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%eax) - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %ebp - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 36(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 44(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, 28(%esp) # 4-byte Folded Spill - movl 648(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 184(%esp), %ecx - movl 644(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 184(%esp), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - movl 228(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 224(%esp), 
%eax - movl %eax, 76(%esp) # 4-byte Spill - movl 220(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 216(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 212(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 208(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 204(%esp), %ebp - movl 200(%esp), %edi - movl 196(%esp), %esi - movl 188(%esp), %ecx - movl 192(%esp), %edx - movl 640(%esp), %eax - movl 32(%esp), %ebx # 4-byte Reload - movl %ebx, 32(%eax) - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 56(%esp) # 4-byte Spill - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 44(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 8(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - adcl $0, 24(%esp) # 4-byte Folded Spill - movl 648(%esp), %edi - movl 36(%edi), %eax - movl %eax, (%esp) - leal 136(%esp), %ecx - movl 644(%esp), %eax - movl %eax, %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - movl 56(%esp), %eax # 4-byte Reload - addl 136(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 180(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 176(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 172(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 168(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 164(%esp), %ebp - movl 160(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 156(%esp), %edi - movl 152(%esp), %esi - movl 148(%esp), %edx - movl 140(%esp), %ecx - movl 144(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 640(%esp), %eax - movl 56(%esp), %ebx # 4-byte Reload - movl %ebx, 36(%eax) - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 36(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - adcl 60(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 648(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 88(%esp), %ecx - movl 644(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - movl 52(%esp), %eax # 4-byte 
Reload - addl 88(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %ebp # 4-byte Reload - adcl 92(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 132(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 128(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 124(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 120(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 116(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 112(%esp), %edi - movl 108(%esp), %esi - movl 104(%esp), %edx - movl 100(%esp), %ecx - movl 640(%esp), %eax - movl 52(%esp), %ebx # 4-byte Reload - movl %ebx, 40(%eax) - movl %ebp, 44(%eax) - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 48(%eax) - adcl 36(%esp), %edx # 4-byte Folded Reload - movl %ecx, 52(%eax) - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %edx, 56(%eax) - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %esi, 60(%eax) - movl 44(%esp), %ecx # 4-byte Reload - adcl 72(%esp), %ecx # 4-byte Folded Reload - movl %edi, 64(%eax) - movl 48(%esp), %edx # 4-byte Reload - adcl 64(%esp), %edx # 4-byte Folded Reload - movl %ecx, 68(%eax) - movl 68(%esp), %ecx # 4-byte Reload - adcl 80(%esp), %ecx # 4-byte Folded Reload - movl %edx, 72(%eax) - movl %ecx, 76(%eax) - movl 76(%esp), %ecx # 4-byte Reload - adcl 32(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 80(%eax) - movl 84(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 84(%eax) - addl $620, %esp # imm = 0x26C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end160: - .size mcl_fpDbl_mulPre11Lbmi2, .Lfunc_end160-mcl_fpDbl_mulPre11Lbmi2 - - .globl mcl_fpDbl_sqrPre11Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre11Lbmi2,@function -mcl_fpDbl_sqrPre11Lbmi2: # @mcl_fpDbl_sqrPre11Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $620, %esp # imm = 0x26C - calll .L161$pb -.L161$pb: - popl %ebx -.Ltmp22: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp22-.L161$pb), %ebx - movl %ebx, 84(%esp) # 4-byte Spill - movl 644(%esp), %edx - movl (%edx), %eax - movl %eax, (%esp) - leal 568(%esp), %ecx - movl %edx, %esi - movl %ebx, %edi - calll .LmulPv352x32 - movl 612(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 608(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 604(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 600(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 596(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 592(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 588(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 584(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 580(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 576(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 568(%esp), %eax - movl 572(%esp), %ebp - movl 640(%esp), %ecx - movl %eax, (%ecx) - movl %esi, %edx - movl 4(%edx), %eax - movl %eax, (%esp) - leal 520(%esp), %ecx - movl %edi, %ebx - calll .LmulPv352x32 - addl 520(%esp), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 564(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 560(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 556(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 552(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 548(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 544(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 540(%esp), %ebx - movl 536(%esp), %edi - movl 532(%esp), %esi - movl 
524(%esp), %ecx - movl 528(%esp), %edx - movl 640(%esp), %eax - movl 16(%esp), %ebp # 4-byte Reload - movl %ebp, 4(%eax) - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 60(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 28(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - adcl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 644(%esp), %edx - movl 8(%edx), %eax - movl %eax, (%esp) - leal 472(%esp), %ecx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - movl 60(%esp), %eax # 4-byte Reload - addl 472(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 516(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 512(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 508(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 504(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 500(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 496(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 492(%esp), %ebp - movl 488(%esp), %edi - movl 484(%esp), %esi - movl 476(%esp), %ecx - movl 480(%esp), %edx - movl 640(%esp), %eax - movl 60(%esp), %ebx # 4-byte Reload - movl %ebx, 8(%eax) - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 48(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 12(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - adcl $0, 28(%esp) # 4-byte Folded Spill - movl 644(%esp), %edx - movl 12(%edx), %eax - movl %eax, (%esp) - leal 424(%esp), %ecx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - movl 48(%esp), %eax # 4-byte Reload - addl 424(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 468(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 464(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 460(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 456(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 452(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 448(%esp), %ebx - movl 444(%esp), %edi - movl 
440(%esp), %esi
	movl	436(%esp), %edx
	movl	428(%esp), %eax
	movl	%eax, 80(%esp)          # 4-byte Spill
	movl	432(%esp), %ecx
	movl	640(%esp), %eax
	movl	48(%esp), %ebp          # 4-byte Reload
	movl	%ebp, 12(%eax)
	# The remaining rounds of mcl_fpDbl_sqrPre11Lbmi2 (result words
	# 16(%eax) through 40(%eax)) repeat one generated pattern per
	# source word 16(%edx) .. 40(%edx): an 11-term adcl
	# carry-propagation ladder through the 4-byte spill slots at
	# 8(%esp)..80(%esp), one .LmulPv352x32 call (partial-product
	# buffers at 376, 328, 280, 232, 184, 136 and 88(%esp)), then a
	# store of the completed result word. The last round writes the
	# high half 44(%eax) .. 84(%eax) straight out of its ladder:
	movl	84(%esp), %ecx          # 4-byte Reload
	adcl	$0, %ecx
	movl	%ecx, 84(%eax)
	addl	$620, %esp              # imm = 0x26C
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp
	retl
.Lfunc_end161:
	.size	mcl_fpDbl_sqrPre11Lbmi2, .Lfunc_end161-mcl_fpDbl_sqrPre11Lbmi2
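# mcl_fp_mont11Lbmi2 below is LLVM-generated 32-bit code for Montgomery
# multiplication of 11-limb (352-bit) field elements; the Lbmi2 suffix
# marks the BMI2 build, and .LmulPv352x32 is the shared 352x32-bit
# partial-product helper (presumably mulx-based). The stack arguments
# appear to be z at 1152(%esp), x at 1156, y at 1160 and the modulus p
# at 1164. As a reading aid, here is a minimal C sketch of the
# word-serial (CIOS-style) algorithm the code implements; mont_mul, N
# and inv are illustrative names, not mcl's API, and the real kernel
# keeps the accumulator in the spill slots seen below, not an array.
#
#   #include <stdint.h>
#
#   #define N 11                      /* 11 x 32-bit limbs = 352 bits */
#
#   /* z = x*y*R^-1 mod p with R = 2^(32*N); inv = -p[0]^-1 mod 2^32,
#    * the word loaded from -4(%eax) in the prologue below. */
#   static void mont_mul(uint32_t z[N], const uint32_t x[N],
#                        const uint32_t y[N], const uint32_t p[N],
#                        uint32_t inv)
#   {
#       uint32_t t[N + 2] = {0};      /* running accumulator */
#       for (int i = 0; i < N; i++) {
#           uint64_t c = 0, v;
#           for (int j = 0; j < N; j++) {     /* t += x * y[i] */
#               v = (uint64_t)x[j] * y[i] + t[j] + c;
#               t[j] = (uint32_t)v; c = v >> 32;
#           }
#           v = (uint64_t)t[N] + c;
#           t[N] = (uint32_t)v; t[N + 1] = (uint32_t)(v >> 32);
#           uint32_t q = t[0] * inv;          /* zeroes t[0] mod 2^32 */
#           c = 0;
#           for (int j = 0; j < N; j++) {     /* t += q * p */
#               v = (uint64_t)q * p[j] + t[j] + c;
#               t[j] = (uint32_t)v; c = v >> 32;
#           }
#           v = (uint64_t)t[N] + c;
#           t[N] = (uint32_t)v; t[N + 1] += (uint32_t)(v >> 32);
#           for (int j = 0; j <= N; j++)      /* t /= 2^32 */
#               t[j] = t[j + 1];
#           t[N + 1] = 0;
#       }
#       uint32_t d[N];                /* conditional final subtraction */
#       uint64_t b = 0, v;
#       for (int j = 0; j < N; j++) {
#           v = (uint64_t)t[j] - p[j] - b;
#           d[j] = (uint32_t)v; b = (v >> 32) & 1;
#       }
#       int sub = (t[N] != 0) || (b == 0);    /* t >= p ? */
#       for (int j = 0; j < N; j++)
#           z[j] = sub ? d[j] : t[j];
#   }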
	.globl	mcl_fp_mont11Lbmi2
	.align	16, 0x90
	.type	mcl_fp_mont11Lbmi2,@function
mcl_fp_mont11Lbmi2:                     # @mcl_fp_mont11Lbmi2
# BB#0:
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	subl	$1132, %esp             # imm = 0x46C
	calll	.L162$pb
.L162$pb:
	popl	%ebx
.Ltmp23:
	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp23-.L162$pb), %ebx
	movl	1164(%esp), %eax
	movl	-4(%eax), %ebp
	movl	%ebp, 20(%esp)          # 4-byte Spill
	movl	1160(%esp), %eax
	movl	(%eax), %eax
	movl	%eax, (%esp)
	leal	1080(%esp), %ecx
	movl	1156(%esp), %edx
	calll	.LmulPv352x32
	# Eleven interleaved multiply/reduce rounds follow. Each round
	# computes q = t[0] * inv (imull 20(%esp)), calls .LmulPv352x32
	# with the modulus pointer 1164(%esp) to add q*p (buffers at
	# 1032, 936, 840, ..., 72(%esp)), multiplies in the next word of
	# y (4(%eax) .. 40(%eax) of 1160(%esp), buffers at 984, 888, ...,
	# 120(%esp)), and propagates carries through the spill slots at
	# 24(%esp)..68(%esp) with adcl ladders, folding the dropped top
	# carry back in around each step via sbbl %edi, %edi / andl $1.
	# After the last reduction, t - p is formed with subl/sbbl against
	# 1164(%esp), sbbl $0 / andl $1 capture the final borrow, and the
	# blocks .LBB162_2 .. .LBB162_22 select per limb between t and
	# t - p, storing the result to 1152(%esp):
	movl	%ecx, 40(%eax)
	addl	$1132, %esp             # imm = 0x46C
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp
	retl
.Lfunc_end162:
	.size	mcl_fp_mont11Lbmi2, .Lfunc_end162-mcl_fp_mont11Lbmi2
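# mcl_fp_montNF11Lbmi2 is the companion "NF" Montgomery multiplication.
# It shares the round structure above but, apparently relying on the
# modulus leaving headroom in its top limb, it drops the per-round
# sbbl/andl $1 carry capture (the helper's top word is carried forward
# with a plain adcl $0) and finishes with a borrow-based keep/discard
# of t - p rather than the bit bookkeeping above. A minimal C sketch of
# that final correction (final_select and n are illustrative names):
#
#   #include <stdint.h>
#
#   /* Keep t - p when it does not borrow, else keep t; assumes the
#    * accumulator t already fits in n words. */
#   static void final_select(uint32_t z[], const uint32_t t[],
#                            const uint32_t p[], int n)
#   {
#       uint64_t b = 0, v;
#       for (int j = 0; j < n; j++) {
#           v = (uint64_t)t[j] - p[j] - b;
#           z[j] = (uint32_t)v;
#           b = (v >> 32) & 1;
#       }
#       if (b)                        /* t < p: keep t */
#           for (int j = 0; j < n; j++) z[j] = t[j];
#   }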
	.globl	mcl_fp_montNF11Lbmi2
	.align	16, 0x90
	.type	mcl_fp_montNF11Lbmi2,@function
mcl_fp_montNF11Lbmi2:                   # @mcl_fp_montNF11Lbmi2
# BB#0:
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	subl	$1132, %esp             # imm = 0x46C
	calll	.L163$pb
.L163$pb:
	popl	%ebx
.Ltmp24:
	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp24-.L163$pb), %ebx
	movl	1164(%esp), %eax
	movl	-4(%eax), %esi
	movl	%esi, 24(%esp)          # 4-byte Spill
	movl	1160(%esp), %eax
	movl	(%eax), %eax
	movl	%eax, (%esp)
	leal	1080(%esp), %ecx
	movl	1156(%esp), %edx
	calll	.LmulPv352x32
	# The eleven multiply/reduce rounds mirror mcl_fp_mont11Lbmi2
	# (same .LmulPv352x32 buffers at 1080/1032 down to 120/72, same
	# q = t[0] * 24(%esp) reductions), with the helper's top word
	# carried forward by adcl $0 instead of sbbl/andl $1. After the
	# last round, t - p is computed limb by limb with subl/sbbl
	# against the modulus at 1164(%esp) (held in %ebx):
	movl	%eax, %edx
	movl	1164(%esp), %ebx
	subl	(%ebx), %edx
	movl	%ecx, %esi
	sbbl	4(%ebx), %esi
	movl	%edi, %ecx
	sbbl	8(%ebx), %ecx
	movl	44(%esp), %eax          # 4-byte Reload
	sbbl	12(%ebx), %eax
	movl	40(%esp), %ebp          # 4-byte Reload
	sbbl	16(%ebx), %ebp
	movl	%ebp, 4(%esp)           # 4-byte Spill
	movl	28(%esp), %ebp          # 4-byte Reload
	sbbl	20(%ebx), %ebp
	movl	%ebp, 8(%esp)           # 4-byte Spill
	movl	32(%esp), %ebp          # 4-byte Reload
	sbbl	24(%ebx), %ebp
	movl	%ebp, 12(%esp)          # 4-byte Spill
	movl	36(%esp), %ebp          # 4-byte Reload
	sbbl	28(%ebx), %ebp
	movl
%ebp, 16(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - sbbl 32(%ebx), %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - sbbl 36(%ebx), %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - sbbl 40(%ebx), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl %ebp, %ebx - sarl $31, %ebx - testl %ebx, %ebx - movl 68(%esp), %ebx # 4-byte Reload - js .LBB163_2 -# BB#1: - movl %edx, %ebx -.LBB163_2: - movl 1152(%esp), %edx - movl %ebx, (%edx) - movl 60(%esp), %ebp # 4-byte Reload - js .LBB163_4 -# BB#3: - movl %esi, %ebp -.LBB163_4: - movl %ebp, 4(%edx) - js .LBB163_6 -# BB#5: - movl %ecx, %edi -.LBB163_6: - movl %edi, 8(%edx) - movl 44(%esp), %ecx # 4-byte Reload - js .LBB163_8 -# BB#7: - movl %eax, %ecx -.LBB163_8: - movl %ecx, 12(%edx) - movl 40(%esp), %eax # 4-byte Reload - js .LBB163_10 -# BB#9: - movl 4(%esp), %eax # 4-byte Reload -.LBB163_10: - movl %eax, 16(%edx) - movl 28(%esp), %eax # 4-byte Reload - js .LBB163_12 -# BB#11: - movl 8(%esp), %eax # 4-byte Reload -.LBB163_12: - movl %eax, 20(%edx) - movl 32(%esp), %eax # 4-byte Reload - js .LBB163_14 -# BB#13: - movl 12(%esp), %eax # 4-byte Reload -.LBB163_14: - movl %eax, 24(%edx) - movl 36(%esp), %eax # 4-byte Reload - js .LBB163_16 -# BB#15: - movl 16(%esp), %eax # 4-byte Reload -.LBB163_16: - movl %eax, 28(%edx) - movl 52(%esp), %eax # 4-byte Reload - js .LBB163_18 -# BB#17: - movl 20(%esp), %eax # 4-byte Reload -.LBB163_18: - movl %eax, 32(%edx) - movl 56(%esp), %eax # 4-byte Reload - js .LBB163_20 -# BB#19: - movl 24(%esp), %eax # 4-byte Reload -.LBB163_20: - movl %eax, 36(%edx) - movl 64(%esp), %eax # 4-byte Reload - js .LBB163_22 -# BB#21: - movl 48(%esp), %eax # 4-byte Reload -.LBB163_22: - movl %eax, 40(%edx) - addl $1132, %esp # imm = 0x46C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end163: - .size mcl_fp_montNF11Lbmi2, .Lfunc_end163-mcl_fp_montNF11Lbmi2 - - .globl mcl_fp_montRed11Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed11Lbmi2,@function -mcl_fp_montRed11Lbmi2: # @mcl_fp_montRed11Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $668, %esp # imm = 0x29C - calll .L164$pb -.L164$pb: - popl %eax -.Ltmp25: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp25-.L164$pb), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 696(%esp), %edx - movl -4(%edx), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 692(%esp), %ecx - movl (%ecx), %ebx - movl %ebx, 60(%esp) # 4-byte Spill - movl 4(%ecx), %eax - movl %eax, 64(%esp) # 4-byte Spill - imull %esi, %ebx - movl 84(%ecx), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 80(%ecx), %esi - movl %esi, 96(%esp) # 4-byte Spill - movl 76(%ecx), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 72(%ecx), %esi - movl %esi, 104(%esp) # 4-byte Spill - movl 68(%ecx), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 64(%ecx), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 60(%ecx), %esi - movl %esi, 124(%esp) # 4-byte Spill - movl 56(%ecx), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 52(%ecx), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 48(%ecx), %esi - movl %esi, 128(%esp) # 4-byte Spill - movl 44(%ecx), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 40(%ecx), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 36(%ecx), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 32(%ecx), %esi - movl %esi, 88(%esp) # 4-byte Spill - movl 28(%ecx), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 24(%ecx), %ebp - movl 20(%ecx), %edi - movl 16(%ecx), %esi - movl 12(%ecx), %eax - movl %eax, 
48(%esp) # 4-byte Spill - movl 8(%ecx), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl (%edx), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 40(%edx), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 36(%edx), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 32(%edx), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 28(%edx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 24(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 20(%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 16(%edx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 12(%edx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 8(%edx), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 4(%edx), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl %ebx, (%esp) - leal 616(%esp), %ecx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - movl 60(%esp), %eax # 4-byte Reload - addl 616(%esp), %eax - movl 64(%esp), %ecx # 4-byte Reload - adcl 620(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 624(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 632(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - adcl 636(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - adcl 640(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - movl 120(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 568(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - andl $1, %edi - movl %edi, %ecx - addl 568(%esp), %esi - movl 56(%esp), %edx # 4-byte Reload - adcl 572(%esp), %edx - movl 48(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 600(%esp), %edi - movl 100(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - 
adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 120(%esp) # 4-byte Spill - movl 108(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl %edx, %ebp - movl %ebp, %eax - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 520(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 520(%esp), %ebp - movl 48(%esp), %ecx # 4-byte Reload - adcl 524(%esp), %ecx - movl 52(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 548(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl 124(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl 120(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, %esi - movl %esi, 108(%esp) # 4-byte Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %esi - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 472(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 472(%esp), %esi - movl 52(%esp), %ecx # 4-byte Reload - adcl 476(%esp), %ecx - movl 60(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 496(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 124(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 120(%esp) # 4-byte Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - 
movl 104(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - movl 80(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 56(%esp) # 4-byte Folded Spill - movl %ecx, %ebp - movl %ebp, %eax - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 424(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 424(%esp), %ebp - movl 60(%esp), %ecx # 4-byte Reload - adcl 428(%esp), %ecx - movl 64(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 440(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 444(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl 464(%esp), %ebp - movl 124(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 104(%esp) # 4-byte Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl %ecx, %esi - movl %esi, %eax - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 376(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 376(%esp), %esi - movl 64(%esp), %ecx # 4-byte Reload - adcl 380(%esp), %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %esi # 4-byte Reload - adcl 404(%esp), %esi - movl 116(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 412(%esp), %ebp - movl %ebp, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - movl 96(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 56(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 328(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 328(%esp), %edi - movl 76(%esp), %ecx # 
4-byte Reload - adcl 332(%esp), %ecx - movl 88(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 100(%esp), %edi # 4-byte Reload - adcl 344(%esp), %edi - movl 132(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl 352(%esp), %esi - movl %esi, 128(%esp) # 4-byte Spill - movl 116(%esp), %esi # 4-byte Reload - adcl 356(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 96(%esp) # 4-byte Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl %ecx, %ebp - movl %ebp, %eax - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 280(%esp), %ecx - movl 696(%esp), %eax - movl %eax, %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 280(%esp), %ebp - movl 88(%esp), %ebp # 4-byte Reload - adcl 284(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 292(%esp), %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 132(%esp), %edi # 4-byte Reload - adcl 296(%esp), %edi - movl 128(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - adcl 304(%esp), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl %ebp, %eax - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 232(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 232(%esp), %ebp - movl 84(%esp), %ebp # 4-byte Reload - adcl 236(%esp), %ebp - movl 100(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 244(%esp), %edi - movl %edi, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 
104(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 276(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - movl 80(%esp), %esi # 4-byte Reload - adcl $0, %esi - movl 56(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl %ebp, %eax - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 184(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 184(%esp), %ebp - movl 100(%esp), %ecx # 4-byte Reload - adcl 188(%esp), %ecx - movl 132(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 80(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, %ebp - movl 68(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 136(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 136(%esp), %esi - movl 132(%esp), %eax # 4-byte Reload - adcl 140(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl %eax, %edi - movl 128(%esp), %edx # 4-byte Reload - adcl 144(%esp), %edx - movl %edx, 128(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 148(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 152(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 156(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 164(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 168(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 176(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 80(%esp), %ebx # 4-byte Reload - adcl 180(%esp), %ebx - movl %ebx, 80(%esp) # 4-byte Spill - adcl $0, %ebp - subl 12(%esp), %edi # 4-byte Folded Reload - sbbl 4(%esp), %edx # 4-byte Folded Reload - sbbl 8(%esp), %ecx # 4-byte Folded Reload - sbbl 16(%esp), %esi # 4-byte Folded Reload - movl 124(%esp), %eax # 4-byte Reload - sbbl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - sbbl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - sbbl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill 
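The interleaved calll .LmulPv352x32 / adcl blocks above are the rounds of mcl_fp_montRed11Lbmi2, Montgomery reduction of a 22-limb product. Each round computes a quotient word q from the lowest live limb and the constant kept at 68(%esp) (assumed to be -p[0]^-1 mod 2^32), adds q times the modulus so that limb cancels, and the long adcl $0 chains ripple the carry through the spilled upper limbs. A compact C sketch of that loop, under the same 32-bit-limb assumptions as the multiply sketch earlier (mont_red_sketch is a hypothetical name):

    #include <stdint.h>

    #define LIMBS 11

    /* Word-by-word Montgomery reduction sketch: z = xy * R^-1 mod p for a
     * double-width input xy, with R = 2^(32*LIMBS). Hypothetical helper. */
    static void mont_red_sketch(uint32_t z[LIMBS], const uint32_t xy[2 * LIMBS],
                                const uint32_t p[LIMBS], uint32_t inv)
    {
        uint32_t t[2 * LIMBS + 1];
        for (int j = 0; j < 2 * LIMBS; j++) t[j] = xy[j];
        t[2 * LIMBS] = 0;

        for (int i = 0; i < LIMBS; i++) {
            uint32_t q = t[i] * inv;       /* the imull 68(%esp) step */
            uint64_t carry = 0;
            for (int j = 0; j < LIMBS; j++) {
                uint64_t s = (uint64_t)t[i + j] + (uint64_t)q * p[j] + carry;
                t[i + j] = (uint32_t)s;    /* limb i cancels to zero */
                carry = s >> 32;
            }
            /* ripple the carry upward (the adcl $0 chains in the assembly) */
            for (int j = i + LIMBS; carry && j <= 2 * LIMBS; j++) {
                uint64_t s = (uint64_t)t[j] + carry;
                t[j] = (uint32_t)s;
                carry = s >> 32;
            }
        }
        /* the high half now holds the result; subtract p once if needed */
        uint32_t d[LIMBS];
        uint64_t borrow = 0;
        for (int j = 0; j < LIMBS; j++) {
            uint64_t u = (uint64_t)t[LIMBS + j] - p[j] - borrow;
            d[j] = (uint32_t)u;
            borrow = (u >> 32) & 1;
        }
        int keep = borrow && !t[2 * LIMBS];
        for (int j = 0; j < LIMBS; j++)
            z[j] = keep ? t[LIMBS + j] : d[j];
    }

After LIMBS rounds the low half is zero and the result sits in the high half; the conditional subtraction corresponds to the jne .LBB164 select ladder that closes the routine below.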
- movl 104(%esp), %eax # 4-byte Reload - sbbl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - sbbl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - sbbl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl %ebx, %eax - movl %ebp, %ebx - sbbl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - sbbl $0, %ebx - andl $1, %ebx - jne .LBB164_2 -# BB#1: - movl %esi, 112(%esp) # 4-byte Spill -.LBB164_2: - testb %bl, %bl - movl 132(%esp), %esi # 4-byte Reload - jne .LBB164_4 -# BB#3: - movl %edi, %esi -.LBB164_4: - movl 688(%esp), %edi - movl %esi, (%edi) - movl 104(%esp), %esi # 4-byte Reload - jne .LBB164_6 -# BB#5: - movl %edx, 128(%esp) # 4-byte Spill -.LBB164_6: - movl 128(%esp), %edx # 4-byte Reload - movl %edx, 4(%edi) - movl 116(%esp), %edx # 4-byte Reload - jne .LBB164_8 -# BB#7: - movl %ecx, %edx -.LBB164_8: - movl %edx, 8(%edi) - movl 112(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%edi) - movl 92(%esp), %edx # 4-byte Reload - movl 124(%esp), %ecx # 4-byte Reload - jne .LBB164_10 -# BB#9: - movl 64(%esp), %ecx # 4-byte Reload -.LBB164_10: - movl %ecx, 16(%edi) - movl 96(%esp), %ecx # 4-byte Reload - movl 120(%esp), %eax # 4-byte Reload - jne .LBB164_12 -# BB#11: - movl 68(%esp), %eax # 4-byte Reload -.LBB164_12: - movl %eax, 20(%edi) - movl 80(%esp), %eax # 4-byte Reload - movl 108(%esp), %ebp # 4-byte Reload - jne .LBB164_14 -# BB#13: - movl 72(%esp), %ebp # 4-byte Reload -.LBB164_14: - movl %ebp, 24(%edi) - jne .LBB164_16 -# BB#15: - movl 76(%esp), %esi # 4-byte Reload -.LBB164_16: - movl %esi, 28(%edi) - jne .LBB164_18 -# BB#17: - movl 84(%esp), %edx # 4-byte Reload -.LBB164_18: - movl %edx, 32(%edi) - jne .LBB164_20 -# BB#19: - movl 88(%esp), %ecx # 4-byte Reload -.LBB164_20: - movl %ecx, 36(%edi) - jne .LBB164_22 -# BB#21: - movl 100(%esp), %eax # 4-byte Reload -.LBB164_22: - movl %eax, 40(%edi) - addl $668, %esp # imm = 0x29C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end164: - .size mcl_fp_montRed11Lbmi2, .Lfunc_end164-mcl_fp_montRed11Lbmi2 - - .globl mcl_fp_addPre11Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre11Lbmi2,@function -mcl_fp_addPre11Lbmi2: # @mcl_fp_addPre11Lbmi2 -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - movl 20(%esp), %ecx - addl (%ecx), %edx - adcl 4(%ecx), %esi - movl 8(%eax), %edi - adcl 8(%ecx), %edi - movl 16(%esp), %ebx - movl %edx, (%ebx) - movl 12(%ecx), %edx - movl %esi, 4(%ebx) - movl 16(%ecx), %esi - adcl 12(%eax), %edx - adcl 16(%eax), %esi - movl %edi, 8(%ebx) - movl 20(%eax), %edi - movl %edx, 12(%ebx) - movl 20(%ecx), %edx - adcl %edi, %edx - movl 24(%eax), %edi - movl %esi, 16(%ebx) - movl 24(%ecx), %esi - adcl %edi, %esi - movl 28(%eax), %edi - movl %edx, 20(%ebx) - movl 28(%ecx), %edx - adcl %edi, %edx - movl 32(%eax), %edi - movl %esi, 24(%ebx) - movl 32(%ecx), %esi - adcl %edi, %esi - movl 36(%eax), %edi - movl %edx, 28(%ebx) - movl 36(%ecx), %edx - adcl %edi, %edx - movl %esi, 32(%ebx) - movl %edx, 36(%ebx) - movl 40(%eax), %eax - movl 40(%ecx), %ecx - adcl %eax, %ecx - movl %ecx, 40(%ebx) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end165: - .size mcl_fp_addPre11Lbmi2, .Lfunc_end165-mcl_fp_addPre11Lbmi2 - - .globl mcl_fp_subPre11Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre11Lbmi2,@function -mcl_fp_subPre11Lbmi2: # 
@mcl_fp_subPre11Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edi - xorl %eax, %eax - movl 28(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%ecx), %ebx - sbbl 8(%edx), %ebx - movl 20(%esp), %ebp - movl %esi, (%ebp) - movl 12(%ecx), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ebp) - movl 16(%ecx), %edi - sbbl 16(%edx), %edi - movl %ebx, 8(%ebp) - movl 20(%edx), %ebx - movl %esi, 12(%ebp) - movl 20(%ecx), %esi - sbbl %ebx, %esi - movl 24(%edx), %ebx - movl %edi, 16(%ebp) - movl 24(%ecx), %edi - sbbl %ebx, %edi - movl 28(%edx), %ebx - movl %esi, 20(%ebp) - movl 28(%ecx), %esi - sbbl %ebx, %esi - movl 32(%edx), %ebx - movl %edi, 24(%ebp) - movl 32(%ecx), %edi - sbbl %ebx, %edi - movl 36(%edx), %ebx - movl %esi, 28(%ebp) - movl 36(%ecx), %esi - sbbl %ebx, %esi - movl %edi, 32(%ebp) - movl %esi, 36(%ebp) - movl 40(%edx), %edx - movl 40(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 40(%ebp) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end166: - .size mcl_fp_subPre11Lbmi2, .Lfunc_end166-mcl_fp_subPre11Lbmi2 - - .globl mcl_fp_shr1_11Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_11Lbmi2,@function -mcl_fp_shr1_11Lbmi2: # @mcl_fp_shr1_11Lbmi2 -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - shrdl $1, %edx, %ecx - movl 8(%esp), %esi - movl %ecx, (%esi) - movl 8(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 4(%esi) - movl 12(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 8(%esi) - movl 16(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 12(%esi) - movl 20(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 16(%esi) - movl 24(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 20(%esi) - movl 28(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 24(%esi) - movl 32(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 28(%esi) - movl 36(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 32(%esi) - movl 40(%eax), %eax - shrdl $1, %eax, %edx - movl %edx, 36(%esi) - shrl %eax - movl %eax, 40(%esi) - popl %esi - retl -.Lfunc_end167: - .size mcl_fp_shr1_11Lbmi2, .Lfunc_end167-mcl_fp_shr1_11Lbmi2 - - .globl mcl_fp_add11Lbmi2 - .align 16, 0x90 - .type mcl_fp_add11Lbmi2,@function -mcl_fp_add11Lbmi2: # @mcl_fp_add11Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $32, %esp - movl 60(%esp), %edi - movl (%edi), %ecx - movl 4(%edi), %eax - movl 56(%esp), %esi - addl (%esi), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl %ecx, %ebp - adcl 4(%esi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 8(%edi), %eax - adcl 8(%esi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 12(%esi), %eax - movl 16(%esi), %ecx - adcl 12(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - adcl 16(%edi), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 20(%esi), %eax - adcl 20(%edi), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 24(%esi), %eax - adcl 24(%edi), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 28(%esi), %ebx - adcl 28(%edi), %ebx - movl %ebx, (%esp) # 4-byte Spill - movl 32(%esi), %ecx - adcl 32(%edi), %ecx - movl 36(%esi), %eax - adcl 36(%edi), %eax - movl 40(%esi), %edx - adcl 40(%edi), %edx - movl 52(%esp), %esi - movl %ebp, (%esi) - movl 28(%esp), %edi # 4-byte Reload - movl %edi, 4(%esi) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 8(%esi) - movl 20(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%esi) - movl 16(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%esi) - movl 12(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%esi) - movl 
8(%esp), %ebp # 4-byte Reload - movl %ebp, 24(%esi) - movl %ebx, 28(%esi) - movl %ecx, 32(%esi) - movl %eax, 36(%esi) - movl %edx, 40(%esi) - sbbl %ebx, %ebx - andl $1, %ebx - movl 64(%esp), %ebp - movl 4(%esp), %edi # 4-byte Reload - subl (%ebp), %edi - movl %edi, 4(%esp) # 4-byte Spill - movl 28(%esp), %edi # 4-byte Reload - sbbl 4(%ebp), %edi - movl %edi, 28(%esp) # 4-byte Spill - movl 24(%esp), %edi # 4-byte Reload - sbbl 8(%ebp), %edi - movl %edi, 24(%esp) # 4-byte Spill - movl 20(%esp), %edi # 4-byte Reload - sbbl 12(%ebp), %edi - movl %edi, 20(%esp) # 4-byte Spill - movl 16(%esp), %edi # 4-byte Reload - sbbl 16(%ebp), %edi - movl %edi, 16(%esp) # 4-byte Spill - movl 12(%esp), %edi # 4-byte Reload - sbbl 20(%ebp), %edi - movl %edi, 12(%esp) # 4-byte Spill - movl 8(%esp), %edi # 4-byte Reload - sbbl 24(%ebp), %edi - movl %edi, 8(%esp) # 4-byte Spill - movl (%esp), %edi # 4-byte Reload - sbbl 28(%ebp), %edi - movl %edi, (%esp) # 4-byte Spill - sbbl 32(%ebp), %ecx - sbbl 36(%ebp), %eax - sbbl 40(%ebp), %edx - movl %edx, %edi - sbbl $0, %ebx - testb $1, %bl - jne .LBB168_2 -# BB#1: # %nocarry - movl 4(%esp), %ebx # 4-byte Reload - movl %ebx, (%esi) - movl 28(%esp), %ebx # 4-byte Reload - movl %ebx, 4(%esi) - movl 24(%esp), %ebx # 4-byte Reload - movl %ebx, 8(%esi) - movl 20(%esp), %ebx # 4-byte Reload - movl %ebx, 12(%esi) - movl 16(%esp), %ebx # 4-byte Reload - movl %ebx, 16(%esi) - movl 12(%esp), %ebx # 4-byte Reload - movl %ebx, 20(%esi) - movl 8(%esp), %ebx # 4-byte Reload - movl %ebx, 24(%esi) - movl (%esp), %edx # 4-byte Reload - movl %edx, 28(%esi) - movl %ecx, 32(%esi) - movl %eax, 36(%esi) - movl %edi, 40(%esi) -.LBB168_2: # %carry - addl $32, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end168: - .size mcl_fp_add11Lbmi2, .Lfunc_end168-mcl_fp_add11Lbmi2 - - .globl mcl_fp_addNF11Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF11Lbmi2,@function -mcl_fp_addNF11Lbmi2: # @mcl_fp_addNF11Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $80, %esp - movl 108(%esp), %edx - movl (%edx), %eax - movl 4(%edx), %ecx - movl 104(%esp), %esi - addl (%esi), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 4(%esi), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 40(%edx), %ebx - movl 36(%edx), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 32(%edx), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 28(%edx), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 24(%edx), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 20(%edx), %ebp - movl 16(%edx), %edi - movl 12(%edx), %eax - movl 8(%edx), %ecx - adcl 8(%esi), %ecx - adcl 12(%esi), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 16(%esi), %edi - movl %edi, 48(%esp) # 4-byte Spill - adcl 20(%esi), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 24(%esi), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 28(%esi), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 32(%esi), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 36(%esi), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl %ecx, %edx - adcl 40(%esi), %ebx - movl %ebx, 56(%esp) # 4-byte Spill - movl 112(%esp), %ebx - movl 52(%esp), %esi # 4-byte Reload - subl (%ebx), %esi - movl 60(%esp), %ecx # 4-byte Reload - sbbl 4(%ebx), %ecx - movl %ecx, (%esp) # 4-byte Spill - movl %edx, %ecx - sbbl 8(%ebx), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - sbbl 12(%ebx), %eax - movl %eax, 8(%esp) # 4-byte Spill - sbbl 
16(%ebx), %edi - movl %edi, 12(%esp) # 4-byte Spill - sbbl 20(%ebx), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - movl %eax, %ecx - sbbl 24(%ebx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - sbbl 28(%ebx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - sbbl 32(%ebx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - movl %edi, %ecx - movl %edi, %ebp - sbbl 36(%ebx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - movl %ecx, %edi - sbbl 40(%ebx), %edi - movl %edi, 36(%esp) # 4-byte Spill - movl %edi, %ebx - movl 52(%esp), %edi # 4-byte Reload - sarl $31, %ebx - testl %ebx, %ebx - js .LBB169_2 -# BB#1: - movl %esi, %edi -.LBB169_2: - movl 100(%esp), %esi - movl %edi, (%esi) - movl 60(%esp), %edi # 4-byte Reload - js .LBB169_4 -# BB#3: - movl (%esp), %edi # 4-byte Reload -.LBB169_4: - movl %edi, 4(%esi) - movl %eax, %edi - js .LBB169_6 -# BB#5: - movl 4(%esp), %edx # 4-byte Reload -.LBB169_6: - movl %edx, 8(%esi) - movl %ebp, %ecx - movl 72(%esp), %edx # 4-byte Reload - movl 40(%esp), %eax # 4-byte Reload - js .LBB169_8 -# BB#7: - movl 8(%esp), %eax # 4-byte Reload -.LBB169_8: - movl %eax, 12(%esi) - movl 76(%esp), %eax # 4-byte Reload - movl 44(%esp), %ebp # 4-byte Reload - js .LBB169_10 -# BB#9: - movl 12(%esp), %ebx # 4-byte Reload - movl %ebx, 48(%esp) # 4-byte Spill -.LBB169_10: - movl 48(%esp), %ebx # 4-byte Reload - movl %ebx, 16(%esi) - js .LBB169_12 -# BB#11: - movl 16(%esp), %ebp # 4-byte Reload -.LBB169_12: - movl %ebp, 20(%esi) - js .LBB169_14 -# BB#13: - movl 20(%esp), %edi # 4-byte Reload -.LBB169_14: - movl %edi, 24(%esi) - js .LBB169_16 -# BB#15: - movl 24(%esp), %eax # 4-byte Reload -.LBB169_16: - movl %eax, 28(%esi) - js .LBB169_18 -# BB#17: - movl 28(%esp), %edx # 4-byte Reload -.LBB169_18: - movl %edx, 32(%esi) - js .LBB169_20 -# BB#19: - movl 32(%esp), %ecx # 4-byte Reload -.LBB169_20: - movl %ecx, 36(%esi) - movl 56(%esp), %eax # 4-byte Reload - js .LBB169_22 -# BB#21: - movl 36(%esp), %eax # 4-byte Reload -.LBB169_22: - movl %eax, 40(%esi) - addl $80, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end169: - .size mcl_fp_addNF11Lbmi2, .Lfunc_end169-mcl_fp_addNF11Lbmi2 - - .globl mcl_fp_sub11Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub11Lbmi2,@function -mcl_fp_sub11Lbmi2: # @mcl_fp_sub11Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $40, %esp - movl 64(%esp), %ebp - movl (%ebp), %ecx - movl 4(%ebp), %eax - movl 68(%esp), %edi - subl (%edi), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - sbbl 4(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 8(%ebp), %eax - sbbl 8(%edi), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 12(%ebp), %ebx - sbbl 12(%edi), %ebx - movl 16(%ebp), %eax - sbbl 16(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 20(%ebp), %eax - sbbl 20(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 24(%ebp), %eax - sbbl 24(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 28(%ebp), %edx - sbbl 28(%edi), %edx - movl %edx, 4(%esp) # 4-byte Spill - movl 32(%ebp), %ecx - sbbl 32(%edi), %ecx - movl %ecx, (%esp) # 4-byte Spill - movl 36(%ebp), %eax - sbbl 36(%edi), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 40(%ebp), %eax - sbbl 40(%edi), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl %ebx, %ebp - movl 16(%esp), %esi # 4-byte Reload - movl $0, %ebx - sbbl $0, %ebx - testb $1, %bl - movl 60(%esp), %ebx - movl %esi, (%ebx) 
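The mcl_fp_add11Lbmi2 and mcl_fp_addNF11Lbmi2 routines above, and the mcl_fp_sub11Lbmi2 body this listing continues into, all follow one pattern: a limb-wise carry chain (adcl or sbbl), a trial operation against the modulus, and a select on the final flag. A minimal C sketch of the add side, with hypothetical names; the sub routine mirrors it by adding p back when the raw subtraction borrows:

    #include <stdint.h>

    #define LIMBS 11

    /* Modular add sketch: z = x + y mod p. The adcl chain computes the raw
     * sum, the sbbl chain the trial difference, and the final flag picks
     * the representative in [0, p). fp_add_sketch is a hypothetical name. */
    static void fp_add_sketch(uint32_t z[LIMBS], const uint32_t x[LIMBS],
                              const uint32_t y[LIMBS], const uint32_t p[LIMBS])
    {
        uint32_t s[LIMBS], d[LIMBS];
        uint64_t carry = 0, borrow = 0;
        for (int i = 0; i < LIMBS; i++) {            /* s = x + y */
            uint64_t t = (uint64_t)x[i] + y[i] + carry;
            s[i] = (uint32_t)t;
            carry = t >> 32;
        }
        for (int i = 0; i < LIMBS; i++) {            /* d = s - p */
            uint64_t t = (uint64_t)s[i] - p[i] - borrow;
            d[i] = (uint32_t)t;
            borrow = (t >> 32) & 1;
        }
        /* keep the raw sum only when it did not reach p (borrow, no carry) */
        int keep = borrow && !carry;
        for (int i = 0; i < LIMBS; i++)
            z[i] = keep ? s[i] : d[i];
    }

The generated code unrolls all of this and expresses the select as js/jne ladders over spilled limbs rather than a loop, but the arithmetic is the same. The sub11 routine continues below.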
- movl 28(%esp), %edi # 4-byte Reload - movl %edi, 4(%ebx) - movl 32(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl %ebp, 12(%ebx) - movl 36(%esp), %edi # 4-byte Reload - movl %edi, 16(%ebx) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 20(%ebx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 24(%ebx) - movl %edx, 28(%ebx) - movl %ecx, 32(%ebx) - movl 12(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%ebx) - movl %ecx, %edi - movl %eax, 40(%ebx) - je .LBB170_2 -# BB#1: # %carry - movl 72(%esp), %eax - addl (%eax), %esi - movl %esi, (%ebx) - movl 28(%esp), %edx # 4-byte Reload - movl %eax, %esi - adcl 4(%esi), %edx - movl %edx, 4(%ebx) - movl 32(%esp), %ecx # 4-byte Reload - adcl 8(%esi), %ecx - movl 12(%esi), %eax - adcl %ebp, %eax - movl %ecx, 8(%ebx) - movl 16(%esi), %ecx - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %eax, 12(%ebx) - movl 20(%esi), %eax - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %ecx, 16(%ebx) - movl 24(%esi), %ecx - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %eax, 20(%ebx) - movl 28(%esi), %eax - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %ecx, 24(%ebx) - movl 32(%esi), %ecx - adcl (%esp), %ecx # 4-byte Folded Reload - movl %eax, 28(%ebx) - movl %ecx, 32(%ebx) - movl 36(%esi), %eax - adcl %edi, %eax - movl %eax, 36(%ebx) - movl 40(%esi), %eax - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%ebx) -.LBB170_2: # %nocarry - addl $40, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end170: - .size mcl_fp_sub11Lbmi2, .Lfunc_end170-mcl_fp_sub11Lbmi2 - - .globl mcl_fp_subNF11Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF11Lbmi2,@function -mcl_fp_subNF11Lbmi2: # @mcl_fp_subNF11Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $64, %esp - movl 88(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %ecx - movl 92(%esp), %edi - subl (%edi), %edx - movl %edx, 40(%esp) # 4-byte Spill - sbbl 4(%edi), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 40(%eax), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 36(%eax), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 32(%eax), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 28(%eax), %ebx - movl 24(%eax), %ebp - movl 20(%eax), %esi - movl 16(%eax), %edx - movl 12(%eax), %ecx - movl 8(%eax), %eax - sbbl 8(%edi), %eax - movl %eax, 16(%esp) # 4-byte Spill - sbbl 12(%edi), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - sbbl 16(%edi), %edx - movl %edx, 24(%esp) # 4-byte Spill - sbbl 20(%edi), %esi - movl %esi, 28(%esp) # 4-byte Spill - sbbl 24(%edi), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - sbbl 28(%edi), %ebx - movl %ebx, 36(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - sbbl 32(%edi), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - sbbl 36(%edi), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - sbbl 40(%edi), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl %eax, %ecx - sarl $31, %ecx - movl %ecx, %edx - shldl $1, %eax, %edx - movl 96(%esp), %ebx - movl 4(%ebx), %eax - andl %edx, %eax - movl %eax, 48(%esp) # 4-byte Spill - andl (%ebx), %edx - movl 40(%ebx), %eax - andl %ecx, %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 36(%ebx), %eax - andl %ecx, %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 32(%ebx), %eax - andl %ecx, %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 28(%ebx), %eax - andl %ecx, %eax - movl %eax, (%esp) # 4-byte Spill - movl 24(%ebx), %ebp - andl %ecx, %ebp - rorxl $31, %ecx, %eax - andl 20(%ebx), %ecx - movl 16(%ebx), %edi - andl %eax, %edi - movl 
12(%ebx), %esi - andl %eax, %esi - andl 8(%ebx), %eax - addl 40(%esp), %edx # 4-byte Folded Reload - movl 48(%esp), %ebx # 4-byte Reload - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 48(%esp) # 4-byte Spill - movl 84(%esp), %ebx - movl %edx, (%ebx) - adcl 16(%esp), %eax # 4-byte Folded Reload - movl 48(%esp), %edx # 4-byte Reload - movl %edx, 4(%ebx) - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %eax, 8(%ebx) - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %esi, 12(%ebx) - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %edi, 16(%ebx) - adcl 32(%esp), %ebp # 4-byte Folded Reload - movl %ecx, 20(%ebx) - movl (%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %ebp, 24(%ebx) - movl 4(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %eax, 28(%ebx) - movl 8(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %ecx, 32(%ebx) - movl %eax, 36(%ebx) - movl 12(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%ebx) - addl $64, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end171: - .size mcl_fp_subNF11Lbmi2, .Lfunc_end171-mcl_fp_subNF11Lbmi2 - - .globl mcl_fpDbl_add11Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add11Lbmi2,@function -mcl_fpDbl_add11Lbmi2: # @mcl_fpDbl_add11Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $80, %esp - movl 108(%esp), %ecx - movl 104(%esp), %edi - movl 12(%edi), %esi - movl 16(%edi), %edx - movl 8(%ecx), %ebx - movl (%ecx), %ebp - addl (%edi), %ebp - movl 100(%esp), %eax - movl %ebp, (%eax) - movl 4(%ecx), %ebp - adcl 4(%edi), %ebp - adcl 8(%edi), %ebx - adcl 12(%ecx), %esi - adcl 16(%ecx), %edx - movl %ebp, 4(%eax) - movl 52(%ecx), %ebp - movl %ebx, 8(%eax) - movl 20(%ecx), %ebx - movl %esi, 12(%eax) - movl 20(%edi), %esi - adcl %ebx, %esi - movl 24(%ecx), %ebx - movl %edx, 16(%eax) - movl 24(%edi), %edx - adcl %ebx, %edx - movl 28(%ecx), %ebx - movl %esi, 20(%eax) - movl 28(%edi), %esi - adcl %ebx, %esi - movl 32(%ecx), %ebx - movl %edx, 24(%eax) - movl 32(%edi), %edx - adcl %ebx, %edx - movl 36(%ecx), %ebx - movl %esi, 28(%eax) - movl 36(%edi), %esi - adcl %ebx, %esi - movl 40(%ecx), %ebx - movl %edx, 32(%eax) - movl 40(%edi), %edx - adcl %ebx, %edx - movl 44(%ecx), %ebx - movl %esi, 36(%eax) - movl 44(%edi), %esi - adcl %ebx, %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 48(%ecx), %esi - movl %edx, 40(%eax) - movl 48(%edi), %eax - adcl %esi, %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 52(%edi), %eax - adcl %ebp, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 56(%ecx), %edx - movl 56(%edi), %eax - adcl %edx, %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%ecx), %edx - movl 60(%edi), %eax - adcl %edx, %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%ecx), %edx - movl 64(%edi), %eax - adcl %edx, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%ecx), %eax - movl 68(%edi), %edx - adcl %eax, %edx - movl 72(%ecx), %esi - movl 72(%edi), %eax - adcl %esi, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 76(%ecx), %ebx - movl 76(%edi), %esi - adcl %ebx, %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 80(%ecx), %ebp - movl 80(%edi), %ebx - adcl %ebp, %ebx - movl %ebx, 52(%esp) # 4-byte Spill - movl 84(%ecx), %ecx - movl 84(%edi), %edi - adcl %ecx, %edi - movl %edi, 40(%esp) # 4-byte Spill - sbbl %ecx, %ecx - andl $1, %ecx - movl 112(%esp), %ebp - movl 68(%esp), %edi # 4-byte Reload - subl (%ebp), %edi - movl %edi, 24(%esp) # 4-byte Spill - movl 72(%esp), %edi # 
4-byte Reload - sbbl 4(%ebp), %edi - movl %edi, 20(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - sbbl 8(%ebp), %edi - movl %edi, 16(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - sbbl 12(%ebp), %edi - movl %edi, 12(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - sbbl 16(%ebp), %edi - movl %edi, 8(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - sbbl 20(%ebp), %edi - movl %edi, 4(%esp) # 4-byte Spill - movl %edx, %edi - sbbl 24(%ebp), %edi - movl %edi, (%esp) # 4-byte Spill - sbbl 28(%ebp), %eax - movl %eax, 28(%esp) # 4-byte Spill - sbbl 32(%ebp), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl %ebx, %eax - movl 40(%esp), %ebx # 4-byte Reload - sbbl 36(%ebp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl %ebx, %edi - sbbl 40(%ebp), %edi - sbbl $0, %ecx - andl $1, %ecx - jne .LBB172_2 -# BB#1: - movl %edi, %ebx -.LBB172_2: - testb %cl, %cl - movl 68(%esp), %ecx # 4-byte Reload - movl 64(%esp), %esi # 4-byte Reload - movl 60(%esp), %edi # 4-byte Reload - movl 56(%esp), %ebp # 4-byte Reload - jne .LBB172_4 -# BB#3: - movl (%esp), %edx # 4-byte Reload - movl 4(%esp), %esi # 4-byte Reload - movl 8(%esp), %edi # 4-byte Reload - movl 12(%esp), %ebp # 4-byte Reload - movl 16(%esp), %eax # 4-byte Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 24(%esp), %ecx # 4-byte Reload -.LBB172_4: - movl 100(%esp), %eax - movl %ecx, 44(%eax) - movl 72(%esp), %ecx # 4-byte Reload - movl %ecx, 48(%eax) - movl 76(%esp), %ecx # 4-byte Reload - movl %ecx, 52(%eax) - movl %ebp, 56(%eax) - movl %edi, 60(%eax) - movl %esi, 64(%eax) - movl %edx, 68(%eax) - movl 52(%esp), %ecx # 4-byte Reload - movl 44(%esp), %edx # 4-byte Reload - jne .LBB172_6 -# BB#5: - movl 28(%esp), %edx # 4-byte Reload -.LBB172_6: - movl %edx, 72(%eax) - movl 48(%esp), %edx # 4-byte Reload - jne .LBB172_8 -# BB#7: - movl 32(%esp), %edx # 4-byte Reload -.LBB172_8: - movl %edx, 76(%eax) - jne .LBB172_10 -# BB#9: - movl 36(%esp), %ecx # 4-byte Reload -.LBB172_10: - movl %ecx, 80(%eax) - movl %ebx, 84(%eax) - addl $80, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end172: - .size mcl_fpDbl_add11Lbmi2, .Lfunc_end172-mcl_fpDbl_add11Lbmi2 - - .globl mcl_fpDbl_sub11Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub11Lbmi2,@function -mcl_fpDbl_sub11Lbmi2: # @mcl_fpDbl_sub11Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $72, %esp - movl 96(%esp), %edx - movl (%edx), %eax - movl 4(%edx), %esi - movl 100(%esp), %ebp - subl (%ebp), %eax - sbbl 4(%ebp), %esi - movl 8(%edx), %edi - sbbl 8(%ebp), %edi - movl 92(%esp), %ecx - movl %eax, (%ecx) - movl 12(%edx), %eax - sbbl 12(%ebp), %eax - movl %esi, 4(%ecx) - movl 16(%edx), %esi - sbbl 16(%ebp), %esi - movl %edi, 8(%ecx) - movl 20(%ebp), %edi - movl %eax, 12(%ecx) - movl 20(%edx), %eax - sbbl %edi, %eax - movl 24(%ebp), %edi - movl %esi, 16(%ecx) - movl 24(%edx), %esi - sbbl %edi, %esi - movl 28(%ebp), %edi - movl %eax, 20(%ecx) - movl 28(%edx), %eax - sbbl %edi, %eax - movl 32(%ebp), %edi - movl %esi, 24(%ecx) - movl 32(%edx), %esi - sbbl %edi, %esi - movl 36(%ebp), %edi - movl %eax, 28(%ecx) - movl 36(%edx), %eax - sbbl %edi, %eax - movl 40(%ebp), %edi - movl %esi, 32(%ecx) - movl 40(%edx), %esi - sbbl %edi, %esi - movl 44(%ebp), %edi - movl %eax, 36(%ecx) - movl 44(%edx), %eax - sbbl %edi, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%ebp), %eax - movl %esi, 40(%ecx) - movl 48(%edx), %esi - sbbl %eax, %esi - movl %esi, 24(%esp) 
# 4-byte Spill - movl 52(%ebp), %eax - movl 52(%edx), %esi - sbbl %eax, %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 56(%ebp), %eax - movl 56(%edx), %esi - sbbl %eax, %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 60(%ebp), %eax - movl 60(%edx), %esi - sbbl %eax, %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 64(%ebp), %eax - movl 64(%edx), %esi - sbbl %eax, %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 68(%ebp), %eax - movl 68(%edx), %esi - sbbl %eax, %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 72(%ebp), %eax - movl 72(%edx), %esi - sbbl %eax, %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 76(%ebp), %eax - movl 76(%edx), %esi - sbbl %eax, %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 80(%ebp), %eax - movl 80(%edx), %esi - sbbl %eax, %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 84(%ebp), %eax - movl 84(%edx), %edx - sbbl %eax, %edx - movl %edx, 68(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 104(%esp), %ebp - jne .LBB173_1 -# BB#2: - movl $0, 28(%esp) # 4-byte Folded Spill - jmp .LBB173_3 -.LBB173_1: - movl 40(%ebp), %edx - movl %edx, 28(%esp) # 4-byte Spill -.LBB173_3: - testb %al, %al - jne .LBB173_4 -# BB#5: - movl $0, 16(%esp) # 4-byte Folded Spill - movl $0, 8(%esp) # 4-byte Folded Spill - jmp .LBB173_6 -.LBB173_4: - movl (%ebp), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 4(%ebp), %eax - movl %eax, 16(%esp) # 4-byte Spill -.LBB173_6: - jne .LBB173_7 -# BB#8: - movl $0, 20(%esp) # 4-byte Folded Spill - jmp .LBB173_9 -.LBB173_7: - movl 36(%ebp), %eax - movl %eax, 20(%esp) # 4-byte Spill -.LBB173_9: - jne .LBB173_10 -# BB#11: - movl $0, 12(%esp) # 4-byte Folded Spill - jmp .LBB173_12 -.LBB173_10: - movl 32(%ebp), %eax - movl %eax, 12(%esp) # 4-byte Spill -.LBB173_12: - jne .LBB173_13 -# BB#14: - movl $0, 4(%esp) # 4-byte Folded Spill - jmp .LBB173_15 -.LBB173_13: - movl 28(%ebp), %eax - movl %eax, 4(%esp) # 4-byte Spill -.LBB173_15: - jne .LBB173_16 -# BB#17: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB173_18 -.LBB173_16: - movl 24(%ebp), %eax - movl %eax, (%esp) # 4-byte Spill -.LBB173_18: - jne .LBB173_19 -# BB#20: - movl $0, %edx - jmp .LBB173_21 -.LBB173_19: - movl 20(%ebp), %edx -.LBB173_21: - jne .LBB173_22 -# BB#23: - movl $0, %edi - jmp .LBB173_24 -.LBB173_22: - movl 16(%ebp), %edi -.LBB173_24: - jne .LBB173_25 -# BB#26: - movl $0, %ebx - jmp .LBB173_27 -.LBB173_25: - movl 12(%ebp), %ebx -.LBB173_27: - jne .LBB173_28 -# BB#29: - xorl %ebp, %ebp - jmp .LBB173_30 -.LBB173_28: - movl 8(%ebp), %ebp -.LBB173_30: - movl 8(%esp), %esi # 4-byte Reload - addl 36(%esp), %esi # 4-byte Folded Reload - movl 16(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %esi, 44(%ecx) - adcl 32(%esp), %ebp # 4-byte Folded Reload - movl %eax, 48(%ecx) - adcl 40(%esp), %ebx # 4-byte Folded Reload - movl %ebp, 52(%ecx) - adcl 44(%esp), %edi # 4-byte Folded Reload - movl %ebx, 56(%ecx) - adcl 48(%esp), %edx # 4-byte Folded Reload - movl %edi, 60(%ecx) - movl (%esp), %esi # 4-byte Reload - adcl 52(%esp), %esi # 4-byte Folded Reload - movl %edx, 64(%ecx) - movl 4(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %esi, 68(%ecx) - movl 12(%esp), %edx # 4-byte Reload - adcl 60(%esp), %edx # 4-byte Folded Reload - movl %eax, 72(%ecx) - movl 20(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %edx, 76(%ecx) - movl %eax, 80(%ecx) - movl 28(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%ecx) - addl $72, %esp - popl 
%esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end173: - .size mcl_fpDbl_sub11Lbmi2, .Lfunc_end173-mcl_fpDbl_sub11Lbmi2 - - .align 16, 0x90 - .type .LmulPv384x32,@function -.LmulPv384x32: # @mulPv384x32 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $36, %esp - movl %edx, %eax - movl 56(%esp), %edx - mulxl 4(%eax), %ebx, %edi - mulxl (%eax), %esi, %ebp - movl %esi, 32(%esp) # 4-byte Spill - addl %ebx, %ebp - movl %ebp, 28(%esp) # 4-byte Spill - mulxl 8(%eax), %ebx, %esi - adcl %edi, %ebx - movl %ebx, 24(%esp) # 4-byte Spill - mulxl 12(%eax), %edi, %ebx - adcl %esi, %edi - movl %edi, 20(%esp) # 4-byte Spill - mulxl 16(%eax), %esi, %edi - adcl %ebx, %esi - movl %esi, 16(%esp) # 4-byte Spill - mulxl 20(%eax), %esi, %ebx - adcl %edi, %esi - movl %esi, 12(%esp) # 4-byte Spill - mulxl 24(%eax), %esi, %edi - adcl %ebx, %esi - movl %esi, 8(%esp) # 4-byte Spill - mulxl 28(%eax), %ebx, %esi - adcl %edi, %ebx - mulxl 32(%eax), %edi, %ebp - movl %ebp, 4(%esp) # 4-byte Spill - adcl %esi, %edi - mulxl 36(%eax), %esi, %ebp - movl %ebp, (%esp) # 4-byte Spill - adcl 4(%esp), %esi # 4-byte Folded Reload - mulxl 40(%eax), %edx, %ebp - movl %ebp, 4(%esp) # 4-byte Spill - adcl (%esp), %edx # 4-byte Folded Reload - movl 32(%esp), %ebp # 4-byte Reload - movl %ebp, (%ecx) - movl 28(%esp), %ebp # 4-byte Reload - movl %ebp, 4(%ecx) - movl 24(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%ecx) - movl 20(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%ecx) - movl 16(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%ecx) - movl 12(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%ecx) - movl 8(%esp), %ebp # 4-byte Reload - movl %ebp, 24(%ecx) - movl %ebx, 28(%ecx) - movl %edi, 32(%ecx) - movl %esi, 36(%ecx) - movl %edx, 40(%ecx) - movl 56(%esp), %edx - mulxl 44(%eax), %eax, %edx - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%ecx) - adcl $0, %edx - movl %edx, 48(%ecx) - movl %ecx, %eax - addl $36, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end174: - .size .LmulPv384x32, .Lfunc_end174-.LmulPv384x32 - - .globl mcl_fp_mulUnitPre12Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre12Lbmi2,@function -mcl_fp_mulUnitPre12Lbmi2: # @mcl_fp_mulUnitPre12Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $92, %esp - calll .L175$pb -.L175$pb: - popl %ebx -.Ltmp26: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp26-.L175$pb), %ebx - movl 120(%esp), %eax - movl %eax, (%esp) - leal 40(%esp), %ecx - movl 116(%esp), %edx - calll .LmulPv384x32 - movl 88(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 84(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 80(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 76(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 72(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 68(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 64(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 60(%esp), %ebp - movl 56(%esp), %ebx - movl 52(%esp), %edi - movl 48(%esp), %esi - movl 40(%esp), %edx - movl 44(%esp), %ecx - movl 112(%esp), %eax - movl %edx, (%eax) - movl %ecx, 4(%eax) - movl %esi, 8(%eax) - movl %edi, 12(%eax) - movl %ebx, 16(%eax) - movl %ebp, 20(%eax) - movl 12(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 40(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 
44(%eax) - movl 36(%esp), %ecx # 4-byte Reload - movl %ecx, 48(%eax) - addl $92, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end175: - .size mcl_fp_mulUnitPre12Lbmi2, .Lfunc_end175-mcl_fp_mulUnitPre12Lbmi2 - - .globl mcl_fpDbl_mulPre12Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre12Lbmi2,@function -mcl_fpDbl_mulPre12Lbmi2: # @mcl_fpDbl_mulPre12Lbmi2 -# BB#0: - pushl %ebp - movl %esp, %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $220, %esp - calll .L176$pb -.L176$pb: - popl %ebx -.Ltmp27: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp27-.L176$pb), %ebx - movl %ebx, -164(%ebp) # 4-byte Spill - movl 16(%ebp), %esi - movl %esi, 8(%esp) - movl 12(%ebp), %edi - movl %edi, 4(%esp) - movl 8(%ebp), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre6Lbmi2@PLT - leal 24(%esi), %eax - movl %eax, 8(%esp) - leal 24(%edi), %eax - movl %eax, 4(%esp) - movl 8(%ebp), %eax - leal 48(%eax), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre6Lbmi2@PLT - movl 40(%edi), %ebx - movl 36(%edi), %eax - movl 32(%edi), %edx - movl (%edi), %esi - movl 4(%edi), %ecx - addl 24(%edi), %esi - adcl 28(%edi), %ecx - movl %ecx, -172(%ebp) # 4-byte Spill - adcl 8(%edi), %edx - movl %edx, -188(%ebp) # 4-byte Spill - adcl 12(%edi), %eax - movl %eax, -168(%ebp) # 4-byte Spill - adcl 16(%edi), %ebx - movl %ebx, -180(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %eax - movl %eax, -112(%ebp) # 4-byte Spill - movl 16(%ebp), %edi - movl (%edi), %eax - addl 24(%edi), %eax - movl %eax, -136(%ebp) # 4-byte Spill - movl 4(%edi), %eax - adcl 28(%edi), %eax - movl %eax, -140(%ebp) # 4-byte Spill - movl 32(%edi), %eax - adcl 8(%edi), %eax - movl %eax, -144(%ebp) # 4-byte Spill - movl 36(%edi), %eax - adcl 12(%edi), %eax - movl %eax, -148(%ebp) # 4-byte Spill - movl 40(%edi), %ecx - adcl 16(%edi), %ecx - movl 44(%edi), %eax - adcl 20(%edi), %eax - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -184(%ebp) # 4-byte Spill - movl %ebx, %edi - movl %edx, -156(%ebp) # 4-byte Spill - movl %esi, -160(%ebp) # 4-byte Spill - movl %esi, %edx - jb .LBB176_2 -# BB#1: - xorl %edi, %edi - movl $0, -156(%ebp) # 4-byte Folded Spill - movl $0, -160(%ebp) # 4-byte Folded Spill -.LBB176_2: - movl %edi, -176(%ebp) # 4-byte Spill - movl 12(%ebp), %esi - movl 44(%esi), %edi - movl -112(%ebp), %ebx # 4-byte Reload - pushl %eax - movl %ebx, %eax - addb $127, %al - sahf - popl %eax - adcl 20(%esi), %edi - movl %edi, -132(%ebp) # 4-byte Spill - movl %eax, -124(%ebp) # 4-byte Spill - movl %ecx, -112(%ebp) # 4-byte Spill - movl -148(%ebp), %esi # 4-byte Reload - movl %esi, -116(%ebp) # 4-byte Spill - movl -144(%ebp), %esi # 4-byte Reload - movl %esi, -120(%ebp) # 4-byte Spill - movl -140(%ebp), %esi # 4-byte Reload - movl %esi, -128(%ebp) # 4-byte Spill - movl -136(%ebp), %esi # 4-byte Reload - movl %esi, -152(%ebp) # 4-byte Spill - jb .LBB176_4 -# BB#3: - movl $0, -124(%ebp) # 4-byte Folded Spill - movl $0, -112(%ebp) # 4-byte Folded Spill - movl $0, -116(%ebp) # 4-byte Folded Spill - movl $0, -120(%ebp) # 4-byte Folded Spill - movl $0, -128(%ebp) # 4-byte Folded Spill - movl $0, -152(%ebp) # 4-byte Folded Spill -.LBB176_4: - movl %edx, -84(%ebp) - movl -172(%ebp), %esi # 4-byte Reload - movl %esi, -80(%ebp) - movl -188(%ebp), %edx # 4-byte Reload - movl %edx, -76(%ebp) - movl -168(%ebp), %edi # 4-byte Reload - movl %edi, -72(%ebp) - movl -180(%ebp), %edx # 4-byte Reload - movl %edx, -68(%ebp) - movl -136(%ebp), %edx # 4-byte Reload - movl %edx, -108(%ebp) - movl -140(%ebp), %edx # 4-byte Reload - movl %edx, -104(%ebp) 
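mcl_fpDbl_mulPre12Lbmi2 above splits each 12-limb operand into 6-limb halves, calls mcl_fpDbl_mulPre6Lbmi2 for lo*lo and hi*hi, forms the half-sums while preserving their carry bits with the seto/lahf sequences, and below calls mulPre6 a third time on those sums: one level of Karatsuba. A C sketch of that structure, assuming a plain schoolbook mul_n in place of the real mulPre6 (mul_n and mul_pre12_sketch are hypothetical names):

    #include <stdint.h>
    #include <string.h>

    #define HALF 6   /* 6-limb halves of the 12-limb operands */

    /* Schoolbook n-limb product, standing in for mcl_fpDbl_mulPre6Lbmi2. */
    static void mul_n(uint32_t *z, const uint32_t *x, const uint32_t *y, int n)
    {
        memset(z, 0, (size_t)(2 * n) * sizeof *z);
        for (int i = 0; i < n; i++) {
            uint64_t carry = 0;
            for (int j = 0; j < n; j++) {
                uint64_t t = (uint64_t)z[i + j] + (uint64_t)x[i] * y[j] + carry;
                z[i + j] = (uint32_t)t;
                carry = t >> 32;
            }
            z[i + n] = (uint32_t)carry;
        }
    }

    /* One level of Karatsuba over base B = 2^(32*HALF):
     *   x*y = lolo + (mid - lolo - hihi)*B + hihi*B^2,
     * where mid = (x_lo + x_hi) * (y_lo + y_hi). */
    static void mul_pre12_sketch(uint32_t z[4 * HALF],
                                 const uint32_t x[2 * HALF],
                                 const uint32_t y[2 * HALF])
    {
        uint32_t lolo[2 * HALF], hihi[2 * HALF];
        uint32_t xs[HALF + 1], ys[HALF + 1], mid[2 * HALF + 2];

        mul_n(lolo, x, y, HALF);                 /* first mulPre6 call  */
        mul_n(hihi, x + HALF, y + HALF, HALF);   /* second mulPre6 call */

        uint64_t cx = 0, cy = 0;                 /* the seto/lahf-saved carries */
        for (int i = 0; i < HALF; i++) {
            uint64_t t = (uint64_t)x[i] + x[i + HALF] + cx;
            xs[i] = (uint32_t)t; cx = t >> 32;
            t = (uint64_t)y[i] + y[i + HALF] + cy;
            ys[i] = (uint32_t)t; cy = t >> 32;
        }
        xs[HALF] = (uint32_t)cx;
        ys[HALF] = (uint32_t)cy;
        mul_n(mid, xs, ys, HALF + 1);            /* third mulPre6 call  */

        /* mid -= lolo + hihi, leaving the cross term x_lo*y_hi + x_hi*y_lo */
        uint64_t b1 = 0, b2 = 0;
        for (int i = 0; i < 2 * HALF + 2; i++) {
            uint64_t l = i < 2 * HALF ? lolo[i] : 0;
            uint64_t h = i < 2 * HALF ? hihi[i] : 0;
            uint64_t t = (uint64_t)mid[i] - l - b1;
            b1 = (t >> 32) & 1;
            uint64_t u = (uint64_t)(uint32_t)t - h - b2;
            b2 = (u >> 32) & 1;
            mid[i] = (uint32_t)u;
        }

        /* z = lolo | hihi in the low/high halves, plus mid shifted by HALF */
        memcpy(z, lolo, sizeof lolo);
        memcpy(z + 2 * HALF, hihi, sizeof hihi);
        uint64_t carry = 0;
        for (int i = 0; i < 2 * HALF + 2; i++) {
            uint64_t t = (uint64_t)z[i + HALF] + mid[i] + carry;
            z[i + HALF] = (uint32_t)t;
            carry = t >> 32;
        }
        for (int i = 3 * HALF + 2; i < 4 * HALF && carry; i++) {
            uint64_t t = (uint64_t)z[i] + carry;
            z[i] = (uint32_t)t;
            carry = t >> 32;
        }
    }

Three half-size multiplies replace four, at the cost of the add/subtract fix-up that the remainder of the generated routine (the long subl/sbbl and adcl tails over the stack temporaries) performs in unrolled form. The listing resumes below.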
- movl -144(%ebp), %edx # 4-byte Reload - movl %edx, -100(%ebp) - movl -148(%ebp), %edx # 4-byte Reload - movl %edx, -96(%ebp) - movl %ecx, -92(%ebp) - movl %eax, -88(%ebp) - movl %edi, %ebx - sbbl %edx, %edx - movl -132(%ebp), %eax # 4-byte Reload - movl %eax, -64(%ebp) - movl -184(%ebp), %ecx # 4-byte Reload - pushl %eax - movl %ecx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB176_6 -# BB#5: - movl $0, %eax - movl $0, %ebx - movl $0, %esi -.LBB176_6: - movl %eax, -132(%ebp) # 4-byte Spill - sbbl %eax, %eax - leal -108(%ebp), %ecx - movl %ecx, 8(%esp) - leal -84(%ebp), %ecx - movl %ecx, 4(%esp) - leal -60(%ebp), %ecx - movl %ecx, (%esp) - andl %eax, %edx - movl -152(%ebp), %edi # 4-byte Reload - addl -160(%ebp), %edi # 4-byte Folded Reload - adcl %esi, -128(%ebp) # 4-byte Folded Spill - movl -156(%ebp), %eax # 4-byte Reload - adcl %eax, -120(%ebp) # 4-byte Folded Spill - adcl %ebx, -116(%ebp) # 4-byte Folded Spill - movl -176(%ebp), %eax # 4-byte Reload - adcl %eax, -112(%ebp) # 4-byte Folded Spill - movl -132(%ebp), %eax # 4-byte Reload - adcl %eax, -124(%ebp) # 4-byte Folded Spill - sbbl %esi, %esi - andl $1, %esi - andl $1, %edx - movl %edx, -132(%ebp) # 4-byte Spill - movl -164(%ebp), %ebx # 4-byte Reload - calll mcl_fpDbl_mulPre6Lbmi2@PLT - addl -36(%ebp), %edi - movl -128(%ebp), %eax # 4-byte Reload - adcl -32(%ebp), %eax - movl %eax, -128(%ebp) # 4-byte Spill - movl -120(%ebp), %eax # 4-byte Reload - adcl -28(%ebp), %eax - movl %eax, -120(%ebp) # 4-byte Spill - movl -116(%ebp), %eax # 4-byte Reload - adcl -24(%ebp), %eax - movl %eax, -116(%ebp) # 4-byte Spill - movl -112(%ebp), %eax # 4-byte Reload - adcl -20(%ebp), %eax - movl %eax, -112(%ebp) # 4-byte Spill - movl -124(%ebp), %eax # 4-byte Reload - adcl -16(%ebp), %eax - movl %eax, -124(%ebp) # 4-byte Spill - adcl %esi, -132(%ebp) # 4-byte Folded Spill - movl -60(%ebp), %ecx - movl 8(%ebp), %eax - subl (%eax), %ecx - movl %ecx, -144(%ebp) # 4-byte Spill - movl -56(%ebp), %esi - sbbl 4(%eax), %esi - movl -52(%ebp), %ecx - sbbl 8(%eax), %ecx - movl %ecx, -136(%ebp) # 4-byte Spill - movl -48(%ebp), %edx - sbbl 12(%eax), %edx - movl -44(%ebp), %ebx - sbbl 16(%eax), %ebx - movl -40(%ebp), %ecx - sbbl 20(%eax), %ecx - movl %ecx, -140(%ebp) # 4-byte Spill - movl 24(%eax), %ecx - movl %ecx, -148(%ebp) # 4-byte Spill - sbbl %ecx, %edi - movl 28(%eax), %ecx - movl %ecx, -152(%ebp) # 4-byte Spill - sbbl %ecx, -128(%ebp) # 4-byte Folded Spill - movl 32(%eax), %ecx - movl %ecx, -156(%ebp) # 4-byte Spill - sbbl %ecx, -120(%ebp) # 4-byte Folded Spill - movl 36(%eax), %ecx - movl %ecx, -160(%ebp) # 4-byte Spill - sbbl %ecx, -116(%ebp) # 4-byte Folded Spill - movl 40(%eax), %ecx - movl %ecx, -164(%ebp) # 4-byte Spill - sbbl %ecx, -112(%ebp) # 4-byte Folded Spill - movl 44(%eax), %ecx - movl %ecx, -168(%ebp) # 4-byte Spill - sbbl %ecx, -124(%ebp) # 4-byte Folded Spill - sbbl $0, -132(%ebp) # 4-byte Folded Spill - movl 48(%eax), %ecx - movl %ecx, -192(%ebp) # 4-byte Spill - subl %ecx, -144(%ebp) # 4-byte Folded Spill - movl 52(%eax), %ecx - movl %ecx, -196(%ebp) # 4-byte Spill - sbbl %ecx, %esi - movl 56(%eax), %ecx - movl %ecx, -200(%ebp) # 4-byte Spill - sbbl %ecx, -136(%ebp) # 4-byte Folded Spill - movl 60(%eax), %ecx - movl %ecx, -204(%ebp) # 4-byte Spill - sbbl %ecx, %edx - movl 64(%eax), %ecx - movl %ecx, -208(%ebp) # 4-byte Spill - sbbl %ecx, %ebx - movl 68(%eax), %ecx - movl %ecx, -212(%ebp) # 4-byte Spill - sbbl %ecx, -140(%ebp) # 4-byte Folded Spill - movl 72(%eax), %ecx - movl %ecx, -216(%ebp) # 4-byte Spill - sbbl %ecx, %edi - 
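
The sbbl %reg, %reg / andl pairs threaded through this fixup are the usual flag-materialization idiom: subtracting a register from itself with borrow yields 0x00000000 or 0xFFFFFFFF straight from the carry flag, which then serves as a select mask, while andl $1 narrows the same value to a 0/1 limb that a later adcl can absorb. A small branch-free rendering, with illustrative names:

package main

import (
	"fmt"
	"math/bits"
)

// mask turns a 0/1 borrow into 0x00000000 or 0xFFFFFFFF, which is
// what "sbbl %eax, %eax" extracts from the carry flag.
func mask(borrow uint32) uint32 { return -(borrow & 1) }

// sel keeps a when the borrow was set and b otherwise, with no branch;
// "andl $1" is the companion idiom that keeps the flag as a 0/1 limb
// so a later adcl can absorb it.
func sel(borrow, a, b uint32) uint32 {
	m := mask(borrow)
	return (a & m) | (b &^ m)
}

func main() {
	_, borrow := bits.Sub32(1, 2, 0) // 1 - 2 sets the borrow
	fmt.Printf("%#08x\n", mask(borrow))
	fmt.Printf("%#x\n", sel(borrow, 0xAAAA, 0x5555))
}
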
movl 76(%eax), %ecx - movl %ecx, -172(%ebp) # 4-byte Spill - sbbl %ecx, -128(%ebp) # 4-byte Folded Spill - movl 80(%eax), %ecx - movl %ecx, -176(%ebp) # 4-byte Spill - sbbl %ecx, -120(%ebp) # 4-byte Folded Spill - movl 84(%eax), %ecx - movl %ecx, -180(%ebp) # 4-byte Spill - sbbl %ecx, -116(%ebp) # 4-byte Folded Spill - movl 88(%eax), %ecx - movl %ecx, -184(%ebp) # 4-byte Spill - sbbl %ecx, -112(%ebp) # 4-byte Folded Spill - movl 92(%eax), %ecx - movl %ecx, -188(%ebp) # 4-byte Spill - sbbl %ecx, -124(%ebp) # 4-byte Folded Spill - sbbl $0, -132(%ebp) # 4-byte Folded Spill - movl -144(%ebp), %ecx # 4-byte Reload - addl -148(%ebp), %ecx # 4-byte Folded Reload - adcl -152(%ebp), %esi # 4-byte Folded Reload - movl %ecx, 24(%eax) - movl -136(%ebp), %ecx # 4-byte Reload - adcl -156(%ebp), %ecx # 4-byte Folded Reload - movl %esi, 28(%eax) - adcl -160(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 32(%eax) - adcl -164(%ebp), %ebx # 4-byte Folded Reload - movl %edx, 36(%eax) - movl -140(%ebp), %ecx # 4-byte Reload - adcl -168(%ebp), %ecx # 4-byte Folded Reload - movl %ebx, 40(%eax) - adcl -192(%ebp), %edi # 4-byte Folded Reload - movl %ecx, 44(%eax) - movl -128(%ebp), %ecx # 4-byte Reload - adcl -196(%ebp), %ecx # 4-byte Folded Reload - movl %edi, 48(%eax) - movl -120(%ebp), %edx # 4-byte Reload - adcl -200(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 52(%eax) - movl -116(%ebp), %ecx # 4-byte Reload - adcl -204(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 56(%eax) - movl -112(%ebp), %edx # 4-byte Reload - adcl -208(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 60(%eax) - movl -124(%ebp), %ecx # 4-byte Reload - adcl -212(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 64(%eax) - movl -132(%ebp), %edx # 4-byte Reload - adcl -216(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 68(%eax) - movl %edx, 72(%eax) - movl -172(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 76(%eax) - movl -176(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 80(%eax) - movl -180(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 84(%eax) - movl -184(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 88(%eax) - movl -188(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 92(%eax) - addl $220, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end176: - .size mcl_fpDbl_mulPre12Lbmi2, .Lfunc_end176-mcl_fpDbl_mulPre12Lbmi2 - - .globl mcl_fpDbl_sqrPre12Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre12Lbmi2,@function -mcl_fpDbl_sqrPre12Lbmi2: # @mcl_fpDbl_sqrPre12Lbmi2 -# BB#0: - pushl %ebp - movl %esp, %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $220, %esp - calll .L177$pb -.L177$pb: - popl %ebx -.Ltmp28: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp28-.L177$pb), %ebx - movl %ebx, -152(%ebp) # 4-byte Spill - movl 12(%ebp), %edi - movl %edi, 8(%esp) - movl %edi, 4(%esp) - movl 8(%ebp), %esi - movl %esi, (%esp) - calll mcl_fpDbl_mulPre6Lbmi2@PLT - leal 24(%edi), %eax - movl %eax, 8(%esp) - movl %eax, 4(%esp) - leal 48(%esi), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre6Lbmi2@PLT - movl 44(%edi), %eax - movl %eax, -136(%ebp) # 4-byte Spill - movl 40(%edi), %edx - movl 36(%edi), %eax - movl (%edi), %ebx - movl 4(%edi), %esi - addl 24(%edi), %ebx - adcl 28(%edi), %esi - movl 32(%edi), %ecx - adcl 8(%edi), %ecx - movl %ecx, -144(%ebp) # 4-byte Spill - adcl 12(%edi), %eax - movl %eax, -140(%ebp) # 4-byte Spill - adcl 16(%edi), %edx - movl %edx, %ecx - movl -136(%ebp), %eax # 4-byte Reload - adcl 20(%edi), %eax - movl %eax, -136(%ebp) # 4-byte Spill - seto %al - lahf - movl 
%eax, %edx - movl %edx, -156(%ebp) # 4-byte Spill - pushl %eax - seto %al - lahf - movl %eax, %edx - popl %eax - movl %edx, -124(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %eax - movl %eax, -120(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %edx - sbbl %edi, %edi - movl %edi, -148(%ebp) # 4-byte Spill - movl %ebx, %edi - addl %edi, %edi - movl %edi, -112(%ebp) # 4-byte Spill - movl %esi, %edi - movl %esi, %eax - adcl %edi, %edi - movl %edi, -132(%ebp) # 4-byte Spill - pushl %eax - movl %edx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB177_2 -# BB#1: - movl $0, -132(%ebp) # 4-byte Folded Spill - movl $0, -112(%ebp) # 4-byte Folded Spill -.LBB177_2: - movl -144(%ebp), %esi # 4-byte Reload - addl %esi, %esi - movl -140(%ebp), %edx # 4-byte Reload - adcl %edx, %edx - movl %edx, -116(%ebp) # 4-byte Spill - movl -120(%ebp), %edx # 4-byte Reload - pushl %eax - movl %edx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB177_3 -# BB#4: - movl $0, -116(%ebp) # 4-byte Folded Spill - movl $0, -120(%ebp) # 4-byte Folded Spill - jmp .LBB177_5 -.LBB177_3: - movl %eax, %edx - shrl $31, %edx - orl %esi, %edx - movl %edx, -120(%ebp) # 4-byte Spill -.LBB177_5: - movl -136(%ebp), %edx # 4-byte Reload - movl %ecx, %esi - addl %esi, %esi - adcl %edx, %edx - movl -124(%ebp), %edi # 4-byte Reload - pushl %eax - movl %edi, %eax - addb $127, %al - sahf - popl %eax - jb .LBB177_6 -# BB#7: - xorl %edx, %edx - movl $0, -128(%ebp) # 4-byte Folded Spill - movl -140(%ebp), %edi # 4-byte Reload - jmp .LBB177_8 -.LBB177_6: - movl %ecx, -124(%ebp) # 4-byte Spill - movl -140(%ebp), %edi # 4-byte Reload - movl %edi, %ecx - shrl $31, %ecx - orl %esi, %ecx - movl %ecx, -128(%ebp) # 4-byte Spill - movl -124(%ebp), %ecx # 4-byte Reload -.LBB177_8: - movl %edx, -124(%ebp) # 4-byte Spill - movl %ebx, -84(%ebp) - movl %eax, -80(%ebp) - movl -144(%ebp), %esi # 4-byte Reload - movl %esi, -76(%ebp) - movl %edi, -72(%ebp) - movl %ecx, -68(%ebp) - movl -136(%ebp), %edx # 4-byte Reload - movl %edx, -64(%ebp) - movl %ebx, -108(%ebp) - movl %eax, -104(%ebp) - movl %esi, -100(%ebp) - movl %edi, -96(%ebp) - movl %ecx, -92(%ebp) - movl %edx, -88(%ebp) - movl -156(%ebp), %eax # 4-byte Reload - movl %eax, %eax - addb $127, %al - sahf - jb .LBB177_9 -# BB#10: - movl $0, -136(%ebp) # 4-byte Folded Spill - jmp .LBB177_11 -.LBB177_9: - shrl $31, %edx - movl %edx, -136(%ebp) # 4-byte Spill -.LBB177_11: - leal -108(%ebp), %eax - movl %eax, 8(%esp) - leal -84(%ebp), %eax - movl %eax, 4(%esp) - leal -60(%ebp), %eax - movl %eax, (%esp) - movl -148(%ebp), %esi # 4-byte Reload - andl $1, %esi - movl -152(%ebp), %ebx # 4-byte Reload - calll mcl_fpDbl_mulPre6Lbmi2@PLT - movl -112(%ebp), %eax # 4-byte Reload - addl -36(%ebp), %eax - movl %eax, -112(%ebp) # 4-byte Spill - movl -132(%ebp), %edi # 4-byte Reload - adcl -32(%ebp), %edi - movl -120(%ebp), %eax # 4-byte Reload - adcl -28(%ebp), %eax - movl %eax, -120(%ebp) # 4-byte Spill - movl -116(%ebp), %eax # 4-byte Reload - adcl -24(%ebp), %eax - movl %eax, -116(%ebp) # 4-byte Spill - movl -128(%ebp), %eax # 4-byte Reload - adcl -20(%ebp), %eax - movl %eax, -128(%ebp) # 4-byte Spill - movl -124(%ebp), %eax # 4-byte Reload - adcl -16(%ebp), %eax - movl %eax, -124(%ebp) # 4-byte Spill - adcl -136(%ebp), %esi # 4-byte Folded Reload - movl -60(%ebp), %edx - movl 8(%ebp), %eax - subl (%eax), %edx - movl -56(%ebp), %ebx - sbbl 4(%eax), %ebx - movl -52(%ebp), %ecx - sbbl 8(%eax), %ecx - movl %ecx, -136(%ebp) # 4-byte Spill - movl -48(%ebp), %ecx - sbbl 12(%eax), %ecx - movl %ecx, -144(%ebp) # 
4-byte Spill - movl -44(%ebp), %ecx - sbbl 16(%eax), %ecx - movl %ecx, -172(%ebp) # 4-byte Spill - movl -40(%ebp), %ecx - sbbl 20(%eax), %ecx - movl %ecx, -140(%ebp) # 4-byte Spill - movl 24(%eax), %ecx - movl %ecx, -148(%ebp) # 4-byte Spill - sbbl %ecx, -112(%ebp) # 4-byte Folded Spill - movl 28(%eax), %ecx - movl %ecx, -152(%ebp) # 4-byte Spill - sbbl %ecx, %edi - movl %edi, -132(%ebp) # 4-byte Spill - movl 32(%eax), %ecx - movl %ecx, -156(%ebp) # 4-byte Spill - sbbl %ecx, -120(%ebp) # 4-byte Folded Spill - movl 36(%eax), %ecx - movl %ecx, -160(%ebp) # 4-byte Spill - sbbl %ecx, -116(%ebp) # 4-byte Folded Spill - movl 40(%eax), %ecx - movl %ecx, -164(%ebp) # 4-byte Spill - sbbl %ecx, -128(%ebp) # 4-byte Folded Spill - movl 44(%eax), %ecx - movl %ecx, -168(%ebp) # 4-byte Spill - sbbl %ecx, -124(%ebp) # 4-byte Folded Spill - sbbl $0, %esi - movl 48(%eax), %ecx - movl %ecx, -192(%ebp) # 4-byte Spill - subl %ecx, %edx - movl 52(%eax), %ecx - movl %ecx, -196(%ebp) # 4-byte Spill - sbbl %ecx, %ebx - movl 56(%eax), %ecx - movl %ecx, -200(%ebp) # 4-byte Spill - sbbl %ecx, -136(%ebp) # 4-byte Folded Spill - movl 60(%eax), %ecx - movl %ecx, -204(%ebp) # 4-byte Spill - sbbl %ecx, -144(%ebp) # 4-byte Folded Spill - movl 64(%eax), %ecx - movl %ecx, -208(%ebp) # 4-byte Spill - movl -172(%ebp), %edi # 4-byte Reload - sbbl %ecx, %edi - movl 68(%eax), %ecx - movl %ecx, -212(%ebp) # 4-byte Spill - sbbl %ecx, -140(%ebp) # 4-byte Folded Spill - movl 72(%eax), %ecx - movl %ecx, -216(%ebp) # 4-byte Spill - sbbl %ecx, -112(%ebp) # 4-byte Folded Spill - movl 76(%eax), %ecx - movl %ecx, -172(%ebp) # 4-byte Spill - sbbl %ecx, -132(%ebp) # 4-byte Folded Spill - movl 80(%eax), %ecx - movl %ecx, -176(%ebp) # 4-byte Spill - sbbl %ecx, -120(%ebp) # 4-byte Folded Spill - movl 84(%eax), %ecx - movl %ecx, -180(%ebp) # 4-byte Spill - sbbl %ecx, -116(%ebp) # 4-byte Folded Spill - movl 88(%eax), %ecx - movl %ecx, -184(%ebp) # 4-byte Spill - sbbl %ecx, -128(%ebp) # 4-byte Folded Spill - movl 92(%eax), %ecx - movl %ecx, -188(%ebp) # 4-byte Spill - sbbl %ecx, -124(%ebp) # 4-byte Folded Spill - sbbl $0, %esi - addl -148(%ebp), %edx # 4-byte Folded Reload - adcl -152(%ebp), %ebx # 4-byte Folded Reload - movl %edx, 24(%eax) - movl -136(%ebp), %ecx # 4-byte Reload - adcl -156(%ebp), %ecx # 4-byte Folded Reload - movl %ebx, 28(%eax) - movl -144(%ebp), %edx # 4-byte Reload - adcl -160(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 32(%eax) - adcl -164(%ebp), %edi # 4-byte Folded Reload - movl %edx, 36(%eax) - movl -140(%ebp), %edx # 4-byte Reload - adcl -168(%ebp), %edx # 4-byte Folded Reload - movl %edi, 40(%eax) - movl -112(%ebp), %ecx # 4-byte Reload - adcl -192(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 44(%eax) - movl -132(%ebp), %edi # 4-byte Reload - adcl -196(%ebp), %edi # 4-byte Folded Reload - movl %ecx, 48(%eax) - movl -120(%ebp), %edx # 4-byte Reload - adcl -200(%ebp), %edx # 4-byte Folded Reload - movl %edi, 52(%eax) - movl -116(%ebp), %ecx # 4-byte Reload - adcl -204(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 56(%eax) - movl -128(%ebp), %edx # 4-byte Reload - adcl -208(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 60(%eax) - movl -124(%ebp), %ecx # 4-byte Reload - adcl -212(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 64(%eax) - adcl -216(%ebp), %esi # 4-byte Folded Reload - movl %ecx, 68(%eax) - movl %esi, 72(%eax) - movl -172(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 76(%eax) - movl -176(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 80(%eax) - movl -180(%ebp), %ecx # 4-byte 
Reload - adcl $0, %ecx - movl %ecx, 84(%eax) - movl -184(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 88(%eax) - movl -188(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 92(%eax) - addl $220, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end177: - .size mcl_fpDbl_sqrPre12Lbmi2, .Lfunc_end177-mcl_fpDbl_sqrPre12Lbmi2 - - .globl mcl_fp_mont12Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont12Lbmi2,@function -mcl_fp_mont12Lbmi2: # @mcl_fp_mont12Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1436, %esp # imm = 0x59C - calll .L178$pb -.L178$pb: - popl %ebx -.Ltmp29: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp29-.L178$pb), %ebx - movl 1468(%esp), %eax - movl -4(%eax), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1384(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 1384(%esp), %ebp - movl 1388(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl %ebp, %eax - imull %esi, %eax - movl 1432(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 1428(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 1424(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 1420(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 1416(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 1412(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 1408(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1404(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1400(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 1396(%esp), %edi - movl 1392(%esp), %esi - movl %eax, (%esp) - leal 1328(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - addl 1328(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 1332(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 1336(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - adcl 1340(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1344(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1348(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1352(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1356(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1360(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1364(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1368(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl 1372(%esp), %esi - movl 92(%esp), %ebp # 4-byte Reload - adcl 1376(%esp), %ebp - sbbl %edi, %edi - movl 1464(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 1272(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - andl $1, %edi - movl 84(%esp), %ecx # 4-byte Reload - addl 1272(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 1276(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1280(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1284(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1288(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1292(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 
1296(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1300(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1304(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1308(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 1312(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - adcl 1316(%esp), %ebp - adcl 1320(%esp), %edi - sbbl %eax, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl %ecx, %esi - movl %esi, %eax - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1216(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - movl 84(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 1216(%esp), %esi - movl 72(%esp), %ecx # 4-byte Reload - adcl 1220(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 1224(%esp), %esi - movl 56(%esp), %ecx # 4-byte Reload - adcl 1228(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1232(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1236(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1240(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1244(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1248(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1252(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1256(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - adcl 1260(%esp), %ebp - adcl 1264(%esp), %edi - adcl $0, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 1160(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 72(%esp), %ecx # 4-byte Reload - addl 1160(%esp), %ecx - adcl 1164(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1168(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1172(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1176(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1180(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1184(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1188(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1192(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1196(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 1200(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - adcl 1204(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 1208(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %esi - movl %esi, %eax - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1104(%esp), %ecx - movl 1468(%esp), %eax - movl %eax, %edx - calll .LmulPv384x32 - andl $1, %ebp - movl %ebp, %ecx - addl 1104(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 1108(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1112(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload 
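
mcl_fp_mont12Lbmi2 leans on one internal helper throughout: .LmulPv384x32, which, judging by its name and by the 13 stack slots read back after every call, multiplies the 12-limb operand pointed to by %edx by a single 32-bit word and leaves a 13-limb result where %ecx points; each call is followed by the adcl ladder that folds those limbs into the running accumulator. (The calll .L178$pb / popl %ebx / addl $_GLOBAL_OFFSET_TABLE_ preamble at entry is the standard i386 position-independent-code trick for locating the GOT.) A sketch of the helper's contract as read from the call sites; the Go names are assumptions:

package main

import (
	"fmt"
	"math/bits"
)

// mulPv multiplies an n-limb little-endian operand by one 32-bit word,
// producing n+1 limbs: the contract .LmulPv384x32 appears to satisfy
// for n = 12.
func mulPv(z, x []uint32, y uint32) {
	var carry uint32
	for i, xi := range x {
		hi, lo := bits.Mul32(xi, y)
		lo, c := bits.Add32(lo, carry, 0)
		z[i] = lo
		carry = hi + c // hi <= 2^32-2, so this cannot wrap
	}
	z[len(x)] = carry
}

func main() {
	x := []uint32{0xFFFFFFFF, 0xFFFFFFFF} // 2^64 - 1
	z := make([]uint32, 3)
	mulPv(z, x, 0xFFFFFFFF)
	fmt.Printf("%#x\n", z) // low limb first
}
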
- adcl 1116(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1120(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1124(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1128(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1132(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1136(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl 1140(%esp), %esi - movl 92(%esp), %eax # 4-byte Reload - adcl 1144(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 1148(%esp), %edi - movl 84(%esp), %ebp # 4-byte Reload - adcl 1152(%esp), %ebp - adcl $0, %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 1048(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 68(%esp), %ecx # 4-byte Reload - addl 1048(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 1052(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1056(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1060(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1064(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1068(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1072(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1076(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 1080(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1084(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 1088(%esp), %edi - adcl 1092(%esp), %ebp - movl %ebp, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1096(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %ebp - movl %ebp, %eax - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 992(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - andl $1, %esi - movl %esi, %eax - addl 992(%esp), %ebp - movl 56(%esp), %ecx # 4-byte Reload - adcl 996(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1000(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 44(%esp), %ebp # 4-byte Reload - adcl 1004(%esp), %ebp - movl 52(%esp), %ecx # 4-byte Reload - adcl 1008(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1012(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1016(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 88(%esp), %esi # 4-byte Reload - adcl 1020(%esp), %esi - movl 80(%esp), %ecx # 4-byte Reload - adcl 1024(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1028(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl 1032(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1036(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1040(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl %eax, %edi - adcl $0, %edi - movl 1464(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 936(%esp), %ecx - movl 1460(%esp), 
%edx - calll .LmulPv384x32 - movl 56(%esp), %ecx # 4-byte Reload - addl 936(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 940(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 944(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 948(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 952(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 956(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 960(%esp), %esi - movl %esi, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 976(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 980(%esp), %esi - adcl 984(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 880(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - andl $1, %edi - movl %edi, %ecx - movl 56(%esp), %eax # 4-byte Reload - addl 880(%esp), %eax - movl 48(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 888(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 892(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %edi # 4-byte Reload - adcl 912(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 920(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 924(%esp), %esi - movl %esi, %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 928(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 824(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 48(%esp), %ecx # 4-byte Reload - addl 824(%esp), %ecx - movl 44(%esp), %eax # 4-byte Reload - adcl 828(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 840(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 852(%esp), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 856(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 864(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 
64(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 872(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %edi - movl %edi, %eax - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 768(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - andl $1, %ebp - movl %ebp, %ecx - addl 768(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 780(%esp), %ebp - adcl 784(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 800(%esp), %esi - movl 84(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl 808(%esp), %edi - movl 64(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 712(%esp), %ecx - movl 1460(%esp), %eax - movl %eax, %edx - calll .LmulPv384x32 - movl 44(%esp), %eax # 4-byte Reload - addl 712(%esp), %eax - movl 52(%esp), %ecx # 4-byte Reload - adcl 716(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl 720(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 724(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 728(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 732(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 736(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl 740(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 744(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - adcl 748(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 752(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 756(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - adcl 760(%esp), %edi - sbbl %ebp, %ebp - movl %eax, %esi - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 656(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - andl $1, %ebp - movl %ebp, %eax - addl 656(%esp), %esi - movl 52(%esp), %ecx # 4-byte Reload - adcl 660(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - adcl 664(%esp), %esi - movl 76(%esp), %ecx # 4-byte Reload - adcl 668(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 672(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 676(%esp), %ebp - movl 92(%esp), %ecx # 4-byte Reload - adcl 680(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 684(%esp), 
%ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 688(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 692(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 696(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 700(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl 704(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - movl %eax, %edi - adcl $0, %edi - movl 1464(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 600(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 52(%esp), %ecx # 4-byte Reload - addl 600(%esp), %ecx - adcl 604(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 616(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 92(%esp), %ebp # 4-byte Reload - adcl 620(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 624(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 636(%esp), %esi - movl 56(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 648(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 544(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - movl 44(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 544(%esp), %edi - movl 60(%esp), %ecx # 4-byte Reload - adcl 548(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 552(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 556(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 560(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - adcl 564(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 568(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 572(%esp), %edi - movl 72(%esp), %ecx # 4-byte Reload - adcl 576(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - adcl 580(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - adcl 584(%esp), %esi - movl 48(%esp), %ecx # 4-byte Reload - adcl 588(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 592(%esp), %ebp - adcl $0, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 488(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 60(%esp), %ecx # 4-byte Reload - addl 488(%esp), %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 496(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 
92(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 512(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 524(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 532(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 44(%esp), %ebp # 4-byte Reload - adcl 536(%esp), %ebp - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 432(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - andl $1, %edi - movl %edi, %ecx - addl 432(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %edi # 4-byte Reload - adcl 440(%esp), %edi - movl 80(%esp), %esi # 4-byte Reload - adcl 444(%esp), %esi - movl 92(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 480(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 376(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 76(%esp), %ecx # 4-byte Reload - addl 376(%esp), %ecx - adcl 380(%esp), %edi - movl %edi, 88(%esp) # 4-byte Spill - adcl 384(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - adcl 392(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 416(%esp), %ebp - movl 44(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 320(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - movl 76(%esp), 
%ecx # 4-byte Reload - andl $1, %ecx - addl 320(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 332(%esp), %esi - adcl 336(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl 344(%esp), %edi - movl 64(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 360(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 264(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 88(%esp), %ecx # 4-byte Reload - addl 264(%esp), %ecx - movl 80(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 272(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 284(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl 288(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 296(%esp), %ebp - movl 52(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 88(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 208(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - movl 88(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 208(%esp), %esi - movl 80(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - adcl 224(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 232(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - adcl 236(%esp), %edi - adcl 240(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %ebp # 4-byte Reload - adcl 248(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill 
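
Stepping back from the unrolling: each of these blocks is one round of word-serial Montgomery multiplication (the CIOS schedule). Round i adds b[i]*a into the accumulator via .LmulPv384x32, derives q = t0 * n0inv from the word precomputed at -4(modulus) (the imull 40(%esp) folded reload), adds q*N with a second helper call so the low limb cancels, and advances one limb by moving to the next, lower stack frame (the descending leal 1384/1328/.../96 offsets). After twelve rounds a conditional subtraction brings the result under the modulus. A compact sketch of that schedule, cross-checked against math/big; the names and the one-limb demo modulus are illustrative:

package main

import (
	"fmt"
	"math/big"
	"math/bits"
)

// montMul computes z = x*y*R^-1 mod m with R = 2^(32*len(m)), following
// the word-serial CIOS schedule; m must be odd and m0inv must hold
// -m[0]^-1 mod 2^32, the role played by the word at -4(modulus) above.
func montMul(z, x, y, m []uint32, m0inv uint32) {
	n := len(m)
	t := make([]uint32, n+2)
	for i := 0; i < n; i++ {
		t[n+1] = muladd(t, x, y[i]) // t += x*y[i]
		q := t[0] * m0inv
		t[n+1] += muladd(t, m, q) // t += q*m; t[0] is now zero
		copy(t, t[1:])            // t >>= 32
		t[n+1] = 0
	}
	// conditional final subtraction: z = t - m if t >= m, else t
	d := make([]uint32, n)
	var borrow uint32
	for j := 0; j < n; j++ {
		d[j], borrow = bits.Sub32(t[j], m[j], borrow)
	}
	_, borrow = bits.Sub32(t[n], 0, borrow)
	if borrow == 0 {
		copy(z, d)
	} else {
		copy(z, t[:n])
	}
}

// muladd adds a*w into t[:len(a)+1] and returns the carry out: one
// .LmulPv384x32 call plus the adcl ladder that follows it.
func muladd(t, a []uint32, w uint32) uint32 {
	var c uint32
	for j, aj := range a {
		hi, lo := bits.Mul32(aj, w)
		lo, c1 := bits.Add32(lo, t[j], 0)
		lo, c2 := bits.Add32(lo, c, 0)
		t[j] = lo
		c = hi + c1 + c2
	}
	var out uint32
	t[len(a)], out = bits.Add32(t[len(a)], c, 0)
	return out
}

// negInv32 computes -m0^-1 mod 2^32 for odd m0 by Newton iteration,
// the constant precomputed next to the modulus.
func negInv32(m0 uint32) uint32 {
	inv := m0 // inverse mod 2^3 for odd m0
	for i := 0; i < 5; i++ {
		inv *= 2 - m0*inv
	}
	return -inv
}

func main() {
	m := []uint32{0xFFFFFFFB} // a one-limb odd modulus keeps the demo tiny
	x := []uint32{0x12345678}
	y := []uint32{0x0ABCDEF1}
	z := make([]uint32, 1)
	montMul(z, x, y, m, negInv32(m[0]))
	// cross-check: z == x*y*2^-32 mod m
	M := big.NewInt(int64(m[0]))
	R := new(big.Int).Lsh(big.NewInt(1), 32)
	want := new(big.Int).Mul(big.NewInt(int64(x[0])), big.NewInt(int64(y[0])))
	want.Mul(want, new(big.Int).ModInverse(R, M)).Mod(want, M)
	fmt.Println(z[0] == uint32(want.Uint64())) // true
}
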
- movl 76(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 152(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 80(%esp), %ecx # 4-byte Reload - addl 152(%esp), %ecx - movl 92(%esp), %eax # 4-byte Reload - adcl 156(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 164(%esp), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 168(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 176(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 188(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl 196(%esp), %ebp - movl 88(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - sbbl %esi, %esi - movl 40(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %edi - movl %eax, (%esp) - leal 96(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - andl $1, %esi - addl 96(%esp), %edi - movl 84(%esp), %ebx # 4-byte Reload - movl 92(%esp), %eax # 4-byte Reload - movl 72(%esp), %ecx # 4-byte Reload - adcl 100(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 104(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl %edx, %edi - adcl 108(%esp), %ebx - adcl 112(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 116(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 120(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - adcl 124(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 128(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - adcl 132(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 136(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - adcl 140(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 88(%esp), %edx # 4-byte Reload - adcl 144(%esp), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl %edx, %ebp - adcl $0, %esi - movl 1468(%esp), %edx - subl (%edx), %eax - sbbl 4(%edx), %edi - movl %edi, 16(%esp) # 4-byte Spill - movl %ebx, %edi - sbbl 8(%edx), %edi - movl %edi, 20(%esp) # 4-byte Spill - sbbl 12(%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - sbbl 16(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - sbbl 20(%edx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - sbbl 24(%edx), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - sbbl 28(%edx), %ecx - movl 44(%esp), %edi # 4-byte Reload - sbbl 32(%edx), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - sbbl 36(%edx), %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 76(%esp), %edi # 
4-byte Reload - sbbl 40(%edx), %edi - movl %edi, 84(%esp) # 4-byte Spill - sbbl 44(%edx), %ebp - movl %ebp, %edx - sbbl $0, %esi - andl $1, %esi - jne .LBB178_2 -# BB#1: - movl %ecx, 52(%esp) # 4-byte Spill -.LBB178_2: - movl %esi, %ecx - testb %cl, %cl - movl 92(%esp), %ecx # 4-byte Reload - jne .LBB178_4 -# BB#3: - movl %eax, %ecx -.LBB178_4: - movl 1456(%esp), %eax - movl %ecx, (%eax) - movl 68(%esp), %edi # 4-byte Reload - jne .LBB178_6 -# BB#5: - movl 16(%esp), %edi # 4-byte Reload -.LBB178_6: - movl %edi, 4(%eax) - movl 64(%esp), %ebp # 4-byte Reload - jne .LBB178_8 -# BB#7: - movl 20(%esp), %ebx # 4-byte Reload -.LBB178_8: - movl %ebx, 8(%eax) - jne .LBB178_10 -# BB#9: - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 72(%esp) # 4-byte Spill -.LBB178_10: - movl 72(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%eax) - jne .LBB178_12 -# BB#11: - movl 28(%esp), %ebp # 4-byte Reload -.LBB178_12: - movl %ebp, 16(%eax) - movl 56(%esp), %ecx # 4-byte Reload - jne .LBB178_14 -# BB#13: - movl 32(%esp), %ecx # 4-byte Reload -.LBB178_14: - movl %ecx, 20(%eax) - movl 48(%esp), %ecx # 4-byte Reload - jne .LBB178_16 -# BB#15: - movl 36(%esp), %ecx # 4-byte Reload -.LBB178_16: - movl %ecx, 24(%eax) - movl 52(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 44(%esp), %ecx # 4-byte Reload - jne .LBB178_18 -# BB#17: - movl 40(%esp), %ecx # 4-byte Reload -.LBB178_18: - movl %ecx, 32(%eax) - movl 60(%esp), %ecx # 4-byte Reload - jne .LBB178_20 -# BB#19: - movl 80(%esp), %ecx # 4-byte Reload -.LBB178_20: - movl %ecx, 36(%eax) - movl 76(%esp), %ecx # 4-byte Reload - jne .LBB178_22 -# BB#21: - movl 84(%esp), %ecx # 4-byte Reload -.LBB178_22: - movl %ecx, 40(%eax) - movl 88(%esp), %ecx # 4-byte Reload - jne .LBB178_24 -# BB#23: - movl %edx, %ecx -.LBB178_24: - movl %ecx, 44(%eax) - addl $1436, %esp # imm = 0x59C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end178: - .size mcl_fp_mont12Lbmi2, .Lfunc_end178-mcl_fp_mont12Lbmi2 - - .globl mcl_fp_montNF12Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF12Lbmi2,@function -mcl_fp_montNF12Lbmi2: # @mcl_fp_montNF12Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1420, %esp # imm = 0x58C - calll .L179$pb -.L179$pb: - popl %ebx -.Ltmp30: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp30-.L179$pb), %ebx - movl 1452(%esp), %eax - movl -4(%eax), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 1448(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1368(%esp), %ecx - movl 1444(%esp), %edx - calll .LmulPv384x32 - movl 1368(%esp), %ebp - movl 1372(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl %ebp, %eax - imull %esi, %eax - movl 1416(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 1412(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 1408(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 1404(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 1400(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1396(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1392(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 1388(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 1384(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 1380(%esp), %edi - movl 1376(%esp), %esi - movl %eax, (%esp) - leal 1312(%esp), %ecx - movl 1452(%esp), %edx - calll .LmulPv384x32 - addl 1312(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 1316(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1320(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - adcl 1324(%esp), %edi - movl %edi, 
52(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1328(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1332(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 1336(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1340(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 1344(%esp), %edi - movl 72(%esp), %eax # 4-byte Reload - adcl 1348(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1352(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - adcl 1356(%esp), %esi - movl 76(%esp), %ebp # 4-byte Reload - adcl 1360(%esp), %ebp - movl 1448(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 1256(%esp), %ecx - movl 1444(%esp), %edx - calll .LmulPv384x32 - movl 1304(%esp), %eax - movl 56(%esp), %edx # 4-byte Reload - addl 1256(%esp), %edx - movl 68(%esp), %ecx # 4-byte Reload - adcl 1260(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1264(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1268(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 1272(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - adcl 1276(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1280(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - adcl 1284(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1288(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1292(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - adcl 1296(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - adcl 1300(%esp), %ebp - adcl $0, %eax - movl %eax, %edi - movl %edx, %esi - movl %esi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1200(%esp), %ecx - movl 1452(%esp), %edx - calll .LmulPv384x32 - addl 1200(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 1204(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - adcl 1208(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 1212(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1216(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 1220(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1224(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1228(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1232(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1236(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1240(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 1244(%esp), %ebp - adcl 1248(%esp), %edi - movl 1448(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 1144(%esp), %ecx - movl 1444(%esp), %edx - calll .LmulPv384x32 - movl 1192(%esp), %eax - movl 68(%esp), %edx # 4-byte Reload - addl 1144(%esp), %edx - adcl 1148(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1152(%esp), %ecx - movl %ecx, 
40(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 1156(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 32(%esp), %esi # 4-byte Reload - adcl 1160(%esp), %esi - movl 48(%esp), %ecx # 4-byte Reload - adcl 1164(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1168(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1172(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1176(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1180(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl 1184(%esp), %ebp - adcl 1188(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 56(%esp) # 4-byte Spill - movl %edx, %edi - movl %edi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1088(%esp), %ecx - movl 1452(%esp), %eax - movl %eax, %edx - calll .LmulPv384x32 - addl 1088(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 1092(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1096(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1100(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl %esi, %edi - adcl 1104(%esp), %edi - movl 48(%esp), %eax # 4-byte Reload - adcl 1108(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1112(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1116(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1120(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - adcl 1124(%esp), %esi - adcl 1128(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1132(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 1136(%esp), %ebp - movl 1448(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 1032(%esp), %ecx - movl 1444(%esp), %edx - calll .LmulPv384x32 - movl 1080(%esp), %eax - movl 52(%esp), %edx # 4-byte Reload - addl 1032(%esp), %edx - movl 40(%esp), %ecx # 4-byte Reload - adcl 1036(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 1040(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl 1044(%esp), %edi - movl %edi, 32(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1048(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1052(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1056(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1060(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - adcl 1064(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1068(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1072(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - adcl 1076(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, %edi - movl %edx, %esi - movl %esi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 976(%esp), %ecx - movl 1452(%esp), %edx - calll .LmulPv384x32 - addl 976(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 980(%esp), %eax - movl %eax, 
40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 984(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 988(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 996(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1000(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 1004(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 1008(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl 1012(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 1016(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1020(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1024(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 1448(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 920(%esp), %ecx - movl 1444(%esp), %edx - calll .LmulPv384x32 - movl 968(%esp), %eax - movl 40(%esp), %edx # 4-byte Reload - addl 920(%esp), %edx - movl 36(%esp), %ecx # 4-byte Reload - adcl 924(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - adcl 928(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 932(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 936(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 940(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - adcl 944(%esp), %esi - movl 60(%esp), %ecx # 4-byte Reload - adcl 948(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl 952(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - adcl 956(%esp), %edi - movl 56(%esp), %ecx # 4-byte Reload - adcl 960(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 964(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl %edx, %ebp - movl %ebp, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 864(%esp), %ecx - movl 1452(%esp), %edx - calll .LmulPv384x32 - addl 864(%esp), %ebp - movl 36(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 872(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 876(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 72(%esp), %ebp # 4-byte Reload - adcl 884(%esp), %ebp - adcl 888(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 900(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - adcl 912(%esp), %edi - movl 1448(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 808(%esp), %ecx - movl 1444(%esp), %edx - calll .LmulPv384x32 - movl 856(%esp), %edx - movl 36(%esp), 
%ecx # 4-byte Reload - addl 808(%esp), %ecx - movl 32(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 824(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - adcl 828(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 832(%esp), %ebp - movl 76(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 852(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl %ecx, %esi - movl %esi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 752(%esp), %ecx - movl 1452(%esp), %eax - movl %eax, %edx - calll .LmulPv384x32 - addl 752(%esp), %esi - movl 32(%esp), %eax # 4-byte Reload - adcl 756(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - adcl 760(%esp), %edi - movl 44(%esp), %esi # 4-byte Reload - adcl 764(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 776(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 780(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 784(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 792(%esp), %ebp - movl 40(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1448(%esp), %ecx - movl %ecx, %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 696(%esp), %ecx - movl 1444(%esp), %eax - movl %eax, %edx - calll .LmulPv384x32 - movl 744(%esp), %ecx - movl 32(%esp), %eax # 4-byte Reload - addl 696(%esp), %eax - adcl 700(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - adcl 704(%esp), %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 708(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 712(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - adcl 716(%esp), %esi - movl 76(%esp), %edx # 4-byte Reload - adcl 720(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 724(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - adcl 728(%esp), %edi - adcl 732(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - adcl 736(%esp), %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 36(%esp), %edx # 4-byte Reload - adcl 740(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl %eax, %ebp - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 
640(%esp), %ecx - movl 1452(%esp), %edx - calll .LmulPv384x32 - addl 640(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %ebp # 4-byte Reload - adcl 648(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 660(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 668(%esp), %esi - adcl 672(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 676(%esp), %edi - movl 40(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 688(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1448(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 584(%esp), %ecx - movl 1444(%esp), %edx - calll .LmulPv384x32 - movl 632(%esp), %edx - movl 48(%esp), %ecx # 4-byte Reload - addl 584(%esp), %ecx - adcl 588(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 596(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 608(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 616(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - adcl 620(%esp), %edi - movl 36(%esp), %eax # 4-byte Reload - adcl 624(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 48(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 528(%esp), %ecx - movl 1452(%esp), %edx - calll .LmulPv384x32 - addl 528(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 540(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 564(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 36(%esp), %edi # 4-byte Reload - adcl 568(%esp), %edi - movl 32(%esp), %esi # 4-byte Reload - adcl 572(%esp), %esi - movl 48(%esp), %ebp # 4-byte Reload - adcl 576(%esp), %ebp - movl 1448(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 472(%esp), %ecx - movl 1444(%esp), %edx - calll .LmulPv384x32 
- movl 520(%esp), %edx - movl 44(%esp), %ecx # 4-byte Reload - addl 472(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 496(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 508(%esp), %edi - movl %edi, 36(%esp) # 4-byte Spill - adcl 512(%esp), %esi - movl %esi, 32(%esp) # 4-byte Spill - adcl 516(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 44(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 416(%esp), %ecx - movl 1452(%esp), %edx - calll .LmulPv384x32 - addl 416(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 432(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 440(%esp), %ebp - movl 52(%esp), %esi # 4-byte Reload - adcl 444(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1448(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 360(%esp), %ecx - movl 1444(%esp), %edx - calll .LmulPv384x32 - movl 408(%esp), %edx - movl 72(%esp), %ecx # 4-byte Reload - addl 360(%esp), %ecx - movl 64(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 372(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 380(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - adcl 384(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 72(%esp) # 4-byte 
Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 304(%esp), %ecx - movl 1452(%esp), %edx - calll .LmulPv384x32 - addl 304(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 312(%esp), %edi - movl 76(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 320(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 328(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 40(%esp), %esi # 4-byte Reload - adcl 332(%esp), %esi - movl 36(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 1448(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 248(%esp), %ecx - movl 1444(%esp), %edx - calll .LmulPv384x32 - movl 296(%esp), %edx - movl 64(%esp), %ecx # 4-byte Reload - addl 248(%esp), %ecx - adcl 252(%esp), %edi - movl 76(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 260(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl %esi, %ebp - adcl 272(%esp), %ebp - movl 36(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 64(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 192(%esp), %ecx - movl 1452(%esp), %edx - calll .LmulPv384x32 - addl 192(%esp), %esi - adcl 196(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 200(%esp), %edi - movl 68(%esp), %esi # 4-byte Reload - adcl 204(%esp), %esi - movl 56(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 216(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%esp), %ebp # 4-byte Reload - adcl 224(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - 
movl %eax, 64(%esp) # 4-byte Spill - movl 1448(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 136(%esp), %ecx - movl 1444(%esp), %edx - calll .LmulPv384x32 - movl 184(%esp), %edx - movl 60(%esp), %ecx # 4-byte Reload - addl 136(%esp), %ecx - adcl 140(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - adcl 144(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 148(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 152(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 156(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %edi # 4-byte Reload - adcl 160(%esp), %edi - adcl 164(%esp), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 168(%esp), %ebp - movl 44(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 176(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 80(%esp), %ecx - movl 1452(%esp), %eax - movl %eax, %edx - calll .LmulPv384x32 - addl 80(%esp), %esi - movl 56(%esp), %esi # 4-byte Reload - movl 76(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 88(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - adcl 92(%esp), %esi - movl 52(%esp), %edx # 4-byte Reload - adcl 96(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - adcl 100(%esp), %edx - movl %edx, 40(%esp) # 4-byte Spill - adcl 104(%esp), %edi - movl %edi, 36(%esp) # 4-byte Spill - movl 32(%esp), %edx # 4-byte Reload - adcl 108(%esp), %edx - movl %edx, 32(%esp) # 4-byte Spill - adcl 112(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - adcl 116(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 120(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 124(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 128(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, %edx - movl 1452(%esp), %ebp - subl (%ebp), %edx - movl %ecx, %eax - sbbl 4(%ebp), %eax - movl %esi, %ebx - sbbl 8(%ebp), %ebx - movl 52(%esp), %ecx # 4-byte Reload - sbbl 12(%ebp), %ecx - movl 40(%esp), %edi # 4-byte Reload - sbbl 16(%ebp), %edi - movl %edi, 4(%esp) # 4-byte Spill - movl 36(%esp), %edi # 4-byte Reload - sbbl 20(%ebp), %edi - movl %edi, 8(%esp) # 4-byte Spill - movl 32(%esp), %edi # 4-byte Reload - sbbl 24(%ebp), %edi - movl %edi, 12(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - sbbl 28(%ebp), %edi - movl %edi, 16(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - sbbl 32(%ebp), %edi - movl %edi, 20(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - sbbl 36(%ebp), %edi - movl %edi, 24(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - sbbl 40(%ebp), %edi - movl %edi, 28(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - sbbl 44(%ebp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl %edi, %ebp - sarl $31, %ebp - testl %ebp, %ebp - movl 76(%esp), %ebp # 4-byte Reload - js .LBB179_2 -# BB#1: - 
movl %edx, %ebp -.LBB179_2: - movl 1440(%esp), %edx - movl %ebp, (%edx) - movl 68(%esp), %edi # 4-byte Reload - js .LBB179_4 -# BB#3: - movl %eax, %edi -.LBB179_4: - movl %edi, 4(%edx) - js .LBB179_6 -# BB#5: - movl %ebx, %esi -.LBB179_6: - movl %esi, 8(%edx) - movl 52(%esp), %eax # 4-byte Reload - js .LBB179_8 -# BB#7: - movl %ecx, %eax -.LBB179_8: - movl %eax, 12(%edx) - movl 40(%esp), %eax # 4-byte Reload - js .LBB179_10 -# BB#9: - movl 4(%esp), %eax # 4-byte Reload -.LBB179_10: - movl %eax, 16(%edx) - movl 36(%esp), %eax # 4-byte Reload - js .LBB179_12 -# BB#11: - movl 8(%esp), %eax # 4-byte Reload -.LBB179_12: - movl %eax, 20(%edx) - movl 32(%esp), %eax # 4-byte Reload - js .LBB179_14 -# BB#13: - movl 12(%esp), %eax # 4-byte Reload -.LBB179_14: - movl %eax, 24(%edx) - movl 48(%esp), %eax # 4-byte Reload - js .LBB179_16 -# BB#15: - movl 16(%esp), %eax # 4-byte Reload -.LBB179_16: - movl %eax, 28(%edx) - movl 44(%esp), %eax # 4-byte Reload - js .LBB179_18 -# BB#17: - movl 20(%esp), %eax # 4-byte Reload -.LBB179_18: - movl %eax, 32(%edx) - movl 72(%esp), %eax # 4-byte Reload - js .LBB179_20 -# BB#19: - movl 24(%esp), %eax # 4-byte Reload -.LBB179_20: - movl %eax, 36(%edx) - movl 64(%esp), %eax # 4-byte Reload - js .LBB179_22 -# BB#21: - movl 28(%esp), %eax # 4-byte Reload -.LBB179_22: - movl %eax, 40(%edx) - movl 60(%esp), %eax # 4-byte Reload - js .LBB179_24 -# BB#23: - movl 56(%esp), %eax # 4-byte Reload -.LBB179_24: - movl %eax, 44(%edx) - addl $1420, %esp # imm = 0x58C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end179: - .size mcl_fp_montNF12Lbmi2, .Lfunc_end179-mcl_fp_montNF12Lbmi2 - - .globl mcl_fp_montRed12Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed12Lbmi2,@function -mcl_fp_montRed12Lbmi2: # @mcl_fp_montRed12Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $828, %esp # imm = 0x33C - calll .L180$pb -.L180$pb: - popl %eax -.Ltmp31: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp31-.L180$pb), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 856(%esp), %edx - movl -4(%edx), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 852(%esp), %ecx - movl (%ecx), %ebx - movl %ebx, 88(%esp) # 4-byte Spill - movl 4(%ecx), %edi - movl %edi, 100(%esp) # 4-byte Spill - imull %esi, %ebx - movl 92(%ecx), %esi - movl %esi, 104(%esp) # 4-byte Spill - movl 88(%ecx), %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 84(%ecx), %esi - movl %esi, 120(%esp) # 4-byte Spill - movl 80(%ecx), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 76(%ecx), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 72(%ecx), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 68(%ecx), %edi - movl %edi, 140(%esp) # 4-byte Spill - movl 64(%ecx), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 60(%ecx), %esi - movl %esi, 148(%esp) # 4-byte Spill - movl 56(%ecx), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 52(%ecx), %esi - movl %esi, 156(%esp) # 4-byte Spill - movl 48(%ecx), %edi - movl %edi, 152(%esp) # 4-byte Spill - movl 44(%ecx), %edi - movl %edi, 132(%esp) # 4-byte Spill - movl 40(%ecx), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 36(%ecx), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 32(%ecx), %edi - movl 28(%ecx), %esi - movl 24(%ecx), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 20(%ecx), %ebp - movl 16(%ecx), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 12(%ecx), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 8(%ecx), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl (%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 44(%edx), %ecx - movl %ecx, 60(%esp) # 
4-byte Spill - movl 40(%edx), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 36(%edx), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 32(%edx), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 28(%edx), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 24(%edx), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 20(%edx), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 16(%edx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 12(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 8(%edx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 4(%edx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl %ebx, (%esp) - leal 776(%esp), %ecx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv384x32 - movl 88(%esp), %eax # 4-byte Reload - addl 776(%esp), %eax - movl 100(%esp), %ecx # 4-byte Reload - adcl 780(%esp), %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 784(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 796(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 804(%esp), %esi - movl %esi, 88(%esp) # 4-byte Spill - adcl 808(%esp), %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - movl 136(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - sbbl %ebp, %ebp - movl %ecx, %esi - movl %esi, %eax - imull 84(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 720(%esp), %ecx - movl 856(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv384x32 - andl $1, %ebp - movl %ebp, %ecx - addl 720(%esp), %esi - movl 76(%esp), %ebp # 4-byte Reload - adcl 724(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 728(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 732(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 736(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 96(%esp), %esi # 4-byte Reload - adcl 752(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 756(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 
152(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - adcl $0, %edi - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl %ebp, %eax - imull 84(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 664(%esp), %ecx - movl 856(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv384x32 - addl 664(%esp), %ebp - movl 64(%esp), %ecx # 4-byte Reload - adcl 668(%esp), %ecx - movl 68(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 688(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 692(%esp), %esi - movl %esi, 96(%esp) # 4-byte Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl 696(%esp), %ebp - movl 132(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - adcl 712(%esp), %edi - movl %edi, 136(%esp) # 4-byte Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - movl 144(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %esi - imull 84(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 608(%esp), %ecx - movl 856(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv384x32 - addl 608(%esp), %esi - movl 68(%esp), %esi # 4-byte Reload - adcl 612(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 624(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 636(%esp), %ebp - movl %ebp, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload 
- adcl 656(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 144(%esp) # 4-byte Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - movl 120(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl 108(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - movl %esi, %eax - imull 84(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 552(%esp), %ecx - movl 856(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv384x32 - addl 552(%esp), %esi - movl 72(%esp), %ecx # 4-byte Reload - adcl 556(%esp), %ecx - movl 80(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 120(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, %esi - adcl $0, 76(%esp) # 4-byte Folded Spill - movl %ecx, %edi - movl %edi, %eax - imull 84(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 496(%esp), %ecx - movl 856(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv384x32 - addl 496(%esp), %edi - movl 80(%esp), %ecx # 4-byte Reload - adcl 500(%esp), %ecx - movl 88(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 156(%esp), %ebp # 4-byte Reload - adcl 528(%esp), %ebp - movl 136(%esp), %edi # 4-byte Reload - adcl 532(%esp), %edi - movl 148(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 140(%esp) # 4-byte 
Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 104(%esp) # 4-byte Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %esi - imull 84(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 440(%esp), %ecx - movl 856(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv384x32 - addl 440(%esp), %esi - movl 88(%esp), %ecx # 4-byte Reload - adcl 444(%esp), %ecx - movl 100(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - adcl 468(%esp), %ebp - movl %ebp, 156(%esp) # 4-byte Spill - adcl 472(%esp), %edi - movl %edi, 136(%esp) # 4-byte Spill - movl 148(%esp), %esi # 4-byte Reload - adcl 476(%esp), %esi - movl 116(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - movl %ecx, %edi - movl %edi, %eax - imull 84(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 384(%esp), %ecx - movl 856(%esp), %eax - movl %eax, %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv384x32 - addl 384(%esp), %edi - movl 100(%esp), %ecx # 4-byte Reload - adcl 388(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %ebp # 4-byte Reload - adcl 400(%esp), %ebp - movl 152(%esp), %edi # 4-byte Reload - adcl 404(%esp), %edi - movl 156(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - adcl 416(%esp), %esi - movl %esi, 148(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 140(%esp), %esi # 4-byte Reload - adcl 424(%esp), %esi - movl 124(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - movl %ecx, %eax - imull 84(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 328(%esp), %ecx - movl 856(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv384x32 - movl 
100(%esp), %eax # 4-byte Reload - addl 328(%esp), %eax - movl 96(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl 112(%esp), %ecx # 4-byte Reload - adcl 336(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - adcl 340(%esp), %ebp - movl %ebp, 132(%esp) # 4-byte Spill - adcl 344(%esp), %edi - movl %edi, 152(%esp) # 4-byte Spill - movl 156(%esp), %ecx # 4-byte Reload - adcl 348(%esp), %ecx - movl %ecx, 156(%esp) # 4-byte Spill - movl 136(%esp), %ecx # 4-byte Reload - adcl 352(%esp), %ecx - movl %ecx, 136(%esp) # 4-byte Spill - movl 148(%esp), %ecx # 4-byte Reload - adcl 356(%esp), %ecx - movl %ecx, 148(%esp) # 4-byte Spill - movl 116(%esp), %ebp # 4-byte Reload - adcl 360(%esp), %ebp - adcl 364(%esp), %esi - movl %esi, 140(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 368(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 144(%esp), %ecx # 4-byte Reload - adcl 372(%esp), %ecx - movl %ecx, 144(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 376(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - movl 76(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl %eax, %esi - imull 84(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 272(%esp), %ecx - movl 856(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv384x32 - addl 272(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl 132(%esp), %ecx # 4-byte Reload - adcl 280(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 152(%esp), %ecx # 4-byte Reload - adcl 284(%esp), %ecx - movl %ecx, 152(%esp) # 4-byte Spill - movl 156(%esp), %ecx # 4-byte Reload - adcl 288(%esp), %ecx - movl %ecx, 156(%esp) # 4-byte Spill - movl 136(%esp), %ecx # 4-byte Reload - adcl 292(%esp), %ecx - movl %ecx, 136(%esp) # 4-byte Spill - movl 148(%esp), %ecx # 4-byte Reload - adcl 296(%esp), %ecx - movl %ecx, 148(%esp) # 4-byte Spill - movl %ebp, %esi - adcl 300(%esp), %esi - movl 140(%esp), %ecx # 4-byte Reload - adcl 304(%esp), %ecx - movl %ecx, 140(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 308(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 144(%esp), %ecx # 4-byte Reload - adcl 312(%esp), %ecx - movl %ecx, 144(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 316(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 120(%esp), %ecx # 4-byte Reload - adcl 320(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, %ebp - movl %eax, %edi - imull 84(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 216(%esp), %ecx - movl 856(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv384x32 - addl 216(%esp), %edi - movl 132(%esp), %ecx # 4-byte Reload - adcl 220(%esp), %ecx - movl 152(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - adcl 240(%esp), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 124(%esp), %esi # 4-byte Reload 
- adcl 248(%esp), %esi - movl 144(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl 84(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %edi - movl %eax, (%esp) - leal 160(%esp), %ecx - movl 856(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv384x32 - addl 160(%esp), %edi - movl 152(%esp), %eax # 4-byte Reload - adcl 164(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl %eax, %edi - movl 156(%esp), %eax # 4-byte Reload - adcl 168(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 136(%esp), %edx # 4-byte Reload - adcl 172(%esp), %edx - movl %edx, 136(%esp) # 4-byte Spill - movl 148(%esp), %ebx # 4-byte Reload - adcl 176(%esp), %ebx - movl %ebx, 148(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 180(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - adcl 188(%esp), %esi - movl %esi, 124(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, %ebp - subl 24(%esp), %edi # 4-byte Folded Reload - movl 156(%esp), %esi # 4-byte Reload - sbbl 16(%esp), %esi # 4-byte Folded Reload - sbbl 20(%esp), %edx # 4-byte Folded Reload - sbbl 28(%esp), %ebx # 4-byte Folded Reload - sbbl 32(%esp), %ecx # 4-byte Folded Reload - movl 140(%esp), %eax # 4-byte Reload - sbbl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - sbbl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - sbbl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - sbbl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - sbbl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - sbbl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - sbbl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 132(%esp) # 4-byte Spill - sbbl $0, %ebp - andl $1, %ebp - jne .LBB180_2 -# BB#1: - movl %ebx, 148(%esp) # 4-byte Spill -.LBB180_2: - movl %ebp, %ebx - testb %bl, %bl - movl 152(%esp), %ebx # 4-byte Reload - jne .LBB180_4 -# BB#3: - movl %edi, %ebx -.LBB180_4: - movl 848(%esp), %edi - movl %ebx, (%edi) - movl 144(%esp), %ebx # 4-byte Reload - jne .LBB180_6 -# BB#5: - movl %esi, 156(%esp) # 4-byte Spill -.LBB180_6: - movl 156(%esp), %esi # 4-byte Reload - movl %esi, 4(%edi) - movl 136(%esp), %esi # 4-byte Reload - jne .LBB180_8 -# BB#7: - movl %edx, %esi 
-.LBB180_8: - movl %esi, 8(%edi) - movl 148(%esp), %edx # 4-byte Reload - movl %edx, 12(%edi) - movl 128(%esp), %esi # 4-byte Reload - movl 116(%esp), %edx # 4-byte Reload - jne .LBB180_10 -# BB#9: - movl %ecx, %edx -.LBB180_10: - movl %edx, 16(%edi) - movl 120(%esp), %edx # 4-byte Reload - movl 140(%esp), %ecx # 4-byte Reload - jne .LBB180_12 -# BB#11: - movl 84(%esp), %ecx # 4-byte Reload -.LBB180_12: - movl %ecx, 20(%edi) - movl 108(%esp), %ecx # 4-byte Reload - movl 124(%esp), %eax # 4-byte Reload - jne .LBB180_14 -# BB#13: - movl 88(%esp), %eax # 4-byte Reload -.LBB180_14: - movl %eax, 24(%edi) - movl 104(%esp), %eax # 4-byte Reload - jne .LBB180_16 -# BB#15: - movl 92(%esp), %ebx # 4-byte Reload -.LBB180_16: - movl %ebx, 28(%edi) - jne .LBB180_18 -# BB#17: - movl 96(%esp), %esi # 4-byte Reload -.LBB180_18: - movl %esi, 32(%edi) - jne .LBB180_20 -# BB#19: - movl 100(%esp), %edx # 4-byte Reload -.LBB180_20: - movl %edx, 36(%edi) - jne .LBB180_22 -# BB#21: - movl 112(%esp), %ecx # 4-byte Reload -.LBB180_22: - movl %ecx, 40(%edi) - jne .LBB180_24 -# BB#23: - movl 132(%esp), %eax # 4-byte Reload -.LBB180_24: - movl %eax, 44(%edi) - addl $828, %esp # imm = 0x33C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end180: - .size mcl_fp_montRed12Lbmi2, .Lfunc_end180-mcl_fp_montRed12Lbmi2 - - .globl mcl_fp_addPre12Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre12Lbmi2,@function -mcl_fp_addPre12Lbmi2: # @mcl_fp_addPre12Lbmi2 -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - movl 20(%esp), %ecx - addl (%ecx), %edx - adcl 4(%ecx), %esi - movl 8(%eax), %edi - adcl 8(%ecx), %edi - movl 16(%esp), %ebx - movl %edx, (%ebx) - movl 12(%ecx), %edx - movl %esi, 4(%ebx) - movl 16(%ecx), %esi - adcl 12(%eax), %edx - adcl 16(%eax), %esi - movl %edi, 8(%ebx) - movl 20(%eax), %edi - movl %edx, 12(%ebx) - movl 20(%ecx), %edx - adcl %edi, %edx - movl 24(%eax), %edi - movl %esi, 16(%ebx) - movl 24(%ecx), %esi - adcl %edi, %esi - movl 28(%eax), %edi - movl %edx, 20(%ebx) - movl 28(%ecx), %edx - adcl %edi, %edx - movl 32(%eax), %edi - movl %esi, 24(%ebx) - movl 32(%ecx), %esi - adcl %edi, %esi - movl 36(%eax), %edi - movl %edx, 28(%ebx) - movl 36(%ecx), %edx - adcl %edi, %edx - movl 40(%eax), %edi - movl %esi, 32(%ebx) - movl 40(%ecx), %esi - adcl %edi, %esi - movl %edx, 36(%ebx) - movl %esi, 40(%ebx) - movl 44(%eax), %eax - movl 44(%ecx), %ecx - adcl %eax, %ecx - movl %ecx, 44(%ebx) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end181: - .size mcl_fp_addPre12Lbmi2, .Lfunc_end181-mcl_fp_addPre12Lbmi2 - - .globl mcl_fp_subPre12Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre12Lbmi2,@function -mcl_fp_subPre12Lbmi2: # @mcl_fp_subPre12Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edi - xorl %eax, %eax - movl 28(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%ecx), %ebx - sbbl 8(%edx), %ebx - movl 20(%esp), %ebp - movl %esi, (%ebp) - movl 12(%ecx), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ebp) - movl 16(%ecx), %edi - sbbl 16(%edx), %edi - movl %ebx, 8(%ebp) - movl 20(%edx), %ebx - movl %esi, 12(%ebp) - movl 20(%ecx), %esi - sbbl %ebx, %esi - movl 24(%edx), %ebx - movl %edi, 16(%ebp) - movl 24(%ecx), %edi - sbbl %ebx, %edi - movl 28(%edx), %ebx - movl %esi, 20(%ebp) - movl 28(%ecx), %esi - sbbl %ebx, %esi - movl 32(%edx), %ebx - movl %edi, 24(%ebp) - movl 32(%ecx), %edi - sbbl %ebx, %edi - movl 36(%edx), %ebx - movl 
%esi, 28(%ebp) - movl 36(%ecx), %esi - sbbl %ebx, %esi - movl 40(%edx), %ebx - movl %edi, 32(%ebp) - movl 40(%ecx), %edi - sbbl %ebx, %edi - movl %esi, 36(%ebp) - movl %edi, 40(%ebp) - movl 44(%edx), %edx - movl 44(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 44(%ebp) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end182: - .size mcl_fp_subPre12Lbmi2, .Lfunc_end182-mcl_fp_subPre12Lbmi2 - - .globl mcl_fp_shr1_12Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_12Lbmi2,@function -mcl_fp_shr1_12Lbmi2: # @mcl_fp_shr1_12Lbmi2 -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - shrdl $1, %esi, %edx - movl 8(%esp), %ecx - movl %edx, (%ecx) - movl 8(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 4(%ecx) - movl 12(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 8(%ecx) - movl 16(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 12(%ecx) - movl 20(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 16(%ecx) - movl 24(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 20(%ecx) - movl 28(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 24(%ecx) - movl 32(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 28(%ecx) - movl 36(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 32(%ecx) - movl 40(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 36(%ecx) - movl 44(%eax), %eax - shrdl $1, %eax, %edx - movl %edx, 40(%ecx) - shrl %eax - movl %eax, 44(%ecx) - popl %esi - retl -.Lfunc_end183: - .size mcl_fp_shr1_12Lbmi2, .Lfunc_end183-mcl_fp_shr1_12Lbmi2 - - .globl mcl_fp_add12Lbmi2 - .align 16, 0x90 - .type mcl_fp_add12Lbmi2,@function -mcl_fp_add12Lbmi2: # @mcl_fp_add12Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $36, %esp - movl 64(%esp), %ebx - movl (%ebx), %edx - movl 4(%ebx), %ecx - movl 60(%esp), %eax - addl (%eax), %edx - movl %edx, 4(%esp) # 4-byte Spill - adcl 4(%eax), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 8(%ebx), %ecx - adcl 8(%eax), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 12(%eax), %edx - movl 16(%eax), %ecx - adcl 12(%ebx), %edx - movl %edx, 24(%esp) # 4-byte Spill - adcl 16(%ebx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 20(%eax), %ecx - adcl 20(%ebx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 24(%eax), %ecx - adcl 24(%ebx), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 28(%eax), %ecx - adcl 28(%ebx), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 32(%eax), %ebp - adcl 32(%ebx), %ebp - movl %ebp, (%esp) # 4-byte Spill - movl 36(%eax), %edi - adcl 36(%ebx), %edi - movl 40(%eax), %esi - adcl 40(%ebx), %esi - movl 44(%eax), %edx - adcl 44(%ebx), %edx - movl 56(%esp), %ebx - movl 4(%esp), %eax # 4-byte Reload - movl %eax, (%ebx) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 4(%ebx) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 8(%ebx) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%ebx) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 16(%ebx) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 20(%ebx) - movl 12(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%ebx) - movl 8(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%ebx) - movl %ebp, 32(%ebx) - movl %edi, 36(%ebx) - movl %esi, 40(%ebx) - movl %edx, 44(%ebx) - sbbl %ecx, %ecx - andl $1, %ecx - movl 68(%esp), %ebp - subl (%ebp), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - sbbl 4(%ebp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - sbbl 8(%ebp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - sbbl 12(%ebp), 
%eax - movl %eax, 24(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - sbbl 16(%ebp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - sbbl 20(%ebp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - sbbl 24(%ebp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 8(%esp), %eax # 4-byte Reload - sbbl 28(%ebp), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl (%esp), %eax # 4-byte Reload - sbbl 32(%ebp), %eax - movl %eax, (%esp) # 4-byte Spill - sbbl 36(%ebp), %edi - sbbl 40(%ebp), %esi - sbbl 44(%ebp), %edx - sbbl $0, %ecx - testb $1, %cl - jne .LBB184_2 -# BB#1: # %nocarry - movl 4(%esp), %eax # 4-byte Reload - movl %eax, (%ebx) - movl 32(%esp), %eax # 4-byte Reload - movl %eax, 4(%ebx) - movl 28(%esp), %eax # 4-byte Reload - movl %eax, 8(%ebx) - movl 24(%esp), %eax # 4-byte Reload - movl %eax, 12(%ebx) - movl 20(%esp), %eax # 4-byte Reload - movl %eax, 16(%ebx) - movl 16(%esp), %eax # 4-byte Reload - movl %eax, 20(%ebx) - movl 12(%esp), %eax # 4-byte Reload - movl %eax, 24(%ebx) - movl 8(%esp), %eax # 4-byte Reload - movl %eax, 28(%ebx) - movl (%esp), %eax # 4-byte Reload - movl %eax, 32(%ebx) - movl %edi, 36(%ebx) - movl %esi, 40(%ebx) - movl %edx, 44(%ebx) -.LBB184_2: # %carry - addl $36, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end184: - .size mcl_fp_add12Lbmi2, .Lfunc_end184-mcl_fp_add12Lbmi2 - - .globl mcl_fp_addNF12Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF12Lbmi2,@function -mcl_fp_addNF12Lbmi2: # @mcl_fp_addNF12Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $88, %esp - movl 116(%esp), %esi - movl (%esi), %eax - movl 4(%esi), %ecx - movl 112(%esp), %edx - addl (%edx), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 4(%edx), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 44(%esi), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 40(%esi), %ebp - movl 36(%esi), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 32(%esi), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 28(%esi), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 24(%esi), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 20(%esi), %ebx - movl 16(%esi), %edi - movl 12(%esi), %ecx - movl 8(%esi), %eax - adcl 8(%edx), %eax - adcl 12(%edx), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - adcl 16(%edx), %edi - movl %edi, 52(%esp) # 4-byte Spill - adcl 20(%edx), %ebx - movl %ebx, 56(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 24(%edx), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - adcl 28(%edx), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 32(%edx), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 36(%edx), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl %eax, %esi - adcl 40(%edx), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 44(%edx), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 120(%esp), %ebp - movl 60(%esp), %edx # 4-byte Reload - subl (%ebp), %edx - movl 64(%esp), %eax # 4-byte Reload - sbbl 4(%ebp), %eax - movl %eax, (%esp) # 4-byte Spill - movl %esi, %eax - sbbl 8(%ebp), %eax - movl %eax, 4(%esp) # 4-byte Spill - sbbl 12(%ebp), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - sbbl 16(%ebp), %edi - movl %edi, 12(%esp) # 4-byte Spill - sbbl 20(%ebp), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - movl %eax, %ecx - sbbl 24(%ebp), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 84(%esp), 
%ecx # 4-byte Reload - sbbl 28(%ebp), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - sbbl 32(%ebp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - sbbl 36(%ebp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - movl %ecx, %edi - sbbl 40(%ebp), %edi - movl %edi, 36(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - sbbl 44(%ebp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl %edi, %ebp - movl 60(%esp), %edi # 4-byte Reload - sarl $31, %ebp - testl %ebp, %ebp - js .LBB185_2 -# BB#1: - movl %edx, %edi -.LBB185_2: - movl 108(%esp), %edx - movl %edi, (%edx) - movl 64(%esp), %edi # 4-byte Reload - js .LBB185_4 -# BB#3: - movl (%esp), %edi # 4-byte Reload -.LBB185_4: - movl %edi, 4(%edx) - movl %eax, %ebp - js .LBB185_6 -# BB#5: - movl 4(%esp), %esi # 4-byte Reload -.LBB185_6: - movl %esi, 8(%edx) - movl %ecx, %esi - movl 52(%esp), %eax # 4-byte Reload - movl 48(%esp), %ecx # 4-byte Reload - js .LBB185_8 -# BB#7: - movl 8(%esp), %ecx # 4-byte Reload -.LBB185_8: - movl %ecx, 12(%edx) - movl 76(%esp), %ebx # 4-byte Reload - movl 84(%esp), %edi # 4-byte Reload - js .LBB185_10 -# BB#9: - movl 12(%esp), %eax # 4-byte Reload -.LBB185_10: - movl %eax, 16(%edx) - movl 80(%esp), %ecx # 4-byte Reload - js .LBB185_12 -# BB#11: - movl 16(%esp), %eax # 4-byte Reload - movl %eax, 56(%esp) # 4-byte Spill -.LBB185_12: - movl 56(%esp), %eax # 4-byte Reload - movl %eax, 20(%edx) - js .LBB185_14 -# BB#13: - movl 20(%esp), %ebp # 4-byte Reload -.LBB185_14: - movl %ebp, 24(%edx) - js .LBB185_16 -# BB#15: - movl 24(%esp), %edi # 4-byte Reload -.LBB185_16: - movl %edi, 28(%edx) - js .LBB185_18 -# BB#17: - movl 28(%esp), %ebx # 4-byte Reload -.LBB185_18: - movl %ebx, 32(%edx) - movl 72(%esp), %eax # 4-byte Reload - js .LBB185_20 -# BB#19: - movl 32(%esp), %eax # 4-byte Reload -.LBB185_20: - movl %eax, 36(%edx) - js .LBB185_22 -# BB#21: - movl 36(%esp), %esi # 4-byte Reload -.LBB185_22: - movl %esi, 40(%edx) - js .LBB185_24 -# BB#23: - movl 40(%esp), %ecx # 4-byte Reload -.LBB185_24: - movl %ecx, 44(%edx) - addl $88, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end185: - .size mcl_fp_addNF12Lbmi2, .Lfunc_end185-mcl_fp_addNF12Lbmi2 - - .globl mcl_fp_sub12Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub12Lbmi2,@function -mcl_fp_sub12Lbmi2: # @mcl_fp_sub12Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $40, %esp - movl 64(%esp), %esi - movl (%esi), %ecx - movl 4(%esi), %eax - xorl %ebx, %ebx - movl 68(%esp), %edi - subl (%edi), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - sbbl 4(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 8(%esi), %eax - sbbl 8(%edi), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 12(%esi), %eax - sbbl 12(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 16(%esi), %eax - sbbl 16(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 20(%esi), %eax - sbbl 20(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 24(%esi), %eax - sbbl 24(%edi), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 28(%esi), %edx - sbbl 28(%edi), %edx - movl %edx, 8(%esp) # 4-byte Spill - movl 32(%esi), %ecx - sbbl 32(%edi), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 36(%esi), %eax - sbbl 36(%edi), %eax - movl %eax, (%esp) # 4-byte Spill - movl 40(%esi), %ebp - sbbl 40(%edi), %ebp - movl 44(%esi), %esi - sbbl 44(%edi), %esi - sbbl $0, %ebx - testb $1, %bl - movl 60(%esp), %ebx - movl 12(%esp), %edi # 4-byte Reload - movl %edi, (%ebx) - movl 24(%esp), 
%edi # 4-byte Reload - movl %edi, 4(%ebx) - movl 32(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl 36(%esp), %edi # 4-byte Reload - movl %edi, 12(%ebx) - movl 28(%esp), %edi # 4-byte Reload - movl %edi, 16(%ebx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 20(%ebx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 24(%ebx) - movl %edx, 28(%ebx) - movl %ecx, 32(%ebx) - movl %eax, 36(%ebx) - movl %ebp, 40(%ebx) - movl %esi, 44(%ebx) - je .LBB186_2 -# BB#1: # %carry - movl %esi, %edi - movl 72(%esp), %esi - movl 12(%esp), %ecx # 4-byte Reload - addl (%esi), %ecx - movl %ecx, (%ebx) - movl 24(%esp), %edx # 4-byte Reload - adcl 4(%esi), %edx - movl %edx, 4(%ebx) - movl 32(%esp), %ecx # 4-byte Reload - adcl 8(%esi), %ecx - movl 12(%esi), %eax - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %ecx, 8(%ebx) - movl 16(%esi), %ecx - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %eax, 12(%ebx) - movl 20(%esi), %eax - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %ecx, 16(%ebx) - movl 24(%esi), %ecx - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %eax, 20(%ebx) - movl 28(%esi), %eax - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %ecx, 24(%ebx) - movl 32(%esi), %ecx - adcl 4(%esp), %ecx # 4-byte Folded Reload - movl %eax, 28(%ebx) - movl 36(%esi), %eax - adcl (%esp), %eax # 4-byte Folded Reload - movl %ecx, 32(%ebx) - movl %eax, 36(%ebx) - movl 40(%esi), %eax - adcl %ebp, %eax - movl %eax, 40(%ebx) - movl 44(%esi), %eax - adcl %edi, %eax - movl %eax, 44(%ebx) -.LBB186_2: # %nocarry - addl $40, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end186: - .size mcl_fp_sub12Lbmi2, .Lfunc_end186-mcl_fp_sub12Lbmi2 - - .globl mcl_fp_subNF12Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF12Lbmi2,@function -mcl_fp_subNF12Lbmi2: # @mcl_fp_subNF12Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $72, %esp - movl 96(%esp), %ecx - movl (%ecx), %edx - movl 4(%ecx), %eax - movl 100(%esp), %edi - subl (%edi), %edx - movl %edx, 48(%esp) # 4-byte Spill - sbbl 4(%edi), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%ecx), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 40(%ecx), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 36(%ecx), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 32(%ecx), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 28(%ecx), %ebp - movl 24(%ecx), %ebx - movl 20(%ecx), %esi - movl 16(%ecx), %edx - movl 12(%ecx), %eax - movl 8(%ecx), %ecx - sbbl 8(%edi), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - sbbl 12(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - sbbl 16(%edi), %edx - movl %edx, 32(%esp) # 4-byte Spill - sbbl 20(%edi), %esi - movl %esi, 36(%esp) # 4-byte Spill - sbbl 24(%edi), %ebx - movl %ebx, 40(%esp) # 4-byte Spill - sbbl 28(%edi), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - sbbl 32(%edi), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - sbbl 36(%edi), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - sbbl 40(%edi), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - sbbl 44(%edi), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl %ecx, %eax - sarl $31, %eax - movl %eax, %edx - addl %edx, %edx - movl %eax, %edi - adcl %edi, %edi - movl %eax, %ebp - adcl %ebp, %ebp - movl %eax, %esi - adcl %esi, %esi - shrl $31, %ecx - orl %edx, %ecx - movl 104(%esp), %edx - andl 12(%edx), %esi - movl %esi, 8(%esp) # 4-byte Spill - andl 8(%edx), %ebp - andl 4(%edx), %edi - andl (%edx), %ecx - 
movl 44(%edx), %esi - andl %eax, %esi - movl %esi, 20(%esp) # 4-byte Spill - movl 40(%edx), %esi - andl %eax, %esi - movl %esi, 16(%esp) # 4-byte Spill - movl 36(%edx), %esi - andl %eax, %esi - movl %esi, 12(%esp) # 4-byte Spill - movl 32(%edx), %esi - andl %eax, %esi - movl %esi, 4(%esp) # 4-byte Spill - movl 28(%edx), %esi - andl %eax, %esi - movl %esi, (%esp) # 4-byte Spill - movl 24(%edx), %ebx - andl %eax, %ebx - movl 20(%edx), %esi - andl %eax, %esi - andl 16(%edx), %eax - addl 48(%esp), %ecx # 4-byte Folded Reload - adcl 52(%esp), %edi # 4-byte Folded Reload - movl 92(%esp), %edx - movl %ecx, (%edx) - adcl 24(%esp), %ebp # 4-byte Folded Reload - movl %edi, 4(%edx) - movl 8(%esp), %ecx # 4-byte Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ebp, 8(%edx) - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %ecx, 12(%edx) - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %eax, 16(%edx) - adcl 40(%esp), %ebx # 4-byte Folded Reload - movl %esi, 20(%edx) - movl (%esp), %ecx # 4-byte Reload - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ebx, 24(%edx) - movl 4(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %ecx, 28(%edx) - movl 12(%esp), %ecx # 4-byte Reload - adcl 64(%esp), %ecx # 4-byte Folded Reload - movl %eax, 32(%edx) - movl 16(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %ecx, 36(%edx) - movl %eax, 40(%edx) - movl 20(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%edx) - addl $72, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end187: - .size mcl_fp_subNF12Lbmi2, .Lfunc_end187-mcl_fp_subNF12Lbmi2 - - .globl mcl_fpDbl_add12Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add12Lbmi2,@function -mcl_fpDbl_add12Lbmi2: # @mcl_fpDbl_add12Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $88, %esp - movl 116(%esp), %ecx - movl 112(%esp), %edi - movl 12(%edi), %esi - movl 16(%edi), %edx - movl 8(%ecx), %ebx - movl (%ecx), %ebp - addl (%edi), %ebp - movl 108(%esp), %eax - movl %ebp, (%eax) - movl 4(%ecx), %ebp - adcl 4(%edi), %ebp - adcl 8(%edi), %ebx - adcl 12(%ecx), %esi - adcl 16(%ecx), %edx - movl %ebp, 4(%eax) - movl 56(%ecx), %ebp - movl %ebx, 8(%eax) - movl 20(%ecx), %ebx - movl %esi, 12(%eax) - movl 20(%edi), %esi - adcl %ebx, %esi - movl 24(%ecx), %ebx - movl %edx, 16(%eax) - movl 24(%edi), %edx - adcl %ebx, %edx - movl 28(%ecx), %ebx - movl %esi, 20(%eax) - movl 28(%edi), %esi - adcl %ebx, %esi - movl 32(%ecx), %ebx - movl %edx, 24(%eax) - movl 32(%edi), %edx - adcl %ebx, %edx - movl 36(%ecx), %ebx - movl %esi, 28(%eax) - movl 36(%edi), %esi - adcl %ebx, %esi - movl 40(%ecx), %ebx - movl %edx, 32(%eax) - movl 40(%edi), %edx - adcl %ebx, %edx - movl 44(%ecx), %ebx - movl %esi, 36(%eax) - movl 44(%edi), %esi - adcl %ebx, %esi - movl 48(%ecx), %ebx - movl %edx, 40(%eax) - movl 48(%edi), %edx - adcl %ebx, %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 52(%ecx), %ebx - movl %esi, 44(%eax) - movl 52(%edi), %eax - adcl %ebx, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 56(%edi), %eax - adcl %ebp, %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%ecx), %eax - movl 60(%edi), %edx - adcl %eax, %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 64(%ecx), %eax - movl 64(%edi), %edx - adcl %eax, %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 68(%ecx), %eax - movl 68(%edi), %edx - adcl %eax, %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 72(%ecx), %eax - movl 72(%edi), %edx - adcl %eax, %edx - movl %edx, 68(%esp) # 4-byte 
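mcl_fp_subNF12Lbmi2, which completes in the hunk above, is the branch-free variant of the same subtraction: instead of testing the borrow, it smears the sign of the top limb into an all-ones/all-zero mask (the sarl $31), ANDs each modulus limb against that mask, and adds the masked modulus unconditionally. In Go it might look like the sketch below (same package and limbs constant as the previous sketch; the function name is again hypothetical).

// fpSubNF12 is the branchless counterpart of fpSub12. The borrow is
// turned into an all-ones/all-zero mask, mirroring the sign-smearing
// sarl/andl sequence in mcl_fp_subNF12Lbmi2.
func fpSubNF12(z, a, b, p *[limbs]uint32) {
	var borrow uint32
	for i := 0; i < limbs; i++ {
		z[i], borrow = bits.Sub32(a[i], b[i], borrow)
	}
	mask := -borrow // 0xFFFFFFFF if a < b, 0 otherwise
	var carry uint32
	for i := 0; i < limbs; i++ {
		z[i], carry = bits.Add32(z[i], p[i]&mask, carry)
	}
}

The data-independent control flow is what makes this form attractive: the add-back costs the same whether or not it is needed.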
Spill - movl 76(%ecx), %eax - movl 76(%edi), %edx - adcl %eax, %edx - movl 80(%ecx), %esi - movl 80(%edi), %eax - adcl %esi, %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 84(%ecx), %ebx - movl 84(%edi), %esi - adcl %ebx, %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 88(%ecx), %ebp - movl 88(%edi), %ebx - adcl %ebp, %ebx - movl %ebx, 56(%esp) # 4-byte Spill - movl 92(%ecx), %ecx - movl 92(%edi), %edi - adcl %ecx, %edi - movl %edi, 44(%esp) # 4-byte Spill - sbbl %ecx, %ecx - andl $1, %ecx - movl 120(%esp), %ebp - movl 72(%esp), %edi # 4-byte Reload - subl (%ebp), %edi - movl %edi, 28(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - sbbl 4(%ebp), %edi - movl %edi, 24(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - sbbl 8(%ebp), %edi - movl %edi, 20(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - sbbl 12(%ebp), %edi - movl %edi, 16(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - sbbl 16(%ebp), %edi - movl %edi, 12(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - sbbl 20(%ebp), %edi - movl %edi, 8(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - sbbl 24(%ebp), %edi - movl %edi, 4(%esp) # 4-byte Spill - movl %edx, %edi - sbbl 28(%ebp), %edi - movl %edi, (%esp) # 4-byte Spill - sbbl 32(%ebp), %eax - movl %eax, 32(%esp) # 4-byte Spill - sbbl 36(%ebp), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl %ebx, %eax - movl 44(%esp), %ebx # 4-byte Reload - sbbl 40(%ebp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl %ebx, %edi - sbbl 44(%ebp), %edi - sbbl $0, %ecx - andl $1, %ecx - jne .LBB188_2 -# BB#1: - movl %edi, %ebx -.LBB188_2: - testb %cl, %cl - movl 72(%esp), %ecx # 4-byte Reload - movl 68(%esp), %esi # 4-byte Reload - movl 64(%esp), %edi # 4-byte Reload - movl 60(%esp), %ebp # 4-byte Reload - jne .LBB188_4 -# BB#3: - movl (%esp), %edx # 4-byte Reload - movl 4(%esp), %esi # 4-byte Reload - movl 8(%esp), %edi # 4-byte Reload - movl 12(%esp), %ebp # 4-byte Reload - movl 16(%esp), %eax # 4-byte Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 28(%esp), %ecx # 4-byte Reload -.LBB188_4: - movl 108(%esp), %eax - movl %ecx, 48(%eax) - movl 76(%esp), %ecx # 4-byte Reload - movl %ecx, 52(%eax) - movl 80(%esp), %ecx # 4-byte Reload - movl %ecx, 56(%eax) - movl 84(%esp), %ecx # 4-byte Reload - movl %ecx, 60(%eax) - movl %ebp, 64(%eax) - movl %edi, 68(%eax) - movl %esi, 72(%eax) - movl %edx, 76(%eax) - movl 56(%esp), %ecx # 4-byte Reload - movl 48(%esp), %edx # 4-byte Reload - jne .LBB188_6 -# BB#5: - movl 32(%esp), %edx # 4-byte Reload -.LBB188_6: - movl %edx, 80(%eax) - movl 52(%esp), %edx # 4-byte Reload - jne .LBB188_8 -# BB#7: - movl 36(%esp), %edx # 4-byte Reload -.LBB188_8: - movl %edx, 84(%eax) - jne .LBB188_10 -# BB#9: - movl 40(%esp), %ecx # 4-byte Reload -.LBB188_10: - movl %ecx, 88(%eax) - movl %ebx, 92(%eax) - addl $88, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end188: - .size mcl_fpDbl_add12Lbmi2, .Lfunc_end188-mcl_fpDbl_add12Lbmi2 - - .globl mcl_fpDbl_sub12Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub12Lbmi2,@function -mcl_fpDbl_sub12Lbmi2: # @mcl_fpDbl_sub12Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $76, %esp - movl 100(%esp), %esi - movl (%esi), %eax - movl 4(%esi), %edx - movl 104(%esp), %ebx - subl (%ebx), %eax - sbbl 4(%ebx), %edx - movl 8(%esi), %edi - sbbl 8(%ebx), %edi - movl 96(%esp), %ecx - movl 
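mcl_fpDbl_add12Lbmi2, finished above, operates on double-width (24-limb) values such as the raw output of a 384x384-bit multiply: the operands are added limb by limb, p is trial-subtracted from the upper half, and the long .LBB188 select chain keeps whichever upper half is correct. A sketch under the same assumptions as the earlier ones; the keep-condition is my reading of that select chain.

// fpDblAdd12 adds two 24-limb values and keeps the result below
// p * 2^384 by conditionally subtracting p from the upper half,
// the job of the cmov-style select chain in mcl_fpDbl_add12Lbmi2.
func fpDblAdd12(z, a, b *[2 * limbs]uint32, p *[limbs]uint32) {
	var carry uint32
	for i := 0; i < 2*limbs; i++ {
		z[i], carry = bits.Add32(a[i], b[i], carry)
	}
	var t [limbs]uint32 // trial upper half: z_hi - p
	var borrow uint32
	for i := 0; i < limbs; i++ {
		t[i], borrow = bits.Sub32(z[limbs+i], p[i], borrow)
	}
	if carry == 1 || borrow == 0 { // sum >= p * 2^384: keep the reduced half
		copy(z[limbs:], t[:])
	}
}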
%eax, (%ecx) - movl 12(%esi), %eax - sbbl 12(%ebx), %eax - movl %edx, 4(%ecx) - movl 16(%esi), %edx - sbbl 16(%ebx), %edx - movl %edi, 8(%ecx) - movl 20(%ebx), %edi - movl %eax, 12(%ecx) - movl 20(%esi), %eax - sbbl %edi, %eax - movl 24(%ebx), %edi - movl %edx, 16(%ecx) - movl 24(%esi), %edx - sbbl %edi, %edx - movl 28(%ebx), %edi - movl %eax, 20(%ecx) - movl 28(%esi), %eax - sbbl %edi, %eax - movl 32(%ebx), %edi - movl %edx, 24(%ecx) - movl 32(%esi), %edx - sbbl %edi, %edx - movl 36(%ebx), %edi - movl %eax, 28(%ecx) - movl 36(%esi), %eax - sbbl %edi, %eax - movl 40(%ebx), %edi - movl %edx, 32(%ecx) - movl 40(%esi), %edx - sbbl %edi, %edx - movl 44(%ebx), %edi - movl %eax, 36(%ecx) - movl 44(%esi), %eax - sbbl %edi, %eax - movl 48(%ebx), %edi - movl %edx, 40(%ecx) - movl 48(%esi), %edx - sbbl %edi, %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 52(%ebx), %edx - movl %eax, 44(%ecx) - movl 52(%esi), %eax - sbbl %edx, %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 56(%ebx), %eax - movl 56(%esi), %edx - sbbl %eax, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 60(%ebx), %eax - movl 60(%esi), %edx - sbbl %eax, %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 64(%ebx), %eax - movl 64(%esi), %edx - sbbl %eax, %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 68(%ebx), %eax - movl 68(%esi), %edx - sbbl %eax, %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 72(%ebx), %eax - movl 72(%esi), %edx - sbbl %eax, %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 76(%ebx), %eax - movl 76(%esi), %edx - sbbl %eax, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 80(%ebx), %eax - movl 80(%esi), %edx - sbbl %eax, %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 84(%ebx), %eax - movl 84(%esi), %edx - sbbl %eax, %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 88(%ebx), %eax - movl 88(%esi), %edx - sbbl %eax, %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 92(%ebx), %eax - movl 92(%esi), %edx - sbbl %eax, %edx - movl %edx, 72(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 108(%esp), %ebp - jne .LBB189_1 -# BB#2: - movl $0, 36(%esp) # 4-byte Folded Spill - jmp .LBB189_3 -.LBB189_1: - movl 44(%ebp), %edx - movl %edx, 36(%esp) # 4-byte Spill -.LBB189_3: - testb %al, %al - jne .LBB189_4 -# BB#5: - movl $0, 12(%esp) # 4-byte Folded Spill - movl $0, %esi - jmp .LBB189_6 -.LBB189_4: - movl (%ebp), %esi - movl 4(%ebp), %eax - movl %eax, 12(%esp) # 4-byte Spill -.LBB189_6: - jne .LBB189_7 -# BB#8: - movl $0, 20(%esp) # 4-byte Folded Spill - jmp .LBB189_9 -.LBB189_7: - movl 40(%ebp), %eax - movl %eax, 20(%esp) # 4-byte Spill -.LBB189_9: - jne .LBB189_10 -# BB#11: - movl $0, 16(%esp) # 4-byte Folded Spill - jmp .LBB189_12 -.LBB189_10: - movl 36(%ebp), %eax - movl %eax, 16(%esp) # 4-byte Spill -.LBB189_12: - jne .LBB189_13 -# BB#14: - movl $0, 8(%esp) # 4-byte Folded Spill - jmp .LBB189_15 -.LBB189_13: - movl 32(%ebp), %eax - movl %eax, 8(%esp) # 4-byte Spill -.LBB189_15: - jne .LBB189_16 -# BB#17: - movl $0, 4(%esp) # 4-byte Folded Spill - jmp .LBB189_18 -.LBB189_16: - movl 28(%ebp), %eax - movl %eax, 4(%esp) # 4-byte Spill -.LBB189_18: - jne .LBB189_19 -# BB#20: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB189_21 -.LBB189_19: - movl 24(%ebp), %eax - movl %eax, (%esp) # 4-byte Spill -.LBB189_21: - jne .LBB189_22 -# BB#23: - movl $0, %ebx - jmp .LBB189_24 -.LBB189_22: - movl 20(%ebp), %ebx -.LBB189_24: - jne .LBB189_25 -# BB#26: - movl $0, %eax - jmp .LBB189_27 -.LBB189_25: - movl 16(%ebp), %eax -.LBB189_27: - jne .LBB189_28 -# BB#29: - movl %ebp, %edx - movl $0, %ebp - jmp .LBB189_30 
-.LBB189_28: - movl %ebp, %edx - movl 12(%edx), %ebp -.LBB189_30: - jne .LBB189_31 -# BB#32: - xorl %edx, %edx - jmp .LBB189_33 -.LBB189_31: - movl 8(%edx), %edx -.LBB189_33: - addl 32(%esp), %esi # 4-byte Folded Reload - movl 12(%esp), %edi # 4-byte Reload - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %esi, 48(%ecx) - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edi, 52(%ecx) - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl %edx, 56(%ecx) - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %ebp, 60(%ecx) - adcl 48(%esp), %ebx # 4-byte Folded Reload - movl %eax, 64(%ecx) - movl (%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %ebx, 68(%ecx) - movl 4(%esp), %edx # 4-byte Reload - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %eax, 72(%ecx) - movl 8(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %edx, 76(%ecx) - movl 16(%esp), %edx # 4-byte Reload - adcl 64(%esp), %edx # 4-byte Folded Reload - movl %eax, 80(%ecx) - movl 20(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %edx, 84(%ecx) - movl %eax, 88(%ecx) - movl 36(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%ecx) - addl $76, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end189: - .size mcl_fpDbl_sub12Lbmi2, .Lfunc_end189-mcl_fpDbl_sub12Lbmi2 - - .align 16, 0x90 - .type .LmulPv416x32,@function -.LmulPv416x32: # @mulPv416x32 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $44, %esp - movl %edx, %eax - movl 64(%esp), %ebx - movl %ebx, %edx - mulxl 4(%eax), %esi, %ebp - movl %ebx, %edx - mulxl (%eax), %edi, %edx - movl %edi, 40(%esp) # 4-byte Spill - addl %esi, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 8(%eax), %edx, %esi - adcl %ebp, %edx - movl %edx, 32(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 12(%eax), %edx, %edi - adcl %esi, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 16(%eax), %edx, %esi - adcl %edi, %edx - movl %edx, 24(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 20(%eax), %edx, %edi - adcl %esi, %edx - movl %edx, 20(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 24(%eax), %edx, %esi - adcl %edi, %edx - movl %edx, 16(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 28(%eax), %edx, %edi - adcl %esi, %edx - movl %edx, 12(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 32(%eax), %edx, %esi - adcl %edi, %edx - movl %edx, 8(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 36(%eax), %edi, %ebp - adcl %esi, %edi - movl %ebx, %edx - mulxl 40(%eax), %esi, %edx - movl %edx, (%esp) # 4-byte Spill - adcl %ebp, %esi - movl %ebx, %edx - mulxl 44(%eax), %edx, %ebp - movl %ebp, 4(%esp) # 4-byte Spill - adcl (%esp), %edx # 4-byte Folded Reload - movl 40(%esp), %ebp # 4-byte Reload - movl %ebp, (%ecx) - movl 36(%esp), %ebp # 4-byte Reload - movl %ebp, 4(%ecx) - movl 32(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%ecx) - movl 28(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%ecx) - movl 24(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%ecx) - movl 20(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%ecx) - movl 16(%esp), %ebp # 4-byte Reload - movl %ebp, 24(%ecx) - movl 12(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%ecx) - movl 8(%esp), %ebp # 4-byte Reload - movl %ebp, 32(%ecx) - movl %edi, 36(%ecx) - movl %esi, 40(%ecx) - movl %edx, 44(%ecx) - movl %ebx, %edx - mulxl 48(%eax), %eax, %edx - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%ecx) - adcl $0, %edx - movl %edx, 52(%ecx) - movl %ecx, %eax - 
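mcl_fpDbl_sub12Lbmi2, completed above, is the mirror image: a 24-limb borrow-chain subtraction whose .LBB189 branches select either 0 or the corresponding limb of p, so that p is added to the upper half exactly when the subtraction underflowed. The sketch below expresses that selection with a mask; the assembly reaches the same result with jne branches on the saved borrow. Same caveats as before.

// fpDblSub12 subtracts two 24-limb values; on underflow it adds p to
// the upper half, which is what the zero-or-p[i] selections in
// mcl_fpDbl_sub12Lbmi2 feed into the final adcl chain.
func fpDblSub12(z, a, b *[2 * limbs]uint32, p *[limbs]uint32) {
	var borrow uint32
	for i := 0; i < 2*limbs; i++ {
		z[i], borrow = bits.Sub32(a[i], b[i], borrow)
	}
	mask := -borrow // select p (when a < b) or 0
	var carry uint32
	for i := 0; i < limbs; i++ {
		z[limbs+i], carry = bits.Add32(z[limbs+i], p[i]&mask, carry)
	}
}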
addl $44, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end190: - .size .LmulPv416x32, .Lfunc_end190-.LmulPv416x32 - - .globl mcl_fp_mulUnitPre13Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre13Lbmi2,@function -mcl_fp_mulUnitPre13Lbmi2: # @mcl_fp_mulUnitPre13Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $108, %esp - calll .L191$pb -.L191$pb: - popl %ebx -.Ltmp32: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp32-.L191$pb), %ebx - movl 136(%esp), %eax - movl %eax, (%esp) - leal 48(%esp), %ecx - movl 132(%esp), %edx - calll .LmulPv416x32 - movl 100(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 96(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 92(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 88(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 84(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 80(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 76(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 72(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 68(%esp), %ebp - movl 64(%esp), %ebx - movl 60(%esp), %edi - movl 56(%esp), %esi - movl 48(%esp), %edx - movl 52(%esp), %ecx - movl 128(%esp), %eax - movl %edx, (%eax) - movl %ecx, 4(%eax) - movl %esi, 8(%eax) - movl %edi, 12(%eax) - movl %ebx, 16(%eax) - movl %ebp, 20(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 40(%eax) - movl 36(%esp), %ecx # 4-byte Reload - movl %ecx, 44(%eax) - movl 40(%esp), %ecx # 4-byte Reload - movl %ecx, 48(%eax) - movl 44(%esp), %ecx # 4-byte Reload - movl %ecx, 52(%eax) - addl $108, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end191: - .size mcl_fp_mulUnitPre13Lbmi2, .Lfunc_end191-mcl_fp_mulUnitPre13Lbmi2 - - .globl mcl_fpDbl_mulPre13Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre13Lbmi2,@function -mcl_fpDbl_mulPre13Lbmi2: # @mcl_fpDbl_mulPre13Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $844, %esp # imm = 0x34C - calll .L192$pb -.L192$pb: - popl %edi -.Ltmp33: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp33-.L192$pb), %edi - movl %edi, 108(%esp) # 4-byte Spill - movl 872(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 784(%esp), %ecx - movl 868(%esp), %edx - movl %edx, %esi - movl %edi, %ebx - calll .LmulPv416x32 - movl 836(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 832(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 828(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 820(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 816(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 812(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 808(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 804(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 800(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 796(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 792(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 784(%esp), %eax - movl 788(%esp), %ebp - movl 864(%esp), %ecx - movl %eax, (%ecx) - movl 872(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 728(%esp), %ecx - movl %esi, %edx - movl %edi, %ebx - calll .LmulPv416x32 - addl 728(%esp), %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl 
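Here the file moves from 12-limb to 13-limb (416-bit) code. Its workhorse is the local routine .LmulPv416x32 above: multiply a 13-limb operand by one 32-bit limb using BMI2's mulx, which produces the full 64-bit product without touching the flags, so the adcl chain that accumulates the partial products is never interrupted. In the Go sketch below, bits.Mul32 plays the role of mulx (names again illustrative; same package as the sketches above).

const limbs13 = 13 // 13 x 32-bit limbs = 416 bits

// mulPv416x32 computes the 14-limb product z = x * y for a single
// 32-bit multiplier y, the scalar pass that .LmulPv416x32 implements
// with mulx + adcl.
func mulPv416x32(z *[limbs13 + 1]uint32, x *[limbs13]uint32, y uint32) {
	var carry uint32
	for i := 0; i < limbs13; i++ {
		hi, lo := bits.Mul32(x[i], y)
		var c uint32
		z[i], c = bits.Add32(lo, carry, 0)
		carry = hi + c // hi <= 2^32 - 2, so this cannot wrap
	}
	z[limbs13] = carry
}

mcl_fp_mulUnitPre13Lbmi2 above is little more than one such pass plus the stores of the fourteen result limbs.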
780(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 776(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 772(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 768(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 764(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 760(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 756(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 752(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 748(%esp), %edi - movl 744(%esp), %esi - movl 740(%esp), %edx - movl 732(%esp), %eax - movl 736(%esp), %ecx - movl 864(%esp), %ebp - movl 24(%esp), %ebx # 4-byte Reload - movl %ebx, 4(%ebp) - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 672(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 60(%esp), %eax # 4-byte Reload - addl 672(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 724(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 720(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 716(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 712(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 708(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 704(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 700(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 696(%esp), %ebx - movl 692(%esp), %edi - movl 688(%esp), %esi - movl 684(%esp), %edx - movl 676(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 680(%esp), %ecx - movl 864(%esp), %eax - movl 60(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%eax) - movl 24(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - adcl 48(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 
4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 616(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 616(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 668(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 664(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 660(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 656(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 652(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 648(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 644(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 640(%esp), %ebx - movl 636(%esp), %edi - movl 632(%esp), %esi - movl 628(%esp), %edx - movl 620(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 624(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 60(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 48(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 560(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 560(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 612(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 608(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 604(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 600(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 596(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 592(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 588(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 584(%esp), %ebx - movl 580(%esp), %edi - movl 576(%esp), %esi - movl 572(%esp), %edx - movl 564(%esp), 
%eax - movl %eax, 104(%esp) # 4-byte Spill - movl 568(%esp), %ecx - movl 864(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%eax) - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 56(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 504(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 504(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 556(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 552(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 548(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 544(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 540(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 536(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 532(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 528(%esp), %ebx - movl 524(%esp), %edi - movl 520(%esp), %esi - movl 516(%esp), %edx - movl 508(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 512(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 16(%esp), %ebp # 4-byte Folded Reload - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 24(%eax), %eax - movl 
%eax, (%esp) - leal 448(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 448(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 500(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 496(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 492(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 488(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 484(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 480(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 476(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 472(%esp), %ebp - movl 468(%esp), %edi - movl 464(%esp), %esi - movl 460(%esp), %edx - movl 452(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 456(%esp), %ecx - movl 864(%esp), %eax - movl 56(%esp), %ebx # 4-byte Reload - movl %ebx, 24(%eax) - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - adcl 68(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 20(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 392(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 392(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 444(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 440(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 436(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 432(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 428(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 424(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 420(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 416(%esp), %ebx - movl 412(%esp), %edi - movl 408(%esp), %esi - movl 404(%esp), %edx - movl 396(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 400(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%eax) - movl 60(%esp), %ebp # 4-byte Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte 
Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 336(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 336(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 388(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 384(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 380(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 376(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 372(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 368(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 364(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 360(%esp), %ebp - movl 356(%esp), %edi - movl 352(%esp), %esi - movl 348(%esp), %edx - movl 340(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 344(%esp), %ecx - movl 864(%esp), %eax - movl 60(%esp), %ebx # 4-byte Reload - movl %ebx, 32(%eax) - movl 24(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - adcl 68(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 16(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 280(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 280(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 332(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 328(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 324(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 320(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 316(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 312(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 308(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 304(%esp), %ebx - movl 
300(%esp), %edi - movl 296(%esp), %esi - movl 292(%esp), %edx - movl 284(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 288(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 36(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 60(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 224(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 224(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 276(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 272(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 268(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 264(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 260(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 256(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 252(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 248(%esp), %ebx - movl 244(%esp), %edi - movl 240(%esp), %esi - movl 236(%esp), %edx - movl 228(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 232(%esp), %ecx - movl 864(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 40(%eax) - movl 60(%esp), %eax # 4-byte Reload - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 40(%esp) # 4-byte Spill - adcl 68(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 
4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 872(%esp), %edi - movl 44(%edi), %eax - movl %eax, (%esp) - leal 168(%esp), %ecx - movl 868(%esp), %eax - movl %eax, %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 60(%esp), %esi # 4-byte Reload - addl 168(%esp), %esi - movl 220(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 216(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 212(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 208(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 204(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 200(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 196(%esp), %ebp - movl 192(%esp), %ebx - movl 188(%esp), %edi - movl 184(%esp), %edx - movl 180(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 172(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 176(%esp), %ecx - movl 864(%esp), %eax - movl %esi, 44(%eax) - movl 68(%esp), %esi # 4-byte Reload - adcl 24(%esp), %esi # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 48(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - adcl 104(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 52(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 112(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 112(%esp), %esi - movl %esi, %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 120(%esp), %edi - movl 164(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 160(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 156(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 152(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 148(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 144(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 140(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 136(%esp), %ebx - movl 132(%esp), %esi - movl 128(%esp), %edx - movl 124(%esp), %ecx - movl 864(%esp), %eax - movl %ebp, 48(%eax) - movl 68(%esp), %ebp # 4-byte Reload - movl %ebp, 52(%eax) - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl %edi, 56(%eax) - adcl 48(%esp), %edx # 4-byte Folded Reload - movl %ecx, 60(%eax) - adcl 64(%esp), %esi # 4-byte Folded Reload - movl %edx, 64(%eax) - adcl 104(%esp), %ebx # 4-byte Folded Reload - movl %esi, 68(%eax) - movl 44(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload 
- movl %ebx, 72(%eax) - movl 60(%esp), %edx # 4-byte Reload - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %ecx, 76(%eax) - movl 76(%esp), %ecx # 4-byte Reload - adcl 88(%esp), %ecx # 4-byte Folded Reload - movl %edx, 80(%eax) - movl 84(%esp), %edx # 4-byte Reload - adcl 80(%esp), %edx # 4-byte Folded Reload - movl %ecx, 84(%eax) - movl 92(%esp), %ecx # 4-byte Reload - adcl 100(%esp), %ecx # 4-byte Folded Reload - movl %edx, 88(%eax) - movl %ecx, 92(%eax) - movl 96(%esp), %ecx # 4-byte Reload - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 96(%eax) - movl 108(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 100(%eax) - addl $844, %esp # imm = 0x34C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end192: - .size mcl_fpDbl_mulPre13Lbmi2, .Lfunc_end192-mcl_fpDbl_mulPre13Lbmi2 - - .globl mcl_fpDbl_sqrPre13Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre13Lbmi2,@function -mcl_fpDbl_sqrPre13Lbmi2: # @mcl_fpDbl_sqrPre13Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $844, %esp # imm = 0x34C - calll .L193$pb -.L193$pb: - popl %ebx -.Ltmp34: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp34-.L193$pb), %ebx - movl %ebx, 108(%esp) # 4-byte Spill - movl 868(%esp), %edx - movl (%edx), %eax - movl %eax, (%esp) - leal 784(%esp), %ecx - movl %edx, %edi - movl %ebx, %esi - calll .LmulPv416x32 - movl 836(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 832(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 828(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 820(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 816(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 812(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 808(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 804(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 800(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 796(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 792(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 784(%esp), %eax - movl 788(%esp), %ebp - movl 864(%esp), %ecx - movl %eax, (%ecx) - movl %edi, %edx - movl 4(%edx), %eax - movl %eax, (%esp) - leal 728(%esp), %ecx - movl %esi, %ebx - calll .LmulPv416x32 - addl 728(%esp), %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl 780(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 776(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 772(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 768(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 764(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 760(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 756(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 752(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 748(%esp), %edi - movl 744(%esp), %esi - movl 740(%esp), %edx - movl 732(%esp), %eax - movl 736(%esp), %ecx - movl 864(%esp), %ebp - movl 24(%esp), %ebx # 4-byte Reload - movl %ebx, 4(%ebp) - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - 
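mcl_fpDbl_mulPre13Lbmi2, which closes above, is plain schoolbook multiplication built from that scalar pass: thirteen calls to .LmulPv416x32, one per limb of the multiplier, each partial product accumulated into the running result one limb further left (the repeated calll/adcl blocks). mcl_fpDbl_sqrPre13Lbmi2, which begins next, is the same skeleton applied with both operands equal. A sketch reusing the mulPv416x32 helper defined above:

// mulPre13 computes the full 26-limb product z = x * y by schoolbook
// accumulation of scalar passes, matching the structure of
// mcl_fpDbl_mulPre13Lbmi2. z must start zeroed (the Go zero value).
func mulPre13(z *[2 * limbs13]uint32, x, y *[limbs13]uint32) {
	var t [limbs13 + 1]uint32
	for j := 0; j < limbs13; j++ {
		mulPv416x32(&t, x, y[j]) // t = x * y[j]
		var carry uint32
		for i := 0; i <= limbs13; i++ {
			z[j+i], carry = bits.Add32(z[j+i], t[i], carry)
		}
		// carry is always 0 here: z[j+limbs13] was still zero, so the
		// top limb absorbs t[limbs13] plus any carry without overflow.
	}
}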
movl 48(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 8(%edx), %eax - movl %eax, (%esp) - leal 672(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 60(%esp), %eax # 4-byte Reload - addl 672(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 724(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 720(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 716(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 712(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 708(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 704(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 700(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 696(%esp), %ebx - movl 692(%esp), %edi - movl 688(%esp), %esi - movl 684(%esp), %edx - movl 676(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 680(%esp), %ecx - movl 864(%esp), %eax - movl 60(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%eax) - movl 24(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - adcl 48(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 12(%edx), %eax - movl %eax, (%esp) - leal 616(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 616(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 668(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 664(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 660(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 656(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 652(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 648(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - 
movl 644(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 640(%esp), %ebx - movl 636(%esp), %edi - movl 632(%esp), %esi - movl 628(%esp), %edx - movl 620(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 624(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 60(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 48(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 16(%edx), %eax - movl %eax, (%esp) - leal 560(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 560(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 612(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 608(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 604(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 600(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 596(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 592(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 588(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 584(%esp), %ebx - movl 580(%esp), %edi - movl 576(%esp), %esi - movl 572(%esp), %edx - movl 564(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 568(%esp), %ecx - movl 864(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%eax) - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 56(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte 
Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 20(%edx), %eax - movl %eax, (%esp) - leal 504(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 504(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 556(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 552(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 548(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 544(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 540(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 536(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 532(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 528(%esp), %ebx - movl 524(%esp), %edi - movl 520(%esp), %esi - movl 516(%esp), %edx - movl 508(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 512(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 16(%esp), %ebp # 4-byte Folded Reload - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 24(%edx), %eax - movl %eax, (%esp) - leal 448(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 448(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 500(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 496(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 492(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 488(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 484(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 480(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 476(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 472(%esp), %ebp - movl 468(%esp), %edi - movl 464(%esp), %esi - movl 460(%esp), %edx - movl 452(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 456(%esp), %ecx - movl 864(%esp), %eax - movl 56(%esp), %ebx # 4-byte Reload - movl %ebx, 24(%eax) - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - adcl 68(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 
20(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 28(%edx), %eax - movl %eax, (%esp) - leal 392(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 392(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 444(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 440(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 436(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 432(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 428(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 424(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 420(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 416(%esp), %ebx - movl 412(%esp), %edi - movl 408(%esp), %esi - movl 404(%esp), %edx - movl 396(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 400(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%eax) - movl 60(%esp), %ebp # 4-byte Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 32(%edx), %eax - movl %eax, (%esp) - leal 336(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 336(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 388(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 384(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 380(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 376(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 372(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 368(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 364(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 360(%esp), %ebp - 
movl 356(%esp), %edi - movl 352(%esp), %esi - movl 348(%esp), %edx - movl 340(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 344(%esp), %ecx - movl 864(%esp), %eax - movl 60(%esp), %ebx # 4-byte Reload - movl %ebx, 32(%eax) - movl 24(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - adcl 68(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 16(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 36(%edx), %eax - movl %eax, (%esp) - leal 280(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 280(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 332(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 328(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 324(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 320(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 316(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 312(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 308(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 304(%esp), %ebx - movl 300(%esp), %edi - movl 296(%esp), %esi - movl 292(%esp), %edx - movl 284(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 288(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 36(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 60(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte 
Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 40(%edx), %eax - movl %eax, (%esp) - leal 224(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 224(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 276(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 272(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 268(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 264(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 260(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 256(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 252(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 248(%esp), %ebx - movl 244(%esp), %edi - movl 240(%esp), %esi - movl 236(%esp), %edx - movl 228(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 232(%esp), %ecx - movl 864(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 40(%eax) - movl 60(%esp), %eax # 4-byte Reload - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 40(%esp) # 4-byte Spill - adcl 68(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 44(%edx), %eax - movl %eax, (%esp) - leal 168(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 60(%esp), %esi # 4-byte Reload - addl 168(%esp), %esi - movl 220(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 216(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 212(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 208(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 204(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 200(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 196(%esp), %ebp - movl 192(%esp), %ebx - movl 188(%esp), %edi - movl 184(%esp), %edx - movl 180(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 172(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 176(%esp), %ecx - movl 864(%esp), %eax - movl %esi, 44(%eax) - movl 68(%esp), %esi # 4-byte Reload - adcl 24(%esp), %esi # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 48(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 
4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - adcl 104(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 52(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 48(%edx), %eax - movl %eax, (%esp) - leal 112(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 112(%esp), %esi - movl %esi, %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 120(%esp), %edi - movl 164(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 160(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 156(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 152(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 148(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 144(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 140(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 136(%esp), %ebx - movl 132(%esp), %esi - movl 128(%esp), %edx - movl 124(%esp), %ecx - movl 864(%esp), %eax - movl %ebp, 48(%eax) - movl 68(%esp), %ebp # 4-byte Reload - movl %ebp, 52(%eax) - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl %edi, 56(%eax) - adcl 48(%esp), %edx # 4-byte Folded Reload - movl %ecx, 60(%eax) - adcl 64(%esp), %esi # 4-byte Folded Reload - movl %edx, 64(%eax) - adcl 104(%esp), %ebx # 4-byte Folded Reload - movl %esi, 68(%eax) - movl 44(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ebx, 72(%eax) - movl 60(%esp), %edx # 4-byte Reload - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %ecx, 76(%eax) - movl 76(%esp), %ecx # 4-byte Reload - adcl 88(%esp), %ecx # 4-byte Folded Reload - movl %edx, 80(%eax) - movl 84(%esp), %edx # 4-byte Reload - adcl 80(%esp), %edx # 4-byte Folded Reload - movl %ecx, 84(%eax) - movl 92(%esp), %ecx # 4-byte Reload - adcl 100(%esp), %ecx # 4-byte Folded Reload - movl %edx, 88(%eax) - movl %ecx, 92(%eax) - movl 96(%esp), %ecx # 4-byte Reload - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 96(%eax) - movl 108(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 100(%eax) - addl $844, %esp # imm = 0x34C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end193: - .size mcl_fpDbl_sqrPre13Lbmi2, .Lfunc_end193-mcl_fpDbl_sqrPre13Lbmi2 - - .globl mcl_fp_mont13Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont13Lbmi2,@function -mcl_fp_mont13Lbmi2: # @mcl_fp_mont13Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1548, %esp # imm = 0x60C - calll .L194$pb -.L194$pb: - popl %ebx -.Ltmp35: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp35-.L194$pb), %ebx - movl 1580(%esp), %eax - movl -4(%eax), %edi - movl %edi, 28(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1488(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 1488(%esp), %esi - movl 1492(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl %esi, %eax - imull %edi, %eax 
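
mcl_fp_mont13Lbmi2, whose prologue appears just above, is a 13-limb Montgomery multiplication. It caches the word stored 4 bytes below the modulus pointer (movl -4(%eax), %edi) and the first thing it does with the low product word is an imull by that value, consistent with mcl keeping n' = -p^{-1} mod 2^32 alongside the modulus. The unrolled body then repeats one step per word of the multiplier: multiply-accumulate x[i]*y (first .LmulPv416x32 call), derive the reduction factor m, fold in m*p (second call), and drop the now-zero low word. A hedged sketch of that word-serial (CIOS-style) schedule in Go, continuing the montsketch package above; montMul and nInv are illustrative names:

package montsketch

// montMul returns x*y*R^{-1} mod p with R = 2^(32*limbs), following the
// schedule the unrolled assembly appears to use. nInv is -p^{-1} mod 2^32.
func montMul(x, y, p *[limbs]uint32, nInv uint32) [limbs]uint32 {
	var t [limbs + 2]uint32 // accumulator, lives in stack slots in the asm
	for i := 0; i < limbs; i++ {
		// t += x[i]*y: one .LmulPv416x32 call plus the adcl chain
		var c uint64
		for j := 0; j < limbs; j++ {
			v := uint64(x[i])*uint64(y[j]) + uint64(t[j]) + c
			t[j], c = uint32(v), v>>32
		}
		v := uint64(t[limbs]) + c
		t[limbs], t[limbs+1] = uint32(v), uint32(v>>32)

		// m = t[0]*nInv mod 2^32: the imull against the cached inverse
		m := t[0] * nInv

		// t = (t + m*p) >> 32: second call; t[0] cancels by choice of m
		v = uint64(m)*uint64(p[0]) + uint64(t[0])
		c = v >> 32
		for j := 1; j < limbs; j++ {
			v = uint64(m)*uint64(p[j]) + uint64(t[j]) + c
			t[j-1], c = uint32(v), v>>32
		}
		v = uint64(t[limbs]) + c
		t[limbs-1] = uint32(v)
		t[limbs] = t[limbs+1] + uint32(v>>32)
	}
	// conditional final subtraction: the subl/sbbl chain and branch select
	var r [limbs]uint32
	var borrow uint64
	for j := 0; j < limbs; j++ {
		v := uint64(t[j]) - uint64(p[j]) - borrow
		r[j] = uint32(v)
		borrow = v >> 63 // 1 if the word subtraction wrapped
	}
	if borrow > uint64(t[limbs]) { // t < p: keep the unsubtracted value
		copy(r[:], t[:limbs])
	}
	return r
}

The heavy spill/reload traffic in the generated code exists because the fully unrolled 13-limb state cannot fit in the eight 32-bit x86 registers; the stack slots play the role of t here.
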
- movl 1540(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 1536(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 1532(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 1528(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 1524(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 1520(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1516(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 1512(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 1508(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1504(%esp), %edi - movl 1500(%esp), %ebp - movl 1496(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl %eax, (%esp) - leal 1432(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 1432(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 1436(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1440(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 1444(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - adcl 1448(%esp), %edi - movl 48(%esp), %eax # 4-byte Reload - adcl 1452(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 1456(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1460(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1464(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1468(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 1472(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 1476(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1480(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1484(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - sbbl %esi, %esi - movl 1576(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 1376(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - andl $1, %esi - movl 76(%esp), %ecx # 4-byte Reload - addl 1376(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1380(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1384(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 1388(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1392(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 1396(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1400(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 1404(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 1408(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1412(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1416(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %ebp # 4-byte Reload - adcl 1420(%esp), %ebp - movl 80(%esp), %eax # 4-byte Reload - adcl 1424(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 1428(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1320(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - andl $1, %esi - movl %esi, %eax - movl 76(%esp), %ecx # 4-byte Reload - addl 1320(%esp), %ecx - movl 
84(%esp), %ecx # 4-byte Reload - adcl 1324(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1328(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1332(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1336(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 32(%esp), %esi # 4-byte Reload - adcl 1340(%esp), %esi - movl 36(%esp), %ecx # 4-byte Reload - adcl 1344(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl 1348(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1352(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1356(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1360(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - adcl 1364(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 1368(%esp), %ebp - movl 64(%esp), %edi # 4-byte Reload - adcl 1372(%esp), %edi - adcl $0, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 1264(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 84(%esp), %ecx # 4-byte Reload - addl 1264(%esp), %ecx - movl 60(%esp), %eax # 4-byte Reload - adcl 1268(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1272(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1276(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 1280(%esp), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1284(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1288(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1292(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1296(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 1300(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 1304(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 1308(%esp), %ebp - adcl 1312(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1316(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl %ecx, %edi - movl %edi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1208(%esp), %ecx - movl 1580(%esp), %eax - movl %eax, %edx - calll .LmulPv416x32 - movl 84(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 1208(%esp), %edi - movl 60(%esp), %ecx # 4-byte Reload - adcl 1212(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1216(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1220(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - adcl 1224(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 1228(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1232(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1236(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1240(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte 
Spill - adcl 1244(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl 1248(%esp), %edi - adcl 1252(%esp), %ebp - movl %ebp, %esi - movl 64(%esp), %ecx # 4-byte Reload - adcl 1256(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl 1260(%esp), %ebp - adcl $0, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 1152(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 60(%esp), %ecx # 4-byte Reload - addl 1152(%esp), %ecx - movl 40(%esp), %eax # 4-byte Reload - adcl 1156(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1160(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 1164(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1168(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1172(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1176(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1180(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1184(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 1188(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - adcl 1192(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1196(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 1200(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1204(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %esi - movl %esi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1096(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - andl $1, %ebp - movl %ebp, %eax - addl 1096(%esp), %esi - movl 40(%esp), %ecx # 4-byte Reload - adcl 1100(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1104(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - adcl 1108(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 1112(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1116(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1120(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 1124(%esp), %ebp - movl 68(%esp), %edi # 4-byte Reload - adcl 1128(%esp), %edi - movl 72(%esp), %esi # 4-byte Reload - adcl 1132(%esp), %esi - movl 80(%esp), %ecx # 4-byte Reload - adcl 1136(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1140(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1144(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1148(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 1040(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 40(%esp), %ecx # 4-byte Reload - addl 1040(%esp), %ecx - movl 48(%esp), %eax # 4-byte Reload - adcl 1044(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 32(%esp), %eax # 
4-byte Reload - adcl 1048(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1052(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1056(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1060(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1064(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - adcl 1068(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - adcl 1072(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1076(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1080(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 1084(%esp), %esi - movl 84(%esp), %eax # 4-byte Reload - adcl 1088(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1092(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %ebp - movl %ebp, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 984(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - andl $1, %edi - addl 984(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 988(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %ebp # 4-byte Reload - adcl 996(%esp), %ebp - movl 44(%esp), %eax # 4-byte Reload - adcl 1000(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1004(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1008(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1012(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1016(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1020(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1024(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 1028(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1032(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1036(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl $0, %edi - movl 1576(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 928(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 48(%esp), %ecx # 4-byte Reload - addl 928(%esp), %ecx - movl 32(%esp), %eax # 4-byte Reload - adcl 932(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl 936(%esp), %ebp - movl %ebp, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 940(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - adcl 944(%esp), %esi - movl 52(%esp), %eax # 4-byte Reload - adcl 948(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 952(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 956(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 960(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 76(%esp) # 
4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 976(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 980(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %ebp - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 872(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - andl $1, %edi - addl 872(%esp), %ebp - movl 32(%esp), %eax # 4-byte Reload - adcl 876(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %ebp # 4-byte Reload - adcl 884(%esp), %ebp - adcl 888(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl 904(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 912(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 920(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 924(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 816(%esp), %ecx - movl 1572(%esp), %eax - movl %eax, %edx - calll .LmulPv416x32 - movl 32(%esp), %ecx # 4-byte Reload - addl 816(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 824(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 828(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 844(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 848(%esp), %ebp - movl 76(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 856(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 760(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - andl $1, %esi - movl %esi, %eax - movl 32(%esp), %ecx # 4-byte Reload - addl 760(%esp), %ecx - movl 36(%esp), %ecx # 4-byte Reload - adcl 764(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 768(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - 
movl 56(%esp), %ecx # 4-byte Reload - adcl 772(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 776(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 780(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 784(%esp), %esi - movl 80(%esp), %ecx # 4-byte Reload - adcl 788(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - adcl 792(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 796(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - adcl 800(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 804(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 808(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - adcl 812(%esp), %edi - adcl $0, %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 704(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 36(%esp), %eax # 4-byte Reload - addl 704(%esp), %eax - movl 44(%esp), %ecx # 4-byte Reload - adcl 708(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 712(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 716(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 720(%esp), %ebp - adcl 724(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 728(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 732(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 736(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 740(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 744(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 748(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - adcl 752(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - adcl 756(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %eax, %esi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 648(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - movl %edi, %eax - andl $1, %eax - addl 648(%esp), %esi - movl 44(%esp), %ecx # 4-byte Reload - adcl 652(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 656(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 660(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl 664(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 668(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - adcl 672(%esp), %edi - movl 64(%esp), %esi # 4-byte Reload - adcl 676(%esp), %esi - movl 76(%esp), %ecx # 4-byte Reload - adcl 680(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 684(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 688(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 692(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 
4-byte Reload - adcl 696(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - adcl 700(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 592(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 44(%esp), %ecx # 4-byte Reload - addl 592(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 600(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 612(%esp), %edi - movl %edi, 80(%esp) # 4-byte Spill - adcl 616(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 620(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 624(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 636(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 536(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - movl 44(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 536(%esp), %esi - movl 56(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 544(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 560(%esp), %esi - adcl 564(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 572(%esp), %edi - movl 48(%esp), %ebp # 4-byte Reload - adcl 576(%esp), %ebp - movl 40(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 480(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 56(%esp), %ecx # 4-byte Reload - addl 480(%esp), %ecx - movl 52(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 496(%esp), %eax - movl %eax, 
80(%esp) # 4-byte Spill - adcl 500(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 512(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - adcl 516(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 424(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - movl %edi, %ecx - andl $1, %ecx - addl 424(%esp), %esi - movl 52(%esp), %ebp # 4-byte Reload - adcl 428(%esp), %ebp - movl 68(%esp), %edi # 4-byte Reload - adcl 432(%esp), %edi - movl 72(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 440(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 444(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 368(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - addl 368(%esp), %ebp - adcl 372(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl 376(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 384(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - adcl 392(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - sbbl %eax, %eax 
- movl %eax, 52(%esp) # 4-byte Spill - movl %ebp, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 312(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - movl 52(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 312(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 320(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 328(%esp), %ebp - movl 76(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 336(%esp), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 340(%esp), %edi - movl 48(%esp), %esi # 4-byte Reload - adcl 344(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 256(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 68(%esp), %ecx # 4-byte Reload - addl 256(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 268(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 280(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - adcl 284(%esp), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - adcl 288(%esp), %edi - movl 32(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %eax - movl %ecx, %esi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 200(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - andl $1, %ebp - movl %ebp, %ecx - addl 200(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 208(%esp), %ebp - movl 64(%esp), %esi # 4-byte Reload - adcl 212(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl 
%eax, 48(%esp) # 4-byte Spill - adcl 232(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 32(%esp), %edi # 4-byte Reload - adcl 236(%esp), %edi - movl 36(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 144(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 72(%esp), %ecx # 4-byte Reload - addl 144(%esp), %ecx - adcl 148(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - adcl 152(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl 156(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 164(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 168(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 176(%esp), %edi - movl %edi, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - sbbl %edi, %edi - movl 28(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 88(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - andl $1, %edi - addl 88(%esp), %esi - movl 80(%esp), %eax # 4-byte Reload - movl 84(%esp), %esi # 4-byte Reload - adcl 92(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 96(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - adcl 100(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - adcl 104(%esp), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 108(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 48(%esp), %ebx # 4-byte Reload - adcl 112(%esp), %ebx - movl %ebx, 48(%esp) # 4-byte Spill - movl 40(%esp), %ebx # 4-byte Reload - adcl 116(%esp), %ebx - movl %ebx, 40(%esp) # 4-byte Spill - movl 32(%esp), %ebx # 4-byte Reload - adcl 120(%esp), %ebx - movl %ebx, 32(%esp) # 4-byte Spill - movl 36(%esp), %ebx # 4-byte Reload - adcl 124(%esp), %ebx - movl %ebx, 36(%esp) # 4-byte Spill - movl 44(%esp), %ebx # 4-byte Reload - adcl 128(%esp), %ebx - movl %ebx, 44(%esp) # 4-byte Spill - movl 56(%esp), %ebx # 4-byte Reload - adcl 132(%esp), %ebx - movl %ebx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ebx # 4-byte Reload - adcl 136(%esp), %ebx - movl %ebx, 52(%esp) # 4-byte Spill - movl 68(%esp), %ebx # 4-byte Reload - adcl 140(%esp), %ebx - movl %ebx, 68(%esp) # 4-byte Spill - adcl $0, %edi - movl 1580(%esp), %ebx - subl (%ebx), %eax - sbbl 4(%ebx), %ecx - sbbl 8(%ebx), %ebp - sbbl 12(%ebx), %esi - movl %esi, 4(%esp) # 4-byte Spill - sbbl 16(%ebx), %edx - 
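
The subl/sbbl chain running through this point computes t - p limb by limb, and the branch ladder below (.LBB194_2 onward) selects either that difference or the unreduced value, i.e. the conditional final subtraction modeled at the end of montMul above. On top of the same sketch, a typical round trip into and out of Montgomery form; toMont, fromMont and rSquared are hypothetical helpers, not mcl's API:

package montsketch

// toMont enters Montgomery form: montMul(a, R^2 mod p) = a*R mod p.
func toMont(a, rSquared, p *[limbs]uint32, nInv uint32) [limbs]uint32 {
	return montMul(a, rSquared, p, nInv)
}

// fromMont leaves Montgomery form by multiplying with 1: (a*R)*R^{-1} = a.
func fromMont(a, p *[limbs]uint32, nInv uint32) [limbs]uint32 {
	one := [limbs]uint32{1}
	return montMul(a, &one, p, nInv)
}

mcl_fp_montNF13Lbmi2, which follows, is an alternative code path with the same mulPv/imull structure; going by mcl's naming, NF presumably marks the variant emitted when the modulus leaves headroom in the top word, allowing the generator to simplify the carry and final-subtraction handling.
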
movl %edx, 8(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - sbbl 20(%ebx), %edx - movl %edx, 12(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - sbbl 24(%ebx), %edx - movl %edx, 16(%esp) # 4-byte Spill - movl 32(%esp), %edx # 4-byte Reload - sbbl 28(%ebx), %edx - movl 36(%esp), %esi # 4-byte Reload - sbbl 32(%ebx), %esi - movl %esi, 20(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - sbbl 36(%ebx), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - sbbl 40(%ebx), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - sbbl 44(%ebx), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - sbbl 48(%ebx), %esi - movl %esi, %ebx - sbbl $0, %edi - andl $1, %edi - jne .LBB194_2 -# BB#1: - movl %edx, 32(%esp) # 4-byte Spill -.LBB194_2: - movl %edi, %edx - testb %dl, %dl - movl 80(%esp), %edx # 4-byte Reload - jne .LBB194_4 -# BB#3: - movl %eax, %edx -.LBB194_4: - movl 1568(%esp), %eax - movl %edx, (%eax) - movl 64(%esp), %esi # 4-byte Reload - jne .LBB194_6 -# BB#5: - movl %ecx, %esi -.LBB194_6: - movl %esi, 4(%eax) - jne .LBB194_8 -# BB#7: - movl %ebp, 76(%esp) # 4-byte Spill -.LBB194_8: - movl 76(%esp), %ecx # 4-byte Reload - movl %ecx, 8(%eax) - movl 60(%esp), %ebp # 4-byte Reload - jne .LBB194_10 -# BB#9: - movl 4(%esp), %ecx # 4-byte Reload - movl %ecx, 84(%esp) # 4-byte Spill -.LBB194_10: - movl 84(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%eax) - jne .LBB194_12 -# BB#11: - movl 8(%esp), %ebp # 4-byte Reload -.LBB194_12: - movl %ebp, 16(%eax) - movl 48(%esp), %ecx # 4-byte Reload - jne .LBB194_14 -# BB#13: - movl 12(%esp), %ecx # 4-byte Reload -.LBB194_14: - movl %ecx, 20(%eax) - movl 40(%esp), %ecx # 4-byte Reload - jne .LBB194_16 -# BB#15: - movl 16(%esp), %ecx # 4-byte Reload -.LBB194_16: - movl %ecx, 24(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 36(%esp), %ecx # 4-byte Reload - jne .LBB194_18 -# BB#17: - movl 20(%esp), %ecx # 4-byte Reload -.LBB194_18: - movl %ecx, 32(%eax) - movl 44(%esp), %ecx # 4-byte Reload - jne .LBB194_20 -# BB#19: - movl 24(%esp), %ecx # 4-byte Reload -.LBB194_20: - movl %ecx, 36(%eax) - movl 56(%esp), %ecx # 4-byte Reload - jne .LBB194_22 -# BB#21: - movl 28(%esp), %ecx # 4-byte Reload -.LBB194_22: - movl %ecx, 40(%eax) - movl 52(%esp), %ecx # 4-byte Reload - jne .LBB194_24 -# BB#23: - movl 72(%esp), %ecx # 4-byte Reload -.LBB194_24: - movl %ecx, 44(%eax) - movl 68(%esp), %ecx # 4-byte Reload - jne .LBB194_26 -# BB#25: - movl %ebx, %ecx -.LBB194_26: - movl %ecx, 48(%eax) - addl $1548, %esp # imm = 0x60C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end194: - .size mcl_fp_mont13Lbmi2, .Lfunc_end194-mcl_fp_mont13Lbmi2 - - .globl mcl_fp_montNF13Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF13Lbmi2,@function -mcl_fp_montNF13Lbmi2: # @mcl_fp_montNF13Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1548, %esp # imm = 0x60C - calll .L195$pb -.L195$pb: - popl %ebx -.Ltmp36: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp36-.L195$pb), %ebx - movl 1580(%esp), %eax - movl -4(%eax), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1488(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 1488(%esp), %edi - movl 1492(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl %edi, %eax - imull %esi, %eax - movl 1540(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 1536(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte 
Spill - movl 1532(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 1528(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 1524(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 1520(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 1516(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1512(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 1508(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1504(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 1500(%esp), %esi - movl 1496(%esp), %ebp - movl %eax, (%esp) - leal 1432(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 1432(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 1436(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 1440(%esp), %ebp - adcl 1444(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1448(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1452(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1456(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1460(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1464(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1468(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - adcl 1472(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 1476(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1480(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 1484(%esp), %edi - movl 1576(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 1376(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 1428(%esp), %ecx - movl 80(%esp), %edx # 4-byte Reload - addl 1376(%esp), %edx - adcl 1380(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 1384(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1388(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1392(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1396(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1400(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1404(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1408(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1412(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1416(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1420(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1424(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl %edx, %esi - movl %esi, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1320(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 1320(%esp), %esi - adcl 1324(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 1328(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1332(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1336(%esp), %eax - movl 
%eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - adcl 1340(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 1344(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1348(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1352(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1356(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl 1360(%esp), %edi - movl 76(%esp), %eax # 4-byte Reload - adcl 1364(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1368(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1372(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 1264(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 1316(%esp), %eax - addl 1264(%esp), %ebp - movl 64(%esp), %ecx # 4-byte Reload - adcl 1268(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1272(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1276(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - adcl 1280(%esp), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - adcl 1284(%esp), %esi - movl 52(%esp), %ecx # 4-byte Reload - adcl 1288(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1292(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1296(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl 1300(%esp), %edi - movl 76(%esp), %ecx # 4-byte Reload - adcl 1304(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1308(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1312(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %ebp, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1208(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 1208(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 1212(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1216(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1220(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1224(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 1228(%esp), %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1232(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1236(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1240(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 1244(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 1248(%esp), %esi - movl 84(%esp), %edi # 4-byte Reload - adcl 1252(%esp), %edi - movl 80(%esp), %ebp # 4-byte Reload - adcl 1256(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 1260(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 1152(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 1204(%esp), 
%eax - movl 64(%esp), %edx # 4-byte Reload - addl 1152(%esp), %edx - movl 40(%esp), %ecx # 4-byte Reload - adcl 1156(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1160(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 1164(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1168(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1172(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1176(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1180(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1184(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - adcl 1188(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - adcl 1192(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - adcl 1196(%esp), %ebp - movl 68(%esp), %ecx # 4-byte Reload - adcl 1200(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl %edx, %esi - movl %esi, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1096(%esp), %ecx - movl 1580(%esp), %eax - movl %eax, %edx - calll .LmulPv416x32 - addl 1096(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 1100(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1104(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1108(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1112(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - adcl 1116(%esp), %esi - movl 56(%esp), %edi # 4-byte Reload - adcl 1120(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 1124(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1128(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1132(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1136(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 1140(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1144(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 1148(%esp), %ebp - movl 1576(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 1040(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 1092(%esp), %eax - movl 40(%esp), %edx # 4-byte Reload - addl 1040(%esp), %edx - movl 48(%esp), %ecx # 4-byte Reload - adcl 1044(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 1048(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1052(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - adcl 1056(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - adcl 1060(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1064(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1068(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1072(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1076(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 
80(%esp), %ecx # 4-byte Reload - adcl 1080(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1084(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - adcl 1088(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl %eax, %esi - adcl $0, %esi - movl %edx, %edi - movl %edi, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 984(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 984(%esp), %edi - movl 48(%esp), %eax # 4-byte Reload - adcl 988(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 996(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 1000(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1004(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 1008(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 1012(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1016(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1020(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1024(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1028(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1032(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 1036(%esp), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 928(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 980(%esp), %eax - movl 48(%esp), %ecx # 4-byte Reload - addl 928(%esp), %ecx - movl 36(%esp), %edx # 4-byte Reload - adcl 932(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl 936(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 940(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 944(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - adcl 948(%esp), %ebp - movl 72(%esp), %edx # 4-byte Reload - adcl 952(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 956(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 960(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 964(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 968(%esp), %esi - movl 64(%esp), %edx # 4-byte Reload - adcl 972(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - adcl 976(%esp), %edx - movl %edx, 40(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 48(%esp) # 4-byte Spill - movl %ecx, %edi - movl %edi, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 872(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 872(%esp), %edi - movl 36(%esp), %edi # 4-byte Reload - adcl 876(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 888(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 892(%esp), %ebp - movl 72(%esp), %eax # 4-byte 
Reload - adcl 896(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 912(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 920(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 924(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 816(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 868(%esp), %edx - addl 816(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 828(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 832(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 836(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 856(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - adcl 860(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl %edi, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 760(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 760(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 780(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 784(%esp), %esi - movl 84(%esp), %edi # 4-byte Reload - adcl 788(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 804(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 704(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 756(%esp), %eax - movl 44(%esp), %ecx # 4-byte Reload - addl 704(%esp), %ecx - movl 52(%esp), %edx # 4-byte Reload - adcl 708(%esp), 
%edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 712(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 716(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 720(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - adcl 724(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - adcl 728(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl 732(%esp), %esi - movl 68(%esp), %edx # 4-byte Reload - adcl 736(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 740(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - adcl 744(%esp), %ebp - movl 48(%esp), %edx # 4-byte Reload - adcl 748(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 36(%esp), %edx # 4-byte Reload - adcl 752(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 648(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 648(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 676(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 688(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %edi # 4-byte Reload - adcl 696(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 592(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 644(%esp), %edx - movl 52(%esp), %ecx # 4-byte Reload - addl 592(%esp), %ecx - movl 56(%esp), %esi # 4-byte Reload - adcl 596(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 624(%esp), %ebp - movl 40(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 636(%esp), %edi - movl %edi, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 
4-byte Reload - adcl 640(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 52(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 536(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 536(%esp), %edi - adcl 540(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 556(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 564(%esp), %esi - adcl 568(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - adcl 572(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 480(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 532(%esp), %edx - movl 56(%esp), %ecx # 4-byte Reload - addl 480(%esp), %ecx - movl 60(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 496(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 504(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl 508(%esp), %edi - adcl 512(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 528(%esp), %ebp - adcl $0, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 424(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 424(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 440(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 444(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %edi, %esi - adcl 452(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - 
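# Each unrolled round of this Montgomery multiply follows the same
# two-call pattern: the next 32-bit limb of the multiplier (read through
# the pointer at 1576(%esp)) goes to .LmulPv416x32 with %edx set to the
# multiplicand pointer at 1572(%esp), and the 13-limb partial product is
# folded into the spilled accumulator by the adcl chain; the low
# accumulator word is then scaled by the Montgomery constant cached at
# 32(%esp) (imull), and a second .LmulPv416x32 call with %edx set to the
# modulus pointer at 1580(%esp) adds m*p, cancelling the bottom limb so
# the accumulator shifts down one word. A rough C-style sketch of one
# round, with illustrative names rather than mcl's own:
#     t = t + (mp_t)x * y[i];        /* first mulPv416x32 + adcl chain  */
#     m = (uint32_t)t * inv;         /* imull 32(%esp)                  */
#     t = (t + (mp_t)m * p) >> 32;   /* second mulPv416x32 + adcl chain */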
movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - adcl 460(%esp), %edi - movl 36(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 472(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 368(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 420(%esp), %edx - movl 60(%esp), %ecx # 4-byte Reload - addl 368(%esp), %ecx - movl 72(%esp), %ebp # 4-byte Reload - adcl 372(%esp), %ebp - movl 76(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 392(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 400(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 36(%esp), %edi # 4-byte Reload - adcl 404(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 60(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 312(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 312(%esp), %esi - adcl 316(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl 320(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 332(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 348(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 256(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 308(%esp), %edx - movl 72(%esp), %ecx # 4-byte Reload - addl 256(%esp), %ecx - adcl 260(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 272(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 
4-byte Reload - adcl 276(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 288(%esp), %edi - movl %edi, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 72(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 200(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 200(%esp), %esi - adcl 204(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 212(%esp), %ebp - movl 68(%esp), %esi # 4-byte Reload - adcl 216(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - adcl 228(%esp), %edi - movl 36(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 144(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 196(%esp), %edx - movl 76(%esp), %ecx # 4-byte Reload - addl 144(%esp), %ecx - movl 84(%esp), %eax # 4-byte Reload - adcl 148(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 152(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - adcl 156(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - adcl 164(%esp), %ebp - adcl 168(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 176(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 88(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 
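# From here to .Lfunc_end195 is the tail of mcl_fp_montNF13Lbmi2: after
# the final m*p addition, the code computes t - p with a subl/sbbl borrow
# chain into scratch slots, sign-extends the top word of the difference
# (sarl $31), and the js/.LBB195_* branches then pick, word by word,
# either t (difference negative, i.e. t < p) or t - p, storing the 13
# selected words through the result pointer at 1568(%esp). Roughly, as a
# hedged sketch:
#     d = t - p;                  /* sbbl chain            */
#     r = (d < 0) ? t : d;       /* per-word js selection */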
- addl 88(%esp), %esi - movl 84(%esp), %eax # 4-byte Reload - movl 68(%esp), %edi # 4-byte Reload - adcl 92(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 96(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - adcl 100(%esp), %edi - movl 64(%esp), %ebx # 4-byte Reload - adcl 104(%esp), %ebx - movl %ebx, 64(%esp) # 4-byte Spill - adcl 108(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl %ebp, %esi - movl 48(%esp), %edx # 4-byte Reload - adcl 112(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 36(%esp), %edx # 4-byte Reload - adcl 116(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - adcl 120(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 124(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 128(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 132(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 136(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 140(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, %edx - movl 1580(%esp), %eax - subl (%eax), %edx - movl %ecx, %ebp - sbbl 4(%eax), %ebp - movl %edi, %ecx - sbbl 8(%eax), %ecx - sbbl 12(%eax), %ebx - sbbl 16(%eax), %esi - movl %esi, 4(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - sbbl 20(%eax), %esi - movl %esi, 8(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - sbbl 24(%eax), %esi - movl %esi, 12(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - sbbl 28(%eax), %esi - movl %esi, 16(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - sbbl 32(%eax), %esi - movl %esi, 20(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - sbbl 36(%eax), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - sbbl 40(%eax), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - sbbl 44(%eax), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - sbbl 48(%eax), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl %esi, %eax - sarl $31, %eax - testl %eax, %eax - movl 84(%esp), %eax # 4-byte Reload - js .LBB195_2 -# BB#1: - movl %edx, %eax -.LBB195_2: - movl 1568(%esp), %edx - movl %eax, (%edx) - movl 80(%esp), %esi # 4-byte Reload - js .LBB195_4 -# BB#3: - movl %ebp, %esi -.LBB195_4: - movl %esi, 4(%edx) - movl 64(%esp), %eax # 4-byte Reload - js .LBB195_6 -# BB#5: - movl %ecx, %edi -.LBB195_6: - movl %edi, 8(%edx) - js .LBB195_8 -# BB#7: - movl %ebx, %eax -.LBB195_8: - movl %eax, 12(%edx) - movl 40(%esp), %eax # 4-byte Reload - js .LBB195_10 -# BB#9: - movl 4(%esp), %eax # 4-byte Reload -.LBB195_10: - movl %eax, 16(%edx) - movl 48(%esp), %eax # 4-byte Reload - js .LBB195_12 -# BB#11: - movl 8(%esp), %eax # 4-byte Reload -.LBB195_12: - movl %eax, 20(%edx) - movl 36(%esp), %eax # 4-byte Reload - js .LBB195_14 -# BB#13: - movl 12(%esp), %eax # 4-byte Reload -.LBB195_14: - movl %eax, 24(%edx) - movl 44(%esp), %eax # 4-byte Reload - js .LBB195_16 -# BB#15: - movl 16(%esp), %eax # 4-byte Reload -.LBB195_16: - movl %eax, 28(%edx) - movl 52(%esp), %eax # 4-byte Reload - js .LBB195_18 -# BB#17: - movl 20(%esp), %eax # 4-byte Reload -.LBB195_18: - movl %eax, 32(%edx) - movl 56(%esp), %eax # 4-byte Reload - js .LBB195_20 -# BB#19: - movl 24(%esp), %eax # 4-byte Reload -.LBB195_20: - movl %eax, 36(%edx) - movl 
60(%esp), %eax # 4-byte Reload - js .LBB195_22 -# BB#21: - movl 28(%esp), %eax # 4-byte Reload -.LBB195_22: - movl %eax, 40(%edx) - movl 72(%esp), %eax # 4-byte Reload - js .LBB195_24 -# BB#23: - movl 32(%esp), %eax # 4-byte Reload -.LBB195_24: - movl %eax, 44(%edx) - movl 76(%esp), %eax # 4-byte Reload - js .LBB195_26 -# BB#25: - movl 68(%esp), %eax # 4-byte Reload -.LBB195_26: - movl %eax, 48(%edx) - addl $1548, %esp # imm = 0x60C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end195: - .size mcl_fp_montNF13Lbmi2, .Lfunc_end195-mcl_fp_montNF13Lbmi2 - - .globl mcl_fp_montRed13Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed13Lbmi2,@function -mcl_fp_montRed13Lbmi2: # @mcl_fp_montRed13Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $892, %esp # imm = 0x37C - calll .L196$pb -.L196$pb: - popl %eax -.Ltmp37: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp37-.L196$pb), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 920(%esp), %edx - movl -4(%edx), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 916(%esp), %ecx - movl (%ecx), %ebx - movl %ebx, 76(%esp) # 4-byte Spill - movl 4(%ecx), %edi - movl %edi, 80(%esp) # 4-byte Spill - imull %eax, %ebx - movl 100(%ecx), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 96(%ecx), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%ecx), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 88(%ecx), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 84(%ecx), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 80(%ecx), %esi - movl %esi, 136(%esp) # 4-byte Spill - movl 76(%ecx), %esi - movl %esi, 144(%esp) # 4-byte Spill - movl 72(%ecx), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 68(%ecx), %esi - movl %esi, 128(%esp) # 4-byte Spill - movl 64(%ecx), %esi - movl %esi, 148(%esp) # 4-byte Spill - movl 60(%ecx), %esi - movl %esi, 152(%esp) # 4-byte Spill - movl 56(%ecx), %esi - movl %esi, 140(%esp) # 4-byte Spill - movl 52(%ecx), %esi - movl %esi, 156(%esp) # 4-byte Spill - movl 48(%ecx), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 44(%ecx), %ebp - movl %ebp, 124(%esp) # 4-byte Spill - movl 40(%ecx), %ebp - movl %ebp, 108(%esp) # 4-byte Spill - movl 36(%ecx), %ebp - movl %ebp, 104(%esp) # 4-byte Spill - movl 32(%ecx), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 28(%ecx), %ebp - movl 24(%ecx), %edi - movl 20(%ecx), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 16(%ecx), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 12(%ecx), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 8(%ecx), %esi - movl (%edx), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 48(%edx), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 44(%edx), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 40(%edx), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 36(%edx), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 32(%edx), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 28(%edx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 24(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 20(%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 16(%edx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 12(%edx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 8(%edx), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 4(%edx), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl %ebx, (%esp) - leal 832(%esp), %ecx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 76(%esp), %eax # 4-byte Reload - addl 832(%esp), %eax - movl 80(%esp), %ecx # 4-byte Reload - adcl 836(%esp), %ecx - adcl 840(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - 
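# This is the body of mcl_fp_montRed13Lbmi2, the standalone Montgomery
# reduction: 920(%esp) appears to hold the modulus pointer p (with the
# precomputed constant n' read from 4 bytes below it and cached at
# 72(%esp)), 916(%esp) the 26-limb input, and 912(%esp) the 13-limb
# result; %ebx is reloaded from 84(%esp) with the GOT address before each
# position-independent call to .LmulPv416x32. Each of the 13 rounds takes
# the current low limb t[i], forms m = t[i] * n' mod 2^32 (imull
# 72(%esp)), and adds m*p at limb offset i so that limb cancels. A rough
# C-style sketch with illustrative names:
#     for (i = 0; i < 13; i++) {
#         m = (uint32_t)t[i] * nprime;     /* imull 72(%esp)           */
#         add_shifted(t, m, p, i);         /* mulPv416x32 + adcl chain */
#     }
#     /* copy the top 13 limbs out, with one conditional subtract of p */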
movl 56(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 856(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - adcl 860(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 872(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 876(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - sbbl %esi, %esi - movl %ecx, %eax - movl %ecx, %edi - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 776(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - andl $1, %esi - addl 776(%esp), %edi - movl 80(%esp), %ecx # 4-byte Reload - adcl 780(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 784(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 828(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - movl 132(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, %edi - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 112(%esp) # 4-byte Spill - movl 96(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 
4-byte Folded Spill - adcl $0, %esi - movl %esi, 80(%esp) # 4-byte Spill - movl %ecx, %esi - movl %esi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 720(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 720(%esp), %esi - movl 56(%esp), %esi # 4-byte Reload - adcl 724(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 728(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 732(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 736(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 752(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 756(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 132(%esp) # 4-byte Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl 100(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %esi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 664(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 664(%esp), %esi - movl 60(%esp), %ecx # 4-byte Reload - adcl 668(%esp), %ecx - movl 64(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 688(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 696(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 712(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 716(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 
4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 96(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %ecx, %edi - movl %edi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 608(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 608(%esp), %edi - movl 64(%esp), %ecx # 4-byte Reload - adcl 612(%esp), %ecx - movl 68(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 624(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 636(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - movl 144(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 92(%esp) # 4-byte Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %ecx, %esi - movl %esi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 552(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 552(%esp), %esi - movl 68(%esp), %ecx # 4-byte Reload - adcl 556(%esp), %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - 
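# The long runs of "adcl $0, N(%esp) # 4-byte Folded Spill" in each round
# just ripple the incoming carry through the upper limbs that still sit
# in spill slots; one slot leaves the chain per round as a low limb is
# retired. The earlier sbbl %esi, %esi / andl $1, %esi pair captures the
# carry flag as a 0/1 value in a register so it survives into the final
# conditional-subtract decision at the end of the reduction.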
movl %eax, 148(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 144(%esp) # 4-byte Spill - movl 136(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 112(%esp) # 4-byte Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %ecx, %edi - movl %edi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 496(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 496(%esp), %edi - movl 76(%esp), %ecx # 4-byte Reload - adcl 500(%esp), %ecx - movl 88(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 152(%esp), %ebp # 4-byte Reload - adcl 532(%esp), %ebp - movl 148(%esp), %edi # 4-byte Reload - adcl 536(%esp), %edi - movl 128(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 136(%esp) # 4-byte Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %esi - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 440(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 440(%esp), %esi - movl 88(%esp), %ecx # 4-byte Reload - adcl 444(%esp), %ecx - movl 104(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - adcl 472(%esp), %ebp - movl %ebp, 152(%esp) # 4-byte Spill - adcl 476(%esp), %edi - movl %edi, 148(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 
144(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %ecx, %esi - movl %esi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 384(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 384(%esp), %esi - movl 104(%esp), %ecx # 4-byte Reload - adcl 388(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %ebp # 4-byte Reload - adcl 404(%esp), %ebp - movl 140(%esp), %edi # 4-byte Reload - adcl 408(%esp), %edi - movl 152(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 128(%esp), %esi # 4-byte Reload - adcl 420(%esp), %esi - movl 132(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %ecx, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 328(%esp), %ecx - movl 920(%esp), %eax - movl %eax, %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 328(%esp), %eax - movl 108(%esp), %ecx # 4-byte Reload - adcl 332(%esp), %ecx - movl 124(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl 344(%esp), %ebp - movl %ebp, 156(%esp) # 4-byte Spill - adcl 348(%esp), %edi - movl %edi, 140(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - adcl 360(%esp), %esi - movl %esi, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) 
# 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl %ecx, %edi - movl %edi, %eax - movl 72(%esp), %esi # 4-byte Reload - imull %esi, %eax - movl %eax, (%esp) - leal 272(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 272(%esp), %edi - movl 124(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl 120(%esp), %edi # 4-byte Reload - adcl 280(%esp), %edi - movl 156(%esp), %ecx # 4-byte Reload - adcl 284(%esp), %ecx - movl %ecx, 156(%esp) # 4-byte Spill - movl 140(%esp), %ecx # 4-byte Reload - adcl 288(%esp), %ecx - movl %ecx, 140(%esp) # 4-byte Spill - movl 152(%esp), %ecx # 4-byte Reload - adcl 292(%esp), %ecx - movl %ecx, 152(%esp) # 4-byte Spill - movl 148(%esp), %ecx # 4-byte Reload - adcl 296(%esp), %ecx - movl %ecx, 148(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 300(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 304(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 144(%esp), %ecx # 4-byte Reload - adcl 308(%esp), %ecx - movl %ecx, 144(%esp) # 4-byte Spill - movl 136(%esp), %ecx # 4-byte Reload - adcl 312(%esp), %ecx - movl %ecx, 136(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 316(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 320(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 324(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl %eax, %ebp - imull %esi, %eax - movl %eax, (%esp) - leal 216(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 216(%esp), %ebp - movl %edi, %ecx - adcl 220(%esp), %ecx - movl 156(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %ebp # 4-byte Reload - adcl 228(%esp), %ebp - movl 152(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %edi # 4-byte Reload - adcl 244(%esp), %edi - movl 144(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %esi, %eax - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 160(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 160(%esp), %esi - movl 156(%esp), %eax # 4-byte Reload - adcl 164(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - adcl 168(%esp), %ebp - movl %ebp, 140(%esp) # 4-byte Spill - movl %ebp, %ebx - movl 152(%esp), 
%ecx # 4-byte Reload - adcl 172(%esp), %ecx - movl %ecx, 152(%esp) # 4-byte Spill - movl 148(%esp), %ebp # 4-byte Reload - adcl 176(%esp), %ebp - movl %ebp, 148(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl %eax, %edx - movl %edi, %eax - adcl 184(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl $0, %esi - movl 156(%esp), %edi # 4-byte Reload - subl 12(%esp), %edi # 4-byte Folded Reload - sbbl 4(%esp), %ebx # 4-byte Folded Reload - sbbl 8(%esp), %ecx # 4-byte Folded Reload - sbbl 16(%esp), %ebp # 4-byte Folded Reload - sbbl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl 132(%esp), %edx # 4-byte Reload - sbbl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl 144(%esp), %edx # 4-byte Reload - sbbl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - movl 136(%esp), %edx # 4-byte Reload - sbbl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl 116(%esp), %edx # 4-byte Reload - sbbl 36(%esp), %edx # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl 112(%esp), %edx # 4-byte Reload - sbbl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 104(%esp) # 4-byte Spill - movl 96(%esp), %edx # 4-byte Reload - sbbl 44(%esp), %edx # 4-byte Folded Reload - movl %edx, 108(%esp) # 4-byte Spill - movl 100(%esp), %edx # 4-byte Reload - sbbl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, 120(%esp) # 4-byte Spill - movl %eax, %edx - movl %esi, %eax - sbbl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 124(%esp) # 4-byte Spill - sbbl $0, %eax - andl $1, %eax - jne .LBB196_2 -# BB#1: - movl %ebp, 148(%esp) # 4-byte Spill -.LBB196_2: - testb %al, %al - movl 156(%esp), %ebp # 4-byte Reload - jne .LBB196_4 -# BB#3: - movl %edi, %ebp -.LBB196_4: - movl 912(%esp), %edi - movl %ebp, (%edi) - movl 140(%esp), %ebp # 4-byte Reload - jne .LBB196_6 -# BB#5: - movl %ebx, %ebp -.LBB196_6: - movl %ebp, 4(%edi) - movl 152(%esp), %ebx # 4-byte Reload - jne .LBB196_8 -# BB#7: - movl %ecx, %ebx -.LBB196_8: - movl %ebx, 8(%edi) - movl 148(%esp), %esi # 4-byte Reload - movl %esi, 12(%edi) - movl 116(%esp), %ebx # 4-byte Reload - movl 128(%esp), %esi # 4-byte Reload - jne .LBB196_10 -# BB#9: - movl 72(%esp), %esi # 4-byte Reload -.LBB196_10: - movl %esi, 16(%edi) - movl 112(%esp), %esi # 4-byte Reload - movl 132(%esp), %edx # 4-byte Reload - jne .LBB196_12 -# BB#11: - movl 76(%esp), %edx # 4-byte Reload -.LBB196_12: - movl %edx, 20(%edi) - movl 96(%esp), %edx # 4-byte Reload - movl 144(%esp), %ecx # 4-byte Reload - jne .LBB196_14 -# BB#13: - movl 80(%esp), %ecx # 4-byte Reload -.LBB196_14: - movl %ecx, 24(%edi) - movl 100(%esp), %ecx # 4-byte Reload - movl 136(%esp), %eax # 4-byte 
Reload - jne .LBB196_16 -# BB#15: - movl 84(%esp), %eax # 4-byte Reload -.LBB196_16: - movl %eax, 28(%edi) - movl 92(%esp), %eax # 4-byte Reload - jne .LBB196_18 -# BB#17: - movl 88(%esp), %ebx # 4-byte Reload -.LBB196_18: - movl %ebx, 32(%edi) - jne .LBB196_20 -# BB#19: - movl 104(%esp), %esi # 4-byte Reload -.LBB196_20: - movl %esi, 36(%edi) - jne .LBB196_22 -# BB#21: - movl 108(%esp), %edx # 4-byte Reload -.LBB196_22: - movl %edx, 40(%edi) - jne .LBB196_24 -# BB#23: - movl 120(%esp), %ecx # 4-byte Reload -.LBB196_24: - movl %ecx, 44(%edi) - jne .LBB196_26 -# BB#25: - movl 124(%esp), %eax # 4-byte Reload -.LBB196_26: - movl %eax, 48(%edi) - addl $892, %esp # imm = 0x37C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end196: - .size mcl_fp_montRed13Lbmi2, .Lfunc_end196-mcl_fp_montRed13Lbmi2 - - .globl mcl_fp_addPre13Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre13Lbmi2,@function -mcl_fp_addPre13Lbmi2: # @mcl_fp_addPre13Lbmi2 -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - movl 20(%esp), %ecx - addl (%ecx), %edx - adcl 4(%ecx), %esi - movl 8(%eax), %edi - adcl 8(%ecx), %edi - movl 16(%esp), %ebx - movl %edx, (%ebx) - movl 12(%ecx), %edx - movl %esi, 4(%ebx) - movl 16(%ecx), %esi - adcl 12(%eax), %edx - adcl 16(%eax), %esi - movl %edi, 8(%ebx) - movl 20(%eax), %edi - movl %edx, 12(%ebx) - movl 20(%ecx), %edx - adcl %edi, %edx - movl 24(%eax), %edi - movl %esi, 16(%ebx) - movl 24(%ecx), %esi - adcl %edi, %esi - movl 28(%eax), %edi - movl %edx, 20(%ebx) - movl 28(%ecx), %edx - adcl %edi, %edx - movl 32(%eax), %edi - movl %esi, 24(%ebx) - movl 32(%ecx), %esi - adcl %edi, %esi - movl 36(%eax), %edi - movl %edx, 28(%ebx) - movl 36(%ecx), %edx - adcl %edi, %edx - movl 40(%eax), %edi - movl %esi, 32(%ebx) - movl 40(%ecx), %esi - adcl %edi, %esi - movl 44(%eax), %edi - movl %edx, 36(%ebx) - movl 44(%ecx), %edx - adcl %edi, %edx - movl %esi, 40(%ebx) - movl %edx, 44(%ebx) - movl 48(%eax), %eax - movl 48(%ecx), %ecx - adcl %eax, %ecx - movl %ecx, 48(%ebx) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end197: - .size mcl_fp_addPre13Lbmi2, .Lfunc_end197-mcl_fp_addPre13Lbmi2 - - .globl mcl_fp_subPre13Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre13Lbmi2,@function -mcl_fp_subPre13Lbmi2: # @mcl_fp_subPre13Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edi - xorl %eax, %eax - movl 28(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%ecx), %ebx - sbbl 8(%edx), %ebx - movl 20(%esp), %ebp - movl %esi, (%ebp) - movl 12(%ecx), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ebp) - movl 16(%ecx), %edi - sbbl 16(%edx), %edi - movl %ebx, 8(%ebp) - movl 20(%edx), %ebx - movl %esi, 12(%ebp) - movl 20(%ecx), %esi - sbbl %ebx, %esi - movl 24(%edx), %ebx - movl %edi, 16(%ebp) - movl 24(%ecx), %edi - sbbl %ebx, %edi - movl 28(%edx), %ebx - movl %esi, 20(%ebp) - movl 28(%ecx), %esi - sbbl %ebx, %esi - movl 32(%edx), %ebx - movl %edi, 24(%ebp) - movl 32(%ecx), %edi - sbbl %ebx, %edi - movl 36(%edx), %ebx - movl %esi, 28(%ebp) - movl 36(%ecx), %esi - sbbl %ebx, %esi - movl 40(%edx), %ebx - movl %edi, 32(%ebp) - movl 40(%ecx), %edi - sbbl %ebx, %edi - movl 44(%edx), %ebx - movl %esi, 36(%ebp) - movl 44(%ecx), %esi - sbbl %ebx, %esi - movl %edi, 40(%ebp) - movl %esi, 44(%ebp) - movl 48(%edx), %edx - movl 48(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 48(%ebp) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - popl 
%ebx - popl %ebp - retl -.Lfunc_end198: - .size mcl_fp_subPre13Lbmi2, .Lfunc_end198-mcl_fp_subPre13Lbmi2 - - .globl mcl_fp_shr1_13Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_13Lbmi2,@function -mcl_fp_shr1_13Lbmi2: # @mcl_fp_shr1_13Lbmi2 -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - shrdl $1, %esi, %edx - movl 8(%esp), %ecx - movl %edx, (%ecx) - movl 8(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 4(%ecx) - movl 12(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 8(%ecx) - movl 16(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 12(%ecx) - movl 20(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 16(%ecx) - movl 24(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 20(%ecx) - movl 28(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 24(%ecx) - movl 32(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 28(%ecx) - movl 36(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 32(%ecx) - movl 40(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 36(%ecx) - movl 44(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 40(%ecx) - movl 48(%eax), %eax - shrdl $1, %eax, %esi - movl %esi, 44(%ecx) - shrl %eax - movl %eax, 48(%ecx) - popl %esi - retl -.Lfunc_end199: - .size mcl_fp_shr1_13Lbmi2, .Lfunc_end199-mcl_fp_shr1_13Lbmi2 - - .globl mcl_fp_add13Lbmi2 - .align 16, 0x90 - .type mcl_fp_add13Lbmi2,@function -mcl_fp_add13Lbmi2: # @mcl_fp_add13Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $40, %esp - movl 68(%esp), %ebp - movl (%ebp), %ecx - movl 4(%ebp), %eax - movl 64(%esp), %ebx - addl (%ebx), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - adcl 4(%ebx), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 8(%ebp), %eax - adcl 8(%ebx), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 12(%ebx), %ecx - movl 16(%ebx), %eax - adcl 12(%ebp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - adcl 16(%ebp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 20(%ebx), %eax - adcl 20(%ebp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 24(%ebx), %eax - adcl 24(%ebp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 28(%ebx), %eax - adcl 28(%ebp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 32(%ebx), %eax - adcl 32(%ebp), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 36(%ebx), %ecx - adcl 36(%ebp), %ecx - movl %ecx, (%esp) # 4-byte Spill - movl 40(%ebx), %edi - adcl 40(%ebp), %edi - movl 44(%ebx), %edx - adcl 44(%ebp), %edx - movl 48(%ebx), %esi - adcl 48(%ebp), %esi - movl 60(%esp), %ebp - movl 4(%esp), %ebx # 4-byte Reload - movl %ebx, (%ebp) - movl 36(%esp), %eax # 4-byte Reload - movl %eax, 4(%ebp) - movl 32(%esp), %eax # 4-byte Reload - movl %eax, 8(%ebp) - movl 28(%esp), %eax # 4-byte Reload - movl %eax, 12(%ebp) - movl 24(%esp), %eax # 4-byte Reload - movl %eax, 16(%ebp) - movl 20(%esp), %eax # 4-byte Reload - movl %eax, 20(%ebp) - movl 16(%esp), %eax # 4-byte Reload - movl %eax, 24(%ebp) - movl 12(%esp), %eax # 4-byte Reload - movl %eax, 28(%ebp) - movl 8(%esp), %eax # 4-byte Reload - movl %eax, 32(%ebp) - movl %ecx, 36(%ebp) - movl %edi, 40(%ebp) - movl %edx, 44(%ebp) - movl %esi, 48(%ebp) - sbbl %eax, %eax - andl $1, %eax - movl 72(%esp), %ecx - subl (%ecx), %ebx - movl %ebx, 4(%esp) # 4-byte Spill - movl 36(%esp), %ebx # 4-byte Reload - sbbl 4(%ecx), %ebx - movl %ebx, 36(%esp) # 4-byte Spill - movl 32(%esp), %ebx # 4-byte Reload - sbbl 8(%ecx), %ebx - movl %ebx, 32(%esp) # 4-byte Spill - movl 28(%esp), %ebx # 4-byte Reload - sbbl 12(%ecx), %ebx - movl %ebx, 28(%esp) # 4-byte Spill - movl 24(%esp), %ebx # 4-byte Reload - sbbl 16(%ecx), %ebx - movl %ebx, 24(%esp) 
# 4-byte Spill - movl 20(%esp), %ebx # 4-byte Reload - sbbl 20(%ecx), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - movl 16(%esp), %ebx # 4-byte Reload - sbbl 24(%ecx), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 12(%esp), %ebx # 4-byte Reload - sbbl 28(%ecx), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - movl 8(%esp), %ebx # 4-byte Reload - sbbl 32(%ecx), %ebx - movl %ebx, 8(%esp) # 4-byte Spill - movl (%esp), %ebx # 4-byte Reload - sbbl 36(%ecx), %ebx - sbbl 40(%ecx), %edi - sbbl 44(%ecx), %edx - sbbl 48(%ecx), %esi - sbbl $0, %eax - testb $1, %al - jne .LBB200_2 -# BB#1: # %nocarry - movl 4(%esp), %eax # 4-byte Reload - movl %eax, (%ebp) - movl 36(%esp), %eax # 4-byte Reload - movl %eax, 4(%ebp) - movl 32(%esp), %eax # 4-byte Reload - movl %eax, 8(%ebp) - movl 28(%esp), %eax # 4-byte Reload - movl %eax, 12(%ebp) - movl 24(%esp), %eax # 4-byte Reload - movl %eax, 16(%ebp) - movl 20(%esp), %eax # 4-byte Reload - movl %eax, 20(%ebp) - movl 16(%esp), %eax # 4-byte Reload - movl %eax, 24(%ebp) - movl 12(%esp), %eax # 4-byte Reload - movl %eax, 28(%ebp) - movl 8(%esp), %eax # 4-byte Reload - movl %eax, 32(%ebp) - movl %ebx, 36(%ebp) - movl %edi, 40(%ebp) - movl %edx, 44(%ebp) - movl %esi, 48(%ebp) -.LBB200_2: # %carry - addl $40, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end200: - .size mcl_fp_add13Lbmi2, .Lfunc_end200-mcl_fp_add13Lbmi2 - - .globl mcl_fp_addNF13Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF13Lbmi2,@function -mcl_fp_addNF13Lbmi2: # @mcl_fp_addNF13Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $100, %esp - movl 128(%esp), %esi - movl (%esi), %ecx - movl 4(%esi), %eax - movl 124(%esp), %edx - addl (%edx), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - adcl 4(%edx), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 48(%esi), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 44(%esi), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 40(%esi), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 36(%esi), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 32(%esi), %ebp - movl 28(%esi), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 24(%esi), %eax - movl 20(%esi), %ebx - movl 16(%esi), %edi - movl 12(%esi), %ecx - movl 8(%esi), %esi - adcl 8(%edx), %esi - movl %esi, 48(%esp) # 4-byte Spill - adcl 12(%edx), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl 16(%edx), %edi - movl %edi, 56(%esp) # 4-byte Spill - adcl 20(%edx), %ebx - movl %ebx, 60(%esp) # 4-byte Spill - adcl 24(%edx), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 28(%edx), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 32(%edx), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 84(%esp), %ebp # 4-byte Reload - adcl 36(%edx), %ebp - movl %ebp, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 40(%edx), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 44(%edx), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 48(%edx), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 132(%esp), %edx - movl 64(%esp), %eax # 4-byte Reload - subl (%edx), %eax - movl 68(%esp), %ebp # 4-byte Reload - sbbl 4(%edx), %ebp - movl %ebp, (%esp) # 4-byte Spill - sbbl 8(%edx), %esi - movl %esi, 4(%esp) # 4-byte Spill - sbbl 12(%edx), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - sbbl 16(%edx), %edi - movl %edi, 12(%esp) # 4-byte Spill - sbbl 20(%edx), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - sbbl 24(%edx), %ebp - movl %ebp, 20(%esp) # 
4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - sbbl 28(%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - sbbl 32(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - movl %esi, %ecx - movl %esi, %ebp - sbbl 36(%edx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - movl %esi, %ecx - movl %esi, %edi - sbbl 40(%edx), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - sbbl 44(%edx), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 88(%esp), %ebx # 4-byte Reload - sbbl 48(%edx), %ebx - movl %ebx, 44(%esp) # 4-byte Spill - sarl $31, %ebx - testl %ebx, %ebx - movl 64(%esp), %edx # 4-byte Reload - js .LBB201_2 -# BB#1: - movl %eax, %edx -.LBB201_2: - movl 120(%esp), %esi - movl %edx, (%esi) - movl 68(%esp), %edx # 4-byte Reload - js .LBB201_4 -# BB#3: - movl (%esp), %edx # 4-byte Reload -.LBB201_4: - movl %edx, 4(%esi) - movl %edi, %edx - movl 52(%esp), %ebx # 4-byte Reload - movl 48(%esp), %eax # 4-byte Reload - js .LBB201_6 -# BB#5: - movl 4(%esp), %eax # 4-byte Reload -.LBB201_6: - movl %eax, 8(%esi) - movl %ebp, %edi - movl 60(%esp), %eax # 4-byte Reload - js .LBB201_8 -# BB#7: - movl 8(%esp), %ebx # 4-byte Reload -.LBB201_8: - movl %ebx, 12(%esi) - movl 96(%esp), %ebp # 4-byte Reload - movl 56(%esp), %ecx # 4-byte Reload - js .LBB201_10 -# BB#9: - movl 12(%esp), %ecx # 4-byte Reload -.LBB201_10: - movl %ecx, 16(%esi) - movl 92(%esp), %ecx # 4-byte Reload - js .LBB201_12 -# BB#11: - movl 16(%esp), %eax # 4-byte Reload -.LBB201_12: - movl %eax, 20(%esi) - movl 72(%esp), %ebx # 4-byte Reload - js .LBB201_14 -# BB#13: - movl 20(%esp), %eax # 4-byte Reload - movl %eax, 76(%esp) # 4-byte Spill -.LBB201_14: - movl 76(%esp), %eax # 4-byte Reload - movl %eax, 24(%esi) - js .LBB201_16 -# BB#15: - movl 24(%esp), %ebp # 4-byte Reload -.LBB201_16: - movl %ebp, 28(%esi) - js .LBB201_18 -# BB#17: - movl 28(%esp), %ebx # 4-byte Reload -.LBB201_18: - movl %ebx, 32(%esi) - js .LBB201_20 -# BB#19: - movl 32(%esp), %edi # 4-byte Reload -.LBB201_20: - movl %edi, 36(%esi) - js .LBB201_22 -# BB#21: - movl 36(%esp), %edx # 4-byte Reload -.LBB201_22: - movl %edx, 40(%esi) - js .LBB201_24 -# BB#23: - movl 40(%esp), %ecx # 4-byte Reload -.LBB201_24: - movl %ecx, 44(%esi) - movl 88(%esp), %eax # 4-byte Reload - js .LBB201_26 -# BB#25: - movl 44(%esp), %eax # 4-byte Reload -.LBB201_26: - movl %eax, 48(%esi) - addl $100, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end201: - .size mcl_fp_addNF13Lbmi2, .Lfunc_end201-mcl_fp_addNF13Lbmi2 - - .globl mcl_fp_sub13Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub13Lbmi2,@function -mcl_fp_sub13Lbmi2: # @mcl_fp_sub13Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $44, %esp - movl 68(%esp), %esi - movl (%esi), %eax - movl 4(%esi), %ecx - xorl %ebx, %ebx - movl 72(%esp), %edi - subl (%edi), %eax - movl %eax, 12(%esp) # 4-byte Spill - sbbl 4(%edi), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 8(%esi), %eax - sbbl 8(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 12(%esi), %eax - sbbl 12(%edi), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 16(%esi), %eax - sbbl 16(%edi), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 20(%esi), %eax - sbbl 20(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 24(%esi), %eax - sbbl 24(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 28(%esi), %eax - sbbl 28(%edi), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 32(%esi), %edx - sbbl 
32(%edi), %edx - movl %edx, 8(%esp) # 4-byte Spill - movl 36(%esi), %ecx - sbbl 36(%edi), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 40(%esi), %eax - sbbl 40(%edi), %eax - movl %eax, (%esp) # 4-byte Spill - movl 44(%esi), %ebp - sbbl 44(%edi), %ebp - movl 48(%esi), %esi - sbbl 48(%edi), %esi - sbbl $0, %ebx - testb $1, %bl - movl 64(%esp), %ebx - movl 12(%esp), %edi # 4-byte Reload - movl %edi, (%ebx) - movl 28(%esp), %edi # 4-byte Reload - movl %edi, 4(%ebx) - movl 36(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl 40(%esp), %edi # 4-byte Reload - movl %edi, 12(%ebx) - movl 32(%esp), %edi # 4-byte Reload - movl %edi, 16(%ebx) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 20(%ebx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 24(%ebx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 28(%ebx) - movl %edx, 32(%ebx) - movl %ecx, 36(%ebx) - movl %eax, 40(%ebx) - movl %ebp, 44(%ebx) - movl %esi, 48(%ebx) - je .LBB202_2 -# BB#1: # %carry - movl %esi, %edi - movl 76(%esp), %esi - movl 12(%esp), %ecx # 4-byte Reload - addl (%esi), %ecx - movl %ecx, (%ebx) - movl 28(%esp), %edx # 4-byte Reload - adcl 4(%esi), %edx - movl %edx, 4(%ebx) - movl 36(%esp), %ecx # 4-byte Reload - adcl 8(%esi), %ecx - movl 12(%esi), %eax - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %ecx, 8(%ebx) - movl 16(%esi), %ecx - adcl 32(%esp), %ecx # 4-byte Folded Reload - movl %eax, 12(%ebx) - movl 20(%esi), %eax - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %ecx, 16(%ebx) - movl 24(%esi), %ecx - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %eax, 20(%ebx) - movl 28(%esi), %eax - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %ecx, 24(%ebx) - movl 32(%esi), %ecx - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %eax, 28(%ebx) - movl 36(%esi), %eax - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %ecx, 32(%ebx) - movl 40(%esi), %ecx - adcl (%esp), %ecx # 4-byte Folded Reload - movl %eax, 36(%ebx) - movl %ecx, 40(%ebx) - movl 44(%esi), %eax - adcl %ebp, %eax - movl %eax, 44(%ebx) - movl 48(%esi), %eax - adcl %edi, %eax - movl %eax, 48(%ebx) -.LBB202_2: # %nocarry - addl $44, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end202: - .size mcl_fp_sub13Lbmi2, .Lfunc_end202-mcl_fp_sub13Lbmi2 - - .globl mcl_fp_subNF13Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF13Lbmi2,@function -mcl_fp_subNF13Lbmi2: # @mcl_fp_subNF13Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $80, %esp - movl 104(%esp), %ecx - movl (%ecx), %edx - movl 4(%ecx), %eax - movl 108(%esp), %edi - subl (%edi), %edx - movl %edx, 48(%esp) # 4-byte Spill - sbbl 4(%edi), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 48(%ecx), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 44(%ecx), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 40(%ecx), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 36(%ecx), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 32(%ecx), %ebp - movl 28(%ecx), %ebx - movl 24(%ecx), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 20(%ecx), %esi - movl 16(%ecx), %edx - movl 12(%ecx), %eax - movl 8(%ecx), %ecx - sbbl 8(%edi), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - sbbl 12(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - sbbl 16(%edi), %edx - movl %edx, 32(%esp) # 4-byte Spill - sbbl 20(%edi), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - sbbl 24(%edi), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - sbbl 28(%edi), %ebx - movl %ebx, 40(%esp) # 4-byte Spill - sbbl 32(%edi), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 
64(%esp), %eax # 4-byte Reload - sbbl 36(%edi), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - sbbl 40(%edi), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - sbbl 44(%edi), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - sbbl 48(%edi), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl %edx, %esi - sarl $31, %esi - movl %esi, %ecx - shldl $1, %edx, %ecx - movl 112(%esp), %edi - movl 4(%edi), %eax - andl %ecx, %eax - movl %eax, 56(%esp) # 4-byte Spill - andl (%edi), %ecx - movl 48(%edi), %eax - andl %esi, %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 44(%edi), %eax - andl %esi, %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 40(%edi), %eax - andl %esi, %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 36(%edi), %eax - andl %esi, %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 32(%edi), %eax - andl %esi, %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 28(%edi), %eax - andl %esi, %eax - movl %eax, (%esp) # 4-byte Spill - movl 24(%edi), %ebp - andl %esi, %ebp - movl 20(%edi), %ebx - andl %esi, %ebx - movl 16(%edi), %edx - andl %esi, %edx - rorxl $31, %esi, %eax - andl 12(%edi), %esi - andl 8(%edi), %eax - addl 48(%esp), %ecx # 4-byte Folded Reload - movl 56(%esp), %edi # 4-byte Reload - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %edi, 56(%esp) # 4-byte Spill - movl 100(%esp), %edi - movl %ecx, (%edi) - adcl 24(%esp), %eax # 4-byte Folded Reload - movl 56(%esp), %ecx # 4-byte Reload - movl %ecx, 4(%edi) - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %eax, 8(%edi) - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %esi, 12(%edi) - adcl 36(%esp), %ebx # 4-byte Folded Reload - movl %edx, 16(%edi) - adcl 76(%esp), %ebp # 4-byte Folded Reload - movl %ebx, 20(%edi) - movl (%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %ebp, 24(%edi) - movl 4(%esp), %ecx # 4-byte Reload - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %eax, 28(%edi) - movl 8(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %ecx, 32(%edi) - movl 12(%esp), %ecx # 4-byte Reload - adcl 68(%esp), %ecx # 4-byte Folded Reload - movl %eax, 36(%edi) - movl 16(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %ecx, 40(%edi) - movl %eax, 44(%edi) - movl 20(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%edi) - addl $80, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end203: - .size mcl_fp_subNF13Lbmi2, .Lfunc_end203-mcl_fp_subNF13Lbmi2 - - .globl mcl_fpDbl_add13Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add13Lbmi2,@function -mcl_fpDbl_add13Lbmi2: # @mcl_fpDbl_add13Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $96, %esp - movl 124(%esp), %ecx - movl 120(%esp), %esi - movl 12(%esi), %edi - movl 16(%esi), %edx - movl 8(%ecx), %ebx - movl (%ecx), %ebp - addl (%esi), %ebp - movl 116(%esp), %eax - movl %ebp, (%eax) - movl 4(%ecx), %ebp - adcl 4(%esi), %ebp - adcl 8(%esi), %ebx - adcl 12(%ecx), %edi - adcl 16(%ecx), %edx - movl %ebp, 4(%eax) - movl 60(%ecx), %ebp - movl %ebx, 8(%eax) - movl 20(%ecx), %ebx - movl %edi, 12(%eax) - movl 20(%esi), %edi - adcl %ebx, %edi - movl 24(%ecx), %ebx - movl %edx, 16(%eax) - movl 24(%esi), %edx - adcl %ebx, %edx - movl 28(%ecx), %ebx - movl %edi, 20(%eax) - movl 28(%esi), %edi - adcl %ebx, %edi - movl 32(%ecx), %ebx - movl %edx, 24(%eax) - movl 32(%esi), %edx - adcl %ebx, %edx - movl 36(%ecx), %ebx - movl %edi, 
28(%eax) - movl 36(%esi), %edi - adcl %ebx, %edi - movl 40(%ecx), %ebx - movl %edx, 32(%eax) - movl 40(%esi), %edx - adcl %ebx, %edx - movl 44(%ecx), %ebx - movl %edi, 36(%eax) - movl 44(%esi), %edi - adcl %ebx, %edi - movl 48(%ecx), %ebx - movl %edx, 40(%eax) - movl 48(%esi), %edx - adcl %ebx, %edx - movl 52(%ecx), %ebx - movl %edi, 44(%eax) - movl 52(%esi), %edi - adcl %ebx, %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 56(%ecx), %edi - movl %edx, 48(%eax) - movl 56(%esi), %eax - adcl %edi, %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esi), %eax - adcl %ebp, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 64(%ecx), %edx - movl 64(%esi), %eax - adcl %edx, %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 68(%ecx), %edx - movl 68(%esi), %eax - adcl %edx, %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 72(%ecx), %edx - movl 72(%esi), %eax - adcl %edx, %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%ecx), %edx - movl 76(%esi), %eax - adcl %edx, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 80(%ecx), %edx - movl 80(%esi), %eax - adcl %edx, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%ecx), %edx - movl 84(%esi), %eax - adcl %edx, %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%ecx), %edx - movl 88(%esi), %edi - adcl %edx, %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 92(%ecx), %edx - movl 92(%esi), %eax - adcl %edx, %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 96(%ecx), %edx - movl 96(%esi), %ebx - adcl %edx, %ebx - movl %ebx, 56(%esp) # 4-byte Spill - movl 100(%ecx), %ecx - movl 100(%esi), %esi - adcl %ecx, %esi - sbbl %edx, %edx - andl $1, %edx - movl 128(%esp), %ebp - movl 76(%esp), %ecx # 4-byte Reload - subl (%ebp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - sbbl 4(%ebp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - sbbl 8(%ebp), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - sbbl 12(%ebp), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - sbbl 16(%ebp), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - sbbl 20(%ebp), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - sbbl 24(%ebp), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - sbbl 28(%ebp), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - sbbl 32(%ebp), %ecx - movl %ecx, (%esp) # 4-byte Spill - sbbl 36(%ebp), %edi - movl %edi, 36(%esp) # 4-byte Spill - sbbl 40(%ebp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl %ebx, %eax - movl %esi, %ebx - sbbl 44(%ebp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl %ebx, %ecx - sbbl 48(%ebp), %ecx - sbbl $0, %edx - andl $1, %edx - jne .LBB204_2 -# BB#1: - movl %ecx, %ebx -.LBB204_2: - testb %dl, %dl - movl 76(%esp), %ecx # 4-byte Reload - movl 72(%esp), %edx # 4-byte Reload - movl 68(%esp), %esi # 4-byte Reload - movl 64(%esp), %edi # 4-byte Reload - movl 60(%esp), %ebp # 4-byte Reload - jne .LBB204_4 -# BB#3: - movl (%esp), %edx # 4-byte Reload - movl 4(%esp), %esi # 4-byte Reload - movl 8(%esp), %edi # 4-byte Reload - movl 12(%esp), %ebp # 4-byte Reload - movl 16(%esp), %eax # 4-byte Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload 
-.LBB204_4: - movl 116(%esp), %eax - movl %ecx, 52(%eax) - movl 80(%esp), %ecx # 4-byte Reload - movl %ecx, 56(%eax) - movl 84(%esp), %ecx # 4-byte Reload - movl %ecx, 60(%eax) - movl 88(%esp), %ecx # 4-byte Reload - movl %ecx, 64(%eax) - movl 92(%esp), %ecx # 4-byte Reload - movl %ecx, 68(%eax) - movl %ebp, 72(%eax) - movl %edi, 76(%eax) - movl %esi, 80(%eax) - movl %edx, 84(%eax) - movl 56(%esp), %ecx # 4-byte Reload - movl 52(%esp), %edx # 4-byte Reload - movl 48(%esp), %esi # 4-byte Reload - jne .LBB204_6 -# BB#5: - movl 36(%esp), %esi # 4-byte Reload -.LBB204_6: - movl %esi, 88(%eax) - jne .LBB204_8 -# BB#7: - movl 40(%esp), %edx # 4-byte Reload -.LBB204_8: - movl %edx, 92(%eax) - jne .LBB204_10 -# BB#9: - movl 44(%esp), %ecx # 4-byte Reload -.LBB204_10: - movl %ecx, 96(%eax) - movl %ebx, 100(%eax) - addl $96, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end204: - .size mcl_fpDbl_add13Lbmi2, .Lfunc_end204-mcl_fpDbl_add13Lbmi2 - - .globl mcl_fpDbl_sub13Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub13Lbmi2,@function -mcl_fpDbl_sub13Lbmi2: # @mcl_fpDbl_sub13Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $84, %esp - movl 108(%esp), %edi - movl (%edi), %eax - movl 4(%edi), %edx - movl 112(%esp), %ebx - subl (%ebx), %eax - sbbl 4(%ebx), %edx - movl 8(%edi), %esi - sbbl 8(%ebx), %esi - movl 104(%esp), %ecx - movl %eax, (%ecx) - movl 12(%edi), %eax - sbbl 12(%ebx), %eax - movl %edx, 4(%ecx) - movl 16(%edi), %edx - sbbl 16(%ebx), %edx - movl %esi, 8(%ecx) - movl 20(%ebx), %esi - movl %eax, 12(%ecx) - movl 20(%edi), %eax - sbbl %esi, %eax - movl 24(%ebx), %esi - movl %edx, 16(%ecx) - movl 24(%edi), %edx - sbbl %esi, %edx - movl 28(%ebx), %esi - movl %eax, 20(%ecx) - movl 28(%edi), %eax - sbbl %esi, %eax - movl 32(%ebx), %esi - movl %edx, 24(%ecx) - movl 32(%edi), %edx - sbbl %esi, %edx - movl 36(%ebx), %esi - movl %eax, 28(%ecx) - movl 36(%edi), %eax - sbbl %esi, %eax - movl 40(%ebx), %esi - movl %edx, 32(%ecx) - movl 40(%edi), %edx - sbbl %esi, %edx - movl 44(%ebx), %esi - movl %eax, 36(%ecx) - movl 44(%edi), %eax - sbbl %esi, %eax - movl 48(%ebx), %esi - movl %edx, 40(%ecx) - movl 48(%edi), %edx - sbbl %esi, %edx - movl 52(%ebx), %esi - movl %eax, 44(%ecx) - movl 52(%edi), %eax - sbbl %esi, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 56(%ebx), %eax - movl %edx, 48(%ecx) - movl 56(%edi), %edx - sbbl %eax, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 60(%ebx), %eax - movl 60(%edi), %edx - sbbl %eax, %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 64(%ebx), %eax - movl 64(%edi), %edx - sbbl %eax, %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 68(%ebx), %eax - movl 68(%edi), %edx - sbbl %eax, %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 72(%ebx), %eax - movl 72(%edi), %edx - sbbl %eax, %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 76(%ebx), %eax - movl 76(%edi), %edx - sbbl %eax, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 80(%ebx), %eax - movl 80(%edi), %edx - sbbl %eax, %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 84(%ebx), %eax - movl 84(%edi), %edx - sbbl %eax, %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 88(%ebx), %eax - movl 88(%edi), %edx - sbbl %eax, %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 92(%ebx), %eax - movl 92(%edi), %edx - sbbl %eax, %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 96(%ebx), %eax - movl 96(%edi), %edx - sbbl %eax, %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 100(%ebx), %eax - movl 100(%edi), %edx - sbbl %eax, %edx - movl %edx, 80(%esp) # 4-byte Spill - movl $0, %eax - sbbl 
$0, %eax - andl $1, %eax - movl 116(%esp), %edi - jne .LBB205_1 -# BB#2: - movl $0, 44(%esp) # 4-byte Folded Spill - jmp .LBB205_3 -.LBB205_1: - movl 48(%edi), %edx - movl %edx, 44(%esp) # 4-byte Spill -.LBB205_3: - testb %al, %al - jne .LBB205_4 -# BB#5: - movl $0, 16(%esp) # 4-byte Folded Spill - movl $0, %ebx - jmp .LBB205_6 -.LBB205_4: - movl (%edi), %ebx - movl 4(%edi), %eax - movl %eax, 16(%esp) # 4-byte Spill -.LBB205_6: - jne .LBB205_7 -# BB#8: - movl $0, 24(%esp) # 4-byte Folded Spill - jmp .LBB205_9 -.LBB205_7: - movl 44(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill -.LBB205_9: - jne .LBB205_10 -# BB#11: - movl $0, 20(%esp) # 4-byte Folded Spill - jmp .LBB205_12 -.LBB205_10: - movl 40(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill -.LBB205_12: - jne .LBB205_13 -# BB#14: - movl $0, 12(%esp) # 4-byte Folded Spill - jmp .LBB205_15 -.LBB205_13: - movl 36(%edi), %eax - movl %eax, 12(%esp) # 4-byte Spill -.LBB205_15: - jne .LBB205_16 -# BB#17: - movl $0, 8(%esp) # 4-byte Folded Spill - jmp .LBB205_18 -.LBB205_16: - movl 32(%edi), %eax - movl %eax, 8(%esp) # 4-byte Spill -.LBB205_18: - jne .LBB205_19 -# BB#20: - movl $0, 4(%esp) # 4-byte Folded Spill - jmp .LBB205_21 -.LBB205_19: - movl 28(%edi), %eax - movl %eax, 4(%esp) # 4-byte Spill -.LBB205_21: - jne .LBB205_22 -# BB#23: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB205_24 -.LBB205_22: - movl 24(%edi), %eax - movl %eax, (%esp) # 4-byte Spill -.LBB205_24: - jne .LBB205_25 -# BB#26: - movl $0, %eax - jmp .LBB205_27 -.LBB205_25: - movl 20(%edi), %eax -.LBB205_27: - jne .LBB205_28 -# BB#29: - movl $0, %edx - jmp .LBB205_30 -.LBB205_28: - movl 16(%edi), %edx -.LBB205_30: - jne .LBB205_31 -# BB#32: - movl $0, %esi - jmp .LBB205_33 -.LBB205_31: - movl 12(%edi), %esi -.LBB205_33: - jne .LBB205_34 -# BB#35: - xorl %edi, %edi - jmp .LBB205_36 -.LBB205_34: - movl 8(%edi), %edi -.LBB205_36: - addl 36(%esp), %ebx # 4-byte Folded Reload - movl 16(%esp), %ebp # 4-byte Reload - adcl 28(%esp), %ebp # 4-byte Folded Reload - movl %ebx, 52(%ecx) - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %ebp, 56(%ecx) - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %edi, 60(%ecx) - adcl 48(%esp), %edx # 4-byte Folded Reload - movl %esi, 64(%ecx) - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %edx, 68(%ecx) - movl (%esp), %edx # 4-byte Reload - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %eax, 72(%ecx) - movl 4(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %edx, 76(%ecx) - movl 8(%esp), %edx # 4-byte Reload - adcl 64(%esp), %edx # 4-byte Folded Reload - movl %eax, 80(%ecx) - movl 12(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %edx, 84(%ecx) - movl 20(%esp), %edx # 4-byte Reload - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %eax, 88(%ecx) - movl 24(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %edx, 92(%ecx) - movl %eax, 96(%ecx) - movl 44(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%ecx) - addl $84, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end205: - .size mcl_fpDbl_sub13Lbmi2, .Lfunc_end205-mcl_fpDbl_sub13Lbmi2 - - .align 16, 0x90 - .type .LmulPv448x32,@function -.LmulPv448x32: # @mulPv448x32 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $48, %esp - movl %edx, %eax - movl 68(%esp), %ebx - movl %ebx, %edx - mulxl 4(%eax), %edi, %esi - movl %ebx, %edx - mulxl (%eax), %ebp, %edx - movl %ebp, 44(%esp) # 4-byte Spill - addl %edi, %edx - 
movl %edx, 40(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 8(%eax), %edx, %edi - adcl %esi, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 12(%eax), %edx, %esi - adcl %edi, %edx - movl %edx, 32(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 16(%eax), %edx, %edi - adcl %esi, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 20(%eax), %edx, %esi - adcl %edi, %edx - movl %edx, 24(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 24(%eax), %edx, %edi - adcl %esi, %edx - movl %edx, 20(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 28(%eax), %edx, %esi - adcl %edi, %edx - movl %edx, 16(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 32(%eax), %edx, %edi - adcl %esi, %edx - movl %edx, 12(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 36(%eax), %edx, %esi - adcl %edi, %edx - movl %edx, 8(%esp) # 4-byte Spill - movl %ebx, %edx - mulxl 40(%eax), %edi, %ebp - adcl %esi, %edi - movl %ebx, %edx - mulxl 44(%eax), %esi, %edx - movl %edx, (%esp) # 4-byte Spill - adcl %ebp, %esi - movl %ebx, %edx - mulxl 48(%eax), %edx, %ebp - movl %ebp, 4(%esp) # 4-byte Spill - adcl (%esp), %edx # 4-byte Folded Reload - movl 44(%esp), %ebp # 4-byte Reload - movl %ebp, (%ecx) - movl 40(%esp), %ebp # 4-byte Reload - movl %ebp, 4(%ecx) - movl 36(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%ecx) - movl 32(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%ecx) - movl 28(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%ecx) - movl 24(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%ecx) - movl 20(%esp), %ebp # 4-byte Reload - movl %ebp, 24(%ecx) - movl 16(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%ecx) - movl 12(%esp), %ebp # 4-byte Reload - movl %ebp, 32(%ecx) - movl 8(%esp), %ebp # 4-byte Reload - movl %ebp, 36(%ecx) - movl %edi, 40(%ecx) - movl %esi, 44(%ecx) - movl %edx, 48(%ecx) - movl %ebx, %edx - mulxl 52(%eax), %eax, %edx - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%ecx) - adcl $0, %edx - movl %edx, 56(%ecx) - movl %ecx, %eax - addl $48, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end206: - .size .LmulPv448x32, .Lfunc_end206-.LmulPv448x32 - - .globl mcl_fp_mulUnitPre14Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre14Lbmi2,@function -mcl_fp_mulUnitPre14Lbmi2: # @mcl_fp_mulUnitPre14Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $108, %esp - calll .L207$pb -.L207$pb: - popl %ebx -.Ltmp38: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp38-.L207$pb), %ebx - movl 136(%esp), %eax - movl %eax, (%esp) - leal 48(%esp), %ecx - movl 132(%esp), %edx - calll .LmulPv448x32 - movl 104(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 100(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 96(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 92(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 88(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 84(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 80(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 76(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 72(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 68(%esp), %ebp - movl 64(%esp), %ebx - movl 60(%esp), %edi - movl 56(%esp), %esi - movl 48(%esp), %edx - movl 52(%esp), %ecx - movl 128(%esp), %eax - movl %edx, (%eax) - movl %ecx, 4(%eax) - movl %esi, 8(%eax) - movl %edi, 12(%eax) - movl %ebx, 16(%eax) - movl %ebp, 20(%eax) - movl 12(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - 
movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 40(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 44(%eax) - movl 36(%esp), %ecx # 4-byte Reload - movl %ecx, 48(%eax) - movl 40(%esp), %ecx # 4-byte Reload - movl %ecx, 52(%eax) - movl 44(%esp), %ecx # 4-byte Reload - movl %ecx, 56(%eax) - addl $108, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end207: - .size mcl_fp_mulUnitPre14Lbmi2, .Lfunc_end207-mcl_fp_mulUnitPre14Lbmi2 - - .globl mcl_fpDbl_mulPre14Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre14Lbmi2,@function -mcl_fpDbl_mulPre14Lbmi2: # @mcl_fpDbl_mulPre14Lbmi2 -# BB#0: - pushl %ebp - movl %esp, %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $268, %esp # imm = 0x10C - calll .L208$pb -.L208$pb: - popl %ebx -.Ltmp39: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp39-.L208$pb), %ebx - movl %ebx, -192(%ebp) # 4-byte Spill - movl 16(%ebp), %esi - movl %esi, 8(%esp) - movl 12(%ebp), %edi - movl %edi, 4(%esp) - movl 8(%ebp), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre7Lbmi2@PLT - leal 28(%esi), %eax - movl %eax, 8(%esp) - leal 28(%edi), %eax - movl %eax, 4(%esp) - movl 8(%ebp), %eax - leal 56(%eax), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre7Lbmi2@PLT - movl 44(%edi), %eax - movl %eax, -156(%ebp) # 4-byte Spill - movl 40(%edi), %eax - movl 36(%edi), %edx - movl (%edi), %edi - movl 12(%ebp), %ecx - movl 4(%ecx), %ecx - movl 12(%ebp), %ebx - addl 28(%ebx), %edi - movl %edi, -180(%ebp) # 4-byte Spill - movl 12(%ebp), %edi - adcl 32(%edi), %ecx - movl %ecx, -200(%ebp) # 4-byte Spill - adcl 8(%edi), %edx - movl %edx, -212(%ebp) # 4-byte Spill - adcl 12(%edi), %eax - movl %eax, -196(%ebp) # 4-byte Spill - movl -156(%ebp), %eax # 4-byte Reload - adcl 16(%edi), %eax - movl %eax, -156(%ebp) # 4-byte Spill - movl %eax, %ebx - seto %al - lahf - movl %eax, %eax - movl %eax, -128(%ebp) # 4-byte Spill - movl (%esi), %eax - addl 28(%esi), %eax - movl %eax, -216(%ebp) # 4-byte Spill - movl 4(%esi), %eax - adcl 32(%esi), %eax - movl %eax, -164(%ebp) # 4-byte Spill - movl 36(%esi), %eax - adcl 8(%esi), %eax - movl %eax, -168(%ebp) # 4-byte Spill - movl 40(%esi), %eax - adcl 12(%esi), %eax - movl %eax, -172(%ebp) # 4-byte Spill - movl 44(%esi), %eax - adcl 16(%esi), %eax - movl %eax, -176(%ebp) # 4-byte Spill - movl 48(%esi), %ecx - adcl 20(%esi), %ecx - movl 52(%esi), %eax - adcl 24(%esi), %eax - pushl %eax - seto %al - lahf - movl %eax, %esi - popl %eax - movl %esi, -220(%ebp) # 4-byte Spill - movl %ebx, %esi - movl %edx, -184(%ebp) # 4-byte Spill - movl -180(%ebp), %edx # 4-byte Reload - movl %edx, -188(%ebp) # 4-byte Spill - jb .LBB208_2 -# BB#1: - xorl %esi, %esi - movl $0, -184(%ebp) # 4-byte Folded Spill - movl $0, -188(%ebp) # 4-byte Folded Spill -.LBB208_2: - movl %esi, -204(%ebp) # 4-byte Spill - movl 52(%edi), %esi - movl 48(%edi), %ebx - movl -128(%ebp), %edx # 4-byte Reload - pushl %eax - movl %edx, %eax - addb $127, %al - sahf - popl %eax - adcl 20(%edi), %ebx - movl %ebx, -160(%ebp) # 4-byte Spill - adcl 24(%edi), %esi - movl %esi, -208(%ebp) # 4-byte Spill - movl %eax, -148(%ebp) # 4-byte Spill - movl %ecx, -152(%ebp) # 4-byte Spill - movl -176(%ebp), %esi # 4-byte Reload - movl %esi, -128(%ebp) # 4-byte Spill - movl -172(%ebp), %esi # 4-byte Reload - movl %esi, -132(%ebp) # 4-byte Spill - movl -168(%ebp), %esi # 4-byte Reload - movl %esi, -136(%ebp) # 4-byte Spill - movl -164(%ebp), %esi # 4-byte Reload - movl %esi, -140(%ebp) # 4-byte Spill - movl -216(%ebp), %ebx # 4-byte Reload - movl %ebx, 
-144(%ebp) # 4-byte Spill - jb .LBB208_4 -# BB#3: - movl $0, -148(%ebp) # 4-byte Folded Spill - movl $0, -152(%ebp) # 4-byte Folded Spill - movl $0, -128(%ebp) # 4-byte Folded Spill - movl $0, -132(%ebp) # 4-byte Folded Spill - movl $0, -136(%ebp) # 4-byte Folded Spill - movl $0, -140(%ebp) # 4-byte Folded Spill - movl $0, -144(%ebp) # 4-byte Folded Spill -.LBB208_4: - movl -180(%ebp), %edx # 4-byte Reload - movl %edx, -96(%ebp) - movl -200(%ebp), %esi # 4-byte Reload - movl %esi, -92(%ebp) - movl -212(%ebp), %edx # 4-byte Reload - movl %edx, -88(%ebp) - movl -196(%ebp), %edi # 4-byte Reload - movl %edi, -84(%ebp) - movl -156(%ebp), %edx # 4-byte Reload - movl %edx, -80(%ebp) - movl %ebx, -124(%ebp) - movl -164(%ebp), %edx # 4-byte Reload - movl %edx, -120(%ebp) - movl -168(%ebp), %edx # 4-byte Reload - movl %edx, -116(%ebp) - movl -172(%ebp), %edx # 4-byte Reload - movl %edx, -112(%ebp) - movl -176(%ebp), %edx # 4-byte Reload - movl %edx, -108(%ebp) - movl %ecx, -104(%ebp) - movl %edi, %ebx - movl %esi, %edi - movl %eax, -100(%ebp) - sbbl %edx, %edx - movl -160(%ebp), %eax # 4-byte Reload - movl %eax, -76(%ebp) - movl -208(%ebp), %esi # 4-byte Reload - movl %esi, -72(%ebp) - movl -220(%ebp), %ecx # 4-byte Reload - pushl %eax - movl %ecx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB208_6 -# BB#5: - movl $0, %esi - movl $0, %eax - movl $0, %ebx - movl $0, %edi -.LBB208_6: - movl %eax, -160(%ebp) # 4-byte Spill - sbbl %eax, %eax - leal -124(%ebp), %ecx - movl %ecx, 8(%esp) - leal -96(%ebp), %ecx - movl %ecx, 4(%esp) - leal -68(%ebp), %ecx - movl %ecx, (%esp) - andl %eax, %edx - movl -188(%ebp), %eax # 4-byte Reload - addl %eax, -144(%ebp) # 4-byte Folded Spill - adcl %edi, -140(%ebp) # 4-byte Folded Spill - movl -184(%ebp), %eax # 4-byte Reload - adcl %eax, -136(%ebp) # 4-byte Folded Spill - adcl %ebx, -132(%ebp) # 4-byte Folded Spill - movl -204(%ebp), %eax # 4-byte Reload - adcl %eax, -128(%ebp) # 4-byte Folded Spill - movl -152(%ebp), %edi # 4-byte Reload - adcl -160(%ebp), %edi # 4-byte Folded Reload - adcl %esi, -148(%ebp) # 4-byte Folded Spill - sbbl %esi, %esi - andl $1, %esi - andl $1, %edx - movl %edx, -156(%ebp) # 4-byte Spill - movl -192(%ebp), %ebx # 4-byte Reload - calll mcl_fpDbl_mulPre7Lbmi2@PLT - movl -144(%ebp), %eax # 4-byte Reload - addl -40(%ebp), %eax - movl %eax, -144(%ebp) # 4-byte Spill - movl -140(%ebp), %eax # 4-byte Reload - adcl -36(%ebp), %eax - movl %eax, -140(%ebp) # 4-byte Spill - movl -136(%ebp), %eax # 4-byte Reload - adcl -32(%ebp), %eax - movl %eax, -136(%ebp) # 4-byte Spill - movl -132(%ebp), %eax # 4-byte Reload - adcl -28(%ebp), %eax - movl %eax, -132(%ebp) # 4-byte Spill - movl -128(%ebp), %eax # 4-byte Reload - adcl -24(%ebp), %eax - movl %eax, -128(%ebp) # 4-byte Spill - adcl -20(%ebp), %edi - movl -148(%ebp), %eax # 4-byte Reload - adcl -16(%ebp), %eax - movl %eax, -148(%ebp) # 4-byte Spill - adcl %esi, -156(%ebp) # 4-byte Folded Spill - movl -68(%ebp), %eax - movl 8(%ebp), %esi - subl (%esi), %eax - movl %eax, -172(%ebp) # 4-byte Spill - movl -64(%ebp), %ecx - sbbl 4(%esi), %ecx - movl -60(%ebp), %eax - sbbl 8(%esi), %eax - movl %eax, -160(%ebp) # 4-byte Spill - movl -56(%ebp), %edx - sbbl 12(%esi), %edx - movl -52(%ebp), %ebx - sbbl 16(%esi), %ebx - movl -48(%ebp), %eax - sbbl 20(%esi), %eax - movl %eax, -164(%ebp) # 4-byte Spill - movl -44(%ebp), %eax - sbbl 24(%esi), %eax - movl %eax, -168(%ebp) # 4-byte Spill - movl 28(%esi), %eax - movl %eax, -176(%ebp) # 4-byte Spill - sbbl %eax, -144(%ebp) # 4-byte Folded Spill - movl 32(%esi), %eax - 
movl %eax, -180(%ebp) # 4-byte Spill - sbbl %eax, -140(%ebp) # 4-byte Folded Spill - movl 36(%esi), %eax - movl %eax, -184(%ebp) # 4-byte Spill - sbbl %eax, -136(%ebp) # 4-byte Folded Spill - movl 40(%esi), %eax - movl %eax, -188(%ebp) # 4-byte Spill - sbbl %eax, -132(%ebp) # 4-byte Folded Spill - movl 44(%esi), %eax - movl %eax, -192(%ebp) # 4-byte Spill - sbbl %eax, -128(%ebp) # 4-byte Folded Spill - movl 48(%esi), %eax - movl %eax, -196(%ebp) # 4-byte Spill - sbbl %eax, %edi - movl %edi, -152(%ebp) # 4-byte Spill - movl 52(%esi), %eax - movl %eax, -200(%ebp) # 4-byte Spill - movl -148(%ebp), %edi # 4-byte Reload - sbbl %eax, %edi - sbbl $0, -156(%ebp) # 4-byte Folded Spill - movl 56(%esi), %eax - movl %eax, -228(%ebp) # 4-byte Spill - subl %eax, -172(%ebp) # 4-byte Folded Spill - movl 60(%esi), %eax - movl %eax, -232(%ebp) # 4-byte Spill - sbbl %eax, %ecx - movl 64(%esi), %eax - movl %eax, -236(%ebp) # 4-byte Spill - sbbl %eax, -160(%ebp) # 4-byte Folded Spill - movl 68(%esi), %eax - movl %eax, -240(%ebp) # 4-byte Spill - sbbl %eax, %edx - movl 72(%esi), %eax - movl %eax, -244(%ebp) # 4-byte Spill - sbbl %eax, %ebx - movl 76(%esi), %eax - movl %eax, -248(%ebp) # 4-byte Spill - sbbl %eax, -164(%ebp) # 4-byte Folded Spill - movl 80(%esi), %eax - movl %eax, -252(%ebp) # 4-byte Spill - sbbl %eax, -168(%ebp) # 4-byte Folded Spill - movl 84(%esi), %eax - movl %eax, -256(%ebp) # 4-byte Spill - sbbl %eax, -144(%ebp) # 4-byte Folded Spill - movl 88(%esi), %eax - movl %eax, -208(%ebp) # 4-byte Spill - sbbl %eax, -140(%ebp) # 4-byte Folded Spill - movl 92(%esi), %eax - movl %eax, -212(%ebp) # 4-byte Spill - sbbl %eax, -136(%ebp) # 4-byte Folded Spill - movl 96(%esi), %eax - movl %eax, -216(%ebp) # 4-byte Spill - sbbl %eax, -132(%ebp) # 4-byte Folded Spill - movl 100(%esi), %eax - movl %eax, -220(%ebp) # 4-byte Spill - sbbl %eax, -128(%ebp) # 4-byte Folded Spill - movl 104(%esi), %eax - movl %eax, -224(%ebp) # 4-byte Spill - sbbl %eax, -152(%ebp) # 4-byte Folded Spill - movl 108(%esi), %eax - movl %eax, -204(%ebp) # 4-byte Spill - sbbl %eax, %edi - movl %edi, -148(%ebp) # 4-byte Spill - movl -156(%ebp), %edi # 4-byte Reload - sbbl $0, %edi - movl -172(%ebp), %eax # 4-byte Reload - addl -176(%ebp), %eax # 4-byte Folded Reload - adcl -180(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 28(%esi) - movl -160(%ebp), %eax # 4-byte Reload - adcl -184(%ebp), %eax # 4-byte Folded Reload - movl %ecx, 32(%esi) - adcl -188(%ebp), %edx # 4-byte Folded Reload - movl %eax, 36(%esi) - adcl -192(%ebp), %ebx # 4-byte Folded Reload - movl %edx, 40(%esi) - movl -164(%ebp), %eax # 4-byte Reload - adcl -196(%ebp), %eax # 4-byte Folded Reload - movl %ebx, 44(%esi) - movl -168(%ebp), %ecx # 4-byte Reload - adcl -200(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 48(%esi) - movl -144(%ebp), %eax # 4-byte Reload - adcl -228(%ebp), %eax # 4-byte Folded Reload - movl %ecx, 52(%esi) - movl -140(%ebp), %ecx # 4-byte Reload - adcl -232(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 56(%esi) - movl -136(%ebp), %eax # 4-byte Reload - adcl -236(%ebp), %eax # 4-byte Folded Reload - movl %ecx, 60(%esi) - movl -132(%ebp), %ecx # 4-byte Reload - adcl -240(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 64(%esi) - movl -128(%ebp), %eax # 4-byte Reload - adcl -244(%ebp), %eax # 4-byte Folded Reload - movl %ecx, 68(%esi) - movl -152(%ebp), %ecx # 4-byte Reload - adcl -248(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 72(%esi) - movl -148(%ebp), %eax # 4-byte Reload - adcl -252(%ebp), %eax # 4-byte Folded Reload - movl %ecx, 76(%esi) - 
adcl -256(%ebp), %edi # 4-byte Folded Reload - movl %eax, 80(%esi) - movl %edi, 84(%esi) - movl -208(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 88(%esi) - movl -212(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 92(%esi) - movl -216(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 96(%esi) - movl -220(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 100(%esi) - movl -224(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 104(%esi) - movl -204(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 108(%esi) - addl $268, %esp # imm = 0x10C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end208: - .size mcl_fpDbl_mulPre14Lbmi2, .Lfunc_end208-mcl_fpDbl_mulPre14Lbmi2 - - .globl mcl_fpDbl_sqrPre14Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sqrPre14Lbmi2,@function -mcl_fpDbl_sqrPre14Lbmi2: # @mcl_fpDbl_sqrPre14Lbmi2 -# BB#0: - pushl %ebp - movl %esp, %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $268, %esp # imm = 0x10C - calll .L209$pb -.L209$pb: - popl %ebx -.Ltmp40: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp40-.L209$pb), %ebx - movl %ebx, -172(%ebp) # 4-byte Spill - movl 12(%ebp), %edi - movl %edi, 8(%esp) - movl %edi, 4(%esp) - movl 8(%ebp), %esi - movl %esi, (%esp) - calll mcl_fpDbl_mulPre7Lbmi2@PLT - leal 28(%edi), %eax - movl %eax, 8(%esp) - movl %eax, 4(%esp) - leal 56(%esi), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre7Lbmi2@PLT - movl 48(%edi), %eax - movl 44(%edi), %ecx - movl 36(%edi), %edx - movl (%edi), %esi - movl 4(%edi), %ebx - addl 28(%edi), %esi - adcl 32(%edi), %ebx - movl %ebx, -164(%ebp) # 4-byte Spill - adcl 8(%edi), %edx - movl %edx, -160(%ebp) # 4-byte Spill - movl 40(%edi), %edx - adcl 12(%edi), %edx - adcl 16(%edi), %ecx - movl %ecx, -180(%ebp) # 4-byte Spill - adcl 20(%edi), %eax - movl %eax, -176(%ebp) # 4-byte Spill - movl 52(%edi), %ecx - adcl 24(%edi), %ecx - seto %al - lahf - movl %eax, %eax - movl %eax, -184(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %eax - movl %eax, -152(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %eax - movl %eax, -148(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %eax - movl %eax, -144(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %eax - movl %eax, -140(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %eax - movl %eax, -136(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %edi - seto %al - lahf - movl %eax, %eax - sbbl %ebx, %ebx - movl %ebx, -128(%ebp) # 4-byte Spill - movl %eax, %eax - addb $127, %al - sahf - jb .LBB209_1 -# BB#2: - movl %esi, -168(%ebp) # 4-byte Spill - movl $0, -132(%ebp) # 4-byte Folded Spill - jmp .LBB209_3 -.LBB209_1: - leal (%esi,%esi), %eax - movl %esi, -168(%ebp) # 4-byte Spill - movl %eax, -132(%ebp) # 4-byte Spill -.LBB209_3: - movl %edi, %eax - addb $127, %al - sahf - movl -180(%ebp), %ebx # 4-byte Reload - jb .LBB209_4 -# BB#5: - movl $0, -156(%ebp) # 4-byte Folded Spill - jmp .LBB209_6 -.LBB209_4: - movl -164(%ebp), %eax # 4-byte Reload - movl -168(%ebp), %esi # 4-byte Reload - shldl $1, %esi, %eax - movl %eax, -156(%ebp) # 4-byte Spill -.LBB209_6: - movl -176(%ebp), %edi # 4-byte Reload - movl -136(%ebp), %eax # 4-byte Reload - movl %eax, %eax - addb $127, %al - sahf - jb .LBB209_7 -# BB#8: - movl $0, -136(%ebp) # 4-byte Folded Spill - jmp .LBB209_9 -.LBB209_7: - movl -160(%ebp), %eax # 4-byte Reload - movl -164(%ebp), %esi # 4-byte Reload - shldl $1, %esi, %eax - movl %eax, -136(%ebp) # 4-byte Spill -.LBB209_9: - movl %ebx, %esi - movl -140(%ebp), %eax # 4-byte Reload - movl %eax, %eax - addb $127, %al - sahf - 
jb .LBB209_10 -# BB#11: - movl $0, -140(%ebp) # 4-byte Folded Spill - jmp .LBB209_12 -.LBB209_10: - movl %edx, %eax - movl -160(%ebp), %ebx # 4-byte Reload - shldl $1, %ebx, %eax - movl %eax, -140(%ebp) # 4-byte Spill -.LBB209_12: - movl -144(%ebp), %eax # 4-byte Reload - movl %eax, %eax - addb $127, %al - sahf - jb .LBB209_13 -# BB#14: - movl $0, -144(%ebp) # 4-byte Folded Spill - jmp .LBB209_15 -.LBB209_13: - movl %esi, %eax - shldl $1, %edx, %eax - movl %eax, -144(%ebp) # 4-byte Spill -.LBB209_15: - movl -148(%ebp), %eax # 4-byte Reload - movl %eax, %eax - addb $127, %al - sahf - jb .LBB209_16 -# BB#17: - movl $0, -148(%ebp) # 4-byte Folded Spill - jmp .LBB209_18 -.LBB209_16: - movl %edi, %eax - shldl $1, %esi, %eax - movl %eax, -148(%ebp) # 4-byte Spill -.LBB209_18: - movl -152(%ebp), %eax # 4-byte Reload - movl %eax, %eax - addb $127, %al - sahf - jb .LBB209_19 -# BB#20: - movl $0, -152(%ebp) # 4-byte Folded Spill - jmp .LBB209_21 -.LBB209_19: - movl %ecx, %eax - shldl $1, %edi, %eax - movl %eax, -152(%ebp) # 4-byte Spill -.LBB209_21: - movl -168(%ebp), %eax # 4-byte Reload - movl %eax, -96(%ebp) - movl %eax, -124(%ebp) - movl -164(%ebp), %eax # 4-byte Reload - movl %eax, -92(%ebp) - movl %eax, -120(%ebp) - movl -160(%ebp), %eax # 4-byte Reload - movl %eax, -88(%ebp) - movl %eax, -116(%ebp) - movl %edx, -84(%ebp) - movl %edx, -112(%ebp) - movl %esi, -80(%ebp) - movl %esi, -108(%ebp) - movl %edi, -76(%ebp) - movl %edi, -104(%ebp) - movl %ecx, -72(%ebp) - movl %ecx, -100(%ebp) - movl -184(%ebp), %eax # 4-byte Reload - movl %eax, %eax - addb $127, %al - sahf - jb .LBB209_22 -# BB#23: - xorl %edi, %edi - jmp .LBB209_24 -.LBB209_22: - shrl $31, %ecx - movl %ecx, %edi -.LBB209_24: - leal -68(%ebp), %eax - movl %eax, (%esp) - leal -96(%ebp), %eax - movl %eax, 4(%esp) - leal -124(%ebp), %eax - movl %eax, 8(%esp) - movl -128(%ebp), %esi # 4-byte Reload - andl $1, %esi - movl -172(%ebp), %ebx # 4-byte Reload - calll mcl_fpDbl_mulPre7Lbmi2@PLT - movl -132(%ebp), %eax # 4-byte Reload - addl -40(%ebp), %eax - movl %eax, -132(%ebp) # 4-byte Spill - movl -156(%ebp), %eax # 4-byte Reload - adcl -36(%ebp), %eax - movl -136(%ebp), %ecx # 4-byte Reload - adcl -32(%ebp), %ecx - movl %ecx, -136(%ebp) # 4-byte Spill - movl -140(%ebp), %ecx # 4-byte Reload - adcl -28(%ebp), %ecx - movl %ecx, -140(%ebp) # 4-byte Spill - movl -144(%ebp), %ecx # 4-byte Reload - adcl -24(%ebp), %ecx - movl %ecx, -144(%ebp) # 4-byte Spill - movl -148(%ebp), %ecx # 4-byte Reload - adcl -20(%ebp), %ecx - movl %ecx, -148(%ebp) # 4-byte Spill - movl -152(%ebp), %ecx # 4-byte Reload - adcl -16(%ebp), %ecx - movl %ecx, -152(%ebp) # 4-byte Spill - adcl %edi, %esi - movl %esi, -128(%ebp) # 4-byte Spill - movl -68(%ebp), %ecx - movl 8(%ebp), %esi - subl (%esi), %ecx - movl %ecx, -204(%ebp) # 4-byte Spill - movl -64(%ebp), %edi - sbbl 4(%esi), %edi - movl -60(%ebp), %edx - sbbl 8(%esi), %edx - movl %edx, -160(%ebp) # 4-byte Spill - movl -56(%ebp), %edx - sbbl 12(%esi), %edx - movl %edx, -168(%ebp) # 4-byte Spill - movl -52(%ebp), %ebx - sbbl 16(%esi), %ebx - movl -48(%ebp), %ecx - sbbl 20(%esi), %ecx - movl %ecx, -172(%ebp) # 4-byte Spill - movl -44(%ebp), %edx - sbbl 24(%esi), %edx - movl %edx, -164(%ebp) # 4-byte Spill - movl 28(%esi), %edx - movl %edx, -176(%ebp) # 4-byte Spill - sbbl %edx, -132(%ebp) # 4-byte Folded Spill - movl 32(%esi), %ecx - movl %ecx, -180(%ebp) # 4-byte Spill - sbbl %ecx, %eax - movl %eax, -156(%ebp) # 4-byte Spill - movl 36(%esi), %eax - movl %eax, -184(%ebp) # 4-byte Spill - sbbl %eax, -136(%ebp) # 4-byte 
Folded Spill - movl 40(%esi), %eax - movl %eax, -188(%ebp) # 4-byte Spill - sbbl %eax, -140(%ebp) # 4-byte Folded Spill - movl 44(%esi), %eax - movl %eax, -192(%ebp) # 4-byte Spill - sbbl %eax, -144(%ebp) # 4-byte Folded Spill - movl 48(%esi), %eax - movl %eax, -196(%ebp) # 4-byte Spill - sbbl %eax, -148(%ebp) # 4-byte Folded Spill - movl 52(%esi), %eax - movl %eax, -200(%ebp) # 4-byte Spill - sbbl %eax, -152(%ebp) # 4-byte Folded Spill - movl -128(%ebp), %ecx # 4-byte Reload - sbbl $0, %ecx - movl 56(%esi), %eax - movl %eax, -228(%ebp) # 4-byte Spill - movl -204(%ebp), %edx # 4-byte Reload - subl %eax, %edx - movl 60(%esi), %eax - movl %eax, -232(%ebp) # 4-byte Spill - sbbl %eax, %edi - movl 64(%esi), %eax - movl %eax, -236(%ebp) # 4-byte Spill - sbbl %eax, -160(%ebp) # 4-byte Folded Spill - movl 68(%esi), %eax - movl %eax, -240(%ebp) # 4-byte Spill - sbbl %eax, -168(%ebp) # 4-byte Folded Spill - movl 72(%esi), %eax - movl %eax, -244(%ebp) # 4-byte Spill - sbbl %eax, %ebx - movl 76(%esi), %eax - movl %eax, -248(%ebp) # 4-byte Spill - sbbl %eax, -172(%ebp) # 4-byte Folded Spill - movl 80(%esi), %eax - movl %eax, -252(%ebp) # 4-byte Spill - sbbl %eax, -164(%ebp) # 4-byte Folded Spill - movl 84(%esi), %eax - movl %eax, -256(%ebp) # 4-byte Spill - sbbl %eax, -132(%ebp) # 4-byte Folded Spill - movl 88(%esi), %eax - movl %eax, -204(%ebp) # 4-byte Spill - sbbl %eax, -156(%ebp) # 4-byte Folded Spill - movl 92(%esi), %eax - movl %eax, -208(%ebp) # 4-byte Spill - sbbl %eax, -136(%ebp) # 4-byte Folded Spill - movl 96(%esi), %eax - movl %eax, -212(%ebp) # 4-byte Spill - sbbl %eax, -140(%ebp) # 4-byte Folded Spill - movl 100(%esi), %eax - movl %eax, -216(%ebp) # 4-byte Spill - sbbl %eax, -144(%ebp) # 4-byte Folded Spill - movl 104(%esi), %eax - movl %eax, -220(%ebp) # 4-byte Spill - sbbl %eax, -148(%ebp) # 4-byte Folded Spill - movl 108(%esi), %eax - movl %eax, -224(%ebp) # 4-byte Spill - sbbl %eax, -152(%ebp) # 4-byte Folded Spill - sbbl $0, %ecx - movl %ecx, -128(%ebp) # 4-byte Spill - movl %edx, %eax - addl -176(%ebp), %eax # 4-byte Folded Reload - adcl -180(%ebp), %edi # 4-byte Folded Reload - movl %eax, 28(%esi) - movl -160(%ebp), %eax # 4-byte Reload - adcl -184(%ebp), %eax # 4-byte Folded Reload - movl %edi, 32(%esi) - movl -168(%ebp), %ecx # 4-byte Reload - adcl -188(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 36(%esi) - adcl -192(%ebp), %ebx # 4-byte Folded Reload - movl %ecx, 40(%esi) - movl -172(%ebp), %eax # 4-byte Reload - adcl -196(%ebp), %eax # 4-byte Folded Reload - movl %ebx, 44(%esi) - movl -164(%ebp), %ecx # 4-byte Reload - adcl -200(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 48(%esi) - movl -132(%ebp), %eax # 4-byte Reload - adcl -228(%ebp), %eax # 4-byte Folded Reload - movl %ecx, 52(%esi) - movl -156(%ebp), %edx # 4-byte Reload - adcl -232(%ebp), %edx # 4-byte Folded Reload - movl %eax, 56(%esi) - movl -136(%ebp), %ecx # 4-byte Reload - adcl -236(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 60(%esi) - movl -140(%ebp), %eax # 4-byte Reload - adcl -240(%ebp), %eax # 4-byte Folded Reload - movl %ecx, 64(%esi) - movl -144(%ebp), %ecx # 4-byte Reload - adcl -244(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 68(%esi) - movl -148(%ebp), %eax # 4-byte Reload - adcl -248(%ebp), %eax # 4-byte Folded Reload - movl %ecx, 72(%esi) - movl -152(%ebp), %ecx # 4-byte Reload - adcl -252(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 76(%esi) - movl -128(%ebp), %eax # 4-byte Reload - adcl -256(%ebp), %eax # 4-byte Folded Reload - movl %ecx, 80(%esi) - movl %eax, 84(%esi) - movl 
-204(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 88(%esi) - movl -208(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 92(%esi) - movl -212(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 96(%esi) - movl -216(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 100(%esi) - movl -220(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 104(%esi) - movl -224(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 108(%esi) - addl $268, %esp # imm = 0x10C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end209: - .size mcl_fpDbl_sqrPre14Lbmi2, .Lfunc_end209-mcl_fpDbl_sqrPre14Lbmi2 - - .globl mcl_fp_mont14Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont14Lbmi2,@function -mcl_fp_mont14Lbmi2: # @mcl_fp_mont14Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1900, %esp # imm = 0x76C - calll .L210$pb -.L210$pb: - popl %ebx -.Ltmp41: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp41-.L210$pb), %ebx - movl 1932(%esp), %eax - movl -4(%eax), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 1928(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1840(%esp), %ecx - movl 1924(%esp), %edx - calll .LmulPv448x32 - movl 1840(%esp), %edi - movl 1844(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl %edi, %eax - imull %esi, %eax - movl 1896(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 1892(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 1888(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 1884(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 1880(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 1876(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 1872(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 1868(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 1864(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 1860(%esp), %esi - movl 1856(%esp), %ebp - movl 1852(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 1848(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl %eax, (%esp) - leal 1776(%esp), %ecx - movl 1932(%esp), %edx - calll .LmulPv448x32 - addl 1776(%esp), %edi - movl 100(%esp), %eax # 4-byte Reload - adcl 1780(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1784(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1788(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 1792(%esp), %ebp - adcl 1796(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 1800(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1804(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1808(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1812(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1816(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1820(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1824(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1828(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1832(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - sbbl %edi, %edi - movl 1928(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 1712(%esp), %ecx - movl 1924(%esp), %edx - calll .LmulPv448x32 - andl $1, %edi - 
movl %edi, %edx - movl 100(%esp), %ecx # 4-byte Reload - addl 1712(%esp), %ecx - movl 84(%esp), %eax # 4-byte Reload - adcl 1716(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1720(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 1724(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - adcl 1728(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - adcl 1732(%esp), %esi - movl 52(%esp), %eax # 4-byte Reload - adcl 1736(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1740(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl 1744(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 1748(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1752(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1756(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1760(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1764(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 1768(%esp), %edx - movl %edx, 92(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 100(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1648(%esp), %ecx - movl 1932(%esp), %edx - calll .LmulPv448x32 - movl 100(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 1648(%esp), %ebp - movl 84(%esp), %ecx # 4-byte Reload - adcl 1652(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1656(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1660(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1664(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - adcl 1668(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 1672(%esp), %ebp - movl 56(%esp), %ecx # 4-byte Reload - adcl 1676(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl 1680(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1684(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1688(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1692(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1696(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1700(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 1704(%esp), %esi - adcl $0, %eax - movl %eax, %edi - movl 1928(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 1584(%esp), %ecx - movl 1924(%esp), %edx - calll .LmulPv448x32 - movl 84(%esp), %ecx # 4-byte Reload - addl 1584(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 1588(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1592(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1596(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1600(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 1604(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload 
  [deleted hunk continues: the remaining unrolled CIOS rounds of
   mcl_fp_mont14Lbmi2 for multiplier words 3..13 — each round a
   .LmulPv448x32 multiply-accumulate of the next multiplier word into the
   spill slots 52(%esp)-108(%esp), a carry capture via
   "sbbl %esi, %esi; andl $1, %esi", an imull of the low word by
   -p^{-1} mod 2^32 held in 48(%esp), and a second .LmulPv448x32 against
   the modulus at 1932(%esp) — followed by the trailing subtraction of
   the modulus, the .LBB210_1-.LBB210_28 conditional selects of the
   reduced result, and the epilogue (addl $1900, %esp; pops; retl)]
-.Lfunc_end210:
-	.size	mcl_fp_mont14Lbmi2, .Lfunc_end210-mcl_fp_mont14Lbmi2
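
For orientation, the hunks deleted above are LLVM-generated 32-bit x86 for
word-serial (CIOS) Montgomery multiplication on 14 x 32-bit limbs: each
round multiplies the 448-bit operand by one multiplier word through
.LmulPv448x32, forms q = t[0] * (-p^{-1} mod 2^32) with the imull against
the spill slot 48(%esp), then adds q*p so the low accumulator word cancels
and everything shifts down one word. A minimal Go sketch of that round
structure, assuming little-endian uint32 limbs; montMul and its parameters
are illustrative names only, not mcl's Go FFI:

    package mont

    const n = 14 // 14 x 32-bit limbs = 448 bits, matching mont14L

    // montMul returns x*y*R^{-1} mod p with R = 2^(32n), using the
    // word-serial CIOS method that the generated assembly unrolls.
    // pInv is -p^{-1} mod 2^32 (the value the asm keeps in 48(%esp)).
    func montMul(x, y, p *[n]uint32, pInv uint32) [n]uint32 {
        var t [n + 2]uint32 // accumulator plus two carry words
        for i := 0; i < n; i++ {
            // t += x[i]*y: the first .LmulPv448x32 call and adcl chain.
            var c uint64
            for j := 0; j < n; j++ {
                s := uint64(t[j]) + uint64(x[i])*uint64(y[j]) + c
                t[j], c = uint32(s), s>>32
            }
            s := uint64(t[n]) + c
            t[n], t[n+1] = uint32(s), uint32(s>>32) // the sbbl/andl $1 bit

            // q makes t divisible by 2^32: the imull by pInv.
            q := t[0] * pInv
            // t = (t + q*p) >> 32: the second .LmulPv448x32, against p.
            s = uint64(t[0]) + uint64(q)*uint64(p[0])
            c = s >> 32
            for j := 1; j < n; j++ {
                s = uint64(t[j]) + uint64(q)*uint64(p[j]) + c
                t[j-1], c = uint32(s), s>>32
            }
            s = uint64(t[n]) + c
            t[n-1] = uint32(s)
            t[n], t[n+1] = t[n+1]+uint32(s>>32), 0
        }
        // Final correction: subtract p once and keep the difference only
        // if it did not borrow (the trailing sbbl chain and the
        // .LBB210_* conditional selects).
        var out [n]uint32
        var b uint64
        for j := 0; j < n; j++ {
            d := uint64(t[j]) - uint64(p[j]) - b
            out[j], b = uint32(d), (d>>32)&1
        }
        if t[n] == 0 && b == 1 { // t < p: discard the subtraction
            copy(out[:], t[:n])
        }
        return out
    }

The generated code fully unrolls both loops and keeps the accumulator in
stack spill slots rather than registers, trading considerable code size
for avoiding loop overhead on the register-starved i386 target.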
-	.globl	mcl_fp_montNF14Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_montNF14Lbmi2,@function
-mcl_fp_montNF14Lbmi2:                   # @mcl_fp_montNF14Lbmi2
  [deleted hunk continues: the prologue (register pushes,
   "subl $1884, %esp", GOT setup via .L211$pb/.Ltmp42) and the unrolled
   rounds of the NF variant, which follow the same per-word
   .LmulPv448x32 pattern — multiply-accumulate by the next multiplier
   word, imull of the low word by the constant in 36(%esp), reduction
   against the modulus at 1916(%esp) — but propagate the top word with
   plain adcl/"adcl $0" instead of the mont variant's sbbl/andl $1
   carry masking; the hunk runs on past the end of this excerpt]
76(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 360(%esp), %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 364(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 388(%esp), %edi - movl 40(%esp), %ebp # 4-byte Reload - adcl 392(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 1912(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 288(%esp), %ecx - movl 1908(%esp), %edx - calll .LmulPv448x32 - movl 344(%esp), %edx - movl 76(%esp), %ecx # 4-byte Reload - addl 288(%esp), %ecx - movl 80(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 296(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 320(%esp), %edi - adcl 324(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 328(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 76(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 224(%esp), %ecx - movl 1916(%esp), %edx - calll .LmulPv448x32 - addl 224(%esp), %esi - movl 80(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 232(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 256(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - adcl 260(%esp), %edi - adcl 
264(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1912(%esp), %eax - movl 52(%eax), %eax - movl %eax, (%esp) - leal 160(%esp), %ecx - movl 1908(%esp), %edx - calll .LmulPv448x32 - movl 216(%esp), %edx - movl 80(%esp), %ecx # 4-byte Reload - addl 160(%esp), %ecx - adcl 164(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl 168(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 176(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 192(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 96(%esp), %ecx - movl 1916(%esp), %edx - calll .LmulPv448x32 - addl 96(%esp), %esi - movl 64(%esp), %esi # 4-byte Reload - movl 92(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl 104(%esp), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - movl %ebp, %ebx - adcl 108(%esp), %esi - adcl 112(%esp), %edi - movl 68(%esp), %edx # 4-byte Reload - adcl 116(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 120(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - adcl 124(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - adcl 128(%esp), %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - adcl 132(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 136(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 140(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 144(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 148(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 152(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, %edx - movl 1916(%esp), %ebp - subl (%ebp), %edx - sbbl 4(%ebp), %ebx - movl %esi, %eax - sbbl 8(%ebp), %eax - movl %edi, %ecx - sbbl 12(%ebp), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 
68(%esp), %ecx # 4-byte Reload - sbbl 16(%ebp), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - sbbl 20(%ebp), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - sbbl 24(%ebp), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - sbbl 28(%ebp), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - sbbl 32(%ebp), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - sbbl 36(%ebp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - sbbl 40(%ebp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - sbbl 44(%ebp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - sbbl 48(%ebp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - sbbl 52(%ebp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - sarl $31, %ecx - testl %ecx, %ecx - movl 92(%esp), %ebp # 4-byte Reload - js .LBB211_2 -# BB#1: - movl %edx, %ebp -.LBB211_2: - movl 1904(%esp), %edx - movl %ebp, (%edx) - movl 88(%esp), %ebp # 4-byte Reload - js .LBB211_4 -# BB#3: - movl %ebx, %ebp -.LBB211_4: - movl %ebp, 4(%edx) - js .LBB211_6 -# BB#5: - movl %eax, %esi -.LBB211_6: - movl %esi, 8(%edx) - js .LBB211_8 -# BB#7: - movl 4(%esp), %edi # 4-byte Reload -.LBB211_8: - movl %edi, 12(%edx) - movl 68(%esp), %eax # 4-byte Reload - js .LBB211_10 -# BB#9: - movl 8(%esp), %eax # 4-byte Reload -.LBB211_10: - movl %eax, 16(%edx) - movl 60(%esp), %eax # 4-byte Reload - js .LBB211_12 -# BB#11: - movl 12(%esp), %eax # 4-byte Reload -.LBB211_12: - movl %eax, 20(%edx) - movl 44(%esp), %eax # 4-byte Reload - js .LBB211_14 -# BB#13: - movl 16(%esp), %eax # 4-byte Reload -.LBB211_14: - movl %eax, 24(%edx) - movl 40(%esp), %eax # 4-byte Reload - js .LBB211_16 -# BB#15: - movl 20(%esp), %eax # 4-byte Reload -.LBB211_16: - movl %eax, 28(%edx) - movl 48(%esp), %eax # 4-byte Reload - js .LBB211_18 -# BB#17: - movl 24(%esp), %eax # 4-byte Reload -.LBB211_18: - movl %eax, 32(%edx) - movl 56(%esp), %eax # 4-byte Reload - js .LBB211_20 -# BB#19: - movl 28(%esp), %eax # 4-byte Reload -.LBB211_20: - movl %eax, 36(%edx) - movl 52(%esp), %eax # 4-byte Reload - js .LBB211_22 -# BB#21: - movl 32(%esp), %eax # 4-byte Reload -.LBB211_22: - movl %eax, 40(%edx) - movl 84(%esp), %eax # 4-byte Reload - js .LBB211_24 -# BB#23: - movl 36(%esp), %eax # 4-byte Reload -.LBB211_24: - movl %eax, 44(%edx) - movl 76(%esp), %eax # 4-byte Reload - js .LBB211_26 -# BB#25: - movl 64(%esp), %eax # 4-byte Reload -.LBB211_26: - movl %eax, 48(%edx) - movl 80(%esp), %eax # 4-byte Reload - js .LBB211_28 -# BB#27: - movl 72(%esp), %eax # 4-byte Reload -.LBB211_28: - movl %eax, 52(%edx) - addl $1884, %esp # imm = 0x75C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end211: - .size mcl_fp_montNF14Lbmi2, .Lfunc_end211-mcl_fp_montNF14Lbmi2 - - .globl mcl_fp_montRed14Lbmi2 - .align 16, 0x90 - .type mcl_fp_montRed14Lbmi2,@function -mcl_fp_montRed14Lbmi2: # @mcl_fp_montRed14Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1068, %esp # imm = 0x42C - calll .L212$pb -.L212$pb: - popl %eax -.Ltmp43: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp43-.L212$pb), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 1096(%esp), %edx - movl -4(%edx), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 1092(%esp), %ecx - movl (%ecx), %ebx - movl %ebx, 92(%esp) # 4-byte Spill - movl 4(%ecx), %edi - movl %edi, 96(%esp) # 4-byte Spill - 
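# NOTE: the stream above closes mcl_fp_montNF14Lbmi2, a 448-bit (14 x 32-bit
# limb) Montgomery multiplication in the "NF" variant: each round folds in one
# multiplier word via .LmulPv448x32, folds in q = t0 * n' (the Montgomery
# constant cached at 36(%esp)), and the trailing sarl $31 / js chain selects
# between the accumulator and accumulator-minus-modulus by sign rather than by
# a carry test. mcl_fp_montRed14Lbmi2, which opens just above and continues
# below, spills the 28-limb input block and the 14-limb modulus to the stack
# before entering its reduction rounds.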
imull %eax, %ebx - movl 108(%ecx), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 104(%ecx), %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 100(%ecx), %esi - movl %esi, 120(%esp) # 4-byte Spill - movl 96(%ecx), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 92(%ecx), %esi - movl %esi, 140(%esp) # 4-byte Spill - movl 88(%ecx), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 84(%ecx), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 80(%ecx), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 76(%ecx), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 72(%ecx), %esi - movl %esi, 136(%esp) # 4-byte Spill - movl 68(%ecx), %esi - movl %esi, 168(%esp) # 4-byte Spill - movl 64(%ecx), %esi - movl %esi, 164(%esp) # 4-byte Spill - movl 60(%ecx), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 56(%ecx), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 52(%ecx), %edi - movl %edi, 144(%esp) # 4-byte Spill - movl 48(%ecx), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 44(%ecx), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 40(%ecx), %edi - movl %edi, 116(%esp) # 4-byte Spill - movl 36(%ecx), %ebp - movl 32(%ecx), %edi - movl 28(%ecx), %esi - movl 24(%ecx), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 20(%ecx), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 16(%ecx), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 12(%ecx), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 8(%ecx), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl (%edx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 52(%edx), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 48(%edx), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 44(%edx), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 40(%edx), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 36(%edx), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 32(%edx), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 28(%edx), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 24(%edx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 20(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 16(%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 12(%edx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 8(%edx), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 4(%edx), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl %ebx, (%esp) - leal 1008(%esp), %ecx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - movl 92(%esp), %eax # 4-byte Reload - addl 1008(%esp), %eax - movl 96(%esp), %ecx # 4-byte Reload - adcl 1012(%esp), %ecx - movl 84(%esp), %eax # 4-byte Reload - adcl 1016(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1020(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1024(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1028(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1032(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1036(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - adcl 1040(%esp), %edi - movl %edi, 96(%esp) # 4-byte Spill - adcl 1044(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1048(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl 1052(%esp), %ebp - movl 132(%esp), %eax # 4-byte Reload - adcl 1056(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 1060(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill 
- movl 172(%esp), %eax # 4-byte Reload - adcl 1064(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - adcl $0, 160(%esp) # 4-byte Folded Spill - adcl $0, 164(%esp) # 4-byte Folded Spill - adcl $0, 168(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - sbbl %edi, %edi - movl %ecx, %esi - movl %esi, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 944(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - movl %edi, %ecx - andl $1, %ecx - addl 944(%esp), %esi - movl 84(%esp), %edx # 4-byte Reload - adcl 948(%esp), %edx - movl 64(%esp), %eax # 4-byte Reload - adcl 952(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 956(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 960(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %edi # 4-byte Reload - adcl 976(%esp), %edi - movl 116(%esp), %eax # 4-byte Reload - adcl 980(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 984(%esp), %ebp - movl %ebp, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 988(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 996(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %esi # 4-byte Reload - adcl 1000(%esp), %esi - adcl $0, 164(%esp) # 4-byte Folded Spill - adcl $0, 168(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl %edx, %eax - movl %edx, %ebp - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 880(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 880(%esp), %ebp - movl 64(%esp), %ecx # 4-byte Reload - adcl 884(%esp), %ecx - movl 68(%esp), %eax # 4-byte Reload - adcl 888(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 908(%esp), %edi 
- movl %edi, 92(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 912(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %edi # 4-byte Reload - adcl 920(%esp), %edi - movl 144(%esp), %eax # 4-byte Reload - adcl 924(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 928(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - adcl 932(%esp), %esi - movl %esi, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 936(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - adcl $0, 168(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - movl 152(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %esi - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 816(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 816(%esp), %esi - movl 68(%esp), %ecx # 4-byte Reload - adcl 820(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 828(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 852(%esp), %edi - movl %edi, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 856(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 872(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 152(%esp) # 4-byte Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - movl 128(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - movl 104(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %ebp - movl %ebp, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 752(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 752(%esp), %ebp - movl 72(%esp), %ecx # 4-byte Reload - adcl 756(%esp), %ecx - movl 76(%esp), %eax 
# 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 780(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 784(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - movl 156(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 128(%esp) # 4-byte Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 104(%esp) # 4-byte Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %esi - movl %esi, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 688(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 688(%esp), %esi - movl 76(%esp), %ecx # 4-byte Reload - adcl 692(%esp), %ecx - movl 80(%esp), %eax # 4-byte Reload - adcl 696(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 712(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 716(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 720(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 728(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 732(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 736(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 156(%esp) # 4-byte Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - movl 140(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 128(%esp) # 4-byte Folded 
Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - movl 108(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %ebp - movl %ebp, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 624(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 624(%esp), %ebp - movl 80(%esp), %ecx # 4-byte Reload - adcl 628(%esp), %ecx - movl 96(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 636(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %ebp # 4-byte Reload - adcl 664(%esp), %ebp - movl 168(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 140(%esp) # 4-byte Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 108(%esp) # 4-byte Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %esi - movl %esi, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 560(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 560(%esp), %esi - movl 96(%esp), %ecx # 4-byte Reload - adcl 564(%esp), %ecx - movl 92(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - adcl 596(%esp), %ebp - movl %ebp, 164(%esp) # 4-byte Spill - movl 168(%esp), %edi # 4-byte Reload - adcl 600(%esp), %edi - movl 136(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte 
Reload - adcl 616(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - movl 120(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %ebp - movl %ebp, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 496(%esp), %ecx - movl 1096(%esp), %eax - movl %eax, %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 496(%esp), %ebp - movl 92(%esp), %ecx # 4-byte Reload - adcl 500(%esp), %ecx - movl 116(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %ebp # 4-byte Reload - adcl 516(%esp), %ebp - movl 172(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - adcl 532(%esp), %edi - movl %edi, 168(%esp) # 4-byte Spill - movl 136(%esp), %edi # 4-byte Reload - adcl 536(%esp), %edi - movl 148(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 120(%esp) # 4-byte Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %esi - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 432(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 432(%esp), %esi - movl 116(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl 112(%esp), %ecx # 4-byte Reload - adcl 440(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 444(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - adcl 448(%esp), %ebp - movl %ebp, 144(%esp) # 4-byte Spill - movl 172(%esp), %ecx # 4-byte Reload - adcl 452(%esp), %ecx - movl %ecx, 172(%esp) # 4-byte Spill - movl 160(%esp), %ebp # 4-byte Reload - adcl 456(%esp), %ebp - movl 164(%esp), %ecx # 4-byte Reload - adcl 460(%esp), %ecx - movl %ecx, 164(%esp) # 4-byte Spill - movl 168(%esp), %ecx # 4-byte Reload - adcl 464(%esp), %ecx - movl %ecx, 168(%esp) # 4-byte Spill - adcl 468(%esp), %edi - movl %edi, 136(%esp) # 4-byte Spill - movl 148(%esp), %ecx # 4-byte Reload - adcl 472(%esp), %ecx - movl %ecx, 148(%esp) # 4-byte Spill - movl 156(%esp), %ecx # 4-byte Reload - adcl 476(%esp), %ecx - movl %ecx, 156(%esp) # 4-byte Spill - movl 152(%esp), %ecx # 4-byte Reload - adcl 480(%esp), %ecx - movl %ecx, 152(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 484(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 140(%esp), %ecx # 
4-byte Reload - adcl 488(%esp), %ecx - movl %ecx, 140(%esp) # 4-byte Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %eax, %esi - movl 88(%esp), %edi # 4-byte Reload - imull %edi, %eax - movl %eax, (%esp) - leal 368(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 368(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl 132(%esp), %ecx # 4-byte Reload - adcl 376(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 144(%esp), %ecx # 4-byte Reload - adcl 380(%esp), %ecx - movl %ecx, 144(%esp) # 4-byte Spill - movl 172(%esp), %esi # 4-byte Reload - adcl 384(%esp), %esi - adcl 388(%esp), %ebp - movl %ebp, 160(%esp) # 4-byte Spill - movl 164(%esp), %ecx # 4-byte Reload - adcl 392(%esp), %ecx - movl %ecx, 164(%esp) # 4-byte Spill - movl 168(%esp), %ecx # 4-byte Reload - adcl 396(%esp), %ecx - movl %ecx, 168(%esp) # 4-byte Spill - movl 136(%esp), %ecx # 4-byte Reload - adcl 400(%esp), %ecx - movl %ecx, 136(%esp) # 4-byte Spill - movl 148(%esp), %ecx # 4-byte Reload - adcl 404(%esp), %ecx - movl %ecx, 148(%esp) # 4-byte Spill - movl 156(%esp), %ecx # 4-byte Reload - adcl 408(%esp), %ecx - movl %ecx, 156(%esp) # 4-byte Spill - movl 152(%esp), %ecx # 4-byte Reload - adcl 412(%esp), %ecx - movl %ecx, 152(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 416(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 140(%esp), %ecx # 4-byte Reload - adcl 420(%esp), %ecx - movl %ecx, 140(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 424(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %eax, %ebp - imull %edi, %eax - movl %eax, (%esp) - leal 304(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 304(%esp), %ebp - movl 132(%esp), %edi # 4-byte Reload - adcl 308(%esp), %edi - movl 144(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl %esi, %ebp - adcl 316(%esp), %ebp - movl 160(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 136(%esp), %esi # 4-byte Reload - adcl 332(%esp), %esi - movl 148(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - 
movl %edi, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 240(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 240(%esp), %edi - movl 144(%esp), %ecx # 4-byte Reload - adcl 244(%esp), %ecx - adcl 248(%esp), %ebp - movl %ebp, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - adcl 264(%esp), %esi - movl %esi, 136(%esp) # 4-byte Spill - movl 148(%esp), %edi # 4-byte Reload - adcl 268(%esp), %edi - movl 156(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 124(%esp), %ebp # 4-byte Reload - adcl 280(%esp), %ebp - movl 140(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl 88(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 176(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 176(%esp), %esi - movl 172(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %ebx # 4-byte Reload - adcl 188(%esp), %ebx - movl %ebx, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 136(%esp), %edx # 4-byte Reload - adcl 196(%esp), %edx - movl %edx, 136(%esp) # 4-byte Spill - movl %edi, %eax - adcl 200(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - adcl 212(%esp), %ebp - movl %ebp, 124(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 232(%esp), %ecx - movl 84(%esp), %esi # 4-byte Reload - adcl $0, %esi - movl 172(%esp), %edi # 4-byte Reload - subl 16(%esp), %edi # 4-byte Folded Reload - movl 160(%esp), %ebp # 4-byte Reload - sbbl 8(%esp), %ebp # 4-byte Folded Reload - sbbl 12(%esp), %ebx # 4-byte Folded Reload - movl 168(%esp), %eax # 4-byte Reload - sbbl 20(%esp), %eax # 4-byte Folded Reload - sbbl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - movl 148(%esp), %edx # 4-byte Reload - sbbl 28(%esp), %edx # 4-byte Folded Reload - 
movl %edx, 84(%esp) # 4-byte Spill - movl 156(%esp), %edx # 4-byte Reload - sbbl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl 152(%esp), %edx # 4-byte Reload - sbbl 36(%esp), %edx # 4-byte Folded Reload - movl %edx, 92(%esp) # 4-byte Spill - movl 124(%esp), %edx # 4-byte Reload - sbbl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 96(%esp) # 4-byte Spill - movl 140(%esp), %edx # 4-byte Reload - sbbl 44(%esp), %edx # 4-byte Folded Reload - movl %edx, 100(%esp) # 4-byte Spill - movl 128(%esp), %edx # 4-byte Reload - sbbl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, 112(%esp) # 4-byte Spill - movl 120(%esp), %edx # 4-byte Reload - sbbl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 116(%esp) # 4-byte Spill - movl 108(%esp), %edx # 4-byte Reload - sbbl 56(%esp), %edx # 4-byte Folded Reload - movl %edx, 132(%esp) # 4-byte Spill - movl %ecx, %edx - sbbl 60(%esp), %edx # 4-byte Folded Reload - movl %edx, 144(%esp) # 4-byte Spill - sbbl $0, %esi - andl $1, %esi - jne .LBB212_2 -# BB#1: - movl %eax, 168(%esp) # 4-byte Spill -.LBB212_2: - movl %esi, %edx - testb %dl, %dl - movl 172(%esp), %eax # 4-byte Reload - jne .LBB212_4 -# BB#3: - movl %edi, %eax -.LBB212_4: - movl 1088(%esp), %edi - movl %eax, (%edi) - movl %ecx, 104(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - movl 160(%esp), %ecx # 4-byte Reload - jne .LBB212_6 -# BB#5: - movl %ebp, %ecx -.LBB212_6: - movl %ecx, 4(%edi) - movl 108(%esp), %ecx # 4-byte Reload - movl 164(%esp), %ebp # 4-byte Reload - jne .LBB212_8 -# BB#7: - movl %ebx, %ebp -.LBB212_8: - movl %ebp, 8(%edi) - movl 168(%esp), %ebx # 4-byte Reload - movl %ebx, 12(%edi) - movl 124(%esp), %ebp # 4-byte Reload - movl 136(%esp), %ebx # 4-byte Reload - jne .LBB212_10 -# BB#9: - movl 80(%esp), %ebx # 4-byte Reload -.LBB212_10: - movl %ebx, 16(%edi) - movl 140(%esp), %ebx # 4-byte Reload - movl 148(%esp), %esi # 4-byte Reload - jne .LBB212_12 -# BB#11: - movl 84(%esp), %esi # 4-byte Reload -.LBB212_12: - movl %esi, 20(%edi) - movl 128(%esp), %esi # 4-byte Reload - jne .LBB212_14 -# BB#13: - movl 88(%esp), %eax # 4-byte Reload -.LBB212_14: - movl %eax, 24(%edi) - movl 120(%esp), %edx # 4-byte Reload - jne .LBB212_16 -# BB#15: - movl 92(%esp), %eax # 4-byte Reload - movl %eax, 152(%esp) # 4-byte Spill -.LBB212_16: - movl 152(%esp), %eax # 4-byte Reload - movl %eax, 28(%edi) - jne .LBB212_18 -# BB#17: - movl 96(%esp), %ebp # 4-byte Reload -.LBB212_18: - movl %ebp, 32(%edi) - jne .LBB212_20 -# BB#19: - movl 100(%esp), %ebx # 4-byte Reload -.LBB212_20: - movl %ebx, 36(%edi) - jne .LBB212_22 -# BB#21: - movl 112(%esp), %esi # 4-byte Reload -.LBB212_22: - movl %esi, 40(%edi) - jne .LBB212_24 -# BB#23: - movl 116(%esp), %edx # 4-byte Reload -.LBB212_24: - movl %edx, 44(%edi) - jne .LBB212_26 -# BB#25: - movl 132(%esp), %ecx # 4-byte Reload -.LBB212_26: - movl %ecx, 48(%edi) - movl 104(%esp), %eax # 4-byte Reload - jne .LBB212_28 -# BB#27: - movl 144(%esp), %eax # 4-byte Reload -.LBB212_28: - movl %eax, 52(%edi) - addl $1068, %esp # imm = 0x42C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end212: - .size mcl_fp_montRed14Lbmi2, .Lfunc_end212-mcl_fp_montRed14Lbmi2 - - .globl mcl_fp_addPre14Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre14Lbmi2,@function -mcl_fp_addPre14Lbmi2: # @mcl_fp_addPre14Lbmi2 -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - movl 20(%esp), %ecx - addl (%ecx), %edx - adcl 4(%ecx), %esi - movl 8(%eax), %edi - adcl 8(%ecx), %edi - 
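# NOTE: mcl_fp_montRed14Lbmi2 above reduces a 28-limb value t to t*R^-1 mod p:
# fourteen rounds each compute q = t[i] * n' (n' held at 88(%esp)) and add q*p
# via .LmulPv448x32 to cancel the low word, then the closing sbbl chain and
# jne .LBB212_* selects conditionally subtract the modulus once.
# mcl_fp_addPre14Lbmi2, begun here, is a plain 14-limb add with carry
# propagation (addl/adcl) and no modular reduction; the carry-out is returned
# in %eax via the sbbl %eax, %eax / andl $1, %eax idiom at its end.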
movl 16(%esp), %ebx - movl %edx, (%ebx) - movl 12(%ecx), %edx - movl %esi, 4(%ebx) - movl 16(%ecx), %esi - adcl 12(%eax), %edx - adcl 16(%eax), %esi - movl %edi, 8(%ebx) - movl 20(%eax), %edi - movl %edx, 12(%ebx) - movl 20(%ecx), %edx - adcl %edi, %edx - movl 24(%eax), %edi - movl %esi, 16(%ebx) - movl 24(%ecx), %esi - adcl %edi, %esi - movl 28(%eax), %edi - movl %edx, 20(%ebx) - movl 28(%ecx), %edx - adcl %edi, %edx - movl 32(%eax), %edi - movl %esi, 24(%ebx) - movl 32(%ecx), %esi - adcl %edi, %esi - movl 36(%eax), %edi - movl %edx, 28(%ebx) - movl 36(%ecx), %edx - adcl %edi, %edx - movl 40(%eax), %edi - movl %esi, 32(%ebx) - movl 40(%ecx), %esi - adcl %edi, %esi - movl 44(%eax), %edi - movl %edx, 36(%ebx) - movl 44(%ecx), %edx - adcl %edi, %edx - movl 48(%eax), %edi - movl %esi, 40(%ebx) - movl 48(%ecx), %esi - adcl %edi, %esi - movl %edx, 44(%ebx) - movl %esi, 48(%ebx) - movl 52(%eax), %eax - movl 52(%ecx), %ecx - adcl %eax, %ecx - movl %ecx, 52(%ebx) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end213: - .size mcl_fp_addPre14Lbmi2, .Lfunc_end213-mcl_fp_addPre14Lbmi2 - - .globl mcl_fp_subPre14Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre14Lbmi2,@function -mcl_fp_subPre14Lbmi2: # @mcl_fp_subPre14Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edi - xorl %eax, %eax - movl 28(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%ecx), %ebx - sbbl 8(%edx), %ebx - movl 20(%esp), %ebp - movl %esi, (%ebp) - movl 12(%ecx), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ebp) - movl 16(%ecx), %edi - sbbl 16(%edx), %edi - movl %ebx, 8(%ebp) - movl 20(%edx), %ebx - movl %esi, 12(%ebp) - movl 20(%ecx), %esi - sbbl %ebx, %esi - movl 24(%edx), %ebx - movl %edi, 16(%ebp) - movl 24(%ecx), %edi - sbbl %ebx, %edi - movl 28(%edx), %ebx - movl %esi, 20(%ebp) - movl 28(%ecx), %esi - sbbl %ebx, %esi - movl 32(%edx), %ebx - movl %edi, 24(%ebp) - movl 32(%ecx), %edi - sbbl %ebx, %edi - movl 36(%edx), %ebx - movl %esi, 28(%ebp) - movl 36(%ecx), %esi - sbbl %ebx, %esi - movl 40(%edx), %ebx - movl %edi, 32(%ebp) - movl 40(%ecx), %edi - sbbl %ebx, %edi - movl 44(%edx), %ebx - movl %esi, 36(%ebp) - movl 44(%ecx), %esi - sbbl %ebx, %esi - movl 48(%edx), %ebx - movl %edi, 40(%ebp) - movl 48(%ecx), %edi - sbbl %ebx, %edi - movl %esi, 44(%ebp) - movl %edi, 48(%ebp) - movl 52(%edx), %edx - movl 52(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 52(%ebp) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end214: - .size mcl_fp_subPre14Lbmi2, .Lfunc_end214-mcl_fp_subPre14Lbmi2 - - .globl mcl_fp_shr1_14Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_14Lbmi2,@function -mcl_fp_shr1_14Lbmi2: # @mcl_fp_shr1_14Lbmi2 -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - shrdl $1, %esi, %edx - movl 8(%esp), %ecx - movl %edx, (%ecx) - movl 8(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 4(%ecx) - movl 12(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 8(%ecx) - movl 16(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 12(%ecx) - movl 20(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 16(%ecx) - movl 24(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 20(%ecx) - movl 28(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 24(%ecx) - movl 32(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 28(%ecx) - movl 36(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 32(%ecx) - movl 40(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 36(%ecx) - movl 44(%eax), %esi 
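# NOTE: mcl_fp_subPre14Lbmi2 above mirrors addPre with subl/sbbl and returns
# the borrow-out in %eax. mcl_fp_shr1_14Lbmi2, in progress here, shifts a
# 14-limb value right by one bit: each shrdl $1 pulls the low bit of the
# next-higher limb into the top of the current limb, and a final shrl handles
# the most significant limb.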
- shrdl $1, %esi, %edx - movl %edx, 40(%ecx) - movl 48(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 44(%ecx) - movl 52(%eax), %eax - shrdl $1, %eax, %edx - movl %edx, 48(%ecx) - shrl %eax - movl %eax, 52(%ecx) - popl %esi - retl -.Lfunc_end215: - .size mcl_fp_shr1_14Lbmi2, .Lfunc_end215-mcl_fp_shr1_14Lbmi2 - - .globl mcl_fp_add14Lbmi2 - .align 16, 0x90 - .type mcl_fp_add14Lbmi2,@function -mcl_fp_add14Lbmi2: # @mcl_fp_add14Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $44, %esp - movl 72(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %ecx - movl 68(%esp), %ebp - addl (%ebp), %edx - movl %edx, 4(%esp) # 4-byte Spill - adcl 4(%ebp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 8(%eax), %ecx - adcl 8(%ebp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 12(%ebp), %edx - movl 16(%ebp), %ecx - adcl 12(%eax), %edx - movl %edx, 32(%esp) # 4-byte Spill - adcl 16(%eax), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 20(%ebp), %ecx - adcl 20(%eax), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 24(%ebp), %ecx - adcl 24(%eax), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 28(%ebp), %ecx - adcl 28(%eax), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 32(%ebp), %ecx - adcl 32(%eax), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 36(%ebp), %ecx - adcl 36(%eax), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 40(%ebp), %edx - adcl 40(%eax), %edx - movl %edx, (%esp) # 4-byte Spill - movl 44(%ebp), %ebx - adcl 44(%eax), %ebx - movl 48(%ebp), %esi - adcl 48(%eax), %esi - movl 52(%ebp), %edi - adcl 52(%eax), %edi - movl 64(%esp), %eax - movl 4(%esp), %ebp # 4-byte Reload - movl %ebp, (%eax) - movl 40(%esp), %ecx # 4-byte Reload - movl %ecx, 4(%eax) - movl 36(%esp), %ecx # 4-byte Reload - movl %ecx, 8(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 16(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 20(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 12(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl 8(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - movl %edx, 40(%eax) - movl %ebx, 44(%eax) - movl %esi, 48(%eax) - movl %edi, 52(%eax) - sbbl %ecx, %ecx - andl $1, %ecx - movl 76(%esp), %edx - subl (%edx), %ebp - movl %ebp, 4(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - sbbl 4(%edx), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 36(%esp), %ebp # 4-byte Reload - sbbl 8(%edx), %ebp - movl %ebp, 36(%esp) # 4-byte Spill - movl 32(%esp), %ebp # 4-byte Reload - sbbl 12(%edx), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - movl 28(%esp), %ebp # 4-byte Reload - sbbl 16(%edx), %ebp - movl %ebp, 28(%esp) # 4-byte Spill - movl 24(%esp), %ebp # 4-byte Reload - sbbl 20(%edx), %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl 20(%esp), %ebp # 4-byte Reload - sbbl 24(%edx), %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl 16(%esp), %ebp # 4-byte Reload - sbbl 28(%edx), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 12(%esp), %ebp # 4-byte Reload - sbbl 32(%edx), %ebp - movl %ebp, 12(%esp) # 4-byte Spill - movl 8(%esp), %ebp # 4-byte Reload - sbbl 36(%edx), %ebp - movl %ebp, 8(%esp) # 4-byte Spill - movl (%esp), %ebp # 4-byte Reload - sbbl 40(%edx), %ebp - sbbl 44(%edx), %ebx - sbbl 48(%edx), %esi - sbbl 52(%edx), %edi - sbbl $0, %ecx - testb $1, %cl - jne .LBB216_2 -# BB#1: # %nocarry - movl 4(%esp), %ecx # 4-byte Reload - movl %ecx, (%eax) - movl 40(%esp), %ecx # 4-byte Reload - movl %ecx, 
4(%eax) - movl 36(%esp), %ecx # 4-byte Reload - movl %ecx, 8(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 16(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 20(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 12(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl 8(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - movl %ebp, 40(%eax) - movl %ebx, 44(%eax) - movl %esi, 48(%eax) - movl %edi, 52(%eax) -.LBB216_2: # %carry - addl $44, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end216: - .size mcl_fp_add14Lbmi2, .Lfunc_end216-mcl_fp_add14Lbmi2 - - .globl mcl_fp_addNF14Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF14Lbmi2,@function -mcl_fp_addNF14Lbmi2: # @mcl_fp_addNF14Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $112, %esp - movl 140(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - movl 136(%esp), %ecx - addl (%ecx), %edx - movl %edx, 72(%esp) # 4-byte Spill - adcl 4(%ecx), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 52(%eax), %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 48(%eax), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 44(%eax), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 40(%eax), %ebp - movl 36(%eax), %edx - movl %edx, 104(%esp) # 4-byte Spill - movl 32(%eax), %edx - movl %edx, 100(%esp) # 4-byte Spill - movl 28(%eax), %edx - movl %edx, 108(%esp) # 4-byte Spill - movl 24(%eax), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 20(%eax), %ebx - movl 16(%eax), %edi - movl 12(%eax), %esi - movl 8(%eax), %edx - adcl 8(%ecx), %edx - movl %edx, 56(%esp) # 4-byte Spill - adcl 12(%ecx), %esi - movl %esi, 60(%esp) # 4-byte Spill - adcl 16(%ecx), %edi - movl %edi, 64(%esp) # 4-byte Spill - adcl 20(%ecx), %ebx - movl %ebx, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 24(%ecx), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 28(%ecx), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 32(%ecx), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 36(%ecx), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 40(%ecx), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl 44(%ecx), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - movl 96(%esp), %ebp # 4-byte Reload - adcl 48(%ecx), %ebp - movl %ebp, 96(%esp) # 4-byte Spill - movl 92(%esp), %ebp # 4-byte Reload - adcl 52(%ecx), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - movl 144(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - subl (%ecx), %eax - movl %eax, (%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - sbbl 4(%ecx), %eax - movl %eax, 4(%esp) # 4-byte Spill - sbbl 8(%ecx), %edx - movl %edx, 8(%esp) # 4-byte Spill - sbbl 12(%ecx), %esi - movl %esi, 12(%esp) # 4-byte Spill - sbbl 16(%ecx), %edi - movl %edi, 16(%esp) # 4-byte Spill - sbbl 20(%ecx), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - movl %edx, %eax - sbbl 24(%ecx), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - sbbl 28(%ecx), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - sbbl 32(%ecx), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 104(%esp), %esi # 4-byte Reload - sbbl 36(%ecx), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - 
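# NOTE: mcl_fp_add14Lbmi2 above is the reduced add: it stores x + y, then
# subtracts the modulus limb-by-limb and commits the subtracted result only on
# the "nocarry" path (testb $1, %cl / jne .LBB216_2). Roughly, with
# hypothetical add_n/sub_n limb helpers:
#   carry  = add_n(z, x, y);     /* z = x + y  (mod 2^448), carry out  */
#   borrow = sub_n(t, z, p);     /* t = z - p  (mod 2^448), borrow out */
#   if (carry == borrow) z = t;  /* keep the reduced value             */
# mcl_fp_addNF14Lbmi2, continued below, reaches the same selection with a
# sign test instead of the carry flag.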
sbbl 40(%ecx), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - movl %eax, %esi - movl %eax, %ebp - sbbl 44(%ecx), %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - movl %eax, %esi - sbbl 48(%ecx), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - movl %eax, %edi - sbbl 52(%ecx), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl %edi, %ecx - sarl $31, %ecx - testl %ecx, %ecx - movl 72(%esp), %ecx # 4-byte Reload - js .LBB217_2 -# BB#1: - movl (%esp), %ecx # 4-byte Reload -.LBB217_2: - movl 132(%esp), %edi - movl %ecx, (%edi) - movl 76(%esp), %eax # 4-byte Reload - js .LBB217_4 -# BB#3: - movl 4(%esp), %eax # 4-byte Reload -.LBB217_4: - movl %eax, 4(%edi) - movl %edx, %ecx - movl 64(%esp), %eax # 4-byte Reload - movl 56(%esp), %edx # 4-byte Reload - js .LBB217_6 -# BB#5: - movl 8(%esp), %edx # 4-byte Reload -.LBB217_6: - movl %edx, 8(%edi) - movl %ebp, %edx - movl 104(%esp), %ebx # 4-byte Reload - movl 60(%esp), %ebp # 4-byte Reload - js .LBB217_8 -# BB#7: - movl 12(%esp), %ebp # 4-byte Reload -.LBB217_8: - movl %ebp, 12(%edi) - movl 100(%esp), %ebp # 4-byte Reload - js .LBB217_10 -# BB#9: - movl 16(%esp), %eax # 4-byte Reload -.LBB217_10: - movl %eax, 16(%edi) - movl 80(%esp), %esi # 4-byte Reload - js .LBB217_12 -# BB#11: - movl 20(%esp), %eax # 4-byte Reload - movl %eax, 68(%esp) # 4-byte Spill -.LBB217_12: - movl 68(%esp), %eax # 4-byte Reload - movl %eax, 20(%edi) - js .LBB217_14 -# BB#13: - movl 24(%esp), %ecx # 4-byte Reload -.LBB217_14: - movl %ecx, 24(%edi) - js .LBB217_16 -# BB#15: - movl 28(%esp), %eax # 4-byte Reload - movl %eax, 108(%esp) # 4-byte Spill -.LBB217_16: - movl 108(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%edi) - js .LBB217_18 -# BB#17: - movl 32(%esp), %ebp # 4-byte Reload -.LBB217_18: - movl %ebp, 32(%edi) - js .LBB217_20 -# BB#19: - movl 36(%esp), %ebx # 4-byte Reload -.LBB217_20: - movl %ebx, 36(%edi) - js .LBB217_22 -# BB#21: - movl 40(%esp), %esi # 4-byte Reload -.LBB217_22: - movl %esi, 40(%edi) - movl 96(%esp), %eax # 4-byte Reload - js .LBB217_24 -# BB#23: - movl 44(%esp), %edx # 4-byte Reload -.LBB217_24: - movl %edx, 44(%edi) - movl 92(%esp), %ecx # 4-byte Reload - js .LBB217_26 -# BB#25: - movl 48(%esp), %eax # 4-byte Reload -.LBB217_26: - movl %eax, 48(%edi) - js .LBB217_28 -# BB#27: - movl 52(%esp), %ecx # 4-byte Reload -.LBB217_28: - movl %ecx, 52(%edi) - addl $112, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end217: - .size mcl_fp_addNF14Lbmi2, .Lfunc_end217-mcl_fp_addNF14Lbmi2 - - .globl mcl_fp_sub14Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub14Lbmi2,@function -mcl_fp_sub14Lbmi2: # @mcl_fp_sub14Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $52, %esp - movl 76(%esp), %esi - movl (%esi), %eax - movl 4(%esi), %ecx - xorl %ebx, %ebx - movl 80(%esp), %edi - subl (%edi), %eax - movl %eax, 44(%esp) # 4-byte Spill - sbbl 4(%edi), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 8(%esi), %eax - sbbl 8(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 12(%esi), %eax - sbbl 12(%edi), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 16(%esi), %eax - sbbl 16(%edi), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 20(%esi), %eax - sbbl 20(%edi), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 24(%esi), %eax - sbbl 24(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 28(%esi), %eax - sbbl 28(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 32(%esi), %eax - sbbl 32(%edi), %eax - movl %eax, 20(%esp) 
# 4-byte Spill - movl 36(%esi), %edx - sbbl 36(%edi), %edx - movl %edx, 12(%esp) # 4-byte Spill - movl 40(%esi), %ecx - sbbl 40(%edi), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 44(%esi), %eax - sbbl 44(%edi), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 48(%esi), %ebp - sbbl 48(%edi), %ebp - movl 52(%esi), %esi - sbbl 52(%edi), %esi - sbbl $0, %ebx - testb $1, %bl - movl 72(%esp), %ebx - movl 44(%esp), %edi # 4-byte Reload - movl %edi, (%ebx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 4(%ebx) - movl 36(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl 48(%esp), %edi # 4-byte Reload - movl %edi, 12(%ebx) - movl 40(%esp), %edi # 4-byte Reload - movl %edi, 16(%ebx) - movl 32(%esp), %edi # 4-byte Reload - movl %edi, 20(%ebx) - movl 28(%esp), %edi # 4-byte Reload - movl %edi, 24(%ebx) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 28(%ebx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 32(%ebx) - movl %edx, 36(%ebx) - movl %ecx, 40(%ebx) - movl %eax, 44(%ebx) - movl %ebp, 48(%ebx) - movl %esi, 52(%ebx) - je .LBB218_2 -# BB#1: # %carry - movl %esi, (%esp) # 4-byte Spill - movl 84(%esp), %esi - movl 44(%esp), %ecx # 4-byte Reload - addl (%esi), %ecx - movl %ecx, (%ebx) - movl 16(%esp), %edx # 4-byte Reload - adcl 4(%esi), %edx - movl %edx, 4(%ebx) - movl 36(%esp), %edi # 4-byte Reload - adcl 8(%esi), %edi - movl 12(%esi), %eax - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %edi, 8(%ebx) - movl 16(%esi), %ecx - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl %eax, 12(%ebx) - movl 20(%esi), %eax - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %ecx, 16(%ebx) - movl 24(%esi), %ecx - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %eax, 20(%ebx) - movl 28(%esi), %eax - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %ecx, 24(%ebx) - movl 32(%esi), %ecx - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %eax, 28(%ebx) - movl 36(%esi), %eax - adcl 12(%esp), %eax # 4-byte Folded Reload - movl %ecx, 32(%ebx) - movl 40(%esi), %ecx - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %eax, 36(%ebx) - movl 44(%esi), %eax - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %ecx, 40(%ebx) - movl %eax, 44(%ebx) - movl 48(%esi), %eax - adcl %ebp, %eax - movl %eax, 48(%ebx) - movl 52(%esi), %eax - adcl (%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%ebx) -.LBB218_2: # %nocarry - addl $52, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end218: - .size mcl_fp_sub14Lbmi2, .Lfunc_end218-mcl_fp_sub14Lbmi2 - - .globl mcl_fp_subNF14Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF14Lbmi2,@function -mcl_fp_subNF14Lbmi2: # @mcl_fp_subNF14Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $88, %esp - movl 112(%esp), %ecx - movl 52(%ecx), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl (%ecx), %edx - movl 4(%ecx), %eax - movl 116(%esp), %edi - subl (%edi), %edx - movl %edx, 56(%esp) # 4-byte Spill - sbbl 4(%edi), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%ecx), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 44(%ecx), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 40(%ecx), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 36(%ecx), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 32(%ecx), %ebp - movl 28(%ecx), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 24(%ecx), %ebx - movl 20(%ecx), %esi - movl 16(%ecx), %edx - movl 12(%ecx), %eax - movl 8(%ecx), %ecx - sbbl 8(%edi), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - sbbl 12(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - sbbl 16(%edi), %edx - movl %edx, 40(%esp) # 
4-byte Spill - sbbl 20(%edi), %esi - movl %esi, 44(%esp) # 4-byte Spill - sbbl 24(%edi), %ebx - movl %ebx, 48(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - sbbl 28(%edi), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - sbbl 32(%edi), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - sbbl 36(%edi), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - sbbl 40(%edi), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - sbbl 44(%edi), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - sbbl 48(%edi), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - sbbl 52(%edi), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl %eax, %esi - sarl $31, %esi - movl %esi, %ecx - addl %ecx, %ecx - movl %esi, %ebp - adcl %ebp, %ebp - shrl $31, %eax - orl %ecx, %eax - movl 120(%esp), %edi - andl 4(%edi), %ebp - andl (%edi), %eax - movl 52(%edi), %ecx - andl %esi, %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 48(%edi), %ecx - andl %esi, %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 44(%edi), %ecx - andl %esi, %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 40(%edi), %ecx - andl %esi, %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 36(%edi), %ecx - andl %esi, %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 32(%edi), %ecx - andl %esi, %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 28(%edi), %ecx - andl %esi, %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 24(%edi), %ecx - andl %esi, %ecx - movl %ecx, (%esp) # 4-byte Spill - movl 20(%edi), %ebx - andl %esi, %ebx - movl 16(%edi), %edx - andl %esi, %edx - movl 12(%edi), %ecx - andl %esi, %ecx - andl 8(%edi), %esi - addl 56(%esp), %eax # 4-byte Folded Reload - adcl 60(%esp), %ebp # 4-byte Folded Reload - movl 108(%esp), %edi - movl %eax, (%edi) - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %ebp, 4(%edi) - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %esi, 8(%edi) - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %ecx, 12(%edi) - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %edx, 16(%edi) - movl (%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %ebx, 20(%edi) - movl 4(%esp), %ecx # 4-byte Reload - adcl 84(%esp), %ecx # 4-byte Folded Reload - movl %eax, 24(%edi) - movl 8(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %ecx, 28(%edi) - movl 12(%esp), %ecx # 4-byte Reload - adcl 68(%esp), %ecx # 4-byte Folded Reload - movl %eax, 32(%edi) - movl 16(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %ecx, 36(%edi) - movl 20(%esp), %ecx # 4-byte Reload - adcl 76(%esp), %ecx # 4-byte Folded Reload - movl %eax, 40(%edi) - movl 24(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %ecx, 44(%edi) - movl %eax, 48(%edi) - movl 28(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%edi) - addl $88, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end219: - .size mcl_fp_subNF14Lbmi2, .Lfunc_end219-mcl_fp_subNF14Lbmi2 - - .globl mcl_fpDbl_add14Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add14Lbmi2,@function -mcl_fpDbl_add14Lbmi2: # @mcl_fpDbl_add14Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $100, %esp - movl 128(%esp), %ecx - movl 124(%esp), %esi - movl 12(%esi), %edi - movl 16(%esi), %edx - movl 8(%ecx), %ebx - movl (%ecx), %ebp - addl (%esi), %ebp - movl 120(%esp), %eax - movl %ebp, (%eax) - movl 
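Since the hunks above are generated code, it may help to note what they compute. Below is a minimal C model of the mcl_fp_add14Lbmi2 shape visible in the deleted lines: a 14-limb carry-chain add, a trial subtraction of the modulus, and selection of the reduced result. The function and parameter names (fp_add_14, N, argument order) are our assumptions; only the symbol names come from the file.

    #include <stdint.h>

    #define N 14                      /* 14 x 32-bit limbs = 448-bit values */

    /* Sketch of the mcl_fp_add14Lbmi2 shape: z = x + y (mod p).
     * Names are ours; the structure (carry-chain add, trial subtraction
     * of p, select) mirrors the deleted assembly. */
    static void fp_add_14(uint32_t z[N], const uint32_t x[N],
                          const uint32_t y[N], const uint32_t p[N]) {
        uint32_t t[N], u[N];
        uint64_t carry = 0, borrow = 0;

        for (int i = 0; i < N; i++) {      /* addl/adcl chain */
            uint64_t s = (uint64_t)x[i] + y[i] + carry;
            t[i] = (uint32_t)s;
            carry = s >> 32;
        }
        for (int i = 0; i < N; i++) {      /* subl/sbbl trial reduction */
            uint64_t d = (uint64_t)t[i] - p[i] - borrow;
            u[i] = (uint32_t)d;
            borrow = d >> 63;              /* 1 iff the limb underflowed */
        }
        /* keep the reduced u iff x + y >= p (carry out, or no borrow) */
        const uint32_t *r = (carry || !borrow) ? u : t;
        for (int i = 0; i < N; i++) z[i] = r[i];
    }

The addNF/subNF variants make the same choice without the final branch: the deleted code broadcasts the sign of the trial subtraction with sarl $31 and uses it to mask the modulus (subNF) or to select limbs (addNF).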
-	.globl	mcl_fpDbl_add14Lbmi2
-	.align	16, 0x90
-	.type	mcl_fpDbl_add14Lbmi2,@function
-mcl_fpDbl_add14Lbmi2:                   # @mcl_fpDbl_add14Lbmi2
-	[... generated body elided: 28-limb addition of two double-width
-	     operands; the low 14 result limbs are stored directly and only
-	     the high half is trial-subtracted against the modulus ...]
-.Lfunc_end220:
-	.size	mcl_fpDbl_add14Lbmi2, .Lfunc_end220-mcl_fpDbl_add14Lbmi2
-
-	.globl	mcl_fpDbl_sub14Lbmi2
-	.align	16, 0x90
-	.type	mcl_fpDbl_sub14Lbmi2,@function
-mcl_fpDbl_sub14Lbmi2:                   # @mcl_fpDbl_sub14Lbmi2
-	[... generated body elided: 28-limb subtraction; on borrow the
-	     modulus limbs, masked one by one, are added back into the
-	     high half ...]
-.Lfunc_end221:
-	.size	mcl_fpDbl_sub14Lbmi2, .Lfunc_end221-mcl_fpDbl_sub14Lbmi2
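The double-width (fpDbl) variants act on full 28-limb products, the intermediates that later feed Montgomery reduction. A minimal model of mcl_fpDbl_add14Lbmi2 under the same naming assumptions as above; as in the deleted code, the low half is written back unconditionally and only the high half is trial-reduced:

    #include <stdint.h>

    #define N 14       /* field limbs; double-width values carry 2*N limbs */

    /* Sketch of mcl_fpDbl_add14Lbmi2 (names and layout assumed):
     * z = x + y for 28-limb double-width values. */
    static void fpDbl_add_14(uint32_t z[2 * N], const uint32_t x[2 * N],
                             const uint32_t y[2 * N], const uint32_t p[N]) {
        uint64_t carry = 0;
        for (int i = 0; i < 2 * N; i++) {  /* full-width adcl chain */
            uint64_t s = (uint64_t)x[i] + y[i] + carry;
            z[i] = (uint32_t)s;
            carry = s >> 32;
        }
        uint32_t u[N];
        uint64_t borrow = 0;
        for (int i = 0; i < N; i++) {      /* trial-subtract p from the top */
            uint64_t d = (uint64_t)z[N + i] - p[i] - borrow;
            u[i] = (uint32_t)d;
            borrow = d >> 63;
        }
        if (carry || !borrow)              /* high half >= p: keep reduced */
            for (int i = 0; i < N; i++) z[N + i] = u[i];
    }

Reducing only the high half keeps the double-width value inside the range the subsequent reduction step expects while leaving the low product words untouched.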
-	.align	16, 0x90
-	.type	.LmulPv480x32,@function
-.LmulPv480x32:                          # @mulPv480x32
-	[... generated body elided: fifteen mulxl steps multiply a 480-bit
-	     operand by one 32-bit word; MULX leaves the flags intact, so a
-	     single adcl chain accumulates the partial products ...]
-.Lfunc_end222:
-	.size	.LmulPv480x32, .Lfunc_end222-.LmulPv480x32
-
-	.globl	mcl_fp_mulUnitPre15Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_mulUnitPre15Lbmi2,@function
-mcl_fp_mulUnitPre15Lbmi2:               # @mcl_fp_mulUnitPre15Lbmi2
-	[... generated body elided: sets up the GOT pointer, makes a single
-	     calll .LmulPv480x32 and copies the 16-word result out ...]
-.Lfunc_end223:
-	.size	mcl_fp_mulUnitPre15Lbmi2, .Lfunc_end223-mcl_fp_mulUnitPre15Lbmi2
-
-	.globl	mcl_fpDbl_mulPre15Lbmi2
-	.align	16, 0x90
-	.type	mcl_fpDbl_mulPre15Lbmi2,@function
-mcl_fpDbl_mulPre15Lbmi2:                # @mcl_fpDbl_mulPre15Lbmi2
-	[... generated body elided: schoolbook 15x15-limb multiply; one
-	     calll .LmulPv480x32 per word of the second operand, each
-	     partial product accumulated into the running 30-word result ...]
-.Lfunc_end224:
-	.size	mcl_fpDbl_mulPre15Lbmi2, .Lfunc_end224-mcl_fpDbl_mulPre15Lbmi2
$_GLOBAL_OFFSET_TABLE_+(.Ltmp46-.L225$pb), %ebx - movl %ebx, 116(%esp) # 4-byte Spill - movl 1108(%esp), %edx - movl (%edx), %eax - movl %eax, (%esp) - leal 1016(%esp), %ecx - movl %edx, %edi - movl %ebx, %esi - calll .LmulPv480x32 - movl 1076(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 1072(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 1068(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 1060(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 1056(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1052(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 1044(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1040(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1036(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 1032(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 1028(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 1024(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 1016(%esp), %eax - movl 1020(%esp), %ebp - movl 1104(%esp), %ecx - movl %eax, (%ecx) - movl %edi, %edx - movl 4(%edx), %eax - movl %eax, (%esp) - leal 952(%esp), %ecx - movl %esi, %ebx - calll .LmulPv480x32 - addl 952(%esp), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 1012(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 1008(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 1004(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 1000(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 996(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 992(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 988(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 984(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 980(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 976(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 972(%esp), %edi - movl 968(%esp), %esi - movl 964(%esp), %edx - movl 956(%esp), %eax - movl 960(%esp), %ecx - movl 1104(%esp), %ebp - movl 16(%esp), %ebx # 4-byte Reload - movl %ebx, 4(%ebp) - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 36(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - adcl 
$0, 56(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 8(%edx), %eax - movl %eax, (%esp) - leal 888(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 72(%esp), %eax # 4-byte Reload - addl 888(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 948(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 944(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 940(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 936(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 932(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 928(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 924(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 920(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 916(%esp), %ebx - movl 912(%esp), %edi - movl 908(%esp), %esi - movl 904(%esp), %edx - movl 900(%esp), %ecx - movl 892(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 896(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 72(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%eax) - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 112(%esp) # 4-byte Folded Spill - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 68(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 12(%edx), %eax - movl %eax, (%esp) - leal 824(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 824(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 884(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 880(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 876(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 872(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 868(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 864(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 860(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 856(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 852(%esp), %ebx - movl 848(%esp), %edi - movl 844(%esp), %esi - movl 840(%esp), %edx - movl 836(%esp), %ecx - movl 828(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 832(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 112(%esp), %ebp # 4-byte Reload - movl %ebp, 
12(%eax) - movl 60(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 16(%edx), %eax - movl %eax, (%esp) - leal 760(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 760(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 820(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 816(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 812(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 808(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 804(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 800(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 796(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 792(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 788(%esp), %ebx - movl 784(%esp), %edi - movl 780(%esp), %esi - movl 776(%esp), %edx - movl 772(%esp), %ecx - movl 764(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 768(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 60(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%eax) - movl 112(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - adcl 68(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl 
%eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 20(%edx), %eax - movl %eax, (%esp) - leal 696(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 696(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 756(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 752(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 748(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 744(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 740(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 736(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 732(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 728(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 724(%esp), %ebx - movl 720(%esp), %edi - movl 716(%esp), %esi - movl 712(%esp), %edx - movl 708(%esp), %ecx - movl 700(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 704(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 112(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 60(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 24(%edx), %eax - movl %eax, (%esp) - leal 632(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 632(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 692(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 688(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 684(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 680(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 676(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 672(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 668(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 664(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 660(%esp), %ebx - movl 656(%esp), %edi - movl 652(%esp), %esi - 
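
mcl_fpDbl_sqrPre15Lbmi2 runs the identical per-limb schedule; the visible difference is that both the scalar limb and the vector operand are read through the same argument at 1108(%esp), so this squaring appears to be plain self-multiplication rather than a dedicated squaring formula. In terms of the hypothetical helpers sketched earlier:

    /* Squaring reuses the multiply schedule with both operands aliased. */
    void sqrPre(uint32_t z[2 * LIMBS], const uint32_t x[LIMBS])
    {
        mulPre(z, x, x);
    }
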
movl 648(%esp), %edx - movl 644(%esp), %ecx - movl 636(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 640(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 24(%eax) - movl 112(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 64(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 68(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 28(%edx), %eax - movl %eax, (%esp) - leal 568(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 568(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 628(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 624(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 620(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 616(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 612(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 608(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 604(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 600(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 596(%esp), %ebx - movl 592(%esp), %edi - movl 588(%esp), %esi - movl 584(%esp), %edx - movl 580(%esp), %ecx - movl 572(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 576(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 112(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 68(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl 
%eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 32(%edx), %eax - movl %eax, (%esp) - leal 504(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 504(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 564(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 560(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 556(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 552(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 548(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 544(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 540(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 536(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 532(%esp), %ebx - movl 528(%esp), %edi - movl 524(%esp), %esi - movl 520(%esp), %edx - movl 516(%esp), %ecx - movl 508(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 512(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 32(%eax) - movl 112(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 36(%edx), %eax - movl %eax, (%esp) - leal 440(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 440(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 500(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 496(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 492(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 
488(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 484(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 480(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 476(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 472(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 468(%esp), %ebx - movl 464(%esp), %edi - movl 460(%esp), %esi - movl 456(%esp), %edx - movl 452(%esp), %ecx - movl 444(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 448(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 112(%esp), %ebp # 4-byte Reload - movl %ebp, 36(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 40(%edx), %eax - movl %eax, (%esp) - leal 376(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 376(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 436(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 432(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 428(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 424(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 420(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 416(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 412(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 408(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 404(%esp), %ebx - movl 400(%esp), %edi - movl 396(%esp), %esi - movl 392(%esp), %edx - movl 388(%esp), %ecx - movl 380(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 384(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 40(%eax) - movl 112(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 
32(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 44(%edx), %eax - movl %eax, (%esp) - leal 312(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 312(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 372(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 368(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 364(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 360(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 356(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 352(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 348(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 344(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 340(%esp), %ebx - movl 336(%esp), %edi - movl 332(%esp), %esi - movl 328(%esp), %edx - movl 324(%esp), %ecx - movl 316(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 320(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 112(%esp), %ebp # 4-byte Reload - movl %ebp, 44(%eax) - movl 36(%esp), %eax # 4-byte Reload - adcl %eax, 108(%esp) # 4-byte Folded Spill - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 48(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 48(%edx), %eax - movl %eax, (%esp) - leal 248(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload 
- calll .LmulPv480x32 - movl 108(%esp), %eax # 4-byte Reload - addl 248(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 308(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 304(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 300(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 296(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 292(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 288(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 284(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 280(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 276(%esp), %ebx - movl 272(%esp), %edi - movl 268(%esp), %edx - movl 264(%esp), %ecx - movl 260(%esp), %eax - movl 252(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 256(%esp), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 108(%esp), %esi # 4-byte Reload - movl 1104(%esp), %ebp - movl %esi, 48(%ebp) - movl 112(%esp), %esi # 4-byte Reload - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 112(%esp) # 4-byte Spill - movl 20(%esp), %esi # 4-byte Reload - adcl %esi, 36(%esp) # 4-byte Folded Spill - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 32(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 56(%esp), %esi # 4-byte Folded Reload - movl %esi, 100(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 52(%edx), %eax - movl %eax, (%esp) - leal 184(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 184(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 244(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 240(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 236(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 232(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 228(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 224(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 220(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 216(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 212(%esp), %ebx - movl 208(%esp), %edx - movl 204(%esp), %ecx - movl 200(%esp), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 196(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 188(%esp), %eax - movl 192(%esp), %esi - movl 112(%esp), %ebp # 4-byte Reload - movl 1104(%esp), %edi - movl %ebp, 52(%edi) - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl %esi, %ebp - adcl 12(%esp), 
%ebp # 4-byte Folded Reload - movl 72(%esp), %esi # 4-byte Reload - adcl 20(%esp), %esi # 4-byte Folded Reload - movl 24(%esp), %edi # 4-byte Reload - adcl %edi, 40(%esp) # 4-byte Folded Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 28(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 52(%esp), %ebx # 4-byte Reload - adcl %ebx, 64(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 56(%edx), %eax - movl %eax, (%esp) - leal 120(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 120(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 124(%esp), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - adcl 128(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 180(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 176(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 172(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 168(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 164(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 160(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 156(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 152(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 148(%esp), %ebp - movl 144(%esp), %edi - movl 140(%esp), %esi - movl 136(%esp), %edx - movl 132(%esp), %ecx - movl 1104(%esp), %eax - movl 112(%esp), %ebx # 4-byte Reload - movl %ebx, 56(%eax) - movl 32(%esp), %ebx # 4-byte Reload - movl %ebx, 60(%eax) - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl 72(%esp), %ebx # 4-byte Reload - movl %ebx, 64(%eax) - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %ecx, 68(%eax) - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %edx, 72(%eax) - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %esi, 76(%eax) - adcl 76(%esp), %ebp # 4-byte Folded Reload - movl %edi, 80(%eax) - movl 44(%esp), %edx # 4-byte Reload - adcl 64(%esp), %edx # 4-byte Folded Reload - movl %ebp, 84(%eax) - movl 52(%esp), %ecx # 4-byte Reload - adcl 84(%esp), %ecx # 4-byte Folded Reload - movl %edx, 88(%eax) - movl 68(%esp), %edx # 4-byte Reload - adcl 92(%esp), %edx # 4-byte Folded Reload - movl %ecx, 92(%eax) - movl 80(%esp), %ecx # 4-byte Reload - adcl 104(%esp), %ecx # 4-byte Folded Reload - movl %edx, 96(%eax) - movl 88(%esp), %edx # 4-byte Reload - adcl 108(%esp), %edx # 4-byte Folded Reload - movl %ecx, 100(%eax) - movl 96(%esp), %ecx # 4-byte Reload - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %edx, 104(%eax) - movl %ecx, 108(%eax) - movl 100(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 112(%eax) - movl 116(%esp), %ecx # 4-byte Reload - adcl $0, %ecx 
- movl %ecx, 116(%eax) - addl $1084, %esp # imm = 0x43C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end225: - .size mcl_fpDbl_sqrPre15Lbmi2, .Lfunc_end225-mcl_fpDbl_sqrPre15Lbmi2 - - .globl mcl_fp_mont15Lbmi2 - .align 16, 0x90 - .type mcl_fp_mont15Lbmi2,@function -mcl_fp_mont15Lbmi2: # @mcl_fp_mont15Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $2044, %esp # imm = 0x7FC - calll .L226$pb -.L226$pb: - popl %ebx -.Ltmp47: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp47-.L226$pb), %ebx - movl 2076(%esp), %eax - movl -4(%eax), %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1976(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 1976(%esp), %ebp - movl 1980(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl %ebp, %eax - imull %esi, %eax - movl 2036(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 2032(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 2028(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 2024(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 2020(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 2016(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 2012(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 2008(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 2004(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 2000(%esp), %edi - movl 1996(%esp), %esi - movl 1992(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 1988(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 1984(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl %eax, (%esp) - leal 1912(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - addl 1912(%esp), %ebp - movl 88(%esp), %eax # 4-byte Reload - adcl 1916(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1920(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1924(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1928(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 1932(%esp), %esi - adcl 1936(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 1940(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1944(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1948(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1952(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1956(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1960(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1964(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1968(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %ebp # 4-byte Reload - adcl 1972(%esp), %ebp - sbbl %eax, %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 1848(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 116(%esp), %eax # 4-byte Reload - andl $1, %eax - movl 88(%esp), %edx # 4-byte Reload - addl 1848(%esp), %edx - movl 96(%esp), %ecx # 4-byte Reload - adcl 1852(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1856(%esp), %ecx - movl %ecx, 
84(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1860(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl 1864(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - adcl 1868(%esp), %edi - movl 56(%esp), %ecx # 4-byte Reload - adcl 1872(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1876(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1880(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1884(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1888(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1892(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1896(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1900(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - adcl 1904(%esp), %ebp - movl %ebp, 108(%esp) # 4-byte Spill - adcl 1908(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %edx, %eax - movl %edx, %esi - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1784(%esp), %ecx - movl 2076(%esp), %eax - movl %eax, %edx - calll .LmulPv480x32 - andl $1, %ebp - movl %ebp, %ecx - addl 1784(%esp), %esi - movl 96(%esp), %eax # 4-byte Reload - adcl 1788(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1792(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1796(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1800(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 1804(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1808(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 1812(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 1816(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1820(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1824(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1828(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1832(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %esi # 4-byte Reload - adcl 1836(%esp), %esi - movl 108(%esp), %ebp # 4-byte Reload - adcl 1840(%esp), %ebp - movl 116(%esp), %eax # 4-byte Reload - adcl 1844(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 1720(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 96(%esp), %ecx # 4-byte Reload - addl 1720(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1724(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1728(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1732(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1736(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1740(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1744(%esp), %edi - movl 
%edi, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1748(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1752(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 1756(%esp), %edi - movl 100(%esp), %eax # 4-byte Reload - adcl 1760(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1764(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 1768(%esp), %esi - movl %esi, 104(%esp) # 4-byte Spill - adcl 1772(%esp), %ebp - movl %ebp, 108(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1776(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %esi # 4-byte Reload - adcl 1780(%esp), %esi - sbbl %ebp, %ebp - movl %ecx, %eax - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1656(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - andl $1, %ebp - movl %ebp, %ecx - movl 96(%esp), %eax # 4-byte Reload - addl 1656(%esp), %eax - movl 84(%esp), %eax # 4-byte Reload - adcl 1660(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1664(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1668(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1672(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1676(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1680(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1684(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %ebp # 4-byte Reload - adcl 1688(%esp), %ebp - adcl 1692(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1696(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1700(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1704(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1708(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 116(%esp), %edi # 4-byte Reload - adcl 1712(%esp), %edi - adcl 1716(%esp), %esi - movl %esi, 88(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 1592(%esp), %ecx - movl 2068(%esp), %eax - movl %eax, %edx - calll .LmulPv480x32 - movl 84(%esp), %ecx # 4-byte Reload - addl 1592(%esp), %ecx - movl 92(%esp), %eax # 4-byte Reload - adcl 1596(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1600(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1604(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1608(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1612(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1616(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 1620(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1624(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 1628(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 1632(%esp), %eax - movl %eax, 
112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1636(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1640(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 1644(%esp), %edi - movl %edi, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1648(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1652(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %ebp - movl %ebp, %eax - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1528(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - andl $1, %edi - movl %edi, %ecx - addl 1528(%esp), %ebp - movl 92(%esp), %eax # 4-byte Reload - adcl 1532(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1536(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1540(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 1544(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 1548(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1552(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1556(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1560(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1564(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 112(%esp), %edi # 4-byte Reload - adcl 1568(%esp), %edi - movl 104(%esp), %esi # 4-byte Reload - adcl 1572(%esp), %esi - movl 108(%esp), %eax # 4-byte Reload - adcl 1576(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1580(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1584(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1588(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 1464(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 92(%esp), %ecx # 4-byte Reload - addl 1464(%esp), %ecx - movl 80(%esp), %eax # 4-byte Reload - adcl 1468(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1472(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 1476(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1480(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 1484(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 1488(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1492(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1496(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 1500(%esp), %edi - movl %edi, 112(%esp) # 4-byte Spill - adcl 1504(%esp), %esi - movl %esi, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1508(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 116(%esp), %esi # 4-byte Reload - adcl 1512(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 1516(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1520(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%esp), 
%eax # 4-byte Reload - adcl 1524(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 92(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1400(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - movl 92(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 1400(%esp), %edi - movl 80(%esp), %ecx # 4-byte Reload - adcl 1404(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1408(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1412(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1416(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl 1420(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1424(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1428(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 100(%esp), %edi # 4-byte Reload - adcl 1432(%esp), %edi - movl 112(%esp), %ecx # 4-byte Reload - adcl 1436(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1440(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1444(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - adcl 1448(%esp), %esi - movl %esi, %ebp - movl 88(%esp), %esi # 4-byte Reload - adcl 1452(%esp), %esi - movl 96(%esp), %ecx # 4-byte Reload - adcl 1456(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1460(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 1336(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 80(%esp), %ecx # 4-byte Reload - addl 1336(%esp), %ecx - movl 64(%esp), %eax # 4-byte Reload - adcl 1340(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1344(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1348(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1352(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1356(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1360(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1364(%esp), %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1368(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1372(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1376(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 1380(%esp), %ebp - movl %ebp, 116(%esp) # 4-byte Spill - adcl 1384(%esp), %esi - movl %esi, %ebp - movl 96(%esp), %eax # 4-byte Reload - adcl 1388(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - adcl 1392(%esp), %esi - movl 92(%esp), %eax # 4-byte Reload - adcl 1396(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 80(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1272(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - movl 80(%esp), %eax # 4-byte Reload - 
andl $1, %eax - addl 1272(%esp), %edi - movl 64(%esp), %ecx # 4-byte Reload - adcl 1276(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1280(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1284(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1288(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1292(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1296(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1300(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1304(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1308(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1312(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 1316(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - adcl 1320(%esp), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1324(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - adcl 1328(%esp), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1332(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, %edi - movl 2072(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 1208(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 64(%esp), %ecx # 4-byte Reload - addl 1208(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1212(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1216(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1220(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1224(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1228(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %ebp # 4-byte Reload - adcl 1232(%esp), %ebp - movl 112(%esp), %eax # 4-byte Reload - adcl 1236(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1240(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %esi # 4-byte Reload - adcl 1244(%esp), %esi - movl 116(%esp), %eax # 4-byte Reload - adcl 1248(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1252(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1256(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1260(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1264(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 1268(%esp), %edi - movl %edi, 80(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1144(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - andl $1, %edi - movl %edi, %ecx - movl 64(%esp), %eax # 4-byte Reload - addl 1144(%esp), %eax - movl 56(%esp), %eax # 4-byte Reload - adcl 1148(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1152(%esp), %eax - movl %eax, 
60(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - adcl 1156(%esp), %edi - movl 72(%esp), %eax # 4-byte Reload - adcl 1160(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1164(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1168(%esp), %ebp - movl %ebp, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1172(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1176(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 1180(%esp), %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1184(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1188(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1192(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%esp), %ebp # 4-byte Reload - adcl 1196(%esp), %ebp - movl 92(%esp), %eax # 4-byte Reload - adcl 1200(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1204(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 1080(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 56(%esp), %ecx # 4-byte Reload - addl 1080(%esp), %ecx - movl 60(%esp), %eax # 4-byte Reload - adcl 1084(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 1088(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 1092(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 1096(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1100(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1104(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1108(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1112(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1116(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1120(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1124(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 1128(%esp), %ebp - movl %ebp, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1132(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1136(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1140(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %ebp - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1016(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - andl $1, %edi - movl %edi, %ecx - addl 1016(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 1020(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1024(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 1028(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 1032(%esp), %edi - movl 100(%esp), %eax # 4-byte Reload - adcl 1036(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1040(%esp), %eax - movl %eax, 
112(%esp) # 4-byte Spill - movl 104(%esp), %esi # 4-byte Reload - adcl 1044(%esp), %esi - movl 108(%esp), %eax # 4-byte Reload - adcl 1048(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1052(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1056(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %ebp # 4-byte Reload - adcl 1060(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 1064(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1068(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1072(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1076(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 952(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 60(%esp), %ecx # 4-byte Reload - addl 952(%esp), %ecx - movl 68(%esp), %eax # 4-byte Reload - adcl 956(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 960(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 964(%esp), %edi - movl 100(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 976(%esp), %esi - movl %esi, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 980(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 984(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 988(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 992(%esp), %ebp - movl %ebp, 96(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 996(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1000(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1004(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1008(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1012(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %eax - movl %ecx, %esi - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 888(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - movl %ebp, %eax - andl $1, %eax - addl 888(%esp), %esi - movl 68(%esp), %ecx # 4-byte Reload - adcl 892(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 896(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - adcl 900(%esp), %edi - movl 100(%esp), %ecx # 4-byte Reload - adcl 904(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 908(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 912(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 916(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 920(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl 924(%esp), %ebp - movl 96(%esp), %ecx # 4-byte Reload - adcl 928(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - 
movl 84(%esp), %ecx # 4-byte Reload - adcl 932(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 936(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 940(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 944(%esp), %esi - movl 56(%esp), %ecx # 4-byte Reload - adcl 948(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 824(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 68(%esp), %ecx # 4-byte Reload - addl 824(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 828(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 832(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 856(%esp), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%esp), %ebp # 4-byte Reload - adcl 864(%esp), %ebp - movl 92(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - adcl 872(%esp), %edi - adcl 876(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 760(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - movl 68(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 760(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 776(%esp), %esi - movl 104(%esp), %eax # 4-byte Reload - adcl 780(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 784(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 800(%esp), %ebp - movl %ebp, 84(%esp) # 4-byte Spill - movl 92(%esp), %ebp # 4-byte Reload - adcl 804(%esp), %ebp - adcl 808(%esp), %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - adcl 816(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - 
movl %eax, 60(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 696(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 72(%esp), %ecx # 4-byte Reload - addl 696(%esp), %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 708(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 712(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 716(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 720(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 728(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 732(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 736(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 748(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 752(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 756(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 632(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - andl $1, %edi - movl %edi, %ecx - addl 632(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 636(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 116(%esp), %edi # 4-byte Reload - adcl 656(%esp), %edi - movl 88(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 672(%esp), %esi - movl 80(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 688(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 568(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 76(%esp), %ecx # 4-byte Reload - addl 568(%esp), %ecx - movl 100(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 
100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 588(%esp), %edi - movl %edi, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %ebp # 4-byte Reload - adcl 596(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 604(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 624(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 504(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - andl $1, %edi - movl %edi, %ecx - addl 504(%esp), %esi - movl 100(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %edi # 4-byte Reload - adcl 516(%esp), %edi - movl 108(%esp), %esi # 4-byte Reload - adcl 520(%esp), %esi - movl 116(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 532(%esp), %ebp - movl %ebp, 96(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 560(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 440(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 100(%esp), %ecx # 4-byte Reload - addl 440(%esp), %ecx - movl 112(%esp), %eax # 4-byte Reload - adcl 444(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 448(%esp), %edi - movl %edi, 104(%esp) # 4-byte Spill - adcl 452(%esp), %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %edi # 4-byte Reload - adcl 460(%esp), %edi - movl 96(%esp), %eax # 4-byte Reload - adcl 464(%esp), 
%eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 492(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 496(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %eax - movl %ecx, %esi - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 376(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - andl $1, %ebp - movl %ebp, %ecx - addl 376(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %esi # 4-byte Reload - adcl 388(%esp), %esi - movl 116(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 396(%esp), %edi - movl %edi, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 404(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 416(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 52(%eax), %eax - movl %eax, (%esp) - leal 312(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 112(%esp), %ecx # 4-byte Reload - addl 312(%esp), %ecx - movl 104(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 320(%esp), %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 336(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 348(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 352(%esp), %ebp - movl 
60(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - movl %ecx, %edi - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 248(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - andl $1, %esi - movl %esi, %ecx - addl 248(%esp), %edi - movl 104(%esp), %esi # 4-byte Reload - adcl 252(%esp), %esi - movl 108(%esp), %edi # 4-byte Reload - adcl 256(%esp), %edi - movl 116(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 288(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 56(%eax), %eax - movl %eax, (%esp) - leal 184(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl %esi, %ecx - movl 96(%esp), %esi # 4-byte Reload - addl 184(%esp), %ecx - adcl 188(%esp), %edi - movl %edi, 108(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl 196(%esp), %ebp - adcl 200(%esp), %esi - movl 84(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - 
movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %edi - movl %eax, (%esp) - leal 120(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - movl 104(%esp), %ebx # 4-byte Reload - andl $1, %ebx - addl 120(%esp), %edi - movl %ebp, %edi - movl 108(%esp), %eax # 4-byte Reload - adcl 124(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 128(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - adcl 132(%esp), %edi - adcl 136(%esp), %esi - movl 84(%esp), %edx # 4-byte Reload - adcl 140(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 92(%esp), %edx # 4-byte Reload - adcl 144(%esp), %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 148(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 152(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 156(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 160(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 164(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 168(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 172(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 100(%esp), %edx # 4-byte Reload - adcl 176(%esp), %edx - movl %edx, 100(%esp) # 4-byte Spill - movl 112(%esp), %edx # 4-byte Reload - adcl 180(%esp), %edx - movl %edx, 112(%esp) # 4-byte Spill - adcl $0, %ebx - movl %ebx, 104(%esp) # 4-byte Spill - movl %eax, %edx - movl 2076(%esp), %ebp - subl (%ebp), %edx - sbbl 4(%ebp), %ecx - movl %edi, %eax - sbbl 8(%ebp), %eax - movl %esi, %ebx - sbbl 12(%ebp), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 84(%esp), %ebx # 4-byte Reload - sbbl 16(%ebp), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - movl 92(%esp), %ebx # 4-byte Reload - sbbl 20(%ebp), %ebx - movl %ebx, 24(%esp) # 4-byte Spill - movl 80(%esp), %ebx # 4-byte Reload - sbbl 24(%ebp), %ebx - movl %ebx, 28(%esp) # 4-byte Spill - movl 64(%esp), %ebx # 4-byte Reload - sbbl 28(%ebp), %ebx - movl %ebx, 32(%esp) # 4-byte Spill - movl 56(%esp), %ebx # 4-byte Reload - sbbl 32(%ebp), %ebx - movl %ebx, 36(%esp) # 4-byte Spill - movl 60(%esp), %ebx # 4-byte Reload - sbbl 36(%ebp), %ebx - movl %ebx, 40(%esp) # 4-byte Spill - movl 68(%esp), %ebx # 4-byte Reload - sbbl 40(%ebp), %ebx - movl %ebx, 44(%esp) # 4-byte Spill - movl 72(%esp), %ebx # 4-byte Reload - sbbl 44(%ebp), %ebx - movl %ebx, 48(%esp) # 4-byte Spill - movl 76(%esp), %ebx # 4-byte Reload - sbbl 48(%ebp), %ebx - movl %ebx, 52(%esp) # 4-byte Spill - movl 100(%esp), %ebx # 4-byte Reload - sbbl 52(%ebp), %ebx - movl %ebx, 88(%esp) # 4-byte Spill - movl 112(%esp), %ebx # 4-byte Reload - sbbl 56(%ebp), %ebx - movl %ebx, 96(%esp) # 4-byte Spill - movl 104(%esp), %ebx # 4-byte Reload - movl 108(%esp), %ebp # 4-byte Reload - sbbl $0, %ebx - andl $1, %ebx - jne .LBB226_2 -# BB#1: - movl %edx, %ebp -.LBB226_2: - movl 2064(%esp), %edx - movl %ebp, (%edx) - testb %bl, %bl - movl 116(%esp), %ebp # 4-byte Reload - jne .LBB226_4 -# BB#3: - movl %ecx, %ebp -.LBB226_4: - movl %ebp, 4(%edx) - jne .LBB226_6 -# BB#5: - movl %eax, %edi -.LBB226_6: - movl %edi, 8(%edx) - jne .LBB226_8 -# BB#7: - movl 
16(%esp), %esi # 4-byte Reload -.LBB226_8: - movl %esi, 12(%edx) - movl 84(%esp), %eax # 4-byte Reload - jne .LBB226_10 -# BB#9: - movl 20(%esp), %eax # 4-byte Reload -.LBB226_10: - movl %eax, 16(%edx) - movl 92(%esp), %eax # 4-byte Reload - jne .LBB226_12 -# BB#11: - movl 24(%esp), %eax # 4-byte Reload -.LBB226_12: - movl %eax, 20(%edx) - movl 80(%esp), %eax # 4-byte Reload - jne .LBB226_14 -# BB#13: - movl 28(%esp), %eax # 4-byte Reload -.LBB226_14: - movl %eax, 24(%edx) - movl 64(%esp), %eax # 4-byte Reload - jne .LBB226_16 -# BB#15: - movl 32(%esp), %eax # 4-byte Reload -.LBB226_16: - movl %eax, 28(%edx) - movl 56(%esp), %eax # 4-byte Reload - jne .LBB226_18 -# BB#17: - movl 36(%esp), %eax # 4-byte Reload -.LBB226_18: - movl %eax, 32(%edx) - movl 60(%esp), %eax # 4-byte Reload - jne .LBB226_20 -# BB#19: - movl 40(%esp), %eax # 4-byte Reload -.LBB226_20: - movl %eax, 36(%edx) - movl 68(%esp), %eax # 4-byte Reload - jne .LBB226_22 -# BB#21: - movl 44(%esp), %eax # 4-byte Reload -.LBB226_22: - movl %eax, 40(%edx) - movl 72(%esp), %eax # 4-byte Reload - jne .LBB226_24 -# BB#23: - movl 48(%esp), %eax # 4-byte Reload -.LBB226_24: - movl %eax, 44(%edx) - movl 76(%esp), %eax # 4-byte Reload - jne .LBB226_26 -# BB#25: - movl 52(%esp), %eax # 4-byte Reload -.LBB226_26: - movl %eax, 48(%edx) - movl 100(%esp), %eax # 4-byte Reload - jne .LBB226_28 -# BB#27: - movl 88(%esp), %eax # 4-byte Reload -.LBB226_28: - movl %eax, 52(%edx) - movl 112(%esp), %eax # 4-byte Reload - jne .LBB226_30 -# BB#29: - movl 96(%esp), %eax # 4-byte Reload -.LBB226_30: - movl %eax, 56(%edx) - addl $2044, %esp # imm = 0x7FC - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end226: - .size mcl_fp_mont15Lbmi2, .Lfunc_end226-mcl_fp_mont15Lbmi2 - - .globl mcl_fp_montNF15Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF15Lbmi2,@function -mcl_fp_montNF15Lbmi2: # @mcl_fp_montNF15Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $2028, %esp # imm = 0x7EC - calll .L227$pb -.L227$pb: - popl %ebx -.Ltmp48: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp48-.L227$pb), %ebx - movl 2060(%esp), %eax - movl -4(%eax), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 2056(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1960(%esp), %ecx - movl 2052(%esp), %edx - calll .LmulPv480x32 - movl 1960(%esp), %ebp - movl 1964(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl %ebp, %eax - imull %esi, %eax - movl 2020(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 2016(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 2012(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 2008(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 2004(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 2000(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 1996(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 1992(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 1988(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1984(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1980(%esp), %esi - movl 1976(%esp), %edi - movl 1972(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 1968(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl %eax, (%esp) - leal 1896(%esp), %ecx - movl 2060(%esp), %edx - calll .LmulPv480x32 - addl 1896(%esp), %ebp - movl 92(%esp), %eax # 4-byte Reload - adcl 1900(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1904(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload 
- adcl 1908(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 1912(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - adcl 1916(%esp), %esi - movl %esi, %edi - movl 48(%esp), %eax # 4-byte Reload - adcl 1920(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1924(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1928(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1932(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1936(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1940(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 1944(%esp), %ebp - movl 76(%esp), %esi # 4-byte Reload - adcl 1948(%esp), %esi - movl 96(%esp), %eax # 4-byte Reload - adcl 1952(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1956(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 2056(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 1832(%esp), %ecx - movl 2052(%esp), %edx - calll .LmulPv480x32 - movl 1892(%esp), %eax - movl 92(%esp), %edx # 4-byte Reload - addl 1832(%esp), %edx - movl 68(%esp), %ecx # 4-byte Reload - adcl 1836(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1840(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1844(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - adcl 1848(%esp), %edi - movl 48(%esp), %ecx # 4-byte Reload - adcl 1852(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1856(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1860(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1864(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1868(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1872(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - adcl 1876(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - adcl 1880(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 96(%esp), %ebp # 4-byte Reload - adcl 1884(%esp), %ebp - movl 100(%esp), %ecx # 4-byte Reload - adcl 1888(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 92(%esp) # 4-byte Spill - movl %edx, %eax - movl %edx, %esi - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1768(%esp), %ecx - movl 2060(%esp), %edx - calll .LmulPv480x32 - addl 1768(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 1772(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1776(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1780(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 1784(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1788(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1792(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1796(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1800(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 
1804(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 1808(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1812(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1816(%esp), %eax - movl %eax, %esi - adcl 1820(%esp), %ebp - movl %ebp, 96(%esp) # 4-byte Spill - movl 100(%esp), %ebp # 4-byte Reload - adcl 1824(%esp), %ebp - movl 92(%esp), %eax # 4-byte Reload - adcl 1828(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 2056(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 1704(%esp), %ecx - movl 2052(%esp), %edx - calll .LmulPv480x32 - movl 1764(%esp), %eax - movl 68(%esp), %edx # 4-byte Reload - addl 1704(%esp), %edx - movl 88(%esp), %ecx # 4-byte Reload - adcl 1708(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1712(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1716(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1720(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1724(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1728(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1732(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl 1736(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1740(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - adcl 1744(%esp), %edi - adcl 1748(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1752(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - adcl 1756(%esp), %ebp - movl 92(%esp), %ecx # 4-byte Reload - adcl 1760(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %edx, %eax - movl %edx, %esi - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1640(%esp), %ecx - movl 2060(%esp), %eax - movl %eax, %edx - calll .LmulPv480x32 - addl 1640(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 1644(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1648(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1652(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1656(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1660(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1664(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1668(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1672(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1676(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 1680(%esp), %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1684(%esp), %eax - movl %eax, %esi - movl 96(%esp), %edi # 4-byte Reload - adcl 1688(%esp), %edi - adcl 1692(%esp), %ebp - movl %ebp, 100(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1696(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 1700(%esp), %ebp - movl 2056(%esp), %eax - movl 
12(%eax), %eax - movl %eax, (%esp) - leal 1576(%esp), %ecx - movl 2052(%esp), %edx - calll .LmulPv480x32 - movl 1636(%esp), %eax - movl 88(%esp), %edx # 4-byte Reload - addl 1576(%esp), %edx - movl 72(%esp), %ecx # 4-byte Reload - adcl 1580(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1584(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1588(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1592(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1596(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1600(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1604(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1608(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1612(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - adcl 1616(%esp), %esi - adcl 1620(%esp), %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1624(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1628(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl 1632(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 88(%esp) # 4-byte Spill - movl %edx, %edi - movl %edi, %eax - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1512(%esp), %ecx - movl 2060(%esp), %edx - calll .LmulPv480x32 - addl 1512(%esp), %edi - movl 72(%esp), %eax # 4-byte Reload - adcl 1516(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1520(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1524(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1528(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 1532(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 1536(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1540(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1544(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 1548(%esp), %ebp - adcl 1552(%esp), %esi - movl 96(%esp), %eax # 4-byte Reload - adcl 1556(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1560(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1564(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1568(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1572(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 2056(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 1448(%esp), %ecx - movl 2052(%esp), %edx - calll .LmulPv480x32 - movl 1508(%esp), %eax - movl 72(%esp), %edx # 4-byte Reload - addl 1448(%esp), %edx - movl 40(%esp), %ecx # 4-byte Reload - adcl 1452(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1456(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1460(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - adcl 1464(%esp), 
%edi - movl %edi, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1468(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 1472(%esp), %edi - movl 84(%esp), %ecx # 4-byte Reload - adcl 1476(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - adcl 1480(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - adcl 1484(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1488(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1492(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1496(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1500(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl 1504(%esp), %ebp - adcl $0, %eax - movl %eax, 72(%esp) # 4-byte Spill - movl %edx, %eax - movl %edx, %esi - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1384(%esp), %ecx - movl 2060(%esp), %edx - calll .LmulPv480x32 - addl 1384(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 1388(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1392(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1396(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1400(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1404(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1408(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1412(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1416(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1420(%esp), %eax - movl %eax, %esi - movl 96(%esp), %eax # 4-byte Reload - adcl 1424(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1428(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1432(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1436(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 1440(%esp), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1444(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 2056(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 1320(%esp), %ecx - movl 2052(%esp), %edx - calll .LmulPv480x32 - movl 1380(%esp), %edx - movl 40(%esp), %ecx # 4-byte Reload - addl 1320(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 1324(%esp), %ebp - movl 44(%esp), %edi # 4-byte Reload - adcl 1328(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 1332(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1336(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1340(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1344(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1348(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 1352(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1356(%esp), %eax - movl %eax, 96(%esp) # 
4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1360(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1364(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 1368(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 1372(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1376(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 64(%esp) # 4-byte Spill - movl %ecx, %eax - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1256(%esp), %ecx - movl 2060(%esp), %edx - calll .LmulPv480x32 - movl 40(%esp), %eax # 4-byte Reload - addl 1256(%esp), %eax - adcl 1260(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - adcl 1264(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1268(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 1272(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 1276(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1280(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1284(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1288(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1292(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %edi # 4-byte Reload - adcl 1296(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 1300(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 1304(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1308(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 1312(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 1316(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 2056(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 1192(%esp), %ecx - movl 2052(%esp), %eax - movl %eax, %edx - calll .LmulPv480x32 - movl 1252(%esp), %eax - movl 48(%esp), %edx # 4-byte Reload - addl 1192(%esp), %edx - movl 44(%esp), %ecx # 4-byte Reload - adcl 1196(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1200(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl 1204(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1208(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 84(%esp), %ebp # 4-byte Reload - adcl 1212(%esp), %ebp - movl 80(%esp), %ecx # 4-byte Reload - adcl 1216(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1220(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1224(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - adcl 1228(%esp), %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1232(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1236(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1240(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - adcl 1244(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 1248(%esp), %esi - adcl $0, %eax - movl %eax, 48(%esp) # 4-byte Spill - movl %edx, %edi - movl %edi, %eax - imull 36(%esp), %eax 
# 4-byte Folded Reload - movl %eax, (%esp) - leal 1128(%esp), %ecx - movl 2060(%esp), %eax - movl %eax, %edx - calll .LmulPv480x32 - addl 1128(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 1132(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1136(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - adcl 1140(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 1144(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 1148(%esp), %ebp - movl %ebp, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1152(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1156(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1160(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1164(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %ebp # 4-byte Reload - adcl 1168(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 1172(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1176(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1180(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 1184(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - adcl 1188(%esp), %esi - movl 2056(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 1064(%esp), %ecx - movl 2052(%esp), %edx - calll .LmulPv480x32 - movl 1124(%esp), %eax - movl 44(%esp), %edx # 4-byte Reload - addl 1064(%esp), %edx - movl 52(%esp), %ecx # 4-byte Reload - adcl 1068(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl 1072(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1076(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1080(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - adcl 1084(%esp), %edi - movl 76(%esp), %ecx # 4-byte Reload - adcl 1088(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1092(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1096(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - adcl 1100(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1104(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1108(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1112(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1116(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - adcl 1120(%esp), %esi - movl %esi, 48(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, %esi - movl %edx, %ebp - movl %ebp, %eax - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1000(%esp), %ecx - movl 2060(%esp), %edx - calll .LmulPv480x32 - addl 1000(%esp), %ebp - movl 52(%esp), %eax # 4-byte Reload - adcl 1004(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1008(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 1012(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 1016(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 1020(%esp), %edi - movl %edi, 80(%esp) # 
4-byte Spill
  [generated code elided: the middle rounds of mcl_fp_montNF15Lbmi2's
   15-round Montgomery multiply loop (multiplier words 32(%eax) through
   48(%eax)); every round has the same shape, described in the note
   below]
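Each unrolled round of mcl_fp_montNF15Lbmi2 has the same shape: load the next 32-bit word of the multiplier (whose base pointer sits at 2056(%esp)), call the shared helper .LmulPv480x32 with the multiplicand pointer from 2052(%esp) to form a 16-word partial product, fold that product into the accumulator with an addl/adcl carry chain through the 4-byte spill slots, multiply the low accumulator word by the cached -p^(-1) mod 2^32 at 36(%esp) (the imull), and call .LmulPv480x32 again with the modulus pointer from 2060(%esp), which cancels the low word and shifts the accumulator down one word. A C model of the whole loop follows the function below.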
  [generated code elided: the last two rounds (multiplier words 52(%eax)
   and 56(%eax)) and the final reduction, then a trial subtraction of
   the modulus (subl/sbbl), a sign test (sarl $31 / js) and the select
   chain .LBB227_2 through .LBB227_30 that writes the smaller candidate
   through the result pointer at 2048(%esp)]
-	addl	$2028, %esp             # imm = 0x7EC
-	popl	%esi
-	popl	%edi
-	popl	%ebx
-	popl	%ebp
-	retl
-.Lfunc_end227:
-	.size	mcl_fp_montNF15Lbmi2, .Lfunc_end227-mcl_fp_montNF15Lbmi2
-
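mcl_fp_montNF15Lbmi2 as a whole is one fully unrolled word-serial Montgomery multiplication over 15 limbs of 32 bits (hence the 480 in .LmulPv480x32). As a compact reference for reading these hunks, here is a minimal C model of the same computation; the names mont_mul and pinv and the explicit loops are assumptions of this sketch, not mcl's actual API.

#include <stdint.h>
#include <stdio.h>

#define N 15                                 /* 15 x 32-bit limbs = 480 bits */

/* z = x * y * 2^(-480) mod p;  p odd, pinv = -p[0]^(-1) mod 2^32.
 * Hypothetical model of the unrolled assembly, not mcl's API. */
static void mont_mul(uint32_t z[N], const uint32_t x[N], const uint32_t y[N],
                     const uint32_t p[N], uint32_t pinv)
{
    uint32_t t[N + 2] = {0};                 /* accumulator with guard limbs */
    for (int i = 0; i < N; i++) {
        /* t += x * y[i] -- the first .LmulPv480x32 call of each round */
        uint64_t c = 0;
        for (int j = 0; j < N; j++) {
            uint64_t sj = (uint64_t)t[j] + (uint64_t)x[j] * y[i] + c;
            t[j] = (uint32_t)sj;
            c = sj >> 32;
        }
        uint64_t s = (uint64_t)t[N] + c;
        t[N] = (uint32_t)s;
        t[N + 1] = (uint32_t)(s >> 32);

        /* m = t[0] * (-p^-1 mod 2^32) -- the imull 36(%esp) */
        uint32_t m = t[0] * pinv;

        /* t = (t + m*p) / 2^32 -- the second .LmulPv480x32 call */
        c = ((uint64_t)t[0] + (uint64_t)m * p[0]) >> 32;
        for (int j = 1; j < N; j++) {
            s = (uint64_t)t[j] + (uint64_t)m * p[j] + c;
            t[j - 1] = (uint32_t)s;
            c = s >> 32;
        }
        s = (uint64_t)t[N] + c;
        t[N - 1] = (uint32_t)s;
        t[N] = t[N + 1] + (uint32_t)(s >> 32);
    }
    /* trial subtraction, as in the subl/sbbl + js select chain above */
    uint32_t r[N], borrow = 0;
    for (int j = 0; j < N; j++) {
        uint64_t d = (uint64_t)t[j] - p[j] - borrow;
        r[j] = (uint32_t)d;
        borrow = (uint32_t)(d >> 63);
    }
    int lt = (t[N] == 0 && borrow);          /* t < p: keep t, else keep r */
    for (int j = 0; j < N; j++)
        z[j] = lt ? t[j] : r[j];
}

int main(void)                               /* smoke test only */
{
    uint32_t p[N] = {0xFFFFFFFBu};           /* toy odd modulus, low limb only */
    uint32_t inv = p[0];                     /* Newton: p[0]^(-1) mod 2^32 */
    for (int k = 0; k < 4; k++)
        inv *= 2 - p[0] * inv;
    uint32_t x[N] = {2}, y[N] = {3}, z[N];
    mont_mul(z, x, y, p, 0u - inv);
    printf("z[0] = %08x\n", (unsigned)z[0]);
    return 0;
}

The unrolled assembly keeps every limb in a register or a 4-byte spill slot instead of an array, and its closing subl/sbbl/js chain is exactly the trial subtraction at the end of this sketch.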
-	.globl	mcl_fp_montRed15Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_montRed15Lbmi2,@function
-mcl_fp_montRed15Lbmi2:                  # @mcl_fp_montRed15Lbmi2
-# BB#0:
-	pushl	%ebp
-	pushl	%ebx
-	pushl	%edi
-	pushl	%esi
-	subl	$1148, %esp             # imm = 0x47C
-	calll	.L228$pb
-.L228$pb:
-	popl	%eax
-.Ltmp49:
-	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp49-.L228$pb), %eax
  [generated code elided: load -p^(-1) mod 2^32 from -4(%edx) and spill
   the 30 input limbs (from 1172(%esp)) and the 15 modulus limbs (from
   1176(%esp)) to the stack; then 15 unrolled reduction rounds, each an
   imull 88(%esp) to form the factor m, a .LmulPv480x32 call to add m
   times the modulus, and an adcl / "adcl $0" chain that clears one low
   limb and propagates the carries; finally a trial subtraction
   (subl/sbbl) and the select chain .LBB228_2 through .LBB228_30 that
   stores the reduced value through the pointer at 1168(%esp)]
-	addl	$1148, %esp             # imm = 0x47C
-	popl	%esi
-	popl	%edi
-	popl	%ebx
-	popl	%ebp
-	retl
-.Lfunc_end228:
-	.size	mcl_fp_montRed15Lbmi2, .Lfunc_end228-mcl_fp_montRed15Lbmi2
-
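mcl_fp_montRed15Lbmi2 is the standalone Montgomery reduction: it takes a 30-limb value (a product of two 15-limb operands) and computes t * 2^(-480) mod p, clearing one low limb per round. A hypothetical C model, under the same caveats as the sketch above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define N 15

/* z = t / 2^480 mod p for a 2N-limb input t with t < p * 2^480
 * (true for a product of reduced operands); pinv = -p[0]^(-1) mod 2^32. */
static void mont_red(uint32_t z[N], const uint32_t t_in[2 * N],
                     const uint32_t p[N], uint32_t pinv)
{
    uint32_t t[2 * N + 1];
    memcpy(t, t_in, 2 * N * sizeof(uint32_t));
    t[2 * N] = 0;
    for (int i = 0; i < N; i++) {
        /* m = t[i] * (-p^-1); adding m * p << (32*i) clears limb i.
         * One imull + .LmulPv480x32 pair per round in the assembly. */
        uint32_t m = t[i] * pinv;
        uint64_t c = 0;
        for (int j = 0; j < N; j++) {
            uint64_t s = (uint64_t)t[i + j] + (uint64_t)m * p[j] + c;
            t[i + j] = (uint32_t)s;
            c = s >> 32;
        }
        for (int j = i + N; c != 0 && j <= 2 * N; j++) {
            uint64_t s = (uint64_t)t[j] + c;   /* the "adcl $0" chains */
            t[j] = (uint32_t)s;
            c = s >> 32;
        }
    }
    /* high half now holds the result, possibly >= p: trial-subtract */
    uint32_t r[N], borrow = 0;
    for (int j = 0; j < N; j++) {
        uint64_t d = (uint64_t)t[N + j] - p[j] - borrow;
        r[j] = (uint32_t)d;
        borrow = (uint32_t)(d >> 63);
    }
    int lt = (t[2 * N] == 0 && borrow);        /* still < p: keep t */
    for (int j = 0; j < N; j++)
        z[j] = lt ? t[N + j] : r[j];
}

int main(void)                                 /* t = p  =>  result must be 0 */
{
    uint32_t p[N] = {0xFFFFFFFBu};
    uint32_t inv = p[0];
    for (int k = 0; k < 4; k++)
        inv *= 2 - p[0] * inv;
    uint32_t t[2 * N] = {0}, z[N];
    memcpy(t, p, sizeof p);
    mont_red(z, t, p, 0u - inv);
    printf("z[0] = %08x (expect 0)\n", (unsigned)z[0]);
    return 0;
}

The t = p smoke test works because p * 2^(-480) mod p is 0; each loop iteration corresponds to one imull 88(%esp) / .LmulPv480x32 pair in the deleted code, and the "adcl $0" runs are its carry propagation.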
-	.globl	mcl_fp_addPre15Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_addPre15Lbmi2,@function
-mcl_fp_addPre15Lbmi2:                   # @mcl_fp_addPre15Lbmi2
-# BB#0:
-	pushl	%ebx
-	pushl	%edi
-	pushl	%esi
  [generated code elided: a straight-line addl/adcl chain over the 15
   limbs of the two operands (pointers at 20(%esp) and 24(%esp)),
   storing each sum limb through the result pointer at 16(%esp);
   sbbl %eax, %eax / andl $1, %eax materialises the final carry as the
   return value]
-	popl	%esi
-	popl	%edi
-	popl	%ebx
-	retl
-.Lfunc_end229:
-	.size	mcl_fp_addPre15Lbmi2, .Lfunc_end229-mcl_fp_addPre15Lbmi2
-
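mcl_fp_addPre15Lbmi2 above and mcl_fp_subPre15Lbmi2 below are plain 480-bit addition and subtraction with a carry/borrow return and no modular step; the generated code is simply the loop below unrolled. Names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define N 15

/* z = x + y over 15 limbs; returns the final carry (0 or 1). */
static uint32_t add_pre(uint32_t z[N], const uint32_t x[N], const uint32_t y[N])
{
    uint64_t c = 0;                       /* addl, then a chain of adcl */
    for (int i = 0; i < N; i++) {
        uint64_t s = (uint64_t)x[i] + y[i] + c;
        z[i] = (uint32_t)s;
        c = s >> 32;
    }
    return (uint32_t)c;
}

/* z = x - y over 15 limbs; returns the final borrow (0 or 1). */
static uint32_t sub_pre(uint32_t z[N], const uint32_t x[N], const uint32_t y[N])
{
    uint32_t b = 0;                       /* subl, then a chain of sbbl */
    for (int i = 0; i < N; i++) {
        uint64_t d = (uint64_t)x[i] - y[i] - b;
        z[i] = (uint32_t)d;
        b = (uint32_t)(d >> 63);
    }
    return b;
}

int main(void)
{
    uint32_t x[N] = {0xFFFFFFFFu}, y[N] = {1}, s[N], d[N];
    printf("carry  = %u\n", add_pre(s, x, y));   /* limb 0 wraps into limb 1 */
    printf("borrow = %u\n", sub_pre(d, y, x));   /* 1 - 0xFFFFFFFF borrows */
    return 0;
}

The returned flag is what the assembly materialises with sbbl %eax, %eax / andl $1, %eax (add) and sbbl $0, %eax / andl $1, %eax (subtract).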
-	.globl	mcl_fp_subPre15Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_subPre15Lbmi2,@function
-mcl_fp_subPre15Lbmi2:                   # @mcl_fp_subPre15Lbmi2
-# BB#0:
-	pushl	%ebp
-	pushl	%ebx
-	pushl	%edi
-	pushl	%esi
  [generated code elided: xorl %eax, %eax, then a straight-line
   subl/sbbl chain over the 15 limbs (minuend at 24(%esp), subtrahend at
   28(%esp), result at 20(%esp)); sbbl $0, %eax / andl $1, %eax returns
   the final borrow]
-	popl	%esi
-	popl	%edi
-	popl	%ebx
-	popl	%ebp
-	retl
-.Lfunc_end230:
-	.size	mcl_fp_subPre15Lbmi2, .Lfunc_end230-mcl_fp_subPre15Lbmi2
-
-	.globl	mcl_fp_shr1_15Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_shr1_15Lbmi2,@function
-mcl_fp_shr1_15Lbmi2:                    # @mcl_fp_shr1_15Lbmi2
-# BB#0:
-	pushl	%esi
-	movl	12(%esp), %eax
-	movl	(%eax), %edx
-	movl	4(%eax), %esi
-	shrdl	$1, %esi, %edx
-	movl	8(%esp), %ecx
-	movl	%edx, (%ecx)
  [generated code elided: the same shrdl $1 cascade over limbs 1 through
   13, each pulling in the low bit of the next-higher limb]
-	movl	56(%eax), %eax
-	shrdl	$1, %eax, %esi
-	movl	%esi, 52(%ecx)
-	shrl	%eax
-	movl	%eax, 56(%ecx)
-	popl	%esi
-	retl
-.Lfunc_end231:
-	.size	mcl_fp_shr1_15Lbmi2, .Lfunc_end231-mcl_fp_shr1_15Lbmi2
-
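mcl_fp_shr1_15Lbmi2 halves a 480-bit value: each shrdl $1 shifts one limb right while pulling in the low bit of the next limb. A C equivalent (hypothetical name):

#include <stdint.h>
#include <stdio.h>

#define N 15

/* z = x >> 1 over 15 limbs. */
static void shr1(uint32_t z[N], const uint32_t x[N])
{
    for (int i = 0; i < N - 1; i++)
        z[i] = (x[i] >> 1) | (x[i + 1] << 31);  /* shrdl $1, next, cur */
    z[N - 1] = x[N - 1] >> 1;                   /* plain shrl on the top limb */
}

int main(void)
{
    uint32_t x[N] = {3, 1}, z[N];
    shr1(z, x);
    printf("z[0] = %08x, z[1] = %08x\n",        /* expect 80000001, 00000000 */
           (unsigned)z[0], (unsigned)z[1]);
    return 0;
}

mcl_fp_add15Lbmi2, next in the diff, combines the addPre carry chain with a trial subtraction of the modulus: the raw sum is stored first, and the reduced value overwrites it only when the subtraction does not borrow.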
 [... elided: the remainder of mcl_fp_add15Lbmi2 (add, trial subtraction of
  the modulus, branch on the resulting flags), mcl_fp_addNF15Lbmi2,
  mcl_fp_sub15Lbmi2, and mcl_fp_subNF15Lbmi2 (the NF variants select the
  corrected result from the sign of the trial subtraction, subNF entirely
  branch-free via a sarl $31 mask), plus the opening of
  mcl_fpDbl_add15Lbmi2 ...]
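The reduced-field add/sub routines above differ only in how the final
correction is applied. A hedged sketch of the two strategies, continuing the
illustrative package and assuming inputs already reduced mod p:

    package fp

    import "math/bits"

    // fpAdd: z = x + y mod p, the mcl_fp_add15 shape -- add, trial
    // subtraction of p, keep whichever copy the flags say is correct.
    // Assumes x, y < p < 2^(32*len(p)).
    func fpAdd(z, x, y, p []uint32) {
        t := make([]uint32, len(z))
        carry := addPre(z, x, y)  // helpers from the sketch above
        borrow := subPre(t, z, p)
        if carry != 0 || borrow == 0 { // sum >= p: take the reduced copy
            copy(z, t)
        }
    }

    // fpSubNF: z = x - y mod p with no data-dependent branch, the subNF
    // shape -- subtract, then add back p AND the sign mask of the result.
    func fpSubNF(z, x, y, p []uint32) {
        borrow := subPre(z, x, y)
        mask := -borrow // all ones iff x < y
        var c uint32
        for i := range z {
            z[i], c = bits.Add32(z[i], p[i]&mask, c)
        }
    }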
 [... elided: the remainder of mcl_fpDbl_add15Lbmi2 and all of
  mcl_fpDbl_sub15Lbmi2, the double-width (30-limb) add/sub in which only the
  high 15 limbs receive the conditional modulus correction, plus the opening
  of the local helper .LmulPv512x32 ...]
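The fpDbl forms operate on full double-width products, so only the top half
is folded back under the modulus. A sketch under the same assumptions as the
snippets above:

    package fp

    import "math/bits"

    // fpDblAdd: add two 2n-limb double-width values. The low n limbs are a
    // plain carry chain; only the high n limbs get the conditional
    // correction, matching the structure of mcl_fpDbl_add15Lbmi2.
    func fpDblAdd(z, x, y, p []uint32) {
        n := len(p)
        var carry uint32
        for i := 0; i < 2*n; i++ {
            z[i], carry = bits.Add32(x[i], y[i], carry)
        }
        hi := z[n:]
        t := make([]uint32, n)
        borrow := subPre(t, hi, p) // helper from the first sketch
        if carry != 0 || borrow == 0 {
            copy(hi, t)
        }
    }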
 [... elided: the remainder of .LmulPv512x32 (a 16-limb by 32-bit word
  product built from flag-preserving mulx multiplies, 17-limb result),
  mcl_fp_mulUnitPre16Lbmi2 (a thin wrapper that calls it once and stores the
  17 limbs), and the prologue of mcl_fpDbl_mulPre16Lbmi2 ...]
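Everything that follows is built on this one-word-multiplier row. Roughly
what .LmulPv512x32 computes, with bits.Mul32 standing in for mulx (BMI2's
flag-less multiply, which is why the adcl carry chain survives between
partial products); illustrative only:

    package fp

    import "math/bits"

    // mulPv: z[0..n] = a[0..n-1] * b for a single 32-bit word b (n = 16 in
    // the elided helper). Each lo limb absorbs the previous hi limb plus
    // the running carry.
    func mulPv(z, a []uint32, b uint32) {
        var hiPrev, carry uint32
        for i, ai := range a {
            hi, lo := bits.Mul32(ai, b)
            z[i], carry = bits.Add32(lo, hiPrev, carry)
            hiPrev = hi
        }
        z[len(a)] = hiPrev + carry // cannot overflow: hi <= 2^32 - 2
    }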
 [... elided: the body of mcl_fpDbl_mulPre16Lbmi2 -- one level of Karatsuba,
  i.e. three calls to mcl_fpDbl_mulPre8Lbmi2 on the 8-limb halves, with
  seto/lahf ... sahf sequences preserving carries across the calls -- plus
  the opening of mcl_fpDbl_sqrPre16Lbmi2 ...]
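mulPre16 gets its 16-limb product from three 8-limb multiplications. A small
math/big check of the identity it appears to rely on (hypothetical helper,
for illustration only):

    package fp

    import (
        "math/big"
        "math/rand"
    )

    // karatsubaOK verifies, for random halves, that with B = 2^256 (eight
    // 32-bit limbs):
    //   x*y = x1*y1*B^2 + ((x0+x1)*(y0+y1) - x0*y0 - x1*y1)*B + x0*y0
    // i.e. three half-width multiplications instead of four.
    func karatsubaOK() bool {
        B := new(big.Int).Lsh(big.NewInt(1), 256)
        r := rand.New(rand.NewSource(1))
        x0, x1 := new(big.Int).Rand(r, B), new(big.Int).Rand(r, B)
        y0, y1 := new(big.Int).Rand(r, B), new(big.Int).Rand(r, B)
        x := new(big.Int).Add(new(big.Int).Mul(x1, B), x0)
        y := new(big.Int).Add(new(big.Int).Mul(y1, B), y0)

        lo := new(big.Int).Mul(x0, y0)
        hi := new(big.Int).Mul(x1, y1)
        mid := new(big.Int).Mul(new(big.Int).Add(x0, x1), new(big.Int).Add(y0, y1))
        mid.Sub(mid, lo).Sub(mid, hi)

        k := new(big.Int).Mul(hi, B)
        k.Mul(k, B).Add(k, new(big.Int).Mul(mid, B)).Add(k, lo)
        return k.Cmp(new(big.Int).Mul(x, y)) == 0
    }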
 [... elided: the remainder of mcl_fpDbl_sqrPre16Lbmi2 (the same
  three-multiply split, specialised for x == y by passing the same operand
  pointer twice to mcl_fpDbl_mulPre8Lbmi2), and the first rounds of
  mcl_fp_mont16Lbmi2 ...]
2316(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 2320(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 2324(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 2328(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 2332(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 2336(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 2340(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 2344(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 2348(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 2352(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - sbbl %edi, %edi - movl 2456(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 2216(%esp), %ecx - movl 2452(%esp), %edx - calll .LmulPv512x32 - andl $1, %edi - movl 112(%esp), %ecx # 4-byte Reload - addl 2216(%esp), %ecx - adcl 2220(%esp), %ebp - movl %ebp, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 2224(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 2228(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 2232(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - adcl 2236(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 2240(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 2244(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 2248(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 2252(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 2256(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 2260(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 2264(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 2268(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 2272(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 2276(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - adcl 2280(%esp), %edi - movl %edi, 108(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %ebp - imull 56(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 2144(%esp), %ecx - movl 2460(%esp), %edx - calll .LmulPv512x32 - andl $1, %edi - addl 2144(%esp), %ebp - movl 112(%esp), %eax # 4-byte Reload - adcl 2148(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 2152(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 2156(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 2160(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 2164(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 72(%esp), %ebp # 4-byte Reload - adcl 2168(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 2172(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 2176(%esp), %eax - movl %eax, 
68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 2180(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 2184(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 2188(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 2192(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 2196(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 2200(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 2204(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 108(%esp), %esi # 4-byte Reload - adcl 2208(%esp), %esi - adcl $0, %edi - movl %edi, 104(%esp) # 4-byte Spill - movl 2456(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 2072(%esp), %ecx - movl 2452(%esp), %edx - calll .LmulPv512x32 - movl 112(%esp), %ecx # 4-byte Reload - addl 2072(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 2076(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 2080(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 2084(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 2088(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 2092(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 2096(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 2100(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 2104(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %ebp # 4-byte Reload - adcl 2108(%esp), %ebp - movl 92(%esp), %eax # 4-byte Reload - adcl 2112(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %edi # 4-byte Reload - adcl 2116(%esp), %edi - movl 116(%esp), %eax # 4-byte Reload - adcl 2120(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 2124(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 2128(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - adcl 2132(%esp), %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 2136(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - imull 56(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 2000(%esp), %ecx - movl 2460(%esp), %edx - calll .LmulPv512x32 - andl $1, %esi - movl %esi, %eax - movl 112(%esp), %ecx # 4-byte Reload - addl 2000(%esp), %ecx - movl 100(%esp), %ecx # 4-byte Reload - adcl 2004(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 2008(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 2012(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 2016(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 2020(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 2024(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 2028(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 2032(%esp), %ecx - movl 
%ecx, 76(%esp) # 4-byte Spill - adcl 2036(%esp), %ebp - movl %ebp, 84(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 2040(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl 2044(%esp), %edi - movl 116(%esp), %ecx # 4-byte Reload - adcl 2048(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 120(%esp), %ebp # 4-byte Reload - adcl 2052(%esp), %ebp - movl 124(%esp), %esi # 4-byte Reload - adcl 2056(%esp), %esi - movl 108(%esp), %ecx # 4-byte Reload - adcl 2060(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 2064(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 2456(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 1928(%esp), %ecx - movl 2452(%esp), %edx - calll .LmulPv512x32 - movl 100(%esp), %ecx # 4-byte Reload - addl 1928(%esp), %ecx - movl 88(%esp), %eax # 4-byte Reload - adcl 1932(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1936(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1940(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1944(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1948(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1952(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1956(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1960(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1964(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 1968(%esp), %edi - movl 116(%esp), %eax # 4-byte Reload - adcl 1972(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 1976(%esp), %ebp - movl %ebp, 120(%esp) # 4-byte Spill - adcl 1980(%esp), %esi - movl %esi, 124(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1984(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1988(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1992(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %esi - movl %esi, %eax - imull 56(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1856(%esp), %ecx - movl 2460(%esp), %eax - movl %eax, %edx - calll .LmulPv512x32 - andl $1, %ebp - movl %ebp, %eax - addl 1856(%esp), %esi - movl 88(%esp), %ecx # 4-byte Reload - adcl 1860(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1864(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1868(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1872(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1876(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1880(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1884(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1888(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 1892(%esp), %esi - adcl 1896(%esp), %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 116(%esp), %ebp # 4-byte 
Reload - adcl 1900(%esp), %ebp - movl 120(%esp), %ecx # 4-byte Reload - adcl 1904(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 1908(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1912(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1916(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1920(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 2456(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 1784(%esp), %ecx - movl 2452(%esp), %eax - movl %eax, %edx - calll .LmulPv512x32 - movl 88(%esp), %ecx # 4-byte Reload - addl 1784(%esp), %ecx - movl 80(%esp), %eax # 4-byte Reload - adcl 1788(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1792(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1796(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1800(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - adcl 1804(%esp), %edi - movl 76(%esp), %eax # 4-byte Reload - adcl 1808(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1812(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 1816(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1820(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 1824(%esp), %ebp - movl %ebp, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1828(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1832(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1836(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1840(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1844(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1848(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - movl %ecx, %ebp - imull 56(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1712(%esp), %ecx - movl 2460(%esp), %edx - calll .LmulPv512x32 - andl $1, %esi - movl %esi, %ecx - addl 1712(%esp), %ebp - movl 80(%esp), %eax # 4-byte Reload - adcl 1716(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1720(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1724(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1728(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 1732(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1736(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1740(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %edi # 4-byte Reload - adcl 1744(%esp), %edi - movl 96(%esp), %eax # 4-byte Reload - adcl 1748(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1752(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1756(%esp), %eax - movl %eax, 120(%esp) # 4-byte 
Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1760(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 108(%esp), %ebp # 4-byte Reload - adcl 1764(%esp), %ebp - movl 104(%esp), %esi # 4-byte Reload - adcl 1768(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 1772(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1776(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 2456(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 1640(%esp), %ecx - movl 2452(%esp), %edx - calll .LmulPv512x32 - movl 80(%esp), %ecx # 4-byte Reload - addl 1640(%esp), %ecx - movl 60(%esp), %eax # 4-byte Reload - adcl 1644(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1648(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1652(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1656(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1660(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1664(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 1668(%esp), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1672(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1676(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1680(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1684(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - adcl 1688(%esp), %ebp - movl %ebp, 108(%esp) # 4-byte Spill - adcl 1692(%esp), %esi - movl %esi, %edi - movl 112(%esp), %eax # 4-byte Reload - adcl 1696(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1700(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), %esi # 4-byte Reload - adcl 1704(%esp), %esi - sbbl %eax, %eax - movl %eax, 80(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 56(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1568(%esp), %ecx - movl 2460(%esp), %edx - calll .LmulPv512x32 - movl 80(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 1568(%esp), %ebp - movl 60(%esp), %ecx # 4-byte Reload - adcl 1572(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1576(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1580(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1584(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl 1588(%esp), %ebp - movl 84(%esp), %ecx # 4-byte Reload - adcl 1592(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1596(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1600(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 1604(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 120(%esp), %ecx # 4-byte Reload - adcl 1608(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 1612(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1616(%esp), %ecx - movl %ecx, 
108(%esp) # 4-byte Spill - adcl 1620(%esp), %edi - movl %edi, 104(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1624(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1628(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - adcl 1632(%esp), %esi - movl %esi, 88(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 2456(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 1496(%esp), %ecx - movl 2452(%esp), %edx - calll .LmulPv512x32 - movl 60(%esp), %ecx # 4-byte Reload - addl 1496(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1500(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl 1504(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 1508(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 1512(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - adcl 1516(%esp), %esi - movl 92(%esp), %eax # 4-byte Reload - adcl 1520(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1524(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1528(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1532(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1536(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1540(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1544(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1548(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1552(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1556(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1560(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %eax - imull 56(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1424(%esp), %ecx - movl 2460(%esp), %eax - movl %eax, %edx - calll .LmulPv512x32 - andl $1, %ebp - movl 60(%esp), %eax # 4-byte Reload - addl 1424(%esp), %eax - movl 72(%esp), %eax # 4-byte Reload - adcl 1428(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 1432(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1436(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1440(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1444(%esp), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1448(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1452(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1456(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1460(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1464(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1468(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %edi # 4-byte Reload - adcl 1472(%esp), %edi - movl 112(%esp), %eax # 4-byte Reload - adcl 1476(%esp), %eax - movl %eax, 
112(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 1480(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 1484(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1488(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, %ebp - movl 2456(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 1352(%esp), %ecx - movl 2452(%esp), %edx - calll .LmulPv512x32 - movl 72(%esp), %ecx # 4-byte Reload - addl 1352(%esp), %ecx - movl 64(%esp), %eax # 4-byte Reload - adcl 1356(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1360(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1364(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1368(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1372(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1376(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1380(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1384(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1388(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1392(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 1396(%esp), %edi - movl %edi, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1400(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 1404(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1408(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl 1412(%esp), %esi - adcl 1416(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %eax - movl %ecx, %edi - imull 56(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1280(%esp), %ecx - movl 2460(%esp), %edx - calll .LmulPv512x32 - andl $1, %ebp - movl %ebp, %eax - addl 1280(%esp), %edi - movl 64(%esp), %ecx # 4-byte Reload - adcl 1284(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 1288(%esp), %ebp - movl 76(%esp), %ecx # 4-byte Reload - adcl 1292(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1296(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1300(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1304(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 1308(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 120(%esp), %ecx # 4-byte Reload - adcl 1312(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 1316(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1320(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1324(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1328(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1332(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1336(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte 
Spill - adcl 1340(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1344(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, %edi - movl 2456(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 1208(%esp), %ecx - movl 2452(%esp), %eax - movl %eax, %edx - calll .LmulPv512x32 - movl 64(%esp), %ecx # 4-byte Reload - addl 1208(%esp), %ecx - adcl 1212(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1216(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1220(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1224(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1228(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1232(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1236(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1240(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1244(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1248(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1252(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1256(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), %esi # 4-byte Reload - adcl 1260(%esp), %esi - movl 80(%esp), %eax # 4-byte Reload - adcl 1264(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1268(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 1272(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %ebp - imull 56(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1136(%esp), %ecx - movl 2460(%esp), %edx - calll .LmulPv512x32 - andl $1, %edi - movl %edi, %ecx - addl 1136(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 1140(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1144(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1148(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1152(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1156(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1160(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %edi # 4-byte Reload - adcl 1164(%esp), %edi - movl 124(%esp), %eax # 4-byte Reload - adcl 1168(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1172(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1176(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1180(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1184(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 1188(%esp), %esi - movl %esi, 88(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 1192(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 1196(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), 
%eax # 4-byte Reload - adcl 1200(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 2456(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 1064(%esp), %ecx - movl 2452(%esp), %edx - calll .LmulPv512x32 - movl 68(%esp), %ecx # 4-byte Reload - addl 1064(%esp), %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 1068(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1072(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1076(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1080(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1084(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 1088(%esp), %edi - movl %edi, 120(%esp) # 4-byte Spill - movl 124(%esp), %esi # 4-byte Reload - adcl 1092(%esp), %esi - movl 108(%esp), %eax # 4-byte Reload - adcl 1096(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1100(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1104(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1108(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1112(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 1116(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1120(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1124(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1128(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %ebp - imull 56(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 992(%esp), %ecx - movl 2460(%esp), %edx - calll .LmulPv512x32 - movl %edi, %eax - andl $1, %eax - addl 992(%esp), %ebp - movl 76(%esp), %ecx # 4-byte Reload - adcl 996(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1000(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1004(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 96(%esp), %edi # 4-byte Reload - adcl 1008(%esp), %edi - movl 116(%esp), %ebp # 4-byte Reload - adcl 1012(%esp), %ebp - movl 120(%esp), %ecx # 4-byte Reload - adcl 1016(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - adcl 1020(%esp), %esi - movl %esi, 124(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1024(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1028(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 1032(%esp), %esi - movl 100(%esp), %ecx # 4-byte Reload - adcl 1036(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1040(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1044(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1048(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1052(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1056(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 68(%esp) # 4-byte 
Spill - movl 2456(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 920(%esp), %ecx - movl 2452(%esp), %edx - calll .LmulPv512x32 - movl 76(%esp), %ecx # 4-byte Reload - addl 920(%esp), %ecx - movl 84(%esp), %eax # 4-byte Reload - adcl 924(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 928(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 932(%esp), %edi - movl %edi, 96(%esp) # 4-byte Spill - adcl 936(%esp), %ebp - movl %ebp, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 940(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 944(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 948(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 952(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 956(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 960(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 968(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 976(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 980(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 984(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %esi - movl %esi, %eax - imull 56(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 848(%esp), %ecx - movl 2460(%esp), %edx - calll .LmulPv512x32 - andl $1, %edi - movl %edi, %ecx - addl 848(%esp), %esi - movl 84(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %edi # 4-byte Reload - adcl 856(%esp), %edi - movl 96(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %esi # 4-byte Reload - adcl 868(%esp), %esi - movl 124(%esp), %eax # 4-byte Reload - adcl 872(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 876(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 888(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 896(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 912(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 2456(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 776(%esp), %ecx - movl 2452(%esp), %edx - calll .LmulPv512x32 - movl 84(%esp), 
%ecx # 4-byte Reload - addl 776(%esp), %ecx - adcl 780(%esp), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 96(%esp), %edi # 4-byte Reload - adcl 784(%esp), %edi - movl 116(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 792(%esp), %esi - movl %esi, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 108(%esp), %ebp # 4-byte Reload - adcl 800(%esp), %ebp - movl 104(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 828(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 56(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 704(%esp), %ecx - movl 2460(%esp), %edx - calll .LmulPv512x32 - movl 84(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 704(%esp), %esi - movl 92(%esp), %eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 712(%esp), %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 716(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 720(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl %ebp, %esi - adcl 728(%esp), %esi - movl 104(%esp), %ebp # 4-byte Reload - adcl 732(%esp), %ebp - movl 112(%esp), %eax # 4-byte Reload - adcl 736(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl 752(%esp), %edi - movl 64(%esp), %eax # 4-byte Reload - adcl 756(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 2456(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 632(%esp), %ecx - movl 2452(%esp), %edx - calll .LmulPv512x32 - movl 92(%esp), %ecx # 4-byte Reload - addl 632(%esp), %ecx - movl 96(%esp), %eax # 4-byte Reload - adcl 636(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), 
%eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - adcl 652(%esp), %esi - movl %esi, 108(%esp) # 4-byte Spill - adcl 656(%esp), %ebp - movl %ebp, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %ebp # 4-byte Reload - adcl 664(%esp), %ebp - movl 88(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 676(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl 680(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 688(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 696(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 92(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 56(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 560(%esp), %ecx - movl 2460(%esp), %edx - calll .LmulPv512x32 - movl 92(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 560(%esp), %esi - movl 96(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %esi # 4-byte Reload - adcl 576(%esp), %esi - movl 108(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 592(%esp), %ebp - movl 88(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 608(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 612(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 624(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 2456(%esp), %eax - movl 52(%eax), %eax - movl %eax, (%esp) - leal 488(%esp), %ecx - movl 2452(%esp), %edx - calll .LmulPv512x32 - movl 96(%esp), %ecx # 4-byte Reload - addl 488(%esp), %ecx - movl 116(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 496(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl 500(%esp), %esi - movl %esi, 124(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 
108(%esp) # 4-byte Spill - movl 104(%esp), %esi # 4-byte Reload - adcl 508(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 516(%esp), %ebp - movl %ebp, 100(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl 520(%esp), %ebp - movl 80(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 536(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 96(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 56(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 416(%esp), %ecx - movl 2460(%esp), %edx - calll .LmulPv512x32 - movl 96(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 416(%esp), %edi - movl 116(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 108(%esp), %edi # 4-byte Reload - adcl 432(%esp), %edi - adcl 436(%esp), %esi - movl %esi, 104(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 440(%esp), %esi - movl 100(%esp), %eax # 4-byte Reload - adcl 444(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 448(%esp), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 2456(%esp), %eax - movl 56(%eax), %eax - movl %eax, (%esp) - leal 344(%esp), %ecx - movl 2452(%esp), %edx - calll .LmulPv512x32 - movl 116(%esp), %ecx # 4-byte Reload - addl 344(%esp), %ecx - movl 120(%esp), %ebp # 4-byte Reload - adcl 348(%esp), %ebp - movl 124(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - adcl 356(%esp), %edi - movl %edi, 108(%esp) # 4-byte Spill - movl 104(%esp), %edi # 4-byte Reload - adcl 360(%esp), %edi - adcl 364(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl 
%eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 116(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 56(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 272(%esp), %ecx - movl 2460(%esp), %edx - calll .LmulPv512x32 - movl 116(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 272(%esp), %esi - adcl 276(%esp), %ebp - movl %ebp, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 288(%esp), %edi - movl 112(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 296(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 72(%esp), %ebp # 4-byte Reload - adcl 308(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 2456(%esp), %eax - movl 60(%eax), %eax - movl %eax, (%esp) - leal 200(%esp), %ecx - movl 2452(%esp), %edx - calll .LmulPv512x32 - movl 120(%esp), %ecx # 4-byte Reload - addl 200(%esp), %ecx - movl 124(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 212(%esp), %edi - movl %edi, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 220(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 232(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 64(%esp), 
%eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - adcl 244(%esp), %edi - movl 76(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl 56(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 128(%esp), %ecx - movl 2460(%esp), %eax - movl %eax, %edx - calll .LmulPv512x32 - andl $1, %ebp - addl 128(%esp), %esi - movl 104(%esp), %ebx # 4-byte Reload - movl 124(%esp), %eax # 4-byte Reload - adcl 132(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 136(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - adcl 140(%esp), %ebx - movl %ebx, 104(%esp) # 4-byte Spill - movl 112(%esp), %edx # 4-byte Reload - adcl 144(%esp), %edx - movl %edx, 112(%esp) # 4-byte Spill - movl 100(%esp), %edx # 4-byte Reload - adcl 148(%esp), %edx - movl %edx, 100(%esp) # 4-byte Spill - movl 88(%esp), %edx # 4-byte Reload - adcl 152(%esp), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl 156(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 160(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 164(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 168(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - adcl 172(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 176(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 180(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 92(%esp), %edx # 4-byte Reload - adcl 184(%esp), %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 96(%esp), %edx # 4-byte Reload - adcl 188(%esp), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 116(%esp), %edx # 4-byte Reload - adcl 192(%esp), %edx - movl %edx, 116(%esp) # 4-byte Spill - adcl $0, %ebp - movl %eax, %edx - movl 2460(%esp), %edi - subl (%edi), %edx - movl %ecx, %eax - sbbl 4(%edi), %eax - movl %ebx, %ecx - sbbl 8(%edi), %ecx - movl 112(%esp), %ebx # 4-byte Reload - sbbl 12(%edi), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - movl 100(%esp), %ebx # 4-byte Reload - sbbl 16(%edi), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 88(%esp), %ebx # 4-byte Reload - sbbl 20(%edi), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - sbbl 24(%edi), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - sbbl 28(%edi), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - sbbl 32(%edi), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - sbbl 36(%edi), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - sbbl 40(%edi), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - sbbl 44(%edi), %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - sbbl 
[... deleted assembly continues; repetitive span elided: the tail of mcl_fp_mont16Lbmi2 — a sbbl borrow chain subtracting the modulus limb by limb, the carry test (sbbl $0, %ebp; andl $1, %ebp), and the jne-guarded .LBB242_1–.LBB242_32 blocks that store either the accumulator or the subtracted value into the 16 result limbs at (%edx), followed by the stack restore ...]
-	retl
-.Lfunc_end242:
-	.size	mcl_fp_mont16Lbmi2, .Lfunc_end242-mcl_fp_mont16Lbmi2
-
-	.globl	mcl_fp_montNF16Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_montNF16Lbmi2,@function
-mcl_fp_montNF16Lbmi2:                   # @mcl_fp_montNF16Lbmi2
-# BB#0:
-	pushl	%ebp
-	pushl	%ebx
-	pushl	%edi
-	pushl	%esi
-	subl	$2412, %esp             # imm = 0x96C
-	calll	.L243$pb
-.L243$pb:
-	popl	%ebx
-.Ltmp54:
-	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp54-.L243$pb), %ebx
[... repetitive span elided: the body of mcl_fp_montNF16Lbmi2 — sixteen unrolled rounds that each call .LmulPv512x32 on the next 32-bit limb of the multiplier, fold the 512-bit partial product into the stack-resident accumulator with adcl carry chains, then multiply the low limb by -p^-1 mod 2^32 (imull 44(%esp)) and call .LmulPv512x32 on the modulus to clear that limb; after the last round a sbbl chain subtracts the modulus and js-guarded moves (.LBB243_1–.LBB243_32) select the normalized value for each of the 16 output limbs stored at (%edx) ...]
-	addl	$2412, %esp             # imm = 0x96C
-	popl	%esi
-	popl	%edi
-	popl	%ebx
-	popl	%ebp
-	retl
-.Lfunc_end243:
-	.size	mcl_fp_montNF16Lbmi2, .Lfunc_end243-mcl_fp_montNF16Lbmi2
-
-	.globl	mcl_fp_montRed16Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_montRed16Lbmi2,@function
-mcl_fp_montRed16Lbmi2:                  # @mcl_fp_montRed16Lbmi2
-# BB#0:
-	pushl	%ebp
-	pushl	%ebx
-	pushl	%edi
-	pushl	%esi
-	subl	$1356, %esp             # imm = 0x54C
-	calll	.L244$pb
-.L244$pb:
-	popl	%eax
-.Ltmp55:
-	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp55-.L244$pb), %eax
-	movl	%eax, 116(%esp)         # 4-byte Spill
-	movl	1384(%esp), %edx
-	movl	-4(%edx), %eax
-	movl	%eax, 100(%esp)         # 4-byte Spill
[... repetitive span elided: mcl_fp_montRed16Lbmi2 spills the 32 limbs of the double-width input (1380(%esp)) and the 16 limbs of the modulus (1384(%esp)) to the stack, then runs its reduction rounds — each multiplies the current low limb by -p^-1 mod 2^32 (imull 100(%esp)), calls .LmulPv512x32 on the modulus, and folds the product in with adcl / adcl $0 carry chains ...]
4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 976(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), %eax # 4-byte Reload - adcl 980(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 984(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 988(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 184(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 188(%esp) # 4-byte Spill - adcl $0, 168(%esp) # 4-byte Folded Spill - adcl $0, 176(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 172(%esp) # 4-byte Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - movl 144(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %ecx, %edi - movl %edi, %eax - movl 100(%esp), %ebp # 4-byte Reload - imull %ebp, %eax - movl %eax, (%esp) - leal 856(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 856(%esp), %edi - movl 96(%esp), %ecx # 4-byte Reload - adcl 860(%esp), %ecx - movl 104(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 872(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 876(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 888(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 912(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 184(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 920(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - adcl $0, 168(%esp) # 4-byte Folded Spill - movl 176(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 172(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 144(%esp) # 4-byte Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %ecx, %esi - movl %esi, %eax - imull %ebp, %eax - movl %eax, (%esp) - leal 784(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 784(%esp), %esi - movl 104(%esp), %ecx # 4-byte Reload - adcl 788(%esp), %ecx - movl 
108(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), %ebp # 4-byte Reload - adcl 828(%esp), %ebp - movl 196(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 184(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 176(%esp) # 4-byte Spill - adcl $0, 172(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - movl 156(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %ecx, %edi - movl %edi, %eax - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 712(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 712(%esp), %edi - movl 108(%esp), %ecx # 4-byte Reload - adcl 716(%esp), %ecx - movl 120(%esp), %eax # 4-byte Reload - adcl 720(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 728(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 732(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 736(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - adcl 752(%esp), %ebp - movl %ebp, 192(%esp) # 4-byte Spill - movl 196(%esp), %edi # 4-byte Reload - adcl 756(%esp), %edi - movl 148(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 184(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl 
%eax, 176(%esp) # 4-byte Spill - adcl $0, 172(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 156(%esp) # 4-byte Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl %ecx, %esi - movl %esi, %eax - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 640(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 640(%esp), %esi - movl 120(%esp), %ecx # 4-byte Reload - adcl 644(%esp), %ecx - movl 140(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %esi # 4-byte Reload - adcl 668(%esp), %esi - movl 204(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - adcl 680(%esp), %edi - movl %edi, 196(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 184(%esp), %eax # 4-byte Reload - adcl 688(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 696(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 152(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 156(%esp) # 4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 112(%esp) # 4-byte Spill - movl %ecx, %ebp - movl %ebp, %eax - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 568(%esp), %ecx - movl 1384(%esp), %eax - movl %eax, %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 568(%esp), %ebp - movl 140(%esp), %ecx # 4-byte Reload - adcl 572(%esp), %ecx - movl 136(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %ebp # 4-byte Reload - adcl 588(%esp), %ebp - adcl 592(%esp), %esi - movl %esi, 200(%esp) # 4-byte Spill - movl 204(%esp), %esi # 4-byte Reload - adcl 596(%esp), %esi - movl 192(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 
184(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 624(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - adcl 632(%esp), %edi - movl %edi, 152(%esp) # 4-byte Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %ecx, %edi - movl %edi, %eax - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 496(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 496(%esp), %edi - movl 136(%esp), %ecx # 4-byte Reload - adcl 500(%esp), %ecx - movl 160(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %edi # 4-byte Reload - adcl 508(%esp), %edi - adcl 512(%esp), %ebp - movl %ebp, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - adcl 520(%esp), %esi - movl %esi, 204(%esp) # 4-byte Spill - movl 192(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - movl 148(%esp), %ebp # 4-byte Reload - adcl 532(%esp), %ebp - movl 184(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %esi - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 424(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 424(%esp), %esi - movl 160(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - adcl 432(%esp), %edi - movl %edi, 164(%esp) # 4-byte Spill - movl 180(%esp), %ecx # 4-byte Reload - adcl 436(%esp), %ecx - movl %ecx, 180(%esp) # 4-byte Spill - movl 200(%esp), %ecx # 4-byte Reload - adcl 440(%esp), %ecx - movl %ecx, 200(%esp) # 4-byte Spill - movl 204(%esp), %ecx # 4-byte Reload - adcl 444(%esp), %ecx - movl %ecx, 204(%esp) # 4-byte Spill - movl 192(%esp), %ecx # 4-byte Reload - adcl 448(%esp), %ecx - movl %ecx, 192(%esp) # 4-byte Spill - movl 196(%esp), %ecx # 4-byte Reload - adcl 452(%esp), %ecx - movl %ecx, 196(%esp) # 4-byte Spill - adcl 456(%esp), %ebp - 
movl 184(%esp), %ecx # 4-byte Reload - adcl 460(%esp), %ecx - movl %ecx, 184(%esp) # 4-byte Spill - movl 188(%esp), %ecx # 4-byte Reload - adcl 464(%esp), %ecx - movl %ecx, 188(%esp) # 4-byte Spill - movl 168(%esp), %ecx # 4-byte Reload - adcl 468(%esp), %ecx - movl %ecx, 168(%esp) # 4-byte Spill - movl 176(%esp), %ecx # 4-byte Reload - adcl 472(%esp), %ecx - movl %ecx, 176(%esp) # 4-byte Spill - movl 172(%esp), %ecx # 4-byte Reload - adcl 476(%esp), %ecx - movl %ecx, 172(%esp) # 4-byte Spill - movl 152(%esp), %ecx # 4-byte Reload - adcl 480(%esp), %ecx - movl %ecx, 152(%esp) # 4-byte Spill - movl 156(%esp), %ecx # 4-byte Reload - adcl 484(%esp), %ecx - movl %ecx, 156(%esp) # 4-byte Spill - movl 144(%esp), %ecx # 4-byte Reload - adcl 488(%esp), %ecx - movl %ecx, 144(%esp) # 4-byte Spill - movl 132(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %eax, %esi - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 352(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 352(%esp), %esi - movl 164(%esp), %esi # 4-byte Reload - adcl 356(%esp), %esi - movl 180(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - adcl 380(%esp), %ebp - movl 184(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - adcl 416(%esp), %edi - movl %edi, 132(%esp) # 4-byte Spill - movl 128(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %esi, %eax - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 280(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 280(%esp), %esi - movl 180(%esp), %ecx # 4-byte Reload - adcl 284(%esp), %ecx - movl 200(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - adcl 304(%esp), %ebp - movl %ebp, 148(%esp) # 4-byte Spill - movl 184(%esp), %eax # 
4-byte Reload - adcl 308(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - movl 168(%esp), %esi # 4-byte Reload - adcl 316(%esp), %esi - movl 176(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl 344(%esp), %edi - movl %edi, 128(%esp) # 4-byte Spill - movl 124(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 112(%esp) # 4-byte Folded Spill - movl 100(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %ebp - movl %eax, (%esp) - leal 208(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 208(%esp), %ebp - movl 200(%esp), %edx # 4-byte Reload - adcl 212(%esp), %edx - movl %edx, 200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), %ecx # 4-byte Reload - adcl 220(%esp), %ecx - movl %ecx, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - movl 148(%esp), %ebp # 4-byte Reload - adcl 228(%esp), %ebp - movl %ebp, 148(%esp) # 4-byte Spill - movl 184(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl %eax, %ebx - movl 188(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - adcl 240(%esp), %esi - movl %esi, 168(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - adcl 272(%esp), %edi - movl %edi, 124(%esp) # 4-byte Spill - movl 112(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl %edx, %eax - subl 24(%esp), %edx # 4-byte Folded Reload - movl 204(%esp), %esi # 4-byte Reload - sbbl 12(%esp), %esi # 4-byte Folded Reload - sbbl 16(%esp), %ecx # 4-byte Folded Reload - movl 196(%esp), %eax # 4-byte Reload - sbbl 20(%esp), %eax # 4-byte Folded Reload - sbbl 28(%esp), %ebp # 4-byte Folded Reload - sbbl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 100(%esp) # 4-byte Spill - movl 188(%esp), %ebx # 4-byte Reload - sbbl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 104(%esp) # 4-byte Spill - movl 168(%esp), %ebx # 4-byte Reload - sbbl 40(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 108(%esp) # 4-byte Spill - movl 176(%esp), %ebx # 4-byte Reload - sbbl 44(%esp), %ebx # 4-byte Folded Reload - 
movl %ebx, 112(%esp) # 4-byte Spill - movl 172(%esp), %ebx # 4-byte Reload - sbbl 48(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 116(%esp) # 4-byte Spill - movl 152(%esp), %ebx # 4-byte Reload - sbbl 52(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 120(%esp) # 4-byte Spill - movl 156(%esp), %ebx # 4-byte Reload - sbbl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 136(%esp) # 4-byte Spill - movl 144(%esp), %ebx # 4-byte Reload - sbbl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 140(%esp) # 4-byte Spill - movl 132(%esp), %ebx # 4-byte Reload - sbbl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 160(%esp) # 4-byte Spill - movl 128(%esp), %ebx # 4-byte Reload - sbbl 68(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 164(%esp) # 4-byte Spill - movl 124(%esp), %ebx # 4-byte Reload - sbbl 72(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 180(%esp) # 4-byte Spill - sbbl $0, %edi - andl $1, %edi - movl %edi, %ebx - jne .LBB244_2 -# BB#1: - movl %edx, 200(%esp) # 4-byte Spill -.LBB244_2: - movl 1376(%esp), %edx - movl 200(%esp), %edi # 4-byte Reload - movl %edi, (%edx) - testb %bl, %bl - jne .LBB244_4 -# BB#3: - movl %esi, 204(%esp) # 4-byte Spill -.LBB244_4: - movl 204(%esp), %esi # 4-byte Reload - movl %esi, 4(%edx) - movl 192(%esp), %esi # 4-byte Reload - jne .LBB244_6 -# BB#5: - movl %ecx, %esi -.LBB244_6: - movl %esi, 8(%edx) - movl 196(%esp), %ecx # 4-byte Reload - jne .LBB244_8 -# BB#7: - movl %eax, %ecx -.LBB244_8: - movl %ecx, 12(%edx) - movl 128(%esp), %esi # 4-byte Reload - movl 148(%esp), %eax # 4-byte Reload - jne .LBB244_10 -# BB#9: - movl %ebp, %eax -.LBB244_10: - movl %eax, 16(%edx) - movl 124(%esp), %ecx # 4-byte Reload - movl 176(%esp), %eax # 4-byte Reload - movl 184(%esp), %ebp # 4-byte Reload - jne .LBB244_12 -# BB#11: - movl 100(%esp), %ebp # 4-byte Reload -.LBB244_12: - movl %ebp, 20(%edx) - movl 152(%esp), %ebp # 4-byte Reload - movl 188(%esp), %ebx # 4-byte Reload - jne .LBB244_14 -# BB#13: - movl 104(%esp), %ebx # 4-byte Reload -.LBB244_14: - movl %ebx, 24(%edx) - movl 156(%esp), %ebx # 4-byte Reload - movl 168(%esp), %edi # 4-byte Reload - jne .LBB244_16 -# BB#15: - movl 108(%esp), %edi # 4-byte Reload -.LBB244_16: - movl %edi, 28(%edx) - movl 144(%esp), %edi # 4-byte Reload - jne .LBB244_18 -# BB#17: - movl 112(%esp), %eax # 4-byte Reload -.LBB244_18: - movl %eax, 32(%edx) - jne .LBB244_20 -# BB#19: - movl 116(%esp), %eax # 4-byte Reload - movl %eax, 172(%esp) # 4-byte Spill -.LBB244_20: - movl 172(%esp), %eax # 4-byte Reload - movl %eax, 36(%edx) - jne .LBB244_22 -# BB#21: - movl 120(%esp), %ebp # 4-byte Reload -.LBB244_22: - movl %ebp, 40(%edx) - movl 132(%esp), %eax # 4-byte Reload - jne .LBB244_24 -# BB#23: - movl 136(%esp), %ebx # 4-byte Reload -.LBB244_24: - movl %ebx, 44(%edx) - jne .LBB244_26 -# BB#25: - movl 140(%esp), %edi # 4-byte Reload -.LBB244_26: - movl %edi, 48(%edx) - jne .LBB244_28 -# BB#27: - movl 160(%esp), %eax # 4-byte Reload -.LBB244_28: - movl %eax, 52(%edx) - jne .LBB244_30 -# BB#29: - movl 164(%esp), %esi # 4-byte Reload -.LBB244_30: - movl %esi, 56(%edx) - jne .LBB244_32 -# BB#31: - movl 180(%esp), %ecx # 4-byte Reload -.LBB244_32: - movl %ecx, 60(%edx) - addl $1356, %esp # imm = 0x54C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end244: - .size mcl_fp_montRed16Lbmi2, .Lfunc_end244-mcl_fp_montRed16Lbmi2 - - .globl mcl_fp_addPre16Lbmi2 - .align 16, 0x90 - .type mcl_fp_addPre16Lbmi2,@function -mcl_fp_addPre16Lbmi2: # @mcl_fp_addPre16Lbmi2 -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax 
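
The hunk ending above removes mcl_fp_montRed16Lbmi2, a 16-limb (512-bit) Montgomery reduction, fully unrolled with adcl chains and stack spills. As a reading aid only, here is a minimal portable C sketch of the same word-serial reduction. The function name, loop form, and the (z, xy, p, rp) parameter layout are illustrative assumptions, not the vendored code:

    #include <stdint.h>

    enum { N = 16 };   /* 16 x 32-bit limbs = 512 bits */

    /* z = xy / 2^(32*N) mod p, for xy < p * 2^(32*N), where
       rp = -p^{-1} mod 2^32. */
    static void montRed16(uint32_t z[N], uint32_t xy[2 * N],
                          const uint32_t p[N], uint32_t rp)
    {
        uint32_t top = 0;                    /* carry past limb 2N-1 */
        for (int i = 0; i < N; i++) {
            uint32_t q = xy[i] * rp;         /* kills limb i mod 2^32 */
            uint64_t carry = 0;
            for (int j = 0; j < N; j++) {
                uint64_t t = (uint64_t)q * p[j] + xy[i + j] + carry;
                xy[i + j] = (uint32_t)t;
                carry = t >> 32;
            }
            for (int k = i + N; carry != 0; k++) {   /* ripple up */
                if (k == 2 * N) { top += (uint32_t)carry; break; }
                uint64_t t = (uint64_t)xy[k] + carry;
                xy[k] = (uint32_t)t;
                carry = t >> 32;
            }
        }
        /* The high half (plus top) is now < 2p; subtract p once
           unless the value is already in range, as the removed
           code's final sbbl chain and .LBB244_* selection do. */
        uint32_t t[N];
        uint64_t borrow = 0;
        for (int j = 0; j < N; j++) {
            uint64_t d = (uint64_t)xy[N + j] - p[j] - borrow;
            t[j] = (uint32_t)d;
            borrow = (d >> 32) & 1;
        }
        int keep_sum = (top == 0) && (borrow != 0);  /* sum < p */
        for (int j = 0; j < N; j++)
            z[j] = keep_sum ? xy[N + j] : t[j];
    }

The q = xy[i] * rp step is what the repeated `imull 100(%esp), %eax` above computes: the removed code appears to keep rp spilled at 100(%esp) and multiplies it against the current low limb before each call to .LmulPv512x32.
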
- movl (%eax), %edx - movl 4(%eax), %esi - movl 20(%esp), %ecx - addl (%ecx), %edx - adcl 4(%ecx), %esi - movl 8(%eax), %ebx - adcl 8(%ecx), %ebx - movl 16(%esp), %edi - movl %edx, (%edi) - movl 12(%ecx), %edx - movl %esi, 4(%edi) - movl 16(%ecx), %esi - adcl 12(%eax), %edx - adcl 16(%eax), %esi - movl %ebx, 8(%edi) - movl 20(%eax), %ebx - movl %edx, 12(%edi) - movl 20(%ecx), %edx - adcl %ebx, %edx - movl 24(%eax), %ebx - movl %esi, 16(%edi) - movl 24(%ecx), %esi - adcl %ebx, %esi - movl 28(%eax), %ebx - movl %edx, 20(%edi) - movl 28(%ecx), %edx - adcl %ebx, %edx - movl 32(%eax), %ebx - movl %esi, 24(%edi) - movl 32(%ecx), %esi - adcl %ebx, %esi - movl 36(%eax), %ebx - movl %edx, 28(%edi) - movl 36(%ecx), %edx - adcl %ebx, %edx - movl 40(%eax), %ebx - movl %esi, 32(%edi) - movl 40(%ecx), %esi - adcl %ebx, %esi - movl 44(%eax), %ebx - movl %edx, 36(%edi) - movl 44(%ecx), %edx - adcl %ebx, %edx - movl 48(%eax), %ebx - movl %esi, 40(%edi) - movl 48(%ecx), %esi - adcl %ebx, %esi - movl 52(%eax), %ebx - movl %edx, 44(%edi) - movl 52(%ecx), %edx - adcl %ebx, %edx - movl 56(%eax), %ebx - movl %esi, 48(%edi) - movl 56(%ecx), %esi - adcl %ebx, %esi - movl %edx, 52(%edi) - movl %esi, 56(%edi) - movl 60(%eax), %eax - movl 60(%ecx), %ecx - adcl %eax, %ecx - movl %ecx, 60(%edi) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end245: - .size mcl_fp_addPre16Lbmi2, .Lfunc_end245-mcl_fp_addPre16Lbmi2 - - .globl mcl_fp_subPre16Lbmi2 - .align 16, 0x90 - .type mcl_fp_subPre16Lbmi2,@function -mcl_fp_subPre16Lbmi2: # @mcl_fp_subPre16Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edi - xorl %eax, %eax - movl 28(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%ecx), %ebp - sbbl 8(%edx), %ebp - movl 20(%esp), %ebx - movl %esi, (%ebx) - movl 12(%ecx), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ebx) - movl 16(%ecx), %edi - sbbl 16(%edx), %edi - movl %ebp, 8(%ebx) - movl 20(%edx), %ebp - movl %esi, 12(%ebx) - movl 20(%ecx), %esi - sbbl %ebp, %esi - movl 24(%edx), %ebp - movl %edi, 16(%ebx) - movl 24(%ecx), %edi - sbbl %ebp, %edi - movl 28(%edx), %ebp - movl %esi, 20(%ebx) - movl 28(%ecx), %esi - sbbl %ebp, %esi - movl 32(%edx), %ebp - movl %edi, 24(%ebx) - movl 32(%ecx), %edi - sbbl %ebp, %edi - movl 36(%edx), %ebp - movl %esi, 28(%ebx) - movl 36(%ecx), %esi - sbbl %ebp, %esi - movl 40(%edx), %ebp - movl %edi, 32(%ebx) - movl 40(%ecx), %edi - sbbl %ebp, %edi - movl 44(%edx), %ebp - movl %esi, 36(%ebx) - movl 44(%ecx), %esi - sbbl %ebp, %esi - movl 48(%edx), %ebp - movl %edi, 40(%ebx) - movl 48(%ecx), %edi - sbbl %ebp, %edi - movl 52(%edx), %ebp - movl %esi, 44(%ebx) - movl 52(%ecx), %esi - sbbl %ebp, %esi - movl 56(%edx), %ebp - movl %edi, 48(%ebx) - movl 56(%ecx), %edi - sbbl %ebp, %edi - movl %esi, 52(%ebx) - movl %edi, 56(%ebx) - movl 60(%edx), %edx - movl 60(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 60(%ebx) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end246: - .size mcl_fp_subPre16Lbmi2, .Lfunc_end246-mcl_fp_subPre16Lbmi2 - - .globl mcl_fp_shr1_16Lbmi2 - .align 16, 0x90 - .type mcl_fp_shr1_16Lbmi2,@function -mcl_fp_shr1_16Lbmi2: # @mcl_fp_shr1_16Lbmi2 -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - shrdl $1, %esi, %edx - movl 8(%esp), %ecx - movl %edx, (%ecx) - movl 8(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 4(%ecx) - movl 12(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 8(%ecx) - movl 
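
mcl_fp_addPre16Lbmi2 and mcl_fp_subPre16Lbmi2, both removed above, are the raw 512-bit add and subtract that the reduced ("fp") entry points below build on. A minimal C sketch of the pair, assuming 32-bit limbs; the names and loop form are illustrative:

    #include <stdint.h>

    /* z = x + y over 16 limbs; returns the carry out (0 or 1).
       The removed asm is the same chain: one addl, then 15 adcl. */
    static uint32_t addPre16(uint32_t z[16], const uint32_t x[16],
                             const uint32_t y[16])
    {
        uint64_t carry = 0;
        for (int i = 0; i < 16; i++) {
            uint64_t t = (uint64_t)x[i] + y[i] + carry;
            z[i] = (uint32_t)t;
            carry = t >> 32;
        }
        return (uint32_t)carry;
    }

    /* z = x - y over 16 limbs; returns the borrow out (0 or 1),
       matching the subl/sbbl chain above. */
    static uint32_t subPre16(uint32_t z[16], const uint32_t x[16],
                             const uint32_t y[16])
    {
        uint64_t borrow = 0;
        for (int i = 0; i < 16; i++) {
            uint64_t t = (uint64_t)x[i] - y[i] - borrow;
            z[i] = (uint32_t)t;
            borrow = (t >> 32) & 1;
        }
        return (uint32_t)borrow;
    }

The trailing `sbbl %eax, %eax` / `andl $1, %eax` idiom in both removed functions is exactly this 0/1 carry (or borrow) return value.
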
16(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 12(%ecx) - movl 20(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 16(%ecx) - movl 24(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 20(%ecx) - movl 28(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 24(%ecx) - movl 32(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 28(%ecx) - movl 36(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 32(%ecx) - movl 40(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 36(%ecx) - movl 44(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 40(%ecx) - movl 48(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 44(%ecx) - movl 52(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 48(%ecx) - movl 56(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 52(%ecx) - movl 60(%eax), %eax - shrdl $1, %eax, %edx - movl %edx, 56(%ecx) - shrl %eax - movl %eax, 60(%ecx) - popl %esi - retl -.Lfunc_end247: - .size mcl_fp_shr1_16Lbmi2, .Lfunc_end247-mcl_fp_shr1_16Lbmi2 - - .globl mcl_fp_add16Lbmi2 - .align 16, 0x90 - .type mcl_fp_add16Lbmi2,@function -mcl_fp_add16Lbmi2: # @mcl_fp_add16Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $56, %esp - movl 84(%esp), %edx - movl (%edx), %esi - movl 4(%edx), %ebp - movl 80(%esp), %ecx - addl (%ecx), %esi - movl %esi, %ebx - adcl 4(%ecx), %ebp - movl 8(%edx), %eax - adcl 8(%ecx), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 12(%ecx), %esi - movl 16(%ecx), %edi - adcl 12(%edx), %esi - movl %esi, 48(%esp) # 4-byte Spill - adcl 16(%edx), %edi - movl %edi, 12(%esp) # 4-byte Spill - movl 20(%ecx), %eax - adcl 20(%edx), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%ecx), %eax - adcl 24(%edx), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 28(%ecx), %eax - adcl 28(%edx), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%ecx), %eax - adcl 32(%edx), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%ecx), %eax - adcl 36(%edx), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 40(%ecx), %eax - adcl 40(%edx), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 44(%ecx), %eax - adcl 44(%edx), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 48(%ecx), %eax - adcl 48(%edx), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 52(%ecx), %eax - adcl 52(%edx), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 56(%ecx), %esi - adcl 56(%edx), %esi - movl 60(%ecx), %ecx - adcl 60(%edx), %ecx - movl 76(%esp), %edx - movl %ebx, (%edx) - movl %ebx, %eax - movl %ebp, 4(%edx) - movl 52(%esp), %ebx # 4-byte Reload - movl %ebx, 8(%edx) - movl 48(%esp), %ebx # 4-byte Reload - movl %ebx, 12(%edx) - movl %edi, 16(%edx) - movl 44(%esp), %edi # 4-byte Reload - movl %edi, 20(%edx) - movl 40(%esp), %edi # 4-byte Reload - movl %edi, 24(%edx) - movl 36(%esp), %edi # 4-byte Reload - movl %edi, 28(%edx) - movl 32(%esp), %edi # 4-byte Reload - movl %edi, 32(%edx) - movl 28(%esp), %edi # 4-byte Reload - movl %edi, 36(%edx) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 40(%edx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 44(%edx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 48(%edx) - movl 8(%esp), %edi # 4-byte Reload - movl %edi, 52(%edx) - movl %esi, 56(%edx) - movl %ecx, 60(%edx) - sbbl %ebx, %ebx - andl $1, %ebx - movl 88(%esp), %edi - subl (%edi), %eax - movl %eax, 4(%esp) # 4-byte Spill - sbbl 4(%edi), %ebp - movl %ebp, (%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - sbbl 8(%edi), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - sbbl 12(%edi), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - sbbl 16(%edi), 
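
mcl_fp_shr1_16Lbmi2, removed above, halves a 512-bit value. A sketch under the same assumptions (illustrative name and loop form):

    #include <stdint.h>

    /* y = x >> 1 across 16 limbs. Each shrdl in the removed code
       shifts a limb right by one while pulling in the low bit of the
       next limb, i.e. (x[i] >> 1) | (x[i+1] << 31); the top limb
       gets a plain shrl. */
    static void shr1_16(uint32_t y[16], const uint32_t x[16])
    {
        for (int i = 0; i < 15; i++)
            y[i] = (x[i] >> 1) | (x[i + 1] << 31);
        y[15] = x[15] >> 1;
    }
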
%eax - movl %eax, 12(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - sbbl 20(%edi), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - sbbl 24(%edi), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - sbbl 28(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - sbbl 32(%edi), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - sbbl 36(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - sbbl 40(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - sbbl 44(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - sbbl 48(%edi), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 8(%esp), %eax # 4-byte Reload - sbbl 52(%edi), %eax - movl %eax, %ebp - sbbl 56(%edi), %esi - sbbl 60(%edi), %ecx - sbbl $0, %ebx - testb $1, %bl - jne .LBB248_2 -# BB#1: # %nocarry - movl 4(%esp), %edi # 4-byte Reload - movl %edi, (%edx) - movl (%esp), %edi # 4-byte Reload - movl %edi, 4(%edx) - movl 52(%esp), %edi # 4-byte Reload - movl %edi, 8(%edx) - movl 48(%esp), %edi # 4-byte Reload - movl %edi, 12(%edx) - movl 12(%esp), %edi # 4-byte Reload - movl %edi, 16(%edx) - movl 44(%esp), %edi # 4-byte Reload - movl %edi, 20(%edx) - movl 40(%esp), %edi # 4-byte Reload - movl %edi, 24(%edx) - movl 36(%esp), %edi # 4-byte Reload - movl %edi, 28(%edx) - movl 32(%esp), %edi # 4-byte Reload - movl %edi, 32(%edx) - movl 28(%esp), %edi # 4-byte Reload - movl %edi, 36(%edx) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 40(%edx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 44(%edx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 48(%edx) - movl %ebp, 52(%edx) - movl %esi, 56(%edx) - movl %ecx, 60(%edx) -.LBB248_2: # %carry - addl $56, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end248: - .size mcl_fp_add16Lbmi2, .Lfunc_end248-mcl_fp_add16Lbmi2 - - .globl mcl_fp_addNF16Lbmi2 - .align 16, 0x90 - .type mcl_fp_addNF16Lbmi2,@function -mcl_fp_addNF16Lbmi2: # @mcl_fp_addNF16Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $124, %esp - movl 152(%esp), %edx - movl (%edx), %eax - movl 4(%edx), %ecx - movl 148(%esp), %esi - addl (%esi), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 4(%esi), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 60(%edx), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 56(%edx), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 52(%edx), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 48(%edx), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 44(%edx), %edi - movl 40(%edx), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 36(%edx), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 32(%edx), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 28(%edx), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 24(%edx), %eax - movl 20(%edx), %ebp - movl 16(%edx), %ebx - movl 12(%edx), %ecx - movl 8(%edx), %edx - adcl 8(%esi), %edx - movl %edx, 60(%esp) # 4-byte Spill - adcl 12(%esi), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - adcl 16(%esi), %ebx - movl %ebx, 68(%esp) # 4-byte Spill - adcl 20(%esi), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - adcl 24(%esi), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 28(%esi), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 32(%esi), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 120(%esp), %eax # 
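
mcl_fp_add16Lbmi2, removed above, is the reduced modular add: add, trial-subtract the modulus, and keep whichever result is in range. A hedged C equivalent, reusing the addPre16/subPre16 sketches from earlier; the branch form is illustrative, where the vendored code stores the raw sum first and overwrites it on the "%nocarry" path:

    #include <stdint.h>

    static uint32_t addPre16(uint32_t *, const uint32_t *, const uint32_t *);
    static uint32_t subPre16(uint32_t *, const uint32_t *, const uint32_t *);

    /* z = (x + y) mod p for x, y < p. */
    static void add16(uint32_t z[16], const uint32_t x[16],
                      const uint32_t y[16], const uint32_t p[16])
    {
        uint32_t s[16], t[16];
        uint32_t carry  = addPre16(s, x, y);   /* s = x + y            */
        uint32_t borrow = subPre16(t, s, p);   /* t = s - p (mod 2^512) */
        /* Keep t when the sum overflowed 512 bits or is >= p; in the
           overflow case the carry cancels the borrow. This matches
           the sbbl $0 / testb $1 / jne .LBB248_2 selection above. */
        int use_t = carry || !borrow;
        for (int i = 0; i < 16; i++)
            z[i] = use_t ? t[i] : s[i];
    }
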
4-byte Reload - adcl 36(%esi), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 40(%esi), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 44(%esi), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 48(%esi), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 92(%esp), %edi # 4-byte Reload - adcl 52(%esi), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 96(%esp), %edi # 4-byte Reload - adcl 56(%esi), %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 60(%esi), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 156(%esp), %edi - movl 80(%esp), %esi # 4-byte Reload - subl (%edi), %esi - movl 84(%esp), %eax # 4-byte Reload - sbbl 4(%edi), %eax - movl %eax, (%esp) # 4-byte Spill - sbbl 8(%edi), %edx - movl %edx, 4(%esp) # 4-byte Spill - sbbl 12(%edi), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - sbbl 16(%edi), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - sbbl 20(%edi), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - sbbl 24(%edi), %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - sbbl 28(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - sbbl 32(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - sbbl 36(%edi), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - sbbl 40(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - movl %eax, %ecx - sbbl 44(%edi), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - sbbl 48(%edi), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - sbbl 52(%edi), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - movl %ecx, %ebx - sbbl 56(%edi), %ebx - movl %ebx, 52(%esp) # 4-byte Spill - movl 112(%esp), %ebx # 4-byte Reload - sbbl 60(%edi), %ebx - movl 80(%esp), %edi # 4-byte Reload - movl %ebx, 56(%esp) # 4-byte Spill - testl %ebx, %ebx - js .LBB249_2 -# BB#1: - movl %esi, %edi -.LBB249_2: - movl 144(%esp), %ebx - movl %edi, (%ebx) - movl 84(%esp), %edx # 4-byte Reload - js .LBB249_4 -# BB#3: - movl (%esp), %edx # 4-byte Reload -.LBB249_4: - movl %edx, 4(%ebx) - movl 68(%esp), %edx # 4-byte Reload - movl 60(%esp), %eax # 4-byte Reload - js .LBB249_6 -# BB#5: - movl 4(%esp), %eax # 4-byte Reload -.LBB249_6: - movl %eax, 8(%ebx) - movl 100(%esp), %eax # 4-byte Reload - movl 88(%esp), %ecx # 4-byte Reload - movl 64(%esp), %esi # 4-byte Reload - js .LBB249_8 -# BB#7: - movl 8(%esp), %esi # 4-byte Reload -.LBB249_8: - movl %esi, 12(%ebx) - movl 108(%esp), %esi # 4-byte Reload - js .LBB249_10 -# BB#9: - movl 12(%esp), %edx # 4-byte Reload -.LBB249_10: - movl %edx, 16(%ebx) - movl 112(%esp), %edi # 4-byte Reload - movl 104(%esp), %ebp # 4-byte Reload - js .LBB249_12 -# BB#11: - movl 16(%esp), %edx # 4-byte Reload - movl %edx, 72(%esp) # 4-byte Spill -.LBB249_12: - movl 72(%esp), %edx # 4-byte Reload - movl %edx, 20(%ebx) - js .LBB249_14 -# BB#13: - movl 20(%esp), %ecx # 4-byte Reload -.LBB249_14: - movl %ecx, 24(%ebx) - js .LBB249_16 -# BB#15: - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 116(%esp) # 4-byte Spill -.LBB249_16: - movl 116(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%ebx) - js .LBB249_18 -# BB#17: - movl 28(%esp), %eax # 4-byte Reload -.LBB249_18: - movl %eax, 32(%ebx) - movl 96(%esp), %ecx # 4-byte Reload - js .LBB249_20 -# BB#19: - movl 32(%esp), 
%eax # 4-byte Reload - movl %eax, 120(%esp) # 4-byte Spill -.LBB249_20: - movl 120(%esp), %eax # 4-byte Reload - movl %eax, 36(%ebx) - js .LBB249_22 -# BB#21: - movl 36(%esp), %ebp # 4-byte Reload -.LBB249_22: - movl %ebp, 40(%ebx) - movl 76(%esp), %eax # 4-byte Reload - js .LBB249_24 -# BB#23: - movl 40(%esp), %eax # 4-byte Reload -.LBB249_24: - movl %eax, 44(%ebx) - movl 92(%esp), %eax # 4-byte Reload - js .LBB249_26 -# BB#25: - movl 44(%esp), %esi # 4-byte Reload -.LBB249_26: - movl %esi, 48(%ebx) - js .LBB249_28 -# BB#27: - movl 48(%esp), %eax # 4-byte Reload -.LBB249_28: - movl %eax, 52(%ebx) - js .LBB249_30 -# BB#29: - movl 52(%esp), %ecx # 4-byte Reload -.LBB249_30: - movl %ecx, 56(%ebx) - js .LBB249_32 -# BB#31: - movl 56(%esp), %edi # 4-byte Reload -.LBB249_32: - movl %edi, 60(%ebx) - addl $124, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end249: - .size mcl_fp_addNF16Lbmi2, .Lfunc_end249-mcl_fp_addNF16Lbmi2 - - .globl mcl_fp_sub16Lbmi2 - .align 16, 0x90 - .type mcl_fp_sub16Lbmi2,@function -mcl_fp_sub16Lbmi2: # @mcl_fp_sub16Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $60, %esp - movl 84(%esp), %esi - movl (%esi), %eax - movl 4(%esi), %ecx - xorl %ebx, %ebx - movl 88(%esp), %edi - subl (%edi), %eax - movl %eax, 52(%esp) # 4-byte Spill - sbbl 4(%edi), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 8(%esi), %eax - sbbl 8(%edi), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 12(%esi), %eax - sbbl 12(%edi), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 16(%esi), %eax - sbbl 16(%edi), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 20(%esi), %eax - sbbl 20(%edi), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 24(%esi), %eax - sbbl 24(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 28(%esi), %eax - sbbl 28(%edi), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 32(%esi), %eax - sbbl 32(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 36(%esi), %eax - sbbl 36(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esi), %eax - sbbl 40(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 44(%esi), %edx - sbbl 44(%edi), %edx - movl %edx, 12(%esp) # 4-byte Spill - movl 48(%esi), %ecx - sbbl 48(%edi), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 52(%esi), %eax - sbbl 52(%edi), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 56(%esi), %ebp - sbbl 56(%edi), %ebp - movl 60(%esi), %esi - sbbl 60(%edi), %esi - sbbl $0, %ebx - testb $1, %bl - movl 80(%esp), %ebx - movl 52(%esp), %edi # 4-byte Reload - movl %edi, (%ebx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 4(%ebx) - movl 44(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl 56(%esp), %edi # 4-byte Reload - movl %edi, 12(%ebx) - movl 48(%esp), %edi # 4-byte Reload - movl %edi, 16(%ebx) - movl 40(%esp), %edi # 4-byte Reload - movl %edi, 20(%ebx) - movl 36(%esp), %edi # 4-byte Reload - movl %edi, 24(%ebx) - movl 32(%esp), %edi # 4-byte Reload - movl %edi, 28(%ebx) - movl 28(%esp), %edi # 4-byte Reload - movl %edi, 32(%ebx) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 36(%ebx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 40(%ebx) - movl %edx, 44(%ebx) - movl %ecx, 48(%ebx) - movl %eax, 52(%ebx) - movl %ebp, 56(%ebx) - movl %esi, 60(%ebx) - je .LBB250_2 -# BB#1: # %carry - movl %esi, (%esp) # 4-byte Spill - movl 92(%esp), %esi - movl 52(%esp), %ecx # 4-byte Reload - addl (%esi), %ecx - movl %ecx, (%ebx) - movl 16(%esp), %edx # 4-byte Reload - adcl 4(%esi), %edx - movl %edx, 4(%ebx) - movl 44(%esp), %edi # 4-byte Reload - adcl 8(%esi), 
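
mcl_fp_addNF16Lbmi2, removed above, is the "NF" variant used when the modulus leaves the top bit free, so x + y cannot carry out of 512 bits and the sign of x + y - p (the js branches above) decides the selection. An illustrative sketch under the same assumptions:

    #include <stdint.h>

    static uint32_t addPre16(uint32_t *, const uint32_t *, const uint32_t *);
    static uint32_t subPre16(uint32_t *, const uint32_t *, const uint32_t *);

    /* z = (x + y) mod p, assuming p < 2^511 so the add never carries. */
    static void addNF16(uint32_t z[16], const uint32_t x[16],
                        const uint32_t y[16], const uint32_t p[16])
    {
        uint32_t s[16], t[16];
        (void)addPre16(s, x, y);             /* carry is provably 0 */
        uint32_t borrow = subPre16(t, s, p);
        for (int i = 0; i < 16; i++)
            z[i] = borrow ? s[i] : t[i];     /* borrow => s < p */
    }

With no carry possible, the borrow of the trial subtraction equals the sign bit that the removed code tests with js after the final sbbl.
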
%edi - movl 12(%esi), %eax - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %edi, 8(%ebx) - movl 16(%esi), %ecx - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %eax, 12(%ebx) - movl 20(%esi), %eax - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %ecx, 16(%ebx) - movl 24(%esi), %ecx - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %eax, 20(%ebx) - movl 28(%esi), %eax - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %ecx, 24(%ebx) - movl 32(%esi), %ecx - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %eax, 28(%ebx) - movl 36(%esi), %eax - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %ecx, 32(%ebx) - movl 40(%esi), %ecx - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %eax, 36(%ebx) - movl 44(%esi), %eax - adcl 12(%esp), %eax # 4-byte Folded Reload - movl %ecx, 40(%ebx) - movl 48(%esi), %ecx - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %eax, 44(%ebx) - movl 52(%esi), %eax - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %ecx, 48(%ebx) - movl %eax, 52(%ebx) - movl 56(%esi), %eax - adcl %ebp, %eax - movl %eax, 56(%ebx) - movl 60(%esi), %eax - adcl (%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%ebx) -.LBB250_2: # %nocarry - addl $60, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end250: - .size mcl_fp_sub16Lbmi2, .Lfunc_end250-mcl_fp_sub16Lbmi2 - - .globl mcl_fp_subNF16Lbmi2 - .align 16, 0x90 - .type mcl_fp_subNF16Lbmi2,@function -mcl_fp_subNF16Lbmi2: # @mcl_fp_subNF16Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $104, %esp - movl 128(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edx - movl 132(%esp), %edi - subl (%edi), %esi - movl %esi, 64(%esp) # 4-byte Spill - sbbl 4(%edi), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 60(%ecx), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 56(%ecx), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 52(%ecx), %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 48(%ecx), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 44(%ecx), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 40(%ecx), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 36(%ecx), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 32(%ecx), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 28(%ecx), %ebp - movl 24(%ecx), %ebx - movl 20(%ecx), %esi - movl 16(%ecx), %edx - movl 12(%ecx), %eax - movl 8(%ecx), %ecx - sbbl 8(%edi), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - sbbl 12(%edi), %eax - movl %eax, 40(%esp) # 4-byte Spill - sbbl 16(%edi), %edx - movl %edx, 44(%esp) # 4-byte Spill - sbbl 20(%edi), %esi - movl %esi, 48(%esp) # 4-byte Spill - sbbl 24(%edi), %ebx - movl %ebx, 52(%esp) # 4-byte Spill - sbbl 28(%edi), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - sbbl 32(%edi), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - sbbl 36(%edi), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - sbbl 40(%edi), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - sbbl 44(%edi), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - sbbl 48(%edi), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - sbbl 52(%edi), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - sbbl 56(%edi), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - sbbl 60(%edi), %eax - movl %eax, 80(%esp) # 4-byte Spill - sarl $31, %eax - movl 136(%esp), %esi - movl 60(%esi), %ecx - andl %eax, %ecx - 
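
mcl_fp_sub16Lbmi2, removed above, is the reduced subtract: subtract, and if it borrowed, add the modulus back (the "# %carry" block above). A sketch with the same caveats:

    #include <stdint.h>

    static uint32_t addPre16(uint32_t *, const uint32_t *, const uint32_t *);
    static uint32_t subPre16(uint32_t *, const uint32_t *, const uint32_t *);

    /* z = (x - y) mod p for x, y < p. */
    static void sub16(uint32_t z[16], const uint32_t x[16],
                      const uint32_t y[16], const uint32_t p[16])
    {
        if (subPre16(z, x, y))        /* borrowed: went negative     */
            (void)addPre16(z, z, p);  /* adding p wraps back in range */
    }
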
movl %ecx, 60(%esp) # 4-byte Spill - movl 56(%esi), %ecx - andl %eax, %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 52(%esi), %ecx - andl %eax, %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 48(%esi), %ecx - andl %eax, %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 44(%esi), %ecx - andl %eax, %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 40(%esi), %ecx - andl %eax, %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 36(%esi), %ecx - andl %eax, %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 32(%esi), %ecx - andl %eax, %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 28(%esi), %ecx - andl %eax, %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 24(%esi), %ecx - andl %eax, %ecx - movl %ecx, (%esp) # 4-byte Spill - movl 20(%esi), %ebp - andl %eax, %ebp - movl 16(%esi), %ebx - andl %eax, %ebx - movl 12(%esi), %edi - andl %eax, %edi - movl 8(%esi), %edx - andl %eax, %edx - movl 4(%esi), %ecx - andl %eax, %ecx - andl (%esi), %eax - addl 64(%esp), %eax # 4-byte Folded Reload - adcl 68(%esp), %ecx # 4-byte Folded Reload - movl 124(%esp), %esi - movl %eax, (%esi) - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %ecx, 4(%esi) - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edx, 8(%esi) - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %edi, 12(%esi) - adcl 48(%esp), %ebp # 4-byte Folded Reload - movl %ebx, 16(%esi) - movl (%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %ebp, 20(%esi) - movl 4(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %eax, 24(%esi) - movl 8(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %ecx, 28(%esi) - movl 12(%esp), %ecx # 4-byte Reload - adcl 100(%esp), %ecx # 4-byte Folded Reload - movl %eax, 32(%esi) - movl 16(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %ecx, 36(%esi) - movl 20(%esp), %ecx # 4-byte Reload - adcl 84(%esp), %ecx # 4-byte Folded Reload - movl %eax, 40(%esi) - movl 24(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %ecx, 44(%esi) - movl 28(%esp), %ecx # 4-byte Reload - adcl 92(%esp), %ecx # 4-byte Folded Reload - movl %eax, 48(%esi) - movl 36(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %ecx, 52(%esi) - movl %eax, 56(%esi) - movl 60(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esi) - addl $104, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end251: - .size mcl_fp_subNF16Lbmi2, .Lfunc_end251-mcl_fp_subNF16Lbmi2 - - .globl mcl_fpDbl_add16Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_add16Lbmi2,@function -mcl_fpDbl_add16Lbmi2: # @mcl_fpDbl_add16Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $116, %esp - movl 144(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edx - movl 140(%esp), %ebx - addl (%ebx), %esi - adcl 4(%ebx), %edx - movl 8(%ecx), %edi - adcl 8(%ebx), %edi - movl 12(%ebx), %ebp - movl 136(%esp), %eax - movl %esi, (%eax) - movl 16(%ebx), %esi - adcl 12(%ecx), %ebp - adcl 16(%ecx), %esi - movl %edx, 4(%eax) - movl 72(%ecx), %edx - movl %edx, 112(%esp) # 4-byte Spill - movl %edi, 8(%eax) - movl 20(%ecx), %edx - movl %ebp, 12(%eax) - movl 20(%ebx), %edi - adcl %edx, %edi - movl 24(%ecx), %edx - movl %esi, 16(%eax) - movl 24(%ebx), %esi - adcl %edx, %esi - movl 28(%ecx), %edx - movl %edi, 20(%eax) - movl 28(%ebx), %edi - adcl %edx, %edi - movl 32(%ecx), %edx - movl %esi, 24(%eax) - movl 32(%ebx), %esi - adcl %edx, %esi - movl 36(%ecx), %edx - movl %edi, 
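
mcl_fp_subNF16Lbmi2, removed above, performs the same reduction branchlessly: `sarl $31` smears the borrow into an all-ones mask, each limb of p is ANDed with that mask, and the masked modulus is added back. A sketch:

    #include <stdint.h>

    static uint32_t subPre16(uint32_t *, const uint32_t *, const uint32_t *);

    /* z = (x - y) mod p, branch-free: add back p & mask, where mask
       is all-ones exactly when the subtraction borrowed. */
    static void subNF16(uint32_t z[16], const uint32_t x[16],
                        const uint32_t y[16], const uint32_t p[16])
    {
        uint32_t mask = 0u - subPre16(z, x, y);  /* 0xffffffff iff borrow */
        uint64_t carry = 0;
        for (int i = 0; i < 16; i++) {
            uint64_t t = (uint64_t)z[i] + (p[i] & mask) + carry;
            z[i] = (uint32_t)t;
            carry = t >> 32;
        }
    }

The mask form avoids a data-dependent branch, which is presumably why the generator emits it this way for code paths that benefit from constant-time behaviour.
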
28(%eax) - movl 36(%ebx), %edi - adcl %edx, %edi - movl 40(%ecx), %edx - movl %esi, 32(%eax) - movl 40(%ebx), %esi - adcl %edx, %esi - movl 44(%ecx), %edx - movl %edi, 36(%eax) - movl 44(%ebx), %edi - adcl %edx, %edi - movl 48(%ecx), %edx - movl %esi, 40(%eax) - movl 48(%ebx), %esi - adcl %edx, %esi - movl 52(%ecx), %edx - movl %edi, 44(%eax) - movl 52(%ebx), %edi - adcl %edx, %edi - movl 56(%ecx), %edx - movl %esi, 48(%eax) - movl 56(%ebx), %esi - adcl %edx, %esi - movl 60(%ecx), %edx - movl %edi, 52(%eax) - movl 60(%ebx), %ebp - adcl %edx, %ebp - movl 64(%ecx), %edx - movl %esi, 56(%eax) - movl 64(%ebx), %esi - adcl %edx, %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 68(%ecx), %edx - movl %ebp, 60(%eax) - movl 68(%ebx), %eax - adcl %edx, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 72(%ebx), %eax - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 76(%ecx), %ebp - movl 76(%ebx), %eax - adcl %ebp, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%ecx), %ebp - movl 80(%ebx), %eax - adcl %ebp, %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 84(%ecx), %ebp - movl 84(%ebx), %eax - adcl %ebp, %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 88(%ecx), %ebp - movl 88(%ebx), %eax - adcl %ebp, %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 92(%ecx), %ebp - movl 92(%ebx), %eax - adcl %ebp, %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%ecx), %ebp - movl 96(%ebx), %eax - adcl %ebp, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 100(%ecx), %ebp - movl 100(%ebx), %edx - adcl %ebp, %edx - movl %edx, 112(%esp) # 4-byte Spill - movl 104(%ecx), %ebp - movl 104(%ebx), %edx - adcl %ebp, %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 108(%ecx), %ebp - movl 108(%ebx), %edx - adcl %ebp, %edx - movl %edx, 100(%esp) # 4-byte Spill - movl 112(%ecx), %edx - movl 112(%ebx), %ebp - adcl %edx, %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 116(%ecx), %edx - movl 116(%ebx), %esi - adcl %edx, %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 120(%ecx), %edx - movl 120(%ebx), %edi - adcl %edx, %edi - movl 124(%ecx), %ecx - movl 124(%ebx), %esi - adcl %ecx, %esi - sbbl %ecx, %ecx - andl $1, %ecx - movl 148(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - subl (%edx), %ebx - movl %ebx, (%esp) # 4-byte Spill - movl 76(%esp), %ebx # 4-byte Reload - sbbl 4(%edx), %ebx - movl %ebx, 4(%esp) # 4-byte Spill - movl 80(%esp), %ebx # 4-byte Reload - sbbl 8(%edx), %ebx - movl %ebx, 8(%esp) # 4-byte Spill - movl 84(%esp), %ebx # 4-byte Reload - sbbl 12(%edx), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - movl 104(%esp), %ebx # 4-byte Reload - sbbl 16(%edx), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 88(%esp), %ebx # 4-byte Reload - sbbl 20(%edx), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - movl 108(%esp), %ebx # 4-byte Reload - sbbl 24(%edx), %ebx - movl %ebx, 24(%esp) # 4-byte Spill - movl 92(%esp), %ebx # 4-byte Reload - sbbl 28(%edx), %ebx - movl %ebx, 28(%esp) # 4-byte Spill - movl %eax, %ebx - sbbl 32(%edx), %ebx - movl 112(%esp), %eax # 4-byte Reload - sbbl 36(%edx), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - sbbl 40(%edx), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - sbbl 44(%edx), %eax - movl %eax, 40(%esp) # 4-byte Spill - sbbl 48(%edx), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - movl %eax, %ebp - sbbl 52(%edx), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl %edi, %ebp - sbbl 56(%edx), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl %esi, 
%ebp - sbbl 60(%edx), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - sbbl $0, %ecx - andl $1, %ecx - jne .LBB252_2 -# BB#1: - movl %ebx, 64(%esp) # 4-byte Spill -.LBB252_2: - testb %cl, %cl - movl 72(%esp), %ecx # 4-byte Reload - jne .LBB252_4 -# BB#3: - movl (%esp), %ecx # 4-byte Reload -.LBB252_4: - movl 136(%esp), %ebx - movl %ecx, 64(%ebx) - movl %esi, %ebp - movl %edi, 72(%esp) # 4-byte Spill - movl 96(%esp), %edi # 4-byte Reload - movl 92(%esp), %ecx # 4-byte Reload - movl 88(%esp), %edx # 4-byte Reload - movl 76(%esp), %esi # 4-byte Reload - jne .LBB252_6 -# BB#5: - movl 4(%esp), %esi # 4-byte Reload -.LBB252_6: - movl %esi, 68(%ebx) - movl 84(%esp), %esi # 4-byte Reload - movl 80(%esp), %eax # 4-byte Reload - jne .LBB252_8 -# BB#7: - movl 8(%esp), %eax # 4-byte Reload -.LBB252_8: - movl %eax, 72(%ebx) - movl 60(%esp), %eax # 4-byte Reload - jne .LBB252_10 -# BB#9: - movl 12(%esp), %esi # 4-byte Reload -.LBB252_10: - movl %esi, 76(%ebx) - jne .LBB252_12 -# BB#11: - movl 16(%esp), %esi # 4-byte Reload - movl %esi, 104(%esp) # 4-byte Spill -.LBB252_12: - movl 104(%esp), %esi # 4-byte Reload - movl %esi, 80(%ebx) - jne .LBB252_14 -# BB#13: - movl 20(%esp), %edx # 4-byte Reload -.LBB252_14: - movl %edx, 84(%ebx) - jne .LBB252_16 -# BB#15: - movl 24(%esp), %edx # 4-byte Reload - movl %edx, 108(%esp) # 4-byte Spill -.LBB252_16: - movl 108(%esp), %edx # 4-byte Reload - movl %edx, 88(%ebx) - jne .LBB252_18 -# BB#17: - movl 28(%esp), %ecx # 4-byte Reload -.LBB252_18: - movl %ecx, 92(%ebx) - movl 64(%esp), %ecx # 4-byte Reload - movl %ecx, 96(%ebx) - jne .LBB252_20 -# BB#19: - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 112(%esp) # 4-byte Spill -.LBB252_20: - movl 112(%esp), %ecx # 4-byte Reload - movl %ecx, 100(%ebx) - jne .LBB252_22 -# BB#21: - movl 36(%esp), %edi # 4-byte Reload -.LBB252_22: - movl %edi, 104(%ebx) - movl 100(%esp), %ecx # 4-byte Reload - jne .LBB252_24 -# BB#23: - movl 40(%esp), %ecx # 4-byte Reload -.LBB252_24: - movl %ecx, 108(%ebx) - movl 72(%esp), %ecx # 4-byte Reload - jne .LBB252_26 -# BB#25: - movl 44(%esp), %eax # 4-byte Reload -.LBB252_26: - movl %eax, 112(%ebx) - movl 68(%esp), %eax # 4-byte Reload - jne .LBB252_28 -# BB#27: - movl 48(%esp), %eax # 4-byte Reload -.LBB252_28: - movl %eax, 116(%ebx) - jne .LBB252_30 -# BB#29: - movl 52(%esp), %ecx # 4-byte Reload -.LBB252_30: - movl %ecx, 120(%ebx) - jne .LBB252_32 -# BB#31: - movl 56(%esp), %ebp # 4-byte Reload -.LBB252_32: - movl %ebp, 124(%ebx) - addl $116, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end252: - .size mcl_fpDbl_add16Lbmi2, .Lfunc_end252-mcl_fpDbl_add16Lbmi2 - - .globl mcl_fpDbl_sub16Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub16Lbmi2,@function -mcl_fpDbl_sub16Lbmi2: # @mcl_fpDbl_sub16Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $108, %esp - movl 132(%esp), %eax - movl (%eax), %esi - movl 4(%eax), %edi - movl 136(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%eax), %ebx - sbbl 8(%edx), %ebx - movl 128(%esp), %ecx - movl %esi, (%ecx) - movl 12(%eax), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ecx) - movl 16(%eax), %edi - sbbl 16(%edx), %edi - movl %ebx, 8(%ecx) - movl 20(%edx), %ebx - movl %esi, 12(%ecx) - movl 20(%eax), %esi - sbbl %ebx, %esi - movl 24(%edx), %ebx - movl %edi, 16(%ecx) - movl 24(%eax), %edi - sbbl %ebx, %edi - movl 28(%edx), %ebx - movl %esi, 20(%ecx) - movl 28(%eax), %esi - sbbl %ebx, %esi - movl 32(%edx), %ebx - movl %edi, 24(%ecx) - movl 32(%eax), %edi - sbbl %ebx, %edi - movl 36(%edx), %ebx - movl %esi, 
28(%ecx) - movl 36(%eax), %esi - sbbl %ebx, %esi - movl 40(%edx), %ebx - movl %edi, 32(%ecx) - movl 40(%eax), %edi - sbbl %ebx, %edi - movl 44(%edx), %ebx - movl %esi, 36(%ecx) - movl 44(%eax), %esi - sbbl %ebx, %esi - movl 48(%edx), %ebx - movl %edi, 40(%ecx) - movl 48(%eax), %edi - sbbl %ebx, %edi - movl 52(%edx), %ebx - movl %esi, 44(%ecx) - movl 52(%eax), %esi - sbbl %ebx, %esi - movl 56(%edx), %ebx - movl %edi, 48(%ecx) - movl 56(%eax), %edi - sbbl %ebx, %edi - movl 60(%edx), %ebx - movl %esi, 52(%ecx) - movl 60(%eax), %esi - sbbl %ebx, %esi - movl 64(%edx), %ebx - movl %edi, 56(%ecx) - movl 64(%eax), %edi - sbbl %ebx, %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 68(%edx), %edi - movl %esi, 60(%ecx) - movl 68(%eax), %esi - sbbl %edi, %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 72(%edx), %esi - movl 72(%eax), %edi - sbbl %esi, %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 76(%edx), %esi - movl 76(%eax), %edi - sbbl %esi, %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 80(%edx), %esi - movl 80(%eax), %edi - sbbl %esi, %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 84(%edx), %esi - movl 84(%eax), %edi - sbbl %esi, %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 88(%edx), %esi - movl 88(%eax), %edi - sbbl %esi, %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 92(%edx), %esi - movl 92(%eax), %edi - sbbl %esi, %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 96(%edx), %esi - movl 96(%eax), %edi - sbbl %esi, %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 100(%edx), %esi - movl 100(%eax), %edi - sbbl %esi, %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 104(%edx), %esi - movl 104(%eax), %edi - sbbl %esi, %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 108(%edx), %esi - movl 108(%eax), %edi - sbbl %esi, %edi - movl %edi, 88(%esp) # 4-byte Spill - movl 112(%edx), %esi - movl 112(%eax), %edi - sbbl %esi, %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 116(%edx), %esi - movl 116(%eax), %edi - sbbl %esi, %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 120(%edx), %esi - movl 120(%eax), %edi - sbbl %esi, %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 124(%edx), %edx - movl 124(%eax), %eax - sbbl %edx, %eax - movl %eax, 104(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 140(%esp), %ebx - jne .LBB253_1 -# BB#2: - movl $0, 68(%esp) # 4-byte Folded Spill - jmp .LBB253_3 -.LBB253_1: - movl 60(%ebx), %edx - movl %edx, 68(%esp) # 4-byte Spill -.LBB253_3: - testb %al, %al - jne .LBB253_4 -# BB#5: - movl $0, 24(%esp) # 4-byte Folded Spill - movl $0, %ebp - jmp .LBB253_6 -.LBB253_4: - movl (%ebx), %ebp - movl 4(%ebx), %eax - movl %eax, 24(%esp) # 4-byte Spill -.LBB253_6: - jne .LBB253_7 -# BB#8: - movl $0, 36(%esp) # 4-byte Folded Spill - jmp .LBB253_9 -.LBB253_7: - movl 56(%ebx), %eax - movl %eax, 36(%esp) # 4-byte Spill -.LBB253_9: - jne .LBB253_10 -# BB#11: - movl $0, 32(%esp) # 4-byte Folded Spill - jmp .LBB253_12 -.LBB253_10: - movl 52(%ebx), %eax - movl %eax, 32(%esp) # 4-byte Spill -.LBB253_12: - jne .LBB253_13 -# BB#14: - movl $0, 28(%esp) # 4-byte Folded Spill - jmp .LBB253_15 -.LBB253_13: - movl 48(%ebx), %eax - movl %eax, 28(%esp) # 4-byte Spill -.LBB253_15: - jne .LBB253_16 -# BB#17: - movl $0, 20(%esp) # 4-byte Folded Spill - jmp .LBB253_18 -.LBB253_16: - movl 44(%ebx), %eax - movl %eax, 20(%esp) # 4-byte Spill -.LBB253_18: - jne .LBB253_19 -# BB#20: - movl $0, 16(%esp) # 4-byte Folded Spill - jmp .LBB253_21 -.LBB253_19: - movl 40(%ebx), %eax - movl %eax, 16(%esp) # 4-byte Spill -.LBB253_21: - jne .LBB253_22 -# BB#23: - movl $0, 12(%esp) # 
4-byte Folded Spill - jmp .LBB253_24 -.LBB253_22: - movl 36(%ebx), %eax - movl %eax, 12(%esp) # 4-byte Spill -.LBB253_24: - jne .LBB253_25 -# BB#26: - movl $0, 8(%esp) # 4-byte Folded Spill - jmp .LBB253_27 -.LBB253_25: - movl 32(%ebx), %eax - movl %eax, 8(%esp) # 4-byte Spill -.LBB253_27: - jne .LBB253_28 -# BB#29: - movl $0, 4(%esp) # 4-byte Folded Spill - jmp .LBB253_30 -.LBB253_28: - movl 28(%ebx), %eax - movl %eax, 4(%esp) # 4-byte Spill -.LBB253_30: - jne .LBB253_31 -# BB#32: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB253_33 -.LBB253_31: - movl 24(%ebx), %eax - movl %eax, (%esp) # 4-byte Spill -.LBB253_33: - jne .LBB253_34 -# BB#35: - movl $0, %esi - jmp .LBB253_36 -.LBB253_34: - movl 20(%ebx), %esi -.LBB253_36: - jne .LBB253_37 -# BB#38: - movl $0, %edx - jmp .LBB253_39 -.LBB253_37: - movl 16(%ebx), %edx -.LBB253_39: - jne .LBB253_40 -# BB#41: - movl $0, %edi - jmp .LBB253_42 -.LBB253_40: - movl 12(%ebx), %edi -.LBB253_42: - jne .LBB253_43 -# BB#44: - xorl %ebx, %ebx - jmp .LBB253_45 -.LBB253_43: - movl 8(%ebx), %ebx -.LBB253_45: - addl 48(%esp), %ebp # 4-byte Folded Reload - movl %ebp, %eax - movl 24(%esp), %ebp # 4-byte Reload - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl %eax, 64(%ecx) - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %ebp, 68(%ecx) - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %ebx, 72(%ecx) - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %edi, 76(%ecx) - adcl 60(%esp), %esi # 4-byte Folded Reload - movl %edx, 80(%ecx) - movl (%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %esi, 84(%ecx) - movl 4(%esp), %edx # 4-byte Reload - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %eax, 88(%ecx) - movl 8(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %edx, 92(%ecx) - movl 12(%esp), %edx # 4-byte Reload - adcl 80(%esp), %edx # 4-byte Folded Reload - movl %eax, 96(%ecx) - movl 16(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %edx, 100(%ecx) - movl 20(%esp), %edx # 4-byte Reload - adcl 88(%esp), %edx # 4-byte Folded Reload - movl %eax, 104(%ecx) - movl 28(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %edx, 108(%ecx) - movl 32(%esp), %edx # 4-byte Reload - adcl 96(%esp), %edx # 4-byte Folded Reload - movl %eax, 112(%ecx) - movl 36(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %edx, 116(%ecx) - movl %eax, 120(%ecx) - movl 68(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 124(%ecx) - addl $108, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end253: - .size mcl_fpDbl_sub16Lbmi2, .Lfunc_end253-mcl_fpDbl_sub16Lbmi2 - - .align 16, 0x90 - .type .LmulPv544x32,@function -.LmulPv544x32: # @mulPv544x32 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $60, %esp - movl %edx, %eax - movl 80(%esp), %esi - movl %esi, %edx - mulxl 4(%eax), %edi, %ebx - movl %esi, %edx - mulxl (%eax), %ebp, %edx - movl %ebp, 56(%esp) # 4-byte Spill - addl %edi, %edx - movl %edx, 52(%esp) # 4-byte Spill - movl %esi, %edx - mulxl 8(%eax), %edx, %edi - adcl %ebx, %edx - movl %edx, 48(%esp) # 4-byte Spill - movl %esi, %edx - mulxl 12(%eax), %edx, %ebx - adcl %edi, %edx - movl %edx, 44(%esp) # 4-byte Spill - movl %esi, %edx - mulxl 16(%eax), %edx, %edi - adcl %ebx, %edx - movl %edx, 40(%esp) # 4-byte Spill - movl %esi, %edx - mulxl 20(%eax), %edx, %ebx - adcl %edi, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl %esi, %edx - mulxl 24(%eax), 
%edx, %edi - adcl %ebx, %edx - movl %edx, 32(%esp) # 4-byte Spill - movl %esi, %edx - mulxl 28(%eax), %edx, %ebx - adcl %edi, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl %esi, %edx - mulxl 32(%eax), %edx, %edi - adcl %ebx, %edx - movl %edx, 24(%esp) # 4-byte Spill - movl %esi, %edx - mulxl 36(%eax), %edx, %ebx - adcl %edi, %edx - movl %edx, 20(%esp) # 4-byte Spill - movl %esi, %edx - mulxl 40(%eax), %edx, %edi - adcl %ebx, %edx - movl %edx, 16(%esp) # 4-byte Spill - movl %esi, %edx - mulxl 44(%eax), %edx, %ebx - adcl %edi, %edx - movl %edx, 12(%esp) # 4-byte Spill - movl %esi, %edx - mulxl 48(%eax), %edx, %edi - adcl %ebx, %edx - movl %edx, 8(%esp) # 4-byte Spill - movl %esi, %edx - mulxl 52(%eax), %ebx, %ebp - adcl %edi, %ebx - movl %esi, %edx - mulxl 56(%eax), %edi, %edx - movl %edx, (%esp) # 4-byte Spill - adcl %ebp, %edi - movl %esi, %edx - mulxl 60(%eax), %edx, %ebp - movl %ebp, 4(%esp) # 4-byte Spill - adcl (%esp), %edx # 4-byte Folded Reload - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, (%ecx) - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 4(%ecx) - movl 48(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%ecx) - movl 44(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%ecx) - movl 40(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%ecx) - movl 36(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%ecx) - movl 32(%esp), %ebp # 4-byte Reload - movl %ebp, 24(%ecx) - movl 28(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%ecx) - movl 24(%esp), %ebp # 4-byte Reload - movl %ebp, 32(%ecx) - movl 20(%esp), %ebp # 4-byte Reload - movl %ebp, 36(%ecx) - movl 16(%esp), %ebp # 4-byte Reload - movl %ebp, 40(%ecx) - movl 12(%esp), %ebp # 4-byte Reload - movl %ebp, 44(%ecx) - movl 8(%esp), %ebp # 4-byte Reload - movl %ebp, 48(%ecx) - movl %ebx, 52(%ecx) - movl %edi, 56(%ecx) - movl %edx, 60(%ecx) - movl %esi, %edx - mulxl 64(%eax), %eax, %edx - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%ecx) - adcl $0, %edx - movl %edx, 68(%ecx) - movl %ecx, %eax - addl $60, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end254: - .size .LmulPv544x32, .Lfunc_end254-.LmulPv544x32 - - .globl mcl_fp_mulUnitPre17Lbmi2 - .align 16, 0x90 - .type mcl_fp_mulUnitPre17Lbmi2,@function -mcl_fp_mulUnitPre17Lbmi2: # @mcl_fp_mulUnitPre17Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $140, %esp - calll .L255$pb -.L255$pb: - popl %ebx -.Ltmp56: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp56-.L255$pb), %ebx - movl 168(%esp), %eax - movl %eax, (%esp) - leal 64(%esp), %ecx - movl 164(%esp), %edx - calll .LmulPv544x32 - movl 132(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 128(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 124(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 120(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 116(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 112(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 108(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 104(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 100(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 96(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 92(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 88(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 84(%esp), %ebp - movl 80(%esp), %ebx - movl 76(%esp), %edi - movl 72(%esp), %esi - movl 64(%esp), %edx - movl 68(%esp), %ecx - movl 160(%esp), %eax - movl %edx, (%eax) - movl %ecx, 4(%eax) - movl %esi, 8(%eax) - movl %edi, 12(%eax) - movl %ebx, 16(%eax) - movl %ebp, 
20(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 40(%eax) - movl 36(%esp), %ecx # 4-byte Reload - movl %ecx, 44(%eax) - movl 40(%esp), %ecx # 4-byte Reload - movl %ecx, 48(%eax) - movl 44(%esp), %ecx # 4-byte Reload - movl %ecx, 52(%eax) - movl 48(%esp), %ecx # 4-byte Reload - movl %ecx, 56(%eax) - movl 52(%esp), %ecx # 4-byte Reload - movl %ecx, 60(%eax) - movl 56(%esp), %ecx # 4-byte Reload - movl %ecx, 64(%eax) - movl 60(%esp), %ecx # 4-byte Reload - movl %ecx, 68(%eax) - addl $140, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end255: - .size mcl_fp_mulUnitPre17Lbmi2, .Lfunc_end255-mcl_fp_mulUnitPre17Lbmi2 - - .globl mcl_fpDbl_mulPre17Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_mulPre17Lbmi2,@function -mcl_fpDbl_mulPre17Lbmi2: # @mcl_fpDbl_mulPre17Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1356, %esp # imm = 0x54C - calll .L256$pb -.L256$pb: - popl %edi -.Ltmp57: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp57-.L256$pb), %edi - movl %edi, 124(%esp) # 4-byte Spill - movl 1384(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1280(%esp), %ecx - movl 1380(%esp), %edx - movl %edx, %esi - movl %edi, %ebx - calll .LmulPv544x32 - movl 1348(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 1344(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 1340(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 1336(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 1332(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 1328(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1324(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 1320(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1316(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 1312(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 1308(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 1304(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1300(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 1296(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 1292(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 1288(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 1280(%esp), %eax - movl 1284(%esp), %ebp - movl 1376(%esp), %ecx - movl %eax, (%ecx) - movl 1384(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 1208(%esp), %ecx - movl %esi, %edx - movl %edi, %ebx - calll .LmulPv544x32 - addl 1208(%esp), %ebp - movl %ebp, 8(%esp) # 4-byte Spill - movl 1276(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 1272(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 1268(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 1264(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 1260(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 1256(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 1252(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1248(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1244(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 1240(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 1236(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 1232(%esp), %edi - movl 1228(%esp), %esi - movl 1224(%esp), %edx - movl 1220(%esp), %ecx - movl 1212(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill 
- movl 1216(%esp), %eax - movl 1376(%esp), %ebp - movl 8(%esp), %ebx # 4-byte Reload - movl %ebx, 4(%ebp) - movl 12(%esp), %ebp # 4-byte Reload - adcl %ebp, 120(%esp) # 4-byte Folded Spill - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - movl 68(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 64(%esp) # 4-byte Folded Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 1136(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 1136(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 1204(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 1200(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 1196(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 1192(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 1188(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 1184(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 1180(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 1176(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1172(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 1168(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1164(%esp), %ebx - movl 1160(%esp), %edi - movl 1156(%esp), %esi - movl 1152(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1148(%esp), %edx - movl 1140(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 1144(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 80(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 72(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte 
Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 1064(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 1064(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 1132(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1128(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 1124(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 1120(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 1116(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 1112(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1108(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 1100(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 1096(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 1092(%esp), %ebx - movl 1088(%esp), %edi - movl 1084(%esp), %esi - movl 1080(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 1076(%esp), %edx - movl 1068(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 1072(%esp), %ecx - movl 1376(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%eax) - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 
88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 992(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 992(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 1060(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1056(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 1052(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 1044(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 1040(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 1036(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 1032(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1028(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 1024(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1020(%esp), %ebx - movl 1016(%esp), %edi - movl 1012(%esp), %esi - movl 1008(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 1004(%esp), %edx - movl 996(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 1000(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 4(%esp), %ebp # 4-byte Folded Reload - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - adcl %eax, 24(%esp) # 4-byte Folded Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 88(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 920(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 920(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 988(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 984(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 980(%esp), %eax - movl %eax, 88(%esp) # 4-byte 
Spill - movl 976(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 972(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 968(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 964(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 960(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 956(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 952(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 948(%esp), %ebx - movl 944(%esp), %edi - movl 940(%esp), %esi - movl 936(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 932(%esp), %edx - movl 924(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 928(%esp), %ecx - movl 1376(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%eax) - movl 4(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 848(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 848(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 916(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 912(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 908(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 904(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 900(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 896(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 892(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 888(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 884(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 880(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 876(%esp), %ebx - movl 872(%esp), %edi - movl 868(%esp), %esi - movl 864(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 860(%esp), %edx - movl 852(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 856(%esp), %ecx - movl 1376(%esp), %eax - movl 
120(%esp), %ebp # 4-byte Reload - movl %ebp, 24(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 4(%esp), %ebp # 4-byte Folded Reload - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 776(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 776(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 844(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 840(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 836(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 832(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 828(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 820(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 816(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 812(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 808(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 804(%esp), %ebx - movl 800(%esp), %edi - movl 796(%esp), %esi - movl 792(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 788(%esp), %edx - movl 780(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 784(%esp), %ecx - movl 1376(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%eax) - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 
4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 704(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 704(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 772(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 768(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 764(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 760(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 756(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 752(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 748(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 744(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 740(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 736(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 732(%esp), %ebx - movl 728(%esp), %edi - movl 724(%esp), %esi - movl 720(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 716(%esp), %edx - movl 708(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 712(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 32(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 
4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 632(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 632(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 700(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 696(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 692(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 688(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 684(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 680(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 676(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 672(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 668(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 664(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 660(%esp), %ebx - movl 656(%esp), %edi - movl 652(%esp), %esi - movl 648(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 644(%esp), %edx - movl 636(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 640(%esp), %ecx - movl 1376(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 36(%eax) - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 560(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 560(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 628(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 624(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 620(%esp), %eax - movl %eax, 
92(%esp) # 4-byte Spill - movl 616(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 612(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 608(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 604(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 600(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 596(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 592(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 588(%esp), %ebx - movl 584(%esp), %edi - movl 580(%esp), %esi - movl 576(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 572(%esp), %edx - movl 564(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 568(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 40(%eax) - movl 48(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 24(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 488(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 488(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 556(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 552(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 548(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 544(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 540(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 536(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 532(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 528(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 524(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 520(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 516(%esp), %ebx - movl 512(%esp), %edi - movl 508(%esp), %esi - movl 504(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 500(%esp), %edx - movl 492(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 496(%esp), %ecx - movl 1376(%esp), %eax - movl 48(%esp), %ebp # 
4-byte Reload - movl %ebp, 44(%eax) - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 56(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 416(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 416(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 484(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 480(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 476(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 472(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 468(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 464(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 460(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 456(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 452(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 448(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 444(%esp), %ebx - movl 440(%esp), %edi - movl 436(%esp), %esi - movl 432(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 428(%esp), %edx - movl 420(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 424(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 48(%eax) - movl 48(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 52(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - 
movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 52(%eax), %eax - movl %eax, (%esp) - leal 344(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 344(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 412(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 408(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 404(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 400(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 396(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 392(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 388(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 384(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 380(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 376(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 372(%esp), %ebx - movl 368(%esp), %edi - movl 364(%esp), %esi - movl 360(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 356(%esp), %edx - movl 348(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 352(%esp), %ecx - movl 1376(%esp), %eax - movl 48(%esp), %ebp # 4-byte Reload - movl %ebp, 52(%eax) - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 
88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 56(%eax), %eax - movl %eax, (%esp) - leal 272(%esp), %ecx - movl 1380(%esp), %eax - movl %eax, %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 272(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 340(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 336(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 332(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 328(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 324(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 320(%esp), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 316(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 312(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 308(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 304(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 300(%esp), %ebx - movl 296(%esp), %edi - movl 292(%esp), %edx - movl 288(%esp), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 284(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 276(%esp), %eax - movl 280(%esp), %ecx - movl 120(%esp), %esi # 4-byte Reload - movl 1376(%esp), %ebp - movl %esi, 56(%ebp) - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, %ebp - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 76(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - adcl %eax, 24(%esp) # 4-byte Folded Spill - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 52(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 28(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 44(%esp), %esi # 4-byte Folded Reload - movl %esi, 112(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 1384(%esp), %ecx - movl %ecx, %eax - movl 60(%eax), %eax - movl %eax, (%esp) - leal 200(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 200(%esp), %ebp - movl %ebp, 12(%esp) # 4-byte Spill - movl 268(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 264(%esp), %eax - movl %eax, 
 [... generated carry-propagation body of mcl_fpDbl_mulPre17Lbmi2 elided:
      long runs of movl spill/reload and adcl folded-reload instructions
      accumulating the 17-limb double-width product into the output ...]
-	addl	$1356, %esp             # imm = 0x54C
-	popl	%esi
-	popl	%edi
-	popl	%ebx
-	popl	%ebp
-	retl
-.Lfunc_end256:
-	.size	mcl_fpDbl_mulPre17Lbmi2, .Lfunc_end256-mcl_fpDbl_mulPre17Lbmi2
-
-	.globl	mcl_fpDbl_sqrPre17Lbmi2
-	.align	16, 0x90
-	.type	mcl_fpDbl_sqrPre17Lbmi2,@function
-mcl_fpDbl_sqrPre17Lbmi2:                # @mcl_fpDbl_sqrPre17Lbmi2
-# BB#0:
-	pushl	%ebp
-	pushl	%ebx
-	pushl	%edi
-	pushl	%esi
-	subl	$1356, %esp             # imm = 0x54C
-	calll	.L257$pb
-.L257$pb:
-	popl	%ebx
-.Ltmp58:
-	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp58-.L257$pb), %ebx
 [... generated body of mcl_fpDbl_sqrPre17Lbmi2 elided: seventeen
      .LmulPv544x32 calls, one per 32-bit limb of the operand, each
      followed by the same movl/adcl spill-and-carry pattern as the
      multiply above ...]
-	addl	$1356, %esp             # imm = 0x54C
-	popl	%esi
-	popl	%edi
-	popl	%ebx
-	popl	%ebp
-	retl
-.Lfunc_end257:
-	.size	mcl_fpDbl_sqrPre17Lbmi2, .Lfunc_end257-mcl_fpDbl_sqrPre17Lbmi2
-
-	.globl	mcl_fp_mont17Lbmi2
-	.align	16, 0x90
-	.type	mcl_fp_mont17Lbmi2,@function
-mcl_fp_mont17Lbmi2:                     # @mcl_fp_mont17Lbmi2
-# BB#0:
-	pushl	%ebp
-	pushl	%ebx
-	pushl	%edi
-	pushl	%esi
-	subl	$2588, %esp             # imm = 0xA1C
-	calll	.L258$pb
-.L258$pb:
-	popl	%ebx
-.Ltmp59:
-	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp59-.L258$pb), %ebx
-	movl	2620(%esp), %eax
-	movl	-4(%eax), %esi
-	movl	%esi, 60(%esp)          # 4-byte Spill
-	movl	2616(%esp), %eax
-	movl	(%eax), %eax
-	movl	%eax, (%esp)
-	leal	2512(%esp), %ecx
-	movl	2612(%esp), %edx
-	calll	.LmulPv544x32
 [... generated body of mcl_fp_mont17Lbmi2 elided: per-limb
      .LmulPv544x32 calls interleaved with imull against the constant
      loaded from -4(%eax) above, sbbl/andl carry capture, and long
      adcl folded-reload chains ...]
- movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 1988(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1992(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl 1996(%esp), %ebp - movl %ebp, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 2000(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 2004(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %edi - movl %edi, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1864(%esp), %ecx - movl 2620(%esp), %eax - movl %eax, %edx - calll .LmulPv544x32 - andl $1, %ebp - movl %ebp, %ecx - addl 1864(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 1868(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1872(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1876(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - adcl 1880(%esp), %edi - adcl 1884(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1888(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1892(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %esi # 4-byte Reload - adcl 1896(%esp), %esi - movl 104(%esp), %eax # 4-byte Reload - adcl 1900(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1904(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1908(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 1912(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %ebp # 4-byte Reload - adcl 1916(%esp), %ebp - movl 120(%esp), %eax # 4-byte Reload - adcl 1920(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1924(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1928(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1932(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 1792(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 80(%esp), %ecx # 4-byte Reload - addl 1792(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1796(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1800(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1804(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1808(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1812(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1816(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 1820(%esp), %esi - movl %esi, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1824(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1828(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1832(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 
4-byte Reload - adcl 1836(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - adcl 1840(%esp), %ebp - movl %ebp, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1844(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %edi # 4-byte Reload - adcl 1848(%esp), %edi - movl 100(%esp), %ebp # 4-byte Reload - adcl 1852(%esp), %ebp - movl 112(%esp), %eax # 4-byte Reload - adcl 1856(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1860(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1720(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - andl $1, %esi - movl %esi, %eax - movl 80(%esp), %ecx # 4-byte Reload - addl 1720(%esp), %ecx - movl 92(%esp), %ecx # 4-byte Reload - adcl 1724(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1728(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1732(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1736(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1740(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1744(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1748(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 104(%esp), %esi # 4-byte Reload - adcl 1752(%esp), %esi - movl 108(%esp), %ecx # 4-byte Reload - adcl 1756(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 1760(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 1764(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 1768(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 120(%esp), %ecx # 4-byte Reload - adcl 1772(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - adcl 1776(%esp), %edi - movl %edi, 116(%esp) # 4-byte Spill - adcl 1780(%esp), %ebp - movl %ebp, 100(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1784(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl 1788(%esp), %ebp - adcl $0, %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 1648(%esp), %ecx - movl 2612(%esp), %eax - movl %eax, %edx - calll .LmulPv544x32 - movl 92(%esp), %eax # 4-byte Reload - addl 1648(%esp), %eax - movl 76(%esp), %edi # 4-byte Reload - adcl 1652(%esp), %edi - movl 68(%esp), %ecx # 4-byte Reload - adcl 1656(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1660(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1664(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1668(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1672(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - adcl 1676(%esp), %esi - movl %esi, 104(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1680(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 1684(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 1688(%esp), %ecx - movl %ecx, 
128(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 1692(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 120(%esp), %ecx # 4-byte Reload - adcl 1696(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 1700(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1704(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1708(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - adcl 1712(%esp), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1716(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %eax, %ebp - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1576(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - andl $1, %esi - movl %esi, %ecx - addl 1576(%esp), %ebp - adcl 1580(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1584(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 1588(%esp), %ebp - movl 72(%esp), %edi # 4-byte Reload - adcl 1592(%esp), %edi - movl 84(%esp), %esi # 4-byte Reload - adcl 1596(%esp), %esi - movl 96(%esp), %eax # 4-byte Reload - adcl 1600(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1604(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1608(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1612(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 1616(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 1620(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1624(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1628(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1632(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1636(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1640(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1644(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 1504(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 76(%esp), %ecx # 4-byte Reload - addl 1504(%esp), %ecx - movl 68(%esp), %eax # 4-byte Reload - adcl 1508(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 1512(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - adcl 1516(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - adcl 1520(%esp), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1524(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1528(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1532(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1536(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %edi # 4-byte Reload - adcl 1540(%esp), %edi - movl 132(%esp), %eax # 4-byte Reload - adcl 1544(%esp), %eax - movl %eax, 
132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1548(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1552(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1556(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1560(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1564(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl 1568(%esp), %esi - movl 92(%esp), %eax # 4-byte Reload - adcl 1572(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1432(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - movl 76(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 1432(%esp), %ebp - movl 68(%esp), %ecx # 4-byte Reload - adcl 1436(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1440(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1444(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1448(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1452(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1456(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 108(%esp), %ebp # 4-byte Reload - adcl 1460(%esp), %ebp - movl 124(%esp), %ecx # 4-byte Reload - adcl 1464(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - adcl 1468(%esp), %edi - movl %edi, 128(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 1472(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 120(%esp), %edi # 4-byte Reload - adcl 1476(%esp), %edi - movl 116(%esp), %ecx # 4-byte Reload - adcl 1480(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1484(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1488(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1492(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - adcl 1496(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1500(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 1360(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 68(%esp), %ecx # 4-byte Reload - addl 1360(%esp), %ecx - movl 64(%esp), %eax # 4-byte Reload - adcl 1364(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1368(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1372(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1376(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1380(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 1384(%esp), %ebp - movl 124(%esp), %eax # 4-byte Reload - adcl 1388(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 1392(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload 
- adcl 1396(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl 1400(%esp), %edi - movl %edi, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1404(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 1408(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 1412(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1416(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1420(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1424(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1428(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1288(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - movl 68(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 1288(%esp), %edi - movl 64(%esp), %ecx # 4-byte Reload - adcl 1292(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1296(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1300(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1304(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1308(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - adcl 1312(%esp), %ebp - movl 124(%esp), %ecx # 4-byte Reload - adcl 1316(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 1320(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 1324(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 120(%esp), %ecx # 4-byte Reload - adcl 1328(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 1332(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - adcl 1336(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 112(%esp), %edi # 4-byte Reload - adcl 1340(%esp), %edi - movl 88(%esp), %esi # 4-byte Reload - adcl 1344(%esp), %esi - movl 80(%esp), %ecx # 4-byte Reload - adcl 1348(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1352(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1356(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 1216(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 64(%esp), %ecx # 4-byte Reload - addl 1216(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 1220(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1224(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1228(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1232(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 1236(%esp), %ebp - movl %ebp, 108(%esp) # 4-byte Spill - movl 124(%esp), %ebp # 4-byte Reload - adcl 1240(%esp), %ebp - movl 128(%esp), %eax # 4-byte Reload - adcl 1244(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 1248(%esp), %eax - movl %eax, 
132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1252(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1256(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1260(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 1264(%esp), %edi - movl %edi, 112(%esp) # 4-byte Spill - adcl 1268(%esp), %esi - movl %esi, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1272(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1276(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1280(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1284(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1144(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - movl %edi, %eax - andl $1, %eax - addl 1144(%esp), %esi - movl 72(%esp), %ecx # 4-byte Reload - adcl 1148(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1152(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1156(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 104(%esp), %edi # 4-byte Reload - adcl 1160(%esp), %edi - movl 108(%esp), %ecx # 4-byte Reload - adcl 1164(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - adcl 1168(%esp), %ebp - movl %ebp, 124(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 1172(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 1176(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 120(%esp), %esi # 4-byte Reload - adcl 1180(%esp), %esi - movl 116(%esp), %ecx # 4-byte Reload - adcl 1184(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1188(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1192(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1196(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1200(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1204(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1208(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1212(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 1072(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 72(%esp), %ecx # 4-byte Reload - addl 1072(%esp), %ecx - movl 84(%esp), %eax # 4-byte Reload - adcl 1076(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %ebp # 4-byte Reload - adcl 1080(%esp), %ebp - adcl 1084(%esp), %edi - movl %edi, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1088(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1092(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 1096(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 1100(%esp), %eax - movl %eax, 132(%esp) # 4-byte 
Spill - adcl 1104(%esp), %esi - movl %esi, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1108(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 1112(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 1116(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1120(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1124(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1128(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1132(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1136(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1140(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 72(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1000(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - movl 72(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 1000(%esp), %edi - movl 84(%esp), %ecx # 4-byte Reload - adcl 1004(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - adcl 1008(%esp), %ebp - movl %ebp, 96(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1012(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1016(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 1020(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 1024(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 132(%esp), %ebp # 4-byte Reload - adcl 1028(%esp), %ebp - movl 120(%esp), %ecx # 4-byte Reload - adcl 1032(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - movl 116(%esp), %edi # 4-byte Reload - adcl 1036(%esp), %edi - adcl 1040(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1044(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1048(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1052(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 1056(%esp), %esi - movl 76(%esp), %ecx # 4-byte Reload - adcl 1060(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1064(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1068(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 928(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 84(%esp), %ecx # 4-byte Reload - addl 928(%esp), %ecx - movl 96(%esp), %eax # 4-byte Reload - adcl 932(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 936(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 940(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 944(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 948(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - adcl 952(%esp), %ebp - movl %ebp, 132(%esp) # 4-byte Spill - 
movl 120(%esp), %eax # 4-byte Reload - adcl 956(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl 960(%esp), %edi - movl %edi, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 976(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 980(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 984(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 988(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 996(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - movl %ecx, %ebp - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 856(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - andl $1, %esi - movl %esi, %ecx - addl 856(%esp), %ebp - movl 96(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 872(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 876(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %ebp # 4-byte Reload - adcl 888(%esp), %ebp - movl 100(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 896(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 912(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 920(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 924(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 2616(%esp), %ecx - movl %ecx, %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 784(%esp), %ecx - movl 2612(%esp), %eax - movl %eax, %edx - calll .LmulPv544x32 - movl 96(%esp), %ecx # 4-byte Reload - addl 784(%esp), %ecx - movl 104(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 
132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl 812(%esp), %ebp - movl %ebp, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 820(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - adcl 828(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - movl %ecx, %ebp - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 712(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - andl $1, %esi - movl %esi, %ecx - addl 712(%esp), %ebp - movl 104(%esp), %eax # 4-byte Reload - adcl 716(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 720(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %esi # 4-byte Reload - adcl 728(%esp), %esi - movl 132(%esp), %eax # 4-byte Reload - adcl 732(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %ebp # 4-byte Reload - adcl 736(%esp), %ebp - movl 116(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 752(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 756(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 780(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 52(%eax), %eax - movl %eax, (%esp) - leal 640(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 104(%esp), %ecx # 4-byte Reload - addl 640(%esp), %ecx - movl 108(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - adcl 652(%esp), %esi - movl %esi, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl 660(%esp), %ebp - movl %ebp, 120(%esp) # 4-byte Spill - movl 
116(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 680(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 688(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 696(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 104(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 568(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - movl 104(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 568(%esp), %esi - movl 108(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 600(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 608(%esp), %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 616(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 624(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 636(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 56(%eax), %eax - movl %eax, (%esp) - leal 496(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 108(%esp), %ecx # 4-byte Reload - addl 496(%esp), %ecx - movl 124(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - 
movl 116(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 524(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl 528(%esp), %ebp - movl 80(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 540(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - adcl 544(%esp), %edi - movl 64(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 108(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 424(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - movl 108(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 424(%esp), %esi - movl 124(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %esi # 4-byte Reload - adcl 440(%esp), %esi - movl 116(%esp), %eax # 4-byte Reload - adcl 444(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 456(%esp), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 472(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %ebp # 4-byte Reload - adcl 480(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 60(%eax), %eax - movl %eax, (%esp) - leal 352(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 124(%esp), %ecx # 4-byte Reload - addl 352(%esp), %ecx - movl 128(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl 364(%esp), %esi - movl %esi, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 
116(%esp) # 4-byte Spill - movl 100(%esp), %edi # 4-byte Reload - adcl 372(%esp), %edi - movl 112(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 404(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - movl %ecx, %ebp - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 280(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - andl $1, %esi - movl %esi, %ecx - addl 280(%esp), %ebp - movl 128(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %esi # 4-byte Reload - adcl 288(%esp), %esi - movl 120(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %ebp # 4-byte Reload - adcl 296(%esp), %ebp - adcl 300(%esp), %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %edi # 4-byte Reload - adcl 308(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 64(%eax), %eax - movl %eax, (%esp) - leal 208(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 128(%esp), %ecx # 4-byte Reload - addl 208(%esp), %ecx - adcl 212(%esp), %esi - movl %esi, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl 220(%esp), %ebp - movl %ebp, 116(%esp) # 4-byte Spill - movl 100(%esp), %ebp # 4-byte Reload - adcl 224(%esp), %ebp - movl 
112(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 232(%esp), %edi - movl %edi, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - sbbl %edi, %edi - movl 60(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 136(%esp), %ecx - movl 2620(%esp), %eax - movl %eax, %edx - calll .LmulPv544x32 - andl $1, %edi - addl 136(%esp), %esi - movl 116(%esp), %edx # 4-byte Reload - movl 132(%esp), %eax # 4-byte Reload - adcl 140(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 144(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl 148(%esp), %edx - movl %edx, 116(%esp) # 4-byte Spill - adcl 152(%esp), %ebp - movl %ebp, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 156(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl 80(%esp), %ecx # 4-byte Reload - adcl 164(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 168(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 172(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 176(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 180(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 184(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 188(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 192(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 196(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 200(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 204(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - adcl $0, %edi - movl 132(%esp), %ecx # 4-byte Reload - movl 2620(%esp), %ebx - subl (%ebx), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 120(%esp), %ecx # 4-byte Reload - sbbl 4(%ebx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - sbbl 8(%ebx), %edx - movl %edx, 20(%esp) # 4-byte Spill - sbbl 12(%ebx), %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl 112(%esp), %ebp # 4-byte Reload - movl %eax, %edx - sbbl 16(%ebx), %ebp - movl %ebp, 28(%esp) # 4-byte Spill - sbbl 
20(%ebx), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - sbbl 24(%ebx), %eax - movl %eax, 36(%esp) # 4-byte Spill - sbbl 28(%ebx), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - sbbl 32(%ebx), %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - sbbl 36(%ebx), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - sbbl 40(%ebx), %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - sbbl 44(%ebx), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - sbbl 48(%ebx), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 96(%esp), %ebp # 4-byte Reload - sbbl 52(%ebx), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - movl 104(%esp), %esi # 4-byte Reload - sbbl 56(%ebx), %esi - movl %esi, 128(%esp) # 4-byte Spill - movl %ebx, %ebp - movl 108(%esp), %ebx # 4-byte Reload - sbbl 60(%ebp), %ebx - movl 124(%esp), %esi # 4-byte Reload - sbbl 64(%ebp), %esi - movl %esi, %ebp - sbbl $0, %edi - andl $1, %edi - jne .LBB258_2 -# BB#1: - movl %ebx, 108(%esp) # 4-byte Spill -.LBB258_2: - movl %edi, %ebx - testb %bl, %bl - movl 132(%esp), %ebx # 4-byte Reload - jne .LBB258_4 -# BB#3: - movl 12(%esp), %ebx # 4-byte Reload -.LBB258_4: - movl 2608(%esp), %eax - movl %ebx, (%eax) - movl 120(%esp), %ebx # 4-byte Reload - jne .LBB258_6 -# BB#5: - movl 16(%esp), %ebx # 4-byte Reload -.LBB258_6: - movl %ebx, 4(%eax) - jne .LBB258_8 -# BB#7: - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 116(%esp) # 4-byte Spill -.LBB258_8: - movl 116(%esp), %ecx # 4-byte Reload - movl %ecx, 8(%eax) - jne .LBB258_10 -# BB#9: - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 100(%esp) # 4-byte Spill -.LBB258_10: - movl 100(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%eax) - movl 112(%esp), %esi # 4-byte Reload - jne .LBB258_12 -# BB#11: - movl 28(%esp), %esi # 4-byte Reload -.LBB258_12: - movl %esi, 16(%eax) - movl 80(%esp), %ecx # 4-byte Reload - jne .LBB258_14 -# BB#13: - movl 32(%esp), %edx # 4-byte Reload -.LBB258_14: - movl %edx, 20(%eax) - jne .LBB258_16 -# BB#15: - movl 36(%esp), %ecx # 4-byte Reload -.LBB258_16: - movl %ecx, 24(%eax) - movl 92(%esp), %ecx # 4-byte Reload - jne .LBB258_18 -# BB#17: - movl 40(%esp), %ecx # 4-byte Reload -.LBB258_18: - movl %ecx, 28(%eax) - movl 76(%esp), %ecx # 4-byte Reload - jne .LBB258_20 -# BB#19: - movl 44(%esp), %ecx # 4-byte Reload -.LBB258_20: - movl %ecx, 32(%eax) - movl 68(%esp), %ecx # 4-byte Reload - jne .LBB258_22 -# BB#21: - movl 48(%esp), %ecx # 4-byte Reload -.LBB258_22: - movl %ecx, 36(%eax) - movl 64(%esp), %ecx # 4-byte Reload - jne .LBB258_24 -# BB#23: - movl 52(%esp), %ecx # 4-byte Reload -.LBB258_24: - movl %ecx, 40(%eax) - movl 72(%esp), %ecx # 4-byte Reload - jne .LBB258_26 -# BB#25: - movl 56(%esp), %ecx # 4-byte Reload -.LBB258_26: - movl %ecx, 44(%eax) - movl 84(%esp), %ecx # 4-byte Reload - jne .LBB258_28 -# BB#27: - movl 60(%esp), %ecx # 4-byte Reload -.LBB258_28: - movl %ecx, 48(%eax) - movl 96(%esp), %ecx # 4-byte Reload - jne .LBB258_30 -# BB#29: - movl 88(%esp), %ecx # 4-byte Reload -.LBB258_30: - movl %ecx, 52(%eax) - movl 104(%esp), %ecx # 4-byte Reload - jne .LBB258_32 -# BB#31: - movl 128(%esp), %ecx # 4-byte Reload -.LBB258_32: - movl %ecx, 56(%eax) - movl 108(%esp), %ecx # 4-byte Reload - movl %ecx, 60(%eax) - movl 124(%esp), %ecx # 4-byte Reload - jne .LBB258_34 -# BB#33: - movl %ebp, %ecx -.LBB258_34: - movl %ecx, 64(%eax) - addl $2588, %esp # imm = 0xA1C - popl %esi - popl %edi - 
popl %ebx - popl %ebp - retl -.Lfunc_end258: - .size mcl_fp_mont17Lbmi2, .Lfunc_end258-mcl_fp_mont17Lbmi2 - - .globl mcl_fp_montNF17Lbmi2 - .align 16, 0x90 - .type mcl_fp_montNF17Lbmi2,@function -mcl_fp_montNF17Lbmi2: # @mcl_fp_montNF17Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $2572, %esp # imm = 0xA0C - calll .L259$pb -.L259$pb: - popl %ebx -.Ltmp60: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp60-.L259$pb), %ebx - movl 2604(%esp), %eax - movl -4(%eax), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 2496(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 2496(%esp), %edi - movl 2500(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl %edi, %eax - imull %esi, %eax - movl 2564(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 2560(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 2556(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 2552(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 2548(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 2544(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 2540(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 2536(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 2532(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 2528(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 2524(%esp), %ebp - movl 2520(%esp), %esi - movl 2516(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 2512(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 2508(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 2504(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl %eax, (%esp) - leal 2424(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 2424(%esp), %edi - movl 112(%esp), %eax # 4-byte Reload - adcl 2428(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 2432(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 2436(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 2440(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 2444(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 2448(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - adcl 2452(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 2456(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 2460(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 2464(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 2468(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 2472(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 2476(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %edi # 4-byte Reload - adcl 2480(%esp), %edi - movl 108(%esp), %eax # 4-byte Reload - adcl 2484(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 2488(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 2492(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 2352(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 2420(%esp), %ecx - movl 112(%esp), %edx # 4-byte Reload - addl 2352(%esp), %edx - movl 92(%esp), %eax # 
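Both the routine that just ended and the mcl_fp_montNF17Lbmi2 beginning here are generated
544-bit Montgomery multipliers that consume one 32-bit multiplier word per round. For readers
who would rather not reverse-engineer the generated assembly, the Go sketch below shows the
word-serial (CIOS-style) algorithm those rounds implement. It is an illustration under stated
assumptions, not code from this repository: the names montMul and n are ours, it favors
clarity over the register scheduling above, and it is not constant-time.

package mont

// n matches the "17L"/"544" in the routine names: 17 limbs of 32 bits.
const n = 17

// montMul returns x*y*R^-1 mod p with R = 2^(32*n), for little-endian
// limbs; inv is -p[0]^-1 mod 2^32. Plain CIOS: the generated assembly
// interleaves and schedules the same steps across registers.
func montMul(x, y, p *[n]uint32, inv uint32) [n]uint32 {
	var t [n + 2]uint64 // accumulator; each entry holds one 32-bit limb

	for i := 0; i < n; i++ {
		// t += x[i]*y: one .LmulPv544x32 call plus the adcl chain.
		var c uint64
		for j := 0; j < n; j++ {
			v := t[j] + uint64(x[i])*uint64(y[j]) + c
			t[j] = v & 0xffffffff
			c = v >> 32
		}
		v := t[n] + c
		t[n] = v & 0xffffffff
		t[n+1] = v >> 32

		// m = t[0] * (-p^-1 mod 2^32): the imull on the low limb.
		m := uint64(uint32(t[0]) * inv)

		// t = (t + m*p) >> 32: the .LmulPv544x32 call on the modulus
		// plus the addl/adcl chain; the low limb cancels exactly.
		v = t[0] + m*uint64(p[0]) // low 32 bits are zero by choice of m
		c = v >> 32
		for j := 1; j < n; j++ {
			v = t[j] + m*uint64(p[j]) + c
			t[j-1] = v & 0xffffffff
			c = v >> 32
		}
		v = t[n] + c
		t[n-1] = v & 0xffffffff
		t[n] = t[n+1] + v>>32
		t[n+1] = 0
	}

	// If t >= p, return t-p: the closing sbbl ladder and conditional
	// moves of mcl_fp_mont17Lbmi2 above.
	var out [n]uint32
	var borrow uint64
	for j := 0; j < n; j++ {
		v := t[j] - uint64(p[j]) - borrow
		out[j] = uint32(v)
		borrow = v >> 63 // top bit set means the subtraction borrowed
	}
	if t[n] < borrow { // t < p: keep the unsubtracted value
		for j := 0; j < n; j++ {
			out[j] = uint32(t[j])
		}
	}
	return out
}

The NF variant whose body follows performs the same rounds but, as its flattened text shows,
carries no separate flag word; judging from the generated pattern it relies on the modulus
leaving enough headroom that the 18th product word absorbs each round's carry.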
[flattened in extraction: the rounds for multiplier words 1 through 3 of mcl_fp_montNF17Lbmi2,
same multiply/reduce pattern. The difference from mcl_fp_mont17Lbmi2 is visible here: no
per-round sbbl/andl $1 carry word; instead the 18th output word of each .LmulPv544x32 call
(2420(%esp), 2276(%esp), 2132(%esp), ...) is folded in with adcl $0. The stream resumes in the
middle of the word 3 carry chain:]
- movl 80(%esp), %ecx # 4-byte Reload
- adcl 2076(%esp), %ecx
- movl %ecx, 80(%esp)
# 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 2080(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 2084(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 2088(%esp), %ebp - movl 68(%esp), %ecx # 4-byte Reload - adcl 2092(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 2096(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 2100(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 2104(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 2108(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - adcl 2112(%esp), %edi - movl 100(%esp), %ecx # 4-byte Reload - adcl 2116(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 2120(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - adcl 2124(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 2128(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 104(%esp) # 4-byte Spill - movl %edx, %esi - movl %esi, %eax - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1992(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 1992(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 1996(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 2000(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 2004(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 2008(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 2012(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 2016(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 2020(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 2024(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 2028(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 2032(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 2036(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 2040(%esp), %edi - movl %edi, 108(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 2044(%esp), %esi - movl 116(%esp), %eax # 4-byte Reload - adcl 2048(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 2052(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %edi # 4-byte Reload - adcl 2056(%esp), %edi - movl 104(%esp), %eax # 4-byte Reload - adcl 2060(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 1920(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 1988(%esp), %eax - movl 76(%esp), %edx # 4-byte Reload - addl 1920(%esp), %edx - movl 84(%esp), %ecx # 4-byte Reload - adcl 1924(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1928(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1932(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 1936(%esp), %ebp - movl 56(%esp), %ecx # 
4-byte Reload - adcl 1940(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1944(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1948(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1952(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1956(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1960(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1964(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - adcl 1968(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 1972(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 1976(%esp), %esi - adcl 1980(%esp), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1984(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl %edx, %eax - movl %edx, %edi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1848(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 1848(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 1852(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1856(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1860(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 1864(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1868(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1872(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1876(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1880(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1884(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1888(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1892(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1896(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1900(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 1904(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 92(%esp), %ebp # 4-byte Reload - adcl 1908(%esp), %ebp - movl 104(%esp), %eax # 4-byte Reload - adcl 1912(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1916(%esp), %eax - movl %eax, %edi - movl 2600(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 1776(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 1844(%esp), %eax - movl 84(%esp), %edx # 4-byte Reload - addl 1776(%esp), %edx - movl 80(%esp), %ecx # 4-byte Reload - adcl 1780(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1784(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1788(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1792(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1796(%esp), %ecx - movl 
%ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1800(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1804(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 88(%esp), %esi # 4-byte Reload - adcl 1808(%esp), %esi - movl 96(%esp), %ecx # 4-byte Reload - adcl 1812(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1816(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1820(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 1824(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1828(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - adcl 1832(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1836(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - adcl 1840(%esp), %edi - adcl $0, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl %edx, %ebp - movl %ebp, %eax - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1704(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 1704(%esp), %ebp - movl 80(%esp), %eax # 4-byte Reload - adcl 1708(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1712(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1716(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1720(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1724(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1728(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1732(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 1736(%esp), %esi - movl %esi, %ebp - movl 96(%esp), %esi # 4-byte Reload - adcl 1740(%esp), %esi - movl 108(%esp), %eax # 4-byte Reload - adcl 1744(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1748(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1752(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1756(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1760(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1764(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 1768(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 1772(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 1632(%esp), %ecx - movl 2596(%esp), %eax - movl %eax, %edx - calll .LmulPv544x32 - movl 1700(%esp), %eax - movl 80(%esp), %edx # 4-byte Reload - addl 1632(%esp), %edx - movl 60(%esp), %ecx # 4-byte Reload - adcl 1636(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1640(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1644(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1648(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1652(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1656(%esp), %ecx - 
movl %ecx, 64(%esp) # 4-byte Spill - adcl 1660(%esp), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - adcl 1664(%esp), %esi - movl %esi, 96(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1668(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 1672(%esp), %esi - movl 116(%esp), %ecx # 4-byte Reload - adcl 1676(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1680(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1684(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1688(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - adcl 1692(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1696(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 80(%esp) # 4-byte Spill - movl %edx, %edi - movl %edi, %eax - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1560(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 1560(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 1564(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1568(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1572(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1576(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl 1580(%esp), %edi - movl 64(%esp), %ebp # 4-byte Reload - adcl 1584(%esp), %ebp - movl 88(%esp), %eax # 4-byte Reload - adcl 1588(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1592(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1596(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 1600(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1604(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 1608(%esp), %esi - movl 92(%esp), %eax # 4-byte Reload - adcl 1612(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1616(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1620(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1624(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1628(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 1488(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 1556(%esp), %eax - movl 60(%esp), %ecx # 4-byte Reload - addl 1488(%esp), %ecx - movl 52(%esp), %edx # 4-byte Reload - adcl 1492(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 1496(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 1500(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - adcl 1504(%esp), %edi - adcl 1508(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 88(%esp), %edx # 4-byte Reload - adcl 1512(%esp), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 96(%esp), %edx # 4-byte Reload - adcl 1516(%esp), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 108(%esp), %edx # 4-byte Reload - adcl 1520(%esp), %edx - movl %edx, 
108(%esp) # 4-byte Spill - movl 100(%esp), %edx # 4-byte Reload - adcl 1524(%esp), %edx - movl %edx, 100(%esp) # 4-byte Spill - movl 116(%esp), %edx # 4-byte Reload - adcl 1528(%esp), %edx - movl %edx, 116(%esp) # 4-byte Spill - adcl 1532(%esp), %esi - movl %esi, %ebp - movl 92(%esp), %edx # 4-byte Reload - adcl 1536(%esp), %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 104(%esp), %edx # 4-byte Reload - adcl 1540(%esp), %edx - movl %edx, 104(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 1544(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 1548(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 1552(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 60(%esp) # 4-byte Spill - movl %ecx, %esi - movl %esi, %eax - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1416(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 1416(%esp), %esi - movl 52(%esp), %eax # 4-byte Reload - adcl 1420(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1424(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 1428(%esp), %esi - adcl 1432(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl 1436(%esp), %edi - movl 88(%esp), %eax # 4-byte Reload - adcl 1440(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1444(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1448(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1452(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1456(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 1460(%esp), %ebp - movl %ebp, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1464(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1468(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1472(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1476(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1480(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 1484(%esp), %ebp - movl 2600(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 1344(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 1412(%esp), %eax - movl 52(%esp), %edx # 4-byte Reload - addl 1344(%esp), %edx - movl 56(%esp), %ecx # 4-byte Reload - adcl 1348(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl 1352(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1356(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - adcl 1360(%esp), %edi - movl 88(%esp), %ecx # 4-byte Reload - adcl 1364(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1368(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1372(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1376(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 1380(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 
4-byte Reload - adcl 1384(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1388(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1392(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 1396(%esp), %esi - movl 84(%esp), %ecx # 4-byte Reload - adcl 1400(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1404(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - adcl 1408(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 52(%esp) # 4-byte Spill - movl %edx, %ebp - movl %ebp, %eax - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1272(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 1272(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 1276(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1280(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1284(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 1288(%esp), %edi - movl 88(%esp), %eax # 4-byte Reload - adcl 1292(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1296(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1300(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %ebp # 4-byte Reload - adcl 1304(%esp), %ebp - movl 116(%esp), %eax # 4-byte Reload - adcl 1308(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1312(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1316(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1320(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 1324(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1328(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1332(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1336(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1340(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 1200(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 1268(%esp), %eax - movl 56(%esp), %ecx # 4-byte Reload - addl 1200(%esp), %ecx - movl 68(%esp), %edx # 4-byte Reload - adcl 1204(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 1208(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - adcl 1212(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 88(%esp), %edx # 4-byte Reload - adcl 1216(%esp), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 96(%esp), %edx # 4-byte Reload - adcl 1220(%esp), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 108(%esp), %esi # 4-byte Reload - adcl 1224(%esp), %esi - adcl 1228(%esp), %ebp - movl %ebp, 100(%esp) # 4-byte Spill - movl 116(%esp), %edi # 4-byte Reload - adcl 1232(%esp), %edi - movl 112(%esp), %edx # 4-byte Reload - adcl 1236(%esp), %edx - movl %edx, 112(%esp) # 4-byte Spill - movl 92(%esp), %edx # 4-byte Reload - adcl 1240(%esp), %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 104(%esp), %edx # 4-byte Reload - adcl 1244(%esp), %edx - movl %edx, 104(%esp) # 4-byte 
Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 1248(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 1252(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 1256(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 1260(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 1264(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 56(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1128(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 1128(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 1132(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1136(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1140(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1144(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1148(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 1152(%esp), %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1156(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 1160(%esp), %edi - movl %edi, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1164(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1168(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %ebp # 4-byte Reload - adcl 1172(%esp), %ebp - movl 76(%esp), %eax # 4-byte Reload - adcl 1176(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - adcl 1180(%esp), %esi - movl 80(%esp), %eax # 4-byte Reload - adcl 1184(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 1188(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 1192(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1196(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 1056(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 1124(%esp), %edx - movl 68(%esp), %eax # 4-byte Reload - addl 1056(%esp), %eax - movl 72(%esp), %ecx # 4-byte Reload - adcl 1060(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1064(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1068(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1072(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1076(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1080(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 1084(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1088(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1092(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl 1096(%esp), %ebp - movl %ebp, 104(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1100(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - adcl 1104(%esp), 
%esi - movl %esi, 84(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1108(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - adcl 1112(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 1116(%esp), %edi - movl 56(%esp), %ecx # 4-byte Reload - adcl 1120(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, %esi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 984(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 984(%esp), %esi - movl 72(%esp), %esi # 4-byte Reload - adcl 988(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl 996(%esp), %ebp - movl 96(%esp), %eax # 4-byte Reload - adcl 1000(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1004(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1008(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1012(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1016(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1020(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1024(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1028(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1032(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1036(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1040(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 1044(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1048(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1052(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 912(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 980(%esp), %eax - addl 912(%esp), %esi - movl 64(%esp), %edx # 4-byte Reload - adcl 916(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - adcl 920(%esp), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - movl 96(%esp), %edi # 4-byte Reload - adcl 924(%esp), %edi - movl 108(%esp), %edx # 4-byte Reload - adcl 928(%esp), %edx - movl %edx, 108(%esp) # 4-byte Spill - movl 100(%esp), %edx # 4-byte Reload - adcl 932(%esp), %edx - movl %edx, 100(%esp) # 4-byte Spill - movl 116(%esp), %edx # 4-byte Reload - adcl 936(%esp), %edx - movl %edx, 116(%esp) # 4-byte Spill - movl 112(%esp), %edx # 4-byte Reload - adcl 940(%esp), %edx - movl %edx, 112(%esp) # 4-byte Spill - movl 92(%esp), %ebp # 4-byte Reload - adcl 944(%esp), %ebp - movl 104(%esp), %edx # 4-byte Reload - adcl 948(%esp), %edx - movl %edx, 104(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 952(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 956(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 960(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 964(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte 
Reload - adcl 968(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 972(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 976(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 72(%esp) # 4-byte Spill - movl %esi, %eax - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 840(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 840(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 852(%esp), %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 856(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %edi # 4-byte Reload - adcl 864(%esp), %edi - movl 112(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl %ebp, %esi - adcl 872(%esp), %esi - movl 104(%esp), %eax # 4-byte Reload - adcl 876(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 888(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 768(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 836(%esp), %edx - movl 64(%esp), %ecx # 4-byte Reload - addl 768(%esp), %ecx - movl 88(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 780(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 784(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 788(%esp), %edi - movl %edi, 116(%esp) # 4-byte Spill - movl 112(%esp), %edi # 4-byte Reload - adcl 792(%esp), %edi - adcl 796(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 812(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 828(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 
832(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 64(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 696(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 696(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 712(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %esi # 4-byte Reload - adcl 716(%esp), %esi - adcl 720(%esp), %edi - movl %edi, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 728(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 732(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 736(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 752(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 756(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 52(%eax), %eax - movl %eax, (%esp) - leal 624(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 692(%esp), %edx - movl 88(%esp), %ecx # 4-byte Reload - addl 624(%esp), %ecx - movl 96(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %ebp # 4-byte Reload - adcl 636(%esp), %ebp - adcl 640(%esp), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 656(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 660(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 688(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 88(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, 
%esi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 552(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 552(%esp), %esi - movl 96(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 564(%esp), %ebp - movl %ebp, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 576(%esp), %esi - movl 104(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 588(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 600(%esp), %ebp - movl 56(%esp), %edi # 4-byte Reload - adcl 604(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 56(%eax), %eax - movl %eax, (%esp) - leal 480(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 548(%esp), %edx - movl 96(%esp), %ecx # 4-byte Reload - addl 480(%esp), %ecx - movl 108(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 496(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 500(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 524(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - adcl 528(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 96(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 408(%esp), %ecx - movl 2604(%esp), %edx - calll 
.LmulPv544x32 - addl 408(%esp), %esi - movl 108(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %esi # 4-byte Reload - adcl 420(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %ebp # 4-byte Reload - adcl 432(%esp), %ebp - movl 76(%esp), %edi # 4-byte Reload - adcl 436(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 440(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 444(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 60(%eax), %eax - movl %eax, (%esp) - leal 336(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 404(%esp), %edx - movl 108(%esp), %ecx # 4-byte Reload - addl 336(%esp), %ecx - movl 100(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 344(%esp), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 356(%esp), %ebp - movl %ebp, 104(%esp) # 4-byte Spill - adcl 360(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 364(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 372(%esp), %ebp - movl 52(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 108(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 264(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 264(%esp), %esi - movl 100(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill 
- movl 116(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 280(%esp), %esi - movl 104(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 292(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 300(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - adcl 308(%esp), %edi - movl 68(%esp), %ebp # 4-byte Reload - adcl 312(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 64(%eax), %eax - movl %eax, (%esp) - leal 192(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 260(%esp), %edx - movl 100(%esp), %ecx # 4-byte Reload - addl 192(%esp), %ecx - movl 116(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 204(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 232(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - adcl 236(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 100(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 120(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 120(%esp), %esi - movl 92(%esp), %esi # 4-byte Reload - movl 116(%esp), %eax # 4-byte Reload - adcl 124(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl 128(%esp), %ebp - movl %ebp, 112(%esp) # 4-byte Spill - 
adcl 132(%esp), %esi - movl 104(%esp), %edx # 4-byte Reload - adcl 136(%esp), %edx - movl %edx, 104(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 140(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 144(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 148(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 152(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 156(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 160(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 164(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 168(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 172(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 88(%esp), %edx # 4-byte Reload - adcl 176(%esp), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 96(%esp), %edx # 4-byte Reload - adcl 180(%esp), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 108(%esp), %edx # 4-byte Reload - adcl 184(%esp), %edx - movl %edx, 108(%esp) # 4-byte Spill - movl 100(%esp), %edx # 4-byte Reload - adcl 188(%esp), %edx - movl %edx, 100(%esp) # 4-byte Spill - movl %eax, %edx - movl 2604(%esp), %edi - subl (%edi), %edx - sbbl 4(%edi), %ebp - movl %esi, %ebx - sbbl 8(%edi), %ebx - movl 104(%esp), %ecx # 4-byte Reload - sbbl 12(%edi), %ecx - movl 76(%esp), %eax # 4-byte Reload - sbbl 16(%edi), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - sbbl 20(%edi), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - sbbl 24(%edi), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - sbbl 28(%edi), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - sbbl 32(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - sbbl 36(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - sbbl 40(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - sbbl 44(%edi), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - sbbl 48(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - sbbl 52(%edi), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - sbbl 56(%edi), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - sbbl 60(%edi), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - sbbl 64(%edi), %eax - movl %eax, 92(%esp) # 4-byte Spill - sarl $31, %eax - testl %eax, %eax - movl 116(%esp), %edi # 4-byte Reload - js .LBB259_2 -# BB#1: - movl %edx, %edi -.LBB259_2: - movl 2592(%esp), %edx - movl %edi, (%edx) - movl 112(%esp), %edi # 4-byte Reload - js .LBB259_4 -# BB#3: - movl %ebp, %edi -.LBB259_4: - movl %edi, 4(%edx) - js .LBB259_6 -# BB#5: - movl %ebx, %esi -.LBB259_6: - movl %esi, 8(%edx) - movl 104(%esp), %esi # 4-byte Reload - js .LBB259_8 -# BB#7: - movl %ecx, %esi -.LBB259_8: - movl %esi, 12(%edx) - movl 76(%esp), %ecx # 4-byte Reload - js .LBB259_10 -# BB#9: - movl 4(%esp), %ecx # 4-byte Reload -.LBB259_10: - movl %ecx, 16(%edx) - movl 84(%esp), %eax # 4-byte Reload - js .LBB259_12 -# BB#11: - movl 8(%esp), %eax # 4-byte 
[Elided: extraction-flattened deleted lines of LLVM-generated 32-bit x86 (bmi2) assembly from the vendored mcl library. This part of the hunk removes the tail of mcl_fp_montNF17Lbmi2 and the complete bodies of mcl_fp_montRed17Lbmi2, mcl_fp_addPre17Lbmi2, mcl_fp_subPre17Lbmi2, mcl_fp_shr1_17Lbmi2, mcl_fp_add17Lbmi2, mcl_fp_addNF17Lbmi2, mcl_fp_sub17Lbmi2, and mcl_fp_subNF17Lbmi2, then the opening of mcl_fpDbl_add17Lbmi2, which continues past this section.]
movl 44(%ecx), %ebx - movl %edi, 36(%eax) - movl 44(%edx), %edi - adcl %ebx, %edi - movl 48(%ecx), %ebx - movl %esi, 40(%eax) - movl 48(%edx), %esi - adcl %ebx, %esi - movl 52(%ecx), %ebx - movl %edi, 44(%eax) - movl 52(%edx), %edi - adcl %ebx, %edi - movl 56(%ecx), %ebx - movl %esi, 48(%eax) - movl 56(%edx), %esi - adcl %ebx, %esi - movl 60(%ecx), %ebx - movl %edi, 52(%eax) - movl 60(%edx), %edi - adcl %ebx, %edi - movl 64(%ecx), %ebx - movl %esi, 56(%eax) - movl 64(%edx), %esi - adcl %ebx, %esi - movl 68(%ecx), %ebx - movl %edi, 60(%eax) - movl 68(%edx), %edi - adcl %ebx, %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 72(%ecx), %edi - movl %esi, 64(%eax) - movl 72(%edx), %eax - adcl %edi, %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 76(%edx), %eax - adcl %ebp, %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 80(%ecx), %esi - movl 80(%edx), %eax - adcl %esi, %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 84(%ecx), %esi - movl 84(%edx), %eax - adcl %esi, %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 88(%ecx), %esi - movl 88(%edx), %eax - adcl %esi, %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%ecx), %esi - movl 92(%edx), %eax - adcl %esi, %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 96(%ecx), %esi - movl 96(%edx), %eax - adcl %esi, %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 100(%ecx), %esi - movl 100(%edx), %eax - adcl %esi, %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 104(%ecx), %esi - movl 104(%edx), %eax - adcl %esi, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 108(%ecx), %esi - movl 108(%edx), %eax - adcl %esi, %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 112(%ecx), %esi - movl 112(%edx), %eax - adcl %esi, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 116(%ecx), %esi - movl 116(%edx), %eax - adcl %esi, %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 120(%ecx), %edi - movl 120(%edx), %esi - adcl %edi, %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 124(%ecx), %ebx - movl 124(%edx), %edi - adcl %ebx, %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 128(%ecx), %ebx - movl 128(%edx), %ebp - adcl %ebx, %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 132(%ecx), %ecx - movl 132(%edx), %edx - adcl %ecx, %edx - sbbl %ecx, %ecx - andl $1, %ecx - movl 160(%esp), %ebx - movl 92(%esp), %eax # 4-byte Reload - subl (%ebx), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - sbbl 4(%ebx), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - sbbl 8(%ebx), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - sbbl 12(%ebx), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - sbbl 16(%ebx), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - sbbl 20(%ebx), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - sbbl 24(%ebx), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - sbbl 28(%ebx), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - sbbl 32(%ebx), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - sbbl 36(%ebx), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - sbbl 40(%ebx), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - sbbl 44(%ebx), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - sbbl 48(%ebx), %eax - movl %eax, (%esp) # 4-byte Spill - sbbl 52(%ebx), %esi - movl %esi, 
52(%esp) # 4-byte Spill - sbbl 56(%ebx), %edi - movl %edi, 56(%esp) # 4-byte Spill - sbbl 60(%ebx), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl %edx, %ebp - sbbl 64(%ebx), %ebp - sbbl $0, %ecx - andl $1, %ecx - jne .LBB268_2 -# BB#1: - movl %ebp, %edx -.LBB268_2: - testb %cl, %cl - movl 92(%esp), %eax # 4-byte Reload - movl 88(%esp), %esi # 4-byte Reload - movl 84(%esp), %edi # 4-byte Reload - movl 80(%esp), %ebx # 4-byte Reload - movl 76(%esp), %ebp # 4-byte Reload - jne .LBB268_4 -# BB#3: - movl (%esp), %esi # 4-byte Reload - movl 4(%esp), %edi # 4-byte Reload - movl 8(%esp), %ebx # 4-byte Reload - movl 12(%esp), %ebp # 4-byte Reload - movl 16(%esp), %eax # 4-byte Reload - movl %eax, 124(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - movl %eax, 120(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload -.LBB268_4: - movl 148(%esp), %ecx - movl %eax, 68(%ecx) - movl %ecx, %eax - movl 96(%esp), %ecx # 4-byte Reload - movl %ecx, 72(%eax) - movl 100(%esp), %ecx # 4-byte Reload - movl %ecx, 76(%eax) - movl 104(%esp), %ecx # 4-byte Reload - movl %ecx, 80(%eax) - movl 108(%esp), %ecx # 4-byte Reload - movl %ecx, 84(%eax) - movl 112(%esp), %ecx # 4-byte Reload - movl %ecx, 88(%eax) - movl 116(%esp), %ecx # 4-byte Reload - movl %ecx, 92(%eax) - movl 120(%esp), %ecx # 4-byte Reload - movl %ecx, 96(%eax) - movl 124(%esp), %ecx # 4-byte Reload - movl %ecx, 100(%eax) - movl %ebp, 104(%eax) - movl %ebx, 108(%eax) - movl %edi, 112(%eax) - movl %esi, 116(%eax) - movl 72(%esp), %ecx # 4-byte Reload - movl 64(%esp), %esi # 4-byte Reload - jne .LBB268_6 -# BB#5: - movl 52(%esp), %esi # 4-byte Reload -.LBB268_6: - movl %esi, 120(%eax) - movl 68(%esp), %esi # 4-byte Reload - jne .LBB268_8 -# BB#7: - movl 56(%esp), %esi # 4-byte Reload -.LBB268_8: - movl %esi, 124(%eax) - jne .LBB268_10 -# BB#9: - movl 60(%esp), %ecx # 4-byte Reload -.LBB268_10: - movl %ecx, 128(%eax) - movl %edx, 132(%eax) - addl $128, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end268: - .size mcl_fpDbl_add17Lbmi2, .Lfunc_end268-mcl_fpDbl_add17Lbmi2 - - .globl mcl_fpDbl_sub17Lbmi2 - .align 16, 0x90 - .type mcl_fpDbl_sub17Lbmi2,@function -mcl_fpDbl_sub17Lbmi2: # @mcl_fpDbl_sub17Lbmi2 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $116, %esp - movl 140(%esp), %edx - movl (%edx), %eax - movl 4(%edx), %edi - movl 144(%esp), %esi - subl (%esi), %eax - sbbl 4(%esi), %edi - movl 8(%edx), %ebx - sbbl 8(%esi), %ebx - movl 136(%esp), %ecx - movl %eax, (%ecx) - movl 12(%edx), %eax - sbbl 12(%esi), %eax - movl %edi, 4(%ecx) - movl 16(%edx), %edi - sbbl 16(%esi), %edi - movl %ebx, 8(%ecx) - movl 20(%esi), %ebx - movl %eax, 12(%ecx) - movl 20(%edx), %eax - sbbl %ebx, %eax - movl 24(%esi), %ebx - movl %edi, 16(%ecx) - movl 24(%edx), %edi - sbbl %ebx, %edi - movl 28(%esi), %ebx - movl %eax, 20(%ecx) - movl 28(%edx), %eax - sbbl %ebx, %eax - movl 32(%esi), %ebx - movl %edi, 24(%ecx) - movl 32(%edx), %edi - sbbl %ebx, %edi - movl 36(%esi), %ebx - movl %eax, 28(%ecx) - movl 36(%edx), %eax - sbbl %ebx, %eax - movl 40(%esi), %ebx - movl %edi, 
32(%ecx) - movl 40(%edx), %edi - sbbl %ebx, %edi - movl 44(%esi), %ebx - movl %eax, 36(%ecx) - movl 44(%edx), %eax - sbbl %ebx, %eax - movl 48(%esi), %ebx - movl %edi, 40(%ecx) - movl 48(%edx), %edi - sbbl %ebx, %edi - movl 52(%esi), %ebx - movl %eax, 44(%ecx) - movl 52(%edx), %eax - sbbl %ebx, %eax - movl 56(%esi), %ebx - movl %edi, 48(%ecx) - movl 56(%edx), %edi - sbbl %ebx, %edi - movl 60(%esi), %ebx - movl %eax, 52(%ecx) - movl 60(%edx), %eax - sbbl %ebx, %eax - movl 64(%esi), %ebx - movl %edi, 56(%ecx) - movl 64(%edx), %edi - sbbl %ebx, %edi - movl 68(%esi), %ebx - movl %eax, 60(%ecx) - movl 68(%edx), %eax - sbbl %ebx, %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 72(%esi), %eax - movl %edi, 64(%ecx) - movl 72(%edx), %edi - sbbl %eax, %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 76(%esi), %eax - movl 76(%edx), %edi - sbbl %eax, %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 80(%esi), %eax - movl 80(%edx), %edi - sbbl %eax, %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 84(%esi), %eax - movl 84(%edx), %edi - sbbl %eax, %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 88(%esi), %eax - movl 88(%edx), %edi - sbbl %eax, %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 92(%esi), %eax - movl 92(%edx), %edi - sbbl %eax, %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 96(%esi), %eax - movl 96(%edx), %edi - sbbl %eax, %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 100(%esi), %eax - movl 100(%edx), %edi - sbbl %eax, %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 104(%esi), %eax - movl 104(%edx), %edi - sbbl %eax, %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 108(%esi), %eax - movl 108(%edx), %edi - sbbl %eax, %edi - movl %edi, 88(%esp) # 4-byte Spill - movl 112(%esi), %eax - movl 112(%edx), %edi - sbbl %eax, %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 116(%esi), %eax - movl 116(%edx), %edi - sbbl %eax, %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 120(%esi), %eax - movl 120(%edx), %edi - sbbl %eax, %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 124(%esi), %eax - movl 124(%edx), %edi - sbbl %eax, %edi - movl %edi, 104(%esp) # 4-byte Spill - movl 128(%esi), %eax - movl 128(%edx), %edi - sbbl %eax, %edi - movl %edi, 108(%esp) # 4-byte Spill - movl 132(%esi), %eax - movl 132(%edx), %edx - sbbl %eax, %edx - movl %edx, 112(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 148(%esp), %ebp - jne .LBB269_1 -# BB#2: - movl $0, 76(%esp) # 4-byte Folded Spill - jmp .LBB269_3 -.LBB269_1: - movl 64(%ebp), %edx - movl %edx, 76(%esp) # 4-byte Spill -.LBB269_3: - testb %al, %al - jne .LBB269_4 -# BB#5: - movl $0, 28(%esp) # 4-byte Folded Spill - movl $0, %esi - jmp .LBB269_6 -.LBB269_4: - movl (%ebp), %esi - movl 4(%ebp), %eax - movl %eax, 28(%esp) # 4-byte Spill -.LBB269_6: - jne .LBB269_7 -# BB#8: - movl $0, 40(%esp) # 4-byte Folded Spill - jmp .LBB269_9 -.LBB269_7: - movl 60(%ebp), %eax - movl %eax, 40(%esp) # 4-byte Spill -.LBB269_9: - jne .LBB269_10 -# BB#11: - movl $0, 36(%esp) # 4-byte Folded Spill - jmp .LBB269_12 -.LBB269_10: - movl 56(%ebp), %eax - movl %eax, 36(%esp) # 4-byte Spill -.LBB269_12: - jne .LBB269_13 -# BB#14: - movl $0, 32(%esp) # 4-byte Folded Spill - jmp .LBB269_15 -.LBB269_13: - movl 52(%ebp), %eax - movl %eax, 32(%esp) # 4-byte Spill -.LBB269_15: - jne .LBB269_16 -# BB#17: - movl $0, 24(%esp) # 4-byte Folded Spill - jmp .LBB269_18 -.LBB269_16: - movl 48(%ebp), %eax - movl %eax, 24(%esp) # 4-byte Spill -.LBB269_18: - jne .LBB269_19 -# BB#20: - movl $0, 20(%esp) # 4-byte Folded Spill - jmp .LBB269_21 -.LBB269_19: - movl 44(%ebp), 
%eax - movl %eax, 20(%esp) # 4-byte Spill -.LBB269_21: - jne .LBB269_22 -# BB#23: - movl $0, 16(%esp) # 4-byte Folded Spill - jmp .LBB269_24 -.LBB269_22: - movl 40(%ebp), %eax - movl %eax, 16(%esp) # 4-byte Spill -.LBB269_24: - jne .LBB269_25 -# BB#26: - movl $0, 12(%esp) # 4-byte Folded Spill - jmp .LBB269_27 -.LBB269_25: - movl 36(%ebp), %eax - movl %eax, 12(%esp) # 4-byte Spill -.LBB269_27: - jne .LBB269_28 -# BB#29: - movl $0, 8(%esp) # 4-byte Folded Spill - jmp .LBB269_30 -.LBB269_28: - movl 32(%ebp), %eax - movl %eax, 8(%esp) # 4-byte Spill -.LBB269_30: - jne .LBB269_31 -# BB#32: - movl $0, 4(%esp) # 4-byte Folded Spill - jmp .LBB269_33 -.LBB269_31: - movl 28(%ebp), %eax - movl %eax, 4(%esp) # 4-byte Spill -.LBB269_33: - jne .LBB269_34 -# BB#35: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB269_36 -.LBB269_34: - movl 24(%ebp), %eax - movl %eax, (%esp) # 4-byte Spill -.LBB269_36: - jne .LBB269_37 -# BB#38: - movl $0, %ebx - jmp .LBB269_39 -.LBB269_37: - movl 20(%ebp), %ebx -.LBB269_39: - jne .LBB269_40 -# BB#41: - movl $0, %edi - jmp .LBB269_42 -.LBB269_40: - movl 16(%ebp), %edi -.LBB269_42: - jne .LBB269_43 -# BB#44: - movl %ebp, %eax - movl $0, %ebp - jmp .LBB269_45 -.LBB269_43: - movl %ebp, %eax - movl 12(%eax), %ebp -.LBB269_45: - jne .LBB269_46 -# BB#47: - xorl %eax, %eax - jmp .LBB269_48 -.LBB269_46: - movl 8(%eax), %eax -.LBB269_48: - addl 52(%esp), %esi # 4-byte Folded Reload - movl 28(%esp), %edx # 4-byte Reload - adcl 44(%esp), %edx # 4-byte Folded Reload - movl %esi, 68(%ecx) - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %edx, 72(%ecx) - adcl 56(%esp), %ebp # 4-byte Folded Reload - movl %eax, 76(%ecx) - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %ebp, 80(%ecx) - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %edi, 84(%ecx) - movl (%esp), %edx # 4-byte Reload - adcl 68(%esp), %edx # 4-byte Folded Reload - movl %ebx, 88(%ecx) - movl 4(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %edx, 92(%ecx) - movl 8(%esp), %edx # 4-byte Reload - adcl 80(%esp), %edx # 4-byte Folded Reload - movl %eax, 96(%ecx) - movl 12(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %edx, 100(%ecx) - movl 16(%esp), %edx # 4-byte Reload - adcl 88(%esp), %edx # 4-byte Folded Reload - movl %eax, 104(%ecx) - movl 20(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %edx, 108(%ecx) - movl 24(%esp), %edx # 4-byte Reload - adcl 96(%esp), %edx # 4-byte Folded Reload - movl %eax, 112(%ecx) - movl 32(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %edx, 116(%ecx) - movl 36(%esp), %edx # 4-byte Reload - adcl 104(%esp), %edx # 4-byte Folded Reload - movl %eax, 120(%ecx) - movl 40(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %edx, 124(%ecx) - movl %eax, 128(%ecx) - movl 76(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 132(%ecx) - addl $116, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end269: - .size mcl_fpDbl_sub17Lbmi2, .Lfunc_end269-mcl_fpDbl_sub17Lbmi2 - - - .section ".note.GNU-stack","",@progbits diff --git a/vendor/github.com/dexon-foundation/mcl/src/asm/x86.s b/vendor/github.com/dexon-foundation/mcl/src/asm/x86.s deleted file mode 100644 index cdd988ad3..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/asm/x86.s +++ /dev/null @@ -1,73785 +0,0 @@ - .text - .file "" - .globl makeNIST_P192L - .align 16, 0x90 - .type makeNIST_P192L,@function -makeNIST_P192L: # 
@makeNIST_P192L -# BB#0: - movl 4(%esp), %eax - movl $-1, 20(%eax) - movl $-1, 16(%eax) - movl $-1, 12(%eax) - movl $-2, 8(%eax) - movl $-1, 4(%eax) - movl $-1, (%eax) - retl $4 -.Lfunc_end0: - .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L - - .globl mcl_fpDbl_mod_NIST_P192L - .align 16, 0x90 - .type mcl_fpDbl_mod_NIST_P192L,@function -mcl_fpDbl_mod_NIST_P192L: # @mcl_fpDbl_mod_NIST_P192L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $32, %esp - movl 56(%esp), %eax - movl 32(%eax), %esi - movl %esi, 12(%esp) # 4-byte Spill - movl 24(%eax), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 28(%eax), %edi - movl %edi, 16(%esp) # 4-byte Spill - xorl %edx, %edx - movl (%eax), %ebx - addl %ecx, %ebx - movl %ebx, 24(%esp) # 4-byte Spill - movl 4(%eax), %ecx - adcl %edi, %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 8(%eax), %ebp - adcl %esi, %ebp - movl 36(%eax), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 12(%eax), %esi - adcl %ecx, %esi - movl 40(%eax), %ebx - movl %ebx, 4(%esp) # 4-byte Spill - movl 16(%eax), %ecx - adcl %ebx, %ecx - movl 44(%eax), %edi - movl %edi, (%esp) # 4-byte Spill - movl 20(%eax), %eax - adcl %edi, %eax - adcl $0, %edx - sbbl %edi, %edi - andl $1, %edi - addl %ebx, 24(%esp) # 4-byte Folded Spill - movl (%esp), %ebx # 4-byte Reload - adcl %ebx, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ebp # 4-byte Folded Reload - adcl 16(%esp), %esi # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - adcl 8(%esp), %eax # 4-byte Folded Reload - adcl $0, %edx - adcl $0, %edi - addl 4(%esp), %ebp # 4-byte Folded Reload - adcl %ebx, %esi - adcl $0, %ecx - adcl $0, %eax - adcl $0, %edx - adcl $0, %edi - addl %edx, 24(%esp) # 4-byte Folded Spill - adcl %edi, 28(%esp) # 4-byte Folded Spill - adcl %ebp, %edx - movl %edx, 20(%esp) # 4-byte Spill - adcl %esi, %edi - adcl $0, %ecx - adcl $0, %eax - sbbl %ebx, %ebx - andl $1, %ebx - movl 24(%esp), %esi # 4-byte Reload - addl $1, %esi - movl 28(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $1, %edx - movl %edx, 8(%esp) # 4-byte Spill - movl %edi, %edx - adcl $0, %edx - movl %edx, 12(%esp) # 4-byte Spill - movl %ecx, %edx - adcl $0, %edx - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, %edx - adcl $0, %edx - adcl $-1, %ebx - andl $1, %ebx - jne .LBB1_2 -# BB#1: - movl %edx, %eax -.LBB1_2: - testb %bl, %bl - movl 24(%esp), %edx # 4-byte Reload - jne .LBB1_4 -# BB#3: - movl %esi, %edx -.LBB1_4: - movl 52(%esp), %esi - movl %edx, (%esi) - movl 20(%esp), %edx # 4-byte Reload - movl 28(%esp), %ebx # 4-byte Reload - jne .LBB1_6 -# BB#5: - movl %ebp, %ebx -.LBB1_6: - movl %ebx, 4(%esi) - jne .LBB1_8 -# BB#7: - movl 8(%esp), %edx # 4-byte Reload -.LBB1_8: - movl %edx, 8(%esi) - jne .LBB1_10 -# BB#9: - movl 12(%esp), %edi # 4-byte Reload -.LBB1_10: - movl %edi, 12(%esi) - jne .LBB1_12 -# BB#11: - movl 16(%esp), %ecx # 4-byte Reload -.LBB1_12: - movl %ecx, 16(%esi) - movl %eax, 20(%esi) - addl $32, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end1: - .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L - - .globl mcl_fp_sqr_NIST_P192L - .align 16, 0x90 - .type mcl_fp_sqr_NIST_P192L,@function -mcl_fp_sqr_NIST_P192L: # @mcl_fp_sqr_NIST_P192L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $92, %esp - calll .L2$pb -.L2$pb: - popl %ebx -.Ltmp0: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L2$pb), %ebx - movl 116(%esp), %eax - movl %eax, 4(%esp) - leal 44(%esp), %eax - movl %eax, (%esp) - calll mcl_fpDbl_sqrPre6L@PLT - xorl %edi, %edi - movl 76(%esp), 
%ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 68(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 72(%esp), %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 44(%esp), %esi - addl %eax, %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax - adcl %edx, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 52(%esp), %ebp - adcl %ecx, %ebp - movl 80(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 56(%esp), %esi - adcl %eax, %esi - movl 84(%esp), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 60(%esp), %ecx - adcl %ebx, %ecx - movl 88(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 64(%esp), %edx - adcl %eax, %edx - adcl $0, %edi - sbbl %eax, %eax - andl $1, %eax - addl %ebx, 36(%esp) # 4-byte Folded Spill - movl 12(%esp), %ebx # 4-byte Reload - adcl %ebx, 40(%esp) # 4-byte Folded Spill - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %esi # 4-byte Folded Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - adcl 20(%esp), %edx # 4-byte Folded Reload - adcl $0, %edi - adcl $0, %eax - addl 16(%esp), %ebp # 4-byte Folded Reload - adcl %ebx, %esi - adcl $0, %ecx - adcl $0, %edx - adcl $0, %edi - adcl $0, %eax - addl %edi, 36(%esp) # 4-byte Folded Spill - adcl %eax, 40(%esp) # 4-byte Folded Spill - adcl %ebp, %edi - adcl %esi, %eax - adcl $0, %ecx - adcl $0, %edx - sbbl %ebx, %ebx - andl $1, %ebx - movl 36(%esp), %esi # 4-byte Reload - addl $1, %esi - movl 40(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl %edi, %ebp - adcl $1, %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl %eax, %ebp - adcl $0, %ebp - movl %ebp, 28(%esp) # 4-byte Spill - movl %ecx, %ebp - adcl $0, %ebp - movl %ebp, 32(%esp) # 4-byte Spill - movl %edx, %ebp - adcl $0, %ebp - adcl $-1, %ebx - andl $1, %ebx - jne .LBB2_2 -# BB#1: - movl %ebp, %edx -.LBB2_2: - testb %bl, %bl - movl 36(%esp), %ebx # 4-byte Reload - jne .LBB2_4 -# BB#3: - movl %esi, %ebx -.LBB2_4: - movl 112(%esp), %esi - movl %ebx, (%esi) - movl 40(%esp), %ebx # 4-byte Reload - jne .LBB2_6 -# BB#5: - movl 20(%esp), %ebx # 4-byte Reload -.LBB2_6: - movl %ebx, 4(%esi) - jne .LBB2_8 -# BB#7: - movl 24(%esp), %edi # 4-byte Reload -.LBB2_8: - movl %edi, 8(%esi) - jne .LBB2_10 -# BB#9: - movl 28(%esp), %eax # 4-byte Reload -.LBB2_10: - movl %eax, 12(%esi) - jne .LBB2_12 -# BB#11: - movl 32(%esp), %ecx # 4-byte Reload -.LBB2_12: - movl %ecx, 16(%esi) - movl %edx, 20(%esi) - addl $92, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end2: - .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L - - .globl mcl_fp_mulNIST_P192L - .align 16, 0x90 - .type mcl_fp_mulNIST_P192L,@function -mcl_fp_mulNIST_P192L: # @mcl_fp_mulNIST_P192L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $92, %esp - calll .L3$pb -.L3$pb: - popl %ebx -.Ltmp1: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L3$pb), %ebx - movl 120(%esp), %eax - movl %eax, 8(%esp) - movl 116(%esp), %eax - movl %eax, 4(%esp) - leal 44(%esp), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre6L@PLT - xorl %edi, %edi - movl 76(%esp), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 68(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 72(%esp), %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 44(%esp), %esi - addl %eax, %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax - adcl %edx, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 52(%esp), %ebp - adcl %ecx, %ebp - movl 80(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 56(%esp), %esi - adcl %eax, %esi - movl 84(%esp), %ebx - 
movl %ebx, 16(%esp) # 4-byte Spill - movl 60(%esp), %ecx - adcl %ebx, %ecx - movl 88(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 64(%esp), %edx - adcl %eax, %edx - adcl $0, %edi - sbbl %eax, %eax - andl $1, %eax - addl %ebx, 36(%esp) # 4-byte Folded Spill - movl 12(%esp), %ebx # 4-byte Reload - adcl %ebx, 40(%esp) # 4-byte Folded Spill - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %esi # 4-byte Folded Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - adcl 20(%esp), %edx # 4-byte Folded Reload - adcl $0, %edi - adcl $0, %eax - addl 16(%esp), %ebp # 4-byte Folded Reload - adcl %ebx, %esi - adcl $0, %ecx - adcl $0, %edx - adcl $0, %edi - adcl $0, %eax - addl %edi, 36(%esp) # 4-byte Folded Spill - adcl %eax, 40(%esp) # 4-byte Folded Spill - adcl %ebp, %edi - adcl %esi, %eax - adcl $0, %ecx - adcl $0, %edx - sbbl %ebx, %ebx - andl $1, %ebx - movl 36(%esp), %esi # 4-byte Reload - addl $1, %esi - movl 40(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl %edi, %ebp - adcl $1, %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl %eax, %ebp - adcl $0, %ebp - movl %ebp, 28(%esp) # 4-byte Spill - movl %ecx, %ebp - adcl $0, %ebp - movl %ebp, 32(%esp) # 4-byte Spill - movl %edx, %ebp - adcl $0, %ebp - adcl $-1, %ebx - andl $1, %ebx - jne .LBB3_2 -# BB#1: - movl %ebp, %edx -.LBB3_2: - testb %bl, %bl - movl 36(%esp), %ebx # 4-byte Reload - jne .LBB3_4 -# BB#3: - movl %esi, %ebx -.LBB3_4: - movl 112(%esp), %esi - movl %ebx, (%esi) - movl 40(%esp), %ebx # 4-byte Reload - jne .LBB3_6 -# BB#5: - movl 20(%esp), %ebx # 4-byte Reload -.LBB3_6: - movl %ebx, 4(%esi) - jne .LBB3_8 -# BB#7: - movl 24(%esp), %edi # 4-byte Reload -.LBB3_8: - movl %edi, 8(%esi) - jne .LBB3_10 -# BB#9: - movl 28(%esp), %eax # 4-byte Reload -.LBB3_10: - movl %eax, 12(%esi) - jne .LBB3_12 -# BB#11: - movl 32(%esp), %ecx # 4-byte Reload -.LBB3_12: - movl %ecx, 16(%esi) - movl %edx, 20(%esi) - addl $92, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end3: - .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L - - .globl mcl_fpDbl_mod_NIST_P521L - .align 16, 0x90 - .type mcl_fpDbl_mod_NIST_P521L,@function -mcl_fpDbl_mod_NIST_P521L: # @mcl_fpDbl_mod_NIST_P521L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $60, %esp - movl 84(%esp), %ecx - movl 124(%ecx), %edx - movl 128(%ecx), %esi - movl %esi, %eax - shldl $23, %edx, %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 120(%ecx), %eax - shldl $23, %eax, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 116(%ecx), %edx - shldl $23, %edx, %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 112(%ecx), %eax - shldl $23, %eax, %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 108(%ecx), %edx - shldl $23, %edx, %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 104(%ecx), %eax - shldl $23, %eax, %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 100(%ecx), %edx - shldl $23, %edx, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 96(%ecx), %eax - shldl $23, %eax, %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 92(%ecx), %edx - shldl $23, %edx, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 88(%ecx), %eax - shldl $23, %eax, %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 84(%ecx), %edi - shldl $23, %edi, %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 80(%ecx), %edx - shldl $23, %edx, %edi - movl 76(%ecx), %eax - shldl $23, %eax, %edx - movl 72(%ecx), %ebx - shldl $23, %ebx, %eax - movl 68(%ecx), %ebp - shldl $23, %ebp, %ebx - shrl $9, %esi - movl %esi, 8(%esp) # 4-byte Spill - movl 64(%ecx), 
%esi - shldl $23, %esi, %ebp - andl $511, %esi # imm = 0x1FF - addl (%ecx), %ebp - adcl 4(%ecx), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - adcl 8(%ecx), %eax - adcl 12(%ecx), %edx - adcl 16(%ecx), %edi - movl 28(%esp), %ebx # 4-byte Reload - adcl 20(%ecx), %ebx - movl %ebx, 28(%esp) # 4-byte Spill - movl 32(%esp), %ebx # 4-byte Reload - adcl 24(%ecx), %ebx - movl %ebx, 32(%esp) # 4-byte Spill - movl 36(%esp), %ebx # 4-byte Reload - adcl 28(%ecx), %ebx - movl %ebx, 36(%esp) # 4-byte Spill - movl 40(%esp), %ebx # 4-byte Reload - adcl 32(%ecx), %ebx - movl %ebx, 40(%esp) # 4-byte Spill - movl 44(%esp), %ebx # 4-byte Reload - adcl 36(%ecx), %ebx - movl %ebx, 44(%esp) # 4-byte Spill - movl 48(%esp), %ebx # 4-byte Reload - adcl 40(%ecx), %ebx - movl %ebx, 48(%esp) # 4-byte Spill - movl 24(%esp), %ebx # 4-byte Reload - adcl 44(%ecx), %ebx - movl %ebx, 24(%esp) # 4-byte Spill - movl 52(%esp), %ebx # 4-byte Reload - adcl 48(%ecx), %ebx - movl %ebx, 52(%esp) # 4-byte Spill - movl 20(%esp), %ebx # 4-byte Reload - adcl 52(%ecx), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - movl 56(%esp), %ebx # 4-byte Reload - adcl 56(%ecx), %ebx - movl %ebx, 56(%esp) # 4-byte Spill - movl 12(%esp), %ebx # 4-byte Reload - adcl 60(%ecx), %ebx - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - movl %esi, %ecx - shrl $9, %ecx - andl $1, %ecx - addl %ebp, %ecx - adcl $0, 16(%esp) # 4-byte Folded Spill - adcl $0, %eax - movl %eax, (%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 4(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 8(%esp) # 4-byte Spill - movl %edi, %esi - adcl $0, 28(%esp) # 4-byte Folded Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - adcl $0, 48(%esp) # 4-byte Folded Spill - adcl $0, 24(%esp) # 4-byte Folded Spill - adcl $0, 52(%esp) # 4-byte Folded Spill - adcl $0, 20(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl %ebx, %ebp - adcl $0, %ebp - movl 12(%esp), %ebx # 4-byte Reload - adcl $0, %ebx - movl %ecx, %edi - andl %eax, %edi - andl %edx, %edi - andl %esi, %edi - andl 28(%esp), %edi # 4-byte Folded Reload - andl 32(%esp), %edi # 4-byte Folded Reload - andl 36(%esp), %edi # 4-byte Folded Reload - andl 40(%esp), %edi # 4-byte Folded Reload - andl 44(%esp), %edi # 4-byte Folded Reload - andl 48(%esp), %edi # 4-byte Folded Reload - andl 24(%esp), %edi # 4-byte Folded Reload - andl 52(%esp), %edi # 4-byte Folded Reload - movl 20(%esp), %esi # 4-byte Reload - andl %esi, %edi - andl 56(%esp), %edi # 4-byte Folded Reload - movl %ebx, %edx - movl 16(%esp), %ebx # 4-byte Reload - andl %ebp, %edi - movl %ebp, %eax - movl %edx, %ebp - orl $-512, %ebp # imm = 0xFFFFFFFFFFFFFE00 - andl %edi, %ebp - andl %ebx, %ebp - cmpl $-1, %ebp - movl 80(%esp), %edi - je .LBB4_1 -# BB#3: # %nonzero - movl %ecx, (%edi) - movl %ebx, 4(%edi) - movl (%esp), %ecx # 4-byte Reload - movl %ecx, 8(%edi) - movl 4(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%edi) - movl 8(%esp), %ecx # 4-byte Reload - movl %ecx, 16(%edi) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 20(%edi) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%edi) - movl 36(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%edi) - movl 40(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%edi) - movl 44(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%edi) - movl 48(%esp), %ecx # 4-byte Reload - movl %ecx, 40(%edi) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 44(%edi) - movl 52(%esp), 
%ecx # 4-byte Reload - movl %ecx, 48(%edi) - movl %esi, 52(%edi) - movl 56(%esp), %ecx # 4-byte Reload - movl %ecx, 56(%edi) - movl %eax, 60(%edi) - andl $511, %edx # imm = 0x1FF - movl %edx, 64(%edi) - jmp .LBB4_2 -.LBB4_1: # %zero - xorl %eax, %eax - movl $17, %ecx - rep;stosl -.LBB4_2: # %zero - addl $60, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end4: - .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L - - .globl mcl_fp_mulUnitPre1L - .align 16, 0x90 - .type mcl_fp_mulUnitPre1L,@function -mcl_fp_mulUnitPre1L: # @mcl_fp_mulUnitPre1L -# BB#0: - movl 8(%esp), %eax - movl (%eax), %eax - mull 12(%esp) - movl 4(%esp), %ecx - movl %eax, (%ecx) - movl %edx, 4(%ecx) - retl -.Lfunc_end5: - .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L - - .globl mcl_fpDbl_mulPre1L - .align 16, 0x90 - .type mcl_fpDbl_mulPre1L,@function -mcl_fpDbl_mulPre1L: # @mcl_fpDbl_mulPre1L -# BB#0: - movl 12(%esp), %eax - movl (%eax), %eax - movl 8(%esp), %ecx - mull (%ecx) - movl 4(%esp), %ecx - movl %eax, (%ecx) - movl %edx, 4(%ecx) - retl -.Lfunc_end6: - .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L - - .globl mcl_fpDbl_sqrPre1L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre1L,@function -mcl_fpDbl_sqrPre1L: # @mcl_fpDbl_sqrPre1L -# BB#0: - movl 8(%esp), %eax - movl (%eax), %eax - mull %eax - movl 4(%esp), %ecx - movl %eax, (%ecx) - movl %edx, 4(%ecx) - retl -.Lfunc_end7: - .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L - - .globl mcl_fp_mont1L - .align 16, 0x90 - .type mcl_fp_mont1L,@function -mcl_fp_mont1L: # @mcl_fp_mont1L -# BB#0: - pushl %edi - pushl %esi - movl 16(%esp), %eax - movl (%eax), %eax - movl 20(%esp), %ecx - mull (%ecx) - movl %eax, %ecx - movl %edx, %esi - movl 24(%esp), %edx - movl -4(%edx), %eax - imull %ecx, %eax - movl (%edx), %edi - mull %edi - addl %ecx, %eax - adcl %esi, %edx - sbbl %ecx, %ecx - andl $1, %ecx - movl %edx, %eax - subl %edi, %eax - sbbl $0, %ecx - testb $1, %cl - jne .LBB8_2 -# BB#1: - movl %eax, %edx -.LBB8_2: - movl 12(%esp), %eax - movl %edx, (%eax) - popl %esi - popl %edi - retl -.Lfunc_end8: - .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L - - .globl mcl_fp_montNF1L - .align 16, 0x90 - .type mcl_fp_montNF1L,@function -mcl_fp_montNF1L: # @mcl_fp_montNF1L -# BB#0: - pushl %edi - pushl %esi - movl 16(%esp), %eax - movl (%eax), %eax - movl 20(%esp), %ecx - mull (%ecx) - movl %eax, %ecx - movl %edx, %esi - movl 24(%esp), %edx - movl -4(%edx), %eax - imull %ecx, %eax - movl (%edx), %edi - mull %edi - addl %ecx, %eax - adcl %esi, %edx - movl %edx, %eax - subl %edi, %eax - js .LBB9_2 -# BB#1: - movl %eax, %edx -.LBB9_2: - movl 12(%esp), %eax - movl %edx, (%eax) - popl %esi - popl %edi - retl -.Lfunc_end9: - .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L - - .globl mcl_fp_montRed1L - .align 16, 0x90 - .type mcl_fp_montRed1L,@function -mcl_fp_montRed1L: # @mcl_fp_montRed1L -# BB#0: - pushl %edi - pushl %esi - movl 16(%esp), %ecx - movl (%ecx), %esi - movl 20(%esp), %edx - movl -4(%edx), %eax - imull %esi, %eax - movl (%edx), %edi - mull %edi - addl %esi, %eax - adcl 4(%ecx), %edx - sbbl %ecx, %ecx - andl $1, %ecx - movl %edx, %eax - subl %edi, %eax - sbbl $0, %ecx - testb $1, %cl - jne .LBB10_2 -# BB#1: - movl %eax, %edx -.LBB10_2: - movl 12(%esp), %eax - movl %edx, (%eax) - popl %esi - popl %edi - retl -.Lfunc_end10: - .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L - - .globl mcl_fp_addPre1L - .align 16, 0x90 - .type mcl_fp_addPre1L,@function -mcl_fp_addPre1L: # @mcl_fp_addPre1L -# BB#0: - movl 
12(%esp), %eax - movl (%eax), %eax - movl 4(%esp), %ecx - movl 8(%esp), %edx - addl (%edx), %eax - movl %eax, (%ecx) - sbbl %eax, %eax - andl $1, %eax - retl -.Lfunc_end11: - .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L - - .globl mcl_fp_subPre1L - .align 16, 0x90 - .type mcl_fp_subPre1L,@function -mcl_fp_subPre1L: # @mcl_fp_subPre1L -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %ecx - xorl %eax, %eax - movl 8(%esp), %edx - movl 16(%esp), %esi - subl (%esi), %ecx - movl %ecx, (%edx) - sbbl $0, %eax - andl $1, %eax - popl %esi - retl -.Lfunc_end12: - .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L - - .globl mcl_fp_shr1_1L - .align 16, 0x90 - .type mcl_fp_shr1_1L,@function -mcl_fp_shr1_1L: # @mcl_fp_shr1_1L -# BB#0: - movl 8(%esp), %eax - movl (%eax), %eax - shrl %eax - movl 4(%esp), %ecx - movl %eax, (%ecx) - retl -.Lfunc_end13: - .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L - - .globl mcl_fp_add1L - .align 16, 0x90 - .type mcl_fp_add1L,@function -mcl_fp_add1L: # @mcl_fp_add1L -# BB#0: - pushl %esi - movl 16(%esp), %eax - movl (%eax), %eax - movl 8(%esp), %ecx - movl 12(%esp), %edx - addl (%edx), %eax - movl %eax, (%ecx) - sbbl %edx, %edx - andl $1, %edx - movl 20(%esp), %esi - subl (%esi), %eax - sbbl $0, %edx - testb $1, %dl - jne .LBB14_2 -# BB#1: # %nocarry - movl %eax, (%ecx) -.LBB14_2: # %carry - popl %esi - retl -.Lfunc_end14: - .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L - - .globl mcl_fp_addNF1L - .align 16, 0x90 - .type mcl_fp_addNF1L,@function -mcl_fp_addNF1L: # @mcl_fp_addNF1L -# BB#0: - movl 12(%esp), %eax - movl (%eax), %eax - movl 8(%esp), %ecx - addl (%ecx), %eax - movl 16(%esp), %edx - movl %eax, %ecx - subl (%edx), %ecx - js .LBB15_2 -# BB#1: - movl %ecx, %eax -.LBB15_2: - movl 4(%esp), %ecx - movl %eax, (%ecx) - retl -.Lfunc_end15: - .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L - - .globl mcl_fp_sub1L - .align 16, 0x90 - .type mcl_fp_sub1L,@function -mcl_fp_sub1L: # @mcl_fp_sub1L -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %eax - xorl %edx, %edx - movl 8(%esp), %ecx - movl 16(%esp), %esi - subl (%esi), %eax - movl %eax, (%ecx) - sbbl $0, %edx - testb $1, %dl - jne .LBB16_2 -# BB#1: # %nocarry - popl %esi - retl -.LBB16_2: # %carry - movl 20(%esp), %edx - addl (%edx), %eax - movl %eax, (%ecx) - popl %esi - retl -.Lfunc_end16: - .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L - - .globl mcl_fp_subNF1L - .align 16, 0x90 - .type mcl_fp_subNF1L,@function -mcl_fp_subNF1L: # @mcl_fp_subNF1L -# BB#0: - movl 8(%esp), %eax - movl (%eax), %eax - movl 12(%esp), %ecx - subl (%ecx), %eax - movl %eax, %ecx - sarl $31, %ecx - movl 16(%esp), %edx - andl (%edx), %ecx - addl %eax, %ecx - movl 4(%esp), %eax - movl %ecx, (%eax) - retl -.Lfunc_end17: - .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L - - .globl mcl_fpDbl_add1L - .align 16, 0x90 - .type mcl_fpDbl_add1L,@function -mcl_fpDbl_add1L: # @mcl_fpDbl_add1L -# BB#0: - pushl %ebx - pushl %esi - movl 20(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %eax - movl 16(%esp), %esi - addl (%esi), %edx - movl 12(%esp), %ecx - adcl 4(%esi), %eax - movl %edx, (%ecx) - sbbl %ebx, %ebx - andl $1, %ebx - movl 24(%esp), %esi - movl %eax, %edx - subl (%esi), %edx - sbbl $0, %ebx - testb $1, %bl - jne .LBB18_2 -# BB#1: - movl %edx, %eax -.LBB18_2: - movl %eax, 4(%ecx) - popl %esi - popl %ebx - retl -.Lfunc_end18: - .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L - - .globl mcl_fpDbl_sub1L - .align 16, 0x90 - .type mcl_fpDbl_sub1L,@function -mcl_fpDbl_sub1L: # @mcl_fpDbl_sub1L -# BB#0: - pushl %esi 
- movl 12(%esp), %eax - movl (%eax), %esi - movl 4(%eax), %eax - xorl %ecx, %ecx - movl 16(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %eax - movl 8(%esp), %edx - movl %esi, (%edx) - sbbl $0, %ecx - andl $1, %ecx - je .LBB19_2 -# BB#1: - movl 20(%esp), %ecx - movl (%ecx), %ecx -.LBB19_2: - addl %eax, %ecx - movl %ecx, 4(%edx) - popl %esi - retl -.Lfunc_end19: - .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L - - .globl mcl_fp_mulUnitPre2L - .align 16, 0x90 - .type mcl_fp_mulUnitPre2L,@function -mcl_fp_mulUnitPre2L: # @mcl_fp_mulUnitPre2L -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl 20(%esp), %ebx - movl %ecx, %eax - mull 4(%ebx) - movl %edx, %esi - movl %eax, %edi - movl %ecx, %eax - mull (%ebx) - movl 16(%esp), %ecx - movl %eax, (%ecx) - addl %edi, %edx - movl %edx, 4(%ecx) - adcl $0, %esi - movl %esi, 8(%ecx) - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end20: - .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L - - .globl mcl_fpDbl_mulPre2L - .align 16, 0x90 - .type mcl_fpDbl_mulPre2L,@function -mcl_fpDbl_mulPre2L: # @mcl_fpDbl_mulPre2L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $8, %esp - movl 32(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edi - movl 36(%esp), %ebx - movl (%ebx), %esi - movl %ecx, %eax - mull %esi - movl %edx, %ebp - movl 28(%esp), %edx - movl %eax, (%edx) - movl 4(%ebx), %ebx - movl %edi, %eax - mull %ebx - movl %edx, 4(%esp) # 4-byte Spill - movl %eax, (%esp) # 4-byte Spill - movl %ecx, %eax - mull %ebx - movl %edx, %ecx - movl %eax, %ebx - movl %edi, %eax - mull %esi - addl %ebp, %eax - adcl $0, %edx - addl %ebx, %eax - movl 28(%esp), %esi - movl %eax, 4(%esi) - adcl (%esp), %edx # 4-byte Folded Reload - sbbl %eax, %eax - andl $1, %eax - addl %ecx, %edx - movl %edx, 8(%esi) - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, 12(%esi) - addl $8, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end21: - .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L - - .globl mcl_fpDbl_sqrPre2L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre2L,@function -mcl_fpDbl_sqrPre2L: # @mcl_fpDbl_sqrPre2L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %esi - movl %esi, %eax - mull %esi - movl %edx, %edi - movl %eax, %ebx - movl %esi, %eax - mull %ecx - movl %edx, %esi - movl %eax, %ebp - movl %ecx, %eax - mull %ecx - movl 20(%esp), %ecx - movl %eax, (%ecx) - addl %ebp, %edx - movl %esi, %eax - adcl $0, %eax - addl %ebp, %edx - movl %edx, 4(%ecx) - adcl %ebx, %eax - sbbl %edx, %edx - andl $1, %edx - addl %esi, %eax - movl %eax, 8(%ecx) - adcl %edi, %edx - movl %edx, 12(%ecx) - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end22: - .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L - - .globl mcl_fp_mont2L - .align 16, 0x90 - .type mcl_fp_mont2L,@function -mcl_fp_mont2L: # @mcl_fp_mont2L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $32, %esp - movl 56(%esp), %ecx - movl (%ecx), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 4(%ecx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 60(%esp), %ecx - movl (%ecx), %esi - mull %esi - movl %eax, 8(%esp) # 4-byte Spill - movl %edx, 4(%esp) # 4-byte Spill - movl 64(%esp), %edx - movl -4(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl %eax, %ebp - imull %ecx, %ebp - movl (%edx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 4(%edx), %edx - movl %edx, 24(%esp) # 4-byte Spill - movl %ebp, %eax - mull %edx - movl 
%edx, %ebx - movl %eax, (%esp) # 4-byte Spill - movl %ebp, %eax - mull %ecx - movl %edx, %ebp - movl %eax, %edi - movl 16(%esp), %eax # 4-byte Reload - mull %esi - addl 4(%esp), %eax # 4-byte Folded Reload - adcl $0, %edx - addl (%esp), %ebp # 4-byte Folded Reload - adcl $0, %ebx - addl 8(%esp), %edi # 4-byte Folded Reload - adcl %eax, %ebp - adcl %edx, %ebx - movl 60(%esp), %eax - movl 4(%eax), %ecx - sbbl %eax, %eax - andl $1, %eax - movl %eax, 8(%esp) # 4-byte Spill - movl %ecx, %eax - mull 16(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 16(%esp) # 4-byte Spill - movl %ecx, %eax - mull 12(%esp) # 4-byte Folded Reload - movl %eax, %ecx - movl %edx, %esi - addl 16(%esp), %esi # 4-byte Folded Reload - adcl $0, %edi - addl %ebp, %ecx - adcl %ebx, %esi - adcl 8(%esp), %edi # 4-byte Folded Reload - sbbl %ebx, %ebx - movl 28(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %eax, 28(%esp) # 4-byte Spill - andl $1, %ebx - mull 20(%esp) # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, %ebp - movl 28(%esp), %eax # 4-byte Reload - mull 24(%esp) # 4-byte Folded Reload - addl 16(%esp), %eax # 4-byte Folded Reload - adcl $0, %edx - addl %ecx, %ebp - adcl %esi, %eax - adcl %edi, %edx - adcl $0, %ebx - movl %eax, %esi - subl 20(%esp), %esi # 4-byte Folded Reload - movl %edx, %ecx - sbbl 24(%esp), %ecx # 4-byte Folded Reload - sbbl $0, %ebx - andl $1, %ebx - jne .LBB23_2 -# BB#1: - movl %esi, %eax -.LBB23_2: - movl 52(%esp), %esi - movl %eax, (%esi) - testb %bl, %bl - jne .LBB23_4 -# BB#3: - movl %ecx, %edx -.LBB23_4: - movl %edx, 4(%esi) - addl $32, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end23: - .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L - - .globl mcl_fp_montNF2L - .align 16, 0x90 - .type mcl_fp_montNF2L,@function -mcl_fp_montNF2L: # @mcl_fp_montNF2L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $36, %esp - movl 60(%esp), %ecx - movl (%ecx), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 4(%ecx), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 64(%esp), %ecx - movl (%ecx), %ebp - mull %ebp - movl %eax, %ebx - movl %edx, 8(%esp) # 4-byte Spill - movl 68(%esp), %eax - movl -4(%eax), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl %ebx, %edi - imull %ecx, %edi - movl (%eax), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 4(%eax), %edx - movl %edx, 28(%esp) # 4-byte Spill - movl %edi, %eax - mull %edx - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, (%esp) # 4-byte Spill - movl %edi, %eax - mull %ecx - movl %edx, 4(%esp) # 4-byte Spill - movl %eax, %esi - movl 12(%esp), %ecx # 4-byte Reload - movl %ecx, %eax - mull %ebp - movl %edx, %edi - movl %eax, %ebp - addl 8(%esp), %ebp # 4-byte Folded Reload - adcl $0, %edi - addl %ebx, %esi - adcl (%esp), %ebp # 4-byte Folded Reload - adcl $0, %edi - addl 4(%esp), %ebp # 4-byte Folded Reload - adcl 16(%esp), %edi # 4-byte Folded Reload - movl 64(%esp), %eax - movl 4(%eax), %ebx - movl %ebx, %eax - mull %ecx - movl %edx, %esi - movl %eax, 16(%esp) # 4-byte Spill - movl %ebx, %eax - mull 20(%esp) # 4-byte Folded Reload - movl %eax, %ebx - movl %edx, %ecx - addl 16(%esp), %ecx # 4-byte Folded Reload - adcl $0, %esi - addl %ebp, %ebx - adcl %edi, %ecx - adcl $0, %esi - movl 24(%esp), %eax # 4-byte Reload - imull %ebx, %eax - movl %eax, %edi - mull 32(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, %ebp - movl %edi, %eax - movl 28(%esp), %edi # 4-byte Reload - mull %edi - addl %ebx, %ebp - adcl %ecx, %eax - adcl $0, %esi - addl 
24(%esp), %eax # 4-byte Folded Reload - adcl %edx, %esi - movl %eax, %edx - subl 32(%esp), %edx # 4-byte Folded Reload - movl %esi, %ecx - sbbl %edi, %ecx - testl %ecx, %ecx - js .LBB24_2 -# BB#1: - movl %edx, %eax -.LBB24_2: - movl 56(%esp), %edx - movl %eax, (%edx) - js .LBB24_4 -# BB#3: - movl %ecx, %esi -.LBB24_4: - movl %esi, 4(%edx) - addl $36, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end24: - .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L - - .globl mcl_fp_montRed2L - .align 16, 0x90 - .type mcl_fp_montRed2L,@function -mcl_fp_montRed2L: # @mcl_fp_montRed2L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $16, %esp - movl 44(%esp), %eax - movl -4(%eax), %ecx - movl (%eax), %ebx - movl %ebx, 8(%esp) # 4-byte Spill - movl 40(%esp), %edx - movl (%edx), %ebp - movl %ebp, %edi - imull %ecx, %edi - movl 4(%eax), %edx - movl %edx, 12(%esp) # 4-byte Spill - movl %edi, %eax - mull %edx - movl %edx, %esi - movl %eax, 4(%esp) # 4-byte Spill - movl %edi, %eax - mull %ebx - movl %edx, %edi - addl 4(%esp), %edi # 4-byte Folded Reload - adcl $0, %esi - addl %ebp, %eax - movl 40(%esp), %edx - movl 12(%edx), %eax - adcl 4(%edx), %edi - adcl 8(%edx), %esi - adcl $0, %eax - movl %eax, 4(%esp) # 4-byte Spill - sbbl %ebx, %ebx - imull %edi, %ecx - andl $1, %ebx - movl %ecx, %eax - mull 8(%esp) # 4-byte Folded Reload - movl %edx, (%esp) # 4-byte Spill - movl %eax, %ebp - movl %ecx, %eax - mull 12(%esp) # 4-byte Folded Reload - addl (%esp), %eax # 4-byte Folded Reload - adcl $0, %edx - addl %edi, %ebp - adcl %esi, %eax - adcl 4(%esp), %edx # 4-byte Folded Reload - adcl $0, %ebx - movl %eax, %esi - subl 8(%esp), %esi # 4-byte Folded Reload - movl %edx, %ecx - sbbl 12(%esp), %ecx # 4-byte Folded Reload - sbbl $0, %ebx - andl $1, %ebx - jne .LBB25_2 -# BB#1: - movl %esi, %eax -.LBB25_2: - movl 36(%esp), %esi - movl %eax, (%esi) - testb %bl, %bl - jne .LBB25_4 -# BB#3: - movl %ecx, %edx -.LBB25_4: - movl %edx, 4(%esi) - addl $16, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end25: - .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L - - .globl mcl_fp_addPre2L - .align 16, 0x90 - .type mcl_fp_addPre2L,@function -mcl_fp_addPre2L: # @mcl_fp_addPre2L -# BB#0: - pushl %esi - movl 16(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %eax - movl 12(%esp), %edx - addl (%edx), %ecx - movl 8(%esp), %esi - adcl 4(%edx), %eax - movl %ecx, (%esi) - movl %eax, 4(%esi) - sbbl %eax, %eax - andl $1, %eax - popl %esi - retl -.Lfunc_end26: - .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L - - .globl mcl_fp_subPre2L - .align 16, 0x90 - .type mcl_fp_subPre2L,@function -mcl_fp_subPre2L: # @mcl_fp_subPre2L -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - xorl %eax, %eax - movl 16(%esp), %esi - subl (%esi), %ecx - sbbl 4(%esi), %edx - movl 8(%esp), %esi - movl %ecx, (%esi) - movl %edx, 4(%esi) - sbbl $0, %eax - andl $1, %eax - popl %esi - retl -.Lfunc_end27: - .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L - - .globl mcl_fp_shr1_2L - .align 16, 0x90 - .type mcl_fp_shr1_2L,@function -mcl_fp_shr1_2L: # @mcl_fp_shr1_2L -# BB#0: - movl 8(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %eax - shrdl $1, %eax, %ecx - movl 4(%esp), %edx - movl %ecx, (%edx) - shrl %eax - movl %eax, 4(%edx) - retl -.Lfunc_end28: - .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L - - .globl mcl_fp_add2L - .align 16, 0x90 - .type mcl_fp_add2L,@function -mcl_fp_add2L: # @mcl_fp_add2L -# BB#0: - pushl %ebx - pushl %esi - movl 20(%esp), %ecx - 
movl (%ecx), %eax - movl 4(%ecx), %ecx - movl 16(%esp), %esi - addl (%esi), %eax - movl 12(%esp), %edx - adcl 4(%esi), %ecx - movl %eax, (%edx) - movl %ecx, 4(%edx) - sbbl %ebx, %ebx - andl $1, %ebx - movl 24(%esp), %esi - subl (%esi), %eax - sbbl 4(%esi), %ecx - sbbl $0, %ebx - testb $1, %bl - jne .LBB29_2 -# BB#1: # %nocarry - movl %eax, (%edx) - movl %ecx, 4(%edx) -.LBB29_2: # %carry - popl %esi - popl %ebx - retl -.Lfunc_end29: - .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L - - .globl mcl_fp_addNF2L - .align 16, 0x90 - .type mcl_fp_addNF2L,@function -mcl_fp_addNF2L: # @mcl_fp_addNF2L -# BB#0: - pushl %edi - pushl %esi - movl 20(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %eax - movl 16(%esp), %edx - addl (%edx), %ecx - adcl 4(%edx), %eax - movl 24(%esp), %edi - movl %ecx, %esi - subl (%edi), %esi - movl %eax, %edx - sbbl 4(%edi), %edx - testl %edx, %edx - js .LBB30_2 -# BB#1: - movl %esi, %ecx -.LBB30_2: - movl 12(%esp), %esi - movl %ecx, (%esi) - js .LBB30_4 -# BB#3: - movl %edx, %eax -.LBB30_4: - movl %eax, 4(%esi) - popl %esi - popl %edi - retl -.Lfunc_end30: - .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L - - .globl mcl_fp_sub2L - .align 16, 0x90 - .type mcl_fp_sub2L,@function -mcl_fp_sub2L: # @mcl_fp_sub2L -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 20(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %eax - xorl %ebx, %ebx - movl 24(%esp), %edx - subl (%edx), %ecx - sbbl 4(%edx), %eax - movl 16(%esp), %edx - movl %ecx, (%edx) - movl %eax, 4(%edx) - sbbl $0, %ebx - testb $1, %bl - je .LBB31_2 -# BB#1: # %carry - movl 28(%esp), %esi - movl 4(%esi), %edi - addl (%esi), %ecx - movl %ecx, (%edx) - adcl %eax, %edi - movl %edi, 4(%edx) -.LBB31_2: # %nocarry - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end31: - .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L - - .globl mcl_fp_subNF2L - .align 16, 0x90 - .type mcl_fp_subNF2L,@function -mcl_fp_subNF2L: # @mcl_fp_subNF2L -# BB#0: - pushl %edi - pushl %esi - movl 16(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %eax - movl 20(%esp), %edx - subl (%edx), %ecx - sbbl 4(%edx), %eax - movl %eax, %edx - sarl $31, %edx - movl 24(%esp), %esi - movl 4(%esi), %edi - andl %edx, %edi - andl (%esi), %edx - addl %ecx, %edx - movl 12(%esp), %ecx - movl %edx, (%ecx) - adcl %eax, %edi - movl %edi, 4(%ecx) - popl %esi - popl %edi - retl -.Lfunc_end32: - .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L - - .globl mcl_fpDbl_add2L - .align 16, 0x90 - .type mcl_fpDbl_add2L,@function -mcl_fpDbl_add2L: # @mcl_fpDbl_add2L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 28(%esp), %edx - movl 12(%edx), %esi - movl 24(%esp), %edi - movl 12(%edi), %eax - movl 8(%edx), %ecx - movl (%edx), %ebx - movl 4(%edx), %ebp - addl (%edi), %ebx - adcl 4(%edi), %ebp - movl 20(%esp), %edx - adcl 8(%edi), %ecx - movl %ebx, (%edx) - movl %ebp, 4(%edx) - adcl %esi, %eax - sbbl %ebx, %ebx - andl $1, %ebx - movl 32(%esp), %ebp - movl %ecx, %esi - subl (%ebp), %esi - movl %eax, %edi - sbbl 4(%ebp), %edi - sbbl $0, %ebx - andl $1, %ebx - jne .LBB33_2 -# BB#1: - movl %edi, %eax -.LBB33_2: - testb %bl, %bl - jne .LBB33_4 -# BB#3: - movl %esi, %ecx -.LBB33_4: - movl %ecx, 8(%edx) - movl %eax, 12(%edx) - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end33: - .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L - - .globl mcl_fpDbl_sub2L - .align 16, 0x90 - .type mcl_fpDbl_sub2L,@function -mcl_fpDbl_sub2L: # @mcl_fpDbl_sub2L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %esi - movl 
4(%ecx), %edi - xorl %ebx, %ebx - movl 28(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%ecx), %eax - sbbl 8(%edx), %eax - movl 12(%edx), %ebp - movl 12(%ecx), %edx - movl 20(%esp), %ecx - movl %esi, (%ecx) - movl %edi, 4(%ecx) - sbbl %ebp, %edx - movl 32(%esp), %edi - movl (%edi), %esi - sbbl $0, %ebx - andl $1, %ebx - jne .LBB34_1 -# BB#2: - xorl %edi, %edi - jmp .LBB34_3 -.LBB34_1: - movl 4(%edi), %edi -.LBB34_3: - testb %bl, %bl - jne .LBB34_5 -# BB#4: - xorl %esi, %esi -.LBB34_5: - addl %eax, %esi - movl %esi, 8(%ecx) - adcl %edx, %edi - movl %edi, 12(%ecx) - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end34: - .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L - - .globl mcl_fp_mulUnitPre3L - .align 16, 0x90 - .type mcl_fp_mulUnitPre3L,@function -mcl_fp_mulUnitPre3L: # @mcl_fp_mulUnitPre3L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - pushl %eax - movl 32(%esp), %ecx - movl 28(%esp), %edi - movl %ecx, %eax - mull 8(%edi) - movl %edx, %esi - movl %eax, (%esp) # 4-byte Spill - movl %ecx, %eax - mull 4(%edi) - movl %edx, %ebx - movl %eax, %ebp - movl %ecx, %eax - mull (%edi) - movl 24(%esp), %ecx - movl %eax, (%ecx) - addl %ebp, %edx - movl %edx, 4(%ecx) - adcl (%esp), %ebx # 4-byte Folded Reload - movl %ebx, 8(%ecx) - adcl $0, %esi - movl %esi, 12(%ecx) - addl $4, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end35: - .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L - - .globl mcl_fpDbl_mulPre3L - .align 16, 0x90 - .type mcl_fpDbl_mulPre3L,@function -mcl_fpDbl_mulPre3L: # @mcl_fpDbl_mulPre3L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $28, %esp - movl 52(%esp), %ecx - movl (%ecx), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 56(%esp), %edx - movl (%edx), %edi - mull %edi - movl %edx, 20(%esp) # 4-byte Spill - movl 48(%esp), %edx - movl %eax, (%edx) - movl 4(%ecx), %ebp - movl %ebp, (%esp) # 4-byte Spill - movl 8(%ecx), %esi - movl %esi, 8(%esp) # 4-byte Spill - movl %esi, %eax - mull %edi - movl %edx, %ecx - movl %eax, 16(%esp) # 4-byte Spill - movl %ebp, %eax - mull %edi - movl %edx, %edi - movl %eax, %ebx - addl 20(%esp), %ebx # 4-byte Folded Reload - adcl 16(%esp), %edi # 4-byte Folded Reload - adcl $0, %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 56(%esp), %eax - movl 4(%eax), %ecx - movl %esi, %eax - mull %ecx - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, %esi - movl %ebp, %eax - mull %ecx - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, %ebp - movl 24(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 4(%esp) # 4-byte Spill - addl %ebx, %eax - movl 48(%esp), %ecx - movl %eax, 4(%ecx) - adcl %edi, %ebp - adcl 12(%esp), %esi # 4-byte Folded Reload - movl 56(%esp), %eax - movl 8(%eax), %edi - sbbl %ecx, %ecx - movl (%esp), %eax # 4-byte Reload - mull %edi - movl %edx, 12(%esp) # 4-byte Spill - movl %eax, (%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - mull %edi - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, %ebx - movl 8(%esp), %eax # 4-byte Reload - mull %edi - andl $1, %ecx - addl 4(%esp), %ebp # 4-byte Folded Reload - adcl 16(%esp), %esi # 4-byte Folded Reload - adcl 20(%esp), %ecx # 4-byte Folded Reload - addl %ebx, %ebp - movl 48(%esp), %edi - movl %ebp, 8(%edi) - adcl (%esp), %esi # 4-byte Folded Reload - adcl %eax, %ecx - sbbl %eax, %eax - andl $1, %eax - addl 24(%esp), %esi # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %esi, 12(%edi) - movl %ecx, 16(%edi) - adcl %edx, %eax - movl %eax, 20(%edi) - addl $28, 
%esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end36: - .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L - - .globl mcl_fpDbl_sqrPre3L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre3L,@function -mcl_fpDbl_sqrPre3L: # @mcl_fpDbl_sqrPre3L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $32, %esp - movl 56(%esp), %eax - movl 8(%eax), %ebp - movl (%eax), %ecx - movl 4(%eax), %esi - movl %ebp, %eax - mull %esi - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %ebp, %eax - mull %ecx - movl %edx, 4(%esp) # 4-byte Spill - movl %eax, %ebx - movl %ebx, (%esp) # 4-byte Spill - movl %esi, %eax - mull %esi - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %esi, %eax - mull %ecx - movl %edx, %esi - movl %eax, %edi - movl %ecx, %eax - mull %ecx - movl %edx, %ecx - movl 52(%esp), %edx - movl %eax, (%edx) - movl %ebp, %eax - mull %ebp - movl %eax, 12(%esp) # 4-byte Spill - movl %edx, 24(%esp) # 4-byte Spill - addl %edi, %ecx - movl %esi, %ebp - adcl %ebx, %ebp - movl 4(%esp), %ebx # 4-byte Reload - movl %ebx, %eax - adcl $0, %eax - addl %edi, %ecx - movl 52(%esp), %edx - movl %ecx, 4(%edx) - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl 20(%esp), %edx # 4-byte Reload - adcl %edx, %eax - sbbl %ecx, %ecx - andl $1, %ecx - addl %esi, %ebp - adcl 16(%esp), %eax # 4-byte Folded Reload - movl 28(%esp), %edi # 4-byte Reload - adcl %edi, %ecx - addl (%esp), %ebp # 4-byte Folded Reload - movl 52(%esp), %esi - movl %ebp, 8(%esi) - adcl %edx, %eax - adcl 12(%esp), %ecx # 4-byte Folded Reload - sbbl %esi, %esi - andl $1, %esi - addl %ebx, %eax - adcl %edi, %ecx - movl 52(%esp), %edx - movl %eax, 12(%edx) - movl %ecx, 16(%edx) - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%edx) - addl $32, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end37: - .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L - - .globl mcl_fp_mont3L - .align 16, 0x90 - .type mcl_fp_mont3L,@function -mcl_fp_mont3L: # @mcl_fp_mont3L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $56, %esp - movl 80(%esp), %ecx - movl (%ecx), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 84(%esp), %edx - movl (%edx), %edx - movl %edx, 12(%esp) # 4-byte Spill - mull %edx - movl %eax, 32(%esp) # 4-byte Spill - movl %edx, 16(%esp) # 4-byte Spill - movl 88(%esp), %esi - movl -4(%esi), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, %ebp - imull %edx, %ebp - movl (%esi), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 8(%esi), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 4(%esi), %ebx - movl %ebx, 44(%esp) # 4-byte Spill - movl 4(%ecx), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 8(%ecx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl %ebp, %eax - mull %edx - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ebp, %eax - mull %ebx - movl %edx, %ebx - movl %eax, (%esp) # 4-byte Spill - movl %ebp, %eax - mull %edi - movl %edx, %ebp - movl %eax, 4(%esp) # 4-byte Spill - movl %ecx, %eax - movl 12(%esp), %ecx # 4-byte Reload - mull %ecx - movl %edx, %esi - movl %eax, %edi - movl 36(%esp), %eax # 4-byte Reload - mull %ecx - movl %eax, %ecx - addl 16(%esp), %ecx # 4-byte Folded Reload - adcl %edi, %edx - adcl $0, %esi - addl (%esp), %ebp # 4-byte Folded Reload - adcl 8(%esp), %ebx # 4-byte Folded Reload - movl 28(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 4(%esp), %edi # 4-byte Reload - addl 32(%esp), %edi # 4-byte Folded Reload - adcl %ecx, %ebp - adcl 
%edx, %ebx - adcl %esi, %eax - movl %eax, 28(%esp) # 4-byte Spill - sbbl %eax, %eax - andl $1, %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 84(%esp), %eax - movl 4(%eax), %ecx - movl %ecx, %eax - mull 20(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ecx, %eax - mull 36(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 12(%esp) # 4-byte Spill - movl %ecx, %eax - mull 24(%esp) # 4-byte Folded Reload - movl %edx, %edi - addl 12(%esp), %edi # 4-byte Folded Reload - adcl 16(%esp), %esi # 4-byte Folded Reload - movl 32(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %eax, %edx - addl %ebp, %edx - movl %edx, 12(%esp) # 4-byte Spill - adcl %ebx, %edi - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 32(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %edx, %esi - imull 52(%esp), %esi # 4-byte Folded Reload - andl $1, %eax - movl %eax, 28(%esp) # 4-byte Spill - movl %esi, %eax - mull 48(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 8(%esp) # 4-byte Spill - movl %esi, %eax - mull 44(%esp) # 4-byte Folded Reload - movl %edx, %ebx - movl %eax, 4(%esp) # 4-byte Spill - movl %esi, %eax - mull 40(%esp) # 4-byte Folded Reload - movl %edx, %ecx - addl 4(%esp), %ecx # 4-byte Folded Reload - adcl 8(%esp), %ebx # 4-byte Folded Reload - adcl $0, %ebp - addl 12(%esp), %eax # 4-byte Folded Reload - adcl %edi, %ecx - adcl 16(%esp), %ebx # 4-byte Folded Reload - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl $0, 28(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax - movl 8(%eax), %esi - movl %esi, %eax - mull 20(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %esi, %eax - mull 36(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 16(%esp) # 4-byte Spill - movl %esi, %eax - mull 24(%esp) # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - movl %edx, %esi - addl 16(%esp), %esi # 4-byte Folded Reload - adcl 20(%esp), %edi # 4-byte Folded Reload - movl 32(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 36(%esp), %edx # 4-byte Reload - addl %ecx, %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl %ebx, %esi - adcl %ebp, %edi - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - sbbl %ecx, %ecx - movl 52(%esp), %ebp # 4-byte Reload - imull %edx, %ebp - movl %ebp, 52(%esp) # 4-byte Spill - andl $1, %ecx - movl %ebp, %eax - mull 40(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, %ebx - movl %ebp, %eax - mull 48(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 24(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - mull 44(%esp) # 4-byte Folded Reload - addl 28(%esp), %eax # 4-byte Folded Reload - adcl 24(%esp), %edx # 4-byte Folded Reload - adcl $0, %ebp - addl 36(%esp), %ebx # 4-byte Folded Reload - adcl %esi, %eax - adcl %edi, %edx - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl $0, %ecx - movl %eax, %ebx - subl 40(%esp), %ebx # 4-byte Folded Reload - movl %edx, %edi - sbbl 44(%esp), %edi # 4-byte Folded Reload - movl %ebp, %esi - sbbl 48(%esp), %esi # 4-byte Folded Reload - sbbl $0, %ecx - andl $1, %ecx - jne .LBB38_2 -# BB#1: - movl %ebx, %eax -.LBB38_2: - movl 76(%esp), %ebx - movl %eax, (%ebx) - testb %cl, %cl - jne .LBB38_4 -# BB#3: - movl %edi, %edx -.LBB38_4: - movl %edx, 4(%ebx) - jne .LBB38_6 -# BB#5: - movl %esi, %ebp -.LBB38_6: - movl %ebp, 8(%ebx) - addl $56, %esp 
- popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end38: - .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L - - .globl mcl_fp_montNF3L - .align 16, 0x90 - .type mcl_fp_montNF3L,@function -mcl_fp_montNF3L: # @mcl_fp_montNF3L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $60, %esp - movl 84(%esp), %ebp - movl (%ebp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 88(%esp), %ecx - movl (%ecx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - mull %ecx - movl %eax, 32(%esp) # 4-byte Spill - movl %edx, 36(%esp) # 4-byte Spill - movl 92(%esp), %esi - movl -4(%esi), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, %ecx - imull %edx, %ecx - movl (%esi), %ebx - movl %ebx, 44(%esp) # 4-byte Spill - movl 8(%esi), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 4(%esi), %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 4(%ebp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 8(%ebp), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edx - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edi - movl %edx, 12(%esp) # 4-byte Spill - movl %eax, (%esp) # 4-byte Spill - movl %ecx, %eax - mull %ebx - movl %edx, 4(%esp) # 4-byte Spill - movl %eax, %ebp - movl %esi, %eax - movl 20(%esp), %ecx # 4-byte Reload - mull %ecx - movl %edx, %edi - movl %eax, %ebx - movl 40(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, %ecx - movl %eax, %esi - addl 36(%esp), %esi # 4-byte Folded Reload - adcl %ebx, %ecx - adcl $0, %edi - addl 32(%esp), %ebp # 4-byte Folded Reload - adcl (%esp), %esi # 4-byte Folded Reload - adcl 8(%esp), %ecx # 4-byte Folded Reload - adcl $0, %edi - addl 4(%esp), %esi # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - adcl 16(%esp), %edi # 4-byte Folded Reload - movl 88(%esp), %eax - movl 4(%eax), %ebx - movl %ebx, %eax - mull 24(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 36(%esp) # 4-byte Spill - movl %ebx, %eax - mull 40(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ebx, %eax - mull 28(%esp) # 4-byte Folded Reload - movl %eax, 20(%esp) # 4-byte Spill - movl %edx, %ebx - addl 16(%esp), %ebx # 4-byte Folded Reload - movl 32(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - adcl $0, %ebp - movl 20(%esp), %edx # 4-byte Reload - addl %esi, %edx - adcl %ecx, %ebx - adcl %edi, %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl $0, %ebp - movl %edx, %ecx - movl %edx, %edi - imull 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %eax - mull 52(%esp) # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ecx, %eax - mull 48(%esp) # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, %esi - movl %ecx, %eax - mull 44(%esp) # 4-byte Folded Reload - addl %edi, %eax - adcl %ebx, %esi - movl 36(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - adcl $0, %ebp - addl %edx, %esi - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - adcl 20(%esp), %ebp # 4-byte Folded Reload - movl 88(%esp), %eax - movl 8(%eax), %edi - movl %edi, %eax - mull 24(%esp) # 4-byte Folded Reload - movl %edx, %ebx - movl %eax, 32(%esp) # 4-byte Spill - movl %edi, %eax - mull 40(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 40(%esp) # 4-byte Spill - movl %edi, %eax - mull 28(%esp) # 4-byte Folded Reload - movl %edx, %edi - addl 40(%esp), %edi # 4-byte Folded Reload - adcl 
32(%esp), %ecx # 4-byte Folded Reload - adcl $0, %ebx - addl %esi, %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl 36(%esp), %edi # 4-byte Folded Reload - adcl %ebp, %ecx - adcl $0, %ebx - movl 56(%esp), %esi # 4-byte Reload - imull %eax, %esi - movl %esi, 56(%esp) # 4-byte Spill - movl %esi, %eax - mull 44(%esp) # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, %ebp - movl %esi, %eax - mull 52(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, %esi - movl 56(%esp), %eax # 4-byte Reload - mull 48(%esp) # 4-byte Folded Reload - addl 32(%esp), %ebp # 4-byte Folded Reload - adcl %edi, %eax - adcl %ecx, %esi - adcl $0, %ebx - addl 40(%esp), %eax # 4-byte Folded Reload - adcl %edx, %esi - adcl 36(%esp), %ebx # 4-byte Folded Reload - movl %eax, %edi - subl 44(%esp), %edi # 4-byte Folded Reload - movl %esi, %edx - sbbl 48(%esp), %edx # 4-byte Folded Reload - movl %ebx, %ecx - sbbl 52(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %ebp - sarl $31, %ebp - testl %ebp, %ebp - js .LBB39_2 -# BB#1: - movl %edi, %eax -.LBB39_2: - movl 80(%esp), %edi - movl %eax, (%edi) - js .LBB39_4 -# BB#3: - movl %edx, %esi -.LBB39_4: - movl %esi, 4(%edi) - js .LBB39_6 -# BB#5: - movl %ecx, %ebx -.LBB39_6: - movl %ebx, 8(%edi) - addl $60, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end39: - .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L - - .globl mcl_fp_montRed3L - .align 16, 0x90 - .type mcl_fp_montRed3L,@function -mcl_fp_montRed3L: # @mcl_fp_montRed3L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $40, %esp - movl 68(%esp), %eax - movl -4(%eax), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl (%eax), %edi - movl %edi, 24(%esp) # 4-byte Spill - movl 64(%esp), %ebx - movl (%ebx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - imull %edx, %ecx - movl 8(%eax), %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 4(%eax), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edx - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %ecx, %eax - mull %esi - movl %edx, %esi - movl %eax, %ebp - movl %ecx, %eax - mull %edi - movl %edx, %ecx - addl %ebp, %ecx - adcl 12(%esp), %esi # 4-byte Folded Reload - movl 20(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 16(%esp), %eax # 4-byte Folded Reload - adcl 4(%ebx), %ecx - adcl 8(%ebx), %esi - adcl 12(%ebx), %edx - movl %edx, 20(%esp) # 4-byte Spill - movl 20(%ebx), %eax - movl 16(%ebx), %edx - adcl $0, %edx - movl %edx, 8(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 12(%esp) # 4-byte Spill - sbbl %ebx, %ebx - andl $1, %ebx - movl %ecx, %edi - imull 36(%esp), %edi # 4-byte Folded Reload - movl %edi, %eax - mull 32(%esp) # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %edi, %eax - mull 28(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, (%esp) # 4-byte Spill - movl %edi, %eax - mull 24(%esp) # 4-byte Folded Reload - movl %edx, %edi - addl (%esp), %edi # 4-byte Folded Reload - adcl 4(%esp), %ebp # 4-byte Folded Reload - movl 16(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl %ecx, %eax - adcl %esi, %edi - adcl 20(%esp), %ebp # 4-byte Folded Reload - adcl 8(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl $0, 12(%esp) # 4-byte Folded Spill - adcl $0, %ebx - movl 36(%esp), %ecx # 4-byte Reload - imull %edi, %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl %ecx, %eax - mull 32(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 
20(%esp) # 4-byte Spill - movl %ecx, %eax - mull 24(%esp) # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill - movl %eax, %ecx - movl 36(%esp), %eax # 4-byte Reload - mull 28(%esp) # 4-byte Folded Reload - addl 8(%esp), %eax # 4-byte Folded Reload - adcl 20(%esp), %edx # 4-byte Folded Reload - adcl $0, %esi - addl %edi, %ecx - adcl %ebp, %eax - adcl 16(%esp), %edx # 4-byte Folded Reload - adcl 12(%esp), %esi # 4-byte Folded Reload - adcl $0, %ebx - movl %eax, %ebp - subl 24(%esp), %ebp # 4-byte Folded Reload - movl %edx, %edi - sbbl 28(%esp), %edi # 4-byte Folded Reload - movl %esi, %ecx - sbbl 32(%esp), %ecx # 4-byte Folded Reload - sbbl $0, %ebx - andl $1, %ebx - jne .LBB40_2 -# BB#1: - movl %ebp, %eax -.LBB40_2: - movl 60(%esp), %ebp - movl %eax, (%ebp) - testb %bl, %bl - jne .LBB40_4 -# BB#3: - movl %edi, %edx -.LBB40_4: - movl %edx, 4(%ebp) - jne .LBB40_6 -# BB#5: - movl %ecx, %esi -.LBB40_6: - movl %esi, 8(%ebp) - addl $40, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end40: - .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L - - .globl mcl_fp_addPre3L - .align 16, 0x90 - .type mcl_fp_addPre3L,@function -mcl_fp_addPre3L: # @mcl_fp_addPre3L -# BB#0: - pushl %esi - movl 16(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - movl 12(%esp), %esi - addl (%esi), %ecx - adcl 4(%esi), %edx - movl 8(%eax), %eax - adcl 8(%esi), %eax - movl 8(%esp), %esi - movl %ecx, (%esi) - movl %edx, 4(%esi) - movl %eax, 8(%esi) - sbbl %eax, %eax - andl $1, %eax - popl %esi - retl -.Lfunc_end41: - .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L - - .globl mcl_fp_subPre3L - .align 16, 0x90 - .type mcl_fp_subPre3L,@function -mcl_fp_subPre3L: # @mcl_fp_subPre3L -# BB#0: - pushl %edi - pushl %esi - movl 16(%esp), %ecx - movl (%ecx), %edx - movl 4(%ecx), %esi - xorl %eax, %eax - movl 20(%esp), %edi - subl (%edi), %edx - sbbl 4(%edi), %esi - movl 8(%ecx), %ecx - sbbl 8(%edi), %ecx - movl 12(%esp), %edi - movl %edx, (%edi) - movl %esi, 4(%edi) - movl %ecx, 8(%edi) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - retl -.Lfunc_end42: - .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L - - .globl mcl_fp_shr1_3L - .align 16, 0x90 - .type mcl_fp_shr1_3L,@function -mcl_fp_shr1_3L: # @mcl_fp_shr1_3L -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl 8(%eax), %ecx - movl (%eax), %edx - movl 4(%eax), %eax - shrdl $1, %eax, %edx - movl 8(%esp), %esi - movl %edx, (%esi) - shrdl $1, %ecx, %eax - movl %eax, 4(%esi) - shrl %ecx - movl %ecx, 8(%esi) - popl %esi - retl -.Lfunc_end43: - .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L - - .globl mcl_fp_add3L - .align 16, 0x90 - .type mcl_fp_add3L,@function -mcl_fp_add3L: # @mcl_fp_add3L -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %edx - movl (%edx), %eax - movl 4(%edx), %ecx - movl 20(%esp), %esi - addl (%esi), %eax - adcl 4(%esi), %ecx - movl 8(%edx), %edx - adcl 8(%esi), %edx - movl 16(%esp), %esi - movl %eax, (%esi) - movl %ecx, 4(%esi) - movl %edx, 8(%esi) - sbbl %ebx, %ebx - andl $1, %ebx - movl 28(%esp), %edi - subl (%edi), %eax - sbbl 4(%edi), %ecx - sbbl 8(%edi), %edx - sbbl $0, %ebx - testb $1, %bl - jne .LBB44_2 -# BB#1: # %nocarry - movl %eax, (%esi) - movl %ecx, 4(%esi) - movl %edx, 8(%esi) -.LBB44_2: # %carry - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end44: - .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L - - .globl mcl_fp_addNF3L - .align 16, 0x90 - .type mcl_fp_addNF3L,@function -mcl_fp_addNF3L: # @mcl_fp_addNF3L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 
28(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %ecx - movl 24(%esp), %esi - addl (%esi), %edx - adcl 4(%esi), %ecx - movl 8(%eax), %eax - adcl 8(%esi), %eax - movl 32(%esp), %ebp - movl %edx, %ebx - subl (%ebp), %ebx - movl %ecx, %edi - sbbl 4(%ebp), %edi - movl %eax, %esi - sbbl 8(%ebp), %esi - movl %esi, %ebp - sarl $31, %ebp - testl %ebp, %ebp - js .LBB45_2 -# BB#1: - movl %ebx, %edx -.LBB45_2: - movl 20(%esp), %ebx - movl %edx, (%ebx) - js .LBB45_4 -# BB#3: - movl %edi, %ecx -.LBB45_4: - movl %ecx, 4(%ebx) - js .LBB45_6 -# BB#5: - movl %esi, %eax -.LBB45_6: - movl %eax, 8(%ebx) - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end45: - .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L - - .globl mcl_fp_sub3L - .align 16, 0x90 - .type mcl_fp_sub3L,@function -mcl_fp_sub3L: # @mcl_fp_sub3L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %edx - movl (%edx), %ecx - movl 4(%edx), %eax - xorl %ebx, %ebx - movl 28(%esp), %esi - subl (%esi), %ecx - sbbl 4(%esi), %eax - movl 8(%edx), %edx - sbbl 8(%esi), %edx - movl 20(%esp), %esi - movl %ecx, (%esi) - movl %eax, 4(%esi) - movl %edx, 8(%esi) - sbbl $0, %ebx - testb $1, %bl - je .LBB46_2 -# BB#1: # %carry - movl 32(%esp), %edi - movl 4(%edi), %ebx - movl 8(%edi), %ebp - addl (%edi), %ecx - movl %ecx, (%esi) - adcl %eax, %ebx - movl %ebx, 4(%esi) - adcl %edx, %ebp - movl %ebp, 8(%esi) -.LBB46_2: # %nocarry - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end46: - .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L - - .globl mcl_fp_subNF3L - .align 16, 0x90 - .type mcl_fp_subNF3L,@function -mcl_fp_subNF3L: # @mcl_fp_subNF3L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - movl 28(%esp), %esi - subl (%esi), %ecx - sbbl 4(%esi), %edx - movl 8(%eax), %eax - sbbl 8(%esi), %eax - movl %eax, %esi - sarl $31, %esi - movl %esi, %edi - shldl $1, %eax, %edi - movl 32(%esp), %ebx - andl (%ebx), %edi - movl 8(%ebx), %ebp - andl %esi, %ebp - andl 4(%ebx), %esi - addl %ecx, %edi - adcl %edx, %esi - movl 20(%esp), %ecx - movl %edi, (%ecx) - movl %esi, 4(%ecx) - adcl %eax, %ebp - movl %ebp, 8(%ecx) - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end47: - .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L - - .globl mcl_fpDbl_add3L - .align 16, 0x90 - .type mcl_fpDbl_add3L,@function -mcl_fpDbl_add3L: # @mcl_fpDbl_add3L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - pushl %eax - movl 32(%esp), %esi - movl 20(%esi), %eax - movl %eax, (%esp) # 4-byte Spill - movl 16(%esi), %edi - movl 12(%esi), %ebx - movl (%esi), %edx - movl 28(%esp), %eax - addl (%eax), %edx - movl 24(%esp), %ecx - movl %edx, (%ecx) - movl 8(%esi), %edx - movl 4(%esi), %esi - adcl 4(%eax), %esi - adcl 8(%eax), %edx - movl %esi, 4(%ecx) - movl 20(%eax), %ebp - movl %edx, 8(%ecx) - movl 12(%eax), %esi - movl 16(%eax), %edx - adcl %ebx, %esi - adcl %edi, %edx - adcl (%esp), %ebp # 4-byte Folded Reload - sbbl %eax, %eax - andl $1, %eax - movl 36(%esp), %ecx - movl %esi, %ebx - subl (%ecx), %ebx - movl %edx, %edi - sbbl 4(%ecx), %edi - movl %edi, (%esp) # 4-byte Spill - movl %ebp, %ecx - movl 36(%esp), %edi - sbbl 8(%edi), %ecx - sbbl $0, %eax - andl $1, %eax - jne .LBB48_2 -# BB#1: - movl %ecx, %ebp -.LBB48_2: - testb %al, %al - jne .LBB48_4 -# BB#3: - movl %ebx, %esi -.LBB48_4: - movl 24(%esp), %eax - movl %esi, 12(%eax) - jne .LBB48_6 -# BB#5: - movl (%esp), %edx # 4-byte Reload -.LBB48_6: - movl %edx, 16(%eax) - movl %ebp, 20(%eax) - addl 
$4, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end48: - .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L - - .globl mcl_fpDbl_sub3L - .align 16, 0x90 - .type mcl_fpDbl_sub3L,@function -mcl_fpDbl_sub3L: # @mcl_fpDbl_sub3L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %edx - movl 4(%ecx), %esi - movl 28(%esp), %ebx - subl (%ebx), %edx - sbbl 4(%ebx), %esi - movl 8(%ecx), %ebp - sbbl 8(%ebx), %ebp - movl 20(%esp), %eax - movl %edx, (%eax) - movl 12(%ecx), %edi - sbbl 12(%ebx), %edi - movl %esi, 4(%eax) - movl 16(%ecx), %esi - sbbl 16(%ebx), %esi - movl 20(%ebx), %ebx - movl 20(%ecx), %edx - movl %ebp, 8(%eax) - sbbl %ebx, %edx - movl $0, %ecx - sbbl $0, %ecx - andl $1, %ecx - movl 32(%esp), %ebp - jne .LBB49_1 -# BB#2: - xorl %ebx, %ebx - jmp .LBB49_3 -.LBB49_1: - movl 8(%ebp), %ebx -.LBB49_3: - testb %cl, %cl - movl $0, %eax - jne .LBB49_4 -# BB#5: - xorl %ecx, %ecx - jmp .LBB49_6 -.LBB49_4: - movl (%ebp), %ecx - movl 4(%ebp), %eax -.LBB49_6: - addl %edi, %ecx - adcl %esi, %eax - movl 20(%esp), %esi - movl %ecx, 12(%esi) - movl %eax, 16(%esi) - adcl %edx, %ebx - movl %ebx, 20(%esi) - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end49: - .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L - - .globl mcl_fp_mulUnitPre4L - .align 16, 0x90 - .type mcl_fp_mulUnitPre4L,@function -mcl_fp_mulUnitPre4L: # @mcl_fp_mulUnitPre4L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $12, %esp - movl 40(%esp), %ecx - movl 36(%esp), %ebp - movl %ecx, %eax - mull 12(%ebp) - movl %edx, %esi - movl %eax, 8(%esp) # 4-byte Spill - movl %ecx, %eax - mull 8(%ebp) - movl %edx, %ebx - movl %eax, 4(%esp) # 4-byte Spill - movl %ecx, %eax - mull 4(%ebp) - movl %edx, %edi - movl %eax, (%esp) # 4-byte Spill - movl %ecx, %eax - mull (%ebp) - movl 32(%esp), %ecx - movl %eax, (%ecx) - addl (%esp), %edx # 4-byte Folded Reload - movl %edx, 4(%ecx) - adcl 4(%esp), %edi # 4-byte Folded Reload - movl %edi, 8(%ecx) - adcl 8(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%ecx) - adcl $0, %esi - movl %esi, 16(%ecx) - addl $12, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end50: - .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L - - .globl mcl_fpDbl_mulPre4L - .align 16, 0x90 - .type mcl_fpDbl_mulPre4L,@function -mcl_fpDbl_mulPre4L: # @mcl_fpDbl_mulPre4L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $56, %esp - movl 80(%esp), %edi - movl (%edi), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 84(%esp), %ecx - movl (%ecx), %esi - movl %ecx, %ebp - mull %esi - movl %edx, 12(%esp) # 4-byte Spill - movl 76(%esp), %ecx - movl %eax, (%ecx) - movl 4(%edi), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 8(%edi), %ebx - movl %ebx, 24(%esp) # 4-byte Spill - movl 12(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 4(%ebp), %ecx - movl %eax, %ebp - mull %ecx - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ebx, %eax - mull %ecx - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - movl %edi, %eax - mull %ecx - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 8(%esp) # 4-byte Spill - movl %eax, (%esp) # 4-byte Spill - movl %ebp, %eax - mull %esi - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, %ebp - movl %ebx, %eax - mull %esi - movl %edx, %ecx - movl %eax, %ebx - movl %edi, %eax - mull %esi 
- movl %edx, %edi - addl 12(%esp), %eax # 4-byte Folded Reload - adcl %ebx, %edi - adcl %ebp, %ecx - movl 52(%esp), %esi # 4-byte Reload - adcl $0, %esi - addl (%esp), %eax # 4-byte Folded Reload - movl 76(%esp), %edx - movl %eax, 4(%edx) - adcl 4(%esp), %edi # 4-byte Folded Reload - adcl 16(%esp), %ecx # 4-byte Folded Reload - adcl 28(%esp), %esi # 4-byte Folded Reload - sbbl %eax, %eax - andl $1, %eax - addl 8(%esp), %edi # 4-byte Folded Reload - adcl 32(%esp), %ecx # 4-byte Folded Reload - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 52(%esp) # 4-byte Spill - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 16(%esp) # 4-byte Spill - movl 84(%esp), %eax - movl 8(%eax), %esi - movl 20(%esp), %eax # 4-byte Reload - mull %esi - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - mull %esi - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, %ebx - movl 40(%esp), %eax # 4-byte Reload - mull %esi - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, %ebp - movl 48(%esp), %eax # 4-byte Reload - mull %esi - movl %edx, 28(%esp) # 4-byte Spill - addl %edi, %eax - movl 76(%esp), %edx - movl %eax, 8(%edx) - adcl %ecx, %ebp - adcl 52(%esp), %ebx # 4-byte Folded Reload - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax - movl 12(%eax), %esi - sbbl %ecx, %ecx - movl %esi, %eax - movl 80(%esp), %edi - mull 12(%edi) - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %esi, %eax - mull 8(%edi) - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %esi, %eax - mull 4(%edi) - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, %edi - movl %esi, %eax - movl 80(%esp), %edx - mull (%edx) - movl %eax, %esi - andl $1, %ecx - addl 28(%esp), %ebp # 4-byte Folded Reload - adcl 40(%esp), %ebx # 4-byte Folded Reload - movl 44(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - adcl 36(%esp), %ecx # 4-byte Folded Reload - addl %esi, %ebp - movl 76(%esp), %esi - movl %ebp, 12(%esi) - adcl %edi, %ebx - movl %eax, %edi - adcl 16(%esp), %edi # 4-byte Folded Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - sbbl %eax, %eax - andl $1, %eax - addl %edx, %ebx - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %esi, %edx - movl %ebx, 16(%edx) - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %edi, 20(%edx) - movl %ecx, 24(%edx) - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%edx) - addl $56, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end51: - .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L - - .globl mcl_fpDbl_sqrPre4L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre4L,@function -mcl_fpDbl_sqrPre4L: # @mcl_fpDbl_sqrPre4L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $40, %esp - movl 64(%esp), %ecx - movl 12(%ecx), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl (%ecx), %ebx - movl 4(%ecx), %esi - movl %ebp, %eax - mull %esi - movl %eax, 12(%esp) # 4-byte Spill - movl %edx, 36(%esp) # 4-byte Spill - movl 8(%ecx), %edi - movl %edi, %eax - mull %esi - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ebp, %eax - mull %ebx - movl %edx, %ebp - movl %eax, %ecx - movl %edi, %eax - mull %ebx - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %esi, %eax - mull %esi - movl %edx, 8(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %esi, %eax - mull %ebx - movl %edx, (%esp) # 4-byte Spill - movl 
%eax, %esi - movl %ebx, %eax - mull %ebx - movl 60(%esp), %ebx - movl %eax, (%ebx) - addl %esi, %edx - movl (%esp), %eax # 4-byte Reload - movl %eax, %ebx - adcl 20(%esp), %ebx # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - adcl $0, %ebp - addl %esi, %edx - movl 60(%esp), %esi - movl %edx, 4(%esi) - adcl 4(%esp), %ebx # 4-byte Folded Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - adcl 12(%esp), %ebp # 4-byte Folded Reload - sbbl %esi, %esi - andl $1, %esi - addl %eax, %ebx - adcl 8(%esp), %ecx # 4-byte Folded Reload - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl 36(%esp), %esi # 4-byte Folded Reload - movl 16(%esp), %eax # 4-byte Reload - mull %edi - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %edi, %eax - mull %edi - movl %eax, %edi - addl 20(%esp), %ebx # 4-byte Folded Reload - movl 60(%esp), %eax - movl %ebx, 8(%eax) - adcl 24(%esp), %ecx # 4-byte Folded Reload - adcl %ebp, %edi - movl 36(%esp), %eax # 4-byte Reload - adcl %esi, %eax - sbbl %esi, %esi - andl $1, %esi - addl 28(%esp), %ecx # 4-byte Folded Reload - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - adcl %edx, %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - movl 64(%esp), %esi - movl 12(%esi), %ebp - movl %ebp, %eax - mull 8(%esi) - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, %ebx - movl %ebp, %eax - mull 4(%esi) - movl %esi, %edi - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, %esi - movl %ebp, %eax - mull (%edi) - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, %edi - movl %ebp, %eax - mull %ebp - addl %ecx, %edi - movl 60(%esp), %ebp - movl %edi, 12(%ebp) - adcl 24(%esp), %esi # 4-byte Folded Reload - adcl 36(%esp), %ebx # 4-byte Folded Reload - adcl 20(%esp), %eax # 4-byte Folded Reload - sbbl %ecx, %ecx - andl $1, %ecx - addl 16(%esp), %esi # 4-byte Folded Reload - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl %ebp, %edi - movl %esi, 16(%edi) - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %ebx, 20(%edi) - movl %eax, 24(%edi) - adcl %edx, %ecx - movl %ecx, 28(%edi) - addl $40, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end52: - .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L - - .globl mcl_fp_mont4L - .align 16, 0x90 - .type mcl_fp_mont4L,@function -mcl_fp_mont4L: # @mcl_fp_mont4L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $88, %esp - movl 112(%esp), %ecx - movl (%ecx), %eax - movl %ecx, %ebp - movl %eax, 60(%esp) # 4-byte Spill - movl 116(%esp), %edx - movl (%edx), %edx - movl %edx, 28(%esp) # 4-byte Spill - mull %edx - movl %eax, 48(%esp) # 4-byte Spill - movl %edx, 32(%esp) # 4-byte Spill - movl 120(%esp), %edi - movl -4(%edi), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, %ebx - imull %edx, %ebx - movl (%edi), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 12(%edi), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 8(%edi), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 4(%edi), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl %ebp, %edi - movl 4(%edi), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 12(%edi), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 8(%edi), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl %ebx, %eax - mull %edx - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ebx, %eax - mull %esi - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ebx, %eax - mull %ecx - movl 
%edx, 40(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ebx, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %ebp, %eax - movl 28(%esp), %esi # 4-byte Reload - mull %esi - movl %edx, %ecx - movl %eax, 4(%esp) # 4-byte Spill - movl %edi, %eax - mull %esi - movl %edx, %ebp - movl %eax, (%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - mull %esi - movl %edx, %ebx - movl %eax, %edi - addl 32(%esp), %edi # 4-byte Folded Reload - adcl (%esp), %ebx # 4-byte Folded Reload - adcl 4(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 28(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - addl 8(%esp), %esi # 4-byte Folded Reload - movl 40(%esp), %edx # 4-byte Reload - adcl 16(%esp), %edx # 4-byte Folded Reload - movl 20(%esp), %ecx # 4-byte Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl 44(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 12(%esp), %ebp # 4-byte Reload - addl 48(%esp), %ebp # 4-byte Folded Reload - adcl %edi, %esi - movl %esi, 36(%esp) # 4-byte Spill - adcl %ebx, %edx - movl %edx, 40(%esp) # 4-byte Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %ebx - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - sbbl %ebp, %ebp - andl $1, %ebp - movl 116(%esp), %eax - movl 4(%eax), %esi - movl %esi, %eax - mull 52(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 32(%esp) # 4-byte Spill - movl %esi, %eax - mull 56(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %esi, %eax - mull 64(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 24(%esp) # 4-byte Spill - movl %esi, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %edx, %esi - addl 24(%esp), %esi # 4-byte Folded Reload - movl %ecx, %edx - adcl 28(%esp), %edx # 4-byte Folded Reload - movl 48(%esp), %ecx # 4-byte Reload - adcl 32(%esp), %ecx # 4-byte Folded Reload - adcl $0, %edi - addl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%esp) # 4-byte Spill - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - adcl %ebx, %edx - movl %edx, 32(%esp) # 4-byte Spill - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 48(%esp) # 4-byte Spill - adcl %ebp, %edi - movl %edi, 44(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %eax, %ecx - imull 80(%esp), %ecx # 4-byte Folded Reload - andl $1, %esi - movl %ecx, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ecx, %eax - mull 72(%esp) # 4-byte Folded Reload - movl %edx, %ebx - movl %eax, 20(%esp) # 4-byte Spill - movl %ecx, %eax - mull 68(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 16(%esp) # 4-byte Spill - movl %ecx, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, %edi - addl 16(%esp), %edi # 4-byte Folded Reload - adcl 20(%esp), %ebp # 4-byte Folded Reload - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl 36(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl 28(%esp), %eax # 4-byte Folded Reload - adcl 40(%esp), %edi # 4-byte Folded Reload - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl 48(%esp), %ebx # 4-byte Folded Reload - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 36(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 116(%esp), %eax - movl 8(%eax), %esi - movl %esi, %eax - mull 52(%esp) # 4-byte Folded Reload 
- movl %edx, %ecx - movl %eax, 24(%esp) # 4-byte Spill - movl %esi, %eax - mull 56(%esp) # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %esi, %eax - mull 64(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %esi, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %eax, 40(%esp) # 4-byte Spill - addl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - adcl 20(%esp), %esi # 4-byte Folded Reload - movl 44(%esp), %edx # 4-byte Reload - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %ecx, %eax - adcl $0, %eax - movl 40(%esp), %ecx # 4-byte Reload - addl %edi, %ecx - movl %ecx, 40(%esp) # 4-byte Spill - adcl %ebp, 32(%esp) # 4-byte Folded Spill - adcl %ebx, %esi - movl %esi, 48(%esp) # 4-byte Spill - adcl 36(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %ecx, %esi - imull 80(%esp), %esi # 4-byte Folded Reload - andl $1, %eax - movl %eax, %ecx - movl %esi, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %esi, %eax - mull 72(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 20(%esp) # 4-byte Spill - movl %esi, %eax - mull 68(%esp) # 4-byte Folded Reload - movl %edx, %ebx - movl %eax, %edi - movl %esi, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, %esi - addl %edi, %esi - adcl 20(%esp), %ebx # 4-byte Folded Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - movl 36(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 40(%esp), %eax # 4-byte Folded Reload - adcl 32(%esp), %esi # 4-byte Folded Reload - adcl 48(%esp), %ebx # 4-byte Folded Reload - adcl 44(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 44(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 116(%esp), %eax - movl 12(%eax), %ebp - movl %ebp, %eax - mull 52(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 52(%esp) # 4-byte Spill - movl %ebp, %eax - mull 56(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ebp, %eax - mull 64(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 32(%esp) # 4-byte Spill - movl %ebp, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl %edx, %ebp - addl 32(%esp), %ebp # 4-byte Folded Reload - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl 56(%esp), %edx # 4-byte Reload - adcl 52(%esp), %edx # 4-byte Folded Reload - movl %edi, %eax - adcl $0, %eax - movl 64(%esp), %edi # 4-byte Reload - addl %esi, %edi - movl %edi, 64(%esp) # 4-byte Spill - adcl %ebx, %ebp - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 60(%esp) # 4-byte Spill - adcl 36(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - sbbl %ebx, %ebx - movl 80(%esp), %esi # 4-byte Reload - imull %edi, %esi - movl %esi, 80(%esp) # 4-byte Spill - andl $1, %ebx - movl %esi, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 48(%esp) # 4-byte Spill - movl %esi, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, %ecx - movl %esi, %eax - mull 72(%esp) # 
4-byte Folded Reload - movl %edx, %esi - movl %eax, 40(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - mull 68(%esp) # 4-byte Folded Reload - addl 44(%esp), %eax # 4-byte Folded Reload - adcl 40(%esp), %edx # 4-byte Folded Reload - adcl 48(%esp), %esi # 4-byte Folded Reload - adcl $0, %edi - addl 64(%esp), %ecx # 4-byte Folded Reload - adcl %ebp, %eax - adcl 60(%esp), %edx # 4-byte Folded Reload - adcl 56(%esp), %esi # 4-byte Folded Reload - adcl 52(%esp), %edi # 4-byte Folded Reload - adcl $0, %ebx - movl %eax, %ebp - subl 84(%esp), %ebp # 4-byte Folded Reload - movl %edx, %ecx - sbbl 68(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 80(%esp) # 4-byte Spill - movl %esi, %ecx - sbbl 72(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 84(%esp) # 4-byte Spill - movl %edi, %ecx - sbbl 76(%esp), %ecx # 4-byte Folded Reload - sbbl $0, %ebx - andl $1, %ebx - jne .LBB53_2 -# BB#1: - movl %ebp, %eax -.LBB53_2: - movl 108(%esp), %ebp - movl %eax, (%ebp) - testb %bl, %bl - jne .LBB53_4 -# BB#3: - movl 80(%esp), %edx # 4-byte Reload -.LBB53_4: - movl %edx, 4(%ebp) - jne .LBB53_6 -# BB#5: - movl 84(%esp), %esi # 4-byte Reload -.LBB53_6: - movl %esi, 8(%ebp) - jne .LBB53_8 -# BB#7: - movl %ecx, %edi -.LBB53_8: - movl %edi, 12(%ebp) - addl $88, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end53: - .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L - - .globl mcl_fp_montNF4L - .align 16, 0x90 - .type mcl_fp_montNF4L,@function -mcl_fp_montNF4L: # @mcl_fp_montNF4L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $84, %esp - movl 108(%esp), %ecx - movl (%ecx), %eax - movl %ecx, %ebp - movl %eax, 52(%esp) # 4-byte Spill - movl 112(%esp), %ecx - movl (%ecx), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - mull %ecx - movl %eax, 40(%esp) # 4-byte Spill - movl %edx, 76(%esp) # 4-byte Spill - movl 116(%esp), %esi - movl -4(%esi), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, %ecx - imull %edx, %ecx - movl (%esi), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 12(%esi), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 8(%esi), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 4(%esi), %ebx - movl %ebx, 64(%esp) # 4-byte Spill - movl %ebp, %eax - movl 4(%eax), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 12(%eax), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 8(%eax), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edx - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edi - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ecx, %eax - mull %ebx - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ecx, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl %eax, %edi - movl %ebp, %eax - movl 36(%esp), %ebx # 4-byte Reload - mull %ebx - movl %edx, %ecx - movl %eax, 4(%esp) # 4-byte Spill - movl %esi, %eax - mull %ebx - movl %ebx, %esi - movl %edx, %ebx - movl %eax, (%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - mull %esi - movl %edx, %esi - movl %eax, %ebp - addl 76(%esp), %ebp # 4-byte Folded Reload - adcl (%esp), %esi # 4-byte Folded Reload - adcl 4(%esp), %ebx # 4-byte Folded Reload - adcl $0, %ecx - addl 40(%esp), %edi # 4-byte Folded Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl 16(%esp), %esi # 4-byte Folded Reload - adcl 24(%esp), %ebx # 4-byte Folded Reload - adcl $0, %ecx - addl 12(%esp), %ebp # 4-byte Folded Reload - adcl 20(%esp), %esi # 4-byte Folded 
Reload - adcl 28(%esp), %ebx # 4-byte Folded Reload - adcl 32(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - movl 112(%esp), %eax - movl 4(%eax), %edi - movl %edi, %eax - mull 44(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %edi, %eax - mull 48(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %edi, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 20(%esp) # 4-byte Spill - movl %edi, %eax - mull 52(%esp) # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - movl %edx, %edi - addl 20(%esp), %edi # 4-byte Folded Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl 36(%esp), %edx # 4-byte Reload - adcl 28(%esp), %edx # 4-byte Folded Reload - movl 76(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl %ebp, 32(%esp) # 4-byte Folded Spill - adcl %esi, %edi - adcl %ebx, %ecx - movl %ecx, 28(%esp) # 4-byte Spill - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 32(%esp), %esi # 4-byte Reload - movl %esi, %ecx - imull 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %eax - mull 72(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %eax - mull 68(%esp) # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, %ebp - movl %ecx, %eax - mull 64(%esp) # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, %ebx - movl %ecx, %eax - mull 80(%esp) # 4-byte Folded Reload - addl %esi, %eax - adcl %edi, %ebx - adcl 28(%esp), %ebp # 4-byte Folded Reload - movl 40(%esp), %ecx # 4-byte Reload - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl 76(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl %edx, %ebx - adcl 16(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 28(%esp) # 4-byte Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 112(%esp), %eax - movl 8(%eax), %ecx - movl %ecx, %eax - mull 44(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 24(%esp) # 4-byte Spill - movl %ecx, %eax - mull 48(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %ecx, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, %edi - movl %ecx, %eax - mull 52(%esp) # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - movl %edx, %ecx - addl %edi, %ecx - adcl 20(%esp), %ebp # 4-byte Folded Reload - movl 36(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - adcl $0, %esi - movl 32(%esp), %edx # 4-byte Reload - addl %ebx, %edx - movl %edx, 32(%esp) # 4-byte Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - adcl 40(%esp), %ebp # 4-byte Folded Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - adcl $0, %esi - movl %edx, %ebx - imull 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, %eax - mull 72(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 76(%esp) # 4-byte Spill - movl %ebx, %eax - mull 68(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ebx, %eax - mull 64(%esp) # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, %edi - movl %ebx, %eax - mull 80(%esp) # 4-byte 
Folded Reload - addl 32(%esp), %eax # 4-byte Folded Reload - adcl %ecx, %edi - movl 40(%esp), %ecx # 4-byte Reload - adcl %ebp, %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - adcl $0, %esi - addl %edx, %edi - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl 112(%esp), %eax - movl 12(%eax), %ecx - movl %ecx, %eax - mull 44(%esp) # 4-byte Folded Reload - movl %edx, %ebx - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - mull 48(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ecx, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull 52(%esp) # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl %edx, %ecx - addl 32(%esp), %ecx # 4-byte Folded Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 48(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - adcl $0, %ebx - movl 60(%esp), %edx # 4-byte Reload - addl %edi, %edx - movl %edx, 60(%esp) # 4-byte Spill - adcl 40(%esp), %ecx # 4-byte Folded Reload - adcl 76(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 40(%esp) # 4-byte Spill - adcl %esi, %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl $0, %ebx - movl 56(%esp), %edi # 4-byte Reload - imull %edx, %edi - movl %edi, %eax - mull 72(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, %esi - movl %edi, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl %edi, %eax - movl %edi, %ebp - mull 68(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, %edi - movl %ebp, %eax - mull 64(%esp) # 4-byte Folded Reload - movl 44(%esp), %ebp # 4-byte Reload - addl 60(%esp), %ebp # 4-byte Folded Reload - adcl %ecx, %eax - adcl 40(%esp), %edi # 4-byte Folded Reload - adcl 48(%esp), %esi # 4-byte Folded Reload - adcl $0, %ebx - addl 56(%esp), %eax # 4-byte Folded Reload - adcl %edx, %edi - adcl 52(%esp), %esi # 4-byte Folded Reload - adcl 76(%esp), %ebx # 4-byte Folded Reload - movl %eax, %edx - subl 80(%esp), %edx # 4-byte Folded Reload - movl %edi, %ebp - sbbl 64(%esp), %ebp # 4-byte Folded Reload - movl %esi, %ecx - sbbl 68(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 80(%esp) # 4-byte Spill - movl %ebx, %ecx - sbbl 72(%esp), %ecx # 4-byte Folded Reload - testl %ecx, %ecx - js .LBB54_2 -# BB#1: - movl %edx, %eax -.LBB54_2: - movl 104(%esp), %edx - movl %eax, (%edx) - js .LBB54_4 -# BB#3: - movl %ebp, %edi -.LBB54_4: - movl %edi, 4(%edx) - js .LBB54_6 -# BB#5: - movl 80(%esp), %esi # 4-byte Reload -.LBB54_6: - movl %esi, 8(%edx) - js .LBB54_8 -# BB#7: - movl %ecx, %ebx -.LBB54_8: - movl %ebx, 12(%edx) - addl $84, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end54: - .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L - - .globl mcl_fp_montRed4L - .align 16, 0x90 - .type mcl_fp_montRed4L,@function -mcl_fp_montRed4L: # @mcl_fp_montRed4L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $64, %esp - movl 92(%esp), %eax - movl -4(%eax), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl (%eax), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 88(%esp), %ecx - movl (%ecx), %esi - movl %esi, 40(%esp) # 4-byte Spill - imull %edx, %esi - movl 12(%eax), %edx - movl %edx, 
44(%esp) # 4-byte Spill - movl 8(%eax), %ebx - movl %ebx, 52(%esp) # 4-byte Spill - movl 4(%eax), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl %esi, %eax - mull %edx - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %esi, %eax - mull %ebx - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %esi, %eax - mull %ebp - movl %edx, %ebp - movl %eax, %ebx - movl %esi, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %edx, %edi - addl %ebx, %edi - adcl 24(%esp), %ebp # 4-byte Folded Reload - movl 32(%esp), %esi # 4-byte Reload - adcl 28(%esp), %esi # 4-byte Folded Reload - movl 36(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 40(%esp), %eax # 4-byte Folded Reload - adcl 4(%ecx), %edi - adcl 8(%ecx), %ebp - adcl 12(%ecx), %esi - movl %esi, 32(%esp) # 4-byte Spill - adcl 16(%ecx), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 28(%ecx), %eax - movl 24(%ecx), %edx - movl 20(%ecx), %ecx - adcl $0, %ecx - movl %ecx, 16(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 20(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 40(%esp) # 4-byte Spill - sbbl %eax, %eax - andl $1, %eax - movl %eax, 8(%esp) # 4-byte Spill - movl %edi, %ebx - imull 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, %eax - mull 44(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %ebx, %eax - mull 52(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %ebx, %eax - mull 48(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, (%esp) # 4-byte Spill - movl %ebx, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %edx, %esi - addl (%esp), %esi # 4-byte Folded Reload - movl %ecx, %ebx - adcl 4(%esp), %ebx # 4-byte Folded Reload - movl 24(%esp), %ecx # 4-byte Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl 28(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl %edi, %eax - adcl %ebp, %esi - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - movl 8(%esp), %edi # 4-byte Reload - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl $0, 20(%esp) # 4-byte Folded Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %esi, %ebp - imull 56(%esp), %ebp # 4-byte Folded Reload - movl %ebp, %eax - mull 44(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ebp, %eax - mull 52(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, %ebx - movl %ebp, %eax - mull 48(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 8(%esp) # 4-byte Spill - movl %ebp, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %edx, %ebp - addl 8(%esp), %ebp # 4-byte Folded Reload - adcl %ebx, %ecx - movl %ecx, %ebx - movl 32(%esp), %ecx # 4-byte Reload - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl 36(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl %esi, %eax - adcl 12(%esp), %ebp # 4-byte Folded Reload - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 32(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl 56(%esp), %esi # 4-byte Reload - imull %ebp, %esi - movl %esi, 56(%esp) # 4-byte Spill - movl %esi, %eax - 
mull	44(%esp) # 4-byte Folded Reload
-	movl	%edx, %ecx
-	movl	%eax, 28(%esp) # 4-byte Spill
-	movl	%esi, %eax
-	mull	52(%esp) # 4-byte Folded Reload
-	movl	%edx, %ebx
-	movl	%eax, 20(%esp) # 4-byte Spill
-	movl	%esi, %eax
-	mull	60(%esp) # 4-byte Folded Reload
-	movl	%edx, 16(%esp) # 4-byte Spill
-	movl	%eax, %esi
-	movl	56(%esp), %eax # 4-byte Reload
-	mull	48(%esp) # 4-byte Folded Reload
-	addl	16(%esp), %eax # 4-byte Folded Reload
-	adcl	20(%esp), %edx # 4-byte Folded Reload
-	adcl	28(%esp), %ebx # 4-byte Folded Reload
-	adcl	$0, %ecx
-	addl	%ebp, %esi
-	adcl	24(%esp), %eax # 4-byte Folded Reload
-	adcl	32(%esp), %edx # 4-byte Folded Reload
-	adcl	36(%esp), %ebx # 4-byte Folded Reload
-	movl	%ebx, 56(%esp) # 4-byte Spill
-	adcl	40(%esp), %ecx # 4-byte Folded Reload
-	adcl	$0, %edi
-	movl	%eax, %ebp
-	subl	60(%esp), %ebp # 4-byte Folded Reload
-	movl	%edx, %esi
-	sbbl	48(%esp), %esi # 4-byte Folded Reload
-	sbbl	52(%esp), %ebx # 4-byte Folded Reload
-	movl	%ebx, 60(%esp) # 4-byte Spill
-	movl	%ecx, %ebx
-	sbbl	44(%esp), %ebx # 4-byte Folded Reload
-	sbbl	$0, %edi
-	andl	$1, %edi
-	jne	.LBB55_2
-# BB#1:
-	movl	%ebp, %eax
-.LBB55_2:
-	movl	84(%esp), %ebp
-	movl	%eax, (%ebp)
-	movl	%edi, %eax
-	testb	%al, %al
-	jne	.LBB55_4
-# BB#3:
-	movl	%esi, %edx
-.LBB55_4:
-	movl	%edx, 4(%ebp)
-	movl	56(%esp), %eax # 4-byte Reload
-	jne	.LBB55_6
-# BB#5:
-	movl	60(%esp), %eax # 4-byte Reload
-.LBB55_6:
-	movl	%eax, 8(%ebp)
-	jne	.LBB55_8
-# BB#7:
-	movl	%ebx, %ecx
-.LBB55_8:
-	movl	%ecx, 12(%ebp)
-	addl	$64, %esp
-	popl	%esi
-	popl	%edi
-	popl	%ebx
-	popl	%ebp
-	retl
-.Lfunc_end55:
-	.size	mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L
-
-	.globl	mcl_fp_addPre4L
-	.align	16, 0x90
-	.type	mcl_fp_addPre4L,@function
-mcl_fp_addPre4L: # @mcl_fp_addPre4L
-# BB#0:
-	pushl	%ebx
-	pushl	%edi
-	pushl	%esi
-	movl	24(%esp), %eax
-	movl	(%eax), %ecx
-	movl	4(%eax), %edx
-	movl	20(%esp), %esi
-	addl	(%esi), %ecx
-	adcl	4(%esi), %edx
-	movl	12(%eax), %edi
-	movl	8(%eax), %eax
-	adcl	8(%esi), %eax
-	movl	12(%esi), %esi
-	movl	16(%esp), %ebx
-	movl	%ecx, (%ebx)
-	movl	%edx, 4(%ebx)
-	movl	%eax, 8(%ebx)
-	adcl	%edi, %esi
-	movl	%esi, 12(%ebx)
-	sbbl	%eax, %eax
-	andl	$1, %eax
-	popl	%esi
-	popl	%edi
-	popl	%ebx
-	retl
-.Lfunc_end56:
-	.size	mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L
-
-	.globl	mcl_fp_subPre4L
-	.align	16, 0x90
-	.type	mcl_fp_subPre4L,@function
-mcl_fp_subPre4L: # @mcl_fp_subPre4L
-# BB#0:
-	pushl	%ebp
-	pushl	%ebx
-	pushl	%edi
-	pushl	%esi
-	movl	24(%esp), %ecx
-	movl	(%ecx), %edx
-	movl	4(%ecx), %esi
-	xorl	%eax, %eax
-	movl	28(%esp), %edi
-	subl	(%edi), %edx
-	sbbl	4(%edi), %esi
-	movl	8(%ecx), %ebx
-	sbbl	8(%edi), %ebx
-	movl	12(%edi), %edi
-	movl	12(%ecx), %ecx
-	movl	20(%esp), %ebp
-	movl	%edx, (%ebp)
-	movl	%esi, 4(%ebp)
-	movl	%ebx, 8(%ebp)
-	sbbl	%edi, %ecx
-	movl	%ecx, 12(%ebp)
-	sbbl	$0, %eax
-	andl	$1, %eax
-	popl	%esi
-	popl	%edi
-	popl	%ebx
-	popl	%ebp
-	retl
-.Lfunc_end57:
-	.size	mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L
-
-	.globl	mcl_fp_shr1_4L
-	.align	16, 0x90
-	.type	mcl_fp_shr1_4L,@function
-mcl_fp_shr1_4L: # @mcl_fp_shr1_4L
-# BB#0:
-	pushl	%edi
-	pushl	%esi
-	movl	16(%esp), %eax
-	movl	12(%eax), %ecx
-	movl	8(%eax), %edx
-	movl	(%eax), %esi
-	movl	4(%eax), %eax
-	shrdl	$1, %eax, %esi
-	movl	12(%esp), %edi
-	movl	%esi, (%edi)
-	shrdl	$1, %edx, %eax
-	movl	%eax, 4(%edi)
-	shrdl	$1, %ecx, %edx
-	movl	%edx, 8(%edi)
-	shrl	%ecx
-	movl	%ecx, 12(%edi)
-	popl	%esi
-	popl	%edi
-	retl
-.Lfunc_end58:
-	.size	mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L
-
-	.globl	mcl_fp_add4L
-
.align 16, 0x90 - .type mcl_fp_add4L,@function -mcl_fp_add4L: # @mcl_fp_add4L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 28(%esp), %edi - movl (%edi), %eax - movl 4(%edi), %ecx - movl 24(%esp), %esi - addl (%esi), %eax - adcl 4(%esi), %ecx - movl 8(%edi), %edx - adcl 8(%esi), %edx - movl 12(%esi), %esi - adcl 12(%edi), %esi - movl 20(%esp), %edi - movl %eax, (%edi) - movl %ecx, 4(%edi) - movl %edx, 8(%edi) - movl %esi, 12(%edi) - sbbl %ebx, %ebx - andl $1, %ebx - movl 32(%esp), %ebp - subl (%ebp), %eax - sbbl 4(%ebp), %ecx - sbbl 8(%ebp), %edx - sbbl 12(%ebp), %esi - sbbl $0, %ebx - testb $1, %bl - jne .LBB59_2 -# BB#1: # %nocarry - movl %eax, (%edi) - movl %ecx, 4(%edi) - movl %edx, 8(%edi) - movl %esi, 12(%edi) -.LBB59_2: # %carry - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end59: - .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L - - .globl mcl_fp_addNF4L - .align 16, 0x90 - .type mcl_fp_addNF4L,@function -mcl_fp_addNF4L: # @mcl_fp_addNF4L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $8, %esp - movl 36(%esp), %edx - movl (%edx), %esi - movl 4(%edx), %ecx - movl 32(%esp), %edi - addl (%edi), %esi - adcl 4(%edi), %ecx - movl 12(%edx), %ebp - movl 8(%edx), %edx - adcl 8(%edi), %edx - adcl 12(%edi), %ebp - movl 40(%esp), %eax - movl %esi, %ebx - subl (%eax), %ebx - movl %ecx, %edi - sbbl 4(%eax), %edi - movl %edi, (%esp) # 4-byte Spill - movl %edx, %edi - movl 40(%esp), %eax - sbbl 8(%eax), %edi - movl %edi, 4(%esp) # 4-byte Spill - movl %ebp, %edi - movl 40(%esp), %eax - sbbl 12(%eax), %edi - testl %edi, %edi - js .LBB60_2 -# BB#1: - movl %ebx, %esi -.LBB60_2: - movl 28(%esp), %ebx - movl %esi, (%ebx) - js .LBB60_4 -# BB#3: - movl (%esp), %ecx # 4-byte Reload -.LBB60_4: - movl %ecx, 4(%ebx) - js .LBB60_6 -# BB#5: - movl 4(%esp), %edx # 4-byte Reload -.LBB60_6: - movl %edx, 8(%ebx) - js .LBB60_8 -# BB#7: - movl %edi, %ebp -.LBB60_8: - movl %ebp, 12(%ebx) - addl $8, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end60: - .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L - - .globl mcl_fp_sub4L - .align 16, 0x90 - .type mcl_fp_sub4L,@function -mcl_fp_sub4L: # @mcl_fp_sub4L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %esi - movl (%esi), %eax - movl 4(%esi), %ecx - xorl %ebx, %ebx - movl 28(%esp), %edi - subl (%edi), %eax - sbbl 4(%edi), %ecx - movl 8(%esi), %edx - sbbl 8(%edi), %edx - movl 12(%esi), %esi - sbbl 12(%edi), %esi - movl 20(%esp), %edi - movl %eax, (%edi) - movl %ecx, 4(%edi) - movl %edx, 8(%edi) - movl %esi, 12(%edi) - sbbl $0, %ebx - testb $1, %bl - je .LBB61_2 -# BB#1: # %carry - movl 32(%esp), %ebx - addl (%ebx), %eax - movl 8(%ebx), %ebp - adcl 4(%ebx), %ecx - movl 12(%ebx), %ebx - movl %eax, (%edi) - movl %ecx, 4(%edi) - adcl %edx, %ebp - movl %ebp, 8(%edi) - adcl %esi, %ebx - movl %ebx, 12(%edi) -.LBB61_2: # %nocarry - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end61: - .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L - - .globl mcl_fp_subNF4L - .align 16, 0x90 - .type mcl_fp_subNF4L,@function -mcl_fp_subNF4L: # @mcl_fp_subNF4L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $8, %esp - movl 32(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %ecx - movl 36(%esp), %esi - subl (%esi), %edx - movl %edx, (%esp) # 4-byte Spill - sbbl 4(%esi), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 12(%eax), %edi - movl 8(%eax), %edx - sbbl 8(%esi), %edx - sbbl 12(%esi), %edi - movl %edi, %esi - sarl $31, %esi - movl 40(%esp), %eax - movl 
12(%eax), %ebp - andl %esi, %ebp - movl 8(%eax), %ecx - andl %esi, %ecx - movl 40(%esp), %eax - movl 4(%eax), %eax - andl %esi, %eax - movl 40(%esp), %ebx - andl (%ebx), %esi - addl (%esp), %esi # 4-byte Folded Reload - adcl 4(%esp), %eax # 4-byte Folded Reload - movl 28(%esp), %ebx - movl %esi, (%ebx) - adcl %edx, %ecx - movl %eax, 4(%ebx) - movl %ecx, 8(%ebx) - adcl %edi, %ebp - movl %ebp, 12(%ebx) - addl $8, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end62: - .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L - - .globl mcl_fpDbl_add4L - .align 16, 0x90 - .type mcl_fpDbl_add4L,@function -mcl_fpDbl_add4L: # @mcl_fpDbl_add4L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $12, %esp - movl 40(%esp), %eax - movl (%eax), %edi - movl 4(%eax), %edx - movl 36(%esp), %esi - addl (%esi), %edi - adcl 4(%esi), %edx - movl 8(%eax), %ebx - adcl 8(%esi), %ebx - movl 12(%esi), %ebp - movl 32(%esp), %ecx - movl %edi, (%ecx) - movl 16(%esi), %edi - adcl 12(%eax), %ebp - adcl 16(%eax), %edi - movl %edx, 4(%ecx) - movl 28(%eax), %edx - movl %edx, 8(%esp) # 4-byte Spill - movl %ebx, 8(%ecx) - movl 24(%eax), %ebx - movl 20(%eax), %eax - movl %ebp, 12(%ecx) - movl 20(%esi), %edx - adcl %eax, %edx - movl 28(%esi), %ecx - movl 24(%esi), %ebp - adcl %ebx, %ebp - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - sbbl %ebx, %ebx - andl $1, %ebx - movl 44(%esp), %eax - movl %edi, %esi - subl (%eax), %esi - movl %esi, (%esp) # 4-byte Spill - movl %edx, %esi - sbbl 4(%eax), %esi - movl %esi, 4(%esp) # 4-byte Spill - movl %ebp, %esi - sbbl 8(%eax), %esi - sbbl 12(%eax), %ecx - sbbl $0, %ebx - andl $1, %ebx - jne .LBB63_2 -# BB#1: - movl %esi, %ebp -.LBB63_2: - testb %bl, %bl - jne .LBB63_4 -# BB#3: - movl (%esp), %edi # 4-byte Reload -.LBB63_4: - movl 32(%esp), %eax - movl %edi, 16(%eax) - jne .LBB63_6 -# BB#5: - movl 4(%esp), %edx # 4-byte Reload -.LBB63_6: - movl %edx, 20(%eax) - movl %ebp, 24(%eax) - movl 8(%esp), %edx # 4-byte Reload - jne .LBB63_8 -# BB#7: - movl %ecx, %edx -.LBB63_8: - movl %edx, 28(%eax) - addl $12, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end63: - .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L - - .globl mcl_fpDbl_sub4L - .align 16, 0x90 - .type mcl_fpDbl_sub4L,@function -mcl_fpDbl_sub4L: # @mcl_fpDbl_sub4L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - pushl %eax - movl 28(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - movl 32(%esp), %ebp - subl (%ebp), %edx - sbbl 4(%ebp), %esi - movl 8(%eax), %ebx - sbbl 8(%ebp), %ebx - movl 24(%esp), %ecx - movl %edx, (%ecx) - movl 12(%eax), %edx - sbbl 12(%ebp), %edx - movl %esi, 4(%ecx) - movl 16(%eax), %edi - sbbl 16(%ebp), %edi - movl %ebx, 8(%ecx) - movl 20(%ebp), %esi - movl %edx, 12(%ecx) - movl 20(%eax), %ebx - sbbl %esi, %ebx - movl 24(%ebp), %edx - movl 24(%eax), %esi - sbbl %edx, %esi - movl 28(%ebp), %edx - movl 28(%eax), %eax - sbbl %edx, %eax - movl %eax, (%esp) # 4-byte Spill - movl $0, %edx - sbbl $0, %edx - andl $1, %edx - movl 36(%esp), %ecx - movl (%ecx), %eax - jne .LBB64_1 -# BB#2: - xorl %ebp, %ebp - jmp .LBB64_3 -.LBB64_1: - movl 4(%ecx), %ebp -.LBB64_3: - testb %dl, %dl - jne .LBB64_5 -# BB#4: - movl $0, %eax -.LBB64_5: - jne .LBB64_6 -# BB#7: - movl $0, %edx - jmp .LBB64_8 -.LBB64_6: - movl 12(%ecx), %edx -.LBB64_8: - jne .LBB64_9 -# BB#10: - xorl %ecx, %ecx - jmp .LBB64_11 -.LBB64_9: - movl 8(%ecx), %ecx -.LBB64_11: - addl %edi, %eax - adcl %ebx, %ebp - movl 24(%esp), %edi - movl %eax, 16(%edi) - adcl 
%esi, %ecx - movl %ebp, 20(%edi) - movl %ecx, 24(%edi) - adcl (%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%edi) - addl $4, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end64: - .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L - - .globl mcl_fp_mulUnitPre5L - .align 16, 0x90 - .type mcl_fp_mulUnitPre5L,@function -mcl_fp_mulUnitPre5L: # @mcl_fp_mulUnitPre5L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $20, %esp - movl 48(%esp), %esi - movl 44(%esp), %ecx - movl %esi, %eax - mull 16(%ecx) - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %esi, %eax - mull 12(%ecx) - movl %edx, %ebx - movl %eax, 8(%esp) # 4-byte Spill - movl %esi, %eax - mull 8(%ecx) - movl %edx, %edi - movl %eax, 4(%esp) # 4-byte Spill - movl %esi, %eax - mull 4(%ecx) - movl %edx, %ebp - movl %eax, (%esp) # 4-byte Spill - movl %esi, %eax - mull (%ecx) - movl 40(%esp), %ecx - movl %eax, (%ecx) - addl (%esp), %edx # 4-byte Folded Reload - movl %edx, 4(%ecx) - adcl 4(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 8(%ecx) - adcl 8(%esp), %edi # 4-byte Folded Reload - movl %edi, 12(%ecx) - adcl 12(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%ecx) - movl 16(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 20(%ecx) - addl $20, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end65: - .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L - - .globl mcl_fpDbl_mulPre5L - .align 16, 0x90 - .type mcl_fpDbl_mulPre5L,@function -mcl_fpDbl_mulPre5L: # @mcl_fpDbl_mulPre5L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $64, %esp - movl 88(%esp), %esi - movl (%esi), %ebp - movl 92(%esp), %eax - movl (%eax), %ebx - movl %eax, %edi - movl %ebp, %eax - mull %ebx - movl %edx, 36(%esp) # 4-byte Spill - movl 84(%esp), %edx - movl %eax, (%edx) - movl %esi, %eax - movl 4(%eax), %esi - movl 8(%eax), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 12(%eax), %ecx - movl %ecx, (%esp) # 4-byte Spill - movl 16(%eax), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 4(%edi), %edi - movl %esi, %eax - mull %edi - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ebp, %eax - mull %edi - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %esi, %eax - mull %ebx - movl %edx, 12(%esp) # 4-byte Spill - movl %eax, %ebp - movl %ecx, %eax - mull %edi - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl (%esp), %esi # 4-byte Reload - movl %esi, %eax - mull %edi - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl 8(%esp), %ecx # 4-byte Reload - movl %ecx, %eax - mull %edi - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - mull %ebx - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %esi, %eax - mull %ebx - movl %edx, %esi - movl %eax, (%esp) # 4-byte Spill - movl %ecx, %eax - mull %ebx - movl %eax, %edi - movl %edx, %ebx - addl 36(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %edi # 4-byte Folded Reload - adcl (%esp), %ebx # 4-byte Folded Reload - adcl 4(%esp), %esi # 4-byte Folded Reload - movl 60(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl 24(%esp), %ebp # 4-byte Folded Reload - movl 84(%esp), %eax - movl %ebp, 4(%eax) - adcl 40(%esp), %edi # 4-byte Folded Reload - adcl 16(%esp), %ebx # 4-byte Folded Reload - adcl 20(%esp), %esi # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - sbbl 
%ebp, %ebp - andl $1, %ebp - addl 52(%esp), %edi # 4-byte Folded Reload - adcl 56(%esp), %ebx # 4-byte Folded Reload - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 60(%esp) # 4-byte Spill - adcl 48(%esp), %ebp # 4-byte Folded Reload - movl 88(%esp), %eax - movl %eax, %esi - movl 16(%esi), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 92(%esp), %eax - movl 8(%eax), %ecx - movl %edx, %eax - mull %ecx - movl %eax, 56(%esp) # 4-byte Spill - movl %edx, 32(%esp) # 4-byte Spill - movl 12(%esi), %eax - movl %eax, 40(%esp) # 4-byte Spill - mull %ecx - movl %eax, 52(%esp) # 4-byte Spill - movl %edx, 20(%esp) # 4-byte Spill - movl %esi, %edx - movl 8(%edx), %eax - movl %eax, 36(%esp) # 4-byte Spill - mull %ecx - movl %eax, 48(%esp) # 4-byte Spill - movl %edx, 12(%esp) # 4-byte Spill - movl %esi, %eax - movl (%eax), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 4(%eax), %eax - movl %eax, 24(%esp) # 4-byte Spill - mull %ecx - movl %edx, 8(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %esi, %eax - mull %ecx - addl %edi, %eax - movl 84(%esp), %ecx - movl %eax, 8(%ecx) - movl 4(%esp), %ecx # 4-byte Reload - adcl %ebx, %ecx - movl 48(%esp), %edi # 4-byte Reload - adcl 16(%esp), %edi # 4-byte Folded Reload - movl 52(%esp), %esi # 4-byte Reload - adcl 60(%esp), %esi # 4-byte Folded Reload - movl 56(%esp), %eax # 4-byte Reload - adcl %ebp, %eax - sbbl %ebx, %ebx - andl $1, %ebx - addl %edx, %ecx - adcl 8(%esp), %edi # 4-byte Folded Reload - movl %edi, 48(%esp) # 4-byte Spill - adcl 12(%esp), %esi # 4-byte Folded Reload - movl %esi, 52(%esp) # 4-byte Spill - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 92(%esp), %eax - movl 12(%eax), %ebp - movl 44(%esp), %eax # 4-byte Reload - mull %ebp - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 60(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - mull %ebp - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, %esi - movl 36(%esp), %eax # 4-byte Reload - mull %ebp - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, %ebx - movl 24(%esp), %eax # 4-byte Reload - mull %ebp - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, %edi - movl 28(%esp), %eax # 4-byte Reload - mull %ebp - movl %edx, 24(%esp) # 4-byte Spill - addl %ecx, %eax - movl 84(%esp), %edx - movl %eax, 12(%edx) - adcl 48(%esp), %edi # 4-byte Folded Reload - adcl 52(%esp), %ebx # 4-byte Folded Reload - adcl 56(%esp), %esi # 4-byte Folded Reload - movl %esi, 4(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 60(%esp) # 4-byte Folded Spill - movl 92(%esp), %eax - movl 16(%eax), %ebp - sbbl %ecx, %ecx - movl %ebp, %eax - movl 88(%esp), %esi - mull 16(%esi) - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ebp, %eax - mull 12(%esi) - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ebp, %eax - mull 8(%esi) - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %ebp, %eax - mull 4(%esi) - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ebp, %eax - mull (%esi) - movl %eax, %ebp - andl $1, %ecx - addl 24(%esp), %edi # 4-byte Folded Reload - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl 4(%esp), %esi # 4-byte Reload - adcl 36(%esp), %esi # 4-byte Folded Reload - movl 60(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded 
Reload - adcl 44(%esp), %ecx # 4-byte Folded Reload - addl %ebp, %edi - movl 84(%esp), %ebp - movl %edi, 16(%ebp) - adcl 8(%esp), %ebx # 4-byte Folded Reload - adcl 12(%esp), %esi # 4-byte Folded Reload - movl %eax, %edi - adcl 16(%esp), %edi # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - sbbl %eax, %eax - andl $1, %eax - addl %edx, %ebx - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %ebp, %edx - movl %ebx, 20(%edx) - adcl 48(%esp), %edi # 4-byte Folded Reload - movl %esi, 24(%edx) - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl %edi, 28(%edx) - movl %ecx, 32(%edx) - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%edx) - addl $64, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end66: - .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L - - .globl mcl_fpDbl_sqrPre5L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre5L,@function -mcl_fpDbl_sqrPre5L: # @mcl_fpDbl_sqrPre5L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $60, %esp - movl 84(%esp), %ebx - movl 16(%ebx), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl (%ebx), %edi - movl 4(%ebx), %ecx - mull %ecx - movl %eax, 32(%esp) # 4-byte Spill - movl %edx, 40(%esp) # 4-byte Spill - movl 12(%ebx), %esi - movl %esi, %eax - mull %ecx - movl %eax, 24(%esp) # 4-byte Spill - movl %edx, 36(%esp) # 4-byte Spill - movl 8(%ebx), %ebx - movl %ebx, %eax - mull %ecx - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - mull %edi - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %esi, %eax - mull %edi - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %ebx, %eax - mull %edi - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, %ebp - movl %ecx, %eax - mull %ecx - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edi - movl %edx, %esi - movl %esi, (%esp) # 4-byte Spill - movl %eax, %ecx - movl %edi, %eax - mull %edi - movl 80(%esp), %edi - movl %eax, (%edi) - addl %ecx, %edx - adcl %esi, %ebp - movl 56(%esp), %esi # 4-byte Reload - adcl 4(%esp), %esi # 4-byte Folded Reload - movl 48(%esp), %edi # 4-byte Reload - adcl 12(%esp), %edi # 4-byte Folded Reload - movl 52(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl %ecx, %edx - movl 80(%esp), %ecx - movl %edx, 4(%ecx) - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl %esi, %ecx - adcl 20(%esp), %ecx # 4-byte Folded Reload - adcl 24(%esp), %edi # 4-byte Folded Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - sbbl %esi, %esi - andl $1, %esi - addl (%esp), %ebp # 4-byte Folded Reload - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 56(%esp) # 4-byte Spill - adcl 28(%esp), %edi # 4-byte Folded Reload - movl %edi, 48(%esp) # 4-byte Spill - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - adcl 40(%esp), %esi # 4-byte Folded Reload - movl 44(%esp), %eax # 4-byte Reload - mull %ebx - movl %eax, 40(%esp) # 4-byte Spill - movl %edx, 20(%esp) # 4-byte Spill - movl 84(%esp), %ecx - movl 12(%ecx), %edi - movl %edi, %eax - mull %ebx - movl %eax, 36(%esp) # 4-byte Spill - movl %edx, 16(%esp) # 4-byte Spill - movl %ecx, %eax - movl (%eax), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 4(%eax), %eax - movl %eax, 28(%esp) # 4-byte Spill - mull %ebx - movl %edx, 12(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull %ebx - movl %edx, 8(%esp) # 4-byte Spill - movl %eax, %ecx - movl %ebx, %eax - mull 
%ebx - movl %eax, 44(%esp) # 4-byte Spill - addl %ebp, %ecx - movl 80(%esp), %eax - movl %ecx, 8(%eax) - movl 32(%esp), %ebx # 4-byte Reload - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl 44(%esp), %ebp # 4-byte Reload - adcl 48(%esp), %ebp # 4-byte Folded Reload - movl 36(%esp), %ecx # 4-byte Reload - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl 40(%esp), %eax # 4-byte Reload - adcl %esi, %eax - sbbl %esi, %esi - andl $1, %esi - addl 8(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 32(%esp) # 4-byte Spill - adcl 12(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 44(%esp) # 4-byte Spill - adcl %edx, %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%esp) # 4-byte Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - movl 84(%esp), %eax - movl 8(%eax), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 16(%eax), %ebx - movl %ebx, %eax - mull %edi - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edi - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, %esi - movl 28(%esp), %eax # 4-byte Reload - mull %edi - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, %ebp - movl 24(%esp), %eax # 4-byte Reload - mull %edi - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, %ecx - movl %edi, %eax - mull %edi - movl %eax, %edi - movl %edx, 24(%esp) # 4-byte Spill - addl 32(%esp), %ecx # 4-byte Folded Reload - movl 80(%esp), %eax - movl %ecx, 12(%eax) - adcl 44(%esp), %ebp # 4-byte Folded Reload - adcl 36(%esp), %esi # 4-byte Folded Reload - adcl 40(%esp), %edi # 4-byte Folded Reload - movl 12(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - sbbl %ecx, %ecx - movl %ebx, %eax - movl 84(%esp), %edx - mull 12(%edx) - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ebx, %eax - movl 84(%esp), %edx - mull 4(%edx) - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ebx, %eax - movl 84(%esp), %edx - mull (%edx) - movl %edx, 12(%esp) # 4-byte Spill - movl %eax, (%esp) # 4-byte Spill - movl %ebx, %eax - mull %ebx - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - mull %ebx - movl %eax, 20(%esp) # 4-byte Spill - andl $1, %ecx - addl 16(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %esi # 4-byte Folded Reload - adcl 48(%esp), %edi # 4-byte Folded Reload - movl 56(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - adcl 52(%esp), %ecx # 4-byte Folded Reload - addl (%esp), %ebp # 4-byte Folded Reload - movl 80(%esp), %ebx - movl %ebp, 16(%ebx) - adcl 8(%esp), %esi # 4-byte Folded Reload - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %eax, %ebp - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl 4(%esp), %ecx # 4-byte Folded Reload - sbbl %eax, %eax - andl $1, %eax - addl 12(%esp), %esi # 4-byte Folded Reload - adcl 36(%esp), %edi # 4-byte Folded Reload - movl %esi, 20(%ebx) - adcl %edx, %ebp - movl %edi, 24(%ebx) - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ebp, 28(%ebx) - movl %ecx, 32(%ebx) - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%ebx) - addl $60, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end67: - .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L - - .globl mcl_fp_mont5L - .align 16, 0x90 - .type mcl_fp_mont5L,@function -mcl_fp_mont5L: # @mcl_fp_mont5L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $112, %esp - movl 
136(%esp), %ebx - movl (%ebx), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 140(%esp), %ecx - movl (%ecx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - mull %ecx - movl %eax, 36(%esp) # 4-byte Spill - movl %edx, 40(%esp) # 4-byte Spill - movl 144(%esp), %esi - movl -4(%esi), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl %eax, %ecx - imull %edx, %ecx - movl (%esi), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 16(%esi), %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 12(%esi), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 8(%esi), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 4(%esi), %esi - movl %esi, 88(%esp) # 4-byte Spill - movl 4(%ebx), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 16(%ebx), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 12(%ebx), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 8(%ebx), %ebx - movl %ebx, 76(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edx - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ecx, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %ecx, %eax - mull %esi - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 16(%esp) # 4-byte Spill - movl %ebp, %eax - movl 32(%esp), %ecx # 4-byte Reload - mull %ecx - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %edi, %eax - mull %ecx - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %ebx, %eax - mull %ecx - movl %edx, %ebp - movl %eax, (%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, %ebx - movl %eax, %edi - addl 40(%esp), %edi # 4-byte Folded Reload - adcl (%esp), %ebx # 4-byte Folded Reload - adcl 4(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 32(%esp) # 4-byte Spill - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 60(%esp) # 4-byte Folded Spill - adcl $0, 64(%esp) # 4-byte Folded Spill - addl 12(%esp), %esi # 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - adcl 20(%esp), %esi # 4-byte Folded Reload - movl 48(%esp), %edx # 4-byte Reload - adcl 24(%esp), %edx # 4-byte Folded Reload - movl 52(%esp), %ecx # 4-byte Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl 56(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 16(%esp), %ebp # 4-byte Reload - addl 36(%esp), %ebp # 4-byte Folded Reload - adcl %edi, 40(%esp) # 4-byte Folded Spill - adcl %ebx, %esi - movl %esi, 44(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%esp) # 4-byte Spill - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - sbbl %eax, %eax - andl $1, %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 140(%esp), %eax - movl 4(%eax), %edi - movl %edi, %eax - mull 72(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %edi, %eax - mull 68(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %edi, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 24(%esp) # 4-byte Spill - movl %edi, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, %esi - 
movl %eax, %ebp - movl %edi, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %eax, %ebx - addl %ebp, %edx - adcl 24(%esp), %esi # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl 64(%esp), %edi # 4-byte Reload - adcl 36(%esp), %edi # 4-byte Folded Reload - movl 60(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl 40(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 36(%esp) # 4-byte Spill - adcl 44(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - adcl 48(%esp), %esi # 4-byte Folded Reload - movl %esi, 48(%esp) # 4-byte Spill - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - adcl 56(%esp), %edi # 4-byte Folded Reload - movl %edi, 64(%esp) # 4-byte Spill - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %ebx, %ebp - imull 96(%esp), %ebp # 4-byte Folded Reload - andl $1, %eax - movl %eax, %ebx - movl %ebp, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - movl %ebp, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 32(%esp) # 4-byte Spill - movl %ebp, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 28(%esp) # 4-byte Spill - movl %ebp, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 24(%esp) # 4-byte Spill - movl %ebp, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %ebp - addl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %edi # 4-byte Folded Reload - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %ecx, %edx - adcl 56(%esp), %edx # 4-byte Folded Reload - movl 52(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl 36(%esp), %eax # 4-byte Folded Reload - adcl 44(%esp), %ebp # 4-byte Folded Reload - adcl 48(%esp), %edi # 4-byte Folded Reload - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 44(%esp) # 4-byte Spill - adcl 64(%esp), %edx # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%esp) # 4-byte Spill - adcl $0, %ebx - movl %ebx, 36(%esp) # 4-byte Spill - movl 140(%esp), %eax - movl 8(%eax), %ebx - movl %ebx, %eax - mull 72(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ebx, %eax - mull 68(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ebx, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ebx, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 48(%esp) # 4-byte Spill - movl %ebx, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %eax, %esi - addl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl 64(%esp), %ebx # 4-byte Reload - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl 60(%esp), %edx # 4-byte Reload - adcl 40(%esp), %edx # 4-byte Folded Reload - movl 56(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl %ebp, %esi - movl %esi, 40(%esp) # 4-byte Spill - adcl %edi, 48(%esp) # 4-byte Folded Spill - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 44(%esp) # 4-byte Spill - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - adcl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 
56(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %esi, %ebp - imull 96(%esp), %ebp # 4-byte Folded Reload - andl $1, %eax - movl %eax, %ebx - movl %ebp, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ebp, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 32(%esp) # 4-byte Spill - movl %ebp, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 28(%esp) # 4-byte Spill - movl %ebp, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 24(%esp) # 4-byte Spill - movl %ebp, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %ebp - addl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %edi, %edx - adcl 32(%esp), %edx # 4-byte Folded Reload - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl 52(%esp), %edi # 4-byte Reload - adcl $0, %edi - addl 40(%esp), %eax # 4-byte Folded Reload - adcl 48(%esp), %ebp # 4-byte Folded Reload - adcl 44(%esp), %esi # 4-byte Folded Reload - adcl 64(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 32(%esp) # 4-byte Spill - adcl 56(%esp), %edi # 4-byte Folded Reload - movl %edi, 52(%esp) # 4-byte Spill - adcl $0, %ebx - movl %ebx, 36(%esp) # 4-byte Spill - movl 140(%esp), %eax - movl 12(%eax), %edi - movl %edi, %eax - mull 72(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %edi, %eax - mull 68(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %edi, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %edi, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 48(%esp) # 4-byte Spill - movl %edi, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %eax, %ebx - addl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl 64(%esp), %edi # 4-byte Reload - adcl 28(%esp), %edi # 4-byte Folded Reload - movl 60(%esp), %edx # 4-byte Reload - adcl 40(%esp), %edx # 4-byte Folded Reload - movl 56(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl %ebp, %ebx - movl %ebx, 40(%esp) # 4-byte Spill - adcl %esi, 48(%esp) # 4-byte Folded Spill - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 44(%esp) # 4-byte Spill - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 64(%esp) # 4-byte Spill - adcl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %ebx, %ebp - imull 96(%esp), %ebp # 4-byte Folded Reload - andl $1, %eax - movl %eax, %ebx - movl %ebp, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ebp, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 32(%esp) # 4-byte Spill - movl %ebp, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 28(%esp) # 4-byte Spill - movl %ebp, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 24(%esp) # 4-byte Spill - movl %ebp, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %ebp - addl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %edi # 4-byte Folded Reload - adcl 32(%esp), %esi # 4-byte 
Folded Reload - movl %ecx, %edx - adcl 36(%esp), %edx # 4-byte Folded Reload - movl 52(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl 40(%esp), %eax # 4-byte Folded Reload - adcl 48(%esp), %ebp # 4-byte Folded Reload - adcl 44(%esp), %edi # 4-byte Folded Reload - adcl 64(%esp), %esi # 4-byte Folded Reload - movl %esi, 48(%esp) # 4-byte Spill - adcl 60(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%esp) # 4-byte Spill - adcl $0, %ebx - movl %ebx, 64(%esp) # 4-byte Spill - movl 140(%esp), %eax - movl 16(%eax), %ebx - movl %ebx, %eax - mull 72(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 56(%esp) # 4-byte Spill - movl %ebx, %eax - mull 68(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %ebx, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl %ebx, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 40(%esp) # 4-byte Spill - movl %ebx, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - addl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl 76(%esp), %ebx # 4-byte Reload - adcl 68(%esp), %ebx # 4-byte Folded Reload - movl 72(%esp), %edx # 4-byte Reload - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %esi, %eax - adcl $0, %eax - movl 84(%esp), %esi # 4-byte Reload - addl %ebp, %esi - movl %esi, 84(%esp) # 4-byte Spill - adcl %edi, 80(%esp) # 4-byte Folded Spill - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 76(%esp) # 4-byte Spill - adcl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - sbbl %ebx, %ebx - movl 96(%esp), %ecx # 4-byte Reload - imull %esi, %ecx - movl %ecx, 96(%esp) # 4-byte Spill - andl $1, %ebx - movl %ecx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 60(%esp) # 4-byte Spill - movl %ecx, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 52(%esp) # 4-byte Spill - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - movl %ecx, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, %ecx - movl 96(%esp), %eax # 4-byte Reload - mull 88(%esp) # 4-byte Folded Reload - addl 48(%esp), %eax # 4-byte Folded Reload - adcl %ecx, %edx - adcl 52(%esp), %esi # 4-byte Folded Reload - adcl 60(%esp), %ebp # 4-byte Folded Reload - adcl $0, %edi - movl 56(%esp), %ecx # 4-byte Reload - addl 84(%esp), %ecx # 4-byte Folded Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - adcl 68(%esp), %edx # 4-byte Folded Reload - adcl 76(%esp), %esi # 4-byte Folded Reload - adcl 72(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 96(%esp) # 4-byte Spill - adcl 64(%esp), %edi # 4-byte Folded Reload - adcl $0, %ebx - movl %eax, %ecx - subl 100(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 100(%esp) # 4-byte Spill - movl %edx, %ecx - sbbl 88(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 88(%esp) # 4-byte Spill - movl %esi, %ecx - sbbl 104(%esp), %ecx # 4-byte Folded Reload - sbbl 108(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 108(%esp) # 4-byte Spill - movl %edi, %ebp - sbbl 92(%esp), 
%ebp # 4-byte Folded Reload - sbbl $0, %ebx - andl $1, %ebx - jne .LBB68_2 -# BB#1: - movl 88(%esp), %edx # 4-byte Reload -.LBB68_2: - testb %bl, %bl - jne .LBB68_4 -# BB#3: - movl 100(%esp), %eax # 4-byte Reload -.LBB68_4: - movl 132(%esp), %ebx - movl %eax, (%ebx) - movl %edx, 4(%ebx) - jne .LBB68_6 -# BB#5: - movl %ecx, %esi -.LBB68_6: - movl %esi, 8(%ebx) - movl 96(%esp), %eax # 4-byte Reload - jne .LBB68_8 -# BB#7: - movl 108(%esp), %eax # 4-byte Reload -.LBB68_8: - movl %eax, 12(%ebx) - jne .LBB68_10 -# BB#9: - movl %ebp, %edi -.LBB68_10: - movl %edi, 16(%ebx) - addl $112, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end68: - .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L - - .globl mcl_fp_montNF5L - .align 16, 0x90 - .type mcl_fp_montNF5L,@function -mcl_fp_montNF5L: # @mcl_fp_montNF5L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $104, %esp - movl 128(%esp), %ebx - movl (%ebx), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 132(%esp), %ecx - movl (%ecx), %ecx - mull %ecx - movl %eax, 48(%esp) # 4-byte Spill - movl %edx, 68(%esp) # 4-byte Spill - movl 136(%esp), %esi - movl -4(%esi), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, %edi - imull %edx, %edi - movl (%esi), %edx - movl %edx, 100(%esp) # 4-byte Spill - movl 16(%esi), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 12(%esi), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 8(%esi), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 4(%esi), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 4(%ebx), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 16(%ebx), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 12(%ebx), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 8(%ebx), %ebx - movl %ebx, 60(%esp) # 4-byte Spill - movl %edi, %eax - mull %edx - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %edi, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %edi, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %edi, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %edi, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ebp, %eax - mull %ecx - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %esi, %eax - mull %ecx - movl %edx, %edi - movl %eax, (%esp) # 4-byte Spill - movl %ebx, %eax - mull %ecx - movl %edx, %ebp - movl %eax, %ebx - movl 76(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, %ecx - movl %eax, %esi - addl 68(%esp), %esi # 4-byte Folded Reload - adcl %ebx, %ecx - adcl (%esp), %ebp # 4-byte Folded Reload - adcl 4(%esp), %edi # 4-byte Folded Reload - movl 72(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 8(%esp), %edx # 4-byte Reload - addl 48(%esp), %edx # 4-byte Folded Reload - adcl 12(%esp), %esi # 4-byte Folded Reload - adcl 20(%esp), %ecx # 4-byte Folded Reload - adcl 28(%esp), %ebp # 4-byte Folded Reload - adcl 36(%esp), %edi # 4-byte Folded Reload - adcl $0, %eax - addl 16(%esp), %esi # 4-byte Folded Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl 40(%esp), %edi # 4-byte Folded Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 132(%esp), %eax - movl 4(%eax), %ebx - movl %ebx, %eax - mull 52(%esp) # 
4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl %ebx, %eax - mull 56(%esp) # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ebx, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %ebx, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ebx, %eax - mull 64(%esp) # 4-byte Folded Reload - movl %eax, 28(%esp) # 4-byte Spill - addl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 36(%esp), %ebx # 4-byte Reload - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl 40(%esp), %edx # 4-byte Reload - adcl 44(%esp), %edx # 4-byte Folded Reload - movl 68(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl %esi, 28(%esp) # 4-byte Folded Spill - adcl %ecx, 32(%esp) # 4-byte Folded Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl %ebp, %ecx - adcl %edi, %ebx - movl %ebx, 36(%esp) # 4-byte Spill - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 28(%esp), %esi # 4-byte Reload - movl %esi, %edi - imull 84(%esp), %edi # 4-byte Folded Reload - movl %edi, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %edi, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl %edi, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, %ebp - movl %edi, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, %ebx - movl %edi, %eax - mull 100(%esp) # 4-byte Folded Reload - addl %esi, %eax - adcl 32(%esp), %ebx # 4-byte Folded Reload - adcl %ecx, %ebp - movl 44(%esp), %ecx # 4-byte Reload - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl 48(%esp), %esi # 4-byte Reload - adcl 40(%esp), %esi # 4-byte Folded Reload - movl 68(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl %edx, %ebx - adcl 16(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 40(%esp) # 4-byte Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 44(%esp) # 4-byte Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 48(%esp) # 4-byte Spill - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 132(%esp), %eax - movl 8(%eax), %ecx - movl %ecx, %eax - mull 52(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 56(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ecx, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 20(%esp) # 4-byte Spill - movl %ecx, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 16(%esp) # 4-byte Spill - movl %ecx, %eax - mull 64(%esp) # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - movl %edx, %ebp - addl 16(%esp), %ebp # 4-byte Folded Reload - adcl 20(%esp), %edi # 4-byte Folded Reload - adcl 24(%esp), %esi # 4-byte Folded Reload - movl 36(%esp), %ecx # 4-byte Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl 72(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 32(%esp), %edx # 4-byte Reload - addl 
%ebx, %edx - movl %edx, 32(%esp) # 4-byte Spill - adcl 40(%esp), %ebp # 4-byte Folded Reload - adcl 44(%esp), %edi # 4-byte Folded Reload - adcl 48(%esp), %esi # 4-byte Folded Reload - adcl 68(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 36(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 72(%esp) # 4-byte Spill - movl %edx, %ebx - imull 84(%esp), %ebx # 4-byte Folded Reload - movl %ebx, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %ebx, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl %ebx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ebx, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, %ecx - movl %ebx, %eax - mull 100(%esp) # 4-byte Folded Reload - addl 32(%esp), %eax # 4-byte Folded Reload - adcl %ebp, %ecx - movl 40(%esp), %ebx # 4-byte Reload - adcl %edi, %ebx - movl 44(%esp), %edi # 4-byte Reload - adcl %esi, %edi - movl 48(%esp), %esi # 4-byte Reload - adcl 36(%esp), %esi # 4-byte Folded Reload - movl 72(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl %edx, %ecx - adcl 20(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 40(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 44(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 48(%esp) # 4-byte Spill - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 132(%esp), %eax - movl 12(%eax), %edi - movl %edi, %eax - mull 52(%esp) # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %edi, %eax - mull 56(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %edi, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 24(%esp) # 4-byte Spill - movl %edi, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, %ebx - movl %eax, 20(%esp) # 4-byte Spill - movl %edi, %eax - mull 64(%esp) # 4-byte Folded Reload - movl %edx, %ebp - addl 20(%esp), %ebp # 4-byte Folded Reload - adcl 24(%esp), %ebx # 4-byte Folded Reload - adcl 28(%esp), %esi # 4-byte Folded Reload - movl 36(%esp), %edx # 4-byte Reload - adcl 32(%esp), %edx # 4-byte Folded Reload - movl 68(%esp), %edi # 4-byte Reload - adcl $0, %edi - addl %ecx, %eax - movl %eax, 20(%esp) # 4-byte Spill - adcl 40(%esp), %ebp # 4-byte Folded Reload - adcl 44(%esp), %ebx # 4-byte Folded Reload - adcl 48(%esp), %esi # 4-byte Folded Reload - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 68(%esp) # 4-byte Spill - movl %eax, %ecx - imull 84(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 72(%esp) # 4-byte Spill - movl %ecx, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %ecx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, %edi - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - addl 20(%esp), %eax # 4-byte Folded Reload - movl %edi, %edx - adcl %ebp, %edx - 
movl 44(%esp), %edi # 4-byte Reload - adcl %ebx, %edi - movl 48(%esp), %ecx # 4-byte Reload - adcl %esi, %ecx - movl 72(%esp), %esi # 4-byte Reload - adcl 36(%esp), %esi # 4-byte Folded Reload - movl 68(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 44(%esp) # 4-byte Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 48(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 72(%esp) # 4-byte Spill - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 132(%esp), %eax - movl 16(%eax), %ecx - movl %ecx, %eax - mull 52(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 52(%esp) # 4-byte Spill - movl %ecx, %eax - mull 56(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 56(%esp) # 4-byte Spill - movl %ecx, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %edx, %ebx - movl %eax, 60(%esp) # 4-byte Spill - movl %ecx, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 76(%esp) # 4-byte Spill - movl %ecx, %eax - mull 64(%esp) # 4-byte Folded Reload - addl 76(%esp), %edx # 4-byte Folded Reload - adcl 60(%esp), %ebp # 4-byte Folded Reload - adcl 56(%esp), %ebx # 4-byte Folded Reload - adcl 52(%esp), %esi # 4-byte Folded Reload - adcl $0, %edi - addl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%esp) # 4-byte Spill - adcl 44(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - adcl 48(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 60(%esp) # 4-byte Spill - adcl 72(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - adcl 68(%esp), %esi # 4-byte Folded Reload - movl %esi, 68(%esp) # 4-byte Spill - adcl $0, %edi - movl 84(%esp), %ecx # 4-byte Reload - imull %eax, %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl %ecx, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, %ebp - movl %ecx, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, %esi - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, %ebx - movl %ecx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, %ecx - movl 84(%esp), %eax # 4-byte Reload - mull 88(%esp) # 4-byte Folded Reload - addl 40(%esp), %ebx # 4-byte Folded Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - adcl 60(%esp), %ecx # 4-byte Folded Reload - adcl 64(%esp), %esi # 4-byte Folded Reload - adcl 68(%esp), %ebp # 4-byte Folded Reload - adcl $0, %edi - addl 52(%esp), %eax # 4-byte Folded Reload - adcl %edx, %ecx - adcl 48(%esp), %esi # 4-byte Folded Reload - movl %esi, 84(%esp) # 4-byte Spill - adcl 72(%esp), %ebp # 4-byte Folded Reload - adcl 76(%esp), %edi # 4-byte Folded Reload - movl %eax, %ebx - subl 100(%esp), %ebx # 4-byte Folded Reload - movl %ecx, %edx - sbbl 88(%esp), %edx # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - sbbl 92(%esp), %esi # 4-byte Folded Reload - movl %esi, 92(%esp) # 4-byte Spill - movl %ebp, %edx - sbbl 96(%esp), %edx # 4-byte Folded Reload - movl %edx, 100(%esp) # 4-byte Spill - movl %edi, %edx - movl %edi, %esi - sbbl 80(%esp), %edi # 4-byte Folded Reload - movl %edi, %edx - sarl $31, %edx - testl %edx, %edx - js .LBB69_2 -# BB#1: - movl %ebx, %eax -.LBB69_2: - movl 124(%esp), %edx - movl %eax, (%edx) - js .LBB69_4 -# BB#3: - movl 88(%esp), %ecx # 
4-byte Reload -.LBB69_4: - movl %ecx, 4(%edx) - movl 84(%esp), %eax # 4-byte Reload - js .LBB69_6 -# BB#5: - movl 92(%esp), %eax # 4-byte Reload -.LBB69_6: - movl %eax, 8(%edx) - js .LBB69_8 -# BB#7: - movl 100(%esp), %ebp # 4-byte Reload -.LBB69_8: - movl %ebp, 12(%edx) - js .LBB69_10 -# BB#9: - movl %edi, %esi -.LBB69_10: - movl %esi, 16(%edx) - addl $104, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end69: - .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L - - .globl mcl_fp_montRed5L - .align 16, 0x90 - .type mcl_fp_montRed5L,@function -mcl_fp_montRed5L: # @mcl_fp_montRed5L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $88, %esp - movl 116(%esp), %eax - movl -4(%eax), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl (%eax), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 112(%esp), %esi - movl (%esi), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - imull %edx, %ecx - movl 16(%eax), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 12(%eax), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 8(%eax), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 4(%eax), %ebx - movl %ebx, 60(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edx - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edi - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 52(%esp) # 4-byte Spill - movl %ecx, %eax - mull %ebp - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ecx, %eax - mull %ebx - movl %edx, %ebp - movl %eax, %edi - movl %ecx, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, %ebx - addl %edi, %ebx - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 40(%esp), %edi # 4-byte Reload - adcl 52(%esp), %edi # 4-byte Folded Reload - movl 44(%esp), %edx # 4-byte Reload - adcl 56(%esp), %edx # 4-byte Folded Reload - movl 48(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl 80(%esp), %eax # 4-byte Folded Reload - adcl 4(%esi), %ebx - adcl 8(%esi), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - adcl 12(%esi), %edi - movl %edi, 40(%esp) # 4-byte Spill - adcl 16(%esi), %edx - movl %edx, 44(%esp) # 4-byte Spill - adcl 20(%esi), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 36(%esi), %eax - movl 32(%esi), %ecx - movl 28(%esi), %edx - movl 24(%esi), %esi - adcl $0, %esi - movl %esi, 12(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 20(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 56(%esp) # 4-byte Spill - sbbl %eax, %eax - andl $1, %eax - movl %eax, 80(%esp) # 4-byte Spill - movl %ebx, %ecx - imull 76(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %eax - mull 72(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ecx, %eax - mull 68(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %ecx, %eax - mull 64(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, (%esp) # 4-byte Spill - movl %ecx, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, %esi - movl %ecx, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %eax, %edi - addl %esi, %edx - movl %edx, %ebp - movl 24(%esp), %ecx # 4-byte Reload - adcl (%esp), %ecx # 4-byte Folded Reload - movl 28(%esp), %eax # 4-byte Reload - adcl 4(%esp), %eax # 4-byte Folded Reload - movl 32(%esp), %esi # 4-byte Reload - adcl 8(%esp), %esi # 4-byte Folded Reload - movl 36(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 
%ebx, %edi - adcl 16(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 16(%esp) # 4-byte Spill - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%esp) # 4-byte Spill - adcl 48(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 12(%esp), %edx # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - adcl $0, 20(%esp) # 4-byte Folded Spill - adcl $0, 52(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %ebp, %esi - imull 76(%esp), %esi # 4-byte Folded Reload - movl %esi, %eax - mull 72(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %esi, %eax - mull 68(%esp) # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %esi, %eax - mull 64(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 8(%esp) # 4-byte Spill - movl %esi, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, %ebx - movl %esi, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, %ebp - addl %ebx, %ebp - adcl 8(%esp), %ecx # 4-byte Folded Reload - adcl 12(%esp), %edi # 4-byte Folded Reload - movl 44(%esp), %esi # 4-byte Reload - adcl 40(%esp), %esi # 4-byte Folded Reload - movl 48(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 16(%esp), %eax # 4-byte Folded Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 44(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - adcl $0, 52(%esp) # 4-byte Folded Spill - movl 56(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %ebp, %edi - imull 76(%esp), %edi # 4-byte Folded Reload - movl %edi, %eax - mull 72(%esp) # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - movl %edi, %eax - mull 68(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %edi, %eax - mull 64(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %edi, %eax - mull 60(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 8(%esp) # 4-byte Spill - movl %edi, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %eax, 16(%esp) # 4-byte Spill - movl %edx, %ebx - addl 8(%esp), %ebx # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl 32(%esp), %eax # 4-byte Reload - adcl 20(%esp), %eax # 4-byte Folded Reload - movl 36(%esp), %edi # 4-byte Reload - adcl 56(%esp), %edi # 4-byte Folded Reload - movl 40(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl %ebp, 16(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ebx # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 28(%esp) # 4-byte Spill - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - adcl 48(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - adcl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 56(%esp) # 4-byte Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl 76(%esp), %esi # 4-byte Reload - imull %ebx, %esi - 
movl %esi, 76(%esp) # 4-byte Spill - movl %esi, %eax - mull 72(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 52(%esp) # 4-byte Spill - movl %esi, %eax - mull 68(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 48(%esp) # 4-byte Spill - movl %esi, %eax - mull 64(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 44(%esp) # 4-byte Spill - movl %esi, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, %esi - movl 76(%esp), %eax # 4-byte Reload - mull 60(%esp) # 4-byte Folded Reload - addl 24(%esp), %eax # 4-byte Folded Reload - adcl 44(%esp), %edx # 4-byte Folded Reload - adcl 48(%esp), %ecx # 4-byte Folded Reload - adcl 52(%esp), %edi # 4-byte Folded Reload - adcl $0, %ebp - addl %ebx, %esi - adcl 28(%esp), %eax # 4-byte Folded Reload - adcl 32(%esp), %edx # 4-byte Folded Reload - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 76(%esp) # 4-byte Spill - adcl 56(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 56(%esp) # 4-byte Spill - movl 80(%esp), %ebx # 4-byte Reload - adcl $0, %ebx - movl %eax, %esi - subl 84(%esp), %esi # 4-byte Folded Reload - movl %esi, 48(%esp) # 4-byte Spill - movl %edx, %esi - sbbl 60(%esp), %esi # 4-byte Folded Reload - sbbl 64(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %ebp - sbbl 68(%esp), %edi # 4-byte Folded Reload - movl %edi, 68(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - movl %edi, %ecx - sbbl 72(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 84(%esp) # 4-byte Spill - sbbl $0, %ebx - andl $1, %ebx - movl %ebx, 80(%esp) # 4-byte Spill - jne .LBB70_2 -# BB#1: - movl %esi, %edx -.LBB70_2: - movl 80(%esp), %ebx # 4-byte Reload - testb %bl, %bl - jne .LBB70_4 -# BB#3: - movl 48(%esp), %eax # 4-byte Reload -.LBB70_4: - movl 108(%esp), %ecx - movl %eax, (%ecx) - movl %edx, 4(%ecx) - movl 52(%esp), %eax # 4-byte Reload - jne .LBB70_6 -# BB#5: - movl %ebp, %eax -.LBB70_6: - movl %eax, 8(%ecx) - movl 76(%esp), %eax # 4-byte Reload - jne .LBB70_8 -# BB#7: - movl 68(%esp), %eax # 4-byte Reload -.LBB70_8: - movl %eax, 12(%ecx) - jne .LBB70_10 -# BB#9: - movl 84(%esp), %edi # 4-byte Reload -.LBB70_10: - movl %edi, 16(%ecx) - addl $88, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end70: - .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L - - .globl mcl_fp_addPre5L - .align 16, 0x90 - .type mcl_fp_addPre5L,@function -mcl_fp_addPre5L: # @mcl_fp_addPre5L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 28(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - movl 24(%esp), %esi - addl (%esi), %ecx - adcl 4(%esi), %edx - movl 8(%eax), %edi - adcl 8(%esi), %edi - movl 12(%esi), %ebx - movl 16(%esi), %esi - adcl 12(%eax), %ebx - movl 16(%eax), %eax - movl 20(%esp), %ebp - movl %ecx, (%ebp) - movl %edx, 4(%ebp) - movl %edi, 8(%ebp) - movl %ebx, 12(%ebp) - adcl %esi, %eax - movl %eax, 16(%ebp) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end71: - .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L - - .globl mcl_fp_subPre5L - .align 16, 0x90 - .type mcl_fp_subPre5L,@function -mcl_fp_subPre5L: # @mcl_fp_subPre5L -# BB#0: - pushl %edi - pushl %esi - movl 16(%esp), %ecx - movl (%ecx), %edx - xorl %eax, %eax - movl 20(%esp), %esi - subl (%esi), %edx - movl 12(%esp), %edi - movl %edx, (%edi) - movl 4(%ecx), %edx - sbbl 4(%esi), %edx - movl %edx, 4(%edi) - movl 8(%ecx), %edx - sbbl 8(%esi), %edx - movl 
%edx, 8(%edi) - movl 12(%ecx), %edx - sbbl 12(%esi), %edx - movl %edx, 12(%edi) - movl 16(%esi), %edx - movl 16(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 16(%edi) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - retl -.Lfunc_end72: - .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L - - .globl mcl_fp_shr1_5L - .align 16, 0x90 - .type mcl_fp_shr1_5L,@function -mcl_fp_shr1_5L: # @mcl_fp_shr1_5L -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 20(%esp), %eax - movl 16(%eax), %ecx - movl 12(%eax), %edx - movl 8(%eax), %esi - movl (%eax), %edi - movl 4(%eax), %eax - shrdl $1, %eax, %edi - movl 16(%esp), %ebx - movl %edi, (%ebx) - shrdl $1, %esi, %eax - movl %eax, 4(%ebx) - shrdl $1, %edx, %esi - movl %esi, 8(%ebx) - shrdl $1, %ecx, %edx - movl %edx, 12(%ebx) - shrl %ecx - movl %ecx, 16(%ebx) - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end73: - .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L - - .globl mcl_fp_add5L - .align 16, 0x90 - .type mcl_fp_add5L,@function -mcl_fp_add5L: # @mcl_fp_add5L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 28(%esp), %ebx - movl (%ebx), %eax - movl 4(%ebx), %ecx - movl 24(%esp), %edi - addl (%edi), %eax - adcl 4(%edi), %ecx - movl 8(%ebx), %edx - adcl 8(%edi), %edx - movl 12(%edi), %esi - movl 16(%edi), %edi - adcl 12(%ebx), %esi - adcl 16(%ebx), %edi - movl 20(%esp), %ebx - movl %eax, (%ebx) - movl %ecx, 4(%ebx) - movl %edx, 8(%ebx) - movl %esi, 12(%ebx) - movl %edi, 16(%ebx) - sbbl %ebx, %ebx - andl $1, %ebx - movl 32(%esp), %ebp - subl (%ebp), %eax - sbbl 4(%ebp), %ecx - sbbl 8(%ebp), %edx - sbbl 12(%ebp), %esi - sbbl 16(%ebp), %edi - sbbl $0, %ebx - testb $1, %bl - jne .LBB74_2 -# BB#1: # %nocarry - movl 20(%esp), %ebx - movl %eax, (%ebx) - movl %ecx, 4(%ebx) - movl %edx, 8(%ebx) - movl %esi, 12(%ebx) - movl %edi, 16(%ebx) -.LBB74_2: # %carry - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end74: - .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L - - .globl mcl_fp_addNF5L - .align 16, 0x90 - .type mcl_fp_addNF5L,@function -mcl_fp_addNF5L: # @mcl_fp_addNF5L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $20, %esp - movl 48(%esp), %esi - movl (%esi), %ebx - movl 4(%esi), %eax - movl 44(%esp), %edi - addl (%edi), %ebx - adcl 4(%edi), %eax - movl 16(%esi), %ecx - movl 12(%esi), %edx - movl 8(%esi), %ebp - adcl 8(%edi), %ebp - adcl 12(%edi), %edx - movl %edx, 12(%esp) # 4-byte Spill - adcl 16(%edi), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 52(%esp), %edi - movl %ebx, %esi - subl (%edi), %esi - movl %esi, (%esp) # 4-byte Spill - movl %eax, %esi - sbbl 4(%edi), %esi - movl %esi, 4(%esp) # 4-byte Spill - movl %ebp, %esi - sbbl 8(%edi), %esi - sbbl 12(%edi), %edx - movl %edx, 8(%esp) # 4-byte Spill - movl %ecx, %edx - sbbl 16(%edi), %edx - movl %edx, %edi - sarl $31, %edi - testl %edi, %edi - js .LBB75_2 -# BB#1: - movl (%esp), %ebx # 4-byte Reload -.LBB75_2: - movl 40(%esp), %edi - movl %ebx, (%edi) - js .LBB75_4 -# BB#3: - movl 4(%esp), %eax # 4-byte Reload -.LBB75_4: - movl %eax, 4(%edi) - movl 12(%esp), %ecx # 4-byte Reload - js .LBB75_6 -# BB#5: - movl %esi, %ebp -.LBB75_6: - movl %ebp, 8(%edi) - movl 16(%esp), %eax # 4-byte Reload - js .LBB75_8 -# BB#7: - movl 8(%esp), %ecx # 4-byte Reload -.LBB75_8: - movl %ecx, 12(%edi) - js .LBB75_10 -# BB#9: - movl %edx, %eax -.LBB75_10: - movl %eax, 16(%edi) - addl $20, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end75: - .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L - - .globl mcl_fp_sub5L - .align 16, 0x90 
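# Annotation (not in the generated source): mcl_fp_sub5L below computes
# z = x - y (mod p) over five 32-bit limbs. It subtracts limb by limb
# with borrow (subl/sbbl chain), stores the raw difference, and, when
# the final borrow is set, adds the modulus back in the %carry block so
# the result lands in [0, p).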
- .type mcl_fp_sub5L,@function -mcl_fp_sub5L: # @mcl_fp_sub5L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %edi - movl (%edi), %eax - movl 4(%edi), %ecx - xorl %ebx, %ebx - movl 28(%esp), %ebp - subl (%ebp), %eax - sbbl 4(%ebp), %ecx - movl 8(%edi), %edx - sbbl 8(%ebp), %edx - movl 12(%edi), %esi - sbbl 12(%ebp), %esi - movl 16(%edi), %edi - sbbl 16(%ebp), %edi - movl 20(%esp), %ebp - movl %eax, (%ebp) - movl %ecx, 4(%ebp) - movl %edx, 8(%ebp) - movl %esi, 12(%ebp) - movl %edi, 16(%ebp) - sbbl $0, %ebx - testb $1, %bl - je .LBB76_2 -# BB#1: # %carry - movl 32(%esp), %ebx - addl (%ebx), %eax - movl %eax, (%ebp) - adcl 4(%ebx), %ecx - movl %ecx, 4(%ebp) - adcl 8(%ebx), %edx - movl %edx, 8(%ebp) - movl 12(%ebx), %eax - adcl %esi, %eax - movl %eax, 12(%ebp) - movl 16(%ebx), %eax - adcl %edi, %eax - movl %eax, 16(%ebp) -.LBB76_2: # %nocarry - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end76: - .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L - - .globl mcl_fp_subNF5L - .align 16, 0x90 - .type mcl_fp_subNF5L,@function -mcl_fp_subNF5L: # @mcl_fp_subNF5L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $16, %esp - movl 40(%esp), %edi - movl (%edi), %ecx - movl 4(%edi), %eax - movl 44(%esp), %ebx - subl (%ebx), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - sbbl 4(%ebx), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 16(%edi), %esi - movl 12(%edi), %eax - movl 8(%edi), %ecx - sbbl 8(%ebx), %ecx - movl %ecx, (%esp) # 4-byte Spill - sbbl 12(%ebx), %eax - movl %eax, 8(%esp) # 4-byte Spill - sbbl 16(%ebx), %esi - movl %esi, %ebx - sarl $31, %ebx - movl %ebx, %ebp - shldl $1, %esi, %ebp - movl 48(%esp), %edi - movl 4(%edi), %ecx - andl %ebp, %ecx - andl (%edi), %ebp - movl 16(%edi), %edx - andl %ebx, %edx - movl 12(%edi), %eax - andl %ebx, %eax - roll %ebx - andl 8(%edi), %ebx - addl 4(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl 36(%esp), %edi - movl %ebp, (%edi) - adcl (%esp), %ebx # 4-byte Folded Reload - movl %ecx, 4(%edi) - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %ebx, 8(%edi) - movl %eax, 12(%edi) - adcl %esi, %edx - movl %edx, 16(%edi) - addl $16, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end77: - .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L - - .globl mcl_fpDbl_add5L - .align 16, 0x90 - .type mcl_fpDbl_add5L,@function -mcl_fpDbl_add5L: # @mcl_fpDbl_add5L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $28, %esp - movl 56(%esp), %edx - movl 52(%esp), %ecx - movl 12(%ecx), %ebx - movl 16(%ecx), %ebp - movl 8(%edx), %esi - movl (%edx), %edi - addl (%ecx), %edi - movl 48(%esp), %eax - movl %edi, (%eax) - movl 4(%edx), %edi - adcl 4(%ecx), %edi - adcl 8(%ecx), %esi - adcl 12(%edx), %ebx - adcl 16(%edx), %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl %edi, 4(%eax) - movl 28(%edx), %edi - movl %edi, 24(%esp) # 4-byte Spill - movl %esi, 8(%eax) - movl 20(%edx), %esi - movl %ebx, 12(%eax) - movl 20(%ecx), %ebp - adcl %esi, %ebp - movl %ebp, 12(%esp) # 4-byte Spill - movl 24(%edx), %esi - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 16(%eax) - movl 24(%ecx), %ebx - adcl %esi, %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 28(%ecx), %edi - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - movl 32(%edx), %eax - movl 32(%ecx), %esi - adcl %eax, %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 36(%edx), %eax - movl 36(%ecx), %edx - adcl %eax, %edx - sbbl %eax, %eax - andl $1, %eax - movl %ebp, %ecx - movl 
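mcl_fp_sub5L above subtracts and, when the borrow chain underflows, adds the modulus back; mcl_fp_subNF5L instead widens the sign of the result into an all-ones mask (the sarl $31) and unconditionally adds p AND mask. A sketch of both, under the same assumptions as the first sketch:

    // fpSub5 mirrors mcl_fp_sub5L: z = x - y mod p, adding p back when
    // the sbbl chain borrows (the testb $1 / carry path above).
    func fpSub5(z, x, y, p *[5]uint32) {
        var b uint32
        for i := 0; i < 5; i++ {
            z[i], b = bits.Sub32(x[i], y[i], b)
        }
        if b != 0 {
            var c uint32
            for i := 0; i < 5; i++ {
                z[i], c = bits.Add32(z[i], p[i], c)
            }
        }
    }

    // fpSub5NF is the branch-free form used by mcl_fp_subNF5L: the borrow
    // is widened to an all-ones/zero mask, as sarl $31 does to the sign
    // bit, and p&mask is added in a single chain.
    func fpSub5NF(z, x, y, p *[5]uint32) {
        var b uint32
        for i := 0; i < 5; i++ {
            z[i], b = bits.Sub32(x[i], y[i], b)
        }
        mask := -b // 0xffffffff if it borrowed, 0 otherwise
        var c uint32
        for i := 0; i < 5; i++ {
            z[i], c = bits.Add32(z[i], p[i]&mask, c)
        }
    }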
60(%esp), %ebp - subl (%ebp), %ecx - movl %ecx, (%esp) # 4-byte Spill - sbbl 4(%ebp), %ebx - movl %ebx, 4(%esp) # 4-byte Spill - sbbl 8(%ebp), %edi - movl %edi, 8(%esp) # 4-byte Spill - movl %esi, %ebx - movl %edx, %esi - sbbl 12(%ebp), %ebx - sbbl 16(%ebp), %edx - sbbl $0, %eax - andl $1, %eax - jne .LBB78_2 -# BB#1: - movl %edx, %esi -.LBB78_2: - testb %al, %al - movl 12(%esp), %ebp # 4-byte Reload - jne .LBB78_4 -# BB#3: - movl (%esp), %ebp # 4-byte Reload -.LBB78_4: - movl 48(%esp), %eax - movl %ebp, 20(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl 20(%esp), %edx # 4-byte Reload - movl 16(%esp), %edi # 4-byte Reload - jne .LBB78_6 -# BB#5: - movl 4(%esp), %edi # 4-byte Reload -.LBB78_6: - movl %edi, 24(%eax) - jne .LBB78_8 -# BB#7: - movl 8(%esp), %edx # 4-byte Reload -.LBB78_8: - movl %edx, 28(%eax) - jne .LBB78_10 -# BB#9: - movl %ebx, %ecx -.LBB78_10: - movl %ecx, 32(%eax) - movl %esi, 36(%eax) - addl $28, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end78: - .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L - - .globl mcl_fpDbl_sub5L - .align 16, 0x90 - .type mcl_fpDbl_sub5L,@function -mcl_fpDbl_sub5L: # @mcl_fpDbl_sub5L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $16, %esp - movl 40(%esp), %eax - movl (%eax), %esi - movl 4(%eax), %edi - movl 44(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%eax), %ebx - sbbl 8(%edx), %ebx - movl 36(%esp), %ecx - movl %esi, (%ecx) - movl 12(%eax), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ecx) - movl 16(%eax), %edi - sbbl 16(%edx), %edi - movl %ebx, 8(%ecx) - movl 20(%edx), %ebx - movl %esi, 12(%ecx) - movl 20(%eax), %esi - sbbl %ebx, %esi - movl %esi, 4(%esp) # 4-byte Spill - movl 24(%edx), %esi - movl %edi, 16(%ecx) - movl 24(%eax), %ebp - sbbl %esi, %ebp - movl 28(%edx), %esi - movl 28(%eax), %edi - sbbl %esi, %edi - movl %edi, (%esp) # 4-byte Spill - movl 32(%edx), %esi - movl 32(%eax), %edi - sbbl %esi, %edi - movl %edi, 8(%esp) # 4-byte Spill - movl 36(%edx), %edx - movl 36(%eax), %eax - sbbl %edx, %eax - movl %eax, 12(%esp) # 4-byte Spill - movl $0, %edx - sbbl $0, %edx - andl $1, %edx - movl 48(%esp), %ebx - jne .LBB79_1 -# BB#2: - xorl %eax, %eax - jmp .LBB79_3 -.LBB79_1: - movl 16(%ebx), %eax -.LBB79_3: - testb %dl, %dl - jne .LBB79_4 -# BB#5: - movl $0, %edx - movl $0, %esi - jmp .LBB79_6 -.LBB79_4: - movl (%ebx), %esi - movl 4(%ebx), %edx -.LBB79_6: - jne .LBB79_7 -# BB#8: - movl $0, %edi - jmp .LBB79_9 -.LBB79_7: - movl 12(%ebx), %edi -.LBB79_9: - jne .LBB79_10 -# BB#11: - xorl %ebx, %ebx - jmp .LBB79_12 -.LBB79_10: - movl 8(%ebx), %ebx -.LBB79_12: - addl 4(%esp), %esi # 4-byte Folded Reload - adcl %ebp, %edx - movl %esi, 20(%ecx) - adcl (%esp), %ebx # 4-byte Folded Reload - movl %edx, 24(%ecx) - adcl 8(%esp), %edi # 4-byte Folded Reload - movl %ebx, 28(%ecx) - movl %edi, 32(%ecx) - adcl 12(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%ecx) - addl $16, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end79: - .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L - - .globl mcl_fp_mulUnitPre6L - .align 16, 0x90 - .type mcl_fp_mulUnitPre6L,@function -mcl_fp_mulUnitPre6L: # @mcl_fp_mulUnitPre6L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $28, %esp - movl 56(%esp), %ebx - movl 52(%esp), %edi - movl %ebx, %eax - mull 20(%edi) - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %ebx, %eax - mull 16(%edi) - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %ebx, %eax - 
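The mcl_fpDbl_add5L and mcl_fpDbl_sub5L routines above operate on 10-limb double-width values (full products awaiting Montgomery reduction): the low half is a plain carry chain, and only the high half is conditionally reduced by p. A sketch, assuming double-width inputs below p*2^160:

    // fpDblAdd5 mirrors mcl_fpDbl_add5L: one 10-limb adcl chain, then a
    // conditional subtraction of p from the high five limbs only, which
    // is enough to preserve the double-width invariant.
    func fpDblAdd5(z, x, y *[10]uint32, p *[5]uint32) {
        var c uint32
        for i := 0; i < 10; i++ {
            z[i], c = bits.Add32(x[i], y[i], c)
        }
        var t [5]uint32
        var b uint32
        for i := 0; i < 5; i++ {
            t[i], b = bits.Sub32(z[5+i], p[i], b)
        }
        if c != 0 || b == 0 {
            copy(z[5:], t[:])
        }
    }

mcl_fpDbl_sub5L is the mirror image: a 10-limb borrow chain followed by adding p back into the high half, gated on the final borrow (the jne .LBB79_* selects between p's limbs and zero).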
mull 12(%edi) - movl %edx, %esi - movl %eax, 8(%esp) # 4-byte Spill - movl %ebx, %eax - mull 8(%edi) - movl %edx, %ebp - movl %eax, 4(%esp) # 4-byte Spill - movl %ebx, %eax - mull 4(%edi) - movl %edx, %ecx - movl %eax, (%esp) # 4-byte Spill - movl %ebx, %eax - mull (%edi) - movl 48(%esp), %edi - movl %eax, (%edi) - addl (%esp), %edx # 4-byte Folded Reload - movl %edx, 4(%edi) - adcl 4(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%edi) - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 12(%edi) - adcl 12(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%edi) - movl 16(%esp), %eax # 4-byte Reload - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%edi) - movl 24(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 24(%edi) - addl $28, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end80: - .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L - - .globl mcl_fpDbl_mulPre6L - .align 16, 0x90 - .type mcl_fpDbl_mulPre6L,@function -mcl_fpDbl_mulPre6L: # @mcl_fpDbl_mulPre6L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $84, %esp - movl 108(%esp), %esi - movl (%esi), %ebp - movl 112(%esp), %eax - movl (%eax), %edi - movl %ebp, %eax - mull %edi - movl %edx, 40(%esp) # 4-byte Spill - movl 104(%esp), %edx - movl %eax, (%edx) - movl 4(%esi), %ebx - movl 8(%esi), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 12(%esi), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 16(%esi), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 20(%esi), %ecx - movl 112(%esp), %eax - movl 4(%eax), %esi - movl %ebx, %eax - mull %esi - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %ebp, %eax - mull %esi - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ebx, %eax - mull %edi - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, %ebp - movl %ecx, %eax - mull %esi - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - mull %esi - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - mull %esi - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl 12(%esp), %ebx # 4-byte Reload - movl %ebx, %eax - mull %esi - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edi - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - mull %edi - movl %edx, %ecx - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - mull %edi - movl %edx, %esi - movl %eax, 80(%esp) # 4-byte Spill - movl %ebx, %eax - mull %edi - movl %eax, %ebx - movl %edx, %edi - addl 40(%esp), %ebp # 4-byte Folded Reload - adcl 20(%esp), %ebx # 4-byte Folded Reload - adcl 80(%esp), %edi # 4-byte Folded Reload - adcl 76(%esp), %esi # 4-byte Folded Reload - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl 64(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 32(%esp), %ebp # 4-byte Folded Reload - movl 104(%esp), %eax - movl %ebp, 4(%eax) - adcl 48(%esp), %ebx # 4-byte Folded Reload - adcl 16(%esp), %edi # 4-byte Folded Reload - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %ecx, %eax - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %edx, %ecx - adcl 44(%esp), %ecx # 4-byte Folded Reload - sbbl %edx, %edx - andl $1, %edx - addl 60(%esp), %ebx # 4-byte Folded Reload - adcl 72(%esp), %edi # 4-byte Folded Reload - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 
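mcl_fp_mulUnitPre6L above multiplies a 6-limb value by a single 32-bit word, producing 7 limbs. The generated code issues all six mull instructions first and then folds the high words together in one adcl chain; written sequentially the same arithmetic is plain hi/lo carry propagation:

    // mulUnitPre6 mirrors mcl_fp_mulUnitPre6L: z = x * y over limbs,
    // with the extra top word landing in z[6].
    func mulUnitPre6(z *[7]uint32, x *[6]uint32, y uint32) {
        var carry uint32
        for i := 0; i < 6; i++ {
            hi, lo := bits.Mul32(x[i], y)
            var c uint32
            z[i], c = bits.Add32(lo, carry, 0)
            carry = hi + c // hi <= 2^32-2, so this cannot wrap
        }
        z[6] = carry
    }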
12(%esp) # 4-byte Spill - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 64(%esp) # 4-byte Spill - adcl 68(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl 108(%esp), %ebp - movl 20(%ebp), %edx - movl %edx, 20(%esp) # 4-byte Spill - movl 112(%esp), %eax - movl 8(%eax), %ecx - movl %edx, %eax - mull %ecx - movl %eax, 80(%esp) # 4-byte Spill - movl %edx, 60(%esp) # 4-byte Spill - movl 16(%ebp), %eax - movl %eax, 16(%esp) # 4-byte Spill - mull %ecx - movl %eax, 68(%esp) # 4-byte Spill - movl %edx, 56(%esp) # 4-byte Spill - movl 12(%ebp), %eax - movl %eax, 36(%esp) # 4-byte Spill - mull %ecx - movl %eax, 76(%esp) # 4-byte Spill - movl %edx, 52(%esp) # 4-byte Spill - movl 8(%ebp), %eax - movl %eax, 32(%esp) # 4-byte Spill - mull %ecx - movl %eax, 72(%esp) # 4-byte Spill - movl %edx, 48(%esp) # 4-byte Spill - movl (%ebp), %esi - movl 4(%ebp), %eax - movl %eax, 8(%esp) # 4-byte Spill - mull %ecx - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, %ebp - movl %esi, %eax - mull %ecx - movl %edx, 24(%esp) # 4-byte Spill - addl %ebx, %eax - movl 104(%esp), %ecx - movl %eax, 8(%ecx) - adcl %edi, %ebp - movl 12(%esp), %eax # 4-byte Reload - adcl %eax, 72(%esp) # 4-byte Folded Spill - movl 44(%esp), %eax # 4-byte Reload - adcl %eax, 76(%esp) # 4-byte Folded Spill - movl 68(%esp), %ebx # 4-byte Reload - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl 28(%esp), %eax # 4-byte Reload - adcl %eax, 80(%esp) # 4-byte Folded Spill - sbbl %edi, %edi - movl 112(%esp), %eax - movl 12(%eax), %ecx - movl 20(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl 8(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %esi, %eax - mull %ecx - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - mull %ecx - movl %eax, (%esp) # 4-byte Spill - movl %edx, 32(%esp) # 4-byte Spill - andl $1, %edi - addl 24(%esp), %ebp # 4-byte Folded Reload - movl 72(%esp), %esi # 4-byte Reload - adcl 40(%esp), %esi # 4-byte Folded Reload - movl 76(%esp), %edx # 4-byte Reload - adcl 48(%esp), %edx # 4-byte Folded Reload - movl %ebx, %ecx - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl 80(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - adcl 60(%esp), %edi # 4-byte Folded Reload - addl 4(%esp), %ebp # 4-byte Folded Reload - movl 104(%esp), %ebx - movl %ebp, 12(%ebx) - movl %esi, %ebx - adcl 12(%esp), %ebx # 4-byte Folded Reload - movl %edx, %esi - adcl (%esp), %esi # 4-byte Folded Reload - adcl 8(%esp), %ecx # 4-byte Folded Reload - adcl 20(%esp), %eax # 4-byte Folded Reload - adcl 68(%esp), %edi # 4-byte Folded Reload - sbbl %edx, %edx - andl $1, %edx - addl 16(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 72(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 76(%esp) # 4-byte Spill - adcl 32(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - adcl 44(%esp), %edi # 4-byte Folded Reload - movl %edi, 44(%esp) # 4-byte Spill - adcl 
64(%esp), %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - movl 108(%esp), %eax - movl %eax, %ecx - movl 20(%ecx), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 16(%ecx), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 12(%ecx), %edi - movl %edi, 16(%esp) # 4-byte Spill - movl 8(%ecx), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - movl (%ecx), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 4(%ecx), %ebp - movl %ebp, 4(%esp) # 4-byte Spill - movl 112(%esp), %esi - movl 16(%esi), %ecx - mull %ecx - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, %esi - movl 56(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte Spill - movl %edi, %eax - mull %ecx - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, %edi - movl %ebx, %eax - mull %ecx - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, %ebx - movl %ebp, %eax - mull %ecx - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, %ebp - movl 60(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 24(%esp) # 4-byte Spill - addl 72(%esp), %eax # 4-byte Folded Reload - movl 104(%esp), %ecx - movl %eax, 16(%ecx) - adcl 76(%esp), %ebp # 4-byte Folded Reload - adcl 68(%esp), %ebx # 4-byte Folded Reload - adcl 80(%esp), %edi # 4-byte Folded Reload - movl 44(%esp), %eax # 4-byte Reload - adcl %eax, 64(%esp) # 4-byte Folded Spill - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 8(%esp) # 4-byte Spill - movl 112(%esp), %eax - movl 20(%eax), %ecx - sbbl %esi, %esi - movl 20(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl 4(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - mull %ecx - movl %eax, 12(%esp) # 4-byte Spill - movl %edx, 56(%esp) # 4-byte Spill - andl $1, %esi - addl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %ebx # 4-byte Folded Reload - adcl 32(%esp), %edi # 4-byte Folded Reload - movl 64(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl 8(%esp), %ecx # 4-byte Reload - adcl 48(%esp), %ecx # 4-byte Folded Reload - adcl 52(%esp), %esi # 4-byte Folded Reload - addl 4(%esp), %ebp # 4-byte Folded Reload - movl 104(%esp), %edx - movl %ebp, 20(%edx) - adcl 20(%esp), %ebx # 4-byte Folded Reload - adcl 12(%esp), %edi # 4-byte Folded Reload - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, %edx - movl %ecx, %ebp - adcl 40(%esp), %ebp # 4-byte Folded Reload - adcl 44(%esp), %esi # 4-byte Folded Reload - sbbl %eax, %eax - andl $1, %eax - addl 60(%esp), %ebx # 4-byte Folded Reload - adcl 72(%esp), %edi # 4-byte Folded Reload - movl 104(%esp), %ecx - movl %ebx, 24(%ecx) - movl %edx, %ebx - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %edi, 28(%ecx) - movl %ebp, %edx - adcl 68(%esp), %edx # 4-byte Folded Reload - movl %ebx, 32(%ecx) - adcl 76(%esp), %esi # 4-byte Folded Reload - movl %edx, 36(%ecx) - movl %esi, 40(%ecx) - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%ecx) - addl $84, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end81: - .size 
mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L - - .globl mcl_fpDbl_sqrPre6L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre6L,@function -mcl_fpDbl_sqrPre6L: # @mcl_fpDbl_sqrPre6L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $100, %esp - movl 124(%esp), %esi - movl 20(%esi), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl (%esi), %ebp - movl 4(%esi), %ebx - mull %ebx - movl %eax, 72(%esp) # 4-byte Spill - movl %edx, 84(%esp) # 4-byte Spill - movl 16(%esi), %ecx - movl %ecx, %eax - mull %ebx - movl %eax, 64(%esp) # 4-byte Spill - movl %edx, 80(%esp) # 4-byte Spill - movl 12(%esi), %edi - movl %edi, %eax - mull %ebx - movl %eax, 60(%esp) # 4-byte Spill - movl %edx, 76(%esp) # 4-byte Spill - movl 124(%esp), %eax - movl 8(%eax), %esi - movl %esi, %eax - mull %ebx - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - mull %ebp - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - mull %ebp - movl %edx, 96(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %edi, %eax - mull %ebp - movl %edx, 92(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %esi, %eax - mull %ebp - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, %ecx - movl %ebx, %eax - mull %ebx - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ebx, %eax - mull %ebp - movl %edx, %esi - movl %esi, 32(%esp) # 4-byte Spill - movl %eax, %edi - movl %ebp, %eax - mull %ebp - movl 120(%esp), %ebx - movl %eax, (%ebx) - addl %edi, %edx - adcl %esi, %ecx - movl %ecx, %ebx - movl 88(%esp), %ecx # 4-byte Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl 92(%esp), %esi # 4-byte Reload - adcl 40(%esp), %esi # 4-byte Folded Reload - movl 96(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl 48(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - addl %edi, %edx - movl 120(%esp), %edi - movl %edx, 4(%edi) - adcl 36(%esp), %ebx # 4-byte Folded Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %esi, %edx - adcl 60(%esp), %edx # 4-byte Folded Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, %edi - adcl 72(%esp), %ebp # 4-byte Folded Reload - sbbl %eax, %eax - andl $1, %eax - movl %ebx, %esi - addl 32(%esp), %esi # 4-byte Folded Reload - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 88(%esp) # 4-byte Spill - adcl 68(%esp), %edx # 4-byte Folded Reload - movl %edx, 92(%esp) # 4-byte Spill - adcl 76(%esp), %edi # 4-byte Folded Reload - movl %edi, 96(%esp) # 4-byte Spill - adcl 80(%esp), %ebp # 4-byte Folded Reload - movl %ebp, %edi - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - movl 124(%esp), %ebx - movl 20(%ebx), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 8(%ebx), %ebp - mull %ebp - movl %eax, 84(%esp) # 4-byte Spill - movl %edx, 60(%esp) # 4-byte Spill - movl 16(%ebx), %eax - movl %eax, 64(%esp) # 4-byte Spill - mull %ebp - movl %eax, 80(%esp) # 4-byte Spill - movl %edx, 52(%esp) # 4-byte Spill - movl 12(%ebx), %eax - movl %eax, 36(%esp) # 4-byte Spill - mull %ebp - movl %eax, 68(%esp) # 4-byte Spill - movl %edx, 76(%esp) # 4-byte Spill - movl (%ebx), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 4(%ebx), %eax - movl %eax, 48(%esp) # 4-byte Spill - mull %ebp - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull %ebp - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, %ebx - movl %ebp, %eax - mull %ebp - movl %eax, %ebp - addl 
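mcl_fpDbl_mulPre6L, whose .size directive closes just above, is a fully unrolled schoolbook product: each block of mulls handles one word of the multiplier and is folded into the running 12-limb result. A compact equivalent using 64-bit accumulation, same assumptions as the earlier sketches:

    // mulPre6 computes the plain 12-limb product z = x*y, the operation
    // mcl_fpDbl_mulPre6L unrolls; no modular reduction happens here.
    func mulPre6(z *[12]uint32, x, y *[6]uint32) {
        for i := range z {
            z[i] = 0
        }
        for j := 0; j < 6; j++ {
            var carry uint64
            for i := 0; i < 6; i++ {
                t := uint64(x[i])*uint64(y[j]) + uint64(z[i+j]) + carry
                z[i+j] = uint32(t)
                carry = t >> 32
            }
            z[j+6] = uint32(carry)
        }
    }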
%esi, %ebx - movl 120(%esp), %eax - movl %ebx, 8(%eax) - movl 28(%esp), %ecx # 4-byte Reload - adcl 88(%esp), %ecx # 4-byte Folded Reload - adcl 92(%esp), %ebp # 4-byte Folded Reload - movl 96(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl 80(%esp), %ebx # 4-byte Reload - adcl %edi, %ebx - movl 84(%esp), %esi # 4-byte Reload - adcl 44(%esp), %esi # 4-byte Folded Reload - sbbl %edi, %edi - andl $1, %edi - addl 32(%esp), %ecx # 4-byte Folded Reload - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 96(%esp) # 4-byte Spill - adcl %edx, %eax - movl %eax, %ebp - adcl 76(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 80(%esp) # 4-byte Spill - adcl 52(%esp), %esi # 4-byte Folded Reload - movl %esi, 84(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - movl 36(%esp), %edi # 4-byte Reload - mull %edi - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 92(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - mull %edi - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 88(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - mull %edi - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, %ebx - movl 56(%esp), %eax # 4-byte Reload - mull %edi - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, %esi - movl %edi, %eax - mull %edi - movl %eax, %edi - movl %edx, 36(%esp) # 4-byte Spill - addl %ecx, %esi - movl 120(%esp), %eax - movl %esi, 12(%eax) - adcl 96(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 72(%esp) # 4-byte Spill - adcl 68(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 96(%esp) # 4-byte Spill - adcl 80(%esp), %edi # 4-byte Folded Reload - movl %edi, 80(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 88(%esp) # 4-byte Folded Spill - movl 32(%esp), %eax # 4-byte Reload - adcl %eax, 92(%esp) # 4-byte Folded Spill - sbbl %eax, %eax - andl $1, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 124(%esp), %ecx - movl (%ecx), %ebx - movl 4(%ecx), %edi - movl 20(%ecx), %ebp - movl %edi, %eax - mull %ebp - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - movl %ebx, %eax - mull %ebp - movl %eax, 48(%esp) # 4-byte Spill - movl %edx, 64(%esp) # 4-byte Spill - movl 16(%ecx), %esi - movl %edi, %eax - mull %esi - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %ebx, %eax - mull %esi - movl %eax, 12(%esp) # 4-byte Spill - movl %edx, 28(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - addl %eax, 72(%esp) # 4-byte Folded Spill - movl 44(%esp), %eax # 4-byte Reload - adcl %eax, 96(%esp) # 4-byte Folded Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 76(%esp), %ecx # 4-byte Folded Reload - movl 36(%esp), %eax # 4-byte Reload - adcl %eax, 88(%esp) # 4-byte Folded Spill - movl 52(%esp), %eax # 4-byte Reload - adcl %eax, 92(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 124(%esp), %eax - movl 12(%eax), %edi - movl 8(%eax), %ebx - movl %edi, %eax - mull %ebp - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %edi, %eax - mull %esi - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %ebx, %eax - mull %ebp - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ebx, %eax - mull %esi - movl %edx, 8(%esp) # 4-byte Spill - movl %eax, %edi - movl %ebp, %eax - mull %esi - movl %edx, 80(%esp) # 4-byte 
Spill - movl %eax, 76(%esp) # 4-byte Spill - movl %ebp, %eax - mull %ebp - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %esi, %eax - mull %esi - movl %eax, %ebx - movl %edx, (%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - addl 12(%esp), %eax # 4-byte Folded Reload - movl 120(%esp), %ebp - movl %eax, 16(%ebp) - movl 96(%esp), %eax # 4-byte Reload - adcl 20(%esp), %eax # 4-byte Folded Reload - adcl %ecx, %edi - movl 4(%esp), %ecx # 4-byte Reload - adcl 88(%esp), %ecx # 4-byte Folded Reload - adcl 92(%esp), %ebx # 4-byte Folded Reload - movl 84(%esp), %edx # 4-byte Reload - adcl 76(%esp), %edx # 4-byte Folded Reload - sbbl %esi, %esi - andl $1, %esi - addl 28(%esp), %eax # 4-byte Folded Reload - adcl 32(%esp), %edi # 4-byte Folded Reload - adcl 8(%esp), %ecx # 4-byte Folded Reload - adcl 16(%esp), %ebx # 4-byte Folded Reload - adcl (%esp), %edx # 4-byte Folded Reload - adcl 80(%esp), %esi # 4-byte Folded Reload - addl 48(%esp), %eax # 4-byte Folded Reload - movl 120(%esp), %ebp - movl %eax, 20(%ebp) - adcl 56(%esp), %edi # 4-byte Folded Reload - adcl 36(%esp), %ecx # 4-byte Folded Reload - adcl 40(%esp), %ebx # 4-byte Folded Reload - movl %edx, %eax - adcl 76(%esp), %eax # 4-byte Folded Reload - adcl 24(%esp), %esi # 4-byte Folded Reload - sbbl %edx, %edx - andl $1, %edx - addl 64(%esp), %edi # 4-byte Folded Reload - adcl 68(%esp), %ecx # 4-byte Folded Reload - movl 120(%esp), %ebp - movl %edi, 24(%ebp) - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %ecx, 28(%ebp) - movl %eax, %edi - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %ebx, 32(%ebp) - adcl 80(%esp), %esi # 4-byte Folded Reload - movl %edi, 36(%ebp) - movl %esi, 40(%ebp) - adcl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%ebp) - addl $100, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end82: - .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L - - .globl mcl_fp_mont6L - .align 16, 0x90 - .type mcl_fp_mont6L,@function -mcl_fp_mont6L: # @mcl_fp_mont6L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $132, %esp - movl 156(%esp), %edi - movl (%edi), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 160(%esp), %ecx - movl (%ecx), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - mull %ecx - movl %eax, 68(%esp) # 4-byte Spill - movl %edx, 64(%esp) # 4-byte Spill - movl 164(%esp), %edx - movl -4(%edx), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl %eax, %ebp - imull %ecx, %ebp - movl (%edx), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 20(%edx), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 16(%edx), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 12(%edx), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 8(%edx), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 4(%edx), %esi - movl %esi, 104(%esp) # 4-byte Spill - movl 4(%edi), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl %edi, %eax - movl 20(%eax), %edi - movl %edi, 88(%esp) # 4-byte Spill - movl 16(%eax), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 12(%eax), %ebx - movl %ebx, 80(%esp) # 4-byte Spill - movl 8(%eax), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl %ebp, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ebp, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ebp, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl 
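mcl_fpDbl_sqrPre6L, ending above, squares a 6-limb value. Squaring needs each cross product x[i]*x[j] only once, doubled, plus the diagonal squares; the unrolled code interleaves the same arithmetic. A readable equivalent, not a transcription of the instruction schedule:

    // sqrPre6 computes z = x*x as mcl_fpDbl_sqrPre6L does, exploiting
    // x[i]*x[j] == x[j]*x[i]: cross terms are doubled, diagonal squares
    // are added, and one final pass normalizes the carries.
    func sqrPre6(z *[12]uint32, x *[6]uint32) {
        var t [12]uint64
        for i := 0; i < 6; i++ {
            for j := i + 1; j < 6; j++ {
                p := uint64(x[i]) * uint64(x[j])
                t[i+j] += (p & 0xffffffff) << 1
                t[i+j+1] += (p >> 32) << 1
            }
            s := uint64(x[i]) * uint64(x[i])
            t[2*i] += s & 0xffffffff
            t[2*i+1] += s >> 32
        }
        var carry uint64
        for i := 0; i < 12; i++ {
            v := t[i] + carry
            z[i] = uint32(v)
            carry = v >> 32
        }
    }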
%ebp, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ebp, %eax - mull %esi - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ebp, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 20(%esp) # 4-byte Spill - movl %edi, %eax - movl 40(%esp), %ebp # 4-byte Reload - mull %ebp - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %ecx, %eax - mull %ebp - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, (%esp) # 4-byte Spill - movl %ebx, %eax - mull %ebp - movl %edx, %ebx - movl %eax, 8(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - mull %ebp - movl %ebp, %ecx - movl %edx, %ebp - movl %eax, 4(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - mull %ecx - movl %eax, %edi - addl 64(%esp), %edi # 4-byte Folded Reload - adcl 4(%esp), %edx # 4-byte Folded Reload - movl %edx, 4(%esp) # 4-byte Spill - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 8(%esp) # 4-byte Spill - adcl (%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - adcl %eax, 72(%esp) # 4-byte Folded Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - addl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - adcl 24(%esp), %esi # 4-byte Folded Reload - movl 48(%esp), %edx # 4-byte Reload - adcl 28(%esp), %edx # 4-byte Folded Reload - movl 52(%esp), %ecx # 4-byte Reload - adcl 32(%esp), %ecx # 4-byte Folded Reload - movl 56(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl 60(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl 20(%esp), %ebx # 4-byte Reload - addl 68(%esp), %ebx # 4-byte Folded Reload - adcl %edi, 40(%esp) # 4-byte Folded Spill - adcl 4(%esp), %esi # 4-byte Folded Reload - movl %esi, 44(%esp) # 4-byte Spill - adcl 8(%esp), %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - adcl 64(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%esp) # 4-byte Spill - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - adcl 76(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 60(%esp) # 4-byte Spill - sbbl %eax, %eax - andl $1, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 160(%esp), %eax - movl 4(%eax), %ecx - movl %ecx, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, %edi - movl %ecx, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, %ebx - movl %eax, 24(%esp) # 4-byte Spill - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, %esi - movl %ecx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - addl %esi, %edx - movl %edx, %esi - adcl 24(%esp), %ebp # 4-byte Folded Reload - adcl %edi, %ebx - movl %ebx, %edi - movl 76(%esp), %edx # 4-byte Reload - adcl 28(%esp), %edx # 4-byte Folded Reload - movl 72(%esp), %ecx # 4-byte Reload - adcl 32(%esp), %ecx # 4-byte Folded Reload - movl 68(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 64(%esp), %ebx # 4-byte Reload - addl 40(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - adcl 44(%esp), %esi # 4-byte 
Folded Reload - movl %esi, 44(%esp) # 4-byte Spill - adcl 48(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 40(%esp) # 4-byte Spill - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 72(%esp) # 4-byte Spill - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %ebx, %esi - imull 112(%esp), %esi # 4-byte Folded Reload - andl $1, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl %esi, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %esi, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %esi, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %esi, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %esi, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, %ecx - movl %esi, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %eax, %edi - movl %edx, %ebx - addl %ecx, %ebx - adcl 16(%esp), %ebp # 4-byte Folded Reload - movl 48(%esp), %eax # 4-byte Reload - adcl 20(%esp), %eax # 4-byte Folded Reload - movl 52(%esp), %esi # 4-byte Reload - adcl 24(%esp), %esi # 4-byte Folded Reload - movl 56(%esp), %edx # 4-byte Reload - adcl 28(%esp), %edx # 4-byte Folded Reload - movl 60(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl 64(%esp), %edi # 4-byte Folded Reload - adcl 44(%esp), %ebx # 4-byte Folded Reload - adcl 40(%esp), %ebp # 4-byte Folded Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - adcl 76(%esp), %esi # 4-byte Folded Reload - movl %esi, 52(%esp) # 4-byte Spill - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - adcl 68(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 60(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 160(%esp), %eax - movl 8(%eax), %ecx - movl %ecx, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 64(%esp) # 4-byte Spill - movl %ecx, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - mull 92(%esp) # 4-byte Folded Reload - addl 44(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 28(%esp), %esi # 4-byte Folded Reload - movl 72(%esp), %edx # 4-byte Reload - adcl 32(%esp), %edx # 4-byte Folded Reload - movl 68(%esp), %ecx # 4-byte Reload - adcl 64(%esp), %ecx # 4-byte Folded Reload - adcl $0, %edi - addl %ebx, %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl %ebp, 44(%esp) # 4-byte Folded Spill - movl 48(%esp), %ebx # 4-byte Reload - adcl %ebx, 40(%esp) # 4-byte Folded Spill - adcl 52(%esp), %esi # 4-byte Folded 
Reload - movl %esi, 76(%esp) # 4-byte Spill - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - adcl 36(%esp), %edi # 4-byte Folded Reload - movl %edi, 52(%esp) # 4-byte Spill - sbbl %ecx, %ecx - movl %eax, %esi - imull 112(%esp), %esi # 4-byte Folded Reload - andl $1, %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl %esi, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %esi, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %esi, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %esi, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 20(%esp) # 4-byte Spill - movl %esi, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, %edi - movl %esi, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %eax, %esi - movl %edx, %ebx - addl %edi, %ebx - adcl 20(%esp), %ebp # 4-byte Folded Reload - movl %ecx, %eax - adcl 24(%esp), %eax # 4-byte Folded Reload - movl 56(%esp), %edi # 4-byte Reload - adcl 28(%esp), %edi # 4-byte Folded Reload - movl 60(%esp), %edx # 4-byte Reload - adcl 48(%esp), %edx # 4-byte Folded Reload - movl 64(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl 32(%esp), %esi # 4-byte Folded Reload - adcl 44(%esp), %ebx # 4-byte Folded Reload - adcl 40(%esp), %ebp # 4-byte Folded Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - adcl 72(%esp), %edi # 4-byte Folded Reload - movl %edi, 56(%esp) # 4-byte Spill - adcl 68(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 64(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 160(%esp), %eax - movl 12(%eax), %edi - movl %edi, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 40(%esp) # 4-byte Spill - movl %edi, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %edi, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %edi, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %edi, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 52(%esp) # 4-byte Spill - movl %edi, %eax - mull 92(%esp) # 4-byte Folded Reload - addl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 48(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl 72(%esp), %edi # 4-byte Reload - adcl 32(%esp), %edi # 4-byte Folded Reload - movl 68(%esp), %edx # 4-byte Reload - adcl 40(%esp), %edx # 4-byte Folded Reload - adcl $0, %esi - addl %ebx, %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl %ebp, 52(%esp) # 4-byte Folded Spill - movl 44(%esp), %ebx # 4-byte Reload - adcl %ebx, 48(%esp) # 4-byte Folded Spill - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 76(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 72(%esp) # 4-byte Spill - adcl 64(%esp), %edx # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - 
adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 44(%esp) # 4-byte Spill - sbbl %ecx, %ecx - movl %eax, %esi - imull 112(%esp), %esi # 4-byte Folded Reload - andl $1, %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl %esi, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %esi, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %esi, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %esi, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 20(%esp) # 4-byte Spill - movl %esi, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, %edi - movl %esi, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %eax, %esi - movl %edx, %ebx - addl %edi, %ebx - adcl 20(%esp), %ebp # 4-byte Folded Reload - movl %ecx, %eax - adcl 24(%esp), %eax # 4-byte Folded Reload - movl 56(%esp), %edi # 4-byte Reload - adcl 28(%esp), %edi # 4-byte Folded Reload - movl 60(%esp), %edx # 4-byte Reload - adcl 36(%esp), %edx # 4-byte Folded Reload - movl 64(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl 32(%esp), %esi # 4-byte Folded Reload - adcl 52(%esp), %ebx # 4-byte Folded Reload - adcl 48(%esp), %ebp # 4-byte Folded Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - adcl 72(%esp), %edi # 4-byte Folded Reload - movl %edi, 56(%esp) # 4-byte Spill - adcl 68(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 64(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 160(%esp), %eax - movl 16(%eax), %edi - movl %edi, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 36(%esp) # 4-byte Spill - movl %edi, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %edi, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %edi, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl %edi, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 48(%esp) # 4-byte Spill - movl %edi, %eax - mull 92(%esp) # 4-byte Folded Reload - addl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 44(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl 72(%esp), %edi # 4-byte Reload - adcl 32(%esp), %edi # 4-byte Folded Reload - movl 68(%esp), %edx # 4-byte Reload - adcl 36(%esp), %edx # 4-byte Folded Reload - adcl $0, %esi - addl %ebx, %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl %ebp, 48(%esp) # 4-byte Folded Spill - movl 52(%esp), %ebx # 4-byte Reload - adcl %ebx, 44(%esp) # 4-byte Folded Spill - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 76(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 72(%esp) # 4-byte Spill - adcl 64(%esp), %edx # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 36(%esp) # 4-byte Spill - sbbl %ecx, %ecx - movl %eax, %esi - imull 112(%esp), %esi # 4-byte Folded Reload - andl $1, %ecx - movl %ecx, 40(%esp) # 4-byte 
Spill - movl %esi, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %esi, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %esi, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %esi, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %esi, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, %edi - movl %esi, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %eax, %ecx - movl %edx, %ebx - addl %edi, %ebx - adcl 16(%esp), %ebp # 4-byte Folded Reload - movl 52(%esp), %eax # 4-byte Reload - adcl 20(%esp), %eax # 4-byte Folded Reload - movl 56(%esp), %edi # 4-byte Reload - adcl 24(%esp), %edi # 4-byte Folded Reload - movl 60(%esp), %esi # 4-byte Reload - adcl 28(%esp), %esi # 4-byte Folded Reload - movl 64(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 32(%esp), %ecx # 4-byte Folded Reload - adcl 48(%esp), %ebx # 4-byte Folded Reload - adcl 44(%esp), %ebp # 4-byte Folded Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - adcl 72(%esp), %edi # 4-byte Folded Reload - movl %edi, 56(%esp) # 4-byte Spill - adcl 68(%esp), %esi # 4-byte Folded Reload - movl %esi, 60(%esp) # 4-byte Spill - adcl 36(%esp), %edx # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 160(%esp), %eax - movl 20(%eax), %edi - movl %edi, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 76(%esp) # 4-byte Spill - movl %edi, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 72(%esp) # 4-byte Spill - movl %edi, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %edi, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 96(%esp) # 4-byte Spill - movl %eax, 80(%esp) # 4-byte Spill - movl %edi, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 48(%esp) # 4-byte Spill - movl %edi, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - addl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, 92(%esp) # 4-byte Spill - adcl 80(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 80(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 68(%esp), %ecx # 4-byte Folded Reload - movl 88(%esp), %edi # 4-byte Reload - adcl 72(%esp), %edi # 4-byte Folded Reload - movl 84(%esp), %edx # 4-byte Reload - adcl 76(%esp), %edx # 4-byte Folded Reload - movl %esi, %eax - adcl $0, %eax - movl 100(%esp), %esi # 4-byte Reload - addl %ebx, %esi - movl %esi, 100(%esp) # 4-byte Spill - adcl %ebp, 92(%esp) # 4-byte Folded Spill - movl 52(%esp), %ebx # 4-byte Reload - adcl %ebx, 80(%esp) # 4-byte Folded Spill - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 96(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 88(%esp) # 4-byte Spill - adcl 64(%esp), %edx # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - sbbl %eax, %eax - movl 112(%esp), %ecx # 4-byte Reload - imull %esi, %ecx - movl %ecx, 112(%esp) # 4-byte Spill - andl $1, %eax - movl %eax, 72(%esp) # 4-byte Spill - movl %ecx, %eax - mull 128(%esp) # 
4-byte Folded Reload - movl %edx, %edi - movl %eax, 68(%esp) # 4-byte Spill - movl %ecx, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 64(%esp) # 4-byte Spill - movl %ecx, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, %ebx - movl %eax, 60(%esp) # 4-byte Spill - movl %ecx, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, %esi - movl %ecx, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 52(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - mull 104(%esp) # 4-byte Folded Reload - addl 56(%esp), %eax # 4-byte Folded Reload - adcl 52(%esp), %edx # 4-byte Folded Reload - adcl 60(%esp), %ecx # 4-byte Folded Reload - adcl 64(%esp), %ebx # 4-byte Folded Reload - adcl 68(%esp), %ebp # 4-byte Folded Reload - adcl $0, %edi - addl 100(%esp), %esi # 4-byte Folded Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - adcl 80(%esp), %edx # 4-byte Folded Reload - adcl 96(%esp), %ecx # 4-byte Folded Reload - adcl 88(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 112(%esp) # 4-byte Spill - adcl 84(%esp), %ebp # 4-byte Folded Reload - adcl 76(%esp), %edi # 4-byte Folded Reload - movl %edi, 100(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl %eax, %esi - subl 108(%esp), %esi # 4-byte Folded Reload - movl %esi, 108(%esp) # 4-byte Spill - movl %edx, %esi - sbbl 104(%esp), %esi # 4-byte Folded Reload - movl %esi, 104(%esp) # 4-byte Spill - movl %ecx, %esi - sbbl 116(%esp), %esi # 4-byte Folded Reload - movl %esi, 116(%esp) # 4-byte Spill - movl %ebx, %esi - movl %edi, %ebx - sbbl 120(%esp), %esi # 4-byte Folded Reload - movl %esi, 120(%esp) # 4-byte Spill - movl %ebp, %esi - movl %ebp, %edi - sbbl 124(%esp), %esi # 4-byte Folded Reload - movl %esi, 124(%esp) # 4-byte Spill - movl 100(%esp), %ebp # 4-byte Reload - movl %ebp, %esi - sbbl 128(%esp), %esi # 4-byte Folded Reload - movl %esi, 128(%esp) # 4-byte Spill - sbbl $0, %ebx - andl $1, %ebx - jne .LBB83_2 -# BB#1: - movl 104(%esp), %edx # 4-byte Reload -.LBB83_2: - testb %bl, %bl - jne .LBB83_4 -# BB#3: - movl 108(%esp), %eax # 4-byte Reload -.LBB83_4: - movl 152(%esp), %ebx - movl %eax, (%ebx) - movl %edx, 4(%ebx) - jne .LBB83_6 -# BB#5: - movl 116(%esp), %ecx # 4-byte Reload -.LBB83_6: - movl %ecx, 8(%ebx) - movl 112(%esp), %eax # 4-byte Reload - jne .LBB83_8 -# BB#7: - movl 120(%esp), %eax # 4-byte Reload -.LBB83_8: - movl %eax, 12(%ebx) - jne .LBB83_10 -# BB#9: - movl 124(%esp), %edi # 4-byte Reload -.LBB83_10: - movl %edi, 16(%ebx) - jne .LBB83_12 -# BB#11: - movl 128(%esp), %ebp # 4-byte Reload -.LBB83_12: - movl %ebp, 20(%ebx) - addl $132, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end83: - .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L - - .globl mcl_fp_montNF6L - .align 16, 0x90 - .type mcl_fp_montNF6L,@function -mcl_fp_montNF6L: # @mcl_fp_montNF6L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $132, %esp - movl 156(%esp), %ebx - movl (%ebx), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 160(%esp), %ecx - movl (%ecx), %edi - mull %edi - movl %eax, 72(%esp) # 4-byte Spill - movl %edx, 64(%esp) # 4-byte Spill - movl 164(%esp), %esi - movl -4(%esi), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl %eax, %ecx - imull %edx, %ecx - movl (%esi), %edx - movl %edx, 128(%esp) # 4-byte Spill - movl 20(%esi), %edx - movl %edx, 108(%esp) # 4-byte Spill - movl 16(%esi), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 12(%esi), %eax - movl %eax, 120(%esp) # 
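mcl_fp_mont6L, complete above, is word-serial Montgomery multiplication in the CIOS style: for each word of y it adds x*y[j] into an accumulator, derives m = t[0] * (-p^-1 mod 2^32) (the constant the code loads from -4 off the modulus pointer and feeds to imull), adds m*p so the low word cancels, shifts down one word, and finishes with a conditional subtraction. A sketch under the assumption x, y < p and p odd:

    // mont6 sketches what mcl_fp_mont6L computes: z = x*y*R^-1 mod p,
    // with R = 2^192 and pInv = -p^-1 mod 2^32.
    func mont6(z *[6]uint32, x, y, p *[6]uint32, pInv uint32) {
        var t [8]uint32 // N+2 words, matching the classic CIOS layout
        for j := 0; j < 6; j++ {
            // t += x * y[j]
            var carry uint64
            for i := 0; i < 6; i++ {
                s := uint64(x[i])*uint64(y[j]) + uint64(t[i]) + carry
                t[i] = uint32(s)
                carry = s >> 32
            }
            s := uint64(t[6]) + carry
            t[6] = uint32(s)
            t[7] = uint32(s >> 32)
            // m makes t divisible by 2^32; the low word of m*p[0]+t[0]
            // is zero by construction, so only its carry is kept.
            m := t[0] * pInv
            carry = (uint64(m)*uint64(p[0]) + uint64(t[0])) >> 32
            for i := 1; i < 6; i++ {
                s := uint64(m)*uint64(p[i]) + uint64(t[i]) + carry
                t[i-1] = uint32(s)
                carry = s >> 32
            }
            s = uint64(t[6]) + carry
            t[5] = uint32(s)
            t[6] = t[7] + uint32(s>>32)
        }
        // Final conditional subtraction, as in the tail of mcl_fp_mont6L.
        var r [6]uint32
        var b uint32
        for i := 0; i < 6; i++ {
            r[i], b = bits.Sub32(t[i], p[i], b)
        }
        if t[6] != 0 || b == 0 {
            copy(z[:], r[:])
        } else {
            copy(z[:], t[:6])
        }
    }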
4-byte Spill - movl 8(%esi), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 4(%esi), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 4(%ebx), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 20(%ebx), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 16(%ebx), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 12(%ebx), %ebp - movl %ebp, 84(%esp) # 4-byte Spill - movl 8(%ebx), %ebx - movl %ebx, 88(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edx - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 52(%esp) # 4-byte Spill - movl %ecx, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ecx, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %ecx, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - mull %edi - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %esi, %eax - mull %edi - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ebp, %eax - mull %edi - movl %edx, %ecx - movl %eax, 4(%esp) # 4-byte Spill - movl %ebx, %eax - mull %edi - movl %edx, %ebx - movl %eax, (%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - mull %edi - movl %edx, %ebp - movl %eax, %esi - addl 64(%esp), %esi # 4-byte Folded Reload - adcl (%esp), %ebp # 4-byte Folded Reload - adcl 4(%esp), %ebx # 4-byte Folded Reload - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl 68(%esp), %edx # 4-byte Reload - adcl 12(%esp), %edx # 4-byte Folded Reload - movl 76(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 16(%esp), %edi # 4-byte Reload - addl 72(%esp), %edi # 4-byte Folded Reload - adcl 20(%esp), %esi # 4-byte Folded Reload - adcl 28(%esp), %ebp # 4-byte Folded Reload - adcl 36(%esp), %ebx # 4-byte Folded Reload - adcl 44(%esp), %ecx # 4-byte Folded Reload - adcl 52(%esp), %edx # 4-byte Folded Reload - adcl $0, %eax - addl 24(%esp), %esi # 4-byte Folded Reload - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl 40(%esp), %ebx # 4-byte Folded Reload - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 64(%esp) # 4-byte Spill - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 160(%esp), %eax - movl 4(%eax), %edi - movl %edi, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %edi, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %edi, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %edi, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %edi, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 44(%esp) # 4-byte Spill - movl %edi, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %eax, 40(%esp) # 4-byte Spill - addl 44(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - adcl 
48(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 48(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl 56(%esp), %edi # 4-byte Reload - adcl 32(%esp), %edi # 4-byte Folded Reload - movl 60(%esp), %edx # 4-byte Reload - adcl 36(%esp), %edx # 4-byte Folded Reload - movl 72(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl %esi, 40(%esp) # 4-byte Folded Spill - adcl %ebp, 44(%esp) # 4-byte Folded Spill - adcl %ebx, 48(%esp) # 4-byte Folded Spill - adcl 64(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%esp) # 4-byte Spill - adcl 68(%esp), %edi # 4-byte Folded Reload - movl %edi, 56(%esp) # 4-byte Spill - adcl 76(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - movl %ecx, %ebp - imull 96(%esp), %ebp # 4-byte Folded Reload - movl %ebp, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %ebp, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte Spill - movl %ebp, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, %ebx - movl %ebp, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, %esi - movl %ebp, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, %edi - movl %ebp, %eax - mull 128(%esp) # 4-byte Folded Reload - addl %ecx, %eax - adcl 44(%esp), %edi # 4-byte Folded Reload - movl %esi, %ebp - adcl 48(%esp), %ebp # 4-byte Folded Reload - adcl 52(%esp), %ebx # 4-byte Folded Reload - movl 64(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl 68(%esp), %esi # 4-byte Reload - adcl 60(%esp), %esi # 4-byte Folded Reload - movl 72(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl %edx, %edi - movl %edi, 40(%esp) # 4-byte Spill - adcl 24(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 44(%esp) # 4-byte Spill - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 60(%esp) # 4-byte Spill - adcl 32(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 64(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 68(%esp) # 4-byte Spill - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 160(%esp), %eax - movl 8(%eax), %ecx - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ecx, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, %edi - movl %ecx, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, %ebx - movl %eax, %esi - movl %ecx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - movl %edx, %ebp - addl %esi, %ebp - adcl %edi, %ebx - movl %ebx, %edi - movl 48(%esp), %ecx # 4-byte Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl 52(%esp), %esi # 4-byte Reload - adcl 32(%esp), %esi # 4-byte Folded Reload - movl 56(%esp), %edx # 4-byte Reload - adcl 36(%esp), %edx # 4-byte Folded Reload - movl 76(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 
24(%esp), %ebx # 4-byte Reload - addl 40(%esp), %ebx # 4-byte Folded Reload - adcl 44(%esp), %ebp # 4-byte Folded Reload - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 44(%esp) # 4-byte Spill - adcl 64(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 48(%esp) # 4-byte Spill - adcl 68(%esp), %esi # 4-byte Folded Reload - movl %esi, 52(%esp) # 4-byte Spill - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl %ebx, %edi - movl %ebx, %ecx - imull 96(%esp), %edi # 4-byte Folded Reload - movl %edi, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %edi, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte Spill - movl %edi, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, %ebx - movl %edi, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 60(%esp) # 4-byte Spill - movl %edi, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, %esi - movl %edi, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - addl %ecx, %eax - adcl %ebp, %esi - movl 60(%esp), %edx # 4-byte Reload - adcl 44(%esp), %edx # 4-byte Folded Reload - adcl 48(%esp), %ebx # 4-byte Folded Reload - movl 64(%esp), %edi # 4-byte Reload - adcl 52(%esp), %edi # 4-byte Folded Reload - movl 68(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl 76(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 44(%esp) # 4-byte Spill - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 64(%esp) # 4-byte Spill - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 160(%esp), %eax - movl 12(%eax), %ecx - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ecx, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, %esi - movl %ecx, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, %ebx - movl %ecx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - movl %edx, %ebp - addl %ebx, %ebp - adcl %esi, %edi - movl %edi, %esi - movl 48(%esp), %ecx # 4-byte Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl 52(%esp), %ebx # 4-byte Reload - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl 56(%esp), %edx # 4-byte Reload - adcl 36(%esp), %edx # 4-byte Folded Reload - movl 72(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 24(%esp), %edi # 4-byte Reload - addl 40(%esp), %edi # 4-byte Folded Reload - adcl 60(%esp), %ebp # 4-byte Folded Reload - adcl 44(%esp), %esi # 4-byte Folded Reload - movl %esi, 44(%esp) # 4-byte Spill - adcl 64(%esp), %ecx # 4-byte Folded Reload - movl 
%ecx, 48(%esp) # 4-byte Spill - adcl 68(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 52(%esp) # 4-byte Spill - adcl 76(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 72(%esp) # 4-byte Spill - movl %edi, %esi - movl %edi, %ecx - imull 96(%esp), %esi # 4-byte Folded Reload - movl %esi, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %esi, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte Spill - movl %esi, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, %edi - movl %esi, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 60(%esp) # 4-byte Spill - movl %esi, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, %ebx - movl %esi, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - addl %ecx, %eax - adcl %ebp, %ebx - movl 60(%esp), %edx # 4-byte Reload - adcl 44(%esp), %edx # 4-byte Folded Reload - adcl 48(%esp), %edi # 4-byte Folded Reload - movl 64(%esp), %esi # 4-byte Reload - adcl 52(%esp), %esi # 4-byte Folded Reload - movl 68(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl 72(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl 40(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 40(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl 28(%esp), %edi # 4-byte Folded Reload - movl %edi, 44(%esp) # 4-byte Spill - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 64(%esp) # 4-byte Spill - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 160(%esp), %eax - movl 16(%eax), %ecx - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ecx, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, %edi - movl %ecx, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, %ebp - movl %ecx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - movl %edx, %ebx - addl %ebp, %ebx - adcl %edi, %esi - movl %esi, %edi - movl 48(%esp), %ecx # 4-byte Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl 52(%esp), %ebp # 4-byte Reload - adcl 32(%esp), %ebp # 4-byte Folded Reload - movl 56(%esp), %edx # 4-byte Reload - adcl 36(%esp), %edx # 4-byte Folded Reload - movl 76(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 24(%esp), %esi # 4-byte Reload - addl 40(%esp), %esi # 4-byte Folded Reload - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 40(%esp) # 4-byte Spill - adcl 44(%esp), %edi # 4-byte Folded Reload - adcl 64(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 48(%esp) # 4-byte Spill - adcl 68(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 52(%esp) # 4-byte Spill - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 
%esi, %ebx - movl %esi, %ecx - imull 96(%esp), %ebx # 4-byte Folded Reload - movl %ebx, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 72(%esp) # 4-byte Spill - movl %ebx, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, %esi - movl %ebx, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %ebx, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte Spill - movl %ebx, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, %ebp - movl %ebx, %eax - mull 128(%esp) # 4-byte Folded Reload - addl %ecx, %eax - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl 64(%esp), %ebx # 4-byte Reload - adcl %edi, %ebx - movl 68(%esp), %edi # 4-byte Reload - adcl 48(%esp), %edi # 4-byte Folded Reload - adcl 52(%esp), %esi # 4-byte Folded Reload - movl 72(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl 76(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl %edx, %ebp - movl %ebp, 52(%esp) # 4-byte Spill - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 68(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 56(%esp) # 4-byte Spill - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 72(%esp) # 4-byte Spill - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 160(%esp), %eax - movl 20(%eax), %ecx - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %ecx, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 100(%esp) # 4-byte Spill - movl %eax, 80(%esp) # 4-byte Spill - movl %ecx, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, %edi - movl %ecx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %eax, %ebp - movl %edx, %ebx - addl %edi, %ebx - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, %edi - movl 88(%esp), %edx # 4-byte Reload - adcl 44(%esp), %edx # 4-byte Folded Reload - movl 84(%esp), %ecx # 4-byte Reload - adcl 80(%esp), %ecx # 4-byte Folded Reload - movl 100(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl 60(%esp), %esi # 4-byte Reload - adcl $0, %esi - addl 52(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 52(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - adcl 68(%esp), %edi # 4-byte Folded Reload - movl %edi, 80(%esp) # 4-byte Spill - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - adcl 72(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 84(%esp) # 4-byte Spill - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 96(%esp), %ebx # 4-byte Reload - imull %ebp, %ebx - movl %ebx, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 96(%esp) # 4-byte Spill - movl %eax, 104(%esp) # 4-byte Spill - movl %ebx, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 
92(%esp) # 4-byte Spill - movl %eax, %ebp - movl %ebx, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, %esi - movl %ebx, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, %edi - movl %ebx, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, %ecx - movl %ebx, %eax - mull 112(%esp) # 4-byte Folded Reload - addl 52(%esp), %edi # 4-byte Folded Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - adcl 80(%esp), %ecx # 4-byte Folded Reload - adcl 88(%esp), %esi # 4-byte Folded Reload - adcl 84(%esp), %ebp # 4-byte Folded Reload - movl 104(%esp), %ebx # 4-byte Reload - adcl 100(%esp), %ebx # 4-byte Folded Reload - movl 60(%esp), %edi # 4-byte Reload - adcl $0, %edi - addl 72(%esp), %eax # 4-byte Folded Reload - adcl %edx, %ecx - movl %ecx, 88(%esp) # 4-byte Spill - adcl 68(%esp), %esi # 4-byte Folded Reload - movl %esi, 100(%esp) # 4-byte Spill - adcl 76(%esp), %ebp # 4-byte Folded Reload - adcl 92(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 104(%esp) # 4-byte Spill - adcl 96(%esp), %edi # 4-byte Folded Reload - movl %eax, %edx - subl 128(%esp), %edx # 4-byte Folded Reload - sbbl 112(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %ebx - sbbl 116(%esp), %esi # 4-byte Folded Reload - movl %esi, 116(%esp) # 4-byte Spill - movl %ebp, %ecx - sbbl 120(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 120(%esp) # 4-byte Spill - movl 104(%esp), %esi # 4-byte Reload - sbbl 124(%esp), %esi # 4-byte Folded Reload - movl %esi, 128(%esp) # 4-byte Spill - movl %edi, %ecx - movl %edi, %esi - movl %ecx, %edi - sbbl 108(%esp), %edi # 4-byte Folded Reload - movl %edi, %ecx - sarl $31, %ecx - testl %ecx, %ecx - js .LBB84_2 -# BB#1: - movl %edx, %eax -.LBB84_2: - movl 152(%esp), %ecx - movl %eax, (%ecx) - movl 88(%esp), %eax # 4-byte Reload - js .LBB84_4 -# BB#3: - movl %ebx, %eax -.LBB84_4: - movl %eax, 4(%ecx) - movl %ecx, %ebx - movl %esi, %eax - movl 104(%esp), %ecx # 4-byte Reload - movl 100(%esp), %edx # 4-byte Reload - js .LBB84_6 -# BB#5: - movl 116(%esp), %edx # 4-byte Reload -.LBB84_6: - movl %edx, 8(%ebx) - movl %ebx, %edx - js .LBB84_8 -# BB#7: - movl 120(%esp), %ebp # 4-byte Reload -.LBB84_8: - movl %ebp, 12(%edx) - js .LBB84_10 -# BB#9: - movl 128(%esp), %ecx # 4-byte Reload -.LBB84_10: - movl %ecx, 16(%edx) - js .LBB84_12 -# BB#11: - movl %edi, %eax -.LBB84_12: - movl %eax, 20(%edx) - addl $132, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end84: - .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L - - .globl mcl_fp_montRed6L - .align 16, 0x90 - .type mcl_fp_montRed6L,@function -mcl_fp_montRed6L: # @mcl_fp_montRed6L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $104, %esp - movl 132(%esp), %eax - movl -4(%eax), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl (%eax), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 128(%esp), %ebp - movl (%ebp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - imull %edx, %ecx - movl 20(%eax), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 16(%eax), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 12(%eax), %ebx - movl %ebx, 88(%esp) # 4-byte Spill - movl 8(%eax), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 4(%eax), %edi - movl %edi, 80(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edx - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte 
Spill - movl %ecx, %eax - mull %ebx - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 60(%esp) # 4-byte Spill - movl %ecx, %eax - mull %esi - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edi - movl %edx, %esi - movl %eax, %edi - movl %ecx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %eax, %ebx - addl %edi, %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, %eax - movl 44(%esp), %ecx # 4-byte Reload - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl 48(%esp), %edi # 4-byte Reload - adcl 64(%esp), %edi # 4-byte Folded Reload - movl 52(%esp), %esi # 4-byte Reload - adcl 68(%esp), %esi # 4-byte Folded Reload - movl 56(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 72(%esp), %ebx # 4-byte Folded Reload - movl 36(%esp), %ebx # 4-byte Reload - adcl 4(%ebp), %ebx - adcl 8(%ebp), %eax - movl %eax, 16(%esp) # 4-byte Spill - adcl 12(%ebp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - adcl 16(%ebp), %edi - movl %edi, 48(%esp) # 4-byte Spill - adcl 20(%ebp), %esi - movl %esi, 52(%esp) # 4-byte Spill - adcl 24(%ebp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 44(%ebp), %eax - movl 40(%ebp), %edx - movl 36(%ebp), %esi - movl 32(%ebp), %edi - movl 28(%ebp), %ecx - adcl $0, %ecx - movl %ecx, 12(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 20(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 60(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 64(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 72(%esp) # 4-byte Spill - sbbl %eax, %eax - andl $1, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %ebx, %esi - imull 96(%esp), %esi # 4-byte Folded Reload - movl %esi, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %esi, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %esi, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, (%esp) # 4-byte Spill - movl %esi, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, %ebp - movl %esi, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, %edi - movl %esi, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %eax, %esi - addl %edi, %edx - movl %edx, 24(%esp) # 4-byte Spill - adcl %ebp, %ecx - movl %ecx, %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl (%esp), %eax # 4-byte Folded Reload - movl 32(%esp), %edi # 4-byte Reload - adcl 4(%esp), %edi # 4-byte Folded Reload - movl 36(%esp), %edx # 4-byte Reload - adcl 8(%esp), %edx # 4-byte Folded Reload - movl 40(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl %ebx, %esi - movl 24(%esp), %esi # 4-byte Reload - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 44(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 44(%esp) # 4-byte Spill - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%esp) # 4-byte Spill - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 40(%esp) # 4-byte Spill - adcl $0, 20(%esp) # 4-byte Folded Spill - adcl $0, 60(%esp) # 4-byte Folded Spill - adcl $0, 64(%esp) # 4-byte Folded Spill - adcl $0, 72(%esp) # 4-byte Folded Spill - adcl $0, 68(%esp) # 4-byte Folded Spill - movl %esi, %ebx - imull 96(%esp), %ebx # 4-byte 
Folded Reload - movl %ebx, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ebx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %ebx, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ebx, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 4(%esp) # 4-byte Spill - movl %ebx, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, %ecx - movl %ebx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, %ebp - addl %ecx, %ebp - adcl 4(%esp), %edi # 4-byte Folded Reload - adcl 8(%esp), %esi # 4-byte Folded Reload - movl 48(%esp), %ebx # 4-byte Reload - adcl 12(%esp), %ebx # 4-byte Folded Reload - movl 52(%esp), %edx # 4-byte Reload - adcl 16(%esp), %edx # 4-byte Folded Reload - movl 56(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl 24(%esp), %eax # 4-byte Folded Reload - adcl 44(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 48(%esp) # 4-byte Spill - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 56(%esp) # 4-byte Spill - adcl $0, 60(%esp) # 4-byte Folded Spill - adcl $0, 64(%esp) # 4-byte Folded Spill - adcl $0, 72(%esp) # 4-byte Folded Spill - adcl $0, 68(%esp) # 4-byte Folded Spill - movl %ebp, %ecx - imull 96(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %ecx, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ecx, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, %ebx - movl %ecx, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %eax, %edi - addl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl %ebx, %esi - movl %esi, %ebx - movl 32(%esp), %ecx # 4-byte Reload - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl 36(%esp), %eax # 4-byte Reload - adcl 12(%esp), %eax # 4-byte Folded Reload - movl 40(%esp), %esi # 4-byte Reload - adcl 20(%esp), %esi # 4-byte Folded Reload - movl 44(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl %ebp, %edi - movl 28(%esp), %edi # 4-byte Reload - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 32(%esp) # 4-byte Spill - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - adcl 56(%esp), %esi # 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - adcl 60(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - adcl $0, 64(%esp) # 4-byte Folded Spill - adcl $0, 72(%esp) # 4-byte Folded Spill - adcl $0, 68(%esp) # 4-byte Folded Spill - movl %edi, %esi - 
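# Editor's note: the imull against 96(%esp) just above begins another word-level
# step of Montgomery reduction inside mcl_fp_montRed6L. From the prologue,
# 96(%esp) appears to hold n' = -p^{-1} mod 2^32 (loaded from -4 bytes before
# the modulus struct), and the spill slots 92/80/84/88/100/76(%esp) hold the six
# limbs of the modulus p. A rough C sketch of one such step, assuming t[] is the
# running partially reduced value (sub12/limb helpers are illustrative only):
#
#   uint32_t m = t[0] * nprime;            /* multiplier, mod 2^32          */
#   uint64_t c = 0;
#   for (int j = 0; j < 6; j++) {          /* t += m * p, limb by limb      */
#       uint64_t s = (uint64_t)m * p[j] + t[j] + c;
#       t[j] = (uint32_t)s;
#       c = s >> 32;
#   }
#   /* t[0] is now zero, so the whole value shifts down one 32-bit limb,
#      and the carry c folds into the next limb up                          */
#
# The mull/adcl chain that follows is this inner loop fully unrolled for six limbs.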
imull 96(%esp), %esi # 4-byte Folded Reload - movl %esi, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %esi, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %esi, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %esi, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 8(%esp) # 4-byte Spill - movl %esi, %eax - mull 80(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, %ecx - movl %esi, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %eax, %ebx - addl %ecx, %edx - movl %edx, %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 8(%esp), %eax # 4-byte Folded Reload - adcl 12(%esp), %edi # 4-byte Folded Reload - movl 52(%esp), %ecx # 4-byte Reload - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl 56(%esp), %esi # 4-byte Reload - adcl 20(%esp), %esi # 4-byte Folded Reload - movl 60(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 28(%esp), %ebx # 4-byte Folded Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - adcl 36(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%esp) # 4-byte Spill - adcl 44(%esp), %esi # 4-byte Folded Reload - movl %esi, 56(%esp) # 4-byte Spill - adcl 64(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl $0, 72(%esp) # 4-byte Folded Spill - adcl $0, 68(%esp) # 4-byte Folded Spill - movl 96(%esp), %ebx # 4-byte Reload - imull %ebp, %ebx - movl %ebx, 96(%esp) # 4-byte Spill - movl %ebp, %esi - movl %ebx, %eax - mull 76(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl %ebx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 40(%esp) # 4-byte Spill - movl %ebx, %eax - mull 88(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 32(%esp) # 4-byte Spill - movl %ebx, %eax - mull 84(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 24(%esp) # 4-byte Spill - movl %ebx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, %ebx - movl %eax, 28(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - mull 80(%esp) # 4-byte Folded Reload - addl %ebx, %eax - adcl 24(%esp), %edx # 4-byte Folded Reload - adcl 32(%esp), %ecx # 4-byte Folded Reload - adcl 40(%esp), %edi # 4-byte Folded Reload - adcl 44(%esp), %ebp # 4-byte Folded Reload - movl 64(%esp), %ebx # 4-byte Reload - adcl $0, %ebx - addl %esi, 28(%esp) # 4-byte Folded Spill - adcl 48(%esp), %eax # 4-byte Folded Reload - adcl 36(%esp), %edx # 4-byte Folded Reload - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%esp) # 4-byte Spill - adcl 56(%esp), %edi # 4-byte Folded Reload - movl %edi, 96(%esp) # 4-byte Spill - adcl 60(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 60(%esp) # 4-byte Spill - adcl 72(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - movl 68(%esp), %ebx # 4-byte Reload - adcl $0, %ebx - movl %eax, %esi - subl 92(%esp), %esi # 4-byte Folded Reload - movl %esi, 72(%esp) # 4-byte Spill - movl %edx, %esi - sbbl 80(%esp), %esi # 4-byte Folded Reload - movl %esi, 80(%esp) # 4-byte Spill - sbbl 84(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 84(%esp) # 4-byte Spill - sbbl 88(%esp), %edi # 4-byte 
Folded Reload - movl %edi, 88(%esp) # 4-byte Spill - sbbl 100(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 92(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - movl %edi, %esi - sbbl 76(%esp), %esi # 4-byte Folded Reload - movl %esi, 100(%esp) # 4-byte Spill - sbbl $0, %ebx - andl $1, %ebx - jne .LBB85_2 -# BB#1: - movl 80(%esp), %edx # 4-byte Reload -.LBB85_2: - testb %bl, %bl - jne .LBB85_4 -# BB#3: - movl 72(%esp), %eax # 4-byte Reload -.LBB85_4: - movl 124(%esp), %ebx - movl %eax, (%ebx) - movl %edx, 4(%ebx) - movl 52(%esp), %ecx # 4-byte Reload - jne .LBB85_6 -# BB#5: - movl 84(%esp), %ecx # 4-byte Reload -.LBB85_6: - movl %ecx, 8(%ebx) - movl %edi, %ecx - movl 60(%esp), %edi # 4-byte Reload - movl 96(%esp), %esi # 4-byte Reload - jne .LBB85_8 -# BB#7: - movl 88(%esp), %esi # 4-byte Reload -.LBB85_8: - movl %esi, 12(%ebx) - jne .LBB85_10 -# BB#9: - movl 92(%esp), %edi # 4-byte Reload -.LBB85_10: - movl %edi, 16(%ebx) - jne .LBB85_12 -# BB#11: - movl 100(%esp), %ecx # 4-byte Reload -.LBB85_12: - movl %ecx, 20(%ebx) - addl $104, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end85: - .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L - - .globl mcl_fp_addPre6L - .align 16, 0x90 - .type mcl_fp_addPre6L,@function -mcl_fp_addPre6L: # @mcl_fp_addPre6L -# BB#0: - pushl %esi - movl 16(%esp), %eax - movl (%eax), %ecx - movl 12(%esp), %edx - addl (%edx), %ecx - movl 8(%esp), %esi - movl %ecx, (%esi) - movl 4(%eax), %ecx - adcl 4(%edx), %ecx - movl %ecx, 4(%esi) - movl 8(%eax), %ecx - adcl 8(%edx), %ecx - movl %ecx, 8(%esi) - movl 12(%edx), %ecx - adcl 12(%eax), %ecx - movl %ecx, 12(%esi) - movl 16(%edx), %ecx - adcl 16(%eax), %ecx - movl %ecx, 16(%esi) - movl 20(%eax), %eax - movl 20(%edx), %ecx - adcl %eax, %ecx - movl %ecx, 20(%esi) - sbbl %eax, %eax - andl $1, %eax - popl %esi - retl -.Lfunc_end86: - .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L - - .globl mcl_fp_subPre6L - .align 16, 0x90 - .type mcl_fp_subPre6L,@function -mcl_fp_subPre6L: # @mcl_fp_subPre6L -# BB#0: - pushl %edi - pushl %esi - movl 16(%esp), %ecx - movl (%ecx), %edx - xorl %eax, %eax - movl 20(%esp), %esi - subl (%esi), %edx - movl 12(%esp), %edi - movl %edx, (%edi) - movl 4(%ecx), %edx - sbbl 4(%esi), %edx - movl %edx, 4(%edi) - movl 8(%ecx), %edx - sbbl 8(%esi), %edx - movl %edx, 8(%edi) - movl 12(%ecx), %edx - sbbl 12(%esi), %edx - movl %edx, 12(%edi) - movl 16(%ecx), %edx - sbbl 16(%esi), %edx - movl %edx, 16(%edi) - movl 20(%esi), %edx - movl 20(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 20(%edi) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - retl -.Lfunc_end87: - .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L - - .globl mcl_fp_shr1_6L - .align 16, 0x90 - .type mcl_fp_shr1_6L,@function -mcl_fp_shr1_6L: # @mcl_fp_shr1_6L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl 20(%eax), %ecx - movl 16(%eax), %edx - movl 12(%eax), %esi - movl 8(%eax), %edi - movl (%eax), %ebx - movl 4(%eax), %eax - shrdl $1, %eax, %ebx - movl 20(%esp), %ebp - movl %ebx, (%ebp) - shrdl $1, %edi, %eax - movl %eax, 4(%ebp) - shrdl $1, %esi, %edi - movl %edi, 8(%ebp) - shrdl $1, %edx, %esi - movl %esi, 12(%ebp) - shrdl $1, %ecx, %edx - movl %edx, 16(%ebp) - shrl %ecx - movl %ecx, 20(%ebp) - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end88: - .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L - - .globl mcl_fp_add6L - .align 16, 0x90 - .type mcl_fp_add6L,@function -mcl_fp_add6L: # @mcl_fp_add6L -# BB#0: - pushl %ebp - pushl %ebx - 
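# Editor's note: mcl_fp_add6L (whose prologue begins above) is a 6-limb modular
# addition. It computes s = x + y with an adcl carry chain, stores s to the
# output, then speculatively computes s - p with an sbbl chain; if that
# subtraction does not underflow (taking the addition carry into account), the
# reduced value overwrites the stored sum. Roughly, with add6/sub6 standing in
# as hypothetical limb-array helpers for the unrolled carry chains:
#
#   uint32_t s[6], d[6];
#   int carry  = add6(s, x, y);            /* s = x + y                     */
#   int borrow = sub6(d, s, p);            /* d = s - p                     */
#   if (borrow && !carry)
#       keep(s);                           /* x + y < p: nothing to reduce  */
#   else
#       keep(d);                           /* otherwise keep s - p          */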
pushl %edi - pushl %esi - subl $12, %esp - movl 40(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %ebp - movl 36(%esp), %ebx - addl (%ebx), %edx - adcl 4(%ebx), %ebp - movl 8(%eax), %ecx - adcl 8(%ebx), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl %ecx, %esi - movl 12(%ebx), %ecx - movl 16(%ebx), %edi - adcl 12(%eax), %ecx - adcl 16(%eax), %edi - movl 20(%ebx), %ebx - adcl 20(%eax), %ebx - movl 32(%esp), %eax - movl %edx, (%eax) - movl %ebp, 4(%eax) - movl %esi, 8(%eax) - movl %ecx, 12(%eax) - movl %edi, 16(%eax) - movl %ebx, 20(%eax) - sbbl %eax, %eax - andl $1, %eax - movl 44(%esp), %esi - subl (%esi), %edx - movl %edx, (%esp) # 4-byte Spill - movl 8(%esp), %edx # 4-byte Reload - movl 44(%esp), %esi - sbbl 4(%esi), %ebp - movl %ebp, 4(%esp) # 4-byte Spill - movl %ecx, %ebp - sbbl 8(%esi), %edx - sbbl 12(%esi), %ebp - sbbl 16(%esi), %edi - sbbl 20(%esi), %ebx - sbbl $0, %eax - testb $1, %al - jne .LBB89_2 -# BB#1: # %nocarry - movl (%esp), %eax # 4-byte Reload - movl 32(%esp), %ecx - movl %eax, (%ecx) - movl 4(%esp), %eax # 4-byte Reload - movl %eax, 4(%ecx) - movl %edx, 8(%ecx) - movl %ebp, 12(%ecx) - movl %edi, 16(%ecx) - movl %ebx, 20(%ecx) -.LBB89_2: # %carry - addl $12, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end89: - .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L - - .globl mcl_fp_addNF6L - .align 16, 0x90 - .type mcl_fp_addNF6L,@function -mcl_fp_addNF6L: # @mcl_fp_addNF6L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $40, %esp - movl 68(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %ecx - movl 64(%esp), %ebp - addl (%ebp), %edx - movl %edx, 16(%esp) # 4-byte Spill - movl %edx, %ebx - adcl 4(%ebp), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 20(%eax), %edx - movl 16(%eax), %esi - movl 12(%eax), %edi - movl 8(%eax), %eax - adcl 8(%ebp), %eax - movl %eax, 24(%esp) # 4-byte Spill - adcl 12(%ebp), %edi - movl %edi, 28(%esp) # 4-byte Spill - adcl 16(%ebp), %esi - movl %esi, 32(%esp) # 4-byte Spill - adcl 20(%ebp), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl %ebx, %ebp - movl 72(%esp), %ebx - subl (%ebx), %ebp - movl %ebp, (%esp) # 4-byte Spill - movl %ecx, %ebp - movl 72(%esp), %ecx - sbbl 4(%ecx), %ebp - movl %ebp, 4(%esp) # 4-byte Spill - sbbl 8(%ecx), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - sbbl 12(%ecx), %edi - movl %edi, 12(%esp) # 4-byte Spill - movl %esi, %edi - sbbl 16(%ecx), %edi - movl %edx, %esi - sbbl 20(%ecx), %esi - movl %esi, %ebx - sarl $31, %ebx - testl %ebx, %ebx - js .LBB90_2 -# BB#1: - movl (%esp), %eax # 4-byte Reload -.LBB90_2: - movl 60(%esp), %ebx - movl %eax, (%ebx) - movl 20(%esp), %ecx # 4-byte Reload - js .LBB90_4 -# BB#3: - movl 4(%esp), %ecx # 4-byte Reload -.LBB90_4: - movl %ecx, 4(%ebx) - movl 36(%esp), %eax # 4-byte Reload - movl 28(%esp), %edx # 4-byte Reload - movl 24(%esp), %ecx # 4-byte Reload - js .LBB90_6 -# BB#5: - movl 8(%esp), %ecx # 4-byte Reload -.LBB90_6: - movl %ecx, 8(%ebx) - movl 32(%esp), %ecx # 4-byte Reload - js .LBB90_8 -# BB#7: - movl 12(%esp), %edx # 4-byte Reload -.LBB90_8: - movl %edx, 12(%ebx) - js .LBB90_10 -# BB#9: - movl %edi, %ecx -.LBB90_10: - movl %ecx, 16(%ebx) - js .LBB90_12 -# BB#11: - movl %esi, %eax -.LBB90_12: - movl %eax, 20(%ebx) - addl $40, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end90: - .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L - - .globl mcl_fp_sub6L - .align 16, 0x90 - .type mcl_fp_sub6L,@function -mcl_fp_sub6L: # @mcl_fp_sub6L -# BB#0: - pushl %ebp - pushl %ebx - pushl 
%edi - pushl %esi - subl $16, %esp - movl 40(%esp), %ebx - movl (%ebx), %esi - movl 4(%ebx), %edi - movl 44(%esp), %ecx - subl (%ecx), %esi - sbbl 4(%ecx), %edi - movl %edi, (%esp) # 4-byte Spill - movl 8(%ebx), %eax - sbbl 8(%ecx), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 12(%ebx), %eax - sbbl 12(%ecx), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 16(%ebx), %ebp - sbbl 16(%ecx), %ebp - movl %ebp, 8(%esp) # 4-byte Spill - movl 20(%ebx), %edx - sbbl 20(%ecx), %edx - movl $0, %ecx - sbbl $0, %ecx - testb $1, %cl - movl 36(%esp), %ebx - movl %esi, (%ebx) - movl %edi, 4(%ebx) - movl 12(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl %eax, 12(%ebx) - movl %ebp, 16(%ebx) - movl %edx, 20(%ebx) - je .LBB91_2 -# BB#1: # %carry - movl 48(%esp), %ecx - addl (%ecx), %esi - movl %esi, (%ebx) - movl (%esp), %eax # 4-byte Reload - adcl 4(%ecx), %eax - adcl 8(%ecx), %edi - movl %eax, 4(%ebx) - movl 12(%ecx), %eax - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %edi, 8(%ebx) - movl %eax, 12(%ebx) - movl 16(%ecx), %eax - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, 16(%ebx) - movl 20(%ecx), %eax - adcl %edx, %eax - movl %eax, 20(%ebx) -.LBB91_2: # %nocarry - addl $16, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end91: - .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L - - .globl mcl_fp_subNF6L - .align 16, 0x90 - .type mcl_fp_subNF6L,@function -mcl_fp_subNF6L: # @mcl_fp_subNF6L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $24, %esp - movl 48(%esp), %ebx - movl 20(%ebx), %esi - movl (%ebx), %ecx - movl 4(%ebx), %eax - movl 52(%esp), %ebp - subl (%ebp), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - sbbl 4(%ebp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 16(%ebx), %eax - movl 12(%ebx), %ecx - movl 8(%ebx), %edx - sbbl 8(%ebp), %edx - movl %edx, 4(%esp) # 4-byte Spill - sbbl 12(%ebp), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - sbbl 16(%ebp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl %esi, %edx - sbbl 20(%ebp), %edx - movl %edx, (%esp) # 4-byte Spill - movl %edx, %ebp - sarl $31, %ebp - movl %ebp, %ecx - addl %ecx, %ecx - movl %ebp, %eax - adcl %eax, %eax - shrl $31, %edx - orl %ecx, %edx - movl 56(%esp), %ebx - andl 4(%ebx), %eax - andl (%ebx), %edx - movl 20(%ebx), %edi - andl %ebp, %edi - movl 16(%ebx), %esi - andl %ebp, %esi - movl 12(%ebx), %ecx - andl %ebp, %ecx - andl 8(%ebx), %ebp - addl 8(%esp), %edx # 4-byte Folded Reload - adcl 16(%esp), %eax # 4-byte Folded Reload - movl 44(%esp), %ebx - movl %edx, (%ebx) - adcl 4(%esp), %ebp # 4-byte Folded Reload - movl %eax, 4(%ebx) - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ebp, 8(%ebx) - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %ecx, 12(%ebx) - movl %esi, 16(%ebx) - adcl (%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%ebx) - addl $24, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end92: - .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L - - .globl mcl_fpDbl_add6L - .align 16, 0x90 - .type mcl_fpDbl_add6L,@function -mcl_fpDbl_add6L: # @mcl_fpDbl_add6L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $36, %esp - movl 64(%esp), %edx - movl 60(%esp), %ecx - movl 12(%ecx), %esi - movl 16(%ecx), %eax - movl 8(%edx), %edi - movl (%edx), %ebx - addl (%ecx), %ebx - movl 56(%esp), %ebp - movl %ebx, (%ebp) - movl 4(%edx), %ebx - adcl 4(%ecx), %ebx - adcl 8(%ecx), %edi - adcl 12(%edx), %esi - adcl 16(%edx), %eax - movl %ebx, 4(%ebp) - movl %edx, %ebx - movl 32(%ebx), %edx - movl %edx, 32(%esp) # 4-byte Spill - movl %edi, 
8(%ebp) - movl 20(%ebx), %edi - movl %esi, 12(%ebp) - movl 20(%ecx), %esi - adcl %edi, %esi - movl 24(%ebx), %edi - movl %eax, 16(%ebp) - movl 24(%ecx), %edx - adcl %edi, %edx - movl %edx, 20(%esp) # 4-byte Spill - movl 28(%ebx), %edi - movl %esi, 20(%ebp) - movl 28(%ecx), %eax - adcl %edi, %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 32(%ecx), %ebp - adcl 32(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 24(%esp) # 4-byte Spill - movl 36(%ebx), %esi - movl %ebx, %edi - movl 36(%ecx), %ebx - adcl %esi, %ebx - movl %ebx, 28(%esp) # 4-byte Spill - movl 40(%edi), %esi - movl 40(%ecx), %edi - adcl %esi, %edi - movl %edi, 32(%esp) # 4-byte Spill - movl 64(%esp), %esi - movl 44(%esi), %esi - movl 44(%ecx), %ecx - adcl %esi, %ecx - movl %ecx, 12(%esp) # 4-byte Spill - sbbl %ecx, %ecx - andl $1, %ecx - movl 68(%esp), %esi - subl (%esi), %edx - movl %edx, 4(%esp) # 4-byte Spill - movl 68(%esp), %edx - sbbl 4(%edx), %eax - movl %eax, (%esp) # 4-byte Spill - sbbl 8(%edx), %ebp - movl %ebp, 8(%esp) # 4-byte Spill - movl %ebx, %ebp - sbbl 12(%edx), %ebp - movl %edi, %ebx - movl 12(%esp), %edi # 4-byte Reload - sbbl 16(%edx), %ebx - movl %edi, %eax - sbbl 20(%edx), %eax - sbbl $0, %ecx - andl $1, %ecx - jne .LBB93_2 -# BB#1: - movl %eax, %edi -.LBB93_2: - testb %cl, %cl - movl 20(%esp), %ecx # 4-byte Reload - movl 16(%esp), %edx # 4-byte Reload - jne .LBB93_4 -# BB#3: - movl (%esp), %edx # 4-byte Reload - movl 4(%esp), %ecx # 4-byte Reload -.LBB93_4: - movl 56(%esp), %eax - movl %ecx, 24(%eax) - movl %edx, 28(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl 24(%esp), %edx # 4-byte Reload - jne .LBB93_6 -# BB#5: - movl 8(%esp), %edx # 4-byte Reload -.LBB93_6: - movl %edx, 32(%eax) - movl 28(%esp), %edx # 4-byte Reload - jne .LBB93_8 -# BB#7: - movl %ebp, %edx -.LBB93_8: - movl %edx, 36(%eax) - jne .LBB93_10 -# BB#9: - movl %ebx, %ecx -.LBB93_10: - movl %ecx, 40(%eax) - movl %edi, 44(%eax) - addl $36, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end93: - .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L - - .globl mcl_fpDbl_sub6L - .align 16, 0x90 - .type mcl_fpDbl_sub6L,@function -mcl_fpDbl_sub6L: # @mcl_fpDbl_sub6L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $24, %esp - movl 48(%esp), %edx - movl (%edx), %eax - movl 4(%edx), %edi - movl 52(%esp), %esi - subl (%esi), %eax - sbbl 4(%esi), %edi - movl 8(%edx), %ebx - sbbl 8(%esi), %ebx - movl 44(%esp), %ecx - movl %eax, (%ecx) - movl 12(%edx), %eax - sbbl 12(%esi), %eax - movl %edi, 4(%ecx) - movl 16(%edx), %edi - sbbl 16(%esi), %edi - movl %ebx, 8(%ecx) - movl 20(%esi), %ebx - movl %eax, 12(%ecx) - movl 20(%edx), %eax - sbbl %ebx, %eax - movl 24(%esi), %ebx - movl %edi, 16(%ecx) - movl 24(%edx), %edi - sbbl %ebx, %edi - movl %edi, 8(%esp) # 4-byte Spill - movl 28(%esi), %edi - movl %eax, 20(%ecx) - movl 28(%edx), %eax - sbbl %edi, %eax - movl %eax, (%esp) # 4-byte Spill - movl 32(%esi), %edi - movl 32(%edx), %eax - sbbl %edi, %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 36(%esi), %edi - movl 36(%edx), %eax - sbbl %edi, %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 40(%esi), %edi - movl 40(%edx), %eax - sbbl %edi, %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 44(%esi), %esi - movl 44(%edx), %eax - sbbl %esi, %eax - movl %eax, 20(%esp) # 4-byte Spill - movl $0, %ebx - sbbl $0, %ebx - andl $1, %ebx - movl 56(%esp), %eax - jne .LBB94_1 -# BB#2: - xorl %edx, %edx - jmp .LBB94_3 -.LBB94_1: - movl 20(%eax), %edx -.LBB94_3: - testb %bl, %bl - jne .LBB94_4 -# BB#5: - movl $0, %esi - movl $0, %edi - 
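# Editor's note: this branchy block in mcl_fpDbl_sub6L selects, limb by limb,
# either 0 or a limb of the modulus p depending on the final borrow of the
# 12-limb subtraction; the selected limbs are then added back into the high
# half of the result. A sketch, with sub12/add6 as illustrative helpers:
#
#   int borrow = sub12(r, x, y);           /* r = x - y over 12 limbs       */
#   const uint32_t *fix = borrow ? p : zeros;
#   add6(r + 6, r + 6, fix);               /* high half += p, or += 0       */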
jmp .LBB94_6 -.LBB94_4: - movl (%eax), %edi - movl 4(%eax), %esi -.LBB94_6: - jne .LBB94_7 -# BB#8: - movl $0, %ebx - jmp .LBB94_9 -.LBB94_7: - movl 16(%eax), %ebx -.LBB94_9: - jne .LBB94_10 -# BB#11: - movl $0, %ebp - jmp .LBB94_12 -.LBB94_10: - movl 12(%eax), %ebp -.LBB94_12: - jne .LBB94_13 -# BB#14: - xorl %eax, %eax - jmp .LBB94_15 -.LBB94_13: - movl 8(%eax), %eax -.LBB94_15: - addl 8(%esp), %edi # 4-byte Folded Reload - adcl (%esp), %esi # 4-byte Folded Reload - movl %edi, 24(%ecx) - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %esi, 28(%ecx) - adcl 12(%esp), %ebp # 4-byte Folded Reload - movl %eax, 32(%ecx) - adcl 16(%esp), %ebx # 4-byte Folded Reload - movl %ebp, 36(%ecx) - movl %ebx, 40(%ecx) - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 44(%ecx) - addl $24, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end94: - .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L - - .globl mcl_fp_mulUnitPre7L - .align 16, 0x90 - .type mcl_fp_mulUnitPre7L,@function -mcl_fp_mulUnitPre7L: # @mcl_fp_mulUnitPre7L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $36, %esp - movl 64(%esp), %esi - movl 60(%esp), %ebx - movl %esi, %eax - mull 24(%ebx) - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %esi, %eax - mull 20(%ebx) - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %esi, %eax - mull 16(%ebx) - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %esi, %eax - mull 12(%ebx) - movl %edx, %ebp - movl %eax, 8(%esp) # 4-byte Spill - movl %esi, %eax - mull 8(%ebx) - movl %edx, %ecx - movl %eax, 4(%esp) # 4-byte Spill - movl %esi, %eax - mull 4(%ebx) - movl %edx, %edi - movl %eax, (%esp) # 4-byte Spill - movl %esi, %eax - mull (%ebx) - movl 56(%esp), %esi - movl %eax, (%esi) - addl (%esp), %edx # 4-byte Folded Reload - movl %edx, 4(%esi) - adcl 4(%esp), %edi # 4-byte Folded Reload - movl %edi, 8(%esi) - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 12(%esi) - adcl 12(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 16(%esi) - movl 16(%esp), %eax # 4-byte Reload - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%esi) - movl 24(%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%esi) - movl 32(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 28(%esi) - addl $36, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end95: - .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L - - .globl mcl_fpDbl_mulPre7L - .align 16, 0x90 - .type mcl_fpDbl_mulPre7L,@function -mcl_fpDbl_mulPre7L: # @mcl_fpDbl_mulPre7L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $100, %esp - movl 124(%esp), %ebx - movl (%ebx), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 128(%esp), %ecx - movl (%ecx), %edi - movl %ecx, %ebp - mull %edi - movl %edx, 96(%esp) # 4-byte Spill - movl 120(%esp), %ecx - movl %eax, (%ecx) - movl 4(%ebx), %ecx - movl 8(%ebx), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 12(%ebx), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 16(%ebx), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 20(%ebx), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 24(%ebx), %ebx - movl %ebx, 80(%esp) # 4-byte Spill - movl 4(%ebp), %ebp - movl %ecx, %eax - mull %ebp - movl %edx, 92(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - mull %ebp - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 52(%esp) # 4-byte Spill - movl %ecx, %eax - 
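# Editor's note: mcl_fpDbl_mulPre7L (entered above) is a plain schoolbook
# product of two 7-limb operands into a 14-limb result, with no modular
# reduction (the "Pre" suffix). Each one-word mull produces a 64-bit partial
# product whose low and high halves are folded into the running row with adcl.
# In outline:
#
#   uint64_t acc;
#   for (int i = 0; i < 7; i++) {          /* one row per limb of y         */
#       uint32_t c = 0;
#       for (int j = 0; j < 7; j++) {
#           acc = (uint64_t)x[j] * y[i] + r[i + j] + c;
#           r[i + j] = (uint32_t)acc;
#           c = (uint32_t)(acc >> 32);
#       }
#       r[i + 7] = c;                      /* row carry out                 */
#   }
#
# The assembly unrolls both loops completely, keeping the row in registers and
# 4-byte spill slots instead of an r[] array.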
mull %edi - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ebx, %eax - mull %ebp - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 60(%esp) # 4-byte Spill - movl %esi, %eax - mull %ebp - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl 4(%esp), %ebx # 4-byte Reload - movl %ebx, %eax - mull %ebp - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl 8(%esp), %esi # 4-byte Reload - movl %esi, %eax - mull %ebp - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, %eax - mull %ebp - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - mull %edi - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - mull %edi - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %ebx, %eax - mull %edi - movl %edx, %ebx - movl %eax, 4(%esp) # 4-byte Spill - movl %esi, %eax - mull %edi - movl %edx, %ebp - movl %eax, 8(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edi - movl %edx, %ecx - movl 24(%esp), %esi # 4-byte Reload - addl 96(%esp), %esi # 4-byte Folded Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl 8(%esp), %ecx # 4-byte Folded Reload - adcl 4(%esp), %ebp # 4-byte Folded Reload - movl %ebx, %edi - adcl 12(%esp), %edi # 4-byte Folded Reload - movl 76(%esp), %edx # 4-byte Reload - adcl 20(%esp), %edx # 4-byte Folded Reload - movl 80(%esp), %ebx # 4-byte Reload - adcl $0, %ebx - addl 52(%esp), %esi # 4-byte Folded Reload - movl 120(%esp), %eax - movl %esi, 4(%eax) - movl 96(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl %ecx, %eax - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %ebp, %ecx - adcl 36(%esp), %ecx # 4-byte Folded Reload - adcl 40(%esp), %edi # 4-byte Folded Reload - adcl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, %esi - adcl 60(%esp), %ebx # 4-byte Folded Reload - sbbl %edx, %edx - andl $1, %edx - movl 96(%esp), %ebp # 4-byte Reload - addl 84(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 96(%esp) # 4-byte Spill - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 44(%esp) # 4-byte Spill - adcl 56(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - adcl 68(%esp), %esi # 4-byte Folded Reload - movl %esi, 76(%esp) # 4-byte Spill - adcl 72(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 80(%esp) # 4-byte Spill - adcl 88(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - movl 124(%esp), %esi - movl 24(%esi), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 128(%esp), %eax - movl 8(%eax), %edi - movl %ecx, %eax - mull %edi - movl %eax, 84(%esp) # 4-byte Spill - movl %edx, 68(%esp) # 4-byte Spill - movl 20(%esi), %eax - movl %eax, 12(%esp) # 4-byte Spill - mull %edi - movl %eax, 92(%esp) # 4-byte Spill - movl %edx, 64(%esp) # 4-byte Spill - movl 16(%esi), %eax - movl %eax, 8(%esp) # 4-byte Spill - mull %edi - movl %eax, 88(%esp) # 4-byte Spill - movl %edx, 60(%esp) # 4-byte Spill - movl 12(%esi), %eax - movl %eax, 40(%esp) # 4-byte Spill - mull %edi - movl %eax, %ebp - movl %edx, 56(%esp) # 4-byte Spill - movl 8(%esi), %eax - movl %eax, 36(%esp) # 4-byte Spill - mull %edi - movl %eax, 72(%esp) # 4-byte Spill - movl %edx, 52(%esp) # 4-byte 
Spill - movl (%esi), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 4(%esi), %eax - movl %eax, (%esp) # 4-byte Spill - mull %edi - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, %ebx - movl %ecx, %eax - mull %edi - movl %edx, 28(%esp) # 4-byte Spill - addl 96(%esp), %eax # 4-byte Folded Reload - movl 120(%esp), %edx - movl %eax, 8(%edx) - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl 72(%esp), %esi # 4-byte Reload - adcl 44(%esp), %esi # 4-byte Folded Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 88(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 92(%esp) # 4-byte Folded Spill - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 84(%esp) # 4-byte Folded Spill - sbbl %edi, %edi - movl 128(%esp), %eax - movl 12(%eax), %ecx - movl 16(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 96(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl 8(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl (%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl 4(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - mull %ecx - movl %eax, (%esp) # 4-byte Spill - movl %edx, 36(%esp) # 4-byte Spill - andl $1, %edi - addl 28(%esp), %ebx # 4-byte Folded Reload - adcl 48(%esp), %esi # 4-byte Folded Reload - movl %esi, 72(%esp) # 4-byte Spill - movl %ebp, %esi - adcl 52(%esp), %esi # 4-byte Folded Reload - movl 88(%esp), %edx # 4-byte Reload - adcl 56(%esp), %edx # 4-byte Folded Reload - movl 92(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl 84(%esp), %ecx # 4-byte Reload - adcl 64(%esp), %ecx # 4-byte Folded Reload - adcl 68(%esp), %edi # 4-byte Folded Reload - addl 4(%esp), %ebx # 4-byte Folded Reload - movl 120(%esp), %ebp - movl %ebx, 12(%ebp) - movl 72(%esp), %ebp # 4-byte Reload - adcl 12(%esp), %ebp # 4-byte Folded Reload - adcl (%esp), %esi # 4-byte Folded Reload - movl %esi, %ebx - adcl 8(%esp), %edx # 4-byte Folded Reload - movl %edx, %esi - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, %edx - adcl 24(%esp), %ecx # 4-byte Folded Reload - adcl 44(%esp), %edi # 4-byte Folded Reload - sbbl %eax, %eax - andl $1, %eax - addl 20(%esp), %ebp # 4-byte Folded Reload - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 88(%esp) # 4-byte Spill - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 92(%esp) # 4-byte Spill - adcl 76(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 84(%esp) # 4-byte Spill - adcl 80(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%esp) # 4-byte Spill - movl 124(%esp), %ebx - movl 24(%ebx), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 128(%esp), %eax - movl 16(%eax), %ecx - movl %edx, %eax - mull %ecx - movl %eax, 80(%esp) # 4-byte Spill - movl %edx, 60(%esp) # 4-byte Spill - movl 20(%ebx), %eax - movl %eax, 56(%esp) # 4-byte Spill - mull %ecx - movl %eax, 76(%esp) # 4-byte Spill - movl %edx, 
32(%esp) # 4-byte Spill - movl 16(%ebx), %eax - movl %eax, 52(%esp) # 4-byte Spill - mull %ecx - movl %eax, 72(%esp) # 4-byte Spill - movl %edx, 16(%esp) # 4-byte Spill - movl 12(%ebx), %eax - movl %eax, 48(%esp) # 4-byte Spill - mull %ecx - movl %eax, 68(%esp) # 4-byte Spill - movl %edx, 12(%esp) # 4-byte Spill - movl 8(%ebx), %eax - movl %eax, 44(%esp) # 4-byte Spill - mull %ecx - movl %eax, 64(%esp) # 4-byte Spill - movl %edx, 8(%esp) # 4-byte Spill - movl (%ebx), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 4(%ebx), %eax - movl %eax, 36(%esp) # 4-byte Spill - mull %ecx - movl %edx, 4(%esp) # 4-byte Spill - movl %eax, %esi - movl %edi, %eax - mull %ecx - movl %edx, (%esp) # 4-byte Spill - addl %ebp, %eax - movl 120(%esp), %ecx - movl %eax, 16(%ecx) - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, %edi - movl 64(%esp), %esi # 4-byte Reload - adcl 88(%esp), %esi # 4-byte Folded Reload - movl 68(%esp), %edx # 4-byte Reload - adcl 92(%esp), %edx # 4-byte Folded Reload - movl 72(%esp), %ecx # 4-byte Reload - adcl 84(%esp), %ecx # 4-byte Folded Reload - movl 76(%esp), %ebp # 4-byte Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - movl 80(%esp), %eax # 4-byte Reload - adcl 20(%esp), %eax # 4-byte Folded Reload - sbbl %ebx, %ebx - andl $1, %ebx - addl (%esp), %edi # 4-byte Folded Reload - adcl 4(%esp), %esi # 4-byte Folded Reload - movl %esi, 64(%esp) # 4-byte Spill - adcl 8(%esp), %edx # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 72(%esp) # 4-byte Spill - adcl 16(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 76(%esp) # 4-byte Spill - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 92(%esp) # 4-byte Spill - movl 128(%esp), %eax - movl 20(%eax), %esi - movl 96(%esp), %eax # 4-byte Reload - mull %esi - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, %ecx - movl 56(%esp), %eax # 4-byte Reload - mull %esi - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 96(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - mull %esi - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 88(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - mull %esi - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 84(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - mull %esi - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, %ebp - movl 36(%esp), %eax # 4-byte Reload - mull %esi - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, %ebx - movl 40(%esp), %eax # 4-byte Reload - mull %esi - movl %edx, 40(%esp) # 4-byte Spill - addl %edi, %eax - movl 120(%esp), %edx - movl %eax, 20(%edx) - adcl 64(%esp), %ebx # 4-byte Folded Reload - adcl 68(%esp), %ebp # 4-byte Folded Reload - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 84(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 88(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 96(%esp) # 4-byte Folded Spill - adcl 92(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 92(%esp) # 4-byte Spill - movl 128(%esp), %eax - movl 24(%eax), %ecx - sbbl %esi, %esi - movl %ecx, %eax - movl 124(%esp), %edi - mull 24(%edi) - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte Spill - movl %ecx, %eax - mull 20(%edi) - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 16(%edi) - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ecx, %eax - mull 12(%edi) 
- movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ecx, %eax - mull 8(%edi) - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ecx, %eax - mull 4(%edi) - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %ecx, %eax - mull (%edi) - movl %eax, (%esp) # 4-byte Spill - movl %edx, 12(%esp) # 4-byte Spill - andl $1, %esi - addl 40(%esp), %ebx # 4-byte Folded Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 84(%esp), %edx # 4-byte Reload - adcl 44(%esp), %edx # 4-byte Folded Reload - movl 88(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl 96(%esp), %edi # 4-byte Reload - adcl 52(%esp), %edi # 4-byte Folded Reload - movl 92(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 92(%esp) # 4-byte Spill - adcl 60(%esp), %esi # 4-byte Folded Reload - addl (%esp), %ebx # 4-byte Folded Reload - movl 120(%esp), %ecx - movl %ebx, 24(%ecx) - adcl 4(%esp), %ebp # 4-byte Folded Reload - adcl 8(%esp), %edx # 4-byte Folded Reload - movl %edx, %ebx - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, %edx - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 96(%esp) # 4-byte Spill - movl 92(%esp), %edi # 4-byte Reload - adcl 28(%esp), %edi # 4-byte Folded Reload - adcl 64(%esp), %esi # 4-byte Folded Reload - sbbl %eax, %eax - andl $1, %eax - addl 12(%esp), %ebp # 4-byte Folded Reload - adcl 20(%esp), %ebx # 4-byte Folded Reload - movl %ebp, 28(%ecx) - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %ebx, 32(%ecx) - movl 96(%esp), %ebx # 4-byte Reload - adcl 68(%esp), %ebx # 4-byte Folded Reload - movl %edx, 36(%ecx) - movl %edi, %edx - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %ebx, 40(%ecx) - adcl 76(%esp), %esi # 4-byte Folded Reload - movl %edx, 44(%ecx) - movl %esi, 48(%ecx) - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%ecx) - addl $100, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end96: - .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L - - .globl mcl_fpDbl_sqrPre7L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre7L,@function -mcl_fpDbl_sqrPre7L: # @mcl_fpDbl_sqrPre7L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $100, %esp - movl 124(%esp), %esi - movl 24(%esi), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl (%esi), %ebx - movl 4(%esi), %edi - mull %edi - movl %eax, 68(%esp) # 4-byte Spill - movl %edx, 80(%esp) # 4-byte Spill - movl 20(%esi), %eax - movl %eax, 92(%esp) # 4-byte Spill - mull %edi - movl %eax, 60(%esp) # 4-byte Spill - movl %edx, 76(%esp) # 4-byte Spill - movl 16(%esi), %ecx - movl %ecx, %eax - mull %edi - movl %eax, 52(%esp) # 4-byte Spill - movl %edx, 72(%esp) # 4-byte Spill - movl 12(%esi), %esi - movl %esi, %eax - mull %edi - movl %eax, 48(%esp) # 4-byte Spill - movl %edx, 64(%esp) # 4-byte Spill - movl 124(%esp), %eax - movl 8(%eax), %ebp - movl %ebp, %eax - mull %edi - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - mull %ebx - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - mull %ebx - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull %ebx - movl %edx, 96(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %esi, %eax - mull %ebx - movl %edx, 92(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ebp, %eax - mull %ebx - movl %edx, %ebp - 
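# Editor's note: mcl_fpDbl_sqrPre7L computes the 14-limb square of a 7-limb
# input. It follows the same row-by-row schoolbook pattern as
# mcl_fpDbl_mulPre7L above; the generated code does not appear to use the
# classic "double the cross terms" shortcut, it simply multiplies the operand
# by each of its own limbs in turn:
#
#   for (int i = 0; i < 7; i++)
#       row_accumulate(r, x, x[i], i);     /* r[i..i+7] += x * x[i]         */
#
# where row_accumulate stands in for one unrolled mull/adcl row as sketched
# for mcl_fpDbl_mulPre7L.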
movl %eax, %ecx - movl %edi, %eax - mull %edi - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %edi, %eax - mull %ebx - movl %edx, %esi - movl %esi, 20(%esp) # 4-byte Spill - movl %eax, %edi - movl %ebx, %eax - mull %ebx - movl 120(%esp), %ebx - movl %eax, (%ebx) - addl %edi, %edx - adcl %esi, %ecx - adcl 16(%esp), %ebp # 4-byte Folded Reload - movl %ebp, %esi - movl 28(%esp), %eax # 4-byte Reload - adcl %eax, 92(%esp) # 4-byte Folded Spill - movl 32(%esp), %eax # 4-byte Reload - adcl %eax, 96(%esp) # 4-byte Folded Spill - movl 84(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 88(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl %edi, %edx - movl %edx, 4(%ebx) - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %ebx - adcl 44(%esp), %esi # 4-byte Folded Reload - movl %esi, %edi - movl 92(%esp), %edx # 4-byte Reload - adcl 48(%esp), %edx # 4-byte Folded Reload - movl 96(%esp), %ecx # 4-byte Reload - adcl 52(%esp), %ecx # 4-byte Folded Reload - adcl 60(%esp), %ebp # 4-byte Folded Reload - movl %ebp, %esi - adcl 68(%esp), %eax # 4-byte Folded Reload - sbbl %ebp, %ebp - andl $1, %ebp - addl 20(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 28(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %edx, 92(%esp) # 4-byte Spill - adcl 64(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 96(%esp) # 4-byte Spill - adcl 72(%esp), %esi # 4-byte Folded Reload - movl %esi, 84(%esp) # 4-byte Spill - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - adcl 80(%esp), %ebp # 4-byte Folded Reload - movl 124(%esp), %edi - movl 24(%edi), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 8(%edi), %esi - mull %esi - movl %eax, 80(%esp) # 4-byte Spill - movl %edx, 40(%esp) # 4-byte Spill - movl 20(%edi), %eax - movl %eax, 48(%esp) # 4-byte Spill - mull %esi - movl %eax, 76(%esp) # 4-byte Spill - movl %edx, 36(%esp) # 4-byte Spill - movl 16(%edi), %eax - movl %eax, 44(%esp) # 4-byte Spill - mull %esi - movl %eax, 72(%esp) # 4-byte Spill - movl %edx, 16(%esp) # 4-byte Spill - movl 12(%edi), %ebx - movl %ebx, %eax - mull %esi - movl %eax, 60(%esp) # 4-byte Spill - movl %edx, 64(%esp) # 4-byte Spill - movl (%edi), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 4(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - mull %esi - movl %edx, 12(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %ecx, %eax - mull %esi - movl %edx, 8(%esp) # 4-byte Spill - movl %eax, %edi - movl %esi, %eax - mull %esi - movl %eax, %ecx - movl %edx, 4(%esp) # 4-byte Spill - addl 28(%esp), %edi # 4-byte Folded Reload - movl 120(%esp), %eax - movl %edi, 8(%eax) - movl 68(%esp), %edx # 4-byte Reload - adcl 32(%esp), %edx # 4-byte Folded Reload - adcl 92(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 56(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl 72(%esp), %esi # 4-byte Reload - adcl 84(%esp), %esi # 4-byte Folded Reload - movl 76(%esp), %edi # 4-byte Reload - adcl 88(%esp), %edi # 4-byte Folded Reload - movl 80(%esp), %eax # 4-byte Reload - adcl %ebp, %eax - sbbl %ebp, %ebp - andl $1, %ebp - addl 8(%esp), %edx # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl 12(%esp), %edx # 4-byte Reload - adcl %edx, 56(%esp) # 4-byte Folded Spill - adcl 4(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 96(%esp) # 4-byte Spill - adcl 64(%esp), %esi # 4-byte Folded 
Reload - movl %esi, 72(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 76(%esp) # 4-byte Spill - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl 52(%esp), %eax # 4-byte Reload - mull %ebx - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 92(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - mull %ebx - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 88(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - mull %ebx - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, %esi - movl 24(%esp), %eax # 4-byte Reload - mull %ebx - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, %ecx - movl 20(%esp), %eax # 4-byte Reload - mull %ebx - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, %edi - movl %ebx, %eax - mull %ebx - movl %eax, %ebx - movl %edx, 16(%esp) # 4-byte Spill - addl 68(%esp), %edi # 4-byte Folded Reload - movl 120(%esp), %eax - movl %edi, 12(%eax) - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 56(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 60(%esp), %ecx # 4-byte Folded Reload - adcl 72(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 96(%esp) # 4-byte Spill - adcl 76(%esp), %esi # 4-byte Folded Reload - movl %esi, 60(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 88(%esp) # 4-byte Folded Spill - adcl %ebp, 92(%esp) # 4-byte Folded Spill - sbbl %eax, %eax - andl $1, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 124(%esp), %eax - movl (%eax), %esi - movl 4(%eax), %edi - movl 20(%eax), %ebx - movl %edi, %eax - mull %ebx - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl %esi, %eax - mull %ebx - movl %eax, 40(%esp) # 4-byte Spill - movl %edx, 48(%esp) # 4-byte Spill - movl 124(%esp), %eax - movl 16(%eax), %ebp - movl %edi, %eax - mull %ebp - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %esi, %eax - mull %ebp - movl %eax, 4(%esp) # 4-byte Spill - movl %edx, 12(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - addl %eax, 56(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl 96(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 60(%esp) # 4-byte Folded Spill - movl 28(%esp), %eax # 4-byte Reload - adcl %eax, 88(%esp) # 4-byte Folded Spill - movl 32(%esp), %eax # 4-byte Reload - adcl %eax, 92(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 124(%esp), %esi - movl 24(%esi), %eax - movl %eax, 32(%esp) # 4-byte Spill - mull %ebp - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 76(%esp) # 4-byte Spill - movl %ebx, %eax - mull %ebp - movl %eax, 64(%esp) # 4-byte Spill - movl %edx, 68(%esp) # 4-byte Spill - movl 12(%esi), %eax - movl %eax, 28(%esp) # 4-byte Spill - mull %ebp - movl %eax, 72(%esp) # 4-byte Spill - movl %edx, 20(%esp) # 4-byte Spill - movl 8(%esi), %eax - movl %eax, 24(%esp) # 4-byte Spill - mull %ebp - movl %edx, 16(%esp) # 4-byte Spill - movl %eax, %edi - movl %ebp, %eax - mull %ebp - movl %eax, %esi - movl %edx, (%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - addl 4(%esp), %ebp # 4-byte Folded Reload - movl 120(%esp), %eax - movl %ebp, 16(%eax) - movl %ecx, %eax - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %edi, %ebp - adcl 96(%esp), %ebp # 4-byte Folded Reload - 
movl 72(%esp), %ecx # 4-byte Reload - adcl 60(%esp), %ecx # 4-byte Folded Reload - adcl 88(%esp), %esi # 4-byte Folded Reload - movl %esi, 88(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 64(%esp), %esi # 4-byte Folded Reload - movl 76(%esp), %edx # 4-byte Reload - adcl 84(%esp), %edx # 4-byte Folded Reload - sbbl %edi, %edi - andl $1, %edi - addl 12(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl 80(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 80(%esp) # 4-byte Spill - adcl 16(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 72(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl 20(%esp), %ebp # 4-byte Folded Reload - adcl (%esp), %esi # 4-byte Folded Reload - movl %esi, 92(%esp) # 4-byte Spill - adcl 68(%esp), %edx # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - adcl 36(%esp), %edi # 4-byte Folded Reload - movl 32(%esp), %eax # 4-byte Reload - mull %ebx - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 88(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - mull %ebx - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 84(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - mull %ebx - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, %ecx - movl %ebx, %eax - mull %ebx - movl %eax, %esi - movl %edx, 32(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - addl 40(%esp), %eax # 4-byte Folded Reload - movl 120(%esp), %edx - movl %eax, 20(%edx) - movl 80(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %ecx, %edx - adcl 72(%esp), %edx # 4-byte Folded Reload - movl 84(%esp), %ebx # 4-byte Reload - adcl %ebp, %ebx - movl 92(%esp), %ecx # 4-byte Reload - adcl 64(%esp), %ecx # 4-byte Folded Reload - adcl 76(%esp), %esi # 4-byte Folded Reload - movl 88(%esp), %ebp # 4-byte Reload - adcl %edi, %ebp - sbbl %edi, %edi - andl $1, %edi - addl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - adcl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 84(%esp) # 4-byte Spill - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 92(%esp) # 4-byte Spill - adcl 68(%esp), %esi # 4-byte Folded Reload - movl %esi, 64(%esp) # 4-byte Spill - adcl 32(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 88(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - movl 124(%esp), %esi - movl 24(%esi), %ecx - movl %ecx, %eax - mull 20(%esi) - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - mull 16(%esi) - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 96(%esp) # 4-byte Spill - movl %ecx, %eax - mull 12(%esi) - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, %ebx - movl %ecx, %eax - mull 8(%esi) - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, %ebp - movl %ecx, %eax - mull 4(%esi) - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, %edi - movl %ecx, %eax - mull (%esi) - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, %esi - movl %ecx, %eax - mull %ecx - movl %edx, 52(%esp) # 4-byte Spill - addl 80(%esp), %esi # 4-byte Folded Reload - movl 120(%esp), %edx - movl %esi, 24(%edx) - movl %edx, %esi - adcl 56(%esp), %edi # 4-byte Folded Reload - adcl 84(%esp), %ebp # 4-byte Folded Reload - adcl 92(%esp), %ebx # 4-byte Folded Reload - movl 64(%esp), %ecx # 4-byte Reload - adcl %ecx, 96(%esp) # 4-byte Folded Spill - movl 44(%esp), %edx # 4-byte Reload - adcl 88(%esp), %edx # 4-byte Folded Reload - adcl 
60(%esp), %eax # 4-byte Folded Reload - sbbl %ecx, %ecx - andl $1, %ecx - addl 36(%esp), %edi # 4-byte Folded Reload - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl %edi, 28(%esi) - adcl 48(%esp), %ebx # 4-byte Folded Reload - movl %ebp, 32(%esi) - movl 96(%esp), %edi # 4-byte Reload - adcl 68(%esp), %edi # 4-byte Folded Reload - movl %ebx, 36(%esi) - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %edi, 40(%esi) - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %edx, 44(%esi) - movl %eax, 48(%esi) - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%esi) - addl $100, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end97: - .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L - - .globl mcl_fp_mont7L - .align 16, 0x90 - .type mcl_fp_mont7L,@function -mcl_fp_mont7L: # @mcl_fp_mont7L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $152, %esp - movl 176(%esp), %esi - movl (%esi), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 180(%esp), %edx - movl (%edx), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - mull %ecx - movl %eax, 80(%esp) # 4-byte Spill - movl %edx, 76(%esp) # 4-byte Spill - movl 184(%esp), %ecx - movl -4(%ecx), %edx - movl %edx, 132(%esp) # 4-byte Spill - movl %eax, %ebx - imull %edx, %ebx - movl (%ecx), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 24(%ecx), %edx - movl %edx, 120(%esp) # 4-byte Spill - movl 20(%ecx), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 16(%ecx), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 12(%ecx), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 8(%ecx), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 4(%ecx), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 4(%esi), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl %esi, %eax - movl 24(%eax), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 20(%eax), %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 16(%eax), %ebp - movl %ebp, 96(%esp) # 4-byte Spill - movl 12(%eax), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 8(%eax), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl %ebx, %eax - mull %edx - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %ebx, %eax - mull 148(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ebx, %eax - mull 144(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ebx, %eax - mull 140(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ebx, %eax - mull 136(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ebx, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %ebx, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - movl 72(%esp), %ecx # 4-byte Reload - mull %ecx - movl %edx, %esi - movl %eax, 16(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %ebp, %eax - mull %ecx - movl %edx, %ebp - movl %eax, 4(%esp) # 4-byte Spill - movl %edi, %eax - mull %ecx - movl %edx, %edi - movl %eax, (%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, %ebx - movl %eax, 8(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - mull %ecx - addl 76(%esp), 
%eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - adcl 8(%esp), %edx # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill - adcl (%esp), %ebx # 4-byte Folded Reload - movl %ebx, (%esp) # 4-byte Spill - adcl 4(%esp), %edi # 4-byte Folded Reload - movl %edi, 4(%esp) # 4-byte Spill - adcl 12(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 84(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - addl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 44(%esp), %ebx # 4-byte Reload - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl 48(%esp), %edi # 4-byte Reload - adcl 32(%esp), %edi # 4-byte Folded Reload - movl 52(%esp), %edx # 4-byte Reload - adcl 36(%esp), %edx # 4-byte Folded Reload - movl 56(%esp), %ecx # 4-byte Reload - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl 60(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl 64(%esp), %esi # 4-byte Reload - adcl $0, %esi - movl 28(%esp), %ebp # 4-byte Reload - addl 80(%esp), %ebp # 4-byte Folded Reload - movl 76(%esp), %ebp # 4-byte Reload - adcl %ebp, 88(%esp) # 4-byte Folded Spill - adcl 8(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 44(%esp) # 4-byte Spill - adcl (%esp), %edi # 4-byte Folded Reload - movl %edi, 48(%esp) # 4-byte Spill - adcl 4(%esp), %edx # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 56(%esp) # 4-byte Spill - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - adcl 72(%esp), %esi # 4-byte Folded Reload - movl %esi, 64(%esp) # 4-byte Spill - sbbl %eax, %eax - andl $1, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 180(%esp), %eax - movl 4(%eax), %ecx - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ecx, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ecx, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 84(%esp) # 4-byte Spill - movl %ecx, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, %edi - movl %ecx, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, %ebx - addl %edi, %ebx - adcl 84(%esp), %esi # 4-byte Folded Reload - movl %esi, 84(%esp) # 4-byte Spill - adcl 24(%esp), %ebp # 4-byte Folded Reload - movl 80(%esp), %esi # 4-byte Reload - adcl 28(%esp), %esi # 4-byte Folded Reload - movl 76(%esp), %edx # 4-byte Reload - adcl 32(%esp), %edx # 4-byte Folded Reload - movl 72(%esp), %ecx # 4-byte Reload - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl 68(%esp), %edi # 4-byte Reload - adcl $0, %edi - addl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 44(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - adcl 52(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 48(%esp) # 4-byte Spill - adcl 56(%esp), %esi # 4-byte Folded Reload - movl 
%esi, 80(%esp) # 4-byte Spill - adcl 60(%esp), %edx # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - adcl 64(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 72(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 68(%esp) # 4-byte Spill - sbbl %eax, %eax - movl 88(%esp), %ecx # 4-byte Reload - imull 132(%esp), %ecx # 4-byte Folded Reload - andl $1, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ecx, %eax - mull 148(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull 144(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 140(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ecx, %eax - mull 136(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 16(%esp) # 4-byte Spill - movl %ecx, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, %ebx - movl %ecx, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %eax, %ebp - addl %ebx, %edx - movl %edx, 20(%esp) # 4-byte Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, %eax - movl 52(%esp), %ebx # 4-byte Reload - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl 56(%esp), %edx # 4-byte Reload - adcl 32(%esp), %edx # 4-byte Folded Reload - movl 60(%esp), %ecx # 4-byte Reload - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl 64(%esp), %edi # 4-byte Reload - adcl $0, %edi - addl 88(%esp), %ebp # 4-byte Folded Reload - movl 20(%esp), %ebp # 4-byte Reload - adcl 44(%esp), %ebp # 4-byte Folded Reload - adcl 84(%esp), %esi # 4-byte Folded Reload - movl %esi, 36(%esp) # 4-byte Spill - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%esp) # 4-byte Spill - adcl 80(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 52(%esp) # 4-byte Spill - adcl 76(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - adcl 72(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 60(%esp) # 4-byte Spill - adcl 68(%esp), %edi # 4-byte Folded Reload - movl %edi, 64(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 180(%esp), %eax - movl 8(%eax), %ebx - movl %ebx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %ebx, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ebx, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ebx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %ebx, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 44(%esp) # 4-byte Spill - movl %ebx, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, %edi - movl %ebx, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - addl %edi, %edx - movl %edx, %edi - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 44(%esp) # 4-byte Spill - adcl 48(%esp), %esi # 4-byte Folded Reload - movl %esi, 48(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl 84(%esp), %ebx # 
4-byte Reload - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl 80(%esp), %edx # 4-byte Reload - adcl 68(%esp), %edx # 4-byte Folded Reload - movl 76(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 72(%esp), %esi # 4-byte Reload - addl %ebp, %esi - movl %esi, 72(%esp) # 4-byte Spill - adcl 36(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - movl 32(%esp), %edi # 4-byte Reload - adcl %edi, 44(%esp) # 4-byte Folded Spill - movl 52(%esp), %edi # 4-byte Reload - adcl %edi, 48(%esp) # 4-byte Folded Spill - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 88(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 84(%esp) # 4-byte Spill - adcl 64(%esp), %edx # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %esi, %ecx - imull 132(%esp), %ecx # 4-byte Folded Reload - andl $1, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 52(%esp) # 4-byte Spill - movl %ecx, %eax - mull 148(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull 144(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 140(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ecx, %eax - mull 136(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 16(%esp) # 4-byte Spill - movl %ecx, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, %ebx - movl %ecx, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %eax, %ebp - addl %ebx, %edx - movl %edx, 20(%esp) # 4-byte Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, %eax - movl 56(%esp), %ebx # 4-byte Reload - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl 60(%esp), %edx # 4-byte Reload - adcl 32(%esp), %edx # 4-byte Folded Reload - movl 64(%esp), %edi # 4-byte Reload - adcl 52(%esp), %edi # 4-byte Folded Reload - movl 68(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl 72(%esp), %ebp # 4-byte Folded Reload - movl 20(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - adcl 44(%esp), %esi # 4-byte Folded Reload - movl %esi, 36(%esp) # 4-byte Spill - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - adcl 88(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 56(%esp) # 4-byte Spill - adcl 84(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl 80(%esp), %edi # 4-byte Folded Reload - movl %edi, 64(%esp) # 4-byte Spill - adcl 76(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 180(%esp), %eax - movl 12(%eax), %ebx - movl %ebx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ebx, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ebx, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ebx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 52(%esp) # 4-byte Spill - movl %ebx, %eax - mull 112(%esp) # 4-byte 
Folded Reload - movl %edx, %esi - movl %eax, 48(%esp) # 4-byte Spill - movl %ebx, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, %edi - movl %ebx, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - addl %edi, %edx - movl %edx, %edi - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 48(%esp) # 4-byte Spill - adcl 52(%esp), %esi # 4-byte Folded Reload - movl %esi, 52(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl 84(%esp), %ebx # 4-byte Reload - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl 80(%esp), %edx # 4-byte Reload - adcl 32(%esp), %edx # 4-byte Folded Reload - movl 76(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 72(%esp), %esi # 4-byte Reload - addl %ebp, %esi - movl %esi, 72(%esp) # 4-byte Spill - adcl 36(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl %edi, 48(%esp) # 4-byte Folded Spill - movl 56(%esp), %edi # 4-byte Reload - adcl %edi, 52(%esp) # 4-byte Folded Spill - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 88(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 84(%esp) # 4-byte Spill - adcl 68(%esp), %edx # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %esi, %ecx - imull 132(%esp), %ecx # 4-byte Folded Reload - andl $1, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %eax - mull 148(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull 144(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 140(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ecx, %eax - mull 136(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 16(%esp) # 4-byte Spill - movl %ecx, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, %ebx - movl %ecx, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %eax, %ebp - addl %ebx, %edx - movl %edx, 20(%esp) # 4-byte Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, %eax - movl 56(%esp), %ebx # 4-byte Reload - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl 60(%esp), %edx # 4-byte Reload - adcl 32(%esp), %edx # 4-byte Folded Reload - movl 64(%esp), %edi # 4-byte Reload - adcl 40(%esp), %edi # 4-byte Folded Reload - movl 68(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl 72(%esp), %ebp # 4-byte Folded Reload - movl 20(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - adcl 48(%esp), %esi # 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - adcl 88(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 56(%esp) # 4-byte Spill - adcl 84(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl 80(%esp), %edi # 4-byte Folded Reload - movl %edi, 64(%esp) # 4-byte Spill - adcl 76(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 180(%esp), %eax - movl 16(%eax), %ebx - movl 
%ebx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ebx, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ebx, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ebx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 52(%esp) # 4-byte Spill - movl %ebx, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 48(%esp) # 4-byte Spill - movl %ebx, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, %edi - movl %ebx, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - addl %edi, %edx - movl %edx, %edi - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 48(%esp) # 4-byte Spill - adcl 52(%esp), %esi # 4-byte Folded Reload - movl %esi, 52(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl 84(%esp), %ebx # 4-byte Reload - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl 80(%esp), %edx # 4-byte Reload - adcl 32(%esp), %edx # 4-byte Folded Reload - movl 76(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 72(%esp), %esi # 4-byte Reload - addl %ebp, %esi - movl %esi, 72(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 40(%esp) # 4-byte Spill - movl 36(%esp), %edi # 4-byte Reload - adcl %edi, 48(%esp) # 4-byte Folded Spill - movl 56(%esp), %edi # 4-byte Reload - adcl %edi, 52(%esp) # 4-byte Folded Spill - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 88(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 84(%esp) # 4-byte Spill - adcl 68(%esp), %edx # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %esi, %ecx - imull 132(%esp), %ecx # 4-byte Folded Reload - andl $1, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ecx, %eax - mull 148(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull 144(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 140(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ecx, %eax - mull 136(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 16(%esp) # 4-byte Spill - movl %ecx, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, %ebx - movl %ecx, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %eax, %ebp - addl %ebx, %edx - movl %edx, 20(%esp) # 4-byte Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, %eax - movl 56(%esp), %ebx # 4-byte Reload - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl 60(%esp), %edx # 4-byte Reload - adcl 32(%esp), %edx # 4-byte Folded Reload - movl 64(%esp), %edi # 4-byte Reload - adcl 36(%esp), %edi # 4-byte Folded Reload - movl 68(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl 72(%esp), %ebp # 4-byte Folded Reload - movl 20(%esp), %ebp # 4-byte Reload - adcl 40(%esp), %ebp # 4-byte Folded Reload - adcl 48(%esp), %esi 
# 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - adcl 88(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 56(%esp) # 4-byte Spill - adcl 84(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl 80(%esp), %edi # 4-byte Folded Reload - movl %edi, 64(%esp) # 4-byte Spill - adcl 76(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 180(%esp), %eax - movl 20(%eax), %ebx - movl %ebx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ebx, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ebx, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ebx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 52(%esp) # 4-byte Spill - movl %ebx, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 48(%esp) # 4-byte Spill - movl %ebx, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, %edi - movl %ebx, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - addl %edi, %edx - movl %edx, %edi - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 48(%esp) # 4-byte Spill - adcl 52(%esp), %esi # 4-byte Folded Reload - movl %esi, 52(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl 84(%esp), %ebx # 4-byte Reload - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl 80(%esp), %edx # 4-byte Reload - adcl 32(%esp), %edx # 4-byte Folded Reload - movl 76(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 72(%esp), %esi # 4-byte Reload - addl %ebp, %esi - movl %esi, 72(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 40(%esp) # 4-byte Spill - movl 36(%esp), %edi # 4-byte Reload - adcl %edi, 48(%esp) # 4-byte Folded Spill - movl 56(%esp), %edi # 4-byte Reload - adcl %edi, 52(%esp) # 4-byte Folded Spill - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 88(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 84(%esp) # 4-byte Spill - adcl 68(%esp), %edx # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %esi, %ecx - imull 132(%esp), %ecx # 4-byte Folded Reload - andl $1, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ecx, %eax - mull 148(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull 144(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 140(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ecx, %eax - mull 136(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 16(%esp) # 4-byte Spill - movl %ecx, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, %ebp - movl %ecx, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %eax, %ebx - addl %ebp, %edx - movl %edx, 20(%esp) # 4-byte 
Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, %eax - movl 56(%esp), %ebp # 4-byte Reload - adcl 28(%esp), %ebp # 4-byte Folded Reload - movl 60(%esp), %edx # 4-byte Reload - adcl 32(%esp), %edx # 4-byte Folded Reload - movl 64(%esp), %edi # 4-byte Reload - adcl 36(%esp), %edi # 4-byte Folded Reload - movl 68(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl 72(%esp), %ebx # 4-byte Folded Reload - movl 20(%esp), %ebx # 4-byte Reload - adcl 40(%esp), %ebx # 4-byte Folded Reload - adcl 48(%esp), %esi # 4-byte Folded Reload - movl %esi, 48(%esp) # 4-byte Spill - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - adcl 88(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 56(%esp) # 4-byte Spill - adcl 84(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl 80(%esp), %edi # 4-byte Folded Reload - movl %edi, 64(%esp) # 4-byte Spill - adcl 76(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 180(%esp), %eax - movl 24(%eax), %ebp - movl %ebp, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 80(%esp) # 4-byte Spill - movl %ebp, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 76(%esp) # 4-byte Spill - movl %ebp, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 100(%esp) # 4-byte Spill - movl %eax, 96(%esp) # 4-byte Spill - movl %ebp, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 108(%esp) # 4-byte Spill - movl %eax, 92(%esp) # 4-byte Spill - movl %ebp, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 112(%esp) # 4-byte Spill - movl %ebp, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, %edi - movl %ebp, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - addl %edi, %edx - movl %edx, %edi - adcl 112(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 112(%esp) # 4-byte Spill - adcl 92(%esp), %esi # 4-byte Folded Reload - movl %esi, 104(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 96(%esp), %ecx # 4-byte Folded Reload - movl 100(%esp), %ebp # 4-byte Reload - adcl 76(%esp), %ebp # 4-byte Folded Reload - movl 88(%esp), %edx # 4-byte Reload - adcl 80(%esp), %edx # 4-byte Folded Reload - movl 84(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 116(%esp), %esi # 4-byte Reload - addl %ebx, %esi - movl %esi, 116(%esp) # 4-byte Spill - adcl 48(%esp), %edi # 4-byte Folded Reload - movl %edi, 92(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl %edi, 112(%esp) # 4-byte Folded Spill - movl 56(%esp), %edi # 4-byte Reload - adcl %edi, 104(%esp) # 4-byte Folded Spill - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 108(%esp) # 4-byte Spill - adcl 64(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 100(%esp) # 4-byte Spill - adcl 68(%esp), %edx # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - sbbl %eax, %eax - movl 132(%esp), %ecx # 4-byte Reload - imull %esi, %ecx - movl %ecx, 132(%esp) # 4-byte Spill - andl $1, %eax - movl %eax, 96(%esp) # 4-byte Spill - movl %ecx, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, 76(%esp) # 4-byte Spill - movl %ecx, %eax - mull 148(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 72(%esp) # 4-byte Spill - 
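
Everything from mcl_fp_mont7L down to .Lfunc_end98 is one fully unrolled CIOS Montgomery multiplication: each of the seven word steps multiplies the operand by one word of b, derives m from the low limb and the cached n' word (loaded from offset -4 off the modulus pointer and parked at 132(%esp), hence the recurring `imull 132(%esp), %ecx`), adds m times the modulus so the low limb cancels, and shifts the accumulator down one word. The idiom `sbbl %eax, %eax / andl $1, %eax` just materializes the carry flag as a 0/1 word for the next round. A Go sketch of the loop the unrolling expands, assuming R = 2^(32*7) and nPrime = -p^-1 mod 2^32 (names are illustrative, not the generated routine itself):

    package mont

    import "math/bits"

    // montMul7 sketches the CIOS loop that mcl_fp_mont7L unrolls:
    // z = a*b*R^-1 mod p for 7-limb operands.
    func montMul7(z, a, b, p *[7]uint32, nPrime uint32) {
        var t [9]uint32 // seven limbs plus two words of headroom
        for i := 0; i < 7; i++ {
            addMulWord(&t, a, b[i]) // t += a * b[i]
            m := t[0] * nPrime
            addMulWord(&t, p, m) // t += m*p; forces t[0] to zero
            copy(t[:8], t[1:])   // t >>= 32
            t[8] = 0
        }
        // Conditional final subtraction (the jne/.LBB98_* tail):
        // after seven steps t < 2p, so subtract p at most once.
        var d [7]uint32
        var borrow uint32
        for j := 0; j < 7; j++ {
            d[j], borrow = bits.Sub32(t[j], p[j], borrow)
        }
        if t[7] != 0 || borrow == 0 { // t >= p
            copy(z[:], d[:])
        } else { // t < p: already reduced
            copy(z[:], t[:7])
        }
    }

    // addMulWord adds v*w into the low eight words of t.
    func addMulWord(t *[9]uint32, v *[7]uint32, w uint32) {
        var carry uint32
        for j := 0; j < 7; j++ {
            hi, lo := bits.Mul32(v[j], w)
            s, c1 := bits.Add32(t[j], lo, 0)
            s, c2 := bits.Add32(s, carry, 0)
            t[j] = s
            carry = hi + c1 + c2
        }
        s, c := bits.Add32(t[7], carry, 0)
        t[7] = s
        t[8] += c
    }

The sketch writes the final select as an if; the assembly keeps the saved borrow in %ebx and picks each output limb with a `testb %bl, %bl / jne` pair instead.
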
movl %ecx, %eax - mull 144(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 68(%esp) # 4-byte Spill - movl %ecx, %eax - mull 140(%esp) # 4-byte Folded Reload - movl %edx, %ebx - movl %eax, 60(%esp) # 4-byte Spill - movl %ecx, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte Spill - movl %ecx, %eax - mull 136(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, %edi - movl 132(%esp), %eax # 4-byte Reload - mull 124(%esp) # 4-byte Folded Reload - addl 56(%esp), %eax # 4-byte Folded Reload - adcl %edi, %edx - adcl 60(%esp), %ecx # 4-byte Folded Reload - adcl 68(%esp), %ebx # 4-byte Folded Reload - adcl 72(%esp), %ebp # 4-byte Folded Reload - adcl 76(%esp), %esi # 4-byte Folded Reload - movl %esi, 132(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl 64(%esp), %esi # 4-byte Reload - addl 116(%esp), %esi # 4-byte Folded Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - adcl 112(%esp), %edx # 4-byte Folded Reload - adcl 104(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 104(%esp) # 4-byte Spill - adcl 108(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 116(%esp) # 4-byte Spill - adcl 100(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 112(%esp) # 4-byte Spill - movl 132(%esp), %esi # 4-byte Reload - adcl 88(%esp), %esi # 4-byte Folded Reload - movl %esi, 132(%esp) # 4-byte Spill - adcl 84(%esp), %edi # 4-byte Folded Reload - adcl $0, 96(%esp) # 4-byte Folded Spill - movl %eax, %esi - subl 128(%esp), %esi # 4-byte Folded Reload - movl %esi, 108(%esp) # 4-byte Spill - movl %edx, %esi - sbbl 124(%esp), %esi # 4-byte Folded Reload - movl %esi, 124(%esp) # 4-byte Spill - sbbl 136(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 128(%esp) # 4-byte Spill - sbbl 140(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 136(%esp) # 4-byte Spill - sbbl 144(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 140(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - movl %ecx, %ebx - movl %ecx, %ebp - sbbl 148(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 144(%esp) # 4-byte Spill - movl %edi, %ebx - sbbl 120(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 148(%esp) # 4-byte Spill - movl 96(%esp), %ebx # 4-byte Reload - sbbl $0, %ebx - andl $1, %ebx - jne .LBB98_2 -# BB#1: - movl 108(%esp), %eax # 4-byte Reload -.LBB98_2: - movl 172(%esp), %esi - movl %eax, (%esi) - testb %bl, %bl - jne .LBB98_4 -# BB#3: - movl 124(%esp), %edx # 4-byte Reload -.LBB98_4: - movl %edx, 4(%esi) - movl 104(%esp), %ecx # 4-byte Reload - jne .LBB98_6 -# BB#5: - movl 128(%esp), %ecx # 4-byte Reload -.LBB98_6: - movl %ecx, 8(%esi) - movl 112(%esp), %ecx # 4-byte Reload - movl 116(%esp), %eax # 4-byte Reload - jne .LBB98_8 -# BB#7: - movl 136(%esp), %eax # 4-byte Reload -.LBB98_8: - movl %eax, 12(%esi) - jne .LBB98_10 -# BB#9: - movl 140(%esp), %ecx # 4-byte Reload -.LBB98_10: - movl %ecx, 16(%esi) - jne .LBB98_12 -# BB#11: - movl 144(%esp), %ebp # 4-byte Reload -.LBB98_12: - movl %ebp, 20(%esi) - jne .LBB98_14 -# BB#13: - movl 148(%esp), %edi # 4-byte Reload -.LBB98_14: - movl %edi, 24(%esi) - addl $152, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end98: - .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L - - .globl mcl_fp_montNF7L - .align 16, 0x90 - .type mcl_fp_montNF7L,@function -mcl_fp_montNF7L: # @mcl_fp_montNF7L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $152, %esp - movl 176(%esp), %ebp - movl (%ebp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 180(%esp), 
%ecx - movl (%ecx), %ecx - mull %ecx - movl %eax, 88(%esp) # 4-byte Spill - movl %edx, 84(%esp) # 4-byte Spill - movl 184(%esp), %esi - movl -4(%esi), %edx - movl %edx, 108(%esp) # 4-byte Spill - movl %eax, %edi - imull %edx, %edi - movl (%esi), %edx - movl %edx, 148(%esp) # 4-byte Spill - movl 24(%esi), %edx - movl %edx, 124(%esp) # 4-byte Spill - movl 20(%esi), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 16(%esi), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 12(%esi), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 8(%esi), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 4(%esi), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 4(%ebp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 24(%ebp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 20(%ebp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 16(%ebp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 12(%ebp), %ebx - movl %ebx, 96(%esp) # 4-byte Spill - movl 8(%ebp), %ebp - movl %ebp, 100(%esp) # 4-byte Spill - movl %edi, %eax - mull %edx - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - movl %edi, %eax - mull 144(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %edi, %eax - mull 140(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %edi, %eax - mull 136(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %edi, %eax - mull 132(%esp) # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %edi, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %edi, %eax - mull 148(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - mull %ecx - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %esi, %eax - mull %ecx - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %ebx, %eax - mull %ecx - movl %edx, %esi - movl %eax, %ebx - movl %ebp, %eax - mull %ecx - movl %edx, %ebp - movl %eax, (%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - mull %ecx - movl %eax, %edi - addl 84(%esp), %edi # 4-byte Folded Reload - adcl (%esp), %edx # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - adcl %ebx, %ebp - movl %esi, %edx - adcl 4(%esp), %edx # 4-byte Folded Reload - movl 72(%esp), %esi # 4-byte Reload - adcl 8(%esp), %esi # 4-byte Folded Reload - movl 76(%esp), %ecx # 4-byte Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl 80(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 16(%esp), %ebx # 4-byte Reload - addl 88(%esp), %ebx # 4-byte Folded Reload - movl %edi, %ebx - adcl 20(%esp), %ebx # 4-byte Folded Reload - movl 84(%esp), %edi # 4-byte Reload - adcl 24(%esp), %edi # 4-byte Folded Reload - adcl 32(%esp), %ebp # 4-byte Folded Reload - adcl 40(%esp), %edx # 4-byte Folded Reload - adcl 48(%esp), %esi # 4-byte Folded Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - adcl $0, %eax - addl 28(%esp), %ebx # 4-byte Folded Reload - adcl 36(%esp), %edi # 4-byte Folded Reload - adcl 44(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 40(%esp) # 4-byte Spill - adcl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 
44(%esp) # 4-byte Spill - adcl 60(%esp), %esi # 4-byte Folded Reload - movl %esi, 72(%esp) # 4-byte Spill - adcl 64(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 76(%esp) # 4-byte Spill - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 180(%esp), %eax - movl 4(%eax), %ecx - movl %ecx, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %ecx, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ecx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 52(%esp) # 4-byte Spill - movl %ecx, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - addl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - adcl 28(%esp), %ebp # 4-byte Folded Reload - adcl 56(%esp), %esi # 4-byte Folded Reload - movl %esi, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl 64(%esp), %edx # 4-byte Reload - adcl 36(%esp), %edx # 4-byte Folded Reload - movl 88(%esp), %ecx # 4-byte Reload - adcl 68(%esp), %ecx # 4-byte Folded Reload - movl 84(%esp), %esi # 4-byte Reload - adcl $0, %esi - addl %ebx, 48(%esp) # 4-byte Folded Spill - adcl %edi, 52(%esp) # 4-byte Folded Spill - movl %ebp, %edi - adcl 40(%esp), %edi # 4-byte Folded Reload - movl 44(%esp), %ebx # 4-byte Reload - adcl %ebx, 56(%esp) # 4-byte Folded Spill - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - adcl 76(%esp), %edx # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - adcl 80(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 88(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - movl %ebp, %ecx - imull 108(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 80(%esp) # 4-byte Spill - movl %ecx, %eax - mull 144(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 76(%esp) # 4-byte Spill - movl %ecx, %eax - mull 140(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 72(%esp) # 4-byte Spill - movl %ecx, %eax - mull 136(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %ecx, %eax - mull 132(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, %esi - movl %ecx, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, %ebx - movl %ecx, %eax - mull 148(%esp) # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - addl %ebp, %eax - adcl 52(%esp), %ebx # 4-byte Folded Reload - adcl %edi, %esi - movl %esi, %edi - movl 68(%esp), %edx # 4-byte Reload - adcl 56(%esp), %edx # 4-byte Folded Reload - movl 72(%esp), %esi # 4-byte Reload - adcl 60(%esp), %esi # 4-byte Folded Reload - movl 76(%esp), %ebp # 4-byte Reload - adcl 64(%esp), %ebp # 4-byte Folded Reload - movl 80(%esp), %ecx # 4-byte Reload - adcl 
88(%esp), %ecx # 4-byte Folded Reload - movl 84(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl 40(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 40(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 64(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 72(%esp) # 4-byte Spill - adcl 32(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 76(%esp) # 4-byte Spill - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 80(%esp) # 4-byte Spill - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 180(%esp), %eax - movl 8(%eax), %ebp - movl %ebp, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ebp, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, %edi - movl %ebp, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ebp, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ebp, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 48(%esp) # 4-byte Spill - movl %ebp, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, %ebx - movl %ebp, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - movl %edx, %ebp - addl %ebx, %ebp - adcl 48(%esp), %esi # 4-byte Folded Reload - movl %esi, 48(%esp) # 4-byte Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %ebx - movl 52(%esp), %ecx # 4-byte Reload - adcl 32(%esp), %ecx # 4-byte Folded Reload - movl 56(%esp), %esi # 4-byte Reload - adcl %edi, %esi - movl 60(%esp), %edx # 4-byte Reload - adcl 36(%esp), %edx # 4-byte Folded Reload - movl 88(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 44(%esp), %edi # 4-byte Reload - addl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 44(%esp) # 4-byte Spill - adcl 64(%esp), %ebp # 4-byte Folded Reload - movl 48(%esp), %edi # 4-byte Reload - adcl 68(%esp), %edi # 4-byte Folded Reload - movl %edi, 48(%esp) # 4-byte Spill - adcl 72(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 40(%esp) # 4-byte Spill - adcl 76(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%esp) # 4-byte Spill - adcl 80(%esp), %esi # 4-byte Folded Reload - movl %esi, 56(%esp) # 4-byte Spill - adcl 84(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - movl %edi, %ebx - imull 108(%esp), %ebx # 4-byte Folded Reload - movl %ebx, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 80(%esp) # 4-byte Spill - movl %ebx, %eax - mull 144(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 76(%esp) # 4-byte Spill - movl %ebx, %eax - mull 140(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 72(%esp) # 4-byte Spill - movl %ebx, %eax - mull 136(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %ebx, %eax - mull 132(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte Spill - movl %ebx, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, %ecx - movl %ebx, %eax - mull 148(%esp) # 
4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - addl %edi, %eax - adcl %ebp, %ecx - movl %ecx, %esi - movl 64(%esp), %edx # 4-byte Reload - adcl 48(%esp), %edx # 4-byte Folded Reload - movl 68(%esp), %ebp # 4-byte Reload - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl 72(%esp), %ebx # 4-byte Reload - adcl 52(%esp), %ebx # 4-byte Folded Reload - movl 76(%esp), %edi # 4-byte Reload - adcl 56(%esp), %edi # 4-byte Folded Reload - movl 80(%esp), %ecx # 4-byte Reload - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl 88(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - adcl 24(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 68(%esp) # 4-byte Spill - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 72(%esp) # 4-byte Spill - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 76(%esp) # 4-byte Spill - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 80(%esp) # 4-byte Spill - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 180(%esp), %eax - movl 12(%eax), %edi - movl %edi, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %edi, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %edi, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %edi, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %edi, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 24(%esp) # 4-byte Spill - movl %edi, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, %ebp - movl %edi, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %eax, 44(%esp) # 4-byte Spill - movl %edx, %ebx - addl %ebp, %ebx - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, %edi - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 48(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl 56(%esp), %ebp # 4-byte Reload - adcl 32(%esp), %ebp # 4-byte Folded Reload - movl 60(%esp), %edx # 4-byte Reload - adcl 36(%esp), %edx # 4-byte Folded Reload - movl 84(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 44(%esp), %esi # 4-byte Reload - addl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 44(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - adcl 68(%esp), %edi # 4-byte Folded Reload - movl %edi, 40(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - adcl 72(%esp), %esi # 4-byte Folded Reload - movl %esi, 48(%esp) # 4-byte Spill - adcl 76(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%esp) # 4-byte Spill - adcl 80(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 56(%esp) # 4-byte Spill - adcl 88(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - movl %edi, %ebp - imull 108(%esp), %ebp # 4-byte Folded Reload - movl %ebp, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 80(%esp) # 4-byte Spill - movl %ebp, %eax - mull 144(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 76(%esp) # 4-byte Spill - movl 
%ebp, %eax - mull 140(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 72(%esp) # 4-byte Spill - movl %ebp, %eax - mull 136(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %ebp, %eax - mull 132(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte Spill - movl %ebp, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, %esi - movl %ebp, %eax - mull 148(%esp) # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - addl %edi, %eax - adcl %ebx, %esi - movl 64(%esp), %edx # 4-byte Reload - adcl 40(%esp), %edx # 4-byte Folded Reload - movl 68(%esp), %ebp # 4-byte Reload - adcl 48(%esp), %ebp # 4-byte Folded Reload - movl 72(%esp), %ebx # 4-byte Reload - adcl 52(%esp), %ebx # 4-byte Folded Reload - movl 76(%esp), %edi # 4-byte Reload - adcl 56(%esp), %edi # 4-byte Folded Reload - movl 80(%esp), %ecx # 4-byte Reload - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl 84(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - adcl 24(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 68(%esp) # 4-byte Spill - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 72(%esp) # 4-byte Spill - adcl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 76(%esp) # 4-byte Spill - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 80(%esp) # 4-byte Spill - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 180(%esp), %eax - movl 16(%eax), %ebp - movl %ebp, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ebp, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ebp, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ebp, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %ebp, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, %edi - movl %ebp, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, %ebx - movl %ebp, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %eax, 24(%esp) # 4-byte Spill - addl %ebx, %edx - movl %edx, 44(%esp) # 4-byte Spill - adcl %edi, %esi - movl %esi, %edi - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 48(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 28(%esp), %ebp # 4-byte Folded Reload - movl 56(%esp), %ebx # 4-byte Reload - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl 60(%esp), %edx # 4-byte Reload - adcl 36(%esp), %edx # 4-byte Folded Reload - movl 88(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 24(%esp), %ecx # 4-byte Reload - addl 40(%esp), %ecx # 4-byte Folded Reload - movl 44(%esp), %esi # 4-byte Reload - adcl 64(%esp), %esi # 4-byte Folded Reload - movl %esi, 44(%esp) # 4-byte Spill - adcl 68(%esp), %edi # 4-byte Folded Reload - movl 48(%esp), %esi # 4-byte Reload - adcl 72(%esp), %esi # 4-byte Folded Reload - movl %esi, 48(%esp) # 4-byte Spill - adcl 76(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 52(%esp) # 4-byte Spill - adcl 80(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 56(%esp) # 4-byte Spill - adcl 84(%esp), 
%edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 88(%esp) # 4-byte Spill - movl %ecx, %esi - imull 108(%esp), %esi # 4-byte Folded Reload - movl %esi, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 80(%esp) # 4-byte Spill - movl %esi, %eax - mull 144(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 76(%esp) # 4-byte Spill - movl %esi, %eax - mull 140(%esp) # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, 72(%esp) # 4-byte Spill - movl %esi, %eax - mull 136(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, %ebp - movl %esi, %eax - mull 132(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %esi, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, %ebx - movl %esi, %eax - mull 148(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - addl %ecx, %eax - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl 68(%esp), %edx # 4-byte Reload - adcl %edi, %edx - adcl 48(%esp), %ebp # 4-byte Folded Reload - movl 72(%esp), %edi # 4-byte Reload - adcl 52(%esp), %edi # 4-byte Folded Reload - movl 76(%esp), %esi # 4-byte Reload - adcl 56(%esp), %esi # 4-byte Folded Reload - movl 80(%esp), %ecx # 4-byte Reload - adcl 60(%esp), %ecx # 4-byte Folded Reload - movl 88(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl 24(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 44(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - adcl 32(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 48(%esp) # 4-byte Spill - adcl 36(%esp), %edi # 4-byte Folded Reload - movl %edi, 72(%esp) # 4-byte Spill - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 76(%esp) # 4-byte Spill - adcl 64(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 80(%esp) # 4-byte Spill - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 180(%esp), %eax - movl 20(%eax), %ebp - movl %ebp, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ebp, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, %edi - movl %ebp, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ebp, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %ebp, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, 56(%esp) # 4-byte Spill - movl %ebp, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, %ebx - movl %ebp, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - movl %edx, %ebp - addl %ebx, %ebp - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 56(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, %ebx - movl 60(%esp), %esi # 4-byte Reload - adcl 32(%esp), %esi # 4-byte Folded Reload - movl 36(%esp), %edx # 4-byte Reload - adcl %edi, %edx - movl 64(%esp), %ecx # 4-byte Reload - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl 84(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl 52(%esp), %edi # 4-byte Reload - addl 44(%esp), %edi # 4-byte Folded Reload - movl %edi, 52(%esp) # 4-byte Spill - adcl 68(%esp), %ebp # 4-byte Folded Reload - 
movl 56(%esp), %edi # 4-byte Reload - adcl 48(%esp), %edi # 4-byte Folded Reload - movl %edi, 56(%esp) # 4-byte Spill - adcl 72(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 40(%esp) # 4-byte Spill - adcl 76(%esp), %esi # 4-byte Folded Reload - movl %esi, 60(%esp) # 4-byte Spill - adcl 80(%esp), %edx # 4-byte Folded Reload - movl %edx, %esi - adcl 88(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 64(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - movl %edi, %ebx - imull 108(%esp), %ebx # 4-byte Folded Reload - movl %ebx, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 88(%esp) # 4-byte Spill - movl %ebx, %eax - mull 144(%esp) # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 80(%esp) # 4-byte Spill - movl %ebx, %eax - mull 140(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 76(%esp) # 4-byte Spill - movl %ebx, %eax - mull 136(%esp) # 4-byte Folded Reload - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, 72(%esp) # 4-byte Spill - movl %ebx, %eax - mull 132(%esp) # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %ebx, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - movl %eax, %ecx - movl %ebx, %eax - mull 148(%esp) # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - addl %edi, %eax - adcl %ebp, %ecx - movl %ecx, %edx - movl 68(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl 72(%esp), %ebp # 4-byte Reload - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl 76(%esp), %ebx # 4-byte Reload - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl 80(%esp), %edi # 4-byte Reload - adcl %esi, %edi - movl 88(%esp), %esi # 4-byte Reload - adcl 64(%esp), %esi # 4-byte Folded Reload - movl 84(%esp), %eax # 4-byte Reload - adcl $0, %eax - addl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - adcl 28(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 72(%esp) # 4-byte Spill - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 76(%esp) # 4-byte Spill - adcl 36(%esp), %edi # 4-byte Folded Reload - movl %edi, 80(%esp) # 4-byte Spill - adcl 44(%esp), %esi # 4-byte Folded Reload - movl %esi, 88(%esp) # 4-byte Spill - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 180(%esp), %eax - movl 24(%eax), %edi - movl %edi, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, 112(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - movl %edi, %eax - mull 116(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 116(%esp) # 4-byte Spill - movl %edi, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 92(%esp) # 4-byte Spill - movl %eax, 52(%esp) # 4-byte Spill - movl %edi, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, 96(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %edi, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 100(%esp) # 4-byte Spill - movl %eax, %esi - movl %edi, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, %ebp - movl %edi, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %eax, %ebx - addl %ebp, %edx - movl %edx, 104(%esp) # 4-byte Spill - movl %ecx, %edi - adcl %esi, %edi - movl 100(%esp), %esi # 4-byte Reload - adcl 48(%esp), %esi # 4-byte Folded Reload 
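
mcl_fp_montNF7L above repeats the same seven CIOS word steps; the NF variant (mcl appears to emit it when the modulus leaves the top bit of the highest limb clear, so intermediate sums never spill past the carry word) can finish by subtracting p once and testing the sign of the top word, the `sarl $31 / js .LBB99_*` ladder that follows. The same selection in Go terms, a sketch in which the final borrow stands in for the sign bit:

    package mont

    import "math/bits"

    // condSub7 mirrors the montNF tail: compute d = t - p once and
    // keep t only when the subtraction underflows. The assembly reads
    // the sign of the top word (sarl $31 / js); with math/bits the
    // final borrow carries the same information.
    func condSub7(t, p *[7]uint32) (z [7]uint32) {
        var d [7]uint32
        var borrow uint32
        for j := 0; j < 7; j++ {
            d[j], borrow = bits.Sub32(t[j], p[j], borrow)
        }
        if borrow == 1 {
            copy(z[:], t[:]) // t < p: already reduced
        } else {
            copy(z[:], d[:]) // t >= p: take t - p
        }
        return
    }
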
- movl 96(%esp), %ecx # 4-byte Reload - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl 92(%esp), %edx # 4-byte Reload - adcl 116(%esp), %edx # 4-byte Folded Reload - movl 64(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl 112(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - addl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 48(%esp) # 4-byte Spill - movl 68(%esp), %ebx # 4-byte Reload - adcl %ebx, 104(%esp) # 4-byte Folded Spill - adcl 72(%esp), %edi # 4-byte Folded Reload - movl %edi, 72(%esp) # 4-byte Spill - adcl 76(%esp), %esi # 4-byte Folded Reload - movl %esi, 100(%esp) # 4-byte Spill - adcl 80(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 96(%esp) # 4-byte Spill - adcl 88(%esp), %edx # 4-byte Folded Reload - movl %edx, 92(%esp) # 4-byte Spill - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 112(%esp) # 4-byte Spill - movl 108(%esp), %edi # 4-byte Reload - movl 48(%esp), %ecx # 4-byte Reload - imull %ecx, %edi - movl %edi, %eax - mull 124(%esp) # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 76(%esp) # 4-byte Spill - movl %edi, %eax - mull 144(%esp) # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 108(%esp) # 4-byte Spill - movl %edi, %eax - mull 140(%esp) # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, 120(%esp) # 4-byte Spill - movl %edi, %eax - mull 136(%esp) # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 116(%esp) # 4-byte Spill - movl %edi, %eax - mull 148(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, %ebp - movl %edi, %eax - mull 132(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, %esi - movl %edi, %eax - mull 128(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - addl %ecx, %ebp - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, %edx - adcl 72(%esp), %esi # 4-byte Folded Reload - movl 116(%esp), %ecx # 4-byte Reload - adcl 100(%esp), %ecx # 4-byte Folded Reload - movl 120(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl 108(%esp), %ebx # 4-byte Reload - adcl 92(%esp), %ebx # 4-byte Folded Reload - movl 76(%esp), %ebp # 4-byte Reload - adcl 64(%esp), %ebp # 4-byte Folded Reload - movl 112(%esp), %edi # 4-byte Reload - adcl $0, %edi - addl 60(%esp), %edx # 4-byte Folded Reload - adcl 52(%esp), %esi # 4-byte Folded Reload - movl %esi, 104(%esp) # 4-byte Spill - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 116(%esp) # 4-byte Spill - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 120(%esp) # 4-byte Spill - adcl 80(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 108(%esp) # 4-byte Spill - adcl 84(%esp), %ebp # 4-byte Folded Reload - adcl 88(%esp), %edi # 4-byte Folded Reload - movl %edi, 112(%esp) # 4-byte Spill - movl %edx, %eax - subl 148(%esp), %eax # 4-byte Folded Reload - sbbl 128(%esp), %esi # 4-byte Folded Reload - movl %esi, 128(%esp) # 4-byte Spill - sbbl 132(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 132(%esp) # 4-byte Spill - movl 120(%esp), %ecx # 4-byte Reload - movl %edx, %esi - sbbl 136(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 136(%esp) # 4-byte Spill - sbbl 140(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 148(%esp) # 4-byte Spill - movl %ebp, %ebx - movl %ebp, %ecx - movl %ebx, %ebp - sbbl 144(%esp), %ebp # 4-byte Folded Reload - movl %edi, %ebx - sbbl 124(%esp), %ebx # 4-byte Folded Reload - movl %ebx, %edi - sarl 
$31, %edi - testl %edi, %edi - js .LBB99_2 -# BB#1: - movl %eax, %esi -.LBB99_2: - movl 172(%esp), %edx - movl %esi, (%edx) - movl 104(%esp), %eax # 4-byte Reload - js .LBB99_4 -# BB#3: - movl 128(%esp), %eax # 4-byte Reload -.LBB99_4: - movl %eax, 4(%edx) - movl %ecx, %eax - movl 116(%esp), %ecx # 4-byte Reload - js .LBB99_6 -# BB#5: - movl 132(%esp), %ecx # 4-byte Reload -.LBB99_6: - movl %ecx, 8(%edx) - movl 108(%esp), %esi # 4-byte Reload - movl 120(%esp), %ecx # 4-byte Reload - js .LBB99_8 -# BB#7: - movl 136(%esp), %ecx # 4-byte Reload -.LBB99_8: - movl %ecx, 12(%edx) - js .LBB99_10 -# BB#9: - movl 148(%esp), %esi # 4-byte Reload -.LBB99_10: - movl %esi, 16(%edx) - js .LBB99_12 -# BB#11: - movl %ebp, %eax -.LBB99_12: - movl %eax, 20(%edx) - js .LBB99_14 -# BB#13: - movl %ebx, 112(%esp) # 4-byte Spill -.LBB99_14: - movl 112(%esp), %eax # 4-byte Reload - movl %eax, 24(%edx) - addl $152, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end99: - .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L - - .globl mcl_fp_montRed7L - .align 16, 0x90 - .type mcl_fp_montRed7L,@function -mcl_fp_montRed7L: # @mcl_fp_montRed7L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $124, %esp - movl 152(%esp), %eax - movl -4(%eax), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl (%eax), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 148(%esp), %ecx - movl (%ecx), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - imull %edx, %ecx - movl 24(%eax), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 20(%eax), %edx - movl %edx, 120(%esp) # 4-byte Spill - movl 16(%eax), %ebp - movl %ebp, 104(%esp) # 4-byte Spill - movl 12(%eax), %edx - movl %edx, 108(%esp) # 4-byte Spill - movl 8(%eax), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 4(%eax), %ebx - movl %ebx, 96(%esp) # 4-byte Spill - movl %ecx, %eax - mull %edi - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, %edi - movl %ecx, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 84(%esp) # 4-byte Spill - movl %ecx, %eax - mull %ebp - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 80(%esp) # 4-byte Spill - movl %ecx, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 76(%esp) # 4-byte Spill - movl %ecx, %eax - mull %esi - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %ecx, %eax - mull %ebx - movl %edx, %esi - movl %eax, %ebx - movl %ecx, %eax - mull 112(%esp) # 4-byte Folded Reload - addl %ebx, %edx - movl %edx, 72(%esp) # 4-byte Spill - adcl 68(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 76(%esp), %edx # 4-byte Folded Reload - adcl 80(%esp), %ebp # 4-byte Folded Reload - movl 56(%esp), %ebx # 4-byte Reload - adcl 84(%esp), %ebx # 4-byte Folded Reload - movl 60(%esp), %esi # 4-byte Reload - adcl %edi, %esi - movl 64(%esp), %edi # 4-byte Reload - adcl $0, %edi - addl 116(%esp), %eax # 4-byte Folded Reload - movl 148(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 4(%ecx), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 8(%ecx), %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl 12(%ecx), %edx - movl %edx, 52(%esp) # 4-byte Spill - adcl 16(%ecx), %ebp - movl %ebp, 20(%esp) # 4-byte Spill - adcl 20(%ecx), %ebx - movl %ebx, 56(%esp) # 4-byte Spill - adcl 24(%ecx), %esi - movl %esi, 60(%esp) # 4-byte Spill - adcl 28(%ecx), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 52(%ecx), %esi - movl 48(%ecx), %edi - movl 
44(%ecx), %edx - movl 40(%ecx), %ebx - movl 36(%ecx), %ebp - movl 32(%ecx), %eax - adcl $0, %eax - movl %eax, 12(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 24(%esp) # 4-byte Spill - adcl $0, %ebx - movl %ebx, 68(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 76(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 80(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 84(%esp) # 4-byte Spill - sbbl %eax, %eax - andl $1, %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - imull 88(%esp), %ecx # 4-byte Folded Reload - movl %ecx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ecx, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ecx, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %ecx, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, (%esp) # 4-byte Spill - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, %ebp - movl %ecx, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, %ebx - movl %ecx, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %eax, %ecx - addl %ebx, %edx - movl %edx, 28(%esp) # 4-byte Spill - adcl %ebp, %edi - movl %esi, %eax - adcl (%esp), %eax # 4-byte Folded Reload - movl 36(%esp), %esi # 4-byte Reload - adcl 4(%esp), %esi # 4-byte Folded Reload - movl 40(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl 44(%esp), %ebx # 4-byte Reload - adcl 16(%esp), %ebx # 4-byte Folded Reload - movl 48(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 72(%esp), %ecx # 4-byte Folded Reload - movl 28(%esp), %ecx # 4-byte Reload - adcl 32(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 28(%esp) # 4-byte Spill - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - adcl 56(%esp), %esi # 4-byte Folded Reload - movl %esi, 36(%esp) # 4-byte Spill - adcl 60(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 40(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 44(%esp) # 4-byte Spill - adcl 12(%esp), %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - adcl $0, 24(%esp) # 4-byte Folded Spill - adcl $0, 68(%esp) # 4-byte Folded Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - movl %ecx, %edi - imull 88(%esp), %edi # 4-byte Folded Reload - movl %edi, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %edi, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %edi, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %edi, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %edi, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, %ebx - movl %edi, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, %esi - movl %edi, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %eax, %edi - addl %esi, 
%edx - movl %edx, 32(%esp) # 4-byte Spill - adcl %ebx, %ebp - movl %ebp, %eax - adcl 4(%esp), %ecx # 4-byte Folded Reload - movl 56(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl 60(%esp), %ebx # 4-byte Reload - adcl 12(%esp), %ebx # 4-byte Folded Reload - movl 64(%esp), %esi # 4-byte Reload - adcl 20(%esp), %esi # 4-byte Folded Reload - movl 72(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 28(%esp), %edi # 4-byte Folded Reload - movl 32(%esp), %edi # 4-byte Reload - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 16(%esp) # 4-byte Spill - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 56(%esp) # 4-byte Spill - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 60(%esp) # 4-byte Spill - adcl 48(%esp), %esi # 4-byte Folded Reload - movl %esi, 64(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - adcl $0, 68(%esp) # 4-byte Folded Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - movl %edi, %esi - imull 88(%esp), %esi # 4-byte Folded Reload - movl %esi, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %esi, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %esi, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %esi, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, 4(%esp) # 4-byte Spill - movl %esi, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, %ecx - movl %esi, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, %ebx - movl %eax, %ebp - movl %esi, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %eax, %edi - addl %ebp, %edx - movl %edx, 28(%esp) # 4-byte Spill - adcl %ecx, %ebx - movl %ebx, %esi - movl 36(%esp), %eax # 4-byte Reload - adcl 4(%esp), %eax # 4-byte Folded Reload - movl 40(%esp), %ebx # 4-byte Reload - adcl 8(%esp), %ebx # 4-byte Folded Reload - movl 44(%esp), %ebp # 4-byte Reload - adcl 12(%esp), %ebp # 4-byte Folded Reload - movl 48(%esp), %edx # 4-byte Reload - adcl 24(%esp), %edx # 4-byte Folded Reload - movl 52(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - addl 32(%esp), %edi # 4-byte Folded Reload - movl 28(%esp), %edi # 4-byte Reload - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 40(%esp) # 4-byte Spill - adcl 64(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 44(%esp) # 4-byte Spill - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - adcl 68(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 52(%esp) # 4-byte Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - movl %edi, %ebp - imull 88(%esp), %ebp # 4-byte Folded Reload - movl %ebp, %eax - mull 
92(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - movl %ebp, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %ebp, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ebp, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %ebp, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, %ecx - movl %ebp, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, %ebx - movl %ebp, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %eax, %ebp - addl %ebx, %edx - movl %edx, 32(%esp) # 4-byte Spill - adcl %ecx, %edi - movl %edi, %ecx - adcl 12(%esp), %esi # 4-byte Folded Reload - movl 60(%esp), %eax # 4-byte Reload - adcl 16(%esp), %eax # 4-byte Folded Reload - movl 64(%esp), %ebx # 4-byte Reload - adcl 20(%esp), %ebx # 4-byte Folded Reload - movl 68(%esp), %edi # 4-byte Reload - adcl 56(%esp), %edi # 4-byte Folded Reload - movl 72(%esp), %edx # 4-byte Reload - adcl $0, %edx - addl 28(%esp), %ebp # 4-byte Folded Reload - movl 32(%esp), %ebp # 4-byte Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 32(%esp) # 4-byte Spill - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 36(%esp) # 4-byte Spill - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 40(%esp) # 4-byte Spill - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - adcl 48(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %edi, 68(%esp) # 4-byte Spill - adcl 76(%esp), %edx # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - movl %ebp, %edi - imull 88(%esp), %edi # 4-byte Folded Reload - movl %edi, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 28(%esp) # 4-byte Spill - movl %edi, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %edi, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 20(%esp) # 4-byte Spill - movl %edi, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, %ecx - movl %eax, 16(%esp) # 4-byte Spill - movl %edi, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, 48(%esp) # 4-byte Spill - movl %eax, 12(%esp) # 4-byte Spill - movl %edi, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, %esi - movl %eax, %ebx - movl %edi, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %eax, %ebp - addl %ebx, %edx - movl %edx, 44(%esp) # 4-byte Spill - adcl 12(%esp), %esi # 4-byte Folded Reload - movl %esi, %eax - movl 48(%esp), %edi # 4-byte Reload - adcl 16(%esp), %edi # 4-byte Folded Reload - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl 52(%esp), %ebx # 4-byte Reload - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl 56(%esp), %edx # 4-byte Reload - adcl 28(%esp), %edx # 4-byte Folded Reload - movl 76(%esp), %esi # 4-byte Reload - adcl $0, %esi - addl 32(%esp), %ebp # 4-byte Folded Reload - movl 44(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 44(%esp) # 4-byte Spill - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 
40(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 48(%esp) # 4-byte Spill - adcl 64(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 60(%esp) # 4-byte Spill - adcl 68(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 52(%esp) # 4-byte Spill - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %edx, 56(%esp) # 4-byte Spill - adcl 80(%esp), %esi # 4-byte Folded Reload - movl %esi, 76(%esp) # 4-byte Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - movl 88(%esp), %ecx # 4-byte Reload - imull %ebp, %ecx - movl %ecx, %eax - mull 92(%esp) # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl %eax, 68(%esp) # 4-byte Spill - movl %ecx, %eax - mull 120(%esp) # 4-byte Folded Reload - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, 36(%esp) # 4-byte Spill - movl %ecx, %eax - mull 104(%esp) # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ecx, %eax - mull 108(%esp) # 4-byte Folded Reload - movl %edx, %ebp - movl %eax, 80(%esp) # 4-byte Spill - movl %ecx, %eax - mull 100(%esp) # 4-byte Folded Reload - movl %edx, %ebx - movl %eax, 24(%esp) # 4-byte Spill - movl %ecx, %eax - mull 112(%esp) # 4-byte Folded Reload - movl %edx, %edi - movl %eax, 28(%esp) # 4-byte Spill - movl %ecx, %eax - mull 96(%esp) # 4-byte Folded Reload - movl %edx, %esi - addl %edi, %eax - movl %eax, %edi - adcl 24(%esp), %esi # 4-byte Folded Reload - adcl 80(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 80(%esp) # 4-byte Spill - adcl 32(%esp), %ebp # 4-byte Folded Reload - movl 88(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl 64(%esp), %edx # 4-byte Reload - adcl 68(%esp), %edx # 4-byte Folded Reload - movl 72(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - movl 28(%esp), %ebx # 4-byte Reload - addl 44(%esp), %ebx # 4-byte Folded Reload - adcl 40(%esp), %edi # 4-byte Folded Reload - adcl 48(%esp), %esi # 4-byte Folded Reload - movl %esi, 64(%esp) # 4-byte Spill - movl 80(%esp), %ebx # 4-byte Reload - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 80(%esp) # 4-byte Spill - adcl 52(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 68(%esp) # 4-byte Spill - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - adcl 76(%esp), %edx # 4-byte Folded Reload - movl %edx, %eax - adcl 84(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 72(%esp) # 4-byte Spill - movl 116(%esp), %ebx # 4-byte Reload - adcl $0, %ebx - movl %edi, %edx - movl %edx, %ecx - subl 112(%esp), %ecx # 4-byte Folded Reload - sbbl 96(%esp), %esi # 4-byte Folded Reload - movl %esi, 84(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - sbbl 100(%esp), %edi # 4-byte Folded Reload - movl %edi, 96(%esp) # 4-byte Spill - sbbl 108(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 100(%esp) # 4-byte Spill - movl 88(%esp), %esi # 4-byte Reload - sbbl 104(%esp), %esi # 4-byte Folded Reload - movl %esi, 108(%esp) # 4-byte Spill - movl %eax, %esi - movl %esi, %ebp - sbbl 120(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - movl %esi, %eax - sbbl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 120(%esp) # 4-byte Spill - sbbl $0, %ebx - andl $1, %ebx - movl %ebx, 116(%esp) # 4-byte Spill - jne .LBB100_2 -# BB#1: - movl %ecx, %edx -.LBB100_2: - movl 144(%esp), %edi - movl %edx, (%edi) - movl 116(%esp), %eax # 4-byte Reload - testb %al, %al - movl 64(%esp), %eax # 4-byte Reload - jne .LBB100_4 -# BB#3: - movl 
84(%esp), %eax # 4-byte Reload -.LBB100_4: - movl %eax, 4(%edi) - movl 80(%esp), %eax # 4-byte Reload - jne .LBB100_6 -# BB#5: - movl 96(%esp), %eax # 4-byte Reload -.LBB100_6: - movl %eax, 8(%edi) - movl 88(%esp), %eax # 4-byte Reload - movl 68(%esp), %ecx # 4-byte Reload - jne .LBB100_8 -# BB#7: - movl 100(%esp), %ecx # 4-byte Reload -.LBB100_8: - movl %ecx, 12(%edi) - jne .LBB100_10 -# BB#9: - movl 108(%esp), %eax # 4-byte Reload -.LBB100_10: - movl %eax, 16(%edi) - jne .LBB100_12 -# BB#11: - movl 112(%esp), %ebp # 4-byte Reload -.LBB100_12: - movl %ebp, 20(%edi) - jne .LBB100_14 -# BB#13: - movl 120(%esp), %esi # 4-byte Reload -.LBB100_14: - movl %esi, 24(%edi) - addl $124, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end100: - .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L - - .globl mcl_fp_addPre7L - .align 16, 0x90 - .type mcl_fp_addPre7L,@function -mcl_fp_addPre7L: # @mcl_fp_addPre7L -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - movl 20(%esp), %esi - addl (%esi), %ecx - adcl 4(%esi), %edx - movl 8(%eax), %edi - adcl 8(%esi), %edi - movl 16(%esp), %ebx - movl %ecx, (%ebx) - movl 12(%esi), %ecx - movl %edx, 4(%ebx) - movl 16(%esi), %edx - adcl 12(%eax), %ecx - adcl 16(%eax), %edx - movl %edi, 8(%ebx) - movl 20(%eax), %edi - movl %ecx, 12(%ebx) - movl 20(%esi), %ecx - adcl %edi, %ecx - movl %edx, 16(%ebx) - movl %ecx, 20(%ebx) - movl 24(%eax), %eax - movl 24(%esi), %ecx - adcl %eax, %ecx - movl %ecx, 24(%ebx) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end101: - .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L - - .globl mcl_fp_subPre7L - .align 16, 0x90 - .type mcl_fp_subPre7L,@function -mcl_fp_subPre7L: # @mcl_fp_subPre7L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %edx - movl 4(%ecx), %esi - xorl %eax, %eax - movl 28(%esp), %edi - subl (%edi), %edx - sbbl 4(%edi), %esi - movl 8(%ecx), %ebx - sbbl 8(%edi), %ebx - movl 20(%esp), %ebp - movl %edx, (%ebp) - movl 12(%ecx), %edx - sbbl 12(%edi), %edx - movl %esi, 4(%ebp) - movl 16(%ecx), %esi - sbbl 16(%edi), %esi - movl %ebx, 8(%ebp) - movl 20(%edi), %ebx - movl %edx, 12(%ebp) - movl 20(%ecx), %edx - sbbl %ebx, %edx - movl %esi, 16(%ebp) - movl %edx, 20(%ebp) - movl 24(%edi), %edx - movl 24(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 24(%ebp) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end102: - .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L - - .globl mcl_fp_shr1_7L - .align 16, 0x90 - .type mcl_fp_shr1_7L,@function -mcl_fp_shr1_7L: # @mcl_fp_shr1_7L -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - shrdl $1, %edx, %ecx - movl 8(%esp), %esi - movl %ecx, (%esi) - movl 8(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 4(%esi) - movl 12(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 8(%esi) - movl 16(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 12(%esi) - movl 20(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 16(%esi) - movl 24(%eax), %eax - shrdl $1, %eax, %edx - movl %edx, 20(%esi) - shrl %eax - movl %eax, 24(%esi) - popl %esi - retl -.Lfunc_end103: - .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L - - .globl mcl_fp_add7L - .align 16, 0x90 - .type mcl_fp_add7L,@function -mcl_fp_add7L: # @mcl_fp_add7L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $20, %esp - movl 48(%esp), %ebp - movl (%ebp), %eax - movl 4(%ebp), %edi - 
movl 44(%esp), %ecx - addl (%ecx), %eax - adcl 4(%ecx), %edi - movl 8(%ebp), %esi - adcl 8(%ecx), %esi - movl 12(%ecx), %edx - movl 16(%ecx), %ebx - adcl 12(%ebp), %edx - movl %edx, 16(%esp) # 4-byte Spill - adcl 16(%ebp), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - movl %ebp, %ebx - movl 20(%ecx), %ebp - adcl 20(%ebx), %ebp - movl 24(%ecx), %edx - adcl 24(%ebx), %edx - movl 40(%esp), %ecx - movl %eax, (%ecx) - movl %edi, 4(%ecx) - movl %esi, 8(%ecx) - movl 16(%esp), %ebx # 4-byte Reload - movl %ebx, 12(%ecx) - movl 12(%esp), %ebx # 4-byte Reload - movl %ebx, 16(%ecx) - movl %ebp, 20(%ecx) - movl %edx, 24(%ecx) - sbbl %ebx, %ebx - andl $1, %ebx - movl 52(%esp), %ecx - subl (%ecx), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 12(%esp), %ecx # 4-byte Reload - movl 52(%esp), %eax - sbbl 4(%eax), %edi - movl %edi, 4(%esp) # 4-byte Spill - movl %eax, %edi - sbbl 8(%edi), %esi - movl %esi, (%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - sbbl 12(%edi), %eax - movl %eax, 16(%esp) # 4-byte Spill - sbbl 16(%edi), %ecx - movl %ecx, %esi - sbbl 20(%edi), %ebp - sbbl 24(%edi), %edx - sbbl $0, %ebx - testb $1, %bl - jne .LBB104_2 -# BB#1: # %nocarry - movl 8(%esp), %ecx # 4-byte Reload - movl 40(%esp), %eax - movl %eax, %ebx - movl %ecx, (%ebx) - movl 4(%esp), %ecx # 4-byte Reload - movl %ecx, 4(%ebx) - movl (%esp), %eax # 4-byte Reload - movl %eax, 8(%ebx) - movl 16(%esp), %eax # 4-byte Reload - movl %eax, 12(%ebx) - movl %esi, 16(%ebx) - movl %ebp, 20(%ebx) - movl %edx, 24(%ebx) -.LBB104_2: # %carry - addl $20, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end104: - .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L - - .globl mcl_fp_addNF7L - .align 16, 0x90 - .type mcl_fp_addNF7L,@function -mcl_fp_addNF7L: # @mcl_fp_addNF7L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $52, %esp - movl 80(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - movl 76(%esp), %esi - addl (%esi), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - adcl 4(%esi), %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 24(%eax), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 20(%eax), %ebx - movl 16(%eax), %edi - movl 12(%eax), %ebp - movl 8(%eax), %ecx - adcl 8(%esi), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - adcl 12(%esi), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - adcl 16(%esi), %edi - movl %edi, 36(%esp) # 4-byte Spill - adcl 20(%esi), %ebx - movl %ebx, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 24(%esi), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 84(%esp), %eax - movl 44(%esp), %esi # 4-byte Reload - subl (%eax), %esi - movl %esi, (%esp) # 4-byte Spill - sbbl 4(%eax), %edx - movl %edx, 4(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - sbbl 8(%eax), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - sbbl 12(%eax), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - sbbl 16(%eax), %edi - movl %edi, 20(%esp) # 4-byte Spill - sbbl 20(%eax), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - sbbl 24(%eax), %edi - movl %edi, %ecx - sarl $31, %ecx - testl %ecx, %ecx - js .LBB105_2 -# BB#1: - movl (%esp), %esi # 4-byte Reload -.LBB105_2: - movl 72(%esp), %ecx - movl %esi, (%ecx) - movl 28(%esp), %eax # 4-byte Reload - js .LBB105_4 -# BB#3: - movl 4(%esp), %eax # 4-byte Reload -.LBB105_4: - movl %eax, 4(%ecx) - movl 48(%esp), %ebp # 4-byte Reload - movl 40(%esp), %ecx # 4-byte Reload - movl 36(%esp), %edx # 4-byte Reload - movl 32(%esp), %esi # 4-byte Reload - movl 24(%esp), %ebx # 4-byte Reload - js .LBB105_6 -# BB#5: - 
movl 8(%esp), %ebx # 4-byte Reload -.LBB105_6: - movl 72(%esp), %eax - movl %ebx, 8(%eax) - movl %eax, %ebx - js .LBB105_8 -# BB#7: - movl 16(%esp), %esi # 4-byte Reload -.LBB105_8: - movl %esi, 12(%ebx) - js .LBB105_10 -# BB#9: - movl 20(%esp), %edx # 4-byte Reload -.LBB105_10: - movl %edx, 16(%ebx) - js .LBB105_12 -# BB#11: - movl 12(%esp), %ecx # 4-byte Reload -.LBB105_12: - movl %ecx, 20(%ebx) - js .LBB105_14 -# BB#13: - movl %edi, %ebp -.LBB105_14: - movl %ebp, 24(%ebx) - addl $52, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end105: - .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L - - .globl mcl_fp_sub7L - .align 16, 0x90 - .type mcl_fp_sub7L,@function -mcl_fp_sub7L: # @mcl_fp_sub7L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $24, %esp - movl 48(%esp), %edi - movl (%edi), %eax - movl 4(%edi), %ecx - xorl %ebx, %ebx - movl 52(%esp), %esi - subl (%esi), %eax - movl %eax, 16(%esp) # 4-byte Spill - sbbl 4(%esi), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 8(%edi), %edx - sbbl 8(%esi), %edx - movl %edx, 4(%esp) # 4-byte Spill - movl 12(%edi), %ecx - sbbl 12(%esi), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 16(%edi), %eax - sbbl 16(%esi), %eax - movl %eax, (%esp) # 4-byte Spill - movl 20(%edi), %ebp - sbbl 20(%esi), %ebp - movl %ebp, 12(%esp) # 4-byte Spill - movl 24(%edi), %edi - sbbl 24(%esi), %edi - sbbl $0, %ebx - testb $1, %bl - movl 44(%esp), %ebx - movl 16(%esp), %esi # 4-byte Reload - movl %esi, (%ebx) - movl 20(%esp), %esi # 4-byte Reload - movl %esi, 4(%ebx) - movl %edx, 8(%ebx) - movl %ecx, 12(%ebx) - movl %eax, 16(%ebx) - movl %ebp, 20(%ebx) - movl %edi, 24(%ebx) - je .LBB106_2 -# BB#1: # %carry - movl 56(%esp), %ebp - movl 16(%esp), %ecx # 4-byte Reload - addl (%ebp), %ecx - movl %ecx, (%ebx) - movl 20(%esp), %edx # 4-byte Reload - adcl 4(%ebp), %edx - movl %edx, 4(%ebx) - movl 4(%esp), %ecx # 4-byte Reload - adcl 8(%ebp), %ecx - movl 12(%ebp), %eax - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %ecx, 8(%ebx) - movl 16(%ebp), %ecx - adcl (%esp), %ecx # 4-byte Folded Reload - movl %eax, 12(%ebx) - movl %ecx, 16(%ebx) - movl 20(%ebp), %eax - adcl 12(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%ebx) - movl 24(%ebp), %eax - adcl %edi, %eax - movl %eax, 24(%ebx) -.LBB106_2: # %nocarry - addl $24, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end106: - .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L - - .globl mcl_fp_subNF7L - .align 16, 0x90 - .type mcl_fp_subNF7L,@function -mcl_fp_subNF7L: # @mcl_fp_subNF7L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $32, %esp - movl 56(%esp), %eax - movl (%eax), %esi - movl 4(%eax), %edx - movl 60(%esp), %ecx - subl (%ecx), %esi - movl %esi, 20(%esp) # 4-byte Spill - sbbl 4(%ecx), %edx - movl %edx, 24(%esp) # 4-byte Spill - movl 24(%eax), %edx - movl 20(%eax), %esi - movl 16(%eax), %edi - movl 12(%eax), %ebx - movl 8(%eax), %eax - sbbl 8(%ecx), %eax - movl %eax, 4(%esp) # 4-byte Spill - sbbl 12(%ecx), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - sbbl 16(%ecx), %edi - movl %edi, 16(%esp) # 4-byte Spill - sbbl 20(%ecx), %esi - movl %esi, 28(%esp) # 4-byte Spill - sbbl 24(%ecx), %edx - movl %edx, 8(%esp) # 4-byte Spill - movl %edx, %ecx - sarl $31, %ecx - movl %ecx, %eax - shldl $1, %edx, %eax - movl 64(%esp), %edx - andl (%edx), %eax - movl 24(%edx), %esi - andl %ecx, %esi - movl %esi, (%esp) # 4-byte Spill - movl 20(%edx), %ebx - andl %ecx, %ebx - movl 16(%edx), %edi - andl %ecx, %edi - movl 12(%edx), %esi - andl %ecx, %esi - movl 
64(%esp), %edx - movl 8(%edx), %edx - andl %ecx, %edx - movl 64(%esp), %ebp - andl 4(%ebp), %ecx - addl 20(%esp), %eax # 4-byte Folded Reload - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl 52(%esp), %ebp - movl %eax, (%ebp) - adcl 4(%esp), %edx # 4-byte Folded Reload - movl %ebp, %eax - movl %ecx, 4(%eax) - adcl 12(%esp), %esi # 4-byte Folded Reload - movl %edx, 8(%eax) - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %esi, 12(%eax) - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl %edi, 16(%eax) - movl %ebx, 20(%eax) - movl (%esp), %ecx # 4-byte Reload - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%eax) - addl $32, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end107: - .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L - - .globl mcl_fpDbl_add7L - .align 16, 0x90 - .type mcl_fpDbl_add7L,@function -mcl_fpDbl_add7L: # @mcl_fpDbl_add7L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $44, %esp - movl 72(%esp), %esi - movl 68(%esp), %edx - movl 12(%edx), %edi - movl 16(%edx), %ecx - movl 8(%esi), %eax - movl (%esi), %ebx - addl (%edx), %ebx - movl 64(%esp), %ebp - movl %ebx, (%ebp) - movl 4(%esi), %ebx - adcl 4(%edx), %ebx - adcl 8(%edx), %eax - adcl 12(%esi), %edi - adcl 16(%esi), %ecx - movl %ebx, 4(%ebp) - movl %esi, %ebx - movl 36(%ebx), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl %eax, 8(%ebp) - movl 20(%ebx), %eax - movl %edi, 12(%ebp) - movl 20(%edx), %edi - adcl %eax, %edi - movl 24(%ebx), %eax - movl %ecx, 16(%ebp) - movl 24(%edx), %ecx - adcl %eax, %ecx - movl 28(%ebx), %eax - movl %edi, 20(%ebp) - movl 28(%edx), %edi - adcl %eax, %edi - movl %edi, 20(%esp) # 4-byte Spill - movl 32(%ebx), %eax - movl %ecx, 24(%ebp) - movl 32(%edx), %ecx - adcl %eax, %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%edx), %esi - adcl 36(%esp), %esi # 4-byte Folded Reload - movl 40(%ebx), %ecx - movl 40(%edx), %eax - adcl %ecx, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%ebx), %ebp - movl 44(%edx), %ecx - adcl %ebp, %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 48(%ebx), %ebp - movl %ebx, %eax - movl 48(%edx), %ebx - adcl %ebp, %ebx - movl %ebx, 28(%esp) # 4-byte Spill - movl 52(%eax), %eax - movl 52(%edx), %ebp - adcl %eax, %ebp - movl %ebp, 32(%esp) # 4-byte Spill - sbbl %edx, %edx - andl $1, %edx - movl 76(%esp), %eax - subl (%eax), %edi - movl %edi, 8(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - sbbl 4(%eax), %edi - movl %edi, 4(%esp) # 4-byte Spill - movl %esi, %eax - movl 76(%esp), %edi - sbbl 8(%edi), %eax - movl %eax, (%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - sbbl 12(%edi), %eax - movl %eax, 12(%esp) # 4-byte Spill - sbbl 16(%edi), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - sbbl 20(%edi), %ebx - sbbl 24(%edi), %ebp - sbbl $0, %edx - andl $1, %edx - jne .LBB108_2 -# BB#1: - movl %ebp, 32(%esp) # 4-byte Spill -.LBB108_2: - testb %dl, %dl - movl 20(%esp), %ecx # 4-byte Reload - jne .LBB108_4 -# BB#3: - movl (%esp), %esi # 4-byte Reload - movl 4(%esp), %eax # 4-byte Reload - movl %eax, 40(%esp) # 4-byte Spill - movl 8(%esp), %ecx # 4-byte Reload -.LBB108_4: - movl 64(%esp), %eax - movl %ecx, 28(%eax) - movl 40(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl %esi, 36(%eax) - movl 24(%esp), %edx # 4-byte Reload - movl 36(%esp), %ecx # 4-byte Reload - jne .LBB108_6 -# BB#5: - movl 12(%esp), %ecx # 4-byte Reload -.LBB108_6: - movl %ecx, 40(%eax) - movl 28(%esp), %ecx # 4-byte Reload - jne .LBB108_8 -# BB#7: - movl 16(%esp), %edx # 4-byte Reload -.LBB108_8: - movl %edx, 
44(%eax) - jne .LBB108_10 -# BB#9: - movl %ebx, %ecx -.LBB108_10: - movl %ecx, 48(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 52(%eax) - addl $44, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end108: - .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L - - .globl mcl_fpDbl_sub7L - .align 16, 0x90 - .type mcl_fpDbl_sub7L,@function -mcl_fpDbl_sub7L: # @mcl_fpDbl_sub7L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $32, %esp - movl 56(%esp), %esi - movl (%esi), %eax - movl 4(%esi), %edx - movl 60(%esp), %edi - subl (%edi), %eax - sbbl 4(%edi), %edx - movl 8(%esi), %ebx - sbbl 8(%edi), %ebx - movl 52(%esp), %ecx - movl %eax, (%ecx) - movl 12(%esi), %eax - sbbl 12(%edi), %eax - movl %edx, 4(%ecx) - movl 16(%esi), %edx - sbbl 16(%edi), %edx - movl %ebx, 8(%ecx) - movl 20(%edi), %ebx - movl %eax, 12(%ecx) - movl 20(%esi), %eax - sbbl %ebx, %eax - movl 24(%edi), %ebx - movl %edx, 16(%ecx) - movl 24(%esi), %edx - sbbl %ebx, %edx - movl 28(%edi), %ebx - movl %eax, 20(%ecx) - movl 28(%esi), %eax - sbbl %ebx, %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 32(%edi), %eax - movl %edx, 24(%ecx) - movl 32(%esi), %edx - sbbl %eax, %edx - movl %edx, 4(%esp) # 4-byte Spill - movl 36(%edi), %eax - movl 36(%esi), %edx - sbbl %eax, %edx - movl %edx, 8(%esp) # 4-byte Spill - movl 40(%edi), %eax - movl 40(%esi), %edx - sbbl %eax, %edx - movl %edx, 16(%esp) # 4-byte Spill - movl 44(%edi), %eax - movl 44(%esi), %edx - sbbl %eax, %edx - movl %edx, 20(%esp) # 4-byte Spill - movl 48(%edi), %eax - movl 48(%esi), %edx - sbbl %eax, %edx - movl %edx, 24(%esp) # 4-byte Spill - movl 52(%edi), %eax - movl 52(%esi), %edx - sbbl %eax, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 64(%esp), %esi - jne .LBB109_1 -# BB#2: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB109_3 -.LBB109_1: - movl 24(%esi), %edx - movl %edx, (%esp) # 4-byte Spill -.LBB109_3: - testb %al, %al - jne .LBB109_4 -# BB#5: - movl $0, %edi - movl $0, %eax - jmp .LBB109_6 -.LBB109_4: - movl (%esi), %eax - movl 4(%esi), %edi -.LBB109_6: - jne .LBB109_7 -# BB#8: - movl $0, %ebx - jmp .LBB109_9 -.LBB109_7: - movl 20(%esi), %ebx -.LBB109_9: - jne .LBB109_10 -# BB#11: - movl $0, %ebp - jmp .LBB109_12 -.LBB109_10: - movl 16(%esi), %ebp -.LBB109_12: - jne .LBB109_13 -# BB#14: - movl $0, %edx - jmp .LBB109_15 -.LBB109_13: - movl 12(%esi), %edx -.LBB109_15: - jne .LBB109_16 -# BB#17: - xorl %esi, %esi - jmp .LBB109_18 -.LBB109_16: - movl 8(%esi), %esi -.LBB109_18: - addl 12(%esp), %eax # 4-byte Folded Reload - adcl 4(%esp), %edi # 4-byte Folded Reload - movl %eax, 28(%ecx) - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %edi, 32(%ecx) - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %esi, 36(%ecx) - adcl 20(%esp), %ebp # 4-byte Folded Reload - movl %edx, 40(%ecx) - adcl 24(%esp), %ebx # 4-byte Folded Reload - movl %ebp, 44(%ecx) - movl %ebx, 48(%ecx) - movl (%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%ecx) - addl $32, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end109: - .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L - - .align 16, 0x90 - .type .LmulPv256x32,@function -.LmulPv256x32: # @mulPv256x32 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $48, %esp - movl %edx, %esi - movl 68(%esp), %ebx - movl %ebx, %eax - mull 28(%esi) - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ebx, %eax - mull 24(%esi) - movl %edx, 36(%esp) 
# 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ebx, %eax - mull 20(%esi) - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ebx, %eax - mull 16(%esi) - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ebx, %eax - mull 12(%esi) - movl %edx, 12(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ebx, %eax - mull 8(%esi) - movl %edx, %ebp - movl %eax, 4(%esp) # 4-byte Spill - movl %ebx, %eax - mull 4(%esi) - movl %edx, %edi - movl %eax, (%esp) # 4-byte Spill - movl %ebx, %eax - mull (%esi) - movl %eax, (%ecx) - addl (%esp), %edx # 4-byte Folded Reload - movl %edx, 4(%ecx) - adcl 4(%esp), %edi # 4-byte Folded Reload - movl %edi, 8(%ecx) - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 12(%ecx) - movl 12(%esp), %eax # 4-byte Reload - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 16(%ecx) - movl 20(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%ecx) - movl 28(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%ecx) - movl 36(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%ecx) - movl 44(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 32(%ecx) - movl %ecx, %eax - addl $48, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end110: - .size .LmulPv256x32, .Lfunc_end110-.LmulPv256x32 - - .globl mcl_fp_mulUnitPre8L - .align 16, 0x90 - .type mcl_fp_mulUnitPre8L,@function -mcl_fp_mulUnitPre8L: # @mcl_fp_mulUnitPre8L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $60, %esp - calll .L111$pb -.L111$pb: - popl %ebx -.Ltmp2: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.L111$pb), %ebx - movl 88(%esp), %eax - movl %eax, (%esp) - leal 24(%esp), %ecx - movl 84(%esp), %edx - calll .LmulPv256x32 - movl 56(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 52(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 48(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 44(%esp), %esi - movl 40(%esp), %edi - movl 36(%esp), %ebx - movl 32(%esp), %ebp - movl 24(%esp), %edx - movl 28(%esp), %ecx - movl 80(%esp), %eax - movl %edx, (%eax) - movl %ecx, 4(%eax) - movl %ebp, 8(%eax) - movl %ebx, 12(%eax) - movl %edi, 16(%eax) - movl %esi, 20(%eax) - movl 12(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - addl $60, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end111: - .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L - - .globl mcl_fpDbl_mulPre8L - .align 16, 0x90 - .type mcl_fpDbl_mulPre8L,@function -mcl_fpDbl_mulPre8L: # @mcl_fpDbl_mulPre8L -# BB#0: - pushl %ebp - movl %esp, %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $156, %esp - calll .L112$pb -.L112$pb: - popl %ebx -.Ltmp3: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp3-.L112$pb), %ebx - movl %ebx, -96(%ebp) # 4-byte Spill - movl 16(%ebp), %esi - movl %esi, 8(%esp) - movl 12(%ebp), %edi - movl %edi, 4(%esp) - movl 8(%ebp), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre4L@PLT - leal 16(%esi), %eax - movl %eax, 8(%esp) - leal 16(%edi), %eax - movl %eax, 4(%esp) - movl 8(%ebp), %eax - leal 32(%eax), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre4L@PLT - movl 24(%edi), %esi - movl (%edi), %ebx - movl 4(%edi), %eax - addl 16(%edi), %ebx - movl %ebx, -120(%ebp) # 4-byte Spill - adcl 20(%edi), %eax - movl %eax, -100(%ebp) # 4-byte Spill - adcl 
8(%edi), %esi - movl %esi, -108(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %eax - movl %eax, -80(%ebp) # 4-byte Spill - movl 16(%ebp), %edi - movl (%edi), %eax - movl 4(%edi), %ecx - addl 16(%edi), %eax - adcl 20(%edi), %ecx - movl %ecx, -124(%ebp) # 4-byte Spill - movl 24(%edi), %edx - adcl 8(%edi), %edx - movl 28(%edi), %ecx - adcl 12(%edi), %ecx - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -128(%ebp) # 4-byte Spill - jb .LBB112_2 -# BB#1: - xorl %esi, %esi - xorl %ebx, %ebx -.LBB112_2: - movl %ebx, -112(%ebp) # 4-byte Spill - movl %esi, -104(%ebp) # 4-byte Spill - movl 12(%ebp), %esi - movl 28(%esi), %edi - movl -80(%ebp), %ebx # 4-byte Reload - pushl %eax - movl %ebx, %eax - addb $127, %al - sahf - popl %eax - adcl 12(%esi), %edi - movl %edi, -116(%ebp) # 4-byte Spill - movl %ecx, -84(%ebp) # 4-byte Spill - movl %edx, %edi - movl -124(%ebp), %ebx # 4-byte Reload - movl %ebx, -80(%ebp) # 4-byte Spill - movl %eax, -92(%ebp) # 4-byte Spill - jb .LBB112_4 -# BB#3: - movl $0, -84(%ebp) # 4-byte Folded Spill - movl $0, %edi - movl $0, -80(%ebp) # 4-byte Folded Spill - movl $0, -92(%ebp) # 4-byte Folded Spill -.LBB112_4: - movl %edi, -88(%ebp) # 4-byte Spill - movl -120(%ebp), %esi # 4-byte Reload - movl %esi, -60(%ebp) - movl -100(%ebp), %edi # 4-byte Reload - movl %edi, -56(%ebp) - movl -108(%ebp), %esi # 4-byte Reload - movl %esi, -52(%ebp) - movl %eax, -76(%ebp) - movl %ebx, -72(%ebp) - movl %edx, -68(%ebp) - movl %ecx, -64(%ebp) - sbbl %edx, %edx - movl -116(%ebp), %esi # 4-byte Reload - movl %esi, -48(%ebp) - movl -128(%ebp), %eax # 4-byte Reload - movl %eax, %eax - addb $127, %al - sahf - jb .LBB112_6 -# BB#5: - movl $0, %esi - movl $0, %edi -.LBB112_6: - sbbl %eax, %eax - leal -76(%ebp), %ecx - movl %ecx, 8(%esp) - leal -60(%ebp), %ecx - movl %ecx, 4(%esp) - leal -44(%ebp), %ecx - movl %ecx, (%esp) - andl %eax, %edx - movl %edi, %eax - movl -92(%ebp), %edi # 4-byte Reload - addl -112(%ebp), %edi # 4-byte Folded Reload - adcl %eax, -80(%ebp) # 4-byte Folded Spill - movl -104(%ebp), %eax # 4-byte Reload - adcl %eax, -88(%ebp) # 4-byte Folded Spill - adcl %esi, -84(%ebp) # 4-byte Folded Spill - sbbl %esi, %esi - andl $1, %esi - andl $1, %edx - movl %edx, -92(%ebp) # 4-byte Spill - movl -96(%ebp), %ebx # 4-byte Reload - calll mcl_fpDbl_mulPre4L@PLT - addl -28(%ebp), %edi - movl -80(%ebp), %eax # 4-byte Reload - adcl -24(%ebp), %eax - movl %eax, -80(%ebp) # 4-byte Spill - movl -88(%ebp), %eax # 4-byte Reload - adcl -20(%ebp), %eax - movl %eax, -88(%ebp) # 4-byte Spill - movl -84(%ebp), %eax # 4-byte Reload - adcl -16(%ebp), %eax - movl %eax, -84(%ebp) # 4-byte Spill - adcl %esi, -92(%ebp) # 4-byte Folded Spill - movl -44(%ebp), %eax - movl 8(%ebp), %esi - subl (%esi), %eax - movl %eax, -116(%ebp) # 4-byte Spill - movl -40(%ebp), %ebx - sbbl 4(%esi), %ebx - movl -36(%ebp), %eax - sbbl 8(%esi), %eax - movl %eax, -96(%ebp) # 4-byte Spill - movl -32(%ebp), %edx - sbbl 12(%esi), %edx - movl 16(%esi), %eax - movl %eax, -100(%ebp) # 4-byte Spill - sbbl %eax, %edi - movl 20(%esi), %eax - movl %eax, -112(%ebp) # 4-byte Spill - sbbl %eax, -80(%ebp) # 4-byte Folded Spill - movl 24(%esi), %eax - movl %eax, -104(%ebp) # 4-byte Spill - sbbl %eax, -88(%ebp) # 4-byte Folded Spill - movl 28(%esi), %eax - movl %eax, -108(%ebp) # 4-byte Spill - sbbl %eax, -84(%ebp) # 4-byte Folded Spill - sbbl $0, -92(%ebp) # 4-byte Folded Spill - movl 32(%esi), %ecx - movl %ecx, -132(%ebp) # 4-byte Spill - movl -116(%ebp), %eax # 4-byte Reload - subl %ecx, %eax - movl 36(%esi), %ecx - 
movl %ecx, -136(%ebp) # 4-byte Spill - sbbl %ecx, %ebx - movl 40(%esi), %ecx - movl %ecx, -128(%ebp) # 4-byte Spill - sbbl %ecx, -96(%ebp) # 4-byte Folded Spill - movl 44(%esi), %ecx - movl %ecx, -140(%ebp) # 4-byte Spill - sbbl %ecx, %edx - movl 48(%esi), %ecx - movl %ecx, -144(%ebp) # 4-byte Spill - sbbl %ecx, %edi - movl 52(%esi), %ecx - movl %ecx, -116(%ebp) # 4-byte Spill - sbbl %ecx, -80(%ebp) # 4-byte Folded Spill - movl 56(%esi), %ecx - movl %ecx, -120(%ebp) # 4-byte Spill - sbbl %ecx, -88(%ebp) # 4-byte Folded Spill - movl 60(%esi), %ecx - movl %ecx, -124(%ebp) # 4-byte Spill - sbbl %ecx, -84(%ebp) # 4-byte Folded Spill - sbbl $0, -92(%ebp) # 4-byte Folded Spill - addl -100(%ebp), %eax # 4-byte Folded Reload - adcl -112(%ebp), %ebx # 4-byte Folded Reload - movl %eax, 16(%esi) - movl -96(%ebp), %eax # 4-byte Reload - adcl -104(%ebp), %eax # 4-byte Folded Reload - movl %ebx, 20(%esi) - adcl -108(%ebp), %edx # 4-byte Folded Reload - movl %eax, 24(%esi) - adcl -132(%ebp), %edi # 4-byte Folded Reload - movl %edx, 28(%esi) - movl -80(%ebp), %eax # 4-byte Reload - adcl -136(%ebp), %eax # 4-byte Folded Reload - movl %edi, 32(%esi) - movl -88(%ebp), %ecx # 4-byte Reload - adcl -128(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 36(%esi) - movl -84(%ebp), %eax # 4-byte Reload - adcl -140(%ebp), %eax # 4-byte Folded Reload - movl %ecx, 40(%esi) - movl -92(%ebp), %ecx # 4-byte Reload - adcl -144(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 44(%esi) - movl %ecx, 48(%esi) - movl -116(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 52(%esi) - movl -120(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 56(%esi) - movl -124(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 60(%esi) - addl $156, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end112: - .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L - - .globl mcl_fpDbl_sqrPre8L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre8L,@function -mcl_fpDbl_sqrPre8L: # @mcl_fpDbl_sqrPre8L -# BB#0: - pushl %ebp - movl %esp, %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $156, %esp - calll .L113$pb -.L113$pb: - popl %ebx -.Ltmp4: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp4-.L113$pb), %ebx - movl %ebx, -96(%ebp) # 4-byte Spill - movl 12(%ebp), %edi - movl %edi, 8(%esp) - movl %edi, 4(%esp) - movl 8(%ebp), %esi - movl %esi, (%esp) - calll mcl_fpDbl_mulPre4L@PLT - leal 16(%edi), %eax - movl %eax, 8(%esp) - movl %eax, 4(%esp) - leal 32(%esi), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre4L@PLT - movl (%edi), %esi - movl 4(%edi), %ecx - addl 16(%edi), %esi - movl %esi, -108(%ebp) # 4-byte Spill - adcl 20(%edi), %ecx - seto %al - lahf - movl %eax, %edx - addl %esi, %esi - movl %esi, -84(%ebp) # 4-byte Spill - movl %ecx, %esi - adcl %esi, %esi - movl %esi, -80(%ebp) # 4-byte Spill - pushl %eax - seto %al - lahf - movl %eax, %esi - popl %eax - movl %esi, -88(%ebp) # 4-byte Spill - movl 24(%edi), %esi - pushl %eax - movl %edx, %eax - addb $127, %al - sahf - popl %eax - adcl 8(%edi), %esi - movl 28(%edi), %edx - adcl 12(%edi), %edx - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -100(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %eax - movl %eax, -104(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %ebx - sbbl %edi, %edi - movl %edi, -92(%ebp) # 4-byte Spill - pushl %eax - movl %ebx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB113_2 -# BB#1: - movl $0, -80(%ebp) # 4-byte Folded Spill - movl $0, -84(%ebp) # 4-byte Folded Spill -.LBB113_2: - movl %esi, %ebx - movl -88(%ebp), 
%edi # 4-byte Reload - movl %edi, %eax - addb $127, %al - sahf - adcl %ebx, %ebx - movl %edx, %edi - adcl %edi, %edi - movl -104(%ebp), %eax # 4-byte Reload - movl %eax, %eax - addb $127, %al - sahf - jb .LBB113_4 -# BB#3: - xorl %edi, %edi - xorl %ebx, %ebx -.LBB113_4: - movl %ebx, -88(%ebp) # 4-byte Spill - movl -108(%ebp), %eax # 4-byte Reload - movl %eax, -60(%ebp) - movl %ecx, -56(%ebp) - movl %esi, -52(%ebp) - movl %edx, -48(%ebp) - movl %eax, -76(%ebp) - movl %ecx, -72(%ebp) - movl %esi, -68(%ebp) - movl %edx, -64(%ebp) - movl -100(%ebp), %eax # 4-byte Reload - movl %eax, %eax - addb $127, %al - sahf - jb .LBB113_5 -# BB#6: - movl $0, -100(%ebp) # 4-byte Folded Spill - jmp .LBB113_7 -.LBB113_5: - shrl $31, %edx - movl %edx, -100(%ebp) # 4-byte Spill -.LBB113_7: - leal -76(%ebp), %eax - movl %eax, 8(%esp) - leal -60(%ebp), %eax - movl %eax, 4(%esp) - leal -44(%ebp), %eax - movl %eax, (%esp) - movl -92(%ebp), %esi # 4-byte Reload - andl $1, %esi - movl -96(%ebp), %ebx # 4-byte Reload - calll mcl_fpDbl_mulPre4L@PLT - movl -84(%ebp), %eax # 4-byte Reload - addl -28(%ebp), %eax - movl %eax, -84(%ebp) # 4-byte Spill - movl -80(%ebp), %eax # 4-byte Reload - adcl -24(%ebp), %eax - movl %eax, -80(%ebp) # 4-byte Spill - movl -88(%ebp), %eax # 4-byte Reload - adcl -20(%ebp), %eax - movl %eax, -88(%ebp) # 4-byte Spill - adcl -16(%ebp), %edi - movl %edi, -92(%ebp) # 4-byte Spill - adcl -100(%ebp), %esi # 4-byte Folded Reload - movl -44(%ebp), %eax - movl 8(%ebp), %edi - subl (%edi), %eax - movl %eax, -116(%ebp) # 4-byte Spill - movl -40(%ebp), %ebx - sbbl 4(%edi), %ebx - movl -36(%ebp), %eax - sbbl 8(%edi), %eax - movl %eax, -96(%ebp) # 4-byte Spill - movl -32(%ebp), %edx - sbbl 12(%edi), %edx - movl 16(%edi), %eax - movl %eax, -100(%ebp) # 4-byte Spill - sbbl %eax, -84(%ebp) # 4-byte Folded Spill - movl 20(%edi), %eax - movl %eax, -112(%ebp) # 4-byte Spill - sbbl %eax, -80(%ebp) # 4-byte Folded Spill - movl 24(%edi), %eax - movl %eax, -104(%ebp) # 4-byte Spill - sbbl %eax, -88(%ebp) # 4-byte Folded Spill - movl 28(%edi), %eax - movl %eax, -108(%ebp) # 4-byte Spill - sbbl %eax, -92(%ebp) # 4-byte Folded Spill - sbbl $0, %esi - movl 32(%edi), %ecx - movl %ecx, -132(%ebp) # 4-byte Spill - movl -116(%ebp), %eax # 4-byte Reload - subl %ecx, %eax - movl 36(%edi), %ecx - movl %ecx, -136(%ebp) # 4-byte Spill - sbbl %ecx, %ebx - movl 40(%edi), %ecx - movl %ecx, -128(%ebp) # 4-byte Spill - sbbl %ecx, -96(%ebp) # 4-byte Folded Spill - movl 44(%edi), %ecx - movl %ecx, -140(%ebp) # 4-byte Spill - sbbl %ecx, %edx - movl 48(%edi), %ecx - movl %ecx, -144(%ebp) # 4-byte Spill - sbbl %ecx, -84(%ebp) # 4-byte Folded Spill - movl 52(%edi), %ecx - movl %ecx, -116(%ebp) # 4-byte Spill - sbbl %ecx, -80(%ebp) # 4-byte Folded Spill - movl 56(%edi), %ecx - movl %ecx, -120(%ebp) # 4-byte Spill - sbbl %ecx, -88(%ebp) # 4-byte Folded Spill - movl 60(%edi), %ecx - movl %ecx, -124(%ebp) # 4-byte Spill - sbbl %ecx, -92(%ebp) # 4-byte Folded Spill - sbbl $0, %esi - addl -100(%ebp), %eax # 4-byte Folded Reload - adcl -112(%ebp), %ebx # 4-byte Folded Reload - movl %eax, 16(%edi) - movl -96(%ebp), %eax # 4-byte Reload - adcl -104(%ebp), %eax # 4-byte Folded Reload - movl %ebx, 20(%edi) - adcl -108(%ebp), %edx # 4-byte Folded Reload - movl %eax, 24(%edi) - movl -84(%ebp), %eax # 4-byte Reload - adcl -132(%ebp), %eax # 4-byte Folded Reload - movl %edx, 28(%edi) - movl -80(%ebp), %ecx # 4-byte Reload - adcl -136(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 32(%edi) - movl -88(%ebp), %eax # 4-byte Reload - adcl -128(%ebp), %eax # 
4-byte Folded Reload
- movl %ecx, 36(%edi)
- movl -92(%ebp), %ecx # 4-byte Reload
- adcl -140(%ebp), %ecx # 4-byte Folded Reload
- movl %eax, 40(%edi)
- adcl -144(%ebp), %esi # 4-byte Folded Reload
- movl %ecx, 44(%edi)
- movl %esi, 48(%edi)
- movl -116(%ebp), %eax # 4-byte Reload
- adcl $0, %eax
- movl %eax, 52(%edi)
- movl -120(%ebp), %eax # 4-byte Reload
- adcl $0, %eax
- movl %eax, 56(%edi)
- movl -124(%ebp), %eax # 4-byte Reload
- adcl $0, %eax
- movl %eax, 60(%edi)
- addl $156, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end113:
- .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L
-
- .globl mcl_fp_mont8L
- .align 16, 0x90
- .type mcl_fp_mont8L,@function
-mcl_fp_mont8L: # @mcl_fp_mont8L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $700, %esp # imm = 0x2BC
- calll .L114$pb
-.L114$pb:
- popl %ebx
-.Ltmp5:
- addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.L114$pb), %ebx
- movl 732(%esp), %eax
- movl -4(%eax), %esi
- movl %esi, 24(%esp) # 4-byte Spill
- movl 728(%esp), %eax
- movl (%eax), %eax
- movl %eax, (%esp)
- leal 664(%esp), %ecx
- movl 724(%esp), %edx
- calll .LmulPv256x32
- movl 664(%esp), %ebp
- movl 668(%esp), %edi
- movl %ebp, %eax
- imull %esi, %eax
- movl 696(%esp), %ecx
- movl %ecx, 56(%esp) # 4-byte Spill
- movl 692(%esp), %ecx
- movl %ecx, 52(%esp) # 4-byte Spill
- movl 688(%esp), %ecx
- movl %ecx, 48(%esp) # 4-byte Spill
- movl 684(%esp), %ecx
- movl %ecx, 36(%esp) # 4-byte Spill
- movl 680(%esp), %ecx
- movl %ecx, 28(%esp) # 4-byte Spill
- movl 676(%esp), %ecx
- movl %ecx, 40(%esp) # 4-byte Spill
- movl 672(%esp), %esi
- movl %eax, (%esp)
- leal 624(%esp), %ecx
- movl 732(%esp), %edx
- calll .LmulPv256x32
- addl 624(%esp), %ebp
- adcl 628(%esp), %edi
- adcl 632(%esp), %esi
- movl 40(%esp), %eax # 4-byte Reload
- adcl 636(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 28(%esp), %eax # 4-byte Reload
- adcl 640(%esp), %eax
- movl %eax, 28(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
- adcl 644(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 48(%esp), %eax # 4-byte Reload
- adcl 648(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 52(%esp), %eax # 4-byte Reload
- adcl 652(%esp), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- movl 56(%esp), %ebp # 4-byte Reload
- adcl 656(%esp), %ebp
- sbbl %eax, %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 728(%esp), %eax
- movl 4(%eax), %eax
- movl %eax, (%esp)
- leal 584(%esp), %ecx
- movl 724(%esp), %edx
- calll .LmulPv256x32
- movl 60(%esp), %eax # 4-byte Reload
- andl $1, %eax
- addl 584(%esp), %edi
- adcl 588(%esp), %esi
- movl 40(%esp), %ecx # 4-byte Reload
- adcl 592(%esp), %ecx
- movl %ecx, 40(%esp) # 4-byte Spill
- movl 28(%esp), %ecx # 4-byte Reload
- adcl 596(%esp), %ecx
- movl %ecx, 28(%esp) # 4-byte Spill
- movl 36(%esp), %ecx # 4-byte Reload
- adcl 600(%esp), %ecx
- movl %ecx, 36(%esp) # 4-byte Spill
- movl 48(%esp), %ecx # 4-byte Reload
- adcl 604(%esp), %ecx
- movl %ecx, 48(%esp) # 4-byte Spill
- movl 52(%esp), %ecx # 4-byte Reload
- adcl 608(%esp), %ecx
- movl %ecx, 52(%esp) # 4-byte Spill
- adcl 612(%esp), %ebp
- movl %ebp, 56(%esp) # 4-byte Spill
- adcl 616(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- sbbl %ebp, %ebp
- movl %edi, %eax
- imull 24(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 544(%esp), %ecx
- movl 732(%esp), %eax
- movl %eax, %edx
- calll .LmulPv256x32
- andl $1, %ebp
- addl 544(%esp), %edi
- adcl 548(%esp), %esi
- movl 40(%esp), %eax # 4-byte Reload
- adcl 552(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 28(%esp), %eax # 4-byte Reload
- adcl 556(%esp), %eax
- movl %eax, 28(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
- adcl 560(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 48(%esp), %eax # 4-byte Reload
- adcl 564(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 52(%esp), %edi # 4-byte Reload
- adcl 568(%esp), %edi
- movl 56(%esp), %eax # 4-byte Reload
- adcl 572(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 60(%esp), %eax # 4-byte Reload
- adcl 576(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- adcl $0, %ebp
- movl 728(%esp), %eax
- movl 8(%eax), %eax
- movl %eax, (%esp)
- leal 504(%esp), %ecx
- movl 724(%esp), %edx
- calll .LmulPv256x32
- addl 504(%esp), %esi
- movl 40(%esp), %eax # 4-byte Reload
- adcl 508(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 28(%esp), %eax # 4-byte Reload
- adcl 512(%esp), %eax
- movl %eax, 28(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
- adcl 516(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 48(%esp), %eax # 4-byte Reload
- adcl 520(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- adcl 524(%esp), %edi
- movl %edi, 52(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 528(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 60(%esp), %eax # 4-byte Reload
- adcl 532(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- adcl 536(%esp), %ebp
- sbbl %edi, %edi
- movl %esi, %eax
- imull 24(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 464(%esp), %ecx
- movl 732(%esp), %edx
- calll .LmulPv256x32
- movl %edi, %eax
- andl $1, %eax
- addl 464(%esp), %esi
- movl 40(%esp), %ecx # 4-byte Reload
- adcl 468(%esp), %ecx
- movl %ecx, 40(%esp) # 4-byte Spill
- movl 28(%esp), %ecx # 4-byte Reload
- adcl 472(%esp), %ecx
- movl %ecx, 28(%esp) # 4-byte Spill
- movl 36(%esp), %ecx # 4-byte Reload
- adcl 476(%esp), %ecx
- movl %ecx, 36(%esp) # 4-byte Spill
- movl 48(%esp), %edi # 4-byte Reload
- adcl 480(%esp), %edi
- movl 52(%esp), %ecx # 4-byte Reload
- adcl 484(%esp), %ecx
- movl %ecx, 52(%esp) # 4-byte Spill
- movl 56(%esp), %esi # 4-byte Reload
- adcl 488(%esp), %esi
- movl 60(%esp), %ecx # 4-byte Reload
- adcl 492(%esp), %ecx
- movl %ecx, 60(%esp) # 4-byte Spill
- adcl 496(%esp), %ebp
- movl %ebp, 32(%esp) # 4-byte Spill
- adcl $0, %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 728(%esp), %eax
- movl 12(%eax), %eax
- movl %eax, (%esp)
- leal 424(%esp), %ecx
- movl 724(%esp), %eax
- movl %eax, %edx
- calll .LmulPv256x32
- movl 40(%esp), %ecx # 4-byte Reload
- addl 424(%esp), %ecx
- movl 28(%esp), %ebp # 4-byte Reload
- adcl 428(%esp), %ebp
- movl 36(%esp), %eax # 4-byte Reload
- adcl 432(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- adcl 436(%esp), %edi
- movl %edi, 48(%esp) # 4-byte Spill
- movl 52(%esp), %eax # 4-byte Reload
- adcl 440(%esp), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- adcl 444(%esp), %esi
- movl %esi, 56(%esp) # 4-byte Spill
- movl 60(%esp), %edi # 4-byte Reload
- adcl 448(%esp), %edi
- movl 32(%esp), %eax # 4-byte Reload
- adcl 452(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 456(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- sbbl %eax, %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl %ecx, %eax
- movl %ecx, %esi
- imull 24(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 384(%esp), %ecx
- movl 732(%esp), %edx
- calll .LmulPv256x32
- movl 40(%esp), %eax # 4-byte Reload
- andl $1, %eax
- addl 384(%esp), %esi
- adcl 388(%esp), %ebp
- movl %ebp, 28(%esp) # 4-byte Spill
- movl 36(%esp), %esi # 4-byte Reload
- adcl 392(%esp), %esi
- movl 48(%esp), %ecx # 4-byte Reload
- adcl 396(%esp), %ecx
- movl %ecx, 48(%esp) # 4-byte Spill
- movl 52(%esp), %ecx # 4-byte Reload
- adcl 400(%esp), %ecx
- movl %ecx, 52(%esp) # 4-byte Spill
- movl 56(%esp), %ecx # 4-byte Reload
- adcl 404(%esp), %ecx
- movl %ecx, 56(%esp) # 4-byte Spill
- adcl 408(%esp), %edi
- movl 32(%esp), %ecx # 4-byte Reload
- adcl 412(%esp), %ecx
- movl %ecx, 32(%esp) # 4-byte Spill
- movl 44(%esp), %ebp # 4-byte Reload
- adcl 416(%esp), %ebp
- adcl $0, %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 728(%esp), %eax
- movl 16(%eax), %eax
- movl %eax, (%esp)
- leal 344(%esp), %ecx
- movl 724(%esp), %edx
- calll .LmulPv256x32
- movl 28(%esp), %ecx # 4-byte Reload
- addl 344(%esp), %ecx
- adcl 348(%esp), %esi
- movl %esi, 36(%esp) # 4-byte Spill
- movl 48(%esp), %esi # 4-byte Reload
- adcl 352(%esp), %esi
- movl 52(%esp), %eax # 4-byte Reload
- adcl 356(%esp), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 360(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- adcl 364(%esp), %edi
- movl %edi, 60(%esp) # 4-byte Spill
- movl 32(%esp), %eax # 4-byte Reload
- adcl 368(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- adcl 372(%esp), %ebp
- movl %ebp, 44(%esp) # 4-byte Spill
- movl 40(%esp), %eax # 4-byte Reload
- adcl 376(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- sbbl %ebp, %ebp
- movl %ecx, %eax
- movl %ecx, %edi
- imull 24(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 304(%esp), %ecx
- movl 732(%esp), %edx
- calll .LmulPv256x32
- movl %ebp, %eax
- andl $1, %eax
- addl 304(%esp), %edi
- movl 36(%esp), %ecx # 4-byte Reload
- adcl 308(%esp), %ecx
- movl %ecx, 36(%esp) # 4-byte Spill
- adcl 312(%esp), %esi
- movl %esi, 48(%esp) # 4-byte Spill
- movl 52(%esp), %ebp # 4-byte Reload
- adcl 316(%esp), %ebp
- movl 56(%esp), %ecx # 4-byte Reload
- adcl 320(%esp), %ecx
- movl %ecx, 56(%esp) # 4-byte Spill
- movl 60(%esp), %ecx # 4-byte Reload
- adcl 324(%esp), %ecx
- movl %ecx, 60(%esp) # 4-byte Spill
- movl 32(%esp), %esi # 4-byte Reload
- adcl 328(%esp), %esi
- movl 44(%esp), %ecx # 4-byte Reload
- adcl 332(%esp), %ecx
- movl %ecx, 44(%esp) # 4-byte Spill
- movl 40(%esp), %edi # 4-byte Reload
- adcl 336(%esp), %edi
- adcl $0, %eax
- movl %eax, 28(%esp) # 4-byte Spill
- movl 728(%esp), %eax
- movl 20(%eax), %eax
- movl %eax, (%esp)
- leal 264(%esp), %ecx
- movl 724(%esp), %edx
- calll .LmulPv256x32
- movl 36(%esp), %ecx # 4-byte Reload
- addl 264(%esp), %ecx
- movl 48(%esp), %eax # 4-byte Reload
- adcl 268(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- adcl 272(%esp), %ebp
- movl %ebp, 52(%esp) # 4-byte Spill
- movl 56(%esp), %ebp # 4-byte Reload
- adcl 276(%esp), %ebp
- movl 60(%esp), %eax # 4-byte Reload
- adcl 280(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- adcl 284(%esp), %esi
- movl %esi, 32(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 288(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- adcl 292(%esp), %edi
- movl %edi, 40(%esp) # 4-byte Spill
- movl 28(%esp), %eax # 4-byte Reload
- adcl 296(%esp), %eax
- movl %eax, 28(%esp) # 4-byte Spill
- sbbl %edi, %edi
- movl %ecx, %eax
- movl %ecx, %esi
- imull 24(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 224(%esp), %ecx
- movl 732(%esp), %edx
- calll .LmulPv256x32
- movl %edi, %eax
- andl $1, %eax
- addl 224(%esp), %esi
- movl 48(%esp), %ecx # 4-byte Reload
- adcl 228(%esp), %ecx
- movl %ecx, 48(%esp) # 4-byte Spill
- movl 52(%esp), %esi # 4-byte Reload
- adcl 232(%esp), %esi
- adcl 236(%esp), %ebp
- movl %ebp, 56(%esp) # 4-byte Spill
- movl 60(%esp), %edi # 4-byte Reload
- adcl 240(%esp), %edi
- movl 32(%esp), %ecx # 4-byte Reload
- adcl 244(%esp), %ecx
- movl %ecx, 32(%esp) # 4-byte Spill
- movl 44(%esp), %ebp # 4-byte Reload
- adcl 248(%esp), %ebp
- movl 40(%esp), %ecx # 4-byte Reload
- adcl 252(%esp), %ecx
- movl %ecx, 40(%esp) # 4-byte Spill
- movl 28(%esp), %ecx # 4-byte Reload
- adcl 256(%esp), %ecx
- movl %ecx, 28(%esp) # 4-byte Spill
- adcl $0, %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 728(%esp), %eax
- movl 24(%eax), %eax
- movl %eax, (%esp)
- leal 184(%esp), %ecx
- movl 724(%esp), %edx
- calll .LmulPv256x32
- movl 48(%esp), %ecx # 4-byte Reload
- addl 184(%esp), %ecx
- adcl 188(%esp), %esi
- movl %esi, 52(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 192(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- adcl 196(%esp), %edi
- movl %edi, 60(%esp) # 4-byte Spill
- movl 32(%esp), %eax # 4-byte Reload
- adcl 200(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- adcl 204(%esp), %ebp
- movl %ebp, 44(%esp) # 4-byte Spill
- movl 40(%esp), %eax # 4-byte Reload
- adcl 208(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 28(%esp), %ebp # 4-byte Reload
- adcl 212(%esp), %ebp
- movl 36(%esp), %eax # 4-byte Reload
- adcl 216(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- sbbl %edi, %edi
- movl %ecx, %eax
- movl %ecx, %esi
- imull 24(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 144(%esp), %ecx
- movl 732(%esp), %edx
- calll .LmulPv256x32
- movl %edi, %ecx
- andl $1, %ecx
- addl 144(%esp), %esi
- movl 52(%esp), %eax # 4-byte Reload
- adcl 148(%esp), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- movl 56(%esp), %esi # 4-byte Reload
- adcl 152(%esp), %esi
- movl 60(%esp), %eax # 4-byte Reload
- adcl 156(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 32(%esp), %edi # 4-byte Reload
- adcl 160(%esp), %edi
- movl 44(%esp), %eax # 4-byte Reload
- adcl 164(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 40(%esp), %eax # 4-byte Reload
- adcl 168(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- adcl 172(%esp), %ebp
- movl %ebp, 28(%esp) # 4-byte Spill
- movl 36(%esp), %ebp # 4-byte Reload
- adcl 176(%esp), %ebp
- adcl $0, %ecx
- movl %ecx, 48(%esp) # 4-byte Spill
- movl 728(%esp), %eax
- movl 28(%eax), %eax
- movl %eax, (%esp)
- leal 104(%esp), %ecx
- movl 724(%esp), %edx
- calll .LmulPv256x32
- movl 52(%esp), %ecx # 4-byte Reload
- addl 104(%esp), %ecx
- adcl 108(%esp), %esi
- movl %esi, 56(%esp) # 4-byte Spill
- movl 60(%esp), %eax # 4-byte Reload
- adcl 112(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- adcl 116(%esp), %edi
- movl %edi, 32(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 120(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 40(%esp), %eax # 4-byte Reload
- adcl 124(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 28(%esp), %edi # 4-byte Reload
- adcl 128(%esp), %edi
- adcl 132(%esp), %ebp
- movl %ebp, 36(%esp) # 4-byte Spill
- movl 48(%esp), %eax # 4-byte Reload
- adcl 136(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- sbbl %esi, %esi
- movl 24(%esp), %eax # 4-byte Reload
- imull %ecx, %eax
- movl %ecx, %ebp
- movl %eax, (%esp)
- leal 64(%esp), %ecx
- movl 732(%esp), %edx
- calll .LmulPv256x32
- andl $1, %esi
- addl 64(%esp), %ebp
- movl 32(%esp), %ebx # 4-byte Reload
- movl 56(%esp), %eax # 4-byte Reload
- adcl 68(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 60(%esp), %ecx # 4-byte Reload
- adcl 72(%esp), %ecx
- movl %ecx, 60(%esp) # 4-byte Spill
- adcl 76(%esp), %ebx
- movl 44(%esp), %edx # 4-byte Reload
- adcl 80(%esp), %edx
- movl %edx, 44(%esp) # 4-byte Spill
- movl 40(%esp), %edx # 4-byte Reload
- adcl 84(%esp), %edx
- movl %edx, 40(%esp) # 4-byte Spill
- adcl 88(%esp), %edi
- movl %edi, 28(%esp) # 4-byte Spill
- movl 36(%esp), %edx # 4-byte Reload
- adcl 92(%esp), %edx
- movl %edx, 36(%esp) # 4-byte Spill
- movl 48(%esp), %edx # 4-byte Reload
- adcl 96(%esp), %edx
- movl %edx, 48(%esp) # 4-byte Spill
- adcl $0, %esi
- movl %eax, %edx
- movl 732(%esp), %ebp
- subl (%ebp), %edx
- movl %ecx, %eax
- sbbl 4(%ebp), %eax
- movl %ebx, %ecx
- sbbl 8(%ebp), %ecx
- movl %ecx, 12(%esp) # 4-byte Spill
- movl 44(%esp), %ecx # 4-byte Reload
- sbbl 12(%ebp), %ecx
- movl %ecx, 16(%esp) # 4-byte Spill
- movl 40(%esp), %ecx # 4-byte Reload
- sbbl 16(%ebp), %ecx
- movl %ecx, 20(%esp) # 4-byte Spill
- sbbl 20(%ebp), %edi
- movl %edi, 24(%esp) # 4-byte Spill
- movl 36(%esp), %ecx # 4-byte Reload
- sbbl 24(%ebp), %ecx
- movl %ecx, 32(%esp) # 4-byte Spill
- movl 48(%esp), %ecx # 4-byte Reload
- sbbl 28(%ebp), %ecx
- movl %ecx, 52(%esp) # 4-byte Spill
- movl 56(%esp), %ebp # 4-byte Reload
- sbbl $0, %esi
- andl $1, %esi
- movl %esi, %ecx
- jne .LBB114_2
-# BB#1:
- movl %edx, %ebp
-.LBB114_2:
- movl 720(%esp), %edx
- movl %ebp, (%edx)
- testb %cl, %cl
- movl 60(%esp), %ebp # 4-byte Reload
- jne .LBB114_4
-# BB#3:
- movl %eax, %ebp
-.LBB114_4:
- movl %ebp, 4(%edx)
- jne .LBB114_6
-# BB#5:
- movl 12(%esp), %ebx # 4-byte Reload
-.LBB114_6:
- movl %ebx, 8(%edx)
- movl 28(%esp), %eax # 4-byte Reload
- jne .LBB114_8
-# BB#7:
- movl 16(%esp), %ecx # 4-byte Reload
- movl %ecx, 44(%esp) # 4-byte Spill
-.LBB114_8:
- movl 44(%esp), %ecx # 4-byte Reload
- movl %ecx, 12(%edx)
- movl 40(%esp), %edi # 4-byte Reload
- jne .LBB114_10
-# BB#9:
- movl 20(%esp), %edi # 4-byte Reload
-.LBB114_10:
- movl %edi, 16(%edx)
- jne .LBB114_12
-# BB#11:
- movl 24(%esp), %eax # 4-byte Reload
-.LBB114_12:
- movl %eax, 20(%edx)
- movl 36(%esp), %eax # 4-byte Reload
- jne .LBB114_14
-# BB#13:
- movl 32(%esp), %eax # 4-byte Reload
-.LBB114_14:
- movl %eax, 24(%edx)
- movl 48(%esp), %eax # 4-byte Reload
- jne .LBB114_16
-# BB#15:
- movl 52(%esp), %eax # 4-byte Reload
-.LBB114_16:
- movl %eax, 28(%edx)
- addl $700, %esp # imm = 0x2BC
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end114:
- .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L
-
- .globl mcl_fp_montNF8L
- .align 16, 0x90
- .type mcl_fp_montNF8L,@function
-mcl_fp_montNF8L: # @mcl_fp_montNF8L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $700, %esp # imm = 0x2BC
- calll .L115$pb
-.L115$pb:
- popl %ebx
-.Ltmp6:
- addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp6-.L115$pb), %ebx
- movl 732(%esp), %eax
- movl -4(%eax), %esi
- movl %esi, 28(%esp) # 4-byte Spill
- movl 728(%esp), %eax
- movl (%eax), %eax
- movl %eax, (%esp)
- leal 664(%esp), %ecx
- movl 724(%esp), %edx
- calll .LmulPv256x32
- movl 664(%esp), %ebp
- movl 668(%esp), %edi
- movl %ebp, %eax
- imull %esi, %eax
- movl 696(%esp), %ecx
- movl %ecx, 56(%esp) # 4-byte Spill
- movl 692(%esp), %ecx
- movl %ecx, 60(%esp) # 4-byte Spill
- movl 688(%esp), %ecx
- movl %ecx, 48(%esp) # 4-byte Spill
- movl 684(%esp), %ecx
- movl %ecx, 40(%esp) # 4-byte Spill
- movl 680(%esp), %ecx
- movl %ecx, 36(%esp) # 4-byte Spill
- movl 676(%esp), %ecx
- movl %ecx, 44(%esp) # 4-byte Spill
- movl 672(%esp), %esi
- movl %eax, (%esp)
- leal 624(%esp), %ecx
- movl 732(%esp), %edx
- calll .LmulPv256x32
- addl 624(%esp), %ebp
- adcl 628(%esp), %edi
- adcl 632(%esp), %esi
- movl 44(%esp), %eax # 4-byte Reload
- adcl 636(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 36(%esp), %ebp # 4-byte Reload
- adcl 640(%esp), %ebp
- movl 40(%esp), %eax # 4-byte Reload
- adcl 644(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 48(%esp), %eax # 4-byte Reload
- adcl 648(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 60(%esp), %eax # 4-byte Reload
- adcl 652(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 656(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 728(%esp), %eax
- movl 4(%eax), %eax
- movl %eax, (%esp)
- leal 584(%esp), %ecx
- movl 724(%esp), %edx
- calll .LmulPv256x32
- movl 616(%esp), %ecx
- addl 584(%esp), %edi
- adcl 588(%esp), %esi
- movl 44(%esp), %eax # 4-byte Reload
- adcl 592(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- adcl 596(%esp), %ebp
- movl %ebp, 36(%esp) # 4-byte Spill
- movl 40(%esp), %eax # 4-byte Reload
- adcl 600(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 48(%esp), %ebp # 4-byte Reload
- adcl 604(%esp), %ebp
- movl 60(%esp), %eax # 4-byte Reload
- adcl 608(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 612(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- adcl $0, %ecx
- movl %ecx, 52(%esp) # 4-byte Spill
- movl %edi, %eax
- imull 28(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 544(%esp), %ecx
- movl 732(%esp), %eax
- movl %eax, %edx
- calll .LmulPv256x32
- addl 544(%esp), %edi
- adcl 548(%esp), %esi
- movl 44(%esp), %eax # 4-byte Reload
- adcl 552(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
- adcl 556(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 40(%esp), %eax # 4-byte Reload
- adcl 560(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- adcl 564(%esp), %ebp
- movl %ebp, 48(%esp) # 4-byte Spill
- movl 60(%esp), %eax # 4-byte Reload
- adcl 568(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 56(%esp), %edi # 4-byte Reload
- adcl 572(%esp), %edi
- movl 52(%esp), %ebp # 4-byte Reload
- adcl 576(%esp), %ebp
- movl 728(%esp), %eax
- movl 8(%eax), %eax
- movl %eax, (%esp)
- leal 504(%esp), %ecx
- movl 724(%esp), %edx
- calll .LmulPv256x32
- movl 536(%esp), %ecx
- addl 504(%esp), %esi
- movl 44(%esp), %eax # 4-byte Reload
- adcl 508(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
- adcl 512(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 40(%esp), %eax # 4-byte Reload
- adcl 516(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 48(%esp), %eax # 4-byte Reload
- adcl 520(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 60(%esp), %eax # 4-byte Reload
- adcl 524(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- adcl 528(%esp), %edi
- adcl 532(%esp), %ebp
- movl %ebp, 52(%esp) # 4-byte Spill
- adcl $0, %ecx
- movl %ecx, 32(%esp) # 4-byte Spill
- movl %esi, %eax
- imull 28(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 464(%esp), %ecx
- movl 732(%esp), %edx
- calll .LmulPv256x32
- addl 464(%esp), %esi
- movl 44(%esp), %eax # 4-byte Reload
- adcl 468(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 36(%esp), %ebp # 4-byte Reload
- adcl 472(%esp), %ebp
- movl 40(%esp), %eax # 4-byte Reload
- adcl 476(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 48(%esp), %eax # 4-byte Reload
- adcl 480(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 60(%esp), %esi # 4-byte Reload
- adcl 484(%esp), %esi
- adcl 488(%esp), %edi
- movl %edi, 56(%esp) # 4-byte Spill
- movl 52(%esp), %eax # 4-byte Reload
- adcl 492(%esp), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- movl 32(%esp), %edi # 4-byte Reload
- adcl 496(%esp), %edi
- movl 728(%esp), %eax
- movl 12(%eax), %eax
- movl %eax, (%esp)
- leal 424(%esp), %ecx
- movl 724(%esp), %edx
- calll .LmulPv256x32
- movl 456(%esp), %eax
- movl 44(%esp), %edx # 4-byte Reload
- addl 424(%esp), %edx
- adcl 428(%esp), %ebp
- movl %ebp, 36(%esp) # 4-byte Spill
- movl 40(%esp), %ecx # 4-byte Reload
- adcl 432(%esp), %ecx
- movl %ecx, 40(%esp) # 4-byte Spill
- movl 48(%esp), %ecx # 4-byte Reload
- adcl 436(%esp), %ecx
- movl %ecx, 48(%esp) # 4-byte Spill
- adcl 440(%esp), %esi
- movl %esi, 60(%esp) # 4-byte Spill
- movl 56(%esp), %ecx # 4-byte Reload
- adcl 444(%esp), %ecx
- movl %ecx, 56(%esp) # 4-byte Spill
- movl 52(%esp), %ecx # 4-byte Reload
- adcl 448(%esp), %ecx
- movl %ecx, 52(%esp) # 4-byte Spill
- adcl 452(%esp), %edi
- movl %edi, %ebp
- movl %eax, %edi
- adcl $0, %edi
- movl %edx, %eax
- movl %edx, %esi
- imull 28(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 384(%esp), %ecx
- movl 732(%esp), %edx
- calll .LmulPv256x32
- addl 384(%esp), %esi
- movl 36(%esp), %eax # 4-byte Reload
- adcl 388(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 40(%esp), %eax # 4-byte Reload
- adcl 392(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 48(%esp), %esi # 4-byte Reload
- adcl 396(%esp), %esi
- movl 60(%esp), %eax # 4-byte Reload
- adcl 400(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 404(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 52(%esp), %eax # 4-byte Reload
- adcl 408(%esp), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- adcl 412(%esp), %ebp
- adcl 416(%esp), %edi
- movl %edi, 44(%esp) # 4-byte Spill
- movl 728(%esp), %eax
- movl 16(%eax), %eax
- movl %eax, (%esp)
- leal 344(%esp), %ecx
- movl 724(%esp), %edx
- calll .LmulPv256x32
- movl 376(%esp), %edx
- movl 36(%esp), %ecx # 4-byte Reload
- addl 344(%esp), %ecx
- movl 40(%esp), %eax # 4-byte Reload
- adcl 348(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- adcl 352(%esp), %esi
- movl %esi, 48(%esp) # 4-byte Spill
- movl 60(%esp), %eax # 4-byte Reload
- adcl 356(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 56(%esp), %esi # 4-byte Reload
- adcl 360(%esp), %esi
- movl 52(%esp), %edi # 4-byte Reload
- adcl 364(%esp), %edi
- adcl 368(%esp), %ebp
- movl %ebp, 32(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 372(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- adcl $0, %edx
- movl %edx, 36(%esp) # 4-byte Spill
- movl %ecx, %eax
- movl %ecx, %ebp
- imull 28(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 304(%esp), %ecx
- movl 732(%esp), %edx
- calll .LmulPv256x32
- addl 304(%esp), %ebp
- movl 40(%esp), %ebp # 4-byte Reload
- adcl 308(%esp), %ebp
- movl 48(%esp), %eax # 4-byte Reload
- adcl 312(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 60(%esp), %eax # 4-byte Reload
- adcl 316(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- adcl 320(%esp), %esi
- movl %esi, 56(%esp) # 4-byte Spill
- adcl 324(%esp), %edi
- movl %edi, 52(%esp) # 4-byte Spill
- movl 32(%esp), %esi # 4-byte Reload
- adcl 328(%esp), %esi
- movl 44(%esp), %eax # 4-byte Reload
- adcl 332(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 36(%esp), %edi # 4-byte Reload
- adcl 336(%esp), %edi
- movl 728(%esp), %eax
- movl 20(%eax), %eax
- movl %eax, (%esp)
- leal 264(%esp), %ecx
- movl 724(%esp), %eax
- movl %eax, %edx
- calll .LmulPv256x32
- movl 296(%esp), %edx
- movl %ebp, %ecx
- addl 264(%esp), %ecx
- movl 48(%esp), %eax # 4-byte Reload
- adcl 268(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 60(%esp), %eax # 4-byte Reload
- adcl 272(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 276(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 52(%esp), %ebp # 4-byte Reload
- adcl 280(%esp), %ebp
- adcl 284(%esp), %esi
- movl %esi, 32(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 288(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- adcl 292(%esp), %edi
- movl %edi, 36(%esp) # 4-byte Spill
- movl %edx, %edi
- adcl $0, %edi
- movl %ecx, %eax
- movl %ecx, %esi
- imull 28(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 224(%esp), %ecx
- movl 732(%esp), %edx
- calll .LmulPv256x32
- addl 224(%esp), %esi
- movl 48(%esp), %eax # 4-byte Reload
- adcl 228(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 60(%esp), %eax # 4-byte Reload
- adcl 232(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 56(%esp), %esi # 4-byte Reload
- adcl 236(%esp), %esi
- adcl 240(%esp), %ebp
- movl %ebp, 52(%esp) # 4-byte Spill
- movl 32(%esp), %eax # 4-byte Reload
- adcl 244(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 248(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
- adcl 252(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- adcl 256(%esp), %edi
- movl %edi, 40(%esp) # 4-byte Spill
- movl 728(%esp), %eax
- movl 24(%eax), %eax
- movl %eax, (%esp)
- leal 184(%esp), %ecx
- movl 724(%esp), %edx
- calll .LmulPv256x32
- movl 216(%esp), %ebp
- movl 48(%esp), %ecx # 4-byte Reload
- addl 184(%esp), %ecx
- movl 60(%esp), %eax # 4-byte Reload
- adcl 188(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- adcl 192(%esp), %esi
- movl %esi, 56(%esp) # 4-byte Spill
- movl 52(%esp), %edi # 4-byte Reload
- adcl 196(%esp), %edi
- movl 32(%esp), %eax # 4-byte Reload
- adcl 200(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 204(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
- adcl 208(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 40(%esp), %eax # 4-byte Reload
- adcl 212(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- adcl $0, %ebp
- movl %ecx, %eax
- movl %ecx, %esi
- imull 28(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 144(%esp), %ecx
- movl 732(%esp), %edx
- calll .LmulPv256x32
- addl 144(%esp), %esi
- movl 60(%esp), %eax # 4-byte Reload
- adcl 148(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 152(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- adcl 156(%esp), %edi
- movl %edi, 52(%esp) # 4-byte Spill
- movl 32(%esp), %esi # 4-byte Reload
- adcl 160(%esp), %esi
- movl 44(%esp), %eax # 4-byte Reload
- adcl 164(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
- adcl 168(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 40(%esp), %eax # 4-byte Reload
- adcl 172(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- adcl 176(%esp), %ebp
- movl 728(%esp), %eax
- movl 28(%eax), %eax
- movl %eax, (%esp)
- leal 104(%esp), %ecx
- movl 724(%esp), %edx
- calll .LmulPv256x32
- movl 136(%esp), %edi
- movl 60(%esp), %ecx # 4-byte Reload
- addl 104(%esp), %ecx
- movl 56(%esp), %eax # 4-byte Reload
- adcl 108(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 52(%esp), %eax # 4-byte Reload
- adcl 112(%esp), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- adcl 116(%esp), %esi
- movl %esi, 32(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 120(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
- adcl 124(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 40(%esp), %eax # 4-byte Reload
- adcl 128(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- adcl 132(%esp), %ebp
- adcl $0, %edi
- movl 28(%esp), %eax # 4-byte Reload
- imull %ecx, %eax
- movl %ecx, %esi
- movl %eax, (%esp)
- leal 64(%esp), %ecx
- movl 732(%esp), %edx
- calll .LmulPv256x32
- addl 64(%esp), %esi
- movl 32(%esp), %esi # 4-byte Reload
- movl 56(%esp), %eax # 4-byte Reload
- movl 44(%esp), %ebx # 4-byte Reload
- adcl 68(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 52(%esp), %ecx # 4-byte Reload
- adcl 72(%esp), %ecx
- movl %ecx, 52(%esp) # 4-byte Spill
- adcl 76(%esp), %esi
- movl %esi, 32(%esp) # 4-byte Spill
- adcl 80(%esp), %ebx
- movl %ebx, 44(%esp) # 4-byte Spill
- movl 36(%esp), %edx # 4-byte Reload
- adcl 84(%esp), %edx
- movl %edx, 36(%esp) # 4-byte Spill
- movl 40(%esp), %edx # 4-byte Reload
- adcl 88(%esp), %edx
- movl %edx, 40(%esp) # 4-byte Spill
- adcl 92(%esp), %ebp
- movl %ebp, 60(%esp) # 4-byte Spill
- adcl 96(%esp), %edi
- movl %edi, 48(%esp) # 4-byte Spill
- movl %eax, %edx
- movl 732(%esp), %eax
- subl (%eax), %edx
- sbbl 4(%eax), %ecx
- sbbl 8(%eax), %esi
- sbbl 12(%eax), %ebx
- movl %ebx, 12(%esp) # 4-byte Spill
- movl 36(%esp), %ebx # 4-byte Reload
- sbbl 16(%eax), %ebx
- movl %ebx, 16(%esp) # 4-byte Spill
- movl 40(%esp), %ebx # 4-byte Reload
- sbbl 20(%eax), %ebx
- movl %ebx, 20(%esp) # 4-byte Spill
- sbbl 24(%eax), %ebp
- movl %ebp, 24(%esp) # 4-byte Spill
- sbbl 28(%eax), %edi
- movl %edi, 28(%esp) # 4-byte Spill
- testl %edi, %edi
- js .LBB115_2
-# BB#1:
- movl %edx, 56(%esp) # 4-byte Spill
-.LBB115_2:
- movl 720(%esp), %edx
- movl 56(%esp), %eax # 4-byte Reload
- movl %eax, (%edx)
- movl 52(%esp), %eax # 4-byte Reload
- js .LBB115_4
-# BB#3:
- movl %ecx, %eax
-.LBB115_4:
- movl %eax, 4(%edx)
- js .LBB115_6
-# BB#5:
- movl %esi, 32(%esp) # 4-byte Spill
-.LBB115_6:
- movl 32(%esp), %eax # 4-byte Reload
- movl %eax, 8(%edx)
- movl 36(%esp), %edi # 4-byte Reload
- movl 40(%esp), %ebp # 4-byte Reload
- movl 60(%esp), %eax # 4-byte Reload
- movl 48(%esp), %ecx # 4-byte Reload
- js .LBB115_8
-# BB#7:
- movl 12(%esp), %esi # 4-byte Reload
- movl %esi, 44(%esp) # 4-byte Spill
-.LBB115_8:
- movl 44(%esp), %esi # 4-byte Reload
- movl %esi, 12(%edx)
- js .LBB115_10
-# BB#9:
- movl 16(%esp), %edi # 4-byte Reload
-.LBB115_10:
- movl %edi, 16(%edx)
- js .LBB115_12
-# BB#11:
- movl 20(%esp), %ebp # 4-byte Reload
-.LBB115_12:
- movl %ebp, 20(%edx)
- js .LBB115_14
-# BB#13:
- movl 24(%esp), %eax # 4-byte Reload
-.LBB115_14:
- movl %eax, 24(%edx)
- js .LBB115_16
-# BB#15:
- movl 28(%esp), %ecx # 4-byte Reload
-.LBB115_16:
- movl %ecx, 28(%edx)
- addl $700, %esp # imm = 0x2BC
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end115:
- .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L
-
- .globl mcl_fp_montRed8L
- .align 16, 0x90
- .type mcl_fp_montRed8L,@function
-mcl_fp_montRed8L: # @mcl_fp_montRed8L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $428, %esp # imm = 0x1AC
- calll .L116$pb
-.L116$pb:
- popl %ebx
-.Ltmp7:
- addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp7-.L116$pb), %ebx
- movl 456(%esp), %edx
- movl -4(%edx), %edi
- movl %edi, 60(%esp) # 4-byte Spill
- movl 452(%esp), %eax
- movl (%eax), %esi
- movl %esi, 56(%esp) # 4-byte Spill
- movl 4(%eax), %ecx
- movl %ecx, 64(%esp) # 4-byte Spill
- movl %esi, %ecx
- imull %edi, %ecx
- movl 60(%eax), %edi
- movl %edi, 84(%esp) # 4-byte Spill
- movl 56(%eax), %edi
- movl %edi, 88(%esp) # 4-byte Spill
- movl 52(%eax), %esi
- movl %esi, 80(%esp) # 4-byte Spill
- movl 48(%eax), %esi
- movl %esi, 100(%esp) # 4-byte Spill
- movl 44(%eax), %esi
- movl %esi, 76(%esp) # 4-byte Spill
- movl 40(%eax), %edi
- movl %edi, 104(%esp) # 4-byte Spill
- movl 36(%eax), %edi
- movl %edi, 108(%esp) # 4-byte Spill
- movl 32(%eax), %edi
- movl %edi, 92(%esp) # 4-byte Spill
- movl 28(%eax), %edi
- movl %edi, 96(%esp) # 4-byte Spill
- movl 24(%eax), %edi
- movl %edi, 72(%esp) # 4-byte Spill
- movl 20(%eax), %edi
- movl %edi, 68(%esp) # 4-byte Spill
- movl 16(%eax), %ebp
- movl 12(%eax), %edi
- movl 8(%eax), %esi
- movl (%edx), %eax
- movl %eax, 24(%esp) # 4-byte Spill
- movl 28(%edx), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 24(%edx), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 20(%edx), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 16(%edx), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 12(%edx), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 8(%edx), %eax
- movl %eax, 28(%esp) # 4-byte Spill
- movl 4(%edx), %eax
- movl %eax, 20(%esp) # 4-byte Spill
- movl %ecx, (%esp)
- leal 392(%esp), %ecx
- calll .LmulPv256x32
- movl 56(%esp), %eax # 4-byte Reload
- addl 392(%esp), %eax
- movl 64(%esp), %ecx # 4-byte Reload
- adcl 396(%esp), %ecx
- adcl 400(%esp), %esi
- movl %esi, 16(%esp) # 4-byte Spill
- adcl 404(%esp), %edi
- movl %edi, 52(%esp) # 4-byte Spill
- adcl 408(%esp), %ebp
- movl %ebp, 56(%esp) # 4-byte Spill
- movl 68(%esp), %eax # 4-byte Reload
- adcl 412(%esp), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 72(%esp), %eax # 4-byte Reload
- adcl 416(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 96(%esp), %eax # 4-byte Reload
- adcl 420(%esp), %eax
- movl %eax, 96(%esp) # 4-byte Spill
- movl 92(%esp), %eax # 4-byte Reload
- adcl 424(%esp), %eax
- movl %eax, 92(%esp) # 4-byte Spill
- adcl $0, 108(%esp) # 4-byte Folded Spill
- adcl $0, 104(%esp) # 4-byte Folded Spill
- movl 76(%esp), %esi # 4-byte Reload
- adcl $0, %esi
- adcl $0, 100(%esp) # 4-byte Folded Spill
- movl 80(%esp), %ebp # 4-byte Reload
- adcl $0, %ebp
- adcl $0, 88(%esp) # 4-byte Folded Spill
- adcl $0, 84(%esp) # 4-byte Folded Spill
- sbbl %eax, %eax
- movl %eax, 64(%esp) # 4-byte Spill
- movl %ecx, %edi
- movl %edi, %eax
- imull 60(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 352(%esp), %ecx
- movl 456(%esp), %edx
- calll .LmulPv256x32
- movl 64(%esp), %eax # 4-byte Reload
- andl $1, %eax
- addl 352(%esp), %edi
- movl 16(%esp), %edx # 4-byte Reload
- adcl 356(%esp), %edx
- movl 52(%esp), %ecx # 4-byte Reload
- adcl 360(%esp), %ecx
- movl %ecx, 52(%esp) # 4-byte Spill
- movl 56(%esp), %ecx # 4-byte Reload
- adcl 364(%esp), %ecx
- movl %ecx, 56(%esp) # 4-byte Spill
- movl 68(%esp), %ecx # 4-byte Reload
- adcl 368(%esp), %ecx
- movl %ecx, 68(%esp) # 4-byte Spill
- movl 72(%esp), %ecx # 4-byte Reload
- adcl 372(%esp), %ecx
- movl %ecx, 72(%esp) # 4-byte Spill
- movl 96(%esp), %ecx # 4-byte Reload
- adcl 376(%esp), %ecx
- movl %ecx, 96(%esp) # 4-byte Spill
- movl 92(%esp), %ecx # 4-byte Reload
- adcl 380(%esp), %ecx
- movl %ecx, 92(%esp) # 4-byte Spill
- movl 108(%esp), %ecx # 4-byte Reload
- adcl 384(%esp), %ecx
- movl %ecx, 108(%esp) # 4-byte Spill
- adcl $0, 104(%esp) # 4-byte Folded Spill
- adcl $0, %esi
- adcl $0, 100(%esp) # 4-byte Folded Spill
- adcl $0, %ebp
- movl %ebp, 80(%esp) # 4-byte Spill
- adcl $0, 88(%esp) # 4-byte Folded Spill
- adcl $0, 84(%esp) # 4-byte Folded Spill
- adcl $0, %eax
- movl %eax, 64(%esp) # 4-byte Spill
- movl %edx, %edi
- movl %edi, %eax
- imull 60(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 312(%esp), %ecx
- movl 456(%esp), %edx
- calll .LmulPv256x32
- addl 312(%esp), %edi
- movl 52(%esp), %edi # 4-byte Reload
- adcl 316(%esp), %edi
- movl 56(%esp), %eax # 4-byte Reload
- adcl 320(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 68(%esp), %eax # 4-byte Reload
- adcl 324(%esp), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 72(%esp), %eax # 4-byte Reload
- adcl 328(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 96(%esp), %eax # 4-byte Reload
- adcl 332(%esp), %eax
- movl %eax, 96(%esp) # 4-byte Spill
- movl 92(%esp), %eax # 4-byte Reload
- adcl 336(%esp), %eax
- movl %eax, 92(%esp) # 4-byte Spill
- movl 108(%esp), %eax # 4-byte Reload
- adcl 340(%esp), %eax
- movl %eax, 108(%esp) # 4-byte Spill
- movl 104(%esp), %eax # 4-byte Reload
- adcl 344(%esp), %eax
- movl %eax, 104(%esp) # 4-byte Spill
- adcl $0, %esi
- movl %esi, 76(%esp) # 4-byte Spill
- movl 100(%esp), %ebp # 4-byte Reload
- adcl $0, %ebp
- movl 80(%esp), %esi # 4-byte Reload
- adcl $0, %esi
- adcl $0, 88(%esp) # 4-byte Folded Spill
- adcl $0, 84(%esp) # 4-byte Folded Spill
- adcl $0, 64(%esp) # 4-byte Folded Spill
- movl %edi, %eax
- imull 60(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 272(%esp), %ecx
- movl 456(%esp), %edx
- calll .LmulPv256x32
- addl 272(%esp), %edi
- movl 56(%esp), %ecx # 4-byte Reload
- adcl 276(%esp), %ecx
- movl 68(%esp), %eax # 4-byte Reload
- adcl 280(%esp), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 72(%esp), %eax # 4-byte Reload
- adcl 284(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 96(%esp), %eax # 4-byte Reload
- adcl 288(%esp), %eax
- movl %eax, 96(%esp) # 4-byte Spill
- movl 92(%esp), %eax # 4-byte Reload
- adcl 292(%esp), %eax
- movl %eax, 92(%esp) # 4-byte Spill
- movl 108(%esp), %eax # 4-byte Reload
- adcl 296(%esp), %eax
- movl %eax, 108(%esp) # 4-byte Spill
- movl 104(%esp), %eax # 4-byte Reload
- adcl 300(%esp), %eax
- movl %eax, 104(%esp) # 4-byte Spill
- movl 76(%esp), %eax # 4-byte Reload
- adcl 304(%esp), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- adcl $0, %ebp
- movl %ebp, 100(%esp) # 4-byte Spill
- adcl $0, %esi
- movl %esi, 80(%esp) # 4-byte Spill
- movl 88(%esp), %edi # 4-byte Reload
- adcl $0, %edi
- movl 84(%esp), %esi # 4-byte Reload
- adcl $0, %esi
- adcl $0, 64(%esp) # 4-byte Folded Spill
- movl %ecx, %ebp
- movl %ebp, %eax
- imull 60(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 232(%esp), %ecx
- movl 456(%esp), %edx
- calll .LmulPv256x32
- addl 232(%esp), %ebp
- movl 68(%esp), %ecx # 4-byte Reload
- adcl 236(%esp), %ecx
- movl 72(%esp), %eax # 4-byte Reload
- adcl 240(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 96(%esp), %eax # 4-byte Reload
- adcl 244(%esp), %eax
- movl %eax, 96(%esp) # 4-byte Spill
- movl 92(%esp), %eax # 4-byte Reload
- adcl 248(%esp), %eax
- movl %eax, 92(%esp) # 4-byte Spill
- movl 108(%esp), %ebp # 4-byte Reload
- adcl 252(%esp), %ebp
- movl 104(%esp), %eax # 4-byte Reload
- adcl 256(%esp), %eax
- movl %eax, 104(%esp) # 4-byte Spill
- movl 76(%esp), %eax # 4-byte Reload
- adcl 260(%esp), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- movl 100(%esp), %eax # 4-byte Reload
- adcl 264(%esp), %eax
- movl %eax, 100(%esp) # 4-byte Spill
- adcl $0, 80(%esp) # 4-byte Folded Spill
- adcl $0, %edi
- movl %edi, 88(%esp) # 4-byte Spill
- adcl $0, %esi
- movl %esi, 84(%esp) # 4-byte Spill
- movl 64(%esp), %esi # 4-byte Reload
- adcl $0, %esi
- movl %ecx, %edi
- movl %edi, %eax
- imull 60(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 192(%esp), %ecx
- movl 456(%esp), %edx
- calll .LmulPv256x32
- addl 192(%esp), %edi
- movl 72(%esp), %ecx # 4-byte Reload
- adcl 196(%esp), %ecx
- movl 96(%esp), %eax # 4-byte Reload
- adcl 200(%esp), %eax
- movl %eax, 96(%esp) # 4-byte Spill
- movl 92(%esp), %edi # 4-byte Reload
- adcl 204(%esp), %edi
- adcl 208(%esp), %ebp
- movl %ebp, 108(%esp) # 4-byte Spill
- movl 104(%esp), %ebp # 4-byte Reload
- adcl 212(%esp), %ebp
- movl 76(%esp), %eax # 4-byte Reload
- adcl 216(%esp), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- movl 100(%esp), %eax # 4-byte Reload
- adcl 220(%esp), %eax
- movl %eax, 100(%esp) # 4-byte Spill
- movl 80(%esp), %eax # 4-byte Reload
- adcl 224(%esp), %eax
- movl %eax, 80(%esp) # 4-byte Spill
- adcl $0, 88(%esp) # 4-byte Folded Spill
- adcl $0, 84(%esp) # 4-byte Folded Spill
- adcl $0, %esi
- movl %esi, 64(%esp) # 4-byte Spill
- movl %ecx, %eax
- movl %ecx, %esi
- imull 60(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 152(%esp), %ecx
- movl 456(%esp), %edx
- calll .LmulPv256x32
- addl 152(%esp), %esi
- movl 96(%esp), %ecx # 4-byte Reload
- adcl 156(%esp), %ecx
- adcl 160(%esp), %edi
- movl %edi, 92(%esp) # 4-byte Spill
- movl 108(%esp), %eax # 4-byte Reload
- adcl 164(%esp), %eax
- movl %eax, 108(%esp) # 4-byte Spill
- adcl 168(%esp), %ebp
- movl %ebp, 104(%esp) # 4-byte Spill
- movl 76(%esp), %edi # 4-byte Reload
- adcl 172(%esp), %edi
- movl 100(%esp), %eax # 4-byte Reload
- adcl 176(%esp), %eax
- movl %eax, 100(%esp) # 4-byte Spill
- movl 80(%esp), %ebp # 4-byte Reload
- adcl 180(%esp), %ebp
- movl 88(%esp), %eax # 4-byte Reload
- adcl 184(%esp), %eax
- movl %eax, 88(%esp) # 4-byte Spill
- adcl $0, 84(%esp) # 4-byte Folded Spill
- adcl $0, 64(%esp) # 4-byte Folded Spill
- movl 60(%esp), %eax # 4-byte Reload
- imull %ecx, %eax
- movl %ecx, %esi
- movl %eax, (%esp)
- leal 112(%esp), %ecx
- movl 456(%esp), %edx
- calll .LmulPv256x32
- addl 112(%esp), %esi
- movl 92(%esp), %ecx # 4-byte Reload
- adcl 116(%esp), %ecx
- movl 108(%esp), %eax # 4-byte Reload
- adcl 120(%esp), %eax
- movl %eax, 108(%esp) # 4-byte Spill
- movl 104(%esp), %eax # 4-byte Reload
- adcl 124(%esp), %eax
- movl %eax, 104(%esp) # 4-byte Spill
- adcl 128(%esp), %edi
- movl %edi, 76(%esp) # 4-byte Spill
- movl %edi, %ebx
- movl 100(%esp), %eax # 4-byte Reload
- adcl 132(%esp), %eax
- movl %eax, 100(%esp) # 4-byte Spill
- movl %eax, %esi
- adcl 136(%esp), %ebp
- movl %ebp, 80(%esp) # 4-byte Spill
- movl 88(%esp), %eax # 4-byte Reload
- adcl 140(%esp), %eax
- movl %eax, 88(%esp) # 4-byte Spill
- movl 84(%esp), %eax # 4-byte Reload
- adcl 144(%esp), %eax
- movl %eax, 84(%esp) # 4-byte Spill
- movl 64(%esp), %edi # 4-byte Reload
- adcl $0, %edi
- movl %ecx, %edx
- subl 24(%esp), %edx # 4-byte Folded Reload
- movl 108(%esp), %eax # 4-byte Reload
- sbbl 20(%esp), %eax # 4-byte Folded Reload
- movl 104(%esp), %ebp # 4-byte Reload
- sbbl 28(%esp), %ebp # 4-byte Folded Reload
- sbbl 32(%esp), %ebx # 4-byte Folded Reload
- sbbl 36(%esp), %esi # 4-byte Folded Reload
- movl %esi, 68(%esp) # 4-byte Spill
- movl 80(%esp), %esi # 4-byte Reload
- sbbl 40(%esp), %esi # 4-byte Folded Reload
- movl %esi, 72(%esp) # 4-byte Spill
- movl 88(%esp), %esi # 4-byte Reload
- sbbl 44(%esp), %esi # 4-byte Folded Reload
- movl %esi, 92(%esp) # 4-byte Spill
- movl 84(%esp), %esi # 4-byte Reload
- sbbl 48(%esp), %esi # 4-byte Folded Reload
- movl %esi, 96(%esp) # 4-byte Spill
- sbbl $0, %edi
- andl $1, %edi
- jne .LBB116_2
-# BB#1:
- movl %edx, %ecx
-.LBB116_2:
- movl 448(%esp), %edx
- movl %ecx, (%edx)
- movl %edi, %ecx
- testb %cl, %cl
- jne .LBB116_4
-# BB#3:
- movl %eax, 108(%esp) # 4-byte Spill
-.LBB116_4:
- movl 108(%esp), %eax # 4-byte Reload
- movl %eax, 4(%edx)
- movl 104(%esp), %eax # 4-byte Reload
- jne .LBB116_6
-# BB#5:
- movl %ebp, %eax
-.LBB116_6:
- movl %eax, 8(%edx)
- movl 84(%esp), %eax # 4-byte Reload
- movl 76(%esp), %ebp # 4-byte Reload
- jne .LBB116_8
-# BB#7:
- movl %ebx, %ebp
-.LBB116_8:
- movl %ebp, 12(%edx)
- movl 100(%esp), %ebx # 4-byte Reload
- jne .LBB116_10
-# BB#9:
- movl 68(%esp), %ebx # 4-byte Reload
-.LBB116_10:
- movl %ebx, 16(%edx)
- movl 80(%esp), %edi # 4-byte Reload
- jne .LBB116_12
-# BB#11:
- movl 72(%esp), %edi # 4-byte Reload
-.LBB116_12:
- movl %edi, 20(%edx)
- movl 88(%esp), %esi # 4-byte Reload
- jne .LBB116_14
-# BB#13:
- movl 92(%esp), %esi # 4-byte Reload
-.LBB116_14:
- movl %esi, 24(%edx)
- jne .LBB116_16
-# BB#15:
- movl 96(%esp), %eax # 4-byte Reload
-.LBB116_16:
- movl %eax, 28(%edx)
- addl $428, %esp # imm = 0x1AC
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end116:
- .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L
-
- .globl mcl_fp_addPre8L
- .align 16, 0x90
- .type mcl_fp_addPre8L,@function
-mcl_fp_addPre8L: # @mcl_fp_addPre8L
-# BB#0:
- pushl %ebx
- pushl %edi
- pushl %esi
- movl 24(%esp), %eax
- movl (%eax), %ecx
- movl 4(%eax), %edx
- movl 20(%esp), %esi
- addl (%esi), %ecx
- adcl 4(%esi), %edx
- movl 8(%eax), %edi
- adcl 8(%esi), %edi
- movl 16(%esp), %ebx
- movl %ecx, (%ebx)
- movl 12(%esi), %ecx
- movl %edx, 4(%ebx)
- movl 16(%esi), %edx
- adcl 12(%eax), %ecx
- adcl 16(%eax), %edx
- movl %edi, 8(%ebx)
- movl 20(%eax), %edi
- movl %ecx, 12(%ebx)
- movl 20(%esi), %ecx
- adcl %edi, %ecx
- movl 24(%eax), %edi
- movl %edx, 16(%ebx)
- movl 24(%esi), %edx
- adcl %edi, %edx
- movl %ecx, 20(%ebx)
- movl %edx, 24(%ebx)
- movl 28(%eax), %eax
- movl 28(%esi), %ecx
- adcl %eax, %ecx
- movl %ecx, 28(%ebx)
- sbbl %eax, %eax
- andl $1, %eax
- popl %esi
- popl %edi
- popl %ebx
- retl
-.Lfunc_end117:
- .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L
-
- .globl mcl_fp_subPre8L
- .align 16, 0x90
- .type mcl_fp_subPre8L,@function
-mcl_fp_subPre8L: # @mcl_fp_subPre8L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- movl 24(%esp), %ecx
- movl (%ecx), %edx
- movl 4(%ecx), %esi
- xorl %eax, %eax
- movl 28(%esp), %edi
- subl (%edi), %edx
- sbbl 4(%edi), %esi
- movl 8(%ecx), %ebx
- sbbl 8(%edi), %ebx
- movl 20(%esp), %ebp
- movl %edx, (%ebp)
- movl 12(%ecx), %edx
- sbbl 12(%edi), %edx
- movl %esi, 4(%ebp)
- movl 16(%ecx), %esi
- sbbl 16(%edi), %esi
- movl %ebx, 8(%ebp)
- movl 20(%edi), %ebx
- movl %edx, 12(%ebp)
- movl 20(%ecx), %edx
- sbbl %ebx, %edx
- movl 24(%edi), %ebx
- movl %esi, 16(%ebp)
- movl 24(%ecx), %esi
- sbbl %ebx, %esi
- movl %edx, 20(%ebp)
- movl %esi, 24(%ebp)
- movl 28(%edi), %edx
- movl 28(%ecx), %ecx
- sbbl %edx, %ecx
- movl %ecx, 28(%ebp)
- sbbl $0, %eax
- andl $1, %eax
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end118:
- .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L
-
- .globl mcl_fp_shr1_8L
- .align 16, 0x90
- .type mcl_fp_shr1_8L,@function
-mcl_fp_shr1_8L: # @mcl_fp_shr1_8L
-# BB#0:
- pushl %esi
- movl 12(%esp), %eax
- movl (%eax), %ecx
- movl 4(%eax), %edx
- shrdl $1, %edx, %ecx
- movl 8(%esp), %esi
- movl %ecx, (%esi)
- movl 8(%eax), %ecx
- shrdl $1, %ecx, %edx
- movl %edx, 4(%esi)
- movl 12(%eax), %edx
- shrdl $1, %edx, %ecx
- movl %ecx, 8(%esi)
- movl 16(%eax), %ecx
- shrdl $1, %ecx, %edx
- movl %edx, 12(%esi)
- movl 20(%eax), %edx
- shrdl $1, %edx, %ecx
- movl %ecx, 16(%esi)
- movl 24(%eax), %ecx
- shrdl $1, %ecx, %edx
- movl %edx, 20(%esi)
- movl 28(%eax), %eax
- shrdl $1, %eax, %ecx
- movl %ecx, 24(%esi)
- shrl %eax
- movl %eax, 28(%esi)
- popl %esi
- retl
-.Lfunc_end119:
- .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L
-
- .globl mcl_fp_add8L
- .align 16, 0x90
- .type mcl_fp_add8L,@function
-mcl_fp_add8L: # @mcl_fp_add8L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $20, %esp
- movl 48(%esp), %edi
- movl (%edi), %ecx
- movl 4(%edi), %eax
- movl 44(%esp), %edx
- addl (%edx), %ecx
- movl %ecx, 8(%esp) # 4-byte Spill
- movl %ecx, %ebp
- adcl 4(%edx), %eax
- movl %eax, 16(%esp) # 4-byte Spill
- movl 8(%edi), %eax
- adcl 8(%edx), %eax
- movl %eax, 12(%esp) # 4-byte Spill
- movl 12(%edx), %esi
- movl 16(%edx), %eax
- adcl 12(%edi), %esi
- adcl 16(%edi), %eax
- movl %eax, 4(%esp) # 4-byte Spill
- movl 20(%edx), %ecx
- adcl 20(%edi), %ecx
- movl 24(%edx), %ebx
- adcl 24(%edi), %ebx
- movl 28(%edx), %edi
- movl 48(%esp), %edx
- adcl 28(%edx), %edi
- movl 40(%esp), %edx
- movl %ebp, (%edx)
- movl 16(%esp), %ebp # 4-byte Reload
- movl %ebp, 4(%edx)
- movl 12(%esp), %ebp # 4-byte Reload
- movl %ebp, 8(%edx)
- movl %esi, 12(%edx)
- movl %eax, 16(%edx)
- movl %ecx, 20(%edx)
- movl %ebx, 24(%edx)
- movl %edi, 28(%edx)
- sbbl %eax, %eax
- andl $1, %eax
- movl 52(%esp), %edx
- movl 8(%esp), %ebp # 4-byte Reload
- subl (%edx), %ebp
- movl %ebp, 8(%esp) # 4-byte Spill
- movl 16(%esp), %ebp # 4-byte Reload
- movl 52(%esp), %edx
- sbbl 4(%edx), %ebp
- movl %ebp, 16(%esp) # 4-byte Spill
- movl 12(%esp), %ebp # 4-byte Reload
- movl 52(%esp), %edx
- sbbl 8(%edx), %ebp
- movl %ebp, 12(%esp) # 4-byte Spill
- movl 52(%esp), %ebp
- sbbl 12(%ebp), %esi
- movl %esi, (%esp) # 4-byte Spill
- movl 4(%esp), %edx # 4-byte Reload
- sbbl 16(%ebp), %edx
- movl %edx, %esi
- sbbl 20(%ebp), %ecx
- sbbl 24(%ebp), %ebx
- sbbl 28(%ebp), %edi
- sbbl $0, %eax
- testb $1, %al
- jne .LBB120_2
-# BB#1: # %nocarry
- movl 8(%esp), %edx # 4-byte Reload
- movl 40(%esp), %ebp
- movl %edx, (%ebp)
- movl 16(%esp), %edx # 4-byte Reload
- movl %edx, 4(%ebp)
- movl 12(%esp), %edx # 4-byte Reload
- movl %edx, 8(%ebp)
- movl (%esp), %eax # 4-byte Reload
- movl %eax, 12(%ebp)
- movl %esi, 16(%ebp)
- movl %ecx, 20(%ebp)
- movl %ebx, 24(%ebp)
- movl %edi, 28(%ebp)
-.LBB120_2: # %carry
- addl $20, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end120:
- .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L
-
- .globl mcl_fp_addNF8L
- .align 16, 0x90
- .type mcl_fp_addNF8L,@function
-mcl_fp_addNF8L: # @mcl_fp_addNF8L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $56, %esp
- movl 84(%esp), %eax
- movl (%eax), %ecx
- movl 4(%eax), %edi
- movl 80(%esp), %ebx
- addl (%ebx), %ecx
- movl %ecx, 24(%esp) # 4-byte Spill
- adcl 4(%ebx), %edi
- movl %edi, 32(%esp) # 4-byte Spill
- movl 28(%eax), %ecx
- movl %ecx, 48(%esp) # 4-byte Spill
- movl 24(%eax), %ecx
- movl %ecx, 52(%esp) # 4-byte Spill
- movl 20(%eax), %ebp
- movl 16(%eax), %esi
- movl 12(%eax), %edx
- movl 8(%eax), %ecx
- adcl 8(%ebx), %ecx
- movl %ecx, 28(%esp) # 4-byte Spill
- adcl 12(%ebx), %edx
- movl %edx, 36(%esp) # 4-byte Spill
- adcl 16(%ebx), %esi
- movl %esi, 40(%esp) # 4-byte Spill
- adcl 20(%ebx), %ebp
- movl %ebp, 44(%esp) # 4-byte Spill
- movl 52(%esp), %eax # 4-byte Reload
- adcl 24(%ebx), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- movl 48(%esp), %eax # 4-byte Reload
- adcl 28(%ebx), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 88(%esp), %ebx
- movl 24(%esp), %ebp # 4-byte Reload
- movl %ebp, %eax
- subl (%ebx), %eax
- movl %eax, (%esp) # 4-byte Spill
- sbbl 4(%ebx), %edi
- movl %edi, 4(%esp) # 4-byte Spill
- movl %ebp, %eax
- sbbl 8(%ebx), %ecx
- movl %ecx, 8(%esp) # 4-byte Spill
- sbbl 12(%ebx), %edx
- movl %edx, 12(%esp) # 4-byte Spill
- sbbl 16(%ebx), %esi
- movl %esi, 16(%esp) # 4-byte Spill
- movl 44(%esp), %esi # 4-byte Reload
- sbbl 20(%ebx), %esi
- movl %esi, 20(%esp) # 4-byte Spill
- movl 52(%esp), %ebp # 4-byte Reload
- sbbl 24(%ebx), %ebp
- movl 48(%esp), %esi # 4-byte Reload
- sbbl 28(%ebx), %esi
- testl %esi, %esi
- js .LBB121_2
-# BB#1:
- movl (%esp), %eax # 4-byte Reload
-.LBB121_2:
- movl 76(%esp), %ebx
- movl %eax, (%ebx)
- movl 32(%esp), %eax # 4-byte Reload
- js .LBB121_4
-# BB#3:
- movl 4(%esp), %eax # 4-byte Reload
-.LBB121_4:
- movl %eax, 4(%ebx)
- movl 40(%esp), %edx # 4-byte Reload
- movl 28(%esp), %edi # 4-byte Reload
- js .LBB121_6
-# BB#5:
- movl 8(%esp), %edi # 4-byte Reload
-.LBB121_6:
- movl %edi, 8(%ebx)
- movl 44(%esp), %ecx # 4-byte Reload
- movl 36(%esp), %eax # 4-byte Reload
- js .LBB121_8
-# BB#7:
- movl 12(%esp), %eax # 4-byte Reload
-.LBB121_8:
- movl %eax, 12(%ebx)
- movl 48(%esp), %edi # 4-byte Reload
- movl 52(%esp), %eax # 4-byte Reload
- js .LBB121_10
-# BB#9:
- movl 16(%esp), %edx # 4-byte Reload
-.LBB121_10:
- movl %edx, 16(%ebx)
- js .LBB121_12
-# BB#11:
- movl 20(%esp), %ecx # 4-byte Reload
-.LBB121_12:
- movl %ecx, 20(%ebx)
- js .LBB121_14
-# BB#13:
- movl %ebp, %eax
-.LBB121_14:
- movl %eax, 24(%ebx)
- js .LBB121_16
-# BB#15:
- movl %esi, %edi
-.LBB121_16:
- movl %edi, 28(%ebx)
- addl $56, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end121:
- .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L
-
- .globl mcl_fp_sub8L
- .align 16, 0x90
- .type mcl_fp_sub8L,@function
-mcl_fp_sub8L: # @mcl_fp_sub8L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $28, %esp
- movl 52(%esp), %esi
- movl (%esi), %ecx
- movl 4(%esi), %eax
- xorl %ebx, %ebx
- movl 56(%esp), %ebp
- subl (%ebp), %ecx
- movl %ecx, 16(%esp) # 4-byte Spill
- sbbl 4(%ebp), %eax
- movl %eax, 20(%esp) # 4-byte Spill
- movl 8(%esi), %edx
- sbbl 8(%ebp), %edx
- movl %edx, 8(%esp) # 4-byte Spill
- movl 12(%esi), %eax
- sbbl 12(%ebp), %eax
- movl %eax, 24(%esp) # 4-byte Spill
- movl 16(%esi), %ecx
- sbbl 16(%ebp), %ecx
- movl %ecx, 12(%esp) # 4-byte Spill
- movl 20(%esi), %eax
- sbbl 20(%ebp), %eax
- movl %eax, 4(%esp) # 4-byte Spill
- movl 24(%esi), %edi
- sbbl 24(%ebp), %edi
- movl 28(%esi), %esi
- sbbl 28(%ebp), %esi
- sbbl $0, %ebx
- testb $1, %bl
- movl 48(%esp), %ebx
- movl 16(%esp), %ebp # 4-byte Reload
- movl %ebp, (%ebx)
- movl 20(%esp), %ebp # 4-byte Reload
- movl %ebp, 4(%ebx)
- movl %edx, 8(%ebx)
- movl 24(%esp), %edx # 4-byte Reload
- movl %edx, 12(%ebx)
- movl %ecx, 16(%ebx)
- movl %eax, 20(%ebx)
- movl %edi, 24(%ebx)
- movl %esi, 28(%ebx)
- je .LBB122_2
-# BB#1: # %carry
- movl %esi, (%esp) # 4-byte Spill
- movl 60(%esp), %esi
- movl 16(%esp), %ecx # 4-byte Reload
- addl (%esi), %ecx
- movl %ecx, (%ebx)
- movl 20(%esp), %edx # 4-byte Reload
- adcl 4(%esi), %edx
- movl %edx, 4(%ebx)
- movl 8(%esp), %ebp # 4-byte Reload
- adcl 8(%esi), %ebp
- movl 12(%esi), %eax
- adcl 24(%esp), %eax # 4-byte Folded Reload
- movl %ebp, 8(%ebx)
- movl 16(%esi), %ecx
- adcl 12(%esp), %ecx # 4-byte Folded Reload
- movl %eax, 12(%ebx)
- movl 20(%esi), %eax
- adcl 4(%esp), %eax # 4-byte Folded Reload
- movl %ecx, 16(%ebx)
- movl %eax, 20(%ebx)
- movl 24(%esi), %eax
- adcl %edi, %eax
- movl %eax, 24(%ebx)
- movl 28(%esi), %eax
- adcl (%esp), %eax # 4-byte Folded Reload
- movl %eax, 28(%ebx)
-.LBB122_2: # %nocarry
- addl $28, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end122:
- .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L
-
- .globl mcl_fp_subNF8L
- .align 16, 0x90
- .type mcl_fp_subNF8L,@function
-mcl_fp_subNF8L: # @mcl_fp_subNF8L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $40, %esp
- movl 64(%esp), %eax
- movl (%eax), %esi
- movl 4(%eax), %edx
- movl 68(%esp), %ecx
- subl (%ecx), %esi
- movl %esi, 24(%esp) # 4-byte Spill
- sbbl 4(%ecx), %edx
- movl %edx, 28(%esp) # 4-byte Spill
- movl 28(%eax), %edx
- movl 24(%eax), %esi
- movl 20(%eax), %edi
- movl 16(%eax), %ebx
- movl 12(%eax), %ebp
- movl 8(%eax), %eax
- sbbl 8(%ecx), %eax
- movl %eax, 8(%esp) # 4-byte Spill
- sbbl 12(%ecx), %ebp
- movl %ebp, 12(%esp) # 4-byte Spill
- sbbl 16(%ecx), %ebx
- movl %ebx, 16(%esp) # 4-byte Spill
- sbbl 20(%ecx), %edi
- movl %edi, 20(%esp) # 4-byte Spill
- sbbl 24(%ecx), %esi
- movl %esi, 32(%esp) # 4-byte Spill
- movl %edx, %edi
- sbbl 28(%ecx), %edi
- movl %edi, 36(%esp) # 4-byte Spill
- sarl $31, %edi
- movl 72(%esp), %ebp
- movl 28(%ebp), %eax
- andl %edi, %eax
- movl %eax, 4(%esp) # 4-byte Spill
- movl 24(%ebp), %eax
- andl %edi, %eax
- movl %eax, (%esp) # 4-byte Spill
- movl 20(%ebp), %ebx
- andl %edi, %ebx
- movl 16(%ebp), %esi
- andl %edi, %esi
- movl 12(%ebp), %edx
- andl %edi, %edx
- movl 8(%ebp), %ecx
- andl %edi, %ecx
- movl 4(%ebp), %eax
- andl %edi, %eax
- andl (%ebp), %edi
- addl 24(%esp), %edi # 4-byte Folded Reload
- adcl 28(%esp), %eax # 4-byte Folded Reload
- movl 60(%esp), %ebp
- movl %edi, (%ebp)
- adcl 8(%esp), %ecx # 4-byte Folded Reload
- movl %eax, 4(%ebp)
- adcl 12(%esp), %edx # 4-byte Folded Reload
- movl %ecx, 8(%ebp)
- adcl 16(%esp), %esi # 4-byte Folded Reload
- movl %edx, 12(%ebp)
- adcl 20(%esp), %ebx # 4-byte Folded Reload
- movl %esi, 16(%ebp)
- movl (%esp), %eax # 4-byte Reload
- adcl 32(%esp), %eax # 4-byte Folded Reload
- movl %ebx, 20(%ebp)
- movl %eax, 24(%ebp)
- movl 4(%esp), %eax # 4-byte Reload
- adcl 36(%esp), %eax # 4-byte Folded Reload
- movl %eax, 28(%ebp)
- addl $40, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end123:
- .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L
-
- .globl mcl_fpDbl_add8L
- .align 16, 0x90
- .type mcl_fpDbl_add8L,@function
-mcl_fpDbl_add8L: # @mcl_fpDbl_add8L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $56, %esp
- movl 84(%esp), %ecx
- movl (%ecx), %esi
- movl 4(%ecx), %edx
- movl 80(%esp), %ebp
- addl (%ebp), %esi
- adcl 4(%ebp), %edx
- movl 8(%ecx), %edi
- adcl 8(%ebp), %edi
- movl 12(%ebp), %ebx
- movl 76(%esp), %eax
- movl %esi, (%eax)
- movl 16(%ebp), %esi
- adcl 12(%ecx), %ebx
- adcl 16(%ecx), %esi
- movl %edx, 4(%eax)
- movl 40(%ecx), %edx
- movl %edx, 52(%esp) # 4-byte Spill
- movl %edi, 8(%eax)
- movl 20(%ecx), %edx
- movl %ebx, 12(%eax)
- movl 20(%ebp), %edi
- adcl %edx, %edi
- movl 24(%ecx), %edx
- movl %esi, 16(%eax)
- movl 24(%ebp), %esi
- adcl %edx, %esi
- movl 28(%ecx), %edx
- movl %edi, 20(%eax)
- movl 28(%ebp), %ebx
- adcl %edx, %ebx
- movl 32(%ecx), %edx
- movl %esi, 24(%eax)
- movl 32(%ebp), %esi
- adcl %edx, %esi
- movl %esi, 44(%esp) # 4-byte Spill
- movl 36(%ecx), %edx - movl %ebx, 28(%eax) - movl 36(%ebp), %ebx - adcl %edx, %ebx - movl %ebx, 28(%esp) # 4-byte Spill - movl 40(%ebp), %eax - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%ecx), %edx - movl 44(%ebp), %edi - adcl %edx, %edi - movl %edi, 32(%esp) # 4-byte Spill - movl 48(%ecx), %edx - movl 48(%ebp), %eax - adcl %edx, %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 52(%ecx), %edx - movl 52(%ebp), %esi - adcl %edx, %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 56(%ecx), %edx - movl 56(%ebp), %eax - adcl %edx, %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%ecx), %ecx - movl 60(%ebp), %ebp - adcl %ecx, %ebp - movl %ebp, 40(%esp) # 4-byte Spill - sbbl %ecx, %ecx - andl $1, %ecx - movl 44(%esp), %eax # 4-byte Reload - movl 88(%esp), %edx - subl (%edx), %eax - movl %eax, (%esp) # 4-byte Spill - movl 88(%esp), %eax - sbbl 4(%eax), %ebx - movl %eax, %edx - movl %ebx, 4(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - movl %edx, %ebx - sbbl 8(%ebx), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl %edi, %eax - movl 24(%esp), %edi # 4-byte Reload - sbbl 12(%ebx), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl %edi, %eax - sbbl 16(%ebx), %eax - sbbl 20(%ebx), %esi - movl %esi, 16(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - sbbl 24(%ebx), %edx - movl %edx, 20(%esp) # 4-byte Spill - sbbl 28(%ebx), %ebp - sbbl $0, %ecx - andl $1, %ecx - jne .LBB124_2 -# BB#1: - movl %eax, %edi -.LBB124_2: - testb %cl, %cl - movl 44(%esp), %ecx # 4-byte Reload - jne .LBB124_4 -# BB#3: - movl (%esp), %ecx # 4-byte Reload -.LBB124_4: - movl 76(%esp), %eax - movl %ecx, 32(%eax) - movl 40(%esp), %ecx # 4-byte Reload - movl 32(%esp), %edx # 4-byte Reload - movl 48(%esp), %esi # 4-byte Reload - movl 28(%esp), %ebx # 4-byte Reload - jne .LBB124_6 -# BB#5: - movl 4(%esp), %ebx # 4-byte Reload -.LBB124_6: - movl %ebx, 36(%eax) - jne .LBB124_8 -# BB#7: - movl 8(%esp), %esi # 4-byte Reload -.LBB124_8: - movl %esi, 40(%eax) - movl 36(%esp), %esi # 4-byte Reload - jne .LBB124_10 -# BB#9: - movl 12(%esp), %edx # 4-byte Reload -.LBB124_10: - movl %edx, 44(%eax) - movl %edi, 48(%eax) - movl 52(%esp), %edx # 4-byte Reload - jne .LBB124_12 -# BB#11: - movl 16(%esp), %esi # 4-byte Reload -.LBB124_12: - movl %esi, 52(%eax) - jne .LBB124_14 -# BB#13: - movl 20(%esp), %edx # 4-byte Reload -.LBB124_14: - movl %edx, 56(%eax) - jne .LBB124_16 -# BB#15: - movl %ebp, %ecx -.LBB124_16: - movl %ecx, 60(%eax) - addl $56, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end124: - .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L - - .globl mcl_fpDbl_sub8L - .align 16, 0x90 - .type mcl_fpDbl_sub8L,@function -mcl_fpDbl_sub8L: # @mcl_fpDbl_sub8L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $40, %esp - movl 64(%esp), %edi - movl (%edi), %eax - movl 4(%edi), %edx - movl 68(%esp), %ebx - subl (%ebx), %eax - sbbl 4(%ebx), %edx - movl 8(%edi), %esi - sbbl 8(%ebx), %esi - movl 60(%esp), %ecx - movl %eax, (%ecx) - movl 12(%edi), %eax - sbbl 12(%ebx), %eax - movl %edx, 4(%ecx) - movl 16(%edi), %edx - sbbl 16(%ebx), %edx - movl %esi, 8(%ecx) - movl 20(%ebx), %esi - movl %eax, 12(%ecx) - movl 20(%edi), %eax - sbbl %esi, %eax - movl 24(%ebx), %esi - movl %edx, 16(%ecx) - movl 24(%edi), %edx - sbbl %esi, %edx - movl 28(%ebx), %esi - movl %eax, 20(%ecx) - movl 28(%edi), %eax - sbbl %esi, %eax - movl 32(%ebx), %esi - movl %edx, 24(%ecx) - movl 32(%edi), %edx - sbbl %esi, %edx - movl %edx, 16(%esp) # 4-byte Spill - movl 
36(%ebx), %edx - movl %eax, 28(%ecx) - movl 36(%edi), %eax - sbbl %edx, %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 40(%ebx), %eax - movl 40(%edi), %edx - sbbl %eax, %edx - movl %edx, 12(%esp) # 4-byte Spill - movl 44(%ebx), %eax - movl 44(%edi), %edx - sbbl %eax, %edx - movl %edx, 20(%esp) # 4-byte Spill - movl 48(%ebx), %eax - movl 48(%edi), %edx - sbbl %eax, %edx - movl %edx, 24(%esp) # 4-byte Spill - movl 52(%ebx), %eax - movl 52(%edi), %edx - sbbl %eax, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 56(%ebx), %eax - movl 56(%edi), %edx - sbbl %eax, %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 60(%ebx), %eax - movl 60(%edi), %edx - sbbl %eax, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 72(%esp), %ebx - jne .LBB125_1 -# BB#2: - movl $0, 4(%esp) # 4-byte Folded Spill - jmp .LBB125_3 -.LBB125_1: - movl 28(%ebx), %edx - movl %edx, 4(%esp) # 4-byte Spill -.LBB125_3: - testb %al, %al - jne .LBB125_4 -# BB#5: - movl $0, %ebp - movl $0, %eax - jmp .LBB125_6 -.LBB125_4: - movl (%ebx), %eax - movl 4(%ebx), %ebp -.LBB125_6: - jne .LBB125_7 -# BB#8: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB125_9 -.LBB125_7: - movl 24(%ebx), %edx - movl %edx, (%esp) # 4-byte Spill -.LBB125_9: - jne .LBB125_10 -# BB#11: - movl $0, %edx - jmp .LBB125_12 -.LBB125_10: - movl 20(%ebx), %edx -.LBB125_12: - jne .LBB125_13 -# BB#14: - movl $0, %esi - jmp .LBB125_15 -.LBB125_13: - movl 16(%ebx), %esi -.LBB125_15: - jne .LBB125_16 -# BB#17: - movl $0, %edi - jmp .LBB125_18 -.LBB125_16: - movl 12(%ebx), %edi -.LBB125_18: - jne .LBB125_19 -# BB#20: - xorl %ebx, %ebx - jmp .LBB125_21 -.LBB125_19: - movl 8(%ebx), %ebx -.LBB125_21: - addl 16(%esp), %eax # 4-byte Folded Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl %eax, 32(%ecx) - adcl 12(%esp), %ebx # 4-byte Folded Reload - movl %ebp, 36(%ecx) - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %ebx, 40(%ecx) - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %edi, 44(%ecx) - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %esi, 48(%ecx) - movl (%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %edx, 52(%ecx) - movl %eax, 56(%ecx) - movl 4(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%ecx) - addl $40, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end125: - .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L - - .align 16, 0x90 - .type .LmulPv288x32,@function -.LmulPv288x32: # @mulPv288x32 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $56, %esp - movl %edx, %esi - movl 76(%esp), %edi - movl %edi, %eax - mull 32(%esi) - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %edi, %eax - mull 28(%esi) - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %edi, %eax - mull 24(%esi) - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %edi, %eax - mull 20(%esi) - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %edi, %eax - mull 16(%esi) - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %edi, %eax - mull 12(%esi) - movl %edx, 12(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %edi, %eax - mull 8(%esi) - movl %edx, %ebx - movl %eax, 4(%esp) # 4-byte Spill - movl %edi, %eax - mull 4(%esi) - movl %edx, %ebp - movl %eax, (%esp) # 4-byte Spill - movl %edi, %eax - mull (%esi) - movl %eax, (%ecx) - addl (%esp), %edx # 4-byte Folded 
-	.globl	mcl_fp_mulUnitPre9L
-	.align	16, 0x90
-	.type	mcl_fp_mulUnitPre9L,@function
-mcl_fp_mulUnitPre9L:                    # @mcl_fp_mulUnitPre9L
   [... deleted assembly elided: the function materializes the PIC base in %ebx via the .L127$pb call/pop idiom, forwards its scalar argument to .LmulPv288x32, and copies the 10-limb product from the stack to the destination loaded into %eax ...]
-	retl
-.Lfunc_end127:
-	.size	mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L
-
-	.globl	mcl_fpDbl_mulPre9L
-	.align	16, 0x90
-	.type	mcl_fpDbl_mulPre9L,@function
-mcl_fpDbl_mulPre9L:                     # @mcl_fpDbl_mulPre9L
   [... deleted assembly elided: nine .LmulPv288x32 calls, one per limb of the second operand; after each call an adcl chain folds the 10-limb partial product into the double-width result at the matching 4-byte offset, with stack spill slots shuttling the running carries between rounds ...]
-	retl
-.Lfunc_end128:
-	.size	mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L
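`mcl_fp_mulUnitPre9L` is a thin wrapper around `.LmulPv288x32`; `mcl_fpDbl_mulPre9L` then builds the full 18-limb schoolbook product out of nine such rows. A hedged sketch of that accumulation, continuing the hypothetical `mulPv` above (same package and import):

```go
// mulPre sketches mcl_fpDbl_mulPre9L: the full 18-limb product of
// two 9-limb integers, assembled as nine scalar rows, each added
// into the accumulator at its limb offset (the adcl chains above).
func mulPre(x, y *[9]uint32) (z [18]uint32) {
	for i := 0; i < 9; i++ {
		row := mulPv(x, y[i])
		var c uint32
		for j := 0; j < 10; j++ {
			z[i+j], c = bits.Add32(z[i+j], row[j], c)
		}
		// c is always zero here: z[i+9] was still zero before the
		// add and row[9] <= 2^32-2, so the chain cannot overflow.
	}
	return
}
```

The squaring routine that follows has the same shape with both operands equal, which is why the vendored file carries a separate but structurally identical `mcl_fpDbl_sqrPre9L`.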
-	.globl	mcl_fpDbl_sqrPre9L
-	.align	16, 0x90
-	.type	mcl_fpDbl_sqrPre9L,@function
-mcl_fpDbl_sqrPre9L:                     # @mcl_fpDbl_sqrPre9L
   [... deleted assembly elided: the same row-by-row structure as mcl_fpDbl_mulPre9L with both operand pointers equal, so each .LmulPv288x32 call multiplies the input by one of its own limbs before the adcl accumulation ...]
-	retl
-.Lfunc_end129:
-	.size	mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L
-	.globl	mcl_fp_mont9L
-	.align	16, 0x90
-	.type	mcl_fp_mont9L,@function
-mcl_fp_mont9L:                          # @mcl_fp_mont9L
   [... deleted assembly elided: a nine-round interleaved Montgomery multiplication; each round calls .LmulPv288x32 once with a limb of the multiplier and once with q computed by imull against the constant loaded from -4(%eax), folds both 10-limb rows into the accumulator with adcl chains and sbbl/andl $1 carry tracking, then the epilogue performs the trial subtraction of the modulus and the .LBB130_* ladder selects the reduced or unreduced value on the borrow bit ...]
-	retl
-.Lfunc_end130:
-	.size	mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L
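`mcl_fp_mont9L` interleaves multiplication and reduction limb by limb, the CIOS pattern: each round adds x·y[i], cancels the low limb by adding q·p with q = t[0]·rp mod 2^32, and shifts the accumulator down one limb. Here `rp` stands for the per-field constant the asm reads from `-4(%eax)`, which for Montgomery arithmetic is -p^-1 mod 2^32. A sketch under those assumptions, using uint64 intermediates for readability rather than mcl's actual carry chains:

```go
// montMul is an illustrative CIOS Montgomery multiplication over
// nine 32-bit limbs, the shape mcl_fp_mont9L implements with
// .LmulPv288x32 calls and adcl chains. rp = -p^-1 mod 2^32.
func montMul(x, y, p *[9]uint32, rp uint32) (z [9]uint32) {
	const mask = 1<<32 - 1
	var t [11]uint64 // 9 limbs plus two carry words
	for i := 0; i < 9; i++ {
		var c uint64
		for j := 0; j < 9; j++ { // t += x * y[i]
			w := t[j] + uint64(x[j])*uint64(y[i]) + c
			t[j], c = w&mask, w>>32
		}
		w := t[9] + c
		t[9], t[10] = w&mask, w>>32

		q := uint32(t[0]) * rp // chosen so the low limb cancels
		w = t[0] + uint64(q)*uint64(p[0])
		c = w >> 32 // low word is zero by choice of q
		for j := 1; j < 9; j++ { // t = (t + q*p) >> 32, fused
			w = t[j] + uint64(q)*uint64(p[j]) + c
			t[j-1], c = w&mask, w>>32
		}
		w = t[9] + c
		t[8], c = w&mask, w>>32
		t[9] = t[10] + c
	}
	// Trial subtraction and select, as in the .LBB130_* ladder.
	var b uint32
	var r [9]uint32
	for j := 0; j < 9; j++ {
		r[j], b = bits.Sub32(uint32(t[j]), p[j], b)
	}
	_, b = bits.Sub32(uint32(t[9]), 0, b)
	if b != 0 { // borrowed: t < p, keep t
		for j := 0; j < 9; j++ {
			z[j] = uint32(t[j])
		}
		return
	}
	return r
}
```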
-	.globl	mcl_fp_montNF9L
-	.align	16, 0x90
-	.type	mcl_fp_montNF9L,@function
-mcl_fp_montNF9L:                        # @mcl_fp_montNF9L
   [... deleted assembly elided: the same nine-round .LmulPv288x32 structure as mcl_fp_mont9L but without the explicit sbbl-tracked carry word; the final correction subtracts the modulus, turns the top borrow into a sign word with sarl $31, and the .LBB131_* ladder picks the reduced or unreduced limbs with js branches ...]
-	retl
-.Lfunc_end131:
-	.size	mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L
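The NF variant differs mainly in how the final correction is chosen: `mcl_fp_mont9L` keeps an explicit carry word and tests it with `jne`, while `mcl_fp_montNF9L` arithmetic-shifts the borrow of the trial subtraction into a sign word (`sarl $31`) and branches on the sign (`js`). Roughly, and purely as a sketch with hypothetical names:

```go
// selectNF mirrors the sarl $31 / js ladder closing
// mcl_fp_montNF9L: keep t when the trial subtraction t-p
// underflowed (sign bit set), otherwise take t-p.
func selectNF(t, tMinusP [9]uint32, topBorrowWord uint32) [9]uint32 {
	if int32(topBorrowWord) < 0 { // t < p
		return t
	}
	return tMinusP
}
```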
4-byte Reload - adcl 100(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 36(%esp), %edx # 4-byte Reload - adcl 104(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl 108(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl %eax, %edx - movl 828(%esp), %eax - subl (%eax), %edx - sbbl 4(%eax), %ebx - movl %edi, %ecx - sbbl 8(%eax), %ecx - movl 52(%esp), %esi # 4-byte Reload - sbbl 12(%eax), %esi - movl %esi, 16(%esp) # 4-byte Spill - movl 40(%esp), %esi # 4-byte Reload - sbbl 16(%eax), %esi - movl %esi, 20(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - sbbl 20(%eax), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - sbbl 24(%eax), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - sbbl 28(%eax), %esi - movl %esi, 32(%esp) # 4-byte Spill - sbbl 32(%eax), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - sarl $31, %ebp - testl %ebp, %ebp - movl 68(%esp), %eax # 4-byte Reload - js .LBB131_2 -# BB#1: - movl %edx, %eax -.LBB131_2: - movl 816(%esp), %edx - movl %eax, (%edx) - movl 64(%esp), %esi # 4-byte Reload - js .LBB131_4 -# BB#3: - movl %ebx, %esi -.LBB131_4: - movl %esi, 4(%edx) - movl 52(%esp), %ebp # 4-byte Reload - movl 40(%esp), %eax # 4-byte Reload - js .LBB131_6 -# BB#5: - movl %ecx, %edi -.LBB131_6: - movl %edi, 8(%edx) - js .LBB131_8 -# BB#7: - movl 16(%esp), %ebp # 4-byte Reload -.LBB131_8: - movl %ebp, 12(%edx) - js .LBB131_10 -# BB#9: - movl 20(%esp), %eax # 4-byte Reload -.LBB131_10: - movl %eax, 16(%edx) - movl 48(%esp), %eax # 4-byte Reload - js .LBB131_12 -# BB#11: - movl 24(%esp), %eax # 4-byte Reload -.LBB131_12: - movl %eax, 20(%edx) - movl 56(%esp), %eax # 4-byte Reload - js .LBB131_14 -# BB#13: - movl 28(%esp), %eax # 4-byte Reload -.LBB131_14: - movl %eax, 24(%edx) - movl 36(%esp), %eax # 4-byte Reload - js .LBB131_16 -# BB#15: - movl 32(%esp), %eax # 4-byte Reload -.LBB131_16: - movl %eax, 28(%edx) - movl 60(%esp), %eax # 4-byte Reload - js .LBB131_18 -# BB#17: - movl 44(%esp), %eax # 4-byte Reload -.LBB131_18: - movl %eax, 32(%edx) - addl $796, %esp # imm = 0x31C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end131: - .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L - - .globl mcl_fp_montRed9L - .align 16, 0x90 - .type mcl_fp_montRed9L,@function -mcl_fp_montRed9L: # @mcl_fp_montRed9L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $492, %esp # imm = 0x1EC - calll .L132$pb -.L132$pb: - popl %ebx -.Ltmp13: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp13-.L132$pb), %ebx - movl 520(%esp), %edx - movl -4(%edx), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 516(%esp), %eax - movl (%eax), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 4(%eax), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl %esi, %ecx - imull %edi, %ecx - movl 68(%eax), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 64(%eax), %edi - movl %edi, 88(%esp) # 4-byte Spill - movl 60(%eax), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 56(%eax), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 52(%eax), %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 48(%eax), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 44(%eax), %esi - movl %esi, 96(%esp) # 4-byte Spill - movl 40(%eax), %edi - movl %edi, 124(%esp) # 4-byte Spill - movl 36(%eax), %edi - movl %edi, 120(%esp) # 4-byte Spill - movl 32(%eax), %edi - movl %edi, 104(%esp) # 4-byte Spill - movl 28(%eax), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 24(%eax), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 20(%eax), %ebp - movl 
16(%eax), %edi - movl 12(%eax), %esi - movl 8(%eax), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl (%edx), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 32(%edx), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 28(%edx), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%edx), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 20(%edx), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 16(%edx), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 12(%edx), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 8(%edx), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 4(%edx), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl %ecx, (%esp) - leal 448(%esp), %ecx - calll .LmulPv288x32 - movl 76(%esp), %eax # 4-byte Reload - addl 448(%esp), %eax - movl 52(%esp), %ecx # 4-byte Reload - adcl 452(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 460(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - adcl 464(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - adcl 468(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - movl 96(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 116(%esp) # 4-byte Folded Spill - movl 108(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 88(%esp) # 4-byte Folded Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - sbbl %eax, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl %ecx, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 408(%esp), %ecx - movl 520(%esp), %edx - calll .LmulPv288x32 - movl 76(%esp), %eax # 4-byte Reload - andl $1, %eax - movl 52(%esp), %ecx # 4-byte Reload - addl 408(%esp), %ecx - movl 56(%esp), %edx # 4-byte Reload - adcl 412(%esp), %edx - movl 60(%esp), %ecx # 4-byte Reload - adcl 416(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 420(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 424(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 428(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 432(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 436(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 120(%esp), %ecx # 4-byte Reload - adcl 440(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 444(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - adcl $0, %esi - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 108(%esp) # 4-byte Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 88(%esp) # 4-byte Folded Spill - adcl $0, %ebp - adcl $0, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl %edx, %edi - movl %edi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 368(%esp), %ecx - movl 520(%esp), %edx - calll .LmulPv288x32 - 
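
Each reduction round above ends with a call to the .LmulPv288x32 helper, which multiplies a 9-limb (288-bit) operand by a single 32-bit word into a 10-limb result. A minimal Go sketch of that primitive, assuming little-endian uint32 limbs; mulPv288x32 is an illustrative name, not part of mcl's Go FFI:

    package main

    import "math/bits"

    // mulPv288x32 computes z = x * y, where x has nine little-endian
    // 32-bit limbs and y is one word; z gets 9+1 limbs. This mirrors
    // the mull/adcl chain inside the assembly helper.
    func mulPv288x32(x *[9]uint32, y uint32) (z [10]uint32) {
        var carry uint32
        for i := 0; i < 9; i++ {
            hi, lo := bits.Mul32(x[i], y)
            var c uint32
            z[i], c = bits.Add32(lo, carry, 0)
            carry = hi + c // hi <= 2^32-2, so this cannot wrap
        }
        z[9] = carry
        return
    }
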
addl 368(%esp), %edi - movl 60(%esp), %ecx # 4-byte Reload - adcl 372(%esp), %ecx - movl 64(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - adcl 404(%esp), %esi - movl %esi, 96(%esp) # 4-byte Spill - movl 116(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl 108(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 88(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 80(%esp) # 4-byte Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 328(%esp), %ecx - movl 520(%esp), %edx - calll .LmulPv288x32 - addl 328(%esp), %ebp - movl 64(%esp), %ecx # 4-byte Reload - adcl 332(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 364(%esp), %edi - movl %edi, 116(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl 100(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 88(%esp) # 4-byte Folded Spill - movl 80(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 76(%esp) # 4-byte Folded Spill - movl %ecx, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 288(%esp), %ecx - movl 520(%esp), %edx - calll .LmulPv288x32 - movl 64(%esp), %eax # 4-byte Reload - addl 288(%esp), %eax - movl 68(%esp), %ecx # 4-byte Reload - adcl 292(%esp), %ecx - movl 84(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl 
%eax, 108(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 112(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 88(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, %esi - movl %esi, 80(%esp) # 4-byte Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - movl %ecx, %esi - movl %esi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 248(%esp), %ecx - movl 520(%esp), %eax - movl %eax, %edx - calll .LmulPv288x32 - addl 248(%esp), %esi - movl 84(%esp), %ecx # 4-byte Reload - adcl 252(%esp), %ecx - movl 92(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 120(%esp), %ebp # 4-byte Reload - adcl 264(%esp), %ebp - movl 124(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - movl %edi, %esi - adcl $0, %esi - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 76(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %edi - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 208(%esp), %ecx - movl 520(%esp), %edx - calll .LmulPv288x32 - addl 208(%esp), %edi - movl 92(%esp), %ecx # 4-byte Reload - adcl 212(%esp), %ecx - movl 104(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 220(%esp), %ebp - movl %ebp, 120(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 88(%esp) # 4-byte Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl 76(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl %ecx, %eax - movl %ecx, %ebp - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 168(%esp), %ecx - movl 520(%esp), %eax - movl %eax, %edx - calll .LmulPv288x32 - addl 168(%esp), %ebp - movl 104(%esp), %ecx # 4-byte Reload - adcl 172(%esp), %ecx - movl 120(%esp), %eax # 4-byte Reload - adcl 176(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 124(%esp), %ebp # 4-byte Reload - adcl 180(%esp), %ebp - movl 96(%esp), %esi # 4-byte Reload - adcl 184(%esp), %esi - movl 116(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), 
%eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %edi - movl %eax, (%esp) - leal 128(%esp), %ecx - movl 520(%esp), %edx - calll .LmulPv288x32 - addl 128(%esp), %edi - movl 120(%esp), %eax # 4-byte Reload - adcl 132(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl %eax, %edi - adcl 136(%esp), %ebp - movl %ebp, 124(%esp) # 4-byte Spill - adcl 140(%esp), %esi - movl %esi, 96(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 144(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 148(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl %eax, %ebx - movl 112(%esp), %eax # 4-byte Reload - adcl 152(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 156(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 164(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - subl 20(%esp), %edi # 4-byte Folded Reload - movl 124(%esp), %eax # 4-byte Reload - sbbl 16(%esp), %eax # 4-byte Folded Reload - sbbl 24(%esp), %esi # 4-byte Folded Reload - sbbl 28(%esp), %ecx # 4-byte Folded Reload - sbbl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 72(%esp) # 4-byte Spill - movl 112(%esp), %ebx # 4-byte Reload - sbbl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 76(%esp) # 4-byte Spill - movl 100(%esp), %ebx # 4-byte Reload - sbbl 40(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 84(%esp) # 4-byte Spill - movl 88(%esp), %ebx # 4-byte Reload - sbbl 44(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 92(%esp) # 4-byte Spill - movl %edx, %ebx - movl %ebp, %edx - sbbl 48(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 104(%esp) # 4-byte Spill - sbbl $0, %edx - andl $1, %edx - jne .LBB132_2 -# BB#1: - movl %ecx, 116(%esp) # 4-byte Spill -.LBB132_2: - testb %dl, %dl - movl 120(%esp), %ecx # 4-byte Reload - jne .LBB132_4 -# BB#3: - movl %edi, %ecx -.LBB132_4: - movl 512(%esp), %edi - movl %ecx, (%edi) - movl 88(%esp), %ecx # 4-byte Reload - jne .LBB132_6 -# BB#5: - movl %eax, 124(%esp) # 4-byte Spill -.LBB132_6: - movl 124(%esp), %eax # 4-byte Reload - movl %eax, 4(%edi) - movl 96(%esp), %eax # 4-byte Reload - jne .LBB132_8 -# BB#7: - movl %esi, %eax -.LBB132_8: - movl %eax, 8(%edi) - movl 116(%esp), %eax # 4-byte Reload - movl %eax, 12(%edi) - movl 80(%esp), %eax # 4-byte Reload - movl 108(%esp), %ebp # 4-byte Reload - jne .LBB132_10 -# BB#9: - movl 72(%esp), %ebp # 4-byte Reload -.LBB132_10: - movl %ebp, 16(%edi) - movl 112(%esp), %ebx # 4-byte Reload - jne .LBB132_12 -# BB#11: - movl 76(%esp), %ebx # 4-byte Reload -.LBB132_12: - movl %ebx, 20(%edi) - movl 100(%esp), %esi # 4-byte Reload - jne .LBB132_14 -# BB#13: - movl 84(%esp), %esi # 4-byte Reload -.LBB132_14: - movl %esi, 24(%edi) - jne .LBB132_16 -# BB#15: - movl 92(%esp), %ecx # 4-byte Reload -.LBB132_16: - movl %ecx, 28(%edi) - jne .LBB132_18 -# BB#17: - movl 104(%esp), %eax # 4-byte Reload -.LBB132_18: - movl %eax, 32(%edi) - addl $492, %esp # imm = 0x1EC - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end132: - .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L - - .globl mcl_fp_addPre9L - .align 16, 0x90 - .type mcl_fp_addPre9L,@function 
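
mcl_fp_montRed9L, removed above, is word-serial Montgomery reduction: each round picks m = t[0] * nInv mod 2^32 (nInv is the word the code loads from -4(%edx) at entry) so that adding m*p cancels the low limb, then the accumulator is shifted down one word, and a single conditional subtraction finishes the job. A hedged Go sketch of the same computation for nine uint32 limbs; montRed9 and its parameter names are illustrative:

    package main

    import "math/bits"

    const n = 9

    // montRed9 returns t * 2^(-32n) mod p for t < p * 2^(32n).
    // nInv is -p^(-1) mod 2^32, as stored at -4(%edx) by mcl.
    func montRed9(t [2 * n]uint32, p [n]uint32, nInv uint32) [n]uint32 {
        var top uint32 // carry that escapes the most-significant limb
        for i := 0; i < n; i++ {
            m := t[i] * nInv // chosen so t[i] + m*p[0] == 0 mod 2^32
            var carry uint32
            for j := 0; j < n; j++ {
                hi, lo := bits.Mul32(m, p[j])
                s, c1 := bits.Add32(t[i+j], lo, 0)
                s, c2 := bits.Add32(s, carry, 0)
                t[i+j], carry = s, hi+c1+c2 // hi <= 2^32-2: no wrap
            }
            for k := i + n; carry != 0; k++ { // ripple into the upper half
                if k == 2*n {
                    top += carry
                    break
                }
                t[k], carry = bits.Add32(t[k], carry, 0)
            }
        }
        var r, d [n]uint32
        copy(r[:], t[n:]) // the low n limbs are now zero; keep the high half
        var borrow uint32
        for i := 0; i < n; i++ {
            d[i], borrow = bits.Sub32(r[i], p[i], borrow)
        }
        if top != 0 || borrow == 0 { // one conditional subtraction into [0, p)
            r = d
        }
        return r
    }
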
-mcl_fp_addPre9L: # @mcl_fp_addPre9L -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - movl 20(%esp), %ecx - addl (%ecx), %edx - adcl 4(%ecx), %esi - movl 8(%eax), %edi - adcl 8(%ecx), %edi - movl 16(%esp), %ebx - movl %edx, (%ebx) - movl 12(%ecx), %edx - movl %esi, 4(%ebx) - movl 16(%ecx), %esi - adcl 12(%eax), %edx - adcl 16(%eax), %esi - movl %edi, 8(%ebx) - movl 20(%eax), %edi - movl %edx, 12(%ebx) - movl 20(%ecx), %edx - adcl %edi, %edx - movl 24(%eax), %edi - movl %esi, 16(%ebx) - movl 24(%ecx), %esi - adcl %edi, %esi - movl 28(%eax), %edi - movl %edx, 20(%ebx) - movl 28(%ecx), %edx - adcl %edi, %edx - movl %esi, 24(%ebx) - movl %edx, 28(%ebx) - movl 32(%eax), %eax - movl 32(%ecx), %ecx - adcl %eax, %ecx - movl %ecx, 32(%ebx) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end133: - .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L - - .globl mcl_fp_subPre9L - .align 16, 0x90 - .type mcl_fp_subPre9L,@function -mcl_fp_subPre9L: # @mcl_fp_subPre9L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edi - xorl %eax, %eax - movl 28(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%ecx), %ebx - sbbl 8(%edx), %ebx - movl 20(%esp), %ebp - movl %esi, (%ebp) - movl 12(%ecx), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ebp) - movl 16(%ecx), %edi - sbbl 16(%edx), %edi - movl %ebx, 8(%ebp) - movl 20(%edx), %ebx - movl %esi, 12(%ebp) - movl 20(%ecx), %esi - sbbl %ebx, %esi - movl 24(%edx), %ebx - movl %edi, 16(%ebp) - movl 24(%ecx), %edi - sbbl %ebx, %edi - movl 28(%edx), %ebx - movl %esi, 20(%ebp) - movl 28(%ecx), %esi - sbbl %ebx, %esi - movl %edi, 24(%ebp) - movl %esi, 28(%ebp) - movl 32(%edx), %edx - movl 32(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 32(%ebp) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end134: - .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L - - .globl mcl_fp_shr1_9L - .align 16, 0x90 - .type mcl_fp_shr1_9L,@function -mcl_fp_shr1_9L: # @mcl_fp_shr1_9L -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - shrdl $1, %edx, %ecx - movl 8(%esp), %esi - movl %ecx, (%esi) - movl 8(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 4(%esi) - movl 12(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 8(%esi) - movl 16(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 12(%esi) - movl 20(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 16(%esi) - movl 24(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 20(%esi) - movl 28(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 24(%esi) - movl 32(%eax), %eax - shrdl $1, %eax, %edx - movl %edx, 28(%esi) - shrl %eax - movl %eax, 32(%esi) - popl %esi - retl -.Lfunc_end135: - .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L - - .globl mcl_fp_add9L - .align 16, 0x90 - .type mcl_fp_add9L,@function -mcl_fp_add9L: # @mcl_fp_add9L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $20, %esp - movl 48(%esp), %edi - movl (%edi), %ecx - movl 4(%edi), %eax - movl 44(%esp), %ebx - addl (%ebx), %ecx - movl %ecx, %ebp - adcl 4(%ebx), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 8(%edi), %eax - adcl 8(%ebx), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 12(%ebx), %ecx - movl 16(%ebx), %eax - adcl 12(%edi), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - adcl 16(%edi), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 20(%ebx), %esi - adcl 20(%edi), %esi - movl 24(%ebx), %edx - adcl 
24(%edi), %edx - movl 28(%ebx), %ecx - adcl 28(%edi), %ecx - movl 32(%ebx), %eax - adcl 32(%edi), %eax - movl 40(%esp), %edi - movl %ebp, (%edi) - movl 16(%esp), %ebx # 4-byte Reload - movl %ebx, 4(%edi) - movl 12(%esp), %ebx # 4-byte Reload - movl %ebx, 8(%edi) - movl 8(%esp), %ebx # 4-byte Reload - movl %ebx, 12(%edi) - movl 4(%esp), %ebx # 4-byte Reload - movl %ebx, 16(%edi) - movl %esi, 20(%edi) - movl %edx, 24(%edi) - movl %ecx, 28(%edi) - movl %eax, 32(%edi) - sbbl %ebx, %ebx - andl $1, %ebx - movl 52(%esp), %edi - subl (%edi), %ebp - movl %ebp, (%esp) # 4-byte Spill - movl 16(%esp), %ebp # 4-byte Reload - sbbl 4(%edi), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 12(%esp), %ebp # 4-byte Reload - sbbl 8(%edi), %ebp - movl %ebp, 12(%esp) # 4-byte Spill - movl 8(%esp), %ebp # 4-byte Reload - sbbl 12(%edi), %ebp - movl %ebp, 8(%esp) # 4-byte Spill - movl 4(%esp), %ebp # 4-byte Reload - sbbl 16(%edi), %ebp - sbbl 20(%edi), %esi - sbbl 24(%edi), %edx - sbbl 28(%edi), %ecx - sbbl 32(%edi), %eax - sbbl $0, %ebx - testb $1, %bl - jne .LBB136_2 -# BB#1: # %nocarry - movl (%esp), %edi # 4-byte Reload - movl 40(%esp), %ebx - movl %edi, (%ebx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 4(%ebx) - movl 12(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl 8(%esp), %edi # 4-byte Reload - movl %edi, 12(%ebx) - movl %ebp, 16(%ebx) - movl %esi, 20(%ebx) - movl %edx, 24(%ebx) - movl %ecx, 28(%ebx) - movl %eax, 32(%ebx) -.LBB136_2: # %carry - addl $20, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end136: - .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L - - .globl mcl_fp_addNF9L - .align 16, 0x90 - .type mcl_fp_addNF9L,@function -mcl_fp_addNF9L: # @mcl_fp_addNF9L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $72, %esp - movl 100(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edi - movl 96(%esp), %esi - addl (%esi), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl 4(%esi), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 32(%eax), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 28(%eax), %ebp - movl 24(%eax), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 20(%eax), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 16(%eax), %ebx - movl 12(%eax), %edx - movl 8(%eax), %ecx - adcl 8(%esi), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - adcl 12(%esi), %edx - movl %edx, 48(%esp) # 4-byte Spill - adcl 16(%esi), %ebx - movl %ebx, 52(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 20(%esi), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 24(%esi), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 28(%esi), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 32(%esi), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 104(%esp), %esi - movl 36(%esp), %eax # 4-byte Reload - movl %eax, %ebp - subl (%esi), %ebp - movl %ebp, (%esp) # 4-byte Spill - sbbl 4(%esi), %edi - movl %edi, 4(%esp) # 4-byte Spill - sbbl 8(%esi), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - sbbl 12(%esi), %edx - movl %edx, 12(%esp) # 4-byte Spill - sbbl 16(%esi), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 64(%esp), %ebx # 4-byte Reload - sbbl 20(%esi), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - sbbl 24(%esi), %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - sbbl 28(%esi), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - movl %ecx, %edx - movl %ecx, %ebp - sbbl 32(%esi), %edx - movl %edx, 
32(%esp) # 4-byte Spill - movl %edx, %esi - sarl $31, %esi - testl %esi, %esi - js .LBB137_2 -# BB#1: - movl (%esp), %eax # 4-byte Reload -.LBB137_2: - movl 92(%esp), %ecx - movl %eax, (%ecx) - movl 44(%esp), %eax # 4-byte Reload - js .LBB137_4 -# BB#3: - movl 4(%esp), %eax # 4-byte Reload -.LBB137_4: - movl %eax, 4(%ecx) - movl 68(%esp), %esi # 4-byte Reload - movl 64(%esp), %edi # 4-byte Reload - movl 52(%esp), %ebx # 4-byte Reload - movl 48(%esp), %edx # 4-byte Reload - movl 40(%esp), %eax # 4-byte Reload - js .LBB137_6 -# BB#5: - movl 8(%esp), %eax # 4-byte Reload -.LBB137_6: - movl %eax, 8(%ecx) - movl %ebp, %eax - js .LBB137_8 -# BB#7: - movl 12(%esp), %edx # 4-byte Reload -.LBB137_8: - movl %edx, 12(%ecx) - movl 56(%esp), %edx # 4-byte Reload - js .LBB137_10 -# BB#9: - movl 16(%esp), %ebx # 4-byte Reload -.LBB137_10: - movl %ebx, 16(%ecx) - js .LBB137_12 -# BB#11: - movl 20(%esp), %edi # 4-byte Reload -.LBB137_12: - movl %edi, 20(%ecx) - js .LBB137_14 -# BB#13: - movl 24(%esp), %esi # 4-byte Reload -.LBB137_14: - movl %esi, 24(%ecx) - js .LBB137_16 -# BB#15: - movl 28(%esp), %edx # 4-byte Reload -.LBB137_16: - movl %edx, 28(%ecx) - js .LBB137_18 -# BB#17: - movl 32(%esp), %eax # 4-byte Reload -.LBB137_18: - movl %eax, 32(%ecx) - addl $72, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end137: - .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L - - .globl mcl_fp_sub9L - .align 16, 0x90 - .type mcl_fp_sub9L,@function -mcl_fp_sub9L: # @mcl_fp_sub9L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $28, %esp - movl 52(%esp), %esi - movl (%esi), %eax - movl 4(%esi), %ecx - xorl %ebx, %ebx - movl 56(%esp), %edi - subl (%edi), %eax - movl %eax, 12(%esp) # 4-byte Spill - sbbl 4(%edi), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 8(%esi), %eax - sbbl 8(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 12(%esi), %eax - sbbl 12(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 16(%esi), %edx - sbbl 16(%edi), %edx - movl %edx, 8(%esp) # 4-byte Spill - movl 20(%esi), %ecx - sbbl 20(%edi), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 24(%esi), %eax - sbbl 24(%edi), %eax - movl %eax, (%esp) # 4-byte Spill - movl 28(%esi), %ebp - sbbl 28(%edi), %ebp - movl 32(%esi), %esi - sbbl 32(%edi), %esi - sbbl $0, %ebx - testb $1, %bl - movl 48(%esp), %ebx - movl 12(%esp), %edi # 4-byte Reload - movl %edi, (%ebx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 4(%ebx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 12(%ebx) - movl %edx, 16(%ebx) - movl %ecx, 20(%ebx) - movl %eax, 24(%ebx) - movl %ebp, 28(%ebx) - movl %esi, 32(%ebx) - je .LBB138_2 -# BB#1: # %carry - movl %esi, %edi - movl 60(%esp), %esi - movl 12(%esp), %ecx # 4-byte Reload - addl (%esi), %ecx - movl %ecx, (%ebx) - movl 16(%esp), %edx # 4-byte Reload - adcl 4(%esi), %edx - movl %edx, 4(%ebx) - movl 20(%esp), %ecx # 4-byte Reload - adcl 8(%esi), %ecx - movl 12(%esi), %eax - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %ecx, 8(%ebx) - movl 16(%esi), %ecx - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %eax, 12(%ebx) - movl 20(%esi), %eax - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %ecx, 16(%ebx) - movl 24(%esi), %ecx - adcl (%esp), %ecx # 4-byte Folded Reload - movl %eax, 20(%ebx) - movl %ecx, 24(%ebx) - movl 28(%esi), %eax - adcl %ebp, %eax - movl %eax, 28(%ebx) - movl 32(%esi), %eax - adcl %edi, %eax - movl %eax, 32(%ebx) -.LBB138_2: # %nocarry - addl $28, %esp - popl %esi - popl %edi - popl %ebx - popl 
%ebp - retl -.Lfunc_end138: - .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L - - .globl mcl_fp_subNF9L - .align 16, 0x90 - .type mcl_fp_subNF9L,@function -mcl_fp_subNF9L: # @mcl_fp_subNF9L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $48, %esp - movl 72(%esp), %ecx - movl (%ecx), %edx - movl 4(%ecx), %eax - movl 76(%esp), %esi - subl (%esi), %edx - movl %edx, 32(%esp) # 4-byte Spill - sbbl 4(%esi), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%ecx), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 28(%ecx), %edx - movl 24(%ecx), %edi - movl 20(%ecx), %ebx - movl 16(%ecx), %ebp - movl 12(%ecx), %eax - movl 8(%ecx), %ecx - movl 76(%esp), %esi - sbbl 8(%esi), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 76(%esp), %ecx - sbbl 12(%ecx), %eax - movl %eax, 16(%esp) # 4-byte Spill - sbbl 16(%ecx), %ebp - movl %ebp, 20(%esp) # 4-byte Spill - sbbl 20(%ecx), %ebx - movl %ebx, 24(%esp) # 4-byte Spill - sbbl 24(%ecx), %edi - movl %edi, 28(%esp) # 4-byte Spill - sbbl 28(%ecx), %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - sbbl 32(%ecx), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl %edx, %ecx - sarl $31, %ecx - movl %ecx, %eax - shldl $1, %edx, %eax - movl 80(%esp), %ebp - movl 12(%ebp), %edx - andl %eax, %edx - movl %edx, (%esp) # 4-byte Spill - movl 4(%ebp), %edi - andl %eax, %edi - andl (%ebp), %eax - movl 32(%ebp), %edx - andl %ecx, %edx - movl %edx, 8(%esp) # 4-byte Spill - movl 28(%ebp), %edx - andl %ecx, %edx - movl %edx, 4(%esp) # 4-byte Spill - roll %ecx - movl 24(%ebp), %ebx - andl %ecx, %ebx - movl 20(%ebp), %esi - andl %ecx, %esi - movl 16(%ebp), %edx - andl %ecx, %edx - andl 8(%ebp), %ecx - addl 32(%esp), %eax # 4-byte Folded Reload - adcl 36(%esp), %edi # 4-byte Folded Reload - movl 68(%esp), %ebp - movl %eax, (%ebp) - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %edi, 4(%ebp) - movl (%esp), %eax # 4-byte Reload - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %ecx, 8(%ebp) - adcl 20(%esp), %edx # 4-byte Folded Reload - movl %eax, 12(%ebp) - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %edx, 16(%ebp) - adcl 28(%esp), %ebx # 4-byte Folded Reload - movl %esi, 20(%ebp) - movl 4(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %ebx, 24(%ebp) - movl %eax, 28(%ebp) - movl 8(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%ebp) - addl $48, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end139: - .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L - - .globl mcl_fpDbl_add9L - .align 16, 0x90 - .type mcl_fpDbl_add9L,@function -mcl_fpDbl_add9L: # @mcl_fpDbl_add9L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $68, %esp - movl 96(%esp), %edx - movl 92(%esp), %edi - movl 12(%edi), %esi - movl 16(%edi), %ecx - movl 8(%edx), %ebx - movl (%edx), %ebp - addl (%edi), %ebp - movl 88(%esp), %eax - movl %ebp, (%eax) - movl 4(%edx), %ebp - adcl 4(%edi), %ebp - adcl 8(%edi), %ebx - adcl 12(%edx), %esi - adcl 16(%edx), %ecx - movl %ebp, 4(%eax) - movl 44(%edx), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl %ebx, 8(%eax) - movl 20(%edx), %ebx - movl %esi, 12(%eax) - movl 20(%edi), %esi - adcl %ebx, %esi - movl 24(%edx), %ebx - movl %ecx, 16(%eax) - movl 24(%edi), %ecx - adcl %ebx, %ecx - movl 28(%edx), %ebx - movl %esi, 20(%eax) - movl 28(%edi), %esi - adcl %ebx, %esi - movl 32(%edx), %ebx - movl %ecx, 24(%eax) - movl 32(%edi), %ecx - adcl %ebx, %ecx - movl 36(%edx), %ebp - movl %esi, 28(%eax) - movl 36(%edi), %esi 
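
The routines removed just above are the 9-limb field add/sub family: mcl_fp_add9L adds and then subtracts p once if the sum reached it, mcl_fp_sub9L subtracts and adds p back when a borrow escapes, and the NF variants make the same choice branchlessly from the top word's sign (the sarl $31 masks). A minimal Go sketch of the add/sub pair, assuming nine uint32 limbs and inputs already below p; fpAdd9 and fpSub9 are illustrative names:

    package main

    import "math/bits"

    const n = 9

    // fpAdd9 returns (x + y) mod p: a full adcl-style chain, then one
    // conditional subtraction, matching mcl_fp_add9L's nocarry/carry split.
    func fpAdd9(x, y, p *[n]uint32) (z [n]uint32) {
        var carry uint32
        for i := 0; i < n; i++ {
            z[i], carry = bits.Add32(x[i], y[i], carry)
        }
        var d [n]uint32
        var borrow uint32
        for i := 0; i < n; i++ {
            d[i], borrow = bits.Sub32(z[i], p[i], borrow)
        }
        if carry != 0 || borrow == 0 { // sum >= p: keep the reduced copy
            z = d
        }
        return
    }

    // fpSub9 returns (x - y) mod p: subtract, then add p back if a borrow
    // escaped, matching mcl_fp_sub9L's sbbl chain and its carry branch.
    func fpSub9(x, y, p *[n]uint32) (z [n]uint32) {
        var borrow uint32
        for i := 0; i < n; i++ {
            z[i], borrow = bits.Sub32(x[i], y[i], borrow)
        }
        if borrow != 0 { // x < y: wrap around by adding the modulus once
            var carry uint32
            for i := 0; i < n; i++ {
                z[i], carry = bits.Add32(z[i], p[i], carry)
            }
        }
        return
    }
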
- adcl %ebp, %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 40(%edx), %esi - movl %ecx, 32(%eax) - movl 40(%edi), %eax - adcl %esi, %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%edi), %eax - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%edx), %ecx - movl 48(%edi), %ebx - adcl %ecx, %ebx - movl %ebx, 36(%esp) # 4-byte Spill - movl 52(%edx), %eax - movl 52(%edi), %ecx - adcl %eax, %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 56(%edx), %esi - movl 56(%edi), %eax - adcl %esi, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%edx), %ebp - movl 60(%edi), %esi - adcl %ebp, %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 64(%edx), %eax - movl 64(%edi), %ebp - adcl %eax, %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 68(%edx), %edx - movl 68(%edi), %eax - adcl %edx, %eax - movl %eax, 32(%esp) # 4-byte Spill - sbbl %edx, %edx - andl $1, %edx - movl 100(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - subl (%edi), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - sbbl 4(%edi), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - sbbl 8(%edi), %eax - movl %eax, 8(%esp) # 4-byte Spill - sbbl 12(%edi), %ebx - movl %ebx, 4(%esp) # 4-byte Spill - sbbl 16(%edi), %ecx - movl %ecx, (%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - sbbl 20(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - sbbl 24(%edi), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl %ebp, %eax - movl 32(%esp), %ebp # 4-byte Reload - sbbl 28(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl %ebp, %ebx - sbbl 32(%edi), %ebx - sbbl $0, %edx - andl $1, %edx - jne .LBB140_2 -# BB#1: - movl %ebx, %ebp -.LBB140_2: - testb %dl, %dl - movl 60(%esp), %edx # 4-byte Reload - movl 40(%esp), %ecx # 4-byte Reload - movl 36(%esp), %esi # 4-byte Reload - movl 56(%esp), %edi # 4-byte Reload - movl 52(%esp), %ebx # 4-byte Reload - jne .LBB140_4 -# BB#3: - movl (%esp), %ecx # 4-byte Reload - movl 4(%esp), %esi # 4-byte Reload - movl 8(%esp), %edi # 4-byte Reload - movl 12(%esp), %ebx # 4-byte Reload - movl 16(%esp), %edx # 4-byte Reload -.LBB140_4: - movl 88(%esp), %eax - movl %edx, 36(%eax) - movl %ebx, 40(%eax) - movl %edi, 44(%eax) - movl %esi, 48(%eax) - movl %ecx, 52(%eax) - movl 44(%esp), %edx # 4-byte Reload - movl 64(%esp), %ecx # 4-byte Reload - jne .LBB140_6 -# BB#5: - movl 20(%esp), %ecx # 4-byte Reload -.LBB140_6: - movl %ecx, 56(%eax) - movl 48(%esp), %ecx # 4-byte Reload - jne .LBB140_8 -# BB#7: - movl 24(%esp), %edx # 4-byte Reload -.LBB140_8: - movl %edx, 60(%eax) - jne .LBB140_10 -# BB#9: - movl 28(%esp), %ecx # 4-byte Reload -.LBB140_10: - movl %ecx, 64(%eax) - movl %ebp, 68(%eax) - addl $68, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end140: - .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L - - .globl mcl_fpDbl_sub9L - .align 16, 0x90 - .type mcl_fpDbl_sub9L,@function -mcl_fpDbl_sub9L: # @mcl_fpDbl_sub9L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $52, %esp - movl 76(%esp), %ebx - movl (%ebx), %eax - movl 4(%ebx), %edx - movl 80(%esp), %ebp - subl (%ebp), %eax - sbbl 4(%ebp), %edx - movl 8(%ebx), %esi - sbbl 8(%ebp), %esi - movl 72(%esp), %ecx - movl %eax, (%ecx) - movl 12(%ebx), %eax - sbbl 12(%ebp), %eax - movl %edx, 4(%ecx) - movl 16(%ebx), %edx - sbbl 16(%ebp), %edx - movl %esi, 8(%ecx) - movl 20(%ebp), %esi - movl %eax, 12(%ecx) - movl 20(%ebx), %eax - sbbl %esi, %eax - movl 24(%ebp), %esi - movl %edx, 16(%ecx) - movl 24(%ebx), %edx - sbbl 
%esi, %edx - movl 28(%ebp), %esi - movl %eax, 20(%ecx) - movl 28(%ebx), %eax - sbbl %esi, %eax - movl 32(%ebp), %esi - movl %edx, 24(%ecx) - movl 32(%ebx), %edx - sbbl %esi, %edx - movl 36(%ebp), %esi - movl %eax, 28(%ecx) - movl 36(%ebx), %eax - sbbl %esi, %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%ebp), %eax - movl %edx, 32(%ecx) - movl 40(%ebx), %edx - sbbl %eax, %edx - movl %edx, 16(%esp) # 4-byte Spill - movl 44(%ebp), %eax - movl 44(%ebx), %edx - sbbl %eax, %edx - movl %edx, 20(%esp) # 4-byte Spill - movl 48(%ebp), %eax - movl 48(%ebx), %edx - sbbl %eax, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 52(%ebp), %eax - movl 52(%ebx), %edx - sbbl %eax, %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 56(%ebp), %eax - movl 56(%ebx), %edx - sbbl %eax, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 60(%ebp), %eax - movl 60(%ebx), %edx - sbbl %eax, %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 64(%ebp), %eax - movl 64(%ebx), %edx - sbbl %eax, %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 68(%ebp), %eax - movl 68(%ebx), %edx - sbbl %eax, %edx - movl %edx, 48(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 84(%esp), %ebp - jne .LBB141_1 -# BB#2: - movl $0, 12(%esp) # 4-byte Folded Spill - jmp .LBB141_3 -.LBB141_1: - movl 32(%ebp), %edx - movl %edx, 12(%esp) # 4-byte Spill -.LBB141_3: - testb %al, %al - jne .LBB141_4 -# BB#5: - movl $0, 4(%esp) # 4-byte Folded Spill - movl $0, %esi - jmp .LBB141_6 -.LBB141_4: - movl (%ebp), %esi - movl 4(%ebp), %eax - movl %eax, 4(%esp) # 4-byte Spill -.LBB141_6: - jne .LBB141_7 -# BB#8: - movl $0, 8(%esp) # 4-byte Folded Spill - jmp .LBB141_9 -.LBB141_7: - movl 28(%ebp), %eax - movl %eax, 8(%esp) # 4-byte Spill -.LBB141_9: - jne .LBB141_10 -# BB#11: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB141_12 -.LBB141_10: - movl 24(%ebp), %eax - movl %eax, (%esp) # 4-byte Spill -.LBB141_12: - jne .LBB141_13 -# BB#14: - movl $0, %edi - jmp .LBB141_15 -.LBB141_13: - movl 20(%ebp), %edi -.LBB141_15: - jne .LBB141_16 -# BB#17: - movl $0, %ebx - jmp .LBB141_18 -.LBB141_16: - movl 16(%ebp), %ebx -.LBB141_18: - jne .LBB141_19 -# BB#20: - movl %ebp, %eax - movl $0, %ebp - jmp .LBB141_21 -.LBB141_19: - movl %ebp, %eax - movl 12(%eax), %ebp -.LBB141_21: - jne .LBB141_22 -# BB#23: - xorl %eax, %eax - jmp .LBB141_24 -.LBB141_22: - movl 8(%eax), %eax -.LBB141_24: - addl 24(%esp), %esi # 4-byte Folded Reload - movl 4(%esp), %edx # 4-byte Reload - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %esi, 36(%ecx) - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %edx, 40(%ecx) - adcl 28(%esp), %ebp # 4-byte Folded Reload - movl %eax, 44(%ecx) - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebp, 48(%ecx) - adcl 36(%esp), %edi # 4-byte Folded Reload - movl %ebx, 52(%ecx) - movl (%esp), %edx # 4-byte Reload - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %edi, 56(%ecx) - movl 8(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %edx, 60(%ecx) - movl %eax, 64(%ecx) - movl 12(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%ecx) - addl $52, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end141: - .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L - - .align 16, 0x90 - .type .LmulPv320x32,@function -.LmulPv320x32: # @mulPv320x32 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $64, %esp - movl %edx, %esi - movl 84(%esp), %edi - movl %edi, %eax - mull 36(%esi) - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 56(%esp) 
# 4-byte Spill - movl %edi, %eax - mull 32(%esi) - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %edi, %eax - mull 28(%esi) - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %edi, %eax - mull 24(%esi) - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %edi, %eax - mull 20(%esi) - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %edi, %eax - mull 16(%esi) - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %edi, %eax - mull 12(%esi) - movl %edx, 12(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %edi, %eax - mull 8(%esi) - movl %edx, %ebp - movl %eax, 4(%esp) # 4-byte Spill - movl %edi, %eax - mull 4(%esi) - movl %edx, %ebx - movl %eax, (%esp) # 4-byte Spill - movl %edi, %eax - mull (%esi) - movl %eax, (%ecx) - addl (%esp), %edx # 4-byte Folded Reload - movl %edx, 4(%ecx) - adcl 4(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 8(%ecx) - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 12(%ecx) - movl 12(%esp), %eax # 4-byte Reload - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 16(%ecx) - movl 20(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%ecx) - movl 28(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%ecx) - movl 36(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%ecx) - movl 44(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%ecx) - movl 52(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%ecx) - movl 60(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 40(%ecx) - movl %ecx, %eax - addl $64, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end142: - .size .LmulPv320x32, .Lfunc_end142-.LmulPv320x32 - - .globl mcl_fp_mulUnitPre10L - .align 16, 0x90 - .type mcl_fp_mulUnitPre10L,@function -mcl_fp_mulUnitPre10L: # @mcl_fp_mulUnitPre10L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $76, %esp - calll .L143$pb -.L143$pb: - popl %ebx -.Ltmp14: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp14-.L143$pb), %ebx - movl 104(%esp), %eax - movl %eax, (%esp) - leal 32(%esp), %ecx - movl 100(%esp), %edx - calll .LmulPv320x32 - movl 72(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 68(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 64(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 60(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 56(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 52(%esp), %ebx - movl 48(%esp), %ebp - movl 44(%esp), %edi - movl 40(%esp), %esi - movl 32(%esp), %edx - movl 36(%esp), %ecx - movl 96(%esp), %eax - movl %edx, (%eax) - movl %ecx, 4(%eax) - movl %esi, 8(%eax) - movl %edi, 12(%eax) - movl %ebp, 16(%eax) - movl %ebx, 20(%eax) - movl 12(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 40(%eax) - addl $76, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end143: - .size mcl_fp_mulUnitPre10L, .Lfunc_end143-mcl_fp_mulUnitPre10L - - .globl mcl_fpDbl_mulPre10L - .align 16, 0x90 - .type mcl_fpDbl_mulPre10L,@function -mcl_fpDbl_mulPre10L: # @mcl_fpDbl_mulPre10L -# BB#0: - pushl 
%ebp - movl %esp, %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $188, %esp - calll .L144$pb -.L144$pb: - popl %ebx -.Ltmp15: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp15-.L144$pb), %ebx - movl %ebx, -128(%ebp) # 4-byte Spill - movl 16(%ebp), %edi - movl %edi, 8(%esp) - movl 12(%ebp), %esi - movl %esi, 4(%esp) - movl 8(%ebp), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre5L@PLT - leal 20(%edi), %eax - movl %eax, 8(%esp) - leal 20(%esi), %eax - movl %eax, 4(%esp) - movl 8(%ebp), %eax - leal 40(%eax), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre5L@PLT - movl 28(%esi), %edi - movl (%esi), %ebx - movl 4(%esi), %eax - addl 20(%esi), %ebx - movl %ebx, -148(%ebp) # 4-byte Spill - adcl 24(%esi), %eax - movl %eax, -132(%ebp) # 4-byte Spill - adcl 8(%esi), %edi - movl %edi, -140(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %eax - movl %eax, -96(%ebp) # 4-byte Spill - movl 16(%ebp), %esi - movl (%esi), %eax - movl 4(%esi), %ecx - addl 20(%esi), %eax - movl %eax, -152(%ebp) # 4-byte Spill - adcl 24(%esi), %ecx - movl %ecx, -120(%ebp) # 4-byte Spill - movl 28(%esi), %eax - adcl 8(%esi), %eax - movl %eax, -160(%ebp) # 4-byte Spill - movl 32(%esi), %eax - adcl 12(%esi), %eax - movl 36(%esi), %ecx - adcl 16(%esi), %ecx - pushl %eax - seto %al - lahf - movl %eax, %esi - popl %eax - movl %esi, -156(%ebp) # 4-byte Spill - movl %ebx, -124(%ebp) # 4-byte Spill - jb .LBB144_2 -# BB#1: - xorl %edi, %edi - movl $0, -124(%ebp) # 4-byte Folded Spill -.LBB144_2: - movl %edi, -136(%ebp) # 4-byte Spill - movl 12(%ebp), %esi - movl %esi, %ebx - movl 36(%ebx), %esi - movl 32(%ebx), %edi - movl -96(%ebp), %edx # 4-byte Reload - pushl %eax - movl %edx, %eax - addb $127, %al - sahf - popl %eax - adcl 12(%ebx), %edi - movl %edi, -116(%ebp) # 4-byte Spill - adcl 16(%ebx), %esi - movl %esi, -144(%ebp) # 4-byte Spill - movl %ecx, -112(%ebp) # 4-byte Spill - movl %eax, -104(%ebp) # 4-byte Spill - movl -160(%ebp), %edx # 4-byte Reload - movl %edx, -108(%ebp) # 4-byte Spill - movl -120(%ebp), %esi # 4-byte Reload - movl %esi, -96(%ebp) # 4-byte Spill - movl -152(%ebp), %ebx # 4-byte Reload - movl %ebx, -100(%ebp) # 4-byte Spill - jb .LBB144_4 -# BB#3: - movl $0, -112(%ebp) # 4-byte Folded Spill - movl $0, -104(%ebp) # 4-byte Folded Spill - movl $0, -108(%ebp) # 4-byte Folded Spill - movl $0, -96(%ebp) # 4-byte Folded Spill - movl $0, -100(%ebp) # 4-byte Folded Spill -.LBB144_4: - movl -148(%ebp), %esi # 4-byte Reload - movl %esi, -72(%ebp) - movl -132(%ebp), %edi # 4-byte Reload - movl %edi, -68(%ebp) - movl -140(%ebp), %esi # 4-byte Reload - movl %esi, -64(%ebp) - movl %ebx, -92(%ebp) - movl -120(%ebp), %esi # 4-byte Reload - movl %esi, -88(%ebp) - movl %edx, -84(%ebp) - movl %eax, -80(%ebp) - movl %ecx, -76(%ebp) - sbbl %edx, %edx - movl -116(%ebp), %eax # 4-byte Reload - movl %eax, -60(%ebp) - movl -144(%ebp), %ebx # 4-byte Reload - movl %ebx, -56(%ebp) - movl -156(%ebp), %ecx # 4-byte Reload - pushl %eax - movl %ecx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB144_6 -# BB#5: - movl $0, %ebx - movl $0, %eax - movl $0, %edi -.LBB144_6: - movl %eax, -116(%ebp) # 4-byte Spill - sbbl %eax, %eax - leal -92(%ebp), %ecx - movl %ecx, 8(%esp) - leal -72(%ebp), %ecx - movl %ecx, 4(%esp) - leal -52(%ebp), %ecx - movl %ecx, (%esp) - andl %eax, %edx - movl -124(%ebp), %eax # 4-byte Reload - addl %eax, -100(%ebp) # 4-byte Folded Spill - adcl %edi, -96(%ebp) # 4-byte Folded Spill - movl -108(%ebp), %esi # 4-byte Reload - adcl -136(%ebp), %esi # 4-byte Folded Reload - movl -116(%ebp), %eax # 4-byte Reload - adcl %eax, 
-104(%ebp) # 4-byte Folded Spill - movl -112(%ebp), %edi # 4-byte Reload - adcl %ebx, %edi - sbbl %eax, %eax - andl $1, %eax - movl %eax, -120(%ebp) # 4-byte Spill - andl $1, %edx - movl %edx, -116(%ebp) # 4-byte Spill - movl -128(%ebp), %ebx # 4-byte Reload - calll mcl_fpDbl_mulPre5L@PLT - movl -100(%ebp), %eax # 4-byte Reload - addl -32(%ebp), %eax - movl %eax, -100(%ebp) # 4-byte Spill - movl -96(%ebp), %eax # 4-byte Reload - adcl -28(%ebp), %eax - movl %eax, -96(%ebp) # 4-byte Spill - adcl -24(%ebp), %esi - movl %esi, -108(%ebp) # 4-byte Spill - movl -104(%ebp), %eax # 4-byte Reload - adcl -20(%ebp), %eax - movl %eax, -104(%ebp) # 4-byte Spill - adcl -16(%ebp), %edi - movl %edi, -112(%ebp) # 4-byte Spill - movl -120(%ebp), %eax # 4-byte Reload - adcl %eax, -116(%ebp) # 4-byte Folded Spill - movl -52(%ebp), %ecx - movl 8(%ebp), %esi - subl (%esi), %ecx - movl -48(%ebp), %ebx - sbbl 4(%esi), %ebx - movl -44(%ebp), %eax - sbbl 8(%esi), %eax - movl %eax, -120(%ebp) # 4-byte Spill - movl -40(%ebp), %edx - sbbl 12(%esi), %edx - movl -36(%ebp), %edi - sbbl 16(%esi), %edi - movl 20(%esi), %eax - movl %eax, -124(%ebp) # 4-byte Spill - sbbl %eax, -100(%ebp) # 4-byte Folded Spill - movl 24(%esi), %eax - movl %eax, -128(%ebp) # 4-byte Spill - sbbl %eax, -96(%ebp) # 4-byte Folded Spill - movl 28(%esi), %eax - movl %eax, -132(%ebp) # 4-byte Spill - sbbl %eax, -108(%ebp) # 4-byte Folded Spill - movl 32(%esi), %eax - movl %eax, -136(%ebp) # 4-byte Spill - sbbl %eax, -104(%ebp) # 4-byte Folded Spill - movl 36(%esi), %eax - movl %eax, -140(%ebp) # 4-byte Spill - sbbl %eax, -112(%ebp) # 4-byte Folded Spill - sbbl $0, -116(%ebp) # 4-byte Folded Spill - movl 40(%esi), %eax - movl %eax, -160(%ebp) # 4-byte Spill - subl %eax, %ecx - movl 44(%esi), %eax - movl %eax, -164(%ebp) # 4-byte Spill - sbbl %eax, %ebx - movl 48(%esi), %eax - movl %eax, -168(%ebp) # 4-byte Spill - sbbl %eax, -120(%ebp) # 4-byte Folded Spill - movl 52(%esi), %eax - movl %eax, -172(%ebp) # 4-byte Spill - sbbl %eax, %edx - movl 56(%esi), %eax - movl %eax, -176(%ebp) # 4-byte Spill - sbbl %eax, %edi - movl 60(%esi), %eax - movl %eax, -180(%ebp) # 4-byte Spill - sbbl %eax, -100(%ebp) # 4-byte Folded Spill - movl 64(%esi), %eax - movl %eax, -144(%ebp) # 4-byte Spill - sbbl %eax, -96(%ebp) # 4-byte Folded Spill - movl 68(%esi), %eax - movl %eax, -148(%ebp) # 4-byte Spill - sbbl %eax, -108(%ebp) # 4-byte Folded Spill - movl 72(%esi), %eax - movl %eax, -152(%ebp) # 4-byte Spill - sbbl %eax, -104(%ebp) # 4-byte Folded Spill - movl 76(%esi), %eax - movl %eax, -156(%ebp) # 4-byte Spill - sbbl %eax, -112(%ebp) # 4-byte Folded Spill - sbbl $0, -116(%ebp) # 4-byte Folded Spill - addl -124(%ebp), %ecx # 4-byte Folded Reload - adcl -128(%ebp), %ebx # 4-byte Folded Reload - movl %ecx, 20(%esi) - movl -120(%ebp), %eax # 4-byte Reload - adcl -132(%ebp), %eax # 4-byte Folded Reload - movl %ebx, 24(%esi) - adcl -136(%ebp), %edx # 4-byte Folded Reload - movl %eax, 28(%esi) - adcl -140(%ebp), %edi # 4-byte Folded Reload - movl %edx, 32(%esi) - movl -100(%ebp), %eax # 4-byte Reload - adcl -160(%ebp), %eax # 4-byte Folded Reload - movl %edi, 36(%esi) - movl -96(%ebp), %ecx # 4-byte Reload - adcl -164(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 40(%esi) - movl -108(%ebp), %eax # 4-byte Reload - adcl -168(%ebp), %eax # 4-byte Folded Reload - movl %ecx, 44(%esi) - movl -104(%ebp), %ecx # 4-byte Reload - adcl -172(%ebp), %ecx # 4-byte Folded Reload - movl %eax, 48(%esi) - movl -112(%ebp), %edx # 4-byte Reload - adcl -176(%ebp), %edx # 4-byte Folded Reload - 
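
mcl_fpDbl_mulPre10L, whose body this hunk is removing, splits each 10-limb operand into 5-limb halves and issues three mcl_fpDbl_mulPre5L calls, low*low, high*high, and (low+high)*(low+high), then recombines with the sign-masked carry fixups seen above: one level of Karatsuba. A short Go sketch of the recombination identity, using math/big to stay compact; mulPre10 is an illustrative name, not mcl's interface:

    package main

    import "math/big"

    // mulPre10 sketches one Karatsuba level: with x = x1*B + x0 and
    // y = y1*B + y0 (B = 2^160, five 32-bit limbs), the product is
    //   x*y = x1*y1*B^2 + ((x0+x1)*(y0+y1) - x1*y1 - x0*y0)*B + x0*y0,
    // so only three half-width multiplications are needed.
    func mulPre10(x0, x1, y0, y1 *big.Int) *big.Int {
        B := new(big.Int).Lsh(big.NewInt(1), 160)

        lo := new(big.Int).Mul(x0, y0) // low*low
        hi := new(big.Int).Mul(x1, y1) // high*high
        mid := new(big.Int).Mul(
            new(big.Int).Add(x0, x1), new(big.Int).Add(y0, y1))
        mid.Sub(mid, lo).Sub(mid, hi) // cross term

        z := new(big.Int).Mul(hi, B)
        z.Mul(z, B)
        z.Add(z, new(big.Int).Mul(mid, B))
        z.Add(z, lo)
        return z
    }

mcl_fpDbl_sqrPre10L, which follows, reuses the same three-call shape; the shldl $1 sequences double the cross products instead of computing them twice.
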
movl %ecx, 52(%esi) - movl -116(%ebp), %eax # 4-byte Reload - adcl -180(%ebp), %eax # 4-byte Folded Reload - movl %edx, 56(%esi) - movl %eax, 60(%esi) - movl -144(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 64(%esi) - movl -148(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 68(%esi) - movl -152(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 72(%esi) - movl -156(%ebp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 76(%esi) - addl $188, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end144: - .size mcl_fpDbl_mulPre10L, .Lfunc_end144-mcl_fpDbl_mulPre10L - - .globl mcl_fpDbl_sqrPre10L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre10L,@function -mcl_fpDbl_sqrPre10L: # @mcl_fpDbl_sqrPre10L -# BB#0: - pushl %ebp - movl %esp, %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $188, %esp - calll .L145$pb -.L145$pb: - popl %ebx -.Ltmp16: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp16-.L145$pb), %ebx - movl %ebx, -120(%ebp) # 4-byte Spill - movl 12(%ebp), %edi - movl %edi, 8(%esp) - movl %edi, 4(%esp) - movl 8(%ebp), %esi - movl %esi, (%esp) - calll mcl_fpDbl_mulPre5L@PLT - leal 20(%edi), %eax - movl %eax, 8(%esp) - movl %eax, 4(%esp) - leal 40(%esi), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre5L@PLT - movl 36(%edi), %eax - movl 32(%edi), %ebx - movl 28(%edi), %esi - movl (%edi), %ecx - movl 4(%edi), %edx - addl 20(%edi), %ecx - adcl 24(%edi), %edx - adcl 8(%edi), %esi - adcl 12(%edi), %ebx - movl %ebx, -124(%ebp) # 4-byte Spill - adcl 16(%edi), %eax - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -128(%ebp) # 4-byte Spill - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -108(%ebp) # 4-byte Spill - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -104(%ebp) # 4-byte Spill - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -100(%ebp) # 4-byte Spill - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -96(%ebp) # 4-byte Spill - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - sbbl %ebx, %ebx - movl %ebx, -116(%ebp) # 4-byte Spill - pushl %eax - movl %edi, %eax - addb $127, %al - sahf - popl %eax - jb .LBB145_1 -# BB#2: - movl $0, -112(%ebp) # 4-byte Folded Spill - jmp .LBB145_3 -.LBB145_1: - leal (%ecx,%ecx), %edi - movl %edi, -112(%ebp) # 4-byte Spill -.LBB145_3: - movl -96(%ebp), %edi # 4-byte Reload - pushl %eax - movl %edi, %eax - addb $127, %al - sahf - popl %eax - movl -124(%ebp), %edi # 4-byte Reload - jb .LBB145_4 -# BB#5: - movl $0, -96(%ebp) # 4-byte Folded Spill - jmp .LBB145_6 -.LBB145_4: - movl %edx, %ebx - shldl $1, %ecx, %ebx - movl %ebx, -96(%ebp) # 4-byte Spill -.LBB145_6: - movl -100(%ebp), %ebx # 4-byte Reload - pushl %eax - movl %ebx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB145_7 -# BB#8: - movl $0, -100(%ebp) # 4-byte Folded Spill - jmp .LBB145_9 -.LBB145_7: - movl %esi, %ebx - shldl $1, %edx, %ebx - movl %ebx, -100(%ebp) # 4-byte Spill -.LBB145_9: - movl -104(%ebp), %ebx # 4-byte Reload - pushl %eax - movl %ebx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB145_10 -# BB#11: - movl $0, -104(%ebp) # 4-byte Folded Spill - jmp .LBB145_12 -.LBB145_10: - movl %edi, %ebx - shldl $1, %esi, %ebx - movl %ebx, -104(%ebp) # 4-byte Spill -.LBB145_12: - movl -108(%ebp), %ebx # 4-byte Reload - pushl %eax - movl %ebx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB145_13 -# BB#14: - movl $0, -108(%ebp) # 4-byte Folded Spill - jmp .LBB145_15 -.LBB145_13: - movl %eax, %ebx - shldl $1, %edi, %ebx - movl 
%ebx, -108(%ebp) # 4-byte Spill -.LBB145_15: - movl %ecx, -72(%ebp) - movl %edx, -68(%ebp) - movl %esi, -64(%ebp) - movl %edi, -60(%ebp) - movl %eax, -56(%ebp) - movl %ecx, -92(%ebp) - movl %edx, -88(%ebp) - movl %esi, -84(%ebp) - movl %edi, -80(%ebp) - movl %eax, -76(%ebp) - movl -128(%ebp), %ecx # 4-byte Reload - pushl %eax - movl %ecx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB145_16 -# BB#17: - movl $0, -124(%ebp) # 4-byte Folded Spill - jmp .LBB145_18 -.LBB145_16: - shrl $31, %eax - movl %eax, -124(%ebp) # 4-byte Spill -.LBB145_18: - leal -52(%ebp), %eax - movl %eax, (%esp) - leal -72(%ebp), %eax - movl %eax, 4(%esp) - leal -92(%ebp), %eax - movl %eax, 8(%esp) - movl -116(%ebp), %esi # 4-byte Reload - andl $1, %esi - movl -120(%ebp), %ebx # 4-byte Reload - calll mcl_fpDbl_mulPre5L@PLT - movl -112(%ebp), %edi # 4-byte Reload - addl -32(%ebp), %edi - movl -96(%ebp), %eax # 4-byte Reload - adcl -28(%ebp), %eax - movl %eax, -96(%ebp) # 4-byte Spill - movl -100(%ebp), %eax # 4-byte Reload - adcl -24(%ebp), %eax - movl %eax, -100(%ebp) # 4-byte Spill - movl -104(%ebp), %eax # 4-byte Reload - adcl -20(%ebp), %eax - movl %eax, -104(%ebp) # 4-byte Spill - movl -108(%ebp), %eax # 4-byte Reload - adcl -16(%ebp), %eax - movl %eax, -108(%ebp) # 4-byte Spill - adcl -124(%ebp), %esi # 4-byte Folded Reload - movl -52(%ebp), %edx - movl 8(%ebp), %eax - subl (%eax), %edx - movl -48(%ebp), %ebx - sbbl 4(%eax), %ebx - movl -44(%ebp), %ecx - sbbl 8(%eax), %ecx - movl %ecx, -116(%ebp) # 4-byte Spill - movl -40(%ebp), %ecx - sbbl 12(%eax), %ecx - movl %ecx, -144(%ebp) # 4-byte Spill - movl -36(%ebp), %ecx - sbbl 16(%eax), %ecx - movl %ecx, -120(%ebp) # 4-byte Spill - movl 20(%eax), %ecx - movl %ecx, -124(%ebp) # 4-byte Spill - sbbl %ecx, %edi - movl %edi, -112(%ebp) # 4-byte Spill - movl 24(%eax), %ecx - movl %ecx, -128(%ebp) # 4-byte Spill - sbbl %ecx, -96(%ebp) # 4-byte Folded Spill - movl 28(%eax), %ecx - movl %ecx, -132(%ebp) # 4-byte Spill - sbbl %ecx, -100(%ebp) # 4-byte Folded Spill - movl 32(%eax), %ecx - movl %ecx, -136(%ebp) # 4-byte Spill - sbbl %ecx, -104(%ebp) # 4-byte Folded Spill - movl 36(%eax), %ecx - movl %ecx, -140(%ebp) # 4-byte Spill - sbbl %ecx, -108(%ebp) # 4-byte Folded Spill - sbbl $0, %esi - movl 40(%eax), %ecx - movl %ecx, -160(%ebp) # 4-byte Spill - subl %ecx, %edx - movl 44(%eax), %ecx - movl %ecx, -164(%ebp) # 4-byte Spill - sbbl %ecx, %ebx - movl 48(%eax), %ecx - movl %ecx, -168(%ebp) # 4-byte Spill - sbbl %ecx, -116(%ebp) # 4-byte Folded Spill - movl 52(%eax), %ecx - movl %ecx, -172(%ebp) # 4-byte Spill - movl -144(%ebp), %edi # 4-byte Reload - sbbl %ecx, %edi - movl 56(%eax), %ecx - movl %ecx, -176(%ebp) # 4-byte Spill - sbbl %ecx, -120(%ebp) # 4-byte Folded Spill - movl 60(%eax), %ecx - movl %ecx, -180(%ebp) # 4-byte Spill - sbbl %ecx, -112(%ebp) # 4-byte Folded Spill - movl 64(%eax), %ecx - movl %ecx, -144(%ebp) # 4-byte Spill - sbbl %ecx, -96(%ebp) # 4-byte Folded Spill - movl 68(%eax), %ecx - movl %ecx, -148(%ebp) # 4-byte Spill - sbbl %ecx, -100(%ebp) # 4-byte Folded Spill - movl 72(%eax), %ecx - movl %ecx, -152(%ebp) # 4-byte Spill - sbbl %ecx, -104(%ebp) # 4-byte Folded Spill - movl 76(%eax), %ecx - movl %ecx, -156(%ebp) # 4-byte Spill - sbbl %ecx, -108(%ebp) # 4-byte Folded Spill - sbbl $0, %esi - addl -124(%ebp), %edx # 4-byte Folded Reload - adcl -128(%ebp), %ebx # 4-byte Folded Reload - movl %edx, 20(%eax) - movl -116(%ebp), %ecx # 4-byte Reload - adcl -132(%ebp), %ecx # 4-byte Folded Reload - movl %ebx, 24(%eax) - adcl -136(%ebp), %edi # 4-byte Folded 
Reload - movl %ecx, 28(%eax) - movl -120(%ebp), %edx # 4-byte Reload - adcl -140(%ebp), %edx # 4-byte Folded Reload - movl %edi, 32(%eax) - movl -112(%ebp), %ecx # 4-byte Reload - adcl -160(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 36(%eax) - movl -96(%ebp), %edx # 4-byte Reload - adcl -164(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 40(%eax) - movl -100(%ebp), %ecx # 4-byte Reload - adcl -168(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 44(%eax) - movl -104(%ebp), %edx # 4-byte Reload - adcl -172(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 48(%eax) - movl -108(%ebp), %ecx # 4-byte Reload - adcl -176(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 52(%eax) - adcl -180(%ebp), %esi # 4-byte Folded Reload - movl %ecx, 56(%eax) - movl %esi, 60(%eax) - movl -144(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 64(%eax) - movl -148(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 68(%eax) - movl -152(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 72(%eax) - movl -156(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 76(%eax) - addl $188, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end145: - .size mcl_fpDbl_sqrPre10L, .Lfunc_end145-mcl_fpDbl_sqrPre10L - - .globl mcl_fp_mont10L - .align 16, 0x90 - .type mcl_fp_mont10L,@function -mcl_fp_mont10L: # @mcl_fp_mont10L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1036, %esp # imm = 0x40C - calll .L146$pb -.L146$pb: - popl %ebx -.Ltmp17: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp17-.L146$pb), %ebx - movl 1068(%esp), %eax - movl -4(%eax), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 992(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - movl 992(%esp), %edi - movl 996(%esp), %ebp - movl %edi, %eax - imull %esi, %eax - movl 1032(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 1028(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 1024(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 1020(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 1016(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1012(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 1008(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 1004(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1000(%esp), %esi - movl %eax, (%esp) - leal 944(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - addl 944(%esp), %edi - adcl 948(%esp), %ebp - adcl 952(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 956(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 960(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 976(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 980(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 984(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - sbbl %edi, %edi - movl 1064(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 896(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - andl $1, %edi - addl 896(%esp), %ebp - adcl 900(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - 
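
mcl_fp_mont10L, beginning above, interleaves multiplication with reduction in the CIOS style: each outer round makes one .LmulPv320x32 call for x*y[i] and one for m*p with m = t[0]*nInv, so a limb can be dropped per round. A hedged Go sketch of the whole loop under those assumptions (standard CIOS invariants, t < 2p at exit); montMul10 and mulAdd are illustrative names:

    package main

    import "math/bits"

    const n = 10

    // montMul10 computes x*y*2^(-32n) mod p. nInv = -p^(-1) mod 2^32.
    func montMul10(x, y, p *[n]uint32, nInv uint32) (z [n]uint32) {
        var t [n + 2]uint32 // accumulator with two spare words
        for i := 0; i < n; i++ {
            mulAdd(&t, x, y[i])  // t += x * y[i]
            m := t[0] * nInv     // makes t divisible by 2^32
            mulAdd(&t, p, m)     // t += p * m; now t[0] == 0
            copy(t[:n+1], t[1:]) // t /= 2^32
            t[n+1] = 0
        }
        // final conditional subtraction into [0, p); t[n+1] is 0 here
        var d [n]uint32
        var borrow uint32
        for i := 0; i < n; i++ {
            d[i], borrow = bits.Sub32(t[i], p[i], borrow)
        }
        if t[n] != 0 || borrow == 0 {
            copy(z[:], d[:])
        } else {
            copy(z[:], t[:n])
        }
        return
    }

    // mulAdd adds v*w into t, the word-by-vector step of .LmulPv320x32.
    func mulAdd(t *[n + 2]uint32, v *[n]uint32, w uint32) {
        var carry uint32
        for i := 0; i < n; i++ {
            hi, lo := bits.Mul32(v[i], w)
            s, c1 := bits.Add32(t[i], lo, 0)
            s, c2 := bits.Add32(s, carry, 0)
            t[i], carry = s, hi+c1+c2
        }
        var c uint32
        t[n], c = bits.Add32(t[n], carry, 0)
        t[n+1] += c
    }
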
adcl 904(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 912(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 920(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 924(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 928(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 932(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 936(%esp), %edi - sbbl %eax, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl %ebp, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 848(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - movl 64(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 848(%esp), %ebp - adcl 852(%esp), %esi - movl 44(%esp), %ecx # 4-byte Reload - adcl 856(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 860(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 864(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 868(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 872(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 876(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 880(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl 884(%esp), %ebp - adcl 888(%esp), %edi - adcl $0, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 800(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - addl 800(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 828(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 832(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - adcl 836(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %esi, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 752(%esp), %ecx - movl 1068(%esp), %eax - movl %eax, %edx - calll .LmulPv320x32 - andl $1, %edi - movl %edi, %ecx - addl 752(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 756(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 768(%esp), 
%ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 780(%esp), %esi - movl 76(%esp), %edi # 4-byte Reload - adcl 784(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 704(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - movl 44(%esp), %ecx # 4-byte Reload - addl 704(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 712(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 716(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 720(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 728(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - adcl 732(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 736(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 744(%esp), %edi - sbbl %esi, %esi - movl %ecx, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 656(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - andl $1, %esi - movl %esi, %ecx - movl 44(%esp), %eax # 4-byte Reload - addl 656(%esp), %eax - movl 40(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 676(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 688(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 696(%esp), %edi - adcl $0, %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 608(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - movl 40(%esp), %ecx # 4-byte Reload - addl 608(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 624(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 636(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - 
movl 64(%esp), %esi # 4-byte Reload - adcl 640(%esp), %esi - adcl 644(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 648(%esp), %edi - sbbl %ebp, %ebp - movl %ecx, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 560(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - andl $1, %ebp - movl %ebp, %ecx - movl 40(%esp), %eax # 4-byte Reload - addl 560(%esp), %eax - movl 36(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 572(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 592(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 600(%esp), %edi - adcl $0, %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 512(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - movl 36(%esp), %ecx # 4-byte Reload - addl 512(%esp), %ecx - movl 48(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl %ebp, %esi - adcl 520(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 548(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %eax - movl %ecx, %edi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 464(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - andl $1, %ebp - movl %ebp, %eax - addl 464(%esp), %edi - movl 48(%esp), %ecx # 4-byte Reload - adcl 468(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - adcl 472(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 476(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 480(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 484(%esp), %esi - movl 60(%esp), %ecx # 4-byte Reload - adcl 488(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 492(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 496(%esp), %ebp - movl 44(%esp), %ecx # 4-byte Reload - adcl 500(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - adcl 504(%esp), %edi - adcl $0, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 
1064(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 416(%esp), %ecx - movl 1060(%esp), %eax - movl %eax, %edx - calll .LmulPv320x32 - movl 48(%esp), %ecx # 4-byte Reload - addl 416(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 432(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 440(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 444(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 452(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 368(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - andl $1, %edi - movl %edi, %ecx - addl 368(%esp), %esi - movl 56(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 380(%esp), %esi - movl 76(%esp), %ebp # 4-byte Reload - adcl 384(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 400(%esp), %edi - movl 40(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 320(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - movl 56(%esp), %ecx # 4-byte Reload - addl 320(%esp), %ecx - movl 68(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 328(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - adcl 332(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 348(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 360(%esp), %ebp - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 272(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - andl $1, %edi - movl %edi, %ecx - addl 272(%esp), %esi - movl 68(%esp), %edi # 4-byte Reload - 
adcl 276(%esp), %edi - movl 72(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - adcl 288(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 312(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl %ecx, %ebp - adcl $0, %ebp - movl 1064(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 224(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - movl %edi, %ecx - addl 224(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 236(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl 240(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 264(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %eax - movl %ecx, %esi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 176(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - andl $1, %ebp - addl 176(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl %edi, %esi - adcl 192(%esp), %esi - movl 52(%esp), %edi # 4-byte Reload - adcl 196(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl $0, %ebp - movl 1064(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 128(%esp), %ecx - movl 1060(%esp), %edx - calll .LmulPv320x32 - movl 72(%esp), %ecx # 4-byte Reload - addl 128(%esp), %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 132(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 136(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 140(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - adcl 144(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 
44(%esp), %eax # 4-byte Reload - adcl 148(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 152(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 156(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 164(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 168(%esp), %ebp - sbbl %esi, %esi - movl 32(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %edi - movl %eax, (%esp) - leal 80(%esp), %ecx - movl 1068(%esp), %edx - calll .LmulPv320x32 - andl $1, %esi - addl 80(%esp), %edi - movl 76(%esp), %eax # 4-byte Reload - movl 64(%esp), %ebx # 4-byte Reload - adcl 84(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 88(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl 92(%esp), %ebx - movl 52(%esp), %edx # 4-byte Reload - adcl 96(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - adcl 100(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - adcl 104(%esp), %edx - movl %edx, 40(%esp) # 4-byte Spill - movl %edx, %edi - movl 36(%esp), %edx # 4-byte Reload - adcl 108(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - adcl 112(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 116(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - adcl 120(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - adcl $0, %esi - movl 1068(%esp), %edx - subl (%edx), %eax - sbbl 4(%edx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl %ebx, %ecx - sbbl 8(%edx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - sbbl 12(%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - sbbl 16(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - sbbl 20(%edx), %edi - movl %edi, 32(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - sbbl 24(%edx), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - sbbl 28(%edx), %ecx - movl 68(%esp), %edi # 4-byte Reload - sbbl 32(%edx), %edi - movl %edi, 64(%esp) # 4-byte Spill - sbbl 36(%edx), %ebp - movl %ebp, %edx - sbbl $0, %esi - andl $1, %esi - jne .LBB146_2 -# BB#1: - movl %ecx, 48(%esp) # 4-byte Spill -.LBB146_2: - movl %esi, %ecx - testb %cl, %cl - movl 76(%esp), %esi # 4-byte Reload - jne .LBB146_4 -# BB#3: - movl %eax, %esi -.LBB146_4: - movl 1056(%esp), %eax - movl %esi, (%eax) - movl 60(%esp), %edi # 4-byte Reload - jne .LBB146_6 -# BB#5: - movl 16(%esp), %edi # 4-byte Reload -.LBB146_6: - movl %edi, 4(%eax) - jne .LBB146_8 -# BB#7: - movl 20(%esp), %ebx # 4-byte Reload -.LBB146_8: - movl %ebx, 8(%eax) - movl 52(%esp), %ebp # 4-byte Reload - movl 44(%esp), %ecx # 4-byte Reload - jne .LBB146_10 -# BB#9: - movl 24(%esp), %ebp # 4-byte Reload -.LBB146_10: - movl %ebp, 12(%eax) - jne .LBB146_12 -# BB#11: - movl 28(%esp), %ecx # 4-byte Reload -.LBB146_12: - movl %ecx, 16(%eax) - movl 40(%esp), %ecx # 4-byte Reload - jne .LBB146_14 -# BB#13: - movl 32(%esp), %ecx # 4-byte Reload -.LBB146_14: - movl %ecx, 20(%eax) - movl 36(%esp), %ecx # 4-byte Reload - jne .LBB146_16 -# BB#15: - movl 56(%esp), %ecx # 4-byte Reload -.LBB146_16: - movl %ecx, 24(%eax) - movl 48(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 68(%esp), %ecx # 4-byte Reload - jne 
.LBB146_18 -# BB#17: - movl 64(%esp), %ecx # 4-byte Reload -.LBB146_18: - movl %ecx, 32(%eax) - movl 72(%esp), %ecx # 4-byte Reload - jne .LBB146_20 -# BB#19: - movl %edx, %ecx -.LBB146_20: - movl %ecx, 36(%eax) - addl $1036, %esp # imm = 0x40C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end146: - .size mcl_fp_mont10L, .Lfunc_end146-mcl_fp_mont10L - - .globl mcl_fp_montNF10L - .align 16, 0x90 - .type mcl_fp_montNF10L,@function -mcl_fp_montNF10L: # @mcl_fp_montNF10L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1020, %esp # imm = 0x3FC - calll .L147$pb -.L147$pb: - popl %ebx -.Ltmp18: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp18-.L147$pb), %ebx - movl 1052(%esp), %eax - movl -4(%eax), %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 976(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 - movl 976(%esp), %edi - movl 980(%esp), %esi - movl %edi, %eax - imull %ebp, %eax - movl 1016(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 1012(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 1008(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1004(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 1000(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 996(%esp), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 992(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 988(%esp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 984(%esp), %ebp - movl %eax, (%esp) - leal 928(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 928(%esp), %edi - adcl 932(%esp), %esi - adcl 936(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 940(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 944(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 948(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - adcl 952(%esp), %edi - movl 36(%esp), %eax # 4-byte Reload - adcl 956(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 960(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 880(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 - movl 920(%esp), %ecx - addl 880(%esp), %esi - adcl 884(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 888(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - adcl 900(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 912(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl %esi, %eax - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 832(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 
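# Note: mcl_fp_mont10L (ending above) and mcl_fp_montNF10L (starting
# here) share the same word-serial round structure, with two visible
# differences: montNF keeps the eleventh (overflow) accumulator word
# exactly (the "movl 920(%esp), %ecx ... adcl $0, %ecx" pattern),
# where mont squeezes it into a one-bit carry via sbbl/andl $1, and
# its final correction picks between t and t - p on the sign of the
# subtraction (sarl $31 / js) instead of on a borrow; presumably this
# is the variant for moduli with an unused top bit, where t - p
# cannot wrap a signed comparison.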
- addl 832(%esp), %esi - adcl 836(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %edi # 4-byte Reload - adcl 848(%esp), %edi - movl 40(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 856(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - adcl 872(%esp), %esi - movl 1048(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 784(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 - movl 824(%esp), %ecx - addl 784(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 796(%esp), %edi - movl %edi, 24(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - adcl 808(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 820(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl %ebp, %eax - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 736(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 736(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 752(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 756(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 760(%esp), %edi - movl 56(%esp), %ebp # 4-byte Reload - adcl 764(%esp), %ebp - movl 60(%esp), %esi # 4-byte Reload - adcl 768(%esp), %esi - movl 52(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 688(%esp), %ecx - movl 1044(%esp), %eax - movl %eax, %edx - calll .LmulPv320x32 - movl 728(%esp), %eax - movl 28(%esp), %edx # 4-byte Reload - addl 688(%esp), %edx - movl 44(%esp), %ecx # 4-byte Reload - adcl 692(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 24(%esp), %ecx # 4-byte Reload - adcl 696(%esp), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 700(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 704(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl 708(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - 
adcl 712(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - adcl 716(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 720(%esp), %ebp - movl 32(%esp), %esi # 4-byte Reload - adcl 724(%esp), %esi - adcl $0, %eax - movl %eax, 28(%esp) # 4-byte Spill - movl %edx, %eax - movl %edx, %edi - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 640(%esp), %ecx - movl 1052(%esp), %eax - movl %eax, %edx - calll .LmulPv320x32 - addl 640(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %edi # 4-byte Reload - adcl 656(%esp), %edi - movl 48(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 672(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - adcl 676(%esp), %esi - movl %esi, %ebp - movl 28(%esp), %esi # 4-byte Reload - adcl 680(%esp), %esi - movl 1048(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 592(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 - movl 632(%esp), %edx - movl 44(%esp), %ecx # 4-byte Reload - addl 592(%esp), %ecx - movl 24(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 604(%esp), %edi - movl %edi, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 624(%esp), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - adcl 628(%esp), %esi - movl %esi, 28(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 44(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 544(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 544(%esp), %esi - movl 24(%esp), %edi # 4-byte Reload - adcl 548(%esp), %edi - movl 40(%esp), %esi # 4-byte Reload - adcl 552(%esp), %esi - movl 36(%esp), %ebp # 4-byte Reload - adcl 556(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 496(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 
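# Note: every round funnels through .LmulPv320x32; judging by these
# call sites it takes %ecx = pointer to an 11-word stack buffer for
# the 352-bit product, %edx = the 10-limb vector (x for the multiply
# step, p for the reduce step), and (%esp) = the 32-bit scalar, with
# %ebx holding the GOT base for the PIC call. The destination buffers
# step down the stack by 48 bytes per round (976, 928, 880, ...), and
# the addl/adcl chain after each call folds the fresh product into
# the accumulator kept in the low spill slots.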
- movl 536(%esp), %edx - addl 496(%esp), %edi - adcl 500(%esp), %esi - movl %esi, 40(%esp) # 4-byte Spill - adcl 504(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 28(%esp), %esi # 4-byte Reload - adcl 528(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 24(%esp) # 4-byte Spill - movl %edi, %eax - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 448(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 448(%esp), %edi - movl 40(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 456(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - adcl 464(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl 480(%esp), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %esi # 4-byte Reload - adcl 488(%esp), %esi - movl 1048(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 400(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 - movl 440(%esp), %eax - movl 40(%esp), %ecx # 4-byte Reload - addl 400(%esp), %ecx - adcl 404(%esp), %ebp - movl 48(%esp), %edx # 4-byte Reload - adcl 408(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - adcl 412(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 416(%esp), %edi - movl 52(%esp), %edx # 4-byte Reload - adcl 420(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 32(%esp), %edx # 4-byte Reload - adcl 424(%esp), %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 28(%esp), %edx # 4-byte Reload - adcl 428(%esp), %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - adcl 432(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - adcl 436(%esp), %esi - movl %esi, 24(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 352(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 352(%esp), %esi - adcl 356(%esp), %ebp - movl %ebp, 36(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 360(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl %edi, %esi - adcl 368(%esp), %esi - movl 52(%esp), %edi # 4-byte Reload - adcl 372(%esp), %edi - movl 32(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 
44(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 304(%esp), %ecx - movl 1044(%esp), %eax - movl %eax, %edx - calll .LmulPv320x32 - movl 344(%esp), %edx - movl 36(%esp), %ecx # 4-byte Reload - addl 304(%esp), %ecx - adcl 308(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 316(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - adcl 320(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 32(%esp), %ebp # 4-byte Reload - adcl 324(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - adcl 332(%esp), %esi - movl 24(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 256(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 256(%esp), %edi - movl 48(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 272(%esp), %edi - adcl 276(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - adcl 284(%esp), %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 24(%esp), %esi # 4-byte Reload - adcl 288(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 208(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 - movl 248(%esp), %edx - movl 48(%esp), %ecx # 4-byte Reload - addl 208(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 220(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - adcl 224(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 236(%esp), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 48(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 160(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 160(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 164(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 168(%esp), %eax - movl 
%eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl %ebp, %edi - adcl 176(%esp), %edi - movl 28(%esp), %esi # 4-byte Reload - adcl 180(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - adcl 192(%esp), %ebp - movl 36(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 112(%esp), %ecx - movl 1044(%esp), %edx - calll .LmulPv320x32 - movl 152(%esp), %edx - movl 56(%esp), %ecx # 4-byte Reload - addl 112(%esp), %ecx - movl 60(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 120(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 124(%esp), %edi - movl %edi, 32(%esp) # 4-byte Spill - adcl 128(%esp), %esi - movl %esi, %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 132(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 136(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - adcl 140(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - adcl 144(%esp), %esi - movl 48(%esp), %eax # 4-byte Reload - adcl 148(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %ebp - movl %eax, (%esp) - leal 64(%esp), %ecx - movl 1052(%esp), %edx - calll .LmulPv320x32 - addl 64(%esp), %ebp - movl %edi, %ebp - movl 60(%esp), %eax # 4-byte Reload - movl 32(%esp), %ebx # 4-byte Reload - adcl 68(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 72(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl 76(%esp), %ebx - adcl 80(%esp), %ebp - movl 44(%esp), %edx # 4-byte Reload - adcl 84(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 24(%esp), %edx # 4-byte Reload - adcl 88(%esp), %edx - movl %edx, 24(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - adcl 92(%esp), %edx - movl %edx, 40(%esp) # 4-byte Spill - adcl 96(%esp), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - adcl 100(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 104(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl %eax, %edx - movl 1052(%esp), %edi - subl (%edi), %edx - sbbl 4(%edi), %ecx - movl %ebx, %eax - sbbl 8(%edi), %eax - movl %ebp, %esi - sbbl 12(%edi), %esi - movl %esi, 4(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - sbbl 16(%edi), %esi - movl %esi, 8(%esp) # 4-byte Spill - movl 24(%esp), %esi # 4-byte Reload - sbbl 20(%edi), %esi - movl %esi, 12(%esp) # 4-byte Spill - movl 40(%esp), %esi # 4-byte Reload - sbbl 24(%edi), %esi - movl %esi, 16(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - sbbl 28(%edi), %esi - movl %esi, 20(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - sbbl 32(%edi), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - sbbl 36(%edi), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl %esi, %edi - sarl $31, %edi - testl %edi, %edi - movl 60(%esp), %edi # 
4-byte Reload - js .LBB147_2 -# BB#1: - movl %edx, %edi -.LBB147_2: - movl 1040(%esp), %edx - movl %edi, (%edx) - movl 52(%esp), %edi # 4-byte Reload - js .LBB147_4 -# BB#3: - movl %ecx, %edi -.LBB147_4: - movl %edi, 4(%edx) - js .LBB147_6 -# BB#5: - movl %eax, %ebx -.LBB147_6: - movl %ebx, 8(%edx) - js .LBB147_8 -# BB#7: - movl 4(%esp), %ebp # 4-byte Reload -.LBB147_8: - movl %ebp, 12(%edx) - movl 44(%esp), %esi # 4-byte Reload - movl 24(%esp), %eax # 4-byte Reload - js .LBB147_10 -# BB#9: - movl 8(%esp), %esi # 4-byte Reload -.LBB147_10: - movl %esi, 16(%edx) - js .LBB147_12 -# BB#11: - movl 12(%esp), %eax # 4-byte Reload -.LBB147_12: - movl %eax, 20(%edx) - movl 40(%esp), %eax # 4-byte Reload - js .LBB147_14 -# BB#13: - movl 16(%esp), %eax # 4-byte Reload -.LBB147_14: - movl %eax, 24(%edx) - movl 36(%esp), %eax # 4-byte Reload - js .LBB147_16 -# BB#15: - movl 20(%esp), %eax # 4-byte Reload -.LBB147_16: - movl %eax, 28(%edx) - movl 48(%esp), %eax # 4-byte Reload - js .LBB147_18 -# BB#17: - movl 28(%esp), %eax # 4-byte Reload -.LBB147_18: - movl %eax, 32(%edx) - movl 56(%esp), %eax # 4-byte Reload - js .LBB147_20 -# BB#19: - movl 32(%esp), %eax # 4-byte Reload -.LBB147_20: - movl %eax, 36(%edx) - addl $1020, %esp # imm = 0x3FC - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end147: - .size mcl_fp_montNF10L, .Lfunc_end147-mcl_fp_montNF10L - - .globl mcl_fp_montRed10L - .align 16, 0x90 - .type mcl_fp_montRed10L,@function -mcl_fp_montRed10L: # @mcl_fp_montRed10L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $604, %esp # imm = 0x25C - calll .L148$pb -.L148$pb: - popl %eax -.Ltmp19: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp19-.L148$pb), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 632(%esp), %edx - movl -4(%edx), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 628(%esp), %ecx - movl (%ecx), %ebx - movl %ebx, 56(%esp) # 4-byte Spill - movl 4(%ecx), %edi - movl %edi, 68(%esp) # 4-byte Spill - imull %esi, %ebx - movl 76(%ecx), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 72(%ecx), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%ecx), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 64(%ecx), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 60(%ecx), %esi - movl %esi, 104(%esp) # 4-byte Spill - movl 56(%ecx), %esi - movl %esi, 120(%esp) # 4-byte Spill - movl 52(%ecx), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 48(%ecx), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 44(%ecx), %esi - movl %esi, 124(%esp) # 4-byte Spill - movl 40(%ecx), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 36(%ecx), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 32(%ecx), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 28(%ecx), %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 24(%ecx), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 20(%ecx), %ebp - movl 16(%ecx), %edi - movl 12(%ecx), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 8(%ecx), %esi - movl (%edx), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 36(%edx), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 32(%edx), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 28(%edx), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 24(%edx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 20(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 16(%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 12(%edx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 8(%edx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 4(%edx), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl %ebx, (%esp) - leal 560(%esp), 
%ecx - movl 64(%esp), %ebx # 4-byte Reload - calll .LmulPv320x32 - movl 56(%esp), %eax # 4-byte Reload - addl 560(%esp), %eax - movl 68(%esp), %ecx # 4-byte Reload - adcl 564(%esp), %ecx - adcl 568(%esp), %esi - movl %esi, 4(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 576(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - adcl 580(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 108(%esp) # 4-byte Folded Spill - movl 76(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 84(%esp) # 4-byte Folded Spill - sbbl %eax, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 512(%esp), %ecx - movl 632(%esp), %edx - movl 64(%esp), %ebx # 4-byte Reload - calll .LmulPv320x32 - movl 68(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 512(%esp), %esi - movl 4(%esp), %edx # 4-byte Reload - adcl 516(%esp), %edx - movl 52(%esp), %ecx # 4-byte Reload - adcl 520(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 524(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 528(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 532(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 536(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 540(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 544(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 548(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 552(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 88(%esp) # 4-byte Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, %edi - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %edx, %esi - movl %esi, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 464(%esp), %ecx - movl 632(%esp), %edx - movl 64(%esp), %ebx # 4-byte Reload - calll .LmulPv320x32 - addl 464(%esp), %esi - movl 52(%esp), %ecx # 4-byte Reload - adcl 468(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 56(%esp) # 4-byte 
Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 492(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 496(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl 120(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 88(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, %edi - adcl $0, 84(%esp) # 4-byte Folded Spill - adcl $0, 68(%esp) # 4-byte Folded Spill - movl %ecx, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 416(%esp), %ecx - movl 632(%esp), %edx - movl 64(%esp), %ebx # 4-byte Reload - calll .LmulPv320x32 - movl 52(%esp), %eax # 4-byte Reload - addl 416(%esp), %eax - movl 48(%esp), %ecx # 4-byte Reload - adcl 420(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 440(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 444(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 120(%esp) # 4-byte Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 88(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 68(%esp) # 4-byte Folded Spill - movl %ecx, %ebp - movl %ebp, %eax - movl 60(%esp), %edi # 4-byte Reload - imull %edi, %eax - movl %eax, (%esp) - leal 368(%esp), %ecx - movl 632(%esp), %eax - movl %eax, %edx - movl 64(%esp), %ebx # 4-byte Reload - calll .LmulPv320x32 - addl 368(%esp), %ebp - movl 56(%esp), %ecx # 4-byte Reload - adcl 372(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 404(%esp), 
-	# [elided: the remaining mcl_fp_montRed10L reduction rounds — five more
-	#  .LmulPv320x32 calls with their adcl carry chains, the final
-	#  conditional subtraction of the modulus, and the .LBB148 selects]
-	addl	$604, %esp              # imm = 0x25C
-	popl	%esi
-	popl	%edi
-	popl	%ebx
-	popl	%ebp
-	retl
-.Lfunc_end148:
-	.size	mcl_fp_montRed10L, .Lfunc_end148-mcl_fp_montRed10L
-
-	.globl	mcl_fp_addPre10L
-	.align	16, 0x90
-	.type	mcl_fp_addPre10L,@function
-mcl_fp_addPre10L:                       # @mcl_fp_addPre10L
-	# [elided: 10-limb addl/adcl carry chain; the final carry is masked
-	#  into %eax as the return value]
-	retl
-.Lfunc_end149:
-	.size	mcl_fp_addPre10L, .Lfunc_end149-mcl_fp_addPre10L
-
-	.globl	mcl_fp_subPre10L
-	.align	16, 0x90
-	.type	mcl_fp_subPre10L,@function
-mcl_fp_subPre10L:                       # @mcl_fp_subPre10L
-	# [elided: 10-limb subl/sbbl borrow chain; the final borrow is masked
-	#  into %eax as the return value]
-	retl
-.Lfunc_end150:
-	.size	mcl_fp_subPre10L, .Lfunc_end150-mcl_fp_subPre10L
-
-	.globl	mcl_fp_shr1_10L
-	.align	16, 0x90
-	.type	mcl_fp_shr1_10L,@function
-mcl_fp_shr1_10L:                        # @mcl_fp_shr1_10L
-	# [elided: shrdl $1 cascade across the ten limbs, closed by a plain
-	#  shrl on the top limb]
-	retl
-.Lfunc_end151:
-	.size	mcl_fp_shr1_10L, .Lfunc_end151-mcl_fp_shr1_10L
-
-	.globl	mcl_fp_add10L
-	.align	16, 0x90
-	.type	mcl_fp_add10L,@function
-mcl_fp_add10L:                          # @mcl_fp_add10L
-	# [elided: 10-limb add, trial subtraction of the modulus, and the
-	#  .LBB152 nocarry/carry select between the two results]
-	retl
-.Lfunc_end152:
-	.size	mcl_fp_add10L, .Lfunc_end152-mcl_fp_add10L
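For orientation only (this note and the sketch are editorial, not lines of the
patch): the Pre routines above are plain multi-precision helpers on ten
little-endian 32-bit limbs. A minimal plain-Go rendering of what
mcl_fp_addPre10L, mcl_fp_subPre10L and mcl_fp_shr1_10L compute might look as
follows; the Go names are hypothetical, and Go is used only because the
enclosing repository is a Go project.

package main

import (
	"fmt"
	"math/bits"
)

// addPre10 mirrors mcl_fp_addPre10L: z = x + y over ten little-endian
// 32-bit limbs (the asm's addl plus nine adcl), returning the final carry.
func addPre10(z, x, y *[10]uint32) uint32 {
	var c uint32
	for i := 0; i < 10; i++ {
		z[i], c = bits.Add32(x[i], y[i], c)
	}
	return c
}

// subPre10 mirrors mcl_fp_subPre10L: z = x - y (subl plus sbbl chain),
// returning the final borrow.
func subPre10(z, x, y *[10]uint32) uint32 {
	var b uint32
	for i := 0; i < 10; i++ {
		z[i], b = bits.Sub32(x[i], y[i], b)
	}
	return b
}

// shr1_10 mirrors mcl_fp_shr1_10L: a one-bit right shift across the limbs,
// each shrdl pulling the low bit of the next limb in from above.
func shr1_10(z, x *[10]uint32) {
	for i := 0; i < 9; i++ {
		z[i] = x[i]>>1 | x[i+1]<<31
	}
	z[9] = x[9] >> 1
}

func main() {
	x := [10]uint32{0xffffffff}
	y := [10]uint32{1}
	var z [10]uint32
	fmt.Println(addPre10(&z, &x, &y), z[0], z[1]) // 0 0 1: carry into limb 1
}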
-	.globl	mcl_fp_addNF10L
-	.align	16, 0x90
-	.type	mcl_fp_addNF10L,@function
-mcl_fp_addNF10L:                        # @mcl_fp_addNF10L
-	# [elided: 10-limb add and trial subtraction, with the result chosen
-	#  by a sign mask (sarl $31) through the .LBB153 branches]
-	retl
-.Lfunc_end153:
-	.size	mcl_fp_addNF10L, .Lfunc_end153-mcl_fp_addNF10L
-
-	.globl	mcl_fp_sub10L
-	.align	16, 0x90
-	.type	mcl_fp_sub10L,@function
-mcl_fp_sub10L:                          # @mcl_fp_sub10L
-	# [elided: 10-limb sbbl chain; on borrow, the .LBB154 carry path adds
-	#  the modulus back in]
-	retl
-.Lfunc_end154:
-	.size	mcl_fp_sub10L, .Lfunc_end154-mcl_fp_sub10L
-
-	.globl	mcl_fp_subNF10L
-	.align	16, 0x90
-	.type	mcl_fp_subNF10L,@function
-mcl_fp_subNF10L:                        # @mcl_fp_subNF10L
-	# [elided: 10-limb sbbl chain; the borrow is spread into a sign mask
-	#  (sarl $31) that selects either zero or each modulus word for the
-	#  correcting add]
-	retl
-.Lfunc_end155:
-	.size	mcl_fp_subNF10L, .Lfunc_end155-mcl_fp_subNF10L
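Again for orientation (editorial, not part of the patch): the modular add and
subtract above differ from the Pre helpers only in the correction step —
mcl_fp_add10L adds and then conditionally subtracts the modulus, while
mcl_fp_sub10L subtracts and conditionally adds it back. A sketch of that
select logic, under the same hypothetical-naming caveat as above:

package fpsketch

import "math/bits"

// fpAdd10 sketches mcl_fp_add10L: add the operands, attempt to subtract the
// modulus p, and keep the reduced value only when the trial subtraction does
// not underflow (the asm's "nocarry" branch).
func fpAdd10(z *[10]uint32, x, y, p *[10]uint32) {
	var c uint32
	for i := 0; i < 10; i++ {
		z[i], c = bits.Add32(x[i], y[i], c)
	}
	var t [10]uint32
	var b uint32
	for i := 0; i < 10; i++ {
		t[i], b = bits.Sub32(z[i], p[i], b)
	}
	if c != 0 || b == 0 { // sum >= p: use the reduced value
		*z = t
	}
}

// fpSub10 sketches mcl_fp_sub10L: subtract, and if a borrow came out of the
// top limb, add the modulus back in (the asm's "carry" branch).
func fpSub10(z *[10]uint32, x, y, p *[10]uint32) {
	var b uint32
	for i := 0; i < 10; i++ {
		z[i], b = bits.Sub32(x[i], y[i], b)
	}
	if b != 0 {
		var c uint32
		for i := 0; i < 10; i++ {
			z[i], c = bits.Add32(z[i], p[i], c)
		}
	}
}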
-	.globl	mcl_fpDbl_add10L
-	.align	16, 0x90
-	.type	mcl_fpDbl_add10L,@function
-mcl_fpDbl_add10L:                       # @mcl_fpDbl_add10L
-	# [elided: 20-limb adcl chain over the double-width operands, then a
-	#  trial subtraction of the modulus from the upper half with .LBB156
-	#  selects]
-	retl
-.Lfunc_end156:
-	.size	mcl_fpDbl_add10L, .Lfunc_end156-mcl_fpDbl_add10L
-
-	.globl	mcl_fpDbl_sub10L
-	.align	16, 0x90
-	.type	mcl_fpDbl_sub10L,@function
-mcl_fpDbl_sub10L:                       # @mcl_fpDbl_sub10L
-	# [elided: 20-limb sbbl chain, then a masked add of the modulus to the
-	#  upper half through the .LBB157 branches]
-	retl
-.Lfunc_end157:
-	.size	mcl_fpDbl_sub10L, .Lfunc_end157-mcl_fpDbl_sub10L
-
-	.align	16, 0x90
-	.type	.LmulPv352x32,@function
-.LmulPv352x32:                          # @mulPv352x32
-	# [elided: eleven mull steps against the single 32-bit multiplier in
-	#  %edi with adcl folding of the high words; the 12-limb result is
-	#  stored through %ecx]
-	retl
-.Lfunc_end158:
-	.size	.LmulPv352x32, .Lfunc_end158-.LmulPv352x32
-
-	.globl	mcl_fp_mulUnitPre11L
-	.align	16, 0x90
-	.type	mcl_fp_mulUnitPre11L,@function
-mcl_fp_mulUnitPre11L:                   # @mcl_fp_mulUnitPre11L
-	# [elided: GOT setup via .L159$pb, a single .LmulPv352x32 call, and
-	#  copy-out of the 12-limb result]
-	retl
-.Lfunc_end159:
-	.size	mcl_fp_mulUnitPre11L, .Lfunc_end159-mcl_fp_mulUnitPre11L
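Editorial note, not part of the patch: .LmulPv352x32 is the internal helper
the 11-limb routines are built from — it multiplies an 11-limb (352-bit)
operand by one 32-bit word, producing 12 limbs, and mcl_fp_mulUnitPre11L is
essentially a direct wrapper around it. A plain-Go rendering with a
hypothetical name, in the same sketch package as above:

package fpsketch

import "math/bits"

// mulPv11 sketches .LmulPv352x32: multiply the 11-limb operand x by a single
// 32-bit word y, producing a 12-limb result. Each step is one mull plus
// carry folding, as in the generated code.
func mulPv11(z *[12]uint32, x *[11]uint32, y uint32) {
	var c uint32
	for i := 0; i < 11; i++ {
		hi, lo := bits.Mul32(x[i], y)
		var cc uint32
		z[i], cc = bits.Add32(lo, c, 0)
		c = hi + cc // hi <= 2^32-2, so this cannot wrap
	}
	z[11] = c
}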
-	.globl	mcl_fpDbl_mulPre11L
-	.align	16, 0x90
-	.type	mcl_fpDbl_mulPre11L,@function
-mcl_fpDbl_mulPre11L:                    # @mcl_fpDbl_mulPre11L
-	# [elided: schoolbook 11x11-limb product — eleven .LmulPv352x32 calls,
-	#  one per word of the second operand, each followed by an adcl
-	#  accumulation pass into the 22-limb result]
-	retl
-.Lfunc_end160:
-	.size	mcl_fpDbl_mulPre11L, .Lfunc_end160-mcl_fpDbl_mulPre11L
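One more editorial sketch, not part of the patch: the full-product routine is
just the mulPv11 helper above run once per word of the second operand, with
each 12-limb row folded into a 22-limb accumulator; mcl_fpDbl_sqrPre11L
(below) has the same structure with the single operand fed in as both
multiplicand and multiplier. Hypothetical names again, reusing mulPv11 from
the previous sketch:

package fpsketch

import "math/bits"

// fpDblMulPre11 sketches mcl_fpDbl_mulPre11L: a schoolbook full product of
// two 11-limb operands into 22 limbs. Like the assembly, it runs one mulPv
// row per word of y and folds each row into the accumulator.
// z must start zeroed (Go zeroes it on declaration).
func fpDblMulPre11(z *[22]uint32, x, y *[11]uint32) {
	var row [12]uint32
	for j := 0; j < 11; j++ {
		mulPv11(&row, x, y[j])
		var c uint32
		for i := 0; i < 12; i++ {
			z[j+i], c = bits.Add32(z[j+i], row[i], c)
		}
		// c is 0 here: z[j+11] was still zero before this row was added,
		// and row[11] <= 2^32-2, so the top add cannot carry out
	}
}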
-	.globl	mcl_fpDbl_sqrPre11L
-	.align	16, 0x90
-	.type	mcl_fpDbl_sqrPre11L,@function
-mcl_fpDbl_sqrPre11L:                    # @mcl_fpDbl_sqrPre11L
-	# [elided: same schoolbook structure as mcl_fpDbl_mulPre11L, with the
-	#  single operand fed to .LmulPv352x32 as both multiplicand and
-	#  multiplier across eleven accumulation rounds]
movl %edx, 72(%eax) - movl %ecx, 76(%eax) - movl 80(%esp), %ecx # 4-byte Reload - adcl 32(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 80(%eax) - movl 84(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 84(%eax) - addl $620, %esp # imm = 0x26C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end161: - .size mcl_fpDbl_sqrPre11L, .Lfunc_end161-mcl_fpDbl_sqrPre11L - - .globl mcl_fp_mont11L - .align 16, 0x90 - .type mcl_fp_mont11L,@function -mcl_fp_mont11L: # @mcl_fp_mont11L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1132, %esp # imm = 0x46C - calll .L162$pb -.L162$pb: - popl %ebx -.Ltmp23: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp23-.L162$pb), %ebx - movl 1164(%esp), %eax - movl -4(%eax), %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1080(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 1080(%esp), %edi - movl 1084(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl %edi, %eax - imull %ebp, %eax - movl 1124(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 1120(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 1116(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 1112(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1108(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 1104(%esp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 1100(%esp), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 1096(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 1092(%esp), %esi - movl 1088(%esp), %ebp - movl %eax, (%esp) - leal 1032(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - addl 1032(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 1036(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1040(%esp), %ebp - adcl 1044(%esp), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1048(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 1052(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 1056(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 1060(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1064(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1068(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1072(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 1076(%esp), %esi - sbbl %edi, %edi - movl 1160(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 984(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - andl $1, %edi - movl 56(%esp), %ecx # 4-byte Reload - addl 984(%esp), %ecx - adcl 988(%esp), %ebp - movl 40(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 996(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 1000(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 1004(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 1008(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1012(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1016(%esp), %eax - movl %eax, 
60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1020(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 1024(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - adcl 1028(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 936(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - andl $1, %edi - movl %edi, %ecx - addl 936(%esp), %esi - adcl 940(%esp), %ebp - movl 40(%esp), %eax # 4-byte Reload - adcl 944(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 948(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 952(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 956(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 960(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 964(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 976(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - adcl 980(%esp), %esi - adcl $0, %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 888(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - addl 888(%esp), %ebp - movl 40(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl 912(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 920(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 924(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 928(%esp), %esi - movl %esi, %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 932(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ebp, %eax - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 840(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - movl %esi, %eax - andl $1, %eax - addl 840(%esp), %ebp - movl 40(%esp), %ecx # 4-byte Reload - adcl 844(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 848(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 24(%esp), %ecx # 4-byte Reload - adcl 852(%esp), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 28(%esp), %ecx # 4-byte Reload - adcl 856(%esp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - adcl 860(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 864(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 868(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 
4-byte Reload - adcl 872(%esp), %ebp - movl 68(%esp), %esi # 4-byte Reload - adcl 876(%esp), %esi - adcl 880(%esp), %edi - movl 52(%esp), %ecx # 4-byte Reload - adcl 884(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 792(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 40(%esp), %ecx # 4-byte Reload - addl 792(%esp), %ecx - movl 36(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 820(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - adcl 824(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - adcl 828(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - adcl 836(%esp), %esi - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %ebp - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 744(%esp), %ecx - movl 1164(%esp), %eax - movl %eax, %edx - calll .LmulPv352x32 - andl $1, %edi - movl %edi, %ecx - addl 744(%esp), %ebp - movl 36(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 752(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 756(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 768(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - adcl 776(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 780(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 784(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 788(%esp), %esi - movl %esi, 48(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 696(%esp), %ecx - movl 1156(%esp), %eax - movl %eax, %edx - calll .LmulPv352x32 - movl 36(%esp), %ecx # 4-byte Reload - addl 696(%esp), %ecx - movl 24(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 712(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 716(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 720(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 724(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - adcl 
728(%esp), %esi - movl 52(%esp), %eax # 4-byte Reload - adcl 732(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 736(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %ebp - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 648(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - andl $1, %edi - movl %edi, %ecx - addl 648(%esp), %ebp - movl 24(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 28(%esp), %ebp # 4-byte Reload - adcl 656(%esp), %ebp - movl 32(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - adcl 676(%esp), %edi - adcl 680(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 688(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %esi # 4-byte Reload - adcl 692(%esp), %esi - adcl $0, %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 600(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 24(%esp), %ecx # 4-byte Reload - addl 600(%esp), %ecx - adcl 604(%esp), %ebp - movl %ebp, 28(%esp) # 4-byte Spill - movl 32(%esp), %ebp # 4-byte Reload - adcl 608(%esp), %ebp - movl 44(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 624(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 636(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 640(%esp), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - movl %ecx, %edi - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 552(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - andl $1, %esi - movl %esi, %eax - addl 552(%esp), %edi - movl 28(%esp), %ecx # 4-byte Reload - adcl 556(%esp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl %ebp, %edi - adcl 560(%esp), %edi - movl 44(%esp), %ecx # 4-byte Reload - adcl 564(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 568(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 572(%esp), %esi - movl 68(%esp), %ecx # 4-byte Reload - adcl 576(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 580(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 584(%esp), 
%ebp - movl 48(%esp), %ecx # 4-byte Reload - adcl 588(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 592(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 596(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 504(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 28(%esp), %ecx # 4-byte Reload - addl 504(%esp), %ecx - adcl 508(%esp), %edi - movl %edi, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 520(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 532(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - adcl 536(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %eax - movl %ecx, %edi - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 456(%esp), %ecx - movl 1164(%esp), %eax - movl %eax, %edx - calll .LmulPv352x32 - andl $1, %ebp - movl %ebp, %eax - addl 456(%esp), %edi - movl 32(%esp), %ecx # 4-byte Reload - adcl 460(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 44(%esp), %ebp # 4-byte Reload - adcl 464(%esp), %ebp - movl 60(%esp), %ecx # 4-byte Reload - adcl 468(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 472(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 476(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 480(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 484(%esp), %edi - adcl 488(%esp), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 492(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - adcl 496(%esp), %esi - movl 24(%esp), %ecx # 4-byte Reload - adcl 500(%esp), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 408(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 32(%esp), %ecx # 4-byte Reload - addl 408(%esp), %ecx - adcl 412(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 428(%esp), %ebp - adcl 432(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 440(%esp), %eax - 
movl %eax, 40(%esp) # 4-byte Spill - adcl 444(%esp), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 360(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - andl $1, %edi - movl %edi, %ecx - addl 360(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 368(%esp), %edi - movl 64(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 380(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - adcl 384(%esp), %esi - movl 48(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - adcl 392(%esp), %ebp - movl 36(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 312(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 44(%esp), %ecx # 4-byte Reload - addl 312(%esp), %ecx - adcl 316(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 332(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 340(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 24(%esp), %edi # 4-byte Reload - adcl 348(%esp), %edi - movl 28(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %eax - movl %ecx, %esi - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 264(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - andl $1, %ebp - movl %ebp, %ecx - addl 264(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 276(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - adcl 284(%esp), %esi - movl 48(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 
296(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 300(%esp), %edi - movl %edi, 24(%esp) # 4-byte Spill - movl 28(%esp), %edi # 4-byte Reload - adcl 304(%esp), %edi - movl 32(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 216(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 60(%esp), %ecx # 4-byte Reload - addl 216(%esp), %ecx - movl 64(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 224(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 232(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - adcl 252(%esp), %edi - movl %edi, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - movl %ecx, %ebp - imull 20(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 168(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - movl %esi, %ecx - andl $1, %ecx - addl 168(%esp), %ebp - movl 64(%esp), %esi # 4-byte Reload - adcl 172(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 176(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 184(%esp), %ebp - movl 48(%esp), %edi # 4-byte Reload - adcl 188(%esp), %edi - movl 40(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 120(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl %esi, %ecx - addl 120(%esp), %ecx - movl 68(%esp), %eax # 4-byte Reload - adcl 124(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 128(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 132(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - adcl 136(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 140(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 144(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - adcl 148(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 28(%esp), 
%eax # 4-byte Reload - adcl 152(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %ebp # 4-byte Reload - adcl 156(%esp), %ebp - movl 44(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 164(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - sbbl %esi, %esi - movl 20(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %edi - movl %eax, (%esp) - leal 72(%esp), %ecx - movl 1164(%esp), %eax - movl %eax, %edx - calll .LmulPv352x32 - andl $1, %esi - addl 72(%esp), %edi - movl 48(%esp), %edi # 4-byte Reload - movl 68(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 80(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 84(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - adcl 88(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - adcl 92(%esp), %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 36(%esp), %edx # 4-byte Reload - adcl 96(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 24(%esp), %edx # 4-byte Reload - adcl 100(%esp), %edx - movl %edx, 24(%esp) # 4-byte Spill - movl 28(%esp), %edx # 4-byte Reload - adcl 104(%esp), %edx - movl %edx, 28(%esp) # 4-byte Spill - adcl 108(%esp), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - adcl 112(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 116(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - adcl $0, %esi - movl 1164(%esp), %ebp - subl (%ebp), %eax - movl %ecx, %edx - sbbl 4(%ebp), %edx - movl 52(%esp), %ecx # 4-byte Reload - sbbl 8(%ebp), %ecx - sbbl 12(%ebp), %edi - movl %edi, 4(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - sbbl 16(%ebp), %edi - movl %edi, 8(%esp) # 4-byte Spill - movl 36(%esp), %edi # 4-byte Reload - sbbl 20(%ebp), %edi - movl %edi, 12(%esp) # 4-byte Spill - movl 24(%esp), %edi # 4-byte Reload - sbbl 24(%ebp), %edi - movl %edi, 16(%esp) # 4-byte Spill - movl 28(%esp), %ebx # 4-byte Reload - sbbl 28(%ebp), %ebx - movl 32(%esp), %edi # 4-byte Reload - sbbl 32(%ebp), %edi - movl %edi, 20(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - sbbl 36(%ebp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - sbbl 40(%ebp), %edi - movl %edi, %ebp - sbbl $0, %esi - andl $1, %esi - jne .LBB162_2 -# BB#1: - movl %ebx, 28(%esp) # 4-byte Spill -.LBB162_2: - movl %esi, %ebx - testb %bl, %bl - movl 68(%esp), %ebx # 4-byte Reload - jne .LBB162_4 -# BB#3: - movl %eax, %ebx -.LBB162_4: - movl 1152(%esp), %eax - movl %ebx, (%eax) - movl 56(%esp), %edi # 4-byte Reload - jne .LBB162_6 -# BB#5: - movl %edx, %edi -.LBB162_6: - movl %edi, 4(%eax) - movl 52(%esp), %edx # 4-byte Reload - jne .LBB162_8 -# BB#7: - movl %ecx, %edx -.LBB162_8: - movl %edx, 8(%eax) - jne .LBB162_10 -# BB#9: - movl 4(%esp), %ecx # 4-byte Reload - movl %ecx, 48(%esp) # 4-byte Spill -.LBB162_10: - movl 48(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%eax) - movl 40(%esp), %ecx # 4-byte Reload - jne .LBB162_12 -# BB#11: - movl 8(%esp), %ecx # 4-byte Reload -.LBB162_12: - movl %ecx, 16(%eax) - movl 36(%esp), %ecx # 4-byte Reload - jne .LBB162_14 -# BB#13: - movl 12(%esp), %ecx # 4-byte Reload -.LBB162_14: - movl %ecx, 20(%eax) - movl 24(%esp), %ecx # 4-byte Reload - jne .LBB162_16 -# BB#15: - movl 16(%esp), %ecx # 4-byte Reload -.LBB162_16: - movl %ecx, 24(%eax) - movl 
28(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 32(%esp), %ecx # 4-byte Reload - jne .LBB162_18 -# BB#17: - movl 20(%esp), %ecx # 4-byte Reload -.LBB162_18: - movl %ecx, 32(%eax) - movl 44(%esp), %ecx # 4-byte Reload - jne .LBB162_20 -# BB#19: - movl 60(%esp), %ecx # 4-byte Reload -.LBB162_20: - movl %ecx, 36(%eax) - movl 64(%esp), %ecx # 4-byte Reload - jne .LBB162_22 -# BB#21: - movl %ebp, %ecx -.LBB162_22: - movl %ecx, 40(%eax) - addl $1132, %esp # imm = 0x46C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end162: - .size mcl_fp_mont11L, .Lfunc_end162-mcl_fp_mont11L - - .globl mcl_fp_montNF11L - .align 16, 0x90 - .type mcl_fp_montNF11L,@function -mcl_fp_montNF11L: # @mcl_fp_montNF11L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1132, %esp # imm = 0x46C - calll .L163$pb -.L163$pb: - popl %ebx -.Ltmp24: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp24-.L163$pb), %ebx - movl 1164(%esp), %eax - movl -4(%eax), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1080(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 1080(%esp), %ebp - movl 1084(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl %ebp, %eax - imull %esi, %eax - movl 1124(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 1120(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 1116(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 1112(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 1108(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 1104(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 1100(%esp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 1096(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 1092(%esp), %esi - movl 1088(%esp), %edi - movl %eax, (%esp) - leal 1032(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - addl 1032(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 1036(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 1040(%esp), %edi - adcl 1044(%esp), %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - adcl 1048(%esp), %ebp - movl 28(%esp), %esi # 4-byte Reload - adcl 1052(%esp), %esi - movl 32(%esp), %eax # 4-byte Reload - adcl 1056(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1060(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1064(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1068(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1072(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1076(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 984(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 1028(%esp), %edx - movl 60(%esp), %ecx # 4-byte Reload - addl 984(%esp), %ecx - adcl 988(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 996(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - adcl 1000(%esp), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 32(%esp), %esi # 4-byte Reload - adcl 1004(%esp), %esi - movl 36(%esp), %eax # 4-byte Reload - adcl 1008(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1012(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), 
%eax # 4-byte Reload - adcl 1016(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1020(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1024(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 60(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 24(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 936(%esp), %ecx - movl 1164(%esp), %eax - movl %eax, %edx - calll .LmulPv352x32 - addl 936(%esp), %ebp - adcl 940(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 944(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 948(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 952(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - adcl 956(%esp), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - adcl 960(%esp), %esi - movl 52(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 976(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 980(%esp), %ebp - movl 1160(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 888(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 932(%esp), %eax - addl 888(%esp), %edi - movl 44(%esp), %ecx # 4-byte Reload - adcl 892(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 896(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 28(%esp), %ecx # 4-byte Reload - adcl 900(%esp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - adcl 904(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - adcl 908(%esp), %esi - movl 52(%esp), %ecx # 4-byte Reload - adcl 912(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 916(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 920(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 924(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - adcl 928(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl %eax, %ebp - adcl $0, %ebp - movl %edi, %eax - imull 24(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 840(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - addl 840(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 856(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl 860(%esp), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl 872(%esp), %edi - movl 68(%esp), %esi # 4-byte Reload - adcl 876(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 60(%esp) # 4-byte 
Spill - adcl 884(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 792(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 836(%esp), %eax - movl 44(%esp), %edx # 4-byte Reload - addl 792(%esp), %edx - movl 40(%esp), %ecx # 4-byte Reload - adcl 796(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 28(%esp), %ecx # 4-byte Reload - adcl 800(%esp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - adcl 804(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 808(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 812(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 816(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl 820(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - adcl 824(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 828(%esp), %ebp - movl 48(%esp), %ecx # 4-byte Reload - adcl 832(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl %edx, %esi - movl %esi, %eax - imull 24(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 744(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - addl 744(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 752(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 756(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - adcl 764(%esp), %esi - movl 56(%esp), %edi # 4-byte Reload - adcl 768(%esp), %edi - movl 64(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 780(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 784(%esp), %ebp - movl 44(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 696(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 740(%esp), %edx - movl 40(%esp), %eax # 4-byte Reload - addl 696(%esp), %eax - movl 28(%esp), %ecx # 4-byte Reload - adcl 700(%esp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - adcl 704(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 708(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl 712(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - adcl 716(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl 720(%esp), %edi - movl 68(%esp), %ecx # 4-byte Reload - adcl 724(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 728(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl 732(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - adcl 736(%esp), %esi - adcl $0, %edx - movl %edx, 40(%esp) # 4-byte Spill - movl %eax, %ebp - imull 24(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 648(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - addl 
648(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %ebp # 4-byte Reload - adcl 656(%esp), %ebp - movl 36(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 672(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 688(%esp), %esi - movl %esi, %edi - movl 40(%esp), %esi # 4-byte Reload - adcl 692(%esp), %esi - movl 1160(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 600(%esp), %ecx - movl 1156(%esp), %eax - movl %eax, %edx - calll .LmulPv352x32 - movl 644(%esp), %eax - movl 28(%esp), %ecx # 4-byte Reload - addl 600(%esp), %ecx - adcl 604(%esp), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - movl 36(%esp), %edx # 4-byte Reload - adcl 608(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 612(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 616(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 620(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 624(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 628(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - adcl 632(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - adcl 636(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - adcl 640(%esp), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl %eax, %ebp - adcl $0, %ebp - movl %ecx, %esi - movl %esi, %eax - imull 24(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 552(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - addl 552(%esp), %esi - movl 32(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %edi # 4-byte Reload - adcl 560(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 576(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 596(%esp), %ebp - movl %ebp, 28(%esp) # 4-byte Spill - movl 1160(%esp), %ecx - movl %ecx, %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 504(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 548(%esp), %edx - movl 32(%esp), %eax # 4-byte Reload - addl 504(%esp), %eax - adcl 508(%esp), %edi - movl %edi, 36(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte 
Reload - adcl 512(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 516(%esp), %ebp - movl 64(%esp), %ecx # 4-byte Reload - adcl 520(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - adcl 524(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 528(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 532(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 536(%esp), %edi - movl 40(%esp), %ecx # 4-byte Reload - adcl 540(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 28(%esp), %ecx # 4-byte Reload - adcl 544(%esp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 32(%esp) # 4-byte Spill - movl %eax, %esi - imull 24(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 456(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - addl 456(%esp), %esi - movl 36(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 468(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - adcl 480(%esp), %esi - movl 48(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 488(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 28(%esp), %edi # 4-byte Reload - adcl 496(%esp), %edi - movl 32(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 408(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 452(%esp), %edx - movl 36(%esp), %ecx # 4-byte Reload - addl 408(%esp), %ecx - movl 52(%esp), %ebp # 4-byte Reload - adcl 412(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 428(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 440(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 444(%esp), %edi - movl %edi, 28(%esp) # 4-byte Spill - movl 32(%esp), %edi # 4-byte Reload - adcl 448(%esp), %edi - adcl $0, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 24(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 360(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - addl 360(%esp), %esi - adcl 364(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 372(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 
380(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - adcl 392(%esp), %ebp - movl 28(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - adcl 400(%esp), %edi - movl %edi, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 312(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 356(%esp), %edx - movl 52(%esp), %ecx # 4-byte Reload - addl 312(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 320(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - adcl 332(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 340(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 52(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 24(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 264(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - addl 264(%esp), %esi - movl 56(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 276(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl %edi, %esi - adcl 284(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - adcl 292(%esp), %edi - movl 28(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 216(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 260(%esp), %edx - movl 56(%esp), %ecx # 4-byte Reload - addl 216(%esp), %ecx - movl 64(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 224(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 232(%esp), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 240(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 
28(%esp), %ebp # 4-byte Reload - adcl 244(%esp), %ebp - movl 32(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 24(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 168(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - addl 168(%esp), %edi - movl 64(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 176(%esp), %esi - movl 60(%esp), %edi # 4-byte Reload - adcl 180(%esp), %edi - movl 48(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 196(%esp), %ebp - movl %ebp, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %ebp # 4-byte Reload - adcl 204(%esp), %ebp - movl 52(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 1160(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 120(%esp), %ecx - movl 1156(%esp), %edx - calll .LmulPv352x32 - movl 164(%esp), %edx - movl 64(%esp), %ecx # 4-byte Reload - addl 120(%esp), %ecx - adcl 124(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - adcl 128(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 132(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - adcl 136(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 140(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - adcl 144(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 148(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl 152(%esp), %ebp - movl %ebp, 36(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 156(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %edi - movl %eax, (%esp) - leal 72(%esp), %ecx - movl 1164(%esp), %edx - calll .LmulPv352x32 - addl 72(%esp), %edi - movl 48(%esp), %edi # 4-byte Reload - movl 68(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 80(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl 84(%esp), %edi - adcl 88(%esp), %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - adcl 92(%esp), %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 28(%esp), %edx # 4-byte Reload - adcl 96(%esp), %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 32(%esp), %edx # 4-byte Reload - adcl 100(%esp), %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 36(%esp), %edx # 4-byte Reload - adcl 104(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl 108(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 56(%esp), 
%edx # 4-byte Reload - adcl 112(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 116(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl %eax, %edx - movl 1164(%esp), %ebx - subl (%ebx), %edx - movl %ecx, %esi - sbbl 4(%ebx), %esi - movl %edi, %ecx - sbbl 8(%ebx), %ecx - movl 44(%esp), %eax # 4-byte Reload - sbbl 12(%ebx), %eax - movl 40(%esp), %ebp # 4-byte Reload - sbbl 16(%ebx), %ebp - movl %ebp, 4(%esp) # 4-byte Spill - movl 28(%esp), %ebp # 4-byte Reload - sbbl 20(%ebx), %ebp - movl %ebp, 8(%esp) # 4-byte Spill - movl 32(%esp), %ebp # 4-byte Reload - sbbl 24(%ebx), %ebp - movl %ebp, 12(%esp) # 4-byte Spill - movl 36(%esp), %ebp # 4-byte Reload - sbbl 28(%ebx), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - sbbl 32(%ebx), %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - sbbl 36(%ebx), %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - sbbl 40(%ebx), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl %ebp, %ebx - sarl $31, %ebx - testl %ebx, %ebx - movl 68(%esp), %ebx # 4-byte Reload - js .LBB163_2 -# BB#1: - movl %edx, %ebx -.LBB163_2: - movl 1152(%esp), %edx - movl %ebx, (%edx) - movl 60(%esp), %ebp # 4-byte Reload - js .LBB163_4 -# BB#3: - movl %esi, %ebp -.LBB163_4: - movl %ebp, 4(%edx) - js .LBB163_6 -# BB#5: - movl %ecx, %edi -.LBB163_6: - movl %edi, 8(%edx) - movl 44(%esp), %ecx # 4-byte Reload - js .LBB163_8 -# BB#7: - movl %eax, %ecx -.LBB163_8: - movl %ecx, 12(%edx) - movl 40(%esp), %eax # 4-byte Reload - js .LBB163_10 -# BB#9: - movl 4(%esp), %eax # 4-byte Reload -.LBB163_10: - movl %eax, 16(%edx) - movl 28(%esp), %eax # 4-byte Reload - js .LBB163_12 -# BB#11: - movl 8(%esp), %eax # 4-byte Reload -.LBB163_12: - movl %eax, 20(%edx) - movl 32(%esp), %eax # 4-byte Reload - js .LBB163_14 -# BB#13: - movl 12(%esp), %eax # 4-byte Reload -.LBB163_14: - movl %eax, 24(%edx) - movl 36(%esp), %eax # 4-byte Reload - js .LBB163_16 -# BB#15: - movl 16(%esp), %eax # 4-byte Reload -.LBB163_16: - movl %eax, 28(%edx) - movl 52(%esp), %eax # 4-byte Reload - js .LBB163_18 -# BB#17: - movl 20(%esp), %eax # 4-byte Reload -.LBB163_18: - movl %eax, 32(%edx) - movl 56(%esp), %eax # 4-byte Reload - js .LBB163_20 -# BB#19: - movl 24(%esp), %eax # 4-byte Reload -.LBB163_20: - movl %eax, 36(%edx) - movl 64(%esp), %eax # 4-byte Reload - js .LBB163_22 -# BB#21: - movl 48(%esp), %eax # 4-byte Reload -.LBB163_22: - movl %eax, 40(%edx) - addl $1132, %esp # imm = 0x46C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end163: - .size mcl_fp_montNF11L, .Lfunc_end163-mcl_fp_montNF11L - - .globl mcl_fp_montRed11L - .align 16, 0x90 - .type mcl_fp_montRed11L,@function -mcl_fp_montRed11L: # @mcl_fp_montRed11L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $668, %esp # imm = 0x29C - calll .L164$pb -.L164$pb: - popl %eax -.Ltmp25: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp25-.L164$pb), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 696(%esp), %edx - movl -4(%edx), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 692(%esp), %ecx - movl (%ecx), %ebx - movl %ebx, 60(%esp) # 4-byte Spill - movl 4(%ecx), %eax - movl %eax, 64(%esp) # 4-byte Spill - imull %esi, %ebx - movl 84(%ecx), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 80(%ecx), %esi - movl %esi, 96(%esp) # 4-byte Spill - movl 76(%ecx), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 72(%ecx), %esi - movl %esi, 104(%esp) # 4-byte Spill - movl 68(%ecx), %eax - movl %eax, 108(%esp) # 4-byte 
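The tail above belongs to mcl_fp_montNF11L, an 11-limb (352-bit) word-serial Montgomery multiplication: each round loads the next 32-bit word of the multiplier, accumulates x*y[i] through .LmulPv352x32, forms m from the low limb and the cached -p^{-1} mod 2^32 (the imull 24(%esp)), folds in m*p with a second .LmulPv352x32 call, and finally picks t or t-p with the sarl $31/js selection chain. A minimal CIOS-style sketch of that flow in Go, assuming 32-bit limbs; montMul, npInv, and the parameter names are illustrative, not mcl's API:

```go
package main

import (
	"fmt"
	"math/bits"
)

// npInv returns -p0^{-1} mod 2^32 by Newton iteration (p0 must be odd);
// this is the constant the assembly keeps spilled and feeds to imull.
func npInv(p0 uint32) uint32 {
	inv := p0 // correct to 3 bits for any odd p0
	for i := 0; i < 5; i++ {
		inv *= 2 - p0*inv // each step doubles the number of correct bits
	}
	return -inv
}

// montMul computes x*y*R^{-1} mod p with R = 2^(32n), one multiplier word
// per outer iteration, mirroring the round structure of mcl_fp_montNF11L.
func montMul(x, y, p []uint32, np uint32) []uint32 {
	n := len(p)
	t := make([]uint32, n+2)
	for i := 0; i < n; i++ {
		var c uint64
		for j := 0; j < n; j++ { // t += x * y[i]  (first mulPv call)
			s := uint64(t[j]) + uint64(x[j])*uint64(y[i]) + c
			t[j], c = uint32(s), s>>32
		}
		s := uint64(t[n]) + c
		t[n], t[n+1] = uint32(s), uint32(s>>32)

		m := t[0] * np // the imull against -p^{-1} mod 2^32
		s = uint64(t[0]) + uint64(m)*uint64(p[0])
		c = s >> 32 // the low word cancels to zero by choice of m
		for j := 1; j < n; j++ { // t = (t + m*p) >> 32  (second mulPv call)
			s = uint64(t[j]) + uint64(m)*uint64(p[j]) + c
			t[j-1], c = uint32(s), s>>32
		}
		s = uint64(t[n]) + c
		t[n-1], t[n] = uint32(s), t[n+1]+uint32(s>>32)
	}
	// final conditional subtraction (the sbbl chain and js selects above)
	r := make([]uint32, n)
	var b uint32
	for i := 0; i < n; i++ {
		r[i], b = bits.Sub32(t[i], p[i], b)
	}
	if t[n] == 0 && b != 0 {
		copy(r, t[:n]) // t was already < p
	}
	return r
}

func main() {
	p := []uint32{0xfffffffb} // 2^32 - 5, prime; a toy one-limb "field"
	np := npInv(p[0])
	r2 := []uint32{25} // R^2 mod p, since R mod p = 2^32 - p[0] = 5
	x := []uint32{123456789}
	xm := montMul(x, r2, p, np)                  // to Montgomery form: x*R mod p
	fmt.Println(montMul(xm, []uint32{1}, p, np)) // back out: [123456789]
}
```

The generated assembly fully unrolls this per limb and keeps every intermediate in a stack spill slot, but the carry discipline is the same chain the sketch expresses with 64-bit accumulators.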
Spill - movl 64(%ecx), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 60(%ecx), %esi - movl %esi, 124(%esp) # 4-byte Spill - movl 56(%ecx), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 52(%ecx), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 48(%ecx), %esi - movl %esi, 128(%esp) # 4-byte Spill - movl 44(%ecx), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 40(%ecx), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 36(%ecx), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 32(%ecx), %esi - movl %esi, 88(%esp) # 4-byte Spill - movl 28(%ecx), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 24(%ecx), %ebp - movl 20(%ecx), %edi - movl 16(%ecx), %esi - movl 12(%ecx), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 8(%ecx), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl (%edx), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 40(%edx), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 36(%edx), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 32(%edx), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 28(%edx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 24(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 20(%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 16(%edx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 12(%edx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 8(%edx), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 4(%edx), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl %ebx, (%esp) - leal 616(%esp), %ecx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - movl 60(%esp), %eax # 4-byte Reload - addl 616(%esp), %eax - movl 64(%esp), %ecx # 4-byte Reload - adcl 620(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 624(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 632(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - adcl 636(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - adcl 640(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - movl 120(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 568(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - andl $1, %edi - movl %edi, %ecx - addl 568(%esp), %esi - movl 56(%esp), %edx # 4-byte Reload - adcl 572(%esp), %edx - movl 48(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 
584(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 600(%esp), %edi - movl 100(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 120(%esp) # 4-byte Spill - movl 108(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl %edx, %ebp - movl %ebp, %eax - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 520(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 520(%esp), %ebp - movl 48(%esp), %ecx # 4-byte Reload - adcl 524(%esp), %ecx - movl 52(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 548(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl 124(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl 120(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, %esi - movl %esi, 108(%esp) # 4-byte Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %esi - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 472(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 472(%esp), %esi - movl 52(%esp), %ecx # 4-byte Reload - adcl 476(%esp), %ecx - movl 60(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - 
adcl 496(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 124(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 120(%esp) # 4-byte Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - movl 104(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - movl 80(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 56(%esp) # 4-byte Folded Spill - movl %ecx, %ebp - movl %ebp, %eax - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 424(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 424(%esp), %ebp - movl 60(%esp), %ecx # 4-byte Reload - adcl 428(%esp), %ecx - movl 64(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 440(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 444(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl 464(%esp), %ebp - movl 124(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 104(%esp) # 4-byte Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl %ecx, %esi - movl %esi, %eax - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 376(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 376(%esp), %esi - movl 64(%esp), %ecx # 4-byte Reload - adcl 380(%esp), %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %esi # 4-byte Reload - adcl 404(%esp), %esi - movl 116(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 412(%esp), %ebp - movl %ebp, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 
4-byte Reload - adcl 416(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - movl 96(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 56(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 328(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 328(%esp), %edi - movl 76(%esp), %ecx # 4-byte Reload - adcl 332(%esp), %ecx - movl 88(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 100(%esp), %edi # 4-byte Reload - adcl 344(%esp), %edi - movl 132(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl 352(%esp), %esi - movl %esi, 128(%esp) # 4-byte Spill - movl 116(%esp), %esi # 4-byte Reload - adcl 356(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 96(%esp) # 4-byte Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl %ecx, %ebp - movl %ebp, %eax - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 280(%esp), %ecx - movl 696(%esp), %eax - movl %eax, %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 280(%esp), %ebp - movl 88(%esp), %ebp # 4-byte Reload - adcl 284(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 292(%esp), %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 132(%esp), %edi # 4-byte Reload - adcl 296(%esp), %edi - movl 128(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - adcl 304(%esp), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl %ebp, %eax - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 232(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 232(%esp), %ebp - movl 84(%esp), %ebp # 4-byte Reload - adcl 236(%esp), %ebp - movl 100(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax 
- movl %eax, 100(%esp) # 4-byte Spill - adcl 244(%esp), %edi - movl %edi, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 276(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - movl 80(%esp), %esi # 4-byte Reload - adcl $0, %esi - movl 56(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl %ebp, %eax - imull 68(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 184(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 184(%esp), %ebp - movl 100(%esp), %ecx # 4-byte Reload - adcl 188(%esp), %ecx - movl 132(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 80(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, %ebp - movl 68(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 136(%esp), %ecx - movl 696(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - calll .LmulPv352x32 - addl 136(%esp), %esi - movl 132(%esp), %eax # 4-byte Reload - adcl 140(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl %eax, %edi - movl 128(%esp), %edx # 4-byte Reload - adcl 144(%esp), %edx - movl %edx, 128(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 148(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 152(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 156(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 164(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 168(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 
176(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 80(%esp), %ebx # 4-byte Reload - adcl 180(%esp), %ebx - movl %ebx, 80(%esp) # 4-byte Spill - adcl $0, %ebp - subl 12(%esp), %edi # 4-byte Folded Reload - sbbl 4(%esp), %edx # 4-byte Folded Reload - sbbl 8(%esp), %ecx # 4-byte Folded Reload - sbbl 16(%esp), %esi # 4-byte Folded Reload - movl 124(%esp), %eax # 4-byte Reload - sbbl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - sbbl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - sbbl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - sbbl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - sbbl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - sbbl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl %ebx, %eax - movl %ebp, %ebx - sbbl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - sbbl $0, %ebx - andl $1, %ebx - jne .LBB164_2 -# BB#1: - movl %esi, 112(%esp) # 4-byte Spill -.LBB164_2: - testb %bl, %bl - movl 132(%esp), %esi # 4-byte Reload - jne .LBB164_4 -# BB#3: - movl %edi, %esi -.LBB164_4: - movl 688(%esp), %edi - movl %esi, (%edi) - movl 104(%esp), %esi # 4-byte Reload - jne .LBB164_6 -# BB#5: - movl %edx, 128(%esp) # 4-byte Spill -.LBB164_6: - movl 128(%esp), %edx # 4-byte Reload - movl %edx, 4(%edi) - movl 116(%esp), %edx # 4-byte Reload - jne .LBB164_8 -# BB#7: - movl %ecx, %edx -.LBB164_8: - movl %edx, 8(%edi) - movl 112(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%edi) - movl 92(%esp), %edx # 4-byte Reload - movl 124(%esp), %ecx # 4-byte Reload - jne .LBB164_10 -# BB#9: - movl 64(%esp), %ecx # 4-byte Reload -.LBB164_10: - movl %ecx, 16(%edi) - movl 96(%esp), %ecx # 4-byte Reload - movl 120(%esp), %eax # 4-byte Reload - jne .LBB164_12 -# BB#11: - movl 68(%esp), %eax # 4-byte Reload -.LBB164_12: - movl %eax, 20(%edi) - movl 80(%esp), %eax # 4-byte Reload - movl 108(%esp), %ebp # 4-byte Reload - jne .LBB164_14 -# BB#13: - movl 72(%esp), %ebp # 4-byte Reload -.LBB164_14: - movl %ebp, 24(%edi) - jne .LBB164_16 -# BB#15: - movl 76(%esp), %esi # 4-byte Reload -.LBB164_16: - movl %esi, 28(%edi) - jne .LBB164_18 -# BB#17: - movl 84(%esp), %edx # 4-byte Reload -.LBB164_18: - movl %edx, 32(%edi) - jne .LBB164_20 -# BB#19: - movl 88(%esp), %ecx # 4-byte Reload -.LBB164_20: - movl %ecx, 36(%edi) - jne .LBB164_22 -# BB#21: - movl 100(%esp), %eax # 4-byte Reload -.LBB164_22: - movl %eax, 40(%edi) - addl $668, %esp # imm = 0x29C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end164: - .size mcl_fp_montRed11L, .Lfunc_end164-mcl_fp_montRed11L - - .globl mcl_fp_addPre11L - .align 16, 0x90 - .type mcl_fp_addPre11L,@function -mcl_fp_addPre11L: # @mcl_fp_addPre11L -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - movl 20(%esp), %ecx - addl (%ecx), %edx - adcl 4(%ecx), %esi - movl 8(%eax), %edi - adcl 8(%ecx), %edi - movl 16(%esp), %ebx - movl %edx, (%ebx) - movl 12(%ecx), %edx - movl %esi, 4(%ebx) - movl 16(%ecx), %esi - adcl 12(%eax), %edx - adcl 16(%eax), %esi - movl %edi, 8(%ebx) - movl 20(%eax), %edi - movl %edx, 12(%ebx) - movl 20(%ecx), %edx - adcl %edi, %edx - movl 24(%eax), %edi - movl %esi, 16(%ebx) - movl 24(%ecx), %esi - 
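mcl_fp_montRed11L is the standalone reduction half of the same scheme: it takes an already-formed 22-limb product and runs eleven folding rounds, each computing m = t[i] * (-p^{-1} mod 2^32) and accumulating m*p at limb offset i (the repeated .LmulPv352x32 calls against the modulus pointer reloaded from 696(%esp)), then finishes with the usual trial subtraction. A word-serial REDC sketch under the same 32-bit-limb assumptions; montRed and np are illustrative names, and the np constant in main is precomputed for the toy modulus:

```go
package main

import (
	"fmt"
	"math/bits"
)

// montRed reduces a 2n-limb t < p*R to t*R^{-1} mod p, with R = 2^(32n)
// and np = -p^{-1} mod 2^32, following mcl_fp_montRed11L's round structure.
func montRed(t, p []uint32, np uint32) []uint32 {
	n := len(p)
	var top uint32 // carry escaping past limb 2n (the sbbl/andl $1 flag)
	for i := 0; i < n; i++ {
		m := t[i] * np // chosen so that limb i of t cancels to zero
		var c uint64
		for j := 0; j < n; j++ { // t += m*p << (32*i)
			s := uint64(t[i+j]) + uint64(m)*uint64(p[j]) + c
			t[i+j], c = uint32(s), s>>32
		}
		for k := i + n; k < 2*n; k++ { // ripple the carry (the adcl $0 runs)
			s := uint64(t[k]) + c
			t[k], c = uint32(s), s>>32
		}
		top += uint32(c)
	}
	// the reduced value sits in the upper half; one trial subtraction of p
	r := make([]uint32, n)
	var b uint32
	for i := 0; i < n; i++ {
		r[i], b = bits.Sub32(t[n+i], p[i], b)
	}
	if top == 0 && b != 0 {
		copy(r, t[n:]) // already < p
	}
	return r
}

func main() {
	p := []uint32{0xfffffffb}      // 2^32 - 5
	np := uint32(0xcccccccd)       // -p^{-1} mod 2^32 for this modulus
	t := []uint32{0, 5}            // the value 5*R
	fmt.Println(montRed(t, p, np)) // [5], since (5*R)*R^{-1} = 5
}
```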
adcl %edi, %esi - movl 28(%eax), %edi - movl %edx, 20(%ebx) - movl 28(%ecx), %edx - adcl %edi, %edx - movl 32(%eax), %edi - movl %esi, 24(%ebx) - movl 32(%ecx), %esi - adcl %edi, %esi - movl 36(%eax), %edi - movl %edx, 28(%ebx) - movl 36(%ecx), %edx - adcl %edi, %edx - movl %esi, 32(%ebx) - movl %edx, 36(%ebx) - movl 40(%eax), %eax - movl 40(%ecx), %ecx - adcl %eax, %ecx - movl %ecx, 40(%ebx) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end165: - .size mcl_fp_addPre11L, .Lfunc_end165-mcl_fp_addPre11L - - .globl mcl_fp_subPre11L - .align 16, 0x90 - .type mcl_fp_subPre11L,@function -mcl_fp_subPre11L: # @mcl_fp_subPre11L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edi - xorl %eax, %eax - movl 28(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%ecx), %ebx - sbbl 8(%edx), %ebx - movl 20(%esp), %ebp - movl %esi, (%ebp) - movl 12(%ecx), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ebp) - movl 16(%ecx), %edi - sbbl 16(%edx), %edi - movl %ebx, 8(%ebp) - movl 20(%edx), %ebx - movl %esi, 12(%ebp) - movl 20(%ecx), %esi - sbbl %ebx, %esi - movl 24(%edx), %ebx - movl %edi, 16(%ebp) - movl 24(%ecx), %edi - sbbl %ebx, %edi - movl 28(%edx), %ebx - movl %esi, 20(%ebp) - movl 28(%ecx), %esi - sbbl %ebx, %esi - movl 32(%edx), %ebx - movl %edi, 24(%ebp) - movl 32(%ecx), %edi - sbbl %ebx, %edi - movl 36(%edx), %ebx - movl %esi, 28(%ebp) - movl 36(%ecx), %esi - sbbl %ebx, %esi - movl %edi, 32(%ebp) - movl %esi, 36(%ebp) - movl 40(%edx), %edx - movl 40(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 40(%ebp) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end166: - .size mcl_fp_subPre11L, .Lfunc_end166-mcl_fp_subPre11L - - .globl mcl_fp_shr1_11L - .align 16, 0x90 - .type mcl_fp_shr1_11L,@function -mcl_fp_shr1_11L: # @mcl_fp_shr1_11L -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %ecx - movl 4(%eax), %edx - shrdl $1, %edx, %ecx - movl 8(%esp), %esi - movl %ecx, (%esi) - movl 8(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 4(%esi) - movl 12(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 8(%esi) - movl 16(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 12(%esi) - movl 20(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 16(%esi) - movl 24(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 20(%esi) - movl 28(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 24(%esi) - movl 32(%eax), %ecx - shrdl $1, %ecx, %edx - movl %edx, 28(%esi) - movl 36(%eax), %edx - shrdl $1, %edx, %ecx - movl %ecx, 32(%esi) - movl 40(%eax), %eax - shrdl $1, %eax, %edx - movl %edx, 36(%esi) - shrl %eax - movl %eax, 40(%esi) - popl %esi - retl -.Lfunc_end167: - .size mcl_fp_shr1_11L, .Lfunc_end167-mcl_fp_shr1_11L - - .globl mcl_fp_add11L - .align 16, 0x90 - .type mcl_fp_add11L,@function -mcl_fp_add11L: # @mcl_fp_add11L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $32, %esp - movl 60(%esp), %edi - movl (%edi), %ecx - movl 4(%edi), %eax - movl 56(%esp), %esi - addl (%esi), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl %ecx, %ebp - adcl 4(%esi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 8(%edi), %eax - adcl 8(%esi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 12(%esi), %eax - movl 16(%esi), %ecx - adcl 12(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - adcl 16(%edi), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 20(%esi), %eax - adcl 20(%edi), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 24(%esi), %eax - adcl 24(%edi), %eax - 
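After the Montgomery routines come three plain carry chains: mcl_fp_addPre11L and mcl_fp_subPre11L ripple adcl/sbbl across the 11 limbs and return the final carry or borrow in %eax (the sbbl %eax,%eax / andl $1 epilogue), and mcl_fp_shr1_11L shifts the whole number right one bit, each shrdl pulling the next limb's low bit in at the top. The same patterns in Go, with illustrative names:

```go
package main

import (
	"fmt"
	"math/bits"
)

// addPre: z = x + y over equal-length limb slices, returning the final
// carry, like mcl_fp_addPre11L's adcl chain.
func addPre(z, x, y []uint32) uint32 {
	var c uint32
	for i := range x {
		z[i], c = bits.Add32(x[i], y[i], c)
	}
	return c
}

// subPre: z = x - y, returning the final borrow (mcl_fp_subPre11L).
func subPre(z, x, y []uint32) uint32 {
	var b uint32
	for i := range x {
		z[i], b = bits.Sub32(x[i], y[i], b)
	}
	return b
}

// shr1: one-bit logical right shift across limbs (mcl_fp_shr1_11L); each
// step is one shrdl, and the last limb gets the plain shrl.
func shr1(z, x []uint32) {
	n := len(x)
	for i := 0; i < n-1; i++ {
		z[i] = x[i]>>1 | x[i+1]<<31
	}
	z[n-1] = x[n-1] >> 1
}

func main() {
	x := []uint32{0xffffffff, 1} // the value 2^33 - 1
	y := []uint32{1, 0}
	z := make([]uint32, 2)
	fmt.Println(addPre(z, x, y), z) // 0 [0 2]  (the sum 2^33)
	shr1(z, z)
	fmt.Println(z) // [0 1]  (2^32)
}
```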
movl %eax, 8(%esp) # 4-byte Spill - movl 28(%esi), %ebx - adcl 28(%edi), %ebx - movl %ebx, (%esp) # 4-byte Spill - movl 32(%esi), %ecx - adcl 32(%edi), %ecx - movl 36(%esi), %eax - adcl 36(%edi), %eax - movl 40(%esi), %edx - adcl 40(%edi), %edx - movl 52(%esp), %esi - movl %ebp, (%esi) - movl 28(%esp), %edi # 4-byte Reload - movl %edi, 4(%esi) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 8(%esi) - movl 20(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%esi) - movl 16(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%esi) - movl 12(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%esi) - movl 8(%esp), %ebp # 4-byte Reload - movl %ebp, 24(%esi) - movl %ebx, 28(%esi) - movl %ecx, 32(%esi) - movl %eax, 36(%esi) - movl %edx, 40(%esi) - sbbl %ebx, %ebx - andl $1, %ebx - movl 64(%esp), %ebp - movl 4(%esp), %edi # 4-byte Reload - subl (%ebp), %edi - movl %edi, 4(%esp) # 4-byte Spill - movl 28(%esp), %edi # 4-byte Reload - sbbl 4(%ebp), %edi - movl %edi, 28(%esp) # 4-byte Spill - movl 24(%esp), %edi # 4-byte Reload - sbbl 8(%ebp), %edi - movl %edi, 24(%esp) # 4-byte Spill - movl 20(%esp), %edi # 4-byte Reload - sbbl 12(%ebp), %edi - movl %edi, 20(%esp) # 4-byte Spill - movl 16(%esp), %edi # 4-byte Reload - sbbl 16(%ebp), %edi - movl %edi, 16(%esp) # 4-byte Spill - movl 12(%esp), %edi # 4-byte Reload - sbbl 20(%ebp), %edi - movl %edi, 12(%esp) # 4-byte Spill - movl 8(%esp), %edi # 4-byte Reload - sbbl 24(%ebp), %edi - movl %edi, 8(%esp) # 4-byte Spill - movl (%esp), %edi # 4-byte Reload - sbbl 28(%ebp), %edi - movl %edi, (%esp) # 4-byte Spill - sbbl 32(%ebp), %ecx - sbbl 36(%ebp), %eax - sbbl 40(%ebp), %edx - movl %edx, %edi - sbbl $0, %ebx - testb $1, %bl - jne .LBB168_2 -# BB#1: # %nocarry - movl 4(%esp), %ebx # 4-byte Reload - movl %ebx, (%esi) - movl 28(%esp), %ebx # 4-byte Reload - movl %ebx, 4(%esi) - movl 24(%esp), %ebx # 4-byte Reload - movl %ebx, 8(%esi) - movl 20(%esp), %ebx # 4-byte Reload - movl %ebx, 12(%esi) - movl 16(%esp), %ebx # 4-byte Reload - movl %ebx, 16(%esi) - movl 12(%esp), %ebx # 4-byte Reload - movl %ebx, 20(%esi) - movl 8(%esp), %ebx # 4-byte Reload - movl %ebx, 24(%esi) - movl (%esp), %edx # 4-byte Reload - movl %edx, 28(%esi) - movl %ecx, 32(%esi) - movl %eax, 36(%esi) - movl %edi, 40(%esi) -.LBB168_2: # %carry - addl $32, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end168: - .size mcl_fp_add11L, .Lfunc_end168-mcl_fp_add11L - - .globl mcl_fp_addNF11L - .align 16, 0x90 - .type mcl_fp_addNF11L,@function -mcl_fp_addNF11L: # @mcl_fp_addNF11L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $80, %esp - movl 108(%esp), %edx - movl (%edx), %eax - movl 4(%edx), %ecx - movl 104(%esp), %esi - addl (%esi), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 4(%esi), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 40(%edx), %ebx - movl 36(%edx), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 32(%edx), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 28(%edx), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 24(%edx), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 20(%edx), %ebp - movl 16(%edx), %edi - movl 12(%edx), %eax - movl 8(%edx), %ecx - adcl 8(%esi), %ecx - adcl 12(%esi), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 16(%esi), %edi - movl %edi, 48(%esp) # 4-byte Spill - adcl 20(%esi), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 24(%esi), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 28(%esi), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 72(%esp), 
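mcl_fp_add11L combines those chains into a field addition: it stores the raw sum, trial-subtracts the modulus, and commits the reduced copy unless the subtraction borrowed beyond the addition's carry (the sbbl $0 / testb $1 pair guarding the nocarry block). mcl_fp_addNF11L makes the same choice with sign-driven conditional moves instead of a store-then-overwrite branch. A sketch of the branching variant, assuming both inputs already reduced below p:

```go
package main

import (
	"fmt"
	"math/bits"
)

// fpAdd: z = x + y mod p for x, y < p, following mcl_fp_add11L: raw add,
// trial subtraction of p, then keep whichever copy is the true residue.
func fpAdd(z, x, y, p []uint32) {
	var c uint32
	for i := range x {
		z[i], c = bits.Add32(x[i], y[i], c)
	}
	d := make([]uint32, len(p))
	var b uint32
	for i := range p {
		d[i], b = bits.Sub32(z[i], p[i], b)
	}
	// reduce when the sum overflowed the limbs or is still >= p
	if c == 1 || b == 0 {
		copy(z, d) // the nocarry block's stores
	}
}

func main() {
	p := []uint32{0xfffffffb, 0xffffffff} // a toy 2-limb modulus
	x := []uint32{0xfffffffa, 0xffffffff} // p - 1
	y := []uint32{2, 0}
	z := make([]uint32, 2)
	fpAdd(z, x, y, p)
	fmt.Println(z) // [1 0], i.e. (p-1) + 2 = 1 mod p
}
```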
%edx # 4-byte Reload - adcl 32(%esi), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 36(%esi), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl %ecx, %edx - adcl 40(%esi), %ebx - movl %ebx, 56(%esp) # 4-byte Spill - movl 112(%esp), %ebx - movl 52(%esp), %esi # 4-byte Reload - subl (%ebx), %esi - movl 60(%esp), %ecx # 4-byte Reload - sbbl 4(%ebx), %ecx - movl %ecx, (%esp) # 4-byte Spill - movl %edx, %ecx - sbbl 8(%ebx), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - sbbl 12(%ebx), %eax - movl %eax, 8(%esp) # 4-byte Spill - sbbl 16(%ebx), %edi - movl %edi, 12(%esp) # 4-byte Spill - sbbl 20(%ebx), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - movl %eax, %ecx - sbbl 24(%ebx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - sbbl 28(%ebx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - sbbl 32(%ebx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - movl %edi, %ecx - movl %edi, %ebp - sbbl 36(%ebx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - movl %ecx, %edi - sbbl 40(%ebx), %edi - movl %edi, 36(%esp) # 4-byte Spill - movl %edi, %ebx - movl 52(%esp), %edi # 4-byte Reload - sarl $31, %ebx - testl %ebx, %ebx - js .LBB169_2 -# BB#1: - movl %esi, %edi -.LBB169_2: - movl 100(%esp), %esi - movl %edi, (%esi) - movl 60(%esp), %edi # 4-byte Reload - js .LBB169_4 -# BB#3: - movl (%esp), %edi # 4-byte Reload -.LBB169_4: - movl %edi, 4(%esi) - movl %eax, %edi - js .LBB169_6 -# BB#5: - movl 4(%esp), %edx # 4-byte Reload -.LBB169_6: - movl %edx, 8(%esi) - movl %ebp, %ecx - movl 72(%esp), %edx # 4-byte Reload - movl 40(%esp), %eax # 4-byte Reload - js .LBB169_8 -# BB#7: - movl 8(%esp), %eax # 4-byte Reload -.LBB169_8: - movl %eax, 12(%esi) - movl 76(%esp), %eax # 4-byte Reload - movl 44(%esp), %ebp # 4-byte Reload - js .LBB169_10 -# BB#9: - movl 12(%esp), %ebx # 4-byte Reload - movl %ebx, 48(%esp) # 4-byte Spill -.LBB169_10: - movl 48(%esp), %ebx # 4-byte Reload - movl %ebx, 16(%esi) - js .LBB169_12 -# BB#11: - movl 16(%esp), %ebp # 4-byte Reload -.LBB169_12: - movl %ebp, 20(%esi) - js .LBB169_14 -# BB#13: - movl 20(%esp), %edi # 4-byte Reload -.LBB169_14: - movl %edi, 24(%esi) - js .LBB169_16 -# BB#15: - movl 24(%esp), %eax # 4-byte Reload -.LBB169_16: - movl %eax, 28(%esi) - js .LBB169_18 -# BB#17: - movl 28(%esp), %edx # 4-byte Reload -.LBB169_18: - movl %edx, 32(%esi) - js .LBB169_20 -# BB#19: - movl 32(%esp), %ecx # 4-byte Reload -.LBB169_20: - movl %ecx, 36(%esi) - movl 56(%esp), %eax # 4-byte Reload - js .LBB169_22 -# BB#21: - movl 36(%esp), %eax # 4-byte Reload -.LBB169_22: - movl %eax, 40(%esi) - addl $80, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end169: - .size mcl_fp_addNF11L, .Lfunc_end169-mcl_fp_addNF11L - - .globl mcl_fp_sub11L - .align 16, 0x90 - .type mcl_fp_sub11L,@function -mcl_fp_sub11L: # @mcl_fp_sub11L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $40, %esp - movl 64(%esp), %ebp - movl (%ebp), %ecx - movl 4(%ebp), %eax - movl 68(%esp), %edi - subl (%edi), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - sbbl 4(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 8(%ebp), %eax - sbbl 8(%edi), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 12(%ebp), %ebx - sbbl 12(%edi), %ebx - movl 16(%ebp), %eax - sbbl 16(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 20(%ebp), %eax - sbbl 20(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - 
movl 24(%ebp), %eax - sbbl 24(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 28(%ebp), %edx - sbbl 28(%edi), %edx - movl %edx, 4(%esp) # 4-byte Spill - movl 32(%ebp), %ecx - sbbl 32(%edi), %ecx - movl %ecx, (%esp) # 4-byte Spill - movl 36(%ebp), %eax - sbbl 36(%edi), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 40(%ebp), %eax - sbbl 40(%edi), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl %ebx, %ebp - movl 16(%esp), %esi # 4-byte Reload - movl $0, %ebx - sbbl $0, %ebx - testb $1, %bl - movl 60(%esp), %ebx - movl %esi, (%ebx) - movl 28(%esp), %edi # 4-byte Reload - movl %edi, 4(%ebx) - movl 32(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl %ebp, 12(%ebx) - movl 36(%esp), %edi # 4-byte Reload - movl %edi, 16(%ebx) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 20(%ebx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 24(%ebx) - movl %edx, 28(%ebx) - movl %ecx, 32(%ebx) - movl 12(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%ebx) - movl %ecx, %edi - movl %eax, 40(%ebx) - je .LBB170_2 -# BB#1: # %carry - movl 72(%esp), %eax - addl (%eax), %esi - movl %esi, (%ebx) - movl 28(%esp), %edx # 4-byte Reload - movl %eax, %esi - adcl 4(%esi), %edx - movl %edx, 4(%ebx) - movl 32(%esp), %ecx # 4-byte Reload - adcl 8(%esi), %ecx - movl 12(%esi), %eax - adcl %ebp, %eax - movl %ecx, 8(%ebx) - movl 16(%esi), %ecx - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %eax, 12(%ebx) - movl 20(%esi), %eax - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %ecx, 16(%ebx) - movl 24(%esi), %ecx - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %eax, 20(%ebx) - movl 28(%esi), %eax - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %ecx, 24(%ebx) - movl 32(%esi), %ecx - adcl (%esp), %ecx # 4-byte Folded Reload - movl %eax, 28(%ebx) - movl %ecx, 32(%ebx) - movl 36(%esi), %eax - adcl %edi, %eax - movl %eax, 36(%ebx) - movl 40(%esi), %eax - adcl 8(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%ebx) -.LBB170_2: # %nocarry - addl $40, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end170: - .size mcl_fp_sub11L, .Lfunc_end170-mcl_fp_sub11L - - .globl mcl_fp_subNF11L - .align 16, 0x90 - .type mcl_fp_subNF11L,@function -mcl_fp_subNF11L: # @mcl_fp_subNF11L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $64, %esp - movl 88(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %ecx - movl 92(%esp), %edi - subl (%edi), %edx - movl %edx, 44(%esp) # 4-byte Spill - sbbl 4(%edi), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 40(%eax), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 36(%eax), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 32(%eax), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 28(%eax), %ebx - movl 24(%eax), %ebp - movl 20(%eax), %esi - movl 16(%eax), %edx - movl 12(%eax), %ecx - movl 8(%eax), %eax - sbbl 8(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - sbbl 12(%edi), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - sbbl 16(%edi), %edx - movl %edx, 28(%esp) # 4-byte Spill - sbbl 20(%edi), %esi - movl %esi, 32(%esp) # 4-byte Spill - sbbl 24(%edi), %ebp - movl %ebp, 36(%esp) # 4-byte Spill - sbbl 28(%edi), %ebx - movl %ebx, 40(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - sbbl 32(%edi), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - sbbl 36(%edi), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - sbbl 40(%edi), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl %edx, %esi - sarl $31, %esi - movl %esi, %eax - shldl $1, %edx, %eax - movl 96(%esp), %edx - movl 
4(%edx), %ecx - andl %eax, %ecx - movl %ecx, %ebx - andl (%edx), %eax - movl 40(%edx), %ecx - andl %esi, %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 36(%edx), %ecx - andl %esi, %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 32(%edx), %ecx - andl %esi, %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 28(%edx), %ecx - andl %esi, %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 24(%edx), %ecx - andl %esi, %ecx - movl %ecx, (%esp) # 4-byte Spill - movl 20(%edx), %ebp - andl %esi, %ebp - roll %esi - movl 16(%edx), %edi - andl %esi, %edi - movl 12(%edx), %ecx - andl %esi, %ecx - andl 8(%edx), %esi - addl 44(%esp), %eax # 4-byte Folded Reload - movl %ebx, %edx - adcl 48(%esp), %edx # 4-byte Folded Reload - movl 84(%esp), %ebx - movl %eax, (%ebx) - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %edx, 4(%ebx) - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %esi, 8(%ebx) - adcl 28(%esp), %edi # 4-byte Folded Reload - movl %ecx, 12(%ebx) - adcl 32(%esp), %ebp # 4-byte Folded Reload - movl %edi, 16(%ebx) - movl (%esp), %ecx # 4-byte Reload - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %ebp, 20(%ebx) - movl 4(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %ecx, 24(%ebx) - movl 8(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %eax, 28(%ebx) - movl 12(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %ecx, 32(%ebx) - movl %eax, 36(%ebx) - movl 16(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%ebx) - addl $64, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end171: - .size mcl_fp_subNF11L, .Lfunc_end171-mcl_fp_subNF11L - - .globl mcl_fpDbl_add11L - .align 16, 0x90 - .type mcl_fpDbl_add11L,@function -mcl_fpDbl_add11L: # @mcl_fpDbl_add11L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $80, %esp - movl 108(%esp), %ecx - movl 104(%esp), %edi - movl 12(%edi), %esi - movl 16(%edi), %edx - movl 8(%ecx), %ebx - movl (%ecx), %ebp - addl (%edi), %ebp - movl 100(%esp), %eax - movl %ebp, (%eax) - movl 4(%ecx), %ebp - adcl 4(%edi), %ebp - adcl 8(%edi), %ebx - adcl 12(%ecx), %esi - adcl 16(%ecx), %edx - movl %ebp, 4(%eax) - movl 52(%ecx), %ebp - movl %ebx, 8(%eax) - movl 20(%ecx), %ebx - movl %esi, 12(%eax) - movl 20(%edi), %esi - adcl %ebx, %esi - movl 24(%ecx), %ebx - movl %edx, 16(%eax) - movl 24(%edi), %edx - adcl %ebx, %edx - movl 28(%ecx), %ebx - movl %esi, 20(%eax) - movl 28(%edi), %esi - adcl %ebx, %esi - movl 32(%ecx), %ebx - movl %edx, 24(%eax) - movl 32(%edi), %edx - adcl %ebx, %edx - movl 36(%ecx), %ebx - movl %esi, 28(%eax) - movl 36(%edi), %esi - adcl %ebx, %esi - movl 40(%ecx), %ebx - movl %edx, 32(%eax) - movl 40(%edi), %edx - adcl %ebx, %edx - movl 44(%ecx), %ebx - movl %esi, 36(%eax) - movl 44(%edi), %esi - adcl %ebx, %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 48(%ecx), %esi - movl %edx, 40(%eax) - movl 48(%edi), %eax - adcl %esi, %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 52(%edi), %eax - adcl %ebp, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 56(%ecx), %edx - movl 56(%edi), %eax - adcl %edx, %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%ecx), %edx - movl 60(%edi), %eax - adcl %edx, %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%ecx), %edx - movl 64(%edi), %eax - adcl %edx, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%ecx), %eax - movl 68(%edi), %edx - adcl %eax, %edx - movl 72(%ecx), %esi - movl 72(%edi), %eax - adcl %esi, %eax - movl %eax, 44(%esp) # 4-byte Spill - 
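mcl_fp_sub11L and mcl_fp_subNF11L are the mirror images: subtract, then add the modulus back if the subtraction borrowed. The plain variant jumps to a carry block; the NF variant above stays branchless, stretching the borrow's sign into a whole-limb mask (the sarl $31) that is ANDed with every modulus limb before the correcting adcl chain. A sketch of the branchless form, with illustrative names:

```go
package main

import (
	"fmt"
	"math/bits"
)

// fpSubNF: z = x - y mod p for x, y < p, without branches, in the style of
// mcl_fp_subNF11L: the final borrow becomes an all-ones/all-zero mask that
// selects between adding p and adding 0.
func fpSubNF(z, x, y, p []uint32) {
	var b uint32
	for i := range x {
		z[i], b = bits.Sub32(x[i], y[i], b)
	}
	mask := -b // 0 if no borrow, 0xffffffff if x < y (the sarl $31 result)
	var c uint32
	for i := range p {
		z[i], c = bits.Add32(z[i], p[i]&mask, c) // add p only if we borrowed
	}
}

func main() {
	p := []uint32{0xfffffffb, 0xffffffff}
	x := []uint32{3, 0}
	y := []uint32{5, 0}
	z := make([]uint32, 2)
	fpSubNF(z, x, y, p)
	fmt.Printf("%x\n", z) // [fffffff9 ffffffff], i.e. p - 2
}
```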
movl 76(%ecx), %ebx - movl 76(%edi), %esi - adcl %ebx, %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 80(%ecx), %ebp - movl 80(%edi), %ebx - adcl %ebp, %ebx - movl %ebx, 52(%esp) # 4-byte Spill - movl 84(%ecx), %ecx - movl 84(%edi), %edi - adcl %ecx, %edi - movl %edi, 40(%esp) # 4-byte Spill - sbbl %ecx, %ecx - andl $1, %ecx - movl 112(%esp), %ebp - movl 68(%esp), %edi # 4-byte Reload - subl (%ebp), %edi - movl %edi, 24(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - sbbl 4(%ebp), %edi - movl %edi, 20(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - sbbl 8(%ebp), %edi - movl %edi, 16(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - sbbl 12(%ebp), %edi - movl %edi, 12(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - sbbl 16(%ebp), %edi - movl %edi, 8(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - sbbl 20(%ebp), %edi - movl %edi, 4(%esp) # 4-byte Spill - movl %edx, %edi - sbbl 24(%ebp), %edi - movl %edi, (%esp) # 4-byte Spill - sbbl 28(%ebp), %eax - movl %eax, 28(%esp) # 4-byte Spill - sbbl 32(%ebp), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl %ebx, %eax - movl 40(%esp), %ebx # 4-byte Reload - sbbl 36(%ebp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl %ebx, %edi - sbbl 40(%ebp), %edi - sbbl $0, %ecx - andl $1, %ecx - jne .LBB172_2 -# BB#1: - movl %edi, %ebx -.LBB172_2: - testb %cl, %cl - movl 68(%esp), %ecx # 4-byte Reload - movl 64(%esp), %esi # 4-byte Reload - movl 60(%esp), %edi # 4-byte Reload - movl 56(%esp), %ebp # 4-byte Reload - jne .LBB172_4 -# BB#3: - movl (%esp), %edx # 4-byte Reload - movl 4(%esp), %esi # 4-byte Reload - movl 8(%esp), %edi # 4-byte Reload - movl 12(%esp), %ebp # 4-byte Reload - movl 16(%esp), %eax # 4-byte Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 24(%esp), %ecx # 4-byte Reload -.LBB172_4: - movl 100(%esp), %eax - movl %ecx, 44(%eax) - movl 72(%esp), %ecx # 4-byte Reload - movl %ecx, 48(%eax) - movl 76(%esp), %ecx # 4-byte Reload - movl %ecx, 52(%eax) - movl %ebp, 56(%eax) - movl %edi, 60(%eax) - movl %esi, 64(%eax) - movl %edx, 68(%eax) - movl 52(%esp), %ecx # 4-byte Reload - movl 44(%esp), %edx # 4-byte Reload - jne .LBB172_6 -# BB#5: - movl 28(%esp), %edx # 4-byte Reload -.LBB172_6: - movl %edx, 72(%eax) - movl 48(%esp), %edx # 4-byte Reload - jne .LBB172_8 -# BB#7: - movl 32(%esp), %edx # 4-byte Reload -.LBB172_8: - movl %edx, 76(%eax) - jne .LBB172_10 -# BB#9: - movl 36(%esp), %ecx # 4-byte Reload -.LBB172_10: - movl %ecx, 80(%eax) - movl %ebx, 84(%eax) - addl $80, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end172: - .size mcl_fpDbl_add11L, .Lfunc_end172-mcl_fpDbl_add11L - - .globl mcl_fpDbl_sub11L - .align 16, 0x90 - .type mcl_fpDbl_sub11L,@function -mcl_fpDbl_sub11L: # @mcl_fpDbl_sub11L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $72, %esp - movl 96(%esp), %edx - movl (%edx), %eax - movl 4(%edx), %esi - movl 100(%esp), %ebp - subl (%ebp), %eax - sbbl 4(%ebp), %esi - movl 8(%edx), %edi - sbbl 8(%ebp), %edi - movl 92(%esp), %ecx - movl %eax, (%ecx) - movl 12(%edx), %eax - sbbl 12(%ebp), %eax - movl %esi, 4(%ecx) - movl 16(%edx), %esi - sbbl 16(%ebp), %esi - movl %edi, 8(%ecx) - movl 20(%ebp), %edi - movl %eax, 12(%ecx) - movl 20(%edx), %eax - sbbl %edi, %eax - movl 24(%ebp), %edi - movl %esi, 16(%ecx) - movl 24(%edx), %esi - sbbl %edi, %esi - movl 28(%ebp), %edi - movl %eax, 20(%ecx) - movl 28(%edx), %eax - sbbl %edi, %eax - movl 32(%ebp), %edi - movl 
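The fpDbl routines carry 22-limb double-width values between multiplication and reduction. mcl_fpDbl_add11L adds all 22 limbs but trial-subtracts the modulus only from the upper 11, which appears intended to keep the result under p*2^352, the bound the reduction expects; mcl_fpDbl_sub11L correspondingly adds p back into the upper half on borrow. A sketch of the add, under that assumption and with illustrative names:

```go
package main

import (
	"fmt"
	"math/bits"
)

// fpDblAdd: z = x + y for 2n-limb x, y < p*R; the carry chain covers all
// 2n limbs but the reduction touches only the upper n, as in
// mcl_fpDbl_add11L.
func fpDblAdd(z, x, y, p []uint32) {
	n := len(p)
	var c uint32
	for i := 0; i < 2*n; i++ {
		z[i], c = bits.Add32(x[i], y[i], c)
	}
	d := make([]uint32, n)
	var b uint32
	for i := 0; i < n; i++ { // trial-subtract p from limbs n..2n-1 only
		d[i], b = bits.Sub32(z[n+i], p[i], b)
	}
	if c == 1 || b == 0 { // sum overflowed, or upper half >= p: keep z - p*R
		copy(z[n:], d)
	}
}

func main() {
	p := []uint32{0xfffffffb}    // n = 1: inputs below p*2^32
	x := []uint32{7, 0xfffffffa} // low limb 7, high limb p - 1
	y := []uint32{1, 2}
	z := make([]uint32, 2)
	fpDblAdd(z, x, y, p)
	fmt.Println(z) // [8 1]: low limbs added, upper half reduced mod p
}
```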
%esi, 24(%ecx) - movl 32(%edx), %esi - sbbl %edi, %esi - movl 36(%ebp), %edi - movl %eax, 28(%ecx) - movl 36(%edx), %eax - sbbl %edi, %eax - movl 40(%ebp), %edi - movl %esi, 32(%ecx) - movl 40(%edx), %esi - sbbl %edi, %esi - movl 44(%ebp), %edi - movl %eax, 36(%ecx) - movl 44(%edx), %eax - sbbl %edi, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 48(%ebp), %eax - movl %esi, 40(%ecx) - movl 48(%edx), %esi - sbbl %eax, %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 52(%ebp), %eax - movl 52(%edx), %esi - sbbl %eax, %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 56(%ebp), %eax - movl 56(%edx), %esi - sbbl %eax, %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 60(%ebp), %eax - movl 60(%edx), %esi - sbbl %eax, %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 64(%ebp), %eax - movl 64(%edx), %esi - sbbl %eax, %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 68(%ebp), %eax - movl 68(%edx), %esi - sbbl %eax, %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 72(%ebp), %eax - movl 72(%edx), %esi - sbbl %eax, %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 76(%ebp), %eax - movl 76(%edx), %esi - sbbl %eax, %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 80(%ebp), %eax - movl 80(%edx), %esi - sbbl %eax, %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 84(%ebp), %eax - movl 84(%edx), %edx - sbbl %eax, %edx - movl %edx, 68(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 104(%esp), %ebp - jne .LBB173_1 -# BB#2: - movl $0, 28(%esp) # 4-byte Folded Spill - jmp .LBB173_3 -.LBB173_1: - movl 40(%ebp), %edx - movl %edx, 28(%esp) # 4-byte Spill -.LBB173_3: - testb %al, %al - jne .LBB173_4 -# BB#5: - movl $0, 16(%esp) # 4-byte Folded Spill - movl $0, 8(%esp) # 4-byte Folded Spill - jmp .LBB173_6 -.LBB173_4: - movl (%ebp), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 4(%ebp), %eax - movl %eax, 16(%esp) # 4-byte Spill -.LBB173_6: - jne .LBB173_7 -# BB#8: - movl $0, 20(%esp) # 4-byte Folded Spill - jmp .LBB173_9 -.LBB173_7: - movl 36(%ebp), %eax - movl %eax, 20(%esp) # 4-byte Spill -.LBB173_9: - jne .LBB173_10 -# BB#11: - movl $0, 12(%esp) # 4-byte Folded Spill - jmp .LBB173_12 -.LBB173_10: - movl 32(%ebp), %eax - movl %eax, 12(%esp) # 4-byte Spill -.LBB173_12: - jne .LBB173_13 -# BB#14: - movl $0, 4(%esp) # 4-byte Folded Spill - jmp .LBB173_15 -.LBB173_13: - movl 28(%ebp), %eax - movl %eax, 4(%esp) # 4-byte Spill -.LBB173_15: - jne .LBB173_16 -# BB#17: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB173_18 -.LBB173_16: - movl 24(%ebp), %eax - movl %eax, (%esp) # 4-byte Spill -.LBB173_18: - jne .LBB173_19 -# BB#20: - movl $0, %edx - jmp .LBB173_21 -.LBB173_19: - movl 20(%ebp), %edx -.LBB173_21: - jne .LBB173_22 -# BB#23: - movl $0, %edi - jmp .LBB173_24 -.LBB173_22: - movl 16(%ebp), %edi -.LBB173_24: - jne .LBB173_25 -# BB#26: - movl $0, %ebx - jmp .LBB173_27 -.LBB173_25: - movl 12(%ebp), %ebx -.LBB173_27: - jne .LBB173_28 -# BB#29: - xorl %ebp, %ebp - jmp .LBB173_30 -.LBB173_28: - movl 8(%ebp), %ebp -.LBB173_30: - movl 8(%esp), %esi # 4-byte Reload - addl 36(%esp), %esi # 4-byte Folded Reload - movl 16(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %esi, 44(%ecx) - adcl 32(%esp), %ebp # 4-byte Folded Reload - movl %eax, 48(%ecx) - adcl 40(%esp), %ebx # 4-byte Folded Reload - movl %ebp, 52(%ecx) - adcl 44(%esp), %edi # 4-byte Folded Reload - movl %ebx, 56(%ecx) - adcl 48(%esp), %edx # 4-byte Folded Reload - movl %edi, 60(%ecx) - movl (%esp), %esi # 4-byte Reload - adcl 52(%esp), %esi # 4-byte Folded Reload - movl %edx, 64(%ecx) - movl 4(%esp), 
%eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %esi, 68(%ecx) - movl 12(%esp), %edx # 4-byte Reload - adcl 60(%esp), %edx # 4-byte Folded Reload - movl %eax, 72(%ecx) - movl 20(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %edx, 76(%ecx) - movl %eax, 80(%ecx) - movl 28(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%ecx) - addl $72, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end173: - .size mcl_fpDbl_sub11L, .Lfunc_end173-mcl_fpDbl_sub11L - - .align 16, 0x90 - .type .LmulPv384x32,@function -.LmulPv384x32: # @mulPv384x32 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $80, %esp - movl %edx, %ebx - movl 100(%esp), %ebp - movl %ebp, %eax - mull 44(%ebx) - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 72(%esp) # 4-byte Spill - movl %ebp, %eax - mull 40(%ebx) - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte Spill - movl %ebp, %eax - mull 36(%ebx) - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - movl %ebp, %eax - mull 32(%ebx) - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %ebp, %eax - mull 28(%ebx) - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ebp, %eax - mull 24(%ebx) - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ebp, %eax - mull 20(%ebx) - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ebp, %eax - mull 16(%ebx) - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ebp, %eax - mull 12(%ebx) - movl %edx, 12(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ebp, %eax - mull 8(%ebx) - movl %edx, %edi - movl %eax, 4(%esp) # 4-byte Spill - movl %ebp, %eax - mull 4(%ebx) - movl %edx, %esi - movl %eax, (%esp) # 4-byte Spill - movl %ebp, %eax - mull (%ebx) - movl %eax, (%ecx) - addl (%esp), %edx # 4-byte Folded Reload - movl %edx, 4(%ecx) - adcl 4(%esp), %esi # 4-byte Folded Reload - movl %esi, 8(%ecx) - adcl 8(%esp), %edi # 4-byte Folded Reload - movl %edi, 12(%ecx) - movl 12(%esp), %eax # 4-byte Reload - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 16(%ecx) - movl 20(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%ecx) - movl 28(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%ecx) - movl 36(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%ecx) - movl 44(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%ecx) - movl 52(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%ecx) - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%ecx) - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%ecx) - movl 76(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 48(%ecx) - movl %ecx, %eax - addl $80, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end174: - .size .LmulPv384x32, .Lfunc_end174-.LmulPv384x32 - - .globl mcl_fp_mulUnitPre12L - .align 16, 0x90 - .type mcl_fp_mulUnitPre12L,@function -mcl_fp_mulUnitPre12L: # @mcl_fp_mulUnitPre12L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $92, %esp - calll .L175$pb -.L175$pb: - popl %ebx -.Ltmp26: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp26-.L175$pb), %ebx 
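From here the file restarts the whole pattern at 12 limbs (384 bits), beginning with .LmulPv384x32: twelve mull instructions against one 32-bit word, each product's high half folded into the next limb by a single adcl chain, yielding a 13-limb result; mcl_fp_mulUnitPre12L is a thin wrapper that copies that result out to the destination buffer. The primitive in Go (mulPv is an illustrative name):

```go
package main

import (
	"fmt"
	"math/bits"
)

// mulPv: z = x * w for an n-limb x and one 32-bit word w; z needs n+1
// limbs. Each step mirrors one mull: the product's high half joins the
// next limb's carry.
func mulPv(z, x []uint32, w uint32) {
	var carry uint32
	for i := range x {
		hi, lo := bits.Mul32(x[i], w)
		var c uint32
		z[i], c = bits.Add32(lo, carry, 0)
		carry = hi + c // hi <= 2^32 - 2, so this never overflows
	}
	z[len(x)] = carry // the final adcl $0 into the top word
}

func main() {
	x := []uint32{0xffffffff, 0xffffffff} // 2^64 - 1
	z := make([]uint32, 3)
	mulPv(z, x, 3)
	fmt.Printf("%x\n", z) // [fffffffd ffffffff 2] = 3*(2^64 - 1)
}
```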
- movl 120(%esp), %eax - movl %eax, (%esp) - leal 40(%esp), %ecx - movl 116(%esp), %edx - calll .LmulPv384x32 - movl 88(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 84(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 80(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 76(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 72(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 68(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 64(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 60(%esp), %ebp - movl 56(%esp), %ebx - movl 52(%esp), %edi - movl 48(%esp), %esi - movl 40(%esp), %edx - movl 44(%esp), %ecx - movl 112(%esp), %eax - movl %edx, (%eax) - movl %ecx, 4(%eax) - movl %esi, 8(%eax) - movl %edi, 12(%eax) - movl %ebx, 16(%eax) - movl %ebp, 20(%eax) - movl 12(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 40(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 44(%eax) - movl 36(%esp), %ecx # 4-byte Reload - movl %ecx, 48(%eax) - addl $92, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end175: - .size mcl_fp_mulUnitPre12L, .Lfunc_end175-mcl_fp_mulUnitPre12L - - .globl mcl_fpDbl_mulPre12L - .align 16, 0x90 - .type mcl_fpDbl_mulPre12L,@function -mcl_fpDbl_mulPre12L: # @mcl_fpDbl_mulPre12L -# BB#0: - pushl %ebp - movl %esp, %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $220, %esp - calll .L176$pb -.L176$pb: - popl %ebx -.Ltmp27: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp27-.L176$pb), %ebx - movl %ebx, -164(%ebp) # 4-byte Spill - movl 16(%ebp), %esi - movl %esi, 8(%esp) - movl 12(%ebp), %edi - movl %edi, 4(%esp) - movl 8(%ebp), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre6L@PLT - leal 24(%esi), %eax - movl %eax, 8(%esp) - leal 24(%edi), %eax - movl %eax, 4(%esp) - movl 8(%ebp), %eax - leal 48(%eax), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre6L@PLT - movl 40(%edi), %ebx - movl 36(%edi), %eax - movl 32(%edi), %edx - movl (%edi), %esi - movl 4(%edi), %ecx - addl 24(%edi), %esi - adcl 28(%edi), %ecx - movl %ecx, -172(%ebp) # 4-byte Spill - adcl 8(%edi), %edx - movl %edx, -188(%ebp) # 4-byte Spill - adcl 12(%edi), %eax - movl %eax, -168(%ebp) # 4-byte Spill - adcl 16(%edi), %ebx - movl %ebx, -180(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %eax - movl %eax, -112(%ebp) # 4-byte Spill - movl 16(%ebp), %edi - movl (%edi), %eax - addl 24(%edi), %eax - movl %eax, -136(%ebp) # 4-byte Spill - movl 4(%edi), %eax - adcl 28(%edi), %eax - movl %eax, -140(%ebp) # 4-byte Spill - movl 32(%edi), %eax - adcl 8(%edi), %eax - movl %eax, -144(%ebp) # 4-byte Spill - movl 36(%edi), %eax - adcl 12(%edi), %eax - movl %eax, -148(%ebp) # 4-byte Spill - movl 40(%edi), %ecx - adcl 16(%edi), %ecx - movl 44(%edi), %eax - adcl 20(%edi), %eax - pushl %eax - seto %al - lahf - movl %eax, %edi - popl %eax - movl %edi, -184(%ebp) # 4-byte Spill - movl %ebx, %edi - movl %edx, -156(%ebp) # 4-byte Spill - movl %esi, -160(%ebp) # 4-byte Spill - movl %esi, %edx - jb .LBB176_2 -# BB#1: - xorl %edi, %edi - movl $0, -156(%ebp) # 4-byte Folded Spill - movl $0, -160(%ebp) # 4-byte Folded Spill -.LBB176_2: - movl %edi, -176(%ebp) # 4-byte Spill - movl 12(%ebp), %esi - movl 44(%esi), %edi - movl -112(%ebp), %ebx # 4-byte Reload - pushl %eax - movl %ebx, %eax - addb $127, %al - sahf - popl %eax - adcl 20(%esi), %edi - movl 
%edi, -132(%ebp) # 4-byte Spill - movl %eax, -124(%ebp) # 4-byte Spill - movl %ecx, -112(%ebp) # 4-byte Spill - movl -148(%ebp), %esi # 4-byte Reload - movl %esi, -116(%ebp) # 4-byte Spill - movl -144(%ebp), %esi # 4-byte Reload - movl %esi, -120(%ebp) # 4-byte Spill - movl -140(%ebp), %esi # 4-byte Reload - movl %esi, -128(%ebp) # 4-byte Spill - movl -136(%ebp), %esi # 4-byte Reload - movl %esi, -152(%ebp) # 4-byte Spill - jb .LBB176_4 -# BB#3: - movl $0, -124(%ebp) # 4-byte Folded Spill - movl $0, -112(%ebp) # 4-byte Folded Spill - movl $0, -116(%ebp) # 4-byte Folded Spill - movl $0, -120(%ebp) # 4-byte Folded Spill - movl $0, -128(%ebp) # 4-byte Folded Spill - movl $0, -152(%ebp) # 4-byte Folded Spill -.LBB176_4: - movl %edx, -84(%ebp) - movl -172(%ebp), %esi # 4-byte Reload - movl %esi, -80(%ebp) - movl -188(%ebp), %edx # 4-byte Reload - movl %edx, -76(%ebp) - movl -168(%ebp), %edi # 4-byte Reload - movl %edi, -72(%ebp) - movl -180(%ebp), %edx # 4-byte Reload - movl %edx, -68(%ebp) - movl -136(%ebp), %edx # 4-byte Reload - movl %edx, -108(%ebp) - movl -140(%ebp), %edx # 4-byte Reload - movl %edx, -104(%ebp) - movl -144(%ebp), %edx # 4-byte Reload - movl %edx, -100(%ebp) - movl -148(%ebp), %edx # 4-byte Reload - movl %edx, -96(%ebp) - movl %ecx, -92(%ebp) - movl %eax, -88(%ebp) - movl %edi, %ebx - sbbl %edx, %edx - movl -132(%ebp), %eax # 4-byte Reload - movl %eax, -64(%ebp) - movl -184(%ebp), %ecx # 4-byte Reload - pushl %eax - movl %ecx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB176_6 -# BB#5: - movl $0, %eax - movl $0, %ebx - movl $0, %esi -.LBB176_6: - movl %eax, -132(%ebp) # 4-byte Spill - sbbl %eax, %eax - leal -108(%ebp), %ecx - movl %ecx, 8(%esp) - leal -84(%ebp), %ecx - movl %ecx, 4(%esp) - leal -60(%ebp), %ecx - movl %ecx, (%esp) - andl %eax, %edx - movl -152(%ebp), %edi # 4-byte Reload - addl -160(%ebp), %edi # 4-byte Folded Reload - adcl %esi, -128(%ebp) # 4-byte Folded Spill - movl -156(%ebp), %eax # 4-byte Reload - adcl %eax, -120(%ebp) # 4-byte Folded Spill - adcl %ebx, -116(%ebp) # 4-byte Folded Spill - movl -176(%ebp), %eax # 4-byte Reload - adcl %eax, -112(%ebp) # 4-byte Folded Spill - movl -132(%ebp), %eax # 4-byte Reload - adcl %eax, -124(%ebp) # 4-byte Folded Spill - sbbl %esi, %esi - andl $1, %esi - andl $1, %edx - movl %edx, -132(%ebp) # 4-byte Spill - movl -164(%ebp), %ebx # 4-byte Reload - calll mcl_fpDbl_mulPre6L@PLT - addl -36(%ebp), %edi - movl -128(%ebp), %eax # 4-byte Reload - adcl -32(%ebp), %eax - movl %eax, -128(%ebp) # 4-byte Spill - movl -120(%ebp), %eax # 4-byte Reload - adcl -28(%ebp), %eax - movl %eax, -120(%ebp) # 4-byte Spill - movl -116(%ebp), %eax # 4-byte Reload - adcl -24(%ebp), %eax - movl %eax, -116(%ebp) # 4-byte Spill - movl -112(%ebp), %eax # 4-byte Reload - adcl -20(%ebp), %eax - movl %eax, -112(%ebp) # 4-byte Spill - movl -124(%ebp), %eax # 4-byte Reload - adcl -16(%ebp), %eax - movl %eax, -124(%ebp) # 4-byte Spill - adcl %esi, -132(%ebp) # 4-byte Folded Spill - movl -60(%ebp), %ecx - movl 8(%ebp), %eax - subl (%eax), %ecx - movl %ecx, -144(%ebp) # 4-byte Spill - movl -56(%ebp), %esi - sbbl 4(%eax), %esi - movl -52(%ebp), %ecx - sbbl 8(%eax), %ecx - movl %ecx, -136(%ebp) # 4-byte Spill - movl -48(%ebp), %edx - sbbl 12(%eax), %edx - movl -44(%ebp), %ebx - sbbl 16(%eax), %ebx - movl -40(%ebp), %ecx - sbbl 20(%eax), %ecx - movl %ecx, -140(%ebp) # 4-byte Spill - movl 24(%eax), %ecx - movl %ecx, -148(%ebp) # 4-byte Spill - sbbl %ecx, %edi - movl 28(%eax), %ecx - movl %ecx, -152(%ebp) # 4-byte Spill - sbbl %ecx, -128(%ebp) # 4-byte 
Folded Spill - movl 32(%eax), %ecx - movl %ecx, -156(%ebp) # 4-byte Spill - sbbl %ecx, -120(%ebp) # 4-byte Folded Spill - movl 36(%eax), %ecx - movl %ecx, -160(%ebp) # 4-byte Spill - sbbl %ecx, -116(%ebp) # 4-byte Folded Spill - movl 40(%eax), %ecx - movl %ecx, -164(%ebp) # 4-byte Spill - sbbl %ecx, -112(%ebp) # 4-byte Folded Spill - movl 44(%eax), %ecx - movl %ecx, -168(%ebp) # 4-byte Spill - sbbl %ecx, -124(%ebp) # 4-byte Folded Spill - sbbl $0, -132(%ebp) # 4-byte Folded Spill - movl 48(%eax), %ecx - movl %ecx, -192(%ebp) # 4-byte Spill - subl %ecx, -144(%ebp) # 4-byte Folded Spill - movl 52(%eax), %ecx - movl %ecx, -196(%ebp) # 4-byte Spill - sbbl %ecx, %esi - movl 56(%eax), %ecx - movl %ecx, -200(%ebp) # 4-byte Spill - sbbl %ecx, -136(%ebp) # 4-byte Folded Spill - movl 60(%eax), %ecx - movl %ecx, -204(%ebp) # 4-byte Spill - sbbl %ecx, %edx - movl 64(%eax), %ecx - movl %ecx, -208(%ebp) # 4-byte Spill - sbbl %ecx, %ebx - movl 68(%eax), %ecx - movl %ecx, -212(%ebp) # 4-byte Spill - sbbl %ecx, -140(%ebp) # 4-byte Folded Spill - movl 72(%eax), %ecx - movl %ecx, -216(%ebp) # 4-byte Spill - sbbl %ecx, %edi - movl 76(%eax), %ecx - movl %ecx, -172(%ebp) # 4-byte Spill - sbbl %ecx, -128(%ebp) # 4-byte Folded Spill - movl 80(%eax), %ecx - movl %ecx, -176(%ebp) # 4-byte Spill - sbbl %ecx, -120(%ebp) # 4-byte Folded Spill - movl 84(%eax), %ecx - movl %ecx, -180(%ebp) # 4-byte Spill - sbbl %ecx, -116(%ebp) # 4-byte Folded Spill - movl 88(%eax), %ecx - movl %ecx, -184(%ebp) # 4-byte Spill - sbbl %ecx, -112(%ebp) # 4-byte Folded Spill - movl 92(%eax), %ecx - movl %ecx, -188(%ebp) # 4-byte Spill - sbbl %ecx, -124(%ebp) # 4-byte Folded Spill - sbbl $0, -132(%ebp) # 4-byte Folded Spill - movl -144(%ebp), %ecx # 4-byte Reload - addl -148(%ebp), %ecx # 4-byte Folded Reload - adcl -152(%ebp), %esi # 4-byte Folded Reload - movl %ecx, 24(%eax) - movl -136(%ebp), %ecx # 4-byte Reload - adcl -156(%ebp), %ecx # 4-byte Folded Reload - movl %esi, 28(%eax) - adcl -160(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 32(%eax) - adcl -164(%ebp), %ebx # 4-byte Folded Reload - movl %edx, 36(%eax) - movl -140(%ebp), %ecx # 4-byte Reload - adcl -168(%ebp), %ecx # 4-byte Folded Reload - movl %ebx, 40(%eax) - adcl -192(%ebp), %edi # 4-byte Folded Reload - movl %ecx, 44(%eax) - movl -128(%ebp), %ecx # 4-byte Reload - adcl -196(%ebp), %ecx # 4-byte Folded Reload - movl %edi, 48(%eax) - movl -120(%ebp), %edx # 4-byte Reload - adcl -200(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 52(%eax) - movl -116(%ebp), %ecx # 4-byte Reload - adcl -204(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 56(%eax) - movl -112(%ebp), %edx # 4-byte Reload - adcl -208(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 60(%eax) - movl -124(%ebp), %ecx # 4-byte Reload - adcl -212(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 64(%eax) - movl -132(%ebp), %edx # 4-byte Reload - adcl -216(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 68(%eax) - movl %edx, 72(%eax) - movl -172(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 76(%eax) - movl -176(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 80(%eax) - movl -180(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 84(%eax) - movl -184(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 88(%eax) - movl -188(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 92(%eax) - addl $220, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end176: - .size mcl_fpDbl_mulPre12L, .Lfunc_end176-mcl_fpDbl_mulPre12L - - .globl mcl_fpDbl_sqrPre12L - .align 16, 0x90 - 
.type mcl_fpDbl_sqrPre12L,@function -mcl_fpDbl_sqrPre12L: # @mcl_fpDbl_sqrPre12L -# BB#0: - pushl %ebp - movl %esp, %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $220, %esp - calll .L177$pb -.L177$pb: - popl %ebx -.Ltmp28: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp28-.L177$pb), %ebx - movl %ebx, -152(%ebp) # 4-byte Spill - movl 12(%ebp), %edi - movl %edi, 8(%esp) - movl %edi, 4(%esp) - movl 8(%ebp), %esi - movl %esi, (%esp) - calll mcl_fpDbl_mulPre6L@PLT - leal 24(%edi), %eax - movl %eax, 8(%esp) - movl %eax, 4(%esp) - leal 48(%esi), %eax - movl %eax, (%esp) - calll mcl_fpDbl_mulPre6L@PLT - movl 44(%edi), %eax - movl %eax, -136(%ebp) # 4-byte Spill - movl 40(%edi), %edx - movl 36(%edi), %eax - movl (%edi), %ebx - movl 4(%edi), %esi - addl 24(%edi), %ebx - adcl 28(%edi), %esi - movl 32(%edi), %ecx - adcl 8(%edi), %ecx - movl %ecx, -144(%ebp) # 4-byte Spill - adcl 12(%edi), %eax - movl %eax, -140(%ebp) # 4-byte Spill - adcl 16(%edi), %edx - movl %edx, %ecx - movl -136(%ebp), %eax # 4-byte Reload - adcl 20(%edi), %eax - movl %eax, -136(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %edx - movl %edx, -156(%ebp) # 4-byte Spill - pushl %eax - seto %al - lahf - movl %eax, %edx - popl %eax - movl %edx, -124(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %eax - movl %eax, -120(%ebp) # 4-byte Spill - seto %al - lahf - movl %eax, %edx - sbbl %edi, %edi - movl %edi, -148(%ebp) # 4-byte Spill - movl %ebx, %edi - addl %edi, %edi - movl %edi, -112(%ebp) # 4-byte Spill - movl %esi, %edi - movl %esi, %eax - adcl %edi, %edi - movl %edi, -132(%ebp) # 4-byte Spill - pushl %eax - movl %edx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB177_2 -# BB#1: - movl $0, -132(%ebp) # 4-byte Folded Spill - movl $0, -112(%ebp) # 4-byte Folded Spill -.LBB177_2: - movl -144(%ebp), %esi # 4-byte Reload - addl %esi, %esi - movl -140(%ebp), %edx # 4-byte Reload - adcl %edx, %edx - movl %edx, -116(%ebp) # 4-byte Spill - movl -120(%ebp), %edx # 4-byte Reload - pushl %eax - movl %edx, %eax - addb $127, %al - sahf - popl %eax - jb .LBB177_3 -# BB#4: - movl $0, -116(%ebp) # 4-byte Folded Spill - movl $0, -120(%ebp) # 4-byte Folded Spill - jmp .LBB177_5 -.LBB177_3: - movl %eax, %edx - shrl $31, %edx - orl %esi, %edx - movl %edx, -120(%ebp) # 4-byte Spill -.LBB177_5: - movl -136(%ebp), %edx # 4-byte Reload - movl %ecx, %esi - addl %esi, %esi - adcl %edx, %edx - movl -124(%ebp), %edi # 4-byte Reload - pushl %eax - movl %edi, %eax - addb $127, %al - sahf - popl %eax - jb .LBB177_6 -# BB#7: - xorl %edx, %edx - movl $0, -128(%ebp) # 4-byte Folded Spill - movl -140(%ebp), %edi # 4-byte Reload - jmp .LBB177_8 -.LBB177_6: - movl %ecx, -124(%ebp) # 4-byte Spill - movl -140(%ebp), %edi # 4-byte Reload - movl %edi, %ecx - shrl $31, %ecx - orl %esi, %ecx - movl %ecx, -128(%ebp) # 4-byte Spill - movl -124(%ebp), %ecx # 4-byte Reload -.LBB177_8: - movl %edx, -124(%ebp) # 4-byte Spill - movl %ebx, -84(%ebp) - movl %eax, -80(%ebp) - movl -144(%ebp), %esi # 4-byte Reload - movl %esi, -76(%ebp) - movl %edi, -72(%ebp) - movl %ecx, -68(%ebp) - movl -136(%ebp), %edx # 4-byte Reload - movl %edx, -64(%ebp) - movl %ebx, -108(%ebp) - movl %eax, -104(%ebp) - movl %esi, -100(%ebp) - movl %edi, -96(%ebp) - movl %ecx, -92(%ebp) - movl %edx, -88(%ebp) - movl -156(%ebp), %eax # 4-byte Reload - movl %eax, %eax - addb $127, %al - sahf - jb .LBB177_9 -# BB#10: - movl $0, -136(%ebp) # 4-byte Folded Spill - jmp .LBB177_11 -.LBB177_9: - shrl $31, %edx - movl %edx, -136(%ebp) # 4-byte Spill -.LBB177_11: - leal -108(%ebp), %eax - movl %eax, 8(%esp) - leal 
-84(%ebp), %eax - movl %eax, 4(%esp) - leal -60(%ebp), %eax - movl %eax, (%esp) - movl -148(%ebp), %esi # 4-byte Reload - andl $1, %esi - movl -152(%ebp), %ebx # 4-byte Reload - calll mcl_fpDbl_mulPre6L@PLT - movl -112(%ebp), %eax # 4-byte Reload - addl -36(%ebp), %eax - movl %eax, -112(%ebp) # 4-byte Spill - movl -132(%ebp), %edi # 4-byte Reload - adcl -32(%ebp), %edi - movl -120(%ebp), %eax # 4-byte Reload - adcl -28(%ebp), %eax - movl %eax, -120(%ebp) # 4-byte Spill - movl -116(%ebp), %eax # 4-byte Reload - adcl -24(%ebp), %eax - movl %eax, -116(%ebp) # 4-byte Spill - movl -128(%ebp), %eax # 4-byte Reload - adcl -20(%ebp), %eax - movl %eax, -128(%ebp) # 4-byte Spill - movl -124(%ebp), %eax # 4-byte Reload - adcl -16(%ebp), %eax - movl %eax, -124(%ebp) # 4-byte Spill - adcl -136(%ebp), %esi # 4-byte Folded Reload - movl -60(%ebp), %edx - movl 8(%ebp), %eax - subl (%eax), %edx - movl -56(%ebp), %ebx - sbbl 4(%eax), %ebx - movl -52(%ebp), %ecx - sbbl 8(%eax), %ecx - movl %ecx, -136(%ebp) # 4-byte Spill - movl -48(%ebp), %ecx - sbbl 12(%eax), %ecx - movl %ecx, -144(%ebp) # 4-byte Spill - movl -44(%ebp), %ecx - sbbl 16(%eax), %ecx - movl %ecx, -172(%ebp) # 4-byte Spill - movl -40(%ebp), %ecx - sbbl 20(%eax), %ecx - movl %ecx, -140(%ebp) # 4-byte Spill - movl 24(%eax), %ecx - movl %ecx, -148(%ebp) # 4-byte Spill - sbbl %ecx, -112(%ebp) # 4-byte Folded Spill - movl 28(%eax), %ecx - movl %ecx, -152(%ebp) # 4-byte Spill - sbbl %ecx, %edi - movl %edi, -132(%ebp) # 4-byte Spill - movl 32(%eax), %ecx - movl %ecx, -156(%ebp) # 4-byte Spill - sbbl %ecx, -120(%ebp) # 4-byte Folded Spill - movl 36(%eax), %ecx - movl %ecx, -160(%ebp) # 4-byte Spill - sbbl %ecx, -116(%ebp) # 4-byte Folded Spill - movl 40(%eax), %ecx - movl %ecx, -164(%ebp) # 4-byte Spill - sbbl %ecx, -128(%ebp) # 4-byte Folded Spill - movl 44(%eax), %ecx - movl %ecx, -168(%ebp) # 4-byte Spill - sbbl %ecx, -124(%ebp) # 4-byte Folded Spill - sbbl $0, %esi - movl 48(%eax), %ecx - movl %ecx, -192(%ebp) # 4-byte Spill - subl %ecx, %edx - movl 52(%eax), %ecx - movl %ecx, -196(%ebp) # 4-byte Spill - sbbl %ecx, %ebx - movl 56(%eax), %ecx - movl %ecx, -200(%ebp) # 4-byte Spill - sbbl %ecx, -136(%ebp) # 4-byte Folded Spill - movl 60(%eax), %ecx - movl %ecx, -204(%ebp) # 4-byte Spill - sbbl %ecx, -144(%ebp) # 4-byte Folded Spill - movl 64(%eax), %ecx - movl %ecx, -208(%ebp) # 4-byte Spill - movl -172(%ebp), %edi # 4-byte Reload - sbbl %ecx, %edi - movl 68(%eax), %ecx - movl %ecx, -212(%ebp) # 4-byte Spill - sbbl %ecx, -140(%ebp) # 4-byte Folded Spill - movl 72(%eax), %ecx - movl %ecx, -216(%ebp) # 4-byte Spill - sbbl %ecx, -112(%ebp) # 4-byte Folded Spill - movl 76(%eax), %ecx - movl %ecx, -172(%ebp) # 4-byte Spill - sbbl %ecx, -132(%ebp) # 4-byte Folded Spill - movl 80(%eax), %ecx - movl %ecx, -176(%ebp) # 4-byte Spill - sbbl %ecx, -120(%ebp) # 4-byte Folded Spill - movl 84(%eax), %ecx - movl %ecx, -180(%ebp) # 4-byte Spill - sbbl %ecx, -116(%ebp) # 4-byte Folded Spill - movl 88(%eax), %ecx - movl %ecx, -184(%ebp) # 4-byte Spill - sbbl %ecx, -128(%ebp) # 4-byte Folded Spill - movl 92(%eax), %ecx - movl %ecx, -188(%ebp) # 4-byte Spill - sbbl %ecx, -124(%ebp) # 4-byte Folded Spill - sbbl $0, %esi - addl -148(%ebp), %edx # 4-byte Folded Reload - adcl -152(%ebp), %ebx # 4-byte Folded Reload - movl %edx, 24(%eax) - movl -136(%ebp), %ecx # 4-byte Reload - adcl -156(%ebp), %ecx # 4-byte Folded Reload - movl %ebx, 28(%eax) - movl -144(%ebp), %edx # 4-byte Reload - adcl -160(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 32(%eax) - adcl -164(%ebp), %edi # 
4-byte Folded Reload - movl %edx, 36(%eax) - movl -140(%ebp), %edx # 4-byte Reload - adcl -168(%ebp), %edx # 4-byte Folded Reload - movl %edi, 40(%eax) - movl -112(%ebp), %ecx # 4-byte Reload - adcl -192(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 44(%eax) - movl -132(%ebp), %edi # 4-byte Reload - adcl -196(%ebp), %edi # 4-byte Folded Reload - movl %ecx, 48(%eax) - movl -120(%ebp), %edx # 4-byte Reload - adcl -200(%ebp), %edx # 4-byte Folded Reload - movl %edi, 52(%eax) - movl -116(%ebp), %ecx # 4-byte Reload - adcl -204(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 56(%eax) - movl -128(%ebp), %edx # 4-byte Reload - adcl -208(%ebp), %edx # 4-byte Folded Reload - movl %ecx, 60(%eax) - movl -124(%ebp), %ecx # 4-byte Reload - adcl -212(%ebp), %ecx # 4-byte Folded Reload - movl %edx, 64(%eax) - adcl -216(%ebp), %esi # 4-byte Folded Reload - movl %ecx, 68(%eax) - movl %esi, 72(%eax) - movl -172(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 76(%eax) - movl -176(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 80(%eax) - movl -180(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 84(%eax) - movl -184(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 88(%eax) - movl -188(%ebp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 92(%eax) - addl $220, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end177: - .size mcl_fpDbl_sqrPre12L, .Lfunc_end177-mcl_fpDbl_sqrPre12L - - .globl mcl_fp_mont12L - .align 16, 0x90 - .type mcl_fp_mont12L,@function -mcl_fp_mont12L: # @mcl_fp_mont12L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1436, %esp # imm = 0x59C - calll .L178$pb -.L178$pb: - popl %ebx -.Ltmp29: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp29-.L178$pb), %ebx - movl 1468(%esp), %eax - movl -4(%eax), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1384(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 1384(%esp), %ebp - movl 1388(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl %ebp, %eax - imull %esi, %eax - movl 1432(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 1428(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 1424(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 1420(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 1416(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 1412(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 1408(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1404(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1400(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 1396(%esp), %edi - movl 1392(%esp), %esi - movl %eax, (%esp) - leal 1328(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - addl 1328(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 1332(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 1336(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - adcl 1340(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1344(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1348(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1352(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1356(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1360(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload 
- adcl 1364(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1368(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl 1372(%esp), %esi - movl 92(%esp), %ebp # 4-byte Reload - adcl 1376(%esp), %ebp - sbbl %edi, %edi - movl 1464(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 1272(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - andl $1, %edi - movl 84(%esp), %ecx # 4-byte Reload - addl 1272(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 1276(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1280(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1284(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1288(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1292(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1296(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1300(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1304(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1308(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 1312(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - adcl 1316(%esp), %ebp - adcl 1320(%esp), %edi - sbbl %eax, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl %ecx, %esi - movl %esi, %eax - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1216(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - movl 84(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 1216(%esp), %esi - movl 72(%esp), %ecx # 4-byte Reload - adcl 1220(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 1224(%esp), %esi - movl 56(%esp), %ecx # 4-byte Reload - adcl 1228(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1232(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1236(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1240(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1244(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1248(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1252(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1256(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - adcl 1260(%esp), %ebp - adcl 1264(%esp), %edi - adcl $0, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 1160(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 72(%esp), %ecx # 4-byte Reload - addl 1160(%esp), %ecx - adcl 1164(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1168(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1172(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1176(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1180(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 
4-byte Reload - adcl 1184(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1188(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1192(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1196(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 1200(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - adcl 1204(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 1208(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %esi - movl %esi, %eax - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1104(%esp), %ecx - movl 1468(%esp), %eax - movl %eax, %edx - calll .LmulPv384x32 - andl $1, %ebp - movl %ebp, %ecx - addl 1104(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 1108(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1112(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1116(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1120(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1124(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1128(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1132(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1136(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl 1140(%esp), %esi - movl 92(%esp), %eax # 4-byte Reload - adcl 1144(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 1148(%esp), %edi - movl 84(%esp), %ebp # 4-byte Reload - adcl 1152(%esp), %ebp - adcl $0, %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 1048(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 68(%esp), %ecx # 4-byte Reload - addl 1048(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 1052(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1056(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1060(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1064(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1068(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1072(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1076(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 1080(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1084(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 1088(%esp), %edi - adcl 1092(%esp), %ebp - movl %ebp, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1096(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %ebp - movl %ebp, %eax - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 992(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - andl $1, %esi - movl %esi, %eax - addl 992(%esp), %ebp - movl 56(%esp), %ecx # 4-byte Reload - adcl 996(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1000(%esp), %ecx 
- movl %ecx, 48(%esp) # 4-byte Spill - movl 44(%esp), %ebp # 4-byte Reload - adcl 1004(%esp), %ebp - movl 52(%esp), %ecx # 4-byte Reload - adcl 1008(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1012(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1016(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 88(%esp), %esi # 4-byte Reload - adcl 1020(%esp), %esi - movl 80(%esp), %ecx # 4-byte Reload - adcl 1024(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1028(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl 1032(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1036(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1040(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl %eax, %edi - adcl $0, %edi - movl 1464(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 936(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 56(%esp), %ecx # 4-byte Reload - addl 936(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 940(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 944(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 948(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 952(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 956(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 960(%esp), %esi - movl %esi, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 976(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 980(%esp), %esi - adcl 984(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 880(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - andl $1, %edi - movl %edi, %ecx - movl 56(%esp), %eax # 4-byte Reload - addl 880(%esp), %eax - movl 48(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 888(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 892(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %edi # 4-byte Reload - adcl 912(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 920(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 924(%esp), %esi - movl %esi, %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 928(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 1464(%esp), 
%eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 824(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 48(%esp), %ecx # 4-byte Reload - addl 824(%esp), %ecx - movl 44(%esp), %eax # 4-byte Reload - adcl 828(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 840(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 852(%esp), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 856(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 864(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 872(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %edi - movl %edi, %eax - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 768(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - andl $1, %ebp - movl %ebp, %ecx - addl 768(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 780(%esp), %ebp - adcl 784(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 800(%esp), %esi - movl 84(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl 808(%esp), %edi - movl 64(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 712(%esp), %ecx - movl 1460(%esp), %eax - movl %eax, %edx - calll .LmulPv384x32 - movl 44(%esp), %eax # 4-byte Reload - addl 712(%esp), %eax - movl 52(%esp), %ecx # 4-byte Reload - adcl 716(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl 720(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 724(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 728(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 732(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 736(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl 740(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 744(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - adcl 748(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 
64(%esp), %ecx # 4-byte Reload - adcl 752(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 756(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - adcl 760(%esp), %edi - sbbl %ebp, %ebp - movl %eax, %esi - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 656(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - andl $1, %ebp - movl %ebp, %eax - addl 656(%esp), %esi - movl 52(%esp), %ecx # 4-byte Reload - adcl 660(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - adcl 664(%esp), %esi - movl 76(%esp), %ecx # 4-byte Reload - adcl 668(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 672(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 676(%esp), %ebp - movl 92(%esp), %ecx # 4-byte Reload - adcl 680(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 684(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 688(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 692(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 696(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 700(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl 704(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - movl %eax, %edi - adcl $0, %edi - movl 1464(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 600(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 52(%esp), %ecx # 4-byte Reload - addl 600(%esp), %ecx - adcl 604(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 616(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 92(%esp), %ebp # 4-byte Reload - adcl 620(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 624(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 636(%esp), %esi - movl 56(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 648(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 544(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - movl 44(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 544(%esp), %edi - movl 60(%esp), %ecx # 4-byte Reload - adcl 548(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 552(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 556(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 560(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - adcl 564(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - 
adcl 568(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 572(%esp), %edi - movl 72(%esp), %ecx # 4-byte Reload - adcl 576(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - adcl 580(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - adcl 584(%esp), %esi - movl 48(%esp), %ecx # 4-byte Reload - adcl 588(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 592(%esp), %ebp - adcl $0, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 488(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 60(%esp), %ecx # 4-byte Reload - addl 488(%esp), %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 496(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 512(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 524(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 532(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 44(%esp), %ebp # 4-byte Reload - adcl 536(%esp), %ebp - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 432(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - andl $1, %edi - movl %edi, %ecx - addl 432(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %edi # 4-byte Reload - adcl 440(%esp), %edi - movl 80(%esp), %esi # 4-byte Reload - adcl 444(%esp), %esi - movl 92(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 480(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 376(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 76(%esp), %ecx # 4-byte Reload - addl 376(%esp), %ecx - adcl 380(%esp), %edi - movl %edi, 88(%esp) # 4-byte Spill - adcl 384(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 
68(%esp), %edi # 4-byte Reload - adcl 392(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 416(%esp), %ebp - movl 44(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 320(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - movl 76(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 320(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 332(%esp), %esi - adcl 336(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl 344(%esp), %edi - movl 64(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 360(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 264(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 88(%esp), %ecx # 4-byte Reload - addl 264(%esp), %ecx - movl 80(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 272(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 284(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl 288(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 296(%esp), %ebp - movl 52(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 88(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 
(%esp) - leal 208(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - movl 88(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 208(%esp), %esi - movl 80(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - adcl 224(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 232(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - adcl 236(%esp), %edi - adcl 240(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 44(%esp), %ebp # 4-byte Reload - adcl 248(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 1464(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 152(%esp), %ecx - movl 1460(%esp), %edx - calll .LmulPv384x32 - movl 80(%esp), %ecx # 4-byte Reload - addl 152(%esp), %ecx - movl 92(%esp), %eax # 4-byte Reload - adcl 156(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 164(%esp), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 168(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 176(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 188(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl 196(%esp), %ebp - movl 88(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - sbbl %esi, %esi - movl 40(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %edi - movl %eax, (%esp) - leal 96(%esp), %ecx - movl 1468(%esp), %edx - calll .LmulPv384x32 - andl $1, %esi - addl 96(%esp), %edi - movl 84(%esp), %ebx # 4-byte Reload - movl 92(%esp), %eax # 4-byte Reload - movl 72(%esp), %ecx # 4-byte Reload - adcl 100(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 104(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl %edx, %edi - adcl 108(%esp), %ebx - adcl 112(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 116(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 120(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - adcl 124(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 128(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - adcl 132(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 136(%esp), 
%edx - movl %edx, 60(%esp) # 4-byte Spill - adcl 140(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 88(%esp), %edx # 4-byte Reload - adcl 144(%esp), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl %edx, %ebp - adcl $0, %esi - movl 1468(%esp), %edx - subl (%edx), %eax - sbbl 4(%edx), %edi - movl %edi, 16(%esp) # 4-byte Spill - movl %ebx, %edi - sbbl 8(%edx), %edi - movl %edi, 20(%esp) # 4-byte Spill - sbbl 12(%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - sbbl 16(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - sbbl 20(%edx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - sbbl 24(%edx), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - sbbl 28(%edx), %ecx - movl 44(%esp), %edi # 4-byte Reload - sbbl 32(%edx), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - sbbl 36(%edx), %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - sbbl 40(%edx), %edi - movl %edi, 84(%esp) # 4-byte Spill - sbbl 44(%edx), %ebp - movl %ebp, %edx - sbbl $0, %esi - andl $1, %esi - jne .LBB178_2 -# BB#1: - movl %ecx, 52(%esp) # 4-byte Spill -.LBB178_2: - movl %esi, %ecx - testb %cl, %cl - movl 92(%esp), %ecx # 4-byte Reload - jne .LBB178_4 -# BB#3: - movl %eax, %ecx -.LBB178_4: - movl 1456(%esp), %eax - movl %ecx, (%eax) - movl 68(%esp), %edi # 4-byte Reload - jne .LBB178_6 -# BB#5: - movl 16(%esp), %edi # 4-byte Reload -.LBB178_6: - movl %edi, 4(%eax) - movl 64(%esp), %ebp # 4-byte Reload - jne .LBB178_8 -# BB#7: - movl 20(%esp), %ebx # 4-byte Reload -.LBB178_8: - movl %ebx, 8(%eax) - jne .LBB178_10 -# BB#9: - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 72(%esp) # 4-byte Spill -.LBB178_10: - movl 72(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%eax) - jne .LBB178_12 -# BB#11: - movl 28(%esp), %ebp # 4-byte Reload -.LBB178_12: - movl %ebp, 16(%eax) - movl 56(%esp), %ecx # 4-byte Reload - jne .LBB178_14 -# BB#13: - movl 32(%esp), %ecx # 4-byte Reload -.LBB178_14: - movl %ecx, 20(%eax) - movl 48(%esp), %ecx # 4-byte Reload - jne .LBB178_16 -# BB#15: - movl 36(%esp), %ecx # 4-byte Reload -.LBB178_16: - movl %ecx, 24(%eax) - movl 52(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 44(%esp), %ecx # 4-byte Reload - jne .LBB178_18 -# BB#17: - movl 40(%esp), %ecx # 4-byte Reload -.LBB178_18: - movl %ecx, 32(%eax) - movl 60(%esp), %ecx # 4-byte Reload - jne .LBB178_20 -# BB#19: - movl 80(%esp), %ecx # 4-byte Reload -.LBB178_20: - movl %ecx, 36(%eax) - movl 76(%esp), %ecx # 4-byte Reload - jne .LBB178_22 -# BB#21: - movl 84(%esp), %ecx # 4-byte Reload -.LBB178_22: - movl %ecx, 40(%eax) - movl 88(%esp), %ecx # 4-byte Reload - jne .LBB178_24 -# BB#23: - movl %edx, %ecx -.LBB178_24: - movl %ecx, 44(%eax) - addl $1436, %esp # imm = 0x59C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end178: - .size mcl_fp_mont12L, .Lfunc_end178-mcl_fp_mont12L - - .globl mcl_fp_montNF12L - .align 16, 0x90 - .type mcl_fp_montNF12L,@function -mcl_fp_montNF12L: # @mcl_fp_montNF12L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1420, %esp # imm = 0x58C - calll .L179$pb -.L179$pb: - popl %ebx -.Ltmp30: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp30-.L179$pb), %ebx - movl 1452(%esp), %eax - movl -4(%eax), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 1448(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1368(%esp), %ecx - movl 1444(%esp), %edx - calll .LmulPv384x32 - movl 1368(%esp), 
%ebp - movl 1372(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl %ebp, %eax - imull %esi, %eax - movl 1416(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 1412(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 1408(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 1404(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 1400(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1396(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1392(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 1388(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 1384(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 1380(%esp), %edi - movl 1376(%esp), %esi - movl %eax, (%esp) - leal 1312(%esp), %ecx - movl 1452(%esp), %edx - calll .LmulPv384x32 - addl 1312(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 1316(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1320(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - adcl 1324(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1328(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1332(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 1336(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1340(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 1344(%esp), %edi - movl 72(%esp), %eax # 4-byte Reload - adcl 1348(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1352(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - adcl 1356(%esp), %esi - movl 76(%esp), %ebp # 4-byte Reload - adcl 1360(%esp), %ebp - movl 1448(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 1256(%esp), %ecx - movl 1444(%esp), %edx - calll .LmulPv384x32 - movl 1304(%esp), %eax - movl 56(%esp), %edx # 4-byte Reload - addl 1256(%esp), %edx - movl 68(%esp), %ecx # 4-byte Reload - adcl 1260(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1264(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1268(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 1272(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - adcl 1276(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1280(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - adcl 1284(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1288(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1292(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - adcl 1296(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - adcl 1300(%esp), %ebp - adcl $0, %eax - movl %eax, %edi - movl %edx, %esi - movl %esi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1200(%esp), %ecx - movl 1452(%esp), %edx - calll .LmulPv384x32 - addl 1200(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 1204(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - adcl 1208(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 1212(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1216(%esp), %eax - movl %eax, 36(%esp) # 
4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 1220(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1224(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1228(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1232(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1236(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1240(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 1244(%esp), %ebp - adcl 1248(%esp), %edi - movl 1448(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 1144(%esp), %ecx - movl 1444(%esp), %edx - calll .LmulPv384x32 - movl 1192(%esp), %eax - movl 68(%esp), %edx # 4-byte Reload - addl 1144(%esp), %edx - adcl 1148(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1152(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 1156(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 32(%esp), %esi # 4-byte Reload - adcl 1160(%esp), %esi - movl 48(%esp), %ecx # 4-byte Reload - adcl 1164(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1168(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1172(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1176(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1180(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl 1184(%esp), %ebp - adcl 1188(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 56(%esp) # 4-byte Spill - movl %edx, %edi - movl %edi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1088(%esp), %ecx - movl 1452(%esp), %eax - movl %eax, %edx - calll .LmulPv384x32 - addl 1088(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 1092(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1096(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1100(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl %esi, %edi - adcl 1104(%esp), %edi - movl 48(%esp), %eax # 4-byte Reload - adcl 1108(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1112(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1116(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1120(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - adcl 1124(%esp), %esi - adcl 1128(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1132(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 1136(%esp), %ebp - movl 1448(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 1032(%esp), %ecx - movl 1444(%esp), %edx - calll .LmulPv384x32 - movl 1080(%esp), %eax - movl 52(%esp), %edx # 4-byte Reload - addl 1032(%esp), %edx - movl 40(%esp), %ecx # 4-byte Reload - adcl 1036(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 1040(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl 1044(%esp), %edi - movl %edi, 32(%esp) # 4-byte Spill - movl 
48(%esp), %ecx # 4-byte Reload
- adcl 1048(%esp), %ecx
- movl %ecx, 48(%esp) # 4-byte Spill
- movl 44(%esp), %ecx # 4-byte Reload
- adcl 1052(%esp), %ecx
- movl %ecx, 44(%esp) # 4-byte Spill
- movl 72(%esp), %ecx # 4-byte Reload
- adcl 1056(%esp), %ecx
- movl %ecx, 72(%esp) # 4-byte Spill
- movl 64(%esp), %ecx # 4-byte Reload
- adcl 1060(%esp), %ecx
- movl %ecx, 64(%esp) # 4-byte Spill
- adcl 1064(%esp), %esi
- movl %esi, 60(%esp) # 4-byte Spill
- movl 76(%esp), %ecx # 4-byte Reload
- adcl 1068(%esp), %ecx
- movl %ecx, 76(%esp) # 4-byte Spill
- movl 68(%esp), %ecx # 4-byte Reload
- adcl 1072(%esp), %ecx
- movl %ecx, 68(%esp) # 4-byte Spill
- adcl 1076(%esp), %ebp
- movl %ebp, 56(%esp) # 4-byte Spill
- adcl $0, %eax
- movl %eax, %edi
- movl %edx, %esi
- movl %esi, %eax
- imull 28(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 976(%esp), %ecx
- movl 1452(%esp), %edx
- calll .LmulPv384x32
- addl 976(%esp), %esi
- movl 40(%esp), %eax # 4-byte Reload
- adcl 980(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
- adcl 984(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 32(%esp), %eax # 4-byte Reload
- adcl 988(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 48(%esp), %eax # 4-byte Reload
- adcl 992(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 996(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 72(%esp), %eax # 4-byte Reload
- adcl 1000(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 64(%esp), %esi # 4-byte Reload
- adcl 1004(%esp), %esi
- movl 60(%esp), %eax # 4-byte Reload
- adcl 1008(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 76(%esp), %ebp # 4-byte Reload
- adcl 1012(%esp), %ebp
- movl 68(%esp), %eax # 4-byte Reload
- adcl 1016(%esp), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 1020(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- adcl 1024(%esp), %edi
- movl %edi, 52(%esp) # 4-byte Spill
- movl 1448(%esp), %eax
- movl 16(%eax), %eax
- movl %eax, (%esp)
- leal 920(%esp), %ecx
- movl 1444(%esp), %edx
- calll .LmulPv384x32
- movl 968(%esp), %eax
- movl 40(%esp), %edx # 4-byte Reload
- addl 920(%esp), %edx
- movl 36(%esp), %ecx # 4-byte Reload
- adcl 924(%esp), %ecx
- movl %ecx, 36(%esp) # 4-byte Spill
- movl 32(%esp), %ecx # 4-byte Reload
- adcl 928(%esp), %ecx
- movl %ecx, 32(%esp) # 4-byte Spill
- movl 48(%esp), %ecx # 4-byte Reload
- adcl 932(%esp), %ecx
- movl %ecx, 48(%esp) # 4-byte Spill
- movl 44(%esp), %ecx # 4-byte Reload
- adcl 936(%esp), %ecx
- movl %ecx, 44(%esp) # 4-byte Spill
- movl 72(%esp), %ecx # 4-byte Reload
- adcl 940(%esp), %ecx
- movl %ecx, 72(%esp) # 4-byte Spill
- adcl 944(%esp), %esi
- movl 60(%esp), %ecx # 4-byte Reload
- adcl 948(%esp), %ecx
- movl %ecx, 60(%esp) # 4-byte Spill
- adcl 952(%esp), %ebp
- movl %ebp, 76(%esp) # 4-byte Spill
- movl 68(%esp), %edi # 4-byte Reload
- adcl 956(%esp), %edi
- movl 56(%esp), %ecx # 4-byte Reload
- adcl 960(%esp), %ecx
- movl %ecx, 56(%esp) # 4-byte Spill
- movl 52(%esp), %ecx # 4-byte Reload
- adcl 964(%esp), %ecx
- movl %ecx, 52(%esp) # 4-byte Spill
- adcl $0, %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl %edx, %ebp
- movl %ebp, %eax
- imull 28(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 864(%esp), %ecx
- movl 1452(%esp), %edx
- calll .LmulPv384x32
- addl 864(%esp), %ebp
- movl 36(%esp), %eax # 4-byte Reload
- adcl 868(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 32(%esp), %eax # 4-byte Reload
- adcl 872(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 48(%esp), %eax # 4-byte Reload
- adcl 876(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 880(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 72(%esp), %ebp # 4-byte Reload
- adcl 884(%esp), %ebp
- adcl 888(%esp), %esi
- movl 60(%esp), %eax # 4-byte Reload
- adcl 892(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 76(%esp), %eax # 4-byte Reload
- adcl 896(%esp), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- adcl 900(%esp), %edi
- movl %edi, 68(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 904(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 52(%esp), %eax # 4-byte Reload
- adcl 908(%esp), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- movl 40(%esp), %edi # 4-byte Reload
- adcl 912(%esp), %edi
- movl 1448(%esp), %eax
- movl 20(%eax), %eax
- movl %eax, (%esp)
- leal 808(%esp), %ecx
- movl 1444(%esp), %edx
- calll .LmulPv384x32
- movl 856(%esp), %edx
- movl 36(%esp), %ecx # 4-byte Reload
- addl 808(%esp), %ecx
- movl 32(%esp), %eax # 4-byte Reload
- adcl 812(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 48(%esp), %eax # 4-byte Reload
- adcl 816(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 820(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- adcl 824(%esp), %ebp
- movl %ebp, 72(%esp) # 4-byte Spill
- adcl 828(%esp), %esi
- movl %esi, 64(%esp) # 4-byte Spill
- movl 60(%esp), %ebp # 4-byte Reload
- adcl 832(%esp), %ebp
- movl 76(%esp), %eax # 4-byte Reload
- adcl 836(%esp), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- movl 68(%esp), %eax # 4-byte Reload
- adcl 840(%esp), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 844(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 52(%esp), %eax # 4-byte Reload
- adcl 848(%esp), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- adcl 852(%esp), %edi
- movl %edi, 40(%esp) # 4-byte Spill
- adcl $0, %edx
- movl %edx, 36(%esp) # 4-byte Spill
- movl %ecx, %esi
- movl %esi, %eax
- imull 28(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 752(%esp), %ecx
- movl 1452(%esp), %eax
- movl %eax, %edx
- calll .LmulPv384x32
- addl 752(%esp), %esi
- movl 32(%esp), %eax # 4-byte Reload
- adcl 756(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 48(%esp), %edi # 4-byte Reload
- adcl 760(%esp), %edi
- movl 44(%esp), %esi # 4-byte Reload
- adcl 764(%esp), %esi
- movl 72(%esp), %eax # 4-byte Reload
- adcl 768(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 64(%esp), %eax # 4-byte Reload
- adcl 772(%esp), %eax
- movl %eax, 64(%esp) # 4-byte Spill
- adcl 776(%esp), %ebp
- movl %ebp, 60(%esp) # 4-byte Spill
- movl 76(%esp), %eax # 4-byte Reload
- adcl 780(%esp), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- movl 68(%esp), %eax # 4-byte Reload
- adcl 784(%esp), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 788(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 52(%esp), %ebp # 4-byte Reload
- adcl 792(%esp), %ebp
- movl 40(%esp), %eax # 4-byte Reload
- adcl 796(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
- adcl 800(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 1448(%esp), %ecx
- movl %ecx, %eax
- movl 24(%eax), %eax
- movl %eax, (%esp)
- leal 696(%esp), %ecx
- movl 1444(%esp), %eax
- movl %eax, %edx
- calll .LmulPv384x32
- movl 744(%esp), %ecx
- movl 32(%esp), %eax # 4-byte Reload
- addl 696(%esp), %eax
- adcl 700(%esp), %edi
- movl %edi, 48(%esp) # 4-byte Spill
- adcl 704(%esp), %esi
- movl %esi, 44(%esp) # 4-byte Spill
- movl 72(%esp), %edx # 4-byte Reload
- adcl 708(%esp), %edx
- movl %edx, 72(%esp) # 4-byte Spill
- movl 64(%esp), %edx # 4-byte Reload
- adcl 712(%esp), %edx
- movl %edx, 64(%esp) # 4-byte Spill
- movl 60(%esp), %esi # 4-byte Reload
- adcl 716(%esp), %esi
- movl 76(%esp), %edx # 4-byte Reload
- adcl 720(%esp), %edx
- movl %edx, 76(%esp) # 4-byte Spill
- movl 68(%esp), %edx # 4-byte Reload
- adcl 724(%esp), %edx
- movl %edx, 68(%esp) # 4-byte Spill
- movl 56(%esp), %edi # 4-byte Reload
- adcl 728(%esp), %edi
- adcl 732(%esp), %ebp
- movl %ebp, 52(%esp) # 4-byte Spill
- movl 40(%esp), %edx # 4-byte Reload
- adcl 736(%esp), %edx
- movl %edx, 40(%esp) # 4-byte Spill
- movl 36(%esp), %edx # 4-byte Reload
- adcl 740(%esp), %edx
- movl %edx, 36(%esp) # 4-byte Spill
- adcl $0, %ecx
- movl %ecx, 32(%esp) # 4-byte Spill
- movl %eax, %ebp
- imull 28(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 640(%esp), %ecx
- movl 1452(%esp), %edx
- calll .LmulPv384x32
- addl 640(%esp), %ebp
- movl 48(%esp), %eax # 4-byte Reload
- adcl 644(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 44(%esp), %ebp # 4-byte Reload
- adcl 648(%esp), %ebp
- movl 72(%esp), %eax # 4-byte Reload
- adcl 652(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 64(%esp), %eax # 4-byte Reload
- adcl 656(%esp), %eax
- movl %eax, 64(%esp) # 4-byte Spill
- adcl 660(%esp), %esi
- movl %esi, 60(%esp) # 4-byte Spill
- movl 76(%esp), %eax # 4-byte Reload
- adcl 664(%esp), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- movl 68(%esp), %esi # 4-byte Reload
- adcl 668(%esp), %esi
- adcl 672(%esp), %edi
- movl %edi, 56(%esp) # 4-byte Spill
- movl 52(%esp), %edi # 4-byte Reload
- adcl 676(%esp), %edi
- movl 40(%esp), %eax # 4-byte Reload
- adcl 680(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
- adcl 684(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 32(%esp), %eax # 4-byte Reload
- adcl 688(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 1448(%esp), %eax
- movl 28(%eax), %eax
- movl %eax, (%esp)
- leal 584(%esp), %ecx
- movl 1444(%esp), %edx
- calll .LmulPv384x32
- movl 632(%esp), %edx
- movl 48(%esp), %ecx # 4-byte Reload
- addl 584(%esp), %ecx
- adcl 588(%esp), %ebp
- movl %ebp, 44(%esp) # 4-byte Spill
- movl 72(%esp), %eax # 4-byte Reload
- adcl 592(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 64(%esp), %ebp # 4-byte Reload
- adcl 596(%esp), %ebp
- movl 60(%esp), %eax # 4-byte Reload
- adcl 600(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 76(%esp), %eax # 4-byte Reload
- adcl 604(%esp), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- adcl 608(%esp), %esi
- movl %esi, 68(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 612(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- adcl 616(%esp), %edi
- movl %edi, 52(%esp) # 4-byte Spill
- movl 40(%esp), %edi # 4-byte Reload
- adcl 620(%esp), %edi
- movl 36(%esp), %eax # 4-byte Reload
- adcl 624(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 32(%esp), %eax # 4-byte Reload
- adcl 628(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- adcl $0, %edx
- movl %edx, 48(%esp) # 4-byte Spill
- movl %ecx, %eax
- movl %ecx, %esi
- imull 28(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 528(%esp), %ecx
- movl 1452(%esp), %edx
- calll .LmulPv384x32
- addl 528(%esp), %esi
- movl 44(%esp), %eax # 4-byte Reload
- adcl 532(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 72(%esp), %eax # 4-byte Reload
- adcl 536(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- adcl 540(%esp), %ebp
- movl %ebp, 64(%esp) # 4-byte Spill
- movl 60(%esp), %eax # 4-byte Reload
- adcl 544(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 76(%esp), %eax # 4-byte Reload
- adcl 548(%esp), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- movl 68(%esp), %eax # 4-byte Reload
- adcl 552(%esp), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 556(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 52(%esp), %eax # 4-byte Reload
- adcl 560(%esp), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- adcl 564(%esp), %edi
- movl %edi, 40(%esp) # 4-byte Spill
- movl 36(%esp), %edi # 4-byte Reload
- adcl 568(%esp), %edi
- movl 32(%esp), %esi # 4-byte Reload
- adcl 572(%esp), %esi
- movl 48(%esp), %ebp # 4-byte Reload
- adcl 576(%esp), %ebp
- movl 1448(%esp), %eax
- movl 32(%eax), %eax
- movl %eax, (%esp)
- leal 472(%esp), %ecx
- movl 1444(%esp), %edx
- calll .LmulPv384x32
- movl 520(%esp), %edx
- movl 44(%esp), %ecx # 4-byte Reload
- addl 472(%esp), %ecx
- movl 72(%esp), %eax # 4-byte Reload
- adcl 476(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 64(%esp), %eax # 4-byte Reload
- adcl 480(%esp), %eax
- movl %eax, 64(%esp) # 4-byte Spill
- movl 60(%esp), %eax # 4-byte Reload
- adcl 484(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 76(%esp), %eax # 4-byte Reload
- adcl 488(%esp), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- movl 68(%esp), %eax # 4-byte Reload
- adcl 492(%esp), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 496(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 52(%esp), %eax # 4-byte Reload
- adcl 500(%esp), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- movl 40(%esp), %eax # 4-byte Reload
- adcl 504(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- adcl 508(%esp), %edi
- movl %edi, 36(%esp) # 4-byte Spill
- adcl 512(%esp), %esi
- movl %esi, 32(%esp) # 4-byte Spill
- adcl 516(%esp), %ebp
- movl %ebp, 48(%esp) # 4-byte Spill
- adcl $0, %edx
- movl %edx, 44(%esp) # 4-byte Spill
- movl %ecx, %eax
- movl %ecx, %ebp
- imull 28(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 416(%esp), %ecx
- movl 1452(%esp), %edx
- calll .LmulPv384x32
- addl 416(%esp), %ebp
- movl 72(%esp), %eax # 4-byte Reload
- adcl 420(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 64(%esp), %eax # 4-byte Reload
- adcl 424(%esp), %eax
- movl %eax, 64(%esp) # 4-byte Spill
- movl 60(%esp), %eax # 4-byte Reload
- adcl 428(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 76(%esp), %edi # 4-byte Reload
- adcl 432(%esp), %edi
- movl 68(%esp), %eax # 4-byte Reload
- adcl 436(%esp), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 56(%esp), %ebp # 4-byte Reload
- adcl 440(%esp), %ebp
- movl 52(%esp), %esi # 4-byte Reload
- adcl 444(%esp), %esi
- movl 40(%esp), %eax # 4-byte Reload
- adcl 448(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
- adcl 452(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 32(%esp), %eax # 4-byte Reload
- adcl 456(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 48(%esp), %eax # 4-byte Reload
- adcl 460(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 464(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 1448(%esp), %eax
- movl 36(%eax), %eax
- movl %eax, (%esp)
- leal 360(%esp), %ecx
- movl 1444(%esp), %edx
- calll .LmulPv384x32
- movl 408(%esp), %edx
- movl 72(%esp), %ecx # 4-byte Reload
- addl 360(%esp), %ecx
- movl 64(%esp), %eax # 4-byte Reload
- adcl 364(%esp), %eax
- movl %eax, 64(%esp) # 4-byte Spill
- movl 60(%esp), %eax # 4-byte Reload
- adcl 368(%esp), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- adcl 372(%esp), %edi
- movl %edi, 76(%esp) # 4-byte Spill
- movl 68(%esp), %eax # 4-byte Reload
- adcl 376(%esp), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- adcl 380(%esp), %ebp
- movl %ebp, 56(%esp) # 4-byte Spill
- adcl 384(%esp), %esi
- movl 40(%esp), %eax # 4-byte Reload
- adcl 388(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
- adcl 392(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 32(%esp), %eax # 4-byte Reload
- adcl 396(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 48(%esp), %eax # 4-byte Reload
- adcl 400(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 404(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- adcl $0, %edx
- movl %edx, 72(%esp) # 4-byte Spill
- movl %ecx, %eax
- movl %ecx, %ebp
- imull 28(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 304(%esp), %ecx
- movl 1452(%esp), %edx
- calll .LmulPv384x32
- addl 304(%esp), %ebp
- movl 64(%esp), %eax # 4-byte Reload
- adcl 308(%esp), %eax
- movl %eax, 64(%esp) # 4-byte Spill
- movl 60(%esp), %edi # 4-byte Reload
- adcl 312(%esp), %edi
- movl 76(%esp), %eax # 4-byte Reload
- adcl 316(%esp), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- movl 68(%esp), %ebp # 4-byte Reload
- adcl 320(%esp), %ebp
- movl 56(%esp), %eax # 4-byte Reload
- adcl 324(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- adcl 328(%esp), %esi
- movl %esi, 52(%esp) # 4-byte Spill
- movl 40(%esp), %esi # 4-byte Reload
- adcl 332(%esp), %esi
- movl 36(%esp), %eax # 4-byte Reload
- adcl 336(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 32(%esp), %eax # 4-byte Reload
- adcl 340(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 48(%esp), %eax # 4-byte Reload
- adcl 344(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 348(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 72(%esp), %eax # 4-byte Reload
- adcl 352(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 1448(%esp), %eax
- movl 40(%eax), %eax
- movl %eax, (%esp)
- leal 248(%esp), %ecx
- movl 1444(%esp), %edx
- calll .LmulPv384x32
- movl 296(%esp), %edx
- movl 64(%esp), %ecx # 4-byte Reload
- addl 248(%esp), %ecx
- adcl 252(%esp), %edi
- movl 76(%esp), %eax # 4-byte Reload
- adcl 256(%esp), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- adcl 260(%esp), %ebp
- movl %ebp, 68(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 264(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 52(%esp), %eax # 4-byte Reload
- adcl 268(%esp), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- movl %esi, %ebp
- adcl 272(%esp), %ebp
- movl 36(%esp), %eax # 4-byte Reload
- adcl 276(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 32(%esp), %eax # 4-byte Reload
- adcl 280(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 48(%esp), %eax # 4-byte Reload
- adcl 284(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 288(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 72(%esp), %eax # 4-byte Reload
- adcl 292(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- adcl $0, %edx
- movl %edx, 64(%esp) # 4-byte Spill
- movl %ecx, %eax
- movl %ecx, %esi
- imull 28(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 192(%esp), %ecx
- movl 1452(%esp), %edx
- calll .LmulPv384x32
- addl 192(%esp), %esi
- adcl 196(%esp), %edi
- movl %edi, 60(%esp) # 4-byte Spill
- movl 76(%esp), %edi # 4-byte Reload
- adcl 200(%esp), %edi
- movl 68(%esp), %esi # 4-byte Reload
- adcl 204(%esp), %esi
- movl 56(%esp), %eax # 4-byte Reload
- adcl 208(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 52(%esp), %eax # 4-byte Reload
- adcl 212(%esp), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- adcl 216(%esp), %ebp
- movl %ebp, 40(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
- adcl 220(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 32(%esp), %ebp # 4-byte Reload
- adcl 224(%esp), %ebp
- movl 48(%esp), %eax # 4-byte Reload
- adcl 228(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- adcl 232(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 72(%esp), %eax # 4-byte Reload
- adcl 236(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 64(%esp), %eax # 4-byte Reload
- adcl 240(%esp), %eax
- movl %eax, 64(%esp) # 4-byte Spill
- movl 1448(%esp), %eax
- movl 44(%eax), %eax
- movl %eax, (%esp)
- leal 136(%esp), %ecx
- movl 1444(%esp), %edx
- calll .LmulPv384x32
- movl 184(%esp), %edx
- movl 60(%esp), %ecx # 4-byte Reload
- addl 136(%esp), %ecx
- adcl 140(%esp), %edi
- movl %edi, 76(%esp) # 4-byte Spill
- adcl 144(%esp), %esi
- movl %esi, 68(%esp) # 4-byte Spill
- movl 56(%esp), %eax # 4-byte Reload
- adcl 148(%esp), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 52(%esp), %eax # 4-byte Reload
- adcl 152(%esp), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- movl 40(%esp), %eax # 4-byte Reload
- adcl 156(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 36(%esp), %edi # 4-byte Reload
- adcl 160(%esp), %edi
- adcl 164(%esp), %ebp
- movl %ebp, 32(%esp) # 4-byte Spill
- movl 48(%esp), %ebp # 4-byte Reload
- adcl 168(%esp), %ebp
- movl 44(%esp), %eax # 4-byte Reload
- adcl 172(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 72(%esp), %eax # 4-byte Reload
- adcl 176(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 64(%esp), %eax # 4-byte Reload
- adcl 180(%esp), %eax
- movl %eax, 64(%esp) # 4-byte Spill
- adcl $0, %edx
- movl %edx, 60(%esp) # 4-byte Spill
- movl 28(%esp), %eax # 4-byte Reload
- imull %ecx, %eax
- movl %ecx, %esi
- movl %eax, (%esp)
- leal 80(%esp), %ecx
- movl 1452(%esp), %eax
- movl %eax, %edx
- calll .LmulPv384x32
- addl 80(%esp), %esi
- movl 56(%esp), %esi # 4-byte Reload
- movl 76(%esp), %eax # 4-byte Reload
- adcl 84(%esp), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- movl 68(%esp), %ecx # 4-byte Reload
- adcl 88(%esp), %ecx
- movl %ecx, 68(%esp) # 4-byte Spill
- adcl 92(%esp), %esi
- movl 52(%esp), %edx # 4-byte Reload
- adcl 96(%esp), %edx
- movl %edx, 52(%esp) # 4-byte Spill
- movl 40(%esp), %edx # 4-byte Reload
- adcl 100(%esp), %edx
- movl %edx, 40(%esp) # 4-byte Spill
- adcl 104(%esp), %edi
- movl %edi, 36(%esp) # 4-byte Spill
- movl 32(%esp), %edx # 4-byte Reload
- adcl 108(%esp), %edx
- movl %edx, 32(%esp) # 4-byte Spill
- adcl 112(%esp), %ebp
- movl %ebp, 48(%esp) # 4-byte Spill
- movl 44(%esp), %edx # 4-byte Reload
- adcl 116(%esp), %edx
- movl %edx, 44(%esp) # 4-byte Spill
- movl 72(%esp), %edx # 4-byte Reload
- adcl 120(%esp), %edx
- movl %edx, 72(%esp) # 4-byte Spill
- movl 64(%esp), %edx # 4-byte Reload
- adcl 124(%esp), %edx
- movl %edx, 64(%esp) # 4-byte Spill
- movl 60(%esp), %edx # 4-byte Reload
- adcl 128(%esp), %edx
- movl %edx, 60(%esp) # 4-byte Spill
- movl %eax, %edx
- movl 1452(%esp), %ebp
- subl (%ebp), %edx
- movl %ecx, %eax
- sbbl 4(%ebp), %eax
- movl %esi, %ebx
- sbbl 8(%ebp), %ebx
- movl 52(%esp), %ecx # 4-byte Reload
- sbbl 12(%ebp), %ecx
- movl 40(%esp), %edi # 4-byte Reload
- sbbl 16(%ebp), %edi
- movl %edi, 4(%esp) # 4-byte Spill
- movl 36(%esp), %edi # 4-byte Reload
- sbbl 20(%ebp), %edi
- movl %edi, 8(%esp) # 4-byte Spill
- movl 32(%esp), %edi # 4-byte Reload
- sbbl 24(%ebp), %edi
- movl %edi, 12(%esp) # 4-byte Spill
- movl 48(%esp), %edi # 4-byte Reload
- sbbl 28(%ebp), %edi
- movl %edi, 16(%esp) # 4-byte Spill
- movl 44(%esp), %edi # 4-byte Reload
- sbbl 32(%ebp), %edi
- movl %edi, 20(%esp) # 4-byte Spill
- movl 72(%esp), %edi # 4-byte Reload
- sbbl 36(%ebp), %edi
- movl %edi, 24(%esp) # 4-byte Spill
- movl 64(%esp), %edi # 4-byte Reload
- sbbl 40(%ebp), %edi
- movl %edi, 28(%esp) # 4-byte Spill
- movl 60(%esp), %edi # 4-byte Reload
- sbbl 44(%ebp), %edi
- movl %edi, 56(%esp) # 4-byte Spill
- movl %edi, %ebp
- sarl $31, %ebp
- testl %ebp, %ebp
- movl 76(%esp), %ebp # 4-byte Reload
- js .LBB179_2
-# BB#1:
- movl %edx, %ebp
-.LBB179_2:
- movl 1440(%esp), %edx
- movl %ebp, (%edx)
- movl 68(%esp), %edi # 4-byte Reload
- js .LBB179_4
-# BB#3:
- movl %eax, %edi
-.LBB179_4:
- movl %edi, 4(%edx)
- js .LBB179_6
-# BB#5:
- movl %ebx, %esi
-.LBB179_6:
- movl %esi, 8(%edx)
- movl 52(%esp), %eax # 4-byte Reload
- js .LBB179_8
-# BB#7:
- movl %ecx, %eax
-.LBB179_8:
- movl %eax, 12(%edx)
- movl 40(%esp), %eax # 4-byte Reload
- js .LBB179_10
-# BB#9:
- movl 4(%esp), %eax # 4-byte Reload
-.LBB179_10:
- movl %eax, 16(%edx)
- movl 36(%esp), %eax # 4-byte Reload
- js .LBB179_12
-# BB#11:
- movl 8(%esp), %eax # 4-byte Reload
-.LBB179_12:
- movl %eax, 20(%edx)
- movl 32(%esp), %eax # 4-byte Reload
- js .LBB179_14
-# BB#13:
- movl 12(%esp), %eax # 4-byte Reload
-.LBB179_14:
- movl %eax, 24(%edx)
- movl 48(%esp), %eax # 4-byte Reload
- js .LBB179_16
-# BB#15:
- movl 16(%esp), %eax # 4-byte Reload
-.LBB179_16:
- movl %eax, 28(%edx)
- movl 44(%esp), %eax # 4-byte Reload
- js .LBB179_18
-# BB#17:
- movl 20(%esp), %eax # 4-byte Reload
-.LBB179_18:
- movl %eax, 32(%edx)
- movl 72(%esp), %eax # 4-byte Reload
- js .LBB179_20
-# BB#19:
- movl 24(%esp), %eax # 4-byte Reload
-.LBB179_20:
- movl %eax, 36(%edx)
- movl 64(%esp), %eax # 4-byte Reload
- js .LBB179_22
-# BB#21:
- movl 28(%esp), %eax # 4-byte Reload
-.LBB179_22:
- movl %eax, 40(%edx)
- movl 60(%esp), %eax # 4-byte Reload
- js .LBB179_24
-# BB#23:
- movl 56(%esp), %eax # 4-byte Reload
-.LBB179_24:
- movl %eax, 44(%edx)
- addl $1420, %esp # imm = 0x58C
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end179:
- .size mcl_fp_montNF12L, .Lfunc_end179-mcl_fp_montNF12L
-
- .globl mcl_fp_montRed12L
- .align 16, 0x90
- .type mcl_fp_montRed12L,@function
-mcl_fp_montRed12L: # @mcl_fp_montRed12L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $828, %esp # imm = 0x33C
- calll .L180$pb
-.L180$pb:
- popl %eax
-.Ltmp31:
- addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp31-.L180$pb), %eax
- movl %eax, 92(%esp) # 4-byte Spill
- movl 856(%esp), %edx
- movl -4(%edx), %esi
- movl %esi, 84(%esp) # 4-byte Spill
- movl 852(%esp), %ecx
- movl (%ecx), %ebx
- movl %ebx, 88(%esp) # 4-byte Spill
- movl 4(%ecx), %edi
- movl %edi, 100(%esp) # 4-byte Spill
- imull %esi, %ebx
- movl 92(%ecx), %esi
- movl %esi, 104(%esp) # 4-byte Spill
- movl 88(%ecx), %esi
- movl %esi, 108(%esp) # 4-byte Spill
- movl 84(%ecx), %esi
- movl %esi, 120(%esp) # 4-byte Spill
- movl 80(%ecx), %eax
- movl %eax, 128(%esp) # 4-byte Spill
- movl 76(%ecx), %eax
- movl %eax, 144(%esp) # 4-byte Spill
- movl 72(%ecx), %eax
- movl %eax, 124(%esp) # 4-byte Spill
- movl 68(%ecx), %edi
- movl %edi, 140(%esp) # 4-byte Spill
- movl 64(%ecx), %eax
- movl %eax, 116(%esp) # 4-byte Spill
- movl 60(%ecx), %esi
- movl %esi, 148(%esp) # 4-byte Spill
- movl 56(%ecx), %eax
- movl %eax, 136(%esp) # 4-byte Spill
- movl 52(%ecx), %esi
- movl %esi, 156(%esp) # 4-byte Spill
- movl 48(%ecx), %edi
- movl %edi, 152(%esp) # 4-byte Spill
- movl 44(%ecx), %edi
- movl %edi, 132(%esp) # 4-byte Spill
- movl 40(%ecx), %eax
- movl %eax, 112(%esp) # 4-byte Spill
- movl 36(%ecx), %eax
- movl %eax, 96(%esp) # 4-byte Spill
- movl 32(%ecx), %edi
- movl 28(%ecx), %esi
- movl 24(%ecx), %ebp
- movl %ebp, 80(%esp) # 4-byte Spill
- movl 20(%ecx), %ebp
- movl 16(%ecx), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 12(%ecx), %eax
- movl %eax, 64(%esp) # 4-byte Spill
- movl 8(%ecx), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- movl (%edx), %ecx
- movl %ecx, 24(%esp) # 4-byte Spill
- movl 44(%edx), %ecx
- movl %ecx, 60(%esp) # 4-byte Spill
- movl 40(%edx), %ecx
- movl %ecx, 56(%esp) # 4-byte Spill
- movl 36(%edx), %ecx
- movl %ecx, 52(%esp) # 4-byte Spill
- movl 32(%edx), %ecx
- movl %ecx, 48(%esp) # 4-byte Spill
- movl 28(%edx), %ecx
- movl %ecx, 44(%esp) # 4-byte Spill
- movl 24(%edx), %ecx
- movl %ecx, 40(%esp) # 4-byte Spill
- movl 20(%edx), %ecx
- movl %ecx, 36(%esp) # 4-byte Spill
- movl 16(%edx), %ecx
- movl %ecx, 32(%esp) # 4-byte Spill
- movl 12(%edx), %ecx
- movl %ecx, 28(%esp) # 4-byte Spill
- movl 8(%edx), %ecx
- movl %ecx, 20(%esp) # 4-byte Spill
- movl 4(%edx), %ecx
- movl %ecx, 16(%esp) # 4-byte Spill
- movl %ebx, (%esp)
- leal 776(%esp), %ecx
- movl 92(%esp), %ebx # 4-byte Reload
- calll .LmulPv384x32
- movl 88(%esp), %eax # 4-byte Reload
- addl 776(%esp), %eax
- movl 100(%esp), %ecx # 4-byte Reload
- adcl 780(%esp), %ecx
- movl 76(%esp), %eax # 4-byte Reload
- adcl 784(%esp), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- movl 64(%esp), %eax # 4-byte Reload
- adcl 788(%esp), %eax
- movl %eax, 64(%esp) # 4-byte Spill
- movl 68(%esp), %eax # 4-byte Reload
- adcl 792(%esp), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- adcl 796(%esp), %ebp
- movl %ebp, 72(%esp) # 4-byte Spill
- movl 80(%esp), %eax # 4-byte Reload
- adcl 800(%esp), %eax
- movl %eax, 80(%esp) # 4-byte Spill
- adcl 804(%esp), %esi
- movl %esi, 88(%esp) # 4-byte Spill
- adcl 808(%esp), %edi
- movl %edi, 100(%esp) # 4-byte Spill
- movl 96(%esp), %eax # 4-byte Reload
- adcl 812(%esp), %eax
- movl %eax, 96(%esp) # 4-byte Spill
- movl 112(%esp), %eax # 4-byte Reload
- adcl 816(%esp), %eax
- movl %eax, 112(%esp) # 4-byte Spill
- movl 132(%esp), %eax # 4-byte Reload
- adcl 820(%esp), %eax
- movl %eax, 132(%esp) # 4-byte Spill
- movl 152(%esp), %eax # 4-byte Reload
- adcl 824(%esp), %eax
- movl %eax, 152(%esp) # 4-byte Spill
- adcl $0, 156(%esp) # 4-byte Folded Spill
- movl 136(%esp), %edi # 4-byte Reload
- adcl $0, %edi
- adcl $0, 148(%esp) # 4-byte Folded Spill
- adcl $0, 116(%esp) # 4-byte Folded Spill
- adcl $0, 140(%esp) # 4-byte Folded Spill
- adcl $0, 124(%esp) # 4-byte Folded Spill
- adcl $0, 144(%esp) # 4-byte Folded Spill
- adcl $0, 128(%esp) # 4-byte Folded Spill
- adcl $0, 120(%esp) # 4-byte Folded Spill
- adcl $0, 108(%esp) # 4-byte Folded Spill
- adcl $0, 104(%esp) # 4-byte Folded Spill
- sbbl %ebp, %ebp
- movl %ecx, %esi
- movl %esi, %eax
- imull 84(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 720(%esp), %ecx
- movl 856(%esp), %edx
- movl 92(%esp), %ebx # 4-byte Reload
- calll .LmulPv384x32
- andl $1, %ebp
- movl %ebp, %ecx
- addl 720(%esp), %esi
- movl 76(%esp), %ebp # 4-byte Reload
- adcl 724(%esp), %ebp
- movl 64(%esp), %eax # 4-byte Reload
- adcl 728(%esp), %eax
- movl %eax, 64(%esp) # 4-byte Spill
- movl 68(%esp), %eax # 4-byte Reload
- adcl 732(%esp), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 72(%esp), %eax # 4-byte Reload
- adcl 736(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 80(%esp), %eax # 4-byte Reload
- adcl 740(%esp), %eax
- movl %eax, 80(%esp) # 4-byte Spill
- movl 88(%esp), %eax # 4-byte Reload
- adcl 744(%esp), %eax
- movl %eax, 88(%esp) # 4-byte Spill
- movl 100(%esp), %eax # 4-byte Reload
- adcl 748(%esp), %eax
- movl %eax, 100(%esp) # 4-byte Spill
- movl 96(%esp), %esi # 4-byte Reload
- adcl 752(%esp), %esi
- movl 112(%esp), %eax # 4-byte Reload
- adcl 756(%esp), %eax
- movl %eax, 112(%esp) # 4-byte Spill
- movl 132(%esp), %eax # 4-byte Reload
- adcl 760(%esp), %eax
- movl %eax, 132(%esp) # 4-byte Spill
- movl 152(%esp), %eax # 4-byte Reload
- adcl 764(%esp), %eax
- movl %eax, 152(%esp) # 4-byte Spill
- movl 156(%esp), %eax # 4-byte Reload
- adcl 768(%esp), %eax
- movl %eax, 156(%esp) # 4-byte Spill
- adcl $0, %edi
- adcl $0, 148(%esp) # 4-byte Folded Spill
- adcl $0, 116(%esp) # 4-byte Folded Spill
- adcl $0, 140(%esp) # 4-byte Folded Spill
- adcl $0, 124(%esp) # 4-byte Folded Spill
- adcl $0, 144(%esp) # 4-byte Folded Spill
- adcl $0, 128(%esp) # 4-byte Folded Spill
- adcl $0, 120(%esp) # 4-byte Folded Spill
- adcl $0, 108(%esp) # 4-byte Folded Spill
- adcl $0, 104(%esp) # 4-byte Folded Spill
- adcl $0, %ecx
- movl %ecx, 76(%esp) # 4-byte Spill
- movl %ebp, %eax
- imull 84(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 664(%esp), %ecx
- movl 856(%esp), %edx
- movl 92(%esp), %ebx # 4-byte Reload
- calll .LmulPv384x32
- addl 664(%esp), %ebp
- movl 64(%esp), %ecx # 4-byte Reload
- adcl 668(%esp), %ecx
- movl 68(%esp), %eax # 4-byte Reload
- adcl 672(%esp), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 72(%esp), %eax # 4-byte Reload
- adcl 676(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 80(%esp), %eax # 4-byte Reload
- adcl 680(%esp), %eax
- movl %eax, 80(%esp) # 4-byte Spill
- movl 88(%esp), %eax # 4-byte Reload
- adcl 684(%esp), %eax
- movl %eax, 88(%esp) # 4-byte Spill
- movl 100(%esp), %eax # 4-byte Reload
- adcl 688(%esp), %eax
- movl %eax, 100(%esp) # 4-byte Spill
- adcl 692(%esp), %esi
- movl %esi, 96(%esp) # 4-byte Spill
- movl 112(%esp), %ebp # 4-byte Reload
- adcl 696(%esp), %ebp
- movl 132(%esp), %eax # 4-byte Reload
- adcl 700(%esp), %eax
- movl %eax, 132(%esp) # 4-byte Spill
- movl 152(%esp), %eax # 4-byte Reload
- adcl 704(%esp), %eax
- movl %eax, 152(%esp) # 4-byte Spill
- movl 156(%esp), %eax # 4-byte Reload
- adcl 708(%esp), %eax
- movl %eax, 156(%esp) # 4-byte Spill
- adcl 712(%esp), %edi
- movl %edi, 136(%esp) # 4-byte Spill
- adcl $0, 148(%esp) # 4-byte Folded Spill
- adcl $0, 116(%esp) # 4-byte Folded Spill
- adcl $0, 140(%esp) # 4-byte Folded Spill
- adcl $0, 124(%esp) # 4-byte Folded Spill
- movl 144(%esp), %edi # 4-byte Reload
- adcl $0, %edi
- adcl $0, 128(%esp) # 4-byte Folded Spill
- adcl $0, 120(%esp) # 4-byte Folded Spill
- adcl $0, 108(%esp) # 4-byte Folded Spill
- adcl $0, 104(%esp) # 4-byte Folded Spill
- adcl $0, 76(%esp) # 4-byte Folded Spill
- movl %ecx, %eax
- movl %ecx, %esi
- imull 84(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 608(%esp), %ecx
- movl 856(%esp), %edx
- movl 92(%esp), %ebx # 4-byte Reload
- calll .LmulPv384x32
- addl 608(%esp), %esi
- movl 68(%esp), %esi # 4-byte Reload
- adcl 612(%esp), %esi
- movl 72(%esp), %eax # 4-byte Reload
- adcl 616(%esp), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 80(%esp), %eax # 4-byte Reload
- adcl 620(%esp), %eax
- movl %eax, 80(%esp) # 4-byte Spill
- movl 88(%esp), %eax # 4-byte Reload
- adcl 624(%esp), %eax
- movl %eax, 88(%esp) # 4-byte Spill
- movl 100(%esp), %eax # 4-byte Reload
- adcl 628(%esp), %eax
- movl %eax, 100(%esp) # 4-byte Spill
- movl 96(%esp), %eax # 4-byte Reload
- adcl 632(%esp), %eax
- movl %eax, 96(%esp) # 4-byte Spill
- adcl 636(%esp), %ebp
- movl %ebp, 112(%esp) # 4-byte Spill
- movl 132(%esp), %eax # 4-byte Reload
- adcl 640(%esp), %eax
- movl %eax, 132(%esp) # 4-byte Spill
- movl 152(%esp), %eax # 4-byte Reload
- adcl 644(%esp), %eax
- movl %eax, 152(%esp) # 4-byte Spill
- movl 156(%esp), %eax # 4-byte Reload
- adcl 648(%esp), %eax
- movl %eax, 156(%esp) # 4-byte Spill
- movl 136(%esp), %eax # 4-byte Reload
- adcl 652(%esp), %eax
- movl %eax, 136(%esp) # 4-byte Spill
- movl 148(%esp), %eax # 4-byte Reload
- adcl 656(%esp), %eax
- movl %eax, 148(%esp) # 4-byte Spill
- adcl $0, 116(%esp) # 4-byte Folded Spill
- adcl $0, 140(%esp) # 4-byte Folded Spill
- adcl $0, 124(%esp) # 4-byte Folded Spill
- adcl $0, %edi
- movl %edi, 144(%esp) # 4-byte Spill
- adcl $0, 128(%esp) # 4-byte Folded Spill
- movl 120(%esp), %ebp # 4-byte Reload
- adcl $0, %ebp
- movl 108(%esp), %edi # 4-byte Reload
- adcl $0, %edi
- adcl $0, 104(%esp) # 4-byte Folded Spill
- adcl $0, 76(%esp) # 4-byte Folded Spill
- movl %esi, %eax
- imull 84(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 552(%esp), %ecx
- movl 856(%esp), %edx
- movl 92(%esp), %ebx # 4-byte Reload
- calll .LmulPv384x32
- addl 552(%esp), %esi
- movl 72(%esp), %ecx # 4-byte Reload
- adcl 556(%esp), %ecx
- movl 80(%esp), %eax # 4-byte Reload
- adcl 560(%esp), %eax
- movl %eax, 80(%esp) # 4-byte Spill
- movl 88(%esp), %eax # 4-byte Reload
- adcl 564(%esp), %eax
- movl %eax, 88(%esp) # 4-byte Spill
- movl 100(%esp), %eax # 4-byte Reload
- adcl 568(%esp), %eax
- movl %eax, 100(%esp) # 4-byte Spill
- movl 96(%esp), %eax # 4-byte Reload
- adcl 572(%esp), %eax
- movl %eax, 96(%esp) # 4-byte Spill
- movl 112(%esp), %eax # 4-byte Reload
- adcl 576(%esp), %eax
- movl %eax, 112(%esp) # 4-byte Spill
- movl 132(%esp), %eax # 4-byte Reload
- adcl 580(%esp), %eax
- movl %eax, 132(%esp) # 4-byte Spill
- movl 152(%esp), %eax # 4-byte Reload
- adcl 584(%esp), %eax
- movl %eax, 152(%esp) # 4-byte Spill
- movl 156(%esp), %eax # 4-byte Reload
- adcl 588(%esp), %eax
- movl %eax, 156(%esp) # 4-byte Spill
- movl 136(%esp), %eax # 4-byte Reload
- adcl 592(%esp), %eax
- movl %eax, 136(%esp) # 4-byte Spill
- movl 148(%esp), %eax # 4-byte Reload
- adcl 596(%esp), %eax
- movl %eax, 148(%esp) # 4-byte Spill
- movl 116(%esp), %eax # 4-byte Reload
- adcl 600(%esp), %eax
- movl %eax, 116(%esp) # 4-byte Spill
- adcl $0, 140(%esp) # 4-byte Folded Spill
- adcl $0, 124(%esp) # 4-byte Folded Spill
- adcl $0, 144(%esp) # 4-byte Folded Spill
- adcl $0, 128(%esp) # 4-byte Folded Spill
- adcl $0, %ebp
- movl %ebp, 120(%esp) # 4-byte Spill
- adcl $0, %edi
- movl %edi, 108(%esp) # 4-byte Spill
- movl 104(%esp), %eax # 4-byte Reload
- adcl $0, %eax
- movl %eax, %esi
- adcl $0, 76(%esp) # 4-byte Folded Spill
- movl %ecx, %edi
- movl %edi, %eax
- imull 84(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 496(%esp), %ecx
- movl 856(%esp), %edx
- movl 92(%esp), %ebx # 4-byte Reload
- calll .LmulPv384x32
- addl 496(%esp), %edi
- movl 80(%esp), %ecx # 4-byte Reload
- adcl 500(%esp), %ecx
- movl 88(%esp), %eax # 4-byte Reload
- adcl 504(%esp), %eax
- movl %eax, 88(%esp) # 4-byte Spill
- movl 100(%esp), %eax # 4-byte Reload
- adcl 508(%esp), %eax
- movl %eax, 100(%esp) # 4-byte Spill
- movl 96(%esp), %eax # 4-byte Reload
- adcl 512(%esp), %eax
- movl %eax, 96(%esp) # 4-byte Spill
- movl 112(%esp), %eax # 4-byte Reload
- adcl 516(%esp), %eax
- movl %eax, 112(%esp) # 4-byte Spill
- movl 132(%esp), %eax # 4-byte Reload
- adcl 520(%esp), %eax
- movl %eax, 132(%esp) # 4-byte Spill
- movl 152(%esp), %eax # 4-byte Reload
- adcl 524(%esp), %eax
- movl %eax, 152(%esp) # 4-byte Spill
- movl 156(%esp), %ebp # 4-byte Reload
- adcl 528(%esp), %ebp
- movl 136(%esp), %edi # 4-byte Reload
- adcl 532(%esp), %edi
- movl 148(%esp), %eax # 4-byte Reload
- adcl 536(%esp), %eax
- movl %eax, 148(%esp) # 4-byte Spill
- movl 116(%esp), %eax # 4-byte Reload
- adcl 540(%esp), %eax
- movl %eax, 116(%esp) # 4-byte Spill
- movl 140(%esp), %eax # 4-byte Reload
- adcl 544(%esp), %eax
- movl %eax, 140(%esp) # 4-byte Spill
- adcl $0, 124(%esp) # 4-byte Folded Spill
- adcl $0, 144(%esp) # 4-byte Folded Spill
- adcl $0, 128(%esp) # 4-byte Folded Spill
- adcl $0, 120(%esp) # 4-byte Folded Spill
- adcl $0, 108(%esp) # 4-byte Folded Spill
- adcl $0, %esi
- movl %esi, 104(%esp) # 4-byte Spill
- adcl $0, 76(%esp) # 4-byte Folded Spill
- movl %ecx, %eax
- movl %ecx, %esi
- imull 84(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 440(%esp), %ecx
- movl 856(%esp), %edx
- movl 92(%esp), %ebx # 4-byte Reload
- calll .LmulPv384x32
- addl 440(%esp), %esi
- movl 88(%esp), %ecx # 4-byte Reload
- adcl 444(%esp), %ecx
- movl 100(%esp), %eax # 4-byte Reload
- adcl 448(%esp), %eax
- movl %eax, 100(%esp) # 4-byte Spill
- movl 96(%esp), %eax # 4-byte Reload
- adcl 452(%esp), %eax
- movl %eax, 96(%esp) # 4-byte Spill
- movl 112(%esp), %eax # 4-byte Reload
- adcl 456(%esp), %eax
- movl %eax, 112(%esp) # 4-byte Spill
- movl 132(%esp), %eax # 4-byte Reload
- adcl 460(%esp), %eax
- movl %eax, 132(%esp) # 4-byte Spill
- movl 152(%esp), %eax # 4-byte Reload
- adcl 464(%esp), %eax
- movl %eax, 152(%esp) # 4-byte Spill
- adcl 468(%esp), %ebp
- movl %ebp, 156(%esp) # 4-byte Spill
- adcl 472(%esp), %edi
- movl %edi, 136(%esp) # 4-byte Spill
- movl 148(%esp), %esi # 4-byte Reload
- adcl 476(%esp), %esi
- movl 116(%esp), %eax # 4-byte Reload
- adcl 480(%esp), %eax
- movl %eax, 116(%esp) # 4-byte Spill
- movl 140(%esp), %eax # 4-byte Reload
- adcl 484(%esp), %eax
- movl %eax, 140(%esp) # 4-byte Spill
- movl 124(%esp), %eax # 4-byte Reload
- adcl 488(%esp), %eax
- movl %eax, 124(%esp) # 4-byte Spill
- adcl $0, 144(%esp) # 4-byte Folded Spill
- adcl $0, 128(%esp) # 4-byte Folded Spill
- adcl $0, 120(%esp) # 4-byte Folded Spill
- adcl $0, 108(%esp) # 4-byte Folded Spill
- adcl $0, 104(%esp) # 4-byte Folded Spill
- adcl $0, 76(%esp) # 4-byte Folded Spill
- movl %ecx, %edi
- movl %edi, %eax
- imull 84(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 384(%esp), %ecx
- movl 856(%esp), %eax
- movl %eax, %edx
- movl 92(%esp), %ebx # 4-byte Reload
- calll .LmulPv384x32
- addl 384(%esp), %edi
- movl 100(%esp), %ecx # 4-byte Reload
- adcl 388(%esp), %ecx
- movl %ecx, 100(%esp) # 4-byte Spill
- movl 96(%esp), %eax # 4-byte Reload
- adcl 392(%esp), %eax
- movl %eax, 96(%esp) # 4-byte Spill
- movl 112(%esp), %eax # 4-byte Reload
- adcl 396(%esp), %eax
- movl %eax, 112(%esp) # 4-byte Spill
- movl 132(%esp), %ebp # 4-byte Reload
- adcl 400(%esp), %ebp
- movl 152(%esp), %edi # 4-byte Reload
- adcl 404(%esp), %edi
- movl 156(%esp), %eax # 4-byte Reload
- adcl 408(%esp), %eax
- movl %eax, 156(%esp) # 4-byte Spill
- movl 136(%esp), %eax # 4-byte Reload
- adcl 412(%esp), %eax
- movl %eax, 136(%esp) # 4-byte Spill
- adcl 416(%esp), %esi
- movl %esi, 148(%esp) # 4-byte Spill
- movl 116(%esp), %eax # 4-byte Reload
- adcl 420(%esp), %eax
- movl %eax, 116(%esp) # 4-byte Spill
- movl 140(%esp), %esi # 4-byte Reload
- adcl 424(%esp), %esi
- movl 124(%esp), %eax # 4-byte Reload
- adcl 428(%esp), %eax
- movl %eax, 124(%esp) # 4-byte Spill
- movl 144(%esp), %eax # 4-byte Reload
- adcl 432(%esp), %eax
- movl %eax, 144(%esp) # 4-byte Spill
- adcl $0, 128(%esp) # 4-byte Folded Spill
- adcl $0, 120(%esp) # 4-byte Folded Spill
- adcl $0, 108(%esp) # 4-byte Folded Spill
- adcl $0, 104(%esp) # 4-byte Folded Spill
- adcl $0, 76(%esp) # 4-byte Folded Spill
- movl %ecx, %eax
- imull 84(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 328(%esp), %ecx
- movl 856(%esp), %edx
- movl 92(%esp), %ebx # 4-byte Reload
- calll .LmulPv384x32
- movl 100(%esp), %eax # 4-byte Reload
- addl 328(%esp), %eax
- movl 96(%esp), %eax # 4-byte Reload
- adcl 332(%esp), %eax
- movl 112(%esp), %ecx # 4-byte Reload
- adcl 336(%esp), %ecx
- movl %ecx, 112(%esp) # 4-byte Spill
- adcl 340(%esp), %ebp
- movl %ebp, 132(%esp) # 4-byte Spill
- adcl 344(%esp), %edi
- movl %edi, 152(%esp) # 4-byte Spill
- movl 156(%esp), %ecx # 4-byte Reload
- adcl 348(%esp), %ecx
- movl %ecx, 156(%esp) # 4-byte Spill
- movl 136(%esp), %ecx # 4-byte Reload
- adcl 352(%esp), %ecx
- movl %ecx, 136(%esp) # 4-byte Spill
- movl 148(%esp), %ecx # 4-byte Reload
- adcl 356(%esp), %ecx
- movl %ecx, 148(%esp) # 4-byte Spill
- movl 116(%esp), %ebp # 4-byte Reload
- adcl 360(%esp), %ebp
- adcl 364(%esp), %esi
- movl %esi, 140(%esp) # 4-byte Spill
- movl 124(%esp), %ecx # 4-byte Reload
- adcl 368(%esp), %ecx
- movl %ecx, 124(%esp) # 4-byte Spill
- movl 144(%esp), %ecx # 4-byte Reload
- adcl 372(%esp), %ecx
- movl %ecx, 144(%esp) # 4-byte Spill
- movl 128(%esp), %ecx # 4-byte Reload
- adcl 376(%esp), %ecx
- movl %ecx, 128(%esp) # 4-byte Spill
- adcl $0, 120(%esp) # 4-byte Folded Spill
- adcl $0, 108(%esp) # 4-byte Folded Spill
- adcl $0, 104(%esp) # 4-byte Folded Spill
- movl 76(%esp), %edi # 4-byte Reload
- adcl $0, %edi
- movl %eax, %esi
- imull 84(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 272(%esp), %ecx
- movl 856(%esp), %edx
- movl 92(%esp), %ebx # 4-byte Reload
- calll .LmulPv384x32
- addl 272(%esp), %esi
- movl 112(%esp), %eax # 4-byte Reload
- adcl 276(%esp), %eax
- movl 132(%esp), %ecx # 4-byte Reload
- adcl 280(%esp), %ecx
- movl %ecx, 132(%esp) # 4-byte Spill
- movl 152(%esp), %ecx # 4-byte Reload
- adcl 284(%esp), %ecx
- movl %ecx, 152(%esp) # 4-byte Spill
- movl 156(%esp), %ecx # 4-byte Reload
- adcl 288(%esp), %ecx
- movl %ecx, 156(%esp) # 4-byte Spill
- movl 136(%esp), %ecx # 4-byte Reload
- adcl 292(%esp), %ecx
- movl %ecx, 136(%esp) # 4-byte Spill
- movl 148(%esp), %ecx # 4-byte Reload
- adcl 296(%esp), %ecx
- movl %ecx, 148(%esp) # 4-byte Spill
- movl %ebp, %esi
- adcl 300(%esp), %esi
- movl 140(%esp), %ecx # 4-byte Reload
- adcl 304(%esp), %ecx
- movl %ecx, 140(%esp) # 4-byte Spill
- movl 124(%esp), %ecx # 4-byte Reload
- adcl 308(%esp), %ecx
- movl %ecx, 124(%esp) # 4-byte Spill
- movl 144(%esp), %ecx # 4-byte Reload
- adcl 312(%esp), %ecx
- movl %ecx, 144(%esp) # 4-byte Spill
- movl 128(%esp), %ecx # 4-byte Reload
- adcl 316(%esp), %ecx
- movl %ecx, 128(%esp) # 4-byte Spill
- movl 120(%esp), %ecx # 4-byte Reload
- adcl 320(%esp), %ecx
- movl %ecx, 120(%esp) # 4-byte Spill
- adcl $0, 108(%esp) # 4-byte Folded Spill
- adcl $0, 104(%esp) # 4-byte Folded Spill
- adcl $0, %edi
- movl %edi, %ebp
- movl %eax, %edi
- imull 84(%esp), %eax # 4-byte Folded Reload
- movl %eax, (%esp)
- leal 216(%esp), %ecx
- movl 856(%esp), %edx
- movl 92(%esp), %ebx # 4-byte Reload
- calll .LmulPv384x32
- addl 216(%esp), %edi
- movl 132(%esp), %ecx # 4-byte Reload
- adcl 220(%esp), %ecx
- movl 152(%esp), %eax # 4-byte Reload
- adcl 224(%esp), %eax
- movl %eax, 152(%esp) # 4-byte Spill
- movl 156(%esp), %eax # 4-byte Reload
- adcl 228(%esp), %eax
- movl %eax, 156(%esp) # 4-byte Spill
- movl 136(%esp), %eax # 4-byte Reload
- adcl 232(%esp), %eax
- movl %eax, 136(%esp) # 4-byte Spill
- movl 148(%esp), %eax # 4-byte Reload
- adcl 236(%esp), %eax
- movl %eax, 148(%esp) # 4-byte Spill
- adcl 240(%esp), %esi
- movl %esi, 116(%esp) # 4-byte Spill
- movl 140(%esp), %eax # 4-byte Reload
- adcl 244(%esp), %eax
- movl %eax, 140(%esp) # 4-byte Spill
- movl 124(%esp), %esi # 4-byte Reload
- adcl 248(%esp), %esi
- movl 144(%esp), %eax # 4-byte Reload
- adcl 252(%esp), %eax
- movl %eax, 144(%esp) # 4-byte Spill
- movl 128(%esp), %eax # 4-byte Reload
- adcl 256(%esp), %eax
- movl %eax, 128(%esp) # 4-byte Spill
- movl 120(%esp), %eax # 4-byte Reload
- adcl 260(%esp), %eax
- movl %eax, 120(%esp) # 4-byte Spill
- movl 108(%esp), %eax # 4-byte Reload
- adcl 264(%esp), %eax
- movl %eax, 108(%esp) # 4-byte Spill
- adcl $0, 104(%esp) # 4-byte Folded Spill
- adcl $0, %ebp
- movl 84(%esp), %eax # 4-byte Reload
- imull %ecx, %eax
- movl %ecx, %edi
- movl %eax, (%esp)
- leal 160(%esp), %ecx
- movl 856(%esp), %edx
- movl 92(%esp), %ebx # 4-byte Reload
- calll .LmulPv384x32
- addl 160(%esp), %edi
- movl 152(%esp), %eax # 4-byte Reload
- adcl 164(%esp), %eax
- movl %eax, 152(%esp) # 4-byte Spill
- movl %eax, %edi
- movl 156(%esp), %eax # 4-byte Reload
- adcl 168(%esp), %eax
- movl %eax, 156(%esp) # 4-byte Spill
- movl 136(%esp), %edx # 4-byte Reload
- adcl 172(%esp), %edx
- movl %edx, 136(%esp) # 4-byte Spill
- movl 148(%esp), %ebx # 4-byte Reload
- adcl 176(%esp), %ebx
- movl %ebx, 148(%esp) # 4-byte Spill
- movl 116(%esp), %ecx # 4-byte Reload
- adcl 180(%esp), %ecx
- movl %ecx, 116(%esp) # 4-byte Spill
- movl 140(%esp), %eax # 4-byte Reload
- adcl 184(%esp), %eax
- movl %eax, 140(%esp) # 4-byte Spill
- adcl 188(%esp), %esi
- movl %esi, 124(%esp) # 4-byte Spill
- movl 144(%esp), %eax # 4-byte Reload
- adcl 192(%esp), %eax
- movl %eax, 144(%esp) # 4-byte Spill
- movl 128(%esp), %eax # 4-byte Reload
- adcl 196(%esp), %eax
- movl %eax, 128(%esp) # 4-byte Spill
- movl 120(%esp), %eax # 4-byte Reload
- adcl 200(%esp), %eax
- movl %eax, 120(%esp) # 4-byte Spill
- movl 108(%esp), %eax # 4-byte Reload
- adcl 204(%esp), %eax
- movl %eax, 108(%esp) # 4-byte Spill
- movl 104(%esp), %eax # 4-byte Reload
- adcl 208(%esp), %eax
- movl %eax, 104(%esp) # 4-byte Spill
- adcl $0, %ebp
- subl 24(%esp), %edi # 4-byte Folded Reload
- movl 156(%esp), %esi # 4-byte Reload
- sbbl 16(%esp), %esi # 4-byte Folded Reload
- sbbl 20(%esp), %edx # 4-byte Folded Reload
- sbbl 28(%esp), %ebx # 4-byte Folded Reload
- sbbl 32(%esp), %ecx # 4-byte Folded Reload
- movl 140(%esp), %eax # 4-byte Reload
- sbbl 36(%esp), %eax # 4-byte Folded Reload
- movl %eax, 84(%esp) # 4-byte Spill
- movl 124(%esp), %eax # 4-byte Reload
- sbbl 40(%esp), %eax # 4-byte Folded Reload
- movl %eax, 88(%esp) # 4-byte Spill
- movl 144(%esp), %eax # 4-byte Reload
- sbbl 44(%esp), %eax # 4-byte Folded Reload
- movl %eax, 92(%esp) # 4-byte Spill
- movl 128(%esp), %eax # 4-byte Reload
- sbbl 48(%esp), %eax # 4-byte Folded Reload
- movl %eax, 96(%esp) # 4-byte Spill
- movl 120(%esp), %eax # 4-byte Reload
- sbbl 52(%esp), %eax # 4-byte Folded Reload
- movl %eax, 100(%esp) # 4-byte Spill
- movl 108(%esp), %eax # 4-byte Reload
- sbbl 56(%esp), %eax # 4-byte Folded Reload
- movl %eax, 112(%esp) # 4-byte Spill
- movl 104(%esp), %eax # 4-byte Reload
- sbbl 60(%esp), %eax # 4-byte Folded Reload
- movl %eax, 132(%esp) # 4-byte Spill
- sbbl $0, %ebp
- andl $1, %ebp
- jne .LBB180_2
-# BB#1:
- movl %ebx, 148(%esp) # 4-byte Spill
-.LBB180_2:
- movl %ebp, %ebx
- testb %bl, %bl
- movl 152(%esp), %ebx # 4-byte Reload
- jne .LBB180_4
-# BB#3:
- movl %edi, %ebx
-.LBB180_4:
- movl 848(%esp), %edi
- movl %ebx, (%edi)
- movl 144(%esp), %ebx # 4-byte Reload
- jne .LBB180_6
-# BB#5:
- movl %esi, 156(%esp) # 4-byte Spill
-.LBB180_6:
- movl 156(%esp), %esi # 4-byte Reload
- movl %esi, 4(%edi)
- movl 136(%esp), %esi # 4-byte Reload
- jne .LBB180_8
-# BB#7:
- movl %edx, %esi
-.LBB180_8:
- movl %esi, 8(%edi)
- movl 148(%esp), %edx # 4-byte Reload
- movl %edx, 12(%edi)
- movl 128(%esp), %esi # 4-byte Reload
- movl 116(%esp), %edx # 4-byte Reload
- jne .LBB180_10
-# BB#9:
- movl %ecx, %edx
-.LBB180_10:
- movl %edx, 16(%edi)
- movl 120(%esp), %edx # 4-byte Reload
- movl 140(%esp), %ecx # 4-byte Reload
- jne .LBB180_12
-# BB#11:
- movl 84(%esp), %ecx # 4-byte Reload
-.LBB180_12:
- movl %ecx, 20(%edi)
- movl 108(%esp), %ecx # 4-byte Reload
- movl 124(%esp), %eax # 4-byte Reload
- jne .LBB180_14
-# BB#13:
- movl 88(%esp), %eax # 4-byte Reload
-.LBB180_14:
- movl %eax, 24(%edi)
- movl 104(%esp), %eax # 4-byte Reload
- jne .LBB180_16
-# BB#15:
- movl 92(%esp), %ebx # 4-byte Reload
-.LBB180_16:
- movl %ebx, 28(%edi)
- jne .LBB180_18
-# BB#17:
- movl 96(%esp), %esi # 4-byte Reload
-.LBB180_18:
- movl %esi, 32(%edi)
- jne .LBB180_20
-# BB#19:
- movl 100(%esp), %edx # 4-byte Reload
-.LBB180_20:
- movl %edx, 36(%edi)
- jne .LBB180_22
-# BB#21:
- movl 112(%esp), %ecx # 4-byte Reload
-.LBB180_22:
- movl %ecx, 40(%edi)
- jne .LBB180_24
-# BB#23:
- movl 132(%esp), %eax # 4-byte Reload
-.LBB180_24:
- movl %eax, 44(%edi)
- addl $828, %esp # imm = 0x33C
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end180:
- .size mcl_fp_montRed12L, .Lfunc_end180-mcl_fp_montRed12L
-
- .globl mcl_fp_addPre12L
- .align 16, 0x90
- .type mcl_fp_addPre12L,@function
-mcl_fp_addPre12L: # @mcl_fp_addPre12L
-# BB#0:
- pushl %ebx
- pushl %edi
- pushl %esi
- movl 24(%esp), %eax
- movl (%eax), %edx
- movl 4(%eax), %esi
- movl 20(%esp), %ecx
- addl (%ecx), %edx
- adcl 4(%ecx), %esi
- movl 8(%eax), %edi
- adcl 8(%ecx), %edi
- movl 16(%esp), %ebx
- movl %edx, (%ebx)
- movl 12(%ecx), %edx
- movl %esi, 4(%ebx)
- movl 16(%ecx), %esi
- adcl 12(%eax), %edx
- adcl 16(%eax), %esi
- movl %edi, 8(%ebx)
- movl 20(%eax), %edi
- movl %edx, 12(%ebx)
- movl 20(%ecx), %edx
- adcl %edi, %edx
- movl 24(%eax), %edi
- movl %esi, 16(%ebx)
- movl 24(%ecx), %esi
- adcl %edi, %esi
- movl 28(%eax), %edi
- movl %edx, 20(%ebx)
- movl 28(%ecx), %edx
- adcl %edi, %edx
- movl 32(%eax), %edi
- movl %esi, 24(%ebx)
- movl 32(%ecx), %esi
- adcl %edi, %esi
- movl 36(%eax), %edi
- movl %edx, 28(%ebx)
- movl 36(%ecx), %edx
- adcl %edi, %edx
- movl 40(%eax), %edi
- movl %esi, 32(%ebx)
- movl 40(%ecx), %esi
- adcl %edi, %esi
- movl %edx, 36(%ebx)
- movl %esi, 40(%ebx)
- movl 44(%eax), %eax
- movl 44(%ecx), %ecx
- adcl %eax, %ecx
- movl %ecx, 44(%ebx)
- sbbl %eax, %eax
- andl $1, %eax
- popl %esi
- popl %edi
- popl %ebx
- retl
-.Lfunc_end181:
- .size mcl_fp_addPre12L, .Lfunc_end181-mcl_fp_addPre12L
-
- .globl mcl_fp_subPre12L
- .align 16, 0x90
- .type mcl_fp_subPre12L,@function
-mcl_fp_subPre12L: # @mcl_fp_subPre12L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- movl 24(%esp), %ecx
- movl (%ecx), %esi
- movl 4(%ecx), %edi
- xorl %eax, %eax
- movl 28(%esp), %edx
- subl (%edx), %esi
- sbbl 4(%edx), %edi
- movl 8(%ecx), %ebx
- sbbl 8(%edx), %ebx
- movl 20(%esp), %ebp
- movl %esi, (%ebp)
- movl 12(%ecx), %esi
- sbbl 12(%edx), %esi
- movl %edi, 4(%ebp)
- movl 16(%ecx), %edi
- sbbl 16(%edx), %edi
- movl %ebx, 8(%ebp)
- movl 20(%edx), %ebx
- movl %esi, 12(%ebp)
- movl 20(%ecx), %esi
- sbbl %ebx, %esi
- movl 24(%edx), %ebx
- movl %edi, 16(%ebp)
- movl 24(%ecx), %edi
- sbbl %ebx, %edi
- movl 28(%edx), %ebx
- movl %esi, 20(%ebp)
- movl 28(%ecx), %esi
- sbbl %ebx, %esi
- movl 32(%edx), %ebx
- movl %edi, 24(%ebp)
- movl 32(%ecx), %edi
- sbbl %ebx, %edi
- movl 36(%edx), %ebx
- movl %esi, 28(%ebp)
- movl 36(%ecx), %esi
- sbbl %ebx, %esi
- movl 40(%edx), %ebx
- movl %edi, 32(%ebp)
- movl 40(%ecx), %edi
- sbbl %ebx, %edi
- movl %esi, 36(%ebp)
- movl %edi, 40(%ebp)
- movl 44(%edx), %edx
- movl 44(%ecx), %ecx
- sbbl %edx, %ecx
- movl %ecx, 44(%ebp)
- sbbl $0, %eax
- andl $1, %eax
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end182:
- .size mcl_fp_subPre12L, .Lfunc_end182-mcl_fp_subPre12L
-
- .globl mcl_fp_shr1_12L
- .align 16, 0x90
- .type mcl_fp_shr1_12L,@function
-mcl_fp_shr1_12L: # @mcl_fp_shr1_12L
-# BB#0:
- pushl %esi
- movl 12(%esp), %eax
- movl (%eax), %edx
- movl 4(%eax), %esi
- shrdl $1, %esi, %edx
- movl 8(%esp), %ecx
- movl %edx, (%ecx)
- movl 8(%eax), %edx
- shrdl $1, %edx, %esi
- movl %esi, 4(%ecx)
- movl 12(%eax), %esi
- shrdl $1, %esi, %edx
- movl %edx, 8(%ecx)
- movl 16(%eax), %edx
- shrdl $1, %edx, %esi
- movl %esi, 12(%ecx)
- movl 20(%eax), %esi
- shrdl $1, %esi, %edx
- movl %edx, 16(%ecx)
- movl 24(%eax), %edx
- shrdl $1, %edx, %esi
- movl %esi, 20(%ecx)
- movl 28(%eax), %esi
- shrdl $1, %esi, %edx
- movl %edx, 24(%ecx)
- movl 32(%eax), %edx
- shrdl $1, %edx, %esi
- movl %esi, 28(%ecx)
- movl 36(%eax), %esi
- shrdl $1, %esi, %edx
- movl %edx, 32(%ecx)
- movl 40(%eax), %edx
- shrdl $1, %edx, %esi
- movl %esi, 36(%ecx)
- movl 44(%eax), %eax
- shrdl $1, %eax, %edx
- movl %edx, 40(%ecx)
- shrl %eax
- movl %eax, 44(%ecx)
- popl %esi
- retl
-.Lfunc_end183:
- .size mcl_fp_shr1_12L, .Lfunc_end183-mcl_fp_shr1_12L
-
- .globl mcl_fp_add12L
- .align 16, 0x90
- .type mcl_fp_add12L,@function
-mcl_fp_add12L: # @mcl_fp_add12L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $36, %esp
- movl 64(%esp), %ebx
- movl (%ebx), %edx
- movl 4(%ebx), %ecx
- movl 60(%esp), %eax
- addl (%eax), %edx
- movl %edx, 4(%esp) # 4-byte Spill
- adcl 4(%eax), %ecx
- movl %ecx, 32(%esp) # 4-byte Spill
- movl 8(%ebx), %ecx
- adcl 8(%eax), %ecx
- movl %ecx, 28(%esp) # 4-byte Spill
- movl 12(%eax), %edx
- movl 16(%eax), %ecx
- adcl 12(%ebx), %edx
- movl %edx, 24(%esp) # 4-byte Spill
- adcl 16(%ebx), %ecx
- movl %ecx, 20(%esp) # 4-byte Spill
- movl 20(%eax), %ecx
- adcl 20(%ebx), %ecx
- movl %ecx, 16(%esp) # 4-byte Spill
- movl 24(%eax), %ecx
- adcl 24(%ebx), %ecx
- movl %ecx, 12(%esp) # 4-byte Spill
- movl 28(%eax), %ecx
- adcl 28(%ebx), %ecx
- movl %ecx, 8(%esp) # 4-byte Spill
- movl 32(%eax), %ebp
- adcl 32(%ebx), %ebp
- movl %ebp, (%esp) # 4-byte Spill
- movl 36(%eax), %edi
- adcl 36(%ebx), %edi
- movl 40(%eax), %esi
- adcl 40(%ebx), %esi
- movl 44(%eax), %edx
- adcl 44(%ebx), %edx
- movl 56(%esp), %ebx
- movl 4(%esp), %eax # 4-byte Reload
- movl %eax, (%ebx)
- movl 32(%esp), %ecx # 4-byte Reload
- movl %ecx, 4(%ebx)
- movl 28(%esp), %ecx # 4-byte Reload
- movl %ecx, 8(%ebx)
- movl 24(%esp), %ecx # 4-byte Reload
- movl %ecx, 12(%ebx)
- movl 20(%esp), %ecx # 4-byte Reload
- movl %ecx, 16(%ebx)
- movl 16(%esp), %ecx # 4-byte Reload
- movl %ecx, 20(%ebx)
- movl 12(%esp), %ecx # 4-byte Reload
- movl %ecx, 24(%ebx)
- movl 8(%esp), %ecx # 4-byte Reload
- movl %ecx, 28(%ebx)
- movl %ebp, 32(%ebx)
- movl %edi, 36(%ebx)
- movl %esi, 40(%ebx)
- movl %edx, 44(%ebx)
- sbbl %ecx, %ecx
- andl $1, %ecx
- movl 68(%esp), %ebp
- subl (%ebp), %eax
- movl %eax, 4(%esp) # 4-byte Spill
- movl 32(%esp), %eax # 4-byte Reload
- sbbl 4(%ebp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 28(%esp), %eax # 4-byte Reload
- sbbl 8(%ebp), %eax
- movl %eax, 28(%esp) # 4-byte Spill
- movl 24(%esp), %eax # 4-byte Reload
- sbbl 12(%ebp), %eax
- movl %eax, 24(%esp) # 4-byte Spill
- movl 20(%esp), %eax # 4-byte Reload
- sbbl 16(%ebp), %eax
- movl %eax, 20(%esp) # 4-byte Spill
- movl 16(%esp), %eax # 4-byte Reload
- sbbl 20(%ebp), %eax
- movl %eax, 16(%esp) # 4-byte Spill
- movl 12(%esp), %eax # 4-byte Reload
- sbbl 24(%ebp), %eax
- movl %eax, 12(%esp) # 4-byte Spill
- movl 8(%esp), %eax # 4-byte Reload
- sbbl 28(%ebp), %eax
- movl %eax, 8(%esp) # 4-byte Spill
- movl (%esp), %eax # 4-byte Reload
- sbbl 32(%ebp), %eax
- movl %eax, (%esp) # 4-byte Spill
- sbbl 36(%ebp), %edi
- sbbl 40(%ebp), %esi
- sbbl 44(%ebp), %edx
- sbbl $0, %ecx
- testb $1, %cl
- jne .LBB184_2
-# BB#1: # %nocarry
- movl 4(%esp), %eax # 4-byte Reload
- movl %eax, (%ebx)
- movl 32(%esp), %eax # 4-byte Reload
- movl %eax, 4(%ebx)
- movl 28(%esp), %eax # 4-byte Reload
- movl %eax, 8(%ebx)
- movl 24(%esp), %eax # 4-byte Reload
- movl %eax, 12(%ebx)
- movl 20(%esp), %eax # 4-byte Reload
- movl %eax, 16(%ebx)
- movl 16(%esp), %eax # 4-byte Reload
- movl %eax, 20(%ebx)
- movl 12(%esp), %eax # 4-byte Reload
- movl %eax, 24(%ebx)
- movl 8(%esp), %eax # 4-byte Reload
- movl %eax, 28(%ebx)
- movl (%esp), %eax # 4-byte Reload
- movl %eax, 32(%ebx)
- movl %edi, 36(%ebx)
- movl %esi, 40(%ebx)
- movl %edx, 44(%ebx)
-.LBB184_2: # %carry
- addl $36, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end184:
- .size mcl_fp_add12L, .Lfunc_end184-mcl_fp_add12L
-
- .globl mcl_fp_addNF12L
- .align 16, 0x90
- .type mcl_fp_addNF12L,@function
-mcl_fp_addNF12L: # @mcl_fp_addNF12L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $88, %esp
- movl 116(%esp), %esi
- movl (%esi), %eax
- movl 4(%esi), %ecx
- movl 112(%esp), %edx
- addl (%edx), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- adcl 4(%edx), %ecx
- movl %ecx, 64(%esp) # 4-byte Spill
- movl 44(%esi), %eax
- movl %eax, 80(%esp) # 4-byte Spill
- movl 40(%esi), %ebp
- movl 36(%esi), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 32(%esi), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- movl 28(%esi), %eax
- movl %eax, 84(%esp) # 4-byte Spill
- movl 24(%esi), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 20(%esi), %ebx
- movl 16(%esi), %edi
- movl 12(%esi), %ecx
- movl 8(%esi), %eax
- adcl 8(%edx), %eax
- adcl 12(%edx), %ecx
- movl %ecx, 48(%esp) # 4-byte Spill
- adcl 16(%edx), %edi
- movl %edi, 52(%esp) # 4-byte Spill
- adcl 20(%edx), %ebx
- movl %ebx, 56(%esp) # 4-byte Spill
- movl 68(%esp), %esi # 4-byte Reload
- adcl 24(%edx), %esi
- movl %esi, 68(%esp) # 4-byte Spill
- movl 84(%esp), %esi # 4-byte Reload
- adcl 28(%edx), %esi
- movl %esi, 84(%esp) # 4-byte Spill
- movl 76(%esp), %esi # 4-byte Reload
- adcl 32(%edx), %esi
- movl %esi, 76(%esp) # 4-byte Spill
- movl 72(%esp), %esi # 4-byte Reload
- adcl 36(%edx), %esi
- movl %esi, 72(%esp) # 4-byte Spill
- movl %eax, %esi
- adcl 40(%edx), %ebp
- movl %ebp, 44(%esp) # 4-byte Spill
- movl 80(%esp), %eax # 4-byte Reload
- adcl 44(%edx), %eax
- movl %eax, 80(%esp) # 4-byte Spill
- movl 120(%esp), %ebp
- movl 60(%esp), %edx # 4-byte Reload
- subl (%ebp), %edx
- movl 64(%esp), %eax # 4-byte Reload
- sbbl 4(%ebp), %eax
- movl %eax, (%esp) # 4-byte Spill
- movl %esi, %eax
- sbbl 8(%ebp), %eax
- movl %eax, 4(%esp) # 4-byte Spill
- sbbl 12(%ebp), %ecx
- movl %ecx, 8(%esp) # 4-byte Spill
- sbbl 16(%ebp), %edi
- movl %edi, 12(%esp) # 4-byte Spill
- sbbl 20(%ebp), %ebx
- movl %ebx, 16(%esp) # 4-byte Spill
- movl 68(%esp), %eax # 4-byte Reload
- movl %eax, %ecx
- sbbl 24(%ebp), %ecx
- movl %ecx, 20(%esp) # 4-byte Spill
- movl 84(%esp), %ecx # 4-byte Reload
- sbbl 28(%ebp), %ecx
- movl %ecx, 24(%esp) # 4-byte Spill
- movl 76(%esp), %ecx # 4-byte Reload
- sbbl 32(%ebp), %ecx
- movl %ecx, 28(%esp) # 4-byte Spill
- movl 72(%esp), %ecx # 4-byte Reload
- sbbl 36(%ebp), %ecx
- movl %ecx, 32(%esp) # 4-byte Spill
- movl 44(%esp), %ecx # 4-byte Reload
- movl %ecx, %edi
- sbbl 40(%ebp), %edi
- movl %edi, 36(%esp) # 4-byte Spill
- movl 80(%esp), %edi # 4-byte Reload
- sbbl 44(%ebp), %edi
- movl %edi, 40(%esp) # 4-byte Spill
- movl %edi, %ebp
- movl 60(%esp), %edi # 4-byte Reload
- sarl $31, %ebp
- testl %ebp, %ebp
- js .LBB185_2
-# BB#1:
- movl %edx, %edi
-.LBB185_2:
- movl 108(%esp), %edx
- movl %edi, (%edx)
- movl 64(%esp), %edi # 4-byte Reload
- js .LBB185_4
-# BB#3:
- movl (%esp), %edi # 4-byte Reload
-.LBB185_4:
- movl %edi, 4(%edx)
- movl %eax, %ebp
- js .LBB185_6
-# BB#5:
- movl 4(%esp), %esi # 4-byte Reload
-.LBB185_6:
- movl %esi, 8(%edx)
- movl %ecx, %esi
- movl 52(%esp), %eax # 4-byte Reload
- movl 48(%esp), %ecx # 4-byte Reload
- js .LBB185_8
-# BB#7:
- movl 8(%esp), %ecx # 4-byte Reload
-.LBB185_8:
- movl %ecx, 12(%edx)
- movl 76(%esp), %ebx # 4-byte Reload
- movl 84(%esp), %edi # 4-byte Reload
- js .LBB185_10
-# BB#9:
- movl 12(%esp), %eax # 4-byte Reload
-.LBB185_10:
- movl %eax, 16(%edx)
- movl 80(%esp), %ecx # 4-byte Reload
- js .LBB185_12
-# BB#11:
- movl 16(%esp), %eax # 4-byte Reload
- movl %eax, 56(%esp) # 4-byte Spill
-.LBB185_12:
- movl 56(%esp), %eax # 4-byte Reload
- movl %eax, 20(%edx)
- js .LBB185_14
-# BB#13:
- movl 20(%esp), %ebp # 4-byte Reload
-.LBB185_14:
- movl %ebp, 24(%edx)
- js .LBB185_16
-# BB#15:
- movl 24(%esp), %edi # 4-byte Reload
-.LBB185_16:
- movl %edi, 28(%edx)
- js .LBB185_18
-# BB#17:
- movl 28(%esp), %ebx # 4-byte Reload
-.LBB185_18:
- movl %ebx, 32(%edx)
- movl 72(%esp), %eax # 4-byte Reload
- js .LBB185_20
-# BB#19:
- movl 32(%esp), %eax # 4-byte Reload
-.LBB185_20:
- movl %eax, 36(%edx)
- js .LBB185_22
-# BB#21:
- movl 36(%esp), %esi # 4-byte Reload
-.LBB185_22:
- movl %esi, 40(%edx)
- js .LBB185_24
-# BB#23:
- movl 40(%esp), %ecx # 4-byte Reload
-.LBB185_24:
- movl %ecx, 44(%edx)
- addl $88, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end185:
- .size mcl_fp_addNF12L, .Lfunc_end185-mcl_fp_addNF12L
-
- .globl mcl_fp_sub12L
- .align 16, 0x90
- .type mcl_fp_sub12L,@function
-mcl_fp_sub12L: # @mcl_fp_sub12L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $40, %esp
- movl 64(%esp), %esi
- movl (%esi), %ecx
- movl 4(%esi), %eax
- xorl %ebx, %ebx
- movl 68(%esp), %edi
- subl (%edi), %ecx
- movl %ecx, 12(%esp) # 4-byte Spill
- sbbl 4(%edi), %eax
- movl %eax, 24(%esp) # 4-byte Spill
- movl 8(%esi), %eax
- sbbl 8(%edi), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 12(%esi), %eax
- sbbl 12(%edi), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 16(%esi), %eax
- sbbl 16(%edi), %eax
- movl %eax, 28(%esp) # 4-byte Spill
- movl 20(%esi), %eax
- sbbl 20(%edi), %eax
- movl %eax, 20(%esp) # 4-byte Spill
- movl 24(%esi), %eax
- sbbl 24(%edi), %eax
- movl %eax, 16(%esp) # 4-byte Spill
- movl 28(%esi), %edx
- sbbl 28(%edi), %edx
- movl %edx, 8(%esp) # 4-byte Spill
- movl 32(%esi), %ecx
- sbbl 32(%edi), %ecx
- movl %ecx, 4(%esp) # 4-byte Spill
- movl 36(%esi), %eax
- sbbl 36(%edi), %eax
- movl %eax, (%esp) # 4-byte Spill
- movl 40(%esi), %ebp
- sbbl 40(%edi), %ebp
- movl 44(%esi), %esi
- sbbl 44(%edi), %esi
- sbbl $0, %ebx
- testb $1, %bl
- movl 60(%esp), %ebx
- movl 12(%esp), %edi # 4-byte Reload
- movl %edi, (%ebx)
- movl 24(%esp), %edi # 4-byte Reload
- movl %edi, 4(%ebx)
- movl 32(%esp), %edi # 4-byte Reload
- movl %edi, 8(%ebx)
- movl 36(%esp), %edi # 4-byte Reload
- movl %edi, 12(%ebx)
- movl 28(%esp), %edi # 4-byte Reload
- movl %edi, 16(%ebx)
- movl 20(%esp), %edi # 4-byte Reload
- movl %edi, 20(%ebx)
- movl 16(%esp), %edi # 4-byte Reload
- movl %edi, 24(%ebx)
- movl %edx, 28(%ebx)
- movl %ecx, 32(%ebx)
- movl %eax, 36(%ebx)
- movl %ebp, 40(%ebx)
- movl %esi, 44(%ebx)
- je .LBB186_2
-# BB#1: # %carry
- movl %esi, %edi
- movl 72(%esp), %esi
- movl 12(%esp), %ecx # 4-byte Reload
- addl (%esi), %ecx
- movl %ecx, (%ebx)
- movl 24(%esp), %edx # 4-byte Reload
- adcl 4(%esi), %edx
- movl %edx, 4(%ebx)
- movl 32(%esp), %ecx # 4-byte Reload
- adcl 8(%esi), %ecx
- movl 12(%esi), %eax
- adcl 36(%esp), %eax # 4-byte Folded Reload
- movl %ecx, 8(%ebx)
- movl 16(%esi), %ecx
- adcl 28(%esp), %ecx # 4-byte Folded Reload
- movl %eax, 12(%ebx)
- movl 20(%esi), %eax
- adcl 20(%esp), %eax # 4-byte Folded Reload
- movl %ecx, 16(%ebx)
- movl 24(%esi), %ecx
- adcl 16(%esp), %ecx # 4-byte Folded Reload
- movl %eax, 20(%ebx)
- movl 28(%esi), %eax
- adcl 8(%esp), %eax # 4-byte Folded Reload
- movl %ecx, 24(%ebx)
- movl 32(%esi), %ecx
- adcl 4(%esp), %ecx # 4-byte Folded Reload
- movl %eax, 28(%ebx)
- movl 36(%esi), %eax
- adcl (%esp), %eax # 4-byte Folded Reload
- movl %ecx, 32(%ebx)
- movl %eax, 36(%ebx)
- movl 40(%esi), %eax
- adcl %ebp, %eax
- movl %eax, 40(%ebx)
- movl 44(%esi), %eax
- adcl %edi, %eax
- movl %eax, 44(%ebx)
-.LBB186_2: # %nocarry
- addl $40, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end186:
- .size mcl_fp_sub12L, .Lfunc_end186-mcl_fp_sub12L
-
- .globl mcl_fp_subNF12L
- .align 16, 0x90
- .type mcl_fp_subNF12L,@function
-mcl_fp_subNF12L: # @mcl_fp_subNF12L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $72, %esp
- movl 96(%esp), %ecx
- movl (%ecx), %edx
- movl 4(%ecx), %eax
- movl 100(%esp), %edi
- subl (%edi), %edx
- movl %edx, 48(%esp) # 4-byte Spill
- sbbl 4(%edi), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- movl 44(%ecx), %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 40(%ecx), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 36(%ecx), %eax
- movl %eax, 64(%esp) # 4-byte Spill
- movl 32(%ecx), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 28(%ecx), %ebp
- movl 24(%ecx), %ebx
- movl 20(%ecx), %esi
- movl 16(%ecx), %edx
- movl 12(%ecx), %eax
- movl 8(%ecx), %ecx
- sbbl 8(%edi), %ecx
- movl %ecx, 24(%esp) # 4-byte Spill
- sbbl 12(%edi), %eax
- movl %eax, 28(%esp) # 4-byte Spill
- sbbl 16(%edi), %edx
- movl %edx, 32(%esp) # 4-byte Spill
- sbbl 20(%edi), %esi
- movl %esi, 36(%esp) # 4-byte Spill
- sbbl 24(%edi), %ebx
- movl %ebx, 40(%esp) # 4-byte Spill
- sbbl 28(%edi), %ebp
- movl %ebp, 44(%esp) # 4-byte Spill
- movl 60(%esp), %eax # 4-byte Reload
- sbbl 32(%edi), %eax
- movl %eax, 60(%esp) # 4-byte Spill
- movl 64(%esp), %eax # 4-byte Reload
- sbbl 36(%edi), %eax
- movl %eax, 64(%esp) # 4-byte Spill
- movl 68(%esp), %eax # 4-byte Reload
- sbbl 40(%edi), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 56(%esp), %ecx # 4-byte Reload
- sbbl 44(%edi), %ecx
- movl %ecx, 56(%esp) # 4-byte Spill
- movl %ecx, %eax
- sarl $31, %eax
- movl %eax, %edx
- addl %edx, %edx
- movl %eax, %edi
- adcl %edi, %edi
- movl %eax, %ebp
- adcl %ebp, %ebp
- movl %eax, %esi
- adcl %esi, %esi
- shrl $31, %ecx
- orl %edx, %ecx
- movl 104(%esp), %edx
- andl 12(%edx), %esi
- movl %esi, 8(%esp) # 4-byte Spill
- andl 8(%edx), %ebp
- andl 4(%edx), %edi
- andl (%edx), %ecx
- movl 44(%edx), %esi
- andl %eax, %esi
- movl %esi, 20(%esp) # 4-byte Spill
- movl 40(%edx), %esi
- andl %eax, %esi
- movl %esi, 16(%esp) # 4-byte Spill
- movl 36(%edx), %esi
- andl %eax, %esi
- movl %esi, 12(%esp) # 4-byte Spill
- movl 32(%edx), %esi
- andl %eax, %esi
- movl %esi, 4(%esp) # 4-byte Spill
- movl 28(%edx), %esi
- andl %eax, %esi
- movl %esi, (%esp) # 4-byte Spill
- movl 24(%edx), %ebx
- andl %eax, %ebx
- movl 20(%edx), %esi
- andl %eax, %esi
- andl 16(%edx), %eax
- addl 48(%esp), %ecx # 4-byte Folded Reload
- adcl 52(%esp), %edi # 4-byte Folded Reload
- movl 92(%esp), %edx
- movl %ecx, (%edx)
- adcl 24(%esp), %ebp # 4-byte Folded Reload
- movl %edi, 4(%edx)
- movl 8(%esp), %ecx # 4-byte Reload
- adcl 28(%esp), %ecx # 4-byte Folded Reload
- movl %ebp, 8(%edx)
- adcl 32(%esp), %eax # 4-byte Folded Reload
- movl %ecx, 12(%edx)
- adcl 36(%esp), %esi # 4-byte Folded Reload
- movl %eax, 16(%edx)
- adcl 40(%esp), %ebx # 4-byte Folded Reload
- movl %esi, 20(%edx)
- movl (%esp), %ecx # 4-byte Reload
- adcl 44(%esp), %ecx # 4-byte Folded Reload
- movl %ebx, 24(%edx)
- movl 4(%esp), %eax # 4-byte Reload
- adcl 60(%esp), %eax # 4-byte Folded Reload
- movl %ecx, 28(%edx)
- movl 12(%esp), %ecx # 4-byte Reload
- adcl 64(%esp), %ecx # 4-byte Folded Reload
- movl %eax, 32(%edx)
- movl 16(%esp), %eax # 4-byte Reload
- adcl 68(%esp), %eax # 4-byte Folded Reload
- movl %ecx, 36(%edx)
- movl %eax, 40(%edx)
- movl 20(%esp), %eax # 4-byte Reload
- adcl 56(%esp), %eax # 4-byte Folded Reload
- movl %eax, 44(%edx)
- addl $72, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end187:
- .size mcl_fp_subNF12L, .Lfunc_end187-mcl_fp_subNF12L
-
- .globl mcl_fpDbl_add12L
- .align 16, 0x90
- .type mcl_fpDbl_add12L,@function
-mcl_fpDbl_add12L: # @mcl_fpDbl_add12L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $88, %esp
- movl 116(%esp), %ecx
- movl 112(%esp), %edi
- movl 12(%edi), %esi
- movl 16(%edi), %edx
- movl 8(%ecx), %ebx
- movl (%ecx), %ebp
- addl (%edi), %ebp
- movl 108(%esp), %eax
- movl %ebp, (%eax)
- movl 4(%ecx), %ebp
- adcl 4(%edi), %ebp
- adcl 8(%edi), %ebx
- adcl 12(%ecx), %esi
- adcl 16(%ecx), %edx
- movl %ebp, 4(%eax)
- movl 56(%ecx), %ebp
- movl %ebx, 8(%eax)
- movl 20(%ecx), %ebx
- movl %esi, 12(%eax)
- movl 20(%edi), %esi
- adcl %ebx, %esi
- movl 24(%ecx), %ebx
- movl %edx, 16(%eax)
- movl 24(%edi), %edx
- adcl %ebx, %edx
- movl 28(%ecx), %ebx
- movl %esi, 20(%eax)
- movl 28(%edi), %esi
- adcl %ebx, %esi
- movl 32(%ecx), %ebx
- movl %edx, 24(%eax)
- movl 32(%edi), %edx
- adcl %ebx, %edx
- movl 36(%ecx), %ebx
- movl %esi, 28(%eax)
- movl 36(%edi), %esi
- adcl %ebx, %esi
- movl 40(%ecx), %ebx
- movl %edx, 32(%eax)
- movl 40(%edi), %edx
- adcl %ebx, %edx
- movl 44(%ecx), %ebx
- movl %esi, 36(%eax)
- movl 44(%edi), %esi
- adcl %ebx, %esi
- movl 48(%ecx), %ebx
- movl %edx, 40(%eax)
- movl 48(%edi), %edx
- adcl %ebx, %edx
- movl %edx, 72(%esp) # 4-byte Spill
- movl 52(%ecx), %ebx
- movl %esi, 44(%eax)
- movl 52(%edi), %eax
- adcl %ebx, %eax
- movl %eax, 76(%esp) # 4-byte Spill
- movl 56(%edi), %eax
- adcl %ebp, %eax
- movl %eax, 80(%esp) # 4-byte Spill
- movl 60(%ecx), %eax
- movl 60(%edi), %edx
- adcl %eax, %edx
- movl %edx, 84(%esp) # 4-byte Spill
- movl 64(%ecx), %eax
- movl 64(%edi), %edx
- adcl %eax, %edx
- movl %edx, 60(%esp) # 4-byte Spill
- movl 68(%ecx), %eax
- movl 68(%edi), %edx
- adcl %eax, %edx
- movl %edx, 64(%esp) # 4-byte Spill
- movl 72(%ecx), %eax
- movl 72(%edi), %edx
- adcl %eax, %edx
- movl %edx, 68(%esp) # 4-byte Spill
- movl 76(%ecx), %eax
- movl 76(%edi), %edx
- adcl %eax, %edx
- movl 80(%ecx), %esi
- movl 80(%edi), %eax
- adcl %esi, %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 84(%ecx), %ebx
- movl 84(%edi), %esi
- adcl %ebx, %esi
- movl %esi, 52(%esp) # 4-byte Spill
- movl 88(%ecx), %ebp
- movl 88(%edi), %ebx
- adcl %ebp, %ebx
- movl %ebx, 56(%esp) # 4-byte Spill
- movl 92(%ecx), %ecx
- movl 92(%edi), %edi
- adcl %ecx, %edi
- movl %edi, 44(%esp) # 4-byte Spill
- sbbl %ecx, %ecx
- andl $1, %ecx
- movl 120(%esp), %ebp
- movl 72(%esp), %edi # 4-byte Reload
- subl (%ebp), %edi
- movl %edi, 28(%esp) # 4-byte Spill
- movl 76(%esp), %edi # 4-byte Reload
- sbbl 4(%ebp), %edi
- movl %edi, 24(%esp) # 4-byte Spill
- movl 80(%esp), %edi # 4-byte Reload
- sbbl 8(%ebp), %edi
- movl %edi, 20(%esp) # 4-byte Spill
- movl 84(%esp), %edi # 4-byte Reload
- sbbl 12(%ebp), %edi
- movl %edi, 16(%esp) # 4-byte Spill
- movl 60(%esp), %edi # 4-byte Reload
- sbbl 16(%ebp), %edi
- movl %edi, 12(%esp) # 4-byte Spill
- movl 64(%esp), %edi # 4-byte Reload
- sbbl 20(%ebp), %edi
- movl %edi, 8(%esp) # 4-byte Spill
- movl 68(%esp), %edi # 4-byte Reload
- sbbl 24(%ebp), %edi
- movl %edi, 4(%esp) # 4-byte Spill
- movl %edx, %edi
- sbbl 28(%ebp), %edi
- movl %edi, (%esp) # 4-byte Spill
- sbbl 32(%ebp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- sbbl 36(%ebp), %esi
- movl %esi, 36(%esp) # 4-byte Spill
- movl %ebx, %eax
- movl 44(%esp), %ebx # 4-byte Reload
- sbbl 40(%ebp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl %ebx, %edi
- sbbl 44(%ebp), %edi
- sbbl $0, %ecx
- andl $1, %ecx
- jne .LBB188_2
-# BB#1:
- movl %edi, %ebx
-.LBB188_2:
- testb %cl, %cl
- movl 72(%esp), %ecx # 4-byte Reload
- movl 68(%esp), %esi # 4-byte Reload
- movl 64(%esp), %edi # 4-byte Reload
- movl 60(%esp), %ebp # 4-byte Reload
- jne .LBB188_4
-# BB#3:
- movl (%esp), %edx # 4-byte Reload
- movl 4(%esp), %esi # 4-byte Reload
- movl 8(%esp), %edi # 4-byte Reload
- movl 12(%esp), %ebp # 4-byte Reload
- movl 16(%esp), %eax # 4-byte Reload
- movl %eax, 84(%esp) # 4-byte Spill
- movl 20(%esp), %eax # 4-byte Reload
- movl %eax, 80(%esp) # 4-byte Spill
- movl 24(%esp), %eax # 4-byte Reload
- movl %eax, 76(%esp) # 4-byte Spill
- movl 28(%esp), %ecx # 4-byte Reload
-.LBB188_4:
- movl 108(%esp), %eax
- movl %ecx, 48(%eax)
- movl 76(%esp), %ecx # 4-byte Reload
- movl %ecx, 52(%eax)
- movl 80(%esp), %ecx # 4-byte Reload
- movl %ecx, 56(%eax)
- movl 84(%esp), %ecx # 4-byte Reload
- movl %ecx, 60(%eax)
- movl %ebp, 64(%eax)
- movl %edi, 68(%eax)
- movl %esi, 72(%eax)
- movl %edx, 76(%eax)
- movl 56(%esp), %ecx # 4-byte Reload
- movl 48(%esp), %edx # 4-byte Reload
- jne .LBB188_6
-# BB#5:
- movl 32(%esp), %edx # 4-byte Reload
-.LBB188_6: - movl %edx, 80(%eax) - movl 52(%esp), %edx # 4-byte Reload - jne .LBB188_8 -# BB#7: - movl 36(%esp), %edx # 4-byte Reload -.LBB188_8: - movl %edx, 84(%eax) - jne .LBB188_10 -# BB#9: - movl 40(%esp), %ecx # 4-byte Reload -.LBB188_10: - movl %ecx, 88(%eax) - movl %ebx, 92(%eax) - addl $88, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end188: - .size mcl_fpDbl_add12L, .Lfunc_end188-mcl_fpDbl_add12L - - .globl mcl_fpDbl_sub12L - .align 16, 0x90 - .type mcl_fpDbl_sub12L,@function -mcl_fpDbl_sub12L: # @mcl_fpDbl_sub12L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $76, %esp - movl 100(%esp), %esi - movl (%esi), %eax - movl 4(%esi), %edx - movl 104(%esp), %ebx - subl (%ebx), %eax - sbbl 4(%ebx), %edx - movl 8(%esi), %edi - sbbl 8(%ebx), %edi - movl 96(%esp), %ecx - movl %eax, (%ecx) - movl 12(%esi), %eax - sbbl 12(%ebx), %eax - movl %edx, 4(%ecx) - movl 16(%esi), %edx - sbbl 16(%ebx), %edx - movl %edi, 8(%ecx) - movl 20(%ebx), %edi - movl %eax, 12(%ecx) - movl 20(%esi), %eax - sbbl %edi, %eax - movl 24(%ebx), %edi - movl %edx, 16(%ecx) - movl 24(%esi), %edx - sbbl %edi, %edx - movl 28(%ebx), %edi - movl %eax, 20(%ecx) - movl 28(%esi), %eax - sbbl %edi, %eax - movl 32(%ebx), %edi - movl %edx, 24(%ecx) - movl 32(%esi), %edx - sbbl %edi, %edx - movl 36(%ebx), %edi - movl %eax, 28(%ecx) - movl 36(%esi), %eax - sbbl %edi, %eax - movl 40(%ebx), %edi - movl %edx, 32(%ecx) - movl 40(%esi), %edx - sbbl %edi, %edx - movl 44(%ebx), %edi - movl %eax, 36(%ecx) - movl 44(%esi), %eax - sbbl %edi, %eax - movl 48(%ebx), %edi - movl %edx, 40(%ecx) - movl 48(%esi), %edx - sbbl %edi, %edx - movl %edx, 32(%esp) # 4-byte Spill - movl 52(%ebx), %edx - movl %eax, 44(%ecx) - movl 52(%esi), %eax - sbbl %edx, %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 56(%ebx), %eax - movl 56(%esi), %edx - sbbl %eax, %edx - movl %edx, 28(%esp) # 4-byte Spill - movl 60(%ebx), %eax - movl 60(%esi), %edx - sbbl %eax, %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 64(%ebx), %eax - movl 64(%esi), %edx - sbbl %eax, %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 68(%ebx), %eax - movl 68(%esi), %edx - sbbl %eax, %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 72(%ebx), %eax - movl 72(%esi), %edx - sbbl %eax, %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 76(%ebx), %eax - movl 76(%esi), %edx - sbbl %eax, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 80(%ebx), %eax - movl 80(%esi), %edx - sbbl %eax, %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 84(%ebx), %eax - movl 84(%esi), %edx - sbbl %eax, %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 88(%ebx), %eax - movl 88(%esi), %edx - sbbl %eax, %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 92(%ebx), %eax - movl 92(%esi), %edx - sbbl %eax, %edx - movl %edx, 72(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 108(%esp), %ebp - jne .LBB189_1 -# BB#2: - movl $0, 36(%esp) # 4-byte Folded Spill - jmp .LBB189_3 -.LBB189_1: - movl 44(%ebp), %edx - movl %edx, 36(%esp) # 4-byte Spill -.LBB189_3: - testb %al, %al - jne .LBB189_4 -# BB#5: - movl $0, 12(%esp) # 4-byte Folded Spill - movl $0, %esi - jmp .LBB189_6 -.LBB189_4: - movl (%ebp), %esi - movl 4(%ebp), %eax - movl %eax, 12(%esp) # 4-byte Spill -.LBB189_6: - jne .LBB189_7 -# BB#8: - movl $0, 20(%esp) # 4-byte Folded Spill - jmp .LBB189_9 -.LBB189_7: - movl 40(%ebp), %eax - movl %eax, 20(%esp) # 4-byte Spill -.LBB189_9: - jne .LBB189_10 -# BB#11: - movl $0, 16(%esp) # 4-byte Folded Spill - jmp .LBB189_12 -.LBB189_10: - movl 36(%ebp), %eax - movl %eax, 
16(%esp) # 4-byte Spill -.LBB189_12: - jne .LBB189_13 -# BB#14: - movl $0, 8(%esp) # 4-byte Folded Spill - jmp .LBB189_15 -.LBB189_13: - movl 32(%ebp), %eax - movl %eax, 8(%esp) # 4-byte Spill -.LBB189_15: - jne .LBB189_16 -# BB#17: - movl $0, 4(%esp) # 4-byte Folded Spill - jmp .LBB189_18 -.LBB189_16: - movl 28(%ebp), %eax - movl %eax, 4(%esp) # 4-byte Spill -.LBB189_18: - jne .LBB189_19 -# BB#20: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB189_21 -.LBB189_19: - movl 24(%ebp), %eax - movl %eax, (%esp) # 4-byte Spill -.LBB189_21: - jne .LBB189_22 -# BB#23: - movl $0, %ebx - jmp .LBB189_24 -.LBB189_22: - movl 20(%ebp), %ebx -.LBB189_24: - jne .LBB189_25 -# BB#26: - movl $0, %eax - jmp .LBB189_27 -.LBB189_25: - movl 16(%ebp), %eax -.LBB189_27: - jne .LBB189_28 -# BB#29: - movl %ebp, %edx - movl $0, %ebp - jmp .LBB189_30 -.LBB189_28: - movl %ebp, %edx - movl 12(%edx), %ebp -.LBB189_30: - jne .LBB189_31 -# BB#32: - xorl %edx, %edx - jmp .LBB189_33 -.LBB189_31: - movl 8(%edx), %edx -.LBB189_33: - addl 32(%esp), %esi # 4-byte Folded Reload - movl 12(%esp), %edi # 4-byte Reload - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %esi, 48(%ecx) - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edi, 52(%ecx) - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl %edx, 56(%ecx) - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %ebp, 60(%ecx) - adcl 48(%esp), %ebx # 4-byte Folded Reload - movl %eax, 64(%ecx) - movl (%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %ebx, 68(%ecx) - movl 4(%esp), %edx # 4-byte Reload - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %eax, 72(%ecx) - movl 8(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %edx, 76(%ecx) - movl 16(%esp), %edx # 4-byte Reload - adcl 64(%esp), %edx # 4-byte Folded Reload - movl %eax, 80(%ecx) - movl 20(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %edx, 84(%ecx) - movl %eax, 88(%ecx) - movl 36(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%ecx) - addl $76, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end189: - .size mcl_fpDbl_sub12L, .Lfunc_end189-mcl_fpDbl_sub12L - - .align 16, 0x90 - .type .LmulPv416x32,@function -.LmulPv416x32: # @mulPv416x32 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $88, %esp - movl %edx, %edi - movl 108(%esp), %ebp - movl %ebp, %eax - mull 48(%edi) - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 80(%esp) # 4-byte Spill - movl %ebp, %eax - mull 44(%edi) - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 72(%esp) # 4-byte Spill - movl %ebp, %eax - mull 40(%edi) - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte Spill - movl %ebp, %eax - mull 36(%edi) - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - movl %ebp, %eax - mull 32(%edi) - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %ebp, %eax - mull 28(%edi) - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ebp, %eax - mull 24(%edi) - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ebp, %eax - mull 20(%edi) - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ebp, %eax - mull 16(%edi) - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ebp, %eax - mull 12(%edi) - movl %edx, 12(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ebp, %eax - mull 8(%edi) - movl %edx, %esi 
- movl %eax, 4(%esp) # 4-byte Spill - movl %ebp, %eax - mull 4(%edi) - movl %edx, %ebx - movl %eax, (%esp) # 4-byte Spill - movl %ebp, %eax - mull (%edi) - movl %eax, (%ecx) - addl (%esp), %edx # 4-byte Folded Reload - movl %edx, 4(%ecx) - adcl 4(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 8(%ecx) - adcl 8(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%ecx) - movl 12(%esp), %eax # 4-byte Reload - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 16(%ecx) - movl 20(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%ecx) - movl 28(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%ecx) - movl 36(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%ecx) - movl 44(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%ecx) - movl 52(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%ecx) - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%ecx) - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%ecx) - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%ecx) - movl 84(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 52(%ecx) - movl %ecx, %eax - addl $88, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end190: - .size .LmulPv416x32, .Lfunc_end190-.LmulPv416x32 - - .globl mcl_fp_mulUnitPre13L - .align 16, 0x90 - .type mcl_fp_mulUnitPre13L,@function -mcl_fp_mulUnitPre13L: # @mcl_fp_mulUnitPre13L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $108, %esp - calll .L191$pb -.L191$pb: - popl %ebx -.Ltmp32: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp32-.L191$pb), %ebx - movl 136(%esp), %eax - movl %eax, (%esp) - leal 48(%esp), %ecx - movl 132(%esp), %edx - calll .LmulPv416x32 - movl 100(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 96(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 92(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 88(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 84(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 80(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 76(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 72(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 68(%esp), %ebp - movl 64(%esp), %ebx - movl 60(%esp), %edi - movl 56(%esp), %esi - movl 48(%esp), %edx - movl 52(%esp), %ecx - movl 128(%esp), %eax - movl %edx, (%eax) - movl %ecx, 4(%eax) - movl %esi, 8(%eax) - movl %edi, 12(%eax) - movl %ebx, 16(%eax) - movl %ebp, 20(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 40(%eax) - movl 36(%esp), %ecx # 4-byte Reload - movl %ecx, 44(%eax) - movl 40(%esp), %ecx # 4-byte Reload - movl %ecx, 48(%eax) - movl 44(%esp), %ecx # 4-byte Reload - movl %ecx, 52(%eax) - addl $108, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end191: - .size mcl_fp_mulUnitPre13L, .Lfunc_end191-mcl_fp_mulUnitPre13L - - .globl mcl_fpDbl_mulPre13L - .align 16, 0x90 - .type mcl_fpDbl_mulPre13L,@function -mcl_fpDbl_mulPre13L: # @mcl_fpDbl_mulPre13L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl 
%esi - subl $844, %esp # imm = 0x34C - calll .L192$pb -.L192$pb: - popl %edi -.Ltmp33: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp33-.L192$pb), %edi - movl %edi, 108(%esp) # 4-byte Spill - movl 872(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 784(%esp), %ecx - movl 868(%esp), %edx - movl %edx, %esi - movl %edi, %ebx - calll .LmulPv416x32 - movl 836(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 832(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 828(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 820(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 816(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 812(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 808(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 804(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 800(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 796(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 792(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 784(%esp), %eax - movl 788(%esp), %ebp - movl 864(%esp), %ecx - movl %eax, (%ecx) - movl 872(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 728(%esp), %ecx - movl %esi, %edx - movl %edi, %ebx - calll .LmulPv416x32 - addl 728(%esp), %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl 780(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 776(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 772(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 768(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 764(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 760(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 756(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 752(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 748(%esp), %edi - movl 744(%esp), %esi - movl 740(%esp), %edx - movl 732(%esp), %eax - movl 736(%esp), %ecx - movl 864(%esp), %ebp - movl 24(%esp), %ebx # 4-byte Reload - movl %ebx, 4(%ebp) - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 672(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 60(%esp), %eax # 4-byte Reload - addl 
672(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 724(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 720(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 716(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 712(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 708(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 704(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 700(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 696(%esp), %ebx - movl 692(%esp), %edi - movl 688(%esp), %esi - movl 684(%esp), %edx - movl 676(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 680(%esp), %ecx - movl 864(%esp), %eax - movl 60(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%eax) - movl 24(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - adcl 48(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 616(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 616(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 668(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 664(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 660(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 656(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 652(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 648(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 644(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 640(%esp), %ebx - movl 636(%esp), %edi - movl 632(%esp), %esi - movl 628(%esp), %edx - movl 620(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 624(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 60(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 48(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - 
movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 560(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 560(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 612(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 608(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 604(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 600(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 596(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 592(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 588(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 584(%esp), %ebx - movl 580(%esp), %edi - movl 576(%esp), %esi - movl 572(%esp), %edx - movl 564(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 568(%esp), %ecx - movl 864(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%eax) - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 56(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 504(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 504(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 556(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 552(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 548(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 544(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 540(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 536(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 532(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 528(%esp), %ebx - movl 524(%esp), %edi - movl 520(%esp), %esi - movl 516(%esp), %edx - movl 508(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 
512(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 16(%esp), %ebp # 4-byte Folded Reload - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 448(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 448(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 500(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 496(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 492(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 488(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 484(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 480(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 476(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 472(%esp), %ebp - movl 468(%esp), %edi - movl 464(%esp), %esi - movl 460(%esp), %edx - movl 452(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 456(%esp), %ecx - movl 864(%esp), %eax - movl 56(%esp), %ebx # 4-byte Reload - movl %ebx, 24(%eax) - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - adcl 68(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 20(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 392(%esp), %ecx - movl 868(%esp), 
%edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 392(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 444(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 440(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 436(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 432(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 428(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 424(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 420(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 416(%esp), %ebx - movl 412(%esp), %edi - movl 408(%esp), %esi - movl 404(%esp), %edx - movl 396(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 400(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%eax) - movl 60(%esp), %ebp # 4-byte Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 336(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 336(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 388(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 384(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 380(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 376(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 372(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 368(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 364(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 360(%esp), %ebp - movl 356(%esp), %edi - movl 352(%esp), %esi - movl 348(%esp), %edx - movl 340(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 344(%esp), %ecx - movl 864(%esp), %eax - movl 60(%esp), %ebx # 4-byte Reload - movl %ebx, 32(%eax) - movl 24(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - adcl 68(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 16(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - 
movl %eax, 64(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 280(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 280(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 332(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 328(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 324(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 320(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 316(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 312(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 308(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 304(%esp), %ebx - movl 300(%esp), %edi - movl 296(%esp), %esi - movl 292(%esp), %edx - movl 284(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 288(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 36(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 60(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 872(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 224(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 224(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 276(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 272(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 268(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 264(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 260(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 256(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 252(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 248(%esp), %ebx - movl 244(%esp), %edi - movl 240(%esp), %esi - movl 236(%esp), %edx 
- movl 228(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 232(%esp), %ecx - movl 864(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 40(%eax) - movl 60(%esp), %eax # 4-byte Reload - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 40(%esp) # 4-byte Spill - adcl 68(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 872(%esp), %edi - movl 44(%edi), %eax - movl %eax, (%esp) - leal 168(%esp), %ecx - movl 868(%esp), %eax - movl %eax, %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 60(%esp), %esi # 4-byte Reload - addl 168(%esp), %esi - movl 220(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 216(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 212(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 208(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 204(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 200(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 196(%esp), %ebp - movl 192(%esp), %ebx - movl 188(%esp), %edi - movl 184(%esp), %edx - movl 180(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 172(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 176(%esp), %ecx - movl 864(%esp), %eax - movl %esi, 44(%eax) - movl 68(%esp), %esi # 4-byte Reload - adcl 24(%esp), %esi # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 48(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - adcl 104(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 52(%esp) # 
4-byte Folded Spill - movl 872(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 112(%esp), %ecx - movl 868(%esp), %edx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 112(%esp), %esi - movl %esi, %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 120(%esp), %edi - movl 164(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 160(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 156(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 152(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 148(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 144(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 140(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 136(%esp), %ebx - movl 132(%esp), %esi - movl 128(%esp), %edx - movl 124(%esp), %ecx - movl 864(%esp), %eax - movl %ebp, 48(%eax) - movl 68(%esp), %ebp # 4-byte Reload - movl %ebp, 52(%eax) - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl %edi, 56(%eax) - adcl 48(%esp), %edx # 4-byte Folded Reload - movl %ecx, 60(%eax) - adcl 64(%esp), %esi # 4-byte Folded Reload - movl %edx, 64(%eax) - adcl 104(%esp), %ebx # 4-byte Folded Reload - movl %esi, 68(%eax) - movl 44(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ebx, 72(%eax) - movl 60(%esp), %edx # 4-byte Reload - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %ecx, 76(%eax) - movl 76(%esp), %ecx # 4-byte Reload - adcl 88(%esp), %ecx # 4-byte Folded Reload - movl %edx, 80(%eax) - movl 84(%esp), %edx # 4-byte Reload - adcl 80(%esp), %edx # 4-byte Folded Reload - movl %ecx, 84(%eax) - movl 92(%esp), %ecx # 4-byte Reload - adcl 100(%esp), %ecx # 4-byte Folded Reload - movl %edx, 88(%eax) - movl %ecx, 92(%eax) - movl 96(%esp), %ecx # 4-byte Reload - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 96(%eax) - movl 108(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 100(%eax) - addl $844, %esp # imm = 0x34C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end192: - .size mcl_fpDbl_mulPre13L, .Lfunc_end192-mcl_fpDbl_mulPre13L - - .globl mcl_fpDbl_sqrPre13L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre13L,@function -mcl_fpDbl_sqrPre13L: # @mcl_fpDbl_sqrPre13L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $844, %esp # imm = 0x34C - calll .L193$pb -.L193$pb: - popl %ebx -.Ltmp34: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp34-.L193$pb), %ebx - movl %ebx, 108(%esp) # 4-byte Spill - movl 868(%esp), %edx - movl (%edx), %eax - movl %eax, (%esp) - leal 784(%esp), %ecx - movl %edx, %edi - movl %ebx, %esi - calll .LmulPv416x32 - movl 836(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 832(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 828(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 820(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 816(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 812(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 808(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 804(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 800(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 796(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 792(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 784(%esp), %eax - movl 788(%esp), %ebp - movl 864(%esp), %ecx - movl %eax, (%ecx) - movl %edi, %edx - movl 4(%edx), %eax - movl %eax, 
(%esp) - leal 728(%esp), %ecx - movl %esi, %ebx - calll .LmulPv416x32 - addl 728(%esp), %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl 780(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 776(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 772(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 768(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 764(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 760(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 756(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 752(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 748(%esp), %edi - movl 744(%esp), %esi - movl 740(%esp), %edx - movl 732(%esp), %eax - movl 736(%esp), %ecx - movl 864(%esp), %ebp - movl 24(%esp), %ebx # 4-byte Reload - movl %ebx, 4(%ebp) - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 8(%edx), %eax - movl %eax, (%esp) - leal 672(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 60(%esp), %eax # 4-byte Reload - addl 672(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 724(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 720(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 716(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 712(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 708(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 704(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 700(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 696(%esp), %ebx - movl 692(%esp), %edi - movl 688(%esp), %esi - movl 684(%esp), %edx - movl 676(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 680(%esp), %ecx - movl 864(%esp), %eax - movl 60(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%eax) - movl 24(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - adcl 48(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte 
Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 12(%edx), %eax - movl %eax, (%esp) - leal 616(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 616(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 668(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 664(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 660(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 656(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 652(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 648(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 644(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 640(%esp), %ebx - movl 636(%esp), %edi - movl 632(%esp), %esi - movl 628(%esp), %edx - movl 620(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 624(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 60(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 48(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 16(%edx), %eax - movl %eax, (%esp) - leal 560(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 560(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 612(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 608(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 604(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 600(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 596(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 592(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 588(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 584(%esp), %ebx - movl 580(%esp), %edi 
- movl 576(%esp), %esi - movl 572(%esp), %edx - movl 564(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 568(%esp), %ecx - movl 864(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%eax) - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 56(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 20(%edx), %eax - movl %eax, (%esp) - leal 504(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 504(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 556(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 552(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 548(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 544(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 540(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 536(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 532(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 528(%esp), %ebx - movl 524(%esp), %edi - movl 520(%esp), %esi - movl 516(%esp), %edx - movl 508(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 512(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 16(%esp), %ebp # 4-byte Folded Reload - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 
868(%esp), %edx - movl 24(%edx), %eax - movl %eax, (%esp) - leal 448(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 448(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 500(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 496(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 492(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 488(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 484(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 480(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 476(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 472(%esp), %ebp - movl 468(%esp), %edi - movl 464(%esp), %esi - movl 460(%esp), %edx - movl 452(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 456(%esp), %ecx - movl 864(%esp), %eax - movl 56(%esp), %ebx # 4-byte Reload - movl %ebx, 24(%eax) - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - adcl 68(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 20(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 28(%edx), %eax - movl %eax, (%esp) - leal 392(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 392(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 444(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 440(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 436(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 432(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 428(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 424(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 420(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 416(%esp), %ebx - movl 412(%esp), %edi - movl 408(%esp), %esi - movl 404(%esp), %edx - movl 396(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 400(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%eax) - movl 60(%esp), %ebp # 4-byte Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 36(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte 
Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 32(%edx), %eax - movl %eax, (%esp) - leal 336(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 336(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 388(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 384(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 380(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 376(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 372(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 368(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 364(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 360(%esp), %ebp - movl 356(%esp), %edi - movl 352(%esp), %esi - movl 348(%esp), %edx - movl 340(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 344(%esp), %ecx - movl 864(%esp), %eax - movl 60(%esp), %ebx # 4-byte Reload - movl %ebx, 32(%eax) - movl 24(%esp), %eax # 4-byte Reload - adcl %eax, 104(%esp) # 4-byte Folded Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - adcl 68(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 16(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 36(%edx), %eax - movl %eax, (%esp) - leal 280(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 280(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 332(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 328(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 324(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 320(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 316(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 312(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 308(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 304(%esp), %ebx - movl 300(%esp), %edi - movl 296(%esp), %esi - movl 
292(%esp), %edx - movl 284(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 288(%esp), %ecx - movl 864(%esp), %eax - movl 104(%esp), %ebp # 4-byte Reload - movl %ebp, 36(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 24(%esp), %ebp # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 60(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 40(%edx), %eax - movl %eax, (%esp) - leal 224(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 224(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 276(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 272(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 268(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 264(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 260(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 256(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 252(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 248(%esp), %ebx - movl 244(%esp), %edi - movl 240(%esp), %esi - movl 236(%esp), %edx - movl 228(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 232(%esp), %ecx - movl 864(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 40(%eax) - movl 60(%esp), %eax # 4-byte Reload - adcl 20(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 32(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 40(%esp) # 4-byte Spill - adcl 68(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 
44(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 44(%edx), %eax - movl %eax, (%esp) - leal 168(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 60(%esp), %esi # 4-byte Reload - addl 168(%esp), %esi - movl 220(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 216(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 212(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 208(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 204(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 200(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 196(%esp), %ebp - movl 192(%esp), %ebx - movl 188(%esp), %edi - movl 184(%esp), %edx - movl 180(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 172(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 176(%esp), %ecx - movl 864(%esp), %eax - movl %esi, 44(%eax) - movl 68(%esp), %esi # 4-byte Reload - adcl 24(%esp), %esi # 4-byte Folded Reload - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 40(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 48(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - adcl 104(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 52(%esp) # 4-byte Folded Spill - movl 868(%esp), %edx - movl 48(%edx), %eax - movl %eax, (%esp) - leal 112(%esp), %ecx - movl 108(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 112(%esp), %esi - movl %esi, %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 120(%esp), %edi - movl 164(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 160(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 156(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 152(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 148(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 144(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 140(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 136(%esp), %ebx - movl 132(%esp), %esi - movl 128(%esp), %edx - movl 124(%esp), %ecx - movl 864(%esp), %eax - movl %ebp, 48(%eax) - movl 68(%esp), %ebp # 4-byte Reload - movl %ebp, 52(%eax) - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl %edi, 56(%eax) - adcl 48(%esp), %edx # 4-byte Folded Reload - movl %ecx, 60(%eax) - adcl 64(%esp), %esi # 4-byte Folded Reload - movl %edx, 64(%eax) - adcl 104(%esp), %ebx # 4-byte Folded Reload - movl %esi, 68(%eax) - movl 44(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ebx, 72(%eax) - movl 60(%esp), %edx # 4-byte Reload - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %ecx, 76(%eax) - 
movl 76(%esp), %ecx # 4-byte Reload
-	adcl 88(%esp), %ecx # 4-byte Folded Reload
-	movl %edx, 80(%eax)
-	movl 84(%esp), %edx # 4-byte Reload
-	adcl 80(%esp), %edx # 4-byte Folded Reload
-	movl %ecx, 84(%eax)
-	movl 92(%esp), %ecx # 4-byte Reload
-	adcl 100(%esp), %ecx # 4-byte Folded Reload
-	movl %edx, 88(%eax)
-	movl %ecx, 92(%eax)
-	movl 96(%esp), %ecx # 4-byte Reload
-	adcl 52(%esp), %ecx # 4-byte Folded Reload
-	movl %ecx, 96(%eax)
-	movl 108(%esp), %ecx # 4-byte Reload
-	adcl $0, %ecx
-	movl %ecx, 100(%eax)
-	addl $844, %esp # imm = 0x34C
-	popl %esi
-	popl %edi
-	popl %ebx
-	popl %ebp
-	retl
-.Lfunc_end193:
-	.size mcl_fpDbl_sqrPre13L, .Lfunc_end193-mcl_fpDbl_sqrPre13L
-
-	.globl mcl_fp_mont13L
-	.align 16, 0x90
-	.type mcl_fp_mont13L,@function
-mcl_fp_mont13L: # @mcl_fp_mont13L
-# BB#0:
-	pushl %ebp
-	pushl %ebx
-	pushl %edi
-	pushl %esi
-	subl $1548, %esp # imm = 0x60C
-	calll .L194$pb
-.L194$pb:
-	popl %ebx
-.Ltmp35:
-	addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp35-.L194$pb), %ebx
-	movl 1580(%esp), %eax
-	movl -4(%eax), %edi
-	movl %edi, 28(%esp) # 4-byte Spill
-	movl 1576(%esp), %eax
-	movl (%eax), %eax
-	movl %eax, (%esp)
-	leal 1488(%esp), %ecx
-	movl 1572(%esp), %edx
-	calll .LmulPv416x32
-	movl 1488(%esp), %esi
-	movl 1492(%esp), %eax
-	movl %eax, 76(%esp) # 4-byte Spill
-	movl %esi, %eax
-	imull %edi, %eax
-	movl 1540(%esp), %ecx
-	movl %ecx, 80(%esp) # 4-byte Spill
-	movl 1536(%esp), %ecx
-	movl %ecx, 72(%esp) # 4-byte Spill
-	movl 1532(%esp), %ecx
-	movl %ecx, 68(%esp) # 4-byte Spill
-	movl 1528(%esp), %ecx
-	movl %ecx, 52(%esp) # 4-byte Spill
-	movl 1524(%esp), %ecx
-	movl %ecx, 56(%esp) # 4-byte Spill
-	movl 1520(%esp), %ecx
-	movl %ecx, 44(%esp) # 4-byte Spill
-	movl 1516(%esp), %ecx
-	movl %ecx, 36(%esp) # 4-byte Spill
-	movl 1512(%esp), %ecx
-	movl %ecx, 32(%esp) # 4-byte Spill
-	movl 1508(%esp), %ecx
-	movl %ecx, 48(%esp) # 4-byte Spill
-	movl 1504(%esp), %edi
-	movl 1500(%esp), %ebp
-	movl 1496(%esp), %ecx
-	movl %ecx, 84(%esp) # 4-byte Spill
-	movl %eax, (%esp)
-	leal 1432(%esp), %ecx
-	movl 1580(%esp), %edx
-	calll .LmulPv416x32
-	addl 1432(%esp), %esi
-	movl 76(%esp), %eax # 4-byte Reload
-	adcl 1436(%esp), %eax
-	movl %eax, 76(%esp) # 4-byte Spill
-	movl 84(%esp), %eax # 4-byte Reload
-	adcl 1440(%esp), %eax
-	movl %eax, 84(%esp) # 4-byte Spill
-	adcl 1444(%esp), %ebp
-	movl %ebp, 60(%esp) # 4-byte Spill
-	adcl 1448(%esp), %edi
-	movl 48(%esp), %eax # 4-byte Reload
-	adcl 1452(%esp), %eax
-	movl %eax, 48(%esp) # 4-byte Spill
-	movl 32(%esp), %eax # 4-byte Reload
-	adcl 1456(%esp), %eax
-	movl %eax, 32(%esp) # 4-byte Spill
-	movl 36(%esp), %eax # 4-byte Reload
-	adcl 1460(%esp), %eax
-	movl %eax, 36(%esp) # 4-byte Spill
-	movl 44(%esp), %eax # 4-byte Reload
-	adcl 1464(%esp), %eax
-	movl %eax, 44(%esp) # 4-byte Spill
-	movl 56(%esp), %eax # 4-byte Reload
-	adcl 1468(%esp), %eax
-	movl %eax, 56(%esp) # 4-byte Spill
-	movl 52(%esp), %ebp # 4-byte Reload
-	adcl 1472(%esp), %ebp
-	movl 68(%esp), %eax # 4-byte Reload
-	adcl 1476(%esp), %eax
-	movl %eax, 68(%esp) # 4-byte Spill
-	movl 72(%esp), %eax # 4-byte Reload
-	adcl 1480(%esp), %eax
-	movl %eax, 72(%esp) # 4-byte Spill
-	movl 80(%esp), %eax # 4-byte Reload
-	adcl 1484(%esp), %eax
-	movl %eax, 80(%esp) # 4-byte Spill
-	sbbl %esi, %esi
-	movl 1576(%esp), %eax
-	movl 4(%eax), %eax
-	movl %eax, (%esp)
-	leal 1376(%esp), %ecx
-	movl 1572(%esp), %edx
-	calll .LmulPv416x32
-	andl $1, %esi
-	movl 76(%esp), %ecx # 4-byte Reload
-	addl 1376(%esp), %ecx
-	movl %ecx, 76(%esp) # 4-byte Spill
-	movl 84(%esp), %eax # 4-byte Reload
-
adcl 1380(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1384(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 1388(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1392(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 1396(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1400(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 1404(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 1408(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1412(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1416(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %ebp # 4-byte Reload - adcl 1420(%esp), %ebp - movl 80(%esp), %eax # 4-byte Reload - adcl 1424(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 1428(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1320(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - andl $1, %esi - movl %esi, %eax - movl 76(%esp), %ecx # 4-byte Reload - addl 1320(%esp), %ecx - movl 84(%esp), %ecx # 4-byte Reload - adcl 1324(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1328(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1332(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1336(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 32(%esp), %esi # 4-byte Reload - adcl 1340(%esp), %esi - movl 36(%esp), %ecx # 4-byte Reload - adcl 1344(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - adcl 1348(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1352(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1356(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1360(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - adcl 1364(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 1368(%esp), %ebp - movl 64(%esp), %edi # 4-byte Reload - adcl 1372(%esp), %edi - adcl $0, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 1264(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 84(%esp), %ecx # 4-byte Reload - addl 1264(%esp), %ecx - movl 60(%esp), %eax # 4-byte Reload - adcl 1268(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1272(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1276(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 1280(%esp), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1284(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1288(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1292(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1296(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 1300(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 1304(%esp), %eax 
- movl %eax, 72(%esp) # 4-byte Spill - adcl 1308(%esp), %ebp - adcl 1312(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1316(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl %ecx, %edi - movl %edi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1208(%esp), %ecx - movl 1580(%esp), %eax - movl %eax, %edx - calll .LmulPv416x32 - movl 84(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 1208(%esp), %edi - movl 60(%esp), %ecx # 4-byte Reload - adcl 1212(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1216(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1220(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - adcl 1224(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 1228(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1232(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1236(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1240(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl 1244(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl 1248(%esp), %edi - adcl 1252(%esp), %ebp - movl %ebp, %esi - movl 64(%esp), %ecx # 4-byte Reload - adcl 1256(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl 1260(%esp), %ebp - adcl $0, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 1152(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 60(%esp), %ecx # 4-byte Reload - addl 1152(%esp), %ecx - movl 40(%esp), %eax # 4-byte Reload - adcl 1156(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1160(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 1164(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1168(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1172(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1176(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1180(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1184(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 1188(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - adcl 1192(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1196(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 1200(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1204(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %esi - movl %esi, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1096(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - andl $1, %ebp - movl %ebp, %eax - addl 1096(%esp), %esi - movl 40(%esp), %ecx # 4-byte Reload - adcl 1100(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1104(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 32(%esp), %ecx 
# 4-byte Reload - adcl 1108(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 1112(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1116(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1120(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 1124(%esp), %ebp - movl 68(%esp), %edi # 4-byte Reload - adcl 1128(%esp), %edi - movl 72(%esp), %esi # 4-byte Reload - adcl 1132(%esp), %esi - movl 80(%esp), %ecx # 4-byte Reload - adcl 1136(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1140(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1144(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1148(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 1040(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 40(%esp), %ecx # 4-byte Reload - addl 1040(%esp), %ecx - movl 48(%esp), %eax # 4-byte Reload - adcl 1044(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 1048(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1052(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1056(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1060(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1064(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - adcl 1068(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - adcl 1072(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1076(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1080(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 1084(%esp), %esi - movl 84(%esp), %eax # 4-byte Reload - adcl 1088(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1092(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %ebp - movl %ebp, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 984(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - andl $1, %edi - addl 984(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 988(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %ebp # 4-byte Reload - adcl 996(%esp), %ebp - movl 44(%esp), %eax # 4-byte Reload - adcl 1000(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1004(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1008(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1012(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1016(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1020(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1024(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 1028(%esp), %esi - movl 
%esi, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1032(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1036(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl $0, %edi - movl 1576(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 928(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 48(%esp), %ecx # 4-byte Reload - addl 928(%esp), %ecx - movl 32(%esp), %eax # 4-byte Reload - adcl 932(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - adcl 936(%esp), %ebp - movl %ebp, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 940(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - adcl 944(%esp), %esi - movl 52(%esp), %eax # 4-byte Reload - adcl 948(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 952(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 956(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 960(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 976(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 980(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %ebp - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 872(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - andl $1, %edi - addl 872(%esp), %ebp - movl 32(%esp), %eax # 4-byte Reload - adcl 876(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %ebp # 4-byte Reload - adcl 884(%esp), %ebp - adcl 888(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl 904(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 912(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 920(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 924(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 816(%esp), %ecx - movl 1572(%esp), %eax - movl %eax, %edx - calll .LmulPv416x32 - movl 32(%esp), %ecx # 4-byte Reload - addl 816(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 824(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 828(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - 
movl 52(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 844(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 848(%esp), %ebp - movl 76(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 856(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 760(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - andl $1, %esi - movl %esi, %eax - movl 32(%esp), %ecx # 4-byte Reload - addl 760(%esp), %ecx - movl 36(%esp), %ecx # 4-byte Reload - adcl 764(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 768(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 772(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 776(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 780(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 784(%esp), %esi - movl 80(%esp), %ecx # 4-byte Reload - adcl 788(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - adcl 792(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 796(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - adcl 800(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 804(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 808(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - adcl 812(%esp), %edi - adcl $0, %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 704(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 36(%esp), %eax # 4-byte Reload - addl 704(%esp), %eax - movl 44(%esp), %ecx # 4-byte Reload - adcl 708(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 712(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 716(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 720(%esp), %ebp - adcl 724(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 728(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 732(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 736(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 740(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 744(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 748(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - adcl 752(%esp), %edi - movl %edi, 40(%esp) # 
4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - adcl 756(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %eax, %esi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 648(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - movl %edi, %eax - andl $1, %eax - addl 648(%esp), %esi - movl 44(%esp), %ecx # 4-byte Reload - adcl 652(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 656(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 660(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - adcl 664(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 668(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - adcl 672(%esp), %edi - movl 64(%esp), %esi # 4-byte Reload - adcl 676(%esp), %esi - movl 76(%esp), %ecx # 4-byte Reload - adcl 680(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 684(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 688(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 692(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 696(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 32(%esp), %ecx # 4-byte Reload - adcl 700(%esp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 592(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 44(%esp), %ecx # 4-byte Reload - addl 592(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 600(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 612(%esp), %edi - movl %edi, 80(%esp) # 4-byte Spill - adcl 616(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 620(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 624(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 636(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 536(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - movl 44(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 536(%esp), %esi - movl 56(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 544(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 
4-byte Reload - adcl 556(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 560(%esp), %esi - adcl 564(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 572(%esp), %edi - movl 48(%esp), %ebp # 4-byte Reload - adcl 576(%esp), %ebp - movl 40(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 480(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 56(%esp), %ecx # 4-byte Reload - addl 480(%esp), %ecx - movl 52(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 496(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 500(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 512(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - adcl 516(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 424(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - movl %edi, %ecx - andl $1, %ecx - addl 424(%esp), %esi - movl 52(%esp), %ebp # 4-byte Reload - adcl 428(%esp), %ebp - movl 68(%esp), %edi # 4-byte Reload - adcl 432(%esp), %edi - movl 72(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 440(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 444(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl $0, %ecx - movl 
%ecx, 56(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 368(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - addl 368(%esp), %ebp - adcl 372(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl 376(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 384(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - adcl 392(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 52(%esp) # 4-byte Spill - movl %ebp, %eax - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 312(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - movl 52(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 312(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 320(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 328(%esp), %ebp - movl 76(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 336(%esp), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 340(%esp), %edi - movl 48(%esp), %esi # 4-byte Reload - adcl 344(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 256(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 68(%esp), %ecx # 4-byte Reload - addl 256(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 268(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 280(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - adcl 284(%esp), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - adcl 
288(%esp), %edi - movl 32(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %eax - movl %ecx, %esi - imull 28(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 200(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - andl $1, %ebp - movl %ebp, %ecx - addl 200(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 208(%esp), %ebp - movl 64(%esp), %esi # 4-byte Reload - adcl 212(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 232(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 32(%esp), %edi # 4-byte Reload - adcl 236(%esp), %edi - movl 36(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 144(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 72(%esp), %ecx # 4-byte Reload - addl 144(%esp), %ecx - adcl 148(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - adcl 152(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl 156(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 164(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 168(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 176(%esp), %edi - movl %edi, 32(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - sbbl %edi, %edi - movl 28(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 88(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - andl $1, %edi - addl 88(%esp), %esi - movl 80(%esp), %eax # 4-byte Reload - movl 84(%esp), %esi # 4-byte Reload - adcl 
92(%esp), %eax
-	movl %eax, 80(%esp) # 4-byte Spill
-	movl 64(%esp), %ecx # 4-byte Reload
-	adcl 96(%esp), %ecx
-	movl %ecx, 64(%esp) # 4-byte Spill
-	adcl 100(%esp), %ebp
-	movl %ebp, 76(%esp) # 4-byte Spill
-	adcl 104(%esp), %esi
-	movl %esi, 84(%esp) # 4-byte Spill
-	movl 60(%esp), %edx # 4-byte Reload
-	adcl 108(%esp), %edx
-	movl %edx, 60(%esp) # 4-byte Spill
-	movl 48(%esp), %ebx # 4-byte Reload
-	adcl 112(%esp), %ebx
-	movl %ebx, 48(%esp) # 4-byte Spill
-	movl 40(%esp), %ebx # 4-byte Reload
-	adcl 116(%esp), %ebx
-	movl %ebx, 40(%esp) # 4-byte Spill
-	movl 32(%esp), %ebx # 4-byte Reload
-	adcl 120(%esp), %ebx
-	movl %ebx, 32(%esp) # 4-byte Spill
-	movl 36(%esp), %ebx # 4-byte Reload
-	adcl 124(%esp), %ebx
-	movl %ebx, 36(%esp) # 4-byte Spill
-	movl 44(%esp), %ebx # 4-byte Reload
-	adcl 128(%esp), %ebx
-	movl %ebx, 44(%esp) # 4-byte Spill
-	movl 56(%esp), %ebx # 4-byte Reload
-	adcl 132(%esp), %ebx
-	movl %ebx, 56(%esp) # 4-byte Spill
-	movl 52(%esp), %ebx # 4-byte Reload
-	adcl 136(%esp), %ebx
-	movl %ebx, 52(%esp) # 4-byte Spill
-	movl 68(%esp), %ebx # 4-byte Reload
-	adcl 140(%esp), %ebx
-	movl %ebx, 68(%esp) # 4-byte Spill
-	adcl $0, %edi
-	movl 1580(%esp), %ebx
-	subl (%ebx), %eax
-	sbbl 4(%ebx), %ecx
-	sbbl 8(%ebx), %ebp
-	sbbl 12(%ebx), %esi
-	movl %esi, 4(%esp) # 4-byte Spill
-	sbbl 16(%ebx), %edx
-	movl %edx, 8(%esp) # 4-byte Spill
-	movl 48(%esp), %edx # 4-byte Reload
-	sbbl 20(%ebx), %edx
-	movl %edx, 12(%esp) # 4-byte Spill
-	movl 40(%esp), %edx # 4-byte Reload
-	sbbl 24(%ebx), %edx
-	movl %edx, 16(%esp) # 4-byte Spill
-	movl 32(%esp), %edx # 4-byte Reload
-	sbbl 28(%ebx), %edx
-	movl 36(%esp), %esi # 4-byte Reload
-	sbbl 32(%ebx), %esi
-	movl %esi, 20(%esp) # 4-byte Spill
-	movl 44(%esp), %esi # 4-byte Reload
-	sbbl 36(%ebx), %esi
-	movl %esi, 24(%esp) # 4-byte Spill
-	movl 56(%esp), %esi # 4-byte Reload
-	sbbl 40(%ebx), %esi
-	movl %esi, 28(%esp) # 4-byte Spill
-	movl 52(%esp), %esi # 4-byte Reload
-	sbbl 44(%ebx), %esi
-	movl %esi, 72(%esp) # 4-byte Spill
-	movl 68(%esp), %esi # 4-byte Reload
-	sbbl 48(%ebx), %esi
-	movl %esi, %ebx
-	sbbl $0, %edi
-	andl $1, %edi
-	jne .LBB194_2
-# BB#1:
-	movl %edx, 32(%esp) # 4-byte Spill
-.LBB194_2:
-	movl %edi, %edx
-	testb %dl, %dl
-	movl 80(%esp), %edx # 4-byte Reload
-	jne .LBB194_4
-# BB#3:
-	movl %eax, %edx
-.LBB194_4:
-	movl 1568(%esp), %eax
-	movl %edx, (%eax)
-	movl 64(%esp), %esi # 4-byte Reload
-	jne .LBB194_6
-# BB#5:
-	movl %ecx, %esi
-.LBB194_6:
-	movl %esi, 4(%eax)
-	jne .LBB194_8
-# BB#7:
-	movl %ebp, 76(%esp) # 4-byte Spill
-.LBB194_8:
-	movl 76(%esp), %ecx # 4-byte Reload
-	movl %ecx, 8(%eax)
-	movl 60(%esp), %ebp # 4-byte Reload
-	jne .LBB194_10
-# BB#9:
-	movl 4(%esp), %ecx # 4-byte Reload
-	movl %ecx, 84(%esp) # 4-byte Spill
-.LBB194_10:
-	movl 84(%esp), %ecx # 4-byte Reload
-	movl %ecx, 12(%eax)
-	jne .LBB194_12
-# BB#11:
-	movl 8(%esp), %ebp # 4-byte Reload
-.LBB194_12:
-	movl %ebp, 16(%eax)
-	movl 48(%esp), %ecx # 4-byte Reload
-	jne .LBB194_14
-# BB#13:
-	movl 12(%esp), %ecx # 4-byte Reload
-.LBB194_14:
-	movl %ecx, 20(%eax)
-	movl 40(%esp), %ecx # 4-byte Reload
-	jne .LBB194_16
-# BB#15:
-	movl 16(%esp), %ecx # 4-byte Reload
-.LBB194_16:
-	movl %ecx, 24(%eax)
-	movl 32(%esp), %ecx # 4-byte Reload
-	movl %ecx, 28(%eax)
-	movl 36(%esp), %ecx # 4-byte Reload
-	jne .LBB194_18
-# BB#17:
-	movl 20(%esp), %ecx # 4-byte Reload
-.LBB194_18:
-	movl %ecx, 32(%eax)
-	movl 44(%esp), %ecx # 4-byte Reload
-	jne .LBB194_20
-# BB#19:
-	movl 24(%esp), %ecx # 4-byte Reload
-.LBB194_20:
-	movl %ecx, 36(%eax)
-	movl 56(%esp), %ecx # 4-byte Reload
-	jne .LBB194_22
-# BB#21:
-	movl 28(%esp), %ecx # 4-byte Reload
-.LBB194_22:
-	movl %ecx, 40(%eax)
-	movl 52(%esp), %ecx # 4-byte Reload
-	jne .LBB194_24
-# BB#23:
-	movl 72(%esp), %ecx # 4-byte Reload
-.LBB194_24:
-	movl %ecx, 44(%eax)
-	movl 68(%esp), %ecx # 4-byte Reload
-	jne .LBB194_26
-# BB#25:
-	movl %ebx, %ecx
-.LBB194_26:
-	movl %ecx, 48(%eax)
-	addl $1548, %esp # imm = 0x60C
-	popl %esi
-	popl %edi
-	popl %ebx
-	popl %ebp
-	retl
-.Lfunc_end194:
-	.size mcl_fp_mont13L, .Lfunc_end194-mcl_fp_mont13L
-
-	.globl mcl_fp_montNF13L
-	.align 16, 0x90
-	.type mcl_fp_montNF13L,@function
-mcl_fp_montNF13L: # @mcl_fp_montNF13L
-# BB#0:
-	pushl %ebp
-	pushl %ebx
-	pushl %edi
-	pushl %esi
-	subl $1548, %esp # imm = 0x60C
-	calll .L195$pb
-.L195$pb:
-	popl %ebx
-.Ltmp36:
-	addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp36-.L195$pb), %ebx
-	movl 1580(%esp), %eax
-	movl -4(%eax), %esi
-	movl %esi, 32(%esp) # 4-byte Spill
-	movl 1576(%esp), %eax
-	movl (%eax), %eax
-	movl %eax, (%esp)
-	leal 1488(%esp), %ecx
-	movl 1572(%esp), %edx
-	calll .LmulPv416x32
-	movl 1488(%esp), %edi
-	movl 1492(%esp), %eax
-	movl %eax, 80(%esp) # 4-byte Spill
-	movl %edi, %eax
-	imull %esi, %eax
-	movl 1540(%esp), %ecx
-	movl %ecx, 84(%esp) # 4-byte Spill
-	movl 1536(%esp), %ecx
-	movl %ecx, 76(%esp) # 4-byte Spill
-	movl 1532(%esp), %ecx
-	movl %ecx, 72(%esp) # 4-byte Spill
-	movl 1528(%esp), %ecx
-	movl %ecx, 60(%esp) # 4-byte Spill
-	movl 1524(%esp), %ecx
-	movl %ecx, 56(%esp) # 4-byte Spill
-	movl 1520(%esp), %ecx
-	movl %ecx, 52(%esp) # 4-byte Spill
-	movl 1516(%esp), %ecx
-	movl %ecx, 44(%esp) # 4-byte Spill
-	movl 1512(%esp), %ecx
-	movl %ecx, 36(%esp) # 4-byte Spill
-	movl 1508(%esp), %ecx
-	movl %ecx, 48(%esp) # 4-byte Spill
-	movl 1504(%esp), %ecx
-	movl %ecx, 40(%esp) # 4-byte Spill
-	movl 1500(%esp), %esi
-	movl 1496(%esp), %ebp
-	movl %eax, (%esp)
-	leal 1432(%esp), %ecx
-	movl 1580(%esp), %edx
-	calll .LmulPv416x32
-	addl 1432(%esp), %edi
-	movl 80(%esp), %eax # 4-byte Reload
-	adcl 1436(%esp), %eax
-	movl %eax, 80(%esp) # 4-byte Spill
-	adcl 1440(%esp), %ebp
-	adcl 1444(%esp), %esi
-	movl %esi, 64(%esp) # 4-byte Spill
-	movl 40(%esp), %eax # 4-byte Reload
-	adcl 1448(%esp), %eax
-	movl %eax, 40(%esp) # 4-byte Spill
-	movl 48(%esp), %eax # 4-byte Reload
-	adcl 1452(%esp), %eax
-	movl %eax, 48(%esp) # 4-byte Spill
-	movl 36(%esp), %eax # 4-byte Reload
-	adcl 1456(%esp), %eax
-	movl %eax, 36(%esp) # 4-byte Spill
-	movl 44(%esp), %eax # 4-byte Reload
-	adcl 1460(%esp), %eax
-	movl %eax, 44(%esp) # 4-byte Spill
-	movl 52(%esp), %eax # 4-byte Reload
-	adcl 1464(%esp), %eax
-	movl %eax, 52(%esp) # 4-byte Spill
-	movl 56(%esp), %eax # 4-byte Reload
-	adcl 1468(%esp), %eax
-	movl %eax, 56(%esp) # 4-byte Spill
-	movl 60(%esp), %esi # 4-byte Reload
-	adcl 1472(%esp), %esi
-	movl 72(%esp), %eax # 4-byte Reload
-	adcl 1476(%esp), %eax
-	movl %eax, 72(%esp) # 4-byte Spill
-	movl 76(%esp), %eax # 4-byte Reload
-	adcl 1480(%esp), %eax
-	movl %eax, 76(%esp) # 4-byte Spill
-	movl 84(%esp), %edi # 4-byte Reload
-	adcl 1484(%esp), %edi
-	movl 1576(%esp), %eax
-	movl 4(%eax), %eax
-	movl %eax, (%esp)
-	leal 1376(%esp), %ecx
-	movl 1572(%esp), %edx
-	calll .LmulPv416x32
-	movl 1428(%esp), %ecx
-	movl 80(%esp), %edx # 4-byte Reload
-	addl 1376(%esp), %edx
-	adcl 1380(%esp), %ebp
-	movl 64(%esp), %eax # 4-byte Reload
-	adcl 1384(%esp), %eax
-	movl %eax, 64(%esp) # 4-byte Spill
-	movl 40(%esp), %eax # 4-byte Reload
-	adcl 1388(%esp), %eax
-	movl %eax, 40(%esp) # 4-byte Spill
-	movl 48(%esp), %eax # 4-byte Reload
-	adcl 
1392(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1396(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1400(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1404(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1408(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1412(%esp), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1416(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1420(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1424(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl %edx, %esi - movl %esi, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1320(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 1320(%esp), %esi - adcl 1324(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 1328(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1332(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1336(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - adcl 1340(%esp), %esi - movl 44(%esp), %eax # 4-byte Reload - adcl 1344(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1348(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1352(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1356(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl 1360(%esp), %edi - movl 76(%esp), %eax # 4-byte Reload - adcl 1364(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1368(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1372(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 1264(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 1316(%esp), %eax - addl 1264(%esp), %ebp - movl 64(%esp), %ecx # 4-byte Reload - adcl 1268(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1272(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1276(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - adcl 1280(%esp), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - adcl 1284(%esp), %esi - movl 52(%esp), %ecx # 4-byte Reload - adcl 1288(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1292(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1296(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl 1300(%esp), %edi - movl 76(%esp), %ecx # 4-byte Reload - adcl 1304(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1308(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1312(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %ebp, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1208(%esp), %ecx - movl 
1580(%esp), %edx - calll .LmulPv416x32 - addl 1208(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 1212(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1216(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1220(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1224(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - adcl 1228(%esp), %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1232(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1236(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1240(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 1244(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 1248(%esp), %esi - movl 84(%esp), %edi # 4-byte Reload - adcl 1252(%esp), %edi - movl 80(%esp), %ebp # 4-byte Reload - adcl 1256(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 1260(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 1152(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 1204(%esp), %eax - movl 64(%esp), %edx # 4-byte Reload - addl 1152(%esp), %edx - movl 40(%esp), %ecx # 4-byte Reload - adcl 1156(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1160(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 1164(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1168(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1172(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1176(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1180(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1184(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - adcl 1188(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - adcl 1192(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - adcl 1196(%esp), %ebp - movl 68(%esp), %ecx # 4-byte Reload - adcl 1200(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl %edx, %esi - movl %esi, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1096(%esp), %ecx - movl 1580(%esp), %eax - movl %eax, %edx - calll .LmulPv416x32 - addl 1096(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 1100(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1104(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 1108(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1112(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - adcl 1116(%esp), %esi - movl 56(%esp), %edi # 4-byte Reload - adcl 1120(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 1124(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1128(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1132(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 
1136(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 1140(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1144(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 1148(%esp), %ebp - movl 1576(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 1040(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 1092(%esp), %eax - movl 40(%esp), %edx # 4-byte Reload - addl 1040(%esp), %edx - movl 48(%esp), %ecx # 4-byte Reload - adcl 1044(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 36(%esp), %ecx # 4-byte Reload - adcl 1048(%esp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1052(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - adcl 1056(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - adcl 1060(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1064(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1068(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1072(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1076(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1080(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1084(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - adcl 1088(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl %eax, %esi - adcl $0, %esi - movl %edx, %edi - movl %edi, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 984(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 984(%esp), %edi - movl 48(%esp), %eax # 4-byte Reload - adcl 988(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 996(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 1000(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1004(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 1008(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 1012(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1016(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1020(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1024(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1028(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1032(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 1036(%esp), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 928(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 980(%esp), %eax - movl 48(%esp), %ecx # 4-byte Reload - addl 928(%esp), %ecx - movl 36(%esp), %edx # 4-byte Reload - adcl 932(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl 936(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 940(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 944(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - adcl 
948(%esp), %ebp - movl 72(%esp), %edx # 4-byte Reload - adcl 952(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 956(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 960(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 964(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 968(%esp), %esi - movl 64(%esp), %edx # 4-byte Reload - adcl 972(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - adcl 976(%esp), %edx - movl %edx, 40(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 48(%esp) # 4-byte Spill - movl %ecx, %edi - movl %edi, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 872(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 872(%esp), %edi - movl 36(%esp), %edi # 4-byte Reload - adcl 876(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 888(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 892(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 912(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 920(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 924(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 816(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 868(%esp), %edx - addl 816(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 828(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 832(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 836(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 856(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - adcl 860(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 36(%esp) # 4-byte Spill - movl %edi, %eax - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 760(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 760(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 
764(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 780(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 784(%esp), %esi - movl 84(%esp), %edi # 4-byte Reload - adcl 788(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 804(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 704(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 756(%esp), %eax - movl 44(%esp), %ecx # 4-byte Reload - addl 704(%esp), %ecx - movl 52(%esp), %edx # 4-byte Reload - adcl 708(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 712(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 716(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 720(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - adcl 724(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - adcl 728(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl 732(%esp), %esi - movl 68(%esp), %edx # 4-byte Reload - adcl 736(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 740(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - adcl 744(%esp), %ebp - movl 48(%esp), %edx # 4-byte Reload - adcl 748(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 36(%esp), %edx # 4-byte Reload - adcl 752(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 44(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 648(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 648(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 676(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 688(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %edi # 4-byte Reload - adcl 696(%esp), 
%edi - movl 44(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 592(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 644(%esp), %edx - movl 52(%esp), %ecx # 4-byte Reload - addl 592(%esp), %ecx - movl 56(%esp), %esi # 4-byte Reload - adcl 596(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 624(%esp), %ebp - movl 40(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 636(%esp), %edi - movl %edi, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 52(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 536(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 536(%esp), %edi - adcl 540(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 556(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 564(%esp), %esi - adcl 568(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - adcl 572(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 480(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 532(%esp), %edx - movl 56(%esp), %ecx # 4-byte Reload - addl 480(%esp), %ecx - movl 60(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 496(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 504(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl 508(%esp), %edi - adcl 512(%esp), %ebp - movl %ebp, 
40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 528(%esp), %ebp - adcl $0, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 424(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 424(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 440(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 444(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %edi, %esi - adcl 452(%esp), %esi - movl 40(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - adcl 460(%esp), %edi - movl 36(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 472(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 368(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 420(%esp), %edx - movl 60(%esp), %ecx # 4-byte Reload - addl 368(%esp), %ecx - movl 72(%esp), %ebp # 4-byte Reload - adcl 372(%esp), %ebp - movl 76(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 392(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - adcl 400(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 36(%esp), %edi # 4-byte Reload - adcl 404(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 60(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 312(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 312(%esp), %esi - adcl 316(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl 320(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 
80(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 332(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 348(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 256(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 308(%esp), %edx - movl 72(%esp), %ecx # 4-byte Reload - addl 256(%esp), %ecx - adcl 260(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 272(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 288(%esp), %edi - movl %edi, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 72(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 32(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 200(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 200(%esp), %esi - adcl 204(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 212(%esp), %ebp - movl 68(%esp), %esi # 4-byte Reload - adcl 216(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - adcl 228(%esp), %edi - movl 36(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 1576(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 144(%esp), %ecx - movl 1572(%esp), %edx - calll .LmulPv416x32 - movl 
196(%esp), %edx - movl 76(%esp), %ecx # 4-byte Reload - addl 144(%esp), %ecx - movl 84(%esp), %eax # 4-byte Reload - adcl 148(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 152(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - adcl 156(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 160(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - adcl 164(%esp), %ebp - adcl 168(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 176(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 88(%esp), %ecx - movl 1580(%esp), %edx - calll .LmulPv416x32 - addl 88(%esp), %esi - movl 84(%esp), %eax # 4-byte Reload - movl 68(%esp), %edi # 4-byte Reload - adcl 92(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 96(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - adcl 100(%esp), %edi - movl 64(%esp), %ebx # 4-byte Reload - adcl 104(%esp), %ebx - movl %ebx, 64(%esp) # 4-byte Spill - adcl 108(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl %ebp, %esi - movl 48(%esp), %edx # 4-byte Reload - adcl 112(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 36(%esp), %edx # 4-byte Reload - adcl 116(%esp), %edx - movl %edx, 36(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - adcl 120(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 124(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 128(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 132(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 136(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 140(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, %edx - movl 1580(%esp), %eax - subl (%eax), %edx - movl %ecx, %ebp - sbbl 4(%eax), %ebp - movl %edi, %ecx - sbbl 8(%eax), %ecx - sbbl 12(%eax), %ebx - sbbl 16(%eax), %esi - movl %esi, 4(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - sbbl 20(%eax), %esi - movl %esi, 8(%esp) # 4-byte Spill - movl 36(%esp), %esi # 4-byte Reload - sbbl 24(%eax), %esi - movl %esi, 12(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - sbbl 28(%eax), %esi - movl %esi, 16(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - sbbl 32(%eax), %esi - movl %esi, 20(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - sbbl 36(%eax), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - sbbl 40(%eax), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - sbbl 44(%eax), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - sbbl 48(%eax), %esi - movl %esi, 68(%esp) # 4-byte Spill - 
movl %esi, %eax - sarl $31, %eax - testl %eax, %eax - movl 84(%esp), %eax # 4-byte Reload - js .LBB195_2 -# BB#1: - movl %edx, %eax -.LBB195_2: - movl 1568(%esp), %edx - movl %eax, (%edx) - movl 80(%esp), %esi # 4-byte Reload - js .LBB195_4 -# BB#3: - movl %ebp, %esi -.LBB195_4: - movl %esi, 4(%edx) - movl 64(%esp), %eax # 4-byte Reload - js .LBB195_6 -# BB#5: - movl %ecx, %edi -.LBB195_6: - movl %edi, 8(%edx) - js .LBB195_8 -# BB#7: - movl %ebx, %eax -.LBB195_8: - movl %eax, 12(%edx) - movl 40(%esp), %eax # 4-byte Reload - js .LBB195_10 -# BB#9: - movl 4(%esp), %eax # 4-byte Reload -.LBB195_10: - movl %eax, 16(%edx) - movl 48(%esp), %eax # 4-byte Reload - js .LBB195_12 -# BB#11: - movl 8(%esp), %eax # 4-byte Reload -.LBB195_12: - movl %eax, 20(%edx) - movl 36(%esp), %eax # 4-byte Reload - js .LBB195_14 -# BB#13: - movl 12(%esp), %eax # 4-byte Reload -.LBB195_14: - movl %eax, 24(%edx) - movl 44(%esp), %eax # 4-byte Reload - js .LBB195_16 -# BB#15: - movl 16(%esp), %eax # 4-byte Reload -.LBB195_16: - movl %eax, 28(%edx) - movl 52(%esp), %eax # 4-byte Reload - js .LBB195_18 -# BB#17: - movl 20(%esp), %eax # 4-byte Reload -.LBB195_18: - movl %eax, 32(%edx) - movl 56(%esp), %eax # 4-byte Reload - js .LBB195_20 -# BB#19: - movl 24(%esp), %eax # 4-byte Reload -.LBB195_20: - movl %eax, 36(%edx) - movl 60(%esp), %eax # 4-byte Reload - js .LBB195_22 -# BB#21: - movl 28(%esp), %eax # 4-byte Reload -.LBB195_22: - movl %eax, 40(%edx) - movl 72(%esp), %eax # 4-byte Reload - js .LBB195_24 -# BB#23: - movl 32(%esp), %eax # 4-byte Reload -.LBB195_24: - movl %eax, 44(%edx) - movl 76(%esp), %eax # 4-byte Reload - js .LBB195_26 -# BB#25: - movl 68(%esp), %eax # 4-byte Reload -.LBB195_26: - movl %eax, 48(%edx) - addl $1548, %esp # imm = 0x60C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end195: - .size mcl_fp_montNF13L, .Lfunc_end195-mcl_fp_montNF13L - - .globl mcl_fp_montRed13L - .align 16, 0x90 - .type mcl_fp_montRed13L,@function -mcl_fp_montRed13L: # @mcl_fp_montRed13L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $892, %esp # imm = 0x37C - calll .L196$pb -.L196$pb: - popl %eax -.Ltmp37: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp37-.L196$pb), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 920(%esp), %edx - movl -4(%edx), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 916(%esp), %ecx - movl (%ecx), %ebx - movl %ebx, 76(%esp) # 4-byte Spill - movl 4(%ecx), %edi - movl %edi, 80(%esp) # 4-byte Spill - imull %eax, %ebx - movl 100(%ecx), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 96(%ecx), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%ecx), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 88(%ecx), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 84(%ecx), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 80(%ecx), %esi - movl %esi, 136(%esp) # 4-byte Spill - movl 76(%ecx), %esi - movl %esi, 144(%esp) # 4-byte Spill - movl 72(%ecx), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 68(%ecx), %esi - movl %esi, 128(%esp) # 4-byte Spill - movl 64(%ecx), %esi - movl %esi, 148(%esp) # 4-byte Spill - movl 60(%ecx), %esi - movl %esi, 152(%esp) # 4-byte Spill - movl 56(%ecx), %esi - movl %esi, 140(%esp) # 4-byte Spill - movl 52(%ecx), %esi - movl %esi, 156(%esp) # 4-byte Spill - movl 48(%ecx), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 44(%ecx), %ebp - movl %ebp, 124(%esp) # 4-byte Spill - movl 40(%ecx), %ebp - movl %ebp, 108(%esp) # 4-byte Spill - movl 36(%ecx), %ebp - movl %ebp, 104(%esp) # 4-byte Spill - movl 32(%ecx), %eax - movl %eax, 88(%esp) # 4-byte 
Spill - movl 28(%ecx), %ebp - movl 24(%ecx), %edi - movl 20(%ecx), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 16(%ecx), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 12(%ecx), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 8(%ecx), %esi - movl (%edx), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 48(%edx), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 44(%edx), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 40(%edx), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 36(%edx), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 32(%edx), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 28(%edx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 24(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 20(%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 16(%edx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 12(%edx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 8(%edx), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 4(%edx), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl %ebx, (%esp) - leal 832(%esp), %ecx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - movl 76(%esp), %eax # 4-byte Reload - addl 832(%esp), %eax - movl 80(%esp), %ecx # 4-byte Reload - adcl 836(%esp), %ecx - adcl 840(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 856(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - adcl 860(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 872(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 876(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - sbbl %esi, %esi - movl %ecx, %eax - movl %ecx, %edi - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 776(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - andl $1, %esi - addl 776(%esp), %edi - movl 80(%esp), %ecx # 4-byte Reload - adcl 780(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 784(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - 
movl 76(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 828(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - movl 132(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, %edi - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 112(%esp) # 4-byte Spill - movl 96(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 80(%esp) # 4-byte Spill - movl %ecx, %esi - movl %esi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 720(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 720(%esp), %esi - movl 56(%esp), %esi # 4-byte Reload - adcl 724(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 728(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 732(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 736(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 752(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 756(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 132(%esp) # 4-byte Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl 100(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %esi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 664(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 664(%esp), %esi - movl 60(%esp), %ecx 
# 4-byte Reload - adcl 668(%esp), %ecx - movl 64(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 688(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 696(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 712(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 716(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 96(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %ecx, %edi - movl %edi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 608(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 608(%esp), %edi - movl 64(%esp), %ecx # 4-byte Reload - adcl 612(%esp), %ecx - movl 68(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 624(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 636(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - movl 144(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 92(%esp) # 4-byte Spill 
- adcl $0, 80(%esp) # 4-byte Folded Spill - movl %ecx, %esi - movl %esi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 552(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 552(%esp), %esi - movl 68(%esp), %ecx # 4-byte Reload - adcl 556(%esp), %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 144(%esp) # 4-byte Spill - movl 136(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 112(%esp) # 4-byte Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %ecx, %edi - movl %edi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 496(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 496(%esp), %edi - movl 76(%esp), %ecx # 4-byte Reload - adcl 500(%esp), %ecx - movl 88(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 152(%esp), %ebp # 4-byte Reload - adcl 532(%esp), %ebp - movl 148(%esp), %edi # 4-byte Reload - adcl 536(%esp), %edi - movl 128(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - adcl $0, %esi - movl %esi, 136(%esp) # 4-byte Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 
92(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %esi - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 440(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 440(%esp), %esi - movl 88(%esp), %ecx # 4-byte Reload - adcl 444(%esp), %ecx - movl 104(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - adcl 472(%esp), %ebp - movl %ebp, 152(%esp) # 4-byte Spill - adcl 476(%esp), %edi - movl %edi, 148(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %ecx, %esi - movl %esi, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 384(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 384(%esp), %esi - movl 104(%esp), %ecx # 4-byte Reload - adcl 388(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 156(%esp), %ebp # 4-byte Reload - adcl 404(%esp), %ebp - movl 140(%esp), %edi # 4-byte Reload - adcl 408(%esp), %edi - movl 152(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 128(%esp), %esi # 4-byte Reload - adcl 420(%esp), %esi - movl 132(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %ecx, %eax - imull 72(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 328(%esp), %ecx - movl 920(%esp), %eax - movl %eax, %edx - movl 84(%esp), %ebx # 4-byte 
Reload - calll .LmulPv416x32 - movl 104(%esp), %eax # 4-byte Reload - addl 328(%esp), %eax - movl 108(%esp), %ecx # 4-byte Reload - adcl 332(%esp), %ecx - movl 124(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl 344(%esp), %ebp - movl %ebp, 156(%esp) # 4-byte Spill - adcl 348(%esp), %edi - movl %edi, 140(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - adcl 360(%esp), %esi - movl %esi, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl $0, 96(%esp) # 4-byte Folded Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl %ecx, %edi - movl %edi, %eax - movl 72(%esp), %esi # 4-byte Reload - imull %esi, %eax - movl %eax, (%esp) - leal 272(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 272(%esp), %edi - movl 124(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl 120(%esp), %edi # 4-byte Reload - adcl 280(%esp), %edi - movl 156(%esp), %ecx # 4-byte Reload - adcl 284(%esp), %ecx - movl %ecx, 156(%esp) # 4-byte Spill - movl 140(%esp), %ecx # 4-byte Reload - adcl 288(%esp), %ecx - movl %ecx, 140(%esp) # 4-byte Spill - movl 152(%esp), %ecx # 4-byte Reload - adcl 292(%esp), %ecx - movl %ecx, 152(%esp) # 4-byte Spill - movl 148(%esp), %ecx # 4-byte Reload - adcl 296(%esp), %ecx - movl %ecx, 148(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 300(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 304(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 144(%esp), %ecx # 4-byte Reload - adcl 308(%esp), %ecx - movl %ecx, 144(%esp) # 4-byte Spill - movl 136(%esp), %ecx # 4-byte Reload - adcl 312(%esp), %ecx - movl %ecx, 136(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 316(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 320(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 324(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - adcl $0, 100(%esp) # 4-byte Folded Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl %eax, %ebp - imull %esi, %eax - movl %eax, (%esp) - leal 216(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 216(%esp), %ebp - movl %edi, %ecx - adcl 220(%esp), %ecx - movl 156(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 140(%esp), %ebp # 4-byte Reload - adcl 228(%esp), %ebp - movl 152(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 
236(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %edi # 4-byte Reload - adcl 244(%esp), %edi - movl 144(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 92(%esp) # 4-byte Folded Spill - adcl $0, 80(%esp) # 4-byte Folded Spill - movl %esi, %eax - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 160(%esp), %ecx - movl 920(%esp), %edx - movl 84(%esp), %ebx # 4-byte Reload - calll .LmulPv416x32 - addl 160(%esp), %esi - movl 156(%esp), %eax # 4-byte Reload - adcl 164(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - adcl 168(%esp), %ebp - movl %ebp, 140(%esp) # 4-byte Spill - movl %ebp, %ebx - movl 152(%esp), %ecx # 4-byte Reload - adcl 172(%esp), %ecx - movl %ecx, 152(%esp) # 4-byte Spill - movl 148(%esp), %ebp # 4-byte Reload - adcl 176(%esp), %ebp - movl %ebp, 148(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl %eax, %edx - movl %edi, %eax - adcl 184(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl $0, %esi - movl 156(%esp), %edi # 4-byte Reload - subl 12(%esp), %edi # 4-byte Folded Reload - sbbl 4(%esp), %ebx # 4-byte Folded Reload - sbbl 8(%esp), %ecx # 4-byte Folded Reload - sbbl 16(%esp), %ebp # 4-byte Folded Reload - sbbl 20(%esp), %edx # 4-byte Folded Reload - movl %edx, 72(%esp) # 4-byte Spill - movl 132(%esp), %edx # 4-byte Reload - sbbl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 76(%esp) # 4-byte Spill - movl 144(%esp), %edx # 4-byte Reload - sbbl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - movl 136(%esp), %edx # 4-byte Reload - sbbl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl 116(%esp), %edx # 4-byte Reload - sbbl 36(%esp), %edx # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl 112(%esp), %edx # 4-byte Reload - sbbl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 104(%esp) # 4-byte Spill - movl 96(%esp), %edx # 4-byte Reload - sbbl 44(%esp), %edx # 4-byte Folded Reload - movl %edx, 108(%esp) # 4-byte Spill - movl 100(%esp), %edx # 4-byte Reload - sbbl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, 
120(%esp) # 4-byte Spill - movl %eax, %edx - movl %esi, %eax - sbbl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 124(%esp) # 4-byte Spill - sbbl $0, %eax - andl $1, %eax - jne .LBB196_2 -# BB#1: - movl %ebp, 148(%esp) # 4-byte Spill -.LBB196_2: - testb %al, %al - movl 156(%esp), %ebp # 4-byte Reload - jne .LBB196_4 -# BB#3: - movl %edi, %ebp -.LBB196_4: - movl 912(%esp), %edi - movl %ebp, (%edi) - movl 140(%esp), %ebp # 4-byte Reload - jne .LBB196_6 -# BB#5: - movl %ebx, %ebp -.LBB196_6: - movl %ebp, 4(%edi) - movl 152(%esp), %ebx # 4-byte Reload - jne .LBB196_8 -# BB#7: - movl %ecx, %ebx -.LBB196_8: - movl %ebx, 8(%edi) - movl 148(%esp), %esi # 4-byte Reload - movl %esi, 12(%edi) - movl 116(%esp), %ebx # 4-byte Reload - movl 128(%esp), %esi # 4-byte Reload - jne .LBB196_10 -# BB#9: - movl 72(%esp), %esi # 4-byte Reload -.LBB196_10: - movl %esi, 16(%edi) - movl 112(%esp), %esi # 4-byte Reload - movl 132(%esp), %edx # 4-byte Reload - jne .LBB196_12 -# BB#11: - movl 76(%esp), %edx # 4-byte Reload -.LBB196_12: - movl %edx, 20(%edi) - movl 96(%esp), %edx # 4-byte Reload - movl 144(%esp), %ecx # 4-byte Reload - jne .LBB196_14 -# BB#13: - movl 80(%esp), %ecx # 4-byte Reload -.LBB196_14: - movl %ecx, 24(%edi) - movl 100(%esp), %ecx # 4-byte Reload - movl 136(%esp), %eax # 4-byte Reload - jne .LBB196_16 -# BB#15: - movl 84(%esp), %eax # 4-byte Reload -.LBB196_16: - movl %eax, 28(%edi) - movl 92(%esp), %eax # 4-byte Reload - jne .LBB196_18 -# BB#17: - movl 88(%esp), %ebx # 4-byte Reload -.LBB196_18: - movl %ebx, 32(%edi) - jne .LBB196_20 -# BB#19: - movl 104(%esp), %esi # 4-byte Reload -.LBB196_20: - movl %esi, 36(%edi) - jne .LBB196_22 -# BB#21: - movl 108(%esp), %edx # 4-byte Reload -.LBB196_22: - movl %edx, 40(%edi) - jne .LBB196_24 -# BB#23: - movl 120(%esp), %ecx # 4-byte Reload -.LBB196_24: - movl %ecx, 44(%edi) - jne .LBB196_26 -# BB#25: - movl 124(%esp), %eax # 4-byte Reload -.LBB196_26: - movl %eax, 48(%edi) - addl $892, %esp # imm = 0x37C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end196: - .size mcl_fp_montRed13L, .Lfunc_end196-mcl_fp_montRed13L - - .globl mcl_fp_addPre13L - .align 16, 0x90 - .type mcl_fp_addPre13L,@function -mcl_fp_addPre13L: # @mcl_fp_addPre13L -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - movl 20(%esp), %ecx - addl (%ecx), %edx - adcl 4(%ecx), %esi - movl 8(%eax), %edi - adcl 8(%ecx), %edi - movl 16(%esp), %ebx - movl %edx, (%ebx) - movl 12(%ecx), %edx - movl %esi, 4(%ebx) - movl 16(%ecx), %esi - adcl 12(%eax), %edx - adcl 16(%eax), %esi - movl %edi, 8(%ebx) - movl 20(%eax), %edi - movl %edx, 12(%ebx) - movl 20(%ecx), %edx - adcl %edi, %edx - movl 24(%eax), %edi - movl %esi, 16(%ebx) - movl 24(%ecx), %esi - adcl %edi, %esi - movl 28(%eax), %edi - movl %edx, 20(%ebx) - movl 28(%ecx), %edx - adcl %edi, %edx - movl 32(%eax), %edi - movl %esi, 24(%ebx) - movl 32(%ecx), %esi - adcl %edi, %esi - movl 36(%eax), %edi - movl %edx, 28(%ebx) - movl 36(%ecx), %edx - adcl %edi, %edx - movl 40(%eax), %edi - movl %esi, 32(%ebx) - movl 40(%ecx), %esi - adcl %edi, %esi - movl 44(%eax), %edi - movl %edx, 36(%ebx) - movl 44(%ecx), %edx - adcl %edi, %edx - movl %esi, 40(%ebx) - movl %edx, 44(%ebx) - movl 48(%eax), %eax - movl 48(%ecx), %ecx - adcl %eax, %ecx - movl %ecx, 48(%ebx) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end197: - .size mcl_fp_addPre13L, .Lfunc_end197-mcl_fp_addPre13L - - .globl mcl_fp_subPre13L - .align 16, 0x90 - .type 
[Editor's note: this hunk deletes part of the vendored mcl library's machine-generated
32-bit x86 assembly back-end from its old import path as part of the rebrand. The
extracted text lost its line structure here (thousands of one-instruction diff lines
were run together), so the hand-scheduled instruction bodies are elided. The symbols
removed in this span, in order, with roles inferable from the surviving directives
and instruction patterns:

-mcl_fp_subPre13L      (.Lfunc_end198)  13-limb subtraction via sbbl chain, borrow returned
-mcl_fp_shr1_13L       (.Lfunc_end199)  13-limb logical right shift by 1 via shrdl chain
-mcl_fp_add13L         (.Lfunc_end200)  13-limb addition with conditional subtraction of the modulus
-mcl_fp_addNF13L       (.Lfunc_end201)  13-limb addition, sign-select ("no final carry") variant
-mcl_fp_sub13L         (.Lfunc_end202)  13-limb subtraction with conditional add-back of the modulus
-mcl_fp_subNF13L       (.Lfunc_end203)  13-limb subtraction, mask-based NF variant
-mcl_fpDbl_add13L      (.Lfunc_end204)  26-limb double-width addition with conditional reduce
-mcl_fpDbl_sub13L      (.Lfunc_end205)  26-limb double-width subtraction with conditional add-back
-.LmulPv448x32         (.Lfunc_end206)  local helper: 448-bit vector times 32-bit word multiply
-mcl_fp_mulUnitPre14L  (.Lfunc_end207)  14-limb multiply by a single word, via .LmulPv448x32
-mcl_fpDbl_mulPre14L   (.Lfunc_end208)  14-limb full multiply via three mcl_fpDbl_mulPre7L calls (Karatsuba)
-mcl_fpDbl_sqrPre14L   (.Lfunc_end209)  14-limb squaring, same three-half-multiply structure
-mcl_fp_mont14L        (truncated)      14-limb Montgomery multiplication; its body continues past this excerpt]
4-byte Reload - adcl 1120(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1124(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1128(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %eax, %edi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1008(%esp), %ecx - movl 1932(%esp), %edx - calll .LmulPv448x32 - andl $1, %esi - movl %esi, %ecx - addl 1008(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 1012(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1016(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 1020(%esp), %esi - movl 80(%esp), %eax # 4-byte Reload - adcl 1024(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1028(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1032(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %edi # 4-byte Reload - adcl 1036(%esp), %edi - movl 108(%esp), %eax # 4-byte Reload - adcl 1040(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 1044(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1048(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 84(%esp), %ebp # 4-byte Reload - adcl 1052(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 1056(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1060(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1064(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 1928(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 944(%esp), %ecx - movl 1924(%esp), %edx - calll .LmulPv448x32 - movl 52(%esp), %eax # 4-byte Reload - addl 944(%esp), %eax - movl 56(%esp), %ecx # 4-byte Reload - adcl 948(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl 952(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 956(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 960(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 96(%esp), %esi # 4-byte Reload - adcl 964(%esp), %esi - adcl 968(%esp), %edi - movl %edi, 104(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 972(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 976(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 980(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - adcl 984(%esp), %ebp - movl %ebp, 84(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 988(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 992(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 996(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1000(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %eax, %edi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 880(%esp), %ecx - movl 1932(%esp), %edx - calll .LmulPv448x32 - andl $1, %ebp - addl 880(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 
4-byte Reload - adcl 888(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %edi # 4-byte Reload - adcl 896(%esp), %edi - adcl 900(%esp), %esi - movl %esi, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 912(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 920(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 924(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 928(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 932(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 936(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 1928(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 816(%esp), %ecx - movl 1924(%esp), %edx - calll .LmulPv448x32 - movl 56(%esp), %ecx # 4-byte Reload - addl 816(%esp), %ecx - movl 64(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 824(%esp), %ebp - adcl 828(%esp), %edi - movl %edi, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 856(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - adcl 872(%esp), %esi - sbbl %eax, %eax - movl %eax, 56(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 752(%esp), %ecx - movl 1932(%esp), %edx - calll .LmulPv448x32 - movl 56(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 752(%esp), %edi - movl 64(%esp), %ecx # 4-byte Reload - adcl 756(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - adcl 760(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 764(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 768(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 772(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 776(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 780(%esp), %ecx - movl %ecx, 
92(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 784(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 788(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 792(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl 796(%esp), %ebp - movl 68(%esp), %edi # 4-byte Reload - adcl 800(%esp), %edi - movl 60(%esp), %ecx # 4-byte Reload - adcl 804(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl 808(%esp), %esi - adcl $0, %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 1928(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 688(%esp), %ecx - movl 1924(%esp), %edx - calll .LmulPv448x32 - movl 64(%esp), %ecx # 4-byte Reload - addl 688(%esp), %ecx - movl 80(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 696(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 712(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 716(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 720(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 728(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - adcl 732(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 736(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 740(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 624(%esp), %ecx - movl 1932(%esp), %edx - calll .LmulPv448x32 - movl %edi, %ecx - andl $1, %ecx - addl 624(%esp), %esi - movl 80(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %ebp # 4-byte Reload - adcl 636(%esp), %ebp - movl 104(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 648(%esp), %esi - movl 100(%esp), %edi # 4-byte Reload - adcl 652(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - 
movl %eax, 56(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 1928(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 560(%esp), %ecx - movl 1924(%esp), %edx - calll .LmulPv448x32 - movl 80(%esp), %ecx # 4-byte Reload - addl 560(%esp), %ecx - movl 88(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 568(%esp), %ebp - movl %ebp, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 580(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - adcl 584(%esp), %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %ebp # 4-byte Reload - adcl 592(%esp), %ebp - movl 76(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 496(%esp), %ecx - movl 1932(%esp), %edx - calll .LmulPv448x32 - andl $1, %edi - movl %edi, %ecx - addl 496(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %edi # 4-byte Reload - adcl 508(%esp), %edi - movl 108(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 520(%esp), %esi - movl 84(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 528(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 540(%esp), %ebp - movl 52(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 1928(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 432(%esp), %ecx - movl 1924(%esp), %edx - calll .LmulPv448x32 - movl 88(%esp), %ecx # 4-byte Reload - addl 432(%esp), %ecx - movl 96(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 440(%esp), %edi - movl %edi, 104(%esp) # 4-byte Spill - movl 108(%esp), %edi # 4-byte Reload - adcl 444(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - 
adcl 452(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 472(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - movl %ecx, %ebp - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 368(%esp), %ecx - movl 1932(%esp), %edx - calll .LmulPv448x32 - andl $1, %esi - movl %esi, %ecx - addl 368(%esp), %ebp - movl 96(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %esi # 4-byte Reload - adcl 376(%esp), %esi - adcl 380(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 84(%esp), %ebp # 4-byte Reload - adcl 392(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 1928(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 304(%esp), %ecx - movl 1924(%esp), %edx - calll .LmulPv448x32 - movl 96(%esp), %ecx # 4-byte Reload - addl 304(%esp), %ecx - adcl 308(%esp), %esi - movl %esi, 104(%esp) # 4-byte Spill - adcl 312(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 324(%esp), %ebp - movl %ebp, 84(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 328(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload 
- adcl 352(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 96(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 240(%esp), %ecx - movl 1932(%esp), %edx - calll .LmulPv448x32 - movl 96(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 240(%esp), %ebp - movl 104(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 248(%esp), %edi - movl %edi, 108(%esp) # 4-byte Spill - movl 92(%esp), %edi # 4-byte Reload - adcl 252(%esp), %edi - movl 100(%esp), %ebp # 4-byte Reload - adcl 256(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 264(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 268(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 1928(%esp), %eax - movl 52(%eax), %eax - movl %eax, (%esp) - leal 176(%esp), %ecx - movl 1924(%esp), %edx - calll .LmulPv448x32 - movl 104(%esp), %ecx # 4-byte Reload - addl 176(%esp), %ecx - movl 108(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 184(%esp), %edi - movl %edi, 92(%esp) # 4-byte Spill - adcl 188(%esp), %ebp - movl %ebp, 100(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 192(%esp), %edi - movl 72(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 200(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl 48(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 112(%esp), %ecx - movl 1932(%esp), %edx - calll .LmulPv448x32 - andl $1, %ebp - addl 112(%esp), %esi - movl 100(%esp), %esi # 4-byte Reload - movl 108(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax - movl 
%eax, 108(%esp) # 4-byte Spill - movl 92(%esp), %edx # 4-byte Reload - adcl 120(%esp), %edx - movl %edx, 92(%esp) # 4-byte Spill - adcl 124(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - adcl 128(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 132(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl %ecx, %ebx - movl 76(%esp), %ecx # 4-byte Reload - adcl 136(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 140(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 144(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 148(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 152(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 156(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 160(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 164(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 168(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - adcl $0, %ebp - movl 1932(%esp), %ecx - subl (%ecx), %eax - sbbl 4(%ecx), %edx - sbbl 8(%ecx), %esi - sbbl 12(%ecx), %edi - movl %edi, 16(%esp) # 4-byte Spill - sbbl 16(%ecx), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - sbbl 20(%ecx), %edi - movl %edi, 24(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - sbbl 24(%ecx), %edi - movl %edi, 28(%esp) # 4-byte Spill - movl 60(%esp), %ebx # 4-byte Reload - sbbl 28(%ecx), %ebx - movl 52(%esp), %edi # 4-byte Reload - sbbl 32(%ecx), %edi - movl %edi, 32(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - sbbl 36(%ecx), %edi - movl %edi, 36(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - sbbl 40(%ecx), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - sbbl 44(%ecx), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 88(%esp), %edi # 4-byte Reload - sbbl 48(%ecx), %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 96(%esp), %edi # 4-byte Reload - sbbl 52(%ecx), %edi - movl %ebp, %ecx - movl %edi, 104(%esp) # 4-byte Spill - sbbl $0, %ecx - andl $1, %ecx - jne .LBB210_2 -# BB#1: - movl %ebx, 60(%esp) # 4-byte Spill -.LBB210_2: - testb %cl, %cl - movl 108(%esp), %ebx # 4-byte Reload - jne .LBB210_4 -# BB#3: - movl %eax, %ebx -.LBB210_4: - movl 1920(%esp), %eax - movl %ebx, (%eax) - movl 92(%esp), %edi # 4-byte Reload - movl 72(%esp), %ecx # 4-byte Reload - jne .LBB210_6 -# BB#5: - movl %edx, %edi -.LBB210_6: - movl %edi, 4(%eax) - jne .LBB210_8 -# BB#7: - movl %esi, 100(%esp) # 4-byte Spill -.LBB210_8: - movl 100(%esp), %edx # 4-byte Reload - movl %edx, 8(%eax) - jne .LBB210_10 -# BB#9: - movl 16(%esp), %edx # 4-byte Reload - movl %edx, 84(%esp) # 4-byte Spill -.LBB210_10: - movl 84(%esp), %edx # 4-byte Reload - movl %edx, 12(%eax) - jne .LBB210_12 -# BB#11: - movl 20(%esp), %ecx # 4-byte Reload -.LBB210_12: - movl %ecx, 16(%eax) - movl 76(%esp), %ecx # 4-byte Reload - jne .LBB210_14 -# BB#13: - movl 24(%esp), %ecx # 4-byte Reload -.LBB210_14: - movl %ecx, 20(%eax) - movl 68(%esp), %ecx # 4-byte Reload - jne .LBB210_16 -# BB#15: - movl 28(%esp), %ecx # 4-byte Reload -.LBB210_16: - movl %ecx, 24(%eax) - movl 60(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 52(%esp), %ecx # 4-byte Reload - jne .LBB210_18 -# BB#17: - movl 
32(%esp), %ecx # 4-byte Reload -.LBB210_18: - movl %ecx, 32(%eax) - movl 56(%esp), %ecx # 4-byte Reload - jne .LBB210_20 -# BB#19: - movl 36(%esp), %ecx # 4-byte Reload -.LBB210_20: - movl %ecx, 36(%eax) - movl 64(%esp), %ecx # 4-byte Reload - jne .LBB210_22 -# BB#21: - movl 40(%esp), %ecx # 4-byte Reload -.LBB210_22: - movl %ecx, 40(%eax) - movl 80(%esp), %ecx # 4-byte Reload - jne .LBB210_24 -# BB#23: - movl 44(%esp), %ecx # 4-byte Reload -.LBB210_24: - movl %ecx, 44(%eax) - movl 88(%esp), %ecx # 4-byte Reload - jne .LBB210_26 -# BB#25: - movl 48(%esp), %ecx # 4-byte Reload -.LBB210_26: - movl %ecx, 48(%eax) - movl 96(%esp), %ecx # 4-byte Reload - jne .LBB210_28 -# BB#27: - movl 104(%esp), %ecx # 4-byte Reload -.LBB210_28: - movl %ecx, 52(%eax) - addl $1900, %esp # imm = 0x76C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end210: - .size mcl_fp_mont14L, .Lfunc_end210-mcl_fp_mont14L - - .globl mcl_fp_montNF14L - .align 16, 0x90 - .type mcl_fp_montNF14L,@function -mcl_fp_montNF14L: # @mcl_fp_montNF14L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1884, %esp # imm = 0x75C - calll .L211$pb -.L211$pb: - popl %ebx -.Ltmp42: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp42-.L211$pb), %ebx - movl 1916(%esp), %eax - movl -4(%eax), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 1912(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1824(%esp), %ecx - movl 1908(%esp), %edx - calll .LmulPv448x32 - movl 1824(%esp), %edi - movl 1828(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %edi, %eax - imull %esi, %eax - movl 1880(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 1876(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 1872(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 1868(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 1864(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 1860(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 1856(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 1852(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 1848(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 1844(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 1840(%esp), %esi - movl 1836(%esp), %ebp - movl 1832(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl %eax, (%esp) - leal 1760(%esp), %ecx - movl 1916(%esp), %edx - calll .LmulPv448x32 - addl 1760(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 1764(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1768(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 1772(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - adcl 1776(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 1780(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1784(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1788(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1792(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1796(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1800(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1804(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 1808(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 1812(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 
92(%esp), %ebp # 4-byte Reload - adcl 1816(%esp), %ebp - movl 1912(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 1696(%esp), %ecx - movl 1908(%esp), %edx - calll .LmulPv448x32 - movl 1752(%esp), %eax - movl 68(%esp), %edx # 4-byte Reload - addl 1696(%esp), %edx - movl 88(%esp), %ecx # 4-byte Reload - adcl 1700(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1704(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - adcl 1708(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1712(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1716(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1720(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1724(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1728(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1732(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1736(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - adcl 1740(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - adcl 1744(%esp), %edi - adcl 1748(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, %ebp - movl %edx, %eax - movl %edx, %esi - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1632(%esp), %ecx - movl 1916(%esp), %eax - movl %eax, %edx - calll .LmulPv448x32 - addl 1632(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 1636(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1640(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1644(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1648(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1652(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1656(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1660(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - adcl 1664(%esp), %esi - movl 52(%esp), %eax # 4-byte Reload - adcl 1668(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1672(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1676(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1680(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 1684(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 1688(%esp), %ebp - movl 1912(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 1568(%esp), %ecx - movl 1908(%esp), %edx - calll .LmulPv448x32 - movl 1624(%esp), %eax - movl 88(%esp), %edx # 4-byte Reload - addl 1568(%esp), %edx - movl 72(%esp), %ecx # 4-byte Reload - adcl 1572(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1576(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1580(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1584(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1588(%esp), %ecx - movl %ecx, 
44(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1592(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - adcl 1596(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1600(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1604(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1608(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - adcl 1612(%esp), %edi - movl 92(%esp), %ecx # 4-byte Reload - adcl 1616(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl 1620(%esp), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, %ebp - movl %edx, %esi - movl %esi, %eax - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1504(%esp), %ecx - movl 1916(%esp), %edx - calll .LmulPv448x32 - addl 1504(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 1508(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1512(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1516(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1520(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1524(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1528(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1532(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1536(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1540(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 1544(%esp), %esi - adcl 1548(%esp), %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1552(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1556(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 1560(%esp), %ebp - movl 1912(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 1440(%esp), %ecx - movl 1908(%esp), %eax - movl %eax, %edx - calll .LmulPv448x32 - movl 1496(%esp), %eax - movl 72(%esp), %edx # 4-byte Reload - addl 1440(%esp), %edx - movl 68(%esp), %ecx # 4-byte Reload - adcl 1444(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1448(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1452(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1456(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1460(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - adcl 1464(%esp), %edi - movl 52(%esp), %ecx # 4-byte Reload - adcl 1468(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1472(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - adcl 1476(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1480(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1484(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 88(%esp), %esi # 4-byte Reload - adcl 1488(%esp), %esi - adcl 1492(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 72(%esp) # 4-byte 
Spill - movl %edx, %eax - movl %edx, %ebp - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1376(%esp), %ecx - movl 1916(%esp), %edx - calll .LmulPv448x32 - addl 1376(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 1380(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1384(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1388(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1392(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1396(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 1400(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1404(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1408(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1412(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1416(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1420(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 1424(%esp), %esi - movl %esi, 88(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1428(%esp), %eax - movl %eax, %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 1432(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 1912(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 1312(%esp), %ecx - movl 1908(%esp), %edx - calll .LmulPv448x32 - movl 1368(%esp), %eax - movl 68(%esp), %edx # 4-byte Reload - addl 1312(%esp), %edx - movl 60(%esp), %ecx # 4-byte Reload - adcl 1316(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - adcl 1320(%esp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 1324(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 48(%esp), %edi # 4-byte Reload - adcl 1328(%esp), %edi - movl 56(%esp), %ecx # 4-byte Reload - adcl 1332(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1336(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - adcl 1340(%esp), %esi - movl 76(%esp), %ecx # 4-byte Reload - adcl 1344(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1348(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1352(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1356(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - adcl 1360(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1364(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %edx, %ebp - movl %ebp, %eax - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1248(%esp), %ecx - movl 1916(%esp), %edx - calll .LmulPv448x32 - addl 1248(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 1252(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 1256(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1260(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 1264(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1268(%esp), 
%eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 1272(%esp), %ebp - adcl 1276(%esp), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1280(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - adcl 1284(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 1288(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1292(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1296(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 1300(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 1304(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 1912(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 1184(%esp), %ecx - movl 1908(%esp), %edx - calll .LmulPv448x32 - movl 1240(%esp), %edx - movl 60(%esp), %ecx # 4-byte Reload - addl 1184(%esp), %ecx - movl 40(%esp), %eax # 4-byte Reload - adcl 1188(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 1192(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1196(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1200(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1204(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1208(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1212(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1216(%esp), %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1220(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1224(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1228(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 1232(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1236(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 60(%esp) # 4-byte Spill - movl %ecx, %esi - movl %esi, %eax - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1120(%esp), %ecx - movl 1916(%esp), %edx - calll .LmulPv448x32 - addl 1120(%esp), %esi - movl 40(%esp), %ebp # 4-byte Reload - adcl 1124(%esp), %ebp - movl 44(%esp), %edi # 4-byte Reload - adcl 1128(%esp), %edi - movl 48(%esp), %eax # 4-byte Reload - adcl 1132(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1136(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1140(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1144(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1148(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1152(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 1156(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 1160(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1164(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1168(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 
1172(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1176(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1912(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 1056(%esp), %ecx - movl 1908(%esp), %edx - calll .LmulPv448x32 - movl 1112(%esp), %eax - movl %ebp, %ecx - addl 1056(%esp), %ecx - adcl 1060(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - adcl 1064(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 1068(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 1072(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 1076(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 1080(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 1084(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - adcl 1088(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 88(%esp), %edx # 4-byte Reload - adcl 1092(%esp), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 1096(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 1100(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 1104(%esp), %ebp - movl 60(%esp), %edx # 4-byte Reload - adcl 1108(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, %esi - movl %ecx, %edi - movl %edi, %eax - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 992(%esp), %ecx - movl 1916(%esp), %edx - calll .LmulPv448x32 - addl 992(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 996(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1000(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1004(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 1008(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 1012(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1016(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1020(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1024(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1028(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1032(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1036(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 1040(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 1044(%esp), %ebp - adcl 1048(%esp), %esi - movl 1912(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 928(%esp), %ecx - movl 1908(%esp), %edx - calll .LmulPv448x32 - movl 984(%esp), %eax - movl 44(%esp), %ecx # 4-byte Reload - addl 928(%esp), %ecx - movl 48(%esp), %edx # 4-byte Reload - adcl 932(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 936(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - adcl 940(%esp), %edi - movl 84(%esp), %edx # 4-byte Reload - adcl 944(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 948(%esp), %edx - 
movl %edx, 76(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 952(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 92(%esp), %edx # 4-byte Reload - adcl 956(%esp), %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 88(%esp), %edx # 4-byte Reload - adcl 960(%esp), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 964(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 968(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 972(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - adcl 976(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - adcl 980(%esp), %esi - movl %esi, %ebp - adcl $0, %eax - movl %eax, 40(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 864(%esp), %ecx - movl 1916(%esp), %edx - calll .LmulPv448x32 - addl 864(%esp), %esi - movl 48(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 872(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 876(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 884(%esp), %esi - movl 80(%esp), %eax # 4-byte Reload - adcl 888(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 912(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 916(%esp), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - adcl 920(%esp), %ebp - movl 1912(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 800(%esp), %ecx - movl 1908(%esp), %edx - calll .LmulPv448x32 - movl 856(%esp), %edx - movl 48(%esp), %ecx # 4-byte Reload - addl 800(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 808(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 816(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 88(%esp), %esi # 4-byte Reload - adcl 828(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 852(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 48(%esp) # 4-byte Spill - 
movl %ecx, %eax - movl %ecx, %edi - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 736(%esp), %ecx - movl 1916(%esp), %edx - calll .LmulPv448x32 - addl 736(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 752(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 756(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 764(%esp), %esi - movl %esi, 88(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 768(%esp), %ebp - movl 72(%esp), %esi # 4-byte Reload - adcl 772(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 780(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 784(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 1912(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 672(%esp), %ecx - movl 1908(%esp), %edx - calll .LmulPv448x32 - movl 728(%esp), %edx - movl 56(%esp), %ecx # 4-byte Reload - addl 672(%esp), %ecx - movl 52(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 688(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 696(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 700(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - adcl 704(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 712(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 716(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 720(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 56(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 608(%esp), %ecx - movl 1916(%esp), %edx - calll .LmulPv448x32 - addl 608(%esp), %esi - movl 52(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %ebp # 4-byte Reload - adcl 616(%esp), %ebp - movl 76(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - adcl 624(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill 
- movl 64(%esp), %eax # 4-byte Reload - adcl 636(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 644(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 1912(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 544(%esp), %ecx - movl 1908(%esp), %edx - calll .LmulPv448x32 - movl 600(%esp), %edx - movl 52(%esp), %ecx # 4-byte Reload - addl 544(%esp), %ecx - adcl 548(%esp), %ebp - movl %ebp, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 556(%esp), %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 568(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 576(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 52(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 480(%esp), %ecx - movl 1916(%esp), %edx - calll .LmulPv448x32 - addl 480(%esp), %esi - movl 84(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 488(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 496(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 504(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 532(%esp), %ebp - movl 52(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 52(%esp) # 4-byte 
Spill - movl 1912(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 416(%esp), %ecx - movl 1908(%esp), %edx - calll .LmulPv448x32 - movl 472(%esp), %edx - movl 84(%esp), %ecx # 4-byte Reload - addl 416(%esp), %ecx - adcl 420(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - adcl 424(%esp), %edi - adcl 428(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 440(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 444(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 464(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 84(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 352(%esp), %ecx - movl 1916(%esp), %edx - calll .LmulPv448x32 - addl 352(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 360(%esp), %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 364(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 388(%esp), %edi - movl 40(%esp), %ebp # 4-byte Reload - adcl 392(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 1912(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 288(%esp), %ecx - movl 1908(%esp), %edx - calll .LmulPv448x32 - movl 344(%esp), %edx - movl 76(%esp), %ecx # 4-byte Reload - addl 288(%esp), %ecx - movl 80(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 296(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - 
movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 320(%esp), %edi - adcl 324(%esp), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 328(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 76(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 224(%esp), %ecx - movl 1916(%esp), %edx - calll .LmulPv448x32 - addl 224(%esp), %esi - movl 80(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 232(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 256(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 40(%esp), %edi # 4-byte Reload - adcl 260(%esp), %edi - adcl 264(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1912(%esp), %eax - movl 52(%eax), %eax - movl %eax, (%esp) - leal 160(%esp), %ecx - movl 1908(%esp), %edx - calll .LmulPv448x32 - movl 216(%esp), %edx - movl 80(%esp), %ecx # 4-byte Reload - addl 160(%esp), %ecx - adcl 164(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl 168(%esp), %ebp - movl 64(%esp), %eax # 4-byte Reload - adcl 172(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 176(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 188(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - adcl 192(%esp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl 
%eax, (%esp) - leal 96(%esp), %ecx - movl 1916(%esp), %edx - calll .LmulPv448x32 - addl 96(%esp), %esi - movl 64(%esp), %esi # 4-byte Reload - movl 92(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl 104(%esp), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - movl %ebp, %ebx - adcl 108(%esp), %esi - adcl 112(%esp), %edi - movl 68(%esp), %edx # 4-byte Reload - adcl 116(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 120(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 44(%esp), %edx # 4-byte Reload - adcl 124(%esp), %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 40(%esp), %edx # 4-byte Reload - adcl 128(%esp), %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - adcl 132(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 136(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 140(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 144(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 148(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 152(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl %eax, %edx - movl 1916(%esp), %ebp - subl (%ebp), %edx - sbbl 4(%ebp), %ebx - movl %esi, %eax - sbbl 8(%ebp), %eax - movl %edi, %ecx - sbbl 12(%ebp), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - sbbl 16(%ebp), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - sbbl 20(%ebp), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - sbbl 24(%ebp), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 40(%esp), %ecx # 4-byte Reload - sbbl 28(%ebp), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - sbbl 32(%ebp), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - sbbl 36(%ebp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - sbbl 40(%ebp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - sbbl 44(%ebp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - sbbl 48(%ebp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - sbbl 52(%ebp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - sarl $31, %ecx - testl %ecx, %ecx - movl 92(%esp), %ebp # 4-byte Reload - js .LBB211_2 -# BB#1: - movl %edx, %ebp -.LBB211_2: - movl 1904(%esp), %edx - movl %ebp, (%edx) - movl 88(%esp), %ebp # 4-byte Reload - js .LBB211_4 -# BB#3: - movl %ebx, %ebp -.LBB211_4: - movl %ebp, 4(%edx) - js .LBB211_6 -# BB#5: - movl %eax, %esi -.LBB211_6: - movl %esi, 8(%edx) - js .LBB211_8 -# BB#7: - movl 4(%esp), %edi # 4-byte Reload -.LBB211_8: - movl %edi, 12(%edx) - movl 68(%esp), %eax # 4-byte Reload - js .LBB211_10 -# BB#9: - movl 8(%esp), %eax # 4-byte Reload -.LBB211_10: - movl %eax, 16(%edx) - movl 60(%esp), %eax # 4-byte Reload - js .LBB211_12 -# BB#11: - movl 12(%esp), %eax # 4-byte Reload -.LBB211_12: - movl %eax, 20(%edx) - movl 44(%esp), %eax # 4-byte Reload - js .LBB211_14 -# BB#13: - movl 16(%esp), %eax # 4-byte Reload -.LBB211_14: - movl %eax, 24(%edx) - movl 40(%esp), %eax # 4-byte Reload - js .LBB211_16 -# BB#15: - movl 20(%esp), %eax # 4-byte Reload -.LBB211_16: - movl %eax, 28(%edx) - 
movl 48(%esp), %eax # 4-byte Reload - js .LBB211_18 -# BB#17: - movl 24(%esp), %eax # 4-byte Reload -.LBB211_18: - movl %eax, 32(%edx) - movl 56(%esp), %eax # 4-byte Reload - js .LBB211_20 -# BB#19: - movl 28(%esp), %eax # 4-byte Reload -.LBB211_20: - movl %eax, 36(%edx) - movl 52(%esp), %eax # 4-byte Reload - js .LBB211_22 -# BB#21: - movl 32(%esp), %eax # 4-byte Reload -.LBB211_22: - movl %eax, 40(%edx) - movl 84(%esp), %eax # 4-byte Reload - js .LBB211_24 -# BB#23: - movl 36(%esp), %eax # 4-byte Reload -.LBB211_24: - movl %eax, 44(%edx) - movl 76(%esp), %eax # 4-byte Reload - js .LBB211_26 -# BB#25: - movl 64(%esp), %eax # 4-byte Reload -.LBB211_26: - movl %eax, 48(%edx) - movl 80(%esp), %eax # 4-byte Reload - js .LBB211_28 -# BB#27: - movl 72(%esp), %eax # 4-byte Reload -.LBB211_28: - movl %eax, 52(%edx) - addl $1884, %esp # imm = 0x75C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end211: - .size mcl_fp_montNF14L, .Lfunc_end211-mcl_fp_montNF14L - - .globl mcl_fp_montRed14L - .align 16, 0x90 - .type mcl_fp_montRed14L,@function -mcl_fp_montRed14L: # @mcl_fp_montRed14L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1068, %esp # imm = 0x42C - calll .L212$pb -.L212$pb: - popl %eax -.Ltmp43: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp43-.L212$pb), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 1096(%esp), %edx - movl -4(%edx), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 1092(%esp), %ecx - movl (%ecx), %ebx - movl %ebx, 92(%esp) # 4-byte Spill - movl 4(%ecx), %edi - movl %edi, 96(%esp) # 4-byte Spill - imull %eax, %ebx - movl 108(%ecx), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 104(%ecx), %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 100(%ecx), %esi - movl %esi, 120(%esp) # 4-byte Spill - movl 96(%ecx), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 92(%ecx), %esi - movl %esi, 140(%esp) # 4-byte Spill - movl 88(%ecx), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 84(%ecx), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 80(%ecx), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 76(%ecx), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 72(%ecx), %esi - movl %esi, 136(%esp) # 4-byte Spill - movl 68(%ecx), %esi - movl %esi, 168(%esp) # 4-byte Spill - movl 64(%ecx), %esi - movl %esi, 164(%esp) # 4-byte Spill - movl 60(%ecx), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 56(%ecx), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 52(%ecx), %edi - movl %edi, 144(%esp) # 4-byte Spill - movl 48(%ecx), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 44(%ecx), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 40(%ecx), %edi - movl %edi, 116(%esp) # 4-byte Spill - movl 36(%ecx), %ebp - movl 32(%ecx), %edi - movl 28(%ecx), %esi - movl 24(%ecx), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 20(%ecx), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 16(%ecx), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 12(%ecx), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 8(%ecx), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl (%edx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 52(%edx), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 48(%edx), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 44(%edx), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 40(%edx), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 36(%edx), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 32(%edx), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 28(%edx), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 24(%edx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill 
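Note on the tail of mcl_fp_montNF14L just above: the sbbl run, the sarl $31, and the js .LBB211_* selects are the standard final conditional subtraction of Montgomery multiplication — compute t - p with a borrow chain, then keep t or t - p depending on whether the subtraction went negative. A minimal Go sketch of that select, assuming 14 32-bit limbs (the "14L" suffix); the package and function names here are hypothetical, not mcl's Go API:

    package mclsketch // hypothetical name; an illustration, not mcl's API

    import "math/bits"

    const n = 14 // 14 x 32-bit limbs = 448 bits

    // condSub mirrors the pattern closing mcl_fp_montNF14L: subtract the
    // modulus with a borrow chain (the sbbl run), then keep t or t-p
    // according to whether the subtraction went negative (the js selects).
    func condSub(t, p *[n]uint32) [n]uint32 {
        var d [n]uint32
        var borrow uint32
        for i := 0; i < n; i++ {
            d[i], borrow = bits.Sub32(t[i], p[i], borrow)
        }
        mask := -borrow // all-ones when t < p, zero otherwise
        var r [n]uint32
        for i := 0; i < n; i++ {
            r[i] = (t[i] & mask) | (d[i] &^ mask)
        }
        return r
    }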
- movl 20(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 16(%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 12(%edx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 8(%edx), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 4(%edx), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl %ebx, (%esp) - leal 1008(%esp), %ecx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - movl 92(%esp), %eax # 4-byte Reload - addl 1008(%esp), %eax - movl 96(%esp), %ecx # 4-byte Reload - adcl 1012(%esp), %ecx - movl 84(%esp), %eax # 4-byte Reload - adcl 1016(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1020(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1024(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1028(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1032(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1036(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - adcl 1040(%esp), %edi - movl %edi, 96(%esp) # 4-byte Spill - adcl 1044(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1048(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl 1052(%esp), %ebp - movl 132(%esp), %eax # 4-byte Reload - adcl 1056(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 1060(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 1064(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - adcl $0, 160(%esp) # 4-byte Folded Spill - adcl $0, 164(%esp) # 4-byte Folded Spill - adcl $0, 168(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - sbbl %edi, %edi - movl %ecx, %esi - movl %esi, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 944(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - movl %edi, %ecx - andl $1, %ecx - addl 944(%esp), %esi - movl 84(%esp), %edx # 4-byte Reload - adcl 948(%esp), %edx - movl 64(%esp), %eax # 4-byte Reload - adcl 952(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 956(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 960(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %edi # 4-byte Reload - adcl 976(%esp), %edi - movl 116(%esp), %eax # 4-byte Reload - adcl 980(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 984(%esp), %ebp - movl %ebp, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 988(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl 
%eax, 144(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 996(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %esi # 4-byte Reload - adcl 1000(%esp), %esi - adcl $0, 164(%esp) # 4-byte Folded Spill - adcl $0, 168(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl %edx, %eax - movl %edx, %ebp - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 880(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 880(%esp), %ebp - movl 64(%esp), %ecx # 4-byte Reload - adcl 884(%esp), %ecx - movl 68(%esp), %eax # 4-byte Reload - adcl 888(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 908(%esp), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 912(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %edi # 4-byte Reload - adcl 920(%esp), %edi - movl 144(%esp), %eax # 4-byte Reload - adcl 924(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 928(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - adcl 932(%esp), %esi - movl %esi, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 936(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - adcl $0, 168(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - movl 152(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %esi - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 816(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 816(%esp), %esi - movl 68(%esp), %ecx # 4-byte Reload - adcl 820(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 828(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 
116(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 852(%esp), %edi - movl %edi, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 856(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 872(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 152(%esp) # 4-byte Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - movl 128(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - movl 104(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %ebp - movl %ebp, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 752(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 752(%esp), %ebp - movl 72(%esp), %ecx # 4-byte Reload - adcl 756(%esp), %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 780(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 784(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - movl 156(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 128(%esp) # 4-byte Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 104(%esp) # 4-byte Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %esi - movl %esi, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 688(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 688(%esp), %esi - movl 
76(%esp), %ecx # 4-byte Reload - adcl 692(%esp), %ecx - movl 80(%esp), %eax # 4-byte Reload - adcl 696(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 712(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 716(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 720(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 728(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 732(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 736(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 156(%esp) # 4-byte Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - movl 140(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - movl 108(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %ebp - movl %ebp, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 624(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 624(%esp), %ebp - movl 80(%esp), %ecx # 4-byte Reload - adcl 628(%esp), %ecx - movl 96(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 636(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %ebp # 4-byte Reload - adcl 664(%esp), %ebp - movl 168(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 140(%esp) # 4-byte Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl 
$0, %esi - movl %esi, 108(%esp) # 4-byte Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %esi - movl %esi, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 560(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 560(%esp), %esi - movl 96(%esp), %ecx # 4-byte Reload - adcl 564(%esp), %ecx - movl 92(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - adcl 596(%esp), %ebp - movl %ebp, 164(%esp) # 4-byte Spill - movl 168(%esp), %edi # 4-byte Reload - adcl 600(%esp), %edi - movl 136(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - movl 120(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %ebp - movl %ebp, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 496(%esp), %ecx - movl 1096(%esp), %eax - movl %eax, %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 496(%esp), %ebp - movl 92(%esp), %ecx # 4-byte Reload - adcl 500(%esp), %ecx - movl 116(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 144(%esp), %ebp # 4-byte Reload - adcl 516(%esp), %ebp - movl 172(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - adcl 532(%esp), %edi - movl %edi, 168(%esp) # 4-byte Spill - movl 136(%esp), %edi # 4-byte Reload - adcl 536(%esp), %edi - movl 148(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - adcl $0, 140(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 
%esi - movl %esi, 120(%esp) # 4-byte Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %esi - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 432(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 432(%esp), %esi - movl 116(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl 112(%esp), %ecx # 4-byte Reload - adcl 440(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 444(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - adcl 448(%esp), %ebp - movl %ebp, 144(%esp) # 4-byte Spill - movl 172(%esp), %ecx # 4-byte Reload - adcl 452(%esp), %ecx - movl %ecx, 172(%esp) # 4-byte Spill - movl 160(%esp), %ebp # 4-byte Reload - adcl 456(%esp), %ebp - movl 164(%esp), %ecx # 4-byte Reload - adcl 460(%esp), %ecx - movl %ecx, 164(%esp) # 4-byte Spill - movl 168(%esp), %ecx # 4-byte Reload - adcl 464(%esp), %ecx - movl %ecx, 168(%esp) # 4-byte Spill - adcl 468(%esp), %edi - movl %edi, 136(%esp) # 4-byte Spill - movl 148(%esp), %ecx # 4-byte Reload - adcl 472(%esp), %ecx - movl %ecx, 148(%esp) # 4-byte Spill - movl 156(%esp), %ecx # 4-byte Reload - adcl 476(%esp), %ecx - movl %ecx, 156(%esp) # 4-byte Spill - movl 152(%esp), %ecx # 4-byte Reload - adcl 480(%esp), %ecx - movl %ecx, 152(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 484(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 140(%esp), %ecx # 4-byte Reload - adcl 488(%esp), %ecx - movl %ecx, 140(%esp) # 4-byte Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %eax, %esi - movl 88(%esp), %edi # 4-byte Reload - imull %edi, %eax - movl %eax, (%esp) - leal 368(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 368(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl 132(%esp), %ecx # 4-byte Reload - adcl 376(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 144(%esp), %ecx # 4-byte Reload - adcl 380(%esp), %ecx - movl %ecx, 144(%esp) # 4-byte Spill - movl 172(%esp), %esi # 4-byte Reload - adcl 384(%esp), %esi - adcl 388(%esp), %ebp - movl %ebp, 160(%esp) # 4-byte Spill - movl 164(%esp), %ecx # 4-byte Reload - adcl 392(%esp), %ecx - movl %ecx, 164(%esp) # 4-byte Spill - movl 168(%esp), %ecx # 4-byte Reload - adcl 396(%esp), %ecx - movl %ecx, 168(%esp) # 4-byte Spill - movl 136(%esp), %ecx # 4-byte Reload - adcl 400(%esp), %ecx - movl %ecx, 136(%esp) # 4-byte Spill - movl 148(%esp), %ecx # 4-byte Reload - adcl 404(%esp), %ecx - movl %ecx, 148(%esp) # 4-byte Spill - movl 156(%esp), %ecx # 4-byte Reload - adcl 408(%esp), %ecx - movl %ecx, 156(%esp) # 4-byte Spill - movl 152(%esp), %ecx # 4-byte Reload - adcl 412(%esp), %ecx - movl %ecx, 152(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 416(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 140(%esp), %ecx # 4-byte Reload - adcl 420(%esp), %ecx - movl %ecx, 140(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 424(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - adcl $0, 120(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl 
%eax, %ebp - imull %edi, %eax - movl %eax, (%esp) - leal 304(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 304(%esp), %ebp - movl 132(%esp), %edi # 4-byte Reload - adcl 308(%esp), %edi - movl 144(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl %esi, %ebp - adcl 316(%esp), %ebp - movl 160(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 136(%esp), %esi # 4-byte Reload - adcl 332(%esp), %esi - movl 148(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %edi, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 240(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 240(%esp), %edi - movl 144(%esp), %ecx # 4-byte Reload - adcl 244(%esp), %ecx - adcl 248(%esp), %ebp - movl %ebp, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - adcl 264(%esp), %esi - movl %esi, 136(%esp) # 4-byte Spill - movl 148(%esp), %edi # 4-byte Reload - adcl 268(%esp), %edi - movl 156(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 124(%esp), %ebp # 4-byte Reload - adcl 280(%esp), %ebp - movl 140(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl 88(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 176(%esp), %ecx - movl 1096(%esp), %edx - movl 100(%esp), %ebx # 4-byte Reload - calll .LmulPv448x32 - addl 176(%esp), %esi - movl 172(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %ebx # 4-byte Reload - adcl 
188(%esp), %ebx - movl %ebx, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 136(%esp), %edx # 4-byte Reload - adcl 196(%esp), %edx - movl %edx, 136(%esp) # 4-byte Spill - movl %edi, %eax - adcl 200(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - adcl 212(%esp), %ebp - movl %ebp, 124(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 232(%esp), %ecx - movl 84(%esp), %esi # 4-byte Reload - adcl $0, %esi - movl 172(%esp), %edi # 4-byte Reload - subl 16(%esp), %edi # 4-byte Folded Reload - movl 160(%esp), %ebp # 4-byte Reload - sbbl 8(%esp), %ebp # 4-byte Folded Reload - sbbl 12(%esp), %ebx # 4-byte Folded Reload - movl 168(%esp), %eax # 4-byte Reload - sbbl 20(%esp), %eax # 4-byte Folded Reload - sbbl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 80(%esp) # 4-byte Spill - movl 148(%esp), %edx # 4-byte Reload - sbbl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 84(%esp) # 4-byte Spill - movl 156(%esp), %edx # 4-byte Reload - sbbl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 88(%esp) # 4-byte Spill - movl 152(%esp), %edx # 4-byte Reload - sbbl 36(%esp), %edx # 4-byte Folded Reload - movl %edx, 92(%esp) # 4-byte Spill - movl 124(%esp), %edx # 4-byte Reload - sbbl 40(%esp), %edx # 4-byte Folded Reload - movl %edx, 96(%esp) # 4-byte Spill - movl 140(%esp), %edx # 4-byte Reload - sbbl 44(%esp), %edx # 4-byte Folded Reload - movl %edx, 100(%esp) # 4-byte Spill - movl 128(%esp), %edx # 4-byte Reload - sbbl 48(%esp), %edx # 4-byte Folded Reload - movl %edx, 112(%esp) # 4-byte Spill - movl 120(%esp), %edx # 4-byte Reload - sbbl 52(%esp), %edx # 4-byte Folded Reload - movl %edx, 116(%esp) # 4-byte Spill - movl 108(%esp), %edx # 4-byte Reload - sbbl 56(%esp), %edx # 4-byte Folded Reload - movl %edx, 132(%esp) # 4-byte Spill - movl %ecx, %edx - sbbl 60(%esp), %edx # 4-byte Folded Reload - movl %edx, 144(%esp) # 4-byte Spill - sbbl $0, %esi - andl $1, %esi - jne .LBB212_2 -# BB#1: - movl %eax, 168(%esp) # 4-byte Spill -.LBB212_2: - movl %esi, %edx - testb %dl, %dl - movl 172(%esp), %eax # 4-byte Reload - jne .LBB212_4 -# BB#3: - movl %edi, %eax -.LBB212_4: - movl 1088(%esp), %edi - movl %eax, (%edi) - movl %ecx, 104(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - movl 160(%esp), %ecx # 4-byte Reload - jne .LBB212_6 -# BB#5: - movl %ebp, %ecx -.LBB212_6: - movl %ecx, 4(%edi) - movl 108(%esp), %ecx # 4-byte Reload - movl 164(%esp), %ebp # 4-byte Reload - jne .LBB212_8 -# BB#7: - movl %ebx, %ebp -.LBB212_8: - movl %ebp, 8(%edi) - movl 168(%esp), %ebx # 4-byte Reload - movl %ebx, 12(%edi) - movl 124(%esp), %ebp # 4-byte Reload - movl 136(%esp), %ebx # 4-byte Reload - jne .LBB212_10 -# BB#9: - movl 80(%esp), %ebx # 4-byte Reload -.LBB212_10: - movl %ebx, 16(%edi) - movl 140(%esp), %ebx # 4-byte Reload - movl 148(%esp), %esi # 4-byte Reload - jne .LBB212_12 -# BB#11: - movl 84(%esp), %esi # 
4-byte Reload -.LBB212_12: - movl %esi, 20(%edi) - movl 128(%esp), %esi # 4-byte Reload - jne .LBB212_14 -# BB#13: - movl 88(%esp), %eax # 4-byte Reload -.LBB212_14: - movl %eax, 24(%edi) - movl 120(%esp), %edx # 4-byte Reload - jne .LBB212_16 -# BB#15: - movl 92(%esp), %eax # 4-byte Reload - movl %eax, 152(%esp) # 4-byte Spill -.LBB212_16: - movl 152(%esp), %eax # 4-byte Reload - movl %eax, 28(%edi) - jne .LBB212_18 -# BB#17: - movl 96(%esp), %ebp # 4-byte Reload -.LBB212_18: - movl %ebp, 32(%edi) - jne .LBB212_20 -# BB#19: - movl 100(%esp), %ebx # 4-byte Reload -.LBB212_20: - movl %ebx, 36(%edi) - jne .LBB212_22 -# BB#21: - movl 112(%esp), %esi # 4-byte Reload -.LBB212_22: - movl %esi, 40(%edi) - jne .LBB212_24 -# BB#23: - movl 116(%esp), %edx # 4-byte Reload -.LBB212_24: - movl %edx, 44(%edi) - jne .LBB212_26 -# BB#25: - movl 132(%esp), %ecx # 4-byte Reload -.LBB212_26: - movl %ecx, 48(%edi) - movl 104(%esp), %eax # 4-byte Reload - jne .LBB212_28 -# BB#27: - movl 144(%esp), %eax # 4-byte Reload -.LBB212_28: - movl %eax, 52(%edi) - addl $1068, %esp # imm = 0x42C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end212: - .size mcl_fp_montRed14L, .Lfunc_end212-mcl_fp_montRed14L - - .globl mcl_fp_addPre14L - .align 16, 0x90 - .type mcl_fp_addPre14L,@function -mcl_fp_addPre14L: # @mcl_fp_addPre14L -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - movl 20(%esp), %ecx - addl (%ecx), %edx - adcl 4(%ecx), %esi - movl 8(%eax), %edi - adcl 8(%ecx), %edi - movl 16(%esp), %ebx - movl %edx, (%ebx) - movl 12(%ecx), %edx - movl %esi, 4(%ebx) - movl 16(%ecx), %esi - adcl 12(%eax), %edx - adcl 16(%eax), %esi - movl %edi, 8(%ebx) - movl 20(%eax), %edi - movl %edx, 12(%ebx) - movl 20(%ecx), %edx - adcl %edi, %edx - movl 24(%eax), %edi - movl %esi, 16(%ebx) - movl 24(%ecx), %esi - adcl %edi, %esi - movl 28(%eax), %edi - movl %edx, 20(%ebx) - movl 28(%ecx), %edx - adcl %edi, %edx - movl 32(%eax), %edi - movl %esi, 24(%ebx) - movl 32(%ecx), %esi - adcl %edi, %esi - movl 36(%eax), %edi - movl %edx, 28(%ebx) - movl 36(%ecx), %edx - adcl %edi, %edx - movl 40(%eax), %edi - movl %esi, 32(%ebx) - movl 40(%ecx), %esi - adcl %edi, %esi - movl 44(%eax), %edi - movl %edx, 36(%ebx) - movl 44(%ecx), %edx - adcl %edi, %edx - movl 48(%eax), %edi - movl %esi, 40(%ebx) - movl 48(%ecx), %esi - adcl %edi, %esi - movl %edx, 44(%ebx) - movl %esi, 48(%ebx) - movl 52(%eax), %eax - movl 52(%ecx), %ecx - adcl %eax, %ecx - movl %ecx, 52(%ebx) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end213: - .size mcl_fp_addPre14L, .Lfunc_end213-mcl_fp_addPre14L - - .globl mcl_fp_subPre14L - .align 16, 0x90 - .type mcl_fp_subPre14L,@function -mcl_fp_subPre14L: # @mcl_fp_subPre14L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edi - xorl %eax, %eax - movl 28(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%ecx), %ebx - sbbl 8(%edx), %ebx - movl 20(%esp), %ebp - movl %esi, (%ebp) - movl 12(%ecx), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ebp) - movl 16(%ecx), %edi - sbbl 16(%edx), %edi - movl %ebx, 8(%ebp) - movl 20(%edx), %ebx - movl %esi, 12(%ebp) - movl 20(%ecx), %esi - sbbl %ebx, %esi - movl 24(%edx), %ebx - movl %edi, 16(%ebp) - movl 24(%ecx), %edi - sbbl %ebx, %edi - movl 28(%edx), %ebx - movl %esi, 20(%ebp) - movl 28(%ecx), %esi - sbbl %ebx, %esi - movl 32(%edx), %ebx - movl %edi, 24(%ebp) - movl 32(%ecx), %edi - sbbl %ebx, %edi - movl 
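Note on mcl_fp_montRed14L, which ends above: the prologue loads the word at -4(%edx) — that is inv = -p^{-1} mod 2^32 — and each unrolled round multiplies the running low limb by it (the imull 88(%esp)), calls .LmulPv448x32 to add m*p, and drops the now-zero bottom limb. A textbook single round in Go under the same limb layout; names are hypothetical and this is a sketch of the step, not the unrolled routine:

    package mclsketch // hypothetical; standalone illustration

    const n = 14 // limbs of the modulus

    // redStep is one round of word-by-word Montgomery reduction, the step
    // the unrolled body above repeats: m = t[0]*inv mod 2^32, t += m*p,
    // after which t[0] == 0 and the caller shifts t down one limb.
    // inv is -p^-1 mod 2^32, the word loaded from -4(%edx).
    func redStep(t []uint32, p *[n]uint32, inv uint32) {
        m := t[0] * inv
        var carry uint64
        for i := 0; i < n; i++ {
            sum := uint64(t[i]) + uint64(m)*uint64(p[i]) + carry
            t[i] = uint32(sum)
            carry = sum >> 32
        }
        // propagate the final carry into the limbs above p's width
        for i := n; carry != 0 && i < len(t); i++ {
            sum := uint64(t[i]) + carry
            t[i] = uint32(sum)
            carry = sum >> 32
        }
    }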
36(%edx), %ebx - movl %esi, 28(%ebp) - movl 36(%ecx), %esi - sbbl %ebx, %esi - movl 40(%edx), %ebx - movl %edi, 32(%ebp) - movl 40(%ecx), %edi - sbbl %ebx, %edi - movl 44(%edx), %ebx - movl %esi, 36(%ebp) - movl 44(%ecx), %esi - sbbl %ebx, %esi - movl 48(%edx), %ebx - movl %edi, 40(%ebp) - movl 48(%ecx), %edi - sbbl %ebx, %edi - movl %esi, 44(%ebp) - movl %edi, 48(%ebp) - movl 52(%edx), %edx - movl 52(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 52(%ebp) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end214: - .size mcl_fp_subPre14L, .Lfunc_end214-mcl_fp_subPre14L - - .globl mcl_fp_shr1_14L - .align 16, 0x90 - .type mcl_fp_shr1_14L,@function -mcl_fp_shr1_14L: # @mcl_fp_shr1_14L -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - shrdl $1, %esi, %edx - movl 8(%esp), %ecx - movl %edx, (%ecx) - movl 8(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 4(%ecx) - movl 12(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 8(%ecx) - movl 16(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 12(%ecx) - movl 20(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 16(%ecx) - movl 24(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 20(%ecx) - movl 28(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 24(%ecx) - movl 32(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 28(%ecx) - movl 36(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 32(%ecx) - movl 40(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 36(%ecx) - movl 44(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 40(%ecx) - movl 48(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 44(%ecx) - movl 52(%eax), %eax - shrdl $1, %eax, %edx - movl %edx, 48(%ecx) - shrl %eax - movl %eax, 52(%ecx) - popl %esi - retl -.Lfunc_end215: - .size mcl_fp_shr1_14L, .Lfunc_end215-mcl_fp_shr1_14L - - .globl mcl_fp_add14L - .align 16, 0x90 - .type mcl_fp_add14L,@function -mcl_fp_add14L: # @mcl_fp_add14L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $44, %esp - movl 72(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %ecx - movl 68(%esp), %ebp - addl (%ebp), %edx - movl %edx, 4(%esp) # 4-byte Spill - adcl 4(%ebp), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 8(%eax), %ecx - adcl 8(%ebp), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 12(%ebp), %edx - movl 16(%ebp), %ecx - adcl 12(%eax), %edx - movl %edx, 32(%esp) # 4-byte Spill - adcl 16(%eax), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 20(%ebp), %ecx - adcl 20(%eax), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 24(%ebp), %ecx - adcl 24(%eax), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 28(%ebp), %ecx - adcl 28(%eax), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 32(%ebp), %ecx - adcl 32(%eax), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 36(%ebp), %ecx - adcl 36(%eax), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 40(%ebp), %edx - adcl 40(%eax), %edx - movl %edx, (%esp) # 4-byte Spill - movl 44(%ebp), %ebx - adcl 44(%eax), %ebx - movl 48(%ebp), %esi - adcl 48(%eax), %esi - movl 52(%ebp), %edi - adcl 52(%eax), %edi - movl 64(%esp), %eax - movl 4(%esp), %ebp # 4-byte Reload - movl %ebp, (%eax) - movl 40(%esp), %ecx # 4-byte Reload - movl %ecx, 4(%eax) - movl 36(%esp), %ecx # 4-byte Reload - movl %ecx, 8(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 16(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 20(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 
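Note on mcl_fp_shr1_14L above: the shrdl $1 chain feeds the low bit of limb i+1 into the top bit of limb i, and the trailing shrl handles the most significant limb — a 448-bit logical right shift by one. The same computation in Go (hypothetical names, illustration only):

    package mclsketch // hypothetical; standalone illustration

    const n = 14

    // shr1 is what mcl_fp_shr1_14L computes: shift a 14-limb value right
    // by one bit, carrying each limb's low bit into its lower neighbor.
    func shr1(x *[n]uint32) {
        for i := 0; i < n-1; i++ {
            x[i] = x[i]>>1 | x[i+1]<<31
        }
        x[n-1] >>= 1
    }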
12(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl 8(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - movl %edx, 40(%eax) - movl %ebx, 44(%eax) - movl %esi, 48(%eax) - movl %edi, 52(%eax) - sbbl %ecx, %ecx - andl $1, %ecx - movl 76(%esp), %edx - subl (%edx), %ebp - movl %ebp, 4(%esp) # 4-byte Spill - movl 40(%esp), %ebp # 4-byte Reload - sbbl 4(%edx), %ebp - movl %ebp, 40(%esp) # 4-byte Spill - movl 36(%esp), %ebp # 4-byte Reload - sbbl 8(%edx), %ebp - movl %ebp, 36(%esp) # 4-byte Spill - movl 32(%esp), %ebp # 4-byte Reload - sbbl 12(%edx), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - movl 28(%esp), %ebp # 4-byte Reload - sbbl 16(%edx), %ebp - movl %ebp, 28(%esp) # 4-byte Spill - movl 24(%esp), %ebp # 4-byte Reload - sbbl 20(%edx), %ebp - movl %ebp, 24(%esp) # 4-byte Spill - movl 20(%esp), %ebp # 4-byte Reload - sbbl 24(%edx), %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl 16(%esp), %ebp # 4-byte Reload - sbbl 28(%edx), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 12(%esp), %ebp # 4-byte Reload - sbbl 32(%edx), %ebp - movl %ebp, 12(%esp) # 4-byte Spill - movl 8(%esp), %ebp # 4-byte Reload - sbbl 36(%edx), %ebp - movl %ebp, 8(%esp) # 4-byte Spill - movl (%esp), %ebp # 4-byte Reload - sbbl 40(%edx), %ebp - sbbl 44(%edx), %ebx - sbbl 48(%edx), %esi - sbbl 52(%edx), %edi - sbbl $0, %ecx - testb $1, %cl - jne .LBB216_2 -# BB#1: # %nocarry - movl 4(%esp), %ecx # 4-byte Reload - movl %ecx, (%eax) - movl 40(%esp), %ecx # 4-byte Reload - movl %ecx, 4(%eax) - movl 36(%esp), %ecx # 4-byte Reload - movl %ecx, 8(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 12(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 16(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 20(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 12(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl 8(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - movl %ebp, 40(%eax) - movl %ebx, 44(%eax) - movl %esi, 48(%eax) - movl %edi, 52(%eax) -.LBB216_2: # %carry - addl $44, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end216: - .size mcl_fp_add14L, .Lfunc_end216-mcl_fp_add14L - - .globl mcl_fp_addNF14L - .align 16, 0x90 - .type mcl_fp_addNF14L,@function -mcl_fp_addNF14L: # @mcl_fp_addNF14L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $112, %esp - movl 140(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - movl 136(%esp), %ecx - addl (%ecx), %edx - movl %edx, 72(%esp) # 4-byte Spill - adcl 4(%ecx), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 52(%eax), %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 48(%eax), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 44(%eax), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 40(%eax), %ebp - movl 36(%eax), %edx - movl %edx, 104(%esp) # 4-byte Spill - movl 32(%eax), %edx - movl %edx, 100(%esp) # 4-byte Spill - movl 28(%eax), %edx - movl %edx, 108(%esp) # 4-byte Spill - movl 24(%eax), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 20(%eax), %ebx - movl 16(%eax), %edi - movl 12(%eax), %esi - movl 8(%eax), %edx - adcl 8(%ecx), %edx - movl %edx, 56(%esp) # 4-byte Spill - adcl 12(%ecx), %esi - movl %esi, 60(%esp) # 4-byte Spill - adcl 16(%ecx), %edi - movl %edi, 64(%esp) # 4-byte Spill - adcl 20(%ecx), %ebx - movl %ebx, 68(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 24(%ecx), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 28(%ecx), %eax - movl %eax, 108(%esp) # 
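Note on mcl_fp_add14L above: it stores the raw sum, speculatively subtracts the modulus with a sbbl chain, and overwrites the output only on the %nocarry path before .LBB216_2 — i.e. only when x + y >= p. A minimal Go sketch of that shape (hypothetical names, not mcl's API):

    package mclsketch // hypothetical; standalone illustration

    import "math/bits"

    const n = 14

    // fpAdd follows the shape of mcl_fp_add14L: z = x + y, then z - p is
    // kept only when the add carried out or the subtraction did not
    // borrow, i.e. when x + y >= p.
    func fpAdd(z, x, y, p *[n]uint32) {
        var carry uint32
        for i := 0; i < n; i++ {
            z[i], carry = bits.Add32(x[i], y[i], carry)
        }
        var d [n]uint32
        var borrow uint32
        for i := 0; i < n; i++ {
            d[i], borrow = bits.Sub32(z[i], p[i], borrow)
        }
        if carry != 0 || borrow == 0 { // x+y >= p: keep the reduced value
            *z = d
        }
    }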
4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 32(%ecx), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 36(%ecx), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 40(%ecx), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl 44(%ecx), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - movl 96(%esp), %ebp # 4-byte Reload - adcl 48(%ecx), %ebp - movl %ebp, 96(%esp) # 4-byte Spill - movl 92(%esp), %ebp # 4-byte Reload - adcl 52(%ecx), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - movl 144(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - subl (%ecx), %eax - movl %eax, (%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - sbbl 4(%ecx), %eax - movl %eax, 4(%esp) # 4-byte Spill - sbbl 8(%ecx), %edx - movl %edx, 8(%esp) # 4-byte Spill - sbbl 12(%ecx), %esi - movl %esi, 12(%esp) # 4-byte Spill - sbbl 16(%ecx), %edi - movl %edi, 16(%esp) # 4-byte Spill - sbbl 20(%ecx), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - movl %edx, %eax - sbbl 24(%ecx), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - sbbl 28(%ecx), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - sbbl 32(%ecx), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 104(%esp), %esi # 4-byte Reload - sbbl 36(%ecx), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - sbbl 40(%ecx), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - movl %eax, %esi - movl %eax, %ebp - sbbl 44(%ecx), %esi - movl %esi, 44(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - movl %eax, %esi - sbbl 48(%ecx), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - movl %eax, %edi - sbbl 52(%ecx), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl %edi, %ecx - sarl $31, %ecx - testl %ecx, %ecx - movl 72(%esp), %ecx # 4-byte Reload - js .LBB217_2 -# BB#1: - movl (%esp), %ecx # 4-byte Reload -.LBB217_2: - movl 132(%esp), %edi - movl %ecx, (%edi) - movl 76(%esp), %eax # 4-byte Reload - js .LBB217_4 -# BB#3: - movl 4(%esp), %eax # 4-byte Reload -.LBB217_4: - movl %eax, 4(%edi) - movl %edx, %ecx - movl 64(%esp), %eax # 4-byte Reload - movl 56(%esp), %edx # 4-byte Reload - js .LBB217_6 -# BB#5: - movl 8(%esp), %edx # 4-byte Reload -.LBB217_6: - movl %edx, 8(%edi) - movl %ebp, %edx - movl 104(%esp), %ebx # 4-byte Reload - movl 60(%esp), %ebp # 4-byte Reload - js .LBB217_8 -# BB#7: - movl 12(%esp), %ebp # 4-byte Reload -.LBB217_8: - movl %ebp, 12(%edi) - movl 100(%esp), %ebp # 4-byte Reload - js .LBB217_10 -# BB#9: - movl 16(%esp), %eax # 4-byte Reload -.LBB217_10: - movl %eax, 16(%edi) - movl 80(%esp), %esi # 4-byte Reload - js .LBB217_12 -# BB#11: - movl 20(%esp), %eax # 4-byte Reload - movl %eax, 68(%esp) # 4-byte Spill -.LBB217_12: - movl 68(%esp), %eax # 4-byte Reload - movl %eax, 20(%edi) - js .LBB217_14 -# BB#13: - movl 24(%esp), %ecx # 4-byte Reload -.LBB217_14: - movl %ecx, 24(%edi) - js .LBB217_16 -# BB#15: - movl 28(%esp), %eax # 4-byte Reload - movl %eax, 108(%esp) # 4-byte Spill -.LBB217_16: - movl 108(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%edi) - js .LBB217_18 -# BB#17: - movl 32(%esp), %ebp # 4-byte Reload -.LBB217_18: - movl %ebp, 32(%edi) - js .LBB217_20 -# BB#19: - movl 36(%esp), %ebx # 4-byte Reload -.LBB217_20: - movl %ebx, 36(%edi) - js .LBB217_22 -# BB#21: - movl 40(%esp), %esi # 4-byte Reload -.LBB217_22: - movl %esi, 40(%edi) - movl 96(%esp), %eax # 4-byte Reload - 
js .LBB217_24 -# BB#23: - movl 44(%esp), %edx # 4-byte Reload -.LBB217_24: - movl %edx, 44(%edi) - movl 92(%esp), %ecx # 4-byte Reload - js .LBB217_26 -# BB#25: - movl 48(%esp), %eax # 4-byte Reload -.LBB217_26: - movl %eax, 48(%edi) - js .LBB217_28 -# BB#27: - movl 52(%esp), %ecx # 4-byte Reload -.LBB217_28: - movl %ecx, 52(%edi) - addl $112, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end217: - .size mcl_fp_addNF14L, .Lfunc_end217-mcl_fp_addNF14L - - .globl mcl_fp_sub14L - .align 16, 0x90 - .type mcl_fp_sub14L,@function -mcl_fp_sub14L: # @mcl_fp_sub14L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $52, %esp - movl 76(%esp), %esi - movl (%esi), %eax - movl 4(%esi), %ecx - xorl %ebx, %ebx - movl 80(%esp), %edi - subl (%edi), %eax - movl %eax, 44(%esp) # 4-byte Spill - sbbl 4(%edi), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 8(%esi), %eax - sbbl 8(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 12(%esi), %eax - sbbl 12(%edi), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 16(%esi), %eax - sbbl 16(%edi), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 20(%esi), %eax - sbbl 20(%edi), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 24(%esi), %eax - sbbl 24(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 28(%esi), %eax - sbbl 28(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 32(%esi), %eax - sbbl 32(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 36(%esi), %edx - sbbl 36(%edi), %edx - movl %edx, 12(%esp) # 4-byte Spill - movl 40(%esi), %ecx - sbbl 40(%edi), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 44(%esi), %eax - sbbl 44(%edi), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 48(%esi), %ebp - sbbl 48(%edi), %ebp - movl 52(%esi), %esi - sbbl 52(%edi), %esi - sbbl $0, %ebx - testb $1, %bl - movl 72(%esp), %ebx - movl 44(%esp), %edi # 4-byte Reload - movl %edi, (%ebx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 4(%ebx) - movl 36(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl 48(%esp), %edi # 4-byte Reload - movl %edi, 12(%ebx) - movl 40(%esp), %edi # 4-byte Reload - movl %edi, 16(%ebx) - movl 32(%esp), %edi # 4-byte Reload - movl %edi, 20(%ebx) - movl 28(%esp), %edi # 4-byte Reload - movl %edi, 24(%ebx) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 28(%ebx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 32(%ebx) - movl %edx, 36(%ebx) - movl %ecx, 40(%ebx) - movl %eax, 44(%ebx) - movl %ebp, 48(%ebx) - movl %esi, 52(%ebx) - je .LBB218_2 -# BB#1: # %carry - movl %esi, (%esp) # 4-byte Spill - movl 84(%esp), %esi - movl 44(%esp), %ecx # 4-byte Reload - addl (%esi), %ecx - movl %ecx, (%ebx) - movl 16(%esp), %edx # 4-byte Reload - adcl 4(%esi), %edx - movl %edx, 4(%ebx) - movl 36(%esp), %edi # 4-byte Reload - adcl 8(%esi), %edi - movl 12(%esi), %eax - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %edi, 8(%ebx) - movl 16(%esi), %ecx - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl %eax, 12(%ebx) - movl 20(%esi), %eax - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %ecx, 16(%ebx) - movl 24(%esi), %ecx - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %eax, 20(%ebx) - movl 28(%esi), %eax - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %ecx, 24(%ebx) - movl 32(%esi), %ecx - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %eax, 28(%ebx) - movl 36(%esi), %eax - adcl 12(%esp), %eax # 4-byte Folded Reload - movl %ecx, 32(%ebx) - movl 40(%esi), %ecx - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %eax, 36(%ebx) - movl 44(%esi), %eax - adcl 4(%esp), %eax # 4-byte Folded 
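# mcl_fp_sub14L: 14-limb subtraction as a subl/sbbl borrow chain. The raw
# difference is stored first; if the final borrow is set (sbbl $0, %ebx),
# the %carry block re-adds the modulus (84(%esp)) with addl/adcl.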
Reload - movl %ecx, 40(%ebx) - movl %eax, 44(%ebx) - movl 48(%esi), %eax - adcl %ebp, %eax - movl %eax, 48(%ebx) - movl 52(%esi), %eax - adcl (%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%ebx) -.LBB218_2: # %nocarry - addl $52, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end218: - .size mcl_fp_sub14L, .Lfunc_end218-mcl_fp_sub14L - - .globl mcl_fp_subNF14L - .align 16, 0x90 - .type mcl_fp_subNF14L,@function -mcl_fp_subNF14L: # @mcl_fp_subNF14L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $88, %esp - movl 112(%esp), %ecx - movl 52(%ecx), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl (%ecx), %edx - movl 4(%ecx), %eax - movl 116(%esp), %edi - subl (%edi), %edx - movl %edx, 56(%esp) # 4-byte Spill - sbbl 4(%edi), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%ecx), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 44(%ecx), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 40(%ecx), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 36(%ecx), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 32(%ecx), %ebp - movl 28(%ecx), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 24(%ecx), %ebx - movl 20(%ecx), %esi - movl 16(%ecx), %edx - movl 12(%ecx), %eax - movl 8(%ecx), %ecx - sbbl 8(%edi), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - sbbl 12(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - sbbl 16(%edi), %edx - movl %edx, 40(%esp) # 4-byte Spill - sbbl 20(%edi), %esi - movl %esi, 44(%esp) # 4-byte Spill - sbbl 24(%edi), %ebx - movl %ebx, 48(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - sbbl 28(%edi), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - sbbl 32(%edi), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - sbbl 36(%edi), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - sbbl 40(%edi), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - sbbl 44(%edi), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - sbbl 48(%edi), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - sbbl 52(%edi), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl %eax, %esi - sarl $31, %esi - movl %esi, %ecx - addl %ecx, %ecx - movl %esi, %ebp - adcl %ebp, %ebp - shrl $31, %eax - orl %ecx, %eax - movl 120(%esp), %edi - andl 4(%edi), %ebp - andl (%edi), %eax - movl 52(%edi), %ecx - andl %esi, %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 48(%edi), %ecx - andl %esi, %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 44(%edi), %ecx - andl %esi, %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 40(%edi), %ecx - andl %esi, %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 36(%edi), %ecx - andl %esi, %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 32(%edi), %ecx - andl %esi, %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 28(%edi), %ecx - andl %esi, %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 24(%edi), %ecx - andl %esi, %ecx - movl %ecx, (%esp) # 4-byte Spill - movl 20(%edi), %ebx - andl %esi, %ebx - movl 16(%edi), %edx - andl %esi, %edx - movl 12(%edi), %ecx - andl %esi, %ecx - andl 8(%edi), %esi - addl 56(%esp), %eax # 4-byte Folded Reload - adcl 60(%esp), %ebp # 4-byte Folded Reload - movl 108(%esp), %edi - movl %eax, (%edi) - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %ebp, 4(%edi) - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %esi, 8(%edi) - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %ecx, 12(%edi) - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %edx, 16(%edi) - movl (%esp), %eax 
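# mcl_fp_subNF14L: branchless variant of mcl_fp_sub14L. The top borrow
# word is sign-extended into an all-zero/all-one mask (sarl $31), each
# limb of the modulus (120(%esp)) is and-ed with the mask, and the masked
# modulus is added back, so p is re-added exactly when x - y underflowed.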
# 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %ebx, 20(%edi) - movl 4(%esp), %ecx # 4-byte Reload - adcl 84(%esp), %ecx # 4-byte Folded Reload - movl %eax, 24(%edi) - movl 8(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %ecx, 28(%edi) - movl 12(%esp), %ecx # 4-byte Reload - adcl 68(%esp), %ecx # 4-byte Folded Reload - movl %eax, 32(%edi) - movl 16(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %ecx, 36(%edi) - movl 20(%esp), %ecx # 4-byte Reload - adcl 76(%esp), %ecx # 4-byte Folded Reload - movl %eax, 40(%edi) - movl 24(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %ecx, 44(%edi) - movl %eax, 48(%edi) - movl 28(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%edi) - addl $88, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end219: - .size mcl_fp_subNF14L, .Lfunc_end219-mcl_fp_subNF14L - - .globl mcl_fpDbl_add14L - .align 16, 0x90 - .type mcl_fpDbl_add14L,@function -mcl_fpDbl_add14L: # @mcl_fpDbl_add14L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $100, %esp - movl 128(%esp), %ecx - movl 124(%esp), %esi - movl 12(%esi), %edi - movl 16(%esi), %edx - movl 8(%ecx), %ebx - movl (%ecx), %ebp - addl (%esi), %ebp - movl 120(%esp), %eax - movl %ebp, (%eax) - movl 4(%ecx), %ebp - adcl 4(%esi), %ebp - adcl 8(%esi), %ebx - adcl 12(%ecx), %edi - adcl 16(%ecx), %edx - movl %ebp, 4(%eax) - movl 64(%ecx), %ebp - movl %ebx, 8(%eax) - movl 20(%ecx), %ebx - movl %edi, 12(%eax) - movl 20(%esi), %edi - adcl %ebx, %edi - movl 24(%ecx), %ebx - movl %edx, 16(%eax) - movl 24(%esi), %edx - adcl %ebx, %edx - movl 28(%ecx), %ebx - movl %edi, 20(%eax) - movl 28(%esi), %edi - adcl %ebx, %edi - movl 32(%ecx), %ebx - movl %edx, 24(%eax) - movl 32(%esi), %edx - adcl %ebx, %edx - movl 36(%ecx), %ebx - movl %edi, 28(%eax) - movl 36(%esi), %edi - adcl %ebx, %edi - movl 40(%ecx), %ebx - movl %edx, 32(%eax) - movl 40(%esi), %edx - adcl %ebx, %edx - movl 44(%ecx), %ebx - movl %edi, 36(%eax) - movl 44(%esi), %edi - adcl %ebx, %edi - movl 48(%ecx), %ebx - movl %edx, 40(%eax) - movl 48(%esi), %edx - adcl %ebx, %edx - movl 52(%ecx), %ebx - movl %edi, 44(%eax) - movl 52(%esi), %edi - adcl %ebx, %edi - movl 56(%ecx), %ebx - movl %edx, 48(%eax) - movl 56(%esi), %edx - adcl %ebx, %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 60(%ecx), %edx - movl %edi, 52(%eax) - movl 60(%esi), %eax - adcl %edx, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 64(%esi), %eax - adcl %ebp, %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%ecx), %edx - movl 68(%esi), %eax - adcl %edx, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 72(%ecx), %edx - movl 72(%esi), %eax - adcl %edx, %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 76(%ecx), %edx - movl 76(%esi), %eax - adcl %edx, %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%ecx), %edx - movl 80(%esi), %eax - adcl %edx, %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%ecx), %edx - movl 84(%esi), %eax - adcl %edx, %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 88(%ecx), %edx - movl 88(%esi), %eax - adcl %edx, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 92(%ecx), %edx - movl 92(%esi), %eax - adcl %edx, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 96(%ecx), %edx - movl 96(%esi), %eax - adcl %edx, %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 100(%ecx), %edx - movl 100(%esi), %edi - adcl %edx, %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 104(%ecx), %edx - movl 104(%esi), 
%ebx - adcl %edx, %ebx - movl %ebx, 56(%esp) # 4-byte Spill - movl 108(%ecx), %ecx - movl 108(%esi), %esi - adcl %ecx, %esi - sbbl %edx, %edx - andl $1, %edx - movl 132(%esp), %ebp - movl 72(%esp), %ecx # 4-byte Reload - subl (%ebp), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - sbbl 4(%ebp), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - sbbl 8(%ebp), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - sbbl 12(%ebp), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - sbbl 16(%ebp), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - sbbl 20(%ebp), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - sbbl 24(%ebp), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - sbbl 28(%ebp), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - sbbl 32(%ebp), %ecx - movl %ecx, (%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - sbbl 36(%ebp), %ecx - sbbl 40(%ebp), %eax - movl %eax, 36(%esp) # 4-byte Spill - sbbl 44(%ebp), %edi - movl %edi, 40(%esp) # 4-byte Spill - movl %ebx, %eax - movl %esi, %ebx - sbbl 48(%ebp), %eax - movl %eax, 44(%esp) # 4-byte Spill - sbbl 52(%ebp), %esi - sbbl $0, %edx - andl $1, %edx - jne .LBB220_2 -# BB#1: - movl %esi, %ebx -.LBB220_2: - testb %dl, %dl - movl 72(%esp), %eax # 4-byte Reload - movl 68(%esp), %edx # 4-byte Reload - movl 64(%esp), %edi # 4-byte Reload - movl 60(%esp), %ebp # 4-byte Reload - jne .LBB220_4 -# BB#3: - movl %ecx, %edx - movl (%esp), %edi # 4-byte Reload - movl 4(%esp), %ebp # 4-byte Reload - movl 8(%esp), %eax # 4-byte Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload -.LBB220_4: - movl 120(%esp), %esi - movl %eax, 56(%esi) - movl 76(%esp), %eax # 4-byte Reload - movl %eax, 60(%esi) - movl 80(%esp), %eax # 4-byte Reload - movl %eax, 64(%esi) - movl 84(%esp), %eax # 4-byte Reload - movl %eax, 68(%esi) - movl 88(%esp), %eax # 4-byte Reload - movl %eax, 72(%esi) - movl 92(%esp), %eax # 4-byte Reload - movl %eax, 76(%esi) - movl 96(%esp), %eax # 4-byte Reload - movl %eax, 80(%esi) - movl %ebp, 84(%esi) - movl %edi, 88(%esi) - movl %edx, 92(%esi) - movl 52(%esp), %edx # 4-byte Reload - movl 48(%esp), %eax # 4-byte Reload - jne .LBB220_6 -# BB#5: - movl 36(%esp), %eax # 4-byte Reload -.LBB220_6: - movl %eax, 96(%esi) - movl 56(%esp), %ecx # 4-byte Reload - jne .LBB220_8 -# BB#7: - movl 40(%esp), %edx # 4-byte Reload -.LBB220_8: - movl %edx, 100(%esi) - jne .LBB220_10 -# BB#9: - movl 44(%esp), %ecx # 4-byte Reload -.LBB220_10: - movl %ecx, 104(%esi) - movl %ebx, 108(%esi) - addl $100, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end220: - .size mcl_fpDbl_add14L, .Lfunc_end220-mcl_fpDbl_add14L - - .globl mcl_fpDbl_sub14L - .align 16, 0x90 - .type mcl_fpDbl_sub14L,@function -mcl_fpDbl_sub14L: # @mcl_fpDbl_sub14L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $96, %esp - movl 120(%esp), %ebx - movl (%ebx), %eax - movl 4(%ebx), %edx - movl 124(%esp), %ebp - subl 
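# mcl_fpDbl_add14L: 28-limb double-width addition. The low 14 limbs are
# stored directly; the high half is then reduced by a trial sbbl chain
# against the modulus (132(%esp)) and a branchy limb-by-limb select
# between sum and sum - p (.LBB220_*).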
(%ebp), %eax - sbbl 4(%ebp), %edx - movl 8(%ebx), %esi - sbbl 8(%ebp), %esi - movl 116(%esp), %ecx - movl %eax, (%ecx) - movl 12(%ebx), %eax - sbbl 12(%ebp), %eax - movl %edx, 4(%ecx) - movl 16(%ebx), %edx - sbbl 16(%ebp), %edx - movl %esi, 8(%ecx) - movl 20(%ebp), %esi - movl %eax, 12(%ecx) - movl 20(%ebx), %eax - sbbl %esi, %eax - movl 24(%ebp), %esi - movl %edx, 16(%ecx) - movl 24(%ebx), %edx - sbbl %esi, %edx - movl 28(%ebp), %esi - movl %eax, 20(%ecx) - movl 28(%ebx), %eax - sbbl %esi, %eax - movl 32(%ebp), %esi - movl %edx, 24(%ecx) - movl 32(%ebx), %edx - sbbl %esi, %edx - movl 36(%ebp), %esi - movl %eax, 28(%ecx) - movl 36(%ebx), %eax - sbbl %esi, %eax - movl 40(%ebp), %esi - movl %edx, 32(%ecx) - movl 40(%ebx), %edx - sbbl %esi, %edx - movl 44(%ebp), %esi - movl %eax, 36(%ecx) - movl 44(%ebx), %eax - sbbl %esi, %eax - movl 48(%ebp), %esi - movl %edx, 40(%ecx) - movl 48(%ebx), %edx - sbbl %esi, %edx - movl 52(%ebp), %esi - movl %eax, 44(%ecx) - movl 52(%ebx), %eax - sbbl %esi, %eax - movl 56(%ebp), %esi - movl %edx, 48(%ecx) - movl 56(%ebx), %edx - sbbl %esi, %edx - movl %edx, 44(%esp) # 4-byte Spill - movl 60(%ebp), %edx - movl %eax, 52(%ecx) - movl 60(%ebx), %eax - sbbl %edx, %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 64(%ebp), %eax - movl 64(%ebx), %edx - sbbl %eax, %edx - movl %edx, 40(%esp) # 4-byte Spill - movl 68(%ebp), %eax - movl 68(%ebx), %edx - sbbl %eax, %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 72(%ebp), %eax - movl 72(%ebx), %edx - sbbl %eax, %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 76(%ebp), %eax - movl 76(%ebx), %edx - sbbl %eax, %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 80(%ebp), %eax - movl 80(%ebx), %edx - sbbl %eax, %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 84(%ebp), %eax - movl 84(%ebx), %edx - sbbl %eax, %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 88(%ebp), %eax - movl 88(%ebx), %edx - sbbl %eax, %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 92(%ebp), %eax - movl 92(%ebx), %edx - sbbl %eax, %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 96(%ebp), %eax - movl 96(%ebx), %edx - sbbl %eax, %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 100(%ebp), %eax - movl 100(%ebx), %edx - sbbl %eax, %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 104(%ebp), %eax - movl 104(%ebx), %edx - sbbl %eax, %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 108(%ebp), %eax - movl 108(%ebx), %edx - sbbl %eax, %edx - movl %edx, 92(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 128(%esp), %ebp - jne .LBB221_1 -# BB#2: - movl $0, 56(%esp) # 4-byte Folded Spill - jmp .LBB221_3 -.LBB221_1: - movl 52(%ebp), %edx - movl %edx, 56(%esp) # 4-byte Spill -.LBB221_3: - testb %al, %al - jne .LBB221_4 -# BB#5: - movl $0, 24(%esp) # 4-byte Folded Spill - movl $0, 20(%esp) # 4-byte Folded Spill - jmp .LBB221_6 -.LBB221_4: - movl (%ebp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 4(%ebp), %eax - movl %eax, 24(%esp) # 4-byte Spill -.LBB221_6: - jne .LBB221_7 -# BB#8: - movl $0, 32(%esp) # 4-byte Folded Spill - jmp .LBB221_9 -.LBB221_7: - movl 48(%ebp), %eax - movl %eax, 32(%esp) # 4-byte Spill -.LBB221_9: - jne .LBB221_10 -# BB#11: - movl $0, 28(%esp) # 4-byte Folded Spill - jmp .LBB221_12 -.LBB221_10: - movl 44(%ebp), %eax - movl %eax, 28(%esp) # 4-byte Spill -.LBB221_12: - jne .LBB221_13 -# BB#14: - movl $0, 16(%esp) # 4-byte Folded Spill - jmp .LBB221_15 -.LBB221_13: - movl 40(%ebp), %eax - movl %eax, 16(%esp) # 4-byte Spill -.LBB221_15: - jne .LBB221_16 -# BB#17: - movl $0, 12(%esp) # 4-byte Folded Spill - jmp .LBB221_18 
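# mcl_fpDbl_sub14L: 28-limb double-width subtraction; the low 14 limbs are
# stored as the sbbl chain produces them. The .LBB221_* branches select
# either 0 or a limb of the modulus (128(%esp)) according to the final
# borrow; the selected words are then added to the high half, giving a
# conditional add-back of p.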
-.LBB221_16: - movl 36(%ebp), %eax - movl %eax, 12(%esp) # 4-byte Spill -.LBB221_18: - jne .LBB221_19 -# BB#20: - movl $0, 8(%esp) # 4-byte Folded Spill - jmp .LBB221_21 -.LBB221_19: - movl 32(%ebp), %eax - movl %eax, 8(%esp) # 4-byte Spill -.LBB221_21: - jne .LBB221_22 -# BB#23: - movl $0, 4(%esp) # 4-byte Folded Spill - jmp .LBB221_24 -.LBB221_22: - movl 28(%ebp), %eax - movl %eax, 4(%esp) # 4-byte Spill -.LBB221_24: - jne .LBB221_25 -# BB#26: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB221_27 -.LBB221_25: - movl 24(%ebp), %eax - movl %eax, (%esp) # 4-byte Spill -.LBB221_27: - jne .LBB221_28 -# BB#29: - movl $0, %esi - jmp .LBB221_30 -.LBB221_28: - movl 20(%ebp), %esi -.LBB221_30: - jne .LBB221_31 -# BB#32: - movl $0, %edi - jmp .LBB221_33 -.LBB221_31: - movl 16(%ebp), %edi -.LBB221_33: - jne .LBB221_34 -# BB#35: - movl $0, %ebx - jmp .LBB221_36 -.LBB221_34: - movl 12(%ebp), %ebx -.LBB221_36: - jne .LBB221_37 -# BB#38: - xorl %ebp, %ebp - jmp .LBB221_39 -.LBB221_37: - movl 8(%ebp), %ebp -.LBB221_39: - movl 20(%esp), %edx # 4-byte Reload - addl 44(%esp), %edx # 4-byte Folded Reload - movl 24(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %edx, 56(%ecx) - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl %eax, 60(%ecx) - adcl 48(%esp), %ebx # 4-byte Folded Reload - movl %ebp, 64(%ecx) - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %ebx, 68(%ecx) - adcl 60(%esp), %esi # 4-byte Folded Reload - movl %edi, 72(%ecx) - movl (%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %esi, 76(%ecx) - movl 4(%esp), %edx # 4-byte Reload - adcl 68(%esp), %edx # 4-byte Folded Reload - movl %eax, 80(%ecx) - movl 8(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %edx, 84(%ecx) - movl 12(%esp), %edx # 4-byte Reload - adcl 76(%esp), %edx # 4-byte Folded Reload - movl %eax, 88(%ecx) - movl 16(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %edx, 92(%ecx) - movl 28(%esp), %edx # 4-byte Reload - adcl 84(%esp), %edx # 4-byte Folded Reload - movl %eax, 96(%ecx) - movl 32(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %edx, 100(%ecx) - movl %eax, 104(%ecx) - movl 56(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%ecx) - addl $96, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end221: - .size mcl_fpDbl_sub14L, .Lfunc_end221-mcl_fpDbl_sub14L - - .align 16, 0x90 - .type .LmulPv480x32,@function -.LmulPv480x32: # @mulPv480x32 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $104, %esp - movl %edx, %ebp - movl 124(%esp), %esi - movl %esi, %eax - mull 56(%ebp) - movl %edx, 100(%esp) # 4-byte Spill - movl %eax, 96(%esp) # 4-byte Spill - movl %esi, %eax - mull 52(%ebp) - movl %edx, 92(%esp) # 4-byte Spill - movl %eax, 88(%esp) # 4-byte Spill - movl %esi, %eax - mull 48(%ebp) - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 80(%esp) # 4-byte Spill - movl %esi, %eax - mull 44(%ebp) - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 72(%esp) # 4-byte Spill - movl %esi, %eax - mull 40(%ebp) - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte Spill - movl %esi, %eax - mull 36(%ebp) - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - movl %esi, %eax - mull 32(%ebp) - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %esi, %eax - mull 28(%ebp) - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - 
movl %esi, %eax - mull 24(%ebp) - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %esi, %eax - mull 20(%ebp) - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %esi, %eax - mull 16(%ebp) - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %esi, %eax - mull 12(%ebp) - movl %edx, 12(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %esi, %eax - mull 8(%ebp) - movl %edx, %edi - movl %eax, 4(%esp) # 4-byte Spill - movl %esi, %eax - mull 4(%ebp) - movl %edx, %ebx - movl %eax, (%esp) # 4-byte Spill - movl %esi, %eax - mull (%ebp) - movl %eax, (%ecx) - addl (%esp), %edx # 4-byte Folded Reload - movl %edx, 4(%ecx) - adcl 4(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 8(%ecx) - adcl 8(%esp), %edi # 4-byte Folded Reload - movl %edi, 12(%ecx) - movl 12(%esp), %eax # 4-byte Reload - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 16(%ecx) - movl 20(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%ecx) - movl 28(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%ecx) - movl 36(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%ecx) - movl 44(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%ecx) - movl 52(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%ecx) - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%ecx) - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%ecx) - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%ecx) - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%ecx) - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%ecx) - movl 100(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 60(%ecx) - movl %ecx, %eax - addl $104, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end222: - .size .LmulPv480x32, .Lfunc_end222-.LmulPv480x32 - - .globl mcl_fp_mulUnitPre15L - .align 16, 0x90 - .type mcl_fp_mulUnitPre15L,@function -mcl_fp_mulUnitPre15L: # @mcl_fp_mulUnitPre15L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $124, %esp - calll .L223$pb -.L223$pb: - popl %ebx -.Ltmp44: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp44-.L223$pb), %ebx - movl 152(%esp), %eax - movl %eax, (%esp) - leal 56(%esp), %ecx - movl 148(%esp), %edx - calll .LmulPv480x32 - movl 116(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 112(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 108(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 104(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 100(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 96(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 92(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 88(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 84(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 80(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 76(%esp), %ebp - movl 72(%esp), %ebx - movl 68(%esp), %edi - movl 64(%esp), %esi - movl 56(%esp), %edx - movl 60(%esp), %ecx - movl 144(%esp), %eax - movl %edx, (%eax) - movl %ecx, 4(%eax) - movl %esi, 8(%eax) - movl %edi, 12(%eax) - movl %ebx, 16(%eax) - movl %ebp, 20(%eax) - movl 
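# .LmulPv480x32: local helper that multiplies a 15-limb (480-bit) operand
# (pointer in %edx) by a single 32-bit word (124(%esp)). Fifteen mull
# instructions form the partial products; the addl/adcl chain folds them
# into a carry-propagated 16-limb result at the buffer pointed to by %ecx.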
16(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 40(%eax) - movl 36(%esp), %ecx # 4-byte Reload - movl %ecx, 44(%eax) - movl 40(%esp), %ecx # 4-byte Reload - movl %ecx, 48(%eax) - movl 44(%esp), %ecx # 4-byte Reload - movl %ecx, 52(%eax) - movl 48(%esp), %ecx # 4-byte Reload - movl %ecx, 56(%eax) - movl 52(%esp), %ecx # 4-byte Reload - movl %ecx, 60(%eax) - addl $124, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end223: - .size mcl_fp_mulUnitPre15L, .Lfunc_end223-mcl_fp_mulUnitPre15L - - .globl mcl_fpDbl_mulPre15L - .align 16, 0x90 - .type mcl_fpDbl_mulPre15L,@function -mcl_fpDbl_mulPre15L: # @mcl_fpDbl_mulPre15L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1084, %esp # imm = 0x43C - calll .L224$pb -.L224$pb: - popl %esi -.Ltmp45: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp45-.L224$pb), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 1112(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1016(%esp), %ecx - movl 1108(%esp), %edi - movl %edi, %edx - movl %esi, %ebx - calll .LmulPv480x32 - movl 1076(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 1072(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 1068(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 1060(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 1056(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1052(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 1044(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1040(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1036(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 1032(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 1028(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 1024(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 1016(%esp), %eax - movl 1020(%esp), %ebp - movl 1104(%esp), %ecx - movl %eax, (%ecx) - movl 1112(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 952(%esp), %ecx - movl %edi, %edx - movl %esi, %ebx - calll .LmulPv480x32 - addl 952(%esp), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 1012(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 1008(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 1004(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 1000(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 996(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 992(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 988(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 984(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 980(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 976(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 972(%esp), %edi - movl 968(%esp), %esi - movl 964(%esp), %edx - movl 956(%esp), %eax - movl 960(%esp), %ecx - movl 1104(%esp), %ebp - movl 16(%esp), %ebx # 4-byte Reload - movl %ebx, 4(%ebp) - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 
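# mcl_fp_mulUnitPre15L: thin wrapper over .LmulPv480x32. The call/pop pair
# at .L223$pb materializes the GOT base in %ebx (PIC), the product is
# built in a stack buffer, and its 16 limbs are copied to the destination
# (144(%esp)).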
24(%esp) # 4-byte Spill - adcl 36(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl 1112(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 888(%esp), %ecx - movl 1108(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 72(%esp), %eax # 4-byte Reload - addl 888(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 948(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 944(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 940(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 936(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 932(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 928(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 924(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 920(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 916(%esp), %ebx - movl 912(%esp), %edi - movl 908(%esp), %esi - movl 904(%esp), %edx - movl 900(%esp), %ecx - movl 892(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 896(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 72(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%eax) - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 112(%esp) # 4-byte Folded Spill - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 68(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 
104(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1112(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 824(%esp), %ecx - movl 1108(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 824(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 884(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 880(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 876(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 872(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 868(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 864(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 860(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 856(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 852(%esp), %ebx - movl 848(%esp), %edi - movl 844(%esp), %esi - movl 840(%esp), %edx - movl 836(%esp), %ecx - movl 828(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 832(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 112(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%eax) - movl 60(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1112(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 760(%esp), %ecx - movl 1108(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 760(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 820(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 816(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 812(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 808(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 804(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 800(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 796(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 792(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 788(%esp), %ebx - movl 784(%esp), %edi - movl 780(%esp), %esi - movl 776(%esp), %edx - movl 772(%esp), %ecx - movl 764(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 768(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 60(%esp), %ebp # 4-byte Reload - movl %ebp, 
16(%eax) - movl 112(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - adcl 68(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1112(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 696(%esp), %ecx - movl 1108(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 696(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 756(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 752(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 748(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 744(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 740(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 736(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 732(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 728(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 724(%esp), %ebx - movl 720(%esp), %edi - movl 716(%esp), %esi - movl 712(%esp), %edx - movl 708(%esp), %ecx - movl 700(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 704(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 112(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 60(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 
4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1112(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 632(%esp), %ecx - movl 1108(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 632(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 692(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 688(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 684(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 680(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 676(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 672(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 668(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 664(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 660(%esp), %ebx - movl 656(%esp), %edi - movl 652(%esp), %esi - movl 648(%esp), %edx - movl 644(%esp), %ecx - movl 636(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 640(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 24(%eax) - movl 112(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 64(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 68(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1112(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 568(%esp), %ecx - movl 1108(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 568(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 628(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 624(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 620(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 616(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 612(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 608(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - 
movl 604(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 600(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 596(%esp), %ebx - movl 592(%esp), %edi - movl 588(%esp), %esi - movl 584(%esp), %edx - movl 580(%esp), %ecx - movl 572(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 576(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 112(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 68(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1112(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 504(%esp), %ecx - movl 1108(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 504(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 564(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 560(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 556(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 552(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 548(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 544(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 540(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 536(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 532(%esp), %ebx - movl 528(%esp), %edi - movl 524(%esp), %esi - movl 520(%esp), %edx - movl 516(%esp), %ecx - movl 508(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 512(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 32(%eax) - movl 112(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - 
adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1112(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 440(%esp), %ecx - movl 1108(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 440(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 500(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 496(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 492(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 488(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 484(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 480(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 476(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 472(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 468(%esp), %ebx - movl 464(%esp), %edi - movl 460(%esp), %esi - movl 456(%esp), %edx - movl 452(%esp), %ecx - movl 444(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 448(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 112(%esp), %ebp # 4-byte Reload - movl %ebp, 36(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1112(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 376(%esp), %ecx - movl 1108(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 376(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 
436(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 432(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 428(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 424(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 420(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 416(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 412(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 408(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 404(%esp), %ebx - movl 400(%esp), %edi - movl 396(%esp), %esi - movl 392(%esp), %edx - movl 388(%esp), %ecx - movl 380(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 384(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 40(%eax) - movl 112(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1112(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 312(%esp), %ecx - movl 1108(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 312(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 372(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 368(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 364(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 360(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 356(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 352(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 348(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 344(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 340(%esp), %ebx - movl 336(%esp), %edi - movl 332(%esp), %esi - movl 328(%esp), %edx - movl 324(%esp), %ecx - movl 316(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 320(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 112(%esp), %ebp # 4-byte Reload - movl %ebp, 44(%eax) - movl 36(%esp), %eax # 4-byte Reload - adcl %eax, 108(%esp) # 4-byte Folded Spill - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 
20(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 48(%esp) # 4-byte Folded Spill - movl 1112(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 248(%esp), %ecx - movl 1108(%esp), %eax - movl %eax, %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 108(%esp), %eax # 4-byte Reload - addl 248(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 308(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 304(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 300(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 296(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 292(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 288(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 284(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 280(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 276(%esp), %ebx - movl 272(%esp), %edi - movl 268(%esp), %edx - movl 264(%esp), %ecx - movl 260(%esp), %eax - movl 252(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 256(%esp), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 108(%esp), %esi # 4-byte Reload - movl 1104(%esp), %ebp - movl %esi, 48(%ebp) - movl 112(%esp), %esi # 4-byte Reload - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 112(%esp) # 4-byte Spill - movl 20(%esp), %esi # 4-byte Reload - adcl %esi, 36(%esp) # 4-byte Folded Spill - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 32(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 56(%esp), %esi # 4-byte Folded Reload - movl %esi, 100(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload 
- movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1112(%esp), %eax - movl 52(%eax), %eax - movl %eax, (%esp) - leal 184(%esp), %ecx - movl 1108(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 184(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 244(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 240(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 236(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 232(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 228(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 224(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 220(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 216(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 212(%esp), %ebx - movl 208(%esp), %edx - movl 204(%esp), %ecx - movl 200(%esp), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 196(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 188(%esp), %eax - movl 192(%esp), %esi - movl 112(%esp), %ebp # 4-byte Reload - movl 1104(%esp), %edi - movl %ebp, 52(%edi) - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl %esi, %ebp - adcl 12(%esp), %ebp # 4-byte Folded Reload - movl 72(%esp), %esi # 4-byte Reload - adcl 20(%esp), %esi # 4-byte Folded Reload - movl 24(%esp), %edi # 4-byte Reload - adcl %edi, 40(%esp) # 4-byte Folded Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 28(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 52(%esp), %ebx # 4-byte Reload - adcl %ebx, 64(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl 1112(%esp), %eax - movl 56(%eax), %eax - movl %eax, (%esp) - leal 120(%esp), %ecx - movl 1108(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 120(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 124(%esp), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - adcl 128(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 180(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 176(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 172(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 168(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 164(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 160(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 156(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 152(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 148(%esp), %ebp - movl 
144(%esp), %edi - movl 140(%esp), %esi - movl 136(%esp), %edx - movl 132(%esp), %ecx - movl 1104(%esp), %eax - movl 112(%esp), %ebx # 4-byte Reload - movl %ebx, 56(%eax) - movl 32(%esp), %ebx # 4-byte Reload - movl %ebx, 60(%eax) - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl 72(%esp), %ebx # 4-byte Reload - movl %ebx, 64(%eax) - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %ecx, 68(%eax) - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %edx, 72(%eax) - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %esi, 76(%eax) - adcl 76(%esp), %ebp # 4-byte Folded Reload - movl %edi, 80(%eax) - movl 44(%esp), %edx # 4-byte Reload - adcl 64(%esp), %edx # 4-byte Folded Reload - movl %ebp, 84(%eax) - movl 52(%esp), %ecx # 4-byte Reload - adcl 84(%esp), %ecx # 4-byte Folded Reload - movl %edx, 88(%eax) - movl 68(%esp), %edx # 4-byte Reload - adcl 92(%esp), %edx # 4-byte Folded Reload - movl %ecx, 92(%eax) - movl 80(%esp), %ecx # 4-byte Reload - adcl 104(%esp), %ecx # 4-byte Folded Reload - movl %edx, 96(%eax) - movl 88(%esp), %edx # 4-byte Reload - adcl 108(%esp), %edx # 4-byte Folded Reload - movl %ecx, 100(%eax) - movl 96(%esp), %ecx # 4-byte Reload - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %edx, 104(%eax) - movl %ecx, 108(%eax) - movl 100(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 112(%eax) - movl 116(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 116(%eax) - addl $1084, %esp # imm = 0x43C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end224: - .size mcl_fpDbl_mulPre15L, .Lfunc_end224-mcl_fpDbl_mulPre15L - - .globl mcl_fpDbl_sqrPre15L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre15L,@function -mcl_fpDbl_sqrPre15L: # @mcl_fpDbl_sqrPre15L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1084, %esp # imm = 0x43C - calll .L225$pb -.L225$pb: - popl %ebx -.Ltmp46: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp46-.L225$pb), %ebx - movl %ebx, 116(%esp) # 4-byte Spill - movl 1108(%esp), %edx - movl (%edx), %eax - movl %eax, (%esp) - leal 1016(%esp), %ecx - movl %edx, %edi - movl %ebx, %esi - calll .LmulPv480x32 - movl 1076(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 1072(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 1068(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 1064(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 1060(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 1056(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1052(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 1044(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1040(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1036(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 1032(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 1028(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 1024(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 1016(%esp), %eax - movl 1020(%esp), %ebp - movl 1104(%esp), %ecx - movl %eax, (%ecx) - movl %edi, %edx - movl 4(%edx), %eax - movl %eax, (%esp) - leal 952(%esp), %ecx - movl %esi, %ebx - calll .LmulPv480x32 - addl 952(%esp), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 1012(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 1008(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 1004(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 1000(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 996(%esp), %eax - movl %eax, 84(%esp) # 
4-byte Spill - movl 992(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 988(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 984(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 980(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 976(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 972(%esp), %edi - movl 968(%esp), %esi - movl 964(%esp), %edx - movl 956(%esp), %eax - movl 960(%esp), %ecx - movl 1104(%esp), %ebp - movl 16(%esp), %ebx # 4-byte Reload - movl %ebx, 4(%ebp) - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 36(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 8(%edx), %eax - movl %eax, (%esp) - leal 888(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 72(%esp), %eax # 4-byte Reload - addl 888(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 948(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 944(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 940(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 936(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 932(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 928(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 924(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 920(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 916(%esp), %ebx - movl 912(%esp), %edi - movl 908(%esp), %esi - movl 904(%esp), %edx - movl 900(%esp), %ecx - movl 892(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 896(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 72(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%eax) - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 112(%esp) # 4-byte Folded Spill - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 68(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte 
Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 12(%edx), %eax - movl %eax, (%esp) - leal 824(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 824(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 884(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 880(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 876(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 872(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 868(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 864(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 860(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 856(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 852(%esp), %ebx - movl 848(%esp), %edi - movl 844(%esp), %esi - movl 840(%esp), %edx - movl 836(%esp), %ecx - movl 828(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 832(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 112(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%eax) - movl 60(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 16(%edx), %eax - movl %eax, (%esp) - leal 760(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 760(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 820(%esp), %eax - movl %eax, 44(%esp) # 4-byte 
Spill - movl 816(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 812(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 808(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 804(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 800(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 796(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 792(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 788(%esp), %ebx - movl 784(%esp), %edi - movl 780(%esp), %esi - movl 776(%esp), %edx - movl 772(%esp), %ecx - movl 764(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 768(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 60(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%eax) - movl 112(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - adcl 68(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 20(%edx), %eax - movl %eax, (%esp) - leal 696(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 696(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 756(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 752(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 748(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 744(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 740(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 736(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 732(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 728(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 724(%esp), %ebx - movl 720(%esp), %edi - movl 716(%esp), %esi - movl 712(%esp), %edx - movl 708(%esp), %ecx - movl 700(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 704(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 112(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - 
movl %edx, 20(%esp) # 4-byte Spill - adcl 60(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 24(%edx), %eax - movl %eax, (%esp) - leal 632(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 632(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 692(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 688(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 684(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 680(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 676(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 672(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 668(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 664(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 660(%esp), %ebx - movl 656(%esp), %edi - movl 652(%esp), %esi - movl 648(%esp), %edx - movl 644(%esp), %ecx - movl 636(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 640(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 24(%eax) - movl 112(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 64(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 68(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 68(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte 
Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 28(%edx), %eax - movl %eax, (%esp) - leal 568(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 568(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 628(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 624(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 620(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 616(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 612(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 608(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 604(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 600(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 596(%esp), %ebx - movl 592(%esp), %edi - movl 588(%esp), %esi - movl 584(%esp), %edx - movl 580(%esp), %ecx - movl 572(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 576(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 112(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 68(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 32(%edx), %eax - movl %eax, (%esp) - leal 504(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 504(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 564(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 560(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 556(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 552(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 548(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 544(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 540(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 536(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 532(%esp), %ebx - movl 528(%esp), %edi - movl 524(%esp), %esi - movl 520(%esp), %edx - movl 516(%esp), %ecx - movl 508(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 512(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 32(%eax) - movl 112(%esp), 
%eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 36(%edx), %eax - movl %eax, (%esp) - leal 440(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 440(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 500(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 496(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 492(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 488(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 484(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 480(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 476(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 472(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 468(%esp), %ebx - movl 464(%esp), %edi - movl 460(%esp), %esi - movl 456(%esp), %edx - movl 452(%esp), %ecx - movl 444(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 448(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 112(%esp), %ebp # 4-byte Reload - movl %ebp, 36(%eax) - movl 56(%esp), %ebp # 4-byte Reload - adcl 36(%esp), %ebp # 4-byte Folded Reload - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 24(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 12(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - 
adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 40(%edx), %eax - movl %eax, (%esp) - leal 376(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 376(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 436(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 432(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 428(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 424(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 420(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 416(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 412(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 408(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 404(%esp), %ebx - movl 400(%esp), %edi - movl 396(%esp), %esi - movl 392(%esp), %edx - movl 388(%esp), %ecx - movl 380(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 384(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 56(%esp), %ebp # 4-byte Reload - movl %ebp, 40(%eax) - movl 112(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 12(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 44(%edx), %eax - movl %eax, (%esp) - leal 312(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 312(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 372(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 368(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 364(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 360(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 356(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 352(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 348(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 344(%esp), %eax - movl %eax, 64(%esp) # 
4-byte Spill - movl 340(%esp), %ebx - movl 336(%esp), %edi - movl 332(%esp), %esi - movl 328(%esp), %edx - movl 324(%esp), %ecx - movl 316(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 320(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl 112(%esp), %ebp # 4-byte Reload - movl %ebp, 44(%eax) - movl 36(%esp), %eax # 4-byte Reload - adcl %eax, 108(%esp) # 4-byte Folded Spill - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - adcl 24(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 28(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 32(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 16(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl %eax, 56(%esp) # 4-byte Folded Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, 48(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 48(%edx), %eax - movl %eax, (%esp) - leal 248(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 108(%esp), %eax # 4-byte Reload - addl 248(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 308(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 304(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 300(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 296(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 292(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 288(%esp), %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 284(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 280(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 276(%esp), %ebx - movl 272(%esp), %edi - movl 268(%esp), %edx - movl 264(%esp), %ecx - movl 260(%esp), %eax - movl 252(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 256(%esp), %esi - movl %esi, 36(%esp) # 4-byte Spill - movl 108(%esp), %esi # 4-byte Reload - movl 1104(%esp), %ebp - movl %esi, 48(%ebp) - movl 112(%esp), %esi # 4-byte Reload - adcl 40(%esp), %esi # 4-byte Folded Reload - movl %esi, 112(%esp) # 4-byte Spill - movl 20(%esp), %esi # 4-byte Reload - adcl %esi, 36(%esp) # 4-byte Folded Spill - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 12(%esp) # 4-byte Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 20(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 24(%esp) # 4-byte Spill - adcl 16(%esp), %edi # 4-byte Folded Reload - movl %edi, 28(%esp) # 4-byte Spill - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 32(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 
100(%esp), %esi # 4-byte Reload - adcl 56(%esp), %esi # 4-byte Folded Reload - movl %esi, 100(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl %eax, 52(%esp) # 4-byte Folded Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 44(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 52(%edx), %eax - movl %eax, (%esp) - leal 184(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 184(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 244(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 240(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 236(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 232(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 228(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 224(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 220(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 216(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 212(%esp), %ebx - movl 208(%esp), %edx - movl 204(%esp), %ecx - movl 200(%esp), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 196(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 188(%esp), %eax - movl 192(%esp), %esi - movl 112(%esp), %ebp # 4-byte Reload - movl 1104(%esp), %edi - movl %ebp, 52(%edi) - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl %esi, %ebp - adcl 12(%esp), %ebp # 4-byte Folded Reload - movl 72(%esp), %esi # 4-byte Reload - adcl 20(%esp), %esi # 4-byte Folded Reload - movl 24(%esp), %edi # 4-byte Reload - adcl %edi, 40(%esp) # 4-byte Folded Spill - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 28(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 36(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 52(%esp), %ebx # 4-byte Reload - adcl %ebx, 64(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl 1108(%esp), %edx - movl 56(%edx), %eax - movl %eax, (%esp) - leal 120(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 112(%esp), %eax # 4-byte Reload - addl 120(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 124(%esp), %ebp - movl %ebp, 32(%esp) # 4-byte Spill - adcl 128(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 180(%esp), %eax - movl %eax, 116(%esp) 
# 4-byte Spill - movl 176(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 172(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 168(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 164(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 160(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 156(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 152(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 148(%esp), %ebp - movl 144(%esp), %edi - movl 140(%esp), %esi - movl 136(%esp), %edx - movl 132(%esp), %ecx - movl 1104(%esp), %eax - movl 112(%esp), %ebx # 4-byte Reload - movl %ebx, 56(%eax) - movl 32(%esp), %ebx # 4-byte Reload - movl %ebx, 60(%eax) - adcl 40(%esp), %ecx # 4-byte Folded Reload - movl 72(%esp), %ebx # 4-byte Reload - movl %ebx, 64(%eax) - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %ecx, 68(%eax) - adcl 36(%esp), %esi # 4-byte Folded Reload - movl %edx, 72(%eax) - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %esi, 76(%eax) - adcl 76(%esp), %ebp # 4-byte Folded Reload - movl %edi, 80(%eax) - movl 44(%esp), %edx # 4-byte Reload - adcl 64(%esp), %edx # 4-byte Folded Reload - movl %ebp, 84(%eax) - movl 52(%esp), %ecx # 4-byte Reload - adcl 84(%esp), %ecx # 4-byte Folded Reload - movl %edx, 88(%eax) - movl 68(%esp), %edx # 4-byte Reload - adcl 92(%esp), %edx # 4-byte Folded Reload - movl %ecx, 92(%eax) - movl 80(%esp), %ecx # 4-byte Reload - adcl 104(%esp), %ecx # 4-byte Folded Reload - movl %edx, 96(%eax) - movl 88(%esp), %edx # 4-byte Reload - adcl 108(%esp), %edx # 4-byte Folded Reload - movl %ecx, 100(%eax) - movl 96(%esp), %ecx # 4-byte Reload - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %edx, 104(%eax) - movl %ecx, 108(%eax) - movl 100(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 112(%eax) - movl 116(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 116(%eax) - addl $1084, %esp # imm = 0x43C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end225: - .size mcl_fpDbl_sqrPre15L, .Lfunc_end225-mcl_fpDbl_sqrPre15L - - .globl mcl_fp_mont15L - .align 16, 0x90 - .type mcl_fp_mont15L,@function -mcl_fp_mont15L: # @mcl_fp_mont15L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $2044, %esp # imm = 0x7FC - calll .L226$pb -.L226$pb: - popl %ebx -.Ltmp47: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp47-.L226$pb), %ebx - movl 2076(%esp), %eax - movl -4(%eax), %esi - movl %esi, 52(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1976(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 1976(%esp), %ebp - movl 1980(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl %ebp, %eax - imull %esi, %eax - movl 2036(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 2032(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 2028(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 2024(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 2020(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 2016(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 2012(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 2008(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 2004(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 2000(%esp), %edi - movl 1996(%esp), %esi - movl 1992(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 1988(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 1984(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl %eax, (%esp) - leal 1912(%esp), 
%ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - addl 1912(%esp), %ebp - movl 88(%esp), %eax # 4-byte Reload - adcl 1916(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1920(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1924(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1928(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 1932(%esp), %esi - adcl 1936(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 1940(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1944(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1948(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1952(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1956(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1960(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1964(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1968(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %ebp # 4-byte Reload - adcl 1972(%esp), %ebp - sbbl %eax, %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 1848(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 116(%esp), %eax # 4-byte Reload - andl $1, %eax - movl 88(%esp), %edx # 4-byte Reload - addl 1848(%esp), %edx - movl 96(%esp), %ecx # 4-byte Reload - adcl 1852(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1856(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1860(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl 1864(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - adcl 1868(%esp), %edi - movl 56(%esp), %ecx # 4-byte Reload - adcl 1872(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1876(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1880(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1884(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1888(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1892(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1896(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1900(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - adcl 1904(%esp), %ebp - movl %ebp, 108(%esp) # 4-byte Spill - adcl 1908(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %edx, %eax - movl %edx, %esi - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1784(%esp), %ecx - movl 2076(%esp), %eax - movl %eax, %edx - calll .LmulPv480x32 - andl $1, %ebp - movl %ebp, %ecx - addl 1784(%esp), %esi - movl 96(%esp), %eax # 4-byte Reload - adcl 1788(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1792(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1796(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill 
- movl 80(%esp), %eax # 4-byte Reload - adcl 1800(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 1804(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1808(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 1812(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 1816(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1820(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1824(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1828(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1832(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %esi # 4-byte Reload - adcl 1836(%esp), %esi - movl 108(%esp), %ebp # 4-byte Reload - adcl 1840(%esp), %ebp - movl 116(%esp), %eax # 4-byte Reload - adcl 1844(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 1720(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 96(%esp), %ecx # 4-byte Reload - addl 1720(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1724(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1728(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1732(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1736(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1740(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1744(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1748(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1752(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 1756(%esp), %edi - movl 100(%esp), %eax # 4-byte Reload - adcl 1760(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1764(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 1768(%esp), %esi - movl %esi, 104(%esp) # 4-byte Spill - adcl 1772(%esp), %ebp - movl %ebp, 108(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1776(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %esi # 4-byte Reload - adcl 1780(%esp), %esi - sbbl %ebp, %ebp - movl %ecx, %eax - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1656(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - andl $1, %ebp - movl %ebp, %ecx - movl 96(%esp), %eax # 4-byte Reload - addl 1656(%esp), %eax - movl 84(%esp), %eax # 4-byte Reload - adcl 1660(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1664(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1668(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1672(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1676(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1680(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 
1684(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %ebp # 4-byte Reload - adcl 1688(%esp), %ebp - adcl 1692(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1696(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1700(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1704(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1708(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 116(%esp), %edi # 4-byte Reload - adcl 1712(%esp), %edi - adcl 1716(%esp), %esi - movl %esi, 88(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 1592(%esp), %ecx - movl 2068(%esp), %eax - movl %eax, %edx - calll .LmulPv480x32 - movl 84(%esp), %ecx # 4-byte Reload - addl 1592(%esp), %ecx - movl 92(%esp), %eax # 4-byte Reload - adcl 1596(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1600(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1604(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1608(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1612(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1616(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 1620(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1624(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 1628(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 1632(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1636(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1640(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 1644(%esp), %edi - movl %edi, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1648(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1652(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %ebp - movl %ebp, %eax - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1528(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - andl $1, %edi - movl %edi, %ecx - addl 1528(%esp), %ebp - movl 92(%esp), %eax # 4-byte Reload - adcl 1532(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1536(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1540(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 1544(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 1548(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1552(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1556(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1560(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1564(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 112(%esp), %edi # 4-byte Reload - adcl 1568(%esp), %edi - movl 104(%esp), %esi # 4-byte Reload - adcl 1572(%esp), %esi - movl 
108(%esp), %eax # 4-byte Reload - adcl 1576(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1580(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1584(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1588(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 2072(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 1464(%esp), %ecx - movl 2068(%esp), %edx - calll .LmulPv480x32 - movl 92(%esp), %ecx # 4-byte Reload - addl 1464(%esp), %ecx - movl 80(%esp), %eax # 4-byte Reload - adcl 1468(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1472(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 1476(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1480(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 1484(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 1488(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1492(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1496(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 1500(%esp), %edi - movl %edi, 112(%esp) # 4-byte Spill - adcl 1504(%esp), %esi - movl %esi, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1508(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 116(%esp), %esi # 4-byte Reload - adcl 1512(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 1516(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1520(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1524(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 92(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 52(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1400(%esp), %ecx - movl 2076(%esp), %edx - calll .LmulPv480x32 - movl 92(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 1400(%esp), %edi - movl 80(%esp), %ecx # 4-byte Reload - adcl 1404(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1408(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1412(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1416(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - adcl 1420(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1424(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1428(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 100(%esp), %edi # 4-byte Reload - adcl 1432(%esp), %edi - movl 112(%esp), %ecx # 4-byte Reload - adcl 1436(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1440(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1444(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - adcl 1448(%esp), %esi - movl %esi, %ebp - movl 88(%esp), %esi # 4-byte Reload - adcl 1452(%esp), %esi - movl 96(%esp), %ecx # 4-byte Reload - adcl 1456(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1460(%esp), %ecx - movl %ecx, 
[The rest of the removed, LLVM-generated i386 body of mcl_fp_mont15L is elided here. Each remaining round loads the next 32-bit word of the multiplier from 2072(%esp), calls .LmulPv480x32, folds the 15-limb partial product into the stack-spilled accumulator with an adcl carry chain, multiplies the low limb by the Montgomery constant cached at 52(%esp) (imull), and calls .LmulPv480x32 again against the modulus at 2076(%esp) to reduce. After the last round the code subtracts the modulus limb by limb (subl/sbbl) and selects the reduced or unreduced limbs through the .LBB226_1 to .LBB226_30 branches before storing the 15-limb result to 2064(%esp).]
- addl $2044, %esp # imm = 0x7FC
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end226:
- .size mcl_fp_mont15L, .Lfunc_end226-mcl_fp_mont15L
-
- .globl mcl_fp_montNF15L
- .align 16, 0x90
- .type mcl_fp_montNF15L,@function
-mcl_fp_montNF15L: # @mcl_fp_montNF15L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $2028, %esp # imm = 0x7EC
- calll .L227$pb
-.L227$pb:
- popl %ebx
-.Ltmp48:
- addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp48-.L227$pb), %ebx
- movl 2060(%esp), %eax
- movl -4(%eax), %esi
- movl %esi, 36(%esp) # 4-byte Spill
[The rest of the removed body of mcl_fp_montNF15L is elided here. It is the NF variant of the same 15-limb Montgomery multiplication and repeats the pattern above: for each 32-bit word of the multiplier at 2056(%esp), a .LmulPv480x32 call, a 15-limb adcl carry chain through the spill slots, an imull by the constant cached at 36(%esp), and a reducing .LmulPv480x32 call against the modulus at 2060(%esp).]
4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - adcl 528(%esp), %esi - movl 52(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 2056(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 424(%esp), %ecx - movl 2052(%esp), %edx - calll .LmulPv480x32 - movl 484(%esp), %edx - movl 80(%esp), %ecx # 4-byte Reload - addl 424(%esp), %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 440(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - adcl 444(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl %esi, %edi - adcl 460(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 80(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 360(%esp), %ecx - movl 2060(%esp), %edx - calll .LmulPv480x32 - addl 360(%esp), %ebp - movl 76(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 96(%esp), %esi # 4-byte Reload - adcl 368(%esp), %esi - movl 100(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %ebp # 4-byte Reload - adcl 376(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 396(%esp), %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 400(%esp), %edi - movl 56(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 420(%esp), 
%eax - movl %eax, 80(%esp) # 4-byte Spill - movl 2056(%esp), %eax - movl 52(%eax), %eax - movl %eax, (%esp) - leal 296(%esp), %ecx - movl 2052(%esp), %edx - calll .LmulPv480x32 - movl 356(%esp), %edx - movl 76(%esp), %ecx # 4-byte Reload - addl 296(%esp), %ecx - adcl 300(%esp), %esi - movl %esi, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 308(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 332(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - adcl 336(%esp), %edi - movl 44(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 76(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 36(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 232(%esp), %ecx - movl 2060(%esp), %edx - calll .LmulPv480x32 - addl 232(%esp), %esi - movl 96(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %ebp # 4-byte Reload - adcl 240(%esp), %ebp - movl 92(%esp), %esi # 4-byte Reload - adcl 244(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 272(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 44(%esp), %edi # 4-byte Reload - adcl 276(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 2056(%esp), %eax - movl 56(%eax), %eax - movl %eax, (%esp) - leal 168(%esp), %ecx - movl 2052(%esp), %edx - calll .LmulPv480x32 - movl 228(%esp), %edx - movl 96(%esp), %ecx # 4-byte Reload - addl 168(%esp), %ecx - adcl 172(%esp), %ebp - movl %ebp, 100(%esp) # 4-byte Spill - adcl 176(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 180(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 184(%esp), %eax - 
movl %eax, 88(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 188(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 192(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 208(%esp), %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 212(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %edi - movl %eax, (%esp) - leal 104(%esp), %ecx - movl 2060(%esp), %eax - movl %eax, %edx - calll .LmulPv480x32 - addl 104(%esp), %edi - movl 68(%esp), %edi # 4-byte Reload - movl 100(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 112(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl %ecx, %ebx - adcl 116(%esp), %edi - movl 88(%esp), %ecx # 4-byte Reload - adcl 120(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - adcl 124(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 128(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 132(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 136(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 140(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 44(%esp), %ecx # 4-byte Reload - adcl 144(%esp), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - adcl 148(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 152(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 156(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 160(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 164(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl %eax, %edx - movl 2060(%esp), %ecx - subl (%ecx), %edx - movl %ebx, %ebp - sbbl 4(%ecx), %ebp - movl %edi, %ebx - sbbl 8(%ecx), %ebx - movl 88(%esp), %eax # 4-byte Reload - sbbl 12(%ecx), %eax - sbbl 16(%ecx), %esi - movl %esi, 4(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - sbbl 20(%ecx), %esi - movl %esi, 8(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - sbbl 24(%ecx), %esi - movl %esi, 12(%esp) # 4-byte Spill - movl 52(%esp), %esi # 4-byte Reload - sbbl 28(%ecx), %esi - movl %esi, 16(%esp) # 4-byte Spill - movl 56(%esp), %esi # 4-byte Reload - sbbl 32(%ecx), %esi - movl %esi, 20(%esp) # 4-byte Spill - movl 44(%esp), %esi # 4-byte Reload - sbbl 36(%ecx), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 60(%esp), %esi # 4-byte Reload - sbbl 40(%ecx), %esi - movl %esi, 28(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - sbbl 44(%ecx), %esi - movl %esi, 32(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - sbbl 48(%ecx), %esi - movl %esi, 
36(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - sbbl 52(%ecx), %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 96(%esp), %esi # 4-byte Reload - sbbl 56(%ecx), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl %esi, %ecx - sarl $31, %ecx - testl %ecx, %ecx - movl 100(%esp), %ecx # 4-byte Reload - js .LBB227_2 -# BB#1: - movl %edx, %ecx -.LBB227_2: - movl 2048(%esp), %edx - movl %ecx, (%edx) - movl 92(%esp), %esi # 4-byte Reload - js .LBB227_4 -# BB#3: - movl %ebp, %esi -.LBB227_4: - movl %esi, 4(%edx) - movl 88(%esp), %ecx # 4-byte Reload - js .LBB227_6 -# BB#5: - movl %ebx, %edi -.LBB227_6: - movl %edi, 8(%edx) - js .LBB227_8 -# BB#7: - movl %eax, %ecx -.LBB227_8: - movl %ecx, 12(%edx) - movl 72(%esp), %eax # 4-byte Reload - js .LBB227_10 -# BB#9: - movl 4(%esp), %eax # 4-byte Reload -.LBB227_10: - movl %eax, 16(%edx) - movl 64(%esp), %eax # 4-byte Reload - js .LBB227_12 -# BB#11: - movl 8(%esp), %eax # 4-byte Reload -.LBB227_12: - movl %eax, 20(%edx) - movl 48(%esp), %eax # 4-byte Reload - js .LBB227_14 -# BB#13: - movl 12(%esp), %eax # 4-byte Reload -.LBB227_14: - movl %eax, 24(%edx) - movl 52(%esp), %eax # 4-byte Reload - js .LBB227_16 -# BB#15: - movl 16(%esp), %eax # 4-byte Reload -.LBB227_16: - movl %eax, 28(%edx) - movl 56(%esp), %eax # 4-byte Reload - js .LBB227_18 -# BB#17: - movl 20(%esp), %eax # 4-byte Reload -.LBB227_18: - movl %eax, 32(%edx) - movl 44(%esp), %eax # 4-byte Reload - js .LBB227_20 -# BB#19: - movl 24(%esp), %eax # 4-byte Reload -.LBB227_20: - movl %eax, 36(%edx) - movl 60(%esp), %eax # 4-byte Reload - js .LBB227_22 -# BB#21: - movl 28(%esp), %eax # 4-byte Reload -.LBB227_22: - movl %eax, 40(%edx) - movl 84(%esp), %eax # 4-byte Reload - js .LBB227_24 -# BB#23: - movl 32(%esp), %eax # 4-byte Reload -.LBB227_24: - movl %eax, 44(%edx) - movl 80(%esp), %eax # 4-byte Reload - js .LBB227_26 -# BB#25: - movl 36(%esp), %eax # 4-byte Reload -.LBB227_26: - movl %eax, 48(%edx) - movl 76(%esp), %eax # 4-byte Reload - js .LBB227_28 -# BB#27: - movl 40(%esp), %eax # 4-byte Reload -.LBB227_28: - movl %eax, 52(%edx) - movl 96(%esp), %eax # 4-byte Reload - js .LBB227_30 -# BB#29: - movl 68(%esp), %eax # 4-byte Reload -.LBB227_30: - movl %eax, 56(%edx) - addl $2028, %esp # imm = 0x7EC - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end227: - .size mcl_fp_montNF15L, .Lfunc_end227-mcl_fp_montNF15L - - .globl mcl_fp_montRed15L - .align 16, 0x90 - .type mcl_fp_montRed15L,@function -mcl_fp_montRed15L: # @mcl_fp_montRed15L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1148, %esp # imm = 0x47C - calll .L228$pb -.L228$pb: - popl %eax -.Ltmp49: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp49-.L228$pb), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 1176(%esp), %edx - movl -4(%edx), %esi - movl %esi, 88(%esp) # 4-byte Spill - movl 1172(%esp), %ecx - movl (%ecx), %ebx - movl %ebx, 80(%esp) # 4-byte Spill - movl 4(%ecx), %edi - movl %edi, 84(%esp) # 4-byte Spill - imull %esi, %ebx - movl 116(%ecx), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 112(%ecx), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%ecx), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 104(%ecx), %esi - movl %esi, 128(%esp) # 4-byte Spill - movl 100(%ecx), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 96(%ecx), %esi - movl %esi, 152(%esp) # 4-byte Spill - movl 92(%ecx), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 88(%ecx), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 84(%ecx), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 80(%ecx), %eax - movl 
%eax, 144(%esp) # 4-byte Spill - movl 76(%ecx), %esi - movl %esi, 168(%esp) # 4-byte Spill - movl 72(%ecx), %esi - movl %esi, 164(%esp) # 4-byte Spill - movl 68(%ecx), %esi - movl %esi, 176(%esp) # 4-byte Spill - movl 64(%ecx), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 60(%ecx), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 56(%ecx), %esi - movl %esi, 156(%esp) # 4-byte Spill - movl 52(%ecx), %esi - movl %esi, 140(%esp) # 4-byte Spill - movl 48(%ecx), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 44(%ecx), %esi - movl %esi, 124(%esp) # 4-byte Spill - movl 40(%ecx), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 36(%ecx), %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 32(%ecx), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 28(%ecx), %ebp - movl 24(%ecx), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 20(%ecx), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 16(%ecx), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 12(%ecx), %edi - movl 8(%ecx), %esi - movl (%edx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 56(%edx), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 52(%edx), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 48(%edx), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 44(%edx), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 40(%edx), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 36(%edx), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 32(%edx), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 28(%edx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 24(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 20(%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 16(%edx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 12(%edx), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 8(%edx), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 4(%edx), %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl %ebx, (%esp) - leal 1080(%esp), %ecx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - movl 80(%esp), %eax # 4-byte Reload - addl 1080(%esp), %eax - movl 84(%esp), %ecx # 4-byte Reload - adcl 1084(%esp), %ecx - adcl 1088(%esp), %esi - movl %esi, 84(%esp) # 4-byte Spill - adcl 1092(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1096(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1100(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1104(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1108(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1112(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1116(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1120(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1124(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1128(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 1132(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 1136(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 1140(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - adcl $0, 180(%esp) # 4-byte Folded Spill - adcl $0, 176(%esp) # 4-byte Folded Spill - adcl $0, 164(%esp) # 4-byte Folded Spill - adcl $0, 168(%esp) # 4-byte Folded 
Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 160(%esp) # 4-byte Folded Spill - movl 148(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - sbbl %ebp, %ebp - movl %ecx, %eax - movl %ecx, %esi - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1016(%esp), %ecx - movl 1176(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - andl $1, %ebp - movl %ebp, %ecx - addl 1016(%esp), %esi - movl 84(%esp), %edx # 4-byte Reload - adcl 1020(%esp), %edx - movl 64(%esp), %eax # 4-byte Reload - adcl 1024(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1028(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1032(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1036(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1040(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1044(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1048(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1052(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1056(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %ebp # 4-byte Reload - adcl 1060(%esp), %ebp - movl 140(%esp), %eax # 4-byte Reload - adcl 1064(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 1068(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 1072(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 1076(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - adcl $0, 176(%esp) # 4-byte Folded Spill - adcl $0, 164(%esp) # 4-byte Folded Spill - adcl $0, 168(%esp) # 4-byte Folded Spill - movl 144(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, %esi - adcl $0, 160(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 148(%esp) # 4-byte Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl %edx, %edi - movl %edi, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 952(%esp), %ecx - movl 1176(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 952(%esp), %edi - movl 64(%esp), %ecx # 4-byte Reload - adcl 956(%esp), %ecx - movl 68(%esp), %eax # 4-byte Reload - adcl 960(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 976(%esp), 
%eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 980(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 984(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 988(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - adcl 992(%esp), %ebp - movl %ebp, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 996(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 1000(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 172(%esp), %ebp # 4-byte Reload - adcl 1004(%esp), %ebp - movl 180(%esp), %eax # 4-byte Reload - adcl 1008(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 1012(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - adcl $0, 164(%esp) # 4-byte Folded Spill - adcl $0, 168(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 144(%esp) # 4-byte Spill - movl 160(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %esi - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 888(%esp), %ecx - movl 1176(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 888(%esp), %esi - movl 68(%esp), %esi # 4-byte Reload - adcl 892(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 912(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 920(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 924(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 928(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 932(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - adcl 936(%esp), %ebp - movl %ebp, 172(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 940(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 944(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 948(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - adcl $0, 168(%esp) # 4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 160(%esp) # 4-byte Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - movl 132(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - movl 104(%esp), %edi # 4-byte Reload - adcl 
$0, %edi - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %esi, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 824(%esp), %ecx - movl 1176(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 824(%esp), %esi - movl 72(%esp), %ecx # 4-byte Reload - adcl 828(%esp), %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 856(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 872(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 876(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 160(%esp) # 4-byte Folded Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 132(%esp) # 4-byte Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 104(%esp) # 4-byte Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %esi - movl %esi, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 760(%esp), %ecx - movl 1176(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 760(%esp), %esi - movl 76(%esp), %esi # 4-byte Reload - adcl 764(%esp), %esi - movl 80(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 780(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 784(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 
176(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - adcl $0, 160(%esp) # 4-byte Folded Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - movl 152(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 132(%esp) # 4-byte Folded Spill - movl 128(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %esi, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 696(%esp), %ecx - movl 1176(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 696(%esp), %esi - movl 80(%esp), %ecx # 4-byte Reload - adcl 700(%esp), %ecx - movl 96(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 712(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 716(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 720(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 728(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 732(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 736(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 752(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 756(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 152(%esp) # 4-byte Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 128(%esp) # 4-byte Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - movl 108(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %edi - movl %edi, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 632(%esp), %ecx - movl 1176(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 632(%esp), %edi - movl 96(%esp), %ecx # 4-byte Reload - adcl 636(%esp), %ecx - movl 100(%esp), %eax # 4-byte Reload - adcl 640(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax 
# 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 176(%esp), %ebp # 4-byte Reload - adcl 672(%esp), %ebp - movl 164(%esp), %edi # 4-byte Reload - adcl 676(%esp), %edi - movl 168(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 688(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - adcl $0, 136(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 108(%esp) # 4-byte Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %esi - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 568(%esp), %ecx - movl 1176(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 568(%esp), %esi - movl 100(%esp), %ecx # 4-byte Reload - adcl 572(%esp), %ecx - movl 112(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - adcl 604(%esp), %ebp - movl %ebp, 176(%esp) # 4-byte Spill - adcl 608(%esp), %edi - movl %edi, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 144(%esp), %edi # 4-byte Reload - adcl 616(%esp), %edi - movl 160(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 624(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - movl 84(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl %ecx, %esi - movl %esi, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 504(%esp), %ecx - movl 1176(%esp), %eax - movl %eax, %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 504(%esp), %esi - movl 112(%esp), %ecx # 4-byte Reload - adcl 508(%esp), 
%ecx - movl 124(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 156(%esp), %esi # 4-byte Reload - adcl 524(%esp), %esi - movl 172(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - adcl 548(%esp), %edi - movl %edi, 144(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ecx, %edi - movl %edi, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 440(%esp), %ecx - movl 1176(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 440(%esp), %edi - movl 124(%esp), %ecx # 4-byte Reload - adcl 444(%esp), %ecx - movl 120(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %edi # 4-byte Reload - adcl 452(%esp), %edi - adcl 456(%esp), %esi - movl %esi, 156(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 180(%esp), %esi # 4-byte Reload - adcl 464(%esp), %esi - movl 176(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 496(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 84(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 376(%esp), %ecx - movl 1176(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - 
calll .LmulPv480x32 - addl 376(%esp), %ebp - movl 120(%esp), %ebp # 4-byte Reload - adcl 380(%esp), %ebp - adcl 384(%esp), %edi - movl %edi, 140(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 172(%esp), %edi # 4-byte Reload - adcl 392(%esp), %edi - adcl 396(%esp), %esi - movl %esi, 180(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 144(%esp), %esi # 4-byte Reload - adcl 412(%esp), %esi - movl 160(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - adcl $0, 116(%esp) # 4-byte Folded Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, 84(%esp) # 4-byte Folded Spill - movl %ebp, %eax - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 312(%esp), %ecx - movl 1176(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 312(%esp), %ebp - movl 140(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl 156(%esp), %ecx # 4-byte Reload - adcl 320(%esp), %ecx - movl %ecx, 156(%esp) # 4-byte Spill - adcl 324(%esp), %edi - movl %edi, 172(%esp) # 4-byte Spill - movl 180(%esp), %ecx # 4-byte Reload - adcl 328(%esp), %ecx - movl %ecx, 180(%esp) # 4-byte Spill - movl 176(%esp), %ecx # 4-byte Reload - adcl 332(%esp), %ecx - movl %ecx, 176(%esp) # 4-byte Spill - movl 164(%esp), %ecx # 4-byte Reload - adcl 336(%esp), %ecx - movl %ecx, 164(%esp) # 4-byte Spill - movl 168(%esp), %ecx # 4-byte Reload - adcl 340(%esp), %ecx - movl %ecx, 168(%esp) # 4-byte Spill - adcl 344(%esp), %esi - movl %esi, 144(%esp) # 4-byte Spill - movl 160(%esp), %ecx # 4-byte Reload - adcl 348(%esp), %ecx - movl %ecx, 160(%esp) # 4-byte Spill - movl 148(%esp), %ebp # 4-byte Reload - adcl 352(%esp), %ebp - movl 136(%esp), %ecx # 4-byte Reload - adcl 356(%esp), %ecx - movl %ecx, 136(%esp) # 4-byte Spill - movl 152(%esp), %ecx # 4-byte Reload - adcl 360(%esp), %ecx - movl %ecx, 152(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 364(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 368(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 372(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - adcl $0, 104(%esp) # 4-byte Folded Spill - adcl $0, 108(%esp) # 4-byte Folded Spill - movl 84(%esp), %esi # 4-byte Reload - adcl $0, %esi - movl %eax, %edi - imull 88(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 248(%esp), %ecx - movl 1176(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 248(%esp), %edi - movl 156(%esp), %ecx # 4-byte Reload - adcl 252(%esp), %ecx - movl 172(%esp), %eax # 4-byte Reload - adcl 256(%esp), 
%eax - movl %eax, 172(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 280(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - adcl 284(%esp), %ebp - movl %ebp, 148(%esp) # 4-byte Spill - movl 136(%esp), %edi # 4-byte Reload - adcl 288(%esp), %edi - movl 152(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 104(%esp), %ebp # 4-byte Reload - adcl 308(%esp), %ebp - adcl $0, 108(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 184(%esp), %ecx - movl 1176(%esp), %edx - movl 92(%esp), %ebx # 4-byte Reload - calll .LmulPv480x32 - addl 184(%esp), %esi - movl 172(%esp), %edx # 4-byte Reload - adcl 188(%esp), %edx - movl %edx, 172(%esp) # 4-byte Spill - movl 180(%esp), %ecx # 4-byte Reload - adcl 192(%esp), %ecx - movl %ecx, 180(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 168(%esp), %esi # 4-byte Reload - adcl 204(%esp), %esi - movl %esi, 168(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - adcl 220(%esp), %edi - movl %edi, 136(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 240(%esp), %ebp - movl %ebp, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 84(%esp), %ebx # 4-byte Reload - adcl $0, %ebx - movl %edx, %eax - subl 16(%esp), %edx # 4-byte Folded Reload - sbbl 4(%esp), %ecx # 4-byte Folded Reload - movl 176(%esp), %eax # 4-byte Reload - sbbl 8(%esp), %eax # 4-byte Folded Reload - movl 164(%esp), %ebp # 4-byte Reload - sbbl 12(%esp), %ebp # 4-byte Folded Reload - sbbl 20(%esp), %esi # 4-byte Folded Reload - movl 144(%esp), %edi # 4-byte Reload - sbbl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 84(%esp) # 4-byte Spill - movl 160(%esp), %edi # 4-byte Reload - sbbl 28(%esp), %edi # 4-byte Folded Reload - 
movl %edi, 88(%esp) # 4-byte Spill - movl 148(%esp), %edi # 4-byte Reload - sbbl 32(%esp), %edi # 4-byte Folded Reload - movl %edi, 92(%esp) # 4-byte Spill - movl 136(%esp), %edi # 4-byte Reload - sbbl 36(%esp), %edi # 4-byte Folded Reload - movl %edi, 96(%esp) # 4-byte Spill - movl 152(%esp), %edi # 4-byte Reload - sbbl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 100(%esp) # 4-byte Spill - movl 132(%esp), %edi # 4-byte Reload - sbbl 44(%esp), %edi # 4-byte Folded Reload - movl %edi, 112(%esp) # 4-byte Spill - movl 128(%esp), %edi # 4-byte Reload - sbbl 48(%esp), %edi # 4-byte Folded Reload - movl %edi, 120(%esp) # 4-byte Spill - movl 116(%esp), %edi # 4-byte Reload - sbbl 52(%esp), %edi # 4-byte Folded Reload - movl %edi, 124(%esp) # 4-byte Spill - movl 104(%esp), %edi # 4-byte Reload - sbbl 56(%esp), %edi # 4-byte Folded Reload - movl %edi, 140(%esp) # 4-byte Spill - movl 108(%esp), %edi # 4-byte Reload - sbbl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 156(%esp) # 4-byte Spill - sbbl $0, %ebx - andl $1, %ebx - movl %ebx, %edi - jne .LBB228_2 -# BB#1: - movl %edx, 172(%esp) # 4-byte Spill -.LBB228_2: - movl 1168(%esp), %edx - movl 172(%esp), %ebx # 4-byte Reload - movl %ebx, (%edx) - movl %edi, %ebx - testb %bl, %bl - jne .LBB228_4 -# BB#3: - movl %ecx, 180(%esp) # 4-byte Spill -.LBB228_4: - movl 180(%esp), %ecx # 4-byte Reload - movl %ecx, 4(%edx) - movl 176(%esp), %ecx # 4-byte Reload - jne .LBB228_6 -# BB#5: - movl %eax, %ecx -.LBB228_6: - movl %ecx, 8(%edx) - movl 164(%esp), %eax # 4-byte Reload - jne .LBB228_8 -# BB#7: - movl %ebp, %eax -.LBB228_8: - movl %eax, 12(%edx) - movl 108(%esp), %ecx # 4-byte Reload - movl 148(%esp), %eax # 4-byte Reload - movl 168(%esp), %ebp # 4-byte Reload - jne .LBB228_10 -# BB#9: - movl %esi, %ebp -.LBB228_10: - movl %ebp, 16(%edx) - movl 152(%esp), %ebp # 4-byte Reload - movl 144(%esp), %ebx # 4-byte Reload - jne .LBB228_12 -# BB#11: - movl 84(%esp), %ebx # 4-byte Reload -.LBB228_12: - movl %ebx, 20(%edx) - movl 132(%esp), %ebx # 4-byte Reload - movl 160(%esp), %edi # 4-byte Reload - jne .LBB228_14 -# BB#13: - movl 88(%esp), %edi # 4-byte Reload -.LBB228_14: - movl %edi, 24(%edx) - movl 128(%esp), %edi # 4-byte Reload - jne .LBB228_16 -# BB#15: - movl 92(%esp), %eax # 4-byte Reload -.LBB228_16: - movl %eax, 28(%edx) - movl 116(%esp), %esi # 4-byte Reload - jne .LBB228_18 -# BB#17: - movl 96(%esp), %eax # 4-byte Reload - movl %eax, 136(%esp) # 4-byte Spill -.LBB228_18: - movl 136(%esp), %eax # 4-byte Reload - movl %eax, 32(%edx) - jne .LBB228_20 -# BB#19: - movl 100(%esp), %ebp # 4-byte Reload -.LBB228_20: - movl %ebp, 36(%edx) - movl 104(%esp), %eax # 4-byte Reload - jne .LBB228_22 -# BB#21: - movl 112(%esp), %ebx # 4-byte Reload -.LBB228_22: - movl %ebx, 40(%edx) - jne .LBB228_24 -# BB#23: - movl 120(%esp), %edi # 4-byte Reload -.LBB228_24: - movl %edi, 44(%edx) - jne .LBB228_26 -# BB#25: - movl 124(%esp), %esi # 4-byte Reload -.LBB228_26: - movl %esi, 48(%edx) - jne .LBB228_28 -# BB#27: - movl 140(%esp), %eax # 4-byte Reload -.LBB228_28: - movl %eax, 52(%edx) - jne .LBB228_30 -# BB#29: - movl 156(%esp), %ecx # 4-byte Reload -.LBB228_30: - movl %ecx, 56(%edx) - addl $1148, %esp # imm = 0x47C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end228: - .size mcl_fp_montRed15L, .Lfunc_end228-mcl_fp_montRed15L - - .globl mcl_fp_addPre15L - .align 16, 0x90 - .type mcl_fp_addPre15L,@function -mcl_fp_addPre15L: # @mcl_fp_addPre15L -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %edx - movl 
4(%eax), %esi - movl 20(%esp), %ecx - addl (%ecx), %edx - adcl 4(%ecx), %esi - movl 8(%eax), %ebx - adcl 8(%ecx), %ebx - movl 16(%esp), %edi - movl %edx, (%edi) - movl 12(%ecx), %edx - movl %esi, 4(%edi) - movl 16(%ecx), %esi - adcl 12(%eax), %edx - adcl 16(%eax), %esi - movl %ebx, 8(%edi) - movl 20(%eax), %ebx - movl %edx, 12(%edi) - movl 20(%ecx), %edx - adcl %ebx, %edx - movl 24(%eax), %ebx - movl %esi, 16(%edi) - movl 24(%ecx), %esi - adcl %ebx, %esi - movl 28(%eax), %ebx - movl %edx, 20(%edi) - movl 28(%ecx), %edx - adcl %ebx, %edx - movl 32(%eax), %ebx - movl %esi, 24(%edi) - movl 32(%ecx), %esi - adcl %ebx, %esi - movl 36(%eax), %ebx - movl %edx, 28(%edi) - movl 36(%ecx), %edx - adcl %ebx, %edx - movl 40(%eax), %ebx - movl %esi, 32(%edi) - movl 40(%ecx), %esi - adcl %ebx, %esi - movl 44(%eax), %ebx - movl %edx, 36(%edi) - movl 44(%ecx), %edx - adcl %ebx, %edx - movl 48(%eax), %ebx - movl %esi, 40(%edi) - movl 48(%ecx), %esi - adcl %ebx, %esi - movl 52(%eax), %ebx - movl %edx, 44(%edi) - movl 52(%ecx), %edx - adcl %ebx, %edx - movl %esi, 48(%edi) - movl %edx, 52(%edi) - movl 56(%eax), %eax - movl 56(%ecx), %ecx - adcl %eax, %ecx - movl %ecx, 56(%edi) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end229: - .size mcl_fp_addPre15L, .Lfunc_end229-mcl_fp_addPre15L - - .globl mcl_fp_subPre15L - .align 16, 0x90 - .type mcl_fp_subPre15L,@function -mcl_fp_subPre15L: # @mcl_fp_subPre15L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edi - xorl %eax, %eax - movl 28(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%ecx), %ebp - sbbl 8(%edx), %ebp - movl 20(%esp), %ebx - movl %esi, (%ebx) - movl 12(%ecx), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ebx) - movl 16(%ecx), %edi - sbbl 16(%edx), %edi - movl %ebp, 8(%ebx) - movl 20(%edx), %ebp - movl %esi, 12(%ebx) - movl 20(%ecx), %esi - sbbl %ebp, %esi - movl 24(%edx), %ebp - movl %edi, 16(%ebx) - movl 24(%ecx), %edi - sbbl %ebp, %edi - movl 28(%edx), %ebp - movl %esi, 20(%ebx) - movl 28(%ecx), %esi - sbbl %ebp, %esi - movl 32(%edx), %ebp - movl %edi, 24(%ebx) - movl 32(%ecx), %edi - sbbl %ebp, %edi - movl 36(%edx), %ebp - movl %esi, 28(%ebx) - movl 36(%ecx), %esi - sbbl %ebp, %esi - movl 40(%edx), %ebp - movl %edi, 32(%ebx) - movl 40(%ecx), %edi - sbbl %ebp, %edi - movl 44(%edx), %ebp - movl %esi, 36(%ebx) - movl 44(%ecx), %esi - sbbl %ebp, %esi - movl 48(%edx), %ebp - movl %edi, 40(%ebx) - movl 48(%ecx), %edi - sbbl %ebp, %edi - movl 52(%edx), %ebp - movl %esi, 44(%ebx) - movl 52(%ecx), %esi - sbbl %ebp, %esi - movl %edi, 48(%ebx) - movl %esi, 52(%ebx) - movl 56(%edx), %edx - movl 56(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 56(%ebx) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end230: - .size mcl_fp_subPre15L, .Lfunc_end230-mcl_fp_subPre15L - - .globl mcl_fp_shr1_15L - .align 16, 0x90 - .type mcl_fp_shr1_15L,@function -mcl_fp_shr1_15L: # @mcl_fp_shr1_15L -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - shrdl $1, %esi, %edx - movl 8(%esp), %ecx - movl %edx, (%ecx) - movl 8(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 4(%ecx) - movl 12(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 8(%ecx) - movl 16(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 12(%ecx) - movl 20(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 16(%ecx) - movl 24(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 20(%ecx) - movl 28(%eax), %esi - shrdl $1, %esi, %edx - 
- movl %edx, 24(%ecx)
- movl 32(%eax), %edx
- shrdl $1, %edx, %esi
- movl %esi, 28(%ecx)
- movl 36(%eax), %esi
- shrdl $1, %esi, %edx
- movl %edx, 32(%ecx)
- movl 40(%eax), %edx
- shrdl $1, %edx, %esi
- movl %esi, 36(%ecx)
- movl 44(%eax), %esi
- shrdl $1, %esi, %edx
- movl %edx, 40(%ecx)
- movl 48(%eax), %edx
- shrdl $1, %edx, %esi
- movl %esi, 44(%ecx)
- movl 52(%eax), %esi
- shrdl $1, %esi, %edx
- movl %edx, 48(%ecx)
- movl 56(%eax), %eax
- shrdl $1, %eax, %esi
- movl %esi, 52(%ecx)
- shrl %eax
- movl %eax, 56(%ecx)
- popl %esi
- retl
-.Lfunc_end231:
- .size mcl_fp_shr1_15L, .Lfunc_end231-mcl_fp_shr1_15L
-
- .globl mcl_fp_add15L
- .align 16, 0x90
- .type mcl_fp_add15L,@function
-mcl_fp_add15L: # @mcl_fp_add15L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $48, %esp
- movl 76(%esp), %ecx
- movl (%ecx), %esi
- movl 4(%ecx), %edx
- movl 72(%esp), %eax
- addl (%eax), %esi
- movl %esi, 4(%esp) # 4-byte Spill
- adcl 4(%eax), %edx
- movl %edx, 44(%esp) # 4-byte Spill
- movl 8(%ecx), %edx
- adcl 8(%eax), %edx
- movl %edx, 40(%esp) # 4-byte Spill
- movl 12(%eax), %esi
- movl 16(%eax), %edx
- adcl 12(%ecx), %esi
- movl %esi, 36(%esp) # 4-byte Spill
- adcl 16(%ecx), %edx
- movl %edx, 32(%esp) # 4-byte Spill
- movl 20(%eax), %edx
- adcl 20(%ecx), %edx
- movl %edx, 28(%esp) # 4-byte Spill
- movl 24(%eax), %edx
- adcl 24(%ecx), %edx
- movl %edx, 24(%esp) # 4-byte Spill
- movl 28(%eax), %edx
- adcl 28(%ecx), %edx
- movl %edx, 20(%esp) # 4-byte Spill
- movl 32(%eax), %edx
- adcl 32(%ecx), %edx
- movl %edx, 16(%esp) # 4-byte Spill
- movl 36(%eax), %edx
- adcl 36(%ecx), %edx
- movl %edx, 12(%esp) # 4-byte Spill
- movl 40(%eax), %edx
- adcl 40(%ecx), %edx
- movl %edx, 8(%esp) # 4-byte Spill
- movl 44(%eax), %ebx
- adcl 44(%ecx), %ebx
- movl %ebx, (%esp) # 4-byte Spill
- movl 48(%eax), %ebp
- adcl 48(%ecx), %ebp
- movl 52(%eax), %edi
- adcl 52(%ecx), %edi
- movl 56(%eax), %edx
- adcl 56(%ecx), %edx
- movl 68(%esp), %ecx
- movl 4(%esp), %eax # 4-byte Reload
- movl %eax, (%ecx)
- movl 44(%esp), %esi # 4-byte Reload
- movl %esi, 4(%ecx)
- movl 40(%esp), %esi # 4-byte Reload
- movl %esi, 8(%ecx)
- movl 36(%esp), %esi # 4-byte Reload
- movl %esi, 12(%ecx)
- movl 32(%esp), %esi # 4-byte Reload
- movl %esi, 16(%ecx)
- movl 28(%esp), %esi # 4-byte Reload
- movl %esi, 20(%ecx)
- movl 24(%esp), %esi # 4-byte Reload
- movl %esi, 24(%ecx)
- movl 20(%esp), %esi # 4-byte Reload
- movl %esi, 28(%ecx)
- movl 16(%esp), %esi # 4-byte Reload
- movl %esi, 32(%ecx)
- movl 12(%esp), %esi # 4-byte Reload
- movl %esi, 36(%ecx)
- movl 8(%esp), %esi # 4-byte Reload
- movl %esi, 40(%ecx)
- movl %ebx, 44(%ecx)
- movl %ebp, 48(%ecx)
- movl %edi, 52(%ecx)
- movl %edx, 56(%ecx)
- sbbl %ebx, %ebx
- andl $1, %ebx
- movl 80(%esp), %esi
- subl (%esi), %eax
- movl %eax, 4(%esp) # 4-byte Spill
- movl 44(%esp), %eax # 4-byte Reload
- sbbl 4(%esi), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl %edx, %eax
- movl 40(%esp), %edx # 4-byte Reload
- sbbl 8(%esi), %edx
- movl %edx, 40(%esp) # 4-byte Spill
- movl 36(%esp), %edx # 4-byte Reload
- sbbl 12(%esi), %edx
- movl %edx, 36(%esp) # 4-byte Spill
- movl 32(%esp), %edx # 4-byte Reload
- sbbl 16(%esi), %edx
- movl %edx, 32(%esp) # 4-byte Spill
- movl 28(%esp), %edx # 4-byte Reload
- sbbl 20(%esi), %edx
- movl %edx, 28(%esp) # 4-byte Spill
- movl 24(%esp), %edx # 4-byte Reload
- sbbl 24(%esi), %edx
- movl %edx, 24(%esp) # 4-byte Spill
- movl 20(%esp), %edx # 4-byte Reload
- sbbl 28(%esi), %edx
- movl %edx, 20(%esp) # 4-byte Spill
- movl 16(%esp), %edx # 4-byte Reload
- sbbl 32(%esi), %edx
- movl %edx, 16(%esp) # 4-byte Spill
- movl 12(%esp), %edx # 4-byte Reload
- sbbl 36(%esi), %edx
- movl %edx, 12(%esp) # 4-byte Spill
- movl 8(%esp), %edx # 4-byte Reload
- sbbl 40(%esi), %edx
- movl %edx, 8(%esp) # 4-byte Spill
- movl (%esp), %edx # 4-byte Reload
- sbbl 44(%esi), %edx
- movl %edx, (%esp) # 4-byte Spill
- sbbl 48(%esi), %ebp
- sbbl 52(%esi), %edi
- sbbl 56(%esi), %eax
- sbbl $0, %ebx
- testb $1, %bl
- jne .LBB232_2
-# BB#1: # %nocarry
- movl 4(%esp), %edx # 4-byte Reload
- movl %edx, (%ecx)
- movl 44(%esp), %edx # 4-byte Reload
- movl %edx, 4(%ecx)
- movl 40(%esp), %edx # 4-byte Reload
- movl %edx, 8(%ecx)
- movl 36(%esp), %edx # 4-byte Reload
- movl %edx, 12(%ecx)
- movl 32(%esp), %edx # 4-byte Reload
- movl %edx, 16(%ecx)
- movl 28(%esp), %edx # 4-byte Reload
- movl %edx, 20(%ecx)
- movl 24(%esp), %edx # 4-byte Reload
- movl %edx, 24(%ecx)
- movl 20(%esp), %edx # 4-byte Reload
- movl %edx, 28(%ecx)
- movl 16(%esp), %edx # 4-byte Reload
- movl %edx, 32(%ecx)
- movl 12(%esp), %edx # 4-byte Reload
- movl %edx, 36(%ecx)
- movl 8(%esp), %edx # 4-byte Reload
- movl %edx, 40(%ecx)
- movl (%esp), %edx # 4-byte Reload
- movl %edx, 44(%ecx)
- movl %ebp, 48(%ecx)
- movl %edi, 52(%ecx)
- movl %eax, 56(%ecx)
-.LBB232_2: # %carry
- addl $48, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end232:
- .size mcl_fp_add15L, .Lfunc_end232-mcl_fp_add15L
-
- .globl mcl_fp_addNF15L
- .align 16, 0x90
- .type mcl_fp_addNF15L,@function
-mcl_fp_addNF15L: # @mcl_fp_addNF15L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $120, %esp
- movl 148(%esp), %ecx
- movl (%ecx), %eax
- movl 4(%ecx), %edx
- movl 144(%esp), %esi
- addl (%esi), %eax
- movl %eax, 80(%esp) # 4-byte Spill
- adcl 4(%esi), %edx
- movl %edx, 84(%esp) # 4-byte Spill
- movl 56(%ecx), %eax
- movl %eax, 100(%esp) # 4-byte Spill
- movl 52(%ecx), %eax
- movl %eax, 92(%esp) # 4-byte Spill
- movl 48(%ecx), %eax
- movl %eax, 96(%esp) # 4-byte Spill
- movl 44(%ecx), %ebp
- movl 40(%ecx), %eax
- movl %eax, 112(%esp) # 4-byte Spill
- movl 36(%ecx), %eax
- movl %eax, 108(%esp) # 4-byte Spill
- movl 32(%ecx), %eax
- movl %eax, 116(%esp) # 4-byte Spill
- movl 28(%ecx), %eax
- movl %eax, 104(%esp) # 4-byte Spill
- movl 24(%ecx), %eax
- movl 20(%ecx), %ebx
- movl 16(%ecx), %edi
- movl 12(%ecx), %edx
- movl 8(%ecx), %ecx
- adcl 8(%esi), %ecx
- movl %ecx, 76(%esp) # 4-byte Spill
- adcl 12(%esi), %edx
- movl %edx, 64(%esp) # 4-byte Spill
- adcl 16(%esi), %edi
- movl %edi, 68(%esp) # 4-byte Spill
- adcl 20(%esi), %ebx
- movl %ebx, 72(%esp) # 4-byte Spill
- adcl 24(%esi), %eax
- movl %eax, 88(%esp) # 4-byte Spill
- movl 104(%esp), %eax # 4-byte Reload
- adcl 28(%esi), %eax
- movl %eax, 104(%esp) # 4-byte Spill
- movl 116(%esp), %eax # 4-byte Reload
- adcl 32(%esi), %eax
- movl %eax, 116(%esp) # 4-byte Spill
- movl 108(%esp), %eax # 4-byte Reload
- adcl 36(%esi), %eax
- movl %eax, 108(%esp) # 4-byte Spill
- movl 112(%esp), %eax # 4-byte Reload
- adcl 40(%esi), %eax
- movl %eax, 112(%esp) # 4-byte Spill
- adcl 44(%esi), %ebp
- movl %ebp, 60(%esp) # 4-byte Spill
- movl 96(%esp), %ebp # 4-byte Reload
- adcl 48(%esi), %ebp
- movl %ebp, 96(%esp) # 4-byte Spill
- movl 92(%esp), %ebp # 4-byte Reload
- adcl 52(%esi), %ebp
- movl %ebp, 92(%esp) # 4-byte Spill
- movl 100(%esp), %ebp # 4-byte Reload
- adcl 56(%esi), %ebp
- movl %ebp, 100(%esp) # 4-byte Spill
- movl 152(%esp), %esi
- movl 80(%esp), %eax # 4-byte Reload
- subl (%esi), %eax
- movl %eax, (%esp) # 4-byte Spill
- movl 84(%esp), %eax # 4-byte Reload
- sbbl 4(%esi), %eax
- movl %eax, 4(%esp) # 4-byte Spill
- sbbl 8(%esi), %ecx
- movl %ecx, 8(%esp) # 4-byte Spill
- sbbl 12(%esi), %edx
- movl %edx, 12(%esp) # 4-byte Spill
- sbbl 16(%esi), %edi
- movl %edi, 16(%esp) # 4-byte Spill
- sbbl 20(%esi), %ebx
- movl %ebx, 20(%esp) # 4-byte Spill
- movl 88(%esp), %ebx # 4-byte Reload
- sbbl 24(%esi), %ebx
- movl %ebx, 24(%esp) # 4-byte Spill
- movl 104(%esp), %ebp # 4-byte Reload
- sbbl 28(%esi), %ebp
- movl %ebp, 28(%esp) # 4-byte Spill
- movl 116(%esp), %eax # 4-byte Reload
- sbbl 32(%esi), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 108(%esp), %eax # 4-byte Reload
- sbbl 36(%esi), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 112(%esp), %eax # 4-byte Reload
- sbbl 40(%esi), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 60(%esp), %edx # 4-byte Reload
- movl %edx, %eax
- sbbl 44(%esi), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 96(%esp), %eax # 4-byte Reload
- movl %eax, %edi
- sbbl 48(%esi), %edi
- movl %edi, 48(%esp) # 4-byte Spill
- movl 92(%esp), %ecx # 4-byte Reload
- movl %ecx, %edi
- movl %ecx, %ebx
- sbbl 52(%esi), %edi
- movl %edi, 52(%esp) # 4-byte Spill
- movl 100(%esp), %ecx # 4-byte Reload
- movl %ecx, %edi
- sbbl 56(%esi), %edi
- movl %edi, 56(%esp) # 4-byte Spill
- movl %edi, %esi
- sarl $31, %esi
- testl %esi, %esi
- movl 80(%esp), %esi # 4-byte Reload
- js .LBB233_2
-# BB#1:
- movl (%esp), %esi # 4-byte Reload
-.LBB233_2:
- movl 140(%esp), %edi
- movl %esi, (%edi)
- movl 84(%esp), %ecx # 4-byte Reload
- js .LBB233_4
-# BB#3:
- movl 4(%esp), %ecx # 4-byte Reload
-.LBB233_4:
- movl %ecx, 4(%edi)
- movl 104(%esp), %ecx # 4-byte Reload
- movl 72(%esp), %esi # 4-byte Reload
- js .LBB233_6
-# BB#5:
- movl 8(%esp), %eax # 4-byte Reload
- movl %eax, 76(%esp) # 4-byte Spill
-.LBB233_6:
- movl 76(%esp), %ebp # 4-byte Reload
- movl %ebp, 8(%edi)
- movl 64(%esp), %eax # 4-byte Reload
- js .LBB233_8
-# BB#7:
- movl 12(%esp), %eax # 4-byte Reload
-.LBB233_8:
- movl %eax, 12(%edi)
- movl %ebx, %ebp
- movl %edx, %eax
- movl 68(%esp), %edx # 4-byte Reload
- js .LBB233_10
-# BB#9:
- movl 16(%esp), %edx # 4-byte Reload
-.LBB233_10:
- movl %edx, 16(%edi)
- movl 112(%esp), %edx # 4-byte Reload
- movl 108(%esp), %ebx # 4-byte Reload
- js .LBB233_12
-# BB#11:
- movl 20(%esp), %esi # 4-byte Reload
-.LBB233_12:
- movl %esi, 20(%edi)
- js .LBB233_14
-# BB#13:
- movl 24(%esp), %esi # 4-byte Reload
- movl %esi, 88(%esp) # 4-byte Spill
-.LBB233_14:
- movl 88(%esp), %esi # 4-byte Reload
- movl %esi, 24(%edi)
- js .LBB233_16
-# BB#15:
- movl 28(%esp), %ecx # 4-byte Reload
-.LBB233_16:
- movl %ecx, 28(%edi)
- js .LBB233_18
-# BB#17:
- movl 32(%esp), %ecx # 4-byte Reload
- movl %ecx, 116(%esp) # 4-byte Spill
-.LBB233_18:
- movl 116(%esp), %ecx # 4-byte Reload
- movl %ecx, 32(%edi)
- js .LBB233_20
-# BB#19:
- movl 36(%esp), %ebx # 4-byte Reload
-.LBB233_20:
- movl %ebx, 36(%edi)
- js .LBB233_22
-# BB#21:
- movl 40(%esp), %edx # 4-byte Reload
-.LBB233_22:
- movl %edx, 40(%edi)
- js .LBB233_24
-# BB#23:
- movl 44(%esp), %eax # 4-byte Reload
-.LBB233_24:
- movl %eax, 44(%edi)
- movl 96(%esp), %eax # 4-byte Reload
- js .LBB233_26
-# BB#25:
- movl 48(%esp), %eax # 4-byte Reload
-.LBB233_26:
- movl %eax, 48(%edi)
- js .LBB233_28
-# BB#27:
- movl 52(%esp), %ebp # 4-byte Reload
-.LBB233_28:
- movl %ebp, 52(%edi)
- movl 100(%esp), %eax # 4-byte Reload
- js .LBB233_30
-# BB#29:
- movl 56(%esp), %eax # 4-byte Reload
-.LBB233_30:
- movl %eax, 56(%edi)
- addl $120, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end233:
- .size mcl_fp_addNF15L, .Lfunc_end233-mcl_fp_addNF15L
-
- .globl mcl_fp_sub15L
- .align 16, 0x90
- .type mcl_fp_sub15L,@function
-mcl_fp_sub15L: # @mcl_fp_sub15L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $56, %esp
- movl 80(%esp), %esi
- movl (%esi), %eax
- movl 4(%esi), %ecx
- xorl %ebx, %ebx
- movl 84(%esp), %edi
- subl (%edi), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- sbbl 4(%edi), %ecx
- movl %ecx, 16(%esp) # 4-byte Spill
- movl 8(%esi), %eax
- sbbl 8(%edi), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 12(%esi), %eax
- sbbl 12(%edi), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- movl 16(%esi), %eax
- sbbl 16(%edi), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 20(%esi), %eax
- sbbl 20(%edi), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 24(%esi), %eax
- sbbl 24(%edi), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 28(%esi), %eax
- sbbl 28(%edi), %eax
- movl %eax, 28(%esp) # 4-byte Spill
- movl 32(%esi), %eax
- sbbl 32(%edi), %eax
- movl %eax, 24(%esp) # 4-byte Spill
- movl 36(%esi), %eax
- sbbl 36(%edi), %eax
- movl %eax, 20(%esp) # 4-byte Spill
- movl 40(%esi), %edx
- sbbl 40(%edi), %edx
- movl %edx, 12(%esp) # 4-byte Spill
- movl 44(%esi), %ecx
- sbbl 44(%edi), %ecx
- movl %ecx, 8(%esp) # 4-byte Spill
- movl 48(%esi), %eax
- sbbl 48(%edi), %eax
- movl %eax, 4(%esp) # 4-byte Spill
- movl 52(%esi), %ebp
- sbbl 52(%edi), %ebp
- movl 56(%esi), %esi
- sbbl 56(%edi), %esi
- sbbl $0, %ebx
- testb $1, %bl
- movl 76(%esp), %ebx
- movl 48(%esp), %edi # 4-byte Reload
- movl %edi, (%ebx)
- movl 16(%esp), %edi # 4-byte Reload
- movl %edi, 4(%ebx)
- movl 40(%esp), %edi # 4-byte Reload
- movl %edi, 8(%ebx)
- movl 52(%esp), %edi # 4-byte Reload
- movl %edi, 12(%ebx)
- movl 44(%esp), %edi # 4-byte Reload
- movl %edi, 16(%ebx)
- movl 36(%esp), %edi # 4-byte Reload
- movl %edi, 20(%ebx)
- movl 32(%esp), %edi # 4-byte Reload
- movl %edi, 24(%ebx)
- movl 28(%esp), %edi # 4-byte Reload
- movl %edi, 28(%ebx)
- movl 24(%esp), %edi # 4-byte Reload
- movl %edi, 32(%ebx)
- movl 20(%esp), %edi # 4-byte Reload
- movl %edi, 36(%ebx)
- movl %edx, 40(%ebx)
- movl %ecx, 44(%ebx)
- movl %eax, 48(%ebx)
- movl %ebp, 52(%ebx)
- movl %esi, 56(%ebx)
- je .LBB234_2
-# BB#1: # %carry
- movl %esi, (%esp) # 4-byte Spill
- movl 88(%esp), %esi
- movl 48(%esp), %ecx # 4-byte Reload
- addl (%esi), %ecx
- movl %ecx, (%ebx)
- movl 16(%esp), %edx # 4-byte Reload
- adcl 4(%esi), %edx
- movl %edx, 4(%ebx)
- movl 40(%esp), %edi # 4-byte Reload
- adcl 8(%esi), %edi
- movl 12(%esi), %eax
- adcl 52(%esp), %eax # 4-byte Folded Reload
- movl %edi, 8(%ebx)
- movl 16(%esi), %ecx
- adcl 44(%esp), %ecx # 4-byte Folded Reload
- movl %eax, 12(%ebx)
- movl 20(%esi), %eax
- adcl 36(%esp), %eax # 4-byte Folded Reload
- movl %ecx, 16(%ebx)
- movl 24(%esi), %ecx
- adcl 32(%esp), %ecx # 4-byte Folded Reload
- movl %eax, 20(%ebx)
- movl 28(%esi), %eax
- adcl 28(%esp), %eax # 4-byte Folded Reload
- movl %ecx, 24(%ebx)
- movl 32(%esi), %ecx
- adcl 24(%esp), %ecx # 4-byte Folded Reload
- movl %eax, 28(%ebx)
- movl 36(%esi), %eax
- adcl 20(%esp), %eax # 4-byte Folded Reload
- movl %ecx, 32(%ebx)
- movl 40(%esi), %ecx
- adcl 12(%esp), %ecx # 4-byte Folded Reload
- movl %eax, 36(%ebx)
- movl 44(%esi), %eax
- adcl 8(%esp), %eax # 4-byte Folded Reload
- movl %ecx, 40(%ebx)
- movl 48(%esi), %ecx
- adcl 4(%esp), %ecx # 4-byte Folded Reload
- movl %eax, 44(%ebx)
- movl %ecx, 48(%ebx)
- movl 52(%esi), %eax
- adcl %ebp, %eax
- movl %eax, 52(%ebx)
- movl 56(%esi), %eax
- adcl (%esp), %eax # 4-byte Folded Reload
- movl %eax, 56(%ebx)
-.LBB234_2: # %nocarry
- addl $56, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end234:
- .size mcl_fp_sub15L, .Lfunc_end234-mcl_fp_sub15L
-
- .globl mcl_fp_subNF15L
- .align 16, 0x90
- .type mcl_fp_subNF15L,@function
-mcl_fp_subNF15L: # @mcl_fp_subNF15L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $96, %esp
- movl 120(%esp), %ecx
- movl (%ecx), %esi
- movl 4(%ecx), %edx
- movl 124(%esp), %edi
- subl (%edi), %esi
- movl %esi, 60(%esp) # 4-byte Spill
- sbbl 4(%edi), %edx
- movl %edx, 64(%esp) # 4-byte Spill
- movl 56(%ecx), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 52(%ecx), %edx
- movl %edx, 88(%esp) # 4-byte Spill
- movl 48(%ecx), %edx
- movl %edx, 84(%esp) # 4-byte Spill
- movl 44(%ecx), %edx
- movl %edx, 80(%esp) # 4-byte Spill
- movl 40(%ecx), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- movl 36(%ecx), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 32(%ecx), %esi
- movl %esi, 92(%esp) # 4-byte Spill
- movl 28(%ecx), %ebp
- movl 24(%ecx), %ebx
- movl 20(%ecx), %esi
- movl 16(%ecx), %edx
- movl 12(%ecx), %eax
- movl 8(%ecx), %ecx
- sbbl 8(%edi), %ecx
- movl %ecx, 32(%esp) # 4-byte Spill
- sbbl 12(%edi), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- sbbl 16(%edi), %edx
- movl %edx, 40(%esp) # 4-byte Spill
- sbbl 20(%edi), %esi
- movl %esi, 48(%esp) # 4-byte Spill
- sbbl 24(%edi), %ebx
- movl %ebx, 52(%esp) # 4-byte Spill
- sbbl 28(%edi), %ebp
- movl %ebp, 56(%esp) # 4-byte Spill
- movl 92(%esp), %ecx # 4-byte Reload
- sbbl 32(%edi), %ecx
- movl %ecx, 92(%esp) # 4-byte Spill
- movl 68(%esp), %eax # 4-byte Reload
- sbbl 36(%edi), %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 76(%esp), %eax # 4-byte Reload
- sbbl 40(%edi), %eax
- movl %eax, 76(%esp) # 4-byte Spill
- movl 80(%esp), %ecx # 4-byte Reload
- sbbl 44(%edi), %ecx
- movl %ecx, 80(%esp) # 4-byte Spill
- movl 84(%esp), %ecx # 4-byte Reload
- sbbl 48(%edi), %ecx
- movl %ecx, 84(%esp) # 4-byte Spill
- movl 88(%esp), %ecx # 4-byte Reload
- sbbl 52(%edi), %ecx
- movl %ecx, 88(%esp) # 4-byte Spill
- movl 72(%esp), %eax # 4-byte Reload
- sbbl 56(%edi), %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl %eax, %ebp
- sarl $31, %ebp
- movl %ebp, %edi
- shldl $1, %eax, %edi
- movl 128(%esp), %edx
- andl (%edx), %edi
- movl 56(%edx), %eax
- andl %ebp, %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 52(%edx), %eax
- andl %ebp, %eax
- movl %eax, 28(%esp) # 4-byte Spill
- movl 48(%edx), %eax
- andl %ebp, %eax
- movl %eax, 24(%esp) # 4-byte Spill
- movl 44(%edx), %eax
- andl %ebp, %eax
- movl %eax, 20(%esp) # 4-byte Spill
- movl 40(%edx), %eax
- andl %ebp, %eax
- movl %eax, 16(%esp) # 4-byte Spill
- movl 36(%edx), %eax
- andl %ebp, %eax
- movl %eax, 12(%esp) # 4-byte Spill
- movl 32(%edx), %eax
- andl %ebp, %eax
- movl %eax, 8(%esp) # 4-byte Spill
- movl 28(%edx), %eax
- andl %ebp, %eax
- movl %eax, 4(%esp) # 4-byte Spill
- movl 24(%edx), %eax
- andl %ebp, %eax
- movl %eax, (%esp) # 4-byte Spill
- movl 20(%edx), %ebx
- andl %ebp, %ebx
- movl 16(%edx), %esi
- andl %ebp, %esi
- movl 12(%edx), %ecx
- andl %ebp, %ecx
- movl 8(%edx), %eax
- andl %ebp, %eax
- andl 4(%edx), %ebp
- addl 60(%esp), %edi # 4-byte Folded Reload
- adcl 64(%esp), %ebp # 4-byte Folded Reload
- movl 116(%esp), %edx
- movl %edi, (%edx)
- adcl 32(%esp), %eax # 4-byte Folded Reload
- movl %ebp, 4(%edx)
- adcl 36(%esp), %ecx # 4-byte Folded Reload
- movl %eax, 8(%edx)
- adcl 40(%esp), %esi # 4-byte Folded Reload
- movl %ecx, 12(%edx)
- adcl 48(%esp), %ebx # 4-byte Folded Reload
- movl %esi, 16(%edx)
- movl (%esp), %ecx # 4-byte Reload
- adcl 52(%esp), %ecx # 4-byte Folded Reload
- movl %ebx, 20(%edx)
- movl 4(%esp), %eax # 4-byte Reload
- adcl 56(%esp), %eax # 4-byte Folded Reload
- movl %ecx, 24(%edx)
- movl 8(%esp), %ecx # 4-byte Reload
- adcl 92(%esp), %ecx # 4-byte Folded Reload
- movl %eax, 28(%edx)
- movl 12(%esp), %eax # 4-byte Reload
- adcl 68(%esp), %eax # 4-byte Folded Reload
- movl %ecx, 32(%edx)
- movl 16(%esp), %ecx # 4-byte Reload
- adcl 76(%esp), %ecx # 4-byte Folded Reload
- movl %eax, 36(%edx)
- movl 20(%esp), %eax # 4-byte Reload
- adcl 80(%esp), %eax # 4-byte Folded Reload
- movl %ecx, 40(%edx)
- movl 24(%esp), %ecx # 4-byte Reload
- adcl 84(%esp), %ecx # 4-byte Folded Reload
- movl %eax, 44(%edx)
- movl 28(%esp), %eax # 4-byte Reload
- adcl 88(%esp), %eax # 4-byte Folded Reload
- movl %ecx, 48(%edx)
- movl %eax, 52(%edx)
- movl 44(%esp), %eax # 4-byte Reload
- adcl 72(%esp), %eax # 4-byte Folded Reload
- movl %eax, 56(%edx)
- addl $96, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end235:
- .size mcl_fp_subNF15L, .Lfunc_end235-mcl_fp_subNF15L
-
- .globl mcl_fpDbl_add15L
- .align 16, 0x90
- .type mcl_fpDbl_add15L,@function
-mcl_fpDbl_add15L: # @mcl_fpDbl_add15L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $108, %esp
- movl 136(%esp), %ecx
- movl 132(%esp), %edx
- movl 12(%edx), %edi
- movl 16(%edx), %esi
- movl 8(%ecx), %ebx
- movl (%ecx), %ebp
- addl (%edx), %ebp
- movl 128(%esp), %eax
- movl %ebp, (%eax)
- movl 4(%ecx), %ebp
- adcl 4(%edx), %ebp
- adcl 8(%edx), %ebx
- adcl 12(%ecx), %edi
- adcl 16(%ecx), %esi
- movl %ebp, 4(%eax)
- movl 68(%ecx), %ebp
- movl %ebx, 8(%eax)
- movl 20(%ecx), %ebx
- movl %edi, 12(%eax)
- movl 20(%edx), %edi
- adcl %ebx, %edi
- movl 24(%ecx), %ebx
- movl %esi, 16(%eax)
- movl 24(%edx), %esi
- adcl %ebx, %esi
- movl 28(%ecx), %ebx
- movl %edi, 20(%eax)
- movl 28(%edx), %edi
- adcl %ebx, %edi
- movl 32(%ecx), %ebx
- movl %esi, 24(%eax)
- movl 32(%edx), %esi
- adcl %ebx, %esi
- movl 36(%ecx), %ebx
- movl %edi, 28(%eax)
- movl 36(%edx), %edi
- adcl %ebx, %edi
- movl 40(%ecx), %ebx
- movl %esi, 32(%eax)
- movl 40(%edx), %esi
- adcl %ebx, %esi
- movl 44(%ecx), %ebx
- movl %edi, 36(%eax)
- movl 44(%edx), %edi
- adcl %ebx, %edi
- movl 48(%ecx), %ebx
- movl %esi, 40(%eax)
- movl 48(%edx), %esi
- adcl %ebx, %esi
- movl 52(%ecx), %ebx
- movl %edi, 44(%eax)
- movl 52(%edx), %edi
- adcl %ebx, %edi
- movl 56(%ecx), %ebx
- movl %esi, 48(%eax)
- movl 56(%edx), %esi
- adcl %ebx, %esi
- movl 60(%ecx), %ebx
- movl %edi, 52(%eax)
- movl 60(%edx), %edi
- adcl %ebx, %edi
- movl %edi, 76(%esp) # 4-byte Spill
- movl 64(%ecx), %edi
- movl %esi, 56(%eax)
- movl 64(%edx), %eax
- adcl %edi, %eax
- movl %eax, 80(%esp) # 4-byte Spill
- movl 68(%edx), %eax
- adcl %ebp, %eax
- movl %eax, 84(%esp) # 4-byte Spill
- movl 72(%ecx), %esi
- movl 72(%edx), %eax
- adcl %esi, %eax
- movl %eax, 88(%esp) # 4-byte Spill
- movl 76(%ecx), %esi
- movl 76(%edx), %eax
- adcl %esi, %eax
- movl %eax, 92(%esp) # 4-byte Spill
- movl 80(%ecx), %esi
- movl 80(%edx), %eax
- adcl %esi, %eax
- movl %eax, 96(%esp) # 4-byte Spill
- movl 84(%ecx), %esi
- movl 84(%edx), %eax
- adcl %esi, %eax
- movl %eax, 100(%esp) # 4-byte Spill
- movl 88(%ecx), %esi
- movl 88(%edx), %eax
- adcl %esi, %eax
- movl %eax, 104(%esp) # 4-byte Spill
- movl 92(%ecx), %esi
- movl 92(%edx), %eax
- adcl %esi, %eax
- movl %eax, 64(%esp) # 4-byte Spill
- movl 96(%ecx), %esi
- movl 96(%edx), %eax
- adcl %esi, %eax
- movl %eax, 68(%esp) # 4-byte Spill
- movl 100(%ecx), %esi
- movl 100(%edx), %eax
- adcl %esi, %eax
- movl %eax, 72(%esp) # 4-byte Spill
- movl 104(%ecx), %eax
- movl 104(%edx), %esi
- adcl %eax, %esi
- movl %esi, 52(%esp) # 4-byte Spill
- movl 108(%ecx), %edi
- movl 108(%edx), %eax
- adcl %edi, %eax
- movl %eax, 56(%esp) # 4-byte Spill
- movl 112(%ecx), %ebx
- movl 112(%edx), %edi
- adcl %ebx, %edi
- movl %edi, 60(%esp) # 4-byte Spill
- movl 116(%ecx), %ecx
- movl 116(%edx), %edx
- adcl %ecx, %edx
- sbbl %ebx, %ebx
- andl $1, %ebx
- movl 140(%esp), %ebp
- movl 76(%esp), %ecx # 4-byte Reload
- subl (%ebp), %ecx
- movl %ecx, 36(%esp) # 4-byte Spill
- movl 80(%esp), %ecx # 4-byte Reload
- sbbl 4(%ebp), %ecx
- movl %ecx, 32(%esp) # 4-byte Spill
- movl 84(%esp), %ecx # 4-byte Reload
- sbbl 8(%ebp), %ecx
- movl %ecx, 28(%esp) # 4-byte Spill
- movl 88(%esp), %ecx # 4-byte Reload
- sbbl 12(%ebp), %ecx
- movl %ecx, 24(%esp) # 4-byte Spill
- movl 92(%esp), %ecx # 4-byte Reload
- sbbl 16(%ebp), %ecx
- movl %ecx, 20(%esp) # 4-byte Spill
- movl 96(%esp), %ecx # 4-byte Reload
- sbbl 20(%ebp), %ecx
- movl %ecx, 16(%esp) # 4-byte Spill
- movl 100(%esp), %ecx # 4-byte Reload
- sbbl 24(%ebp), %ecx
- movl %ecx, 12(%esp) # 4-byte Spill
- movl 104(%esp), %ecx # 4-byte Reload
- sbbl 28(%ebp), %ecx
- movl %ecx, 8(%esp) # 4-byte Spill
- movl 64(%esp), %ecx # 4-byte Reload
- sbbl 32(%ebp), %ecx
- movl %ecx, 4(%esp) # 4-byte Spill
- movl 68(%esp), %ecx # 4-byte Reload
- sbbl 36(%ebp), %ecx
- movl %ecx, (%esp) # 4-byte Spill
- movl 72(%esp), %ecx # 4-byte Reload
- sbbl 40(%ebp), %ecx
- sbbl 44(%ebp), %esi
- movl %esi, 40(%esp) # 4-byte Spill
- sbbl 48(%ebp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl %edi, %eax
- movl %edx, %edi
- sbbl 52(%ebp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl %edi, %esi
- sbbl 56(%ebp), %esi
- sbbl $0, %ebx
- andl $1, %ebx
- jne .LBB236_2
-# BB#1:
- movl %esi, %edi
-.LBB236_2:
- testb %bl, %bl
- movl 76(%esp), %eax # 4-byte Reload
- movl 72(%esp), %esi # 4-byte Reload
- movl 68(%esp), %ebx # 4-byte Reload
- movl 64(%esp), %ebp # 4-byte Reload
- jne .LBB236_4
-# BB#3:
- movl %ecx, %esi
- movl (%esp), %ebx # 4-byte Reload
- movl 4(%esp), %ebp # 4-byte Reload
- movl 8(%esp), %eax # 4-byte Reload
- movl %eax, 104(%esp) # 4-byte Spill
- movl 12(%esp), %eax # 4-byte Reload
- movl %eax, 100(%esp) # 4-byte Spill
- movl 16(%esp), %eax # 4-byte Reload
- movl %eax, 96(%esp) # 4-byte Spill
- movl 20(%esp), %eax # 4-byte Reload
- movl %eax, 92(%esp) # 4-byte Spill
- movl 24(%esp), %eax # 4-byte Reload
- movl %eax, 88(%esp) # 4-byte Spill
- movl 28(%esp), %eax # 4-byte Reload
- movl %eax, 84(%esp) # 4-byte Spill
- movl 32(%esp), %eax # 4-byte Reload
- movl %eax, 80(%esp) # 4-byte Spill
- movl 36(%esp), %eax # 4-byte Reload
-.LBB236_4:
- movl 128(%esp), %edx
- movl %eax, 60(%edx)
- movl 80(%esp), %eax # 4-byte Reload
- movl %eax, 64(%edx)
- movl 84(%esp), %eax # 4-byte Reload
- movl %eax, 68(%edx)
- movl 88(%esp), %eax # 4-byte Reload
- movl %eax, 72(%edx)
- movl 92(%esp), %eax # 4-byte Reload
- movl %eax, 76(%edx)
- movl 96(%esp), %eax # 4-byte Reload
- movl %eax, 80(%edx)
- movl 100(%esp), %eax # 4-byte Reload
- movl %eax, 84(%edx)
- movl 104(%esp), %eax # 4-byte Reload
- movl %eax, 88(%edx)
- movl %ebp, 92(%edx)
- movl %ebx, 96(%edx)
- movl %esi, 100(%edx)
- movl 52(%esp), %eax # 4-byte Reload
- jne .LBB236_6
-# BB#5:
- movl 40(%esp), %eax # 4-byte Reload
-.LBB236_6:
- movl %eax, 104(%edx)
- movl 60(%esp), %ecx # 4-byte Reload
- movl 56(%esp), %eax # 4-byte Reload
- jne .LBB236_8
-# BB#7:
- movl 44(%esp), %eax # 4-byte Reload
-.LBB236_8:
- movl %eax, 108(%edx)
- jne .LBB236_10
-# BB#9:
- movl 48(%esp), %ecx # 4-byte Reload
-.LBB236_10:
- movl %ecx, 112(%edx)
- movl %edi, 116(%edx)
- addl $108, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end236:
- .size mcl_fpDbl_add15L, .Lfunc_end236-mcl_fpDbl_add15L
-
- .globl mcl_fpDbl_sub15L
- .align 16, 0x90
- .type mcl_fpDbl_sub15L,@function
-mcl_fpDbl_sub15L: # @mcl_fpDbl_sub15L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $100, %esp
- movl 124(%esp), %eax
- movl (%eax), %edx
- movl 4(%eax), %esi
- movl 128(%esp), %ebp
- subl (%ebp), %edx
- sbbl 4(%ebp), %esi
- movl 8(%eax), %edi
- sbbl 8(%ebp), %edi
- movl 120(%esp), %ecx
- movl %edx, (%ecx)
- movl 12(%eax), %edx
- sbbl 12(%ebp), %edx
- movl %esi, 4(%ecx)
- movl 16(%eax), %esi
- sbbl 16(%ebp), %esi
- movl %edi, 8(%ecx)
- movl 20(%ebp), %edi
- movl %edx, 12(%ecx)
- movl 20(%eax), %edx
- sbbl %edi, %edx
- movl 24(%ebp), %edi
- movl %esi, 16(%ecx)
- movl 24(%eax), %esi
- sbbl %edi, %esi
- movl 28(%ebp), %edi
- movl %edx, 20(%ecx)
- movl 28(%eax), %edx
- sbbl %edi, %edx
- movl 32(%ebp), %edi
- movl %esi, 24(%ecx)
- movl 32(%eax), %esi
- sbbl %edi, %esi
- movl 36(%ebp), %edi
- movl %edx, 28(%ecx)
- movl 36(%eax), %edx
- sbbl %edi, %edx
- movl 40(%ebp), %edi
- movl %esi, 32(%ecx)
- movl 40(%eax), %esi
- sbbl %edi, %esi
- movl 44(%ebp), %edi
- movl %edx, 36(%ecx)
- movl 44(%eax), %edx
- sbbl %edi, %edx
- movl 48(%ebp), %edi
- movl %esi, 40(%ecx)
- movl 48(%eax), %esi
- sbbl %edi, %esi
- movl 52(%ebp), %edi
- movl %edx, 44(%ecx)
- movl 52(%eax), %edx
- sbbl %edi, %edx
- movl 56(%ebp), %edi
- movl %esi, 48(%ecx)
- movl 56(%eax), %esi
- sbbl %edi, %esi
- movl 60(%ebp), %edi
- movl %edx, 52(%ecx)
- movl 60(%eax), %edx
- sbbl %edi, %edx
- movl %edx, 44(%esp) # 4-byte Spill
- movl 64(%ebp), %edx
- movl %esi, 56(%ecx)
- movl 64(%eax), %esi
- sbbl %edx, %esi
- movl %esi, 36(%esp) # 4-byte Spill
- movl 68(%ebp), %edx
- movl 68(%eax), %esi
- sbbl %edx, %esi
- movl %esi, 40(%esp) # 4-byte Spill
- movl 72(%ebp), %edx
- movl 72(%eax), %esi
- sbbl %edx, %esi
- movl %esi, 48(%esp) # 4-byte Spill
- movl 76(%ebp), %edx
- movl 76(%eax), %esi
- sbbl %edx, %esi
- movl %esi, 52(%esp) # 4-byte Spill
- movl 80(%ebp), %edx
- movl 80(%eax), %esi
- sbbl %edx, %esi
- movl %esi, 56(%esp) # 4-byte Spill
- movl 84(%ebp), %edx
- movl 84(%eax), %esi
- sbbl %edx, %esi
- movl %esi, 64(%esp) # 4-byte Spill
- movl 88(%ebp), %edx
- movl 88(%eax), %esi
- sbbl %edx, %esi
- movl %esi, 68(%esp) # 4-byte Spill
- movl 92(%ebp), %edx
- movl 92(%eax), %esi
- sbbl %edx, %esi
- movl %esi, 72(%esp) # 4-byte Spill
- movl 96(%ebp), %edx
- movl 96(%eax), %esi
- sbbl %edx, %esi
- movl %esi, 76(%esp) # 4-byte Spill
- movl 100(%ebp), %edx
- movl 100(%eax), %esi
- sbbl %edx, %esi
- movl %esi, 80(%esp) # 4-byte Spill
- movl 104(%ebp), %edx
- movl 104(%eax), %esi
- sbbl %edx, %esi
- movl %esi, 84(%esp) # 4-byte Spill
- movl 108(%ebp), %edx
- movl 108(%eax), %esi
- sbbl %edx, %esi
- movl %esi, 88(%esp) # 4-byte Spill
- movl 112(%ebp), %edx
- movl 112(%eax), %esi
- sbbl %edx, %esi
- movl %esi, 92(%esp) # 4-byte Spill
- movl 116(%ebp), %edx
- movl 116(%eax), %eax
- sbbl %edx, %eax
- movl %eax, 96(%esp) # 4-byte Spill
- movl $0, %eax
- sbbl $0, %eax
- andl $1, %eax
- movl 132(%esp), %esi
- jne .LBB237_1
-# BB#2:
- movl $0, 60(%esp) # 4-byte Folded Spill
- jmp .LBB237_3
-.LBB237_1:
- movl 56(%esi), %edx
- movl %edx, 60(%esp) # 4-byte Spill
-.LBB237_3:
- testb %al, %al
- jne .LBB237_4
-# BB#5:
- movl $0, 24(%esp) # 4-byte Folded Spill
- movl $0, %ebx
- jmp .LBB237_6
-.LBB237_4:
- movl (%esi), %ebx
- movl 4(%esi), %eax
- movl %eax, 24(%esp) # 4-byte Spill
-.LBB237_6:
- jne .LBB237_7
-# BB#8:
- movl $0, 32(%esp) # 4-byte Folded Spill
- jmp .LBB237_9
-.LBB237_7:
- movl 52(%esi), %eax
- movl %eax, 32(%esp) # 4-byte Spill
-.LBB237_9:
- jne .LBB237_10
-# BB#11:
- movl $0, 28(%esp) # 4-byte Folded Spill
- jmp .LBB237_12
-.LBB237_10:
- movl 48(%esi), %eax
- movl %eax, 28(%esp) # 4-byte Spill
-.LBB237_12:
- jne .LBB237_13
-# BB#14:
- movl $0, 20(%esp) # 4-byte Folded Spill
- jmp .LBB237_15
-.LBB237_13:
- movl 44(%esi), %eax
- movl %eax, 20(%esp) # 4-byte Spill
-.LBB237_15:
- jne .LBB237_16
-# BB#17:
- movl $0, 16(%esp) # 4-byte Folded Spill
- jmp .LBB237_18
-.LBB237_16:
- movl 40(%esi), %eax
- movl %eax, 16(%esp) # 4-byte Spill
-.LBB237_18:
- jne .LBB237_19
-# BB#20:
- movl $0, 12(%esp) # 4-byte Folded Spill
- jmp .LBB237_21
-.LBB237_19:
- movl 36(%esi), %eax
- movl %eax, 12(%esp) # 4-byte Spill
-.LBB237_21:
- jne .LBB237_22
-# BB#23:
- movl $0, 8(%esp) # 4-byte Folded Spill
- jmp .LBB237_24
-.LBB237_22:
- movl 32(%esi), %eax
- movl %eax, 8(%esp) # 4-byte Spill
-.LBB237_24:
- jne .LBB237_25
-# BB#26:
- movl $0, 4(%esp) # 4-byte Folded Spill
- jmp .LBB237_27
-.LBB237_25:
- movl 28(%esi), %eax
- movl %eax, 4(%esp) # 4-byte Spill
-.LBB237_27:
- jne .LBB237_28
-# BB#29:
- movl $0, (%esp) # 4-byte Folded Spill
- jmp .LBB237_30
-.LBB237_28:
- movl 24(%esi), %eax
- movl %eax, (%esp) # 4-byte Spill
-.LBB237_30:
- jne .LBB237_31
-# BB#32:
- movl $0, %edx
- jmp .LBB237_33
-.LBB237_31:
- movl 20(%esi), %edx
-.LBB237_33:
- jne .LBB237_34
-# BB#35:
- movl $0, %ebp
- jmp .LBB237_36
-.LBB237_34:
- movl 16(%esi), %ebp
-.LBB237_36:
- jne .LBB237_37
-# BB#38:
- movl $0, %eax
- jmp .LBB237_39
-.LBB237_37:
- movl 12(%esi), %eax
-.LBB237_39:
- jne .LBB237_40
-# BB#41:
- xorl %esi, %esi
- jmp .LBB237_42
-.LBB237_40:
- movl 8(%esi), %esi
-.LBB237_42:
- addl 44(%esp), %ebx # 4-byte Folded Reload
- movl 24(%esp), %edi # 4-byte Reload
- adcl 36(%esp), %edi # 4-byte Folded Reload
- movl %ebx, 60(%ecx)
- adcl 40(%esp), %esi # 4-byte Folded Reload
- movl %edi, 64(%ecx)
- adcl 48(%esp), %eax # 4-byte Folded Reload
- movl %esi, 68(%ecx)
- adcl 52(%esp), %ebp # 4-byte Folded Reload
- movl %eax, 72(%ecx)
- adcl 56(%esp), %edx # 4-byte Folded Reload
- movl %ebp, 76(%ecx)
- movl (%esp), %esi # 4-byte Reload
- adcl 64(%esp), %esi # 4-byte Folded Reload
- movl %edx, 80(%ecx)
- movl 4(%esp), %eax # 4-byte Reload
- adcl 68(%esp), %eax # 4-byte Folded Reload
- movl %esi, 84(%ecx)
- movl 8(%esp), %edx # 4-byte Reload
- adcl 72(%esp), %edx # 4-byte Folded Reload
- movl %eax, 88(%ecx)
- movl 12(%esp), %eax # 4-byte Reload
- adcl 76(%esp), %eax # 4-byte Folded Reload
- movl %edx, 92(%ecx)
- movl 16(%esp), %edx # 4-byte Reload
- adcl 80(%esp), %edx # 4-byte Folded Reload
- movl %eax, 96(%ecx)
- movl 20(%esp), %eax # 4-byte Reload
- adcl 84(%esp), %eax # 4-byte Folded Reload
- movl %edx, 100(%ecx)
- movl 28(%esp), %edx # 4-byte Reload
- adcl 88(%esp), %edx # 4-byte Folded Reload
- movl %eax, 104(%ecx)
- movl 32(%esp), %eax # 4-byte Reload
- adcl 92(%esp), %eax # 4-byte Folded Reload
- movl %edx, 108(%ecx)
- movl %eax, 112(%ecx)
- movl 60(%esp), %eax # 4-byte Reload
- adcl 96(%esp), %eax # 4-byte Folded Reload
- movl %eax, 116(%ecx)
- addl $100, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end237:
- .size mcl_fpDbl_sub15L, .Lfunc_end237-mcl_fpDbl_sub15L
-
- .align 16, 0x90
- .type .LmulPv512x32,@function
-.LmulPv512x32: # @mulPv512x32
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $112, %esp
- movl %edx, %ebp
- movl 132(%esp), %ebx
- movl %ebx, %eax
- mull 60(%ebp)
- movl %edx, 108(%esp) # 4-byte Spill
- movl %eax, 104(%esp) # 4-byte Spill
- movl %ebx, %eax
- mull 56(%ebp)
- movl %edx, 100(%esp) # 4-byte Spill
- movl %eax, 96(%esp) # 4-byte Spill
- movl %ebx, %eax
- mull 52(%ebp)
- movl %edx, 92(%esp) # 4-byte Spill
- movl %eax, 88(%esp) # 4-byte Spill
- movl %ebx, %eax
- mull 48(%ebp)
- movl %edx, 84(%esp) # 4-byte Spill
- movl %eax, 80(%esp) # 4-byte Spill
- movl %ebx, %eax
- mull 44(%ebp)
- movl %edx, 76(%esp) # 4-byte Spill
- movl %eax, 72(%esp) # 4-byte Spill
- movl %ebx, %eax
- mull 40(%ebp)
- movl %edx, 68(%esp) # 4-byte Spill
- movl %eax, 64(%esp) # 4-byte Spill
- movl %ebx, %eax
- mull 36(%ebp)
- movl %edx, 60(%esp) # 4-byte Spill
- movl %eax, 56(%esp) # 4-byte Spill
- movl %ebx, %eax
- mull 32(%ebp)
- movl %edx, 52(%esp) # 4-byte Spill
- movl %eax, 48(%esp) # 4-byte Spill
- movl %ebx, %eax
- mull 28(%ebp)
- movl %edx, 44(%esp) # 4-byte Spill
- movl %eax, 40(%esp) # 4-byte Spill
- movl %ebx, %eax
- mull 24(%ebp)
- movl %edx, 36(%esp) # 4-byte Spill
- movl %eax, 32(%esp) # 4-byte Spill
- movl %ebx, %eax
- mull 20(%ebp)
- movl %edx, 28(%esp) # 4-byte Spill
- movl %eax, 24(%esp) # 4-byte Spill
- movl %ebx, %eax
- mull 16(%ebp)
- movl %edx, 20(%esp) # 4-byte Spill
- movl %eax, 16(%esp) # 4-byte Spill
- movl %ebx, %eax
- mull 12(%ebp)
- movl %edx, 12(%esp) # 4-byte Spill
- movl %eax, 8(%esp) # 4-byte Spill
- movl %ebx, %eax
- mull 8(%ebp)
- movl %edx, %esi
- movl %eax, 4(%esp) # 4-byte Spill
- movl %ebx, %eax
- mull 4(%ebp)
- movl %edx, %edi
- movl %eax, (%esp) # 4-byte Spill
- movl %ebx, %eax
- mull (%ebp)
- movl %eax, (%ecx)
- addl (%esp), %edx # 4-byte Folded Reload
- movl %edx, 4(%ecx)
- adcl 4(%esp), %edi # 4-byte Folded Reload
- movl %edi, 8(%ecx)
- adcl 8(%esp), %esi # 4-byte Folded Reload
- movl %esi, 12(%ecx)
- movl 12(%esp), %eax # 4-byte Reload
- adcl 16(%esp), %eax # 4-byte Folded Reload
- movl %eax, 16(%ecx)
- movl 20(%esp), %eax # 4-byte Reload
- adcl 24(%esp), %eax # 4-byte Folded Reload
- movl %eax, 20(%ecx)
- movl 28(%esp), %eax # 4-byte Reload
- adcl 32(%esp), %eax # 4-byte Folded Reload
- movl %eax, 24(%ecx)
- movl 36(%esp), %eax # 4-byte Reload
- adcl 40(%esp), %eax # 4-byte Folded Reload
- movl %eax, 28(%ecx)
- movl 44(%esp), %eax # 4-byte Reload
- adcl 48(%esp), %eax # 4-byte Folded Reload
- movl %eax, 32(%ecx)
- movl 52(%esp), %eax # 4-byte Reload
- adcl 56(%esp), %eax # 4-byte Folded Reload
- movl %eax, 36(%ecx)
- movl 60(%esp), %eax # 4-byte Reload
- adcl 64(%esp), %eax # 4-byte Folded Reload
- movl %eax, 40(%ecx)
- movl 68(%esp), %eax # 4-byte Reload
- adcl 72(%esp), %eax # 4-byte Folded Reload
- movl %eax, 44(%ecx)
- movl 76(%esp), %eax # 4-byte Reload
- adcl 80(%esp), %eax # 4-byte Folded Reload
- movl %eax, 48(%ecx)
- movl 84(%esp), %eax # 4-byte Reload
- adcl 88(%esp), %eax # 4-byte Folded Reload
- movl %eax, 52(%ecx)
- movl 92(%esp), %eax # 4-byte Reload
- adcl 96(%esp), %eax # 4-byte Folded Reload
- movl %eax, 56(%ecx)
- movl 100(%esp), %eax # 4-byte Reload
- adcl 104(%esp), %eax # 4-byte Folded Reload
- movl %eax, 60(%ecx)
- movl 108(%esp), %eax # 4-byte Reload
- adcl $0, %eax
- movl %eax, 64(%ecx)
- movl %ecx, %eax
- addl $112, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end238:
- .size .LmulPv512x32, .Lfunc_end238-.LmulPv512x32
-
- .globl mcl_fp_mulUnitPre16L
- .align 16, 0x90
- .type mcl_fp_mulUnitPre16L,@function
-mcl_fp_mulUnitPre16L: # @mcl_fp_mulUnitPre16L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $124, %esp
- calll .L239$pb
-.L239$pb:
- popl %ebx
-.Ltmp50:
- addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp50-.L239$pb), %ebx
- movl 152(%esp), %eax
- movl %eax, (%esp)
- leal 56(%esp), %ecx
- movl 148(%esp), %edx
- calll .LmulPv512x32
- movl 120(%esp), %eax
- movl %eax, 52(%esp) # 4-byte Spill
- movl 116(%esp), %eax
- movl %eax, 48(%esp) # 4-byte Spill
- movl 112(%esp), %eax
- movl %eax, 44(%esp) # 4-byte Spill
- movl 108(%esp), %eax
- movl %eax, 40(%esp) # 4-byte Spill
- movl 104(%esp), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- movl 100(%esp), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 96(%esp), %eax
- movl %eax, 28(%esp) # 4-byte Spill
- movl 92(%esp), %eax
- movl %eax, 24(%esp) # 4-byte Spill
- movl 88(%esp), %eax
- movl %eax, 20(%esp) # 4-byte Spill
- movl 84(%esp), %eax
- movl %eax, 16(%esp) # 4-byte Spill
- movl 80(%esp), %eax
- movl %eax, 12(%esp) # 4-byte Spill
- movl 76(%esp), %ebp
- movl 72(%esp), %ebx
- movl 68(%esp), %edi
- movl 64(%esp), %esi
- movl 56(%esp), %edx
- movl 60(%esp), %ecx
- movl 144(%esp), %eax
- movl %edx, (%eax)
- movl %ecx, 4(%eax)
- movl %esi, 8(%eax)
- movl %edi, 12(%eax)
- movl %ebx, 16(%eax)
- movl %ebp, 20(%eax)
- movl 12(%esp), %ecx # 4-byte Reload
- movl %ecx, 24(%eax)
- movl 16(%esp), %ecx # 4-byte Reload
- movl %ecx, 28(%eax)
- movl 20(%esp), %ecx # 4-byte Reload
- movl %ecx, 32(%eax)
- movl 24(%esp), %ecx # 4-byte Reload
- movl %ecx, 36(%eax)
- movl 28(%esp), %ecx # 4-byte Reload
- movl %ecx, 40(%eax)
- movl 32(%esp), %ecx # 4-byte Reload
- movl %ecx, 44(%eax)
- movl 36(%esp), %ecx # 4-byte Reload
- movl %ecx, 48(%eax)
- movl 40(%esp), %ecx # 4-byte Reload
- movl %ecx, 52(%eax)
- movl 44(%esp), %ecx # 4-byte Reload
- movl %ecx, 56(%eax)
- movl 48(%esp), %ecx # 4-byte Reload
- movl %ecx, 60(%eax)
- movl 52(%esp), %ecx # 4-byte Reload
- movl %ecx, 64(%eax)
- addl $124, %esp
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end239:
- .size mcl_fp_mulUnitPre16L, .Lfunc_end239-mcl_fp_mulUnitPre16L
-
- .globl mcl_fpDbl_mulPre16L
- .align 16, 0x90
- .type mcl_fpDbl_mulPre16L,@function
-mcl_fpDbl_mulPre16L: # @mcl_fpDbl_mulPre16L
-# BB#0:
- pushl %ebp
- movl %esp, %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $300, %esp # imm = 0x12C
- calll .L240$pb
-.L240$pb:
- popl %ebx
-.Ltmp51:
- addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp51-.L240$pb), %ebx
- movl %ebx, -224(%ebp) # 4-byte Spill
- movl 16(%ebp), %edi
- movl %edi, 8(%esp)
- movl 12(%ebp), %esi
- movl %esi, 4(%esp)
- movl 8(%ebp), %eax
- movl %eax, (%esp)
- calll mcl_fpDbl_mulPre8L@PLT
- leal 32(%edi), %eax
- movl %eax, 8(%esp)
- leal 32(%esi), %eax
- movl %eax, 4(%esp)
- movl 8(%ebp), %eax
- leal 64(%eax), %eax
- movl %eax, (%esp)
- calll mcl_fpDbl_mulPre8L@PLT
- movl 52(%esi), %ebx
- movl 48(%esi), %eax
- movl 44(%esi), %ecx
- movl 40(%esi), %edx
- movl %edx, -176(%ebp) # 4-byte Spill
- movl (%esi), %edi
- movl 4(%esi), %edx
- addl 32(%esi), %edi
- movl %edi, -184(%ebp) # 4-byte Spill
- movl %esi, %edi
- adcl 36(%edi), %edx
- movl %edx, -236(%ebp) # 4-byte Spill
- movl -176(%ebp), %edx # 4-byte Reload
- adcl 8(%edi), %edx
- movl %edx, -176(%ebp) # 4-byte Spill
- adcl 12(%edi), %ecx
- movl %ecx, -232(%ebp) # 4-byte Spill
- adcl 16(%edi), %eax
- movl %eax, -212(%ebp) # 4-byte Spill
- adcl 20(%edi), %ebx
- movl %ebx, -228(%ebp) # 4-byte Spill
- movl 56(%edi), %eax
- adcl 24(%edi), %eax
- movl %eax, -248(%ebp) # 4-byte Spill
- pushl %eax
- seto %al
- lahf
- movl %eax, %ecx
- popl %eax
- movl %ecx, -144(%ebp) # 4-byte Spill
- movl 16(%ebp), %esi
- movl (%esi), %ecx
- addl 32(%esi), %ecx
- movl %ecx, -188(%ebp) # 4-byte Spill
- movl 4(%esi), %ecx
- adcl 36(%esi), %ecx
- movl %ecx, -192(%ebp) # 4-byte Spill
- movl 40(%esi), %ecx
- adcl 8(%esi), %ecx
- movl %ecx, -196(%ebp) # 4-byte Spill
- movl 44(%esi), %ecx
- adcl 12(%esi), %ecx
- movl %ecx, -200(%ebp) # 4-byte Spill
- movl 48(%esi), %ecx
- adcl 16(%esi), %ecx
- movl %ecx, -204(%ebp) # 4-byte Spill
- movl 52(%esi), %ecx
- adcl 20(%esi), %ecx
- movl %ecx, -208(%ebp) # 4-byte Spill
- movl 56(%esi), %edx
- adcl 24(%esi), %edx
- movl 60(%esi), %ecx
- adcl 28(%esi), %ecx
- pushl %eax
- seto %al
- lahf
- movl %eax, %ebx
- popl %eax
- movl %ebx, -252(%ebp) # 4-byte Spill
- movl -212(%ebp), %ebx # 4-byte Reload
- movl -176(%ebp), %esi # 4-byte Reload
- movl %esi, -216(%ebp) # 4-byte Spill
- movl -184(%ebp), %esi # 4-byte Reload
- movl %esi, -220(%ebp) # 4-byte Spill
- jb .LBB240_2
-# BB#1:
- xorl %eax, %eax
- xorl %ebx, %ebx
- movl $0, -216(%ebp) # 4-byte Folded Spill
- movl $0, -220(%ebp) # 4-byte Folded Spill
-.LBB240_2:
- movl %ebx, -244(%ebp) # 4-byte Spill
- movl %eax, -240(%ebp) # 4-byte Spill
- movl 60(%edi), %eax
- movl -144(%ebp), %ebx # 4-byte Reload
- pushl %eax
- movl %ebx, %eax
- addb $127, %al
- sahf
- popl %eax
- adcl 28(%edi), %eax
- movl %eax, -180(%ebp) # 4-byte Spill
- movl %ecx, -172(%ebp) # 4-byte Spill
- movl %edx, -144(%ebp) # 4-byte Spill
- movl -208(%ebp), %eax # 4-byte Reload
- movl %eax, -148(%ebp) # 4-byte Spill
- movl -204(%ebp), %eax # 4-byte Reload
- movl %eax, -152(%ebp) # 4-byte Spill
- movl -200(%ebp), %eax # 4-byte Reload
- movl %eax, -156(%ebp) # 4-byte Spill
- movl -196(%ebp), %eax # 4-byte Reload
- movl %eax, -160(%ebp) # 4-byte Spill
- movl -192(%ebp), %eax # 4-byte Reload
- movl %eax, -164(%ebp) # 4-byte Spill
- movl -188(%ebp), %eax # 4-byte Reload
- movl %eax, -168(%ebp) # 4-byte Spill
- jb .LBB240_4
-# BB#3:
- movl $0, -172(%ebp) # 4-byte Folded Spill
- movl $0, -144(%ebp) # 4-byte Folded Spill
- movl $0, -148(%ebp) # 4-byte Folded Spill
- movl $0, -152(%ebp) # 4-byte Folded Spill
- movl $0, -156(%ebp) # 4-byte Folded Spill
- movl $0, -160(%ebp) # 4-byte Folded Spill
- movl $0, -164(%ebp) # 4-byte Folded Spill
- movl $0, -168(%ebp) # 4-byte Folded Spill
-.LBB240_4:
- movl -184(%ebp), %eax # 4-byte Reload
- movl %eax, -108(%ebp)
- movl -236(%ebp), %eax # 4-byte Reload
- movl %eax, -104(%ebp)
- movl -176(%ebp), %edi # 4-byte Reload
- movl %edi, -100(%ebp)
- movl -232(%ebp), %edi # 4-byte Reload
- movl %edi, -96(%ebp)
- movl -212(%ebp), %esi # 4-byte Reload
- movl %esi, -92(%ebp)
- movl -228(%ebp), %esi # 4-byte Reload
- movl %esi, -88(%ebp)
- movl -248(%ebp), %ebx # 4-byte Reload
- movl %ebx, -84(%ebp)
- movl -188(%ebp), %ebx # 4-byte Reload
- movl %ebx, -140(%ebp)
- movl -192(%ebp), %ebx # 4-byte Reload
- movl %ebx, -136(%ebp)
- movl -196(%ebp), %ebx # 4-byte Reload
- movl %ebx, -132(%ebp)
- movl -200(%ebp), %ebx # 4-byte Reload
- movl %ebx, -128(%ebp)
- movl -204(%ebp), %ebx # 4-byte Reload
- movl %ebx, -124(%ebp)
- movl -208(%ebp), %ebx # 4-byte Reload
- movl %ebx, -120(%ebp)
- movl %esi, %ebx
- movl %edi, %esi
- movl %eax, %edi
- movl %edx, -116(%ebp)
- movl %ecx, -112(%ebp)
- sbbl %edx, %edx
- movl -180(%ebp), %eax # 4-byte Reload
- movl %eax, -80(%ebp)
- movl -252(%ebp), %ecx # 4-byte Reload
- pushl %eax
- movl %ecx, %eax
- addb $127, %al
- sahf
- popl %eax
- jb .LBB240_6
-# BB#5:
- movl $0, %eax
- movl $0, %ebx
- movl $0, %esi
- movl $0, %edi
-.LBB240_6:
- movl %eax, -180(%ebp) # 4-byte Spill
- sbbl %eax, %eax
- leal -140(%ebp), %ecx
- movl %ecx, 8(%esp)
- leal -108(%ebp), %ecx
- movl %ecx, 4(%esp)
- leal -76(%ebp), %ecx
- movl %ecx, (%esp)
- andl %eax, %edx
- movl -220(%ebp), %eax # 4-byte Reload
- addl %eax, -168(%ebp) # 4-byte Folded Spill
- adcl %edi, -164(%ebp) # 4-byte Folded Spill
- movl -216(%ebp), %eax # 4-byte Reload
- adcl %eax, -160(%ebp) # 4-byte Folded Spill
- adcl %esi, -156(%ebp) # 4-byte Folded Spill
- movl -244(%ebp), %eax # 4-byte Reload
- adcl %eax, -152(%ebp) # 4-byte Folded Spill
- adcl %ebx, -148(%ebp) # 4-byte Folded Spill
- movl -144(%ebp), %eax # 4-byte Reload
- adcl -240(%ebp), %eax # 4-byte Folded Reload
- movl %eax, -144(%ebp) # 4-byte Spill
- movl -172(%ebp), %edi # 4-byte Reload
- adcl -180(%ebp), %edi # 4-byte Folded Reload
- sbbl %esi, %esi
- andl $1, %esi
- andl $1, %edx
- movl %edx, -176(%ebp) # 4-byte Spill
- movl -224(%ebp), %ebx # 4-byte Reload
- calll mcl_fpDbl_mulPre8L@PLT
- movl -168(%ebp), %eax # 4-byte Reload
- addl -44(%ebp), %eax
- movl %eax, -168(%ebp) # 4-byte Spill
- movl -164(%ebp), %eax # 4-byte Reload
- adcl -40(%ebp), %eax
- movl %eax, -164(%ebp) # 4-byte Spill
- movl -160(%ebp), %eax # 4-byte Reload
- adcl -36(%ebp), %eax
- movl %eax, -160(%ebp) # 4-byte Spill
- movl -156(%ebp), %eax # 4-byte Reload
- adcl -32(%ebp), %eax
- movl %eax, -156(%ebp) # 4-byte Spill
- movl -152(%ebp), %eax # 4-byte Reload
- adcl -28(%ebp), %eax
- movl %eax, -152(%ebp) # 4-byte Spill
- movl -148(%ebp), %eax # 4-byte Reload
- adcl -24(%ebp), %eax
- movl %eax, -148(%ebp) # 4-byte Spill
- movl -144(%ebp), %eax # 4-byte Reload
- adcl -20(%ebp), %eax
- movl %eax, -144(%ebp) # 4-byte Spill
- adcl -16(%ebp), %edi
- movl %edi, -172(%ebp) # 4-byte Spill
- adcl %esi, -176(%ebp) # 4-byte Folded Spill
- movl -76(%ebp), %eax
- movl 8(%ebp), %esi
- subl (%esi), %eax
- movl %eax, -196(%ebp) # 4-byte Spill
- movl -72(%ebp), %ecx
- sbbl 4(%esi), %ecx
- movl -68(%ebp), %eax
- sbbl 8(%esi), %eax
- movl %eax, -192(%ebp) # 4-byte Spill
- movl -64(%ebp), %edx
- sbbl 12(%esi), %edx
- movl -60(%ebp), %ebx
- sbbl 16(%esi), %ebx
- movl -56(%ebp), %eax
- sbbl 20(%esi), %eax
- movl %eax, -180(%ebp) # 4-byte Spill
- movl -52(%ebp), %eax
- sbbl 24(%esi), %eax
- movl %eax, -184(%ebp) # 4-byte Spill
- movl -48(%ebp), %eax
- sbbl 28(%esi), %eax
- movl %eax, -188(%ebp) # 4-byte Spill
- movl 32(%esi), %eax
- movl %eax, -200(%ebp) # 4-byte Spill
- sbbl %eax, -168(%ebp) # 4-byte Folded Spill
- movl 36(%esi), %eax
- movl %eax, -204(%ebp) # 4-byte Spill
- sbbl %eax, -164(%ebp) # 4-byte Folded Spill
- movl 40(%esi), %eax
- movl %eax, -208(%ebp) # 4-byte Spill
- sbbl %eax, -160(%ebp) # 4-byte Folded Spill
- movl 44(%esi), %eax
- movl %eax, -212(%ebp) # 4-byte Spill
- sbbl %eax, -156(%ebp) # 4-byte Folded Spill
- movl 48(%esi), %eax
- movl %eax, -216(%ebp) # 4-byte Spill
- sbbl %eax, -152(%ebp) # 4-byte Folded Spill
- movl 52(%esi), %eax
- movl %eax, -220(%ebp) # 4-byte Spill
- sbbl %eax, -148(%ebp) # 4-byte Folded Spill
- movl 56(%esi), %eax
- movl %eax, -224(%ebp) # 4-byte Spill
- movl -144(%ebp), %edi # 4-byte Reload
- sbbl %eax, %edi
- movl 60(%esi), %eax
- movl %eax, -228(%ebp) # 4-byte Spill
- sbbl %eax, -172(%ebp) # 4-byte Folded Spill
- sbbl $0, -176(%ebp) # 4-byte Folded Spill
- movl 64(%esi), %eax
- movl %eax, -260(%ebp) # 4-byte Spill
- subl %eax, -196(%ebp) # 4-byte Folded Spill
- movl 68(%esi), %eax
- movl %eax, -264(%ebp) # 4-byte Spill
- sbbl %eax, %ecx
- movl 72(%esi), %eax
- movl %eax, -268(%ebp) # 4-byte Spill
- sbbl %eax, -192(%ebp) # 4-byte Folded Spill
- movl 76(%esi), %eax
- movl %eax, -272(%ebp) # 4-byte Spill
- sbbl %eax, %edx
- movl 80(%esi), %eax
- movl %eax, -276(%ebp) # 4-byte Spill
- sbbl %eax, %ebx
- movl 84(%esi), %eax
- movl %eax, -280(%ebp) # 4-byte Spill
- sbbl %eax, -180(%ebp) # 4-byte Folded Spill
- movl 88(%esi), %eax
- movl %eax, -284(%ebp) # 4-byte Spill
- sbbl %eax, -184(%ebp) # 4-byte Folded Spill
- movl 92(%esi), %eax
- movl %eax, -288(%ebp) # 4-byte Spill
- sbbl %eax, -188(%ebp) # 4-byte Folded Spill
- movl 96(%esi), %eax
- movl %eax, -292(%ebp) # 4-byte Spill
- sbbl %eax, -168(%ebp) # 4-byte Folded Spill
- movl 100(%esi), %eax
- movl %eax, -236(%ebp) # 4-byte Spill
- sbbl %eax, -164(%ebp) # 4-byte Folded Spill
- movl 104(%esi), %eax
- movl %eax, -240(%ebp) # 4-byte Spill
- sbbl %eax, -160(%ebp) # 4-byte Folded Spill
- movl 108(%esi), %eax
- movl %eax, -244(%ebp) # 4-byte Spill
- sbbl %eax, -156(%ebp) # 4-byte Folded Spill
- movl 112(%esi), %eax
- movl %eax, -248(%ebp) # 4-byte Spill
- sbbl %eax, -152(%ebp) # 4-byte Folded Spill
- movl 116(%esi), %eax
- movl %eax, -252(%ebp) # 4-byte Spill
- sbbl %eax, -148(%ebp) # 4-byte Folded Spill
- movl 120(%esi), %eax
- movl %eax, -232(%ebp) # 4-byte Spill
- sbbl %eax, %edi
- movl %edi, -144(%ebp) # 4-byte Spill
- movl 124(%esi), %eax
- movl %eax, -256(%ebp) # 4-byte Spill
- sbbl %eax, -172(%ebp) # 4-byte Folded Spill
- movl -176(%ebp), %edi # 4-byte Reload
- sbbl $0, %edi
- movl -196(%ebp), %eax # 4-byte Reload
- addl -200(%ebp), %eax # 4-byte Folded Reload
- adcl -204(%ebp), %ecx # 4-byte Folded Reload
- movl %eax, 32(%esi)
- movl -192(%ebp), %eax # 4-byte Reload
- adcl -208(%ebp), %eax # 4-byte Folded Reload
- movl %ecx, 36(%esi)
- adcl -212(%ebp), %edx # 4-byte Folded Reload
- movl %eax, 40(%esi)
- adcl -216(%ebp), %ebx # 4-byte Folded Reload
- movl %edx, 44(%esi)
- movl -180(%ebp), %eax # 4-byte Reload
- adcl -220(%ebp), %eax # 4-byte Folded Reload
- movl %ebx, 48(%esi)
- movl -184(%ebp), %ecx # 4-byte Reload
- adcl -224(%ebp), %ecx # 4-byte Folded Reload
- movl %eax, 52(%esi)
- movl -188(%ebp), %edx # 4-byte Reload
- adcl -228(%ebp), %edx # 4-byte Folded Reload
- movl %ecx, 56(%esi)
- movl -168(%ebp), %eax # 4-byte Reload
- adcl -260(%ebp), %eax # 4-byte Folded Reload
- movl %edx, 60(%esi)
- movl -164(%ebp), %ecx # 4-byte Reload
- adcl -264(%ebp), %ecx # 4-byte Folded Reload
- movl %eax, 64(%esi)
- movl -160(%ebp), %eax # 4-byte Reload
- adcl -268(%ebp), %eax # 4-byte Folded Reload
- movl %ecx, 68(%esi)
- movl -156(%ebp), %ecx # 4-byte Reload
- adcl -272(%ebp), %ecx # 4-byte Folded Reload
- movl %eax, 72(%esi)
- movl -152(%ebp), %eax # 4-byte Reload
- adcl -276(%ebp), %eax # 4-byte Folded Reload
- movl %ecx, 76(%esi)
- movl -148(%ebp), %ecx # 4-byte Reload
- adcl -280(%ebp), %ecx # 4-byte Folded Reload
- movl %eax, 80(%esi)
- movl -144(%ebp), %eax # 4-byte Reload
- adcl -284(%ebp), %eax # 4-byte Folded Reload
- movl %ecx, 84(%esi)
- movl -172(%ebp), %ecx # 4-byte Reload
- adcl -288(%ebp), %ecx # 4-byte Folded Reload
- movl %eax, 88(%esi)
- adcl -292(%ebp), %edi # 4-byte Folded Reload
- movl %ecx, 92(%esi)
- movl %edi, 96(%esi)
- movl -236(%ebp), %eax # 4-byte Reload
- adcl $0, %eax
- movl %eax, 100(%esi)
- movl -240(%ebp), %eax # 4-byte Reload
- adcl $0, %eax
- movl %eax, 104(%esi)
- movl -244(%ebp), %eax # 4-byte Reload
- adcl $0, %eax
- movl %eax, 108(%esi)
- movl -248(%ebp), %eax # 4-byte Reload
- adcl $0, %eax
- movl %eax, 112(%esi)
- movl -252(%ebp), %eax # 4-byte Reload
- adcl $0, %eax
- movl %eax, 116(%esi)
- movl -232(%ebp), %eax # 4-byte Reload
- adcl $0, %eax
- movl %eax, 120(%esi)
- movl -256(%ebp), %eax # 4-byte Reload
- adcl $0, %eax
- movl %eax, 124(%esi)
- addl $300, %esp # imm = 0x12C
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end240:
- .size mcl_fpDbl_mulPre16L, .Lfunc_end240-mcl_fpDbl_mulPre16L
-
- .globl mcl_fpDbl_sqrPre16L
- .align 16, 0x90
- .type mcl_fpDbl_sqrPre16L,@function
-mcl_fpDbl_sqrPre16L: # @mcl_fpDbl_sqrPre16L
-# BB#0:
- pushl %ebp
- movl %esp, %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $300, %esp # imm = 0x12C
- calll .L241$pb
-.L241$pb:
- popl %ebx
-.Ltmp52:
- addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp52-.L241$pb), %ebx
- movl %ebx, -184(%ebp) # 4-byte Spill
- movl 12(%ebp), %edi
- movl %edi, 8(%esp)
- movl %edi, 4(%esp)
- movl 8(%ebp), %esi
- movl %esi, (%esp)
- calll mcl_fpDbl_mulPre8L@PLT
- leal 32(%edi), %eax
- movl %eax, 8(%esp)
- movl %eax, 4(%esp)
- leal 64(%esi), %eax
- movl %eax, (%esp)
- calll mcl_fpDbl_mulPre8L@PLT
- movl 52(%edi), %eax
- movl %eax, -180(%ebp) # 4-byte Spill
- movl 48(%edi), %eax
- movl 44(%edi), %ebx
- movl 40(%edi), %esi
- movl (%edi), %ecx
- movl 4(%edi), %edx
- addl 32(%edi), %ecx
- movl %ecx, -192(%ebp) # 4-byte Spill
- adcl 36(%edi), %edx
- movl %edx, -196(%ebp) # 4-byte Spill
- adcl 8(%edi), %esi
- movl %esi, -188(%ebp) # 4-byte Spill
- adcl 12(%edi), %ebx
- adcl 16(%edi), %eax
- movl %eax, -208(%ebp) # 4-byte Spill
- movl -180(%ebp), %eax # 4-byte Reload
- adcl 20(%edi), %eax
- movl %eax, -180(%ebp) # 4-byte Spill
- seto %al
- lahf
- movl %eax, %eax
- movl %eax, -168(%ebp) # 4-byte Spill
- addl %ecx, %ecx
- movl %ecx, -164(%ebp) # 4-byte Spill
- adcl %edx, %edx
- movl %edx, -160(%ebp) # 4-byte Spill
- adcl %esi, %esi
- movl %esi, -156(%ebp) # 4-byte Spill
- movl %ebx, %edx
- movl %ebx, %esi
- adcl %edx, %edx
- movl %edx, -152(%ebp) # 4-byte Spill
- movl -208(%ebp), %eax # 4-byte Reload
- movl %eax, %edx
- movl %eax, %ebx
- adcl %edx, %edx
- movl %edx, -148(%ebp) # 4-byte Spill
- movl -180(%ebp), %edx # 4-byte Reload
- adcl %edx, %edx
- movl %edx, -144(%ebp) # 4-byte Spill
- seto %al
- lahf
- movl %eax, %eax
- movl %eax, -172(%ebp) # 4-byte Spill
- movl 56(%edi), %edx
- movl -168(%ebp), %eax # 4-byte Reload
- movl %eax, %eax
- addb $127, %al
- sahf
- adcl 24(%edi), %edx
- movl 60(%edi), %ecx
- adcl 28(%edi), %ecx
- seto %al
- lahf
- movl %eax, %eax
- movl %eax, -200(%ebp) # 4-byte Spill
- seto %al
- lahf
- movl %eax, %eax
- movl %eax, -204(%ebp) # 4-byte Spill
- seto %al
- lahf
- movl %eax, %edi
- sbbl %eax, %eax
- movl %eax, -168(%ebp) # 4-byte Spill
- movl %edi, %eax
- addb $127, %al
- sahf
- jb .LBB241_2
-# BB#1:
- movl $0, -144(%ebp) # 4-byte Folded Spill
- movl $0, -148(%ebp) # 4-byte Folded Spill
- movl $0, -152(%ebp) # 4-byte Folded Spill
- movl $0, -156(%ebp) # 4-byte Folded Spill
- movl $0, -160(%ebp) # 4-byte Folded Spill
- movl $0, -164(%ebp) # 4-byte Folded Spill
-.LBB241_2:
- movl %edx, %eax
- movl -172(%ebp), %edi # 4-byte Reload
- pushl %eax
- movl %edi, %eax
- addb $127, %al
- sahf
- popl %eax
- adcl %eax, %eax
- movl %ecx, %edi
- adcl %edi, %edi
- movl %edi, -176(%ebp) # 4-byte Spill
- movl -204(%ebp), %edi # 4-byte Reload
- pushl %eax
- movl %edi, %eax
- addb $127, %al
- sahf
- popl %eax
- jb .LBB241_4
-# BB#3:
- movl $0, -176(%ebp) # 4-byte Folded Spill
- xorl %eax, %eax
-.LBB241_4:
- movl %eax, -172(%ebp) # 4-byte Spill
- movl -192(%ebp), %eax # 4-byte Reload
- movl %eax, -108(%ebp)
- movl %eax, -140(%ebp)
- movl -196(%ebp), %eax # 4-byte Reload
- movl %eax, -104(%ebp)
- movl %eax, -136(%ebp)
- movl -188(%ebp), %eax # 4-byte Reload
- movl %eax, -100(%ebp)
- movl %eax, -132(%ebp)
- movl %esi, -96(%ebp)
- movl %esi, -128(%ebp)
- movl %ebx, -92(%ebp)
- movl %ebx, -124(%ebp)
- movl -180(%ebp), %eax # 4-byte Reload
- movl %eax, -88(%ebp)
- movl %eax, -120(%ebp)
- movl %edx, -84(%ebp)
- movl %edx, -116(%ebp)
- movl %ecx, -80(%ebp)
- movl %ecx, -112(%ebp)
- movl -200(%ebp), %eax # 4-byte Reload
- movl %eax, %eax
- addb $127, %al
- sahf
- jb .LBB241_5
-# BB#6:
- xorl %edi, %edi
- jmp .LBB241_7
-.LBB241_5:
- shrl $31, %ecx
- movl %ecx, %edi
-.LBB241_7:
- leal -140(%ebp), %eax
- movl %eax, 8(%esp)
- leal -108(%ebp), %eax
- movl %eax, 4(%esp)
- leal -76(%ebp), %eax
- movl %eax, (%esp)
- movl -168(%ebp), %esi # 4-byte Reload
- andl $1, %esi
- movl -184(%ebp), %ebx # 4-byte Reload
- calll mcl_fpDbl_mulPre8L@PLT
- movl -164(%ebp), %eax # 4-byte Reload
- addl -44(%ebp), %eax
- movl %eax, -164(%ebp) # 4-byte Spill
- movl -160(%ebp), %eax # 4-byte Reload
- adcl -40(%ebp), %eax
- movl %eax, -160(%ebp) # 4-byte Spill
- movl -156(%ebp), %eax # 4-byte Reload
- adcl -36(%ebp), %eax
- movl %eax, -156(%ebp) # 4-byte Spill
- movl -152(%ebp), %eax # 4-byte Reload
- adcl -32(%ebp), %eax
- movl %eax, -152(%ebp) # 4-byte Spill
- movl -148(%ebp), %eax # 4-byte Reload
- adcl -28(%ebp), %eax
- movl %eax, -148(%ebp) # 4-byte Spill
- movl -144(%ebp), %eax # 4-byte Reload
- adcl -24(%ebp), %eax
- movl %eax, -144(%ebp) # 4-byte Spill
- movl -172(%ebp), %eax # 4-byte Reload
- adcl -20(%ebp), %eax
- movl %eax, -172(%ebp) # 4-byte Spill
- movl -176(%ebp), %eax # 4-byte Reload
- adcl -16(%ebp), %eax
- adcl %edi, %esi
- movl %esi, -168(%ebp) # 4-byte Spill
- movl -76(%ebp), %ecx
- movl 8(%ebp), %esi
- subl (%esi), %ecx
- movl %ecx, -180(%ebp) # 4-byte Spill
- movl -72(%ebp), %edi
- sbbl 4(%esi), %edi
- movl -68(%ebp), %edx
- sbbl 8(%esi), %edx
- movl %edx, -184(%ebp) # 4-byte Spill
- movl -64(%ebp), %edx
- sbbl 12(%esi), %edx
- movl %edx, -192(%ebp) # 4-byte Spill
- movl -60(%ebp), %ebx
- sbbl 16(%esi), %ebx
- movl %eax, %ecx
- movl -56(%ebp), %eax
- sbbl 20(%esi), %eax
- movl %eax, -196(%ebp) # 4-byte Spill
- movl -52(%ebp), %edx
- sbbl 24(%esi), %edx
- movl %edx, -188(%ebp) # 4-byte Spill
- movl -48(%ebp), %edx
- sbbl 28(%esi), %edx
- movl 32(%esi), %eax
- movl %eax, -200(%ebp) # 4-byte Spill
- sbbl %eax, -164(%ebp) # 4-byte Folded Spill
- movl 36(%esi), %eax
- movl %eax, -204(%ebp) # 4-byte Spill
- sbbl %eax, -160(%ebp) # 4-byte Folded Spill
- movl 40(%esi), %eax
- movl %eax, -208(%ebp) # 4-byte Spill
- sbbl %eax, -156(%ebp) # 4-byte Folded Spill
- movl 44(%esi), %eax
- movl %eax, -212(%ebp) # 4-byte Spill
- sbbl %eax, -152(%ebp) # 4-byte Folded Spill
- movl 48(%esi), %eax
- movl %eax, -216(%ebp) # 4-byte Spill
- sbbl %eax, -148(%ebp) # 4-byte Folded Spill
- movl 52(%esi), %eax
- movl %eax, -220(%ebp) # 4-byte Spill
- sbbl %eax, -144(%ebp) # 4-byte Folded Spill
- movl 56(%esi), %eax
- movl %eax, -224(%ebp) # 4-byte Spill
- sbbl %eax, -172(%ebp) # 4-byte Folded Spill
- movl 60(%esi), %eax
- movl %eax, -228(%ebp) # 4-byte Spill
- sbbl %eax, %ecx
- movl %ecx, -176(%ebp) # 4-byte Spill
- movl -168(%ebp), %eax # 4-byte Reload
- sbbl $0, %eax
- movl 64(%esi), %ecx
- movl %ecx, -260(%ebp) # 4-byte Spill
- subl %ecx, -180(%ebp) # 4-byte Folded Spill
- movl 68(%esi), %ecx
- movl %ecx, -264(%ebp) # 4-byte Spill
- sbbl %ecx, %edi
- movl 72(%esi), %ecx
- movl %ecx, -268(%ebp) # 4-byte Spill
- sbbl %ecx, -184(%ebp) # 4-byte Folded Spill
- movl 76(%esi), %ecx
- movl %ecx, -272(%ebp) # 4-byte Spill
- sbbl %ecx, -192(%ebp) # 4-byte Folded Spill
- movl 80(%esi), %ecx
- movl %ecx, -276(%ebp) # 4-byte Spill
- sbbl %ecx, %ebx
- movl 84(%esi), %ecx
- movl %ecx, -280(%ebp) # 4-byte Spill
- sbbl %ecx, -196(%ebp) # 4-byte Folded Spill
-	.size	mcl_fpDbl_sqrPre16L, .Lfunc_end241-mcl_fpDbl_sqrPre16L
-
-	.globl	mcl_fp_mont16L
-	.align	16, 0x90
-	.type	mcl_fp_mont16L,@function
-mcl_fp_mont16L:                         # @mcl_fp_mont16L
-# BB#0:
-	pushl	%ebp
-	pushl	%ebx
-	pushl	%edi
-	pushl	%esi
-	subl	$2428, %esp             # imm = 0x97C
-	calll	.L242$pb
-.L242$pb:
-	popl	%ebx
-.Ltmp53:
-	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp53-.L242$pb), %ebx
-	movl	2460(%esp), %eax
-	movl	-4(%eax), %esi
-	movl	%esi, 56(%esp)          # 4-byte Spill
[... deleted Montgomery-multiplication loop elided: for each of the 16 multiplier words, a .LmulPv512x32 call on that word, an imull by the Montgomery constant loaded from -4 off the modulus pointer (kept in 56(%esp)), a second .LmulPv512x32 against the modulus, and an adcl carry chain through the 16 spill slots ...]
[... deleted final reduction elided: the 16-word result is compared against the modulus with a subl/sbbl chain into spill slots ...]
-	sbbl	$0, %ebp
-	andl	$1, %ebp
-	movl	%ebp, %ebx
-	jne	.LBB242_2
-# BB#1:
-	movl	%edx, %edi
-.LBB242_2:
-	movl	2448(%esp), %edx
-	movl	%edi, (%edx)
[... per-word conditional selects .LBB242_3 through .LBB242_32 elided: each picks t or t-p per the borrow flag and stores one of the 16 result words ...]
-	movl	%eax, 60(%edx)
-	addl	$2428, %esp             # imm = 0x97C
-	popl	%esi
-	popl	%edi
-	popl	%ebx
-	popl	%ebp
-	retl
-.Lfunc_end242:
-	.size	mcl_fp_mont16L, .Lfunc_end242-mcl_fp_mont16L
-
-	.globl	mcl_fp_montNF16L
-	.align	16, 0x90
-	.type	mcl_fp_montNF16L,@function
-mcl_fp_montNF16L:                       # @mcl_fp_montNF16L
-# BB#0:
-	pushl	%ebp
-	pushl	%ebx
-	pushl	%edi
-	pushl	%esi
-	subl	$2412, %esp             # imm = 0x96C
-	calll	.L243$pb
-.L243$pb:
-	popl	%ebx
-.Ltmp54:
-	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp54-.L243$pb), %ebx
[... deleted body of mcl_fp_montNF16L elided: the same per-word .LmulPv512x32 rounds and adcl carry chains as mcl_fp_mont16L, without the final conditional subtraction ...]
-	movl	%eax,
92(%esp) # 4-byte Spill - movl 104(%esp), %ebp # 4-byte Reload - adcl 2036(%esp), %ebp - movl 100(%esp), %esi # 4-byte Reload - adcl 2040(%esp), %esi - movl 108(%esp), %eax # 4-byte Reload - adcl 2044(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 2048(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 2440(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 1912(%esp), %ecx - movl 2436(%esp), %edx - calll .LmulPv512x32 - movl 1976(%esp), %eax - movl 76(%esp), %edx # 4-byte Reload - addl 1912(%esp), %edx - movl 80(%esp), %ecx # 4-byte Reload - adcl 1916(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1920(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1924(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - adcl 1928(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 1932(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1936(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1940(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1944(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 88(%esp), %edi # 4-byte Reload - adcl 1948(%esp), %edi - movl 96(%esp), %ecx # 4-byte Reload - adcl 1952(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1956(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl 1960(%esp), %ebp - movl %ebp, 104(%esp) # 4-byte Spill - adcl 1964(%esp), %esi - movl 108(%esp), %ecx # 4-byte Reload - adcl 1968(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1972(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl %edx, %ebp - movl %ebp, %eax - imull 44(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1840(%esp), %ecx - movl 2444(%esp), %eax - movl %eax, %edx - calll .LmulPv512x32 - addl 1840(%esp), %ebp - movl 80(%esp), %eax # 4-byte Reload - adcl 1844(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1848(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1852(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1856(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1860(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1864(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1868(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1872(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 1876(%esp), %edi - movl %edi, 88(%esp) # 4-byte Spill - movl 96(%esp), %edi # 4-byte Reload - adcl 1880(%esp), %edi - movl 92(%esp), %ebp # 4-byte Reload - adcl 1884(%esp), %ebp - movl 104(%esp), %eax # 4-byte Reload - adcl 1888(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 1892(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1896(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1900(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 1904(%esp), %esi - movl 
2440(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 1768(%esp), %ecx - movl 2436(%esp), %eax - movl %eax, %edx - calll .LmulPv512x32 - movl 1832(%esp), %edx - movl 80(%esp), %ecx # 4-byte Reload - addl 1768(%esp), %ecx - movl 68(%esp), %eax # 4-byte Reload - adcl 1772(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1776(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1780(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1784(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1788(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1792(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1796(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1800(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 1804(%esp), %edi - movl %edi, 96(%esp) # 4-byte Spill - adcl 1808(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1812(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1816(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1820(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1824(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 1828(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 80(%esp) # 4-byte Spill - movl %ecx, %esi - movl %esi, %eax - imull 44(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1696(%esp), %ecx - movl 2444(%esp), %edx - calll .LmulPv512x32 - addl 1696(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 1700(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1704(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1708(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1712(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 1716(%esp), %ebp - movl 56(%esp), %edi # 4-byte Reload - adcl 1720(%esp), %edi - movl 72(%esp), %eax # 4-byte Reload - adcl 1724(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1728(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1732(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1736(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %esi # 4-byte Reload - adcl 1740(%esp), %esi - movl 100(%esp), %eax # 4-byte Reload - adcl 1744(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1748(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1752(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1756(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1760(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 2440(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 1624(%esp), %ecx - movl 2436(%esp), %edx - calll .LmulPv512x32 - movl 1688(%esp), %edx - movl 68(%esp), %ecx # 4-byte Reload - addl 1624(%esp), 
%ecx - movl 64(%esp), %eax # 4-byte Reload - adcl 1628(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1632(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1636(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 1640(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - adcl 1644(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 72(%esp), %ebp # 4-byte Reload - adcl 1648(%esp), %ebp - movl 88(%esp), %eax # 4-byte Reload - adcl 1652(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1656(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1660(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 1664(%esp), %esi - movl %esi, %edi - movl 100(%esp), %eax # 4-byte Reload - adcl 1668(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1672(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1676(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1680(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1684(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 68(%esp) # 4-byte Spill - movl %ecx, %esi - movl %esi, %eax - imull 44(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1552(%esp), %ecx - movl 2444(%esp), %edx - calll .LmulPv512x32 - addl 1552(%esp), %esi - movl 64(%esp), %esi # 4-byte Reload - adcl 1556(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 1560(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1564(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1568(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1572(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1576(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1580(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1584(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1588(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 1592(%esp), %edi - movl %edi, 104(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1596(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %ebp # 4-byte Reload - adcl 1600(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 1604(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1608(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1612(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - adcl 1616(%esp), %edi - movl 2440(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 1480(%esp), %ecx - movl 2436(%esp), %edx - calll .LmulPv512x32 - movl 1544(%esp), %eax - addl 1480(%esp), %esi - movl 60(%esp), %edx # 4-byte Reload - adcl 1484(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 48(%esp), %edx # 4-byte Reload - adcl 1488(%esp), %edx - movl %edx, 48(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 1492(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 1496(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill 
- movl 72(%esp), %edx # 4-byte Reload - adcl 1500(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 88(%esp), %edx # 4-byte Reload - adcl 1504(%esp), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 96(%esp), %edx # 4-byte Reload - adcl 1508(%esp), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 92(%esp), %edx # 4-byte Reload - adcl 1512(%esp), %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 104(%esp), %edx # 4-byte Reload - adcl 1516(%esp), %edx - movl %edx, 104(%esp) # 4-byte Spill - movl 100(%esp), %edx # 4-byte Reload - adcl 1520(%esp), %edx - movl %edx, 100(%esp) # 4-byte Spill - adcl 1524(%esp), %ebp - movl %ebp, 108(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 1528(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 1532(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 1536(%esp), %ebp - adcl 1540(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl %eax, %edi - adcl $0, %edi - movl %esi, %eax - imull 44(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1408(%esp), %ecx - movl 2444(%esp), %edx - calll .LmulPv512x32 - addl 1408(%esp), %esi - movl 60(%esp), %eax # 4-byte Reload - adcl 1412(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %esi # 4-byte Reload - adcl 1416(%esp), %esi - movl 52(%esp), %eax # 4-byte Reload - adcl 1420(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1424(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1428(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1432(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1436(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1440(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1444(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1448(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1452(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1456(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1460(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1464(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 1468(%esp), %ebp - adcl 1472(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 2440(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 1336(%esp), %ecx - movl 2436(%esp), %edx - calll .LmulPv512x32 - movl 1400(%esp), %eax - movl 60(%esp), %ecx # 4-byte Reload - addl 1336(%esp), %ecx - adcl 1340(%esp), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 1344(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 1348(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 1352(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 88(%esp), %edx # 4-byte Reload - adcl 1356(%esp), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 96(%esp), %edx # 4-byte Reload - adcl 1360(%esp), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 92(%esp), %edx # 4-byte Reload - adcl 1364(%esp), %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 104(%esp), %edx # 4-byte Reload - 
adcl 1368(%esp), %edx - movl %edx, 104(%esp) # 4-byte Spill - movl 100(%esp), %edx # 4-byte Reload - adcl 1372(%esp), %edx - movl %edx, 100(%esp) # 4-byte Spill - movl 108(%esp), %edx # 4-byte Reload - adcl 1376(%esp), %edx - movl %edx, 108(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 1380(%esp), %edi - movl 76(%esp), %esi # 4-byte Reload - adcl 1384(%esp), %esi - movl 80(%esp), %edx # 4-byte Reload - adcl 1388(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - adcl 1392(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 1396(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 60(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 44(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1264(%esp), %ecx - movl 2444(%esp), %edx - calll .LmulPv512x32 - addl 1264(%esp), %ebp - movl 48(%esp), %eax # 4-byte Reload - adcl 1268(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1272(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1276(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1280(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1284(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1288(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1292(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1296(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1300(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1304(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 1308(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - adcl 1312(%esp), %esi - movl %esi, %ebp - movl 80(%esp), %eax # 4-byte Reload - adcl 1316(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1320(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1324(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1328(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 2440(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 1192(%esp), %ecx - movl 2436(%esp), %edx - calll .LmulPv512x32 - movl 1256(%esp), %eax - movl 48(%esp), %ecx # 4-byte Reload - addl 1192(%esp), %ecx - movl 52(%esp), %edx # 4-byte Reload - adcl 1196(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 1200(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 72(%esp), %esi # 4-byte Reload - adcl 1204(%esp), %esi - movl 88(%esp), %edx # 4-byte Reload - adcl 1208(%esp), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 96(%esp), %edi # 4-byte Reload - adcl 1212(%esp), %edi - movl 92(%esp), %edx # 4-byte Reload - adcl 1216(%esp), %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 104(%esp), %edx # 4-byte Reload - adcl 1220(%esp), %edx - movl %edx, 104(%esp) # 4-byte Spill - movl 100(%esp), %edx # 4-byte Reload - adcl 1224(%esp), %edx - movl %edx, 100(%esp) # 4-byte Spill - movl 108(%esp), %edx # 4-byte Reload - adcl 1228(%esp), %edx - movl %edx, 108(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 1232(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - adcl 1236(%esp), 
%ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 1240(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 1244(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 1248(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 1252(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 48(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 44(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1120(%esp), %ecx - movl 2444(%esp), %edx - calll .LmulPv512x32 - addl 1120(%esp), %ebp - movl 52(%esp), %eax # 4-byte Reload - adcl 1124(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1128(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 1132(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1136(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 1140(%esp), %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 92(%esp), %ebp # 4-byte Reload - adcl 1144(%esp), %ebp - movl 104(%esp), %eax # 4-byte Reload - adcl 1148(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1152(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1156(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1160(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1164(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1168(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1172(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl 1176(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 1180(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1184(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 2440(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 1048(%esp), %ecx - movl 2436(%esp), %edx - calll .LmulPv512x32 - movl 1112(%esp), %edx - movl 52(%esp), %ecx # 4-byte Reload - addl 1048(%esp), %ecx - movl 56(%esp), %esi # 4-byte Reload - adcl 1052(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 1056(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1060(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1064(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 1068(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1072(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 100(%esp), %ebp # 4-byte Reload - adcl 1076(%esp), %ebp - movl 108(%esp), %eax # 4-byte Reload - adcl 1080(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1084(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1088(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1092(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1096(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 1100(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1104(%esp), %eax - 
movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1108(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 52(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 44(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 976(%esp), %ecx - movl 2444(%esp), %edx - calll .LmulPv512x32 - addl 976(%esp), %edi - adcl 980(%esp), %esi - movl %esi, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 984(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 988(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 996(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %edi # 4-byte Reload - adcl 1000(%esp), %edi - adcl 1004(%esp), %ebp - movl %ebp, 100(%esp) # 4-byte Spill - movl 108(%esp), %esi # 4-byte Reload - adcl 1008(%esp), %esi - movl 84(%esp), %eax # 4-byte Reload - adcl 1012(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl 1016(%esp), %ebp - movl 80(%esp), %eax # 4-byte Reload - adcl 1020(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1024(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1028(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1032(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 1036(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1040(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 2440(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 904(%esp), %ecx - movl 2436(%esp), %edx - calll .LmulPv512x32 - movl 968(%esp), %ecx - movl 56(%esp), %eax # 4-byte Reload - addl 904(%esp), %eax - movl 72(%esp), %edx # 4-byte Reload - adcl 908(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 88(%esp), %edx # 4-byte Reload - adcl 912(%esp), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 96(%esp), %edx # 4-byte Reload - adcl 916(%esp), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 92(%esp), %edx # 4-byte Reload - adcl 920(%esp), %edx - movl %edx, 92(%esp) # 4-byte Spill - adcl 924(%esp), %edi - movl %edi, 104(%esp) # 4-byte Spill - movl 100(%esp), %edi # 4-byte Reload - adcl 928(%esp), %edi - adcl 932(%esp), %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 936(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - adcl 940(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 944(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 948(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 952(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 956(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 960(%esp), %ebp - movl 52(%esp), %edx # 4-byte Reload - adcl 964(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl %eax, %esi - imull 44(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 832(%esp), %ecx - movl 2444(%esp), %edx - calll .LmulPv512x32 - addl 832(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 
836(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 856(%esp), %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl 872(%esp), %esi - movl 68(%esp), %edi # 4-byte Reload - adcl 876(%esp), %edi - movl 64(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 888(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 892(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 2440(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 760(%esp), %ecx - movl 2436(%esp), %edx - calll .LmulPv512x32 - movl 824(%esp), %edx - movl 72(%esp), %ecx # 4-byte Reload - addl 760(%esp), %ecx - movl 88(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 780(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 784(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 796(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - adcl 800(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 808(%esp), %edi - movl 48(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - adcl 816(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 72(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 44(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 688(%esp), %ecx - movl 2444(%esp), %edx - calll .LmulPv512x32 - addl 688(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 696(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 100(%esp), %eax # 
4-byte Reload - adcl 708(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 712(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - adcl 716(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 720(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 728(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 732(%esp), %ebp - adcl 736(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 752(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 2440(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 616(%esp), %ecx - movl 2436(%esp), %edx - calll .LmulPv512x32 - movl 680(%esp), %edx - movl 88(%esp), %ecx # 4-byte Reload - addl 616(%esp), %ecx - movl 96(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %edi # 4-byte Reload - adcl 624(%esp), %edi - movl 104(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 636(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 640(%esp), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 656(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 672(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 88(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 44(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 544(%esp), %ecx - movl 2444(%esp), %edx - calll .LmulPv512x32 - addl 544(%esp), %esi - movl 96(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 552(%esp), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 100(%esp), %edi # 4-byte Reload - adcl 560(%esp), %edi - movl 108(%esp), %esi # 4-byte Reload - adcl 564(%esp), %esi - movl 84(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - 
adcl 580(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 600(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 2440(%esp), %eax - movl 52(%eax), %eax - movl %eax, (%esp) - leal 472(%esp), %ecx - movl 2436(%esp), %edx - calll .LmulPv512x32 - movl 536(%esp), %edx - movl 96(%esp), %ecx # 4-byte Reload - addl 472(%esp), %ecx - movl 92(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 480(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 484(%esp), %edi - movl %edi, 100(%esp) # 4-byte Spill - adcl 488(%esp), %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %ebp # 4-byte Reload - adcl 496(%esp), %ebp - movl 80(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 96(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 44(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 400(%esp), %ecx - movl 2444(%esp), %edx - calll .LmulPv512x32 - addl 400(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 412(%esp), %esi - movl 108(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 420(%esp), %edi - adcl 424(%esp), %ebp - movl %ebp, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 440(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %ebp # 4-byte Reload - adcl 444(%esp), %ebp - movl 52(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 52(%esp) # 
4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 2440(%esp), %eax - movl 56(%eax), %eax - movl %eax, (%esp) - leal 328(%esp), %ecx - movl 2436(%esp), %edx - calll .LmulPv512x32 - movl 392(%esp), %edx - movl 92(%esp), %ecx # 4-byte Reload - addl 328(%esp), %ecx - movl 104(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 336(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 344(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl 352(%esp), %esi - movl 68(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 368(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 92(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 44(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 256(%esp), %ecx - movl 2444(%esp), %edx - calll .LmulPv512x32 - addl 256(%esp), %ebp - movl 104(%esp), %edi # 4-byte Reload - adcl 260(%esp), %edi - movl 100(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %ebp # 4-byte Reload - adcl 268(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 280(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 284(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte 
Reload - adcl 320(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 2440(%esp), %eax - movl 60(%eax), %eax - movl %eax, (%esp) - leal 184(%esp), %ecx - movl 2436(%esp), %edx - calll .LmulPv512x32 - movl 248(%esp), %edx - movl %edi, %ecx - addl 184(%esp), %ecx - movl 100(%esp), %edi # 4-byte Reload - adcl 188(%esp), %edi - adcl 192(%esp), %ebp - movl %ebp, 108(%esp) # 4-byte Spill - movl 84(%esp), %ebp # 4-byte Reload - adcl 196(%esp), %ebp - movl 76(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 204(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 208(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 104(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 112(%esp), %ecx - movl 2444(%esp), %edx - calll .LmulPv512x32 - addl 112(%esp), %esi - movl %edi, %eax - adcl 116(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %edi # 4-byte Reload - adcl 120(%esp), %edi - movl %edi, 108(%esp) # 4-byte Spill - adcl 124(%esp), %ebp - movl 76(%esp), %ecx # 4-byte Reload - adcl 128(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl %ecx, %ebx - movl 80(%esp), %ecx # 4-byte Reload - adcl 132(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 136(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 140(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 144(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 48(%esp), %ecx # 4-byte Reload - adcl 148(%esp), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 152(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 156(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 160(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 164(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 168(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 172(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 176(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl %eax, %edx - movl 2444(%esp), %esi - subl (%esi), %edx - sbbl 4(%esi), %edi - movl %ebp, %ecx - sbbl 8(%esi), %ecx - movl %ebx, %eax - sbbl 12(%esi), %eax - movl 80(%esp), %ebx # 4-byte Reload - sbbl 16(%esi), %ebx - movl 
%ebx, 4(%esp) # 4-byte Spill - movl 68(%esp), %ebx # 4-byte Reload - sbbl 20(%esi), %ebx - movl %ebx, 8(%esp) # 4-byte Spill - movl 64(%esp), %ebx # 4-byte Reload - sbbl 24(%esi), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - movl 60(%esp), %ebx # 4-byte Reload - sbbl 28(%esi), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 48(%esp), %ebx # 4-byte Reload - sbbl 32(%esi), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - movl 52(%esp), %ebx # 4-byte Reload - sbbl 36(%esi), %ebx - movl %ebx, 24(%esp) # 4-byte Spill - movl 56(%esp), %ebx # 4-byte Reload - sbbl 40(%esi), %ebx - movl %ebx, 28(%esp) # 4-byte Spill - movl 72(%esp), %ebx # 4-byte Reload - sbbl 44(%esi), %ebx - movl %ebx, 32(%esp) # 4-byte Spill - movl 88(%esp), %ebx # 4-byte Reload - sbbl 48(%esi), %ebx - movl %ebx, 36(%esp) # 4-byte Spill - movl 96(%esp), %ebx # 4-byte Reload - sbbl 52(%esi), %ebx - movl %ebx, 40(%esp) # 4-byte Spill - movl 92(%esp), %ebx # 4-byte Reload - sbbl 56(%esi), %ebx - movl %ebx, 44(%esp) # 4-byte Spill - movl 104(%esp), %ebx # 4-byte Reload - sbbl 60(%esi), %ebx - movl %ebx, 84(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - testl %ebx, %ebx - js .LBB243_2 -# BB#1: - movl %edx, %esi -.LBB243_2: - movl 2432(%esp), %edx - movl %esi, (%edx) - movl 108(%esp), %esi # 4-byte Reload - js .LBB243_4 -# BB#3: - movl %edi, %esi -.LBB243_4: - movl %esi, 4(%edx) - js .LBB243_6 -# BB#5: - movl %ecx, %ebp -.LBB243_6: - movl %ebp, 8(%edx) - movl 76(%esp), %ecx # 4-byte Reload - js .LBB243_8 -# BB#7: - movl %eax, %ecx -.LBB243_8: - movl %ecx, 12(%edx) - movl 80(%esp), %eax # 4-byte Reload - js .LBB243_10 -# BB#9: - movl 4(%esp), %eax # 4-byte Reload -.LBB243_10: - movl %eax, 16(%edx) - movl 68(%esp), %eax # 4-byte Reload - js .LBB243_12 -# BB#11: - movl 8(%esp), %eax # 4-byte Reload -.LBB243_12: - movl %eax, 20(%edx) - movl 64(%esp), %eax # 4-byte Reload - js .LBB243_14 -# BB#13: - movl 12(%esp), %eax # 4-byte Reload -.LBB243_14: - movl %eax, 24(%edx) - movl 60(%esp), %eax # 4-byte Reload - js .LBB243_16 -# BB#15: - movl 16(%esp), %eax # 4-byte Reload -.LBB243_16: - movl %eax, 28(%edx) - movl 48(%esp), %eax # 4-byte Reload - js .LBB243_18 -# BB#17: - movl 20(%esp), %eax # 4-byte Reload -.LBB243_18: - movl %eax, 32(%edx) - movl 52(%esp), %eax # 4-byte Reload - js .LBB243_20 -# BB#19: - movl 24(%esp), %eax # 4-byte Reload -.LBB243_20: - movl %eax, 36(%edx) - movl 56(%esp), %eax # 4-byte Reload - js .LBB243_22 -# BB#21: - movl 28(%esp), %eax # 4-byte Reload -.LBB243_22: - movl %eax, 40(%edx) - movl 72(%esp), %eax # 4-byte Reload - js .LBB243_24 -# BB#23: - movl 32(%esp), %eax # 4-byte Reload -.LBB243_24: - movl %eax, 44(%edx) - movl 88(%esp), %eax # 4-byte Reload - js .LBB243_26 -# BB#25: - movl 36(%esp), %eax # 4-byte Reload -.LBB243_26: - movl %eax, 48(%edx) - movl 96(%esp), %eax # 4-byte Reload - js .LBB243_28 -# BB#27: - movl 40(%esp), %eax # 4-byte Reload -.LBB243_28: - movl %eax, 52(%edx) - movl 92(%esp), %eax # 4-byte Reload - js .LBB243_30 -# BB#29: - movl 44(%esp), %eax # 4-byte Reload -.LBB243_30: - movl %eax, 56(%edx) - movl 104(%esp), %eax # 4-byte Reload - js .LBB243_32 -# BB#31: - movl 84(%esp), %eax # 4-byte Reload -.LBB243_32: - movl %eax, 60(%edx) - addl $2412, %esp # imm = 0x96C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end243: - .size mcl_fp_montNF16L, .Lfunc_end243-mcl_fp_montNF16L - - .globl mcl_fp_montRed16L - .align 16, 0x90 - .type mcl_fp_montRed16L,@function -mcl_fp_montRed16L: # @mcl_fp_montRed16L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl 
$1356, %esp # imm = 0x54C - calll .L244$pb -.L244$pb: - popl %eax -.Ltmp55: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp55-.L244$pb), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 1384(%esp), %edx - movl -4(%edx), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 1380(%esp), %ecx - movl (%ecx), %ebx - movl %ebx, 112(%esp) # 4-byte Spill - movl 4(%ecx), %edi - movl %edi, 120(%esp) # 4-byte Spill - imull %eax, %ebx - movl 124(%ecx), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 120(%ecx), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 116(%ecx), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 112(%ecx), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 108(%ecx), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 104(%ecx), %esi - movl %esi, 152(%esp) # 4-byte Spill - movl 100(%ecx), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 96(%ecx), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 92(%ecx), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 88(%ecx), %eax - movl %eax, 188(%esp) # 4-byte Spill - movl 84(%ecx), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 80(%ecx), %edi - movl %edi, 148(%esp) # 4-byte Spill - movl 76(%ecx), %eax - movl %eax, 196(%esp) # 4-byte Spill - movl 72(%ecx), %esi - movl %esi, 192(%esp) # 4-byte Spill - movl 68(%ecx), %edi - movl %edi, 204(%esp) # 4-byte Spill - movl 64(%ecx), %esi - movl %esi, 200(%esp) # 4-byte Spill - movl 60(%ecx), %edi - movl %edi, 180(%esp) # 4-byte Spill - movl 56(%ecx), %edi - movl %edi, 164(%esp) # 4-byte Spill - movl 52(%ecx), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 48(%ecx), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 44(%ecx), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 40(%ecx), %ebp - movl 36(%ecx), %edi - movl 32(%ecx), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 28(%ecx), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 24(%ecx), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 20(%ecx), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 16(%ecx), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 12(%ecx), %esi - movl 8(%ecx), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl (%edx), %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 60(%edx), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 56(%edx), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 52(%edx), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 48(%edx), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 44(%edx), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 40(%edx), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 36(%edx), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 32(%edx), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 28(%edx), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 24(%edx), %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 20(%edx), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - movl 16(%edx), %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 12(%edx), %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 8(%edx), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 4(%edx), %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl %ebx, (%esp) - leal 1288(%esp), %ecx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - movl 112(%esp), %eax # 4-byte Reload - addl 1288(%esp), %eax - movl 120(%esp), %ecx # 4-byte Reload - adcl 1292(%esp), %ecx - movl 76(%esp), %eax # 4-byte Reload - adcl 1296(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1300(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1304(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 88(%esp), %eax # 
4-byte Reload - adcl 1308(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1312(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1316(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1320(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 1324(%esp), %edi - movl %edi, 108(%esp) # 4-byte Spill - adcl 1328(%esp), %ebp - movl %ebp, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 1332(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 1336(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 1340(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 1344(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 1348(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 1352(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - adcl $0, 204(%esp) # 4-byte Folded Spill - adcl $0, 192(%esp) # 4-byte Folded Spill - movl 196(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 184(%esp) # 4-byte Folded Spill - adcl $0, 188(%esp) # 4-byte Folded Spill - adcl $0, 168(%esp) # 4-byte Folded Spill - adcl $0, 176(%esp) # 4-byte Folded Spill - adcl $0, 172(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - movl 132(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - sbbl %eax, %eax - movl %eax, 112(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1216(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - movl 112(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 1216(%esp), %esi - movl 76(%esp), %edx # 4-byte Reload - adcl 1220(%esp), %edx - movl 80(%esp), %eax # 4-byte Reload - adcl 1224(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1228(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1232(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1236(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1240(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1244(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1248(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1252(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 1256(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 136(%esp), %esi # 4-byte Reload - adcl 1260(%esp), %esi - movl 160(%esp), %eax # 4-byte Reload - adcl 1264(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 1268(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 1272(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 1276(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - movl 
204(%esp), %eax # 4-byte Reload - adcl 1280(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - adcl $0, 192(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 196(%esp) # 4-byte Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 184(%esp) # 4-byte Folded Spill - adcl $0, 188(%esp) # 4-byte Folded Spill - adcl $0, 168(%esp) # 4-byte Folded Spill - adcl $0, 176(%esp) # 4-byte Folded Spill - adcl $0, 172(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - movl 156(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 132(%esp) # 4-byte Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl %edx, %ebp - movl %ebp, %eax - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1144(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 1144(%esp), %ebp - movl 80(%esp), %ecx # 4-byte Reload - adcl 1148(%esp), %ecx - movl 84(%esp), %eax # 4-byte Reload - adcl 1152(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1156(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1160(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1164(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1168(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1172(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1176(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 1180(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - adcl 1184(%esp), %esi - movl %esi, 136(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 1188(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 1192(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 1196(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 1200(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 1204(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), %eax # 4-byte Reload - adcl 1208(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - adcl $0, 196(%esp) # 4-byte Folded Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 184(%esp) # 4-byte Folded Spill - adcl $0, 188(%esp) # 4-byte Folded Spill - movl 168(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 176(%esp) # 4-byte Folded Spill - adcl $0, 172(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 156(%esp) # 4-byte Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - movl 128(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %esi - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1072(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 1072(%esp), %esi - movl 84(%esp), %ecx # 4-byte Reload - adcl 1076(%esp), %ecx - movl 88(%esp), %eax # 4-byte Reload - adcl 1080(%esp), %eax - movl 
%eax, 88(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1084(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1088(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1092(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1096(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1100(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 1104(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 1108(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 1112(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 1116(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 1120(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 1124(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 1128(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), %eax # 4-byte Reload - adcl 1132(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 1136(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - adcl $0, 148(%esp) # 4-byte Folded Spill - adcl $0, 184(%esp) # 4-byte Folded Spill - adcl $0, 188(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 168(%esp) # 4-byte Spill - adcl $0, 176(%esp) # 4-byte Folded Spill - adcl $0, 172(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 128(%esp) # 4-byte Spill - movl 124(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %ecx, %edi - movl %edi, %eax - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1000(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 1000(%esp), %edi - movl 88(%esp), %ecx # 4-byte Reload - adcl 1004(%esp), %ecx - movl 92(%esp), %eax # 4-byte Reload - adcl 1008(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1012(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1016(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1020(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1024(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 1028(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 1032(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 1036(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 1040(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 1044(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 1048(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 1052(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 
192(%esp), %eax # 4-byte Reload - adcl 1056(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 1060(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 1064(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - adcl $0, 184(%esp) # 4-byte Folded Spill - movl 188(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - adcl $0, 168(%esp) # 4-byte Folded Spill - adcl $0, 176(%esp) # 4-byte Folded Spill - movl 172(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 124(%esp) # 4-byte Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %ecx, %esi - movl %esi, %eax - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 928(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 928(%esp), %esi - movl 92(%esp), %ecx # 4-byte Reload - adcl 932(%esp), %ecx - movl 96(%esp), %eax # 4-byte Reload - adcl 936(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 940(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 944(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 948(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 952(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 956(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 960(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 976(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), %eax # 4-byte Reload - adcl 980(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 984(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 988(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 184(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - adcl $0, %ebp - movl %ebp, 188(%esp) # 4-byte Spill - adcl $0, 168(%esp) # 4-byte Folded Spill - adcl $0, 176(%esp) # 4-byte Folded Spill - adcl $0, %edi - movl %edi, 172(%esp) # 4-byte Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - movl 144(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %ecx, %edi - movl %edi, %eax - movl 100(%esp), %ebp # 4-byte Reload - imull %ebp, %eax - movl %eax, (%esp) - leal 856(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 856(%esp), %edi - movl 96(%esp), %ecx # 4-byte Reload - adcl 860(%esp), %ecx - movl 104(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - 
movl 108(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 872(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 876(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 888(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 912(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 184(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 920(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - adcl $0, 168(%esp) # 4-byte Folded Spill - movl 176(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 172(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 144(%esp) # 4-byte Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %ecx, %esi - movl %esi, %eax - imull %ebp, %eax - movl %eax, (%esp) - leal 784(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 784(%esp), %esi - movl 104(%esp), %ecx # 4-byte Reload - adcl 788(%esp), %ecx - movl 108(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 812(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), %ebp # 4-byte Reload - adcl 828(%esp), %ebp - movl 196(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 184(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl 
%eax, 168(%esp) # 4-byte Spill - adcl $0, %edi - movl %edi, 176(%esp) # 4-byte Spill - adcl $0, 172(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - movl 156(%esp), %esi # 4-byte Reload - adcl $0, %esi - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %ecx, %edi - movl %edi, %eax - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 712(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 712(%esp), %edi - movl 108(%esp), %ecx # 4-byte Reload - adcl 716(%esp), %ecx - movl 120(%esp), %eax # 4-byte Reload - adcl 720(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 140(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 728(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 732(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 736(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - adcl 752(%esp), %ebp - movl %ebp, 192(%esp) # 4-byte Spill - movl 196(%esp), %edi # 4-byte Reload - adcl 756(%esp), %edi - movl 148(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 184(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - adcl $0, 172(%esp) # 4-byte Folded Spill - adcl $0, 152(%esp) # 4-byte Folded Spill - adcl $0, %esi - movl %esi, 156(%esp) # 4-byte Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl $0, %ebp - movl %ecx, %esi - movl %esi, %eax - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 640(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 640(%esp), %esi - movl 120(%esp), %ecx # 4-byte Reload - adcl 644(%esp), %ecx - movl 140(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 140(%esp) # 4-byte Spill - movl 136(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 660(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %esi # 4-byte Reload - adcl 668(%esp), %esi - movl 204(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl 
%eax, 192(%esp) # 4-byte Spill - adcl 680(%esp), %edi - movl %edi, 196(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 184(%esp), %eax # 4-byte Reload - adcl 688(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 696(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 152(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 156(%esp) # 4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, %ebp - movl %ebp, 112(%esp) # 4-byte Spill - movl %ecx, %ebp - movl %ebp, %eax - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 568(%esp), %ecx - movl 1384(%esp), %eax - movl %eax, %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 568(%esp), %ebp - movl 140(%esp), %ecx # 4-byte Reload - adcl 572(%esp), %ecx - movl 136(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 136(%esp) # 4-byte Spill - movl 160(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 164(%esp) # 4-byte Spill - movl 180(%esp), %ebp # 4-byte Reload - adcl 588(%esp), %ebp - adcl 592(%esp), %esi - movl %esi, 200(%esp) # 4-byte Spill - movl 204(%esp), %esi # 4-byte Reload - adcl 596(%esp), %esi - movl 192(%esp), %eax # 4-byte Reload - adcl 600(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - movl 148(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 148(%esp) # 4-byte Spill - movl 184(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 624(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - adcl 632(%esp), %edi - movl %edi, 152(%esp) # 4-byte Spill - adcl $0, 156(%esp) # 4-byte Folded Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %ecx, %edi - movl %edi, %eax - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 496(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 496(%esp), %edi - movl 136(%esp), %ecx # 4-byte Reload - adcl 500(%esp), %ecx - movl 160(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 160(%esp) # 4-byte Spill - movl 164(%esp), %edi # 4-byte Reload - adcl 508(%esp), %edi - adcl 512(%esp), %ebp - movl %ebp, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - 
adcl 520(%esp), %esi - movl %esi, 204(%esp) # 4-byte Spill - movl 192(%esp), %eax # 4-byte Reload - adcl 524(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 528(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - movl 148(%esp), %ebp # 4-byte Reload - adcl 532(%esp), %ebp - movl 184(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - adcl $0, 144(%esp) # 4-byte Folded Spill - adcl $0, 132(%esp) # 4-byte Folded Spill - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %ecx, %eax - movl %ecx, %esi - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 424(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 424(%esp), %esi - movl 160(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - adcl 432(%esp), %edi - movl %edi, 164(%esp) # 4-byte Spill - movl 180(%esp), %ecx # 4-byte Reload - adcl 436(%esp), %ecx - movl %ecx, 180(%esp) # 4-byte Spill - movl 200(%esp), %ecx # 4-byte Reload - adcl 440(%esp), %ecx - movl %ecx, 200(%esp) # 4-byte Spill - movl 204(%esp), %ecx # 4-byte Reload - adcl 444(%esp), %ecx - movl %ecx, 204(%esp) # 4-byte Spill - movl 192(%esp), %ecx # 4-byte Reload - adcl 448(%esp), %ecx - movl %ecx, 192(%esp) # 4-byte Spill - movl 196(%esp), %ecx # 4-byte Reload - adcl 452(%esp), %ecx - movl %ecx, 196(%esp) # 4-byte Spill - adcl 456(%esp), %ebp - movl 184(%esp), %ecx # 4-byte Reload - adcl 460(%esp), %ecx - movl %ecx, 184(%esp) # 4-byte Spill - movl 188(%esp), %ecx # 4-byte Reload - adcl 464(%esp), %ecx - movl %ecx, 188(%esp) # 4-byte Spill - movl 168(%esp), %ecx # 4-byte Reload - adcl 468(%esp), %ecx - movl %ecx, 168(%esp) # 4-byte Spill - movl 176(%esp), %ecx # 4-byte Reload - adcl 472(%esp), %ecx - movl %ecx, 176(%esp) # 4-byte Spill - movl 172(%esp), %ecx # 4-byte Reload - adcl 476(%esp), %ecx - movl %ecx, 172(%esp) # 4-byte Spill - movl 152(%esp), %ecx # 4-byte Reload - adcl 480(%esp), %ecx - movl %ecx, 152(%esp) # 4-byte Spill - movl 156(%esp), %ecx # 4-byte Reload - adcl 484(%esp), %ecx - movl %ecx, 156(%esp) # 4-byte Spill - movl 144(%esp), %ecx # 4-byte Reload - adcl 488(%esp), %ecx - movl %ecx, 144(%esp) # 4-byte Spill - movl 132(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 128(%esp) # 4-byte Folded Spill - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %eax, %esi - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 352(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 352(%esp), %esi - movl 164(%esp), %esi # 4-byte Reload - adcl 356(%esp), %esi - movl 180(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 180(%esp) # 4-byte Spill - movl 200(%esp), %eax # 4-byte Reload - adcl 364(%esp), %eax - movl %eax, 
200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), %eax # 4-byte Reload - adcl 372(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - adcl 380(%esp), %ebp - movl 184(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - movl 168(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 168(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 404(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - adcl 416(%esp), %edi - movl %edi, 132(%esp) # 4-byte Spill - movl 128(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 124(%esp) # 4-byte Folded Spill - adcl $0, 112(%esp) # 4-byte Folded Spill - movl %esi, %eax - imull 100(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 280(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 280(%esp), %esi - movl 180(%esp), %ecx # 4-byte Reload - adcl 284(%esp), %ecx - movl 200(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 300(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - adcl 304(%esp), %ebp - movl %ebp, 148(%esp) # 4-byte Spill - movl 184(%esp), %eax # 4-byte Reload - adcl 308(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl 188(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - movl 168(%esp), %esi # 4-byte Reload - adcl 316(%esp), %esi - movl 176(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl 344(%esp), %edi - movl %edi, 128(%esp) # 4-byte Spill - movl 124(%esp), %edi # 4-byte Reload - adcl $0, %edi - adcl $0, 112(%esp) # 4-byte Folded Spill - movl 100(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %ebp - movl %eax, (%esp) - leal 208(%esp), %ecx - movl 1384(%esp), %edx - movl 116(%esp), %ebx # 4-byte Reload - calll .LmulPv512x32 - addl 208(%esp), %ebp - movl 200(%esp), %edx # 4-byte Reload - adcl 212(%esp), %edx - movl %edx, 200(%esp) # 4-byte Spill - movl 204(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 204(%esp) # 4-byte Spill - movl 192(%esp), 
%ecx # 4-byte Reload - adcl 220(%esp), %ecx - movl %ecx, 192(%esp) # 4-byte Spill - movl 196(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 196(%esp) # 4-byte Spill - movl 148(%esp), %ebp # 4-byte Reload - adcl 228(%esp), %ebp - movl %ebp, 148(%esp) # 4-byte Spill - movl 184(%esp), %eax # 4-byte Reload - adcl 232(%esp), %eax - movl %eax, 184(%esp) # 4-byte Spill - movl %eax, %ebx - movl 188(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 188(%esp) # 4-byte Spill - adcl 240(%esp), %esi - movl %esi, 168(%esp) # 4-byte Spill - movl 176(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 176(%esp) # 4-byte Spill - movl 172(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 172(%esp) # 4-byte Spill - movl 152(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 152(%esp) # 4-byte Spill - movl 156(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 156(%esp) # 4-byte Spill - movl 144(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 144(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - adcl 272(%esp), %edi - movl %edi, 124(%esp) # 4-byte Spill - movl 112(%esp), %edi # 4-byte Reload - adcl $0, %edi - movl %edx, %eax - subl 24(%esp), %edx # 4-byte Folded Reload - movl 204(%esp), %esi # 4-byte Reload - sbbl 12(%esp), %esi # 4-byte Folded Reload - sbbl 16(%esp), %ecx # 4-byte Folded Reload - movl 196(%esp), %eax # 4-byte Reload - sbbl 20(%esp), %eax # 4-byte Folded Reload - sbbl 28(%esp), %ebp # 4-byte Folded Reload - sbbl 32(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 100(%esp) # 4-byte Spill - movl 188(%esp), %ebx # 4-byte Reload - sbbl 36(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 104(%esp) # 4-byte Spill - movl 168(%esp), %ebx # 4-byte Reload - sbbl 40(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 108(%esp) # 4-byte Spill - movl 176(%esp), %ebx # 4-byte Reload - sbbl 44(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 112(%esp) # 4-byte Spill - movl 172(%esp), %ebx # 4-byte Reload - sbbl 48(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 116(%esp) # 4-byte Spill - movl 152(%esp), %ebx # 4-byte Reload - sbbl 52(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 120(%esp) # 4-byte Spill - movl 156(%esp), %ebx # 4-byte Reload - sbbl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 136(%esp) # 4-byte Spill - movl 144(%esp), %ebx # 4-byte Reload - sbbl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 140(%esp) # 4-byte Spill - movl 132(%esp), %ebx # 4-byte Reload - sbbl 64(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 160(%esp) # 4-byte Spill - movl 128(%esp), %ebx # 4-byte Reload - sbbl 68(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 164(%esp) # 4-byte Spill - movl 124(%esp), %ebx # 4-byte Reload - sbbl 72(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 180(%esp) # 4-byte Spill - sbbl $0, %edi - andl $1, %edi - movl %edi, %ebx - jne .LBB244_2 -# BB#1: - movl %edx, 200(%esp) # 4-byte Spill -.LBB244_2: - movl 1376(%esp), %edx - movl 200(%esp), %edi # 4-byte Reload - movl %edi, (%edx) - testb %bl, %bl - jne .LBB244_4 -# BB#3: - movl %esi, 204(%esp) # 4-byte Spill -.LBB244_4: - movl 204(%esp), %esi # 4-byte Reload - movl %esi, 4(%edx) - movl 192(%esp), %esi # 4-byte Reload - jne .LBB244_6 -# BB#5: - movl %ecx, %esi -.LBB244_6: - movl %esi, 8(%edx) - movl 196(%esp), %ecx # 4-byte Reload - jne .LBB244_8 -# BB#7: - movl %eax, 
%ecx -.LBB244_8: - movl %ecx, 12(%edx) - movl 128(%esp), %esi # 4-byte Reload - movl 148(%esp), %eax # 4-byte Reload - jne .LBB244_10 -# BB#9: - movl %ebp, %eax -.LBB244_10: - movl %eax, 16(%edx) - movl 124(%esp), %ecx # 4-byte Reload - movl 176(%esp), %eax # 4-byte Reload - movl 184(%esp), %ebp # 4-byte Reload - jne .LBB244_12 -# BB#11: - movl 100(%esp), %ebp # 4-byte Reload -.LBB244_12: - movl %ebp, 20(%edx) - movl 152(%esp), %ebp # 4-byte Reload - movl 188(%esp), %ebx # 4-byte Reload - jne .LBB244_14 -# BB#13: - movl 104(%esp), %ebx # 4-byte Reload -.LBB244_14: - movl %ebx, 24(%edx) - movl 156(%esp), %ebx # 4-byte Reload - movl 168(%esp), %edi # 4-byte Reload - jne .LBB244_16 -# BB#15: - movl 108(%esp), %edi # 4-byte Reload -.LBB244_16: - movl %edi, 28(%edx) - movl 144(%esp), %edi # 4-byte Reload - jne .LBB244_18 -# BB#17: - movl 112(%esp), %eax # 4-byte Reload -.LBB244_18: - movl %eax, 32(%edx) - jne .LBB244_20 -# BB#19: - movl 116(%esp), %eax # 4-byte Reload - movl %eax, 172(%esp) # 4-byte Spill -.LBB244_20: - movl 172(%esp), %eax # 4-byte Reload - movl %eax, 36(%edx) - jne .LBB244_22 -# BB#21: - movl 120(%esp), %ebp # 4-byte Reload -.LBB244_22: - movl %ebp, 40(%edx) - movl 132(%esp), %eax # 4-byte Reload - jne .LBB244_24 -# BB#23: - movl 136(%esp), %ebx # 4-byte Reload -.LBB244_24: - movl %ebx, 44(%edx) - jne .LBB244_26 -# BB#25: - movl 140(%esp), %edi # 4-byte Reload -.LBB244_26: - movl %edi, 48(%edx) - jne .LBB244_28 -# BB#27: - movl 160(%esp), %eax # 4-byte Reload -.LBB244_28: - movl %eax, 52(%edx) - jne .LBB244_30 -# BB#29: - movl 164(%esp), %esi # 4-byte Reload -.LBB244_30: - movl %esi, 56(%edx) - jne .LBB244_32 -# BB#31: - movl 180(%esp), %ecx # 4-byte Reload -.LBB244_32: - movl %ecx, 60(%edx) - addl $1356, %esp # imm = 0x54C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end244: - .size mcl_fp_montRed16L, .Lfunc_end244-mcl_fp_montRed16L - - .globl mcl_fp_addPre16L - .align 16, 0x90 - .type mcl_fp_addPre16L,@function -mcl_fp_addPre16L: # @mcl_fp_addPre16L -# BB#0: - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - movl 20(%esp), %ecx - addl (%ecx), %edx - adcl 4(%ecx), %esi - movl 8(%eax), %ebx - adcl 8(%ecx), %ebx - movl 16(%esp), %edi - movl %edx, (%edi) - movl 12(%ecx), %edx - movl %esi, 4(%edi) - movl 16(%ecx), %esi - adcl 12(%eax), %edx - adcl 16(%eax), %esi - movl %ebx, 8(%edi) - movl 20(%eax), %ebx - movl %edx, 12(%edi) - movl 20(%ecx), %edx - adcl %ebx, %edx - movl 24(%eax), %ebx - movl %esi, 16(%edi) - movl 24(%ecx), %esi - adcl %ebx, %esi - movl 28(%eax), %ebx - movl %edx, 20(%edi) - movl 28(%ecx), %edx - adcl %ebx, %edx - movl 32(%eax), %ebx - movl %esi, 24(%edi) - movl 32(%ecx), %esi - adcl %ebx, %esi - movl 36(%eax), %ebx - movl %edx, 28(%edi) - movl 36(%ecx), %edx - adcl %ebx, %edx - movl 40(%eax), %ebx - movl %esi, 32(%edi) - movl 40(%ecx), %esi - adcl %ebx, %esi - movl 44(%eax), %ebx - movl %edx, 36(%edi) - movl 44(%ecx), %edx - adcl %ebx, %edx - movl 48(%eax), %ebx - movl %esi, 40(%edi) - movl 48(%ecx), %esi - adcl %ebx, %esi - movl 52(%eax), %ebx - movl %edx, 44(%edi) - movl 52(%ecx), %edx - adcl %ebx, %edx - movl 56(%eax), %ebx - movl %esi, 48(%edi) - movl 56(%ecx), %esi - adcl %ebx, %esi - movl %edx, 52(%edi) - movl %esi, 56(%edi) - movl 60(%eax), %eax - movl 60(%ecx), %ecx - adcl %eax, %ecx - movl %ecx, 60(%edi) - sbbl %eax, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - retl -.Lfunc_end245: - .size mcl_fp_addPre16L, .Lfunc_end245-mcl_fp_addPre16L - - .globl mcl_fp_subPre16L - 
.align 16, 0x90 - .type mcl_fp_subPre16L,@function -mcl_fp_subPre16L: # @mcl_fp_subPre16L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - movl 24(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edi - xorl %eax, %eax - movl 28(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%ecx), %ebp - sbbl 8(%edx), %ebp - movl 20(%esp), %ebx - movl %esi, (%ebx) - movl 12(%ecx), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ebx) - movl 16(%ecx), %edi - sbbl 16(%edx), %edi - movl %ebp, 8(%ebx) - movl 20(%edx), %ebp - movl %esi, 12(%ebx) - movl 20(%ecx), %esi - sbbl %ebp, %esi - movl 24(%edx), %ebp - movl %edi, 16(%ebx) - movl 24(%ecx), %edi - sbbl %ebp, %edi - movl 28(%edx), %ebp - movl %esi, 20(%ebx) - movl 28(%ecx), %esi - sbbl %ebp, %esi - movl 32(%edx), %ebp - movl %edi, 24(%ebx) - movl 32(%ecx), %edi - sbbl %ebp, %edi - movl 36(%edx), %ebp - movl %esi, 28(%ebx) - movl 36(%ecx), %esi - sbbl %ebp, %esi - movl 40(%edx), %ebp - movl %edi, 32(%ebx) - movl 40(%ecx), %edi - sbbl %ebp, %edi - movl 44(%edx), %ebp - movl %esi, 36(%ebx) - movl 44(%ecx), %esi - sbbl %ebp, %esi - movl 48(%edx), %ebp - movl %edi, 40(%ebx) - movl 48(%ecx), %edi - sbbl %ebp, %edi - movl 52(%edx), %ebp - movl %esi, 44(%ebx) - movl 52(%ecx), %esi - sbbl %ebp, %esi - movl 56(%edx), %ebp - movl %edi, 48(%ebx) - movl 56(%ecx), %edi - sbbl %ebp, %edi - movl %esi, 52(%ebx) - movl %edi, 56(%ebx) - movl 60(%edx), %edx - movl 60(%ecx), %ecx - sbbl %edx, %ecx - movl %ecx, 60(%ebx) - sbbl $0, %eax - andl $1, %eax - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end246: - .size mcl_fp_subPre16L, .Lfunc_end246-mcl_fp_subPre16L - - .globl mcl_fp_shr1_16L - .align 16, 0x90 - .type mcl_fp_shr1_16L,@function -mcl_fp_shr1_16L: # @mcl_fp_shr1_16L -# BB#0: - pushl %esi - movl 12(%esp), %eax - movl (%eax), %edx - movl 4(%eax), %esi - shrdl $1, %esi, %edx - movl 8(%esp), %ecx - movl %edx, (%ecx) - movl 8(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 4(%ecx) - movl 12(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 8(%ecx) - movl 16(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 12(%ecx) - movl 20(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 16(%ecx) - movl 24(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 20(%ecx) - movl 28(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 24(%ecx) - movl 32(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 28(%ecx) - movl 36(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 32(%ecx) - movl 40(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 36(%ecx) - movl 44(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 40(%ecx) - movl 48(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 44(%ecx) - movl 52(%eax), %esi - shrdl $1, %esi, %edx - movl %edx, 48(%ecx) - movl 56(%eax), %edx - shrdl $1, %edx, %esi - movl %esi, 52(%ecx) - movl 60(%eax), %eax - shrdl $1, %eax, %edx - movl %edx, 56(%ecx) - shrl %eax - movl %eax, 60(%ecx) - popl %esi - retl -.Lfunc_end247: - .size mcl_fp_shr1_16L, .Lfunc_end247-mcl_fp_shr1_16L - - .globl mcl_fp_add16L - .align 16, 0x90 - .type mcl_fp_add16L,@function -mcl_fp_add16L: # @mcl_fp_add16L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $56, %esp - movl 84(%esp), %edx - movl (%edx), %esi - movl 4(%edx), %ebp - movl 80(%esp), %ecx - addl (%ecx), %esi - movl %esi, %ebx - adcl 4(%ecx), %ebp - movl 8(%edx), %eax - adcl 8(%ecx), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 12(%ecx), %esi - movl 16(%ecx), %edi - adcl 12(%edx), %esi - movl %esi, 48(%esp) # 4-byte Spill - adcl 16(%edx), %edi - movl %edi, 12(%esp) # 4-byte Spill - movl 20(%ecx), %eax 
- adcl 20(%edx), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 24(%ecx), %eax - adcl 24(%edx), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 28(%ecx), %eax - adcl 28(%edx), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%ecx), %eax - adcl 32(%edx), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 36(%ecx), %eax - adcl 36(%edx), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 40(%ecx), %eax - adcl 40(%edx), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 44(%ecx), %eax - adcl 44(%edx), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 48(%ecx), %eax - adcl 48(%edx), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 52(%ecx), %eax - adcl 52(%edx), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 56(%ecx), %esi - adcl 56(%edx), %esi - movl 60(%ecx), %ecx - adcl 60(%edx), %ecx - movl 76(%esp), %edx - movl %ebx, (%edx) - movl %ebx, %eax - movl %ebp, 4(%edx) - movl 52(%esp), %ebx # 4-byte Reload - movl %ebx, 8(%edx) - movl 48(%esp), %ebx # 4-byte Reload - movl %ebx, 12(%edx) - movl %edi, 16(%edx) - movl 44(%esp), %edi # 4-byte Reload - movl %edi, 20(%edx) - movl 40(%esp), %edi # 4-byte Reload - movl %edi, 24(%edx) - movl 36(%esp), %edi # 4-byte Reload - movl %edi, 28(%edx) - movl 32(%esp), %edi # 4-byte Reload - movl %edi, 32(%edx) - movl 28(%esp), %edi # 4-byte Reload - movl %edi, 36(%edx) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 40(%edx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 44(%edx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 48(%edx) - movl 8(%esp), %edi # 4-byte Reload - movl %edi, 52(%edx) - movl %esi, 56(%edx) - movl %ecx, 60(%edx) - sbbl %ebx, %ebx - andl $1, %ebx - movl 88(%esp), %edi - subl (%edi), %eax - movl %eax, 4(%esp) # 4-byte Spill - sbbl 4(%edi), %ebp - movl %ebp, (%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - sbbl 8(%edi), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - sbbl 12(%edi), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - sbbl 16(%edi), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - sbbl 20(%edi), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - sbbl 24(%edi), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - sbbl 28(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - sbbl 32(%edi), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - sbbl 36(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - sbbl 40(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - sbbl 44(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - sbbl 48(%edi), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 8(%esp), %eax # 4-byte Reload - sbbl 52(%edi), %eax - movl %eax, %ebp - sbbl 56(%edi), %esi - sbbl 60(%edi), %ecx - sbbl $0, %ebx - testb $1, %bl - jne .LBB248_2 -# BB#1: # %nocarry - movl 4(%esp), %edi # 4-byte Reload - movl %edi, (%edx) - movl (%esp), %edi # 4-byte Reload - movl %edi, 4(%edx) - movl 52(%esp), %edi # 4-byte Reload - movl %edi, 8(%edx) - movl 48(%esp), %edi # 4-byte Reload - movl %edi, 12(%edx) - movl 12(%esp), %edi # 4-byte Reload - movl %edi, 16(%edx) - movl 44(%esp), %edi # 4-byte Reload - movl %edi, 20(%edx) - movl 40(%esp), %edi # 4-byte Reload - movl %edi, 24(%edx) - movl 36(%esp), %edi # 4-byte Reload - movl %edi, 28(%edx) - movl 32(%esp), %edi # 4-byte Reload - movl %edi, 32(%edx) 
- movl 28(%esp), %edi # 4-byte Reload - movl %edi, 36(%edx) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 40(%edx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 44(%edx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 48(%edx) - movl %ebp, 52(%edx) - movl %esi, 56(%edx) - movl %ecx, 60(%edx) -.LBB248_2: # %carry - addl $56, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end248: - .size mcl_fp_add16L, .Lfunc_end248-mcl_fp_add16L - - .globl mcl_fp_addNF16L - .align 16, 0x90 - .type mcl_fp_addNF16L,@function -mcl_fp_addNF16L: # @mcl_fp_addNF16L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $124, %esp - movl 152(%esp), %edx - movl (%edx), %eax - movl 4(%edx), %ecx - movl 148(%esp), %esi - addl (%esi), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 4(%esi), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 60(%edx), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 56(%edx), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 52(%edx), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 48(%edx), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 44(%edx), %edi - movl 40(%edx), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 36(%edx), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 32(%edx), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 28(%edx), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 24(%edx), %eax - movl 20(%edx), %ebp - movl 16(%edx), %ebx - movl 12(%edx), %ecx - movl 8(%edx), %edx - adcl 8(%esi), %edx - movl %edx, 60(%esp) # 4-byte Spill - adcl 12(%esi), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - adcl 16(%esi), %ebx - movl %ebx, 68(%esp) # 4-byte Spill - adcl 20(%esi), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - adcl 24(%esi), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 28(%esi), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 32(%esi), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 36(%esi), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 40(%esi), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 44(%esi), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 48(%esi), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 92(%esp), %edi # 4-byte Reload - adcl 52(%esi), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 96(%esp), %edi # 4-byte Reload - adcl 56(%esi), %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 60(%esi), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 156(%esp), %edi - movl 80(%esp), %esi # 4-byte Reload - subl (%edi), %esi - movl 84(%esp), %eax # 4-byte Reload - sbbl 4(%edi), %eax - movl %eax, (%esp) # 4-byte Spill - sbbl 8(%edi), %edx - movl %edx, 4(%esp) # 4-byte Spill - sbbl 12(%edi), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - sbbl 16(%edi), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - sbbl 20(%edi), %ebp - movl %ebp, 16(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - sbbl 24(%edi), %ebp - movl %ebp, 20(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - sbbl 28(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - sbbl 32(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - sbbl 36(%edi), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - sbbl 40(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - movl 
%eax, %ecx - sbbl 44(%edi), %ecx - movl %ecx, 40(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - sbbl 48(%edi), %ecx - movl %ecx, 44(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - sbbl 52(%edi), %ecx - movl %ecx, 48(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - movl %ecx, %ebx - sbbl 56(%edi), %ebx - movl %ebx, 52(%esp) # 4-byte Spill - movl 112(%esp), %ebx # 4-byte Reload - sbbl 60(%edi), %ebx - movl 80(%esp), %edi # 4-byte Reload - movl %ebx, 56(%esp) # 4-byte Spill - testl %ebx, %ebx - js .LBB249_2 -# BB#1: - movl %esi, %edi -.LBB249_2: - movl 144(%esp), %ebx - movl %edi, (%ebx) - movl 84(%esp), %edx # 4-byte Reload - js .LBB249_4 -# BB#3: - movl (%esp), %edx # 4-byte Reload -.LBB249_4: - movl %edx, 4(%ebx) - movl 68(%esp), %edx # 4-byte Reload - movl 60(%esp), %eax # 4-byte Reload - js .LBB249_6 -# BB#5: - movl 4(%esp), %eax # 4-byte Reload -.LBB249_6: - movl %eax, 8(%ebx) - movl 100(%esp), %eax # 4-byte Reload - movl 88(%esp), %ecx # 4-byte Reload - movl 64(%esp), %esi # 4-byte Reload - js .LBB249_8 -# BB#7: - movl 8(%esp), %esi # 4-byte Reload -.LBB249_8: - movl %esi, 12(%ebx) - movl 108(%esp), %esi # 4-byte Reload - js .LBB249_10 -# BB#9: - movl 12(%esp), %edx # 4-byte Reload -.LBB249_10: - movl %edx, 16(%ebx) - movl 112(%esp), %edi # 4-byte Reload - movl 104(%esp), %ebp # 4-byte Reload - js .LBB249_12 -# BB#11: - movl 16(%esp), %edx # 4-byte Reload - movl %edx, 72(%esp) # 4-byte Spill -.LBB249_12: - movl 72(%esp), %edx # 4-byte Reload - movl %edx, 20(%ebx) - js .LBB249_14 -# BB#13: - movl 20(%esp), %ecx # 4-byte Reload -.LBB249_14: - movl %ecx, 24(%ebx) - js .LBB249_16 -# BB#15: - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 116(%esp) # 4-byte Spill -.LBB249_16: - movl 116(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%ebx) - js .LBB249_18 -# BB#17: - movl 28(%esp), %eax # 4-byte Reload -.LBB249_18: - movl %eax, 32(%ebx) - movl 96(%esp), %ecx # 4-byte Reload - js .LBB249_20 -# BB#19: - movl 32(%esp), %eax # 4-byte Reload - movl %eax, 120(%esp) # 4-byte Spill -.LBB249_20: - movl 120(%esp), %eax # 4-byte Reload - movl %eax, 36(%ebx) - js .LBB249_22 -# BB#21: - movl 36(%esp), %ebp # 4-byte Reload -.LBB249_22: - movl %ebp, 40(%ebx) - movl 76(%esp), %eax # 4-byte Reload - js .LBB249_24 -# BB#23: - movl 40(%esp), %eax # 4-byte Reload -.LBB249_24: - movl %eax, 44(%ebx) - movl 92(%esp), %eax # 4-byte Reload - js .LBB249_26 -# BB#25: - movl 44(%esp), %esi # 4-byte Reload -.LBB249_26: - movl %esi, 48(%ebx) - js .LBB249_28 -# BB#27: - movl 48(%esp), %eax # 4-byte Reload -.LBB249_28: - movl %eax, 52(%ebx) - js .LBB249_30 -# BB#29: - movl 52(%esp), %ecx # 4-byte Reload -.LBB249_30: - movl %ecx, 56(%ebx) - js .LBB249_32 -# BB#31: - movl 56(%esp), %edi # 4-byte Reload -.LBB249_32: - movl %edi, 60(%ebx) - addl $124, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end249: - .size mcl_fp_addNF16L, .Lfunc_end249-mcl_fp_addNF16L - - .globl mcl_fp_sub16L - .align 16, 0x90 - .type mcl_fp_sub16L,@function -mcl_fp_sub16L: # @mcl_fp_sub16L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $60, %esp - movl 84(%esp), %esi - movl (%esi), %eax - movl 4(%esi), %ecx - xorl %ebx, %ebx - movl 88(%esp), %edi - subl (%edi), %eax - movl %eax, 52(%esp) # 4-byte Spill - sbbl 4(%edi), %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 8(%esi), %eax - sbbl 8(%edi), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 12(%esi), %eax - sbbl 12(%edi), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 16(%esi), %eax - sbbl 16(%edi), %eax - movl %eax, 
48(%esp) # 4-byte Spill - movl 20(%esi), %eax - sbbl 20(%edi), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 24(%esi), %eax - sbbl 24(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 28(%esi), %eax - sbbl 28(%edi), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 32(%esi), %eax - sbbl 32(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 36(%esi), %eax - sbbl 36(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 40(%esi), %eax - sbbl 40(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 44(%esi), %edx - sbbl 44(%edi), %edx - movl %edx, 12(%esp) # 4-byte Spill - movl 48(%esi), %ecx - sbbl 48(%edi), %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 52(%esi), %eax - sbbl 52(%edi), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 56(%esi), %ebp - sbbl 56(%edi), %ebp - movl 60(%esi), %esi - sbbl 60(%edi), %esi - sbbl $0, %ebx - testb $1, %bl - movl 80(%esp), %ebx - movl 52(%esp), %edi # 4-byte Reload - movl %edi, (%ebx) - movl 16(%esp), %edi # 4-byte Reload - movl %edi, 4(%ebx) - movl 44(%esp), %edi # 4-byte Reload - movl %edi, 8(%ebx) - movl 56(%esp), %edi # 4-byte Reload - movl %edi, 12(%ebx) - movl 48(%esp), %edi # 4-byte Reload - movl %edi, 16(%ebx) - movl 40(%esp), %edi # 4-byte Reload - movl %edi, 20(%ebx) - movl 36(%esp), %edi # 4-byte Reload - movl %edi, 24(%ebx) - movl 32(%esp), %edi # 4-byte Reload - movl %edi, 28(%ebx) - movl 28(%esp), %edi # 4-byte Reload - movl %edi, 32(%ebx) - movl 24(%esp), %edi # 4-byte Reload - movl %edi, 36(%ebx) - movl 20(%esp), %edi # 4-byte Reload - movl %edi, 40(%ebx) - movl %edx, 44(%ebx) - movl %ecx, 48(%ebx) - movl %eax, 52(%ebx) - movl %ebp, 56(%ebx) - movl %esi, 60(%ebx) - je .LBB250_2 -# BB#1: # %carry - movl %esi, (%esp) # 4-byte Spill - movl 92(%esp), %esi - movl 52(%esp), %ecx # 4-byte Reload - addl (%esi), %ecx - movl %ecx, (%ebx) - movl 16(%esp), %edx # 4-byte Reload - adcl 4(%esi), %edx - movl %edx, 4(%ebx) - movl 44(%esp), %edi # 4-byte Reload - adcl 8(%esi), %edi - movl 12(%esi), %eax - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %edi, 8(%ebx) - movl 16(%esi), %ecx - adcl 48(%esp), %ecx # 4-byte Folded Reload - movl %eax, 12(%ebx) - movl 20(%esi), %eax - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %ecx, 16(%ebx) - movl 24(%esi), %ecx - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl %eax, 20(%ebx) - movl 28(%esi), %eax - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %ecx, 24(%ebx) - movl 32(%esi), %ecx - adcl 28(%esp), %ecx # 4-byte Folded Reload - movl %eax, 28(%ebx) - movl 36(%esi), %eax - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %ecx, 32(%ebx) - movl 40(%esi), %ecx - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %eax, 36(%ebx) - movl 44(%esi), %eax - adcl 12(%esp), %eax # 4-byte Folded Reload - movl %ecx, 40(%ebx) - movl 48(%esi), %ecx - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %eax, 44(%ebx) - movl 52(%esi), %eax - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %ecx, 48(%ebx) - movl %eax, 52(%ebx) - movl 56(%esi), %eax - adcl %ebp, %eax - movl %eax, 56(%ebx) - movl 60(%esi), %eax - adcl (%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%ebx) -.LBB250_2: # %nocarry - addl $60, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end250: - .size mcl_fp_sub16L, .Lfunc_end250-mcl_fp_sub16L - - .globl mcl_fp_subNF16L - .align 16, 0x90 - .type mcl_fp_subNF16L,@function -mcl_fp_subNF16L: # @mcl_fp_subNF16L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $104, %esp - movl 128(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edx - movl 132(%esp), 
%edi - subl (%edi), %esi - movl %esi, 64(%esp) # 4-byte Spill - sbbl 4(%edi), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 60(%ecx), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 56(%ecx), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 52(%ecx), %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 48(%ecx), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 44(%ecx), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 40(%ecx), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 36(%ecx), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 32(%ecx), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 28(%ecx), %ebp - movl 24(%ecx), %ebx - movl 20(%ecx), %esi - movl 16(%ecx), %edx - movl 12(%ecx), %eax - movl 8(%ecx), %ecx - sbbl 8(%edi), %ecx - movl %ecx, 32(%esp) # 4-byte Spill - sbbl 12(%edi), %eax - movl %eax, 40(%esp) # 4-byte Spill - sbbl 16(%edi), %edx - movl %edx, 44(%esp) # 4-byte Spill - sbbl 20(%edi), %esi - movl %esi, 48(%esp) # 4-byte Spill - sbbl 24(%edi), %ebx - movl %ebx, 52(%esp) # 4-byte Spill - sbbl 28(%edi), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - sbbl 32(%edi), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - sbbl 36(%edi), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - sbbl 40(%edi), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - sbbl 44(%edi), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - sbbl 48(%edi), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - sbbl 52(%edi), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - sbbl 56(%edi), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - sbbl 60(%edi), %eax - movl %eax, 80(%esp) # 4-byte Spill - sarl $31, %eax - movl 136(%esp), %esi - movl 60(%esi), %ecx - andl %eax, %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 56(%esi), %ecx - andl %eax, %ecx - movl %ecx, 36(%esp) # 4-byte Spill - movl 52(%esi), %ecx - andl %eax, %ecx - movl %ecx, 28(%esp) # 4-byte Spill - movl 48(%esi), %ecx - andl %eax, %ecx - movl %ecx, 24(%esp) # 4-byte Spill - movl 44(%esi), %ecx - andl %eax, %ecx - movl %ecx, 20(%esp) # 4-byte Spill - movl 40(%esi), %ecx - andl %eax, %ecx - movl %ecx, 16(%esp) # 4-byte Spill - movl 36(%esi), %ecx - andl %eax, %ecx - movl %ecx, 12(%esp) # 4-byte Spill - movl 32(%esi), %ecx - andl %eax, %ecx - movl %ecx, 8(%esp) # 4-byte Spill - movl 28(%esi), %ecx - andl %eax, %ecx - movl %ecx, 4(%esp) # 4-byte Spill - movl 24(%esi), %ecx - andl %eax, %ecx - movl %ecx, (%esp) # 4-byte Spill - movl 20(%esi), %ebp - andl %eax, %ebp - movl 16(%esi), %ebx - andl %eax, %ebx - movl 12(%esi), %edi - andl %eax, %edi - movl 8(%esi), %edx - andl %eax, %edx - movl 4(%esi), %ecx - andl %eax, %ecx - andl (%esi), %eax - addl 64(%esp), %eax # 4-byte Folded Reload - adcl 68(%esp), %ecx # 4-byte Folded Reload - movl 124(%esp), %esi - movl %eax, (%esi) - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %ecx, 4(%esi) - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edx, 8(%esi) - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %edi, 12(%esi) - adcl 48(%esp), %ebp # 4-byte Folded Reload - movl %ebx, 16(%esi) - movl (%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %ebp, 20(%esi) - movl 4(%esp), %ecx # 4-byte Reload - adcl 56(%esp), %ecx # 4-byte Folded Reload - movl %eax, 24(%esi) - movl 8(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte 
Folded Reload - movl %ecx, 28(%esi) - movl 12(%esp), %ecx # 4-byte Reload - adcl 100(%esp), %ecx # 4-byte Folded Reload - movl %eax, 32(%esi) - movl 16(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %ecx, 36(%esi) - movl 20(%esp), %ecx # 4-byte Reload - adcl 84(%esp), %ecx # 4-byte Folded Reload - movl %eax, 40(%esi) - movl 24(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %ecx, 44(%esi) - movl 28(%esp), %ecx # 4-byte Reload - adcl 92(%esp), %ecx # 4-byte Folded Reload - movl %eax, 48(%esi) - movl 36(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %ecx, 52(%esi) - movl %eax, 56(%esi) - movl 60(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esi) - addl $104, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end251: - .size mcl_fp_subNF16L, .Lfunc_end251-mcl_fp_subNF16L - - .globl mcl_fpDbl_add16L - .align 16, 0x90 - .type mcl_fpDbl_add16L,@function -mcl_fpDbl_add16L: # @mcl_fpDbl_add16L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $116, %esp - movl 144(%esp), %ecx - movl (%ecx), %esi - movl 4(%ecx), %edx - movl 140(%esp), %ebx - addl (%ebx), %esi - adcl 4(%ebx), %edx - movl 8(%ecx), %edi - adcl 8(%ebx), %edi - movl 12(%ebx), %ebp - movl 136(%esp), %eax - movl %esi, (%eax) - movl 16(%ebx), %esi - adcl 12(%ecx), %ebp - adcl 16(%ecx), %esi - movl %edx, 4(%eax) - movl 72(%ecx), %edx - movl %edx, 112(%esp) # 4-byte Spill - movl %edi, 8(%eax) - movl 20(%ecx), %edx - movl %ebp, 12(%eax) - movl 20(%ebx), %edi - adcl %edx, %edi - movl 24(%ecx), %edx - movl %esi, 16(%eax) - movl 24(%ebx), %esi - adcl %edx, %esi - movl 28(%ecx), %edx - movl %edi, 20(%eax) - movl 28(%ebx), %edi - adcl %edx, %edi - movl 32(%ecx), %edx - movl %esi, 24(%eax) - movl 32(%ebx), %esi - adcl %edx, %esi - movl 36(%ecx), %edx - movl %edi, 28(%eax) - movl 36(%ebx), %edi - adcl %edx, %edi - movl 40(%ecx), %edx - movl %esi, 32(%eax) - movl 40(%ebx), %esi - adcl %edx, %esi - movl 44(%ecx), %edx - movl %edi, 36(%eax) - movl 44(%ebx), %edi - adcl %edx, %edi - movl 48(%ecx), %edx - movl %esi, 40(%eax) - movl 48(%ebx), %esi - adcl %edx, %esi - movl 52(%ecx), %edx - movl %edi, 44(%eax) - movl 52(%ebx), %edi - adcl %edx, %edi - movl 56(%ecx), %edx - movl %esi, 48(%eax) - movl 56(%ebx), %esi - adcl %edx, %esi - movl 60(%ecx), %edx - movl %edi, 52(%eax) - movl 60(%ebx), %ebp - adcl %edx, %ebp - movl 64(%ecx), %edx - movl %esi, 56(%eax) - movl 64(%ebx), %esi - adcl %edx, %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 68(%ecx), %edx - movl %ebp, 60(%eax) - movl 68(%ebx), %eax - adcl %edx, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 72(%ebx), %eax - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 76(%ecx), %ebp - movl 76(%ebx), %eax - adcl %ebp, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%ecx), %ebp - movl 80(%ebx), %eax - adcl %ebp, %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 84(%ecx), %ebp - movl 84(%ebx), %eax - adcl %ebp, %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 88(%ecx), %ebp - movl 88(%ebx), %eax - adcl %ebp, %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 92(%ecx), %ebp - movl 92(%ebx), %eax - adcl %ebp, %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 96(%ecx), %ebp - movl 96(%ebx), %eax - adcl %ebp, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 100(%ecx), %ebp - movl 100(%ebx), %edx - adcl %ebp, %edx - movl %edx, 112(%esp) # 4-byte Spill - movl 104(%ecx), %ebp - movl 104(%ebx), %edx 
- adcl %ebp, %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 108(%ecx), %ebp - movl 108(%ebx), %edx - adcl %ebp, %edx - movl %edx, 100(%esp) # 4-byte Spill - movl 112(%ecx), %edx - movl 112(%ebx), %ebp - adcl %edx, %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 116(%ecx), %edx - movl 116(%ebx), %esi - adcl %edx, %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 120(%ecx), %edx - movl 120(%ebx), %edi - adcl %edx, %edi - movl 124(%ecx), %ecx - movl 124(%ebx), %esi - adcl %ecx, %esi - sbbl %ecx, %ecx - andl $1, %ecx - movl 148(%esp), %edx - movl 72(%esp), %ebx # 4-byte Reload - subl (%edx), %ebx - movl %ebx, (%esp) # 4-byte Spill - movl 76(%esp), %ebx # 4-byte Reload - sbbl 4(%edx), %ebx - movl %ebx, 4(%esp) # 4-byte Spill - movl 80(%esp), %ebx # 4-byte Reload - sbbl 8(%edx), %ebx - movl %ebx, 8(%esp) # 4-byte Spill - movl 84(%esp), %ebx # 4-byte Reload - sbbl 12(%edx), %ebx - movl %ebx, 12(%esp) # 4-byte Spill - movl 104(%esp), %ebx # 4-byte Reload - sbbl 16(%edx), %ebx - movl %ebx, 16(%esp) # 4-byte Spill - movl 88(%esp), %ebx # 4-byte Reload - sbbl 20(%edx), %ebx - movl %ebx, 20(%esp) # 4-byte Spill - movl 108(%esp), %ebx # 4-byte Reload - sbbl 24(%edx), %ebx - movl %ebx, 24(%esp) # 4-byte Spill - movl 92(%esp), %ebx # 4-byte Reload - sbbl 28(%edx), %ebx - movl %ebx, 28(%esp) # 4-byte Spill - movl %eax, %ebx - sbbl 32(%edx), %ebx - movl 112(%esp), %eax # 4-byte Reload - sbbl 36(%edx), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - sbbl 40(%edx), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - sbbl 44(%edx), %eax - movl %eax, 40(%esp) # 4-byte Spill - sbbl 48(%edx), %ebp - movl %ebp, 44(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - movl %eax, %ebp - sbbl 52(%edx), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl %edi, %ebp - sbbl 56(%edx), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl %esi, %ebp - sbbl 60(%edx), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - sbbl $0, %ecx - andl $1, %ecx - jne .LBB252_2 -# BB#1: - movl %ebx, 64(%esp) # 4-byte Spill -.LBB252_2: - testb %cl, %cl - movl 72(%esp), %ecx # 4-byte Reload - jne .LBB252_4 -# BB#3: - movl (%esp), %ecx # 4-byte Reload -.LBB252_4: - movl 136(%esp), %ebx - movl %ecx, 64(%ebx) - movl %esi, %ebp - movl %edi, 72(%esp) # 4-byte Spill - movl 96(%esp), %edi # 4-byte Reload - movl 92(%esp), %ecx # 4-byte Reload - movl 88(%esp), %edx # 4-byte Reload - movl 76(%esp), %esi # 4-byte Reload - jne .LBB252_6 -# BB#5: - movl 4(%esp), %esi # 4-byte Reload -.LBB252_6: - movl %esi, 68(%ebx) - movl 84(%esp), %esi # 4-byte Reload - movl 80(%esp), %eax # 4-byte Reload - jne .LBB252_8 -# BB#7: - movl 8(%esp), %eax # 4-byte Reload -.LBB252_8: - movl %eax, 72(%ebx) - movl 60(%esp), %eax # 4-byte Reload - jne .LBB252_10 -# BB#9: - movl 12(%esp), %esi # 4-byte Reload -.LBB252_10: - movl %esi, 76(%ebx) - jne .LBB252_12 -# BB#11: - movl 16(%esp), %esi # 4-byte Reload - movl %esi, 104(%esp) # 4-byte Spill -.LBB252_12: - movl 104(%esp), %esi # 4-byte Reload - movl %esi, 80(%ebx) - jne .LBB252_14 -# BB#13: - movl 20(%esp), %edx # 4-byte Reload -.LBB252_14: - movl %edx, 84(%ebx) - jne .LBB252_16 -# BB#15: - movl 24(%esp), %edx # 4-byte Reload - movl %edx, 108(%esp) # 4-byte Spill -.LBB252_16: - movl 108(%esp), %edx # 4-byte Reload - movl %edx, 88(%ebx) - jne .LBB252_18 -# BB#17: - movl 28(%esp), %ecx # 4-byte Reload -.LBB252_18: - movl %ecx, 92(%ebx) - movl 64(%esp), %ecx # 4-byte Reload - movl %ecx, 96(%ebx) - jne .LBB252_20 -# BB#19: - movl 32(%esp), %ecx # 4-byte Reload - movl 
%ecx, 112(%esp) # 4-byte Spill -.LBB252_20: - movl 112(%esp), %ecx # 4-byte Reload - movl %ecx, 100(%ebx) - jne .LBB252_22 -# BB#21: - movl 36(%esp), %edi # 4-byte Reload -.LBB252_22: - movl %edi, 104(%ebx) - movl 100(%esp), %ecx # 4-byte Reload - jne .LBB252_24 -# BB#23: - movl 40(%esp), %ecx # 4-byte Reload -.LBB252_24: - movl %ecx, 108(%ebx) - movl 72(%esp), %ecx # 4-byte Reload - jne .LBB252_26 -# BB#25: - movl 44(%esp), %eax # 4-byte Reload -.LBB252_26: - movl %eax, 112(%ebx) - movl 68(%esp), %eax # 4-byte Reload - jne .LBB252_28 -# BB#27: - movl 48(%esp), %eax # 4-byte Reload -.LBB252_28: - movl %eax, 116(%ebx) - jne .LBB252_30 -# BB#29: - movl 52(%esp), %ecx # 4-byte Reload -.LBB252_30: - movl %ecx, 120(%ebx) - jne .LBB252_32 -# BB#31: - movl 56(%esp), %ebp # 4-byte Reload -.LBB252_32: - movl %ebp, 124(%ebx) - addl $116, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end252: - .size mcl_fpDbl_add16L, .Lfunc_end252-mcl_fpDbl_add16L - - .globl mcl_fpDbl_sub16L - .align 16, 0x90 - .type mcl_fpDbl_sub16L,@function -mcl_fpDbl_sub16L: # @mcl_fpDbl_sub16L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $108, %esp - movl 132(%esp), %eax - movl (%eax), %esi - movl 4(%eax), %edi - movl 136(%esp), %edx - subl (%edx), %esi - sbbl 4(%edx), %edi - movl 8(%eax), %ebx - sbbl 8(%edx), %ebx - movl 128(%esp), %ecx - movl %esi, (%ecx) - movl 12(%eax), %esi - sbbl 12(%edx), %esi - movl %edi, 4(%ecx) - movl 16(%eax), %edi - sbbl 16(%edx), %edi - movl %ebx, 8(%ecx) - movl 20(%edx), %ebx - movl %esi, 12(%ecx) - movl 20(%eax), %esi - sbbl %ebx, %esi - movl 24(%edx), %ebx - movl %edi, 16(%ecx) - movl 24(%eax), %edi - sbbl %ebx, %edi - movl 28(%edx), %ebx - movl %esi, 20(%ecx) - movl 28(%eax), %esi - sbbl %ebx, %esi - movl 32(%edx), %ebx - movl %edi, 24(%ecx) - movl 32(%eax), %edi - sbbl %ebx, %edi - movl 36(%edx), %ebx - movl %esi, 28(%ecx) - movl 36(%eax), %esi - sbbl %ebx, %esi - movl 40(%edx), %ebx - movl %edi, 32(%ecx) - movl 40(%eax), %edi - sbbl %ebx, %edi - movl 44(%edx), %ebx - movl %esi, 36(%ecx) - movl 44(%eax), %esi - sbbl %ebx, %esi - movl 48(%edx), %ebx - movl %edi, 40(%ecx) - movl 48(%eax), %edi - sbbl %ebx, %edi - movl 52(%edx), %ebx - movl %esi, 44(%ecx) - movl 52(%eax), %esi - sbbl %ebx, %esi - movl 56(%edx), %ebx - movl %edi, 48(%ecx) - movl 56(%eax), %edi - sbbl %ebx, %edi - movl 60(%edx), %ebx - movl %esi, 52(%ecx) - movl 60(%eax), %esi - sbbl %ebx, %esi - movl 64(%edx), %ebx - movl %edi, 56(%ecx) - movl 64(%eax), %edi - sbbl %ebx, %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 68(%edx), %edi - movl %esi, 60(%ecx) - movl 68(%eax), %esi - sbbl %edi, %esi - movl %esi, 40(%esp) # 4-byte Spill - movl 72(%edx), %esi - movl 72(%eax), %edi - sbbl %esi, %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 76(%edx), %esi - movl 76(%eax), %edi - sbbl %esi, %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 80(%edx), %esi - movl 80(%eax), %edi - sbbl %esi, %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 84(%edx), %esi - movl 84(%eax), %edi - sbbl %esi, %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 88(%edx), %esi - movl 88(%eax), %edi - sbbl %esi, %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 92(%edx), %esi - movl 92(%eax), %edi - sbbl %esi, %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 96(%edx), %esi - movl 96(%eax), %edi - sbbl %esi, %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 100(%edx), %esi - movl 100(%eax), %edi - sbbl %esi, %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 104(%edx), %esi - movl 104(%eax), %edi - sbbl %esi, %edi - movl %edi, 
84(%esp) # 4-byte Spill - movl 108(%edx), %esi - movl 108(%eax), %edi - sbbl %esi, %edi - movl %edi, 88(%esp) # 4-byte Spill - movl 112(%edx), %esi - movl 112(%eax), %edi - sbbl %esi, %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 116(%edx), %esi - movl 116(%eax), %edi - sbbl %esi, %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 120(%edx), %esi - movl 120(%eax), %edi - sbbl %esi, %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 124(%edx), %edx - movl 124(%eax), %eax - sbbl %edx, %eax - movl %eax, 104(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 140(%esp), %ebx - jne .LBB253_1 -# BB#2: - movl $0, 68(%esp) # 4-byte Folded Spill - jmp .LBB253_3 -.LBB253_1: - movl 60(%ebx), %edx - movl %edx, 68(%esp) # 4-byte Spill -.LBB253_3: - testb %al, %al - jne .LBB253_4 -# BB#5: - movl $0, 24(%esp) # 4-byte Folded Spill - movl $0, %ebp - jmp .LBB253_6 -.LBB253_4: - movl (%ebx), %ebp - movl 4(%ebx), %eax - movl %eax, 24(%esp) # 4-byte Spill -.LBB253_6: - jne .LBB253_7 -# BB#8: - movl $0, 36(%esp) # 4-byte Folded Spill - jmp .LBB253_9 -.LBB253_7: - movl 56(%ebx), %eax - movl %eax, 36(%esp) # 4-byte Spill -.LBB253_9: - jne .LBB253_10 -# BB#11: - movl $0, 32(%esp) # 4-byte Folded Spill - jmp .LBB253_12 -.LBB253_10: - movl 52(%ebx), %eax - movl %eax, 32(%esp) # 4-byte Spill -.LBB253_12: - jne .LBB253_13 -# BB#14: - movl $0, 28(%esp) # 4-byte Folded Spill - jmp .LBB253_15 -.LBB253_13: - movl 48(%ebx), %eax - movl %eax, 28(%esp) # 4-byte Spill -.LBB253_15: - jne .LBB253_16 -# BB#17: - movl $0, 20(%esp) # 4-byte Folded Spill - jmp .LBB253_18 -.LBB253_16: - movl 44(%ebx), %eax - movl %eax, 20(%esp) # 4-byte Spill -.LBB253_18: - jne .LBB253_19 -# BB#20: - movl $0, 16(%esp) # 4-byte Folded Spill - jmp .LBB253_21 -.LBB253_19: - movl 40(%ebx), %eax - movl %eax, 16(%esp) # 4-byte Spill -.LBB253_21: - jne .LBB253_22 -# BB#23: - movl $0, 12(%esp) # 4-byte Folded Spill - jmp .LBB253_24 -.LBB253_22: - movl 36(%ebx), %eax - movl %eax, 12(%esp) # 4-byte Spill -.LBB253_24: - jne .LBB253_25 -# BB#26: - movl $0, 8(%esp) # 4-byte Folded Spill - jmp .LBB253_27 -.LBB253_25: - movl 32(%ebx), %eax - movl %eax, 8(%esp) # 4-byte Spill -.LBB253_27: - jne .LBB253_28 -# BB#29: - movl $0, 4(%esp) # 4-byte Folded Spill - jmp .LBB253_30 -.LBB253_28: - movl 28(%ebx), %eax - movl %eax, 4(%esp) # 4-byte Spill -.LBB253_30: - jne .LBB253_31 -# BB#32: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB253_33 -.LBB253_31: - movl 24(%ebx), %eax - movl %eax, (%esp) # 4-byte Spill -.LBB253_33: - jne .LBB253_34 -# BB#35: - movl $0, %esi - jmp .LBB253_36 -.LBB253_34: - movl 20(%ebx), %esi -.LBB253_36: - jne .LBB253_37 -# BB#38: - movl $0, %edx - jmp .LBB253_39 -.LBB253_37: - movl 16(%ebx), %edx -.LBB253_39: - jne .LBB253_40 -# BB#41: - movl $0, %edi - jmp .LBB253_42 -.LBB253_40: - movl 12(%ebx), %edi -.LBB253_42: - jne .LBB253_43 -# BB#44: - xorl %ebx, %ebx - jmp .LBB253_45 -.LBB253_43: - movl 8(%ebx), %ebx -.LBB253_45: - addl 48(%esp), %ebp # 4-byte Folded Reload - movl %ebp, %eax - movl 24(%esp), %ebp # 4-byte Reload - adcl 40(%esp), %ebp # 4-byte Folded Reload - movl %eax, 64(%ecx) - adcl 44(%esp), %ebx # 4-byte Folded Reload - movl %ebp, 68(%ecx) - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %ebx, 72(%ecx) - adcl 56(%esp), %edx # 4-byte Folded Reload - movl %edi, 76(%ecx) - adcl 60(%esp), %esi # 4-byte Folded Reload - movl %edx, 80(%ecx) - movl (%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %esi, 84(%ecx) - movl 4(%esp), %edx # 4-byte Reload - adcl 72(%esp), %edx # 4-byte 
Folded Reload - movl %eax, 88(%ecx) - movl 8(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %edx, 92(%ecx) - movl 12(%esp), %edx # 4-byte Reload - adcl 80(%esp), %edx # 4-byte Folded Reload - movl %eax, 96(%ecx) - movl 16(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %edx, 100(%ecx) - movl 20(%esp), %edx # 4-byte Reload - adcl 88(%esp), %edx # 4-byte Folded Reload - movl %eax, 104(%ecx) - movl 28(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %edx, 108(%ecx) - movl 32(%esp), %edx # 4-byte Reload - adcl 96(%esp), %edx # 4-byte Folded Reload - movl %eax, 112(%ecx) - movl 36(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %edx, 116(%ecx) - movl %eax, 120(%ecx) - movl 68(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 124(%ecx) - addl $108, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end253: - .size mcl_fpDbl_sub16L, .Lfunc_end253-mcl_fpDbl_sub16L - - .align 16, 0x90 - .type .LmulPv544x32,@function -.LmulPv544x32: # @mulPv544x32 -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $120, %esp - movl %edx, %ebp - movl 140(%esp), %ebx - movl %ebx, %eax - mull 64(%ebp) - movl %edx, 116(%esp) # 4-byte Spill - movl %eax, 112(%esp) # 4-byte Spill - movl %ebx, %eax - mull 60(%ebp) - movl %edx, 108(%esp) # 4-byte Spill - movl %eax, 104(%esp) # 4-byte Spill - movl %ebx, %eax - mull 56(%ebp) - movl %edx, 100(%esp) # 4-byte Spill - movl %eax, 96(%esp) # 4-byte Spill - movl %ebx, %eax - mull 52(%ebp) - movl %edx, 92(%esp) # 4-byte Spill - movl %eax, 88(%esp) # 4-byte Spill - movl %ebx, %eax - mull 48(%ebp) - movl %edx, 84(%esp) # 4-byte Spill - movl %eax, 80(%esp) # 4-byte Spill - movl %ebx, %eax - mull 44(%ebp) - movl %edx, 76(%esp) # 4-byte Spill - movl %eax, 72(%esp) # 4-byte Spill - movl %ebx, %eax - mull 40(%ebp) - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, 64(%esp) # 4-byte Spill - movl %ebx, %eax - mull 36(%ebp) - movl %edx, 60(%esp) # 4-byte Spill - movl %eax, 56(%esp) # 4-byte Spill - movl %ebx, %eax - mull 32(%ebp) - movl %edx, 52(%esp) # 4-byte Spill - movl %eax, 48(%esp) # 4-byte Spill - movl %ebx, %eax - mull 28(%ebp) - movl %edx, 44(%esp) # 4-byte Spill - movl %eax, 40(%esp) # 4-byte Spill - movl %ebx, %eax - mull 24(%ebp) - movl %edx, 36(%esp) # 4-byte Spill - movl %eax, 32(%esp) # 4-byte Spill - movl %ebx, %eax - mull 20(%ebp) - movl %edx, 28(%esp) # 4-byte Spill - movl %eax, 24(%esp) # 4-byte Spill - movl %ebx, %eax - mull 16(%ebp) - movl %edx, 20(%esp) # 4-byte Spill - movl %eax, 16(%esp) # 4-byte Spill - movl %ebx, %eax - mull 12(%ebp) - movl %edx, 12(%esp) # 4-byte Spill - movl %eax, 8(%esp) # 4-byte Spill - movl %ebx, %eax - mull 8(%ebp) - movl %edx, %edi - movl %eax, 4(%esp) # 4-byte Spill - movl %ebx, %eax - mull 4(%ebp) - movl %edx, %esi - movl %eax, (%esp) # 4-byte Spill - movl %ebx, %eax - mull (%ebp) - movl %eax, (%ecx) - addl (%esp), %edx # 4-byte Folded Reload - movl %edx, 4(%ecx) - adcl 4(%esp), %esi # 4-byte Folded Reload - movl %esi, 8(%ecx) - adcl 8(%esp), %edi # 4-byte Folded Reload - movl %edi, 12(%ecx) - movl 12(%esp), %eax # 4-byte Reload - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 16(%ecx) - movl 20(%esp), %eax # 4-byte Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 20(%ecx) - movl 28(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 24(%ecx) - movl 36(%esp), %eax # 4-byte Reload - adcl 
40(%esp), %eax # 4-byte Folded Reload - movl %eax, 28(%ecx) - movl 44(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 32(%ecx) - movl 52(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 36(%ecx) - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 40(%ecx) - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 44(%ecx) - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 48(%ecx) - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%ecx) - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%ecx) - movl 100(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%ecx) - movl 108(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%ecx) - movl 116(%esp), %eax # 4-byte Reload - adcl $0, %eax - movl %eax, 68(%ecx) - movl %ecx, %eax - addl $120, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end254: - .size .LmulPv544x32, .Lfunc_end254-.LmulPv544x32 - - .globl mcl_fp_mulUnitPre17L - .align 16, 0x90 - .type mcl_fp_mulUnitPre17L,@function -mcl_fp_mulUnitPre17L: # @mcl_fp_mulUnitPre17L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $140, %esp - calll .L255$pb -.L255$pb: - popl %ebx -.Ltmp56: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp56-.L255$pb), %ebx - movl 168(%esp), %eax - movl %eax, (%esp) - leal 64(%esp), %ecx - movl 164(%esp), %edx - calll .LmulPv544x32 - movl 132(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 128(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 124(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 120(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 116(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 112(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 108(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 104(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 100(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 96(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 92(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 88(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 84(%esp), %ebp - movl 80(%esp), %ebx - movl 76(%esp), %edi - movl 72(%esp), %esi - movl 64(%esp), %edx - movl 68(%esp), %ecx - movl 160(%esp), %eax - movl %edx, (%eax) - movl %ecx, 4(%eax) - movl %esi, 8(%eax) - movl %edi, 12(%eax) - movl %ebx, 16(%eax) - movl %ebp, 20(%eax) - movl 16(%esp), %ecx # 4-byte Reload - movl %ecx, 24(%eax) - movl 20(%esp), %ecx # 4-byte Reload - movl %ecx, 28(%eax) - movl 24(%esp), %ecx # 4-byte Reload - movl %ecx, 32(%eax) - movl 28(%esp), %ecx # 4-byte Reload - movl %ecx, 36(%eax) - movl 32(%esp), %ecx # 4-byte Reload - movl %ecx, 40(%eax) - movl 36(%esp), %ecx # 4-byte Reload - movl %ecx, 44(%eax) - movl 40(%esp), %ecx # 4-byte Reload - movl %ecx, 48(%eax) - movl 44(%esp), %ecx # 4-byte Reload - movl %ecx, 52(%eax) - movl 48(%esp), %ecx # 4-byte Reload - movl %ecx, 56(%eax) - movl 52(%esp), %ecx # 4-byte Reload - movl %ecx, 60(%eax) - movl 56(%esp), %ecx # 4-byte Reload - movl %ecx, 64(%eax) - movl 60(%esp), %ecx # 4-byte Reload - movl %ecx, 68(%eax) - addl $140, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end255: - .size mcl_fp_mulUnitPre17L, .Lfunc_end255-mcl_fp_mulUnitPre17L - - 
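The generated routines around this point all follow one template: .LmulPv544x32 multiplies a 17-limb (544-bit) operand by a single 32-bit limb using seventeen mull instructions whose partial products are combined with an addl/adcl carry chain, and mcl_fpDbl_mulPre17L (below) builds the full 34-limb product by invoking that primitive once per limb of the multiplier and folding each 18-limb row into the accumulator with another adcl chain. A minimal C sketch of that schoolbook structure, using hypothetical helper names (mul_unit_17, mul_pre_17) rather than mcl's real API:

    #include <stdint.h>

    /* Sketch of the pattern in the surrounding assembly (names are
     * illustrative, not mcl's). mul_unit_17 mirrors .LmulPv544x32:
     * one 32-bit limb times a 17-limb operand, 18-limb result.
     * mul_pre_17 mirrors mcl_fpDbl_mulPre17L: one row per multiplier
     * limb, accumulated at offset j. */
    static void mul_unit_17(uint32_t z[18], const uint32_t x[17], uint32_t y)
    {
        uint32_t carry = 0;
        for (int i = 0; i < 17; i++) {
            uint64_t t = (uint64_t)x[i] * y + carry;  /* mull + adcl chain */
            z[i]  = (uint32_t)t;
            carry = (uint32_t)(t >> 32);
        }
        z[17] = carry;                                /* the final "adcl $0" limb */
    }

    static void mul_pre_17(uint32_t z[34], const uint32_t x[17],
                           const uint32_t y[17])
    {
        uint32_t row[18];
        for (int i = 0; i < 34; i++)
            z[i] = 0;
        for (int j = 0; j < 17; j++) {                /* one row per limb of y */
            uint64_t t = 0;
            mul_unit_17(row, x, y[j]);
            for (int i = 0; i < 18; i++) {            /* addl/adcl accumulation;
                                                         the carry out of the top
                                                         limb is provably zero */
                t += (uint64_t)z[j + i] + row[i];
                z[j + i] = (uint32_t)t;
                t >>= 32;
            }
        }
    }

mcl_fpDbl_sqrPre17L at the end of this hunk appears to be the same loop specialized to x == y (both operand pointers are loaded from the single source argument), which is why its body is nearly identical to mcl_fpDbl_mulPre17L's.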
.globl mcl_fpDbl_mulPre17L - .align 16, 0x90 - .type mcl_fpDbl_mulPre17L,@function -mcl_fpDbl_mulPre17L: # @mcl_fpDbl_mulPre17L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1356, %esp # imm = 0x54C - calll .L256$pb -.L256$pb: - popl %edi -.Ltmp57: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp57-.L256$pb), %edi - movl %edi, 124(%esp) # 4-byte Spill - movl 1384(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 1280(%esp), %ecx - movl 1380(%esp), %edx - movl %edx, %esi - movl %edi, %ebx - calll .LmulPv544x32 - movl 1348(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 1344(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 1340(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 1336(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 1332(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 1328(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1324(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 1320(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1316(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 1312(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 1308(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 1304(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1300(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 1296(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 1292(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 1288(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 1280(%esp), %eax - movl 1284(%esp), %ebp - movl 1376(%esp), %ecx - movl %eax, (%ecx) - movl 1384(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 1208(%esp), %ecx - movl %esi, %edx - movl %edi, %ebx - calll .LmulPv544x32 - addl 1208(%esp), %ebp - movl %ebp, 8(%esp) # 4-byte Spill - movl 1276(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 1272(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 1268(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 1264(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 1260(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 1256(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 1252(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1248(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1244(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 1240(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 1236(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 1232(%esp), %edi - movl 1228(%esp), %esi - movl 1224(%esp), %edx - movl 1220(%esp), %ecx - movl 1212(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 1216(%esp), %eax - movl 1376(%esp), %ebp - movl 8(%esp), %ebx # 4-byte Reload - movl %ebx, 4(%ebp) - movl 12(%esp), %ebp # 4-byte Reload - adcl %ebp, 120(%esp) # 4-byte Folded Spill - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte 
Folded Spill - movl 68(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 64(%esp) # 4-byte Folded Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 1136(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 1136(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 1204(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 1200(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 1196(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 1192(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 1188(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 1184(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 1180(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 1176(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1172(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 1168(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1164(%esp), %ebx - movl 1160(%esp), %edi - movl 1156(%esp), %esi - movl 1152(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1148(%esp), %edx - movl 1140(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 1144(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 80(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 72(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 
56(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 1064(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 1064(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 1132(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1128(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 1124(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 1120(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 1116(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 1112(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1108(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 1100(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 1096(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 1092(%esp), %ebx - movl 1088(%esp), %edi - movl 1084(%esp), %esi - movl 1080(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 1076(%esp), %edx - movl 1068(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 1072(%esp), %ecx - movl 1376(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%eax) - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 992(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 992(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 1060(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1056(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 1052(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 1044(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 1040(%esp), 
%eax - movl %eax, 40(%esp) # 4-byte Spill - movl 1036(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 1032(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1028(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 1024(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1020(%esp), %ebx - movl 1016(%esp), %edi - movl 1012(%esp), %esi - movl 1008(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 1004(%esp), %edx - movl 996(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 1000(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 4(%esp), %ebp # 4-byte Folded Reload - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - adcl %eax, 24(%esp) # 4-byte Folded Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 88(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 920(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 920(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 988(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 984(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 980(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 976(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 972(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 968(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 964(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 960(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 956(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 952(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 948(%esp), %ebx - movl 944(%esp), %edi - movl 940(%esp), %esi - movl 936(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 932(%esp), %edx - movl 924(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 928(%esp), %ecx - movl 1376(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%eax) - movl 4(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill 
- adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 848(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 848(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 916(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 912(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 908(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 904(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 900(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 896(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 892(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 888(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 884(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 880(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 876(%esp), %ebx - movl 872(%esp), %edi - movl 868(%esp), %esi - movl 864(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 860(%esp), %edx - movl 852(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 856(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 24(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 4(%esp), %ebp # 4-byte Folded Reload - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 
104(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 776(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 776(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 844(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 840(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 836(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 832(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 828(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 820(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 816(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 812(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 808(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 804(%esp), %ebx - movl 800(%esp), %edi - movl 796(%esp), %esi - movl 792(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 788(%esp), %edx - movl 780(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 784(%esp), %ecx - movl 1376(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%eax) - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl 
%eax, 96(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 704(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 704(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 772(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 768(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 764(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 760(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 756(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 752(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 748(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 744(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 740(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 736(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 732(%esp), %ebx - movl 728(%esp), %edi - movl 724(%esp), %esi - movl 720(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 716(%esp), %edx - movl 708(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 712(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 32(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 632(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 632(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 700(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 696(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 692(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 688(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 684(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 680(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 676(%esp), %eax - 
movl %eax, 104(%esp) # 4-byte Spill - movl 672(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 668(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 664(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 660(%esp), %ebx - movl 656(%esp), %edi - movl 652(%esp), %esi - movl 648(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 644(%esp), %edx - movl 636(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 640(%esp), %ecx - movl 1376(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 36(%eax) - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 560(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 560(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 628(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 624(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 620(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 616(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 612(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 608(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 604(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 600(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 596(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 592(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 588(%esp), %ebx - movl 584(%esp), %edi - movl 580(%esp), %esi - movl 576(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 572(%esp), %edx - movl 564(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 568(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 40(%eax) - movl 48(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte 
Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 24(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 488(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 488(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 556(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 552(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 548(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 544(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 540(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 536(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 532(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 528(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 524(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 520(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 516(%esp), %ebx - movl 512(%esp), %edi - movl 508(%esp), %esi - movl 504(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 500(%esp), %edx - movl 492(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 496(%esp), %ecx - movl 1376(%esp), %eax - movl 48(%esp), %ebp # 4-byte Reload - movl %ebp, 44(%eax) - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 56(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded 
Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 416(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 416(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 484(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 480(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 476(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 472(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 468(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 464(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 460(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 456(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 452(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 448(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 444(%esp), %ebx - movl 440(%esp), %edi - movl 436(%esp), %esi - movl 432(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 428(%esp), %edx - movl 420(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 424(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 48(%eax) - movl 48(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 52(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - 
adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 52(%eax), %eax - movl %eax, (%esp) - leal 344(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 344(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 412(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 408(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 404(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 400(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 396(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 392(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 388(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 384(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 380(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 376(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 372(%esp), %ebx - movl 368(%esp), %edi - movl 364(%esp), %esi - movl 360(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 356(%esp), %edx - movl 348(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 352(%esp), %ecx - movl 1376(%esp), %eax - movl 48(%esp), %ebp # 4-byte Reload - movl %ebp, 52(%eax) - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 56(%eax), %eax - movl %eax, (%esp) - leal 272(%esp), %ecx - movl 1380(%esp), %eax - movl %eax, %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 272(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 340(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 336(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 332(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 328(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 324(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 320(%esp), 
%esi - movl %esi, 48(%esp) # 4-byte Spill - movl 316(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 312(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 308(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 304(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 300(%esp), %ebx - movl 296(%esp), %edi - movl 292(%esp), %edx - movl 288(%esp), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 284(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 276(%esp), %eax - movl 280(%esp), %ecx - movl 120(%esp), %esi # 4-byte Reload - movl 1376(%esp), %ebp - movl %esi, 56(%ebp) - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, %ebp - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 76(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - adcl %eax, 24(%esp) # 4-byte Folded Spill - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 52(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 28(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 44(%esp), %esi # 4-byte Folded Reload - movl %esi, 112(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 1384(%esp), %ecx - movl %ecx, %eax - movl 60(%eax), %eax - movl %eax, (%esp) - leal 200(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 200(%esp), %ebp - movl %ebp, 12(%esp) # 4-byte Spill - movl 268(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 264(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 260(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 256(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 252(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 248(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 244(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 240(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 236(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 232(%esp), %edi - movl 228(%esp), %esi - movl 224(%esp), %edx - movl 220(%esp), %ecx - movl 216(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 212(%esp), %eax - movl 204(%esp), %ebp - movl %ebp, 120(%esp) # 4-byte Spill - movl 208(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 12(%esp), %ebp # 4-byte Reload - movl 1376(%esp), %ebx - movl %ebp, 60(%ebx) - movl 120(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 
120(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 76(%esp), %ebp # 4-byte Folded Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 56(%esp), %esi # 4-byte Folded Reload - movl %esi, 56(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 32(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - adcl $0, 52(%esp) # 4-byte Folded Spill - movl 1384(%esp), %eax - movl 64(%eax), %eax - movl %eax, (%esp) - leal 128(%esp), %ecx - movl 1380(%esp), %edx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 128(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl 132(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 136(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 196(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 192(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 188(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 184(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 180(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 176(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 172(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 168(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 164(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 160(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 156(%esp), %ebx - movl 152(%esp), %edi - movl 148(%esp), %esi - movl 144(%esp), %edx - movl 140(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 64(%eax) - movl 64(%esp), %ebp # 4-byte Reload - movl %ebp, 68(%eax) - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl 76(%esp), %ebp # 4-byte Reload - movl %ebp, 72(%eax) - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %ecx, 76(%eax) - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %edx, 80(%eax) - adcl 56(%esp), %edi # 4-byte Folded Reload - movl %esi, 84(%eax) - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %edi, 88(%eax) - movl 20(%esp), %edx # 4-byte Reload - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %ebx, 92(%eax) - movl 32(%esp), %ecx # 4-byte Reload - adcl 80(%esp), %ecx # 4-byte Folded Reload - movl %edx, 96(%eax) - movl 48(%esp), %edx # 4-byte Reload - adcl 88(%esp), %edx # 4-byte Folded Reload - movl %ecx, 100(%eax) - movl 68(%esp), %ecx # 4-byte 
Reload - adcl 96(%esp), %ecx # 4-byte Folded Reload - movl %edx, 104(%eax) - movl 84(%esp), %edx # 4-byte Reload - adcl 104(%esp), %edx # 4-byte Folded Reload - movl %ecx, 108(%eax) - movl 92(%esp), %ecx # 4-byte Reload - adcl 116(%esp), %ecx # 4-byte Folded Reload - movl %edx, 112(%eax) - movl 100(%esp), %edx # 4-byte Reload - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %ecx, 116(%eax) - movl 108(%esp), %ecx # 4-byte Reload - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %edx, 120(%eax) - movl %ecx, 124(%eax) - movl 112(%esp), %ecx # 4-byte Reload - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 128(%eax) - movl 124(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 132(%eax) - addl $1356, %esp # imm = 0x54C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end256: - .size mcl_fpDbl_mulPre17L, .Lfunc_end256-mcl_fpDbl_mulPre17L - - .globl mcl_fpDbl_sqrPre17L - .align 16, 0x90 - .type mcl_fpDbl_sqrPre17L,@function -mcl_fpDbl_sqrPre17L: # @mcl_fpDbl_sqrPre17L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $1356, %esp # imm = 0x54C - calll .L257$pb -.L257$pb: - popl %ebx -.Ltmp58: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp58-.L257$pb), %ebx - movl %ebx, 124(%esp) # 4-byte Spill - movl 1380(%esp), %edx - movl (%edx), %eax - movl %eax, (%esp) - leal 1280(%esp), %ecx - movl %edx, %edi - movl %ebx, %esi - calll .LmulPv544x32 - movl 1348(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 1344(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 1340(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 1336(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 1332(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 1328(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1324(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 1320(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1316(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 1312(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 1308(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 1304(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1300(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 1296(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 1292(%esp), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 1288(%esp), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 1280(%esp), %eax - movl 1284(%esp), %ebp - movl 1376(%esp), %ecx - movl %eax, (%ecx) - movl %edi, %edx - movl 4(%edx), %eax - movl %eax, (%esp) - leal 1208(%esp), %ecx - movl %esi, %ebx - calll .LmulPv544x32 - addl 1208(%esp), %ebp - movl %ebp, 8(%esp) # 4-byte Spill - movl 1276(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 1272(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 1268(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 1264(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 1260(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 1256(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 1252(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1248(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1244(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 1240(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 1236(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 1232(%esp), %edi - movl 1228(%esp), %esi - movl 1224(%esp), %edx - movl 1220(%esp), %ecx - movl 1212(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 1216(%esp), %eax - movl 1376(%esp), %ebp - movl 8(%esp), %ebx # 
4-byte Reload - movl %ebx, 4(%ebp) - movl 12(%esp), %ebp # 4-byte Reload - adcl %ebp, 120(%esp) # 4-byte Folded Spill - adcl 16(%esp), %eax # 4-byte Folded Reload - movl %eax, 8(%esp) # 4-byte Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 12(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 32(%esp), %esi # 4-byte Folded Reload - movl %esi, 20(%esp) # 4-byte Spill - adcl 40(%esp), %edi # 4-byte Folded Reload - movl %edi, 24(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 52(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - movl 68(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - movl 76(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 64(%esp) # 4-byte Folded Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - adcl $0, 56(%esp) # 4-byte Folded Spill - movl 1380(%esp), %edx - movl 8(%edx), %eax - movl %eax, (%esp) - leal 1136(%esp), %ecx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 1136(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 1204(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 1200(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 1196(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 1192(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 1188(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 1184(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 1180(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 1176(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1172(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 1168(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1164(%esp), %ebx - movl 1160(%esp), %edi - movl 1156(%esp), %esi - movl 1152(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1148(%esp), %edx - movl 1140(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 1144(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 8(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 24(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 80(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 72(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 28(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 
36(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 56(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1380(%esp), %edx - movl 12(%edx), %eax - movl %eax, (%esp) - leal 1064(%esp), %ecx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 1064(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 1132(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 1128(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 1124(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 1120(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 1116(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 1112(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 1108(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 1104(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 1100(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 1096(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 1092(%esp), %ebx - movl 1088(%esp), %edi - movl 1084(%esp), %esi - movl 1080(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 1076(%esp), %edx - movl 1068(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 1072(%esp), %ecx - movl 1376(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 12(%eax) - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 
96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1380(%esp), %edx - movl 16(%edx), %eax - movl %eax, (%esp) - leal 992(%esp), %ecx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 992(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 1060(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 1056(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 1052(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 1048(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 1044(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 1040(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 1036(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 1032(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 1028(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 1024(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 1020(%esp), %ebx - movl 1016(%esp), %edi - movl 1012(%esp), %esi - movl 1008(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 1004(%esp), %edx - movl 996(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 1000(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 16(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1380(%esp), %edx - movl 20(%edx), %eax - movl %eax, (%esp) - leal 920(%esp), %ecx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 920(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 988(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 984(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 980(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 976(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 972(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - 
movl 968(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 964(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 960(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 956(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 952(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 948(%esp), %ebx - movl 944(%esp), %edi - movl 940(%esp), %esi - movl 936(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 932(%esp), %edx - movl 924(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 928(%esp), %ecx - movl 1376(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 20(%eax) - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1380(%esp), %edx - movl 24(%edx), %eax - movl %eax, (%esp) - leal 848(%esp), %ecx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 848(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 916(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 912(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 908(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 904(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 900(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 896(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 892(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 888(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 884(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 880(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 876(%esp), %ebx - movl 872(%esp), %edi - movl 868(%esp), %esi - movl 864(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 860(%esp), %edx - movl 852(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 856(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 24(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), 
%ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1380(%esp), %edx - movl 28(%edx), %eax - movl %eax, (%esp) - leal 776(%esp), %ecx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 776(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 844(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 840(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 836(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 832(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 828(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 824(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 820(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 816(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 812(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 808(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 804(%esp), %ebx - movl 800(%esp), %edi - movl 796(%esp), %esi - movl 792(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 788(%esp), %edx - movl 780(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 784(%esp), %ecx - movl 1376(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 28(%eax) - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - 
adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1380(%esp), %edx - movl 32(%edx), %eax - movl %eax, (%esp) - leal 704(%esp), %ecx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 704(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 772(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 768(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 764(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 760(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 756(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 752(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 748(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 744(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 740(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 736(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 732(%esp), %ebx - movl 728(%esp), %edi - movl 724(%esp), %esi - movl 720(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 716(%esp), %edx - movl 708(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 712(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 32(%eax) - movl 52(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 32(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 24(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte 
Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 40(%esp) # 4-byte Folded Spill - movl 1380(%esp), %edx - movl 36(%edx), %eax - movl %eax, (%esp) - leal 632(%esp), %ecx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 632(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 700(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 696(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 692(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 688(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 684(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 680(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 676(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 672(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 668(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 664(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 660(%esp), %ebx - movl 656(%esp), %edi - movl 652(%esp), %esi - movl 648(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 644(%esp), %edx - movl 636(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 640(%esp), %ecx - movl 1376(%esp), %eax - movl 52(%esp), %ebp # 4-byte Reload - movl %ebp, 36(%eax) - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 32(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 24(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1380(%esp), %edx - movl 40(%edx), %eax - movl %eax, (%esp) - leal 560(%esp), %ecx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 560(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 628(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 624(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 620(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 616(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 612(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 608(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 604(%esp), %eax - movl %eax, 116(%esp) # 
4-byte Spill - movl 600(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 596(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 592(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 588(%esp), %ebx - movl 584(%esp), %edi - movl 580(%esp), %esi - movl 576(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 572(%esp), %edx - movl 564(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 568(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 40(%eax) - movl 48(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 12(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 24(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 16(%esp) # 4-byte Spill - adcl 52(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 1380(%esp), %edx - movl 44(%edx), %eax - movl %eax, (%esp) - leal 488(%esp), %ecx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 488(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 556(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 552(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 548(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 544(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 540(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 536(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 532(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 528(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 524(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 520(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 516(%esp), %ebx - movl 512(%esp), %edi - movl 508(%esp), %esi - movl 504(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 500(%esp), %edx - movl 492(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 496(%esp), %ecx - movl 1376(%esp), %eax - movl 48(%esp), %ebp # 4-byte Reload - movl %ebp, 44(%eax) - movl 8(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 12(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill 
- movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 20(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 56(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1380(%esp), %edx - movl 48(%edx), %eax - movl %eax, (%esp) - leal 416(%esp), %ecx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 416(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 484(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 480(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 476(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 472(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 468(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 464(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 460(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 456(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 452(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 448(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 444(%esp), %ebx - movl 440(%esp), %edi - movl 436(%esp), %esi - movl 432(%esp), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 428(%esp), %edx - movl 420(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 424(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 48(%eax) - movl 48(%esp), %ebp # 4-byte Reload - adcl 4(%esp), %ebp # 4-byte Folded Reload - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - adcl %eax, 24(%esp) # 4-byte Folded Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 52(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 116(%esp), %eax # 
4-byte Reload - adcl 44(%esp), %eax # 4-byte Folded Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 1380(%esp), %edx - movl 52(%edx), %eax - movl %eax, (%esp) - leal 344(%esp), %ecx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 344(%esp), %ebp - movl %ebp, 48(%esp) # 4-byte Spill - movl 412(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 408(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 404(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 400(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 396(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 392(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 388(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 384(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 380(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 376(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 372(%esp), %ebx - movl 368(%esp), %edi - movl 364(%esp), %esi - movl 360(%esp), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 356(%esp), %edx - movl 348(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 352(%esp), %ecx - movl 1376(%esp), %eax - movl 48(%esp), %ebp # 4-byte Reload - movl %ebp, 52(%eax) - movl 4(%esp), %eax # 4-byte Reload - adcl %eax, 120(%esp) # 4-byte Folded Spill - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 4(%esp) # 4-byte Spill - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %edx, 8(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - adcl %eax, 28(%esp) # 4-byte Folded Spill - adcl 16(%esp), %esi # 4-byte Folded Reload - movl %esi, 12(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 16(%esp) # 4-byte Spill - adcl 56(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 20(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 60(%esp), %eax # 4-byte Folded Reload - movl %eax, 52(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 76(%esp), %eax # 4-byte Folded Reload - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 40(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 32(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, 36(%esp) # 4-byte Folded Spill - movl 1380(%esp), %edx - movl 56(%edx), %eax - movl 
%eax, (%esp) - leal 272(%esp), %ecx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 272(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 340(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 336(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 332(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 328(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 324(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 320(%esp), %esi - movl %esi, 48(%esp) # 4-byte Spill - movl 316(%esp), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 312(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 308(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 304(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 300(%esp), %ebx - movl 296(%esp), %edi - movl 292(%esp), %edx - movl 288(%esp), %esi - movl %esi, 24(%esp) # 4-byte Spill - movl 284(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 276(%esp), %eax - movl 280(%esp), %ecx - movl 120(%esp), %esi # 4-byte Reload - movl 1376(%esp), %ebp - movl %esi, 56(%ebp) - adcl 4(%esp), %eax # 4-byte Folded Reload - movl %eax, %ebp - adcl 8(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 8(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %esi, 76(%esp) # 4-byte Spill - movl 12(%esp), %eax # 4-byte Reload - adcl %eax, 24(%esp) # 4-byte Folded Spill - adcl 16(%esp), %edx # 4-byte Folded Reload - movl %edx, 16(%esp) # 4-byte Spill - adcl 20(%esp), %edi # 4-byte Folded Reload - movl %edi, 20(%esp) # 4-byte Spill - adcl 52(%esp), %ebx # 4-byte Folded Reload - movl %ebx, 28(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 68(%esp), %eax # 4-byte Folded Reload - movl %eax, 56(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %eax, 60(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esp) # 4-byte Spill - movl 116(%esp), %esi # 4-byte Reload - adcl 44(%esp), %esi # 4-byte Folded Reload - movl %esi, 116(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl %eax, 48(%esp) # 4-byte Folded Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %eax, 84(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %eax, 92(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 36(%esp), %eax # 4-byte Folded Reload - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, 32(%esp) # 4-byte Folded Spill - movl 1380(%esp), %edx - movl 60(%edx), %eax - movl %eax, (%esp) - leal 200(%esp), %ecx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - addl 200(%esp), %ebp - movl %ebp, 12(%esp) # 4-byte Spill - movl 268(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 264(%esp), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 260(%esp), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 256(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 252(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 248(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 244(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 240(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 236(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill 
- movl 232(%esp), %edi - movl 228(%esp), %esi - movl 224(%esp), %edx - movl 220(%esp), %ecx - movl 216(%esp), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 212(%esp), %eax - movl 204(%esp), %ebp - movl %ebp, 120(%esp) # 4-byte Spill - movl 208(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 12(%esp), %ebp # 4-byte Reload - movl 1376(%esp), %ebx - movl %ebp, 60(%ebx) - movl 120(%esp), %ebp # 4-byte Reload - adcl 8(%esp), %ebp # 4-byte Folded Reload - movl %ebp, 120(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 76(%esp), %ebp # 4-byte Folded Reload - adcl 24(%esp), %eax # 4-byte Folded Reload - movl %eax, 76(%esp) # 4-byte Spill - movl 16(%esp), %eax # 4-byte Reload - adcl %eax, 36(%esp) # 4-byte Folded Spill - adcl 20(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 24(%esp) # 4-byte Spill - adcl 28(%esp), %edx # 4-byte Folded Reload - movl %edx, 28(%esp) # 4-byte Spill - adcl 56(%esp), %esi # 4-byte Folded Reload - movl %esi, 56(%esp) # 4-byte Spill - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %edi, 60(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %eax, 72(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 116(%esp), %eax # 4-byte Folded Reload - movl %eax, 80(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl %eax, 40(%esp) # 4-byte Folded Spill - movl 32(%esp), %eax # 4-byte Reload - adcl %eax, 44(%esp) # 4-byte Folded Spill - adcl $0, 52(%esp) # 4-byte Folded Spill - movl 1380(%esp), %edx - movl 64(%edx), %eax - movl %eax, (%esp) - leal 128(%esp), %ecx - movl 124(%esp), %ebx # 4-byte Reload - calll .LmulPv544x32 - movl 120(%esp), %eax # 4-byte Reload - addl 128(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl 132(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 136(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 196(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 192(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 188(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 184(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 180(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 176(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 172(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 168(%esp), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 164(%esp), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 160(%esp), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 156(%esp), %ebx - movl 152(%esp), %edi - movl 148(%esp), %esi - movl 144(%esp), %edx - movl 140(%esp), %ecx - movl 1376(%esp), %eax - movl 120(%esp), %ebp # 4-byte Reload - movl %ebp, 64(%eax) - movl 68(%esp), %ebp # 4-byte Reload - movl %ebp, 68(%eax) - adcl 36(%esp), %ecx # 4-byte Folded Reload - movl 76(%esp), %ebp # 4-byte Reload - movl %ebp, 72(%eax) - adcl 24(%esp), %edx # 4-byte Folded Reload - movl %ecx, 76(%eax) - adcl 28(%esp), %esi # 4-byte Folded Reload - movl %edx, 80(%eax) - adcl 56(%esp), %edi # 4-byte 
Folded Reload - movl %esi, 84(%eax) - adcl 60(%esp), %ebx # 4-byte Folded Reload - movl %edi, 88(%eax) - movl 20(%esp), %edx # 4-byte Reload - adcl 72(%esp), %edx # 4-byte Folded Reload - movl %ebx, 92(%eax) - movl 32(%esp), %ecx # 4-byte Reload - adcl 80(%esp), %ecx # 4-byte Folded Reload - movl %edx, 96(%eax) - movl 48(%esp), %edx # 4-byte Reload - adcl 88(%esp), %edx # 4-byte Folded Reload - movl %ecx, 100(%eax) - movl 64(%esp), %ecx # 4-byte Reload - adcl 96(%esp), %ecx # 4-byte Folded Reload - movl %edx, 104(%eax) - movl 84(%esp), %edx # 4-byte Reload - adcl 104(%esp), %edx # 4-byte Folded Reload - movl %ecx, 108(%eax) - movl 92(%esp), %ecx # 4-byte Reload - adcl 112(%esp), %ecx # 4-byte Folded Reload - movl %edx, 112(%eax) - movl 100(%esp), %edx # 4-byte Reload - adcl 40(%esp), %edx # 4-byte Folded Reload - movl %ecx, 116(%eax) - movl 108(%esp), %ecx # 4-byte Reload - adcl 44(%esp), %ecx # 4-byte Folded Reload - movl %edx, 120(%eax) - movl %ecx, 124(%eax) - movl 116(%esp), %ecx # 4-byte Reload - adcl 52(%esp), %ecx # 4-byte Folded Reload - movl %ecx, 128(%eax) - movl 124(%esp), %ecx # 4-byte Reload - adcl $0, %ecx - movl %ecx, 132(%eax) - addl $1356, %esp # imm = 0x54C - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end257: - .size mcl_fpDbl_sqrPre17L, .Lfunc_end257-mcl_fpDbl_sqrPre17L - - .globl mcl_fp_mont17L - .align 16, 0x90 - .type mcl_fp_mont17L,@function -mcl_fp_mont17L: # @mcl_fp_mont17L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $2588, %esp # imm = 0xA1C - calll .L258$pb -.L258$pb: - popl %ebx -.Ltmp59: - addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp59-.L258$pb), %ebx - movl 2620(%esp), %eax - movl -4(%eax), %esi - movl %esi, 60(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl (%eax), %eax - movl %eax, (%esp) - leal 2512(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 2512(%esp), %ebp - movl 2516(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl %ebp, %eax - imull %esi, %eax - movl 2580(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 2576(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 2572(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 2568(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 2564(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 2560(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 2556(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 2552(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 2548(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 2544(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 2540(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 2536(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 2532(%esp), %edi - movl 2528(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 2524(%esp), %esi - movl 2520(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl %eax, (%esp) - leal 2440(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - addl 2440(%esp), %ebp - movl 120(%esp), %eax # 4-byte Reload - adcl 2444(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 2448(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 2452(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 2456(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 2460(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 2464(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 2468(%esp), %eax - 
movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 2472(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 2476(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 2480(%esp), %eax - movl %eax, %esi - movl 84(%esp), %eax # 4-byte Reload - adcl 2484(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 2488(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 2492(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 2496(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 2500(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 2504(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 2508(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl 2616(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 2368(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - andl $1, %ebp - movl 120(%esp), %ecx # 4-byte Reload - addl 2368(%esp), %ecx - movl 100(%esp), %eax # 4-byte Reload - adcl 2372(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 2376(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 2380(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 2384(%esp), %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 2388(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 2392(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 2396(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 2400(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 2404(%esp), %esi - movl %esi, 72(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - adcl 2408(%esp), %esi - movl 96(%esp), %eax # 4-byte Reload - adcl 2412(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 2416(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 2420(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 2424(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 2428(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 2432(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl 2436(%esp), %ebp - movl %ebp, 120(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 116(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 2296(%esp), %ecx - movl 2620(%esp), %eax - movl %eax, %edx - calll .LmulPv544x32 - movl 116(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 2296(%esp), %ebp - movl 100(%esp), %ecx # 4-byte Reload - adcl 2300(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 2304(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 2308(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 2312(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 2316(%esp), %ecx - movl 
%ecx, 92(%esp) # 4-byte Spill - adcl 2320(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 2324(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 2328(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 2332(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - adcl 2336(%esp), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 2340(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 2344(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 108(%esp), %esi # 4-byte Reload - adcl 2348(%esp), %esi - movl 124(%esp), %ecx # 4-byte Reload - adcl 2352(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 2356(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 2360(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 120(%esp), %ebp # 4-byte Reload - adcl 2364(%esp), %ebp - adcl $0, %eax - movl %eax, %edi - movl 2616(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 2224(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 100(%esp), %ecx # 4-byte Reload - addl 2224(%esp), %ecx - movl 112(%esp), %eax # 4-byte Reload - adcl 2228(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 2232(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 2236(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 2240(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 2244(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 2248(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 2252(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 2256(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 2260(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 2264(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 2268(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 2272(%esp), %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 2276(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 2280(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 2284(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl 2288(%esp), %ebp - movl %ebp, 120(%esp) # 4-byte Spill - adcl 2292(%esp), %edi - movl %edi, 116(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %edi - movl %edi, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 2152(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - andl $1, %esi - movl %esi, %ecx - addl 2152(%esp), %edi - movl 112(%esp), %eax # 4-byte Reload - adcl 2156(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 2160(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 2164(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 2168(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), 
%eax # 4-byte Reload - adcl 2172(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 2176(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 2180(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 2184(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 2188(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 2192(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 2196(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 2200(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %ebp # 4-byte Reload - adcl 2204(%esp), %ebp - movl 128(%esp), %edi # 4-byte Reload - adcl 2208(%esp), %edi - movl 132(%esp), %esi # 4-byte Reload - adcl 2212(%esp), %esi - movl 120(%esp), %eax # 4-byte Reload - adcl 2216(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 2220(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 2080(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 112(%esp), %ecx # 4-byte Reload - addl 2080(%esp), %ecx - movl 88(%esp), %eax # 4-byte Reload - adcl 2084(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 2088(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 2092(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 2096(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 2100(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 2104(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 2108(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 2112(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 2116(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 2120(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 2124(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 2128(%esp), %ebp - movl %ebp, 124(%esp) # 4-byte Spill - adcl 2132(%esp), %edi - movl %edi, 128(%esp) # 4-byte Spill - adcl 2136(%esp), %esi - movl %esi, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 2140(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 2144(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 2148(%esp), %esi - sbbl %ebp, %ebp - movl %ecx, %edi - movl %edi, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 2008(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - movl %ebp, %eax - andl $1, %eax - addl 2008(%esp), %edi - movl 88(%esp), %ecx # 4-byte Reload - adcl 2012(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 2016(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 2020(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - 
adcl 2024(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 2028(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 2032(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 2036(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 2040(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 96(%esp), %edi # 4-byte Reload - adcl 2044(%esp), %edi - movl 104(%esp), %ecx # 4-byte Reload - adcl 2048(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 2052(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 2056(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 2060(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 2064(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 120(%esp), %ecx # 4-byte Reload - adcl 2068(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - movl 116(%esp), %ebp # 4-byte Reload - adcl 2072(%esp), %ebp - adcl 2076(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 1936(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 88(%esp), %ecx # 4-byte Reload - addl 1936(%esp), %ecx - movl 80(%esp), %eax # 4-byte Reload - adcl 1940(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1944(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1948(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1952(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 1956(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 1960(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1964(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 1968(%esp), %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1972(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1976(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1980(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 1984(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 1988(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1992(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl 1996(%esp), %ebp - movl %ebp, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 2000(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 2004(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - sbbl %ebp, %ebp - movl %ecx, %edi - movl %edi, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1864(%esp), %ecx - movl 2620(%esp), %eax - movl %eax, %edx - calll .LmulPv544x32 - andl $1, %ebp - movl %ebp, %ecx - addl 1864(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 1868(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1872(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - 
movl 76(%esp), %eax # 4-byte Reload - adcl 1876(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - adcl 1880(%esp), %edi - adcl 1884(%esp), %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1888(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1892(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %esi # 4-byte Reload - adcl 1896(%esp), %esi - movl 104(%esp), %eax # 4-byte Reload - adcl 1900(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1904(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1908(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 1912(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %ebp # 4-byte Reload - adcl 1916(%esp), %ebp - movl 120(%esp), %eax # 4-byte Reload - adcl 1920(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1924(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1928(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1932(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 1792(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 80(%esp), %ecx # 4-byte Reload - addl 1792(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1796(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1800(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 1804(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1808(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1812(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1816(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 1820(%esp), %esi - movl %esi, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1824(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1828(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1832(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 1836(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - adcl 1840(%esp), %ebp - movl %ebp, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1844(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %edi # 4-byte Reload - adcl 1848(%esp), %edi - movl 100(%esp), %ebp # 4-byte Reload - adcl 1852(%esp), %ebp - movl 112(%esp), %eax # 4-byte Reload - adcl 1856(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1860(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1720(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - andl $1, %esi - movl %esi, %eax - movl 80(%esp), %ecx # 4-byte Reload - addl 1720(%esp), %ecx - movl 92(%esp), %ecx # 4-byte Reload - adcl 1724(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload 
- adcl 1728(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1732(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1736(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1740(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1744(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1748(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 104(%esp), %esi # 4-byte Reload - adcl 1752(%esp), %esi - movl 108(%esp), %ecx # 4-byte Reload - adcl 1756(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 1760(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 1764(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 1768(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 120(%esp), %ecx # 4-byte Reload - adcl 1772(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - adcl 1776(%esp), %edi - movl %edi, 116(%esp) # 4-byte Spill - adcl 1780(%esp), %ebp - movl %ebp, 100(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1784(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl 1788(%esp), %ebp - adcl $0, %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 1648(%esp), %ecx - movl 2612(%esp), %eax - movl %eax, %edx - calll .LmulPv544x32 - movl 92(%esp), %eax # 4-byte Reload - addl 1648(%esp), %eax - movl 76(%esp), %edi # 4-byte Reload - adcl 1652(%esp), %edi - movl 68(%esp), %ecx # 4-byte Reload - adcl 1656(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1660(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1664(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1668(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1672(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - adcl 1676(%esp), %esi - movl %esi, 104(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1680(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 1684(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 1688(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 1692(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 120(%esp), %ecx # 4-byte Reload - adcl 1696(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 1700(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1704(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1708(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - adcl 1712(%esp), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1716(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %eax, %ebp - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1576(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - andl $1, %esi - movl %esi, %ecx - addl 1576(%esp), %ebp - adcl 1580(%esp), %edi - movl %edi, 76(%esp) # 4-byte 
Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1584(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 1588(%esp), %ebp - movl 72(%esp), %edi # 4-byte Reload - adcl 1592(%esp), %edi - movl 84(%esp), %esi # 4-byte Reload - adcl 1596(%esp), %esi - movl 96(%esp), %eax # 4-byte Reload - adcl 1600(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1604(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1608(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1612(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 1616(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 1620(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1624(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1628(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1632(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1636(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1640(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1644(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 1504(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 76(%esp), %ecx # 4-byte Reload - addl 1504(%esp), %ecx - movl 68(%esp), %eax # 4-byte Reload - adcl 1508(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 1512(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - adcl 1516(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - adcl 1520(%esp), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1524(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1528(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1532(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1536(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %edi # 4-byte Reload - adcl 1540(%esp), %edi - movl 132(%esp), %eax # 4-byte Reload - adcl 1544(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1548(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1552(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1556(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1560(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1564(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %esi # 4-byte Reload - adcl 1568(%esp), %esi - movl 92(%esp), %eax # 4-byte Reload - adcl 1572(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1432(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - movl 76(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 
1432(%esp), %ebp - movl 68(%esp), %ecx # 4-byte Reload - adcl 1436(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1440(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1444(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1448(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1452(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1456(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 108(%esp), %ebp # 4-byte Reload - adcl 1460(%esp), %ebp - movl 124(%esp), %ecx # 4-byte Reload - adcl 1464(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - adcl 1468(%esp), %edi - movl %edi, 128(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 1472(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 120(%esp), %edi # 4-byte Reload - adcl 1476(%esp), %edi - movl 116(%esp), %ecx # 4-byte Reload - adcl 1480(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1484(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1488(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1492(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - adcl 1496(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1500(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 1360(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 68(%esp), %ecx # 4-byte Reload - addl 1360(%esp), %ecx - movl 64(%esp), %eax # 4-byte Reload - adcl 1364(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1368(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1372(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1376(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1380(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 1384(%esp), %ebp - movl 124(%esp), %eax # 4-byte Reload - adcl 1388(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 1392(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 1396(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl 1400(%esp), %edi - movl %edi, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1404(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 1408(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 1412(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1416(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1420(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1424(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1428(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %edi - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1288(%esp), %ecx - movl 
2620(%esp), %edx - calll .LmulPv544x32 - movl 68(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 1288(%esp), %edi - movl 64(%esp), %ecx # 4-byte Reload - adcl 1292(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1296(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1300(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1304(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1308(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - adcl 1312(%esp), %ebp - movl 124(%esp), %ecx # 4-byte Reload - adcl 1316(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 1320(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 1324(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 120(%esp), %ecx # 4-byte Reload - adcl 1328(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 1332(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - adcl 1336(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 112(%esp), %edi # 4-byte Reload - adcl 1340(%esp), %edi - movl 88(%esp), %esi # 4-byte Reload - adcl 1344(%esp), %esi - movl 80(%esp), %ecx # 4-byte Reload - adcl 1348(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1352(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1356(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 1216(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 64(%esp), %ecx # 4-byte Reload - addl 1216(%esp), %ecx - movl 72(%esp), %eax # 4-byte Reload - adcl 1220(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1224(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1228(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1232(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 1236(%esp), %ebp - movl %ebp, 108(%esp) # 4-byte Spill - movl 124(%esp), %ebp # 4-byte Reload - adcl 1240(%esp), %ebp - movl 128(%esp), %eax # 4-byte Reload - adcl 1244(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 1248(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 1252(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1256(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1260(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 1264(%esp), %edi - movl %edi, 112(%esp) # 4-byte Spill - adcl 1268(%esp), %esi - movl %esi, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1272(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1276(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1280(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1284(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - sbbl %edi, %edi - movl %ecx, %eax - movl %ecx, %esi - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 
1144(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - movl %edi, %eax - andl $1, %eax - addl 1144(%esp), %esi - movl 72(%esp), %ecx # 4-byte Reload - adcl 1148(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1152(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1156(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 104(%esp), %edi # 4-byte Reload - adcl 1160(%esp), %edi - movl 108(%esp), %ecx # 4-byte Reload - adcl 1164(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - adcl 1168(%esp), %ebp - movl %ebp, 124(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 1172(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 132(%esp), %ecx # 4-byte Reload - adcl 1176(%esp), %ecx - movl %ecx, 132(%esp) # 4-byte Spill - movl 120(%esp), %esi # 4-byte Reload - adcl 1180(%esp), %esi - movl 116(%esp), %ecx # 4-byte Reload - adcl 1184(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1188(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1192(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1196(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1200(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1204(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1208(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1212(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 1072(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 72(%esp), %ecx # 4-byte Reload - addl 1072(%esp), %ecx - movl 84(%esp), %eax # 4-byte Reload - adcl 1076(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %ebp # 4-byte Reload - adcl 1080(%esp), %ebp - adcl 1084(%esp), %edi - movl %edi, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1088(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 1092(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 1096(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 1100(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl 1104(%esp), %esi - movl %esi, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1108(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 1112(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 1116(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1120(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1124(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1128(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1132(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1136(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1140(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 72(%esp) # 4-byte 
Spill - movl %ecx, %eax - movl %ecx, %edi - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1000(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - movl 72(%esp), %eax # 4-byte Reload - andl $1, %eax - addl 1000(%esp), %edi - movl 84(%esp), %ecx # 4-byte Reload - adcl 1004(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - adcl 1008(%esp), %ebp - movl %ebp, 96(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1012(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1016(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 124(%esp), %ecx # 4-byte Reload - adcl 1020(%esp), %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 128(%esp), %ecx # 4-byte Reload - adcl 1024(%esp), %ecx - movl %ecx, 128(%esp) # 4-byte Spill - movl 132(%esp), %ebp # 4-byte Reload - adcl 1028(%esp), %ebp - movl 120(%esp), %ecx # 4-byte Reload - adcl 1032(%esp), %ecx - movl %ecx, 120(%esp) # 4-byte Spill - movl 116(%esp), %edi # 4-byte Reload - adcl 1036(%esp), %edi - adcl 1040(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1044(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1048(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1052(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 1056(%esp), %esi - movl 76(%esp), %ecx # 4-byte Reload - adcl 1060(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1064(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1068(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 928(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 84(%esp), %ecx # 4-byte Reload - addl 928(%esp), %ecx - movl 96(%esp), %eax # 4-byte Reload - adcl 932(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 936(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 940(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 944(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 948(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - adcl 952(%esp), %ebp - movl %ebp, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 956(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl 960(%esp), %edi - movl %edi, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 964(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 968(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 972(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 976(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 980(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 984(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 988(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 996(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - sbbl 
%esi, %esi - movl %ecx, %eax - movl %ecx, %ebp - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 856(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - andl $1, %esi - movl %esi, %ecx - addl 856(%esp), %ebp - movl 96(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 864(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 872(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 876(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %ebp # 4-byte Reload - adcl 888(%esp), %ebp - movl 100(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 896(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 912(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 916(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 920(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 924(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 2616(%esp), %ecx - movl %ecx, %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 784(%esp), %ecx - movl 2612(%esp), %eax - movl %eax, %edx - calll .LmulPv544x32 - movl 96(%esp), %ecx # 4-byte Reload - addl 784(%esp), %ecx - movl 104(%esp), %eax # 4-byte Reload - adcl 788(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 792(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 796(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl 812(%esp), %ebp - movl %ebp, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 820(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %edi # 4-byte Reload - adcl 828(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 836(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 840(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 72(%esp) # 
4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 852(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - movl %ecx, %ebp - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 712(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - andl $1, %esi - movl %esi, %ecx - addl 712(%esp), %ebp - movl 104(%esp), %eax # 4-byte Reload - adcl 716(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 720(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %esi # 4-byte Reload - adcl 728(%esp), %esi - movl 132(%esp), %eax # 4-byte Reload - adcl 732(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %ebp # 4-byte Reload - adcl 736(%esp), %ebp - movl 116(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 752(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 756(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 768(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 780(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 52(%eax), %eax - movl %eax, (%esp) - leal 640(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 104(%esp), %ecx # 4-byte Reload - addl 640(%esp), %ecx - movl 108(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - adcl 652(%esp), %esi - movl %esi, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 656(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl 660(%esp), %ebp - movl %ebp, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 680(%esp), %edi - movl 92(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 688(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 692(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %ebp # 4-byte Reload - adcl 696(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), 
%eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 104(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 568(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - movl 104(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 568(%esp), %esi - movl 108(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - adcl 576(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 588(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 600(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 604(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 608(%esp), %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 616(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - adcl 624(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 636(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 56(%eax), %eax - movl %eax, (%esp) - leal 496(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 108(%esp), %ecx # 4-byte Reload - addl 496(%esp), %ecx - movl 124(%esp), %eax # 4-byte Reload - adcl 500(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 524(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl 528(%esp), %ebp - movl 80(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 540(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 68(%esp), %edi # 4-byte Reload - adcl 544(%esp), %edi - movl 64(%esp), %eax # 4-byte Reload - adcl 548(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 552(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 
560(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 564(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - sbbl %eax, %eax - movl %eax, 108(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 424(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - movl 108(%esp), %ecx # 4-byte Reload - andl $1, %ecx - addl 424(%esp), %esi - movl 124(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 128(%esp), %eax # 4-byte Reload - adcl 432(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 436(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - movl 120(%esp), %esi # 4-byte Reload - adcl 440(%esp), %esi - movl 116(%esp), %eax # 4-byte Reload - adcl 444(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 456(%esp), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 472(%esp), %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %ebp # 4-byte Reload - adcl 480(%esp), %ebp - movl 84(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 60(%eax), %eax - movl %eax, (%esp) - leal 352(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 124(%esp), %ecx # 4-byte Reload - addl 352(%esp), %ecx - movl 128(%esp), %eax # 4-byte Reload - adcl 356(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %eax # 4-byte Reload - adcl 360(%esp), %eax - movl %eax, 132(%esp) # 4-byte Spill - adcl 364(%esp), %esi - movl %esi, 120(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 100(%esp), %edi # 4-byte Reload - adcl 372(%esp), %edi - movl 112(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 404(%esp), %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 408(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 
412(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 420(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - sbbl %esi, %esi - movl %ecx, %eax - movl %ecx, %ebp - imull 60(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 280(%esp), %ecx - movl 2620(%esp), %edx - calll .LmulPv544x32 - andl $1, %esi - movl %esi, %ecx - addl 280(%esp), %ebp - movl 128(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 128(%esp) # 4-byte Spill - movl 132(%esp), %esi # 4-byte Reload - adcl 288(%esp), %esi - movl 120(%esp), %eax # 4-byte Reload - adcl 292(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - movl 116(%esp), %ebp # 4-byte Reload - adcl 296(%esp), %ebp - adcl 300(%esp), %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 88(%esp), %edi # 4-byte Reload - adcl 308(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 312(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 336(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 344(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 124(%esp) # 4-byte Spill - movl 2616(%esp), %eax - movl 64(%eax), %eax - movl %eax, (%esp) - leal 208(%esp), %ecx - movl 2612(%esp), %edx - calll .LmulPv544x32 - movl 128(%esp), %ecx # 4-byte Reload - addl 208(%esp), %ecx - adcl 212(%esp), %esi - movl %esi, 132(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 120(%esp) # 4-byte Spill - adcl 220(%esp), %ebp - movl %ebp, 116(%esp) # 4-byte Spill - movl 100(%esp), %ebp # 4-byte Reload - adcl 224(%esp), %ebp - movl 112(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 232(%esp), %edi - movl %edi, 88(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 236(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 260(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 264(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 
268(%esp), %eax
- movl %eax, 104(%esp) # 4-byte Spill
- movl 108(%esp), %eax # 4-byte Reload
- adcl 272(%esp), %eax
- movl %eax, 108(%esp) # 4-byte Spill
- movl 124(%esp), %eax # 4-byte Reload
- adcl 276(%esp), %eax
- movl %eax, 124(%esp) # 4-byte Spill
- sbbl %edi, %edi
- movl 60(%esp), %eax # 4-byte Reload
- imull %ecx, %eax
- movl %ecx, %esi
- movl %eax, (%esp)
- leal 136(%esp), %ecx
- movl 2620(%esp), %eax
- movl %eax, %edx
- calll .LmulPv544x32
- andl $1, %edi
- addl 136(%esp), %esi
- movl 116(%esp), %edx # 4-byte Reload
- movl 132(%esp), %eax # 4-byte Reload
- adcl 140(%esp), %eax
- movl %eax, 132(%esp) # 4-byte Spill
- movl 120(%esp), %eax # 4-byte Reload
- adcl 144(%esp), %eax
- movl %eax, 120(%esp) # 4-byte Spill
- adcl 148(%esp), %edx
- movl %edx, 116(%esp) # 4-byte Spill
- adcl 152(%esp), %ebp
- movl %ebp, 100(%esp) # 4-byte Spill
- movl 112(%esp), %eax # 4-byte Reload
- adcl 156(%esp), %eax
- movl %eax, 112(%esp) # 4-byte Spill
- movl 88(%esp), %eax # 4-byte Reload
- adcl 160(%esp), %eax
- movl 80(%esp), %ecx # 4-byte Reload
- adcl 164(%esp), %ecx
- movl %ecx, 80(%esp) # 4-byte Spill
- movl 92(%esp), %esi # 4-byte Reload
- adcl 168(%esp), %esi
- movl %esi, 92(%esp) # 4-byte Spill
- movl 76(%esp), %ecx # 4-byte Reload
- adcl 172(%esp), %ecx
- movl %ecx, 76(%esp) # 4-byte Spill
- movl 68(%esp), %ecx # 4-byte Reload
- adcl 176(%esp), %ecx
- movl %ecx, 68(%esp) # 4-byte Spill
- movl 64(%esp), %ecx # 4-byte Reload
- adcl 180(%esp), %ecx
- movl %ecx, 64(%esp) # 4-byte Spill
- movl 72(%esp), %ecx # 4-byte Reload
- adcl 184(%esp), %ecx
- movl %ecx, 72(%esp) # 4-byte Spill
- movl 84(%esp), %ecx # 4-byte Reload
- adcl 188(%esp), %ecx
- movl %ecx, 84(%esp) # 4-byte Spill
- movl 96(%esp), %ecx # 4-byte Reload
- adcl 192(%esp), %ecx
- movl %ecx, 96(%esp) # 4-byte Spill
- movl 104(%esp), %ecx # 4-byte Reload
- adcl 196(%esp), %ecx
- movl %ecx, 104(%esp) # 4-byte Spill
- movl 108(%esp), %ecx # 4-byte Reload
- adcl 200(%esp), %ecx
- movl %ecx, 108(%esp) # 4-byte Spill
- movl 124(%esp), %ecx # 4-byte Reload
- adcl 204(%esp), %ecx
- movl %ecx, 124(%esp) # 4-byte Spill
- adcl $0, %edi
- movl 132(%esp), %ecx # 4-byte Reload
- movl 2620(%esp), %ebx
- subl (%ebx), %ecx
- movl %ecx, 12(%esp) # 4-byte Spill
- movl 120(%esp), %ecx # 4-byte Reload
- sbbl 4(%ebx), %ecx
- movl %ecx, 16(%esp) # 4-byte Spill
- sbbl 8(%ebx), %edx
- movl %edx, 20(%esp) # 4-byte Spill
- sbbl 12(%ebx), %ebp
- movl %ebp, 24(%esp) # 4-byte Spill
- movl 112(%esp), %ebp # 4-byte Reload
- movl %eax, %edx
- sbbl 16(%ebx), %ebp
- movl %ebp, 28(%esp) # 4-byte Spill
- sbbl 20(%ebx), %eax
- movl %eax, 32(%esp) # 4-byte Spill
- movl 80(%esp), %eax # 4-byte Reload
- sbbl 24(%ebx), %eax
- movl %eax, 36(%esp) # 4-byte Spill
- sbbl 28(%ebx), %esi
- movl %esi, 40(%esp) # 4-byte Spill
- movl 76(%esp), %esi # 4-byte Reload
- sbbl 32(%ebx), %esi
- movl %esi, 44(%esp) # 4-byte Spill
- movl 68(%esp), %esi # 4-byte Reload
- sbbl 36(%ebx), %esi
- movl %esi, 48(%esp) # 4-byte Spill
- movl 64(%esp), %esi # 4-byte Reload
- sbbl 40(%ebx), %esi
- movl %esi, 52(%esp) # 4-byte Spill
- movl 72(%esp), %esi # 4-byte Reload
- sbbl 44(%ebx), %esi
- movl %esi, 56(%esp) # 4-byte Spill
- movl 84(%esp), %esi # 4-byte Reload
- sbbl 48(%ebx), %esi
- movl %esi, 60(%esp) # 4-byte Spill
- movl 96(%esp), %ebp # 4-byte Reload
- sbbl 52(%ebx), %ebp
- movl %ebp, 88(%esp) # 4-byte Spill
- movl 104(%esp), %esi # 4-byte Reload
- sbbl 56(%ebx), %esi
- movl %esi, 128(%esp) # 4-byte Spill
- movl %ebx, %ebp
- movl 108(%esp), %ebx # 4-byte Reload
- sbbl 60(%ebp), %ebx
- movl 124(%esp), %esi # 4-byte Reload
- sbbl 64(%ebp), %esi
- movl %esi, %ebp
- sbbl $0, %edi
- andl $1, %edi
- jne .LBB258_2
-# BB#1:
- movl %ebx, 108(%esp) # 4-byte Spill
-.LBB258_2:
- movl %edi, %ebx
- testb %bl, %bl
- movl 132(%esp), %ebx # 4-byte Reload
- jne .LBB258_4
-# BB#3:
- movl 12(%esp), %ebx # 4-byte Reload
-.LBB258_4:
- movl 2608(%esp), %eax
- movl %ebx, (%eax)
- movl 120(%esp), %ebx # 4-byte Reload
- jne .LBB258_6
-# BB#5:
- movl 16(%esp), %ebx # 4-byte Reload
-.LBB258_6:
- movl %ebx, 4(%eax)
- jne .LBB258_8
-# BB#7:
- movl 20(%esp), %ecx # 4-byte Reload
- movl %ecx, 116(%esp) # 4-byte Spill
-.LBB258_8:
- movl 116(%esp), %ecx # 4-byte Reload
- movl %ecx, 8(%eax)
- jne .LBB258_10
-# BB#9:
- movl 24(%esp), %ecx # 4-byte Reload
- movl %ecx, 100(%esp) # 4-byte Spill
-.LBB258_10:
- movl 100(%esp), %ecx # 4-byte Reload
- movl %ecx, 12(%eax)
- movl 112(%esp), %esi # 4-byte Reload
- jne .LBB258_12
-# BB#11:
- movl 28(%esp), %esi # 4-byte Reload
-.LBB258_12:
- movl %esi, 16(%eax)
- movl 80(%esp), %ecx # 4-byte Reload
- jne .LBB258_14
-# BB#13:
- movl 32(%esp), %edx # 4-byte Reload
-.LBB258_14:
- movl %edx, 20(%eax)
- jne .LBB258_16
-# BB#15:
- movl 36(%esp), %ecx # 4-byte Reload
-.LBB258_16:
- movl %ecx, 24(%eax)
- movl 92(%esp), %ecx # 4-byte Reload
- jne .LBB258_18
-# BB#17:
- movl 40(%esp), %ecx # 4-byte Reload
-.LBB258_18:
- movl %ecx, 28(%eax)
- movl 76(%esp), %ecx # 4-byte Reload
- jne .LBB258_20
-# BB#19:
- movl 44(%esp), %ecx # 4-byte Reload
-.LBB258_20:
- movl %ecx, 32(%eax)
- movl 68(%esp), %ecx # 4-byte Reload
- jne .LBB258_22
-# BB#21:
- movl 48(%esp), %ecx # 4-byte Reload
-.LBB258_22:
- movl %ecx, 36(%eax)
- movl 64(%esp), %ecx # 4-byte Reload
- jne .LBB258_24
-# BB#23:
- movl 52(%esp), %ecx # 4-byte Reload
-.LBB258_24:
- movl %ecx, 40(%eax)
- movl 72(%esp), %ecx # 4-byte Reload
- jne .LBB258_26
-# BB#25:
- movl 56(%esp), %ecx # 4-byte Reload
-.LBB258_26:
- movl %ecx, 44(%eax)
- movl 84(%esp), %ecx # 4-byte Reload
- jne .LBB258_28
-# BB#27:
- movl 60(%esp), %ecx # 4-byte Reload
-.LBB258_28:
- movl %ecx, 48(%eax)
- movl 96(%esp), %ecx # 4-byte Reload
- jne .LBB258_30
-# BB#29:
- movl 88(%esp), %ecx # 4-byte Reload
-.LBB258_30:
- movl %ecx, 52(%eax)
- movl 104(%esp), %ecx # 4-byte Reload
- jne .LBB258_32
-# BB#31:
- movl 128(%esp), %ecx # 4-byte Reload
-.LBB258_32:
- movl %ecx, 56(%eax)
- movl 108(%esp), %ecx # 4-byte Reload
- movl %ecx, 60(%eax)
- movl 124(%esp), %ecx # 4-byte Reload
- jne .LBB258_34
-# BB#33:
- movl %ebp, %ecx
-.LBB258_34:
- movl %ecx, 64(%eax)
- addl $2588, %esp # imm = 0xA1C
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end258:
- .size mcl_fp_mont17L, .Lfunc_end258-mcl_fp_mont17L
-
- .globl mcl_fp_montNF17L
- .align 16, 0x90
- .type mcl_fp_montNF17L,@function
-mcl_fp_montNF17L: # @mcl_fp_montNF17L
-# BB#0:
- pushl %ebp
- pushl %ebx
- pushl %edi
- pushl %esi
- subl $2572, %esp # imm = 0xA0C
- calll .L259$pb
-.L259$pb:
- popl %ebx
-.Ltmp60:
- addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp60-.L259$pb), %ebx
- movl 2604(%esp), %eax
- movl -4(%eax), %esi
- movl %esi, 48(%esp) # 4-byte Spill
- movl 2600(%esp), %eax
- movl (%eax), %eax
- movl %eax, (%esp)
- leal 2496(%esp), %ecx
- movl 2596(%esp), %edx
- calll .LmulPv544x32
- movl 2496(%esp), %edi
- movl 2500(%esp), %eax
- movl %eax, 112(%esp) # 4-byte Spill
- movl %edi, %eax
- imull %esi, %eax
- movl 2564(%esp), %ecx
- movl %ecx, 116(%esp) # 4-byte Spill
- movl 2560(%esp), %ecx
- movl %ecx, 100(%esp) # 4-byte Spill
- movl 2556(%esp), %ecx
- movl %ecx, 108(%esp) # 4-byte Spill
- movl 2552(%esp), %ecx
- movl
%ecx, 96(%esp) # 4-byte Spill - movl 2548(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 2544(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 2540(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 2536(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 2532(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 2528(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 2524(%esp), %ebp - movl 2520(%esp), %esi - movl 2516(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 2512(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 2508(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 2504(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl %eax, (%esp) - leal 2424(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 2424(%esp), %edi - movl 112(%esp), %eax # 4-byte Reload - adcl 2428(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 2432(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 2436(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 2440(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 2444(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 2448(%esp), %esi - movl %esi, 80(%esp) # 4-byte Spill - adcl 2452(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 2456(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 2460(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 2464(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 2468(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %esi # 4-byte Reload - adcl 2472(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 2476(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %edi # 4-byte Reload - adcl 2480(%esp), %edi - movl 108(%esp), %eax # 4-byte Reload - adcl 2484(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 2488(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 2492(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 4(%eax), %eax - movl %eax, (%esp) - leal 2352(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 2420(%esp), %ecx - movl 112(%esp), %edx # 4-byte Reload - addl 2352(%esp), %edx - movl 92(%esp), %eax # 4-byte Reload - adcl 2356(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 2360(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 2364(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 2368(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 2372(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 2376(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 2380(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 2384(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 2388(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 2392(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 2396(%esp), %esi - movl %esi, %ebp - movl 88(%esp), %eax # 
4-byte Reload - adcl 2400(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 2404(%esp), %edi - movl 108(%esp), %eax # 4-byte Reload - adcl 2408(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 2412(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 2416(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl $0, %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl %edx, %esi - movl %esi, %eax - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 2280(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 2280(%esp), %esi - movl 92(%esp), %eax # 4-byte Reload - adcl 2284(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 2288(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 2292(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 2296(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 2300(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 2304(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 2308(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 2312(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 2316(%esp), %esi - movl 72(%esp), %eax # 4-byte Reload - adcl 2320(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 2324(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 2328(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 2332(%esp), %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 2336(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %ebp # 4-byte Reload - adcl 2340(%esp), %ebp - movl 116(%esp), %edi # 4-byte Reload - adcl 2344(%esp), %edi - movl 112(%esp), %eax # 4-byte Reload - adcl 2348(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 8(%eax), %eax - movl %eax, (%esp) - leal 2208(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 2276(%esp), %eax - movl 92(%esp), %edx # 4-byte Reload - addl 2208(%esp), %edx - movl 104(%esp), %ecx # 4-byte Reload - adcl 2212(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 2216(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 2220(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 2224(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 2228(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 2232(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 2236(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl 2240(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 2244(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 2248(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 2252(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 2256(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 
4-byte Reload - adcl 2260(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - adcl 2264(%esp), %ebp - adcl 2268(%esp), %edi - movl %edi, %esi - movl 112(%esp), %ecx # 4-byte Reload - adcl 2272(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 92(%esp) # 4-byte Spill - movl %edx, %edi - movl %edi, %eax - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 2136(%esp), %ecx - movl 2604(%esp), %eax - movl %eax, %edx - calll .LmulPv544x32 - addl 2136(%esp), %edi - movl 104(%esp), %eax # 4-byte Reload - adcl 2140(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 2144(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 2148(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 2152(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 2156(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 2160(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 2164(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 2168(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 2172(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 2176(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 2180(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 2184(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %edi # 4-byte Reload - adcl 2188(%esp), %edi - adcl 2192(%esp), %ebp - movl %ebp, 100(%esp) # 4-byte Spill - adcl 2196(%esp), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 2200(%esp), %esi - movl 92(%esp), %eax # 4-byte Reload - adcl 2204(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 12(%eax), %eax - movl %eax, (%esp) - leal 2064(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 2132(%esp), %eax - movl 104(%esp), %edx # 4-byte Reload - addl 2064(%esp), %edx - movl 76(%esp), %ecx # 4-byte Reload - adcl 2068(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 2072(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 2076(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 2080(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 2084(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ebp # 4-byte Reload - adcl 2088(%esp), %ebp - movl 68(%esp), %ecx # 4-byte Reload - adcl 2092(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 2096(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 2100(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 2104(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 2108(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - adcl 2112(%esp), %edi - movl 100(%esp), %ecx # 4-byte Reload - adcl 2116(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 2120(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - 
adcl 2124(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 2128(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 104(%esp) # 4-byte Spill - movl %edx, %esi - movl %esi, %eax - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1992(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 1992(%esp), %esi - movl 76(%esp), %eax # 4-byte Reload - adcl 1996(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 2000(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 2004(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 2008(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 2012(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 2016(%esp), %ebp - movl %ebp, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 2020(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 2024(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 2028(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 2032(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 2036(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 2040(%esp), %edi - movl %edi, 108(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 2044(%esp), %esi - movl 116(%esp), %eax # 4-byte Reload - adcl 2048(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 2052(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %edi # 4-byte Reload - adcl 2056(%esp), %edi - movl 104(%esp), %eax # 4-byte Reload - adcl 2060(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 16(%eax), %eax - movl %eax, (%esp) - leal 1920(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 1988(%esp), %eax - movl 76(%esp), %edx # 4-byte Reload - addl 1920(%esp), %edx - movl 84(%esp), %ecx # 4-byte Reload - adcl 1924(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1928(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1932(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 1936(%esp), %ebp - movl 56(%esp), %ecx # 4-byte Reload - adcl 1940(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1944(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1948(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1952(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1956(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1960(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1964(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - adcl 1968(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 1972(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 1976(%esp), %esi - adcl 1980(%esp), %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - 
adcl 1984(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl %edx, %eax - movl %edx, %edi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1848(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 1848(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 1852(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1856(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1860(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 1864(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1868(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1872(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1876(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1880(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1884(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1888(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1892(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1896(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1900(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 1904(%esp), %esi - movl %esi, 112(%esp) # 4-byte Spill - movl 92(%esp), %ebp # 4-byte Reload - adcl 1908(%esp), %ebp - movl 104(%esp), %eax # 4-byte Reload - adcl 1912(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1916(%esp), %eax - movl %eax, %edi - movl 2600(%esp), %eax - movl 20(%eax), %eax - movl %eax, (%esp) - leal 1776(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 1844(%esp), %eax - movl 84(%esp), %edx # 4-byte Reload - addl 1776(%esp), %edx - movl 80(%esp), %ecx # 4-byte Reload - adcl 1780(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - movl 60(%esp), %ecx # 4-byte Reload - adcl 1784(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1788(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1792(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1796(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1800(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1804(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 88(%esp), %esi # 4-byte Reload - adcl 1808(%esp), %esi - movl 96(%esp), %ecx # 4-byte Reload - adcl 1812(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1816(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1820(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 1824(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1828(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - adcl 1832(%esp), %ebp - movl %ebp, 92(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1836(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - adcl 1840(%esp), %edi - adcl $0, %eax - 
movl %eax, 84(%esp) # 4-byte Spill - movl %edx, %ebp - movl %ebp, %eax - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1704(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 1704(%esp), %ebp - movl 80(%esp), %eax # 4-byte Reload - adcl 1708(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1712(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1716(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1720(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1724(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1728(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1732(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl 1736(%esp), %esi - movl %esi, %ebp - movl 96(%esp), %esi # 4-byte Reload - adcl 1740(%esp), %esi - movl 108(%esp), %eax # 4-byte Reload - adcl 1744(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1748(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1752(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1756(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1760(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1764(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 1768(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 1772(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 24(%eax), %eax - movl %eax, (%esp) - leal 1632(%esp), %ecx - movl 2596(%esp), %eax - movl %eax, %edx - calll .LmulPv544x32 - movl 1700(%esp), %eax - movl 80(%esp), %edx # 4-byte Reload - addl 1632(%esp), %edx - movl 60(%esp), %ecx # 4-byte Reload - adcl 1636(%esp), %ecx - movl %ecx, 60(%esp) # 4-byte Spill - movl 52(%esp), %ecx # 4-byte Reload - adcl 1640(%esp), %ecx - movl %ecx, 52(%esp) # 4-byte Spill - movl 56(%esp), %ecx # 4-byte Reload - adcl 1644(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - movl 68(%esp), %ecx # 4-byte Reload - adcl 1648(%esp), %ecx - movl %ecx, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1652(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1656(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - adcl 1660(%esp), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - adcl 1664(%esp), %esi - movl %esi, 96(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1668(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 100(%esp), %esi # 4-byte Reload - adcl 1672(%esp), %esi - movl 116(%esp), %ecx # 4-byte Reload - adcl 1676(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1680(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1684(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1688(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - adcl 1692(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 84(%esp), %ecx # 4-byte Reload - adcl 1696(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 80(%esp) # 4-byte Spill - movl %edx, %edi - movl %edi, %eax - imull 48(%esp), %eax # 
4-byte Folded Reload - movl %eax, (%esp) - leal 1560(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 1560(%esp), %edi - movl 60(%esp), %eax # 4-byte Reload - adcl 1564(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1568(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1572(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1576(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %edi # 4-byte Reload - adcl 1580(%esp), %edi - movl 64(%esp), %ebp # 4-byte Reload - adcl 1584(%esp), %ebp - movl 88(%esp), %eax # 4-byte Reload - adcl 1588(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1592(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1596(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 1600(%esp), %esi - movl %esi, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1604(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %esi # 4-byte Reload - adcl 1608(%esp), %esi - movl 92(%esp), %eax # 4-byte Reload - adcl 1612(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1616(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1620(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1624(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1628(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 28(%eax), %eax - movl %eax, (%esp) - leal 1488(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 1556(%esp), %eax - movl 60(%esp), %ecx # 4-byte Reload - addl 1488(%esp), %ecx - movl 52(%esp), %edx # 4-byte Reload - adcl 1492(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 1496(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 1500(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - adcl 1504(%esp), %edi - adcl 1508(%esp), %ebp - movl %ebp, 64(%esp) # 4-byte Spill - movl 88(%esp), %edx # 4-byte Reload - adcl 1512(%esp), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 96(%esp), %edx # 4-byte Reload - adcl 1516(%esp), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 108(%esp), %edx # 4-byte Reload - adcl 1520(%esp), %edx - movl %edx, 108(%esp) # 4-byte Spill - movl 100(%esp), %edx # 4-byte Reload - adcl 1524(%esp), %edx - movl %edx, 100(%esp) # 4-byte Spill - movl 116(%esp), %edx # 4-byte Reload - adcl 1528(%esp), %edx - movl %edx, 116(%esp) # 4-byte Spill - adcl 1532(%esp), %esi - movl %esi, %ebp - movl 92(%esp), %edx # 4-byte Reload - adcl 1536(%esp), %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 104(%esp), %edx # 4-byte Reload - adcl 1540(%esp), %edx - movl %edx, 104(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 1544(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 1548(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 1552(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 60(%esp) # 4-byte Spill - movl %ecx, %esi - movl %esi, %eax - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1416(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 
1416(%esp), %esi - movl 52(%esp), %eax # 4-byte Reload - adcl 1420(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1424(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %esi # 4-byte Reload - adcl 1428(%esp), %esi - adcl 1432(%esp), %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 64(%esp), %edi # 4-byte Reload - adcl 1436(%esp), %edi - movl 88(%esp), %eax # 4-byte Reload - adcl 1440(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1444(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1448(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1452(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1456(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - adcl 1460(%esp), %ebp - movl %ebp, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1464(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1468(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1472(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1476(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1480(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 1484(%esp), %ebp - movl 2600(%esp), %eax - movl 32(%eax), %eax - movl %eax, (%esp) - leal 1344(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 1412(%esp), %eax - movl 52(%esp), %edx # 4-byte Reload - addl 1344(%esp), %edx - movl 56(%esp), %ecx # 4-byte Reload - adcl 1348(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl 1352(%esp), %esi - movl %esi, 68(%esp) # 4-byte Spill - movl 72(%esp), %ecx # 4-byte Reload - adcl 1356(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - adcl 1360(%esp), %edi - movl 88(%esp), %ecx # 4-byte Reload - adcl 1364(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1368(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1372(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1376(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 1380(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1384(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1388(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - movl 104(%esp), %ecx # 4-byte Reload - adcl 1392(%esp), %ecx - movl %ecx, 104(%esp) # 4-byte Spill - movl 76(%esp), %esi # 4-byte Reload - adcl 1396(%esp), %esi - movl 84(%esp), %ecx # 4-byte Reload - adcl 1400(%esp), %ecx - movl %ecx, 84(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1404(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - adcl 1408(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 52(%esp) # 4-byte Spill - movl %edx, %ebp - movl %ebp, %eax - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1272(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 1272(%esp), %ebp - movl 56(%esp), %eax # 4-byte Reload - adcl 1276(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1280(%esp), 
%eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1284(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl 1288(%esp), %edi - movl 88(%esp), %eax # 4-byte Reload - adcl 1292(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1296(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1300(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %ebp # 4-byte Reload - adcl 1304(%esp), %ebp - movl 116(%esp), %eax # 4-byte Reload - adcl 1308(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1312(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1316(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1320(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 1324(%esp), %esi - movl %esi, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1328(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1332(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1336(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 1340(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 36(%eax), %eax - movl %eax, (%esp) - leal 1200(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 1268(%esp), %eax - movl 56(%esp), %ecx # 4-byte Reload - addl 1200(%esp), %ecx - movl 68(%esp), %edx # 4-byte Reload - adcl 1204(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 1208(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - adcl 1212(%esp), %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 88(%esp), %edx # 4-byte Reload - adcl 1216(%esp), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 96(%esp), %edx # 4-byte Reload - adcl 1220(%esp), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 108(%esp), %esi # 4-byte Reload - adcl 1224(%esp), %esi - adcl 1228(%esp), %ebp - movl %ebp, 100(%esp) # 4-byte Spill - movl 116(%esp), %edi # 4-byte Reload - adcl 1232(%esp), %edi - movl 112(%esp), %edx # 4-byte Reload - adcl 1236(%esp), %edx - movl %edx, 112(%esp) # 4-byte Spill - movl 92(%esp), %edx # 4-byte Reload - adcl 1240(%esp), %edx - movl %edx, 92(%esp) # 4-byte Spill - movl 104(%esp), %edx # 4-byte Reload - adcl 1244(%esp), %edx - movl %edx, 104(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 1248(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 1252(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 1256(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 1260(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 1264(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 56(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %ebp - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 1128(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 1128(%esp), %ebp - movl 68(%esp), %eax # 4-byte Reload - adcl 1132(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 1136(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 1140(%esp), 
%eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 1144(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 1148(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl 1152(%esp), %esi - movl %esi, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1156(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 1160(%esp), %edi - movl %edi, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1164(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1168(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %ebp # 4-byte Reload - adcl 1172(%esp), %ebp - movl 76(%esp), %eax # 4-byte Reload - adcl 1176(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %esi # 4-byte Reload - adcl 1180(%esp), %esi - movl 80(%esp), %eax # 4-byte Reload - adcl 1184(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %edi # 4-byte Reload - adcl 1188(%esp), %edi - movl 52(%esp), %eax # 4-byte Reload - adcl 1192(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1196(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 40(%eax), %eax - movl %eax, (%esp) - leal 1056(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 1124(%esp), %edx - movl 68(%esp), %eax # 4-byte Reload - addl 1056(%esp), %eax - movl 72(%esp), %ecx # 4-byte Reload - adcl 1060(%esp), %ecx - movl %ecx, 72(%esp) # 4-byte Spill - movl 64(%esp), %ecx # 4-byte Reload - adcl 1064(%esp), %ecx - movl %ecx, 64(%esp) # 4-byte Spill - movl 88(%esp), %ecx # 4-byte Reload - adcl 1068(%esp), %ecx - movl %ecx, 88(%esp) # 4-byte Spill - movl 96(%esp), %ecx # 4-byte Reload - adcl 1072(%esp), %ecx - movl %ecx, 96(%esp) # 4-byte Spill - movl 108(%esp), %ecx # 4-byte Reload - adcl 1076(%esp), %ecx - movl %ecx, 108(%esp) # 4-byte Spill - movl 100(%esp), %ecx # 4-byte Reload - adcl 1080(%esp), %ecx - movl %ecx, 100(%esp) # 4-byte Spill - movl 116(%esp), %ecx # 4-byte Reload - adcl 1084(%esp), %ecx - movl %ecx, 116(%esp) # 4-byte Spill - movl 112(%esp), %ecx # 4-byte Reload - adcl 1088(%esp), %ecx - movl %ecx, 112(%esp) # 4-byte Spill - movl 92(%esp), %ecx # 4-byte Reload - adcl 1092(%esp), %ecx - movl %ecx, 92(%esp) # 4-byte Spill - adcl 1096(%esp), %ebp - movl %ebp, 104(%esp) # 4-byte Spill - movl 76(%esp), %ecx # 4-byte Reload - adcl 1100(%esp), %ecx - movl %ecx, 76(%esp) # 4-byte Spill - adcl 1104(%esp), %esi - movl %esi, 84(%esp) # 4-byte Spill - movl 80(%esp), %ecx # 4-byte Reload - adcl 1108(%esp), %ecx - movl %ecx, 80(%esp) # 4-byte Spill - adcl 1112(%esp), %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 52(%esp), %edi # 4-byte Reload - adcl 1116(%esp), %edi - movl 56(%esp), %ecx # 4-byte Reload - adcl 1120(%esp), %ecx - movl %ecx, 56(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 68(%esp) # 4-byte Spill - movl %eax, %esi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 984(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 984(%esp), %esi - movl 72(%esp), %esi # 4-byte Reload - adcl 988(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 992(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %ebp # 4-byte Reload - adcl 996(%esp), %ebp - movl 96(%esp), %eax # 4-byte Reload - adcl 1000(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 1004(%esp), %eax - movl %eax, 108(%esp) # 4-byte 
Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 1008(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 1012(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 1016(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 1020(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 1024(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 1028(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 1032(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 1036(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 1040(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 1044(%esp), %edi - movl %edi, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 1048(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 1052(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 44(%eax), %eax - movl %eax, (%esp) - leal 912(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 980(%esp), %eax - addl 912(%esp), %esi - movl 64(%esp), %edx # 4-byte Reload - adcl 916(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - adcl 920(%esp), %ebp - movl %ebp, 88(%esp) # 4-byte Spill - movl 96(%esp), %edi # 4-byte Reload - adcl 924(%esp), %edi - movl 108(%esp), %edx # 4-byte Reload - adcl 928(%esp), %edx - movl %edx, 108(%esp) # 4-byte Spill - movl 100(%esp), %edx # 4-byte Reload - adcl 932(%esp), %edx - movl %edx, 100(%esp) # 4-byte Spill - movl 116(%esp), %edx # 4-byte Reload - adcl 936(%esp), %edx - movl %edx, 116(%esp) # 4-byte Spill - movl 112(%esp), %edx # 4-byte Reload - adcl 940(%esp), %edx - movl %edx, 112(%esp) # 4-byte Spill - movl 92(%esp), %ebp # 4-byte Reload - adcl 944(%esp), %ebp - movl 104(%esp), %edx # 4-byte Reload - adcl 948(%esp), %edx - movl %edx, 104(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 952(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 956(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 960(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 964(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 968(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 972(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 976(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - adcl $0, %eax - movl %eax, 72(%esp) # 4-byte Spill - movl %esi, %eax - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 840(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 840(%esp), %esi - movl 64(%esp), %eax # 4-byte Reload - adcl 844(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 848(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl 852(%esp), %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 856(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 860(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %edi # 4-byte Reload - adcl 864(%esp), 
%edi - movl 112(%esp), %eax # 4-byte Reload - adcl 868(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl %ebp, %esi - adcl 872(%esp), %esi - movl 104(%esp), %eax # 4-byte Reload - adcl 876(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 880(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 884(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %ebp # 4-byte Reload - adcl 888(%esp), %ebp - movl 60(%esp), %eax # 4-byte Reload - adcl 892(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 896(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 900(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 904(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 908(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 48(%eax), %eax - movl %eax, (%esp) - leal 768(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 836(%esp), %edx - movl 64(%esp), %ecx # 4-byte Reload - addl 768(%esp), %ecx - movl 88(%esp), %eax # 4-byte Reload - adcl 772(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 776(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 780(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 784(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 788(%esp), %edi - movl %edi, 116(%esp) # 4-byte Spill - movl 112(%esp), %edi # 4-byte Reload - adcl 792(%esp), %edi - adcl 796(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 800(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 804(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 808(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - adcl 812(%esp), %ebp - movl %ebp, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 816(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 820(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 824(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %ebp # 4-byte Reload - adcl 828(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 832(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 64(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 696(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 696(%esp), %esi - movl 88(%esp), %eax # 4-byte Reload - adcl 700(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 704(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 708(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 712(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %esi # 4-byte Reload - adcl 716(%esp), %esi - adcl 720(%esp), %edi - movl %edi, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 724(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 728(%esp), %eax - movl %eax, 104(%esp) # 
4-byte Spill - movl 76(%esp), %edi # 4-byte Reload - adcl 732(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 736(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 740(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 744(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 748(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 752(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - adcl 756(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 760(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 764(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 52(%eax), %eax - movl %eax, (%esp) - leal 624(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 692(%esp), %edx - movl 88(%esp), %ecx # 4-byte Reload - addl 624(%esp), %ecx - movl 96(%esp), %eax # 4-byte Reload - adcl 628(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 632(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %ebp # 4-byte Reload - adcl 636(%esp), %ebp - adcl 640(%esp), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 644(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 648(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 652(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - adcl 656(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 660(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 664(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 668(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 672(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 676(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 680(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 684(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 688(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 88(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 552(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 552(%esp), %esi - movl 96(%esp), %eax # 4-byte Reload - adcl 556(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 560(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl 564(%esp), %ebp - movl %ebp, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 568(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 572(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 576(%esp), %esi - movl 104(%esp), %eax # 4-byte Reload - adcl 580(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 584(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 588(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 592(%esp), %eax - 
movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 596(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %ebp # 4-byte Reload - adcl 600(%esp), %ebp - movl 56(%esp), %edi # 4-byte Reload - adcl 604(%esp), %edi - movl 68(%esp), %eax # 4-byte Reload - adcl 608(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 612(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 616(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 620(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 56(%eax), %eax - movl %eax, (%esp) - leal 480(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 548(%esp), %edx - movl 96(%esp), %ecx # 4-byte Reload - addl 480(%esp), %ecx - movl 108(%esp), %eax # 4-byte Reload - adcl 484(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 488(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 492(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 496(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 500(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 504(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 508(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 512(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 516(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 520(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - adcl 524(%esp), %ebp - movl %ebp, 52(%esp) # 4-byte Spill - adcl 528(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 532(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 536(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 540(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 544(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 96(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 408(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 408(%esp), %esi - movl 108(%esp), %eax # 4-byte Reload - adcl 412(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - adcl 416(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %esi # 4-byte Reload - adcl 420(%esp), %esi - movl 112(%esp), %eax # 4-byte Reload - adcl 424(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 428(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - movl 104(%esp), %ebp # 4-byte Reload - adcl 432(%esp), %ebp - movl 76(%esp), %edi # 4-byte Reload - adcl 436(%esp), %edi - movl 84(%esp), %eax # 4-byte Reload - adcl 440(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 444(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 448(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 452(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - 
movl 56(%esp), %eax # 4-byte Reload - adcl 456(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 460(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 464(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 468(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 472(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 476(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 60(%eax), %eax - movl %eax, (%esp) - leal 336(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 404(%esp), %edx - movl 108(%esp), %ecx # 4-byte Reload - addl 336(%esp), %ecx - movl 100(%esp), %eax # 4-byte Reload - adcl 340(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - adcl 344(%esp), %esi - movl %esi, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 348(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %eax # 4-byte Reload - adcl 352(%esp), %eax - movl %eax, 92(%esp) # 4-byte Spill - adcl 356(%esp), %ebp - movl %ebp, 104(%esp) # 4-byte Spill - adcl 360(%esp), %edi - movl %edi, 76(%esp) # 4-byte Spill - movl 84(%esp), %edi # 4-byte Reload - adcl 364(%esp), %edi - movl 80(%esp), %eax # 4-byte Reload - adcl 368(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %ebp # 4-byte Reload - adcl 372(%esp), %ebp - movl 52(%esp), %eax # 4-byte Reload - adcl 376(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - adcl 380(%esp), %eax - movl %eax, 56(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - adcl 384(%esp), %eax - movl %eax, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 388(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 392(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 396(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 400(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 108(%esp) # 4-byte Spill - movl %ecx, %eax - movl %ecx, %esi - imull 48(%esp), %eax # 4-byte Folded Reload - movl %eax, (%esp) - leal 264(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 264(%esp), %esi - movl 100(%esp), %eax # 4-byte Reload - adcl 268(%esp), %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - adcl 272(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 276(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%esp), %esi # 4-byte Reload - adcl 280(%esp), %esi - movl 104(%esp), %eax # 4-byte Reload - adcl 284(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 288(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - adcl 292(%esp), %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 296(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - adcl 300(%esp), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 304(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 56(%esp), %edi # 4-byte Reload - adcl 308(%esp), %edi - movl 68(%esp), %ebp # 4-byte Reload - adcl 312(%esp), %ebp - movl 72(%esp), %eax # 4-byte Reload - adcl 316(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), 
%eax # 4-byte Reload - adcl 320(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 324(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 328(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 332(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 2600(%esp), %eax - movl 64(%eax), %eax - movl %eax, (%esp) - leal 192(%esp), %ecx - movl 2596(%esp), %edx - calll .LmulPv544x32 - movl 260(%esp), %edx - movl 100(%esp), %ecx # 4-byte Reload - addl 192(%esp), %ecx - movl 116(%esp), %eax # 4-byte Reload - adcl 196(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - adcl 200(%esp), %eax - movl %eax, 112(%esp) # 4-byte Spill - adcl 204(%esp), %esi - movl %esi, 92(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - adcl 208(%esp), %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - adcl 212(%esp), %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - adcl 216(%esp), %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - adcl 220(%esp), %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - adcl 224(%esp), %eax - movl %eax, 60(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - adcl 228(%esp), %eax - movl %eax, 52(%esp) # 4-byte Spill - adcl 232(%esp), %edi - movl %edi, 56(%esp) # 4-byte Spill - adcl 236(%esp), %ebp - movl %ebp, 68(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - adcl 240(%esp), %eax - movl %eax, 72(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - adcl 244(%esp), %eax - movl %eax, 64(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - adcl 248(%esp), %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - adcl 252(%esp), %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - adcl 256(%esp), %eax - movl %eax, 108(%esp) # 4-byte Spill - adcl $0, %edx - movl %edx, 100(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload - imull %ecx, %eax - movl %ecx, %esi - movl %eax, (%esp) - leal 120(%esp), %ecx - movl 2604(%esp), %edx - calll .LmulPv544x32 - addl 120(%esp), %esi - movl 92(%esp), %esi # 4-byte Reload - movl 116(%esp), %eax # 4-byte Reload - adcl 124(%esp), %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 112(%esp), %ebp # 4-byte Reload - adcl 128(%esp), %ebp - movl %ebp, 112(%esp) # 4-byte Spill - adcl 132(%esp), %esi - movl 104(%esp), %edx # 4-byte Reload - adcl 136(%esp), %edx - movl %edx, 104(%esp) # 4-byte Spill - movl 76(%esp), %edx # 4-byte Reload - adcl 140(%esp), %edx - movl %edx, 76(%esp) # 4-byte Spill - movl 84(%esp), %edx # 4-byte Reload - adcl 144(%esp), %edx - movl %edx, 84(%esp) # 4-byte Spill - movl 80(%esp), %edx # 4-byte Reload - adcl 148(%esp), %edx - movl %edx, 80(%esp) # 4-byte Spill - movl 60(%esp), %edx # 4-byte Reload - adcl 152(%esp), %edx - movl %edx, 60(%esp) # 4-byte Spill - movl 52(%esp), %edx # 4-byte Reload - adcl 156(%esp), %edx - movl %edx, 52(%esp) # 4-byte Spill - movl 56(%esp), %edx # 4-byte Reload - adcl 160(%esp), %edx - movl %edx, 56(%esp) # 4-byte Spill - movl 68(%esp), %edx # 4-byte Reload - adcl 164(%esp), %edx - movl %edx, 68(%esp) # 4-byte Spill - movl 72(%esp), %edx # 4-byte Reload - adcl 168(%esp), %edx - movl %edx, 72(%esp) # 4-byte Spill - movl 64(%esp), %edx # 4-byte Reload - adcl 172(%esp), %edx - movl %edx, 64(%esp) # 4-byte Spill - movl 
88(%esp), %edx # 4-byte Reload - adcl 176(%esp), %edx - movl %edx, 88(%esp) # 4-byte Spill - movl 96(%esp), %edx # 4-byte Reload - adcl 180(%esp), %edx - movl %edx, 96(%esp) # 4-byte Spill - movl 108(%esp), %edx # 4-byte Reload - adcl 184(%esp), %edx - movl %edx, 108(%esp) # 4-byte Spill - movl 100(%esp), %edx # 4-byte Reload - adcl 188(%esp), %edx - movl %edx, 100(%esp) # 4-byte Spill - movl %eax, %edx - movl 2604(%esp), %edi - subl (%edi), %edx - sbbl 4(%edi), %ebp - movl %esi, %ebx - sbbl 8(%edi), %ebx - movl 104(%esp), %ecx # 4-byte Reload - sbbl 12(%edi), %ecx - movl 76(%esp), %eax # 4-byte Reload - sbbl 16(%edi), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - sbbl 20(%edi), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - sbbl 24(%edi), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 60(%esp), %eax # 4-byte Reload - sbbl 28(%edi), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 52(%esp), %eax # 4-byte Reload - sbbl 32(%edi), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 56(%esp), %eax # 4-byte Reload - sbbl 36(%edi), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 68(%esp), %eax # 4-byte Reload - sbbl 40(%edi), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 72(%esp), %eax # 4-byte Reload - sbbl 44(%edi), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 64(%esp), %eax # 4-byte Reload - sbbl 48(%edi), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - sbbl 52(%edi), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - sbbl 56(%edi), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - sbbl 60(%edi), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - sbbl 64(%edi), %eax - movl %eax, 92(%esp) # 4-byte Spill - sarl $31, %eax - testl %eax, %eax - movl 116(%esp), %edi # 4-byte Reload - js .LBB259_2 -# BB#1: - movl %edx, %edi -.LBB259_2: - movl 2592(%esp), %edx - movl %edi, (%edx) - movl 112(%esp), %edi # 4-byte Reload - js .LBB259_4 -# BB#3: - movl %ebp, %edi -.LBB259_4: - movl %edi, 4(%edx) - js .LBB259_6 -# BB#5: - movl %ebx, %esi -.LBB259_6: - movl %esi, 8(%edx) - movl 104(%esp), %esi # 4-byte Reload - js .LBB259_8 -# BB#7: - movl %ecx, %esi -.LBB259_8: - movl %esi, 12(%edx) - movl 76(%esp), %ecx # 4-byte Reload - js .LBB259_10 -# BB#9: - movl 4(%esp), %ecx # 4-byte Reload -.LBB259_10: - movl %ecx, 16(%edx) - movl 84(%esp), %eax # 4-byte Reload - js .LBB259_12 -# BB#11: - movl 8(%esp), %eax # 4-byte Reload -.LBB259_12: - movl %eax, 20(%edx) - movl 80(%esp), %eax # 4-byte Reload - js .LBB259_14 -# BB#13: - movl 12(%esp), %eax # 4-byte Reload -.LBB259_14: - movl %eax, 24(%edx) - movl 60(%esp), %eax # 4-byte Reload - js .LBB259_16 -# BB#15: - movl 16(%esp), %eax # 4-byte Reload -.LBB259_16: - movl %eax, 28(%edx) - movl 52(%esp), %eax # 4-byte Reload - js .LBB259_18 -# BB#17: - movl 20(%esp), %eax # 4-byte Reload -.LBB259_18: - movl %eax, 32(%edx) - movl 56(%esp), %eax # 4-byte Reload - js .LBB259_20 -# BB#19: - movl 24(%esp), %eax # 4-byte Reload -.LBB259_20: - movl %eax, 36(%edx) - movl 68(%esp), %eax # 4-byte Reload - js .LBB259_22 -# BB#21: - movl 28(%esp), %eax # 4-byte Reload -.LBB259_22: - movl %eax, 40(%edx) - movl 72(%esp), %eax # 4-byte Reload - js .LBB259_24 -# BB#23: - movl 32(%esp), %eax # 4-byte Reload -.LBB259_24: - movl %eax, 44(%edx) - movl 64(%esp), %eax # 4-byte Reload - js .LBB259_26 -# BB#25: - movl 36(%esp), %eax # 4-byte Reload -.LBB259_26: - movl %eax, 48(%edx) - movl 
-88(%esp), %eax # 4-byte Reload
- js .LBB259_28
-# BB#27:
- movl 40(%esp), %eax # 4-byte Reload
-.LBB259_28:
- movl %eax, 52(%edx)
- movl 96(%esp), %eax # 4-byte Reload
- js .LBB259_30
-# BB#29:
- movl 44(%esp), %eax # 4-byte Reload
-.LBB259_30:
- movl %eax, 56(%edx)
- movl 108(%esp), %eax # 4-byte Reload
- js .LBB259_32
-# BB#31:
- movl 48(%esp), %eax # 4-byte Reload
-.LBB259_32:
- movl %eax, 60(%edx)
- movl 100(%esp), %eax # 4-byte Reload
- js .LBB259_34
-# BB#33:
- movl 92(%esp), %eax # 4-byte Reload
-.LBB259_34:
- movl %eax, 64(%edx)
- addl $2572, %esp # imm = 0xA0C
- popl %esi
- popl %edi
- popl %ebx
- popl %ebp
- retl
-.Lfunc_end259:
- .size mcl_fp_montNF17L, .Lfunc_end259-mcl_fp_montNF17L
-
- .globl mcl_fp_montRed17L
- .align 16, 0x90
- .type mcl_fp_montRed17L,@function
-mcl_fp_montRed17L: # @mcl_fp_montRed17L
-# BB#0:
-# [body elided: Montgomery reduction over 17 32-bit limbs (544 bits); word-by-word reduction via repeated calls to .LmulPv544x32, with a final conditional subtraction of the modulus]
-.Lfunc_end260:
- .size mcl_fp_montRed17L, .Lfunc_end260-mcl_fp_montRed17L
-
- .globl mcl_fp_addPre17L
- .align 16, 0x90
- .type mcl_fp_addPre17L,@function
-mcl_fp_addPre17L: # @mcl_fp_addPre17L
-# BB#0:
-# [body elided: 544-bit addition without modular reduction; returns the final carry in %eax]
-.Lfunc_end261:
- .size mcl_fp_addPre17L, .Lfunc_end261-mcl_fp_addPre17L
-
- .globl mcl_fp_subPre17L
- .align 16, 0x90
- .type mcl_fp_subPre17L,@function
-mcl_fp_subPre17L: # @mcl_fp_subPre17L
-# BB#0:
-# [body elided: 544-bit subtraction without modular reduction; returns the final borrow in %eax]
-.Lfunc_end262:
- .size mcl_fp_subPre17L, .Lfunc_end262-mcl_fp_subPre17L
-
- .globl mcl_fp_shr1_17L
- .align 16, 0x90
- .type mcl_fp_shr1_17L,@function
-mcl_fp_shr1_17L: # @mcl_fp_shr1_17L
-# BB#0:
-# [body elided: 544-bit logical right shift by one bit, implemented as a shrdl chain across the 17 limbs]
-.Lfunc_end263:
- .size mcl_fp_shr1_17L, .Lfunc_end263-mcl_fp_shr1_17L
-
- .globl mcl_fp_add17L
- .align 16, 0x90
- .type mcl_fp_add17L,@function
-mcl_fp_add17L: # @mcl_fp_add17L
-# BB#0:
-# [body elided: 544-bit modular addition; adds, then conditionally commits the trial subtraction of the modulus on the %nocarry path]
-.Lfunc_end264:
- .size mcl_fp_add17L, .Lfunc_end264-mcl_fp_add17L
-
- .globl mcl_fp_addNF17L
- .align 16, 0x90
- .type mcl_fp_addNF17L,@function
-mcl_fp_addNF17L: # @mcl_fp_addNF17L
-# BB#0:
-# [body elided: 544-bit modular addition, NF variant; selects between the raw sum and the trial subtraction by the sign of the result]
-.Lfunc_end265:
- .size mcl_fp_addNF17L, .Lfunc_end265-mcl_fp_addNF17L
-
- .globl mcl_fp_sub17L
- .align 16, 0x90
- .type mcl_fp_sub17L,@function
-mcl_fp_sub17L: # @mcl_fp_sub17L
-# BB#0:
-# [body elided: 544-bit modular subtraction; adds the modulus back on the %carry path when the subtraction borrows]
-.Lfunc_end266:
- .size mcl_fp_sub17L, .Lfunc_end266-mcl_fp_sub17L
-
- .globl mcl_fp_subNF17L
- .align 16, 0x90
- .type mcl_fp_subNF17L,@function
-mcl_fp_subNF17L: # @mcl_fp_subNF17L
-# BB#0:
-# [body elided up to the line below: 544-bit modular subtraction, NF variant; masks the modulus with the sign of the difference and adds it back; the function continues past this section]
- movl 28(%esp),
%eax # 4-byte Reload - adcl 64(%esp), %eax # 4-byte Folded Reload - movl %ebp, 24(%esi) - movl (%esp), %ecx # 4-byte Reload - adcl 76(%esp), %ecx # 4-byte Folded Reload - movl %eax, 28(%esi) - movl 4(%esp), %eax # 4-byte Reload - adcl 80(%esp), %eax # 4-byte Folded Reload - movl %ecx, 32(%esi) - movl 8(%esp), %ecx # 4-byte Reload - adcl 108(%esp), %ecx # 4-byte Folded Reload - movl %eax, 36(%esi) - movl 12(%esp), %eax # 4-byte Reload - adcl 88(%esp), %eax # 4-byte Folded Reload - movl %ecx, 40(%esi) - movl 20(%esp), %ecx # 4-byte Reload - adcl 92(%esp), %ecx # 4-byte Folded Reload - movl %eax, 44(%esi) - movl 24(%esp), %eax # 4-byte Reload - adcl 96(%esp), %eax # 4-byte Folded Reload - movl %ecx, 48(%esi) - movl 32(%esp), %ecx # 4-byte Reload - adcl 100(%esp), %ecx # 4-byte Folded Reload - movl %eax, 52(%esi) - movl 36(%esp), %eax # 4-byte Reload - adcl 104(%esp), %eax # 4-byte Folded Reload - movl %ecx, 56(%esi) - movl %eax, 60(%esi) - movl 40(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %eax, 64(%esi) - addl $112, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end267: - .size mcl_fp_subNF17L, .Lfunc_end267-mcl_fp_subNF17L - - .globl mcl_fpDbl_add17L - .align 16, 0x90 - .type mcl_fpDbl_add17L,@function -mcl_fpDbl_add17L: # @mcl_fpDbl_add17L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $128, %esp - movl 156(%esp), %ecx - movl 152(%esp), %edx - movl 12(%edx), %edi - movl 16(%edx), %esi - movl 8(%ecx), %ebx - movl (%ecx), %ebp - addl (%edx), %ebp - movl 148(%esp), %eax - movl %ebp, (%eax) - movl 4(%ecx), %ebp - adcl 4(%edx), %ebp - adcl 8(%edx), %ebx - adcl 12(%ecx), %edi - adcl 16(%ecx), %esi - movl %ebp, 4(%eax) - movl 76(%ecx), %ebp - movl %ebx, 8(%eax) - movl 20(%ecx), %ebx - movl %edi, 12(%eax) - movl 20(%edx), %edi - adcl %ebx, %edi - movl 24(%ecx), %ebx - movl %esi, 16(%eax) - movl 24(%edx), %esi - adcl %ebx, %esi - movl 28(%ecx), %ebx - movl %edi, 20(%eax) - movl 28(%edx), %edi - adcl %ebx, %edi - movl 32(%ecx), %ebx - movl %esi, 24(%eax) - movl 32(%edx), %esi - adcl %ebx, %esi - movl 36(%ecx), %ebx - movl %edi, 28(%eax) - movl 36(%edx), %edi - adcl %ebx, %edi - movl 40(%ecx), %ebx - movl %esi, 32(%eax) - movl 40(%edx), %esi - adcl %ebx, %esi - movl 44(%ecx), %ebx - movl %edi, 36(%eax) - movl 44(%edx), %edi - adcl %ebx, %edi - movl 48(%ecx), %ebx - movl %esi, 40(%eax) - movl 48(%edx), %esi - adcl %ebx, %esi - movl 52(%ecx), %ebx - movl %edi, 44(%eax) - movl 52(%edx), %edi - adcl %ebx, %edi - movl 56(%ecx), %ebx - movl %esi, 48(%eax) - movl 56(%edx), %esi - adcl %ebx, %esi - movl 60(%ecx), %ebx - movl %edi, 52(%eax) - movl 60(%edx), %edi - adcl %ebx, %edi - movl 64(%ecx), %ebx - movl %esi, 56(%eax) - movl 64(%edx), %esi - adcl %ebx, %esi - movl 68(%ecx), %ebx - movl %edi, 60(%eax) - movl 68(%edx), %edi - adcl %ebx, %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 72(%ecx), %edi - movl %esi, 64(%eax) - movl 72(%edx), %eax - adcl %edi, %eax - movl %eax, 96(%esp) # 4-byte Spill - movl 76(%edx), %eax - adcl %ebp, %eax - movl %eax, 100(%esp) # 4-byte Spill - movl 80(%ecx), %esi - movl 80(%edx), %eax - adcl %esi, %eax - movl %eax, 104(%esp) # 4-byte Spill - movl 84(%ecx), %esi - movl 84(%edx), %eax - adcl %esi, %eax - movl %eax, 108(%esp) # 4-byte Spill - movl 88(%ecx), %esi - movl 88(%edx), %eax - adcl %esi, %eax - movl %eax, 112(%esp) # 4-byte Spill - movl 92(%ecx), %esi - movl 92(%edx), %eax - adcl %esi, %eax - movl %eax, 116(%esp) # 4-byte Spill - movl 96(%ecx), %esi - movl 96(%edx), %eax - adcl %esi, %eax - 
movl %eax, 120(%esp) # 4-byte Spill - movl 100(%ecx), %esi - movl 100(%edx), %eax - adcl %esi, %eax - movl %eax, 124(%esp) # 4-byte Spill - movl 104(%ecx), %esi - movl 104(%edx), %eax - adcl %esi, %eax - movl %eax, 76(%esp) # 4-byte Spill - movl 108(%ecx), %esi - movl 108(%edx), %eax - adcl %esi, %eax - movl %eax, 80(%esp) # 4-byte Spill - movl 112(%ecx), %esi - movl 112(%edx), %eax - adcl %esi, %eax - movl %eax, 84(%esp) # 4-byte Spill - movl 116(%ecx), %esi - movl 116(%edx), %eax - adcl %esi, %eax - movl %eax, 88(%esp) # 4-byte Spill - movl 120(%ecx), %edi - movl 120(%edx), %esi - adcl %edi, %esi - movl %esi, 64(%esp) # 4-byte Spill - movl 124(%ecx), %ebx - movl 124(%edx), %edi - adcl %ebx, %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 128(%ecx), %ebx - movl 128(%edx), %ebp - adcl %ebx, %ebp - movl %ebp, 72(%esp) # 4-byte Spill - movl 132(%ecx), %ecx - movl 132(%edx), %edx - adcl %ecx, %edx - sbbl %ecx, %ecx - andl $1, %ecx - movl 160(%esp), %ebx - movl 92(%esp), %eax # 4-byte Reload - subl (%ebx), %eax - movl %eax, 48(%esp) # 4-byte Spill - movl 96(%esp), %eax # 4-byte Reload - sbbl 4(%ebx), %eax - movl %eax, 44(%esp) # 4-byte Spill - movl 100(%esp), %eax # 4-byte Reload - sbbl 8(%ebx), %eax - movl %eax, 40(%esp) # 4-byte Spill - movl 104(%esp), %eax # 4-byte Reload - sbbl 12(%ebx), %eax - movl %eax, 36(%esp) # 4-byte Spill - movl 108(%esp), %eax # 4-byte Reload - sbbl 16(%ebx), %eax - movl %eax, 32(%esp) # 4-byte Spill - movl 112(%esp), %eax # 4-byte Reload - sbbl 20(%ebx), %eax - movl %eax, 28(%esp) # 4-byte Spill - movl 116(%esp), %eax # 4-byte Reload - sbbl 24(%ebx), %eax - movl %eax, 24(%esp) # 4-byte Spill - movl 120(%esp), %eax # 4-byte Reload - sbbl 28(%ebx), %eax - movl %eax, 20(%esp) # 4-byte Spill - movl 124(%esp), %eax # 4-byte Reload - sbbl 32(%ebx), %eax - movl %eax, 16(%esp) # 4-byte Spill - movl 76(%esp), %eax # 4-byte Reload - sbbl 36(%ebx), %eax - movl %eax, 12(%esp) # 4-byte Spill - movl 80(%esp), %eax # 4-byte Reload - sbbl 40(%ebx), %eax - movl %eax, 8(%esp) # 4-byte Spill - movl 84(%esp), %eax # 4-byte Reload - sbbl 44(%ebx), %eax - movl %eax, 4(%esp) # 4-byte Spill - movl 88(%esp), %eax # 4-byte Reload - sbbl 48(%ebx), %eax - movl %eax, (%esp) # 4-byte Spill - sbbl 52(%ebx), %esi - movl %esi, 52(%esp) # 4-byte Spill - sbbl 56(%ebx), %edi - movl %edi, 56(%esp) # 4-byte Spill - sbbl 60(%ebx), %ebp - movl %ebp, 60(%esp) # 4-byte Spill - movl %edx, %ebp - sbbl 64(%ebx), %ebp - sbbl $0, %ecx - andl $1, %ecx - jne .LBB268_2 -# BB#1: - movl %ebp, %edx -.LBB268_2: - testb %cl, %cl - movl 92(%esp), %eax # 4-byte Reload - movl 88(%esp), %esi # 4-byte Reload - movl 84(%esp), %edi # 4-byte Reload - movl 80(%esp), %ebx # 4-byte Reload - movl 76(%esp), %ebp # 4-byte Reload - jne .LBB268_4 -# BB#3: - movl (%esp), %esi # 4-byte Reload - movl 4(%esp), %edi # 4-byte Reload - movl 8(%esp), %ebx # 4-byte Reload - movl 12(%esp), %ebp # 4-byte Reload - movl 16(%esp), %eax # 4-byte Reload - movl %eax, 124(%esp) # 4-byte Spill - movl 20(%esp), %eax # 4-byte Reload - movl %eax, 120(%esp) # 4-byte Spill - movl 24(%esp), %eax # 4-byte Reload - movl %eax, 116(%esp) # 4-byte Spill - movl 28(%esp), %eax # 4-byte Reload - movl %eax, 112(%esp) # 4-byte Spill - movl 32(%esp), %eax # 4-byte Reload - movl %eax, 108(%esp) # 4-byte Spill - movl 36(%esp), %eax # 4-byte Reload - movl %eax, 104(%esp) # 4-byte Spill - movl 40(%esp), %eax # 4-byte Reload - movl %eax, 100(%esp) # 4-byte Spill - movl 44(%esp), %eax # 4-byte Reload - movl %eax, 96(%esp) # 4-byte Spill - movl 48(%esp), %eax # 4-byte Reload 
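(At this point in mcl_fpDbl_add17L the low 17 limbs of x+y have already been stored; the code above added the high 17 limbs with an adcl carry chain, trial-subtracted the modulus with an sbbl borrow chain, and is now selecting between the reduced and unreduced high half.) A minimal portable C sketch of the add-with-carry / conditional-reduce pattern this unrolled i386 code implements; fp_add_n and N are hypothetical names for illustration, not part of mcl:

#include <stdint.h>

enum { N = 17 };  /* hypothetical limb count: 17 x 32-bit limbs, as in this file */

static void fp_add_n(uint32_t z[N], const uint32_t x[N], const uint32_t y[N], const uint32_t p[N])
{
    uint32_t t[N];
    uint64_t carry = 0, borrow = 0;
    for (int i = 0; i < N; i++) {   /* limbwise add: the adcl chain */
        uint64_t s = (uint64_t)x[i] + y[i] + carry;
        z[i] = (uint32_t)s;
        carry = s >> 32;
    }
    for (int i = 0; i < N; i++) {   /* trial subtraction of p: the sbbl chain */
        uint64_t d = (uint64_t)z[i] - p[i] - borrow;
        t[i] = (uint32_t)d;
        borrow = (d >> 32) & 1;     /* 1 if this limb underflowed */
    }
    if (carry || !borrow) {         /* x+y >= p: keep the reduced value (the conditional reload block above) */
        for (int i = 0; i < N; i++) z[i] = t[i];
    }
}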
-.LBB268_4: - movl 148(%esp), %ecx - movl %eax, 68(%ecx) - movl %ecx, %eax - movl 96(%esp), %ecx # 4-byte Reload - movl %ecx, 72(%eax) - movl 100(%esp), %ecx # 4-byte Reload - movl %ecx, 76(%eax) - movl 104(%esp), %ecx # 4-byte Reload - movl %ecx, 80(%eax) - movl 108(%esp), %ecx # 4-byte Reload - movl %ecx, 84(%eax) - movl 112(%esp), %ecx # 4-byte Reload - movl %ecx, 88(%eax) - movl 116(%esp), %ecx # 4-byte Reload - movl %ecx, 92(%eax) - movl 120(%esp), %ecx # 4-byte Reload - movl %ecx, 96(%eax) - movl 124(%esp), %ecx # 4-byte Reload - movl %ecx, 100(%eax) - movl %ebp, 104(%eax) - movl %ebx, 108(%eax) - movl %edi, 112(%eax) - movl %esi, 116(%eax) - movl 72(%esp), %ecx # 4-byte Reload - movl 64(%esp), %esi # 4-byte Reload - jne .LBB268_6 -# BB#5: - movl 52(%esp), %esi # 4-byte Reload -.LBB268_6: - movl %esi, 120(%eax) - movl 68(%esp), %esi # 4-byte Reload - jne .LBB268_8 -# BB#7: - movl 56(%esp), %esi # 4-byte Reload -.LBB268_8: - movl %esi, 124(%eax) - jne .LBB268_10 -# BB#9: - movl 60(%esp), %ecx # 4-byte Reload -.LBB268_10: - movl %ecx, 128(%eax) - movl %edx, 132(%eax) - addl $128, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end268: - .size mcl_fpDbl_add17L, .Lfunc_end268-mcl_fpDbl_add17L - - .globl mcl_fpDbl_sub17L - .align 16, 0x90 - .type mcl_fpDbl_sub17L,@function -mcl_fpDbl_sub17L: # @mcl_fpDbl_sub17L -# BB#0: - pushl %ebp - pushl %ebx - pushl %edi - pushl %esi - subl $116, %esp - movl 140(%esp), %edx - movl (%edx), %eax - movl 4(%edx), %edi - movl 144(%esp), %esi - subl (%esi), %eax - sbbl 4(%esi), %edi - movl 8(%edx), %ebx - sbbl 8(%esi), %ebx - movl 136(%esp), %ecx - movl %eax, (%ecx) - movl 12(%edx), %eax - sbbl 12(%esi), %eax - movl %edi, 4(%ecx) - movl 16(%edx), %edi - sbbl 16(%esi), %edi - movl %ebx, 8(%ecx) - movl 20(%esi), %ebx - movl %eax, 12(%ecx) - movl 20(%edx), %eax - sbbl %ebx, %eax - movl 24(%esi), %ebx - movl %edi, 16(%ecx) - movl 24(%edx), %edi - sbbl %ebx, %edi - movl 28(%esi), %ebx - movl %eax, 20(%ecx) - movl 28(%edx), %eax - sbbl %ebx, %eax - movl 32(%esi), %ebx - movl %edi, 24(%ecx) - movl 32(%edx), %edi - sbbl %ebx, %edi - movl 36(%esi), %ebx - movl %eax, 28(%ecx) - movl 36(%edx), %eax - sbbl %ebx, %eax - movl 40(%esi), %ebx - movl %edi, 32(%ecx) - movl 40(%edx), %edi - sbbl %ebx, %edi - movl 44(%esi), %ebx - movl %eax, 36(%ecx) - movl 44(%edx), %eax - sbbl %ebx, %eax - movl 48(%esi), %ebx - movl %edi, 40(%ecx) - movl 48(%edx), %edi - sbbl %ebx, %edi - movl 52(%esi), %ebx - movl %eax, 44(%ecx) - movl 52(%edx), %eax - sbbl %ebx, %eax - movl 56(%esi), %ebx - movl %edi, 48(%ecx) - movl 56(%edx), %edi - sbbl %ebx, %edi - movl 60(%esi), %ebx - movl %eax, 52(%ecx) - movl 60(%edx), %eax - sbbl %ebx, %eax - movl 64(%esi), %ebx - movl %edi, 56(%ecx) - movl 64(%edx), %edi - sbbl %ebx, %edi - movl 68(%esi), %ebx - movl %eax, 60(%ecx) - movl 68(%edx), %eax - sbbl %ebx, %eax - movl %eax, 52(%esp) # 4-byte Spill - movl 72(%esi), %eax - movl %edi, 64(%ecx) - movl 72(%edx), %edi - sbbl %eax, %edi - movl %edi, 44(%esp) # 4-byte Spill - movl 76(%esi), %eax - movl 76(%edx), %edi - sbbl %eax, %edi - movl %edi, 48(%esp) # 4-byte Spill - movl 80(%esi), %eax - movl 80(%edx), %edi - sbbl %eax, %edi - movl %edi, 56(%esp) # 4-byte Spill - movl 84(%esi), %eax - movl 84(%edx), %edi - sbbl %eax, %edi - movl %edi, 60(%esp) # 4-byte Spill - movl 88(%esi), %eax - movl 88(%edx), %edi - sbbl %eax, %edi - movl %edi, 64(%esp) # 4-byte Spill - movl 92(%esi), %eax - movl 92(%edx), %edi - sbbl %eax, %edi - movl %edi, 68(%esp) # 4-byte Spill - movl 96(%esi), %eax - movl 
96(%edx), %edi - sbbl %eax, %edi - movl %edi, 72(%esp) # 4-byte Spill - movl 100(%esi), %eax - movl 100(%edx), %edi - sbbl %eax, %edi - movl %edi, 80(%esp) # 4-byte Spill - movl 104(%esi), %eax - movl 104(%edx), %edi - sbbl %eax, %edi - movl %edi, 84(%esp) # 4-byte Spill - movl 108(%esi), %eax - movl 108(%edx), %edi - sbbl %eax, %edi - movl %edi, 88(%esp) # 4-byte Spill - movl 112(%esi), %eax - movl 112(%edx), %edi - sbbl %eax, %edi - movl %edi, 92(%esp) # 4-byte Spill - movl 116(%esi), %eax - movl 116(%edx), %edi - sbbl %eax, %edi - movl %edi, 96(%esp) # 4-byte Spill - movl 120(%esi), %eax - movl 120(%edx), %edi - sbbl %eax, %edi - movl %edi, 100(%esp) # 4-byte Spill - movl 124(%esi), %eax - movl 124(%edx), %edi - sbbl %eax, %edi - movl %edi, 104(%esp) # 4-byte Spill - movl 128(%esi), %eax - movl 128(%edx), %edi - sbbl %eax, %edi - movl %edi, 108(%esp) # 4-byte Spill - movl 132(%esi), %eax - movl 132(%edx), %edx - sbbl %eax, %edx - movl %edx, 112(%esp) # 4-byte Spill - movl $0, %eax - sbbl $0, %eax - andl $1, %eax - movl 148(%esp), %ebp - jne .LBB269_1 -# BB#2: - movl $0, 76(%esp) # 4-byte Folded Spill - jmp .LBB269_3 -.LBB269_1: - movl 64(%ebp), %edx - movl %edx, 76(%esp) # 4-byte Spill -.LBB269_3: - testb %al, %al - jne .LBB269_4 -# BB#5: - movl $0, 28(%esp) # 4-byte Folded Spill - movl $0, %esi - jmp .LBB269_6 -.LBB269_4: - movl (%ebp), %esi - movl 4(%ebp), %eax - movl %eax, 28(%esp) # 4-byte Spill -.LBB269_6: - jne .LBB269_7 -# BB#8: - movl $0, 40(%esp) # 4-byte Folded Spill - jmp .LBB269_9 -.LBB269_7: - movl 60(%ebp), %eax - movl %eax, 40(%esp) # 4-byte Spill -.LBB269_9: - jne .LBB269_10 -# BB#11: - movl $0, 36(%esp) # 4-byte Folded Spill - jmp .LBB269_12 -.LBB269_10: - movl 56(%ebp), %eax - movl %eax, 36(%esp) # 4-byte Spill -.LBB269_12: - jne .LBB269_13 -# BB#14: - movl $0, 32(%esp) # 4-byte Folded Spill - jmp .LBB269_15 -.LBB269_13: - movl 52(%ebp), %eax - movl %eax, 32(%esp) # 4-byte Spill -.LBB269_15: - jne .LBB269_16 -# BB#17: - movl $0, 24(%esp) # 4-byte Folded Spill - jmp .LBB269_18 -.LBB269_16: - movl 48(%ebp), %eax - movl %eax, 24(%esp) # 4-byte Spill -.LBB269_18: - jne .LBB269_19 -# BB#20: - movl $0, 20(%esp) # 4-byte Folded Spill - jmp .LBB269_21 -.LBB269_19: - movl 44(%ebp), %eax - movl %eax, 20(%esp) # 4-byte Spill -.LBB269_21: - jne .LBB269_22 -# BB#23: - movl $0, 16(%esp) # 4-byte Folded Spill - jmp .LBB269_24 -.LBB269_22: - movl 40(%ebp), %eax - movl %eax, 16(%esp) # 4-byte Spill -.LBB269_24: - jne .LBB269_25 -# BB#26: - movl $0, 12(%esp) # 4-byte Folded Spill - jmp .LBB269_27 -.LBB269_25: - movl 36(%ebp), %eax - movl %eax, 12(%esp) # 4-byte Spill -.LBB269_27: - jne .LBB269_28 -# BB#29: - movl $0, 8(%esp) # 4-byte Folded Spill - jmp .LBB269_30 -.LBB269_28: - movl 32(%ebp), %eax - movl %eax, 8(%esp) # 4-byte Spill -.LBB269_30: - jne .LBB269_31 -# BB#32: - movl $0, 4(%esp) # 4-byte Folded Spill - jmp .LBB269_33 -.LBB269_31: - movl 28(%ebp), %eax - movl %eax, 4(%esp) # 4-byte Spill -.LBB269_33: - jne .LBB269_34 -# BB#35: - movl $0, (%esp) # 4-byte Folded Spill - jmp .LBB269_36 -.LBB269_34: - movl 24(%ebp), %eax - movl %eax, (%esp) # 4-byte Spill -.LBB269_36: - jne .LBB269_37 -# BB#38: - movl $0, %ebx - jmp .LBB269_39 -.LBB269_37: - movl 20(%ebp), %ebx -.LBB269_39: - jne .LBB269_40 -# BB#41: - movl $0, %edi - jmp .LBB269_42 -.LBB269_40: - movl 16(%ebp), %edi -.LBB269_42: - jne .LBB269_43 -# BB#44: - movl %ebp, %eax - movl $0, %ebp - jmp .LBB269_45 -.LBB269_43: - movl %ebp, %eax - movl 12(%eax), %ebp -.LBB269_45: - jne .LBB269_46 -# BB#47: - xorl %eax, %eax - jmp 
.LBB269_48 -.LBB269_46: - movl 8(%eax), %eax -.LBB269_48: - addl 52(%esp), %esi # 4-byte Folded Reload - movl 28(%esp), %edx # 4-byte Reload - adcl 44(%esp), %edx # 4-byte Folded Reload - movl %esi, 68(%ecx) - adcl 48(%esp), %eax # 4-byte Folded Reload - movl %edx, 72(%ecx) - adcl 56(%esp), %ebp # 4-byte Folded Reload - movl %eax, 76(%ecx) - adcl 60(%esp), %edi # 4-byte Folded Reload - movl %ebp, 80(%ecx) - adcl 64(%esp), %ebx # 4-byte Folded Reload - movl %edi, 84(%ecx) - movl (%esp), %edx # 4-byte Reload - adcl 68(%esp), %edx # 4-byte Folded Reload - movl %ebx, 88(%ecx) - movl 4(%esp), %eax # 4-byte Reload - adcl 72(%esp), %eax # 4-byte Folded Reload - movl %edx, 92(%ecx) - movl 8(%esp), %edx # 4-byte Reload - adcl 80(%esp), %edx # 4-byte Folded Reload - movl %eax, 96(%ecx) - movl 12(%esp), %eax # 4-byte Reload - adcl 84(%esp), %eax # 4-byte Folded Reload - movl %edx, 100(%ecx) - movl 16(%esp), %edx # 4-byte Reload - adcl 88(%esp), %edx # 4-byte Folded Reload - movl %eax, 104(%ecx) - movl 20(%esp), %eax # 4-byte Reload - adcl 92(%esp), %eax # 4-byte Folded Reload - movl %edx, 108(%ecx) - movl 24(%esp), %edx # 4-byte Reload - adcl 96(%esp), %edx # 4-byte Folded Reload - movl %eax, 112(%ecx) - movl 32(%esp), %eax # 4-byte Reload - adcl 100(%esp), %eax # 4-byte Folded Reload - movl %edx, 116(%ecx) - movl 36(%esp), %edx # 4-byte Reload - adcl 104(%esp), %edx # 4-byte Folded Reload - movl %eax, 120(%ecx) - movl 40(%esp), %eax # 4-byte Reload - adcl 108(%esp), %eax # 4-byte Folded Reload - movl %edx, 124(%ecx) - movl %eax, 128(%ecx) - movl 76(%esp), %eax # 4-byte Reload - adcl 112(%esp), %eax # 4-byte Folded Reload - movl %eax, 132(%ecx) - addl $116, %esp - popl %esi - popl %edi - popl %ebx - popl %ebp - retl -.Lfunc_end269: - .size mcl_fpDbl_sub17L, .Lfunc_end269-mcl_fpDbl_sub17L - - - .section ".note.GNU-stack","",@progbits diff --git a/vendor/github.com/dexon-foundation/mcl/src/bn_c256.cpp b/vendor/github.com/dexon-foundation/mcl/src/bn_c256.cpp deleted file mode 100644 index 2f975a287..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/bn_c256.cpp +++ /dev/null @@ -1,6 +0,0 @@ -/* - implementation of mclBn_* apis -*/ -#define MCLBN_FP_UNIT_SIZE 4 -#include "mcl/impl/bn_c_impl.hpp" - diff --git a/vendor/github.com/dexon-foundation/mcl/src/bn_c384.cpp b/vendor/github.com/dexon-foundation/mcl/src/bn_c384.cpp deleted file mode 100644 index 934a078ae..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/bn_c384.cpp +++ /dev/null @@ -1,7 +0,0 @@ -/* - implementation of mclBn_* apis -*/ -#define MCLBN_FP_UNIT_SIZE 6 -#define MCLBN_FR_UNIT_SIZE 6 -#include "mcl/impl/bn_c_impl.hpp" - diff --git a/vendor/github.com/dexon-foundation/mcl/src/bn_c384_256.cpp b/vendor/github.com/dexon-foundation/mcl/src/bn_c384_256.cpp deleted file mode 100644 index ecd968efd..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/bn_c384_256.cpp +++ /dev/null @@ -1,7 +0,0 @@ -/* - implementation of mclBn_* apis -*/ -#define MCLBN_FP_UNIT_SIZE 6 -#define MCLBN_FR_UNIT_SIZE 4 -#include "mcl/impl/bn_c_impl.hpp" - diff --git a/vendor/github.com/dexon-foundation/mcl/src/bn_c512.cpp b/vendor/github.com/dexon-foundation/mcl/src/bn_c512.cpp deleted file mode 100644 index 7c1029522..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/bn_c512.cpp +++ /dev/null @@ -1,6 +0,0 @@ -/* - implementation of mclBn_* apis -*/ -#define MCLBN_FP_UNIT_SIZE 8 -#include "mcl/impl/bn_c_impl.hpp" - diff --git a/vendor/github.com/dexon-foundation/mcl/src/bn_c_impl.hpp 
b/vendor/github.com/dexon-foundation/mcl/src/bn_c_impl.hpp deleted file mode 100644 index bb0b4ba8e..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/bn_c_impl.hpp +++ /dev/null @@ -1,517 +0,0 @@ -#define MCLBN_DLL_EXPORT -#include <mcl/bn.h> - -#if MCLBN_FP_UNIT_SIZE == 4 && MCLBN_FR_UNIT_SIZE == 4 -#include <mcl/bn256.hpp> -#elif MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 6 -#include <mcl/bn384.hpp> -#elif MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 4 -#include <mcl/bls12_381.hpp> -#elif MCLBN_FP_UNIT_SIZE == 8 && MCLBN_FR_UNIT_SIZE == 8 -#include <mcl/bn512.hpp> -#else - #error "not supported size" -#endif -#include <mcl/lagrange.hpp> -using namespace mcl::bn; - -static Fr *cast(mclBnFr *p) { return reinterpret_cast<Fr*>(p); } -static const Fr *cast(const mclBnFr *p) { return reinterpret_cast<const Fr*>(p); } - -static G1 *cast(mclBnG1 *p) { return reinterpret_cast<G1*>(p); } -static const G1 *cast(const mclBnG1 *p) { return reinterpret_cast<const G1*>(p); } - -static G2 *cast(mclBnG2 *p) { return reinterpret_cast<G2*>(p); } -static const G2 *cast(const mclBnG2 *p) { return reinterpret_cast<const G2*>(p); } - -static Fp12 *cast(mclBnGT *p) { return reinterpret_cast<Fp12*>(p); } -static const Fp12 *cast(const mclBnGT *p) { return reinterpret_cast<const Fp12*>(p); } - -static Fp6 *cast(uint64_t *p) { return reinterpret_cast<Fp6*>(p); } -static const Fp6 *cast(const uint64_t *p) { return reinterpret_cast<const Fp6*>(p); } - -template<class T> -int setStr(T *x, const char *buf, mclSize bufSize, int ioMode) -{ - size_t n = cast(x)->deserialize(buf, bufSize, ioMode); - return n > 0 ? 0 : -1; -} - -#ifdef __EMSCRIPTEN__ -// use these functions forcibly -extern "C" MCLBN_DLL_API void *mclBnMalloc(size_t n) -{ - return malloc(n); -} -extern "C" MCLBN_DLL_API void mclBnFree(void *p) -{ - free(p); -} -#endif - -int mclBn_init(int curve, int compiledTimeVar) -{ - if (compiledTimeVar != MCLBN_COMPILED_TIME_VAR) { - return -(compiledTimeVar | (MCLBN_COMPILED_TIME_VAR * 100)); - } - const mcl::CurveParam& cp = mcl::getCurveParam(curve); - bool b; - initPairing(&b, cp); - return b ?
0 : -1; -} - -int mclBn_getOpUnitSize() -{ - return (int)Fp::getUnitSize() * sizeof(mcl::fp::Unit) / sizeof(uint64_t); -} - -int mclBn_getG1ByteSize() -{ - return (int)Fp::getByteSize(); -} - -int mclBn_getFrByteSize() -{ - return (int)Fr::getByteSize(); -} - -mclSize mclBn_getCurveOrder(char *buf, mclSize maxBufSize) -{ - return Fr::getModulo(buf, maxBufSize); -} - -mclSize mclBn_getFieldOrder(char *buf, mclSize maxBufSize) -{ - return Fp::getModulo(buf, maxBufSize); -} -//////////////////////////////////////////////// -// set zero -void mclBnFr_clear(mclBnFr *x) -{ - cast(x)->clear(); -} - -// set x to y -void mclBnFr_setInt(mclBnFr *y, mclInt x) -{ - *cast(y) = x; -} -void mclBnFr_setInt32(mclBnFr *y, int x) -{ - *cast(y) = x; -} - -int mclBnFr_setStr(mclBnFr *x, const char *buf, mclSize bufSize, int ioMode) -{ - return setStr(x, buf, bufSize, ioMode); -} -int mclBnFr_setLittleEndian(mclBnFr *x, const void *buf, mclSize bufSize) -{ - cast(x)->setArrayMask((const char *)buf, bufSize); - return 0; -} -mclSize mclBnFr_deserialize(mclBnFr *x, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(x)->deserialize(buf, bufSize); -} -// return 1 if true -int mclBnFr_isValid(const mclBnFr *x) -{ - return cast(x)->isValid(); -} -int mclBnFr_isEqual(const mclBnFr *x, const mclBnFr *y) -{ - return *cast(x) == *cast(y); -} -int mclBnFr_isZero(const mclBnFr *x) -{ - return cast(x)->isZero(); -} -int mclBnFr_isOne(const mclBnFr *x) -{ - return cast(x)->isOne(); -} - -#ifndef MCL_DONT_USE_CSRPNG -int mclBnFr_setByCSPRNG(mclBnFr *x) -{ - cast(x)->setByCSPRNG(); - return 0; -} -#endif - -// hash(buf) and set x -int mclBnFr_setHashOf(mclBnFr *x, const void *buf, mclSize bufSize) -{ - cast(x)->setHashOf(buf, bufSize); - return 0; -} - -mclSize mclBnFr_getStr(char *buf, mclSize maxBufSize, const mclBnFr *x, int ioMode) -{ - return cast(x)->getStr(buf, maxBufSize, ioMode); -} -mclSize mclBnFr_serialize(void *buf, mclSize maxBufSize, const mclBnFr *x) -{ - return (mclSize)cast(x)->serialize(buf, maxBufSize); -} - -void mclBnFr_neg(mclBnFr *y, const mclBnFr *x) -{ - Fr::neg(*cast(y), *cast(x)); -} -void mclBnFr_inv(mclBnFr *y, const mclBnFr *x) -{ - Fr::inv(*cast(y), *cast(x)); -} -void mclBnFr_sqr(mclBnFr *y, const mclBnFr *x) -{ - Fr::sqr(*cast(y), *cast(x)); -} -void mclBnFr_add(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) -{ - Fr::add(*cast(z),*cast(x), *cast(y)); -} -void mclBnFr_sub(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) -{ - Fr::sub(*cast(z),*cast(x), *cast(y)); -} -void mclBnFr_mul(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) -{ - Fr::mul(*cast(z),*cast(x), *cast(y)); -} -void mclBnFr_div(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) -{ - Fr::div(*cast(z),*cast(x), *cast(y)); -} - -//////////////////////////////////////////////// -// set zero -void mclBnG1_clear(mclBnG1 *x) -{ - cast(x)->clear(); -} - -int mclBnG1_setStr(mclBnG1 *x, const char *buf, mclSize bufSize, int ioMode) -{ - return setStr(x, buf, bufSize, ioMode); -} -mclSize mclBnG1_deserialize(mclBnG1 *x, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(x)->deserialize(buf, bufSize); -} - -// return 1 if true -int mclBnG1_isValid(const mclBnG1 *x) -{ - return cast(x)->isValid(); -} -int mclBnG1_isEqual(const mclBnG1 *x, const mclBnG1 *y) -{ - return *cast(x) == *cast(y); -} -int mclBnG1_isZero(const mclBnG1 *x) -{ - return cast(x)->isZero(); -} -int mclBnG1_isValidOrder(const mclBnG1 *x) -{ - return cast(x)->isValidOrder(); -} - -int mclBnG1_hashAndMapTo(mclBnG1 *x, const void *buf, mclSize bufSize) -{ - 
hashAndMapToG1(*cast(x), buf, bufSize); - return 0; -} - -mclSize mclBnG1_getStr(char *buf, mclSize maxBufSize, const mclBnG1 *x, int ioMode) -{ - return cast(x)->getStr(buf, maxBufSize, ioMode); -} - -mclSize mclBnG1_serialize(void *buf, mclSize maxBufSize, const mclBnG1 *x) -{ - return (mclSize)cast(x)->serialize(buf, maxBufSize); -} - -void mclBnG1_neg(mclBnG1 *y, const mclBnG1 *x) -{ - G1::neg(*cast(y), *cast(x)); -} -void mclBnG1_dbl(mclBnG1 *y, const mclBnG1 *x) -{ - G1::dbl(*cast(y), *cast(x)); -} -void mclBnG1_normalize(mclBnG1 *y, const mclBnG1 *x) -{ - G1::normalize(*cast(y), *cast(x)); -} -void mclBnG1_add(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y) -{ - G1::add(*cast(z),*cast(x), *cast(y)); -} -void mclBnG1_sub(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y) -{ - G1::sub(*cast(z),*cast(x), *cast(y)); -} -void mclBnG1_mul(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y) -{ - G1::mul(*cast(z),*cast(x), *cast(y)); -} -void mclBnG1_mulCT(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y) -{ - G1::mulCT(*cast(z),*cast(x), *cast(y)); -} - -//////////////////////////////////////////////// -// set zero -void mclBnG2_clear(mclBnG2 *x) -{ - cast(x)->clear(); -} - -int mclBnG2_setStr(mclBnG2 *x, const char *buf, mclSize bufSize, int ioMode) -{ - return setStr(x, buf, bufSize, ioMode); -} -mclSize mclBnG2_deserialize(mclBnG2 *x, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(x)->deserialize(buf, bufSize); -} - -// return 1 if true -int mclBnG2_isValid(const mclBnG2 *x) -{ - return cast(x)->isValid(); -} -int mclBnG2_isEqual(const mclBnG2 *x, const mclBnG2 *y) -{ - return *cast(x) == *cast(y); -} -int mclBnG2_isZero(const mclBnG2 *x) -{ - return cast(x)->isZero(); -} -int mclBnG2_isValidOrder(const mclBnG2 *x) -{ - return cast(x)->isValidOrder(); -} - -int mclBnG2_hashAndMapTo(mclBnG2 *x, const void *buf, mclSize bufSize) -{ - hashAndMapToG2(*cast(x), buf, bufSize); - return 0; -} - -mclSize mclBnG2_getStr(char *buf, mclSize maxBufSize, const mclBnG2 *x, int ioMode) -{ - return cast(x)->getStr(buf, maxBufSize, ioMode); -} - -mclSize mclBnG2_serialize(void *buf, mclSize maxBufSize, const mclBnG2 *x) -{ - return (mclSize)cast(x)->serialize(buf, maxBufSize); -} - -void mclBnG2_neg(mclBnG2 *y, const mclBnG2 *x) -{ - G2::neg(*cast(y), *cast(x)); -} -void mclBnG2_dbl(mclBnG2 *y, const mclBnG2 *x) -{ - G2::dbl(*cast(y), *cast(x)); -} -void mclBnG2_normalize(mclBnG2 *y, const mclBnG2 *x) -{ - G2::normalize(*cast(y), *cast(x)); -} -void mclBnG2_add(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y) -{ - G2::add(*cast(z),*cast(x), *cast(y)); -} -void mclBnG2_sub(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y) -{ - G2::sub(*cast(z),*cast(x), *cast(y)); -} -void mclBnG2_mul(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y) -{ - G2::mul(*cast(z),*cast(x), *cast(y)); -} -void mclBnG2_mulCT(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y) -{ - G2::mulCT(*cast(z),*cast(x), *cast(y)); -} - -//////////////////////////////////////////////// -// set zero -void mclBnGT_clear(mclBnGT *x) -{ - cast(x)->clear(); -} -void mclBnGT_setInt(mclBnGT *y, mclInt x) -{ - cast(y)->clear(); - *(cast(y)->getFp0()) = x; -} -void mclBnGT_setInt32(mclBnGT *y, int x) -{ - cast(y)->clear(); - *(cast(y)->getFp0()) = x; -} - -int mclBnGT_setStr(mclBnGT *x, const char *buf, mclSize bufSize, int ioMode) -{ - return setStr(x, buf, bufSize, ioMode); -} -mclSize mclBnGT_deserialize(mclBnGT *x, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(x)->deserialize(buf, bufSize); -} - -// return 1 if true -int 
mclBnGT_isEqual(const mclBnGT *x, const mclBnGT *y) -{ - return *cast(x) == *cast(y); -} -int mclBnGT_isZero(const mclBnGT *x) -{ - return cast(x)->isZero(); -} -int mclBnGT_isOne(const mclBnGT *x) -{ - return cast(x)->isOne(); -} - -mclSize mclBnGT_getStr(char *buf, mclSize maxBufSize, const mclBnGT *x, int ioMode) -{ - return cast(x)->getStr(buf, maxBufSize, ioMode); -} - -mclSize mclBnGT_serialize(void *buf, mclSize maxBufSize, const mclBnGT *x) -{ - return (mclSize)cast(x)->serialize(buf, maxBufSize); -} - -void mclBnGT_neg(mclBnGT *y, const mclBnGT *x) -{ - Fp12::neg(*cast(y), *cast(x)); -} -void mclBnGT_inv(mclBnGT *y, const mclBnGT *x) -{ - Fp12::inv(*cast(y), *cast(x)); -} -void mclBnGT_sqr(mclBnGT *y, const mclBnGT *x) -{ - Fp12::sqr(*cast(y), *cast(x)); -} -void mclBnGT_add(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) -{ - Fp12::add(*cast(z),*cast(x), *cast(y)); -} -void mclBnGT_sub(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) -{ - Fp12::sub(*cast(z),*cast(x), *cast(y)); -} -void mclBnGT_mul(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) -{ - Fp12::mul(*cast(z),*cast(x), *cast(y)); -} -void mclBnGT_div(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) -{ - Fp12::div(*cast(z),*cast(x), *cast(y)); -} - -void mclBnGT_pow(mclBnGT *z, const mclBnGT *x, const mclBnFr *y) -{ - Fp12::pow(*cast(z), *cast(x), *cast(y)); -} -void mclBnGT_powGeneric(mclBnGT *z, const mclBnGT *x, const mclBnFr *y) -{ - Fp12::powGeneric(*cast(z), *cast(x), *cast(y)); -} - -void mclBn_pairing(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y) -{ - pairing(*cast(z), *cast(x), *cast(y)); -} -void mclBn_finalExp(mclBnGT *y, const mclBnGT *x) -{ - finalExp(*cast(y), *cast(x)); -} -void mclBn_millerLoop(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y) -{ - millerLoop(*cast(z), *cast(x), *cast(y)); -} -int mclBn_getUint64NumToPrecompute(void) -{ - return int(BN::param.precomputedQcoeffSize * sizeof(Fp6) / sizeof(uint64_t)); -} - -void mclBn_precomputeG2(uint64_t *Qbuf, const mclBnG2 *Q) -{ - precomputeG2(cast(Qbuf), *cast(Q)); -} - -void mclBn_precomputedMillerLoop(mclBnGT *f, const mclBnG1 *P, const uint64_t *Qbuf) -{ - precomputedMillerLoop(*cast(f), *cast(P), cast(Qbuf)); -} - -void mclBn_precomputedMillerLoop2(mclBnGT *f, const mclBnG1 *P1, const uint64_t *Q1buf, const mclBnG1 *P2, const uint64_t *Q2buf) -{ - precomputedMillerLoop2(*cast(f), *cast(P1), cast(Q1buf), *cast(P2), cast(Q2buf)); -} - -void mclBn_precomputedMillerLoop2mixed(mclBnGT *f, const mclBnG1 *P1, const mclBnG2 *Q1, const mclBnG1 *P2, const uint64_t *Q2buf) -{ - precomputedMillerLoop2mixed(*cast(f), *cast(P1), *cast(Q1), *cast(P2), cast(Q2buf)); -} - -int mclBn_FrLagrangeInterpolation(mclBnFr *out, const mclBnFr *xVec, const mclBnFr *yVec, mclSize k) -{ - bool b; - mcl::LagrangeInterpolation(&b, *cast(out), cast(xVec), cast(yVec), k); - return b ? 0 : -1; -} -int mclBn_G1LagrangeInterpolation(mclBnG1 *out, const mclBnFr *xVec, const mclBnG1 *yVec, mclSize k) -{ - bool b; - mcl::LagrangeInterpolation(&b, *cast(out), cast(xVec), cast(yVec), k); - return b ? 0 : -1; -} -int mclBn_G2LagrangeInterpolation(mclBnG2 *out, const mclBnFr *xVec, const mclBnG2 *yVec, mclSize k) -{ - bool b; - mcl::LagrangeInterpolation(&b, *cast(out), cast(xVec), cast(yVec), k); - return b ? 0 : -1; -} -int mclBn_FrEvaluatePolynomial(mclBnFr *out, const mclBnFr *cVec, mclSize cSize, const mclBnFr *x) -{ - bool b; - mcl::evaluatePolynomial(&b, *cast(out), cast(cVec), cSize, *cast(x)); - return b ? 
0 : -1; -} -int mclBn_G1EvaluatePolynomial(mclBnG1 *out, const mclBnG1 *cVec, mclSize cSize, const mclBnFr *x) -{ - bool b; - mcl::evaluatePolynomial(&b, *cast(out), cast(cVec), cSize, *cast(x)); - return b ? 0 : -1; -} -int mclBn_G2EvaluatePolynomial(mclBnG2 *out, const mclBnG2 *cVec, mclSize cSize, const mclBnFr *x) -{ - bool b; - mcl::evaluatePolynomial(&b, *cast(out), cast(cVec), cSize, *cast(x)); - return b ? 0 : -1; -} - -void mclBn_verifyOrderG1(int doVerify) -{ - verifyOrderG1(doVerify != 0); -} - -void mclBn_verifyOrderG2(int doVerify) -{ - verifyOrderG2(doVerify != 0); -} - diff --git a/vendor/github.com/dexon-foundation/mcl/src/ecdsa_c.cpp b/vendor/github.com/dexon-foundation/mcl/src/ecdsa_c.cpp deleted file mode 100644 index f2222a224..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/ecdsa_c.cpp +++ /dev/null @@ -1,110 +0,0 @@ -#define ECDSA_DLL_EXPORT -#include <mcl/ecdsa.h> -#include <mcl/ecdsa.hpp> -#include <new> - -using namespace mcl::ecdsa; - -static SecretKey *cast(ecdsaSecretKey *p) { return reinterpret_cast<SecretKey*>(p); } -static const SecretKey *cast(const ecdsaSecretKey *p) { return reinterpret_cast<const SecretKey*>(p); } - -static PublicKey *cast(ecdsaPublicKey *p) { return reinterpret_cast<PublicKey*>(p); } -static const PublicKey *cast(const ecdsaPublicKey *p) { return reinterpret_cast<const PublicKey*>(p); } - -static Signature *cast(ecdsaSignature *p) { return reinterpret_cast<Signature*>(p); } -static const Signature *cast(const ecdsaSignature *p) { return reinterpret_cast<const Signature*>(p); } - -static PrecomputedPublicKey *cast(ecdsaPrecomputedPublicKey *p) { return reinterpret_cast<PrecomputedPublicKey*>(p); } -static const PrecomputedPublicKey *cast(const ecdsaPrecomputedPublicKey *p) { return reinterpret_cast<const PrecomputedPublicKey*>(p); } - -#ifdef __EMSCRIPTEN__ -// use these functions forcibly -extern "C" ECDSA_DLL_API void *ecdsaMalloc(size_t n) -{ - return malloc(n); -} -extern "C" ECDSA_DLL_API void ecdsaFree(void *p) -{ - free(p); -} -#endif - -int ecdsaInit(void) -{ - bool b; - init(&b); - return b ?
0 : -1; -} - -mclSize ecdsaSecretKeySerialize(void *buf, mclSize maxBufSize, const ecdsaSecretKey *sec) -{ - return (mclSize)cast(sec)->serialize(buf, maxBufSize); -} -mclSize ecdsaPublicKeySerialize(void *buf, mclSize maxBufSize, const ecdsaPublicKey *pub) -{ - return (mclSize)cast(pub)->serialize(buf, maxBufSize); -} -mclSize ecdsaSignatureSerialize(void *buf, mclSize maxBufSize, const ecdsaSignature *sig) -{ - return (mclSize)cast(sig)->serialize(buf, maxBufSize); -} - -mclSize ecdsaSecretKeyDeserialize(ecdsaSecretKey* sec, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(sec)->deserialize(buf, bufSize); -} -mclSize ecdsaPublicKeyDeserialize(ecdsaPublicKey* pub, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(pub)->deserialize(buf, bufSize); -} -mclSize ecdsaSignatureDeserialize(ecdsaSignature* sig, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(sig)->deserialize(buf, bufSize); -} - -// return 0 if success -int ecdsaSecretKeySetByCSPRNG(ecdsaSecretKey *sec) -{ - cast(sec)->setByCSPRNG(); - return 0; -} - -void ecdsaGetPublicKey(ecdsaPublicKey *pub, const ecdsaSecretKey *sec) -{ - getPublicKey(*cast(pub), *cast(sec)); -} - -void ecdsaSign(ecdsaSignature *sig, const ecdsaSecretKey *sec, const void *m, mclSize size) -{ - sign(*cast(sig), *cast(sec), m, size); -} - -int ecdsaVerify(const ecdsaSignature *sig, const ecdsaPublicKey *pub, const void *m, mclSize size) -{ - return verify(*cast(sig), *cast(pub), m, size); -} -int ecdsaVerifyPrecomputed(const ecdsaSignature *sig, const ecdsaPrecomputedPublicKey *ppub, const void *m, mclSize size) -{ - return verify(*cast(sig), *cast(ppub), m, size); -} - -ecdsaPrecomputedPublicKey *ecdsaPrecomputedPublicKeyCreate() -{ - PrecomputedPublicKey *ppub = (PrecomputedPublicKey*)malloc(sizeof(PrecomputedPublicKey)); - if (ppub == 0) return 0; - new(ppub) PrecomputedPublicKey(); - return reinterpret_cast<ecdsaPrecomputedPublicKey*>(ppub); -} - -void ecdsaPrecomputedPublicKeyDestroy(ecdsaPrecomputedPublicKey *ppub) -{ - cast(ppub)->~PrecomputedPublicKey(); - free(ppub); -} - -int ecdsaPrecomputedPublicKeyInit(ecdsaPrecomputedPublicKey *ppub, const ecdsaPublicKey *pub) -{ - bool b; - cast(ppub)->init(&b, *cast(pub)); - return b ?
0 : -1; -} diff --git a/vendor/github.com/dexon-foundation/mcl/src/fp.cpp b/vendor/github.com/dexon-foundation/mcl/src/fp.cpp deleted file mode 100644 index df72d6d07..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/fp.cpp +++ /dev/null @@ -1,646 +0,0 @@ -#include -#include -#include -#include -#include -#ifdef MCL_USE_XBYAK -#include "fp_generator.hpp" -#endif -#include "low_func.hpp" -#ifdef MCL_USE_LLVM -#include "proto.hpp" -#include "low_func_llvm.hpp" -#endif -#include -#include - -#ifdef _MSC_VER - #pragma warning(disable : 4127) -#endif - -namespace mcl { - -namespace fp { - -#ifdef MCL_USE_XBYAK -FpGenerator *Op::createFpGenerator() -{ - return new FpGenerator(); -} -void Op::destroyFpGenerator(FpGenerator *fg) -{ - delete fg; -} -#endif - -inline void setUnitAsLE(void *p, Unit x) -{ -#if MCL_SIZEOF_UNIT == 4 - cybozu::Set32bitAsLE(p, x); -#else - cybozu::Set64bitAsLE(p, x); -#endif -} -inline Unit getUnitAsLE(const void *p) -{ -#if MCL_SIZEOF_UNIT == 4 - return cybozu::Get32bitAsLE(p); -#else - return cybozu::Get64bitAsLE(p); -#endif -} - -const char *ModeToStr(Mode mode) -{ - switch (mode) { - case FP_AUTO: return "auto"; - case FP_GMP: return "gmp"; - case FP_GMP_MONT: return "gmp_mont"; - case FP_LLVM: return "llvm"; - case FP_LLVM_MONT: return "llvm_mont"; - case FP_XBYAK: return "xbyak"; - default: - assert(0); - return 0; - } -} - -Mode StrToMode(const char *s) -{ - static const struct { - const char *s; - Mode mode; - } tbl[] = { - { "auto", FP_AUTO }, - { "gmp", FP_GMP }, - { "gmp_mont", FP_GMP_MONT }, - { "llvm", FP_LLVM }, - { "llvm_mont", FP_LLVM_MONT }, - { "xbyak", FP_XBYAK }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - if (strcmp(s, tbl[i].s) == 0) return tbl[i].mode; - } - return FP_AUTO; -} - -bool isEnableJIT() -{ -#if defined(MCL_USE_XBYAK) - /* -1:not init, 0:disable, 1:enable */ - static int status = -1; - if (status == -1) { -#ifndef _MSC_VER - status = 1; - FILE *fp = fopen("/sys/fs/selinux/enforce", "rb"); - if (fp) { - char c; - if (fread(&c, 1, 1, fp) == 1 && c == '1') { - status = 0; - } - fclose(fp); - } -#endif - if (status != 0) { - MIE_ALIGN(4096) char buf[4096]; - bool ret = Xbyak::CodeArray::protect(buf, sizeof(buf), true); - status = ret ? 
1 : 0; - if (ret) { - Xbyak::CodeArray::protect(buf, sizeof(buf), false); - } - } - } - return status != 0; -#else - return false; -#endif -} - -uint32_t sha256(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize) -{ - return (uint32_t)cybozu::Sha256().digest(out, maxOutSize, msg, msgSize); -} - -uint32_t sha512(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize) -{ - return (uint32_t)cybozu::Sha512().digest(out, maxOutSize, msg, msgSize); -} - -#ifndef MCL_USE_VINT -static inline void set_mpz_t(mpz_t& z, const Unit* p, int n) -{ - int s = n; - while (s > 0) { - if (p[s - 1]) break; - s--; - } - z->_mp_alloc = n; - z->_mp_size = s; - z->_mp_d = (mp_limb_t*)const_cast(p); -} -#endif - -/* - y = (1/x) mod op.p -*/ -static inline void fp_invOpC(Unit *y, const Unit *x, const Op& op) -{ - const int N = (int)op.N; - bool b = false; -#ifdef MCL_USE_VINT - Vint vx, vy, vp; - vx.setArray(&b, x, N); - assert(b); (void)b; - vp.setArray(&b, op.p, N); - assert(b); (void)b; - Vint::invMod(vy, vx, vp); - vy.getArray(&b, y, N); - assert(b); (void)b; -#else - mpz_class my; - mpz_t mx, mp; - set_mpz_t(mx, x, N); - set_mpz_t(mp, op.p, N); - mpz_invert(my.get_mpz_t(), mx, mp); - gmp::getArray(&b, y, N, my); - assert(b); -#endif -} - -/* - inv(xR) = (1/x)R^-1 -toMont-> 1/x -toMont-> (1/x)R -*/ -static void fp_invMontOpC(Unit *y, const Unit *x, const Op& op) -{ - fp_invOpC(y, x, op); - op.fp_mul(y, y, op.R3, op.p); -} - -/* - large (N * 2) specification of AddPre, SubPre -*/ -template -struct SetFpDbl { - static inline void exec(Op&) {} -}; - -template -struct SetFpDbl { - static inline void exec(Op& op) - { -// if (!op.isFullBit) { - op.fpDbl_addPre = AddPre::f; - op.fpDbl_subPre = SubPre::f; -// } - } -}; - -template -void setOp2(Op& op) -{ - op.fp_shr1 = Shr1::f; - op.fp_neg = Neg::f; - if (op.isFullBit) { - op.fp_add = Add::f; - op.fp_sub = Sub::f; - } else { - op.fp_add = Add::f; - op.fp_sub = Sub::f; - } - if (op.isMont) { - if (op.isFullBit) { - op.fp_mul = Mont::f; - op.fp_sqr = SqrMont::f; - } else { - op.fp_mul = Mont::f; - op.fp_sqr = SqrMont::f; - } - op.fpDbl_mod = MontRed::f; - } else { - op.fp_mul = Mul::f; - op.fp_sqr = Sqr::f; - op.fpDbl_mod = Dbl_Mod::f; - } - op.fp_mulUnit = MulUnit::f; - if (!gmpIsFasterThanLLVM) { - op.fpDbl_mulPre = MulPre::f; - op.fpDbl_sqrPre = SqrPre::f; - } - op.fp_mulUnitPre = MulUnitPre::f; - op.fpN1_mod = N1_Mod::f; - op.fpDbl_add = DblAdd::f; - op.fpDbl_sub = DblSub::f; - op.fp_addPre = AddPre::f; - op.fp_subPre = SubPre::f; - op.fp2_mulNF = Fp2MulNF::f; - SetFpDbl::exec(op); -} - -template -void setOp(Op& op, Mode mode) -{ - // generic setup - op.fp_isZero = isZeroC; - op.fp_clear = clearC; - op.fp_copy = copyC; - if (op.isMont) { - op.fp_invOp = fp_invMontOpC; - } else { - op.fp_invOp = fp_invOpC; - } - setOp2(op); -#ifdef MCL_USE_LLVM - if (mode != fp::FP_GMP && mode != fp::FP_GMP_MONT) { -#if defined(MCL_USE_XBYAK) && CYBOZU_HOST == CYBOZU_HOST_INTEL - const bool gmpIsFasterThanLLVM = false;//(N == 8 && MCL_SIZEOF_UNIT == 8); - Xbyak::util::Cpu cpu; - if (cpu.has(Xbyak::util::Cpu::tBMI2)) { - setOp2(op); - } else -#endif - { - setOp2(op); - } - } -#else - (void)mode; -#endif -} - -#ifdef MCL_USE_XBYAK -inline void invOpForMontC(Unit *y, const Unit *x, const Op& op) -{ - Unit r[maxUnitSize]; - int k = op.fp_preInv(r, x); - /* - S = UnitBitSize - xr = 2^k - R = 2^(N * S) - get r2^(-k)R^2 = r 2^(N * S * 2 - k) - */ - op.fp_mul(y, r, op.invTbl.data() + k * op.N, op.p); -} - -static void initInvTbl(Op& op) -{ - const size_t N = op.N; - 
const Unit *p = op.p; - const size_t invTblN = N * sizeof(Unit) * 8 * 2; - op.invTbl.resize(invTblN * N); - Unit *tbl = op.invTbl.data() + (invTblN - 1) * N; - Unit t[maxUnitSize] = {}; - t[0] = 2; - op.toMont(tbl, t); - for (size_t i = 0; i < invTblN - 1; i++) { - op.fp_add(tbl - N, tbl, tbl, p); - tbl -= N; - } -} -#endif - -static bool initForMont(Op& op, const Unit *p, Mode mode) -{ - const size_t N = op.N; - bool b; - { - mpz_class t = 1, R; - gmp::getArray(&b, op.one, N, t); - if (!b) return false; - R = (t << (N * UnitBitSize)) % op.mp; - t = (R * R) % op.mp; - gmp::getArray(&b, op.R2, N, t); - if (!b) return false; - t = (t * R) % op.mp; - gmp::getArray(&b, op.R3, N, t); - if (!b) return false; - } - op.rp = getMontgomeryCoeff(p[0]); - if (mode != FP_XBYAK) return true; -#ifdef MCL_USE_XBYAK - if (op.fg == 0) op.fg = Op::createFpGenerator(); - bool useXbyak = op.fg->init(op); - - if (useXbyak && op.isMont && N <= 4) { - op.fp_invOp = &invOpForMontC; - initInvTbl(op); - } -#endif - return true; -} - -bool Op::init(const mpz_class& _p, size_t maxBitSize, int _xi_a, Mode mode, size_t mclMaxBitSize) -{ - if (mclMaxBitSize != MCL_MAX_BIT_SIZE) return false; -#ifdef MCL_USE_VINT - assert(sizeof(mcl::vint::Unit) == sizeof(Unit)); -#else - assert(sizeof(mp_limb_t) == sizeof(Unit)); -#endif - if (maxBitSize > MCL_MAX_BIT_SIZE) return false; - if (_p <= 0) return false; - clear(); - maxN = (maxBitSize + fp::UnitBitSize - 1) / fp::UnitBitSize; - N = gmp::getUnitSize(_p); - if (N > maxN) return false; - { - bool b; - gmp::getArray(&b, p, N, _p); - if (!b) return false; - } - mp = _p; - bitSize = gmp::getBitSize(mp); - pmod4 = gmp::getUnit(mp, 0) % 4; - this->xi_a = _xi_a; -/* - priority : MCL_USE_XBYAK > MCL_USE_LLVM > none - Xbyak > llvm_mont > llvm > gmp_mont > gmp -*/ -#ifdef MCL_USE_XBYAK - if (mode == FP_AUTO) mode = FP_XBYAK; - if (mode == FP_XBYAK && bitSize > 384) { - mode = FP_AUTO; - } - if (!isEnableJIT()) { - mode = FP_AUTO; - } -#else - if (mode == FP_XBYAK) mode = FP_AUTO; -#endif -#ifdef MCL_USE_LLVM - if (mode == FP_AUTO) mode = FP_LLVM_MONT; -#else - if (mode == FP_LLVM || mode == FP_LLVM_MONT) mode = FP_AUTO; -#endif - if (mode == FP_AUTO) mode = FP_GMP_MONT; - isMont = mode == FP_GMP_MONT || mode == FP_LLVM_MONT || mode == FP_XBYAK; -#if 0 - fprintf(stderr, "mode=%s, isMont=%d, maxBitSize=%d" -#ifdef MCL_USE_XBYAK - " MCL_USE_XBYAK" -#endif -#ifdef MCL_USE_LLVM - " MCL_USE_LLVM" -#endif - "\n", ModeToStr(mode), isMont, (int)maxBitSize); -#endif - isFullBit = (bitSize % UnitBitSize) == 0; - -#if defined(MCL_USE_LLVM) || defined(MCL_USE_XBYAK) - if ((mode == FP_AUTO || mode == FP_LLVM || mode == FP_XBYAK) - && mp == mpz_class("0xfffffffffffffffffffffffffffffffeffffffffffffffff")) { - primeMode = PM_NIST_P192; - isMont = false; - isFastMod = true; - } - if ((mode == FP_AUTO || mode == FP_LLVM || mode == FP_XBYAK) - && mp == mpz_class("0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) { - primeMode = PM_NIST_P521; - isMont = false; - isFastMod = true; - } -#endif -#if defined(MCL_USE_VINT) && MCL_SIZEOF_UNIT == 8 - { - const char *secp256k1Str = "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"; - bool b; - mpz_class secp256k1; - gmp::setStr(&b, secp256k1, secp256k1Str); - if (b && mp == secp256k1) { - primeMode = PM_SECP256K1; - isMont = false; - isFastMod = true; - } - } -#endif - switch (N) { - case 1: setOp<1>(*this, mode); break; - case 2: setOp<2>(*this, mode); break; 
- case 3: setOp<3>(*this, mode); break; - case 4: setOp<4>(*this, mode); break; // 256 if 64-bit -#if MCL_MAX_UNIT_SIZE >= 6 - case 5: setOp<5>(*this, mode); break; - case 6: setOp<6>(*this, mode); break; -#endif -#if MCL_MAX_UNIT_SIZE >= 8 - case 7: setOp<7>(*this, mode); break; - case 8: setOp<8>(*this, mode); break; -#endif -#if MCL_MAX_UNIT_SIZE >= 9 - case 9: setOp<9>(*this, mode); break; // 521 if 64-bit -#endif -#if MCL_MAX_UNIT_SIZE >= 10 - case 10: setOp<10>(*this, mode); break; -#endif -#if MCL_MAX_UNIT_SIZE >= 12 - case 11: setOp<11>(*this, mode); break; - case 12: setOp<12>(*this, mode); break; // 768 if 64-bit -#endif -#if MCL_MAX_UNIT_SIZE >= 14 - case 13: setOp<13>(*this, mode); break; - case 14: setOp<14>(*this, mode); break; -#endif -#if MCL_MAX_UNIT_SIZE >= 16 - case 15: setOp<15>(*this, mode); break; - case 16: setOp<16>(*this, mode); break; // 1024 if 64-bit -#endif -#if MCL_MAX_UNIT_SIZE >= 17 - case 17: setOp<17>(*this, mode); break; // 521 if 32-bit -#endif - default: - return false; - } -#ifdef MCL_USE_LLVM - if (primeMode == PM_NIST_P192) { - fp_mul = &mcl_fp_mulNIST_P192L; - fp_sqr = &mcl_fp_sqr_NIST_P192L; - fpDbl_mod = &mcl_fpDbl_mod_NIST_P192L; - } - if (primeMode == PM_NIST_P521) { - fpDbl_mod = &mcl_fpDbl_mod_NIST_P521L; - } -#endif -#if defined(MCL_USE_VINT) && MCL_SIZEOF_UNIT == 8 - if (primeMode == PM_SECP256K1) { - fp_mul = &mcl::vint::mcl_fp_mul_SECP256K1; - fp_sqr = &mcl::vint::mcl_fp_sqr_SECP256K1; - fpDbl_mod = &mcl::vint::mcl_fpDbl_mod_SECP256K1; - } -#endif - if (N * UnitBitSize <= 256) { - hash = sha256; - } else { - hash = sha512; - } - { - bool b; - sq.set(&b, mp); - if (!b) return false; - } - modp.init(mp); - return fp::initForMont(*this, p, mode); -} - -void copyUnitToByteAsLE(uint8_t *dst, const Unit *src, size_t byteSize) -{ - while (byteSize >= sizeof(Unit)) { - setUnitAsLE(dst, *src++); - dst += sizeof(Unit); - byteSize -= sizeof(Unit); - } - if (byteSize == 0) return; - Unit x = *src; - while (byteSize) { - *dst++ = static_cast(x); - x >>= 8; - byteSize--; - } -} - -void copyByteToUnitAsLE(Unit *dst, const uint8_t *src, size_t byteSize) -{ - while (byteSize >= sizeof(Unit)) { - *dst++ = getUnitAsLE(src); - src += sizeof(Unit); - byteSize -= sizeof(Unit); - } - if (byteSize == 0) return; - Unit x = 0; - for (size_t i = 0; i < byteSize; i++) { - x |= Unit(src[i]) << (i * 8); - } - *dst = x; -} - -#ifndef CYBOZU_DONT_USE_STRING -int detectIoMode(int ioMode, const std::ios_base& ios) -{ - if (ioMode & ~IoPrefix) return ioMode; - // IoAuto or IoPrefix - const std::ios_base::fmtflags f = ios.flags(); - assert(!(f & std::ios_base::oct)); - ioMode |= (f & std::ios_base::hex) ? 
IoHex : 0; - if (f & std::ios_base::showbase) { - ioMode |= IoPrefix; - } - return ioMode; -} -#endif - -bool copyAndMask(Unit *y, const void *x, size_t xByteSize, const Op& op, MaskMode maskMode) -{ - const size_t fpByteSize = sizeof(Unit) * op.N; - if (maskMode == Mod) { - if (xByteSize > fpByteSize * 2) return false; - mpz_class mx; - bool b; - gmp::setArray(&b, mx, (const char*)x, xByteSize); - if (!b) return false; -#ifdef MCL_USE_VINT - op.modp.modp(mx, mx); -#else - mx %= op.mp; -#endif - const Unit *pmx = gmp::getUnit(mx); - size_t i = 0; - for (const size_t n = gmp::getUnitSize(mx); i < n; i++) { - y[i] = pmx[i]; - } - for (; i < op.N; i++) { - y[i] = 0; - } - return true; - } - if (xByteSize > fpByteSize) { - if (maskMode == NoMask) return false; - xByteSize = fpByteSize; - } - // QQQ : fixed later for big endian - copyByteToUnitAsLE(y, (const uint8_t*)x, xByteSize); - for (size_t i = (xByteSize + sizeof(Unit) - 1) / sizeof(Unit); i < op.N; i++) { - y[i] = 0; - } - if (maskMode == mcl::fp::SmallMask || maskMode == mcl::fp::MaskAndMod) { - maskArray(y, op.N, op.bitSize); - } - if (isGreaterOrEqualArray(y, op.p, op.N)) { - switch (maskMode) { - case mcl::fp::NoMask: return false; - case mcl::fp::SmallMask: - maskArray(y, op.N, op.bitSize - 1); - break; - case mcl::fp::MaskAndMod: - default: - op.fp_subPre(y, y, op.p); - break; - } - } - assert(isLessArray(y, op.p, op.N)); - return true; -} - -static bool isInUint64(uint64_t *pv, const fp::Block& b) -{ - assert(fp::UnitBitSize == 32 || fp::UnitBitSize == 64); - const size_t start = 64 / fp::UnitBitSize; - for (size_t i = start; i < b.n; i++) { - if (b.p[i]) return false; - } -#if MCL_SIZEOF_UNIT == 4 - *pv = b.p[0] | (uint64_t(b.p[1]) << 32); -#else - *pv = b.p[0]; -#endif - return true; -} - -uint64_t getUint64(bool *pb, const fp::Block& b) -{ - uint64_t v; - if (isInUint64(&v, b)) { - *pb = true; - return v; - } - *pb = false; - return 0; -} - -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable : 4146) -#endif - -int64_t getInt64(bool *pb, fp::Block& b, const fp::Op& op) -{ - bool isNegative = false; - if (fp::isGreaterOrEqualArray(b.p, op.half, op.N)) { - op.fp_neg(b.v_, b.p, op.p); - b.p = b.v_; - isNegative = true; - } - uint64_t v; - if (fp::isInUint64(&v, b)) { - const uint64_t c = uint64_t(1) << 63; - if (isNegative) { - if (v <= c) { // include c - *pb = true; - // -1 << 63 - if (v == c) return int64_t(-9223372036854775807ll - 1); - return int64_t(-v); - } - } else { - if (v < c) { // not include c - *pb = true; - return int64_t(v); - } - } - } - *pb = false; - return 0; -} - -#ifdef _MSC_VER - #pragma warning(pop) -#endif - -} } // mcl::fp - diff --git a/vendor/github.com/dexon-foundation/mcl/src/fp_generator.hpp b/vendor/github.com/dexon-foundation/mcl/src/fp_generator.hpp deleted file mode 100644 index b496bc4d4..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/fp_generator.hpp +++ /dev/null @@ -1,3885 +0,0 @@ -#pragma once -/** - @file - @brief Fp generator - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#if CYBOZU_HOST == CYBOZU_HOST_INTEL -#define XBYAK_NO_OP_NAMES -#define XBYAK_DISABLE_AVX512 -#include "xbyak/xbyak_util.h" - -#if MCL_SIZEOF_UNIT == 8 -#include -#include -#include -#include - -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable : 4127) - #pragma warning(disable : 4458) -#endif - -namespace mcl { - -namespace fp_gen_local { - -class MemReg { - const Xbyak::Reg64 *r_; - const Xbyak::RegExp 
*m_; - size_t offset_; -public: - MemReg(const Xbyak::Reg64 *r, const Xbyak::RegExp *m, size_t offset) : r_(r), m_(m), offset_(offset) {} - bool isReg() const { return r_ != 0; } - const Xbyak::Reg64& getReg() const { return *r_; } - Xbyak::RegExp getMem() const { return *m_ + offset_ * sizeof(size_t); } -}; - -struct MixPack { - static const size_t useAll = 100; - Xbyak::util::Pack p; - Xbyak::RegExp m; - size_t mn; - MixPack() : mn(0) {} - MixPack(Xbyak::util::Pack& remain, size_t& rspPos, size_t n, size_t useRegNum = useAll) - { - init(remain, rspPos, n, useRegNum); - } - void init(Xbyak::util::Pack& remain, size_t& rspPos, size_t n, size_t useRegNum = useAll) - { - size_t pn = std::min(remain.size(), n); - if (useRegNum != useAll && useRegNum < pn) pn = useRegNum; - this->mn = n - pn; - this->m = Xbyak::util::rsp + rspPos; - this->p = remain.sub(0, pn); - remain = remain.sub(pn); - rspPos += mn * 8; - } - size_t size() const { return p.size() + mn; } - bool isReg(size_t n) const { return n < p.size(); } - const Xbyak::Reg64& getReg(size_t n) const - { - assert(n < p.size()); - return p[n]; - } - Xbyak::RegExp getMem(size_t n) const - { - const size_t pn = p.size(); - assert(pn <= n && n < size()); - return m + (int)((n - pn) * sizeof(size_t)); - } - MemReg operator[](size_t n) const - { - const size_t pn = p.size(); - return MemReg((n < pn) ? &p[n] : 0, (n < pn) ? 0 : &m, n - pn); - } - void removeLast() - { - if (!size()) throw cybozu::Exception("MixPack:removeLast:empty"); - if (mn > 0) { - mn--; - } else { - p = p.sub(0, p.size() - 1); - } - } - /* - replace Mem with r if possible - */ - bool replaceMemWith(Xbyak::CodeGenerator *code, const Xbyak::Reg64& r) - { - if (mn == 0) return false; - p.append(r); - code->mov(r, code->ptr [m]); - m = m + 8; - mn--; - return true; - } -}; - -} // fp_gen_local - -/* - op(r, rm); - r : reg - rm : Reg/Mem -*/ -#define MCL_FP_GEN_OP_RM(op, r, rm) \ -if (rm.isReg()) { \ - op(r, rm.getReg()); \ -} else { \ - op(r, qword [rm.getMem()]); \ -} - -/* - op(rm, r); - rm : Reg/Mem - r : reg -*/ -#define MCL_FP_GEN_OP_MR(op, rm, r) \ -if (rm.isReg()) { \ - op(rm.getReg(), r); \ -} else { \ - op(qword [rm.getMem()], r); \ -} - -namespace fp { - -struct Profiler { - FILE *fp_; - const char *suf_; - const uint8_t *prev_; - Profiler() - : fp_(0) - , suf_(0) - , prev_(0) - { - } - void init(const char *suf, const uint8_t *prev) - { -#ifdef __linux__ - close(); - const char *s = getenv("MCL_PERF"); - if (s == 0 || strcmp(s, "1") != 0) return; - fprintf(stderr, "use perf suf=%s\n", suf); - suf_ = suf; - const int pid = getpid(); - char name[128]; - snprintf(name, sizeof(name), "/tmp/perf-%d.map", pid); - fp_ = fopen(name, "wb"); - if (fp_ == 0) throw cybozu::Exception("PerMap") << name; - prev_ = prev; -#else - (void)suf; - (void)prev; -#endif - } - ~Profiler() - { - close(); - } - void close() - { -#ifdef __linux__ - if (fp_ == 0) return; - fclose(fp_); - fp_ = 0; - prev_ = 0; -#endif - } - void set(const uint8_t *p, size_t n, const char *name) const - { -#ifdef __linux__ - if (fp_ == 0) return; - fprintf(fp_, "%llx %zx %s%s\n", (long long)p, n, name, suf_); -#else - (void)p; - (void)n; - (void)name; -#endif - } - void set(const char *name, const uint8_t *cur) - { -#ifdef __linux__ - set(prev_, cur - prev_, name); - prev_ = cur; -#else - (void)name; - (void)cur; -#endif - } -}; - -struct FpGenerator : Xbyak::CodeGenerator { - typedef Xbyak::RegExp RegExp; - typedef Xbyak::Reg64 Reg64; - typedef Xbyak::Xmm Xmm; - typedef Xbyak::Operand Operand; - typedef 
Xbyak::Label Label; - typedef Xbyak::util::StackFrame StackFrame; - typedef Xbyak::util::Pack Pack; - typedef fp_gen_local::MixPack MixPack; - typedef fp_gen_local::MemReg MemReg; - static const int UseRDX = Xbyak::util::UseRDX; - static const int UseRCX = Xbyak::util::UseRCX; - /* - classes to calculate offset and size - */ - struct Ext1 { - Ext1(int FpByte, const Reg64& r, int n = 0) - : r_(r) - , n_(n) - , next(FpByte + n) - { - } - operator RegExp() const { return r_ + n_; } - const Reg64& r_; - const int n_; - const int next; - private: - Ext1(const Ext1&); - void operator=(const Ext1&); - }; - struct Ext2 { - Ext2(int FpByte, const Reg64& r, int n = 0) - : r_(r) - , n_(n) - , next(FpByte * 2 + n) - , a(FpByte, r, n) - , b(FpByte, r, n + FpByte) - { - } - operator RegExp() const { return r_ + n_; } - const Reg64& r_; - const int n_; - const int next; - Ext1 a; - Ext1 b; - private: - Ext2(const Ext2&); - void operator=(const Ext2&); - }; - Xbyak::util::Cpu cpu_; - bool useMulx_; - bool useAdx_; - const Reg64& gp0; - const Reg64& gp1; - const Reg64& gp2; - const Reg64& gt0; - const Reg64& gt1; - const Reg64& gt2; - const Reg64& gt3; - const Reg64& gt4; - const Reg64& gt5; - const Reg64& gt6; - const Reg64& gt7; - const Reg64& gt8; - const Reg64& gt9; - const mcl::fp::Op *op_; - Label pL_; // pointer to p - // the following labels assume sf(this, 3, 10 | UseRDX) - Label mulPreL; - Label fpDbl_modL; - Label fp_mulL; - const uint64_t *p_; - uint64_t rp_; - int pn_; - int FpByte_; - bool isFullBit_; - Profiler prof_; - - /* - @param op [in] ; use op.p, op.N, op.isFullBit - */ - FpGenerator() - : CodeGenerator(4096 * 9, Xbyak::DontSetProtectRWE) -#ifdef XBYAK64_WIN - , gp0(rcx) - , gp1(r11) - , gp2(r8) - , gt0(r9) - , gt1(r10) - , gt2(rdi) - , gt3(rsi) -#else - , gp0(rdi) - , gp1(rsi) - , gp2(r11) - , gt0(rcx) - , gt1(r8) - , gt2(r9) - , gt3(r10) -#endif - , gt4(rbx) - , gt5(rbp) - , gt6(r12) - , gt7(r13) - , gt8(r14) - , gt9(r15) - , op_(0) - , p_(0) - , rp_(0) - , pn_(0) - , FpByte_(0) - { - useMulx_ = cpu_.has(Xbyak::util::Cpu::tBMI2); - useAdx_ = cpu_.has(Xbyak::util::Cpu::tADX); - } - bool init(Op& op) - { - if (!cpu_.has(Xbyak::util::Cpu::tAVX)) return false; - reset(); // reset jit code for reuse - setProtectModeRW(); // read/write memory - init_inner(op); -// printf("code size=%d\n", (int)getSize()); - setProtectModeRE(); // set read/exec memory - return true; - } -private: - void init_inner(Op& op) - { - op_ = &op; - L(pL_); - p_ = reinterpret_cast(getCurr()); - for (size_t i = 0; i < op.N; i++) { - dq(op.p[i]); - } - rp_ = fp::getMontgomeryCoeff(p_[0]); - pn_ = (int)op.N; - FpByte_ = int(op.maxN * sizeof(uint64_t)); - isFullBit_ = op.isFullBit; -// printf("p=%p, pn_=%d, isFullBit_=%d\n", p_, pn_, isFullBit_); - static char suf[] = "_0"; - prof_.init(suf, getCurr()); - suf[1]++; - - op.fp_addPre = gen_addSubPre(true, pn_); - prof_.set("Fp_addPre", getCurr()); - - op.fp_subPre = gen_addSubPre(false, pn_); - prof_.set("Fp_subPre", getCurr()); - - op.fp_addA_ = gen_fp_add(); - prof_.set("Fp_add", getCurr()); - - op.fp_subA_ = gen_fp_sub(); - prof_.set("Fp_sub", getCurr()); - - op.fp_shr1 = gen_shr1(); - prof_.set("Fp_shr1", getCurr()); - - op.fp_negA_ = gen_fp_neg(); - prof_.set("Fp_neg", getCurr()); - - op.fpDbl_addA_ = gen_fpDbl_add(); - prof_.set("FpDbl_add", getCurr()); - - op.fpDbl_subA_ = gen_fpDbl_sub(); - prof_.set("FpDbl_sub", getCurr()); - - op.fpDbl_addPre = gen_addSubPre(true, pn_ * 2); - prof_.set("FpDbl_addPre", getCurr()); - - op.fpDbl_subPre = gen_addSubPre(false, pn_ * 
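/*
  Illustrative sketch (an inference from how rp_ is used in the reduction code
  below, not mcl's getMontgomeryCoeff itself): rp = -p^{-1} mod 2^64 for the
  lowest limb of an odd modulus p. Hensel lifting doubles the number of correct
  low bits per step, and inv = p0 is already correct mod 8 for odd p0, so five
  steps reach 64 bits.
*/
#include <cstdint>

uint64_t montgomeryCoeff(uint64_t p0) // p0 = lowest limb of p, must be odd
{
	uint64_t inv = p0;            // inverse of p0 mod 2^3
	for (int i = 0; i < 5; i++) {
		inv *= 2 - p0 * inv;  // Newton step: 3 -> 6 -> 12 -> 24 -> 48 -> 96 bits
	}
	return 0 - inv;               // so q = c0 * rp makes c0 + q * p0 = 0 mod 2^64
}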
2); - prof_.set("FpDbl_subPre", getCurr()); - - op.fpDbl_mulPreA_ = gen_fpDbl_mulPre(); - prof_.set("FpDbl_mulPre", getCurr()); - - op.fpDbl_sqrPreA_ = gen_fpDbl_sqrPre(); - prof_.set("FpDbl_sqrPre", getCurr()); - - op.fpDbl_modA_ = gen_fpDbl_mod(op); - prof_.set("FpDbl_mod", getCurr()); - - op.fp_mulA_ = gen_mul(); - prof_.set("Fp_mul", getCurr()); - if (op.fp_mulA_) { - op.fp_mul = fp::func_ptr_cast(op.fp_mulA_); // used in toMont/fromMont - } - op.fp_sqrA_ = gen_sqr(); - prof_.set("Fp_sqr", getCurr()); - - if (op.primeMode != PM_NIST_P192 && op.N <= 4) { // support general op.N but not fast for op.N > 4 - align(16); - op.fp_preInv = getCurr(); - gen_preInv(); - prof_.set("preInv", getCurr()); - } - if (op.xi_a == 0) return; // Fp2 is not used - op.fp2_addA_ = gen_fp2_add(); - prof_.set("Fp2_add", getCurr()); - - op.fp2_subA_ = gen_fp2_sub(); - prof_.set("Fp2_sub", getCurr()); - - op.fp2_negA_ = gen_fp2_neg(); - prof_.set("Fp2_neg", getCurr()); - - op.fp2_mulNF = 0; - op.fp2Dbl_mulPreA_ = gen_fp2Dbl_mulPre(); - prof_.set("Fp2Dbl_mulPre", getCurr()); - - op.fp2Dbl_sqrPreA_ = gen_fp2Dbl_sqrPre(); - prof_.set("Fp2Dbl_sqrPre", getCurr()); - - op.fp2_mulA_ = gen_fp2_mul(); - prof_.set("Fp2_mul", getCurr()); - - op.fp2_sqrA_ = gen_fp2_sqr(); - prof_.set("Fp2_sqr", getCurr()); - - op.fp2_mul_xiA_ = gen_fp2_mul_xi(); - prof_.set("Fp2_mul_xi", getCurr()); - } - u3u gen_addSubPre(bool isAdd, int n) - { -// if (isFullBit_) return 0; - align(16); - u3u func = getCurr(); - StackFrame sf(this, 3); - if (isAdd) { - gen_raw_add(sf.p[0], sf.p[1], sf.p[2], rax, n); - } else { - gen_raw_sub(sf.p[0], sf.p[1], sf.p[2], rax, n); - } - return func; - } - /* - pz[] = px[] + py[] - */ - void gen_raw_add(const RegExp& pz, const RegExp& px, const RegExp& py, const Reg64& t, int n) - { - mov(t, ptr [px]); - add(t, ptr [py]); - mov(ptr [pz], t); - for (int i = 1; i < n; i++) { - mov(t, ptr [px + i * 8]); - adc(t, ptr [py + i * 8]); - mov(ptr [pz + i * 8], t); - } - } - /* - pz[] = px[] - py[] - */ - void gen_raw_sub(const RegExp& pz, const RegExp& px, const RegExp& py, const Reg64& t, int n) - { - mov(t, ptr [px]); - sub(t, ptr [py]); - mov(ptr [pz], t); - for (int i = 1; i < n; i++) { - mov(t, ptr [px + i * 8]); - sbb(t, ptr [py + i * 8]); - mov(ptr [pz + i * 8], t); - } - } - /* - pz[] = -px[] - use rax, rdx - */ - void gen_raw_neg(const RegExp& pz, const RegExp& px, const Pack& t) - { - Label nonZero, exit; - load_rm(t, px); - mov(rax, t[0]); - if (t.size() > 1) { - for (size_t i = 1; i < t.size(); i++) { - or_(rax, t[i]); - } - } else { - test(rax, rax); - } - jnz(nonZero); - // rax = 0 - for (size_t i = 0; i < t.size(); i++) { - mov(ptr[pz + i * 8], rax); - } - jmp(exit); - L(nonZero); - mov(rax, pL_); - for (size_t i = 0; i < t.size(); i++) { - mov(rdx, ptr [rax + i * 8]); - if (i == 0) { - sub(rdx, t[i]); - } else { - sbb(rdx, t[i]); - } - mov(ptr [pz + i * 8], rdx); - } - L(exit); - } - /* - (rdx:pz[0..n-1]) = px[0..n-1] * y - use t, rax, rdx - if n > 2 - use - wk[0] if useMulx_ - wk[0..n-2] otherwise - */ - void gen_raw_mulUnit(const RegExp& pz, const RegExp& px, const Reg64& y, const MixPack& wk, const Reg64& t, size_t n) - { - if (n == 1) { - mov(rax, ptr [px]); - mul(y); - mov(ptr [pz], rax); - return; - } - if (n == 2) { - mov(rax, ptr [px]); - mul(y); - mov(ptr [pz], rax); - mov(t, rdx); - mov(rax, ptr [px + 8]); - mul(y); - add(rax, t); - adc(rdx, 0); - mov(ptr [pz + 8], rax); - return; - } - if (useMulx_) { - assert(wk.size() > 0 && wk.isReg(0)); - const Reg64& t1 = wk.getReg(0); - // mulx(H, L, x) = 
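/*
  Illustrative sketch (portable C++, hypothetical name) of the add/adc chain
  gen_raw_add emits; gen_raw_sub is the same shape with sub/sbb. unsigned
  __int128 (a GCC/Clang extension) stands in for the carry flag.
*/
#include <cstdint>
#include <cstddef>

uint64_t addN(uint64_t *z, const uint64_t *x, const uint64_t *y, size_t n)
{
	unsigned __int128 c = 0;
	for (size_t i = 0; i < n; i++) {
		c += (unsigned __int128)x[i] + y[i]; // add for i == 0, adc after that
		z[i] = (uint64_t)c;
		c >>= 64;                            // carry into the next limb
	}
	return (uint64_t)c;                          // what CF holds after the chain
}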
[H:L] = x * rdx - mov(rdx, y); - mulx(t1, rax, ptr [px]); // [y:rax] = px * y - mov(ptr [pz], rax); - const Reg64 *pt0 = &t; - const Reg64 *pt1 = &t1; - for (size_t i = 1; i < n - 1; i++) { - mulx(*pt0, rax, ptr [px + i * 8]); - if (i == 1) { - add(rax, *pt1); - } else { - adc(rax, *pt1); - } - mov(ptr [pz + i * 8], rax); - std::swap(pt0, pt1); - } - mulx(rdx, rax, ptr [px + (n - 1) * 8]); - adc(rax, *pt1); - mov(ptr [pz + (n - 1) * 8], rax); - adc(rdx, 0); - return; - } - assert(wk.size() >= n - 1); - for (size_t i = 0; i < n; i++) { - mov(rax, ptr [px + i * 8]); - mul(y); - if (i < n - 1) { - mov(ptr [pz + i * 8], rax); - g_mov(wk[i], rdx); - } - } - for (size_t i = 1; i < n - 1; i++) { - mov(t, ptr [pz + i * 8]); - if (i == 1) { - g_add(t, wk[i - 1]); - } else { - g_adc(t, wk[i - 1]); - } - mov(ptr [pz + i * 8], t); - } - g_adc(rax, wk[n - 2]); - mov(ptr [pz + (n - 1) * 8], rax); - adc(rdx, 0); - } - void gen_mulUnit() - { -// assert(pn_ >= 2); - const int regNum = useMulx_ ? 2 : (1 + std::min(pn_ - 1, 8)); - const int stackSize = useMulx_ ? 0 : (pn_ - 1) * 8; - StackFrame sf(this, 3, regNum | UseRDX, stackSize); - const Reg64& pz = sf.p[0]; - const Reg64& px = sf.p[1]; - const Reg64& y = sf.p[2]; - size_t rspPos = 0; - Pack remain = sf.t.sub(1); - MixPack wk(remain, rspPos, pn_ - 1); - gen_raw_mulUnit(pz, px, y, wk, sf.t[0], pn_); - mov(rax, rdx); - } - /* - pz[] = px[] - */ - void gen_mov(const RegExp& pz, const RegExp& px, const Reg64& t, int n) - { - for (int i = 0; i < n; i++) { - mov(t, ptr [px + i * 8]); - mov(ptr [pz + i * 8], t); - } - } - /* - pz[] = px[] + py[] mod p[] - use rax, t - */ - void gen_raw_fp_add(const RegExp& pz, const RegExp& px, const RegExp& py, const Pack& t, bool withCarry) - { - const Pack& p0 = t.sub(0, pn_); - const Pack& p1 = t.sub(pn_, pn_); - const Reg64 *fullReg = isFullBit_ ? &t[pn_ * 2] : 0; - load_rm(p0, px); - add_rm(p0, py, withCarry); - mov_rr(p1, p0); - if (isFullBit_) { - mov(*fullReg, 0); - adc(*fullReg, 0); - } - mov(rax, pL_); - sub_rm(p1, rax); - if (fullReg) { - sbb(*fullReg, 0); - } - for (size_t i = 0; i < p1.size(); i++) { - cmovc(p1[i], p0[i]); - } - store_mr(pz, p1); - } - /* - pz[] = px[] - py[] mod p[] - use rax, t - */ - void gen_raw_fp_sub(const RegExp& pz, const RegExp& px, const RegExp& py, const Pack& t, bool withCarry) - { - const Pack& p0 = t.sub(0, pn_); - const Pack& p1 = t.sub(pn_, pn_); - load_rm(p0, px); - sub_rm(p0, py, withCarry); - mov(rax, pL_); - load_rm(p1, rax); - sbb(rax, rax); // rax = (x > y) ? 0 : -1 - for (size_t i = 0; i < p1.size(); i++) { - and_(p1[i], rax); - } - add_rr(p0, p1); - store_mr(pz, p0); - } - void gen_fp_add_le4() - { - assert(pn_ <= 4); - const int tn = pn_ * 2 + (isFullBit_ ? 
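/*
  Illustrative sketch (hypothetical helper) of the branchless pattern in
  gen_raw_fp_add: add, tentatively subtract p, then let the borrow select the
  result -- the cmovc loop in the generated code. Assumes x, y < p and p's top
  bit clear (the !isFullBit_ fast path), so x + y fits in n limbs.
*/
#include <cstdint>
#include <cstddef>

void fpAdd(uint64_t *z, const uint64_t *x, const uint64_t *y,
           const uint64_t *p, size_t n) // n <= 8 in this sketch
{
	unsigned __int128 c = 0, b = 0;
	uint64_t d[8];
	for (size_t i = 0; i < n; i++) {         // z = x + y
		c += (unsigned __int128)x[i] + y[i];
		z[i] = (uint64_t)c;
		c >>= 64;
	}
	for (size_t i = 0; i < n; i++) {         // d = z - p, tracking the borrow
		b = (unsigned __int128)z[i] - p[i] - b;
		d[i] = (uint64_t)b;
		b = (b >> 64) & 1;
	}
	uint64_t mask = 0 - (uint64_t)b;         // all ones iff x + y < p
	for (size_t i = 0; i < n; i++) {
		z[i] = (z[i] & mask) | (d[i] & ~mask); // what the cmovc selects do
	}
}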
1 : 0); - StackFrame sf(this, 3, tn); - const Reg64& pz = sf.p[0]; - const Reg64& px = sf.p[1]; - const Reg64& py = sf.p[2]; - gen_raw_fp_add(pz, px, py, sf.t, false); - } - void gen_fp_sub_le4() - { - assert(pn_ <= 4); - const int tn = pn_ * 2; - StackFrame sf(this, 3, tn); - const Reg64& pz = sf.p[0]; - const Reg64& px = sf.p[1]; - const Reg64& py = sf.p[2]; - gen_raw_fp_sub(pz, px, py, sf.t, false); - } - /* - add(pz, px, py); - size of t1, t2 == 6 - destroy t0, t1 - */ - void gen_raw_fp_add6(const RegExp& pz, const RegExp& px, const RegExp& py, const Pack& t1, const Pack& t2, bool withCarry) - { - load_rm(t1, px); - add_rm(t1, py, withCarry); - Label exit; - if (isFullBit_) { - jnc("@f"); - mov(t2[0], pL_); // t2 is not used - sub_rm(t1, t2[0]); - jmp(exit); - L("@@"); - } - mov_rr(t2, t1); - sub_rm(t2, rip + pL_); - for (int i = 0; i < 6; i++) { - cmovnc(t1[i], t2[i]); - } - L(exit); - store_mr(pz, t1); - } - void gen_fp_add6() - { - /* - cmov is faster than jmp - */ - StackFrame sf(this, 3, 10); - const Reg64& pz = sf.p[0]; - const Reg64& px = sf.p[1]; - const Reg64& py = sf.p[2]; - Pack t1 = sf.t.sub(0, 6); - Pack t2 = sf.t.sub(6); - t2.append(rax); - t2.append(px); // destory after used - gen_raw_fp_add6(pz, px, py, t1, t2, false); - } - void3u gen_fp_add() - { - align(16); - void3u func = getCurr(); - if (pn_ <= 4) { - gen_fp_add_le4(); - return func; - } - if (pn_ == 6) { - gen_fp_add6(); - return func; - } - StackFrame sf(this, 3, 0, pn_ * 8); - const Reg64& pz = sf.p[0]; - const Reg64& px = sf.p[1]; - const Reg64& py = sf.p[2]; - const Xbyak::CodeGenerator::LabelType jmpMode = pn_ < 5 ? T_AUTO : T_NEAR; - - inLocalLabel(); - gen_raw_add(pz, px, py, rax, pn_); - mov(px, pL_); // destroy px - if (isFullBit_) { - jc(".over", jmpMode); - } -#ifdef MCL_USE_JMP - for (int i = 0; i < pn_; i++) { - mov(py, ptr [pz + (pn_ - 1 - i) * 8]); // destroy py - cmp(py, ptr [px + (pn_ - 1 - i) * 8]); - jc(".exit", jmpMode); - jnz(".over", jmpMode); - } - L(".over"); - gen_raw_sub(pz, pz, px, rax, pn_); - L(".exit"); -#else - gen_raw_sub(rsp, pz, px, rax, pn_); - jc(".exit", jmpMode); - gen_mov(pz, rsp, rax, pn_); - if (isFullBit_) { - jmp(".exit", jmpMode); - L(".over"); - gen_raw_sub(pz, pz, px, rax, pn_); - } - L(".exit"); -#endif - outLocalLabel(); - return func; - } - void3u gen_fpDbl_add() - { - align(16); - void3u func = getCurr(); - if (pn_ <= 4) { - int tn = pn_ * 2 + (isFullBit_ ? 
1 : 0); - StackFrame sf(this, 3, tn); - const Reg64& pz = sf.p[0]; - const Reg64& px = sf.p[1]; - const Reg64& py = sf.p[2]; - gen_raw_add(pz, px, py, rax, pn_); - gen_raw_fp_add(pz + 8 * pn_, px + 8 * pn_, py + 8 * pn_, sf.t, true); - return func; - } else if (pn_ == 6 && !isFullBit_) { - StackFrame sf(this, 3, 10); - const Reg64& pz = sf.p[0]; - const Reg64& px = sf.p[1]; - const Reg64& py = sf.p[2]; - gen_raw_add(pz, px, py, rax, pn_); - Pack t1 = sf.t.sub(0, 6); - Pack t2 = sf.t.sub(6); - t2.append(rax); - t2.append(py); - gen_raw_fp_add6(pz + pn_ * 8, px + pn_ * 8, py + pn_ * 8, t1, t2, true); - return func; - } - return 0; - } - void3u gen_fpDbl_sub() - { - align(16); - void3u func = getCurr(); - if (pn_ <= 4) { - int tn = pn_ * 2; - StackFrame sf(this, 3, tn); - const Reg64& pz = sf.p[0]; - const Reg64& px = sf.p[1]; - const Reg64& py = sf.p[2]; - gen_raw_sub(pz, px, py, rax, pn_); - gen_raw_fp_sub(pz + 8 * pn_, px + 8 * pn_, py + 8 * pn_, sf.t, true); - return func; - } else if (pn_ == 6) { - StackFrame sf(this, 3, 4); - const Reg64& pz = sf.p[0]; - const Reg64& px = sf.p[1]; - const Reg64& py = sf.p[2]; - gen_raw_sub(pz, px, py, rax, pn_); - Pack t = sf.t; - t.append(rax); - t.append(px); - gen_raw_fp_sub6(pz, px, py, pn_ * 8, t, true); - return func; - } - return 0; - } - void gen_raw_fp_sub6(const RegExp& pz, const RegExp& px, const RegExp& py, int offset, const Pack& t, bool withCarry) - { - load_rm(t, px + offset); - sub_rm(t, py + offset, withCarry); - /* - jmp is faster than and-mask without jmp - */ - jnc("@f"); - add_rm(t, rip + pL_); - L("@@"); - store_mr(pz + offset, t); - } - void gen_fp_sub6() - { - StackFrame sf(this, 3, 4); - const Reg64& pz = sf.p[0]; - const Reg64& px = sf.p[1]; - const Reg64& py = sf.p[2]; - Pack t = sf.t; - t.append(rax); - t.append(px); // |t| = 6 - gen_raw_fp_sub6(pz, px, py, 0, t, false); - } - void3u gen_fp_sub() - { - align(16); - void3u func = getCurr(); - if (pn_ <= 4) { - gen_fp_sub_le4(); - return func; - } - if (pn_ == 6) { - gen_fp_sub6(); - return func; - } - StackFrame sf(this, 3); - const Reg64& pz = sf.p[0]; - const Reg64& px = sf.p[1]; - const Reg64& py = sf.p[2]; - const Xbyak::CodeGenerator::LabelType jmpMode = pn_ < 5 ? 
T_AUTO : T_NEAR; - Label exit; - gen_raw_sub(pz, px, py, rax, pn_); - jnc(exit, jmpMode); - mov(px, pL_); - gen_raw_add(pz, pz, px, rax, pn_); - L(exit); - return func; - } - void2u gen_fp_neg() - { - align(16); - void2u func = getCurr(); - StackFrame sf(this, 2, UseRDX | pn_); - gen_raw_neg(sf.p[0], sf.p[1], sf.t); - return func; - } - void2u gen_shr1() - { - align(16); - void2u func = getCurr(); - const int c = 1; - StackFrame sf(this, 2, 1); - const Reg64 *t0 = &rax; - const Reg64 *t1 = &sf.t[0]; - const Reg64& pz = sf.p[0]; - const Reg64& px = sf.p[1]; - mov(*t0, ptr [px]); - for (int i = 0; i < pn_ - 1; i++) { - mov(*t1, ptr [px + 8 * (i + 1)]); - shrd(*t0, *t1, c); - mov(ptr [pz + i * 8], *t0); - std::swap(t0, t1); - } - shr(*t0, c); - mov(ptr [pz + (pn_ - 1) * 8], *t0); - return func; - } - void3u gen_mul() - { - align(16); - void3u func = getCurr(); - if (op_->primeMode == PM_NIST_P192) { - StackFrame sf(this, 3, 10 | UseRDX, 8 * 6); - mulPre3(rsp, sf.p[1], sf.p[2], sf.t); - fpDbl_mod_NIST_P192(sf.p[0], rsp, sf.t); - return func; - } - if (pn_ == 3) { - gen_montMul3(); - return func; - } - if (pn_ == 4) { - gen_montMul4(); - return func; - } - if (pn_ == 6 && !isFullBit_ && useMulx_ && useAdx_) { -#if 1 - // a little faster - gen_montMul6(); -#else - if (mulPreL.getAddress() == 0 || fpDbl_modL.getAddress() == 0) return 0; - StackFrame sf(this, 3, 10 | UseRDX, 12 * 8); - /* - use xm3 - rsp - [0, ..12 * 8) ; mul(x, y) - */ - vmovq(xm3, gp0); - mov(gp0, rsp); - call(mulPreL); // gp0, x, y - vmovq(gp0, xm3); - mov(gp1, rsp); - call(fpDbl_modL); -#endif - return func; - } -#if 0 - if (pn_ <= 9) { - gen_montMulN(p_, rp_, pn_); - return func; - } -#endif - return 0; - } - /* - @input (z, xy) - z[1..0] <- montgomery reduction(x[3..0]) - @note destroy rax, rdx, t0, ..., t8 - */ - void gen_fpDbl_mod2() - { - StackFrame sf(this, 2, 9 | UseRDX); - const Reg64& z = sf.p[0]; - const Reg64& xy = sf.p[1]; - - const Reg64& t0 = sf.t[0]; - const Reg64& t1 = sf.t[1]; - const Reg64& t2 = sf.t[2]; - const Reg64& t3 = sf.t[3]; - const Reg64& t4 = sf.t[4]; - const Reg64& t5 = sf.t[5]; - const Reg64& t6 = sf.t[6]; - const Reg64& t7 = sf.t[7]; - const Reg64& t8 = sf.t[8]; - - const Reg64& a = rax; - const Reg64& d = rdx; - - mov(t6, ptr [xy + 8 * 0]); - - mov(a, rp_); - mul(t6); - mov(t0, pL_); - mov(t7, a); // q - - // [d:t7:t1] = p * q - mul2x1(t0, t7, t1, t8); - - xor_(t8, t8); - if (isFullBit_) { - xor_(t5, t5); - } - mov(t4, d); - add(t1, t6); - add_rm(Pack(t8, t4, t7), xy + 8 * 1, true); - // [t8:t4:t7] - if (isFullBit_) { - adc(t5, 0); - } - - mov(a, rp_); - mul(t7); - mov(t6, a); // q - - // [d:t6:xy] = p * q - mul2x1(t0, t6, xy, t3); - - add_rr(Pack(t8, t4, t7), Pack(d, t6, xy)); - // [t8:t4] - if (isFullBit_) { - adc(t5, 0); - } - - mov_rr(Pack(t2, t1), Pack(t8, t4)); - sub_rm(Pack(t8, t4), t0); - if (isFullBit_) { - sbb(t5, 0); - } - cmovc_rr(Pack(t8, t4), Pack(t2, t1)); - store_mr(z, Pack(t8, t4)); - } - /* - @input (z, xy) - z[2..0] <- montgomery reduction(x[5..0]) - @note destroy rax, rdx, t0, ..., t10 - */ - void gen_fpDbl_mod3() - { - StackFrame sf(this, 3, 10 | UseRDX); - const Reg64& z = sf.p[0]; - const Reg64& xy = sf.p[1]; - - const Reg64& t0 = sf.t[0]; - const Reg64& t1 = sf.t[1]; - const Reg64& t2 = sf.t[2]; - const Reg64& t3 = sf.t[3]; - const Reg64& t4 = sf.t[4]; - const Reg64& t5 = sf.t[5]; - const Reg64& t6 = sf.t[6]; - const Reg64& t7 = sf.t[7]; - const Reg64& t8 = sf.t[8]; - const Reg64& t9 = sf.t[9]; - const Reg64& t10 = sf.p[2]; - - const Reg64& a = rax; - const Reg64& d = 
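/*
  Illustrative sketch of what gen_shr1 emits: a 1-bit right shift across limbs.
  shrd funnels the next limb's low bit into the current limb's top bit; the
  portable equivalent:
*/
#include <cstdint>
#include <cstddef>

void shr1(uint64_t *z, const uint64_t *x, size_t n)
{
	for (size_t i = 0; i + 1 < n; i++) {
		z[i] = (x[i] >> 1) | (x[i + 1] << 63); // shrd(x[i], x[i+1], 1)
	}
	z[n - 1] = x[n - 1] >> 1;                      // plain shr on the top limb
}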
rdx; - - mov(t10, ptr [xy + 8 * 0]); - - mov(a, rp_); - mul(t10); - mov(t0, pL_); - mov(t7, a); // q - - // [d:t7:t2:t1] = p * q - mul3x1(t0, t7, t4, t2, t1, t8); - - xor_(t8, t8); - xor_(t9, t9); - if (isFullBit_) { - xor_(t5, t5); - } - mov(t4, d); - add(t1, t10); - add_rm(Pack(t9, t8, t4, t7, t2), xy + 8 * 1, true); - // [t9:t8:t4:t7:t2] - if (isFullBit_) { - adc(t5, 0); - } - - mov(a, rp_); - mul(t2); - mov(t10, a); // q - - // [d:t10:t6:xy] = p * q - mul3x1(t0, t10, t1, t6, xy, t3); - - add_rr(Pack(t8, t4, t7, t2), Pack(d, t10, t6, xy)); - adc(t9, 0); // [t9:t8:t4:t7] - if (isFullBit_) { - adc(t5, 0); - } - - mov(a, rp_); - mul(t7); - mov(t10, a); // q - - // [d:t10:xy:t6] = p * q - mul3x1(t0, t10, t1, xy, t6, t2); - - add_rr(Pack(t9, t8, t4, t7), Pack(d, t10, xy, t6)); - // [t9:t8:t4] - if (isFullBit_) { - adc(t5, 0); - } - - mov_rr(Pack(t2, t1, t10), Pack(t9, t8, t4)); - sub_rm(Pack(t9, t8, t4), t0); - if (isFullBit_) { - sbb(t5, 0); - } - cmovc_rr(Pack(t9, t8, t4), Pack(t2, t1, t10)); - store_mr(z, Pack(t9, t8, t4)); - } - /* - @input (z, xy) - z[3..0] <- montgomery reduction(x[7..0]) - @note destroy rax, rdx, t0, ..., t10, xm0, xm1 - xm2 if isFullBit_ - */ - void gen_fpDbl_mod4(const Reg64& z, const Reg64& xy, const Pack& t, const Reg64& t10) - { - const Reg64& t0 = t[0]; - const Reg64& t1 = t[1]; - const Reg64& t2 = t[2]; - const Reg64& t3 = t[3]; - const Reg64& t4 = t[4]; - const Reg64& t5 = t[5]; - const Reg64& t6 = t[6]; - const Reg64& t7 = t[7]; - const Reg64& t8 = t[8]; - const Reg64& t9 = t[9]; - - const Reg64& a = rax; - const Reg64& d = rdx; - - vmovq(xm0, z); - mov(z, ptr [xy + 8 * 0]); - - mov(a, rp_); - mul(z); - mov(t0, pL_); - mov(t7, a); // q - - // [d:t7:t3:t2:t1] = p * q - mul4x1(t0, t7, t4, t3, t2, t1, t8); - - xor_(t8, t8); - xor_(t9, t9); - xor_(t10, t10); - mov(t4, d); - add(t1, z); - adc(t2, qword [xy + 8 * 1]); - adc(t3, qword [xy + 8 * 2]); - adc(t7, qword [xy + 8 * 3]); - adc(t4, ptr [xy + 8 * 4]); - adc(t8, ptr [xy + 8 * 5]); - adc(t9, ptr [xy + 8 * 6]); - adc(t10, ptr [xy + 8 * 7]); - // [t10:t9:t8:t4:t7:t3:t2] - if (isFullBit_) { - mov(t5, 0); - adc(t5, 0); - vmovq(xm2, t5); - } - - // free z, t0, t1, t5, t6, xy - - mov(a, rp_); - mul(t2); - mov(z, a); // q - - vmovq(xm1, t10); - // [d:z:t5:t6:xy] = p * q - mul4x1(t0, z, t1, t5, t6, xy, t10); - vmovq(t10, xm1); - - add_rr(Pack(t8, t4, t7, t3, t2), Pack(d, z, t5, t6, xy)); - adc(t9, 0); - adc(t10, 0); // [t10:t9:t8:t4:t7:t3] - if (isFullBit_) { - vmovq(t5, xm2); - adc(t5, 0); - vmovq(xm2, t5); - } - - // free z, t0, t1, t2, t5, t6, xy - - mov(a, rp_); - mul(t3); - mov(z, a); // q - - // [d:z:t5:xy:t6] = p * q - mul4x1(t0, z, t1, t5, xy, t6, t2); - - add_rr(Pack(t9, t8, t4, t7, t3), Pack(d, z, t5, xy, t6)); - adc(t10, 0); // c' = [t10:t9:t8:t4:t7] - if (isFullBit_) { - vmovq(t3, xm2); - adc(t3, 0); - } - - // free z, t1, t2, t7, t5, xy, t6 - - mov(a, rp_); - mul(t7); - mov(z, a); // q - - // [d:z:t5:xy:t6] = p * q - mul4x1(t0, z, t1, t5, xy, t6, t2); - - add_rr(Pack(t10, t9, t8, t4, t7), Pack(d, z, t5, xy, t6)); - // [t10:t9:t8:t4] - if (isFullBit_) { - adc(t3, 0); - } - - mov_rr(Pack(t6, t2, t1, z), Pack(t10, t9, t8, t4)); - sub_rm(Pack(t10, t9, t8, t4), t0); - if (isFullBit_) { - sbb(t3, 0); - } - cmovc(t4, z); - cmovc(t8, t1); - cmovc(t9, t2); - cmovc(t10, t6); - - vmovq(z, xm0); - store_mr(z, Pack(t10, t9, t8, t4)); - } - void2u gen_fpDbl_mod(const fp::Op& op) - { - align(16); - void2u func = getCurr(); - if (op.primeMode == PM_NIST_P192) { - StackFrame sf(this, 2, 6 | UseRDX); - 
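/*
  Illustrative sketch (portable, hypothetical names) of the word-serial
  Montgomery reduction that gen_fpDbl_mod2/3/4 unroll: each round picks
  q = c[i] * rp mod 2^64 so that adding q*p zeroes the current low limb, which
  is then dropped; after n rounds one conditional subtract remains. Assumes p's
  top bit clear so no carry escapes the 2n limbs (the isFullBit_ paths in the
  generated code handle that extra bit).
*/
#include <cstdint>
#include <cstddef>

void montRed(uint64_t *z, uint64_t *c /* 2n limbs, clobbered */,
             const uint64_t *p, uint64_t rp, size_t n) // n <= 8 here
{
	for (size_t i = 0; i < n; i++) {
		uint64_t q = c[i] * rp;              // q = c[i] * (-p^{-1}) mod 2^64
		unsigned __int128 t = 0;
		for (size_t j = 0; j < n; j++) {     // c += q * p, shifted by i limbs
			t += (unsigned __int128)q * p[j] + c[i + j];
			c[i + j] = (uint64_t)t;
			t >>= 64;
		}
		for (size_t j = i + n; t != 0 && j < 2 * n; j++) {
			t += c[j];                   // propagate the carry upward
			c[j] = (uint64_t)t;
			t >>= 64;
		}
	}
	unsigned __int128 b = 0;                     // final conditional subtract
	uint64_t d[8];
	for (size_t j = 0; j < n; j++) {
		b = (unsigned __int128)c[n + j] - p[j] - b;
		d[j] = (uint64_t)b;
		b = (b >> 64) & 1;
	}
	for (size_t j = 0; j < n; j++) z[j] = b ? c[n + j] : d[j];
}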
fpDbl_mod_NIST_P192(sf.p[0], sf.p[1], sf.t); - return func; - } -#if 0 - if (op.primeMode == PM_NIST_P521) { - StackFrame sf(this, 2, 8 | UseRDX); - fpDbl_mod_NIST_P521(sf.p[0], sf.p[1], sf.t); - return func; - } -#endif - if (pn_ == 2) { - gen_fpDbl_mod2(); - return func; - } - if (pn_ == 3) { - gen_fpDbl_mod3(); - return func; - } - if (pn_ == 4) { - StackFrame sf(this, 3, 10 | UseRDX, 0, false); - call(fpDbl_modL); - sf.close(); - L(fpDbl_modL); - gen_fpDbl_mod4(gp0, gp1, sf.t, gp2); - ret(); - return func; - } - if (pn_ == 6 && !isFullBit_ && useMulx_ && useAdx_) { - StackFrame sf(this, 3, 10 | UseRDX, 0, false); - call(fpDbl_modL); - sf.close(); - L(fpDbl_modL); - Pack t = sf.t; - t.append(gp2); - gen_fpDbl_mod6(gp0, gp1, t); - ret(); - return func; - } - return 0; - } - void2u gen_sqr() - { - align(16); - void2u func = getCurr(); - if (op_->primeMode == PM_NIST_P192) { - StackFrame sf(this, 3, 10 | UseRDX, 6 * 8); - Pack t = sf.t; - t.append(sf.p[2]); - sqrPre3(rsp, sf.p[1], t); - fpDbl_mod_NIST_P192(sf.p[0], rsp, sf.t); - return func; - } - if (pn_ == 3) { - gen_montSqr3(); - return func; - } - if (pn_ == 4 && useMulx_) { -#if 1 - // sqr(y, x) = mul(y, x, x) -#ifdef XBYAK64_WIN - mov(r8, rdx); -#else - mov(rdx, rsi); -#endif - jmp((const void*)op_->fp_mulA_); -#else // (sqrPre + mod) is slower than mul - StackFrame sf(this, 3, 10 | UseRDX, 8 * 8); - Pack t = sf.t; - t.append(sf.p[2]); - sqrPre4(rsp, sf.p[1], t); - mov(gp0, sf.p[0]); - mov(gp1, rsp); - call(fpDbl_modL); -#endif - return func; - } - if (pn_ == 6 && !isFullBit_ && useMulx_ && useAdx_) { - if (fpDbl_modL.getAddress() == 0) return 0; - StackFrame sf(this, 3, 10 | UseRDX, (12 + 6) * 8); - /* - use xm3 - rsp - [6 * 8, (12 + 6) * 8) ; sqrPre(x, x) - [0..6 * 8) ; stack for sqrPre6 - */ - vmovq(xm3, gp0); - Pack t = sf.t; - t.append(sf.p[2]); - // sqrPre6 uses 6 * 8 bytes stack - sqrPre6(rsp + 6 * 8, sf.p[1], t); - mov(gp0, ptr[rsp + (12 + 6) * 8]); - vmovq(gp0, xm3); - lea(gp1, ptr[rsp + 6 * 8]); - call(fpDbl_modL); - return func; - } - return 0; - } - /* - input (pz[], px[], py[]) - z[] <- montgomery(x[], y[]) - */ - void gen_montMulN(const uint64_t *p, uint64_t pp, int n) - { - assert(1 <= pn_ && pn_ <= 9); - const int regNum = useMulx_ ? 4 : 3 + std::min(n - 1, 7); - const int stackSize = (n * 3 + (isFullBit_ ? 
2 : 1)) * 8; - StackFrame sf(this, 3, regNum | UseRDX, stackSize); - const Reg64& pz = sf.p[0]; - const Reg64& px = sf.p[1]; - const Reg64& py = sf.p[2]; - const Reg64& y = sf.t[0]; - const Reg64& pAddr = sf.t[1]; - const Reg64& t = sf.t[2]; - Pack remain = sf.t.sub(3); - size_t rspPos = 0; - - MixPack pw1(remain, rspPos, n - 1); - const RegExp pw2 = rsp + rspPos; // pw2[0..n-1] - const RegExp pc = pw2 + n * 8; // pc[0..n+1] - mov(pAddr, (size_t)p); - - for (int i = 0; i < n; i++) { - mov(y, ptr [py + i * 8]); - montgomeryN_1(pp, n, pc, px, y, pAddr, t, pw1, pw2, i == 0); - } - // pz[] = pc[] - p[] - gen_raw_sub(pz, pc, pAddr, t, n); - if (isFullBit_) sbb(qword[pc + n * 8], 0); - jnc("@f"); - for (int i = 0; i < n; i++) { - mov(t, ptr [pc + i * 8]); - mov(ptr [pz + i * 8], t); - } - L("@@"); - } - /* - input (z, x, y) = (p0, p1, p2) - z[0..3] <- montgomery(x[0..3], y[0..3]) - destroy gt0, ..., gt9, xm0, xm1, p2 - */ - void gen_montMul4() - { - StackFrame sf(this, 3, 10 | UseRDX, 0, false); - call(fp_mulL); - sf.close(); - const Reg64& p0 = sf.p[0]; - const Reg64& p1 = sf.p[1]; - const Reg64& p2 = sf.p[2]; - - const Reg64& t0 = sf.t[0]; - const Reg64& t1 = sf.t[1]; - const Reg64& t2 = sf.t[2]; - const Reg64& t3 = sf.t[3]; - const Reg64& t4 = sf.t[4]; - const Reg64& t5 = sf.t[5]; - const Reg64& t6 = sf.t[6]; - const Reg64& t7 = sf.t[7]; - const Reg64& t8 = sf.t[8]; - const Reg64& t9 = sf.t[9]; - - L(fp_mulL); - vmovq(xm0, p0); // save p0 - mov(p0, pL_); - vmovq(xm1, p2); - mov(p2, ptr [p2]); - montgomery4_1(rp_, t0, t7, t3, t2, t1, p1, p2, p0, t4, t5, t6, t8, t9, true, xm2); - - vmovq(p2, xm1); - mov(p2, ptr [p2 + 8]); - montgomery4_1(rp_, t1, t0, t7, t3, t2, p1, p2, p0, t4, t5, t6, t8, t9, false, xm2); - - vmovq(p2, xm1); - mov(p2, ptr [p2 + 16]); - montgomery4_1(rp_, t2, t1, t0, t7, t3, p1, p2, p0, t4, t5, t6, t8, t9, false, xm2); - - vmovq(p2, xm1); - mov(p2, ptr [p2 + 24]); - montgomery4_1(rp_, t3, t2, t1, t0, t7, p1, p2, p0, t4, t5, t6, t8, t9, false, xm2); - // [t7:t3:t2:t1:t0] - - mov(t4, t0); - mov(t5, t1); - mov(t6, t2); - mov(rdx, t3); - sub_rm(Pack(t3, t2, t1, t0), p0); - if (isFullBit_) sbb(t7, 0); - cmovc(t0, t4); - cmovc(t1, t5); - cmovc(t2, t6); - cmovc(t3, rdx); - - vmovq(p0, xm0); // load p0 - store_mr(p0, Pack(t3, t2, t1, t0)); - ret(); - } - /* - c[n+2] = c[n+1] + px[n] * rdx - use rax - */ - void mulAdd(const Pack& c, int n, const RegExp& px) - { - const Reg64& a = rax; - xor_(a, a); - for (int i = 0; i < n; i++) { - mulx(c[n + 1], a, ptr [px + i * 8]); - adox(c[i], a); - adcx(c[i + 1], c[n + 1]); - } - mov(a, 0); - mov(c[n + 1], a); - adox(c[n], a); - adcx(c[n + 1], a); - adox(c[n + 1], a); - } - /* - input - c[6..0] - rdx = yi - use rax, rdx - output - c[7..1] - - if first: - c = x[5..0] * rdx - else: - c += x[5..0] * rdx - q = uint64_t(c0 * rp) - c += p * q - c >>= 64 - */ - void montgomery6_1(const Pack& c, const RegExp& px, const Reg64& t0, const Reg64& t1, bool isFirst) - { - const int n = 6; - const Reg64& a = rax; - const Reg64& d = rdx; - if (isFirst) { - const Reg64 *pt0 = &a; - const Reg64 *pt1 = &t0; - // c[6..0] = px[5..0] * rdx - mulx(*pt0, c[0], ptr [px + 0 * 8]); - for (int i = 1; i < n; i++) { - mulx(*pt1, c[i], ptr[px + i * 8]); - if (i == 1) { - add(c[i], *pt0); - } else { - adc(c[i], *pt0); - } - std::swap(pt0, pt1); - } - mov(c[n], 0); - adc(c[n], *pt0); - } else { - // c[7..0] = c[6..0] + px[5..0] * rdx - mulAdd(c, 6, px); - } - mov(a, rp_); - mul(c[0]); // q = a - mov(d, a); - mov(t1, pL_); - // c += p * q - mulAdd(c, 6, t1); - } - /* - input (z, x, 
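/*
  Illustrative sketch of what one mulAdd row computes: c[0..n] becomes
  c[0..n-1] + x[0..n-1] * y. The point of mulx/adox/adcx in the generated code
  is that the products' low and high halves ride two independent carry chains
  (OF and CF), so the loop has no serializing flag dependency; portable C++
  needs only one chain:
*/
#include <cstdint>
#include <cstddef>

uint64_t mulAddRow(uint64_t *c, const uint64_t *x, uint64_t y, size_t n)
{
	unsigned __int128 t = 0;
	for (size_t i = 0; i < n; i++) {
		t += (unsigned __int128)x[i] * y + c[i]; // one mulx + adox/adcx pair
		c[i] = (uint64_t)t;
		t >>= 64;
	}
	return (uint64_t)t; // carry out; the caller folds it into c[n]
}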
y) = (p0, p1, p2) - z[0..5] <- montgomery(x[0..5], y[0..5]) - destroy t0, ..., t9, rax, rdx - */ - void gen_montMul6() - { - assert(!isFullBit_ && useMulx_ && useAdx_); - StackFrame sf(this, 3, 10 | UseRDX, 0, false); - call(fp_mulL); - sf.close(); - const Reg64& pz = sf.p[0]; - const Reg64& px = sf.p[1]; - const Reg64& py = sf.p[2]; - - const Reg64& t0 = sf.t[0]; - const Reg64& t1 = sf.t[1]; - const Reg64& t2 = sf.t[2]; - const Reg64& t3 = sf.t[3]; - const Reg64& t4 = sf.t[4]; - const Reg64& t5 = sf.t[5]; - const Reg64& t6 = sf.t[6]; - const Reg64& t7 = sf.t[7]; - const Reg64& t8 = sf.t[8]; - const Reg64& t9 = sf.t[9]; - L(fp_mulL); - mov(rdx, ptr [py + 0 * 8]); - montgomery6_1(Pack(t7, t6, t5, t4, t3, t2, t1, t0), px, t8, t9, true); - mov(rdx, ptr [py + 1 * 8]); - montgomery6_1(Pack(t0, t7, t6, t5, t4, t3, t2, t1), px, t8, t9, false); - mov(rdx, ptr [py + 2 * 8]); - montgomery6_1(Pack(t1, t0, t7, t6, t5, t4, t3, t2), px, t8, t9, false); - mov(rdx, ptr [py + 3 * 8]); - montgomery6_1(Pack(t2, t1, t0, t7, t6, t5, t4, t3), px, t8, t9, false); - mov(rdx, ptr [py + 4 * 8]); - montgomery6_1(Pack(t3, t2, t1, t0, t7, t6, t5, t4), px, t8, t9, false); - mov(rdx, ptr [py + 5 * 8]); - montgomery6_1(Pack(t4, t3, t2, t1, t0, t7, t6, t5), px, t8, t9, false); - // [t4:t3:t2:t1:t0:t7:t6] - const Pack z = Pack(t3, t2, t1, t0, t7, t6); - const Pack keep = Pack(rdx, rax, px, py, t8, t9); - mov_rr(keep, z); - mov(t5, pL_); - sub_rm(z, t5); - cmovc_rr(z, keep); - store_mr(pz, z); - ret(); - } - /* - input (z, x, y) = (p0, p1, p2) - z[0..2] <- montgomery(x[0..2], y[0..2]) - destroy gt0, ..., gt9, xm0, xm1, p2 - */ - void gen_montMul3() - { - StackFrame sf(this, 3, 10 | UseRDX); - const Reg64& p0 = sf.p[0]; - const Reg64& p1 = sf.p[1]; - const Reg64& p2 = sf.p[2]; - - const Reg64& t0 = sf.t[0]; - const Reg64& t1 = sf.t[1]; - const Reg64& t2 = sf.t[2]; - const Reg64& t3 = sf.t[3]; - const Reg64& t4 = sf.t[4]; - const Reg64& t5 = sf.t[5]; - const Reg64& t6 = sf.t[6]; - const Reg64& t7 = sf.t[7]; - const Reg64& t8 = sf.t[8]; - const Reg64& t9 = sf.t[9]; - - vmovq(xm0, p0); // save p0 - mov(t7, pL_); - mov(t9, ptr [p2]); - // c3, c2, c1, c0, px, y, p, - montgomery3_1(rp_, t0, t3, t2, t1, p1, t9, t7, t4, t5, t6, t8, p0, true); - mov(t9, ptr [p2 + 8]); - montgomery3_1(rp_, t1, t0, t3, t2, p1, t9, t7, t4, t5, t6, t8, p0, false); - - mov(t9, ptr [p2 + 16]); - montgomery3_1(rp_, t2, t1, t0, t3, p1, t9, t7, t4, t5, t6, t8, p0, false); - - // [(t3):t2:t1:t0] - mov(t4, t0); - mov(t5, t1); - mov(t6, t2); - sub_rm(Pack(t2, t1, t0), t7); - if (isFullBit_) sbb(t3, 0); - cmovc(t0, t4); - cmovc(t1, t5); - cmovc(t2, t6); - vmovq(p0, xm0); - store_mr(p0, Pack(t2, t1, t0)); - } - /* - input (pz, px) - z[0..2] <- montgomery(px[0..2], px[0..2]) - destroy gt0, ..., gt9, xm0, xm1, p2 - */ - void gen_montSqr3() - { - StackFrame sf(this, 3, 10 | UseRDX, 16 * 3); - const Reg64& pz = sf.p[0]; - const Reg64& px = sf.p[1]; -// const Reg64& py = sf.p[2]; // not used - - const Reg64& t0 = sf.t[0]; - const Reg64& t1 = sf.t[1]; - const Reg64& t2 = sf.t[2]; - const Reg64& t3 = sf.t[3]; - const Reg64& t4 = sf.t[4]; - const Reg64& t5 = sf.t[5]; - const Reg64& t6 = sf.t[6]; - const Reg64& t7 = sf.t[7]; - const Reg64& t8 = sf.t[8]; - const Reg64& t9 = sf.t[9]; - - vmovq(xm0, pz); // save pz - mov(t7, pL_); - mov(t9, ptr [px]); - mul3x1_sqr1(px, t9, t3, t2, t1, t0); - mov(t0, rdx); - montgomery3_sub(rp_, t0, t9, t2, t1, px, t3, t7, t4, t5, t6, t8, pz, true); - - mov(t3, ptr [px + 8]); - mul3x1_sqr2(px, t3, t6, t5, t4); - add_rr(Pack(t1, t0, t9, t2), 
Pack(rdx, rax, t5, t4)); - if (isFullBit_) setc(pz.cvt8()); - montgomery3_sub(rp_, t1, t3, t9, t2, px, t0, t7, t4, t5, t6, t8, pz, false); - - mov(t0, ptr [px + 16]); - mul3x1_sqr3(t0, t5, t4); - add_rr(Pack(t2, t1, t3, t9), Pack(rdx, rax, t5, t4)); - if (isFullBit_) setc(pz.cvt8()); - montgomery3_sub(rp_, t2, t0, t3, t9, px, t1, t7, t4, t5, t6, t8, pz, false); - - // [t9:t2:t0:t3] - mov(t4, t3); - mov(t5, t0); - mov(t6, t2); - sub_rm(Pack(t2, t0, t3), t7); - if (isFullBit_) sbb(t9, 0); - cmovc(t3, t4); - cmovc(t0, t5); - cmovc(t2, t6); - vmovq(pz, xm0); - store_mr(pz, Pack(t2, t0, t3)); - } - /* - py[5..0] <- px[2..0]^2 - @note use rax, rdx - */ - void sqrPre3(const RegExp& py, const RegExp& px, const Pack& t) - { - const Reg64& a = rax; - const Reg64& d = rdx; - const Reg64& t0 = t[0]; - const Reg64& t1 = t[1]; - const Reg64& t2 = t[2]; - const Reg64& t3 = t[3]; - const Reg64& t4 = t[4]; - const Reg64& t5 = t[5]; - const Reg64& t6 = t[6]; - const Reg64& t7 = t[7]; - const Reg64& t8 = t[8]; - const Reg64& t9 = t[9]; - const Reg64& t10 = t[10]; - - if (useMulx_) { - mov(d, ptr [px + 8 * 0]); - mulx(t0, a, d); - mov(ptr [py + 8 * 0], a); - - mov(t7, ptr [px + 8 * 1]); - mov(t9, ptr [px + 8 * 2]); - mulx(t2, t1, t7); - mulx(t4, t3, t9); - - mov(t5, t2); - mov(t6, t4); - - add(t0, t1); - adc(t5, t3); - adc(t6, 0); // [t6:t5:t0] - - mov(d, t7); - mulx(t8, t7, d); - mulx(t10, t9, t9); - } else { - mov(t9, ptr [px + 8 * 0]); - mov(a, t9); - mul(t9); - mov(ptr [py + 8 * 0], a); - mov(t0, d); - mov(a, ptr [px + 8 * 1]); - mul(t9); - mov(t1, a); - mov(t2, d); - mov(a, ptr [px + 8 * 2]); - mul(t9); - mov(t3, a); - mov(t4, d); - - mov(t5, t2); - mov(t6, t4); - - add(t0, t1); - adc(t5, t3); - adc(t6, 0); // [t6:t5:t0] - - mov(t9, ptr [px + 8 * 1]); - mov(a, t9); - mul(t9); - mov(t7, a); - mov(t8, d); - mov(a, ptr [px + 8 * 2]); - mul(t9); - mov(t9, a); - mov(t10, d); - } - add(t2, t7); - adc(t8, t9); - mov(t7, t10); - adc(t7, 0); // [t7:t8:t2:t1] - - add(t0, t1); - adc(t2, t5); - adc(t6, t8); - adc(t7, 0); - mov(ptr [py + 8 * 1], t0); // [t7:t6:t2] - - mov(a, ptr [px + 8 * 2]); - mul(a); - add(t4, t9); - adc(a, t10); - adc(d, 0); // [d:a:t4:t3] - - add(t2, t3); - adc(t6, t4); - adc(t7, a); - adc(d, 0); - store_mr(py + 8 * 2, Pack(d, t7, t6, t2)); - } - /* - [pd:pz[0]] <- py[n-1..0] * px[0] - */ - void mulPack(const RegExp& pz, const RegExp& px, const RegExp& py, const Pack& pd) - { - const Reg64& a = rax; - const Reg64& d = rdx; - mov(d, ptr [px]); - mulx(pd[0], a, ptr [py + 8 * 0]); - mov(ptr [pz + 8 * 0], a); - for (size_t i = 1; i < pd.size(); i++) { - mulx(pd[i], a, ptr [py + 8 * i]); - if (i == 1) { - add(pd[i - 1], a); - } else { - adc(pd[i - 1], a); - } - } - adc(pd[pd.size() - 1], 0); - } - /* - [hi:Pack(d_(n-1), .., d1):pz[0]] <- Pack(d_(n-1), ..., d0) + py[n-1..0] * px[0] - */ - void mulPackAdd(const RegExp& pz, const RegExp& px, const RegExp& py, const Reg64& hi, const Pack& pd) - { - const Reg64& a = rax; - const Reg64& d = rdx; - mov(d, ptr [px]); - xor_(a, a); - for (size_t i = 0; i < pd.size(); i++) { - mulx(hi, a, ptr [py + i * 8]); - adox(pd[i], a); - if (i == 0) mov(ptr[pz], pd[0]); - if (i == pd.size() - 1) break; - adcx(pd[i + 1], hi); - } - mov(d, 0); - adcx(hi, d); - adox(hi, d); - } - /* - input : z[n], p[n-1], rdx(implicit) - output: z[] += p[] * rdx, rax = 0 and set CF - use rax, rdx - */ - void mulPackAddShr(const Pack& z, const RegExp& p, const Reg64& H, bool last = false) - { - const Reg64& a = rax; - const size_t n = z.size(); - assert(n >= 3); - // clear CF and OF - 
xor_(a, a); - const size_t loop = last ? n - 1 : n - 3; - for (size_t i = 0; i < loop; i++) { - // mulx(H, L, x) = [H:L] = x * rdx - mulx(H, a, ptr [p + i * 8]); - adox(z[i], a); - adcx(z[i + 1], H); - } - if (last) { - mov(a, 0); - adox(z[n - 1], a); - return; - } - /* - reorder addtion not to propage OF outside this routine - H - + - rdx a - | | - v v - z[n-1] z[n-2] - */ - mulx(H, a, ptr [p + (n - 3) * 8]); - adox(z[n - 3], a); - mulx(rdx, a, ptr [p + (n - 2) * 8]); // destroy rdx - adox(H, a); - mov(a, 0); - adox(rdx, a); - adcx(z[n - 2], H); - adcx(z[n - 1], rdx); - } - /* - pz[5..0] <- px[2..0] * py[2..0] - */ - void mulPre3(const RegExp& pz, const RegExp& px, const RegExp& py, const Pack& t) - { - const Reg64& a = rax; - const Reg64& d = rdx; - const Reg64& t0 = t[0]; - const Reg64& t1 = t[1]; - const Reg64& t2 = t[2]; - const Reg64& t3 = t[3]; - const Reg64& t4 = t[4]; - const Reg64& t5 = t[5]; - const Reg64& t6 = t[6]; - const Reg64& t7 = t[7]; - const Reg64& t8 = t[8]; - const Reg64& t9 = t[9]; - - if (useMulx_) { - mulPack(pz, px, py, Pack(t2, t1, t0)); -#if 0 // a little slow - if (useAdx_) { - // [t2:t1:t0] - mulPackAdd(pz + 8 * 1, px + 8 * 1, py, t3, Pack(t2, t1, t0)); - // [t3:t2:t1] - mulPackAdd(pz + 8 * 2, px + 8 * 2, py, t4, Pack(t3, t2, t1)); - // [t4:t3:t2] - store_mr(pz + 8 * 3, Pack(t4, t3, t2)); - return; - } -#endif - } else { - mov(t5, ptr [px]); - mov(a, ptr [py + 8 * 0]); - mul(t5); - mov(ptr [pz + 8 * 0], a); - mov(t0, d); - mov(a, ptr [py + 8 * 1]); - mul(t5); - mov(t3, a); - mov(t1, d); - mov(a, ptr [py + 8 * 2]); - mul(t5); - mov(t4, a); - mov(t2, d); - add(t0, t3); - mov(t2, 0); - adc(t1, a); - adc(t2, d); // [t2:t1:t0:pz[0]] = px[0] * py[2..0] - } - - // here [t2:t1:t0] - - mov(t9, ptr [px + 8]); - - // [d:t9:t6:t5] = px[1] * py[2..0] - mul3x1(py, t9, t7, t6, t5, t4); - add_rr(Pack(t2, t1, t0), Pack(t9, t6, t5)); - adc(d, 0); - mov(t8, d); - mov(ptr [pz + 8], t0); - // here [t8:t2:t1] - - mov(t9, ptr [px + 16]); - - // [d:t9:t5:t4] - mul3x1(py, t9, t6, t5, t4, t0); - add_rr(Pack(t8, t2, t1), Pack(t9, t5, t4)); - adc(d, 0); - store_mr(pz + 8 * 2, Pack(d, t8, t2, t1)); - } - void sqrPre2(const Reg64& py, const Reg64& px, const Pack& t) - { - // QQQ - const Reg64& t0 = t[0]; - const Reg64& t1 = t[1]; - const Reg64& t2 = t[2]; - const Reg64& t3 = t[3]; - const Reg64& t4 = t[4]; - const Reg64& t5 = t[5]; - const Reg64& t6 = t[6]; - load_rm(Pack(px, t0), px); // x = [px:t0] - sqr2(t4, t3, t2, t1, px, t0, t5, t6); - store_mr(py, Pack(t4, t3, t2, t1)); - } - /* - [y3:y2:y1:y0] = [x1:x0] ^ 2 - use rdx - */ - void sqr2(const Reg64& y3, const Reg64& y2, const Reg64& y1, const Reg64& y0, const Reg64& x1, const Reg64& x0, const Reg64& t1, const Reg64& t0) - { - if (!useMulx_) { - throw cybozu::Exception("sqr2:not support mulx"); - } - mov(rdx, x0); - mulx(y1, y0, x0); // x0^2 - mov(rdx, x1); - mulx(y3, y2, x1); // x1^2 - mulx(t1, t0, x0); // x0 x1 - add(y1, t0); - adc(y2, t1); - adc(y3, 0); - add(y1, t0); - adc(y2, t1); - adc(y3, 0); - } - /* - [t3:t2:t1:t0] = px[1, 0] * py[1, 0] - use rax, rdx - */ - void mul2x2(const RegExp& px, const RegExp& py, const Reg64& t4, const Reg64& t3, const Reg64& t2, const Reg64& t1, const Reg64& t0) - { - if (!useMulx_) { - throw cybozu::Exception("mul2x2:not support mulx"); - } -#if 0 - // # of add is less, but a little slower - mov(t4, ptr [py + 8 * 0]); - mov(rdx, ptr [px + 8 * 1]); - mulx(t2, t1, t4); - mov(rdx, ptr [px + 8 * 0]); - mulx(t0, rax, ptr [py + 8 * 1]); - xor_(t3, t3); - add_rr(Pack(t3, t2, t1), Pack(t3, t0, rax)); - // 
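/*
  Illustrative sketch of the identity sqr2 implements: for x = x1*2^64 + x0,
  x^2 = x1^2 * 2^128 + 2*x0*x1 * 2^64 + x0^2, which is why the generated code
  adds the cross product [t1:t0] into the middle limbs twice. A portable
  version (hypothetical helper name):
*/
#include <cstdint>

void sqr2limbs(uint64_t y[4], uint64_t x1, uint64_t x0)
{
	unsigned __int128 lo = (unsigned __int128)x0 * x0;
	unsigned __int128 hi = (unsigned __int128)x1 * x1;
	unsigned __int128 cr = (unsigned __int128)x0 * x1; // cross term, used twice
	y[0] = (uint64_t)lo;
	unsigned __int128 m = (lo >> 64) + (uint64_t)cr;
	m += (uint64_t)cr;                                 // 2 * low half of cr
	y[1] = (uint64_t)m;
	unsigned __int128 h = (m >> 64) + (uint64_t)(cr >> 64) + (uint64_t)hi;
	h += (uint64_t)(cr >> 64);                         // 2 * high half of cr
	y[2] = (uint64_t)h;
	y[3] = (uint64_t)(hi >> 64) + (uint64_t)(h >> 64); // cannot wrap: x^2 < 2^256
}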
[t3:t2:t1] = ad + bc - mulx(t4, t0, t4); - mov(rax, ptr [px + 8 * 1]); - mul(qword [py + 8 * 1]); - add_rr(Pack(t3, t2, t1), Pack(rdx, rax, t4)); -#else - mov(rdx, ptr [py + 8 * 0]); - mov(rax, ptr [px + 8 * 0]); - mulx(t1, t0, rax); - mov(t3, ptr [px + 8 * 1]); - mulx(t2, rdx, t3); - add(t1, rdx); - adc(t2, 0); // [t2:t1:t0] - - mov(rdx, ptr [py + 8 * 1]); - mulx(rax, t4, rax); - mulx(t3, rdx, t3); - add(rax, rdx); - adc(t3, 0); // [t3:rax:t4] - add(t1, t4); - adc(t2, rax); - adc(t3, 0); // t3:t2:t1:t0] -#endif - } - void mulPre2(const RegExp& pz, const RegExp& px, const RegExp& py, const Pack& t) - { - const Reg64& t0 = t[0]; - const Reg64& t1 = t[1]; - const Reg64& t2 = t[2]; - const Reg64& t3 = t[3]; - const Reg64& t4 = t[4]; - mul2x2(px, py, t4, t3, t2, t1, t0); - store_mr(pz, Pack(t3, t2, t1, t0)); - } - /* - py[7..0] = px[3..0] ^ 2 - use xmm0 - */ - void sqrPre4(const RegExp& py, const RegExp& px, const Pack& t) - { - const Reg64& t0 = t[0]; - const Reg64& t1 = t[1]; - const Reg64& t2 = t[2]; - const Reg64& t3 = t[3]; - const Reg64& t4 = t[4]; - const Reg64& t5 = t[5]; - const Reg64& t6 = t[6]; - const Reg64& t7 = t[7]; - const Reg64& t8 = t[8]; - const Reg64& t9 = t[9]; - const Reg64& t10 = t[10]; - const Reg64& a = rax; - const Reg64& d = rdx; - - /* - (aN + b)^2 = a^2 N^2 + 2ab N + b^2 - */ - load_rm(Pack(t9, t8), px); - sqr2(t3, t2, t1, t0, t9, t8, t7, t6); - // [t3:t2:t1:t0] = b^2 - store_mr(py, Pack(t1, t0)); - vmovq(xm0, t2); - mul2x2(px, px + 2 * 8, t6, t5, t4, t1, t0); - // [t5:t4:t1:t0] = ab - xor_(t6, t6); - add_rr(Pack(t6, t5, t4, t1, t0), Pack(t6, t5, t4, t1, t0)); - // [t6:t5:t4:t1:t0] = 2ab - load_rm(Pack(t8, t7), px + 2 * 8); - // free t10, t9, rax, rdx - /* - [d:t8:t10:t9] = [t8:t7]^2 - */ - mov(d, t7); - mulx(t10, t9, t7); // [t10:t9] = t7^2 - mulx(t7, t2, t8); // [t7:t2] = t7 t8 - xor_(a, a); - add_rr(Pack(a, t7, t2), Pack(a, t7, t2)); - // [a:t7:t2] = 2 t7 t8 - mov(d, t8); - mulx(d, t8, t8); // [d:t8] = t8^2 - add_rr(Pack(d, t8, t10), Pack(a, t7, t2)); - // [d:t8:t10:t9] = [t8:t7]^2 - vmovq(t2, xm0); - add_rr(Pack(t8, t10, t9, t3, t2), Pack(t6, t5, t4, t1, t0)); - adc(d, 0); - store_mr(py + 2 * 8, Pack(d, t8, t10, t9, t3, t2)); - } - /* - py[11..0] = px[5..0] ^ 2 - use rax, rdx, stack[6 * 8] - */ - void sqrPre6(const RegExp& py, const RegExp& px, const Pack& t) - { - const Reg64& t0 = t[0]; - const Reg64& t1 = t[1]; - const Reg64& t2 = t[2]; - /* - (aN + b)^2 = a^2 N^2 + 2ab N + b^2 - */ - sqrPre3(py, px, t); // [py] <- b^2 - sqrPre3(py + 6 * 8, px + 3 * 8, t); // [py + 6 * 8] <- a^2 - mulPre3(rsp, px, px + 3 * 8, t); // ab - Pack ab = t.sub(0, 6); - load_rm(ab, rsp); - xor_(rax, rax); - for (int i = 0; i < 6; i++) { - if (i == 0) { - add(ab[i], ab[i]); - } else { - adc(ab[i], ab[i]); - } - } - adc(rax, rax); - add_rm(ab, py + 3 * 8); - store_mr(py + 3 * 8, ab); - load_rm(Pack(t2, t1, t0), py + 9 * 8); - adc(t0, rax); - adc(t1, 0); - adc(t2, 0); - store_mr(py + 9 * 8, Pack(t2, t1, t0)); - } - /* - pz[7..0] <- px[3..0] * py[3..0] - */ - void mulPre4(const RegExp& pz, const RegExp& px, const RegExp& py, const Pack& t) - { - const Reg64& a = rax; - const Reg64& d = rdx; - const Reg64& t0 = t[0]; - const Reg64& t1 = t[1]; - const Reg64& t2 = t[2]; - const Reg64& t3 = t[3]; - const Reg64& t4 = t[4]; - const Reg64& t5 = t[5]; - const Reg64& t6 = t[6]; - const Reg64& t7 = t[7]; - const Reg64& t8 = t[8]; - const Reg64& t9 = t[9]; - -#if 0 // a little slower - if (useMulx_ && useAdx_) { - mulPack(pz, px, py, Pack(t3, t2, t1, t0)); - mulPackAdd(pz + 8 * 1, px + 8 * 1, py, 
t4, Pack(t3, t2, t1, t0)); - mulPackAdd(pz + 8 * 2, px + 8 * 2, py, t0, Pack(t4, t3, t2, t1)); - mulPackAdd(pz + 8 * 3, px + 8 * 3, py, t1, Pack(t0, t4, t3, t2)); - store_mr(pz + 8 * 4, Pack(t1, t0, t4, t3)); - return; - } -#endif -#if 0 - // a little slower - if (!useMulx_) { - throw cybozu::Exception("mulPre4:not support mulx"); - } - mul2x2(px + 8 * 0, py + 8 * 2, t4, t3, t2, t1, t0); - mul2x2(px + 8 * 2, py + 8 * 0, t9, t8, t7, t6, t5); - xor_(t4, t4); - add_rr(Pack(t4, t3, t2, t1, t0), Pack(t4, t8, t7, t6, t5)); - // [t4:t3:t2:t1:t0] - mul2x2(px + 8 * 0, py + 8 * 0, t9, t8, t7, t6, t5); - store_mr(pz, Pack(t6, t5)); - // [t8:t7] - vmovq(xm0, t7); - vmovq(xm1, t8); - mul2x2(px + 8 * 2, py + 8 * 2, t8, t7, t9, t6, t5); - vmovq(a, xm0); - vmovq(d, xm1); - add_rr(Pack(t4, t3, t2, t1, t0), Pack(t9, t6, t5, d, a)); - adc(t7, 0); - store_mr(pz + 8 * 2, Pack(t7, t4, t3, t2, t1, t0)); -#else - if (useMulx_) { - mulPack(pz, px, py, Pack(t3, t2, t1, t0)); - } else { - mov(t5, ptr [px]); - mov(a, ptr [py + 8 * 0]); - mul(t5); - mov(ptr [pz + 8 * 0], a); - mov(t0, d); - mov(a, ptr [py + 8 * 1]); - mul(t5); - mov(t3, a); - mov(t1, d); - mov(a, ptr [py + 8 * 2]); - mul(t5); - mov(t4, a); - mov(t2, d); - mov(a, ptr [py + 8 * 3]); - mul(t5); - add(t0, t3); - mov(t3, 0); - adc(t1, t4); - adc(t2, a); - adc(t3, d); // [t3:t2:t1:t0:pz[0]] = px[0] * py[3..0] - } - - // here [t3:t2:t1:t0] - - mov(t9, ptr [px + 8]); - - // [d:t9:t7:t6:t5] = px[1] * py[3..0] - mul4x1(py, t9, t8, t7, t6, t5, t4); - add_rr(Pack(t3, t2, t1, t0), Pack(t9, t7, t6, t5)); - adc(d, 0); - mov(t8, d); - mov(ptr [pz + 8], t0); - // here [t8:t3:t2:t1] - - mov(t9, ptr [px + 16]); - - // [d:t9:t6:t5:t4] - mul4x1(py, t9, t7, t6, t5, t4, t0); - add_rr(Pack(t8, t3, t2, t1), Pack(t9, t6, t5, t4)); - adc(d, 0); - mov(t7, d); - mov(ptr [pz + 16], t1); - - mov(t9, ptr [px + 24]); - - // [d:t9:t5:t4:t1] - mul4x1(py, t9, t6, t5, t4, t1, t0); - add_rr(Pack(t7, t8, t3, t2), Pack(t9, t5, t4, t1)); - adc(d, 0); - store_mr(pz + 8 * 3, Pack(t7, t8, t3, t2)); - mov(ptr [pz + 8 * 7], d); -#endif - } - // [gp0] <- [gp1] * [gp2] - void mulPre6(const Pack& t) - { - const Reg64& pz = gp0; - const Reg64& px = gp1; - const Reg64& py = gp2; - const Reg64& t0 = t[0]; - const Reg64& t1 = t[1]; - const Reg64& t2 = t[2]; - const Reg64& t3 = t[3]; -#if 0 // slower than basic multiplication(56clk -> 67clk) -// const Reg64& t7 = t[7]; -// const Reg64& t8 = t[8]; -// const Reg64& t9 = t[9]; - const Reg64& a = rax; - const Reg64& d = rdx; - const int stackSize = (3 + 3 + 6 + 1 + 1 + 1) * 8; // a+b, c+d, (a+b)(c+d), x, y, z - const int abPos = 0; - const int cdPos = abPos + 3 * 8; - const int abcdPos = cdPos + 3 * 8; - const int zPos = abcdPos + 6 * 8; - const int yPos = zPos + 8; - const int xPos = yPos + 8; - - sub(rsp, stackSize); - mov(ptr[rsp + zPos], pz); - mov(ptr[rsp + xPos], px); - mov(ptr[rsp + yPos], py); - /* - x = aN + b, y = cN + d - xy = abN^2 + ((a+b)(c+d) - ac - bd)N + bd - */ - xor_(a, a); - load_rm(Pack(t2, t1, t0), px); // b - add_rm(Pack(t2, t1, t0), px + 3 * 8); // a + b - adc(a, 0); - store_mr(pz, Pack(t2, t1, t0)); - vmovq(xm0, a); // carry1 - - xor_(a, a); - load_rm(Pack(t2, t1, t0), py); // d - add_rm(Pack(t2, t1, t0), py + 3 * 8); // c + d - adc(a, 0); - store_mr(pz + 3 * 8, Pack(t2, t1, t0)); - vmovq(xm1, a); // carry2 - - mulPre3(rsp + abcdPos, pz, pz + 3 * 8, t); // (a+b)(c+d) - - vmovq(a, xm0); - vmovq(d, xm1); - mov(t3, a); - and_(t3, d); // t3 = carry1 & carry2 - Label doNothing; - je(doNothing); - load_rm(Pack(t2, t1, t0), rsp + abcdPos + 
3 * 8); - test(a, a); - je("@f"); - // add (c+d) - add_rm(Pack(t2, t1, t0), pz + 3 * 8); - adc(t3, 0); - L("@@"); - test(d, d); - je("@f"); - // add(a+b) - add_rm(Pack(t2, t1, t0), pz); - adc(t3, 0); - L("@@"); - store_mr(rsp + abcdPos + 3 * 8, Pack(t2, t1, t0)); - L(doNothing); - vmovq(xm0, t3); // save new carry - - - mov(gp0, ptr [rsp + zPos]); - mov(gp1, ptr [rsp + xPos]); - mov(gp2, ptr [rsp + yPos]); - mulPre3(gp0, gp1, gp2, t); // [rsp] <- bd - - mov(gp0, ptr [rsp + zPos]); - mov(gp1, ptr [rsp + xPos]); - mov(gp2, ptr [rsp + yPos]); - mulPre3(gp0 + 6 * 8, gp1 + 3 * 8, gp2 + 3 * 8, t); // [rsp + 6 * 8] <- ac - - mov(pz, ptr[rsp + zPos]); - vmovq(d, xm0); - for (int i = 0; i < 6; i++) { - mov(a, ptr[pz + (3 + i) * 8]); - if (i == 0) { - add(a, ptr[rsp + abcdPos + i * 8]); - } else { - adc(a, ptr[rsp + abcdPos + i * 8]); - } - mov(ptr[pz + (3 + i) * 8], a); - } - mov(a, ptr[pz + 9 * 8]); - adc(a, d); - mov(ptr[pz + 9 * 8], a); - jnc("@f"); - for (int i = 10; i < 12; i++) { - mov(a, ptr[pz + i * 8]); - adc(a, 0); - mov(ptr[pz + i * 8], a); - } - L("@@"); - add(rsp, stackSize); -#else - const Reg64& t4 = t[4]; - const Reg64& t5 = t[5]; - const Reg64& t6 = t[6]; - - mulPack(pz, px, py, Pack(t5, t4, t3, t2, t1, t0)); // [t5:t4:t3:t2:t1:t0] - mulPackAdd(pz + 8 * 1, px + 8 * 1, py, t6, Pack(t5, t4, t3, t2, t1, t0)); // [t6:t5:t4:t3:t2:t1] - mulPackAdd(pz + 8 * 2, px + 8 * 2, py, t0, Pack(t6, t5, t4, t3, t2, t1)); // [t0:t6:t5:t4:t3:t2] - mulPackAdd(pz + 8 * 3, px + 8 * 3, py, t1, Pack(t0, t6, t5, t4, t3, t2)); // [t1:t0:t6:t5:t4:t3] - mulPackAdd(pz + 8 * 4, px + 8 * 4, py, t2, Pack(t1, t0, t6, t5, t4, t3)); // [t2:t1:t0:t6:t5:t4] - mulPackAdd(pz + 8 * 5, px + 8 * 5, py, t3, Pack(t2, t1, t0, t6, t5, t4)); // [t3:t2:t1:t0:t6:t5] - store_mr(pz + 8 * 6, Pack(t3, t2, t1, t0, t6, t5)); -#endif - } - /* - @input (z, xy) - z[5..0] <- montgomery reduction(x[11..0]) - use xm0, xm1, xm2 - */ - void gen_fpDbl_mod6(const Reg64& z, const Reg64& xy, const Pack& t) - { - assert(!isFullBit_); - const Reg64& t0 = t[0]; - const Reg64& t1 = t[1]; - const Reg64& t2 = t[2]; - const Reg64& t3 = t[3]; - const Reg64& t4 = t[4]; - const Reg64& t5 = t[5]; - const Reg64& t6 = t[6]; - const Reg64& t7 = t[7]; - const Reg64& t8 = t[8]; - const Reg64& t9 = t[9]; - const Reg64& t10 = t[10]; - - const Reg64& a = rax; - const Reg64& d = rdx; - vmovq(xm0, z); - mov(z, ptr [xy + 0 * 8]); - mov(a, rp_); - mul(z); - lea(t0, ptr [rip + pL_]); - load_rm(Pack(t7, t6, t5, t4, t3, t2, t1), xy); - mov(d, a); // q - mulPackAddShr(Pack(t7, t6, t5, t4, t3, t2, t1), t0, t10); - load_rm(Pack(t1, t0, t10, t9, t8), xy + 7 * 8); - adc(t8, rax); - adc(t9, rax); - adc(t10, rax); - adc(t0, rax); - adc(t1, rax); - // z = [t1:t0:t10:t9:t8:t7:t6:t5:t4:t3:t2] - mov(a, rp_); - mul(t2); - vmovq(xm1, t0); // save - lea(t0, ptr [rip + pL_]); - mov(d, a); - vmovq(xm2, t10); - mulPackAddShr(Pack(t8, t7, t6, t5, t4, t3, t2), t0, t10); - vmovq(t10, xm2); - adc(t9, rax); - adc(t10, rax); - vmovq(t0, xm1); // load - adc(t0, rax); - adc(t1, rax); - // z = [t1:t0:t10:t9:t8:t7:t6:t5:t4:t3] - mov(a, rp_); - mul(t3); - lea(t2, ptr [rip + pL_]); - mov(d, a); - vmovq(xm2, t10); - mulPackAddShr(Pack(t9, t8, t7, t6, t5, t4, t3), t2, t10); - vmovq(t10, xm2); - adc(t10, rax); - adc(t0, rax); - adc(t1, rax); - // z = [t1:t0:t10:t9:t8:t7:t6:t5:t4] - mov(a, rp_); - mul(t4); - lea(t2, ptr [rip + pL_]); - mov(d, a); - mulPackAddShr(Pack(t10, t9, t8, t7, t6, t5, t4), t2, t3); - adc(t0, rax); - adc(t1, rax); - // z = [t1:t0:t10:t9:t8:t7:t6:t5] - mov(a, rp_); - mul(t5); - 
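/*
  Illustrative sketch of the Karatsuba identity behind the disabled mulPre6
  path above: with x = a*N + b and y = c*N + d,
      x*y = a*c*N^2 + ((a+b)*(c+d) - a*c - b*d)*N + b*d,
  trading one of the four half-size products for a few additions. A
  small-number check (N = 1000 standing in for 2^(64*3)):
*/
#include <cstdint>
#include <cassert>

int main()
{
	const uint64_t N = 1000;
	uint64_t x = 123456, y = 654321;
	uint64_t a = x / N, b = x % N, c = y / N, d = y % N;
	uint64_t ac = a * c, bd = b * d;
	uint64_t mid = (a + b) * (c + d) - ac - bd; // equals a*d + b*c
	assert(x * y == ac * N * N + mid * N + bd);
	return 0;
}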
lea(t2, ptr [rip + pL_]); - mov(d, a); - mulPackAddShr(Pack(t0, t10, t9, t8, t7, t6, t5), t2, t3); - adc(t1, a); - // z = [t1:t0:t10:t9:t8:t7:t6] - mov(a, rp_); - mul(t6); - lea(t2, ptr [rip + pL_]); - mov(d, a); - mulPackAddShr(Pack(t1, t0, t10, t9, t8, t7, t6), t2, t3, true); - // z = [t1:t0:t10:t9:t8:t7] - Pack zp = Pack(t1, t0, t10, t9, t8, t7); - Pack keep = Pack(z, xy, rax, rdx, t3, t6); - mov_rr(keep, zp); - sub_rm(zp, t2); // z -= p - cmovc_rr(zp, keep); - vmovq(z, xm0); - store_mr(z, zp); - } - void2u gen_fpDbl_sqrPre() - { - align(16); - void2u func = getCurr(); - if (pn_ == 2 && useMulx_) { - StackFrame sf(this, 2, 7 | UseRDX); - sqrPre2(sf.p[0], sf.p[1], sf.t); - return func; - } - if (pn_ == 3) { - StackFrame sf(this, 3, 10 | UseRDX); - Pack t = sf.t; - t.append(sf.p[2]); - sqrPre3(sf.p[0], sf.p[1], t); - return func; - } - if (pn_ == 4 && useMulx_) { - StackFrame sf(this, 3, 10 | UseRDX); - Pack t = sf.t; - t.append(sf.p[2]); - sqrPre4(sf.p[0], sf.p[1], t); - return func; - } - if (pn_ == 6 && useMulx_ && useAdx_) { - StackFrame sf(this, 3, 10 | UseRDX, 6 * 8); - Pack t = sf.t; - t.append(sf.p[2]); - sqrPre6(sf.p[0], sf.p[1], t); - return func; - } - return 0; -#if 0 -#ifdef XBYAK64_WIN - mov(r8, rdx); -#else - mov(rdx, rsi); -#endif - jmp((void*)op.fpDbl_mulPreA_); - return func; -#endif - } - void3u gen_fpDbl_mulPre() - { - align(16); - void3u func = getCurr(); - if (pn_ == 2 && useMulx_) { - StackFrame sf(this, 3, 5 | UseRDX); - mulPre2(sf.p[0], sf.p[1], sf.p[2], sf.t); - return func; - } - if (pn_ == 3) { - StackFrame sf(this, 3, 10 | UseRDX); - mulPre3(sf.p[0], sf.p[1], sf.p[2], sf.t); - return func; - } - if (pn_ == 4) { - /* - fpDbl_mulPre is available as C function - this function calls mulPreL directly. - */ - StackFrame sf(this, 3, 10 | UseRDX, 0, false); - mulPre4(gp0, gp1, gp2, sf.t); - sf.close(); // make epilog - L(mulPreL); // called only from asm code - mulPre4(gp0, gp1, gp2, sf.t); - ret(); - return func; - } - if (pn_ == 6 && useAdx_) { - StackFrame sf(this, 3, 10 | UseRDX, 0, false); - call(mulPreL); - sf.close(); // make epilog - L(mulPreL); // called only from asm code - mulPre6(sf.t); - ret(); - return func; - } - return 0; - } - static inline void debug_put_inner(const uint64_t *ptr, int n) - { - printf("debug "); - for (int i = 0; i < n; i++) { - printf("%016llx", (long long)ptr[n - 1 - i]); - } - printf("\n"); - } -#ifdef _MSC_VER - void debug_put(const RegExp& m, int n) - { - assert(n <= 8); - static uint64_t regBuf[7]; - - push(rax); - mov(rax, (size_t)regBuf); - mov(ptr [rax + 8 * 0], rcx); - mov(ptr [rax + 8 * 1], rdx); - mov(ptr [rax + 8 * 2], r8); - mov(ptr [rax + 8 * 3], r9); - mov(ptr [rax + 8 * 4], r10); - mov(ptr [rax + 8 * 5], r11); - mov(rcx, ptr [rsp + 8]); // org rax - mov(ptr [rax + 8 * 6], rcx); // save - mov(rcx, ptr [rax + 8 * 0]); // org rcx - pop(rax); - - lea(rcx, ptr [m]); - mov(rdx, n); - mov(rax, (size_t)debug_put_inner); - sub(rsp, 32); - call(rax); - add(rsp, 32); - - push(rax); - mov(rax, (size_t)regBuf); - mov(rcx, ptr [rax + 8 * 0]); - mov(rdx, ptr [rax + 8 * 1]); - mov(r8, ptr [rax + 8 * 2]); - mov(r9, ptr [rax + 8 * 3]); - mov(r10, ptr [rax + 8 * 4]); - mov(r11, ptr [rax + 8 * 5]); - mov(rax, ptr [rax + 8 * 6]); - add(rsp, 8); - } -#endif - /* - z >>= c - @note shrd(r/m, r, imm) - */ - void shr_mp(const MixPack& z, uint8_t c, const Reg64& t) - { - const size_t n = z.size(); - for (size_t i = 0; i < n - 1; i++) { - const Reg64 *p; - if (z.isReg(i + 1)) { - p = &z.getReg(i + 1); - } else { - mov(t, ptr [z.getMem(i + 1)]); 
- p = &t; - } - if (z.isReg(i)) { - shrd(z.getReg(i), *p, c); - } else { - shrd(qword [z.getMem(i)], *p, c); - } - } - if (z.isReg(n - 1)) { - shr(z.getReg(n - 1), c); - } else { - shr(qword [z.getMem(n - 1)], c); - } - } - /* - z *= 2 - */ - void twice_mp(const MixPack& z, const Reg64& t) - { - g_add(z[0], z[0], t); - for (size_t i = 1, n = z.size(); i < n; i++) { - g_adc(z[i], z[i], t); - } - } - /* - z += x - */ - void add_mp(const MixPack& z, const MixPack& x, const Reg64& t) - { - assert(z.size() == x.size()); - g_add(z[0], x[0], t); - for (size_t i = 1, n = z.size(); i < n; i++) { - g_adc(z[i], x[i], t); - } - } - void add_m_m(const RegExp& mz, const RegExp& mx, const Reg64& t, int n) - { - for (int i = 0; i < n; i++) { - mov(t, ptr [mx + i * 8]); - if (i == 0) { - add(ptr [mz + i * 8], t); - } else { - adc(ptr [mz + i * 8], t); - } - } - } - /* - mz[] = mx[] - y - */ - void sub_m_mp_m(const RegExp& mz, const RegExp& mx, const MixPack& y, const Reg64& t) - { - for (size_t i = 0; i < y.size(); i++) { - mov(t, ptr [mx + i * 8]); - if (i == 0) { - if (y.isReg(i)) { - sub(t, y.getReg(i)); - } else { - sub(t, ptr [y.getMem(i)]); - } - } else { - if (y.isReg(i)) { - sbb(t, y.getReg(i)); - } else { - sbb(t, ptr [y.getMem(i)]); - } - } - mov(ptr [mz + i * 8], t); - } - } - /* - z -= x - */ - void sub_mp(const MixPack& z, const MixPack& x, const Reg64& t) - { - assert(z.size() == x.size()); - g_sub(z[0], x[0], t); - for (size_t i = 1, n = z.size(); i < n; i++) { - g_sbb(z[i], x[i], t); - } - } - /* - z -= px[] - */ - void sub_mp_m(const MixPack& z, const RegExp& px, const Reg64& t) - { - if (z.isReg(0)) { - sub(z.getReg(0), ptr [px]); - } else { - mov(t, ptr [px]); - sub(ptr [z.getMem(0)], t); - } - for (size_t i = 1, n = z.size(); i < n; i++) { - if (z.isReg(i)) { - sbb(z.getReg(i), ptr [px + i * 8]); - } else { - mov(t, ptr [px + i * 8]); - sbb(ptr [z.getMem(i)], t); - } - } - } - void store_mp(const RegExp& m, const MixPack& z, const Reg64& t) - { - for (size_t i = 0, n = z.size(); i < n; i++) { - if (z.isReg(i)) { - mov(ptr [m + i * 8], z.getReg(i)); - } else { - mov(t, ptr [z.getMem(i)]); - mov(ptr [m + i * 8], t); - } - } - } - void load_mp(const MixPack& z, const RegExp& m, const Reg64& t) - { - for (size_t i = 0, n = z.size(); i < n; i++) { - if (z.isReg(i)) { - mov(z.getReg(i), ptr [m + i * 8]); - } else { - mov(t, ptr [m + i * 8]); - mov(ptr [z.getMem(i)], t); - } - } - } - void set_mp(const MixPack& z, const Reg64& t) - { - for (size_t i = 0, n = z.size(); i < n; i++) { - MCL_FP_GEN_OP_MR(mov, z[i], t) - } - } - void mov_mp(const MixPack& z, const MixPack& x, const Reg64& t) - { - for (size_t i = 0, n = z.size(); i < n; i++) { - const MemReg zi = z[i], xi = x[i]; - if (z.isReg(i)) { - MCL_FP_GEN_OP_RM(mov, zi.getReg(), xi) - } else { - if (x.isReg(i)) { - mov(ptr [z.getMem(i)], x.getReg(i)); - } else { - mov(t, ptr [x.getMem(i)]); - mov(ptr [z.getMem(i)], t); - } - } - } - } -#ifdef _MSC_VER - void debug_put_mp(const MixPack& mp, int n, const Reg64& t) - { - if (n >= 10) exit(1); - static uint64_t buf[10]; - vmovq(xm0, rax); - mov(rax, (size_t)buf); - store_mp(rax, mp, t); - vmovq(rax, xm0); - push(rax); - mov(rax, (size_t)buf); - debug_put(rax, n); - pop(rax); - } -#endif - - std::string mkLabel(const char *label, int n) const - { - return std::string(label) + Xbyak::Label::toStr(n); - } - /* - int k = preInvC(pr, px) - */ - void gen_preInv() - { - assert(1 <= pn_ && pn_ <= 4); - const int freeRegNum = 13; - StackFrame sf(this, 2, 10 | UseRDX | UseRCX, (std::max(0, pn_ * 5 - 
freeRegNum) + 1 + (isFullBit_ ? 1 : 0)) * 8); - const Reg64& pr = sf.p[0]; - const Reg64& px = sf.p[1]; - const Reg64& t = rcx; - /* - k = rax, t = rcx : temporary - use rdx, pr, px in main loop, so we can use 13 registers - v = t[0, pn_) : all registers - */ - size_t rspPos = 0; - - assert(sf.t.size() >= (size_t)pn_); - Pack remain = sf.t; - - const MixPack rr(remain, rspPos, pn_); - remain.append(rdx); - const MixPack ss(remain, rspPos, pn_); - remain.append(px); - const int rSize = (int)remain.size(); - MixPack vv(remain, rspPos, pn_, rSize > 0 ? rSize / 2 : -1); - remain.append(pr); - MixPack uu(remain, rspPos, pn_); - - const RegExp keep_pr = rsp + rspPos; - rspPos += 8; - const RegExp rTop = rsp + rspPos; // used if isFullBit_ - - inLocalLabel(); - mov(ptr [keep_pr], pr); - mov(rax, px); - // px is free frome here - load_mp(vv, rax, t); // v = x - mov(rax, pL_); - load_mp(uu, rax, t); // u = p_ - // k = 0 - xor_(rax, rax); - // rTop = 0 - if (isFullBit_) { - mov(ptr [rTop], rax); - } - // r = 0; - set_mp(rr, rax); - // s = 1 - set_mp(ss, rax); - if (ss.isReg(0)) { - mov(ss.getReg(0), 1); - } else { - mov(qword [ss.getMem(0)], 1); - } - for (int cn = pn_; cn > 0; cn--) { - const std::string _lp = mkLabel(".lp", cn); - const std::string _u_v_odd = mkLabel(".u_v_odd", cn); - const std::string _u_even = mkLabel(".u_even", cn); - const std::string _v_even = mkLabel(".v_even", cn); - const std::string _v_ge_u = mkLabel(".v_ge_u", cn); - const std::string _v_lt_u = mkLabel(".v_lt_u", cn); - L(_lp); - or_mp(vv, t); - jz(".exit", T_NEAR); - - g_test(uu[0], 1); - jz(_u_even, T_NEAR); - g_test(vv[0], 1); - jz(_v_even, T_NEAR); - L(_u_v_odd); - if (cn > 1) { - isBothZero(vv[cn - 1], uu[cn - 1], t); - jz(mkLabel(".u_v_odd", cn - 1), T_NEAR); - } - for (int i = cn - 1; i >= 0; i--) { - g_cmp(vv[i], uu[i], t); - jc(_v_lt_u, T_NEAR); - if (i > 0) jnz(_v_ge_u, T_NEAR); - } - - L(_v_ge_u); - sub_mp(vv, uu, t); - add_mp(ss, rr, t); - L(_v_even); - shr_mp(vv, 1, t); - twice_mp(rr, t); - if (isFullBit_) { - sbb(t, t); - mov(ptr [rTop], t); - } - inc(rax); - jmp(_lp, T_NEAR); - L(_v_lt_u); - sub_mp(uu, vv, t); - add_mp(rr, ss, t); - if (isFullBit_) { - sbb(t, t); - mov(ptr [rTop], t); - } - L(_u_even); - shr_mp(uu, 1, t); - twice_mp(ss, t); - inc(rax); - jmp(_lp, T_NEAR); - - if (cn > 0) { - vv.removeLast(); - uu.removeLast(); - } - } - L(".exit"); - assert(ss.isReg(0)); - const Reg64& t2 = ss.getReg(0); - const Reg64& t3 = rdx; - - mov(t2, pL_); - if (isFullBit_) { - mov(t, ptr [rTop]); - test(t, t); - jz("@f"); - sub_mp_m(rr, t2, t); - L("@@"); - } - mov(t3, ptr [keep_pr]); - // pr[] = p[] - rr - sub_m_mp_m(t3, t2, rr, t); - jnc("@f"); - // pr[] += p[] - add_m_m(t3, t2, t, pn_); - L("@@"); - outLocalLabel(); - } - void fpDbl_mod_NIST_P192(const RegExp &py, const RegExp& px, const Pack& t) - { - const Reg64& t0 = t[0]; - const Reg64& t1 = t[1]; - const Reg64& t2 = t[2]; - const Reg64& t3 = t[3]; - const Reg64& t4 = t[4]; - const Reg64& t5 = t[5]; - load_rm(Pack(t2, t1, t0), px); // L=[t2:t1:t0] - load_rm(Pack(rax, t5, t4), px + 8 * 3); // H = [rax:t5:t4] - xor_(t3, t3); - add_rr(Pack(t3, t2, t1, t0), Pack(t3, rax, t5, t4)); // [t3:t2:t1:t0] = L + H - add_rr(Pack(t2, t1, t0), Pack(t5, t4, rax)); - adc(t3, 0); // [t3:t2:t1:t0] = L + H + [H1:H0:H2] - add(t1, rax); - adc(t2, 0); - adc(t3, 0); // e = t3, t = [t2:t1:t0] - xor_(t4, t4); - add(t0, t3); - adc(t1, 0); - adc(t2, 0); - adc(t4, 0); // t + e = [t4:t2:t1:t0] - add(t1, t3); - adc(t2, 0); - adc(t4, 0); // t + e + (e << 64) - // p = 
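/*
  Illustrative sketch (an inference from the branch structure of gen_preInv
  above, not a statement about mcl's intent): the loop matches Kaliski's
  "almost Montgomery inverse". For odd p and 0 < x < p it returns k such that
  the output r satisfies r = x^{-1} * 2^k (mod p), using only shifts, adds and
  compares; the caller divides out the 2^k factor afterwards. Single-limb
  version, small p assumed so r and s never overflow:
*/
#include <cstdint>

int almostInverse(uint64_t *pr, uint64_t x, uint64_t p)
{
	uint64_t u = p, v = x, r = 0, s = 1;
	int k = 0;
	while (v > 0) {
		if ((u & 1) == 0)      { u >>= 1; s <<= 1; }                  // .u_even
		else if ((v & 1) == 0) { v >>= 1; r <<= 1; }                  // .v_even
		else if (v >= u)       { v = (v - u) >> 1; s += r; r <<= 1; } // .v_ge_u
		else                   { u = (u - v) >> 1; r += s; s <<= 1; } // .v_lt_u
		k++;
	}
	if (r >= p) r -= p;
	*pr = p - r; // the final "pr[] = p[] - rr" step in the generated code
	return k;
}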
- void fpDbl_mod_NIST_P192(const RegExp &py, const RegExp& px, const Pack& t)
- {
- const Reg64& t0 = t[0];
- const Reg64& t1 = t[1];
- const Reg64& t2 = t[2];
- const Reg64& t3 = t[3];
- const Reg64& t4 = t[4];
- const Reg64& t5 = t[5];
- load_rm(Pack(t2, t1, t0), px); // L=[t2:t1:t0]
- load_rm(Pack(rax, t5, t4), px + 8 * 3); // H = [rax:t5:t4]
- xor_(t3, t3);
- add_rr(Pack(t3, t2, t1, t0), Pack(t3, rax, t5, t4)); // [t3:t2:t1:t0] = L + H
- add_rr(Pack(t2, t1, t0), Pack(t5, t4, rax));
- adc(t3, 0); // [t3:t2:t1:t0] = L + H + [H1:H0:H2]
- add(t1, rax);
- adc(t2, 0);
- adc(t3, 0); // e = t3, t = [t2:t1:t0]
- xor_(t4, t4);
- add(t0, t3);
- adc(t1, 0);
- adc(t2, 0);
- adc(t4, 0); // t + e = [t4:t2:t1:t0]
- add(t1, t3);
- adc(t2, 0);
- adc(t4, 0); // t + e + (e << 64)
- // p = [ffffffffffffffff:fffffffffffffffe:ffffffffffffffff]
- mov(rax, size_t(-1));
- mov(rdx, size_t(-2));
- jz("@f");
- sub_rr(Pack(t2, t1, t0), Pack(rax, rdx, rax));
- L("@@");
- mov_rr(Pack(t5, t4, t3), Pack(t2, t1, t0));
- sub_rr(Pack(t2, t1, t0), Pack(rax, rdx, rax));
- cmovc_rr(Pack(t2, t1, t0), Pack(t5, t4, t3));
- store_mr(py, Pack(t2, t1, t0));
- }
- /*
- p = (1 << 521) - 1
- x = [H:L]
- x % p = (L + H) % p
- */
- void fpDbl_mod_NIST_P521(const RegExp& py, const RegExp& px, const Pack& t)
- {
- const Reg64& t0 = t[0];
- const Reg64& t1 = t[1];
- const Reg64& t2 = t[2];
- const Reg64& t3 = t[3];
- const Reg64& t4 = t[4];
- const Reg64& t5 = t[5];
- const Reg64& t6 = t[6];
- const Reg64& t7 = t[7];
- const int c = 9;
- const uint32_t mask = (1 << c) - 1;
- const Pack pack(rdx, rax, t6, t5, t4, t3, t2, t1, t0);
- load_rm(pack, px + 64);
- mov(t7, mask);
- and_(t7, t0);
- shrd(t0, t1, c);
- shrd(t1, t2, c);
- shrd(t2, t3, c);
- shrd(t3, t4, c);
- shrd(t4, t5, c);
- shrd(t5, t6, c);
- shrd(t6, rax, c);
- shrd(rax, rdx, c);
- shr(rdx, c);
- // pack = L + H
- add_rm(Pack(rax, t6, t5, t4, t3, t2, t1, t0), px);
- adc(rdx, t7);
-
- // t = (L + H) >> 521, add t
- mov(t7, rdx);
- shr(t7, c);
- add(t0, t7);
- adc(t1, 0);
- adc(t2, 0);
- adc(t3, 0);
- adc(t4, 0);
- adc(t5, 0);
- adc(t6, 0);
- adc(rax, 0);
- adc(rdx, 0);
- and_(rdx, mask);
- store_mr(py, pack);
-
- // if [rdx..t0] == p then 0
- and_(rax, t0);
- and_(rax, t1);
- and_(rax, t2);
- and_(rax, t3);
- and_(rax, t4);
- and_(rax, t5);
- and_(rax, t6);
- not_(rax);
- xor_(rdx, (1 << c) - 1);
- or_(rax, rdx);
- jnz("@f");
- xor_(rax, rax);
- for (int i = 0; i < 9; i++) {
- mov(ptr[py + i * 8], rax);
- }
- L("@@");
- }
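For the Mersenne prime p = 2^521 - 1, double-width reduction is a pure fold: x = H*2^521 + L is congruent to H + L mod p, plus one more fold for the carry. A sketch with a toy Mersenne prime so it fits in one word (2^61 - 1 stands in for 2^521 - 1; the shrd chain above does the same split across nine limbs):

    #include <cstdint>
    // x mod (2^61 - 1); assumes x < 2^122 (a product of two reduced values).
    // unsigned __int128 is a GCC/Clang extension.
    static uint64_t mod_mersenne61(unsigned __int128 x) {
        const uint64_t p = ((uint64_t)1 << 61) - 1;
        uint64_t r = (uint64_t)(x & p) + (uint64_t)(x >> 61); // first fold
        r = (r & p) + (r >> 61);                              // absorb the carry
        return r == p ? 0 : r;   // map p itself to 0, as the generated code does
    }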
-private:
- FpGenerator(const FpGenerator&);
- void operator=(const FpGenerator&);
- void make_op_rm(void (Xbyak::CodeGenerator::*op)(const Xbyak::Operand&, const Xbyak::Operand&), const Reg64& op1, const MemReg& op2)
- {
- if (op2.isReg()) {
- (this->*op)(op1, op2.getReg());
- } else {
- (this->*op)(op1, qword [op2.getMem()]);
- }
- }
- void make_op_mr(void (Xbyak::CodeGenerator::*op)(const Xbyak::Operand&, const Xbyak::Operand&), const MemReg& op1, const Reg64& op2)
- {
- if (op1.isReg()) {
- (this->*op)(op1.getReg(), op2);
- } else {
- (this->*op)(qword [op1.getMem()], op2);
- }
- }
- void make_op(void (Xbyak::CodeGenerator::*op)(const Xbyak::Operand&, const Xbyak::Operand&), const MemReg& op1, const MemReg& op2, const Reg64& t)
- {
- if (op1.isReg()) {
- make_op_rm(op, op1.getReg(), op2);
- } else if (op2.isReg()) {
- (this->*op)(ptr [op1.getMem()], op2.getReg());
- } else {
- mov(t, ptr [op2.getMem()]);
- (this->*op)(ptr [op1.getMem()], t);
- }
- }
- void g_add(const MemReg& op1, const MemReg& op2, const Reg64& t) { make_op(&Xbyak::CodeGenerator::add, op1, op2, t); }
- void g_adc(const MemReg& op1, const MemReg& op2, const Reg64& t) { make_op(&Xbyak::CodeGenerator::adc, op1, op2, t); }
- void g_sub(const MemReg& op1, const MemReg& op2, const Reg64& t) { make_op(&Xbyak::CodeGenerator::sub, op1, op2, t); }
- void g_sbb(const MemReg& op1, const MemReg& op2, const Reg64& t) { make_op(&Xbyak::CodeGenerator::sbb, op1, op2, t); }
- void g_cmp(const MemReg& op1, const MemReg& op2, const Reg64& t) { make_op(&Xbyak::CodeGenerator::cmp, op1, op2, t); }
- void g_or(const Reg64& r, const MemReg& op) { make_op_rm(&Xbyak::CodeGenerator::or_, r, op); }
- void g_test(const MemReg& op1, const MemReg& op2, const Reg64& t)
- {
- const MemReg *pop1 = &op1;
- const MemReg *pop2 = &op2;
- if (!pop1->isReg()) {
- std::swap(pop1, pop2);
- }
- // (M, M), (R, M), (R, R)
- if (pop1->isReg()) {
- MCL_FP_GEN_OP_MR(test, (*pop2), pop1->getReg())
- } else {
- mov(t, ptr [pop1->getMem()]);
- test(ptr [pop2->getMem()], t);
- }
- }
- void g_mov(const MemReg& op, const Reg64& r)
- {
- make_op_mr(&Xbyak::CodeGenerator::mov, op, r);
- }
- void g_mov(const Reg64& r, const MemReg& op)
- {
- make_op_rm(&Xbyak::CodeGenerator::mov, r, op);
- }
- void g_add(const Reg64& r, const MemReg& mr) { MCL_FP_GEN_OP_RM(add, r, mr) }
- void g_adc(const Reg64& r, const MemReg& mr) { MCL_FP_GEN_OP_RM(adc, r, mr) }
- void isBothZero(const MemReg& op1, const MemReg& op2, const Reg64& t)
- {
- g_mov(t, op1);
- g_or(t, op2);
- }
- void g_test(const MemReg& op, int imm)
- {
- MCL_FP_GEN_OP_MR(test, op, imm)
- }
- /*
- z[] = x[]
- */
- void mov_rr(const Pack& z, const Pack& x)
- {
- assert(z.size() == x.size());
- for (int i = 0, n = (int)x.size(); i < n; i++) {
- mov(z[i], x[i]);
- }
- }
- /*
- m[] = x[]
- */
- void store_mr(const RegExp& m, const Pack& x)
- {
- for (int i = 0, n = (int)x.size(); i < n; i++) {
- mov(ptr [m + 8 * i], x[i]);
- }
- }
- void store_mr(const Xbyak::RegRip& m, const Pack& x)
- {
- for (int i = 0, n = (int)x.size(); i < n; i++) {
- mov(ptr [m + 8 * i], x[i]);
- }
- }
- /*
- x[] = m[]
- */
- template<class ADDR>
- void load_rm(const Pack& z, const ADDR& m)
- {
- for (int i = 0, n = (int)z.size(); i < n; i++) {
- mov(z[i], ptr [m + 8 * i]);
- }
- }
- /*
- z[] += x[]
- */
- void add_rr(const Pack& z, const Pack& x)
- {
- add(z[0], x[0]);
- assert(z.size() == x.size());
- for (size_t i = 1, n = z.size(); i < n; i++) {
- adc(z[i], x[i]);
- }
- }
- /*
- z[] -= x[]
- */
- void sub_rr(const Pack& z, const Pack& x)
- {
- sub(z[0], x[0]);
- assert(z.size() == x.size());
- for (size_t i = 1, n = z.size(); i < n; i++) {
- sbb(z[i], x[i]);
- }
- }
- /*
- z[] += m[]
- */
- template<class ADDR>
- void add_rm(const Pack& z, const ADDR& m, bool withCarry = false)
- {
- if (withCarry) {
- adc(z[0], ptr [m + 8 * 0]);
- } else {
- add(z[0], ptr [m + 8 * 0]);
- }
- for (int i = 1, n = (int)z.size(); i < n; i++) {
- adc(z[i], ptr [m + 8 * i]);
- }
- }
- /*
- z[] -= m[]
- */
- template<class ADDR>
- void sub_rm(const Pack& z, const ADDR& m, bool withCarry = false)
- {
- if (withCarry) {
- sbb(z[0], ptr [m + 8 * 0]);
- } else {
- sub(z[0], ptr [m + 8 * 0]);
- }
- for (int i = 1, n = (int)z.size(); i < n; i++) {
- sbb(z[i], ptr [m + 8 * i]);
- }
- }
- void cmovc_rr(const Pack& z, const Pack& x)
- {
- for (int i = 0, n = (int)z.size(); i < n; i++) {
- cmovc(z[i], x[i]);
- }
- }
- /*
- t = all or z[i]
- ZF = z is zero
- */
- void or_mp(const MixPack& z, const Reg64& t)
- {
- const size_t n = z.size();
- if (n == 1) {
- if (z.isReg(0)) {
- test(z.getReg(0), z.getReg(0));
- } else {
- mov(t, ptr [z.getMem(0)]);
- test(t, t);
- }
- } else {
- g_mov(t, z[0]);
- for (size_t i = 1; i < n; i++) {
- g_or(t, z[i]);
- }
- }
- }
- /*
- [rdx:x:t0] <- py[1:0] * x
- destroy x, t
- */
- void mul2x1(const RegExp& py, const Reg64& x, const Reg64& t0, const Reg64& t)
- {
- if (useMulx_) {
- // mulx(H, L, x) = [H:L] = x * rdx
- /*
- rdx:x
- rax:t0
- */
- mov(rdx, x);
- mulx(rax, t0, ptr [py]); // [rax:t0] = py[0] * x
- mulx(rdx, x, ptr [py + 8]); // [t:t1] = py[1] * x
- add(x, rax);
- adc(rdx, 0);
- } else {
- mov(rax, ptr [py]);
- mul(x);
- mov(t0, rax);
- mov(t, rdx);
- mov(rax, ptr [py + 8]);
- mul(x);
- /*
- rdx:rax
- t:t0
- */
- add(rax, t);
- adc(rdx, 0);
- mov(x, rax);
- }
- }
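mul2x1 builds a 2-limb by 1-limb product, preferring mulx (which writes a full 128-bit product without touching flags) when available. The same computation in portable C++ (unsigned __int128 is a GCC/Clang extension; the reference name is illustrative):

    #include <cstdint>
    // [out2:out1:out0] = y[1:0] * x
    static void mul2x1_ref(uint64_t out[3], const uint64_t y[2], uint64_t x) {
        unsigned __int128 t0 = (unsigned __int128)y[0] * x;
        unsigned __int128 t1 = (unsigned __int128)y[1] * x + (uint64_t)(t0 >> 64);
        out[0] = (uint64_t)t0;
        out[1] = (uint64_t)t1;
        out[2] = (uint64_t)(t1 >> 64);  // cannot overflow: y1*x + carry < 2^128
    }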
- /*
- [rdx:x:t1:t0] <- py[2:1:0] * x
- destroy x, t
- */
- void mul3x1(const RegExp& py, const Reg64& x, const Reg64& t2, const Reg64& t1, const Reg64& t0, const Reg64& t)
- {
- if (useMulx_) {
- // mulx(H, L, x) = [H:L] = x * rdx
- /*
- rdx:x
- t:t1
- rax:t0
- */
- mov(rdx, x);
- mulx(rax, t0, ptr [py]); // [rax:t0] = py[0] * x
- mulx(t, t1, ptr [py + 8]); // [t:t1] = py[1] * x
- add(t1, rax);
- mulx(rdx, x, ptr [py + 8 * 2]);
- adc(x, t);
- adc(rdx, 0);
- } else {
- mov(rax, ptr [py]);
- mul(x);
- mov(t0, rax);
- mov(t1, rdx);
- mov(rax, ptr [py + 8]);
- mul(x);
- mov(t, rax);
- mov(t2, rdx);
- mov(rax, ptr [py + 8 * 2]);
- mul(x);
- /*
- rdx:rax
- t2:t
- t1:t0
- */
- add(t1, t);
- adc(rax, t2);
- adc(rdx, 0);
- mov(x, rax);
- }
- }
- /*
- [x2:x1:x0] * x0
- */
- void mul3x1_sqr1(const RegExp& px, const Reg64& x0, const Reg64& t2, const Reg64& t1, const Reg64& t0, const Reg64& t)
- {
- mov(rax, x0);
- mul(x0);
- mov(t0, rax);
- mov(t1, rdx);
- mov(rax, ptr [px + 8]);
- mul(x0);
- mov(ptr [rsp + 0 * 8], rax); // (x0 * x1)_L
- mov(ptr [rsp + 1 * 8], rdx); // (x0 * x1)_H
- mov(t, rax);
- mov(t2, rdx);
- mov(rax, ptr [px + 8 * 2]);
- mul(x0);
- mov(ptr [rsp + 2 * 8], rax); // (x0 * x2)_L
- mov(ptr [rsp + 3 * 8], rdx); // (x0 * x2)_H
- /*
- rdx:rax
- t2:t
- t1:t0
- */
- add(t1, t);
- adc(t2, rax);
- adc(rdx, 0);
- }
- /*
- [x2:x1:x0] * x1
- */
- void mul3x1_sqr2(const RegExp& px, const Reg64& x1, const Reg64& t2, const Reg64& t1, const Reg64& t0)
- {
- mov(t0, ptr [rsp + 0 * 8]); // (x0 * x1)_L
- mov(rax, x1);
- mul(x1);
- mov(t1, rax);
- mov(t2, rdx);
- mov(rax, ptr [px + 8 * 2]);
- mul(x1);
- mov(ptr [rsp + 4 * 8], rax); // (x1 * x2)_L
- mov(ptr [rsp + 5 * 8], rdx); // (x1 * x2)_H
- /*
- rdx:rax
- t2:t1
- t:t0
- */
- add(t1, ptr [rsp + 1 * 8]); // (x0 * x1)_H
- adc(rax, t2);
- adc(rdx, 0);
- }
- /*
- [rdx:rax:t1:t0] = [x2:x1:x0] * x2
- */
- void mul3x1_sqr3(const Reg64& x2, const Reg64& t1, const Reg64& t0)
- {
- mov(rax, x2);
- mul(x2);
- /*
- rdx:rax
- t2:t
- t1:t0
- */
- mov(t0, ptr [rsp + 2 * 8]); // (x0 * x2)_L
- mov(t1, ptr [rsp + 3 * 8]); // (x0 * x2)_H
- add(t1, ptr [rsp + 4 * 8]); // (x1 * x2)_L
- adc(rax, ptr [rsp + 5 * 8]); // (x1 * x2)_H
- adc(rdx, 0);
- }
-
- /*
- c = [c3:y:c1:c0] = c + x[2..0] * y
- q = uint64_t(c0 * pp)
- c = (c + q * p) >> 64
- input [c3:c2:c1:c0], px, y, p
- output [c0:c3:c2:c1] ; c0 == 0 unless isFullBit_
-
- @note use rax, rdx, destroy y
- */
- void montgomery3_sub(uint64_t pp, const Reg64& c3, const Reg64& c2, const Reg64& c1, const Reg64& c0,
- const Reg64& /*px*/, const Reg64& y, const Reg64& p,
- const Reg64& t0, const Reg64& t1, const Reg64& t2, const Reg64& t3, const Reg64& t4, bool isFirst)
- {
- // input [c3:y:c1:0]
- // [t4:c3:y:c1:c0]
- // t4 = 0 or 1 if isFullBit_, = 0 otherwise
- mov(rax, pp);
- mul(c0); // q = rax
- mov(c2, rax);
- mul3x1(p, c2, t2, t1, t0, t3);
- // [rdx:c2:t1:t0] = p * q
- add(c0, t0); // always c0 is zero
- adc(c1, t1);
- adc(c2, y);
- adc(c3, rdx);
- if (isFullBit_) {
- if (isFirst) {
- setc(c0.cvt8());
- } else {
- adc(c0.cvt8(), t4.cvt8());
- }
- }
- }
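montgomery3_sub is one word step of Montgomery multiplication: choose q so the low limb of c + q*p vanishes, then shift right by one limb. A scalar sketch of the invariant, assuming a single-limb modulus with p < 2^63 so the sum fits in 128 bits (pp = -p^{-1} mod 2^64, as in the comment above):

    #include <cstdint>
    // One Montgomery word step: (c + q*p) >> 64 with q = c0 * pp mod 2^64.
    static uint64_t mont_step_ref(unsigned __int128 c, uint64_t p, uint64_t pp) {
        uint64_t q = (uint64_t)c * pp;                   // kills the low limb
        unsigned __int128 t = c + (unsigned __int128)q * p;
        // low 64 bits of t are zero by construction of pp
        return (uint64_t)(t >> 64);
    }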
- /*
- c = [c3:c2:c1:c0]
- c += x[2..0] * y
- q = uint64_t(c0 * pp)
- c = (c + q * p) >> 64
- input [c3:c2:c1:c0], px, y, p
- output [c0:c3:c2:c1] ; c0 == 0 unless isFullBit_
-
- @note use rax, rdx, destroy y
- */
- void montgomery3_1(uint64_t pp, const Reg64& c3, const Reg64& c2, const Reg64& c1, const Reg64& c0,
- const Reg64& px, const Reg64& y, const Reg64& p,
- const Reg64& t0, const Reg64& t1, const Reg64& t2, const Reg64& t3, const Reg64& t4, bool isFirst)
- {
- if (isFirst) {
- mul3x1(px, y, c2, c1, c0, c3);
- mov(c3, rdx);
- // [c3:y:c1:c0] = px[2..0] * y
- } else {
- mul3x1(px, y, t2, t1, t0, t3);
- // [rdx:y:t1:t0] = px[2..0] * y
- add_rr(Pack(c3, y, c1, c0), Pack(rdx, c2, t1, t0));
- if (isFullBit_) setc(t4.cvt8());
- }
- montgomery3_sub(pp, c3, c2, c1, c0, px, y, p, t0, t1, t2, t3, t4, isFirst);
- }
- /*
- pc[0..n] += x[0..n-1] * y ; pc[] = 0 if isFirst
- pc[n + 1] is temporary used if isFullBit_
- q = uint64_t(pc[0] * pp)
- pc[] = (pc[] + q * p) >> 64
- input : pc[], px[], y, p[], pw1[], pw2[]
- output : pc[0..n] ; if isFullBit_
- pc[0..n-1] ; if !isFullBit_
- destroy y
- use
- pw1[0] if useMulx_
- pw1[0..n-2] otherwise
- pw2[0..n-1]
- */
- void montgomeryN_1(uint64_t pp, int n, const RegExp& pc, const RegExp& px, const Reg64& y, const Reg64& p, const Reg64& t, const MixPack& pw1, const RegExp& pw2, bool isFirst)
- {
- // pc[] += x[] * y
- if (isFirst) {
- gen_raw_mulUnit(pc, px, y, pw1, t, n);
- mov(ptr [pc + n * 8], rdx);
- } else {
- gen_raw_mulUnit(pw2, px, y, pw1, t, n);
- mov(t, ptr [pw2 + 0 * 8]);
- add(ptr [pc + 0 * 8], t);
- for (int i = 1; i < n; i++) {
- mov(t, ptr [pw2 + i * 8]);
- adc(ptr [pc + i * 8], t);
- }
- adc(ptr [pc + n * 8], rdx);
- if (isFullBit_) {
- mov(t, 0);
- adc(t, 0);
- mov(qword [pc + (n + 1) * 8], t);
- }
- }
- mov(rax, pp);
- mul(qword [pc]);
- mov(y, rax); // y = q
- gen_raw_mulUnit(pw2, p, y, pw1, t, n);
- // c[] = (c[] + pw2[]) >> 64
- mov(t, ptr [pw2 + 0 * 8]);
- add(t, ptr [pc + 0 * 8]);
- for (int i = 1; i < n; i++) {
- mov(t, ptr [pw2 + i * 8]);
- adc(t, ptr [pc + i * 8]);
- mov(ptr [pc + (i - 1) * 8], t);
- }
- adc(rdx, ptr [pc + n * 8]);
- mov(ptr [pc + (n - 1) * 8], rdx);
- if (isFullBit_) {
- if (isFirst) {
- mov(t, 0);
- } else {
- mov(t, ptr [pc + (n + 1) * 8]);
- }
- adc(t, 0);
- mov(qword [pc + n * 8], t);
- } else {
- xor_(eax, eax);
- mov(ptr [pc + n * 8], rax);
- }
- }
- /*
- [rdx:x:t2:t1:t0] <- py[3:2:1:0] * x
- destroy x, t
- */
- void mul4x1(const RegExp& py, const Reg64& x, const Reg64& t3, const Reg64& t2, const Reg64& t1, const Reg64& t0, const Reg64& t)
- {
- if (useMulx_) {
- mov(rdx, x);
- mulx(t1, t0, ptr [py + 8 * 0]);
- mulx(t2, rax, ptr [py + 8 * 1]);
- add(t1, rax);
- mulx(x, rax, ptr [py + 8 * 2]);
- adc(t2, rax);
- mulx(rdx, rax, ptr [py + 8 * 3]);
- adc(x, rax);
- adc(rdx, 0);
- } else {
- mov(rax, ptr [py]);
- mul(x);
- mov(t0, rax);
- mov(t1, rdx);
- mov(rax, ptr [py + 8]);
- mul(x);
- mov(t, rax);
- mov(t2, rdx);
- mov(rax, ptr [py + 8 * 2]);
- mul(x);
- mov(t3, rax);
- mov(rax, x);
- mov(x, rdx);
- mul(qword [py + 8 * 3]);
- add(t1, t);
- adc(t2, t3);
- adc(x, rax);
- adc(rdx, 0);
- }
- }
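montgomeryN_1 above is one pass of the word-serial (CIOS-style) Montgomery multiplication: accumulate x[]*y[i], then apply the word reduction. A compact C++ reference for the whole product, a sketch under the assumptions stated in the comments (names are illustrative, not the vendored API):

    #include <cstdint>
    // z = x * y * R^{-1} mod-ish p (result < 2p, caller subtracts p once if
    // needed); R = 2^(64*N), pp = -p[0]^{-1} mod 2^64. Assumes N <= 8.
    static void mont_mul_ref(uint64_t* z, const uint64_t* x, const uint64_t* y,
                             const uint64_t* p, uint64_t pp, int N) {
        uint64_t c[10] = {0};                               // N + 2 limbs
        for (int i = 0; i < N; i++) {
            unsigned __int128 t = 0;
            for (int j = 0; j < N; j++) {                   // c += x[] * y[i]
                t += (unsigned __int128)x[j] * y[i] + c[j];
                c[j] = (uint64_t)t; t >>= 64;
            }
            t += c[N]; c[N] = (uint64_t)t; c[N + 1] = (uint64_t)(t >> 64);
            uint64_t q = c[0] * pp;                         // low limb killer
            t = 0;
            for (int j = 0; j < N; j++) {                   // c += q * p
                t += (unsigned __int128)q * p[j] + c[j];
                c[j] = (uint64_t)t; t >>= 64;
            }
            t += c[N]; c[N] = (uint64_t)t; c[N + 1] += (uint64_t)(t >> 64);
            for (int j = 0; j <= N; j++) c[j] = c[j + 1];   // >> 64; c[0] was 0
            c[N + 1] = 0;
        }
        for (int i = 0; i < N; i++) z[i] = c[i];
    }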
- /*
- c = [c4:c3:c2:c1:c0]
- c += x[3..0] * y
- q = uint64_t(c0 * pp)
- c = (c + q * p) >> 64
- input [c4:c3:c2:c1:c0], px, y, p
- output [c0:c4:c3:c2:c1]
-
- @note use rax, rdx, destroy y
- use xt if isFullBit_
- */
- void montgomery4_1(uint64_t pp, const Reg64& c4, const Reg64& c3, const Reg64& c2, const Reg64& c1, const Reg64& c0,
- const Reg64& px, const Reg64& y, const Reg64& p,
- const Reg64& t0, const Reg64& t1, const Reg64& t2, const Reg64& t3, const Reg64& t4, bool isFirst, const Xmm& xt)
- {
- if (isFirst) {
- mul4x1(px, y, c3, c2, c1, c0, c4);
- mov(c4, rdx);
- // [c4:y:c2:c1:c0] = px[3..0] * y
- } else {
- mul4x1(px, y, t3, t2, t1, t0, t4);
- // [rdx:y:t2:t1:t0] = px[3..0] * y
- if (isFullBit_) {
- vmovq(xt, px);
- xor_(px, px);
- }
- add_rr(Pack(c4, y, c2, c1, c0), Pack(rdx, c3, t2, t1, t0));
- if (isFullBit_) {
- adc(px, 0);
- }
- }
- // [px:c4:y:c2:c1:c0]
- // px = 0 or 1 if isFullBit_, = 0 otherwise
- mov(rax, pp);
- mul(c0); // q = rax
- mov(c3, rax);
- mul4x1(p, c3, t3, t2, t1, t0, t4);
- add(c0, t0); // always c0 is zero
- adc(c1, t1);
- adc(c2, t2);
- adc(c3, y);
- adc(c4, rdx);
- if (isFullBit_) {
- if (isFirst) {
- adc(c0, 0);
- } else {
- adc(c0, px);
- vmovq(px, xt);
- }
- }
- }
- void3u gen_fp2Dbl_mulPre()
- {
- if (isFullBit_) return 0;
-// if (pn_ != 4 && !(pn_ == 6 && useMulx_ && useAdx_)) return 0;
- // almost same for pn_ == 6
- if (pn_ != 4) return 0;
- align(16);
- void3u func = getCurr<void3u>();
-
- const RegExp z = rsp + 0 * 8;
- const RegExp x = rsp + 1 * 8;
- const RegExp y = rsp + 2 * 8;
- const Ext1 s(FpByte_, rsp, 3 * 8);
- const Ext1 t(FpByte_, rsp, s.next);
- const Ext1 d2(FpByte_ * 2, rsp, t.next);
- const int SS = d2.next;
- StackFrame sf(this, 3, 10 | UseRDX, SS);
- mov(ptr [z], gp0);
- mov(ptr [x], gp1);
- mov(ptr [y], gp2);
- // s = a + b
- gen_raw_add(s, gp1, gp1 + FpByte_, rax, pn_);
- // t = c + d
- gen_raw_add(t, gp2, gp2 + FpByte_, rax, pn_);
- // d1 = (a + b)(c + d)
- mov(gp0, ptr [z]);
- add(gp0, FpByte_ * 2); // d1
- lea(gp1, ptr [s]);
- lea(gp2, ptr [t]);
- call(mulPreL);
- // d0 = a c
- mov(gp0, ptr [z]);
- mov(gp1, ptr [x]);
- mov(gp2, ptr [y]);
- call(mulPreL);
-
- // d2 = b d
- lea(gp0, ptr [d2]);
- mov(gp1, ptr [x]);
- add(gp1, FpByte_);
- mov(gp2, ptr [y]);
- add(gp2, FpByte_);
- call(mulPreL);
-
- mov(gp0, ptr [z]);
- add(gp0, FpByte_ * 2); // d1
- mov(gp1, gp0);
- mov(gp2, ptr [z]);
- gen_raw_sub(gp0, gp1, gp2, rax, pn_ * 2);
- lea(gp2, ptr [d2]);
- gen_raw_sub(gp0, gp1, gp2, rax, pn_ * 2);
-
- mov(gp0, ptr [z]);
- mov(gp1, gp0);
- lea(gp2, ptr [d2]);
-
- gen_raw_sub(gp0, gp1, gp2, rax, pn_);
- if (pn_ == 4) {
- gen_raw_fp_sub(gp0 + pn_ * 8, gp1 + pn_ * 8, gp2 + pn_ * 8, Pack(gt0, gt1, gt2, gt3, gt4, gt5, gt6, gt7), true);
- } else {
- assert(pn_ == 6);
- gen_raw_fp_sub6(gp0, gp1, gp2, pn_ * 8, sf.t.sub(0, 6), true);
- }
- return func;
- }
- void2u gen_fp2Dbl_sqrPre()
- {
- if (isFullBit_) return 0;
-// if (pn_ != 4 && !(pn_ == 6 && useMulx_ && useAdx_)) return 0;
- // almost same for pn_ == 6
- if (pn_ != 4) return 0;
- align(16);
- void2u func = getCurr<void2u>();
- // almost same for pn_ == 6
- if (pn_ != 4) return 0;
- const RegExp y = rsp + 0 * 8;
- const RegExp x = rsp + 1 * 8;
- const Ext1 t1(FpByte_, rsp, 2 * 8);
- const Ext1 t2(FpByte_, rsp, t1.next);
- // use mulPreL then use 3
- StackFrame sf(this, 3 /* not 2 */, 10 | UseRDX, t2.next);
- mov(ptr [y], gp0);
- mov(ptr [x], gp1);
- Pack t = sf.t;
- if (pn_ == 6) {
- t.append(rax);
- t.append(rdx);
- }
- const Pack a = t.sub(0, pn_);
- const Pack b = t.sub(pn_, pn_);
- load_rm(b, gp1 + FpByte_);
- for (int i = 0; i < pn_; i++) {
- mov(rax, b[i]);
- if (i == 0) {
- add(rax, rax);
- } else {
- adc(rax, rax);
- }
- mov(ptr [(const RegExp&)t1 + i * 8], rax);
- }
- load_rm(a, gp1);
- add_rr(a, b);
- store_mr(t2, a);
- mov(gp0, ptr [y]);
- add(gp0, FpByte_ * 2);
- lea(gp1, ptr [t1]);
- mov(gp2, ptr [x]);
- call(mulPreL);
- mov(gp0, ptr [x]);
- if (pn_ == 4) {
- gen_raw_fp_sub(t1, gp0, gp0 + FpByte_, sf.t, false);
- } else {
- assert(pn_ == 6);
- gen_raw_fp_sub6(t1, gp0, gp0, FpByte_, a, false);
- }
- mov(gp0, ptr [y]);
- lea(gp1, ptr [t1]);
- lea(gp2, ptr [t2]);
- call(mulPreL);
- return func;
- }
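gen_fp2Dbl_mulPre is the 3-multiplication Karatsuba form over Fp2 = Fp[i]/(i^2 + 1): with d1 = (a+b)(c+d), d0 = ac and d2 = bd, the product is (d0 - d2) + (d1 - d0 - d2)i. A self-contained sketch over a toy single-word prime (the real code works on 4-limb values with double-width intermediates; the prime and names here are illustrative):

    #include <cstdint>
    using u128 = unsigned __int128;                 // GCC/Clang extension
    static const uint64_t P = ((uint64_t)1 << 61) - 1;  // toy prime
    struct Fp2 { uint64_t a, b; };                  // a + b*i, i^2 = -1
    static uint64_t mulp(uint64_t x, uint64_t y) { return (uint64_t)((u128)x * y % P); }
    static uint64_t addp(uint64_t x, uint64_t y) { uint64_t z = x + y; return z >= P ? z - P : z; }
    static uint64_t subp(uint64_t x, uint64_t y) { return x >= y ? x - y : x + P - y; }
    // Three multiplications instead of four, as in gen_fp2Dbl_mulPre above.
    static Fp2 fp2_mul(Fp2 x, Fp2 y) {
        uint64_t d1 = mulp(addp(x.a, x.b), addp(y.a, y.b)); // (a+b)(c+d)
        uint64_t d0 = mulp(x.a, y.a);                       // ac
        uint64_t d2 = mulp(x.b, y.b);                       // bd
        return { subp(d0, d2), subp(subp(d1, d0), d2) };    // (ac-bd) + (ad+bc)i
    }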
- void gen_fp2_add4()
- {
- assert(!isFullBit_);
- StackFrame sf(this, 3, 8);
- gen_raw_fp_add(sf.p[0], sf.p[1], sf.p[2], sf.t, false);
- gen_raw_fp_add(sf.p[0] + FpByte_, sf.p[1] + FpByte_, sf.p[2] + FpByte_, sf.t, false);
- }
- void gen_fp2_add6()
- {
- assert(!isFullBit_);
- StackFrame sf(this, 3, 10);
- const Reg64& pz = sf.p[0];
- const Reg64& px = sf.p[1];
- const Reg64& py = sf.p[2];
- Pack t1 = sf.t.sub(0, 6);
- Pack t2 = sf.t.sub(6);
- t2.append(rax);
- t2.append(px); // destroy after used
- vmovq(xm0, px);
- gen_raw_fp_add6(pz, px, py, t1, t2, false);
- vmovq(px, xm0);
- gen_raw_fp_add6(pz + FpByte_, px + FpByte_, py + FpByte_, t1, t2, false);
- }
- void gen_fp2_sub6()
- {
- StackFrame sf(this, 3, 5);
- const Reg64& pz = sf.p[0];
- const Reg64& px = sf.p[1];
- const Reg64& py = sf.p[2];
- Pack t = sf.t;
- t.append(rax);
- gen_raw_fp_sub6(pz, px, py, 0, t, false);
- gen_raw_fp_sub6(pz, px, py, FpByte_, t, false);
- }
- void3u gen_fp2_add()
- {
- align(16);
- void3u func = getCurr<void3u>();
- if (pn_ == 4 && !isFullBit_) {
- gen_fp2_add4();
- return func;
- }
- if (pn_ == 6 && !isFullBit_) {
- gen_fp2_add6();
- return func;
- }
- return 0;
- }
- void3u gen_fp2_sub()
- {
- align(16);
- void3u func = getCurr<void3u>();
- if (pn_ == 4 && !isFullBit_) {
- gen_fp2_sub4();
- return func;
- }
- if (pn_ == 6 && !isFullBit_) {
- gen_fp2_sub6();
- return func;
- }
- return 0;
- }
- void gen_fp2_sub4()
- {
- assert(!isFullBit_);
- StackFrame sf(this, 3, 8);
- gen_raw_fp_sub(sf.p[0], sf.p[1], sf.p[2], sf.t, false);
- gen_raw_fp_sub(sf.p[0] + FpByte_, sf.p[1] + FpByte_, sf.p[2] + FpByte_, sf.t, false);
- }
- /*
- for only xi_a = 1
- y.a = a - b
- y.b = a + b
- */
- void gen_fp2_mul_xi4()
- {
- assert(!isFullBit_);
- StackFrame sf(this, 2, 11 | UseRDX);
- const Reg64& py = sf.p[0];
- const Reg64& px = sf.p[1];
- Pack a = sf.t.sub(0, 4);
- Pack b = sf.t.sub(4, 4);
- Pack t = sf.t.sub(8);
- t.append(rdx);
- assert(t.size() == 4);
- load_rm(a, px);
- load_rm(b, px + FpByte_);
- for (int i = 0; i < pn_; i++) {
- mov(t[i], a[i]);
- if (i == 0) {
- add(t[i], b[i]);
- } else {
- adc(t[i], b[i]);
- }
- }
- sub_rr(a, b);
- mov(rax, pL_);
- load_rm(b, rax);
- sbb(rax, rax);
- for (int i = 0; i < pn_; i++) {
- and_(b[i], rax);
- }
- add_rr(a, b);
- store_mr(py, a);
- mov(rax, pL_);
- mov_rr(a, t);
- sub_rm(t, rax);
- cmovc_rr(t, a);
- store_mr(py + FpByte_, t);
- }
- void gen_fp2_mul_xi6()
- {
- assert(!isFullBit_);
- StackFrame sf(this, 2, 12);
- const Reg64& py = sf.p[0];
- const Reg64& px = sf.p[1];
- Pack a = sf.t.sub(0, 6);
- Pack b = sf.t.sub(6);
- load_rm(a, px);
- mov_rr(b, a);
- add_rm(b, px + FpByte_);
- sub_rm(a, px + FpByte_);
- mov(rax, pL_);
- jnc("@f");
- add_rm(a, rax);
- L("@@");
- store_mr(py, a);
- mov_rr(a, b);
- sub_rm(b, rax);
- cmovc_rr(b, a);
- store_mr(py + FpByte_, b);
- }
- void2u gen_fp2_mul_xi()
- {
- if (isFullBit_) return 0;
- if (op_->xi_a != 1) return 0;
- align(16);
- void2u func = getCurr<void2u>();
- if (pn_ == 4) {
- gen_fp2_mul_xi4();
- return func;
- }
- if (pn_ == 6) {
- gen_fp2_mul_xi6();
- return func;
- }
- return 0;
- }
- void2u gen_fp2_neg()
- {
- align(16);
- void2u func = getCurr<void2u>();
- if (pn_ <= 6) {
- StackFrame sf(this, 2, UseRDX | pn_);
- gen_raw_neg(sf.p[0], sf.p[1], sf.t);
- gen_raw_neg(sf.p[0] + FpByte_, sf.p[1] + FpByte_, sf.t);
- return func;
- }
- return 0;
- }
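gen_fp2_mul_xi handles multiplication by xi = 1 + i (the xi_a == 1 case named in the comment above): (a + bi)(1 + i) = (a - b) + (a + b)i, so one modular add and one modular sub replace a full multiplication. A one-liner sketch with its own toy helpers (same illustrative prime as the earlier Fp2 sketch):

    #include <cstdint>
    static const uint64_t PQ = ((uint64_t)1 << 61) - 1;  // toy prime
    struct Fp2q { uint64_t a, b; };
    static uint64_t addq(uint64_t x, uint64_t y) { uint64_t z = x + y; return z >= PQ ? z - PQ : z; }
    static uint64_t subq(uint64_t x, uint64_t y) { return x >= y ? x - y : x + PQ - y; }
    // y = x * (1 + i): no multiplication needed.
    static Fp2q fp2_mul_xi(Fp2q x) { return { subq(x.a, x.b), addq(x.a, x.b) }; }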
- void3u gen_fp2_mul()
- {
- if (isFullBit_) return 0;
- if (pn_ != 4 && !(pn_ == 6 && useMulx_ && useAdx_)) return 0;
- align(16);
- void3u func = getCurr<void3u>();
- bool embedded = pn_ == 4;
-
- const RegExp z = rsp + 0 * 8;
- const RegExp x = rsp + 1 * 8;
- const RegExp y = rsp + 2 * 8;
- const Ext1 s(FpByte_, rsp, 3 * 8);
- const Ext1 t(FpByte_, rsp, s.next);
- const Ext1 d0(FpByte_ * 2, rsp, t.next);
- const Ext1 d1(FpByte_ * 2, rsp, d0.next);
- const Ext1 d2(FpByte_ * 2, rsp, d1.next);
- const int SS = d2.next;
- StackFrame sf(this, 3, 10 | UseRDX, SS);
- mov(ptr[z], gp0);
- mov(ptr[x], gp1);
- mov(ptr[y], gp2);
- // s = a + b
- gen_raw_add(s, gp1, gp1 + FpByte_, rax, pn_);
- // t = c + d
- gen_raw_add(t, gp2, gp2 + FpByte_, rax, pn_);
- // d1 = (a + b)(c + d)
- if (embedded) {
- mulPre4(d1, s, t, sf.t);
- } else {
- lea(gp0, ptr [d1]);
- lea(gp1, ptr [s]);
- lea(gp2, ptr [t]);
- call(mulPreL);
- }
- // d0 = a c
- mov(gp1, ptr [x]);
- mov(gp2, ptr [y]);
- if (embedded) {
- mulPre4(d0, gp1, gp2, sf.t);
- } else {
- lea(gp0, ptr [d0]);
- call(mulPreL);
- }
- // d2 = b d
- mov(gp1, ptr [x]);
- add(gp1, FpByte_);
- mov(gp2, ptr [y]);
- add(gp2, FpByte_);
- if (embedded) {
- mulPre4(d2, gp1, gp2, sf.t);
- } else {
- lea(gp0, ptr [d2]);
- call(mulPreL);
- }
-
- gen_raw_sub(d1, d1, d0, rax, pn_ * 2);
- gen_raw_sub(d1, d1, d2, rax, pn_ * 2);
-
- gen_raw_sub(d0, d0, d2, rax, pn_);
- if (pn_ == 4) {
- gen_raw_fp_sub((RegExp)d0 + pn_ * 8, (RegExp)d0 + pn_ * 8, (RegExp)d2 + pn_ * 8, Pack(gt0, gt1, gt2, gt3, gt4, gt5, gt6, gt7), true);
- } else {
- lea(gp0, ptr[d0]);
- lea(gp2, ptr[d2]);
- gen_raw_fp_sub6(gp0, gp0, gp2, pn_ * 8, sf.t.sub(0, 6), true);
- }
-
- mov(gp0, ptr [z]);
- lea(gp1, ptr[d0]);
- call(fpDbl_modL);
-
- mov(gp0, ptr [z]);
- add(gp0, FpByte_);
- lea(gp1, ptr[d1]);
- call(fpDbl_modL);
- return func;
- }
- void2u gen_fp2_sqr()
- {
- if (isFullBit_) return 0;
- if (pn_ != 4 && !(pn_ == 6 && useMulx_ && useAdx_)) return 0;
- align(16);
- void2u func = getCurr<void2u>();
-
- const RegExp y = rsp + 0 * 8;
- const RegExp x = rsp + 1 * 8;
- const Ext1 t1(FpByte_, rsp, 2 * 8);
- const Ext1 t2(FpByte_, rsp, t1.next);
- const Ext1 t3(FpByte_, rsp, t2.next);
- bool nocarry = (p_[pn_ - 1] >> 62) == 0;
- StackFrame sf(this, 3, 10 | UseRDX, t3.next);
- mov(ptr [y], gp0);
- mov(ptr [x], gp1);
- // t1 = b + b
- lea(gp0, ptr [t1]);
- if (nocarry) {
- for (int i = 0; i < pn_; i++) {
- mov(rax, ptr [gp1 + FpByte_ + i * 8]);
- if (i == 0) {
- add(rax, rax);
- } else {
- adc(rax, rax);
- }
- mov(ptr [gp0 + i * 8], rax);
- }
- } else {
- if (pn_ == 4) {
- gen_raw_fp_add(gp0, gp1 + FpByte_, gp1 + FpByte_, sf.t, false);
- } else {
- assert(pn_ == 6);
- Pack t = sf.t.sub(6, 4);
- t.append(rax);
- t.append(rdx);
- gen_raw_fp_add6(gp0, gp1 + FpByte_, gp1 + FpByte_, sf.t.sub(0, 6), t, false);
- }
- }
- // t1 = 2ab
- mov(gp1, gp0);
- mov(gp2, ptr [x]);
- call(fp_mulL);
-
- if (nocarry) {
- Pack t = sf.t;
- t.append(rdx);
- t.append(gp1);
- Pack a = t.sub(0, pn_);
- Pack b = t.sub(pn_, pn_);
- mov(gp0, ptr [x]);
- load_rm(a, gp0);
- load_rm(b, gp0 + FpByte_);
- // t2 = a + b
- for (int i = 0; i < pn_; i++) {
- mov(rax, a[i]);
- if (i == 0) {
- add(rax, b[i]);
- } else {
- adc(rax, b[i]);
- }
- mov(ptr [(RegExp)t2 + i * 8], rax);
- }
- // t3 = a + p - b
- mov(rax, pL_);
- add_rm(a, rax);
- sub_rr(a, b);
- store_mr(t3, a);
- } else {
- mov(gp0, ptr [x]);
- if (pn_ == 4) {
- gen_raw_fp_add(t2, gp0, gp0 + FpByte_, sf.t, false);
- gen_raw_fp_sub(t3, gp0, gp0 + FpByte_, sf.t, false);
- } else {
- assert(pn_ == 6);
- Pack p1 = sf.t.sub(0, 6);
- Pack p2 = sf.t.sub(6, 4);
- p2.append(rax);
- p2.append(rdx);
- gen_raw_fp_add6(t2, gp0, gp0 + FpByte_, p1, p2, false);
- gen_raw_fp_sub6(t3, gp0, gp0 + FpByte_, 0, p1, false);
- }
- }
-
- mov(gp0, ptr [y]);
- lea(gp1, ptr [t2]);
- lea(gp2, ptr [t3]);
- call(fp_mulL);
- mov(gp0, ptr [y]);
- for (int i = 0; i < pn_; i++) {
- mov(rax, ptr [(RegExp)t1 + i * 8]);
- mov(ptr [gp0 + FpByte_ + i * 8], rax);
- }
- return func;
- }
-};
-
-} } // mcl::fp
-
-#ifdef _MSC_VER
- #pragma warning(pop)
-#endif
-
-#endif
-#endif
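gen_fp2_sqr uses the complex-squaring identity (a + bi)^2 = (a + b)(a - b) + (2ab)i, two multiplications instead of three; the t3 = a + p - b trick above just keeps the subtraction non-negative. With the same kind of toy helpers (illustrative prime, single-word Fp):

    #include <cstdint>
    static const uint64_t PS = ((uint64_t)1 << 61) - 1;  // toy prime
    struct Fp2s { uint64_t a, b; };
    static uint64_t mulps(uint64_t x, uint64_t y) { return (uint64_t)((unsigned __int128)x * y % PS); }
    static uint64_t addps(uint64_t x, uint64_t y) { uint64_t z = x + y; return z >= PS ? z - PS : z; }
    static uint64_t subps(uint64_t x, uint64_t y) { return x >= y ? x - y : x + PS - y; }
    static Fp2s fp2_sqr(Fp2s x) {
        uint64_t re = mulps(addps(x.a, x.b), subps(x.a, x.b)); // a^2 - b^2
        uint64_t im = mulps(addps(x.a, x.a), x.b);             // 2ab
        return { re, im };
    }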
diff --git a/vendor/github.com/dexon-foundation/mcl/src/gen.cpp b/vendor/github.com/dexon-foundation/mcl/src/gen.cpp
deleted file mode 100644
index 763f64b98..000000000
--- a/vendor/github.com/dexon-foundation/mcl/src/gen.cpp
+++ /dev/null
@@ -1,999 +0,0 @@
-#include "llvm_gen.hpp"
-#include <cybozu/option.hpp>
-#include <cybozu/itoa.hpp>
-#include <map>
-#include <set>
-#include <fstream>
-
-typedef std::set<std::string> StrSet;
-
-struct Code : public mcl::Generator {
- typedef std::map<uint32_t, Function> FunctionMap;
- typedef std::vector<Operand> OperandVec;
- Operand Void;
- uint32_t unit;
- uint32_t unit2;
- uint32_t bit;
- uint32_t N;
- const StrSet *privateFuncList;
- bool wasm;
- std::string suf;
- std::string unitStr;
- Function mulUU;
- Function mul32x32; // for WASM
- Function extractHigh;
- Function mulPos;
- Function makeNIST_P192;
- Function mcl_fpDbl_mod_NIST_P192;
- Function mcl_fp_sqr_NIST_P192;
- FunctionMap mcl_fp_shr1_M;
- FunctionMap mcl_fp_addPreM;
- FunctionMap mcl_fp_subPreM;
- FunctionMap mcl_fp_addM;
- FunctionMap mcl_fp_subM;
- FunctionMap mulPvM;
- FunctionMap mcl_fp_mulUnitPreM;
- FunctionMap mcl_fpDbl_mulPreM;
- FunctionMap mcl_fpDbl_sqrPreM;
- FunctionMap mcl_fp_montM;
- FunctionMap mcl_fp_montRedM;
- Code() : unit(0), unit2(0), bit(0), N(0), privateFuncList(0), wasm(false) { }
- void verifyAndSetPrivate(Function& f)
- {
- if (privateFuncList && privateFuncList->find(f.name) != privateFuncList->end()) {
- f.setPrivate();
- }
- }
- void storeN(Operand r, Operand p, int offset = 0)
- {
- if (p.bit != unit) {
- throw cybozu::Exception("bad IntPtr size") << p.bit;
- }
- if (offset > 0) {
- p = getelementptr(p, offset);
- }
- if (r.bit == unit) {
- store(r, p);
- return;
- }
- const size_t n = r.bit / unit;
- for (size_t i = 0; i < n; i++) {
- store(trunc(r, unit), getelementptr(p, i));
- if (i < n - 1) {
- r = lshr(r, unit);
- }
- }
- }
- Operand loadN(Operand p, size_t n, int offset = 0)
- {
- if (p.bit != unit) {
- throw cybozu::Exception("bad IntPtr size") << p.bit;
- }
- if (offset > 0) {
- p = getelementptr(p, offset);
- }
- Operand v = load(p);
- for (size_t i = 1; i < n; i++) {
- v = zext(v, v.bit + unit);
- Operand t = load(getelementptr(p, i));
- t = zext(t, v.bit);
- t = shl(t, unit * i);
- v = _or(v, t);
- }
- return v;
- }
- void gen_mul32x32()
- {
- const int u = 32;
- resetGlobalIdx();
- Operand z(Int, u * 2);
- Operand x(Int, u);
- Operand y(Int, u);
- mul32x32 = Function("mul32x32L", z, x, y);
- mul32x32.setPrivate();
- verifyAndSetPrivate(mul32x32);
- beginFunc(mul32x32);
-
- x = zext(x, u * 2);
- y = zext(y, u * 2);
- z = mul(x, y);
- ret(z);
- endFunc();
- }
- void gen_mul64x64(Operand& z, Operand& x, Operand& y)
- {
- Operand a = trunc(lshr(x, 32), 32);
- Operand b = trunc(x, 32);
- Operand c = trunc(lshr(y, 32), 32);
- Operand d = trunc(y, 32);
- Operand ad = call(mul32x32, a, d);
- Operand bd = call(mul32x32, b, d);
- bd = zext(bd, 96);
- ad = shl(zext(ad, 96), 32);
- ad = add(ad, bd);
- Operand ac = call(mul32x32, a, c);
- Operand bc = call(mul32x32, b, c);
- bc = zext(bc, 96);
- ac = shl(zext(ac, 96), 32);
- ac = add(ac, bc);
- ad = zext(ad, 128);
- ac = shl(zext(ac, 128), 32);
- z = add(ac, ad);
- }
- void gen_mulUU()
- {
- if (wasm) {
- gen_mul32x32();
- }
- resetGlobalIdx();
- Operand z(Int, unit2);
- Operand x(Int, unit);
- Operand y(Int, unit);
- std::string name = "mul";
- name += unitStr + "x" + unitStr + "L";
- mulUU = Function(name, z, x, y);
- mulUU.setPrivate();
- verifyAndSetPrivate(mulUU);
- beginFunc(mulUU);
-
- if (wasm) {
- gen_mul64x64(z, x, y);
- } else {
- x = zext(x, unit2);
- y = zext(y, unit2);
- z = mul(x, y);
- }
- ret(z);
- endFunc();
- }
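gen_mul64x64 exists because wasm has no native 64x64 to 128-bit multiply: it splits each operand into 32-bit halves and recombines the partial products. The same decomposition in plain C++ (names illustrative):

    #include <cstdint>
    // 64x64 -> 128 using only 32x32 -> 64 multiplies, as gen_mul64x64 emits.
    static void mul64x64_ref(uint64_t x, uint64_t y, uint64_t* hi, uint64_t* lo) {
        uint64_t a = x >> 32, b = (uint32_t)x, c = y >> 32, d = (uint32_t)y;
        uint64_t bd = b * d, ad = a * d, bc = b * c, ac = a * c;
        // middle column: < 3 * 2^32, so it cannot overflow 64 bits
        uint64_t mid = (bd >> 32) + (uint32_t)ad + (uint32_t)bc;
        *lo = (mid << 32) | (uint32_t)bd;
        *hi = ac + (ad >> 32) + (bc >> 32) + (mid >> 32);
    }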
- void gen_extractHigh()
- {
- resetGlobalIdx();
- Operand z(Int, unit);
- Operand x(Int, unit2);
- std::string name = "extractHigh";
- name += unitStr;
- extractHigh = Function(name, z, x);
- extractHigh.setPrivate();
- beginFunc(extractHigh);
-
- x = lshr(x, unit);
- z = trunc(x, unit);
- ret(z);
- endFunc();
- }
- void gen_mulPos()
- {
- resetGlobalIdx();
- Operand xy(Int, unit2);
- Operand px(IntPtr, unit);
- Operand y(Int, unit);
- Operand i(Int, unit);
- std::string name = "mulPos";
- name += unitStr + "x" + unitStr;
- mulPos = Function(name, xy, px, y, i);
- mulPos.setPrivate();
- beginFunc(mulPos);
-
- Operand x = load(getelementptr(px, i));
- xy = call(mulUU, x, y);
- ret(xy);
- endFunc();
- }
- Operand extract192to64(const Operand& x, uint32_t shift)
- {
- Operand y = lshr(x, shift);
- y = trunc(y, 64);
- return y;
- }
- void gen_makeNIST_P192()
- {
- resetGlobalIdx();
- Operand p(Int, 192);
- Operand p0(Int, 64);
- Operand p1(Int, 64);
- Operand p2(Int, 64);
- Operand _0 = makeImm(64, 0);
- Operand _1 = makeImm(64, 1);
- Operand _2 = makeImm(64, 2);
- makeNIST_P192 = Function("makeNIST_P192L" + suf, p);
- verifyAndSetPrivate(makeNIST_P192);
- beginFunc(makeNIST_P192);
- p0 = sub(_0, _1);
- p1 = sub(_0, _2);
- p2 = sub(_0, _1);
- p0 = zext(p0, 192);
- p1 = zext(p1, 192);
- p2 = zext(p2, 192);
- p1 = shl(p1, 64);
- p2 = shl(p2, 128);
- p = add(p0, p1);
- p = add(p, p2);
- ret(p);
- endFunc();
- }
- /*
- NIST_P192
- p = 0xfffffffffffffffffffffffffffffffeffffffffffffffff
- 0 1 2
- ffffffffffffffff fffffffffffffffe ffffffffffffffff
-
- p = (1 << 192) - (1 << 64) - 1
- (1 << 192) % p = (1 << 64) + 1
- L : 192bit
- Hi: 64bit
- x = [H:L] = [H2:H1:H0:L]
- mod p
- x = L + H + (H << 64)
- = L + H + [H1:H0:0] + H2 + (H2 << 64)
- [e:t] = L + H + [H1:H0:H2] + [H2:0] ; 2bit(e) over
- y = t + e + (e << 64)
- if (y >= p) y -= p
- */
- void gen_mcl_fpDbl_mod_NIST_P192()
- {
- resetGlobalIdx();
- Operand out(IntPtr, unit);
- Operand px(IntPtr, unit);
- mcl_fpDbl_mod_NIST_P192 = Function("mcl_fpDbl_mod_NIST_P192L" + suf, Void, out, px);
- verifyAndSetPrivate(mcl_fpDbl_mod_NIST_P192);
- beginFunc(mcl_fpDbl_mod_NIST_P192);
-
- const int n = 192 / unit;
- Operand L = loadN(px, n);
- L = zext(L, 256);
-
- Operand H192 = loadN(px, n, n);
- Operand H = zext(H192, 256);
-
- Operand H10 = shl(H192, 64);
- H10 = zext(H10, 256);
-
- Operand H2 = extract192to64(H192, 128);
- H2 = zext(H2, 256);
- Operand H102 = _or(H10, H2);
-
- H2 = shl(H2, 64);
-
- Operand t = add(L, H);
- t = add(t, H102);
- t = add(t, H2);
-
- Operand e = lshr(t, 192);
- e = trunc(e, 64);
- e = zext(e, 256);
- Operand e2 = shl(e, 64);
- e = _or(e, e2);
-
- t = trunc(t, 192);
- t = zext(t, 256);
-
- Operand z = add(t, e);
- Operand p = call(makeNIST_P192);
- p = zext(p, 256);
- Operand zp = sub(z, p);
- Operand c = trunc(lshr(zp, 192), 1);
- z = trunc(select(c, z, zp), 192);
- storeN(z, out);
- ret(Void);
- endFunc();
- }
- /*
- NIST_P521
- p = (1 << 521) - 1
- x = [H:L]
- x % p = (L + H) % p
- */
- void gen_mcl_fpDbl_mod_NIST_P521()
- {
- resetGlobalIdx();
- const uint32_t len = 521;
- const uint32_t n = len / unit;
- const uint32_t round = unit * (n + 1);
- const uint32_t rem = len - n * unit;
- const size_t mask = -(1 << rem);
- const Operand py(IntPtr, unit);
- const Operand px(IntPtr, unit);
- Function f("mcl_fpDbl_mod_NIST_P521L" + suf, Void, py, px);
- verifyAndSetPrivate(f);
- beginFunc(f);
- Operand x = loadN(px, n * 2 + 1);
- Operand L = trunc(x, len);
- L = zext(L, round);
- Operand H = lshr(x, len);
- H = trunc(H, round); // x = [H:L]
- Operand t = add(L, H);
- Operand t0 = lshr(t, len);
- t0 = _and(t0, makeImm(round, 1));
- t = add(t, t0);
- t = trunc(t, len);
- Operand z0 = zext(t, round);
- t = extract(z0, n * unit);
- Operand m = _or(t, makeImm(unit, mask));
- for (uint32_t i = 0; i < n; i++) {
- Operand s = extract(z0, unit * i);
- m = _and(m, s);
- }
- Operand c = icmp(eq, m, makeImm(unit, -1));
- Label zero("zero");
- Label nonzero("nonzero");
- br(c, zero, nonzero);
- putLabel(zero);
- for (uint32_t i = 0; i < n + 1; i++) {
- storeN(makeImm(unit, 0), py, i);
- }
- ret(Void);
- putLabel(nonzero);
- storeN(z0, py);
- ret(Void);
- endFunc();
- }
- void gen_mcl_fp_sqr_NIST_P192()
- {
- resetGlobalIdx();
- Operand py(IntPtr, unit);
- Operand px(IntPtr, unit);
- mcl_fp_sqr_NIST_P192 = Function("mcl_fp_sqr_NIST_P192L" + suf, Void, py, px);
- verifyAndSetPrivate(mcl_fp_sqr_NIST_P192);
- beginFunc(mcl_fp_sqr_NIST_P192);
- Operand buf = _alloca(unit, 192 * 2 / unit);
- // QQQ define later
- Function mcl_fpDbl_sqrPre("mcl_fpDbl_sqrPre" + cybozu::itoa(192 / unit) + "L" + suf, Void, buf, px);
- call(mcl_fpDbl_sqrPre, buf, px);
- call(mcl_fpDbl_mod_NIST_P192, py, buf);
- ret(Void);
- endFunc();
- }
- void gen_mcl_fp_mulNIST_P192()
- {
- resetGlobalIdx();
- Operand pz(IntPtr, unit);
- Operand px(IntPtr, unit);
- Operand py(IntPtr, unit);
- Function f("mcl_fp_mulNIST_P192L" + suf, Void, pz, px, py);
- verifyAndSetPrivate(f);
- beginFunc(f);
- Operand buf = _alloca(unit, 192 * 2 / unit);
- // QQQ define later
- Function mcl_fpDbl_mulPre("mcl_fpDbl_mulPre" + cybozu::itoa(192 / unit) + "L" + suf, Void, buf, px, py);
- call(mcl_fpDbl_mulPre, buf, px, py);
- call(mcl_fpDbl_mod_NIST_P192, pz, buf);
- ret(Void);
- endFunc();
- }
- void gen_once()
- {
- gen_mulUU();
- gen_extractHigh();
- gen_mulPos();
- gen_makeNIST_P192();
- gen_mcl_fpDbl_mod_NIST_P192();
- gen_mcl_fp_sqr_NIST_P192();
- gen_mcl_fp_mulNIST_P192();
- gen_mcl_fpDbl_mod_NIST_P521();
- }
- Operand extract(const Operand& x, uint32_t shift)
- {
- Operand t = lshr(x, shift);
- t = trunc(t, unit);
- return t;
- }
- void gen_mcl_fp_addsubPre(bool isAdd)
- {
- resetGlobalIdx();
- Operand r(Int, unit);
- Operand pz(IntPtr, unit);
- Operand px(IntPtr, unit);
- Operand py(IntPtr, unit);
- std::string name;
- if (isAdd) {
- name = "mcl_fp_addPre" + cybozu::itoa(N) + "L" + suf;
- mcl_fp_addPreM[N] = Function(name, r, pz, px, py);
- verifyAndSetPrivate(mcl_fp_addPreM[N]);
- beginFunc(mcl_fp_addPreM[N]);
- } else {
- name = "mcl_fp_subPre" + cybozu::itoa(N) + "L" + suf;
- mcl_fp_subPreM[N] = Function(name, r, pz, px, py);
- verifyAndSetPrivate(mcl_fp_subPreM[N]);
- beginFunc(mcl_fp_subPreM[N]);
- }
- Operand x = zext(loadN(px, N), bit + unit);
- Operand y = zext(loadN(py, N), bit + unit);
- Operand z;
- if (isAdd) {
- z = add(x, y);
- storeN(trunc(z, bit), pz);
- r = trunc(lshr(z, bit), unit);
- } else {
- z = sub(x, y);
- storeN(trunc(z, bit), pz);
- r = _and(trunc(lshr(z, bit), unit), makeImm(unit, 1));
- }
- ret(r);
- endFunc();
- }
-#if 0 // void-return version
- void gen_mcl_fp_addsubPre(bool isAdd)
- {
- resetGlobalIdx();
- Operand pz(IntPtr, bit);
- Operand px(IntPtr, bit);
- Operand py(IntPtr, bit);
- std::string name;
- if (isAdd) {
- name = "mcl_fp_addPre" + cybozu::itoa(bit) + "L";
- mcl_fp_addPreM[bit] = Function(name, Void, pz, px, py);
- verifyAndSetPrivate(mcl_fp_addPreM[bit]);
- beginFunc(mcl_fp_addPreM[bit]);
- } else {
- name = "mcl_fp_subPre" + cybozu::itoa(bit) + "L";
- mcl_fp_subPreM[bit] = Function(name, Void, pz, px, py);
- verifyAndSetPrivate(mcl_fp_subPreM[bit]);
- beginFunc(mcl_fp_subPreM[bit]);
- }
- Operand x = load(px);
- Operand y = load(py);
- Operand z;
- if (isAdd) {
- z = add(x, y);
- } else {
- z = sub(x, y);
- }
- store(z, pz);
- ret(Void);
- endFunc();
- }
-#endif
- void gen_mcl_fp_shr1()
- {
- resetGlobalIdx();
- Operand py(IntPtr, unit);
- Operand px(IntPtr, unit);
- std::string name = "mcl_fp_shr1_" + cybozu::itoa(N) + "L" + suf;
- mcl_fp_shr1_M[N] = Function(name, Void, py, px);
- verifyAndSetPrivate(mcl_fp_shr1_M[N]);
- beginFunc(mcl_fp_shr1_M[N]);
- Operand x = loadN(px, N);
- x = lshr(x, 1);
- storeN(x, py);
- ret(Void);
- endFunc();
- }
- void gen_mcl_fp_add(bool isFullBit = true)
- {
- resetGlobalIdx();
- Operand pz(IntPtr, unit);
- Operand px(IntPtr, unit);
- Operand py(IntPtr, unit);
- Operand pp(IntPtr, unit);
- std::string name = "mcl_fp_add";
- if (!isFullBit) {
- name += "NF";
- }
- name += cybozu::itoa(N) + "L" + suf;
- mcl_fp_addM[N] = Function(name, Void, pz, px, py, pp);
- verifyAndSetPrivate(mcl_fp_addM[N]);
- beginFunc(mcl_fp_addM[N]);
- Operand x = loadN(px, N);
- Operand y = loadN(py, N);
- if (isFullBit) {
- x = zext(x, bit + unit);
- y = zext(y, bit + unit);
- Operand t0 = add(x, y);
- Operand t1 = trunc(t0, bit);
- storeN(t1, pz);
- Operand p = loadN(pp, N);
- p = zext(p, bit + unit);
- Operand vc = sub(t0, p);
- Operand c = lshr(vc, bit);
- c = trunc(c, 1);
- Label carry("carry");
- Label nocarry("nocarry");
- br(c, carry, nocarry);
- putLabel(nocarry);
- storeN(trunc(vc, bit), pz);
- ret(Void);
- putLabel(carry);
- } else {
- x = add(x, y);
- Operand p = loadN(pp, N);
- y = sub(x, p);
- Operand c = trunc(lshr(y, bit - 1), 1);
- x = select(c, x, y);
- storeN(x, pz);
- }
- ret(Void);
- endFunc();
- }
- void gen_mcl_fp_sub(bool isFullBit = true)
- {
- resetGlobalIdx();
- Operand pz(IntPtr, unit);
- Operand px(IntPtr, unit);
- Operand py(IntPtr, unit);
- Operand pp(IntPtr, unit);
- std::string name = "mcl_fp_sub";
- if (!isFullBit) {
- name += "NF";
- }
- name += cybozu::itoa(N) + "L" + suf;
- mcl_fp_subM[N] = Function(name, Void, pz, px, py, pp);
- verifyAndSetPrivate(mcl_fp_subM[N]);
- beginFunc(mcl_fp_subM[N]);
- Operand x = loadN(px, N);
- Operand y = loadN(py, N);
- if (isFullBit) {
- x = zext(x, bit + unit);
- y = zext(y, bit + unit);
- Operand vc = sub(x, y);
- Operand v, c;
- v = trunc(vc, bit);
- c = lshr(vc, bit);
- c = trunc(c, 1);
- storeN(v, pz);
- Label carry("carry");
- Label nocarry("nocarry");
- br(c, carry, nocarry);
- putLabel(nocarry);
- ret(Void);
- putLabel(carry);
- Operand p = loadN(pp, N);
- Operand t = add(v, p);
- storeN(t, pz);
- } else {
- Operand v = sub(x, y);
- Operand c;
- c = trunc(lshr(v, bit - 1), 1);
- Operand p = loadN(pp, N);
- c = select(c, p, makeImm(bit, 0));
- Operand t = add(v, c);
- storeN(t, pz);
- }
- ret(Void);
- endFunc();
- }
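gen_mcl_fp_add computes x + y and subtracts p exactly when needed; the NF variant stays branchless with a select. A C++ reference for the full-bit path, a sketch under stated assumptions (N <= 8, inputs already reduced; names illustrative):

    #include <cstdint>
    // z = (x + y) mod p over N limbs; assumes x, y < p and N <= 8.
    static void fp_add_ref(uint64_t* z, const uint64_t* x, const uint64_t* y,
                           const uint64_t* p, int N) {
        uint64_t s[9], d[9];
        uint64_t carry = 0, borrow = 0;
        for (int i = 0; i < N; i++) {          // s = x + y, tracking the carry
            unsigned __int128 t = (unsigned __int128)x[i] + y[i] + carry;
            s[i] = (uint64_t)t; carry = (uint64_t)(t >> 64);
        }
        for (int i = 0; i < N; i++) {          // d = s - p, tracking the borrow
            unsigned __int128 t = (unsigned __int128)s[i] - p[i] - borrow;
            d[i] = (uint64_t)t; borrow = (uint64_t)(t >> 64) & 1;
        }
        int ge = carry || !borrow;             // [carry:s] >= p ?
        for (int i = 0; i < N; i++) z[i] = ge ? d[i] : s[i];
    }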
- void gen_mcl_fpDbl_add()
- {
- // QQQ : generate unnecessary memory copy for large bit
- const int bu = bit + unit;
- const int b2 = bit * 2;
- const int b2u = b2 + unit;
- resetGlobalIdx();
- Operand pz(IntPtr, unit);
- Operand px(IntPtr, unit);
- Operand py(IntPtr, unit);
- Operand pp(IntPtr, unit);
- std::string name = "mcl_fpDbl_add" + cybozu::itoa(N) + "L" + suf;
- Function f(name, Void, pz, px, py, pp);
- verifyAndSetPrivate(f);
- beginFunc(f);
- Operand x = loadN(px, N * 2);
- Operand y = loadN(py, N * 2);
- x = zext(x, b2u);
- y = zext(y, b2u);
- Operand t = add(x, y); // x + y = [H:L]
- Operand L = trunc(t, bit);
- storeN(L, pz);
-
- Operand H = lshr(t, bit);
- H = trunc(H, bu);
- Operand p = loadN(pp, N);
- p = zext(p, bu);
- Operand Hp = sub(H, p);
- t = lshr(Hp, bit);
- t = trunc(t, 1);
- t = select(t, H, Hp);
- t = trunc(t, bit);
- storeN(t, pz, N);
- ret(Void);
- endFunc();
- }
- void gen_mcl_fpDbl_sub()
- {
- // QQQ : rol is used?
- const int b2 = bit * 2;
- const int b2u = b2 + unit;
- resetGlobalIdx();
- std::string name = "mcl_fpDbl_sub" + cybozu::itoa(N) + "L" + suf;
- Operand pz(IntPtr, unit);
- Operand px(IntPtr, unit);
- Operand py(IntPtr, unit);
- Operand pp(IntPtr, unit);
- Function f(name, Void, pz, px, py, pp);
- verifyAndSetPrivate(f);
- beginFunc(f);
- Operand x = loadN(px, N * 2);
- Operand y = loadN(py, N * 2);
- x = zext(x, b2u);
- y = zext(y, b2u);
- Operand vc = sub(x, y); // x - y = [H:L]
- Operand L = trunc(vc, bit);
- storeN(L, pz);
-
- Operand H = lshr(vc, bit);
- H = trunc(H, bit);
- Operand c = lshr(vc, b2);
- c = trunc(c, 1);
- Operand p = loadN(pp, N);
- c = select(c, p, makeImm(bit, 0));
- Operand t = add(H, c);
- storeN(t, pz, N);
- ret(Void);
- endFunc();
- }
- /*
- return [px[n-1]:px[n-2]:...:px[0]]
- */
- Operand pack(const Operand *px, size_t n)
- {
- Operand x = px[0];
- for (size_t i = 1; i < n; i++) {
- Operand y = px[i];
- size_t shift = x.bit;
- size_t size = x.bit + y.bit;
- x = zext(x, size);
- y = zext(y, size);
- y = shl(y, shift);
- x = _or(x, y);
- }
- return x;
- }
- /*
- z = px[0..N] * y
- */
- void gen_mulPv()
- {
- const int bu = bit + unit;
- resetGlobalIdx();
- Operand z(Int, bu);
- Operand px(IntPtr, unit);
- Operand y(Int, unit);
- std::string name = "mulPv" + cybozu::itoa(bit) + "x" + cybozu::itoa(unit);
- mulPvM[bit] = Function(name, z, px, y);
- mulPvM[bit].setPrivate();
- verifyAndSetPrivate(mulPvM[bit]);
- beginFunc(mulPvM[bit]);
- OperandVec L(N), H(N);
- for (uint32_t i = 0; i < N; i++) {
- Operand xy = call(mulPos, px, y, makeImm(unit, i));
- L[i] = trunc(xy, unit);
- H[i] = call(extractHigh, xy);
- }
- Operand LL = pack(&L[0], N);
- Operand HH = pack(&H[0], N);
- LL = zext(LL, bu);
- HH = zext(HH, bu);
- HH = shl(HH, unit);
- z = add(LL, HH);
- ret(z);
- endFunc();
- }
- void gen_mcl_fp_mulUnitPre()
- {
- resetGlobalIdx();
- Operand pz(IntPtr, unit);
- Operand px(IntPtr, unit);
- Operand y(Int, unit);
- std::string name = "mcl_fp_mulUnitPre" + cybozu::itoa(N) + "L" + suf;
- mcl_fp_mulUnitPreM[N] = Function(name, Void, pz, px, y);
- verifyAndSetPrivate(mcl_fp_mulUnitPreM[N]);
- beginFunc(mcl_fp_mulUnitPreM[N]);
- Operand z = call(mulPvM[bit], px, y);
- storeN(z, pz);
- ret(Void);
- endFunc();
- }
- void generic_fpDbl_mul(const Operand& pz, const Operand& px, const Operand& py)
- {
- if (N == 1) {
- Operand x = load(px);
- Operand y = load(py);
- x = zext(x, unit * 2);
- y = zext(y, unit * 2);
- Operand z = mul(x, y);
- storeN(z, pz);
- ret(Void);
- } else if (N >= 8 && (N % 2) == 0) {
- /*
- W = 1 << half
- (aW + b)(cW + d) = acW^2 + (ad + bc)W + bd
- ad + bc = (a + b)(c + d) - ac - bd
- */
- const int H = N / 2;
- const int half = bit / 2;
- Operand pxW = getelementptr(px, H);
- Operand pyW = getelementptr(py, H);
- Operand pzWW = getelementptr(pz, N);
- call(mcl_fpDbl_mulPreM[H], pz, px, py); // bd
- call(mcl_fpDbl_mulPreM[H], pzWW, pxW, pyW); // ac
-
- Operand a = zext(loadN(pxW, H), half + unit);
- Operand b = zext(loadN(px, H), half + unit);
- Operand c = zext(loadN(pyW, H), half + unit);
- Operand d = zext(loadN(py, H), half + unit);
- Operand t1 = add(a, b);
- Operand t2 = add(c, d);
- Operand buf = _alloca(unit, N);
- Operand t1L = trunc(t1, half);
- Operand t2L = trunc(t2, half);
- Operand c1 = trunc(lshr(t1, half), 1);
- Operand c2 = trunc(lshr(t2, half), 1);
- Operand c0 = _and(c1, c2);
- c1 = select(c1, t2L, makeImm(half, 0));
- c2 = select(c2, t1L, makeImm(half, 0));
- Operand buf1 = _alloca(unit, half / unit);
- Operand buf2 = _alloca(unit, half / unit);
- storeN(t1L, buf1);
- storeN(t2L, buf2);
- call(mcl_fpDbl_mulPreM[N / 2], buf, buf1, buf2);
- Operand t = loadN(buf, N);
- t = zext(t, bit + unit);
- c0 = zext(c0, bit + unit);
- c0 = shl(c0, bit);
- t = _or(t, c0);
- c1 = zext(c1, bit + unit);
- c2 = zext(c2, bit + unit);
- c1 = shl(c1, half);
- c2 = shl(c2, half);
- t = add(t, c1);
- t = add(t, c2);
- t = sub(t, zext(loadN(pz, N), bit + unit));
- t = sub(t, zext(loadN(pz, N, N), bit + unit));
- if (bit + half > t.bit) {
- t = zext(t, bit + half);
- }
- t = add(t, loadN(pz, N + H, H));
- storeN(t, pz, H);
- ret(Void);
- } else {
- Operand y = load(py);
- Operand xy = call(mulPvM[bit], px, y);
- store(trunc(xy, unit), pz);
- Operand t = lshr(xy, unit);
- for (uint32_t i = 1; i < N; i++) {
- y = loadN(py, 1, i);
- xy = call(mulPvM[bit], px, y);
- t = add(t, xy);
- if (i < N - 1) {
- storeN(trunc(t, unit), pz, i);
- t = lshr(t, unit);
- }
- }
- storeN(t, pz, N - 1);
- ret(Void);
- }
- }
- void gen_mcl_fpDbl_mulPre()
- {
- resetGlobalIdx();
- Operand pz(IntPtr, unit);
- Operand px(IntPtr, unit);
- Operand py(IntPtr, unit);
- std::string name = "mcl_fpDbl_mulPre" + cybozu::itoa(N) + "L" + suf;
- mcl_fpDbl_mulPreM[N] = Function(name, Void, pz, px, py);
- verifyAndSetPrivate(mcl_fpDbl_mulPreM[N]);
- beginFunc(mcl_fpDbl_mulPreM[N]);
- generic_fpDbl_mul(pz, px, py);
- endFunc();
- }
- void gen_mcl_fpDbl_sqrPre()
- {
- resetGlobalIdx();
- Operand py(IntPtr, unit);
- Operand px(IntPtr, unit);
- std::string name = "mcl_fpDbl_sqrPre" + cybozu::itoa(N) + "L" + suf;
- mcl_fpDbl_sqrPreM[N] = Function(name, Void, py, px);
- verifyAndSetPrivate(mcl_fpDbl_sqrPreM[N]);
- beginFunc(mcl_fpDbl_sqrPreM[N]);
- generic_fpDbl_mul(py, px, px);
- endFunc();
- }
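generic_fpDbl_mul's comment states the Karatsuba identity it implements: (aW + b)(cW + d) = acW^2 + ((a+b)(c+d) - ac - bd)W + bd, so three half-size multiplies replace four. A toy instance with W = 2^32, which keeps all the carries visible in one word size (function name illustrative):

    #include <cstdint>
    // Karatsuba 64x64 -> 128 with W = 2^32: three 32x32 multiplies.
    static unsigned __int128 karatsuba64(uint64_t x, uint64_t y) {
        uint64_t a = x >> 32, b = (uint32_t)x, c = y >> 32, d = (uint32_t)y;
        unsigned __int128 ac = a * c, bd = b * d;
        // (a+b)(c+d) can exceed 64 bits, so widen before multiplying
        unsigned __int128 mid = (unsigned __int128)(a + b) * (c + d) - ac - bd;
        return (ac << 64) + (mid << 32) + bd;   // == (unsigned __int128)x * y
    }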
- void gen_mcl_fp_mont(bool isFullBit = true)
- {
- const int bu = bit + unit;
- const int bu2 = bit + unit * 2;
- resetGlobalIdx();
- Operand pz(IntPtr, unit);
- Operand px(IntPtr, unit);
- Operand py(IntPtr, unit);
- Operand pp(IntPtr, unit);
- std::string name = "mcl_fp_mont";
- if (!isFullBit) {
- name += "NF";
- }
- name += cybozu::itoa(N) + "L" + suf;
- mcl_fp_montM[N] = Function(name, Void, pz, px, py, pp);
- mcl_fp_montM[N].setAlias();
- verifyAndSetPrivate(mcl_fp_montM[N]);
- beginFunc(mcl_fp_montM[N]);
- Operand rp = load(getelementptr(pp, -1));
- Operand z, s, a;
- if (isFullBit) {
- for (uint32_t i = 0; i < N; i++) {
- Operand y = load(getelementptr(py, i));
- Operand xy = call(mulPvM[bit], px, y);
- Operand at;
- if (i == 0) {
- a = zext(xy, bu2);
- at = trunc(xy, unit);
- } else {
- xy = zext(xy, bu2);
- a = add(s, xy);
- at = trunc(a, unit);
- }
- Operand q = mul(at, rp);
- Operand pq = call(mulPvM[bit], pp, q);
- pq = zext(pq, bu2);
- Operand t = add(a, pq);
- s = lshr(t, unit);
- }
- s = trunc(s, bu);
- Operand p = zext(loadN(pp, N), bu);
- Operand vc = sub(s, p);
- Operand c = trunc(lshr(vc, bit), 1);
- z = select(c, s, vc);
- z = trunc(z, bit);
- storeN(z, pz);
- } else {
- Operand y = load(py);
- Operand xy = call(mulPvM[bit], px, y);
- Operand c0 = trunc(xy, unit);
- Operand q = mul(c0, rp);
- Operand pq = call(mulPvM[bit], pp, q);
- Operand t = add(xy, pq);
- t = lshr(t, unit); // bu-bit
- for (uint32_t i = 1; i < N; i++) {
- y = load(getelementptr(py, i));
- xy = call(mulPvM[bit], px, y);
- t = add(t, xy);
- c0 = trunc(t, unit);
- q = mul(c0, rp);
- pq = call(mulPvM[bit], pp, q);
- t = add(t, pq);
- t = lshr(t, unit);
- }
- t = trunc(t, bit);
- Operand vc = sub(t, loadN(pp, N));
- Operand c = trunc(lshr(vc, bit - 1), 1);
- z = select(c, t, vc);
- storeN(z, pz);
- }
- ret(Void);
- endFunc();
- }
- void gen_mcl_fp_montRed()
- {
- const int bu = bit + unit;
- const int b2 = bit * 2;
- const int b2u = b2 + unit;
- resetGlobalIdx();
- Operand pz(IntPtr, unit);
- Operand pxy(IntPtr, unit);
- Operand pp(IntPtr, unit);
- std::string name = "mcl_fp_montRed" + cybozu::itoa(N) + "L" + suf;
- mcl_fp_montRedM[N] = Function(name, Void, pz, pxy, pp);
- verifyAndSetPrivate(mcl_fp_montRedM[N]);
- beginFunc(mcl_fp_montRedM[N]);
- Operand rp = load(getelementptr(pp, -1));
- Operand p = loadN(pp, N);
- Operand xy = loadN(pxy, N * 2);
- Operand t = zext(xy, b2 + unit);
- Operand z;
- for (uint32_t i = 0; i < N; i++) {
- Operand z = trunc(t, unit);
- Operand q = mul(z, rp);
- Operand pq = call(mulPvM[bit], pp, q);
- pq = zext(pq, b2u - unit * i);
- z = add(t, pq);
- z = lshr(z, unit);
- t = trunc(z, b2 - unit * i);
- }
- p = zext(p, bu);
- Operand vc = sub(t, p);
- Operand c = trunc(lshr(vc, bit), 1);
- z = select(c, t, vc);
- z = trunc(z, bit);
- storeN(z, pz);
- ret(Void);
- endFunc();
- }
- void gen_all()
- {
- gen_mcl_fp_addsubPre(true);
- gen_mcl_fp_addsubPre(false);
- gen_mcl_fp_shr1();
- }
- void gen_addsub()
- {
- gen_mcl_fp_add(true);
- gen_mcl_fp_add(false);
- gen_mcl_fp_sub(true);
- gen_mcl_fp_sub(false);
- gen_mcl_fpDbl_add();
- gen_mcl_fpDbl_sub();
- }
- void gen_mul()
- {
- gen_mulPv();
- gen_mcl_fp_mulUnitPre();
- gen_mcl_fpDbl_mulPre();
- gen_mcl_fpDbl_sqrPre();
- gen_mcl_fp_mont(true);
- gen_mcl_fp_mont(false);
- gen_mcl_fp_montRed();
- }
- void setBit(uint32_t bit)
- {
- this->bit = bit;
- N = bit / unit;
- }
- void setUnit(uint32_t unit)
- {
- this->unit = unit;
- unit2 = unit * 2;
- unitStr = cybozu::itoa(unit);
- }
- void gen(const StrSet& privateFuncList, uint32_t maxBitSize, const std::string& suf)
- {
- this->suf = suf;
- this->privateFuncList = &privateFuncList;
-#ifdef FOR_WASM
- gen_mulUU();
-#else
- gen_once();
- uint32_t end = ((maxBitSize + unit - 1) / unit);
- for (uint32_t n = 1; n <= end; n++) {
- setBit(n * unit);
- gen_mul();
- gen_all();
- gen_addsub();
- }
- if (unit == 64 && maxBitSize == 768) {
- for (uint32_t i = maxBitSize + unit * 2; i <= maxBitSize * 2; i += unit * 2) {
- setBit(i);
- gen_all();
- }
- }
-#endif
- }
-};
-
-int main(int argc, char *argv[])
- try
-{
- uint32_t unit;
- bool oldLLVM;
- bool wasm;
- std::string suf;
- std::string privateFile;
- cybozu::Option opt;
- opt.appendOpt(&unit, uint32_t(sizeof(void*)) * 8, "u", ": unit");
- opt.appendBoolOpt(&oldLLVM, "old", ": old LLVM(before 3.8)");
- opt.appendBoolOpt(&wasm, "wasm", ": for wasm");
- opt.appendOpt(&suf, "", "s", ": suffix of function name");
- opt.appendOpt(&privateFile, "", "f", ": private function list file");
- opt.appendHelp("h");
- if (!opt.parse(argc, argv)) {
- opt.usage();
- return 1;
- }
- StrSet privateFuncList;
- if (!privateFile.empty()) {
- std::ifstream ifs(privateFile.c_str(), std::ios::binary);
- std::string name;
- while (ifs >> name) {
- privateFuncList.insert(name);
- }
- }
- Code c;
- if (oldLLVM) {
- c.setOldLLVM();
- }
- c.wasm = wasm;
- c.setUnit(unit);
- uint32_t maxBitSize = MCL_MAX_BIT_SIZE;
- c.gen(privateFuncList, maxBitSize, suf);
-} catch (std::exception& e) {
- printf("ERR %s\n", e.what());
- return 1;
-}
diff --git a/vendor/github.com/dexon-foundation/mcl/src/llvm_gen.hpp b/vendor/github.com/dexon-foundation/mcl/src/llvm_gen.hpp
deleted file mode 100644
index bbc5b9030..000000000
--- a/vendor/github.com/dexon-foundation/mcl/src/llvm_gen.hpp
+++ /dev/null
@@ -1,616 +0,0 @@
-#pragma once
-/**
- @file
- @brief LLVM IR generator
- @author MITSUNARI Shigeo(@herumi)
- @license modified new BSD license
- http://opensource.org/licenses/BSD-3-Clause
-*/
-//#define CYBOZU_EXCEPTION_WITH_STACKTRACE
-#include <stdio.h>
-#include <string>
-#include <vector>
-#include <cybozu/exception.hpp>
-#include <cybozu/itoa.hpp>
-#ifdef _MSC_VER
-// #pragma warning(push)
- #pragma warning(disable : 4458)
-#endif
-
-namespace mcl {
-
-namespace impl {
-
-struct File {
- FILE *fp;
- File() : fp(stdout) {}
- ~File() { if (fp != stdout) fclose(fp); }
- void open(const std::string& file)
- {
-#ifdef _MSC_VER
- bool isOK = fopen_s(&fp, file.c_str(), "wb") != 0;
-#else
- fp = fopen(file.c_str(), "wb");
- bool isOK = fp != NULL;
-#endif
- if (!isOK) throw cybozu::Exception("File:open") << file;
- }
- void write(const std::string& str)
- {
- int ret = fprintf(fp, "%s\n", str.c_str());
- if (ret < 0) {
- throw cybozu::Exception("File:write") << str;
- }
- }
-};
-template<size_t dummy = 0>
-struct Param {
- static File f;
-};
-
-template<size_t dummy>
-File Param<dummy>::f;
-
-bool isOldLLVM = false;
-
-} // mcl::impl
-
-inline bool isOldLLVM() { return impl::isOldLLVM; }
-
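The Param class template above is the classic header-only singleton: a static data member of a class template may be defined in the header, and the linker merges the one instance per specialization across translation units. The idiom in isolation (all names here are illustrative):

    #include <cstdio>
    template<int dummy = 0>
    struct Global {
        static FILE* out;      // definition below lives safely in a header
    };
    template<int dummy>
    FILE* Global<dummy>::out = stdout;
    // usage from any translation unit: fprintf(Global<>::out, "hello\n");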
-struct Generator {
- static const uint8_t None = 0;
- static const uint8_t Int = 1;
- static const uint8_t Imm = 2;
- static const uint8_t Ptr = 1 << 7;
- static const uint8_t IntPtr = Int | Ptr;
- void setOldLLVM() { impl::isOldLLVM = true; }
- struct Type {
- uint8_t type;
- bool isPtr;
- Type(int type = 0)
- : type(static_cast<uint8_t>(type & ~Ptr))
- , isPtr((type & Ptr) != 0)
- {
- }
- inline friend std::ostream& operator<<(std::ostream& os, const Type& self)
- {
- return os << (self.type | (self.isPtr ? Ptr : 0));
- }
- };
- enum CondType {
- eq = 1,
- neq = 2,
- ugt = 3,
- uge = 4,
- ult = 5,
- ule = 6,
- sgt = 7,
- sge = 8,
- slt = 9,
- sle = 10
- };
- static inline const std::string& toStr(CondType type)
- {
- static const std::string tbl[] = {
- "eq", "neq", "ugt", "uge", "ult", "ule", "sgt", "sge", "slt", "sle"
- };
- return tbl[type - 1];
- }
- void open(const std::string& file)
- {
- impl::Param<>::f.open(file);
- }
- struct Operand;
- struct Function;
- struct Eval;
- struct Label {
- std::string name;
- explicit Label(const std::string& name = "") : name(name) {}
- std::string toStr() const { return std::string("label %") + name; }
- };
- void putLabel(const Label& label)
- {
- put(label.name + ":");
- }
- static inline int& getGlobalIdx()
- {
- static int globalIdx = 0;
- return ++globalIdx;
- }
- static inline void resetGlobalIdx()
- {
- getGlobalIdx() = 0;
- }
- static inline void put(const std::string& str)
- {
- impl::Param<>::f.write(str);
- }
- void beginFunc(const Function& f);
- void endFunc()
- {
- put("}");
- }
- Eval zext(const Operand& x, uint32_t size);
- Eval mul(const Operand& x, const Operand& y);
- Eval add(const Operand& x, const Operand& y);
- Eval sub(const Operand& x, const Operand& y);
- Eval _and(const Operand& x, const Operand& y);
- Eval _or(const Operand& x, const Operand& y);
- void ret(const Operand& r);
- Eval lshr(const Operand& x, uint32_t size);
- Eval ashr(const Operand& x, uint32_t size);
- Eval shl(const Operand& x, uint32_t size);
- Eval trunc(const Operand& x, uint32_t size);
- Eval getelementptr(const Operand& p, const Operand& i);
- Eval getelementptr(const Operand& p, int n);
- Eval load(const Operand& p);
- void store(const Operand& r, const Operand& p);
- Eval select(const Operand& c, const Operand& r1, const Operand& r2);
- Eval _alloca(uint32_t bit, uint32_t n);
- // QQQ : type of type must be Type
- Eval bitcast(const Operand& r, const Operand& type);
- Eval icmp(CondType type, const Operand& r1, const Operand& r2);
- void br(const Operand& op, const Label& ifTrue, const Label& ifFalse);
- Eval call(const Function& f);
- Eval call(const Function& f, const Operand& op1);
- Eval call(const Function& f, const Operand& op1, const Operand& op2);
- Eval call(const Function& f, const Operand& op1, const Operand& op2, const Operand& op3);
- Eval call(const Function& f, const Operand& op1, const Operand& op2, const Operand& op3, const Operand& op4);
- Eval call(const Function& f, const Operand& op1, const Operand& op2, const Operand& op3, const Operand& op4, const Operand& op5);
-
- Operand makeImm(uint32_t bit, int64_t imm);
-};
-
-struct Generator::Operand {
- Type type;
- uint32_t bit;
- int64_t imm;
- uint32_t idx;
- Operand() : type(None), bit(0), imm(0), idx(0) {}
- Operand(Type type, uint32_t bit)
- : type(type), bit(bit), imm(0), idx(getGlobalIdx())
- {
- }
- Operand(const Operand& rhs)
- : type(rhs.type), bit(rhs.bit), imm(rhs.imm), idx(rhs.idx)
- {
- }
- void operator=(const Operand& rhs)
- {
- type = rhs.type;
- bit = rhs.bit;
- imm = rhs.imm;
- idx = rhs.idx;
- }
- void update()
- {
- idx = getGlobalIdx();
- }
- Operand(const Eval& e);
- void operator=(const Eval& e);
-
- std::string toStr(bool isAlias = true) const
- {
- if (type.isPtr) {
- return getType(isAlias) + " " + getName();
- }
- switch (type.type) {
- default:
- return getType();
- case Int:
- case Imm:
- return getType() + " " + getName();
- }
- }
- std::string getType(bool isAlias = true) const
- {
- std::string s;
- switch (type.type) {
- default:
- return "void";
- case Int:
- case Imm:
- s = std::string("i") + cybozu::itoa(bit);
- break;
- }
- if (type.isPtr) {
- s += "*";
- if (!isAlias) {
- s += " noalias ";
- }
- }
- return s;
- }
- std::string getName() const
- {
- switch (type.type) {
- default:
- return "";
- case Int:
- return std::string("%r") + cybozu::itoa(idx);
- case Imm:
- return cybozu::itoa(imm);
- }
- }
-};
-
-inline Generator::Operand Generator::makeImm(uint32_t bit, int64_t imm)
-{
- Generator::Operand v(Generator::Imm, bit);
- v.imm = imm;
- return v;
-}
-
-struct Generator::Eval {
- std::string s;
- Generator::Operand op;
- mutable bool used;
- Eval() : used(false) {}
- ~Eval()
- {
- if (used) return;
- put(s);
- }
-};
-
-inline Generator::Operand::Operand(const Generator::Eval& e)
-{
- *this = e.op;
- update();
- put(getName() + " = " + e.s);
- e.used = true;
-}
-
-inline void Generator::Operand::operator=(const Generator::Eval& e)
-{
- *this = e.op;
- update();
- put(getName() + " = " + e.s);
- e.used = true;
-}
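Eval is a lazy instruction: consuming it through an Operand names the result and prints "%rN = instr", while discarding it lets the destructor print the bare instruction (used for void operations). A toy version of the trick, with illustrative names and a simplified counter:

    #include <cstdio>
    #include <string>
    struct Expr {
        std::string text;
        mutable bool used = false;
        ~Expr() { if (!used) puts(text.c_str()); }  // dropped result: emit as-is
    };
    struct Value {
        std::string name;
        Value(const Expr& e) {
            static int idx = 0;
            name = "%r" + std::to_string(++idx);    // consumed result: name it
            printf("%s = %s\n", name.c_str(), e.text.c_str());
            e.used = true;
        }
    };
    // Value v = Expr{"add i64 %a, %b"};  prints: %r1 = add i64 %a, %b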
Generator::Operand& x, const Generator::Operand& y) -{ - if (x.bit != y.bit) throw cybozu::Exception("Generator:aluSub:bad size") << name << x.bit << y.bit; - Generator::Eval e; - e.op.type = Generator::Int; - e.op.bit = x.bit; - e.s = name; - e.s += " "; - e.s += x.toStr() + ", " + y.getName(); - return e; -} - -inline Generator::Eval shiftSub(const char *name, const Generator::Operand& x, uint32_t size) -{ - Generator::Eval e; - e.op = x; - e.s = name; - e.s += " "; - e.s += x.toStr() + ", " + cybozu::itoa(size); - return e; -} - -} // mcl::impl - -inline void Generator::beginFunc(const Generator::Function& f) -{ - put(f.toStr() + "\n{"); -} - -inline Generator::Eval Generator::zext(const Generator::Operand& x, uint32_t size) -{ - if (x.bit >= size) throw cybozu::Exception("Generator:zext:bad size") << x.bit << size; - Eval e; - e.op = x; - e.op.bit = size; - e.s = "zext "; - e.s += x.toStr() + " to i" + cybozu::itoa(size); - return e; -} - -inline Generator::Eval Generator::mul(const Generator::Operand& x, const Generator::Operand& y) -{ - return impl::aluSub("mul", x, y); -} - -inline Generator::Eval Generator::add(const Generator::Operand& x, const Generator::Operand& y) -{ - return impl::aluSub("add", x, y); -} - -inline Generator::Eval Generator::sub(const Generator::Operand& x, const Generator::Operand& y) -{ - return impl::aluSub("sub", x, y); -} - -inline Generator::Eval Generator::_and(const Generator::Operand& x, const Generator::Operand& y) -{ - return impl::aluSub("and", x, y); -} - -inline Generator::Eval Generator::_or(const Generator::Operand& x, const Generator::Operand& y) -{ - return impl::aluSub("or", x, y); -} - -inline void Generator::ret(const Generator::Operand& x) -{ - std::string s = "ret " + x.toStr(); - put(s); -} - -inline Generator::Eval Generator::lshr(const Generator::Operand& x, uint32_t size) -{ - return impl::shiftSub("lshr", x, size); -} - -inline Generator::Eval Generator::ashr(const Generator::Operand& x, uint32_t size) -{ - return impl::shiftSub("ashr", x, size); -} - -inline Generator::Eval Generator::shl(const Generator::Operand& x, uint32_t size) -{ - return impl::shiftSub("shl", x, size); -} - -inline Generator::Eval Generator::trunc(const Generator::Operand& x, uint32_t size) -{ - Eval e; - e.op = x; - e.op.bit = size; - e.s = "trunc "; - e.s += x.toStr() + " to i" + cybozu::itoa(size); - return e; -} - -inline Generator::Eval Generator::getelementptr(const Generator::Operand& p, const Generator::Operand& i) -{ - Eval e; - e.op = p; - e.s = "getelementptr "; - if (!isOldLLVM()) { - e.s += "i" + cybozu::itoa(p.bit) + ", "; - } - e.s += p.toStr() + ", " + i.toStr(); - return e; -} - -inline Generator::Eval Generator::getelementptr(const Generator::Operand& p, int n) -{ - return Generator::getelementptr(p, makeImm(32, n)); -} - -inline Generator::Eval Generator::load(const Generator::Operand& p) -{ - if (!p.type.isPtr) throw cybozu::Exception("Generator:load:not pointer") << p.type; - Eval e; - e.op = p; - e.op.type.isPtr = false; - e.s = "load "; - if (!isOldLLVM()) { - e.s += "i" + cybozu::itoa(p.bit) + ", "; - } - e.s += p.toStr(); - return e; -} - -inline void Generator::store(const Generator::Operand& r, const Generator::Operand& p) -{ - if (!p.type.isPtr) throw cybozu::Exception("Generator:store:not pointer") << p.type; - std::string s = "store "; - s += r.toStr(); - s += ", "; - s += p.toStr(); - put(s); -} - -inline Generator::Eval Generator::select(const Generator::Operand& c, const Generator::Operand& r1, const Generator::Operand& r2) -{ 
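The emitters above compose into whole functions: constructing an Operand from an Eval is what actually prints the "%rN = ..." line and marks the Eval as used, so unevaluated results are flushed by Eval's destructor instead. A minimal driving sketch, assuming the Type constants Int/IntPtr/None behave as the Operand constructors above suggest; the function name and output path are illustrative:

    using namespace mcl;
    Generator gen;
    gen.open("add256.ll");                    // all put() text goes to this file
    Generator::Operand Void(Generator::None, 0);
    Generator::Operand pz(Generator::IntPtr, 256);
    Generator::Operand px(Generator::IntPtr, 256);
    Generator::Operand py(Generator::IntPtr, 256);
    Generator::Function f("add256", Void, pz, px, py);
    gen.beginFunc(f);                         // define void @add256(i256* ...) {
    Generator::Operand x = gen.load(px);      // %rM = load i256, i256* ...
    Generator::Operand y = gen.load(py);
    Generator::Operand z = gen.add(x, y);     // %rK = add i256 %rM, ...
    gen.store(z, pz);
    gen.ret(Void);                            // ret void
    gen.endFunc();                            // }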
- if (c.bit != 1) throw cybozu::Exception("Generator:select:bad bit") << c.bit; - Eval e; - e.op = r1; - e.s = "select "; - e.s += c.toStr(); - e.s += ", "; - e.s += r1.toStr(); - e.s += ", "; - e.s += r2.toStr(); - return e; -} - -inline Generator::Eval Generator::_alloca(uint32_t bit, uint32_t n) -{ - Eval e; - e.op = Operand(IntPtr, bit); - e.s = "alloca i"; - e.s += cybozu::itoa(bit); - e.s += ", i32 "; - e.s += cybozu::itoa(n); - return e; -} - -inline Generator::Eval Generator::bitcast(const Generator::Operand& r, const Generator::Operand& type) -{ - Eval e; - e.op = type; - e.s = "bitcast "; - e.s += r.toStr(); - e.s += " to "; - e.s += type.getType(); - return e; -} - -inline Generator::Eval Generator::icmp(Generator::CondType type, const Generator::Operand& r1, const Generator::Operand& r2) -{ - Eval e; - e.op.type = Int; - e.op.bit = 1; - e.s = "icmp "; - e.s += toStr(type); - e.s += " "; - e.s += r1.toStr(); - e.s += ", "; - e.s += r2.getName(); - return e; -} - -inline void Generator::br(const Generator::Operand& op, const Generator::Label& ifTrue, const Generator::Label& ifFalse) -{ - if (op.bit != 1) throw cybozu::Exception("Generator:br:bad reg size") << op.bit; - std::string s = "br i1"; - s += op.getName(); - s += ", "; - s += ifTrue.toStr(); - s += ", "; - s += ifFalse.toStr(); - put(s); -} - -inline Generator::Eval Generator::call(const Generator::Function& f) -{ - return impl::callSub(f, 0, 0); -} - -inline Generator::Eval Generator::call(const Generator::Function& f, const Generator::Operand& op1) -{ - const Operand *tbl[] = { &op1 }; - return impl::callSub(f, tbl, CYBOZU_NUM_OF_ARRAY(tbl)); -} - -inline Generator::Eval Generator::call(const Generator::Function& f, const Generator::Operand& op1, const Generator::Operand& op2) -{ - const Operand *tbl[] = { &op1, &op2 }; - return impl::callSub(f, tbl, CYBOZU_NUM_OF_ARRAY(tbl)); -} - -inline Generator::Eval Generator::call(const Generator::Function& f, const Generator::Operand& op1, const Generator::Operand& op2, const Generator::Operand& op3) -{ - const Operand *tbl[] = { &op1, &op2, &op3 }; - return impl::callSub(f, tbl, CYBOZU_NUM_OF_ARRAY(tbl)); -} - -inline Generator::Eval Generator::call(const Generator::Function& f, const Generator::Operand& op1, const Generator::Operand& op2, const Generator::Operand& op3, const Generator::Operand& op4) -{ - const Operand *tbl[] = { &op1, &op2, &op3, &op4 }; - return impl::callSub(f, tbl, CYBOZU_NUM_OF_ARRAY(tbl)); -} - -inline Generator::Eval Generator::call(const Generator::Function& f, const Generator::Operand& op1, const Generator::Operand& op2, const Generator::Operand& op3, const Generator::Operand& op4, const Generator::Operand& opt5) -{ - const Operand *tbl[] = { &op1, &op2, &op3, &op4, &opt5 }; - return impl::callSub(f, tbl, CYBOZU_NUM_OF_ARRAY(tbl)); -} - -#define MCL_GEN_FUNCTION(name, ...) 
Function name(#name, __VA_ARGS__) - -} // mcl - -#ifdef _MSC_VER -// #pragma warning(pop) -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/src/low_func.hpp b/vendor/github.com/dexon-foundation/mcl/src/low_func.hpp deleted file mode 100644 index 57c63cfa3..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/low_func.hpp +++ /dev/null @@ -1,706 +0,0 @@ -#pragma once -/** - @file - @brief generic function for each N - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include -#include -#include - -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable : 4127) -#endif - -namespace mcl { namespace fp { - -struct Gtag; // GMP -struct Ltag; // LLVM -struct LBMI2tag; // LLVM with Intel BMI2 instruction -struct Atag; // asm - -template struct TagToStr { }; -template<> struct TagToStr { static const char *f() { return "Gtag"; } }; -template<> struct TagToStr { static const char *f() { return "Ltag"; } }; -template<> struct TagToStr { static const char *f() { return "LBMI2tag"; } }; -template<> struct TagToStr { static const char *f() { return "Atag"; } }; - -template -void clearC(Unit *x) -{ - clearArray(x, 0, N); -} - -template -bool isZeroC(const Unit *x) -{ - return isZeroArray(x, N); -} - -template -void copyC(Unit *y, const Unit *x) -{ - copyArray(y, x, N); -} - -// (carry, z[N]) <- x[N] + y[N] -template -struct AddPre { - static inline Unit func(Unit *z, const Unit *x, const Unit *y) - { -#ifdef MCL_USE_VINT - return mcl::vint::addN(z, x, y, N); -#else - return mpn_add_n((mp_limb_t*)z, (const mp_limb_t*)x, (const mp_limb_t*)y, N); -#endif - } - static const u3u f; -}; -template -const u3u AddPre::f = AddPre::func; - -// (carry, x[N]) <- x[N] + y -template -struct AddUnitPre { - static inline Unit func(Unit *x, Unit n, Unit y) - { -#if 1 - int ret = 0; - Unit t = x[0] + y; - x[0] = t; - if (t >= y) goto EXIT_0; - for (size_t i = 1; i < n; i++) { - t = x[i] + 1; - x[i] = t; - if (t != 0) goto EXIT_0; - } - ret = 1; - EXIT_0: - return ret; -#else - return mpn_add_1((mp_limb_t*)x, (const mp_limb_t*)x, (int)n, y); -#endif - } - static const u1uII f; -}; -template -const u1uII AddUnitPre::f = AddUnitPre::func; - -// (carry, z[N]) <- x[N] - y[N] -template -struct SubPre { - static inline Unit func(Unit *z, const Unit *x, const Unit *y) - { -#ifdef MCL_USE_VINT - return mcl::vint::subN(z, x, y, N); -#else - return mpn_sub_n((mp_limb_t*)z, (const mp_limb_t*)x, (const mp_limb_t*)y, N); -#endif - } - static const u3u f; -}; - -template -const u3u SubPre::f = SubPre::func; - -// y[N] <- (x[N] >> 1) -template -struct Shr1 { - static inline void func(Unit *y, const Unit *x) - { -#ifdef MCL_USE_VINT - mcl::vint::shrN(y, x, N, 1); -#else - mpn_rshift((mp_limb_t*)y, (const mp_limb_t*)x, (int)N, 1); -#endif - } - static const void2u f; -}; - -template -const void2u Shr1::f = Shr1::func; - -// y[N] <- (-x[N]) % p[N] -template -struct Neg { - static inline void func(Unit *y, const Unit *x, const Unit *p) - { - if (isZeroC(x)) { - if (x != y) clearC(y); - return; - } - SubPre::f(y, p, x); - } - static const void3u f; -}; - -template -const void3u Neg::f = Neg::func; - -// z[N * 2] <- x[N] * y[N] -template -struct MulPreCore { - static inline void func(Unit *z, const Unit *x, const Unit *y) - { -#ifdef MCL_USE_VINT - mcl::vint::mulNM(z, x, N, y, N); -#else - mpn_mul_n((mp_limb_t*)z, (const mp_limb_t*)x, (const mp_limb_t*)y, (int)N); -#endif - } - static const void3u f; -}; - -template -const void3u MulPreCore::f = 
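AddPre above returns the carry out of an N-limb addition, the same contract as the mpn_add_n and vint::addN calls it wraps. A portable equivalent with fixed 64-bit limbs, for illustration:

    #include <cstdint>
    #include <cstddef>
    // z[0..n) = x + y, returns the carry out (0 or 1)
    uint64_t addN(uint64_t *z, const uint64_t *x, const uint64_t *y, size_t n)
    {
        uint64_t c = 0;
        for (size_t i = 0; i < n; i++) {
            uint64_t s = x[i] + c;
            c = (s < c);                 // carry from the incoming carry
            z[i] = s + y[i];
            c += (z[i] < s);             // carry from adding y[i]
        }
        return c;
    }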
MulPreCore::func; - -template -struct EnableKaratsuba { - /* always use mpn* for Gtag */ - static const size_t minMulN = 100; - static const size_t minSqrN = 100; -}; - -template -struct MulPre { - /* - W = 1 << H - x = aW + b, y = cW + d - xy = acW^2 + (ad + bc)W + bd - ad + bc = (a + b)(c + d) - ac - bd - */ - static inline void karatsuba(Unit *z, const Unit *x, const Unit *y) - { - const size_t H = N / 2; - MulPre::f(z, x, y); // bd - MulPre::f(z + N, x + H, y + H); // ac - Unit a_b[H]; - Unit c_d[H]; - Unit c1 = AddPre::f(a_b, x, x + H); // a + b - Unit c2 = AddPre::f(c_d, y, y + H); // c + d - Unit tmp[N]; - MulPre::f(tmp, a_b, c_d); - Unit c = c1 & c2; - if (c1) { - c += AddPre::f(tmp + H, tmp + H, c_d); - } - if (c2) { - c += AddPre::f(tmp + H, tmp + H, a_b); - } - // c:tmp[N] = (a + b)(c + d) - c -= SubPre::f(tmp, tmp, z); - c -= SubPre::f(tmp, tmp, z + N); - // c:tmp[N] = ad + bc - c += AddPre::f(z + H, z + H, tmp); - assert(c <= 2); - if (c) { - AddUnitPre::f(z + N + H, H, c); - } - } - static inline void func(Unit *z, const Unit *x, const Unit *y) - { -#if 1 - if (N >= EnableKaratsuba::minMulN && (N % 2) == 0) { - karatsuba(z, x, y); - return; - } -#endif - MulPreCore::f(z, x, y); - } - static const void3u f; -}; - -template -const void3u MulPre::f = MulPre::func; - -template -struct MulPre<0, Tag> { - static inline void f(Unit*, const Unit*, const Unit*) {} -}; - -template -struct MulPre<1, Tag> { - static inline void f(Unit* z, const Unit* x, const Unit* y) - { - MulPreCore<1, Tag>::f(z, x, y); - } -}; - -// z[N * 2] <- x[N] * x[N] -template -struct SqrPreCore { - static inline void func(Unit *y, const Unit *x) - { -#ifdef MCL_USE_VINT - mcl::vint::sqrN(y, x, N); -#else - mpn_sqr((mp_limb_t*)y, (const mp_limb_t*)x, N); -#endif - } - static const void2u f; -}; - -template -const void2u SqrPreCore::f = SqrPreCore::func; - -template -struct SqrPre { - /* - W = 1 << H - x = aW + b - x^2 = aaW^2 + 2abW + bb - */ - static inline void karatsuba(Unit *z, const Unit *x) - { - const size_t H = N / 2; - SqrPre::f(z, x); // b^2 - SqrPre::f(z + N, x + H); // a^2 - Unit ab[N]; - MulPre::f(ab, x, x + H); // ab - Unit c = AddPre::f(ab, ab, ab); - c += AddPre::f(z + H, z + H, ab); - if (c) { - AddUnitPre::f(z + N + H, H, c); - } - } - static inline void func(Unit *y, const Unit *x) - { -#if 1 - if (N >= EnableKaratsuba::minSqrN && (N % 2) == 0) { - karatsuba(y, x); - return; - } -#endif - SqrPreCore::f(y, x); - } - static const void2u f; -}; -template -const void2u SqrPre::f = SqrPre::func; - -template -struct SqrPre<0, Tag> { - static inline void f(Unit*, const Unit*) {} -}; - -template -struct SqrPre<1, Tag> { - static inline void f(Unit* y, const Unit* x) - { - SqrPreCore<1, Tag>::f(y, x); - } -}; - -// z[N + 1] <- x[N] * y -template -struct MulUnitPre { - static inline void func(Unit *z, const Unit *x, Unit y) - { -#ifdef MCL_USE_VINT - z[N] = mcl::vint::mulu1(z, x, N, y); -#else - z[N] = mpn_mul_1((mp_limb_t*)z, (const mp_limb_t*)x, N, y); -#endif - } - static const void2uI f; -}; - -template -const void2uI MulUnitPre::f = MulUnitPre::func; - -// z[N] <- x[N + 1] % p[N] -template -struct N1_Mod { - static inline void func(Unit *y, const Unit *x, const Unit *p) - { -#ifdef MCL_USE_VINT - mcl::vint::divNM(0, 0, y, x, N + 1, p, N); -#else - mp_limb_t q[2]; // not used - mpn_tdiv_qr(q, (mp_limb_t*)y, 0, (const mp_limb_t*)x, N + 1, (const mp_limb_t*)p, N); -#endif - } - static const void3u f; -}; - -template -const void3u N1_Mod::f = N1_Mod::func; - -// z[N] <- (x[N] * y) % p[N] -template 
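MulPre::karatsuba above rests on the identity ad + bc = (a + b)(c + d) - ac - bd, which saves one multiplication per recursion level at the cost of extra additions and carry fixups. The same identity checked on 8-bit halves of 16-bit operands, as a self-contained illustration (types chosen so everything fits in 32 bits):

    #include <cstdint>
    #include <cassert>
    uint32_t kara16(uint16_t x, uint16_t y)
    {
        uint32_t a = x >> 8, b = x & 0xff;            // x = a*W + b, W = 256
        uint32_t c = y >> 8, d = y & 0xff;            // y = c*W + d
        uint32_t ac = a * c, bd = b * d;
        uint32_t mid = (a + b) * (c + d) - ac - bd;   // = ad + bc
        return ac * 65536 + mid * 256 + bd;           // ac*W^2 + (ad+bc)*W + bd
    }
    // sanity check: assert(kara16(0x1234, 0x5678) == 0x1234u * 0x5678u);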
-struct MulUnit { - static inline void func(Unit *z, const Unit *x, Unit y, const Unit *p) - { - Unit xy[N + 1]; - MulUnitPre::f(xy, x, y); -#if 1 - Unit len = UnitBitSize - 1 - cybozu::bsr(p[N - 1]); - Unit v = xy[N]; - if (N > 1 && len < 3 && v < 0xff) { - for (;;) { - if (len == 0) { - v = xy[N]; - } else { - v = (xy[N] << len) | (xy[N - 1] >> (UnitBitSize - len)); - } - if (v == 0) break; - if (v == 1) { - xy[N] -= SubPre::f(xy, xy, p); - } else { - Unit t[N + 1]; - MulUnitPre::f(t, p, v); - SubPre::f(xy, xy, t); - } - } - for (;;) { - if (SubPre::f(z, xy, p)) { - copyC(z, xy); - return; - } - if (SubPre::f(xy, z, p)) { - return; - } - } - } -#endif - N1_Mod::f(z, xy, p); - } - static const void2uIu f; -}; - -template -const void2uIu MulUnit::f = MulUnit::func; - -// z[N] <- x[N * 2] % p[N] -template -struct Dbl_Mod { - static inline void func(Unit *y, const Unit *x, const Unit *p) - { -#ifdef MCL_USE_VINT - mcl::vint::divNM(0, 0, y, x, N * 2, p, N); -#else - mp_limb_t q[N + 1]; // not used - mpn_tdiv_qr(q, (mp_limb_t*)y, 0, (const mp_limb_t*)x, N * 2, (const mp_limb_t*)p, N); -#endif - } - static const void3u f; -}; - -template -const void3u Dbl_Mod::f = Dbl_Mod::func; - -template -struct SubIfPossible { - static inline void f(Unit *z, const Unit *p) - { - Unit tmp[N - 1]; - if (SubPre::f(tmp, z, p) == 0) { - copyC(z, tmp); - z[N - 1] = 0; - } - } -}; -template -struct SubIfPossible<1, Tag> { - static inline void f(Unit *, const Unit *) - { - } -}; - - -// z[N] <- (x[N] + y[N]) % p[N] -template -struct Add { - static inline void func(Unit *z, const Unit *x, const Unit *y, const Unit *p) - { - if (isFullBit) { - if (AddPre::f(z, x, y)) { - SubPre::f(z, z, p); - return; - } - Unit tmp[N]; - if (SubPre::f(tmp, z, p) == 0) { - copyC(z, tmp); - } - } else { - AddPre::f(z, x, y); - Unit a = z[N - 1]; - Unit b = p[N - 1]; - if (a < b) return; - if (a > b) { - SubPre::f(z, z, p); - return; - } - /* the top of z and p are same */ - SubIfPossible::f(z, p); - } - } - static const void4u f; -}; - -template -const void4u Add::f = Add::func; - -// z[N] <- (x[N] - y[N]) % p[N] -template -struct Sub { - static inline void func(Unit *z, const Unit *x, const Unit *y, const Unit *p) - { - if (SubPre::f(z, x, y)) { - AddPre::f(z, z, p); - } - } - static const void4u f; -}; - -template -const void4u Sub::f = Sub::func; - -// z[N * 2] <- (x[N * 2] + y[N * 2]) mod p[N] << (N * UnitBitSize) -template -struct DblAdd { - static inline void func(Unit *z, const Unit *x, const Unit *y, const Unit *p) - { - if (AddPre::f(z, x, y)) { - SubPre::f(z + N, z + N, p); - return; - } - Unit tmp[N]; - if (SubPre::f(tmp, z + N, p) == 0) { - memcpy(z + N, tmp, sizeof(tmp)); - } - } - static const void4u f; -}; - -template -const void4u DblAdd::f = DblAdd::func; - -// z[N * 2] <- (x[N * 2] - y[N * 2]) mod p[N] << (N * UnitBitSize) -template -struct DblSub { - static inline void func(Unit *z, const Unit *x, const Unit *y, const Unit *p) - { - if (SubPre::f(z, x, y)) { - AddPre::f(z + N, z + N, p); - } - } - static const void4u f; -}; - -template -const void4u DblSub::f = DblSub::func; - -/* - z[N] <- montRed(xy[N * 2], p[N]) - REMARK : assume p[-1] = rp -*/ -template -struct MontRed { - static inline void func(Unit *z, const Unit *xy, const Unit *p) - { - const Unit rp = p[-1]; - Unit pq[N + 1]; - Unit buf[N * 2 + 1]; - copyC(buf + N + 1, xy + N + 1); - buf[N * 2] = 0; - Unit q = xy[0] * rp; - MulUnitPre::f(pq, p, q); - Unit up = AddPre::f(buf, xy, pq); - if (up) { - buf[N * 2] = AddUnitPre::f(buf + N + 1, N - 1, 1); - } - 
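Add above performs the modular addition in two tiers: it compares top limbs first to skip work when the sum is clearly below or above p, and falls back to SubIfPossible only when the top limbs tie. Collapsed to a single limb (the non-full-bit case, requiring x, y < p < 2^63):

    #include <cstdint>
    uint64_t addMod(uint64_t x, uint64_t y, uint64_t p)
    {
        uint64_t z = x + y;          // cannot wrap, since x + y < 2*p < 2^64
        return z >= p ? z - p : z;   // conditional final subtraction
    }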
Unit *c = buf + 1; - for (size_t i = 1; i < N; i++) { - q = c[0] * rp; - MulUnitPre::f(pq, p, q); - up = AddPre::f(c, c, pq); - if (up) { - AddUnitPre::f(c + N + 1, N - i, 1); - } - c++; - } - if (c[N]) { - SubPre::f(z, c, p); - } else { - if (SubPre::f(z, c, p)) { - memcpy(z, c, N * sizeof(Unit)); - } - } - } - static const void3u f; -}; - -template -const void3u MontRed::f = MontRed::func; - -/* - z[N] <- Montgomery(x[N], y[N], p[N]) - REMARK : assume p[-1] = rp -*/ -template -struct Mont { - static inline void func(Unit *z, const Unit *x, const Unit *y, const Unit *p) - { -#if MCL_MAX_BIT_SIZE == 1024 || MCL_SIZEOF_UNIT == 4 // check speed - Unit xy[N * 2]; - MulPre::f(xy, x, y); - MontRed::f(z, xy, p); -#else - const Unit rp = p[-1]; - if (isFullBit) { - Unit buf[N * 2 + 2]; - Unit *c = buf; - MulUnitPre::f(c, x, y[0]); // x * y[0] - Unit q = c[0] * rp; - Unit t[N + 2]; - MulUnitPre::f(t, p, q); // p * q - t[N + 1] = 0; // always zero - c[N + 1] = AddPre::f(c, c, t); - c++; - for (size_t i = 1; i < N; i++) { - MulUnitPre::f(t, x, y[i]); - c[N + 1] = AddPre::f(c, c, t); - q = c[0] * rp; - MulUnitPre::f(t, p, q); - AddPre::f(c, c, t); - c++; - } - if (c[N]) { - SubPre::f(z, c, p); - } else { - if (SubPre::f(z, c, p)) { - memcpy(z, c, N * sizeof(Unit)); - } - } - } else { - /* - R = 1 << 64 - L % 64 = 63 ; not full bit - F = 1 << (L + 1) - max p = (1 << L) - 1 - x, y <= p - 1 - max x * y[0], p * q <= ((1 << L) - 1)(R - 1) - t = x * y[i] + p * q <= 2((1 << L) - 1)(R - 1) = (F - 2)(R - 1) - t >> 64 <= (F - 2)(R - 1)/R = (F - 2) - (F - 2)/R - t + (t >> 64) = (F - 2)R - (F - 2)/R < FR - */ - Unit carry; - (void)carry; - Unit buf[N * 2 + 1]; - Unit *c = buf; - MulUnitPre::f(c, x, y[0]); // x * y[0] - Unit q = c[0] * rp; - Unit t[N + 1]; - MulUnitPre::f(t, p, q); // p * q - carry = AddPre::f(c, c, t); - assert(carry == 0); - c++; - c[N] = 0; - for (size_t i = 1; i < N; i++) { - c[N + 1] = 0; - MulUnitPre::f(t, x, y[i]); - carry = AddPre::f(c, c, t); - assert(carry == 0); - q = c[0] * rp; - MulUnitPre::f(t, p, q); - carry = AddPre::f(c, c, t); - assert(carry == 0); - c++; - } - assert(c[N] == 0); - if (SubPre::f(z, c, p)) { - memcpy(z, c, N * sizeof(Unit)); - } - } -#endif - } - static const void4u f; -}; - -template -const void4u Mont::f = Mont::func; - -// z[N] <- Montgomery(x[N], x[N], p[N]) -template -struct SqrMont { - static inline void func(Unit *y, const Unit *x, const Unit *p) - { -#if MCL_MAX_BIT_SIZE == 1024 || MCL_SIZEOF_UNIT == 4 // check speed - Unit xx[N * 2]; - SqrPre::f(xx, x); - MontRed::f(y, xx, p); -#else - Mont::f(y, x, x, p); -#endif - } - static const void3u f; -}; -template -const void3u SqrMont::f = SqrMont::func; - -// z[N] <- (x[N] * y[N]) % p[N] -template -struct Mul { - static inline void func(Unit *z, const Unit *x, const Unit *y, const Unit *p) - { - Unit xy[N * 2]; - MulPre::f(xy, x, y); - Dbl_Mod::f(z, xy, p); - } - static const void4u f; -}; -template -const void4u Mul::f = Mul::func; - -// y[N] <- (x[N] * x[N]) % p[N] -template -struct Sqr { - static inline void func(Unit *y, const Unit *x, const Unit *p) - { - Unit xx[N * 2]; - SqrPre::f(xx, x); - Dbl_Mod::f(y, xx, p); - } - static const void3u f; -}; -template -const void3u Sqr::f = Sqr::func; - -template -struct Fp2MulNF { - static inline void func(Unit *z, const Unit *x, const Unit *y, const Unit *p) - { - const Unit *const a = x; - const Unit *const b = x + N; - const Unit *const c = y; - const Unit *const d = y + N; - Unit d0[N * 2]; - Unit d1[N * 2]; - Unit d2[N * 2]; - Unit s[N]; - Unit t[N]; - 
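MontRed and Mont above run word-by-word Montgomery reduction: each pass computes q = c[0] * rp and adds q*p so the lowest word cancels, where rp = -p^{-1} mod 2^64 is stashed at p[-1] (the REMARK). One limb of the same REDC step, assuming GCC/Clang __int128 and p < 2^63 so the accumulator cannot overflow:

    #include <cstdint>
    typedef unsigned __int128 uint128;
    // returns T * 2^-64 mod p, given rp = -p^-1 mod 2^64 and T < p * 2^64
    uint64_t redc1(uint128 T, uint64_t p, uint64_t rp)
    {
        uint64_t q = (uint64_t)T * rp;       // q*p == -T (mod 2^64)
        uint128 t = T + (uint128)q * p;      // low 64 bits are now zero
        uint64_t r = (uint64_t)(t >> 64);    // exact division by 2^64
        return r >= p ? r - p : r;           // r < 2p, so one subtraction suffices
    }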
AddPre::f(s, a, b); - AddPre::f(t, c, d); - MulPre::f(d0, s, t); - MulPre::f(d1, a, c); - MulPre::f(d2, b, d); - SubPre::f(d0, d0, d1); - SubPre::f(d0, d0, d2); - MontRed::f(z + N, d0, p); - DblSub::f(d1, d1, d2, p); - MontRed::f(z, d1, p); - } - static const void4u f; -}; -template -const void4u Fp2MulNF::f = Fp2MulNF::func; - -} } // mcl::fp - -#ifdef _MSC_VER - #pragma warning(pop) -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/src/low_func_llvm.hpp b/vendor/github.com/dexon-foundation/mcl/src/low_func_llvm.hpp deleted file mode 100644 index 8a44c2277..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/low_func_llvm.hpp +++ /dev/null @@ -1,94 +0,0 @@ -#pragma once - -namespace mcl { namespace fp { - -template<> -struct EnableKaratsuba { -#if MCL_SIZEOF_UNIT == 4 - static const size_t minMulN = 10; - static const size_t minSqrN = 10; -#else - static const size_t minMulN = 8; - static const size_t minSqrN = 6; -#endif -}; - -#if MCL_SIZEOF_UNIT == 4 - #define MCL_GMP_IS_FASTER_THAN_LLVM // QQQ : check later -#endif - -#ifdef MCL_GMP_IS_FASTER_THAN_LLVM -#define MCL_DEF_MUL(n, tag, suf) -#else -#define MCL_DEF_MUL(n, tag, suf) \ -template<>const void3u MulPreCore::f = &mcl_fpDbl_mulPre ## n ## suf; \ -template<>const void2u SqrPreCore::f = &mcl_fpDbl_sqrPre ## n ## suf; -#endif - -#define MCL_DEF_LLVM_FUNC2(n, tag, suf) \ -template<>const u3u AddPre::f = &mcl_fp_addPre ## n ## suf; \ -template<>const u3u SubPre::f = &mcl_fp_subPre ## n ## suf; \ -template<>const void2u Shr1::f = &mcl_fp_shr1_ ## n ## suf; \ -MCL_DEF_MUL(n, tag, suf) \ -template<>const void2uI MulUnitPre::f = &mcl_fp_mulUnitPre ## n ## suf; \ -template<>const void4u Add::f = &mcl_fp_add ## n ## suf; \ -template<>const void4u Add::f = &mcl_fp_addNF ## n ## suf; \ -template<>const void4u Sub::f = &mcl_fp_sub ## n ## suf; \ -template<>const void4u Sub::f = &mcl_fp_subNF ## n ## suf; \ -template<>const void4u Mont::f = &mcl_fp_mont ## n ## suf; \ -template<>const void4u Mont::f = &mcl_fp_montNF ## n ## suf; \ -template<>const void3u MontRed::f = &mcl_fp_montRed ## n ## suf; \ -template<>const void4u DblAdd::f = &mcl_fpDbl_add ## n ## suf; \ -template<>const void4u DblSub::f = &mcl_fpDbl_sub ## n ## suf; \ - -#if (CYBOZU_HOST == CYBOZU_HOST_INTEL) && !defined(MCL_USE_VINT) -#define MCL_DEF_LLVM_FUNC(n) \ - MCL_DEF_LLVM_FUNC2(n, Ltag, L) \ - MCL_DEF_LLVM_FUNC2(n, LBMI2tag, Lbmi2) -#else -#define MCL_DEF_LLVM_FUNC(n) \ - MCL_DEF_LLVM_FUNC2(n, Ltag, L) -#endif - -MCL_DEF_LLVM_FUNC(1) -MCL_DEF_LLVM_FUNC(2) -MCL_DEF_LLVM_FUNC(3) -MCL_DEF_LLVM_FUNC(4) -#if MCL_MAX_UNIT_SIZE >= 6 -MCL_DEF_LLVM_FUNC(5) -MCL_DEF_LLVM_FUNC(6) -#endif -#if MCL_MAX_UNIT_SIZE >= 8 -MCL_DEF_LLVM_FUNC(7) -MCL_DEF_LLVM_FUNC(8) -#endif -#if MCL_MAX_UNIT_SIZE >= 9 -MCL_DEF_LLVM_FUNC(9) -#endif -#if MCL_MAX_UNIT_SIZE >= 10 -MCL_DEF_LLVM_FUNC(10) -#endif -#if MCL_MAX_UNIT_SIZE >= 12 -MCL_DEF_LLVM_FUNC(11) -MCL_DEF_LLVM_FUNC(12) -#endif -#if MCL_MAX_UNIT_SIZE >= 14 -MCL_DEF_LLVM_FUNC(13) -MCL_DEF_LLVM_FUNC(14) -#endif -#if MCL_MAX_UNIT_SIZE >= 16 -MCL_DEF_LLVM_FUNC(15) -#if MCL_SIZEOF_UNIT == 4 -MCL_DEF_LLVM_FUNC(16) -#else -/// QQQ : check speed -template<>const void3u MontRed<16, Ltag>::f = &mcl_fp_montRed16L; -template<>const void3u MontRed<16, LBMI2tag>::f = &mcl_fp_montRed16Lbmi2; -#endif -#endif -#if MCL_MAX_UNIT_SIZE >= 17 -MCL_DEF_LLVM_FUNC(17) -#endif - -} } // mcl::fp - diff --git a/vendor/github.com/dexon-foundation/mcl/src/proj/mcl.vcxproj b/vendor/github.com/dexon-foundation/mcl/src/proj/mcl.vcxproj deleted file mode 100644 index 
b247982ab..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/proj/mcl.vcxproj +++ /dev/null @@ -1,92 +0,0 @@ - - - - - Debug - x64 - - - Release - x64 - - - - {1DBB979A-C212-45CD-9563-446A96F87F71} - Win32Proj - ec_test - - - - StaticLibrary - true - v120 - MultiByte - - - StaticLibrary - false - v120 - true - MultiByte - - - - - - - - - - - - - - - - - true - .lib - $(SolutionDir)lib\ - - - false - .lib - $(SolutionDir)lib\ - - - - - - Level3 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - - - Console - true - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - - - Console - true - true - true - - - - - - - - - \ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/mcl/src/proto.hpp b/vendor/github.com/dexon-foundation/mcl/src/proto.hpp deleted file mode 100644 index 97c331194..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/proto.hpp +++ /dev/null @@ -1,81 +0,0 @@ -#pragma once -/** - @file - @brief prototype of asm function - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include - -#define MCL_FP_DEF_FUNC_SUB(n, suf) \ -void mcl_fp_add ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); \ -void mcl_fp_addNF ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); \ -void mcl_fp_sub ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); \ -void mcl_fp_subNF ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); \ -void mcl_fp_shr1_ ## n ## suf(mcl::fp::Unit*y, const mcl::fp::Unit* x); \ -mcl::fp::Unit mcl_fp_addPre ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y); \ -mcl::fp::Unit mcl_fp_subPre ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y); \ -void mcl_fp_mulUnitPre ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, mcl::fp::Unit y); \ -void mcl_fpDbl_mulPre ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y); \ -void mcl_fpDbl_sqrPre ## n ## suf(mcl::fp::Unit* y, const mcl::fp::Unit* x); \ -void mcl_fp_mont ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); \ -void mcl_fp_montNF ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); \ -void mcl_fp_montRed ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* xy, const mcl::fp::Unit* p); \ -void mcl_fpDbl_add ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); \ -void mcl_fpDbl_sub ## n ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* p); - -#define MCL_FP_DEF_FUNC(n) \ - MCL_FP_DEF_FUNC_SUB(n, L) \ - MCL_FP_DEF_FUNC_SUB(n, Lbmi2) - -#define MCL_FP_DEF_FUNC_SPECIAL(suf) \ -void mcl_fpDbl_mod_NIST_P192 ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* xy, const mcl::fp::Unit* /* dummy */); \ -void mcl_fp_mulNIST_P192 ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* x, const mcl::fp::Unit* y, const mcl::fp::Unit* /* dummy */); \ -void mcl_fp_sqr_NIST_P192 ## suf(mcl::fp::Unit* y, const mcl::fp::Unit* x, const mcl::fp::Unit* /* dummy */); \ -void mcl_fpDbl_mod_NIST_P521 ## suf(mcl::fp::Unit* z, const mcl::fp::Unit* xy, const mcl::fp::Unit* /* dummy */); - -extern "C" { - -MCL_FP_DEF_FUNC(1) 
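proto.hpp above only token-pastes the extern "C" kernel prototypes; the actual binding happens in low_func_llvm.hpp, where an explicit specialization of the static member f swaps the generic routine for a generated kernel, per limb count and tag. The same mechanism reduced to a self-contained toy (the "kernel" here is a stand-in for an asm/LLVM routine):

    #include <cstddef>
    typedef unsigned long long Unit;
    typedef Unit (*u3u)(Unit*, const Unit*, const Unit*);
    struct Gtag; struct Ltag;                       // dispatch tags

    template<size_t N, class Tag> struct AddPreT {
        static Unit func(Unit *z, const Unit *x, const Unit *y)
        {                                           // portable fallback
            Unit c = 0;
            for (size_t i = 0; i < N; i++) {
                Unit s = x[i] + c; c = (s < c);
                z[i] = s + y[i];   c += (z[i] < s);
            }
            return c;
        }
        static const u3u f;
    };
    template<size_t N, class Tag>
    const u3u AddPreT<N, Tag>::f = AddPreT<N, Tag>::func;  // default binding

    Unit addPre4L(Unit *z, const Unit *x, const Unit *y)   // stand-in kernel
    { return AddPreT<4, Gtag>::func(z, x, y); }
    template<> const u3u AddPreT<4, Ltag>::f = &addPre4L;  // rebound for N=4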
-MCL_FP_DEF_FUNC(2) -MCL_FP_DEF_FUNC(3) -MCL_FP_DEF_FUNC(4) -#if MCL_MAX_UNIT_SIZE >= 6 -MCL_FP_DEF_FUNC(5) -MCL_FP_DEF_FUNC(6) -#endif -#if MCL_MAX_UNIT_SIZE >= 8 -MCL_FP_DEF_FUNC(7) -MCL_FP_DEF_FUNC(8) -#endif -#if MCL_MAX_UNIT_SIZE >= 9 -MCL_FP_DEF_FUNC(9) -#endif -#if MCL_MAX_UNIT_SIZE >= 10 -MCL_FP_DEF_FUNC(10) -#endif -#if MCL_MAX_UNIT_SIZE >= 12 -MCL_FP_DEF_FUNC(11) -MCL_FP_DEF_FUNC(12) -#endif -#if MCL_MAX_UNIT_SIZE >= 14 -MCL_FP_DEF_FUNC(13) -MCL_FP_DEF_FUNC(14) -#endif -#if MCL_MAX_UNIT_SIZE >= 16 -MCL_FP_DEF_FUNC(15) -MCL_FP_DEF_FUNC(16) -#endif -#if MCL_MAX_UNIT_SIZE >= 17 -MCL_FP_DEF_FUNC(17) -#endif - -MCL_FP_DEF_FUNC_SPECIAL(L) -MCL_FP_DEF_FUNC_SPECIAL(Lbmi2) - -} - -#undef MCL_FP_DEF_FUNC_SUB -#undef MCL_FP_DEF_FUNC - diff --git a/vendor/github.com/dexon-foundation/mcl/src/she_c256.cpp b/vendor/github.com/dexon-foundation/mcl/src/she_c256.cpp deleted file mode 100644 index 84873e4ca..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/she_c256.cpp +++ /dev/null @@ -1,2 +0,0 @@ -#define MCLBN_FP_UNIT_SIZE 4 -#include "she_c_impl.hpp" diff --git a/vendor/github.com/dexon-foundation/mcl/src/she_c384.cpp b/vendor/github.com/dexon-foundation/mcl/src/she_c384.cpp deleted file mode 100644 index bfc456a05..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/she_c384.cpp +++ /dev/null @@ -1,2 +0,0 @@ -#define MCLBN_FP_UNIT_SIZE 6 -#include "she_c_impl.hpp" diff --git a/vendor/github.com/dexon-foundation/mcl/src/she_c_impl.hpp b/vendor/github.com/dexon-foundation/mcl/src/she_c_impl.hpp deleted file mode 100644 index 073bc2b34..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/she_c_impl.hpp +++ /dev/null @@ -1,681 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include "mcl/impl/bn_c_impl.hpp" -#define MCLSHE_DLL_EXPORT - -#include -#include - -using namespace mcl::she; -using namespace mcl::bn; - -static SecretKey *cast(sheSecretKey *p) { return reinterpret_cast(p); } -static const SecretKey *cast(const sheSecretKey *p) { return reinterpret_cast(p); } - -static PublicKey *cast(shePublicKey *p) { return reinterpret_cast(p); } -static const PublicKey *cast(const shePublicKey *p) { return reinterpret_cast(p); } - -static PrecomputedPublicKey *cast(shePrecomputedPublicKey *p) { return reinterpret_cast(p); } -static const PrecomputedPublicKey *cast(const shePrecomputedPublicKey *p) { return reinterpret_cast(p); } - -static CipherTextG1 *cast(sheCipherTextG1 *p) { return reinterpret_cast(p); } -static const CipherTextG1 *cast(const sheCipherTextG1 *p) { return reinterpret_cast(p); } - -static CipherTextG2 *cast(sheCipherTextG2 *p) { return reinterpret_cast(p); } -static const CipherTextG2 *cast(const sheCipherTextG2 *p) { return reinterpret_cast(p); } - -static CipherTextGT *cast(sheCipherTextGT *p) { return reinterpret_cast(p); } -static const CipherTextGT *cast(const sheCipherTextGT *p) { return reinterpret_cast(p); } - -static ZkpBin *cast(sheZkpBin *p) { return reinterpret_cast(p); } -static const ZkpBin *cast(const sheZkpBin *p) { return reinterpret_cast(p); } - -static ZkpEq *cast(sheZkpEq *p) { return reinterpret_cast(p); } -static const ZkpEq *cast(const sheZkpEq *p) { return reinterpret_cast(p); } - -static ZkpBinEq *cast(sheZkpBinEq *p) { return reinterpret_cast(p); } -static const ZkpBinEq *cast(const sheZkpBinEq *p) { return reinterpret_cast(p); } - -int sheInit(int curve, int compiledTimeVar) - try -{ - if (compiledTimeVar != MCLBN_COMPILED_TIME_VAR) { - return -2; - } - mcl::CurveParam cp; - switch (curve) { - case 
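The cast() helpers above are the opaque-handle bridge between the C header types and the C++ classes: the C side sees only a POD of matching storage, and every entry point reinterprets it before touching the implementation. Reduced to a toy with illustrative names:

    class MyImpl { public: int v; MyImpl() : v(0) {} };
    typedef struct { unsigned char d[sizeof(MyImpl)]; } myHandle;  // C-visible POD
    static MyImpl *cast(myHandle *p) { return reinterpret_cast<MyImpl*>(p); }
    static const MyImpl *cast(const myHandle *p) { return reinterpret_cast<const MyImpl*>(p); }
    extern "C" int myGetV(const myHandle *h) { return cast(h)->v; }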
MCL_BN254: - cp = mcl::BN254; - break; - case MCL_BN381_1: - cp = mcl::BN381_1; - break; - case MCL_BN381_2: - cp = mcl::BN381_2; - break; - case MCL_BN462: - cp = mcl::BN462; - break; - case MCL_BN_SNARK1: - cp = mcl::BN_SNARK1; - break; - case MCL_BLS12_381: - cp = mcl::BLS12_381; - break; - default: - return -1; - } - SHE::init(cp); - return 0; -} catch (std::exception&) { - return -1; -} - -mclSize sheSecretKeySerialize(void *buf, mclSize maxBufSize, const sheSecretKey *sec) -{ - return (mclSize)cast(sec)->serialize(buf, maxBufSize); -} - -mclSize shePublicKeySerialize(void *buf, mclSize maxBufSize, const shePublicKey *pub) -{ - return (mclSize)cast(pub)->serialize(buf, maxBufSize); -} - -mclSize sheCipherTextG1Serialize(void *buf, mclSize maxBufSize, const sheCipherTextG1 *c) -{ - return (mclSize)cast(c)->serialize(buf, maxBufSize); -} - -mclSize sheCipherTextG2Serialize(void *buf, mclSize maxBufSize, const sheCipherTextG2 *c) -{ - return (mclSize)cast(c)->serialize(buf, maxBufSize); -} - -mclSize sheCipherTextGTSerialize(void *buf, mclSize maxBufSize, const sheCipherTextGT *c) -{ - return (mclSize)cast(c)->serialize(buf, maxBufSize); -} - -mclSize sheZkpBinSerialize(void *buf, mclSize maxBufSize, const sheZkpBin *zkp) -{ - return (mclSize)cast(zkp)->serialize(buf, maxBufSize); -} - -mclSize sheZkpEqSerialize(void *buf, mclSize maxBufSize, const sheZkpEq *zkp) -{ - return (mclSize)cast(zkp)->serialize(buf, maxBufSize); -} - -mclSize sheZkpBinEqSerialize(void *buf, mclSize maxBufSize, const sheZkpBinEq *zkp) -{ - return (mclSize)cast(zkp)->serialize(buf, maxBufSize); -} - -mclSize sheSecretKeyDeserialize(sheSecretKey* sec, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(sec)->deserialize(buf, bufSize); -} - -mclSize shePublicKeyDeserialize(shePublicKey* pub, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(pub)->deserialize(buf, bufSize); -} - -mclSize sheCipherTextG1Deserialize(sheCipherTextG1* c, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(c)->deserialize(buf, bufSize); -} - -mclSize sheCipherTextG2Deserialize(sheCipherTextG2* c, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(c)->deserialize(buf, bufSize); -} - -mclSize sheCipherTextGTDeserialize(sheCipherTextGT* c, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(c)->deserialize(buf, bufSize); -} - -mclSize sheZkpBinDeserialize(sheZkpBin* zkp, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(zkp)->deserialize(buf, bufSize); -} - -mclSize sheZkpEqDeserialize(sheZkpEq* zkp, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(zkp)->deserialize(buf, bufSize); -} - -mclSize sheZkpBinEqDeserialize(sheZkpBinEq* zkp, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(zkp)->deserialize(buf, bufSize); -} - -int sheSecretKeySetByCSPRNG(sheSecretKey *sec) -{ - cast(sec)->setByCSPRNG(); - return 0; -} - -void sheGetPublicKey(shePublicKey *pub, const sheSecretKey *sec) -{ - cast(sec)->getPublicKey(*cast(pub)); -} - -static int setRangeForDLP(void (*f)(mclSize), mclSize hashSize) - try -{ - f(hashSize); - return 0; -} catch (std::exception&) { - return -1; -} - -int sheSetRangeForDLP(mclSize hashSize) -{ - return setRangeForDLP(SHE::setRangeForDLP, hashSize); -} -int sheSetRangeForG1DLP(mclSize hashSize) -{ - return setRangeForDLP(SHE::setRangeForG1DLP, hashSize); -} -int sheSetRangeForG2DLP(mclSize hashSize) -{ - return setRangeForDLP(SHE::setRangeForG2DLP, hashSize); -} -int sheSetRangeForGTDLP(mclSize hashSize) -{ - return 
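Each serialize call above hands back the number of bytes written and each deserialize the number consumed; by mcl's usual convention a return of 0 signals failure, so the pair composes into a simple round trip:

    // assumes sheInit(MCL_BN254, MCLBN_COMPILED_TIME_VAR) already returned 0;
    // the buffer size is illustrative
    sheSecretKey sec, sec2;
    sheSecretKeySetByCSPRNG(&sec);
    unsigned char buf[2048];
    mclSize n = sheSecretKeySerialize(buf, sizeof(buf), &sec);
    if (n == 0) { /* buffer too small or serialization failed */ }
    if (sheSecretKeyDeserialize(&sec2, buf, n) == 0) { /* malformed input */ }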
setRangeForDLP(SHE::setRangeForGTDLP, hashSize); -} - -void sheSetTryNum(mclSize tryNum) -{ - SHE::setTryNum(tryNum); -} -void sheUseDecG1ViaGT(int use) -{ - SHE::useDecG1ViaGT(use != 0); -} -void sheUseDecG2ViaGT(int use) -{ - SHE::useDecG2ViaGT(use != 0); -} - -template -mclSize loadTable(HashTable& table, const void *buf, mclSize bufSize) - try -{ - return table.load(buf, bufSize); -} catch (std::exception&) { - return 0; -} - -mclSize sheLoadTableForG1DLP(const void *buf, mclSize bufSize) -{ - return loadTable(getHashTableG1(), buf, bufSize); -} -mclSize sheLoadTableForG2DLP(const void *buf, mclSize bufSize) -{ - return loadTable(getHashTableG2(), buf, bufSize); -} -mclSize sheLoadTableForGTDLP(const void *buf, mclSize bufSize) -{ - return loadTable(getHashTableGT(), buf, bufSize); -} - -template -mclSize saveTable(void *buf, mclSize maxBufSize, const HashTable& table) - try -{ - return table.save(buf, maxBufSize); -} catch (std::exception&) { - return 0; -} -mclSize sheSaveTableForG1DLP(void *buf, mclSize maxBufSize) -{ - return saveTable(buf, maxBufSize, SHE::PhashTbl_); -} -mclSize sheSaveTableForG2DLP(void *buf, mclSize maxBufSize) -{ - return saveTable(buf, maxBufSize, SHE::QhashTbl_); -} -mclSize sheSaveTableForGTDLP(void *buf, mclSize maxBufSize) -{ - return saveTable(buf, maxBufSize, SHE::ePQhashTbl_); -} - -template -int encT(CT *c, const shePublicKey *pub, mclInt m) - try -{ - cast(pub)->enc(*cast(c), m); - return 0; -} catch (std::exception&) { - return -1; -} - -int sheEncG1(sheCipherTextG1 *c, const shePublicKey *pub, mclInt m) -{ - return encT(c, pub, m); -} - -int sheEncG2(sheCipherTextG2 *c, const shePublicKey *pub, mclInt m) -{ - return encT(c, pub, m); -} - -int sheEncGT(sheCipherTextGT *c, const shePublicKey *pub, mclInt m) -{ - return encT(c, pub, m); -} - -template -int encWithZkpBinT(CT *c, sheZkpBin *zkp, const PK *pub, int m) - try -{ - cast(pub)->encWithZkpBin(*cast(c), *cast(zkp), m); - return 0; -} catch (std::exception&) { - return -1; -} - -int sheEncWithZkpBinG1(sheCipherTextG1 *c, sheZkpBin *zkp, const shePublicKey *pub, int m) -{ - return encWithZkpBinT(c, zkp, pub, m); -} - -int sheEncWithZkpBinG2(sheCipherTextG2 *c, sheZkpBin *zkp, const shePublicKey *pub, int m) -{ - return encWithZkpBinT(c, zkp, pub, m); -} - -int shePrecomputedPublicKeyEncWithZkpBinG1(sheCipherTextG1 *c, sheZkpBin *zkp, const shePrecomputedPublicKey *pub, int m) -{ - return encWithZkpBinT(c, zkp, pub, m); -} - -int shePrecomputedPublicKeyEncWithZkpBinG2(sheCipherTextG2 *c, sheZkpBin *zkp, const shePrecomputedPublicKey *pub, int m) -{ - return encWithZkpBinT(c, zkp, pub, m); -} - -template -int encWithZkpEqT(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpEq *zkp, const PK *pub, mclInt m) - try -{ - cast(pub)->encWithZkpEq(*cast(c1), *cast(c2), *cast(zkp), m); - return 0; -} catch (std::exception&) { - return -1; -} - -int sheEncWithZkpEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpEq *zkp, const shePublicKey *pub, mclInt m) -{ - return encWithZkpEqT(c1, c2, zkp, pub, m); -} - -int shePrecomputedPublicKeyEncWithZkpEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpEq *zkp, const shePrecomputedPublicKey *ppub, mclInt m) -{ - return encWithZkpEqT(c1, c2, zkp, ppub, m); -} - -template -int encWithZkpBinEqT(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpBinEq *zkp, const PK *pub, int m) - try -{ - cast(pub)->encWithZkpBinEq(*cast(c1), *cast(c2), *cast(zkp), m); - return 0; -} catch (std::exception&) { - return -1; -} - -int sheEncWithZkpBinEq(sheCipherTextG1 *c1, sheCipherTextG2 
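Note the recurring shape of these wrappers: a function-try-block fences the whole body, converting any C++ exception into an int status so nothing ever unwinds across the C ABI. The skeleton, with illustrative names:

    extern "C" int myApiCall(myHandle *h, int arg)
        try
    {
        cast(h)->doThing(arg);      // may throw; doThing is hypothetical
        return 0;
    } catch (std::exception&) {
        return -1;                  // exceptions never cross the C boundary
    }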
*c2, sheZkpBinEq *zkp, const shePublicKey *pub, int m) -{ - return encWithZkpBinEqT(c1, c2, zkp, pub, m); -} - -int shePrecomputedPublicKeyEncWithZkpBinEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpBinEq *zkp, const shePrecomputedPublicKey *ppub, int m) -{ - return encWithZkpBinEqT(c1, c2, zkp, ppub, m); -} - -template -int decT(mclInt *m, const sheSecretKey *sec, const CT *c) - try -{ - *m = (cast(sec)->dec)(*cast(c)); - return 0; -} catch (std::exception&) { - return -1; -} - -int sheDecG1(mclInt *m, const sheSecretKey *sec, const sheCipherTextG1 *c) -{ - return decT(m, sec, c); -} - -int sheDecG2(mclInt *m, const sheSecretKey *sec, const sheCipherTextG2 *c) -{ - return decT(m, sec, c); -} - -int sheDecGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextGT *c) -{ - return decT(m, sec, c); -} - -template -int decViaGTT(mclInt *m, const sheSecretKey *sec, const CT *c) - try -{ - *m = (cast(sec)->decViaGT)(*cast(c)); - return 0; -} catch (std::exception&) { - return -1; -} - -int sheDecG1ViaGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextG1 *c) -{ - return decViaGTT(m, sec, c); -} - -int sheDecG2ViaGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextG2 *c) -{ - return decViaGTT(m, sec, c); -} - - -template -int isZeroT(const sheSecretKey *sec, const CT *c) - try -{ - return cast(sec)->isZero(*cast(c)); -} catch (std::exception&) { - return 0; -} - -int sheIsZeroG1(const sheSecretKey *sec, const sheCipherTextG1 *c) -{ - return isZeroT(sec, c); -} -int sheIsZeroG2(const sheSecretKey *sec, const sheCipherTextG2 *c) -{ - return isZeroT(sec, c); -} -int sheIsZeroGT(const sheSecretKey *sec, const sheCipherTextGT *c) -{ - return isZeroT(sec, c); -} - -template -int negT(CT& y, const CT& x) - try -{ - CT::neg(y, x); - return 0; -} catch (std::exception&) { - return -1; -} - -int sheNegG1(sheCipherTextG1 *y, const sheCipherTextG1 *x) -{ - return negT(*cast(y), *cast(x)); -} - -int sheNegG2(sheCipherTextG2 *y, const sheCipherTextG2 *x) -{ - return negT(*cast(y), *cast(x)); -} - -int sheNegGT(sheCipherTextGT *y, const sheCipherTextGT *x) -{ - return negT(*cast(y), *cast(x)); -} - -template -int addT(CT& z, const CT& x, const CT& y) - try -{ - CT::add(z, x, y); - return 0; -} catch (std::exception&) { - return -1; -} - -int sheAddG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, const sheCipherTextG1 *y) -{ - return addT(*cast(z), *cast(x), *cast(y)); -} - -int sheAddG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, const sheCipherTextG2 *y) -{ - return addT(*cast(z), *cast(x), *cast(y)); -} - -int sheAddGT(sheCipherTextGT *z, const sheCipherTextGT *x, const sheCipherTextGT *y) -{ - return addT(*cast(z), *cast(x), *cast(y)); -} - -template -int subT(CT& z, const CT& x, const CT& y) - try -{ - CT::sub(z, x, y); - return 0; -} catch (std::exception&) { - return -1; -} - -int sheSubG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, const sheCipherTextG1 *y) -{ - return subT(*cast(z), *cast(x), *cast(y)); -} - -int sheSubG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, const sheCipherTextG2 *y) -{ - return subT(*cast(z), *cast(x), *cast(y)); -} - -int sheSubGT(sheCipherTextGT *z, const sheCipherTextGT *x, const sheCipherTextGT *y) -{ - return subT(*cast(z), *cast(x), *cast(y)); -} - -template -int mulT(CT1& z, const CT2& x, const CT3& y) - try -{ - CT1::mul(z, x, y); - return 0; -} catch (std::exception&) { - return -1; -} - -int sheMulG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, mclInt y) -{ - return mulT(*cast(z), *cast(x), y); -} - -int sheMulG2(sheCipherTextG2 *z, const 
sheCipherTextG2 *x, mclInt y) -{ - return mulT(*cast(z), *cast(x), y); -} - -int sheMulGT(sheCipherTextGT *z, const sheCipherTextGT *x, mclInt y) -{ - return mulT(*cast(z), *cast(x), y); -} - -int sheMul(sheCipherTextGT *z, const sheCipherTextG1 *x, const sheCipherTextG2 *y) -{ - return mulT(*cast(z), *cast(x), *cast(y)); -} - -int sheMulML(sheCipherTextGT *z, const sheCipherTextG1 *x, const sheCipherTextG2 *y) - try -{ - CipherTextGT::mulML(*cast(z), *cast(x), *cast(y)); - return 0; -} catch (std::exception&) { - return -1; -} - -int sheFinalExpGT(sheCipherTextGT *y, const sheCipherTextGT *x) - try -{ - CipherTextGT::finalExp(*cast(y), *cast(x)); - return 0; -} catch (std::exception&) { - return -1; -} - -template -int reRandT(CT& c, const shePublicKey *pub) - try -{ - cast(pub)->reRand(c); - return 0; -} catch (std::exception&) { - return -1; -} - -int sheReRandG1(sheCipherTextG1 *c, const shePublicKey *pub) -{ - return reRandT(*cast(c), pub); -} - -int sheReRandG2(sheCipherTextG2 *c, const shePublicKey *pub) -{ - return reRandT(*cast(c), pub); -} - -int sheReRandGT(sheCipherTextGT *c, const shePublicKey *pub) -{ - return reRandT(*cast(c), pub); -} - -template -int convert(sheCipherTextGT *y, const shePublicKey *pub, const CT *x) - try -{ - cast(pub)->convert(*cast(y), *cast(x)); - return 0; -} catch (std::exception&) { - return -1; -} - -int sheConvertG1(sheCipherTextGT *y, const shePublicKey *pub, const sheCipherTextG1 *x) -{ - return convert(y, pub, x); -} - -int sheConvertG2(sheCipherTextGT *y, const shePublicKey *pub, const sheCipherTextG2 *x) -{ - return convert(y, pub, x); -} - -shePrecomputedPublicKey *shePrecomputedPublicKeyCreate() - try -{ - return reinterpret_cast(new PrecomputedPublicKey()); -} catch (...) { - return 0; -} - -void shePrecomputedPublicKeyDestroy(shePrecomputedPublicKey *ppub) -{ - delete cast(ppub); -} - -int shePrecomputedPublicKeyInit(shePrecomputedPublicKey *ppub, const shePublicKey *pub) - try -{ - cast(ppub)->init(*cast(pub)); - return 0; -} catch (...) 
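Taken together these wrappers expose a two-level homomorphism: ciphertexts add freely within G1, G2, or GT, while one multiplication pairs a G1 and a G2 ciphertext into GT (sheMulML defers the final exponentiation so several products can be accumulated before one sheFinalExpGT). A usage sketch, assuming sheInit succeeded and a GT DLP range/table was set so decryption can finish:

    sheSecretKey sec;   sheSecretKeySetByCSPRNG(&sec);
    shePublicKey pub;   sheGetPublicKey(&pub, &sec);
    sheCipherTextG1 ca; sheEncG1(&ca, &pub, 3);
    sheCipherTextG2 cb; sheEncG2(&cb, &pub, 4);
    sheCipherTextGT cp; sheMul(&cp, &ca, &cb);       // Enc(3 * 4)
    sheCipherTextGT cs; sheAddGT(&cs, &cp, &cp);     // Enc(12 + 12)
    mclInt m = 0;
    if (sheDecGT(&m, &sec, &cs) == 0) { /* m == 24 */ }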
{ - return 1; -} - -template -int pEncT(CT *c, const shePrecomputedPublicKey *pub, mclInt m) - try -{ - cast(pub)->enc(*cast(c), m); - return 0; -} catch (std::exception&) { - return -1; -} - -int shePrecomputedPublicKeyEncG1(sheCipherTextG1 *c, const shePrecomputedPublicKey *pub, mclInt m) -{ - return pEncT(c, pub, m); -} - -int shePrecomputedPublicKeyEncG2(sheCipherTextG2 *c, const shePrecomputedPublicKey *pub, mclInt m) -{ - return pEncT(c, pub, m); -} - -int shePrecomputedPublicKeyEncGT(sheCipherTextGT *c, const shePrecomputedPublicKey *pub, mclInt m) -{ - return pEncT(c, pub, m); -} - -template -int verifyT(const PK& pub, const CT& c, const ZkpBin& zkp) - try -{ - return pub.verify(c, zkp); -} catch (std::exception&) { - return 0; -} - -int sheVerifyZkpBinG1(const shePublicKey *pub, const sheCipherTextG1 *c, const sheZkpBin *zkp) -{ - return verifyT(*cast(pub), *cast(c), *cast(zkp)); -} -int sheVerifyZkpBinG2(const shePublicKey *pub, const sheCipherTextG2 *c, const sheZkpBin *zkp) -{ - return verifyT(*cast(pub), *cast(c), *cast(zkp)); -} -int shePrecomputedPublicKeyVerifyZkpBinG1(const shePrecomputedPublicKey *pub, const sheCipherTextG1 *c, const sheZkpBin *zkp) -{ - return verifyT(*cast(pub), *cast(c), *cast(zkp)); -} -int shePrecomputedPublicKeyVerifyZkpBinG2(const shePrecomputedPublicKey *pub, const sheCipherTextG2 *c, const sheZkpBin *zkp) -{ - return verifyT(*cast(pub), *cast(c), *cast(zkp)); -} - -template -int verifyT(const PK& pub, const CipherTextG1& c1, const CipherTextG2& c2, const Zkp& zkp) - try -{ - return pub.verify(c1, c2, zkp); -} catch (std::exception&) { - return 0; -} - -int sheVerifyZkpEq(const shePublicKey *pub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpEq *zkp) -{ - return verifyT(*cast(pub), *cast(c1), *cast(c2), *cast(zkp)); -} -int sheVerifyZkpBinEq(const shePublicKey *pub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpBinEq *zkp) -{ - return verifyT(*cast(pub), *cast(c1), *cast(c2), *cast(zkp)); -} -int shePrecomputedPublicKeyVerifyZkpEq(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpEq *zkp) -{ - return verifyT(*cast(ppub), *cast(c1), *cast(c2), *cast(zkp)); -} -int shePrecomputedPublicKeyVerifyZkpBinEq(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpBinEq *zkp) -{ - return verifyT(*cast(ppub), *cast(c1), *cast(c2), *cast(zkp)); -} - diff --git a/vendor/github.com/dexon-foundation/mcl/src/xbyak/xbyak.h b/vendor/github.com/dexon-foundation/mcl/src/xbyak/xbyak.h deleted file mode 100644 index bcfeb34bf..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/xbyak/xbyak.h +++ /dev/null @@ -1,2611 +0,0 @@ -#pragma once -#ifndef XBYAK_XBYAK_H_ -#define XBYAK_XBYAK_H_ -/*! - @file xbyak.h - @brief Xbyak ; JIT assembler for x86(IA32)/x64 by C++ - @author herumi - @url https://github.com/herumi/xbyak - @note modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#ifndef XBYAK_NO_OP_NAMES - #if not +0 // trick to detect whether 'not' is operator or not - #error "use -fno-operator-names option if you want to use and(), or(), xor(), not() as function names, Or define XBYAK_NO_OP_NAMES and use and_(), or_(), xor_(), not_()." 
- #endif -#endif - -#include // for debug print -#include -#include -#include -#include -#ifndef NDEBUG -#include -#endif - -// #define XBYAK_DISABLE_AVX512 - -//#define XBYAK_USE_MMAP_ALLOCATOR -#if !defined(__GNUC__) || defined(__MINGW32__) - #undef XBYAK_USE_MMAP_ALLOCATOR -#endif - -#ifdef __GNUC__ - #define XBYAK_GNUC_PREREQ(major, minor) ((__GNUC__) * 100 + (__GNUC_MINOR__) >= (major) * 100 + (minor)) -#else - #define XBYAK_GNUC_PREREQ(major, minor) 0 -#endif - -// This covers -std=(gnu|c)++(0x|11|1y), -stdlib=libc++, and modern Microsoft. -#if ((defined(_MSC_VER) && (_MSC_VER >= 1600)) || defined(_LIBCPP_VERSION) ||\ - ((__cplusplus >= 201103) || defined(__GXX_EXPERIMENTAL_CXX0X__))) - #include - #define XBYAK_STD_UNORDERED_SET std::unordered_set - #include - #define XBYAK_STD_UNORDERED_MAP std::unordered_map - #define XBYAK_STD_UNORDERED_MULTIMAP std::unordered_multimap - -/* - Clang/llvm-gcc and ICC-EDG in 'GCC-mode' always claim to be GCC 4.2, using - libstdcxx 20070719 (from GCC 4.2.1, the last GPL 2 version). -*/ -#elif XBYAK_GNUC_PREREQ(4, 5) || (XBYAK_GNUC_PREREQ(4, 2) && __GLIBCXX__ >= 20070719) || defined(__INTEL_COMPILER) || defined(__llvm__) - #include - #define XBYAK_STD_UNORDERED_SET std::tr1::unordered_set - #include - #define XBYAK_STD_UNORDERED_MAP std::tr1::unordered_map - #define XBYAK_STD_UNORDERED_MULTIMAP std::tr1::unordered_multimap - -#elif defined(_MSC_VER) && (_MSC_VER >= 1500) && (_MSC_VER < 1600) - #include - #define XBYAK_STD_UNORDERED_SET std::tr1::unordered_set - #include - #define XBYAK_STD_UNORDERED_MAP std::tr1::unordered_map - #define XBYAK_STD_UNORDERED_MULTIMAP std::tr1::unordered_multimap - -#else - #include - #define XBYAK_STD_UNORDERED_SET std::set - #include - #define XBYAK_STD_UNORDERED_MAP std::map - #define XBYAK_STD_UNORDERED_MULTIMAP std::multimap -#endif -#ifdef _WIN32 - #include - #include - #include -#elif defined(__GNUC__) - #include - #include - #include -#endif -#if !defined(_MSC_VER) || (_MSC_VER >= 1600) - #include -#endif - -#if defined(_WIN64) || defined(__MINGW64__) || (defined(__CYGWIN__) && defined(__x86_64__)) - #define XBYAK64_WIN -#elif defined(__x86_64__) - #define XBYAK64_GCC -#endif -#if !defined(XBYAK64) && !defined(XBYAK32) - #if defined(XBYAK64_GCC) || defined(XBYAK64_WIN) - #define XBYAK64 - #else - #define XBYAK32 - #endif -#endif - -#if (__cplusplus >= 201103) || (_MSC_VER >= 1800) - #define XBYAK_VARIADIC_TEMPLATE -#endif - -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable : 4514) /* remove inline function */ - #pragma warning(disable : 4786) /* identifier is too long */ - #pragma warning(disable : 4503) /* name is too long */ - #pragma warning(disable : 4127) /* constant expresison */ -#endif - -namespace Xbyak { - -enum { - DEFAULT_MAX_CODE_SIZE = 4096, - VERSION = 0x5751 /* 0xABCD = A.BC(D) */ -}; - -#ifndef MIE_INTEGER_TYPE_DEFINED -#define MIE_INTEGER_TYPE_DEFINED -#ifdef _MSC_VER - typedef unsigned __int64 uint64; - typedef __int64 sint64; -#else - typedef uint64_t uint64; - typedef int64_t sint64; -#endif -typedef unsigned int uint32; -typedef unsigned short uint16; -typedef unsigned char uint8; -#endif - -#ifndef MIE_ALIGN - #ifdef _MSC_VER - #define MIE_ALIGN(x) __declspec(align(x)) - #else - #define MIE_ALIGN(x) __attribute__((aligned(x))) - #endif -#endif -#ifndef MIE_PACK // for shufps - #define MIE_PACK(x, y, z, w) ((x) * 64 + (y) * 16 + (z) * 4 + (w)) -#endif - -enum { - ERR_NONE = 0, - ERR_BAD_ADDRESSING, - ERR_CODE_IS_TOO_BIG, - ERR_BAD_SCALE, - ERR_ESP_CANT_BE_INDEX, - 
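The VERSION constant above packs the release into hex nibbles (0xABCD reads A.BC(D)), so decoding follows the same scheme:

    // with xbyak.h included
    #include <cstdio>
    int main()
    {
        int v = Xbyak::VERSION;   // 0x5751
        std::printf("xbyak %x.%02x(%x)\n", v >> 12, (v >> 4) & 0xff, v & 0xf);
    }                             // prints: xbyak 5.75(1)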
ERR_BAD_COMBINATION, - ERR_BAD_SIZE_OF_REGISTER, - ERR_IMM_IS_TOO_BIG, - ERR_BAD_ALIGN, - ERR_LABEL_IS_REDEFINED, - ERR_LABEL_IS_TOO_FAR, - ERR_LABEL_IS_NOT_FOUND, - ERR_CODE_ISNOT_COPYABLE, - ERR_BAD_PARAMETER, - ERR_CANT_PROTECT, - ERR_CANT_USE_64BIT_DISP, - ERR_OFFSET_IS_TOO_BIG, - ERR_MEM_SIZE_IS_NOT_SPECIFIED, - ERR_BAD_MEM_SIZE, - ERR_BAD_ST_COMBINATION, - ERR_OVER_LOCAL_LABEL, // not used - ERR_UNDER_LOCAL_LABEL, - ERR_CANT_ALLOC, - ERR_ONLY_T_NEAR_IS_SUPPORTED_IN_AUTO_GROW, - ERR_BAD_PROTECT_MODE, - ERR_BAD_PNUM, - ERR_BAD_TNUM, - ERR_BAD_VSIB_ADDRESSING, - ERR_CANT_CONVERT, - ERR_LABEL_ISNOT_SET_BY_L, - ERR_LABEL_IS_ALREADY_SET_BY_L, - ERR_BAD_LABEL_STR, - ERR_MUNMAP, - ERR_OPMASK_IS_ALREADY_SET, - ERR_ROUNDING_IS_ALREADY_SET, - ERR_K0_IS_INVALID, - ERR_EVEX_IS_INVALID, - ERR_SAE_IS_INVALID, - ERR_ER_IS_INVALID, - ERR_INVALID_BROADCAST, - ERR_INVALID_OPMASK_WITH_MEMORY, - ERR_INVALID_ZERO, - ERR_INVALID_RIP_IN_AUTO_GROW, - ERR_INVALID_MIB_ADDRESS, - ERR_INTERNAL -}; - -class Error : public std::exception { - int err_; -public: - explicit Error(int err) : err_(err) - { - if (err_ < 0 || err_ > ERR_INTERNAL) { - fprintf(stderr, "bad err=%d in Xbyak::Error\n", err_); - exit(1); - } - } - operator int() const { return err_; } - const char *what() const throw() - { - static const char *errTbl[] = { - "none", - "bad addressing", - "code is too big", - "bad scale", - "esp can't be index", - "bad combination", - "bad size of register", - "imm is too big", - "bad align", - "label is redefined", - "label is too far", - "label is not found", - "code is not copyable", - "bad parameter", - "can't protect", - "can't use 64bit disp(use (void*))", - "offset is too big", - "MEM size is not specified", - "bad mem size", - "bad st combination", - "over local label", - "under local label", - "can't alloc", - "T_SHORT is not supported in AutoGrow", - "bad protect mode", - "bad pNum", - "bad tNum", - "bad vsib addressing", - "can't convert", - "label is not set by L()", - "label is already set by L()", - "bad label string", - "err munmap", - "opmask is already set", - "rounding is already set", - "k0 is invalid", - "evex is invalid", - "sae(suppress all exceptions) is invalid", - "er(embedded rounding) is invalid", - "invalid broadcast", - "invalid opmask with memory", - "invalid zero", - "invalid rip in AutoGrow", - "invalid mib address", - "internal error", - }; - assert((size_t)err_ < sizeof(errTbl) / sizeof(*errTbl)); - return errTbl[err_]; - } -}; - -inline const char *ConvertErrorToString(const Error& err) -{ - return err.what(); -} - -inline void *AlignedMalloc(size_t size, size_t alignment) -{ -#ifdef __MINGW32__ - return __mingw_aligned_malloc(size, alignment); -#elif defined(_WIN32) - return _aligned_malloc(size, alignment); -#else - void *p; - int ret = posix_memalign(&p, alignment, size); - return (ret == 0) ? 
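Error above doubles as an int (the code, via operator int) and a std::exception (the message, via errTbl); the generator classes layered on this header throw it rather than returning status codes, so call sites catch a single type:

    #include <cstdio>
    try {
        throw Xbyak::Error(Xbyak::ERR_CANT_ALLOC);   // stand-in for a failing emit
    } catch (const Xbyak::Error& e) {
        std::fprintf(stderr, "xbyak error %d: %s\n", (int)e, e.what());
    }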
p : 0; -#endif -} - -inline void AlignedFree(void *p) -{ -#ifdef __MINGW32__ - __mingw_aligned_free(p); -#elif defined(_MSC_VER) - _aligned_free(p); -#else - free(p); -#endif -} - -template -inline const To CastTo(From p) throw() -{ - return (const To)(size_t)(p); -} -namespace inner { - -static const size_t ALIGN_PAGE_SIZE = 4096; - -inline bool IsInDisp8(uint32 x) { return 0xFFFFFF80 <= x || x <= 0x7F; } -inline bool IsInInt32(uint64 x) { return ~uint64(0x7fffffffu) <= x || x <= 0x7FFFFFFFU; } - -inline uint32 VerifyInInt32(uint64 x) -{ -#ifdef XBYAK64 - if (!IsInInt32(x)) throw Error(ERR_OFFSET_IS_TOO_BIG); -#endif - return static_cast(x); -} - -enum LabelMode { - LasIs, // as is - Labs, // absolute - LaddTop // (addr + top) for mov(reg, label) with AutoGrow -}; - -} // inner - -/* - custom allocator -*/ -struct Allocator { - virtual uint8 *alloc(size_t size) { return reinterpret_cast(AlignedMalloc(size, inner::ALIGN_PAGE_SIZE)); } - virtual void free(uint8 *p) { AlignedFree(p); } - virtual ~Allocator() {} - /* override to return false if you call protect() manually */ - virtual bool useProtect() const { return true; } -}; - -#ifdef XBYAK_USE_MMAP_ALLOCATOR -class MmapAllocator : Allocator { - typedef XBYAK_STD_UNORDERED_MAP SizeList; - SizeList sizeList_; -public: - uint8 *alloc(size_t size) - { - const size_t alignedSizeM1 = inner::ALIGN_PAGE_SIZE - 1; - size = (size + alignedSizeM1) & ~alignedSizeM1; -#ifdef MAP_ANONYMOUS - const int mode = MAP_PRIVATE | MAP_ANONYMOUS; -#elif defined(MAP_ANON) - const int mode = MAP_PRIVATE | MAP_ANON; -#else - #error "not supported" -#endif - void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, mode, -1, 0); - if (p == MAP_FAILED) throw Error(ERR_CANT_ALLOC); - assert(p); - sizeList_[(uintptr_t)p] = size; - return (uint8*)p; - } - void free(uint8 *p) - { - if (p == 0) return; - SizeList::iterator i = sizeList_.find((uintptr_t)p); - if (i == sizeList_.end()) throw Error(ERR_BAD_PARAMETER); - if (munmap((void*)i->first, i->second) < 0) throw Error(ERR_MUNMAP); - sizeList_.erase(i); - } -}; -#endif - -class Address; -class Reg; - -class Operand { - static const uint8 EXT8BIT = 0x20; - unsigned int idx_:6; // 0..31 + EXT8BIT = 1 if spl/bpl/sil/dil - unsigned int kind_:9; - unsigned int bit_:10; -protected: - unsigned int zero_:1; - unsigned int mask_:3; - unsigned int rounding_:3; - void setIdx(int idx) { idx_ = idx; } -public: - enum Kind { - NONE = 0, - MEM = 1 << 0, - REG = 1 << 1, - MMX = 1 << 2, - FPU = 1 << 3, - XMM = 1 << 4, - YMM = 1 << 5, - ZMM = 1 << 6, - OPMASK = 1 << 7, - BNDREG = 1 << 8 - }; - enum Code { -#ifdef XBYAK64 - RAX = 0, RCX, RDX, RBX, RSP, RBP, RSI, RDI, R8, R9, R10, R11, R12, R13, R14, R15, - R8D = 8, R9D, R10D, R11D, R12D, R13D, R14D, R15D, - R8W = 8, R9W, R10W, R11W, R12W, R13W, R14W, R15W, - R8B = 8, R9B, R10B, R11B, R12B, R13B, R14B, R15B, - SPL = 4, BPL, SIL, DIL, -#endif - EAX = 0, ECX, EDX, EBX, ESP, EBP, ESI, EDI, - AX = 0, CX, DX, BX, SP, BP, SI, DI, - AL = 0, CL, DL, BL, AH, CH, DH, BH - }; - Operand() : idx_(0), kind_(0), bit_(0), zero_(0), mask_(0), rounding_(0) { } - Operand(int idx, Kind kind, int bit, bool ext8bit = 0) - : idx_(static_cast(idx | (ext8bit ? 
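Allocator above is the override point for code pages: the default routes through AlignedMalloc on ALIGN_PAGE_SIZE boundaries, MmapAllocator swaps in mmap, and useProtect() reports whether the consumer must still protect the pages itself. A custom allocator only needs to shadow the same virtuals (C++03 style to match the header; the bookkeeping is illustrative):

    struct CountingAllocator : Xbyak::Allocator {
        size_t total;
        CountingAllocator() : total(0) {}
        Xbyak::uint8 *alloc(size_t size)
        {
            total += size;                        // track code-page demand
            return Xbyak::Allocator::alloc(size);
        }
        // free() and useProtect() keep the base-class behavior
    };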
EXT8BIT : 0))) - , kind_(kind) - , bit_(bit) - , zero_(0), mask_(0), rounding_(0) - { - assert((bit_ & (bit_ - 1)) == 0); // bit must be power of two - } - Kind getKind() const { return static_cast(kind_); } - int getIdx() const { return idx_ & (EXT8BIT - 1); } - bool isNone() const { return kind_ == 0; } - bool isMMX() const { return is(MMX); } - bool isXMM() const { return is(XMM); } - bool isYMM() const { return is(YMM); } - bool isZMM() const { return is(ZMM); } - bool isXMEM() const { return is(XMM | MEM); } - bool isYMEM() const { return is(YMM | MEM); } - bool isZMEM() const { return is(ZMM | MEM); } - bool isOPMASK() const { return is(OPMASK); } - bool isBNDREG() const { return is(BNDREG); } - bool isREG(int bit = 0) const { return is(REG, bit); } - bool isMEM(int bit = 0) const { return is(MEM, bit); } - bool isFPU() const { return is(FPU); } - bool isExt8bit() const { return (idx_ & EXT8BIT) != 0; } - bool isExtIdx() const { return (getIdx() & 8) != 0; } - bool isExtIdx2() const { return (getIdx() & 16) != 0; } - bool hasEvex() const { return isZMM() || isExtIdx2() || getOpmaskIdx() || getRounding(); } - bool hasRex() const { return isExt8bit() || isREG(64) || isExtIdx(); } - bool hasZero() const { return zero_; } - int getOpmaskIdx() const { return mask_; } - int getRounding() const { return rounding_; } - void setKind(Kind kind) - { - if ((kind & (XMM|YMM|ZMM)) == 0) return; - kind_ = kind; - bit_ = kind == XMM ? 128 : kind == YMM ? 256 : 512; - } - void setBit(int bit) { bit_ = bit; } - void setOpmaskIdx(int idx, bool ignore_idx0 = false) - { - if (!ignore_idx0 && idx == 0) throw Error(ERR_K0_IS_INVALID); - if (mask_) throw Error(ERR_OPMASK_IS_ALREADY_SET); - mask_ = idx; - } - void setRounding(int idx) - { - if (rounding_) throw Error(ERR_ROUNDING_IS_ALREADY_SET); - rounding_ = idx; - } - void setZero() { zero_ = true; } - // ah, ch, dh, bh? - bool isHigh8bit() const - { - if (!isBit(8)) return false; - if (isExt8bit()) return false; - const int idx = getIdx(); - return AH <= idx && idx <= BH; - } - // any bit is accetable if bit == 0 - bool is(int kind, uint32 bit = 0) const - { - return (kind == 0 || (kind_ & kind)) && (bit == 0 || (bit_ & bit)); // cf. you can set (8|16) - } - bool isBit(uint32 bit) const { return (bit_ & bit) != 0; } - uint32 getBit() const { return bit_; } - const char *toString() const - { - const int idx = getIdx(); - if (kind_ == REG) { - if (isExt8bit()) { - static const char *tbl[4] = { "spl", "bpl", "sil", "dil" }; - return tbl[idx - 4]; - } - static const char *tbl[4][16] = { - { "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh", "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" }, - { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di", "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w" }, - { "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi", "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d" }, - { "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" }, - }; - return tbl[bit_ == 8 ? 0 : bit_ == 16 ? 1 : bit_ == 32 ? 
2 : 3][idx]; - } else if (isOPMASK()) { - static const char *tbl[8] = { "k0", "k1", "k2", "k3", "k4", "k5", "k6", "k7" }; - return tbl[idx]; - } else if (isZMM()) { - static const char *tbl[32] = { - "zmm0", "zmm1", "zmm2", "zmm3", "zmm4", "zmm5", "zmm6", "zmm7", "zmm8", "zmm9", "zmm10", "zmm11", "zmm12", "zmm13", "zmm14", "zmm15", - "zmm16", "zmm17", "zmm18", "zmm19", "zmm20", "zmm21", "zmm22", "zmm23", "zmm24", "zmm25", "zmm26", "zmm27", "zmm28", "zmm29", "zmm30", "zmm31" - }; - return tbl[idx]; - } else if (isYMM()) { - static const char *tbl[32] = { - "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15", - "ymm16", "ymm17", "ymm18", "ymm19", "ymm20", "ymm21", "ymm22", "ymm23", "ymm24", "ymm25", "ymm26", "ymm27", "ymm28", "ymm29", "ymm30", "ymm31" - }; - return tbl[idx]; - } else if (isXMM()) { - static const char *tbl[32] = { - "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", - "xmm16", "xmm17", "xmm18", "xmm19", "xmm20", "xmm21", "xmm22", "xmm23", "xmm24", "xmm25", "xmm26", "xmm27", "xmm28", "xmm29", "xmm30", "xmm31" - }; - return tbl[idx]; - } else if (isMMX()) { - static const char *tbl[8] = { "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" }; - return tbl[idx]; - } else if (isFPU()) { - static const char *tbl[8] = { "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7" }; - return tbl[idx]; - } else if (isBNDREG()) { - static const char *tbl[4] = { "bnd0", "bnd1", "bnd2", "bnd3" }; - return tbl[idx]; - } - throw Error(ERR_INTERNAL); - } - bool isEqualIfNotInherited(const Operand& rhs) const { return idx_ == rhs.idx_ && kind_ == rhs.kind_ && bit_ == rhs.bit_ && zero_ == rhs.zero_ && mask_ == rhs.mask_ && rounding_ == rhs.rounding_; } - bool operator==(const Operand& rhs) const; - bool operator!=(const Operand& rhs) const { return !operator==(rhs); } - const Address& getAddress() const; - const Reg& getReg() const; -}; - -class Label; - -struct Reg8; -struct Reg16; -struct Reg32; -#ifdef XBYAK64 -struct Reg64; -#endif -class Reg : public Operand { -public: - Reg() { } - Reg(int idx, Kind kind, int bit = 0, bool ext8bit = false) : Operand(idx, kind, bit, ext8bit) { } - Reg changeBit(int bit) const { return Reg(getIdx(), getKind(), bit, isExt8bit()); } - uint8 getRexW() const { return isREG(64) ? 8 : 0; } - uint8 getRexR() const { return isExtIdx() ? 4 : 0; } - uint8 getRexX() const { return isExtIdx() ? 2 : 0; } - uint8 getRexB() const { return isExtIdx() ? 
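// [Editor's aside, not part of the patch] Operand packs its identity into three
// bitfields, so the is(kind, bit) query above reduces to two masked tests. A tiny
// self-contained sketch of that idea; the enum values mirror Operand::Kind but the
// type names are illustrative.
#include <cstdio>

struct Desc {
	enum Kind { MEM = 1 << 0, REG = 1 << 1, XMM = 1 << 4 };
	unsigned kind;
	unsigned bit; // width as a one-hot value: 8, 16, 32, 64, 128, ...
	bool is(unsigned kindMask, unsigned bitMask = 0) const
	{
		return (kindMask == 0 || (kind & kindMask)) && (bitMask == 0 || (bit & bitMask));
	}
};

int main()
{
	Desc xmm0 = { Desc::XMM, 128 };
	std::printf("%d %d\n", xmm0.is(Desc::XMM | Desc::MEM), xmm0.is(Desc::REG, 32)); // prints 1 0
}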
1 : 0; } - uint8 getRex(const Reg& base = Reg()) const - { - uint8 rex = getRexW() | getRexR() | base.getRexW() | base.getRexB(); - if (rex || isExt8bit() || base.isExt8bit()) rex |= 0x40; - return rex; - } - Reg8 cvt8() const; - Reg16 cvt16() const; - Reg32 cvt32() const; -#ifdef XBYAK64 - Reg64 cvt64() const; -#endif -}; - -inline const Reg& Operand::getReg() const -{ - assert(!isMEM()); - return static_cast(*this); -} - -struct Reg8 : public Reg { - explicit Reg8(int idx = 0, bool ext8bit = false) : Reg(idx, Operand::REG, 8, ext8bit) { } -}; - -struct Reg16 : public Reg { - explicit Reg16(int idx = 0) : Reg(idx, Operand::REG, 16) { } -}; - -struct Mmx : public Reg { - explicit Mmx(int idx = 0, Kind kind = Operand::MMX, int bit = 64) : Reg(idx, kind, bit) { } -}; - -struct EvexModifierRounding { - enum { - T_RN_SAE = 1, - T_RD_SAE = 2, - T_RU_SAE = 3, - T_RZ_SAE = 4, - T_SAE = 5 - }; - explicit EvexModifierRounding(int rounding) : rounding(rounding) {} - int rounding; -}; -struct EvexModifierZero{EvexModifierZero() {}}; - -struct Xmm : public Mmx { - explicit Xmm(int idx = 0, Kind kind = Operand::XMM, int bit = 128) : Mmx(idx, kind, bit) { } - Xmm(Kind kind, int idx) : Mmx(idx, kind, kind == XMM ? 128 : kind == YMM ? 256 : 512) { } - Xmm operator|(const EvexModifierRounding& emr) const { Xmm r(*this); r.setRounding(emr.rounding); return r; } - Xmm copyAndSetIdx(int idx) const { Xmm ret(*this); ret.setIdx(idx); return ret; } - Xmm copyAndSetKind(Operand::Kind kind) const { Xmm ret(*this); ret.setKind(kind); return ret; } -}; - -struct Ymm : public Xmm { - explicit Ymm(int idx = 0, Kind kind = Operand::YMM, int bit = 256) : Xmm(idx, kind, bit) { } - Ymm operator|(const EvexModifierRounding& emr) const { Ymm r(*this); r.setRounding(emr.rounding); return r; } -}; - -struct Zmm : public Ymm { - explicit Zmm(int idx = 0) : Ymm(idx, Operand::ZMM, 512) { } - Zmm operator|(const EvexModifierRounding& emr) const { Zmm r(*this); r.setRounding(emr.rounding); return r; } -}; - -struct Opmask : public Reg { - explicit Opmask(int idx = 0) : Reg(idx, Operand::OPMASK, 64) {} -}; - -struct BoundsReg : public Reg { - explicit BoundsReg(int idx = 0) : Reg(idx, Operand::BNDREG, 128) {} -}; - -templateT operator|(const T& x, const Opmask& k) { T r(x); r.setOpmaskIdx(k.getIdx()); return r; } -templateT operator|(const T& x, const EvexModifierZero&) { T r(x); r.setZero(); return r; } -templateT operator|(const T& x, const EvexModifierRounding& emr) { T r(x); r.setRounding(emr.rounding); return r; } - -struct Fpu : public Reg { - explicit Fpu(int idx = 0) : Reg(idx, Operand::FPU, 32) { } -}; - -struct Reg32e : public Reg { - explicit Reg32e(int idx, int bit) : Reg(idx, Operand::REG, bit) {} -}; -struct Reg32 : public Reg32e { - explicit Reg32(int idx = 0) : Reg32e(idx, 32) {} -}; -#ifdef XBYAK64 -struct Reg64 : public Reg32e { - explicit Reg64(int idx = 0) : Reg32e(idx, 64) {} -}; -struct RegRip { - sint64 disp_; - const Label* label_; - bool isAddr_; - explicit RegRip(sint64 disp = 0, const Label* label = 0, bool isAddr = false) : disp_(disp), label_(label), isAddr_(isAddr) {} - friend const RegRip operator+(const RegRip& r, int disp) { - return RegRip(r.disp_ + disp, r.label_, r.isAddr_); - } - friend const RegRip operator-(const RegRip& r, int disp) { - return RegRip(r.disp_ - disp, r.label_, r.isAddr_); - } - friend const RegRip operator+(const RegRip& r, sint64 disp) { - return RegRip(r.disp_ + disp, r.label_, r.isAddr_); - } - friend const RegRip operator-(const RegRip& r, sint64 disp) { - return 
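// [Editor's aside, not part of the patch] Reg::getRex() above composes the REX
// prefix from four one-bit fields: W widens to 64-bit operands, while R/X/B extend
// the ModRM reg, SIB index, and base/rm fields. A minimal sketch of the same
// computation; the function name is illustrative.
#include <cstdint>
#include <cstdio>

uint8_t rexByte(bool w, bool regExt, bool indexExt, bool baseExt)
{
	uint8_t rex = (uint8_t)((w ? 8 : 0) | (regExt ? 4 : 0) | (indexExt ? 2 : 0) | (baseExt ? 1 : 0));
	return rex ? (uint8_t)(0x40 | rex) : 0; // 0 means no REX prefix is required
}

int main()
{
	// mov r8, rax: 64-bit operand (W) with an extended destination in ModRM.reg (R)
	std::printf("0x%02X\n", rexByte(true, true, false, false)); // prints 0x4C
}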
RegRip(r.disp_ - disp, r.label_, r.isAddr_); - } - friend const RegRip operator+(const RegRip& r, const Label& label) { - if (r.label_ || r.isAddr_) throw Error(ERR_BAD_ADDRESSING); - return RegRip(r.disp_, &label); - } - friend const RegRip operator+(const RegRip& r, const void *addr) { - if (r.label_ || r.isAddr_) throw Error(ERR_BAD_ADDRESSING); - return RegRip(r.disp_ + (sint64)addr, 0, true); - } -}; -#endif - -inline Reg8 Reg::cvt8() const -{ - const int idx = getIdx(); - if (isBit(8)) return Reg8(idx, isExt8bit()); -#ifdef XBYAK32 - if (idx >= 4) throw Error(ERR_CANT_CONVERT); -#endif - return Reg8(idx, 4 <= idx && idx < 8); -} - -inline Reg16 Reg::cvt16() const -{ - const int idx = getIdx(); - if (isBit(8) && (4 <= idx && idx < 8) && !isExt8bit()) throw Error(ERR_CANT_CONVERT); - return Reg16(idx); -} - -inline Reg32 Reg::cvt32() const -{ - const int idx = getIdx(); - if (isBit(8) && (4 <= idx && idx < 8) && !isExt8bit()) throw Error(ERR_CANT_CONVERT); - return Reg32(idx); -} - -#ifdef XBYAK64 -inline Reg64 Reg::cvt64() const -{ - const int idx = getIdx(); - if (isBit(8) && (4 <= idx && idx < 8) && !isExt8bit()) throw Error(ERR_CANT_CONVERT); - return Reg64(idx); -} -#endif - -#ifndef XBYAK_DISABLE_SEGMENT -// not derived from Reg -class Segment { - int idx_; -public: - enum { - es, cs, ss, ds, fs, gs - }; - explicit Segment(int idx) : idx_(idx) { assert(0 <= idx_ && idx_ < 6); } - int getIdx() const { return idx_; } - const char *toString() const - { - static const char tbl[][3] = { - "es", "cs", "ss", "ds", "fs", "gs" - }; - return tbl[idx_]; - } -}; -#endif - -class RegExp { -public: -#ifdef XBYAK64 - enum { i32e = 32 | 64 }; -#else - enum { i32e = 32 }; -#endif - RegExp(size_t disp = 0) : scale_(0), disp_(disp) { } - RegExp(const Reg& r, int scale = 1) - : scale_(scale) - , disp_(0) - { - if (!r.isREG(i32e) && !r.is(Reg::XMM|Reg::YMM|Reg::ZMM)) throw Error(ERR_BAD_SIZE_OF_REGISTER); - if (scale == 0) return; - if (scale != 1 && scale != 2 && scale != 4 && scale != 8) throw Error(ERR_BAD_SCALE); - if (r.getBit() >= 128 || scale != 1) { // xmm/ymm is always index - index_ = r; - } else { - base_ = r; - } - } - bool isVsib(int bit = 128 | 256 | 512) const { return index_.isBit(bit); } - RegExp optimize() const - { - RegExp exp = *this; - // [reg * 2] => [reg + reg] - if (index_.isBit(i32e) && !base_.getBit() && scale_ == 2) { - exp.base_ = index_; - exp.scale_ = 1; - } - return exp; - } - bool operator==(const RegExp& rhs) const - { - return base_ == rhs.base_ && index_ == rhs.index_ && disp_ == rhs.disp_ && scale_ == rhs.scale_; - } - const Reg& getBase() const { return base_; } - const Reg& getIndex() const { return index_; } - int getScale() const { return scale_; } - size_t getDisp() const { return disp_; } - void verify() const - { - if (base_.getBit() >= 128) throw Error(ERR_BAD_SIZE_OF_REGISTER); - if (index_.getBit() && index_.getBit() <= 64) { - if (index_.getIdx() == Operand::ESP) throw Error(ERR_ESP_CANT_BE_INDEX); - if (base_.getBit() && base_.getBit() != index_.getBit()) throw Error(ERR_BAD_SIZE_OF_REGISTER); - } - } - friend RegExp operator+(const RegExp& a, const RegExp& b); - friend RegExp operator-(const RegExp& e, size_t disp); - uint8 getRex() const - { - uint8 rex = index_.getRexX() | base_.getRexB(); - return rex ? 
uint8(rex | 0x40) : 0; - } -private: - /* - [base_ + index_ * scale_ + disp_] - base : Reg32e, index : Reg32e(w/o esp), Xmm, Ymm - */ - Reg base_; - Reg index_; - int scale_; - size_t disp_; -}; - -inline RegExp operator+(const RegExp& a, const RegExp& b) -{ - if (a.index_.getBit() && b.index_.getBit()) throw Error(ERR_BAD_ADDRESSING); - RegExp ret = a; - if (!ret.index_.getBit()) { ret.index_ = b.index_; ret.scale_ = b.scale_; } - if (b.base_.getBit()) { - if (ret.base_.getBit()) { - if (ret.index_.getBit()) throw Error(ERR_BAD_ADDRESSING); - // base + base => base + index * 1 - ret.index_ = b.base_; - // [reg + esp] => [esp + reg] - if (ret.index_.getIdx() == Operand::ESP) std::swap(ret.base_, ret.index_); - ret.scale_ = 1; - } else { - ret.base_ = b.base_; - } - } - ret.disp_ += b.disp_; - return ret; -} -inline RegExp operator*(const Reg& r, int scale) -{ - return RegExp(r, scale); -} -inline RegExp operator-(const RegExp& e, size_t disp) -{ - RegExp ret = e; - ret.disp_ -= disp; - return ret; -} - -// 2nd parameter for constructor of CodeArray(maxSize, userPtr, alloc) -void *const AutoGrow = (void*)1; //-V566 -void *const DontSetProtectRWE = (void*)2; //-V566 - -class CodeArray { - enum Type { - USER_BUF = 1, // use userPtr(non alignment, non protect) - ALLOC_BUF, // use new(alignment, protect) - AUTO_GROW // automatically move and grow memory if necessary - }; - CodeArray(const CodeArray& rhs); - void operator=(const CodeArray&); - bool isAllocType() const { return type_ == ALLOC_BUF || type_ == AUTO_GROW; } - struct AddrInfo { - size_t codeOffset; // position to write - size_t jmpAddr; // value to write - int jmpSize; // size of jmpAddr - inner::LabelMode mode; - AddrInfo(size_t _codeOffset, size_t _jmpAddr, int _jmpSize, inner::LabelMode _mode) - : codeOffset(_codeOffset), jmpAddr(_jmpAddr), jmpSize(_jmpSize), mode(_mode) {} - uint64 getVal(const uint8 *top) const - { - uint64 disp = (mode == inner::LaddTop) ? jmpAddr + size_t(top) : (mode == inner::LasIs) ? jmpAddr : jmpAddr - size_t(top); - if (jmpSize == 4) disp = inner::VerifyInInt32(disp); - return disp; - } - }; - typedef std::list AddrInfoList; - AddrInfoList addrInfoList_; - const Type type_; -#ifdef XBYAK_USE_MMAP_ALLOCATOR - MmapAllocator defaultAllocator_; -#else - Allocator defaultAllocator_; -#endif - Allocator *alloc_; -protected: - size_t maxSize_; - uint8 *top_; - size_t size_; - bool isCalledCalcJmpAddress_; - - bool useProtect() const { return alloc_->useProtect(); } - /* - allocate new memory and copy old data to the new area - */ - void growMemory() - { - const size_t newSize = (std::max)(DEFAULT_MAX_CODE_SIZE, maxSize_ * 2); - uint8 *newTop = alloc_->alloc(newSize); - if (newTop == 0) throw Error(ERR_CANT_ALLOC); - for (size_t i = 0; i < size_; i++) newTop[i] = top_[i]; - alloc_->free(top_); - top_ = newTop; - maxSize_ = newSize; - } - /* - calc jmp address for AutoGrow mode - */ - void calcJmpAddress() - { - if (isCalledCalcJmpAddress_) return; - for (AddrInfoList::const_iterator i = addrInfoList_.begin(), ie = addrInfoList_.end(); i != ie; ++i) { - uint64 disp = i->getVal(top_); - rewrite(i->codeOffset, disp, i->jmpSize); - } - isCalledCalcJmpAddress_ = true; - } -public: - enum ProtectMode { - PROTECT_RW = 0, // read/write - PROTECT_RWE = 1, // read/write/exec - PROTECT_RE = 2 // read/exec - }; - explicit CodeArray(size_t maxSize, void *userPtr = 0, Allocator *allocator = 0) - : type_(userPtr == AutoGrow ? AUTO_GROW : (userPtr == 0 || userPtr == DontSetProtectRWE) ? 
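// [Editor's aside, not part of the patch] RegExp above lets addressing expressions
// such as [rbx + rcx*4 + 8] be written directly: operator* attaches a scale to a
// register and operator+ merges partial expressions, rejecting a second index
// register. A reduced sketch of that algebra, with illustrative type names.
#include <stdexcept>

struct R { int id; };                  // stand-in for a register operand
struct Exp {
	int base, index, scale; long disp; // -1 marks an absent register
	Exp(int b = -1, int i = -1, int s = 0, long d = 0) : base(b), index(i), scale(s), disp(d) {}
	Exp(R r) : base(r.id), index(-1), scale(0), disp(0) {}
};

Exp operator*(R r, int scale)
{
	if (scale != 1 && scale != 2 && scale != 4 && scale != 8) throw std::runtime_error("bad scale");
	return Exp(-1, r.id, scale);
}
Exp operator+(const Exp& a, const Exp& b)
{
	if (a.index >= 0 && b.index >= 0) throw std::runtime_error("two index registers");
	Exp r = a;
	if (r.index < 0) { r.index = b.index; r.scale = b.scale; }
	if (b.base >= 0) {
		if (r.base >= 0) { r.index = b.base; r.scale = 1; } // base+base -> base + index*1
		else r.base = b.base;
	}
	r.disp += b.disp;
	return r;
}
// Exp e = Exp(R{3}) + R{1} * 4; e.disp += 8; // models [rbx + rcx*4 + 8]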
ALLOC_BUF : USER_BUF) - , alloc_(allocator ? allocator : (Allocator*)&defaultAllocator_) - , maxSize_(maxSize) - , top_(type_ == USER_BUF ? reinterpret_cast(userPtr) : alloc_->alloc((std::max)(maxSize, 1))) - , size_(0) - , isCalledCalcJmpAddress_(false) - { - if (maxSize_ > 0 && top_ == 0) throw Error(ERR_CANT_ALLOC); - if ((type_ == ALLOC_BUF && userPtr != DontSetProtectRWE && useProtect()) && !setProtectMode(PROTECT_RWE, false)) { - alloc_->free(top_); - throw Error(ERR_CANT_PROTECT); - } - } - virtual ~CodeArray() - { - if (isAllocType()) { - if (useProtect()) setProtectModeRW(false); - alloc_->free(top_); - } - } - bool setProtectMode(ProtectMode mode, bool throwException = true) - { - bool isOK = protect(top_, maxSize_, mode); - if (isOK) return true; - if (throwException) throw Error(ERR_CANT_PROTECT); - return false; - } - bool setProtectModeRE(bool throwException = true) { return setProtectMode(PROTECT_RE, throwException); } - bool setProtectModeRW(bool throwException = true) { return setProtectMode(PROTECT_RW, throwException); } - void resetSize() - { - size_ = 0; - addrInfoList_.clear(); - isCalledCalcJmpAddress_ = false; - } - void db(int code) - { - if (size_ >= maxSize_) { - if (type_ == AUTO_GROW) { - growMemory(); - } else { - throw Error(ERR_CODE_IS_TOO_BIG); - } - } - top_[size_++] = static_cast(code); - } - void db(const uint8 *code, size_t codeSize) - { - for (size_t i = 0; i < codeSize; i++) db(code[i]); - } - void db(uint64 code, size_t codeSize) - { - if (codeSize > 8) throw Error(ERR_BAD_PARAMETER); - for (size_t i = 0; i < codeSize; i++) db(static_cast(code >> (i * 8))); - } - void dw(uint32 code) { db(code, 2); } - void dd(uint32 code) { db(code, 4); } - void dq(uint64 code) { db(code, 8); } - const uint8 *getCode() const { return top_; } - template - const F getCode() const { return reinterpret_cast(top_); } - const uint8 *getCurr() const { return &top_[size_]; } - template - const F getCurr() const { return reinterpret_cast(&top_[size_]); } - size_t getSize() const { return size_; } - void setSize(size_t size) - { - if (size > maxSize_) throw Error(ERR_OFFSET_IS_TOO_BIG); - size_ = size; - } - void dump() const - { - const uint8 *p = getCode(); - size_t bufSize = getSize(); - size_t remain = bufSize; - for (int i = 0; i < 4; i++) { - size_t disp = 16; - if (remain < 16) { - disp = remain; - } - for (size_t j = 0; j < 16; j++) { - if (j < disp) { - printf("%02X", p[i * 16 + j]); - } - } - putchar('\n'); - remain -= disp; - if (remain == 0) { - break; - } - } - } - /* - @param offset [in] offset from top - @param disp [in] offset from the next of jmp - @param size [in] write size(1, 2, 4, 8) - */ - void rewrite(size_t offset, uint64 disp, size_t size) - { - assert(offset < maxSize_); - if (size != 1 && size != 2 && size != 4 && size != 8) throw Error(ERR_BAD_PARAMETER); - uint8 *const data = top_ + offset; - for (size_t i = 0; i < size; i++) { - data[i] = static_cast(disp >> (i * 8)); - } - } - void save(size_t offset, size_t val, int size, inner::LabelMode mode) - { - addrInfoList_.push_back(AddrInfo(offset, val, size, mode)); - } - bool isAutoGrow() const { return type_ == AUTO_GROW; } - bool isCalledCalcJmpAddress() const { return isCalledCalcJmpAddress_; } - /** - change exec permission of memory - @param addr [in] buffer address - @param size [in] buffer size - @param protectMode [in] mode(RW/RWE/RE) - @return true(success), false(failure) - */ - static inline bool protect(const void *addr, size_t size, int protectMode) - { -#if defined(_WIN32) - const DWORD 
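// [Editor's aside, not part of the patch] The db/dw/dd/dq emitters above append
// multi-byte values lowest byte first, which is the little-endian order x86
// expects for immediates and displacements. A self-contained sketch with
// illustrative names:
#include <cstdint>
#include <vector>

struct Buf {
	std::vector<uint8_t> code;
	void db(int b) { code.push_back((uint8_t)b); }
	void db(uint64_t v, size_t n) { for (size_t i = 0; i < n; i++) db((uint8_t)(v >> (i * 8))); }
	void dw(uint32_t v) { db((uint64_t)v, 2); }
	void dd(uint32_t v) { db((uint64_t)v, 4); }
	void dq(uint64_t v) { db(v, 8); }
};
// Buf b; b.dd(0x11223344); // appends 44 33 22 11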
c_rw = PAGE_READWRITE; - const DWORD c_rwe = PAGE_EXECUTE_READWRITE; - const DWORD c_re = PAGE_EXECUTE_READ; - DWORD mode; -#else - const int c_rw = PROT_READ | PROT_WRITE; - const int c_rwe = PROT_READ | PROT_WRITE | PROT_EXEC; - const int c_re = PROT_READ | PROT_EXEC; - int mode; -#endif - switch (protectMode) { - case PROTECT_RW: mode = c_rw; break; - case PROTECT_RWE: mode = c_rwe; break; - case PROTECT_RE: mode = c_re; break; - default: - return false; - } -#if defined(_WIN32) - DWORD oldProtect; - return VirtualProtect(const_cast(addr), size, mode, &oldProtect) != 0; -#elif defined(__GNUC__) - size_t pageSize = sysconf(_SC_PAGESIZE); - size_t iaddr = reinterpret_cast(addr); - size_t roundAddr = iaddr & ~(pageSize - static_cast(1)); -#ifndef NDEBUG - if (pageSize != 4096) fprintf(stderr, "large page(%zd) is used. not tested enough.\n", pageSize); -#endif - return mprotect(reinterpret_cast(roundAddr), size + (iaddr - roundAddr), mode) == 0; -#else - return true; -#endif - } - /** - get aligned memory pointer - @param addr [in] address - @param alignedSize [in] power of two - @return aligned addr by alingedSize - */ - static inline uint8 *getAlignedAddress(uint8 *addr, size_t alignedSize = 16) - { - return reinterpret_cast((reinterpret_cast(addr) + alignedSize - 1) & ~(alignedSize - static_cast(1))); - } -}; - -class Address : public Operand { -public: - enum Mode { - M_ModRM, - M_64bitDisp, - M_rip, - M_ripAddr - }; - Address(uint32 sizeBit, bool broadcast, const RegExp& e) - : Operand(0, MEM, sizeBit), e_(e), label_(0), mode_(M_ModRM), broadcast_(broadcast) - { - e_.verify(); - } -#ifdef XBYAK64 - explicit Address(size_t disp) - : Operand(0, MEM, 64), e_(disp), label_(0), mode_(M_64bitDisp), broadcast_(false){ } - Address(uint32 sizeBit, bool broadcast, const RegRip& addr) - : Operand(0, MEM, sizeBit), e_(addr.disp_), label_(addr.label_), mode_(addr.isAddr_ ? M_ripAddr : M_rip), broadcast_(broadcast) { } -#endif - RegExp getRegExp(bool optimize = true) const - { - return optimize ? 
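// [Editor's aside, not part of the patch] protect() above rounds the start
// address down to a page boundary before calling mprotect(), because POSIX
// requires a page-aligned address. A minimal sketch of flipping a JIT buffer from
// read/write to read/exec under that constraint (POSIX only, names illustrative):
#include <sys/mman.h>
#include <unistd.h>
#include <cstdint>

bool makeExecutable(void *addr, size_t size)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	uintptr_t p = (uintptr_t)addr;
	uintptr_t start = p & ~(page - 1); // round down to the containing page
	size_t len = size + (p - start);   // grow so the original range stays covered
	return mprotect((void*)start, len, PROT_READ | PROT_EXEC) == 0;
}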
e_.optimize() : e_; - } - Mode getMode() const { return mode_; } - bool is32bit() const { return e_.getBase().getBit() == 32 || e_.getIndex().getBit() == 32; } - bool isOnlyDisp() const { return !e_.getBase().getBit() && !e_.getIndex().getBit(); } // for mov eax - size_t getDisp() const { return e_.getDisp(); } - uint8 getRex() const - { - if (mode_ != M_ModRM) return 0; - return getRegExp().getRex(); - } - bool is64bitDisp() const { return mode_ == M_64bitDisp; } // for moffset - bool isBroadcast() const { return broadcast_; } - const Label* getLabel() const { return label_; } - bool operator==(const Address& rhs) const - { - return getBit() == rhs.getBit() && e_ == rhs.e_ && label_ == rhs.label_ && mode_ == rhs.mode_ && broadcast_ == rhs.broadcast_; - } - bool operator!=(const Address& rhs) const { return !operator==(rhs); } - bool isVsib() const { return e_.isVsib(); } -private: - RegExp e_; - const Label* label_; - Mode mode_; - bool broadcast_; -}; - -inline const Address& Operand::getAddress() const -{ - assert(isMEM()); - return static_cast(*this); -} - -inline bool Operand::operator==(const Operand& rhs) const -{ - if (isMEM() && rhs.isMEM()) return this->getAddress() == rhs.getAddress(); - return isEqualIfNotInherited(rhs); -} - -class AddressFrame { - void operator=(const AddressFrame&); - AddressFrame(const AddressFrame&); -public: - const uint32 bit_; - const bool broadcast_; - explicit AddressFrame(uint32 bit, bool broadcast = false) : bit_(bit), broadcast_(broadcast) { } - Address operator[](const RegExp& e) const - { - return Address(bit_, broadcast_, e); - } - Address operator[](const void *disp) const - { - return Address(bit_, broadcast_, RegExp(reinterpret_cast(disp))); - } -#ifdef XBYAK64 - Address operator[](uint64 disp) const { return Address(disp); } - Address operator[](const RegRip& addr) const { return Address(bit_, broadcast_, addr); } -#endif -}; - -struct JmpLabel { - size_t endOfJmp; /* offset from top to the end address of jmp */ - int jmpSize; - inner::LabelMode mode; - size_t disp; // disp for [rip + disp] - explicit JmpLabel(size_t endOfJmp = 0, int jmpSize = 0, inner::LabelMode mode = inner::LasIs, size_t disp = 0) - : endOfJmp(endOfJmp), jmpSize(jmpSize), mode(mode), disp(disp) - { - } -}; - -class LabelManager; - -class Label { - mutable LabelManager *mgr; - mutable int id; - friend class LabelManager; -public: - Label() : mgr(0), id(0) {} - Label(const Label& rhs); - Label& operator=(const Label& rhs); - ~Label(); - void clear() { mgr = 0; id = 0; } - int getId() const { return id; } - const uint8 *getAddress() const; - - // backward compatibility - static inline std::string toStr(int num) - { - char buf[16]; -#if defined(_MSC_VER) && (_MSC_VER < 1900) - _snprintf_s -#else - snprintf -#endif - (buf, sizeof(buf), ".%08x", num); - return buf; - } -}; - -class LabelManager { - // for string label - struct SlabelVal { - size_t offset; - SlabelVal(size_t offset) : offset(offset) {} - }; - typedef XBYAK_STD_UNORDERED_MAP SlabelDefList; - typedef XBYAK_STD_UNORDERED_MULTIMAP SlabelUndefList; - struct SlabelState { - SlabelDefList defList; - SlabelUndefList undefList; - }; - typedef std::list StateList; - // for Label class - struct ClabelVal { - ClabelVal(size_t offset = 0) : offset(offset), refCount(1) {} - size_t offset; - int refCount; - }; - typedef XBYAK_STD_UNORDERED_MAP ClabelDefList; - typedef XBYAK_STD_UNORDERED_MULTIMAP ClabelUndefList; - typedef XBYAK_STD_UNORDERED_SET LabelPtrList; - - CodeArray *base_; - // global : stateList_.front(), local : 
stateList_.back() - StateList stateList_; - mutable int labelId_; - ClabelDefList clabelDefList_; - ClabelUndefList clabelUndefList_; - LabelPtrList labelPtrList_; - - int getId(const Label& label) const - { - if (label.id == 0) label.id = labelId_++; - return label.id; - } - template - void define_inner(DefList& defList, UndefList& undefList, const T& labelId, size_t addrOffset) - { - // add label - typename DefList::value_type item(labelId, addrOffset); - std::pair ret = defList.insert(item); - if (!ret.second) throw Error(ERR_LABEL_IS_REDEFINED); - // search undefined label - for (;;) { - typename UndefList::iterator itr = undefList.find(labelId); - if (itr == undefList.end()) break; - const JmpLabel *jmp = &itr->second; - const size_t offset = jmp->endOfJmp - jmp->jmpSize; - size_t disp; - if (jmp->mode == inner::LaddTop) { - disp = addrOffset; - } else if (jmp->mode == inner::Labs) { - disp = size_t(base_->getCurr()); - } else { - disp = addrOffset - jmp->endOfJmp + jmp->disp; -#ifdef XBYAK64 - if (jmp->jmpSize <= 4 && !inner::IsInInt32(disp)) throw Error(ERR_OFFSET_IS_TOO_BIG); -#endif - if (jmp->jmpSize == 1 && !inner::IsInDisp8((uint32)disp)) throw Error(ERR_LABEL_IS_TOO_FAR); - } - if (base_->isAutoGrow()) { - base_->save(offset, disp, jmp->jmpSize, jmp->mode); - } else { - base_->rewrite(offset, disp, jmp->jmpSize); - } - undefList.erase(itr); - } - } - template - bool getOffset_inner(const DefList& defList, size_t *offset, const T& label) const - { - typename DefList::const_iterator i = defList.find(label); - if (i == defList.end()) return false; - *offset = i->second.offset; - return true; - } - friend class Label; - void incRefCount(int id, Label *label) - { - clabelDefList_[id].refCount++; - labelPtrList_.insert(label); - } - void decRefCount(int id, Label *label) - { - labelPtrList_.erase(label); - ClabelDefList::iterator i = clabelDefList_.find(id); - if (i == clabelDefList_.end()) return; - if (i->second.refCount == 1) { - clabelDefList_.erase(id); - } else { - --i->second.refCount; - } - } - template - bool hasUndefinedLabel_inner(const T& list) const - { -#ifndef NDEBUG - for (typename T::const_iterator i = list.begin(); i != list.end(); ++i) { - std::cerr << "undefined label:" << i->first << std::endl; - } -#endif - return !list.empty(); - } - // detach all labels linked to LabelManager - void resetLabelPtrList() - { - for (LabelPtrList::iterator i = labelPtrList_.begin(), ie = labelPtrList_.end(); i != ie; ++i) { - (*i)->clear(); - } - labelPtrList_.clear(); - } -public: - LabelManager() - { - reset(); - } - ~LabelManager() - { - resetLabelPtrList(); - } - void reset() - { - base_ = 0; - labelId_ = 1; - stateList_.clear(); - stateList_.push_back(SlabelState()); - stateList_.push_back(SlabelState()); - clabelDefList_.clear(); - clabelUndefList_.clear(); - resetLabelPtrList(); - } - void enterLocal() - { - stateList_.push_back(SlabelState()); - } - void leaveLocal() - { - if (stateList_.size() <= 2) throw Error(ERR_UNDER_LOCAL_LABEL); - if (hasUndefinedLabel_inner(stateList_.back().undefList)) throw Error(ERR_LABEL_IS_NOT_FOUND); - stateList_.pop_back(); - } - void set(CodeArray *base) { base_ = base; } - void defineSlabel(std::string label) - { - if (label == "@b" || label == "@f") throw Error(ERR_BAD_LABEL_STR); - if (label == "@@") { - SlabelDefList& defList = stateList_.front().defList; - SlabelDefList::iterator i = defList.find("@f"); - if (i != defList.end()) { - defList.erase(i); - label = "@b"; - } else { - i = defList.find("@b"); - if (i != defList.end()) { - 
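// [Editor's aside, not part of the patch] define_inner() above resolves forward
// references: a jump to a not-yet-defined label emits a placeholder and records
// the patch site; defining the label rewrites every recorded site with the real
// displacement. A compact sketch of that record-and-backpatch scheme (requires
// C++11 for multimap::erase returning an iterator; names illustrative):
#include <cstdint>
#include <map>
#include <vector>

struct Patcher {
	std::vector<uint8_t> code;
	std::multimap<int, size_t> pending; // label id -> offset of a 4-byte rel32 slot
	void jmpTo(int label)
	{
		code.push_back(0xE9); // jmp rel32
		pending.insert(std::make_pair(label, code.size()));
		code.insert(code.end(), 4, 0); // placeholder displacement
	}
	void define(int label)
	{
		size_t target = code.size();
		std::multimap<int, size_t>::iterator i = pending.lower_bound(label);
		while (i != pending.end() && i->first == label) {
			int32_t rel = (int32_t)(target - (i->second + 4)); // rel32 counts from the next insn
			for (int k = 0; k < 4; k++) code[i->second + k] = (uint8_t)(rel >> (k * 8));
			i = pending.erase(i);
		}
	}
};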
defList.erase(i); - } - label = "@f"; - } - } - SlabelState& st = *label.c_str() == '.' ? stateList_.back() : stateList_.front(); - define_inner(st.defList, st.undefList, label, base_->getSize()); - } - void defineClabel(Label& label) - { - define_inner(clabelDefList_, clabelUndefList_, getId(label), base_->getSize()); - label.mgr = this; - labelPtrList_.insert(&label); - } - void assign(Label& dst, const Label& src) - { - ClabelDefList::const_iterator i = clabelDefList_.find(src.id); - if (i == clabelDefList_.end()) throw Error(ERR_LABEL_ISNOT_SET_BY_L); - define_inner(clabelDefList_, clabelUndefList_, dst.id, i->second.offset); - dst.mgr = this; - labelPtrList_.insert(&dst); - } - bool getOffset(size_t *offset, std::string& label) const - { - const SlabelDefList& defList = stateList_.front().defList; - if (label == "@b") { - if (defList.find("@f") != defList.end()) { - label = "@f"; - } else if (defList.find("@b") == defList.end()) { - throw Error(ERR_LABEL_IS_NOT_FOUND); - } - } else if (label == "@f") { - if (defList.find("@f") != defList.end()) { - label = "@b"; - } - } - const SlabelState& st = *label.c_str() == '.' ? stateList_.back() : stateList_.front(); - return getOffset_inner(st.defList, offset, label); - } - bool getOffset(size_t *offset, const Label& label) const - { - return getOffset_inner(clabelDefList_, offset, getId(label)); - } - void addUndefinedLabel(const std::string& label, const JmpLabel& jmp) - { - SlabelState& st = *label.c_str() == '.' ? stateList_.back() : stateList_.front(); - st.undefList.insert(SlabelUndefList::value_type(label, jmp)); - } - void addUndefinedLabel(const Label& label, const JmpLabel& jmp) - { - clabelUndefList_.insert(ClabelUndefList::value_type(label.id, jmp)); - } - bool hasUndefSlabel() const - { - for (StateList::const_iterator i = stateList_.begin(), ie = stateList_.end(); i != ie; ++i) { - if (hasUndefinedLabel_inner(i->undefList)) return true; - } - return false; - } - bool hasUndefClabel() const { return hasUndefinedLabel_inner(clabelUndefList_); } - const uint8 *getCode() const { return base_->getCode(); } - bool isReady() const { return !base_->isAutoGrow() || base_->isCalledCalcJmpAddress(); } -}; - -inline Label::Label(const Label& rhs) -{ - id = rhs.id; - mgr = rhs.mgr; - if (mgr) mgr->incRefCount(id, this); -} -inline Label& Label::operator=(const Label& rhs) -{ - if (id) throw Error(ERR_LABEL_IS_ALREADY_SET_BY_L); - id = rhs.id; - mgr = rhs.mgr; - if (mgr) mgr->incRefCount(id, this); - return *this; -} -inline Label::~Label() -{ - if (id && mgr) mgr->decRefCount(id, this); -} -inline const uint8* Label::getAddress() const -{ - if (mgr == 0 || !mgr->isReady()) return 0; - size_t offset; - if (!mgr->getOffset(&offset, *this)) return 0; - return mgr->getCode() + offset; -} - -class CodeGenerator : public CodeArray { -public: - enum LabelType { - T_SHORT, - T_NEAR, - T_AUTO // T_SHORT if possible - }; -private: - CodeGenerator operator=(const CodeGenerator&); // don't call -#ifdef XBYAK64 - enum { i32e = 32 | 64, BIT = 64 }; - static const size_t dummyAddr = (size_t(0x11223344) << 32) | 55667788; - typedef Reg64 NativeReg; -#else - enum { i32e = 32, BIT = 32 }; - static const size_t dummyAddr = 0x12345678; - typedef Reg32 NativeReg; -#endif - // (XMM, XMM|MEM) - static inline bool isXMM_XMMorMEM(const Operand& op1, const Operand& op2) - { - return op1.isXMM() && (op2.isXMM() || op2.isMEM()); - } - // (MMX, MMX|MEM) or (XMM, XMM|MEM) - static inline bool isXMMorMMX_MEM(const Operand& op1, const Operand& op2) - { - return 
(op1.isMMX() && (op2.isMMX() || op2.isMEM())) || isXMM_XMMorMEM(op1, op2); - } - // (XMM, MMX|MEM) - static inline bool isXMM_MMXorMEM(const Operand& op1, const Operand& op2) - { - return op1.isXMM() && (op2.isMMX() || op2.isMEM()); - } - // (MMX, XMM|MEM) - static inline bool isMMX_XMMorMEM(const Operand& op1, const Operand& op2) - { - return op1.isMMX() && (op2.isXMM() || op2.isMEM()); - } - // (XMM, REG32|MEM) - static inline bool isXMM_REG32orMEM(const Operand& op1, const Operand& op2) - { - return op1.isXMM() && (op2.isREG(i32e) || op2.isMEM()); - } - // (REG32, XMM|MEM) - static inline bool isREG32_XMMorMEM(const Operand& op1, const Operand& op2) - { - return op1.isREG(i32e) && (op2.isXMM() || op2.isMEM()); - } - // (REG32, REG32|MEM) - static inline bool isREG32_REG32orMEM(const Operand& op1, const Operand& op2) - { - return op1.isREG(i32e) && ((op2.isREG(i32e) && op1.getBit() == op2.getBit()) || op2.isMEM()); - } - void rex(const Operand& op1, const Operand& op2 = Operand()) - { - uint8 rex = 0; - const Operand *p1 = &op1, *p2 = &op2; - if (p1->isMEM()) std::swap(p1, p2); - if (p1->isMEM()) throw Error(ERR_BAD_COMBINATION); - if (p2->isMEM()) { - const Address& addr = p2->getAddress(); - if (BIT == 64 && addr.is32bit()) db(0x67); - rex = addr.getRex() | p1->getReg().getRex(); - } else { - // ModRM(reg, base); - rex = op2.getReg().getRex(op1.getReg()); - } - // except movsx(16bit, 32/64bit) - if ((op1.isBit(16) && !op2.isBit(i32e)) || (op2.isBit(16) && !op1.isBit(i32e))) db(0x66); - if (rex) db(rex); - } - enum AVXtype { - // low 3 bit - T_N1 = 1, - T_N2 = 2, - T_N4 = 3, - T_N8 = 4, - T_N16 = 5, - T_N32 = 6, - T_NX_MASK = 7, - // - T_N_VL = 1 << 3, // N * (1, 2, 4) for VL - T_DUP = 1 << 4, // N = (8, 32, 64) - T_66 = 1 << 5, - T_F3 = 1 << 6, - T_F2 = 1 << 7, - T_0F = 1 << 8, - T_0F38 = 1 << 9, - T_0F3A = 1 << 10, - T_L0 = 1 << 11, - T_L1 = 1 << 12, - T_W0 = 1 << 13, - T_W1 = 1 << 14, - T_EW0 = 1 << 15, - T_EW1 = 1 << 16, - T_YMM = 1 << 17, // support YMM, ZMM - T_EVEX = 1 << 18, - T_ER_X = 1 << 19, // xmm{er} - T_ER_Y = 1 << 20, // ymm{er} - T_ER_Z = 1 << 21, // zmm{er} - T_SAE_X = 1 << 22, // xmm{sae} - T_SAE_Y = 1 << 23, // ymm{sae} - T_SAE_Z = 1 << 24, // zmm{sae} - T_MUST_EVEX = 1 << 25, // contains T_EVEX - T_B32 = 1 << 26, // m32bcst - T_B64 = 1 << 27, // m64bcst - T_M_K = 1 << 28, // mem{k} - T_VSIB = 1 << 29, - T_MEM_EVEX = 1 << 30, // use evex if mem - T_XXX - }; - void vex(const Reg& reg, const Reg& base, const Operand *v, int type, int code, bool x = false) - { - int w = (type & T_W1) ? 1 : 0; - bool is256 = (type & T_L1) ? true : (type & T_L0) ? false : reg.isYMM(); - bool r = reg.isExtIdx(); - bool b = base.isExtIdx(); - int idx = v ? v->getIdx() : 0; - if ((idx | reg.getIdx() | base.getIdx()) >= 16) throw Error(ERR_BAD_COMBINATION); - uint32 pp = (type & T_66) ? 1 : (type & T_F3) ? 2 : (type & T_F2) ? 3 : 0; - uint32 vvvv = (((~idx) & 15) << 3) | (is256 ? 4 : 0) | pp; - if (!b && !x && !w && (type & T_0F)) { - db(0xC5); db((r ? 0 : 0x80) | vvvv); - } else { - uint32 mmmm = (type & T_0F) ? 1 : (type & T_0F38) ? 2 : (type & T_0F3A) ? 3 : 0; - db(0xC4); db((r ? 0 : 0x80) | (x ? 0 : 0x40) | (b ? 
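// [Editor's aside, not part of the patch] vex() above prefers the 2-byte VEX
// prefix 0xC5, which can only express "opcode map 0F with X, B and W all clear";
// anything else forces the 3-byte 0xC4 form. A sketch of that decision; note that
// the R/X/B and vvvv fields are stored inverted, as in the code above. Names are
// illustrative.
#include <cstdint>
#include <vector>

void emitVex(std::vector<uint8_t>& out, bool r, bool x, bool b, bool w,
             int map /* 1=0F, 2=0F38, 3=0F3A */, int vvvv, bool l256, int pp)
{
	uint8_t vvLpp = (uint8_t)(((~vvvv & 15) << 3) | (l256 ? 4 : 0) | (pp & 3));
	if (!x && !b && !w && map == 1) {
		out.push_back(0xC5); // short form
		out.push_back((uint8_t)((r ? 0 : 0x80) | vvLpp));
	} else {
		out.push_back(0xC4);
		out.push_back((uint8_t)((r ? 0 : 0x80) | (x ? 0 : 0x40) | (b ? 0 : 0x20) | map));
		out.push_back((uint8_t)((w ? 0x80 : 0) | vvLpp));
	}
}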
0 : 0x20) | mmmm); db((w << 7) | vvvv); - } - db(code); - } - void verifySAE(const Reg& r, int type) const - { - if (((type & T_SAE_X) && r.isXMM()) || ((type & T_SAE_Y) && r.isYMM()) || ((type & T_SAE_Z) && r.isZMM())) return; - throw Error(ERR_SAE_IS_INVALID); - } - void verifyER(const Reg& r, int type) const - { - if (((type & T_ER_X) && r.isXMM()) || ((type & T_ER_Y) && r.isYMM()) || ((type & T_ER_Z) && r.isZMM())) return; - throw Error(ERR_ER_IS_INVALID); - } - // (a, b, c) contains non zero two or three values then err - int verifyDuplicate(int a, int b, int c, int err) - { - int v = a | b | c; - if ((a > 0 && a != v) + (b > 0 && b != v) + (c > 0 && c != v) > 0) return Error(err); - return v; - } - int evex(const Reg& reg, const Reg& base, const Operand *v, int type, int code, bool x = false, bool b = false, int aaa = 0, uint32 VL = 0, bool Hi16Vidx = false) - { - if (!(type & (T_EVEX | T_MUST_EVEX))) throw Error(ERR_EVEX_IS_INVALID); - int w = (type & T_EW1) ? 1 : 0; - uint32 mm = (type & T_0F) ? 1 : (type & T_0F38) ? 2 : (type & T_0F3A) ? 3 : 0; - uint32 pp = (type & T_66) ? 1 : (type & T_F3) ? 2 : (type & T_F2) ? 3 : 0; - - int idx = v ? v->getIdx() : 0; - uint32 vvvv = ~idx; - - bool R = !reg.isExtIdx(); - bool X = x ? false : !base.isExtIdx2(); - bool B = !base.isExtIdx(); - bool Rp = !reg.isExtIdx2(); - int LL; - int rounding = verifyDuplicate(reg.getRounding(), base.getRounding(), v ? v->getRounding() : 0, ERR_ROUNDING_IS_ALREADY_SET); - int disp8N = 1; - if (rounding) { - if (rounding == EvexModifierRounding::T_SAE) { - verifySAE(base, type); LL = 0; - } else { - verifyER(base, type); LL = rounding - 1; - } - b = true; - } else { - if (v) VL = (std::max)(VL, v->getBit()); - VL = (std::max)((std::max)(reg.getBit(), base.getBit()), VL); - LL = (VL == 512) ? 2 : (VL == 256) ? 1 : 0; - if (b) { - disp8N = (type & T_B32) ? 4 : 8; - } else if (type & T_DUP) { - disp8N = VL == 128 ? 8 : VL == 256 ? 32 : 64; - } else { - if ((type & (T_NX_MASK | T_N_VL)) == 0) { - type |= T_N16 | T_N_VL; // default - } - int low = type & T_NX_MASK; - if (low > 0) { - disp8N = 1 << (low - 1); - if (type & T_N_VL) disp8N *= (VL == 512 ? 4 : VL == 256 ? 2 : 1); - } - } - } - bool Vp = !((v ? v->isExtIdx2() : 0) | Hi16Vidx); - bool z = reg.hasZero() || base.hasZero() || (v ? v->hasZero() : false); - if (aaa == 0) aaa = verifyDuplicate(base.getOpmaskIdx(), reg.getOpmaskIdx(), (v ? v->getOpmaskIdx() : 0), ERR_OPMASK_IS_ALREADY_SET); - db(0x62); - db((R ? 0x80 : 0) | (X ? 0x40 : 0) | (B ? 0x20 : 0) | (Rp ? 0x10 : 0) | (mm & 3)); - db((w == 1 ? 0x80 : 0) | ((vvvv & 15) << 3) | 4 | (pp & 3)); - db((z ? 0x80 : 0) | ((LL & 3) << 5) | (b ? 0x10 : 0) | (Vp ? 
8 : 0) | (aaa & 7));
-		db(code);
-		return disp8N;
-	}
-	void setModRM(int mod, int r1, int r2)
-	{
-		db(static_cast<uint8>((mod << 6) | ((r1 & 7) << 3) | (r2 & 7)));
-	}
-	void setSIB(const RegExp& e, int reg, int disp8N = 0)
-	{
-		size_t disp64 = e.getDisp();
-#ifdef XBYAK64
-		size_t high = disp64 >> 32;
-		if (high != 0 && high != 0xFFFFFFFF) throw Error(ERR_OFFSET_IS_TOO_BIG);
-#endif
-		uint32 disp = static_cast<uint32>(disp64);
-		const Reg& base = e.getBase();
-		const Reg& index = e.getIndex();
-		const int baseIdx = base.getIdx();
-		const int baseBit = base.getBit();
-		const int indexBit = index.getBit();
-		enum {
-			mod00 = 0, mod01 = 1, mod10 = 2
-		};
-		int mod = mod10; // disp32
-		if (!baseBit || ((baseIdx & 7) != Operand::EBP && disp == 0)) {
-			mod = mod00;
-		} else {
-			if (disp8N == 0) {
-				if (inner::IsInDisp8(disp)) {
-					mod = mod01;
-				}
-			} else {
-				// disp must be casted to signed
-				uint32 t = static_cast<uint32>(static_cast<int>(disp) / disp8N);
-				if ((disp % disp8N) == 0 && inner::IsInDisp8(t)) {
-					disp = t;
-					mod = mod01;
-				}
-			}
-		}
-		const int newBaseIdx = baseBit ? (baseIdx & 7) : Operand::EBP;
-		/* ModR/M = [2:3:3] = [Mod:reg/code:R/M] */
-		bool hasSIB = indexBit || (baseIdx & 7) == Operand::ESP;
-#ifdef XBYAK64
-		if (!baseBit && !indexBit) hasSIB = true;
-#endif
-		if (hasSIB) {
-			setModRM(mod, reg, Operand::ESP);
-			/* SIB = [2:3:3] = [SS:index:base(=rm)] */
-			const int idx = indexBit ? (index.getIdx() & 7) : Operand::ESP;
-			const int scale = e.getScale();
-			const int SS = (scale == 8) ? 3 : (scale == 4) ? 2 : (scale == 2) ? 1 : 0;
-			setModRM(SS, idx, newBaseIdx);
-		} else {
-			setModRM(mod, reg, newBaseIdx);
-		}
-		if (mod == mod01) {
-			db(disp);
-		} else if (mod == mod10 || (mod == mod00 && !baseBit)) {
-			dd(disp);
-		}
-	}
-	LabelManager labelMgr_;
-	bool isInDisp16(uint32 x) const { return 0xFFFF8000 <= x || x <= 0x7FFF; }
-	void opModR(const Reg& reg1, const Reg& reg2, int code0, int code1 = NONE, int code2 = NONE)
-	{
-		rex(reg2, reg1);
-		db(code0 | (reg1.isBit(8) ? 0 : 1)); if (code1 != NONE) db(code1); if (code2 != NONE) db(code2);
-		setModRM(3, reg1.getIdx(), reg2.getIdx());
-	}
-	void opModM(const Address& addr, const Reg& reg, int code0, int code1 = NONE, int code2 = NONE, int immSize = 0)
-	{
-		if (addr.is64bitDisp()) throw Error(ERR_CANT_USE_64BIT_DISP);
-		rex(addr, reg);
-		db(code0 | (reg.isBit(8) ? 0 : 1)); if (code1 != NONE) db(code1); if (code2 != NONE) db(code2);
-		opAddr(addr, reg.getIdx(), immSize);
-	}
-	void opMIB(const Address& addr, const Reg& reg, int code0, int code1)
-	{
-		if (addr.is64bitDisp()) throw Error(ERR_CANT_USE_64BIT_DISP);
-		if (addr.getMode() != Address::M_ModRM) throw Error(ERR_INVALID_MIB_ADDRESS);
-		if (BIT == 64 && addr.is32bit()) db(0x67);
-		const RegExp& regExp = addr.getRegExp(false);
-		uint8 rex = regExp.getRex();
-		if (rex) db(rex);
-		db(code0); db(code1);
-		setSIB(regExp, reg.getIdx());
-	}
-	void makeJmp(uint32 disp, LabelType type, uint8 shortCode, uint8 longCode, uint8 longPref)
-	{
-		const int shortJmpSize = 2;
-		const int longHeaderSize = longPref ?
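// [Editor's aside, not part of the patch] A worked example of the byte layout
// setSIB() above produces, for mov eax, [rbx + rcx*4 + 8]: both bytes use the
// [2:3:3] split called out in the comments (ModRM = [mod:reg:rm],
// SIB = [scale:index:base]).
#include <cstdint>
#include <cstdio>

int main()
{
	uint8_t opcode = 0x8B;                               // mov r32, r/m32
	uint8_t modrm  = (uint8_t)((1 << 6) | (0 << 3) | 4); // mod=01 (disp8), reg=eax, rm=100 -> SIB follows
	uint8_t sib    = (uint8_t)((2 << 6) | (1 << 3) | 3); // SS=2 (scale 4), index=rcx, base=rbx
	uint8_t disp8  = 8;
	std::printf("%02X %02X %02X %02X\n", opcode, modrm, sib, disp8); // 8B 44 8B 08
}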
2 : 1; - const int longJmpSize = longHeaderSize + 4; - if (type != T_NEAR && inner::IsInDisp8(disp - shortJmpSize)) { - db(shortCode); db(disp - shortJmpSize); - } else { - if (type == T_SHORT) throw Error(ERR_LABEL_IS_TOO_FAR); - if (longPref) db(longPref); - db(longCode); dd(disp - longJmpSize); - } - } - template - void opJmp(T& label, LabelType type, uint8 shortCode, uint8 longCode, uint8 longPref) - { - if (isAutoGrow() && size_ + 16 >= maxSize_) growMemory(); /* avoid splitting code of jmp */ - size_t offset = 0; - if (labelMgr_.getOffset(&offset, label)) { /* label exists */ - makeJmp(inner::VerifyInInt32(offset - size_), type, shortCode, longCode, longPref); - } else { - int jmpSize = 0; - if (type == T_NEAR) { - jmpSize = 4; - if (longPref) db(longPref); - db(longCode); dd(0); - } else { - jmpSize = 1; - db(shortCode); db(0); - } - JmpLabel jmp(size_, jmpSize, inner::LasIs); - labelMgr_.addUndefinedLabel(label, jmp); - } - } - void opJmpAbs(const void *addr, LabelType type, uint8 shortCode, uint8 longCode, uint8 longPref = 0) - { - if (isAutoGrow()) { - if (type != T_NEAR) throw Error(ERR_ONLY_T_NEAR_IS_SUPPORTED_IN_AUTO_GROW); - if (size_ + 16 >= maxSize_) growMemory(); - if (longPref) db(longPref); - db(longCode); - dd(0); - save(size_ - 4, size_t(addr) - size_, 4, inner::Labs); - } else { - makeJmp(inner::VerifyInInt32(reinterpret_cast(addr) - getCurr()), type, shortCode, longCode, longPref); - } - - } - // reg is reg field of ModRM - // immSize is the size for immediate value - // disp8N = 0(normal), disp8N = 1(force disp32), disp8N = {2, 4, 8} ; compressed displacement - void opAddr(const Address &addr, int reg, int immSize = 0, int disp8N = 0, bool permitVisb = false) - { - if (!permitVisb && addr.isVsib()) throw Error(ERR_BAD_VSIB_ADDRESSING); - if (addr.getMode() == Address::M_ModRM) { - setSIB(addr.getRegExp(), reg, disp8N); - } else if (addr.getMode() == Address::M_rip || addr.getMode() == Address::M_ripAddr) { - setModRM(0, reg, 5); - if (addr.getLabel()) { // [rip + Label] - putL_inner(*addr.getLabel(), true, addr.getDisp() - immSize); - } else { - size_t disp = addr.getDisp(); - if (addr.getMode() == Address::M_ripAddr) { - if (isAutoGrow()) throw Error(ERR_INVALID_RIP_IN_AUTO_GROW); - disp -= (size_t)getCurr() + 4 + immSize; - } - dd(inner::VerifyInInt32(disp)); - } - } - } - /* preCode is for SSSE3/SSE4 */ - void opGen(const Operand& reg, const Operand& op, int code, int pref, bool isValid(const Operand&, const Operand&), int imm8 = NONE, int preCode = NONE) - { - if (isValid && !isValid(reg, op)) throw Error(ERR_BAD_COMBINATION); - if (pref != NONE) db(pref); - if (op.isMEM()) { - opModM(op.getAddress(), reg.getReg(), 0x0F, preCode, code, (imm8 != NONE) ? 1 : 0); - } else { - opModR(reg.getReg(), op.getReg(), 0x0F, preCode, code); - } - if (imm8 != NONE) db(imm8); - } - void opMMX_IMM(const Mmx& mmx, int imm8, int code, int ext) - { - if (mmx.isXMM()) db(0x66); - opModR(Reg32(ext), mmx, 0x0F, code); - db(imm8); - } - void opMMX(const Mmx& mmx, const Operand& op, int code, int pref = 0x66, int imm8 = NONE, int preCode = NONE) - { - opGen(mmx, op, code, mmx.isXMM() ? 
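// [Editor's aside, not part of the patch] makeJmp() above measures the
// displacement from the end of the instruction, so the 2-byte short form is
// viable only when (disp - 2) fits a signed byte; otherwise the 5-byte near form
// carries a rel32. A minimal sketch with illustrative names:
#include <cstdint>
#include <vector>

void emitJmp(std::vector<uint8_t>& out, int32_t disp /* from the start of the jmp */)
{
	int32_t shortRel = disp - 2; // the short encoding is 2 bytes long
	if (-128 <= shortRel && shortRel <= 127) {
		out.push_back(0xEB);
		out.push_back((uint8_t)shortRel);
	} else {
		int32_t nearRel = disp - 5; // the near encoding is 5 bytes long
		out.push_back(0xE9);
		for (int i = 0; i < 4; i++) out.push_back((uint8_t)(nearRel >> (i * 8)));
	}
}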
pref : NONE, isXMMorMMX_MEM, imm8, preCode); - } - void opMovXMM(const Operand& op1, const Operand& op2, int code, int pref) - { - if (pref != NONE) db(pref); - if (op1.isXMM() && op2.isMEM()) { - opModM(op2.getAddress(), op1.getReg(), 0x0F, code); - } else if (op1.isMEM() && op2.isXMM()) { - opModM(op1.getAddress(), op2.getReg(), 0x0F, code | 1); - } else { - throw Error(ERR_BAD_COMBINATION); - } - } - void opExt(const Operand& op, const Mmx& mmx, int code, int imm, bool hasMMX2 = false) - { - if (hasMMX2 && op.isREG(i32e)) { /* pextrw is special */ - if (mmx.isXMM()) db(0x66); - opModR(op.getReg(), mmx, 0x0F, 0xC5); db(imm); - } else { - opGen(mmx, op, code, 0x66, isXMM_REG32orMEM, imm, 0x3A); - } - } - void opR_ModM(const Operand& op, int bit, int ext, int code0, int code1 = NONE, int code2 = NONE, bool disableRex = false, int immSize = 0) - { - int opBit = op.getBit(); - if (disableRex && opBit == 64) opBit = 32; - if (op.isREG(bit)) { - opModR(Reg(ext, Operand::REG, opBit), op.getReg().changeBit(opBit), code0, code1, code2); - } else if (op.isMEM()) { - opModM(op.getAddress(), Reg(ext, Operand::REG, opBit), code0, code1, code2, immSize); - } else { - throw Error(ERR_BAD_COMBINATION); - } - } - void opShift(const Operand& op, int imm, int ext) - { - verifyMemHasSize(op); - opR_ModM(op, 0, ext, (0xC0 | ((imm == 1 ? 1 : 0) << 4)), NONE, NONE, false, (imm != 1) ? 1 : 0); - if (imm != 1) db(imm); - } - void opShift(const Operand& op, const Reg8& _cl, int ext) - { - if (_cl.getIdx() != Operand::CL) throw Error(ERR_BAD_COMBINATION); - opR_ModM(op, 0, ext, 0xD2); - } - void opModRM(const Operand& op1, const Operand& op2, bool condR, bool condM, int code0, int code1 = NONE, int code2 = NONE, int immSize = 0) - { - if (condR) { - opModR(op1.getReg(), op2.getReg(), code0, code1, code2); - } else if (condM) { - opModM(op2.getAddress(), op1.getReg(), code0, code1, code2, immSize); - } else { - throw Error(ERR_BAD_COMBINATION); - } - } - void opShxd(const Operand& op, const Reg& reg, uint8 imm, int code, const Reg8 *_cl = 0) - { - if (_cl && _cl->getIdx() != Operand::CL) throw Error(ERR_BAD_COMBINATION); - opModRM(reg, op, (op.isREG(16 | i32e) && op.getBit() == reg.getBit()), op.isMEM() && (reg.isREG(16 | i32e)), 0x0F, code | (_cl ? 1 : 0), NONE, _cl ? 0 : 1); - if (!_cl) db(imm); - } - // (REG, REG|MEM), (MEM, REG) - void opRM_RM(const Operand& op1, const Operand& op2, int code) - { - if (op1.isREG() && op2.isMEM()) { - opModM(op2.getAddress(), op1.getReg(), code | 2); - } else { - opModRM(op2, op1, op1.isREG() && op1.getKind() == op2.getKind(), op1.isMEM() && op2.isREG(), code); - } - } - // (REG|MEM, IMM) - void opRM_I(const Operand& op, uint32 imm, int code, int ext) - { - verifyMemHasSize(op); - uint32 immBit = inner::IsInDisp8(imm) ? 8 : isInDisp16(imm) ? 16 : 32; - if (op.isBit(8)) immBit = 8; - if (op.getBit() < immBit) throw Error(ERR_IMM_IS_TOO_BIG); - if (op.isBit(32|64) && immBit == 16) immBit = 32; /* don't use MEM16 if 32/64bit mode */ - if (op.isREG() && op.getIdx() == 0 && (op.getBit() == immBit || (op.isBit(64) && immBit == 32))) { // rax, eax, ax, al - rex(op); - db(code | 4 | (immBit == 8 ? 0 : 1)); - } else { - int tmp = immBit < (std::min)(op.getBit(), 32U) ? 
2 : 0; - opR_ModM(op, 0, ext, 0x80 | tmp, NONE, NONE, false, immBit / 8); - } - db(imm, immBit / 8); - } - void opIncDec(const Operand& op, int code, int ext) - { - verifyMemHasSize(op); -#ifndef XBYAK64 - if (op.isREG() && !op.isBit(8)) { - rex(op); db(code | op.getIdx()); - return; - } -#endif - code = 0xFE; - if (op.isREG()) { - opModR(Reg(ext, Operand::REG, op.getBit()), op.getReg(), code); - } else { - opModM(op.getAddress(), Reg(ext, Operand::REG, op.getBit()), code); - } - } - void opPushPop(const Operand& op, int code, int ext, int alt) - { - int bit = op.getBit(); - if (bit == 16 || bit == BIT) { - if (bit == 16) db(0x66); - if (op.isREG()) { - if (op.getReg().getIdx() >= 8) db(0x41); - db(alt | (op.getIdx() & 7)); - return; - } - if (op.isMEM()) { - opModM(op.getAddress(), Reg(ext, Operand::REG, 32), code); - return; - } - } - throw Error(ERR_BAD_COMBINATION); - } - void verifyMemHasSize(const Operand& op) const - { - if (op.isMEM() && op.getBit() == 0) throw Error(ERR_MEM_SIZE_IS_NOT_SPECIFIED); - } - /* - mov(r, imm) = db(imm, mov_imm(r, imm)) - */ - int mov_imm(const Reg& reg, size_t imm) - { - int bit = reg.getBit(); - const int idx = reg.getIdx(); - int code = 0xB0 | ((bit == 8 ? 0 : 1) << 3); - if (bit == 64 && (imm & ~size_t(0xffffffffu)) == 0) { - rex(Reg32(idx)); - bit = 32; - } else { - rex(reg); - if (bit == 64 && inner::IsInInt32(imm)) { - db(0xC7); - code = 0xC0; - bit = 32; - } - } - db(code | (idx & 7)); - return bit / 8; - } - template - void putL_inner(T& label, bool relative = false, size_t disp = 0) - { - const int jmpSize = relative ? 4 : (int)sizeof(size_t); - if (isAutoGrow() && size_ + 16 >= maxSize_) growMemory(); - size_t offset = 0; - if (labelMgr_.getOffset(&offset, label)) { - if (relative) { - db(inner::VerifyInInt32(offset + disp - size_ - jmpSize), jmpSize); - } else if (isAutoGrow()) { - db(uint64(0), jmpSize); - save(size_ - jmpSize, offset, jmpSize, inner::LaddTop); - } else { - db(size_t(top_) + offset, jmpSize); - } - return; - } - db(uint64(0), jmpSize); - JmpLabel jmp(size_, jmpSize, (relative ? inner::LasIs : isAutoGrow() ? inner::LaddTop : inner::Labs), disp); - labelMgr_.addUndefinedLabel(label, jmp); - } - void opMovxx(const Reg& reg, const Operand& op, uint8 code) - { - if (op.isBit(32)) throw Error(ERR_BAD_COMBINATION); - int w = op.isBit(16); -#ifdef XBYAK64 - if (op.isHigh8bit()) throw Error(ERR_BAD_COMBINATION); -#endif - bool cond = reg.isREG() && (reg.getBit() > op.getBit()); - opModRM(reg, op, cond && op.isREG(), cond && op.isMEM(), 0x0F, code | w); - } - void opFpuMem(const Address& addr, uint8 m16, uint8 m32, uint8 m64, uint8 ext, uint8 m64ext) - { - if (addr.is64bitDisp()) throw Error(ERR_CANT_USE_64BIT_DISP); - uint8 code = addr.isBit(16) ? m16 : addr.isBit(32) ? m32 : addr.isBit(64) ? m64 : 0; - if (!code) throw Error(ERR_BAD_MEM_SIZE); - if (m64ext && addr.isBit(64)) ext = m64ext; - - rex(addr, st0); - db(code); - opAddr(addr, ext); - } - // use code1 if reg1 == st0 - // use code2 if reg1 != st0 && reg2 == st0 - void opFpuFpu(const Fpu& reg1, const Fpu& reg2, uint32 code1, uint32 code2) - { - uint32 code = reg1.getIdx() == 0 ? code1 : reg2.getIdx() == 0 ? 
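// [Editor's aside, not part of the patch] mov_imm() above shrinks immediates
// where the architecture allows: a 64-bit load whose upper half is zero can use
// the 32-bit form (32-bit writes zero-extend), and a sign-extendable value can
// use REX.W C7 /0 with an imm32. A sketch of just that selection; names are
// illustrative.
#include <cstdint>

enum MovForm { MOV_IMM32, MOV_SEXT_IMM32, MOV_IMM64 };

MovForm pickMovForm(uint64_t imm)
{
	if ((imm & ~(uint64_t)0xFFFFFFFFu) == 0) return MOV_IMM32; // e.g. mov eax, imm32 (5 bytes)
	if ((int64_t)imm == (int32_t)(int64_t)imm) return MOV_SEXT_IMM32; // REX.W C7 /0 imm32 (7 bytes)
	return MOV_IMM64; // REX.W B8+r imm64 (10 bytes)
}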
code2 : 0; - if (!code) throw Error(ERR_BAD_ST_COMBINATION); - db(uint8(code >> 8)); - db(uint8(code | (reg1.getIdx() | reg2.getIdx()))); - } - void opFpu(const Fpu& reg, uint8 code1, uint8 code2) - { - db(code1); db(code2 | reg.getIdx()); - } - void opVex(const Reg& r, const Operand *p1, const Operand& op2, int type, int code, int imm8 = NONE) - { - if (op2.isMEM()) { - const Address& addr = op2.getAddress(); - const RegExp& regExp = addr.getRegExp(); - const Reg& base = regExp.getBase(); - const Reg& index = regExp.getIndex(); - if (BIT == 64 && addr.is32bit()) db(0x67); - int disp8N = 0; - bool x = index.isExtIdx(); - if ((type & (T_MUST_EVEX|T_MEM_EVEX)) || r.hasEvex() || (p1 && p1->hasEvex()) || addr.isBroadcast() || addr.getOpmaskIdx()) { - int aaa = addr.getOpmaskIdx(); - if (aaa && !(type & T_M_K)) throw Error(ERR_INVALID_OPMASK_WITH_MEMORY); - bool b = false; - if (addr.isBroadcast()) { - if (!(type & (T_B32 | T_B64))) throw Error(ERR_INVALID_BROADCAST); - b = true; - } - int VL = regExp.isVsib() ? index.getBit() : 0; - disp8N = evex(r, base, p1, type, code, x, b, aaa, VL, index.isExtIdx2()); - } else { - vex(r, base, p1, type, code, x); - } - opAddr(addr, r.getIdx(), (imm8 != NONE) ? 1 : 0, disp8N, (type & T_VSIB) != 0); - } else { - const Reg& base = op2.getReg(); - if ((type & T_MUST_EVEX) || r.hasEvex() || (p1 && p1->hasEvex()) || base.hasEvex()) { - evex(r, base, p1, type, code); - } else { - vex(r, base, p1, type, code); - } - setModRM(3, r.getIdx(), base.getIdx()); - } - if (imm8 != NONE) db(imm8); - } - // (r, r, r/m) if isR_R_RM - // (r, r/m, r) - void opGpr(const Reg32e& r, const Operand& op1, const Operand& op2, int type, uint8 code, bool isR_R_RM, int imm8 = NONE) - { - const Operand *p1 = &op1; - const Operand *p2 = &op2; - if (!isR_R_RM) std::swap(p1, p2); - const unsigned int bit = r.getBit(); - if (p1->getBit() != bit || (p2->isREG() && p2->getBit() != bit)) throw Error(ERR_BAD_COMBINATION); - type |= (bit == 64) ? T_W1 : T_W0; - opVex(r, p1, *p2, type, code, imm8); - } - void opAVX_X_X_XM(const Xmm& x1, const Operand& op1, const Operand& op2, int type, int code0, int imm8 = NONE) - { - const Xmm *x2 = static_cast(&op1); - const Operand *op = &op2; - if (op2.isNone()) { // (x1, op1) -> (x1, x1, op1) - x2 = &x1; - op = &op1; - } - // (x1, x2, op) - if (!((x1.isXMM() && x2->isXMM()) || ((type & T_YMM) && ((x1.isYMM() && x2->isYMM()) || (x1.isZMM() && x2->isZMM()))))) throw Error(ERR_BAD_COMBINATION); - opVex(x1, x2, *op, type, code0, imm8); - } - void opAVX_K_X_XM(const Opmask& k, const Xmm& x2, const Operand& op3, int type, int code0, int imm8 = NONE) - { - if (!op3.isMEM() && (x2.getKind() != op3.getKind())) throw Error(ERR_BAD_COMBINATION); - opVex(k, &x2, op3, type, code0, imm8); - } - // (x, x/m), (y, x/m256), (z, y/m) - void checkCvt1(const Operand& x, const Operand& op) const - { - if (!op.isMEM() && !(x.is(Operand::XMM | Operand::YMM) && op.isXMM()) && !(x.isZMM() && op.isYMM())) throw Error(ERR_BAD_COMBINATION); - } - // (x, x/m), (x, y/m256), (y, z/m) - void checkCvt2(const Xmm& x, const Operand& op) const - { - if (!(x.isXMM() && op.is(Operand::XMM | Operand::YMM | Operand::MEM)) && !(x.isYMM() && op.is(Operand::ZMM | Operand::MEM))) throw Error(ERR_BAD_COMBINATION); - } - void opCvt2(const Xmm& x, const Operand& op, int type, int code) - { - checkCvt2(x, op); - Operand::Kind kind = x.isXMM() ? (op.isBit(256) ? 
Operand::YMM : Operand::XMM) : Operand::ZMM; - opVex(x.copyAndSetKind(kind), &xm0, op, type, code); - } - void opCvt3(const Xmm& x1, const Xmm& x2, const Operand& op, int type, int type64, int type32, uint8 code) - { - if (!(x1.isXMM() && x2.isXMM() && (op.isREG(i32e) || op.isMEM()))) throw Error(ERR_BAD_SIZE_OF_REGISTER); - Xmm x(op.getIdx()); - const Operand *p = op.isREG() ? &x : &op; - opVex(x1, &x2, *p, type | (op.isBit(64) ? type64 : type32), code); - } - const Xmm& cvtIdx0(const Operand& x) const - { - return x.isZMM() ? zm0 : x.isYMM() ? ym0 : xm0; - } - // support (x, x/m, imm), (y, y/m, imm) - void opAVX_X_XM_IMM(const Xmm& x, const Operand& op, int type, int code, int imm8 = NONE) - { - opAVX_X_X_XM(x, cvtIdx0(x), op, type, code, imm8); - } - // QQQ:need to refactor - void opSp1(const Reg& reg, const Operand& op, uint8 pref, uint8 code0, uint8 code1) - { - if (reg.isBit(8)) throw Error(ERR_BAD_SIZE_OF_REGISTER); - bool is16bit = reg.isREG(16) && (op.isREG(16) || op.isMEM()); - if (!is16bit && !(reg.isREG(i32e) && (op.isREG(reg.getBit()) || op.isMEM()))) throw Error(ERR_BAD_COMBINATION); - if (is16bit) db(0x66); - db(pref); opModRM(reg.changeBit(i32e == 32 ? 32 : reg.getBit()), op, op.isREG(), true, code0, code1); - } - void opGather(const Xmm& x1, const Address& addr, const Xmm& x2, int type, uint8 code, int mode) - { - const RegExp& regExp = addr.getRegExp(); - if (!regExp.isVsib(128 | 256)) throw Error(ERR_BAD_VSIB_ADDRESSING); - const int y_vx_y = 0; - const int y_vy_y = 1; -// const int x_vy_x = 2; - const bool isAddrYMM = regExp.getIndex().getBit() == 256; - if (!x1.isXMM() || isAddrYMM || !x2.isXMM()) { - bool isOK = false; - if (mode == y_vx_y) { - isOK = x1.isYMM() && !isAddrYMM && x2.isYMM(); - } else if (mode == y_vy_y) { - isOK = x1.isYMM() && isAddrYMM && x2.isYMM(); - } else { // x_vy_x - isOK = !x1.isYMM() && isAddrYMM && !x2.isYMM(); - } - if (!isOK) throw Error(ERR_BAD_VSIB_ADDRESSING); - } - opAVX_X_X_XM(isAddrYMM ? Ymm(x1.getIdx()) : x1, isAddrYMM ? 
Ymm(x2.getIdx()) : x2, addr, type, code); - } - enum { - xx_yy_zz = 0, - xx_yx_zy = 1, - xx_xy_yz = 2 - }; - void checkGather2(const Xmm& x1, const Reg& x2, int mode) const - { - if (x1.isXMM() && x2.isXMM()) return; - switch (mode) { - case xx_yy_zz: if ((x1.isYMM() && x2.isYMM()) || (x1.isZMM() && x2.isZMM())) return; - break; - case xx_yx_zy: if ((x1.isYMM() && x2.isXMM()) || (x1.isZMM() && x2.isYMM())) return; - break; - case xx_xy_yz: if ((x1.isXMM() && x2.isYMM()) || (x1.isYMM() && x2.isZMM())) return; - break; - } - throw Error(ERR_BAD_VSIB_ADDRESSING); - } - void opGather2(const Xmm& x, const Address& addr, int type, uint8 code, int mode) - { - if (x.hasZero()) throw Error(ERR_INVALID_ZERO); - checkGather2(x, addr.getRegExp().getIndex(), mode); - opVex(x, 0, addr, type, code); - } - /* - xx_xy_yz ; mode = true - xx_xy_xz ; mode = false - */ - void opVmov(const Operand& op, const Xmm& x, int type, uint8 code, bool mode) - { - if (mode) { - if (!op.isMEM() && !((op.isXMM() && x.isXMM()) || (op.isXMM() && x.isYMM()) || (op.isYMM() && x.isZMM()))) throw Error(ERR_BAD_COMBINATION); - } else { - if (!op.isMEM() && !op.isXMM()) throw Error(ERR_BAD_COMBINATION); - } - opVex(x, 0, op, type, code); - } - void opGatherFetch(const Address& addr, const Xmm& x, int type, uint8 code, Operand::Kind kind) - { - if (addr.hasZero()) throw Error(ERR_INVALID_ZERO); - if (addr.getRegExp().getIndex().getKind() != kind) throw Error(ERR_BAD_VSIB_ADDRESSING); - opVex(x, 0, addr, type, code); - } -public: - unsigned int getVersion() const { return VERSION; } - using CodeArray::db; - const Mmx mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7; - const Xmm xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7; - const Ymm ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7; - const Zmm zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7; - const Xmm &xm0, &xm1, &xm2, &xm3, &xm4, &xm5, &xm6, &xm7; - const Ymm &ym0, &ym1, &ym2, &ym3, &ym4, &ym5, &ym6, &ym7; - const Ymm &zm0, &zm1, &zm2, &zm3, &zm4, &zm5, &zm6, &zm7; - const Reg32 eax, ecx, edx, ebx, esp, ebp, esi, edi; - const Reg16 ax, cx, dx, bx, sp, bp, si, di; - const Reg8 al, cl, dl, bl, ah, ch, dh, bh; - const AddressFrame ptr, byte, word, dword, qword, xword, yword, zword; // xword is same as oword of NASM - const AddressFrame ptr_b, xword_b, yword_b, zword_b; // broadcast such as {1to2}, {1to4}, {1to8}, {1to16}, {b} - const Fpu st0, st1, st2, st3, st4, st5, st6, st7; - const Opmask k0, k1, k2, k3, k4, k5, k6, k7; - const BoundsReg bnd0, bnd1, bnd2, bnd3; - const EvexModifierRounding T_sae, T_rn_sae, T_rd_sae, T_ru_sae, T_rz_sae; // {sae}, {rn-sae}, {rd-sae}, {ru-sae}, {rz-sae} - const EvexModifierZero T_z; // {z} -#ifdef XBYAK64 - const Reg64 rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi, r8, r9, r10, r11, r12, r13, r14, r15; - const Reg32 r8d, r9d, r10d, r11d, r12d, r13d, r14d, r15d; - const Reg16 r8w, r9w, r10w, r11w, r12w, r13w, r14w, r15w; - const Reg8 r8b, r9b, r10b, r11b, r12b, r13b, r14b, r15b; - const Reg8 spl, bpl, sil, dil; - const Xmm xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15; - const Xmm xmm16, xmm17, xmm18, xmm19, xmm20, xmm21, xmm22, xmm23; - const Xmm xmm24, xmm25, xmm26, xmm27, xmm28, xmm29, xmm30, xmm31; - const Ymm ymm8, ymm9, ymm10, ymm11, ymm12, ymm13, ymm14, ymm15; - const Ymm ymm16, ymm17, ymm18, ymm19, ymm20, ymm21, ymm22, ymm23; - const Ymm ymm24, ymm25, ymm26, ymm27, ymm28, ymm29, ymm30, ymm31; - const Zmm zmm8, zmm9, zmm10, zmm11, zmm12, zmm13, zmm14, zmm15; - const Zmm zmm16, zmm17, zmm18, zmm19, zmm20, zmm21, zmm22, zmm23; - const Zmm zmm24, zmm25, 
zmm26, zmm27, zmm28, zmm29, zmm30, zmm31; - const Xmm &xm8, &xm9, &xm10, &xm11, &xm12, &xm13, &xm14, &xm15; // for my convenience - const Xmm &xm16, &xm17, &xm18, &xm19, &xm20, &xm21, &xm22, &xm23; - const Xmm &xm24, &xm25, &xm26, &xm27, &xm28, &xm29, &xm30, &xm31; - const Ymm &ym8, &ym9, &ym10, &ym11, &ym12, &ym13, &ym14, &ym15; - const Ymm &ym16, &ym17, &ym18, &ym19, &ym20, &ym21, &ym22, &ym23; - const Ymm &ym24, &ym25, &ym26, &ym27, &ym28, &ym29, &ym30, &ym31; - const Zmm &zm8, &zm9, &zm10, &zm11, &zm12, &zm13, &zm14, &zm15; - const Zmm &zm16, &zm17, &zm18, &zm19, &zm20, &zm21, &zm22, &zm23; - const Zmm &zm24, &zm25, &zm26, &zm27, &zm28, &zm29, &zm30, &zm31; - const RegRip rip; -#endif -#ifndef XBYAK_DISABLE_SEGMENT - const Segment es, cs, ss, ds, fs, gs; -#endif - void L(const std::string& label) { labelMgr_.defineSlabel(label); } - void L(Label& label) { labelMgr_.defineClabel(label); } - Label L() { Label label; L(label); return label; } - void inLocalLabel() { labelMgr_.enterLocal(); } - void outLocalLabel() { labelMgr_.leaveLocal(); } - /* - assign src to dst - require - dst : does not used by L() - src : used by L() - */ - void assignL(Label& dst, const Label& src) { labelMgr_.assign(dst, src); } - /* - put address of label to buffer - @note the put size is 4(32-bit), 8(64-bit) - */ - void putL(std::string label) { putL_inner(label); } - void putL(const Label& label) { putL_inner(label); } - - void jmp(const Operand& op) { opR_ModM(op, BIT, 4, 0xFF, NONE, NONE, true); } - void jmp(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0xEB, 0xE9, 0); } - void jmp(const char *label, LabelType type = T_AUTO) { jmp(std::string(label), type); } - void jmp(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0xEB, 0xE9, 0); } - void jmp(const void *addr, LabelType type = T_AUTO) { opJmpAbs(addr, type, 0xEB, 0xE9); } - - void call(const Operand& op) { opR_ModM(op, 16 | i32e, 2, 0xFF, NONE, NONE, true); } - // call(string label), not const std::string& - void call(std::string label) { opJmp(label, T_NEAR, 0, 0xE8, 0); } - void call(const char *label) { call(std::string(label)); } - void call(const Label& label) { opJmp(label, T_NEAR, 0, 0xE8, 0); } - // call(function pointer) -#ifdef XBYAK_VARIADIC_TEMPLATE - template - void call(Ret(*func)(Params...)) { call(reinterpret_cast(func)); } -#endif - void call(const void *addr) { opJmpAbs(addr, T_NEAR, 0, 0xE8); } - - void test(const Operand& op, const Reg& reg) - { - opModRM(reg, op, op.isREG() && (op.getKind() == reg.getKind()), op.isMEM(), 0x84); - } - void test(const Operand& op, uint32 imm) - { - verifyMemHasSize(op); - int immSize = (std::min)(op.getBit() / 8, 4U); - if (op.isREG() && op.getIdx() == 0) { // al, ax, eax - rex(op); - db(0xA8 | (op.isBit(8) ? 0 : 1)); - } else { - opR_ModM(op, 0, 0, 0xF6, NONE, NONE, false, immSize); - } - db(imm, immSize); - } - void imul(const Reg& reg, const Operand& op) - { - opModRM(reg, op, op.isREG() && (reg.getKind() == op.getKind()), op.isMEM(), 0x0F, 0xAF); - } - void imul(const Reg& reg, const Operand& op, int imm) - { - int s = inner::IsInDisp8(imm) ? 1 : 0; - int immSize = s ? 1 : reg.isREG(16) ? 
- void test(const Operand& op, const Reg& reg) - { - opModRM(reg, op, op.isREG() && (op.getKind() == reg.getKind()), op.isMEM(), 0x84); - } - void test(const Operand& op, uint32 imm) - { - verifyMemHasSize(op); - int immSize = (std::min)(op.getBit() / 8, 4U); - if (op.isREG() && op.getIdx() == 0) { // al, ax, eax - rex(op); - db(0xA8 | (op.isBit(8) ? 0 : 1)); - } else { - opR_ModM(op, 0, 0, 0xF6, NONE, NONE, false, immSize); - } - db(imm, immSize); - } - void imul(const Reg& reg, const Operand& op) - { - opModRM(reg, op, op.isREG() && (reg.getKind() == op.getKind()), op.isMEM(), 0x0F, 0xAF); - } - void imul(const Reg& reg, const Operand& op, int imm) - { - int s = inner::IsInDisp8(imm) ? 1 : 0; - int immSize = s ? 1 : reg.isREG(16) ? 2 : 4; - opModRM(reg, op, op.isREG() && (reg.getKind() == op.getKind()), op.isMEM(), 0x69 | (s << 1), NONE, NONE, immSize); - db(imm, immSize); - } - void push(const Operand& op) { opPushPop(op, 0xFF, 6, 0x50); } - void pop(const Operand& op) { opPushPop(op, 0x8F, 0, 0x58); } - void push(const AddressFrame& af, uint32 imm) - { - if (af.bit_ == 8 && inner::IsInDisp8(imm)) { - db(0x6A); db(imm); - } else if (af.bit_ == 16 && isInDisp16(imm)) { - db(0x66); db(0x68); dw(imm); - } else { - db(0x68); dd(imm); - } - } - /* use "push(word, 4)" if you want "push word 4" */ - void push(uint32 imm) - { - if (inner::IsInDisp8(imm)) { - push(byte, imm); - } else { - push(dword, imm); - } - }
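The comment above is the contract: push(imm) picks the shortest encoding by value, while the AddressFrame overload forces an operand size. A small sketch of the bytes each branch emits (same include and subclass assumptions as the earlier sketch):

struct PushDemo : Xbyak::CodeGenerator {
    PushDemo() {
        push(4);       // IsInDisp8(4) holds -> push(byte, 4):    6A 04
        push(1000);    // too wide for disp8 -> push(dword, imm): 68 E8 03 00 00
        push(word, 4); // forced 16-bit form:                     66 68 04 00
    }
};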
- void mov(const Operand& reg1, const Operand& reg2) - { - const Reg *reg = 0; - const Address *addr = 0; - uint8 code = 0; - if (reg1.isREG() && reg1.getIdx() == 0 && reg2.isMEM()) { // mov eax|ax|al, [disp] - reg = &reg1.getReg(); - addr = &reg2.getAddress(); - code = 0xA0; - } else - if (reg1.isMEM() && reg2.isREG() && reg2.getIdx() == 0) { // mov [disp], eax|ax|al - reg = &reg2.getReg(); - addr = &reg1.getAddress(); - code = 0xA2; - } -#ifdef XBYAK64 - if (addr && addr->is64bitDisp()) { - if (code) { - rex(*reg); - db(reg1.isREG(8) ? 0xA0 : reg1.isREG() ? 0xA1 : reg2.isREG(8) ? 0xA2 : 0xA3); - db(addr->getDisp(), 8); - } else { - throw Error(ERR_BAD_COMBINATION); - } - } else -#else - if (code && addr->isOnlyDisp()) { - rex(*reg, *addr); - db(code | (reg->isBit(8) ? 0 : 1)); - dd(static_cast<uint32>(addr->getDisp())); - } else -#endif - { - opRM_RM(reg1, reg2, 0x88); - } - } - void mov(const Operand& op, size_t imm) - { - if (op.isREG()) { - const int size = mov_imm(op.getReg(), imm); - db(imm, size); - } else if (op.isMEM()) { - verifyMemHasSize(op); - int immSize = op.getBit() / 8; - if (immSize <= 4) { - sint64 s = sint64(imm) >> (immSize * 8); - if (s != 0 && s != -1) throw Error(ERR_IMM_IS_TOO_BIG); - } else { - if (!inner::IsInInt32(imm)) throw Error(ERR_IMM_IS_TOO_BIG); - immSize = 4; - } - opModM(op.getAddress(), Reg(0, Operand::REG, op.getBit()), 0xC6, NONE, NONE, immSize); - db(static_cast<uint32>(imm), immSize); - } else { - throw Error(ERR_BAD_COMBINATION); - } - } - void mov(const NativeReg& reg, const char *label) // can't use std::string - { - if (label == 0) { - mov(static_cast<const Operand&>(reg), 0); // call imm - return; - } - mov_imm(reg, dummyAddr); - putL(label); - } - void mov(const NativeReg& reg, const Label& label) - { - mov_imm(reg, dummyAddr); - putL(label); - } - void xchg(const Operand& op1, const Operand& op2) - { - const Operand *p1 = &op1, *p2 = &op2; - if (p1->isMEM() || (p2->isREG(16 | i32e) && p2->getIdx() == 0)) { - p1 = &op2; p2 = &op1; - } - if (p1->isMEM()) throw Error(ERR_BAD_COMBINATION); - if (p2->isREG() && (p1->isREG(16 | i32e) && p1->getIdx() == 0) -#ifdef XBYAK64 - && (p2->getIdx() != 0 || !p1->isREG(32)) -#endif - ) { - rex(*p2, *p1); db(0x90 | (p2->getIdx() & 7)); - return; - } - opModRM(*p1, *p2, (p1->isREG() && p2->isREG() && (p1->getBit() == p2->getBit())), p2->isMEM(), 0x86 | (p1->isBit(8) ? 0 : 1)); - } - -#ifndef XBYAK_DISABLE_SEGMENT - void push(const Segment& seg) - { - switch (seg.getIdx()) { - case Segment::es: db(0x06); break; - case Segment::cs: db(0x0E); break; - case Segment::ss: db(0x16); break; - case Segment::ds: db(0x1E); break; - case Segment::fs: db(0x0F); db(0xA0); break; - case Segment::gs: db(0x0F); db(0xA8); break; - default: - assert(0); - } - } - void pop(const Segment& seg) - { - switch (seg.getIdx()) { - case Segment::es: db(0x07); break; - case Segment::cs: throw Error(ERR_BAD_COMBINATION); - case Segment::ss: db(0x17); break; - case Segment::ds: db(0x1F); break; - case Segment::fs: db(0x0F); db(0xA1); break; - case Segment::gs: db(0x0F); db(0xA9); break; - default: - assert(0); - } - } - void putSeg(const Segment& seg) - { - switch (seg.getIdx()) { - case Segment::es: db(0x2E); break; - case Segment::cs: db(0x36); break; - case Segment::ss: db(0x3E); break; - case Segment::ds: db(0x26); break; - case Segment::fs: db(0x64); break; - case Segment::gs: db(0x65); break; - default: - assert(0); - } - } - void mov(const Operand& op, const Segment& seg) - { - opModRM(Reg8(seg.getIdx()), op, op.isREG(16|i32e), op.isMEM(), 0x8C); - } - void mov(const Segment& seg, const Operand& op) - { - opModRM(Reg8(seg.getIdx()), op.isREG(16|i32e) ? static_cast<const Operand&>(op.getReg().cvt32()) : op, op.isREG(16|i32e), op.isMEM(), 0x8E); - } -#endif - - enum { NONE = 256 }; - // constructor - CodeGenerator(size_t maxSize = DEFAULT_MAX_CODE_SIZE, void *userPtr = 0, Allocator *allocator = 0) - : CodeArray(maxSize, userPtr, allocator) - , mm0(0), mm1(1), mm2(2), mm3(3), mm4(4), mm5(5), mm6(6), mm7(7) - , xmm0(0), xmm1(1), xmm2(2), xmm3(3), xmm4(4), xmm5(5), xmm6(6), xmm7(7) - , ymm0(0), ymm1(1), ymm2(2), ymm3(3), ymm4(4), ymm5(5), ymm6(6), ymm7(7) - , zmm0(0), zmm1(1), zmm2(2), zmm3(3), zmm4(4), zmm5(5), zmm6(6), zmm7(7) - // for my convenience - , xm0(xmm0), xm1(xmm1), xm2(xmm2), xm3(xmm3), xm4(xmm4), xm5(xmm5), xm6(xmm6), xm7(xmm7) - , ym0(ymm0), ym1(ymm1), ym2(ymm2), ym3(ymm3), ym4(ymm4), ym5(ymm5), ym6(ymm6), ym7(ymm7) - , zm0(zmm0), zm1(zmm1), zm2(zmm2), zm3(zmm3), zm4(zmm4), zm5(zmm5), zm6(zmm6), zm7(zmm7) - - , eax(Operand::EAX), ecx(Operand::ECX), edx(Operand::EDX), ebx(Operand::EBX), esp(Operand::ESP), ebp(Operand::EBP), esi(Operand::ESI), edi(Operand::EDI) - , ax(Operand::AX), cx(Operand::CX), dx(Operand::DX), bx(Operand::BX), sp(Operand::SP), bp(Operand::BP), si(Operand::SI), di(Operand::DI) - , al(Operand::AL), cl(Operand::CL), dl(Operand::DL), bl(Operand::BL), ah(Operand::AH), ch(Operand::CH), dh(Operand::DH), bh(Operand::BH) - , ptr(0), byte(8), word(16), dword(32), qword(64), xword(128), yword(256), zword(512) - , ptr_b(0, true), xword_b(128, true), yword_b(256, true), zword_b(512, true) - , st0(0), st1(1), st2(2), st3(3), st4(4), st5(5), st6(6), st7(7) - , k0(0), k1(1), k2(2), k3(3), k4(4), k5(5), k6(6), k7(7) - , bnd0(0), bnd1(1), bnd2(2), bnd3(3) - , T_sae(EvexModifierRounding::T_SAE), T_rn_sae(EvexModifierRounding::T_RN_SAE), T_rd_sae(EvexModifierRounding::T_RD_SAE), T_ru_sae(EvexModifierRounding::T_RU_SAE), T_rz_sae(EvexModifierRounding::T_RZ_SAE) - , T_z() -#ifdef XBYAK64 - , rax(Operand::RAX), rcx(Operand::RCX), rdx(Operand::RDX), rbx(Operand::RBX), rsp(Operand::RSP), rbp(Operand::RBP), rsi(Operand::RSI), rdi(Operand::RDI), r8(Operand::R8), r9(Operand::R9), r10(Operand::R10), r11(Operand::R11), r12(Operand::R12), r13(Operand::R13), r14(Operand::R14), r15(Operand::R15) - , r8d(8), r9d(9), r10d(10), r11d(11), r12d(12), r13d(13), r14d(14), r15d(15) - , r8w(8), r9w(9),
r10w(10), r11w(11), r12w(12), r13w(13), r14w(14), r15w(15) - , r8b(8), r9b(9), r10b(10), r11b(11), r12b(12), r13b(13), r14b(14), r15b(15) - , spl(Operand::SPL, true), bpl(Operand::BPL, true), sil(Operand::SIL, true), dil(Operand::DIL, true) - , xmm8(8), xmm9(9), xmm10(10), xmm11(11), xmm12(12), xmm13(13), xmm14(14), xmm15(15) - , xmm16(16), xmm17(17), xmm18(18), xmm19(19), xmm20(20), xmm21(21), xmm22(22), xmm23(23) - , xmm24(24), xmm25(25), xmm26(26), xmm27(27), xmm28(28), xmm29(29), xmm30(30), xmm31(31) - , ymm8(8), ymm9(9), ymm10(10), ymm11(11), ymm12(12), ymm13(13), ymm14(14), ymm15(15) - , ymm16(16), ymm17(17), ymm18(18), ymm19(19), ymm20(20), ymm21(21), ymm22(22), ymm23(23) - , ymm24(24), ymm25(25), ymm26(26), ymm27(27), ymm28(28), ymm29(29), ymm30(30), ymm31(31) - , zmm8(8), zmm9(9), zmm10(10), zmm11(11), zmm12(12), zmm13(13), zmm14(14), zmm15(15) - , zmm16(16), zmm17(17), zmm18(18), zmm19(19), zmm20(20), zmm21(21), zmm22(22), zmm23(23) - , zmm24(24), zmm25(25), zmm26(26), zmm27(27), zmm28(28), zmm29(29), zmm30(30), zmm31(31) - // for my convenience - , xm8(xmm8), xm9(xmm9), xm10(xmm10), xm11(xmm11), xm12(xmm12), xm13(xmm13), xm14(xmm14), xm15(xmm15) - , xm16(xmm16), xm17(xmm17), xm18(xmm18), xm19(xmm19), xm20(xmm20), xm21(xmm21), xm22(xmm22), xm23(xmm23) - , xm24(xmm24), xm25(xmm25), xm26(xmm26), xm27(xmm27), xm28(xmm28), xm29(xmm29), xm30(xmm30), xm31(xmm31) - , ym8(ymm8), ym9(ymm9), ym10(ymm10), ym11(ymm11), ym12(ymm12), ym13(ymm13), ym14(ymm14), ym15(ymm15) - , ym16(ymm16), ym17(ymm17), ym18(ymm18), ym19(ymm19), ym20(ymm20), ym21(ymm21), ym22(ymm22), ym23(ymm23) - , ym24(ymm24), ym25(ymm25), ym26(ymm26), ym27(ymm27), ym28(ymm28), ym29(ymm29), ym30(ymm30), ym31(ymm31) - , zm8(zmm8), zm9(zmm9), zm10(zmm10), zm11(zmm11), zm12(zmm12), zm13(zmm13), zm14(zmm14), zm15(zmm15) - , zm16(zmm16), zm17(zmm17), zm18(zmm18), zm19(zmm19), zm20(zmm20), zm21(zmm21), zm22(zmm22), zm23(zmm23) - , zm24(zmm24), zm25(zmm25), zm26(zmm26), zm27(zmm27), zm28(zmm28), zm29(zmm29), zm30(zmm30), zm31(zmm31) - , rip() -#endif -#ifndef XBYAK_DISABLE_SEGMENT - , es(Segment::es), cs(Segment::cs), ss(Segment::ss), ds(Segment::ds), fs(Segment::fs), gs(Segment::gs) -#endif - { - labelMgr_.set(this); - } - void reset() - { - resetSize(); - labelMgr_.reset(); - labelMgr_.set(this); - } - bool hasUndefinedLabel() const { return labelMgr_.hasUndefSlabel() || labelMgr_.hasUndefClabel(); } - /* - MUST call ready() to complete generating code if you use AutoGrow mode. - It is not necessary for the other mode if hasUndefinedLabel() is true. 
- */ - void ready(ProtectMode mode = PROTECT_RWE) - { - if (hasUndefinedLabel()) throw Error(ERR_LABEL_IS_NOT_FOUND); - if (isAutoGrow()) { - calcJmpAddress(); - if (useProtect()) setProtectMode(mode); - } - } - // set read/exec - void readyRE() { return ready(PROTECT_RE); } -#ifdef XBYAK_TEST - void dump(bool doClear = true) - { - CodeArray::dump(); - if (doClear) size_ = 0; - } -#endif - -#ifdef XBYAK_UNDEF_JNL - #undef jnl -#endif - - /* - use single byte nop if useMultiByteNop = false - */ - void nop(size_t size = 1, bool useMultiByteNop = true) - { - if (!useMultiByteNop) { - for (size_t i = 0; i < size; i++) { - db(0x90); - } - return; - } - /* - Intel Architectures Software Developer's Manual Volume 2 - recommended multi-byte sequence of NOP instruction - AMD and Intel seem to agree on the same sequences for up to 9 bytes: - https://support.amd.com/TechDocs/55723_SOG_Fam_17h_Processors_3.00.pdf - */ - static const uint8 nopTbl[9][9] = { - {0x90}, - {0x66, 0x90}, - {0x0F, 0x1F, 0x00}, - {0x0F, 0x1F, 0x40, 0x00}, - {0x0F, 0x1F, 0x44, 0x00, 0x00}, - {0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00}, - {0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00}, - {0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, - }; - const size_t n = sizeof(nopTbl) / sizeof(nopTbl[0]); - while (size > 0) { - size_t len = (std::min)(n, size); - const uint8 *seq = nopTbl[len - 1]; - db(seq, len); - size -= len; - } - } - -#ifndef XBYAK_DONT_READ_LIST -#include "xbyak_mnemonic.h" - /* - use single byte nop if useMultiByteNop = false - */ - void align(size_t x = 16, bool useMultiByteNop = true) - { - if (x == 1) return; - if (x < 1 || (x & (x - 1))) throw Error(ERR_BAD_ALIGN); - if (isAutoGrow() && x > inner::ALIGN_PAGE_SIZE) fprintf(stderr, "warning:autoGrow mode does not support %d align\n", (int)x); - size_t remain = size_t(getCurr()) % x; - if (remain) { - nop(x - remain, useMultiByteNop); - } - } -#endif -}; - -namespace util { -static const Mmx mm0(0), mm1(1), mm2(2), mm3(3), mm4(4), mm5(5), mm6(6), mm7(7); -static const Xmm xmm0(0), xmm1(1), xmm2(2), xmm3(3), xmm4(4), xmm5(5), xmm6(6), xmm7(7); -static const Ymm ymm0(0), ymm1(1), ymm2(2), ymm3(3), ymm4(4), ymm5(5), ymm6(6), ymm7(7); -static const Zmm zmm0(0), zmm1(1), zmm2(2), zmm3(3), zmm4(4), zmm5(5), zmm6(6), zmm7(7); -static const Reg32 eax(Operand::EAX), ecx(Operand::ECX), edx(Operand::EDX), ebx(Operand::EBX), esp(Operand::ESP), ebp(Operand::EBP), esi(Operand::ESI), edi(Operand::EDI); -static const Reg16 ax(Operand::AX), cx(Operand::CX), dx(Operand::DX), bx(Operand::BX), sp(Operand::SP), bp(Operand::BP), si(Operand::SI), di(Operand::DI); -static const Reg8 al(Operand::AL), cl(Operand::CL), dl(Operand::DL), bl(Operand::BL), ah(Operand::AH), ch(Operand::CH), dh(Operand::DH), bh(Operand::BH); -static const AddressFrame ptr(0), byte(8), word(16), dword(32), qword(64), xword(128), yword(256), zword(512); -static const AddressFrame ptr_b(0, true), xword_b(128, true), yword_b(256, true), zword_b(512, true); -static const Fpu st0(0), st1(1), st2(2), st3(3), st4(4), st5(5), st6(6), st7(7); -static const Opmask k0(0), k1(1), k2(2), k3(3), k4(4), k5(5), k6(6), k7(7); -static const BoundsReg bnd0(0), bnd1(1), bnd2(2), bnd3(3); -static const EvexModifierRounding T_sae(EvexModifierRounding::T_SAE), T_rn_sae(EvexModifierRounding::T_RN_SAE), T_rd_sae(EvexModifierRounding::T_RD_SAE), T_ru_sae(EvexModifierRounding::T_RU_SAE), T_rz_sae(EvexModifierRounding::T_RZ_SAE); -static const EvexModifierZero T_z; -#ifdef XBYAK64 -static const 
Reg64 rax(Operand::RAX), rcx(Operand::RCX), rdx(Operand::RDX), rbx(Operand::RBX), rsp(Operand::RSP), rbp(Operand::RBP), rsi(Operand::RSI), rdi(Operand::RDI), r8(Operand::R8), r9(Operand::R9), r10(Operand::R10), r11(Operand::R11), r12(Operand::R12), r13(Operand::R13), r14(Operand::R14), r15(Operand::R15); -static const Reg32 r8d(8), r9d(9), r10d(10), r11d(11), r12d(12), r13d(13), r14d(14), r15d(15); -static const Reg16 r8w(8), r9w(9), r10w(10), r11w(11), r12w(12), r13w(13), r14w(14), r15w(15); -static const Reg8 r8b(8), r9b(9), r10b(10), r11b(11), r12b(12), r13b(13), r14b(14), r15b(15), spl(Operand::SPL, true), bpl(Operand::BPL, true), sil(Operand::SIL, true), dil(Operand::DIL, true); -static const Xmm xmm8(8), xmm9(9), xmm10(10), xmm11(11), xmm12(12), xmm13(13), xmm14(14), xmm15(15); -static const Xmm xmm16(16), xmm17(17), xmm18(18), xmm19(19), xmm20(20), xmm21(21), xmm22(22), xmm23(23); -static const Xmm xmm24(24), xmm25(25), xmm26(26), xmm27(27), xmm28(28), xmm29(29), xmm30(30), xmm31(31); -static const Ymm ymm8(8), ymm9(9), ymm10(10), ymm11(11), ymm12(12), ymm13(13), ymm14(14), ymm15(15); -static const Ymm ymm16(16), ymm17(17), ymm18(18), ymm19(19), ymm20(20), ymm21(21), ymm22(22), ymm23(23); -static const Ymm ymm24(24), ymm25(25), ymm26(26), ymm27(27), ymm28(28), ymm29(29), ymm30(30), ymm31(31); -static const Zmm zmm8(8), zmm9(9), zmm10(10), zmm11(11), zmm12(12), zmm13(13), zmm14(14), zmm15(15); -static const Zmm zmm16(16), zmm17(17), zmm18(18), zmm19(19), zmm20(20), zmm21(21), zmm22(22), zmm23(23); -static const Zmm zmm24(24), zmm25(25), zmm26(26), zmm27(27), zmm28(28), zmm29(29), zmm30(30), zmm31(31); -static const RegRip rip; -#endif -#ifndef XBYAK_DISABLE_SEGMENT -static const Segment es(Segment::es), cs(Segment::cs), ss(Segment::ss), ds(Segment::ds), fs(Segment::fs), gs(Segment::gs); -#endif -} // util - -#ifdef _MSC_VER - #pragma warning(pop) -#endif - -} // end of namespace - -#endif // XBYAK_XBYAK_H_ diff --git a/vendor/github.com/dexon-foundation/mcl/src/xbyak/xbyak_mnemonic.h b/vendor/github.com/dexon-foundation/mcl/src/xbyak/xbyak_mnemonic.h deleted file mode 100644 index 766f2f6ec..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/xbyak/xbyak_mnemonic.h +++ /dev/null @@ -1,1972 +0,0 @@ -const char *getVersionString() const { return "5.751"; } -void adc(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x10, 2); } -void adc(const Operand& op1, const Operand& op2) { opRM_RM(op1, op2, 0x10); } -void adcx(const Reg32e& reg, const Operand& op) { opGen(reg, op, 0xF6, 0x66, isREG32_REG32orMEM, NONE, 0x38); } -void add(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x00, 0); } -void add(const Operand& op1, const Operand& op2) { opRM_RM(op1, op2, 0x00); } -void addpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x58, 0x66, isXMM_XMMorMEM); } -void addps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x58, 0x100, isXMM_XMMorMEM); } -void addsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x58, 0xF2, isXMM_XMMorMEM); } -void addss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x58, 0xF3, isXMM_XMMorMEM); } -void addsubpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xD0, 0x66, isXMM_XMMorMEM); } -void addsubps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xD0, 0xF2, isXMM_XMMorMEM); } -void adox(const Reg32e& reg, const Operand& op) { opGen(reg, op, 0xF6, 0xF3, isREG32_REG32orMEM, NONE, 0x38); } -void aesdec(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xDE, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void aesdeclast(const Xmm& xmm, 
const Operand& op) { opGen(xmm, op, 0xDF, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void aesenc(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xDC, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void aesenclast(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xDD, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void aesimc(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xDB, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void aeskeygenassist(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0xDF, 0x66, isXMM_XMMorMEM, imm, 0x3A); } -void and_(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x20, 4); } -void and_(const Operand& op1, const Operand& op2) { opRM_RM(op1, op2, 0x20); } -void andn(const Reg32e& r1, const Reg32e& r2, const Operand& op) { opGpr(r1, r2, op, T_0F38, 0xf2, true); } -void andnpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x55, 0x66, isXMM_XMMorMEM); } -void andnps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x55, 0x100, isXMM_XMMorMEM); } -void andpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x54, 0x66, isXMM_XMMorMEM); } -void andps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x54, 0x100, isXMM_XMMorMEM); } -void bextr(const Reg32e& r1, const Operand& op, const Reg32e& r2) { opGpr(r1, op, r2, T_0F38, 0xf7, false); } -void blendpd(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x0D, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } -void blendps(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x0C, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } -void blendvpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x15, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void blendvps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x14, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void blsi(const Reg32e& r, const Operand& op) { opGpr(Reg32e(3, r.getBit()), op, r, T_0F38, 0xf3, false); } -void blsmsk(const Reg32e& r, const Operand& op) { opGpr(Reg32e(2, r.getBit()), op, r, T_0F38, 0xf3, false); } -void blsr(const Reg32e& r, const Operand& op) { opGpr(Reg32e(1, r.getBit()), op, r, T_0F38, 0xf3, false); } -void bnd() { db(0xF2); } -void bndcl(const BoundsReg& bnd, const Operand& op) { db(0xF3); opR_ModM(op, i32e, bnd.getIdx(), 0x0F, 0x1A, NONE, !op.isMEM()); } -void bndcn(const BoundsReg& bnd, const Operand& op) { db(0xF2); opR_ModM(op, i32e, bnd.getIdx(), 0x0F, 0x1B, NONE, !op.isMEM()); } -void bndcu(const BoundsReg& bnd, const Operand& op) { db(0xF2); opR_ModM(op, i32e, bnd.getIdx(), 0x0F, 0x1A, NONE, !op.isMEM()); } -void bndldx(const BoundsReg& bnd, const Address& addr) { opMIB(addr, bnd, 0x0F, 0x1A); } -void bndmk(const BoundsReg& bnd, const Address& addr) { db(0xF3); opModM(addr, bnd, 0x0F, 0x1B); } -void bndmov(const Address& addr, const BoundsReg& bnd) { db(0x66); opModM(addr, bnd, 0x0F, 0x1B); } -void bndmov(const BoundsReg& bnd, const Operand& op) { db(0x66); opModRM(bnd, op, op.isBNDREG(), op.isMEM(), 0x0F, 0x1A); } -void bndstx(const Address& addr, const BoundsReg& bnd) { opMIB(addr, bnd, 0x0F, 0x1B); } -void bsf(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0xBC); } -void bsr(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0xBD); } -void bswap(const Reg32e& reg) { opModR(Reg32(1), reg, 0x0F); } -void bt(const Operand& op, const Reg& reg) { opModRM(reg, op, op.isREG(16|32|64) && op.getBit() == reg.getBit(), op.isMEM(), 0x0f, 0xA3); } -void bt(const Operand& op, uint8 imm) { opR_ModM(op, 16|32|64, 4, 0x0f, 0xba, NONE, false, 1); db(imm); }
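Note the trailing underscore on and_ (likewise not_ and or_ further down): the bare names are C++ keywords, so the mnemonics cannot use them. A short sketch combining it with the bt family encoded above and just below (same include and subclass assumptions as the earlier sketches):

struct BitDemo : Xbyak::CodeGenerator {
    BitDemo() {
        and_(eax, 0x0F); // 'and' is reserved in C++, hence the underscore
        bt(eax, 3);      // 0F BA /4 ib: copy bit 3 of eax into CF
        bts(eax, 3);     // 0F BA /5 ib: also set the bit
        ret();
    }
};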
-void btc(const Operand& op, const Reg& reg) { opModRM(reg, op, op.isREG(16|32|64) && op.getBit() == reg.getBit(), op.isMEM(), 0x0f, 0xBB); } -void btc(const Operand& op, uint8 imm) { opR_ModM(op, 16|32|64, 7, 0x0f, 0xba, NONE, false, 1); db(imm); } -void btr(const Operand& op, const Reg& reg) { opModRM(reg, op, op.isREG(16|32|64) && op.getBit() == reg.getBit(), op.isMEM(), 0x0f, 0xB3); } -void btr(const Operand& op, uint8 imm) { opR_ModM(op, 16|32|64, 6, 0x0f, 0xba, NONE, false, 1); db(imm); } -void bts(const Operand& op, const Reg& reg) { opModRM(reg, op, op.isREG(16|32|64) && op.getBit() == reg.getBit(), op.isMEM(), 0x0f, 0xAB); } -void bts(const Operand& op, uint8 imm) { opR_ModM(op, 16|32|64, 5, 0x0f, 0xba, NONE, false, 1); db(imm); } -void bzhi(const Reg32e& r1, const Operand& op, const Reg32e& r2) { opGpr(r1, op, r2, T_0F38, 0xf5, false); } -void cbw() { db(0x66); db(0x98); } -void cdq() { db(0x99); } -void clc() { db(0xF8); } -void cld() { db(0xFC); } -void clflush(const Address& addr) { opModM(addr, Reg32(7), 0x0F, 0xAE); } -void cli() { db(0xFA); } -void cmc() { db(0xF5); } -void cmova(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 7); }//-V524 -void cmovae(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 3); }//-V524 -void cmovb(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 2); }//-V524 -void cmovbe(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 6); }//-V524 -void cmovc(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 2); }//-V524 -void cmove(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 4); }//-V524 -void cmovg(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 15); }//-V524 -void cmovge(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 13); }//-V524 -void cmovl(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 12); }//-V524 -void cmovle(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 14); }//-V524 -void cmovna(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 6); }//-V524 -void cmovnae(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 2); }//-V524 -void cmovnb(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 3); }//-V524 -void cmovnbe(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 7); }//-V524 -void cmovnc(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 3); }//-V524 -void cmovne(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 5); }//-V524 -void cmovng(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 14); }//-V524 -void cmovnge(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 12); }//-V524 -void cmovnl(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 13); }//-V524 -void cmovnle(const Reg& reg, const Operand& op) { 
opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 15); }//-V524 -void cmovno(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 1); }//-V524 -void cmovnp(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 11); }//-V524 -void cmovns(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 9); }//-V524 -void cmovnz(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 5); }//-V524 -void cmovo(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 0); }//-V524 -void cmovp(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 10); }//-V524 -void cmovpe(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 10); }//-V524 -void cmovpo(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 11); }//-V524 -void cmovs(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 8); }//-V524 -void cmovz(const Reg& reg, const Operand& op) { opModRM(reg, op, op.isREG(16 | i32e), op.isMEM(), 0x0F, 0x40 | 4); }//-V524 -void cmp(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x38, 7); } -void cmp(const Operand& op1, const Operand& op2) { opRM_RM(op1, op2, 0x38); } -void cmpeqpd(const Xmm& x, const Operand& op) { cmppd(x, op, 0); } -void cmpeqps(const Xmm& x, const Operand& op) { cmpps(x, op, 0); } -void cmpeqsd(const Xmm& x, const Operand& op) { cmpsd(x, op, 0); } -void cmpeqss(const Xmm& x, const Operand& op) { cmpss(x, op, 0); } -void cmplepd(const Xmm& x, const Operand& op) { cmppd(x, op, 2); } -void cmpleps(const Xmm& x, const Operand& op) { cmpps(x, op, 2); } -void cmplesd(const Xmm& x, const Operand& op) { cmpsd(x, op, 2); } -void cmpless(const Xmm& x, const Operand& op) { cmpss(x, op, 2); } -void cmpltpd(const Xmm& x, const Operand& op) { cmppd(x, op, 1); } -void cmpltps(const Xmm& x, const Operand& op) { cmpps(x, op, 1); } -void cmpltsd(const Xmm& x, const Operand& op) { cmpsd(x, op, 1); } -void cmpltss(const Xmm& x, const Operand& op) { cmpss(x, op, 1); } -void cmpneqpd(const Xmm& x, const Operand& op) { cmppd(x, op, 4); } -void cmpneqps(const Xmm& x, const Operand& op) { cmpps(x, op, 4); } -void cmpneqsd(const Xmm& x, const Operand& op) { cmpsd(x, op, 4); } -void cmpneqss(const Xmm& x, const Operand& op) { cmpss(x, op, 4); } -void cmpnlepd(const Xmm& x, const Operand& op) { cmppd(x, op, 6); } -void cmpnleps(const Xmm& x, const Operand& op) { cmpps(x, op, 6); } -void cmpnlesd(const Xmm& x, const Operand& op) { cmpsd(x, op, 6); } -void cmpnless(const Xmm& x, const Operand& op) { cmpss(x, op, 6); } -void cmpnltpd(const Xmm& x, const Operand& op) { cmppd(x, op, 5); } -void cmpnltps(const Xmm& x, const Operand& op) { cmpps(x, op, 5); } -void cmpnltsd(const Xmm& x, const Operand& op) { cmpsd(x, op, 5); } -void cmpnltss(const Xmm& x, const Operand& op) { cmpss(x, op, 5); } -void cmpordpd(const Xmm& x, const Operand& op) { cmppd(x, op, 7); } -void cmpordps(const Xmm& x, const Operand& op) { cmpps(x, op, 7); } -void cmpordsd(const Xmm& x, const Operand& op) { cmpsd(x, op, 7); } -void cmpordss(const Xmm& x, const Operand& op) { cmpss(x, op, 7); } -void cmppd(const Xmm& xmm, const Operand& op, uint8 imm8) { opGen(xmm, op, 0xC2, 0x66, isXMM_XMMorMEM, imm8); } -void cmpps(const Xmm& xmm, const 
Operand& op, uint8 imm8) { opGen(xmm, op, 0xC2, 0x100, isXMM_XMMorMEM, imm8); } -void cmpsb() { db(0xA6); } -void cmpsd() { db(0xA7); } -void cmpsd(const Xmm& xmm, const Operand& op, uint8 imm8) { opGen(xmm, op, 0xC2, 0xF2, isXMM_XMMorMEM, imm8); } -void cmpss(const Xmm& xmm, const Operand& op, uint8 imm8) { opGen(xmm, op, 0xC2, 0xF3, isXMM_XMMorMEM, imm8); } -void cmpsw() { db(0x66); db(0xA7); } -void cmpunordpd(const Xmm& x, const Operand& op) { cmppd(x, op, 3); } -void cmpunordps(const Xmm& x, const Operand& op) { cmpps(x, op, 3); } -void cmpunordsd(const Xmm& x, const Operand& op) { cmpsd(x, op, 3); } -void cmpunordss(const Xmm& x, const Operand& op) { cmpss(x, op, 3); } -void cmpxchg(const Operand& op, const Reg& reg) { opModRM(reg, op, (op.isREG() && reg.isREG() && op.getBit() == reg.getBit()), op.isMEM(), 0x0F, 0xB0 | (reg.isBit(8) ? 0 : 1)); } -void cmpxchg8b(const Address& addr) { opModM(addr, Reg32(1), 0x0F, 0xC7); } -void comisd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x2F, 0x66, isXMM_XMMorMEM); } -void comiss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x2F, 0x100, isXMM_XMMorMEM); } -void cpuid() { db(0x0F); db(0xA2); } -void crc32(const Reg32e& reg, const Operand& op) { if (reg.isBit(32) && op.isBit(16)) db(0x66); db(0xF2); opModRM(reg, op, op.isREG(), op.isMEM(), 0x0F, 0x38, 0xF0 | (op.isBit(8) ? 0 : 1)); } -void cvtdq2pd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xE6, 0xF3, isXMM_XMMorMEM); } -void cvtdq2ps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5B, 0x100, isXMM_XMMorMEM); } -void cvtpd2dq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xE6, 0xF2, isXMM_XMMorMEM); } -void cvtpd2pi(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2D, 0x66, isMMX_XMMorMEM); } -void cvtpd2ps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5A, 0x66, isXMM_XMMorMEM); } -void cvtpi2pd(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2A, 0x66, isXMM_MMXorMEM); } -void cvtpi2ps(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2A, 0x100, isXMM_MMXorMEM); } -void cvtps2dq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5B, 0x66, isXMM_XMMorMEM); } -void cvtps2pd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5A, 0x100, isXMM_XMMorMEM); } -void cvtps2pi(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2D, 0x100, isMMX_XMMorMEM); } -void cvtsd2si(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2D, 0xF2, isREG32_XMMorMEM); } -void cvtsd2ss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5A, 0xF2, isXMM_XMMorMEM); } -void cvtsi2sd(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2A, 0xF2, isXMM_REG32orMEM); } -void cvtsi2ss(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2A, 0xF3, isXMM_REG32orMEM); } -void cvtss2sd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5A, 0xF3, isXMM_XMMorMEM); } -void cvtss2si(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2D, 0xF3, isREG32_XMMorMEM); } -void cvttpd2dq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xE6, 0x66, isXMM_XMMorMEM); } -void cvttpd2pi(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2C, 0x66, isMMX_XMMorMEM); } -void cvttps2dq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5B, 0xF3, isXMM_XMMorMEM); } -void cvttps2pi(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2C, 0x100, isMMX_XMMorMEM); } -void cvttsd2si(const Operand& reg, const Operand& op) { opGen(reg, op, 0x2C, 0xF2, isREG32_XMMorMEM); } -void cvttss2si(const Operand& reg, const Operand& op) { opGen(reg, 
op, 0x2C, 0xF3, isREG32_XMMorMEM); } -void cwd() { db(0x66); db(0x99); } -void cwde() { db(0x98); } -void dec(const Operand& op) { opIncDec(op, 0x48, 1); } -void div(const Operand& op) { opR_ModM(op, 0, 6, 0xF6); } -void divpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5E, 0x66, isXMM_XMMorMEM); } -void divps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5E, 0x100, isXMM_XMMorMEM); } -void divsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5E, 0xF2, isXMM_XMMorMEM); } -void divss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5E, 0xF3, isXMM_XMMorMEM); } -void dppd(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x41, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } -void dpps(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x40, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } -void emms() { db(0x0F); db(0x77); } -void extractps(const Operand& op, const Xmm& xmm, uint8 imm) { opExt(op, xmm, 0x17, imm); } -void f2xm1() { db(0xD9); db(0xF0); } -void fabs() { db(0xD9); db(0xE1); } -void fadd(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 0, 0); } -void fadd(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xD8C0, 0xDCC0); } -void fadd(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xD8C0, 0xDCC0); } -void faddp() { db(0xDE); db(0xC1); } -void faddp(const Fpu& reg1) { opFpuFpu(reg1, st0, 0x0000, 0xDEC0); } -void faddp(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0x0000, 0xDEC0); } -void fchs() { db(0xD9); db(0xE0); } -void fcmovb(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDAC0, 0x00C0); } -void fcmovb(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDAC0, 0x00C0); } -void fcmovbe(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDAD0, 0x00D0); } -void fcmovbe(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDAD0, 0x00D0); } -void fcmove(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDAC8, 0x00C8); } -void fcmove(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDAC8, 0x00C8); } -void fcmovnb(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDBC0, 0x00C0); } -void fcmovnb(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDBC0, 0x00C0); } -void fcmovnbe(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDBD0, 0x00D0); } -void fcmovnbe(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDBD0, 0x00D0); } -void fcmovne(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDBC8, 0x00C8); } -void fcmovne(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDBC8, 0x00C8); } -void fcmovnu(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDBD8, 0x00D8); } -void fcmovnu(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDBD8, 0x00D8); } -void fcmovu(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDAD8, 0x00D8); } -void fcmovu(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDAD8, 0x00D8); } -void fcom() { db(0xD8); db(0xD1); } -void fcom(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 2, 0); } -void fcom(const Fpu& reg) { opFpu(reg, 0xD8, 0xD0); } -void fcomi(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDBF0, 0x00F0); } -void fcomi(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDBF0, 0x00F0); } -void fcomip(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDFF0, 0x00F0); } -void fcomip(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDFF0, 0x00F0); } -void fcomp() { db(0xD8); db(0xD9); } -void fcomp(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 3, 0); } -void fcomp(const Fpu& reg) { opFpu(reg, 0xD8, 0xD8); } -void fcompp() { db(0xDE); db(0xD9); } -void fcos() { db(0xD9); db(0xFF); } -void 
fdecstp() { db(0xD9); db(0xF6); } -void fdiv(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 6, 0); } -void fdiv(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xD8F0, 0xDCF8); } -void fdiv(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xD8F0, 0xDCF8); } -void fdivp() { db(0xDE); db(0xF9); } -void fdivp(const Fpu& reg1) { opFpuFpu(reg1, st0, 0x0000, 0xDEF8); } -void fdivp(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0x0000, 0xDEF8); } -void fdivr(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 7, 0); } -void fdivr(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xD8F8, 0xDCF0); } -void fdivr(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xD8F8, 0xDCF0); } -void fdivrp() { db(0xDE); db(0xF1); } -void fdivrp(const Fpu& reg1) { opFpuFpu(reg1, st0, 0x0000, 0xDEF0); } -void fdivrp(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0x0000, 0xDEF0); } -void ffree(const Fpu& reg) { opFpu(reg, 0xDD, 0xC0); } -void fiadd(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 0, 0); } -void ficom(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 2, 0); } -void ficomp(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 3, 0); } -void fidiv(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 6, 0); } -void fidivr(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 7, 0); } -void fild(const Address& addr) { opFpuMem(addr, 0xDF, 0xDB, 0xDF, 0, 5); } -void fimul(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 1, 0); } -void fincstp() { db(0xD9); db(0xF7); } -void finit() { db(0x9B); db(0xDB); db(0xE3); } -void fist(const Address& addr) { opFpuMem(addr, 0xDF, 0xDB, 0x00, 2, 0); } -void fistp(const Address& addr) { opFpuMem(addr, 0xDF, 0xDB, 0xDF, 3, 7); } -void fisttp(const Address& addr) { opFpuMem(addr, 0xDF, 0xDB, 0xDD, 1, 0); } -void fisub(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 4, 0); } -void fisubr(const Address& addr) { opFpuMem(addr, 0xDE, 0xDA, 0x00, 5, 0); } -void fld(const Address& addr) { opFpuMem(addr, 0x00, 0xD9, 0xDD, 0, 0); } -void fld(const Fpu& reg) { opFpu(reg, 0xD9, 0xC0); } -void fld1() { db(0xD9); db(0xE8); } -void fldcw(const Address& addr) { opModM(addr, Reg32(5), 0xD9, 0x100); } -void fldl2e() { db(0xD9); db(0xEA); } -void fldl2t() { db(0xD9); db(0xE9); } -void fldlg2() { db(0xD9); db(0xEC); } -void fldln2() { db(0xD9); db(0xED); } -void fldpi() { db(0xD9); db(0xEB); } -void fldz() { db(0xD9); db(0xEE); } -void fmul(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 1, 0); } -void fmul(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xD8C8, 0xDCC8); } -void fmul(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xD8C8, 0xDCC8); } -void fmulp() { db(0xDE); db(0xC9); } -void fmulp(const Fpu& reg1) { opFpuFpu(reg1, st0, 0x0000, 0xDEC8); } -void fmulp(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0x0000, 0xDEC8); } -void fninit() { db(0xDB); db(0xE3); } -void fnop() { db(0xD9); db(0xD0); } -void fpatan() { db(0xD9); db(0xF3); } -void fprem() { db(0xD9); db(0xF8); } -void fprem1() { db(0xD9); db(0xF5); } -void fptan() { db(0xD9); db(0xF2); } -void frndint() { db(0xD9); db(0xFC); } -void fscale() { db(0xD9); db(0xFD); } -void fsin() { db(0xD9); db(0xFE); } -void fsincos() { db(0xD9); db(0xFB); } -void fsqrt() { db(0xD9); db(0xFA); } -void fst(const Address& addr) { opFpuMem(addr, 0x00, 0xD9, 0xDD, 2, 0); } -void fst(const Fpu& reg) { opFpu(reg, 0xDD, 0xD0); } -void fstcw(const Address& addr) { db(0x9B); opModM(addr, Reg32(7), 0xD9, NONE); }
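The x87 mnemonics above all funnel through opFpu/opFpuFpu/opFpuMem with paired opcode tables for the st0-first and st0-second operand orders. A minimal sketch of the stack discipline they encode (byte values follow from the tables above; same include and subclass assumptions as the earlier sketches):

struct FpuDemo : Xbyak::CodeGenerator {
    FpuDemo() {
        fld1();          // D9 E8: push 1.0 onto the x87 stack
        fldpi();         // D9 EB: push pi; now st0 = pi, st1 = 1.0
        faddp(st1, st0); // DE C1: st1 += st0, then pop; the sum ends up in st0
        ret();
    }
};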
-void fstp(const Address& addr) { opFpuMem(addr, 0x00, 0xD9, 0xDD, 3, 0); } -void fstp(const Fpu& reg) { opFpu(reg, 0xDD, 0xD8); } -void fsub(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 4, 0); } -void fsub(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xD8E0, 0xDCE8); } -void fsub(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xD8E0, 0xDCE8); } -void fsubp() { db(0xDE); db(0xE9); } -void fsubp(const Fpu& reg1) { opFpuFpu(reg1, st0, 0x0000, 0xDEE8); } -void fsubp(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0x0000, 0xDEE8); } -void fsubr(const Address& addr) { opFpuMem(addr, 0x00, 0xD8, 0xDC, 5, 0); } -void fsubr(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xD8E8, 0xDCE0); } -void fsubr(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xD8E8, 0xDCE0); } -void fsubrp() { db(0xDE); db(0xE1); } -void fsubrp(const Fpu& reg1) { opFpuFpu(reg1, st0, 0x0000, 0xDEE0); } -void fsubrp(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0x0000, 0xDEE0); } -void ftst() { db(0xD9); db(0xE4); } -void fucom() { db(0xDD); db(0xE1); } -void fucom(const Fpu& reg) { opFpu(reg, 0xDD, 0xE0); } -void fucomi(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDBE8, 0x00E8); } -void fucomi(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDBE8, 0x00E8); } -void fucomip(const Fpu& reg1) { opFpuFpu(st0, reg1, 0xDFE8, 0x00E8); } -void fucomip(const Fpu& reg1, const Fpu& reg2) { opFpuFpu(reg1, reg2, 0xDFE8, 0x00E8); } -void fucomp() { db(0xDD); db(0xE9); } -void fucomp(const Fpu& reg) { opFpu(reg, 0xDD, 0xE8); } -void fucompp() { db(0xDA); db(0xE9); } -void fwait() { db(0x9B); } -void fxam() { db(0xD9); db(0xE5); } -void fxch() { db(0xD9); db(0xC9); } -void fxch(const Fpu& reg) { opFpu(reg, 0xD9, 0xC8); } -void fxtract() { db(0xD9); db(0xF4); } -void fyl2x() { db(0xD9); db(0xF1); } -void fyl2xp1() { db(0xD9); db(0xF9); } -void gf2p8affineinvqb(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0xCF, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } -void gf2p8affineqb(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0xCE, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } -void gf2p8mulb(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xCF, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void haddpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x7C, 0x66, isXMM_XMMorMEM); } -void haddps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x7C, 0xF2, isXMM_XMMorMEM); } -void hsubpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x7D, 0x66, isXMM_XMMorMEM); } -void hsubps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x7D, 0xF2, isXMM_XMMorMEM); } -void idiv(const Operand& op) { opR_ModM(op, 0, 7, 0xF6); } -void imul(const Operand& op) { opR_ModM(op, 0, 5, 0xF6); } -void inc(const Operand& op) { opIncDec(op, 0x40, 0); } -void insertps(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x21, 0x66, isXMM_XMMorMEM, imm, 0x3A); } -void ja(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x77, 0x87, 0x0F); }//-V524 -void ja(const char *label, LabelType type = T_AUTO) { ja(std::string(label), type); }//-V524 -void ja(const void *addr) { opJmpAbs(addr, T_NEAR, 0x77, 0x87, 0x0F); }//-V524 -void ja(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x77, 0x87, 0x0F); }//-V524 -void jae(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x73, 0x83, 0x0F); }//-V524 -void jae(const char *label, LabelType type = T_AUTO) { jae(std::string(label), type); }//-V524 -void jae(const void *addr) { opJmpAbs(addr, T_NEAR, 0x73, 0x83, 0x0F); }//-V524 -void 
jae(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x73, 0x83, 0x0F); }//-V524 -void jb(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x72, 0x82, 0x0F); }//-V524 -void jb(const char *label, LabelType type = T_AUTO) { jb(std::string(label), type); }//-V524 -void jb(const void *addr) { opJmpAbs(addr, T_NEAR, 0x72, 0x82, 0x0F); }//-V524 -void jb(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x72, 0x82, 0x0F); }//-V524 -void jbe(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x76, 0x86, 0x0F); }//-V524 -void jbe(const char *label, LabelType type = T_AUTO) { jbe(std::string(label), type); }//-V524 -void jbe(const void *addr) { opJmpAbs(addr, T_NEAR, 0x76, 0x86, 0x0F); }//-V524 -void jbe(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x76, 0x86, 0x0F); }//-V524 -void jc(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x72, 0x82, 0x0F); }//-V524 -void jc(const char *label, LabelType type = T_AUTO) { jc(std::string(label), type); }//-V524 -void jc(const void *addr) { opJmpAbs(addr, T_NEAR, 0x72, 0x82, 0x0F); }//-V524 -void jc(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x72, 0x82, 0x0F); }//-V524 -void je(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x74, 0x84, 0x0F); }//-V524 -void je(const char *label, LabelType type = T_AUTO) { je(std::string(label), type); }//-V524 -void je(const void *addr) { opJmpAbs(addr, T_NEAR, 0x74, 0x84, 0x0F); }//-V524 -void je(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x74, 0x84, 0x0F); }//-V524 -void jg(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7F, 0x8F, 0x0F); }//-V524 -void jg(const char *label, LabelType type = T_AUTO) { jg(std::string(label), type); }//-V524 -void jg(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7F, 0x8F, 0x0F); }//-V524 -void jg(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7F, 0x8F, 0x0F); }//-V524 -void jge(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7D, 0x8D, 0x0F); }//-V524 -void jge(const char *label, LabelType type = T_AUTO) { jge(std::string(label), type); }//-V524 -void jge(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7D, 0x8D, 0x0F); }//-V524 -void jge(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7D, 0x8D, 0x0F); }//-V524 -void jl(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7C, 0x8C, 0x0F); }//-V524 -void jl(const char *label, LabelType type = T_AUTO) { jl(std::string(label), type); }//-V524 -void jl(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7C, 0x8C, 0x0F); }//-V524 -void jl(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7C, 0x8C, 0x0F); }//-V524 -void jle(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7E, 0x8E, 0x0F); }//-V524 -void jle(const char *label, LabelType type = T_AUTO) { jle(std::string(label), type); }//-V524 -void jle(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7E, 0x8E, 0x0F); }//-V524 -void jle(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7E, 0x8E, 0x0F); }//-V524 -void jna(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x76, 0x86, 0x0F); }//-V524 -void jna(const char *label, LabelType type = T_AUTO) { jna(std::string(label), type); }//-V524 -void jna(const void *addr) { opJmpAbs(addr, T_NEAR, 0x76, 0x86, 0x0F); }//-V524 -void jna(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x76, 0x86, 0x0F); }//-V524 -void jnae(const Label& 
label, LabelType type = T_AUTO) { opJmp(label, type, 0x72, 0x82, 0x0F); }//-V524 -void jnae(const char *label, LabelType type = T_AUTO) { jnae(std::string(label), type); }//-V524 -void jnae(const void *addr) { opJmpAbs(addr, T_NEAR, 0x72, 0x82, 0x0F); }//-V524 -void jnae(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x72, 0x82, 0x0F); }//-V524 -void jnb(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x73, 0x83, 0x0F); }//-V524 -void jnb(const char *label, LabelType type = T_AUTO) { jnb(std::string(label), type); }//-V524 -void jnb(const void *addr) { opJmpAbs(addr, T_NEAR, 0x73, 0x83, 0x0F); }//-V524 -void jnb(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x73, 0x83, 0x0F); }//-V524 -void jnbe(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x77, 0x87, 0x0F); }//-V524 -void jnbe(const char *label, LabelType type = T_AUTO) { jnbe(std::string(label), type); }//-V524 -void jnbe(const void *addr) { opJmpAbs(addr, T_NEAR, 0x77, 0x87, 0x0F); }//-V524 -void jnbe(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x77, 0x87, 0x0F); }//-V524 -void jnc(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x73, 0x83, 0x0F); }//-V524 -void jnc(const char *label, LabelType type = T_AUTO) { jnc(std::string(label), type); }//-V524 -void jnc(const void *addr) { opJmpAbs(addr, T_NEAR, 0x73, 0x83, 0x0F); }//-V524 -void jnc(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x73, 0x83, 0x0F); }//-V524 -void jne(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x75, 0x85, 0x0F); }//-V524 -void jne(const char *label, LabelType type = T_AUTO) { jne(std::string(label), type); }//-V524 -void jne(const void *addr) { opJmpAbs(addr, T_NEAR, 0x75, 0x85, 0x0F); }//-V524 -void jne(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x75, 0x85, 0x0F); }//-V524 -void jng(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7E, 0x8E, 0x0F); }//-V524 -void jng(const char *label, LabelType type = T_AUTO) { jng(std::string(label), type); }//-V524 -void jng(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7E, 0x8E, 0x0F); }//-V524 -void jng(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7E, 0x8E, 0x0F); }//-V524 -void jnge(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7C, 0x8C, 0x0F); }//-V524 -void jnge(const char *label, LabelType type = T_AUTO) { jnge(std::string(label), type); }//-V524 -void jnge(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7C, 0x8C, 0x0F); }//-V524 -void jnge(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7C, 0x8C, 0x0F); }//-V524 -void jnl(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7D, 0x8D, 0x0F); }//-V524 -void jnl(const char *label, LabelType type = T_AUTO) { jnl(std::string(label), type); }//-V524 -void jnl(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7D, 0x8D, 0x0F); }//-V524 -void jnl(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7D, 0x8D, 0x0F); }//-V524 -void jnle(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7F, 0x8F, 0x0F); }//-V524 -void jnle(const char *label, LabelType type = T_AUTO) { jnle(std::string(label), type); }//-V524 -void jnle(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7F, 0x8F, 0x0F); }//-V524 -void jnle(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7F, 0x8F, 0x0F); }//-V524 -void jno(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x71, 0x81, 0x0F); 
}//-V524 -void jno(const char *label, LabelType type = T_AUTO) { jno(std::string(label), type); }//-V524 -void jno(const void *addr) { opJmpAbs(addr, T_NEAR, 0x71, 0x81, 0x0F); }//-V524 -void jno(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x71, 0x81, 0x0F); }//-V524 -void jnp(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7B, 0x8B, 0x0F); }//-V524 -void jnp(const char *label, LabelType type = T_AUTO) { jnp(std::string(label), type); }//-V524 -void jnp(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7B, 0x8B, 0x0F); }//-V524 -void jnp(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7B, 0x8B, 0x0F); }//-V524 -void jns(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x79, 0x89, 0x0F); }//-V524 -void jns(const char *label, LabelType type = T_AUTO) { jns(std::string(label), type); }//-V524 -void jns(const void *addr) { opJmpAbs(addr, T_NEAR, 0x79, 0x89, 0x0F); }//-V524 -void jns(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x79, 0x89, 0x0F); }//-V524 -void jnz(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x75, 0x85, 0x0F); }//-V524 -void jnz(const char *label, LabelType type = T_AUTO) { jnz(std::string(label), type); }//-V524 -void jnz(const void *addr) { opJmpAbs(addr, T_NEAR, 0x75, 0x85, 0x0F); }//-V524 -void jnz(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x75, 0x85, 0x0F); }//-V524 -void jo(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x70, 0x80, 0x0F); }//-V524 -void jo(const char *label, LabelType type = T_AUTO) { jo(std::string(label), type); }//-V524 -void jo(const void *addr) { opJmpAbs(addr, T_NEAR, 0x70, 0x80, 0x0F); }//-V524 -void jo(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x70, 0x80, 0x0F); }//-V524 -void jp(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7A, 0x8A, 0x0F); }//-V524 -void jp(const char *label, LabelType type = T_AUTO) { jp(std::string(label), type); }//-V524 -void jp(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7A, 0x8A, 0x0F); }//-V524 -void jp(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7A, 0x8A, 0x0F); }//-V524 -void jpe(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7A, 0x8A, 0x0F); }//-V524 -void jpe(const char *label, LabelType type = T_AUTO) { jpe(std::string(label), type); }//-V524 -void jpe(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7A, 0x8A, 0x0F); }//-V524 -void jpe(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7A, 0x8A, 0x0F); }//-V524 -void jpo(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x7B, 0x8B, 0x0F); }//-V524 -void jpo(const char *label, LabelType type = T_AUTO) { jpo(std::string(label), type); }//-V524 -void jpo(const void *addr) { opJmpAbs(addr, T_NEAR, 0x7B, 0x8B, 0x0F); }//-V524 -void jpo(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x7B, 0x8B, 0x0F); }//-V524 -void js(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x78, 0x88, 0x0F); }//-V524 -void js(const char *label, LabelType type = T_AUTO) { js(std::string(label), type); }//-V524 -void js(const void *addr) { opJmpAbs(addr, T_NEAR, 0x78, 0x88, 0x0F); }//-V524 -void js(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x78, 0x88, 0x0F); }//-V524 -void jz(const Label& label, LabelType type = T_AUTO) { opJmp(label, type, 0x74, 0x84, 0x0F); }//-V524 -void jz(const char *label, LabelType type = T_AUTO) { jz(std::string(label), type); }//-V524 -void 
jz(const void *addr) { opJmpAbs(addr, T_NEAR, 0x74, 0x84, 0x0F); }//-V524 -void jz(std::string label, LabelType type = T_AUTO) { opJmp(label, type, 0x74, 0x84, 0x0F); }//-V524 -void lahf() { db(0x9F); } -void lddqu(const Xmm& xmm, const Address& addr) { db(0xF2); opModM(addr, xmm, 0x0F, 0xF0); } -void ldmxcsr(const Address& addr) { opModM(addr, Reg32(2), 0x0F, 0xAE); } -void lea(const Reg& reg, const Address& addr) { if (!reg.isBit(16 | i32e)) throw Error(ERR_BAD_SIZE_OF_REGISTER); opModM(addr, reg, 0x8D); } -void lfence() { db(0x0F); db(0xAE); db(0xE8); } -void lock() { db(0xF0); } -void lzcnt(const Reg& reg, const Operand& op) { opSp1(reg, op, 0xF3, 0x0F, 0xBD); } -void maskmovdqu(const Xmm& reg1, const Xmm& reg2) { db(0x66); opModR(reg1, reg2, 0x0F, 0xF7); } -void maskmovq(const Mmx& reg1, const Mmx& reg2) { if (!reg1.isMMX() || !reg2.isMMX()) throw Error(ERR_BAD_COMBINATION); opModR(reg1, reg2, 0x0F, 0xF7); } -void maxpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5F, 0x66, isXMM_XMMorMEM); } -void maxps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5F, 0x100, isXMM_XMMorMEM); } -void maxsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5F, 0xF2, isXMM_XMMorMEM); } -void maxss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5F, 0xF3, isXMM_XMMorMEM); } -void mfence() { db(0x0F); db(0xAE); db(0xF0); } -void minpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5D, 0x66, isXMM_XMMorMEM); } -void minps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5D, 0x100, isXMM_XMMorMEM); } -void minsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5D, 0xF2, isXMM_XMMorMEM); } -void minss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5D, 0xF3, isXMM_XMMorMEM); } -void monitor() { db(0x0F); db(0x01); db(0xC8); } -void movapd(const Address& addr, const Xmm& xmm) { db(0x66); opModM(addr, xmm, 0x0F, 0x29); } -void movapd(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x28, 0x66); } -void movaps(const Address& addr, const Xmm& xmm) { opModM(addr, xmm, 0x0F, 0x29); } -void movaps(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x28, 0x100); } -void movbe(const Address& addr, const Reg& reg) { opModM(addr, reg, 0x0F, 0x38, 0xF1); } -void movbe(const Reg& reg, const Address& addr) { opModM(addr, reg, 0x0F, 0x38, 0xF0); } -void movd(const Address& addr, const Mmx& mmx) { if (mmx.isXMM()) db(0x66); opModM(addr, mmx, 0x0F, 0x7E); } -void movd(const Mmx& mmx, const Address& addr) { if (mmx.isXMM()) db(0x66); opModM(addr, mmx, 0x0F, 0x6E); } -void movd(const Mmx& mmx, const Reg32& reg) { if (mmx.isXMM()) db(0x66); opModR(mmx, reg, 0x0F, 0x6E); } -void movd(const Reg32& reg, const Mmx& mmx) { if (mmx.isXMM()) db(0x66); opModR(mmx, reg, 0x0F, 0x7E); } -void movddup(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x12, 0xF2, isXMM_XMMorMEM, NONE, NONE); } -void movdq2q(const Mmx& mmx, const Xmm& xmm) { db(0xF2); opModR(mmx, xmm, 0x0F, 0xD6); } -void movdqa(const Address& addr, const Xmm& xmm) { db(0x66); opModM(addr, xmm, 0x0F, 0x7F); } -void movdqa(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x6F, 0x66); } -void movdqu(const Address& addr, const Xmm& xmm) { db(0xF3); opModM(addr, xmm, 0x0F, 0x7F); } -void movdqu(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x6F, 0xF3); } -void movhlps(const Xmm& reg1, const Xmm& reg2) { opModR(reg1, reg2, 0x0F, 0x12); } -void movhpd(const Operand& op1, const Operand& op2) { opMovXMM(op1, op2, 0x16, 0x66); } -void movhps(const Operand& op1, const Operand& op2) { opMovXMM(op1, op2, 0x16, 0x100); }
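Most of the mov* variants above differ only in a mandatory prefix (66, F3, F2, or none), emitted conditionally on the operand kind; movd is the clearest case, switching on mmx.isXMM(). A short sketch (same include and subclass assumptions as the earlier sketches):

struct MovDemo : Xbyak::CodeGenerator {
    MovDemo() {
        movd(xmm0, eax);    // 66 0F 6E: eax -> low dword of xmm0
        movd(mm0, eax);     //    0F 6E: same opcode, MMX form, no 66 prefix
        movaps(xmm1, xmm0); //    0F 28: plain register-to-register copy
    }
};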
-void movlhps(const Xmm& reg1, const Xmm& reg2) { opModR(reg1, reg2, 0x0F, 0x16); } -void movlpd(const Operand& op1, const Operand& op2) { opMovXMM(op1, op2, 0x12, 0x66); } -void movlps(const Operand& op1, const Operand& op2) { opMovXMM(op1, op2, 0x12, 0x100); } -void movmskpd(const Reg32e& reg, const Xmm& xmm) { db(0x66); movmskps(reg, xmm); } -void movmskps(const Reg32e& reg, const Xmm& xmm) { opModR(reg, xmm, 0x0F, 0x50); } -void movntdq(const Address& addr, const Xmm& reg) { opModM(addr, Reg16(reg.getIdx()), 0x0F, 0xE7); } -void movntdqa(const Xmm& xmm, const Address& addr) { db(0x66); opModM(addr, xmm, 0x0F, 0x38, 0x2A); } -void movnti(const Address& addr, const Reg32e& reg) { opModM(addr, reg, 0x0F, 0xC3); } -void movntpd(const Address& addr, const Xmm& reg) { opModM(addr, Reg16(reg.getIdx()), 0x0F, 0x2B); } -void movntps(const Address& addr, const Xmm& xmm) { opModM(addr, Mmx(xmm.getIdx()), 0x0F, 0x2B); } -void movntq(const Address& addr, const Mmx& mmx) { if (!mmx.isMMX()) throw Error(ERR_BAD_COMBINATION); opModM(addr, mmx, 0x0F, 0xE7); } -void movq(const Address& addr, const Mmx& mmx) { if (mmx.isXMM()) db(0x66); opModM(addr, mmx, 0x0F, mmx.isXMM() ? 0xD6 : 0x7F); } -void movq(const Mmx& mmx, const Operand& op) { if (mmx.isXMM()) db(0xF3); opModRM(mmx, op, (mmx.getKind() == op.getKind()), op.isMEM(), 0x0F, mmx.isXMM() ? 0x7E : 0x6F); } -void movq2dq(const Xmm& xmm, const Mmx& mmx) { db(0xF3); opModR(xmm, mmx, 0x0F, 0xD6); } -void movsb() { db(0xA4); } -void movsd() { db(0xA5); } -void movsd(const Address& addr, const Xmm& xmm) { db(0xF2); opModM(addr, xmm, 0x0F, 0x11); } -void movsd(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x10, 0xF2); } -void movshdup(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x16, 0xF3, isXMM_XMMorMEM, NONE, NONE); } -void movsldup(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x12, 0xF3, isXMM_XMMorMEM, NONE, NONE); } -void movss(const Address& addr, const Xmm& xmm) { db(0xF3); opModM(addr, xmm, 0x0F, 0x11); } -void movss(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x10, 0xF3); } -void movsw() { db(0x66); db(0xA5); } -void movsx(const Reg& reg, const Operand& op) { opMovxx(reg, op, 0xBE); } -void movupd(const Address& addr, const Xmm& xmm) { db(0x66); opModM(addr, xmm, 0x0F, 0x11); } -void movupd(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x10, 0x66); } -void movups(const Address& addr, const Xmm& xmm) { opModM(addr, xmm, 0x0F, 0x11); } -void movups(const Xmm& xmm, const Operand& op) { opMMX(xmm, op, 0x10, 0x100); } -void movzx(const Reg& reg, const Operand& op) { opMovxx(reg, op, 0xB6); } -void mpsadbw(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x42, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } -void mul(const Operand& op) { opR_ModM(op, 0, 4, 0xF6); } -void mulpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x59, 0x66, isXMM_XMMorMEM); } -void mulps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x59, 0x100, isXMM_XMMorMEM); } -void mulsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x59, 0xF2, isXMM_XMMorMEM); } -void mulss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x59, 0xF3, isXMM_XMMorMEM); } -void mulx(const Reg32e& r1, const Reg32e& r2, const Operand& op) { opGpr(r1, r2, op, T_F2 | T_0F38, 0xf6, true); } -void mwait() { db(0x0F); db(0x01); db(0xC9); } -void neg(const Operand& op) { opR_ModM(op, 0, 3, 0xF6); } -void not_(const Operand& op) { opR_ModM(op, 0, 2, 0xF6); } -void or_(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x08, 1); }
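The packed-integer group that follows routes through opMMX, so a single mnemonic covers both the MMX encoding and its 66-prefixed XMM counterpart; taking paddd (defined just below) as the example (same include and subclass assumptions as the earlier sketches):

struct PaddDemo : Xbyak::CodeGenerator {
    PaddDemo() {
        paddd(mm0, mm1);   //    0F FE /r: 64-bit MMX form
        paddd(xmm0, xmm1); // 66 0F FE /r: 128-bit SSE2 form of the same mnemonic
    }
};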
op1, const Operand& op2) { opRM_RM(op1, op2, 0x08); } -void orpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x56, 0x66, isXMM_XMMorMEM); } -void orps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x56, 0x100, isXMM_XMMorMEM); } -void pabsb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x1C, 0x66, NONE, 0x38); } -void pabsd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x1E, 0x66, NONE, 0x38); } -void pabsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x1D, 0x66, NONE, 0x38); } -void packssdw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x6B); } -void packsswb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x63); } -void packusdw(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x2B, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void packuswb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x67); } -void paddb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xFC); } -void paddd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xFE); } -void paddq(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xD4); } -void paddsb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xEC); } -void paddsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xED); } -void paddusb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xDC); } -void paddusw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xDD); } -void paddw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xFD); } -void palignr(const Mmx& mmx, const Operand& op, int imm) { opMMX(mmx, op, 0x0f, 0x66, static_cast<uint8>(imm), 0x3a); } -void pand(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xDB); } -void pandn(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xDF); } -void pause() { db(0xF3); db(0x90); } -void pavgb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE0); } -void pavgw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE3); } -void pblendvb(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x10, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pblendw(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x0E, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } -void pclmulhqhdq(const Xmm& xmm, const Operand& op) { pclmulqdq(xmm, op, 0x11); } -void pclmulhqlqdq(const Xmm& xmm, const Operand& op) { pclmulqdq(xmm, op, 0x01); } -void pclmullqhdq(const Xmm& xmm, const Operand& op) { pclmulqdq(xmm, op, 0x10); } -void pclmullqlqdq(const Xmm& xmm, const Operand& op) { pclmulqdq(xmm, op, 0x00); } -void pclmulqdq(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x44, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } -void pcmpeqb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x74); } -void pcmpeqd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x76); } -void pcmpeqq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x29, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pcmpeqw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x75); } -void pcmpestri(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x61, 0x66, isXMM_XMMorMEM, imm, 0x3A); } -void pcmpestrm(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x60, 0x66, isXMM_XMMorMEM, imm, 0x3A); } -void pcmpgtb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x64); } -void pcmpgtd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x66); } -void pcmpgtq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x37, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pcmpgtw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x65); } -void pcmpistri(const Xmm& xmm, const Operand& op, 
uint8 imm) { opGen(xmm, op, 0x63, 0x66, isXMM_XMMorMEM, imm, 0x3A); } -void pcmpistrm(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x62, 0x66, isXMM_XMMorMEM, imm, 0x3A); } -void pdep(const Reg32e& r1, const Reg32e& r2, const Operand& op) { opGpr(r1, r2, op, T_F2 | T_0F38, 0xf5, true); } -void pext(const Reg32e& r1, const Reg32e& r2, const Operand& op) { opGpr(r1, r2, op, T_F3 | T_0F38, 0xf5, true); } -void pextrb(const Operand& op, const Xmm& xmm, uint8 imm) { opExt(op, xmm, 0x14, imm); } -void pextrd(const Operand& op, const Xmm& xmm, uint8 imm) { opExt(op, xmm, 0x16, imm); } -void pextrw(const Operand& op, const Mmx& xmm, uint8 imm) { opExt(op, xmm, 0x15, imm, true); } -void phaddd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x02, 0x66, NONE, 0x38); } -void phaddsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x03, 0x66, NONE, 0x38); } -void phaddw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x01, 0x66, NONE, 0x38); } -void phminposuw(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x41, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void phsubd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x06, 0x66, NONE, 0x38); } -void phsubsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x07, 0x66, NONE, 0x38); } -void phsubw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x05, 0x66, NONE, 0x38); } -void pinsrb(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x20, 0x66, isXMM_REG32orMEM, imm, 0x3A); } -void pinsrd(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x22, 0x66, isXMM_REG32orMEM, imm, 0x3A); } -void pinsrw(const Mmx& mmx, const Operand& op, int imm) { if (!op.isREG(32) && !op.isMEM()) throw Error(ERR_BAD_COMBINATION); opGen(mmx, op, 0xC4, mmx.isXMM() ? 0x66 : NONE, 0, imm); } -void pmaddubsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x04, 0x66, NONE, 0x38); } -void pmaddwd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF5); } -void pmaxsb(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x3C, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmaxsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x3D, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmaxsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xEE); } -void pmaxub(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xDE); } -void pmaxud(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x3F, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmaxuw(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x3E, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pminsb(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x38, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pminsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x39, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pminsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xEA); } -void pminub(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xDA); } -void pminud(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x3B, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pminuw(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x3A, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmovmskb(const Reg32e& reg, const Mmx& mmx) { if (mmx.isXMM()) db(0x66); opModR(reg, mmx, 0x0F, 0xD7); } -void pmovsxbd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x21, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmovsxbq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x22, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmovsxbw(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x20, 0x66, isXMM_XMMorMEM, NONE, 
0x38); } -void pmovsxdq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x25, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmovsxwd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x23, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmovsxwq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x24, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmovzxbd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x31, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmovzxbq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x32, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmovzxbw(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x30, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmovzxdq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x35, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmovzxwd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x33, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmovzxwq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x34, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmuldq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x28, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmulhrsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x0B, 0x66, NONE, 0x38); } -void pmulhuw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE4); } -void pmulhw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE5); } -void pmulld(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x40, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void pmullw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xD5); } -void pmuludq(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF4); } -void popcnt(const Reg&reg, const Operand& op) { opSp1(reg, op, 0xF3, 0x0F, 0xB8); } -void popf() { db(0x9D); } -void por(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xEB); } -void prefetchnta(const Address& addr) { opModM(addr, Reg32(0), 0x0F, 0x18); } -void prefetcht0(const Address& addr) { opModM(addr, Reg32(1), 0x0F, 0x18); } -void prefetcht1(const Address& addr) { opModM(addr, Reg32(2), 0x0F, 0x18); } -void prefetcht2(const Address& addr) { opModM(addr, Reg32(3), 0x0F, 0x18); } -void prefetchw(const Address& addr) { opModM(addr, Reg32(1), 0x0F, 0x0D); } -void prefetchwt1(const Address& addr) { opModM(addr, Reg32(2), 0x0F, 0x0D); } -void psadbw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF6); } -void pshufb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x00, 0x66, NONE, 0x38); } -void pshufd(const Mmx& mmx, const Operand& op, uint8 imm8) { opMMX(mmx, op, 0x70, 0x66, imm8); } -void pshufhw(const Mmx& mmx, const Operand& op, uint8 imm8) { opMMX(mmx, op, 0x70, 0xF3, imm8); } -void pshuflw(const Mmx& mmx, const Operand& op, uint8 imm8) { opMMX(mmx, op, 0x70, 0xF2, imm8); } -void pshufw(const Mmx& mmx, const Operand& op, uint8 imm8) { opMMX(mmx, op, 0x70, 0x00, imm8); } -void psignb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x08, 0x66, NONE, 0x38); } -void psignd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x0A, 0x66, NONE, 0x38); } -void psignw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x09, 0x66, NONE, 0x38); } -void pslld(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF2); } -void pslld(const Mmx& mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x72, 6); } -void pslldq(const Xmm& xmm, int imm8) { opMMX_IMM(xmm, imm8, 0x73, 7); } -void psllq(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF3); } -void psllq(const Mmx& mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x73, 6); } -void psllw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF1); } -void psllw(const Mmx& 
mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x71, 6); } -void psrad(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE2); } -void psrad(const Mmx& mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x72, 4); } -void psraw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE1); } -void psraw(const Mmx& mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x71, 4); } -void psrld(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xD2); } -void psrld(const Mmx& mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x72, 2); } -void psrldq(const Xmm& xmm, int imm8) { opMMX_IMM(xmm, imm8, 0x73, 3); } -void psrlq(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xD3); } -void psrlq(const Mmx& mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x73, 2); } -void psrlw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xD1); } -void psrlw(const Mmx& mmx, int imm8) { opMMX_IMM(mmx, imm8, 0x71, 2); } -void psubb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF8); } -void psubd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xFA); } -void psubq(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xFB); } -void psubsb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE8); } -void psubsw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xE9); } -void psubusb(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xD8); } -void psubusw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xD9); } -void psubw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xF9); } -void ptest(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x17, 0x66, isXMM_XMMorMEM, NONE, 0x38); } -void punpckhbw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x68); } -void punpckhdq(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x6A); } -void punpckhqdq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x6D, 0x66, isXMM_XMMorMEM); } -void punpckhwd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x69); } -void punpcklbw(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x60); } -void punpckldq(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x62); } -void punpcklqdq(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x6C, 0x66, isXMM_XMMorMEM); } -void punpcklwd(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0x61); } -void pushf() { db(0x9C); } -void pxor(const Mmx& mmx, const Operand& op) { opMMX(mmx, op, 0xEF); } -void rcl(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 2); } -void rcl(const Operand& op, int imm) { opShift(op, imm, 2); } -void rcpps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x53, 0x100, isXMM_XMMorMEM); } -void rcpss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x53, 0xF3, isXMM_XMMorMEM); } -void rcr(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 3); } -void rcr(const Operand& op, int imm) { opShift(op, imm, 3); } -void rdmsr() { db(0x0F); db(0x32); } -void rdpmc() { db(0x0F); db(0x33); } -void rdrand(const Reg& r) { if (r.isBit(8)) throw Error(ERR_BAD_SIZE_OF_REGISTER); opModR(Reg(6, Operand::REG, r.getBit()), r, 0x0F, 0xC7); } -void rdseed(const Reg& r) { if (r.isBit(8)) throw Error(ERR_BAD_SIZE_OF_REGISTER); opModR(Reg(7, Operand::REG, r.getBit()), r, 0x0F, 0xC7); } -void rdtsc() { db(0x0F); db(0x31); } -void rdtscp() { db(0x0F); db(0x01); db(0xF9); } -void rep() { db(0xF3); } -void ret(int imm = 0) { if (imm) { db(0xC2); dw(imm); } else { db(0xC3); } } -void rol(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 0); } -void rol(const Operand& op, int imm) { opShift(op, imm, 0); } -void ror(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 1); } -void ror(const Operand& 
op, int imm) { opShift(op, imm, 1); } -void rorx(const Reg32e& r, const Operand& op, uint8 imm) { opGpr(r, op, Reg32e(0, r.getBit()), T_0F3A | T_F2, 0xF0, false, imm); } -void roundpd(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x09, 0x66, isXMM_XMMorMEM, imm, 0x3A); } -void roundps(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0x08, 0x66, isXMM_XMMorMEM, imm, 0x3A); } -void roundsd(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x0B, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } -void roundss(const Xmm& xmm, const Operand& op, int imm) { opGen(xmm, op, 0x0A, 0x66, isXMM_XMMorMEM, static_cast<uint8>(imm), 0x3A); } -void rsqrtps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x52, 0x100, isXMM_XMMorMEM); } -void rsqrtss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x52, 0xF3, isXMM_XMMorMEM); } -void sahf() { db(0x9E); } -void sal(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 4); } -void sal(const Operand& op, int imm) { opShift(op, imm, 4); } -void sar(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 7); } -void sar(const Operand& op, int imm) { opShift(op, imm, 7); } -void sarx(const Reg32e& r1, const Operand& op, const Reg32e& r2) { opGpr(r1, op, r2, T_F3 | T_0F38, 0xf7, false); } -void sbb(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x18, 3); } -void sbb(const Operand& op1, const Operand& op2) { opRM_RM(op1, op2, 0x18); } -void scasb() { db(0xAE); } -void scasd() { db(0xAF); } -void scasw() { db(0x66); db(0xAF); } -void seta(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 7); }//-V524 -void setae(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 3); }//-V524 -void setb(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 2); }//-V524 -void setbe(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 6); }//-V524 -void setc(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 2); }//-V524 -void sete(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 4); }//-V524 -void setg(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 15); }//-V524 -void setge(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 13); }//-V524 -void setl(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 12); }//-V524 -void setle(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 14); }//-V524 -void setna(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 6); }//-V524 -void setnae(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 2); }//-V524 -void setnb(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 3); }//-V524 -void setnbe(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 7); }//-V524 -void setnc(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 3); }//-V524 -void setne(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 5); }//-V524 -void setng(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 14); }//-V524 -void setnge(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 12); }//-V524 -void setnl(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 13); }//-V524 -void setnle(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 15); }//-V524 -void setno(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 1); }//-V524 -void setnp(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 11); }//-V524 -void setns(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 9); }//-V524 -void setnz(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 5); }//-V524 -void seto(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 0); }//-V524 -void setp(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 10); }//-V524 
-void setpe(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 10); }//-V524 -void setpo(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 11); }//-V524 -void sets(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 8); }//-V524 -void setz(const Operand& op) { opR_ModM(op, 8, 0, 0x0F, 0x90 | 4); }//-V524 -void sfence() { db(0x0F); db(0xAE); db(0xF8); } -void sha1msg1(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xC9, NONE, isXMM_XMMorMEM, NONE, 0x38); } -void sha1msg2(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xCA, NONE, isXMM_XMMorMEM, NONE, 0x38); } -void sha1nexte(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xC8, NONE, isXMM_XMMorMEM, NONE, 0x38); } -void sha1rnds4(const Xmm& xmm, const Operand& op, uint8 imm) { opGen(xmm, op, 0xCC, NONE, isXMM_XMMorMEM, imm, 0x3A); } -void sha256msg1(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xCC, NONE, isXMM_XMMorMEM, NONE, 0x38); } -void sha256msg2(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xCD, NONE, isXMM_XMMorMEM, NONE, 0x38); } -void sha256rnds2(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0xCB, NONE, isXMM_XMMorMEM, NONE, 0x38); } -void shl(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 4); } -void shl(const Operand& op, int imm) { opShift(op, imm, 4); } -void shld(const Operand& op, const Reg& reg, const Reg8& _cl) { opShxd(op, reg, 0, 0xA4, &_cl); } -void shld(const Operand& op, const Reg& reg, uint8 imm) { opShxd(op, reg, imm, 0xA4); } -void shlx(const Reg32e& r1, const Operand& op, const Reg32e& r2) { opGpr(r1, op, r2, T_66 | T_0F38, 0xf7, false); } -void shr(const Operand& op, const Reg8& _cl) { opShift(op, _cl, 5); } -void shr(const Operand& op, int imm) { opShift(op, imm, 5); } -void shrd(const Operand& op, const Reg& reg, const Reg8& _cl) { opShxd(op, reg, 0, 0xAC, &_cl); } -void shrd(const Operand& op, const Reg& reg, uint8 imm) { opShxd(op, reg, imm, 0xAC); } -void shrx(const Reg32e& r1, const Operand& op, const Reg32e& r2) { opGpr(r1, op, r2, T_F2 | T_0F38, 0xf7, false); } -void shufpd(const Xmm& xmm, const Operand& op, uint8 imm8) { opGen(xmm, op, 0xC6, 0x66, isXMM_XMMorMEM, imm8); } -void shufps(const Xmm& xmm, const Operand& op, uint8 imm8) { opGen(xmm, op, 0xC6, 0x100, isXMM_XMMorMEM, imm8); } -void sqrtpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x51, 0x66, isXMM_XMMorMEM); } -void sqrtps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x51, 0x100, isXMM_XMMorMEM); } -void sqrtsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x51, 0xF2, isXMM_XMMorMEM); } -void sqrtss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x51, 0xF3, isXMM_XMMorMEM); } -void stac() { db(0x0F); db(0x01); db(0xCB); } -void stc() { db(0xF9); } -void std() { db(0xFD); } -void sti() { db(0xFB); } -void stmxcsr(const Address& addr) { opModM(addr, Reg32(3), 0x0F, 0xAE); } -void stosb() { db(0xAA); } -void stosd() { db(0xAB); } -void stosw() { db(0x66); db(0xAB); } -void sub(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x28, 5); } -void sub(const Operand& op1, const Operand& op2) { opRM_RM(op1, op2, 0x28); } -void subpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5C, 0x66, isXMM_XMMorMEM); } -void subps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5C, 0x100, isXMM_XMMorMEM); } -void subsd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5C, 0xF2, isXMM_XMMorMEM); } -void subss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x5C, 0xF3, isXMM_XMMorMEM); } -void tzcnt(const Reg&reg, const Operand& op) { opSp1(reg, op, 0xF3, 0x0F, 0xBC); } -void 
ucomisd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x2E, 0x66, isXMM_XMMorMEM); } -void ucomiss(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x2E, 0x100, isXMM_XMMorMEM); } -void ud2() { db(0x0F); db(0x0B); } -void unpckhpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x15, 0x66, isXMM_XMMorMEM); } -void unpckhps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x15, 0x100, isXMM_XMMorMEM); } -void unpcklpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x14, 0x66, isXMM_XMMorMEM); } -void unpcklps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x14, 0x100, isXMM_XMMorMEM); } -void vaddpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x58); } -void vaddps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x58); } -void vaddsd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F2 | T_EW1 | T_EVEX | T_ER_Z | T_N8, 0x58); } -void vaddss(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F3 | T_EW0 | T_EVEX | T_ER_Z | T_N4, 0x58); } -void vaddsubpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_66 | T_0F | T_YMM, 0xD0); } -void vaddsubps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_F2 | T_0F | T_YMM, 0xD0); } -void vaesdec(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_66 | T_0F38 | T_YMM | T_EVEX, 0xDE); } -void vaesdeclast(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_66 | T_0F38 | T_YMM | T_EVEX, 0xDF); } -void vaesenc(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_66 | T_0F38 | T_YMM | T_EVEX, 0xDC); } -void vaesenclast(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_66 | T_0F38 | T_YMM | T_EVEX, 0xDD); } -void vaesimc(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38 | T_W0, 0xDB); } -void vaeskeygenassist(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A, 0xDF, imm); } -void vandnpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x55); } -void vandnps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x55); } -void vandpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x54); } -void vandps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x54); } -void vblendpd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0 | T_YMM, 0x0D, imm); } -void vblendps(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0 | T_YMM, 0x0C, imm); } -void vblendvpd(const Xmm& x1, const Xmm& x2, const Operand& op, const Xmm& x4) { opAVX_X_X_XM(x1, x2, op, T_0F3A | T_66 | T_YMM, 0x4B, 
x4.getIdx() << 4); } -void vblendvps(const Xmm& x1, const Xmm& x2, const Operand& op, const Xmm& x4) { opAVX_X_X_XM(x1, x2, op, T_0F3A | T_66 | T_YMM, 0x4A, x4.getIdx() << 4); } -void vbroadcastf128(const Ymm& y, const Address& addr) { opAVX_X_XM_IMM(y, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x1A); } -void vbroadcasti128(const Ymm& y, const Address& addr) { opAVX_X_XM_IMM(y, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x5A); } -void vbroadcastsd(const Ymm& y, const Operand& op) { if (!op.isMEM() && !(y.isYMM() && op.isXMM()) && !(y.isZMM() && op.isXMM())) throw Error(ERR_BAD_COMBINATION); opAVX_X_XM_IMM(y, op, T_0F38 | T_66 | T_W0 | T_YMM | T_EVEX | T_EW1 | T_N8, 0x19); } -void vbroadcastss(const Xmm& x, const Operand& op) { if (!(op.isXMM() || op.isMEM())) throw Error(ERR_BAD_COMBINATION); opAVX_X_XM_IMM(x, op, T_N4 | T_66 | T_0F38 | T_W0 | T_YMM | T_EVEX, 0x18); } -void vcmpeq_ospd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 16); } -void vcmpeq_osps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 16); } -void vcmpeq_ossd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 16); } -void vcmpeq_osss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 16); } -void vcmpeq_uqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 8); } -void vcmpeq_uqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 8); } -void vcmpeq_uqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 8); } -void vcmpeq_uqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 8); } -void vcmpeq_uspd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 24); } -void vcmpeq_usps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 24); } -void vcmpeq_ussd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 24); } -void vcmpeq_usss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 24); } -void vcmpeqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 0); } -void vcmpeqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 0); } -void vcmpeqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 0); } -void vcmpeqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 0); } -void vcmpfalse_ospd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 27); } -void vcmpfalse_osps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 27); } -void vcmpfalse_ossd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 27); } -void vcmpfalse_osss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 27); } -void vcmpfalsepd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 11); } -void vcmpfalseps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 11); } -void vcmpfalsesd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 11); } -void vcmpfalsess(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 11); } -void vcmpge_oqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 29); } -void vcmpge_oqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 29); } -void vcmpge_oqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 29); } -void vcmpge_oqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 29); } -void vcmpgepd(const Xmm& x1, 
const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 13); } -void vcmpgeps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 13); } -void vcmpgesd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 13); } -void vcmpgess(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 13); } -void vcmpgt_oqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 30); } -void vcmpgt_oqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 30); } -void vcmpgt_oqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 30); } -void vcmpgt_oqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 30); } -void vcmpgtpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 14); } -void vcmpgtps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 14); } -void vcmpgtsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 14); } -void vcmpgtss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 14); } -void vcmple_oqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 18); } -void vcmple_oqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 18); } -void vcmple_oqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 18); } -void vcmple_oqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 18); } -void vcmplepd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 2); } -void vcmpleps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 2); } -void vcmplesd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 2); } -void vcmpless(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 2); } -void vcmplt_oqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 17); } -void vcmplt_oqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 17); } -void vcmplt_oqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 17); } -void vcmplt_oqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 17); } -void vcmpltpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 1); } -void vcmpltps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 1); } -void vcmpltsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 1); } -void vcmpltss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 1); } -void vcmpneq_oqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 12); } -void vcmpneq_oqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 12); } -void vcmpneq_oqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 12); } -void vcmpneq_oqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 12); } -void vcmpneq_ospd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 28); } -void vcmpneq_osps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 28); } -void vcmpneq_ossd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 28); } -void vcmpneq_osss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 28); } -void vcmpneq_uspd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 20); } -void vcmpneq_usps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 20); } -void 
vcmpneq_ussd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 20); } -void vcmpneq_usss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 20); } -void vcmpneqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 4); } -void vcmpneqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 4); } -void vcmpneqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 4); } -void vcmpneqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 4); } -void vcmpnge_uqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 25); } -void vcmpnge_uqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 25); } -void vcmpnge_uqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 25); } -void vcmpnge_uqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 25); } -void vcmpngepd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 9); } -void vcmpngeps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 9); } -void vcmpngesd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 9); } -void vcmpngess(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 9); } -void vcmpngt_uqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 26); } -void vcmpngt_uqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 26); } -void vcmpngt_uqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 26); } -void vcmpngt_uqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 26); } -void vcmpngtpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 10); } -void vcmpngtps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 10); } -void vcmpngtsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 10); } -void vcmpngtss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 10); } -void vcmpnle_uqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 22); } -void vcmpnle_uqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 22); } -void vcmpnle_uqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 22); } -void vcmpnle_uqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 22); } -void vcmpnlepd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 6); } -void vcmpnleps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 6); } -void vcmpnlesd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 6); } -void vcmpnless(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 6); } -void vcmpnlt_uqpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 21); } -void vcmpnlt_uqps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 21); } -void vcmpnlt_uqsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 21); } -void vcmpnlt_uqss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 21); } -void vcmpnltpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 5); } -void vcmpnltps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 5); } -void vcmpnltsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 5); } -void vcmpnltss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, 
x2, op, 5); } -void vcmpord_spd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 23); } -void vcmpord_sps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 23); } -void vcmpord_ssd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 23); } -void vcmpord_sss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 23); } -void vcmpordpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 7); } -void vcmpordps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 7); } -void vcmpordsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 7); } -void vcmpordss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 7); } -void vcmppd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0xC2, imm); } -void vcmpps(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_0F | T_YMM, 0xC2, imm); } -void vcmpsd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_F2 | T_0F, 0xC2, imm); } -void vcmpss(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_F3 | T_0F, 0xC2, imm); } -void vcmptrue_uspd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 31); } -void vcmptrue_usps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 31); } -void vcmptrue_ussd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 31); } -void vcmptrue_usss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 31); } -void vcmptruepd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 15); } -void vcmptrueps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 15); } -void vcmptruesd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 15); } -void vcmptruess(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 15); } -void vcmpunord_spd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 19); } -void vcmpunord_sps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 19); } -void vcmpunord_ssd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 19); } -void vcmpunord_sss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 19); } -void vcmpunordpd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmppd(x1, x2, op, 3); } -void vcmpunordps(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpps(x1, x2, op, 3); } -void vcmpunordsd(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpsd(x1, x2, op, 3); } -void vcmpunordss(const Xmm& x1, const Xmm& x2, const Operand& op) { vcmpss(x1, x2, op, 3); } -void vcomisd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_66 | T_0F | T_EW1 | T_EVEX | T_SAE_X, 0x2F); } -void vcomiss(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N4 | T_0F | T_EW0 | T_EVEX | T_SAE_X, 0x2F); } -void vcvtdq2pd(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_0F | T_F3 | T_YMM | T_EVEX | T_EW0 | T_B32 | T_N8 | T_N_VL, 0xE6); } -void vcvtdq2ps(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x5B); } -void vcvtpd2dq(const Xmm& x, const Operand& op) { opCvt2(x, op, T_0F | T_F2 | T_YMM | T_EVEX | T_EW1 | T_B64 | T_ER_Z, 0xE6); } -void vcvtpd2ps(const Xmm& x, const Operand& op) { opCvt2(x, op, T_0F | T_66 | 
T_YMM | T_EVEX | T_EW1 | T_B64 | T_ER_Z, 0x5A); } -void vcvtph2ps(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_0F38 | T_66 | T_W0 | T_EVEX | T_EW0 | T_N8 | T_N_VL | T_SAE_Y, 0x13); } -void vcvtps2dq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x5B); } -void vcvtps2pd(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_0F | T_YMM | T_EVEX | T_EW0 | T_B32 | T_N8 | T_N_VL | T_SAE_Y, 0x5A); } -void vcvtps2ph(const Operand& op, const Xmm& x, uint8 imm) { checkCvt1(x, op); opVex(x, 0, op, T_0F3A | T_66 | T_W0 | T_EVEX | T_EW0 | T_N8 | T_N_VL | T_SAE_Y, 0x1D, imm); } -void vcvtsd2si(const Reg32& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F2 | T_W0 | T_EVEX | T_EW0 | T_N4 | T_ER_X, 0x2D); } -void vcvtsd2ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_F2 | T_0F | T_EW1 | T_EVEX | T_ER_X, 0x5A); } -void vcvtsi2sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opCvt3(x1, x2, op, T_0F | T_F2 | T_EVEX, T_W1 | T_EW1 | T_ER_X | T_N8, T_W0 | T_EW0 | T_N4, 0x2A); } -void vcvtsi2ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opCvt3(x1, x2, op, T_0F | T_F3 | T_EVEX | T_ER_X, T_W1 | T_EW1 | T_N8, T_W0 | T_EW0 | T_N4, 0x2A); } -void vcvtss2sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_F3 | T_0F | T_EW0 | T_EVEX | T_SAE_X, 0x5A); } -void vcvtss2si(const Reg32& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F3 | T_W0 | T_EVEX | T_EW0 | T_ER_X | T_N8, 0x2D); } -void vcvttpd2dq(const Xmm& x, const Operand& op) { opCvt2(x, op, T_66 | T_0F | T_YMM | T_EVEX |T_EW1 | T_B64 | T_ER_Z, 0xE6); } -void vcvttps2dq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_F3 | T_0F | T_EW0 | T_YMM | T_EVEX | T_SAE_Z | T_B32, 0x5B); } -void vcvttsd2si(const Reg32& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F2 | T_W0 | T_EVEX | T_EW0 | T_N4 | T_SAE_X, 0x2C); } -void vcvttss2si(const Reg32& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F3 | T_W0 | T_EVEX | T_EW0 | T_SAE_X | T_N8, 0x2C); } -void vdivpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x5E); } -void vdivps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x5E); } -void vdivsd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F2 | T_EW1 | T_EVEX | T_ER_Z | T_N8, 0x5E); } -void vdivss(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F3 | T_EW0 | T_EVEX | T_ER_Z | T_N4, 0x5E); } -void vdppd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0, 0x41, imm); } -void vdpps(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0 | T_YMM, 0x40, imm); } -void vextractf128(const Operand& op, const Ymm& y, uint8 imm) { if (!(op.isXMEM() && y.isYMM())) throw Error(ERR_BAD_COMBINATION); opVex(y, 0, op, T_0F3A | T_66 | T_W0 | T_YMM, 0x19, imm); } -void vextracti128(const Operand& op, const Ymm& y, uint8 imm) { if (!(op.isXMEM() && y.isYMM())) throw Error(ERR_BAD_COMBINATION); opVex(y, 0, op, T_0F3A | T_66 | T_W0 | T_YMM, 0x39, imm); } -void 
vextractps(const Operand& op, const Xmm& x, uint8 imm) { if (!((op.isREG(32) || op.isMEM()) && x.isXMM())) throw Error(ERR_BAD_COMBINATION); opVex(x, 0, op, T_0F3A | T_66 | T_W0 | T_EVEX | T_N4, 0x17, imm); } -void vfmadd132pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x98); } -void vfmadd132ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x98); } -void vfmadd132sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0x99); } -void vfmadd132ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0x99); } -void vfmadd213pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xA8); } -void vfmadd213ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xA8); } -void vfmadd213sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xA9); } -void vfmadd213ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xA9); } -void vfmadd231pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xB8); } -void vfmadd231ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xB8); } -void vfmadd231sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xB9); } -void vfmadd231ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xB9); } -void vfmaddsub132pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x96); } -void vfmaddsub132ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x96); } -void vfmaddsub213pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xA6); } -void vfmaddsub213ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xA6); } -void vfmaddsub231pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xB6); } -void vfmaddsub231ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xB6); } -void vfmsub132pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x9A); } -void vfmsub132ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x9A); } -void vfmsub132sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0x9B); } -void 
vfmsub132ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0x9B); } -void vfmsub213pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xAA); } -void vfmsub213ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xAA); } -void vfmsub213sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xAB); } -void vfmsub213ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xAB); } -void vfmsub231pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xBA); } -void vfmsub231ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xBA); } -void vfmsub231sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xBB); } -void vfmsub231ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xBB); } -void vfmsubadd132pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x97); } -void vfmsubadd132ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x97); } -void vfmsubadd213pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xA7); } -void vfmsubadd213ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xA7); } -void vfmsubadd231pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xB7); } -void vfmsubadd231ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xB7); } -void vfnmadd132pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x9C); } -void vfnmadd132ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x9C); } -void vfnmadd132sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0x9D); } -void vfnmadd132ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0x9D); } -void vfnmadd213pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xAC); } -void vfnmadd213ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xAC); } -void vfnmadd213sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xAD); } -void vfnmadd213ss(const Xmm& x1, const Xmm& x2, const 
Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xAD); } -void vfnmadd231pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xBC); } -void vfnmadd231ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xBC); } -void vfnmadd231sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xBD); } -void vfnmadd231ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xBD); } -void vfnmsub132pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x9E); } -void vfnmsub132ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x9E); } -void vfnmsub132sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0x9F); } -void vfnmsub132ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0x9F); } -void vfnmsub213pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xAE); } -void vfnmsub213ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xAE); } -void vfnmsub213sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xAF); } -void vfnmsub213ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xAF); } -void vfnmsub231pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0xBE); } -void vfnmsub231ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0xBE); } -void vfnmsub231sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_W1 | T_EW1 | T_EVEX | T_ER_X, 0xBF); } -void vfnmsub231ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_W0 | T_EW0 | T_EVEX | T_ER_X, 0xBF); } -void vgatherdpd(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W1, 0x92, 0); } -void vgatherdps(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W0, 0x92, 1); } -void vgatherqpd(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W1, 0x93, 1); } -void vgatherqps(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W0, 0x93, 2); } -void vgf2p8affineinvqb(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W1 | T_EW1 | T_YMM | T_EVEX | T_SAE_Z | T_B64, 0xCF, imm); } -void vgf2p8affineqb(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W1 | T_EW1 | 
T_YMM | T_EVEX | T_SAE_Z | T_B64, 0xCE, imm); }
-void vgf2p8mulb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_SAE_Z, 0xCF); }
-void vhaddpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_66 | T_0F | T_YMM, 0x7C); }
-void vhaddps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_F2 | T_0F | T_YMM, 0x7C); }
-void vhsubpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_66 | T_0F | T_YMM, 0x7D); }
-void vhsubps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_F2 | T_0F | T_YMM, 0x7D); }
-void vinsertf128(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { if (!(y1.isYMM() && y2.isYMM() && op.isXMEM())) throw Error(ERR_BAD_COMBINATION); opVex(y1, &y2, op, T_0F3A | T_66 | T_W0 | T_YMM, 0x18, imm); }
-void vinserti128(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { if (!(y1.isYMM() && y2.isYMM() && op.isXMEM())) throw Error(ERR_BAD_COMBINATION); opVex(y1, &y2, op, T_0F3A | T_66 | T_W0 | T_YMM, 0x38, imm); }
-void vinsertps(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F3A | T_W0 | T_EW0 | T_EVEX, 0x21, imm); }
-void vlddqu(const Xmm& x, const Address& addr) { opAVX_X_X_XM(x, cvtIdx0(x), addr, T_0F | T_F2 | T_W0 | T_YMM, 0xF0); }
-void vldmxcsr(const Address& addr) { opAVX_X_X_XM(xm2, xm0, addr, T_0F, 0xAE); }
-void vmaskmovdqu(const Xmm& x1, const Xmm& x2) { opAVX_X_X_XM(x1, xm0, x2, T_0F | T_66, 0xF7); }
-void vmaskmovpd(const Address& addr, const Xmm& x1, const Xmm& x2) { opAVX_X_X_XM(x2, x1, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x2F); }
-void vmaskmovpd(const Xmm& x1, const Xmm& x2, const Address& addr) { opAVX_X_X_XM(x1, x2, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x2D); }
-void vmaskmovps(const Address& addr, const Xmm& x1, const Xmm& x2) { opAVX_X_X_XM(x2, x1, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x2E); }
-void vmaskmovps(const Xmm& x1, const Xmm& x2, const Address& addr) { opAVX_X_X_XM(x1, x2, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x2C); }
-void vmaxpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x5F); }
-void vmaxps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x5F); }
-void vmaxsd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F2 | T_EW1 | T_EVEX | T_ER_Z | T_N8, 0x5F); }
-void vmaxss(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F3 | T_EW0 | T_EVEX | T_ER_Z | T_N4, 0x5F); }
-void vminpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x5D); }
-void vminps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x5D); }
-void vminsd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F2 | T_EW1 | T_EVEX | T_ER_Z | T_N8, 0x5D); }
-void vminss(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F3 | T_EW0 | T_EVEX | T_ER_Z | T_N4, 0x5D); }
-void vmovapd(const Address& addr, const Xmm& xmm) { opAVX_X_XM_IMM(xmm, addr, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_M_K, 0x29); }
-void vmovapd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX, 0x28); }
-void vmovaps(const Address& addr, const Xmm& xmm) { opAVX_X_XM_IMM(xmm, addr, T_0F | T_EW0 | T_YMM | T_EVEX | T_M_K, 0x29); }
-void vmovaps(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_0F | T_EW0 | T_YMM | T_EVEX, 0x28); }
-void vmovd(const Operand& op, const Xmm& x) { if (!op.isREG(32) && !op.isMEM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x, xm0, op, T_0F | T_66 | T_W0 | T_EVEX | T_N4, 0x7E); }
-void vmovd(const Xmm& x, const Operand& op) { if (!op.isREG(32) && !op.isMEM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x, xm0, op, T_0F | T_66 | T_W0 | T_EVEX | T_N4, 0x6E); }
-void vmovddup(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_DUP | T_F2 | T_0F | T_EW1 | T_YMM | T_EVEX | T_ER_X | T_ER_Y | T_ER_Z, 0x12); }
-void vmovdqa(const Address& addr, const Xmm& xmm) { opAVX_X_XM_IMM(xmm, addr, T_66 | T_0F | T_YMM, 0x7F); }
-void vmovdqa(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F | T_YMM, 0x6F); }
-void vmovdqu(const Address& addr, const Xmm& xmm) { opAVX_X_XM_IMM(xmm, addr, T_F3 | T_0F | T_YMM, 0x7F); }
-void vmovdqu(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_F3 | T_0F | T_YMM, 0x6F); }
-void vmovhlps(const Xmm& x1, const Xmm& x2, const Operand& op = Operand()) { if (!op.isNone() && !op.isXMM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x1, x2, op, T_0F | T_EVEX | T_EW0, 0x12); }
-void vmovhpd(const Address& addr, const Xmm& x) { opAVX_X_X_XM(x, xm0, addr, T_0F | T_66 | T_EVEX | T_EW1 | T_N8, 0x17); }
-void vmovhpd(const Xmm& x, const Operand& op1, const Operand& op2 = Operand()) { if (!op2.isNone() && !op2.isMEM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x, op1, op2, T_0F | T_66 | T_EVEX | T_EW1 | T_N8, 0x16); }
-void vmovhps(const Address& addr, const Xmm& x) { opAVX_X_X_XM(x, xm0, addr, T_0F | T_EVEX | T_EW0 | T_N8, 0x17); }
-void vmovhps(const Xmm& x, const Operand& op1, const Operand& op2 = Operand()) { if (!op2.isNone() && !op2.isMEM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x, op1, op2, T_0F | T_EVEX | T_EW0 | T_N8, 0x16); }
-void vmovlhps(const Xmm& x1, const Xmm& x2, const Operand& op = Operand()) { if (!op.isNone() && !op.isXMM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x1, x2, op, T_0F | T_EVEX | T_EW0, 0x16); }
-void vmovlpd(const Address& addr, const Xmm& x) { opAVX_X_X_XM(x, xm0, addr, T_0F | T_66 | T_EVEX | T_EW1 | T_N8, 0x13); }
-void vmovlpd(const Xmm& x, const Operand& op1, const Operand& op2 = Operand()) { if (!op2.isNone() && !op2.isMEM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x, op1, op2, T_0F | T_66 | T_EVEX | T_EW1 | T_N8, 0x12); }
-void vmovlps(const Address& addr, const Xmm& x) { opAVX_X_X_XM(x, xm0, addr, T_0F | T_EVEX | T_EW0 | T_N8, 0x13); }
-void vmovlps(const Xmm& x, const Operand& op1, const Operand& op2 = Operand()) { if (!op2.isNone() && !op2.isMEM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x, op1, op2, T_0F | T_EVEX | T_EW0 | T_N8, 0x12); }
-void vmovmskpd(const Reg& r, const Xmm& x) { if (!r.isBit(i32e)) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x.isXMM() ? Xmm(r.getIdx()) : Ymm(r.getIdx()), cvtIdx0(x), x, T_0F | T_66 | T_W0 | T_YMM, 0x50); }
-void vmovmskps(const Reg& r, const Xmm& x) { if (!r.isBit(i32e)) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x.isXMM() ? Xmm(r.getIdx()) : Ymm(r.getIdx()), cvtIdx0(x), x, T_0F | T_W0 | T_YMM, 0x50); }
-void vmovntdq(const Address& addr, const Xmm& x) { opVex(x, 0, addr, T_0F | T_66 | T_YMM | T_EVEX | T_EW0, 0xE7); }
-void vmovntdqa(const Xmm& x, const Address& addr) { opVex(x, 0, addr, T_0F38 | T_66 | T_YMM | T_EVEX | T_EW0, 0x2A); }
-void vmovntpd(const Address& addr, const Xmm& x) { opVex(x, 0, addr, T_0F | T_66 | T_YMM | T_EVEX | T_EW1, 0x2B); }
-void vmovntps(const Address& addr, const Xmm& x) { opVex(x, 0, addr, T_0F | T_YMM | T_EVEX | T_EW0, 0x2B); }
-void vmovq(const Address& addr, const Xmm& x) { opAVX_X_X_XM(x, xm0, addr, T_0F | T_66 | T_EVEX | T_EW1 | T_N8, x.getIdx() < 16 ? 0xD6 : 0x7E); }
-void vmovq(const Xmm& x, const Address& addr) { int type, code; if (x.getIdx() < 16) { type = T_0F | T_F3; code = 0x7E; } else { type = T_0F | T_66 | T_EVEX | T_EW1 | T_N8; code = 0x6E; } opAVX_X_X_XM(x, xm0, addr, type, code); }
-void vmovq(const Xmm& x1, const Xmm& x2) { opAVX_X_X_XM(x1, xm0, x2, T_0F | T_F3 | T_EVEX | T_EW1 | T_N8, 0x7E); }
-void vmovsd(const Address& addr, const Xmm& x) { opAVX_X_X_XM(x, xm0, addr, T_N8 | T_F2 | T_0F | T_EW1 | T_EVEX | T_M_K, 0x11); }
-void vmovsd(const Xmm& x, const Address& addr) { opAVX_X_X_XM(x, xm0, addr, T_N8 | T_F2 | T_0F | T_EW1 | T_EVEX, 0x10); }
-void vmovsd(const Xmm& x1, const Xmm& x2, const Operand& op = Operand()) { if (!op.isNone() && !op.isXMM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x1, x2, op, T_N8 | T_F2 | T_0F | T_EW1 | T_EVEX, 0x10); }
-void vmovshdup(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_F3 | T_0F | T_EW0 | T_YMM | T_EVEX, 0x16); }
-void vmovsldup(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_F3 | T_0F | T_EW0 | T_YMM | T_EVEX, 0x12); }
-void vmovss(const Address& addr, const Xmm& x) { opAVX_X_X_XM(x, xm0, addr, T_N4 | T_F3 | T_0F | T_EW0 | T_EVEX | T_M_K, 0x11); }
-void vmovss(const Xmm& x, const Address& addr) { opAVX_X_X_XM(x, xm0, addr, T_N4 | T_F3 | T_0F | T_EW0 | T_EVEX, 0x10); }
-void vmovss(const Xmm& x1, const Xmm& x2, const Operand& op = Operand()) { if (!op.isNone() && !op.isXMM()) throw Error(ERR_BAD_COMBINATION); opAVX_X_X_XM(x1, x2, op, T_N4 | T_F3 | T_0F | T_EW0 | T_EVEX, 0x10); }
-void vmovupd(const Address& addr, const Xmm& xmm) { opAVX_X_XM_IMM(xmm, addr, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_M_K, 0x11); }
-void vmovupd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX, 0x10); }
-void vmovups(const Address& addr, const Xmm& xmm) { opAVX_X_XM_IMM(xmm, addr, T_0F | T_EW0 | T_YMM | T_EVEX | T_M_K, 0x11); }
-void vmovups(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_0F | T_EW0 | T_YMM | T_EVEX, 0x10); }
-void vmpsadbw(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0 | T_YMM, 0x42, imm); }
-void vmulpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x59); }
-void vmulps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x59); }
-void vmulsd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F2 | T_EW1 | T_EVEX | T_ER_Z | T_N8, 0x59); }
-void vmulss(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F3 | T_EW0 | T_EVEX | T_ER_Z | T_N4, 0x59); }
-void vorpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x56); }
-void vorps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x56); }
-void vpabsb(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x1C); }
-void vpabsd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x1E); }
-void vpabsw(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x1D); }
-void vpackssdw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0x6B); }
-void vpacksswb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0x63); }
-void vpackusdw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x2B); }
-void vpackuswb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0x67); }
-void vpaddb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xFC); }
-void vpaddd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0xFE); }
-void vpaddq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0xD4); }
-void vpaddsb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xEC); }
-void vpaddsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xED); }
-void vpaddusb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xDC); }
-void vpaddusw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xDD); }
-void vpaddw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xFD); }
-void vpalignr(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_YMM | T_EVEX, 0x0F, imm); }
-void vpand(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0xDB); }
-void vpandn(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0xDF); }
-void vpavgb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xE0); }
-void vpavgw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xE3); }
-void vpblendd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0 | T_YMM, 0x02, imm); }
-void vpblendvb(const Xmm& x1, const Xmm& x2, const Operand& op, const Xmm& x4) { opAVX_X_X_XM(x1, x2, op, T_0F3A | T_66 | T_YMM, 0x4C, x4.getIdx() << 4); }
-void vpblendw(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0 | T_YMM, 0x0E, imm); }
-void vpbroadcastb(const Xmm& x, const Operand& op) { if (!(op.isXMM() || op.isMEM())) throw Error(ERR_BAD_COMBINATION); opAVX_X_XM_IMM(x, op, T_N1 | T_66 | T_0F38 | T_W0 | T_YMM | T_EVEX, 0x78); }
-void vpbroadcastd(const Xmm& x, const Operand& op) { if (!(op.isXMM() || op.isMEM())) throw Error(ERR_BAD_COMBINATION); opAVX_X_XM_IMM(x, op, T_N4 | T_66 | T_0F38 | T_W0 | T_YMM | T_EVEX, 0x58); }
-void vpbroadcastq(const Xmm& x, const Operand& op) { if (!(op.isXMM() || op.isMEM())) throw Error(ERR_BAD_COMBINATION); opAVX_X_XM_IMM(x, op, T_N8 | T_66 | T_0F38 | T_W0 | T_EW1 | T_YMM | T_EVEX, 0x59); }
-void vpbroadcastw(const Xmm& x, const Operand& op) { if (!(op.isXMM() || op.isMEM())) throw Error(ERR_BAD_COMBINATION); opAVX_X_XM_IMM(x, op, T_N2 | T_66 | T_0F38 | T_W0 | T_YMM | T_EVEX, 0x79); }
-void vpclmulqdq(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0 | T_YMM | T_EVEX, 0x44, imm); }
-void vpcmpeqb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0x74); }
-void vpcmpeqd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0x76); }
-void vpcmpeqq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x29); }
-void vpcmpeqw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0x75); }
-void vpcmpestri(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A, 0x61, imm); }
-void vpcmpestrm(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A, 0x60, imm); }
-void vpcmpgtb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0x64); }
-void vpcmpgtd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0x66); }
-void vpcmpgtq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x37); }
-void vpcmpgtw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0x65); }
-void vpcmpistri(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A, 0x63, imm); }
-void vpcmpistrm(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A, 0x62, imm); }
-void vperm2f128(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { if (!(y1.isYMM() && y2.isYMM() && op.isYMEM())) throw Error(ERR_BAD_COMBINATION); opVex(y1, &y2, op, T_0F3A | T_66 | T_W0 | T_YMM, 0x06, imm); }
-void vperm2i128(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { if (!(y1.isYMM() && y2.isYMM() && op.isYMEM())) throw Error(ERR_BAD_COMBINATION); opVex(y1, &y2, op, T_0F3A | T_66 | T_W0 | T_YMM, 0x46, imm); }
-void vpermd(const Ymm& y1, const Ymm& y2, const Operand& op) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x36); }
-void vpermilpd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x0D); }
-void vpermilpd(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_EVEX | T_B64, 0x05, imm); }
-void vpermilps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x0C); }
-void vpermilps(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_EVEX | T_B32, 0x04, imm); }
-void vpermpd(const Ymm& y, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(y, op, T_66 | T_0F3A | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x01, imm); }
-void vpermpd(const Ymm& y1, const Ymm& y2, const Operand& op) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x16); }
-void vpermps(const Ymm& y1, const Ymm& y2, const Operand& op) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x16); }
-void vpermq(const Ymm& y, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(y, op, T_66 | T_0F3A | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x00, imm); }
-void vpermq(const Ymm& y1, const Ymm& y2, const Operand& op) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F38 | T_W0 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x36); }
-void vpextrb(const Operand& op, const Xmm& x, uint8 imm) { if (!((op.isREG(8|16|i32e) || op.isMEM()) && x.isXMM())) throw Error(ERR_BAD_COMBINATION); opVex(x, 0, op, T_0F3A | T_66 | T_EVEX | T_N1, 0x14, imm); }
-void vpextrd(const Operand& op, const Xmm& x, uint8 imm) { if (!((op.isREG(32) || op.isMEM()) && x.isXMM())) throw Error(ERR_BAD_COMBINATION); opVex(x, 0, op, T_0F3A | T_66 | T_W0 | T_EVEX | T_EW0 | T_N4, 0x16, imm); }
-void vpextrq(const Operand& op, const Xmm& x, uint8 imm) { if (!((op.isREG(64) || op.isMEM()) && x.isXMM())) throw Error(ERR_BAD_COMBINATION); opVex(x, 0, op, T_0F3A | T_66 | T_W1 | T_EVEX | T_EW1 | T_N8, 0x16, imm); }
-void vpextrw(const Operand& op, const Xmm& x, uint8 imm) { if (!((op.isREG(16|i32e) || op.isMEM()) && x.isXMM())) throw Error(ERR_BAD_COMBINATION); if (op.isREG() && x.getIdx() < 16) { opAVX_X_X_XM(Xmm(op.getIdx()), xm0, x, T_0F | T_66, 0xC5, imm); } else { opVex(x, 0, op, T_0F3A | T_66 | T_EVEX | T_N2, 0x15, imm); } }
-void vpgatherdd(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W0, 0x90, 1); }
-void vpgatherdq(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W1, 0x90, 0); }
-void vpgatherqd(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W0, 0x91, 2); }
-void vpgatherqq(const Xmm& x1, const Address& addr, const Xmm& x2) { opGather(x1, addr, x2, T_0F38 | T_66 | T_YMM | T_VSIB | T_W1, 0x91, 1); }
-void vphaddd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x02); }
-void vphaddsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x03); }
-void vphaddw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x01); }
-void vphminposuw(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38, 0x41); }
-void vphsubd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x06); }
-void vphsubsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x07); }
-void vphsubw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x05); }
-void vpinsrb(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { if (!(x1.isXMM() && x2.isXMM() && (op.isREG(32) || op.isMEM()))) throw Error(ERR_BAD_COMBINATION); opVex(x1, &x2, op, T_0F3A | T_66 | T_EVEX | T_N1, 0x20, imm); }
-void vpinsrd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { if (!(x1.isXMM() && x2.isXMM() && (op.isREG(32) || op.isMEM()))) throw Error(ERR_BAD_COMBINATION); opVex(x1, &x2, op, T_0F3A | T_66 | T_W0 | T_EVEX | T_EW0 | T_N4, 0x22, imm); }
-void vpinsrq(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { if (!(x1.isXMM() && x2.isXMM() && (op.isREG(64) || op.isMEM()))) throw Error(ERR_BAD_COMBINATION); opVex(x1, &x2, op, T_0F3A | T_66 | T_W1 | T_EVEX | T_EW1 | T_N8, 0x22, imm); }
-void vpinsrw(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { if (!(x1.isXMM() && x2.isXMM() && (op.isREG(32) || op.isMEM()))) throw Error(ERR_BAD_COMBINATION); opVex(x1, &x2, op, T_0F | T_66 | T_EVEX | T_N2, 0xC4, imm); }
-void vpmaddubsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x04); }
-void vpmaddwd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xF5); }
-void vpmaskmovd(const Address& addr, const Xmm& x1, const Xmm& x2) { opAVX_X_X_XM(x2, x1, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x8E); }
-void vpmaskmovd(const Xmm& x1, const Xmm& x2, const Address& addr) { opAVX_X_X_XM(x1, x2, addr, T_0F38 | T_66 | T_W0 | T_YMM, 0x8C); }
-void vpmaskmovq(const Address& addr, const Xmm& x1, const Xmm& x2) { opAVX_X_X_XM(x2, x1, addr, T_0F38 | T_66 | T_W1 | T_YMM, 0x8E); }
-void vpmaskmovq(const Xmm& x1, const Xmm& x2, const Address& addr) { opAVX_X_X_XM(x1, x2, addr, T_0F38 | T_66 | T_W1 | T_YMM, 0x8C); }
-void vpmaxsb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x3C); }
-void vpmaxsd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x3D); }
-void vpmaxsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xEE); }
-void vpmaxub(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xDE); }
-void vpmaxud(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x3F); }
-void vpmaxuw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x3E); }
-void vpminsb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x38); }
-void vpminsd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x39); }
-void vpminsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xEA); }
-void vpminub(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xDA); }
-void vpminud(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x3B); }
-void vpminuw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x3A); }
-void vpmovmskb(const Reg32e& r, const Xmm& x) { if (!x.is(Operand::XMM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(x.isYMM() ? Ymm(r.getIdx()) : Xmm(r.getIdx()), 0, x, T_0F | T_66 | T_YMM, 0xD7); }
-void vpmovsxbd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N4 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x21); }
-void vpmovsxbq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N2 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x22); }
-void vpmovsxbw(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x20); }
-void vpmovsxdq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_N_VL | T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX, 0x25); }
-void vpmovsxwd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x23); }
-void vpmovsxwq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N4 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x24); }
-void vpmovzxbd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N4 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x31); }
-void vpmovzxbq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N2 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x32); }
-void vpmovzxbw(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x30); }
-void vpmovzxdq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_N_VL | T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX, 0x35); }
-void vpmovzxwd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x33); }
-void vpmovzxwq(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N4 | T_N_VL | T_66 | T_0F38 | T_YMM | T_EVEX, 0x34); }
-void vpmuldq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x28); }
-void vpmulhrsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x0B); }
-void vpmulhuw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xE4); }
-void vpmulhw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xE5); }
-void vpmulld(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x40); }
-void vpmullw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xD5); }
-void vpmuludq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0xF4); }
-void vpor(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0xEB); }
-void vpsadbw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xF6); }
-void vpshufb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM | T_EVEX, 0x00); }
-void vpshufd(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0x70, imm); }
-void vpshufhw(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_F3 | T_0F | T_YMM | T_EVEX, 0x70, imm); }
-void vpshuflw(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_F2 | T_0F | T_YMM | T_EVEX, 0x70, imm); }
-void vpsignb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x08); }
-void vpsignd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x0A); }
-void vpsignw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_YMM, 0x09); }
-void vpslld(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 6), x, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32 | T_MEM_EVEX, 0x72, imm); }
-void vpslld(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_EW0 | T_YMM | T_EVEX, 0xF2); }
-void vpslldq(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 7), x, op, T_66 | T_0F | T_YMM | T_EVEX | T_MEM_EVEX, 0x73, imm); }
-void vpsllq(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 6), x, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64 | T_MEM_EVEX, 0x73, imm); }
-void vpsllq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_EW1 | T_YMM | T_EVEX, 0xF3); }
-void vpsllvd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x47); }
-void vpsllvq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x47); }
-void vpsllw(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 6), x, op, T_66 | T_0F | T_YMM | T_EVEX | T_MEM_EVEX, 0x71, imm); }
-void vpsllw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_YMM | T_EVEX, 0xF1); }
-void vpsrad(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 4), x, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32 | T_MEM_EVEX, 0x72, imm); }
-void vpsrad(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_EW0 | T_YMM | T_EVEX, 0xE2); }
-void vpsravd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x46); }
-void vpsraw(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 4), x, op, T_66 | T_0F | T_YMM | T_EVEX | T_MEM_EVEX, 0x71, imm); }
-void vpsraw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_YMM | T_EVEX, 0xE1); }
-void vpsrld(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 2), x, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32 | T_MEM_EVEX, 0x72, imm); }
-void vpsrld(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_EW0 | T_YMM | T_EVEX, 0xD2); }
-void vpsrldq(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 3), x, op, T_66 | T_0F | T_YMM | T_EVEX | T_MEM_EVEX, 0x73, imm); }
-void vpsrlq(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 2), x, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64 | T_MEM_EVEX, 0x73, imm); }
-void vpsrlq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_EW1 | T_YMM | T_EVEX, 0xD3); }
-void vpsrlvd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W0 | T_EW0 | T_YMM | T_EVEX | T_B32, 0x45); }
-void vpsrlvq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_W1 | T_EW1 | T_YMM | T_EVEX | T_B64, 0x45); }
-void vpsrlw(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 2), x, op, T_66 | T_0F | T_YMM | T_EVEX | T_MEM_EVEX, 0x71, imm); }
-void vpsrlw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_YMM | T_EVEX, 0xD1); }
-void vpsubb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xF8); }
-void vpsubd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0xFA); }
-void vpsubq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0xFB); }
-void vpsubsb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xE8); }
-void vpsubsw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xE9); }
-void vpsubusb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xD8); }
-void vpsubusw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xD9); }
-void vpsubw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0xF9); }
-void vptest(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38 | T_YMM, 0x17); }
-void vpunpckhbw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0x68); }
-void vpunpckhdq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0x6A); }
-void vpunpckhqdq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0x6D); }
-void vpunpckhwd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0x69); }
-void vpunpcklbw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0x60); }
-void vpunpckldq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0x62); }
-void vpunpcklqdq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0x6C); }
-void vpunpcklwd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM | T_EVEX, 0x61); }
-void vpxor(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_YMM, 0xEF); }
-void vrcpps(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_0F | T_YMM, 0x53); }
-void vrcpss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_F3 | T_0F, 0x53); }
-void vroundpd(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A | T_YMM, 0x09, imm); }
-void vroundps(const Xmm& xm, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F3A | T_YMM, 0x08, imm); }
-void vroundsd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0, 0x0B, imm); }
-void vroundss(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_W0, 0x0A, imm); }
-void vrsqrtps(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_0F | T_YMM, 0x52); }
-void vrsqrtss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_F3 | T_0F, 0x52); }
-void vshufpd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0xC6, imm); }
-void vshufps(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0xC6, imm); }
-void vsqrtpd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x51); }
-void vsqrtps(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x51); }
-void vsqrtsd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_F2 | T_0F | T_EW1 | T_EVEX | T_ER_X, 0x51); }
-void vsqrtss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_F3 | T_0F | T_EW0 | T_EVEX | T_ER_X, 0x51); }
-void vstmxcsr(const Address& addr) { opAVX_X_X_XM(xm3, xm0, addr, T_0F, 0xAE); }
-void vsubpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x5C); }
-void vsubps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x5C); }
-void vsubsd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F2 | T_EW1 | T_EVEX | T_ER_Z | T_N8, 0x5C); }
-void vsubss(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_F3 | T_EW0 | T_EVEX | T_ER_Z | T_N4, 0x5C); }
-void vtestpd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38 | T_YMM, 0x0F); }
-void vtestps(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_66 | T_0F38 | T_YMM, 0x0E); }
-void vucomisd(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N8 | T_66 | T_0F | T_EW1 | T_EVEX | T_SAE_X, 0x2E); }
-void vucomiss(const Xmm& xm, const Operand& op) { opAVX_X_XM_IMM(xm, op, T_N4 | T_0F | T_EW0 | T_EVEX | T_SAE_X, 0x2E); }
-void vunpckhpd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0x15); }
-void vunpckhps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0x15); }
-void vunpcklpd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_EVEX | T_B64, 0x14); }
-void vunpcklps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_0F | T_EW0 | T_YMM | T_EVEX | T_B32, 0x14); }
-void vxorpd(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_66 | T_EW1 | T_YMM | T_EVEX | T_ER_Z | T_B64, 0x57); }
-void vxorps(const Xmm& xmm, const Operand& op1, const Operand& op2 = Operand()) { opAVX_X_X_XM(xmm, op1, op2, T_0F | T_EW0 | T_YMM | T_EVEX | T_ER_Z | T_B32, 0x57); }
-void vzeroall() { db(0xC5); db(0xFC); db(0x77); }
-void vzeroupper() { db(0xC5); db(0xF8); db(0x77); }
-void wait() { db(0x9B); }
-void wbinvd() { db(0x0F); db(0x09); }
-void wrmsr() { db(0x0F); db(0x30); }
-void xadd(const Operand& op, const Reg& reg) { opModRM(reg, op, (op.isREG() && reg.isREG() && op.getBit() == reg.getBit()), op.isMEM(), 0x0F, 0xC0 | (reg.isBit(8) ? 0 : 1)); }
-void xgetbv() { db(0x0F); db(0x01); db(0xD0); }
-void xlatb() { db(0xD7); }
-void xor_(const Operand& op, uint32 imm) { opRM_I(op, imm, 0x30, 6); }
-void xor_(const Operand& op1, const Operand& op2) { opRM_RM(op1, op2, 0x30); }
-void xorpd(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x57, 0x66, isXMM_XMMorMEM); }
-void xorps(const Xmm& xmm, const Operand& op) { opGen(xmm, op, 0x57, 0x100, isXMM_XMMorMEM); }
-#ifdef XBYAK_ENABLE_OMITTED_OPERAND
-void vblendpd(const Xmm& x, const Operand& op, uint8 imm) { vblendpd(x, x, op, imm); }
-void vblendps(const Xmm& x, const Operand& op, uint8 imm) { vblendps(x, x, op, imm); }
-void vblendvpd(const Xmm& x1, const Operand& op, const Xmm& x4) { vblendvpd(x1, x1, op, x4); }
-void vblendvps(const Xmm& x1, const Operand& op, const Xmm& x4) { vblendvps(x1, x1, op, x4); }
-void vcmpeq_ospd(const Xmm& x, const Operand& op) { vcmpeq_ospd(x, x, op); }
-void vcmpeq_osps(const Xmm& x, const Operand& op) { vcmpeq_osps(x, x, op); }
-void vcmpeq_ossd(const Xmm& x, const Operand& op) { vcmpeq_ossd(x, x, op); }
-void vcmpeq_osss(const Xmm& x, const Operand& op) { vcmpeq_osss(x, x, op); }
-void vcmpeq_uqpd(const Xmm& x, const Operand& op) { vcmpeq_uqpd(x, x, op); }
-void vcmpeq_uqps(const Xmm& x, const Operand& op) { vcmpeq_uqps(x, x, op); }
-void vcmpeq_uqsd(const Xmm& x, const Operand& op) { vcmpeq_uqsd(x, x, op); }
-void vcmpeq_uqss(const Xmm& x, const Operand& op) { vcmpeq_uqss(x, x, op); }
-void vcmpeq_uspd(const Xmm& x, const Operand& op) { vcmpeq_uspd(x, x, op); }
-void vcmpeq_usps(const Xmm& x, const Operand& op) { vcmpeq_usps(x, x, op); }
-void vcmpeq_ussd(const Xmm& x, const Operand& op) { vcmpeq_ussd(x, x, op); }
-void vcmpeq_usss(const Xmm& x, const Operand& op) { vcmpeq_usss(x, x, op); }
-void vcmpeqpd(const Xmm& x, const Operand& op) { vcmpeqpd(x, x, op); }
-void vcmpeqps(const Xmm& x, const Operand& op) { vcmpeqps(x, x, op); }
-void vcmpeqsd(const Xmm& x, const Operand& op) { vcmpeqsd(x, x, op); }
-void vcmpeqss(const Xmm& x, const Operand& op) { vcmpeqss(x, x, op); }
-void vcmpfalse_ospd(const Xmm& x, const Operand& op) { vcmpfalse_ospd(x, x, op); }
-void vcmpfalse_osps(const Xmm& x, const Operand& op) { vcmpfalse_osps(x, x, op); }
-void vcmpfalse_ossd(const Xmm& x, const Operand& op) { vcmpfalse_ossd(x, x, op); }
-void vcmpfalse_osss(const Xmm& x, const Operand& op) { vcmpfalse_osss(x, x, op); }
-void vcmpfalsepd(const Xmm& x, const Operand& op) { vcmpfalsepd(x, x, op); }
-void vcmpfalseps(const Xmm& x, const Operand& op) { vcmpfalseps(x, x, op); }
-void vcmpfalsesd(const Xmm& x, const Operand& op) { vcmpfalsesd(x, x, op); }
-void vcmpfalsess(const Xmm& x, const Operand& op) { vcmpfalsess(x, x, op); }
-void vcmpge_oqpd(const Xmm& x, const Operand& op) { vcmpge_oqpd(x, x, op); }
-void vcmpge_oqps(const Xmm& x, const Operand& op) { vcmpge_oqps(x, x, op); }
-void vcmpge_oqsd(const Xmm& x, const Operand& op) { vcmpge_oqsd(x, x, op); }
-void vcmpge_oqss(const Xmm& x, const Operand& op) { vcmpge_oqss(x, x, op); }
-void vcmpgepd(const Xmm& x, const Operand& op) { vcmpgepd(x, x, op); }
-void vcmpgeps(const Xmm& x, const Operand& op) { vcmpgeps(x, x, op); }
-void vcmpgesd(const Xmm& x, const Operand& op) { vcmpgesd(x, x, op); }
-void vcmpgess(const Xmm& x, const Operand& op) { vcmpgess(x, x, op); }
-void vcmpgt_oqpd(const Xmm& x, const Operand& op) { vcmpgt_oqpd(x, x, op); }
-void vcmpgt_oqps(const Xmm& x, const Operand& op) { vcmpgt_oqps(x, x, op); }
-void vcmpgt_oqsd(const Xmm& x, const Operand& op) { vcmpgt_oqsd(x, x, op); }
-void vcmpgt_oqss(const Xmm& x, const Operand& op) { vcmpgt_oqss(x, x, op); }
-void vcmpgtpd(const Xmm& x, const Operand& op) { vcmpgtpd(x, x, op); }
-void vcmpgtps(const Xmm& x, const Operand& op) { vcmpgtps(x, x, op); }
-void vcmpgtsd(const Xmm& x, const Operand& op) { vcmpgtsd(x, x, op); }
-void vcmpgtss(const Xmm& x, const Operand& op) { vcmpgtss(x, x, op); }
-void vcmple_oqpd(const Xmm& x, const Operand& op) { vcmple_oqpd(x, x, op); }
-void vcmple_oqps(const Xmm& x, const Operand& op) { vcmple_oqps(x, x, op); }
-void vcmple_oqsd(const Xmm& x, const Operand& op) { vcmple_oqsd(x, x, op); }
-void vcmple_oqss(const Xmm& x, const Operand& op) { vcmple_oqss(x, x, op); }
-void vcmplepd(const Xmm& x, const Operand& op) { vcmplepd(x, x, op); }
-void vcmpleps(const Xmm& x, const Operand& op) { vcmpleps(x, x, op); }
-void vcmplesd(const Xmm& x, const Operand& op) { vcmplesd(x, x, op); }
-void vcmpless(const Xmm& x, const Operand& op) { vcmpless(x, x, op); }
-void vcmplt_oqpd(const Xmm& x, const Operand& op) { vcmplt_oqpd(x, x, op); }
-void vcmplt_oqps(const Xmm& x, const Operand& op) { vcmplt_oqps(x, x, op); }
-void vcmplt_oqsd(const Xmm& x, const Operand& op) { vcmplt_oqsd(x, x, op); }
-void vcmplt_oqss(const Xmm& x, const Operand& op) { vcmplt_oqss(x, x, op); }
-void vcmpltpd(const Xmm& x, const Operand& op) { vcmpltpd(x, x, op); }
-void vcmpltps(const Xmm& x, const Operand& op) { vcmpltps(x, x, op); }
-void vcmpltsd(const Xmm& x, const Operand& op) { vcmpltsd(x, x, op); }
-void vcmpltss(const Xmm& x, const Operand& op) { vcmpltss(x, x, op); }
-void vcmpneq_oqpd(const Xmm& x, const Operand& op) { vcmpneq_oqpd(x, x, op); }
-void vcmpneq_oqps(const Xmm& x, const Operand& op) { vcmpneq_oqps(x, x, op); }
-void vcmpneq_oqsd(const Xmm& x, const Operand& op) { vcmpneq_oqsd(x, x, op); }
-void vcmpneq_oqss(const Xmm& x, const Operand& op) { vcmpneq_oqss(x, x, op); }
-void vcmpneq_ospd(const Xmm& x, const Operand& op) { vcmpneq_ospd(x, x, op); }
-void vcmpneq_osps(const Xmm& x, const Operand& op) { vcmpneq_osps(x, x, op); }
-void vcmpneq_ossd(const Xmm& x, const Operand& op) { vcmpneq_ossd(x, x, op); }
-void vcmpneq_osss(const Xmm& x, const Operand& op) { vcmpneq_osss(x, x, op); }
-void vcmpneq_uspd(const Xmm& x, const Operand& op) { vcmpneq_uspd(x, x, op); }
-void vcmpneq_usps(const Xmm& x, const Operand& op) { vcmpneq_usps(x, x, op); }
-void vcmpneq_ussd(const Xmm& x, const Operand& op) { vcmpneq_ussd(x, x, op); }
-void vcmpneq_usss(const Xmm& x, const Operand& op) { vcmpneq_usss(x, x, op); }
-void vcmpneqpd(const Xmm& x, const Operand& op) { vcmpneqpd(x, x, op); }
-void vcmpneqps(const Xmm& x, const Operand& op) { vcmpneqps(x, x, op); }
-void vcmpneqsd(const Xmm& x, const Operand& op) { vcmpneqsd(x, x, op); }
-void vcmpneqss(const Xmm& x, const Operand& op) { vcmpneqss(x, x, op); }
-void vcmpnge_uqpd(const Xmm& x, const Operand& op) { vcmpnge_uqpd(x, x, op); }
-void vcmpnge_uqps(const Xmm& x, const Operand& op) { vcmpnge_uqps(x, x, op); }
-void vcmpnge_uqsd(const Xmm& x, const Operand& op) { vcmpnge_uqsd(x, x, op); }
-void vcmpnge_uqss(const Xmm& x, const Operand& op) { vcmpnge_uqss(x, x, op); }
-void vcmpngepd(const Xmm& x, const Operand& op) { vcmpngepd(x, x, op); }
-void vcmpngeps(const Xmm& x, const Operand& op) { vcmpngeps(x, x, op); }
-void vcmpngesd(const Xmm& x, const Operand& op) { vcmpngesd(x, x, op); }
-void vcmpngess(const Xmm& x, const Operand& op) { vcmpngess(x, x, op); }
-void vcmpngt_uqpd(const Xmm& x, const Operand& op) { vcmpngt_uqpd(x, x, op); }
-void vcmpngt_uqps(const Xmm& x, const Operand& op) { vcmpngt_uqps(x, x, op); }
-void vcmpngt_uqsd(const Xmm& x, const Operand& op) { vcmpngt_uqsd(x, x, op); }
-void vcmpngt_uqss(const Xmm& x, const Operand& op) { vcmpngt_uqss(x, x, op); }
-void vcmpngtpd(const Xmm& x, const Operand& op) { vcmpngtpd(x, x, op); }
-void vcmpngtps(const Xmm& x, const Operand& op) { vcmpngtps(x, x, op); }
-void vcmpngtsd(const Xmm& x, const Operand& op) { vcmpngtsd(x, x, op); }
-void vcmpngtss(const Xmm& x, const Operand& op) { vcmpngtss(x, x, op); }
-void vcmpnle_uqpd(const Xmm& x, const Operand& op) { vcmpnle_uqpd(x, x, op); }
-void vcmpnle_uqps(const Xmm& x, const Operand& op) { vcmpnle_uqps(x, x, op); }
-void vcmpnle_uqsd(const Xmm& x, const Operand& op) { vcmpnle_uqsd(x, x, op); }
-void vcmpnle_uqss(const Xmm& x, const Operand& op) { vcmpnle_uqss(x, x, op); }
-void vcmpnlepd(const Xmm& x, const Operand& op) { vcmpnlepd(x, x, op); }
-void vcmpnleps(const Xmm& x, const Operand& op) { vcmpnleps(x, x, op); }
-void vcmpnlesd(const Xmm& x, const Operand& op) { vcmpnlesd(x, x, op); }
-void vcmpnless(const Xmm& x, const Operand& op) { vcmpnless(x, x, op); }
-void vcmpnlt_uqpd(const Xmm& x, const Operand& op) { vcmpnlt_uqpd(x, x, op); }
-void vcmpnlt_uqps(const Xmm& x, const Operand& op) { vcmpnlt_uqps(x, x, op); }
-void vcmpnlt_uqsd(const Xmm& x, const Operand& op) { vcmpnlt_uqsd(x, x, op); }
-void vcmpnlt_uqss(const Xmm& x, const Operand& op) { vcmpnlt_uqss(x, x, op); }
-void vcmpnltpd(const Xmm& x, const Operand& op) { vcmpnltpd(x, x, op); }
-void vcmpnltps(const Xmm& x, const Operand& op) { vcmpnltps(x, x, op); }
-void vcmpnltsd(const Xmm& x, const Operand& op) { vcmpnltsd(x, x, op); }
-void vcmpnltss(const Xmm& x, const Operand& op) { vcmpnltss(x, x, op); }
-void vcmpord_spd(const Xmm& x, const Operand& op) { vcmpord_spd(x, x, op); }
-void vcmpord_sps(const Xmm& x, const Operand& op) { vcmpord_sps(x, x, op); }
-void vcmpord_ssd(const Xmm& x, const Operand& op) { vcmpord_ssd(x, x, op); }
-void vcmpord_sss(const Xmm& x, const Operand& op) { vcmpord_sss(x, x, op); }
-void vcmpordpd(const Xmm& x, const Operand& op) { vcmpordpd(x, x, op); }
-void vcmpordps(const Xmm& x, const Operand& op) { vcmpordps(x, x, op); }
-void vcmpordsd(const Xmm& x, const Operand& op) { vcmpordsd(x, x, op); }
-void vcmpordss(const Xmm& x, const Operand& op) { vcmpordss(x, x, op); }
-void vcmppd(const Xmm& x, const Operand& op, uint8 imm) { vcmppd(x, x, op, imm); }
-void vcmpps(const Xmm& x, const Operand& op, uint8 imm) { vcmpps(x, x, op, imm); }
-void vcmpsd(const Xmm& x, const Operand& op, uint8 imm) { vcmpsd(x, x, op, imm); }
-void vcmpss(const Xmm& x, const Operand& op, uint8 imm) { vcmpss(x, x, op, imm); }
-void vcmptrue_uspd(const Xmm& x, const Operand& op) { vcmptrue_uspd(x, x, op); }
-void vcmptrue_usps(const Xmm& x, const Operand& op) { vcmptrue_usps(x, x, op); }
-void vcmptrue_ussd(const Xmm& x, const Operand& op) { vcmptrue_ussd(x, x, op); }
-void vcmptrue_usss(const Xmm& x, const Operand& op) { vcmptrue_usss(x, x, op); }
-void vcmptruepd(const Xmm& x, const Operand& op) { vcmptruepd(x, x, op); }
-void vcmptrueps(const Xmm& x, const Operand& op) { vcmptrueps(x, x, op); }
-void vcmptruesd(const Xmm& x, const Operand& op) { vcmptruesd(x, x, op); }
-void vcmptruess(const Xmm& x, const Operand& op) { vcmptruess(x, x, op); }
-void vcmpunord_spd(const Xmm& x, const Operand& op) { vcmpunord_spd(x, x, op); }
-void vcmpunord_sps(const Xmm& x, const Operand& op) { vcmpunord_sps(x, x, op); }
-void vcmpunord_ssd(const Xmm& x, const Operand& op) { vcmpunord_ssd(x, x, op); }
-void vcmpunord_sss(const Xmm& x, const Operand& op) { vcmpunord_sss(x, x, op); }
-void vcmpunordpd(const Xmm& x, const Operand& op) { vcmpunordpd(x, x, op); }
-void vcmpunordps(const Xmm& x, const Operand& op) { vcmpunordps(x, x, op); }
-void vcmpunordsd(const Xmm& x, const Operand& op) { vcmpunordsd(x, x, op); }
-void vcmpunordss(const Xmm& x, const Operand& op) { vcmpunordss(x, x, op); }
-void vcvtsd2ss(const Xmm& x, const Operand& op) { vcvtsd2ss(x, x, op); }
-void vcvtsi2sd(const Xmm& x, const Operand& op) { vcvtsi2sd(x, x, op); }
-void vcvtsi2ss(const Xmm& x, const Operand& op) { vcvtsi2ss(x, x, op); }
-void vcvtss2sd(const Xmm& x, const Operand& op) { vcvtss2sd(x, x, op); }
-void vdppd(const Xmm& x, const Operand& op, uint8 imm) { vdppd(x, x, op, imm); }
-void vdpps(const Xmm& x, const Operand& op, uint8 imm) { vdpps(x, x, op, imm); }
-void vinsertps(const Xmm& x, const Operand& op, uint8 imm) { vinsertps(x, x, op, imm); }
-void vmpsadbw(const Xmm& x, const Operand& op, uint8 imm) { vmpsadbw(x, x, op, imm); }
-void vpackssdw(const Xmm& x, const Operand& op) { vpackssdw(x, x, op); }
-void vpacksswb(const Xmm& x, const Operand& op) { vpacksswb(x, x, op); }
-void vpackusdw(const Xmm& x, const Operand& op) { vpackusdw(x, x, op); }
-void vpackuswb(const Xmm& x, const Operand& op) { vpackuswb(x, x, op); }
-void vpaddb(const Xmm& x, const Operand& op) { vpaddb(x, x, op); }
-void vpaddd(const Xmm& x, const Operand& op) { vpaddd(x, x, op); }
-void vpaddq(const Xmm& x, const Operand& op) { vpaddq(x, x, op); }
-void vpaddsb(const Xmm& x, const Operand& op) { vpaddsb(x, x, op); }
-void vpaddsw(const Xmm& x, const Operand& op) { vpaddsw(x, x, op); }
-void vpaddusb(const Xmm& x, const Operand& op) { vpaddusb(x, x, op); }
-void vpaddusw(const Xmm& x, const Operand& op) { vpaddusw(x, x, op); }
-void vpaddw(const Xmm& x, const Operand& op) { vpaddw(x, x, op); }
-void vpalignr(const Xmm& x, const Operand& op, uint8 imm) { vpalignr(x, x, op, imm); }
-void vpand(const Xmm& x, const Operand& op) { vpand(x, x, op); }
-void vpandn(const Xmm& x, const Operand& op) { vpandn(x, x, op); }
-void vpavgb(const Xmm& x, const Operand& op) { vpavgb(x, x, op); }
-void vpavgw(const Xmm& x, const Operand& op) { vpavgw(x, x, op); }
-void vpblendd(const Xmm& x, const Operand& op, uint8 imm) { vpblendd(x, x, op, imm); }
-void vpblendvb(const Xmm& x1, const Operand& op, const Xmm& x4) { vpblendvb(x1, x1, op, x4); }
-void vpblendw(const Xmm& x, const Operand& op, uint8 imm) { vpblendw(x, x, op, imm); }
-void vpclmulqdq(const Xmm& x, const Operand& op, uint8 imm) { vpclmulqdq(x, x, op, imm); }
-void vpcmpeqb(const Xmm& x, const Operand& op) { vpcmpeqb(x, x, op); }
-void vpcmpeqd(const Xmm& x, const Operand& op) { vpcmpeqd(x, x, op); }
-void vpcmpeqq(const Xmm& x, const Operand& op) { vpcmpeqq(x, x, op); }
-void vpcmpeqw(const Xmm& x, const Operand& op) { vpcmpeqw(x, x, op); }
-void vpcmpgtb(const Xmm& x, const Operand& op) { vpcmpgtb(x, x, op); }
-void vpcmpgtd(const Xmm& x, const Operand& op) { vpcmpgtd(x, x, op); }
-void vpcmpgtq(const Xmm& x, const Operand& op) { vpcmpgtq(x, x, op); }
-void vpcmpgtw(const Xmm& x, const Operand& op) { vpcmpgtw(x, x, op); }
-void vphaddd(const Xmm& x, const Operand& op) { vphaddd(x, x, op); }
-void vphaddsw(const Xmm& x, const Operand& op) { vphaddsw(x, x, op); }
-void vphaddw(const Xmm& x, const Operand& op) { vphaddw(x, x, op); }
-void vphsubd(const Xmm& x, const Operand& op) { vphsubd(x, x, op); }
-void vphsubsw(const Xmm& x, const Operand& op) { vphsubsw(x, x, op); }
-void vphsubw(const Xmm& x, const Operand& op) { vphsubw(x, x, op); }
-void vpinsrb(const Xmm& x, const Operand& op, uint8 imm) { vpinsrb(x, x, op, imm); }
-void vpinsrd(const Xmm& x, const Operand& op, uint8 imm) { vpinsrd(x, x, op, imm); }
-void vpinsrq(const Xmm& x, const Operand& op, uint8 imm) { vpinsrq(x, x, op, imm); }
-void vpinsrw(const Xmm& x, const Operand& op, uint8 imm) { vpinsrw(x, x, op, imm); }
-void vpmaddubsw(const Xmm& x, const Operand& op) { vpmaddubsw(x, x, op); }
-void vpmaddwd(const Xmm& x, const Operand& op) { vpmaddwd(x, x, op); }
-void vpmaxsb(const Xmm& x, const Operand& op) { vpmaxsb(x, x, op); }
-void vpmaxsd(const Xmm& x, const Operand& op) { vpmaxsd(x, x, op); }
-void vpmaxsw(const Xmm& x, const Operand& op) { vpmaxsw(x, x, op); }
-void vpmaxub(const Xmm& x, const Operand& op) { vpmaxub(x, x, op); }
-void vpmaxud(const Xmm& x, const Operand& op) { vpmaxud(x, x, op); }
-void vpmaxuw(const Xmm& x, const Operand& op) { vpmaxuw(x, x, op); }
-void vpminsb(const Xmm& x, const Operand& op) { vpminsb(x, x, op); }
-void vpminsd(const Xmm& x, const Operand& op) { vpminsd(x, x, op); }
-void vpminsw(const Xmm& x, const Operand& op) { vpminsw(x, x, op); }
-void vpminub(const Xmm& x, const Operand& op) { vpminub(x, x, op); }
-void vpminud(const Xmm& x, const Operand& op) { vpminud(x, x, op); }
-void vpminuw(const Xmm& x, const Operand& op) { vpminuw(x, x, op); }
-void vpmuldq(const Xmm& x, const Operand& op) { vpmuldq(x, x, op); }
-void vpmulhrsw(const Xmm& x, const Operand& op) { vpmulhrsw(x, x, op); }
-void vpmulhuw(const Xmm& x, const Operand& op) { vpmulhuw(x, x, op); }
-void vpmulhw(const Xmm& x, const Operand& op) { vpmulhw(x, x, op); }
-void vpmulld(const Xmm& x, const Operand& op) { vpmulld(x, x, op); }
-void vpmullw(const Xmm& x, const Operand& op) { vpmullw(x, x, op); }
-void vpmuludq(const Xmm& x, const Operand& op) { vpmuludq(x, x, op); }
-void vpor(const Xmm& x, const Operand& op) { vpor(x, x, op); }
-void vpsadbw(const Xmm& x, const Operand& op) { vpsadbw(x, x, op); }
-void vpsignb(const Xmm& x, const Operand& op) { vpsignb(x, x, op); }
-void vpsignd(const Xmm& x, const Operand& op) { vpsignd(x, x, op); }
-void vpsignw(const Xmm& x, const Operand& op) { vpsignw(x, x, op); }
-void vpslld(const Xmm& x, const Operand& op) { vpslld(x, x, op); }
-void vpslld(const Xmm& x, uint8 imm) { vpslld(x, x, imm); }
-void vpslldq(const Xmm& x, uint8 imm) { vpslldq(x, x, imm); }
-void vpsllq(const Xmm& x, const Operand& op) { vpsllq(x, x, op); }
-void vpsllq(const Xmm& x, uint8 imm) { vpsllq(x, x, imm); }
-void vpsllw(const Xmm& x, const Operand& op) { vpsllw(x, x, op); }
-void vpsllw(const Xmm& x, uint8 imm) { vpsllw(x, x, imm); }
-void vpsrad(const Xmm& x, const Operand& op) { vpsrad(x, x, op); }
-void vpsrad(const Xmm& x, uint8 imm) { vpsrad(x, x, imm); }
-void vpsraw(const Xmm& x, const Operand& op) { vpsraw(x, x, op); }
-void vpsraw(const Xmm& x, uint8 imm) { vpsraw(x, x, imm); }
-void vpsrld(const Xmm& x, const Operand& op) { vpsrld(x, x, op); }
-void vpsrld(const Xmm& x, uint8 imm) { vpsrld(x, x, imm); }
-void vpsrldq(const Xmm& x, uint8 imm) { vpsrldq(x, x, imm); }
-void vpsrlq(const Xmm& x, const Operand& op) { vpsrlq(x, x, op); }
-void vpsrlq(const Xmm& x, uint8 imm) { vpsrlq(x, x, imm); }
-void vpsrlw(const Xmm& x, const Operand& op) { vpsrlw(x, x, op); }
-void vpsrlw(const Xmm& x, uint8 imm) { vpsrlw(x, x, imm); }
-void vpsubb(const Xmm& x, const Operand& op) { vpsubb(x, x, op); }
-void vpsubd(const Xmm& x, const Operand& op) { vpsubd(x, x, op); }
-void vpsubq(const Xmm& x, const Operand& op) { vpsubq(x, x, op); }
-void vpsubsb(const Xmm& x, const Operand& op) { vpsubsb(x, x, op); }
-void vpsubsw(const Xmm& x, const Operand& op) { vpsubsw(x, x, op); }
-void vpsubusb(const Xmm& x, const Operand& op) { vpsubusb(x, x, op); }
-void vpsubusw(const Xmm& x, const Operand& op) { vpsubusw(x, x, op); }
-void vpsubw(const Xmm& x, const Operand& op) { vpsubw(x, x, op); }
-void vpunpckhbw(const Xmm& x, const Operand& op) { vpunpckhbw(x, x, op); }
-void vpunpckhdq(const Xmm& x, const Operand& op) { vpunpckhdq(x, x, op); }
-void vpunpckhqdq(const Xmm& x, const Operand& op) { vpunpckhqdq(x, x, op); }
-void vpunpckhwd(const Xmm& x, const Operand& op) { vpunpckhwd(x, x, op); }
-void vpunpcklbw(const Xmm& x, const Operand& op) { vpunpcklbw(x, x, op); }
-void vpunpckldq(const Xmm& x, const Operand& op) { vpunpckldq(x, x, op); }
-void vpunpcklqdq(const Xmm& x, const Operand& op) { vpunpcklqdq(x, x, op); }
-void vpunpcklwd(const Xmm& x, const Operand& op) { vpunpcklwd(x, x, op); }
-void vpxor(const Xmm& x, const Operand& op) { vpxor(x, x, op); }
-void vrcpss(const Xmm& x, const Operand& op) { vrcpss(x, x, op); }
-void vroundsd(const Xmm& x, const Operand& op, uint8 imm) { vroundsd(x, x, op, imm); }
-void vroundss(const Xmm& x, const Operand& op, uint8 imm) { vroundss(x, x, op, imm); }
-void vrsqrtss(const Xmm& x, const Operand& op) { vrsqrtss(x, x, op); }
-void vshufpd(const Xmm& x, const Operand& op, uint8 imm) { vshufpd(x, x, op, imm); }
-void vshufps(const Xmm& x, const Operand& op, uint8 imm) { vshufps(x, x, op, imm); }
-void vsqrtsd(const Xmm& x, const Operand& op) { vsqrtsd(x, x, op); }
-void vsqrtss(const Xmm& x, const Operand& op) { vsqrtss(x, x, op); }
-void vunpckhpd(const Xmm& x, const Operand& op) { vunpckhpd(x, x, op); }
-void vunpckhps(const Xmm& x, const Operand& op) { vunpckhps(x, x, op); }
-void vunpcklpd(const Xmm& x, const Operand& op) { vunpcklpd(x, x, op); }
-void vunpcklps(const Xmm& x, const Operand& op) { vunpcklps(x, x, op); }
-#endif
-#ifdef XBYAK64
-void jecxz(std::string label) { db(0x67); opJmp(label, T_SHORT, 0xe3, 0, 0); }
-void jecxz(const Label& label) { db(0x67); opJmp(label, T_SHORT, 0xe3, 0, 0); }
-void jrcxz(std::string label) { opJmp(label, T_SHORT, 0xe3, 0, 0); }
-void jrcxz(const Label& label) { opJmp(label, T_SHORT, 0xe3, 0, 0); }
-void cdqe() { db(0x48); db(0x98); }
-void cqo() { db(0x48); db(0x99); }
-void cmpsq() { db(0x48); db(0xA7); }
-void movsq() { db(0x48); db(0xA5); }
-void scasq() { db(0x48); db(0xAF); }
-void stosq() { db(0x48); db(0xAB); }
-void cmpxchg16b(const Address& addr) { opModM(addr, Reg64(1), 0x0F, 0xC7); }
-void movq(const Reg64& reg, const Mmx& mmx) { if (mmx.isXMM()) db(0x66); opModR(mmx, reg, 0x0F, 0x7E); }
-void movq(const Mmx& mmx, const Reg64& reg) { if (mmx.isXMM()) db(0x66); opModR(mmx, reg, 0x0F, 0x6E); }
-void movsxd(const Reg64& reg, const Operand& op) { if (!op.isBit(32)) throw Error(ERR_BAD_COMBINATION); opModRM(reg, op, op.isREG(), op.isMEM(), 0x63); }
-void pextrq(const Operand& op, const Xmm& xmm, uint8 imm) { if (!op.isREG(64) && !op.isMEM()) throw Error(ERR_BAD_COMBINATION); opGen(Reg64(xmm.getIdx()), op, 0x16, 0x66, 0, imm, 0x3A); }
-void pinsrq(const Xmm& xmm, const Operand& op, uint8 imm) { if (!op.isREG(64) && !op.isMEM()) throw Error(ERR_BAD_COMBINATION); opGen(Reg64(xmm.getIdx()), op, 0x22, 0x66, 0, imm, 0x3A); }
-void vcvtss2si(const Reg64& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F3 | T_W1 | T_EVEX | T_EW1 | T_ER_X | T_N8, 0x2D); }
-void vcvttss2si(const Reg64& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F3 | T_W1 | T_EVEX | T_EW1 | T_SAE_X | T_N8, 0x2C); }
-void vcvtsd2si(const Reg64& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F2 | T_W1 | T_EVEX | T_EW1 | T_N4 | T_ER_X, 0x2D); }
-void vcvttsd2si(const Reg64& r, const Operand& op) { opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, T_0F | T_F2 | T_W1 | T_EVEX | T_EW1 | T_N4 | T_SAE_X, 0x2C); }
-void vmovq(const Xmm& x, const Reg64& r) { opAVX_X_X_XM(x, xm0, Xmm(r.getIdx()), T_66 | T_0F | T_W1 | T_EVEX | T_EW1, 0x6E); }
-void vmovq(const Reg64& r, const Xmm& x) { opAVX_X_X_XM(x, xm0, Xmm(r.getIdx()), T_66 | T_0F | T_W1 | T_EVEX | T_EW1, 0x7E); }
-#else
-void jcxz(std::string label) { db(0x67); opJmp(label, T_SHORT, 0xe3, 0, 0); }
-void jcxz(const Label& label) { db(0x67); opJmp(label, T_SHORT, 0xe3, 0, 0); }
-void jecxz(std::string label) { opJmp(label, T_SHORT, 0xe3, 0, 0); }
-void jecxz(const Label& label) { opJmp(label, T_SHORT, 0xe3, 0, 0); }
-void aaa() { db(0x37); }
-void aad() { db(0xD5); db(0x0A); }
-void aam() { db(0xD4); db(0x0A); }
-void aas() { db(0x3F); }
-void daa() { db(0x27); }
-void das() { db(0x2F); }
-void popad() { db(0x61); }
-void popfd() { db(0x9D); }
-void pusha() { db(0x60); }
-void pushad() { db(0x60); }
-void pushfd() { db(0x9C); }
-void popa() { db(0x61); }
-#endif
-#ifndef XBYAK_NO_OP_NAMES
-void and(const Operand& op1, const Operand& op2) { and_(op1, op2); }
-void and(const Operand& op, uint32 imm) { and_(op, imm); }
-void or(const Operand& op1, const Operand& op2) { or_(op1, op2); }
-void or(const Operand& op, uint32 imm) { or_(op, imm); }
-void xor(const Operand& op1, const Operand& op2) { xor_(op1, op2); }
-void xor(const Operand& op, uint32 imm) { xor_(op, imm); }
-void not(const Operand& op) { not_(op); }
-#endif
-#ifndef XBYAK_DISABLE_AVX512
-void kaddb(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W0, 0x4A); }
-void kaddd(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W1, 0x4A); }
-void kaddq(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W1, 0x4A); }
-void kaddw(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W0, 0x4A); }
-void kandb(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W0, 0x41); }
-void kandd(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W1, 0x41); }
-void kandnb(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W0, 0x42); }
-void kandnd(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W1, 0x42); }
-void kandnq(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W1, 0x42); }
-void kandnw(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W0, 0x42); }
-void kandq(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W1, 0x41); }
-void kandw(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W0, 0x41); }
-void kmovb(const Address& addr, const Opmask& k) { opVex(k, 0, addr, T_L0 | T_0F | T_66 | T_W0, 0x91); }
-void kmovb(const Opmask& k, const Operand& op) { opVex(k, 0, op, T_L0 | T_0F | T_66 | T_W0, 0x90); }
-void kmovb(const Opmask& k, const Reg32& r) { opVex(k, 0, r, T_L0 | T_0F | T_66 | T_W0, 0x92); }
-void kmovb(const Reg32& r, const Opmask& k) { opVex(r, 0, k, T_L0 | T_0F | T_66 | T_W0, 0x93); }
-void kmovd(const Address& addr, const Opmask& k) { opVex(k, 0, addr, T_L0 | T_0F | T_66 | T_W1, 0x91); }
-void kmovd(const Opmask& k, const Operand& op) { opVex(k, 0, op, T_L0 | T_0F | T_66 | T_W1, 0x90); }
-void kmovd(const Opmask& k, const Reg32& r) { opVex(k, 0, r, T_L0 | T_0F | T_F2 | T_W0, 0x92); }
-void kmovd(const Reg32& r, const Opmask& k) { opVex(r, 0, k, T_L0 | T_0F | T_F2 | T_W0, 0x93); }
-void kmovq(const Address& addr, const Opmask& k) { opVex(k, 0, addr, T_L0 | T_0F | T_W1, 0x91); }
-void kmovq(const Opmask& k, const Operand& op) { opVex(k, 0, op, T_L0 | T_0F | T_W1, 0x90); }
-void kmovw(const Address& addr, const Opmask& k) { opVex(k, 0, addr, T_L0 | T_0F | T_W0, 0x91); }
-void kmovw(const Opmask& k, const Operand& op) { opVex(k, 0, op, T_L0 | T_0F | T_W0, 0x90); }
-void kmovw(const Opmask& k, const Reg32& r) { opVex(k, 0, r, T_L0 | T_0F | T_W0, 0x92); }
-void kmovw(const Reg32& r, const Opmask& k) { opVex(r, 0, k, T_L0 | T_0F | T_W0, 0x93); }
-void knotb(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_66 | T_W0, 0x44); }
-void knotd(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_66 | T_W1, 0x44); }
-void knotq(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_W1, 0x44); }
-void knotw(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_W0, 0x44); }
-void korb(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W0, 0x45); }
-void kord(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W1, 0x45); }
-void korq(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W1, 0x45); }
-void kortestb(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_66 | T_W0, 0x98); }
-void kortestd(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_66 | T_W1, 0x98); }
-void kortestq(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_W1, 0x98); }
-void kortestw(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_W0, 0x98); }
-void korw(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W0, 0x45); }
-void kshiftlb(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W0, 0x32, imm); }
-void kshiftld(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W0, 0x33, imm); }
-void kshiftlq(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W1, 0x33, imm); }
-void kshiftlw(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W1, 0x32, imm); }
-void kshiftrb(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W0, 0x30, imm); }
-void kshiftrd(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W0, 0x31, imm); }
-void kshiftrq(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W1, 0x31, imm); }
-void kshiftrw(const Opmask& r1, const Opmask& r2, uint8 imm) { opVex(r1, 0, r2, T_66 | T_0F3A | T_W1, 0x30, imm); }
-void ktestb(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_66 | T_W0, 0x99); }
-void ktestd(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_66 | T_W1, 0x99); }
-void ktestq(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_W1, 0x99); }
-void ktestw(const Opmask& r1, const Opmask& r2) { opVex(r1, 0, r2, T_0F | T_W0, 0x99); }
-void kunpckbw(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W0, 0x4B); }
-void kunpckdq(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W1, 0x4B); }
-void kunpckwd(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W0, 0x4B); }
-void kxnorb(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W0, 0x46); }
-void kxnord(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W1, 0x46); }
-void kxnorq(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W1, 0x46); }
-void kxnorw(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W0, 0x46); }
-void kxorb(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W0, 0x47); }
-void kxord(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_66 | T_W1, 0x47); }
-void kxorq(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W1, 0x47); }
-void kxorw(const Opmask& r1, const Opmask& r2, const Opmask& r3) { opVex(r1, &r2, r3, T_L1 | T_0F | T_W0, 0x47); }
-void v4fmaddps(const Zmm& z1, const Zmm& z2, const Address& addr) { opAVX_X_X_XM(z1, z2, addr, T_0F38 | T_F2 | T_EW0 | T_YMM | T_MUST_EVEX | T_N16, 0x9A); }
-void v4fmaddss(const Xmm& x1, const Xmm& x2, const Address& addr) { opAVX_X_X_XM(x1, x2, addr, T_0F38 | T_F2 | T_EW0 | T_MUST_EVEX | T_N16, 0x9B); }
-void v4fnmaddps(const Zmm& z1, const Zmm& z2, const Address& addr) { opAVX_X_X_XM(z1, z2, addr, T_0F38 | T_F2 | T_EW0 | T_YMM | T_MUST_EVEX | T_N16, 0xAA); }
-void v4fnmaddss(const Xmm& x1, const Xmm& x2, const Address& addr) { opAVX_X_X_XM(x1, x2, addr, T_0F38 | T_F2 | T_EW0 | T_MUST_EVEX | T_N16, 0xAB); }
-void valignd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x03, imm); }
-void valignq(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x03, imm); }
-void vblendmpd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x65); }
-void vblendmps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x65); }
-void vbroadcastf32x2(const Ymm& y, const Operand& op) { opAVX_X_XM_IMM(y, op, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW0 | T_N8, 0x19); }
-void vbroadcastf32x4(const Ymm& y, const Address& addr) { opAVX_X_XM_IMM(y, addr, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW0 | T_N16, 0x1A); }
-void vbroadcastf32x8(const Zmm& y, const Address& addr) { opAVX_X_XM_IMM(y, addr, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW0 | T_N32, 0x1B); }
-void vbroadcastf64x2(const Ymm& y, const Address& addr) { opAVX_X_XM_IMM(y, addr, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW1 | T_N16, 0x1A); }
-void vbroadcastf64x4(const Zmm& y, const Address& addr) { opAVX_X_XM_IMM(y, addr, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW1 | T_N32, 0x1B); }
-void vbroadcasti32x2(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW0 | T_N8, 0x59); }
-void vbroadcasti32x4(const Ymm& y, const Operand& op) { opAVX_X_XM_IMM(y, op, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW0 | T_N16, 0x5A); }
-void vbroadcasti32x8(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, op, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW0 | T_N32, 0x5B); }
-void vbroadcasti64x2(const Ymm& y, const Operand& op) { opAVX_X_XM_IMM(y, op, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW1 | T_N16, 0x5A); }
-void vbroadcasti64x4(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, op, T_66 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW1 | T_N32, 0x5B); }
-void vcmppd(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0xC2, imm); }
-void vcmpps(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_0F | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0xC2, imm); }
-void vcmpsd(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_N8 | T_F2 | T_0F | T_EW1 | T_SAE_Z | T_MUST_EVEX, 0xC2, imm); }
-void vcmpss(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_N4 | T_F3 | T_0F | T_EW0 | T_SAE_Z | T_MUST_EVEX, 0xC2, imm); }
-void vcompressb(const Operand& op, const Xmm& x) { opAVX_X_XM_IMM(x, op, T_N1 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x63); }
-void vcompresspd(const Operand& op, const Xmm& x) { opAVX_X_XM_IMM(x, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x8A); }
-void vcompressps(const Operand& op, const Xmm& x) { opAVX_X_XM_IMM(x, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x8A); }
-void vcompressw(const Operand& op, const Xmm& x) { opAVX_X_XM_IMM(x, op, T_N2 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x63); }
-void vcvtpd2qq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F | T_EW1 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B64, 0x7B); }
-void vcvtpd2udq(const Xmm& x, const Operand& op) { opCvt2(x, op, T_0F | T_YMM | T_MUST_EVEX | T_EW1 | T_B64 | T_ER_Z, 0x79); }
-void vcvtpd2uqq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F | T_EW1 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B64, 0x79); }
-void vcvtps2qq(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_66 | T_0F | T_YMM | T_MUST_EVEX | T_EW0 | T_B32 | T_N8 | T_N_VL | T_ER_Y, 0x7B); }
-void vcvtps2udq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_0F | T_EW0 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B32, 0x79); }
-void vcvtps2uqq(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_66 | T_0F | T_YMM | T_MUST_EVEX | T_EW0 | T_B32 | T_N8 | T_N_VL | T_ER_Y, 0x79); }
-void vcvtqq2pd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_F3 | T_0F | T_EW1 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B64, 0xE6); }
-void vcvtqq2ps(const Xmm& x, const Operand& op) { opCvt2(x, op, T_0F | T_YMM | T_MUST_EVEX | T_EW1 | T_B64 | T_ER_Z, 0x5B); }
-void vcvtsd2usi(const Reg32e& r, const Operand& op) { int type = (T_F2 | T_0F | T_MUST_EVEX | T_N8 | T_ER_X) | (r.isREG(64) ? T_EW1 : T_EW0); opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, type, 0x79); }
-void vcvtss2usi(const Reg32e& r, const Operand& op) { int type = (T_F3 | T_0F | T_MUST_EVEX | T_N4 | T_ER_X) | (r.isREG(64) ?
T_EW1 : T_EW0); opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, type, 0x79); } -void vcvttpd2qq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x7A); } -void vcvttpd2udq(const Xmm& x, const Operand& op) { opCvt2(x, op, T_0F | T_YMM | T_MUST_EVEX | T_EW1 | T_B64 | T_SAE_Z, 0x78); } -void vcvttpd2uqq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x78); } -void vcvttps2qq(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_66 | T_0F | T_YMM | T_MUST_EVEX | T_EW0 | T_B32 | T_N8 | T_N_VL | T_SAE_Y, 0x7A); } -void vcvttps2udq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_0F | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x78); } -void vcvttps2uqq(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_66 | T_0F | T_YMM | T_MUST_EVEX | T_EW0 | T_B32 | T_N8 | T_N_VL | T_SAE_Y, 0x78); } -void vcvttsd2usi(const Reg32e& r, const Operand& op) { int type = (T_F2 | T_0F | T_MUST_EVEX | T_N8 | T_SAE_X) | (r.isREG(64) ? T_EW1 : T_EW0); opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, type, 0x78); } -void vcvttss2usi(const Reg32e& r, const Operand& op) { int type = (T_F3 | T_0F | T_MUST_EVEX | T_N4 | T_SAE_X) | (r.isREG(64) ? T_EW1 : T_EW0); opAVX_X_X_XM(Xmm(r.getIdx()), xm0, op, type, 0x78); } -void vcvtudq2pd(const Xmm& x, const Operand& op) { checkCvt1(x, op); opVex(x, 0, op, T_F3 | T_0F | T_YMM | T_MUST_EVEX | T_EW0 | T_B32 | T_N8 | T_N_VL, 0x7A); } -void vcvtudq2ps(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_F2 | T_0F | T_EW0 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B32, 0x7A); } -void vcvtuqq2pd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_F3 | T_0F | T_EW1 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B64, 0x7A); } -void vcvtuqq2ps(const Xmm& x, const Operand& op) { opCvt2(x, op, T_F2 | T_0F | T_YMM | T_MUST_EVEX | T_EW1 | T_B64 | T_ER_Z, 0x7A); } -void vcvtusi2sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opCvt3(x1, x2, op, T_F2 | T_0F | T_MUST_EVEX, T_W1 | T_EW1 | T_ER_X | T_N8, T_W0 | T_EW0 | T_N4, 0x7B); } -void vcvtusi2ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opCvt3(x1, x2, op, T_F3 | T_0F | T_MUST_EVEX | T_ER_X, T_W1 | T_EW1 | T_N8, T_W0 | T_EW0 | T_N4, 0x7B); } -void vdbpsadbw(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x42, imm); } -void vexp2pd(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, op, T_66 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW1 | T_B64 | T_SAE_Z, 0xC8); } -void vexp2ps(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, op, T_66 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW0 | T_B32 | T_SAE_Z, 0xC8); } -void vexpandpd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x88); } -void vexpandps(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x88); } -void vextractf32x4(const Operand& op, const Ymm& r, uint8 imm) { if (!op.is(Operand::MEM | Operand::XMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N16 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x19, imm); } -void vextractf32x8(const Operand& op, const Zmm& r, uint8 imm) { if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N32 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x1B, imm); } -void vextractf64x2(const Operand& op, const Ymm& r, uint8 imm) { if (!op.is(Operand::MEM 
| Operand::XMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N16 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x19, imm); } -void vextractf64x4(const Operand& op, const Zmm& r, uint8 imm) { if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N32 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x1B, imm); } -void vextracti32x4(const Operand& op, const Ymm& r, uint8 imm) { if (!op.is(Operand::MEM | Operand::XMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N16 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x39, imm); } -void vextracti32x8(const Operand& op, const Zmm& r, uint8 imm) { if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N32 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x3B, imm); } -void vextracti64x2(const Operand& op, const Ymm& r, uint8 imm) { if (!op.is(Operand::MEM | Operand::XMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N16 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x39, imm); } -void vextracti64x4(const Operand& op, const Zmm& r, uint8 imm) { if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r, 0, op, T_N32 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x3B, imm); } -void vfixupimmpd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x54, imm); } -void vfixupimmps(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x54, imm); } -void vfixupimmsd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F3A | T_EW1 | T_SAE_Z | T_MUST_EVEX, 0x55, imm); } -void vfixupimmss(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F3A | T_EW0 | T_SAE_Z | T_MUST_EVEX, 0x55, imm); } -void vfpclasspd(const Opmask& k, const Operand& op, uint8 imm) { if (!op.isBit(128|256|512)) throw Error(ERR_BAD_MEM_SIZE); Reg x = k; x.setBit(op.getBit()); opVex(x, 0, op, T_66 | T_0F3A | T_MUST_EVEX | T_YMM | T_EW1 | T_B64, 0x66, imm); } -void vfpclassps(const Opmask& k, const Operand& op, uint8 imm) { if (!op.isBit(128|256|512)) throw Error(ERR_BAD_MEM_SIZE); Reg x = k; x.setBit(op.getBit()); opVex(x, 0, op, T_66 | T_0F3A | T_MUST_EVEX | T_YMM | T_EW0 | T_B32, 0x66, imm); } -void vfpclasssd(const Opmask& k, const Operand& op, uint8 imm) { if (!op.isXMEM()) throw Error(ERR_BAD_MEM_SIZE); opVex(k, 0, op, T_66 | T_0F3A | T_MUST_EVEX | T_EW1 | T_N8, 0x67, imm); } -void vfpclassss(const Opmask& k, const Operand& op, uint8 imm) { if (!op.isXMEM()) throw Error(ERR_BAD_MEM_SIZE); opVex(k, 0, op, T_66 | T_0F3A | T_MUST_EVEX | T_EW0 | T_N4, 0x67, imm); } -void vgatherdpd(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_VSIB, 0x92, 1); } -void vgatherdps(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_VSIB, 0x92, 0); } -void vgatherpf0dpd(const Address& addr) { opGatherFetch(addr, zm1, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::YMM); } -void vgatherpf0dps(const Address& addr) { opGatherFetch(addr, zm1, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::ZMM); } -void vgatherpf0qpd(const Address& addr) { opGatherFetch(addr, zm1, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 
0xC7, Operand::ZMM); } -void vgatherpf0qps(const Address& addr) { opGatherFetch(addr, zm1, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC7, Operand::ZMM); } -void vgatherpf1dpd(const Address& addr) { opGatherFetch(addr, zm2, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::YMM); } -void vgatherpf1dps(const Address& addr) { opGatherFetch(addr, zm2, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::ZMM); } -void vgatherpf1qpd(const Address& addr) { opGatherFetch(addr, zm2, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC7, Operand::ZMM); } -void vgatherpf1qps(const Address& addr) { opGatherFetch(addr, zm2, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC7, Operand::ZMM); } -void vgatherqpd(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_VSIB, 0x93, 0); } -void vgatherqps(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_VSIB, 0x93, 2); } -void vgetexppd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x42); } -void vgetexpps(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x42); } -void vgetexpsd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_SAE_X | T_MUST_EVEX, 0x43); } -void vgetexpss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_SAE_X | T_MUST_EVEX, 0x43); } -void vgetmantpd(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(x, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x26, imm); } -void vgetmantps(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(x, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x26, imm); } -void vgetmantsd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F3A | T_EW1 | T_SAE_X | T_MUST_EVEX, 0x27, imm); } -void vgetmantss(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F3A | T_EW0 | T_SAE_X | T_MUST_EVEX, 0x27, imm); } -void vinsertf32x4(const Ymm& r1, const Ymm& r2, const Operand& op, uint8 imm) {if (!(r1.getKind() == r2.getKind() && op.is(Operand::MEM | Operand::XMM))) throw Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N16 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x18, imm); } -void vinsertf32x8(const Zmm& r1, const Zmm& r2, const Operand& op, uint8 imm) {if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N32 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x1A, imm); } -void vinsertf64x2(const Ymm& r1, const Ymm& r2, const Operand& op, uint8 imm) {if (!(r1.getKind() == r2.getKind() && op.is(Operand::MEM | Operand::XMM))) throw Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N16 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x18, imm); } -void vinsertf64x4(const Zmm& r1, const Zmm& r2, const Operand& op, uint8 imm) {if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N32 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x1A, imm); } -void vinserti32x4(const Ymm& r1, const Ymm& r2, const Operand& op, uint8 imm) {if (!(r1.getKind() == r2.getKind() && op.is(Operand::MEM | Operand::XMM))) throw 
Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N16 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x38, imm); } -void vinserti32x8(const Zmm& r1, const Zmm& r2, const Operand& op, uint8 imm) {if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N32 | T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x3A, imm); } -void vinserti64x2(const Ymm& r1, const Ymm& r2, const Operand& op, uint8 imm) {if (!(r1.getKind() == r2.getKind() && op.is(Operand::MEM | Operand::XMM))) throw Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N16 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x38, imm); } -void vinserti64x4(const Zmm& r1, const Zmm& r2, const Operand& op, uint8 imm) {if (!op.is(Operand::MEM | Operand::YMM)) throw Error(ERR_BAD_COMBINATION); opVex(r1, &r2, op, T_N32 | T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x3A, imm); } -void vmovdqa32(const Address& addr, const Xmm& x) { opAVX_X_XM_IMM(x, addr, T_66 | T_0F | T_EW0 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX | T_M_K, 0x7F); } -void vmovdqa32(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F | T_EW0 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX, 0x6F); } -void vmovdqa64(const Address& addr, const Xmm& x) { opAVX_X_XM_IMM(x, addr, T_66 | T_0F | T_EW1 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX | T_M_K, 0x7F); } -void vmovdqa64(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F | T_EW1 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX, 0x6F); } -void vmovdqu16(const Address& addr, const Xmm& x) { opAVX_X_XM_IMM(x, addr, T_F2 | T_0F | T_EW1 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX | T_M_K, 0x7F); } -void vmovdqu16(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_F2 | T_0F | T_EW1 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX, 0x6F); } -void vmovdqu32(const Address& addr, const Xmm& x) { opAVX_X_XM_IMM(x, addr, T_F3 | T_0F | T_EW0 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX | T_M_K, 0x7F); } -void vmovdqu32(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_F3 | T_0F | T_EW0 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX, 0x6F); } -void vmovdqu64(const Address& addr, const Xmm& x) { opAVX_X_XM_IMM(x, addr, T_F3 | T_0F | T_EW1 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX | T_M_K, 0x7F); } -void vmovdqu64(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_F3 | T_0F | T_EW1 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX, 0x6F); } -void vmovdqu8(const Address& addr, const Xmm& x) { opAVX_X_XM_IMM(x, addr, T_F2 | T_0F | T_EW0 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX | T_M_K, 0x7F); } -void vmovdqu8(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_F2 | T_0F | T_EW0 | T_YMM | T_ER_X | T_ER_Y | T_ER_Z | T_MUST_EVEX, 0x6F); } -void vp4dpwssd(const Zmm& z1, const Zmm& z2, const Address& addr) { opAVX_X_X_XM(z1, z2, addr, T_0F38 | T_F2 | T_EW0 | T_YMM | T_MUST_EVEX | T_N16, 0x52); } -void vp4dpwssds(const Zmm& z1, const Zmm& z2, const Address& addr) { opAVX_X_X_XM(z1, z2, addr, T_0F38 | T_F2 | T_EW0 | T_YMM | T_MUST_EVEX | T_N16, 0x53); } -void vpabsq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_MUST_EVEX | T_EW1 | T_B64 | T_YMM, 0x1F); } -void vpandd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0xDB); } -void vpandnd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0xDF); } -void vpandnq(const Xmm& x1, const Xmm& 
x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0xDF); } -void vpandq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0xDB); } -void vpblendmb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x66); } -void vpblendmd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x64); } -void vpblendmq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x64); } -void vpblendmw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x66); } -void vpbroadcastb(const Xmm& x, const Reg8& r) { opVex(x, 0, r, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x7A); } -void vpbroadcastd(const Xmm& x, const Reg32& r) { opVex(x, 0, r, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x7C); } -void vpbroadcastmb2q(const Xmm& x, const Opmask& k) { opVex(x, 0, k, T_F3 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW1, 0x2A); } -void vpbroadcastmw2d(const Xmm& x, const Opmask& k) { opVex(x, 0, k, T_F3 | T_0F38 | T_YMM | T_MUST_EVEX | T_EW0, 0x3A); } -void vpbroadcastw(const Xmm& x, const Reg16& r) { opVex(x, 0, r, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x7B); } -void vpcmpb(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x3F, imm); } -void vpcmpd(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x1F, imm); } -void vpcmpeqb(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F | T_YMM | T_MUST_EVEX, 0x74); } -void vpcmpeqd(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F | T_YMM | T_MUST_EVEX | T_B32, 0x76); } -void vpcmpeqq(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x29); } -void vpcmpeqw(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F | T_YMM | T_MUST_EVEX, 0x75); } -void vpcmpgtb(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F | T_YMM | T_MUST_EVEX, 0x64); } -void vpcmpgtd(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x66); } -void vpcmpgtq(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x37); } -void vpcmpgtw(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F | T_YMM | T_MUST_EVEX, 0x65); } -void vpcmpq(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x1F, imm); } -void vpcmpub(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX, 0x3E, imm); } -void vpcmpud(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x1E, imm); } -void vpcmpuq(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW1 | T_YMM | 
T_MUST_EVEX | T_B64, 0x1E, imm); } -void vpcmpuw(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x3E, imm); } -void vpcmpw(const Opmask& k, const Xmm& x, const Operand& op, uint8 imm) { opAVX_K_X_XM(k, x, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX, 0x3F, imm); } -void vpcompressd(const Operand& op, const Xmm& x) { opAVX_X_XM_IMM(x, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x8B); } -void vpcompressq(const Operand& op, const Xmm& x) { opAVX_X_XM_IMM(x, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x8B); } -void vpconflictd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0xC4); } -void vpconflictq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0xC4); } -void vpdpbusd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x50); } -void vpdpbusds(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x51); } -void vpdpwssd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x52); } -void vpdpwssds(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x53); } -void vpermb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x8D); } -void vpermi2b(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x75); } -void vpermi2d(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x76); } -void vpermi2pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x77); } -void vpermi2ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x77); } -void vpermi2q(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x76); } -void vpermi2w(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x75); } -void vpermt2b(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x7D); } -void vpermt2d(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x7E); } -void vpermt2pd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x7F); } -void vpermt2ps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x7F); } -void vpermt2q(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x7E); } -void vpermt2w(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x7D); } -void vpermw(const Xmm& x1, const Xmm& x2, const 
Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x8D); } -void vpexpandb(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_N1 | T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x62); } -void vpexpandd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x89); } -void vpexpandq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x89); } -void vpexpandw(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_N2 | T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x62); } -void vpgatherdd(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_VSIB, 0x90, 0); } -void vpgatherdq(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_VSIB, 0x90, 1); } -void vpgatherqd(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_VSIB, 0x91, 2); } -void vpgatherqq(const Xmm& x, const Address& addr) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_VSIB, 0x91, 0); } -void vplzcntd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x44); } -void vplzcntq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x44); } -void vpmadd52huq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0xB5); } -void vpmadd52luq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0xB4); } -void vpmaxsq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x3D); } -void vpmaxuq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x3F); } -void vpminsq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x39); } -void vpminuq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x3B); } -void vpmovb2m(const Opmask& k, const Xmm& x) { opVex(k, 0, x, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW0, 0x29); } -void vpmovd2m(const Opmask& k, const Xmm& x) { opVex(k, 0, x, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW0, 0x39); } -void vpmovdb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N4 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x31, false); } -void vpmovdw(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x33, true); } -void vpmovm2b(const Xmm& x, const Opmask& k) { opVex(x, 0, k, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW0, 0x28); } -void vpmovm2d(const Xmm& x, const Opmask& k) { opVex(x, 0, k, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW0, 0x38); } -void vpmovm2q(const Xmm& x, const Opmask& k) { opVex(x, 0, k, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW1, 0x38); } -void vpmovm2w(const Xmm& x, const Opmask& k) { opVex(x, 0, k, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW1, 0x28); } -void vpmovq2m(const Opmask& k, const Xmm& x) { opVex(k, 0, x, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW1, 0x39); } -void vpmovqb(const Operand& op, 
const Xmm& x) { opVmov(op, x, T_N2 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x32, false); } -void vpmovqd(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x35, true); } -void vpmovqw(const Operand& op, const Xmm& x) { opVmov(op, x, T_N4 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x34, false); } -void vpmovsdb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N4 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x21, false); } -void vpmovsdw(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x23, true); } -void vpmovsqb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N2 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x22, false); } -void vpmovsqd(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x25, true); } -void vpmovsqw(const Operand& op, const Xmm& x) { opVmov(op, x, T_N4 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x24, false); } -void vpmovswb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x20, true); } -void vpmovusdb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N4 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x11, false); } -void vpmovusdw(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x13, true); } -void vpmovusqb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N2 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x12, false); } -void vpmovusqd(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x15, true); } -void vpmovusqw(const Operand& op, const Xmm& x) { opVmov(op, x, T_N4 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x14, false); } -void vpmovuswb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x10, true); } -void vpmovw2m(const Opmask& k, const Xmm& x) { opVex(k, 0, x, T_F3 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW1, 0x29); } -void vpmovwb(const Operand& op, const Xmm& x) { opVmov(op, x, T_N8 | T_N_VL | T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x30, true); } -void vpmullq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x40); } -void vpmultishiftqb(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x83); } -void vpopcntb(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x54); } -void vpopcntd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x55); } -void vpopcntq(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x55); } -void vpopcntw(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x54); } -void vpord(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0xEB); } -void vporq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0xEB); } -void vprold(const Xmm& x, const Operand& op, uint8 
imm) { opAVX_X_X_XM(Xmm(x.getKind(), 1), x, op, T_66 | T_0F | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x72, imm); } -void vprolq(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 1), x, op, T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x72, imm); } -void vprolvd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x15); } -void vprolvq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x15); } -void vprord(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 0), x, op, T_66 | T_0F | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x72, imm); } -void vprorq(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 0), x, op, T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x72, imm); } -void vprorvd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x14); } -void vprorvq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x14); } -void vpscatterdd(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA0, 0); } -void vpscatterdq(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA0, 1); } -void vpscatterqd(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA1, 2); } -void vpscatterqq(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA1, 0); } -void vpshldd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x71, imm); } -void vpshldq(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x71, imm); } -void vpshldvd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x71); } -void vpshldvq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x71); } -void vpshldvw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x70); } -void vpshldw(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x70, imm); } -void vpshrdd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x73, imm); } -void vpshrdq(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x73, imm); } -void vpshrdvd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x73); } -void vpshrdvq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x73); } -void vpshrdvw(const Xmm& x1, 
const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x72); } -void vpshrdw(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX, 0x72, imm); } -void vpshufbitqmb(const Opmask& k, const Xmm& x, const Operand& op) { opVex(k, &x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x8F); } -void vpsllvw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x12); } -void vpsraq(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_X_XM(Xmm(x.getKind(), 4), x, op, T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x72, imm); } -void vpsraq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N16 | T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX, 0xE2); } -void vpsravq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x46); } -void vpsravw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x11); } -void vpsrlvw(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x10); } -void vpternlogd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x25, imm); } -void vpternlogq(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x25, imm); } -void vptestmb(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x26); } -void vptestmd(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x27); } -void vptestmq(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x27); } -void vptestmw(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x26); } -void vptestnmb(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x26); } -void vptestnmd(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_F3 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x27); } -void vptestnmq(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_F3 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x27); } -void vptestnmw(const Opmask& k, const Xmm& x, const Operand& op) { opAVX_K_X_XM(k, x, op, T_F3 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x26); } -void vpxord(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0xEF); } -void vpxorq(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0xEF); } -void vrangepd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x50, imm); } -void vrangeps(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x50, imm); } -void vrangesd(const Xmm& x1, 
const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F3A | T_EW1 | T_SAE_X | T_MUST_EVEX, 0x51, imm); } -void vrangess(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F3A | T_EW0 | T_SAE_X | T_MUST_EVEX, 0x51, imm); } -void vrcp14pd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x4C); } -void vrcp14ps(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x4C); } -void vrcp14sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX, 0x4D); } -void vrcp14ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX, 0x4D); } -void vrcp28pd(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, op, T_66 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW1 | T_B64 | T_SAE_Z, 0xCA); } -void vrcp28ps(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, op, T_66 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW0 | T_B32 | T_SAE_Z, 0xCA); } -void vrcp28sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_SAE_X | T_MUST_EVEX, 0xCB); } -void vrcp28ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_SAE_X | T_MUST_EVEX, 0xCB); } -void vreducepd(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(x, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B64, 0x56, imm); } -void vreduceps(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(x, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_SAE_Z | T_MUST_EVEX | T_B32, 0x56, imm); } -void vreducesd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F3A | T_EW1 | T_SAE_X | T_MUST_EVEX, 0x57, imm); } -void vreducess(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F3A | T_EW0 | T_SAE_X | T_MUST_EVEX, 0x57, imm); } -void vrndscalepd(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(x, op, T_66 | T_0F3A | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x09, imm); } -void vrndscaleps(const Xmm& x, const Operand& op, uint8 imm) { opAVX_X_XM_IMM(x, op, T_66 | T_0F3A | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x08, imm); } -void vrndscalesd(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F3A | T_EW1 | T_MUST_EVEX, 0x0B, imm); } -void vrndscaless(const Xmm& x1, const Xmm& x2, const Operand& op, uint8 imm) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F3A | T_EW0 | T_MUST_EVEX, 0x0A, imm); } -void vrsqrt14pd(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_B64, 0x4E); } -void vrsqrt14ps(const Xmm& x, const Operand& op) { opAVX_X_XM_IMM(x, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_B32, 0x4E); } -void vrsqrt14sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x4F); } -void vrsqrt14ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX, 0x4F); } -void vrsqrt28pd(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, op, T_66 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW1 | T_B64 | T_SAE_Z, 0xCC); } -void vrsqrt28ps(const Zmm& z, const Operand& op) { opAVX_X_XM_IMM(z, 
op, T_66 | T_0F38 | T_MUST_EVEX | T_YMM | T_EW0 | T_B32 | T_SAE_Z, 0xCC); } -void vrsqrt28sd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_SAE_X | T_MUST_EVEX, 0xCD); } -void vrsqrt28ss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_SAE_X | T_MUST_EVEX, 0xCD); } -void vscalefpd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW1 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B64, 0x2C); } -void vscalefps(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_66 | T_0F38 | T_EW0 | T_YMM | T_ER_Z | T_MUST_EVEX | T_B32, 0x2C); } -void vscalefsd(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N8 | T_66 | T_0F38 | T_EW1 | T_ER_X | T_MUST_EVEX, 0x2D); } -void vscalefss(const Xmm& x1, const Xmm& x2, const Operand& op) { opAVX_X_X_XM(x1, x2, op, T_N4 | T_66 | T_0F38 | T_EW0 | T_ER_X | T_MUST_EVEX, 0x2D); } -void vscatterdpd(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA2, 1); } -void vscatterdps(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA2, 0); } -void vscatterpf0dpd(const Address& addr) { opGatherFetch(addr, zm5, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::YMM); } -void vscatterpf0dps(const Address& addr) { opGatherFetch(addr, zm5, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::ZMM); } -void vscatterpf0qpd(const Address& addr) { opGatherFetch(addr, zm5, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC7, Operand::ZMM); } -void vscatterpf0qps(const Address& addr) { opGatherFetch(addr, zm5, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC7, Operand::ZMM); } -void vscatterpf1dpd(const Address& addr) { opGatherFetch(addr, zm6, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::YMM); } -void vscatterpf1dps(const Address& addr) { opGatherFetch(addr, zm6, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC6, Operand::ZMM); } -void vscatterpf1qpd(const Address& addr) { opGatherFetch(addr, zm6, T_N8 | T_66 | T_0F38 | T_EW1 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC7, Operand::ZMM); } -void vscatterpf1qps(const Address& addr) { opGatherFetch(addr, zm6, T_N4 | T_66 | T_0F38 | T_EW0 | T_MUST_EVEX | T_M_K | T_VSIB, 0xC7, Operand::ZMM); } -void vscatterqpd(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N8 | T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA3, 0); } -void vscatterqps(const Address& addr, const Xmm& x) { opGather2(x, addr, T_N4 | T_66 | T_0F38 | T_EW0 | T_YMM | T_MUST_EVEX | T_M_K | T_VSIB, 0xA3, 2); } -void vshuff32x4(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F3A | T_YMM | T_MUST_EVEX | T_EW0 | T_B32, 0x23, imm); } -void vshuff64x2(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F3A | T_YMM | T_MUST_EVEX | T_EW1 | T_B64, 0x23, imm); } -void vshufi32x4(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F3A | T_YMM | T_MUST_EVEX | T_EW0 | T_B32, 0x43, imm); } -void vshufi64x2(const Ymm& y1, const Ymm& y2, const Operand& op, uint8 imm) { opAVX_X_X_XM(y1, y2, op, T_66 | T_0F3A | T_YMM | T_MUST_EVEX | T_EW1 | T_B64, 0x43, imm); } 
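Taken together, the mnemonics above are exposed as ordinary member functions of the code generator, so AVX-512 opmask and EVEX instructions are emitted by calling them from a Xbyak::CodeGenerator subclass. A minimal sketch of that usage pattern (the class name and register choices are illustrative only, and assume xbyak's opmask/zeroing operand syntax):

#include <xbyak/xbyak.h>

struct MaskedOrGen : Xbyak::CodeGenerator {
    MaskedOrGen() {
        kandw(k1, k2, k3);                  // k1 = k2 & k3, via the kandw defined above
        vpord(zmm0 | k1 | T_z, zmm1, zmm2); // zero-masked 32-bit OR under opmask k1
        ret();
    }
};

Calling getCode() on an instance then returns the generated machine code, exactly as for non-EVEX instructions.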
-#ifdef XBYAK64 -void kmovq(const Opmask& k, const Reg64& r) { opVex(k, 0, r, T_L0 | T_0F | T_F2 | T_W1, 0x92); } -void kmovq(const Reg64& r, const Opmask& k) { opVex(r, 0, k, T_L0 | T_0F | T_F2 | T_W1, 0x93); } -void vpbroadcastq(const Xmm& x, const Reg64& r) { opVex(x, 0, r, T_66 | T_0F38 | T_EW1 | T_YMM | T_MUST_EVEX, 0x7C); } -#endif -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/src/xbyak/xbyak_util.h b/vendor/github.com/dexon-foundation/mcl/src/xbyak/xbyak_util.h deleted file mode 100644 index 01544501d..000000000 --- a/vendor/github.com/dexon-foundation/mcl/src/xbyak/xbyak_util.h +++ /dev/null @@ -1,653 +0,0 @@ -#ifndef XBYAK_XBYAK_UTIL_H_ -#define XBYAK_XBYAK_UTIL_H_ - -/** - utility class and functions for Xbyak - Xbyak::util::Clock ; rdtsc timer - Xbyak::util::Cpu ; detect CPU - @note this header is UNDER CONSTRUCTION! -*/ -#include "xbyak.h" - -#ifdef _MSC_VER - #if (_MSC_VER < 1400) && defined(XBYAK32) - static inline __declspec(naked) void __cpuid(int[4], int) - { - __asm { - push ebx - push esi - mov eax, dword ptr [esp + 4 * 2 + 8] // eaxIn - cpuid - mov esi, dword ptr [esp + 4 * 2 + 4] // data - mov dword ptr [esi], eax - mov dword ptr [esi + 4], ebx - mov dword ptr [esi + 8], ecx - mov dword ptr [esi + 12], edx - pop esi - pop ebx - ret - } - } - #else - #include <intrin.h> // for __cpuid - #endif -#else - #ifndef __GNUC_PREREQ - #define __GNUC_PREREQ(major, minor) ((((__GNUC__) << 16) + (__GNUC_MINOR__)) >= (((major) << 16) + (minor))) - #endif - #if __GNUC_PREREQ(4, 3) && !defined(__APPLE__) - #include <cpuid.h> - #else - #if defined(__APPLE__) && defined(XBYAK32) // avoid err : can't find a register in class `BREG' while reloading `asm' - #define __cpuid(eaxIn, a, b, c, d) __asm__ __volatile__("pushl %%ebx\ncpuid\nmovl %%ebp, %%esi\npopl %%ebx" : "=a"(a), "=S"(b), "=c"(c), "=d"(d) : "0"(eaxIn)) - #define __cpuid_count(eaxIn, ecxIn, a, b, c, d) __asm__ __volatile__("pushl %%ebx\ncpuid\nmovl %%ebp, %%esi\npopl %%ebx" : "=a"(a), "=S"(b), "=c"(c), "=d"(d) : "0"(eaxIn), "2"(ecxIn)) - #else - #define __cpuid(eaxIn, a, b, c, d) __asm__ __volatile__("cpuid\n" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "0"(eaxIn)) - #define __cpuid_count(eaxIn, ecxIn, a, b, c, d) __asm__ __volatile__("cpuid\n" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "0"(eaxIn), "2"(ecxIn)) - #endif - #endif -#endif - -namespace Xbyak { namespace util { - -/** - CPU detection class -*/ -class Cpu { - uint64 type_; - unsigned int get32bitAsBE(const char *x) const - { - return x[0] | (x[1] << 8) | (x[2] << 16) | (x[3] << 24); - } - unsigned int mask(int n) const - { - return (1U << n) - 1; - } - void setFamily() - { - unsigned int data[4]; - getCpuid(1, data); - stepping = data[0] & mask(4); - model = (data[0] >> 4) & mask(4); - family = (data[0] >> 8) & mask(4); - // type = (data[0] >> 12) & mask(2); - extModel = (data[0] >> 16) & mask(4); - extFamily = (data[0] >> 20) & mask(8); - if (family == 0x0f) { - displayFamily = family + extFamily; - } else { - displayFamily = family; - } - if (family == 6 || family == 0x0f) { - displayModel = (extModel << 4) + model; - } else { - displayModel = model; - } - } - unsigned int extractBit(unsigned int val, unsigned int base, unsigned int end) - { - return (val >> base) & ((1u << (end - base)) - 1); - } - void setCacheHierarchy() - { - if ((type_ & tINTEL) == 0) return; - const unsigned int NO_CACHE = 0; - const unsigned int DATA_CACHE = 1; -// const unsigned int INSTRUCTION_CACHE = 2; - const unsigned int UNIFIED_CACHE = 3; - unsigned int smt_width = 0; - unsigned int n_cores = 0; -
unsigned int data[4]; - - /* - If leaf 11 exists, we use it to get the number of SMT cores and cores per socket. - If x2APIC is supported, these are the only correct numbers. - - Note that leaf 0xB can be zeroed out by a hypervisor. - */ - getCpuidEx(0x0, 0, data); - if (data[0] >= 0xB) { - getCpuidEx(0xB, 0, data); // CPUID for SMT Level - smt_width = data[1] & 0x7FFF; - getCpuidEx(0xB, 1, data); // CPUID for CORE Level - n_cores = data[1] & 0x7FFF; - } - - /* - Assumptions: - the first level of data cache is not shared (which is the - case for every existing architecture), so it can be used to - determine the SMT width for architectures not supporting leaf 11; - when leaf 4 reports a number of cores less than the n_cores - per socket reported by leaf 11, it is the correct number - of cores, not an upper bound. - */ - for (int i = 0; data_cache_levels < maxNumberCacheLevels; i++) { - getCpuidEx(0x4, i, data); - unsigned int cacheType = extractBit(data[0], 0, 4); - if (cacheType == NO_CACHE) break; - if (cacheType == DATA_CACHE || cacheType == UNIFIED_CACHE) { - unsigned int nb_logical_cores = extractBit(data[0], 14, 25) + 1; - if (n_cores != 0) { // true only if leaf 0xB is supported and valid - nb_logical_cores = (std::min)(nb_logical_cores, n_cores); - } - assert(nb_logical_cores != 0); - data_cache_size[data_cache_levels] = - (extractBit(data[1], 22, 31) + 1) - * (extractBit(data[1], 12, 21) + 1) - * (extractBit(data[1], 0, 11) + 1) - * (data[2] + 1); - if (cacheType == DATA_CACHE && smt_width == 0) smt_width = nb_logical_cores; - assert(smt_width != 0); - cores_sharing_data_cache[data_cache_levels] = (std::max)(nb_logical_cores / smt_width, 1u); - data_cache_levels++; - } - } - } - -public: - int model; - int family; - int stepping; - int extModel; - int extFamily; - int displayFamily; // family + extFamily - int displayModel; // model + extModel - - // may I move these members into private?
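The leaf-0xB probing in setCacheHierarchy above can be exercised on its own. A standalone sketch of the same two CPUID queries, assuming GCC's <cpuid.h> and its __cpuid_count macro (this program is illustrative and not part of the header):

#include <cstdio>
#include <cpuid.h>

int main()
{
    unsigned int a, b, c, d;
    __cpuid_count(0xB, 0, a, b, c, d); // sub-leaf 0: SMT level
    std::printf("SMT width: %u\n", b & 0x7FFF);
    __cpuid_count(0xB, 1, a, b, c, d); // sub-leaf 1: core level
    std::printf("logical processors per socket: %u\n", b & 0x7FFF);
    return 0;
}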
- static const unsigned int maxNumberCacheLevels = 10; - unsigned int data_cache_size[maxNumberCacheLevels]; - unsigned int cores_sharing_data_cache[maxNumberCacheLevels]; - unsigned int data_cache_levels; - - unsigned int getDataCacheLevels() const { return data_cache_levels; } - unsigned int getCoresSharingDataCache(unsigned int i) const - { - if (i >= data_cache_levels) throw Error(ERR_BAD_PARAMETER); - return cores_sharing_data_cache[i]; - } - unsigned int getDataCacheSize(unsigned int i) const - { - if (i >= data_cache_levels) throw Error(ERR_BAD_PARAMETER); - return data_cache_size[i]; - } - - /* - data[] = { eax, ebx, ecx, edx } - */ - static inline void getCpuid(unsigned int eaxIn, unsigned int data[4]) - { -#ifdef _MSC_VER - __cpuid(reinterpret_cast<int*>(data), eaxIn); -#else - __cpuid(eaxIn, data[0], data[1], data[2], data[3]); -#endif - } - static inline void getCpuidEx(unsigned int eaxIn, unsigned int ecxIn, unsigned int data[4]) - { -#ifdef _MSC_VER - __cpuidex(reinterpret_cast<int*>(data), eaxIn, ecxIn); -#else - __cpuid_count(eaxIn, ecxIn, data[0], data[1], data[2], data[3]); -#endif - } - static inline uint64 getXfeature() - { -#ifdef _MSC_VER - return _xgetbv(0); -#else - unsigned int eax, edx; - // xgetbv is not supported on gcc 4.2 -// __asm__ volatile("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0)); - __asm__ volatile(".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c"(0)); - return ((uint64)edx << 32) | eax; -#endif - } - typedef uint64 Type; - - static const Type NONE = 0; - static const Type tMMX = 1 << 0; - static const Type tMMX2 = 1 << 1; - static const Type tCMOV = 1 << 2; - static const Type tSSE = 1 << 3; - static const Type tSSE2 = 1 << 4; - static const Type tSSE3 = 1 << 5; - static const Type tSSSE3 = 1 << 6; - static const Type tSSE41 = 1 << 7; - static const Type tSSE42 = 1 << 8; - static const Type tPOPCNT = 1 << 9; - static const Type tAESNI = 1 << 10; - static const Type tSSE5 = 1 << 11; - static const Type tOSXSAVE = 1 << 12; - static const Type tPCLMULQDQ = 1 << 13; - static const Type tAVX = 1 << 14; - static const Type tFMA = 1 << 15; - - static const Type t3DN = 1 << 16; - static const Type tE3DN = 1 << 17; - static const Type tSSE4a = 1 << 18; - static const Type tRDTSCP = 1 << 19; - static const Type tAVX2 = 1 << 20; - static const Type tBMI1 = 1 << 21; // andn, bextr, blsi, blsmsk, blsr, tzcnt - static const Type tBMI2 = 1 << 22; // bzhi, mulx, pdep, pext, rorx, sarx, shlx, shrx - static const Type tLZCNT = 1 << 23; - - static const Type tINTEL = 1 << 24; - static const Type tAMD = 1 << 25; - - static const Type tENHANCED_REP = 1 << 26; // enhanced rep movsb/stosb - static const Type tRDRAND = 1 << 27; - static const Type tADX = 1 << 28; // adcx, adox - static const Type tRDSEED = 1 << 29; // rdseed - static const Type tSMAP = 1 << 30; // stac - static const Type tHLE = uint64(1) << 31; // xacquire, xrelease, xtest - static const Type tRTM = uint64(1) << 32; // xbegin, xend, xabort - static const Type tF16C = uint64(1) << 33; // vcvtph2ps, vcvtps2ph - static const Type tMOVBE = uint64(1) << 34; // movbe - static const Type tAVX512F = uint64(1) << 35; - static const Type tAVX512DQ = uint64(1) << 36; - static const Type tAVX512_IFMA = uint64(1) << 37; - static const Type tAVX512IFMA = tAVX512_IFMA; - static const Type tAVX512PF = uint64(1) << 38; - static const Type tAVX512ER = uint64(1) << 39; - static const Type tAVX512CD = uint64(1) << 40; - static const Type tAVX512BW = uint64(1) << 41; - static const Type tAVX512VL = uint64(1) << 42; - static const Type
tAVX512_VBMI = uint64(1) << 43; - static const Type tAVX512VBMI = tAVX512_VBMI; // changed by Intel's manual - static const Type tAVX512_4VNNIW = uint64(1) << 44; - static const Type tAVX512_4FMAPS = uint64(1) << 45; - static const Type tPREFETCHWT1 = uint64(1) << 46; - static const Type tPREFETCHW = uint64(1) << 47; - static const Type tSHA = uint64(1) << 48; - static const Type tMPX = uint64(1) << 49; - static const Type tAVX512_VBMI2 = uint64(1) << 50; - static const Type tGFNI = uint64(1) << 51; - static const Type tVAES = uint64(1) << 52; - static const Type tVPCLMULQDQ = uint64(1) << 53; - static const Type tAVX512_VNNI = uint64(1) << 54; - static const Type tAVX512_BITALG = uint64(1) << 55; - static const Type tAVX512_VPOPCNTDQ = uint64(1) << 56; - - Cpu() - : type_(NONE) - , data_cache_levels(0) - { - unsigned int data[4]; - const unsigned int& EAX = data[0]; - const unsigned int& EBX = data[1]; - const unsigned int& ECX = data[2]; - const unsigned int& EDX = data[3]; - getCpuid(0, data); - const unsigned int maxNum = EAX; - static const char intel[] = "ntel"; - static const char amd[] = "cAMD"; - if (ECX == get32bitAsBE(amd)) { - type_ |= tAMD; - getCpuid(0x80000001, data); - if (EDX & (1U << 31)) type_ |= t3DN; - if (EDX & (1U << 15)) type_ |= tCMOV; - if (EDX & (1U << 30)) type_ |= tE3DN; - if (EDX & (1U << 22)) type_ |= tMMX2; - if (EDX & (1U << 27)) type_ |= tRDTSCP; - } - if (ECX == get32bitAsBE(intel)) { - type_ |= tINTEL; - getCpuid(0x80000001, data); - if (EDX & (1U << 27)) type_ |= tRDTSCP; - if (ECX & (1U << 5)) type_ |= tLZCNT; - if (ECX & (1U << 8)) type_ |= tPREFETCHW; - } - getCpuid(1, data); - if (ECX & (1U << 0)) type_ |= tSSE3; - if (ECX & (1U << 9)) type_ |= tSSSE3; - if (ECX & (1U << 19)) type_ |= tSSE41; - if (ECX & (1U << 20)) type_ |= tSSE42; - if (ECX & (1U << 22)) type_ |= tMOVBE; - if (ECX & (1U << 23)) type_ |= tPOPCNT; - if (ECX & (1U << 25)) type_ |= tAESNI; - if (ECX & (1U << 1)) type_ |= tPCLMULQDQ; - if (ECX & (1U << 27)) type_ |= tOSXSAVE; - if (ECX & (1U << 30)) type_ |= tRDRAND; - if (ECX & (1U << 29)) type_ |= tF16C; - - if (EDX & (1U << 15)) type_ |= tCMOV; - if (EDX & (1U << 23)) type_ |= tMMX; - if (EDX & (1U << 25)) type_ |= tMMX2 | tSSE; - if (EDX & (1U << 26)) type_ |= tSSE2; - - if (type_ & tOSXSAVE) { - // check XFEATURE_ENABLED_MASK[2:1] = '11b' - uint64 bv = getXfeature(); - if ((bv & 6) == 6) { - if (ECX & (1U << 28)) type_ |= tAVX; - if (ECX & (1U << 12)) type_ |= tFMA; - if (((bv >> 5) & 7) == 7) { - getCpuidEx(7, 0, data); - if (EBX & (1U << 16)) type_ |= tAVX512F; - if (type_ & tAVX512F) { - if (EBX & (1U << 17)) type_ |= tAVX512DQ; - if (EBX & (1U << 21)) type_ |= tAVX512_IFMA; - if (EBX & (1U << 26)) type_ |= tAVX512PF; - if (EBX & (1U << 27)) type_ |= tAVX512ER; - if (EBX & (1U << 28)) type_ |= tAVX512CD; - if (EBX & (1U << 30)) type_ |= tAVX512BW; - if (EBX & (1U << 31)) type_ |= tAVX512VL; - if (ECX & (1U << 1)) type_ |= tAVX512_VBMI; - if (ECX & (1U << 6)) type_ |= tAVX512_VBMI2; - if (ECX & (1U << 8)) type_ |= tGFNI; - if (ECX & (1U << 9)) type_ |= tVAES; - if (ECX & (1U << 10)) type_ |= tVPCLMULQDQ; - if (ECX & (1U << 11)) type_ |= tAVX512_VNNI; - if (ECX & (1U << 12)) type_ |= tAVX512_BITALG; - if (ECX & (1U << 14)) type_ |= tAVX512_VPOPCNTDQ; - if (EDX & (1U << 2)) type_ |= tAVX512_4VNNIW; - if (EDX & (1U << 3)) type_ |= tAVX512_4FMAPS; - } - } - } - } - if (maxNum >= 7) { - getCpuidEx(7, 0, data); - if (type_ & tAVX && (EBX & (1U << 5))) type_ |= tAVX2; - if (EBX & (1U << 3)) type_ |= tBMI1; - if (EBX & (1U << 8)) type_ 
|= tBMI2; - if (EBX & (1U << 9)) type_ |= tENHANCED_REP; - if (EBX & (1U << 18)) type_ |= tRDSEED; - if (EBX & (1U << 19)) type_ |= tADX; - if (EBX & (1U << 20)) type_ |= tSMAP; - if (EBX & (1U << 4)) type_ |= tHLE; - if (EBX & (1U << 11)) type_ |= tRTM; - if (EBX & (1U << 14)) type_ |= tMPX; - if (EBX & (1U << 29)) type_ |= tSHA; - if (ECX & (1U << 0)) type_ |= tPREFETCHWT1; - } - setFamily(); - setCacheHierarchy(); - } - void putFamily() const - { - printf("family=%d, model=%X, stepping=%d, extFamily=%d, extModel=%X\n", - family, model, stepping, extFamily, extModel); - printf("display:family=%X, model=%X\n", displayFamily, displayModel); - } - bool has(Type type) const - { - return (type & type_) != 0; - } -}; - -class Clock { -public: - static inline uint64 getRdtsc() - { -#ifdef _MSC_VER - return __rdtsc(); -#else - unsigned int eax, edx; - __asm__ volatile("rdtsc" : "=a"(eax), "=d"(edx)); - return ((uint64)edx << 32) | eax; -#endif - } - Clock() - : clock_(0) - , count_(0) - { - } - void begin() - { - clock_ -= getRdtsc(); - } - void end() - { - clock_ += getRdtsc(); - count_++; - } - int getCount() const { return count_; } - uint64 getClock() const { return clock_; } - void clear() { count_ = 0; clock_ = 0; } -private: - uint64 clock_; - int count_; -}; - -#ifdef XBYAK64 -const int UseRCX = 1 << 6; -const int UseRDX = 1 << 7; - -class Pack { - static const size_t maxTblNum = 15; - const Xbyak::Reg64 *tbl_[maxTblNum]; - size_t n_; -public: - Pack() : tbl_(), n_(0) {} - Pack(const Xbyak::Reg64 *tbl, size_t n) { init(tbl, n); } - Pack(const Pack& rhs) - : n_(rhs.n_) - { - for (size_t i = 0; i < n_; i++) tbl_[i] = rhs.tbl_[i]; - } - Pack& operator=(const Pack& rhs) - { - n_ = rhs.n_; - for (size_t i = 0; i < n_; i++) tbl_[i] = rhs.tbl_[i]; - return *this; - } - Pack(const Xbyak::Reg64& t0) - { n_ = 1; tbl_[0] = &t0; } - Pack(const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) - { n_ = 2; tbl_[0] = &t0; tbl_[1] = &t1; } - Pack(const Xbyak::Reg64& t2, const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) - { n_ = 3; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; } - Pack(const Xbyak::Reg64& t3, const Xbyak::Reg64& t2, const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) - { n_ = 4; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; tbl_[3] = &t3; } - Pack(const Xbyak::Reg64& t4, const Xbyak::Reg64& t3, const Xbyak::Reg64& t2, const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) - { n_ = 5; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; tbl_[3] = &t3; tbl_[4] = &t4; } - Pack(const Xbyak::Reg64& t5, const Xbyak::Reg64& t4, const Xbyak::Reg64& t3, const Xbyak::Reg64& t2, const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) - { n_ = 6; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; tbl_[3] = &t3; tbl_[4] = &t4; tbl_[5] = &t5; } - Pack(const Xbyak::Reg64& t6, const Xbyak::Reg64& t5, const Xbyak::Reg64& t4, const Xbyak::Reg64& t3, const Xbyak::Reg64& t2, const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) - { n_ = 7; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; tbl_[3] = &t3; tbl_[4] = &t4; tbl_[5] = &t5; tbl_[6] = &t6; } - Pack(const Xbyak::Reg64& t7, const Xbyak::Reg64& t6, const Xbyak::Reg64& t5, const Xbyak::Reg64& t4, const Xbyak::Reg64& t3, const Xbyak::Reg64& t2, const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) - { n_ = 8; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; tbl_[3] = &t3; tbl_[4] = &t4; tbl_[5] = &t5; tbl_[6] = &t6; tbl_[7] = &t7; } - Pack(const Xbyak::Reg64& t8, const Xbyak::Reg64& t7, const Xbyak::Reg64& t6, const Xbyak::Reg64& t5, const Xbyak::Reg64& t4, const Xbyak::Reg64& t3, const Xbyak::Reg64& t2, const 
Xbyak::Reg64& t1, const Xbyak::Reg64& t0) - { n_ = 9; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; tbl_[3] = &t3; tbl_[4] = &t4; tbl_[5] = &t5; tbl_[6] = &t6; tbl_[7] = &t7; tbl_[8] = &t8; } - Pack(const Xbyak::Reg64& t9, const Xbyak::Reg64& t8, const Xbyak::Reg64& t7, const Xbyak::Reg64& t6, const Xbyak::Reg64& t5, const Xbyak::Reg64& t4, const Xbyak::Reg64& t3, const Xbyak::Reg64& t2, const Xbyak::Reg64& t1, const Xbyak::Reg64& t0) - { n_ = 10; tbl_[0] = &t0; tbl_[1] = &t1; tbl_[2] = &t2; tbl_[3] = &t3; tbl_[4] = &t4; tbl_[5] = &t5; tbl_[6] = &t6; tbl_[7] = &t7; tbl_[8] = &t8; tbl_[9] = &t9; } - Pack& append(const Xbyak::Reg64& t) - { - if (n_ == maxTblNum) { - fprintf(stderr, "ERR Pack::can't append\n"); - throw Error(ERR_BAD_PARAMETER); - } - tbl_[n_++] = &t; - return *this; - } - void init(const Xbyak::Reg64 *tbl, size_t n) - { - if (n > maxTblNum) { - fprintf(stderr, "ERR Pack::init bad n=%d\n", (int)n); - throw Error(ERR_BAD_PARAMETER); - } - n_ = n; - for (size_t i = 0; i < n; i++) { - tbl_[i] = &tbl[i]; - } - } - const Xbyak::Reg64& operator[](size_t n) const - { - if (n >= n_) { - fprintf(stderr, "ERR Pack bad n=%d(%d)\n", (int)n, (int)n_); - throw Error(ERR_BAD_PARAMETER); - } - return *tbl_[n]; - } - size_t size() const { return n_; } - /* - get tbl[pos, pos + num) - */ - Pack sub(size_t pos, size_t num = size_t(-1)) const - { - if (num == size_t(-1)) num = n_ - pos; - if (pos + num > n_) { - fprintf(stderr, "ERR Pack::sub bad pos=%d, num=%d\n", (int)pos, (int)num); - throw Error(ERR_BAD_PARAMETER); - } - Pack pack; - pack.n_ = num; - for (size_t i = 0; i < num; i++) { - pack.tbl_[i] = tbl_[pos + i]; - } - return pack; - } - void put() const - { - for (size_t i = 0; i < n_; i++) { - printf("%s ", tbl_[i]->toString()); - } - printf("\n"); - } -}; - -class StackFrame { -#ifdef XBYAK64_WIN - static const int noSaveNum = 6; - static const int rcxPos = 0; - static const int rdxPos = 1; -#else - static const int noSaveNum = 8; - static const int rcxPos = 3; - static const int rdxPos = 2; -#endif - static const int maxRegNum = 14; // maxRegNum = 16 - rsp - rax - Xbyak::CodeGenerator *code_; - int pNum_; - int tNum_; - bool useRcx_; - bool useRdx_; - int saveNum_; - int P_; - bool makeEpilog_; - Xbyak::Reg64 pTbl_[4]; - Xbyak::Reg64 tTbl_[maxRegNum]; - Pack p_; - Pack t_; - StackFrame(const StackFrame&); - void operator=(const StackFrame&); -public: - const Pack& p; - const Pack& t; - /* - make stack frame - @param sf [in] this - @param pNum [in] num of function parameter(0 <= pNum <= 4) - @param tNum [in] num of temporary register(0 <= tNum, with UseRCX, UseRDX) #{pNum + tNum [+rcx] + [rdx]} <= 14 - @param stackSizeByte [in] local stack size - @param makeEpilog [in] automatically call close() if true - - you can use - rax - gp0, ..., gp(pNum - 1) - gt0, ..., gt(tNum-1) - rcx if tNum & UseRCX - rdx if tNum & UseRDX - rsp[0..stackSizeByte - 1] - */ - StackFrame(Xbyak::CodeGenerator *code, int pNum, int tNum = 0, int stackSizeByte = 0, bool makeEpilog = true) - : code_(code) - , pNum_(pNum) - , tNum_(tNum & ~(UseRCX | UseRDX)) - , useRcx_((tNum & UseRCX) != 0) - , useRdx_((tNum & UseRDX) != 0) - , saveNum_(0) - , P_(0) - , makeEpilog_(makeEpilog) - , p(p_) - , t(t_) - { - using namespace Xbyak; - if (pNum < 0 || pNum > 4) throw Error(ERR_BAD_PNUM); - const int allRegNum = pNum + tNum_ + (useRcx_ ? 1 : 0) + (useRdx_ ? 
1 : 0); - if (tNum_ < 0 || allRegNum > maxRegNum) throw Error(ERR_BAD_TNUM); - const Reg64& _rsp = code->rsp; - saveNum_ = (std::max)(0, allRegNum - noSaveNum); - const int *tbl = getOrderTbl() + noSaveNum; - for (int i = 0; i < saveNum_; i++) { - code->push(Reg64(tbl[i])); - } - P_ = (stackSizeByte + 7) / 8; - if (P_ > 0 && (P_ & 1) == (saveNum_ & 1)) P_++; // (rsp % 16) == 8, then increment P_ for 16 byte alignment - P_ *= 8; - if (P_ > 0) code->sub(_rsp, P_); - int pos = 0; - for (int i = 0; i < pNum; i++) { - pTbl_[i] = Xbyak::Reg64(getRegIdx(pos)); - } - for (int i = 0; i < tNum_; i++) { - tTbl_[i] = Xbyak::Reg64(getRegIdx(pos)); - } - if (useRcx_ && rcxPos < pNum) code_->mov(code_->r10, code_->rcx); - if (useRdx_ && rdxPos < pNum) code_->mov(code_->r11, code_->rdx); - p_.init(pTbl_, pNum); - t_.init(tTbl_, tNum_); - } - /* - make epilog manually - @param callRet [in] call ret() if true - */ - void close(bool callRet = true) - { - using namespace Xbyak; - const Reg64& _rsp = code_->rsp; - const int *tbl = getOrderTbl() + noSaveNum; - if (P_ > 0) code_->add(_rsp, P_); - for (int i = 0; i < saveNum_; i++) { - code_->pop(Reg64(tbl[saveNum_ - 1 - i])); - } - - if (callRet) code_->ret(); - } - ~StackFrame() - { - if (!makeEpilog_) return; - try { - close(); - } catch (std::exception& e) { - printf("ERR:StackFrame %s\n", e.what()); - exit(1); - } - } -private: - const int *getOrderTbl() const - { - using namespace Xbyak; - static const int tbl[] = { -#ifdef XBYAK64_WIN - Operand::RCX, Operand::RDX, Operand::R8, Operand::R9, Operand::R10, Operand::R11, Operand::RDI, Operand::RSI, -#else - Operand::RDI, Operand::RSI, Operand::RDX, Operand::RCX, Operand::R8, Operand::R9, Operand::R10, Operand::R11, -#endif - Operand::RBX, Operand::RBP, Operand::R12, Operand::R13, Operand::R14, Operand::R15 - }; - return &tbl[0]; - } - int getRegIdx(int& pos) const - { - assert(pos < maxRegNum); - using namespace Xbyak; - const int *tbl = getOrderTbl(); - int r = tbl[pos++]; - if (useRcx_) { - if (r == Operand::RCX) { return Operand::R10; } - if (r == Operand::R10) { r = tbl[pos++]; } - } - if (useRdx_) { - if (r == Operand::RDX) { return Operand::R11; } - if (r == Operand::R11) { return tbl[pos++]; } - } - return r; - } -}; -#endif - -} } // end of util -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/test/aggregate_sig_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/aggregate_sig_test.cpp deleted file mode 100644 index c3a0e758d..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/aggregate_sig_test.cpp +++ /dev/null @@ -1,74 +0,0 @@ -//#define MCLBN_FP_UNIT_SIZE 8 -#include <mcl/aggregate_sig.hpp> -#include <cybozu/test.hpp> -#include <cybozu/benchmark.hpp> -#include <cybozu/xorshift.hpp> - -using namespace mcl::aggs; - -CYBOZU_TEST_AUTO(init) -{ - AGGS::init(); -// AGGS::init(mcl::BN381_1); -// AGGS::init(mcl::BLS12_381); - SecretKey sec; - sec.init(); - PublicKey pub; - sec.getPublicKey(pub); - const std::string m = "abc"; - Signature sig; - sec.sign(sig, m); - CYBOZU_TEST_ASSERT(pub.verify(sig, m)); -} - -void aggregateTest(const std::vector<std::string>& msgVec) -{ - const size_t n = msgVec.size(); - std::vector<SecretKey> secVec(n); - std::vector<PublicKey> pubVec(n); - std::vector<Signature> sigVec(n); - Signature aggSig; - for (size_t i = 0; i < n; i++) { - secVec[i].init(); - secVec[i].getPublicKey(pubVec[i]); - secVec[i].sign(sigVec[i], msgVec[i]); - CYBOZU_TEST_ASSERT(pubVec[i].verify(sigVec[i], msgVec[i])); - } - aggSig.aggregate(sigVec); - CYBOZU_TEST_ASSERT(aggSig.verify(msgVec, pubVec)); - CYBOZU_BENCH_C("aggSig.verify", 10, aggSig.verify, msgVec, pubVec); -} - -CYBOZU_TEST_AUTO(aggregate) -{ -#if 0
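// Condensed, the mcl::aggs flow exercised by aggregateTest() above is (a sketch
// that only reuses calls appearing in this test; not additional API surface):
//
//   AGGS::init();                            // pick the pairing curve (BN254 here by default)
//   SecretKey sec;  sec.init();              // fresh random secret key
//   PublicKey pub;  sec.getPublicKey(pub);
//   Signature sig;  sec.sign(sig, "abc");    // one signature per (key, message) pair
//   Signature agg;  agg.aggregate(sigVec);   // fold n signatures into one
//   bool ok = agg.verify(msgVec, pubVec);    // single aggregate check instead of n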
- /* - Core i7-7700 CPU @ 3.60GHz - BN254 Fp382 Fp462 - security bit 100 115? 128 - # of sig 100 69 200 476 - 1000 693 2037 4731 - 10000 6969 20448 47000(Mclk) - */ - const size_t n = 1000; - const size_t msgSize = 16; - std::vector<std::string> msgVec(n); - cybozu::XorShift rg; - for (size_t i = 0; i < n; i++) { - std::string& msg = msgVec[i]; - msg.resize(msgSize); - for (size_t j = 0; j < msgSize; j++) { - msg[j] = (char)rg(); - } - } - aggregateTest(msgVec); -#else - const std::string msgArray[] = { "abc", "12345", "xyz", "pqr", "aggregate signature" }; - const size_t n = sizeof(msgArray) / sizeof(msgArray[0]); - std::vector<std::string> msgVec(n); - for (size_t i = 0; i < n; i++) { - msgVec[i] = msgArray[i]; - } - aggregateTest(msgVec); -#endif -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/array_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/array_test.cpp deleted file mode 100644 index 2168a28fa..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/array_test.cpp +++ /dev/null @@ -1,104 +0,0 @@ -#include <cybozu/test.hpp> -#include <mcl/array.hpp> - -template<class Array, size_t n> -void setArray(Array& a, const int (&tbl)[n]) -{ - CYBOZU_TEST_ASSERT(a.resize(n)); - for (size_t i = 0; i < n; i++) a[i] = tbl[i]; -} - -template<class Array, size_t an, size_t bn> -void swapTest(const int (&a)[an], const int (&b)[bn]) -{ - Array s, t; - setArray(s, a); - setArray(t, b); - s.swap(t); - CYBOZU_TEST_EQUAL(s.size(), bn); - CYBOZU_TEST_EQUAL(t.size(), an); - CYBOZU_TEST_EQUAL_ARRAY(s, b, s.size()); - CYBOZU_TEST_EQUAL_ARRAY(t, a, t.size()); -} - -CYBOZU_TEST_AUTO(resize) -{ - mcl::Array<int> a, b; - CYBOZU_TEST_EQUAL(a.size(), 0); - CYBOZU_TEST_EQUAL(b.size(), 0); - - const size_t n = 5; - bool ok = a.resize(n); - CYBOZU_TEST_ASSERT(ok); - CYBOZU_TEST_EQUAL(n, a.size()); - for (size_t i = 0; i < n; i++) { - a[i] = i; - } - ok = b.copy(a); - CYBOZU_TEST_ASSERT(ok); - CYBOZU_TEST_EQUAL(b.size(), n); - CYBOZU_TEST_EQUAL_ARRAY(a.data(), b.data(), n); - - const size_t small = n - 1; - ok = b.resize(small); - CYBOZU_TEST_ASSERT(ok); - CYBOZU_TEST_EQUAL(b.size(), small); - CYBOZU_TEST_EQUAL_ARRAY(a.data(), b.data(), small); - const size_t large = n * 2; - ok = b.resize(large); - CYBOZU_TEST_ASSERT(ok); - CYBOZU_TEST_EQUAL(b.size(), large); - CYBOZU_TEST_EQUAL_ARRAY(a.data(), b.data(), small); - - const int aTbl[] = { 3, 4 }; - const int bTbl[] = { 7, 6, 5, 3 }; - swapTest<mcl::Array<int> >(aTbl, bTbl); - swapTest<mcl::Array<int> >(bTbl, aTbl); -} - -CYBOZU_TEST_AUTO(FixedArray) -{ - const size_t n = 5; - mcl::FixedArray<int, n> a, b; - CYBOZU_TEST_EQUAL(a.size(), 0); - CYBOZU_TEST_EQUAL(b.size(), 0); - - bool ok = a.resize(n); - CYBOZU_TEST_ASSERT(ok); - CYBOZU_TEST_EQUAL(n, a.size()); - for (size_t i = 0; i < n; i++) { - a[i] = i; - } - ok = b.copy(a); - CYBOZU_TEST_ASSERT(ok); - CYBOZU_TEST_EQUAL(b.size(), n); - CYBOZU_TEST_EQUAL_ARRAY(a.data(), b.data(), n); - - const size_t small = n - 1; - ok = b.resize(small); - CYBOZU_TEST_ASSERT(ok); - CYBOZU_TEST_EQUAL(b.size(), small); - CYBOZU_TEST_EQUAL_ARRAY(a.data(), b.data(), small); - const size_t large = n + 1; - ok = b.resize(large); - CYBOZU_TEST_ASSERT(!ok); - - const int aTbl[] = { 3, 4 }; - const int bTbl[] = { 7, 6, 5, 3 }; - swapTest<mcl::FixedArray<int, n> >(aTbl, bTbl); - swapTest<mcl::FixedArray<int, n> >(bTbl, aTbl); -} - -#ifndef CYBOZU_DONT_USE_EXCEPTION -CYBOZU_TEST_AUTO(assign) -{ - const int aTbl[] = { 3, 4, 2 }; - const int bTbl[] = { 3, 4, 2, 1, 5 }; - mcl::Array<int> a, b; - setArray(a, aTbl); - setArray(b, bTbl); - a = b; - CYBOZU_TEST_EQUAL(a.size(), b.size()); - CYBOZU_TEST_EQUAL_ARRAY(a.data(), b.data(), a.size()); -} -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/test/base_test.cpp
b/vendor/github.com/dexon-foundation/mcl/test/base_test.cpp deleted file mode 100644 index 2733d17ca..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/base_test.cpp +++ /dev/null @@ -1,392 +0,0 @@ -// not compiled -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../src/fp_generator.hpp" -#if (CYBOZU_HOST == CYBOZU_HOST_INTEL) && (MCL_SIZEOF_UNIT == 8) - #define USE_XBYAK - static mcl::FpGenerator fg; -#endif -#define PUT(x) std::cout << #x "=" << (x) << std::endl - -const size_t MAX_N = 32; -typedef mcl::fp::Unit Unit; - -size_t getUnitSize(size_t bitSize) -{ - return (bitSize + sizeof(Unit) * 8 - 1) / (sizeof(Unit) * 8); -} - -void setMpz(mpz_class& mx, const Unit *x, size_t n) -{ - mcl::gmp::setArray(mx, x, n); -} -void getMpz(Unit *x, size_t n, const mpz_class& mx) -{ - mcl::fp::toArray(x, n, mx.get_mpz_t()); -} - -struct Montgomery { - mpz_class p_; - mpz_class R_; // (1 << (n_ * 64)) % p - mpz_class RR_; // (R * R) % p - Unit r_; // p * r = -1 mod M = 1 << 64 - size_t n_; - Montgomery() {} - explicit Montgomery(const mpz_class& p) - { - p_ = p; - r_ = mcl::montgomery::getCoff(mcl::gmp::getUnit(p, 0)); - n_ = mcl::gmp::getUnitSize(p); - R_ = 1; - R_ = (R_ << (n_ * 64)) % p_; - RR_ = (R_ * R_) % p_; - } - - void toMont(mpz_class& x) const { mul(x, x, RR_); } - void fromMont(mpz_class& x) const { mul(x, x, 1); } - - void mont(Unit *z, const Unit *x, const Unit *y) const - { - mpz_class mx, my; - setMpz(mx, x, n_); - setMpz(my, y, n_); - mul(mx, mx, my); - getMpz(z, n_, mx); - } - void mul(mpz_class& z, const mpz_class& x, const mpz_class& y) const - { -#if 1 - const size_t ySize = mcl::gmp::getUnitSize(y); - mpz_class c = y == 0 ? mpz_class(0) : x * mcl::gmp::getUnit(y, 0); - Unit q = c == 0 ? 0 : mcl::gmp::getUnit(c, 0) * r_; - c += p_ * q; - c >>= sizeof(Unit) * 8; - for (size_t i = 1; i < n_; i++) { - if (i < ySize) { - c += x * mcl::gmp::getUnit(y, i); - } - Unit q = c == 0 ? 
0 : mcl::gmp::getUnit(c, 0) * r_; - c += p_ * q; - c >>= sizeof(Unit) * 8; - } - if (c >= p_) { - c -= p_; - } - z = c; -#else - z = x * y; - const size_t zSize = mcl::gmp::getUnitSize(z); - for (size_t i = 0; i < n_; i++) { - if (i < zSize) { - Unit q = mcl::gmp::getUnit(z, 0) * r_; - z += p_ * (mp_limb_t)q; - } - z >>= sizeof(Unit) * 8; - } - if (z >= p_) { - z -= p_; - } -#endif - } -}; - -void put(const char *msg, const Unit *x, size_t n) -{ - printf("%s ", msg); - for (size_t i = 0; i < n; i++) printf("%016llx ", (long long)x[n - 1 - i]); - printf("\n"); -} -void verifyEqual(const Unit *x, const Unit *y, size_t n, const char *file, int line) -{ - bool ok = mcl::fp::isEqualArray(x, y, n); - CYBOZU_TEST_ASSERT(ok); - if (ok) return; - printf("%s:%d\n", file, line); - put("L", x, n); - put("R", y, n); - exit(1); -} -#define VERIFY_EQUAL(x, y, n) verifyEqual(x, y, n, __FILE__, __LINE__) - -void addC(Unit *z, const Unit *x, const Unit *y, const Unit *p, size_t n) -{ - mpz_class mx, my, mp; - setMpz(mx, x, n); - setMpz(my, y, n); - setMpz(mp, p, n); - mx += my; - if (mx >= mp) mx -= mp; - getMpz(z, n, mx); -} -void subC(Unit *z, const Unit *x, const Unit *y, const Unit *p, size_t n) -{ - mpz_class mx, my, mp; - setMpz(mx, x, n); - setMpz(my, y, n); - setMpz(mp, p, n); - mx -= my; - if (mx < 0) mx += mp; - getMpz(z, n, mx); -} -static inline void set_zero(mpz_t& z, Unit *p, size_t n) -{ - z->_mp_alloc = (int)n; - z->_mp_size = 0; - z->_mp_d = (mp_limb_t*)p; -} -static inline void set_mpz_t(mpz_t& z, const Unit* p, int n) -{ - z->_mp_alloc = n; - int i = n; - while (i > 0 && p[i - 1] == 0) { - i--; - } - z->_mp_size = i; - z->_mp_d = (mp_limb_t*)p; -} - -// z[2n] <- x[n] * y[n] -void mulPreC(Unit *z, const Unit *x, const Unit *y, size_t n) -{ -#if 1 - mpz_t mx, my, mz; - set_zero(mz, z, n * 2); - set_mpz_t(mx, x, n); - set_mpz_t(my, y, n); - mpz_mul(mz, mx, my); - mcl::fp::toArray(z, n * 2, mz); -#else - mpz_class mx, my; - setMpz(mx, x, n); - setMpz(my, y, n); - mx *= my; - getMpz(z, n * 2, mx); -#endif -} - -void modC(Unit *y, const Unit *x, const Unit *p, size_t n) -{ - mpz_t mx, my, mp; - set_mpz_t(mx, x, n * 2); - set_mpz_t(my, y, n); - set_mpz_t(mp, p, n); - mpz_mod(my, mx, mp); - mcl::fp::clearArray(y, my->_mp_size, n); -} - -void mul(Unit *z, const Unit *x, const Unit *y, const Unit *p, size_t n) -{ - Unit ret[MAX_N * 2]; - mpz_t mx, my, mz, mp; - set_zero(mz, ret, MAX_N * 2); - set_mpz_t(mx, x, n); - set_mpz_t(my, y, n); - set_mpz_t(mp, p, n); - mpz_mul(mz, mx, my); - mpz_mod(mz, mz, mp); - mcl::fp::toArray(z, n, mz); -} - -typedef mcl::fp::void3op void3op; -typedef mcl::fp::void4op void4op; -typedef mcl::fp::void4Iop void4Iop; - -const struct FuncOp { - size_t bitSize; - void4op addS; - void4op addL; - void4op subS; - void4op subL; - void3op mulPre; - void4Iop mont; -} gFuncOpTbl[] = { - { 128, mcl_fp_add128S, mcl_fp_add128L, mcl_fp_sub128S, mcl_fp_sub128L, mcl_fp_mul128pre, mcl_fp_mont128 }, - { 192, mcl_fp_add192S, mcl_fp_add192L, mcl_fp_sub192S, mcl_fp_sub192L, mcl_fp_mul192pre, mcl_fp_mont192 }, - { 256, mcl_fp_add256S, mcl_fp_add256L, mcl_fp_sub256S, mcl_fp_sub256L, mcl_fp_mul256pre, mcl_fp_mont256 }, - { 320, mcl_fp_add320S, mcl_fp_add320L, mcl_fp_sub320S, mcl_fp_sub320L, mcl_fp_mul320pre, mcl_fp_mont320 }, - { 384, mcl_fp_add384S, mcl_fp_add384L, mcl_fp_sub384S, mcl_fp_sub384L, mcl_fp_mul384pre, mcl_fp_mont384 }, - { 448, mcl_fp_add448S, mcl_fp_add448L, mcl_fp_sub448S, mcl_fp_sub448L, mcl_fp_mul448pre, mcl_fp_mont448 }, - { 512, mcl_fp_add512S, mcl_fp_add512L, mcl_fp_sub512S, 
mcl_fp_sub512L, mcl_fp_mul512pre, mcl_fp_mont512 }, -#if MCL_SIZEOF_UNIT == 4 - { 160, mcl_fp_add160S, mcl_fp_add160L, mcl_fp_sub160S, mcl_fp_sub160L, mcl_fp_mul160pre, mcl_fp_mont160 }, - { 224, mcl_fp_add224S, mcl_fp_add224L, mcl_fp_sub224S, mcl_fp_sub224L, mcl_fp_mul224pre, mcl_fp_mont224 }, - { 288, mcl_fp_add288S, mcl_fp_add288L, mcl_fp_sub288S, mcl_fp_sub288L, mcl_fp_mul288pre, mcl_fp_mont288 }, - { 352, mcl_fp_add352S, mcl_fp_add352L, mcl_fp_sub352S, mcl_fp_sub352L, mcl_fp_mul352pre, mcl_fp_mont352 }, - { 416, mcl_fp_add416S, mcl_fp_add416L, mcl_fp_sub416S, mcl_fp_sub416L, mcl_fp_mul416pre, mcl_fp_mont416 }, - { 480, mcl_fp_add480S, mcl_fp_add480L, mcl_fp_sub480S, mcl_fp_sub480L, mcl_fp_mul480pre, mcl_fp_mont480 }, - { 544, mcl_fp_add544S, mcl_fp_add544L, mcl_fp_sub544S, mcl_fp_sub544L, mcl_fp_mul544pre, mcl_fp_mont544 }, -#else - { 576, mcl_fp_add576S, mcl_fp_add576L, mcl_fp_sub576S, mcl_fp_sub576L, mcl_fp_mul576pre, mcl_fp_mont576 }, -#endif -}; - -FuncOp getFuncOp(size_t bitSize) -{ - typedef std::map<size_t, FuncOp> Map; - static Map map; - static bool init = false; - if (!init) { - init = true; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(gFuncOpTbl); i++) { - map[gFuncOpTbl[i].bitSize] = gFuncOpTbl[i]; - } - } - for (Map::const_iterator i = map.begin(), ie = map.end(); i != ie; ++i) { - if (bitSize <= i->second.bitSize) { - return i->second; - } - } - printf("ERR bitSize=%d\n", (int)bitSize); - exit(1); -} - -void test(const Unit *p, size_t bitSize) -{ - printf("bitSize %d\n", (int)bitSize); - const size_t n = getUnitSize(bitSize); -#ifdef NDEBUG - bool doBench = true; -#else - bool doBench = false; -#endif - const FuncOp funcOp = getFuncOp(bitSize); - const void4op addS = funcOp.addS; - const void4op addL = funcOp.addL; - const void4op subS = funcOp.subS; - const void4op subL = funcOp.subL; - const void3op mulPre = funcOp.mulPre; - const void4Iop mont = funcOp.mont; - - mcl::fp::Unit x[MAX_N], y[MAX_N]; - mcl::fp::Unit z[MAX_N], w[MAX_N]; - mcl::fp::Unit z2[MAX_N * 2]; - mcl::fp::Unit w2[MAX_N * 2]; - cybozu::XorShift rg; - mcl::fp::getRandVal(x, rg, p, bitSize); - mcl::fp::getRandVal(y, rg, p, bitSize); - const size_t C = 10; - - addC(z, x, y, p, n); - addS(w, x, y, p); - VERIFY_EQUAL(z, w, n); - for (size_t i = 0; i < C; i++) { - addC(z, y, z, p, n); - addS(w, y, w, p); - VERIFY_EQUAL(z, w, n); - addC(z, y, z, p, n); - addL(w, y, w, p); - VERIFY_EQUAL(z, w, n); - subC(z, x, z, p, n); - subS(w, x, w, p); - VERIFY_EQUAL(z, w, n); - subC(z, x, z, p, n); - subL(w, x, w, p); - VERIFY_EQUAL(z, w, n); - mulPreC(z2, x, z, n); - mulPre(w2, x, z); - VERIFY_EQUAL(z2, w2, n * 2); - } - { - mpz_class mp; - setMpz(mp, p, n); - Montgomery m(mp); -#ifdef USE_XBYAK - if (bitSize > 128) fg.init(p, n); -#endif - /* - real mont - 0 0 - 1 R^-1 - R 1 - -1 -R^-1 - -R -1 - */ - mpz_class t = 1; - const mpz_class R = (t << (n * 64)) % mp; - const mpz_class tbl[] = { - 0, 1, R, mp - 1, mp - R - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const mpz_class& mx = tbl[i]; - for (size_t j = i; j < CYBOZU_NUM_OF_ARRAY(tbl); j++) { - const mpz_class& my = tbl[j]; - getMpz(x, n, mx); - getMpz(y, n, my); - m.mont(z, x, y); - mont(w, x, y, p, m.r_); - VERIFY_EQUAL(z, w, n); -#ifdef USE_XBYAK - if (bitSize > 128) { - fg.mul_(w, x, y); - VERIFY_EQUAL(z, w, n); - } -#endif - } - } - if (doBench) { -// CYBOZU_BENCH("montC", m.mont, x, y, x); - CYBOZU_BENCH("montA ", mont, x, y, x, p, m.r_); - } - } - if (doBench) { -// CYBOZU_BENCH("addS", addS, x, y, x, p); // slow -// CYBOZU_BENCH("subS", subS, x, y, x, p);
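// The real/mont table above uses mont(x, y) = x*y*R^-1 mod p with R = 2^(64*n),
// so a stored value x stands for x*R^-1: stored 1 is real R^-1 and stored R is real 1.
// A tiny worked instance with illustrative numbers p = 13, R = 16:
//   R^-1 = 9 (mod 13), since 16*9 = 144 = 1 (mod 13)
//   toMont(5) = 5*16 mod 13 = 2, toMont(7) = 7*16 mod 13 = 8
//   mont(2, 8) = 2*8*9 mod 13 = 144 mod 13 = 1, and toMont(5*7 mod 13) = 9*16 mod 13 = 1
// i.e. mont() keeps products inside the Montgomery representation.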
-// CYBOZU_BENCH("addL", addL, x, y, x, p); -// CYBOZU_BENCH("subL", subL, x, y, x, p); - CYBOZU_BENCH("mulPreA", mulPre, w2, y, x); - CYBOZU_BENCH("mulPreC", mulPreC, w2, y, x, n); - CYBOZU_BENCH("modC ", modC, x, w2, p, n); - } -#ifdef USE_XBYAK - if (bitSize <= 128) return; - if (doBench) { - fg.init(p, n); - CYBOZU_BENCH("addA ", fg.add_, x, y, x); - CYBOZU_BENCH("subA ", fg.sub_, x, y, x); -// CYBOZU_BENCH("mulA", fg.mul_, x, y, x); - } -#endif - printf("mont test %d\n", (int)bitSize); -} - -CYBOZU_TEST_AUTO(all) -{ - const struct { - size_t n; - const uint64_t p[9]; - } tbl[] = { -// { 2, { 0xf000000000000001, 1, } }, - { 2, { 0x000000000000001d, 0x8000000000000000, } }, - { 3, { 0x000000000000012b, 0x0000000000000000, 0x0000000080000000, } }, -// { 3, { 0x0f69466a74defd8d, 0xfffffffe26f2fc17, 0x07ffffffffffffff, } }, -// { 3, { 0x7900342423332197, 0x1234567890123456, 0x1480948109481904, } }, - { 3, { 0x0f69466a74defd8d, 0xfffffffe26f2fc17, 0xffffffffffffffff, } }, -// { 4, { 0x7900342423332197, 0x4242342420123456, 0x1234567892342342, 0x1480948109481904, } }, -// { 4, { 0x0f69466a74defd8d, 0xfffffffe26f2fc17, 0x17ffffffffffffff, 0x1513423423423415, } }, - { 4, { 0xa700000000000013, 0x6121000000000013, 0xba344d8000000008, 0x2523648240000001, } }, -// { 5, { 0x0000000000000009, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x8000000000000000, } }, - { 5, { 0xfffffffffffffc97, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, } }, -// { 6, { 0x4720422423332197, 0x0034230847204720, 0x3456789012345679, 0x4820984290482212, 0x9482094820948209, 0x0194810841094810, } }, -// { 6, { 0x7204224233321972, 0x0342308472047204, 0x4567890123456790, 0x0948204204243123, 0x2098420984209482, 0x2093482094810948, } }, - { 6, { 0x00000000ffffffff, 0xffffffff00000000, 0xfffffffffffffffe, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, } }, -// { 7, { 0x0000000000000063, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x8000000000000000, } }, - { 7, { 0x000000000fffcff1, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, } }, - { 8, { 0xffffffffffffd0c9, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, } }, - { 9, { 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x00000000000001ff, } }, -// { 9, { 0x4720422423332197, 0x0034230847204720, 0x3456789012345679, 0x2498540975555312, 0x9482904924029424, 0x0948209842098402, 0x1098410948109482, 0x0820958209582094, 0x0000000000000029, } }, -// { 9, { 0x0f69466a74defd8d, 0xfffffffe26f2fc17, 0x7fffffffffffffff, 0x8572938572398583, 0x5732057823857293, 0x9820948205872380, 0x3409238420492034, 0x9483842098340298, 0x0000000000000003, } }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const size_t n = tbl[i].n; - const size_t bitSize = (n - 1) * 64 + cybozu::bsr(tbl[i].p[n - 1]) + 1; - test((const Unit*)tbl[i].p, bitSize); - } -} - diff --git a/vendor/github.com/dexon-foundation/mcl/test/bench.hpp b/vendor/github.com/dexon-foundation/mcl/test/bench.hpp deleted file mode 100644 index cc1639e6e..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/bench.hpp +++ /dev/null @@ -1,192 +0,0 @@ -#include - -void benchAddDblG1() -{ - puts("benchAddDblG1"); - const int C = 100000; - G1 P1, 
P2, P3; - hashAndMapToG1(P1, "a"); - hashAndMapToG1(P2, "b"); - P1 += P2; - P2 += P1; - printf("z.isOne()=%d %d\n", P1.z.isOne(), P2.z.isOne()); - CYBOZU_BENCH_C("G1::add(1)", C, G1::add, P3, P1, P2); - P1.normalize(); - printf("z.isOne()=%d %d\n", P1.z.isOne(), P2.z.isOne()); - CYBOZU_BENCH_C("G1::add(2)", C, G1::add, P3, P1, P2); - CYBOZU_BENCH_C("G1::add(3)", C, G1::add, P3, P2, P1); - P2.normalize(); - printf("z.isOne()=%d %d\n", P1.z.isOne(), P2.z.isOne()); - CYBOZU_BENCH_C("G1::add(4)", C, G1::add, P3, P1, P2); - P1 = P3; - printf("z.isOne()=%d\n", P1.z.isOne()); - CYBOZU_BENCH_C("G1::dbl(1)", C, G1::dbl, P3, P1); - P1.normalize(); - printf("z.isOne()=%d\n", P1.z.isOne()); - CYBOZU_BENCH_C("G1::dbl(2)", C, G1::dbl, P3, P1); -} - -void benchAddDblG2() -{ - puts("benchAddDblG2"); - const int C = 100000; - G2 P1, P2, P3; - hashAndMapToG2(P1, "a"); - hashAndMapToG2(P2, "b"); - P1 += P2; - P2 += P1; - printf("z.isOne()=%d %d\n", P1.z.isOne(), P2.z.isOne()); - CYBOZU_BENCH_C("G2::add(1)", C, G2::add, P3, P1, P2); - P1.normalize(); - printf("z.isOne()=%d %d\n", P1.z.isOne(), P2.z.isOne()); - CYBOZU_BENCH_C("G2::add(2)", C, G2::add, P3, P1, P2); - CYBOZU_BENCH_C("G2::add(3)", C, G2::add, P3, P2, P1); - P2.normalize(); - printf("z.isOne()=%d %d\n", P1.z.isOne(), P2.z.isOne()); - CYBOZU_BENCH_C("G2::add(4)", C, G2::add, P3, P1, P2); - P1 = P3; - printf("z.isOne()=%d\n", P1.z.isOne()); - CYBOZU_BENCH_C("G2::dbl(1)", C, G2::dbl, P3, P1); - P1.normalize(); - printf("z.isOne()=%d\n", P1.z.isOne()); - CYBOZU_BENCH_C("G2::dbl(2)", C, G2::dbl, P3, P1); -} - - -void testBench(const G1& P, const G2& Q) -{ - G1 Pa; - G2 Qa; - Fp12 e1, e2; - pairing(e1, P, Q); - Fp12::pow(e2, e1, 12345); - Fp x, y; - x.setHashOf("abc"); - y.setHashOf("xyz"); - const int C = 1000; - const int C3 = 100000; -#if 1 - const int C2 = 3000; - mpz_class a = x.getMpz(); - CYBOZU_BENCH_C("G1::mulCT ", C, G1::mulCT, Pa, P, a); - CYBOZU_BENCH_C("G1::mul ", C, G1::mul, Pa, Pa, a); - CYBOZU_BENCH_C("G1::add ", C, G1::add, Pa, Pa, P); - CYBOZU_BENCH_C("G1::dbl ", C, G1::dbl, Pa, Pa); - CYBOZU_BENCH_C("G2::mulCT ", C, G2::mulCT, Qa, Q, a); - CYBOZU_BENCH_C("G2::mul ", C, G2::mul, Qa, Qa, a); - CYBOZU_BENCH_C("G2::add ", C, G2::add, Qa, Qa, Q); - CYBOZU_BENCH_C("G2::dbl ", C, G2::dbl, Qa, Qa); - CYBOZU_BENCH_C("GT::pow ", C, GT::pow, e1, e1, a); -// CYBOZU_BENCH_C("GT::powGLV ", C, BN::param.glv2.pow, e1, e1, a); - G1 PP; - G2 QQ; - std::string s; - s = P.getStr(); - CYBOZU_BENCH_C("G1::setStr chk", C, PP.setStr, s); - verifyOrderG1(false); - CYBOZU_BENCH_C("G1::setStr ", C, PP.setStr, s); - verifyOrderG1(true); - s = Q.getStr(); - CYBOZU_BENCH_C("G2::setStr chk", C, QQ.setStr, s); - verifyOrderG2(false); - CYBOZU_BENCH_C("G2::setStr ", C, QQ.setStr, s); - verifyOrderG2(true); - CYBOZU_BENCH_C("hashAndMapToG1", C, hashAndMapToG1, PP, "abc", 3); - CYBOZU_BENCH_C("hashAndMapToG2", C, hashAndMapToG2, QQ, "abc", 3); -#endif - CYBOZU_BENCH_C("Fp::add ", C3, Fp::add, x, x, y); - CYBOZU_BENCH_C("Fp::sub ", C3, Fp::sub, x, x, y); - CYBOZU_BENCH_C("Fp::neg ", C3, Fp::neg, x, x); - CYBOZU_BENCH_C("Fp::mul ", C3, Fp::mul, x, x, y); - CYBOZU_BENCH_C("Fp::sqr ", C3, Fp::sqr, x, x); - CYBOZU_BENCH_C("Fp::inv ", C3, Fp::inv, x, x); - Fp2 xx, yy; - xx.a = x; - xx.b = 3; - yy.a = y; - yy.b = -5; - FpDbl d0, d1; - x = 9; - y = 3; -#if 1 - CYBOZU_BENCH_C("Fp2::add ", C3, Fp2::add, xx, xx, yy); - CYBOZU_BENCH_C("Fp2::sub ", C3, Fp2::sub, xx, xx, yy); - CYBOZU_BENCH_C("Fp2::neg ", C3, Fp2::neg, xx, xx); - CYBOZU_BENCH_C("Fp2::mul ", C3, Fp2::mul, xx, xx, 
yy); - CYBOZU_BENCH_C("Fp2::mul_xi ", C3, Fp2::mul_xi, xx, xx); - CYBOZU_BENCH_C("Fp2::sqr ", C3, Fp2::sqr, xx, xx); - CYBOZU_BENCH_C("Fp2::inv ", C3, Fp2::inv, xx, xx); - CYBOZU_BENCH_C("FpDbl::addPre ", C3, FpDbl::addPre, d1, d1, d0); - CYBOZU_BENCH_C("FpDbl::subPre ", C3, FpDbl::subPre, d1, d1, d0); - CYBOZU_BENCH_C("FpDbl::add ", C3, FpDbl::add, d1, d1, d0); - CYBOZU_BENCH_C("FpDbl::sub ", C3, FpDbl::sub, d1, d1, d0); - CYBOZU_BENCH_C("FpDbl::mulPre ", C3, FpDbl::mulPre, d0, x, y); - CYBOZU_BENCH_C("FpDbl::sqrPre ", C3, FpDbl::sqrPre, d1, x); - CYBOZU_BENCH_C("FpDbl::mod ", C3, FpDbl::mod, x, d0); - Fp2Dbl D; - CYBOZU_BENCH_C("Fp2Dbl::mulPre ", C3, Fp2Dbl::mulPre, D, xx, yy); - CYBOZU_BENCH_C("Fp2Dbl::sqrPre ", C3, Fp2Dbl::sqrPre, D, xx); - - CYBOZU_BENCH_C("GT::add ", C2, GT::add, e1, e1, e2); - CYBOZU_BENCH_C("GT::mul ", C2, GT::mul, e1, e1, e2); - CYBOZU_BENCH_C("GT::sqr ", C2, GT::sqr, e1, e1); - CYBOZU_BENCH_C("GT::inv ", C2, GT::inv, e1, e1); -#endif - CYBOZU_BENCH_C("FpDbl::mulPre ", C3, FpDbl::mulPre, d0, x, y); - CYBOZU_BENCH_C("pairing ", 3000, pairing, e1, P, Q); - CYBOZU_BENCH_C("millerLoop ", 3000, millerLoop, e1, P, Q); - CYBOZU_BENCH_C("finalExp ", 3000, finalExp, e1, e1); -//exit(1); - std::vector<Fp6> Qcoeff; - CYBOZU_BENCH_C("precomputeG2 ", C, precomputeG2, Qcoeff, Q); - precomputeG2(Qcoeff, Q); - CYBOZU_BENCH_C("precomputedML ", C, precomputedMillerLoop, e2, P, Qcoeff); -} - -inline void SquareRootPrecomputeTest(const mpz_class& p) -{ - mcl::SquareRoot sq1, sq2; - bool b; - sq1.set(&b, p, true); - CYBOZU_TEST_ASSERT(b); - CYBOZU_TEST_ASSERT(sq1.isPrecomputed()); - sq2.set(&b, p, false); - CYBOZU_TEST_ASSERT(sq1 == sq2); - if (sq1 != sq2) { - puts("dump"); - puts("sq1"); - sq1.dump(); - puts("sq2"); - sq2.dump(); - puts("---"); - } -} - -void testSquareRoot() -{ - if (BN::param.cp == mcl::BN254 || BN::param.cp == mcl::BLS12_381) { - SquareRootPrecomputeTest(BN::param.p); - SquareRootPrecomputeTest(BN::param.r); - } -} - -void testLagrange() -{ - puts("testLagrange"); - const int k = 7; - Fr c[k], x[k], y[k]; - for (size_t i = 0; i < k; i++) { - c[i].setByCSPRNG(); - x[i].setByCSPRNG(); - } - for (size_t i = 0; i < k; i++) { - mcl::evaluatePolynomial(y[i], c, k, x[i]); - } - Fr s; - mcl::LagrangeInterpolation(s, x, y, k); - CYBOZU_TEST_EQUAL(s, c[0]); - mcl::LagrangeInterpolation(s, x, y, 1); - CYBOZU_TEST_EQUAL(s, y[0]); - mcl::evaluatePolynomial(y[0], c, 1, x[0]); - CYBOZU_TEST_EQUAL(y[0], c[0]); -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/bls12_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/bls12_test.cpp deleted file mode 100644 index 7011516bd..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/bls12_test.cpp +++ /dev/null @@ -1,720 +0,0 @@ -#define PUT(x) std::cout << #x "=" << x << std::endl; -#define CYBOZU_TEST_DISABLE_AUTO_RUN -#include <cybozu/benchmark.hpp> -cybozu::CpuClock clk; -#include <cybozu/test.hpp> -#include <cybozu/option.hpp> -#include <cybozu/xorshift.hpp> -#include <mcl/bls12_381.hpp> - -#if defined(__EMSCRIPTEN__) && !defined(MCL_AVOID_EXCEPTION_TEST) - #define MCL_AVOID_EXCEPTION_TEST -#endif - -using namespace mcl::bls12; - -mcl::fp::Mode g_mode; - -const struct TestSet { - mcl::CurveParam cp; - const char *name; - const char *p; - const char *r; - struct G2 { - const char *aa; - const char *ab; - const char *ba; - const char *bb; - } g2; - struct G1 { - const char *a; - const char *b; - } g1; - const char *e; -} g_testSetTbl[] = { - { - mcl::BLS12_381, - "BLS12_381", - "0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab",
"0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001", - { - "0x024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb8", - "0x13e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e", - "0x0ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801", - "0x0606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79be", - }, - { - "0x17f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", - "0x08b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e1", - }, - "0x1250EBD871FC0A92A7B2D83168D0D727272D441BEFA15C503DD8E90CE98DB3E7B6D194F60839C508A84305AACA1789B6 " - "0x089A1C5B46E5110B86750EC6A532348868A84045483C92B7AF5AF689452EAFABF1A8943E50439F1D59882A98EAA0170F " - "0x1368BB445C7C2D209703F239689CE34C0378A68E72A6B3B216DA0E22A5031B54DDFF57309396B38C881C4C849EC23E87 " - "0x193502B86EDB8857C273FA075A50512937E0794E1E65A7617C90D8BD66065B1FFFE51D7A579973B1315021EC3C19934F " - "0x01B2F522473D171391125BA84DC4007CFBF2F8DA752F7C74185203FCCA589AC719C34DFFBBAAD8431DAD1C1FB597AAA5 " - "0x018107154F25A764BD3C79937A45B84546DA634B8F6BE14A8061E55CCEBA478B23F7DACAA35C8CA78BEAE9624045B4B6 " - "0x19F26337D205FB469CD6BD15C3D5A04DC88784FBB3D0B2DBDEA54D43B2B73F2CBB12D58386A8703E0F948226E47EE89D " - "0x06FBA23EB7C5AF0D9F80940CA771B6FFD5857BAAF222EB95A7D2809D61BFE02E1BFD1B68FF02F0B8102AE1C2D5D5AB1A " - "0x11B8B424CD48BF38FCEF68083B0B0EC5C81A93B330EE1A677D0D15FF7B984E8978EF48881E32FAC91B93B47333E2BA57 " - "0x03350F55A7AEFCD3C31B4FCB6CE5771CC6A0E9786AB5973320C806AD360829107BA810C5A09FFDD9BE2291A0C25A99A2 " - "0x04C581234D086A9902249B64728FFD21A189E87935A954051C7CDBA7B3872629A4FAFC05066245CB9108F0242D0FE3EF " - "0x0F41E58663BF08CF068672CBD01A7EC73BACA4D72CA93544DEFF686BFD6DF543D48EAA24AFE47E1EFDE449383B676631 " - }, -}; - -CYBOZU_TEST_AUTO(size) -{ - CYBOZU_TEST_EQUAL(sizeof(Fp), 48u); - CYBOZU_TEST_EQUAL(sizeof(Fr), 32u); - CYBOZU_TEST_EQUAL(sizeof(Fp2), sizeof(Fp) * 2); - CYBOZU_TEST_EQUAL(sizeof(Fp6), sizeof(Fp) * 6); - CYBOZU_TEST_EQUAL(sizeof(Fp12), sizeof(Fp) * 12); - CYBOZU_TEST_EQUAL(sizeof(G1), sizeof(Fp) * 3); - CYBOZU_TEST_EQUAL(sizeof(G2), sizeof(Fp2) * 3); -} - -void testParam(const TestSet& ts) -{ - CYBOZU_TEST_EQUAL(BN::param.r, mpz_class(ts.r)); - CYBOZU_TEST_EQUAL(BN::param.p, mpz_class(ts.p)); -} - -void finalExpC(Fp12& y, const Fp12& x) -{ - const mpz_class& r = BN::param.r; - const mpz_class& p = BN::param.p; - mpz_class p2 = p * p; - mpz_class p4 = p2 * p2; -#if 1 - Fp12::pow(y, x, p2 + 1); - Fp12::pow(y, y, p4 * p2 - 1); - Fp12::pow(y, y, (p4 - p2 + 1) / r * 3); -#else - Fp12::pow(y, x, (p4 * p4 * p4 - 1) / r * 3); -#endif -} - -void pairingC(Fp12& e, const G1& P, const G2& Q) -{ - millerLoop(e, P, Q); - finalExp(e, e); -} -void testIoAll(const G1& P, const G2& Q) -{ - const int FpTbl[] = { 0, 2, 2|mcl::IoPrefix, 10, 16, 16|mcl::IoPrefix, mcl::IoArray, mcl::IoArrayRaw }; - const int EcTbl[] = { mcl::IoEcAffine, mcl::IoEcProj, mcl::IoEcCompY, mcl::IoSerialize }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(FpTbl); i++) { - for (size_t j = 0; j < CYBOZU_NUM_OF_ARRAY(EcTbl); j++) { - G1 P2 = P, P3; - G2 Q2 = Q, Q3; - int ioMode = FpTbl[i] | EcTbl[j]; - std::string s = P2.getStr(ioMode); - P3.setStr(s, ioMode); - CYBOZU_TEST_EQUAL(P2, P3); - s = Q2.getStr(ioMode); - Q3.setStr(s, ioMode); - CYBOZU_TEST_EQUAL(Q2, Q3); - s = P.x.getStr(ioMode); - Fp Px; - 
Px.setStr(s, ioMode); - CYBOZU_TEST_EQUAL(P.x, Px); - s = Q.x.getStr(ioMode); - Fp2 Qx; - Qx.setStr(s, ioMode); - CYBOZU_TEST_EQUAL(Q.x, Qx); - } - } -} - -void testIo(const G1& P, const G2& Q) -{ - testIoAll(P, Q); - G1 Z1; - G2 Z2; - Z1.clear(); - Z2.clear(); - testIoAll(Z1, Z2); -} - -void testSetStr(const G2& Q0) -{ - G2::setCompressedExpression(); - G2 Q; - Q.clear(); - for (int i = 0; i < 10; i++) { - G2 R; - R.setStr(Q.getStr()); - CYBOZU_TEST_EQUAL(Q, R); - G2::add(Q, Q, Q0); - } -} - -void testMapToG1() -{ - G1 g; - for (int i = 1; i < 10; i++) { - mapToG1(g, i); - CYBOZU_TEST_ASSERT(!g.isZero()); - G1 gr; - G1::mul(gr, g, BN::param.r); - CYBOZU_TEST_ASSERT(gr.isZero()); - } -} - -void testMapToG2() -{ - G2 g; - for (int i = 1; i < 10; i++) { - mapToG2(g, i); - CYBOZU_TEST_ASSERT(!g.isZero()); - G2 gr; - G2::mul(gr, g, BN::param.r); - CYBOZU_TEST_ASSERT(gr.isZero()); - } - Fp x; - x.setHashOf("abc"); - mapToG2(g, Fp2(x, 0)); - CYBOZU_TEST_ASSERT(g.isValid()); -} - -void testPrecomputed(const G1& P, const G2& Q) -{ - Fp12 e1, e2; - pairing(e1, P, Q); - std::vector<Fp6> Qcoeff; - precomputeG2(Qcoeff, Q); - precomputedMillerLoop(e2, P, Qcoeff); - finalExp(e2, e2); - CYBOZU_TEST_EQUAL(e1, e2); -} - -#if 0 -void testFp12pow(const G1& P, const G2& Q) -{ - Fp12 e, e1, e2; - pairing(e, P, Q); - cybozu::XorShift rg; - for (int i = -10; i < 10; i++) { - mpz_class xm = i; - Fp12::pow(e1, e, xm); - Fp12::powGeneric(e2, e, xm); - CYBOZU_TEST_EQUAL(e1, e2); - } - for (int i = 0; i < 10; i++) { - Fr x; - x.setRand(rg); - mpz_class xm = x.getMpz(); - Fp12::pow(e1, e, xm); - param.glv2.pow(e2, e, xm); - CYBOZU_TEST_EQUAL(e1, e2); - } -} -#endif - -void testMillerLoop2(const G1& P1, const G2& Q1) -{ - Fp12 e1, e2, e3; - mpz_class c1("12342342423442"); - mpz_class c2("329428049820348209482"); - G2 Q2; - G1 P2; - G2::mul(Q2, Q1, c1); - G1::mul(P2, P1, c2); - pairing(e1, P1, Q1); - pairing(e2, P2, Q2); - e1 *= e2; - - std::vector<Fp6> Q1coeff, Q2coeff; - precomputeG2(Q1coeff, Q1); - precomputeG2(Q2coeff, Q2); - precomputedMillerLoop2(e2, P1, Q1coeff, P2, Q2coeff); - precomputedMillerLoop2mixed(e3, P1, Q1, P2, Q2coeff); - CYBOZU_TEST_EQUAL(e2, e3); - finalExp(e2, e2); - CYBOZU_TEST_EQUAL(e1, e2); - - // special value - G2 Z; - Z.clear(); - Q2 += Q2; - precomputeG2(Q1coeff, Z); - precomputeG2(Q2coeff, Q2); - precomputedMillerLoop2(e2, P1, Q1coeff, P2, Q2coeff); - precomputedMillerLoop2mixed(e3, P1, Z, P2, Q2coeff); - finalExp(e2, e2); - finalExp(e3, e3); - CYBOZU_TEST_EQUAL(e2, e3); -} - -void testPairing(const G1& P, const G2& Q, const char *eStr) -{ - Fp12 e1; - pairing(e1, P, Q); - Fp12 e2; - { - std::stringstream ss(eStr); - ss >> e2; - } - CYBOZU_TEST_EQUAL(e1, e2); - Fp12 e = e1, ea; - G1 Pa; - G2 Qa; -#if defined(__EMSCRIPTEN__) || MCL_SIZEOF_UNIT == 4 - const int count = 100; -#else - const int count = 1000; -#endif - mpz_class a; - cybozu::XorShift rg; - for (int i = 0; i < count; i++) { - Fr r; - r.setRand(rg); - a = r.getMpz(); - Fp12::pow(ea, e, a); - G1::mul(Pa, P, a); - G2::mul(Qa, Q, a); - G1 T; - G1::mulCT(T, P, a); - CYBOZU_TEST_EQUAL(Pa, T); - pairing(e1, Pa, Q); - pairing(e2, P, Qa); - CYBOZU_TEST_EQUAL(ea, e1); - CYBOZU_TEST_EQUAL(ea, e2); - } -} - -void testTrivial(const G1& P, const G2& Q) -{ - G1 Z1; Z1.clear(); - G2 Z2; Z2.clear(); - Fp12 e; - pairing(e, Z1, Q); - CYBOZU_TEST_EQUAL(e, 1); - pairing(e, P, Z2); - CYBOZU_TEST_EQUAL(e, 1); - pairing(e, Z1, Z2); - CYBOZU_TEST_EQUAL(e, 1); - - std::vector<Fp6> Qcoeff; - precomputeG2(Qcoeff, Z2); - precomputedMillerLoop(e, P, Qcoeff); - finalExp(e,
e); - CYBOZU_TEST_EQUAL(e, 1); - - precomputeG2(Qcoeff, Q); - precomputedMillerLoop(e, Z1, Qcoeff); - finalExp(e, e); - CYBOZU_TEST_EQUAL(e, 1); -} - -#include "bench.hpp" - -CYBOZU_TEST_AUTO(naive) -{ - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(g_testSetTbl); i++) { - const TestSet& ts = g_testSetTbl[i]; - printf("i=%d curve=%s\n", int(i), ts.name); - initPairing(ts.cp, g_mode); - const G1 P(Fp(ts.g1.a), Fp(ts.g1.b)); - const G2 Q(Fp2(ts.g2.aa, ts.g2.ab), Fp2(ts.g2.ba, ts.g2.bb)); -#ifdef ONLY_BENCH - { - Fp12 e; - for (int i = 0; i < 1000; i++) pairing(e, P, Q); - } - clk.put(); - return; -#endif - testParam(ts); - testIo(P, Q); -// testFp12pow(P, Q); - testTrivial(P, Q); - testSetStr(Q); - testMapToG1(); - testMapToG2(); - testPairing(P, Q, ts.e); - testPrecomputed(P, Q); - testMillerLoop2(P, Q); - testBench(P, Q); - } - int count = (int)clk.getCount(); - if (count) { - printf("count=%d ", count); - clk.put(); - } -} - -CYBOZU_TEST_AUTO(finalExp) -{ - const char *e0Str = -"012974491575E232199B73B30FE53FF643FEAE11023BCA7AF961C3600B45DFECFE4B30D52A62E73DA4C0409810304997\n" -"05CE2FB890FE65E20EC36347190ECB4884E401A64B666557B53E561F6D0979B7A96AD9E647ED78BD47187195C00F563C\n" -"02E85D1E559488603A70FEE99354DA8847215EC97282CA230DE96FED6DD5D4DD4EF4D901DB7F544A1A45EBEBA1450109\n" -"048FB1E44DDABF18D55C95704158A24678AA2A6ED0844108762E88306E5880E8C67BF44E24E40AB3F93D9E3713170341\n" -"07EF7BE685DC0DBA1B3E1D2E9090CD98EAD1325B60881772F17077386A3182B117F5FD839363F5891D08E82B88EC6F12\n" -"17803435700EF7A16C06404C6D17EB4FD84079FE9872207302A36C791B6E90447B33D703BBFE04ECB641C3A573E2CD50\n" -"19A494E6A872E46FC85D09FD6D30844B6FF05729BC253A9640F7BE64AAA8C2C8E0AE014A9DD816C53A3EDEBB2FA649EB\n" -"020949ABAA14F1DCE17FA9E091DDA963E9E492BA788E12B9B610E80A4D94DB9CC50341ED107C7D50E5738052595D4A27\n" -"09E217B513B3603723DAC3188A2F7CBDD84A56E7E5004446E7D4C63D6E378DA26E411C10898E48DB4B0C065E4699A9C5\n" -"12393BD23D0EC122082A1EC892A982F3C9AFD14240CE85258D8A3EF0A13CB545D6EF7848FD40DD4AEF1554341C5C5BBF\n" -"07EA8A0D6A57C78E5663F94E2B1ABC0D760ED18DBA64305EAD5EE350FB0342A7A81C0D5C8B3AD826D009276B0F32D2C8\n" -"16804D0D4A2633ED01568B0F8F06C4497E46E88D05FD191AAE530ACA791D0E114D74874FA88E33FAF48757153B09BB0E"; - -const char *e1Str = -"0E05D19E90D2C501E5502C7AC80D77201C47DF147DD1076440F0DF0179DF9802CA0775E0E73DD9174F1094D2280787B3\n" -"14D2F5C84279E7177A3543FBEAE261DE8F6C97EFD5F3FF3F959EC9FC0303F620A4B3AF00DF409496CECADDD0A7F0A164\n" -"1414E9B9DF8DF1EAC2E70D5538018377788C62016A54F28B037A68740705089AE431B86756F98CBE19690A5EAC0C2466\n" -"12D8B32157836A131CCA3CA313DAAAF909BC3AD6BDD15885BB429922B9CD7D1246D1163E5E6F88D68BF1B75E451EFABB\n" -"102C9A839A924E0D603D13F2E08A919E0B9EE2A269FC75727BA13D66027C157B9BB4077977FA94557DE4427BF11B234B\n" -"19DBEB7F2E3096AFFD44837655BD8249741B484B0EB0DBEE569DEA8D9E38AE09D210C8BC16AA6DFBC923095B2C9A8B2B\n" -"19B9A6DCCD01FA0D04D5CE94D8BDCE1DF64AFEB7FD493B955180A5C6B236E469F0E07CC9BB4203FCAC46AE6F8E5419D6\n" -"02BFA87AF7A3726A7ABACDCFDD53694AF651554F3A431AB4274F67D5DAD2D6C88AF794705FF456A936C83594731AD8DC\n" -"0F21E0173E3B50DD98EFA815B410631A57399B451FD6E1056FFD09C9FE50EFAD3D026F0C46C8BB1583A50B7853D990DA\n" -"02230237AE04B61F9269F6E7CD2FCF1231CEE4690AA658B0018EFC0D0770FD0A56B3B7294086E8D306B1465CDDD858CD\n" -"087EB8F6547015661E9CD48D6525C808636FCB8420B867CB2A87E006B2A93BBD5EF675E6CDDA9E6F94519C49EA8BB689\n" -"19F5C988B2DD6E33D7D3D34EFB1991F80DC28006AC75E0AB53FD98FC6F2476D05DD4ECA582F5FF72B8DDD9DDDE80FFC9"; - - Fp12 e0, e1, e2; - e0.setStr(e0Str, 16); - e1.setStr(e1Str, 16); - finalExp(e2, e0); -// 
finalExpC(e2, e0); - CYBOZU_TEST_EQUAL(e1, e2); - CYBOZU_BENCH_C("finalExp", 100, finalExp, e2, e0); -} - -CYBOZU_TEST_AUTO(addLine) -{ -const char *l0Str= -"0EF586FCDB69442CB41C0DA719AC5C92BD99A916C1F01BCFC7606AA7A23D680C04620FDFC2144E0EA6025F05241A791F\n" -"164CFDADE9B91150C6D2C7F7CDF29BC3105A7EA51217283CDF801EBEE9E86CE5078253E322C72129DAA42F6DBAD17D37"; -const char *l1Str = -"07A124F536BE83CCB3AF8D3DA2AE094942755040B9DA8E0796C462ACE3805D6916ACA7E9281261D8025571E2F31AAF0D\n" -"12D05751A9B255143541D0A4E57E120F937D51F9A07D31982390CA6EB5DF8CC0640FD291521069BF9964AE33EDD1323D"; -const char *l4Str = -"0D609DE41CF1260B332C1A53AA54703F62AB8224777E34FEEAB09AA06187CA71D8C7C2EB66F59D3622D431BE17D0FEE6\n" -"0A93C2984041171BE701560017D64D0640B6F61D7DCA8F527FA6B6A1A1033261C0761CAA56A00D4D16C9D3B7371E02D9"; - -const char *rStr = -"4 0A8DFA69FDD43EDCCC6375750373B443157EF4641E5B4CA82FBF23E3E8EA72351EA754168CEC77573D337E6227C0D0DD\n" -"12C8508CF1828C52A9A1A71779129D605327191EE459AED3C0B4B14657B08B2927173FADF8E4275188E8D49E57A75B33\n" -"12AD7EB96734F2C93B669FD54845CD2FF351AFDF0123E96772021DC3F4F3B456DB1B37CB1C380B1947616165FF0DDAEA\n" -"03D80F92C8A6005DEB291AF28406B7B4FCEDD81A244997DBB719B01D162BD7D71F0FD63BF76F8F1AC90618C3702294DF\n" -"199F7A719EA1CA2CD03CFFBB9A4BC2FE1BD8BCA7C772D223E6CB20C1313C3D3C52CFBB88445E56C833908C52A4EC68F1\n" -"0A3F6B27A6DDA00DB848574ECB06F179271D5844BDA66CD5AE1792A8FDD25E3A504A95839113BAA8B1FCB53EEE5F1FF0"; - -const char *qStr = -"4 0B5C339C23F8EAB3647E974BCDDF72C96F97A444346BE72CA73AB1323B83B8F6161257AB34C7E0CF34F6C45086CA5868\n" -"13C2235E9F9DFB33344BA2EE5A71435859022880732EDC9EC75AC79AE9DA972593CDC40A0AC334D6D2E8D7FAD1D98D0B\n" -"134B8EED8196A00D3B70ADBC26FF963B725A351CF0B73FE1A541788AFB0BB081AF82A438021B5E878B15D53B1D27C6A7\n" -"18CC69F847BEE826B939DCB4030D33020D03B046465C9EE103AA8009A175DB169070294E75771586687FE361DB884BCD\n" -"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001\n" -"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; - -const char *pStr = -"4 0FD3977C60EC322BC281C915955ED534B491E39C72E8E800271CEF3F0492D890829FA69C45FCE93D9847A0CAB325D871\n" -"17CC2C36C5D283C05BFCECCF48DBB2050332DA058DD67326A9EE520967DBCAEDFCB5F05A085D1A49DF08BB968CC782C5\n" -"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"; - -const char *m0Str = -"1010A4F9944514352AAD5D290AFB95C435EB64B5E74519807C9602DABCD6B6F5494E419758AE9A43F539F812252C65E5\n" -"1622F7A52BAAC10EBFF0977F61866544BF9B91315FA66ADB6AC200AFF7A3676C1AD3480340B23C66F7C6AE50A703F245"; -const char *m1Str = -"0905180B20FCE2AA545A73D6B9EA1F82479EF3FB5A3BA8DDB656D9B2A4DA7B63CCF75538D15093949B442E27804C8CE2\n" -"0FE834508BBAD2F22DCBF1C3C1BCCD69561827613EB63F4907832A7ABBBC6040CF2E517D0D5E22D3812EEE1EC55640DD"; -const char *m4Str = -"1197D94A8DAFE6F72B17A31964CA5D0C3B2A12BEFF959F7DF7A37938C64C01508D12C24479E5C3D314F862A9977D6C7D\n" -"0B0254A26810307964E2A05680C19DE7C63CCBD7CC3558AD366BA48D9F7049E245BF2C54EA0339301739851E6EB2158F"; - -const char *r1Str = -"4 16A33C4ED01E95833D39EECE223380FE860B6DC1F71B1DDBEE2BE39B5D682B090F18758495E202010D3B9B45A46FF80E\n" -"01CECF5CC255E32B833C16EF3F983CCA9996A33504615909AD5685D9F637BF2357442BEC89DAFC747B69EFEF57D5A213\n" -"061C33850E5E4A418F3B68D6097C3911A551D6DB3C7E21196410F69D510C1A888635B6B699F98C14912A8D97742D36A9\n" -"0457FB78282C2B8F8BA803E77D058EF3BF6B06C6241D6FA6E2B1607F3E9D7597D1A10B7AA4E06B89FBA01736901AD826\n" 
-"0B45E9B7D311B8C37F7E9262227A4D721F2D148DE6F12EC5D599B45E4790F35B37A1D6928709F438849324A807EC1913\n" -"0E771545F892C247A5365BA1F14934D8ED37483A6B7DD3EB4C3FBA0AC884D7EE9C080C3B39ADA64AE545E7339F83AFB0"; - - const int mode = mcl::IoEcProj | 16; - Fp6 l, m; - G2 R, Q, R1; - G1 P; - - R.setStr(rStr, mode); - Q.setStr(qStr, mode); - P.setStr(pStr, mode); - l.a.setStr(l0Str, mode); - l.b.setStr(l4Str, mode); - l.c.setStr(l1Str, mode); - local::addLine(l, R, Q, P); - m.a.setStr(m0Str, mode); - m.b.setStr(m4Str, mode); - m.c.setStr(m1Str, mode); - R1.setStr(r1Str, mode); - CYBOZU_TEST_EQUAL(l, m); - CYBOZU_TEST_EQUAL(R, R1); -} - -CYBOZU_TEST_AUTO(dblLine) -{ -const char *l0Str= -"0905F47F07FA2177E4B73559D93D9B41A2FD20E0440167E63A0F62321EB73C784405DE360477245F5E4AE8FA2EAEC6FF\n" -"02DA455C573517EE8BD4E40A60851E2EC85CF514351681B89B1B72664E384A3678A25DC5C4CF84C086C13968BC701F91"; -const char *l1Str = -"0F48059E31CEF9546D41DEF31FB3BFD13BBCC38248E4846AFD216350B4B661FA1382E74BCF904BE8E33C26FF0D29ABA0\n" -"0D7FAEE426E4A6F32E4DE1CCB76BBA870A183113589CF58A358DC9C284C82B54C65A754F9A71EAD87304744CFD105051"; - -const char *l4Str = -"162AE8B029F8420BEDE5A399BA19C09FED01DE2748F4458065DBE51044FBFE749B28EF1B7F79A5E4545EB550DD0CFE02\n" -"091042B589FD59FBF286D21475CCCF444D8DCC49389EA3B98AF864DDB9C08BDDEB320D6D88F0C07D7CD1733A41404C1F"; - -const char *qStr = -"4 047ACF79048EDCA97A19DB1779A44CE610CEA9FDCC62D1C4014B335834BC63414F55B670CCF15A86E58BC3737FB70919\n" -"11D8063B2FFA2E1F5224593FF74D0E9650CAB6AF702B74159F7F97E2CF0843FBCD561D41FEC779BEB48746CD2F30FF74\n" -"17684E8EA85C990DF5472B4EBAF51002CDBBECF79BA2988FC610A5CE09F4A584248DCC506E78C39C9BB4F6008115DE64\n" -"1334CA7263ED9395BBEDBB6C938C642200C239FB0CADF1F652332A037FFBC7E567CCE09540939F25316CBC4FC68CE4DB\n" -"0670DCF344F027CB92F7B1F569B281C8FF756D83CD7B65EB118FE95FBE16ED28649B8F739FE3A2BECA1979FC18484ECD\n" -"13E3E30759DCC1FA9F1A62D54BEF0EE1CC75E194A559F2D6BE3025BE4BEB9F7351A7608FE8D4CCAD30BA2A8748205487"; - -const char *pStr = -"4 1579B48AE944AFE8FC69B38A7CD0D2C6B93E5F506535A5410E25FB1C1707938D6932F3D620A2BBB90ED7E2601971DEA8\n" -"0234E5B373AD62D9EF1EBAE6FA6FFAD26144717F65AE9F98BD4280978ED52B3621F60FA4A8F6E5B5DAF64469733827E6\n" -"0B4D1755924541041F75FF399E3B5F535BC85D5A982AEEC5FC2404F06AC7F062C090C4545D1602C3D8B559801EE7B9A2"; - -const char *m0Str = -"198D1123A9817C40A914A3FF9E29BB16DD2F0DF98D0AB5C3A6014D60E31AE973051C35ADCEA0A41F32BB16E6688DC73F\n" -"10339DB2F26D1B867FD3E60A24F938210EABEFC51536845A490F28A088A4AC53575DBBAA218D70D34E28EBDE14DB3465\n"; -const char *m1Str = -"066852248D915F6378B3F4A8E6173AC748FBFAE236AAEEAECC3F34D2D22706A06B925A83DD8276B2B240F61D761587B0\n" -"17CC8195E6607FF19A26AA69CA50C32487E35D2D75301AC4B6988F1B77523BF472927EE5710DF49A563534D86C684BE0\n"; -const char *m4Str = -"10B67D1A0CE430B7AD74F64DD6C2E44C4788EADF8340909843C96B918BF54703CC14686B26E350EB1140ACC3337EEEB4\n" -"0F5D52E6F0B10A081EFF885CC858109241B379985ADD8982E6B8A202FD283897EFBA4CBE444C29751410A61FC8346545"; - -const char *q1Str = -"4 17B5E51EC931E724ABACE9C7F8AFDD51F3929478B47C222F99844D166936063D3BFCDF7AD7079EEF4BE8514E3D09EF0F\n" -"0F5794F38BAEC0FA3C30AC4C0B8E9024B2047F2F4576434F91768F2B51AD58C48E88414B1D4B7A9119A947D3CFEDEF0A\n" -"1320714A8B7E23C4C558D2B1C556CC8FB6B41F3669EFD70B6D204C2A7C6EF2E0CBCA945AA7BACB402E00ED338F7D12FC\n" -"0C2846E386161F123344704528D9944677806C3F784E3997857C91D2F3F37AB6AD92360CD97CABD5D631E9FC74708AD3\n" -"17F92FF3D71B473B802F2DE90C19A5F5DBFAA397293871AB58E5B813F7D686EA8E1814C69C50C02D062F3A13C1D045E1\n" 
-"05214392858DE04B3B468B2D0C703A485508C29157D81E9F799BAB2FEF0F514C99D5F8085D8062281418C6CCE5621D18\n"; - - const int mode = mcl::IoEcProj | 16; - Fp6 l, m; - G2 Q, Q1; - G1 P; - - G1::setOrder(0); - Q.setStr(qStr, mode); - P.setStr(pStr, mode); - l.a.setStr(l0Str, mode); - l.b.setStr(l4Str, mode); - l.c.setStr(l1Str, mode); - local::dblLine(l, Q, P); - m.a.setStr(m0Str, mode); - m.b.setStr(m4Str, mode); - m.c.setStr(m1Str, mode); - Q1.setStr(q1Str, mode); - CYBOZU_TEST_EQUAL(l, m); - CYBOZU_TEST_EQUAL(Q, Q1); - G1::setOrder(BN::param.r); -} - -CYBOZU_TEST_AUTO(mul_012) -{ - const char *fStr = -"087590AFBFEB8F85DD912EC297C2B5DD7BC0A9B0914348C5D99F0089C09CDBA0DCDAF1C704A7B092D8FB9A75B7C06D88\n" -"119DD8B08C40D4EB3D7AF19221E41639A98A10EF1A22F9AD8CB1350526B9335F47E76B2FFD6652E693A67440574D5A0C\n" -"134ADA7C4ABFBA4324900D25E5077A119F9E55A7F337C03FD539D8DAC392B458F11261BEA007393D43657E9656B984D6\n" -"01032DDB3CAEC38B7DA916CA111C46A013F1DC83AF13DFF5B71CC3789974F946CFC43FE7B8EE519E524627248369FCE7\n" -"19E9455C14A9640139224BB1337E4EC5EE92BFF757DB179CC98CF0F09682E44ED4B6004F31D4788DE28BB2D8F41DDAE4\n" -"0B9877DF6AC1015375AB421363A5B06D2DC1763B923FF674A06AE101306A4A39967A3F9EF12E870C124A26CE68E2D003\n" -"02AA5AC5901C9C91CD0B43CA62F21FA541896802A8AAF0FD5EDF8DAF4A98CEC19F457A67369E795594543677B4A16EA4\n" -"0604DB7CE2A0ABD8ADB5F4F06F20B01510BF9787C912B1036F570E7368D341D9B794F078DFD3265306841180865500D0\n" -"08145045CF5751502778739EFE6FEA6036C8F14800F4818C2FD8BA5AF98E89B0BBE6510D511C4E5A83A2813A92B655F0\n" -"0FDE93D3326321ECF6171FBC4665F1C171F19A6F1D521BFA1A1B80E0B08CEBB78B255AF0B5F7E45AA6C1D01005200FB1\n" -"0F2A9EA2891A683AE15A79EDB0C6DF45FFAD4D22F3293AE59D3CE8F6E0E59A097673D05D81ACAD8C59817FFDD3E89CF1\n" -"0724BD07BDDCA23775C1DECD80CE7722F98C10E75A0CD9A1FA81921A04EEFAC55BE0740C5F01ED83FDFC66380339D417\n"; - -const char *l0Str = -"198D1123A9817C40A914A3FF9E29BB16DD2F0DF98D0AB5C3A6014D60E31AE973051C35ADCEA0A41F32BB16E6688DC73F\n" -"10339DB2F26D1B867FD3E60A24F938210EABEFC51536845A490F28A088A4AC53575DBBAA218D70D34E28EBDE14DB3465"; -const char *l1Str = -"066852248D915F6378B3F4A8E6173AC748FBFAE236AAEEAECC3F34D2D22706A06B925A83DD8276B2B240F61D761587B0\n" -"17CC8195E6607FF19A26AA69CA50C32487E35D2D75301AC4B6988F1B77523BF472927EE5710DF49A563534D86C684BE0\n"; -const char *l4Str = -"10B67D1A0CE430B7AD74F64DD6C2E44C4788EADF8340909843C96B918BF54703CC14686B26E350EB1140ACC3337EEEB4\n" -"0F5D52E6F0B10A081EFF885CC858109241B379985ADD8982E6B8A202FD283897EFBA4CBE444C29751410A61FC8346545\n"; - -const char *f2Str = -"10128E1A9BD00FC81F6550921D0FED3944F63F980ABF91FDB73B1ED162337ED16075730ACD60A0FA7DFABAD9FC9657C5\n" -"055BE26091D8CDA32241F4991A1F184E403C3FFDD54858B23D5CE4B44402B65B26BCA6855DA7AC1C60F1D6651632DCD8\n" -"0D70827981F0D33185DE8767FDDFEC26CEB6A28F82C83BBABB0057E432FCF9072B666974123274751E35F371E931D6CC\n" -"02382B1A80E5BC95C75AE71BE2E097FD59365279CDD7EA358D87DEF132430744DABBF1B685D110CC731A9FDA40EEFC1B\n" -"0AAB560FB99D57A9B1B6C753DAF6B0619ED598C9B5FB0908F2DAE83C530E6365DBEDE29B9357D63803F46247A1F41C73\n" -"13C048F553BFC3C56516786DD26FF9D59ECFB9BE6B165F90E77CCED623BC66C6E93EFBF14576DB7E33C8C4F4E21F64DC\n" -"0987D7DEBB96A10D977F256432871BEBB4B3A620E4AE822E089E9DAA192CD278E9FA0CF598444F6758628BC38C33A5AD\n" -"0A4F1B75845B6C976BF49C35134AE73CA7A3C16D2E0BDA39C70367E3829E94EB7CAFBB0F8B57F4734B696D9CEF84FE73\n" -"0DFAB9C035F3DA51226F27998A494A32245800F0313446D6437D2F5B3F34A9E91428818B0C9AF63EB3AA618E80055FD5\n" -"06A58B9640FF8931616F6D08BA16ECE71F341C61F22E5EC5B556DF217179C3ECEC20E4BE425A3471F1D6648D14F89FBF\n" 
-"1614391845CDC212937BC1070010603FB4DF99A6B3FA7E7CD3316C56BA8B633B3DC7D864B36DA2F9A1E6B977DB150100\n" -"144A44415BCCB077EAA64C8DAC50631AF432C1420EBD8538818D65D6176BC1EB699579CED8340493306AF842B4B6822E"; - - Fp6 l; - Fp12 f, f2; - l.a.setStr(l0Str, 16); - l.b.setStr(l4Str, 16); - l.c.setStr(l1Str, 16); - f.setStr(fStr, 16); - f2.setStr(f2Str, 16); - local::mulSparse(f, l); - CYBOZU_TEST_EQUAL(f, f2); -} - -CYBOZU_TEST_AUTO(pairing) -{ - const int mode = mcl::IoEcProj | 16; - -const char *pStr = -"4 0FD3977C60EC322BC281C915955ED534B491E39C72E8E800271CEF3F0492D890829FA69C45FCE93D9847A0CAB325D871\n" -"17CC2C36C5D283C05BFCECCF48DBB2050332DA058DD67326A9EE520967DBCAEDFCB5F05A085D1A49DF08BB968CC782C5\n" -"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"; -const char *qStr = -"4 0B5C339C23F8EAB3647E974BCDDF72C96F97A444346BE72CA73AB1323B83B8F6161257AB34C7E0CF34F6C45086CA5868\n" -"13C2235E9F9DFB33344BA2EE5A71435859022880732EDC9EC75AC79AE9DA972593CDC40A0AC334D6D2E8D7FAD1D98D0B\n" -"134B8EED8196A00D3B70ADBC26FF963B725A351CF0B73FE1A541788AFB0BB081AF82A438021B5E878B15D53B1D27C6A7\n" -"18CC69F847BEE826B939DCB4030D33020D03B046465C9EE103AA8009A175DB169070294E75771586687FE361DB884BCD\n" -"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001\n" -"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; -const char *eStr = -"0E05D19E90D2C501E5502C7AC80D77201C47DF147DD1076440F0DF0179DF9802CA0775E0E73DD9174F1094D2280787B3\n" -"14D2F5C84279E7177A3543FBEAE261DE8F6C97EFD5F3FF3F959EC9FC0303F620A4B3AF00DF409496CECADDD0A7F0A164\n" -"1414E9B9DF8DF1EAC2E70D5538018377788C62016A54F28B037A68740705089AE431B86756F98CBE19690A5EAC0C2466\n" -"12D8B32157836A131CCA3CA313DAAAF909BC3AD6BDD15885BB429922B9CD7D1246D1163E5E6F88D68BF1B75E451EFABB\n" -"102C9A839A924E0D603D13F2E08A919E0B9EE2A269FC75727BA13D66027C157B9BB4077977FA94557DE4427BF11B234B\n" -"19DBEB7F2E3096AFFD44837655BD8249741B484B0EB0DBEE569DEA8D9E38AE09D210C8BC16AA6DFBC923095B2C9A8B2B\n" -"19B9A6DCCD01FA0D04D5CE94D8BDCE1DF64AFEB7FD493B955180A5C6B236E469F0E07CC9BB4203FCAC46AE6F8E5419D6\n" -"02BFA87AF7A3726A7ABACDCFDD53694AF651554F3A431AB4274F67D5DAD2D6C88AF794705FF456A936C83594731AD8DC\n" -"0F21E0173E3B50DD98EFA815B410631A57399B451FD6E1056FFD09C9FE50EFAD3D026F0C46C8BB1583A50B7853D990DA\n" -"02230237AE04B61F9269F6E7CD2FCF1231CEE4690AA658B0018EFC0D0770FD0A56B3B7294086E8D306B1465CDDD858CD\n" -"087EB8F6547015661E9CD48D6525C808636FCB8420B867CB2A87E006B2A93BBD5EF675E6CDDA9E6F94519C49EA8BB689\n" -"19F5C988B2DD6E33D7D3D34EFB1991F80DC28006AC75E0AB53FD98FC6F2476D05DD4ECA582F5FF72B8DDD9DDDE80FFC9"; - G1 P; - G2 Q; - P.setStr(pStr, mode); - Q.setStr(qStr, mode); - Fp12 e1, e2; - e1.setStr(eStr, 16); - pairing(e2, P, Q); - CYBOZU_TEST_EQUAL(e1, e2); -} - -void testCurve(const mcl::CurveParam& cp) -{ - initPairing(cp, g_mode); - G1 P; - G2 Q; - mapToG1(P, 1); - mapToG2(Q, 1); - GT e1, e2; - pairing(e1, P, Q); - cybozu::XorShift rg; - mpz_class a, b; - Fr r; - r.setRand(rg); a = r.getMpz(); - r.setRand(rg); b = r.getMpz(); - G1 aP; - G2 bQ; - G1::mul(aP, P, a); - G2::mul(bQ, Q, b); - pairing(e2, aP, bQ); - GT::pow(e1, e1, a * b); - CYBOZU_TEST_EQUAL(e1, e2); -} -CYBOZU_TEST_AUTO(multi) -{ - G1 P; - G2 Q; - int i; - puts("BN254"); - testCurve(mcl::BN254); - i = 1; - CYBOZU_BENCH_C("calcBN1", 100, (BN::param.mapTo.calcBN), P, i++); - CYBOZU_BENCH_C("naiveG2", 100, (BN::param.mapTo.naiveMapTo), P, i++); - CYBOZU_BENCH_C("calcBN2", 100, (BN::param.mapTo.calcBN), 
Q, i++); - CYBOZU_BENCH_C("naiveG2", 100, (BN::param.mapTo.naiveMapTo), Q, i++); - puts("BLS12_381"); - testCurve(mcl::BLS12_381); - i = 1; - CYBOZU_BENCH_C("calcBN1", 100, (BN::param.mapTo.calcBN), P, i++); - CYBOZU_BENCH_C("naiveG1", 100, (BN::param.mapTo.naiveMapTo), P, i++); - CYBOZU_BENCH_C("calcBN2", 100, (BN::param.mapTo.calcBN), Q, i++); - CYBOZU_BENCH_C("naiveG2", 100, (BN::param.mapTo.naiveMapTo), Q, i++); -} - -CYBOZU_TEST_AUTO(BLS12_G1mulCofactor) -{ - if (BN::param.cp.curveType != MCL_BLS12_381) return; -} - -typedef std::vector FpVec; - -void f(FpVec& zv, const FpVec& xv, const FpVec& yv) -{ - for (size_t i = 0; i < zv.size(); i++) { - Fp::mul(zv[i], xv[i], yv[i]); - } -} -int main(int argc, char *argv[]) - try -{ - cybozu::Option opt; - std::string mode; - opt.appendOpt(&mode, "auto", "m", ": mode(gmp/gmp_mont/llvm/llvm_mont/xbyak)"); - if (!opt.parse(argc, argv)) { - opt.usage(); - return 1; - } - g_mode = mcl::fp::StrToMode(mode); - printf("JIT %d\n", mcl::fp::isEnableJIT()); -#if 0 - initPairing(mcl::BLS12_381); - cybozu::XorShift rg; - const int n = 1; - std::vector xv(n), yv(n), zv(n); - for (int i = 0; i < n; i++) { - xv[i].setByCSPRNG(rg); - yv[i].setByCSPRNG(rg); - } - FpDbl dx; - FpDbl::mulPre(dx, xv[0], yv[0]); - Fp2 x2, y2; - x2.a.setByCSPRNG(rg); - x2.b.setByCSPRNG(rg); - y2.a.setByCSPRNG(rg); - y2.b.setByCSPRNG(rg); - Fp2Dbl x2d, y2d; - Fp2Dbl::mulPre(x2d, x2, x2); - Fp2Dbl::mulPre(y2d, x2, y2); -if(0){ - puts("----------"); - xv[0].dump(); - yv[0].dump(); - dx.dump(); - puts("----------"); -// exit(1); -} -// CYBOZU_BENCH_C("Fp2::neg", 10000000, Fp2::neg, x2, x2); - CYBOZU_BENCH_C("Fp2::sqr", 10000000, Fp2::sqr, x2, x2); -// CYBOZU_BENCH_C("Fp2::sqrPre", 100000000, Fp2Dbl::sqrPre, x2d, x2); -// CYBOZU_BENCH_C("Fp2::mulPre", 100000000, Fp2Dbl::mulPre, x2d, x2, y2); -// CYBOZU_BENCH_C("sqrPre", 100000000, FpDbl::sqrPre, dx, xv[0]); -// CYBOZU_BENCH_C("mod ", 100000000, FpDbl::mod, xv[0], dx); -// CYBOZU_BENCH_C("mul ", 100000000, Fp::mul, xv[0], yv[0], xv[0]); -// CYBOZU_BENCH_C("sqr ", 100000000, Fp::sqr, xv[0], xv[0]); - return 0; -#endif - return cybozu::test::autoRun.run(argc, argv); -} catch (std::exception& e) { - printf("ERR %s\n", e.what()); - return 1; -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/bn384_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/bn384_test.cpp deleted file mode 100644 index b5674a918..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/bn384_test.cpp +++ /dev/null @@ -1,83 +0,0 @@ -#define CYBOZU_TEST_DISABLE_AUTO_RUN -#include -#include -#include -#include -#include -#include - -using namespace mcl::bn384; - -mcl::fp::Mode g_mode; - -#include "bench.hpp" - -void testCurve(const mcl::CurveParam& cp) -{ - initPairing(cp, g_mode); - G1 P; - G2 Q; - mapToG1(P, 1); - mapToG2(Q, 1); - GT e1, e2; -#ifdef ONLY_BENCH - cybozu::CpuClock clk; - for (int i = 0; i < 10000; i++) { clk.begin(); pairing(e1, P, Q); clk.end(); } - clk.put(); - return; -#endif - pairing(e1, P, Q); - cybozu::XorShift rg; - mpz_class a, b; - Fr r; - r.setRand(rg); a = r.getMpz(); - r.setRand(rg); b = r.getMpz(); - G1 aP; - G2 bQ; - G1::mul(aP, P, a); - G2::mul(bQ, Q, b); - pairing(e2, aP, bQ); - GT::pow(e1, e1, a * b); - CYBOZU_TEST_EQUAL(e1, e2); - testBench(P, Q); - testSquareRoot(); - testLagrange(); -} - -CYBOZU_TEST_AUTO(pairing) -{ -// puts("BN160"); -// testCurve(mcl::BN160); - puts("BN254"); - // support 256-bit pairing - testCurve(mcl::BN254); - puts("BN381_1"); - testCurve(mcl::BN381_1); - puts("BN381_2"); - 
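// Each testCurve call in this block is a bilinearity check: pick random
// scalars a and b, then require e(aP, bQ) == e(P, Q)^{ab}. A bug in either
// the Miller loop or the final exponentiation makes the two sides differ
// with overwhelming probability.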
testCurve(mcl::BN381_2); - puts("BLS12_381"); - testCurve(mcl::BLS12_381); - // Q is not on EcT, but bad order - { - const char *s = "1 18d3d8c085a5a5e7553c3a4eb628e88b8465bf4de2612e35a0a4eb018fb0c82e9698896031e62fd7633ffd824a859474 1dc6edfcf33e29575d4791faed8e7203832217423bf7f7fbf1f6b36625b12e7132c15fbc15562ce93362a322fb83dd0d 65836963b1f7b6959030ddfa15ab38ce056097e91dedffd996c1808624fa7e2644a77be606290aa555cda8481cfb3cb 1b77b708d3d4f65aeedf54b58393463a42f0dc5856baadb5ce608036baeca398c5d9e6b169473a8838098fd72fd28b50"; - G2 Q; - CYBOZU_TEST_EXCEPTION(Q.setStr(s, 16), std::exception); - } -} - -int main(int argc, char *argv[]) - try -{ - cybozu::Option opt; - std::string mode; - opt.appendOpt(&mode, "auto", "m", ": mode(gmp/gmp_mont/llvm/llvm_mont/xbyak)"); - if (!opt.parse(argc, argv)) { - opt.usage(); - return 1; - } - g_mode = mcl::fp::StrToMode(mode); - return cybozu::test::autoRun.run(argc, argv); -} catch (std::exception& e) { - printf("ERR %s\n", e.what()); - return 1; -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/bn512_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/bn512_test.cpp deleted file mode 100644 index 905bfd3db..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/bn512_test.cpp +++ /dev/null @@ -1,68 +0,0 @@ -#define CYBOZU_TEST_DISABLE_AUTO_RUN -#include -#include -#include -#include -#include -#include - -using namespace mcl::bn512; - -mcl::fp::Mode g_mode; - -#include "bench.hpp" - -void testCurve(const mcl::CurveParam& cp) -{ - initPairing(cp, g_mode); - G1 P; - G2 Q; - mapToG1(P, 1); - mapToG2(Q, 1); - GT e1, e2; - pairing(e1, P, Q); - cybozu::XorShift rg; - mpz_class a, b; - Fr r; - r.setRand(rg); a = r.getMpz(); - r.setRand(rg); b = r.getMpz(); - G1 aP; - G2 bQ; - G1::mul(aP, P, a); - G2::mul(bQ, Q, b); - pairing(e2, aP, bQ); - GT::pow(e1, e1, a * b); - CYBOZU_TEST_EQUAL(e1, e2); - testBench(P, Q); - testSquareRoot(); - testLagrange(); -} - -CYBOZU_TEST_AUTO(pairing) -{ - puts("BN462"); - testCurve(mcl::BN462); - puts("BN381_1"); - testCurve(mcl::BN381_1); - puts("BLS12_381"); - testCurve(mcl::BLS12_381); - puts("BN254"); - testCurve(mcl::BN254); -} - -int main(int argc, char *argv[]) - try -{ - cybozu::Option opt; - std::string mode; - opt.appendOpt(&mode, "auto", "m", ": mode(gmp/gmp_mont/llvm/llvm_mont/xbyak)"); - if (!opt.parse(argc, argv)) { - opt.usage(); - return 1; - } - g_mode = mcl::fp::StrToMode(mode); - return cybozu::test::autoRun.run(argc, argv); -} catch (std::exception& e) { - printf("ERR %s\n", e.what()); - return 1; -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/bn_c256_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/bn_c256_test.cpp deleted file mode 100644 index 2ce85162d..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/bn_c256_test.cpp +++ /dev/null @@ -1,6 +0,0 @@ -#include -using namespace mcl::bn256; -#define MCLBN_DEFINE_STRUCT -#define MCLBN_FP_UNIT_SIZE 4 -#include "bn_c_test.hpp" - diff --git a/vendor/github.com/dexon-foundation/mcl/test/bn_c384_256_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/bn_c384_256_test.cpp deleted file mode 100644 index e7bbefda9..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/bn_c384_256_test.cpp +++ /dev/null @@ -1,7 +0,0 @@ -#include -using namespace mcl::bls12; -#define MCLBN_DEFINE_STRUCT -#define MCLBN_FP_UNIT_SIZE 6 -#define MCLBN_FR_UNIT_SIZE 4 -#include "bn_c_test.hpp" - diff --git a/vendor/github.com/dexon-foundation/mcl/test/bn_c384_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/bn_c384_test.cpp deleted file mode 
100644 index a9f20243a..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/bn_c384_test.cpp +++ /dev/null @@ -1,6 +0,0 @@ -#include -using namespace mcl::bn384; -#define MCLBN_DEFINE_STRUCT -#define MCLBN_FP_UNIT_SIZE 6 -#include "bn_c_test.hpp" - diff --git a/vendor/github.com/dexon-foundation/mcl/test/bn_c512_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/bn_c512_test.cpp deleted file mode 100644 index c6af3989f..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/bn_c512_test.cpp +++ /dev/null @@ -1,6 +0,0 @@ -#include -using namespace mcl::bn512; -#define MCLBN_DEFINE_STRUCT -#define MCLBN_FP_UNIT_SIZE 8 -#include "bn_c_test.hpp" - diff --git a/vendor/github.com/dexon-foundation/mcl/test/bn_c_test.hpp b/vendor/github.com/dexon-foundation/mcl/test/bn_c_test.hpp deleted file mode 100644 index e9dc59393..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/bn_c_test.hpp +++ /dev/null @@ -1,699 +0,0 @@ -/* - include from bn_if256_test.cpp and bn_if384_test.cpp -*/ -#include -#include -#include -#include -#include - -template -std::ostream& dump(std::ostream& os, const uint64_t (&x)[N]) -{ - for (size_t i = 0; i < N; i++) { - char buf[64]; - CYBOZU_SNPRINTF(buf, sizeof(buf), "%016llx", (long long)x[i]); - os << buf; - } - return os; -} - -CYBOZU_TEST_AUTO(init) -{ - int ret; - CYBOZU_TEST_EQUAL(sizeof(mclBnFr), sizeof(Fr)); - CYBOZU_TEST_EQUAL(sizeof(mclBnG1), sizeof(G1)); - CYBOZU_TEST_EQUAL(sizeof(mclBnG2), sizeof(G2)); - CYBOZU_TEST_EQUAL(sizeof(mclBnGT), sizeof(Fp12)); - -#if MCLBN_FP_UNIT_SIZE >= 4 - printf("test BN254 %d\n", MCLBN_FP_UNIT_SIZE); - ret = mclBn_init(MCL_BN254, MCLBN_COMPILED_TIME_VAR); -#endif -#if MCLBN_FP_UNIT_SIZE >= 6 && MCLBN_FR_UNIT_SIZE >= 4 - printf("test BLS12_381 %d\n", MCLBN_FP_UNIT_SIZE); - ret = mclBn_init(MCL_BLS12_381, MCLBN_COMPILED_TIME_VAR); -#endif -#if MCLBN_FP_UNIT_SIZE >= 6 && MCLBN_FR_UNIT_SIZE >= 6 - printf("test BN381_1 %d\n", MCLBN_FP_UNIT_SIZE); - ret = mclBn_init(MCL_BN381_1, MCLBN_COMPILED_TIME_VAR); -#endif -#if MCLBN_FP_UNIT_SIZE == 8 - printf("test BN462 %d\n", MCLBN_FP_UNIT_SIZE); - ret = mclBn_init(MCL_BN462, MCLBN_COMPILED_TIME_VAR); -#endif - CYBOZU_TEST_EQUAL(ret, 0); - if (ret != 0) exit(1); -} - -CYBOZU_TEST_AUTO(Fr) -{ - mclBnFr x, y; - memset(&x, 0xff, sizeof(x)); - CYBOZU_TEST_ASSERT(!mclBnFr_isValid(&x)); - CYBOZU_TEST_ASSERT(!mclBnFr_isZero(&x)); - - mclBnFr_clear(&x); - CYBOZU_TEST_ASSERT(mclBnFr_isZero(&x)); - - mclBnFr_setInt(&x, 1); - CYBOZU_TEST_ASSERT(mclBnFr_isOne(&x)); - - mclBnFr_setInt(&y, -1); - CYBOZU_TEST_ASSERT(!mclBnFr_isEqual(&x, &y)); - - y = x; - CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x, &y)); - - mclBnFr_setHashOf(&x, "", 0); - mclBnFr_setHashOf(&y, "abc", 3); - CYBOZU_TEST_ASSERT(!mclBnFr_isEqual(&x, &y)); - mclBnFr_setHashOf(&x, "abc", 3); - CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x, &y)); - - char buf[1024]; - mclBnFr_setInt(&x, 12345678); - size_t size; - size = mclBnFr_getStr(buf, sizeof(buf), &x, 10); - CYBOZU_TEST_EQUAL(size, 8); - CYBOZU_TEST_EQUAL(buf, "12345678"); - - mclBnFr_setInt(&x, -7654321); - mclBnFr_neg(&x, &x); - size = mclBnFr_getStr(buf, sizeof(buf), &x, 10); - CYBOZU_TEST_EQUAL(size, 7); - CYBOZU_TEST_EQUAL(buf, "7654321"); - - mclBnFr_setInt(&y, 123 - 7654321); - mclBnFr_add(&x, &x, &y); - size = mclBnFr_getStr(buf, sizeof(buf), &x, 10); - CYBOZU_TEST_EQUAL(size, 3); - CYBOZU_TEST_EQUAL(buf, "123"); - - mclBnFr_setInt(&y, 100); - mclBnFr_sub(&x, &x, &y); - size = mclBnFr_getStr(buf, sizeof(buf), &x, 10); - CYBOZU_TEST_EQUAL(size, 2); - CYBOZU_TEST_EQUAL(buf, 
"23"); - - mclBnFr_mul(&x, &x, &y); - size = mclBnFr_getStr(buf, sizeof(buf), &x, 10); - CYBOZU_TEST_EQUAL(size, 4); - CYBOZU_TEST_EQUAL(buf, "2300"); - - mclBnFr_div(&x, &x, &y); - size = mclBnFr_getStr(buf, sizeof(buf), &x, 10); - CYBOZU_TEST_EQUAL(size, 2); - CYBOZU_TEST_EQUAL(buf, "23"); - - mclBnFr_mul(&x, &y, &y); - mclBnFr_sqr(&y, &y); - CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x, &y)); - - const char *s = "12345678901234567"; - CYBOZU_TEST_ASSERT(!mclBnFr_setStr(&x, s, strlen(s), 10)); - s = "20000000000000000"; - CYBOZU_TEST_ASSERT(!mclBnFr_setStr(&y, s, strlen(s), 10)); - mclBnFr_add(&x, &x, &y); - size = mclBnFr_getStr(buf, sizeof(buf), &x, 10); - CYBOZU_TEST_EQUAL(size, 17); - CYBOZU_TEST_EQUAL(buf, "32345678901234567"); - - mclBnFr_setInt(&x, 1); - mclBnFr_neg(&x, &x); - size = mclBnFr_getStr(buf, sizeof(buf), &x, 10); - CYBOZU_TEST_ASSERT(size > 0); - CYBOZU_TEST_EQUAL(size, strlen(buf)); - CYBOZU_TEST_ASSERT(!mclBnFr_setStr(&y, buf, size, 10)); - CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x, &y)); - - for (int i = 0; i < 10; i++) { - mclBnFr_setByCSPRNG(&x); - mclBnFr_getStr(buf, sizeof(buf), &x, 16); - printf("%s\n", buf); - } -} - -void G1test() -{ - mclBnG1 x, y, z; - memset(&x, 0x1, sizeof(x)); - /* - assert() of carry operation fails if use 0xff, so use 0x1 - */ - CYBOZU_TEST_ASSERT(!mclBnG1_isValid(&x)); - mclBnG1_clear(&x); - CYBOZU_TEST_ASSERT(mclBnG1_isValid(&x)); - CYBOZU_TEST_ASSERT(mclBnG1_isZero(&x)); - - CYBOZU_TEST_ASSERT(!mclBnG1_hashAndMapTo(&y, "abc", 3)); - CYBOZU_TEST_ASSERT(mclBnG1_isValidOrder(&y)); - - char buf[1024]; - size_t size; - size = mclBnG1_getStr(buf, sizeof(buf), &y, 10); - CYBOZU_TEST_ASSERT(size > 0); - CYBOZU_TEST_EQUAL(size, strlen(buf)); - CYBOZU_TEST_ASSERT(!mclBnG1_setStr(&x, buf, strlen(buf), 10)); - CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&x, &y)); - - mclBnG1_neg(&x, &x); - mclBnG1_add(&x, &x, &y); - CYBOZU_TEST_ASSERT(mclBnG1_isZero(&x)); - - mclBnG1_dbl(&x, &y); // x = 2y - mclBnG1_add(&z, &y, &y); - CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&x, &z)); - mclBnG1_add(&z, &z, &y); // z = 3y - mclBnFr n; - mclBnFr_setInt(&n, 3); - mclBnG1_mul(&x, &y, &n); // x = 3y - CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&x, &z)); - mclBnG1_sub(&x, &x, &y); // x = 2y - - mclBnFr_setInt(&n, 2); - mclBnG1_mul(&z, &y, &n); // z = 2y - CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&x, &z)); - mclBnG1_normalize(&y, &z); - CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&y, &z)); -} - -CYBOZU_TEST_AUTO(G1) -{ - G1test(); -} - -CYBOZU_TEST_AUTO(G2) -{ - mclBnG2 x, y, z; - /* - assert() of carry operation fails if use 0xff, so use 0x1 - */ - memset(&x, 0x1, sizeof(x)); - CYBOZU_TEST_ASSERT(!mclBnG2_isValid(&x)); - mclBnG2_clear(&x); - CYBOZU_TEST_ASSERT(mclBnG2_isValid(&x)); - CYBOZU_TEST_ASSERT(mclBnG2_isZero(&x)); - - CYBOZU_TEST_ASSERT(!mclBnG2_hashAndMapTo(&x, "abc", 3)); - CYBOZU_TEST_ASSERT(mclBnG2_isValidOrder(&x)); - - char buf[1024]; - size_t size; - size = mclBnG2_getStr(buf, sizeof(buf), &x, 10); - CYBOZU_TEST_ASSERT(size > 0); - CYBOZU_TEST_EQUAL(size, strlen(buf)); - CYBOZU_TEST_ASSERT(!mclBnG2_setStr(&y, buf, strlen(buf), 10)); - CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&x, &y)); - - mclBnG2_neg(&x, &x); - mclBnG2_add(&x, &x, &y); - CYBOZU_TEST_ASSERT(mclBnG2_isZero(&x)); - - mclBnG2_dbl(&x, &y); // x = 2y - mclBnG2_add(&z, &y, &y); - CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&x, &z)); - mclBnG2_add(&z, &z, &y); // z = 3y - mclBnFr n; - mclBnFr_setInt(&n, 3); - mclBnG2_mul(&x, &y, &n); // x = 3y - CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&x, &z)); - mclBnG2_sub(&x, &x, &y); // x = 2y - - mclBnFr_setInt(&n, 2); - 
mclBnG2_mul(&z, &y, &n); // z = 2y - CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&x, &z)); - mclBnG2_normalize(&y, &z); - CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&y, &z)); -} - -CYBOZU_TEST_AUTO(GT) -{ - mclBnGT x, y, z; - memset(&x, 1, sizeof(x)); - CYBOZU_TEST_ASSERT(!mclBnGT_isZero(&x)); - - mclBnGT_clear(&x); - CYBOZU_TEST_ASSERT(mclBnGT_isZero(&x)); - - mclBnGT_setInt(&x, 1); - CYBOZU_TEST_ASSERT(mclBnGT_isOne(&x)); - char buf[2048]; - size_t size; - size = mclBnGT_getStr(buf, sizeof(buf), &x, 10); - CYBOZU_TEST_ASSERT(size > 0); - CYBOZU_TEST_EQUAL(size, strlen(buf)); - const char *s = "1 0 0 0 0 0 0 0 0 0 0 0"; - CYBOZU_TEST_EQUAL(buf, s); - - s = "1 2 3 4 5 6 7 8 9 10 11 12"; - CYBOZU_TEST_ASSERT(!mclBnGT_setStr(&x,s , strlen(s), 10)); - size = mclBnGT_getStr(buf, sizeof(buf), &x, 10); - CYBOZU_TEST_ASSERT(size > 0); - CYBOZU_TEST_EQUAL(size, strlen(buf)); - CYBOZU_TEST_EQUAL(buf, s); - - y = x; - CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&x, &y)); - - s = "-1 -2 -3 -4 -5 -6 -7 -8 -9 -10 -11 -12"; - CYBOZU_TEST_ASSERT(!mclBnGT_setStr(&z, s, strlen(s), 10)); - size = mclBnGT_getStr(buf, sizeof(buf), &z, 10); - CYBOZU_TEST_ASSERT(size > 0); - CYBOZU_TEST_EQUAL(size, strlen(buf)); - CYBOZU_TEST_ASSERT(!mclBnGT_setStr(&y, buf, size, 10)); - - mclBnGT_neg(&z, &y); - CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&x, &z)); - - mclBnGT_add(&y, &x, &y); - CYBOZU_TEST_ASSERT(mclBnGT_isZero(&y)); - - s = "2 0 0 0 0 0 0 0 0 0 0 0"; - CYBOZU_TEST_ASSERT(!mclBnGT_setStr(&y, s, strlen(s), 10)); - mclBnGT_mul(&z, &x, &y); - size = mclBnGT_getStr(buf, sizeof(buf), &z, 10); - CYBOZU_TEST_ASSERT(size > 0); - CYBOZU_TEST_EQUAL(size, strlen(buf)); - CYBOZU_TEST_EQUAL(buf, "2 4 6 8 10 12 14 16 18 20 22 24"); - - mclBnGT_div(&z, &z, &y); - size = mclBnGT_getStr(buf, sizeof(buf), &x, 10); - CYBOZU_TEST_ASSERT(size > 0); - CYBOZU_TEST_EQUAL(size, strlen(buf)); - CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&x, &z)); - - /* - can't use mclBnGT_pow because x is not in GT - */ - mclBnFr n; - mclBnFr_setInt(&n, 3); - mclBnGT_powGeneric(&z, &x, &n); - mclBnGT_mul(&y, &x, &x); - mclBnGT_mul(&y, &y, &x); - CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&y, &z)); - - mclBnGT_mul(&x, &y, &y); - mclBnGT_sqr(&y, &y); - CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&x, &y)); -} - -CYBOZU_TEST_AUTO(pairing) -{ - mclBnFr a, b, ab; - mclBnFr_setInt(&a, 123); - mclBnFr_setInt(&b, 456); - mclBnFr_mul(&ab, &a, &b); - mclBnG1 P, aP; - mclBnG2 Q, bQ; - mclBnGT e, e1, e2; - - CYBOZU_TEST_ASSERT(!mclBnG1_hashAndMapTo(&P, "1", 1)); - CYBOZU_TEST_ASSERT(!mclBnG2_hashAndMapTo(&Q, "1", 1)); - - mclBnG1_mul(&aP, &P, &a); - mclBnG2_mul(&bQ, &Q, &b); - - mclBn_pairing(&e, &P, &Q); - mclBnGT_pow(&e1, &e, &a); - mclBn_pairing(&e2, &aP, &Q); - CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&e1, &e2)); - - mclBnGT_pow(&e1, &e, &b); - mclBn_pairing(&e2, &P, &bQ); - CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&e1, &e2)); - - mclBnFr n; - mclBnFr_setInt(&n, 3); - mclBnGT_pow(&e1, &e, &n); - mclBnGT_mul(&e2, &e, &e); - mclBnGT_mul(&e2, &e2, &e); - CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&e1, &e2)); -} - -CYBOZU_TEST_AUTO(precomputed) -{ - mclBnG1 P1, P2; - mclBnG2 Q1, Q2; - CYBOZU_TEST_ASSERT(!mclBnG1_hashAndMapTo(&P1, "1", 1)); - CYBOZU_TEST_ASSERT(!mclBnG1_hashAndMapTo(&P2, "123", 3)); - CYBOZU_TEST_ASSERT(!mclBnG2_hashAndMapTo(&Q1, "1", 1)); - CYBOZU_TEST_ASSERT(!mclBnG2_hashAndMapTo(&Q2, "2", 1)); - - const int size = mclBn_getUint64NumToPrecompute(); - std::vector Q1buf, Q2buf; - Q1buf.resize(size); - Q2buf.resize(size); - mclBn_precomputeG2(Q1buf.data(), &Q1); - mclBn_precomputeG2(Q2buf.data(), &Q2); - - mclBnGT e1, e2, f1, 
f2, f3, f4; - mclBn_pairing(&e1, &P1, &Q1); - mclBn_precomputedMillerLoop(&f1, &P1, Q1buf.data()); - mclBn_finalExp(&f1, &f1); - CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&e1, &f1)); - - mclBn_pairing(&e2, &P2, &Q2); - mclBn_precomputedMillerLoop(&f2, &P2, Q2buf.data()); - mclBn_finalExp(&f2, &f2); - CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&e2, &f2)); - - mclBn_precomputedMillerLoop2(&f3, &P1, Q1buf.data(), &P2, Q2buf.data()); - mclBn_precomputedMillerLoop2mixed(&f4, &P1, &Q1, &P2, Q2buf.data()); - CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&f3, &f4)); - mclBn_finalExp(&f3, &f3); - - mclBnGT_mul(&e1, &e1, &e2); - CYBOZU_TEST_ASSERT(mclBnGT_isEqual(&e1, &f3)); -} - -CYBOZU_TEST_AUTO(serialize) -{ - const size_t FrSize = mclBn_getFrByteSize(); - const size_t G1Size = mclBn_getG1ByteSize(); - mclBnFr x1, x2; - mclBnG1 P1, P2; - mclBnG2 Q1, Q2; - char buf[1024]; - size_t n; - size_t expectSize; - size_t ret; - // Fr - expectSize = FrSize; - mclBnFr_setInt(&x1, -1); - n = mclBnFr_serialize(buf, sizeof(buf), &x1); - CYBOZU_TEST_EQUAL(n, expectSize); - - ret = mclBnFr_deserialize(&x2, buf, n); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x1, &x2)); - - ret = mclBnFr_deserialize(&x2, buf, n - 1); - CYBOZU_TEST_EQUAL(ret, 0); - - memset(&x2, 0, sizeof(x2)); - ret = mclBnFr_deserialize(&x2, buf, n + 1); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x1, &x2)); - - n = mclBnFr_serialize(buf, expectSize, &x1); - CYBOZU_TEST_EQUAL(n, expectSize); - - // G1 - expectSize = G1Size; - mclBnG1_hashAndMapTo(&P1, "1", 1); - n = mclBnG1_serialize(buf, sizeof(buf), &P1); - CYBOZU_TEST_EQUAL(n, expectSize); - - ret = mclBnG1_deserialize(&P2, buf, n); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&P1, &P2)); - - ret = mclBnG1_deserialize(&P2, buf, n - 1); - CYBOZU_TEST_EQUAL(ret, 0); - - memset(&P2, 0, sizeof(P2)); - ret = mclBnG1_deserialize(&P2, buf, n + 1); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&P1, &P2)); - - n = mclBnG1_serialize(buf, expectSize, &P1); - CYBOZU_TEST_EQUAL(n, expectSize); - - // G2 - expectSize = G1Size * 2; - mclBnG2_hashAndMapTo(&Q1, "1", 1); - n = mclBnG2_serialize(buf, sizeof(buf), &Q1); - CYBOZU_TEST_EQUAL(n, expectSize); - - ret = mclBnG2_deserialize(&Q2, buf, n); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&Q1, &Q2)); - - ret = mclBnG2_deserialize(&Q2, buf, n - 1); - CYBOZU_TEST_EQUAL(ret, 0); - - memset(&Q2, 0, sizeof(Q2)); - ret = mclBnG2_deserialize(&Q2, buf, n + 1); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&Q1, &Q2)); - - n = mclBnG2_serialize(buf, expectSize, &Q1); - CYBOZU_TEST_EQUAL(n, expectSize); -} - -CYBOZU_TEST_AUTO(serializeToHexStr) -{ - const size_t FrSize = mclBn_getFrByteSize(); - const size_t G1Size = mclBn_getG1ByteSize(); - mclBnFr x1, x2; - mclBnG1 P1, P2; - mclBnG2 Q1, Q2; - char buf[1024]; - size_t n; - size_t expectSize; - size_t ret; - // Fr - expectSize = FrSize * 2; // hex string - mclBnFr_setInt(&x1, -1); - n = mclBnFr_getStr(buf, sizeof(buf), &x1, MCLBN_IO_SERIALIZE_HEX_STR); - CYBOZU_TEST_EQUAL(n, expectSize); - - ret = mclBnFr_setStr(&x2, buf, n, MCLBN_IO_SERIALIZE_HEX_STR); - CYBOZU_TEST_EQUAL(ret, 0); - CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x1, &x2)); - - ret = mclBnFr_setStr(&x2, buf, n - 1, MCLBN_IO_SERIALIZE_HEX_STR); - CYBOZU_TEST_ASSERT(ret != 0); - - memset(&x2, 0, sizeof(x2)); - ret = mclBnFr_setStr(&x2, buf, n + 1, MCLBN_IO_SERIALIZE_HEX_STR); - CYBOZU_TEST_EQUAL(ret, 0); - CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&x1, &x2)); - - n = 
mclBnFr_getStr(buf, expectSize, &x1, MCLBN_IO_SERIALIZE_HEX_STR); - CYBOZU_TEST_EQUAL(n, expectSize); - - // G1 - expectSize = G1Size * 2; // hex string - mclBnG1_hashAndMapTo(&P1, "1", 1); - n = mclBnG1_getStr(buf, sizeof(buf), &P1, MCLBN_IO_SERIALIZE_HEX_STR); - CYBOZU_TEST_EQUAL(n, expectSize); - - ret = mclBnG1_setStr(&P2, buf, n, MCLBN_IO_SERIALIZE_HEX_STR); - CYBOZU_TEST_EQUAL(ret, 0); - CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&P1, &P2)); - - ret = mclBnG1_setStr(&P2, buf, n - 1, MCLBN_IO_SERIALIZE_HEX_STR); - CYBOZU_TEST_ASSERT(ret != 0); - - memset(&P2, 0, sizeof(P2)); - ret = mclBnG1_setStr(&P2, buf, n + 1, MCLBN_IO_SERIALIZE_HEX_STR); - CYBOZU_TEST_EQUAL(ret, 0); - CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&P1, &P2)); - - n = mclBnG1_getStr(buf, expectSize, &P1, MCLBN_IO_SERIALIZE_HEX_STR); - CYBOZU_TEST_EQUAL(n, expectSize); - - // G2 - expectSize = G1Size * 2 * 2; // hex string - mclBnG2_hashAndMapTo(&Q1, "1", 1); - n = mclBnG2_getStr(buf, sizeof(buf), &Q1, MCLBN_IO_SERIALIZE_HEX_STR); - CYBOZU_TEST_EQUAL(n, expectSize); - - ret = mclBnG2_setStr(&Q2, buf, n, MCLBN_IO_SERIALIZE_HEX_STR); - CYBOZU_TEST_EQUAL(ret, 0); - CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&Q1, &Q2)); - - ret = mclBnG2_setStr(&Q2, buf, n - 1, MCLBN_IO_SERIALIZE_HEX_STR); - CYBOZU_TEST_ASSERT(ret != 0); - - memset(&Q2, 0, sizeof(Q2)); - ret = mclBnG2_setStr(&Q2, buf, n + 1, MCLBN_IO_SERIALIZE_HEX_STR); - CYBOZU_TEST_EQUAL(ret, 0); - CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&Q1, &Q2)); - - n = mclBnG2_getStr(buf, expectSize, &Q1, MCLBN_IO_SERIALIZE_HEX_STR); - CYBOZU_TEST_EQUAL(n, expectSize); -} - -#if MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE >= 6 -CYBOZU_TEST_AUTO(badG2) -{ - int ret; - ret = mclBn_init(MCL_BN381_1, MCLBN_COMPILED_TIME_VAR); - CYBOZU_TEST_EQUAL(ret, 0); - const char *s = "1 18d3d8c085a5a5e7553c3a4eb628e88b8465bf4de2612e35a0a4eb018fb0c82e9698896031e62fd7633ffd824a859474 1dc6edfcf33e29575d4791faed8e7203832217423bf7f7fbf1f6b36625b12e7132c15fbc15562ce93362a322fb83dd0d 65836963b1f7b6959030ddfa15ab38ce056097e91dedffd996c1808624fa7e2644a77be606290aa555cda8481cfb3cb 1b77b708d3d4f65aeedf54b58393463a42f0dc5856baadb5ce608036baeca398c5d9e6b169473a8838098fd72fd28b50"; - mclBnG2 Q; - ret = mclBnG2_setStr(&Q, s, strlen(s), 16); - CYBOZU_TEST_ASSERT(ret != 0); -} -#endif - -struct Sequential { - uint32_t pos; - Sequential() : pos(0) {} - static uint32_t read(void *self, void *buf, uint32_t bufSize) - { - Sequential *seq = reinterpret_cast(self); - uint8_t *p = reinterpret_cast(buf); - for (uint32_t i = 0; i < bufSize; i++) { - p[i] = uint8_t(seq->pos + i) & 0x1f; // mask is to make valid Fp - } - seq->pos += bufSize; - return bufSize; - } -}; - -CYBOZU_TEST_AUTO(setRandFunc) -{ - Sequential seq; - for (int j = 0; j < 3; j++) { - puts(j == 1 ? 
"sequential rand" : "true rand"); - for (int i = 0; i < 5; i++) { - mclBnFr x; - int ret; - char buf[1024]; - ret = mclBnFr_setByCSPRNG(&x); - CYBOZU_TEST_EQUAL(ret, 0); - ret = mclBnFr_getStr(buf, sizeof(buf), &x, 16); - CYBOZU_TEST_ASSERT(ret > 0); - printf("%d %s\n", i, buf); - } - if (j == 0) { - mclBn_setRandFunc(&seq, Sequential::read); - } else { - mclBn_setRandFunc(0, 0); - } - } -} - -CYBOZU_TEST_AUTO(Fp) -{ - mclBnFp x1, x2; - char buf[1024]; - int ret = mclBnFp_setHashOf(&x1, "abc", 3); - CYBOZU_TEST_ASSERT(ret == 0); - mclSize n = mclBnFp_serialize(buf, sizeof(buf), &x1); - CYBOZU_TEST_ASSERT(n > 0); - n = mclBnFp_deserialize(&x2, buf, n); - CYBOZU_TEST_ASSERT(n > 0); - CYBOZU_TEST_ASSERT(mclBnFp_isEqual(&x1, &x2)); - for (size_t i = 0; i < n; i++) { - buf[i] = char(i); - } - ret = mclBnFp_setLittleEndian(&x1, buf, n); - CYBOZU_TEST_ASSERT(ret == 0); - memset(buf, 0, sizeof(buf)); - n = mclBnFp_serialize(buf, sizeof(buf), &x1); - CYBOZU_TEST_ASSERT(n > 0); - for (size_t i = 0; i < n - 1; i++) { - CYBOZU_TEST_EQUAL(buf[i], char(i)); - } - mclBnFp_clear(&x1); - memset(&x2, 0, sizeof(x2)); - CYBOZU_TEST_ASSERT(mclBnFp_isEqual(&x1, &x2)); -} - -CYBOZU_TEST_AUTO(mod) -{ - { - // Fp - char buf[1024]; - mclBn_getFieldOrder(buf, sizeof(buf)); - mpz_class p(buf); - mpz_class x = mpz_class(1) << (mclBn_getFpByteSize() * 2); - mclBnFp y; - int ret = mclBnFp_setLittleEndianMod(&y, x.get_mpz_t()->_mp_d, x.get_mpz_t()->_mp_size * sizeof(void*)); - CYBOZU_TEST_EQUAL(ret, 0); - mclBnFp_getStr(buf, sizeof(buf), &y, 10); - CYBOZU_TEST_EQUAL(mpz_class(buf), x % p); - } - { - // Fr - char buf[1024]; - mclBn_getCurveOrder(buf, sizeof(buf)); - mpz_class p(buf); - mpz_class x = mpz_class(1) << (mclBn_getFrByteSize() * 2); - mclBnFr y; - int ret = mclBnFr_setLittleEndianMod(&y, x.get_mpz_t()->_mp_d, x.get_mpz_t()->_mp_size * sizeof(void*)); - CYBOZU_TEST_EQUAL(ret, 0); - mclBnFr_getStr(buf, sizeof(buf), &y, 10); - CYBOZU_TEST_EQUAL(mpz_class(buf), x % p); - } -} - -CYBOZU_TEST_AUTO(Fp2) -{ - mclBnFp2 x1, x2; - char buf[1024]; - int ret = mclBnFp_setHashOf(&x1.d[0], "abc", 3); - CYBOZU_TEST_ASSERT(ret == 0); - ret = mclBnFp_setHashOf(&x1.d[1], "xyz", 3); - CYBOZU_TEST_ASSERT(ret == 0); - mclSize n = mclBnFp2_serialize(buf, sizeof(buf), &x1); - CYBOZU_TEST_ASSERT(n > 0); - n = mclBnFp2_deserialize(&x2, buf, n); - CYBOZU_TEST_ASSERT(n > 0); - CYBOZU_TEST_ASSERT(mclBnFp2_isEqual(&x1, &x2)); - mclBnFp2_clear(&x1); - memset(&x2, 0, sizeof(x2)); - CYBOZU_TEST_ASSERT(mclBnFp2_isEqual(&x1, &x2)); -} - -CYBOZU_TEST_AUTO(mapToG1) -{ - mclBnFp x; - mclBnG1 P1, P2; - mclBnFp_setHashOf(&x, "abc", 3); - int ret = mclBnFp_mapToG1(&P1, &x); - CYBOZU_TEST_ASSERT(ret == 0); - mclBnG1_hashAndMapTo(&P2, "abc", 3); - CYBOZU_TEST_ASSERT(mclBnG1_isEqual(&P1, &P2)); -} - -CYBOZU_TEST_AUTO(mapToG2) -{ - mclBnFp2 x; - mclBnG2 P1, P2; - mclBnFp_setHashOf(&x.d[0], "abc", 3); - mclBnFp_clear(&x.d[1]); - int ret = mclBnFp2_mapToG2(&P1, &x); - CYBOZU_TEST_ASSERT(ret == 0); - mclBnG2_hashAndMapTo(&P2, "abc", 3); - CYBOZU_TEST_ASSERT(mclBnG2_isEqual(&P1, &P2)); -} - -void G1onlyTest(int curve) -{ - printf("curve=%d\n", curve); - int ret; - ret = mclBn_init(curve, MCLBN_COMPILED_TIME_VAR); - CYBOZU_TEST_EQUAL(ret, 0); - mclBnG1 P0; - ret = mclBnG1_getBasePoint(&P0); - CYBOZU_TEST_EQUAL(ret, 0); - char buf[256]; - ret = mclBnG1_getStr(buf, sizeof(buf), &P0, 16); - CYBOZU_TEST_ASSERT(ret > 0); - printf("basePoint=%s\n", buf); - G1test(); -} - -CYBOZU_TEST_AUTO(G1only) -{ - const int tbl[] = { - MCL_SECP192K1, - MCL_NIST_P192, - 
MCL_SECP224K1, - MCL_NIST_P224, // hashAndMapTo is error - MCL_SECP256K1, - MCL_NIST_P256, -#if MCLBN_FP_UNIT_SIZE >= 6 && MCLBN_FR_UNIT_SIZE >= 6 - MCL_SECP384R1, -#endif - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - G1onlyTest(tbl[i]); - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/bn_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/bn_test.cpp deleted file mode 100644 index 071ec706c..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/bn_test.cpp +++ /dev/null @@ -1,408 +0,0 @@ -#define PUT(x) std::cout << #x "=" << x << std::endl; -#define CYBOZU_TEST_DISABLE_AUTO_RUN -#include -cybozu::CpuClock clk; -#include -#include -#include -#include - -#if defined(__EMSCRIPTEN__) && !defined(MCL_AVOID_EXCEPTION_TEST) - #define MCL_AVOID_EXCEPTION_TEST -#endif - -typedef mcl::bn::local::Compress Compress; -using namespace mcl::bn; - -mcl::fp::Mode g_mode; - -const struct TestSet { - mcl::CurveParam cp; - const char *name; - struct G2 { - const char *aa; - const char *ab; - const char *ba; - const char *bb; - } g2; - struct G1 { - int a; - int b; - } g1; - const char *e; -} g_testSetTbl[] = { - { - mcl::BN254, - "BN254", - { - "12723517038133731887338407189719511622662176727675373276651903807414909099441", - "4168783608814932154536427934509895782246573715297911553964171371032945126671", - "13891744915211034074451795021214165905772212241412891944830863846330766296736", - "7937318970632701341203597196594272556916396164729705624521405069090520231616", - }, - { - -1, 1 - }, - "8118772341496577043438385328606447626730215814727396173233264007541007797690 " - "6742571767760762192519140673058087976840103832045324348366170860928670686713 " - "9727912590495366720378364920530546614235713408261568635512172059018197267630 " - "10180700148605185348549931182990442059136187839792856455707820203302941578832 " - "5054507763444412917986776641611331046146804026682679569910978464879371792565 " - "6917005519826733659554708445125877487590687705432214234949972860245110398023 " - "10448556317747236258066222816126375978842661908560317699736569642190930635294 " - "1516980358051268127904344653343215863076753141133525905743113718749531324025 " - "9794836735385959178744195210089532061310424844916928682580569566332541022353 " - "9375574834170998962484906689780052970915033987453510324648351251071086068423 " - "710778048594563655498360873129325895716179849942646859397874562033386335205 " - "10688745994254573144943003027511098295097561129365638275727908595677791826005" - }, - { - mcl::BN_SNARK1, - "BN_SNARK1", - { - "15267802884793550383558706039165621050290089775961208824303765753922461897946", - "9034493566019742339402378670461897774509967669562610788113215988055021632533", - "644888581738283025171396578091639672120333224302184904896215738366765861164", - "20532875081203448695448744255224543661959516361327385779878476709582931298750", - }, - { - 1, 2 - }, - "15163392945550945552839911839294582974434771053565812675833291179413834896953 " - "20389211011850518572149982239826345669421868561029856883955740401696801984953 " - "17766795911013516700216709333389761327222334145011922123798810516425387779347 " - "6064163297423711021549973931984064750876944939004405231004441199168710504090 " - "296093106139306574860102680862436174771023602986903675151017278048818344347 " - "1573596951222456889652521728261836933382094474023551133585236991207205981715 " - "3511871642997169996730611220058787939468653751355351269812083879279936651479 " - 
"17848534184080172844395614793152774197360421729995967636680357250333093768504 " - "3273860031361637906105800996652640969711942192883181518057117446820546419132 " - "7212721189663231589365009629980400132745687533815732336503876102977912682966 " - "18569236611881855981733896549089319395087993987737891870319625215675547032585 " - "10088832670068482545658647976676953228519838542958787800193793260459700064172 " - }, -}; - -CYBOZU_TEST_AUTO(size) -{ - CYBOZU_TEST_EQUAL(sizeof(Fp), 32u); - CYBOZU_TEST_EQUAL(sizeof(Fp2), sizeof(Fp) * 2); - CYBOZU_TEST_EQUAL(sizeof(Fp6), sizeof(Fp) * 6); - CYBOZU_TEST_EQUAL(sizeof(Fp12), sizeof(Fp) * 12); - CYBOZU_TEST_EQUAL(sizeof(G1), sizeof(Fp) * 3); - CYBOZU_TEST_EQUAL(sizeof(G2), sizeof(Fp2) * 3); -} - -void testSetStr(const G2& Q0) -{ - G2::setCompressedExpression(); - G2 Q; - Q.clear(); - for (int i = 0; i < 10; i++) { - G2 R; - R.setStr(Q.getStr()); - CYBOZU_TEST_EQUAL(Q, R); - G2::add(Q, Q, Q0); - } -} - -void testMapToG1() -{ - G1 g; - for (int i = 1; i < 10; i++) { - mapToG1(g, i); - CYBOZU_TEST_ASSERT(!g.isZero()); - G1 gr; - G1::mulGeneric(gr, g, BN::param.r); - CYBOZU_TEST_ASSERT(gr.isZero()); - } -#ifndef MCL_AVOID_EXCEPTION_TEST - if (BN::param.cp.b == 2) { - Fp c1; - bool b = Fp::squareRoot(c1, -3); - CYBOZU_TEST_ASSERT(b); - CYBOZU_TEST_EXCEPTION(mapToG1(g, 0), cybozu::Exception); - CYBOZU_TEST_EXCEPTION(mapToG1(g, c1), cybozu::Exception); - CYBOZU_TEST_EXCEPTION(mapToG1(g, -c1), cybozu::Exception); - } -#endif -} - -void testMapToG2() -{ - G2 g; - for (int i = 1; i < 10; i++) { - mapToG2(g, i); - CYBOZU_TEST_ASSERT(!g.isZero()); - G2 gr; - G2::mulGeneric(gr, g, BN::param.r); - CYBOZU_TEST_ASSERT(gr.isZero()); - } -#ifndef MCL_AVOID_EXCEPTION_TEST - if (BN::param.cp.b == 2) { - CYBOZU_TEST_EXCEPTION(mapToG2(g, 0), cybozu::Exception); - } -#endif - Fp x; - x.setHashOf("abc"); - mapToG2(g, Fp2(x, 0)); - CYBOZU_TEST_ASSERT(g.isValid()); -} - -void testCyclotomic() -{ - Fp12 a; - for (int i = 0; i < 12; ++i) { - a.getFp0()[i] = i * i; - } - local::mapToCyclotomic(a, a); - Fp12 d; - Compress b(d, a); - a *= a; - Fp12 d2; - Compress c(d2, b); - Compress::square_n(c, 1); - c.decompress(); - CYBOZU_TEST_EQUAL(a, d2); - Compress::square_n(b, 1); - b.decompress(); - CYBOZU_TEST_EQUAL(a, d); -} - -void testCompress(const G1& P, const G2& Q) -{ - if (BN::param.cp.curveType != MCL_BN254) return; - Fp12 a; - pairing(a, P, Q); - local::mapToCyclotomic(a, a); - Fp12 b; - Compress::fixed_power(b, a); - Fp12 c; - Fp12::pow(c, a, BN::param.abs_z); - CYBOZU_TEST_EQUAL(b, c); -} - -void testPrecomputed(const G1& P, const G2& Q) -{ - Fp12 e1, e2; - pairing(e1, P, Q); - std::vector Qcoeff; - precomputeG2(Qcoeff, Q); - precomputedMillerLoop(e2, P, Qcoeff); - finalExp(e2, e2); - CYBOZU_TEST_EQUAL(e1, e2); -} - -void testFp12pow(const G1& P, const G2& Q) -{ - Fp12 e, e1, e2; - pairing(e, P, Q); - cybozu::XorShift rg; - for (int i = -10; i < 10; i++) { - mpz_class xm = i; - Fp12::pow(e1, e, xm); - Fp12::powGeneric(e2, e, xm); - CYBOZU_TEST_EQUAL(e1, e2); - } - for (int i = 0; i < 10; i++) { - Fr x; - x.setRand(rg); - mpz_class xm = x.getMpz(); - Fp12::pow(e1, e, xm); - BN::param.glv2.pow(e2, e, xm); - CYBOZU_TEST_EQUAL(e1, e2); - } -} - -void testMillerLoop2(const G1& P1, const G2& Q1) -{ - Fp12 e1, e2, e3; - mpz_class c1("12342342423442"); - mpz_class c2("329428049820348209482"); - G2 Q2; - G1 P2; - G2::mul(Q2, Q1, c1); - G1::mul(P2, P1, c2); - pairing(e1, P1, Q1); - pairing(e2, P2, Q2); - e1 *= e2; - - std::vector Q1coeff, Q2coeff; - precomputeG2(Q1coeff, Q1); - 
precomputeG2(Q2coeff, Q2); - precomputedMillerLoop2(e2, P1, Q1coeff, P2, Q2coeff); - precomputedMillerLoop2mixed(e3, P1, Q1, P2, Q2coeff); - CYBOZU_TEST_EQUAL(e2, e3); - finalExp(e2, e2); - CYBOZU_TEST_EQUAL(e1, e2); - - // special value - G2 Z; - Z.clear(); - Q2 += Q2; - precomputeG2(Q1coeff, Z); - precomputeG2(Q2coeff, Q2); - precomputedMillerLoop2(e2, P1, Q1coeff, P2, Q2coeff); - precomputedMillerLoop2mixed(e3, P1, Z, P2, Q2coeff); - finalExp(e2, e2); - finalExp(e3, e3); - CYBOZU_TEST_EQUAL(e2, e3); -} - -void testPairing(const G1& P, const G2& Q, const char *eStr) -{ - Fp12 e1; - pairing(e1, P, Q); - Fp12 e2; - { - std::stringstream ss(eStr); - ss >> e2; - } - CYBOZU_TEST_EQUAL(e1, e2); - - Fp12 e = e1, ea; - G1 Pa; - G2 Qa; -#if defined(__EMSCRIPTEN__) || MCL_SIZEOF_UNIT == 4 - const int count = 100; -#else - const int count = 1000; -#endif - mpz_class a; - cybozu::XorShift rg; - for (int i = 0; i < count; i++) { - Fr r; - r.setRand(rg); - a = r.getMpz(); - Fp12::pow(ea, e, a); - G1::mul(Pa, P, a); - G2::mul(Qa, Q, a); - G1 T; - G1::mulCT(T, P, a); - CYBOZU_TEST_EQUAL(Pa, T); - pairing(e1, Pa, Q); - pairing(e2, P, Qa); - CYBOZU_TEST_EQUAL(ea, e1); - CYBOZU_TEST_EQUAL(ea, e2); - } -} - -void testTrivial(const G1& P, const G2& Q) -{ - G1 Z1; Z1.clear(); - G2 Z2; Z2.clear(); - Fp12 e; - pairing(e, Z1, Q); - CYBOZU_TEST_EQUAL(e, 1); - pairing(e, P, Z2); - CYBOZU_TEST_EQUAL(e, 1); - pairing(e, Z1, Z2); - CYBOZU_TEST_EQUAL(e, 1); - - std::vector Qcoeff; - precomputeG2(Qcoeff, Z2); - precomputedMillerLoop(e, P, Qcoeff); - finalExp(e, e); - CYBOZU_TEST_EQUAL(e, 1); - - precomputeG2(Qcoeff, Q); - precomputedMillerLoop(e, Z1, Qcoeff); - finalExp(e, e); - CYBOZU_TEST_EQUAL(e, 1); -} - -void testIoAll(const G1& P, const G2& Q) -{ - const int FpTbl[] = { 0, 2, 2|mcl::IoPrefix, 10, 16, 16|mcl::IoPrefix, mcl::IoArray, mcl::IoArrayRaw }; - const int EcTbl[] = { mcl::IoEcAffine, mcl::IoEcProj, mcl::IoEcCompY, mcl::IoSerialize }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(FpTbl); i++) { - for (size_t j = 0; j < CYBOZU_NUM_OF_ARRAY(EcTbl); j++) { - G1 P2 = P, P3; - G2 Q2 = Q, Q3; - int ioMode = FpTbl[i] | EcTbl[j]; - std::string s = P2.getStr(ioMode); - P3.setStr(s, ioMode); - CYBOZU_TEST_EQUAL(P2, P3); - s = Q2.getStr(ioMode); - Q3.setStr(s, ioMode); - CYBOZU_TEST_EQUAL(Q2, Q3); - s = P.x.getStr(ioMode); - Fp Px; - Px.setStr(s, ioMode); - CYBOZU_TEST_EQUAL(P.x, Px); - s = Q.x.getStr(ioMode); - Fp2 Qx; - Qx.setStr(s, ioMode); - CYBOZU_TEST_EQUAL(Q.x, Qx); - } - } -} - -void testIo(const G1& P, const G2& Q) -{ - testIoAll(P, Q); - G1 Z1; - G2 Z2; - Z1.clear(); - Z2.clear(); - testIoAll(Z1, Z2); -} - -#include "bench.hpp" - -CYBOZU_TEST_AUTO(naive) -{ - printf("mcl version=%03x\n", mcl::version); - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(g_testSetTbl); i++) { - const TestSet& ts = g_testSetTbl[i]; - printf("i=%d curve=%s\n", int(i), ts.name); - initPairing(ts.cp, g_mode); - const G1 P(ts.g1.a, ts.g1.b); - const G2 Q(Fp2(ts.g2.aa, ts.g2.ab), Fp2(ts.g2.ba, ts.g2.bb)); -#ifdef ONLY_BENCH - { - Fp12 e; - for (int i = 0; i < 10000; i++) { clk.begin(); pairing(e, P, Q); clk.end(); } - } - clk.put(); - return; -#endif - testFp12pow(P, Q); - testIo(P, Q); - testTrivial(P, Q); - testSetStr(Q); - testMapToG1(); - testMapToG2(); - testCyclotomic(); - testCompress(P, Q); - testPairing(P, Q, ts.e); - testPrecomputed(P, Q); - testMillerLoop2(P, Q); - testBench(P, Q); - benchAddDblG1(); - benchAddDblG2(); - } - int count = (int)clk.getCount(); - if (count) { - printf("count=%d ", count); - clk.put(); - } -} - -int 
main(int argc, char *argv[]) - try -{ - cybozu::Option opt; - std::string mode; - opt.appendOpt(&mode, "auto", "m", ": mode(gmp/gmp_mont/llvm/llvm_mont/xbyak)"); - if (!opt.parse(argc, argv)) { - opt.usage(); - return 1; - } - g_mode = mcl::fp::StrToMode(mode); - printf("JIT %d\n", mcl::fp::isEnableJIT()); - return cybozu::test::autoRun.run(argc, argv); -} catch (std::exception& e) { - printf("ERR %s\n", e.what()); - return 1; -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/conversion_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/conversion_test.cpp deleted file mode 100644 index ec11fe900..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/conversion_test.cpp +++ /dev/null @@ -1,96 +0,0 @@ -#include -#include - -CYBOZU_TEST_AUTO(arrayToDec) -{ - const struct { - uint32_t x[5]; - size_t xn; - const char *s; - } tbl[] = { - { { 0, 0, 0, 0, 0 }, 1, "0" }, - { { 9, 0, 0, 0, 0 }, 1, "9" }, - { { 123456789, 0, 0, 0, 0 }, 1, "123456789" }, - { { 2147483647, 0, 0, 0, 0 }, 1, "2147483647" }, - { { 0xffffffff, 0, 0, 0, 0 }, 1, "4294967295" }, - { { 0x540be400, 0x2, 0, 0, 0 }, 2, "10000000000" }, - { { 0xffffffff, 0xffffffff, 0, 0, 0 }, 2, "18446744073709551615" }, - { { 0x89e80001, 0x8ac72304, 0, 0, 0 }, 2, "10000000000000000001" }, - { { 0xc582ca00, 0x8ac72304, 0, 0, 0 }, 2, "10000000001000000000" }, - { { 0, 0, 1, 0, 0 }, 3, "18446744073709551616" }, - { { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0 }, 4, "340282366920938463463374607431768211455" }, - { { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }, 5, "1461501637330902918203684832716283019655932542975" }, - { { 0x3b9aca00, 0x5e3f3700, 0x1cbfa532, 0x04f6433a, 0xd83ff078 }, 5, "1234567901234560000000000000000000000001000000000" }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const size_t bufSize = 128; - char buf[bufSize] = {}; - const char *str = tbl[i].s; - const uint32_t *x = tbl[i].x; - const size_t strLen = strlen(str); - size_t n = mcl::fp::arrayToDec(buf, bufSize, x, tbl[i].xn); - CYBOZU_TEST_EQUAL(n, strLen); - CYBOZU_TEST_EQUAL_ARRAY(buf + bufSize - n, str, n); - const size_t maxN = 32; - uint32_t xx[maxN] = {}; - size_t xn = mcl::fp::decToArray(xx, maxN, str, strLen); - CYBOZU_TEST_EQUAL(xn, tbl[i].xn); - CYBOZU_TEST_EQUAL_ARRAY(xx, x, xn); - } - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const size_t bufSize = 128; - char buf[bufSize] = {}; - const char *str = tbl[i].s; - const size_t strLen = strlen(str); - uint64_t x[8] = {}; - size_t xn = (tbl[i].xn + 1) / 2; - memcpy(x, tbl[i].x, tbl[i].xn * sizeof(uint32_t)); - size_t n = mcl::fp::arrayToDec(buf, bufSize, x, xn); - CYBOZU_TEST_EQUAL(n, strLen); - CYBOZU_TEST_EQUAL_ARRAY(buf + bufSize - n, str, n); - const size_t maxN = 32; - uint64_t xx[maxN] = {}; - size_t xxn = mcl::fp::decToArray(xx, maxN, str, strLen); - CYBOZU_TEST_EQUAL(xxn, xn); - CYBOZU_TEST_EQUAL_ARRAY(xx, x, xn); - } -} - -CYBOZU_TEST_AUTO(writeHexStr) -{ - const char *hex1tbl = "0123456789abcdef"; - const char *hex2tbl = "0123456789ABCDEF"; - for (size_t i = 0; i < 16; i++) { - uint8_t v = 0xff; - CYBOZU_TEST_ASSERT(mcl::fp::local::hexCharToUint8(&v, hex1tbl[i])); - CYBOZU_TEST_EQUAL(v, i); - CYBOZU_TEST_ASSERT(mcl::fp::local::hexCharToUint8(&v, hex2tbl[i])); - CYBOZU_TEST_EQUAL(v, i); - } - const struct Tbl { - const char *bin; - size_t n; - const char *hex; - } tbl[] = { - { "", 0, "" }, - { "\x12\x34\xab", 3, "1234ab" }, - { "\xff\xfc\x00\x12", 4, "fffc0012" }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - char 
buf[32]; - cybozu::MemoryOutputStream os(buf, sizeof(buf)); - const char *bin = tbl[i].bin; - const char *hex = tbl[i].hex; - size_t n = tbl[i].n; - bool b; - mcl::fp::writeHexStr(&b, os, bin, n); - CYBOZU_TEST_ASSERT(b); - CYBOZU_TEST_EQUAL(os.getPos(), n * 2); - CYBOZU_TEST_EQUAL_ARRAY(buf, hex, n * 2); - - cybozu::MemoryInputStream is(hex, n * 2); - size_t w = mcl::fp::readHexStr(buf, n, is); - CYBOZU_TEST_EQUAL(w, n); - CYBOZU_TEST_EQUAL_ARRAY(buf, bin, n); - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/ec_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/ec_test.cpp deleted file mode 100644 index ec49adbfe..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/ec_test.cpp +++ /dev/null @@ -1,573 +0,0 @@ -//#define MCL_EC_USE_AFFINE -#define PUT(x) std::cout << #x "=" << (x) << std::endl -#define CYBOZU_TEST_DISABLE_AUTO_RUN -#include -#include -#include - -#include -#include -#include -#include -#include - -typedef mcl::FpT<> Fp; -struct tagZn; -typedef mcl::FpT Zn; -typedef mcl::EcT Ec; - -CYBOZU_TEST_AUTO(sizeof) -{ - CYBOZU_TEST_EQUAL(sizeof(Fp), sizeof(mcl::fp::Unit) * Fp::maxSize); -#ifdef MCL_EC_USE_AFFINE - CYBOZU_TEST_EQUAL(sizeof(Ec), sizeof(Fp) * 2 + sizeof(mcl::fp::Unit)); -#else - CYBOZU_TEST_EQUAL(sizeof(Ec), sizeof(Fp) * 3); -#endif -} - -struct Test { - const mcl::EcParam& para; - Test(const mcl::EcParam& para, mcl::fp::Mode fpMode, mcl::ec::Mode ecMode) - : para(para) - { - printf("fpMode=%s\n", mcl::fp::ModeToStr(fpMode)); - Fp::init(para.p, fpMode); - Zn::init(para.n, fpMode); - Ec::init(para.a, para.b, ecMode); - } - void cstr() const - { - Ec O; - O.clear(); - CYBOZU_TEST_ASSERT(O.isZero()); - CYBOZU_TEST_ASSERT(O.isValid()); - Ec P; - P.clear(); - Ec::neg(P, O); - CYBOZU_TEST_EQUAL(P, O); - } - void pow2(Ec& Q, const Ec& P, int n) const - { - Q = P; - for (int i = 0; i < n; i++) { - Q += Q; - } - } - void pow2test(const Ec& P, int n) const - { - Ec Q, R; - pow2(Q, P, n); - Q -= P; // Q = (2^n - 1)P - Fp x = 1; - for (int i = 0; i < n; i++) { - x += x; - } - x -= 1; // x = 2^n - 1 - Ec::mul(R, P, x); - CYBOZU_TEST_EQUAL(Q, R); - Q = P; - Ec::mul(Q, Q, x); - CYBOZU_TEST_EQUAL(Q, R); - } - void ope() const - { - Fp x(para.gx); - Fp y(para.gy); - Zn n = 0; - CYBOZU_TEST_NO_EXCEPTION(Ec(x, y)); - CYBOZU_TEST_EXCEPTION(Ec(x, y + 1), cybozu::Exception); - Ec P(x, y), Q, R, O; - O.clear(); - CYBOZU_TEST_ASSERT(P.isNormalized()); - { - Ec::neg(Q, P); - CYBOZU_TEST_EQUAL(Q.x, P.x); - CYBOZU_TEST_EQUAL(Q.y, -P.y); - - R = P + Q; - CYBOZU_TEST_ASSERT(R.isZero()); - CYBOZU_TEST_ASSERT(R.isNormalized()); - CYBOZU_TEST_ASSERT(R.isValid()); - - R = P + O; - CYBOZU_TEST_EQUAL(R, P); - R = O + P; - CYBOZU_TEST_EQUAL(R, P); - } - - { - Ec::dbl(R, P); -#ifndef MCL_EC_USE_AFFINE - CYBOZU_TEST_ASSERT(!R.isNormalized()); -#endif - CYBOZU_TEST_ASSERT(R.isValid()); - Ec R2 = P + P; - CYBOZU_TEST_EQUAL(R, R2); - { - Ec P2 = P; - Ec::dbl(P2, P2); - CYBOZU_TEST_EQUAL(P2, R2); - } - Ec R3L = R2 + P; - Ec R3R = P + R2; - CYBOZU_TEST_EQUAL(R3L, R3R); - { - Ec RR = R2; - RR = RR + P; - CYBOZU_TEST_EQUAL(RR, R3L); - RR = R2; - RR = P + RR; - CYBOZU_TEST_EQUAL(RR, R3L); - RR = P; - RR = RR + RR; - CYBOZU_TEST_EQUAL(RR, R2); - } - Ec::mul(R, P, 2); - CYBOZU_TEST_EQUAL(R, R2); - Ec R4L = R3L + R2; - Ec R4R = R2 + R3L; - CYBOZU_TEST_EQUAL(R4L, R4R); - Ec::mul(R, P, 5); - CYBOZU_TEST_EQUAL(R, R4L); - } - { - R = P; - for (int i = 0; i < 10; i++) { - R += P; - } - Ec R2; - Ec::mul(R2, P, 11); - CYBOZU_TEST_EQUAL(R, R2); - } - Ec::mul(R, P, n - 1); - CYBOZU_TEST_EQUAL(R, -P); 
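// P has order n, so (n-1)P == -P as verified above; one more addition of P
// must therefore yield the point at infinity.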
- R += P; // Ec::mul(R, P, n); - CYBOZU_TEST_ASSERT(R.isZero()); - { - const int tbl[] = { 1, 2, 63, 64, 65, 127, 128, 129 }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - pow2test(P, tbl[i]); - } - } - { - Ec::mul(Q, P, 0); - CYBOZU_TEST_ASSERT(Q.isZero()); - Q = P; - CYBOZU_TEST_ASSERT(!Q.isZero()); - Ec::mul(Q, Q, 0); - CYBOZU_TEST_ASSERT(Q.isZero()); - Ec::mul(Q, P, 1); - CYBOZU_TEST_EQUAL(P, Q); - } - { - Ec R2; - P += P; - Q += P; - CYBOZU_TEST_ASSERT(!P.z.isOne()); - CYBOZU_TEST_ASSERT(!Q.z.isOne()); - Ec::add(R2, P, Q); - - P.normalize(); - CYBOZU_TEST_ASSERT(P.z.isOne()); - CYBOZU_TEST_ASSERT(!Q.z.isOne()); - // affine + generic - Ec::add(R, P, Q); - CYBOZU_TEST_EQUAL(R, R2); - // generic + affine - Ec::add(R, Q, P); - CYBOZU_TEST_EQUAL(R, R2); - - Q.normalize(); - CYBOZU_TEST_ASSERT(P.z.isOne()); - CYBOZU_TEST_ASSERT(Q.z.isOne()); - // affine + affine - Ec::add(R, P, Q); - CYBOZU_TEST_EQUAL(R, R2); - - P += P; - CYBOZU_TEST_ASSERT(!P.z.isOne()); - // generic - Ec::dbl(R2, P); - - P.normalize(); - CYBOZU_TEST_ASSERT(P.z.isOne()); - // affine - Ec::dbl(R, P); - CYBOZU_TEST_EQUAL(R, R2); - } - } - - void mul() const - { - Fp x(para.gx); - Fp y(para.gy); - Ec P(x, y); - Ec Q; - Ec R; - R.clear(); - for (int i = 0; i < 100; i++) { - Ec::mul(Q, P, i); - CYBOZU_TEST_EQUAL(Q, R); - R += P; - } - } - - void neg_mul() const - { - Fp x(para.gx); - Fp y(para.gy); - Ec P(x, y); - Ec Q; - Ec R; - R.clear(); - for (int i = 0; i < 100; i++) { - Ec::mul(Q, P, -i); - CYBOZU_TEST_EQUAL(Q, R); - R -= P; - } - } - void squareRoot() const - { - Fp x(para.gx); - Fp y(para.gy); - bool odd = y.isOdd(); - Fp yy; - bool b = Ec::getYfromX(yy, x, odd); - CYBOZU_TEST_ASSERT(b); - CYBOZU_TEST_EQUAL(yy, y); - Fp::neg(y, y); - odd = y.isOdd(); - yy.clear(); - b = Ec::getYfromX(yy, x, odd); - CYBOZU_TEST_ASSERT(b); - CYBOZU_TEST_EQUAL(yy, y); - } - void mul_fp() const - { - Fp x(para.gx); - Fp y(para.gy); - Ec P(x, y); - Ec Q; - Ec R; - R.clear(); - for (int i = 0; i < 100; i++) { - Ec::mul(Q, P, Zn(i)); - CYBOZU_TEST_EQUAL(Q, R); - R += P; - } - } - void str() const - { - const Fp x(para.gx); - const Fp y(para.gy); - Ec P(x, y); - Ec Q; - // not compressed - Ec::setCompressedExpression(false); - { - std::stringstream ss; - ss << P; - ss >> Q; - CYBOZU_TEST_EQUAL(P, Q); - } - { - Q.clear(); - CYBOZU_TEST_EQUAL(Q.getStr(), "0"); - } - for (int i = 0; i < 2; i++) { - for (int j = 0; j < 2; j++) { - int base = i == 0 ? 10 : 16; - bool withPrefix = j == 0; - int ioMode = base | (withPrefix ? mcl::IoPrefix : 0); - std::string expected = "1 " + x.getStr(ioMode) + " " + y.getStr(ioMode); - CYBOZU_TEST_EQUAL(P.getStr(ioMode), expected); - std::ostringstream os; - if (base == 16) { - os << std::hex; - } - if (withPrefix) { - os << std::showbase; - } - os << P; - CYBOZU_TEST_EQUAL(os.str(), expected); - } - } - { - P = -P; - std::stringstream ss; - ss << P; - ss >> Q; - CYBOZU_TEST_EQUAL(P, Q); - } - P.clear(); - { - std::stringstream ss; - ss << P; - ss >> Q; - CYBOZU_TEST_EQUAL(P, Q); - } - CYBOZU_TEST_EXCEPTION(P.setStr("1 3 5"), cybozu::Exception); - // compressed - Ec::setCompressedExpression(true); - P.set(x, y); - { - std::stringstream ss; - ss << P; - ss >> Q; - CYBOZU_TEST_EQUAL(P, Q); - } - { - P = -P; - std::stringstream ss; - ss << P; - ss >> Q; - CYBOZU_TEST_EQUAL(P, Q); - } - P.clear(); - { - std::stringstream ss; - ss << P; - ss >> Q; - CYBOZU_TEST_EQUAL(P, Q); - } - // IoSerialize, IoSerializeHexStr - const size_t adj = Ec::isMSBserialize() ? 
0 : 1; - P.set(x, y); - { - std::string s = P.getStr(mcl::IoSerialize); - CYBOZU_TEST_EQUAL(s.size(), Fp::getByteSize() + adj); - Q.setStr(s, mcl::IoSerialize); - CYBOZU_TEST_EQUAL(P, Q); - } - { - std::string s = P.getStr(mcl::IoSerializeHexStr); - CYBOZU_TEST_EQUAL(s.size(), (Fp::getByteSize() + adj) * 2); - Q.setStr(s, mcl::IoSerializeHexStr); - CYBOZU_TEST_EQUAL(P, Q); - } - P = -P; - { - std::string s = P.getStr(mcl::IoSerialize); - CYBOZU_TEST_EQUAL(s.size(), Fp::getByteSize() + adj); - Q.setStr(s, mcl::IoSerialize); - CYBOZU_TEST_EQUAL(P, Q); - } - { - std::string s = P.getStr(mcl::IoSerializeHexStr); - CYBOZU_TEST_EQUAL(s.size(), (Fp::getByteSize() + adj) * 2); - Q.setStr(s, mcl::IoSerializeHexStr); - CYBOZU_TEST_EQUAL(P, Q); - } - P.clear(); - { - std::string s = P.getStr(mcl::IoSerialize); - CYBOZU_TEST_EQUAL(s.size(), Fp::getByteSize() + adj); - CYBOZU_TEST_ASSERT(mcl::fp::isZeroArray(s.c_str(), s.size())); - Q.setStr(s, mcl::IoSerialize); - CYBOZU_TEST_EQUAL(P, Q); - } - { - std::string s = P.getStr(mcl::IoSerializeHexStr); - CYBOZU_TEST_EQUAL(s.size(), (Fp::getByteSize() + adj) * 2); - for (size_t i = 0; i < s.size(); i++) { - CYBOZU_TEST_EQUAL(s[i], '0'); - } - Q.setStr(s, mcl::IoSerializeHexStr); - CYBOZU_TEST_EQUAL(P, Q); - } - } - void ioMode() const - { - const Fp x(para.gx); - const Fp y(para.gy); - Ec P(x, y); - const mcl::IoMode tbl[] = { - mcl::IoBin, - mcl::IoDec, - mcl::IoHex, - mcl::IoArray, - mcl::IoArrayRaw, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - Fp::setIoMode(tbl[i]); - { - std::stringstream ss; - ss << P; - Ec Q; - ss >> Q; - CYBOZU_TEST_EQUAL(P, Q); - } - { - std::stringstream ss; - Ec Q; - Q.clear(); - ss << Q; - Ec R; - ss >> R; - CYBOZU_TEST_EQUAL(Q, R); - } - } - Fp::setIoMode(mcl::IoAuto); - } - void mulCT() const - { - Fp x(para.gx); - Fp y(para.gy); - Ec P(x, y), Q1, Q2; - for (int i = 0; i < 100; i++) { - Zn r = i; - Ec::mul(Q1, P, r); - Ec::mulCT(Q2, P, r); - CYBOZU_TEST_EQUAL(Q1, Q2); - } - } - void compare() const - { - Fp x(para.gx); - Fp y(para.gy); - Ec P1(x, y); - Ec P2(x, -y); - int c = Ec::compare(P1, P2); - int cx = Fp::compare(y, -y); - CYBOZU_TEST_EQUAL(c, cx); - c = Ec::compare(P2, P1); - cx = Fp::compare(-y, y); - CYBOZU_TEST_EQUAL(c, cx); - CYBOZU_TEST_EQUAL(Ec::compare(P1, P1), 0); - bool b1, b2; - b1 = P1 <= P2; - b2 = y <= -y; - CYBOZU_TEST_EQUAL(b1, b2); - b1 = P1 < P2; - b2 = y < -y; - CYBOZU_TEST_EQUAL(b1, b2); - CYBOZU_TEST_ASSERT(!(P1 < P1)); - CYBOZU_TEST_ASSERT((P1 <= P1)); - } - - template - void test(F f, const char *msg) const - { - const int N = 300000; - Fp x(para.gx); - Fp y(para.gy); - Ec P(x, y); - Ec Q = P + P + P; - clock_t begin = clock(); - for (int i = 0; i < N; i++) { - f(Q, P, Q); - } - clock_t end = clock(); - printf("%s %.2fusec\n", msg, (end - begin) / double(CLOCKS_PER_SEC) / N * 1e6); - } -/* -Affine : sandy-bridge -add 3.17usec -sub 2.43usec -dbl 3.32usec -mul 905.00usec -Jacobi -add 2.34usec -sub 2.65usec -dbl 1.56usec -mul 499.00usec -*/ - void run() const - { - cstr(); - ope(); - mul(); - neg_mul(); - mul_fp(); - squareRoot(); - str(); - ioMode(); - mulCT(); - compare(); - } -private: - Test(const Test&); - void operator=(const Test&); -}; - -void test_sub_sub(const mcl::EcParam& para, mcl::fp::Mode fpMode) -{ - puts("Proj"); - Test(para, fpMode, mcl::ec::Proj).run(); - puts("Jacobi"); - Test(para, fpMode, mcl::ec::Jacobi).run(); -} - -void test_sub(const mcl::EcParam *para, size_t paraNum) -{ - for (size_t i = 0; i < paraNum; i++) { - puts(para[i].name); - test_sub_sub(para[i], 
mcl::fp::FP_GMP); -#ifdef MCL_USE_LLVM - test_sub_sub(para[i], mcl::fp::FP_LLVM); - test_sub_sub(para[i], mcl::fp::FP_LLVM_MONT); -#endif -#ifdef MCL_USE_XBYAK - test_sub_sub(para[i], mcl::fp::FP_XBYAK); -#endif - } -} - -int g_partial = -1; - -CYBOZU_TEST_AUTO(all) -{ - if (g_partial & (1 << 3)) { - const struct mcl::EcParam para3[] = { - // mcl::ecparam::p160_1, - mcl::ecparam::secp160k1, - mcl::ecparam::secp192k1, - mcl::ecparam::NIST_P192, - }; - test_sub(para3, CYBOZU_NUM_OF_ARRAY(para3)); - } - - if (g_partial & (1 << 4)) { - const struct mcl::EcParam para4[] = { - mcl::ecparam::secp224k1, - mcl::ecparam::secp256k1, - mcl::ecparam::NIST_P224, - mcl::ecparam::NIST_P256, - }; - test_sub(para4, CYBOZU_NUM_OF_ARRAY(para4)); - } - -#if MCL_MAX_BIT_SIZE >= 384 - if (g_partial & (1 << 6)) { - const struct mcl::EcParam para6[] = { - // mcl::ecparam::secp384r1, - mcl::ecparam::NIST_P384, - }; - test_sub(para6, CYBOZU_NUM_OF_ARRAY(para6)); - } -#endif - -#if MCL_MAX_BIT_SIZE >= 521 - if (g_partial & (1 << 9)) { - const struct mcl::EcParam para9[] = { - // mcl::ecparam::secp521r1, - mcl::ecparam::NIST_P521, - }; - test_sub(para9, CYBOZU_NUM_OF_ARRAY(para9)); - } -#endif -} - -int main(int argc, char *argv[]) -{ - if (argc == 1) { - g_partial = -1; - } else { - g_partial = 0; - for (int i = 1; i < argc; i++) { - g_partial |= 1 << atoi(argv[i]); - } - } - return cybozu::test::autoRun.run(argc, argv); -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/ecdsa_c_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/ecdsa_c_test.cpp deleted file mode 100644 index e0af38182..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/ecdsa_c_test.cpp +++ /dev/null @@ -1,51 +0,0 @@ -#include -#include -#include - -template -void serializeTest(const T& x, const Serializer& serialize, const Deserializer& deserialize) -{ - char buf[128]; - size_t n = serialize(buf, sizeof(buf), &x); - CYBOZU_TEST_ASSERT(n > 0); - T y; - size_t m = deserialize(&y, buf, n); - CYBOZU_TEST_EQUAL(m, n); - CYBOZU_TEST_ASSERT(memcmp(&x, &y, n) == 0); -} - -CYBOZU_TEST_AUTO(ecdsa) -{ - int ret; - ret = ecdsaInit(); - CYBOZU_TEST_EQUAL(ret, 0); - ecdsaSecretKey sec; - ecdsaPublicKey pub; - ecdsaPrecomputedPublicKey *ppub; - ecdsaSignature sig; - const char *msg = "hello"; - mclSize msgSize = strlen(msg); - - ret = ecdsaSecretKeySetByCSPRNG(&sec); - CYBOZU_TEST_EQUAL(ret, 0); - serializeTest(sec, ecdsaSecretKeySerialize, ecdsaSecretKeyDeserialize); - - ecdsaGetPublicKey(&pub, &sec); - serializeTest(pub, ecdsaPublicKeySerialize, ecdsaPublicKeyDeserialize); - ecdsaSign(&sig, &sec, msg, msgSize); - serializeTest(sig, ecdsaSignatureSerialize, ecdsaSignatureDeserialize); - CYBOZU_TEST_ASSERT(ecdsaVerify(&sig, &pub, msg, msgSize)); - - ppub = ecdsaPrecomputedPublicKeyCreate(); - CYBOZU_TEST_ASSERT(ppub); - ret = ecdsaPrecomputedPublicKeyInit(ppub, &pub); - CYBOZU_TEST_EQUAL(ret, 0); - - CYBOZU_TEST_ASSERT(ecdsaVerifyPrecomputed(&sig, ppub, msg, msgSize)); - - sig.d[0]++; - CYBOZU_TEST_ASSERT(!ecdsaVerify(&sig, &pub, msg, msgSize)); - CYBOZU_TEST_ASSERT(!ecdsaVerifyPrecomputed(&sig, ppub, msg, msgSize)); - - ecdsaPrecomputedPublicKeyDestroy(ppub); -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/ecdsa_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/ecdsa_test.cpp deleted file mode 100644 index 332c9ee27..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/ecdsa_test.cpp +++ /dev/null @@ -1,69 +0,0 @@ -#define PUT(x) std::cout << #x "=" << x << std::endl; -#include -#include -void put(const void *buf, 
size_t bufSize) -{ - const unsigned char* p = (const unsigned char*)buf; - for (size_t i = 0; i < bufSize; i++) { - printf("%02x", p[i]); - } - printf("\n"); -} -#include -#include -#include - -using namespace mcl::ecdsa; - -CYBOZU_TEST_AUTO(ecdsa) -{ - init(); - SecretKey sec; - PublicKey pub; - sec.setByCSPRNG(); - getPublicKey(pub, sec); - Signature sig; - const std::string msg = "hello"; - sign(sig, sec, msg.c_str(), msg.size()); - CYBOZU_TEST_ASSERT(verify(sig, pub, msg.c_str(), msg.size())); - sig.s += 1; - CYBOZU_TEST_ASSERT(!verify(sig, pub, msg.c_str(), msg.size())); -} - -CYBOZU_TEST_AUTO(value) -{ - const std::string msg = "hello"; - const char *secStr = "83ecb3984a4f9ff03e84d5f9c0d7f888a81833643047acc58eb6431e01d9bac8"; - const char *pubxStr = "653bd02ba1367e5d4cd695b6f857d1cd90d4d8d42bc155d85377b7d2d0ed2e71"; - const char *pubyStr = "04e8f5da403ab78decec1f19e2396739ea544e2b14159beb5091b30b418b813a"; - const char *sigStr = "a598a8030da6d86c6bc7f2f5144ea549d28211ea58faa70ebf4c1e665c1fe9b5de5d79a2ba44e311d04fdca263639283965780bce9169822be9cc81756e95a24"; - - SecretKey sec; - sec.setStr(secStr, 16); - CYBOZU_TEST_EQUAL(sec.getStr(16), secStr); - PublicKey pub; - getPublicKey(pub, sec); - pub.normalize(); - Ec t(Fp(pubxStr, 16), Fp(pubyStr, 16)); - CYBOZU_TEST_EQUAL(pub, t); - Signature sig; - sig.r.setStr(std::string(sigStr, 64), 16); - sig.s.setStr(std::string(sigStr + 64, 64), 16); - PUT(sig); - CYBOZU_TEST_ASSERT(verify(sig, pub, msg.c_str(), msg.size())); -} - -CYBOZU_TEST_AUTO(bench) -{ - const std::string msg = "hello"; - SecretKey sec; - PublicKey pub; - PrecomputedPublicKey ppub; - sec.setByCSPRNG(); - getPublicKey(pub, sec); - ppub.init(pub); - Signature sig; - CYBOZU_BENCH_C("sign", 1000, sign, sig, sec, msg.c_str(), msg.size()); - CYBOZU_BENCH_C("pub.verify ", 1000, verify, sig, pub, msg.c_str(), msg.size()); - CYBOZU_BENCH_C("ppub.verify", 1000, verify, sig, ppub, msg.c_str(), msg.size()); -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/elgamal_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/elgamal_test.cpp deleted file mode 100644 index 9532fc597..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/elgamal_test.cpp +++ /dev/null @@ -1,155 +0,0 @@ -#include -#include -#include -#include -#include - -struct TagZn; -typedef mcl::FpT<> Fp; -typedef mcl::FpT Zn; -typedef mcl::EcT Ec; -typedef mcl::ElgamalT ElgamalEc; - -const mcl::EcParam& para = mcl::ecparam::secp192k1; -cybozu::RandomGenerator rg; - -CYBOZU_TEST_AUTO(testEc) -{ - Fp::init(para.p); - Zn::init(para.n); - Ec::init(para.a, para.b); - const Fp x0(para.gx); - const Fp y0(para.gy); - const size_t bitSize = Zn::getBitSize(); - const Ec P(x0, y0); - /* - Zn =
<P>
- */ - ElgamalEc::PrivateKey prv; - prv.init(P, bitSize, rg); - prv.setCache(0, 60000); - const ElgamalEc::PublicKey& pub = prv.getPublicKey(); - - const int m1 = 12345; - const int m2 = 17655; - ElgamalEc::CipherText c1, c2; - pub.enc(c1, m1, rg); - pub.enc(c2, m2, rg); - Zn dec1, dec2; - prv.dec(dec1, c1); - prv.dec(dec2, c2); - // dec(enc) = id - CYBOZU_TEST_EQUAL(dec1, m1); - CYBOZU_TEST_EQUAL(dec2, m2); - CYBOZU_TEST_EQUAL(prv.dec(c1), m1); - CYBOZU_TEST_EQUAL(prv.dec(c2), m2); - // iostream - { - ElgamalEc::PublicKey pub2; - ElgamalEc::PrivateKey prv2; - ElgamalEc::CipherText cc1, cc2; - { - std::stringstream ss; - ss << prv; - ss >> prv2; - } - Zn d; - prv2.dec(d, c1); - CYBOZU_TEST_EQUAL(d, m1); - { - std::stringstream ss; - ss << c1; - ss >> cc1; - } - d = 0; - prv2.dec(d, cc1); - CYBOZU_TEST_EQUAL(d, m1); - { - std::stringstream ss; - ss << pub; - ss >> pub2; - } - pub2.enc(cc2, m2, rg); - prv.dec(d, cc2); - CYBOZU_TEST_EQUAL(d, m2); - } - // enc(m1) enc(m2) = enc(m1 + m2) - c1.add(c2); - prv.dec(dec1, c1); - CYBOZU_TEST_EQUAL(dec1, m1 + m2); - // enc(m1) x = enc(m1 + x) - { - const int x = 555; - pub.add(c1, x); - prv.dec(dec1, c1); - CYBOZU_TEST_EQUAL(dec1, m1 + m2 + x); - } - // rerandomize - c1 = c2; - pub.rerandomize(c1, rg); - // verify c1 != c2 - CYBOZU_TEST_ASSERT(c1.c1 != c2.c1); - CYBOZU_TEST_ASSERT(c1.c2 != c2.c2); - prv.dec(dec1, c1); - // dec(c1) = dec(c2) - CYBOZU_TEST_EQUAL(dec1, m2); - - // check neg - { - ElgamalEc::CipherText c; - Zn m = 1234; - pub.enc(c, m, rg); - c.neg(); - Zn dec; - prv.dec(dec, c); - CYBOZU_TEST_EQUAL(dec, -m); - } - // check mul - { - ElgamalEc::CipherText c; - Zn m = 123; - int x = 111; - pub.enc(c, m, rg); - Zn dec; - prv.dec(dec, c); - c.mul(x); - prv.dec(dec, c); - m *= x; - CYBOZU_TEST_EQUAL(dec, m); - } - - // check negative value - for (int i = -10; i < 10; i++) { - ElgamalEc::CipherText c; - const Zn mm = i; - pub.enc(c, mm, rg); - Zn dec; - prv.dec(dec, c, 1000); - CYBOZU_TEST_EQUAL(dec, mm); - } - - // isZeroMessage - for (int m = 0; m < 10; m++) { - ElgamalEc::CipherText c0; - pub.enc(c0, m, rg); - if (m == 0) { - CYBOZU_TEST_ASSERT(prv.isZeroMessage(c0)); - } else { - CYBOZU_TEST_ASSERT(!prv.isZeroMessage(c0)); - } - } - // zkp - { - ElgamalEc::Zkp zkp; - ElgamalEc::CipherText c; - pub.encWithZkp(c, zkp, 0, rg); - CYBOZU_TEST_ASSERT(pub.verify(c, zkp)); - zkp.s0 += 1; - CYBOZU_TEST_ASSERT(!pub.verify(c, zkp)); - pub.encWithZkp(c, zkp, 1, rg); - CYBOZU_TEST_ASSERT(pub.verify(c, zkp)); - zkp.s0 += 1; - CYBOZU_TEST_ASSERT(!pub.verify(c, zkp)); - CYBOZU_TEST_EXCEPTION_MESSAGE(pub.encWithZkp(c, zkp, 2, rg), cybozu::Exception, "encWithZkp"); - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/fp_generator_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/fp_generator_test.cpp deleted file mode 100644 index 60ec5cd41..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/fp_generator_test.cpp +++ /dev/null @@ -1,207 +0,0 @@ -#include -#if MCL_SIZEOF_UNIT == 4 -// not support -#else -#include -#include -#include -#include -#include -#include "../src/fp_generator.hpp" -#include -#include -#include - -typedef mcl::FpT<> Fp; - -const int MAX_N = 4; - -const char *primeTable[] = { - "0x7fffffffffffffffffffffffffffffff", // 127bit(not full) - "0xffffffffffffffffffffffffffffff61", // 128bit(full) - "0xfffffffffffffffffffffffffffffffffffffffeffffee37", // 192bit(full) - "0x2523648240000001ba344d80000000086121000000000013a700000000000013", // 254bit(not full) -}; - -void strToArray(uint64_t *p, size_t n, const char 
*pStr) -{ - mpz_class x; - mcl::gmp::setStr(x, pStr, 16); - mcl::gmp::getArray(p, n, x); -} - -struct Int { - int vn; - uint64_t v[MAX_N]; - Int() - : vn(0) - { - } - explicit Int(int vn) - { - if (vn > MAX_N) { - printf("vn(%d) is too large\n", vn); - exit(1); - } - this->vn = vn; - } - void set(const char *str) { setStr(str); } - void set(const Fp& rhs) - { - mcl::gmp::getArray(v, MAX_N, rhs.getMpz()); - } - void set(const uint64_t* x) - { - for (int i = 0; i < vn; i++) v[i] = x[i]; - } - void setStr(const char *str) - { - strToArray(v, MAX_N, str); - } - std::string getStr() const - { - std::string ret; - for (int i = 0; i < vn; i++) { - ret += cybozu::itohex(v[vn - 1 - i], false); - } - return ret; - } - void put(const char *msg = "") const - { - if (msg) printf("%s=", msg); - printf("%s\n", getStr().c_str()); - } - bool operator==(const Int& rhs) const - { - if (vn != rhs.vn) return false; - for (int i = 0; i < vn; i++) { - if (v[i] != rhs.v[i]) return false; - } - return true; - } - bool operator!=(const Int& rhs) const { return !operator==(rhs); } - bool operator==(const Fp& rhs) const - { - Int t(vn); - t.set(rhs); - return operator==(t); - } - bool operator!=(const Fp& rhs) const { return !operator==(rhs); } -}; -static inline std::ostream& operator<<(std::ostream& os, const Int& x) -{ - return os << x.getStr(); -} - -void testAddSub(const mcl::fp::Op& op) -{ - Fp x, y; - const uint64_t *p = op.p; - Int mx(op.N), my(op.N); - x.setStr("0x8811aabb23427cc"); - y.setStr("0x8811aabb23427cc11"); - mx.set(x); - my.set(y); - for (int i = 0; i < 30; i++) { - CYBOZU_TEST_EQUAL(mx, x); - x += x; - op.fp_add(mx.v, mx.v, mx.v, p); - } - for (int i = 0; i < 30; i++) { - CYBOZU_TEST_EQUAL(mx, x); - x += y; - op.fp_add(mx.v, mx.v, my.v, p); - } - for (int i = 0; i < 30; i++) { - CYBOZU_TEST_EQUAL(my, y); - y -= x; - op.fp_sub(my.v, my.v, mx.v, p); - } -} - -void testNeg(const mcl::fp::Op& op) -{ - Fp x; - Int mx(op.N), my(op.N); - const char *tbl[] = { - "0", - "0x12346", - "0x11223344556677881122334455667788", - "0x0abbccddeeffaabb0000000000000000", - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - x.setStr(tbl[i]); - mx.set(x); - x = -x; - op.fp_neg(mx.v, mx.v, op.p); - CYBOZU_TEST_EQUAL(mx, x); - } -} - -#if 0 -void testMulI(const mcl::fp::FpGenerator& fg, int pn) -{ - cybozu::XorShift rg; -//printf("pn=%d, %p\n", pn, fg.mulUnit_); - for (int i = 0; i < 100; i++) { - uint64_t x[MAX_N]; - uint64_t z[MAX_N + 1]; - rg.read(x, pn); - uint64_t y = rg.get64(); - mpz_class mx; - mcl::gmp::setArray(mx, x, pn); - mpz_class my; - mcl::gmp::set(my, y); - mx *= my; - uint64_t d = fg.mulUnit_(z, x, y); - z[pn] = d; - mcl::gmp::setArray(my, z, pn + 1); - CYBOZU_TEST_EQUAL(mx, my); - } - { - uint64_t x[MAX_N]; - uint64_t z[MAX_N + 1]; - rg.read(x, pn); - uint64_t y = rg.get64(); - CYBOZU_BENCH_C("mulUnit", 10000000, fg.mulUnit_, z, x, y); - } -} -#endif - -void testShr1(const mcl::fp::Op& op, int pn) -{ - cybozu::XorShift rg; - for (int i = 0; i < 100; i++) { - uint64_t x[MAX_N]; - uint64_t z[MAX_N]; - rg.read(x, pn); - mpz_class mx; - mcl::gmp::setArray(mx, x, pn); - mx >>= 1; - op.fp_shr1(z, x); - mpz_class my; - mcl::gmp::setArray(my, z, pn); - CYBOZU_TEST_EQUAL(mx, my); - } -} - -void test(const char *pStr) -{ - printf("test %s\n", pStr); - Fp::init(pStr, mcl::fp::FP_XBYAK); - const mcl::fp::Op& op = Fp::getOp(); - const int pn = (int)op.N; - testAddSub(op); - testNeg(op); -// testMulI(*op.fg, pn); - testShr1(op, pn); -} - -CYBOZU_TEST_AUTO(all) -{ - if (!mcl::fp::isEnableJIT()) return; - 
for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(primeTable); i++) { - test(primeTable[i]); - } -} -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/test/fp_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/fp_test.cpp deleted file mode 100644 index dc1b01ef4..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/fp_test.cpp +++ /dev/null @@ -1,1046 +0,0 @@ -#define PUT(x) std::cout << #x "=" << (x) << std::endl -#define CYBOZU_TEST_DISABLE_AUTO_RUN -#include -#include -#include "../src/low_func.hpp" -#include "../src/proto.hpp" -#include -#include -#include -#include - -#ifdef _MSC_VER - #pragma warning(disable: 4127) // const condition -#endif - -typedef mcl::FpT<> Fp; - -CYBOZU_TEST_AUTO(sizeof) -{ - CYBOZU_TEST_EQUAL(sizeof(Fp), sizeof(mcl::fp::Unit) * Fp::maxSize); -} - -void cstrTest() -{ - const struct { - const char *str; - int val; - } tbl[] = { - { "0", 0 }, - { "1", 1 }, - { "123", 123 }, - { "0x123", 0x123 }, - { "0b10101", 21 }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - // string cstr - Fp x(tbl[i].str); - CYBOZU_TEST_EQUAL(x, tbl[i].val); - - // int cstr - Fp y(tbl[i].val); - CYBOZU_TEST_EQUAL(y, x); - - // copy cstr - Fp z(x); - CYBOZU_TEST_EQUAL(z, x); - - // assign int - Fp w; - w = tbl[i].val; - CYBOZU_TEST_EQUAL(w, x); - - // assign self - Fp u; - u = w; - CYBOZU_TEST_EQUAL(u, x); - - // conv - std::ostringstream os; - os << tbl[i].val; - - std::string str; - x.getStr(str); - CYBOZU_TEST_EQUAL(str, os.str()); - } - const struct { - const char *str; - int val; - } tbl2[] = { - { "-123", 123 }, - { "-0x123", 0x123 }, - { "-0b10101", 21 }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl2); i++) { - Fp x(tbl2[i].str); - x = -x; - CYBOZU_TEST_EQUAL(x, tbl2[i].val); - } -} - -void setStrTest() -{ - const struct { - const char *in; - int out; - int base; - } tbl[] = { - { "100", 100, 0 }, // set base = 10 if base = 0 - { "100", 4, 2 }, - { "100", 256, 16 }, - { "0b100", 4, 0 }, - { "0b100", 4, 2 }, - { "0x100", 256, 0 }, - { "0x100", 256, 16 }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - Fp x; - x.setStr(tbl[i].in, tbl[i].base); - CYBOZU_TEST_EQUAL(x, tbl[i].out); - } - // use prefix if base conflicts with prefix - { - Fp x; - CYBOZU_TEST_EXCEPTION(x.setStr("0b100", 16), cybozu::Exception); - CYBOZU_TEST_EXCEPTION(x.setStr("0b100", 10), cybozu::Exception); - CYBOZU_TEST_EXCEPTION(x.setStr("0x100", 2), cybozu::Exception); - CYBOZU_TEST_EXCEPTION(x.setStr("0x100", 10), cybozu::Exception); - } -} - -void streamTest() -{ - const struct { - const char *in; - int out10; - int out16; - } tbl[] = { - { "100", 100, 256 }, // set base = 10 if base = 0 - { "0x100", 256, 256 }, - }; - Fp::setIoMode(0); - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - { - std::istringstream is(tbl[i].in); - Fp x; - is >> x; - CYBOZU_TEST_EQUAL(x, tbl[i].out10); - } - { - std::istringstream is(tbl[i].in); - Fp x; - is >> std::hex >> x; - CYBOZU_TEST_EQUAL(x, tbl[i].out16); - } - } - // use prefix if base conflicts with prefix - std::istringstream is("0b100"); - Fp x; - CYBOZU_TEST_EXCEPTION(is >> std::hex >> x, cybozu::Exception); - { - std::ostringstream os; - os << Fp(123); - CYBOZU_TEST_EQUAL(os.str(), "123"); - } - { - std::ostringstream os; - os << std::hex << Fp(0x123); - CYBOZU_TEST_EQUAL(os.str(), "123"); - } - { - std::ostringstream os; - os << std::hex << std::showbase << Fp(0x123); - CYBOZU_TEST_EQUAL(os.str(), "0x123"); - } -} - -void ioModeTest() -{ - Fp x(123); - const struct { - mcl::IoMode ioMode; - std::string 
expected; - } tbl[] = { - { mcl::IoBin, "1111011" }, - { mcl::IoDec, "123" }, - { mcl::IoHex, "7b" }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - Fp::setIoMode(tbl[i].ioMode); - for (int j = 0; j < 2; j++) { - std::stringstream ss; - if (j == 1) { - ss << std::hex; - } - ss << x; - CYBOZU_TEST_EQUAL(ss.str(), tbl[i].expected); - Fp y; - y.clear(); - ss >> y; - CYBOZU_TEST_EQUAL(x, y); - } - } - for (int i = 0; i < 2; i++) { - if (i == 0) { - Fp::setIoMode(mcl::IoArray); - } else { - Fp::setIoMode(mcl::IoArrayRaw); - } - std::stringstream ss; - ss << x; - CYBOZU_TEST_EQUAL(ss.str().size(), Fp::getByteSize()); - Fp y; - ss >> y; - CYBOZU_TEST_EQUAL(x, y); - } - Fp::setIoMode(mcl::IoAuto); -} - -void edgeTest() -{ - const mpz_class& m = Fp::getOp().mp; - /* - real mont - 0 0 - 1 R^-1 - R 1 - -1 -R^-1 - -R -1 - */ - mpz_class t = 1; - const size_t N = Fp::getUnitSize(); - const mpz_class R = (t << (N * mcl::fp::UnitBitSize)) % m; - const mpz_class tbl[] = { - 0, 1, R, m - 1, m - R - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const mpz_class& x = tbl[i]; - for (size_t j = i; j < CYBOZU_NUM_OF_ARRAY(tbl); j++) { - const mpz_class& y = tbl[j]; - mpz_class z = (x * y) % m; - Fp xx, yy; - xx.setMpz(x); - yy.setMpz(y); - Fp zz = xx * yy; - zz.getMpz(t); - CYBOZU_TEST_EQUAL(z, t); - } - } - t = m; - t /= 2; - Fp x; - x.setMpz(t); - CYBOZU_TEST_EQUAL(x * 2, -1); - t += 1; - x.setMpz(t); - CYBOZU_TEST_EQUAL(x * 2, 1); -} - -void convTest() -{ -#if 1 - const char *bin, *hex, *dec; - if (Fp::getBitSize() <= 117) { - bin = "0b1000000000000000000000000000000000000000000000000000000000001110"; - hex = "0x800000000000000e"; - dec = "9223372036854775822"; - } else { - bin = "0b100100011010001010110011110001001000000010010001101000101011001111000100100000001001000110100010101100111100010010000"; - hex = "0x123456789012345678901234567890"; - dec = "94522879687365475552814062743484560"; - } -#else - const char *bin = "0b1001000110100"; - const char *hex = "0x1234"; - const char *dec = "4660"; -#endif - Fp b(bin); - Fp h(hex); - Fp d(dec); - CYBOZU_TEST_EQUAL(b, h); - CYBOZU_TEST_EQUAL(b, d); - - std::string str; - b.getStr(str, mcl::IoBinPrefix); - CYBOZU_TEST_EQUAL(str, bin); - b.getStr(str); - CYBOZU_TEST_EQUAL(str, dec); - b.getStr(str, mcl::IoHexPrefix); - CYBOZU_TEST_EQUAL(str, hex); -} - -void compareTest() -{ - { - const struct { - int lhs; - int rhs; - int cmp; - } tbl[] = { - { 0, 0, 0 }, - { 1, 0, 1 }, - { 0, 1, -1 }, - { -1, 0, 1 }, // m-1, 0 - { 0, -1, -1 }, // 0, m-1 - { 123, 456, -1 }, - { 456, 123, 1 }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const Fp x(tbl[i].lhs); - const Fp y(tbl[i].rhs); - const int cmp = tbl[i].cmp; - if (cmp == 0) { - CYBOZU_TEST_EQUAL(x, y); - CYBOZU_TEST_ASSERT(x >= y); - CYBOZU_TEST_ASSERT(x <= y); - } else if (cmp > 0) { - CYBOZU_TEST_ASSERT(x > y); - CYBOZU_TEST_ASSERT(x >= y); - } else { - CYBOZU_TEST_ASSERT(x < y); - CYBOZU_TEST_ASSERT(x <= y); - } - } - } - { - Fp x(5); - CYBOZU_TEST_ASSERT(x < 10); - CYBOZU_TEST_ASSERT(x == 5); - CYBOZU_TEST_ASSERT(x > 2); - } - { - Fp x(1); - CYBOZU_TEST_ASSERT(x.isOne()); - x = 2; - CYBOZU_TEST_ASSERT(!x.isOne()); - } - { - const struct { - int v; - bool expected; - } tbl[] = { - { 0, false }, - { 1, false }, - { -1, true }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - Fp x = tbl[i].v; - CYBOZU_TEST_EQUAL(x.isNegative(), tbl[i].expected); - } - std::string str; - Fp::getModulo(str); - char buf[1024]; - size_t n = Fp::getModulo(buf, sizeof(buf)); 
- CYBOZU_TEST_EQUAL(n, str.size()); - CYBOZU_TEST_EQUAL(buf, str.c_str()); - mpz_class half(str); - half = (half - 1) / 2; - Fp x; - x.setMpz(half - 1); - CYBOZU_TEST_ASSERT(!x.isNegative()); - x.setMpz(half); - CYBOZU_TEST_ASSERT(!x.isNegative()); - x.setMpz(half + 1); - CYBOZU_TEST_ASSERT(x.isNegative()); - } -} - -void moduloTest(const char *pStr) -{ - std::string str; - Fp::getModulo(str); - CYBOZU_TEST_EQUAL(str, mcl::gmp::getStr(mpz_class(pStr))); -} - -void opeTest() -{ - const struct { - int x; - int y; - int add; // x + y - int sub; // x - y - int mul; // x * y - int sqr; // x^2 - } tbl[] = { - { 0, 1, 1, -1, 0, 0 }, - { 9, 5, 14, 4, 45, 81 }, - { 10, 13, 23, -3, 130, 100 }, - { 2000, 1000, 3000, 1000, 2000 * 1000, 2000 * 2000 }, - { 12345, 9999, 12345 + 9999, 12345 - 9999, 12345 * 9999, 12345 * 12345 }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const Fp x(tbl[i].x); - const Fp y(tbl[i].y); - Fp z; - Fp::add(z, x, y); - CYBOZU_TEST_EQUAL(z, tbl[i].add); - Fp::sub(z, x, y); - CYBOZU_TEST_EQUAL(z, tbl[i].sub); - Fp::mul(z, x, y); - CYBOZU_TEST_EQUAL(z, tbl[i].mul); - - Fp r; - Fp::inv(r, y); - Fp::mul(z, z, r); - CYBOZU_TEST_EQUAL(z, tbl[i].x); - z = x + y; - CYBOZU_TEST_EQUAL(z, tbl[i].add); - z = x - y; - CYBOZU_TEST_EQUAL(z, tbl[i].sub); - z = x * y; - CYBOZU_TEST_EQUAL(z, tbl[i].mul); - - Fp::sqr(z, x); - CYBOZU_TEST_EQUAL(z, tbl[i].sqr); - - z = x / y; - z *= y; - CYBOZU_TEST_EQUAL(z, tbl[i].x); - } - if (!Fp::isFullBit()) { - Fp x(5), y(3), z; - Fp::addPre(z, x, y); - if (Fp::compareRaw(z, Fp::getP()) >= 0) { - Fp::subPre(z, z, Fp::getP()); - } - CYBOZU_TEST_EQUAL(z, Fp(8)); - if (Fp::compareRaw(x, y) < 0) { - Fp::addPre(x, x, Fp::getP()); - } - Fp::subPre(x, x, y); - CYBOZU_TEST_EQUAL(x, Fp(2)); - } -} - -struct tag2; - -void powTest() -{ - Fp x, y, z; - x = 12345; - z = 1; - for (int i = 0; i < 100; i++) { - Fp::pow(y, x, i); - CYBOZU_TEST_EQUAL(y, z); - z *= x; - } - x = z; - Fp::pow(z, x, Fp::getOp().mp - 1); - CYBOZU_TEST_EQUAL(z, 1); - Fp::pow(z, x, Fp::getOp().mp); - CYBOZU_TEST_EQUAL(z, x); - typedef mcl::FpT Fp_other; - Fp_other::init("1009"); - x = 5; - Fp_other n = 3; - z = 3; - Fp::pow(x, x, z); - CYBOZU_TEST_EQUAL(x, 125); - x = 5; - Fp::pow(x, x, n); - CYBOZU_TEST_EQUAL(x, 125); -} - -void mulUnitTest() -{ - Fp x(-1), y, z; - for (unsigned int u = 0; u < 20; u++) { - Fp::mul(y, x, u); - Fp::mulUnit(z, x, u); - CYBOZU_TEST_EQUAL(y, z); - } -} - -void powNegTest() -{ - Fp x, y, z; - x = 12345; - z = 1; - Fp rx = 1 / x; - for (int i = 0; i < 100; i++) { - Fp::pow(y, x, -i); - CYBOZU_TEST_EQUAL(y, z); - z *= rx; - } -} - -void powFpTest() -{ - Fp x, y, z; - x = 12345; - z = 1; - for (int i = 0; i < 100; i++) { - Fp::pow(y, x, Fp(i)); - CYBOZU_TEST_EQUAL(y, z); - z *= x; - } -} - -void powGmp() -{ - Fp x, y, z; - x = 12345; - z = 1; - for (int i = 0; i < 100; i++) { - Fp::pow(y, x, mpz_class(i)); - CYBOZU_TEST_EQUAL(y, z); - z *= x; - } -} - -struct TagAnother; - -void anotherFpTest(mcl::fp::Mode mode) -{ - typedef mcl::FpT G; - G::init("13", mode); - G a = 3; - G b = 9; - a *= b; - CYBOZU_TEST_EQUAL(a, 1); -} - -void setArrayTest1() -{ - char b1[] = { 0x56, 0x34, 0x12 }; - Fp x; - x.setArray(b1, 3); - CYBOZU_TEST_EQUAL(x, 0x123456); - int b2[] = { 0x12, 0x34 }; - x.setArray(b2, 2); - CYBOZU_TEST_EQUAL(x, Fp("0x3400000012")); -} - -void setArrayTest2(mcl::fp::Mode mode) -{ - Fp::init("0x10000000000001234567a5", mode); - const struct { - uint32_t buf[3]; - size_t bufN; - const char *expected; - } tbl[] = { - { { 0x234567a4, 0x00000001, 
0x00100000}, 1, "0x234567a4" }, - { { 0x234567a4, 0x00000001, 0x00100000}, 2, "0x1234567a4" }, - { { 0x234567a4, 0x00000001, 0x00080000}, 3, "0x08000000000001234567a4" }, - { { 0x234567a4, 0x00000001, 0x00100000}, 3, "0x10000000000001234567a4" }, - }; - Fp x; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - x.setArray(tbl[i].buf, tbl[i].bufN); - CYBOZU_TEST_EQUAL(x, Fp(tbl[i].expected)); - } - uint32_t large[3] = { 0x234567a5, 0x00000001, 0x00100000}; - CYBOZU_TEST_EXCEPTION(x.setArray(large, 3), cybozu::Exception); -} - -void setArrayMaskTest1() -{ - char b1[] = { 0x56, 0x34, 0x12 }; - Fp x; - x.setArrayMask(b1, 3); - CYBOZU_TEST_EQUAL(x, 0x123456); - int b2[] = { 0x12, 0x34 }; - x.setArrayMask(b2, 2); - CYBOZU_TEST_EQUAL(x, Fp("0x3400000012")); -} - -void setArrayMaskTest2(mcl::fp::Mode mode) -{ - Fp::init("0x10000000000001234567a5", mode); - const struct { - uint32_t buf[3]; - size_t bufN; - const char *expected; - } tbl[] = { - { { 0x234567a4, 0x00000001, 0x00100000}, 1, "0x234567a4" }, - { { 0x234567a4, 0x00000001, 0x00100000}, 2, "0x1234567a4" }, - { { 0x234567a4, 0x00000001, 0x00100000}, 3, "0x10000000000001234567a4" }, - { { 0x234567a5, 0xfffffff1, 0xffffffff}, 3, "0x0ffffffffffff1234567a5" }, - }; - Fp x; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - x.setArrayMask(tbl[i].buf, tbl[i].bufN); - CYBOZU_TEST_EQUAL(x, Fp(tbl[i].expected)); - } -} - -void setArrayModTest() -{ - const mpz_class& p = Fp::getOp().mp; - const mpz_class tbl[] = { - 0, - 1, - p - 1, - p, - p + 1, - p * 2 - 1, - p * 2, - p * 2 + 1, - p * (p - 1) - 1, - p * (p - 1), - p * (p - 1) + 1, - p * p - 1, - p * p, - p * p + 1, - (mpz_class(1) << Fp::getOp().N * mcl::fp::UnitBitSize * 2) - 1, - }; - const size_t unitByteSize = sizeof(mcl::fp::Unit); - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const mpz_class& x = tbl[i]; - const mcl::fp::Unit *px = mcl::gmp::getUnit(x); - const size_t xn = mcl::gmp::getUnitSize(x); - const size_t xByteSize = xn * unitByteSize; - const size_t fpByteSize = unitByteSize * Fp::getOp().N; - Fp y; - bool b; - y.setArray(&b, px, xn, mcl::fp::Mod); - bool expected = xByteSize <= fpByteSize * 2; - CYBOZU_TEST_EQUAL(b, expected); - if (!b) continue; - CYBOZU_TEST_EQUAL(y.getMpz(), x % p); - } -} - -CYBOZU_TEST_AUTO(set64bit) -{ - Fp::init("0x1000000000000000000f"); - const struct { - const char *p; - int64_t i; - } tbl[] = { - { "0x1234567812345678", int64_t(0x1234567812345678ull) }, - { "0xfffedcba987edcba997", -int64_t(0x1234567812345678ull) }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - Fp x(tbl[i].p); - Fp y(tbl[i].i); - CYBOZU_TEST_EQUAL(x, y); - } -} - -void getUint64Test() -{ - const uint64_t tbl[] = { - 0, 1, 123, 0xffffffff, int64_t(0x7fffffffffffffffull) - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - uint64_t a = tbl[i]; - Fp x(a); - uint64_t b = x.getUint64(); - CYBOZU_TEST_ASSERT(!x.isNegative()); - CYBOZU_TEST_EQUAL(a, b); - } - { - Fp x("0xffffffffffffffff"); - CYBOZU_TEST_EQUAL(x.getUint64(), uint64_t(0xffffffffffffffffull)); - } - { - Fp x("0x10000000000000000"); - CYBOZU_TEST_EXCEPTION(x.getUint64(), cybozu::Exception); - x = -1; - CYBOZU_TEST_EXCEPTION(x.getUint64(), cybozu::Exception); - } - { - Fp x("0x10000000000000000"); - bool b = true; - CYBOZU_TEST_EQUAL(x.getUint64(&b), 0u); - CYBOZU_TEST_ASSERT(!b); - } -} - -void getInt64Test() -{ - const int64_t tbl[] = { - 0, 1, 123, 0xffffffff, int64_t(0x7fffffffffffffffull), - -1, -2, -12345678, int64_t(-9223372036854775808ull)/*-int64_t(1) << 63*/, - 
}; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - int64_t a = tbl[i]; - Fp x(a); - CYBOZU_TEST_EQUAL(x.isNegative(), a < 0); - int64_t b = x.getInt64(); - CYBOZU_TEST_EQUAL(a, b); - } - { - Fp x("0x8000000000000000"); - CYBOZU_TEST_EXCEPTION(x.getInt64(), cybozu::Exception); - } - { - Fp x("0x8000000000000000"); - bool b = true; - CYBOZU_TEST_EQUAL(x.getInt64(&b), 0u); - CYBOZU_TEST_ASSERT(!b); - } -} - -void divBy2Test() -{ - const int tbl[] = { -4, -3, -2, -1, 0, 1, 2, 3 }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - Fp x(tbl[i]), y; - Fp::divBy2(y, x); - y *= 2; - CYBOZU_TEST_EQUAL(y, x); - y = x; - Fp::divBy2(y, y); - y *= 2; - CYBOZU_TEST_EQUAL(y, x); - } -} - -void getStrTest() -{ - Fp x(0); - std::string str; - str = x.getStr(); - CYBOZU_TEST_EQUAL(str, "0"); - str = x.getStr(mcl::IoBinPrefix); - CYBOZU_TEST_EQUAL(str, "0b0"); - str = x.getStr(mcl::IoBin); - CYBOZU_TEST_EQUAL(str, "0"); - str = x.getStr(mcl::IoHexPrefix); - CYBOZU_TEST_EQUAL(str, "0x0"); - str = x.getStr(mcl::IoHex); - CYBOZU_TEST_EQUAL(str, "0"); - - x = 123; - str = x.getStr(); - CYBOZU_TEST_EQUAL(str, "123"); - str = x.getStr(mcl::IoBinPrefix); - CYBOZU_TEST_EQUAL(str, "0b1111011"); - str = x.getStr(mcl::IoBin); - CYBOZU_TEST_EQUAL(str, "1111011"); - str = x.getStr(mcl::IoHexPrefix); - CYBOZU_TEST_EQUAL(str, "0x7b"); - str = x.getStr(mcl::IoHex); - CYBOZU_TEST_EQUAL(str, "7b"); - - { - std::ostringstream os; - os << x; - CYBOZU_TEST_EQUAL(os.str(), "123"); - } - { - std::ostringstream os; - os << std::hex << std::showbase << x; - CYBOZU_TEST_EQUAL(os.str(), "0x7b"); - } - { - std::ostringstream os; - os << std::hex << x; - CYBOZU_TEST_EQUAL(os.str(), "7b"); - } - const char *tbl[] = { - "0x0", - "0x5", - "0x123", - "0x123456789012345679adbc", - "0xffffffff26f2fc170f69466a74defd8d", - "0x100000000000000000000000000000033", - "0x11ee12312312940000000000000000000000000002342343" - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const char *s = tbl[i]; - mpz_class mx(s); - if (mx >= Fp::getOp().mp) continue; - Fp y(s); - std::string xs, ys; - mcl::gmp::getStr(xs, mx, 16); - y.getStr(ys, 16); - CYBOZU_TEST_EQUAL(xs, ys); - } -} - -void setHashOfTest() -{ - const std::string msgTbl[] = { - "", "abc", "111111111111111111111111111111111111", - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(msgTbl); i++) { - size_t bitSize = Fp::getBitSize(); - std::string digest; - if (bitSize <= 256) { - digest = cybozu::Sha256().digest(msgTbl[i]); - } else { - digest = cybozu::Sha512().digest(msgTbl[i]); - } - Fp x, y; - x.setArrayMask(digest.c_str(), digest.size()); - y.setHashOf(msgTbl[i]); - CYBOZU_TEST_EQUAL(x, y); - } -} - -CYBOZU_TEST_AUTO(getArray) -{ - const struct { - const char *s; - uint32_t v[4]; - size_t vn; - } tbl[] = { - { "0", { 0, 0, 0, 0 }, 1 }, - { "1234", { 1234, 0, 0, 0 }, 1 }, - { "0xaabbccdd12345678", { 0x12345678, 0xaabbccdd, 0, 0 }, 2 }, - { "0x11112222333344445555666677778888", { 0x77778888, 0x55556666, 0x33334444, 0x11112222 }, 4 }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - mpz_class x(tbl[i].s); - const size_t bufN = 8; - uint32_t buf[bufN]; - mcl::gmp::getArray(buf, bufN, x); - size_t n = mcl::fp::getNonZeroArraySize(buf, bufN); - CYBOZU_TEST_EQUAL(n, tbl[i].vn); - CYBOZU_TEST_EQUAL_ARRAY(buf, tbl[i].v, n); - } -} - -void serializeTest() -{ - const char *tbl[] = { "0", "-1", "123" }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - char buf[1024]; - Fp x, y; - x.setStr(tbl[i]); - size_t n = x.serialize(buf, sizeof(buf)); - 
CYBOZU_TEST_EQUAL(n, Fp::getByteSize()); - y.deserialize(buf, n); - CYBOZU_TEST_EQUAL(x, y); - - n = x.serialize(buf, sizeof(buf), mcl::IoSerializeHexStr); - CYBOZU_TEST_EQUAL(n, Fp::getByteSize() * 2); - y.deserialize(buf, n, mcl::IoSerializeHexStr); - CYBOZU_TEST_EQUAL(x, y); - } -} - -void modpTest() -{ - const mpz_class& p = Fp::getOp().mp; - const mpz_class tbl[] = { - 0, - 1, - p - 1, - p, - p + 1, - p * 2 - 1, - p * 2, - p * 2 + 1, - p * (p - 1) - 1, - p * (p - 1), - p * (p - 1) + 1, - p * p - 1, - p * p, - p * p + 1, - (mpz_class(1) << Fp::getOp().N * mcl::fp::UnitBitSize * 2) - 1, - }; - mcl::Modp modp; - modp.init(p); - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const mpz_class& x = tbl[i]; - mpz_class r1, r2; - r1 = x % p; - modp.modp(r2, x); - CYBOZU_TEST_EQUAL(r1, r2); - } -} - -#include -#if (defined(MCL_USE_LLVM) || defined(MCL_USE_XBYAK)) && (MCL_MAX_BIT_SIZE >= 521) -CYBOZU_TEST_AUTO(mod_NIST_P521) -{ - const size_t len = 521; - const size_t N = len / mcl::fp::UnitBitSize; - const char *tbl[] = { - "0", - "0xffffffff", - "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0", - "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", - "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "0x20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "0x20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", - "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00111423424", - "0x11111111111111112222222222222222333333333333333344444444444444445555555555555555666666666666666677777777777777778888888888888888aaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbccccccccccccccccddddddddddddddddeeeeeeeeeeeeeeeeffffffffffffffff1234712341234123412341234123412341234", - "0x3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - }; - const char *p = "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"; - Fp::init(p, mcl::fp::FP_XBYAK); - const mpz_class mp(p); - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - mpz_class mx(tbl[i]); - mcl::fp::Unit in[N * 2 + 1] = {}; - mcl::fp::Unit ok[N + 1]; - mcl::fp::Unit ex[N + 1]; - mcl::gmp::getArray(in, N * 2 + 1, mx); - mpz_class my = mx % mp; - mcl::gmp::getArray(ok, N + 1, my); -#ifdef MCL_USE_LLVM - mcl_fpDbl_mod_NIST_P521L(ex, in, Fp::getOp().p); - CYBOZU_TEST_EQUAL_ARRAY(ex, ok, N + 1); -#endif -#ifdef MCL_USE_XBYAK - const mcl::fp::Op& op = Fp::getOp(); - if (!op.isMont) { - op.fpDbl_mod(ex, in, op.p); - CYBOZU_TEST_EQUAL_ARRAY(ex, ok, N + 1); - } -#endif - } -} -#endif - -void sub(mcl::fp::Mode mode) -{ - printf("mode=%s\n", mcl::fp::ModeToStr(mode)); - const char *tbl[] = { - // N = 2 - "0x0000000000000001000000000000000d", - "0x7fffffffffffffffffffffffffffffff", - 
"0x8000000000000000000000000000001d", - "0xffffffffffffffffffffffffffffff61", - - // N = 3 - "0x000000000000000100000000000000000000000000000033", // min prime - "0x00000000fffffffffffffffffffffffffffffffeffffac73", - "0x0000000100000000000000000001b8fa16dfab9aca16b6b3", - "0x000000010000000000000000000000000000000000000007", - "0x30000000000000000000000000000000000000000000002b", - "0x70000000000000000000000000000000000000000000001f", - "0x800000000000000000000000000000000000000000000005", - "0xfffffffffffffffffffffffffffffffffffffffeffffee37", - "0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d", - "0xffffffffffffffffffffffffffffffffffffffffffffff13", // max prime - - // N = 4 - "0x0000000000000001000000000000000000000000000000000000000000000085", // min prime - "0x2523648240000001ba344d80000000086121000000000013a700000000000013", - "0x7523648240000001ba344d80000000086121000000000013a700000000000017", - "0x800000000000000000000000000000000000000000000000000000000000005f", - "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff43", // max prime -#if MCL_MAX_BIT_SIZE >= 384 - - // N = 6 - "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", -#endif - -#if MCL_MAX_BIT_SIZE >= 521 - // N = 9 - "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", -#endif - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const char *pStr = tbl[i]; - printf("prime=%s\n", pStr); - Fp::init(pStr, mode); - cstrTest(); - setStrTest(); - streamTest(); - ioModeTest(); - edgeTest(); - convTest(); - compareTest(); - moduloTest(pStr); - opeTest(); - mulUnitTest(); - powTest(); - powNegTest(); - powFpTest(); - powGmp(); - setArrayTest1(); - setArrayMaskTest1(); - setArrayModTest(); - getUint64Test(); - getInt64Test(); - divBy2Test(); - getStrTest(); - setHashOfTest(); - serializeTest(); - modpTest(); - } - anotherFpTest(mode); - setArrayTest2(mode); - setArrayMaskTest2(mode); -} - -std::string g_mode; - -CYBOZU_TEST_AUTO(main) -{ - if (g_mode.empty() || g_mode == "auto") { - sub(mcl::fp::FP_AUTO); - } - if (g_mode.empty() || g_mode == "gmp") { - sub(mcl::fp::FP_GMP); - } - if (g_mode.empty() || g_mode == "gmp_mont") { - sub(mcl::fp::FP_GMP_MONT); - } -#ifdef MCL_USE_LLVM - if (g_mode.empty() || g_mode == "llvm") { - sub(mcl::fp::FP_LLVM); - } - if (g_mode.empty() || g_mode == "llvm_mont") { - sub(mcl::fp::FP_LLVM_MONT); - } -#endif -#ifdef MCL_USE_XBYAK - if (g_mode.empty() || g_mode == "xbyak") { - sub(mcl::fp::FP_XBYAK); - } -#endif -} - -CYBOZU_TEST_AUTO(copyUnitToByteAsLE) -{ - using namespace mcl::fp; -#if MCL_SIZEOF_UNIT == 4 - const Unit src[] = { 0x12345678, 0xaabbccdd, 0xffeeddcc, 0x87654321 }; -#else - const Unit src[] = { uint64_t(0xaabbccdd12345678ull), uint64_t(0x87654321ffeeddcc) }; -#endif - const uint8_t ok[] = { 0x78, 0x56, 0x34, 0x12, 0xdd, 0xcc, 0xbb, 0xaa, 0xcc, 0xdd, 0xee, 0xff, 0x21, 0x43, 0x65, 0x87 }; - const size_t okN = CYBOZU_NUM_OF_ARRAY(ok); - for (size_t i = 0; i < okN; i++) { - uint8_t buf[okN] = {}; - copyUnitToByteAsLE(buf, src, i); - CYBOZU_TEST_EQUAL_ARRAY(ok, buf, i); - } - mcl::fp::Unit dst[2]; - for (size_t i = 1; i <= sizeof(dst); i++) { - memset(dst, 0xff, sizeof(dst)); - mcl::fp::copyByteToUnitAsLE(dst, ok, i); - if (i < sizeof(Unit)) { - CYBOZU_TEST_EQUAL(src[0] & ((uint64_t(1) << (i * 8)) - 1), dst[0]); - CYBOZU_TEST_EQUAL(dst[1], Unit(-1)); - continue; - } - CYBOZU_TEST_EQUAL(dst[0], src[0]); - if (i == sizeof(Unit)) { 
- CYBOZU_TEST_EQUAL(dst[1], Unit(-1)); - continue; - } - if (i < sizeof(dst)) { - CYBOZU_TEST_EQUAL(src[1] & ((uint64_t(1) << ((i - sizeof(Unit)) * 8)) - 1), dst[1]); - continue; - } - CYBOZU_TEST_EQUAL(src[1], dst[1]); - } - dst[0] = 1; - copyByteToUnitAsLE(dst, ok, 0); - CYBOZU_TEST_EQUAL(dst[0], 1u); -} - -int main(int argc, char *argv[]) - try -{ - cybozu::Option opt; - opt.appendOpt(&g_mode, "", "m", ": mode(auto/gmp/gmp_mont/llvm/llvm_mont/xbyak)"); - opt.appendHelp("h", ": show this message"); - if (!opt.parse(argc, argv)) { - opt.usage(); - return 1; - } - return cybozu::test::autoRun.run(argc, argv); -} catch (std::exception& e) { - printf("ERR %s\n", e.what()); - return 1; -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/fp_tower_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/fp_tower_test.cpp deleted file mode 100644 index a7123f7a5..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/fp_tower_test.cpp +++ /dev/null @@ -1,477 +0,0 @@ -#define PUT(x) std::cout << #x "=" << (x) << std::endl -#define CYBOZU_TEST_DISABLE_AUTO_RUN -#include -#include -#include -#include -#include -#include - -#ifdef _MSC_VER - #pragma warning(disable : 4456) -#endif - -#if MCL_MAX_BIT_SIZE >= 768 -typedef mcl::FpT Fp; -#else -typedef mcl::FpT Fp; -#endif -typedef mcl::Fp2T Fp2; -typedef mcl::FpDblT FpDbl; -typedef mcl::Fp6T Fp6; -typedef mcl::Fp12T Fp12; - -bool g_benchOnly = false; - -void testFp2() -{ - using namespace mcl; - puts(__FUNCTION__); -#if MCL_MAX_BIT_SIZE < 768 - const size_t FpSize = 48; - CYBOZU_TEST_EQUAL(sizeof(Fp), FpSize); - CYBOZU_TEST_EQUAL(sizeof(Fp2), FpSize * 2); - CYBOZU_TEST_EQUAL(sizeof(Fp6), FpSize * 6); - CYBOZU_TEST_EQUAL(sizeof(Fp12), FpSize * 12); -#endif - Fp2 x, y, z; - x.a = 1; - x.b = 2; - - { - std::stringstream os; - os << x; - os >> y; - CYBOZU_TEST_EQUAL(x, y); - } - y.a = 3; - y.b = 4; - /* - x = 1 + 2i - y = 3 + 4i - */ - Fp2::add(z, x, y); - CYBOZU_TEST_EQUAL(z, Fp2(4, 6)); - Fp2::sub(z, x, y); - CYBOZU_TEST_EQUAL(z, Fp2(-2, -2)); - Fp2::mul(z, x, y); - /* - (1 + 2i)(3 + 4i) = (3 - 8) + (4 + 6)i = -5 + 10i - */ - CYBOZU_TEST_EQUAL(z, Fp2(-5, 10)); - Fp2::neg(z, z); - CYBOZU_TEST_EQUAL(z, Fp2(5, -10)); - /* - xi = xi_a + i - (1 - 2i)(xi_a + i) = (xi_a + 2) + (1 - 2 xi_a)i - */ - z = Fp2(1, -2); - Fp2::mul_xi(z, z); - Fp a = Fp2::get_xi_a(); - CYBOZU_TEST_EQUAL(z, Fp2(a + 2, a * (-2) + 1)); - z = x * x; - Fp2::sqr(y, x); - CYBOZU_TEST_EQUAL(z, y); - x.a = -123456789; - x.b = 464652165165; - y = x * x; - Fp2::sqr(x, x); - CYBOZU_TEST_EQUAL(x, y); - { - std::ostringstream oss; - oss << x; - std::istringstream iss(oss.str()); - Fp2 w; - iss >> w; - CYBOZU_TEST_EQUAL(x, w); - } - y = 1; - for (int i = 0; i < 10; i++) { - Fp2::pow(z, x, i); - CYBOZU_TEST_EQUAL(z, y); - y *= x; - } - /* - (a + bi)^p = a + bi if p % 4 = 1 - (a + bi)^p = a - bi if p % 4 = 3 - */ - { - const mpz_class& mp = Fp::getOp().mp; - y = x; - Fp2::pow(z, y, mp); - if ((mp % 4) == 3) { - Fp::neg(z.b, z.b); - } - CYBOZU_TEST_EQUAL(z, y); - } - { - mpz_class t = Fp::getOp().mp; - t /= 2; - Fp x; - x.setMpz(t); - CYBOZU_TEST_EQUAL(x * 2, Fp(-1)); - t += 1; - x.setMpz(t); - CYBOZU_TEST_EQUAL(x * 2, 1); - } - { - Fp2 a(1, 1); - Fp2 b(1, -1); - Fp2 c(Fp2(2) / a); - CYBOZU_TEST_EQUAL(c, b); - CYBOZU_TEST_EQUAL(a * b, Fp2(2)); - CYBOZU_TEST_EQUAL(a * c, Fp2(2)); - } - y = x; - Fp2::inv(y, x); - y *= x; - CYBOZU_TEST_EQUAL(y, 1); - - // square root - for (int i = 0; i < 3; i++) { - x.a = i * i + i * 2; - x.b = i; - Fp2::sqr(y, x); - CYBOZU_TEST_ASSERT(Fp2::squareRoot(z, y)); - 
CYBOZU_TEST_EQUAL(z * z, y); - CYBOZU_TEST_ASSERT(Fp2::squareRoot(y, y)); - CYBOZU_TEST_EQUAL(z * z, y * y); - x.b = 0; - Fp2::sqr(y, x); - CYBOZU_TEST_ASSERT(Fp2::squareRoot(z, y)); - CYBOZU_TEST_EQUAL(z * z, y); - x.a = 0; - x.b = i * i + i * 3; - Fp2::sqr(y, x); - CYBOZU_TEST_ASSERT(Fp2::squareRoot(z, y)); - CYBOZU_TEST_EQUAL(z * z, y); - } -} - -void testFp6sqr(const Fp2& a, const Fp2& b, const Fp2& c, const Fp6& x) -{ - Fp2 t; - t = b * c * 2; - Fp2::mul_xi(t, t); - t += a * a; - CYBOZU_TEST_EQUAL(x.a, t); - t = c * c; - Fp2::mul_xi(t, t); - t += a * b * 2; - CYBOZU_TEST_EQUAL(x.b, t); - t = b * b + a * c * 2; - CYBOZU_TEST_EQUAL(x.c, t); -} - -void testFp6() -{ - using namespace mcl; - puts(__FUNCTION__); - Fp2 a(1, 2), b(3, 4), c(5, 6); - Fp6 x(a, b, c); - Fp6 y(Fp2(-1, 1), Fp2(4, -3), Fp2(-6, 2)); - Fp6 z, w; - { - std::stringstream ss; - ss << x; - ss >> z; - CYBOZU_TEST_EQUAL(x, z); - } - Fp6::add(z, x, y); - CYBOZU_TEST_EQUAL(z, Fp6(Fp2(0, 3), Fp2(7, 1), Fp2(-1, 8))); - Fp6::sub(z, x, y); - CYBOZU_TEST_EQUAL(z, Fp6(Fp2(2, 1), Fp2(-1, 7), Fp2(11, 4))); - Fp6::neg(z, x); - CYBOZU_TEST_EQUAL(z, Fp6(-a, -b, -c)); - Fp6::sqr(z, x); - Fp6::mul(w, x, x); - testFp6sqr(a, b, c, z); - testFp6sqr(a, b, c, w); - z = x; - Fp6::sqr(z, z); - Fp6::mul(w, x, x); - testFp6sqr(a, b, c, z); - testFp6sqr(a, b, c, w); - for (int i = 0; i < 10; i++) { - Fp6::inv(y, x); - Fp6::mul(z, y, x); - CYBOZU_TEST_EQUAL(z, 1); - x += y; - y = x; - Fp6::inv(y, y); - y *= x; - CYBOZU_TEST_EQUAL(y, 1); - } -} - -void testFp12() -{ - puts(__FUNCTION__); - Fp6 xa(Fp2(1, 2), Fp2(3, 4), Fp2(5, 6)); - Fp6 xb(Fp2(3, 1), Fp2(6, -1), Fp2(-2, 5)); - Fp12 x(xa, xb); - Fp6 ya(Fp2(2, 1), Fp2(5, 3), Fp2(4, 1)); - Fp6 yb(Fp2(1, -3), Fp2(2, -1), Fp2(-3, 1)); - Fp12 y(ya, yb); - Fp12 z; - Fp12::add(z, x, y); - CYBOZU_TEST_EQUAL(z, Fp12(Fp6(Fp2(3, 3), Fp2(8, 7), Fp2(9, 7)), Fp6(Fp2(4, -2), Fp2(8, -2), Fp2(-5, 6)))); - Fp12::sub(z, x, y); - CYBOZU_TEST_EQUAL(z, Fp12(Fp6(Fp2(-1, 1), Fp2(-2, 1), Fp2(1, 5)), Fp6(Fp2(2, 4), Fp2(4, 0), Fp2(1, 4)))); - Fp12::neg(z, x); - CYBOZU_TEST_EQUAL(z, Fp12(-xa, -xb)); - - y.b.clear(); - z = y; - Fp12::sqr(z, z); - CYBOZU_TEST_EQUAL(z.a, y.a * y.a); - z = y * y; - CYBOZU_TEST_EQUAL(z.a, y.a * y.a); - CYBOZU_TEST_ASSERT(z.b.isZero()); - Fp12 w; - y = x; - z = x * x; - w = x; - Fp12::sqr(w, w); - CYBOZU_TEST_EQUAL(z, w); - y = x; - y *= y; - Fp12::sqr(x, x); - CYBOZU_TEST_EQUAL(x, y); - for (int i = 0; i < 10; i++) { - w = x; - Fp12::inv(w, w); - Fp12::mul(y, w, x); - CYBOZU_TEST_EQUAL(y, 1); - x += y; - } -} - -void testFpDbl() -{ - puts(__FUNCTION__); - { - std::string pstr; - Fp::getModulo(pstr); - mpz_class mp(pstr); - mp <<= Fp::getUnitSize() * mcl::fp::UnitBitSize; - mpz_class mp1 = mp - 1; - mcl::gmp::getStr(pstr, mp1); - const char *tbl[] = { - "0", "1", "123456", "123456789012345668909", pstr.c_str(), - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - mpz_class mx(tbl[i]); - FpDbl x; - x.setMpz(mx); - for (size_t j = 0; j < CYBOZU_NUM_OF_ARRAY(tbl); j++) { - FpDbl y, z; - mpz_class mz, mo; - mpz_class my(tbl[j]); - y.setMpz(my); - FpDbl::add(z, x, y); - mcl::gmp::addMod(mo, mx, my, mp); - z.getMpz(mz); - CYBOZU_TEST_EQUAL(mz, mo); - mcl::gmp::subMod(mo, mx, my, mp); - FpDbl::sub(z, x, y); - z.getMpz(mz); - CYBOZU_TEST_EQUAL(mz, mo); - if (!Fp::isFullBit()) { - FpDbl::addPre(z, x, y); - mo = mx + my; - z.getMpz(mz); - CYBOZU_TEST_EQUAL(mz, mo); - if (mx >= my) { - FpDbl::subPre(z, x, y); - mo = mx - my; - z.getMpz(mz); - CYBOZU_TEST_EQUAL(mz, mo); - } - } - } - } - } - { - 
std::string pstr; - Fp::getModulo(pstr); - const mpz_class mp(pstr); - cybozu::XorShift rg; - for (int i = 0; i < 3; i++) { - Fp x, y, z; - mpz_class mx, my, mz, mo; - x.setRand(rg); - y.setRand(rg); - x.getMpz(mx); - y.getMpz(my); - FpDbl d; - FpDbl::mulPre(d, x, y); - d.getMpz(mz); - { - Fp tx, ty; - tx = x; - ty = y; - tx.toMont(); - ty.toMont(); - mpz_class mtx, mty; - tx.getMpz(mtx); - ty.getMpz(mty); - mo = mtx * mty; - } - CYBOZU_TEST_EQUAL(mz, mo); - - FpDbl::mod(z, d); - z.getMpz(mz); - mo = (mx * my) % mp; - CYBOZU_TEST_EQUAL(mz, mo); - CYBOZU_TEST_EQUAL(z, x * y); - - FpDbl::sqrPre(d, x); - d.getMpz(mz); - { - Fp tx; - tx = x; - tx.toMont(); - mpz_class mtx; - tx.getMpz(mtx); - mo = mtx * mtx; - } - CYBOZU_TEST_EQUAL(mz, mo); - - FpDbl::mod(z, d); - z.getMpz(mz); - mo = (mx * mx) % mp; - CYBOZU_TEST_EQUAL(mz, mo); - CYBOZU_TEST_EQUAL(z, x * x); - } - } -} - -void testIo() -{ - int modeTbl[] = { 0, 2, 2 | mcl::IoPrefix, 10, 16, 16 | mcl::IoPrefix, mcl::IoArray, mcl::IoArrayRaw }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(modeTbl); i++) { - int ioMode = modeTbl[i]; - Fp12 x; - for (int j = 0; j < 12; j++) { - x.getFp0()[j] = j * j; - } - std::string s = x.getStr(ioMode); - Fp12 y; - y.setStr(s, ioMode); - CYBOZU_TEST_EQUAL(x, y); - } -} - -void benchFp2() -{ - puts(__FUNCTION__); - Fp2 x, y; - x.a.setStr("4"); - x.b.setStr("464652165165"); - y = x * x; - double addT, subT, mulT, sqrT, invT, mul_xiT; - CYBOZU_BENCH_T(addT, Fp2::add, x, x, y); - CYBOZU_BENCH_T(subT, Fp2::sub, x, x, y); - CYBOZU_BENCH_T(mulT, Fp2::mul, x, x, y); - CYBOZU_BENCH_T(sqrT, Fp2::sqr, x, x); - CYBOZU_BENCH_T(invT, Fp2::inv, x, x); - CYBOZU_BENCH_T(mul_xiT, Fp2::mul_xi, x, x); -// CYBOZU_BENCH("Fp2::mul_Fp_0", Fp2::mul_Fp_0, x, x, Param::half); -// CYBOZU_BENCH("Fp2::mul_Fp_1", Fp2::mul_Fp_1, x, Param::half); -// CYBOZU_BENCH("Fp2::divBy2 ", Fp2::divBy2, x, x); -// CYBOZU_BENCH("Fp2::divBy4 ", Fp2::divBy4, x, x); - printf("add %8.2f|sub %8.2f|mul %8.2f|sqr %8.2f|inv %8.2f|mul_xi %8.2f\n", addT, subT, mulT, sqrT, invT, mul_xiT); -} - -void test(const char *p, mcl::fp::Mode mode) -{ - const int xi_a = 1; - Fp::init(xi_a, p, mode); - printf("mode=%s\n", mcl::fp::ModeToStr(mode)); - Fp2::init(); -#if 0 - if (Fp::getBitSize() > 256) { - printf("not support p=%s\n", p); - return; - } -#endif - if (g_benchOnly) { - benchFp2(); - return; - } - testFp2(); - testFpDbl(); - testFp6(); - testFp12(); - testIo(); -} - -void testAll() -{ - const char *tbl[] = { - // N = 2 - "0x0000000000000001000000000000000d", - "0x7fffffffffffffffffffffffffffffff", - "0x8000000000000000000000000000001d", - "0xffffffffffffffffffffffffffffff61", - - // N = 3 - "0x000000000000000100000000000000000000000000000033", // min prime - "0x00000000fffffffffffffffffffffffffffffffeffffac73", - "0x0000000100000000000000000001b8fa16dfab9aca16b6b3", - "0x000000010000000000000000000000000000000000000007", - "0x30000000000000000000000000000000000000000000002b", - "0x70000000000000000000000000000000000000000000001f", - "0x800000000000000000000000000000000000000000000005", - "0xfffffffffffffffffffffffffffffffeffffffffffffffff", - "0xfffffffffffffffffffffffffffffffffffffffeffffee37", - "0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d", - "0xffffffffffffffffffffffffffffffffffffffffffffff13", // max prime - - // N = 4 - "0x0000000000000001000000000000000000000000000000000000000000000085", // min prime - "0x2523648240000001ba344d80000000086121000000000013a700000000000013", - "0x7523648240000001ba344d80000000086121000000000013a700000000000017", - 
"0x800000000000000000000000000000000000000000000000000000000000005f", - "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff43", // max prime -#if MCL_MAX_BIT_SIZE >= 384 - // N = 6 - "0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab", - "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", -#endif -#if MCL_MAX_BIT_SIZE >= 768 - "776259046150354467574489744231251277628443008558348305569526019013025476343188443165439204414323238975243865348565536603085790022057407195722143637520590569602227488010424952775132642815799222412631499596858234375446423426908029627", -#endif - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const char *p = tbl[i]; - printf("prime=%s %d\n", p, (int)(strlen(p) - 2) * 4); - test(p, mcl::fp::FP_GMP); -#ifdef MCL_USE_LLVM - test(p, mcl::fp::FP_LLVM); - test(p, mcl::fp::FP_LLVM_MONT); -#endif -#ifdef MCL_USE_XBYAK - test(p, mcl::fp::FP_XBYAK); -#endif - } -} - -CYBOZU_TEST_AUTO(testAll) -{ - testAll(); -} - -int main(int argc, char *argv[]) -{ - if (argc > 1 && strcmp(argv[1], "-bench") == 0) { - g_benchOnly = true; - } - if (g_benchOnly) { - testAll(); - return 0; - } else { - return cybozu::test::autoRun.run(argc, argv); - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/fp_util_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/fp_util_test.cpp deleted file mode 100644 index e8a9f9aa5..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/fp_util_test.cpp +++ /dev/null @@ -1,270 +0,0 @@ -#define PUT(x) std::cout << #x "=" << (x) << std::endl -#include -#include -#include -#include -#include - -CYBOZU_TEST_AUTO(arrayToHex) -{ - const struct { - uint32_t x[4]; - size_t n; - const char *str; - } tbl[] = { - { { 0, 0, 0, 0 }, 0, "0" }, - { { 0x123, 0, 0, 0 }, 1, "123" }, - { { 0x12345678, 0xaabbcc, 0, 0 }, 2, "aabbcc12345678" }, - { { 0, 0x12, 0x234a, 0 }, 3, "234a0000001200000000" }, - { { 1, 2, 0xffffffff, 0x123abc }, 4, "123abcffffffff0000000200000001" }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - char buf[64]; - size_t n = mcl::fp::arrayToHex(buf, sizeof(buf), tbl[i].x, tbl[i].n, false); - CYBOZU_TEST_ASSERT(n > 0); - CYBOZU_TEST_EQUAL_ARRAY(buf + sizeof(buf) - n, tbl[i].str, n); - n = mcl::fp::arrayToHex(buf, sizeof(buf), tbl[i].x, tbl[i].n, true); - CYBOZU_TEST_ASSERT(n > 0); - CYBOZU_TEST_EQUAL_ARRAY(buf + sizeof(buf) - n, (std::string("0x") + tbl[i].str).c_str(), n); - } -} - -CYBOZU_TEST_AUTO(arrayToBin) -{ - const struct { - uint32_t x[4]; - size_t n; - const char *str; - } tbl[] = { - { { 0, 0, 0, 0 }, 0, "0" }, - { { 0x123, 0, 0, 0 }, 1, "100100011" }, - { { 0x12345678, 0xaabbcc, 0, 0 }, 2, "10101010101110111100110000010010001101000101011001111000" }, - { { 0, 0x12, 0x234a, 0 }, 3, "100011010010100000000000000000000000000001001000000000000000000000000000000000" }, - { { 1, 2, 0xffffffff, 0x123abc }, 4, "100100011101010111100111111111111111111111111111111110000000000000000000000000000001000000000000000000000000000000001" }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - char buf[512]; - size_t n = mcl::fp::arrayToBin(buf, sizeof(buf), tbl[i].x, tbl[i].n, false); - CYBOZU_TEST_ASSERT(n > 0); - CYBOZU_TEST_EQUAL_ARRAY(buf + sizeof(buf) - n, tbl[i].str, n); - n = mcl::fp::arrayToBin(buf, sizeof(buf), tbl[i].x, tbl[i].n, true); - CYBOZU_TEST_ASSERT(n > 0); - CYBOZU_TEST_EQUAL_ARRAY(buf + sizeof(buf) - n, (std::string("0b") + tbl[i].str).c_str(), n); - } -} -// 
CYBOZU_TEST_AUTO(verifyStr) // QQQ - -CYBOZU_TEST_AUTO(hexToArray) -{ - const struct { - const char *str; - uint64_t x[4]; - } tbl[] = { - { "0", { 0, 0, 0, 0 } }, - { "5", { 5, 0, 0, 0 } }, - { "123", { 0x123, 0, 0, 0 } }, - { "123456789012345679adbc", { uint64_t(0x789012345679adbcull), 0x123456, 0, 0 } }, - { "ffffffff26f2fc170f69466a74defd8d", { uint64_t(0x0f69466a74defd8dull), uint64_t(0xffffffff26f2fc17ull), 0, 0 } }, - { "100000000000000000000000000000033", { uint64_t(0x0000000000000033ull), 0, 1, 0 } }, - { "11ee12312312940000000000000000000000000002342343", { uint64_t(0x0000000002342343ull), uint64_t(0x0000000000000000ull), uint64_t(0x11ee123123129400ull), 0 } }, - { "1234567890abcdefABCDEF123456789aba32134723424242424", { uint64_t(0x2134723424242424ull), uint64_t(0xDEF123456789aba3ull), uint64_t(0x4567890abcdefABCull), 0x123 } }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const size_t xN = 4; - uint64_t x[xN]; - size_t n = mcl::fp::hexToArray(x, xN, tbl[i].str, strlen(tbl[i].str)); - CYBOZU_TEST_ASSERT(n > 0); - CYBOZU_TEST_EQUAL_ARRAY(x, tbl[i].x, n); - } -} - -CYBOZU_TEST_AUTO(compareArray) -{ - const struct { - uint32_t a[4]; - uint32_t b[4]; - size_t n; - int expect; - } tbl[] = { - { { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, 0, 0 }, - { { 1, 0, 0, 0 }, { 0, 0, 0, 0 }, 1, 1 }, - { { 0, 0, 0, 0 }, { 1, 0, 0, 0 }, 1, -1 }, - { { 1, 0, 0, 0 }, { 1, 0, 0, 0 }, 1, 0 }, - { { 3, 1, 1, 0 }, { 2, 1, 1, 0 }, 4, 1 }, - { { 9, 2, 1, 1 }, { 1, 3, 1, 1 }, 4, -1 }, - { { 1, 7, 8, 4 }, { 1, 7, 8, 9 }, 3, 0 }, - { { 1, 7, 8, 4 }, { 1, 7, 8, 9 }, 4, -1 }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - int e = mcl::fp::compareArray(tbl[i].a, tbl[i].b, tbl[i].n); - CYBOZU_TEST_EQUAL(e, tbl[i].expect); - } -} - -CYBOZU_TEST_AUTO(isLessArray) -{ - const struct { - uint32_t a[4]; - uint32_t b[4]; - size_t n; - bool expect; - } tbl[] = { - { { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, 0, false }, - { { 1, 0, 0, 0 }, { 0, 0, 0, 0 }, 1, false }, - { { 0, 0, 0, 0 }, { 1, 0, 0, 0 }, 1, true }, - { { 1, 0, 0, 0 }, { 1, 0, 0, 0 }, 1, false }, - { { 3, 1, 1, 0 }, { 2, 1, 1, 0 }, 4, false }, - { { 3, 1, 2, 0 }, { 2, 2, 2, 0 }, 4, true }, - { { 9, 2, 1, 1 }, { 1, 3, 1, 1 }, 4, true }, - { { 1, 7, 8, 4 }, { 1, 7, 8, 9 }, 3, false }, - { { 1, 7, 8, 4 }, { 1, 7, 8, 9 }, 4, true }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - bool e = mcl::fp::isLessArray(tbl[i].a, tbl[i].b, tbl[i].n); - CYBOZU_TEST_EQUAL(e, tbl[i].expect); - e = mcl::fp::isGreaterArray(tbl[i].b, tbl[i].a, tbl[i].n); - CYBOZU_TEST_EQUAL(e, tbl[i].expect); - } -} - -CYBOZU_TEST_AUTO(isLessOrEqualArray) -{ - const struct { - uint32_t a[4]; - uint32_t b[4]; - size_t n; - bool expect; - } tbl[] = { - { { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, 0, true }, - { { 1, 0, 0, 0 }, { 0, 0, 0, 0 }, 1, false }, - { { 0, 0, 0, 0 }, { 1, 0, 0, 0 }, 1, true }, - { { 1, 0, 0, 0 }, { 1, 0, 0, 0 }, 1, true }, - { { 3, 1, 1, 0 }, { 2, 1, 1, 0 }, 4, false }, - { { 3, 1, 2, 0 }, { 2, 2, 2, 0 }, 4, true }, - { { 9, 2, 1, 1 }, { 1, 3, 1, 1 }, 4, true }, - { { 1, 7, 8, 4 }, { 1, 7, 8, 9 }, 3, true }, - { { 1, 7, 8, 4 }, { 1, 7, 8, 9 }, 4, true }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - bool e = mcl::fp::isLessOrEqualArray(tbl[i].a, tbl[i].b, tbl[i].n); - CYBOZU_TEST_EQUAL(e, tbl[i].expect); - e = mcl::fp::isGreaterOrEqualArray(tbl[i].b, tbl[i].a, tbl[i].n); - CYBOZU_TEST_EQUAL(e, tbl[i].expect); - } -} - -struct Rand { - std::vector v; - const uint8_t *p; - size_t pos; - size_t endPos; - void read(bool *pb, void *x, 
size_t n) - { - if (pos + n > endPos) { - *pb = false; - return; - } - uint8_t *dst = (uint8_t*)x; - memcpy(dst, p + pos, n); - pos += n; - *pb = true; - } - void read(void *x, size_t n) - { - bool b; - read(&b, x, n); - if (!b) throw cybozu::Exception("Rand") << n; - } - uint32_t operator()() - { - char buf[4]; - read(buf, 4); - uint32_t v; - memcpy(&v, buf, 4); - return v; - } - Rand(const uint32_t *x, size_t n) - : p(0) - , pos(0) - { - for (size_t i = 0; i < n; i++) { - v.push_back(x[i]); - } - p = (uint8_t*)&v[0]; - endPos = v.size() * 4; - } -}; - -CYBOZU_TEST_AUTO(maskArray) -{ -#if 1 - const size_t n = 2; - uint32_t org[n] = { 0xabce1234, 0xffffef32 }; - for (size_t i = 0; i <= sizeof(org) * 8; i++) { - uint32_t x[n]; - memcpy(x, org, sizeof(org)); - mcl::fp::maskArray(x, n, i); - mpz_class t; - mcl::gmp::setArray(t, org, n); - t &= (mpz_class(1) << i) - 1; - uint32_t y[n]; - mcl::gmp::getArray(y, n, t); - CYBOZU_TEST_EQUAL_ARRAY(x, y, n); - } -#else - const size_t n = 4; - uint16_t org[n] = { 0x1234, 0xabce, 0xef32, 0xffff }; - for (size_t i = 0; i <= sizeof(org) * 8; i++) { - uint16_t x[n]; - memcpy(x, org, sizeof(org)); - mcl::fp::maskArray(x, n, i); - mpz_class t; - mcl::gmp::setArray(t, org, n); - t &= (mpz_class(1) << i) - 1; - uint16_t y[n]; - mcl::gmp::getArray(y, n, t); - CYBOZU_TEST_EQUAL_ARRAY(x, y, n); - } -#endif -} - -CYBOZU_TEST_AUTO(stream) -{ - const char *nulTbl[] = { "", " ", " \t\t\n\n " }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(nulTbl); i++) { - const char *p = nulTbl[i]; - cybozu::MemoryInputStream is(p, strlen(p)); - std::string w = "abc"; - mcl::fp::local::loadWord(w, is); - CYBOZU_TEST_ASSERT(w.empty()); - } - const struct { - const char *buf; - const char *expect[2]; - size_t n; - } tbl[] = { - { "\t\t \n\rabc\r\r\n def", { "abc", "def" }, 2 }, - { "123", { "123" }, 1 }, - { "123\n", { "123" }, 1 }, - { "123 456", { "123", "456" }, 2 }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const char *buf = tbl[i].buf; - { - cybozu::MemoryInputStream is(buf, strlen(buf)); - for (size_t j = 0; j < tbl[i].n; j++) { - std::string w; - mcl::fp::local::loadWord(w, is); - CYBOZU_TEST_EQUAL(w, tbl[i].expect[j]); - } - } - { - std::istringstream is(buf); - for (size_t j = 0; j < tbl[i].n; j++) { - std::string w; - mcl::fp::local::loadWord(w, is); - CYBOZU_TEST_EQUAL(w, tbl[i].expect[j]); - } - } - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/glv_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/glv_test.cpp deleted file mode 100644 index a917f51f4..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/glv_test.cpp +++ /dev/null @@ -1,209 +0,0 @@ -#define PUT(x) std::cout << #x "=" << (x) << std::endl; -#include -#include -#include - -#if 1 -#include -using namespace mcl::bn384; -#else -#include -using namespace mcl::bn256; -#endif - -#define PUT(x) std::cout << #x "=" << (x) << std::endl; - -/* - Skew Frobenius Map and Efficient Scalar Multiplication for Pairing-Based Cryptography - Y. Sakemi, Y. Nogami, K. Okeya, H. Kato, Y. 
Morikawa -*/ -struct oldGLV { - Fp w; // (-1 + sqrt(-3)) / 2 - mpz_class r; - mpz_class v; // 6z^2 + 4z + 1 > 0 - mpz_class c; // 2z + 1 - void init(const mpz_class& r, const mpz_class& z) - { - if (!Fp::squareRoot(w, -3)) throw cybozu::Exception("oldGLV:init"); - w = (w - 1) / 2; - this->r = r; - v = 1 + z * (4 + z * 6); - c = 2 * z + 1; - } - /* - (p^2 mod r) (x, y) = (wx, -y) - */ - void mulP2(G1& Q, const G1& P) const - { - Fp::mul(Q.x, P.x, w); - Fp::neg(Q.y, P.y); - Q.z = P.z; - } - /* - x = ap^2 + b mod r - assume(x < r); - */ - void split(mpz_class& a, mpz_class& b, const mpz_class& x) const - { - assert(0 < x && x < r); - /* - x = s1 * v + s2 // s1 = x / v, s2 = x % v - = s1 * c * p^2 + s2 // vP = cp^2 P - = (s3 * v + s4) * p^2 + s2 // s3 = (s1 * c) / v, s4 = (s1 * c) % v - = (s3 * c * p^2 + s4) * p^2 + s2 - = (s3 * c) * p^4 + s4 * p^2 + s2 // s5 = s3 * c, p^4 = p^2 - 1 - = s5 * (p^2 - 1) + s4 * p^2 + s2 - = (s4 + s5) * p^2 + (s2 - s5) - */ - mpz_class t; - mcl::gmp::divmod(a, t, x, v); // a = t / v, t = t % v - a *= c; - mcl::gmp::divmod(b, a, a, v); // b = a / v, a = a % v - b *= c; - a += b; - b = t - b; - } - template - void mul(G1& Q, const G1& P, const mpz_class& x) const - { - G1 A, B; - mpz_class a, b; - split(a, b, x); - mulP2(A, P); - G1::mul(A, A, a); - G1::mul(B, P, b); - G1::add(Q, A, B); - } -}; - -template -void compareLength(const GLV1& rhs, const GLV2& lhs) -{ - cybozu::XorShift rg; - int lt = 0; - int eq = 0; - int gt = 0; - mpz_class R0, R1, L0, L1, x; - Fr r; - for (int i = 1; i < 1000; i++) { - r.setRand(rg); - x = r.getMpz(); - rhs.split(R0, R1, x); - lhs.split(L0, L1, x); - - size_t R0n = mcl::gmp::getBitSize(R0); - size_t R1n = mcl::gmp::getBitSize(R1); - size_t L0n = mcl::gmp::getBitSize(L0); - size_t L1n = mcl::gmp::getBitSize(L1); - size_t Rn = std::max(R0n, R1n); - size_t Ln = std::max(L0n, L1n); - if (Rn == Ln) { - eq++; - } - if (Rn > Ln) { - gt++; - } - if (Rn < Ln) { - lt++; - } - } - printf("#of{<} = %d, #of{=} = %d #of{>} = %d\n", lt, eq, gt); -} - -void testGLV1() -{ - G1 P0, P1, P2; - mapToG1(P0, 1); - cybozu::XorShift rg; - - oldGLV oldGlv; - if (!BN::param.isBLS12) { - oldGlv.init(BN::param.r, BN::param.z); - } - - mcl::bn::local::GLV1 glv; - glv.init(BN::param.r, BN::param.z, BN::param.isBLS12); - if (!BN::param.isBLS12) { - compareLength(glv, oldGlv); - } - - for (int i = 1; i < 100; i++) { - mapToG1(P0, i); - Fr s; - s.setRand(rg); - mpz_class ss = s.getMpz(); - G1::mulGeneric(P1, P0, ss); - glv.mul(P2, P0, ss); - CYBOZU_TEST_EQUAL(P1, P2); - glv.mul(P2, P0, ss, true); - CYBOZU_TEST_EQUAL(P1, P2); - if (!BN::param.isBLS12) { - oldGlv.mul(P2, P0, ss); - CYBOZU_TEST_EQUAL(P1, P2); - } - } - for (int i = -100; i < 100; i++) { - mpz_class ss = i; - G1::mulGeneric(P1, P0, ss); - glv.mul(P2, P0, ss); - CYBOZU_TEST_EQUAL(P1, P2); - glv.mul(P2, P0, ss, true); - CYBOZU_TEST_EQUAL(P1, P2); - } - Fr s; - mapToG1(P0, 123); - CYBOZU_BENCH_C("Ec::mul", 100, P1 = P0; s.setRand(rg); G1::mulGeneric, P2, P1, s.getMpz()); - CYBOZU_BENCH_C("Ec::glv", 100, P1 = P0; s.setRand(rg); glv.mul, P2, P1, s.getMpz()); -} - -/* - lambda = 6 * z * z - mul (lambda * 2) = FrobeniusOnTwist * 2 -*/ -void testGLV2() -{ - G2 Q0, Q1, Q2; - mpz_class z = BN::param.z; - mpz_class r = BN::param.r; - mpz_class lambda = 6 * z * z; - mcl::bn::local::GLV2 glv2; - glv2.init(r, z, BN::param.isBLS12); - mpz_class n; - cybozu::XorShift rg; - mapToG2(Q0, 1); - for (int i = -10; i < 10; i++) { - n = i; - G2::mulGeneric(Q1, Q0, n); - glv2.mul(Q2, Q0, n); - CYBOZU_TEST_EQUAL(Q1, Q2); - } - for 
(int i = 1; i < 100; i++) { - mcl::gmp::getRand(n, glv2.rBitSize, rg); - n %= r; - n -= r/2; - mapToG2(Q0, i); - G2::mulGeneric(Q1, Q0, n); - glv2.mul(Q2, Q0, n); - CYBOZU_TEST_EQUAL(Q1, Q2); - } - Fr s; - mapToG2(Q0, 123); - CYBOZU_BENCH_C("G2::mul", 1000, Q2 = Q0; s.setRand(rg); G2::mulGeneric, Q2, Q1, s.getMpz()); - CYBOZU_BENCH_C("G2::glv", 1000, Q1 = Q0; s.setRand(rg); glv2.mul, Q2, Q1, s.getMpz()); -} - -CYBOZU_TEST_AUTO(glv) -{ - const mcl::CurveParam tbl[] = { - mcl::BN254, - mcl::BN381_1, - mcl::BN381_2, - mcl::BLS12_381, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const mcl::CurveParam& cp = tbl[i]; - initPairing(cp); - testGLV1(); - testGLV2(); - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/gmp_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/gmp_test.cpp deleted file mode 100644 index 1fe9d4eb6..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/gmp_test.cpp +++ /dev/null @@ -1,70 +0,0 @@ -#include -#include -#include - -CYBOZU_TEST_AUTO(testBit) -{ - const size_t maxBit = 100; - const size_t tbl[] = { - 3, 9, 5, 10, 50, maxBit - }; - mpz_class a; - std::vector b(maxBit + 1); - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - a |= mpz_class(1) << tbl[i]; - b[tbl[i]] = 1; - } - for (size_t i = 0; i <= maxBit; i++) { - bool c1 = mcl::gmp::testBit(a, i); - bool c2 = b[i] != 0; - CYBOZU_TEST_EQUAL(c1, c2); - } -} - -CYBOZU_TEST_AUTO(getStr) -{ - const struct { - int x; - const char *dec; - const char *hex; - } tbl[] = { - { 0, "0", "0" }, - { 1, "1", "1" }, - { 10, "10", "a" }, - { 16, "16", "10" }, - { 123456789, "123456789", "75bcd15" }, - { -1, "-1", "-1" }, - { -10, "-10", "-a" }, - { -16, "-16", "-10" }, - { -100000000, "-100000000", "-5f5e100" }, - { -987654321, "-987654321", "-3ade68b1" }, - { -2147483647, "-2147483647", "-7fffffff" }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - mpz_class x = tbl[i].x; - char buf[32]; - size_t n, len; - len = strlen(tbl[i].dec); - n = mcl::gmp::getStr(buf, len, x, 10); - CYBOZU_TEST_EQUAL(n, 0); - n = mcl::gmp::getStr(buf, len + 1, x, 10); - CYBOZU_TEST_EQUAL(n, len); - CYBOZU_TEST_EQUAL_ARRAY(buf, tbl[i].dec, n); - - len = strlen(tbl[i].hex); - n = mcl::gmp::getStr(buf, len, x, 16); - CYBOZU_TEST_EQUAL(n, 0); - n = mcl::gmp::getStr(buf, len + 1, x, 16); - CYBOZU_TEST_EQUAL(n, len); - CYBOZU_TEST_EQUAL_ARRAY(buf, tbl[i].hex, n); - } -} - -CYBOZU_TEST_AUTO(getRandPrime) -{ - for (int i = 0; i < 10; i++) { - mpz_class z; - mcl::gmp::getRandPrime(z, i * 10 + 3); - CYBOZU_TEST_ASSERT(mcl::gmp::isPrime(z)); - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/low_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/low_test.cpp deleted file mode 100644 index f5e72a0b3..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/low_test.cpp +++ /dev/null @@ -1,73 +0,0 @@ -#ifndef MCL_USE_LLVM - #define MCL_USE_LLVM -#endif -#include -#include -#include -#include "../src/low_func.hpp" -#include - -cybozu::XorShift rg; - -extern "C" void add_test(mcl::fp::Unit *z, const mcl::fp::Unit *x, const mcl::fp::Unit *y); - -template -void bench() -{ - using namespace mcl::fp; - const size_t N = bit / UnitBitSize; - Unit x[N], y[N]; - for (int i = 0; i < 10; i++) { - Unit z[N]; - Unit w[N]; - rg.read(x, N); - rg.read(y, N); - AddPre::f(z, x, y); - AddPre::f(w, x, y); - CYBOZU_TEST_EQUAL_ARRAY(z, w, N); - - SubPre::f(z, x, y); - SubPre::f(w, x, y); - CYBOZU_TEST_EQUAL_ARRAY(z, w, N); - } - const std::string bitS = cybozu::itoa(bit); - std::string name; - name = 
"add" + bitS; CYBOZU_BENCH(name.c_str(), (AddPre::f), x, x, y); - name = "sub" + bitS; CYBOZU_BENCH(name.c_str(), (SubPre::f), x, x, y); -} - -CYBOZU_TEST_AUTO(addPre64) { bench<64>(); } -CYBOZU_TEST_AUTO(addPre128) { bench<128>(); } -CYBOZU_TEST_AUTO(addPre192) { bench<192>(); } -CYBOZU_TEST_AUTO(addPre256) { bench<256>(); } -CYBOZU_TEST_AUTO(addPre320) { bench<320>(); } -CYBOZU_TEST_AUTO(addPre384) { bench<384>(); } -CYBOZU_TEST_AUTO(addPre448) { bench<448>(); } -CYBOZU_TEST_AUTO(addPre512) { bench<512>(); } -//CYBOZU_TEST_AUTO(addPre96) { bench<96>(); } -//CYBOZU_TEST_AUTO(addPre160) { bench<160>(); } -//CYBOZU_TEST_AUTO(addPre224) { bench<224>(); } -#if 0 -CYBOZU_TEST_AUTO(addPre) -{ - using namespace mcl::fp; - const size_t bit = 128; - const size_t N = bit / UnitBitSize; - Unit x[N], y[N]; - for (int i = 0; i < 10; i++) { - Unit z[N]; - Unit w[N]; - rg.read(x, N); - rg.read(y, N); - low_addPre_G(z, x, y); - addPre(w, x, y); - CYBOZU_TEST_EQUAL_ARRAY(z, w, N); - add_test(w, x, y); - CYBOZU_TEST_EQUAL_ARRAY(z, w, N); - } - std::string name = "add" + cybozu::itoa(bit); - CYBOZU_BENCH(name.c_str(), addPre, x, x, y); - CYBOZU_BENCH("add", add_test, x, x, y); -} -#endif - diff --git a/vendor/github.com/dexon-foundation/mcl/test/mk32.sh b/vendor/github.com/dexon-foundation/mcl/test/mk32.sh deleted file mode 100644 index 4d5f60711..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/mk32.sh +++ /dev/null @@ -1 +0,0 @@ -g++ -O3 -march=native base_test.cpp ../src/x86.s -m32 -I ~/32/include/ -I ../include/ -I ../../xbyak/ -I ../../cybozulib/include ~/32/lib/libgmp.a ~/32/lib/libgmpxx.a -I ~/32/lib -DNDEBUG diff --git a/vendor/github.com/dexon-foundation/mcl/test/modp_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/modp_test.cpp deleted file mode 100644 index bf9da38bf..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/modp_test.cpp +++ /dev/null @@ -1,37 +0,0 @@ -#include -#include -#include - -#define PUT(x) std::cout << #x << "=" << x << std::endl; - -CYBOZU_TEST_AUTO(modp) -{ - const int C = 1000000; - const char *pTbl[] = { - "0x2523648240000001ba344d8000000007ff9f800000000010a10000000000000d", - "0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab", - "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001", - }; - const char *xTbl[] = { - "0x12345678892082039482094823", - "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "0x10000000000000000000000000000000000000000000000000000000000000000", - "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - }; - mcl::Modp modp; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(pTbl); i++) { - const mpz_class p(pTbl[i]); - std::cout << std::hex << "p=" << p << std::endl; - modp.init(p); - for (size_t j = 0; j < CYBOZU_NUM_OF_ARRAY(xTbl); j++) { - const mpz_class x(xTbl[j]); - std::cout << std::hex << "x=" << x << std::endl; - mpz_class r1, r2; - r1 = x % p; - modp.modp(r2, x); - CYBOZU_TEST_EQUAL(r1, r2); - CYBOZU_BENCH_C("x % p", C, mcl::gmp::mod, r1, x, p); - CYBOZU_BENCH_C("modp ", C, modp.modp, r2, x); - } - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/mont_fp_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/mont_fp_test.cpp deleted file mode 100644 index e41e77a53..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/mont_fp_test.cpp +++ /dev/null @@ -1,332 +0,0 @@ -#define PUT(x) std::cout << #x "=" << (x) << std::endl -#include -#include -#include -#include - -#if 0 
-#include -using namespace mcl::bls12; -typedef Fr Zn; -#else -#include -struct ZnTag; -typedef mcl::FpT Zn; -typedef mcl::FpT<> Fp; -#endif - -struct Montgomery { - typedef mcl::fp::Unit Unit; - mpz_class p_; - mpz_class R_; // (1 << (pn_ * 64)) % p - mpz_class RR_; // (R * R) % p - Unit rp_; // rp * p = -1 mod M = 1 << 64 - size_t pn_; - Montgomery() {} - explicit Montgomery(const mpz_class& p) - { - p_ = p; - rp_ = mcl::fp::getMontgomeryCoeff(mcl::gmp::getUnit(p, 0)); - pn_ = mcl::gmp::getUnitSize(p); - R_ = 1; - R_ = (R_ << (pn_ * 64)) % p_; - RR_ = (R_ * R_) % p_; - } - - void toMont(mpz_class& x) const { mul(x, x, RR_); } - void fromMont(mpz_class& x) const { mul(x, x, 1); } - - void mul(mpz_class& z, const mpz_class& x, const mpz_class& y) const - { -#if 0 - const size_t ySize = mcl::gmp::getUnitSize(y); - mpz_class c = x * mcl::gmp::getUnit(y, 0); - Unit q = mcl::gmp::getUnit(c, 0) * rp_; - c += p_ * q; - c >>= sizeof(Unit) * 8; - for (size_t i = 1; i < pn_; i++) { - if (i < ySize) { - c += x * mcl::gmp::getUnit(y, i); - } - Unit q = mcl::gmp::getUnit(c, 0) * rp_; - c += p_ * q; - c >>= sizeof(Unit) * 8; - } - if (c >= p_) { - c -= p_; - } - z = c; -#else - z = x * y; - for (size_t i = 0; i < pn_; i++) { - Unit q = mcl::gmp::getUnit(z, 0) * rp_; -#ifdef MCL_USE_VINT - z += p_ * q; -#else - mpz_class t; - mcl::gmp::set(t, q); - z += p_ * t; -#endif - z >>= sizeof(Unit) * 8; - } - if (z >= p_) { - z -= p_; - } -#endif - } - void mod(mpz_class& z, const mpz_class& xy) const - { - z = xy; - for (size_t i = 0; i < pn_; i++) { -//printf("i=%zd\n", i); -//std::cout << "z=" << std::hex << z << std::endl; - Unit q = mcl::gmp::getUnit(z, 0) * rp_; -//std::cout << "q=" << q << std::endl; - mpz_class t; - mcl::gmp::set(t, q); - z += p_ * t; - z >>= sizeof(Unit) * 8; -//std::cout << "z=" << std::hex << z << std::endl; - } - if (z >= p_) { - z -= p_; - } -//std::cout << "z=" << std::hex << z << std::endl; - } -}; - -template -mpz_class getMpz(const T& x) -{ - std::string str = x.getStr(); - mpz_class t; - mcl::gmp::setStr(t, str); - return t; -} - -template -std::string getStr(const T& x) -{ - std::ostringstream os; - os << x; - return os.str(); -} - -template -T castTo(const U& x) -{ - T t; - t.setStr(getStr(x)); - return t; -} - -template -void putRaw(const T& x) -{ - const uint64_t *p = x.getInnerValue(); - for (size_t i = 0, n = T::BlockSize; i < n; i++) { - printf("%016llx", p[n - 1 - i]); - } - printf("\n"); -} - -template -void put(const uint64_t (&x)[N]) -{ - for (size_t i = 0; i < N; i++) { - printf("%016llx", x[N - 1 - i]); - } - printf("\n"); -} - -struct Test { - typedef mcl::FpT<> Fp; - void run(const char *p) - { - Fp::init(p); - Fp x("-123456789"); - Fp y("-0x7ffffffff"); - CYBOZU_BENCH("add", operator+, x, x); - CYBOZU_BENCH("sub", operator-, x, y); - CYBOZU_BENCH("mul", operator*, x, x); - CYBOZU_BENCH("sqr", Fp::sqr, x, x); - CYBOZU_BENCH("div", y += x; operator/, x, y); - } -}; - -void customTest(const char *pStr, const char *xStr, const char *yStr) -{ -#if 0 - { - pStr = "0xfffffffffffffffffffffffffffffffffffffffeffffee37", - Fp::init(pStr); - static uint64_t x[3] = { 1, 0, 0 }; - uint64_t z[3]; -std::cout<= 521 -CYBOZU_TEST_AUTO(customTest) -{ - const struct { - const char *p; - const char *x; - const char *y; - } tbl[] = { - { - "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", -// "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", -// 
"0xfffffffffffffffffffffffffffffffffffffffeffffee37", - "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", - "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe" - }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - customTest(tbl[i].p, tbl[i].x, tbl[i].y); - } -} -#endif - -CYBOZU_TEST_AUTO(test) -{ - Test test; - const char *tbl[] = { -#if 1 - // N = 2 - "0x0000000000000001000000000000000d", - "0x7fffffffffffffffffffffffffffffff", - "0x8000000000000000000000000000001d", - "0xffffffffffffffffffffffffffffff61", - - // N = 3 - "0x000000000000000100000000000000000000000000000033", // min prime - "0x00000000fffffffffffffffffffffffffffffffeffffac73", - "0x0000000100000000000000000001b8fa16dfab9aca16b6b3", - "0x000000010000000000000000000000000000000000000007", - "0x30000000000000000000000000000000000000000000002b", - "0x70000000000000000000000000000000000000000000001f", - "0x800000000000000000000000000000000000000000000005", - "0xfffffffffffffffffffffffffffffffffffffffeffffee37", - "0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d", - "0xffffffffffffffffffffffffffffffffffffffffffffff13", // max prime - - // N = 4 - "0x0000000000000001000000000000000000000000000000000000000000000085", // min prime - "0x2523648240000001ba344d80000000086121000000000013a700000000000013", - "0x7523648240000001ba344d80000000086121000000000013a700000000000017", - "0x800000000000000000000000000000000000000000000000000000000000005f", - "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff43", // max prime -#endif - -#if MCL_MAX_BIT_SIZE >= 384 - // N = 6 - "0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab", - "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", -#endif - -#if MCL_MAX_BIT_SIZE >= 521 - // N = 9 - "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", -#endif - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - printf("prime=%s\n", tbl[i]); -#if 0 - mpz_class p(tbl[i]); - initPairing(mcl::BLS12_381); -#if 1 - cybozu::XorShift rg; - for (int i = 0; i < 1000; i++) { - Fp x, y, z; - FpDbl xy; - x.setByCSPRNG(rg); - y.setByCSPRNG(rg); - FpDbl::mulPre(xy, x, y); - FpDbl::mod(z, xy); - if (z != x * y) { - puts("ERR"); - std::cout << std::hex; - PUT(x); - PUT(y); - PUT(z); - PUT(x * y); - exit(1); - } - } -#else - Montgomery mont(p); - mpz_class x("19517141aafb2ffc39517141aafb2ffc39517141aafb2ffc39517141aafb2ffc39517141aafb2ffc39517141aafb2ffc", 16); - mpz_class y("139517141aafb2ffc39517141aafb2ffc39517141aafb2ffc39517141aafb2ffc39517141aafb2ffc39517141aafb2ff", 16); - std::cout << std::hex; - PUT(x); - PUT(y); - mpz_class z; - mont.mul(z, x, y); - PUT(z); - Fp x1, y1, z1; - puts("aaa"); - memcpy(&x1, mcl::gmp::getUnit(x), sizeof(x1)); - memcpy(&y1, mcl::gmp::getUnit(y), sizeof(y1)); - z1.clear(); - x1.dump(); - y1.dump(); - Fp::mul(z1, x1, y1); - z1.dump(); -#endif - exit(1); -#endif - test.run(tbl[i]); - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/paillier_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/paillier_test.cpp deleted file mode 100644 index 31d2b26fc..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/paillier_test.cpp +++ /dev/null @@ -1,24 +0,0 @@ -#include -#include - 
-CYBOZU_TEST_AUTO(paillier)
-{
-	using namespace mcl::paillier;
-	SecretKey sec;
-	sec.init(2048);
-	PublicKey pub;
-	sec.getPublicKey(pub);
-	mpz_class m1("12342340928409"), m2("23049820498204");
-	mpz_class c1, c2, c3;
-	pub.enc(c1, m1);
-	pub.enc(c2, m2);
-	std::cout << std::hex << "c1=" << c1 << "\nc2=" << c2 << std::endl;
-	pub.add(c3, c1, c2);
-	mpz_class d1, d2, d3;
-	sec.dec(d1, c1);
-	sec.dec(d2, c2);
-	sec.dec(d3, c3);
-	CYBOZU_TEST_EQUAL(m1, d1);
-	CYBOZU_TEST_EQUAL(m2, d2);
-	CYBOZU_TEST_EQUAL(m1 + m2, d3);
-}
diff --git a/vendor/github.com/dexon-foundation/mcl/test/proj/bn_test/bn_test.vcxproj b/vendor/github.com/dexon-foundation/mcl/test/proj/bn_test/bn_test.vcxproj
deleted file mode 100644
index 936e075aa..000000000
--- a/vendor/github.com/dexon-foundation/mcl/test/proj/bn_test/bn_test.vcxproj
+++ /dev/null
@@ -1,88 +0,0 @@
-[MSVC project XML, 88 lines: GUID {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}, Debug/Release x64, toolset v120; the markup was lost in extraction and only element text survived]
\ No newline at end of file
diff --git a/vendor/github.com/dexon-foundation/mcl/test/proj/ec_test/ec_test.vcxproj b/vendor/github.com/dexon-foundation/mcl/test/proj/ec_test/ec_test.vcxproj
deleted file mode 100644
index 4bdfda2cb..000000000
--- a/vendor/github.com/dexon-foundation/mcl/test/proj/ec_test/ec_test.vcxproj
+++ /dev/null
@@ -1,88 +0,0 @@
-[MSVC project XML, 88 lines: GUID {46B6E88E-739A-406B-9F68-BC46C5950FA3}, Debug/Release x64, toolset v120; markup lost in extraction]
diff --git a/vendor/github.com/dexon-foundation/mcl/test/proj/fp_test/fp_test.vcxproj b/vendor/github.com/dexon-foundation/mcl/test/proj/fp_test/fp_test.vcxproj
deleted file mode 100644
index f705982bf..000000000
--- a/vendor/github.com/dexon-foundation/mcl/test/proj/fp_test/fp_test.vcxproj
+++ /dev/null
@@ -1,88 +0,0 @@
-[MSVC project XML, 88 lines: GUID {51266DE6-B57B-4AE3-B85C-282F170E1728}, Debug/Release x64, toolset v120; markup lost in extraction]
\ No newline at end of file
diff --git a/vendor/github.com/dexon-foundation/mcl/test/proj/fp_tower_test/fp_tower_test.vcxproj b/vendor/github.com/dexon-foundation/mcl/test/proj/fp_tower_test/fp_tower_test.vcxproj
deleted file mode 100644
index d5720678f..000000000
--- a/vendor/github.com/dexon-foundation/mcl/test/proj/fp_tower_test/fp_tower_test.vcxproj
+++ /dev/null
@@ -1,88 +0,0 @@
-[MSVC project XML, 88 lines: GUID {733B6250-D249-4A99-B2A6-C8FAF6A90E97}, Debug/Release x64, toolset v120; markup lost in extraction]
\ No newline at end of file
diff --git a/vendor/github.com/dexon-foundation/mcl/test/she_c256_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/she_c256_test.cpp
deleted file mode 100644
index 3e458b623..000000000
--- a/vendor/github.com/dexon-foundation/mcl/test/she_c256_test.cpp
+++ /dev/null
@@ -1,2 +0,0 @@
-#define MCLBN_FP_UNIT_SIZE 4
-#include "she_c_test.hpp"
diff --git a/vendor/github.com/dexon-foundation/mcl/test/she_c384_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/she_c384_test.cpp
deleted file mode 100644
index 5c7bd9882..000000000
--- a/vendor/github.com/dexon-foundation/mcl/test/she_c384_test.cpp
+++ /dev/null
@@ -1,2 +0,0 @@
-#define MCLBN_FP_UNIT_SIZE 6
-#include "she_c_test.hpp"
diff --git a/vendor/github.com/dexon-foundation/mcl/test/she_c_test.hpp b/vendor/github.com/dexon-foundation/mcl/test/she_c_test.hpp
deleted file mode 100644
index 8287c0e0a..000000000
--- a/vendor/github.com/dexon-foundation/mcl/test/she_c_test.hpp
+++ /dev/null
@@ -1,535 +0,0 @@
-#include
-#define CYBOZU_TEST_DISABLE_AUTO_RUN
-#include
-#include
-#include
-
-const size_t hashSize = 1 << 10;
-const size_t tryNum = 1024;
-
-CYBOZU_TEST_AUTO(init)
-{
-	int curve;
-#if MCLBN_FP_UNIT_SIZE == 4
-	curve = MCL_BN254;
-#elif MCLBN_FP_UNIT_SIZE == 6
-//	curve = MCL_BN381_1;
-	curve = MCL_BLS12_381;
-#elif MCLBN_FP_UNIT_SIZE == 8
-	curve = MCL_BN462;
-#endif
-	int ret;
-	ret = sheInit(curve, MCLBN_COMPILED_TIME_VAR);
-	CYBOZU_TEST_EQUAL(ret, 0);
-	ret = sheSetRangeForDLP(hashSize);
-	CYBOZU_TEST_EQUAL(ret, 0);
-}
-
-CYBOZU_TEST_AUTO(encDec)
-{
-	sheSecretKey sec;
-	sheSecretKeySetByCSPRNG(&sec);
-	shePublicKey pub;
-	sheGetPublicKey(&pub, &sec);
-
-	int64_t m = 123;
-	sheCipherTextG1 c1;
-	sheCipherTextG2 c2;
-	sheCipherTextGT ct;
-	sheEncG1(&c1, &pub, m);
-	sheEncG2(&c2, &pub, m);
-	sheEncGT(&ct, &pub, m);
-
-	int64_t dec;
-	CYBOZU_TEST_EQUAL(sheDecG1(&dec, &sec, &c1), 0);
-	CYBOZU_TEST_EQUAL(dec, m);
-	dec = 0;
-	CYBOZU_TEST_EQUAL(sheDecG1ViaGT(&dec, &sec, &c1), 0);
-	CYBOZU_TEST_EQUAL(dec, m);
-	dec = 0;
-	CYBOZU_TEST_EQUAL(sheDecG2(&dec, &sec, &c2), 0);
-	CYBOZU_TEST_EQUAL(dec, m);
-	dec = 0;
-	CYBOZU_TEST_EQUAL(sheDecG2ViaGT(&dec, &sec, &c2), 0);
-	CYBOZU_TEST_EQUAL(dec, m);
-	dec = 0;
-	CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0);
-	CYBOZU_TEST_EQUAL(dec, m);
-
-	for (int m = -3; m < 3; m++) {
-		sheEncG1(&c1, &pub, m);
-		CYBOZU_TEST_EQUAL(sheIsZeroG1(&sec, &c1), m == 0);
-		sheEncG2(&c2, &pub, m);
-		CYBOZU_TEST_EQUAL(sheIsZeroG2(&sec, &c2), m == 0);
-		sheEncGT(&ct, &pub, m);
-		CYBOZU_TEST_EQUAL(sheIsZeroGT(&sec, &ct), m == 0);
-	}
-}
-
-CYBOZU_TEST_AUTO(addMul)
-{
-	sheSecretKey sec;
-	sheSecretKeySetByCSPRNG(&sec);
-	shePublicKey pub;
-	sheGetPublicKey(&pub, &sec);
-
-	int64_t m1 = 12;
-	int64_t m2 = -9;
-	sheCipherTextG1 c1;
-	sheCipherTextG2 c2;
-	sheCipherTextGT ct;
-	sheEncG1(&c1, &pub, m1);
-	sheEncG2(&c2, &pub, m2);
-	sheMul(&ct, &c1, &c2);
-
-	int64_t dec;
-	CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0);
-	CYBOZU_TEST_EQUAL(dec, m1 * m2);
-}
-
-CYBOZU_TEST_AUTO(allOp)
-{
-	sheSecretKey sec;
-	sheSecretKeySetByCSPRNG(&sec);
-	shePublicKey pub;
-	sheGetPublicKey(&pub,
&sec); - - int64_t m1 = 12; - int64_t m2 = -9; - int64_t m3 = 12; - int64_t m4 = -9; - int64_t dec; - sheCipherTextG1 c11, c12; - sheCipherTextG2 c21, c22; - sheCipherTextGT ct; - - sheEncG1(&c11, &pub, m1); - sheNegG1(&c12, &c11); - CYBOZU_TEST_EQUAL(sheDecG1(&dec, &sec, &c12), 0); - CYBOZU_TEST_EQUAL(dec, -m1); - - sheEncG1(&c12, &pub, m2); - sheSubG1(&c11, &c11, &c12); // m1 - m2 - sheMulG1(&c11, &c11, 4); // 4 * (m1 - m2) - - sheEncG2(&c21, &pub, m3); - sheNegG2(&c22, &c21); - CYBOZU_TEST_EQUAL(sheDecG2(&dec, &sec, &c22), 0); - CYBOZU_TEST_EQUAL(dec, -m3); - sheEncG2(&c22, &pub, m4); - sheSubG2(&c21, &c21, &c22); // m3 - m4 - sheMulG2(&c21, &c21, -5); // -5 * (m3 - m4) - sheMul(&ct, &c11, &c21); // -20 * (m1 - m2) * (m3 - m4) - sheAddGT(&ct, &ct, &ct); // -40 * (m1 - m2) * (m3 - m4) - sheMulGT(&ct, &ct, -4); // 160 * (m1 - m2) * (m3 - m4) - - int64_t t = 160 * (m1 - m2) * (m3 - m4); - CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0); - CYBOZU_TEST_EQUAL(dec, t); - - sheCipherTextGT ct2; - sheNegGT(&ct2, &ct); - CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct2), 0); - CYBOZU_TEST_EQUAL(dec, -t); -} - -CYBOZU_TEST_AUTO(rerand) -{ - sheSecretKey sec; - sheSecretKeySetByCSPRNG(&sec); - shePublicKey pub; - sheGetPublicKey(&pub, &sec); - - int64_t m1 = 12; - int64_t m2 = -9; - int64_t m3 = 12; - sheCipherTextG1 c1; - sheCipherTextG2 c2; - sheCipherTextGT ct1, ct2; - sheEncG1(&c1, &pub, m1); - sheReRandG1(&c1, &pub); - - sheEncG2(&c2, &pub, m2); - sheReRandG2(&c2, &pub); - - sheEncGT(&ct1, &pub, m3); - sheReRandGT(&ct1, &pub); - - sheMul(&ct2, &c1, &c2); - sheReRandGT(&ct2, &pub); - sheAddGT(&ct1, &ct1, &ct2); - - int64_t dec; - CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct1), 0); - CYBOZU_TEST_EQUAL(dec, m1 * m2 + m3); -} - -CYBOZU_TEST_AUTO(serialize) -{ - sheSecretKey sec1, sec2; - sheSecretKeySetByCSPRNG(&sec1); - shePublicKey pub1, pub2; - sheGetPublicKey(&pub1, &sec1); - - char buf1[4096], buf2[4096]; - size_t n1, n2; - size_t r, size; - const size_t sizeofFr = mclBn_getFrByteSize(); - const size_t sizeofFp = mclBn_getG1ByteSize(); - - size = sizeofFr * 2; - n1 = sheSecretKeySerialize(buf1, sizeof(buf1), &sec1); - CYBOZU_TEST_EQUAL(n1, size); - r = sheSecretKeyDeserialize(&sec2, buf1, n1); - CYBOZU_TEST_EQUAL(r, n1); - n2 = sheSecretKeySerialize(buf2, sizeof(buf2), &sec2); - CYBOZU_TEST_EQUAL(n2, size); - CYBOZU_TEST_EQUAL_ARRAY(buf1, buf2, n2); - - size = sizeofFp * 3; - n1 = shePublicKeySerialize(buf1, sizeof(buf1), &pub1); - CYBOZU_TEST_EQUAL(n1, size); - r = shePublicKeyDeserialize(&pub2, buf1, n1); - CYBOZU_TEST_EQUAL(r, n1); - n2 = shePublicKeySerialize(buf2, sizeof(buf2), &pub2); - CYBOZU_TEST_EQUAL(n2, size); - CYBOZU_TEST_EQUAL_ARRAY(buf1, buf2, n2); - - int m = 123; - sheCipherTextG1 c11, c12; - sheCipherTextG2 c21, c22; - sheCipherTextGT ct1, ct2; - sheEncG1(&c11, &pub2, m); - sheEncG2(&c21, &pub2, m); - sheEncGT(&ct1, &pub2, m); - - size = sizeofFp * 2; - n1 = sheCipherTextG1Serialize(buf1, sizeof(buf1), &c11); - CYBOZU_TEST_EQUAL(n1, size); - r = sheCipherTextG1Deserialize(&c12, buf1, n1); - CYBOZU_TEST_EQUAL(r, n1); - n2 = sheCipherTextG1Serialize(buf2, sizeof(buf2), &c12); - CYBOZU_TEST_EQUAL(n2, size); - CYBOZU_TEST_EQUAL_ARRAY(buf1, buf2, n2); - - size = sizeofFp * 4; - n1 = sheCipherTextG2Serialize(buf1, sizeof(buf1), &c21); - CYBOZU_TEST_EQUAL(n1, size); - r = sheCipherTextG2Deserialize(&c22, buf1, n1); - CYBOZU_TEST_EQUAL(r, n1); - n2 = sheCipherTextG2Serialize(buf2, sizeof(buf2), &c22); - CYBOZU_TEST_EQUAL(n2, size); - CYBOZU_TEST_EQUAL_ARRAY(buf1, buf2, n2); - - size = 
sizeofFp * 12 * 4; - n1 = sheCipherTextGTSerialize(buf1, sizeof(buf1), &ct1); - CYBOZU_TEST_EQUAL(n1, size); - r = sheCipherTextGTDeserialize(&ct2, buf1, n1); - CYBOZU_TEST_EQUAL(r, n1); - n2 = sheCipherTextGTSerialize(buf2, sizeof(buf2), &ct2); - CYBOZU_TEST_EQUAL(n2, size); - CYBOZU_TEST_EQUAL_ARRAY(buf1, buf2, n2); -} - -CYBOZU_TEST_AUTO(convert) -{ - sheSecretKey sec; - sheSecretKeySetByCSPRNG(&sec); - shePublicKey pub; - sheGetPublicKey(&pub, &sec); - sheCipherTextGT ct; - const int64_t m = 123; - int64_t dec; - sheCipherTextG1 c1; - sheEncG1(&c1, &pub, m); - CYBOZU_TEST_EQUAL(sheDecG1(&dec, &sec, &c1), 0); - CYBOZU_TEST_EQUAL(dec, 123); - sheConvertG1(&ct, &pub, &c1); - dec = 0; - CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0); - CYBOZU_TEST_EQUAL(dec, 123); - - sheCipherTextG2 c2; - sheEncG2(&c2, &pub, m); - CYBOZU_TEST_EQUAL(sheDecG2(&dec, &sec, &c2), 0); - CYBOZU_TEST_EQUAL(dec, 123); - sheConvertG2(&ct, &pub, &c2); - dec = 0; - CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0); - CYBOZU_TEST_EQUAL(dec, 123); -} - -CYBOZU_TEST_AUTO(precomputed) -{ - sheSecretKey sec; - sheSecretKeySetByCSPRNG(&sec); - shePublicKey pub; - sheGetPublicKey(&pub, &sec); - shePrecomputedPublicKey *ppub = shePrecomputedPublicKeyCreate(); - CYBOZU_TEST_EQUAL(shePrecomputedPublicKeyInit(ppub, &pub), 0); - const int64_t m = 152; - sheCipherTextG1 c1; - sheCipherTextG2 c2; - sheCipherTextGT ct; - int64_t dec = 0; - CYBOZU_TEST_EQUAL(shePrecomputedPublicKeyEncG1(&c1, ppub, m), 0); - CYBOZU_TEST_EQUAL(sheDecG1(&dec, &sec, &c1), 0); - CYBOZU_TEST_EQUAL(dec, m); - dec = 0; - CYBOZU_TEST_EQUAL(shePrecomputedPublicKeyEncG2(&c2, ppub, m), 0); - CYBOZU_TEST_EQUAL(sheDecG2(&dec, &sec, &c2), 0); - CYBOZU_TEST_EQUAL(dec, m); - dec = 0; - CYBOZU_TEST_EQUAL(shePrecomputedPublicKeyEncGT(&ct, ppub, m), 0); - CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0); - CYBOZU_TEST_EQUAL(dec, m); - - shePrecomputedPublicKeyDestroy(ppub); -} - -template -void ZkpBinTest(const sheSecretKey *sec, const PK *pub, encWithZkpFunc encWithZkp, decFunc dec, verifyFunc verify) -{ - CT c; - sheZkpBin zkp; - for (int m = 0; m < 2; m++) { - CYBOZU_TEST_EQUAL(encWithZkp(&c, &zkp, pub, m), 0); - mclInt mDec; - CYBOZU_TEST_EQUAL(dec(&mDec, sec, &c), 0); - CYBOZU_TEST_EQUAL(mDec, m); - CYBOZU_TEST_EQUAL(verify(pub, &c, &zkp), 1); - { - char buf[4096]; - size_t n = sheZkpBinSerialize(buf, sizeof(buf), &zkp); - CYBOZU_TEST_EQUAL(n, mclBn_getFrByteSize() * CYBOZU_NUM_OF_ARRAY(zkp.d)); - sheZkpBin zkp2; - size_t r = sheZkpBinDeserialize(&zkp2, buf, n); - CYBOZU_TEST_EQUAL(r, n); - CYBOZU_TEST_EQUAL(r, n); - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(zkp.d); i++) { - CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&zkp.d[i], &zkp2.d[i])); - } - } - zkp.d[0].d[0]++; - CYBOZU_TEST_EQUAL(verify(pub, &c, &zkp), 0); - } - CYBOZU_TEST_ASSERT(encWithZkp(&c, &zkp, pub, 2) != 0); -} - -CYBOZU_TEST_AUTO(ZkpBin) -{ - sheSecretKey sec; - sheSecretKeySetByCSPRNG(&sec); - shePublicKey pub; - sheGetPublicKey(&pub, &sec); - - ZkpBinTest(&sec, &pub, sheEncWithZkpBinG1, sheDecG1, sheVerifyZkpBinG1); - ZkpBinTest(&sec, &pub, sheEncWithZkpBinG2, sheDecG2, sheVerifyZkpBinG2); - - shePrecomputedPublicKey *ppub = shePrecomputedPublicKeyCreate(); - CYBOZU_TEST_EQUAL(shePrecomputedPublicKeyInit(ppub, &pub), 0); - - ZkpBinTest(&sec, ppub, shePrecomputedPublicKeyEncWithZkpBinG1, sheDecG1, shePrecomputedPublicKeyVerifyZkpBinG1); - ZkpBinTest(&sec, ppub, shePrecomputedPublicKeyEncWithZkpBinG2, sheDecG2, shePrecomputedPublicKeyVerifyZkpBinG2); - - shePrecomputedPublicKeyDestroy(ppub); -} - -template 
-void ZkpBinEqTest(const sheSecretKey *sec, const PK *pub, encWithZkpFunc encWithZkp, verifyFunc verify) -{ - sheCipherTextG1 c1; - sheCipherTextG2 c2; - sheZkpBinEq zkp; - for (int m = 0; m < 2; m++) { - CYBOZU_TEST_EQUAL(encWithZkp(&c1, &c2, &zkp, pub, m), 0); - mclInt mDec = -1; - CYBOZU_TEST_EQUAL(sheDecG1(&mDec, sec, &c1), 0); - CYBOZU_TEST_EQUAL(mDec, m); - mDec = -1; - CYBOZU_TEST_EQUAL(sheDecG2(&mDec, sec, &c2), 0); - CYBOZU_TEST_EQUAL(mDec, m); - CYBOZU_TEST_EQUAL(verify(pub, &c1, &c2, &zkp), 1); - { - char buf[2048]; - size_t n = sheZkpBinEqSerialize(buf, sizeof(buf), &zkp); - CYBOZU_TEST_EQUAL(n, mclBn_getFrByteSize() * CYBOZU_NUM_OF_ARRAY(zkp.d)); - sheZkpBinEq zkp2; - size_t r = sheZkpBinEqDeserialize(&zkp2, buf, n); - CYBOZU_TEST_EQUAL(r, n); - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(zkp.d); i++) { - CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&zkp.d[i], &zkp2.d[i])); - } - } - zkp.d[0].d[0]++; - CYBOZU_TEST_EQUAL(verify(pub, &c1, &c2, &zkp), 0); - } - CYBOZU_TEST_ASSERT(encWithZkp(&c1, &c2, &zkp, pub, 2) != 0); -} - -CYBOZU_TEST_AUTO(ZkpBinEq) -{ - sheSecretKey sec; - sheSecretKeySetByCSPRNG(&sec); - shePublicKey pub; - sheGetPublicKey(&pub, &sec); - - ZkpBinEqTest(&sec, &pub, sheEncWithZkpBinEq, sheVerifyZkpBinEq); - - shePrecomputedPublicKey *ppub = shePrecomputedPublicKeyCreate(); - CYBOZU_TEST_EQUAL(shePrecomputedPublicKeyInit(ppub, &pub), 0); - - ZkpBinEqTest(&sec, ppub, shePrecomputedPublicKeyEncWithZkpBinEq, shePrecomputedPublicKeyVerifyZkpBinEq); - - shePrecomputedPublicKeyDestroy(ppub); -} - -template -void ZkpEqTest(const sheSecretKey *sec, const PK *pub, encWithZkpFunc encWithZkp, verifyFunc verify) -{ - sheCipherTextG1 c1; - sheCipherTextG2 c2; - sheZkpEq zkp; - for (int m = -5; m < 5; m++) { - CYBOZU_TEST_EQUAL(encWithZkp(&c1, &c2, &zkp, pub, m), 0); - mclInt mDec = -1; - CYBOZU_TEST_EQUAL(sheDecG1(&mDec, sec, &c1), 0); - CYBOZU_TEST_EQUAL(mDec, m); - mDec = -1; - CYBOZU_TEST_EQUAL(sheDecG2(&mDec, sec, &c2), 0); - CYBOZU_TEST_EQUAL(mDec, m); - CYBOZU_TEST_EQUAL(verify(pub, &c1, &c2, &zkp), 1); - { - char buf[2048]; - size_t n = sheZkpEqSerialize(buf, sizeof(buf), &zkp); - CYBOZU_TEST_EQUAL(n, mclBn_getFrByteSize() * CYBOZU_NUM_OF_ARRAY(zkp.d)); - sheZkpEq zkp2; - size_t r = sheZkpEqDeserialize(&zkp2, buf, n); - CYBOZU_TEST_EQUAL(r, n); - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(zkp.d); i++) { - CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&zkp.d[i], &zkp2.d[i])); - } - } - zkp.d[0].d[0]++; - CYBOZU_TEST_EQUAL(verify(pub, &c1, &c2, &zkp), 0); - } -} - -CYBOZU_TEST_AUTO(ZkpEq) -{ - sheSecretKey sec; - sheSecretKeySetByCSPRNG(&sec); - shePublicKey pub; - sheGetPublicKey(&pub, &sec); - - ZkpEqTest(&sec, &pub, sheEncWithZkpEq, sheVerifyZkpEq); - - shePrecomputedPublicKey *ppub = shePrecomputedPublicKeyCreate(); - CYBOZU_TEST_EQUAL(shePrecomputedPublicKeyInit(ppub, &pub), 0); - - ZkpEqTest(&sec, ppub, shePrecomputedPublicKeyEncWithZkpEq, shePrecomputedPublicKeyVerifyZkpEq); - - shePrecomputedPublicKeyDestroy(ppub); -} - -CYBOZU_TEST_AUTO(finalExp) -{ - sheSecretKey sec; - sheSecretKeySetByCSPRNG(&sec); - shePublicKey pub; - sheGetPublicKey(&pub, &sec); - const int64_t m11 = 5; - const int64_t m12 = 7; - const int64_t m21 = -3; - const int64_t m22 = 9; - sheCipherTextG1 c11, c12; - sheCipherTextG2 c21, c22; - sheCipherTextGT ct1, ct2; - sheCipherTextGT ct; - sheEncG1(&c11, &pub, m11); - sheEncG1(&c12, &pub, m12); - sheEncG2(&c21, &pub, m21); - sheEncG2(&c22, &pub, m22); - - int64_t dec; - // sheMul = sheMulML + sheFinalExpGT - sheMul(&ct1, &c11, &c21); - 
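// [editor's note] The checks below rely on the final exponentiation being
// compatible with multiplication: sheMul(a, b) == sheFinalExpGT(sheMulML(a, b))
// and FE(x) * FE(y) == FE(x * y), so a sum of products needs only one final
// exponentiation. A hedged sketch of an encrypted "dot product" built on the
// C API exercised in this file; the helper name is ours, and we assume the
// she C header this file includes (its name was lost in extraction).
static void sheDotProductSketch(sheCipherTextGT *out,
	const sheCipherTextG1 *cs1, const sheCipherTextG2 *cs2, size_t n)
{
	if (n == 0) return;
	sheCipherTextGT acc, t;
	sheMulML(&acc, &cs1[0], &cs2[0]); // Miller loop only, no final exp yet
	for (size_t i = 1; i < n; i++) {
		sheMulML(&t, &cs1[i], &cs2[i]);
		sheAddGT(&acc, &acc, &t);      // add in the pre-finalExp form
	}
	sheFinalExpGT(out, &acc);          // one final exponentiation for all n terms
}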
CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct1), 0); - CYBOZU_TEST_EQUAL(dec, m11 * m21); - - sheMulML(&ct1, &c11, &c21); - sheFinalExpGT(&ct, &ct1); - CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0); - CYBOZU_TEST_EQUAL(dec, m11 * m21); - - sheMulML(&ct2, &c12, &c22); - sheFinalExpGT(&ct, &ct2); - CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0); - CYBOZU_TEST_EQUAL(dec, m12 * m22); - - /* - Mul(c11, c21) + Mul(c21, c22) - = finalExp(ML(c11, c21) + ML(c21, c22)) - */ - sheAddGT(&ct, &ct1, &ct2); - sheFinalExpGT(&ct, &ct); - CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0); - CYBOZU_TEST_EQUAL(dec, (m11 * m21) + (m12 * m22)); -} - -int g_hashBitSize = 8; -std::string g_tableName; - -CYBOZU_TEST_AUTO(saveLoad) -{ - sheSecretKey sec; - sheSecretKeySetByCSPRNG(&sec); - shePublicKey pub; - sheGetPublicKey(&pub, &sec); - const size_t hashSize = 1 << g_hashBitSize; - const size_t byteSizePerEntry = 8; - sheSetRangeForGTDLP(hashSize); - std::string buf; - buf.resize(hashSize * byteSizePerEntry + 1024); - const size_t n1 = sheSaveTableForGTDLP(&buf[0], buf.size()); - CYBOZU_TEST_ASSERT(n1 > 0); - if (!g_tableName.empty()) { - printf("use table=%s\n", g_tableName.c_str()); - std::ofstream ofs(g_tableName.c_str(), std::ios::binary); - ofs.write(buf.c_str(), n1); - } - const int64_t m = hashSize - 1; - sheCipherTextGT ct; - CYBOZU_TEST_ASSERT(sheEncGT(&ct, &pub, m) == 0); - sheSetRangeForGTDLP(1); - sheSetTryNum(1); - int64_t dec = 0; - CYBOZU_TEST_ASSERT(sheDecGT(&dec, &sec, &ct) != 0); - if (!g_tableName.empty()) { - std::ifstream ifs(g_tableName.c_str(), std::ios::binary); - buf.clear(); - buf.resize(n1); - ifs.read(&buf[0], n1); - } - const size_t n2 = sheLoadTableForGTDLP(&buf[0], n1); - CYBOZU_TEST_ASSERT(n2 > 0); - CYBOZU_TEST_ASSERT(sheDecGT(&dec, &sec, &ct) == 0); - CYBOZU_TEST_EQUAL(dec, m); -} - -int main(int argc, char *argv[]) - try -{ - cybozu::Option opt; - opt.appendOpt(&g_hashBitSize, 8, "bit", ": hashBitSize"); - opt.appendOpt(&g_tableName, "", "f", ": table name"); - opt.appendHelp("h", ": show this message"); - if (!opt.parse(argc, argv)) { - opt.usage(); - return 1; - } - return cybozu::test::autoRun.run(argc, argv); -} catch (std::exception& e) { - printf("ERR %s\n", e.what()); - return 1; -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/she_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/she_test.cpp deleted file mode 100644 index 9292c35f4..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/she_test.cpp +++ /dev/null @@ -1,756 +0,0 @@ -#define PUT(x) std::cout << #x << "=" << (x) << std::endl; -#include -#include -#include -#include -#include -#include -#include // for secp192k1 - -using namespace mcl::she; - -SecretKey g_sec; - -CYBOZU_TEST_AUTO(log) -{ -#if MCLBN_FP_UNIT_SIZE == 4 - const mcl::CurveParam& cp = mcl::BN254; - puts("BN254"); -#elif MCLBN_FP_UNIT_SIZE == 6 - const mcl::CurveParam& cp = mcl::BN381_1; - puts("BN381_1"); -#elif MCLBN_FP_UNIT_SIZE == 8 - const mcl::CurveParam& cp = mcl::BN462; - puts("BN462"); -#endif - init(cp); - G1 P; - hashAndMapToG1(P, "abc"); - for (int i = -5; i < 5; i++) { - G1 iP; - G1::mul(iP, P, i); - CYBOZU_TEST_EQUAL(mcl::she::local::log(P, iP), i); - } -} - -//#define PAPER -#ifdef PAPER -double clk2msec(const cybozu::CpuClock& clk, int n) -{ - const double rate = (1 / 3.4e9) * 1.e3; // 3.4GHz - return clk.getClock() / (double)clk.getCount() / n * rate; -} - -CYBOZU_TEST_AUTO(bench2) -{ - puts("msec"); - setTryNum(1 << 16); - useDecG1ViaGT(true); - useDecG2ViaGT(true); -#if 0 - setRangeForDLP(1 << 21); -#else - { 
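// [editor's note] bench2 loads a precomputed GT discrete-log table from disk
// instead of calling setRangeForDLP. The range/cost tradeoff, using this
// file's own parameters (the "0-20" in the table file name suggests 2^20
// entries; that reading is our inference): with setTryNum(1 << 16), dec can
// recover |m| up to tableSize * tryNum with at most tryNum table probes.
#include <cstdint>

// worst-case recoverable message magnitude for a given table
inline uint64_t dlpMaxAbsMessage(uint64_t tableSize, uint64_t tryNum)
{
	return tableSize * tryNum;
}
// e.g. dlpMaxAbsMessage(1 << 20, 1 << 16) == 1ULL << 36, comfortably covering
// the m = (1ll << 31) - 12345 used below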
- const char *tblName = "../she-dlp-table/she-dlp-0-20-gt.bin"; - std::ifstream ifs(tblName, std::ios::binary); - getHashTableGT().load(ifs); - } -#endif - SecretKey sec; - sec.setByCSPRNG(); - PublicKey pub; - sec.getPublicKey(pub); - PrecomputedPublicKey ppub; - ppub.init(pub); - const int C = 500; - double t1, t2; - int64_t m = (1ll << 31) - 12345; - CipherTextG1 c1, d1; - CipherTextG2 c2, d2; - CipherTextGT ct, dt; - CYBOZU_BENCH_C("", C, ppub.enc, c1, m); - t1 = clk2msec(cybozu::bench::g_clk, C); - CYBOZU_TEST_EQUAL(sec.dec(c1), m); - - CYBOZU_BENCH_C("", C, ppub.enc, c2, m); - t2 = clk2msec(cybozu::bench::g_clk, C); - CYBOZU_TEST_EQUAL(sec.dec(c2), m); - printf("Enc G1 %.2e\n", t1); - printf("Enc G2 %.2e\n", t2); - printf("Enc L1(G1+G2) %.2e\n", t1 + t2); - - CYBOZU_BENCH_C("", C, ppub.enc, ct, m); - t1 = clk2msec(cybozu::bench::g_clk, C); - CYBOZU_TEST_EQUAL(sec.dec(ct), m); - printf("Enc L2 %.2e\n", t1); - - CYBOZU_BENCH_C("", C, sec.dec, c1); - t1 = clk2msec(cybozu::bench::g_clk, C); - printf("Dec L1 %.2e\n", t1); - - CYBOZU_BENCH_C("", C, sec.dec, ct); - t1 = clk2msec(cybozu::bench::g_clk, C); - printf("Dec L2 %.2e\n", t1); - - pub.enc(ct, 1234); - CYBOZU_BENCH_C("", C, sec.dec, ct); - t1 = clk2msec(cybozu::bench::g_clk, C); - printf("Dec L2(small) %.2e\n", t1); - - CYBOZU_BENCH_C("", C, add, d1, d1, c1); - t1 = clk2msec(cybozu::bench::g_clk, C); - - CYBOZU_BENCH_C("", C, add, d2, d2, c2); - t2 = clk2msec(cybozu::bench::g_clk, C); - printf("Add G1 %.2e\n", t1); - printf("Add G2 %.2e\n", t2); - printf("Add L1(G1+G2) %.2e\n", t1 + t2); - - CYBOZU_BENCH_C("", C, add, dt, dt, ct); - t1 = clk2msec(cybozu::bench::g_clk, C); - printf("Add L2 %.2e\n", t1); - - CYBOZU_BENCH_C("", C, mul, ct, c1, c2); - t1 = clk2msec(cybozu::bench::g_clk, C); - printf("Mul %.2e\n", t1); - - CYBOZU_BENCH_C("", C, ppub.reRand, c1); - t1 = clk2msec(cybozu::bench::g_clk, C); - CYBOZU_BENCH_C("", C, ppub.reRand, c2); - t2 = clk2msec(cybozu::bench::g_clk, C); - printf("ReRand G1 %.2e\n", t1); - printf("ReRand G2 %.2e\n", t2); - printf("ReRand L1(G1+G2) %.2e\n", t1 + t2); - - CYBOZU_BENCH_C("", C, ppub.reRand, ct); - t1 = clk2msec(cybozu::bench::g_clk, C); - printf("ReRand L2 %.2e\n", t1); -} -#endif - -template -void GAHashTableTest(int maxSize, int tryNum, const G& P, const HashTbl& hashTbl) -{ - for (int i = -maxSize; i <= maxSize; i++) { - G xP; - G::mul(xP, P, i); - CYBOZU_TEST_EQUAL(hashTbl.basicLog(xP), i); - } - for (int i = -maxSize * tryNum; i <= maxSize * tryNum; i++) { - G xP; - G::mul(xP, P, i); - CYBOZU_TEST_EQUAL(hashTbl.log(xP), i); - } -} - -template -void HashTableTest(const G& P) -{ - mcl::she::local::HashTable hashTbl, hashTbl2; - const int maxSize = 100; - const int tryNum = 3; - hashTbl.init(P, maxSize, tryNum); - GAHashTableTest(maxSize, tryNum, P, hashTbl); - std::stringstream ss; - hashTbl.save(ss); - hashTbl2.load(ss); - GAHashTableTest(maxSize, tryNum, P, hashTbl2); -} - -CYBOZU_TEST_AUTO(HashTable) -{ - G1 P; - hashAndMapToG1(P, "abc"); - G2 Q; - hashAndMapToG2(Q, "abc"); - HashTableTest(P); - HashTableTest(Q); -} - -template -void GTHashTableTest(int maxSize, int tryNum, const GT& g, const HashTbl& hashTbl) -{ - for (int i = -maxSize; i <= maxSize; i++) { - GT gx; - GT::pow(gx, g, i); - CYBOZU_TEST_EQUAL(hashTbl.basicLog(gx), i); - } - for (int i = -maxSize * tryNum; i <= maxSize * tryNum; i++) { - GT gx; - GT::pow(gx, g, i); - CYBOZU_TEST_EQUAL(hashTbl.log(gx), i); - } -} - -CYBOZU_TEST_AUTO(GTHashTable) -{ - mcl::she::local::HashTable hashTbl, hashTbl2; - GT g; - { - G1 P; - 
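// [editor's note] HashTable::basicLog/log in the tests above solve a
// small-range discrete log by table lookup plus a bounded number of
// correction steps. The classic standalone form of that idea is baby-step
// giant-step; a minimal sketch over Z/101 (similar in spirit to mcl's table,
// not its actual code; all names here are ours).
#include <cstdint>
#include <unordered_map>
#include <cassert>

static uint64_t powmodSketch(uint64_t g, uint64_t e, uint64_t p)
{
	uint64_t r = 1 % p;
	for (g %= p; e; e >>= 1, g = g * g % p) {
		if (e & 1) r = r * g % p;
	}
	return r;
}

// find x in [0, m*m) with g^x == h (mod p), or return -1
static int64_t bsgsSketch(uint64_t g, uint64_t h, uint64_t p, uint64_t m)
{
	std::unordered_map<uint64_t, uint64_t> baby; // g^j -> j, the "hash table"
	uint64_t gj = 1 % p;
	for (uint64_t j = 0; j < m; j++) {
		baby.emplace(gj, j);
		gj = gj * g % p;
	}
	const uint64_t gInvM = powmodSketch(powmodSketch(g, p - 2, p), m, p); // g^-m, Fermat
	uint64_t y = h % p;
	for (uint64_t i = 0; i < m; i++) {           // the "tryNum" correction steps
		auto it = baby.find(y);
		if (it != baby.end()) return int64_t(i * m + it->second);
		y = y * gInvM % p;
	}
	return -1;
}

void bsgsSketchCheck()
{
	const uint64_t p = 101, g = 2;               // 2 generates (Z/101)^*
	assert(bsgsSketch(g, powmodSketch(g, 77, p), p, 10) == 77);
}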
hashAndMapToG1(P, "abc"); - G2 Q; - hashAndMapToG2(Q, "abc"); - pairing(g, P, Q); - } - const int maxSize = 100; - const int tryNum = 3; - hashTbl.init(g, maxSize, tryNum); - GTHashTableTest(maxSize, tryNum, g, hashTbl); - std::stringstream ss; - hashTbl.save(ss); - hashTbl2.load(ss); - GTHashTableTest(maxSize, tryNum, g, hashTbl2); -} - -CYBOZU_TEST_AUTO(enc_dec) -{ - SecretKey& sec = g_sec; - sec.setByCSPRNG(); - setRangeForDLP(1024); - PublicKey pub; - sec.getPublicKey(pub); - CipherText c; - for (int i = -5; i < 5; i++) { - pub.enc(c, i); - CYBOZU_TEST_EQUAL(sec.dec(c), i); - pub.reRand(c); - CYBOZU_TEST_EQUAL(sec.dec(c), i); - } - PrecomputedPublicKey ppub; - ppub.init(pub); - CipherTextG1 c1; - CipherTextG2 c2; - CipherTextGT ct1, ct2; - for (int i = -5; i < 5; i++) { - pub.enc(ct1, i); - CYBOZU_TEST_EQUAL(sec.dec(ct1), i); - CYBOZU_TEST_EQUAL(sec.isZero(ct1), i == 0); - ppub.enc(ct2, i); - CYBOZU_TEST_EQUAL(sec.dec(ct2), i); - ppub.enc(c1, i); - CYBOZU_TEST_EQUAL(sec.dec(c1), i); - CYBOZU_TEST_EQUAL(sec.decViaGT(c1), i); - CYBOZU_TEST_EQUAL(sec.isZero(c1), i == 0); - ct1.clear(); - pub.convert(ct1, c1); - CYBOZU_TEST_EQUAL(sec.dec(ct1), i); - ppub.enc(c2, i); - CYBOZU_TEST_EQUAL(sec.dec(c2), i); - CYBOZU_TEST_EQUAL(sec.decViaGT(c2), i); - CYBOZU_TEST_EQUAL(sec.isZero(c2), i == 0); - ct1.clear(); - pub.convert(ct1, c2); - CYBOZU_TEST_EQUAL(sec.dec(ct1), i); - pub.enc(c, i); - CYBOZU_TEST_EQUAL(sec.isZero(c), i == 0); - } -} - -template -void ZkpBinTest(const SecretKey& sec, const PK& pub) -{ - CT c; - ZkpBin zkp; - for (int m = 0; m < 2; m++) { - pub.encWithZkpBin(c, zkp, m); - CYBOZU_TEST_EQUAL(sec.dec(c), m); - CYBOZU_TEST_ASSERT(pub.verify(c, zkp)); - zkp.d_[0] += 1; - CYBOZU_TEST_ASSERT(!pub.verify(c, zkp)); - } - CYBOZU_TEST_EXCEPTION(pub.encWithZkpBin(c, zkp, 2), cybozu::Exception); -} -CYBOZU_TEST_AUTO(ZkpBin) -{ - const SecretKey& sec = g_sec; - PublicKey pub; - sec.getPublicKey(pub); - ZkpBinTest(sec, pub); - ZkpBinTest(sec, pub); - - PrecomputedPublicKey ppub; - ppub.init(pub); - ZkpBinTest(sec, ppub); - ZkpBinTest(sec, ppub); -} - -template -void ZkpEqTest(const SecretKey& sec, const PubT& pub) -{ - CipherTextG1 c1; - CipherTextG2 c2; - ZkpEq zkp; - for (int m = -4; m < 4; m++) { - pub.encWithZkpEq(c1, c2, zkp, m); - CYBOZU_TEST_EQUAL(sec.dec(c1), m); - CYBOZU_TEST_EQUAL(sec.dec(c2), m); - CYBOZU_TEST_ASSERT(pub.verify(c1, c2, zkp)); - zkp.d_[0] += 1; - CYBOZU_TEST_ASSERT(!pub.verify(c1, c2, zkp)); - } -} - -CYBOZU_TEST_AUTO(ZkpEq) -{ - const SecretKey& sec = g_sec; - PublicKey pub; - sec.getPublicKey(pub); - PrecomputedPublicKey ppub; - ppub.init(pub); - ZkpEqTest(sec, pub); - ZkpEqTest(sec, ppub); -} - -template -void ZkpBinEqTest(const SecretKey& sec, const PK& pub) -{ - CipherTextG1 c1; - CipherTextG2 c2; - ZkpBinEq zkp; - for (int m = 0; m < 2; m++) { - pub.encWithZkpBinEq(c1, c2, zkp, m); - CYBOZU_TEST_EQUAL(sec.dec(c1), m); - CYBOZU_TEST_EQUAL(sec.dec(c2), m); - CYBOZU_TEST_ASSERT(pub.verify(c1, c2, zkp)); - zkp.d_[0] += 1; - CYBOZU_TEST_ASSERT(!pub.verify(c1, c2, zkp)); - } - CYBOZU_TEST_EXCEPTION(pub.encWithZkpBinEq(c1, c2, zkp, 2), cybozu::Exception); -} - -CYBOZU_TEST_AUTO(ZkpBinEq) -{ - const SecretKey& sec = g_sec; - PublicKey pub; - sec.getPublicKey(pub); - ZkpBinEqTest(sec, pub); - - PrecomputedPublicKey ppub; - ppub.init(pub); - ZkpBinEqTest(sec, ppub); -} - -CYBOZU_TEST_AUTO(add_sub_mul) -{ - const SecretKey& sec = g_sec; - PublicKey pub; - sec.getPublicKey(pub); - for (int m1 = -5; m1 < 5; m1++) { - for (int m2 = -5; m2 < 5; m2++) { - CipherText c1, c2, 
c3; - pub.enc(c1, m1); - pub.enc(c2, m2); - add(c3, c1, c2); - CYBOZU_TEST_EQUAL(m1 + m2, sec.dec(c3)); - - pub.reRand(c3); - CYBOZU_TEST_EQUAL(m1 + m2, sec.dec(c3)); - - sub(c3, c1, c2); - CYBOZU_TEST_EQUAL(m1 - m2, sec.dec(c3)); - - mul(c3, c1, 5); - CYBOZU_TEST_EQUAL(m1 * 5, sec.dec(c3)); - mul(c3, c1, -123); - CYBOZU_TEST_EQUAL(m1 * -123, sec.dec(c3)); - - mul(c3, c1, c2); - CYBOZU_TEST_EQUAL(m1 * m2, sec.dec(c3)); - - pub.reRand(c3); - CYBOZU_TEST_EQUAL(m1 * m2, sec.dec(c3)); - - CipherText::mul(c3, c3, -25); - CYBOZU_TEST_EQUAL(m1 * m2 * -25, sec.dec(c3)); - - pub.enc(c1, m1, true); - CYBOZU_TEST_EQUAL(m1, sec.dec(c1)); - pub.enc(c2, m2, true); - add(c3, c1, c2); - CYBOZU_TEST_EQUAL(m1 + m2, sec.dec(c3)); - } - } -} - -CYBOZU_TEST_AUTO(largeEnc) -{ - const SecretKey& sec = g_sec; - PublicKey pub; - sec.getPublicKey(pub); - - Fr x; - x.setRand(); - CipherTextG1 c1, c2; - pub.enc(c1, x); - const int64_t m = 123; - pub.enc(c2, x + m); - sub(c1, c1, c2); - CYBOZU_TEST_EQUAL(sec.dec(c1), -m); - - pub.enc(c1, 0); - mul(c1, c1, x); - CYBOZU_TEST_ASSERT(sec.isZero(c1)); - pub.enc(c1, 1); - mul(c1, c1, x); - CYBOZU_TEST_ASSERT(!sec.isZero(c1)); -} - -CYBOZU_TEST_AUTO(add_mul_add_sub) -{ - const SecretKey& sec = g_sec; - PublicKey pub; - sec.getPublicKey(pub); - int m[8] = { 1, -2, 3, 4, -5, 6, -7, 8 }; - CipherText c[8]; - for (int i = 0; i < 8; i++) { - pub.enc(c[i], m[i]); - CYBOZU_TEST_EQUAL(sec.dec(c[i]), m[i]); - CYBOZU_TEST_ASSERT(!c[i].isMultiplied()); - CipherText mc; - pub.convert(mc, c[i]); - CYBOZU_TEST_ASSERT(mc.isMultiplied()); - CYBOZU_TEST_EQUAL(sec.dec(mc), m[i]); - } - int ok1 = (m[0] + m[1]) * (m[2] + m[3]); - int ok2 = (m[4] + m[5]) * (m[6] + m[7]); - int ok = ok1 + ok2; - for (int i = 0; i < 4; i++) { - c[i * 2].add(c[i * 2 + 1]); - CYBOZU_TEST_EQUAL(sec.dec(c[i * 2]), m[i * 2] + m[i * 2 + 1]); - } - c[0].mul(c[2]); - CYBOZU_TEST_EQUAL(sec.dec(c[0]), ok1); - c[4].mul(c[6]); - CYBOZU_TEST_EQUAL(sec.dec(c[4]), ok2); - c[0].add(c[4]); - CYBOZU_TEST_EQUAL(sec.dec(c[0]), ok); - c[0].sub(c[4]); - CYBOZU_TEST_EQUAL(sec.dec(c[0]), ok1); -} - -CYBOZU_TEST_AUTO(finalExp) -{ - const SecretKey& sec = g_sec; - PublicKey pub; - sec.getPublicKey(pub); - const int64_t m11 = 5; - const int64_t m12 = 3; - const int64_t m21 = -2; - const int64_t m22 = 9; - CipherTextG1 c11, c12; - CipherTextG2 c21, c22; - CipherTextGT ct1, ct2, ct; - pub.enc(c11, m11); - pub.enc(c12, m12); - pub.enc(c21, m21); - pub.enc(c22, m22); - CipherTextGT::mulML(ct1, c11, c21); - CipherTextGT::finalExp(ct, ct1); - CYBOZU_TEST_EQUAL(sec.dec(ct), m11 * m21); - CipherTextGT::mulML(ct2, c12, c22); - CipherTextGT::finalExp(ct, ct2); - CYBOZU_TEST_EQUAL(sec.dec(ct), m12 * m22); - CipherTextGT::add(ct1, ct1, ct2); - CipherTextGT::finalExp(ct1, ct1); - CYBOZU_TEST_EQUAL(sec.dec(ct1), (m11 * m21) + (m12 * m22)); -} - -CYBOZU_TEST_AUTO(innerProduct) -{ - const SecretKey& sec = g_sec; - PublicKey pub; - sec.getPublicKey(pub); - - cybozu::XorShift rg; - const size_t n = 1000; - std::vector v1, v2; - std::vector c1, c2; - v1.resize(n); - v2.resize(n); - c1.resize(n); - c2.resize(n); - int innerProduct = 0; - for (size_t i = 0; i < n; i++) { - v1[i] = rg() % 2; - v2[i] = rg() % 2; - innerProduct += v1[i] * v2[i]; - pub.enc(c1[i], v1[i]); - pub.enc(c2[i], v2[i]); - } - CipherText c, t; - CipherText::mul(c, c1[0], c2[0]); - for (size_t i = 1; i < n; i++) { - CipherText::mul(t, c1[i], c2[i]); - c.add(t); - } - CYBOZU_TEST_EQUAL(innerProduct, sec.dec(c)); -} - -template -T testIo(const T& x) -{ - std::stringstream ss; - ss << x; - T y; 
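// [editor's note] The innerProduct test above is the canonical two-level-HE
// pattern: one ciphertext multiplication per coordinate pair, then only
// additions. A hedged sketch of the same pattern as a reusable helper, built
// on the mcl::she C++ API used in this file; the helper name is ours, and we
// assume the mcl::she header this file includes (lost in extraction).
#include <vector>

static mcl::she::CipherText encryptedDotSketch(
	const mcl::she::PublicKey& pub,
	const std::vector<int>& v1, const std::vector<int>& v2)
{
	using mcl::she::CipherText;
	CipherText acc, c1, c2, t;
	pub.enc(c1, v1[0]);
	pub.enc(c2, v2[0]);
	CipherText::mul(acc, c1, c2);        // level-1 x level-1 -> level-2
	for (size_t i = 1; i < v1.size(); i++) {
		pub.enc(c1, v1[i]);
		pub.enc(c2, v2[i]);
		CipherText::mul(t, c1, c2);
		acc.add(t);                       // additions stay cheap at level 2
	}
	return acc;                           // sec.dec(acc) == <v1, v2>, if in DLP range
}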
- ss >> y; - CYBOZU_TEST_EQUAL(x, y); - return y; -} - -CYBOZU_TEST_AUTO(io) -{ - setRangeForDLP(100); - int64_t m; - for (int i = 0; i < 2; i++) { - if (i == 1) { - Fp::setIoMode(mcl::IoSerialize); - G1::setIoMode(mcl::IoSerialize); - } - SecretKey sec; - sec.setByCSPRNG(); - testIo(sec); - PublicKey pub; - sec.getPublicKey(pub); - testIo(pub); - CipherTextG1 g1; - pub.enc(g1, 3); - m = sec.dec(testIo(g1)); - CYBOZU_TEST_EQUAL(m, 3); - CipherTextG2 g2; - pub.enc(g2, 5); - testIo(g2); - CipherTextA ca; - pub.enc(ca, -4); - m = sec.dec(testIo(ca)); - CYBOZU_TEST_EQUAL(m, -4); - CipherTextGT ct; - CipherTextGT::mul(ct, g1, g2); - m = sec.dec(testIo(ct)); - CYBOZU_TEST_EQUAL(m, 15); - } -} - -#ifndef PAPER -CYBOZU_TEST_AUTO(bench) -{ - const SecretKey& sec = g_sec; - PublicKey pub; - sec.getPublicKey(pub); - CipherText c1, c2, c3; - CYBOZU_BENCH("enc", pub.enc, c1, 5); - pub.enc(c2, 4); - CYBOZU_BENCH("add", c1.add, c2); - CYBOZU_BENCH("mul", CipherText::mul, c3, c1, c2); - pub.enc(c1, 5); - pub.enc(c2, 4); - c1.mul(c2); - CYBOZU_BENCH("dec", sec.dec, c1); - c2 = c1; - CYBOZU_BENCH("add after mul", c1.add, c2); -} -#endif - -CYBOZU_TEST_AUTO(saveHash) -{ - mcl::she::local::HashTable hashTbl1, hashTbl2; - hashTbl1.init(SHE::P_, 1234, 123); - std::stringstream ss; - hashTbl1.save(ss); - hashTbl2.load(ss); - CYBOZU_TEST_ASSERT(hashTbl1 == hashTbl2); -} - -static inline void putK(double t) { printf("%.2e\n", t * 1e-3); } - -template -void decBench(const char *msg, int C, const SecretKey& sec, const PublicKey& pub, int64_t (SecretKey::*dec)(const CT& c) const = &SecretKey::dec) -{ - int64_t begin = 1 << 20; - int64_t end = 1LL << 32; - while (begin < end) { - CT c; - int64_t x = begin - 1; - pub.enc(c, x); - printf("m=%08x ", (uint32_t)x); - CYBOZU_BENCH_C(msg, C, (sec.*dec), c); - CYBOZU_TEST_EQUAL((sec.*dec)(c), x); - begin *= 2; - } - int64_t mTbl[] = { -0x80000003ll, 0x80000000ll, 0x80000005ll }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(mTbl); i++) { - int64_t m = mTbl[i]; - CT c; - pub.enc(c, m); - CYBOZU_TEST_EQUAL((sec.*dec)(c), m); - } -} - -#ifndef PAPER -CYBOZU_TEST_AUTO(hashBench) -{ - SecretKey& sec = g_sec; - sec.setByCSPRNG(); - const int C = 500; - const size_t hashSize = 1u << 21; - - clock_t begin = clock(), end; - setRangeForG1DLP(hashSize); - end = clock(); - printf("init G1 DLP %f\n", double(end - begin) / CLOCKS_PER_SEC); - begin = end; - setRangeForG2DLP(hashSize); - end = clock(); - printf("init G2 DLP %f\n", double(end - begin) / CLOCKS_PER_SEC); - begin = end; - setRangeForGTDLP(hashSize); - end = clock(); - printf("init GT DLP %f\n", double(end - begin) / CLOCKS_PER_SEC); - - PublicKey pub; - sec.getPublicKey(pub); - PrecomputedPublicKey ppub; - ppub.init(pub); - puts("Kclk"); - cybozu::bench::setPutCallback(putK); - decBench("decG1", C, sec, pub); - puts(""); - decBench("decG2", C, sec, pub); - puts(""); - decBench("decGT", C, sec, pub); - puts(""); - decBench("decG1ViaGT", C, sec, pub, &SecretKey::decViaGT); - puts(""); - decBench("decG2ViaGT", C, sec, pub, &SecretKey::decViaGT); - - G1 P, P2; - G2 Q, Q2; - GT e, e2; - mpz_class mr; - { - Fr r; - r.setRand(); - mr = r.getMpz(); - } - hashAndMapToG1(P, "abc"); - hashAndMapToG2(Q, "abc"); - pairing(e, P, Q); - P2.clear(); - Q2.clear(); - e2 = 1; - - printf("large m\n"); - CYBOZU_BENCH_C("G1::add ", C, G1::add, P2, P2, P); - CYBOZU_BENCH_C("G1::mul ", C, G1::mul, P, P, mr); - CYBOZU_BENCH_C("G2::add ", C, G2::add, Q2, Q2, Q); - CYBOZU_BENCH_C("G2::mul ", C, G2::mul, Q, Q, mr); - CYBOZU_BENCH_C("GT::mul ", C, GT::mul, 
e2, e2, e); - CYBOZU_BENCH_C("GT::pow ", C, GT::pow, e, e, mr); - CYBOZU_BENCH_C("G1window", C, SHE::PhashTbl_.mulByWindowMethod, P2, mr); - CYBOZU_BENCH_C("G2window", C, SHE::QhashTbl_.mulByWindowMethod, Q2, mr); - CYBOZU_BENCH_C("GTwindow", C, SHE::ePQhashTbl_.mulByWindowMethod, e, mr); -#if 1 - typedef mcl::GroupMtoA AG; - mcl::fp::WindowMethod wm; - wm.init(static_cast(e), Fr::getBitSize(), 10); - for (int i = 0; i < 100; i++) { - GT t1, t2; - GT::pow(t1, e, i); - wm.mul(static_cast(t2), i); - CYBOZU_TEST_EQUAL(t1, t2); - } -// CYBOZU_BENCH_C("GTwindow", C, wm.mul, static_cast(e), mr); -#endif - - CYBOZU_BENCH_C("miller ", C, millerLoop, e, P, Q); - CYBOZU_BENCH_C("finalExp", C, finalExp, e, e); - CYBOZU_BENCH_C("precomML", C, precomputedMillerLoop, e, P, SHE::Qcoeff_); - - CipherTextG1 c1; - CipherTextG2 c2; - CipherTextGT ct; - - int m = int(hashSize - 1); - printf("small m = %d\n", m); - CYBOZU_BENCH_C("G1::mul ", C, G1::mul, P, P, m); - CYBOZU_BENCH_C("G2::mul ", C, G2::mul, Q, Q, m); - CYBOZU_BENCH_C("GT::pow ", C, GT::pow, e, e, m); - CYBOZU_BENCH_C("G1window", C, SHE::PhashTbl_.mulByWindowMethod, P2, m); - CYBOZU_BENCH_C("G2window", C, SHE::QhashTbl_.mulByWindowMethod, Q2, m); - CYBOZU_BENCH_C("GTwindow", C, SHE::ePQhashTbl_.mulByWindowMethod, e, m); -// CYBOZU_BENCH_C("GTwindow", C, wm.mul, static_cast(e), m); - - CYBOZU_BENCH_C("encG1 ", C, pub.enc, c1, m); - CYBOZU_BENCH_C("encG2 ", C, pub.enc, c2, m); - CYBOZU_BENCH_C("encGT ", C, pub.enc, ct, m); - CYBOZU_BENCH_C("encG1pre", C, ppub.enc, c1, m); - CYBOZU_BENCH_C("encG2pre", C, ppub.enc, c2, m); - CYBOZU_BENCH_C("encGTpre", C, ppub.enc, ct, m); - - CYBOZU_BENCH_C("decG1 ", C, sec.dec, c1); - CYBOZU_BENCH_C("decG2 ", C, sec.dec, c2); - CYBOZU_BENCH_C("degGT ", C, sec.dec, ct); - - CYBOZU_BENCH_C("CT:mul ", C, CipherTextGT::mul, ct, c1, c2); - CYBOZU_BENCH_C("CT:mulML", C, CipherTextGT::mulML, ct, c1, c2); - CYBOZU_BENCH_C("CT:finalExp", C, CipherTextGT::finalExp, ct, ct); - - CYBOZU_BENCH_C("addG1 ", C, CipherTextG1::add, c1, c1, c1); - CYBOZU_BENCH_C("addG2 ", C, CipherTextG2::add, c2, c2, c2); - CYBOZU_BENCH_C("addGT ", C, CipherTextGT::add, ct, ct, ct); - CYBOZU_BENCH_C("reRandG1", C, pub.reRand, c1); - CYBOZU_BENCH_C("reRandG2", C, pub.reRand, c2); - CYBOZU_BENCH_C("reRandGT", C, pub.reRand, ct); - CYBOZU_BENCH_C("reRandG1pre", C, ppub.reRand, c1); - CYBOZU_BENCH_C("reRandG2pre", C, ppub.reRand, c2); - CYBOZU_BENCH_C("reRandGTpre", C, ppub.reRand, ct); - CYBOZU_BENCH_C("mulG1 ", C, CipherTextG1::mul, c1, c1, m); - CYBOZU_BENCH_C("mulG2 ", C, CipherTextG2::mul, c2, c2, m); - CYBOZU_BENCH_C("mulGT ", C, CipherTextGT::mul, ct, ct, m); - - CYBOZU_BENCH_C("convG1toGT", C, pub.convert, ct, c1); - CYBOZU_BENCH_C("convG2toGT", C, pub.convert, ct, c2); -} -#endif - -CYBOZU_TEST_AUTO(liftedElGamal) -{ - const size_t hashSize = 1024; - initG1only(mcl::ecparam::secp192k1, hashSize); - const size_t byteSize = 192 / 8; - SecretKey sec; - sec.setByCSPRNG(); - PublicKey pub; - sec.getPublicKey(pub); - CipherTextG1 c1, c2, c3; - int m1 = 12, m2 = 34; - pub.enc(c1, m1); - pub.enc(c2, m2); - CYBOZU_TEST_EQUAL(sec.dec(c1), m1); - CYBOZU_TEST_EQUAL(sec.dec(c2), m2); - add(c3, c1, c2); - CYBOZU_TEST_EQUAL(sec.dec(c3), m1 + m2); - neg(c1, c2); - CYBOZU_TEST_EQUAL(sec.dec(c1), -m2); - mul(c1, c2, m1); - CYBOZU_TEST_EQUAL(sec.dec(c1), m2 * m1); - - char buf[1024]; - size_t n = sec.serialize(buf, sizeof(buf)); - CYBOZU_TEST_EQUAL(n, byteSize); - SecretKey sec2; - n = sec2.deserialize(buf, n); - CYBOZU_TEST_EQUAL(n, byteSize); - 
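// [editor's note] The liftedElGamal test above runs the library in G1-only
// mode over secp192k1: "lifted" means the message rides in the exponent, so
// ciphertexts add homomorphically but decryption ends with a small discrete
// log. A toy version over Z/101 to show the structure (not the library code;
// all names and parameters here are ours).
#include <cstdint>
#include <cassert>

struct LiftedElGamalToy {
	uint64_t p, g, h, x; // h = g^x; x is the secret key
	void keygen(uint64_t secret) { p = 101; g = 2; x = secret; h = powmod(g, x, p); }
	// Enc(m) = (g^r, g^m * h^r); the component-wise product adds messages
	void enc(uint64_t c[2], uint64_t m, uint64_t r) const
	{
		c[0] = powmod(g, r, p);
		c[1] = powmod(g, m, p) * powmod(h, r, p) % p;
	}
	static void add(uint64_t c[2], const uint64_t a[2], const uint64_t b[2], uint64_t p)
	{
		c[0] = a[0] * b[0] % p;
		c[1] = a[1] * b[1] % p;
	}
	int dec(const uint64_t c[2]) const
	{
		// g^m = c1 / c0^x, then brute-force the small exponent (the "lift" cost)
		uint64_t gm = c[1] * powmod(powmod(c[0], x, p), p - 2, p) % p;
		for (int m = 0; m < 100; m++) {
			if (powmod(g, m, p) == gm) return m;
		}
		return -1;
	}
	static uint64_t powmod(uint64_t g, uint64_t e, uint64_t p)
	{
		uint64_t r = 1;
		for (g %= p; e; e >>= 1, g = g * g % p) { if (e & 1) r = r * g % p; }
		return r;
	}
};

void liftedElGamalToyCheck()
{
	LiftedElGamalToy eg;
	eg.keygen(42);
	uint64_t c1[2], c2[2], c3[2];
	eg.enc(c1, 12, 7);
	eg.enc(c2, 34, 9);
	LiftedElGamalToy::add(c3, c1, c2, eg.p);
	assert(eg.dec(c3) == 12 + 34); // the same additive law the test checks
}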
CYBOZU_TEST_EQUAL(sec, sec2); - - n = pub.serialize(buf, sizeof(buf)); - CYBOZU_TEST_EQUAL(n, byteSize + 1); // +1 is for sign of y - PublicKey pub2; - n = pub2.deserialize(buf, n); - CYBOZU_TEST_EQUAL(n, byteSize + 1); - CYBOZU_TEST_EQUAL(pub, pub2); - - PublicKey pub3; - sec2.getPublicKey(pub3); - CYBOZU_TEST_EQUAL(pub, pub3); -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/sq_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/sq_test.cpp deleted file mode 100644 index 4c386d23b..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/sq_test.cpp +++ /dev/null @@ -1,21 +0,0 @@ -#include -#include -#include - -CYBOZU_TEST_AUTO(sqrt) -{ - const int tbl[] = { 3, 5, 7, 11, 13, 17, 19, 257, 997, 1031 }; - mcl::SquareRoot sq; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const mpz_class p = tbl[i]; - sq.set(p); - for (mpz_class a = 0; a < p; a++) { - mpz_class x; - if (sq.get(x, a)) { - mpz_class y; - y = (x * x) % p; - CYBOZU_TEST_EQUAL(a, y); - } - } - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/test/vint_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/vint_test.cpp deleted file mode 100644 index 15e14266a..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/vint_test.cpp +++ /dev/null @@ -1,1353 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#ifndef DONT_USE_GMP_IN_TEST -#include -#endif - -#define PUT(x) std::cout << #x "=" << x << std::endl; - -#if defined(__EMSCRIPTEN__) && !defined(MCL_AVOID_EXCEPTION_TEST) - #define MCL_AVOID_EXCEPTION_TEST -#endif - -using namespace mcl; - -struct V { - int n; - unsigned int p[16]; -}; - -CYBOZU_TEST_AUTO(addSub) -{ - static const struct { - V a; - V b; - V c; - } tbl[] = { - { - { 1, { 123, } }, - { 1, { 456, } }, - { 1, { 579, } }, - }, - { - { 1, { 0xffffffff, } }, - { 1, { 3, } }, - { 2, { 2, 1 } }, - }, - { - { 3, { 0xffffffff, 1, 0xffffffff } }, - { 2, { 1, 0xfffffffe, } }, - { 4, { 0, 0, 0, 1 } }, - }, - { - { 3, { 0xffffffff, 5, 0xffffffff } }, - { 2, { 1, 0xfffffffe, } }, - { 4, { 0, 4, 0, 1 } }, - }, - { - { 3, { 0xffffffff, 5, 0xffffffff } }, - { 1, { 1, } }, - { 3, { 0, 6, 0xffffffff } }, - }, - { - { 3, { 1, 0xffffffff, 1 } }, - { 3, { 0xffffffff, 0, 1 } }, - { 3, { 0, 0, 3 } }, - }, - { - { 1, { 1 } }, - { 3, { 0xffffffff, 0xffffffff, 0xffffffff } }, - { 4, { 0, 0, 0, 1 } }, - }, - { - { 1, { 0xffffffff } }, - { 1, { 0xffffffff } }, - { 2, { 0xfffffffe, 1 } }, - }, - { - { 2, { 0xffffffff, 0xffffffff } }, - { 2, { 0xffffffff, 0xffffffff } }, - { 3, { 0xfffffffe, 0xffffffff, 1 } }, - }, - { - { 3, { 0xffffffff, 0xffffffff, 0xffffffff } }, - { 3, { 0xffffffff, 0xffffffff, 0xffffffff } }, - { 4, { 0xfffffffe, 0xffffffff, 0xffffffff, 1 } }, - }, - { - { 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } }, - { 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } }, - { 5, { 0xfffffffe, 0xffffffff, 0xffffffff, 0xffffffff, 1 } }, - }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - Vint x, y, z, t; - x.setArray(tbl[i].a.p, tbl[i].a.n); - y.setArray(tbl[i].b.p, tbl[i].b.n); - z.setArray(tbl[i].c.p, tbl[i].c.n); - Vint::add(t, x, y); - CYBOZU_TEST_EQUAL(t, z); - - Vint::add(t, y, x); - CYBOZU_TEST_EQUAL(t, z); - - Vint::sub(t, z, x); - CYBOZU_TEST_EQUAL(t, y); - } - { - const uint32_t in[] = { 0xffffffff, 0xffffffff }; - const uint32_t out[] = { 0xfffffffe, 0xffffffff, 1 }; - Vint x, y; - x.setArray(in, 2); - y.setArray(out, 3); - Vint::add(x, x, x); - CYBOZU_TEST_EQUAL(x, y); - Vint::sub(x, x, x); - y.clear(); - 
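// [editor's note] The addSub vectors above are carry-propagation cases, e.g.
// {0xffffffff} + {3} = {2, 1}. Schoolbook multi-limb addition on 32-bit limbs
// is the algorithm these vectors pin down; a sketch (not Vint's code, names
// ours):
#include <cstdint>
#include <vector>
#include <cassert>

static std::vector<uint32_t> addLimbsSketch(
	const std::vector<uint32_t>& a, const std::vector<uint32_t>& b)
{
	const size_t n = a.size() > b.size() ? a.size() : b.size();
	std::vector<uint32_t> z;
	uint64_t carry = 0;
	for (size_t i = 0; i < n; i++) {  // limbs are little-endian
		uint64_t s = carry;
		if (i < a.size()) s += a[i];
		if (i < b.size()) s += b[i];
		z.push_back(uint32_t(s));     // low 32 bits
		carry = s >> 32;              // at most 1
	}
	if (carry) z.push_back(uint32_t(carry));
	return z;
}

void addLimbsSketchCheck()
{
	// the { 1, { 0xffffffff } } + { 1, { 3 } } = { 2, { 2, 1 } } row above
	std::vector<uint32_t> a(1, 0xffffffffu), b(1, 3u);
	std::vector<uint32_t> z = addLimbsSketch(a, b);
	assert(z.size() == 2 && z[0] == 2 && z[1] == 1);
}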
CYBOZU_TEST_EQUAL(x, y); - } - { - const uint32_t t0[] = {1, 2}; - const uint32_t t1[] = {3, 4, 5}; - const uint32_t t2[] = {4, 6, 5}; - Vint x, y, z; - z.setArray(t2, 3); - - x.setArray(t0, 2); - y.setArray(t1, 3); - Vint::add(x, x, y); - CYBOZU_TEST_EQUAL(x, z); - - x.setArray(t0, 2); - y.setArray(t1, 3); - Vint::add(x, y, x); - CYBOZU_TEST_EQUAL(x, z); - - x.setArray(t0, 2); - y.setArray(t1, 3); - Vint::add(y, x, y); - CYBOZU_TEST_EQUAL(y, z); - - x.setArray(t0, 2); - y.setArray(t1, 3); - Vint::add(y, y, x); - CYBOZU_TEST_EQUAL(y, z); - } -} - -CYBOZU_TEST_AUTO(mul1) -{ - static const struct { - V a; - int b; - V c; - } tbl[] = { - { - { 1, { 12, } }, - 5, - { 1, { 60, } }, - }, - { - { 1, { 1234567, } }, - 1, - { 1, { 1234567, } }, - }, - { - { 1, { 1234567, } }, - 89012345, - { 2, { 0x27F6EDCF, 0x63F2, } }, - }, - { - { 3, { 0xffffffff, 0xffffffff, 0xffffffff, } }, - 0x7fffffff, - { 4, { 0x80000001, 0xffffffff, 0xffffffff, 0x7ffffffe } }, - }, - { - { 3, { 0xffffffff, 0xffffffff, 0xffffffff, } }, - 1, - { 3, { 0xffffffff, 0xffffffff, 0xffffffff, } }, - }, - { - { 2, { 0xffffffff, 1 } }, - 0x7fffffff, - { 2, { 0x80000001, 0xfffffffd } }, - }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - Vint x, z, t; - int y; - x.setArray(tbl[i].a.p, tbl[i].a.n); - y = tbl[i].b; - z.setArray(tbl[i].c.p, tbl[i].c.n); - Vint::mul(t, x, y); - CYBOZU_TEST_EQUAL(t, z); - - Vint::mul(x, x, y); - CYBOZU_TEST_EQUAL(x, z); - } -} - -CYBOZU_TEST_AUTO(mul2) -{ - static const struct { - V a; - V b; - V c; - } tbl[] = { - { - { 1, { 12, } }, - { 1, { 5, } }, - { 1, { 60, } }, - }, - { - { 1, { 1234567, } }, - { 1, { 89012345, } }, - { 2, { 0x27F6EDCF, 0x63F2, } }, - }, - { - { 3, { 0xffffffff, 0xffffffff, 0xffffffff, } }, - { 1, { 0xffffffff, } }, - { 4, { 0x00000001, 0xffffffff, 0xffffffff, 0xfffffffe } }, - }, - { - { 2, { 0xffffffff, 1 } }, - { 1, { 0xffffffff, } }, - { 3, { 0x00000001, 0xfffffffd, 1 } }, - }, - { - { 2, { 0xffffffff, 1 } }, - { 1, { 0xffffffff, } }, - { 3, { 0x00000001, 0xfffffffd, 1 } }, - }, - { - { 2, { 1, 1 } }, - { 2, { 1, 1 } }, - { 3, { 1, 2, 1 } }, - }, - { - { 3, { 0xffffffff, 0xffffffff, 1 } }, - { 2, { 0xffffffff, 0xffffffff } }, - { 5, { 1, 0, 0xfffffffd, 0xffffffff, 1 } }, - }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - Vint x, y, z, t; - x.setArray(tbl[i].a.p, tbl[i].a.n); - y.setArray(tbl[i].b.p, tbl[i].b.n); - z.setArray(tbl[i].c.p, tbl[i].c.n); - Vint::mul(t, x, y); - CYBOZU_TEST_EQUAL(t, z); - - Vint::mul(t, y, x); - CYBOZU_TEST_EQUAL(t, z); - } - { - const uint32_t in[] = { 0xffffffff, 1 }; - const uint32_t out[] = { 1, 0xfffffffc, 3 }; - Vint x, y, z; - y.setArray(out, 3); - x.setArray(in, 2); - z = x; - Vint::mul(x, x, x); - CYBOZU_TEST_EQUAL(x, y); - - x.setArray(in, 2); - Vint::mul(x, x, z); - CYBOZU_TEST_EQUAL(x, y); - - x.setArray(in, 2); - Vint::mul(x, z, x); - CYBOZU_TEST_EQUAL(x, y); - - x.setArray(in, 2); - Vint::mul(x, z, z); - CYBOZU_TEST_EQUAL(x, y); - } - { - Vint a("285434247217355341057"); - a *= a; - CYBOZU_TEST_EQUAL(a, Vint("81472709484538325259309302444004789877249")); - } -} - -CYBOZU_TEST_AUTO(div1) -{ - static const struct { - V a; - unsigned int b; - unsigned int r; - V c; - } tbl[] = { - { - { 1, { 100, } }, - 1, 0, - { 1, { 100, } }, - }, - { - { 1, { 100, } }, - 100, 0, - { 1, { 1, } }, - }, - { - { 1, { 100, } }, - 101, 100, - { 1, { 0, } }, - }, - { - { 1, { 100, } }, - 2, 0, - { 1, { 50, } }, - }, - { - { 1, { 100, } }, - 3, 1, - { 1, { 33, } }, - }, - { - { 2, { 0xffffffff, 0xffffffff } }, - 1, 0, - { 2, 
{ 0xffffffff, 0xffffffff, } }, - }, - { - { 2, { 0xffffffff, 0xffffffff } }, - 123, 15, - { 2, { 0x4d0214d0, 0x214d021 } }, - }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - Vint x, z, t; - unsigned int b, r, u; - x.setArray(tbl[i].a.p, tbl[i].a.n); - b = tbl[i].b; - r = tbl[i].r; - z.setArray(tbl[i].c.p, tbl[i].c.n); - - u = (unsigned int)Vint::divMods1(&t, x, b); - CYBOZU_TEST_EQUAL(t, z); - CYBOZU_TEST_EQUAL(u, r); - - u = (unsigned int)Vint::divMods1(&x, x, b); - CYBOZU_TEST_EQUAL(x, z); - CYBOZU_TEST_EQUAL(u, r); - } -} - -CYBOZU_TEST_AUTO(div2) -{ - static const struct { - V x; - V y; - V q; - V r; - } tbl[] = { - { - { 1, { 100 } }, - { 1, { 3 } }, - { 1, { 33 } }, - { 1, { 1 } }, - }, - { - { 2, { 1, 1 } }, - { 2, { 0, 1 } }, - { 1, { 1 } }, - { 1, { 1 } }, - }, - { - { 2, { 0xffffffff, 0xffffffff } }, - { 2, { 0, 1 } }, - { 1, { 0xffffffff } }, - { 1, { 0xffffffff } }, - }, - { - { 2, { 0xffffffff, 0xffffffff } }, - { 2, { 0xffffffff, 1 } }, - { 1, { 0x80000000 } }, - { 1, { 0x7fffffff } }, - }, - { - { 3, { 0xffffffff, 0xffffffff, 0xffffffff } }, - { 2, { 0xffffffff, 1 } }, - { 2, { 0x40000000, 0x80000000 } }, - { 1, { 0x3fffffff } }, - }, - { - { 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } }, - { 3, { 1, 0, 1 } }, - { 2, { 0xffffffff, 0xffffffff } }, - { 1, { 0 } }, - }, - { - { 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } }, - { 3, { 1, 0xffffffff, 0xffffffff } }, - { 2, { 0, 1 } }, - { 2, { 0xffffffff, 0xfffffffe } }, - }, - { - { 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } }, - { 3, { 1, 0, 0xffffffff } }, - { 2, { 1, 1 } }, - { 2, { 0xfffffffe, 0xfffffffe } }, - }, - { - { 4, { 0xffffffff, 0xffffffff, 0xffffffff, 1 } }, - { 3, { 1, 0, 0xffffffff } }, - { 1, { 2 } }, - { 3, { 0xfffffffd, 0xffffffff, 1 } }, - }, - { - { 4, { 0, 0, 1, 1 } }, - { 2, { 1, 1 } }, - { 3, { 0, 0, 1 } }, - { 1, { 0 } }, - }, - { - { 3, { 5, 5, 1} }, - { 2, { 1, 2 } }, - { 1, { 0x80000002 } }, - { 1, { 0x80000003, } }, - }, - { - { 2, { 5, 5} }, - { 2, { 1, 1 } }, - { 1, { 5 } }, - { 1, { 0, } }, - }, - { - { 2, { 5, 5} }, - { 2, { 2, 1 } }, - { 1, { 4 } }, - { 1, { 0xfffffffd, } }, - }, - { - { 3, { 5, 0, 5} }, - { 3, { 2, 0, 1 } }, - { 1, { 4 } }, - { 2, { 0xfffffffd, 0xffffffff } }, - }, - { - { 2, { 4, 5 } }, - { 2, { 5, 5 } }, - { 1, { 0 } }, - { 2, { 4, 5 } }, - }, - { - { 1, { 123 } }, - { 2, { 1, 1 } }, - { 1, { 0 } }, - { 1, { 123 } }, - }, - { - { 1, { 123 } }, - { 3, { 1, 1, 1 } }, - { 1, { 0 } }, - { 1, { 123 } }, - }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - Vint x, y, q, r; - x.setArray(tbl[i].x.p, tbl[i].x.n); - y.setArray(tbl[i].y.p, tbl[i].y.n); - q.setArray(tbl[i].q.p, tbl[i].q.n); - r.setArray(tbl[i].r.p, tbl[i].r.n); - - Vint qt, rt; - Vint::quotRem(&qt, rt, x, y); - CYBOZU_TEST_EQUAL(qt, q); - CYBOZU_TEST_EQUAL(rt, r); - - Vint::mul(y, y, qt); - Vint::add(y, y, rt); - CYBOZU_TEST_EQUAL(x, y); - - x.setArray(tbl[i].x.p, tbl[i].x.n); - y.setArray(tbl[i].y.p, tbl[i].y.n); - Vint::quotRem(&x, rt, x, y); - CYBOZU_TEST_EQUAL(x, q); - CYBOZU_TEST_EQUAL(rt, r); - - x.setArray(tbl[i].x.p, tbl[i].x.n); - y.setArray(tbl[i].y.p, tbl[i].y.n); - Vint::quotRem(&y, rt, x, y); - CYBOZU_TEST_EQUAL(y, q); - CYBOZU_TEST_EQUAL(rt, r); - - x.setArray(tbl[i].x.p, tbl[i].x.n); - y.setArray(tbl[i].y.p, tbl[i].y.n); - Vint::quotRem(&x, y, x, y); - CYBOZU_TEST_EQUAL(x, q); - CYBOZU_TEST_EQUAL(y, r); - - x.setArray(tbl[i].x.p, tbl[i].x.n); - y.setArray(tbl[i].y.p, tbl[i].y.n); - Vint::quotRem(&y, x, x, y); - CYBOZU_TEST_EQUAL(y, q); - 
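// The quotRem calls above alias the quotient and remainder outputs with the inputs x and y in every combination; in-place division must yield the same q and r as the non-aliased call.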
CYBOZU_TEST_EQUAL(x, r); - } - { - const uint32_t in[] = { 1, 1 }; - Vint x, y, z; - x.setArray(in, 2); - Vint::quotRem(&x, y, x, x); - z = 1; - CYBOZU_TEST_EQUAL(x, z); - z.clear(); - CYBOZU_TEST_EQUAL(y, z); - - Vint::quotRem(&y, x, x, x); - z = 1; - CYBOZU_TEST_EQUAL(y, z); - z.clear(); - CYBOZU_TEST_EQUAL(x, z); - } -} - -CYBOZU_TEST_AUTO(quotRem) -{ - const struct { - const char *x; - const char *y; - const char *r; - } tbl[] = { - { - "1448106640508192452750709206294683535529268965445799785581837640324321797831381715960812126274894517677713278300997728292641936248881345120394299128611830", - "82434016654300679721217353503190038836571781811386228921167322412819029493183", - "72416512377294697540770834088766459385112079195086911762075702918882982361282" - }, - { - "97086308670107713719105336221824613370040805954034005192338040686500414395543303807941158656814978071549225072789349941064484974666540443679601226744652", - "82434016654300679721217353503190038836571781811386228921167322412819029493183", - "41854959563040430269871677548536437787164514279279911478858426970427834388586", - }, - { - "726838724295606887174238120788791626017347752989142414466410919788841485181240131619880050064495352797213258935807786970844241989010252", - "82434016654300679721217353503190038836571781811386228921167322412819029493183", - "81378967132566843036693176764684783485107373533583677681931133755003929106966", - }, - { - "85319207237201203511459960875801690195851794174784746933408178697267695525099750", - "82434016654300679721217353503190038836571781811386228921167322412819029493183", - "82434016654300679721217353503190038836571781811386228921167322412819029148528", - }, - { - "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "0x100000000000000000000000000000000000000000000000001", - "1606938044258990275541962092341162602522202993782724115824640", - }, - { - "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "0x1000000000000000000000000000000000000000000000000000000000000000000000000000000001", - "34175792574734561318320347298712833833643272357332299899995954578095372295314880347335474659983360", - }, - { - "0xfffffffffffff000000000000000000000000000000000000000000000000000000000000000000", - "0x100000000000000000000000000000000000000000000000000000000000000000001", - "7558907585412001237250713901367146624661464598973016020495791084036551510708977665", - }, - { - "0xfffffffffffff000000000000000000000000000000000000000000000000000000000000000000", - "0xfffffffffffff0000000000000000000000000000000000000000000000000000000000000001", - "521481209941628322292632858916605385658190900090571826892867289394157573281830188869820088065", - }, - { - "0x1230000000000000456", - "0x1230000000000000457", - "0x1230000000000000456", - }, - { - "0x1230000000000000456", - "0x1230000000000000456", - "0", - }, - { - "0x1230000000000000456", - "0x1230000000000000455", - "1", - }, - { - "0x1230000000000000456", - "0x2000000000000000000", - "0x1230000000000000456", - }, - { - "0xffffffffffffffffffffffffffffffff", - "0x80000000000000000000000000000000", - "0x7fffffffffffffffffffffffffffffff", - }, - { - "0xffffffffffffffffffffffffffffffff", - "0x7fffffffffffffffffffffffffffffff", - "1", - }, - { - "0xffffffffffffffffffffffffffffffff", - "0x70000000000000000000000000000000", - "0x1fffffffffffffffffffffffffffffff", - }, - { - "0xffffffffffffffffffffffffffffffff", - "0x30000000000000000000000000000000", - 
"0x0fffffffffffffffffffffffffffffff", - }, - { - "0xffffffffffffffffffffffffffffffff", - "0x10000000000000000000000000000000", - "0x0fffffffffffffffffffffffffffffff", - }, - { - "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "0x2523648240000001ba344d80000000086121000000000013a700000000000013", - "0x212ba4f27ffffff5a2c62effffffffcdb939ffffffffff8a15ffffffffffff8d", - }, - { - "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "0x2523648240000001ba344d8000000007ff9f800000000010a10000000000000d", - "0x212ba4f27ffffff5a2c62effffffffd00242ffffffffff9c39ffffffffffffb1", - }, - }; - mcl::Vint x, y, q, r1, r2; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - x.setStr(tbl[i].x); - y.setStr(tbl[i].y); - r1.setStr(tbl[i].r); - mcl::Vint::divMod(&q, r2, x, y); - CYBOZU_TEST_EQUAL(r1, r2); - CYBOZU_TEST_EQUAL(x, q * y + r2); - } -} - -CYBOZU_TEST_AUTO(string) -{ - const struct { - uint32_t v[5]; - size_t vn; - const char *str; - const char *hex; - const char *bin; - } tbl[] = { - { { 0 }, 0, "0", "0x0", "0b0" }, - { { 12345 }, 1, "12345", "0x3039", "0b11000000111001" }, - { { 0xffffffff }, 1, "4294967295", "0xffffffff", "0b11111111111111111111111111111111" }, - { { 0, 1 }, 2, "4294967296", "0x100000000", "0b100000000000000000000000000000000" }, - { { 0, 0, 0, 0, 1 }, 5, "340282366920938463463374607431768211456", "0x100000000000000000000000000000000", "0b100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" }, - { { 0, 0x0b22a000, 0xe2f768a0, 0xe086b93c, 0x2cd76f }, 5, "1000000000000000000000000000000000000000000000", "0x2cd76fe086b93ce2f768a00b22a00000000000", "0b101100110101110110111111100000100001101011100100111100111000101111011101101000101000000000101100100010101000000000000000000000000000000000000000000000" }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - Vint x, y; - x.setArray(tbl[i].v,tbl[i].vn); - CYBOZU_TEST_EQUAL(x.getStr(10), tbl[i].str); - char buf[1024]; - size_t n = x.getStr(buf, sizeof(buf), 10); - CYBOZU_TEST_ASSERT(n > 0); - CYBOZU_TEST_EQUAL(tbl[i].str, buf); - y.setStr(tbl[i].str); - CYBOZU_TEST_EQUAL(x.getStr(16), tbl[i].hex + 2); - n = x.getStr(buf, sizeof(buf), 16); - CYBOZU_TEST_ASSERT(n > 0); - CYBOZU_TEST_EQUAL(tbl[i].hex + 2, buf); - CYBOZU_TEST_EQUAL(x, y); - x = 1; - x.setStr(tbl[i].hex); - CYBOZU_TEST_EQUAL(x, y); - } -} - -CYBOZU_TEST_AUTO(shift) -{ - Vint x("123423424918471928374192874198274981274918274918274918243"); - Vint y, z; - - const size_t unitBitSize = Vint::unitBitSize; - Vint s; - // shl - for (size_t i = 1; i < 31; i++) { - Vint::shl(y, x, i); - z = x * (Vint::Unit(1) << i); - CYBOZU_TEST_EQUAL(y, z); - y = x << i; - CYBOZU_TEST_EQUAL(y, z); - y = x; - y <<= i; - CYBOZU_TEST_EQUAL(y, z); - } - for (int i = 0; i < 4; i++) { - Vint::shl(y, x, i * unitBitSize); - Vint::pow(s, Vint(2), i * unitBitSize); - z = x * s; - CYBOZU_TEST_EQUAL(y, z); - y = x << (i * unitBitSize); - CYBOZU_TEST_EQUAL(y, z); - y = x; - y <<= (i * unitBitSize); - CYBOZU_TEST_EQUAL(y, z); - } - for (int i = 0; i < 100; i++) { - y = x << i; - Vint::pow(s, Vint(2), i); - z = x * s; - CYBOZU_TEST_EQUAL(y, z); - y = x; - y <<= i; - CYBOZU_TEST_EQUAL(y, z); - } - - // shr - for (size_t i = 1; i < 31; i++) { - Vint::shr(y, x, i); - z = x / (Vint::Unit(1) << i); - CYBOZU_TEST_EQUAL(y, z); - y = x >> i; - CYBOZU_TEST_EQUAL(y, z); - y = x; - y >>= i; - CYBOZU_TEST_EQUAL(y, z); - } - for (int i = 0; i < 3; i++) { - Vint::shr(y, x, i * unitBitSize); - 
Vint::pow(s, Vint(2), i * unitBitSize); - z = x / s; - CYBOZU_TEST_EQUAL(y, z); - y = x >> (i * unitBitSize); - CYBOZU_TEST_EQUAL(y, z); - y = x; - y >>= (i * unitBitSize); - CYBOZU_TEST_EQUAL(y, z); - } - for (int i = 0; i < 100; i++) { - y = x >> i; - Vint::pow(s, Vint(2), i); - z = x / s; - CYBOZU_TEST_EQUAL(y, z); - y = x; - y >>= i; - CYBOZU_TEST_EQUAL(y, z); - } - { - Vint a = 0, zero = 0; - a <<= Vint::unitBitSize; - CYBOZU_TEST_EQUAL(a, zero); - } -} - -CYBOZU_TEST_AUTO(getBitSize) -{ - { - Vint zero = 0; - CYBOZU_TEST_EQUAL(zero.getBitSize(), 1); - zero <<= (Vint::unitBitSize - 1); - CYBOZU_TEST_EQUAL(zero.getBitSize(), 1); - zero <<= Vint::unitBitSize; - CYBOZU_TEST_EQUAL(zero.getBitSize(), 1); - } - - { - Vint a = 1; - CYBOZU_TEST_EQUAL(a.getBitSize(), 1); - a = 2; - CYBOZU_TEST_EQUAL(a.getBitSize(), 2); - a = 3; - CYBOZU_TEST_EQUAL(a.getBitSize(), 2); - a = 4; - CYBOZU_TEST_EQUAL(a.getBitSize(), 3); - } - - { - Vint a = 5; - const size_t msbindex = a.getBitSize(); - const size_t width = 100; - const size_t time = 3; - for (size_t i = 0; i < time; ++i) { - a <<= width; - CYBOZU_TEST_EQUAL(a.getBitSize(), msbindex + width*(i + 1)); - } - - for (size_t i = 0; i < time*2; ++i) { - a >>= width/2; - CYBOZU_TEST_EQUAL(a.getBitSize(), msbindex + width*time - (width/2)*(i + 1)); - } - a >>= width; - CYBOZU_TEST_ASSERT(a.isZero()); - CYBOZU_TEST_EQUAL(a.getBitSize(), 1); - } - - { - Vint b("12"), c("345"), d("67890"); - size_t bl = b.getBitSize(), cl = c.getBitSize(), dl = d.getBitSize(); - CYBOZU_TEST_ASSERT((b*c).getBitSize() <= bl + cl); - CYBOZU_TEST_ASSERT((c*d).getBitSize() <= cl + dl); - CYBOZU_TEST_ASSERT((b*c*d).getBitSize() <= bl + cl + dl); - } -} - -CYBOZU_TEST_AUTO(bit) -{ - Vint a; - a.setStr("0x1234567890abcdef"); - bool tvec[] = { - 1,1,1,1,0 ,1,1,1,1,0 - ,1,1,0,0,1 ,1,1,1,0,1 - ,0,1,0,1,0 ,0,0,0,1,0 - ,0,1,0,0,0 ,1,1,1,1,0 - ,0,1,1,0,1 ,0,1,0,0,0 - ,1,0,1,1,0 ,0,0,1,0,0 - ,1 - }; - CYBOZU_TEST_EQUAL(a.getBitSize(), sizeof(tvec)/sizeof(*tvec)); - for (int i = (int)a.getBitSize() - 1; i >= 0; --i) { - CYBOZU_TEST_EQUAL(a.testBit(i), tvec[i]); - } -} - -CYBOZU_TEST_AUTO(sample) -{ - using namespace mcl; - Vint x(1); - Vint y("123456789"); - Vint z; - - x = 1; // set by int - y.setStr("123456789"); // set by decimal - z.setStr("0xffffffff"); // set by hex - x += z; - - x = 2; - y = 250; - Vint::pow(x, x, y); - Vint r, q; - r = x % y; - q = x / y; - CYBOZU_TEST_EQUAL(q * y + r, x); - - Vint::quotRem(&q, r, x, y); // get both r and q - CYBOZU_TEST_EQUAL(q * y + r, x); -} - -CYBOZU_TEST_AUTO(Vint) -{ - const struct { - int a; - int b; - /* - q, r ; like C - q2, r2 ; like Python - */ - int add, sub, mul, q, r, q2, r2; - } tbl[] = { - { 13, 5, 18, 8, 65, 2, 3, 2, 3 }, - { 13, -5, 8, 18, -65, -2, 3, -3, -2 }, - { -13, 5, -8, -18, -65, -2, -3, -3, 2 }, - { -13, -5, -18, -8, 65, 2, -3, 2, -3 }, - { 5, 13, 18, -8, 65, 0, 5 , 0, 5}, - { 5, -13, -8, 18, -65, 0, 5 , -1, -8}, - { -5, 13, 8, -18, -65, 0, -5 , -1, 8}, - { -5, -13, -18, 8, 65, 0, -5 , 0, -5}, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - Vint a = tbl[i].a; - Vint b = tbl[i].b; - Vint add = a + b; - Vint sub = a - b; - Vint mul = a * b; - Vint q = a / b; - Vint r = a % b; - Vint q2, r2; - Vint::quotRem(&q2, r2, a, b); - CYBOZU_TEST_EQUAL(add, tbl[i].add); - CYBOZU_TEST_EQUAL(sub, tbl[i].sub); - CYBOZU_TEST_EQUAL(mul, tbl[i].mul); - CYBOZU_TEST_EQUAL(q, tbl[i].q); - CYBOZU_TEST_EQUAL(r, tbl[i].r); - CYBOZU_TEST_EQUAL(q * b + r, a); - CYBOZU_TEST_EQUAL(q2, tbl[i].q2); - CYBOZU_TEST_EQUAL(r2, tbl[i].r2); - 
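// Both conventions satisfy a == q*b + r: the C-style pair (q, r) truncates toward zero, so r takes the dividend's sign; the Python-style pair (q2, r2) floors toward minus infinity, so r2 takes the divisor's sign.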
CYBOZU_TEST_EQUAL(q2 * b + r2, a); - } - CYBOZU_TEST_EQUAL(Vint("15") / Vint("3"), Vint("5")); - CYBOZU_TEST_EQUAL(Vint("15") / Vint("-3"), Vint("-5")); - CYBOZU_TEST_EQUAL(Vint("-15") / Vint("3"), Vint("-5")); - CYBOZU_TEST_EQUAL(Vint("-15") / Vint("-3"), Vint("5")); - - CYBOZU_TEST_EQUAL(Vint("15") % Vint("3"), Vint("0")); - CYBOZU_TEST_EQUAL(Vint("15") % Vint("-3"), Vint("0")); - CYBOZU_TEST_EQUAL(Vint("-15") % Vint("3"), Vint("0")); - CYBOZU_TEST_EQUAL(Vint("-15") % Vint("-3"), Vint("0")); - - CYBOZU_TEST_EQUAL(Vint("-0") + Vint("-3"), Vint("-3")); - CYBOZU_TEST_EQUAL(Vint("-0") - Vint("-3"), Vint("3")); - CYBOZU_TEST_EQUAL(Vint("-3") + Vint("-0"), Vint("-3")); - CYBOZU_TEST_EQUAL(Vint("-3") - Vint("-0"), Vint("-3")); - - CYBOZU_TEST_EQUAL(Vint("-0") + Vint("3"), Vint("3")); - CYBOZU_TEST_EQUAL(Vint("-0") - Vint("3"), Vint("-3")); - CYBOZU_TEST_EQUAL(Vint("3") + Vint("-0"), Vint("3")); - CYBOZU_TEST_EQUAL(Vint("3") - Vint("-0"), Vint("3")); - - CYBOZU_TEST_EQUAL(Vint("0"), Vint("0")); - CYBOZU_TEST_EQUAL(Vint("0"), Vint("-0")); - CYBOZU_TEST_EQUAL(Vint("-0"), Vint("0")); - CYBOZU_TEST_EQUAL(Vint("-0"), Vint("-0")); - - CYBOZU_TEST_ASSERT(Vint("2") < Vint("3")); - CYBOZU_TEST_ASSERT(Vint("-2") < Vint("3")); - CYBOZU_TEST_ASSERT(Vint("-5") < Vint("-3")); - CYBOZU_TEST_ASSERT(Vint("-0") < Vint("1")); - CYBOZU_TEST_ASSERT(Vint("-1") < Vint("-0")); - - CYBOZU_TEST_ASSERT(Vint("5") > Vint("3")); - CYBOZU_TEST_ASSERT(Vint("5") > Vint("-3")); - CYBOZU_TEST_ASSERT(Vint("-2") > Vint("-3")); - CYBOZU_TEST_ASSERT(Vint("3") > Vint("-0")); - CYBOZU_TEST_ASSERT(Vint("-0") > Vint("-1")); - - { - const struct { - const char *str; - int s; - int shl; - int shr; - } tbl2[] = { - { "0", 1, 0, 0 }, - { "-0", 1, 0, 0 }, - { "1", 1, 2, 0 }, - { "-1", 1, -2, 0 }, - { "12345", 3, 98760, 1543 }, - { "-12345", 3, -98760, 0 }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl2); i++) { - Vint a = Vint(tbl2[i].str); - Vint shl = a << tbl2[i].s; - CYBOZU_TEST_EQUAL(shl, tbl2[i].shl); - if (!a.isNegative()) { - Vint shr = a >> tbl2[i].s; - CYBOZU_TEST_EQUAL(shr, tbl2[i].shr); - } - } - } -} - -CYBOZU_TEST_AUTO(add2) -{ - Vint x, y, z, w; - x.setStr("2416089439321382744001761632872637936198961520379024187947524965775137204955564426500438089001375107581766516460437532995850581062940399321788596606850"); - y.setStr("2416089439321382743300544243711595219403446085161565705825288050160594425031420687263897209379984490503106207071010949258995096347962762372787916800000"); - z.setStr("701217389161042716795515435217458482122236915614542779924143739236540879621390617078660309389426583736855484714977636949000679806850"); - Vint::sub(w, x, y); - CYBOZU_TEST_EQUAL(w, z); - - Vint a, c, d; - - a.setStr("-2416089439321382744001761632872637936198961520379024187947524965775137204955564426500438089001375107581766516460437532995850581062940399321788596606850"); - c.setStr("2416089439321382743300544243711595219403446085161565705825288050160594425031420687263897209379984490503106207071010949258995096347962762372787916800000"); - a = a + c; - - d.setStr("-701217389161042716795515435217458482122236915614542779924143739236540879621390617078660309389426583736855484714977636949000679806850"); - CYBOZU_TEST_EQUAL(a, d); -} - -CYBOZU_TEST_AUTO(stream) -{ - { - Vint x, y, z, w; - x.setStr("12345678901232342424242423423429922"); - y.setStr("23423423452424242343"); - std::ostringstream oss; - oss << x << ' ' << y; - std::istringstream iss(oss.str()); - iss >> z >> w; - CYBOZU_TEST_EQUAL(x, z); - CYBOZU_TEST_EQUAL(y, w); - } - { - Vint x, y, 
z, w; - x.setStr("0x100"); - y.setStr("123"); - std::ostringstream oss; - oss << x << ' ' << y; - std::istringstream iss(oss.str()); - iss >> z >> w; - CYBOZU_TEST_EQUAL(x, z); - CYBOZU_TEST_EQUAL(y, w); - } - { - Vint x, y, z, w; - x.setStr("12345678901232342424242423423429922"); - y.setStr("-23423423452424242343"); - std::ostringstream oss; - oss << x << ' ' << y; - std::istringstream iss(oss.str()); - iss >> z >> w; - CYBOZU_TEST_EQUAL(x, z); - CYBOZU_TEST_EQUAL(y, w); - } -} - -CYBOZU_TEST_AUTO(inc_dec) -{ - Vint x = 3; - CYBOZU_TEST_EQUAL(x++, 3); - CYBOZU_TEST_EQUAL(x, 4); - CYBOZU_TEST_EQUAL(++x, 5); - CYBOZU_TEST_EQUAL(x, 5); - - CYBOZU_TEST_EQUAL(x--, 5); - CYBOZU_TEST_EQUAL(x, 4); - CYBOZU_TEST_EQUAL(--x, 3); - CYBOZU_TEST_EQUAL(x, 3); -} - -CYBOZU_TEST_AUTO(withInt) -{ - Vint x = 15; - x += 3; - CYBOZU_TEST_EQUAL(x, 18); - x -= 2; - CYBOZU_TEST_EQUAL(x, 16); - x *= 2; - CYBOZU_TEST_EQUAL(x, 32); - x /= 3; - CYBOZU_TEST_EQUAL(x, 10); - x = -x; - CYBOZU_TEST_EQUAL(x, -10); - x += 1; - CYBOZU_TEST_EQUAL(x, -9); - x -= 2; - CYBOZU_TEST_EQUAL(x, -11); - x *= 2; - CYBOZU_TEST_EQUAL(x, -22); - x /= 5; - CYBOZU_TEST_EQUAL(x, -4); - x = -22; - x %= 5; - CYBOZU_TEST_EQUAL(x, -2); - - x = 3; - x += -2; - CYBOZU_TEST_EQUAL(x, 1); - x += -5; - CYBOZU_TEST_EQUAL(x, -4); - x -= -7; - CYBOZU_TEST_EQUAL(x, 3); - x *= -1; - CYBOZU_TEST_EQUAL(x, -3); - x /= -1; - CYBOZU_TEST_EQUAL(x, 3); - - x++; - CYBOZU_TEST_EQUAL(x, 4); - x--; - CYBOZU_TEST_EQUAL(x, 3); - x = -3; - x++; - CYBOZU_TEST_EQUAL(x, -2); - x--; - CYBOZU_TEST_EQUAL(x, -3); - - ++x; - CYBOZU_TEST_EQUAL(x, -2); - --x; - CYBOZU_TEST_EQUAL(x, -3); - x = 3; - ++x; - CYBOZU_TEST_EQUAL(x, 4); - --x; - CYBOZU_TEST_EQUAL(x, 3); -} - -CYBOZU_TEST_AUTO(addu1) -{ - Vint x = 4; - Vint::addu1(x, x, 2); - CYBOZU_TEST_EQUAL(x, 6); - Vint::subu1(x, x, 2); - CYBOZU_TEST_EQUAL(x, 4); - Vint::subu1(x, x, 10); - CYBOZU_TEST_EQUAL(x, -6); - x = -4; - Vint::addu1(x, x, 2); - CYBOZU_TEST_EQUAL(x, -2); - Vint::subu1(x, x, 2); - CYBOZU_TEST_EQUAL(x, -4); - Vint::addu1(x, x, 10); - CYBOZU_TEST_EQUAL(x, 6); - - x.setStr("0x10000000000000000000000002"); - Vint::subu1(x, x, 3); - CYBOZU_TEST_EQUAL(x, Vint("0xfffffffffffffffffffffffff")); - x.setStr("-0x10000000000000000000000000"); - Vint::addu1(x, x, 5); - CYBOZU_TEST_EQUAL(x, Vint("-0xffffffffffffffffffffffffb")); -} - -CYBOZU_TEST_AUTO(pow) -{ - Vint x = 2; - Vint y; - Vint::pow(y, x, 3); - CYBOZU_TEST_EQUAL(y, 8); - x = -2; - Vint::pow(y, x, 3); - CYBOZU_TEST_EQUAL(y, -8); -#ifndef MCL_AVOID_EXCEPTION_TEST -// CYBOZU_TEST_EXCEPTION(Vint::pow(y, x, -2), cybozu::Exception); -#endif -} - -CYBOZU_TEST_AUTO(powMod) -{ - Vint x = 7; - Vint m = 65537; - Vint y; - Vint::powMod(y, x, 20, m); - CYBOZU_TEST_EQUAL(y, 55277); - Vint::powMod(y, x, m - 1, m); - CYBOZU_TEST_EQUAL(y, 1); -} - -CYBOZU_TEST_AUTO(andOr) -{ - Vint x("1223480928420984209849242"); - Vint y("29348220482094820948208420984209482048204289482"); - Vint z; - z = x & y; - CYBOZU_TEST_EQUAL(z, Vint("1209221003550923564822922")); - z = x | y; - CYBOZU_TEST_EQUAL(z, Vint("29348220482094820948208435244134352108849315802")); -#ifndef MCL_AVOID_EXCEPTION_TEST -// CYBOZU_TEST_EXCEPTION(Vint("-2") | Vint("5"), cybozu::Exception); -// CYBOZU_TEST_EXCEPTION(Vint("-2") & Vint("5"), cybozu::Exception); -#endif - x = 8; - x |= 7; - CYBOZU_TEST_EQUAL(x, 15); - x = 65536; - y = 8; - y &= x; - CYBOZU_TEST_EQUAL(y, 0); -} - -CYBOZU_TEST_AUTO(invMod) -{ - Vint m("100000000000000000039"); - for (int i = 1; i < 100; i++) { - Vint x = i; - Vint y; - Vint::invMod(y, x, m); - 
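// invMod computes the modular inverse y = x^(-1) mod m; every x tested here is coprime to m, so (y * x) % m must equal 1.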
CYBOZU_TEST_EQUAL((y * x) % m, 1); - } -} - -CYBOZU_TEST_AUTO(isPrime) -{ - int primeTbl[] = { - 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, - 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, - 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, - 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, - 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, - 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, - 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, - 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, - 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, - 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, - 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, - 941, 947, 953, 967, 971, 977, 983, 991, 997 - }; - typedef std::set<int> IntSet; - IntSet primes; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(primeTbl); i++) { - primes.insert(primeTbl[i]); - } - for (int i = 0; i < 1000; i++) { - bool ok = primes.find(i) != primes.end(); - bool my = Vint(i).isPrime(); - CYBOZU_TEST_EQUAL(ok, my); - } - const struct { - const char *n; - bool isPrime; - } tbl[] = { - { "65537", true }, - { "449065", false }, - { "488881", false }, - { "512461", false }, - { "18446744073709551629", true }, - { "18446744073709551631", false }, - { "0x10000000000000000000000000000000000000007", true }, - { "0x10000000000000000000000000000000000000009", false }, - { "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", true }, - { "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2d", false }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - Vint x(tbl[i].n); - CYBOZU_TEST_EQUAL(x.isPrime(), tbl[i].isPrime); - } -} - -CYBOZU_TEST_AUTO(gcd) -{ - Vint x = 12; - Vint y = 18; - Vint z; - Vint::gcd(z, x, y); - CYBOZU_TEST_EQUAL(z, 6); - Vint::lcm(z, x, y); - CYBOZU_TEST_EQUAL(z, 36); - Vint::lcm(x, x, y); - CYBOZU_TEST_EQUAL(x, 36); - Vint::lcm(x, x, x); - CYBOZU_TEST_EQUAL(x, 36); -} - -CYBOZU_TEST_AUTO(jacobi) -{ - const struct { - const char *m; - const char *n; - int ok; - } tbl[] = { - { "0", "1", 1 }, - { "1", "1", 1 }, - { "123", "1", 1 }, - { "45", "77", -1 }, - { "60", "121", 1 }, - { "12345672342342342342428", "923423423424753211", 1 }, - { "12345672342342342342428", "34592342234235424753211", -1 }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - int my = Vint::jacobi(Vint(tbl[i].m), Vint(tbl[i].n)); - CYBOZU_TEST_EQUAL(my, tbl[i].ok); - } -} - -CYBOZU_TEST_AUTO(bench) -{ - Vint x, y, z; - x.setStr("0x2523648240000001ba344d80000000086121000000000013a700000000000013"); - y.setStr("0x1802938109810498104982094820498203942804928049284092424902424243"); - - int N = 100000; - CYBOZU_BENCH_C("add", N, Vint::add, z, x, y); - CYBOZU_BENCH_C("sub", N, Vint::sub, z, x, y); - CYBOZU_BENCH_C("mul", N, Vint::mul, z, x, y); - CYBOZU_BENCH_C("div", N, Vint::div, y, z, x); - - const struct { - const char *x; - const char *y; - } tbl[] = { - { - "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "0x2523648240000001ba344d8000000007ff9f800000000010a10000000000000d" - }, - { - "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab", - }, - { -
"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001", - }, - - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - x.setStr(tbl[i].x); - y.setStr(tbl[i].y); - CYBOZU_BENCH_C("fast div", N, Vint::div, z, x, y); -#ifndef DONT_USE_GMP_IN_TEST - { - mpz_class mx(tbl[i].x), my(tbl[i].y), mz; - CYBOZU_BENCH_C("gmp", N, mpz_div, mz.get_mpz_t(), mx.get_mpz_t(), my.get_mpz_t()); - } -#endif - } -} - -struct Seq { - const uint32_t *tbl; - size_t n; - size_t i, j; - Seq(const uint32_t *tbl, size_t n) : tbl(tbl), n(n), i(0), j(0) {} - bool next(uint64_t *v) - { - if (i == n) { - if (j == n - 1) return false; - i = 0; - j++; - } - *v = (uint64_t(tbl[j]) << 32) | tbl[i]; - i++; - return true; - } -}; - -#if MCL_SIZEOF_UNIT == 8 -CYBOZU_TEST_AUTO(divUnit) -{ - const uint32_t tbl[] = { - 0, 1, 3, - 0x7fffffff, - 0x80000000, - 0x80000001, - 0xffffffff, - }; - const size_t n = sizeof(tbl) / sizeof(tbl[0]); - Seq seq3(tbl, n); - uint64_t y; - while (seq3.next(&y)) { - if (y == 0) continue; - Seq seq2(tbl, n); - uint64_t r; - while (seq2.next(&r)) { - if (r >= y) break; - Seq seq1(tbl, n); - uint64_t q; - while (seq1.next(&q)) { - uint64_t x[2]; - x[0] = mcl::vint::mulUnit(&x[1], q, y); - mcl::vint::addu1(x, x, 2, r); - uint64_t Q, R; -//printf("q=0x%016llxull, r=0x%016llxull, y=0x%016llxull\n", (long long)q, (long long)r, (long long)y); - Q = mcl::vint::divUnit(&R, x[1], x[0], y); - CYBOZU_TEST_EQUAL(q, Q); - CYBOZU_TEST_EQUAL(r, R); - } - } - } -} - -void compareMod(const uint64_t *x, const uint64_t *p) -{ - uint64_t y1[4] = {}; - uint64_t y2[4] = {}; - mcl::vint::divNM((uint64_t*)0, 0, y1, x, 8, p, 4); - mcl::vint::mcl_fpDbl_mod_SECP256K1(y2, x, p); - CYBOZU_TEST_EQUAL_ARRAY(y1, y2, 4); -} -CYBOZU_TEST_AUTO(SECP256k1) -{ - const uint64_t F = uint64_t(-1); - const uint64_t p[4] = { uint64_t(0xfffffffefffffc2full), F, F, F }; - const uint64_t tbl[][8] = { - { 0, 0, 0, 0, 0, 0, 0, 0 }, - { F, F, F, F, F, F, F, F }, - { F, F, F, F, 1, 0, 0, 0 }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - const uint64_t *x = tbl[i]; - compareMod(x, p); - } - cybozu::XorShift rg; - for (size_t i = 0; i < 100; i++) { - uint64_t x[8]; - for (int j = 0; j < 8; j++) { - x[j] = rg(); - } - compareMod(x, p); - } -} -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/test/window_method_test.cpp b/vendor/github.com/dexon-foundation/mcl/test/window_method_test.cpp deleted file mode 100644 index 1b0f702af..000000000 --- a/vendor/github.com/dexon-foundation/mcl/test/window_method_test.cpp +++ /dev/null @@ -1,70 +0,0 @@ -#include -#include -#include -#include -#include - -CYBOZU_TEST_AUTO(ArrayIterator) -{ - const uint32_t in[2] = { 0x12345678, 0xabcdef89 }; - const size_t bitSize = 64; - for (size_t w = 1; w <= 32; w++) { - const uint32_t mask = uint32_t((uint64_t(1) << w) - 1); - mpz_class x; - mcl::gmp::setArray(x, in, 2); - mcl::fp::ArrayIterator ai(in, bitSize, w); - size_t n = (bitSize + w - 1) / w; - for (size_t j = 0; j < n; j++) { - CYBOZU_TEST_ASSERT(ai.hasNext()); - uint32_t v = ai.getNext(); - CYBOZU_TEST_EQUAL(x & mask, v); - x >>= w; - } - CYBOZU_TEST_ASSERT(!ai.hasNext()); - } -} - -CYBOZU_TEST_AUTO(int) -{ - typedef mcl::FpT<> Fp; - typedef mcl::EcT Ec; - const struct mcl::EcParam& para = mcl::ecparam::secp192k1; - Fp::init(para.p); - Ec::init(para.a, para.b); - const Fp x(para.gx); - const Fp y(para.gy); - const Ec P(x, y); - - typedef mcl::fp::WindowMethod PW; - const size_t bitSize = 13; - Ec Q, 
R; - - for (size_t winSize = 10; winSize <= bitSize; winSize++) { - PW pw(P, bitSize, winSize); - for (int i = 0; i < (1 << bitSize); i++) { - pw.mul(Q, i); - Ec::mul(R, P, i); - CYBOZU_TEST_EQUAL(Q, R); - } - } - PW pw(P, para.bitSize, 10); - pw.mul(Q, -12345); - Ec::mul(R, P, -12345); - CYBOZU_TEST_EQUAL(Q, R); - mpz_class t(para.gx); - pw.mul(Q, t); - Ec::mul(R, P, t); - CYBOZU_TEST_EQUAL(Q, R); - t = -t; - pw.mul(Q, t); - Ec::mul(R, P, t); - CYBOZU_TEST_EQUAL(Q, R); - - pw.mul(Q, x); - Ec::mul(R, P, x); - CYBOZU_TEST_EQUAL(Q, R); - - pw.mul(Q, y); - Ec::mul(R, P, y); - CYBOZU_TEST_EQUAL(Q, R); -} diff --git a/vendor/github.com/ethereum/go-ethereum/common/big.go b/vendor/github.com/ethereum/go-ethereum/common/big.go new file mode 100644 index 000000000..65d4377bf --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/common/big.go @@ -0,0 +1,30 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package common + +import "math/big" + +// Common big integers often used +var ( + Big1 = big.NewInt(1) + Big2 = big.NewInt(2) + Big3 = big.NewInt(3) + Big0 = big.NewInt(0) + Big32 = big.NewInt(32) + Big256 = big.NewInt(256) + Big257 = big.NewInt(257) +) diff --git a/vendor/github.com/ethereum/go-ethereum/common/bytes.go b/vendor/github.com/ethereum/go-ethereum/common/bytes.go new file mode 100644 index 000000000..c82e61624 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/common/bytes.go @@ -0,0 +1,138 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package common contains various helper functions. +package common + +import "encoding/hex" + +// ToHex returns the hex representation of b, prefixed with '0x'. +// For empty slices, the return value is "0x0". +// +// Deprecated: use hexutil.Encode instead. 
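// For example, ToHex(nil) == "0x0" and ToHex([]byte{0x0a}) == "0x0a".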
+func ToHex(b []byte) string { + hex := Bytes2Hex(b) + if len(hex) == 0 { + hex = "0" + } + return "0x" + hex +} + +// ToHexArray creates an array of hex strings based on [][]byte. +func ToHexArray(b [][]byte) []string { + r := make([]string, len(b)) + for i := range b { + r[i] = ToHex(b[i]) + } + return r +} + +// FromHex returns the bytes represented by the hexadecimal string s. +// s may be prefixed with "0x". +func FromHex(s string) []byte { + if len(s) > 1 { + if s[0:2] == "0x" || s[0:2] == "0X" { + s = s[2:] + } + } + if len(s)%2 == 1 { + s = "0" + s + } + return Hex2Bytes(s) +} + +// CopyBytes returns an exact copy of the provided bytes. +func CopyBytes(b []byte) (copiedBytes []byte) { + if b == nil { + return nil + } + copiedBytes = make([]byte, len(b)) + copy(copiedBytes, b) + + return +} + +// hasHexPrefix reports whether str begins with '0x' or '0X'. +func hasHexPrefix(str string) bool { + return len(str) >= 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X') +} + +// isHexCharacter reports whether c is a valid hexadecimal character. +func isHexCharacter(c byte) bool { + return ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F') +} + +// isHex reports whether str is a valid, even-length hexadecimal string. +func isHex(str string) bool { + if len(str)%2 != 0 { + return false + } + for _, c := range []byte(str) { + if !isHexCharacter(c) { + return false + } + } + return true +} + +// Bytes2Hex returns the hexadecimal encoding of d. +func Bytes2Hex(d []byte) string { + return hex.EncodeToString(d) +} + +// Hex2Bytes returns the bytes represented by the hexadecimal string str. +func Hex2Bytes(str string) []byte { + h, _ := hex.DecodeString(str) + return h +} + +// Hex2BytesFixed returns bytes of a specified fixed length flen. +func Hex2BytesFixed(str string, flen int) []byte { + h, _ := hex.DecodeString(str) + if len(h) == flen { + return h + } + if len(h) > flen { + return h[len(h)-flen:] + } + hh := make([]byte, flen) + copy(hh[flen-len(h):flen], h) + return hh +} + +// RightPadBytes zero-pads slice to the right up to length l. +func RightPadBytes(slice []byte, l int) []byte { + if l <= len(slice) { + return slice + } + + padded := make([]byte, l) + copy(padded, slice) + + return padded +} + +// LeftPadBytes zero-pads slice to the left up to length l. +func LeftPadBytes(slice []byte, l int) []byte { + if l <= len(slice) { + return slice + } + + padded := make([]byte, l) + copy(padded[l-len(slice):], slice) + + return padded +} diff --git a/vendor/github.com/ethereum/go-ethereum/common/debug.go b/vendor/github.com/ethereum/go-ethereum/common/debug.go new file mode 100644 index 000000000..61acd8ce7 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/common/debug.go @@ -0,0 +1,52 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
+ +package common + +import ( + "fmt" + "os" + "runtime" + "runtime/debug" + "strings" +) + +// Report gives off a warning requesting the user to submit an issue to the GitHub tracker. +func Report(extra ...interface{}) { + fmt.Fprintln(os.Stderr, "You've encountered a sought after, hard to reproduce bug. Please report this to the developers <3 https://github.com/ethereum/go-ethereum/issues") + fmt.Fprintln(os.Stderr, extra...) + + _, file, line, _ := runtime.Caller(1) + fmt.Fprintf(os.Stderr, "%v:%v\n", file, line) + + debug.PrintStack() + + fmt.Fprintln(os.Stderr, "#### BUG! PLEASE REPORT ####") +} + +// PrintDepricationWarning prints the given string in a box using fmt.Println. +func PrintDepricationWarning(str string) { + line := strings.Repeat("#", len(str)+4) + emptyLine := strings.Repeat(" ", len(str)) + fmt.Printf(` +%s +# %s # +# %s # +# %s # +%s + +`, line, emptyLine, str, emptyLine, line) +} diff --git a/vendor/github.com/ethereum/go-ethereum/common/format.go b/vendor/github.com/ethereum/go-ethereum/common/format.go new file mode 100644 index 000000000..6fc21af71 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/common/format.go @@ -0,0 +1,82 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package common + +import ( + "fmt" + "regexp" + "strings" + "time" +) + +// PrettyDuration is a pretty printed version of a time.Duration value that cuts +// the unnecessary precision off from the formatted textual representation. +type PrettyDuration time.Duration + +var prettyDurationRe = regexp.MustCompile(`\.[0-9]+`) + +// String implements the Stringer interface, allowing pretty printing of duration +// values rounded to three decimals. +func (d PrettyDuration) String() string { + label := fmt.Sprintf("%v", time.Duration(d)) + if match := prettyDurationRe.FindString(label); len(match) > 4 { + label = strings.Replace(label, match, match[:4], 1) + } + return label +} + +// PrettyAge is a pretty printed version of a time.Duration value that rounds +// the values up to a single most significant unit, days/weeks/years included. +type PrettyAge time.Time + +// ageUnits is a list of units the age pretty printing uses. +var ageUnits = []struct { + Size time.Duration + Symbol string +}{ + {12 * 30 * 24 * time.Hour, "y"}, + {30 * 24 * time.Hour, "mo"}, + {7 * 24 * time.Hour, "w"}, + {24 * time.Hour, "d"}, + {time.Hour, "h"}, + {time.Minute, "m"}, + {time.Second, "s"}, +} + +// String implements the Stringer interface, allowing pretty printing of duration +// values rounded to the most significant time unit. 
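// For example, an age of 400 days renders as "1y1mo1w": at most three components are emitted before the precision cutoff in the loop below.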
+func (t PrettyAge) String() string { + // Calculate the time difference and handle the zero corner case + diff := time.Since(time.Time(t)) + if diff < time.Second { + return "0" + } + // Accumulate a precision of 3 components before returning + result, prec := "", 0 + + for _, unit := range ageUnits { + if diff > unit.Size { + result = fmt.Sprintf("%s%d%s", result, diff/unit.Size, unit.Symbol) + diff %= unit.Size + + if prec += 1; prec >= 3 { + break + } + } + } + return result +} diff --git a/vendor/github.com/ethereum/go-ethereum/common/hexutil/hexutil.go b/vendor/github.com/ethereum/go-ethereum/common/hexutil/hexutil.go new file mode 100644 index 000000000..46223a281 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/common/hexutil/hexutil.go @@ -0,0 +1,240 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +/* +Package hexutil implements hex encoding with 0x prefix. +This encoding is used by the Ethereum RPC API to transport binary data in JSON payloads. + +Encoding Rules + +All hex data must have prefix "0x". + +For byte slices, the hex data must be of even length. An empty byte slice +encodes as "0x". + +Integers are encoded using the fewest digits possible (no leading zero digits). Their +encoding may be of uneven length. The number zero encodes as "0x0". +*/ +package hexutil + +import ( + "encoding/hex" + "fmt" + "math/big" + "strconv" +) + +const uintBits = 32 << (uint64(^uint(0)) >> 63) + +// Errors +var ( + ErrEmptyString = &decError{"empty hex string"} + ErrSyntax = &decError{"invalid hex string"} + ErrMissingPrefix = &decError{"hex string without 0x prefix"} + ErrOddLength = &decError{"hex string of odd length"} + ErrEmptyNumber = &decError{"hex string \"0x\""} + ErrLeadingZero = &decError{"hex number with leading zero digits"} + ErrUint64Range = &decError{"hex number > 64 bits"} + ErrUintRange = &decError{fmt.Sprintf("hex number > %d bits", uintBits)} + ErrBig256Range = &decError{"hex number > 256 bits"} +) + +type decError struct{ msg string } + +func (err decError) Error() string { return err.msg } + +// Decode decodes a hex string with 0x prefix. +func Decode(input string) ([]byte, error) { + if len(input) == 0 { + return nil, ErrEmptyString + } + if !has0xPrefix(input) { + return nil, ErrMissingPrefix + } + b, err := hex.DecodeString(input[2:]) + if err != nil { + err = mapError(err) + } + return b, err +} + +// MustDecode decodes a hex string with 0x prefix. It panics for invalid input. +func MustDecode(input string) []byte { + dec, err := Decode(input) + if err != nil { + panic(err) + } + return dec +} + +// Encode encodes b as a hex string with 0x prefix. 
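// For example, Encode(nil) == "0x" (the empty-slice rule above) and Encode([]byte{1}) == "0x01".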
+func Encode(b []byte) string { + enc := make([]byte, len(b)*2+2) + copy(enc, "0x") + hex.Encode(enc[2:], b) + return string(enc) +} + +// DecodeUint64 decodes a hex string with 0x prefix as a quantity. +func DecodeUint64(input string) (uint64, error) { + raw, err := checkNumber(input) + if err != nil { + return 0, err + } + dec, err := strconv.ParseUint(raw, 16, 64) + if err != nil { + err = mapError(err) + } + return dec, err +} + +// MustDecodeUint64 decodes a hex string with 0x prefix as a quantity. +// It panics for invalid input. +func MustDecodeUint64(input string) uint64 { + dec, err := DecodeUint64(input) + if err != nil { + panic(err) + } + return dec +} + +// EncodeUint64 encodes i as a hex string with 0x prefix. +func EncodeUint64(i uint64) string { + enc := make([]byte, 2, 10) + copy(enc, "0x") + return string(strconv.AppendUint(enc, i, 16)) +} + +var bigWordNibbles int + +func init() { + // This is a weird way to compute the number of nibbles required for big.Word. + // The usual way would be to use constant arithmetic but go vet can't handle that. + b, _ := new(big.Int).SetString("FFFFFFFFFF", 16) + switch len(b.Bits()) { + case 1: + bigWordNibbles = 16 + case 2: + bigWordNibbles = 8 + default: + panic("weird big.Word size") + } +} + +// DecodeBig decodes a hex string with 0x prefix as a quantity. +// Numbers larger than 256 bits are not accepted. +func DecodeBig(input string) (*big.Int, error) { + raw, err := checkNumber(input) + if err != nil { + return nil, err + } + if len(raw) > 64 { + return nil, ErrBig256Range + } + words := make([]big.Word, len(raw)/bigWordNibbles+1) + end := len(raw) + for i := range words { + start := end - bigWordNibbles + if start < 0 { + start = 0 + } + for ri := start; ri < end; ri++ { + nib := decodeNibble(raw[ri]) + if nib == badNibble { + return nil, ErrSyntax + } + words[i] *= 16 + words[i] += big.Word(nib) + } + end = start + } + dec := new(big.Int).SetBits(words) + return dec, nil +} + +// MustDecodeBig decodes a hex string with 0x prefix as a quantity. +// It panics for invalid input. +func MustDecodeBig(input string) *big.Int { + dec, err := DecodeBig(input) + if err != nil { + panic(err) + } + return dec +} + +// EncodeBig encodes bigint as a hex string with 0x prefix. +// Negative integers are encoded with a leading "-" sign. 
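// For example, EncodeBig(big.NewInt(0)) == "0x0" and EncodeBig(big.NewInt(255)) == "0xff".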
+func EncodeBig(bigint *big.Int) string { + nbits := bigint.BitLen() + if nbits == 0 { + return "0x0" + } + return fmt.Sprintf("%#x", bigint) +} + +func has0xPrefix(input string) bool { + return len(input) >= 2 && input[0] == '0' && (input[1] == 'x' || input[1] == 'X') +} + +func checkNumber(input string) (raw string, err error) { + if len(input) == 0 { + return "", ErrEmptyString + } + if !has0xPrefix(input) { + return "", ErrMissingPrefix + } + input = input[2:] + if len(input) == 0 { + return "", ErrEmptyNumber + } + if len(input) > 1 && input[0] == '0' { + return "", ErrLeadingZero + } + return input, nil +} + +const badNibble = ^uint64(0) + +func decodeNibble(in byte) uint64 { + switch { + case in >= '0' && in <= '9': + return uint64(in - '0') + case in >= 'A' && in <= 'F': + return uint64(in - 'A' + 10) + case in >= 'a' && in <= 'f': + return uint64(in - 'a' + 10) + default: + return badNibble + } +} + +func mapError(err error) error { + if err, ok := err.(*strconv.NumError); ok { + switch err.Err { + case strconv.ErrRange: + return ErrUint64Range + case strconv.ErrSyntax: + return ErrSyntax + } + } + if _, ok := err.(hex.InvalidByteError); ok { + return ErrSyntax + } + if err == hex.ErrLength { + return ErrOddLength + } + return err +} diff --git a/vendor/github.com/ethereum/go-ethereum/common/hexutil/json.go b/vendor/github.com/ethereum/go-ethereum/common/hexutil/json.go new file mode 100644 index 000000000..777b08eca --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/common/hexutil/json.go @@ -0,0 +1,376 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package hexutil + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "math/big" + "reflect" + "strconv" +) + +var ( + bytesT = reflect.TypeOf(Bytes(nil)) + bigT = reflect.TypeOf((*Big)(nil)) + uintT = reflect.TypeOf(Uint(0)) + uint64T = reflect.TypeOf(Uint64(0)) +) + +// Bytes marshals/unmarshals as a JSON string with 0x prefix. +// The empty slice marshals as "0x". +type Bytes []byte + +// MarshalText implements encoding.TextMarshaler +func (b Bytes) MarshalText() ([]byte, error) { + result := make([]byte, len(b)*2+2) + copy(result, `0x`) + hex.Encode(result[2:], b) + return result, nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (b *Bytes) UnmarshalJSON(input []byte) error { + if !isString(input) { + return errNonString(bytesT) + } + return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), bytesT) +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (b *Bytes) UnmarshalText(input []byte) error { + raw, err := checkText(input, true) + if err != nil { + return err + } + dec := make([]byte, len(raw)/2) + if _, err = hex.Decode(dec, raw); err != nil { + err = mapError(err) + } else { + *b = dec + } + return err +} + +// String returns the hex encoding of b. 
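// For example, Bytes{0xde, 0xad}.String() == "0xdead".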
+func (b Bytes) String() string { + return Encode(b) +} + +// ImplementsGraphQLType returns true if Bytes implements the specified GraphQL type. +func (b Bytes) ImplementsGraphQLType(name string) bool { return name == "Bytes" } + +// UnmarshalGraphQL unmarshals the provided GraphQL query data. +func (b *Bytes) UnmarshalGraphQL(input interface{}) error { + var err error + switch input := input.(type) { + case string: + data, err := Decode(input) + if err != nil { + return err + } + *b = data + default: + err = fmt.Errorf("Unexpected type for Bytes: %v", input) + } + return err +} + +// UnmarshalFixedJSON decodes the input as a string with 0x prefix. The length of out +// determines the required input length. This function is commonly used to implement the +// UnmarshalJSON method for fixed-size types. +func UnmarshalFixedJSON(typ reflect.Type, input, out []byte) error { + if !isString(input) { + return errNonString(typ) + } + return wrapTypeError(UnmarshalFixedText(typ.String(), input[1:len(input)-1], out), typ) +} + +// UnmarshalFixedText decodes the input as a string with 0x prefix. The length of out +// determines the required input length. This function is commonly used to implement the +// UnmarshalText method for fixed-size types. +func UnmarshalFixedText(typname string, input, out []byte) error { + raw, err := checkText(input, true) + if err != nil { + return err + } + if len(raw)/2 != len(out) { + return fmt.Errorf("hex string has length %d, want %d for %s", len(raw), len(out)*2, typname) + } + // Pre-verify syntax before modifying out. + for _, b := range raw { + if decodeNibble(b) == badNibble { + return ErrSyntax + } + } + hex.Decode(out, raw) + return nil +} + +// UnmarshalFixedUnprefixedText decodes the input as a string with optional 0x prefix. The +// length of out determines the required input length. This function is commonly used to +// implement the UnmarshalText method for fixed-size types. +func UnmarshalFixedUnprefixedText(typname string, input, out []byte) error { + raw, err := checkText(input, false) + if err != nil { + return err + } + if len(raw)/2 != len(out) { + return fmt.Errorf("hex string has length %d, want %d for %s", len(raw), len(out)*2, typname) + } + // Pre-verify syntax before modifying out. + for _, b := range raw { + if decodeNibble(b) == badNibble { + return ErrSyntax + } + } + hex.Decode(out, raw) + return nil +} + +// Big marshals/unmarshals as a JSON string with 0x prefix. +// The zero value marshals as "0x0". +// +// Negative integers are not supported at this time. Attempting to marshal them will +// return an error. Values larger than 256bits are rejected by Unmarshal but will be +// marshaled without error. +type Big big.Int + +// MarshalText implements encoding.TextMarshaler +func (b Big) MarshalText() ([]byte, error) { + return []byte(EncodeBig((*big.Int)(&b))), nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
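// It accepts a quoted quantity such as "0x41"; the rules match DecodeBig: 0x prefix required, no leading zeros, at most 256 bits.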
+func (b *Big) UnmarshalJSON(input []byte) error { + if !isString(input) { + return errNonString(bigT) + } + return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), bigT) +} + +// UnmarshalText implements encoding.TextUnmarshaler +func (b *Big) UnmarshalText(input []byte) error { + raw, err := checkNumberText(input) + if err != nil { + return err + } + if len(raw) > 64 { + return ErrBig256Range + } + words := make([]big.Word, len(raw)/bigWordNibbles+1) + end := len(raw) + for i := range words { + start := end - bigWordNibbles + if start < 0 { + start = 0 + } + for ri := start; ri < end; ri++ { + nib := decodeNibble(raw[ri]) + if nib == badNibble { + return ErrSyntax + } + words[i] *= 16 + words[i] += big.Word(nib) + } + end = start + } + var dec big.Int + dec.SetBits(words) + *b = (Big)(dec) + return nil +} + +// ToInt converts b to a big.Int. +func (b *Big) ToInt() *big.Int { + return (*big.Int)(b) +} + +// String returns the hex encoding of b. +func (b *Big) String() string { + return EncodeBig(b.ToInt()) +} + +// ImplementsGraphQLType returns true if Big implements the provided GraphQL type. +func (b Big) ImplementsGraphQLType(name string) bool { return name == "BigInt" } + +// UnmarshalGraphQL unmarshals the provided GraphQL query data. +func (b *Big) UnmarshalGraphQL(input interface{}) error { + var err error + switch input := input.(type) { + case string: + return b.UnmarshalText([]byte(input)) + case int32: + var num big.Int + num.SetInt64(int64(input)) + *b = Big(num) + default: + err = fmt.Errorf("Unexpected type for BigInt: %v", input) + } + return err +} + +// Uint64 marshals/unmarshals as a JSON string with 0x prefix. +// The zero value marshals as "0x0". +type Uint64 uint64 + +// MarshalText implements encoding.TextMarshaler. +func (b Uint64) MarshalText() ([]byte, error) { + buf := make([]byte, 2, 10) + copy(buf, `0x`) + buf = strconv.AppendUint(buf, uint64(b), 16) + return buf, nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (b *Uint64) UnmarshalJSON(input []byte) error { + if !isString(input) { + return errNonString(uint64T) + } + return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), uint64T) +} + +// UnmarshalText implements encoding.TextUnmarshaler +func (b *Uint64) UnmarshalText(input []byte) error { + raw, err := checkNumberText(input) + if err != nil { + return err + } + if len(raw) > 16 { + return ErrUint64Range + } + var dec uint64 + for _, byte := range raw { + nib := decodeNibble(byte) + if nib == badNibble { + return ErrSyntax + } + dec *= 16 + dec += nib + } + *b = Uint64(dec) + return nil +} + +// String returns the hex encoding of b. +func (b Uint64) String() string { + return EncodeUint64(uint64(b)) +} + +// ImplementsGraphQLType returns true if Uint64 implements the provided GraphQL type. +func (b Uint64) ImplementsGraphQLType(name string) bool { return name == "Long" } + +// UnmarshalGraphQL unmarshals the provided GraphQL query data. +func (b *Uint64) UnmarshalGraphQL(input interface{}) error { + var err error + switch input := input.(type) { + case string: + return b.UnmarshalText([]byte(input)) + case int32: + *b = Uint64(input) + default: + err = fmt.Errorf("Unexpected type for Long: %v", input) + } + return err +} + +// Uint marshals/unmarshals as a JSON string with 0x prefix. +// The zero value marshals as "0x0". +type Uint uint + +// MarshalText implements encoding.TextMarshaler. +func (b Uint) MarshalText() ([]byte, error) { + return Uint64(b).MarshalText() +} + +// UnmarshalJSON implements json.Unmarshaler. 
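// Parsed values that exceed the platform's uint range are rejected with ErrUintRange (see UnmarshalText below).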
+func (b *Uint) UnmarshalJSON(input []byte) error { + if !isString(input) { + return errNonString(uintT) + } + return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), uintT) +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (b *Uint) UnmarshalText(input []byte) error { + var u64 Uint64 + err := u64.UnmarshalText(input) + if u64 > Uint64(^uint(0)) || err == ErrUint64Range { + return ErrUintRange + } else if err != nil { + return err + } + *b = Uint(u64) + return nil +} + +// String returns the hex encoding of b. +func (b Uint) String() string { + return EncodeUint64(uint64(b)) +} + +func isString(input []byte) bool { + return len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"' +} + +func bytesHave0xPrefix(input []byte) bool { + return len(input) >= 2 && input[0] == '0' && (input[1] == 'x' || input[1] == 'X') +} + +func checkText(input []byte, wantPrefix bool) ([]byte, error) { + if len(input) == 0 { + return nil, nil // empty strings are allowed + } + if bytesHave0xPrefix(input) { + input = input[2:] + } else if wantPrefix { + return nil, ErrMissingPrefix + } + if len(input)%2 != 0 { + return nil, ErrOddLength + } + return input, nil +} + +func checkNumberText(input []byte) (raw []byte, err error) { + if len(input) == 0 { + return nil, nil // empty strings are allowed + } + if !bytesHave0xPrefix(input) { + return nil, ErrMissingPrefix + } + input = input[2:] + if len(input) == 0 { + return nil, ErrEmptyNumber + } + if len(input) > 1 && input[0] == '0' { + return nil, ErrLeadingZero + } + return input, nil +} + +func wrapTypeError(err error, typ reflect.Type) error { + if _, ok := err.(*decError); ok { + return &json.UnmarshalTypeError{Value: err.Error(), Type: typ} + } + return err +} + +func errNonString(typ reflect.Type) error { + return &json.UnmarshalTypeError{Value: "non-string", Type: typ} +} diff --git a/vendor/github.com/ethereum/go-ethereum/common/math/big.go b/vendor/github.com/ethereum/go-ethereum/common/math/big.go new file mode 100644 index 000000000..d31c59af1 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/common/math/big.go @@ -0,0 +1,219 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package math provides integer math utilities. +package math + +import ( + "fmt" + "math/big" +) + +// Various big integer limit values. +var ( + tt255 = BigPow(2, 255) + tt256 = BigPow(2, 256) + tt256m1 = new(big.Int).Sub(tt256, big.NewInt(1)) + tt63 = BigPow(2, 63) + MaxBig256 = new(big.Int).Set(tt256m1) + MaxBig63 = new(big.Int).Sub(tt63, big.NewInt(1)) +) + +const ( + // number of bits in a big.Word + wordBits = 32 << (uint64(^big.Word(0)) >> 63) + // number of bytes in a big.Word + wordBytes = wordBits / 8 +) + +// HexOrDecimal256 marshals big.Int as hex or decimal. 
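// Both the hex form "0x10" and the decimal form "16" unmarshal to the same value; MarshalText always emits hex.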
+type HexOrDecimal256 big.Int + +// NewHexOrDecimal256 creates a new HexOrDecimal256 +func NewHexOrDecimal256(x int64) *HexOrDecimal256 { + b := big.NewInt(x) + h := HexOrDecimal256(*b) + return &h +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (i *HexOrDecimal256) UnmarshalText(input []byte) error { + bigint, ok := ParseBig256(string(input)) + if !ok { + return fmt.Errorf("invalid hex or decimal integer %q", input) + } + *i = HexOrDecimal256(*bigint) + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (i *HexOrDecimal256) MarshalText() ([]byte, error) { + if i == nil { + return []byte("0x0"), nil + } + return []byte(fmt.Sprintf("%#x", (*big.Int)(i))), nil +} + +// ParseBig256 parses s as a 256 bit integer in decimal or hexadecimal syntax. +// Leading zeros are accepted. The empty string parses as zero. +func ParseBig256(s string) (*big.Int, bool) { + if s == "" { + return new(big.Int), true + } + var bigint *big.Int + var ok bool + if len(s) >= 2 && (s[:2] == "0x" || s[:2] == "0X") { + bigint, ok = new(big.Int).SetString(s[2:], 16) + } else { + bigint, ok = new(big.Int).SetString(s, 10) + } + if ok && bigint.BitLen() > 256 { + bigint, ok = nil, false + } + return bigint, ok +} + +// MustParseBig256 parses s as a 256 bit big integer and panics if the string is invalid. +func MustParseBig256(s string) *big.Int { + v, ok := ParseBig256(s) + if !ok { + panic("invalid 256 bit integer: " + s) + } + return v +} + +// BigPow returns a ** b as a big integer. +func BigPow(a, b int64) *big.Int { + r := big.NewInt(a) + return r.Exp(r, big.NewInt(b), nil) +} + +// BigMax returns the larger of x or y. +func BigMax(x, y *big.Int) *big.Int { + if x.Cmp(y) < 0 { + return y + } + return x +} + +// BigMin returns the smaller of x or y. +func BigMin(x, y *big.Int) *big.Int { + if x.Cmp(y) > 0 { + return y + } + return x +} + +// FirstBitSet returns the index of the first 1 bit in v, counting from LSB. +func FirstBitSet(v *big.Int) int { + for i := 0; i < v.BitLen(); i++ { + if v.Bit(i) > 0 { + return i + } + } + return v.BitLen() +} + +// PaddedBigBytes encodes a big integer as a big-endian byte slice. The length +// of the slice is at least n bytes. +func PaddedBigBytes(bigint *big.Int, n int) []byte { + if bigint.BitLen()/8 >= n { + return bigint.Bytes() + } + ret := make([]byte, n) + ReadBits(bigint, ret) + return ret +} + +// bigEndianByteAt returns the byte at position n, +// in Big-Endian encoding +// So n==0 returns the least significant byte +func bigEndianByteAt(bigint *big.Int, n int) byte { + words := bigint.Bits() + // Check word-bucket the byte will reside in + i := n / wordBytes + if i >= len(words) { + return byte(0) + } + word := words[i] + // Offset of the byte + shift := 8 * uint(n%wordBytes) + + return byte(word >> shift) +} + +// Byte returns the byte at position n, +// with the supplied padlength in Little-Endian encoding. +// n==0 returns the MSB +// Example: bigint '5', padlength 32, n=31 => 5 +func Byte(bigint *big.Int, padlength, n int) byte { + if n >= padlength { + return byte(0) + } + return bigEndianByteAt(bigint, padlength-1-n) +} + +// ReadBits encodes the absolute value of bigint as big-endian bytes. Callers must ensure +// that buf has enough space. If buf is too short the result will be incomplete. 
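// A usage sketch (not part of the vendored source): ParseBig256 accepts both
// decimal and 0x-prefixed hexadecimal syntax, and PaddedBigBytes left-pads
// the big-endian encoding to the requested width.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	hexVal, _ := math.ParseBig256("0x2a")
	decVal, _ := math.ParseBig256("42")
	fmt.Println(hexVal.Cmp(decVal) == 0)               // true
	fmt.Printf("%x\n", math.PaddedBigBytes(hexVal, 4)) // 0000002a
}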
+func ReadBits(bigint *big.Int, buf []byte) { + i := len(buf) + for _, d := range bigint.Bits() { + for j := 0; j < wordBytes && i > 0; j++ { + i-- + buf[i] = byte(d) + d >>= 8 + } + } +} + +// U256 encodes as a 256 bit two's complement number. This operation is destructive. +func U256(x *big.Int) *big.Int { + return x.And(x, tt256m1) +} + +// S256 interprets x as a two's complement number. +// x must not exceed 256 bits (the result is undefined if it does) and is not modified. +// +// S256(0) = 0 +// S256(1) = 1 +// S256(2**255) = -2**255 +// S256(2**256-1) = -1 +func S256(x *big.Int) *big.Int { + if x.Cmp(tt255) < 0 { + return x + } + return new(big.Int).Sub(x, tt256) +} + +// Exp implements exponentiation by squaring. +// Exp returns a newly-allocated big integer and does not change +// base or exponent. The result is truncated to 256 bits. +// +// Courtesy @karalabe and @chfast +func Exp(base, exponent *big.Int) *big.Int { + result := big.NewInt(1) + + for _, word := range exponent.Bits() { + for i := 0; i < wordBits; i++ { + if word&1 == 1 { + U256(result.Mul(result, base)) + } + U256(base.Mul(base, base)) + word >>= 1 + } + } + return result +} diff --git a/vendor/github.com/ethereum/go-ethereum/common/math/integer.go b/vendor/github.com/ethereum/go-ethereum/common/math/integer.go new file mode 100644 index 000000000..93b1d036d --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/common/math/integer.go @@ -0,0 +1,99 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package math + +import ( + "fmt" + "strconv" +) + +// Integer limit values. +const ( + MaxInt8 = 1<<7 - 1 + MinInt8 = -1 << 7 + MaxInt16 = 1<<15 - 1 + MinInt16 = -1 << 15 + MaxInt32 = 1<<31 - 1 + MinInt32 = -1 << 31 + MaxInt64 = 1<<63 - 1 + MinInt64 = -1 << 63 + MaxUint8 = 1<<8 - 1 + MaxUint16 = 1<<16 - 1 + MaxUint32 = 1<<32 - 1 + MaxUint64 = 1<<64 - 1 +) + +// HexOrDecimal64 marshals uint64 as hex or decimal. +type HexOrDecimal64 uint64 + +// UnmarshalText implements encoding.TextUnmarshaler. +func (i *HexOrDecimal64) UnmarshalText(input []byte) error { + int, ok := ParseUint64(string(input)) + if !ok { + return fmt.Errorf("invalid hex or decimal integer %q", input) + } + *i = HexOrDecimal64(int) + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (i HexOrDecimal64) MarshalText() ([]byte, error) { + return []byte(fmt.Sprintf("%#x", uint64(i))), nil +} + +// ParseUint64 parses s as an integer in decimal or hexadecimal syntax. +// Leading zeros are accepted. The empty string parses as zero. 
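// Illustrative sketch (not part of the vendored source): the 256-bit two's
// complement helpers above, plus the overflow-checked uint64 arithmetic
// defined just below.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	// MaxBig256 is all ones, i.e. -1 as a 256-bit two's complement value.
	fmt.Println(math.S256(new(big.Int).Set(math.MaxBig256))) // -1
	// Values below 2**255 are returned unchanged.
	fmt.Println(math.S256(big.NewInt(7))) // 7

	// SafeAdd reports the wrap-around instead of silently overflowing.
	sum, overflow := math.SafeAdd(math.MaxUint64, 1)
	fmt.Println(sum, overflow) // 0 true
}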
+func ParseUint64(s string) (uint64, bool) {
+	if s == "" {
+		return 0, true
+	}
+	if len(s) >= 2 && (s[:2] == "0x" || s[:2] == "0X") {
+		v, err := strconv.ParseUint(s[2:], 16, 64)
+		return v, err == nil
+	}
+	v, err := strconv.ParseUint(s, 10, 64)
+	return v, err == nil
+}
+
+// MustParseUint64 parses s as an integer and panics if the string is invalid.
+func MustParseUint64(s string) uint64 {
+	v, ok := ParseUint64(s)
+	if !ok {
+		panic("invalid unsigned 64 bit integer: " + s)
+	}
+	return v
+}
+
+// NOTE: The following methods need to be optimised using either bit checking or asm
+
+// SafeSub returns subtraction result and whether overflow occurred.
+func SafeSub(x, y uint64) (uint64, bool) {
+	return x - y, x < y
+}
+
+// SafeAdd returns the result and whether overflow occurred.
+func SafeAdd(x, y uint64) (uint64, bool) {
+	return x + y, y > MaxUint64-x
+}
+
+// SafeMul returns multiplication result and whether overflow occurred.
+func SafeMul(x, y uint64) (uint64, bool) {
+	if x == 0 || y == 0 {
+		return 0, false
+	}
+	return x * y, y > MaxUint64/x
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/common/path.go b/vendor/github.com/ethereum/go-ethereum/common/path.go
new file mode 100644
index 000000000..69820cfe5
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/common/path.go
@@ -0,0 +1,49 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package common
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+// MakeName creates a node name that follows the ethereum convention
+// for such names. It adds the operating system name and Go runtime version
+// to the name.
+func MakeName(name, version string) string {
+	return fmt.Sprintf("%s/v%s/%s/%s", name, version, runtime.GOOS, runtime.Version())
+}
+
+// FileExist checks if a file exists at filePath.
+func FileExist(filePath string) bool {
+	_, err := os.Stat(filePath)
+	if err != nil && os.IsNotExist(err) {
+		return false
+	}
+
+	return true
+}
+
+// AbsolutePath returns datadir + filename, or filename if it is absolute.
+func AbsolutePath(datadir string, filename string) string {
+	if filepath.IsAbs(filename) {
+		return filename
+	}
+	return filepath.Join(datadir, filename)
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/common/size.go b/vendor/github.com/ethereum/go-ethereum/common/size.go
new file mode 100644
index 000000000..097b6304a
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/common/size.go
@@ -0,0 +1,56 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package common + +import ( + "fmt" +) + +// StorageSize is a wrapper around a float value that supports user friendly +// formatting. +type StorageSize float64 + +// String implements the stringer interface. +func (s StorageSize) String() string { + if s > 1099511627776 { + return fmt.Sprintf("%.2f TiB", s/1099511627776) + } else if s > 1073741824 { + return fmt.Sprintf("%.2f GiB", s/1073741824) + } else if s > 1048576 { + return fmt.Sprintf("%.2f MiB", s/1048576) + } else if s > 1024 { + return fmt.Sprintf("%.2f KiB", s/1024) + } else { + return fmt.Sprintf("%.2f B", s) + } +} + +// TerminalString implements log.TerminalStringer, formatting a string for console +// output during logging. +func (s StorageSize) TerminalString() string { + if s > 1099511627776 { + return fmt.Sprintf("%.2fTiB", s/1099511627776) + } else if s > 1073741824 { + return fmt.Sprintf("%.2fGiB", s/1073741824) + } else if s > 1048576 { + return fmt.Sprintf("%.2fMiB", s/1048576) + } else if s > 1024 { + return fmt.Sprintf("%.2fKiB", s/1024) + } else { + return fmt.Sprintf("%.2fB", s) + } +} diff --git a/vendor/github.com/ethereum/go-ethereum/common/test_utils.go b/vendor/github.com/ethereum/go-ethereum/common/test_utils.go new file mode 100644 index 000000000..a848642f7 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/common/test_utils.go @@ -0,0 +1,53 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package common + +import ( + "encoding/json" + "fmt" + "io/ioutil" +) + +// LoadJSON reads the given file and unmarshals its content. +func LoadJSON(file string, val interface{}) error { + content, err := ioutil.ReadFile(file) + if err != nil { + return err + } + if err := json.Unmarshal(content, val); err != nil { + if syntaxerr, ok := err.(*json.SyntaxError); ok { + line := findLine(content, syntaxerr.Offset) + return fmt.Errorf("JSON syntax error at %v:%v: %v", file, line, err) + } + return fmt.Errorf("JSON unmarshal error in %v: %v", file, err) + } + return nil +} + +// findLine returns the line number for the given offset into data. 
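// A sketch (not part of the vendored source): StorageSize picks a
// human-readable unit when printed, via the String method above.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	fmt.Println(common.StorageSize(1536))    // 1.50 KiB
	fmt.Println(common.StorageSize(2 << 30)) // 2.00 GiB
}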
+func findLine(data []byte, offset int64) (line int) { + line = 1 + for i, r := range string(data) { + if int64(i) >= offset { + return + } + if r == '\n' { + line++ + } + } + return +} diff --git a/vendor/github.com/ethereum/go-ethereum/common/types.go b/vendor/github.com/ethereum/go-ethereum/common/types.go new file mode 100644 index 000000000..98c83edd4 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/common/types.go @@ -0,0 +1,369 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package common + +import ( + "database/sql/driver" + "encoding/hex" + "encoding/json" + "fmt" + "math/big" + "math/rand" + "reflect" + "strings" + + "github.com/ethereum/go-ethereum/common/hexutil" + "golang.org/x/crypto/sha3" +) + +// Lengths of hashes and addresses in bytes. +const ( + // HashLength is the expected length of the hash + HashLength = 32 + // AddressLength is the expected length of the address + AddressLength = 20 +) + +var ( + hashT = reflect.TypeOf(Hash{}) + addressT = reflect.TypeOf(Address{}) +) + +// Hash represents the 32 byte Keccak256 hash of arbitrary data. +type Hash [HashLength]byte + +// BytesToHash sets b to hash. +// If b is larger than len(h), b will be cropped from the left. +func BytesToHash(b []byte) Hash { + var h Hash + h.SetBytes(b) + return h +} + +// BigToHash sets byte representation of b to hash. +// If b is larger than len(h), b will be cropped from the left. +func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) } + +// HexToHash sets byte representation of s to hash. +// If b is larger than len(h), b will be cropped from the left. +func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) } + +// Bytes gets the byte representation of the underlying hash. +func (h Hash) Bytes() []byte { return h[:] } + +// Big converts a hash to a big integer. +func (h Hash) Big() *big.Int { return new(big.Int).SetBytes(h[:]) } + +// Hex converts a hash to a hex string. +func (h Hash) Hex() string { return hexutil.Encode(h[:]) } + +// TerminalString implements log.TerminalStringer, formatting a string for console +// output during logging. +func (h Hash) TerminalString() string { + return fmt.Sprintf("%x…%x", h[:3], h[29:]) +} + +// String implements the stringer interface and is used also by the logger when +// doing full logging into a file. +func (h Hash) String() string { + return h.Hex() +} + +// Format implements fmt.Formatter, forcing the byte slice to be formatted as is, +// without going through the stringer interface used for logging. +func (h Hash) Format(s fmt.State, c rune) { + fmt.Fprintf(s, "%"+string(c), h[:]) +} + +// UnmarshalText parses a hash in hex syntax. +func (h *Hash) UnmarshalText(input []byte) error { + return hexutil.UnmarshalFixedText("Hash", input, h[:]) +} + +// UnmarshalJSON parses a hash in hex syntax. 
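// A sketch (not part of the vendored source): Hash values parse from hex and
// print in full or abbreviated form.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	h := common.HexToHash("0x00000000000000000000000000000000000000000000000000000000deadbeef")
	fmt.Println(h.Hex())            // full 0x-prefixed 32-byte hex string
	fmt.Println(h.TerminalString()) // 000000…adbeef
}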
+func (h *Hash) UnmarshalJSON(input []byte) error { + return hexutil.UnmarshalFixedJSON(hashT, input, h[:]) +} + +// MarshalText returns the hex representation of h. +func (h Hash) MarshalText() ([]byte, error) { + return hexutil.Bytes(h[:]).MarshalText() +} + +// SetBytes sets the hash to the value of b. +// If b is larger than len(h), b will be cropped from the left. +func (h *Hash) SetBytes(b []byte) { + if len(b) > len(h) { + b = b[len(b)-HashLength:] + } + + copy(h[HashLength-len(b):], b) +} + +// Generate implements testing/quick.Generator. +func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value { + m := rand.Intn(len(h)) + for i := len(h) - 1; i > m; i-- { + h[i] = byte(rand.Uint32()) + } + return reflect.ValueOf(h) +} + +// Scan implements Scanner for database/sql. +func (h *Hash) Scan(src interface{}) error { + srcB, ok := src.([]byte) + if !ok { + return fmt.Errorf("can't scan %T into Hash", src) + } + if len(srcB) != HashLength { + return fmt.Errorf("can't scan []byte of len %d into Hash, want %d", len(srcB), HashLength) + } + copy(h[:], srcB) + return nil +} + +// Value implements valuer for database/sql. +func (h Hash) Value() (driver.Value, error) { + return h[:], nil +} + +// ImplementsGraphQLType returns true if Hash implements the specified GraphQL type. +func (_ Hash) ImplementsGraphQLType(name string) bool { return name == "Bytes32" } + +// UnmarshalGraphQL unmarshals the provided GraphQL query data. +func (h *Hash) UnmarshalGraphQL(input interface{}) error { + var err error + switch input := input.(type) { + case string: + *h = HexToHash(input) + default: + err = fmt.Errorf("Unexpected type for Bytes32: %v", input) + } + return err +} + +// UnprefixedHash allows marshaling a Hash without 0x prefix. +type UnprefixedHash Hash + +// UnmarshalText decodes the hash from hex. The 0x prefix is optional. +func (h *UnprefixedHash) UnmarshalText(input []byte) error { + return hexutil.UnmarshalFixedUnprefixedText("UnprefixedHash", input, h[:]) +} + +// MarshalText encodes the hash as hex. +func (h UnprefixedHash) MarshalText() ([]byte, error) { + return []byte(hex.EncodeToString(h[:])), nil +} + +/////////// Address + +// Address represents the 20 byte address of an Ethereum account. +type Address [AddressLength]byte + +// BytesToAddress returns Address with value b. +// If b is larger than len(h), b will be cropped from the left. +func BytesToAddress(b []byte) Address { + var a Address + a.SetBytes(b) + return a +} + +// BigToAddress returns Address with byte values of b. +// If b is larger than len(h), b will be cropped from the left. +func BigToAddress(b *big.Int) Address { return BytesToAddress(b.Bytes()) } + +// HexToAddress returns Address with byte values of s. +// If s is larger than len(h), s will be cropped from the left. +func HexToAddress(s string) Address { return BytesToAddress(FromHex(s)) } + +// IsHexAddress verifies whether a string can represent a valid hex-encoded +// Ethereum address or not. +func IsHexAddress(s string) bool { + if hasHexPrefix(s) { + s = s[2:] + } + return len(s) == 2*AddressLength && isHex(s) +} + +// Bytes gets the string representation of the underlying address. +func (a Address) Bytes() []byte { return a[:] } + +// Hash converts an address to a hash by left-padding it with zeros. +func (a Address) Hash() Hash { return BytesToHash(a[:]) } + +// Hex returns an EIP55-compliant hex string representation of the address. 
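// A sketch (not part of the vendored source): Hex, defined next, restores the
// EIP-55 mixed-case checksum regardless of the input casing. The address
// below is the well-known EIP-55 test vector.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	a := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
	fmt.Println(a.Hex()) // 0x5aAeb6053F3E94C9b9A09f33669435E7Ef1BeAed
}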
+func (a Address) Hex() string {
+	unchecksummed := hex.EncodeToString(a[:])
+	sha := sha3.NewLegacyKeccak256()
+	sha.Write([]byte(unchecksummed))
+	hash := sha.Sum(nil)
+
+	result := []byte(unchecksummed)
+	for i := 0; i < len(result); i++ {
+		hashByte := hash[i/2]
+		if i%2 == 0 {
+			hashByte = hashByte >> 4
+		} else {
+			hashByte &= 0xf
+		}
+		if result[i] > '9' && hashByte > 7 {
+			result[i] -= 32
+		}
+	}
+	return "0x" + string(result)
+}
+
+// String implements fmt.Stringer.
+func (a Address) String() string {
+	return a.Hex()
+}
+
+// Format implements fmt.Formatter, forcing the byte slice to be formatted as is,
+// without going through the stringer interface used for logging.
+func (a Address) Format(s fmt.State, c rune) {
+	fmt.Fprintf(s, "%"+string(c), a[:])
+}
+
+// SetBytes sets the address to the value of b.
+// If b is larger than len(a) it will panic.
+func (a *Address) SetBytes(b []byte) {
+	if len(b) > len(a) {
+		b = b[len(b)-AddressLength:]
+	}
+	copy(a[AddressLength-len(b):], b)
+}
+
+// MarshalText returns the hex representation of a.
+func (a Address) MarshalText() ([]byte, error) {
+	return hexutil.Bytes(a[:]).MarshalText()
+}
+
+// UnmarshalText parses an address in hex syntax.
+func (a *Address) UnmarshalText(input []byte) error {
+	return hexutil.UnmarshalFixedText("Address", input, a[:])
+}
+
+// UnmarshalJSON parses an address in hex syntax.
+func (a *Address) UnmarshalJSON(input []byte) error {
+	return hexutil.UnmarshalFixedJSON(addressT, input, a[:])
+}
+
+// Scan implements Scanner for database/sql.
+func (a *Address) Scan(src interface{}) error {
+	srcB, ok := src.([]byte)
+	if !ok {
+		return fmt.Errorf("can't scan %T into Address", src)
+	}
+	if len(srcB) != AddressLength {
+		return fmt.Errorf("can't scan []byte of len %d into Address, want %d", len(srcB), AddressLength)
+	}
+	copy(a[:], srcB)
+	return nil
+}
+
+// Value implements valuer for database/sql.
+func (a Address) Value() (driver.Value, error) {
+	return a[:], nil
+}
+
+// ImplementsGraphQLType returns true if Address implements the specified GraphQL type.
+func (a Address) ImplementsGraphQLType(name string) bool { return name == "Address" }
+
+// UnmarshalGraphQL unmarshals the provided GraphQL query data.
+func (a *Address) UnmarshalGraphQL(input interface{}) error {
+	var err error
+	switch input := input.(type) {
+	case string:
+		*a = HexToAddress(input)
+	default:
+		err = fmt.Errorf("Unexpected type for Address: %v", input)
+	}
+	return err
+}
+
+// UnprefixedAddress allows marshaling an Address without 0x prefix.
+type UnprefixedAddress Address
+
+// UnmarshalText decodes the address from hex. The 0x prefix is optional.
+func (a *UnprefixedAddress) UnmarshalText(input []byte) error {
+	return hexutil.UnmarshalFixedUnprefixedText("UnprefixedAddress", input, a[:])
+}
+
+// MarshalText encodes the address as hex.
+func (a UnprefixedAddress) MarshalText() ([]byte, error) { + return []byte(hex.EncodeToString(a[:])), nil +} + +// MixedcaseAddress retains the original string, which may or may not be +// correctly checksummed +type MixedcaseAddress struct { + addr Address + original string +} + +// NewMixedcaseAddress constructor (mainly for testing) +func NewMixedcaseAddress(addr Address) MixedcaseAddress { + return MixedcaseAddress{addr: addr, original: addr.Hex()} +} + +// NewMixedcaseAddressFromString is mainly meant for unit-testing +func NewMixedcaseAddressFromString(hexaddr string) (*MixedcaseAddress, error) { + if !IsHexAddress(hexaddr) { + return nil, fmt.Errorf("Invalid address") + } + a := FromHex(hexaddr) + return &MixedcaseAddress{addr: BytesToAddress(a), original: hexaddr}, nil +} + +// UnmarshalJSON parses MixedcaseAddress +func (ma *MixedcaseAddress) UnmarshalJSON(input []byte) error { + if err := hexutil.UnmarshalFixedJSON(addressT, input, ma.addr[:]); err != nil { + return err + } + return json.Unmarshal(input, &ma.original) +} + +// MarshalJSON marshals the original value +func (ma *MixedcaseAddress) MarshalJSON() ([]byte, error) { + if strings.HasPrefix(ma.original, "0x") || strings.HasPrefix(ma.original, "0X") { + return json.Marshal(fmt.Sprintf("0x%s", ma.original[2:])) + } + return json.Marshal(fmt.Sprintf("0x%s", ma.original)) +} + +// Address returns the address +func (ma *MixedcaseAddress) Address() Address { + return ma.addr +} + +// String implements fmt.Stringer +func (ma *MixedcaseAddress) String() string { + if ma.ValidChecksum() { + return fmt.Sprintf("%s [chksum ok]", ma.original) + } + return fmt.Sprintf("%s [chksum INVALID]", ma.original) +} + +// ValidChecksum returns true if the address has valid checksum +func (ma *MixedcaseAddress) ValidChecksum() bool { + return ma.original == ma.addr.Hex() +} + +// Original returns the mixed-case input string +func (ma *MixedcaseAddress) Original() string { + return ma.original +} diff --git a/vendor/github.com/ethereum/go-ethereum/rlp/decode.go b/vendor/github.com/ethereum/go-ethereum/rlp/decode.go new file mode 100644 index 000000000..4f29f2fb0 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/rlp/decode.go @@ -0,0 +1,1049 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rlp + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "strings" + "sync" +) + +var ( + // EOL is returned when the end of the current list + // has been reached during streaming. 
+ EOL = errors.New("rlp: end of list") + + // Actual Errors + ErrExpectedString = errors.New("rlp: expected String or Byte") + ErrExpectedList = errors.New("rlp: expected List") + ErrCanonInt = errors.New("rlp: non-canonical integer format") + ErrCanonSize = errors.New("rlp: non-canonical size information") + ErrElemTooLarge = errors.New("rlp: element is larger than containing list") + ErrValueTooLarge = errors.New("rlp: value size exceeds available input length") + ErrMoreThanOneValue = errors.New("rlp: input contains more than one value") + + // internal errors + errNotInList = errors.New("rlp: call of ListEnd outside of any list") + errNotAtEOL = errors.New("rlp: call of ListEnd not positioned at EOL") + errUintOverflow = errors.New("rlp: uint overflow") + errNoPointer = errors.New("rlp: interface given to Decode must be a pointer") + errDecodeIntoNil = errors.New("rlp: pointer given to Decode must not be nil") + + streamPool = sync.Pool{ + New: func() interface{} { return new(Stream) }, + } +) + +// Decoder is implemented by types that require custom RLP +// decoding rules or need to decode into private fields. +// +// The DecodeRLP method should read one value from the given +// Stream. It is not forbidden to read less or more, but it might +// be confusing. +type Decoder interface { + DecodeRLP(*Stream) error +} + +// Decode parses RLP-encoded data from r and stores the result in the +// value pointed to by val. Val must be a non-nil pointer. If r does +// not implement ByteReader, Decode will do its own buffering. +// +// Decode uses the following type-dependent decoding rules: +// +// If the type implements the Decoder interface, decode calls +// DecodeRLP. +// +// To decode into a pointer, Decode will decode into the value pointed +// to. If the pointer is nil, a new value of the pointer's element +// type is allocated. If the pointer is non-nil, the existing value +// will be reused. +// +// To decode into a struct, Decode expects the input to be an RLP +// list. The decoded elements of the list are assigned to each public +// field in the order given by the struct's definition. The input list +// must contain an element for each decoded field. Decode returns an +// error if there are too few or too many elements. +// +// The decoding of struct fields honours certain struct tags, "tail", +// "nil" and "-". +// +// The "-" tag ignores fields. +// +// For an explanation of "tail", see the example. +// +// The "nil" tag applies to pointer-typed fields and changes the decoding +// rules for the field such that input values of size zero decode as a nil +// pointer. This tag can be useful when decoding recursive types. +// +// type StructWithEmptyOK struct { +// Foo *[20]byte `rlp:"nil"` +// } +// +// To decode into a slice, the input must be a list and the resulting +// slice will contain the input elements in order. For byte slices, +// the input must be an RLP string. Array types decode similarly, with +// the additional restriction that the number of input elements (or +// bytes) must match the array's length. +// +// To decode into a Go string, the input must be an RLP string. The +// input bytes are taken as-is and will not necessarily be valid UTF-8. +// +// To decode into an unsigned integer type, the input must also be an RLP +// string. The bytes are interpreted as a big endian representation of +// the integer. If the RLP string is larger than the bit size of the +// type, Decode will return an error. Decode also supports *big.Int. 
+// There is no size limit for big integers.
+//
+// To decode into a boolean, the input must contain an unsigned integer
+// of value zero (false) or one (true).
+//
+// To decode into an interface value, Decode stores one of these
+// in the value:
+//
+//	[]interface{}, for RLP lists
+//	[]byte, for RLP strings
+//
+// Non-empty interface types are not supported, nor are signed integers,
+// floating point numbers, maps, channels and functions.
+//
+// Note that Decode does not set an input limit for all readers
+// and may be vulnerable to panics caused by huge value sizes. If
+// you need an input limit, use
+//
+//	NewStream(r, limit).Decode(val)
+func Decode(r io.Reader, val interface{}) error {
+	stream := streamPool.Get().(*Stream)
+	defer streamPool.Put(stream)
+
+	stream.Reset(r, 0)
+	return stream.Decode(val)
+}
+
+// DecodeBytes parses RLP data from b into val.
+// Please see the documentation of Decode for the decoding rules.
+// The input must contain exactly one value and no trailing data.
+func DecodeBytes(b []byte, val interface{}) error {
+	r := bytes.NewReader(b)
+
+	stream := streamPool.Get().(*Stream)
+	defer streamPool.Put(stream)
+
+	stream.Reset(r, uint64(len(b)))
+	if err := stream.Decode(val); err != nil {
+		return err
+	}
+	if r.Len() > 0 {
+		return ErrMoreThanOneValue
+	}
+	return nil
+}
+
+type decodeError struct {
+	msg string
+	typ reflect.Type
+	ctx []string
+}
+
+func (err *decodeError) Error() string {
+	ctx := ""
+	if len(err.ctx) > 0 {
+		ctx = ", decoding into "
+		for i := len(err.ctx) - 1; i >= 0; i-- {
+			ctx += err.ctx[i]
+		}
+	}
+	return fmt.Sprintf("rlp: %s for %v%s", err.msg, err.typ, ctx)
+}
+
+func wrapStreamError(err error, typ reflect.Type) error {
+	switch err {
+	case ErrCanonInt:
+		return &decodeError{msg: "non-canonical integer (leading zero bytes)", typ: typ}
+	case ErrCanonSize:
+		return &decodeError{msg: "non-canonical size information", typ: typ}
+	case ErrExpectedList:
+		return &decodeError{msg: "expected input list", typ: typ}
+	case ErrExpectedString:
+		return &decodeError{msg: "expected input string or byte", typ: typ}
+	case errUintOverflow:
+		return &decodeError{msg: "input string too long", typ: typ}
+	case errNotAtEOL:
+		return &decodeError{msg: "input list has too many elements", typ: typ}
+	}
+	return err
+}
+
+func addErrorContext(err error, ctx string) error {
+	if decErr, ok := err.(*decodeError); ok {
+		decErr.ctx = append(decErr.ctx, ctx)
+	}
+	return err
+}
+
+var (
+	decoderInterface = reflect.TypeOf(new(Decoder)).Elem()
+	bigInt           = reflect.TypeOf(big.Int{})
+)
+
+func makeDecoder(typ reflect.Type, tags tags) (dec decoder, err error) {
+	kind := typ.Kind()
+	switch {
+	case typ == rawValueType:
+		return decodeRawValue, nil
+	case typ.Implements(decoderInterface):
+		return decodeDecoder, nil
+	case kind != reflect.Ptr && reflect.PtrTo(typ).Implements(decoderInterface):
+		return decodeDecoderNoPtr, nil
+	case typ.AssignableTo(reflect.PtrTo(bigInt)):
+		return decodeBigInt, nil
+	case typ.AssignableTo(bigInt):
+		return decodeBigIntNoPtr, nil
+	case isUint(kind):
+		return decodeUint, nil
+	case kind == reflect.Bool:
+		return decodeBool, nil
+	case kind == reflect.String:
+		return decodeString, nil
+	case kind == reflect.Slice || kind == reflect.Array:
+		return makeListDecoder(typ, tags)
+	case kind == reflect.Struct:
+		return makeStructDecoder(typ)
+	case kind == reflect.Ptr:
+		if tags.nilOK {
+			return makeOptionalPtrDecoder(typ)
+		}
+		return makePtrDecoder(typ)
+	case kind == reflect.Interface:
+		return decodeInterface, nil
+
default: + return nil, fmt.Errorf("rlp: type %v is not RLP-serializable", typ) + } +} + +func decodeRawValue(s *Stream, val reflect.Value) error { + r, err := s.Raw() + if err != nil { + return err + } + val.SetBytes(r) + return nil +} + +func decodeUint(s *Stream, val reflect.Value) error { + typ := val.Type() + num, err := s.uint(typ.Bits()) + if err != nil { + return wrapStreamError(err, val.Type()) + } + val.SetUint(num) + return nil +} + +func decodeBool(s *Stream, val reflect.Value) error { + b, err := s.Bool() + if err != nil { + return wrapStreamError(err, val.Type()) + } + val.SetBool(b) + return nil +} + +func decodeString(s *Stream, val reflect.Value) error { + b, err := s.Bytes() + if err != nil { + return wrapStreamError(err, val.Type()) + } + val.SetString(string(b)) + return nil +} + +func decodeBigIntNoPtr(s *Stream, val reflect.Value) error { + return decodeBigInt(s, val.Addr()) +} + +func decodeBigInt(s *Stream, val reflect.Value) error { + b, err := s.Bytes() + if err != nil { + return wrapStreamError(err, val.Type()) + } + i := val.Interface().(*big.Int) + if i == nil { + i = new(big.Int) + val.Set(reflect.ValueOf(i)) + } + // Reject leading zero bytes + if len(b) > 0 && b[0] == 0 { + return wrapStreamError(ErrCanonInt, val.Type()) + } + i.SetBytes(b) + return nil +} + +func makeListDecoder(typ reflect.Type, tag tags) (decoder, error) { + etype := typ.Elem() + if etype.Kind() == reflect.Uint8 && !reflect.PtrTo(etype).Implements(decoderInterface) { + if typ.Kind() == reflect.Array { + return decodeByteArray, nil + } + return decodeByteSlice, nil + } + etypeinfo := cachedTypeInfo1(etype, tags{}) + if etypeinfo.decoderErr != nil { + return nil, etypeinfo.decoderErr + } + var dec decoder + switch { + case typ.Kind() == reflect.Array: + dec = func(s *Stream, val reflect.Value) error { + return decodeListArray(s, val, etypeinfo.decoder) + } + case tag.tail: + // A slice with "tail" tag can occur as the last field + // of a struct and is supposed to swallow all remaining + // list elements. The struct decoder already called s.List, + // proceed directly to decoding the elements. 
+ dec = func(s *Stream, val reflect.Value) error { + return decodeSliceElems(s, val, etypeinfo.decoder) + } + default: + dec = func(s *Stream, val reflect.Value) error { + return decodeListSlice(s, val, etypeinfo.decoder) + } + } + return dec, nil +} + +func decodeListSlice(s *Stream, val reflect.Value, elemdec decoder) error { + size, err := s.List() + if err != nil { + return wrapStreamError(err, val.Type()) + } + if size == 0 { + val.Set(reflect.MakeSlice(val.Type(), 0, 0)) + return s.ListEnd() + } + if err := decodeSliceElems(s, val, elemdec); err != nil { + return err + } + return s.ListEnd() +} + +func decodeSliceElems(s *Stream, val reflect.Value, elemdec decoder) error { + i := 0 + for ; ; i++ { + // grow slice if necessary + if i >= val.Cap() { + newcap := val.Cap() + val.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(val.Type(), val.Len(), newcap) + reflect.Copy(newv, val) + val.Set(newv) + } + if i >= val.Len() { + val.SetLen(i + 1) + } + // decode into element + if err := elemdec(s, val.Index(i)); err == EOL { + break + } else if err != nil { + return addErrorContext(err, fmt.Sprint("[", i, "]")) + } + } + if i < val.Len() { + val.SetLen(i) + } + return nil +} + +func decodeListArray(s *Stream, val reflect.Value, elemdec decoder) error { + if _, err := s.List(); err != nil { + return wrapStreamError(err, val.Type()) + } + vlen := val.Len() + i := 0 + for ; i < vlen; i++ { + if err := elemdec(s, val.Index(i)); err == EOL { + break + } else if err != nil { + return addErrorContext(err, fmt.Sprint("[", i, "]")) + } + } + if i < vlen { + return &decodeError{msg: "input list has too few elements", typ: val.Type()} + } + return wrapStreamError(s.ListEnd(), val.Type()) +} + +func decodeByteSlice(s *Stream, val reflect.Value) error { + b, err := s.Bytes() + if err != nil { + return wrapStreamError(err, val.Type()) + } + val.SetBytes(b) + return nil +} + +func decodeByteArray(s *Stream, val reflect.Value) error { + kind, size, err := s.Kind() + if err != nil { + return err + } + vlen := val.Len() + switch kind { + case Byte: + if vlen == 0 { + return &decodeError{msg: "input string too long", typ: val.Type()} + } + if vlen > 1 { + return &decodeError{msg: "input string too short", typ: val.Type()} + } + bv, _ := s.Uint() + val.Index(0).SetUint(bv) + case String: + if uint64(vlen) < size { + return &decodeError{msg: "input string too long", typ: val.Type()} + } + if uint64(vlen) > size { + return &decodeError{msg: "input string too short", typ: val.Type()} + } + slice := val.Slice(0, vlen).Interface().([]byte) + if err := s.readFull(slice); err != nil { + return err + } + // Reject cases where single byte encoding should have been used. 
+ if size == 1 && slice[0] < 128 { + return wrapStreamError(ErrCanonSize, val.Type()) + } + case List: + return wrapStreamError(ErrExpectedString, val.Type()) + } + return nil +} + +func makeStructDecoder(typ reflect.Type) (decoder, error) { + fields, err := structFields(typ) + if err != nil { + return nil, err + } + dec := func(s *Stream, val reflect.Value) (err error) { + if _, err := s.List(); err != nil { + return wrapStreamError(err, typ) + } + for _, f := range fields { + err := f.info.decoder(s, val.Field(f.index)) + if err == EOL { + return &decodeError{msg: "too few elements", typ: typ} + } else if err != nil { + return addErrorContext(err, "."+typ.Field(f.index).Name) + } + } + return wrapStreamError(s.ListEnd(), typ) + } + return dec, nil +} + +// makePtrDecoder creates a decoder that decodes into +// the pointer's element type. +func makePtrDecoder(typ reflect.Type) (decoder, error) { + etype := typ.Elem() + etypeinfo := cachedTypeInfo1(etype, tags{}) + if etypeinfo.decoderErr != nil { + return nil, etypeinfo.decoderErr + } + dec := func(s *Stream, val reflect.Value) (err error) { + newval := val + if val.IsNil() { + newval = reflect.New(etype) + } + if err = etypeinfo.decoder(s, newval.Elem()); err == nil { + val.Set(newval) + } + return err + } + return dec, nil +} + +// makeOptionalPtrDecoder creates a decoder that decodes empty values +// as nil. Non-empty values are decoded into a value of the element type, +// just like makePtrDecoder does. +// +// This decoder is used for pointer-typed struct fields with struct tag "nil". +func makeOptionalPtrDecoder(typ reflect.Type) (decoder, error) { + etype := typ.Elem() + etypeinfo := cachedTypeInfo1(etype, tags{}) + if etypeinfo.decoderErr != nil { + return nil, etypeinfo.decoderErr + } + dec := func(s *Stream, val reflect.Value) (err error) { + kind, size, err := s.Kind() + if err != nil || size == 0 && kind != Byte { + // rearm s.Kind. This is important because the input + // position must advance to the next value even though + // we don't read anything. + s.kind = -1 + // set the pointer to nil. + val.Set(reflect.Zero(typ)) + return err + } + newval := val + if val.IsNil() { + newval = reflect.New(etype) + } + if err = etypeinfo.decoder(s, newval.Elem()); err == nil { + val.Set(newval) + } + return err + } + return dec, nil +} + +var ifsliceType = reflect.TypeOf([]interface{}{}) + +func decodeInterface(s *Stream, val reflect.Value) error { + if val.Type().NumMethod() != 0 { + return fmt.Errorf("rlp: type %v is not RLP-serializable", val.Type()) + } + kind, _, err := s.Kind() + if err != nil { + return err + } + if kind == List { + slice := reflect.New(ifsliceType).Elem() + if err := decodeListSlice(s, slice, decodeInterface); err != nil { + return err + } + val.Set(slice) + } else { + b, err := s.Bytes() + if err != nil { + return err + } + val.Set(reflect.ValueOf(b)) + } + return nil +} + +// This decoder is used for non-pointer values of types +// that implement the Decoder interface using a pointer receiver. +func decodeDecoderNoPtr(s *Stream, val reflect.Value) error { + return val.Addr().Interface().(Decoder).DecodeRLP(s) +} + +func decodeDecoder(s *Stream, val reflect.Value) error { + // Decoder instances are not handled using the pointer rule if the type + // implements Decoder with pointer receiver (i.e. always) + // because it might handle empty values specially. + // We need to allocate one here in this case, like makePtrDecoder does. 
+ if val.Kind() == reflect.Ptr && val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + return val.Interface().(Decoder).DecodeRLP(s) +} + +// Kind represents the kind of value contained in an RLP stream. +type Kind int + +const ( + Byte Kind = iota + String + List +) + +func (k Kind) String() string { + switch k { + case Byte: + return "Byte" + case String: + return "String" + case List: + return "List" + default: + return fmt.Sprintf("Unknown(%d)", k) + } +} + +// ByteReader must be implemented by any input reader for a Stream. It +// is implemented by e.g. bufio.Reader and bytes.Reader. +type ByteReader interface { + io.Reader + io.ByteReader +} + +// Stream can be used for piecemeal decoding of an input stream. This +// is useful if the input is very large or if the decoding rules for a +// type depend on the input structure. Stream does not keep an +// internal buffer. After decoding a value, the input reader will be +// positioned just before the type information for the next value. +// +// When decoding a list and the input position reaches the declared +// length of the list, all operations will return error EOL. +// The end of the list must be acknowledged using ListEnd to continue +// reading the enclosing list. +// +// Stream is not safe for concurrent use. +type Stream struct { + r ByteReader + + // number of bytes remaining to be read from r. + remaining uint64 + limited bool + + // auxiliary buffer for integer decoding + uintbuf []byte + + kind Kind // kind of value ahead + size uint64 // size of value ahead + byteval byte // value of single byte in type tag + kinderr error // error from last readKind + stack []listpos +} + +type listpos struct{ pos, size uint64 } + +// NewStream creates a new decoding stream reading from r. +// +// If r implements the ByteReader interface, Stream will +// not introduce any buffering. +// +// For non-toplevel values, Stream returns ErrElemTooLarge +// for values that do not fit into the enclosing list. +// +// Stream supports an optional input limit. If a limit is set, the +// size of any toplevel value will be checked against the remaining +// input length. Stream operations that encounter a value exceeding +// the remaining input length will return ErrValueTooLarge. The limit +// can be set by passing a non-zero value for inputLimit. +// +// If r is a bytes.Reader or strings.Reader, the input limit is set to +// the length of r's underlying data unless an explicit limit is +// provided. +func NewStream(r io.Reader, inputLimit uint64) *Stream { + s := new(Stream) + s.Reset(r, inputLimit) + return s +} + +// NewListStream creates a new stream that pretends to be positioned +// at an encoded list of the given length. +func NewListStream(r io.Reader, len uint64) *Stream { + s := new(Stream) + s.Reset(r, len) + s.kind = List + s.size = len + return s +} + +// Bytes reads an RLP string and returns its contents as a byte slice. +// If the input does not contain an RLP string, the returned +// error will be ErrExpectedString. +func (s *Stream) Bytes() ([]byte, error) { + kind, size, err := s.Kind() + if err != nil { + return nil, err + } + switch kind { + case Byte: + s.kind = -1 // rearm Kind + return []byte{s.byteval}, nil + case String: + b := make([]byte, size) + if err = s.readFull(b); err != nil { + return nil, err + } + if size == 1 && b[0] < 128 { + return nil, ErrCanonSize + } + return b, nil + default: + return nil, ErrExpectedString + } +} + +// Raw reads a raw encoded value including RLP type information. 
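// A sketch (not part of the vendored source): piecemeal decoding with Stream.
// 0xC3 opens a three-byte list; EOL marks its end.
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	s := rlp.NewStream(bytes.NewReader([]byte{0xC3, 0x01, 0x02, 0x03}), 0)
	if _, err := s.List(); err != nil {
		panic(err)
	}
	for {
		v, err := s.Uint()
		if err == rlp.EOL {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(v) // 1, then 2, then 3
	}
	// Acknowledge the end of the list before reading anything else.
	if err := s.ListEnd(); err != nil {
		panic(err)
	}
}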
+func (s *Stream) Raw() ([]byte, error) { + kind, size, err := s.Kind() + if err != nil { + return nil, err + } + if kind == Byte { + s.kind = -1 // rearm Kind + return []byte{s.byteval}, nil + } + // the original header has already been read and is no longer + // available. read content and put a new header in front of it. + start := headsize(size) + buf := make([]byte, uint64(start)+size) + if err := s.readFull(buf[start:]); err != nil { + return nil, err + } + if kind == String { + puthead(buf, 0x80, 0xB7, size) + } else { + puthead(buf, 0xC0, 0xF7, size) + } + return buf, nil +} + +// Uint reads an RLP string of up to 8 bytes and returns its contents +// as an unsigned integer. If the input does not contain an RLP string, the +// returned error will be ErrExpectedString. +func (s *Stream) Uint() (uint64, error) { + return s.uint(64) +} + +func (s *Stream) uint(maxbits int) (uint64, error) { + kind, size, err := s.Kind() + if err != nil { + return 0, err + } + switch kind { + case Byte: + if s.byteval == 0 { + return 0, ErrCanonInt + } + s.kind = -1 // rearm Kind + return uint64(s.byteval), nil + case String: + if size > uint64(maxbits/8) { + return 0, errUintOverflow + } + v, err := s.readUint(byte(size)) + switch { + case err == ErrCanonSize: + // Adjust error because we're not reading a size right now. + return 0, ErrCanonInt + case err != nil: + return 0, err + case size > 0 && v < 128: + return 0, ErrCanonSize + default: + return v, nil + } + default: + return 0, ErrExpectedString + } +} + +// Bool reads an RLP string of up to 1 byte and returns its contents +// as a boolean. If the input does not contain an RLP string, the +// returned error will be ErrExpectedString. +func (s *Stream) Bool() (bool, error) { + num, err := s.uint(8) + if err != nil { + return false, err + } + switch num { + case 0: + return false, nil + case 1: + return true, nil + default: + return false, fmt.Errorf("rlp: invalid boolean value: %d", num) + } +} + +// List starts decoding an RLP list. If the input does not contain a +// list, the returned error will be ErrExpectedList. When the list's +// end has been reached, any Stream operation will return EOL. +func (s *Stream) List() (size uint64, err error) { + kind, size, err := s.Kind() + if err != nil { + return 0, err + } + if kind != List { + return 0, ErrExpectedList + } + s.stack = append(s.stack, listpos{0, size}) + s.kind = -1 + s.size = 0 + return size, nil +} + +// ListEnd returns to the enclosing list. +// The input reader must be positioned at the end of a list. +func (s *Stream) ListEnd() error { + if len(s.stack) == 0 { + return errNotInList + } + tos := s.stack[len(s.stack)-1] + if tos.pos != tos.size { + return errNotAtEOL + } + s.stack = s.stack[:len(s.stack)-1] // pop + if len(s.stack) > 0 { + s.stack[len(s.stack)-1].pos += tos.size + } + s.kind = -1 + s.size = 0 + return nil +} + +// Decode decodes a value and stores the result in the value pointed +// to by val. Please see the documentation for the Decode function +// to learn about the decoding rules. 
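// A sketch (not part of the vendored source): the reflection-based rules in
// action. A struct decodes from an RLP list of its exported fields.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	type pair struct{ A, B uint64 }
	var p pair
	// 0xC2 is a list header for two bytes of payload: the items 0x01 and 0x02.
	if err := rlp.DecodeBytes([]byte{0xC2, 0x01, 0x02}, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.A, p.B) // 1 2
}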
+func (s *Stream) Decode(val interface{}) error { + if val == nil { + return errDecodeIntoNil + } + rval := reflect.ValueOf(val) + rtyp := rval.Type() + if rtyp.Kind() != reflect.Ptr { + return errNoPointer + } + if rval.IsNil() { + return errDecodeIntoNil + } + decoder, err := cachedDecoder(rtyp.Elem()) + if err != nil { + return err + } + + err = decoder(s, rval.Elem()) + if decErr, ok := err.(*decodeError); ok && len(decErr.ctx) > 0 { + // add decode target type to error so context has more meaning + decErr.ctx = append(decErr.ctx, fmt.Sprint("(", rtyp.Elem(), ")")) + } + return err +} + +// Reset discards any information about the current decoding context +// and starts reading from r. This method is meant to facilitate reuse +// of a preallocated Stream across many decoding operations. +// +// If r does not also implement ByteReader, Stream will do its own +// buffering. +func (s *Stream) Reset(r io.Reader, inputLimit uint64) { + if inputLimit > 0 { + s.remaining = inputLimit + s.limited = true + } else { + // Attempt to automatically discover + // the limit when reading from a byte slice. + switch br := r.(type) { + case *bytes.Reader: + s.remaining = uint64(br.Len()) + s.limited = true + case *strings.Reader: + s.remaining = uint64(br.Len()) + s.limited = true + default: + s.limited = false + } + } + // Wrap r with a buffer if it doesn't have one. + bufr, ok := r.(ByteReader) + if !ok { + bufr = bufio.NewReader(r) + } + s.r = bufr + // Reset the decoding context. + s.stack = s.stack[:0] + s.size = 0 + s.kind = -1 + s.kinderr = nil + if s.uintbuf == nil { + s.uintbuf = make([]byte, 8) + } + s.byteval = 0 +} + +// Kind returns the kind and size of the next value in the +// input stream. +// +// The returned size is the number of bytes that make up the value. +// For kind == Byte, the size is zero because the value is +// contained in the type tag. +// +// The first call to Kind will read size information from the input +// reader and leave it positioned at the start of the actual bytes of +// the value. Subsequent calls to Kind (until the value is decoded) +// will not advance the input reader and return cached information. +func (s *Stream) Kind() (kind Kind, size uint64, err error) { + var tos *listpos + if len(s.stack) > 0 { + tos = &s.stack[len(s.stack)-1] + } + if s.kind < 0 { + s.kinderr = nil + // Don't read further if we're at the end of the + // innermost list. + if tos != nil && tos.pos == tos.size { + return 0, 0, EOL + } + s.kind, s.size, s.kinderr = s.readKind() + if s.kinderr == nil { + if tos == nil { + // At toplevel, check that the value is smaller + // than the remaining input length. + if s.limited && s.size > s.remaining { + s.kinderr = ErrValueTooLarge + } + } else { + // Inside a list, check that the value doesn't overflow the list. + if s.size > tos.size-tos.pos { + s.kinderr = ErrElemTooLarge + } + } + } + } + // Note: this might return a sticky error generated + // by an earlier call to readKind. + return s.kind, s.size, s.kinderr +} + +func (s *Stream) readKind() (kind Kind, size uint64, err error) { + b, err := s.readByte() + if err != nil { + if len(s.stack) == 0 { + // At toplevel, Adjust the error to actual EOF. io.EOF is + // used by callers to determine when to stop decoding. + switch err { + case io.ErrUnexpectedEOF: + err = io.EOF + case ErrValueTooLarge: + err = io.EOF + } + } + return 0, 0, err + } + s.byteval = 0 + switch { + case b < 0x80: + // For a single byte whose value is in the [0x00, 0x7F] range, that byte + // is its own RLP encoding. 
+ s.byteval = b + return Byte, 0, nil + case b < 0xB8: + // Otherwise, if a string is 0-55 bytes long, + // the RLP encoding consists of a single byte with value 0x80 plus the + // length of the string followed by the string. The range of the first + // byte is thus [0x80, 0xB7]. + return String, uint64(b - 0x80), nil + case b < 0xC0: + // If a string is more than 55 bytes long, the + // RLP encoding consists of a single byte with value 0xB7 plus the length + // of the length of the string in binary form, followed by the length of + // the string, followed by the string. For example, a length-1024 string + // would be encoded as 0xB90400 followed by the string. The range of + // the first byte is thus [0xB8, 0xBF]. + size, err = s.readUint(b - 0xB7) + if err == nil && size < 56 { + err = ErrCanonSize + } + return String, size, err + case b < 0xF8: + // If the total payload of a list + // (i.e. the combined length of all its items) is 0-55 bytes long, the + // RLP encoding consists of a single byte with value 0xC0 plus the length + // of the list followed by the concatenation of the RLP encodings of the + // items. The range of the first byte is thus [0xC0, 0xF7]. + return List, uint64(b - 0xC0), nil + default: + // If the total payload of a list is more than 55 bytes long, + // the RLP encoding consists of a single byte with value 0xF7 + // plus the length of the length of the payload in binary + // form, followed by the length of the payload, followed by + // the concatenation of the RLP encodings of the items. The + // range of the first byte is thus [0xF8, 0xFF]. + size, err = s.readUint(b - 0xF7) + if err == nil && size < 56 { + err = ErrCanonSize + } + return List, size, err + } +} + +func (s *Stream) readUint(size byte) (uint64, error) { + switch size { + case 0: + s.kind = -1 // rearm Kind + return 0, nil + case 1: + b, err := s.readByte() + return uint64(b), err + default: + start := int(8 - size) + for i := 0; i < start; i++ { + s.uintbuf[i] = 0 + } + if err := s.readFull(s.uintbuf[start:]); err != nil { + return 0, err + } + if s.uintbuf[start] == 0 { + // Note: readUint is also used to decode integer + // values. The error needs to be adjusted to become + // ErrCanonInt in this case. + return 0, ErrCanonSize + } + return binary.BigEndian.Uint64(s.uintbuf), nil + } +} + +func (s *Stream) readFull(buf []byte) (err error) { + if err := s.willRead(uint64(len(buf))); err != nil { + return err + } + var nn, n int + for n < len(buf) && err == nil { + nn, err = s.r.Read(buf[n:]) + n += nn + } + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err +} + +func (s *Stream) readByte() (byte, error) { + if err := s.willRead(1); err != nil { + return 0, err + } + b, err := s.r.ReadByte() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return b, err +} + +func (s *Stream) willRead(n uint64) error { + s.kind = -1 // rearm Kind + + if len(s.stack) > 0 { + // check list overflow + tos := s.stack[len(s.stack)-1] + if n > tos.size-tos.pos { + return ErrElemTooLarge + } + s.stack[len(s.stack)-1].pos += n + } + if s.limited { + if n > s.remaining { + return ErrValueTooLarge + } + s.remaining -= n + } + return nil +} diff --git a/vendor/github.com/ethereum/go-ethereum/rlp/doc.go b/vendor/github.com/ethereum/go-ethereum/rlp/doc.go new file mode 100644 index 000000000..b3a81fe23 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/rlp/doc.go @@ -0,0 +1,33 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+/*
+Package rlp implements the RLP serialization format.
+
+The purpose of RLP (Recursive Length Prefix) is to encode arbitrarily
+nested arrays of binary data, and RLP is the main encoding method used
+to serialize objects in Ethereum. The only purpose of RLP is to encode
+structure; encoding specific atomic data types (e.g. strings, ints,
+floats) is left up to higher-order protocols; in Ethereum integers
+must be represented in big endian binary form with no leading zeroes
+(thus making the integer value zero equivalent to the empty byte
+array).
+
+RLP values are distinguished by a type tag. The type tag precedes the
+value in the input stream and defines the size and kind of the bytes
+that follow.
+*/
+package rlp
diff --git a/vendor/github.com/ethereum/go-ethereum/rlp/encode.go b/vendor/github.com/ethereum/go-ethereum/rlp/encode.go
new file mode 100644
index 000000000..f255c38a9
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/rlp/encode.go
@@ -0,0 +1,651 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package rlp
+
+import (
+	"fmt"
+	"io"
+	"math/big"
+	"reflect"
+	"sync"
+)
+
+var (
+	// Common encoded values.
+	// These are useful when implementing EncodeRLP.
+	EmptyString = []byte{0x80}
+	EmptyList   = []byte{0xC0}
+)
+
+// Encoder is implemented by types that require custom
+// encoding rules or want to encode private fields.
+type Encoder interface {
+	// EncodeRLP should write the RLP encoding of its receiver to w.
+	// If the implementation is a pointer method, it may also be
+	// called for nil pointers.
+	//
+	// Implementations should generate valid RLP. The data written is
+	// not verified at the moment, but a future version might. It is
+	// recommended to write only a single value but writing multiple
+	// values or no value at all is also permitted.
+	EncodeRLP(io.Writer) error
+}
+
+// Encode writes the RLP encoding of val to w. Note that Encode may
+// perform many small writes in some cases. Consider making w
+// buffered.
+// +// Encode uses the following type-dependent encoding rules: +// +// If the type implements the Encoder interface, Encode calls +// EncodeRLP. This is true even for nil pointers, please see the +// documentation for Encoder. +// +// To encode a pointer, the value being pointed to is encoded. For nil +// pointers, Encode will encode the zero value of the type. A nil +// pointer to a struct type always encodes as an empty RLP list. +// A nil pointer to an array encodes as an empty list (or empty string +// if the array has element type byte). +// +// Struct values are encoded as an RLP list of all their encoded +// public fields. Recursive struct types are supported. +// +// To encode slices and arrays, the elements are encoded as an RLP +// list of the value's elements. Note that arrays and slices with +// element type uint8 or byte are always encoded as an RLP string. +// +// A Go string is encoded as an RLP string. +// +// An unsigned integer value is encoded as an RLP string. Zero always +// encodes as an empty RLP string. Encode also supports *big.Int. +// +// Boolean values are encoded as unsigned integers zero (false) and one (true). +// +// An interface value encodes as the value contained in the interface. +// +// Signed integers are not supported, nor are floating point numbers, maps, +// channels and functions. +func Encode(w io.Writer, val interface{}) error { + if outer, ok := w.(*encbuf); ok { + // Encode was called by some type's EncodeRLP. + // Avoid copying by writing to the outer encbuf directly. + return outer.encode(val) + } + eb := encbufPool.Get().(*encbuf) + defer encbufPool.Put(eb) + eb.reset() + if err := eb.encode(val); err != nil { + return err + } + return eb.toWriter(w) +} + +// EncodeToBytes returns the RLP encoding of val. +// Please see the documentation of Encode for the encoding rules. +func EncodeToBytes(val interface{}) ([]byte, error) { + eb := encbufPool.Get().(*encbuf) + defer encbufPool.Put(eb) + eb.reset() + if err := eb.encode(val); err != nil { + return nil, err + } + return eb.toBytes(), nil +} + +// EncodeToReader returns a reader from which the RLP encoding of val +// can be read. The returned size is the total size of the encoded +// data. +// +// Please see the documentation of Encode for the encoding rules. +func EncodeToReader(val interface{}) (size int, r io.Reader, err error) { + eb := encbufPool.Get().(*encbuf) + eb.reset() + if err := eb.encode(val); err != nil { + return 0, nil, err + } + return eb.size(), &encReader{buf: eb}, nil +} + +type encbuf struct { + str []byte // string data, contains everything except list headers + lheads []*listhead // all list headers + lhsize int // sum of sizes of all encoded list headers + sizebuf []byte // 9-byte auxiliary buffer for uint encoding +} + +type listhead struct { + offset int // index of this header in string data + size int // total size of encoded data (including list headers) +} + +// encode writes head to the given buffer, which must be at least +// 9 bytes long. It returns the encoded bytes. +func (head *listhead) encode(buf []byte) []byte { + return buf[:puthead(buf, 0xC0, 0xF7, uint64(head.size))] +} + +// headsize returns the size of a list or string header +// for a value of the given size. +func headsize(size uint64) int { + if size < 56 { + return 1 + } + return 1 + intsize(size) +} + +// puthead writes a list or string header to buf. +// buf must be at least 9 bytes long. 
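+// For example, puthead(buf, 0x80, 0xB7, 1024) writes the string header
+// 0xB9 0x04 0x00 (0xB7 plus a two-byte big-endian length) and returns 3.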
+func puthead(buf []byte, smalltag, largetag byte, size uint64) int {
+	if size < 56 {
+		buf[0] = smalltag + byte(size)
+		return 1
+	}
+	sizesize := putint(buf[1:], size)
+	buf[0] = largetag + byte(sizesize)
+	return sizesize + 1
+}
+
+// encbufs are pooled.
+var encbufPool = sync.Pool{
+	New: func() interface{} { return &encbuf{sizebuf: make([]byte, 9)} },
+}
+
+func (w *encbuf) reset() {
+	w.lhsize = 0
+	if w.str != nil {
+		w.str = w.str[:0]
+	}
+	if w.lheads != nil {
+		w.lheads = w.lheads[:0]
+	}
+}
+
+// encbuf implements io.Writer so it can be passed into EncodeRLP.
+func (w *encbuf) Write(b []byte) (int, error) {
+	w.str = append(w.str, b...)
+	return len(b), nil
+}
+
+func (w *encbuf) encode(val interface{}) error {
+	rval := reflect.ValueOf(val)
+	writer, err := cachedWriter(rval.Type())
+	if err != nil {
+		return err
+	}
+	return writer(rval, w)
+}
+
+func (w *encbuf) encodeStringHeader(size int) {
+	if size < 56 {
+		w.str = append(w.str, 0x80+byte(size))
+	} else {
+		// TODO: encode to w.str directly
+		sizesize := putint(w.sizebuf[1:], uint64(size))
+		w.sizebuf[0] = 0xB7 + byte(sizesize)
+		w.str = append(w.str, w.sizebuf[:sizesize+1]...)
+	}
+}
+
+func (w *encbuf) encodeString(b []byte) {
+	if len(b) == 1 && b[0] <= 0x7F {
+		// fits single byte, no string header
+		w.str = append(w.str, b[0])
+	} else {
+		w.encodeStringHeader(len(b))
+		w.str = append(w.str, b...)
+	}
+}
+
+func (w *encbuf) list() *listhead {
+	lh := &listhead{offset: len(w.str), size: w.lhsize}
+	w.lheads = append(w.lheads, lh)
+	return lh
+}
+
+func (w *encbuf) listEnd(lh *listhead) {
+	lh.size = w.size() - lh.offset - lh.size
+	if lh.size < 56 {
+		w.lhsize++ // length encoded into kind tag
+	} else {
+		w.lhsize += 1 + intsize(uint64(lh.size))
+	}
+}
+
+func (w *encbuf) size() int {
+	return len(w.str) + w.lhsize
+}
+
+func (w *encbuf) toBytes() []byte {
+	out := make([]byte, w.size())
+	strpos := 0
+	pos := 0
+	for _, head := range w.lheads {
+		// write string data before header
+		n := copy(out[pos:], w.str[strpos:head.offset])
+		pos += n
+		strpos += n
+		// write the header
+		enc := head.encode(out[pos:])
+		pos += len(enc)
+	}
+	// copy string data after the last list header
+	copy(out[pos:], w.str[strpos:])
+	return out
+}
+
+func (w *encbuf) toWriter(out io.Writer) (err error) {
+	strpos := 0
+	for _, head := range w.lheads {
+		// write string data before header
+		if head.offset-strpos > 0 {
+			n, err := out.Write(w.str[strpos:head.offset])
+			strpos += n
+			if err != nil {
+				return err
+			}
+		}
+		// write the header
+		enc := head.encode(w.sizebuf)
+		if _, err = out.Write(enc); err != nil {
+			return err
+		}
+	}
+	if strpos < len(w.str) {
+		// write string data after the last list header
+		_, err = out.Write(w.str[strpos:])
+	}
+	return err
+}
+
+// encReader is the io.Reader returned by EncodeToReader.
+// It releases its encbuf at EOF.
+type encReader struct {
+	buf    *encbuf // the buffer we're reading from. this is nil when we're at EOF.
+	lhpos  int     // index of list header that we're reading
+	strpos int     // current position in string buffer
+	piece  []byte  // next piece to be read
+}
+
+func (r *encReader) Read(b []byte) (n int, err error) {
+	for {
+		if r.piece = r.next(); r.piece == nil {
+			// Put the encode buffer back into the pool at EOF when it
+			// is first encountered. Subsequent calls still return EOF
+			// as the error but the buffer is no longer valid.
+ if r.buf != nil { + encbufPool.Put(r.buf) + r.buf = nil + } + return n, io.EOF + } + nn := copy(b[n:], r.piece) + n += nn + if nn < len(r.piece) { + // piece didn't fit, see you next time. + r.piece = r.piece[nn:] + return n, nil + } + r.piece = nil + } +} + +// next returns the next piece of data to be read. +// it returns nil at EOF. +func (r *encReader) next() []byte { + switch { + case r.buf == nil: + return nil + + case r.piece != nil: + // There is still data available for reading. + return r.piece + + case r.lhpos < len(r.buf.lheads): + // We're before the last list header. + head := r.buf.lheads[r.lhpos] + sizebefore := head.offset - r.strpos + if sizebefore > 0 { + // String data before header. + p := r.buf.str[r.strpos:head.offset] + r.strpos += sizebefore + return p + } + r.lhpos++ + return head.encode(r.buf.sizebuf) + + case r.strpos < len(r.buf.str): + // String data at the end, after all list headers. + p := r.buf.str[r.strpos:] + r.strpos = len(r.buf.str) + return p + + default: + return nil + } +} + +var ( + encoderInterface = reflect.TypeOf(new(Encoder)).Elem() + big0 = big.NewInt(0) +) + +// makeWriter creates a writer function for the given type. +func makeWriter(typ reflect.Type, ts tags) (writer, error) { + kind := typ.Kind() + switch { + case typ == rawValueType: + return writeRawValue, nil + case typ.Implements(encoderInterface): + return writeEncoder, nil + case kind != reflect.Ptr && reflect.PtrTo(typ).Implements(encoderInterface): + return writeEncoderNoPtr, nil + case kind == reflect.Interface: + return writeInterface, nil + case typ.AssignableTo(reflect.PtrTo(bigInt)): + return writeBigIntPtr, nil + case typ.AssignableTo(bigInt): + return writeBigIntNoPtr, nil + case isUint(kind): + return writeUint, nil + case kind == reflect.Bool: + return writeBool, nil + case kind == reflect.String: + return writeString, nil + case kind == reflect.Slice && isByte(typ.Elem()): + return writeBytes, nil + case kind == reflect.Array && isByte(typ.Elem()): + return writeByteArray, nil + case kind == reflect.Slice || kind == reflect.Array: + return makeSliceWriter(typ, ts) + case kind == reflect.Struct: + return makeStructWriter(typ) + case kind == reflect.Ptr: + return makePtrWriter(typ) + default: + return nil, fmt.Errorf("rlp: type %v is not RLP-serializable", typ) + } +} + +func isByte(typ reflect.Type) bool { + return typ.Kind() == reflect.Uint8 && !typ.Implements(encoderInterface) +} + +func writeRawValue(val reflect.Value, w *encbuf) error { + w.str = append(w.str, val.Bytes()...) + return nil +} + +func writeUint(val reflect.Value, w *encbuf) error { + i := val.Uint() + if i == 0 { + w.str = append(w.str, 0x80) + } else if i < 128 { + // fits single byte + w.str = append(w.str, byte(i)) + } else { + // TODO: encode int to w.str directly + s := putint(w.sizebuf[1:], i) + w.sizebuf[0] = 0x80 + byte(s) + w.str = append(w.str, w.sizebuf[:s+1]...) 
+	}
+	return nil
+}
+
+func writeBool(val reflect.Value, w *encbuf) error {
+	if val.Bool() {
+		w.str = append(w.str, 0x01)
+	} else {
+		w.str = append(w.str, 0x80)
+	}
+	return nil
+}
+
+func writeBigIntPtr(val reflect.Value, w *encbuf) error {
+	ptr := val.Interface().(*big.Int)
+	if ptr == nil {
+		w.str = append(w.str, 0x80)
+		return nil
+	}
+	return writeBigInt(ptr, w)
+}
+
+func writeBigIntNoPtr(val reflect.Value, w *encbuf) error {
+	i := val.Interface().(big.Int)
+	return writeBigInt(&i, w)
+}
+
+func writeBigInt(i *big.Int, w *encbuf) error {
+	if cmp := i.Cmp(big0); cmp == -1 {
+		return fmt.Errorf("rlp: cannot encode negative *big.Int")
+	} else if cmp == 0 {
+		w.str = append(w.str, 0x80)
+	} else {
+		w.encodeString(i.Bytes())
+	}
+	return nil
+}
+
+func writeBytes(val reflect.Value, w *encbuf) error {
+	w.encodeString(val.Bytes())
+	return nil
+}
+
+func writeByteArray(val reflect.Value, w *encbuf) error {
+	if !val.CanAddr() {
+		// Slice requires the value to be addressable.
+		// Make it addressable by copying.
+		copy := reflect.New(val.Type()).Elem()
+		copy.Set(val)
+		val = copy
+	}
+	size := val.Len()
+	slice := val.Slice(0, size).Bytes()
+	w.encodeString(slice)
+	return nil
+}
+
+func writeString(val reflect.Value, w *encbuf) error {
+	s := val.String()
+	if len(s) == 1 && s[0] <= 0x7f {
+		// fits single byte, no string header
+		w.str = append(w.str, s[0])
+	} else {
+		w.encodeStringHeader(len(s))
+		w.str = append(w.str, s...)
+	}
+	return nil
+}
+
+func writeEncoder(val reflect.Value, w *encbuf) error {
+	return val.Interface().(Encoder).EncodeRLP(w)
+}
+
+// writeEncoderNoPtr handles non-pointer values that implement Encoder
+// with a pointer receiver.
+func writeEncoderNoPtr(val reflect.Value, w *encbuf) error {
+	if !val.CanAddr() {
+		// We can't get the address. It would be possible to make the
+		// value addressable by creating a shallow copy, but this
+		// creates other problems so we're not doing it (yet).
+		//
+		// package json simply doesn't call MarshalJSON for cases like
+		// this, but encodes the value as if it didn't implement the
+		// interface. We don't want to handle it that way.
+		return fmt.Errorf("rlp: game over: unaddressable value of type %v, EncodeRLP is pointer method", val.Type())
+	}
+	return val.Addr().Interface().(Encoder).EncodeRLP(w)
+}
+
+func writeInterface(val reflect.Value, w *encbuf) error {
+	if val.IsNil() {
+		// Write empty list. This is consistent with the previous RLP
+		// encoder that we had and should therefore avoid any
+		// problems.
+ w.str = append(w.str, 0xC0) + return nil + } + eval := val.Elem() + writer, err := cachedWriter(eval.Type()) + if err != nil { + return err + } + return writer(eval, w) +} + +func makeSliceWriter(typ reflect.Type, ts tags) (writer, error) { + etypeinfo := cachedTypeInfo1(typ.Elem(), tags{}) + if etypeinfo.writerErr != nil { + return nil, etypeinfo.writerErr + } + writer := func(val reflect.Value, w *encbuf) error { + if !ts.tail { + defer w.listEnd(w.list()) + } + vlen := val.Len() + for i := 0; i < vlen; i++ { + if err := etypeinfo.writer(val.Index(i), w); err != nil { + return err + } + } + return nil + } + return writer, nil +} + +func makeStructWriter(typ reflect.Type) (writer, error) { + fields, err := structFields(typ) + if err != nil { + return nil, err + } + writer := func(val reflect.Value, w *encbuf) error { + lh := w.list() + for _, f := range fields { + if err := f.info.writer(val.Field(f.index), w); err != nil { + return err + } + } + w.listEnd(lh) + return nil + } + return writer, nil +} + +func makePtrWriter(typ reflect.Type) (writer, error) { + etypeinfo := cachedTypeInfo1(typ.Elem(), tags{}) + if etypeinfo.writerErr != nil { + return nil, etypeinfo.writerErr + } + + // determine nil pointer handler + var nilfunc func(*encbuf) error + kind := typ.Elem().Kind() + switch { + case kind == reflect.Array && isByte(typ.Elem().Elem()): + nilfunc = func(w *encbuf) error { + w.str = append(w.str, 0x80) + return nil + } + case kind == reflect.Struct || kind == reflect.Array: + nilfunc = func(w *encbuf) error { + // encoding the zero value of a struct/array could trigger + // infinite recursion, avoid that. + w.listEnd(w.list()) + return nil + } + default: + zero := reflect.Zero(typ.Elem()) + nilfunc = func(w *encbuf) error { + return etypeinfo.writer(zero, w) + } + } + + writer := func(val reflect.Value, w *encbuf) error { + if val.IsNil() { + return nilfunc(w) + } + return etypeinfo.writer(val.Elem(), w) + } + return writer, nil +} + +// putint writes i to the beginning of b in big endian byte +// order, using the least number of bytes needed to represent i. +func putint(b []byte, i uint64) (size int) { + switch { + case i < (1 << 8): + b[0] = byte(i) + return 1 + case i < (1 << 16): + b[0] = byte(i >> 8) + b[1] = byte(i) + return 2 + case i < (1 << 24): + b[0] = byte(i >> 16) + b[1] = byte(i >> 8) + b[2] = byte(i) + return 3 + case i < (1 << 32): + b[0] = byte(i >> 24) + b[1] = byte(i >> 16) + b[2] = byte(i >> 8) + b[3] = byte(i) + return 4 + case i < (1 << 40): + b[0] = byte(i >> 32) + b[1] = byte(i >> 24) + b[2] = byte(i >> 16) + b[3] = byte(i >> 8) + b[4] = byte(i) + return 5 + case i < (1 << 48): + b[0] = byte(i >> 40) + b[1] = byte(i >> 32) + b[2] = byte(i >> 24) + b[3] = byte(i >> 16) + b[4] = byte(i >> 8) + b[5] = byte(i) + return 6 + case i < (1 << 56): + b[0] = byte(i >> 48) + b[1] = byte(i >> 40) + b[2] = byte(i >> 32) + b[3] = byte(i >> 24) + b[4] = byte(i >> 16) + b[5] = byte(i >> 8) + b[6] = byte(i) + return 7 + default: + b[0] = byte(i >> 56) + b[1] = byte(i >> 48) + b[2] = byte(i >> 40) + b[3] = byte(i >> 32) + b[4] = byte(i >> 24) + b[5] = byte(i >> 16) + b[6] = byte(i >> 8) + b[7] = byte(i) + return 8 + } +} + +// intsize computes the minimum number of bytes required to store i. 
+func intsize(i uint64) (size int) {
+	for size = 1; ; size++ {
+		if i >>= 8; i == 0 {
+			return size
+		}
+	}
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/rlp/raw.go b/vendor/github.com/ethereum/go-ethereum/rlp/raw.go
new file mode 100644
index 000000000..2b3f328f6
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/rlp/raw.go
@@ -0,0 +1,156 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rlp
+
+import (
+	"io"
+	"reflect"
+)
+
+// RawValue represents an encoded RLP value and can be used to delay
+// RLP decoding or to precompute an encoding. Note that the decoder does
+// not verify whether the content of RawValues is valid RLP.
+type RawValue []byte
+
+var rawValueType = reflect.TypeOf(RawValue{})
+
+// ListSize returns the encoded size of an RLP list with the given
+// content size.
+func ListSize(contentSize uint64) uint64 {
+	return uint64(headsize(contentSize)) + contentSize
+}
+
+// Split returns the content of the first RLP value and any
+// bytes after the value as subslices of b.
+func Split(b []byte) (k Kind, content, rest []byte, err error) {
+	k, ts, cs, err := readKind(b)
+	if err != nil {
+		return 0, nil, b, err
+	}
+	return k, b[ts : ts+cs], b[ts+cs:], nil
+}
+
+// SplitString splits b into the content of an RLP string
+// and any remaining bytes after the string.
+func SplitString(b []byte) (content, rest []byte, err error) {
+	k, content, rest, err := Split(b)
+	if err != nil {
+		return nil, b, err
+	}
+	if k == List {
+		return nil, b, ErrExpectedString
+	}
+	return content, rest, nil
+}
+
+// SplitList splits b into the content of a list and any remaining
+// bytes after the list.
+func SplitList(b []byte) (content, rest []byte, err error) {
+	k, content, rest, err := Split(b)
+	if err != nil {
+		return nil, b, err
+	}
+	if k != List {
+		return nil, b, ErrExpectedList
+	}
+	return content, rest, nil
+}
+
+// CountValues counts the number of encoded values in b.
+func CountValues(b []byte) (int, error) {
+	i := 0
+	for ; len(b) > 0; i++ {
+		_, tagsize, size, err := readKind(b)
+		if err != nil {
+			return 0, err
+		}
+		b = b[tagsize+size:]
+	}
+	return i, nil
+}
+
+func readKind(buf []byte) (k Kind, tagsize, contentsize uint64, err error) {
+	if len(buf) == 0 {
+		return 0, 0, 0, io.ErrUnexpectedEOF
+	}
+	b := buf[0]
+	switch {
+	case b < 0x80:
+		k = Byte
+		tagsize = 0
+		contentsize = 1
+	case b < 0xB8:
+		k = String
+		tagsize = 1
+		contentsize = uint64(b - 0x80)
+		// Reject strings that should've been single bytes.
+ if contentsize == 1 && len(buf) > 1 && buf[1] < 128 { + return 0, 0, 0, ErrCanonSize + } + case b < 0xC0: + k = String + tagsize = uint64(b-0xB7) + 1 + contentsize, err = readSize(buf[1:], b-0xB7) + case b < 0xF8: + k = List + tagsize = 1 + contentsize = uint64(b - 0xC0) + default: + k = List + tagsize = uint64(b-0xF7) + 1 + contentsize, err = readSize(buf[1:], b-0xF7) + } + if err != nil { + return 0, 0, 0, err + } + // Reject values larger than the input slice. + if contentsize > uint64(len(buf))-tagsize { + return 0, 0, 0, ErrValueTooLarge + } + return k, tagsize, contentsize, err +} + +func readSize(b []byte, slen byte) (uint64, error) { + if int(slen) > len(b) { + return 0, io.ErrUnexpectedEOF + } + var s uint64 + switch slen { + case 1: + s = uint64(b[0]) + case 2: + s = uint64(b[0])<<8 | uint64(b[1]) + case 3: + s = uint64(b[0])<<16 | uint64(b[1])<<8 | uint64(b[2]) + case 4: + s = uint64(b[0])<<24 | uint64(b[1])<<16 | uint64(b[2])<<8 | uint64(b[3]) + case 5: + s = uint64(b[0])<<32 | uint64(b[1])<<24 | uint64(b[2])<<16 | uint64(b[3])<<8 | uint64(b[4]) + case 6: + s = uint64(b[0])<<40 | uint64(b[1])<<32 | uint64(b[2])<<24 | uint64(b[3])<<16 | uint64(b[4])<<8 | uint64(b[5]) + case 7: + s = uint64(b[0])<<48 | uint64(b[1])<<40 | uint64(b[2])<<32 | uint64(b[3])<<24 | uint64(b[4])<<16 | uint64(b[5])<<8 | uint64(b[6]) + case 8: + s = uint64(b[0])<<56 | uint64(b[1])<<48 | uint64(b[2])<<40 | uint64(b[3])<<32 | uint64(b[4])<<24 | uint64(b[5])<<16 | uint64(b[6])<<8 | uint64(b[7]) + } + // Reject sizes < 56 (shouldn't have separate size) and sizes with + // leading zero bytes. + if s < 56 || b[0] == 0 { + return 0, ErrCanonSize + } + return s, nil +} diff --git a/vendor/github.com/ethereum/go-ethereum/rlp/typecache.go b/vendor/github.com/ethereum/go-ethereum/rlp/typecache.go new file mode 100644 index 000000000..ab5ee3da7 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/rlp/typecache.go @@ -0,0 +1,165 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rlp + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +var ( + typeCacheMutex sync.RWMutex + typeCache = make(map[typekey]*typeinfo) +) + +type typeinfo struct { + decoder decoder + decoderErr error // error from makeDecoder + writer writer + writerErr error // error from makeWriter +} + +// represents struct tags +type tags struct { + // rlp:"nil" controls whether empty input results in a nil pointer. + nilOK bool + // rlp:"tail" controls whether this field swallows additional list + // elements. It can only be set for the last field, which must be + // of slice type. + tail bool + // rlp:"-" ignores fields. + ignored bool +} + +type typekey struct { + reflect.Type + // the key must include the struct tags because they + // might generate a different decoder. 
+ tags +} + +type decoder func(*Stream, reflect.Value) error + +type writer func(reflect.Value, *encbuf) error + +func cachedDecoder(typ reflect.Type) (decoder, error) { + info := cachedTypeInfo(typ, tags{}) + return info.decoder, info.decoderErr +} + +func cachedWriter(typ reflect.Type) (writer, error) { + info := cachedTypeInfo(typ, tags{}) + return info.writer, info.writerErr +} + +func cachedTypeInfo(typ reflect.Type, tags tags) *typeinfo { + typeCacheMutex.RLock() + info := typeCache[typekey{typ, tags}] + typeCacheMutex.RUnlock() + if info != nil { + return info + } + // not in the cache, need to generate info for this type. + typeCacheMutex.Lock() + defer typeCacheMutex.Unlock() + return cachedTypeInfo1(typ, tags) +} + +func cachedTypeInfo1(typ reflect.Type, tags tags) *typeinfo { + key := typekey{typ, tags} + info := typeCache[key] + if info != nil { + // another goroutine got the write lock first + return info + } + // put a dummy value into the cache before generating. + // if the generator tries to lookup itself, it will get + // the dummy value and won't call itself recursively. + info = new(typeinfo) + typeCache[key] = info + info.generate(typ, tags) + return info +} + +type field struct { + index int + info *typeinfo +} + +func structFields(typ reflect.Type) (fields []field, err error) { + lastPublic := lastPublicField(typ) + for i := 0; i < typ.NumField(); i++ { + if f := typ.Field(i); f.PkgPath == "" { // exported + tags, err := parseStructTag(typ, i, lastPublic) + if err != nil { + return nil, err + } + if tags.ignored { + continue + } + info := cachedTypeInfo1(f.Type, tags) + fields = append(fields, field{i, info}) + } + } + return fields, nil +} + +func parseStructTag(typ reflect.Type, fi, lastPublic int) (tags, error) { + f := typ.Field(fi) + var ts tags + for _, t := range strings.Split(f.Tag.Get("rlp"), ",") { + switch t = strings.TrimSpace(t); t { + case "": + case "-": + ts.ignored = true + case "nil": + ts.nilOK = true + case "tail": + ts.tail = true + if fi != lastPublic { + return ts, fmt.Errorf(`rlp: invalid struct tag "tail" for %v.%s (must be on last field)`, typ, f.Name) + } + if f.Type.Kind() != reflect.Slice { + return ts, fmt.Errorf(`rlp: invalid struct tag "tail" for %v.%s (field type is not slice)`, typ, f.Name) + } + default: + return ts, fmt.Errorf("rlp: unknown struct tag %q on %v.%s", t, typ, f.Name) + } + } + return ts, nil +} + +func lastPublicField(typ reflect.Type) int { + last := 0 + for i := 0; i < typ.NumField(); i++ { + if typ.Field(i).PkgPath == "" { + last = i + } + } + return last +} + +func (i *typeinfo) generate(typ reflect.Type, tags tags) { + i.decoder, i.decoderErr = makeDecoder(typ, tags) + i.writer, i.writerErr = makeWriter(typ, tags) +} + +func isUint(k reflect.Kind) bool { + return k >= reflect.Uint && k <= reflect.Uintptr +} -- cgit v1.2.3

+ */ + ElgamalEc::PrivateKey prv; + prv.init(P, bitSize, rg); + prv.setCache(0, 60000); + const ElgamalEc::PublicKey& pub = prv.getPublicKey(); + + const int m1 = 12345; + const int m2 = 17655; + ElgamalEc::CipherText c1, c2; + pub.enc(c1, m1, rg); + pub.enc(c2, m2, rg); + Zn dec1, dec2; + prv.dec(dec1, c1); + prv.dec(dec2, c2); + // dec(enc) = id + CYBOZU_TEST_EQUAL(dec1, m1); + CYBOZU_TEST_EQUAL(dec2, m2); + CYBOZU_TEST_EQUAL(prv.dec(c1), m1); + CYBOZU_TEST_EQUAL(prv.dec(c2), m2); + // iostream + { + ElgamalEc::PublicKey pub2; + ElgamalEc::PrivateKey prv2; + ElgamalEc::CipherText cc1, cc2; + { + std::stringstream ss; + ss << prv; + ss >> prv2; + } + Zn d; + prv2.dec(d, c1); + CYBOZU_TEST_EQUAL(d, m1); + { + std::stringstream ss; + ss << c1; + ss >> cc1; + } + d = 0; + prv2.dec(d, cc1); + CYBOZU_TEST_EQUAL(d, m1); + { + std::stringstream ss; + ss << pub; + ss >> pub2; + } + pub2.enc(cc2, m2, rg); + prv.dec(d, cc2); + CYBOZU_TEST_EQUAL(d, m2); + } + // enc(m1) enc(m2) = enc(m1 + m2) + c1.add(c2); + prv.dec(dec1, c1); + CYBOZU_TEST_EQUAL(dec1, m1 + m2); + // enc(m1) x = enc(m1 + x) + { + const int x = 555; + pub.add(c1, x); + prv.dec(dec1, c1); + CYBOZU_TEST_EQUAL(dec1, m1 + m2 + x); + } + // rerandomize + c1 = c2; + pub.rerandomize(c1, rg); + // verify c1 != c2 + CYBOZU_TEST_ASSERT(c1.c1 != c2.c1); + CYBOZU_TEST_ASSERT(c1.c2 != c2.c2); + prv.dec(dec1, c1); + // dec(c1) = dec(c2) + CYBOZU_TEST_EQUAL(dec1, m2); + + // check neg + { + ElgamalEc::CipherText c; + Zn m = 1234; + pub.enc(c, m, rg); + c.neg(); + Zn dec; + prv.dec(dec, c); + CYBOZU_TEST_EQUAL(dec, -m); + } + // check mul + { + ElgamalEc::CipherText c; + Zn m = 123; + int x = 111; + pub.enc(c, m, rg); + Zn dec; + prv.dec(dec, c); + c.mul(x); + prv.dec(dec, c); + m *= x; + CYBOZU_TEST_EQUAL(dec, m); + } + + // check negative value + for (int i = -10; i < 10; i++) { + ElgamalEc::CipherText c; + const Zn mm = i; + pub.enc(c, mm, rg); + Zn dec; + prv.dec(dec, c, 1000); + CYBOZU_TEST_EQUAL(dec, mm); + } + + // isZeroMessage + for (int m = 0; m < 10; m++) { + ElgamalEc::CipherText c0; + pub.enc(c0, m, rg); + if (m == 0) { + CYBOZU_TEST_ASSERT(prv.isZeroMessage(c0)); + } else { + CYBOZU_TEST_ASSERT(!prv.isZeroMessage(c0)); + } + } + // zkp + { + ElgamalEc::Zkp zkp; + ElgamalEc::CipherText c; + pub.encWithZkp(c, zkp, 0, rg); + CYBOZU_TEST_ASSERT(pub.verify(c, zkp)); + zkp.s0 += 1; + CYBOZU_TEST_ASSERT(!pub.verify(c, zkp)); + pub.encWithZkp(c, zkp, 1, rg); + CYBOZU_TEST_ASSERT(pub.verify(c, zkp)); + zkp.s0 += 1; + CYBOZU_TEST_ASSERT(!pub.verify(c, zkp)); + CYBOZU_TEST_EXCEPTION_MESSAGE(pub.encWithZkp(c, zkp, 2, rg), cybozu::Exception, "encWithZkp"); + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/fp_generator_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/fp_generator_test.cpp new file mode 100644 index 000000000..60ec5cd41 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/fp_generator_test.cpp @@ -0,0 +1,207 @@ +#include +#if MCL_SIZEOF_UNIT == 4 +// not support +#else +#include +#include +#include +#include +#include +#include "../src/fp_generator.hpp" +#include +#include +#include + +typedef mcl::FpT<> Fp; + +const int MAX_N = 4; + +const char *primeTable[] = { + "0x7fffffffffffffffffffffffffffffff", // 127bit(not full) + "0xffffffffffffffffffffffffffffff61", // 128bit(full) + "0xfffffffffffffffffffffffffffffffffffffffeffffee37", // 192bit(full) + "0x2523648240000001ba344d80000000086121000000000013a700000000000013", // 254bit(not full) +}; + +void strToArray(uint64_t *p, size_t n, const char *pStr) +{ + 
mpz_class x; + mcl::gmp::setStr(x, pStr, 16); + mcl::gmp::getArray(p, n, x); +} + +struct Int { + int vn; + uint64_t v[MAX_N]; + Int() + : vn(0) + { + } + explicit Int(int vn) + { + if (vn > MAX_N) { + printf("vn(%d) is too large\n", vn); + exit(1); + } + this->vn = vn; + } + void set(const char *str) { setStr(str); } + void set(const Fp& rhs) + { + mcl::gmp::getArray(v, MAX_N, rhs.getMpz()); + } + void set(const uint64_t* x) + { + for (int i = 0; i < vn; i++) v[i] = x[i]; + } + void setStr(const char *str) + { + strToArray(v, MAX_N, str); + } + std::string getStr() const + { + std::string ret; + for (int i = 0; i < vn; i++) { + ret += cybozu::itohex(v[vn - 1 - i], false); + } + return ret; + } + void put(const char *msg = "") const + { + if (msg) printf("%s=", msg); + printf("%s\n", getStr().c_str()); + } + bool operator==(const Int& rhs) const + { + if (vn != rhs.vn) return false; + for (int i = 0; i < vn; i++) { + if (v[i] != rhs.v[i]) return false; + } + return true; + } + bool operator!=(const Int& rhs) const { return !operator==(rhs); } + bool operator==(const Fp& rhs) const + { + Int t(vn); + t.set(rhs); + return operator==(t); + } + bool operator!=(const Fp& rhs) const { return !operator==(rhs); } +}; +static inline std::ostream& operator<<(std::ostream& os, const Int& x) +{ + return os << x.getStr(); +} + +void testAddSub(const mcl::fp::Op& op) +{ + Fp x, y; + const uint64_t *p = op.p; + Int mx(op.N), my(op.N); + x.setStr("0x8811aabb23427cc"); + y.setStr("0x8811aabb23427cc11"); + mx.set(x); + my.set(y); + for (int i = 0; i < 30; i++) { + CYBOZU_TEST_EQUAL(mx, x); + x += x; + op.fp_add(mx.v, mx.v, mx.v, p); + } + for (int i = 0; i < 30; i++) { + CYBOZU_TEST_EQUAL(mx, x); + x += y; + op.fp_add(mx.v, mx.v, my.v, p); + } + for (int i = 0; i < 30; i++) { + CYBOZU_TEST_EQUAL(my, y); + y -= x; + op.fp_sub(my.v, my.v, mx.v, p); + } +} + +void testNeg(const mcl::fp::Op& op) +{ + Fp x; + Int mx(op.N), my(op.N); + const char *tbl[] = { + "0", + "0x12346", + "0x11223344556677881122334455667788", + "0x0abbccddeeffaabb0000000000000000", + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + x.setStr(tbl[i]); + mx.set(x); + x = -x; + op.fp_neg(mx.v, mx.v, op.p); + CYBOZU_TEST_EQUAL(mx, x); + } +} + +#if 0 +void testMulI(const mcl::fp::FpGenerator& fg, int pn) +{ + cybozu::XorShift rg; +//printf("pn=%d, %p\n", pn, fg.mulUnit_); + for (int i = 0; i < 100; i++) { + uint64_t x[MAX_N]; + uint64_t z[MAX_N + 1]; + rg.read(x, pn); + uint64_t y = rg.get64(); + mpz_class mx; + mcl::gmp::setArray(mx, x, pn); + mpz_class my; + mcl::gmp::set(my, y); + mx *= my; + uint64_t d = fg.mulUnit_(z, x, y); + z[pn] = d; + mcl::gmp::setArray(my, z, pn + 1); + CYBOZU_TEST_EQUAL(mx, my); + } + { + uint64_t x[MAX_N]; + uint64_t z[MAX_N + 1]; + rg.read(x, pn); + uint64_t y = rg.get64(); + CYBOZU_BENCH_C("mulUnit", 10000000, fg.mulUnit_, z, x, y); + } +} +#endif + +void testShr1(const mcl::fp::Op& op, int pn) +{ + cybozu::XorShift rg; + for (int i = 0; i < 100; i++) { + uint64_t x[MAX_N]; + uint64_t z[MAX_N]; + rg.read(x, pn); + mpz_class mx; + mcl::gmp::setArray(mx, x, pn); + mx >>= 1; + op.fp_shr1(z, x); + mpz_class my; + mcl::gmp::setArray(my, z, pn); + CYBOZU_TEST_EQUAL(mx, my); + } +} + +void test(const char *pStr) +{ + printf("test %s\n", pStr); + Fp::init(pStr, mcl::fp::FP_XBYAK); + const mcl::fp::Op& op = Fp::getOp(); + const int pn = (int)op.N; + testAddSub(op); + testNeg(op); +// testMulI(*op.fg, pn); + testShr1(op, pn); +} + +CYBOZU_TEST_AUTO(all) +{ + if (!mcl::fp::isEnableJIT()) return; + for (size_t 
i = 0; i < CYBOZU_NUM_OF_ARRAY(primeTable); i++) { + test(primeTable[i]); + } +} +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/test/fp_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/fp_test.cpp new file mode 100644 index 000000000..dc1b01ef4 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/fp_test.cpp @@ -0,0 +1,1046 @@ +#define PUT(x) std::cout << #x "=" << (x) << std::endl +#define CYBOZU_TEST_DISABLE_AUTO_RUN +#include +#include +#include "../src/low_func.hpp" +#include "../src/proto.hpp" +#include +#include +#include +#include + +#ifdef _MSC_VER + #pragma warning(disable: 4127) // const condition +#endif + +typedef mcl::FpT<> Fp; + +CYBOZU_TEST_AUTO(sizeof) +{ + CYBOZU_TEST_EQUAL(sizeof(Fp), sizeof(mcl::fp::Unit) * Fp::maxSize); +} + +void cstrTest() +{ + const struct { + const char *str; + int val; + } tbl[] = { + { "0", 0 }, + { "1", 1 }, + { "123", 123 }, + { "0x123", 0x123 }, + { "0b10101", 21 }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + // string cstr + Fp x(tbl[i].str); + CYBOZU_TEST_EQUAL(x, tbl[i].val); + + // int cstr + Fp y(tbl[i].val); + CYBOZU_TEST_EQUAL(y, x); + + // copy cstr + Fp z(x); + CYBOZU_TEST_EQUAL(z, x); + + // assign int + Fp w; + w = tbl[i].val; + CYBOZU_TEST_EQUAL(w, x); + + // assign self + Fp u; + u = w; + CYBOZU_TEST_EQUAL(u, x); + + // conv + std::ostringstream os; + os << tbl[i].val; + + std::string str; + x.getStr(str); + CYBOZU_TEST_EQUAL(str, os.str()); + } + const struct { + const char *str; + int val; + } tbl2[] = { + { "-123", 123 }, + { "-0x123", 0x123 }, + { "-0b10101", 21 }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl2); i++) { + Fp x(tbl2[i].str); + x = -x; + CYBOZU_TEST_EQUAL(x, tbl2[i].val); + } +} + +void setStrTest() +{ + const struct { + const char *in; + int out; + int base; + } tbl[] = { + { "100", 100, 0 }, // set base = 10 if base = 0 + { "100", 4, 2 }, + { "100", 256, 16 }, + { "0b100", 4, 0 }, + { "0b100", 4, 2 }, + { "0x100", 256, 0 }, + { "0x100", 256, 16 }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + Fp x; + x.setStr(tbl[i].in, tbl[i].base); + CYBOZU_TEST_EQUAL(x, tbl[i].out); + } + // use prefix if base conflicts with prefix + { + Fp x; + CYBOZU_TEST_EXCEPTION(x.setStr("0b100", 16), cybozu::Exception); + CYBOZU_TEST_EXCEPTION(x.setStr("0b100", 10), cybozu::Exception); + CYBOZU_TEST_EXCEPTION(x.setStr("0x100", 2), cybozu::Exception); + CYBOZU_TEST_EXCEPTION(x.setStr("0x100", 10), cybozu::Exception); + } +} + +void streamTest() +{ + const struct { + const char *in; + int out10; + int out16; + } tbl[] = { + { "100", 100, 256 }, // set base = 10 if base = 0 + { "0x100", 256, 256 }, + }; + Fp::setIoMode(0); + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + { + std::istringstream is(tbl[i].in); + Fp x; + is >> x; + CYBOZU_TEST_EQUAL(x, tbl[i].out10); + } + { + std::istringstream is(tbl[i].in); + Fp x; + is >> std::hex >> x; + CYBOZU_TEST_EQUAL(x, tbl[i].out16); + } + } + // use prefix if base conflicts with prefix + std::istringstream is("0b100"); + Fp x; + CYBOZU_TEST_EXCEPTION(is >> std::hex >> x, cybozu::Exception); + { + std::ostringstream os; + os << Fp(123); + CYBOZU_TEST_EQUAL(os.str(), "123"); + } + { + std::ostringstream os; + os << std::hex << Fp(0x123); + CYBOZU_TEST_EQUAL(os.str(), "123"); + } + { + std::ostringstream os; + os << std::hex << std::showbase << Fp(0x123); + CYBOZU_TEST_EQUAL(os.str(), "0x123"); + } +} + +void ioModeTest() +{ + Fp x(123); + const struct { + mcl::IoMode ioMode; + std::string expected; + } tbl[] = { + { 
mcl::IoBin, "1111011" }, + { mcl::IoDec, "123" }, + { mcl::IoHex, "7b" }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + Fp::setIoMode(tbl[i].ioMode); + for (int j = 0; j < 2; j++) { + std::stringstream ss; + if (j == 1) { + ss << std::hex; + } + ss << x; + CYBOZU_TEST_EQUAL(ss.str(), tbl[i].expected); + Fp y; + y.clear(); + ss >> y; + CYBOZU_TEST_EQUAL(x, y); + } + } + for (int i = 0; i < 2; i++) { + if (i == 0) { + Fp::setIoMode(mcl::IoArray); + } else { + Fp::setIoMode(mcl::IoArrayRaw); + } + std::stringstream ss; + ss << x; + CYBOZU_TEST_EQUAL(ss.str().size(), Fp::getByteSize()); + Fp y; + ss >> y; + CYBOZU_TEST_EQUAL(x, y); + } + Fp::setIoMode(mcl::IoAuto); +} + +void edgeTest() +{ + const mpz_class& m = Fp::getOp().mp; + /* + real mont + 0 0 + 1 R^-1 + R 1 + -1 -R^-1 + -R -1 + */ + mpz_class t = 1; + const size_t N = Fp::getUnitSize(); + const mpz_class R = (t << (N * mcl::fp::UnitBitSize)) % m; + const mpz_class tbl[] = { + 0, 1, R, m - 1, m - R + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const mpz_class& x = tbl[i]; + for (size_t j = i; j < CYBOZU_NUM_OF_ARRAY(tbl); j++) { + const mpz_class& y = tbl[j]; + mpz_class z = (x * y) % m; + Fp xx, yy; + xx.setMpz(x); + yy.setMpz(y); + Fp zz = xx * yy; + zz.getMpz(t); + CYBOZU_TEST_EQUAL(z, t); + } + } + t = m; + t /= 2; + Fp x; + x.setMpz(t); + CYBOZU_TEST_EQUAL(x * 2, -1); + t += 1; + x.setMpz(t); + CYBOZU_TEST_EQUAL(x * 2, 1); +} + +void convTest() +{ +#if 1 + const char *bin, *hex, *dec; + if (Fp::getBitSize() <= 117) { + bin = "0b1000000000000000000000000000000000000000000000000000000000001110"; + hex = "0x800000000000000e"; + dec = "9223372036854775822"; + } else { + bin = "0b100100011010001010110011110001001000000010010001101000101011001111000100100000001001000110100010101100111100010010000"; + hex = "0x123456789012345678901234567890"; + dec = "94522879687365475552814062743484560"; + } +#else + const char *bin = "0b1001000110100"; + const char *hex = "0x1234"; + const char *dec = "4660"; +#endif + Fp b(bin); + Fp h(hex); + Fp d(dec); + CYBOZU_TEST_EQUAL(b, h); + CYBOZU_TEST_EQUAL(b, d); + + std::string str; + b.getStr(str, mcl::IoBinPrefix); + CYBOZU_TEST_EQUAL(str, bin); + b.getStr(str); + CYBOZU_TEST_EQUAL(str, dec); + b.getStr(str, mcl::IoHexPrefix); + CYBOZU_TEST_EQUAL(str, hex); +} + +void compareTest() +{ + { + const struct { + int lhs; + int rhs; + int cmp; + } tbl[] = { + { 0, 0, 0 }, + { 1, 0, 1 }, + { 0, 1, -1 }, + { -1, 0, 1 }, // m-1, 0 + { 0, -1, -1 }, // 0, m-1 + { 123, 456, -1 }, + { 456, 123, 1 }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const Fp x(tbl[i].lhs); + const Fp y(tbl[i].rhs); + const int cmp = tbl[i].cmp; + if (cmp == 0) { + CYBOZU_TEST_EQUAL(x, y); + CYBOZU_TEST_ASSERT(x >= y); + CYBOZU_TEST_ASSERT(x <= y); + } else if (cmp > 0) { + CYBOZU_TEST_ASSERT(x > y); + CYBOZU_TEST_ASSERT(x >= y); + } else { + CYBOZU_TEST_ASSERT(x < y); + CYBOZU_TEST_ASSERT(x <= y); + } + } + } + { + Fp x(5); + CYBOZU_TEST_ASSERT(x < 10); + CYBOZU_TEST_ASSERT(x == 5); + CYBOZU_TEST_ASSERT(x > 2); + } + { + Fp x(1); + CYBOZU_TEST_ASSERT(x.isOne()); + x = 2; + CYBOZU_TEST_ASSERT(!x.isOne()); + } + { + const struct { + int v; + bool expected; + } tbl[] = { + { 0, false }, + { 1, false }, + { -1, true }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + Fp x = tbl[i].v; + CYBOZU_TEST_EQUAL(x.isNegative(), tbl[i].expected); + } + std::string str; + Fp::getModulo(str); + char buf[1024]; + size_t n = Fp::getModulo(buf, sizeof(buf)); + CYBOZU_TEST_EQUAL(n, 
str.size()); + CYBOZU_TEST_EQUAL(buf, str.c_str()); + mpz_class half(str); + half = (half - 1) / 2; + Fp x; + x.setMpz(half - 1); + CYBOZU_TEST_ASSERT(!x.isNegative()); + x.setMpz(half); + CYBOZU_TEST_ASSERT(!x.isNegative()); + x.setMpz(half + 1); + CYBOZU_TEST_ASSERT(x.isNegative()); + } +} + +void moduloTest(const char *pStr) +{ + std::string str; + Fp::getModulo(str); + CYBOZU_TEST_EQUAL(str, mcl::gmp::getStr(mpz_class(pStr))); +} + +void opeTest() +{ + const struct { + int x; + int y; + int add; // x + y + int sub; // x - y + int mul; // x * y + int sqr; // x^2 + } tbl[] = { + { 0, 1, 1, -1, 0, 0 }, + { 9, 5, 14, 4, 45, 81 }, + { 10, 13, 23, -3, 130, 100 }, + { 2000, 1000, 3000, 1000, 2000 * 1000, 2000 * 2000 }, + { 12345, 9999, 12345 + 9999, 12345 - 9999, 12345 * 9999, 12345 * 12345 }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const Fp x(tbl[i].x); + const Fp y(tbl[i].y); + Fp z; + Fp::add(z, x, y); + CYBOZU_TEST_EQUAL(z, tbl[i].add); + Fp::sub(z, x, y); + CYBOZU_TEST_EQUAL(z, tbl[i].sub); + Fp::mul(z, x, y); + CYBOZU_TEST_EQUAL(z, tbl[i].mul); + + Fp r; + Fp::inv(r, y); + Fp::mul(z, z, r); + CYBOZU_TEST_EQUAL(z, tbl[i].x); + z = x + y; + CYBOZU_TEST_EQUAL(z, tbl[i].add); + z = x - y; + CYBOZU_TEST_EQUAL(z, tbl[i].sub); + z = x * y; + CYBOZU_TEST_EQUAL(z, tbl[i].mul); + + Fp::sqr(z, x); + CYBOZU_TEST_EQUAL(z, tbl[i].sqr); + + z = x / y; + z *= y; + CYBOZU_TEST_EQUAL(z, tbl[i].x); + } + if (!Fp::isFullBit()) { + Fp x(5), y(3), z; + Fp::addPre(z, x, y); + if (Fp::compareRaw(z, Fp::getP()) >= 0) { + Fp::subPre(z, z, Fp::getP()); + } + CYBOZU_TEST_EQUAL(z, Fp(8)); + if (Fp::compareRaw(x, y) < 0) { + Fp::addPre(x, x, Fp::getP()); + } + Fp::subPre(x, x, y); + CYBOZU_TEST_EQUAL(x, Fp(2)); + } +} + +struct tag2; + +void powTest() +{ + Fp x, y, z; + x = 12345; + z = 1; + for (int i = 0; i < 100; i++) { + Fp::pow(y, x, i); + CYBOZU_TEST_EQUAL(y, z); + z *= x; + } + x = z; + Fp::pow(z, x, Fp::getOp().mp - 1); + CYBOZU_TEST_EQUAL(z, 1); + Fp::pow(z, x, Fp::getOp().mp); + CYBOZU_TEST_EQUAL(z, x); + typedef mcl::FpT Fp_other; + Fp_other::init("1009"); + x = 5; + Fp_other n = 3; + z = 3; + Fp::pow(x, x, z); + CYBOZU_TEST_EQUAL(x, 125); + x = 5; + Fp::pow(x, x, n); + CYBOZU_TEST_EQUAL(x, 125); +} + +void mulUnitTest() +{ + Fp x(-1), y, z; + for (unsigned int u = 0; u < 20; u++) { + Fp::mul(y, x, u); + Fp::mulUnit(z, x, u); + CYBOZU_TEST_EQUAL(y, z); + } +} + +void powNegTest() +{ + Fp x, y, z; + x = 12345; + z = 1; + Fp rx = 1 / x; + for (int i = 0; i < 100; i++) { + Fp::pow(y, x, -i); + CYBOZU_TEST_EQUAL(y, z); + z *= rx; + } +} + +void powFpTest() +{ + Fp x, y, z; + x = 12345; + z = 1; + for (int i = 0; i < 100; i++) { + Fp::pow(y, x, Fp(i)); + CYBOZU_TEST_EQUAL(y, z); + z *= x; + } +} + +void powGmp() +{ + Fp x, y, z; + x = 12345; + z = 1; + for (int i = 0; i < 100; i++) { + Fp::pow(y, x, mpz_class(i)); + CYBOZU_TEST_EQUAL(y, z); + z *= x; + } +} + +struct TagAnother; + +void anotherFpTest(mcl::fp::Mode mode) +{ + typedef mcl::FpT G; + G::init("13", mode); + G a = 3; + G b = 9; + a *= b; + CYBOZU_TEST_EQUAL(a, 1); +} + +void setArrayTest1() +{ + char b1[] = { 0x56, 0x34, 0x12 }; + Fp x; + x.setArray(b1, 3); + CYBOZU_TEST_EQUAL(x, 0x123456); + int b2[] = { 0x12, 0x34 }; + x.setArray(b2, 2); + CYBOZU_TEST_EQUAL(x, Fp("0x3400000012")); +} + +void setArrayTest2(mcl::fp::Mode mode) +{ + Fp::init("0x10000000000001234567a5", mode); + const struct { + uint32_t buf[3]; + size_t bufN; + const char *expected; + } tbl[] = { + { { 0x234567a4, 0x00000001, 0x00100000}, 1, "0x234567a4" }, + 
{ { 0x234567a4, 0x00000001, 0x00100000}, 2, "0x1234567a4" }, + { { 0x234567a4, 0x00000001, 0x00080000}, 3, "0x08000000000001234567a4" }, + { { 0x234567a4, 0x00000001, 0x00100000}, 3, "0x10000000000001234567a4" }, + }; + Fp x; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + x.setArray(tbl[i].buf, tbl[i].bufN); + CYBOZU_TEST_EQUAL(x, Fp(tbl[i].expected)); + } + uint32_t large[3] = { 0x234567a5, 0x00000001, 0x00100000}; + CYBOZU_TEST_EXCEPTION(x.setArray(large, 3), cybozu::Exception); +} + +void setArrayMaskTest1() +{ + char b1[] = { 0x56, 0x34, 0x12 }; + Fp x; + x.setArrayMask(b1, 3); + CYBOZU_TEST_EQUAL(x, 0x123456); + int b2[] = { 0x12, 0x34 }; + x.setArrayMask(b2, 2); + CYBOZU_TEST_EQUAL(x, Fp("0x3400000012")); +} + +void setArrayMaskTest2(mcl::fp::Mode mode) +{ + Fp::init("0x10000000000001234567a5", mode); + const struct { + uint32_t buf[3]; + size_t bufN; + const char *expected; + } tbl[] = { + { { 0x234567a4, 0x00000001, 0x00100000}, 1, "0x234567a4" }, + { { 0x234567a4, 0x00000001, 0x00100000}, 2, "0x1234567a4" }, + { { 0x234567a4, 0x00000001, 0x00100000}, 3, "0x10000000000001234567a4" }, + { { 0x234567a5, 0xfffffff1, 0xffffffff}, 3, "0x0ffffffffffff1234567a5" }, + }; + Fp x; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + x.setArrayMask(tbl[i].buf, tbl[i].bufN); + CYBOZU_TEST_EQUAL(x, Fp(tbl[i].expected)); + } +} + +void setArrayModTest() +{ + const mpz_class& p = Fp::getOp().mp; + const mpz_class tbl[] = { + 0, + 1, + p - 1, + p, + p + 1, + p * 2 - 1, + p * 2, + p * 2 + 1, + p * (p - 1) - 1, + p * (p - 1), + p * (p - 1) + 1, + p * p - 1, + p * p, + p * p + 1, + (mpz_class(1) << Fp::getOp().N * mcl::fp::UnitBitSize * 2) - 1, + }; + const size_t unitByteSize = sizeof(mcl::fp::Unit); + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const mpz_class& x = tbl[i]; + const mcl::fp::Unit *px = mcl::gmp::getUnit(x); + const size_t xn = mcl::gmp::getUnitSize(x); + const size_t xByteSize = xn * unitByteSize; + const size_t fpByteSize = unitByteSize * Fp::getOp().N; + Fp y; + bool b; + y.setArray(&b, px, xn, mcl::fp::Mod); + bool expected = xByteSize <= fpByteSize * 2; + CYBOZU_TEST_EQUAL(b, expected); + if (!b) continue; + CYBOZU_TEST_EQUAL(y.getMpz(), x % p); + } +} + +CYBOZU_TEST_AUTO(set64bit) +{ + Fp::init("0x1000000000000000000f"); + const struct { + const char *p; + int64_t i; + } tbl[] = { + { "0x1234567812345678", int64_t(0x1234567812345678ull) }, + { "0xfffedcba987edcba997", -int64_t(0x1234567812345678ull) }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + Fp x(tbl[i].p); + Fp y(tbl[i].i); + CYBOZU_TEST_EQUAL(x, y); + } +} + +void getUint64Test() +{ + const uint64_t tbl[] = { + 0, 1, 123, 0xffffffff, int64_t(0x7fffffffffffffffull) + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + uint64_t a = tbl[i]; + Fp x(a); + uint64_t b = x.getUint64(); + CYBOZU_TEST_ASSERT(!x.isNegative()); + CYBOZU_TEST_EQUAL(a, b); + } + { + Fp x("0xffffffffffffffff"); + CYBOZU_TEST_EQUAL(x.getUint64(), uint64_t(0xffffffffffffffffull)); + } + { + Fp x("0x10000000000000000"); + CYBOZU_TEST_EXCEPTION(x.getUint64(), cybozu::Exception); + x = -1; + CYBOZU_TEST_EXCEPTION(x.getUint64(), cybozu::Exception); + } + { + Fp x("0x10000000000000000"); + bool b = true; + CYBOZU_TEST_EQUAL(x.getUint64(&b), 0u); + CYBOZU_TEST_ASSERT(!b); + } +} + +void getInt64Test() +{ + const int64_t tbl[] = { + 0, 1, 123, 0xffffffff, int64_t(0x7fffffffffffffffull), + -1, -2, -12345678, int64_t(-9223372036854775808ull)/*-int64_t(1) << 63*/, + }; + for (size_t i = 0; i < 
CYBOZU_NUM_OF_ARRAY(tbl); i++) { + int64_t a = tbl[i]; + Fp x(a); + CYBOZU_TEST_EQUAL(x.isNegative(), a < 0); + int64_t b = x.getInt64(); + CYBOZU_TEST_EQUAL(a, b); + } + { + Fp x("0x8000000000000000"); + CYBOZU_TEST_EXCEPTION(x.getInt64(), cybozu::Exception); + } + { + Fp x("0x8000000000000000"); + bool b = true; + CYBOZU_TEST_EQUAL(x.getInt64(&b), 0u); + CYBOZU_TEST_ASSERT(!b); + } +} + +void divBy2Test() +{ + const int tbl[] = { -4, -3, -2, -1, 0, 1, 2, 3 }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + Fp x(tbl[i]), y; + Fp::divBy2(y, x); + y *= 2; + CYBOZU_TEST_EQUAL(y, x); + y = x; + Fp::divBy2(y, y); + y *= 2; + CYBOZU_TEST_EQUAL(y, x); + } +} + +void getStrTest() +{ + Fp x(0); + std::string str; + str = x.getStr(); + CYBOZU_TEST_EQUAL(str, "0"); + str = x.getStr(mcl::IoBinPrefix); + CYBOZU_TEST_EQUAL(str, "0b0"); + str = x.getStr(mcl::IoBin); + CYBOZU_TEST_EQUAL(str, "0"); + str = x.getStr(mcl::IoHexPrefix); + CYBOZU_TEST_EQUAL(str, "0x0"); + str = x.getStr(mcl::IoHex); + CYBOZU_TEST_EQUAL(str, "0"); + + x = 123; + str = x.getStr(); + CYBOZU_TEST_EQUAL(str, "123"); + str = x.getStr(mcl::IoBinPrefix); + CYBOZU_TEST_EQUAL(str, "0b1111011"); + str = x.getStr(mcl::IoBin); + CYBOZU_TEST_EQUAL(str, "1111011"); + str = x.getStr(mcl::IoHexPrefix); + CYBOZU_TEST_EQUAL(str, "0x7b"); + str = x.getStr(mcl::IoHex); + CYBOZU_TEST_EQUAL(str, "7b"); + + { + std::ostringstream os; + os << x; + CYBOZU_TEST_EQUAL(os.str(), "123"); + } + { + std::ostringstream os; + os << std::hex << std::showbase << x; + CYBOZU_TEST_EQUAL(os.str(), "0x7b"); + } + { + std::ostringstream os; + os << std::hex << x; + CYBOZU_TEST_EQUAL(os.str(), "7b"); + } + const char *tbl[] = { + "0x0", + "0x5", + "0x123", + "0x123456789012345679adbc", + "0xffffffff26f2fc170f69466a74defd8d", + "0x100000000000000000000000000000033", + "0x11ee12312312940000000000000000000000000002342343" + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const char *s = tbl[i]; + mpz_class mx(s); + if (mx >= Fp::getOp().mp) continue; + Fp y(s); + std::string xs, ys; + mcl::gmp::getStr(xs, mx, 16); + y.getStr(ys, 16); + CYBOZU_TEST_EQUAL(xs, ys); + } +} + +void setHashOfTest() +{ + const std::string msgTbl[] = { + "", "abc", "111111111111111111111111111111111111", + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(msgTbl); i++) { + size_t bitSize = Fp::getBitSize(); + std::string digest; + if (bitSize <= 256) { + digest = cybozu::Sha256().digest(msgTbl[i]); + } else { + digest = cybozu::Sha512().digest(msgTbl[i]); + } + Fp x, y; + x.setArrayMask(digest.c_str(), digest.size()); + y.setHashOf(msgTbl[i]); + CYBOZU_TEST_EQUAL(x, y); + } +} + +CYBOZU_TEST_AUTO(getArray) +{ + const struct { + const char *s; + uint32_t v[4]; + size_t vn; + } tbl[] = { + { "0", { 0, 0, 0, 0 }, 1 }, + { "1234", { 1234, 0, 0, 0 }, 1 }, + { "0xaabbccdd12345678", { 0x12345678, 0xaabbccdd, 0, 0 }, 2 }, + { "0x11112222333344445555666677778888", { 0x77778888, 0x55556666, 0x33334444, 0x11112222 }, 4 }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + mpz_class x(tbl[i].s); + const size_t bufN = 8; + uint32_t buf[bufN]; + mcl::gmp::getArray(buf, bufN, x); + size_t n = mcl::fp::getNonZeroArraySize(buf, bufN); + CYBOZU_TEST_EQUAL(n, tbl[i].vn); + CYBOZU_TEST_EQUAL_ARRAY(buf, tbl[i].v, n); + } +} + +void serializeTest() +{ + const char *tbl[] = { "0", "-1", "123" }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + char buf[1024]; + Fp x, y; + x.setStr(tbl[i]); + size_t n = x.serialize(buf, sizeof(buf)); + CYBOZU_TEST_EQUAL(n, 
Fp::getByteSize()); + y.deserialize(buf, n); + CYBOZU_TEST_EQUAL(x, y); + + n = x.serialize(buf, sizeof(buf), mcl::IoSerializeHexStr); + CYBOZU_TEST_EQUAL(n, Fp::getByteSize() * 2); + y.deserialize(buf, n, mcl::IoSerializeHexStr); + CYBOZU_TEST_EQUAL(x, y); + } +} + +void modpTest() +{ + const mpz_class& p = Fp::getOp().mp; + const mpz_class tbl[] = { + 0, + 1, + p - 1, + p, + p + 1, + p * 2 - 1, + p * 2, + p * 2 + 1, + p * (p - 1) - 1, + p * (p - 1), + p * (p - 1) + 1, + p * p - 1, + p * p, + p * p + 1, + (mpz_class(1) << Fp::getOp().N * mcl::fp::UnitBitSize * 2) - 1, + }; + mcl::Modp modp; + modp.init(p); + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const mpz_class& x = tbl[i]; + mpz_class r1, r2; + r1 = x % p; + modp.modp(r2, x); + CYBOZU_TEST_EQUAL(r1, r2); + } +} + +#include +#if (defined(MCL_USE_LLVM) || defined(MCL_USE_XBYAK)) && (MCL_MAX_BIT_SIZE >= 521) +CYBOZU_TEST_AUTO(mod_NIST_P521) +{ + const size_t len = 521; + const size_t N = len / mcl::fp::UnitBitSize; + const char *tbl[] = { + "0", + "0xffffffff", + "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0", + "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", + "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0x20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "0x20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00111423424", + "0x11111111111111112222222222222222333333333333333344444444444444445555555555555555666666666666666677777777777777778888888888888888aaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbccccccccccccccccddddddddddddddddeeeeeeeeeeeeeeeeffffffffffffffff1234712341234123412341234123412341234", + "0x3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }; + const char *p = "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"; + Fp::init(p, mcl::fp::FP_XBYAK); + const mpz_class mp(p); + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + mpz_class mx(tbl[i]); + mcl::fp::Unit in[N * 2 + 1] = {}; + mcl::fp::Unit ok[N + 1]; + mcl::fp::Unit ex[N + 1]; + mcl::gmp::getArray(in, N * 2 + 1, mx); + mpz_class my = mx % mp; + mcl::gmp::getArray(ok, N + 1, my); +#ifdef MCL_USE_LLVM + mcl_fpDbl_mod_NIST_P521L(ex, in, Fp::getOp().p); + CYBOZU_TEST_EQUAL_ARRAY(ex, ok, N + 1); +#endif +#ifdef MCL_USE_XBYAK + const mcl::fp::Op& op = Fp::getOp(); + if (!op.isMont) { + op.fpDbl_mod(ex, in, op.p); + CYBOZU_TEST_EQUAL_ARRAY(ex, ok, N + 1); + } +#endif + } +} +#endif + +void sub(mcl::fp::Mode mode) +{ + printf("mode=%s\n", mcl::fp::ModeToStr(mode)); + const char *tbl[] = { + // N = 2 + "0x0000000000000001000000000000000d", + "0x7fffffffffffffffffffffffffffffff", + 
"0x8000000000000000000000000000001d", + "0xffffffffffffffffffffffffffffff61", + + // N = 3 + "0x000000000000000100000000000000000000000000000033", // min prime + "0x00000000fffffffffffffffffffffffffffffffeffffac73", + "0x0000000100000000000000000001b8fa16dfab9aca16b6b3", + "0x000000010000000000000000000000000000000000000007", + "0x30000000000000000000000000000000000000000000002b", + "0x70000000000000000000000000000000000000000000001f", + "0x800000000000000000000000000000000000000000000005", + "0xfffffffffffffffffffffffffffffffffffffffeffffee37", + "0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d", + "0xffffffffffffffffffffffffffffffffffffffffffffff13", // max prime + + // N = 4 + "0x0000000000000001000000000000000000000000000000000000000000000085", // min prime + "0x2523648240000001ba344d80000000086121000000000013a700000000000013", + "0x7523648240000001ba344d80000000086121000000000013a700000000000017", + "0x800000000000000000000000000000000000000000000000000000000000005f", + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff43", // max prime +#if MCL_MAX_BIT_SIZE >= 384 + + // N = 6 + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", +#endif + +#if MCL_MAX_BIT_SIZE >= 521 + // N = 9 + "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", +#endif + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const char *pStr = tbl[i]; + printf("prime=%s\n", pStr); + Fp::init(pStr, mode); + cstrTest(); + setStrTest(); + streamTest(); + ioModeTest(); + edgeTest(); + convTest(); + compareTest(); + moduloTest(pStr); + opeTest(); + mulUnitTest(); + powTest(); + powNegTest(); + powFpTest(); + powGmp(); + setArrayTest1(); + setArrayMaskTest1(); + setArrayModTest(); + getUint64Test(); + getInt64Test(); + divBy2Test(); + getStrTest(); + setHashOfTest(); + serializeTest(); + modpTest(); + } + anotherFpTest(mode); + setArrayTest2(mode); + setArrayMaskTest2(mode); +} + +std::string g_mode; + +CYBOZU_TEST_AUTO(main) +{ + if (g_mode.empty() || g_mode == "auto") { + sub(mcl::fp::FP_AUTO); + } + if (g_mode.empty() || g_mode == "gmp") { + sub(mcl::fp::FP_GMP); + } + if (g_mode.empty() || g_mode == "gmp_mont") { + sub(mcl::fp::FP_GMP_MONT); + } +#ifdef MCL_USE_LLVM + if (g_mode.empty() || g_mode == "llvm") { + sub(mcl::fp::FP_LLVM); + } + if (g_mode.empty() || g_mode == "llvm_mont") { + sub(mcl::fp::FP_LLVM_MONT); + } +#endif +#ifdef MCL_USE_XBYAK + if (g_mode.empty() || g_mode == "xbyak") { + sub(mcl::fp::FP_XBYAK); + } +#endif +} + +CYBOZU_TEST_AUTO(copyUnitToByteAsLE) +{ + using namespace mcl::fp; +#if MCL_SIZEOF_UNIT == 4 + const Unit src[] = { 0x12345678, 0xaabbccdd, 0xffeeddcc, 0x87654321 }; +#else + const Unit src[] = { uint64_t(0xaabbccdd12345678ull), uint64_t(0x87654321ffeeddcc) }; +#endif + const uint8_t ok[] = { 0x78, 0x56, 0x34, 0x12, 0xdd, 0xcc, 0xbb, 0xaa, 0xcc, 0xdd, 0xee, 0xff, 0x21, 0x43, 0x65, 0x87 }; + const size_t okN = CYBOZU_NUM_OF_ARRAY(ok); + for (size_t i = 0; i < okN; i++) { + uint8_t buf[okN] = {}; + copyUnitToByteAsLE(buf, src, i); + CYBOZU_TEST_EQUAL_ARRAY(ok, buf, i); + } + mcl::fp::Unit dst[2]; + for (size_t i = 1; i <= sizeof(dst); i++) { + memset(dst, 0xff, sizeof(dst)); + mcl::fp::copyByteToUnitAsLE(dst, ok, i); + if (i < sizeof(Unit)) { + CYBOZU_TEST_EQUAL(src[0] & ((uint64_t(1) << (i * 8)) - 1), dst[0]); + CYBOZU_TEST_EQUAL(dst[1], Unit(-1)); + continue; + } + CYBOZU_TEST_EQUAL(dst[0], src[0]); + if (i == sizeof(Unit)) { 
+			CYBOZU_TEST_EQUAL(dst[1], Unit(-1));
+			continue;
+		}
+		if (i < sizeof(dst)) {
+			CYBOZU_TEST_EQUAL(src[1] & ((uint64_t(1) << ((i - sizeof(Unit)) * 8)) - 1), dst[1]);
+			continue;
+		}
+		CYBOZU_TEST_EQUAL(src[1], dst[1]);
+	}
+	dst[0] = 1;
+	copyByteToUnitAsLE(dst, ok, 0);
+	CYBOZU_TEST_EQUAL(dst[0], 1u);
+}
+
+int main(int argc, char *argv[])
+	try
+{
+	cybozu::Option opt;
+	opt.appendOpt(&g_mode, "", "m", ": mode(auto/gmp/gmp_mont/llvm/llvm_mont/xbyak)");
+	opt.appendHelp("h", ": show this message");
+	if (!opt.parse(argc, argv)) {
+		opt.usage();
+		return 1;
+	}
+	return cybozu::test::autoRun.run(argc, argv);
+} catch (std::exception& e) {
+	printf("ERR %s\n", e.what());
+	return 1;
+}
diff --git a/vendor/github.com/byzantine-lab/mcl/test/fp_tower_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/fp_tower_test.cpp
new file mode 100644
index 000000000..a7123f7a5
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/fp_tower_test.cpp
@@ -0,0 +1,477 @@
+#define PUT(x) std::cout << #x "=" << (x) << std::endl
+#define CYBOZU_TEST_DISABLE_AUTO_RUN
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef _MSC_VER
+	#pragma warning(disable : 4456)
+#endif
+
+#if MCL_MAX_BIT_SIZE >= 768
+typedef mcl::FpT<mcl::FpTag, 768> Fp;
+#else
+typedef mcl::FpT<mcl::FpTag, 384> Fp;
+#endif
+typedef mcl::Fp2T<Fp> Fp2;
+typedef mcl::FpDblT<Fp> FpDbl;
+typedef mcl::Fp6T<Fp> Fp6;
+typedef mcl::Fp12T<Fp> Fp12;
+
+bool g_benchOnly = false;
+
+void testFp2()
+{
+	using namespace mcl;
+	puts(__FUNCTION__);
+#if MCL_MAX_BIT_SIZE < 768
+	const size_t FpSize = 48;
+	CYBOZU_TEST_EQUAL(sizeof(Fp), FpSize);
+	CYBOZU_TEST_EQUAL(sizeof(Fp2), FpSize * 2);
+	CYBOZU_TEST_EQUAL(sizeof(Fp6), FpSize * 6);
+	CYBOZU_TEST_EQUAL(sizeof(Fp12), FpSize * 12);
+#endif
+	Fp2 x, y, z;
+	x.a = 1;
+	x.b = 2;
+
+	{
+		std::stringstream os;
+		os << x;
+		os >> y;
+		CYBOZU_TEST_EQUAL(x, y);
+	}
+	y.a = 3;
+	y.b = 4;
+	/*
+		x = 1 + 2i
+		y = 3 + 4i
+	*/
+	Fp2::add(z, x, y);
+	CYBOZU_TEST_EQUAL(z, Fp2(4, 6));
+	Fp2::sub(z, x, y);
+	CYBOZU_TEST_EQUAL(z, Fp2(-2, -2));
+	Fp2::mul(z, x, y);
+	/*
+		(1 + 2i)(3 + 4i) = (3 - 8) + (4 + 6)i = -5 + 10i
+	*/
+	CYBOZU_TEST_EQUAL(z, Fp2(-5, 10));
+	Fp2::neg(z, z);
+	CYBOZU_TEST_EQUAL(z, Fp2(5, -10));
+	/*
+		xi = xi_a + i
+		(1 - 2i)(xi_a + i) = (xi_a + 2) + (1 - 2 xi_a)i
+	*/
+	z = Fp2(1, -2);
+	Fp2::mul_xi(z, z);
+	Fp a = Fp2::get_xi_a();
+	CYBOZU_TEST_EQUAL(z, Fp2(a + 2, a * (-2) + 1));
+	z = x * x;
+	Fp2::sqr(y, x);
+	CYBOZU_TEST_EQUAL(z, y);
+	x.a = -123456789;
+	x.b = 464652165165;
+	y = x * x;
+	Fp2::sqr(x, x);
+	CYBOZU_TEST_EQUAL(x, y);
+	{
+		std::ostringstream oss;
+		oss << x;
+		std::istringstream iss(oss.str());
+		Fp2 w;
+		iss >> w;
+		CYBOZU_TEST_EQUAL(x, w);
+	}
+	y = 1;
+	for (int i = 0; i < 10; i++) {
+		Fp2::pow(z, x, i);
+		CYBOZU_TEST_EQUAL(z, y);
+		y *= x;
+	}
+	/*
+		(a + bi)^p = a + bi if p % 4 = 1
+		(a + bi)^p = a - bi if p % 4 = 3
+	*/
+	{
+		const mpz_class& mp = Fp::getOp().mp;
+		y = x;
+		Fp2::pow(z, y, mp);
+		if ((mp % 4) == 3) {
+			Fp::neg(z.b, z.b);
+		}
+		CYBOZU_TEST_EQUAL(z, y);
+	}
+	{
+		mpz_class t = Fp::getOp().mp;
+		t /= 2;
+		Fp x;
+		x.setMpz(t);
+		CYBOZU_TEST_EQUAL(x * 2, Fp(-1));
+		t += 1;
+		x.setMpz(t);
+		CYBOZU_TEST_EQUAL(x * 2, 1);
+	}
+	{
+		Fp2 a(1, 1);
+		Fp2 b(1, -1);
+		Fp2 c(Fp2(2) / a);
+		CYBOZU_TEST_EQUAL(c, b);
+		CYBOZU_TEST_EQUAL(a * b, Fp2(2));
+		CYBOZU_TEST_EQUAL(a * c, Fp2(2));
+	}
+	y = x;
+	Fp2::inv(y, x);
+	y *= x;
+	CYBOZU_TEST_EQUAL(y, 1);
+
+	// square root
+	for (int i = 0; i < 3; i++) {
+		x.a = i * i + i * 2;
+		x.b = i;
+		Fp2::sqr(y, x);
+		CYBOZU_TEST_ASSERT(Fp2::squareRoot(z, y));
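+		// Fp2::squareRoot may return either root (z or -z), so verify
+		// z * z == y instead of comparing z against x directly.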
CYBOZU_TEST_EQUAL(z * z, y); + CYBOZU_TEST_ASSERT(Fp2::squareRoot(y, y)); + CYBOZU_TEST_EQUAL(z * z, y * y); + x.b = 0; + Fp2::sqr(y, x); + CYBOZU_TEST_ASSERT(Fp2::squareRoot(z, y)); + CYBOZU_TEST_EQUAL(z * z, y); + x.a = 0; + x.b = i * i + i * 3; + Fp2::sqr(y, x); + CYBOZU_TEST_ASSERT(Fp2::squareRoot(z, y)); + CYBOZU_TEST_EQUAL(z * z, y); + } +} + +void testFp6sqr(const Fp2& a, const Fp2& b, const Fp2& c, const Fp6& x) +{ + Fp2 t; + t = b * c * 2; + Fp2::mul_xi(t, t); + t += a * a; + CYBOZU_TEST_EQUAL(x.a, t); + t = c * c; + Fp2::mul_xi(t, t); + t += a * b * 2; + CYBOZU_TEST_EQUAL(x.b, t); + t = b * b + a * c * 2; + CYBOZU_TEST_EQUAL(x.c, t); +} + +void testFp6() +{ + using namespace mcl; + puts(__FUNCTION__); + Fp2 a(1, 2), b(3, 4), c(5, 6); + Fp6 x(a, b, c); + Fp6 y(Fp2(-1, 1), Fp2(4, -3), Fp2(-6, 2)); + Fp6 z, w; + { + std::stringstream ss; + ss << x; + ss >> z; + CYBOZU_TEST_EQUAL(x, z); + } + Fp6::add(z, x, y); + CYBOZU_TEST_EQUAL(z, Fp6(Fp2(0, 3), Fp2(7, 1), Fp2(-1, 8))); + Fp6::sub(z, x, y); + CYBOZU_TEST_EQUAL(z, Fp6(Fp2(2, 1), Fp2(-1, 7), Fp2(11, 4))); + Fp6::neg(z, x); + CYBOZU_TEST_EQUAL(z, Fp6(-a, -b, -c)); + Fp6::sqr(z, x); + Fp6::mul(w, x, x); + testFp6sqr(a, b, c, z); + testFp6sqr(a, b, c, w); + z = x; + Fp6::sqr(z, z); + Fp6::mul(w, x, x); + testFp6sqr(a, b, c, z); + testFp6sqr(a, b, c, w); + for (int i = 0; i < 10; i++) { + Fp6::inv(y, x); + Fp6::mul(z, y, x); + CYBOZU_TEST_EQUAL(z, 1); + x += y; + y = x; + Fp6::inv(y, y); + y *= x; + CYBOZU_TEST_EQUAL(y, 1); + } +} + +void testFp12() +{ + puts(__FUNCTION__); + Fp6 xa(Fp2(1, 2), Fp2(3, 4), Fp2(5, 6)); + Fp6 xb(Fp2(3, 1), Fp2(6, -1), Fp2(-2, 5)); + Fp12 x(xa, xb); + Fp6 ya(Fp2(2, 1), Fp2(5, 3), Fp2(4, 1)); + Fp6 yb(Fp2(1, -3), Fp2(2, -1), Fp2(-3, 1)); + Fp12 y(ya, yb); + Fp12 z; + Fp12::add(z, x, y); + CYBOZU_TEST_EQUAL(z, Fp12(Fp6(Fp2(3, 3), Fp2(8, 7), Fp2(9, 7)), Fp6(Fp2(4, -2), Fp2(8, -2), Fp2(-5, 6)))); + Fp12::sub(z, x, y); + CYBOZU_TEST_EQUAL(z, Fp12(Fp6(Fp2(-1, 1), Fp2(-2, 1), Fp2(1, 5)), Fp6(Fp2(2, 4), Fp2(4, 0), Fp2(1, 4)))); + Fp12::neg(z, x); + CYBOZU_TEST_EQUAL(z, Fp12(-xa, -xb)); + + y.b.clear(); + z = y; + Fp12::sqr(z, z); + CYBOZU_TEST_EQUAL(z.a, y.a * y.a); + z = y * y; + CYBOZU_TEST_EQUAL(z.a, y.a * y.a); + CYBOZU_TEST_ASSERT(z.b.isZero()); + Fp12 w; + y = x; + z = x * x; + w = x; + Fp12::sqr(w, w); + CYBOZU_TEST_EQUAL(z, w); + y = x; + y *= y; + Fp12::sqr(x, x); + CYBOZU_TEST_EQUAL(x, y); + for (int i = 0; i < 10; i++) { + w = x; + Fp12::inv(w, w); + Fp12::mul(y, w, x); + CYBOZU_TEST_EQUAL(y, 1); + x += y; + } +} + +void testFpDbl() +{ + puts(__FUNCTION__); + { + std::string pstr; + Fp::getModulo(pstr); + mpz_class mp(pstr); + mp <<= Fp::getUnitSize() * mcl::fp::UnitBitSize; + mpz_class mp1 = mp - 1; + mcl::gmp::getStr(pstr, mp1); + const char *tbl[] = { + "0", "1", "123456", "123456789012345668909", pstr.c_str(), + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + mpz_class mx(tbl[i]); + FpDbl x; + x.setMpz(mx); + for (size_t j = 0; j < CYBOZU_NUM_OF_ARRAY(tbl); j++) { + FpDbl y, z; + mpz_class mz, mo; + mpz_class my(tbl[j]); + y.setMpz(my); + FpDbl::add(z, x, y); + mcl::gmp::addMod(mo, mx, my, mp); + z.getMpz(mz); + CYBOZU_TEST_EQUAL(mz, mo); + mcl::gmp::subMod(mo, mx, my, mp); + FpDbl::sub(z, x, y); + z.getMpz(mz); + CYBOZU_TEST_EQUAL(mz, mo); + if (!Fp::isFullBit()) { + FpDbl::addPre(z, x, y); + mo = mx + my; + z.getMpz(mz); + CYBOZU_TEST_EQUAL(mz, mo); + if (mx >= my) { + FpDbl::subPre(z, x, y); + mo = mx - my; + z.getMpz(mz); + CYBOZU_TEST_EQUAL(mz, mo); + } + } + } + } + } + { + 
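+		// FpDbl::add/sub reduce modulo p << (unitSize * UnitBitSize); this
+		// block rebuilds that double-width modulus with GMP and cross-checks
+		// the results, including the boundary value mp - 1.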
std::string pstr; + Fp::getModulo(pstr); + const mpz_class mp(pstr); + cybozu::XorShift rg; + for (int i = 0; i < 3; i++) { + Fp x, y, z; + mpz_class mx, my, mz, mo; + x.setRand(rg); + y.setRand(rg); + x.getMpz(mx); + y.getMpz(my); + FpDbl d; + FpDbl::mulPre(d, x, y); + d.getMpz(mz); + { + Fp tx, ty; + tx = x; + ty = y; + tx.toMont(); + ty.toMont(); + mpz_class mtx, mty; + tx.getMpz(mtx); + ty.getMpz(mty); + mo = mtx * mty; + } + CYBOZU_TEST_EQUAL(mz, mo); + + FpDbl::mod(z, d); + z.getMpz(mz); + mo = (mx * my) % mp; + CYBOZU_TEST_EQUAL(mz, mo); + CYBOZU_TEST_EQUAL(z, x * y); + + FpDbl::sqrPre(d, x); + d.getMpz(mz); + { + Fp tx; + tx = x; + tx.toMont(); + mpz_class mtx; + tx.getMpz(mtx); + mo = mtx * mtx; + } + CYBOZU_TEST_EQUAL(mz, mo); + + FpDbl::mod(z, d); + z.getMpz(mz); + mo = (mx * mx) % mp; + CYBOZU_TEST_EQUAL(mz, mo); + CYBOZU_TEST_EQUAL(z, x * x); + } + } +} + +void testIo() +{ + int modeTbl[] = { 0, 2, 2 | mcl::IoPrefix, 10, 16, 16 | mcl::IoPrefix, mcl::IoArray, mcl::IoArrayRaw }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(modeTbl); i++) { + int ioMode = modeTbl[i]; + Fp12 x; + for (int j = 0; j < 12; j++) { + x.getFp0()[j] = j * j; + } + std::string s = x.getStr(ioMode); + Fp12 y; + y.setStr(s, ioMode); + CYBOZU_TEST_EQUAL(x, y); + } +} + +void benchFp2() +{ + puts(__FUNCTION__); + Fp2 x, y; + x.a.setStr("4"); + x.b.setStr("464652165165"); + y = x * x; + double addT, subT, mulT, sqrT, invT, mul_xiT; + CYBOZU_BENCH_T(addT, Fp2::add, x, x, y); + CYBOZU_BENCH_T(subT, Fp2::sub, x, x, y); + CYBOZU_BENCH_T(mulT, Fp2::mul, x, x, y); + CYBOZU_BENCH_T(sqrT, Fp2::sqr, x, x); + CYBOZU_BENCH_T(invT, Fp2::inv, x, x); + CYBOZU_BENCH_T(mul_xiT, Fp2::mul_xi, x, x); +// CYBOZU_BENCH("Fp2::mul_Fp_0", Fp2::mul_Fp_0, x, x, Param::half); +// CYBOZU_BENCH("Fp2::mul_Fp_1", Fp2::mul_Fp_1, x, Param::half); +// CYBOZU_BENCH("Fp2::divBy2 ", Fp2::divBy2, x, x); +// CYBOZU_BENCH("Fp2::divBy4 ", Fp2::divBy4, x, x); + printf("add %8.2f|sub %8.2f|mul %8.2f|sqr %8.2f|inv %8.2f|mul_xi %8.2f\n", addT, subT, mulT, sqrT, invT, mul_xiT); +} + +void test(const char *p, mcl::fp::Mode mode) +{ + const int xi_a = 1; + Fp::init(xi_a, p, mode); + printf("mode=%s\n", mcl::fp::ModeToStr(mode)); + Fp2::init(); +#if 0 + if (Fp::getBitSize() > 256) { + printf("not support p=%s\n", p); + return; + } +#endif + if (g_benchOnly) { + benchFp2(); + return; + } + testFp2(); + testFpDbl(); + testFp6(); + testFp12(); + testIo(); +} + +void testAll() +{ + const char *tbl[] = { + // N = 2 + "0x0000000000000001000000000000000d", + "0x7fffffffffffffffffffffffffffffff", + "0x8000000000000000000000000000001d", + "0xffffffffffffffffffffffffffffff61", + + // N = 3 + "0x000000000000000100000000000000000000000000000033", // min prime + "0x00000000fffffffffffffffffffffffffffffffeffffac73", + "0x0000000100000000000000000001b8fa16dfab9aca16b6b3", + "0x000000010000000000000000000000000000000000000007", + "0x30000000000000000000000000000000000000000000002b", + "0x70000000000000000000000000000000000000000000001f", + "0x800000000000000000000000000000000000000000000005", + "0xfffffffffffffffffffffffffffffffeffffffffffffffff", + "0xfffffffffffffffffffffffffffffffffffffffeffffee37", + "0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d", + "0xffffffffffffffffffffffffffffffffffffffffffffff13", // max prime + + // N = 4 + "0x0000000000000001000000000000000000000000000000000000000000000085", // min prime + "0x2523648240000001ba344d80000000086121000000000013a700000000000013", + "0x7523648240000001ba344d80000000086121000000000013a700000000000017", + 
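+		// note: the 0x2523... entry above is the BN254 field characteristic;
+		// the surrounding entries probe min/max primes and high-bit patterns.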
"0x800000000000000000000000000000000000000000000000000000000000005f", + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff43", // max prime +#if MCL_MAX_BIT_SIZE >= 384 + // N = 6 + "0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", +#endif +#if MCL_MAX_BIT_SIZE >= 768 + "776259046150354467574489744231251277628443008558348305569526019013025476343188443165439204414323238975243865348565536603085790022057407195722143637520590569602227488010424952775132642815799222412631499596858234375446423426908029627", +#endif + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const char *p = tbl[i]; + printf("prime=%s %d\n", p, (int)(strlen(p) - 2) * 4); + test(p, mcl::fp::FP_GMP); +#ifdef MCL_USE_LLVM + test(p, mcl::fp::FP_LLVM); + test(p, mcl::fp::FP_LLVM_MONT); +#endif +#ifdef MCL_USE_XBYAK + test(p, mcl::fp::FP_XBYAK); +#endif + } +} + +CYBOZU_TEST_AUTO(testAll) +{ + testAll(); +} + +int main(int argc, char *argv[]) +{ + if (argc > 1 && strcmp(argv[1], "-bench") == 0) { + g_benchOnly = true; + } + if (g_benchOnly) { + testAll(); + return 0; + } else { + return cybozu::test::autoRun.run(argc, argv); + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/fp_util_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/fp_util_test.cpp new file mode 100644 index 000000000..e8a9f9aa5 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/fp_util_test.cpp @@ -0,0 +1,270 @@ +#define PUT(x) std::cout << #x "=" << (x) << std::endl +#include +#include +#include +#include +#include + +CYBOZU_TEST_AUTO(arrayToHex) +{ + const struct { + uint32_t x[4]; + size_t n; + const char *str; + } tbl[] = { + { { 0, 0, 0, 0 }, 0, "0" }, + { { 0x123, 0, 0, 0 }, 1, "123" }, + { { 0x12345678, 0xaabbcc, 0, 0 }, 2, "aabbcc12345678" }, + { { 0, 0x12, 0x234a, 0 }, 3, "234a0000001200000000" }, + { { 1, 2, 0xffffffff, 0x123abc }, 4, "123abcffffffff0000000200000001" }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + char buf[64]; + size_t n = mcl::fp::arrayToHex(buf, sizeof(buf), tbl[i].x, tbl[i].n, false); + CYBOZU_TEST_ASSERT(n > 0); + CYBOZU_TEST_EQUAL_ARRAY(buf + sizeof(buf) - n, tbl[i].str, n); + n = mcl::fp::arrayToHex(buf, sizeof(buf), tbl[i].x, tbl[i].n, true); + CYBOZU_TEST_ASSERT(n > 0); + CYBOZU_TEST_EQUAL_ARRAY(buf + sizeof(buf) - n, (std::string("0x") + tbl[i].str).c_str(), n); + } +} + +CYBOZU_TEST_AUTO(arrayToBin) +{ + const struct { + uint32_t x[4]; + size_t n; + const char *str; + } tbl[] = { + { { 0, 0, 0, 0 }, 0, "0" }, + { { 0x123, 0, 0, 0 }, 1, "100100011" }, + { { 0x12345678, 0xaabbcc, 0, 0 }, 2, "10101010101110111100110000010010001101000101011001111000" }, + { { 0, 0x12, 0x234a, 0 }, 3, "100011010010100000000000000000000000000001001000000000000000000000000000000000" }, + { { 1, 2, 0xffffffff, 0x123abc }, 4, "100100011101010111100111111111111111111111111111111110000000000000000000000000000001000000000000000000000000000000001" }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + char buf[512]; + size_t n = mcl::fp::arrayToBin(buf, sizeof(buf), tbl[i].x, tbl[i].n, false); + CYBOZU_TEST_ASSERT(n > 0); + CYBOZU_TEST_EQUAL_ARRAY(buf + sizeof(buf) - n, tbl[i].str, n); + n = mcl::fp::arrayToBin(buf, sizeof(buf), tbl[i].x, tbl[i].n, true); + CYBOZU_TEST_ASSERT(n > 0); + CYBOZU_TEST_EQUAL_ARRAY(buf + sizeof(buf) - n, (std::string("0b") + tbl[i].str).c_str(), n); + } +} +// CYBOZU_TEST_AUTO(verifyStr) // 
QQQ + +CYBOZU_TEST_AUTO(hexToArray) +{ + const struct { + const char *str; + uint64_t x[4]; + } tbl[] = { + { "0", { 0, 0, 0, 0 } }, + { "5", { 5, 0, 0, 0 } }, + { "123", { 0x123, 0, 0, 0 } }, + { "123456789012345679adbc", { uint64_t(0x789012345679adbcull), 0x123456, 0, 0 } }, + { "ffffffff26f2fc170f69466a74defd8d", { uint64_t(0x0f69466a74defd8dull), uint64_t(0xffffffff26f2fc17ull), 0, 0 } }, + { "100000000000000000000000000000033", { uint64_t(0x0000000000000033ull), 0, 1, 0 } }, + { "11ee12312312940000000000000000000000000002342343", { uint64_t(0x0000000002342343ull), uint64_t(0x0000000000000000ull), uint64_t(0x11ee123123129400ull), 0 } }, + { "1234567890abcdefABCDEF123456789aba32134723424242424", { uint64_t(0x2134723424242424ull), uint64_t(0xDEF123456789aba3ull), uint64_t(0x4567890abcdefABCull), 0x123 } }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const size_t xN = 4; + uint64_t x[xN]; + size_t n = mcl::fp::hexToArray(x, xN, tbl[i].str, strlen(tbl[i].str)); + CYBOZU_TEST_ASSERT(n > 0); + CYBOZU_TEST_EQUAL_ARRAY(x, tbl[i].x, n); + } +} + +CYBOZU_TEST_AUTO(compareArray) +{ + const struct { + uint32_t a[4]; + uint32_t b[4]; + size_t n; + int expect; + } tbl[] = { + { { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, 0, 0 }, + { { 1, 0, 0, 0 }, { 0, 0, 0, 0 }, 1, 1 }, + { { 0, 0, 0, 0 }, { 1, 0, 0, 0 }, 1, -1 }, + { { 1, 0, 0, 0 }, { 1, 0, 0, 0 }, 1, 0 }, + { { 3, 1, 1, 0 }, { 2, 1, 1, 0 }, 4, 1 }, + { { 9, 2, 1, 1 }, { 1, 3, 1, 1 }, 4, -1 }, + { { 1, 7, 8, 4 }, { 1, 7, 8, 9 }, 3, 0 }, + { { 1, 7, 8, 4 }, { 1, 7, 8, 9 }, 4, -1 }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + int e = mcl::fp::compareArray(tbl[i].a, tbl[i].b, tbl[i].n); + CYBOZU_TEST_EQUAL(e, tbl[i].expect); + } +} + +CYBOZU_TEST_AUTO(isLessArray) +{ + const struct { + uint32_t a[4]; + uint32_t b[4]; + size_t n; + bool expect; + } tbl[] = { + { { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, 0, false }, + { { 1, 0, 0, 0 }, { 0, 0, 0, 0 }, 1, false }, + { { 0, 0, 0, 0 }, { 1, 0, 0, 0 }, 1, true }, + { { 1, 0, 0, 0 }, { 1, 0, 0, 0 }, 1, false }, + { { 3, 1, 1, 0 }, { 2, 1, 1, 0 }, 4, false }, + { { 3, 1, 2, 0 }, { 2, 2, 2, 0 }, 4, true }, + { { 9, 2, 1, 1 }, { 1, 3, 1, 1 }, 4, true }, + { { 1, 7, 8, 4 }, { 1, 7, 8, 9 }, 3, false }, + { { 1, 7, 8, 4 }, { 1, 7, 8, 9 }, 4, true }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + bool e = mcl::fp::isLessArray(tbl[i].a, tbl[i].b, tbl[i].n); + CYBOZU_TEST_EQUAL(e, tbl[i].expect); + e = mcl::fp::isGreaterArray(tbl[i].b, tbl[i].a, tbl[i].n); + CYBOZU_TEST_EQUAL(e, tbl[i].expect); + } +} + +CYBOZU_TEST_AUTO(isLessOrEqualArray) +{ + const struct { + uint32_t a[4]; + uint32_t b[4]; + size_t n; + bool expect; + } tbl[] = { + { { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, 0, true }, + { { 1, 0, 0, 0 }, { 0, 0, 0, 0 }, 1, false }, + { { 0, 0, 0, 0 }, { 1, 0, 0, 0 }, 1, true }, + { { 1, 0, 0, 0 }, { 1, 0, 0, 0 }, 1, true }, + { { 3, 1, 1, 0 }, { 2, 1, 1, 0 }, 4, false }, + { { 3, 1, 2, 0 }, { 2, 2, 2, 0 }, 4, true }, + { { 9, 2, 1, 1 }, { 1, 3, 1, 1 }, 4, true }, + { { 1, 7, 8, 4 }, { 1, 7, 8, 9 }, 3, true }, + { { 1, 7, 8, 4 }, { 1, 7, 8, 9 }, 4, true }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + bool e = mcl::fp::isLessOrEqualArray(tbl[i].a, tbl[i].b, tbl[i].n); + CYBOZU_TEST_EQUAL(e, tbl[i].expect); + e = mcl::fp::isGreaterOrEqualArray(tbl[i].b, tbl[i].a, tbl[i].n); + CYBOZU_TEST_EQUAL(e, tbl[i].expect); + } +} + +struct Rand { + std::vector v; + const uint8_t *p; + size_t pos; + size_t endPos; + void read(bool *pb, void *x, size_t n) + { + if (pos + n > 
endPos) { + *pb = false; + return; + } + uint8_t *dst = (uint8_t*)x; + memcpy(dst, p + pos, n); + pos += n; + *pb = true; + } + void read(void *x, size_t n) + { + bool b; + read(&b, x, n); + if (!b) throw cybozu::Exception("Rand") << n; + } + uint32_t operator()() + { + char buf[4]; + read(buf, 4); + uint32_t v; + memcpy(&v, buf, 4); + return v; + } + Rand(const uint32_t *x, size_t n) + : p(0) + , pos(0) + { + for (size_t i = 0; i < n; i++) { + v.push_back(x[i]); + } + p = (uint8_t*)&v[0]; + endPos = v.size() * 4; + } +}; + +CYBOZU_TEST_AUTO(maskArray) +{ +#if 1 + const size_t n = 2; + uint32_t org[n] = { 0xabce1234, 0xffffef32 }; + for (size_t i = 0; i <= sizeof(org) * 8; i++) { + uint32_t x[n]; + memcpy(x, org, sizeof(org)); + mcl::fp::maskArray(x, n, i); + mpz_class t; + mcl::gmp::setArray(t, org, n); + t &= (mpz_class(1) << i) - 1; + uint32_t y[n]; + mcl::gmp::getArray(y, n, t); + CYBOZU_TEST_EQUAL_ARRAY(x, y, n); + } +#else + const size_t n = 4; + uint16_t org[n] = { 0x1234, 0xabce, 0xef32, 0xffff }; + for (size_t i = 0; i <= sizeof(org) * 8; i++) { + uint16_t x[n]; + memcpy(x, org, sizeof(org)); + mcl::fp::maskArray(x, n, i); + mpz_class t; + mcl::gmp::setArray(t, org, n); + t &= (mpz_class(1) << i) - 1; + uint16_t y[n]; + mcl::gmp::getArray(y, n, t); + CYBOZU_TEST_EQUAL_ARRAY(x, y, n); + } +#endif +} + +CYBOZU_TEST_AUTO(stream) +{ + const char *nulTbl[] = { "", " ", " \t\t\n\n " }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(nulTbl); i++) { + const char *p = nulTbl[i]; + cybozu::MemoryInputStream is(p, strlen(p)); + std::string w = "abc"; + mcl::fp::local::loadWord(w, is); + CYBOZU_TEST_ASSERT(w.empty()); + } + const struct { + const char *buf; + const char *expect[2]; + size_t n; + } tbl[] = { + { "\t\t \n\rabc\r\r\n def", { "abc", "def" }, 2 }, + { "123", { "123" }, 1 }, + { "123\n", { "123" }, 1 }, + { "123 456", { "123", "456" }, 2 }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const char *buf = tbl[i].buf; + { + cybozu::MemoryInputStream is(buf, strlen(buf)); + for (size_t j = 0; j < tbl[i].n; j++) { + std::string w; + mcl::fp::local::loadWord(w, is); + CYBOZU_TEST_EQUAL(w, tbl[i].expect[j]); + } + } + { + std::istringstream is(buf); + for (size_t j = 0; j < tbl[i].n; j++) { + std::string w; + mcl::fp::local::loadWord(w, is); + CYBOZU_TEST_EQUAL(w, tbl[i].expect[j]); + } + } + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/glv_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/glv_test.cpp new file mode 100644 index 000000000..a917f51f4 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/glv_test.cpp @@ -0,0 +1,209 @@ +#define PUT(x) std::cout << #x "=" << (x) << std::endl; +#include +#include +#include + +#if 1 +#include +using namespace mcl::bn384; +#else +#include +using namespace mcl::bn256; +#endif + +#define PUT(x) std::cout << #x "=" << (x) << std::endl; + +/* + Skew Frobenius Map and Efficient Scalar Multiplication for Pairing-Based Cryptography + Y. Sakemi, Y. Nogami, K. Okeya, H. Kato, Y. 
Morikawa +*/ +struct oldGLV { + Fp w; // (-1 + sqrt(-3)) / 2 + mpz_class r; + mpz_class v; // 6z^2 + 4z + 1 > 0 + mpz_class c; // 2z + 1 + void init(const mpz_class& r, const mpz_class& z) + { + if (!Fp::squareRoot(w, -3)) throw cybozu::Exception("oldGLV:init"); + w = (w - 1) / 2; + this->r = r; + v = 1 + z * (4 + z * 6); + c = 2 * z + 1; + } + /* + (p^2 mod r) (x, y) = (wx, -y) + */ + void mulP2(G1& Q, const G1& P) const + { + Fp::mul(Q.x, P.x, w); + Fp::neg(Q.y, P.y); + Q.z = P.z; + } + /* + x = ap^2 + b mod r + assume(x < r); + */ + void split(mpz_class& a, mpz_class& b, const mpz_class& x) const + { + assert(0 < x && x < r); + /* + x = s1 * v + s2 // s1 = x / v, s2 = x % v + = s1 * c * p^2 + s2 // vP = cp^2 P + = (s3 * v + s4) * p^2 + s2 // s3 = (s1 * c) / v, s4 = (s1 * c) % v + = (s3 * c * p^2 + s4) * p^2 + s2 + = (s3 * c) * p^4 + s4 * p^2 + s2 // s5 = s3 * c, p^4 = p^2 - 1 + = s5 * (p^2 - 1) + s4 * p^2 + s2 + = (s4 + s5) * p^2 + (s2 - s5) + */ + mpz_class t; + mcl::gmp::divmod(a, t, x, v); // a = t / v, t = t % v + a *= c; + mcl::gmp::divmod(b, a, a, v); // b = a / v, a = a % v + b *= c; + a += b; + b = t - b; + } + template + void mul(G1& Q, const G1& P, const mpz_class& x) const + { + G1 A, B; + mpz_class a, b; + split(a, b, x); + mulP2(A, P); + G1::mul(A, A, a); + G1::mul(B, P, b); + G1::add(Q, A, B); + } +}; + +template +void compareLength(const GLV1& rhs, const GLV2& lhs) +{ + cybozu::XorShift rg; + int lt = 0; + int eq = 0; + int gt = 0; + mpz_class R0, R1, L0, L1, x; + Fr r; + for (int i = 1; i < 1000; i++) { + r.setRand(rg); + x = r.getMpz(); + rhs.split(R0, R1, x); + lhs.split(L0, L1, x); + + size_t R0n = mcl::gmp::getBitSize(R0); + size_t R1n = mcl::gmp::getBitSize(R1); + size_t L0n = mcl::gmp::getBitSize(L0); + size_t L1n = mcl::gmp::getBitSize(L1); + size_t Rn = std::max(R0n, R1n); + size_t Ln = std::max(L0n, L1n); + if (Rn == Ln) { + eq++; + } + if (Rn > Ln) { + gt++; + } + if (Rn < Ln) { + lt++; + } + } + printf("#of{<} = %d, #of{=} = %d #of{>} = %d\n", lt, eq, gt); +} + +void testGLV1() +{ + G1 P0, P1, P2; + mapToG1(P0, 1); + cybozu::XorShift rg; + + oldGLV oldGlv; + if (!BN::param.isBLS12) { + oldGlv.init(BN::param.r, BN::param.z); + } + + mcl::bn::local::GLV1 glv; + glv.init(BN::param.r, BN::param.z, BN::param.isBLS12); + if (!BN::param.isBLS12) { + compareLength(glv, oldGlv); + } + + for (int i = 1; i < 100; i++) { + mapToG1(P0, i); + Fr s; + s.setRand(rg); + mpz_class ss = s.getMpz(); + G1::mulGeneric(P1, P0, ss); + glv.mul(P2, P0, ss); + CYBOZU_TEST_EQUAL(P1, P2); + glv.mul(P2, P0, ss, true); + CYBOZU_TEST_EQUAL(P1, P2); + if (!BN::param.isBLS12) { + oldGlv.mul(P2, P0, ss); + CYBOZU_TEST_EQUAL(P1, P2); + } + } + for (int i = -100; i < 100; i++) { + mpz_class ss = i; + G1::mulGeneric(P1, P0, ss); + glv.mul(P2, P0, ss); + CYBOZU_TEST_EQUAL(P1, P2); + glv.mul(P2, P0, ss, true); + CYBOZU_TEST_EQUAL(P1, P2); + } + Fr s; + mapToG1(P0, 123); + CYBOZU_BENCH_C("Ec::mul", 100, P1 = P0; s.setRand(rg); G1::mulGeneric, P2, P1, s.getMpz()); + CYBOZU_BENCH_C("Ec::glv", 100, P1 = P0; s.setRand(rg); glv.mul, P2, P1, s.getMpz()); +} + +/* + lambda = 6 * z * z + mul (lambda * 2) = FrobeniusOnTwist * 2 +*/ +void testGLV2() +{ + G2 Q0, Q1, Q2; + mpz_class z = BN::param.z; + mpz_class r = BN::param.r; + mpz_class lambda = 6 * z * z; + mcl::bn::local::GLV2 glv2; + glv2.init(r, z, BN::param.isBLS12); + mpz_class n; + cybozu::XorShift rg; + mapToG2(Q0, 1); + for (int i = -10; i < 10; i++) { + n = i; + G2::mulGeneric(Q1, Q0, n); + glv2.mul(Q2, Q0, n); + CYBOZU_TEST_EQUAL(Q1, Q2); + } + for 
(int i = 1; i < 100; i++) { + mcl::gmp::getRand(n, glv2.rBitSize, rg); + n %= r; + n -= r/2; + mapToG2(Q0, i); + G2::mulGeneric(Q1, Q0, n); + glv2.mul(Q2, Q0, n); + CYBOZU_TEST_EQUAL(Q1, Q2); + } + Fr s; + mapToG2(Q0, 123); + CYBOZU_BENCH_C("G2::mul", 1000, Q2 = Q0; s.setRand(rg); G2::mulGeneric, Q2, Q1, s.getMpz()); + CYBOZU_BENCH_C("G2::glv", 1000, Q1 = Q0; s.setRand(rg); glv2.mul, Q2, Q1, s.getMpz()); +} + +CYBOZU_TEST_AUTO(glv) +{ + const mcl::CurveParam tbl[] = { + mcl::BN254, + mcl::BN381_1, + mcl::BN381_2, + mcl::BLS12_381, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const mcl::CurveParam& cp = tbl[i]; + initPairing(cp); + testGLV1(); + testGLV2(); + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/gmp_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/gmp_test.cpp new file mode 100644 index 000000000..1fe9d4eb6 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/gmp_test.cpp @@ -0,0 +1,70 @@ +#include +#include +#include + +CYBOZU_TEST_AUTO(testBit) +{ + const size_t maxBit = 100; + const size_t tbl[] = { + 3, 9, 5, 10, 50, maxBit + }; + mpz_class a; + std::vector b(maxBit + 1); + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + a |= mpz_class(1) << tbl[i]; + b[tbl[i]] = 1; + } + for (size_t i = 0; i <= maxBit; i++) { + bool c1 = mcl::gmp::testBit(a, i); + bool c2 = b[i] != 0; + CYBOZU_TEST_EQUAL(c1, c2); + } +} + +CYBOZU_TEST_AUTO(getStr) +{ + const struct { + int x; + const char *dec; + const char *hex; + } tbl[] = { + { 0, "0", "0" }, + { 1, "1", "1" }, + { 10, "10", "a" }, + { 16, "16", "10" }, + { 123456789, "123456789", "75bcd15" }, + { -1, "-1", "-1" }, + { -10, "-10", "-a" }, + { -16, "-16", "-10" }, + { -100000000, "-100000000", "-5f5e100" }, + { -987654321, "-987654321", "-3ade68b1" }, + { -2147483647, "-2147483647", "-7fffffff" }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + mpz_class x = tbl[i].x; + char buf[32]; + size_t n, len; + len = strlen(tbl[i].dec); + n = mcl::gmp::getStr(buf, len, x, 10); + CYBOZU_TEST_EQUAL(n, 0); + n = mcl::gmp::getStr(buf, len + 1, x, 10); + CYBOZU_TEST_EQUAL(n, len); + CYBOZU_TEST_EQUAL_ARRAY(buf, tbl[i].dec, n); + + len = strlen(tbl[i].hex); + n = mcl::gmp::getStr(buf, len, x, 16); + CYBOZU_TEST_EQUAL(n, 0); + n = mcl::gmp::getStr(buf, len + 1, x, 16); + CYBOZU_TEST_EQUAL(n, len); + CYBOZU_TEST_EQUAL_ARRAY(buf, tbl[i].hex, n); + } +} + +CYBOZU_TEST_AUTO(getRandPrime) +{ + for (int i = 0; i < 10; i++) { + mpz_class z; + mcl::gmp::getRandPrime(z, i * 10 + 3); + CYBOZU_TEST_ASSERT(mcl::gmp::isPrime(z)); + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/low_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/low_test.cpp new file mode 100644 index 000000000..f5e72a0b3 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/low_test.cpp @@ -0,0 +1,73 @@ +#ifndef MCL_USE_LLVM + #define MCL_USE_LLVM +#endif +#include +#include +#include +#include "../src/low_func.hpp" +#include + +cybozu::XorShift rg; + +extern "C" void add_test(mcl::fp::Unit *z, const mcl::fp::Unit *x, const mcl::fp::Unit *y); + +template +void bench() +{ + using namespace mcl::fp; + const size_t N = bit / UnitBitSize; + Unit x[N], y[N]; + for (int i = 0; i < 10; i++) { + Unit z[N]; + Unit w[N]; + rg.read(x, N); + rg.read(y, N); + AddPre::f(z, x, y); + AddPre::f(w, x, y); + CYBOZU_TEST_EQUAL_ARRAY(z, w, N); + + SubPre::f(z, x, y); + SubPre::f(w, x, y); + CYBOZU_TEST_EQUAL_ARRAY(z, w, N); + } + const std::string bitS = cybozu::itoa(bit); + std::string name; + name = "add" + bitS; 
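+	// benchmark the N-limb primitive in place (x = x op y); the label encodes
+	// the operand width in bits, e.g. "add256" runs the 4-limb adder on a
+	// 64-bit build.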
CYBOZU_BENCH(name.c_str(), (AddPre::f), x, x, y); + name = "sub" + bitS; CYBOZU_BENCH(name.c_str(), (SubPre::f), x, x, y); +} + +CYBOZU_TEST_AUTO(addPre64) { bench<64>(); } +CYBOZU_TEST_AUTO(addPre128) { bench<128>(); } +CYBOZU_TEST_AUTO(addPre192) { bench<192>(); } +CYBOZU_TEST_AUTO(addPre256) { bench<256>(); } +CYBOZU_TEST_AUTO(addPre320) { bench<320>(); } +CYBOZU_TEST_AUTO(addPre384) { bench<384>(); } +CYBOZU_TEST_AUTO(addPre448) { bench<448>(); } +CYBOZU_TEST_AUTO(addPre512) { bench<512>(); } +//CYBOZU_TEST_AUTO(addPre96) { bench<96>(); } +//CYBOZU_TEST_AUTO(addPre160) { bench<160>(); } +//CYBOZU_TEST_AUTO(addPre224) { bench<224>(); } +#if 0 +CYBOZU_TEST_AUTO(addPre) +{ + using namespace mcl::fp; + const size_t bit = 128; + const size_t N = bit / UnitBitSize; + Unit x[N], y[N]; + for (int i = 0; i < 10; i++) { + Unit z[N]; + Unit w[N]; + rg.read(x, N); + rg.read(y, N); + low_addPre_G(z, x, y); + addPre(w, x, y); + CYBOZU_TEST_EQUAL_ARRAY(z, w, N); + add_test(w, x, y); + CYBOZU_TEST_EQUAL_ARRAY(z, w, N); + } + std::string name = "add" + cybozu::itoa(bit); + CYBOZU_BENCH(name.c_str(), addPre, x, x, y); + CYBOZU_BENCH("add", add_test, x, x, y); +} +#endif + diff --git a/vendor/github.com/byzantine-lab/mcl/test/mk32.sh b/vendor/github.com/byzantine-lab/mcl/test/mk32.sh new file mode 100644 index 000000000..4d5f60711 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/mk32.sh @@ -0,0 +1 @@ +g++ -O3 -march=native base_test.cpp ../src/x86.s -m32 -I ~/32/include/ -I ../include/ -I ../../xbyak/ -I ../../cybozulib/include ~/32/lib/libgmp.a ~/32/lib/libgmpxx.a -I ~/32/lib -DNDEBUG diff --git a/vendor/github.com/byzantine-lab/mcl/test/modp_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/modp_test.cpp new file mode 100644 index 000000000..bf9da38bf --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/modp_test.cpp @@ -0,0 +1,37 @@ +#include +#include +#include + +#define PUT(x) std::cout << #x << "=" << x << std::endl; + +CYBOZU_TEST_AUTO(modp) +{ + const int C = 1000000; + const char *pTbl[] = { + "0x2523648240000001ba344d8000000007ff9f800000000010a10000000000000d", + "0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab", + "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001", + }; + const char *xTbl[] = { + "0x12345678892082039482094823", + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0x10000000000000000000000000000000000000000000000000000000000000000", + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }; + mcl::Modp modp; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(pTbl); i++) { + const mpz_class p(pTbl[i]); + std::cout << std::hex << "p=" << p << std::endl; + modp.init(p); + for (size_t j = 0; j < CYBOZU_NUM_OF_ARRAY(xTbl); j++) { + const mpz_class x(xTbl[j]); + std::cout << std::hex << "x=" << x << std::endl; + mpz_class r1, r2; + r1 = x % p; + modp.modp(r2, x); + CYBOZU_TEST_EQUAL(r1, r2); + CYBOZU_BENCH_C("x % p", C, mcl::gmp::mod, r1, x, p); + CYBOZU_BENCH_C("modp ", C, modp.modp, r2, x); + } + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/mont_fp_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/mont_fp_test.cpp new file mode 100644 index 000000000..e41e77a53 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/mont_fp_test.cpp @@ -0,0 +1,332 @@ +#define PUT(x) std::cout << #x "=" << (x) << std::endl +#include +#include +#include +#include + +#if 0 +#include +using namespace mcl::bls12; +typedef Fr 
Zn;
+#else
+#include
+struct ZnTag;
+typedef mcl::FpT<ZnTag> Zn;
+typedef mcl::FpT<> Fp;
+#endif
+
+struct Montgomery {
+	typedef mcl::fp::Unit Unit;
+	mpz_class p_;
+	mpz_class R_; // (1 << (pn_ * 64)) % p
+	mpz_class RR_; // (R * R) % p
+	Unit rp_; // rp_ * p = -1 mod M, where M = 1 << 64
+	size_t pn_;
+	Montgomery() {}
+	explicit Montgomery(const mpz_class& p)
+	{
+		p_ = p;
+		rp_ = mcl::fp::getMontgomeryCoeff(mcl::gmp::getUnit(p, 0));
+		pn_ = mcl::gmp::getUnitSize(p);
+		R_ = 1;
+		R_ = (R_ << (pn_ * 64)) % p_;
+		RR_ = (R_ * R_) % p_;
+	}
+
+	void toMont(mpz_class& x) const { mul(x, x, RR_); }
+	void fromMont(mpz_class& x) const { mul(x, x, 1); }
+
+	void mul(mpz_class& z, const mpz_class& x, const mpz_class& y) const
+	{
+#if 0
+		const size_t ySize = mcl::gmp::getUnitSize(y);
+		mpz_class c = x * mcl::gmp::getUnit(y, 0);
+		Unit q = mcl::gmp::getUnit(c, 0) * rp_;
+		c += p_ * q;
+		c >>= sizeof(Unit) * 8;
+		for (size_t i = 1; i < pn_; i++) {
+			if (i < ySize) {
+				c += x * mcl::gmp::getUnit(y, i);
+			}
+			Unit q = mcl::gmp::getUnit(c, 0) * rp_;
+			c += p_ * q;
+			c >>= sizeof(Unit) * 8;
+		}
+		if (c >= p_) {
+			c -= p_;
+		}
+		z = c;
+#else
+		z = x * y;
+		for (size_t i = 0; i < pn_; i++) {
+			Unit q = mcl::gmp::getUnit(z, 0) * rp_;
+#ifdef MCL_USE_VINT
+			z += p_ * q;
+#else
+			mpz_class t;
+			mcl::gmp::set(t, q);
+			z += p_ * t;
+#endif
+			z >>= sizeof(Unit) * 8;
+		}
+		if (z >= p_) {
+			z -= p_;
+		}
+#endif
+	}
+	void mod(mpz_class& z, const mpz_class& xy) const
+	{
+		z = xy;
+		for (size_t i = 0; i < pn_; i++) {
+//printf("i=%zd\n", i);
+//std::cout << "z=" << std::hex << z << std::endl;
+			Unit q = mcl::gmp::getUnit(z, 0) * rp_;
+//std::cout << "q=" << q << std::endl;
+			mpz_class t;
+			mcl::gmp::set(t, q);
+			z += p_ * t;
+			z >>= sizeof(Unit) * 8;
+//std::cout << "z=" << std::hex << z << std::endl;
+		}
+		if (z >= p_) {
+			z -= p_;
+		}
+//std::cout << "z=" << std::hex << z << std::endl;
+	}
+};
+
+template<class T>
+mpz_class getMpz(const T& x)
+{
+	std::string str = x.getStr();
+	mpz_class t;
+	mcl::gmp::setStr(t, str);
+	return t;
+}
+
+template<class T>
+std::string getStr(const T& x)
+{
+	std::ostringstream os;
+	os << x;
+	return os.str();
+}
+
+template<class T, class U>
+T castTo(const U& x)
+{
+	T t;
+	t.setStr(getStr(x));
+	return t;
+}
+
+template<class T>
+void putRaw(const T& x)
+{
+	const uint64_t *p = x.getInnerValue();
+	for (size_t i = 0, n = T::BlockSize; i < n; i++) {
+		printf("%016llx", p[n - 1 - i]);
+	}
+	printf("\n");
+}
+
+template<size_t N>
+void put(const uint64_t (&x)[N])
+{
+	for (size_t i = 0; i < N; i++) {
+		printf("%016llx", x[N - 1 - i]);
+	}
+	printf("\n");
+}
+
+struct Test {
+	typedef mcl::FpT<> Fp;
+	void run(const char *p)
+	{
+		Fp::init(p);
+		Fp x("-123456789");
+		Fp y("-0x7ffffffff");
+		CYBOZU_BENCH("add", operator+, x, x);
+		CYBOZU_BENCH("sub", operator-, x, y);
+		CYBOZU_BENCH("mul", operator*, x, x);
+		CYBOZU_BENCH("sqr", Fp::sqr, x, x);
+		CYBOZU_BENCH("div", y += x; operator/, x, y);
+	}
+};
+
+void customTest(const char *pStr, const char *xStr, const char *yStr)
+{
+#if 0
+	{
+		pStr = "0xfffffffffffffffffffffffffffffffffffffffeffffee37",
+		Fp::init(pStr);
+		static uint64_t x[3] = { 1, 0, 0 };
+		uint64_t z[3];
+		std::cout << std::hex;
+		/* ... the rest of this disabled block and the live body of
+		   customTest were lost in extraction ... */
+	}
+#endif
+}
+
+#if MCL_MAX_BIT_SIZE >= 521
+CYBOZU_TEST_AUTO(customTest)
+{
+	const struct {
+		const char *p;
+		const char *x;
+		const char *y;
+	} tbl[] = {
+		{
+			"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+//			"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff",
+//			"0xfffffffffffffffffffffffffffffffffffffffeffffee37",
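+//			(p above is the Mersenne prime 2^521 - 1; x = y = p - 1 below
+//			exercise the widest 9-limb operands)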
+ "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", + "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe" + }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + customTest(tbl[i].p, tbl[i].x, tbl[i].y); + } +} +#endif + +CYBOZU_TEST_AUTO(test) +{ + Test test; + const char *tbl[] = { +#if 1 + // N = 2 + "0x0000000000000001000000000000000d", + "0x7fffffffffffffffffffffffffffffff", + "0x8000000000000000000000000000001d", + "0xffffffffffffffffffffffffffffff61", + + // N = 3 + "0x000000000000000100000000000000000000000000000033", // min prime + "0x00000000fffffffffffffffffffffffffffffffeffffac73", + "0x0000000100000000000000000001b8fa16dfab9aca16b6b3", + "0x000000010000000000000000000000000000000000000007", + "0x30000000000000000000000000000000000000000000002b", + "0x70000000000000000000000000000000000000000000001f", + "0x800000000000000000000000000000000000000000000005", + "0xfffffffffffffffffffffffffffffffffffffffeffffee37", + "0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d", + "0xffffffffffffffffffffffffffffffffffffffffffffff13", // max prime + + // N = 4 + "0x0000000000000001000000000000000000000000000000000000000000000085", // min prime + "0x2523648240000001ba344d80000000086121000000000013a700000000000013", + "0x7523648240000001ba344d80000000086121000000000013a700000000000017", + "0x800000000000000000000000000000000000000000000000000000000000005f", + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff43", // max prime +#endif + +#if MCL_MAX_BIT_SIZE >= 384 + // N = 6 + "0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", +#endif + +#if MCL_MAX_BIT_SIZE >= 521 + // N = 9 + "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", +#endif + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + printf("prime=%s\n", tbl[i]); +#if 0 + mpz_class p(tbl[i]); + initPairing(mcl::BLS12_381); +#if 1 + cybozu::XorShift rg; + for (int i = 0; i < 1000; i++) { + Fp x, y, z; + FpDbl xy; + x.setByCSPRNG(rg); + y.setByCSPRNG(rg); + FpDbl::mulPre(xy, x, y); + FpDbl::mod(z, xy); + if (z != x * y) { + puts("ERR"); + std::cout << std::hex; + PUT(x); + PUT(y); + PUT(z); + PUT(x * y); + exit(1); + } + } +#else + Montgomery mont(p); + mpz_class x("19517141aafb2ffc39517141aafb2ffc39517141aafb2ffc39517141aafb2ffc39517141aafb2ffc39517141aafb2ffc", 16); + mpz_class y("139517141aafb2ffc39517141aafb2ffc39517141aafb2ffc39517141aafb2ffc39517141aafb2ffc39517141aafb2ff", 16); + std::cout << std::hex; + PUT(x); + PUT(y); + mpz_class z; + mont.mul(z, x, y); + PUT(z); + Fp x1, y1, z1; + puts("aaa"); + memcpy(&x1, mcl::gmp::getUnit(x), sizeof(x1)); + memcpy(&y1, mcl::gmp::getUnit(y), sizeof(y1)); + z1.clear(); + x1.dump(); + y1.dump(); + Fp::mul(z1, x1, y1); + z1.dump(); +#endif + exit(1); +#endif + test.run(tbl[i]); + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/paillier_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/paillier_test.cpp new file mode 100644 index 000000000..31d2b26fc --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/paillier_test.cpp @@ -0,0 +1,24 @@ +#include +#include + +CYBOZU_TEST_AUTO(paillier) +{ + using namespace mcl::paillier; + SecretKey 
sec;
+	sec.init(2048);
+	PublicKey pub;
+	sec.getPublicKey(pub);
+	mpz_class m1("12342340928409"), m2("23049820498204");
+	mpz_class c1, c2, c3;
+	pub.enc(c1, m1);
+	pub.enc(c2, m2);
+	std::cout << std::hex << "c1=" << c1 << "\nc2=" << c2 << std::endl;
+	pub.add(c3, c1, c2);
+	mpz_class d1, d2, d3;
+	sec.dec(d1, c1);
+	sec.dec(d2, c2);
+	sec.dec(d3, c3);
+	CYBOZU_TEST_EQUAL(m1, d1);
+	CYBOZU_TEST_EQUAL(m2, d2);
+	CYBOZU_TEST_EQUAL(m1 + m2, d3);
+}
diff --git a/vendor/github.com/byzantine-lab/mcl/test/proj/bn_test/bn_test.vcxproj b/vendor/github.com/byzantine-lab/mcl/test/proj/bn_test/bn_test.vcxproj
new file mode 100644
index 000000000..936e075aa
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/proj/bn_test/bn_test.vcxproj
@@ -0,0 +1,88 @@
+[MSVC project file, 88 lines: the XML markup was stripped in extraction; surviving text: Debug/Release x64 configurations, GUID {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}, Win32Proj, bn_test, toolset v120, MultiByte, Level3/Disabled (Debug) and MaxSpeed (Release) console-app settings.]
\ No newline at end of file
diff --git a/vendor/github.com/byzantine-lab/mcl/test/proj/ec_test/ec_test.vcxproj b/vendor/github.com/byzantine-lab/mcl/test/proj/ec_test/ec_test.vcxproj
new file mode 100644
index 000000000..4bdfda2cb
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/proj/ec_test/ec_test.vcxproj
@@ -0,0 +1,88 @@
+[MSVC project file, 88 lines: the XML markup was stripped in extraction; surviving text: Debug/Release x64 configurations, GUID {46B6E88E-739A-406B-9F68-BC46C5950FA3}, Win32Proj, ec_test, toolset v120, MultiByte, Level3/Disabled (Debug) and MaxSpeed (Release) console-app settings.]
diff --git a/vendor/github.com/byzantine-lab/mcl/test/proj/fp_test/fp_test.vcxproj b/vendor/github.com/byzantine-lab/mcl/test/proj/fp_test/fp_test.vcxproj
new file mode 100644
index 000000000..f705982bf
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/proj/fp_test/fp_test.vcxproj
@@ -0,0 +1,88 @@
+[MSVC project file, 88 lines: the XML markup was stripped in extraction; surviving text: Debug/Release x64 configurations, GUID {51266DE6-B57B-4AE3-B85C-282F170E1728}, Win32Proj, fp_test, toolset v120, MultiByte, Level3/Disabled (Debug) and MaxSpeed (Release) console-app settings.]
\ No newline at end of file
diff --git a/vendor/github.com/byzantine-lab/mcl/test/proj/fp_tower_test/fp_tower_test.vcxproj b/vendor/github.com/byzantine-lab/mcl/test/proj/fp_tower_test/fp_tower_test.vcxproj
new file mode 100644
index 000000000..d5720678f
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/proj/fp_tower_test/fp_tower_test.vcxproj
@@ -0,0 +1,88 @@
+[MSVC project file, 88 lines: the XML markup was stripped in extraction; surviving text: Debug/Release x64 configurations, GUID {733B6250-D249-4A99-B2A6-C8FAF6A90E97}, Win32Proj, fp_tower_test, toolset v120, MultiByte, Level3/Disabled (Debug) and MaxSpeed (Release) console-app settings.]
\ No newline at end of file
diff --git a/vendor/github.com/byzantine-lab/mcl/test/she_c256_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/she_c256_test.cpp
new file mode 100644
index 000000000..3e458b623
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/she_c256_test.cpp
@@ -0,0 +1,2 @@
+#define MCLBN_FP_UNIT_SIZE 4
+#include "she_c_test.hpp"
diff --git a/vendor/github.com/byzantine-lab/mcl/test/she_c384_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/she_c384_test.cpp
new file mode 100644
index 000000000..5c7bd9882
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/she_c384_test.cpp
@@ -0,0 +1,2 @@
+#define MCLBN_FP_UNIT_SIZE 6
+#include "she_c_test.hpp"
diff --git a/vendor/github.com/byzantine-lab/mcl/test/she_c_test.hpp b/vendor/github.com/byzantine-lab/mcl/test/she_c_test.hpp
new file mode 100644
index 000000000..8287c0e0a
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/mcl/test/she_c_test.hpp
@@ -0,0 +1,535 @@
+#include
+#define CYBOZU_TEST_DISABLE_AUTO_RUN
+#include
+#include
+#include
+
+const size_t hashSize = 1 << 10;
+const size_t tryNum = 1024;
+
+CYBOZU_TEST_AUTO(init)
+{
+	int curve;
+#if MCLBN_FP_UNIT_SIZE == 4
+	curve = MCL_BN254;
+#elif MCLBN_FP_UNIT_SIZE == 6
+//	curve = MCL_BN381_1;
+	curve = MCL_BLS12_381;
+#elif MCLBN_FP_UNIT_SIZE == 8
+	curve = MCL_BN462;
+#endif
+	int ret;
+	ret = sheInit(curve, MCLBN_COMPILED_TIME_VAR);
+	CYBOZU_TEST_EQUAL(ret, 0);
+	ret = sheSetRangeForDLP(hashSize);
+	CYBOZU_TEST_EQUAL(ret, 0);
+}
+
+CYBOZU_TEST_AUTO(encDec)
+{
+	sheSecretKey sec;
+	sheSecretKeySetByCSPRNG(&sec);
+	shePublicKey pub;
+	sheGetPublicKey(&pub, &sec);
+
+	int64_t m = 123;
+	sheCipherTextG1 c1;
+	sheCipherTextG2 c2;
+	sheCipherTextGT ct;
+	sheEncG1(&c1, &pub, m);
+	sheEncG2(&c2, &pub, m);
+	sheEncGT(&ct, &pub, m);
+
+	int64_t dec;
+	CYBOZU_TEST_EQUAL(sheDecG1(&dec, &sec, &c1), 0);
+	CYBOZU_TEST_EQUAL(dec, m);
+	dec = 0;
+	CYBOZU_TEST_EQUAL(sheDecG1ViaGT(&dec, &sec, &c1), 0);
+	CYBOZU_TEST_EQUAL(dec, m);
+	dec = 0;
+	CYBOZU_TEST_EQUAL(sheDecG2(&dec, &sec, &c2), 0);
+	CYBOZU_TEST_EQUAL(dec, m);
+	dec = 0;
+	CYBOZU_TEST_EQUAL(sheDecG2ViaGT(&dec, &sec, &c2), 0);
+	CYBOZU_TEST_EQUAL(dec, m);
+	dec = 0;
+	CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0);
+	CYBOZU_TEST_EQUAL(dec, m);
+
+	for (int m = -3; m < 3; m++) {
+		sheEncG1(&c1, &pub, m);
+		CYBOZU_TEST_EQUAL(sheIsZeroG1(&sec, &c1), m == 0);
+		sheEncG2(&c2, &pub, m);
+		CYBOZU_TEST_EQUAL(sheIsZeroG2(&sec, &c2), m == 0);
+		sheEncGT(&ct, &pub, m);
+		CYBOZU_TEST_EQUAL(sheIsZeroGT(&sec, &ct), m == 0);
+	}
+}
+
+CYBOZU_TEST_AUTO(addMul)
+{
+	sheSecretKey sec;
+	sheSecretKeySetByCSPRNG(&sec);
+	shePublicKey pub;
+	sheGetPublicKey(&pub, &sec);
+
+	int64_t m1 = 12;
+	int64_t m2 = -9;
+	sheCipherTextG1 c1;
+	sheCipherTextG2 c2;
+	sheCipherTextGT ct;
+	sheEncG1(&c1, &pub, m1);
+	sheEncG2(&c2, &pub, m2);
+	sheMul(&ct, &c1, &c2);
+
+	int64_t dec;
+	CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0);
+	CYBOZU_TEST_EQUAL(dec, m1 * m2);
+}
+
+CYBOZU_TEST_AUTO(allOp)
+{
+	sheSecretKey sec;
+	sheSecretKeySetByCSPRNG(&sec);
+	shePublicKey pub;
+	sheGetPublicKey(&pub, &sec);
+
+	int64_t m1 = 12;
+	int64_t m2 = -9;
+	int64_t m3 = 12;
+	int64_t m4 = -9;
+	int64_t dec;
+	sheCipherTextG1 c11, c12;
+	sheCipherTextG2 c21, c22;
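+	// G1/G2 ciphertexts are level-1 (additively homomorphic only); a single
+	// sheMul below pairs one of each into a level-2 GT ciphertext.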
sheCipherTextGT ct; + + sheEncG1(&c11, &pub, m1); + sheNegG1(&c12, &c11); + CYBOZU_TEST_EQUAL(sheDecG1(&dec, &sec, &c12), 0); + CYBOZU_TEST_EQUAL(dec, -m1); + + sheEncG1(&c12, &pub, m2); + sheSubG1(&c11, &c11, &c12); // m1 - m2 + sheMulG1(&c11, &c11, 4); // 4 * (m1 - m2) + + sheEncG2(&c21, &pub, m3); + sheNegG2(&c22, &c21); + CYBOZU_TEST_EQUAL(sheDecG2(&dec, &sec, &c22), 0); + CYBOZU_TEST_EQUAL(dec, -m3); + sheEncG2(&c22, &pub, m4); + sheSubG2(&c21, &c21, &c22); // m3 - m4 + sheMulG2(&c21, &c21, -5); // -5 * (m3 - m4) + sheMul(&ct, &c11, &c21); // -20 * (m1 - m2) * (m3 - m4) + sheAddGT(&ct, &ct, &ct); // -40 * (m1 - m2) * (m3 - m4) + sheMulGT(&ct, &ct, -4); // 160 * (m1 - m2) * (m3 - m4) + + int64_t t = 160 * (m1 - m2) * (m3 - m4); + CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0); + CYBOZU_TEST_EQUAL(dec, t); + + sheCipherTextGT ct2; + sheNegGT(&ct2, &ct); + CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct2), 0); + CYBOZU_TEST_EQUAL(dec, -t); +} + +CYBOZU_TEST_AUTO(rerand) +{ + sheSecretKey sec; + sheSecretKeySetByCSPRNG(&sec); + shePublicKey pub; + sheGetPublicKey(&pub, &sec); + + int64_t m1 = 12; + int64_t m2 = -9; + int64_t m3 = 12; + sheCipherTextG1 c1; + sheCipherTextG2 c2; + sheCipherTextGT ct1, ct2; + sheEncG1(&c1, &pub, m1); + sheReRandG1(&c1, &pub); + + sheEncG2(&c2, &pub, m2); + sheReRandG2(&c2, &pub); + + sheEncGT(&ct1, &pub, m3); + sheReRandGT(&ct1, &pub); + + sheMul(&ct2, &c1, &c2); + sheReRandGT(&ct2, &pub); + sheAddGT(&ct1, &ct1, &ct2); + + int64_t dec; + CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct1), 0); + CYBOZU_TEST_EQUAL(dec, m1 * m2 + m3); +} + +CYBOZU_TEST_AUTO(serialize) +{ + sheSecretKey sec1, sec2; + sheSecretKeySetByCSPRNG(&sec1); + shePublicKey pub1, pub2; + sheGetPublicKey(&pub1, &sec1); + + char buf1[4096], buf2[4096]; + size_t n1, n2; + size_t r, size; + const size_t sizeofFr = mclBn_getFrByteSize(); + const size_t sizeofFp = mclBn_getG1ByteSize(); + + size = sizeofFr * 2; + n1 = sheSecretKeySerialize(buf1, sizeof(buf1), &sec1); + CYBOZU_TEST_EQUAL(n1, size); + r = sheSecretKeyDeserialize(&sec2, buf1, n1); + CYBOZU_TEST_EQUAL(r, n1); + n2 = sheSecretKeySerialize(buf2, sizeof(buf2), &sec2); + CYBOZU_TEST_EQUAL(n2, size); + CYBOZU_TEST_EQUAL_ARRAY(buf1, buf2, n2); + + size = sizeofFp * 3; + n1 = shePublicKeySerialize(buf1, sizeof(buf1), &pub1); + CYBOZU_TEST_EQUAL(n1, size); + r = shePublicKeyDeserialize(&pub2, buf1, n1); + CYBOZU_TEST_EQUAL(r, n1); + n2 = shePublicKeySerialize(buf2, sizeof(buf2), &pub2); + CYBOZU_TEST_EQUAL(n2, size); + CYBOZU_TEST_EQUAL_ARRAY(buf1, buf2, n2); + + int m = 123; + sheCipherTextG1 c11, c12; + sheCipherTextG2 c21, c22; + sheCipherTextGT ct1, ct2; + sheEncG1(&c11, &pub2, m); + sheEncG2(&c21, &pub2, m); + sheEncGT(&ct1, &pub2, m); + + size = sizeofFp * 2; + n1 = sheCipherTextG1Serialize(buf1, sizeof(buf1), &c11); + CYBOZU_TEST_EQUAL(n1, size); + r = sheCipherTextG1Deserialize(&c12, buf1, n1); + CYBOZU_TEST_EQUAL(r, n1); + n2 = sheCipherTextG1Serialize(buf2, sizeof(buf2), &c12); + CYBOZU_TEST_EQUAL(n2, size); + CYBOZU_TEST_EQUAL_ARRAY(buf1, buf2, n2); + + size = sizeofFp * 4; + n1 = sheCipherTextG2Serialize(buf1, sizeof(buf1), &c21); + CYBOZU_TEST_EQUAL(n1, size); + r = sheCipherTextG2Deserialize(&c22, buf1, n1); + CYBOZU_TEST_EQUAL(r, n1); + n2 = sheCipherTextG2Serialize(buf2, sizeof(buf2), &c22); + CYBOZU_TEST_EQUAL(n2, size); + CYBOZU_TEST_EQUAL_ARRAY(buf1, buf2, n2); + + size = sizeofFp * 12 * 4; + n1 = sheCipherTextGTSerialize(buf1, sizeof(buf1), &ct1); + CYBOZU_TEST_EQUAL(n1, size); + r = sheCipherTextGTDeserialize(&ct2, buf1, n1); + 
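+	// round-trip check: a GT ciphertext consists of four Fp12 elements,
+	// hence the 12 * 4 base-field elements in the expected size above.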
CYBOZU_TEST_EQUAL(r, n1); + n2 = sheCipherTextGTSerialize(buf2, sizeof(buf2), &ct2); + CYBOZU_TEST_EQUAL(n2, size); + CYBOZU_TEST_EQUAL_ARRAY(buf1, buf2, n2); +} + +CYBOZU_TEST_AUTO(convert) +{ + sheSecretKey sec; + sheSecretKeySetByCSPRNG(&sec); + shePublicKey pub; + sheGetPublicKey(&pub, &sec); + sheCipherTextGT ct; + const int64_t m = 123; + int64_t dec; + sheCipherTextG1 c1; + sheEncG1(&c1, &pub, m); + CYBOZU_TEST_EQUAL(sheDecG1(&dec, &sec, &c1), 0); + CYBOZU_TEST_EQUAL(dec, 123); + sheConvertG1(&ct, &pub, &c1); + dec = 0; + CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0); + CYBOZU_TEST_EQUAL(dec, 123); + + sheCipherTextG2 c2; + sheEncG2(&c2, &pub, m); + CYBOZU_TEST_EQUAL(sheDecG2(&dec, &sec, &c2), 0); + CYBOZU_TEST_EQUAL(dec, 123); + sheConvertG2(&ct, &pub, &c2); + dec = 0; + CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0); + CYBOZU_TEST_EQUAL(dec, 123); +} + +CYBOZU_TEST_AUTO(precomputed) +{ + sheSecretKey sec; + sheSecretKeySetByCSPRNG(&sec); + shePublicKey pub; + sheGetPublicKey(&pub, &sec); + shePrecomputedPublicKey *ppub = shePrecomputedPublicKeyCreate(); + CYBOZU_TEST_EQUAL(shePrecomputedPublicKeyInit(ppub, &pub), 0); + const int64_t m = 152; + sheCipherTextG1 c1; + sheCipherTextG2 c2; + sheCipherTextGT ct; + int64_t dec = 0; + CYBOZU_TEST_EQUAL(shePrecomputedPublicKeyEncG1(&c1, ppub, m), 0); + CYBOZU_TEST_EQUAL(sheDecG1(&dec, &sec, &c1), 0); + CYBOZU_TEST_EQUAL(dec, m); + dec = 0; + CYBOZU_TEST_EQUAL(shePrecomputedPublicKeyEncG2(&c2, ppub, m), 0); + CYBOZU_TEST_EQUAL(sheDecG2(&dec, &sec, &c2), 0); + CYBOZU_TEST_EQUAL(dec, m); + dec = 0; + CYBOZU_TEST_EQUAL(shePrecomputedPublicKeyEncGT(&ct, ppub, m), 0); + CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0); + CYBOZU_TEST_EQUAL(dec, m); + + shePrecomputedPublicKeyDestroy(ppub); +} + +template +void ZkpBinTest(const sheSecretKey *sec, const PK *pub, encWithZkpFunc encWithZkp, decFunc dec, verifyFunc verify) +{ + CT c; + sheZkpBin zkp; + for (int m = 0; m < 2; m++) { + CYBOZU_TEST_EQUAL(encWithZkp(&c, &zkp, pub, m), 0); + mclInt mDec; + CYBOZU_TEST_EQUAL(dec(&mDec, sec, &c), 0); + CYBOZU_TEST_EQUAL(mDec, m); + CYBOZU_TEST_EQUAL(verify(pub, &c, &zkp), 1); + { + char buf[4096]; + size_t n = sheZkpBinSerialize(buf, sizeof(buf), &zkp); + CYBOZU_TEST_EQUAL(n, mclBn_getFrByteSize() * CYBOZU_NUM_OF_ARRAY(zkp.d)); + sheZkpBin zkp2; + size_t r = sheZkpBinDeserialize(&zkp2, buf, n); + CYBOZU_TEST_EQUAL(r, n); + CYBOZU_TEST_EQUAL(r, n); + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(zkp.d); i++) { + CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&zkp.d[i], &zkp2.d[i])); + } + } + zkp.d[0].d[0]++; + CYBOZU_TEST_EQUAL(verify(pub, &c, &zkp), 0); + } + CYBOZU_TEST_ASSERT(encWithZkp(&c, &zkp, pub, 2) != 0); +} + +CYBOZU_TEST_AUTO(ZkpBin) +{ + sheSecretKey sec; + sheSecretKeySetByCSPRNG(&sec); + shePublicKey pub; + sheGetPublicKey(&pub, &sec); + + ZkpBinTest(&sec, &pub, sheEncWithZkpBinG1, sheDecG1, sheVerifyZkpBinG1); + ZkpBinTest(&sec, &pub, sheEncWithZkpBinG2, sheDecG2, sheVerifyZkpBinG2); + + shePrecomputedPublicKey *ppub = shePrecomputedPublicKeyCreate(); + CYBOZU_TEST_EQUAL(shePrecomputedPublicKeyInit(ppub, &pub), 0); + + ZkpBinTest(&sec, ppub, shePrecomputedPublicKeyEncWithZkpBinG1, sheDecG1, shePrecomputedPublicKeyVerifyZkpBinG1); + ZkpBinTest(&sec, ppub, shePrecomputedPublicKeyEncWithZkpBinG2, sheDecG2, shePrecomputedPublicKeyVerifyZkpBinG2); + + shePrecomputedPublicKeyDestroy(ppub); +} + +template +void ZkpBinEqTest(const sheSecretKey *sec, const PK *pub, encWithZkpFunc encWithZkp, verifyFunc verify) +{ + sheCipherTextG1 c1; + sheCipherTextG2 c2; + 
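+	// ZkpBinEq proves c1 and c2 encrypt the same plaintext and that it is a
+	// bit (0 or 1); the loop also checks that a corrupted proof fails and
+	// that m = 2 is rejected at encryption time.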
sheZkpBinEq zkp; + for (int m = 0; m < 2; m++) { + CYBOZU_TEST_EQUAL(encWithZkp(&c1, &c2, &zkp, pub, m), 0); + mclInt mDec = -1; + CYBOZU_TEST_EQUAL(sheDecG1(&mDec, sec, &c1), 0); + CYBOZU_TEST_EQUAL(mDec, m); + mDec = -1; + CYBOZU_TEST_EQUAL(sheDecG2(&mDec, sec, &c2), 0); + CYBOZU_TEST_EQUAL(mDec, m); + CYBOZU_TEST_EQUAL(verify(pub, &c1, &c2, &zkp), 1); + { + char buf[2048]; + size_t n = sheZkpBinEqSerialize(buf, sizeof(buf), &zkp); + CYBOZU_TEST_EQUAL(n, mclBn_getFrByteSize() * CYBOZU_NUM_OF_ARRAY(zkp.d)); + sheZkpBinEq zkp2; + size_t r = sheZkpBinEqDeserialize(&zkp2, buf, n); + CYBOZU_TEST_EQUAL(r, n); + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(zkp.d); i++) { + CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&zkp.d[i], &zkp2.d[i])); + } + } + zkp.d[0].d[0]++; + CYBOZU_TEST_EQUAL(verify(pub, &c1, &c2, &zkp), 0); + } + CYBOZU_TEST_ASSERT(encWithZkp(&c1, &c2, &zkp, pub, 2) != 0); +} + +CYBOZU_TEST_AUTO(ZkpBinEq) +{ + sheSecretKey sec; + sheSecretKeySetByCSPRNG(&sec); + shePublicKey pub; + sheGetPublicKey(&pub, &sec); + + ZkpBinEqTest(&sec, &pub, sheEncWithZkpBinEq, sheVerifyZkpBinEq); + + shePrecomputedPublicKey *ppub = shePrecomputedPublicKeyCreate(); + CYBOZU_TEST_EQUAL(shePrecomputedPublicKeyInit(ppub, &pub), 0); + + ZkpBinEqTest(&sec, ppub, shePrecomputedPublicKeyEncWithZkpBinEq, shePrecomputedPublicKeyVerifyZkpBinEq); + + shePrecomputedPublicKeyDestroy(ppub); +} + +template +void ZkpEqTest(const sheSecretKey *sec, const PK *pub, encWithZkpFunc encWithZkp, verifyFunc verify) +{ + sheCipherTextG1 c1; + sheCipherTextG2 c2; + sheZkpEq zkp; + for (int m = -5; m < 5; m++) { + CYBOZU_TEST_EQUAL(encWithZkp(&c1, &c2, &zkp, pub, m), 0); + mclInt mDec = -1; + CYBOZU_TEST_EQUAL(sheDecG1(&mDec, sec, &c1), 0); + CYBOZU_TEST_EQUAL(mDec, m); + mDec = -1; + CYBOZU_TEST_EQUAL(sheDecG2(&mDec, sec, &c2), 0); + CYBOZU_TEST_EQUAL(mDec, m); + CYBOZU_TEST_EQUAL(verify(pub, &c1, &c2, &zkp), 1); + { + char buf[2048]; + size_t n = sheZkpEqSerialize(buf, sizeof(buf), &zkp); + CYBOZU_TEST_EQUAL(n, mclBn_getFrByteSize() * CYBOZU_NUM_OF_ARRAY(zkp.d)); + sheZkpEq zkp2; + size_t r = sheZkpEqDeserialize(&zkp2, buf, n); + CYBOZU_TEST_EQUAL(r, n); + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(zkp.d); i++) { + CYBOZU_TEST_ASSERT(mclBnFr_isEqual(&zkp.d[i], &zkp2.d[i])); + } + } + zkp.d[0].d[0]++; + CYBOZU_TEST_EQUAL(verify(pub, &c1, &c2, &zkp), 0); + } +} + +CYBOZU_TEST_AUTO(ZkpEq) +{ + sheSecretKey sec; + sheSecretKeySetByCSPRNG(&sec); + shePublicKey pub; + sheGetPublicKey(&pub, &sec); + + ZkpEqTest(&sec, &pub, sheEncWithZkpEq, sheVerifyZkpEq); + + shePrecomputedPublicKey *ppub = shePrecomputedPublicKeyCreate(); + CYBOZU_TEST_EQUAL(shePrecomputedPublicKeyInit(ppub, &pub), 0); + + ZkpEqTest(&sec, ppub, shePrecomputedPublicKeyEncWithZkpEq, shePrecomputedPublicKeyVerifyZkpEq); + + shePrecomputedPublicKeyDestroy(ppub); +} + +CYBOZU_TEST_AUTO(finalExp) +{ + sheSecretKey sec; + sheSecretKeySetByCSPRNG(&sec); + shePublicKey pub; + sheGetPublicKey(&pub, &sec); + const int64_t m11 = 5; + const int64_t m12 = 7; + const int64_t m21 = -3; + const int64_t m22 = 9; + sheCipherTextG1 c11, c12; + sheCipherTextG2 c21, c22; + sheCipherTextGT ct1, ct2; + sheCipherTextGT ct; + sheEncG1(&c11, &pub, m11); + sheEncG1(&c12, &pub, m12); + sheEncG2(&c21, &pub, m21); + sheEncG2(&c22, &pub, m22); + + int64_t dec; + // sheMul = sheMulML + sheFinalExpGT + sheMul(&ct1, &c11, &c21); + CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct1), 0); + CYBOZU_TEST_EQUAL(dec, m11 * m21); + + sheMulML(&ct1, &c11, &c21); + sheFinalExpGT(&ct, &ct1); + CYBOZU_TEST_EQUAL(sheDecGT(&dec, 
&sec, &ct), 0); + CYBOZU_TEST_EQUAL(dec, m11 * m21); + + sheMulML(&ct2, &c12, &c22); + sheFinalExpGT(&ct, &ct2); + CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0); + CYBOZU_TEST_EQUAL(dec, m12 * m22); + + /* + Mul(c11, c21) + Mul(c21, c22) + = finalExp(ML(c11, c21) + ML(c21, c22)) + */ + sheAddGT(&ct, &ct1, &ct2); + sheFinalExpGT(&ct, &ct); + CYBOZU_TEST_EQUAL(sheDecGT(&dec, &sec, &ct), 0); + CYBOZU_TEST_EQUAL(dec, (m11 * m21) + (m12 * m22)); +} + +int g_hashBitSize = 8; +std::string g_tableName; + +CYBOZU_TEST_AUTO(saveLoad) +{ + sheSecretKey sec; + sheSecretKeySetByCSPRNG(&sec); + shePublicKey pub; + sheGetPublicKey(&pub, &sec); + const size_t hashSize = 1 << g_hashBitSize; + const size_t byteSizePerEntry = 8; + sheSetRangeForGTDLP(hashSize); + std::string buf; + buf.resize(hashSize * byteSizePerEntry + 1024); + const size_t n1 = sheSaveTableForGTDLP(&buf[0], buf.size()); + CYBOZU_TEST_ASSERT(n1 > 0); + if (!g_tableName.empty()) { + printf("use table=%s\n", g_tableName.c_str()); + std::ofstream ofs(g_tableName.c_str(), std::ios::binary); + ofs.write(buf.c_str(), n1); + } + const int64_t m = hashSize - 1; + sheCipherTextGT ct; + CYBOZU_TEST_ASSERT(sheEncGT(&ct, &pub, m) == 0); + sheSetRangeForGTDLP(1); + sheSetTryNum(1); + int64_t dec = 0; + CYBOZU_TEST_ASSERT(sheDecGT(&dec, &sec, &ct) != 0); + if (!g_tableName.empty()) { + std::ifstream ifs(g_tableName.c_str(), std::ios::binary); + buf.clear(); + buf.resize(n1); + ifs.read(&buf[0], n1); + } + const size_t n2 = sheLoadTableForGTDLP(&buf[0], n1); + CYBOZU_TEST_ASSERT(n2 > 0); + CYBOZU_TEST_ASSERT(sheDecGT(&dec, &sec, &ct) == 0); + CYBOZU_TEST_EQUAL(dec, m); +} + +int main(int argc, char *argv[]) + try +{ + cybozu::Option opt; + opt.appendOpt(&g_hashBitSize, 8, "bit", ": hashBitSize"); + opt.appendOpt(&g_tableName, "", "f", ": table name"); + opt.appendHelp("h", ": show this message"); + if (!opt.parse(argc, argv)) { + opt.usage(); + return 1; + } + return cybozu::test::autoRun.run(argc, argv); +} catch (std::exception& e) { + printf("ERR %s\n", e.what()); + return 1; +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/she_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/she_test.cpp new file mode 100644 index 000000000..9292c35f4 --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/she_test.cpp @@ -0,0 +1,756 @@ +#define PUT(x) std::cout << #x << "=" << (x) << std::endl; +#include +#include +#include +#include +#include +#include +#include // for secp192k1 + +using namespace mcl::she; + +SecretKey g_sec; + +CYBOZU_TEST_AUTO(log) +{ +#if MCLBN_FP_UNIT_SIZE == 4 + const mcl::CurveParam& cp = mcl::BN254; + puts("BN254"); +#elif MCLBN_FP_UNIT_SIZE == 6 + const mcl::CurveParam& cp = mcl::BN381_1; + puts("BN381_1"); +#elif MCLBN_FP_UNIT_SIZE == 8 + const mcl::CurveParam& cp = mcl::BN462; + puts("BN462"); +#endif + init(cp); + G1 P; + hashAndMapToG1(P, "abc"); + for (int i = -5; i < 5; i++) { + G1 iP; + G1::mul(iP, P, i); + CYBOZU_TEST_EQUAL(mcl::she::local::log(P, iP), i); + } +} + +//#define PAPER +#ifdef PAPER +double clk2msec(const cybozu::CpuClock& clk, int n) +{ + const double rate = (1 / 3.4e9) * 1.e3; // 3.4GHz + return clk.getClock() / (double)clk.getCount() / n * rate; +} + +CYBOZU_TEST_AUTO(bench2) +{ + puts("msec"); + setTryNum(1 << 16); + useDecG1ViaGT(true); + useDecG2ViaGT(true); +#if 0 + setRangeForDLP(1 << 21); +#else + { + const char *tblName = "../she-dlp-table/she-dlp-0-20-gt.bin"; + std::ifstream ifs(tblName, std::ios::binary); + getHashTableGT().load(ifs); + } +#endif + SecretKey sec; + sec.setByCSPRNG(); + 
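+	// G1/G2 decryption is routed through GT (useDecG1ViaGT/useDecG2ViaGT
+	// above), so the one precomputed GT discrete-log table serves all levels.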
PublicKey pub; + sec.getPublicKey(pub); + PrecomputedPublicKey ppub; + ppub.init(pub); + const int C = 500; + double t1, t2; + int64_t m = (1ll << 31) - 12345; + CipherTextG1 c1, d1; + CipherTextG2 c2, d2; + CipherTextGT ct, dt; + CYBOZU_BENCH_C("", C, ppub.enc, c1, m); + t1 = clk2msec(cybozu::bench::g_clk, C); + CYBOZU_TEST_EQUAL(sec.dec(c1), m); + + CYBOZU_BENCH_C("", C, ppub.enc, c2, m); + t2 = clk2msec(cybozu::bench::g_clk, C); + CYBOZU_TEST_EQUAL(sec.dec(c2), m); + printf("Enc G1 %.2e\n", t1); + printf("Enc G2 %.2e\n", t2); + printf("Enc L1(G1+G2) %.2e\n", t1 + t2); + + CYBOZU_BENCH_C("", C, ppub.enc, ct, m); + t1 = clk2msec(cybozu::bench::g_clk, C); + CYBOZU_TEST_EQUAL(sec.dec(ct), m); + printf("Enc L2 %.2e\n", t1); + + CYBOZU_BENCH_C("", C, sec.dec, c1); + t1 = clk2msec(cybozu::bench::g_clk, C); + printf("Dec L1 %.2e\n", t1); + + CYBOZU_BENCH_C("", C, sec.dec, ct); + t1 = clk2msec(cybozu::bench::g_clk, C); + printf("Dec L2 %.2e\n", t1); + + pub.enc(ct, 1234); + CYBOZU_BENCH_C("", C, sec.dec, ct); + t1 = clk2msec(cybozu::bench::g_clk, C); + printf("Dec L2(small) %.2e\n", t1); + + CYBOZU_BENCH_C("", C, add, d1, d1, c1); + t1 = clk2msec(cybozu::bench::g_clk, C); + + CYBOZU_BENCH_C("", C, add, d2, d2, c2); + t2 = clk2msec(cybozu::bench::g_clk, C); + printf("Add G1 %.2e\n", t1); + printf("Add G2 %.2e\n", t2); + printf("Add L1(G1+G2) %.2e\n", t1 + t2); + + CYBOZU_BENCH_C("", C, add, dt, dt, ct); + t1 = clk2msec(cybozu::bench::g_clk, C); + printf("Add L2 %.2e\n", t1); + + CYBOZU_BENCH_C("", C, mul, ct, c1, c2); + t1 = clk2msec(cybozu::bench::g_clk, C); + printf("Mul %.2e\n", t1); + + CYBOZU_BENCH_C("", C, ppub.reRand, c1); + t1 = clk2msec(cybozu::bench::g_clk, C); + CYBOZU_BENCH_C("", C, ppub.reRand, c2); + t2 = clk2msec(cybozu::bench::g_clk, C); + printf("ReRand G1 %.2e\n", t1); + printf("ReRand G2 %.2e\n", t2); + printf("ReRand L1(G1+G2) %.2e\n", t1 + t2); + + CYBOZU_BENCH_C("", C, ppub.reRand, ct); + t1 = clk2msec(cybozu::bench::g_clk, C); + printf("ReRand L2 %.2e\n", t1); +} +#endif + +template +void GAHashTableTest(int maxSize, int tryNum, const G& P, const HashTbl& hashTbl) +{ + for (int i = -maxSize; i <= maxSize; i++) { + G xP; + G::mul(xP, P, i); + CYBOZU_TEST_EQUAL(hashTbl.basicLog(xP), i); + } + for (int i = -maxSize * tryNum; i <= maxSize * tryNum; i++) { + G xP; + G::mul(xP, P, i); + CYBOZU_TEST_EQUAL(hashTbl.log(xP), i); + } +} + +template +void HashTableTest(const G& P) +{ + mcl::she::local::HashTable hashTbl, hashTbl2; + const int maxSize = 100; + const int tryNum = 3; + hashTbl.init(P, maxSize, tryNum); + GAHashTableTest(maxSize, tryNum, P, hashTbl); + std::stringstream ss; + hashTbl.save(ss); + hashTbl2.load(ss); + GAHashTableTest(maxSize, tryNum, P, hashTbl2); +} + +CYBOZU_TEST_AUTO(HashTable) +{ + G1 P; + hashAndMapToG1(P, "abc"); + G2 Q; + hashAndMapToG2(Q, "abc"); + HashTableTest(P); + HashTableTest(Q); +} + +template +void GTHashTableTest(int maxSize, int tryNum, const GT& g, const HashTbl& hashTbl) +{ + for (int i = -maxSize; i <= maxSize; i++) { + GT gx; + GT::pow(gx, g, i); + CYBOZU_TEST_EQUAL(hashTbl.basicLog(gx), i); + } + for (int i = -maxSize * tryNum; i <= maxSize * tryNum; i++) { + GT gx; + GT::pow(gx, g, i); + CYBOZU_TEST_EQUAL(hashTbl.log(gx), i); + } +} + +CYBOZU_TEST_AUTO(GTHashTable) +{ + mcl::she::local::HashTable hashTbl, hashTbl2; + GT g; + { + G1 P; + hashAndMapToG1(P, "abc"); + G2 Q; + hashAndMapToG2(Q, "abc"); + pairing(g, P, Q); + } + const int maxSize = 100; + const int tryNum = 3; + hashTbl.init(g, maxSize, tryNum); + GTHashTableTest(maxSize, 
tryNum, g, hashTbl); + std::stringstream ss; + hashTbl.save(ss); + hashTbl2.load(ss); + GTHashTableTest(maxSize, tryNum, g, hashTbl2); +} + +CYBOZU_TEST_AUTO(enc_dec) +{ + SecretKey& sec = g_sec; + sec.setByCSPRNG(); + setRangeForDLP(1024); + PublicKey pub; + sec.getPublicKey(pub); + CipherText c; + for (int i = -5; i < 5; i++) { + pub.enc(c, i); + CYBOZU_TEST_EQUAL(sec.dec(c), i); + pub.reRand(c); + CYBOZU_TEST_EQUAL(sec.dec(c), i); + } + PrecomputedPublicKey ppub; + ppub.init(pub); + CipherTextG1 c1; + CipherTextG2 c2; + CipherTextGT ct1, ct2; + for (int i = -5; i < 5; i++) { + pub.enc(ct1, i); + CYBOZU_TEST_EQUAL(sec.dec(ct1), i); + CYBOZU_TEST_EQUAL(sec.isZero(ct1), i == 0); + ppub.enc(ct2, i); + CYBOZU_TEST_EQUAL(sec.dec(ct2), i); + ppub.enc(c1, i); + CYBOZU_TEST_EQUAL(sec.dec(c1), i); + CYBOZU_TEST_EQUAL(sec.decViaGT(c1), i); + CYBOZU_TEST_EQUAL(sec.isZero(c1), i == 0); + ct1.clear(); + pub.convert(ct1, c1); + CYBOZU_TEST_EQUAL(sec.dec(ct1), i); + ppub.enc(c2, i); + CYBOZU_TEST_EQUAL(sec.dec(c2), i); + CYBOZU_TEST_EQUAL(sec.decViaGT(c2), i); + CYBOZU_TEST_EQUAL(sec.isZero(c2), i == 0); + ct1.clear(); + pub.convert(ct1, c2); + CYBOZU_TEST_EQUAL(sec.dec(ct1), i); + pub.enc(c, i); + CYBOZU_TEST_EQUAL(sec.isZero(c), i == 0); + } +} + +template +void ZkpBinTest(const SecretKey& sec, const PK& pub) +{ + CT c; + ZkpBin zkp; + for (int m = 0; m < 2; m++) { + pub.encWithZkpBin(c, zkp, m); + CYBOZU_TEST_EQUAL(sec.dec(c), m); + CYBOZU_TEST_ASSERT(pub.verify(c, zkp)); + zkp.d_[0] += 1; + CYBOZU_TEST_ASSERT(!pub.verify(c, zkp)); + } + CYBOZU_TEST_EXCEPTION(pub.encWithZkpBin(c, zkp, 2), cybozu::Exception); +} +CYBOZU_TEST_AUTO(ZkpBin) +{ + const SecretKey& sec = g_sec; + PublicKey pub; + sec.getPublicKey(pub); + ZkpBinTest(sec, pub); + ZkpBinTest(sec, pub); + + PrecomputedPublicKey ppub; + ppub.init(pub); + ZkpBinTest(sec, ppub); + ZkpBinTest(sec, ppub); +} + +template +void ZkpEqTest(const SecretKey& sec, const PubT& pub) +{ + CipherTextG1 c1; + CipherTextG2 c2; + ZkpEq zkp; + for (int m = -4; m < 4; m++) { + pub.encWithZkpEq(c1, c2, zkp, m); + CYBOZU_TEST_EQUAL(sec.dec(c1), m); + CYBOZU_TEST_EQUAL(sec.dec(c2), m); + CYBOZU_TEST_ASSERT(pub.verify(c1, c2, zkp)); + zkp.d_[0] += 1; + CYBOZU_TEST_ASSERT(!pub.verify(c1, c2, zkp)); + } +} + +CYBOZU_TEST_AUTO(ZkpEq) +{ + const SecretKey& sec = g_sec; + PublicKey pub; + sec.getPublicKey(pub); + PrecomputedPublicKey ppub; + ppub.init(pub); + ZkpEqTest(sec, pub); + ZkpEqTest(sec, ppub); +} + +template +void ZkpBinEqTest(const SecretKey& sec, const PK& pub) +{ + CipherTextG1 c1; + CipherTextG2 c2; + ZkpBinEq zkp; + for (int m = 0; m < 2; m++) { + pub.encWithZkpBinEq(c1, c2, zkp, m); + CYBOZU_TEST_EQUAL(sec.dec(c1), m); + CYBOZU_TEST_EQUAL(sec.dec(c2), m); + CYBOZU_TEST_ASSERT(pub.verify(c1, c2, zkp)); + zkp.d_[0] += 1; + CYBOZU_TEST_ASSERT(!pub.verify(c1, c2, zkp)); + } + CYBOZU_TEST_EXCEPTION(pub.encWithZkpBinEq(c1, c2, zkp, 2), cybozu::Exception); +} + +CYBOZU_TEST_AUTO(ZkpBinEq) +{ + const SecretKey& sec = g_sec; + PublicKey pub; + sec.getPublicKey(pub); + ZkpBinEqTest(sec, pub); + + PrecomputedPublicKey ppub; + ppub.init(pub); + ZkpBinEqTest(sec, ppub); +} + +CYBOZU_TEST_AUTO(add_sub_mul) +{ + const SecretKey& sec = g_sec; + PublicKey pub; + sec.getPublicKey(pub); + for (int m1 = -5; m1 < 5; m1++) { + for (int m2 = -5; m2 < 5; m2++) { + CipherText c1, c2, c3; + pub.enc(c1, m1); + pub.enc(c2, m2); + add(c3, c1, c2); + CYBOZU_TEST_EQUAL(m1 + m2, sec.dec(c3)); + + pub.reRand(c3); + CYBOZU_TEST_EQUAL(m1 + m2, sec.dec(c3)); + + sub(c3, c1, c2); + 
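+ // below: sub must decrypt to m1 - m2; mul(c3, c1, int) scales by a plaintext
+ // constant and stays at level 1, while mul(c3, c1, c2) forms the L2 product
+ // m1 * m2 (decomposed into a Miller loop plus final exponentiation by the
+ // finalExp test further down).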
CYBOZU_TEST_EQUAL(m1 - m2, sec.dec(c3)); + + mul(c3, c1, 5); + CYBOZU_TEST_EQUAL(m1 * 5, sec.dec(c3)); + mul(c3, c1, -123); + CYBOZU_TEST_EQUAL(m1 * -123, sec.dec(c3)); + + mul(c3, c1, c2); + CYBOZU_TEST_EQUAL(m1 * m2, sec.dec(c3)); + + pub.reRand(c3); + CYBOZU_TEST_EQUAL(m1 * m2, sec.dec(c3)); + + CipherText::mul(c3, c3, -25); + CYBOZU_TEST_EQUAL(m1 * m2 * -25, sec.dec(c3)); + + pub.enc(c1, m1, true); + CYBOZU_TEST_EQUAL(m1, sec.dec(c1)); + pub.enc(c2, m2, true); + add(c3, c1, c2); + CYBOZU_TEST_EQUAL(m1 + m2, sec.dec(c3)); + } + } +} + +CYBOZU_TEST_AUTO(largeEnc) +{ + const SecretKey& sec = g_sec; + PublicKey pub; + sec.getPublicKey(pub); + + Fr x; + x.setRand(); + CipherTextG1 c1, c2; + pub.enc(c1, x); + const int64_t m = 123; + pub.enc(c2, x + m); + sub(c1, c1, c2); + CYBOZU_TEST_EQUAL(sec.dec(c1), -m); + + pub.enc(c1, 0); + mul(c1, c1, x); + CYBOZU_TEST_ASSERT(sec.isZero(c1)); + pub.enc(c1, 1); + mul(c1, c1, x); + CYBOZU_TEST_ASSERT(!sec.isZero(c1)); +} + +CYBOZU_TEST_AUTO(add_mul_add_sub) +{ + const SecretKey& sec = g_sec; + PublicKey pub; + sec.getPublicKey(pub); + int m[8] = { 1, -2, 3, 4, -5, 6, -7, 8 }; + CipherText c[8]; + for (int i = 0; i < 8; i++) { + pub.enc(c[i], m[i]); + CYBOZU_TEST_EQUAL(sec.dec(c[i]), m[i]); + CYBOZU_TEST_ASSERT(!c[i].isMultiplied()); + CipherText mc; + pub.convert(mc, c[i]); + CYBOZU_TEST_ASSERT(mc.isMultiplied()); + CYBOZU_TEST_EQUAL(sec.dec(mc), m[i]); + } + int ok1 = (m[0] + m[1]) * (m[2] + m[3]); + int ok2 = (m[4] + m[5]) * (m[6] + m[7]); + int ok = ok1 + ok2; + for (int i = 0; i < 4; i++) { + c[i * 2].add(c[i * 2 + 1]); + CYBOZU_TEST_EQUAL(sec.dec(c[i * 2]), m[i * 2] + m[i * 2 + 1]); + } + c[0].mul(c[2]); + CYBOZU_TEST_EQUAL(sec.dec(c[0]), ok1); + c[4].mul(c[6]); + CYBOZU_TEST_EQUAL(sec.dec(c[4]), ok2); + c[0].add(c[4]); + CYBOZU_TEST_EQUAL(sec.dec(c[0]), ok); + c[0].sub(c[4]); + CYBOZU_TEST_EQUAL(sec.dec(c[0]), ok1); +} + +CYBOZU_TEST_AUTO(finalExp) +{ + const SecretKey& sec = g_sec; + PublicKey pub; + sec.getPublicKey(pub); + const int64_t m11 = 5; + const int64_t m12 = 3; + const int64_t m21 = -2; + const int64_t m22 = 9; + CipherTextG1 c11, c12; + CipherTextG2 c21, c22; + CipherTextGT ct1, ct2, ct; + pub.enc(c11, m11); + pub.enc(c12, m12); + pub.enc(c21, m21); + pub.enc(c22, m22); + CipherTextGT::mulML(ct1, c11, c21); + CipherTextGT::finalExp(ct, ct1); + CYBOZU_TEST_EQUAL(sec.dec(ct), m11 * m21); + CipherTextGT::mulML(ct2, c12, c22); + CipherTextGT::finalExp(ct, ct2); + CYBOZU_TEST_EQUAL(sec.dec(ct), m12 * m22); + CipherTextGT::add(ct1, ct1, ct2); + CipherTextGT::finalExp(ct1, ct1); + CYBOZU_TEST_EQUAL(sec.dec(ct1), (m11 * m21) + (m12 * m22)); +} + +CYBOZU_TEST_AUTO(innerProduct) +{ + const SecretKey& sec = g_sec; + PublicKey pub; + sec.getPublicKey(pub); + + cybozu::XorShift rg; + const size_t n = 1000; + std::vector v1, v2; + std::vector c1, c2; + v1.resize(n); + v2.resize(n); + c1.resize(n); + c2.resize(n); + int innerProduct = 0; + for (size_t i = 0; i < n; i++) { + v1[i] = rg() % 2; + v2[i] = rg() % 2; + innerProduct += v1[i] * v2[i]; + pub.enc(c1[i], v1[i]); + pub.enc(c2[i], v2[i]); + } + CipherText c, t; + CipherText::mul(c, c1[0], c2[0]); + for (size_t i = 1; i < n; i++) { + CipherText::mul(t, c1[i], c2[i]); + c.add(t); + } + CYBOZU_TEST_EQUAL(innerProduct, sec.dec(c)); +} + +template +T testIo(const T& x) +{ + std::stringstream ss; + ss << x; + T y; + ss >> y; + CYBOZU_TEST_EQUAL(x, y); + return y; +} + +CYBOZU_TEST_AUTO(io) +{ + setRangeForDLP(100); + int64_t m; + for (int i = 0; i < 2; i++) { + if (i == 1) { + 
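+ // second pass: switch Fp/G1 to binary serialization (mcl::IoSerialize)
+ // and re-run the same testIo() round trips.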
Fp::setIoMode(mcl::IoSerialize); + G1::setIoMode(mcl::IoSerialize); + } + SecretKey sec; + sec.setByCSPRNG(); + testIo(sec); + PublicKey pub; + sec.getPublicKey(pub); + testIo(pub); + CipherTextG1 g1; + pub.enc(g1, 3); + m = sec.dec(testIo(g1)); + CYBOZU_TEST_EQUAL(m, 3); + CipherTextG2 g2; + pub.enc(g2, 5); + testIo(g2); + CipherTextA ca; + pub.enc(ca, -4); + m = sec.dec(testIo(ca)); + CYBOZU_TEST_EQUAL(m, -4); + CipherTextGT ct; + CipherTextGT::mul(ct, g1, g2); + m = sec.dec(testIo(ct)); + CYBOZU_TEST_EQUAL(m, 15); + } +} + +#ifndef PAPER +CYBOZU_TEST_AUTO(bench) +{ + const SecretKey& sec = g_sec; + PublicKey pub; + sec.getPublicKey(pub); + CipherText c1, c2, c3; + CYBOZU_BENCH("enc", pub.enc, c1, 5); + pub.enc(c2, 4); + CYBOZU_BENCH("add", c1.add, c2); + CYBOZU_BENCH("mul", CipherText::mul, c3, c1, c2); + pub.enc(c1, 5); + pub.enc(c2, 4); + c1.mul(c2); + CYBOZU_BENCH("dec", sec.dec, c1); + c2 = c1; + CYBOZU_BENCH("add after mul", c1.add, c2); +} +#endif + +CYBOZU_TEST_AUTO(saveHash) +{ + mcl::she::local::HashTable hashTbl1, hashTbl2; + hashTbl1.init(SHE::P_, 1234, 123); + std::stringstream ss; + hashTbl1.save(ss); + hashTbl2.load(ss); + CYBOZU_TEST_ASSERT(hashTbl1 == hashTbl2); +} + +static inline void putK(double t) { printf("%.2e\n", t * 1e-3); } + +template +void decBench(const char *msg, int C, const SecretKey& sec, const PublicKey& pub, int64_t (SecretKey::*dec)(const CT& c) const = &SecretKey::dec) +{ + int64_t begin = 1 << 20; + int64_t end = 1LL << 32; + while (begin < end) { + CT c; + int64_t x = begin - 1; + pub.enc(c, x); + printf("m=%08x ", (uint32_t)x); + CYBOZU_BENCH_C(msg, C, (sec.*dec), c); + CYBOZU_TEST_EQUAL((sec.*dec)(c), x); + begin *= 2; + } + int64_t mTbl[] = { -0x80000003ll, 0x80000000ll, 0x80000005ll }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(mTbl); i++) { + int64_t m = mTbl[i]; + CT c; + pub.enc(c, m); + CYBOZU_TEST_EQUAL((sec.*dec)(c), m); + } +} + +#ifndef PAPER +CYBOZU_TEST_AUTO(hashBench) +{ + SecretKey& sec = g_sec; + sec.setByCSPRNG(); + const int C = 500; + const size_t hashSize = 1u << 21; + + clock_t begin = clock(), end; + setRangeForG1DLP(hashSize); + end = clock(); + printf("init G1 DLP %f\n", double(end - begin) / CLOCKS_PER_SEC); + begin = end; + setRangeForG2DLP(hashSize); + end = clock(); + printf("init G2 DLP %f\n", double(end - begin) / CLOCKS_PER_SEC); + begin = end; + setRangeForGTDLP(hashSize); + end = clock(); + printf("init GT DLP %f\n", double(end - begin) / CLOCKS_PER_SEC); + + PublicKey pub; + sec.getPublicKey(pub); + PrecomputedPublicKey ppub; + ppub.init(pub); + puts("Kclk"); + cybozu::bench::setPutCallback(putK); + decBench("decG1", C, sec, pub); + puts(""); + decBench("decG2", C, sec, pub); + puts(""); + decBench("decGT", C, sec, pub); + puts(""); + decBench("decG1ViaGT", C, sec, pub, &SecretKey::decViaGT); + puts(""); + decBench("decG2ViaGT", C, sec, pub, &SecretKey::decViaGT); + + G1 P, P2; + G2 Q, Q2; + GT e, e2; + mpz_class mr; + { + Fr r; + r.setRand(); + mr = r.getMpz(); + } + hashAndMapToG1(P, "abc"); + hashAndMapToG2(Q, "abc"); + pairing(e, P, Q); + P2.clear(); + Q2.clear(); + e2 = 1; + + printf("large m\n"); + CYBOZU_BENCH_C("G1::add ", C, G1::add, P2, P2, P); + CYBOZU_BENCH_C("G1::mul ", C, G1::mul, P, P, mr); + CYBOZU_BENCH_C("G2::add ", C, G2::add, Q2, Q2, Q); + CYBOZU_BENCH_C("G2::mul ", C, G2::mul, Q, Q, mr); + CYBOZU_BENCH_C("GT::mul ", C, GT::mul, e2, e2, e); + CYBOZU_BENCH_C("GT::pow ", C, GT::pow, e, e, mr); + CYBOZU_BENCH_C("G1window", C, SHE::PhashTbl_.mulByWindowMethod, P2, mr); + CYBOZU_BENCH_C("G2window", 
C, SHE::QhashTbl_.mulByWindowMethod, Q2, mr); + CYBOZU_BENCH_C("GTwindow", C, SHE::ePQhashTbl_.mulByWindowMethod, e, mr); +#if 1 + typedef mcl::GroupMtoA<GT> AG; + mcl::fp::WindowMethod<AG> wm; + wm.init(static_cast<AG&>(e), Fr::getBitSize(), 10); + for (int i = 0; i < 100; i++) { + GT t1, t2; + GT::pow(t1, e, i); + wm.mul(static_cast<AG&>(t2), i); + CYBOZU_TEST_EQUAL(t1, t2); + } +// CYBOZU_BENCH_C("GTwindow", C, wm.mul, static_cast<AG&>(e), mr); +#endif + + CYBOZU_BENCH_C("miller  ", C, millerLoop, e, P, Q); + CYBOZU_BENCH_C("finalExp", C, finalExp, e, e); + CYBOZU_BENCH_C("precomML", C, precomputedMillerLoop, e, P, SHE::Qcoeff_); + + CipherTextG1 c1; + CipherTextG2 c2; + CipherTextGT ct; + + int m = int(hashSize - 1); + printf("small m = %d\n", m); + CYBOZU_BENCH_C("G1::mul ", C, G1::mul, P, P, m); + CYBOZU_BENCH_C("G2::mul ", C, G2::mul, Q, Q, m); + CYBOZU_BENCH_C("GT::pow ", C, GT::pow, e, e, m); + CYBOZU_BENCH_C("G1window", C, SHE::PhashTbl_.mulByWindowMethod, P2, m); + CYBOZU_BENCH_C("G2window", C, SHE::QhashTbl_.mulByWindowMethod, Q2, m); + CYBOZU_BENCH_C("GTwindow", C, SHE::ePQhashTbl_.mulByWindowMethod, e, m); +// CYBOZU_BENCH_C("GTwindow", C, wm.mul, static_cast<AG&>(e), m); + + CYBOZU_BENCH_C("encG1   ", C, pub.enc, c1, m); + CYBOZU_BENCH_C("encG2   ", C, pub.enc, c2, m); + CYBOZU_BENCH_C("encGT   ", C, pub.enc, ct, m); + CYBOZU_BENCH_C("encG1pre", C, ppub.enc, c1, m); + CYBOZU_BENCH_C("encG2pre", C, ppub.enc, c2, m); + CYBOZU_BENCH_C("encGTpre", C, ppub.enc, ct, m); + + CYBOZU_BENCH_C("decG1   ", C, sec.dec, c1); + CYBOZU_BENCH_C("decG2   ", C, sec.dec, c2); + CYBOZU_BENCH_C("decGT   ", C, sec.dec, ct); + + CYBOZU_BENCH_C("CT:mul  ", C, CipherTextGT::mul, ct, c1, c2); + CYBOZU_BENCH_C("CT:mulML", C, CipherTextGT::mulML, ct, c1, c2); + CYBOZU_BENCH_C("CT:finalExp", C, CipherTextGT::finalExp, ct, ct); + + CYBOZU_BENCH_C("addG1   ", C, CipherTextG1::add, c1, c1, c1); + CYBOZU_BENCH_C("addG2   ", C, CipherTextG2::add, c2, c2, c2); + CYBOZU_BENCH_C("addGT   ", C, CipherTextGT::add, ct, ct, ct); + CYBOZU_BENCH_C("reRandG1", C, pub.reRand, c1); + CYBOZU_BENCH_C("reRandG2", C, pub.reRand, c2); + CYBOZU_BENCH_C("reRandGT", C, pub.reRand, ct); + CYBOZU_BENCH_C("reRandG1pre", C, ppub.reRand, c1); + CYBOZU_BENCH_C("reRandG2pre", C, ppub.reRand, c2); + CYBOZU_BENCH_C("reRandGTpre", C, ppub.reRand, ct); + CYBOZU_BENCH_C("mulG1   ", C, CipherTextG1::mul, c1, c1, m); + CYBOZU_BENCH_C("mulG2   ", C, CipherTextG2::mul, c2, c2, m); + CYBOZU_BENCH_C("mulGT   ", C, CipherTextGT::mul, ct, ct, m); + + CYBOZU_BENCH_C("convG1toGT", C, pub.convert, ct, c1); + CYBOZU_BENCH_C("convG2toGT", C, pub.convert, ct, c2); +} +#endif + +CYBOZU_TEST_AUTO(liftedElGamal) +{ + const size_t hashSize = 1024; + initG1only(mcl::ecparam::secp192k1, hashSize); + const size_t byteSize = 192 / 8; + SecretKey sec; + sec.setByCSPRNG(); + PublicKey pub; + sec.getPublicKey(pub); + CipherTextG1 c1, c2, c3; + int m1 = 12, m2 = 34; + pub.enc(c1, m1); + pub.enc(c2, m2); + CYBOZU_TEST_EQUAL(sec.dec(c1), m1); + CYBOZU_TEST_EQUAL(sec.dec(c2), m2); + add(c3, c1, c2); + CYBOZU_TEST_EQUAL(sec.dec(c3), m1 + m2); + neg(c1, c2); + CYBOZU_TEST_EQUAL(sec.dec(c1), -m2); + mul(c1, c2, m1); + CYBOZU_TEST_EQUAL(sec.dec(c1), m2 * m1); + + char buf[1024]; + size_t n = sec.serialize(buf, sizeof(buf)); + CYBOZU_TEST_EQUAL(n, byteSize); + SecretKey sec2; + n = sec2.deserialize(buf, n); + CYBOZU_TEST_EQUAL(n, byteSize); + CYBOZU_TEST_EQUAL(sec, sec2); + + n = pub.serialize(buf, sizeof(buf)); + CYBOZU_TEST_EQUAL(n, byteSize + 1); // +1 is for sign of y + PublicKey pub2; + n = pub2.deserialize(buf, n); +
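+ // secp192k1 gives a 192-bit field, hence byteSize = 24: the secret key is a
+ // single 24-byte scalar, and the public key a compressed point, 24 bytes of
+ // x plus one byte for the sign of y (checked below).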
CYBOZU_TEST_EQUAL(n, byteSize + 1); + CYBOZU_TEST_EQUAL(pub, pub2); + + PublicKey pub3; + sec2.getPublicKey(pub3); + CYBOZU_TEST_EQUAL(pub, pub3); +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/sq_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/sq_test.cpp new file mode 100644 index 000000000..4c386d23b --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/sq_test.cpp @@ -0,0 +1,21 @@ +#include +#include +#include + +CYBOZU_TEST_AUTO(sqrt) +{ + const int tbl[] = { 3, 5, 7, 11, 13, 17, 19, 257, 997, 1031 }; + mcl::SquareRoot sq; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const mpz_class p = tbl[i]; + sq.set(p); + for (mpz_class a = 0; a < p; a++) { + mpz_class x; + if (sq.get(x, a)) { + mpz_class y; + y = (x * x) % p; + CYBOZU_TEST_EQUAL(a, y); + } + } + } +} diff --git a/vendor/github.com/byzantine-lab/mcl/test/vint_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/vint_test.cpp new file mode 100644 index 000000000..15e14266a --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/vint_test.cpp @@ -0,0 +1,1353 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef DONT_USE_GMP_IN_TEST +#include +#endif + +#define PUT(x) std::cout << #x "=" << x << std::endl; + +#if defined(__EMSCRIPTEN__) && !defined(MCL_AVOID_EXCEPTION_TEST) + #define MCL_AVOID_EXCEPTION_TEST +#endif + +using namespace mcl; + +struct V { + int n; + unsigned int p[16]; +}; + +CYBOZU_TEST_AUTO(addSub) +{ + static const struct { + V a; + V b; + V c; + } tbl[] = { + { + { 1, { 123, } }, + { 1, { 456, } }, + { 1, { 579, } }, + }, + { + { 1, { 0xffffffff, } }, + { 1, { 3, } }, + { 2, { 2, 1 } }, + }, + { + { 3, { 0xffffffff, 1, 0xffffffff } }, + { 2, { 1, 0xfffffffe, } }, + { 4, { 0, 0, 0, 1 } }, + }, + { + { 3, { 0xffffffff, 5, 0xffffffff } }, + { 2, { 1, 0xfffffffe, } }, + { 4, { 0, 4, 0, 1 } }, + }, + { + { 3, { 0xffffffff, 5, 0xffffffff } }, + { 1, { 1, } }, + { 3, { 0, 6, 0xffffffff } }, + }, + { + { 3, { 1, 0xffffffff, 1 } }, + { 3, { 0xffffffff, 0, 1 } }, + { 3, { 0, 0, 3 } }, + }, + { + { 1, { 1 } }, + { 3, { 0xffffffff, 0xffffffff, 0xffffffff } }, + { 4, { 0, 0, 0, 1 } }, + }, + { + { 1, { 0xffffffff } }, + { 1, { 0xffffffff } }, + { 2, { 0xfffffffe, 1 } }, + }, + { + { 2, { 0xffffffff, 0xffffffff } }, + { 2, { 0xffffffff, 0xffffffff } }, + { 3, { 0xfffffffe, 0xffffffff, 1 } }, + }, + { + { 3, { 0xffffffff, 0xffffffff, 0xffffffff } }, + { 3, { 0xffffffff, 0xffffffff, 0xffffffff } }, + { 4, { 0xfffffffe, 0xffffffff, 0xffffffff, 1 } }, + }, + { + { 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } }, + { 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } }, + { 5, { 0xfffffffe, 0xffffffff, 0xffffffff, 0xffffffff, 1 } }, + }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + Vint x, y, z, t; + x.setArray(tbl[i].a.p, tbl[i].a.n); + y.setArray(tbl[i].b.p, tbl[i].b.n); + z.setArray(tbl[i].c.p, tbl[i].c.n); + Vint::add(t, x, y); + CYBOZU_TEST_EQUAL(t, z); + + Vint::add(t, y, x); + CYBOZU_TEST_EQUAL(t, z); + + Vint::sub(t, z, x); + CYBOZU_TEST_EQUAL(t, y); + } + { + const uint32_t in[] = { 0xffffffff, 0xffffffff }; + const uint32_t out[] = { 0xfffffffe, 0xffffffff, 1 }; + Vint x, y; + x.setArray(in, 2); + y.setArray(out, 3); + Vint::add(x, x, x); + CYBOZU_TEST_EQUAL(x, y); + Vint::sub(x, x, x); + y.clear(); + CYBOZU_TEST_EQUAL(x, y); + } + { + const uint32_t t0[] = {1, 2}; + const uint32_t t1[] = {3, 4, 5}; + const uint32_t t2[] = {4, 6, 5}; + Vint x, y, z; + z.setArray(t2, 3); + + x.setArray(t0, 2); + y.setArray(t1, 3); + 
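+ // the four add() calls below repeat the same sum with every aliasing
+ // pattern of destination and operands (x=x+y, x=y+x, y=x+y, y=y+x), so
+ // in-place addition must not clobber an operand before it is consumed.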
Vint::add(x, x, y); + CYBOZU_TEST_EQUAL(x, z); + + x.setArray(t0, 2); + y.setArray(t1, 3); + Vint::add(x, y, x); + CYBOZU_TEST_EQUAL(x, z); + + x.setArray(t0, 2); + y.setArray(t1, 3); + Vint::add(y, x, y); + CYBOZU_TEST_EQUAL(y, z); + + x.setArray(t0, 2); + y.setArray(t1, 3); + Vint::add(y, y, x); + CYBOZU_TEST_EQUAL(y, z); + } +} + +CYBOZU_TEST_AUTO(mul1) +{ + static const struct { + V a; + int b; + V c; + } tbl[] = { + { + { 1, { 12, } }, + 5, + { 1, { 60, } }, + }, + { + { 1, { 1234567, } }, + 1, + { 1, { 1234567, } }, + }, + { + { 1, { 1234567, } }, + 89012345, + { 2, { 0x27F6EDCF, 0x63F2, } }, + }, + { + { 3, { 0xffffffff, 0xffffffff, 0xffffffff, } }, + 0x7fffffff, + { 4, { 0x80000001, 0xffffffff, 0xffffffff, 0x7ffffffe } }, + }, + { + { 3, { 0xffffffff, 0xffffffff, 0xffffffff, } }, + 1, + { 3, { 0xffffffff, 0xffffffff, 0xffffffff, } }, + }, + { + { 2, { 0xffffffff, 1 } }, + 0x7fffffff, + { 2, { 0x80000001, 0xfffffffd } }, + }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + Vint x, z, t; + int y; + x.setArray(tbl[i].a.p, tbl[i].a.n); + y = tbl[i].b; + z.setArray(tbl[i].c.p, tbl[i].c.n); + Vint::mul(t, x, y); + CYBOZU_TEST_EQUAL(t, z); + + Vint::mul(x, x, y); + CYBOZU_TEST_EQUAL(x, z); + } +} + +CYBOZU_TEST_AUTO(mul2) +{ + static const struct { + V a; + V b; + V c; + } tbl[] = { + { + { 1, { 12, } }, + { 1, { 5, } }, + { 1, { 60, } }, + }, + { + { 1, { 1234567, } }, + { 1, { 89012345, } }, + { 2, { 0x27F6EDCF, 0x63F2, } }, + }, + { + { 3, { 0xffffffff, 0xffffffff, 0xffffffff, } }, + { 1, { 0xffffffff, } }, + { 4, { 0x00000001, 0xffffffff, 0xffffffff, 0xfffffffe } }, + }, + { + { 2, { 0xffffffff, 1 } }, + { 1, { 0xffffffff, } }, + { 3, { 0x00000001, 0xfffffffd, 1 } }, + }, + { + { 2, { 0xffffffff, 1 } }, + { 1, { 0xffffffff, } }, + { 3, { 0x00000001, 0xfffffffd, 1 } }, + }, + { + { 2, { 1, 1 } }, + { 2, { 1, 1 } }, + { 3, { 1, 2, 1 } }, + }, + { + { 3, { 0xffffffff, 0xffffffff, 1 } }, + { 2, { 0xffffffff, 0xffffffff } }, + { 5, { 1, 0, 0xfffffffd, 0xffffffff, 1 } }, + }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + Vint x, y, z, t; + x.setArray(tbl[i].a.p, tbl[i].a.n); + y.setArray(tbl[i].b.p, tbl[i].b.n); + z.setArray(tbl[i].c.p, tbl[i].c.n); + Vint::mul(t, x, y); + CYBOZU_TEST_EQUAL(t, z); + + Vint::mul(t, y, x); + CYBOZU_TEST_EQUAL(t, z); + } + { + const uint32_t in[] = { 0xffffffff, 1 }; + const uint32_t out[] = { 1, 0xfffffffc, 3 }; + Vint x, y, z; + y.setArray(out, 3); + x.setArray(in, 2); + z = x; + Vint::mul(x, x, x); + CYBOZU_TEST_EQUAL(x, y); + + x.setArray(in, 2); + Vint::mul(x, x, z); + CYBOZU_TEST_EQUAL(x, y); + + x.setArray(in, 2); + Vint::mul(x, z, x); + CYBOZU_TEST_EQUAL(x, y); + + x.setArray(in, 2); + Vint::mul(x, z, z); + CYBOZU_TEST_EQUAL(x, y); + } + { + Vint a("285434247217355341057"); + a *= a; + CYBOZU_TEST_EQUAL(a, Vint("81472709484538325259309302444004789877249")); + } +} + +CYBOZU_TEST_AUTO(div1) +{ + static const struct { + V a; + unsigned int b; + unsigned int r; + V c; + } tbl[] = { + { + { 1, { 100, } }, + 1, 0, + { 1, { 100, } }, + }, + { + { 1, { 100, } }, + 100, 0, + { 1, { 1, } }, + }, + { + { 1, { 100, } }, + 101, 100, + { 1, { 0, } }, + }, + { + { 1, { 100, } }, + 2, 0, + { 1, { 50, } }, + }, + { + { 1, { 100, } }, + 3, 1, + { 1, { 33, } }, + }, + { + { 2, { 0xffffffff, 0xffffffff } }, + 1, 0, + { 2, { 0xffffffff, 0xffffffff, } }, + }, + { + { 2, { 0xffffffff, 0xffffffff } }, + 123, 15, + { 2, { 0x4d0214d0, 0x214d021 } }, + }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + Vint x, z, t; + 
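+ // divMods1 divides by a single-unit divisor: the quotient goes to the first
+ // argument and the remainder is returned; both the out-of-place and the
+ // in-place (&x, x, b) forms are checked against the table.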
unsigned int b, r, u; + x.setArray(tbl[i].a.p, tbl[i].a.n); + b = tbl[i].b; + r = tbl[i].r; + z.setArray(tbl[i].c.p, tbl[i].c.n); + + u = (unsigned int)Vint::divMods1(&t, x, b); + CYBOZU_TEST_EQUAL(t, z); + CYBOZU_TEST_EQUAL(u, r); + + u = (unsigned int)Vint::divMods1(&x, x, b); + CYBOZU_TEST_EQUAL(x, z); + CYBOZU_TEST_EQUAL(u, r); + } +} + +CYBOZU_TEST_AUTO(div2) +{ + static const struct { + V x; + V y; + V q; + V r; + } tbl[] = { + { + { 1, { 100 } }, + { 1, { 3 } }, + { 1, { 33 } }, + { 1, { 1 } }, + }, + { + { 2, { 1, 1 } }, + { 2, { 0, 1 } }, + { 1, { 1 } }, + { 1, { 1 } }, + }, + { + { 2, { 0xffffffff, 0xffffffff } }, + { 2, { 0, 1 } }, + { 1, { 0xffffffff } }, + { 1, { 0xffffffff } }, + }, + { + { 2, { 0xffffffff, 0xffffffff } }, + { 2, { 0xffffffff, 1 } }, + { 1, { 0x80000000 } }, + { 1, { 0x7fffffff } }, + }, + { + { 3, { 0xffffffff, 0xffffffff, 0xffffffff } }, + { 2, { 0xffffffff, 1 } }, + { 2, { 0x40000000, 0x80000000 } }, + { 1, { 0x3fffffff } }, + }, + { + { 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } }, + { 3, { 1, 0, 1 } }, + { 2, { 0xffffffff, 0xffffffff } }, + { 1, { 0 } }, + }, + { + { 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } }, + { 3, { 1, 0xffffffff, 0xffffffff } }, + { 2, { 0, 1 } }, + { 2, { 0xffffffff, 0xfffffffe } }, + }, + { + { 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } }, + { 3, { 1, 0, 0xffffffff } }, + { 2, { 1, 1 } }, + { 2, { 0xfffffffe, 0xfffffffe } }, + }, + { + { 4, { 0xffffffff, 0xffffffff, 0xffffffff, 1 } }, + { 3, { 1, 0, 0xffffffff } }, + { 1, { 2 } }, + { 3, { 0xfffffffd, 0xffffffff, 1 } }, + }, + { + { 4, { 0, 0, 1, 1 } }, + { 2, { 1, 1 } }, + { 3, { 0, 0, 1 } }, + { 1, { 0 } }, + }, + { + { 3, { 5, 5, 1} }, + { 2, { 1, 2 } }, + { 1, { 0x80000002 } }, + { 1, { 0x80000003, } }, + }, + { + { 2, { 5, 5} }, + { 2, { 1, 1 } }, + { 1, { 5 } }, + { 1, { 0, } }, + }, + { + { 2, { 5, 5} }, + { 2, { 2, 1 } }, + { 1, { 4 } }, + { 1, { 0xfffffffd, } }, + }, + { + { 3, { 5, 0, 5} }, + { 3, { 2, 0, 1 } }, + { 1, { 4 } }, + { 2, { 0xfffffffd, 0xffffffff } }, + }, + { + { 2, { 4, 5 } }, + { 2, { 5, 5 } }, + { 1, { 0 } }, + { 2, { 4, 5 } }, + }, + { + { 1, { 123 } }, + { 2, { 1, 1 } }, + { 1, { 0 } }, + { 1, { 123 } }, + }, + { + { 1, { 123 } }, + { 3, { 1, 1, 1 } }, + { 1, { 0 } }, + { 1, { 123 } }, + }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + Vint x, y, q, r; + x.setArray(tbl[i].x.p, tbl[i].x.n); + y.setArray(tbl[i].y.p, tbl[i].y.n); + q.setArray(tbl[i].q.p, tbl[i].q.n); + r.setArray(tbl[i].r.p, tbl[i].r.n); + + Vint qt, rt; + Vint::quotRem(&qt, rt, x, y); + CYBOZU_TEST_EQUAL(qt, q); + CYBOZU_TEST_EQUAL(rt, r); + + Vint::mul(y, y, qt); + Vint::add(y, y, rt); + CYBOZU_TEST_EQUAL(x, y); + + x.setArray(tbl[i].x.p, tbl[i].x.n); + y.setArray(tbl[i].y.p, tbl[i].y.n); + Vint::quotRem(&x, rt, x, y); + CYBOZU_TEST_EQUAL(x, q); + CYBOZU_TEST_EQUAL(rt, r); + + x.setArray(tbl[i].x.p, tbl[i].x.n); + y.setArray(tbl[i].y.p, tbl[i].y.n); + Vint::quotRem(&y, rt, x, y); + CYBOZU_TEST_EQUAL(y, q); + CYBOZU_TEST_EQUAL(rt, r); + + x.setArray(tbl[i].x.p, tbl[i].x.n); + y.setArray(tbl[i].y.p, tbl[i].y.n); + Vint::quotRem(&x, y, x, y); + CYBOZU_TEST_EQUAL(x, q); + CYBOZU_TEST_EQUAL(y, r); + + x.setArray(tbl[i].x.p, tbl[i].x.n); + y.setArray(tbl[i].y.p, tbl[i].y.n); + Vint::quotRem(&y, x, x, y); + CYBOZU_TEST_EQUAL(y, q); + CYBOZU_TEST_EQUAL(x, r); + } + { + const uint32_t in[] = { 1, 1 }; + Vint x, y, z; + x.setArray(in, 2); + Vint::quotRem(&x, y, x, x); + z = 1; + CYBOZU_TEST_EQUAL(x, z); + z.clear(); + CYBOZU_TEST_EQUAL(y, z); + 
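+ // same fully-aliased x/x division with the output roles swapped: whichever
+ // output aliases the input, the quotient must be 1 and the remainder 0.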
+ Vint::quotRem(&y, x, x, x); + z = 1; + CYBOZU_TEST_EQUAL(y, z); + z.clear(); + CYBOZU_TEST_EQUAL(x, z); + } +} + +CYBOZU_TEST_AUTO(quotRem) +{ + const struct { + const char *x; + const char *y; + const char *r; + } tbl[] = { + { + "1448106640508192452750709206294683535529268965445799785581837640324321797831381715960812126274894517677713278300997728292641936248881345120394299128611830", + "82434016654300679721217353503190038836571781811386228921167322412819029493183", + "72416512377294697540770834088766459385112079195086911762075702918882982361282" + }, + { + "97086308670107713719105336221824613370040805954034005192338040686500414395543303807941158656814978071549225072789349941064484974666540443679601226744652", + "82434016654300679721217353503190038836571781811386228921167322412819029493183", + "41854959563040430269871677548536437787164514279279911478858426970427834388586", + }, + { + "726838724295606887174238120788791626017347752989142414466410919788841485181240131619880050064495352797213258935807786970844241989010252", + "82434016654300679721217353503190038836571781811386228921167322412819029493183", + "81378967132566843036693176764684783485107373533583677681931133755003929106966", + }, + { + "85319207237201203511459960875801690195851794174784746933408178697267695525099750", + "82434016654300679721217353503190038836571781811386228921167322412819029493183", + "82434016654300679721217353503190038836571781811386228921167322412819029148528", + }, + { + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0x100000000000000000000000000000000000000000000000001", + "1606938044258990275541962092341162602522202993782724115824640", + }, + { + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0x1000000000000000000000000000000000000000000000000000000000000000000000000000000001", + "34175792574734561318320347298712833833643272357332299899995954578095372295314880347335474659983360", + }, + { + "0xfffffffffffff000000000000000000000000000000000000000000000000000000000000000000", + "0x100000000000000000000000000000000000000000000000000000000000000000001", + "7558907585412001237250713901367146624661464598973016020495791084036551510708977665", + }, + { + "0xfffffffffffff000000000000000000000000000000000000000000000000000000000000000000", + "0xfffffffffffff0000000000000000000000000000000000000000000000000000000000000001", + "521481209941628322292632858916605385658190900090571826892867289394157573281830188869820088065", + }, + { + "0x1230000000000000456", + "0x1230000000000000457", + "0x1230000000000000456", + }, + { + "0x1230000000000000456", + "0x1230000000000000456", + "0", + }, + { + "0x1230000000000000456", + "0x1230000000000000455", + "1", + }, + { + "0x1230000000000000456", + "0x2000000000000000000", + "0x1230000000000000456", + }, + { + "0xffffffffffffffffffffffffffffffff", + "0x80000000000000000000000000000000", + "0x7fffffffffffffffffffffffffffffff", + }, + { + "0xffffffffffffffffffffffffffffffff", + "0x7fffffffffffffffffffffffffffffff", + "1", + }, + { + "0xffffffffffffffffffffffffffffffff", + "0x70000000000000000000000000000000", + "0x1fffffffffffffffffffffffffffffff", + }, + { + "0xffffffffffffffffffffffffffffffff", + "0x30000000000000000000000000000000", + "0x0fffffffffffffffffffffffffffffff", + }, + { + "0xffffffffffffffffffffffffffffffff", + "0x10000000000000000000000000000000", + "0x0fffffffffffffffffffffffffffffff", + }, + { + 
"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0x2523648240000001ba344d80000000086121000000000013a700000000000013", + "0x212ba4f27ffffff5a2c62effffffffcdb939ffffffffff8a15ffffffffffff8d", + }, + { + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0x2523648240000001ba344d8000000007ff9f800000000010a10000000000000d", + "0x212ba4f27ffffff5a2c62effffffffd00242ffffffffff9c39ffffffffffffb1", + }, + }; + mcl::Vint x, y, q, r1, r2; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + x.setStr(tbl[i].x); + y.setStr(tbl[i].y); + r1.setStr(tbl[i].r); + mcl::Vint::divMod(&q, r2, x, y); + CYBOZU_TEST_EQUAL(r1, r2); + CYBOZU_TEST_EQUAL(x, q * y + r2); + } +} + +CYBOZU_TEST_AUTO(string) +{ + const struct { + uint32_t v[5]; + size_t vn; + const char *str; + const char *hex; + const char *bin; + } tbl[] = { + { { 0 }, 0, "0", "0x0", "0b0" }, + { { 12345 }, 1, "12345", "0x3039", "0b11000000111001" }, + { { 0xffffffff }, 1, "4294967295", "0xffffffff", "0b11111111111111111111111111111111" }, + { { 0, 1 }, 2, "4294967296", "0x100000000", "0b100000000000000000000000000000000" }, + { { 0, 0, 0, 0, 1 }, 5, "340282366920938463463374607431768211456", "0x100000000000000000000000000000000", "0b100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" }, + { { 0, 0x0b22a000, 0xe2f768a0, 0xe086b93c, 0x2cd76f }, 5, "1000000000000000000000000000000000000000000000", "0x2cd76fe086b93ce2f768a00b22a00000000000", "0b101100110101110110111111100000100001101011100100111100111000101111011101101000101000000000101100100010101000000000000000000000000000000000000000000000" }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + Vint x, y; + x.setArray(tbl[i].v,tbl[i].vn); + CYBOZU_TEST_EQUAL(x.getStr(10), tbl[i].str); + char buf[1024]; + size_t n = x.getStr(buf, sizeof(buf), 10); + CYBOZU_TEST_ASSERT(n > 0); + CYBOZU_TEST_EQUAL(tbl[i].str, buf); + y.setStr(tbl[i].str); + CYBOZU_TEST_EQUAL(x.getStr(16), tbl[i].hex + 2); + n = x.getStr(buf, sizeof(buf), 16); + CYBOZU_TEST_ASSERT(n > 0); + CYBOZU_TEST_EQUAL(tbl[i].hex + 2, buf); + CYBOZU_TEST_EQUAL(x, y); + x = 1; + x.setStr(tbl[i].hex); + CYBOZU_TEST_EQUAL(x, y); + } +} + +CYBOZU_TEST_AUTO(shift) +{ + Vint x("123423424918471928374192874198274981274918274918274918243"); + Vint y, z; + + const size_t unitBitSize = Vint::unitBitSize; + Vint s; + // shl + for (size_t i = 1; i < 31; i++) { + Vint::shl(y, x, i); + z = x * (Vint::Unit(1) << i); + CYBOZU_TEST_EQUAL(y, z); + y = x << i; + CYBOZU_TEST_EQUAL(y, z); + y = x; + y <<= i; + CYBOZU_TEST_EQUAL(y, z); + } + for (int i = 0; i < 4; i++) { + Vint::shl(y, x, i * unitBitSize); + Vint::pow(s, Vint(2), i * unitBitSize); + z = x * s; + CYBOZU_TEST_EQUAL(y, z); + y = x << (i * unitBitSize); + CYBOZU_TEST_EQUAL(y, z); + y = x; + y <<= (i * unitBitSize); + CYBOZU_TEST_EQUAL(y, z); + } + for (int i = 0; i < 100; i++) { + y = x << i; + Vint::pow(s, Vint(2), i); + z = x * s; + CYBOZU_TEST_EQUAL(y, z); + y = x; + y <<= i; + CYBOZU_TEST_EQUAL(y, z); + } + + // shr + for (size_t i = 1; i < 31; i++) { + Vint::shr(y, x, i); + z = x / (Vint::Unit(1) << i); + CYBOZU_TEST_EQUAL(y, z); + y = x >> i; + CYBOZU_TEST_EQUAL(y, z); + y = x; + y >>= i; + CYBOZU_TEST_EQUAL(y, z); + } + for (int i = 0; i < 3; i++) { + Vint::shr(y, x, i * unitBitSize); + Vint::pow(s, Vint(2), i * unitBitSize); + z = x / s; + CYBOZU_TEST_EQUAL(y, z); + y = x >> (i * unitBitSize); + CYBOZU_TEST_EQUAL(y, z); + y = x; + y >>= (i * unitBitSize); + 
CYBOZU_TEST_EQUAL(y, z); + } + for (int i = 0; i < 100; i++) { + y = x >> i; + Vint::pow(s, Vint(2), i); + z = x / s; + CYBOZU_TEST_EQUAL(y, z); + y = x; + y >>= i; + CYBOZU_TEST_EQUAL(y, z); + } + { + Vint a = 0, zero = 0; + a <<= Vint::unitBitSize; + CYBOZU_TEST_EQUAL(a, zero); + } +} + +CYBOZU_TEST_AUTO(getBitSize) +{ + { + Vint zero = 0; + CYBOZU_TEST_EQUAL(zero.getBitSize(), 1); + zero <<= (Vint::unitBitSize - 1); + CYBOZU_TEST_EQUAL(zero.getBitSize(), 1); + zero <<= Vint::unitBitSize; + CYBOZU_TEST_EQUAL(zero.getBitSize(), 1); + } + + { + Vint a = 1; + CYBOZU_TEST_EQUAL(a.getBitSize(), 1); + a = 2; + CYBOZU_TEST_EQUAL(a.getBitSize(), 2); + a = 3; + CYBOZU_TEST_EQUAL(a.getBitSize(), 2); + a = 4; + CYBOZU_TEST_EQUAL(a.getBitSize(), 3); + } + + { + Vint a = 5; + const size_t msbindex = a.getBitSize(); + const size_t width = 100; + const size_t time = 3; + for (size_t i = 0; i < time; ++i) { + a <<= width; + CYBOZU_TEST_EQUAL(a.getBitSize(), msbindex + width*(i + 1)); + } + + for (size_t i = 0; i < time*2; ++i) { + a >>= width/2; + CYBOZU_TEST_EQUAL(a.getBitSize(), msbindex + width*time - (width/2)*(i + 1)); + } + a >>= width; + CYBOZU_TEST_ASSERT(a.isZero()); + CYBOZU_TEST_EQUAL(a.getBitSize(), 1); + } + + { + Vint b("12"), c("345"), d("67890"); + size_t bl = b.getBitSize(), cl = c.getBitSize(), dl = d.getBitSize(); + CYBOZU_TEST_ASSERT((b*c).getBitSize() <= bl + cl); + CYBOZU_TEST_ASSERT((c*d).getBitSize() <= cl + dl); + CYBOZU_TEST_ASSERT((b*c*d).getBitSize() <= bl + cl + dl); + } +} + +CYBOZU_TEST_AUTO(bit) +{ + Vint a; + a.setStr("0x1234567890abcdef"); + bool tvec[] = { + 1,1,1,1,0 ,1,1,1,1,0 + ,1,1,0,0,1 ,1,1,1,0,1 + ,0,1,0,1,0 ,0,0,0,1,0 + ,0,1,0,0,0 ,1,1,1,1,0 + ,0,1,1,0,1 ,0,1,0,0,0 + ,1,0,1,1,0 ,0,0,1,0,0 + ,1 + }; + CYBOZU_TEST_EQUAL(a.getBitSize(), sizeof(tvec)/sizeof(*tvec)); + for (int i = (int)a.getBitSize() - 1; i >= 0; --i) { + CYBOZU_TEST_EQUAL(a.testBit(i), tvec[i]); + } +} + +CYBOZU_TEST_AUTO(sample) +{ + using namespace mcl; + Vint x(1); + Vint y("123456789"); + Vint z; + + x = 1; // set by int + y.setStr("123456789"); // set by decimal + z.setStr("0xffffffff"); // set by hex + x += z; + + x = 2; + y = 250; + Vint::pow(x, x, y); + Vint r, q; + r = x % y; + q = x / y; + CYBOZU_TEST_EQUAL(q * y + r, x); + + Vint::quotRem(&q, r, x, y); // get both r and q + CYBOZU_TEST_EQUAL(q * y + r, x); +} + +CYBOZU_TEST_AUTO(Vint) +{ + const struct { + int a; + int b; + /* + q, r ; like C + q2, r2 ; like Python + */ + int add, sub, mul, q, r, q2, r2; + } tbl[] = { + { 13, 5, 18, 8, 65, 2, 3, 2, 3 }, + { 13, -5, 8, 18, -65, -2, 3, -3, -2 }, + { -13, 5, -8, -18, -65, -2, -3, -3, 2 }, + { -13, -5, -18, -8, 65, 2, -3, 2, -3 }, + { 5, 13, 18, -8, 65, 0, 5 , 0, 5}, + { 5, -13, -8, 18, -65, 0, 5 , -1, -8}, + { -5, 13, 8, -18, -65, 0, -5 , -1, 8}, + { -5, -13, -18, 8, 65, 0, -5 , 0, -5}, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + Vint a = tbl[i].a; + Vint b = tbl[i].b; + Vint add = a + b; + Vint sub = a - b; + Vint mul = a * b; + Vint q = a / b; + Vint r = a % b; + Vint q2, r2; + Vint::quotRem(&q2, r2, a, b); + CYBOZU_TEST_EQUAL(add, tbl[i].add); + CYBOZU_TEST_EQUAL(sub, tbl[i].sub); + CYBOZU_TEST_EQUAL(mul, tbl[i].mul); + CYBOZU_TEST_EQUAL(q, tbl[i].q); + CYBOZU_TEST_EQUAL(r, tbl[i].r); + CYBOZU_TEST_EQUAL(q * b + r, a); + CYBOZU_TEST_EQUAL(q2, tbl[i].q2); + CYBOZU_TEST_EQUAL(r2, tbl[i].r2); + CYBOZU_TEST_EQUAL(q2 * b + r2, a); + } + CYBOZU_TEST_EQUAL(Vint("15") / Vint("3"), Vint("5")); + CYBOZU_TEST_EQUAL(Vint("15") / Vint("-3"), Vint("-5")); + 
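+ // Vint's operator/ and operator% truncate toward zero (the "like C" q/r
+ // columns of tbl above); quotRem instead returns the floored "like Python"
+ // q2/r2 convention, and both satisfy q*b + r == a.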
CYBOZU_TEST_EQUAL(Vint("-15") / Vint("3"), Vint("-5")); + CYBOZU_TEST_EQUAL(Vint("-15") / Vint("-3"), Vint("5")); + + CYBOZU_TEST_EQUAL(Vint("15") % Vint("3"), Vint("0")); + CYBOZU_TEST_EQUAL(Vint("15") % Vint("-3"), Vint("0")); + CYBOZU_TEST_EQUAL(Vint("-15") % Vint("3"), Vint("0")); + CYBOZU_TEST_EQUAL(Vint("-15") % Vint("-3"), Vint("0")); + + CYBOZU_TEST_EQUAL(Vint("-0") + Vint("-3"), Vint("-3")); + CYBOZU_TEST_EQUAL(Vint("-0") - Vint("-3"), Vint("3")); + CYBOZU_TEST_EQUAL(Vint("-3") + Vint("-0"), Vint("-3")); + CYBOZU_TEST_EQUAL(Vint("-3") - Vint("-0"), Vint("-3")); + + CYBOZU_TEST_EQUAL(Vint("-0") + Vint("3"), Vint("3")); + CYBOZU_TEST_EQUAL(Vint("-0") - Vint("3"), Vint("-3")); + CYBOZU_TEST_EQUAL(Vint("3") + Vint("-0"), Vint("3")); + CYBOZU_TEST_EQUAL(Vint("3") - Vint("-0"), Vint("3")); + + CYBOZU_TEST_EQUAL(Vint("0"), Vint("0")); + CYBOZU_TEST_EQUAL(Vint("0"), Vint("-0")); + CYBOZU_TEST_EQUAL(Vint("-0"), Vint("0")); + CYBOZU_TEST_EQUAL(Vint("-0"), Vint("-0")); + + CYBOZU_TEST_ASSERT(Vint("2") < Vint("3")); + CYBOZU_TEST_ASSERT(Vint("-2") < Vint("3")); + CYBOZU_TEST_ASSERT(Vint("-5") < Vint("-3")); + CYBOZU_TEST_ASSERT(Vint("-0") < Vint("1")); + CYBOZU_TEST_ASSERT(Vint("-1") < Vint("-0")); + + CYBOZU_TEST_ASSERT(Vint("5") > Vint("3")); + CYBOZU_TEST_ASSERT(Vint("5") > Vint("-3")); + CYBOZU_TEST_ASSERT(Vint("-2") > Vint("-3")); + CYBOZU_TEST_ASSERT(Vint("3") > Vint("-0")); + CYBOZU_TEST_ASSERT(Vint("-0") > Vint("-1")); + + { + const struct { + const char *str; + int s; + int shl; + int shr; + } tbl2[] = { + { "0", 1, 0, 0 }, + { "-0", 1, 0, 0 }, + { "1", 1, 2, 0 }, + { "-1", 1, -2, 0 }, + { "12345", 3, 98760, 1543 }, + { "-12345", 3, -98760, 0 }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl2); i++) { + Vint a = Vint(tbl2[i].str); + Vint shl = a << tbl2[i].s; + CYBOZU_TEST_EQUAL(shl, tbl2[i].shl); + if (!a.isNegative()) { + Vint shr = a >> tbl2[i].s; + CYBOZU_TEST_EQUAL(shr, tbl2[i].shr); + } + } + } +} + +CYBOZU_TEST_AUTO(add2) +{ + Vint x, y, z, w; + x.setStr("2416089439321382744001761632872637936198961520379024187947524965775137204955564426500438089001375107581766516460437532995850581062940399321788596606850"); + y.setStr("2416089439321382743300544243711595219403446085161565705825288050160594425031420687263897209379984490503106207071010949258995096347962762372787916800000"); + z.setStr("701217389161042716795515435217458482122236915614542779924143739236540879621390617078660309389426583736855484714977636949000679806850"); + Vint::sub(w, x, y); + CYBOZU_TEST_EQUAL(w, z); + + Vint a, c, d; + + a.setStr("-2416089439321382744001761632872637936198961520379024187947524965775137204955564426500438089001375107581766516460437532995850581062940399321788596606850"); + c.setStr("2416089439321382743300544243711595219403446085161565705825288050160594425031420687263897209379984490503106207071010949258995096347962762372787916800000"); + a = a + c; + + d.setStr("-701217389161042716795515435217458482122236915614542779924143739236540879621390617078660309389426583736855484714977636949000679806850"); + CYBOZU_TEST_EQUAL(a, d); +} + +CYBOZU_TEST_AUTO(stream) +{ + { + Vint x, y, z, w; + x.setStr("12345678901232342424242423423429922"); + y.setStr("23423423452424242343"); + std::ostringstream oss; + oss << x << ' ' << y; + std::istringstream iss(oss.str()); + iss >> z >> w; + CYBOZU_TEST_EQUAL(x, z); + CYBOZU_TEST_EQUAL(y, w); + } + { + Vint x, y, z, w; + x.setStr("0x100"); + y.setStr("123"); + std::ostringstream oss; + oss << x << ' ' << y; + std::istringstream iss(oss.str()); + iss >> z >> w; + 
CYBOZU_TEST_EQUAL(x, z); + CYBOZU_TEST_EQUAL(y, w); + } + { + Vint x, y, z, w; + x.setStr("12345678901232342424242423423429922"); + y.setStr("-23423423452424242343"); + std::ostringstream oss; + oss << x << ' ' << y; + std::istringstream iss(oss.str()); + iss >> z >> w; + CYBOZU_TEST_EQUAL(x, z); + CYBOZU_TEST_EQUAL(y, w); + } +} + +CYBOZU_TEST_AUTO(inc_dec) +{ + Vint x = 3; + CYBOZU_TEST_EQUAL(x++, 3); + CYBOZU_TEST_EQUAL(x, 4); + CYBOZU_TEST_EQUAL(++x, 5); + CYBOZU_TEST_EQUAL(x, 5); + + CYBOZU_TEST_EQUAL(x--, 5); + CYBOZU_TEST_EQUAL(x, 4); + CYBOZU_TEST_EQUAL(--x, 3); + CYBOZU_TEST_EQUAL(x, 3); +} + +CYBOZU_TEST_AUTO(withInt) +{ + Vint x = 15; + x += 3; + CYBOZU_TEST_EQUAL(x, 18); + x -= 2; + CYBOZU_TEST_EQUAL(x, 16); + x *= 2; + CYBOZU_TEST_EQUAL(x, 32); + x /= 3; + CYBOZU_TEST_EQUAL(x, 10); + x = -x; + CYBOZU_TEST_EQUAL(x, -10); + x += 1; + CYBOZU_TEST_EQUAL(x, -9); + x -= 2; + CYBOZU_TEST_EQUAL(x, -11); + x *= 2; + CYBOZU_TEST_EQUAL(x, -22); + x /= 5; + CYBOZU_TEST_EQUAL(x, -4); + x = -22; + x %= 5; + CYBOZU_TEST_EQUAL(x, -2); + + x = 3; + x += -2; + CYBOZU_TEST_EQUAL(x, 1); + x += -5; + CYBOZU_TEST_EQUAL(x, -4); + x -= -7; + CYBOZU_TEST_EQUAL(x, 3); + x *= -1; + CYBOZU_TEST_EQUAL(x, -3); + x /= -1; + CYBOZU_TEST_EQUAL(x, 3); + + x++; + CYBOZU_TEST_EQUAL(x, 4); + x--; + CYBOZU_TEST_EQUAL(x, 3); + x = -3; + x++; + CYBOZU_TEST_EQUAL(x, -2); + x--; + CYBOZU_TEST_EQUAL(x, -3); + + ++x; + CYBOZU_TEST_EQUAL(x, -2); + --x; + CYBOZU_TEST_EQUAL(x, -3); + x = 3; + ++x; + CYBOZU_TEST_EQUAL(x, 4); + --x; + CYBOZU_TEST_EQUAL(x, 3); +} + +CYBOZU_TEST_AUTO(addu1) +{ + Vint x = 4; + Vint::addu1(x, x, 2); + CYBOZU_TEST_EQUAL(x, 6); + Vint::subu1(x, x, 2); + CYBOZU_TEST_EQUAL(x, 4); + Vint::subu1(x, x, 10); + CYBOZU_TEST_EQUAL(x, -6); + x = -4; + Vint::addu1(x, x, 2); + CYBOZU_TEST_EQUAL(x, -2); + Vint::subu1(x, x, 2); + CYBOZU_TEST_EQUAL(x, -4); + Vint::addu1(x, x, 10); + CYBOZU_TEST_EQUAL(x, 6); + + x.setStr("0x10000000000000000000000002"); + Vint::subu1(x, x, 3); + CYBOZU_TEST_EQUAL(x, Vint("0xfffffffffffffffffffffffff")); + x.setStr("-0x10000000000000000000000000"); + Vint::addu1(x, x, 5); + CYBOZU_TEST_EQUAL(x, Vint("-0xffffffffffffffffffffffffb")); +} + +CYBOZU_TEST_AUTO(pow) +{ + Vint x = 2; + Vint y; + Vint::pow(y, x, 3); + CYBOZU_TEST_EQUAL(y, 8); + x = -2; + Vint::pow(y, x, 3); + CYBOZU_TEST_EQUAL(y, -8); +#ifndef MCL_AVOID_EXCEPTION_TEST +// CYBOZU_TEST_EXCEPTION(Vint::pow(y, x, -2), cybozu::Exception); +#endif +} + +CYBOZU_TEST_AUTO(powMod) +{ + Vint x = 7; + Vint m = 65537; + Vint y; + Vint::powMod(y, x, 20, m); + CYBOZU_TEST_EQUAL(y, 55277); + Vint::powMod(y, x, m - 1, m); + CYBOZU_TEST_EQUAL(y, 1); +} + +CYBOZU_TEST_AUTO(andOr) +{ + Vint x("1223480928420984209849242"); + Vint y("29348220482094820948208420984209482048204289482"); + Vint z; + z = x & y; + CYBOZU_TEST_EQUAL(z, Vint("1209221003550923564822922")); + z = x | y; + CYBOZU_TEST_EQUAL(z, Vint("29348220482094820948208435244134352108849315802")); +#ifndef MCL_AVOID_EXCEPTION_TEST +// CYBOZU_TEST_EXCEPTION(Vint("-2") | Vint("5"), cybozu::Exception); +// CYBOZU_TEST_EXCEPTION(Vint("-2") & Vint("5"), cybozu::Exception); +#endif + x = 8; + x |= 7; + CYBOZU_TEST_EQUAL(x, 15); + x = 65536; + y = 8; + y &= x; + CYBOZU_TEST_EQUAL(y, 0); +} + +CYBOZU_TEST_AUTO(invMod) +{ + Vint m("100000000000000000039"); + for (int i = 1; i < 100; i++) { + Vint x = i; + Vint y; + Vint::invMod(y, x, m); + CYBOZU_TEST_EQUAL((y * x) % m, 1); + } +} + +CYBOZU_TEST_AUTO(isPrime) +{ + int primeTbl[] = { + 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 
53, 59, 61, + 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, + 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,197, 199, 211, + 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, + 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, + 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, + 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, + 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, + 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, + 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, + 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, + 941, 947, 953, 967, 971, 977, 983, 991, 997 + }; + typedef std::set IntSet; + IntSet primes; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(primeTbl); i++) { + primes.insert(primeTbl[i]); + } + for (int i = 0; i < 1000; i++) { + bool ok = primes.find(i) != primes.end(); + bool my = Vint(i).isPrime(); + CYBOZU_TEST_EQUAL(ok, my); + } + const struct { + const char *n; + bool isPrime; + } tbl[] = { + { "65537", true }, + { "449065", false }, + { "488881", false }, + { "512461", false }, + { "18446744073709551629", true }, + { "18446744073709551631", false }, + { "0x10000000000000000000000000000000000000007", true }, + { "0x10000000000000000000000000000000000000009", false }, + { "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", true }, + { "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2d", false }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + Vint x(tbl[i].n); + CYBOZU_TEST_EQUAL(x.isPrime(), tbl[i].isPrime); + } +} + +CYBOZU_TEST_AUTO(gcd) +{ + Vint x = 12; + Vint y = 18; + Vint z; + Vint::gcd(z, x, y); + CYBOZU_TEST_EQUAL(z, 6); + Vint::lcm(z, x, y); + CYBOZU_TEST_EQUAL(z, 36); + Vint::lcm(x, x, y); + CYBOZU_TEST_EQUAL(x, 36); + Vint::lcm(x, x, x); + CYBOZU_TEST_EQUAL(x, 36); +} + +CYBOZU_TEST_AUTO(jacobi) +{ + const struct { + const char *m; + const char *n; + int ok; + } tbl[] = { + { "0", "1", 1 }, + { "1", "1", 1 }, + { "123", "1", 1 }, + { "45", "77", -1 }, + { "60", "121", 1 }, + { "12345672342342342342428", "923423423424753211", 1 }, + { "12345672342342342342428","34592342234235424753211", -1 }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + int my = Vint::jacobi(Vint(tbl[i].m), Vint(tbl[i].n)); + CYBOZU_TEST_EQUAL(my, tbl[i].ok); + } +} + +CYBOZU_TEST_AUTO(bench) +{ + Vint x, y, z; + x.setStr("0x2523648240000001ba344d80000000086121000000000013a700000000000013"); + y.setStr("0x1802938109810498104982094820498203942804928049284092424902424243"); + + int N = 100000; + CYBOZU_BENCH_C("add", N, Vint::add, z, x, y); + CYBOZU_BENCH_C("sub", N, Vint::sub, z, x, y); + CYBOZU_BENCH_C("mul", N, Vint::mul, z, x, y); + CYBOZU_BENCH_C("div", N, Vint::div, y, z, x); + + const struct { + const char *x; + const char *y; + } tbl[] = { + { + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0x2523648240000001ba344d8000000007ff9f800000000010a10000000000000d" + }, + { + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab", + }, + { + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001", + }, + + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { 
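+ // each pair divides an all-ones value by what appear to be pairing-friendly
+ // constants (BN254 p, BLS12-381 p and r), comparing Vint's div against
+ // GMP's mpz_div on identical operands.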
+ x.setStr(tbl[i].x); + y.setStr(tbl[i].y); + CYBOZU_BENCH_C("fast div", N, Vint::div, z, x, y); +#ifndef DONT_USE_GMP_IN_TEST + { + mpz_class mx(tbl[i].x), my(tbl[i].y), mz; + CYBOZU_BENCH_C("gmp", N, mpz_div, mz.get_mpz_t(), mx.get_mpz_t(), my.get_mpz_t()); + } +#endif + } +} + +struct Seq { + const uint32_t *tbl; + size_t n; + size_t i, j; + Seq(const uint32_t *tbl, size_t n) : tbl(tbl), n(n), i(0), j(0) {} + bool next(uint64_t *v) + { + if (i == n) { + if (j == n - 1) return false; + i = 0; + j++; + } + *v = (uint64_t(tbl[j]) << 32) | tbl[i]; + i++; + return true; + } +}; + +#if MCL_SIZEOF_UNIT == 8 +CYBOZU_TEST_AUTO(divUnit) +{ + const uint32_t tbl[] = { + 0, 1, 3, + 0x7fffffff, + 0x80000000, + 0x80000001, + 0xffffffff, + }; + const size_t n = sizeof(tbl) / sizeof(tbl[0]); + Seq seq3(tbl, n); + uint64_t y; + while (seq3.next(&y)) { + if (y == 0) continue; + Seq seq2(tbl, n); + uint64_t r; + while (seq2.next(&r)) { + if (r >= y) break; + Seq seq1(tbl, n); + uint64_t q; + while (seq1.next(&q)) { + uint64_t x[2]; + x[0] = mcl::vint::mulUnit(&x[1], q, y); + mcl::vint::addu1(x, x, 2, r); + uint64_t Q, R; +//printf("q=0x%016llxull, r=0x%016llxull, y=0x%016llxull\n", (long long)q, (long long)r, (long long)y); + Q = mcl::vint::divUnit(&R, x[1], x[0], y); + CYBOZU_TEST_EQUAL(q, Q); + CYBOZU_TEST_EQUAL(r, R); + } + } + } +} + +void compareMod(const uint64_t *x, const uint64_t *p) +{ + uint64_t y1[4] = {}; + uint64_t y2[4] = {}; + mcl::vint::divNM((uint64_t*)0, 0, y1, x, 8, p, 4); + mcl::vint::mcl_fpDbl_mod_SECP256K1(y2, x, p); + CYBOZU_TEST_EQUAL_ARRAY(y1, y2, 4); +} +CYBOZU_TEST_AUTO(SECP256k1) +{ + const uint64_t F = uint64_t(-1); + const uint64_t p[4] = { uint64_t(0xfffffffefffffc2full), F, F, F }; + const uint64_t tbl[][8] = { + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { F, F, F, F, F, F, F, F }, + { F, F, F, F, 1, 0, 0, 0 }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + const uint64_t *x = tbl[i]; + compareMod(x, p); + } + cybozu::XorShift rg; + for (size_t i = 0; i < 100; i++) { + uint64_t x[8]; + for (int j = 0; j < 8; j++) { + x[j] = rg(); + } + compareMod(x, p); + } +} +#endif diff --git a/vendor/github.com/byzantine-lab/mcl/test/window_method_test.cpp b/vendor/github.com/byzantine-lab/mcl/test/window_method_test.cpp new file mode 100644 index 000000000..1b0f702af --- /dev/null +++ b/vendor/github.com/byzantine-lab/mcl/test/window_method_test.cpp @@ -0,0 +1,70 @@ +#include +#include +#include +#include +#include + +CYBOZU_TEST_AUTO(ArrayIterator) +{ + const uint32_t in[2] = { 0x12345678, 0xabcdef89 }; + const size_t bitSize = 64; + for (size_t w = 1; w <= 32; w++) { + const uint32_t mask = uint32_t((uint64_t(1) << w) - 1); + mpz_class x; + mcl::gmp::setArray(x, in, 2); + mcl::fp::ArrayIterator ai(in, bitSize, w); + size_t n = (bitSize + w - 1) / w; + for (size_t j = 0; j < n; j++) { + CYBOZU_TEST_ASSERT(ai.hasNext()); + uint32_t v = ai.getNext(); + CYBOZU_TEST_EQUAL(x & mask, v); + x >>= w; + } + CYBOZU_TEST_ASSERT(!ai.hasNext()); + } +} + +CYBOZU_TEST_AUTO(int) +{ + typedef mcl::FpT<> Fp; + typedef mcl::EcT Ec; + const struct mcl::EcParam& para = mcl::ecparam::secp192k1; + Fp::init(para.p); + Ec::init(para.a, para.b); + const Fp x(para.gx); + const Fp y(para.gy); + const Ec P(x, y); + + typedef mcl::fp::WindowMethod PW; + const size_t bitSize = 13; + Ec Q, R; + + for (size_t winSize = 10; winSize <= bitSize; winSize++) { + PW pw(P, bitSize, winSize); + for (int i = 0; i < (1 << bitSize); i++) { + pw.mul(Q, i); + Ec::mul(R, P, i); + CYBOZU_TEST_EQUAL(Q, R); + } + } + PW pw(P, 
para.bitSize, 10); + pw.mul(Q, -12345); + Ec::mul(R, P, -12345); + CYBOZU_TEST_EQUAL(Q, R); + mpz_class t(para.gx); + pw.mul(Q, t); + Ec::mul(R, P, t); + CYBOZU_TEST_EQUAL(Q, R); + t = -t; + pw.mul(Q, t); + Ec::mul(R, P, t); + CYBOZU_TEST_EQUAL(Q, R); + + pw.mul(Q, x); + Ec::mul(R, P, x); + CYBOZU_TEST_EQUAL(Q, R); + + pw.mul(Q, y); + Ec::mul(R, P, y); + CYBOZU_TEST_EQUAL(Q, R); +} diff --git a/vendor/github.com/dexon-foundation/bls/.gitignore b/vendor/github.com/dexon-foundation/bls/.gitignore deleted file mode 100644 index dacdfc906..000000000 --- a/vendor/github.com/dexon-foundation/bls/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -CVS -bin/*.exe -lib/*.a -lib/*.so -lib/*.dylib -obj/*.d -obj/*.o diff --git a/vendor/github.com/dexon-foundation/bls/.travis.yml b/vendor/github.com/dexon-foundation/bls/.travis.yml deleted file mode 100644 index 71a667a2e..000000000 --- a/vendor/github.com/dexon-foundation/bls/.travis.yml +++ /dev/null @@ -1,39 +0,0 @@ -sudo: true -dist: xenial -services: -- docker -env: - global: - - IMAGE_TAG=dexonfoundation/bls-go-alpine - - DOCKER_USER=spiderpowadeploy - - secure: mqNCngWukyjE3UARxaPjqS0xgC1dsnWfmPhpH2mq7nR6S2cGfJ3xBfyiTS//Clz//7sAL+Tp62r3fxyMjDogrSHZUUssCAwf17RM6vnALqaVbc3wXcTNudiDB5cVKe9C9gZqn1Ivd+qbmtuCezSrOG5Xih1gh4bPTyiFvU1sp9C2icMHkJZkjsP0QqCbHlQrMeECSIPlEGIOXUUSp+WmrZAdi2rHezKeZxuaT73RX1+N/+1RfWXo2MR4ydQU3eALl5s5UA9JweQO+MYIVr8EEpGNqJRYUyURx/5G/Sy2v6Z3imUvXZv1J5aplW/UDls92Olla1JHuvFW6ptRO+PHITNwvEkhxPFj+HcOpqEuSISsdk9rkHUrM0wEYPv6A4vQPUjMHrLQs2tQShVCelM1HtNvDDjttKMmVyRLusFP9eS7uvmmXu2l6efJjsMSFkY5WKbu2U0MQ1j708KH9k2WunU6sjJ+b74PkkZVtkQMIqgTokC0IOqbbrnwh4I9PpVpHAQrewRimMH+lDHk+HlMUCWk7/IcIFUl+mh6RzW2vkZTTr2ctSBI6QzK5smdPmqQpp2lqkGv/hQCBp5ICzFSkU6Djqe3hG8ta3+/Zhi10fPU2HcHDi+gR79CG8dvy+iOeTS2csXZx+YoN2BVkfu9AnrjZ9Kjkf9BMay4CehBUWE= -language: cpp -compiler: -- gcc -- clang -addons: - apt: - packages: - - libgmp-dev -install: -- git clone --depth 1 https://github.com/dexon-foundation/mcl.git $TRAVIS_BUILD_DIR/../mcl -script: -- make -j3 -- make test_ci DISABLE_THREAD_TEST=1 -- make test_go -- env LD_LIBRARY_PATH=../mcl/lib bin/bls_c384_test.exe -- make clean && make -C ../mcl clean -- make -j3 MCL_USE_OPENSSL=0 -- make test_ci DISABLE_THREAD_TEST=1 MCL_USE_OPENSSL=0 -- docker build --tag "$IMAGE_TAG" . 
-f images/bls-go-alpine/Dockerfile --no-cache -before_deploy: -- echo "$DOCKER_PASS" | docker login -u "$DOCKER_USER" --password-stdin -- git_commit="$(git rev-parse --short HEAD)" -- docker tag "$IMAGE_TAG" "${IMAGE_TAG}:${git_commit}" -- docker tag "$IMAGE_TAG" "${IMAGE_TAG}:latest" -deploy: - provider: script - script: docker push "${IMAGE_TAG}:latest" && docker push "${IMAGE_TAG}:${git_commit}" - on: - branch: dev - condition: "$CC = gcc" diff --git a/vendor/github.com/dexon-foundation/bls/CMakeLists.txt b/vendor/github.com/dexon-foundation/bls/CMakeLists.txt deleted file mode 100644 index 30fb90fd5..000000000 --- a/vendor/github.com/dexon-foundation/bls/CMakeLists.txt +++ /dev/null @@ -1,33 +0,0 @@ -cmake_minimum_required (VERSION 2.6) -project(bls CXX ASM) - -set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) -set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) -set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) - -set(LIBS mcl gmp) - -include_directories(include/) - -add_library(bls_c256 SHARED src/bls_c256.cpp) -add_library(bls_c384 SHARED src/bls_c384.cpp) -add_library(bls_c384_256 SHARED src/bls_c384_256.cpp) -target_link_libraries(bls_c256 ${LIBS}) -target_link_libraries(bls_c384 ${LIBS}) -target_link_libraries(bls_c384_256 ${LIBS}) - -file(GLOB BLS_HEADERS include/bls/bls.h include/bls/bls.hpp) - -install(TARGETS bls_c256 DESTINATION lib) -install(TARGETS bls_c384 DESTINATION lib) -install(TARGETS bls_c384_256 DESTINATION lib) -install(FILES ${BLS_HEADERS} DESTINATION include/bls) - -set(TEST_LIBS pthread gmpxx) - -add_executable(bls_c256_test test/bls_c256_test.cpp) -target_link_libraries(bls_c256_test bls_c256 ${TEST_LIBS}) -add_executable(bls_c384_test test/bls_c384_test.cpp) -target_link_libraries(bls_c384_test bls_c384 ${TEST_LIBS}) -add_executable(bls_c384_256_test test/bls_c384_256_test.cpp) -target_link_libraries(bls_c384_256_test bls_c384_256 ${TEST_LIBS}) diff --git a/vendor/github.com/dexon-foundation/bls/Makefile b/vendor/github.com/dexon-foundation/bls/Makefile deleted file mode 100644 index efea22274..000000000 --- a/vendor/github.com/dexon-foundation/bls/Makefile +++ /dev/null @@ -1,164 +0,0 @@ -ifeq ($(findstring MINGW64,$(shell uname -s)),MINGW64) - # cgo accepts not '/c/path' but 'c:/path' - PWD=$(shell pwd|sed s'@^/\([a-z]\)@\1:@') -else - PWD=$(shell pwd) -endif -MCL_DIR?=$(PWD)/../mcl -include $(MCL_DIR)/common.mk -LIB_DIR=lib -OBJ_DIR=obj -EXE_DIR=bin -CFLAGS += -std=c++11 -LDFLAGS += -lpthread - -SRC_SRC=bls_c256.cpp bls_c384.cpp bls_c384_256.cpp -TEST_SRC=bls256_test.cpp bls384_test.cpp bls384_256_test.cpp bls_c256_test.cpp bls_c384_test.cpp bls_c384_256_test.cpp -SAMPLE_SRC=bls256_smpl.cpp bls384_smpl.cpp - -CFLAGS+=-I$(MCL_DIR)/include -ifneq ($(MCL_MAX_BIT_SIZE),) - CFLAGS+=-DMCL_MAX_BIT_SIZE=$(MCL_MAX_BIT_SIZE) -endif -ifeq ($(DISABLE_THREAD_TEST),1) - CFLAGS+=-DDISABLE_THREAD_TEST -endif -ifeq ($(BLS_SWAP_G),1) - CFLAGS+=-DBLS_SWAP_G -endif - -BLS256_LIB=$(LIB_DIR)/libbls256.a -BLS384_LIB=$(LIB_DIR)/libbls384.a -BLS384_256_LIB=$(LIB_DIR)/libbls384_256.a -BLS256_SNAME=bls256 -BLS384_SNAME=bls384 -BLS384_256_SNAME=bls384_256 -BLS256_SLIB=$(LIB_DIR)/lib$(BLS256_SNAME).$(LIB_SUF) -BLS384_SLIB=$(LIB_DIR)/lib$(BLS384_SNAME).$(LIB_SUF) -BLS384_256_SLIB=$(LIB_DIR)/lib$(BLS384_256_SNAME).$(LIB_SUF) -all: $(BLS256_LIB) $(BLS256_SLIB) $(BLS384_LIB) $(BLS384_SLIB) $(BLS384_256_LIB) $(BLS384_256_SLIB) - -MCL_LIB=$(MCL_DIR)/lib/libmcl.a - -$(MCL_LIB): - $(MAKE) -C $(MCL_DIR) - -$(BLS256_LIB): $(OBJ_DIR)/bls_c256.o $(MCL_LIB) - $(AR) $@ 
$< -$(BLS384_LIB): $(OBJ_DIR)/bls_c384.o $(MCL_LIB) - $(AR) $@ $< -$(BLS384_256_LIB): $(OBJ_DIR)/bls_c384_256.o $(MCL_LIB) - $(AR) $@ $< - -ifneq ($(findstring $(OS),mac/mingw64),) - COMMON_LIB=$(GMP_LIB) $(OPENSSL_LIB) -lstdc++ - BLS256_SLIB_LDFLAGS+=$(COMMON_LIB) - BLS384_SLIB_LDFLAGS+=$(COMMON_LIB) - BLS384_256_SLIB_LDFLAGS+=$(COMMON_LIB) -endif -ifeq ($(OS),mingw64) - CFLAGS+=-I$(MCL_DIR) - BLS256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BLS256_SNAME).a - BLS384_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BLS384_SNAME).a - BLS384_256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BLS384_256_SNAME).a -endif -$(BLS256_SLIB): $(OBJ_DIR)/bls_c256.o $(MCL_LIB) - $(PRE)$(CXX) -shared -o $@ $< -L$(MCL_DIR)/lib -lmcl $(BLS256_SLIB_LDFLAGS) $(LDFLAGS) -$(BLS384_SLIB): $(OBJ_DIR)/bls_c384.o $(MCL_LIB) - $(PRE)$(CXX) -shared -o $@ $< -L$(MCL_DIR)/lib -lmcl $(BLS384_SLIB_LDFLAGS) $(LDFLAGS) -$(BLS384_256_SLIB): $(OBJ_DIR)/bls_c384_256.o $(MCL_LIB) - $(PRE)$(CXX) -shared -o $@ $< -L$(MCL_DIR)/lib -lmcl $(BLS384_256_SLIB_LDFLAGS) $(LDFLAGS) - -VPATH=test sample src - -.SUFFIXES: .cpp .d .exe - -$(OBJ_DIR)/%.o: %.cpp - $(PRE)$(CXX) $(CFLAGS) -c $< -o $@ -MMD -MP -MF $(@:.o=.d) - -$(EXE_DIR)/%384_256_test.exe: $(OBJ_DIR)/%384_256_test.o $(BLS384_256_LIB) $(MCL_LIB) - $(PRE)$(CXX) $< -o $@ $(BLS384_256_LIB) -L$(MCL_DIR)/lib -lmcl $(LDFLAGS) - -$(EXE_DIR)/%384_test.exe: $(OBJ_DIR)/%384_test.o $(BLS384_LIB) $(MCL_LIB) - $(PRE)$(CXX) $< -o $@ $(BLS384_LIB) -L$(MCL_DIR)/lib -lmcl $(LDFLAGS) - -$(EXE_DIR)/%256_test.exe: $(OBJ_DIR)/%256_test.o $(BLS256_LIB) $(MCL_LIB) - $(PRE)$(CXX) $< -o $@ $(BLS256_LIB) -L$(MCL_DIR)/lib -lmcl $(LDFLAGS) - -# sample exe links libbls256.a -$(EXE_DIR)/%.exe: $(OBJ_DIR)/%.o $(BLS256_LIB) $(MCL_LIB) - $(PRE)$(CXX) $< -o $@ $(BLS256_LIB) -L$(MCL_DIR)/lib -lmcl $(LDFLAGS) -ifeq ($(OS),mac) - install_name_tool bin/bls_smpl.exe -change lib/libmcl.dylib $(MCL_DIR)/lib/libmcl.dylib -endif - -SAMPLE_EXE=$(addprefix $(EXE_DIR)/,$(SAMPLE_SRC:.cpp=.exe)) -sample: $(SAMPLE_EXE) - -TEST_EXE=$(addprefix $(EXE_DIR)/,$(TEST_SRC:.cpp=.exe)) -ifeq ($(OS),mac) - LIBPATH_KEY=DYLD_LIBRARY_PATH -else - LIBPATH_KEY=LD_LIBRARY_PATH -endif -test_ci: $(TEST_EXE) - @sh -ec 'for i in $(TEST_EXE); do echo $$i; env PATH=$$PATH:../mcl/lib $(LIBPATH_KEY)=../mcl/lib LSAN_OPTIONS=verbosity=1 log_threads=1 $$i; done' - $(MAKE) sample_test - -test: $(TEST_EXE) - @echo test $(TEST_EXE) - @sh -ec 'for i in $(TEST_EXE); do env PATH=$$PATH:../mcl/lib $(LIBPATH_KEY)=../mcl/lib $$i|grep "ctest:name"; done' > result.txt - @grep -v "ng=0, exception=0" result.txt; if [ $$? -eq 1 ]; then echo "all unit tests succeed"; else exit 1; fi - $(MAKE) sample_test - -sample_test: $(EXE_DIR)/bls_smpl.exe - env PATH=$$PATH:../mcl/lib $(LIBPATH_KEY)=../mcl/lib python bls_smpl.py - -# PATH is for mingw, LD_LIBRARY_PATH is for linux, DYLD_LIBRARY_PATH is for mac -COMMON_LIB_PATH="../../../lib:../../../../mcl/lib" -PATH_VAL=$$PATH:$(COMMON_LIB_PATH) LD_LIBRARY_PATH=$(COMMON_LIB_PATH) DYLD_LIBRARY_PATH=$(COMMON_LIB_PATH) -test_go256: ffi/go/bls/bls.go ffi/go/bls/bls_test.go $(BLS256_LIB) - cd ffi/go/bls && env PATH=$(PATH_VAL) go test -tags bn256 . -test_go384: ffi/go/bls/bls.go ffi/go/bls/bls_test.go $(BLS384_LIB) - cd ffi/go/bls && env PATH=$(PATH_VAL) go test -tags bn384 . -test_go384_256: ffi/go/bls/bls.go ffi/go/bls/bls_test.go $(BLS384_256_LIB) - cd ffi/go/bls && env PATH=$(PATH_VAL) go test -tags bn384_256 . 
- -test_go: - $(MAKE) test_go256 - $(MAKE) test_go384 - $(MAKE) test_go384_256 - -EMCC_OPT=-I./include -I./src -I../mcl/include -I./ -Wall -Wextra -EMCC_OPT+=-O3 -DNDEBUG -EMCC_OPT+=-s WASM=1 -s NO_EXIT_RUNTIME=1 -s MODULARIZE=1 #-s ASSERTIONS=1 -EMCC_OPT+=-DCYBOZU_MINIMUM_EXCEPTION -EMCC_OPT+=-s ABORTING_MALLOC=0 -EMCC_OPT+=-DMCLBN_FP_UNIT_SIZE=6 -JS_DEP=src/bls_c384.cpp ../mcl/src/fp.cpp Makefile - -../bls-wasm/bls_c.js: $(JS_DEP) - emcc -o $@ src/bls_c384.cpp ../mcl/src/fp.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=384 -DMCL_USE_WEB_CRYPTO_API -s DISABLE_EXCEPTION_CATCHING=1 -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -DMCL_DONT_USE_CSPRNG -fno-exceptions -MD -MP -MF obj/bls_c384.d - -bls-wasm: - $(MAKE) ../bls-wasm/bls_c.js - -clean: - $(RM) $(OBJ_DIR)/*.d $(OBJ_DIR)/*.o $(EXE_DIR)/*.exe $(GEN_EXE) $(ASM_SRC) $(ASM_OBJ) $(LLVM_SRC) $(BLS256_LIB) $(BLS256_SLIB) $(BLS384_LIB) $(BLS384_SLIB) $(BLS384_256_LIB) $(BLS384_256_SLIB) - -ALL_SRC=$(SRC_SRC) $(TEST_SRC) $(SAMPLE_SRC) -DEPEND_FILE=$(addprefix $(OBJ_DIR)/, $(ALL_SRC:.cpp=.d)) --include $(DEPEND_FILE) - -PREFIX?=/usr/local -install: lib/libbls256.a lib/libbls256.$(LIB_SUF) lib/libbls384.a lib/libbls384.$(LIB_SUF) lib/libbls384_256.a lib/libbls384_256.$(LIB_SUF) - $(MKDIR) $(PREFIX)/include/bls - cp -a include/bls/ $(PREFIX)/include/ - $(MKDIR) $(PREFIX)/lib - cp -a lib/libbls256.a lib/libbls256.$(LIB_SUF) lib/libbls384.a lib/libbls384.$(LIB_SUF) lib/libbls384_256.a lib/libbls384_256.$(LIB_SUF) $(PREFIX)/lib/ - -.PHONY: test bls-wasm - -# don't remove these files automatically -.SECONDARY: $(addprefix $(OBJ_DIR)/, $(ALL_SRC:.cpp=.o)) - diff --git a/vendor/github.com/dexon-foundation/bls/bin/.emptydir b/vendor/github.com/dexon-foundation/bls/bin/.emptydir deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/dexon-foundation/bls/bls.sln b/vendor/github.com/dexon-foundation/bls/bls.sln deleted file mode 100644 index 4889ec601..000000000 --- a/vendor/github.com/dexon-foundation/bls/bls.sln +++ /dev/null @@ -1,30 +0,0 @@ -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 2013 -VisualStudioVersion = 12.0.40629.0 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bls_test", "test\proj\bls_test\bls_test.vcxproj", "{51266DE6-B57B-4AE3-B85C-282F170E1728}" - ProjectSection(ProjectDependencies) = postProject - {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bls", "src\proj\bls.vcxproj", "{1DBB979A-C212-45CD-9563-446A96F87F71}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|x64 = Debug|x64 - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {51266DE6-B57B-4AE3-B85C-282F170E1728}.Debug|x64.ActiveCfg = Debug|x64 - {51266DE6-B57B-4AE3-B85C-282F170E1728}.Debug|x64.Build.0 = Debug|x64 - {51266DE6-B57B-4AE3-B85C-282F170E1728}.Release|x64.ActiveCfg = Release|x64 - {51266DE6-B57B-4AE3-B85C-282F170E1728}.Release|x64.Build.0 = Release|x64 - {1DBB979A-C212-45CD-9563-446A96F87F71}.Debug|x64.ActiveCfg = Debug|x64 - {1DBB979A-C212-45CD-9563-446A96F87F71}.Debug|x64.Build.0 = Debug|x64 - {1DBB979A-C212-45CD-9563-446A96F87F71}.Release|x64.ActiveCfg = Release|x64 - {1DBB979A-C212-45CD-9563-446A96F87F71}.Release|x64.Build.0 = Release|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - 
EndGlobalSection -EndGlobal diff --git a/vendor/github.com/dexon-foundation/bls/bls_smpl.py b/vendor/github.com/dexon-foundation/bls/bls_smpl.py deleted file mode 100644 index f834d80aa..000000000 --- a/vendor/github.com/dexon-foundation/bls/bls_smpl.py +++ /dev/null @@ -1,40 +0,0 @@ -import os, sys, subprocess - -EXE='bin/bls_smpl.exe' - -def init(): - subprocess.check_call([EXE, "init"]) - -def sign(m, i=0): - subprocess.check_call([EXE, "sign", "-m", m, "-id", str(i)]) - -def verify(m, i=0): - subprocess.check_call([EXE, "verify", "-m", m, "-id", str(i)]) - -def share(n, k): - subprocess.check_call([EXE, "share", "-n", str(n), "-k", str(k)]) - -def recover(ids): - cmd = [EXE, "recover", "-ids"] - for i in ids: - cmd.append(str(i)) - subprocess.check_call(cmd) - -def main(): - m = "hello bls threshold signature" - n = 10 - ids = [1, 5, 3, 7] - k = len(ids) - init() - sign(m) - verify(m) - share(n, k) - for i in ids: - sign(m, i) - verify(m, i) - subprocess.check_call(["rm", "sample/sign.txt"]) - recover(ids) - verify(m) - -if __name__ == '__main__': - main() diff --git a/vendor/github.com/dexon-foundation/bls/common.props b/vendor/github.com/dexon-foundation/bls/common.props deleted file mode 100644 index d6fdbb902..000000000 --- a/vendor/github.com/dexon-foundation/bls/common.props +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - $(SolutionDir)bin\ - - - - $(SolutionDir)../cybozulib/include;$(SolutionDir)../cybozulib_ext/include;$(SolutionDir)include;$(SolutionDir)../mcl/include - - - - - Level4 - MultiThreaded - - - _MBCS;%(PreprocessorDefinitions);NOMINMAX;BLS_MAX_OP_UNIT_SIZE=6 - - - $(SolutionDir)../cybozulib_ext/lib;$(SolutionDir)../mcl/lib;$(SolutionDir)lib - - - - diff --git a/vendor/github.com/dexon-foundation/bls/debug.props b/vendor/github.com/dexon-foundation/bls/debug.props deleted file mode 100644 index 1553ae0dc..000000000 --- a/vendor/github.com/dexon-foundation/bls/debug.props +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - $(ProjectName)d - - - - MultiThreadedDebug - - - - \ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/bls/ffi/cs/App.config b/vendor/github.com/dexon-foundation/bls/ffi/cs/App.config deleted file mode 100644 index 8d234373a..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/cs/App.config +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - diff --git a/vendor/github.com/dexon-foundation/bls/ffi/cs/Properties/AssemblyInfo.cs b/vendor/github.com/dexon-foundation/bls/ffi/cs/Properties/AssemblyInfo.cs deleted file mode 100644 index 201222c55..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/cs/Properties/AssemblyInfo.cs +++ /dev/null @@ -1,36 +0,0 @@ -using System.Reflection; -using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; - -// General information about an assembly is controlled through the following -// set of attributes. Change these attribute values to modify the information -// associated with an assembly. -[assembly: AssemblyTitle("bls256")] -[assembly: AssemblyDescription("")] -[assembly: AssemblyConfiguration("")] -[assembly: AssemblyCompany("")] -[assembly: AssemblyProduct("bls256")] -[assembly: AssemblyCopyright("Copyright © 2017")] -[assembly: AssemblyTrademark("")] -[assembly: AssemblyCulture("")] - -// Setting ComVisible to false makes the types in this assembly invisible -// to COM components. If you need to access a type in this assembly from COM, -// set the ComVisible attribute to true on that type. -[assembly: ComVisible(false)] - -// If this project is exposed to COM, the following GUID is the ID of the typelib -[assembly: Guid("e9d06b1b-ea22-4ef4-ba4b-422f7625966c")] - -// Version information for an assembly consists of the following four values: -// -// Major Version -// Minor Version -// Build Number -// Revision -// -// You can specify all the values, or you can default the Build and Revision -// numbers by using the '*' as shown below: -// [assembly: AssemblyVersion("1.0.*")] -[assembly: AssemblyVersion("1.0.0.0")] -[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls.cs b/vendor/github.com/dexon-foundation/bls/ffi/cs/bls.cs deleted file mode 100644 index 6bcaf07fb..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls.cs +++ /dev/null @@ -1,351 +0,0 @@ -using System; -using System.Text; -using System.Runtime.InteropServices; - -namespace mcl -{ - class BLS - { - public const int BN254 = 0; - public const int BLS12_381 = 5; - - const int IoEcComp = 512; // fixed byte representation - public const int FR_UNIT_SIZE = 4; - public const int FP_UNIT_SIZE = 6; // 4 if bls256.dll is used - public const int COMPILED_TIME_VAR = FR_UNIT_SIZE * 10 + FP_UNIT_SIZE; - - public const int ID_UNIT_SIZE = FR_UNIT_SIZE; - public const int SECRETKEY_UNIT_SIZE = FR_UNIT_SIZE; - public const int PUBLICKEY_UNIT_SIZE = FP_UNIT_SIZE * 3 * 2; - public const int SIGNATURE_UNIT_SIZE = FP_UNIT_SIZE * 3; - - public const int ID_SERIALIZE_SIZE = FR_UNIT_SIZE * 8; - public const int SECRETKEY_SERIALIZE_SIZE = FR_UNIT_SIZE * 8; - public const int PUBLICKEY_SERIALIZE_SIZE = FP_UNIT_SIZE * 8 * 2; - public const int SIGNATURE_SERIALIZE_SIZE = FP_UNIT_SIZE * 8; - - public const string dllName = FP_UNIT_SIZE == 4 ? "bls256.dll" : "bls384_256.dll"; - [DllImport(dllName)] - public static extern int blsInit(int curveType, int compiledTimeVar); - - [DllImport(dllName)] public static extern void blsIdSetInt(ref Id id, int x); - [DllImport(dllName)] public static extern int blsIdSetDecStr(ref Id id, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - [DllImport(dllName)] public static extern int blsIdSetHexStr(ref Id id, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - [DllImport(dllName)] public static extern ulong blsIdGetDecStr([Out]StringBuilder buf, ulong maxBufSize, in Id id); - [DllImport(dllName)] public static extern ulong blsIdGetHexStr([Out]StringBuilder buf, ulong maxBufSize, in Id id); - - [DllImport(dllName)] public static extern ulong blsIdSerialize([Out]byte[] buf, ulong maxBufSize, in Id id); - [DllImport(dllName)] public static extern ulong blsSecretKeySerialize([Out]byte[] buf, ulong maxBufSize, in SecretKey sec); - [DllImport(dllName)] public static extern ulong blsPublicKeySerialize([Out]byte[] buf, ulong maxBufSize, in PublicKey pub); - [DllImport(dllName)] public static extern ulong blsSignatureSerialize([Out]byte[] buf, ulong maxBufSize, in Signature sig); - [DllImport(dllName)] public static extern ulong blsIdDeserialize(ref Id id, [In]byte[] buf, ulong bufSize); - [DllImport(dllName)] public static extern ulong blsSecretKeyDeserialize(ref SecretKey sec, [In]byte[] buf, ulong bufSize); - [DllImport(dllName)] public static extern ulong blsPublicKeyDeserialize(ref PublicKey pub, [In]byte[] buf, ulong bufSize); - [DllImport(dllName)] public static extern ulong blsSignatureDeserialize(ref Signature sig, [In]byte[] buf, ulong bufSize); - - [DllImport(dllName)] public static extern int blsIdIsEqual(in Id lhs, in Id rhs); - [DllImport(dllName)] public static extern int
blsSecretKeyIsEqual(in SecretKey lhs, in SecretKey rhs); - [DllImport(dllName)] public static extern int blsPublicKeyIsEqual(in PublicKey lhs, in PublicKey rhs); - [DllImport(dllName)] public static extern int blsSignatureIsEqual(in Signature lhs, in Signature rhs); - // add - [DllImport(dllName)] public static extern void blsSecretKeyAdd(ref SecretKey sec, in SecretKey rhs); - [DllImport(dllName)] public static extern void blsPublicKeyAdd(ref PublicKey pub, in PublicKey rhs); - [DllImport(dllName)] public static extern void blsSignatureAdd(ref Signature sig, in Signature rhs); - // hash buf and set - [DllImport(dllName)] public static extern int blsHashToSecretKey(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - /* - set secretKey if system has /dev/urandom or CryptGenRandom - return 0 if success else -1 - */ - [DllImport(dllName)] public static extern int blsSecretKeySetByCSPRNG(ref SecretKey sec); - - [DllImport(dllName)] public static extern void blsGetPublicKey(ref PublicKey pub, in SecretKey sec); - [DllImport(dllName)] public static extern void blsGetPop(ref Signature sig, in SecretKey sec); - - // return 0 if success - [DllImport(dllName)] public static extern int blsSecretKeyShare(ref SecretKey sec, in SecretKey msk, ulong k, in Id id); - [DllImport(dllName)] public static extern int blsPublicKeyShare(ref PublicKey pub, in PublicKey mpk, ulong k, in Id id); - - - [DllImport(dllName)] public static extern int blsSecretKeyRecover(ref SecretKey sec, in SecretKey secVec, in Id idVec, ulong n); - [DllImport(dllName)] public static extern int blsPublicKeyRecover(ref PublicKey pub, in PublicKey pubVec, in Id idVec, ulong n); - [DllImport(dllName)] public static extern int blsSignatureRecover(ref Signature sig, in Signature sigVec, in Id idVec, ulong n); - - [DllImport(dllName)] public static extern void blsSign(ref Signature sig, in SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string m, ulong size); - - // return 1 if valid - [DllImport(dllName)] public static extern int blsVerify(in Signature sig, in PublicKey pub, [In][MarshalAs(UnmanagedType.LPStr)] string m, ulong size); - [DllImport(dllName)] public static extern int blsVerifyPop(in Signature sig, in PublicKey pub); - - ////////////////////////////////////////////////////////////////////////// - // the following apis will be removed - - // mask buf with (1 << (bitLen(r) - 1)) - 1 if buf >= r - [DllImport(dllName)] public static extern int blsIdSetLittleEndian(ref Id id, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - /* - return written byte size if success else 0 - */ - [DllImport(dllName)] public static extern ulong blsIdGetLittleEndian([Out]StringBuilder buf, ulong maxBufSize, in Id id); - - // return 0 if success - // mask buf with (1 << (bitLen(r) - 1)) - 1 if buf >= r - [DllImport(dllName)] public static extern int blsSecretKeySetLittleEndian(ref SecretKey sec, [In]byte[] buf, ulong bufSize); - [DllImport(dllName)] public static extern int blsSecretKeySetDecStr(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - [DllImport(dllName)] public static extern int blsSecretKeySetHexStr(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - /* - return written byte size if success else 0 - */ - [DllImport(dllName)] public static extern ulong blsSecretKeyGetLittleEndian([Out]byte[] buf, ulong maxBufSize, in SecretKey sec); - /* - return strlen(buf) if success else 0 - buf is '\0' terminated - */ - 
[DllImport(dllName)] public static extern ulong blsSecretKeyGetDecStr([Out]StringBuilder buf, ulong maxBufSize, in SecretKey sec); - [DllImport(dllName)] public static extern ulong blsSecretKeyGetHexStr([Out]StringBuilder buf, ulong maxBufSize, in SecretKey sec); - [DllImport(dllName)] public static extern int blsPublicKeySetHexStr(ref PublicKey pub, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - [DllImport(dllName)] public static extern ulong blsPublicKeyGetHexStr([Out]StringBuilder buf, ulong maxBufSize, in PublicKey pub); - [DllImport(dllName)] public static extern int blsSignatureSetHexStr(ref Signature sig, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - [DllImport(dllName)] public static extern ulong blsSignatureGetHexStr([Out]StringBuilder buf, ulong maxBufSize, in Signature sig); - - public static void Init(int curveType = BN254) { - if (!System.Environment.Is64BitProcess) { - throw new PlatformNotSupportedException("not 64-bit system"); - } - int err = blsInit(curveType, COMPILED_TIME_VAR); - if (err != 0) { - throw new ArgumentException("blsInit"); - } - } - [StructLayout(LayoutKind.Sequential)] - public unsafe struct Id - { - private fixed ulong v[ID_UNIT_SIZE]; - public byte[] Serialize() { - byte[] buf = new byte[ID_SERIALIZE_SIZE]; - ulong n = blsIdSerialize(buf, (ulong)buf.Length, this); - if (n == 0) { - throw new ArithmeticException("blsIdSerialize"); - } - return buf; - } - public void Deserialize(byte[] buf) { - ulong n = blsIdDeserialize(ref this, buf, (ulong)buf.Length); - if (n == 0) { - throw new ArithmeticException("blsIdDeserialize"); - } - } - public bool IsEqual(in Id rhs) { - return blsIdIsEqual(this, rhs) != 0; - } - public void SetDecStr(string s) { - if (blsIdSetDecStr(ref this, s, (ulong)s.Length) != 0) { - throw new ArgumentException("blsIdSetDecSt:" + s); - } - } - public void SetHexStr(string s) { - if (blsIdSetHexStr(ref this, s, (ulong)s.Length) != 0) { - throw new ArgumentException("blsIdSetHexStr:" + s); - } - } - public void SetInt(int x) { - blsIdSetInt(ref this, x); - } - public string GetDecStr() { - StringBuilder sb = new StringBuilder(1024); - ulong size = blsIdGetDecStr(sb, (ulong)sb.Capacity, this); - if (size == 0) { - throw new ArgumentException("blsIdGetDecStr"); - } - return sb.ToString(0, (int)size); - } - public string GetHexStr() { - StringBuilder sb = new StringBuilder(1024); - ulong size = blsIdGetHexStr(sb, (ulong)sb.Capacity, this); - if (size == 0) { - throw new ArgumentException("blsIdGetHexStr"); - } - return sb.ToString(0, (int)size); - } - } - [StructLayout(LayoutKind.Sequential)] - public unsafe struct SecretKey - { - private fixed ulong v[SECRETKEY_UNIT_SIZE]; - public byte[] Serialize() { - byte[] buf = new byte[SECRETKEY_SERIALIZE_SIZE]; - ulong n = blsSecretKeySerialize(buf, (ulong)buf.Length, this); - if (n == 0) { - throw new ArithmeticException("blsSecretKeySerialize"); - } - return buf; - } - public void Deserialize(byte[] buf) { - ulong n = blsSecretKeyDeserialize(ref this, buf, (ulong)buf.Length); - if (n == 0) { - throw new ArithmeticException("blsSecretKeyDeserialize"); - } - } - public bool IsEqual(in SecretKey rhs) { - return blsSecretKeyIsEqual(this, rhs) != 0; - } - public void SetHexStr(string s) { - if (blsSecretKeySetHexStr(ref this, s, (ulong)s.Length) != 0) { - throw new ArgumentException("blsSecretKeySetHexStr:" + s); - } - } - public string GetHexStr() { - StringBuilder sb = new StringBuilder(1024); - ulong size = blsSecretKeyGetHexStr(sb, (ulong)sb.Capacity, this); 
- if (size == 0) { - throw new ArgumentException("mclBnFr_getStr"); - } - return sb.ToString(0, (int)size); - } - public void Add(in SecretKey rhs) { - blsSecretKeyAdd(ref this, rhs); - } - public void SetByCSPRNG() { - blsSecretKeySetByCSPRNG(ref this); - } - public void SetHashOf(string s) { - if (blsHashToSecretKey(ref this, s, (ulong)s.Length) != 0) { - throw new ArgumentException("blsHashToSecretKey"); - } - } - public PublicKey GetPublicKey() { - PublicKey pub; - blsGetPublicKey(ref pub, this); - return pub; - } - public Signature Sign(string m) { - Signature sig; - blsSign(ref sig, this, m, (ulong)m.Length); - return sig; - } - public Signature GetPop() { - Signature sig; - blsGetPop(ref sig, this); - return sig; - } - } - // secretKey = sum_{i=0}^{msk.Length - 1} msk[i] * id^i - public static SecretKey ShareSecretKey(in SecretKey[] msk, in Id id) { - SecretKey sec; - if (blsSecretKeyShare(ref sec, msk[0], (ulong)msk.Length, id) != 0) { - throw new ArgumentException("GetSecretKeyForId:" + id.ToString()); - } - return sec; - } - public static SecretKey RecoverSecretKey(in SecretKey[] secVec, in Id[] idVec) { - SecretKey sec; - if (blsSecretKeyRecover(ref sec, secVec[0], idVec[0], (ulong)secVec.Length) != 0) { - throw new ArgumentException("Recover"); - } - return sec; - } - [StructLayout(LayoutKind.Sequential)] - public unsafe struct PublicKey - { - private fixed ulong v[PUBLICKEY_UNIT_SIZE]; - public byte[] Serialize() { - byte[] buf = new byte[PUBLICKEY_SERIALIZE_SIZE]; - ulong n = blsPublicKeySerialize(buf, (ulong)buf.Length, this); - if (n == 0) { - throw new ArithmeticException("blsPublicKeySerialize"); - } - return buf; - } - public void Deserialize(byte[] buf) { - ulong n = blsPublicKeyDeserialize(ref this, buf, (ulong)buf.Length); - if (n == 0) { - throw new ArithmeticException("blsPublicKeyDeserialize"); - } - } - public bool IsEqual(in PublicKey rhs) { - return blsPublicKeyIsEqual(this, rhs) != 0; - } - public void SetStr(string s) { - if (blsPublicKeySetHexStr(ref this, s, (ulong)s.Length) != 0) { - throw new ArgumentException("blsPublicKeySetStr:" + s); - } - } - public string GetHexStr() { - StringBuilder sb = new StringBuilder(1024); - ulong size = blsPublicKeyGetHexStr(sb, (ulong)sb.Capacity, this); - if (size == 0) { - throw new ArgumentException("blsPublicKeyGetStr"); - } - return sb.ToString(0, (int)size); - } - public void Add(in PublicKey rhs) { - blsPublicKeyAdd(ref this, rhs); - } - public bool Verify(in Signature sig, string m) { - return blsVerify(sig, this, m, (ulong)m.Length) == 1; - } - public bool VerifyPop(in Signature pop) { - return blsVerifyPop(pop, this) == 1; - } - } - // publicKey = sum_{i=0}^{mpk.Length - 1} mpk[i] * id^i - public static PublicKey SharePublicKey(in PublicKey[] mpk, in Id id) { - PublicKey pub; - if (blsPublicKeyShare(ref pub, mpk[0], (ulong)mpk.Length, id) != 0) { - throw new ArgumentException("GetPublicKeyForId:" + id.ToString()); - } - return pub; - } - public static PublicKey RecoverPublicKey(in PublicKey[] pubVec, in Id[] idVec) { - PublicKey pub; - if (blsPublicKeyRecover(ref pub, pubVec[0], idVec[0], (ulong)pubVec.Length) != 0) { - throw new ArgumentException("Recover"); - } - return pub; - } - [StructLayout(LayoutKind.Sequential)] - public unsafe struct Signature - { - private fixed ulong v[SIGNATURE_UNIT_SIZE]; - public byte[] Serialize() { - byte[] buf = new byte[SIGNATURE_SERIALIZE_SIZE]; - ulong n = blsSignatureSerialize(buf, (ulong)buf.Length, this); - if (n == 0) { - throw new 
ArithmeticException("blsSignatureSerialize"); - } - return buf; - } - public void Deserialize(byte[] buf) { - ulong n = blsSignatureDeserialize(ref this, buf, (ulong)buf.Length); - if (n == 0) { - throw new ArithmeticException("blsSignatureDeserialize"); - } - } - public bool IsEqual(in Signature rhs) { - return blsSignatureIsEqual(this, rhs) != 0; - } - public void SetStr(string s) { - if (blsSignatureSetHexStr(ref this, s, (ulong)s.Length) != 0) { - throw new ArgumentException("blsSignatureSetStr:" + s); - } - } - public string GetHexStr() { - StringBuilder sb = new StringBuilder(1024); - ulong size = blsSignatureGetHexStr(sb, (ulong)sb.Capacity, this); - if (size == 0) { - throw new ArgumentException("blsSignatureGetStr"); - } - return sb.ToString(0, (int)size); - } - public void Add(in Signature rhs) { - blsSignatureAdd(ref this, rhs); - } - } - public static Signature RecoverSign(in Signature[] sigVec, in Id[] idVec) { - Signature sig; - if (blsSignatureRecover(ref sig, sigVec[0], idVec[0], (ulong)sigVec.Length) != 0) { - throw new ArgumentException("Recover"); - } - return sig; - } - } -} diff --git a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls.csproj b/vendor/github.com/dexon-foundation/bls/ffi/cs/bls.csproj deleted file mode 100644 index c03afa436..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls.csproj +++ /dev/null @@ -1,97 +0,0 @@ - - - - - Debug - AnyCPU - {E9D06B1B-EA22-4EF4-BA4B-422F7625966D} - Exe - Properties - bls - bls - v4.6.2 - 512 - true - publish\ - true - Disk - false - Foreground - 7 - Days - false - false - true - 0 - 1.0.0.%2a - false - false - true - - - - true - ..\..\bin\ - DEBUG;TRACE - true - full - x64 - prompt - MinimumRecommendedRules.ruleset - 7.2 - false - - - ..\..\bin\ - TRACE - true - pdbonly - x64 - prompt - MinimumRecommendedRules.ruleset - false - true - 7.2 - - - true - - - - - - - - - - - - - - - - - - - - - - False - Microsoft .NET Framework 4.5.2 %28x86 ãŠã‚ˆã³ x64%29 - true - - - False - .NET Framework 3.5 SP1 - false - - - - - \ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls.sln b/vendor/github.com/dexon-foundation/bls/ffi/cs/bls.sln deleted file mode 100644 index 7c3dfba7b..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls.sln +++ /dev/null @@ -1,25 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 15 -VisualStudioVersion = 15.0.28307.539 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "bls", "bls.csproj", "{E9D06B1B-EA22-4EF4-BA4B-422F7625966D}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|x64 = Debug|x64 - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {E9D06B1B-EA22-4EF4-BA4B-422F7625966D}.Debug|x64.ActiveCfg = Debug|x64 - {E9D06B1B-EA22-4EF4-BA4B-422F7625966D}.Debug|x64.Build.0 = Debug|x64 - {E9D06B1B-EA22-4EF4-BA4B-422F7625966D}.Release|x64.ActiveCfg = Release|x64 - {E9D06B1B-EA22-4EF4-BA4B-422F7625966D}.Release|x64.Build.0 = Release|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - SolutionGuid = {1935C301-6478-4F82-9587-6A66B531E327} - EndGlobalSection -EndGlobal diff --git a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls256.cs b/vendor/github.com/dexon-foundation/bls/ffi/cs/bls256.cs deleted file mode 100644 index 3ef5fab9a..000000000 --- 
a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls256.cs +++ /dev/null @@ -1,298 +0,0 @@ -þ½Ž¿using System; -using System.Text; -using System.Runtime.InteropServices; - -namespace mcl { - class BLS256 { - const int IoEcComp = 512; // fixed byte representation - public const int MCLBN_FR_UNIT_SIZE = 4; - public const int MCLBN_FP_UNIT_SIZE = 4; - public const int MCLBN_COMPILED_TIME_VAR = MCLBN_FR_UNIT_SIZE * 10 + MCLBN_FP_UNIT_SIZE; - [DllImport("bls256.dll")] - public static extern int blsInit(int curve, int compiledTimeVar); - - [DllImport("bls256.dll")] public static extern void blsIdSetInt(ref Id id, int x); - [DllImport("bls256.dll")] public static extern int blsIdSetDecStr(ref Id id, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - [DllImport("bls256.dll")] public static extern int blsIdSetHexStr(ref Id id, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - [DllImport("bls256.dll")] public static extern ulong blsIdGetDecStr([Out]StringBuilder buf, ulong maxBufSize, ref Id id); - [DllImport("bls256.dll")] public static extern ulong blsIdGetHexStr([Out]StringBuilder buf, ulong maxBufSize, ref Id id); - - - [DllImport("bls256.dll")] public static extern ulong blsIdSerialize([Out]StringBuilder buf, ulong maxBufSize, ref Id id); - [DllImport("bls256.dll")] public static extern ulong blsSecretKeySerialize([Out]StringBuilder buf, ulong maxBufSize, ref SecretKey sec); - [DllImport("bls256.dll")] public static extern ulong blsPublicKeySerialize([Out]StringBuilder buf, ulong maxBufSize, ref PublicKey pub); - [DllImport("bls256.dll")] public static extern ulong blsSignatureSerialize([Out]StringBuilder buf, ulong maxBufSize, ref Signature sig); - - [DllImport("bls256.dll")] public static extern int blsIdDeserialize(ref Id id, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - [DllImport("bls256.dll")] public static extern int blsSecretKeyDeserialize(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - [DllImport("bls256.dll")] public static extern int blsPublicKeyDeserialize(ref PublicKey pub, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - [DllImport("bls256.dll")] public static extern int blsSignatureDeserialize(ref Signature sig, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - - [DllImport("bls256.dll")] public static extern int blsIdIsEqual(ref Id lhs, ref Id rhs); - [DllImport("bls256.dll")] public static extern int blsSecretKeyIsEqual(ref SecretKey lhs, ref SecretKey rhs); - [DllImport("bls256.dll")] public static extern int blsPublicKeyIsEqual(ref PublicKey lhs, ref PublicKey rhs); - [DllImport("bls256.dll")] public static extern int blsSignatureIsEqual(ref Signature lhs, ref Signature rhs); - - // add - [DllImport("bls256.dll")] public static extern void blsSecretKeyAdd(ref SecretKey sec, ref SecretKey rhs); - [DllImport("bls256.dll")] public static extern void blsPublicKeyAdd(ref PublicKey pub, ref PublicKey rhs); - [DllImport("bls256.dll")] public static extern void blsSignatureAdd(ref Signature sig, ref Signature rhs); - - // hash buf and set - [DllImport("bls256.dll")] public static extern int blsHashToSecretKey(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - /* - set secretKey if system has /dev/urandom or CryptGenRandom - return 0 if success else -1 - */ - [DllImport("bls256.dll")] public static extern int blsSecretKeySetByCSPRNG(ref SecretKey sec); - - [DllImport("bls256.dll")] public static extern void blsGetPublicKey(ref 
PublicKey pub, ref SecretKey sec); - [DllImport("bls256.dll")] public static extern void blsGetPop(ref Signature sig, ref SecretKey sec); - - // return 0 if success - [DllImport("bls256.dll")] public static extern int blsSecretKeyShare(ref SecretKey sec, ref SecretKey msk, ulong k, ref Id id); - [DllImport("bls256.dll")] public static extern int blsPublicKeyShare(ref PublicKey pub, ref PublicKey mpk, ulong k, ref Id id); - - - [DllImport("bls256.dll")] public static extern int blsSecretKeyRecover(ref SecretKey sec, ref SecretKey secVec, ref Id idVec, ulong n); - [DllImport("bls256.dll")] public static extern int blsPublicKeyRecover(ref PublicKey pub, ref PublicKey pubVec, ref Id idVec, ulong n); - [DllImport("bls256.dll")] public static extern int blsSignatureRecover(ref Signature sig, ref Signature sigVec, ref Id idVec, ulong n); - - [DllImport("bls256.dll")] public static extern void blsSign(ref Signature sig, ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string m, ulong size); - - // return 1 if valid - [DllImport("bls256.dll")] public static extern int blsVerify(ref Signature sig, ref PublicKey pub, [In][MarshalAs(UnmanagedType.LPStr)] string m, ulong size); - [DllImport("bls256.dll")] public static extern int blsVerifyPop(ref Signature sig, ref PublicKey pub); - - ////////////////////////////////////////////////////////////////////////// - // the following apis will be removed - - // mask buf with (1 << (bitLen(r) - 1)) - 1 if buf >= r - [DllImport("bls256.dll")] public static extern int blsIdSetLittleEndian(ref Id id, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - /* - return written byte size if success else 0 - */ - [DllImport("bls256.dll")] public static extern ulong blsIdGetLittleEndian([Out]StringBuilder buf, ulong maxBufSize, ref Id id); - - // return 0 if success - // mask buf with (1 << (bitLen(r) - 1)) - 1 if buf >= r - [DllImport("bls256.dll")] public static extern int blsSecretKeySetLittleEndian(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - [DllImport("bls256.dll")] public static extern int blsSecretKeySetDecStr(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - [DllImport("bls256.dll")] public static extern int blsSecretKeySetHexStr(ref SecretKey sec, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - /* - return written byte size if success else 0 - */ - [DllImport("bls256.dll")] public static extern ulong blsSecretKeyGetLittleEndian([Out]StringBuilder buf, ulong maxBufSize, ref SecretKey sec); - /* - return strlen(buf) if success else 0 - buf is '\0' terminated - */ - [DllImport("bls256.dll")] public static extern ulong blsSecretKeyGetDecStr([Out]StringBuilder buf, ulong maxBufSize, ref SecretKey sec); - [DllImport("bls256.dll")] public static extern ulong blsSecretKeyGetHexStr([Out]StringBuilder buf, ulong maxBufSize, ref SecretKey sec); - [DllImport("bls256.dll")] public static extern int blsPublicKeySetHexStr(ref PublicKey pub, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - [DllImport("bls256.dll")] public static extern ulong blsPublicKeyGetHexStr([Out]StringBuilder buf, ulong maxBufSize, ref PublicKey pub); - [DllImport("bls256.dll")] public static extern int blsSignatureSetHexStr(ref Signature sig, [In][MarshalAs(UnmanagedType.LPStr)] string buf, ulong bufSize); - [DllImport("bls256.dll")] public static extern ulong blsSignatureGetHexStr([Out]StringBuilder buf, ulong maxBufSize, ref Signature sig); - - public static void 
Init() - { - const int CurveFp254BNb = 0; - if (!System.Environment.Is64BitProcess) { - throw new PlatformNotSupportedException("not 64-bit system"); - } - int err = blsInit(CurveFp254BNb, MCLBN_COMPILED_TIME_VAR); - if (err != 0) { - throw new ArgumentException("blsInit"); - } - } - - public struct Id { - private ulong v0, v1, v2, v3; - public bool IsEqual(Id rhs) - { - return blsIdIsEqual(ref this, ref rhs) != 0; - } - public void SetDecStr(String s) - { - if (blsIdSetDecStr(ref this, s, (ulong)s.Length) != 0) { - throw new ArgumentException("blsIdSetDecSt:" + s); - } - } - public void SetHexStr(String s) - { - if (blsIdSetHexStr(ref this, s, (ulong)s.Length) != 0) { - throw new ArgumentException("blsIdSetHexStr:" + s); - } - } - public void SetInt(int x) - { - blsIdSetInt(ref this, x); - } - public string GetDecStr() - { - StringBuilder sb = new StringBuilder(1024); - ulong size = blsIdGetDecStr(sb, (ulong)sb.Capacity, ref this); - if (size == 0) { - throw new ArgumentException("blsIdGetDecStr"); - } - return sb.ToString(0, (int)size); - } - public string GetHexStr() - { - StringBuilder sb = new StringBuilder(1024); - ulong size = blsIdGetHexStr(sb, (ulong)sb.Capacity, ref this); - if (size == 0) { - throw new ArgumentException("blsIdGetHexStr"); - } - return sb.ToString(0, (int)size); - } - } - public struct SecretKey { - private ulong v0, v1, v2, v3; - public bool IsEqual(SecretKey rhs) - { - return blsSecretKeyIsEqual(ref this, ref rhs) != 0; - } - public void SetHexStr(String s) - { - if (blsSecretKeySetHexStr(ref this, s, (ulong)s.Length) != 0) { - throw new ArgumentException("blsSecretKeySetHexStr:" + s); - } - } - public string GetHexStr() - { - StringBuilder sb = new StringBuilder(1024); - ulong size = blsSecretKeyGetHexStr(sb, (ulong)sb.Capacity, ref this); - if (size == 0) { - throw new ArgumentException("mclBnFr_getStr"); - } - return sb.ToString(0, (int)size); - } - public void Add(SecretKey rhs) - { - blsSecretKeyAdd(ref this, ref rhs); - } - public void SetByCSPRNG() - { - blsSecretKeySetByCSPRNG(ref this); - } - public void SetHashOf(string s) - { - if (blsHashToSecretKey(ref this, s, (ulong)s.Length) != 0) { - throw new ArgumentException("blsHashToSecretKey"); - } - } - public PublicKey GetPublicKey() - { - PublicKey pub = new PublicKey(); - blsGetPublicKey(ref pub, ref this); - return pub; - } - public Signature Signature(String m) - { - Signature Signature = new Signature(); - blsSign(ref Signature, ref this, m, (ulong)m.Length); - return Signature; - } - } - // secretKey = sum_{i=0}^{msk.Length - 1} msk[i] * id^i - public static SecretKey ShareSecretKey(SecretKey[] msk, Id id) - { - SecretKey sec = new SecretKey(); - if (blsSecretKeyShare(ref sec, ref msk[0], (ulong)msk.Length, ref id) != 0) { - throw new ArgumentException("GetSecretKeyForId:" + id.ToString()); - } - return sec; - } - public static SecretKey RecoverSecretKey(SecretKey[] secs, Id[] ids) - { - SecretKey sec = new SecretKey(); - if (blsSecretKeyRecover(ref sec, ref secs[0], ref ids[0], (ulong)secs.Length) != 0) { - throw new ArgumentException("Recover"); - } - return sec; - } - public struct PublicKey { - private ulong v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11; - private ulong v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23; - public bool IsEqual(PublicKey rhs) - { - return blsPublicKeyIsEqual(ref this, ref rhs) != 0; - } - public void SetStr(String s) - { - if (blsPublicKeySetHexStr(ref this, s, (ulong)s.Length) != 0) { - throw new ArgumentException("blsPublicKeySetStr:" + 
s); - } - } - public string GetHexStr() - { - StringBuilder sb = new StringBuilder(1024); - ulong size = blsPublicKeyGetHexStr(sb, (ulong)sb.Capacity, ref this); - if (size == 0) { - throw new ArgumentException("blsPublicKeyGetStr"); - } - return sb.ToString(0, (int)size); - } - public void Add(PublicKey rhs) - { - blsPublicKeyAdd(ref this, ref rhs); - } - public bool Verify(Signature Signature, string m) - { - return blsVerify(ref Signature, ref this, m, (ulong)m.Length) == 1; - } - } - // publicKey = sum_{i=0}^{mpk.Length - 1} mpk[i] * id^i - public static PublicKey SharePublicKey(PublicKey[] mpk, Id id) - { - PublicKey pub = new PublicKey(); - if (blsPublicKeyShare(ref pub, ref mpk[0], (ulong)mpk.Length, ref id) != 0) { - throw new ArgumentException("GetPublicKeyForId:" + id.ToString()); - } - return pub; - } - public static PublicKey RecoverPublicKey(PublicKey[] pubs, Id[] ids) - { - PublicKey pub = new PublicKey(); - if (blsPublicKeyRecover(ref pub, ref pubs[0], ref ids[0], (ulong)pubs.Length) != 0) { - throw new ArgumentException("Recover"); - } - return pub; - } - public struct Signature { - private ulong v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11; - public bool IsEqual(Signature rhs) - { - return blsSignatureIsEqual(ref this, ref rhs) != 0; - } - public void SetStr(String s) - { - if (blsSignatureSetHexStr(ref this, s, (ulong)s.Length) != 0) { - throw new ArgumentException("blsSignatureSetStr:" + s); - } - } - public string GetHexStr() - { - StringBuilder sb = new StringBuilder(1024); - ulong size = blsSignatureGetHexStr(sb, (ulong)sb.Capacity, ref this); - if (size == 0) { - throw new ArgumentException("blsSignatureGetStr"); - } - return sb.ToString(0, (int)size); - } - public void Add(Signature rhs) - { - blsSignatureAdd(ref this, ref rhs); - } - } - public static Signature RecoverSign(Signature[] signs, Id[] ids) - { - Signature Signature = new Signature(); - if (blsSignatureRecover(ref Signature, ref signs[0], ref ids[0], (ulong)signs.Length) != 0) { - throw new ArgumentException("Recover"); - } - return Signature; - } - } -} diff --git a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls256.csproj b/vendor/github.com/dexon-foundation/bls/ffi/cs/bls256.csproj deleted file mode 100644 index 032a1d347..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls256.csproj +++ /dev/null @@ -1,62 +0,0 @@ - - - - - Debug - AnyCPU - {E9D06B1B-EA22-4EF4-BA4B-422F7625966C} - Exe - Properties - bls256 - bls256 - v4.5.2 - 512 - true - - - true - ..\..\bin\ - DEBUG;TRACE - false - full - x64 - prompt - MinimumRecommendedRules.ruleset - - - ..\..\bin\ - TRACE - true - pdbonly - x64 - prompt - MinimumRecommendedRules.ruleset - true - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls256.sln b/vendor/github.com/dexon-foundation/bls/ffi/cs/bls256.sln deleted file mode 100644 index eb29af97b..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls256.sln +++ /dev/null @@ -1,22 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 14 -VisualStudioVersion = 14.0.25420.1 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBD}") = "bls256", "bls256.csproj", "{E9D06B1B-EA22-4EF4-BA4B-422F7625966C}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|x64 = Debug|x64 - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - 
{E9D06B1B-EA22-4EF4-BA4B-422F7625966C}.Debug|x64.ActiveCfg = Debug|x64 - {E9D06B1B-EA22-4EF4-BA4B-422F7625966C}.Debug|x64.Build.0 = Debug|x64 - {E9D06B1B-EA22-4EF4-BA4B-422F7625966C}.Release|x64.ActiveCfg = Release|x64 - {E9D06B1B-EA22-4EF4-BA4B-422F7625966C}.Release|x64.Build.0 = Release|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal diff --git a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls256_test.cs b/vendor/github.com/dexon-foundation/bls/ffi/cs/bls256_test.cs deleted file mode 100644 index 989993e0f..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls256_test.cs +++ /dev/null @@ -1,126 +0,0 @@ -using System; - -namespace mcl { - using static BLS256; - class BLS256Test { - static int err = 0; - static void assert(string msg, bool b) - { - if (b) return; - Console.WriteLine("ERR {0}", msg); - err++; - } - static void TestId() - { - Console.WriteLine("TestId"); - Id id = new Id(); - id.SetDecStr("255"); - assert("GetStr(10)", id.GetDecStr() == "255"); - assert("GetStr(16)", id.GetHexStr() == "ff"); - } - static void TestSecretKey() - { - Console.WriteLine("TestSecretKey"); - SecretKey sec = new SecretKey(); - sec.SetHexStr("ff"); - assert("GetHexStr()", sec.GetHexStr() == "ff"); - { - SecretKey sec2 = new SecretKey(); - sec.SetHexStr("321"); - sec2.SetHexStr("4000"); - sec.Add(sec2); - assert("sec.Add", sec.GetHexStr() == "4321"); - sec.SetByCSPRNG(); - Console.WriteLine("sec.Init={0}", sec.GetHexStr()); - } - } - static void TestPublicKey() - { - Console.WriteLine("TestPublicKey"); - SecretKey sec = new SecretKey(); - sec.SetByCSPRNG(); - PublicKey pub = sec.GetPublicKey(); - String s = pub.GetHexStr(); - Console.WriteLine("pub={0}", s); - PublicKey pub2 = new PublicKey(); - pub2.SetStr(s); - assert("pub.SetStr", pub.IsEqual(pub2)); - } - static void TestSign() - { - Console.WriteLine("TestSign"); - SecretKey sec = new SecretKey(); - sec.SetByCSPRNG(); - PublicKey pub = sec.GetPublicKey(); - String m = "abc"; - Signature sig = sec.Signature(m); - assert("verify", pub.Verify(sig, m)); - assert("not verify", !pub.Verify(sig, m + "a")); - } - static void TestSharing() - { - Console.WriteLine("TestSharing"); - int k = 5; - SecretKey[] msk = new SecretKey[k]; - PublicKey[] mpk = new PublicKey[k]; - // make master secretkey - for (int i = 0; i < k; i++) { - msk[i].SetByCSPRNG(); - mpk[i] = msk[i].GetPublicKey(); - } - int n = 30; - Id[] ids = new Id[n]; - SecretKey[] secs = new SecretKey[n]; - PublicKey[] pubs = new PublicKey[n]; - for (int i = 0; i < n; i++) { - ids[i].SetInt(i * i + 123); - secs[i] = ShareSecretKey(msk, ids[i]); - pubs[i] = SharePublicKey(mpk, ids[i]); - assert("share publicKey", secs[i].GetPublicKey().IsEqual(pubs[i])); - } - string m = "doremi"; - for (int i = 0; i < n; i++) { - Signature Signature = secs[i].Signature(m); - assert("Signature.Verify", pubs[i].Verify(Signature, m)); - } - { - int[] idxTbl = { 0, 2, 5, 8, 10 }; - assert("idxTbl.Length=k", idxTbl.Length == k); - Id[] subIds = new Id[k]; - SecretKey[] subSecs = new SecretKey[k]; - PublicKey[] subPubs = new PublicKey[k]; - Signature[] subSigns = new Signature[k]; - for (int i = 0; i < k; i++) { - int idx = idxTbl[i]; - subIds[i] = ids[idx]; - subSecs[i] = secs[idx]; - subPubs[i] = pubs[idx]; - subSigns[i] = secs[idx].Signature(m); - } - SecretKey sec = RecoverSecretKey(subSecs, subIds); - PublicKey pub = RecoverPublicKey(subPubs, subIds); - assert("check pub", pub.IsEqual(sec.GetPublicKey())); - Signature 
Signature = RecoverSign(subSigns, subIds); - assert("Signature.verify", pub.Verify(Signature, m)); - } - } - static void Main(string[] args) - { - try { - Init(); - TestId(); - TestSecretKey(); - TestPublicKey(); - TestSign(); - TestSharing(); - if (err == 0) { - Console.WriteLine("all tests succeed"); - } else { - Console.WriteLine("err={0}", err); - } - } catch (Exception e) { - Console.WriteLine("ERR={0}", e); - } - } - } -} diff --git a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls_test.cs b/vendor/github.com/dexon-foundation/bls/ffi/cs/bls_test.cs deleted file mode 100644 index 2eb451ba9..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/cs/bls_test.cs +++ /dev/null @@ -1,176 +0,0 @@ -using System; - -namespace mcl -{ - using static BLS; - class BLSTest - { - static int err = 0; - static void assert(string msg, bool b) { - if (b) return; - Console.WriteLine("ERR {0}", msg); - err++; - } - static void TestId() { - Console.WriteLine("TestId"); - Id id1; - id1.SetDecStr("255"); - assert("GetStr(10)", id1.GetDecStr() == "255"); - assert("GetStr(16)", id1.GetHexStr() == "ff"); - Id id2; - id2.SetInt(255); - assert("IsEqual", id1.IsEqual(id2)); - } - static void TestSecretKey() { - Console.WriteLine("TestSecretKey"); - SecretKey sec; - sec.SetHexStr("ff"); - assert("GetHexStr()", sec.GetHexStr() == "ff"); - { - SecretKey sec2; - sec.SetHexStr("321"); - sec2.SetHexStr("4000"); - sec.Add(sec2); - assert("sec.Add", sec.GetHexStr() == "4321"); - sec.SetByCSPRNG(); - Console.WriteLine("sec.Init={0}", sec.GetHexStr()); - } - { - SecretKey sec2; - byte[] buf = sec.Serialize(); - sec2.Deserialize(buf); - assert("serialize", sec2.IsEqual(sec)); - } - } - static void TestPublicKey() { - Console.WriteLine("TestPublicKey"); - SecretKey sec; - sec.SetByCSPRNG(); - PublicKey pub = sec.GetPublicKey(); - string s = pub.GetHexStr(); - Console.WriteLine("pub={0}", s); - { - PublicKey pub2; - pub2.SetStr(s); - assert("pub.SetStr", pub.IsEqual(pub2)); - } - { - PublicKey pub2; - byte[] buf = pub.Serialize(); - pub2.Deserialize(buf); - assert("serialize", pub2.IsEqual(pub)); - } - } - static void TestSign() { - Console.WriteLine("TestSign"); - SecretKey sec; - sec.SetByCSPRNG(); - PublicKey pub = sec.GetPublicKey(); - string m = "abc"; - Signature sig = sec.Sign(m); - Console.WriteLine("sig={0}", sig.GetHexStr()); - assert("verify", pub.Verify(sig, m)); - assert("not verify", !pub.Verify(sig, m + "a")); - { - Signature sig2; - byte[] buf = sig.Serialize(); - sig2.Deserialize(buf); - assert("serialize", sig2.IsEqual(sig)); - } - } - static void TestSharing() { - Console.WriteLine("TestSharing"); - int k = 5; - SecretKey[] msk = new SecretKey[k]; - PublicKey[] mpk = new PublicKey[k]; - // make master secretkey - for (int i = 0; i < k; i++) { - msk[i].SetByCSPRNG(); - mpk[i] = msk[i].GetPublicKey(); - } - int n = 30; - Id[] ids = new Id[n]; - SecretKey[] secs = new SecretKey[n]; - PublicKey[] pubs = new PublicKey[n]; - for (int i = 0; i < n; i++) { - ids[i].SetInt(i * i + 123); - secs[i] = ShareSecretKey(msk, ids[i]); - pubs[i] = SharePublicKey(mpk, ids[i]); - assert("share publicKey", secs[i].GetPublicKey().IsEqual(pubs[i])); - } - string m = "doremi"; - for (int i = 0; i < n; i++) { - Signature Signature = secs[i].Sign(m); - assert("Signature.Verify", pubs[i].Verify(Signature, m)); - } - { - int[] idxTbl = { 0, 2, 5, 8, 10 }; - assert("idxTbl.Length=k", idxTbl.Length == k); - Id[] subIds = new Id[k]; - SecretKey[] subSecs = new SecretKey[k]; - PublicKey[] subPubs = new PublicKey[k]; - Signature[] 
subSigns = new Signature[k]; - for (int i = 0; i < k; i++) { - int idx = idxTbl[i]; - subIds[i] = ids[idx]; - subSecs[i] = secs[idx]; - subPubs[i] = pubs[idx]; - subSigns[i] = secs[idx].Sign(m); - } - SecretKey sec = RecoverSecretKey(subSecs, subIds); - PublicKey pub = RecoverPublicKey(subPubs, subIds); - assert("check pub", pub.IsEqual(sec.GetPublicKey())); - Signature Signature = RecoverSign(subSigns, subIds); - assert("Signature.verify", pub.Verify(Signature, m)); - } - } - static void TestAggregate() { - Console.WriteLine("TestAggregate"); - const int n = 10; - const string m = "abc"; - SecretKey[] secVec = new SecretKey[n]; - PublicKey[] pubVec = new PublicKey[n]; - Signature[] popVec = new Signature[n]; - Signature[] sigVec = new Signature[n]; - for (int i = 0; i < n; i++) { - secVec[i].SetByCSPRNG(); - pubVec[i] = secVec[i].GetPublicKey(); - popVec[i] = secVec[i].GetPop(); - sigVec[i] = secVec[i].Sign(m); - } - SecretKey secAgg; - PublicKey pubAgg; - Signature sigAgg; - for (int i = 0; i < n; i++) { - secAgg.Add(secVec[i]); - assert("verify pop", pubVec[i].VerifyPop(popVec[i])); - pubAgg.Add(pubVec[i]); - sigAgg.Add(sigVec[i]); - } - assert("aggregate sec", secAgg.Sign(m).IsEqual(sigAgg)); - assert("aggregate", pubAgg.Verify(sigAgg, m)); - } - static void Main(string[] args) { - try { - int[] curveTypeTbl = { BN254, BLS12_381 }; - foreach (int curveType in curveTypeTbl) { - Console.WriteLine("curveType={0}", curveType); - Init(curveType); - TestId(); - TestSecretKey(); - TestPublicKey(); - TestSign(); - TestSharing(); - TestAggregate(); - if (err == 0) { - Console.WriteLine("all tests succeed"); - } else { - Console.WriteLine("err={0}", err); - } - } - } catch (Exception e) { - Console.WriteLine("ERR={0}", e); - } - } - } -} diff --git a/vendor/github.com/dexon-foundation/bls/ffi/cs/readme-ja.md b/vendor/github.com/dexon-foundation/bls/ffi/cs/readme-ja.md deleted file mode 100644 index 199135725..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/cs/readme-ja.md +++ /dev/null @@ -1,188 +0,0 @@ -# C# binding of the BLS signature library - -# Requirements - -* Visual Studio 2017 (x64) or later -* C# 7.2 or later -* .NET Framework 4.5.2 or later - -# How to build the DLL - -Open a 64-bit command prompt for Visual Studio 2017 and run: -``` -md work -cd work -git clone https://github.com/herumi/cybozulib_ext -git clone https://github.com/herumi/mcl -git clone https://github.com/herumi/bls -cd bls -mklib dll -``` -`bls/bin/*.dll` are created. - -# How to build the sample - -Open bls/ffi/cs/bls.sln and run it. - -* Note: bls256.sln is obsolete; do not use it. - -# Classes and API - -## API - -* `Init(int curveType = BN254);` - * initialize the library with the curve curveType. - * curveType = BN254 or BLS12_381 -* `SecretKey ShareSecretKey(in SecretKey[] msk, in Id id);` - * generate (share) the secret key for id from the sequence of master secret keys msk. -* `SecretKey RecoverSecretKey(in SecretKey[] secVec, in Id[] idVec);` - * recover the secret key from the pairs of secret keys secVec and IDs idVec. -* `PublicKey SharePublicKey(in PublicKey[] mpk, in Id id);` - * generate (share) the public key for id from the sequence of master public keys mpk. -* `PublicKey RecoverPublicKey(in PublicKey[] pubVec, in Id[] idVec);` - * recover the public key from the pairs of public keys pubVec and IDs idVec. -* `Signature RecoverSign(in Signature[] sigVec, in Id[] idVec);` - * recover the signature from the pairs of signatures sigVec and IDs idVec. - -## Id - -Identifier class - -* `byte[] Serialize();` - * serialize the Id. -* `void Deserialize(byte[] buf);` - * deserialize the Id from the byte sequence buf. -* `bool IsEqual(in Id rhs);` - * equality test. -* `void SetDecStr(string s);` - * set from a decimal string.
-* `void SetHexStr(string s);` - * set from a hexadecimal string. -* `void SetInt(int x);` - * set the integer x. -* `string GetDecStr();` - * get the decimal representation. -* `string GetHexStr();` - * get the hexadecimal representation. - -## SecretKey - -* `byte[] Serialize();` - * serialize the SecretKey. -* `void Deserialize(byte[] buf);` - * deserialize the SecretKey from the byte sequence buf. -* `bool IsEqual(in SecretKey rhs);` - * equality test. -* `void SetHexStr(string s);` - * set from a hexadecimal string. -* `string GetHexStr();` - * get the hexadecimal representation. -* `void Add(in SecretKey rhs);` - * add the secret key rhs. -* `void SetByCSPRNG();` - * set by a cryptographically secure random number generator. -* `void SetHashOf(string s);` - * set to the hash value of the string s. -* `PublicKey GetPublicKey();` - * get the corresponding public key. -* `Signature Sign(string m);` - * create a signature of the string m. -* `Signature GetPop();` - * create a proof of possession (PoP), a self-signature by the secret key. - -## PublicKey - -* `byte[] Serialize();` - * serialize the PublicKey. -* `void Deserialize(byte[] buf);` - * deserialize the PublicKey from the byte sequence buf. -* `bool IsEqual(in PublicKey rhs);` - * equality test. -* `void Add(in PublicKey rhs);` - * add the public key rhs. -* `void SetHexStr(string s);` - * set from a hexadecimal string. -* `string GetHexStr();` - * get the hexadecimal representation. -* `bool Verify(in Signature sig, string m);` - * verify the validity of the signature sig for the string m. -* `bool VerifyPop(in Signature pop);` - * verify the validity of the PoP. - -## Signature - -* `byte[] Serialize();` - * serialize the Signature. -* `void Deserialize(byte[] buf);` - * deserialize the Signature from the byte sequence buf. -* `bool IsEqual(in Signature rhs);` - * equality test. -* `void Add(in Signature rhs);` - * add the signature rhs. -* `void SetHexStr(string s);` - * set from a hexadecimal string. -* `string GetHexStr();` - * get the hexadecimal representation. - -## Usage - -### Minimal sample - -``` -using static BLS; - -Init(BN254); // initialize the library -SecretKey sec; -sec.SetByCSPRNG(); // initialize the secret key -PublicKey pub = sec.GetPublicKey(); // get the public key -string m = "abc"; -Signature sig = sec.Sign(m); // create a signature -if (pub.Verify(sig, m)) { - // the signature is valid -} -``` - -### Aggregate signatures -``` -Init(BN254); // initialize the library -const int n = 10; -const string m = "abc"; -SecretKey[] secVec = new SecretKey[n]; -PublicKey[] pubVec = new PublicKey[n]; -Signature[] popVec = new Signature[n]; -Signature[] sigVec = new Signature[n]; - -for (int i = 0; i < n; i++) { - secVec[i].SetByCSPRNG(); // initialize the secret key - pubVec[i] = secVec[i].GetPublicKey(); // get the public key - popVec[i] = secVec[i].GetPop(); // proof of possession (PoP) - sigVec[i] = secVec[i].Sign(m); // sign -} - -SecretKey secAgg; -PublicKey pubAgg; -Signature sigAgg; -for (int i = 0; i < n; i++) { - // verify the PoP; bail out if it is invalid - if (!pubVec[i].VerifyPop(popVec[i])) { - // error - return; - } - pubAgg.Add(pubVec[i]); // aggregate the public keys - sigAgg.Add(sigVec[i]); // aggregate the signatures -} -if (pubAgg.Verify(sigAgg, m)) { - // the aggregated signature is valid -} -``` - -# License - -modified new BSD License -http://opensource.org/licenses/BSD-3-Clause - -# Author - -(C)2019 MITSUNARI Shigeo (herumi@nifty.com) All rights reserved.
-The copyright of this content, and any trademarks, organization names, logos, products, -and services appearing in it, belong to their respective rights holders. diff --git a/vendor/github.com/dexon-foundation/bls/ffi/cs/readme.md b/vendor/github.com/dexon-foundation/bls/ffi/cs/readme.md deleted file mode 100644 index 2b7191871..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/cs/readme.md +++ /dev/null @@ -1,185 +0,0 @@ -# C# binding of BLS threshold signature library - -# Installation Requirements - -* Visual Studio 2017 or later -* C# 7.2 or later -* .NET Framework 4.5.2 or later - -# How to build - -``` -md work -cd work -git clone https://github.com/herumi/cybozulib_ext -git clone https://github.com/herumi/mcl -git clone https://github.com/herumi/bls -cd bls -mklib dll -``` -bls/bin/*.dll are created - -# How to build a sample - -Open bls/ffi/cs/bls.sln and run it. - -* Remark: bls256.sln is obsolete; please use bls.sln. - -# Classes and API - -## API - -* `Init(int curveType = BN254);` - * initialize this library with a curve `curveType`. - * curveType = BN254 or BLS12_381 -* `SecretKey ShareSecretKey(in SecretKey[] msk, in Id id);` - * generate the shared secret key from a sequence of master secret keys msk and Id. -* `SecretKey RecoverSecretKey(in SecretKey[] secVec, in Id[] idVec);` - * recover the secret key from a sequence of secret keys secVec and idVec. -* `PublicKey SharePublicKey(in PublicKey[] mpk, in Id id);` - * generate the shared public key from a sequence of master public keys mpk and Id. -* `PublicKey RecoverPublicKey(in PublicKey[] pubVec, in Id[] idVec);` - * recover the public key from a sequence of public keys pubVec and idVec. -* `Signature RecoverSign(in Signature[] sigVec, in Id[] idVec);` - * recover the signature from a sequence of signatures sigVec and idVec (the sharing relation behind these share/recover APIs is sketched below).
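The share/recover APIs above are plain k-of-n polynomial secret sharing: a share is the master-key polynomial evaluated at the member's Id (this is the `secretKey = sum_{i=0}^{msk.Length - 1} msk[i] * id^i` comment in bls.cs), and recovery is Lagrange interpolation at zero. As a sketch, assuming the IDs are nonzero and pairwise distinct:

```
% Sharing: member j receives the master polynomial evaluated at its Id.
f(x) = \sum_{i=0}^{k-1} \mathrm{msk}[i]\, x^{i}, \qquad \mathrm{sec}_j = f(\mathrm{id}_j)
% Recovery: any k shares reconstruct f(0) = msk[0] by interpolation at 0.
\mathrm{msk}[0] = f(0) = \sum_{j=1}^{k} \mathrm{sec}_j
\prod_{\substack{l=1 \\ l \ne j}}^{k} \frac{\mathrm{id}_l}{\mathrm{id}_l - \mathrm{id}_j}
```

The same interpolation applied to public keys and signatures gives RecoverPublicKey and RecoverSign, which is why any k of the n members suffice in the sharing tests earlier in this patch.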
-
-## Id
-
-Identifier class
-
-* `byte[] Serialize();`
-    * serialize Id
-* `void Deserialize(byte[] buf);`
-    * deserialize from byte[] buf
-* `bool IsEqual(in Id rhs);`
-    * equality
-* `void SetDecStr(string s);`
-    * set by a decimal string s
-* `void SetHexStr(string s);`
-    * set by a hexadecimal string s
-* `void SetInt(int x);`
-    * set an integer x
-* `string GetDecStr();`
-    * get a decimal string
-* `string GetHexStr();`
-    * get a hexadecimal string
-
-## SecretKey
-
-* `byte[] Serialize();`
-    * serialize SecretKey
-* `void Deserialize(byte[] buf);`
-    * deserialize from byte[] buf
-* `bool IsEqual(in SecretKey rhs);`
-    * equality
-* `string GetDecStr();`
-    * get a decimal string
-* `string GetHexStr();`
-    * get a hexadecimal string
-* `void Add(in SecretKey rhs);`
-    * add a secret key rhs
-* `void SetByCSPRNG();`
-    * set a secret key by a cryptographically secure pseudo-random number generator
-* `void SetHashOf(string s);`
-    * set a secret key by a hash of the string s
-* `PublicKey GetPublicKey();`
-    * get the public key corresponding to a secret key
-* `Signature Sign(string m);`
-    * sign a string m
-* `Signature GetPop();`
-    * get a PoP (Proof of Possession) for a secret key
-
-## PublicKey
-
-* `byte[] Serialize();`
-    * serialize PublicKey
-* `void Deserialize(byte[] buf);`
-    * deserialize from byte[] buf
-* `bool IsEqual(in PublicKey rhs);`
-    * equality
-* `void Add(in PublicKey rhs);`
-    * add a public key rhs
-* `string GetDecStr();`
-    * get a decimal string
-* `string GetHexStr();`
-    * get a hexadecimal string
-* `bool Verify(in Signature sig, string m);`
-    * verify the validity of the signature sig on the string m
-* `bool VerifyPop(in Signature pop);`
-    * verify the validity of the PoP
-
-## Signature
-
-* `byte[] Serialize();`
-    * serialize Signature
-* `void Deserialize(byte[] buf);`
-    * deserialize from byte[] buf
-* `bool IsEqual(in Signature rhs);`
-    * equality
-* `void Add(in Signature rhs);`
-    * add a signature rhs
-* `string GetDecStr();`
-    * get a decimal string
-* `string GetHexStr();`
-    * get a hexadecimal string
-
-## How to use
-
-### A minimal sample
-
-```
-using static BLS;
-
-Init(BN254); // initialize the library
-SecretKey sec;
-sec.SetByCSPRNG(); // initialize the secret key
-PublicKey pub = sec.GetPublicKey(); // get the public key
-string m = "abc";
-Signature sig = sec.Sign(m); // create a signature
-if (pub.Verify(sig, m)) {
-    // the signature is valid
-}
-```
-
-### Aggregate signature
-```
-Init(BN254); // initialize the library
-const int n = 10;
-const string m = "abc";
-SecretKey[] secVec = new SecretKey[n];
-PublicKey[] pubVec = new PublicKey[n];
-Signature[] popVec = new Signature[n];
-Signature[] sigVec = new Signature[n];
-
-for (int i = 0; i < n; i++) {
-    secVec[i].SetByCSPRNG(); // initialize a secret key
-    pubVec[i] = secVec[i].GetPublicKey(); // get the public key
-    popVec[i] = secVec[i].GetPop(); // get a proof of possession (PoP)
-    sigVec[i] = secVec[i].Sign(m); // create a signature
-}
-
-SecretKey secAgg;
-PublicKey pubAgg;
-Signature sigAgg;
-for (int i = 0; i < n; i++) {
-    // verify the PoP
-    if (!pubVec[i].VerifyPop(popVec[i])) {
-        // error
-        return;
-    }
-    pubAgg.Add(pubVec[i]); // aggregate the public keys
-    sigAgg.Add(sigVec[i]); // aggregate the signatures
-}
-if (pubAgg.Verify(sigAgg, m)) {
-    // the aggregated signature is valid
-}
-```
-
-# License
-
-modified new BSD License
-http://opensource.org/licenses/BSD-3-Clause
-
-# Author
-
-(C)2019 MITSUNARI Shigeo(herumi@nifty.com) All rights reserved.
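For orientation: the k-of-n threshold flow the readme above describes maps one-to-one onto the Go binding whose deletion follows. The sketch below is editorial, not part of the patch; it assumes the pre-rebrand vendored import path `github.com/dexon-foundation/bls/ffi/go/bls` (the path being removed here) and uses only identifiers defined in the deleted bls.go and mcl.go (`Init`, `SetByCSPRNG`, `GetMasterSecretKey`, `Set`, `Sign`, `Recover`, `Verify`).

```
package main

import (
	"fmt"

	bls "github.com/dexon-foundation/bls/ffi/go/bls" // assumed pre-rebrand path
)

func main() {
	// Initialize the library once before any other call (not thread safe).
	if err := bls.Init(bls.CurveFp254BNb); err != nil {
		panic(err)
	}

	const k = 3 // threshold
	const n = 5 // number of shares

	// Master secret key: k coefficients of a random degree-(k-1) polynomial.
	var msk0 bls.SecretKey
	msk0.SetByCSPRNG()
	msk := msk0.GetMasterSecretKey(k)

	// Derive n shares, one per distinct nonzero participant ID.
	ids := make([]bls.ID, n)
	shares := make([]bls.SecretKey, n)
	for i := 0; i < n; i++ {
		if err := ids[i].SetLittleEndian([]byte{byte(i + 1), 0, 0, 0, 0, 0}); err != nil {
			panic(err)
		}
		if err := shares[i].Set(msk, &ids[i]); err != nil {
			panic(err)
		}
	}

	// Any k participants sign; their partial signatures recover the group signature.
	m := "threshold message"
	sigs := make([]bls.Sign, k)
	for i := 0; i < k; i++ {
		sigs[i] = *shares[i].Sign(m)
	}
	var groupSig bls.Sign
	if err := groupSig.Recover(sigs, ids[:k]); err != nil {
		panic(err)
	}

	// The recovered signature verifies under the group public key of msk[0].
	fmt.Println("valid:", groupSig.Verify(msk0.GetPublicKey(), m))
}
```

`Recover` interpolates the k partial signatures at the given IDs, so the result equals the signature msk[0] itself would have produced, which is why it verifies under msk0's public key; the deleted test file below exercises the same flow in testSign.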
diff --git a/vendor/github.com/dexon-foundation/bls/ffi/go/bls/bls.go b/vendor/github.com/dexon-foundation/bls/ffi/go/bls/bls.go deleted file mode 100644 index 56bf08039..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/go/bls/bls.go +++ /dev/null @@ -1,539 +0,0 @@ -package bls - -/* -#cgo bn256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=4 -#cgo bn256 LDFLAGS:${SRCDIR}/../../../lib/libbls256.a -#cgo bn384 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6 -#cgo bn384 LDFLAGS:${SRCDIR}/../../../lib/libbls384.a -#cgo bn384_256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6 -DMCLBN_FR_UNIT_SIZE=4 -#cgo bn384_256 LDFLAGS:${SRCDIR}/../../../lib/libbls384_256.a -#cgo !bn256,!bn384,!bn384_256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6 -#cgo !bn256,!bn384,!bn384_256 LDFLAGS:${SRCDIR}/../../../lib/libbls384.a -#cgo CFLAGS:-I${SRCDIR}/../../../include -I${SRCDIR}/../../../../mcl/include -#cgo LDFLAGS:${SRCDIR}/../../../../mcl/lib/libmcl.a -lgmpxx -lgmp -#cgo static LDFLAGS:-static -typedef unsigned int (*ReadRandFunc)(void *, void *, unsigned int); -int wrapReadRandCgo(void *self, void *buf, unsigned int n); -#include -*/ -import "C" -import "fmt" -import "unsafe" -import "io" -import "encoding/json" - -// Init -- -// call this function before calling all the other operations -// this function is not thread safe -func Init(curve int) error { - err := C.blsInit(C.int(curve), C.MCLBN_COMPILED_TIME_VAR) - if err != 0 { - return fmt.Errorf("ERR Init curve=%d", curve) - } - return nil -} - -// ID -- -type ID struct { - v Fr -} - -// getPointer -- -func (id *ID) getPointer() (p *C.blsId) { - // #nosec - return (*C.blsId)(unsafe.Pointer(id)) -} - -// GetLittleEndian -- -func (id *ID) GetLittleEndian() []byte { - return id.v.Serialize() -} - -// SetLittleEndian -- -func (id *ID) SetLittleEndian(buf []byte) error { - return id.v.SetLittleEndian(buf) -} - -// GetHexString -- -func (id *ID) GetHexString() string { - return id.v.GetString(16) -} - -// GetDecString -- -func (id *ID) GetDecString() string { - return id.v.GetString(10) -} - -// SetHexString -- -func (id *ID) SetHexString(s string) error { - return id.v.SetString(s, 16) -} - -// SetDecString -- -func (id *ID) SetDecString(s string) error { - return id.v.SetString(s, 10) -} - -// IsEqual -- -func (id *ID) IsEqual(rhs *ID) bool { - if id == nil || rhs == nil { - return false - } - return id.v.IsEqual(&rhs.v) -} - -// MarshalJSON implements json.Marshaller. -func (id *ID) MarshalJSON() ([]byte, error) { - return json.Marshal(&struct { - ID []byte `json:"id"` - }{ - id.GetLittleEndian(), - }) -} - -// UnmarshalJSON implements json.Unmarshaller. 
-func (id *ID) UnmarshalJSON(data []byte) error { - aux := &struct { - ID []byte `json:"id"` - }{} - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - if err := id.SetLittleEndian(aux.ID); err != nil { - return err - } - return nil -} - -// SecretKey -- -type SecretKey struct { - v Fr -} - -// getPointer -- -func (sec *SecretKey) getPointer() (p *C.blsSecretKey) { - // #nosec - return (*C.blsSecretKey)(unsafe.Pointer(sec)) -} - -// GetLittleEndian -- -func (sec *SecretKey) GetLittleEndian() []byte { - return sec.v.Serialize() -} - -// SetLittleEndian -- -func (sec *SecretKey) SetLittleEndian(buf []byte) error { - return sec.v.SetLittleEndian(buf) -} - -// SerializeToHexStr -- -func (sec *SecretKey) SerializeToHexStr() string { - return sec.v.GetString(IoSerializeHexStr) -} - -// DeserializeHexStr -- -func (sec *SecretKey) DeserializeHexStr(s string) error { - return sec.v.SetString(s, IoSerializeHexStr) -} - -// GetHexString -- -func (sec *SecretKey) GetHexString() string { - return sec.v.GetString(16) -} - -// GetDecString -- -func (sec *SecretKey) GetDecString() string { - return sec.v.GetString(10) -} - -// SetHexString -- -func (sec *SecretKey) SetHexString(s string) error { - return sec.v.SetString(s, 16) -} - -// SetDecString -- -func (sec *SecretKey) SetDecString(s string) error { - return sec.v.SetString(s, 10) -} - -// IsEqual -- -func (sec *SecretKey) IsEqual(rhs *SecretKey) bool { - if sec == nil || rhs == nil { - return false - } - return sec.v.IsEqual(&rhs.v) -} - -// SetByCSPRNG -- -func (sec *SecretKey) SetByCSPRNG() { - sec.v.SetByCSPRNG() -} - -// Add -- -func (sec *SecretKey) Add(rhs *SecretKey) { - FrAdd(&sec.v, &sec.v, &rhs.v) -} - -// GetMasterSecretKey -- -func (sec *SecretKey) GetMasterSecretKey(k int) (msk []SecretKey) { - msk = make([]SecretKey, k) - msk[0] = *sec - for i := 1; i < k; i++ { - msk[i].SetByCSPRNG() - } - return msk -} - -// MarshalJSON implements json.Marshaller. -func (sec *SecretKey) MarshalJSON() ([]byte, error) { - return json.Marshal(&struct { - SecretKey []byte `json:"secret_key"` - }{ - sec.GetLittleEndian(), - }) -} - -// UnmarshalJSON implements json.Unmarshaller. 
-func (sec *SecretKey) UnmarshalJSON(data []byte) error { - aux := &struct { - SecretKey []byte `json:"secret_key"` - }{} - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - if err := sec.SetLittleEndian(aux.SecretKey); err != nil { - return err - } - return nil -} - -// GetMasterPublicKey -- -func GetMasterPublicKey(msk []SecretKey) (mpk []PublicKey) { - n := len(msk) - mpk = make([]PublicKey, n) - for i := 0; i < n; i++ { - mpk[i] = *msk[i].GetPublicKey() - } - return mpk -} - -// Set -- -func (sec *SecretKey) Set(msk []SecretKey, id *ID) error { - // #nosec - return FrEvaluatePolynomial(&sec.v, *(*[]Fr)(unsafe.Pointer(&msk)), &id.v) -} - -// Recover -- -func (sec *SecretKey) Recover(secVec []SecretKey, idVec []ID) error { - // #nosec - return FrLagrangeInterpolation(&sec.v, *(*[]Fr)(unsafe.Pointer(&idVec)), *(*[]Fr)(unsafe.Pointer(&secVec))) -} - -// GetPop -- -func (sec *SecretKey) GetPop() (sign *Sign) { - sign = new(Sign) - C.blsGetPop(sign.getPointer(), sec.getPointer()) - return sign -} - -// PublicKey -- -type PublicKey struct { - v G2 -} - -// getPointer -- -func (pub *PublicKey) getPointer() (p *C.blsPublicKey) { - // #nosec - return (*C.blsPublicKey)(unsafe.Pointer(pub)) -} - -// Serialize -- -func (pub *PublicKey) Serialize() []byte { - return pub.v.Serialize() -} - -// Deserialize -- -func (pub *PublicKey) Deserialize(buf []byte) error { - return pub.v.Deserialize(buf) -} - -// SerializeToHexStr -- -func (pub *PublicKey) SerializeToHexStr() string { - return pub.v.GetString(IoSerializeHexStr) -} - -// DeserializeHexStr -- -func (pub *PublicKey) DeserializeHexStr(s string) error { - return pub.v.SetString(s, IoSerializeHexStr) -} - -// GetHexString -- -func (pub *PublicKey) GetHexString() string { - return pub.v.GetString(16) -} - -// SetHexString -- -func (pub *PublicKey) SetHexString(s string) error { - return pub.v.SetString(s, 16) -} - -// IsEqual -- -func (pub *PublicKey) IsEqual(rhs *PublicKey) bool { - if pub == nil || rhs == nil { - return false - } - return pub.v.IsEqual(&rhs.v) -} - -// Add -- -func (pub *PublicKey) Add(rhs *PublicKey) { - G2Add(&pub.v, &pub.v, &rhs.v) -} - -// Set -- -func (pub *PublicKey) Set(mpk []PublicKey, id *ID) error { - // #nosec - return G2EvaluatePolynomial(&pub.v, *(*[]G2)(unsafe.Pointer(&mpk)), &id.v) -} - -// Recover -- -func (pub *PublicKey) Recover(pubVec []PublicKey, idVec []ID) error { - // #nosec - return G2LagrangeInterpolation(&pub.v, *(*[]Fr)(unsafe.Pointer(&idVec)), *(*[]G2)(unsafe.Pointer(&pubVec))) -} - -// MarshalJSON implements json.Marshaller. -func (pub *PublicKey) MarshalJSON() ([]byte, error) { - return json.Marshal(&struct { - PublicKey []byte `json:"public_key"` - }{ - pub.Serialize(), - }) -} - -// UnmarshalJSON implements json.Unmarshaller. 
-func (pub *PublicKey) UnmarshalJSON(data []byte) error { - aux := &struct { - PublicKey []byte `json:"public_key"` - }{} - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - if err := pub.Deserialize(aux.PublicKey); err != nil { - return err - } - return nil -} - -// Sign -- -type Sign struct { - v G1 -} - -// getPointer -- -func (sign *Sign) getPointer() (p *C.blsSignature) { - // #nosec - return (*C.blsSignature)(unsafe.Pointer(sign)) -} - -// Serialize -- -func (sign *Sign) Serialize() []byte { - return sign.v.Serialize() -} - -// Deserialize -- -func (sign *Sign) Deserialize(buf []byte) error { - return sign.v.Deserialize(buf) -} - -// SerializeToHexStr -- -func (sign *Sign) SerializeToHexStr() string { - return sign.v.GetString(IoSerializeHexStr) -} - -// DeserializeHexStr -- -func (sign *Sign) DeserializeHexStr(s string) error { - return sign.v.SetString(s, IoSerializeHexStr) -} - -// GetHexString -- -func (sign *Sign) GetHexString() string { - return sign.v.GetString(16) -} - -// SetHexString -- -func (sign *Sign) SetHexString(s string) error { - return sign.v.SetString(s, 16) -} - -// IsEqual -- -func (sign *Sign) IsEqual(rhs *Sign) bool { - if sign == nil || rhs == nil { - return false - } - return sign.v.IsEqual(&rhs.v) -} - -// GetPublicKey -- -func (sec *SecretKey) GetPublicKey() (pub *PublicKey) { - pub = new(PublicKey) - C.blsGetPublicKey(pub.getPointer(), sec.getPointer()) - return pub -} - -// Sign -- Constant Time version -func (sec *SecretKey) Sign(m string) (sign *Sign) { - sign = new(Sign) - buf := []byte(m) - // #nosec - C.blsSign(sign.getPointer(), sec.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) - return sign -} - -// Add -- -func (sign *Sign) Add(rhs *Sign) { - C.blsSignatureAdd(sign.getPointer(), rhs.getPointer()) -} - -// Recover -- -func (sign *Sign) Recover(signVec []Sign, idVec []ID) error { - // #nosec - return G1LagrangeInterpolation(&sign.v, *(*[]Fr)(unsafe.Pointer(&idVec)), *(*[]G1)(unsafe.Pointer(&signVec))) -} - -// Verify -- -func (sign *Sign) Verify(pub *PublicKey, m string) bool { - buf := []byte(m) - // #nosec - return C.blsVerify(sign.getPointer(), pub.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) == 1 -} - -// VerifyPop -- -func (sign *Sign) VerifyPop(pub *PublicKey) bool { - if pub.getPointer() == nil { - return false - } - return C.blsVerifyPop(sign.getPointer(), pub.getPointer()) == 1 -} - -// MarshalJSON implements json.Marshaller. -func (sign *Sign) MarshalJSON() ([]byte, error) { - return json.Marshal(&struct { - Sign []byte `json:"sign"` - }{ - sign.Serialize(), - }) -} - -// UnmarshalJSON implements json.Unmarshaller. 
-func (sign *Sign) UnmarshalJSON(data []byte) error { - aux := &struct { - Sign []byte `json:"sign"` - }{} - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - if err := sign.Deserialize(aux.Sign); err != nil { - return err - } - return nil -} - -// DHKeyExchange -- -func DHKeyExchange(sec *SecretKey, pub *PublicKey) (out PublicKey) { - C.blsDHKeyExchange(out.getPointer(), sec.getPointer(), pub.getPointer()) - return out -} - -// HashAndMapToSignature -- -func HashAndMapToSignature(buf []byte) *Sign { - sig := new(Sign) - err := sig.v.HashAndMapTo(buf) - if err == nil { - return sig - } else { - return nil - } -} - -// VerifyPairing -- -func VerifyPairing(X *Sign, Y *Sign, pub *PublicKey) bool { - if X.getPointer() == nil || Y.getPointer() == nil || pub.getPointer() == nil { - return false - } - return C.blsVerifyPairing(X.getPointer(), Y.getPointer(), pub.getPointer()) == 1 -} - -// SignHash -- -func (sec *SecretKey) SignHash(hash []byte) (sign *Sign) { - sign = new(Sign) - // #nosec - err := C.blsSignHash(sign.getPointer(), sec.getPointer(), unsafe.Pointer(&hash[0]), C.size_t(len(hash))) - if err == 0 { - return sign - } else { - return nil - } -} - -// VerifyHash -- -func (sign *Sign) VerifyHash(pub *PublicKey, hash []byte) bool { - if pub.getPointer() == nil { - return false - } - // #nosec - return C.blsVerifyHash(sign.getPointer(), pub.getPointer(), unsafe.Pointer(&hash[0]), C.size_t(len(hash))) == 1 -} - -func Min(x, y int) int { - if x < y { - return x - } - return y -} - -// VerifyAggregateHashes -- -func (sign *Sign) VerifyAggregateHashes(pubVec []PublicKey, hash [][]byte) bool { - hashByte := GetOpUnitSize() * 8 - n := len(hash) - h := make([]byte, n*hashByte) - for i := 0; i < n; i++ { - hn := len(hash[i]) - copy(h[i*hashByte:(i+1)*hashByte], hash[i][0:Min(hn, hashByte)]) - } - if pubVec[0].getPointer() == nil { - return false - } - return C.blsVerifyAggregatedHashes(sign.getPointer(), pubVec[0].getPointer(), unsafe.Pointer(&h[0]), C.size_t(hashByte), C.size_t(n)) == 1 -} - -/// - -var s_randReader io.Reader - -func createSlice(buf *C.char, n C.uint) []byte { - size := int(n) - return (*[1 << 30]byte)(unsafe.Pointer(buf))[:size:size] -} - -// this function can't be put in callback.go -//export wrapReadRandGo -func wrapReadRandGo(buf *C.char, n C.uint) C.uint { - slice := createSlice(buf, n) - ret, err := s_randReader.Read(slice) - if ret == int(n) && err == nil { - return n - } - return 0 -} - -// SetRandFunc -- -func SetRandFunc(randReader io.Reader) { - s_randReader = randReader - if randReader != nil { - C.blsSetRandFunc(nil, C.ReadRandFunc(unsafe.Pointer(C.wrapReadRandCgo))) - } else { - // use default random generator - C.blsSetRandFunc(nil, C.ReadRandFunc(unsafe.Pointer(nil))) - } -} diff --git a/vendor/github.com/dexon-foundation/bls/ffi/go/bls/bls_test.go b/vendor/github.com/dexon-foundation/bls/ffi/go/bls/bls_test.go deleted file mode 100644 index a13ee02f4..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/go/bls/bls_test.go +++ /dev/null @@ -1,690 +0,0 @@ -package bls - -import "testing" -import "strconv" -import "crypto/sha256" -import "crypto/sha512" -import "fmt" -import "crypto/rand" - -var unitN = 0 - -// Tests (for Benchmarks see below) - -func testPre(t *testing.T) { - t.Log("init") - { - var id ID - err := id.SetLittleEndian([]byte{6, 5, 4, 3, 2, 1}) - if err != nil { - t.Error(err) - } - t.Log("id :", id.GetHexString()) - var id2 ID - err = id2.SetHexString(id.GetHexString()) - if err != nil { - t.Fatal(err) - } - if 
!id.IsEqual(&id2) { - t.Errorf("not same id\n%s\n%s", id.GetHexString(), id2.GetHexString()) - } - err = id2.SetDecString(id.GetDecString()) - if err != nil { - t.Fatal(err) - } - if !id.IsEqual(&id2) { - t.Errorf("not same id\n%s\n%s", id.GetDecString(), id2.GetDecString()) - } - } - { - var sec SecretKey - err := sec.SetLittleEndian([]byte{1, 2, 3, 4, 5, 6}) - if err != nil { - t.Error(err) - } - t.Log("sec=", sec.GetHexString()) - } - - t.Log("create secret key") - m := "this is a bls sample for go" - var sec SecretKey - sec.SetByCSPRNG() - t.Log("sec:", sec.GetHexString()) - t.Log("create public key") - pub := sec.GetPublicKey() - t.Log("pub:", pub.GetHexString()) - sign := sec.Sign(m) - t.Log("sign:", sign.GetHexString()) - if !sign.Verify(pub, m) { - t.Error("Signature does not verify") - } - - // How to make array of SecretKey - { - sec := make([]SecretKey, 3) - for i := 0; i < len(sec); i++ { - sec[i].SetByCSPRNG() - t.Log("sec=", sec[i].GetHexString()) - } - } -} - -func testStringConversion(t *testing.T) { - t.Log("testRecoverSecretKey") - var sec SecretKey - var s string - if unitN == 6 { - s = "16798108731015832284940804142231733909759579603404752749028378864165570215949" - } else { - s = "40804142231733909759579603404752749028378864165570215949" - } - err := sec.SetDecString(s) - if err != nil { - t.Fatal(err) - } - if s != sec.GetDecString() { - t.Error("not equal") - } - s = sec.GetHexString() - var sec2 SecretKey - err = sec2.SetHexString(s) - if err != nil { - t.Fatal(err) - } - if !sec.IsEqual(&sec2) { - t.Error("not equal") - } -} - -func testRecoverSecretKey(t *testing.T) { - t.Log("testRecoverSecretKey") - k := 3000 - var sec SecretKey - sec.SetByCSPRNG() - t.Logf("sec=%s\n", sec.GetHexString()) - - // make master secret key - msk := sec.GetMasterSecretKey(k) - - n := k - secVec := make([]SecretKey, n) - idVec := make([]ID, n) - for i := 0; i < n; i++ { - err := idVec[i].SetLittleEndian([]byte{byte(i & 255), byte(i >> 8), 2, 3, 4, 5}) - if err != nil { - t.Error(err) - } - err = secVec[i].Set(msk, &idVec[i]) - if err != nil { - t.Error(err) - } - // t.Logf("idVec[%d]=%s\n", i, idVec[i].GetHexString()) - } - // recover sec2 from secVec and idVec - var sec2 SecretKey - err := sec2.Recover(secVec, idVec) - if err != nil { - t.Error(err) - } - if !sec.IsEqual(&sec2) { - t.Errorf("Mismatch in recovered secret key:\n %s\n %s.", sec.GetHexString(), sec2.GetHexString()) - } -} - -func testEachSign(t *testing.T, m string, msk []SecretKey, mpk []PublicKey) ([]ID, []SecretKey, []PublicKey, []Sign) { - idTbl := []byte{3, 5, 193, 22, 15} - n := len(idTbl) - - secVec := make([]SecretKey, n) - pubVec := make([]PublicKey, n) - signVec := make([]Sign, n) - idVec := make([]ID, n) - - for i := 0; i < n; i++ { - err := idVec[i].SetLittleEndian([]byte{idTbl[i], 0, 0, 0, 0, 0}) - if err != nil { - t.Error(err) - } - t.Logf("idVec[%d]=%s\n", i, idVec[i].GetHexString()) - - err = secVec[i].Set(msk, &idVec[i]) - if err != nil { - t.Error(err) - } - - err = pubVec[i].Set(mpk, &idVec[i]) - if err != nil { - t.Error(err) - } - t.Logf("pubVec[%d]=%s\n", i, pubVec[i].GetHexString()) - - if !pubVec[i].IsEqual(secVec[i].GetPublicKey()) { - t.Errorf("Pubkey derivation does not match\n%s\n%s", pubVec[i].GetHexString(), secVec[i].GetPublicKey().GetHexString()) - } - - signVec[i] = *secVec[i].Sign(m) - if !signVec[i].Verify(&pubVec[i], m) { - t.Error("Pubkey derivation does not match") - } - } - return idVec, secVec, pubVec, signVec -} -func testSign(t *testing.T) { - m := "testSign" - t.Log(m) - - var 
sec0 SecretKey - sec0.SetByCSPRNG() - pub0 := sec0.GetPublicKey() - s0 := sec0.Sign(m) - if !s0.Verify(pub0, m) { - t.Error("Signature does not verify") - } - - k := 3 - msk := sec0.GetMasterSecretKey(k) - mpk := GetMasterPublicKey(msk) - idVec, secVec, pubVec, signVec := testEachSign(t, m, msk, mpk) - - var sec1 SecretKey - err := sec1.Recover(secVec, idVec) - if err != nil { - t.Error(err) - } - if !sec0.IsEqual(&sec1) { - t.Error("Mismatch in recovered seckey.") - } - var pub1 PublicKey - err = pub1.Recover(pubVec, idVec) - if err != nil { - t.Error(err) - } - if !pub0.IsEqual(&pub1) { - t.Error("Mismatch in recovered pubkey.") - } - var s1 Sign - err = s1.Recover(signVec, idVec) - if err != nil { - t.Error(err) - } - if !s0.IsEqual(&s1) { - t.Error("Mismatch in recovered signature.") - } -} - -func testAdd(t *testing.T) { - t.Log("testAdd") - var sec1 SecretKey - var sec2 SecretKey - sec1.SetByCSPRNG() - sec2.SetByCSPRNG() - - pub1 := sec1.GetPublicKey() - pub2 := sec2.GetPublicKey() - - m := "test test" - sign1 := sec1.Sign(m) - sign2 := sec2.Sign(m) - - t.Log("sign1 :", sign1.GetHexString()) - sign1.Add(sign2) - t.Log("sign1 add:", sign1.GetHexString()) - pub1.Add(pub2) - if !sign1.Verify(pub1, m) { - t.Fail() - } -} - -func testPop(t *testing.T) { - t.Log("testPop") - var sec SecretKey - sec.SetByCSPRNG() - pop := sec.GetPop() - if !pop.VerifyPop(sec.GetPublicKey()) { - t.Errorf("Valid Pop does not verify") - } - sec.SetByCSPRNG() - if pop.VerifyPop(sec.GetPublicKey()) { - t.Errorf("Invalid Pop verifies") - } -} - -func testData(t *testing.T) { - t.Log("testData") - var sec1, sec2 SecretKey - sec1.SetByCSPRNG() - b := sec1.GetLittleEndian() - err := sec2.SetLittleEndian(b) - if err != nil { - t.Fatal(err) - } - if !sec1.IsEqual(&sec2) { - t.Error("SecretKey not same") - } - pub1 := sec1.GetPublicKey() - b = pub1.Serialize() - var pub2 PublicKey - err = pub2.Deserialize(b) - if err != nil { - t.Fatal(err) - } - if !pub1.IsEqual(&pub2) { - t.Error("PublicKey not same") - } - m := "doremi" - sign1 := sec1.Sign(m) - b = sign1.Serialize() - var sign2 Sign - err = sign2.Deserialize(b) - if err != nil { - t.Fatal(err) - } - if !sign1.IsEqual(&sign2) { - t.Error("Sign not same") - } -} - -func testSerializeToHexStr(t *testing.T) { - t.Log("testSerializeToHexStr") - var sec1, sec2 SecretKey - sec1.SetByCSPRNG() - s := sec1.SerializeToHexStr() - err := sec2.DeserializeHexStr(s) - if err != nil { - t.Fatal(err) - } - if !sec1.IsEqual(&sec2) { - t.Error("SecretKey not same") - } - pub1 := sec1.GetPublicKey() - s = pub1.SerializeToHexStr() - var pub2 PublicKey - err = pub2.DeserializeHexStr(s) - if err != nil { - t.Fatal(err) - } - if !pub1.IsEqual(&pub2) { - t.Error("PublicKey not same") - } - m := "doremi" - sign1 := sec1.Sign(m) - s = sign1.SerializeToHexStr() - var sign2 Sign - err = sign2.DeserializeHexStr(s) - if err != nil { - t.Fatal(err) - } - if !sign1.IsEqual(&sign2) { - t.Error("Sign not same") - } -} - -func testOrder(t *testing.T, c int) { - var curve string - var field string - if c == CurveFp254BNb { - curve = "16798108731015832284940804142231733909759579603404752749028378864165570215949" - field = "16798108731015832284940804142231733909889187121439069848933715426072753864723" - } else if c == CurveFp382_1 { - curve = "5540996953667913971058039301942914304734176495422447785042938606876043190415948413757785063597439175372845535461389" - field = "5540996953667913971058039301942914304734176495422447785045292539108217242186829586959562222833658991069414454984723" - } else if c == 
CurveFp382_2 { - curve = "5541245505022739011583672869577435255026888277144126952448297309161979278754528049907713682488818304329661351460877" - field = "5541245505022739011583672869577435255026888277144126952450651294188487038640194767986566260919128250811286032482323" - } else if c == BLS12_381 { - curve = "52435875175126190479447740508185965837690552500527637822603658699938581184513" - field = "4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787" - } else { - t.Fatal("bad c", c) - } - s := GetCurveOrder() - if s != curve { - t.Errorf("bad curve order\n%s\n%s\n", s, curve) - } - s = GetFieldOrder() - if s != field { - t.Errorf("bad field order\n%s\n%s\n", s, field) - } -} - -func testDHKeyExchange(t *testing.T) { - var sec1, sec2 SecretKey - sec1.SetByCSPRNG() - sec2.SetByCSPRNG() - pub1 := sec1.GetPublicKey() - pub2 := sec2.GetPublicKey() - out1 := DHKeyExchange(&sec1, pub2) - out2 := DHKeyExchange(&sec2, pub1) - if !out1.IsEqual(&out2) { - t.Errorf("DH key is not equal") - } -} - -func testPairing(t *testing.T) { - var sec SecretKey - sec.SetByCSPRNG() - pub := sec.GetPublicKey() - m := "abc" - sig1 := sec.Sign(m) - sig2 := HashAndMapToSignature([]byte(m)) - if !VerifyPairing(sig1, sig2, pub) { - t.Errorf("VerifyPairing") - } -} - -func testAggregate(t *testing.T) { - var sec SecretKey - sec.SetByCSPRNG() - pub := sec.GetPublicKey() - msgTbl := []string{"abc", "def", "123"} - n := len(msgTbl) - sigVec := make([]*Sign, n) - for i := 0; i < n; i++ { - m := msgTbl[i] - sigVec[i] = sec.Sign(m) - } - aggSign := sigVec[0] - for i := 1; i < n; i++ { - aggSign.Add(sigVec[i]) - } - hashPt := HashAndMapToSignature([]byte(msgTbl[0])) - for i := 1; i < n; i++ { - hashPt.Add(HashAndMapToSignature([]byte(msgTbl[i]))) - } - if !VerifyPairing(aggSign, hashPt, pub) { - t.Errorf("aggregate2") - } -} - -func Hash(buf []byte) []byte { - if GetOpUnitSize() == 4 { - d := sha256.Sum256([]byte(buf)) - return d[:] - } else { - // use SHA512 if bitSize > 256 - d := sha512.Sum512([]byte(buf)) - return d[:] - } -} - -func testHash(t *testing.T) { - var sec SecretKey - sec.SetByCSPRNG() - pub := sec.GetPublicKey() - m := "abc" - h := Hash([]byte(m)) - sig1 := sec.Sign(m) - sig2 := sec.SignHash(h) - if !sig1.IsEqual(sig2) { - t.Errorf("SignHash") - } - if !sig1.Verify(pub, m) { - t.Errorf("sig1.Verify") - } - if !sig2.VerifyHash(pub, h) { - t.Errorf("sig2.VerifyHash") - } -} - -func testAggregateHashes(t *testing.T) { - n := 1000 - pubVec := make([]PublicKey, n) - sigVec := make([]*Sign, n) - h := make([][]byte, n) - for i := 0; i < n; i++ { - sec := new(SecretKey) - sec.SetByCSPRNG() - pubVec[i] = *sec.GetPublicKey() - m := fmt.Sprintf("abc-%d", i) - h[i] = Hash([]byte(m)) - sigVec[i] = sec.SignHash(h[i]) - } - // aggregate sig - sig := sigVec[0] - for i := 1; i < n; i++ { - sig.Add(sigVec[i]) - } - if !sig.VerifyAggregateHashes(pubVec, h) { - t.Errorf("sig.VerifyAggregateHashes") - } -} - -type SeqRead struct { -} - -func (self *SeqRead) Read(buf []byte) (int, error) { - n := len(buf) - for i := 0; i < n; i++ { - buf[i] = byte(i) - } - return n, nil -} - -func testReadRand(t *testing.T) { - s1 := new(SeqRead) - SetRandFunc(s1) - var sec SecretKey - sec.SetByCSPRNG() - buf := sec.GetLittleEndian() - fmt.Printf("(SeqRead) buf=%x\n", buf) - for i := 0; i < len(buf)-1; i++ { - // ommit buf[len(buf) - 1] because it may be masked - if buf[i] != byte(i) { - t.Fatal("buf") - } - } - SetRandFunc(rand.Reader) - sec.SetByCSPRNG() - buf = sec.GetLittleEndian() - 
fmt.Printf("(rand.Reader) buf=%x\n", buf) - SetRandFunc(nil) - sec.SetByCSPRNG() - buf = sec.GetLittleEndian() - fmt.Printf("(default) buf=%x\n", buf) -} - -func test(t *testing.T, c int) { - err := Init(c) - if err != nil { - t.Fatal(err) - } - unitN = GetOpUnitSize() - t.Logf("unitN=%d\n", unitN) - testReadRand(t) - testPre(t) - testRecoverSecretKey(t) - testAdd(t) - testSign(t) - testPop(t) - testData(t) - testStringConversion(t) - testOrder(t, c) - testDHKeyExchange(t) - testSerializeToHexStr(t) - testPairing(t) - testAggregate(t) - testHash(t) - testAggregateHashes(t) -} - -func TestMain(t *testing.T) { - t.Logf("GetMaxOpUnitSize() = %d\n", GetMaxOpUnitSize()) - t.Log("CurveFp254BNb") - test(t, CurveFp254BNb) - if GetMaxOpUnitSize() == 6 { - if GetFrUnitSize() == 6 { - t.Log("CurveFp382_1") - test(t, CurveFp382_1) - } - t.Log("BLS12_381") - test(t, BLS12_381) - } -} - -// Benchmarks - -var curve = CurveFp382_1 - -//var curve = CurveFp254BNb - -func BenchmarkPubkeyFromSeckey(b *testing.B) { - b.StopTimer() - err := Init(curve) - if err != nil { - b.Fatal(err) - } - var sec SecretKey - for n := 0; n < b.N; n++ { - sec.SetByCSPRNG() - b.StartTimer() - sec.GetPublicKey() - b.StopTimer() - } -} - -func BenchmarkSigning(b *testing.B) { - b.StopTimer() - err := Init(curve) - if err != nil { - b.Fatal(err) - } - var sec SecretKey - for n := 0; n < b.N; n++ { - sec.SetByCSPRNG() - b.StartTimer() - sec.Sign(strconv.Itoa(n)) - b.StopTimer() - } -} - -func BenchmarkValidation(b *testing.B) { - b.StopTimer() - err := Init(curve) - if err != nil { - b.Fatal(err) - } - var sec SecretKey - for n := 0; n < b.N; n++ { - sec.SetByCSPRNG() - pub := sec.GetPublicKey() - m := strconv.Itoa(n) - sig := sec.Sign(m) - b.StartTimer() - sig.Verify(pub, m) - b.StopTimer() - } -} - -func benchmarkDeriveSeckeyShare(k int, b *testing.B) { - b.StopTimer() - err := Init(curve) - if err != nil { - b.Fatal(err) - } - var sec SecretKey - sec.SetByCSPRNG() - msk := sec.GetMasterSecretKey(k) - var id ID - for n := 0; n < b.N; n++ { - err = id.SetLittleEndian([]byte{1, 2, 3, 4, 5, byte(n)}) - if err != nil { - b.Error(err) - } - b.StartTimer() - err := sec.Set(msk, &id) - b.StopTimer() - if err != nil { - b.Error(err) - } - } -} - -//func BenchmarkDeriveSeckeyShare100(b *testing.B) { benchmarkDeriveSeckeyShare(100, b) } -//func BenchmarkDeriveSeckeyShare200(b *testing.B) { benchmarkDeriveSeckeyShare(200, b) } -func BenchmarkDeriveSeckeyShare500(b *testing.B) { benchmarkDeriveSeckeyShare(500, b) } - -//func BenchmarkDeriveSeckeyShare1000(b *testing.B) { benchmarkDeriveSeckeyShare(1000, b) } - -func benchmarkRecoverSeckey(k int, b *testing.B) { - b.StopTimer() - err := Init(curve) - if err != nil { - b.Fatal(err) - } - var sec SecretKey - sec.SetByCSPRNG() - msk := sec.GetMasterSecretKey(k) - - // derive n shares - n := k - secVec := make([]SecretKey, n) - idVec := make([]ID, n) - for i := 0; i < n; i++ { - err := idVec[i].SetLittleEndian([]byte{1, 2, 3, 4, 5, byte(i)}) - if err != nil { - b.Error(err) - } - err = secVec[i].Set(msk, &idVec[i]) - if err != nil { - b.Error(err) - } - } - - // recover from secVec and idVec - var sec2 SecretKey - b.StartTimer() - for n := 0; n < b.N; n++ { - err := sec2.Recover(secVec, idVec) - if err != nil { - b.Errorf("%s\n", err) - } - } -} - -func BenchmarkRecoverSeckey100(b *testing.B) { benchmarkRecoverSeckey(100, b) } -func BenchmarkRecoverSeckey200(b *testing.B) { benchmarkRecoverSeckey(200, b) } -func BenchmarkRecoverSeckey500(b *testing.B) { benchmarkRecoverSeckey(500, b) } -func 
BenchmarkRecoverSeckey1000(b *testing.B) { benchmarkRecoverSeckey(1000, b) } - -func benchmarkRecoverSignature(k int, b *testing.B) { - b.StopTimer() - err := Init(curve) - if err != nil { - b.Fatal(err) - } - var sec SecretKey - sec.SetByCSPRNG() - msk := sec.GetMasterSecretKey(k) - - // derive n shares - n := k - idVec := make([]ID, n) - secVec := make([]SecretKey, n) - signVec := make([]Sign, n) - for i := 0; i < n; i++ { - err := idVec[i].SetLittleEndian([]byte{1, 2, 3, 4, 5, byte(i)}) - if err != nil { - b.Error(err) - } - err = secVec[i].Set(msk, &idVec[i]) - if err != nil { - b.Error(err) - } - signVec[i] = *secVec[i].Sign("test message") - } - - // recover signature - var sig Sign - b.StartTimer() - for n := 0; n < b.N; n++ { - err := sig.Recover(signVec, idVec) - if err != nil { - b.Error(err) - } - } -} - -func BenchmarkRecoverSignature100(b *testing.B) { benchmarkRecoverSignature(100, b) } -func BenchmarkRecoverSignature200(b *testing.B) { benchmarkRecoverSignature(200, b) } -func BenchmarkRecoverSignature500(b *testing.B) { benchmarkRecoverSignature(500, b) } -func BenchmarkRecoverSignature1000(b *testing.B) { benchmarkRecoverSignature(1000, b) } diff --git a/vendor/github.com/dexon-foundation/bls/ffi/go/bls/callback.go b/vendor/github.com/dexon-foundation/bls/ffi/go/bls/callback.go deleted file mode 100644 index ba73a5e15..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/go/bls/callback.go +++ /dev/null @@ -1,12 +0,0 @@ -package bls - -/* -// exported from bls.go -unsigned int wrapReadRandGo(void *buf, unsigned int n); -int wrapReadRandCgo(void *self, void *buf, unsigned int n) -{ - (void)self; - return wrapReadRandGo(buf, n); -} -*/ -import "C" diff --git a/vendor/github.com/dexon-foundation/bls/ffi/go/bls/config.h b/vendor/github.com/dexon-foundation/bls/ffi/go/bls/config.h deleted file mode 100644 index 07e148137..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/go/bls/config.h +++ /dev/null @@ -1,6 +0,0 @@ -#pragma -// use bn384 unless tags is specified -#ifndef MCLBN_FP_UNIT_SIZE - #define MCLBN_FP_UNIT_SIZE 6 -#endif - diff --git a/vendor/github.com/dexon-foundation/bls/ffi/go/bls/dummy.cpp b/vendor/github.com/dexon-foundation/bls/ffi/go/bls/dummy.cpp deleted file mode 100644 index a5103a1c5..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/go/bls/dummy.cpp +++ /dev/null @@ -1,3 +0,0 @@ -// This is a dummy source file which forces cgo to use the C++ linker instead -// of the default C linker. We can therefore eliminate non-portable linker -// flags such as -lstdc++, which is likely to break on FreeBSD and OpenBSD. 
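The mcl.go file deleted below supplies the field and group arithmetic (`Fr`, `G1`, `G2`, `GT`) that the bls package builds on; in particular, secret sharing is plain polynomial evaluation over Fr, and recovery is Lagrange interpolation at zero. A minimal editorial sketch of that identity, again assuming the pre-rebrand import path and using only functions defined in the deleted file (`FrEvaluatePolynomial`, `FrLagrangeInterpolation`):

```
package main

import (
	"fmt"

	bls "github.com/dexon-foundation/bls/ffi/go/bls" // assumed pre-rebrand path
)

func main() {
	// Initialize the library before using any Fr operations.
	if err := bls.Init(bls.CurveFp254BNb); err != nil {
		panic(err)
	}

	// Secret polynomial f of degree k-1 over Fr; the secret is f(0) = c[0].
	const k = 3
	c := make([]bls.Fr, k)
	for i := range c {
		c[i].SetByCSPRNG()
	}

	// A share for participant i is the evaluation y[i] = f(x[i]) at a nonzero id x[i].
	const n = 5
	x := make([]bls.Fr, n)
	y := make([]bls.Fr, n)
	for i := 0; i < n; i++ {
		x[i].SetInt64(int64(i + 1))
		if err := bls.FrEvaluatePolynomial(&y[i], c, &x[i]); err != nil {
			panic(err)
		}
	}

	// Any k shares determine f; interpolating them at 0 recovers the secret.
	var s bls.Fr
	if err := bls.FrLagrangeInterpolation(&s, x[:k], y[:k]); err != nil {
		panic(err)
	}
	fmt.Println("recovered == secret:", s.IsEqual(&c[0]))
}
```

This is exactly what `SecretKey.Set` and `SecretKey.Recover` in the deleted bls.go do, via the same two mcl calls.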
diff --git a/vendor/github.com/dexon-foundation/bls/ffi/go/bls/mcl.go b/vendor/github.com/dexon-foundation/bls/ffi/go/bls/mcl.go deleted file mode 100644 index ca8d7f02b..000000000 --- a/vendor/github.com/dexon-foundation/bls/ffi/go/bls/mcl.go +++ /dev/null @@ -1,646 +0,0 @@ -package bls - -/* -#cgo bn256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=4 -#cgo bn384 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6 -#cgo bn384_256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6 -DMCLBN_FR_UNIT_SIZE=4 -#cgo !bn256,!bn384,!bn384_256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6 -#include -*/ -import "C" -import "fmt" -import "unsafe" - -// CurveFp254BNb -- 254 bit curve -const CurveFp254BNb = C.mclBn_CurveFp254BNb - -// CurveFp382_1 -- 382 bit curve 1 -const CurveFp382_1 = C.mclBn_CurveFp382_1 - -// CurveFp382_2 -- 382 bit curve 2 -const CurveFp382_2 = C.mclBn_CurveFp382_2 - -// BLS12_381 -const BLS12_381 = C.MCL_BLS12_381 - -// IoSerializeHexStr -const IoSerializeHexStr = C.MCLBN_IO_SERIALIZE_HEX_STR - -// GetFrUnitSize() -- -func GetFrUnitSize() int { - return int(C.MCLBN_FR_UNIT_SIZE) -} - -// GetFpUnitSize() -- -// same as GetMaxOpUnitSize() -func GetFpUnitSize() int { - return int(C.MCLBN_FP_UNIT_SIZE) -} - -// GetMaxOpUnitSize -- -func GetMaxOpUnitSize() int { - return int(C.MCLBN_FP_UNIT_SIZE) -} - -// GetOpUnitSize -- -// the length of Fr is GetOpUnitSize() * 8 bytes -func GetOpUnitSize() int { - return int(C.mclBn_getOpUnitSize()) -} - -// GetCurveOrder -- -// return the order of G1 -func GetCurveOrder() string { - buf := make([]byte, 1024) - // #nosec - n := C.mclBn_getCurveOrder((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf))) - if n == 0 { - panic("implementation err. size of buf is small") - } - return string(buf[:n]) -} - -// GetFieldOrder -- -// return the characteristic of the field where a curve is defined -func GetFieldOrder() string { - buf := make([]byte, 1024) - // #nosec - n := C.mclBn_getFieldOrder((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf))) - if n == 0 { - panic("implementation err. 
size of buf is small") - } - return string(buf[:n]) -} - -// Fr -- -type Fr struct { - v C.mclBnFr -} - -// getPointer -- -func (x *Fr) getPointer() (p *C.mclBnFr) { - // #nosec - return (*C.mclBnFr)(unsafe.Pointer(x)) -} - -// Clear -- -func (x *Fr) Clear() { - // #nosec - C.mclBnFr_clear(x.getPointer()) -} - -// SetInt64 -- -func (x *Fr) SetInt64(v int64) { - // #nosec - C.mclBnFr_setInt(x.getPointer(), C.int64_t(v)) -} - -// SetString -- -func (x *Fr) SetString(s string, base int) error { - buf := []byte(s) - // #nosec - err := C.mclBnFr_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) - if err != 0 { - return fmt.Errorf("err mclBnFr_setStr %x", err) - } - return nil -} - -// Deserialize -- -func (x *Fr) Deserialize(buf []byte) error { - // #nosec - err := C.mclBnFr_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) - if err == 0 { - return fmt.Errorf("err mclBnFr_deserialize %x", buf) - } - return nil -} - -// SetLittleEndian -- -func (x *Fr) SetLittleEndian(buf []byte) error { - // #nosec - err := C.mclBnFr_setLittleEndian(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) - if err != 0 { - return fmt.Errorf("err mclBnFr_setLittleEndian %x", err) - } - return nil -} - -// IsEqual -- -func (x *Fr) IsEqual(rhs *Fr) bool { - return C.mclBnFr_isEqual(x.getPointer(), rhs.getPointer()) == 1 -} - -// IsZero -- -func (x *Fr) IsZero() bool { - return C.mclBnFr_isZero(x.getPointer()) == 1 -} - -// IsOne -- -func (x *Fr) IsOne() bool { - return C.mclBnFr_isOne(x.getPointer()) == 1 -} - -// SetByCSPRNG -- -func (x *Fr) SetByCSPRNG() { - err := C.mclBnFr_setByCSPRNG(x.getPointer()) - if err != 0 { - panic("err mclBnFr_setByCSPRNG") - } -} - -// SetHashOf -- -func (x *Fr) SetHashOf(buf []byte) bool { - // #nosec - return C.mclBnFr_setHashOf(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) == 0 -} - -// GetString -- -func (x *Fr) GetString(base int) string { - buf := make([]byte, 2048) - // #nosec - n := C.mclBnFr_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) - if n == 0 { - panic("err mclBnFr_getStr") - } - return string(buf[:n]) -} - -// Serialize -- -func (x *Fr) Serialize() []byte { - buf := make([]byte, 2048) - // #nosec - n := C.mclBnFr_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) - if n == 0 { - panic("err mclBnFr_serialize") - } - return buf[:n] -} - -// FrNeg -- -func FrNeg(out *Fr, x *Fr) { - C.mclBnFr_neg(out.getPointer(), x.getPointer()) -} - -// FrInv -- -func FrInv(out *Fr, x *Fr) { - C.mclBnFr_inv(out.getPointer(), x.getPointer()) -} - -// FrAdd -- -func FrAdd(out *Fr, x *Fr, y *Fr) { - C.mclBnFr_add(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// FrSub -- -func FrSub(out *Fr, x *Fr, y *Fr) { - C.mclBnFr_sub(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// FrMul -- -func FrMul(out *Fr, x *Fr, y *Fr) { - C.mclBnFr_mul(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// FrDiv -- -func FrDiv(out *Fr, x *Fr, y *Fr) { - C.mclBnFr_div(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// G1 -- -type G1 struct { - v C.mclBnG1 -} - -// getPointer -- -func (x *G1) getPointer() (p *C.mclBnG1) { - // #nosec - return (*C.mclBnG1)(unsafe.Pointer(x)) -} - -// Clear -- -func (x *G1) Clear() { - // #nosec - C.mclBnG1_clear(x.getPointer()) -} - -// SetString -- -func (x *G1) SetString(s string, base int) error { - buf := []byte(s) - // #nosec - err := C.mclBnG1_setStr(x.getPointer(), 
(*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) - if err != 0 { - return fmt.Errorf("err mclBnG1_setStr %x", err) - } - return nil -} - -// Deserialize -- -func (x *G1) Deserialize(buf []byte) error { - // #nosec - err := C.mclBnG1_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) - if err == 0 { - return fmt.Errorf("err mclBnG1_deserialize %x", buf) - } - return nil -} - -// IsEqual -- -func (x *G1) IsEqual(rhs *G1) bool { - return C.mclBnG1_isEqual(x.getPointer(), rhs.getPointer()) == 1 -} - -// IsZero -- -func (x *G1) IsZero() bool { - return C.mclBnG1_isZero(x.getPointer()) == 1 -} - -// HashAndMapTo -- -func (x *G1) HashAndMapTo(buf []byte) error { - // #nosec - err := C.mclBnG1_hashAndMapTo(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) - if err != 0 { - return fmt.Errorf("err mclBnG1_hashAndMapTo %x", err) - } - return nil -} - -// GetString -- -func (x *G1) GetString(base int) string { - buf := make([]byte, 2048) - // #nosec - n := C.mclBnG1_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) - if n == 0 { - panic("err mclBnG1_getStr") - } - return string(buf[:n]) -} - -// Serialize -- -func (x *G1) Serialize() []byte { - buf := make([]byte, 2048) - // #nosec - n := C.mclBnG1_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) - if n == 0 { - panic("err mclBnG1_serialize") - } - return buf[:n] -} - -// G1Neg -- -func G1Neg(out *G1, x *G1) { - C.mclBnG1_neg(out.getPointer(), x.getPointer()) -} - -// G1Dbl -- -func G1Dbl(out *G1, x *G1) { - C.mclBnG1_dbl(out.getPointer(), x.getPointer()) -} - -// G1Add -- -func G1Add(out *G1, x *G1, y *G1) { - C.mclBnG1_add(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// G1Sub -- -func G1Sub(out *G1, x *G1, y *G1) { - C.mclBnG1_sub(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// G1Mul -- -func G1Mul(out *G1, x *G1, y *Fr) { - C.mclBnG1_mul(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// G1MulCT -- constant time (depending on bit lengh of y) -func G1MulCT(out *G1, x *G1, y *Fr) { - C.mclBnG1_mulCT(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// G2 -- -type G2 struct { - v C.mclBnG2 -} - -// getPointer -- -func (x *G2) getPointer() (p *C.mclBnG2) { - // #nosec - return (*C.mclBnG2)(unsafe.Pointer(x)) -} - -// Clear -- -func (x *G2) Clear() { - // #nosec - C.mclBnG2_clear(x.getPointer()) -} - -// SetString -- -func (x *G2) SetString(s string, base int) error { - buf := []byte(s) - // #nosec - err := C.mclBnG2_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) - if err != 0 { - return fmt.Errorf("err mclBnG2_setStr %x", err) - } - return nil -} - -// Deserialize -- -func (x *G2) Deserialize(buf []byte) error { - // #nosec - err := C.mclBnG2_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) - if err == 0 { - return fmt.Errorf("err mclBnG2_deserialize %x", buf) - } - return nil -} - -// IsEqual -- -func (x *G2) IsEqual(rhs *G2) bool { - return C.mclBnG2_isEqual(x.getPointer(), rhs.getPointer()) == 1 -} - -// IsZero -- -func (x *G2) IsZero() bool { - return C.mclBnG2_isZero(x.getPointer()) == 1 -} - -// HashAndMapTo -- -func (x *G2) HashAndMapTo(buf []byte) error { - // #nosec - err := C.mclBnG2_hashAndMapTo(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) - if err != 0 { - return fmt.Errorf("err mclBnG2_hashAndMapTo %x", err) - } - return nil -} - -// GetString -- -func (x *G2) GetString(base int) string { - buf := 
make([]byte, 2048) - // #nosec - n := C.mclBnG2_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) - if n == 0 { - panic("err mclBnG2_getStr") - } - return string(buf[:n]) -} - -// Serialize -- -func (x *G2) Serialize() []byte { - buf := make([]byte, 2048) - // #nosec - n := C.mclBnG2_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) - if n == 0 { - panic("err mclBnG2_serialize") - } - return buf[:n] -} - -// G2Neg -- -func G2Neg(out *G2, x *G2) { - C.mclBnG2_neg(out.getPointer(), x.getPointer()) -} - -// G2Dbl -- -func G2Dbl(out *G2, x *G2) { - C.mclBnG2_dbl(out.getPointer(), x.getPointer()) -} - -// G2Add -- -func G2Add(out *G2, x *G2, y *G2) { - C.mclBnG2_add(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// G2Sub -- -func G2Sub(out *G2, x *G2, y *G2) { - C.mclBnG2_sub(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// G2Mul -- -func G2Mul(out *G2, x *G2, y *Fr) { - C.mclBnG2_mul(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// GT -- -type GT struct { - v C.mclBnGT -} - -// getPointer -- -func (x *GT) getPointer() (p *C.mclBnGT) { - // #nosec - return (*C.mclBnGT)(unsafe.Pointer(x)) -} - -// Clear -- -func (x *GT) Clear() { - // #nosec - C.mclBnGT_clear(x.getPointer()) -} - -// SetInt64 -- -func (x *GT) SetInt64(v int64) { - // #nosec - C.mclBnGT_setInt(x.getPointer(), C.int64_t(v)) -} - -// SetString -- -func (x *GT) SetString(s string, base int) error { - buf := []byte(s) - // #nosec - err := C.mclBnGT_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) - if err != 0 { - return fmt.Errorf("err mclBnGT_setStr %x", err) - } - return nil -} - -// Deserialize -- -func (x *GT) Deserialize(buf []byte) error { - // #nosec - err := C.mclBnGT_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) - if err == 0 { - return fmt.Errorf("err mclBnGT_deserialize %x", buf) - } - return nil -} - -// IsEqual -- -func (x *GT) IsEqual(rhs *GT) bool { - return C.mclBnGT_isEqual(x.getPointer(), rhs.getPointer()) == 1 -} - -// IsZero -- -func (x *GT) IsZero() bool { - return C.mclBnGT_isZero(x.getPointer()) == 1 -} - -// IsOne -- -func (x *GT) IsOne() bool { - return C.mclBnGT_isOne(x.getPointer()) == 1 -} - -// GetString -- -func (x *GT) GetString(base int) string { - buf := make([]byte, 2048) - // #nosec - n := C.mclBnGT_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) - if n == 0 { - panic("err mclBnGT_getStr") - } - return string(buf[:n]) -} - -// Serialize -- -func (x *GT) Serialize() []byte { - buf := make([]byte, 2048) - // #nosec - n := C.mclBnGT_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) - if n == 0 { - panic("err mclBnGT_serialize") - } - return buf[:n] -} - -// GTNeg -- -func GTNeg(out *GT, x *GT) { - C.mclBnGT_neg(out.getPointer(), x.getPointer()) -} - -// GTInv -- -func GTInv(out *GT, x *GT) { - C.mclBnGT_inv(out.getPointer(), x.getPointer()) -} - -// GTAdd -- -func GTAdd(out *GT, x *GT, y *GT) { - C.mclBnGT_add(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// GTSub -- -func GTSub(out *GT, x *GT, y *GT) { - C.mclBnGT_sub(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// GTMul -- -func GTMul(out *GT, x *GT, y *GT) { - C.mclBnGT_mul(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// GTDiv -- -func GTDiv(out *GT, x *GT, y *GT) { - C.mclBnGT_div(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// GTPow -- -func GTPow(out *GT, x *GT, y *Fr) { - 
C.mclBnGT_pow(out.getPointer(), x.getPointer(), y.getPointer())
-}
-
-// Pairing --
-func Pairing(out *GT, x *G1, y *G2) {
-	C.mclBn_pairing(out.getPointer(), x.getPointer(), y.getPointer())
-}
-
-// FinalExp --
-func FinalExp(out *GT, x *GT) {
-	C.mclBn_finalExp(out.getPointer(), x.getPointer())
-}
-
-// MillerLoop --
-func MillerLoop(out *GT, x *G1, y *G2) {
-	C.mclBn_millerLoop(out.getPointer(), x.getPointer(), y.getPointer())
-}
-
-// GetUint64NumToPrecompute --
-func GetUint64NumToPrecompute() int {
-	return int(C.mclBn_getUint64NumToPrecompute())
-}
-
-// PrecomputeG2 --
-func PrecomputeG2(Qbuf []uint64, Q *G2) {
-	// #nosec
-	C.mclBn_precomputeG2((*C.uint64_t)(unsafe.Pointer(&Qbuf[0])), Q.getPointer())
-}
-
-// PrecomputedMillerLoop --
-func PrecomputedMillerLoop(out *GT, P *G1, Qbuf []uint64) {
-	// #nosec
-	C.mclBn_precomputedMillerLoop(out.getPointer(), P.getPointer(), (*C.uint64_t)(unsafe.Pointer(&Qbuf[0])))
-}
-
-// PrecomputedMillerLoop2 --
-func PrecomputedMillerLoop2(out *GT, P1 *G1, Q1buf []uint64, P2 *G1, Q2buf []uint64) {
-	// #nosec
-	C.mclBn_precomputedMillerLoop2(out.getPointer(), P1.getPointer(), (*C.uint64_t)(unsafe.Pointer(&Q1buf[0])), P2.getPointer(), (*C.uint64_t)(unsafe.Pointer(&Q2buf[0])))
-}
-
-// FrEvaluatePolynomial -- y = c[0] + c[1] * x + c[2] * x^2 + ...
-func FrEvaluatePolynomial(y *Fr, c []Fr, x *Fr) error {
-	// #nosec
-	err := C.mclBn_FrEvaluatePolynomial(y.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&c[0])), (C.size_t)(len(c)), x.getPointer())
-	if err != 0 {
-		return fmt.Errorf("err mclBn_FrEvaluatePolynomial")
-	}
-	return nil
-}
-
-// G1EvaluatePolynomial -- y = c[0] + c[1] * x + c[2] * x^2 + ...
-func G1EvaluatePolynomial(y *G1, c []G1, x *Fr) error {
-	// #nosec
-	err := C.mclBn_G1EvaluatePolynomial(y.getPointer(), (*C.mclBnG1)(unsafe.Pointer(&c[0])), (C.size_t)(len(c)), x.getPointer())
-	if err != 0 {
-		return fmt.Errorf("err mclBn_G1EvaluatePolynomial")
-	}
-	return nil
-}
-
-// G2EvaluatePolynomial -- y = c[0] + c[1] * x + c[2] * x^2 + ...
-func G2EvaluatePolynomial(y *G2, c []G2, x *Fr) error { - // #nosec - err := C.mclBn_G2EvaluatePolynomial(y.getPointer(), (*C.mclBnG2)(unsafe.Pointer(&c[0])), (C.size_t)(len(c)), x.getPointer()) - if err != 0 { - return fmt.Errorf("err mclBn_G2EvaluatePolynomial") - } - return nil -} - -// FrLagrangeInterpolation -- -func FrLagrangeInterpolation(out *Fr, xVec []Fr, yVec []Fr) error { - if len(xVec) != len(yVec) { - return fmt.Errorf("err FrLagrangeInterpolation:bad size") - } - // #nosec - err := C.mclBn_FrLagrangeInterpolation(out.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&xVec[0])), (*C.mclBnFr)(unsafe.Pointer(&yVec[0])), (C.size_t)(len(xVec))) - if err != 0 { - return fmt.Errorf("err FrLagrangeInterpolation") - } - return nil -} - -// G1LagrangeInterpolation -- -func G1LagrangeInterpolation(out *G1, xVec []Fr, yVec []G1) error { - if len(xVec) != len(yVec) { - return fmt.Errorf("err G1LagrangeInterpolation:bad size") - } - // #nosec - err := C.mclBn_G1LagrangeInterpolation(out.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&xVec[0])), (*C.mclBnG1)(unsafe.Pointer(&yVec[0])), (C.size_t)(len(xVec))) - if err != 0 { - return fmt.Errorf("err G1LagrangeInterpolation") - } - return nil -} - -// G2LagrangeInterpolation -- -func G2LagrangeInterpolation(out *G2, xVec []Fr, yVec []G2) error { - if len(xVec) != len(yVec) { - return fmt.Errorf("err G2LagrangeInterpolation:bad size") - } - // #nosec - err := C.mclBn_G2LagrangeInterpolation(out.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&xVec[0])), (*C.mclBnG2)(unsafe.Pointer(&yVec[0])), (C.size_t)(len(xVec))) - if err != 0 { - return fmt.Errorf("err G2LagrangeInterpolation") - } - return nil -} diff --git a/vendor/github.com/dexon-foundation/bls/images/bls-go-alpine/Dockerfile b/vendor/github.com/dexon-foundation/bls/images/bls-go-alpine/Dockerfile deleted file mode 100644 index edd49eb4b..000000000 --- a/vendor/github.com/dexon-foundation/bls/images/bls-go-alpine/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM golang:alpine -MAINTAINER Jimmy Hu - -# Install dependencies -RUN apk add --update-cache build-base gmp-dev openssl-dev git - -# Build bls library -RUN mkdir work ; cd work -RUN git clone --depth 1 git://github.com/dexon-foundation/mcl.git -RUN mkdir bls -COPY . 
bls/ -RUN cd bls ; make clean && make test_go DOCKER=alpine -j && cp lib/* /usr/lib/ diff --git a/vendor/github.com/dexon-foundation/bls/include/bls/bls.h b/vendor/github.com/dexon-foundation/bls/include/bls/bls.h deleted file mode 100644 index cb300bc49..000000000 --- a/vendor/github.com/dexon-foundation/bls/include/bls/bls.h +++ /dev/null @@ -1,275 +0,0 @@ -#pragma once -/** - @file - @brief C interface of bls.hpp - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include - -#ifdef BLS_SWAP_G - /* - error if BLS_SWAP_G is inconsistently used between library and exe - */ - #undef MCLBN_COMPILED_TIME_VAR - #define MCLBN_COMPILED_TIME_VAR ((MCLBN_FR_UNIT_SIZE) * 10 + (MCLBN_FP_UNIT_SIZE) + 100) -#endif - -#ifdef _MSC_VER - #ifdef BLS_DONT_EXPORT - #define BLS_DLL_API - #else - #ifdef BLS_DLL_EXPORT - #define BLS_DLL_API __declspec(dllexport) - #else - #define BLS_DLL_API __declspec(dllimport) - #endif - #endif - #ifndef BLS_NO_AUTOLINK - #if MCLBN_FP_UNIT_SIZE == 4 - #pragma comment(lib, "bls256.lib") - #elif (MCLBN_FP_UNIT_SIZE == 6) && (MCLBN_FR_UNIT_SIZE == 4) - #pragma comment(lib, "bls384_256.lib") - #elif (MCLBN_FP_UNIT_SIZE == 6) && (MCLBN_FR_UNIT_SIZE == 6) - #pragma comment(lib, "bls384.lib") - #endif - #endif -#elif defined(__EMSCRIPTEN__) && !defined(BLS_DONT_EXPORT) - #define BLS_DLL_API __attribute__((used)) -#elif defined(__wasm__) && !defined(BLS_DONT_EXPORT) - #define BLS_DLL_API __attribute__((visibility("default"))) -#else - #define BLS_DLL_API -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct { - mclBnFr v; -} blsId; - -typedef struct { - mclBnFr v; -} blsSecretKey; - -typedef struct { -#ifdef BLS_SWAP_G - mclBnG1 v; -#else - mclBnG2 v; -#endif -} blsPublicKey; - -typedef struct { -#ifdef BLS_SWAP_G - mclBnG2 v; -#else - mclBnG1 v; -#endif -} blsSignature; - -/* - initialize this library - call this once before using the other functions - @param curve [in] enum value defined in mcl/bn.h - @param compiledTimeVar [in] specify MCLBN_COMPILED_TIME_VAR, - which macro is used to make sure that the values - are the same when the library is built and used - @return 0 if success - @note blsInit() is thread safe and serialized if it is called simultaneously - but don't call it while using other functions. 
-*/
-BLS_DLL_API int blsInit(int curve, int compiledTimeVar);
-
-BLS_DLL_API void blsIdSetInt(blsId *id, int x);
-
-// sec = buf & (1 << bitLen(r)) - 1
-// if (sec >= r) sec &= (1 << (bitLen(r) - 1)) - 1
-// always return 0
-BLS_DLL_API int blsSecretKeySetLittleEndian(blsSecretKey *sec, const void *buf, mclSize bufSize);
-// return 0 if success (bufSize <= 64) else -1
-// set (buf mod r) to sec
-BLS_DLL_API int blsSecretKeySetLittleEndianMod(blsSecretKey *sec, const void *buf, mclSize bufSize);
-
-BLS_DLL_API void blsGetPublicKey(blsPublicKey *pub, const blsSecretKey *sec);
-
-// calculate the hash of m and sign the hash
-BLS_DLL_API void blsSign(blsSignature *sig, const blsSecretKey *sec, const void *m, mclSize size);
-
-// return 1 if valid
-BLS_DLL_API int blsVerify(const blsSignature *sig, const blsPublicKey *pub, const void *m, mclSize size);
-
-// return written byte size if success else 0
-BLS_DLL_API mclSize blsIdSerialize(void *buf, mclSize maxBufSize, const blsId *id);
-BLS_DLL_API mclSize blsSecretKeySerialize(void *buf, mclSize maxBufSize, const blsSecretKey *sec);
-BLS_DLL_API mclSize blsPublicKeySerialize(void *buf, mclSize maxBufSize, const blsPublicKey *pub);
-BLS_DLL_API mclSize blsSignatureSerialize(void *buf, mclSize maxBufSize, const blsSignature *sig);
-
-// return read byte size if success else 0
-BLS_DLL_API mclSize blsIdDeserialize(blsId *id, const void *buf, mclSize bufSize);
-BLS_DLL_API mclSize blsSecretKeyDeserialize(blsSecretKey *sec, const void *buf, mclSize bufSize);
-BLS_DLL_API mclSize blsPublicKeyDeserialize(blsPublicKey *pub, const void *buf, mclSize bufSize);
-BLS_DLL_API mclSize blsSignatureDeserialize(blsSignature *sig, const void *buf, mclSize bufSize);
-
-// return 1 if same else 0
-BLS_DLL_API int blsIdIsEqual(const blsId *lhs, const blsId *rhs);
-BLS_DLL_API int blsSecretKeyIsEqual(const blsSecretKey *lhs, const blsSecretKey *rhs);
-BLS_DLL_API int blsPublicKeyIsEqual(const blsPublicKey *lhs, const blsPublicKey *rhs);
-BLS_DLL_API int blsSignatureIsEqual(const blsSignature *lhs, const blsSignature *rhs);
-
-// return 0 if success
-BLS_DLL_API int blsSecretKeyShare(blsSecretKey *sec, const blsSecretKey* msk, mclSize k, const blsId *id);
-BLS_DLL_API int blsPublicKeyShare(blsPublicKey *pub, const blsPublicKey *mpk, mclSize k, const blsId *id);
-
-BLS_DLL_API int blsSecretKeyRecover(blsSecretKey *sec, const blsSecretKey *secVec, const blsId *idVec, mclSize n);
-BLS_DLL_API int blsPublicKeyRecover(blsPublicKey *pub, const blsPublicKey *pubVec, const blsId *idVec, mclSize n);
-BLS_DLL_API int blsSignatureRecover(blsSignature *sig, const blsSignature *sigVec, const blsId *idVec, mclSize n);
-
-// add
-BLS_DLL_API void blsSecretKeyAdd(blsSecretKey *sec, const blsSecretKey *rhs);
-BLS_DLL_API void blsPublicKeyAdd(blsPublicKey *pub, const blsPublicKey *rhs);
-BLS_DLL_API void blsSignatureAdd(blsSignature *sig, const blsSignature *rhs);
-
-/*
-	verify whether a point of an elliptic curve has order r
-	This API affects setStr(), deserialize() for G2 on BN or G1/G2 on BLS12
-	@param doVerify [in] does not verify if zero (default 1)
-	Signature = G1, PublicKey = G2
-*/
-BLS_DLL_API void blsSignatureVerifyOrder(int doVerify);
-BLS_DLL_API void blsPublicKeyVerifyOrder(int doVerify);
-// deserialize under VerifyOrder(true) = deserialize under VerifyOrder(false) + IsValidOrder
-BLS_DLL_API int blsSignatureIsValidOrder(const blsSignature *sig);
-BLS_DLL_API int blsPublicKeyIsValidOrder(const blsPublicKey *pub);
-
-#ifndef BLS_MINIMUM_API
-
-/*
-	verify X == sY by 
checking e(X, sQ) = e(Y, Q) - @param X [in] - @param Y [in] - @param pub [in] pub = sQ - @return 1 if e(X, pub) = e(Y, Q) else 0 -*/ -BLS_DLL_API int blsVerifyPairing(const blsSignature *X, const blsSignature *Y, const blsPublicKey *pub); - -/* - sign the hash - use the low (bitSize of r) - 1 bit of h - return 0 if success else -1 - NOTE : return false if h is zero or c1 or -c1 value for BN254. see hashTest() in test/bls_test.hpp -*/ -BLS_DLL_API int blsSignHash(blsSignature *sig, const blsSecretKey *sec, const void *h, mclSize size); -// return 1 if valid -BLS_DLL_API int blsVerifyHash(const blsSignature *sig, const blsPublicKey *pub, const void *h, mclSize size); - -/* - verify aggSig with pubVec[0, n) and hVec[0, n) - e(aggSig, Q) = prod_i e(hVec[i], pubVec[i]) - return 1 if valid - @note do not check duplication of hVec -*/ -BLS_DLL_API int blsVerifyAggregatedHashes(const blsSignature *aggSig, const blsPublicKey *pubVec, const void *hVec, size_t sizeofHash, mclSize n); - -// sub -BLS_DLL_API void blsSecretKeySub(blsSecretKey *sec, const blsSecretKey *rhs); -BLS_DLL_API void blsPublicKeySub(blsPublicKey *pub, const blsPublicKey *rhs); -BLS_DLL_API void blsSignatureSub(blsSignature *sig, const blsSignature *rhs); - -// not thread safe version (old blsInit) -BLS_DLL_API int blsInitNotThreadSafe(int curve, int compiledTimeVar); - -BLS_DLL_API mclSize blsGetOpUnitSize(void); -// return strlen(buf) if success else 0 -BLS_DLL_API int blsGetCurveOrder(char *buf, mclSize maxBufSize); -BLS_DLL_API int blsGetFieldOrder(char *buf, mclSize maxBufSize); - -// return bytes for serialized G1(=Fp) -BLS_DLL_API int blsGetG1ByteSize(void); - -// return bytes for serialized Fr -BLS_DLL_API int blsGetFrByteSize(void); - -#ifdef BLS_SWAP_G -// get a generator of G1 -BLS_DLL_API void blsGetGeneratorOfG1(blsPublicKey *pub); -#else -// get a generator of G2 -BLS_DLL_API void blsGetGeneratorOfG2(blsPublicKey *pub); -#endif - -// return 0 if success -BLS_DLL_API int blsIdSetDecStr(blsId *id, const char *buf, mclSize bufSize); -BLS_DLL_API int blsIdSetHexStr(blsId *id, const char *buf, mclSize bufSize); - -/* - return strlen(buf) if success else 0 - buf is '\0' terminated -*/ -BLS_DLL_API mclSize blsIdGetDecStr(char *buf, mclSize maxBufSize, const blsId *id); -BLS_DLL_API mclSize blsIdGetHexStr(char *buf, mclSize maxBufSize, const blsId *id); - -// hash buf and set -BLS_DLL_API int blsHashToSecretKey(blsSecretKey *sec, const void *buf, mclSize bufSize); -#ifndef MCL_DONT_USE_CSPRNG -/* - set secretKey if system has /dev/urandom or CryptGenRandom - return 0 if success else -1 -*/ -BLS_DLL_API int blsSecretKeySetByCSPRNG(blsSecretKey *sec); -/* - set user-defined random function for setByCSPRNG - @param self [in] user-defined pointer - @param readFunc [in] user-defined function, - which writes random bufSize bytes to buf and returns bufSize if success else returns 0 - @note if self == 0 and readFunc == 0 then set default random function - @note not threadsafe -*/ -BLS_DLL_API void blsSetRandFunc(void *self, unsigned int (*readFunc)(void *self, void *buf, unsigned int bufSize)); -#endif - -BLS_DLL_API void blsGetPop(blsSignature *sig, const blsSecretKey *sec); - -BLS_DLL_API int blsVerifyPop(const blsSignature *sig, const blsPublicKey *pub); -////////////////////////////////////////////////////////////////////////// -// the following apis will be removed - -// mask buf with (1 << (bitLen(r) - 1)) - 1 if buf >= r -BLS_DLL_API int blsIdSetLittleEndian(blsId *id, const void *buf, mclSize bufSize); -/* - return written 
byte size if success else 0 -*/ -BLS_DLL_API mclSize blsIdGetLittleEndian(void *buf, mclSize maxBufSize, const blsId *id); - -// return 0 if success -BLS_DLL_API int blsSecretKeySetDecStr(blsSecretKey *sec, const char *buf, mclSize bufSize); -BLS_DLL_API int blsSecretKeySetHexStr(blsSecretKey *sec, const char *buf, mclSize bufSize); -/* - return written byte size if success else 0 -*/ -BLS_DLL_API mclSize blsSecretKeyGetLittleEndian(void *buf, mclSize maxBufSize, const blsSecretKey *sec); -/* - return strlen(buf) if success else 0 - buf is '\0' terminated -*/ -BLS_DLL_API mclSize blsSecretKeyGetDecStr(char *buf, mclSize maxBufSize, const blsSecretKey *sec); -BLS_DLL_API mclSize blsSecretKeyGetHexStr(char *buf, mclSize maxBufSize, const blsSecretKey *sec); -BLS_DLL_API int blsPublicKeySetHexStr(blsPublicKey *pub, const char *buf, mclSize bufSize); -BLS_DLL_API mclSize blsPublicKeyGetHexStr(char *buf, mclSize maxBufSize, const blsPublicKey *pub); -BLS_DLL_API int blsSignatureSetHexStr(blsSignature *sig, const char *buf, mclSize bufSize); -BLS_DLL_API mclSize blsSignatureGetHexStr(char *buf, mclSize maxBufSize, const blsSignature *sig); - -/* - Diffie-Hellman key exchange - out = sec * pub -*/ -BLS_DLL_API void blsDHKeyExchange(blsPublicKey *out, const blsSecretKey *sec, const blsPublicKey *pub); - -#endif // BLS_MINIMUM_API - -#ifdef __cplusplus -} -#endif diff --git a/vendor/github.com/dexon-foundation/bls/include/bls/bls.hpp b/vendor/github.com/dexon-foundation/bls/include/bls/bls.hpp deleted file mode 100644 index 741334555..000000000 --- a/vendor/github.com/dexon-foundation/bls/include/bls/bls.hpp +++ /dev/null @@ -1,534 +0,0 @@ -#pragma once -/** - @file - @brief BLS threshold signature on BN curve - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include <bls/bls.h> -#include <stdexcept> -#include <vector> -#include <string> -#include <iosfwd> -#include <stdint.h> - -namespace bls { - -// same values as IoMode in mcl/op.hpp -enum { - IoBin = 2, // binary number - IoDec = 10, // decimal number - IoHex = 16, // hexadecimal number - IoPrefix = 128, // append '0b'(bin) or '0x'(hex) - IoSerialize = 512, - IoFixedByteSeq = IoSerialize // fixed byte representation -}; - -/* - BLS signature - e : G2 x G1 -> Fp12 - Q in G2 ; fixed global parameter - H : {str} -> G1 - s : secret key - sQ ; public key - s H(m) ; signature of m - verify ; e(sQ, H(m)) = e(Q, s H(m)) -*/ - -/* - initialize this library - call this once before using the other methods - @param curve [in] type of curve - @param compiledTimeVar [in] use the default value - @note init() is not thread safe -*/ -inline void init(int curve = mclBn_CurveFp254BNb, int compiledTimeVar = MCLBN_COMPILED_TIME_VAR) -{ - if (blsInit(curve, compiledTimeVar) != 0) throw std::invalid_argument("blsInit"); -} -inline size_t getOpUnitSize() { return blsGetOpUnitSize(); } - -inline void getCurveOrder(std::string& str) -{ - str.resize(1024); - mclSize n = blsGetCurveOrder(&str[0], str.size()); - if (n == 0) throw std::runtime_error("blsGetCurveOrder"); - str.resize(n); -} -inline void getFieldOrder(std::string& str) -{ - str.resize(1024); - mclSize n = blsGetFieldOrder(&str[0], str.size()); - if (n == 0) throw std::runtime_error("blsGetFieldOrder"); - str.resize(n); -} -inline int getG1ByteSize() { return blsGetG1ByteSize(); } -inline int getFrByteSize() { return blsGetFrByteSize(); } - -namespace local { -/* - the value of secretKey and Id must be less than - r = 0x2523648240000001ba344d8000000007ff9f800000000010a10000000000000d - sizeof(uint64_t) * keySize byte -*/ -const size_t keySize = MCLBN_FP_UNIT_SIZE; -}
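The verify line in the scheme comment above is just bilinearity of the pairing, stated here once for reference: e(sQ, H(m)) = e(Q, H(m))^s = e(Q, s H(m)), so the check succeeds exactly when the signature s H(m) and the public key sQ were produced from the same secret s.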
- -class SecretKey; -class PublicKey; -class Signature; -class Id; - -typedef std::vector<SecretKey> SecretKeyVec; -typedef std::vector<PublicKey> PublicKeyVec; -typedef std::vector<Signature> SignatureVec; -typedef std::vector<Id> IdVec; - -class Id { - blsId self_; - friend class PublicKey; - friend class SecretKey; - friend class Signature; -public: - Id(unsigned int id = 0) - { - blsIdSetInt(&self_, id); - } - bool operator==(const Id& rhs) const - { - return blsIdIsEqual(&self_, &rhs.self_) == 1; - } - bool operator!=(const Id& rhs) const { return !(*this == rhs); } - friend std::ostream& operator<<(std::ostream& os, const Id& id) - { - std::string str; - id.getStr(str, 16|IoPrefix); - return os << str; - } - friend std::istream& operator>>(std::istream& is, Id& id) - { - std::string str; - is >> str; - id.setStr(str, 16); - return is; - } - void getStr(std::string& str, int ioMode = 0) const - { - str.resize(1024); - size_t n = mclBnFr_getStr(&str[0], str.size(), &self_.v, ioMode); - if (n == 0) throw std::runtime_error("mclBnFr_getStr"); - str.resize(n); - } - void setStr(const std::string& str, int ioMode = 0) - { - int ret = mclBnFr_setStr(&self_.v, str.c_str(), str.size(), ioMode); - if (ret != 0) throw std::runtime_error("mclBnFr_setStr"); - } - bool isZero() const - { - return mclBnFr_isZero(&self_.v) == 1; - } - /* - set p[0, .., keySize) - @note the value must be less than r - */ - void set(const uint64_t *p) - { - setLittleEndian(p, local::keySize * sizeof(uint64_t)); - } - // bufSize is truncated/zero-extended to keySize - void setLittleEndian(const void *buf, size_t bufSize) - { - mclBnFr_setLittleEndian(&self_.v, buf, bufSize); - } -}; - -/* - s ; secret key -*/ -class SecretKey { - blsSecretKey self_; -public: - bool operator==(const SecretKey& rhs) const - { - return blsSecretKeyIsEqual(&self_, &rhs.self_) == 1; - } - bool operator!=(const SecretKey& rhs) const { return !(*this == rhs); } - friend std::ostream& operator<<(std::ostream& os, const SecretKey& sec) - { - std::string str; - sec.getStr(str, 16|IoPrefix); - return os << str; - } - friend std::istream& operator>>(std::istream& is, SecretKey& sec) - { - std::string str; - is >> str; - sec.setStr(str); - return is; - } - void getStr(std::string& str, int ioMode = 0) const - { - str.resize(1024); - size_t n = mclBnFr_getStr(&str[0], str.size(), &self_.v, ioMode); - if (n == 0) throw std::runtime_error("mclBnFr_getStr"); - str.resize(n); - } - void setStr(const std::string& str, int ioMode = 0) - { - int ret = mclBnFr_setStr(&self_.v, str.c_str(), str.size(), ioMode); - if (ret != 0) throw std::runtime_error("mclBnFr_setStr"); - } - /* - initialize secretKey with random number - */ - void init() - { - int ret = blsSecretKeySetByCSPRNG(&self_); - if (ret != 0) throw std::runtime_error("blsSecretKeySetByCSPRNG"); - } - /* - set secretKey with p[0, .., keySize) and set id = 0 - @note the value must be less than r - */ - void set(const uint64_t *p) - { - setLittleEndian(p, local::keySize * sizeof(uint64_t)); - } - // bufSize is truncated/zero-extended to keySize - void setLittleEndian(const void *buf, size_t bufSize) - { - mclBnFr_setLittleEndian(&self_.v, buf, bufSize); - } - // set hash of buf - void setHashOf(const void *buf, size_t bufSize) - { - int ret = mclBnFr_setHashOf(&self_.v, buf, bufSize); - if (ret != 0) throw std::runtime_error("mclBnFr_setHashOf"); - } - void getPublicKey(PublicKey& pub) const; - // constant time sign - // sign hash(m) - void sign(Signature&
sig, const void *m, size_t size) const; - void sign(Signature& sig, const std::string& m) const - { - sign(sig, m.c_str(), m.size()); - } - // sign hashed value - void signHash(Signature& sig, const void *h, size_t size) const; - void signHash(Signature& sig, const std::string& h) const - { - signHash(sig, h.c_str(), h.size()); - } - /* - make Pop(Proof of Possession) - pop = prv.sign(pub) - */ - void getPop(Signature& pop) const; - /* - make [s_0, ..., s_{k-1}] to prepare k-out-of-n secret sharing - */ - void getMasterSecretKey(SecretKeyVec& msk, size_t k) const - { - if (k <= 1) throw std::invalid_argument("getMasterSecretKey"); - msk.resize(k); - msk[0] = *this; - for (size_t i = 1; i < k; i++) { - msk[i].init(); - } - } - /* - set a secret key for id > 0 from msk - */ - void set(const SecretKeyVec& msk, const Id& id) - { - set(msk.data(), msk.size(), id); - } - /* - recover secretKey from k secVec - */ - void recover(const SecretKeyVec& secVec, const IdVec& idVec) - { - if (secVec.size() != idVec.size()) throw std::invalid_argument("SecretKey:recover"); - recover(secVec.data(), idVec.data(), idVec.size()); - } - /* - add secret key - */ - void add(const SecretKey& rhs); - - // the following methods are for C api - /* - the size of msk must be k - */ - void set(const SecretKey *msk, size_t k, const Id& id) - { - int ret = blsSecretKeyShare(&self_, &msk->self_, k, &id.self_); - if (ret != 0) throw std::runtime_error("blsSecretKeyShare"); - } - void recover(const SecretKey *secVec, const Id *idVec, size_t n) - { - int ret = blsSecretKeyRecover(&self_, &secVec->self_, &idVec->self_, n); - if (ret != 0) throw std::runtime_error("blsSecretKeyRecover:same id"); - } -};
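For concreteness, a minimal 2-out-of-3 sketch of the sharing methods above, combined with the `PublicKey` and `Signature::recover` declarations that follow (an illustrative sketch, assuming the default BN254 curve and a libbls256 build with MCLBN_FP_UNIT_SIZE = 4):

```cpp
#define MCLBN_FP_UNIT_SIZE 4
#include <bls/bls.hpp>
#include <cassert>

int main()
{
	bls::init(); // BN254 by default
	bls::SecretKey sec;
	sec.init(); // the group secret s
	bls::SecretKeyVec msk;
	sec.getMasterSecretKey(msk, 2); // k = 2 : f(x) = msk[0] + msk[1] x, f(0) = s
	bls::IdVec ids = { 1, 2, 3 }; // n = 3 shareholders; ids must be non-zero
	bls::SecretKeyVec shares(3);
	for (size_t i = 0; i < 3; i++) shares[i].set(msk, ids[i]); // share = f(id)
	// any k = 2 shareholders sign the same message
	const std::string m = "msg";
	bls::SignatureVec sigs(2);
	shares[0].sign(sigs[0], m);
	shares[2].sign(sigs[1], m);
	// Lagrange interpolation at x = 0 recovers the group signature s H(m)
	bls::Signature sig;
	sig.recover(sigs, bls::IdVec{ ids[0], ids[2] });
	bls::PublicKey pub;
	sec.getPublicKey(pub);
	assert(sig.verify(pub, m)); // verifies under the original public key
	return 0;
}
```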
/* - sQ ; public key -*/ -class PublicKey { - blsPublicKey self_; - friend class SecretKey; - friend class Signature; -public: - bool operator==(const PublicKey& rhs) const - { - return blsPublicKeyIsEqual(&self_, &rhs.self_) == 1; - } - bool operator!=(const PublicKey& rhs) const { return !(*this == rhs); } - friend std::ostream& operator<<(std::ostream& os, const PublicKey& pub) - { - std::string str; - pub.getStr(str, 16|IoPrefix); - return os << str; - } - friend std::istream& operator>>(std::istream& is, PublicKey& pub) - { - std::string str; - is >> str; - if (str != "0") { - // 1 - std::string t; -#ifdef BLS_SWAP_G - const int elemNum = 2; -#else - const int elemNum = 4; -#endif - for (int i = 0; i < elemNum; i++) { - is >> t; - str += ' '; - str += t; - } - } - pub.setStr(str, 16); - return is; - } - void getStr(std::string& str, int ioMode = 0) const - { - str.resize(1024); -#ifdef BLS_SWAP_G - size_t n = mclBnG1_getStr(&str[0], str.size(), &self_.v, ioMode); -#else - size_t n = mclBnG2_getStr(&str[0], str.size(), &self_.v, ioMode); -#endif - if (n == 0) throw std::runtime_error("PublicKey:getStr"); - str.resize(n); - } - void setStr(const std::string& str, int ioMode = 0) - { -#ifdef BLS_SWAP_G - int ret = mclBnG1_setStr(&self_.v, str.c_str(), str.size(), ioMode); -#else - int ret = mclBnG2_setStr(&self_.v, str.c_str(), str.size(), ioMode); -#endif - if (ret != 0) throw std::runtime_error("PublicKey:setStr"); - } - /* - set the public key for id from mpk - */ - void set(const PublicKeyVec& mpk, const Id& id) - { - set(mpk.data(), mpk.size(), id); - } - /* - recover publicKey from k pubVec - */ - void recover(const PublicKeyVec& pubVec, const IdVec& idVec) - { - if (pubVec.size() != idVec.size()) throw std::invalid_argument("PublicKey:recover"); - recover(pubVec.data(), idVec.data(), idVec.size()); - } - /* - add public key - */ - void add(const PublicKey& rhs) - { - blsPublicKeyAdd(&self_, &rhs.self_); - } - - // the following methods are for C api - void set(const PublicKey *mpk, size_t k, const Id& id) - { - int ret = blsPublicKeyShare(&self_, &mpk->self_, k, &id.self_); - if (ret != 0) throw std::runtime_error("blsPublicKeyShare"); - } - void recover(const PublicKey *pubVec, const Id *idVec, size_t n) - { - int ret = blsPublicKeyRecover(&self_, &pubVec->self_, &idVec->self_, n); - if (ret != 0) throw std::runtime_error("blsPublicKeyRecover"); - } -}; - -/* - s H(m) ; signature -*/ -class Signature { - blsSignature self_; - friend class SecretKey; -public: - bool operator==(const Signature& rhs) const - { - return blsSignatureIsEqual(&self_, &rhs.self_) == 1; - } - bool operator!=(const Signature& rhs) const { return !(*this == rhs); } - friend std::ostream& operator<<(std::ostream& os, const Signature& sig) - { - std::string str; - sig.getStr(str, 16|IoPrefix); - return os << str; - } - friend std::istream& operator>>(std::istream& is, Signature& sig) - { - std::string str; - is >> str; - if (str != "0") { - // 1 - std::string t; -#ifdef BLS_SWAP_G - const int elemNum = 4; -#else - const int elemNum = 2; -#endif - for (int i = 0; i < elemNum; i++) { - is >> t; - str += ' '; - str += t; - } - } - sig.setStr(str, 16); - return is; - } - void getStr(std::string& str, int ioMode = 0) const - { - str.resize(1024); -#ifdef BLS_SWAP_G - size_t n = mclBnG2_getStr(&str[0], str.size(), &self_.v, ioMode); -#else - size_t n = mclBnG1_getStr(&str[0], str.size(), &self_.v, ioMode); -#endif - if (n == 0) throw std::runtime_error("Signature:getStr"); - str.resize(n); - } - void setStr(const std::string& str, int ioMode = 0) - { -#ifdef BLS_SWAP_G - int ret = mclBnG2_setStr(&self_.v, str.c_str(), str.size(), ioMode); -#else - int ret = mclBnG1_setStr(&self_.v, str.c_str(), str.size(), ioMode); -#endif - if (ret != 0) throw std::runtime_error("Signature:setStr"); - } - bool verify(const PublicKey& pub, const void *m, size_t size) const - { - return blsVerify(&self_, &pub.self_, m, size) == 1; - } - bool verify(const PublicKey& pub, const std::string& m) const - { - return verify(pub, m.c_str(), m.size()); - } - bool verifyHash(const PublicKey& pub, const void *h, size_t size) const - { - return blsVerifyHash(&self_, &pub.self_, h, size) == 1; - } - bool verifyHash(const PublicKey& pub, const std::string& h) const - { - return verifyHash(pub, h.c_str(), h.size()); - } - bool verifyAggregatedHashes(const PublicKey *pubVec, const void *hVec, size_t sizeofHash, size_t n) const - { - return blsVerifyAggregatedHashes(&self_, &pubVec[0].self_, hVec, sizeofHash, n) == 1; - } - /* - verify self(pop) with pub - */ - bool verify(const PublicKey& pub) const - { - std::string str; - pub.getStr(str); - return verify(pub, str); - } - /* - recover sig from k sigVec - */ - void recover(const SignatureVec& sigVec, const IdVec& idVec) - { - if (sigVec.size() != idVec.size()) throw std::invalid_argument("Signature:recover"); - recover(sigVec.data(), idVec.data(), idVec.size()); - } - /* - add signature - */ - void add(const Signature& rhs) - { - blsSignatureAdd(&self_, &rhs.self_); - } - - // the following methods are for C api - void recover(const Signature* sigVec, const Id *idVec, size_t n) - { - int ret = blsSignatureRecover(&self_, &sigVec->self_, &idVec->self_, n); - if (ret != 0) throw std::runtime_error("blsSignatureRecover:same id"); - } -}; - -/* - make master public key [s_0 Q, ..., s_{k-1} Q] from msk -*/ -inline void
getMasterPublicKey(PublicKeyVec& mpk, const SecretKeyVec& msk) -{ - const size_t n = msk.size(); - mpk.resize(n); - for (size_t i = 0; i < n; i++) { - msk[i].getPublicKey(mpk[i]); - } -} - -inline void SecretKey::getPublicKey(PublicKey& pub) const -{ - blsGetPublicKey(&pub.self_, &self_); -} -inline void SecretKey::sign(Signature& sig, const void *m, size_t size) const -{ - blsSign(&sig.self_, &self_, m, size); -} -inline void SecretKey::signHash(Signature& sig, const void *h, size_t size) const -{ - if (blsSignHash(&sig.self_, &self_, h, size) != 0) throw std::runtime_error("bad h"); -} -inline void SecretKey::getPop(Signature& pop) const -{ - PublicKey pub; - getPublicKey(pub); - std::string m; - pub.getStr(m); - sign(pop, m); -} - -/* - make pop from msk and mpk -*/ -inline void getPopVec(SignatureVec& popVec, const SecretKeyVec& msk) -{ - const size_t n = msk.size(); - popVec.resize(n); - for (size_t i = 0; i < n; i++) { - msk[i].getPop(popVec[i]); - } -} - -inline Signature operator+(const Signature& a, const Signature& b) { Signature r(a); r.add(b); return r; } -inline PublicKey operator+(const PublicKey& a, const PublicKey& b) { PublicKey r(a); r.add(b); return r; } -inline SecretKey operator+(const SecretKey& a, const SecretKey& b) { SecretKey r(a); r.add(b); return r; } - -} //bls diff --git a/vendor/github.com/dexon-foundation/bls/lib/.emptydir b/vendor/github.com/dexon-foundation/bls/lib/.emptydir deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/dexon-foundation/bls/mk.bat b/vendor/github.com/dexon-foundation/bls/mk.bat deleted file mode 100644 index 9bf8dd9e6..000000000 --- a/vendor/github.com/dexon-foundation/bls/mk.bat +++ /dev/null @@ -1,20 +0,0 @@ -@echo off -if "%1"=="-s" ( - echo use static lib - set CFLAGS=%CFLAGS% /DMCLBN_NO_AUTOLINK /DBLS_DONT_EXPORT -) else if "%1"=="-d" ( - echo use dynamic lib -) else ( - echo "mk (-s|-d) " - goto exit -) -set CFLAGS=%CFLAGS% -I../mcl/include -set SRC=%2 -set EXE=%SRC:.cpp=.exe% -set EXE=%EXE:.c=.exe% -set EXE=%EXE:test\=bin\% -set EXE=%EXE:sample\=bin\% -echo cl %CFLAGS% %2 /Fe:%EXE% /link %LDFLAGS% -cl %CFLAGS% %2 /Fe:%EXE% /link %LDFLAGS% - -:exit diff --git a/vendor/github.com/dexon-foundation/bls/mkdll.bat b/vendor/github.com/dexon-foundation/bls/mkdll.bat deleted file mode 100755 index 17e934f92..000000000 --- a/vendor/github.com/dexon-foundation/bls/mkdll.bat +++ /dev/null @@ -1,8 +0,0 @@ -rem @echo off - -call setvar.bat dll -echo make bls384.dll -cl /c %CFLAGS% /DBLS_NO_AUTOLINK /Foobj/bls_c.obj src/bls_c.cpp -cl /c %CFLAGS% /DBLS_NO_AUTOLINK /Foobj/fp.obj ../mcl/src/fp.cpp -lib /OUT:lib/bls384.lib /nodefaultlib obj/bls_c.obj obj/fp.obj %LDFLAGS% -cl /LD /MT obj/bls_c.obj obj/fp.obj %CFLAGS% /link /out:bin/bls384.dll %LDFLAGS% diff --git a/vendor/github.com/dexon-foundation/bls/mklib.bat b/vendor/github.com/dexon-foundation/bls/mklib.bat deleted file mode 100644 index 4a60d7196..000000000 --- a/vendor/github.com/dexon-foundation/bls/mklib.bat +++ /dev/null @@ -1,26 +0,0 @@ -@echo off -call ..\mcl\setvar.bat -if "%1"=="dll" ( - echo make dynamic library DLL -) else ( - echo make static library LIB -) -call setvar.bat - -if "%1"=="dll" ( - cl /c %CFLAGS% /Foobj/bls_c256.obj src/bls_c256.cpp /DBLS_NO_AUTOLINK - cl /c %CFLAGS% /Foobj/bls_c384.obj src/bls_c384.cpp /DBLS_NO_AUTOLINK - cl /c %CFLAGS% /Foobj/bls_c384_256.obj src/bls_c384_256.cpp /DBLS_NO_AUTOLINK - cl /c %CFLAGS% /Foobj/fp.obj ../mcl/src/fp.cpp - link /nologo /DLL /OUT:bin\bls256.dll obj\bls_c256.obj obj\fp.obj %LDFLAGS% 
/implib:lib\bls256.lib - link /nologo /DLL /OUT:bin\bls384.dll obj\bls_c384.obj obj\fp.obj %LDFLAGS% /implib:lib\bls384.lib - link /nologo /DLL /OUT:bin\bls384_256.dll obj\bls_c384_256.obj obj\fp.obj %LDFLAGS% /implib:lib\bls384_256.lib -) else ( - cl /c %CFLAGS% /Foobj/bls_c256.obj src/bls_c256.cpp - cl /c %CFLAGS% /Foobj/bls_c384.obj src/bls_c384.cpp - cl /c %CFLAGS% /Foobj/bls_c384_256.obj src/bls_c384_256.cpp - cl /c %CFLAGS% /Foobj/fp.obj ../mcl/src/fp.cpp /DMCLBN_DONT_EXPORT - lib /OUT:lib/bls256.lib /nodefaultlib obj/bls_c256.obj obj/fp.obj %LDFLAGS% - lib /OUT:lib/bls384.lib /nodefaultlib obj/bls_c384.obj obj/fp.obj %LDFLAGS% - lib /OUT:lib/bls384_256.lib /nodefaultlib obj/bls_c384_256.obj obj/fp.obj %LDFLAGS% -) diff --git a/vendor/github.com/dexon-foundation/bls/obj/.emptydir b/vendor/github.com/dexon-foundation/bls/obj/.emptydir deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/dexon-foundation/bls/readme.md b/vendor/github.com/dexon-foundation/bls/readme.md deleted file mode 100644 index b1efb3f36..000000000 --- a/vendor/github.com/dexon-foundation/bls/readme.md +++ /dev/null @@ -1,187 +0,0 @@ -[![Build Status](https://travis-ci.com/dexon-foundation/bls.png?branch=dev)](https://travis-ci.com/dexon-foundation/bls) - -# BLS threshold signature - -An implementation of BLS threshold signatures - -# Installation Requirements - -Create a working directory (e.g., work) and clone the following repositories. -``` -mkdir work -cd work -git clone git://github.com/dexon-foundation/mcl.git -git clone git://github.com/dexon-foundation/bls.git -git clone git://github.com/herumi/cybozulib_ext ; Windows only -``` - -# News -* (Breaks backward compatibility) The suffix `_dy` has been removed from library names; bls\*.a now requires libmcl.so, so set LD_LIBRARY_PATH to the directory containing it. -* -tags option for Go bindings - * -tags bn256 - * -tags bn384\_256 - * -tags bn384 ; default mode -* Support swapping of G1 and G2 - * `make BLS_SWAP_G=1` assigns G1 to PublicKey and G2 to Signature. - * The Go binding does not support this feature yet. -* Build option without GMP - * `make MCL_USE_GMP=0` -* Build option without OpenSSL - * `make MCL_USE_OPENSSL=0` -* Build option to specify the `mcl` directory - * `make MCL_DIR=` - -* (old) libbls.a for the C++ interface (bls/bls.hpp) is removed. -Link `lib/libbls256.a` or `lib/libbls384.a` to use `bls/bls.hpp`, according to MCLBN_FP_UNIT_SIZE = 4 or 6. - -# Build and test for Linux -To make and test, run -``` -cd bls -make test -``` -To make the sample programs, run -``` -make sample_test -``` - -# Build and test for Windows -1) make a static library and use it -``` -mklib -mk -s test\bls_c384_test.cpp -bin\bls_c384_test.exe -``` - -2) make a dynamic library and use it -``` -mklib dll -mk -d test\bls_c384_test.cpp -bin\bls_c384_test.exe -``` - -# Library -* libbls256.a/libbls256.so ; for BN254, compiled with MCLBN_FP_UNIT_SIZE=4 -* libbls384.a/libbls384.so ; for BN254/BN381_1/BLS12_381, compiled with MCLBN_FP_UNIT_SIZE=6 -* libbls384_256.a/libbls384_256.so ; for BN254/BLS12_381, compiled with MCLBN_FP_UNIT_SIZE=6 and MCLBN_FR_UNIT_SIZE=4 - -See `mcl/include/curve_type.h` for the curve parameters. - -# API - -## Basic API - -BLS signature -``` -e : G2 x G1 -> Fp12 ; optimal ate pairing over BN curve -Q in G2 ; fixed global parameter -H : {str} -> G1 -s in Fr: secret key -sQ in G2; public key -s H(m) in G1; signature of m -verify ; e(sQ, H(m)) = e(Q, s H(m)) -``` - -``` -void bls::init(); -``` - -Initialize this library. Call this once before using any other API.
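For concreteness, a minimal sketch of the basic flow described by this API, assuming the default BN254 curve and a libbls256 build with MCLBN_FP_UNIT_SIZE = 4 (as in the sample program):

```cpp
#define MCLBN_FP_UNIT_SIZE 4
#include <bls/bls.hpp>
#include <cassert>

int main()
{
	bls::init(); // BN254 by default; once per process, not thread safe
	bls::SecretKey sec;
	sec.init(); // s <- CSPRNG
	bls::PublicKey pub;
	sec.getPublicKey(pub); // pub = sQ
	const std::string m = "hello";
	bls::Signature sig;
	sec.sign(sig, m); // sig = s H(m)
	assert(sig.verify(pub, m)); // e(sQ, H(m)) == e(Q, s H(m))
	return 0;
}
```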
- -``` -void SecretKey::init(); -``` - -Initialize the SecretKey instance; `s` is set to a random number. - -``` -void SecretKey::getPublicKey(PublicKey& pub) const; -``` - -Get the public key `sQ` for the secret key `s`. - -``` -void SecretKey::sign(Sign& sign, const std::string& m) const; -``` - -Make the sign `s H(m)` from message m. - -``` -bool Sign::verify(const PublicKey& pub, const std::string& m) const; -``` - -Verify the sign with pub and m; return true if it is valid, i.e., if - -``` -e(sQ, H(m)) == e(Q, s H(m)) -``` - -### Secret Sharing API - -``` -void SecretKey::getMasterSecretKey(SecretKeyVec& msk, size_t k) const; -``` - -Prepare k-out-of-n secret sharing for the secret key. -`msk[0]` is the original secret key `s` and `msk[i]` for i > 0 are random secret keys. - -``` -void SecretKey::set(const SecretKeyVec& msk, const Id& id); -``` - -Make the secret key f(id) from msk and id, where f(x) = msk[0] + msk[1] x + ... + msk[k-1] x^{k-1}. - -You can make a public key `f(id)Q` from each secret key f(id) for id != 0 and sign a message. - -``` -void Sign::recover(const SignVec& signVec, const IdVec& idVec); -``` - -Collect k pairs of sign `f(id) H(m)` and `id` for a message m and recover the original signature `s H(m)` of the secret key `s`. - -### PoP (Proof of Possession) - -``` -void SecretKey::getPop(Sign& pop) const; -``` - -Sign pub and make a pop `s H(sQ)`. - -``` -bool Sign::verify(const PublicKey& pub) const; -``` - -Verify a public key by its pop. - -# Check the order of a point - -The deserializer functions check whether a point has the correct order; this check is costly, especially for G2. -If you do not want it, call -``` -blsSignatureVerifyOrder(false); -blsPublicKeyVerifyOrder(false); -``` - -cf. subgroup attack - -# Go -``` -make test_go -``` - -# WASM (WebAssembly) -``` -mkdir ../bls-wasm -make bls-wasm -``` -see [BLS signature demo on browser](https://herumi.github.io/bls-wasm/bls-demo.html) - -# License - -modified new BSD License -http://opensource.org/licenses/BSD-3-Clause - -# Author - -MITSUNARI Shigeo(herumi@nifty.com) diff --git a/vendor/github.com/dexon-foundation/bls/release.props b/vendor/github.com/dexon-foundation/bls/release.props deleted file mode 100644 index 886ce6890..000000000 --- a/vendor/github.com/dexon-foundation/bls/release.props +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - - - MultiThreaded - - - - \ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/bls/sample/bls_smpl.cpp b/vendor/github.com/dexon-foundation/bls/sample/bls_smpl.cpp deleted file mode 100644 index e812cd500..000000000 --- a/vendor/github.com/dexon-foundation/bls/sample/bls_smpl.cpp +++ /dev/null @@ -1,168 +0,0 @@ -#define MCLBN_FP_UNIT_SIZE 4 -#include -#include -#include -#include - -const std::string pubFile = "sample/publickey"; -const std::string secFile = "sample/secretkey"; -const std::string signFile = "sample/sign"; - -std::string makeName(const std::string& name, const bls::Id& id) -{ - const std::string suf = ".txt"; - if (id.isZero()) return name + suf; - std::ostringstream os; - os << name << '.'
<< id << suf; - return os.str(); -} - -template -void save(const std::string& file, const T& t, const bls::Id& id = 0) -{ - const std::string name = makeName(file, id); - std::ofstream ofs(name.c_str(), std::ios::binary); - if (!(ofs << t)) { - throw cybozu::Exception("can't save") << name; - } -} - -template -void load(T& t, const std::string& file, const bls::Id& id = 0) -{ - const std::string name = makeName(file, id); - std::ifstream ifs(name.c_str(), std::ios::binary); - if (!(ifs >> t)) { - throw cybozu::Exception("can't load") << name; - } -} - -int init() -{ - printf("make %s and %s files\n", secFile.c_str(), pubFile.c_str()); - bls::SecretKey sec; - sec.init(); - save(secFile, sec); - bls::PublicKey pub; - sec.getPublicKey(pub); - save(pubFile, pub); - return 0; -} - -int sign(const std::string& m, int id) -{ - printf("sign message `%s` by id=%d\n", m.c_str(), id); - bls::SecretKey sec; - load(sec, secFile, id); - bls::Signature s; - sec.sign(s, m); - save(signFile, s, id); - return 0; -} - -int verify(const std::string& m, int id) -{ - printf("verify message `%s` by id=%d\n", m.c_str(), id); - bls::PublicKey pub; - load(pub, pubFile, id); - bls::Signature s; - load(s, signFile, id); - if (s.verify(pub, m)) { - puts("verify ok"); - return 0; - } else { - puts("verify err"); - return 1; - } -} - -int share(size_t n, size_t k) -{ - printf("%d-out-of-%d threshold sharing\n", (int)k, (int)n); - bls::SecretKey sec; - load(sec, secFile); - bls::SecretKeyVec msk; - sec.getMasterSecretKey(msk, k); - bls::SecretKeyVec secVec(n); - bls::IdVec ids(n); - for (size_t i = 0; i < n; i++) { - int id = i + 1; - ids[i] = id; - secVec[i].set(msk, id); - } - for (size_t i = 0; i < n; i++) { - save(secFile, secVec[i], ids[i]); - bls::PublicKey pub; - secVec[i].getPublicKey(pub); - save(pubFile, pub, ids[i]); - } - return 0; -} - -int recover(const bls::IdVec& ids) -{ - printf("recover from"); - for (size_t i = 0; i < ids.size(); i++) { - std::cout << ' ' << ids[i]; - } - printf("\n"); - bls::SignatureVec sigVec(ids.size()); - for (size_t i = 0; i < sigVec.size(); i++) { - load(sigVec[i], signFile, ids[i]); - } - bls::Signature s; - s.recover(sigVec, ids); - save(signFile, s); - return 0; -} - -int main(int argc, char *argv[]) - try -{ - bls::init(); // use BN254 - - std::string mode; - std::string m; - size_t n; - size_t k; - int id; - bls::IdVec ids; - - cybozu::Option opt; - opt.appendParam(&mode, "init|sign|verify|share|recover"); - opt.appendOpt(&n, 10, "n", ": k-out-of-n threshold"); - opt.appendOpt(&k, 3, "k", ": k-out-of-n threshold"); - opt.appendOpt(&m, "", "m", ": message to be signed"); - opt.appendOpt(&id, 0, "id", ": id of secretKey"); - opt.appendVec(&ids, "ids", ": select k id in [0, n). this option should be last"); - opt.appendHelp("h"); - if (!opt.parse(argc, argv)) { - goto ERR_EXIT; - } - - if (mode == "init") { - return init(); - } else if (mode == "sign") { - if (m.empty()) goto ERR_EXIT; - return sign(m, id); - } else if (mode == "verify") { - if (m.empty()) goto ERR_EXIT; - return verify(m, id); - } else if (mode == "share") { - return share(n, k); - } else if (mode == "recover") { - if (ids.empty()) { - fprintf(stderr, "use -ids option. ex. 
share -ids 1 3 5\n"); - goto ERR_EXIT; - } - return recover(ids); - } else { - fprintf(stderr, "bad mode %s\n", mode.c_str()); - } -ERR_EXIT: - opt.usage(); - return 1; -} catch (std::exception& e) { - fprintf(stderr, "ERR %s\n", e.what()); - return 1; -} diff --git a/vendor/github.com/dexon-foundation/bls/setvar.bat b/vendor/github.com/dexon-foundation/bls/setvar.bat deleted file mode 100755 index 0ff286ab8..000000000 --- a/vendor/github.com/dexon-foundation/bls/setvar.bat +++ /dev/null @@ -1,6 +0,0 @@ -@echo off -call ..\mcl\setvar.bat -set CFLAGS=%CFLAGS% /I ..\mcl\include /I ./ -set LDFLAGS=%LDFLAGS% /LIBPATH:..\mcl\lib -echo CFLAGS=%CFLAGS% -echo LDFLAGS=%LDFLAGS% diff --git a/vendor/github.com/dexon-foundation/bls/src/bls_c256.cpp b/vendor/github.com/dexon-foundation/bls/src/bls_c256.cpp deleted file mode 100644 index a9f3412ea..000000000 --- a/vendor/github.com/dexon-foundation/bls/src/bls_c256.cpp +++ /dev/null @@ -1,3 +0,0 @@ -#define MCLBN_FP_UNIT_SIZE 4 -#include "bls_c_impl.hpp" - diff --git a/vendor/github.com/dexon-foundation/bls/src/bls_c384.cpp b/vendor/github.com/dexon-foundation/bls/src/bls_c384.cpp deleted file mode 100644 index d28f8547b..000000000 --- a/vendor/github.com/dexon-foundation/bls/src/bls_c384.cpp +++ /dev/null @@ -1,3 +0,0 @@ -#define MCLBN_FP_UNIT_SIZE 6 -#include "bls_c_impl.hpp" - diff --git a/vendor/github.com/dexon-foundation/bls/src/bls_c384_256.cpp b/vendor/github.com/dexon-foundation/bls/src/bls_c384_256.cpp deleted file mode 100644 index 3dcb3e7d7..000000000 --- a/vendor/github.com/dexon-foundation/bls/src/bls_c384_256.cpp +++ /dev/null @@ -1,4 +0,0 @@ -#define MCLBN_FP_UNIT_SIZE 6 -#define MCLBN_FR_UNIT_SIZE 4 -#include "bls_c_impl.hpp" - diff --git a/vendor/github.com/dexon-foundation/bls/src/bls_c_impl.hpp b/vendor/github.com/dexon-foundation/bls/src/bls_c_impl.hpp deleted file mode 100644 index b38c1ad06..000000000 --- a/vendor/github.com/dexon-foundation/bls/src/bls_c_impl.hpp +++ /dev/null @@ -1,614 +0,0 @@ -#define MCLBN_DONT_EXPORT -#define BLS_DLL_EXPORT - -#include - -#if 1 -#include "mcl/impl/bn_c_impl.hpp" -#else -#if MCLBN_FP_UNIT_SIZE == 4 && MCLBN_FR_UNIT_SIZE == 4 -#include -#elif MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 6 -#include -#elif MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 4 -#include -#elif MCLBN_FP_UNIT_SIZE == 8 && MCLBN_FR_UNIT_SIZE == 8 -#include -#else - #error "not supported size" -#endif -#include -using namespace mcl::bn; -inline Fr *cast(mclBnFr *p) { return reinterpret_cast(p); } -inline const Fr *cast(const mclBnFr *p) { return reinterpret_cast(p); } - -inline G1 *cast(mclBnG1 *p) { return reinterpret_cast(p); } -inline const G1 *cast(const mclBnG1 *p) { return reinterpret_cast(p); } - -inline G2 *cast(mclBnG2 *p) { return reinterpret_cast(p); } -inline const G2 *cast(const mclBnG2 *p) { return reinterpret_cast(p); } - -inline Fp12 *cast(mclBnGT *p) { return reinterpret_cast(p); } -inline const Fp12 *cast(const mclBnGT *p) { return reinterpret_cast(p); } - -inline Fp6 *cast(uint64_t *p) { return reinterpret_cast(p); } -inline const Fp6 *cast(const uint64_t *p) { return reinterpret_cast(p); } -#endif - -void Gmul(G1& z, const G1& x, const Fr& y) { G1::mul(z, x, y); } -void Gmul(G2& z, const G2& x, const Fr& y) { G2::mul(z, x, y); } -void GmulCT(G1& z, const G1& x, const Fr& y) { G1::mulCT(z, x, y); } -void GmulCT(G2& z, const G2& x, const Fr& y) { G2::mulCT(z, x, y); } - -/* - BLS signature - e : G1 x G2 -> GT - Q in G2 ; fixed global parameter - H : {str} -> G1 - s : secret key - sQ ; public key - 
s H(m) ; signature of m - verify ; e(sQ, H(m)) = e(Q, s H(m)) - - swap G1 and G2 if BLS_SWAP_G is defined - @note the current implementation does not support precomputed miller loop -*/ - -#ifdef BLS_SWAP_G -static G1 g_P; -inline const G1& getBasePoint() { return g_P; } -#else -static G2 g_Q; -const size_t maxQcoeffN = 128; -static mcl::FixedArray g_Qcoeff; // precomputed Q -inline const G2& getBasePoint() { return g_Q; } -inline const mcl::FixedArray& getQcoeff() { return g_Qcoeff; } -#endif - -int blsInitNotThreadSafe(int curve, int compiledTimeVar) -{ - if (compiledTimeVar != MCLBN_COMPILED_TIME_VAR) { - return -(compiledTimeVar | (MCLBN_COMPILED_TIME_VAR * 100)); - } - const mcl::CurveParam& cp = mcl::getCurveParam(curve); - bool b; - initPairing(&b, cp); - if (!b) return -1; - -#ifdef BLS_SWAP_G - mapToG1(&b, g_P, 1); -#else - - if (curve == MCL_BN254) { - const char *Qx_BN254 = "11ccb44e77ac2c5dc32a6009594dbe331ec85a61290d6bbac8cc7ebb2dceb128 f204a14bbdac4a05be9a25176de827f2e60085668becdd4fc5fa914c9ee0d9a"; - const char *Qy_BN254 = "7c13d8487903ee3c1c5ea327a3a52b6cc74796b1760d5ba20ed802624ed19c8 8f9642bbaacb73d8c89492528f58932f2de9ac3e80c7b0e41f1a84f1c40182"; - g_Q.x.setStr(&b, Qx_BN254, 16); - g_Q.y.setStr(&b, Qy_BN254, 16); - g_Q.z = 1; - } else { - mapToG2(&b, g_Q, 1); - } - if (!b) return -100; - if (curve == MCL_BN254) { - #include "./qcoeff-bn254.hpp" - g_Qcoeff.resize(BN::param.precomputedQcoeffSize); - assert(g_Qcoeff.size() == CYBOZU_NUM_OF_ARRAY(QcoeffTblBN254)); - for (size_t i = 0; i < g_Qcoeff.size(); i++) { - Fp6& x6 = g_Qcoeff[i]; - for (size_t j = 0; j < 6; j++) { - Fp& x = x6.getFp0()[j]; - mcl::fp::Unit *p = const_cast(x.getUnit()); - for (size_t k = 0; k < 4; k++) { - p[k] = QcoeffTblBN254[i][j][k]; - } - } - } - } else { - precomputeG2(&b, g_Qcoeff, getBasePoint()); - } -#endif - if (!b) return -101; - return 0; -} - -#ifdef __EMSCRIPTEN__ -extern "C" BLS_DLL_API void *blsMalloc(size_t n) -{ - return malloc(n); -} -extern "C" BLS_DLL_API void blsFree(void *p) -{ - free(p); -} -#endif - -#if !defined(__EMSCRIPTEN__) && !defined(__wasm__) - #if defined(CYBOZU_CPP_VERSION) && CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 - #include - #define USE_STD_MUTEX - #else - #include - #define USE_CYBOZU_MUTEX - #endif -#endif - -int blsInit(int curve, int compiledTimeVar) -{ - int ret = 0; -#ifdef USE_STD_MUTEX - static std::mutex m; - std::lock_guard lock(m); -#elif defined(USE_CYBOZU_MUTEX) - static cybozu::Mutex m; - cybozu::AutoLock lock(m); -#endif - static int g_curve = -1; - if (g_curve != curve) { - ret = blsInitNotThreadSafe(curve, compiledTimeVar); - g_curve = curve; - } - return ret; -} - -static inline const mclBnG1 *cast(const G1* x) { return (const mclBnG1*)x; } -static inline const mclBnG2 *cast(const G2* x) { return (const mclBnG2*)x; } - -void blsIdSetInt(blsId *id, int x) -{ - *cast(&id->v) = x; -} - -int blsSecretKeySetLittleEndian(blsSecretKey *sec, const void *buf, mclSize bufSize) -{ - cast(&sec->v)->setArrayMask((const char *)buf, bufSize); - return 0; -} -int blsSecretKeySetLittleEndianMod(blsSecretKey *sec, const void *buf, mclSize bufSize) -{ - bool b; - cast(&sec->v)->setArray(&b, (const char *)buf, bufSize, mcl::fp::Mod); - return b ? 
0 : -1; -} - -void blsGetPublicKey(blsPublicKey *pub, const blsSecretKey *sec) -{ - Gmul(*cast(&pub->v), getBasePoint(), *cast(&sec->v)); -} - -void blsSign(blsSignature *sig, const blsSecretKey *sec, const void *m, mclSize size) -{ -#ifdef BLS_SWAP_G - G2 Hm; - hashAndMapToG2(Hm, m, size); -#else - G1 Hm; - hashAndMapToG1(Hm, m, size); -#endif - GmulCT(*cast(&sig->v), Hm, *cast(&sec->v)); -} - -#ifdef BLS_SWAP_G -/* - e(P, sHm) == e(sP, Hm) - <=> finalExp(ML(P, sHm) * e(-sP, Hm)) == 1 -*/ -bool isEqualTwoPairings(const G2& sHm, const G1& sP, const G2& Hm) -{ - GT e1, e2; - millerLoop(e1, getBasePoint(), sHm); - G1 neg_sP; - G1::neg(neg_sP, sP); - millerLoop(e2, neg_sP, Hm); - e1 *= e2; - finalExp(e1, e1); - return e1.isOne(); -} -#else -/* - e(P1, Q1) == e(P2, Q2) - <=> finalExp(ML(P1, Q1)) == finalExp(ML(P2, Q2)) - <=> finalExp(ML(P1, Q1) / ML(P2, Q2)) == 1 - <=> finalExp(ML(P1, Q1) * ML(-P2, Q2)) == 1 - Q1 is precomputed -*/ -bool isEqualTwoPairings(const G1& P1, const Fp6* Q1coeff, const G1& P2, const G2& Q2) -{ - GT e; - precomputedMillerLoop2mixed(e, P2, Q2, -P1, Q1coeff); - finalExp(e, e); - return e.isOne(); -} -#endif - -int blsVerify(const blsSignature *sig, const blsPublicKey *pub, const void *m, mclSize size) -{ -#ifdef BLS_SWAP_G - G2 Hm; - hashAndMapToG2(Hm, m, size); - return isEqualTwoPairings(*cast(&sig->v), *cast(&pub->v), Hm); -#else - G1 Hm; - hashAndMapToG1(Hm, m, size); - /* - e(sHm, Q) = e(Hm, sQ) - e(sig, Q) = e(Hm, pub) - */ - return isEqualTwoPairings(*cast(&sig->v), getQcoeff().data(), Hm, *cast(&pub->v)); -#endif -} - -mclSize blsIdSerialize(void *buf, mclSize maxBufSize, const blsId *id) -{ - return cast(&id->v)->serialize(buf, maxBufSize); -} - -mclSize blsSecretKeySerialize(void *buf, mclSize maxBufSize, const blsSecretKey *sec) -{ - return cast(&sec->v)->serialize(buf, maxBufSize); -} - -mclSize blsPublicKeySerialize(void *buf, mclSize maxBufSize, const blsPublicKey *pub) -{ - return cast(&pub->v)->serialize(buf, maxBufSize); -} - -mclSize blsSignatureSerialize(void *buf, mclSize maxBufSize, const blsSignature *sig) -{ - return cast(&sig->v)->serialize(buf, maxBufSize); -} - -mclSize blsIdDeserialize(blsId *id, const void *buf, mclSize bufSize) -{ - return cast(&id->v)->deserialize(buf, bufSize); -} - -mclSize blsSecretKeyDeserialize(blsSecretKey *sec, const void *buf, mclSize bufSize) -{ - return cast(&sec->v)->deserialize(buf, bufSize); -} - -mclSize blsPublicKeyDeserialize(blsPublicKey *pub, const void *buf, mclSize bufSize) -{ - return cast(&pub->v)->deserialize(buf, bufSize); -} - -mclSize blsSignatureDeserialize(blsSignature *sig, const void *buf, mclSize bufSize) -{ - return cast(&sig->v)->deserialize(buf, bufSize); -} - -int blsIdIsEqual(const blsId *lhs, const blsId *rhs) -{ - return *cast(&lhs->v) == *cast(&rhs->v); -} - -int blsSecretKeyIsEqual(const blsSecretKey *lhs, const blsSecretKey *rhs) -{ - return *cast(&lhs->v) == *cast(&rhs->v); -} - -int blsPublicKeyIsEqual(const blsPublicKey *lhs, const blsPublicKey *rhs) -{ - return *cast(&lhs->v) == *cast(&rhs->v); -} - -int blsSignatureIsEqual(const blsSignature *lhs, const blsSignature *rhs) -{ - return *cast(&lhs->v) == *cast(&rhs->v); -} - -int blsSecretKeyShare(blsSecretKey *sec, const blsSecretKey* msk, mclSize k, const blsId *id) -{ - bool b; - mcl::evaluatePolynomial(&b, *cast(&sec->v), cast(&msk->v), k, *cast(&id->v)); - return b ? 
0 : -1; -} - -int blsPublicKeyShare(blsPublicKey *pub, const blsPublicKey *mpk, mclSize k, const blsId *id) -{ - bool b; - mcl::evaluatePolynomial(&b, *cast(&pub->v), cast(&mpk->v), k, *cast(&id->v)); - return b ? 0 : -1; -} - -int blsSecretKeyRecover(blsSecretKey *sec, const blsSecretKey *secVec, const blsId *idVec, mclSize n) -{ - bool b; - mcl::LagrangeInterpolation(&b, *cast(&sec->v), cast(&idVec->v), cast(&secVec->v), n); - return b ? 0 : -1; -} - -int blsPublicKeyRecover(blsPublicKey *pub, const blsPublicKey *pubVec, const blsId *idVec, mclSize n) -{ - bool b; - mcl::LagrangeInterpolation(&b, *cast(&pub->v), cast(&idVec->v), cast(&pubVec->v), n); - return b ? 0 : -1; -} - -int blsSignatureRecover(blsSignature *sig, const blsSignature *sigVec, const blsId *idVec, mclSize n) -{ - bool b; - mcl::LagrangeInterpolation(&b, *cast(&sig->v), cast(&idVec->v), cast(&sigVec->v), n); - return b ? 0 : -1; -} - -void blsSecretKeyAdd(blsSecretKey *sec, const blsSecretKey *rhs) -{ - *cast(&sec->v) += *cast(&rhs->v); -} - -void blsPublicKeyAdd(blsPublicKey *pub, const blsPublicKey *rhs) -{ - *cast(&pub->v) += *cast(&rhs->v); -} - -void blsSignatureAdd(blsSignature *sig, const blsSignature *rhs) -{ - *cast(&sig->v) += *cast(&rhs->v); -} - -void blsSignatureVerifyOrder(int doVerify) -{ -#ifdef BLS_SWAP_G - verifyOrderG2(doVerify != 0); -#else - verifyOrderG1(doVerify != 0); -#endif -} -void blsPublicKeyVerifyOrder(int doVerify) -{ -#ifdef BLS_SWAP_G - verifyOrderG1(doVerify != 0); -#else - verifyOrderG2(doVerify != 0); -#endif -} -int blsSignatureIsValidOrder(const blsSignature *sig) -{ - return cast(&sig->v)->isValidOrder(); -} -int blsPublicKeyIsValidOrder(const blsPublicKey *pub) -{ - return cast(&pub->v)->isValidOrder(); -} - -#ifndef BLS_MINIMUM_API -template -inline bool toG(G& Hm, const void *h, mclSize size) -{ - Fp t; - t.setArrayMask((const char *)h, size); - bool b; -#ifdef BLS_SWAP_G - BN::mapToG2(&b, Hm, Fp2(t, 0)); -#else - BN::mapToG1(&b, Hm, t); -#endif - return b; -} - -int blsVerifyAggregatedHashes(const blsSignature *aggSig, const blsPublicKey *pubVec, const void *hVec, size_t sizeofHash, mclSize n) -{ - if (n == 0) return 0; - GT e1, e2; - const char *ph = (const char*)hVec; -#ifdef BLS_SWAP_G - millerLoop(e1, getBasePoint(), -*cast(&aggSig->v)); - G2 h; - if (!toG(h, &ph[0], sizeofHash)) return 0; - BN::millerLoop(e2, *cast(&pubVec[0].v), h); - e1 *= e2; - for (size_t i = 1; i < n; i++) { - if (!toG(h, &ph[i * sizeofHash], sizeofHash)) return 0; - millerLoop(e2, *cast(&pubVec[i].v), h); - e1 *= e2; - } -#else - /* - e(aggSig, Q) = prod_i e(hVec[i], pubVec[i]) - <=> finalExp(ML(-aggSig, Q) * prod_i ML(hVec[i], pubVec[i])) == 1 - */ - BN::precomputedMillerLoop(e1, -*cast(&aggSig->v), g_Qcoeff.data()); - G1 h; - if (!toG(h, &ph[0], sizeofHash)) return 0; - BN::millerLoop(e2, h, *cast(&pubVec[0].v)); - e1 *= e2; - for (size_t i = 1; i < n; i++) { - if (!toG(h, &ph[i * sizeofHash], sizeofHash)) return 0; - BN::millerLoop(e2, h, *cast(&pubVec[i].v)); - e1 *= e2; - } -#endif - BN::finalExp(e1, e1); - return e1.isOne(); -} - -int blsSignHash(blsSignature *sig, const blsSecretKey *sec, const void *h, mclSize size) -{ -#ifdef BLS_SWAP_G - G2 Hm; -#else - G1 Hm; -#endif - if (!toG(Hm, h, size)) return -1; - GmulCT(*cast(&sig->v), Hm, *cast(&sec->v)); - return 0; -} - -int blsVerifyPairing(const blsSignature *X, const blsSignature *Y, const blsPublicKey *pub) -{ -#ifdef BLS_SWAP_G - return isEqualTwoPairings(*cast(&X->v), *cast(&pub->v), *cast(&Y->v)); -#else - return 
isEqualTwoPairings(*cast(&X->v), getQcoeff().data(), *cast(&Y->v), *cast(&pub->v)); -#endif -} - -int blsVerifyHash(const blsSignature *sig, const blsPublicKey *pub, const void *h, mclSize size) -{ - blsSignature Hm; - if (!toG(*cast(&Hm.v), h, size)) return 0; - return blsVerifyPairing(sig, &Hm, pub); -} - -void blsSecretKeySub(blsSecretKey *sec, const blsSecretKey *rhs) -{ - *cast(&sec->v) -= *cast(&rhs->v); -} - -void blsPublicKeySub(blsPublicKey *pub, const blsPublicKey *rhs) -{ - *cast(&pub->v) -= *cast(&rhs->v); -} - -void blsSignatureSub(blsSignature *sig, const blsSignature *rhs) -{ - *cast(&sig->v) -= *cast(&rhs->v); -} - -mclSize blsGetOpUnitSize() // FpUint64Size -{ - return Fp::getUnitSize() * sizeof(mcl::fp::Unit) / sizeof(uint64_t); -} - -int blsGetCurveOrder(char *buf, mclSize maxBufSize) -{ - return (int)Fr::getModulo(buf, maxBufSize); -} - -int blsGetFieldOrder(char *buf, mclSize maxBufSize) -{ - return (int)Fp::getModulo(buf, maxBufSize); -} - -int blsGetG1ByteSize() -{ - return (int)Fp::getByteSize(); -} - -int blsGetFrByteSize() -{ - return (int)Fr::getByteSize(); -} - -#ifdef BLS_SWAP_G -void blsGetGeneratorOfG1(blsPublicKey *pub) -{ - *cast(&pub->v) = getBasePoint(); -} -#else -void blsGetGeneratorOfG2(blsPublicKey *pub) -{ - *cast(&pub->v) = getBasePoint(); -} -#endif - -int blsIdSetDecStr(blsId *id, const char *buf, mclSize bufSize) -{ - return cast(&id->v)->deserialize(buf, bufSize, 10) > 0 ? 0 : -1; -} -int blsIdSetHexStr(blsId *id, const char *buf, mclSize bufSize) -{ - return cast(&id->v)->deserialize(buf, bufSize, 16) > 0 ? 0 : -1; -} - -int blsIdSetLittleEndian(blsId *id, const void *buf, mclSize bufSize) -{ - cast(&id->v)->setArrayMask((const char *)buf, bufSize); - return 0; -} - -mclSize blsIdGetDecStr(char *buf, mclSize maxBufSize, const blsId *id) -{ - return cast(&id->v)->getStr(buf, maxBufSize, 10); -} - -mclSize blsIdGetHexStr(char *buf, mclSize maxBufSize, const blsId *id) -{ - return cast(&id->v)->getStr(buf, maxBufSize, 16); -} - -int blsHashToSecretKey(blsSecretKey *sec, const void *buf, mclSize bufSize) -{ - cast(&sec->v)->setHashOf(buf, bufSize); - return 0; -} - -#ifndef MCL_DONT_USE_CSPRNG -int blsSecretKeySetByCSPRNG(blsSecretKey *sec) -{ - bool b; - cast(&sec->v)->setByCSPRNG(&b); - return b ? 0 : -1; -} -void blsSetRandFunc(void *self, unsigned int (*readFunc)(void *self, void *buf, unsigned int bufSize)) -{ - mcl::fp::RandGen::setRandFunc(self, readFunc); -} -#endif - -void blsGetPop(blsSignature *sig, const blsSecretKey *sec) -{ - blsPublicKey pub; - blsGetPublicKey(&pub, sec); - char buf[1024]; - mclSize n = cast(&pub.v)->serialize(buf, sizeof(buf)); - assert(n); - blsSign(sig, sec, buf, n); -} - -int blsVerifyPop(const blsSignature *sig, const blsPublicKey *pub) -{ - char buf[1024]; - mclSize n = cast(&pub->v)->serialize(buf, sizeof(buf)); - if (n == 0) return 0; - return blsVerify(sig, pub, buf, n); -} - -mclSize blsIdGetLittleEndian(void *buf, mclSize maxBufSize, const blsId *id) -{ - return cast(&id->v)->serialize(buf, maxBufSize); -} -int blsSecretKeySetDecStr(blsSecretKey *sec, const char *buf, mclSize bufSize) -{ - return cast(&sec->v)->deserialize(buf, bufSize, 10) > 0 ? 0 : -1; -} -int blsSecretKeySetHexStr(blsSecretKey *sec, const char *buf, mclSize bufSize) -{ - return cast(&sec->v)->deserialize(buf, bufSize, 16) > 0 ? 
0 : -1; -} -mclSize blsSecretKeyGetLittleEndian(void *buf, mclSize maxBufSize, const blsSecretKey *sec) -{ - return cast(&sec->v)->serialize(buf, maxBufSize); -} -mclSize blsSecretKeyGetDecStr(char *buf, mclSize maxBufSize, const blsSecretKey *sec) -{ - return cast(&sec->v)->getStr(buf, maxBufSize, 10); -} -mclSize blsSecretKeyGetHexStr(char *buf, mclSize maxBufSize, const blsSecretKey *sec) -{ - return cast(&sec->v)->getStr(buf, maxBufSize, 16); -} -int blsPublicKeySetHexStr(blsPublicKey *pub, const char *buf, mclSize bufSize) -{ - return cast(&pub->v)->deserialize(buf, bufSize, 16) > 0 ? 0 : -1; -} -mclSize blsPublicKeyGetHexStr(char *buf, mclSize maxBufSize, const blsPublicKey *pub) -{ - return cast(&pub->v)->getStr(buf, maxBufSize, 16); -} -int blsSignatureSetHexStr(blsSignature *sig, const char *buf, mclSize bufSize) -{ - return cast(&sig->v)->deserialize(buf, bufSize, 16) > 0 ? 0 : -1; -} -mclSize blsSignatureGetHexStr(char *buf, mclSize maxBufSize, const blsSignature *sig) -{ - return cast(&sig->v)->getStr(buf, maxBufSize, 16); -} -void blsDHKeyExchange(blsPublicKey *out, const blsSecretKey *sec, const blsPublicKey *pub) -{ - GmulCT(*cast(&out->v), *cast(&pub->v), *cast(&sec->v)); -} - -#endif - diff --git a/vendor/github.com/dexon-foundation/bls/src/proj/bls.vcxproj b/vendor/github.com/dexon-foundation/bls/src/proj/bls.vcxproj deleted file mode 100644 index b78c97919..000000000 --- a/vendor/github.com/dexon-foundation/bls/src/proj/bls.vcxproj +++ /dev/null @@ -1,92 +0,0 @@ - - - - - Debug - x64 - - - Release - x64 - - - - {1DBB979A-C212-45CD-9563-446A96F87F71} - Win32Proj - ec_test - - - - StaticLibrary - true - v140 - MultiByte - - - StaticLibrary - false - v140 - true - MultiByte - - - - - - - - - - - - - - - - - true - .lib - $(SolutionDir)lib\ - - - false - .lib - $(SolutionDir)lib\ - - - - - - Level3 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - - - Console - true - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - - - Console - true - true - true - - - - - - - - - \ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/bls/src/qcoeff-bn254.hpp b/vendor/github.com/dexon-foundation/bls/src/qcoeff-bn254.hpp deleted file mode 100644 index 18d169568..000000000 --- a/vendor/github.com/dexon-foundation/bls/src/qcoeff-bn254.hpp +++ /dev/null @@ -1,564 +0,0 @@ -#if MCL_SIZEOF_UNIT == 8 -static const uint64_t QcoeffTblBN254[][6][4] = { - { - {0x8c5c1b842e501310ull,0x6a418cdaced77710ull,0xf5ad725dd0d9a5ffull,0x012d501f32362f48ull,}, - {0xb8a8a8c11e51dc62ull,0xeaeb87e0f25a8611ull,0x9ba8738e4483d511ull,0x0664a4e36d64379eull,}, - {0x4a5af38c0aa5930aull,0x189fef61a74c388dull,0x83cc3225c7748527ull,0x2107491582310dc6ull,}, - {0x43aef621120a524aull,0x359d06a56e339486ull,0xdf5ab35e2222d9b1ull,0x20968bac939743acull,}, - {0xe8e4c0bb65cd67b8ull,0x255a0859bc55ff2cull,0xf1c57d1da3c060c6ull,0x138d83468f42cc0eull,}, - {0xdf985e5f768c149cull,0xa059c65c4b5da3ffull,0xed3d38d9f77bb968ull,0x02281f01255a850cull,}, - }, - { - {0xe3f53d8cfb4866a0ull,0xa0f9a16a21c159aaull,0x647fc210c7edf3a9ull,0x0db92f588c73caf9ull,}, - {0x6e9349b777bc2cf1ull,0x4fd987eb22e2469cull,0x666644a8e61b0a0eull,0x02f5bf9aae96c0deull,}, - {0xd5fd6288342479ebull,0x74022b671c6c8d8eull,0xda32d1b497cac7b2ull,0x0abecf35a19b5c7eull,}, - {0x1500891565b5f9aaull,0x4b7ce141cd7f4361ull,0xadf3447c534846c1ull,0x078b36a30d45de5full,}, - {0x37f172cff76e4b77ull,0x696d093b3ee37e4aull,0x2193797b7da56c6eull,0x1f5fc9efcbbb93e7ull,}, - 
{0x4c7d799b765b8f44ull,0x7adfd285e906edd8ull,0x79d68eaaf88a0885ull,0x20707d672be892cbull,}, - }, - { - {0x84bbf3849c691e74ull,0xeeb90e1efc3e3436ull,0xd9d9bb6257bf19e4ull,0x1b37ef04ea7d6f85ull,}, - {0xa6bdbbe0895ba12aull,0x58cade2ad0f1aa84ull,0xe0bb325678a2c748ull,0x23d1992e977c788cull,}, - {0x44559f0b0f4bb2ccull,0xe61b479bc88980eeull,0x2a70aa9df3e28c92ull,0x18039bee97722b74ull,}, - {0x9e5667da3db8e9e6ull,0x826ba07eb28c31f8ull,0x3f8b4eeb463d6923ull,0x1af85c2b10d3a2f0ull,}, - {0x8783f372684ea930ull,0x1aa0d9e436f41ea7ull,0xc84a3fc56af9f624ull,0x0d02698756cd5a2cull,}, - {0xe47407ede7b7c2afull,0x7d665c59e37ee7a7ull,0x542b91f12e0fa2a7ull,0x2084e73dc21f415eull,}, - }, - { - {0x2aebe318f3d167c6ull,0x5a2b2364b3abc614ull,0x31b2cdfd847e0053ull,0x04f0f63eed2a2f8cull,}, - {0x0573d320ee14ecf4ull,0x4e0dc9d92e543ddeull,0x58a280570cac8d36ull,0x16226935e8e9f9bdull,}, - {0x2d51a89174717a26ull,0x7341be7f883d0806ull,0xc9b4ee66364066ceull,0x018c79b95f127b49ull,}, - {0xe5420d2f4210dbd7ull,0x179c22d607a5c801ull,0xe3aae016e739bcc8ull,0x20c554233ddd50caull,}, - {0x6c5c4b29c77bb152ull,0xc30df398c85f0f2cull,0x5d5096a07ed6a257ull,0x0790d485c22a3639ull,}, - {0x8aadc7bbf2cb348eull,0xc339d87c2118c2cfull,0x8f49e0eb46961ca9ull,0x24740f0ee2134c2cull,}, - }, - { - {0x3b80354a061dbf06ull,0x961e0dfd74b84147ull,0xeb4b27dbde455fc1ull,0x100da22e6baf58b5ull,}, - {0xb156ffc78a60a8acull,0xc873bf776b8daaeeull,0x5a702f5446bf83fdull,0x1fce59e50222949bull,}, - {0x32d7640c0f717812ull,0xc58d05abdc19ceedull,0x1e63c2a492849373ull,0x23443ce8fb2d6feaull,}, - {0x870f2d1a2e39f52eull,0x7aa53cb06541429aull,0xee7b80b7031f23beull,0x0a8a095b3fdf2cf6ull,}, - {0x4e489bd278487a58ull,0xa914d93e5ed31065ull,0x6720c32ae140db7aull,0x0c22020e6a97031full,}, - {0x7535115a15420cceull,0x2cd019bac6256080ull,0x8234c3b61757e461ull,0x24d65e78c88298b2ull,}, - }, - { - {0x1f0bdc2cae53aa21ull,0x263506a6526641afull,0xacd41097fab7f62full,0x0b2c92453d474a86ull,}, - {0x2d23a58a46d63e3aull,0xa65ff6f1f716fe37ull,0xb86dc831f970fb2dull,0x0bc3cf38a191e63aull,}, - {0xeb0ca4fdeba41bbaull,0x969cf610e1a3a009ull,0x93c5d1bad6c7240bull,0x20ad29c9a9f1d6d6ull,}, - {0x006a12a187464b7aull,0xe354d9be0ec65202ull,0x9dff5c227947f5b7ull,0x24e3dc2833ba4d2full,}, - {0x9350693ebfdfb4c6ull,0x07d8abf27abb8fc3ull,0x58f5ab0b518e5113ull,0x125f2d7d40ed8650ull,}, - {0xc9fd435af6e86f34ull,0x04dc07374f373455ull,0xd040d286d71db00dull,0x141a1253f3bc2a50ull,}, - }, - { - {0xbcfee5dad6ad33b7ull,0x8cd72df36c5b56daull,0xc2949399ad52da69ull,0x0f6ffe6d68a398d6ull,}, - {0x777dc689b038aaf4ull,0xf7a8f41c7c04e9f1ull,0xcdab24ebcea39892ull,0x0178d69b1b935d81ull,}, - {0x65a001a22be563c6ull,0xfc1b2634dc76eafeull,0xed4f6ea19949392full,0x0e4e9127957d60e7ull,}, - {0x919a1c91a123e003ull,0x23f8ec239ef8a15dull,0x0470cb40e520d6f5ull,0x0be9b58098cd0f01ull,}, - {0x735e236220cf1152ull,0x82e68710903f65b1ull,0x6c932338d29169ccull,0x0e204d6a8c7d5034ull,}, - {0xac47692ec8245f1full,0x125db7c68d7e7a9bull,0x6ead9899d3150beaull,0x1957068d4a3da4b8ull,}, - }, - { - {0x45c168b2bce7b4daull,0x63afa3b276f9f910ull,0x60af02b6be9889a6ull,0x1adad7fa35385ae7ull,}, - {0x8d35cd7e7df59aa6ull,0x13cf29589f4b84b1ull,0xec6ecff2e1540013ull,0x0ecbf75abda6eb1dull,}, - {0xf6ce05fc3becfc23ull,0xe4ac8d257a7bf44eull,0x4c12510765eeaa43ull,0x06c870a377df50e4ull,}, - {0x2f6871bdc1d62dd7ull,0x80591505c1279cb7ull,0x1322088b2719ecd2ull,0x222e71f8f5995a2bull,}, - {0x2d1a1ab198363dfbull,0x35635c96cfa670ceull,0x7d5034dd7a26c656ull,0x003bf0608625abedull,}, - {0x98ca35cf5ed8716cull,0x2265e1237bc6df23ull,0x403b67933e14f23bull,0x17bd2dadc39729fdull,}, - }, - { - 
{0x73eaf26576b3ee71ull,0x1e385de29d896044ull,0x25a0f40f08a59317ull,0x19849935bfbebeeaull,}, - {0xc124cb642102cadcull,0x15bc408ad6ca7826ull,0x2d7fb7c9392b5314ull,0x191fe8471669f053ull,}, - {0x4519ddbccb6a7c26ull,0xf93bd195baec8228ull,0xacd754a721948defull,0x12f17b60c7e426bdull,}, - {0xcf447b92b04c15dbull,0xfcb7da793167f250ull,0xcbabb4ee570c4306ull,0x190ab94c6e5c81ceull,}, - {0x66edbe6740930cfcull,0x00c8c644983a181full,0xfe9e80b984c44209ull,0x1dd6f530584a7ffaull,}, - {0x14c61214aa1a9038ull,0xc34e5e23426cf8b6ull,0x89fca910ec46ae5full,0x04f1b9161a0f7c1dull,}, - }, - { - {0x60c3a79ac91ab4deull,0x541e051ca71a1a2bull,0x490abafd41a5d25cull,0x126275c8a46cf343ull,}, - {0xe5da0fcfffccd2b6ull,0xe3820301b166bb43ull,0xc6599e01bed6085dull,0x226548dff57c5cfbull,}, - {0x36428b1296882728ull,0xe08312e604299b9aull,0x5a15c889ce55478dull,0x172710198cd7c270ull,}, - {0x2450f291477cc1ddull,0xcb0f85c9d92d1bc3ull,0x86325c11cfe0e296ull,0x13ff03a4bd5be082ull,}, - {0x74298091e426bf55ull,0xbed700b48330ccdfull,0xb1ec45894f74fb11ull,0x1716d956bea958b0ull,}, - {0x91b29e513e9a4114ull,0xcdb3b373910c02fdull,0x268e148f9431fa75ull,0x1288ec8fde3009bdull,}, - }, - { - {0x02ae4c95e0afb8caull,0x8e7aec631cf8f5dfull,0xdfd9373259eca3c3ull,0x1fed34fb88af7224ull,}, - {0xc47e420205b5c88full,0x7207ef7451d1c567ull,0x53262358433f5294ull,0x193248ecf07ad085ull,}, - {0x49de15f9bb694200ull,0xc35f531086b5c242ull,0x95a1903858cd5140ull,0x032a4992511b1f97ull,}, - {0x42ee2c4def1faaa7ull,0xf6ca28bc9d99cd60ull,0x83c60d620a1e004cull,0x024ccf0ba1568759ull,}, - {0x6122291bf42e7d82ull,0x0866090d368a8205ull,0x11f04812ad6ec708ull,0x14cdebecb4ec13edull,}, - {0x535e8fd1ac15390dull,0xb37b579abb1773daull,0xbace0a295cd4b579ull,0x215e20d42270bcb0ull,}, - }, - { - {0x400bdbc644ac1d92ull,0x6d856667d971f595ull,0x03343816a1bd40f7ull,0x0361ad7534821a43ull,}, - {0x824222acf8437091ull,0x79141c3205b1032full,0x6b4d331fc9974530ull,0x1bf965a7ba2bade5ull,}, - {0x0bf66d1afdad6063ull,0xfe6571464fe71527ull,0x3ec25815cc90ab9bull,0x132ca2d9d51c3b56ull,}, - {0x37e3ae17fb5ac815ull,0x2dfedb4efe3f37c0ull,0x4b086ea5032745a4ull,0x0f966cabdd479e9full,}, - {0xb5266c229b7ebe0dull,0xc6717a5442929826ull,0xad22a19d8892adf1ull,0x172da87fcc14d4f9ull,}, - {0xae0d9866d891bb59ull,0xc500c36e3fe7d354ull,0xc2b582f2929b23abull,0x11428eb730dd4e8full,}, - }, - { - {0x81538fef8e07dae0ull,0x3c05274665489b39ull,0x36e4401350ceb55bull,0x23822f2029f31339ull,}, - {0x9a946e7c30090ad9ull,0x5bbc4c8c656ea3fdull,0x3cc2cecb7ec7227full,0x075a6fe87014899full,}, - {0x504b2ff7fbb0366bull,0xdbf315791bc3d5e8ull,0x34b16de185c8c4faull,0x0c722a3dffe0761cull,}, - {0xe1b2c1fc3b33c383ull,0xce84d3e5182665f5ull,0xbcedf2f72de4d795ull,0x1a84c62c0c4a6f6full,}, - {0x85ebabd309ae9553ull,0x1330ec03b0ac91f7ull,0x8f42ba9c8c1ae123ull,0x24c230fae89db4b9ull,}, - {0x63ba534e151566b6ull,0x7e44c5bd39e6334full,0x06921595325d11dfull,0x217f3a4e9d6413deull,}, - }, - { - {0x25ac71f16a82e592ull,0x47846dfdcc378ef2ull,0x75c53c75b38260a2ull,0x039b9da33bf9b997ull,}, - {0x53d30cb619b09dfeull,0x566d6a55a184cd91ull,0xa589c53ae28a8e0full,0x13c05b500d5f285eull,}, - {0xd22faf3af0a087b6ull,0xd5e538653ca52380ull,0x42c893b42092e080ull,0x18f7a1bdd3badfbbull,}, - {0xdba4e6c94bb0a0b2ull,0x323d4769578ee4deull,0xbaedb0f8e01fdb15ull,0x21ca037715dcfe00ull,}, - {0xe6ccc0bc06afac14ull,0xfb943c10916b581cull,0x2d5694a4c968aff2ull,0x054a1b209a812e31ull,}, - {0x1983e59a45dcb02cull,0x71dcb184a30af740ull,0xb75b69bd5ae155acull,0x13c7fc9ace199224ull,}, - }, - { - {0xddbd6b95067516b5ull,0x29ca0360372d54e8ull,0x3e2955c1d6748678ull,0x1f8b276aafcd2c7dull,}, - 
{0x893187796c272ab6ull,0xc843325fc53fa37eull,0xbe658fac833007a3ull,0x04bdf08356fbd83full,}, - {0xa0863d3fd012aa1cull,0xb1b2c2c3c2fa879eull,0x4cd718b80433407dull,0x1e1ff82d0a23f609ull,}, - {0x0c72fdbda5da70b5ull,0xfa2ad5a7dafb202bull,0xa63ce1e889feffefull,0x030b328f5fa93e0full,}, - {0xc4a01585dc609f7eull,0xade61ef3353eda34ull,0xfa884e9a73d65e8eull,0x24750424a4543a02ull,}, - {0x54f07e883bbe27b6ull,0xfb41ed1660623383ull,0xe112647feeae3cabull,0x055cf71a930304b0ull,}, - }, - { - {0xcc5f813b041ba372ull,0x1b892909c069bfd9ull,0xdfac1a47d46ba3dcull,0x1bc553fdedaa97e3ull,}, - {0x623da812c8d71640ull,0x59b3b84486ab96c5ull,0xd77a7d970676d563ull,0x09473f20b0087846ull,}, - {0x9214acc8a6ad6f76ull,0x53e9b1713dffaa0aull,0xe66631ab33f6477cull,0x16792dc3fd2138d9ull,}, - {0x612c9ffc45facb86ull,0xd43cb433555b3da3ull,0xb0ca697731e8202dull,0x141ac2b6bfa546e5ull,}, - {0x51b480946640c6a2ull,0xc71f677b6d96bb2bull,0x7e0377527663c0beull,0x036b2f551e8c7db8ull,}, - {0x09610b7524482b53ull,0x65196312af7438ccull,0x7050f94a8a70305eull,0x06fde0d46e6c059eull,}, - }, - { - {0x707927b8fc061859ull,0xd9e38cc9ebbd94ddull,0x96eba99c855f975full,0x0c12d088d263d28aull,}, - {0xfa236e22ee58216aull,0x470b1efa73ec6699ull,0x4c5457a04dbf7553ull,0x1a1dc4cbd3ccec1aull,}, - {0x9a327665f6db6d31ull,0x6443a4f683536914ull,0x58eff845741ae1d6ull,0x0b784f2a8c259646ull,}, - {0x08cfd913a263ce94ull,0xe58aab8c6b488744ull,0x335fa717116557daull,0x137bf0016a4e4c17ull,}, - {0x0c14566b7ca1106full,0xb5fac75743cf44ddull,0xe87d1d95b95cba63ull,0x1d2823802dac3d01ull,}, - {0x445099d6807bd76cull,0x41b66837529eb51bull,0x84267670e2264913ull,0x0ed84664bb37032eull,}, - }, - { - {0x938964e622d307e8ull,0x2edeb24656039ea6ull,0x642dd6f7e2144be3ull,0x1d31590cb07cb098ull,}, - {0xe57bf1b8729263c1ull,0x48f9b371fd250d79ull,0x670ce0ee36513b90ull,0x1b908986cbfec7f1ull,}, - {0x9fc8ffb876636effull,0xd57385d67c117698ull,0x4813753691eeba7full,0x0e36785e030209eaull,}, - {0xeef1935cb4c5e8f1ull,0x1b8726a75ab06681ull,0xee973c5cd718bf31ull,0x026910b1fafe0208ull,}, - {0x8c1826b08792fd9bull,0x00325e83cb419665ull,0x9cf44c5b81265badull,0x2462a8c0fc4f85f9ull,}, - {0xa4068de0bcf85b4cull,0x5292433f89646bedull,0x05b4bdd364d3bc53ull,0x1e25be7fab47bf9aull,}, - }, - { - {0x51c27ca3424bdf72ull,0x167926750fe4d445ull,0x41985a737513c6e8ull,0x070056ab60d56287ull,}, - {0x0a23d1344dfd91a0ull,0x6c518fef27a24e64ull,0x059a8c49360f8730ull,0x0f1d38b2c12772f2ull,}, - {0xaa2a1e60b126566eull,0x1ed2add1bb218007ull,0x71385f0a8fabe78eull,0x024c0880d7c0fd5aull,}, - {0xeef5704923a38ff1ull,0x34506a9872581fa9ull,0x78152bc691cbac5dull,0x0c41086d97a7fccaull,}, - {0xb0c0d854ad72b6b6ull,0xb38455c3e3e5f457ull,0xfe665f1f4ddafb6dull,0x10373cbf9ca2add9ull,}, - {0x8a306e7799aa2605ull,0x5dbca515ad2f9733ull,0x9b8b80da928edeb0ull,0x0052a2d2f8f7b1e2ull,}, - }, - { - {0x13e3e3df198f8864ull,0xc80f05cd02b931f3ull,0x8826debe7162b2f6ull,0x1d319ece62ae45e7ull,}, - {0x313e17d4fa80fd67ull,0x82c5f606bfe97eabull,0x66f092bfa6b46100ull,0x16fde5bd28d86353ull,}, - {0xcd4e7dfcd19cfb45ull,0x026d1e42ed44630bull,0x8d6b54119bc07918ull,0x1eff361145a4818bull,}, - {0xc80d511a9a448566ull,0x9df3e33a28a32065ull,0x5a5860db779cc4aaull,0x1c226a0a4bf8c193ull,}, - {0xfe0fa440138c1ebcull,0xc32c16bd93c71daaull,0x5e053ef1a9d73a8eull,0x2105d2d85afe7c65ull,}, - {0x553c6840e4d14fdfull,0x600506d781612ff5ull,0x3ab288079ba2da8full,0x19b8f14b3e9cefeaull,}, - }, - { - {0x101f9567b577f4ccull,0x9d7dfbbb95010b1eull,0x1801c3f5ef323a26ull,0x08066f8c302be6e0ull,}, - {0x301f867187aa8cc4ull,0xdcb504ccd5deb64bull,0x7a19b8838cf066e1ull,0x1ce06a9c35aa0809ull,}, - 
{0x010a732bda3f076eull,0xf36ad54eeb0df727ull,0xe7e3ba3699eb12eeull,0x1d65654037809723ull,}, - {0xb8ff82aa0c8f9e89ull,0x39fd76e872772dd1ull,0xd0a9a0cf7b300237ull,0x21cdd8098a877d70ull,}, - {0xfff1cbe2921532d7ull,0xe919f4cbb2b62082ull,0x43858e6488e4d9f3ull,0x227d32cd853e2a11ull,}, - {0xdd7807401672de18ull,0x7e3167a195002069ull,0xef20051461812a1full,0x1ee6ee09899caca3ull,}, - }, - { - {0x18dcb2c8d68bcf3dull,0x55c30335c441d056ull,0xddcda87759df1c4cull,0x0bd72b9d00117407ull,}, - {0x53759bf204dc6ee2ull,0x5a491198ccc07fb6ull,0x21023e765d4b082bull,0x052467582f570a64ull,}, - {0xc71f8479e69bc9efull,0x1a8b07f3a7f9f4e4ull,0x4898f9336938503bull,0x210b416bb55f686dull,}, - {0x2ea76a804a514216ull,0xaed1c043402cba72ull,0x8e96b191c8508968ull,0x0a6845487a544d0cull,}, - {0x20f8a88abe36a0fbull,0xf7be80390c4df565ull,0xb4d6ae73ab0ac7b4ull,0x03dee2bd150d75caull,}, - {0x31f41f54a9d5ba23ull,0x32d8a838645e8303ull,0x1ce68866725d4d63ull,0x16eff9d7d55f24a6ull,}, - }, - { - {0xc9ef98de3048fe85ull,0x91d247303ba2cc5dull,0xfeebf32febfe0c50ull,0x12193bd2dfc7cbaaull,}, - {0x05545cc46d6e2f10ull,0x0c1885bd6a173fe0ull,0x19192206ce77ae4dull,0x21bc567dedda3bcaull,}, - {0x0289985f4f8a3e0eull,0x46a6f360ff57d0beull,0x8ecf6d8914a57a28ull,0x16fad252e99a0f5dull,}, - {0xa1ce7650862f87aaull,0x624601ad20a0a754ull,0x181fa95e1dceca7aull,0x04c7063bf6031512ull,}, - {0x47221f77cb9dead6ull,0x0b0a1f41bf04b7daull,0x1285ec2ea904f669ull,0x05d815fd67d084b4ull,}, - {0x2f4621c7c48ac6bfull,0x6c94a7fc7433ddc8ull,0xbfbc34ad00dc77bdull,0x0d420c22daa0e425ull,}, - }, - { - {0xa125bb06b8f5ae5cull,0xf130e54b42e247baull,0xa7d5d0e59b914ac6ull,0x071f28cba94510baull,}, - {0x23781cfd40419519ull,0x2ea1f31e32e9865dull,0xb81d3422cdc1a049ull,0x09b4ecf31bed5dadull,}, - {0x7cad0528d1f2ffbdull,0x4aac3a0629f7f4f7ull,0xffa90428bf6d62ffull,0x1e313094fa295c2eull,}, - {0xac9d8af47d98869cull,0x8ecebc8bdf6c41e8ull,0x859d29cb97f9f264ull,0x0c9223c674634d76ull,}, - {0x5adcabb24bf08460ull,0xbc91aaa43338b671ull,0x7abcd2f2031ec66dull,0x19b3dbaaf6fb5a1bull,}, - {0x00b0c3d6c69380bbull,0x044a0a413e3aaea9ull,0x48d820b0f17d1ac2ull,0x1745bb82ed277652ull,}, - }, - { - {0xd921b459e78504aeull,0x79ef5733fecdb405ull,0x04020f6200148defull,0x1163b626e015e688ull,}, - {0x0781fcc9b627e44bull,0x5d8c6c8944d557a6ull,0x5493d9920c1d32fcull,0x0ecdc7510a2f454aull,}, - {0x7086854c556b47fdull,0x4ec3f6dd8ad274dbull,0x274e92925edf85deull,0x09e6aa987250022full,}, - {0xa63453a7eb3a8fb5ull,0xbd83f1e026f71f82ull,0x1386ec55e6450e3full,0x00873f000047164eull,}, - {0x179dbc93073fcb3aull,0x592c5c9b8baf6411ull,0x4b81a7b27b4d9070ull,0x1d26ead51df9a20eull,}, - {0x6a244f14dc36671cull,0xd1e9d82e3c5bea31ull,0xbd883c1283d17771ull,0x1e09e59618c6163full,}, - }, - { - {0xc1b0578027cdeed9ull,0x7ad19ad5cb04d6e5ull,0xee6f7f36d5ed1465ull,0x01d616ac45e80f5full,}, - {0x2c0c7df57e945feeull,0x9709cf12715b87afull,0xa6e99327a9e2f868ull,0x1dc75e316e45b2aeull,}, - {0xa7bc3e0832276f4bull,0x36ed99677fa22ffaull,0x89da95557e5dd91eull,0x0c1f4bf5d672d3b9ull,}, - {0x25624941c1047a5full,0x463ccb3bd3fce3b1ull,0xd115fc8570096682ull,0x17145e34ff1d0e9aull,}, - {0x4a3a34676a6a378full,0xac89a12198b0ca1cull,0xb97a2d982319e20eull,0x0caf54593dcf42e9ull,}, - {0x7a07a3d321faf4daull,0x6a062e2ec939fd56ull,0xfd7ac47f692009a9ull,0x1121561f1c332cd7ull,}, - }, - { - {0xcfb495c8f564f52cull,0x39665331e96c838bull,0x42c49998a1446b14ull,0x03cc4e294cff3ff7ull,}, - {0xd41d69b42b557d10ull,0x98dab8bd722a39a0ull,0xd4e24c4add54c81aull,0x1344527908d19fa6ull,}, - {0xe9648caa7c8da128ull,0x8497aa165fdee967ull,0xf437d75fab691b76ull,0x052cbd6eb6436a4bull,}, - 
{0x389f7092e280920bull,0x9b8625c09555310bull,0xe91f49f9d9031898ull,0x1c95a9d881b18be8ull,}, - {0xe8605b4d2212b1fbull,0xb1c4f57736dbf0c3ull,0x8a90c4bcc09cad9eull,0x12f03ba47d2620d4ull,}, - {0xcbd4494a5830ba3cull,0xb5a5d7b6b635fb6dull,0x154076781060c57aull,0x14e27241d5bdbe5dull,}, - }, - { - {0x5545df3af64ec9c4ull,0xff2adbc37d224acdull,0xcf02fc8672ce69ffull,0x0a7fcfe0b85478f0ull,}, - {0x402246e5d134054cull,0x0bd5980440304ad7ull,0x3df09979193914b6ull,0x22610927d3977e51ull,}, - {0x08235659dbd58c8full,0xd159c4e705d2f6d9ull,0x3c5ae22b53836251ull,0x137039c4b43f1c9dull,}, - {0x4ee6c2b196d188bbull,0x54ecda987459243eull,0xb3a9cfbf1aea2748ull,0x234243a4a87cf61eull,}, - {0x248eec552d9a5ef7ull,0xc8a98bee264e9e26ull,0xf3bcd8c268d0c073ull,0x16e365499a23e913ull,}, - {0xbb406c86a8f7f2d7ull,0x03426cc36d053972ull,0x047915ec9f472c4dull,0x2318c0030bfcee73ull,}, - }, - { - {0x3c783caa5308c82dull,0x81bcacdec8f45662ull,0xe169822ce2c0837cull,0x09c179836e05b980ull,}, - {0xf5d882cd842d337full,0x861761db32052e52ull,0xd6721854e7e686f2ull,0x0d22ec35de13a291ull,}, - {0xd9dd477876f2c6d0ull,0x5ef6dd9d6e4eb6b3ull,0xa22e8bf49d19a102ull,0x1fb12cb296762e6aull,}, - {0x8372df5211227b55ull,0xc3994286779c5c02ull,0xa302f7b3be87ac5bull,0x22b842b9b918d821ull,}, - {0x2cb75b8cb17911a1ull,0x5cd8f56c7f4dacf8ull,0x09874f95dd87d8d6ull,0x15b92554f1bdb068ull,}, - {0x4786ec1f88a80264ull,0x91dc53364f6aec54ull,0xbd9bd414e46eb290ull,0x1b27b7fd99d5e212ull,}, - }, - { - {0xbb40271789b4bb9cull,0xddf3b8f645386314ull,0xce090cc4ffeabe23ull,0x0c3920ea76b361f4ull,}, - {0x14c64e1eed2b5edeull,0x99c5289af2511b43ull,0x5de1d7b1dccb2575ull,0x0b5e4419ad2e1c52ull,}, - {0x0c04995f7bb764c0ull,0xbd9eb56e1c742072ull,0x9009271bd281dfd1ull,0x2464821365b75205ull,}, - {0x49724e13fe376d0cull,0x189fb55cbe1abfc2ull,0x80162bfa5b8980d5ull,0x1a96550a3916c5caull,}, - {0xcd79e4d9633065d2ull,0x2b51887668a49a0aull,0x8785b375ac581035ull,0x10a5547822c082bfull,}, - {0xb98da2585b65ccd3ull,0xa8015a03bee86a26ull,0x2eb6a1e1bd1cdf1bull,0x07bf364897d1c8b8ull,}, - }, - { - {0xb791c26545931abcull,0x9a1ad86e4fda79aeull,0x06855828328d0314ull,0x116650fafca899dcull,}, - {0x28a52543d8cb599cull,0xbdd390c86fa4fb40ull,0x903fff92c56629c6ull,0x0b496e3e73b93100ull,}, - {0x0f5622574884b369ull,0x48dc4ad8ee6e6c07ull,0x9bf8705b75932345ull,0x12fdae5ddc53fccbull,}, - {0xffbab25f3f4dbcc5ull,0x2e29054e3b0c795bull,0x4e42d9554507c4a9ull,0x0100c6ddccafa66full,}, - {0xd070c555e094dddeull,0xc33dd5eda3c03e59ull,0xaf83e343a270dd9aull,0x098aee3da1fa8162ull,}, - {0xad02918dc6d1048aull,0xf04903a09f8c1e95ull,0x51622aaf4848d918ull,0x1ded54a06c3901a3ull,}, - }, - { - {0x407e49d022ba5897ull,0xdb8d26843eab7b0full,0xf976a1b95413e184ull,0x0aec3abccfa3f870ull,}, - {0x5a796987e2623f7bull,0xf9ab67105d5e1b46ull,0x9d9d00cfaddf51aeull,0x1be8e30f8202ab70ull,}, - {0x793be4982c00e681ull,0x903759a9286f8a57ull,0x16a3daf170f851afull,0x13cf0c29956077fdull,}, - {0xfb5787f1092904dcull,0x9a7422c14149238aull,0xe8e61be7e9ad1fc9ull,0x10029d3e967eff2full,}, - {0x4a4887f39a050b1bull,0x2b7f2e2d718b7fa5ull,0xdcf39f9d5e4ccc87ull,0x0e9ae22b93f3c46cull,}, - {0xe2085144d647649full,0xbb22757ff04f1a8dull,0x39c323e34631d9f7ull,0x04865b0a1462c9b9ull,}, - }, - { - {0x684266fdd1482bdbull,0x49a7895fd6b87933ull,0x28476e848c17b925ull,0x19e95e89691c4ea5ull,}, - {0xe9a6a6bccaf53a2dull,0x479cccded58ddaccull,0x16049a3fd6291256ull,0x07364abc39086c40ull,}, - {0xf24da0fc6d7e4b82ull,0x29591202c08178e9ull,0xf9b5dff7dc07aae1ull,0x0ed06afda0a02f78ull,}, - {0xcac1c41fcc1f702cull,0x52b029719b5224f2ull,0xc838b665539d0364ull,0x246b61674cf835aaull,}, - 
{0x44068b26b9dce8e0ull,0x6b3a0b0e83a7c8b9ull,0x03feca47fb021110ull,0x10d9d6e7fbc944eaull,}, - {0x3a39ad7da63fd6fcull,0xaf3e9dde8885823full,0x31511af0a15648cfull,0x19de25d493f0200aull,}, - }, - { - {0xd4fff38e62012c13ull,0xae59ef30122850ffull,0x9d23a0381a012cf6ull,0x120ae1d814828c1full,}, - {0x42eb1c5dfbf07103ull,0xd254f031490046f0ull,0xb47882ae239b8ae8ull,0x11158120470a13baull,}, - {0xd5144f9267a09051ull,0x66da90aae84bab57ull,0x586fcfe6e1dfc445ull,0x221e49ed2a16e941ull,}, - {0xf467fe034d6cbdccull,0x7ac29c1d1e5e20feull,0xa110e6e05eb1585aull,0x23d954fcdf786a64ull,}, - {0xc1ae9be330026938ull,0x874b19ab11339205ull,0x0964cbafa59f62aeull,0x1e6167f38349f253ull,}, - {0x23efb445bd9ef627ull,0x897335bf70b7bcaeull,0xa00f86ae69e47650ull,0x2509e8fa87d5670bull,}, - }, - { - {0x22a00ec33abc6b8eull,0x09620addb21d394full,0xb965fdcb7ee143dfull,0x1febe6994e628a7bull,}, - {0x1c710a901e98b013ull,0x2801fd688f4dddf6ull,0x0abcab0ebadf8343ull,0x10f0cfd199338d92ull,}, - {0xd599e818b6e83ff6ull,0xb88539365c679f3eull,0x0313ce19b529a51dull,0x21f5f0b9f1cf3415ull,}, - {0xb59034f3ef13e954ull,0x6883ab623a40da9dull,0x94faebf81576de70ull,0x14d2247af37a0cceull,}, - {0x99757d5184162b77ull,0xf79b9dc74871c5dbull,0x608ad4501b03300bull,0x074149d915458798ull,}, - {0xa3252b36c3eda717ull,0xc1ded9f245002540ull,0x14b5755b56dac7b3ull,0x19308239f6756bf4ull,}, - }, - { - {0x07f4f5a6f26b067eull,0x32d2eb865477dbdfull,0x6945cbc86ac200a0ull,0x1e6311fd6ef61d2bull,}, - {0xa0d0920425c68e5cull,0x683d1987c8fe9e5aull,0xd7228b5e41a381faull,0x114a05f6a9f409b5ull,}, - {0xf677d47e68eeea17ull,0x87f50243b30d3112ull,0x084cf054770d8dc4ull,0x0bc9fe9990a74fb5ull,}, - {0xf22bdc5dc2eec0d2ull,0x3bae3de98c595ff4ull,0xc95e53073fd0b23bull,0x11a7e2b2d55a6ea2ull,}, - {0x8ddcbdbb83b870baull,0x728950ad96866c71ull,0xd145c1d31fae9c5cull,0x0547d0e831e70104ull,}, - {0xead79bef2b2433d9ull,0x0647d5966623bf56ull,0x4fb0056ba69d7958ull,0x1a0983813c5d2e9eull,}, - }, - { - {0x215a5a20e15d19d2ull,0xae9ceafe33084b69ull,0x80f85025ca380f77ull,0x1c19066c196d1a00ull,}, - {0x359cfc6bc545de2full,0x7339f8704a758d60ull,0x64eca98cd5f2d7edull,0x248ba44255247839ull,}, - {0xc2c6e70b389e8492ull,0xc9b97f7a19d874c9ull,0x87d7b9a332957727ull,0x0119950fe431afe3ull,}, - {0x51eeee98aaf4581cull,0x081de6981f8512e1ull,0x4bb18cf097ac6997ull,0x21e465b23c21951bull,}, - {0xe5bc584a9a1f5a1aull,0x1ccc4b14286b7ad9ull,0x435b382aeb470e64ull,0x1f9ae9143c5b987bull,}, - {0x990eccb3248cd3d9ull,0xe6cfbcdbd8c8fd0bull,0xb48de18c5009802full,0x198d98c5412a6213ull,}, - }, - { - {0x43cd5d8c9073ea61ull,0x5174db54059acdffull,0x45e871c04aa7a2ddull,0x05e16d3199d840a0ull,}, - {0x9ad1091f764df938ull,0x67637f20a74490b7ull,0xdbd73b8487d04861ull,0x15a139abaa8b478eull,}, - {0x1b10547972b4d507ull,0xf641d3763db1a739ull,0x15597787c5b84ec3ull,0x0134b78ebf335c12ull,}, - {0xf6b7a9d4052963daull,0x2d806855d9466783ull,0x623658a8a2d743dcull,0x00de0208fc0298b1ull,}, - {0x1b67ee84e8c40714ull,0x620107f4c2393038ull,0x96441ca3a07baeeeull,0x0b27368271b0f683ull,}, - {0xa65922c66ed876ebull,0xdc21179aa8971bdbull,0x9309a00b5206e041ull,0x088fc38497bf88ebull,}, - }, - { - {0xee8bf43d2fc34584ull,0x4ff6772e8da82b6bull,0xa7ae3c97dc955a78ull,0x09651f34f9ad7ab5ull,}, - {0x103de2e1906f8fd3ull,0x046ca4e6b276642full,0x220398cd397af5fdull,0x07b984811b0df962ull,}, - {0xd0519e42b872b7aaull,0x164acb4f7d9df94dull,0x54cd157448c94337ull,0x04c636efd3f59641ull,}, - {0x7cf41f52f0acc90eull,0x54dff80755d46315ull,0x83a7e3f528daec19ull,0x0039b02577bb91e6ull,}, - {0x828eb12b537a9732ull,0xd81ce0f79c6211ccull,0xcd2fd2f2e35379adull,0x1e84fa2068841dd3ull,}, - 
{0x931aef70f9a3a06dull,0x71abc5af88fa12caull,0xa70ddb3102a75247ull,0x14a049c881169cceull,}, - }, - { - {0xa9975bec6d3f0412ull,0x72feab9fdc81092full,0x49f533cdb7ae9d66ull,0x18632a2c4c5b4d2dull,}, - {0xaa9f81eeb706ca09ull,0xb1065065a3fe5198ull,0x3381765974ac94a8ull,0x0ec5d52c65b1f5e0ull,}, - {0xfe465050a5cd7ab1ull,0x5059fae63d47120aull,0x49ad1fd731ef0aebull,0x1e018673e33f45e5ull,}, - {0x6eebdeb52c24d248ull,0xa43988a55ccc8d10ull,0xe997fafe55d0ff64ull,0x233675abd5ad14e6ull,}, - {0x8b5530b175fbeaadull,0x27ba08984164ed08ull,0x94a9507d0189809dull,0x12fb832d1d13901cull,}, - {0x912ff6e6cf0c29f4ull,0x54d7a43121bcd1afull,0xcdf9fb448a1e2185ull,0x02aac1a8e253b8f9ull,}, - }, - { - {0x26a581d7ca270a84ull,0x989bddaaecea533cull,0xda7993327a4b8cddull,0x0c1637ca7d045160ull,}, - {0x6213cd9db7a6d076ull,0xc03037d124aded7bull,0x32d9e1bd41523d2bull,0x008ea641abbe75edull,}, - {0x7d3c23b227774f03ull,0x4a5e7805e6f9a14dull,0x1c24f1a43d487e79ull,0x18eafaffc703509bull,}, - {0xe146113f559bd9efull,0xe56825b1a7fcf7f5ull,0xa93689399f819fceull,0x14fa96013c5a6638ull,}, - {0x81c625bff8857fe7ull,0xc98edd68e7203a68ull,0xc88c3a681a3f1ac1ull,0x0bd4fa57e9b6d9f4ull,}, - {0x2dd6eb21127b1fefull,0x91b039a57e0f6233ull,0xd02548bc3dc3c783ull,0x0e8a4d19a777a688ull,}, - }, - { - {0x025c54533652a519ull,0xb3bcbf01559e8920ull,0x5c53eb97c55f25fbull,0x22322b9402949dccull,}, - {0x260ef92c70dd5c11ull,0x9e27626b6cd441acull,0xc6661507ed6f5d61ull,0x0fac1fb2f6bb53edull,}, - {0x5511ab3bd7ea4c51ull,0x6562a46409240916ull,0x83a5e441731b870dull,0x205c0c853ef83501ull,}, - {0x7c8ae57f4deec828ull,0x349dd08555bea497ull,0xcb5d3234c7b839bdull,0x153259da7d31993eull,}, - {0x964b508f6fa5bb3full,0x82b5262f18242750ull,0x970156d1896d43c2ull,0x028fc28439e44783ull,}, - {0xda5afd0f1a7d7fcaull,0xddb473f9a75a7a4cull,0x180c169ed34f6781ull,0x0cde138f3279be8bull,}, - }, - { - {0x63de6da225c321ddull,0x4832886b582d3833ull,0xb0dee708e55cb53bull,0x06c9e933c223ec30ull,}, - {0xdab1fab5dd78e263ull,0x3e658d3d9ec3bb7full,0x3d0a56ca4a1b088cull,0x008ce74540e8386dull,}, - {0x0b0ee452fc9bca4bull,0xfd0b0e032d16b266ull,0xfaeea7076b32cc91ull,0x1823f6048f88ea5cull,}, - {0x3966dc6553a5ff08ull,0x85192338024e75e5ull,0xff2cc296f92beee4ull,0x229caca8d4f809ffull,}, - {0x7702729e0d1f5157ull,0x1a3ac2432384d0bcull,0xd006954b39b11e9cull,0x118a5126dec2a2faull,}, - {0x2e9bfe6eaf026413ull,0xc720a61aef11d653ull,0x6ea67c87c36691a3ull,0x18f925014f9c61d4ull,}, - }, - { - {0xd3b27621ad1dd1abull,0xf97b0f55f22f18c9ull,0xb6113e8be6db1114ull,0x1a8a1ae8f65ead1aull,}, - {0x0007a32980115669ull,0x605196cb02f760a8ull,0xfbd2085c8671df43ull,0x0c381e59ea5960d2ull,}, - {0x94116d83a9603b67ull,0x92b23f61ccedfbbcull,0x50e0fc7e78727f5eull,0x23fc01a1d8cc7e65ull,}, - {0xd1b8a0d5024aff36ull,0x2b25d1cf4ab60e92ull,0x8dbbaf91e20c91fbull,0x185a985f30c061fcull,}, - {0x06fe112b333faa7aull,0x9323dbd6f08549bfull,0xcf5e43f668844df0ull,0x10df0c27f29e1637ull,}, - {0xf2afbd9928527e7dull,0xd856c6d7448b34ddull,0xc5e025621b375c86ull,0x01b0fe70c9b177dcull,}, - }, - { - {0xf09e65fdda5bf41cull,0x59ef2a8eb45985f0ull,0xfec4facae20ae75full,0x019f623d519953a8ull,}, - {0xd5dc50c38c7e165eull,0x62fc39995a53fcf4ull,0x557a7e55f3ae1284ull,0x0fde40ac729d9ca2ull,}, - {0x4b49ba1f5fcea25aull,0x631dbbd1d4e3cea5ull,0x7069fcd00919239full,0x09c559fb76aa0dbcull,}, - {0xbb6348d2d3a8d733ull,0x460c7255ba85e5c1ull,0x42e7d9808787c01cull,0x22c0fd2eef2261e2ull,}, - {0x19833887b93cc3abull,0x2cee6551569164daull,0x1c44fdcd7b0c79dbull,0x1807ed58575a7b33ull,}, - {0x30713388923e3b7eull,0x6d541ffc75d914c7ull,0xbbb50245851f0f6eull,0x1df0abdb9048edc2ull,}, - }, - { - 
{0x62788c325d2b0f0bull,0x33744819eb512733ull,0x83ff060d6ff7309cull,0x18829912bda99968ull,}, - {0xe09edb24cdbdfc6bull,0x099200c5850fc442ull,0x967790a56049a66bull,0x011cd382712b1d77ull,}, - {0x8df4e975f64427d7ull,0x2e3901a3a7b0f55dull,0x641ec6f45805e402ull,0x06e1d0db4022cd43ull,}, - {0x440dbd8590564164ull,0x6aa7d9c34c053da4ull,0xe0da2752be2f5aaaull,0x2264f00ad93d3d4aull,}, - {0x716e5f9a7e68031full,0x1bcb15653094bebaull,0xf84ac39bc138e963ull,0x1d7a1fc06adf5b63ull,}, - {0x8835962eb2e3079dull,0xc3d7c9d41261e319ull,0x30c0c53b9353bf58ull,0x03bf957dd1541c99ull,}, - }, - { - {0xe77e8891944694ccull,0x04efd57869ed85ceull,0xe9de08ffa6a88729ull,0x1d062265f1d299d3ull,}, - {0x387dab533dc83cc8ull,0xf7fa09c0bbdf31b7ull,0x59b84e1a3762d3b9ull,0x01b32660eab7f6beull,}, - {0xf7daf1d596d17df2ull,0xcd931e51341e0ebbull,0x51710bb172705525ull,0x244d6b81dbc7d273ull,}, - {0xe7a144e6eefd2dc8ull,0xf5c76e992d995cabull,0x477afe1374a66f3cull,0x1aebe5717b54fe53ull,}, - {0x541a0d7dc825c3b1ull,0x93a0cab475598133ull,0x096efa1eb12a99feull,0x17a85ece29f273fbull,}, - {0xa36f4f86b5bc5c1bull,0x1b4a0fc57947e76bull,0xaf302e3f7838388eull,0x06aadb4991feff1full,}, - }, - { - {0xd6afd4710167605eull,0x1897263cb81c98e1ull,0x90e133c23eb0207eull,0x0718643da3a96ba2ull,}, - {0x8344e521afad71f8ull,0x66af04f81ad9f156ull,0x5ecd25d48f404733ull,0x0234ffcdbb42d141ull,}, - {0x8a50c65ef686166dull,0x34cdda95694e0cacull,0xa8add01d08d2dbaaull,0x1ce98a7c6ceb5696ull,}, - {0xb1702710fa0af484ull,0xe30a4eb2f39aa3f1ull,0x7409d5afcd96441eull,0x1e0168166b2894d7ull,}, - {0x8cfa29792abed76aull,0x75d7bfbcee2073efull,0x7c0372e7080fdaedull,0x1ee8cc19eb967336ull,}, - {0x2a265f9eb8f2265eull,0x48f9b13b07b728f5ull,0x7b915e1225774e84ull,0x0d4eff23e23d5ae3ull,}, - }, - { - {0x13cc952b1ef56e58ull,0xeb3870335e75a7c9ull,0x2fe15087e3c0845bull,0x1011a2007bc71f04ull,}, - {0x472e18f407707bbbull,0x053d1dd70cceea98ull,0xe200cdc8798603d2ull,0x0bddb233bffdfc1aull,}, - {0xec920181b8484410ull,0xc6b9a9b74e18f513ull,0x84c1695c77cf9fc1ull,0x01005eda69cae7ceull,}, - {0x7c668bd94e95d9f5ull,0xbaf12b0a06fcd749ull,0x674b2e2824d6029aull,0x23c9d63fdca6307aull,}, - {0x92bd96dd3a545dceull,0xccb9355edd49cadcull,0xf49ca3d068b74eb3ull,0x1d9461936f823b86ull,}, - {0x6a2fa39fa7e93bb3ull,0x468fac8c8f151f41ull,0xd12e0aec4bb21bbeull,0x2326bbeb4405b3ebull,}, - }, - { - {0x1e029295309f1347ull,0x6589babde3a80cdbull,0x74de96ccf73da639ull,0x125810442f8c9fbaull,}, - {0x47d63700da3a6cefull,0x59c3fd0f2b9b6f35ull,0x66f1979c84873b7eull,0x02770c35ac617c99ull,}, - {0xa757e064e4f9edb2ull,0x46eb13ddfbda28f5ull,0x519177520a694aabull,0x04f6097d775debf9ull,}, - {0x072be9865dd6841dull,0x4d9d5c0fa6d6a7b1ull,0x1749ea911a952c21ull,0x15e98445e982607eull,}, - {0x6fb1b6845ce93f6dull,0x52d5387b1a0f8405ull,0xd6a11cff22d72a42ull,0x2283db33f8496ec9ull,}, - {0x77bae4ccdf2e5bf6ull,0x21812c170f736a30ull,0x5a8477a3203036fbull,0x1e667d8ca4a419f4ull,}, - }, - { - {0xfc925115198c93d4ull,0x0aebd45cf3b16db7ull,0x2f7c3d2ab0f16732ull,0x1c4b48273365c9bcull,}, - {0x2a26617f1f00e47full,0x828f68381a20ae68ull,0x0221e65b7f01b6e8ull,0x19e45e14ca4e5650ull,}, - {0x231de599fda4c7e2ull,0x55e6d0d3df2457abull,0x34f961f715fddd4aull,0x0e97e5f5fbfe6aecull,}, - {0x8f1f1a8b1b687949ull,0xbcbdae7ed35524edull,0xd7c78090035aa0b8ull,0x19f2a0d7fb844166ull,}, - {0xc397557bba8fe6a4ull,0x366daf415604f8f6ull,0xa9b99d86ac93e705ull,0x21fb72d548929de6ull,}, - {0x6a2ff9d0392aedf0ull,0xb0a90a0d10fb8fb2ull,0x5ef8e1768350ba26ull,0x24aca64027557318ull,}, - }, - { - {0x18e3eeb6b8937690ull,0x7c87ee4ffda9eb41ull,0x59d0d9e9eb070efdull,0x10b64beb52f348f5ull,}, - 
{0x60cb09b15da28d99ull,0xde4b5aaff3981423ull,0x7429b4169dfddfb9ull,0x199eb1a7a6de0f9full,}, - {0x450661858d54325eull,0x338439f5a896f88cull,0x9d41086dd111bec0ull,0x146d0b19b0b567ddull,}, - {0x93a470115d0544ceull,0xdbec88b263d6ba96ull,0x4162857e9d97ef77ull,0x07a4e45e194880aaull,}, - {0x7279bdde87e7ecb8ull,0xbfcc34d54c72df15ull,0x57d3ff1a2476f6c9ull,0x0f0da2351d32d405ull,}, - {0xffee1be1efc73104ull,0xb873a987a8076cb4ull,0xce026a94aa6b71f0ull,0x15d4bd558bf59554ull,}, - }, - { - {0xae631a8d76bd7f86ull,0x7e7d9176acbc845eull,0xea421fd87eb8808aull,0x20aaae552a029015ull,}, - {0x5c1c015cfce07393ull,0xc678b97a85aea9b0ull,0x1eea5259304f0a23ull,0x1464e4d058ceb8caull,}, - {0xc65d3f2d4e51915cull,0xeedd92d9fe368d68ull,0xc8df47e3a123fc9eull,0x0a40dfad54ccd6aaull,}, - {0x09a262e9428a05f8ull,0xa0510048ec69ab80ull,0x335a295aecb01ddbull,0x05d9e955d5b1a89full,}, - {0x5eb68ea11c52c37aull,0xe444556824dd8a88ull,0x8e380018a6aeef10ull,0x0442ce4eda39623dull,}, - {0xa77e431b883ec5b0ull,0xac34fb82921e9c20ull,0xa8cfc2d08ef8cfc0ull,0x24ae732a4db3bb4full,}, - }, - { - {0xd5563857f984777bull,0x538e5c618a4be3c1ull,0x5f8eff3fbeab5a7eull,0x017bdafb790e0102ull,}, - {0x6a62e076dc44c251ull,0xd4743cd8eb4cb3dfull,0x98f0d5617f07650full,0x0ef52eb4c0151010ull,}, - {0x516284d618713c13ull,0xe651d8c5769b47dfull,0x27fb0f16b90bfbdaull,0x10e729bd4403fe24ull,}, - {0x7770b670be42c842ull,0x6a9d9db10a3626b9ull,0x17676416c44a62ebull,0x2155a03fd59945caull,}, - {0xcd58941a2ba1e208ull,0x2d5e3caf14827df1ull,0x6e8dbafadc4e1635ull,0x03bbd3e6d397465aull,}, - {0x451703d643a411bbull,0xcca0c1d97355c175ull,0xc5074f56618aa2f1ull,0x04c8acdd37ef602full,}, - }, - { - {0x3f7e0caeff75a1d9ull,0x1b753ba68a2b8451ull,0xf46aeda408dbf4f5ull,0x11652b99c4365b3full,}, - {0x3f8bf5f03132d146ull,0x0b527b11a12d2424ull,0xd587034aa3632352ull,0x13ffef8175d1a563ull,}, - {0x2a30747e4ac8eeaaull,0x0aea36171552eed3ull,0x04e341313ec7b422ull,0x1fb62ea6d5e86357ull,}, - {0x13c69094d2dcc5aaull,0x54573685ddc44032ull,0xd95abdd392375f10ull,0x13a501913c2f1d0full,}, - {0x343cc1b0318577b8ull,0x98776ba96045eb10ull,0x5492dba5b5936d5dull,0x1d1bb567d6a602e6ull,}, - {0xccf58e05f8b305bdull,0x3fee26e8419548ceull,0x62c64af67fc27dc8ull,0x08456a814b2fe18bull,}, - }, - { - {0x47f8ccf69457895aull,0x66d08f143ca062fdull,0x8f0df2e2a97b4518ull,0x0cac6d2b34b243d6ull,}, - {0x758f56a94a45e6beull,0x63ed30c20cf6721cull,0x20e942550629c9ccull,0x167acfffb8203274ull,}, - {0x8e727dabacc57eb3ull,0xa2f85144ebbe15f3ull,0x7fc17e7a0a6a4291ull,0x1793c43f349e48b8ull,}, - {0xed2f91d056a5c2d3ull,0x30433d773122e8ddull,0x2c3fef6399c4f9deull,0x099b39a0e3e524f2ull,}, - {0x4cddac568a4b563cull,0xdcd1c44d3983138dull,0x2f421d9f8d71a88aull,0x01a02cb6459cdb12ull,}, - {0x68c09ced7ae8977dull,0x76cb2bf3a933cdaeull,0x6390cd95c4f85d40ull,0x1cad79870e6b2c2cull,}, - }, - { - {0xfd754584dcb80db2ull,0xb73ea36e2df2b8c0ull,0x3ca5645bffb60c04ull,0x1280d1e1f4dd4da6ull,}, - {0x75a069b69ae4403aull,0xbbf6c5ded1f82c60ull,0x34919f2295d7b5b4ull,0x1f7bc94e3a96507bull,}, - {0x9255ca27cb288f9dull,0x760719cfb400f56full,0x291bfbf807781368ull,0x15fa25b272fee67eull,}, - {0x6054f038190f5f6cull,0xe0978a57792a09bdull,0x1ed22ba69556fe50ull,0x20ba270b20baf856ull,}, - {0x55de530a1af249d0ull,0x249e57b2414ceb2cull,0xd98bdcde7f16edfcull,0x0ee1bfb7da744ae4ull,}, - {0x01b24c4d0bb96ddfull,0x32239e98244d75f0ull,0x20dc68759c157d45ull,0x0120769b781bc14eull,}, - }, - { - {0x4f93886e58c4695full,0x85d6a1914aba1d04ull,0x65bb00f8cf495806ull,0x22a2413c698ae97aull,}, - {0x5e7928222bb02f69ull,0x93a92c850ce1dfb0ull,0xab3eda670f968b1aull,0x1d80886e0fba63ffull,}, - 
{0x672372572dbdeb59ull,0xba4cd6dd6cb11489ull,0xc74f1c6e3b714d1bull,0x1680ad98da380987ull,}, - {0xbad24d644fd9ab88ull,0x5c817abf11d3ce46ull,0x50587e12664ad6ebull,0x13505c240ec7b092ull,}, - {0x69ade81d2b6d1284ull,0xdd1d9aacd53d3f77ull,0x0888b2de31545a07ull,0x110788f6944c78e4ull,}, - {0x81032f6ea72116caull,0xfcb0253b20bea779ull,0x3d0a38d424eba36eull,0x07bdfcb51526c1e5ull,}, - }, - { - {0xebb80cf2cf44bfbeull,0xb8d559e318097038ull,0x212ed4c3d148be8eull,0x07028dcc862fbbb7ull,}, - {0x91e0a395d89f04d4ull,0xf777ae0142ff07c1ull,0x546b9b47f738fa6eull,0x01c284ef516920c6ull,}, - {0x2042edb5a4eb2cdcull,0xc69cefe0a36a7068ull,0x54471d65b3238311ull,0x077562b3344b4304ull,}, - {0xdb85089b11ece88dull,0x5c27780550f90569ull,0xb9607c12434a6b3dull,0x0d02a6324718f932ull,}, - {0x22ef9b5c8b453c5dull,0x6fdc3875e9247830ull,0x20e375065f9e593aull,0x2351c044ce0d933aull,}, - {0xfa0fcb482093eacbull,0xf8d695e8413f5acdull,0xc7020d8c84a2d773ull,0x11bf7584e5283fa1ull,}, - }, - { - {0xc6b304aa2adf2dfcull,0x19aac2d5544ee834ull,0xb7966f8cd629c330ull,0x1bc72a08a8bf8f9bull,}, - {0x18a5f463799112c7ull,0x4f14db51e967ebc3ull,0xa5ddb48f64db5e8eull,0x15b4fdd8610f3a32ull,}, - {0xe7b86b479d7e2293ull,0x931034487abf490dull,0x8c40ab7dfd28a196ull,0x1d981d3918fdc3b5ull,}, - {0x00797000c2afd324ull,0xf2954f0f86622806ull,0x8464fe0995cd3a7dull,0x0f0a74df4ca00cc3ull,}, - {0x639707b1839c8330ull,0x9c8d491ad7d779a9ull,0x576b7e0f24ce5f46ull,0x21fbdcc42ccd04c2ull,}, - {0x4578db4bdfd55434ull,0x1126933c97e9f4dcull,0xe64529a8921d7415ull,0x12e48bab87ea1fe3ull,}, - }, - { - {0x3f6d2fd04bd5ed75ull,0x65e464cdac7d235bull,0x45903a63a3608961ull,0x1f60c825bccd55c9ull,}, - {0x36b33d0fb8528047ull,0xc8d1f1ad82683baeull,0x78f4b80065c2e4c6ull,0x2066f32874bd1228ull,}, - {0x8b6d6a4b986e8d4cull,0x58f6f275f1d020f4ull,0xe4f3c16209e87ad5ull,0x1cdc33d41ad30173ull,}, - {0x9ec18a6cba3fb3ecull,0x31fc74b68ac834c6ull,0x256788ece76e37b0ull,0x13de6919841928e1ull,}, - {0xae46aa08773971f6ull,0xacd04d9698d47643ull,0x3667178a594f2153ull,0x19a0cadfa3cb7fa0ull,}, - {0x228420456325e079ull,0x3e4ec53c418fdae9ull,0xb9fee919e867c6f1ull,0x2272413f3e989842ull,}, - }, - { - {0x6420ee94e7c764dcull,0x87b3c986d488deecull,0x11dc3e6b59de7ffbull,0x14bb613bce5792e2ull,}, - {0xcc0b60cd4e352976ull,0x794b585f70a5b463ull,0x415cb954036ba631ull,0x1e521f8201ca4258ull,}, - {0xd707ac91ecd5dbdaull,0x08ffd44e5fd83cc6ull,0xa5f39e0f8dff5afcull,0x02315f6a55599212ull,}, - {0x2cdbd9f11596e797ull,0x7c560adedcf2cb25ull,0xdc474409e5650d9dull,0x158bc955e7e492e2ull,}, - {0xd6023b14352a1766ull,0xd5c271d942b6541dull,0x5dc4d1c72d25258full,0x0753f065a4cb028eull,}, - {0x11b4229a4c62010aull,0x2949cb6b089b3aa9ull,0x01b8bdc50766366dull,0x1094dfda1e2e5e57ull,}, - }, - { - {0x773cc6e1ac12f73eull,0x77686f8d75a83e9eull,0x7ce94b7ef1bd53a0ull,0x005a7d3e75c16332ull,}, - {0xafdc64df2ceca388ull,0x15be551bbca0e367ull,0x62d9b7608cf3b8a2ull,0x11ddfe7a0a96af25ull,}, - {0x5d23851a77554f67ull,0xa0f51815094e8050ull,0x930af7569c7850d7ull,0x108eb034eeda1460ull,}, - {0x28a80b277688cae3ull,0xd09ef5d30ec9b193ull,0xb6c554e32540d421ull,0x1da12923355fd2faull,}, - {0x9db6509d0130494dull,0xe28936417c250459ull,0xde8b4491aa8d1dc1ull,0x194b8e7bfc005322ull,}, - {0x7aaeb4f2f941741bull,0xf9d7b55b452158f8ull,0x17e172a187f68105ull,0x02f620bde277950aull,}, - }, - { - {0xf555a7766ac21481ull,0x82b12050c9449770ull,0x7bd16da27eff49fcull,0x06d1ad9a6cd69b71ull,}, - {0xa059542aa0f64e9full,0x93671f16b269a351ull,0x795262fddcb7cc3eull,0x199f355d6263cf86ull,}, - {0x0cbf707f1f8f73aeull,0xf483501e15982b44ull,0x2456aaa4d84d80c0ull,0x0d0ffb5393f7dd0aull,}, - 
{0x62999996c09097e2ull,0x1b87e828f9fc66e4ull,0x6b17eb3166967f57ull,0x1603601303478f52ull,}, - {0xfb776d4fd407d485ull,0xac03efdb746bf127ull,0x57bde58a5671a601ull,0x0cfbfa20d141f05cull,}, - {0x625ac1161752cbe2ull,0xe3348570b6ad71bcull,0x155b3911f5335f75ull,0x1679ec68122edc64ull,}, - }, - { - {0x9334b4c82aee3ef8ull,0x7ea393af9d865ce4ull,0x0f4ee0906b864850ull,0x1d9e34461e27cc61ull,}, - {0x921b1a6aa179a081ull,0xcca25db2d609388dull,0x816b69ad9a56a314ull,0x00eb3f6388c4d375ull,}, - {0x04e25f4225e50e72ull,0x59a20b6edf897f2aull,0x0842d5f5823535b4ull,0x0dceaf5ae8e50885ull,}, - {0xac6598257175aa0aull,0x1d5d21e8129f2efaull,0xe81dcc9497cb17fdull,0x11327c40c92dff80ull,}, - {0x149e4b2c0a3bfd81ull,0xb8efe68c475436ebull,0x3a8bf06e9ca15cd8ull,0x152d72639c6e5308ull,}, - {0x217e0e34f3f76b8bull,0x5c722d926b596985ull,0x45417905be08807bull,0x1e6132b54ad5595eull,}, - }, - { - {0xe5b541097726667dull,0x5583dfb4ade471adull,0x1840bff44a2faef2ull,0x093c23f8028fe3b9ull,}, - {0xe1e3347370f6e6c7ull,0x8dd7352c4dcc2a17ull,0x3cade218210f9e29ull,0x190ff57eac6e8b87ull,}, - {0x34905e72c173fdc3ull,0x59f8c6f4373c834eull,0x1bd9feabed806c99ull,0x1f209a7935a8ba38ull,}, - {0xe44f080023c83b49ull,0xfd2006276058693cull,0x44b43b6e462a32cbull,0x0942a0ed8e4657ebull,}, - {0xf7e53796340fd772ull,0xf8219ede4152370full,0x548b9b002c19940cull,0x1d0aaff93f50f52full,}, - {0xb5987eb545462ddaull,0xe0f29867116336edull,0xcc75a11c3ff8374aull,0x144d0b8fda0a44a9ull,}, - }, - { - {0x676408d2ff1a7593ull,0xc96a8077d911776full,0x9efff30500904c63ull,0x100a6093df2ae343ull,}, - {0xf1f92502b846cf30ull,0x57888806036aec6cull,0x310ceb0b04caaa7cull,0x1192819a3058307bull,}, - {0xbbf882b39fec7883ull,0x4079d241f7e6e0efull,0xb3090a69b3c7261full,0x16440a02d7fb5d2dull,}, - {0x70e9c8a88422df45ull,0x48fa15635ca49bd9ull,0x0430c461bfb96d16ull,0x0a29a4007c99f6d1ull,}, - {0x643a2bdb308a297cull,0xe4a5bca158e65ff6ull,0xc8dd1579abdeb9e5ull,0x1ee4a94b3d6c775cull,}, - {0xc085b2622b5c4480ull,0x8c69048c5fcded96ull,0x418ba7bd3260d85dull,0x0b22158bb6c29f9eull,}, - }, - { - {0xf661abe667e83f01ull,0x41068a7e95fd10c0ull,0xc9c4cc186cb3eb72ull,0x1a95a93a30592461ull,}, - {0x78dfc65c7280895eull,0xb9f1514b98add459ull,0xc7d713fd92025a11ull,0x0dbe6c1ceabcf73full,}, - {0xe35368a946428244ull,0x990da5e2783a2762ull,0x686b61b7775fb02cull,0x1a79e39b78922172ull,}, - {0xbf8ca28c8d95600full,0x0f56487a909e51cbull,0xfa1da11e3018a2faull,0x07a32571b231773cull,}, - {0x46c84d812bce56f5ull,0x84aa8d8bfe2b498cull,0x699ad1f34e22d74cull,0x0ad743bd99c458dbull,}, - {0xa8d16c7e09aa59b0ull,0x59ba8cbe75f31d51ull,0x5c68705d7838ff4eull,0x1c863feb5090e87eull,}, - }, - { - {0x86af66313ed193baull,0xa0902147163778b5ull,0xa101fcdc6b2d6191ull,0x12fbff4713e6eb10ull,}, - {0x9e1abdaf6e329c66ull,0xd8de2fb4db8e7554ull,0xb4374e1e93a0171bull,0x0ba2ecd00749208full,}, - {0x0cad8f57c02ce090ull,0xcac04eddadd338ecull,0x7ee5c235934f9918ull,0x24db5a9b0ad7ed64ull,}, - {0x46288ad8e01c5063ull,0x4b4c58654226c44aull,0xc4974aaf56ae42dfull,0x173e64cdd5661536ull,}, - {0x58b3450781e7e080ull,0x14ab3a25a5e64bbcull,0x3f9f91743276d2f5ull,0x0e101d0b89b81cdcull,}, - {0xa6bca5fbe99b2b7full,0x5fb8817e670ef40eull,0xb44cbcb05de76cb3ull,0x17110ed4912babb5ull,}, - }, - { - {0x6745e77f4e05d8edull,0xed278e7875ebb5fdull,0x3662f60864a8ccd2ull,0x028104ffc0a31868ull,}, - {0x740b76d64f25c9f0ull,0xb519a415132160e7ull,0x550a38ed829c5f68ull,0x04ea27d6deefcfabull,}, - {0x32d82ea897185651ull,0x04a8f5b63a90573aull,0x2c88fdfba241b62full,0x0285780fe0b77687ull,}, - {0xfb6ebce4f4b20f13ull,0x8ce24ff3dad1a3c7ull,0x716f93b316af50c2ull,0x0a09e678713447efull,}, - 
{0x6868a19728642ca6ull,0x4be5579c08e0a30cull,0xbd630b8f9c3d1552ull,0x0f277cf26c8e60f2ull,}, - {0x1a105d54bc290b18ull,0xa7e1a7c716529370ull,0x6e5a6c5b44350fd0ull,0x1fd2ae638488fccbull,}, - }, -}; -#endif diff --git a/vendor/github.com/dexon-foundation/bls/test/bls256_test.cpp b/vendor/github.com/dexon-foundation/bls/test/bls256_test.cpp deleted file mode 100644 index e53a87057..000000000 --- a/vendor/github.com/dexon-foundation/bls/test/bls256_test.cpp +++ /dev/null @@ -1,3 +0,0 @@ -#define MCLBN_FP_UNIT_SIZE 4 -#include "bls_test.hpp" - diff --git a/vendor/github.com/dexon-foundation/bls/test/bls384_256_test.cpp b/vendor/github.com/dexon-foundation/bls/test/bls384_256_test.cpp deleted file mode 100644 index ea8126567..000000000 --- a/vendor/github.com/dexon-foundation/bls/test/bls384_256_test.cpp +++ /dev/null @@ -1,4 +0,0 @@ -#define MCLBN_FP_UNIT_SIZE 6 -#define MCLBN_FR_UNIT_SIZE 4 -#include "bls_test.hpp" - diff --git a/vendor/github.com/dexon-foundation/bls/test/bls384_test.cpp b/vendor/github.com/dexon-foundation/bls/test/bls384_test.cpp deleted file mode 100644 index 2212f8e6b..000000000 --- a/vendor/github.com/dexon-foundation/bls/test/bls384_test.cpp +++ /dev/null @@ -1,3 +0,0 @@ -#define MCLBN_FP_UNIT_SIZE 6 -#include "bls_test.hpp" - diff --git a/vendor/github.com/dexon-foundation/bls/test/bls_c256_test.cpp b/vendor/github.com/dexon-foundation/bls/test/bls_c256_test.cpp deleted file mode 100644 index 8613720b4..000000000 --- a/vendor/github.com/dexon-foundation/bls/test/bls_c256_test.cpp +++ /dev/null @@ -1,2 +0,0 @@ -#define MCLBN_FP_UNIT_SIZE 4 -#include "bls_c_test.hpp" diff --git a/vendor/github.com/dexon-foundation/bls/test/bls_c384_256_test.cpp b/vendor/github.com/dexon-foundation/bls/test/bls_c384_256_test.cpp deleted file mode 100644 index 6f153f9d8..000000000 --- a/vendor/github.com/dexon-foundation/bls/test/bls_c384_256_test.cpp +++ /dev/null @@ -1,3 +0,0 @@ -#define MCLBN_FP_UNIT_SIZE 6 -#define MCLBN_FR_UNIT_SIZE 4 -#include "bls_c_test.hpp" diff --git a/vendor/github.com/dexon-foundation/bls/test/bls_c384_test.cpp b/vendor/github.com/dexon-foundation/bls/test/bls_c384_test.cpp deleted file mode 100644 index b6886dd04..000000000 --- a/vendor/github.com/dexon-foundation/bls/test/bls_c384_test.cpp +++ /dev/null @@ -1,2 +0,0 @@ -#define MCLBN_FP_UNIT_SIZE 6 -#include "bls_c_test.hpp" diff --git a/vendor/github.com/dexon-foundation/bls/test/bls_c_test.hpp b/vendor/github.com/dexon-foundation/bls/test/bls_c_test.hpp deleted file mode 100644 index e9b6e6302..000000000 --- a/vendor/github.com/dexon-foundation/bls/test/bls_c_test.hpp +++ /dev/null @@ -1,437 +0,0 @@ -#include <bls/bls.h> -#include <string.h> -#include <cybozu/test.hpp> -#include <cybozu/inttype.hpp> -#include <cybozu/benchmark.hpp> -#include <mcl/gmp_util.hpp> - -size_t pubSize(size_t FrSize) -{ -#ifdef BLS_SWAP_G - return FrSize; -#else - return FrSize * 2; -#endif -} -size_t sigSize(size_t FrSize) -{ -#ifdef BLS_SWAP_G - return FrSize * 2; -#else - return FrSize; -#endif -} - -void bls_use_stackTest() -{ - blsSecretKey sec; - blsPublicKey pub; - blsSignature sig; - const char *msg = "this is a pen"; - const size_t msgSize = strlen(msg); - - blsSecretKeySetByCSPRNG(&sec); - - blsGetPublicKey(&pub, &sec); - - blsSign(&sig, &sec, msg, msgSize); - - CYBOZU_TEST_ASSERT(blsVerify(&sig, &pub, msg, msgSize)); -} - -void blsDataTest() -{ - const char *msg = "test test"; - const size_t msgSize = strlen(msg); - const size_t FrSize = blsGetFrByteSize(); - const size_t FpSize = blsGetG1ByteSize(); - blsSecretKey sec1, sec2; - blsSecretKeySetByCSPRNG(&sec1); - char buf[1024]; - size_t n; - size_t ret; - n = 
blsSecretKeyGetHexStr(buf, sizeof(buf), &sec1); - CYBOZU_TEST_ASSERT(0 < n && n <= FrSize * 2); - ret = blsSecretKeySetHexStr(&sec2, buf, n); - CYBOZU_TEST_EQUAL(ret, 0); - CYBOZU_TEST_ASSERT(blsSecretKeyIsEqual(&sec1, &sec2)); - - memset(&sec2, 0, sizeof(sec2)); - n = blsSecretKeySerialize(buf, sizeof(buf), &sec1); - CYBOZU_TEST_EQUAL(n, FrSize); - ret = blsSecretKeyDeserialize(&sec2, buf, n); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(blsSecretKeyIsEqual(&sec1, &sec2)); - - blsPublicKey pub1, pub2; - blsGetPublicKey(&pub1, &sec1); - n = blsPublicKeySerialize(buf, sizeof(buf), &pub1); - CYBOZU_TEST_EQUAL(n, pubSize(FpSize)); - ret = blsPublicKeyDeserialize(&pub2, buf, n); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(blsPublicKeyIsEqual(&pub1, &pub2)); - blsSignature sig1, sig2; - blsSign(&sig1, &sec1, msg, msgSize); - n = blsSignatureSerialize(buf, sizeof(buf), &sig1); - CYBOZU_TEST_EQUAL(n, sigSize(FpSize)); - ret = blsSignatureDeserialize(&sig2, buf, n); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(blsSignatureIsEqual(&sig1, &sig2)); -} - -void blsOrderTest(const char *curveOrder/*Fr*/, const char *fieldOrder/*Fp*/) -{ - char buf[1024]; - size_t len; - len = blsGetCurveOrder(buf, sizeof(buf)); - CYBOZU_TEST_ASSERT(len > 0); - CYBOZU_TEST_EQUAL(buf, curveOrder); - len = blsGetFieldOrder(buf, sizeof(buf)); - CYBOZU_TEST_ASSERT(len > 0); - CYBOZU_TEST_EQUAL(buf, fieldOrder); -} - -#if !defined(DISABLE_THREAD_TEST) || defined(__clang__) -#if defined(CYBOZU_CPP_VERSION) && CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 -#include <thread> -#include <memory> -struct Thread { - std::unique_ptr<std::thread> t; - Thread() : t() {} - ~Thread() - { - if (t) { - t->join(); - } - } - template<class F> - void run(F func, int p1, int p2) - { - t.reset(new std::thread(func, p1, p2)); - } -}; - -CYBOZU_TEST_AUTO(multipleInit) -{ - const size_t n = 100; - { - std::vector<Thread> vt(n); - for (size_t i = 0; i < n; i++) { - vt[i].run(blsInit, MCL_BN254, MCLBN_COMPILED_TIME_VAR); - } - } - CYBOZU_TEST_EQUAL(blsGetOpUnitSize(), 4u); -#if MCLBN_FP_UNIT_SIZE == 6 - { - std::vector<Thread> vt(n); - for (size_t i = 0; i < n; i++) { - vt[i].run(blsInit, MCL_BLS12_381, MCLBN_COMPILED_TIME_VAR); - } - } - CYBOZU_TEST_EQUAL(blsGetOpUnitSize(), 6u); -#endif -} -#endif -#endif - -void blsSerializeTest() -{ - const size_t FrSize = blsGetFrByteSize(); - const size_t FpSize = blsGetG1ByteSize(); - printf("FrSize=%d, FpSize=%d\n", (int)FrSize, (int)FpSize); - blsId id1, id2; - blsSecretKey sec1, sec2; - blsPublicKey pub1, pub2; - blsSignature sig1, sig2; - char buf[1024]; - size_t n; - size_t expectSize; - size_t ret; - const char dummyChar = '1'; - - // Id - expectSize = FrSize; - blsIdSetInt(&id1, -1); - n = blsIdSerialize(buf, sizeof(buf), &id1); - CYBOZU_TEST_EQUAL(n, expectSize); - - ret = blsIdDeserialize(&id2, buf, n); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(blsIdIsEqual(&id1, &id2)); - - ret = blsIdDeserialize(&id2, buf, n - 1); - CYBOZU_TEST_EQUAL(ret, 0); - - memset(&id2, 0, sizeof(id2)); - buf[n] = dummyChar; - ret = blsIdDeserialize(&id2, buf, n + 1); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(blsIdIsEqual(&id1, &id2)); - - n = blsIdSerialize(buf, expectSize, &id1); - CYBOZU_TEST_EQUAL(n, expectSize); - - // SecretKey - expectSize = FrSize; - blsSecretKeySetDecStr(&sec1, "-1", 2); - n = blsSecretKeySerialize(buf, sizeof(buf), &sec1); - CYBOZU_TEST_EQUAL(n, expectSize); - - ret = blsSecretKeyDeserialize(&sec2, buf, n); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(blsSecretKeyIsEqual(&sec1, &sec2)); - - ret = 
blsSecretKeyDeserialize(&sec2, buf, n - 1); - CYBOZU_TEST_EQUAL(ret, 0); - - memset(&sec2, 0, sizeof(sec2)); - buf[n] = dummyChar; - ret = blsSecretKeyDeserialize(&sec2, buf, n + 1); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(blsSecretKeyIsEqual(&sec1, &sec2)); - - n = blsSecretKeySerialize(buf, expectSize, &sec1); - CYBOZU_TEST_EQUAL(n, expectSize); - - // PublicKey - expectSize = pubSize(FpSize); - blsGetPublicKey(&pub1, &sec1); - n = blsPublicKeySerialize(buf, sizeof(buf), &pub1); - CYBOZU_TEST_EQUAL(n, expectSize); - CYBOZU_TEST_ASSERT(blsPublicKeyIsValidOrder(&pub1)); - - ret = blsPublicKeyDeserialize(&pub2, buf, n); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(blsPublicKeyIsEqual(&pub1, &pub2)); - - ret = blsPublicKeyDeserialize(&pub2, buf, n - 1); - CYBOZU_TEST_EQUAL(ret, 0); - - memset(&pub2, 0, sizeof(pub2)); - buf[n] = dummyChar; - ret = blsPublicKeyDeserialize(&pub2, buf, n + 1); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(blsPublicKeyIsEqual(&pub1, &pub2)); - - n = blsPublicKeySerialize(buf, expectSize, &pub1); - CYBOZU_TEST_EQUAL(n, expectSize); - - // Signature -#ifdef BLS_SWAP_G - expectSize = FpSize * 2; -#else - expectSize = FpSize; -#endif - blsSign(&sig1, &sec1, "abc", 3); - n = blsSignatureSerialize(buf, sizeof(buf), &sig1); - CYBOZU_TEST_EQUAL(n, expectSize); - CYBOZU_TEST_ASSERT(blsSignatureIsValidOrder(&sig1)); - - ret = blsSignatureDeserialize(&sig2, buf, n); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(blsSignatureIsEqual(&sig1, &sig2)); - - ret = blsSignatureDeserialize(&sig2, buf, n - 1); - CYBOZU_TEST_EQUAL(ret, 0); - - memset(&sig2, 0, sizeof(sig2)); - buf[n] = dummyChar; - ret = blsSignatureDeserialize(&sig2, buf, n + 1); - CYBOZU_TEST_EQUAL(ret, n); - CYBOZU_TEST_ASSERT(blsSignatureIsEqual(&sig1, &sig2)); - - n = blsSignatureSerialize(buf, expectSize, &sig1); - CYBOZU_TEST_EQUAL(n, expectSize); -} - -void blsVerifyOrderTest() -{ - puts("blsVerifyOrderTest"); -#ifdef BLS_SWAP_G - const uint8_t Qs[] = -#else - const uint8_t Ps[] = -#endif - { -0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, - }; -#ifdef BLS_SWAP_G - const uint8_t Ps[] = -#else - const uint8_t Qs[] = -#endif - { -0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, - }; - size_t n; - blsPublicKey pub; - n = blsPublicKeyDeserialize(&pub, Ps, sizeof(Ps)); - CYBOZU_TEST_EQUAL(n, 0); - blsPublicKeyVerifyOrder(0); - n = blsPublicKeyDeserialize(&pub, Ps, sizeof(Ps)); - CYBOZU_TEST_ASSERT(n > 0); - CYBOZU_TEST_ASSERT(!blsPublicKeyIsValidOrder(&pub)); - blsPublicKeyVerifyOrder(1); - - blsSignature sig; - n = blsSignatureDeserialize(&sig, Qs, sizeof(Qs)); - CYBOZU_TEST_EQUAL(n, 0); - blsSignatureVerifyOrder(0); - n = blsSignatureDeserialize(&sig, Qs, sizeof(Qs)); - CYBOZU_TEST_ASSERT(n > 0); - 
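The C-level serialization tests above pin down exact encodings: ids and secret keys serialize to Fr-sized buffers, public keys and signatures to pubSize/sigSize multiples of Fp, and truncated input must fail to deserialize. The same round-trip discipline can be exercised from Go through the binding vendored by this change; the following is a minimal sketch, assuming the byzantine-lab/bls ffi/go/bls package keeps herumi's usual Init/SetByCSPRNG/Serialize/Deserialize/IsEqual API:

package main

import (
	"bytes"
	"fmt"

	"github.com/byzantine-lab/bls/ffi/go/bls"
)

func main() {
	// BN254 is the first curve exercised by the C test table above.
	if err := bls.Init(bls.CurveFp254BNb); err != nil {
		panic(err)
	}
	var sec, sec2 bls.SecretKey
	sec.SetByCSPRNG()
	buf := sec.Serialize() // fixed-size Fr encoding, like blsSecretKeySerialize
	if err := sec2.Deserialize(buf); err != nil {
		panic(err)
	}
	fmt.Println("round-trip ok:", sec.IsEqual(&sec2) && bytes.Equal(buf, sec2.Serialize()))
}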
CYBOZU_TEST_ASSERT(!blsSignatureIsValidOrder(&sig)); - blsSignatureVerifyOrder(1); -} - -void blsAddSubTest() -{ - blsSecretKey sec[3]; - blsPublicKey pub[3]; - blsSignature sig[3]; - const char *msg = "this is a pen"; - const size_t msgSize = strlen(msg); - - const char *secHexStr[8] = { "12", "34" }; - for (int i = 0; i < 2; i++) { - blsSecretKeySetHexStr(&sec[i], secHexStr[i], strlen(secHexStr[i])); - blsGetPublicKey(&pub[i], &sec[i]); - blsSign(&sig[i], &sec[i], msg, msgSize); - } - sec[2] = sec[0]; - blsSecretKeyAdd(&sec[2], &sec[1]); - char buf[1024]; - size_t n = blsSecretKeyGetHexStr(buf, sizeof(buf), &sec[2]); - CYBOZU_TEST_EQUAL(n, 2); - CYBOZU_TEST_EQUAL(buf, "46"); // "12" + "34" - - pub[2] = pub[0]; - blsPublicKeyAdd(&pub[2], &pub[1]); - sig[2] = sig[0]; - blsSignatureAdd(&sig[2], &sig[1]); // sig[2] = sig[0] + sig[1] - blsSignature sig2; - blsSign(&sig2, &sec[2], msg, msgSize); // sig2 = signature by sec[2] - CYBOZU_TEST_ASSERT(blsSignatureIsEqual(&sig2, &sig[2])); - CYBOZU_TEST_ASSERT(blsVerify(&sig[2], &pub[2], msg, msgSize)); // verify by pub[2] - - blsSecretKeySub(&sec[2], &sec[1]); - CYBOZU_TEST_ASSERT(blsSecretKeyIsEqual(&sec[2], &sec[0])); - blsPublicKeySub(&pub[2], &pub[1]); - CYBOZU_TEST_ASSERT(blsPublicKeyIsEqual(&pub[2], &pub[0])); - blsSignatureSub(&sig[2], &sig[1]); - CYBOZU_TEST_ASSERT(blsSignatureIsEqual(&sig[2], &sig[0])); -} - -void blsTrivialShareTest() -{ - blsSecretKey sec1, sec2; - blsPublicKey pub1, pub2; - blsId id; - blsIdSetInt(&id, 123); - - blsSecretKeySetByCSPRNG(&sec1); - blsGetPublicKey(&pub1, &sec1); - int ret; - - memset(&sec2, 0, sizeof(sec2)); - ret = blsSecretKeyShare(&sec2, &sec1, 1, &id); - CYBOZU_TEST_EQUAL(ret, 0); - CYBOZU_TEST_ASSERT(blsSecretKeyIsEqual(&sec1, &sec2)); - memset(&sec2, 0, sizeof(sec2)); - ret = blsSecretKeyRecover(&sec2, &sec1, &id, 1); - CYBOZU_TEST_EQUAL(ret, 0); - CYBOZU_TEST_ASSERT(blsSecretKeyIsEqual(&sec1, &sec2)); - - memset(&pub2, 0, sizeof(pub2)); - ret = blsPublicKeyShare(&pub2, &pub1, 1, &id); - CYBOZU_TEST_EQUAL(ret, 0); - CYBOZU_TEST_ASSERT(blsPublicKeyIsEqual(&pub1, &pub2)); - memset(&pub2, 0, sizeof(pub2)); - ret = blsPublicKeyRecover(&pub2, &pub1, &id, 1); - CYBOZU_TEST_EQUAL(ret, 0); - CYBOZU_TEST_ASSERT(blsPublicKeyIsEqual(&pub1, &pub2)); -} - -void modTest(const char *rStr) -{ - unsigned char buf[1024] = {}; - int ret; - blsSecretKey sec; - const size_t maxByte = 64; // 512-bit - memset(buf, 0xff, maxByte); - ret = blsSecretKeySetLittleEndianMod(&sec, buf, maxByte); - CYBOZU_TEST_EQUAL(ret, 0); - const mpz_class x = (mpz_class(1) << (maxByte * 8)) - 1; // 512-bit 0xff....ff - const mpz_class r(rStr); - size_t n = blsSecretKeySerialize(buf, sizeof(buf), &sec); - CYBOZU_TEST_ASSERT(n > 0); - // serialized data to mpz_class - mpz_class y = 0; - for (size_t i = 0; i < n; i++) { - y <<= 8; - y += buf[n - 1 - i]; - } - CYBOZU_TEST_EQUAL(y, x % r); -} - -void blsBench() -{ - blsSecretKey sec; - blsPublicKey pub; - blsSignature sig; - const char *msg = "this is a pen"; - const size_t msgSize = strlen(msg); - - blsSecretKeySetByCSPRNG(&sec); - - blsGetPublicKey(&pub, &sec); - - CYBOZU_BENCH_C("sign", 10000, blsSign, &sig, &sec, msg, msgSize); - CYBOZU_BENCH_C("verify", 1000, blsVerify, &sig, &pub, msg, msgSize); -} - -CYBOZU_TEST_AUTO(all) -{ - const struct { - int curveType; - const char *r; - const char *p; - } tbl[] = { - { - MCL_BN254, - "16798108731015832284940804142231733909759579603404752749028378864165570215949", - "16798108731015832284940804142231733909889187121439069848933715426072753864723", - }, 
-#if MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 6 - { - MCL_BN381_1, - "5540996953667913971058039301942914304734176495422447785042938606876043190415948413757785063597439175372845535461389", - "5540996953667913971058039301942914304734176495422447785045292539108217242186829586959562222833658991069414454984723", - }, -#endif -#if MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE >= 4 - { - MCL_BLS12_381, - "52435875175126190479447740508185965837690552500527637822603658699938581184513", - "4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787", - }, -#endif - }; - for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) { - printf("i=%d\n", (int)i); - int ret = blsInit(tbl[i].curveType, MCLBN_COMPILED_TIME_VAR); - CYBOZU_TEST_EQUAL(ret, 0); - if (ret) { - printf("ERR %d\n", ret); - exit(1); - } - bls_use_stackTest(); - blsDataTest(); - blsOrderTest(tbl[i].r, tbl[i].p); - blsSerializeTest(); - if (tbl[i].curveType == MCL_BLS12_381) blsVerifyOrderTest(); - blsAddSubTest(); - blsTrivialShareTest(); - modTest(tbl[i].r); - blsBench(); - } -} diff --git a/vendor/github.com/dexon-foundation/bls/test/bls_test.hpp b/vendor/github.com/dexon-foundation/bls/test/bls_test.hpp deleted file mode 100644 index 346fafe15..000000000 --- a/vendor/github.com/dexon-foundation/bls/test/bls_test.hpp +++ /dev/null @@ -1,545 +0,0 @@ -#include <bls/bls.hpp> -#include <string> -#include <sstream> -#include <cybozu/test.hpp> -#include <cybozu/inttype.hpp> -#include <cybozu/benchmark.hpp> -#include <cybozu/sha2.hpp> - -template<class T> -void streamTest(const T& t) -{ - std::ostringstream oss; - oss << t; - std::istringstream iss(oss.str()); - T t2; - iss >> t2; - CYBOZU_TEST_EQUAL(t, t2); -} - -template<class T> -void testSetForBN254() -{ - /* - mask value to be less than r if the value >= (1 << (192 + 62)) - */ - const uint64_t fff = uint64_t(-1); - const uint64_t one = uint64_t(1); - const struct { - uint64_t in; - uint64_t expected; - } tbl[] = { - { fff, (one << 61) - 1 }, // masked with (1 << 61) - 1 - { one << 62, 0 }, // masked - { (one << 62) | (one << 61), (one << 61) }, // masked - { (one << 61) - 1, (one << 61) - 1 }, // same - }; - T t1, t2; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - uint64_t v1[] = { fff, fff, fff, tbl[i].in }; - uint64_t v2[] = { fff, fff, fff, tbl[i].expected }; - t1.set(v1); - t2.set(v2); - CYBOZU_TEST_EQUAL(t1, t2); - } -} - -void testForBN254() -{ - CYBOZU_TEST_EQUAL(bls::getOpUnitSize(), 4); - bls::Id id; - CYBOZU_TEST_ASSERT(id.isZero()); - id = 5; - CYBOZU_TEST_EQUAL(id, 5); - { - const uint64_t id1[] = { 1, 2, 3, 4 }; - id.set(id1); - std::ostringstream os; - os << id; - CYBOZU_TEST_EQUAL(os.str(), "0x4000000000000000300000000000000020000000000000001"); - } - testSetForBN254<bls::Id>(); - testSetForBN254<bls::SecretKey>(); -} - -void hashTest(int type) -{ - bls::SecretKey sec; - sec.init(); - bls::PublicKey pub; - sec.getPublicKey(pub); - const std::string h = "\x01\x02\x03"; - bls::Signature sig; - sec.signHash(sig, h); - CYBOZU_TEST_ASSERT(sig.verifyHash(pub, h)); - CYBOZU_TEST_ASSERT(!sig.verifyHash(pub, "\x01\x02\04")); - if (type == MCL_BN254) { - CYBOZU_TEST_EXCEPTION(sec.signHash(sig, "", 0), std::exception); - CYBOZU_TEST_EXCEPTION(sec.signHash(sig, "\x00", 1), std::exception); - CYBOZU_TEST_EXCEPTION(sec.signHash(sig, "\x00\x00", 2), std::exception); -#ifndef BLS_SWAP_G - const uint64_t c1[] = { 0x0c00000000000004ull, 0xcf0f000000000006ull, 0x26cd890000000003ull, 0x2523648240000001ull }; - const uint64_t mc1[] = { 0x9b0000000000000full, 0x921200000000000dull, 0x9366c48000000004ull }; - CYBOZU_TEST_EXCEPTION(sec.signHash(sig, c1, 32), 
std::exception); - CYBOZU_TEST_EXCEPTION(sec.signHash(sig, mc1, 24), std::exception); -#endif - } -} - -void blsTest() -{ - bls::SecretKey sec; - sec.init(); - streamTest(sec); - bls::PublicKey pub; - sec.getPublicKey(pub); - streamTest(pub); - for (int i = 0; i < 5; i++) { - std::string m = "hello"; - m += char('0' + i); - bls::Signature sig; - sec.sign(sig, m); - CYBOZU_TEST_ASSERT(sig.verify(pub, m)); - CYBOZU_TEST_ASSERT(!sig.verify(pub, m + "a")); - streamTest(sig); - CYBOZU_BENCH_C("sign", 10000, sec.sign, sig, m); - CYBOZU_BENCH_C("verify", 1000, sig.verify, pub, m); - } -} - -void k_of_nTest() -{ - const std::string m = "abc"; - const int n = 5; - const int k = 3; - bls::SecretKey sec0; - sec0.init(); - bls::Signature sig0; - sec0.sign(sig0, m); - bls::PublicKey pub0; - sec0.getPublicKey(pub0); - CYBOZU_TEST_ASSERT(sig0.verify(pub0, m)); - - bls::SecretKeyVec msk; - sec0.getMasterSecretKey(msk, k); - - bls::SecretKeyVec allPrvVec(n); - bls::IdVec allIdVec(n); - for (int i = 0; i < n; i++) { - int id = i + 1; - allPrvVec[i].set(msk, id); - allIdVec[i] = id; - - bls::SecretKey p; - p.set(msk.data(), k, id); - CYBOZU_TEST_EQUAL(allPrvVec[i], p); - } - - bls::SignatureVec allSigVec(n); - for (int i = 0; i < n; i++) { - CYBOZU_TEST_ASSERT(allPrvVec[i] != sec0); - allPrvVec[i].sign(allSigVec[i], m); - bls::PublicKey pub; - allPrvVec[i].getPublicKey(pub); - CYBOZU_TEST_ASSERT(pub != pub0); - CYBOZU_TEST_ASSERT(allSigVec[i].verify(pub, m)); - } - - /* - 3-out-of-n - can recover - */ - bls::SecretKeyVec secVec(3); - bls::IdVec idVec(3); - for (int a = 0; a < n; a++) { - secVec[0] = allPrvVec[a]; - idVec[0] = allIdVec[a]; - for (int b = a + 1; b < n; b++) { - secVec[1] = allPrvVec[b]; - idVec[1] = allIdVec[b]; - for (int c = b + 1; c < n; c++) { - secVec[2] = allPrvVec[c]; - idVec[2] = allIdVec[c]; - bls::SecretKey sec; - sec.recover(secVec, idVec); - CYBOZU_TEST_EQUAL(sec, sec0); - bls::SecretKey sec2; - sec2.recover(secVec.data(), idVec.data(), secVec.size()); - CYBOZU_TEST_EQUAL(sec, sec2); - } - } - } - { - secVec[0] = allPrvVec[0]; - secVec[1] = allPrvVec[1]; - secVec[2] = allPrvVec[0]; // same of secVec[0] - idVec[0] = allIdVec[0]; - idVec[1] = allIdVec[1]; - idVec[2] = allIdVec[0]; - bls::SecretKey sec; - CYBOZU_TEST_EXCEPTION_MESSAGE(sec.recover(secVec, idVec), std::exception, "same id"); - } - { - /* - n-out-of-n - can recover - */ - bls::SecretKey sec; - sec.recover(allPrvVec, allIdVec); - CYBOZU_TEST_EQUAL(sec, sec0); - } - /* - 2-out-of-n - can't recover - */ - secVec.resize(2); - idVec.resize(2); - for (int a = 0; a < n; a++) { - secVec[0] = allPrvVec[a]; - idVec[0] = allIdVec[a]; - for (int b = a + 1; b < n; b++) { - secVec[1] = allPrvVec[b]; - idVec[1] = allIdVec[b]; - bls::SecretKey sec; - sec.recover(secVec, idVec); - CYBOZU_TEST_ASSERT(sec != sec0); - } - } - /* - 3-out-of-n - can recover - */ - bls::SignatureVec sigVec(3); - idVec.resize(3); - for (int a = 0; a < n; a++) { - sigVec[0] = allSigVec[a]; - idVec[0] = allIdVec[a]; - for (int b = a + 1; b < n; b++) { - sigVec[1] = allSigVec[b]; - idVec[1] = allIdVec[b]; - for (int c = b + 1; c < n; c++) { - sigVec[2] = allSigVec[c]; - idVec[2] = allIdVec[c]; - bls::Signature sig; - sig.recover(sigVec, idVec); - CYBOZU_TEST_EQUAL(sig, sig0); - } - } - } - { - sigVec[0] = allSigVec[1]; idVec[0] = allIdVec[1]; - sigVec[1] = allSigVec[4]; idVec[1] = allIdVec[4]; - sigVec[2] = allSigVec[3]; idVec[2] = allIdVec[3]; - bls::Signature sig; - CYBOZU_BENCH_C("sig.recover", 100, sig.recover, sigVec, idVec); - } - { - /* - n-out-of-n - can 
recover - */ - bls::Signature sig; - sig.recover(allSigVec, allIdVec); - CYBOZU_TEST_EQUAL(sig, sig0); - } - /* - 2-out-of-n - can't recover - */ - sigVec.resize(2); - idVec.resize(2); - for (int a = 0; a < n; a++) { - sigVec[0] = allSigVec[a]; - idVec[0] = allIdVec[a]; - for (int b = a + 1; b < n; b++) { - sigVec[1] = allSigVec[b]; - idVec[1] = allIdVec[b]; - bls::Signature sig; - sig.recover(sigVec, idVec); - CYBOZU_TEST_ASSERT(sig != sig0); - } - } - // return same value if n = 1 - sigVec.resize(1); - idVec.resize(1); - sigVec[0] = allSigVec[0]; - idVec[0] = allIdVec[0]; - { - bls::Signature sig; - sig.recover(sigVec, idVec); - CYBOZU_TEST_EQUAL(sig, sigVec[0]); - } - // share and recover publicKey - { - bls::PublicKeyVec pubVec(k); - idVec.resize(k); - // select [0, k) publicKey - for (int i = 0; i < k; i++) { - allPrvVec[i].getPublicKey(pubVec[i]); - idVec[i] = allIdVec[i]; - } - bls::PublicKey pub; - pub.recover(pubVec, idVec); - CYBOZU_TEST_EQUAL(pub, pub0); - bls::PublicKey pub2; - pub2.recover(pubVec.data(), idVec.data(), pubVec.size()); - CYBOZU_TEST_EQUAL(pub, pub2); - } -} - -void popTest() -{ - const size_t k = 3; - const size_t n = 6; - const std::string m = "pop test"; - bls::SecretKey sec0; - sec0.init(); - bls::PublicKey pub0; - sec0.getPublicKey(pub0); - bls::Signature sig0; - sec0.sign(sig0, m); - CYBOZU_TEST_ASSERT(sig0.verify(pub0, m)); - - bls::SecretKeyVec msk; - sec0.getMasterSecretKey(msk, k); - - bls::PublicKeyVec mpk; - bls::getMasterPublicKey(mpk, msk); - bls::SignatureVec popVec; - bls::getPopVec(popVec, msk); - - for (size_t i = 0; i < popVec.size(); i++) { - CYBOZU_TEST_ASSERT(popVec[i].verify(mpk[i])); - } - - const int idTbl[n] = { - 3, 5, 193, 22, 15 - }; - bls::SecretKeyVec secVec(n); - bls::PublicKeyVec pubVec(n); - bls::SignatureVec sVec(n); - for (size_t i = 0; i < n; i++) { - int id = idTbl[i]; - secVec[i].set(msk, id); - secVec[i].getPublicKey(pubVec[i]); - bls::PublicKey pub; - pub.set(mpk, id); - CYBOZU_TEST_EQUAL(pubVec[i], pub); - - bls::Signature pop; - secVec[i].getPop(pop); - CYBOZU_TEST_ASSERT(pop.verify(pubVec[i])); - - secVec[i].sign(sVec[i], m); - CYBOZU_TEST_ASSERT(sVec[i].verify(pubVec[i], m)); - } - secVec.resize(k); - sVec.resize(k); - bls::IdVec idVec(k); - for (size_t i = 0; i < k; i++) { - idVec[i] = idTbl[i]; - } - bls::SecretKey sec; - sec.recover(secVec, idVec); - CYBOZU_TEST_EQUAL(sec, sec0); - bls::Signature sig; - sig.recover(sVec, idVec); - CYBOZU_TEST_EQUAL(sig, sig0); - bls::Signature sig2; - sig2.recover(sVec.data(), idVec.data(), sVec.size()); - CYBOZU_TEST_EQUAL(sig, sig2); -} - -void addTest() -{ - bls::SecretKey sec1, sec2; - sec1.init(); - sec2.init(); - CYBOZU_TEST_ASSERT(sec1 != sec2); - - bls::PublicKey pub1, pub2; - sec1.getPublicKey(pub1); - sec2.getPublicKey(pub2); - - const std::string m = "doremi"; - bls::Signature sig1, sig2; - sec1.sign(sig1, m); - sec2.sign(sig2, m); - CYBOZU_TEST_ASSERT((sig1 + sig2).verify(pub1 + pub2, m)); -} - -void aggregateTest() -{ - const size_t n = 10; - bls::SecretKey secs[n]; - bls::PublicKey pubs[n], pub; - bls::Signature sigs[n], sig; - const std::string m = "abc"; - for (size_t i = 0; i < n; i++) { - secs[i].init(); - secs[i].getPublicKey(pubs[i]); - secs[i].sign(sigs[i], m); - } - pub = pubs[0]; - sig = sigs[0]; - for (size_t i = 1; i < n; i++) { - pub.add(pubs[i]); - sig.add(sigs[i]); - } - CYBOZU_TEST_ASSERT(sig.verify(pub, m)); -} - -void dataTest() -{ - const size_t FrSize = bls::getFrByteSize(); - const size_t FpSize = bls::getG1ByteSize(); - bls::SecretKey sec; - 
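k_of_nTest and popTest above drive the full Shamir-style threshold lifecycle: split a master secret into shares evaluated at nonzero member ids, then check that any k of n shares (of secret keys, signatures, or public keys) recover the original while k-1 shares do not. A compact Go sketch of the 3-of-5 secret-key case, under the assumption that the vendored ffi/go/bls binding exposes herumi's GetMasterSecretKey/Set/Recover and ID.SetDecString:

package main

import (
	"fmt"

	"github.com/byzantine-lab/bls/ffi/go/bls"
)

func main() {
	if err := bls.Init(bls.CurveFp254BNb); err != nil {
		panic(err)
	}
	const k, n = 3, 5
	var sec0 bls.SecretKey
	sec0.SetByCSPRNG()
	msk := sec0.GetMasterSecretKey(k) // k coefficients of a degree k-1 polynomial

	ids := make([]bls.ID, n)
	shares := make([]bls.SecretKey, n)
	for i := 0; i < n; i++ {
		// Ids must be nonzero and distinct; 1..n is the usual choice.
		if err := ids[i].SetDecString(fmt.Sprintf("%d", i+1)); err != nil {
			panic(err)
		}
		if err := shares[i].Set(msk, &ids[i]); err != nil {
			panic(err)
		}
	}
	var rec bls.SecretKey
	if err := rec.Recover(shares[:k], ids[:k]); err != nil {
		panic(err)
	}
	fmt.Println("k shares recover the master key:", rec.IsEqual(&sec0))
}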
sec.init(); - std::string str; - sec.getStr(str, bls::IoFixedByteSeq); - { - CYBOZU_TEST_EQUAL(str.size(), FrSize); - bls::SecretKey sec2; - sec2.setStr(str, bls::IoFixedByteSeq); - CYBOZU_TEST_EQUAL(sec, sec2); - } - bls::PublicKey pub; - sec.getPublicKey(pub); - pub.getStr(str, bls::IoFixedByteSeq); - { -#ifdef BLS_SWAP_G - CYBOZU_TEST_EQUAL(str.size(), FpSize); -#else - CYBOZU_TEST_EQUAL(str.size(), FpSize * 2); -#endif - bls::PublicKey pub2; - pub2.setStr(str, bls::IoFixedByteSeq); - CYBOZU_TEST_EQUAL(pub, pub2); - } - std::string m = "abc"; - bls::Signature sign; - sec.sign(sign, m); - sign.getStr(str, bls::IoFixedByteSeq); - { -#ifdef BLS_SWAP_G - CYBOZU_TEST_EQUAL(str.size(), FpSize * 2); -#else - CYBOZU_TEST_EQUAL(str.size(), FpSize); -#endif - bls::Signature sign2; - sign2.setStr(str, bls::IoFixedByteSeq); - CYBOZU_TEST_EQUAL(sign, sign2); - } - bls::Id id; - const uint64_t v[] = { 1, 2, 3, 4, 5, 6, }; - id.set(v); - id.getStr(str, bls::IoFixedByteSeq); - { - CYBOZU_TEST_EQUAL(str.size(), FrSize); - bls::Id id2; - id2.setStr(str, bls::IoFixedByteSeq); - CYBOZU_TEST_EQUAL(id, id2); - } -} - -void verifyAggregateTest() -{ - const size_t n = 10; - bls::SecretKey secs[n]; - bls::PublicKey pubs[n]; - bls::Signature sigs[n], sig; - const size_t sizeofHash = 32; - struct Hash { char data[sizeofHash]; }; - std::vector<Hash> h(n); - for (size_t i = 0; i < n; i++) { - char msg[128]; - CYBOZU_SNPRINTF(msg, sizeof(msg), "abc-%d", (int)i); - const size_t msgSize = strlen(msg); - cybozu::Sha256().digest(h[i].data, sizeofHash, msg, msgSize); - secs[i].init(); - secs[i].getPublicKey(pubs[i]); - secs[i].signHash(sigs[i], h[i].data, sizeofHash); - } - sig = sigs[0]; - for (size_t i = 1; i < n; i++) { - sig.add(sigs[i]); - } - CYBOZU_TEST_ASSERT(sig.verifyAggregatedHashes(pubs, h.data(), sizeofHash, n)); - bls::Signature invalidSig = sigs[0] + sigs[1]; - CYBOZU_TEST_ASSERT(!invalidSig.verifyAggregatedHashes(pubs, h.data(), sizeofHash, n)); - h[0].data[0]++; - CYBOZU_TEST_ASSERT(!sig.verifyAggregatedHashes(pubs, h.data(), sizeofHash, n)); -} - -unsigned int writeSeq(void *self, void *buf, unsigned int bufSize) -{ - int& seq = *(int*)self; - char *p = (char *)buf; - for (unsigned int i = 0; i < bufSize; i++) { - p[i] = char(seq++); - } - return bufSize; -} - -void setRandFuncTest() -{ - blsSecretKey sec; - const int seqInit1 = 5; - int seq = seqInit1; - blsSetRandFunc(&seq, writeSeq); - blsSecretKeySetByCSPRNG(&sec); - unsigned char buf[128]; - size_t n = blsSecretKeySerialize(buf, sizeof(buf), &sec); - CYBOZU_TEST_ASSERT(n > 0); - for (size_t i = 0; i < n - 1; i++) { - // omit buf[n - 1] because it may be masked - CYBOZU_TEST_EQUAL(buf[i], seqInit1 + i); - } - // use default CSPRNG - blsSetRandFunc(0, 0); - blsSecretKeySetByCSPRNG(&sec); - n = blsSecretKeySerialize(buf, sizeof(buf), &sec); - CYBOZU_TEST_ASSERT(n > 0); - printf("sec="); - for (size_t i = 0; i < n; i++) { - printf("%02x", buf[i]); - } - printf("\n"); -} - -void testAll() -{ - blsTest(); - k_of_nTest(); - popTest(); - addTest(); - dataTest(); - aggregateTest(); - verifyAggregateTest(); - setRandFuncTest(); -} -CYBOZU_TEST_AUTO(all) -{ - const struct { - int type; - const char *name; - } tbl[] = { - { MCL_BN254, "BN254" }, -#if MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 6 - { MCL_BN381_1, "BN381_1" }, -#endif -#if MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 4 - { MCL_BLS12_381, "BLS12_381" }, -#endif - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - printf("curve=%s\n", tbl[i].name); - int type = tbl[i].type; - 
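addTest, aggregateTest, and verifyAggregateTest above rely on the additive structure of BLS: the sum of n signatures over one message verifies against the sum of the n public keys. The equivalent flow in Go, again sketched against the vendored binding's assumed PublicKey.Add/Sign.Add/Sign.Verify API:

package main

import (
	"fmt"

	"github.com/byzantine-lab/bls/ffi/go/bls"
)

func main() {
	if err := bls.Init(bls.CurveFp254BNb); err != nil {
		panic(err)
	}
	const n = 10
	msg := "abc"
	var aggPub bls.PublicKey
	var aggSig bls.Sign
	for i := 0; i < n; i++ {
		var sec bls.SecretKey
		sec.SetByCSPRNG()
		pub := sec.GetPublicKey()
		sig := sec.Sign(msg)
		if i == 0 {
			aggPub, aggSig = *pub, *sig
		} else {
			aggPub.Add(pub) // point addition on public keys
			aggSig.Add(sig) // point addition on signatures
		}
	}
	// Holds because all n signatures are over the same message.
	fmt.Println("aggregate verifies:", aggSig.Verify(&aggPub, msg))
}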
bls::init(type); - if (type == MCL_BN254) { - testForBN254(); - } - testAll(); - hashTest(type); - } -} diff --git a/vendor/github.com/dexon-foundation/bls/test/proj/bls_test/bls_test.vcxproj b/vendor/github.com/dexon-foundation/bls/test/proj/bls_test/bls_test.vcxproj deleted file mode 100644 index 1755135fb..000000000 --- a/vendor/github.com/dexon-foundation/bls/test/proj/bls_test/bls_test.vcxproj +++ /dev/null @@ -1,88 +0,0 @@ [The 88 deleted lines were MSBuild project XML whose tags were stripped during extraction; the surviving text shows: Debug|x64 and Release|x64 configurations, ProjectGuid {51266DE6-B57B-4AE3-B85C-282F170E1728}, Win32Proj / fp_test, Application project type, PlatformToolset v140, MultiByte character set, warning Level3, Disabled vs. MaxSpeed optimization, preprocessor definitions WIN32;_DEBUG;_CONSOLE and WIN32;NDEBUG;_CONSOLE, Console subsystem.] \ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/LICENSE b/vendor/github.com/dexon-foundation/dexon-consensus/LICENSE deleted file mode 100644 index 0a041280b..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/LICENSE +++ /dev/null @@ -1,165 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. 
- - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. 
If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/common/event.go b/vendor/github.com/dexon-foundation/dexon-consensus/common/event.go deleted file mode 100644 index 4e4e23bf3..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/common/event.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package common - -import ( - "container/heap" - "sync" -) - -type heightEventFn func(uint64) - -type heightEvent struct { - h uint64 - fn heightEventFn -} - -// heightEvents implements a Min-Heap structure. 
-type heightEvents []heightEvent - -func (h heightEvents) Len() int { return len(h) } -func (h heightEvents) Less(i, j int) bool { return h[i].h < h[j].h } -func (h heightEvents) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -func (h *heightEvents) Push(x interface{}) { - *h = append(*h, x.(heightEvent)) -} -func (h *heightEvents) Pop() interface{} { - old := *h - n := len(old) - x := old[n-1] - *h = old[0 : n-1] - return x -} - -// Event implements the Observer pattern. -type Event struct { - heightEvents heightEvents - heightEventsLock sync.Mutex -} - -// NewEvent creates a new event instance. -func NewEvent() *Event { - he := heightEvents{} - heap.Init(&he) - return &Event{ - heightEvents: he, - } -} - -// RegisterHeight to get notified on a specific height. -func (e *Event) RegisterHeight(h uint64, fn heightEventFn) { - e.heightEventsLock.Lock() - defer e.heightEventsLock.Unlock() - heap.Push(&e.heightEvents, heightEvent{ - h: h, - fn: fn, - }) -} - -// NotifyHeight and trigger function callback. -func (e *Event) NotifyHeight(h uint64) { - fns := func() (fns []heightEventFn) { - e.heightEventsLock.Lock() - defer e.heightEventsLock.Unlock() - if len(e.heightEvents) == 0 { - return - } - for h >= e.heightEvents[0].h { - he := heap.Pop(&e.heightEvents).(heightEvent) - fns = append(fns, he.fn) - if len(e.heightEvents) == 0 { - return - } - } - return - }() - for _, fn := range fns { - fn(h) - } -} - -// Reset clears all pending event -func (e *Event) Reset() { - e.heightEventsLock.Lock() - defer e.heightEventsLock.Unlock() - e.heightEvents = heightEvents{} -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/common/logger.go b/vendor/github.com/dexon-foundation/dexon-consensus/common/logger.go deleted file mode 100644 index 3328e939a..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/common/logger.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package common - -import "log" - -// Logger define the way to receive logs from Consensus instance. -// NOTE: parameter in 'ctx' should be paired as key-value mapping. For example, -// to log an error with message: -// logger.Error("some message", "error", err) -// which is similar to loggers with context: -// logger.Error("some message", map[string]interface{}{ -// "error": err, -// }) -type Logger interface { - // Info logs info level logs. - Trace(msg string, ctx ...interface{}) - Debug(msg string, ctx ...interface{}) - Info(msg string, ctx ...interface{}) - Warn(msg string, ctx ...interface{}) - Error(msg string, ctx ...interface{}) -} - -// NullLogger logs nothing. -type NullLogger struct{} - -// Trace implements Logger interface. -func (logger *NullLogger) Trace(msg string, ctx ...interface{}) { -} - -// Debug implements Logger interface. 
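
The Event type deleted above is a small height-triggered observer: callbacks sit in the min-heap, and NotifyHeight pops and fires every callback whose registered height is at or below the notified height. A usage sketch against the API as shown (import path per the rebranded vendor tree):

    package main

    import "github.com/byzantine-lab/dexon-consensus/common"

    func main() {
    	evt := common.NewEvent()
    	evt.RegisterHeight(100, func(h uint64) {
    		// runs once some NotifyHeight(h) with h >= 100 arrives
    	})
    	evt.NotifyHeight(99)  // below 100: nothing fires
    	evt.NotifyHeight(105) // fires the callback with h = 105
    	evt.Reset()           // drops any callbacks still pending
    }
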
-func (logger *NullLogger) Debug(msg string, ctx ...interface{}) { -} - -// Info implements Logger interface. -func (logger *NullLogger) Info(msg string, ctx ...interface{}) { -} - -// Warn implements Logger interface. -func (logger *NullLogger) Warn(msg string, ctx ...interface{}) { -} - -// Error implements Logger interface. -func (logger *NullLogger) Error(msg string, ctx ...interface{}) { -} - -// SimpleLogger logs everything. -type SimpleLogger struct{} - -// composeVargs makes (msg, ctx...) could be pass to log.Println -func composeVargs(msg string, ctxs []interface{}) []interface{} { - args := []interface{}{msg} - for _, c := range ctxs { - args = append(args, c) - } - return args -} - -// Trace implements Logger interface. -func (logger *SimpleLogger) Trace(msg string, ctx ...interface{}) { - log.Println(composeVargs(msg, ctx)...) -} - -// Debug implements Logger interface. -func (logger *SimpleLogger) Debug(msg string, ctx ...interface{}) { - log.Println(composeVargs(msg, ctx)...) -} - -// Info implements Logger interface. -func (logger *SimpleLogger) Info(msg string, ctx ...interface{}) { - log.Println(composeVargs(msg, ctx)...) -} - -// Warn implements Logger interface. -func (logger *SimpleLogger) Warn(msg string, ctx ...interface{}) { - log.Println(composeVargs(msg, ctx)...) -} - -// Error implements Logger interface. -func (logger *SimpleLogger) Error(msg string, ctx ...interface{}) { - log.Println(composeVargs(msg, ctx)...) -} - -// CustomLogger logs everything. -type CustomLogger struct { - logger *log.Logger -} - -// NewCustomLogger creates a new custom logger. -func NewCustomLogger(logger *log.Logger) *CustomLogger { - return &CustomLogger{ - logger: logger, - } -} - -// Trace implements Logger interface. -func (logger *CustomLogger) Trace(msg string, ctx ...interface{}) { - logger.logger.Println(composeVargs(msg, ctx)...) -} - -// Debug implements Logger interface. -func (logger *CustomLogger) Debug(msg string, ctx ...interface{}) { - logger.logger.Println(composeVargs(msg, ctx)...) -} - -// Info implements Logger interface. -func (logger *CustomLogger) Info(msg string, ctx ...interface{}) { - logger.logger.Println(composeVargs(msg, ctx)...) -} - -// Warn implements Logger interface. -func (logger *CustomLogger) Warn(msg string, ctx ...interface{}) { - logger.logger.Println(composeVargs(msg, ctx)...) -} - -// Error implements Logger interface. -func (logger *CustomLogger) Error(msg string, ctx ...interface{}) { - logger.logger.Println(composeVargs(msg, ctx)...) -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/common/types.go b/vendor/github.com/dexon-foundation/dexon-consensus/common/types.go deleted file mode 100644 index 883492bf3..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/common/types.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. 
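
All five levels of SimpleLogger and CustomLogger funnel through composeVargs, so the variadic ctx is printed as flat key-value pairs after the message. Wiring CustomLogger to the standard library, as a sketch:

    package main

    import (
    	"log"
    	"os"

    	"github.com/byzantine-lab/dexon-consensus/common"
    )

    func main() {
    	logger := common.NewCustomLogger(
    		log.New(os.Stderr, "[consensus] ", log.LstdFlags))
    	logger.Info("round started", "round", 42, "nodes", 31)
    	// -> [consensus] 2019/06/12 17:31:08 round started round 42 nodes 31
    }
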
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package common - -import ( - "bytes" - "encoding/hex" - "sort" - "time" -) - -const ( - // HashLength is the length of a hash in DEXON. - HashLength = 32 -) - -// Hash is the basic hash type in DEXON. -type Hash [HashLength]byte - -func (h Hash) String() string { - return hex.EncodeToString([]byte(h[:])) -} - -// Bytes return the hash as slice of bytes. -func (h Hash) Bytes() []byte { - return h[:] -} - -// Equal compares if two hashes are the same. -func (h Hash) Equal(hp Hash) bool { - return h == hp -} - -// Less compares if current hash is lesser. -func (h Hash) Less(hp Hash) bool { - return bytes.Compare(h[:], hp[:]) < 0 -} - -// MarshalText implements the encoding.TextMarhsaler interface. -func (h Hash) MarshalText() ([]byte, error) { - result := make([]byte, hex.EncodedLen(HashLength)) - hex.Encode(result, h[:]) - return result, nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -func (h *Hash) UnmarshalText(text []byte) error { - _, err := hex.Decode(h[:], text) - return err -} - -// Hashes is for sorting hashes. -type Hashes []Hash - -func (hs Hashes) Len() int { return len(hs) } -func (hs Hashes) Less(i, j int) bool { return hs[i].Less(hs[j]) } -func (hs Hashes) Swap(i, j int) { hs[i], hs[j] = hs[j], hs[i] } - -// SortedHashes is a slice of hashes sorted in ascending order. -type SortedHashes Hashes - -// NewSortedHashes converts a slice of hashes to a sorted one. It's a -// firewall to prevent us from assigning unsorted hashes to a variable -// declared as SortedHashes directly. -func NewSortedHashes(hs Hashes) SortedHashes { - sort.Sort(hs) - return SortedHashes(hs) -} - -// ByTime implements sort.Interface for time.Time. -type ByTime []time.Time - -func (t ByTime) Len() int { return len(t) } -func (t ByTime) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t ByTime) Less(i, j int) bool { return t[i].Before(t[j]) } diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/common/utils.go b/vendor/github.com/dexon-foundation/dexon-consensus/common/utils.go deleted file mode 100644 index 0e847900f..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/common/utils.go +++ /dev/null @@ -1,41 +0,0 @@ -package common - -import ( - "math/rand" - "time" -) - -var random *rand.Rand - -func init() { - random = rand.New(rand.NewSource(time.Now().Unix())) -} - -// NewRandomHash returns a random Hash-like value. -func NewRandomHash() Hash { - x := Hash{} - for i := 0; i < HashLength; i++ { - x[i] = byte(random.Int() % 256) - } - return x -} - -// GenerateRandomBytes generates bytes randomly. -func GenerateRandomBytes() []byte { - randomness := make([]byte, 32) - _, err := rand.Read(randomness) - if err != nil { - panic(err) - } - return randomness -} - -// CopyBytes copies byte slice. -func CopyBytes(src []byte) (dst []byte) { - if len(src) == 0 { - return - } - dst = make([]byte, len(src)) - copy(dst, src) - return -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go deleted file mode 100644 index 17def6747..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go +++ /dev/null @@ -1,676 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. 
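
common.Hash is a fixed 32-byte value with hex text encoding, and NewSortedHashes is the deliberate chokepoint for producing a SortedHashes value, so the sorted invariant is carried by the type. A quick sketch of both:

    package main

    import (
    	"fmt"

    	"github.com/byzantine-lab/dexon-consensus/common"
    )

    func main() {
    	hs := common.Hashes{common.NewRandomHash(), common.NewRandomHash()}
    	sorted := common.NewSortedHashes(hs)   // sorts hs in place, ascending byte order
    	fmt.Println(sorted[0].Less(sorted[1])) // true unless the two hashes are equal

    	txt, _ := sorted[0].MarshalText() // 64 hex characters
    	var back common.Hash
    	_ = back.UnmarshalText(txt)
    	fmt.Println(back.Equal(sorted[0])) // true
    }
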
-// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package core - -import ( - "context" - "errors" - "math" - "sync" - "time" - - lru "github.com/hashicorp/golang-lru" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/types" - typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" - "github.com/dexon-foundation/dexon-consensus/core/utils" -) - -// Errors returned from BA modules -var ( - ErrPreviousRoundIsNotFinished = errors.New("previous round is not finished") - ErrRoundOutOfRange = errors.New("round out of range") - ErrInvalidBlock = errors.New("invalid block") - ErrNoValidLeader = errors.New("no valid leader") - ErrIncorrectCRSSignature = errors.New("incorrect CRS signature") - ErrBlockTooOld = errors.New("block too old") -) - -const maxResultCache = 100 -const settingLimit = 3 - -// genValidLeader generate a validLeader function for agreement modules. -func genValidLeader( - mgr *agreementMgr) validLeaderFn { - return func(block *types.Block, crs common.Hash) (bool, error) { - if block.Timestamp.After(time.Now()) { - return false, nil - } - if block.Position.Round >= DKGDelayRound { - if mgr.recv.npks == nil { - return false, nil - } - if block.Position.Round > mgr.recv.npks.Round { - return false, nil - } - if block.Position.Round < mgr.recv.npks.Round { - return false, ErrBlockTooOld - } - } - if !utils.VerifyCRSSignature(block, crs, mgr.recv.npks) { - return false, ErrIncorrectCRSSignature - } - if err := mgr.bcModule.sanityCheck(block); err != nil { - if err == ErrRetrySanityCheckLater { - return false, nil - } - return false, err - } - mgr.logger.Debug("Calling Application.VerifyBlock", "block", block) - switch mgr.app.VerifyBlock(block) { - case types.VerifyInvalidBlock: - return false, ErrInvalidBlock - case types.VerifyRetryLater: - return false, nil - default: - } - return true, nil - } -} - -type agreementMgrConfig struct { - utils.RoundBasedConfig - - notarySetSize uint32 - lambdaBA time.Duration - crs common.Hash -} - -func (c *agreementMgrConfig) from( - round uint64, config *types.Config, crs common.Hash) { - c.notarySetSize = config.NotarySetSize - c.lambdaBA = config.LambdaBA - c.crs = crs - c.SetupRoundBasedFields(round, config) -} - -func newAgreementMgrConfig(prev agreementMgrConfig, config *types.Config, - crs common.Hash) (c agreementMgrConfig) { - c = agreementMgrConfig{} - c.from(prev.RoundID()+1, config, crs) - c.AppendTo(prev.RoundBasedConfig) - return -} - -type baRoundSetting struct { - round uint64 - dkgSet map[types.NodeID]struct{} - threshold int - ticker Ticker - crs common.Hash -} - -type agreementMgr struct { - // TODO(mission): unbound Consensus instance from this module. 
- con *Consensus - ID types.NodeID - app Application - gov Governance - network Network - logger common.Logger - cache *utils.NodeSetCache - signer *utils.Signer - bcModule *blockChain - ctx context.Context - configs []agreementMgrConfig - baModule *agreement - recv *consensusBAReceiver - processedBAResult map[types.Position]struct{} - voteFilter *utils.VoteFilter - settingCache *lru.Cache - curRoundSetting *baRoundSetting - waitGroup sync.WaitGroup - isRunning bool - lock sync.RWMutex -} - -func newAgreementMgr(con *Consensus) (mgr *agreementMgr, err error) { - settingCache, _ := lru.New(settingLimit) - mgr = &agreementMgr{ - con: con, - ID: con.ID, - app: con.app, - gov: con.gov, - network: con.network, - logger: con.logger, - cache: con.nodeSetCache, - signer: con.signer, - bcModule: con.bcModule, - ctx: con.ctx, - processedBAResult: make(map[types.Position]struct{}, maxResultCache), - voteFilter: utils.NewVoteFilter(), - settingCache: settingCache, - } - mgr.recv = &consensusBAReceiver{ - consensus: con, - restartNotary: make(chan types.Position, 1), - } - return mgr, nil -} - -func (mgr *agreementMgr) prepare() { - round := mgr.bcModule.tipRound() - agr := newAgreement( - mgr.ID, - mgr.recv, - newLeaderSelector(genValidLeader(mgr), mgr.logger), - mgr.signer, - mgr.logger) - setting := mgr.generateSetting(round) - if setting == nil { - mgr.logger.Warn("Unable to prepare init setting", "round", round) - return - } - mgr.curRoundSetting = setting - agr.notarySet = mgr.curRoundSetting.dkgSet - // Hacky way to make agreement module self contained. - mgr.recv.agreementModule = agr - mgr.baModule = agr - if round >= DKGDelayRound { - if _, exist := setting.dkgSet[mgr.ID]; exist { - mgr.logger.Debug("Preparing signer and npks.", "round", round) - npk, signer, err := mgr.con.cfgModule.getDKGInfo(round, false) - if err != nil { - mgr.logger.Error("Failed to prepare signer and npks.", - "round", round, - "error", err) - } - mgr.logger.Debug("Prepared signer and npks.", - "round", round, "signer", signer != nil, "npks", npk != nil) - } - } - return -} - -func (mgr *agreementMgr) run() { - mgr.lock.Lock() - defer mgr.lock.Unlock() - if mgr.isRunning { - return - } - mgr.isRunning = true - mgr.waitGroup.Add(1) - go func() { - defer mgr.waitGroup.Done() - mgr.runBA(mgr.bcModule.tipRound()) - }() -} - -func (mgr *agreementMgr) calcLeader( - dkgSet map[types.NodeID]struct{}, - crs common.Hash, pos types.Position) ( - types.NodeID, error) { - nodeSet := types.NewNodeSetFromMap(dkgSet) - leader := nodeSet.GetSubSet(1, types.NewNodeLeaderTarget( - crs, pos.Height)) - for nID := range leader { - return nID, nil - } - return types.NodeID{}, ErrNoValidLeader -} - -func (mgr *agreementMgr) config(round uint64) *agreementMgrConfig { - mgr.lock.RLock() - defer mgr.lock.RUnlock() - if round < mgr.configs[0].RoundID() { - panic(ErrRoundOutOfRange) - } - roundIndex := round - mgr.configs[0].RoundID() - if roundIndex >= uint64(len(mgr.configs)) { - return nil - } - return &mgr.configs[roundIndex] -} - -func (mgr *agreementMgr) notifyRoundEvents(evts []utils.RoundEventParam) error { - mgr.lock.Lock() - defer mgr.lock.Unlock() - apply := func(e utils.RoundEventParam) error { - if len(mgr.configs) > 0 { - lastCfg := mgr.configs[len(mgr.configs)-1] - if e.BeginHeight != lastCfg.RoundEndHeight() { - return ErrInvalidBlockHeight - } - if lastCfg.RoundID() == e.Round { - mgr.configs[len(mgr.configs)-1].ExtendLength() - } else if lastCfg.RoundID()+1 == e.Round { - mgr.configs = append(mgr.configs, newAgreementMgrConfig( - 
lastCfg, e.Config, e.CRS)) - } else { - return ErrInvalidRoundID - } - } else { - c := agreementMgrConfig{} - c.from(e.Round, e.Config, e.CRS) - c.SetRoundBeginHeight(e.BeginHeight) - mgr.configs = append(mgr.configs, c) - } - return nil - } - for _, e := range evts { - if err := apply(e); err != nil { - return err - } - } - return nil -} - -func (mgr *agreementMgr) checkProposer( - round uint64, proposerID types.NodeID) error { - if round == mgr.curRoundSetting.round { - if _, exist := mgr.curRoundSetting.dkgSet[proposerID]; !exist { - return ErrNotInNotarySet - } - } else if round == mgr.curRoundSetting.round+1 { - setting := mgr.generateSetting(round) - if setting == nil { - return ErrConfigurationNotReady - } - if _, exist := setting.dkgSet[proposerID]; !exist { - return ErrNotInNotarySet - } - } - return nil -} - -func (mgr *agreementMgr) processVote(v *types.Vote) (err error) { - if !mgr.recv.isNotary { - return nil - } - if mgr.voteFilter.Filter(v) { - return nil - } - if err := mgr.checkProposer(v.Position.Round, v.ProposerID); err != nil { - return err - } - if err = mgr.baModule.processVote(v); err == nil { - mgr.baModule.updateFilter(mgr.voteFilter) - mgr.voteFilter.AddVote(v) - } - if err == ErrSkipButNoError { - err = nil - } - return -} - -func (mgr *agreementMgr) processBlock(b *types.Block) error { - if err := mgr.checkProposer(b.Position.Round, b.ProposerID); err != nil { - return err - } - return mgr.baModule.processBlock(b) -} - -func (mgr *agreementMgr) touchAgreementResult( - result *types.AgreementResult) (first bool) { - // DO NOT LOCK THIS FUNCTION!!!!!!!! YOU WILL REGRET IT!!!!! - if _, exist := mgr.processedBAResult[result.Position]; !exist { - first = true - if len(mgr.processedBAResult) > maxResultCache { - for k := range mgr.processedBAResult { - // Randomly drop one element. - delete(mgr.processedBAResult, k) - break - } - } - mgr.processedBAResult[result.Position] = struct{}{} - } - return -} - -func (mgr *agreementMgr) untouchAgreementResult( - result *types.AgreementResult) { - // DO NOT LOCK THIS FUNCTION!!!!!!!! YOU WILL REGRET IT!!!!! 
- delete(mgr.processedBAResult, result.Position) -} - -func (mgr *agreementMgr) processAgreementResult( - result *types.AgreementResult) error { - aID := mgr.baModule.agreementID() - if isStop(aID) { - return nil - } - if result.Position == aID && !mgr.baModule.confirmed() { - mgr.logger.Info("Syncing BA", "position", result.Position) - if result.Position.Round >= DKGDelayRound { - return mgr.baModule.processAgreementResult(result) - } - for key := range result.Votes { - if err := mgr.baModule.processVote(&result.Votes[key]); err != nil { - return err - } - } - } else if result.Position.Newer(aID) { - mgr.logger.Info("Fast syncing BA", "position", result.Position) - if result.Position.Round < DKGDelayRound { - mgr.logger.Debug("Calling Network.PullBlocks for fast syncing BA", - "hash", result.BlockHash) - mgr.network.PullBlocks(common.Hashes{result.BlockHash}) - for key := range result.Votes { - if err := mgr.baModule.processVote(&result.Votes[key]); err != nil { - return err - } - } - } - setting := mgr.generateSetting(result.Position.Round) - if setting == nil { - mgr.logger.Warn("unable to get setting", "round", - result.Position.Round) - return ErrConfigurationNotReady - } - mgr.curRoundSetting = setting - leader, err := mgr.calcLeader(setting.dkgSet, setting.crs, result.Position) - if err != nil { - return err - } - mgr.baModule.restart( - setting.dkgSet, setting.threshold, - result.Position, leader, setting.crs) - if result.Position.Round >= DKGDelayRound { - return mgr.baModule.processAgreementResult(result) - } - } - return nil -} - -func (mgr *agreementMgr) processFinalizedBlock(block *types.Block) error { - aID := mgr.baModule.agreementID() - if block.Position.Older(aID) { - return nil - } - mgr.baModule.processFinalizedBlock(block) - return nil -} - -func (mgr *agreementMgr) stop() { - // Stop all running agreement modules. - func() { - mgr.lock.Lock() - defer mgr.lock.Unlock() - mgr.baModule.stop() - }() - // Block until all routines are done. - mgr.waitGroup.Wait() -} - -func (mgr *agreementMgr) generateSetting(round uint64) *baRoundSetting { - if setting, exist := mgr.settingCache.Get(round); exist { - return setting.(*baRoundSetting) - } - curConfig := mgr.config(round) - if curConfig == nil { - return nil - } - var dkgSet map[types.NodeID]struct{} - if round >= DKGDelayRound { - _, qualidifed, err := typesDKG.CalcQualifyNodes( - mgr.gov.DKGMasterPublicKeys(round), - mgr.gov.DKGComplaints(round), - utils.GetDKGThreshold(mgr.gov.Configuration(round)), - ) - if err != nil { - mgr.logger.Error("Failed to get gpk", "round", round, "error", err) - return nil - } - dkgSet = qualidifed - } - if len(dkgSet) == 0 { - var err error - dkgSet, err = mgr.cache.GetNotarySet(round) - if err != nil { - mgr.logger.Error("Failed to get notarySet", "round", round, "error", err) - return nil - } - } - setting := &baRoundSetting{ - crs: curConfig.crs, - dkgSet: dkgSet, - round: round, - threshold: utils.GetBAThreshold(&types.Config{ - NotarySetSize: curConfig.notarySetSize}), - } - mgr.settingCache.Add(round, setting) - return setting -} - -func (mgr *agreementMgr) runBA(initRound uint64) { - // These are round based variables. - var ( - currentRound uint64 - nextRound = initRound - curConfig = mgr.config(initRound) - setting = &baRoundSetting{} - tickDuration time.Duration - ticker Ticker - ) - - // Check if this routine needs to awake in this round and prepare essential - // variables when yes. 
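
generateSetting above memoizes per-round BA settings in a hashicorp/golang-lru cache capped at settingLimit entries, so repeat lookups for a round skip the DKG and notary-set recomputation. The cache pattern in isolation (buildSetting is a hypothetical stand-in for the expensive path):

    import lru "github.com/hashicorp/golang-lru"

    func settingFor(cache *lru.Cache, round uint64) *baRoundSetting {
    	if v, ok := cache.Get(round); ok {
    		return v.(*baRoundSetting) // values are interface{}; assert back
    	}
    	s := buildSetting(round) // hypothetical: DKG / notary-set lookup
    	cache.Add(round, s)      // may evict the least-recently-used round
    	return s
    }
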
- checkRound := func() (isDKG bool) { - defer func() { - currentRound = nextRound - nextRound++ - }() - // Wait until the configuartion for next round is ready. - for { - if setting = mgr.generateSetting(nextRound); setting != nil { - break - } else { - mgr.logger.Debug("Round is not ready", "round", nextRound) - time.Sleep(1 * time.Second) - } - } - _, isDKG = setting.dkgSet[mgr.ID] - if isDKG { - mgr.logger.Info("Selected as dkg set", - "ID", mgr.ID, - "round", nextRound) - } else { - mgr.logger.Info("Not selected as dkg set", - "ID", mgr.ID, - "round", nextRound) - } - // Setup ticker - if tickDuration != curConfig.lambdaBA { - if ticker != nil { - ticker.Stop() - } - ticker = newTicker(mgr.gov, nextRound, TickerBA) - tickDuration = curConfig.lambdaBA - } - setting.ticker = ticker - return - } -Loop: - for { - select { - case <-mgr.ctx.Done(): - break Loop - default: - } - mgr.recv.isNotary = checkRound() - mgr.voteFilter = utils.NewVoteFilter() - mgr.voteFilter.Position.Round = currentRound - mgr.recv.emptyBlockHashMap = &sync.Map{} - if currentRound >= DKGDelayRound && mgr.recv.isNotary { - var err error - mgr.recv.npks, mgr.recv.psigSigner, err = - mgr.con.cfgModule.getDKGInfo(currentRound, false) - if err != nil { - mgr.logger.Warn("cannot get dkg info", - "round", currentRound, "error", err) - } - } else { - mgr.recv.npks = nil - mgr.recv.psigSigner = nil - } - // Run BA for this round. - mgr.recv.restartNotary <- types.Position{ - Round: currentRound, - Height: math.MaxUint64, - } - if err := mgr.baRoutineForOneRound(setting); err != nil { - mgr.logger.Error("BA routine failed", - "error", err, - "nodeID", mgr.ID) - break Loop - } - } -} - -func (mgr *agreementMgr) baRoutineForOneRound( - setting *baRoundSetting) (err error) { - agr := mgr.baModule - recv := mgr.recv - oldPos := agr.agreementID() - restart := func(restartPos types.Position) (breakLoop bool, err error) { - if !isStop(restartPos) { - if restartPos.Height+1 >= mgr.config(setting.round).RoundEndHeight() { - for { - select { - case <-mgr.ctx.Done(): - break - default: - } - tipRound := mgr.bcModule.tipRound() - if tipRound > setting.round { - break - } else { - mgr.logger.Debug("Waiting blockChain to change round...", - "curRound", setting.round, - "tipRound", tipRound) - } - time.Sleep(100 * time.Millisecond) - } - // This round is finished. - breakLoop = true - return - } - if restartPos.Older(oldPos) { - // The restartNotary event is triggered by 'BlockConfirmed' - // of some older block. - return - } - } - var nextHeight uint64 - var nextTime time.Time - for { - // Make sure we are stoppable. 
- select { - case <-mgr.ctx.Done(): - breakLoop = true - return - default: - } - nextHeight, nextTime = mgr.bcModule.nextBlock() - if nextHeight != notReadyHeight { - if isStop(restartPos) { - break - } - if nextHeight > restartPos.Height { - break - } - } - mgr.logger.Debug("BlockChain not ready!!!", - "old", oldPos, "restart", restartPos, "next", nextHeight) - time.Sleep(100 * time.Millisecond) - } - nextPos := types.Position{ - Round: setting.round, - Height: nextHeight, - } - oldPos = nextPos - var leader types.NodeID - leader, err = mgr.calcLeader(setting.dkgSet, setting.crs, nextPos) - if err != nil { - return - } - time.Sleep(nextTime.Sub(time.Now())) - setting.ticker.Restart() - agr.restart(setting.dkgSet, setting.threshold, nextPos, leader, setting.crs) - return - } -Loop: - for { - select { - case <-mgr.ctx.Done(): - break Loop - default: - } - if agr.confirmed() { - // Block until receive restartPos - select { - case restartPos := <-recv.restartNotary: - breakLoop, err := restart(restartPos) - if err != nil { - return err - } - if breakLoop { - break Loop - } - case <-mgr.ctx.Done(): - break Loop - } - } - select { - case restartPos := <-recv.restartNotary: - breakLoop, err := restart(restartPos) - if err != nil { - return err - } - if breakLoop { - break Loop - } - default: - } - if !mgr.recv.isNotary { - select { - case <-setting.ticker.Tick(): - continue Loop - case <-mgr.ctx.Done(): - break Loop - } - } - if err = agr.nextState(); err != nil { - mgr.logger.Error("Failed to proceed to next state", - "nodeID", mgr.ID.String(), - "error", err) - break Loop - } - if agr.pullVotes() { - pos := agr.agreementID() - mgr.logger.Debug("Calling Network.PullVotes for syncing votes", - "position", pos) - mgr.network.PullVotes(pos) - } - for i := 0; i < agr.clocks(); i++ { - // Priority select for agreement.done(). - select { - case <-agr.done(): - continue Loop - default: - } - select { - case <-agr.done(): - continue Loop - case <-setting.ticker.Tick(): - } - } - } - return nil -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-state.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-state.go deleted file mode 100644 index 0d1ae58bc..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-state.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package core - -import ( - "fmt" - - "github.com/dexon-foundation/dexon-consensus/core/types" -) - -// Errors for agreement state module. 
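
The clock loop near the end of baRoutineForOneRound uses a two-step "priority select": a non-blocking check of agr.done() first, then a blocking select on done-or-tick. A single select would pick between two ready channels at random; the leading default-select guarantees the done signal wins. The bare idiom:

    func waitTick(done <-chan struct{}, tick <-chan time.Time) bool {
    	select {
    	case <-done: // always drained first if already ready
    		return false
    	default:
    	}
    	select {
    	case <-done:
    		return false
    	case <-tick: // only taken when done was not already signalled
    		return true
    	}
    }
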
-var ( - ErrNoEnoughVoteInPrepareState = fmt.Errorf("not enough votes in prepare state") - ErrNoEnoughVoteInAckState = fmt.Errorf("not enough votes in ack state") -) - -// agreementStateType is the state of agreement -type agreementStateType int - -// agreementStateType enum. -const ( - stateFast agreementStateType = iota - stateFastVote - stateInitial - statePreCommit - stateCommit - stateForward - statePullVote - stateSleep -) - -type agreementState interface { - state() agreementStateType - nextState() (agreementState, error) - clocks() int -} - -//----- FastState ----- -type fastState struct { - a *agreementData -} - -func newFastState(a *agreementData) *fastState { - return &fastState{a: a} -} - -func (s *fastState) state() agreementStateType { return stateFast } -func (s *fastState) clocks() int { return 0 } -func (s *fastState) nextState() (agreementState, error) { - if func() bool { - s.a.lock.Lock() - defer s.a.lock.Unlock() - return s.a.isLeader - }() { - hash := s.a.recv.ProposeBlock() - if hash != types.NullBlockHash { - s.a.lock.Lock() - defer s.a.lock.Unlock() - s.a.recv.ProposeVote(types.NewVote(types.VoteFast, hash, s.a.period)) - } - } - return newFastVoteState(s.a), nil -} - -//----- FastVoteState ----- -type fastVoteState struct { - a *agreementData -} - -func newFastVoteState(a *agreementData) *fastVoteState { - return &fastVoteState{a: a} -} - -func (s *fastVoteState) state() agreementStateType { return stateFastVote } -func (s *fastVoteState) clocks() int { return 3 } -func (s *fastVoteState) nextState() (agreementState, error) { - return newInitialState(s.a), nil -} - -//----- InitialState ----- -type initialState struct { - a *agreementData -} - -func newInitialState(a *agreementData) *initialState { - return &initialState{a: a} -} - -func (s *initialState) state() agreementStateType { return stateInitial } -func (s *initialState) clocks() int { return 0 } -func (s *initialState) nextState() (agreementState, error) { - if func() bool { - s.a.lock.Lock() - defer s.a.lock.Unlock() - return !s.a.isLeader - }() { - // Leader already proposed block in fastState. 
- hash := s.a.recv.ProposeBlock() - s.a.lock.Lock() - defer s.a.lock.Unlock() - s.a.recv.ProposeVote(types.NewVote(types.VoteInit, hash, s.a.period)) - } - return newPreCommitState(s.a), nil -} - -//----- PreCommitState ----- -type preCommitState struct { - a *agreementData -} - -func newPreCommitState(a *agreementData) *preCommitState { - return &preCommitState{a: a} -} - -func (s *preCommitState) state() agreementStateType { return statePreCommit } -func (s *preCommitState) clocks() int { return 2 } -func (s *preCommitState) nextState() (agreementState, error) { - s.a.lock.RLock() - defer s.a.lock.RUnlock() - if s.a.lockValue == types.SkipBlockHash || - s.a.lockValue == types.NullBlockHash { - hash := s.a.leader.leaderBlockHash() - s.a.recv.ProposeVote(types.NewVote(types.VotePreCom, hash, s.a.period)) - } else { - s.a.recv.ProposeVote(types.NewVote( - types.VotePreCom, s.a.lockValue, s.a.period)) - } - return newCommitState(s.a), nil -} - -//----- CommitState ----- -type commitState struct { - a *agreementData -} - -func newCommitState(a *agreementData) *commitState { - return &commitState{a: a} -} - -func (s *commitState) state() agreementStateType { return stateCommit } -func (s *commitState) clocks() int { return 2 } -func (s *commitState) nextState() (agreementState, error) { - s.a.lock.Lock() - defer s.a.lock.Unlock() - s.a.recv.ProposeVote(types.NewVote(types.VoteCom, s.a.lockValue, s.a.period)) - return newForwardState(s.a), nil -} - -// ----- ForwardState ----- -type forwardState struct { - a *agreementData -} - -func newForwardState(a *agreementData) *forwardState { - return &forwardState{a: a} -} - -func (s *forwardState) state() agreementStateType { return stateForward } -func (s *forwardState) clocks() int { return 4 } - -func (s *forwardState) nextState() (agreementState, error) { - return newPullVoteState(s.a), nil -} - -// ----- PullVoteState ----- -// pullVoteState is a special state to ensure the assumption in the consensus -// algorithm that every vote will eventually arrive for all nodes. -type pullVoteState struct { - a *agreementData -} - -func newPullVoteState(a *agreementData) *pullVoteState { - return &pullVoteState{a: a} -} - -func (s *pullVoteState) state() agreementStateType { return statePullVote } -func (s *pullVoteState) clocks() int { return 4 } - -func (s *pullVoteState) nextState() (agreementState, error) { - return s, nil -} - -// ----- SleepState ----- -// sleepState is a special state after BA has output and waits for restart. -type sleepState struct { - a *agreementData -} - -func newSleepState(a *agreementData) *sleepState { - return &sleepState{a: a} -} - -func (s *sleepState) state() agreementStateType { return stateSleep } -func (s *sleepState) clocks() int { return 65536 } - -func (s *sleepState) nextState() (agreementState, error) { - return s, nil -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go deleted file mode 100644 index d90afc610..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go +++ /dev/null @@ -1,797 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. 
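
Taken together, these states form a ticker-driven machine: BA parks in each state for clocks() ticks, then advances through nextState() (fast, fastVote, initial, preCommit, commit, forward, pullVote, with sleep as the terminal state once a block is confirmed; pullVote and sleep return themselves). The driving loop, reduced to a sketch with the tick channel assumed:

    func drive(s agreementState, tick <-chan time.Time) {
    	for {
    		for i := 0; i < s.clocks(); i++ {
    			<-tick // agreement.clocks() further scales this by the current period
    		}
    		next, err := s.nextState()
    		if err != nil {
    			return // e.g. not enough votes to advance
    		}
    		s = next // self-returning states make this loop in place
    	}
    }
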
-// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package core - -import ( - "fmt" - "math" - "sync" - "sync/atomic" - "time" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/types" - "github.com/dexon-foundation/dexon-consensus/core/utils" -) - -// closedchan is a reusable closed channel. -var closedchan = make(chan struct{}) - -func init() { - close(closedchan) -} - -// Errors for agreement module. -var ( - ErrInvalidVote = fmt.Errorf("invalid vote") - ErrNotInNotarySet = fmt.Errorf("not in notary set") - ErrIncorrectVoteSignature = fmt.Errorf("incorrect vote signature") - ErrIncorrectVotePartialSignature = fmt.Errorf("incorrect vote psig") - ErrMismatchBlockPosition = fmt.Errorf("mismatch block position") -) - -// ErrFork for fork error in agreement. -type ErrFork struct { - nID types.NodeID - old, new common.Hash -} - -func (e *ErrFork) Error() string { - return fmt.Sprintf("fork is found for %s, old %s, new %s", - e.nID.String(), e.old, e.new) -} - -// ErrForkVote for fork vote error in agreement. -type ErrForkVote struct { - nID types.NodeID - old, new *types.Vote -} - -func (e *ErrForkVote) Error() string { - return fmt.Sprintf("fork vote is found for %s, old %s, new %s", - e.nID.String(), e.old, e.new) -} - -func newVoteListMap() []map[types.NodeID]*types.Vote { - listMap := make([]map[types.NodeID]*types.Vote, types.MaxVoteType) - for idx := range listMap { - listMap[idx] = make(map[types.NodeID]*types.Vote) - } - return listMap -} - -// agreementReceiver is the interface receiving agreement event. -type agreementReceiver interface { - ProposeVote(vote *types.Vote) - ProposeBlock() common.Hash - // ConfirmBlock is called with lock hold. User can safely use all data within - // agreement module. - ConfirmBlock(common.Hash, map[types.NodeID]*types.Vote) - PullBlocks(common.Hashes) - ReportForkVote(v1, v2 *types.Vote) - ReportForkBlock(b1, b2 *types.Block) - VerifyPartialSignature(vote *types.Vote) (bool, bool) -} - -type pendingBlock struct { - block *types.Block - receivedTime time.Time -} - -type pendingVote struct { - vote *types.Vote - receivedTime time.Time -} - -// agreementData is the data for agreementState. -type agreementData struct { - recv agreementReceiver - - ID types.NodeID - isLeader bool - leader *leaderSelector - lockValue common.Hash - lockIter uint64 - period uint64 - requiredVote int - votes map[uint64][]map[types.NodeID]*types.Vote - lock sync.RWMutex - blocks map[types.NodeID]*types.Block - blocksLock sync.Mutex -} - -// agreement is the agreement protocal describe in the Crypto Shuffle Algorithm. -type agreement struct { - state agreementState - data *agreementData - aID *atomic.Value - doneChan chan struct{} - notarySet map[types.NodeID]struct{} - hasVoteFast bool - hasOutput bool - lock sync.RWMutex - pendingBlock []pendingBlock - pendingVote []pendingVote - pendingAgreementResult map[types.Position]*types.AgreementResult - candidateBlock map[common.Hash]*types.Block - fastForward chan uint64 - signer *utils.Signer - logger common.Logger -} - -// newAgreement creates a agreement instance. 
-func newAgreement( - ID types.NodeID, - recv agreementReceiver, - leader *leaderSelector, - signer *utils.Signer, - logger common.Logger) *agreement { - agreement := &agreement{ - data: &agreementData{ - recv: recv, - ID: ID, - leader: leader, - }, - aID: &atomic.Value{}, - pendingAgreementResult: make(map[types.Position]*types.AgreementResult), - candidateBlock: make(map[common.Hash]*types.Block), - fastForward: make(chan uint64, 1), - signer: signer, - logger: logger, - } - agreement.stop() - return agreement -} - -// restart the agreement -func (a *agreement) restart( - notarySet map[types.NodeID]struct{}, - threshold int, aID types.Position, leader types.NodeID, - crs common.Hash) { - if !func() bool { - a.lock.Lock() - defer a.lock.Unlock() - if !isStop(aID) { - oldAID := a.agreementID() - if !isStop(oldAID) && !aID.Newer(oldAID) { - return false - } - } - a.logger.Debug("Restarting BA", - "notarySet", notarySet, "position", aID, "leader", leader) - a.data.lock.Lock() - defer a.data.lock.Unlock() - a.data.blocksLock.Lock() - defer a.data.blocksLock.Unlock() - a.data.votes = make(map[uint64][]map[types.NodeID]*types.Vote) - a.data.votes[1] = newVoteListMap() - a.data.period = 2 - a.data.blocks = make(map[types.NodeID]*types.Block) - a.data.requiredVote = threshold - a.data.leader.restart(crs) - a.data.lockValue = types.SkipBlockHash - a.data.lockIter = 0 - a.data.isLeader = a.data.ID == leader - if a.doneChan != nil { - close(a.doneChan) - } - a.doneChan = make(chan struct{}) - a.fastForward = make(chan uint64, 1) - a.hasVoteFast = false - a.hasOutput = false - a.state = newFastState(a.data) - a.notarySet = notarySet - a.candidateBlock = make(map[common.Hash]*types.Block) - a.aID.Store(struct { - pos types.Position - leader types.NodeID - }{aID, leader}) - return true - }() { - return - } - - if isStop(aID) { - return - } - - var result *types.AgreementResult - func() { - a.lock.Lock() - defer a.lock.Unlock() - newPendingAgreementResult := make( - map[types.Position]*types.AgreementResult) - for pos, agr := range a.pendingAgreementResult { - if pos.Newer(aID) { - newPendingAgreementResult[pos] = agr - } else if pos == aID { - result = agr - } - } - a.pendingAgreementResult = newPendingAgreementResult - }() - - expireTime := time.Now().Add(-10 * time.Second) - replayBlock := make([]*types.Block, 0) - func() { - a.lock.Lock() - defer a.lock.Unlock() - newPendingBlock := make([]pendingBlock, 0) - for _, pending := range a.pendingBlock { - if aID.Newer(pending.block.Position) { - continue - } else if pending.block.Position == aID { - if result == nil || - result.Position.Round < DKGDelayRound || - result.BlockHash == pending.block.Hash { - replayBlock = append(replayBlock, pending.block) - } - } else if pending.receivedTime.After(expireTime) { - newPendingBlock = append(newPendingBlock, pending) - } - } - a.pendingBlock = newPendingBlock - }() - - replayVote := make([]*types.Vote, 0) - func() { - a.lock.Lock() - defer a.lock.Unlock() - newPendingVote := make([]pendingVote, 0) - for _, pending := range a.pendingVote { - if aID.Newer(pending.vote.Position) { - continue - } else if pending.vote.Position == aID { - if result == nil || result.Position.Round < DKGDelayRound { - replayVote = append(replayVote, pending.vote) - } - } else if pending.receivedTime.After(expireTime) { - newPendingVote = append(newPendingVote, pending) - } - } - a.pendingVote = newPendingVote - }() - - for _, block := range replayBlock { - if err := a.processBlock(block); err != nil { - a.logger.Error("Failed to 
process block when restarting agreement", - "block", block) - } - } - - if result != nil { - if err := a.processAgreementResult(result); err != nil { - a.logger.Error("Failed to process agreement result when retarting", - "result", result) - } - } - - for _, vote := range replayVote { - if err := a.processVote(vote); err != nil { - a.logger.Error("Failed to process vote when restarting agreement", - "vote", vote) - } - } -} - -func (a *agreement) stop() { - a.restart(make(map[types.NodeID]struct{}), int(math.MaxInt32), - types.Position{ - Height: math.MaxUint64, - }, - types.NodeID{}, common.Hash{}) -} - -func isStop(aID types.Position) bool { - return aID.Height == math.MaxUint64 -} - -// clocks returns how many time this state is required. -func (a *agreement) clocks() int { - a.data.lock.RLock() - defer a.data.lock.RUnlock() - scale := int(a.data.period) - 1 - if a.state.state() == stateForward { - scale = 1 - } - if scale < 1 { - // just in case. - scale = 1 - } - // 10 is a magic number derived from many years of experience. - if scale > 10 { - scale = 10 - } - return a.state.clocks() * scale -} - -// pullVotes returns if current agreement requires more votes to continue. -func (a *agreement) pullVotes() bool { - a.data.lock.RLock() - defer a.data.lock.RUnlock() - return a.state.state() == statePullVote || - a.state.state() == stateInitial || - (a.state.state() == statePreCommit && (a.data.period%3) == 0) -} - -// agreementID returns the current agreementID. -func (a *agreement) agreementID() types.Position { - return a.aID.Load().(struct { - pos types.Position - leader types.NodeID - }).pos -} - -// leader returns the current leader. -func (a *agreement) leader() types.NodeID { - return a.aID.Load().(struct { - pos types.Position - leader types.NodeID - }).leader -} - -// nextState is called at the specific clock time. -func (a *agreement) nextState() (err error) { - a.lock.Lock() - defer a.lock.Unlock() - if a.hasOutput { - a.state = newSleepState(a.data) - return - } - a.state, err = a.state.nextState() - return -} - -func (a *agreement) sanityCheck(vote *types.Vote) error { - if vote.Type >= types.MaxVoteType { - return ErrInvalidVote - } - ok, err := utils.VerifyVoteSignature(vote) - if err != nil { - return err - } - if !ok { - return ErrIncorrectVoteSignature - } - if vote.Position.Round != a.agreementID().Round { - // TODO(jimmy): maybe we can verify partial signature at agreement-mgr. - return nil - } - if ok, report := a.data.recv.VerifyPartialSignature(vote); !ok { - if report { - return ErrIncorrectVotePartialSignature - } - return ErrSkipButNoError - } - return nil -} - -func (a *agreement) checkForkVote(vote *types.Vote) ( - alreadyExist bool, err error) { - a.data.lock.RLock() - defer a.data.lock.RUnlock() - if votes, exist := a.data.votes[vote.Period]; exist { - if oldVote, exist := votes[vote.Type][vote.ProposerID]; exist { - alreadyExist = true - if vote.BlockHash != oldVote.BlockHash { - a.data.recv.ReportForkVote(oldVote, vote) - err = &ErrForkVote{vote.ProposerID, oldVote, vote} - return - } - } - } - return -} - -// prepareVote prepares a vote. 
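
checkForkVote above is the equivocation detector: a second vote from the same proposer for the same period and vote type but a different block hash is reported as a fork vote, while an identical redelivery is simply ignored. Its core check, extracted:

    func isForkVote(votes map[uint64][]map[types.NodeID]*types.Vote,
    	v *types.Vote) (old *types.Vote, fork bool) {
    	if byType, ok := votes[v.Period]; ok {
    		if prev, ok := byType[v.Type][v.ProposerID]; ok &&
    			prev.BlockHash != v.BlockHash {
    			return prev, true // one slot, two hashes: provable equivocation
    		}
    	}
    	return nil, false
    }
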
-func (a *agreement) prepareVote(vote *types.Vote) (err error) { - vote.Position = a.agreementID() - err = a.signer.SignVote(vote) - return -} - -func (a *agreement) updateFilter(filter *utils.VoteFilter) { - if isStop(a.agreementID()) { - return - } - a.lock.RLock() - defer a.lock.RUnlock() - a.data.lock.RLock() - defer a.data.lock.RUnlock() - filter.Confirm = a.hasOutput - filter.LockIter = a.data.lockIter - filter.Period = a.data.period - filter.Position.Height = a.agreementID().Height -} - -// processVote is the entry point for processing Vote. -func (a *agreement) processVote(vote *types.Vote) error { - a.lock.Lock() - defer a.lock.Unlock() - if err := a.sanityCheck(vote); err != nil { - return err - } - aID := a.agreementID() - - // Agreement module has stopped. - if isStop(aID) { - // Hacky way to not drop first votes when round just begins. - if vote.Position.Round == aID.Round { - a.pendingVote = append(a.pendingVote, pendingVote{ - vote: vote, - receivedTime: time.Now().UTC(), - }) - return nil - } - return ErrSkipButNoError - } - if vote.Position != aID { - if aID.Newer(vote.Position) { - return nil - } - a.pendingVote = append(a.pendingVote, pendingVote{ - vote: vote, - receivedTime: time.Now().UTC(), - }) - return nil - } - exist, err := a.checkForkVote(vote) - if err != nil { - return err - } - if exist { - return nil - } - - a.data.lock.Lock() - defer a.data.lock.Unlock() - if _, exist := a.data.votes[vote.Period]; !exist { - a.data.votes[vote.Period] = newVoteListMap() - } - if _, exist := a.data.votes[vote.Period][vote.Type][vote.ProposerID]; exist { - return nil - } - a.data.votes[vote.Period][vote.Type][vote.ProposerID] = vote - if !a.hasOutput && - (vote.Type == types.VoteCom || - vote.Type == types.VoteFast || - vote.Type == types.VoteFastCom) { - if hash, ok := a.data.countVoteNoLock(vote.Period, vote.Type); ok && - hash != types.SkipBlockHash { - if vote.Type == types.VoteFast { - if !a.hasVoteFast { - if a.state.state() == stateFast || - a.state.state() == stateFastVote { - a.data.recv.ProposeVote( - types.NewVote(types.VoteFastCom, hash, vote.Period)) - a.hasVoteFast = true - - } - if a.data.lockIter == 0 { - a.data.lockValue = hash - a.data.lockIter = 1 - } - } - } else { - a.hasOutput = true - a.data.recv.ConfirmBlock(hash, - a.data.votes[vote.Period][vote.Type]) - if a.doneChan != nil { - close(a.doneChan) - a.doneChan = nil - } - } - return nil - } - } else if a.hasOutput { - return nil - } - - // Check if the agreement requires fast-forwarding. - if len(a.fastForward) > 0 { - return nil - } - if vote.Type == types.VotePreCom { - if vote.Period < a.data.lockIter { - // This PreCom is useless for us. - return nil - } - if hash, ok := a.data.countVoteNoLock(vote.Period, vote.Type); ok && - hash != types.SkipBlockHash { - // Condition 1. - if vote.Period > a.data.lockIter { - a.data.lockValue = hash - a.data.lockIter = vote.Period - } - // Condition 2. - if vote.Period > a.data.period { - a.fastForward <- vote.Period - if a.doneChan != nil { - close(a.doneChan) - a.doneChan = nil - } - return nil - } - } - } - // Condition 3. 
- if vote.Type == types.VoteCom && vote.Period >= a.data.period && - len(a.data.votes[vote.Period][types.VoteCom]) >= a.data.requiredVote { - hashes := common.Hashes{} - addPullBlocks := func(voteType types.VoteType) { - for _, vote := range a.data.votes[vote.Period][voteType] { - if vote.BlockHash == types.NullBlockHash || - vote.BlockHash == types.SkipBlockHash { - continue - } - if _, found := a.findCandidateBlockNoLock(vote.BlockHash); !found { - hashes = append(hashes, vote.BlockHash) - } - } - } - addPullBlocks(types.VotePreCom) - addPullBlocks(types.VoteCom) - if len(hashes) > 0 { - a.data.recv.PullBlocks(hashes) - } - a.fastForward <- vote.Period + 1 - if a.doneChan != nil { - close(a.doneChan) - a.doneChan = nil - } - return nil - } - return nil -} - -func (a *agreement) processFinalizedBlock(block *types.Block) { - a.lock.Lock() - defer a.lock.Unlock() - if a.hasOutput { - return - } - aID := a.agreementID() - if aID.Older(block.Position) { - return - } - a.addCandidateBlockNoLock(block) - a.hasOutput = true - a.data.lock.Lock() - defer a.data.lock.Unlock() - a.data.recv.ConfirmBlock(block.Hash, nil) - if a.doneChan != nil { - close(a.doneChan) - a.doneChan = nil - } -} - -func (a *agreement) processAgreementResult(result *types.AgreementResult) error { - a.lock.Lock() - defer a.lock.Unlock() - aID := a.agreementID() - if result.Position.Older(aID) { - return nil - } else if result.Position.Newer(aID) { - a.pendingAgreementResult[result.Position] = result - return nil - } - if a.hasOutput { - return nil - } - a.data.lock.Lock() - defer a.data.lock.Unlock() - if _, exist := a.findCandidateBlockNoLock(result.BlockHash); !exist { - a.data.recv.PullBlocks(common.Hashes{result.BlockHash}) - } - a.hasOutput = true - a.data.recv.ConfirmBlock(result.BlockHash, nil) - if a.doneChan != nil { - close(a.doneChan) - a.doneChan = nil - } - return nil -} - -func (a *agreement) done() <-chan struct{} { - a.lock.Lock() - defer a.lock.Unlock() - select { - case period := <-a.fastForward: - a.data.lock.Lock() - defer a.data.lock.Unlock() - if period <= a.data.period { - break - } - a.data.setPeriod(period) - a.state = newPreCommitState(a.data) - a.doneChan = make(chan struct{}) - return closedchan - default: - } - if a.doneChan == nil { - return closedchan - } - return a.doneChan -} - -func (a *agreement) confirmed() bool { - a.lock.RLock() - defer a.lock.RUnlock() - return a.confirmedNoLock() -} - -func (a *agreement) confirmedNoLock() bool { - return a.hasOutput -} - -// processBlock is the entry point for processing Block. -func (a *agreement) processBlock(block *types.Block) error { - checkSkip := func() bool { - aID := a.agreementID() - if block.Position != aID { - // Agreement module has stopped. - if !isStop(aID) { - if aID.Newer(block.Position) { - return true - } - } - } - return false - } - if checkSkip() { - return nil - } - if err := utils.VerifyBlockSignature(block); err != nil { - return err - } - - a.lock.Lock() - defer a.lock.Unlock() - a.data.blocksLock.Lock() - defer a.data.blocksLock.Unlock() - aID := a.agreementID() - // a.agreementID might change during lock, so we need to checkSkip again. 
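
done() above leans on the package-level closedchan: whenever there is nothing left to wait on, it returns a channel that was closed once in init(), so a caller's receive completes immediately and no fresh channel is allocated per call. The idiom on its own:

    var closedchan = make(chan struct{})

    func init() { close(closedchan) }

    func doneChan(pending chan struct{}) <-chan struct{} {
    	if pending == nil {
    		return closedchan // receive never blocks
    	}
    	return pending
    }
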
-	if checkSkip() {
-		return nil
-	} else if aID != block.Position {
-		a.pendingBlock = append(a.pendingBlock, pendingBlock{
-			block: block,
-			receivedTime: time.Now().UTC(),
-		})
-		return nil
-	} else if a.confirmedNoLock() {
-		return nil
-	}
-	if b, exist := a.data.blocks[block.ProposerID]; exist {
-		if b.Hash != block.Hash {
-			a.data.recv.ReportForkBlock(b, block)
-			return &ErrFork{block.ProposerID, b.Hash, block.Hash}
-		}
-		return nil
-	}
-	if err := a.data.leader.processBlock(block); err != nil {
-		return err
-	}
-	a.data.blocks[block.ProposerID] = block
-	a.addCandidateBlockNoLock(block)
-	if block.ProposerID != a.data.ID &&
-		(a.state.state() == stateFast || a.state.state() == stateFastVote) &&
-		block.ProposerID == a.leader() {
-		go func() {
-			for func() bool {
-				if aID != a.agreementID() {
-					return false
-				}
-				a.lock.RLock()
-				defer a.lock.RUnlock()
-				if a.state.state() != stateFast && a.state.state() != stateFastVote {
-					return false
-				}
-				a.data.lock.RLock()
-				defer a.data.lock.RUnlock()
-				a.data.blocksLock.Lock()
-				defer a.data.blocksLock.Unlock()
-				block, exist := a.data.blocks[a.leader()]
-				if !exist {
-					return true
-				}
-				ok, err := a.data.leader.validLeader(block, a.data.leader.hashCRS)
-				if err != nil {
-					fmt.Println("Error checking validLeader for Fast BA",
-						"error", err, "block", block)
-					return false
-				}
-				if ok {
-					a.data.recv.ProposeVote(
-						types.NewVote(types.VoteFast, block.Hash, a.data.period))
-					return false
-				}
-				return true
-			}() {
-				// TODO(jimmy): retry interval should be related to configurations.
-				time.Sleep(250 * time.Millisecond)
-			}
-		}()
-	}
-	return nil
-}
-
-func (a *agreement) addCandidateBlock(block *types.Block) {
-	a.lock.Lock()
-	defer a.lock.Unlock()
-	a.addCandidateBlockNoLock(block)
-}
-
-func (a *agreement) addCandidateBlockNoLock(block *types.Block) {
-	a.candidateBlock[block.Hash] = block
-}
-
-func (a *agreement) findCandidateBlockNoLock(
-	hash common.Hash) (*types.Block, bool) {
-	b, e := a.candidateBlock[hash]
-	return b, e
-}
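// [Editor's note] In processBlock above, the first block seen from each
// proposer is cached in a.data.blocks; a later block from the same proposer
// with a different hash at the same position is treated as proof of
// equivocation and reported through ReportForkBlock. The rule, reduced to a
// standalone sketch (illustrative names only):
//
//	// recordProposal returns the two conflicting hashes when proposer p
//	// equivocates, and nil otherwise.
//	func recordProposal(seen map[string]string, p, h string) []string {
//		if prev, ok := seen[p]; ok {
//			if prev != h {
//				return []string{prev, h} // fork: same proposer, two blocks
//			}
//			return nil // same block again: harmless duplicate
//		}
//		seen[p] = h
//		return nil
//	}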
-// find a block in both candidate blocks and pending blocks in leader-selector.
-// A block might be confirmed by others while we can't verify its validity.
-func (a *agreement) findBlockNoLock(hash common.Hash) (*types.Block, bool) {
-	b, e := a.findCandidateBlockNoLock(hash)
-	if !e {
-		b, e = a.data.leader.findPendingBlock(hash)
-	}
-	return b, e
-}
-
-func (a *agreementData) countVote(period uint64, voteType types.VoteType) (
-	blockHash common.Hash, ok bool) {
-	a.lock.RLock()
-	defer a.lock.RUnlock()
-	return a.countVoteNoLock(period, voteType)
-}
-
-func (a *agreementData) countVoteNoLock(
-	period uint64, voteType types.VoteType) (blockHash common.Hash, ok bool) {
-	votes, exist := a.votes[period]
-	if !exist {
-		return
-	}
-	candidate := make(map[common.Hash]int)
-	for _, vote := range votes[voteType] {
-		if _, exist := candidate[vote.BlockHash]; !exist {
-			candidate[vote.BlockHash] = 0
-		}
-		candidate[vote.BlockHash]++
-	}
-	for candidateHash, votes := range candidate {
-		if votes >= a.requiredVote {
-			blockHash = candidateHash
-			ok = true
-			return
-		}
-	}
-	return
-}
-
-func (a *agreementData) setPeriod(period uint64) {
-	for i := a.period + 1; i <= period; i++ {
-		if _, exist := a.votes[i]; !exist {
-			a.votes[i] = newVoteListMap()
-		}
-	}
-	a.period = period
-}
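// [Editor's note] A worked micro-example of countVoteNoLock above, assuming
// requiredVote = 3 (i.e. 2f+1 with f = 1): votes {n1:A, n2:A, n3:B, n4:A}
// yield (A, true) because A reaches 3 votes, while {n1:A, n2:A, n3:B, n4:B}
// yield (_, false) because no single hash reaches the threshold. Votes are
// tallied per period and per vote type, so one node is never counted twice
// for the same decision. As a runnable check (illustrative, with the usual
// package main / import "fmt" scaffolding assumed):
//
//	func main() {
//		votes := map[string]string{"n1": "A", "n2": "A", "n3": "B", "n4": "A"}
//		tally := map[string]int{}
//		for _, h := range votes {
//			tally[h]++
//		}
//		fmt.Println(tally["A"] >= 3) // true: "A" has a quorum
//	}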
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/blockchain.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/blockchain.go
deleted file mode 100644
index 4fae221c7..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/blockchain.go
+++ /dev/null
@@ -1,681 +0,0 @@
-// Copyright 2019 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"math"
-	"sort"
-	"sync"
-	"time"
-
-	"github.com/dexon-foundation/dexon-consensus/common"
-	"github.com/dexon-foundation/dexon-consensus/core/crypto"
-	"github.com/dexon-foundation/dexon-consensus/core/types"
-	"github.com/dexon-foundation/dexon-consensus/core/utils"
-)
-
-// Errors for sanity checks.
-var (
-	ErrBlockFromOlderPosition = errors.New("block from older position")
-	ErrNotGenesisBlock = errors.New("not a genesis block")
-	ErrIsGenesisBlock = errors.New("is a genesis block")
-	ErrIncorrectParentHash = errors.New("incorrect parent hash")
-	ErrInvalidBlockHeight = errors.New("invalid block height")
-	ErrInvalidRoundID = errors.New("invalid round id")
-	ErrInvalidTimestamp = errors.New("invalid timestamp")
-	ErrNotFollowTipPosition = errors.New("not follow tip position")
-	ErrDuplicatedPendingBlock = errors.New("duplicated pending block")
-	ErrRetrySanityCheckLater = errors.New("retry sanity check later")
-	ErrRoundNotSwitch = errors.New("round not switch")
-	ErrIncorrectAgreementResult = errors.New(
-		"incorrect block randomness result")
-	ErrMissingRandomness = errors.New("missing block randomness")
-)
-
-const notReadyHeight uint64 = math.MaxUint64
-
-type pendingBlockRecord struct {
-	position types.Position
-	block *types.Block
-}
-
-type pendingBlockRecords []pendingBlockRecord
-
-func (pb *pendingBlockRecords) insert(p pendingBlockRecord) error {
-	idx := sort.Search(len(*pb), func(i int) bool {
-		return !(*pb)[i].position.Older(p.position)
-	})
-	switch idx {
-	case len(*pb):
-		*pb = append(*pb, p)
-	default:
-		if (*pb)[idx].position.Equal(p.position) {
-			// Allow overwriting the pending block record for empty blocks; we
-			// may need to pull that block from others when its parent is not
-			// found locally.
-			if (*pb)[idx].block == nil && p.block != nil {
-				(*pb)[idx].block = p.block
-				return nil
-			}
-			return ErrDuplicatedPendingBlock
-		}
-		// Insert the value to that index.
-		*pb = append((*pb), pendingBlockRecord{})
-		copy((*pb)[idx+1:], (*pb)[idx:])
-		(*pb)[idx] = p
-	}
-	return nil
-}
-
-func (pb pendingBlockRecords) searchByHeight(h uint64) (
-	pendingBlockRecord, bool) {
-	idx := sort.Search(len(pb), func(i int) bool {
-		return pb[i].position.Height >= h
-	})
-	if idx == len(pb) || pb[idx].position.Height != h {
-		return pendingBlockRecord{}, false
-	}
-	return pb[idx], true
-}
-
-func (pb pendingBlockRecords) searchByPosition(p types.Position) (
-	pendingBlockRecord, bool) {
-	idx := sort.Search(len(pb), func(i int) bool {
-		return !pb[i].block.Position.Older(p)
-	})
-	if idx == len(pb) || !pb[idx].position.Equal(p) {
-		return pendingBlockRecord{}, false
-	}
-	return pb[idx], true
-}
-
-type blockChainConfig struct {
-	utils.RoundBasedConfig
-
-	minBlockInterval time.Duration
-}
-
-func (c *blockChainConfig) fromConfig(round uint64, config *types.Config) {
-	c.minBlockInterval = config.MinBlockInterval
-	c.SetupRoundBasedFields(round, config)
-}
-
-func newBlockChainConfig(prev blockChainConfig, config *types.Config) (
-	c blockChainConfig) {
-	c = blockChainConfig{}
-	c.fromConfig(prev.RoundID()+1, config)
-	c.AppendTo(prev.RoundBasedConfig)
-	return
-}
-
-type tsigVerifierGetter interface {
-	UpdateAndGet(uint64) (TSigVerifier, bool, error)
-	Purge(uint64)
-}
-
-type blockChain struct {
-	lock sync.RWMutex
-	ID types.NodeID
-	lastConfirmed *types.Block
-	lastDelivered *types.Block
-	signer *utils.Signer
-	vGetter tsigVerifierGetter
-	app Application
-	logger common.Logger
-	pendingRandomnesses map[types.Position][]byte
-	configs []blockChainConfig
-	pendingBlocks pendingBlockRecords
-	confirmedBlocks types.BlocksByPosition
-	dMoment time.Time
-
-	// Do not access this variable besides processAgreementResult.
-	lastPosition types.Position
-}
-
-func newBlockChain(nID types.NodeID, dMoment time.Time, initBlock *types.Block,
-	app Application, vGetter tsigVerifierGetter, signer *utils.Signer,
-	logger common.Logger) *blockChain {
-	return &blockChain{
-		ID: nID,
-		lastConfirmed: initBlock,
-		lastDelivered: initBlock,
-		signer: signer,
-		vGetter: vGetter,
-		app: app,
-		logger: logger,
-		dMoment: dMoment,
-		pendingRandomnesses: make(
-			map[types.Position][]byte),
-	}
-}
-
-func (bc *blockChain) notifyRoundEvents(evts []utils.RoundEventParam) error {
-	bc.lock.Lock()
-	defer bc.lock.Unlock()
-	apply := func(e utils.RoundEventParam) error {
-		if len(bc.configs) > 0 {
-			lastCfg := bc.configs[len(bc.configs)-1]
-			if e.BeginHeight != lastCfg.RoundEndHeight() {
-				return ErrInvalidBlockHeight
-			}
-			if lastCfg.RoundID() == e.Round {
-				bc.configs[len(bc.configs)-1].ExtendLength()
-			} else if lastCfg.RoundID()+1 == e.Round {
-				bc.configs = append(bc.configs, newBlockChainConfig(
-					lastCfg, e.Config))
-			} else {
-				return ErrInvalidRoundID
-			}
-		} else {
-			c := blockChainConfig{}
-			c.fromConfig(e.Round, e.Config)
-			c.SetRoundBeginHeight(e.BeginHeight)
-			if bc.lastConfirmed == nil {
-				if c.RoundID() != 0 {
-					panic(fmt.Errorf(
-						"genesis config should be from round 0, but %d",
-						c.RoundID()))
-				}
-			} else {
-				if c.RoundID() != bc.lastConfirmed.Position.Round {
-					panic(fmt.Errorf("incompatible config/block round %s %d",
-						bc.lastConfirmed, c.RoundID()))
-				}
-				if !c.Contains(bc.lastConfirmed.Position.Height) {
-					panic(fmt.Errorf(
-						"unmatched round-event with block %s %d %d %d",
-						bc.lastConfirmed, e.Round, e.Reset, e.BeginHeight))
-				}
-			}
-			bc.configs = append(bc.configs, c)
-		}
-		return nil
-	}
-	for _, e := range evts {
-		if err := apply(e); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (bc *blockChain) proposeBlock(position types.Position,
-	proposeTime time.Time, isEmpty bool) (b *types.Block, err error) {
-	bc.lock.RLock()
-	defer bc.lock.RUnlock()
-	return bc.prepareBlock(position, proposeTime, isEmpty)
-}
-
-func (bc *blockChain) extractBlocks() (ret []*types.Block) {
-	bc.lock.Lock()
-	defer bc.lock.Unlock()
-	for len(bc.confirmedBlocks) > 0 {
-		c := bc.confirmedBlocks[0]
-		if c.Position.Round >= DKGDelayRound &&
-			len(c.Randomness) == 0 &&
-			!bc.setRandomnessFromPending(c) {
-			break
-		}
-		c, bc.confirmedBlocks = bc.confirmedBlocks[0], bc.confirmedBlocks[1:]
-		ret = append(ret, c)
-		bc.lastDelivered = c
-	}
-	return
-}
-
-func (bc *blockChain) sanityCheck(b *types.Block) error {
-	bc.lock.RLock()
-	defer bc.lock.RUnlock()
-	if bc.lastConfirmed == nil {
-		// It should be a genesis block.
-		if !b.IsGenesis() {
-			return ErrNotGenesisBlock
-		}
-		if b.Timestamp.Before(bc.dMoment.Add(bc.configs[0].minBlockInterval)) {
-			return ErrInvalidTimestamp
-		}
-		return nil
-	}
-	if b.IsGenesis() {
-		return ErrIsGenesisBlock
-	}
-	if b.Position.Height != bc.lastConfirmed.Position.Height+1 {
-		if b.Position.Height > bc.lastConfirmed.Position.Height {
-			return ErrRetrySanityCheckLater
-		}
-		return ErrInvalidBlockHeight
-	}
-	tipConfig := bc.tipConfig()
-	if tipConfig.IsLastBlock(bc.lastConfirmed) {
-		if b.Position.Round != bc.lastConfirmed.Position.Round+1 {
-			return ErrRoundNotSwitch
-		}
-	} else {
-		if b.Position.Round != bc.lastConfirmed.Position.Round {
-			return ErrInvalidRoundID
-		}
-	}
-	if !b.ParentHash.Equal(bc.lastConfirmed.Hash) {
-		return ErrIncorrectParentHash
-	}
-	if b.Timestamp.Before(bc.lastConfirmed.Timestamp.Add(
-		tipConfig.minBlockInterval)) {
-		return ErrInvalidTimestamp
-	}
-	if err := utils.VerifyBlockSignature(b); err != nil {
-		return err
-	}
-	return nil
-}
-
-// addEmptyBlock is called when an empty block is confirmed by BA.
-func (bc *blockChain) addEmptyBlock(position types.Position) (
-	*types.Block, error) {
-	bc.lock.Lock()
-	defer bc.lock.Unlock()
-	add := func() *types.Block {
-		emptyB, err := bc.prepareBlock(position, time.Time{}, true)
-		if err != nil || emptyB == nil {
-			// This helper is expected to be called when an empty block is ready
-			// to be confirmed.
-			panic(err)
-		}
-		bc.confirmBlock(emptyB)
-		bc.checkIfBlocksConfirmed()
-		return emptyB
-	}
-	if bc.lastConfirmed != nil {
-		if !position.Newer(bc.lastConfirmed.Position) {
-			bc.logger.Warn("Dropping empty block: older than tip",
-				"position", &position,
-				"last-confirmed", bc.lastConfirmed)
-			return nil, ErrBlockFromOlderPosition
-		}
-		if bc.lastConfirmed.Position.Height+1 == position.Height {
-			return add(), nil
-		}
-	} else if position.Height == types.GenesisHeight && position.Round == 0 {
-		return add(), nil
-	} else {
-		return nil, ErrInvalidBlockHeight
-	}
-	return nil, bc.addPendingBlockRecord(pendingBlockRecord{position, nil})
-}
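// [Editor's note] sanityCheck above validates, in order: genesis-ness against
// the current tip, height continuity (exactly tip height + 1, returning
// ErrRetrySanityCheckLater for blocks from the future), round continuity (the
// round may bump only right after the last block of a round), parent-hash
// linkage, the minimum block interval, and finally the block signature. The
// timestamp rule alone, as a standalone sketch (assumes import "time"; names
// are illustrative):
//
//	// validTimestamp mirrors the ErrInvalidTimestamp check: a child block
//	// may not be timestamped earlier than parent time + minBlockInterval.
//	func validTimestamp(parent, child time.Time, minInterval time.Duration) bool {
//		return !child.Before(parent.Add(minInterval))
//	}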
-// addBlock should be called when the block is confirmed by BA; we don't
-// perform a sanity check against this block, and it is OK to add a block
-// with a skipped height.
-func (bc *blockChain) addBlock(b *types.Block) error {
-	if b.Position.Round >= DKGDelayRound &&
-		len(b.Randomness) == 0 &&
-		!bc.setRandomnessFromPending(b) {
-		return ErrMissingRandomness
-	}
-	bc.lock.Lock()
-	defer bc.lock.Unlock()
-	confirmed := false
-	if bc.lastConfirmed != nil {
-		if !b.Position.Newer(bc.lastConfirmed.Position) {
-			bc.logger.Warn("Dropping block: older than tip",
-				"block", b, "last-confirmed", bc.lastConfirmed)
-			return nil
-		}
-		if bc.lastConfirmed.Position.Height+1 == b.Position.Height {
-			confirmed = true
-		}
-	} else if b.IsGenesis() {
-		confirmed = true
-	}
-	delete(bc.pendingRandomnesses, b.Position)
-	if !confirmed {
-		return bc.addPendingBlockRecord(pendingBlockRecord{b.Position, b})
-	}
-	bc.confirmBlock(b)
-	bc.checkIfBlocksConfirmed()
-	return nil
-}
-
-func (bc *blockChain) tipRound() uint64 {
-	bc.lock.RLock()
-	defer bc.lock.RUnlock()
-	if bc.lastConfirmed == nil {
-		return 0
-	}
-	offset, tipConfig := uint64(0), bc.tipConfig()
-	if tipConfig.IsLastBlock(bc.lastConfirmed) {
-		offset++
-	}
-	return bc.lastConfirmed.Position.Round + offset
-}
-
-func (bc *blockChain) confirmed(h uint64) bool {
-	bc.lock.RLock()
-	defer bc.lock.RUnlock()
-	if bc.lastConfirmed != nil && bc.lastConfirmed.Position.Height >= h {
-		return true
-	}
-	r, found := bc.pendingBlocks.searchByHeight(h)
-	if !found {
-		return false
-	}
-	return r.block != nil
-}
-
-func (bc *blockChain) nextBlock() (uint64, time.Time) {
-	bc.lock.RLock()
-	defer bc.lock.RUnlock()
-	// It's ok to access tip config directly without checking the existence of
-	// lastConfirmed block in the scenario of "nextBlock" method.
-	tip, config := bc.lastConfirmed, bc.configs[0]
-	if tip == nil {
-		return types.GenesisHeight, bc.dMoment
-	}
-	if tip != bc.lastDelivered {
-		// If tip is not delivered, we should not proceed to next block.
-		return notReadyHeight, time.Time{}
-	}
-	return tip.Position.Height + 1, tip.Timestamp.Add(config.minBlockInterval)
-}
-
-func (bc *blockChain) pendingBlocksWithoutRandomness() []*types.Block {
-	bc.lock.RLock()
-	defer bc.lock.RUnlock()
-	blocks := make([]*types.Block, 0)
-	for _, b := range bc.confirmedBlocks {
-		if b.Position.Round < DKGDelayRound ||
-			len(b.Randomness) > 0 ||
-			bc.setRandomnessFromPending(b) {
-			continue
-		}
-		blocks = append(blocks, b)
-	}
-	for _, r := range bc.pendingBlocks {
-		if r.position.Round < DKGDelayRound {
-			continue
-		}
-		if r.block != nil &&
-			len(r.block.Randomness) == 0 &&
-			!bc.setRandomnessFromPending(r.block) {
-			blocks = append(blocks, r.block)
-		}
-	}
-	return blocks
-}
-
-func (bc *blockChain) lastDeliveredBlock() *types.Block {
-	bc.lock.RLock()
-	defer bc.lock.RUnlock()
-	return bc.lastDelivered
-}
-
-func (bc *blockChain) lastPendingBlock() *types.Block {
-	bc.lock.RLock()
-	defer bc.lock.RUnlock()
-	if len(bc.confirmedBlocks) == 0 {
-		return nil
-	}
-	return bc.confirmedBlocks[0]
-}
-
-/////////////////////////////////////////////
-//
-// internal helpers
-//
-/////////////////////////////////////////////
-
-// findPendingBlock is a helper to find a block in either pending or confirmed
-// state by position.
-func (bc *blockChain) findPendingBlock(p types.Position) *types.Block {
-	if idx := sort.Search(len(bc.confirmedBlocks), func(i int) bool {
-		return !bc.confirmedBlocks[i].Position.Older(p)
-	}); idx != len(bc.confirmedBlocks) &&
-		bc.confirmedBlocks[idx].Position.Equal(p) {
-		return bc.confirmedBlocks[idx]
-	}
-	pendingRec, _ := bc.pendingBlocks.searchByPosition(p)
-	return pendingRec.block
-}
-
-func (bc *blockChain) addPendingBlockRecord(p pendingBlockRecord) error {
-	if err := bc.pendingBlocks.insert(p); err != nil {
-		if err == ErrDuplicatedPendingBlock {
-			// We need to ignore this error because BA might confirm duplicated
-			// blocks in position.
-			err = nil
-		}
-		return err
-	}
-	return nil
-}
-
-func (bc *blockChain) checkIfBlocksConfirmed() {
-	var err error
-	for len(bc.pendingBlocks) > 0 {
-		if bc.pendingBlocks[0].position.Height <
-			bc.lastConfirmed.Position.Height+1 {
-			panic(fmt.Errorf("unexpected case %s %s", bc.lastConfirmed,
-				bc.pendingBlocks[0].position))
-		}
-		if bc.pendingBlocks[0].position.Height >
-			bc.lastConfirmed.Position.Height+1 {
-			break
-		}
-		var pending pendingBlockRecord
-		pending, bc.pendingBlocks = bc.pendingBlocks[0], bc.pendingBlocks[1:]
-		nextTip := pending.block
-		if nextTip == nil {
-			if nextTip, err = bc.prepareBlock(
-				pending.position, time.Time{}, true); err != nil {
-				// It should not error when preparing an empty block for a
-				// correct position.
-				panic(err)
-			}
-		}
-		bc.confirmBlock(nextTip)
-	}
-}
-
-func (bc *blockChain) purgeConfig() {
-	for bc.configs[0].RoundID() < bc.lastConfirmed.Position.Round {
-		bc.configs = bc.configs[1:]
-	}
-	if bc.configs[0].RoundID() != bc.lastConfirmed.Position.Round {
-		panic(fmt.Errorf("mismatched tip config: %d %d",
-			bc.configs[0].RoundID(), bc.lastConfirmed.Position.Round))
-	}
-}
-
-func (bc *blockChain) verifyRandomness(
-	blockHash common.Hash, round uint64, randomness []byte) (bool, error) {
-	if round < DKGDelayRound {
-		return bytes.Compare(randomness, NoRand) == 0, nil
-	}
-	v, ok, err := bc.vGetter.UpdateAndGet(round)
-	if err != nil {
-		return false, err
-	}
-	if !ok {
-		return false, ErrTSigNotReady
-	}
-	return v.VerifySignature(blockHash, crypto.Signature{
-		Type: "bls",
-		Signature: randomness}), nil
-}
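// [Editor's note] verifyRandomness above has two regimes: rounds before
// DKGDelayRound have no DKG group key yet, so the randomness field must equal
// the NoRand sentinel; for later rounds it must be a valid BLS threshold
// signature over the block hash under that round's group public key (fetched
// through tsigVerifierGetter). The dispatch, reduced to a sketch (assumes
// import "bytes"; the verifier interface here is illustrative):
//
//	type tsigChecker interface{ check(hash, sig []byte) bool }
//
//	func checkRandomness(round, delayRound uint64, hash, rnd, noRand []byte,
//		v tsigChecker) bool {
//		if round < delayRound {
//			return bytes.Equal(rnd, noRand) // pre-DKG rounds carry the sentinel
//		}
//		return v.check(hash, rnd) // post-DKG rounds carry a threshold signature
//	}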
-func (bc *blockChain) prepareBlock(position types.Position,
-	proposeTime time.Time, empty bool) (b *types.Block, err error) {
-	b = &types.Block{Position: position, Timestamp: proposeTime}
-	tip := bc.lastConfirmed
-	// Make sure we can propose a block at expected position for callers.
-	if tip == nil {
-		if bc.configs[0].RoundID() != uint64(0) {
-			panic(fmt.Errorf(
-				"Genesis config should be ready when preparing genesis: %d",
-				bc.configs[0].RoundID()))
-		}
-		// It should be the case for genesis block.
-		if !position.Equal(types.Position{Height: types.GenesisHeight}) {
-			b, err = nil, ErrNotGenesisBlock
-			return
-		}
-		minExpectedTime := bc.dMoment.Add(bc.configs[0].minBlockInterval)
-		if empty {
-			b.Timestamp = minExpectedTime
-		} else {
-			bc.logger.Debug("Calling genesis Application.PreparePayload")
-			if b.Payload, err = bc.app.PreparePayload(b.Position); err != nil {
-				b = nil
-				return
-			}
-			bc.logger.Debug("Calling genesis Application.PrepareWitness")
-			if b.Witness, err = bc.app.PrepareWitness(0); err != nil {
-				b = nil
-				return
-			}
-			if proposeTime.Before(minExpectedTime) {
-				b.Timestamp = minExpectedTime
-			}
-		}
-	} else {
-		tipConfig := bc.tipConfig()
-		if tip.Position.Height+1 != position.Height {
-			b, err = nil, ErrNotFollowTipPosition
-			return
-		}
-		if tipConfig.IsLastBlock(tip) {
-			if tip.Position.Round+1 != position.Round {
-				b, err = nil, ErrRoundNotSwitch
-				return
-			}
-		} else {
-			if tip.Position.Round != position.Round {
-				b, err = nil, ErrInvalidRoundID
-				return
-			}
-		}
-		minExpectedTime := tip.Timestamp.Add(bc.configs[0].minBlockInterval)
-		b.ParentHash = tip.Hash
-		if !empty {
-			bc.logger.Debug("Calling Application.PreparePayload",
-				"position", b.Position)
-			if b.Payload, err = bc.app.PreparePayload(b.Position); err != nil {
-				b = nil
-				return
-			}
-			bc.logger.Debug("Calling Application.PrepareWitness",
-				"height", tip.Witness.Height)
-			if b.Witness, err = bc.app.PrepareWitness(
-				tip.Witness.Height); err != nil {
-				b = nil
-				return
-			}
-			if b.Timestamp.Before(minExpectedTime) {
-				b.Timestamp = minExpectedTime
-			}
-		} else {
-			b.Witness.Height = tip.Witness.Height
-			b.Witness.Data = make([]byte, len(tip.Witness.Data))
-			copy(b.Witness.Data, tip.Witness.Data)
-			b.Timestamp = minExpectedTime
-		}
-	}
-	if empty {
-		if b.Hash, err = utils.HashBlock(b); err != nil {
-			b = nil
-			return
-		}
-	} else {
-		if err = bc.signer.SignBlock(b); err != nil {
-			b = nil
-			return
-		}
-	}
-	return
-}
-
-func (bc *blockChain) tipConfig() blockChainConfig {
-	if bc.lastConfirmed == nil {
-		panic(fmt.Errorf("attempting to access config without tip"))
-	}
-	if bc.lastConfirmed.Position.Round != bc.configs[0].RoundID() {
-		panic(fmt.Errorf("inconsistent config and tip: %d %d",
-			bc.lastConfirmed.Position.Round, bc.configs[0].RoundID()))
-	}
-	return bc.configs[0]
-}
-
-func (bc *blockChain) confirmBlock(b *types.Block) {
-	if bc.lastConfirmed != nil &&
-		bc.lastConfirmed.Position.Height+1 != b.Position.Height {
-		panic(fmt.Errorf("confirmed blocks not continuous in height: %s %s",
-			bc.lastConfirmed, b))
-	}
-	bc.logger.Debug("Calling Application.BlockConfirmed", "block", b)
-	bc.app.BlockConfirmed(*b)
-	bc.lastConfirmed = b
-	bc.confirmedBlocks = append(bc.confirmedBlocks, b)
-	bc.purgeConfig()
-}
-
-func (bc *blockChain) setRandomnessFromPending(b *types.Block) bool {
-	if r, exist := bc.pendingRandomnesses[b.Position]; exist {
-		b.Randomness = r
-		delete(bc.pendingRandomnesses, b.Position)
-		return true
-	}
-	return false
-}
-
-func (bc *blockChain) processAgreementResult(result *types.AgreementResult) error {
-	if result.Position.Round < DKGDelayRound {
-		return nil
-	}
-	if !result.Position.Newer(bc.lastPosition) {
-		return ErrSkipButNoError
-	}
-	ok, err := bc.verifyRandomness(
-		result.BlockHash, result.Position.Round, result.Randomness)
-	if err != nil {
-		return err
-	}
-	if !ok {
-		return ErrIncorrectAgreementResult
-	}
-	bc.lock.Lock()
-	defer bc.lock.Unlock()
-	if !result.Position.Newer(bc.lastDelivered.Position) {
-		return nil
-	}
-	bc.pendingRandomnesses[result.Position] = result.Randomness
-	bc.lastPosition = bc.lastDelivered.Position
-	return nil
-}
-
-func (bc *blockChain) addBlockRandomness(pos types.Position, rand []byte) {
-	if pos.Round < DKGDelayRound {
-		return
-	}
-	bc.lock.Lock()
-	defer bc.lock.Unlock()
-	if !pos.Newer(bc.lastDelivered.Position) {
-		return
-	}
-	bc.pendingRandomnesses[pos] = rand
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/configuration-chain.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/configuration-chain.go
deleted file mode 100644
index 3b4cdbbc8..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/configuration-chain.go
+++ /dev/null
@@ -1,795 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
-	"context"
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/dexon-foundation/dexon-consensus/common"
-	"github.com/dexon-foundation/dexon-consensus/core/crypto"
-	"github.com/dexon-foundation/dexon-consensus/core/db"
-	"github.com/dexon-foundation/dexon-consensus/core/types"
-	typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
-	"github.com/dexon-foundation/dexon-consensus/core/utils"
-)
-
-// Errors for the configuration chain.
-var (
-	ErrDKGNotRegistered = fmt.Errorf(
-		"not yet registered in DKG protocol")
-	ErrTSigAlreadyRunning = fmt.Errorf(
-		"tsig is already running")
-	ErrDKGNotReady = fmt.Errorf(
-		"DKG is not ready")
-	ErrSkipButNoError = fmt.Errorf(
-		"skip but no error")
-	ErrDKGAborted = fmt.Errorf(
-		"DKG is aborted")
-)
-
-// ErrMismatchDKG represents a failed attempt to run the DKG protocol because
-// the registered DKG protocol is mismatched, in terms of round and resetCount.
-type ErrMismatchDKG struct {
-	expectRound, expectReset uint64
-	actualRound, actualReset uint64
-}
-
-func (e ErrMismatchDKG) Error() string {
-	return fmt.Sprintf(
-		"mismatch DKG, abort running: expect(%d %d) actual(%d %d)",
-		e.expectRound, e.expectReset, e.actualRound, e.actualReset)
-}
-
-type dkgStepFn func(round uint64, reset uint64) error
-
-type configurationChain struct {
-	ID types.NodeID
-	recv dkgReceiver
-	gov Governance
-	dkg *dkgProtocol
-	dkgRunPhases []dkgStepFn
-	logger common.Logger
-	dkgLock sync.RWMutex
-	dkgSigner map[uint64]*dkgShareSecret
-	npks map[uint64]*typesDKG.NodePublicKeys
-	complaints []*typesDKG.Complaint
-	dkgResult sync.RWMutex
-	tsig map[common.Hash]*tsigProtocol
-	tsigTouched map[common.Hash]struct{}
-	tsigReady *sync.Cond
-	cache *utils.NodeSetCache
-	db db.Database
-	notarySet map[types.NodeID]struct{}
-	mpkReady bool
-	pendingPrvShare map[types.NodeID]*typesDKG.PrivateShare
-	// TODO(jimmy-dexon): add timeout to pending psig.
-	pendingPsig map[common.Hash][]*typesDKG.PartialSignature
-	prevHash common.Hash
-	dkgCtx context.Context
-	dkgCtxCancel context.CancelFunc
-	dkgRunning bool
-}
-
-func newConfigurationChain(
-	ID types.NodeID,
-	recv dkgReceiver,
-	gov Governance,
-	cache *utils.NodeSetCache,
-	dbInst db.Database,
-	logger common.Logger) *configurationChain {
-	configurationChain := &configurationChain{
-		ID: ID,
-		recv: recv,
-		gov: gov,
-		logger: logger,
-		dkgSigner: make(map[uint64]*dkgShareSecret),
-		npks: make(map[uint64]*typesDKG.NodePublicKeys),
-		tsig: make(map[common.Hash]*tsigProtocol),
-		tsigTouched: make(map[common.Hash]struct{}),
-		tsigReady: sync.NewCond(&sync.Mutex{}),
-		cache: cache,
-		db: dbInst,
-		pendingPsig: make(map[common.Hash][]*typesDKG.PartialSignature),
-	}
-	configurationChain.initDKGPhasesFunc()
-	return configurationChain
-}
-
-func (cc *configurationChain) abortDKG(
-	parentCtx context.Context,
-	round, reset uint64) bool {
-	cc.dkgLock.Lock()
-	defer cc.dkgLock.Unlock()
-	if cc.dkg != nil {
-		return cc.abortDKGNoLock(parentCtx, round, reset)
-	}
-	return false
-}
-
-func (cc *configurationChain) abortDKGNoLock(
-	ctx context.Context,
-	round, reset uint64) bool {
-	if cc.dkg.round > round ||
-		(cc.dkg.round == round && cc.dkg.reset > reset) {
-		cc.logger.Error("Newer DKG already is registered",
-			"round", round,
-			"reset", reset)
-		return false
-	}
-	cc.logger.Error("Previous DKG is not finished",
-		"round", round,
-		"reset", reset,
-		"previous-round", cc.dkg.round,
-		"previous-reset", cc.dkg.reset)
-	// Abort DKG routine in previous round.
-	cc.logger.Error("Aborting DKG in previous round",
-		"round", round,
-		"previous-round", cc.dkg.round)
-	// Notify current running DKG protocol to abort.
-	if cc.dkgCtxCancel != nil {
-		cc.dkgCtxCancel()
-	}
-	cc.dkgLock.Unlock()
-	// Wait for the currently running DKG protocol to abort.
-	for {
-		cc.dkgLock.Lock()
-		if cc.dkgRunning == false {
-			cc.dkg = nil
-			break
-		}
-		select {
-		case <-ctx.Done():
-			return false
-		case <-time.After(100 * time.Millisecond):
-		}
-		cc.dkgLock.Unlock()
-	}
-	cc.logger.Error("Previous DKG aborted",
-		"round", round,
-		"reset", reset)
-	return cc.dkg == nil
-}
-
-func (cc *configurationChain) registerDKG(
-	parentCtx context.Context,
-	round, reset uint64,
-	threshold int) {
-	cc.dkgLock.Lock()
-	defer cc.dkgLock.Unlock()
-	if cc.dkg != nil {
-		// Make sure we only proceed when cc.dkg is nil.
-		if !cc.abortDKGNoLock(parentCtx, round, reset) {
-			return
-		}
-		select {
-		case <-parentCtx.Done():
-			return
-		default:
-		}
-		if cc.dkg != nil {
-			// This panic would only be raised when multiple attempts to
-			// register a DKG protocol happen at the same time.
-			panic(ErrMismatchDKG{
-				expectRound: round,
-				expectReset: reset,
-				actualRound: cc.dkg.round,
-				actualReset: cc.dkg.reset,
-			})
-		}
-	}
-	notarySet, err := cc.cache.GetNotarySet(round)
-	if err != nil {
-		cc.logger.Error("Error getting notary set from cache", "error", err)
-		return
-	}
-	cc.notarySet = notarySet
-	cc.pendingPrvShare = make(map[types.NodeID]*typesDKG.PrivateShare)
-	cc.mpkReady = false
-	cc.dkg, err = recoverDKGProtocol(cc.ID, cc.recv, round, reset, cc.db)
-	cc.dkgCtx, cc.dkgCtxCancel = context.WithCancel(parentCtx)
-	if err != nil {
-		panic(err)
-	}
-	if cc.dkg == nil {
-		cc.dkg = newDKGProtocol(
-			cc.ID,
-			cc.recv,
-			round,
-			reset,
-			threshold)
-
-		err = cc.db.PutOrUpdateDKGProtocol(cc.dkg.toDKGProtocolInfo())
-		if err != nil {
-			cc.logger.Error("Error put or update DKG protocol", "error",
-				err)
-			return
-		}
-	}
-
-	go func() {
-		ticker := newTicker(cc.gov, round, TickerDKG)
-		defer ticker.Stop()
-		<-ticker.Tick()
-		cc.dkgLock.Lock()
-		defer cc.dkgLock.Unlock()
-		if cc.dkg != nil && cc.dkg.round == round && cc.dkg.reset == reset {
-			cc.dkg.proposeMPKReady()
-		}
-	}()
-}
-
-func (cc *configurationChain) runDKGPhaseOne(round uint64, reset uint64) error {
-	if cc.dkg.round < round ||
-		(cc.dkg.round == round && cc.dkg.reset < reset) {
-		return ErrDKGNotRegistered
-	}
-	if cc.dkg.round != round || cc.dkg.reset != reset {
-		cc.logger.Warn("DKG canceled", "round", round, "reset", reset)
-		return ErrSkipButNoError
-	}
-	cc.logger.Debug("Calling Governance.IsDKGFinal", "round", round)
-	if cc.gov.IsDKGFinal(round) {
-		cc.logger.Warn("DKG already final", "round", round)
-		return ErrSkipButNoError
-	}
-	cc.logger.Debug("Calling Governance.IsDKGMPKReady", "round", round)
-	var err error
-	for err == nil && !cc.gov.IsDKGMPKReady(round) {
-		cc.dkgLock.Unlock()
-		cc.logger.Debug("DKG MPKs are not ready yet. Try again later...",
-			"nodeID", cc.ID,
-			"round", round)
-		select {
-		case <-cc.dkgCtx.Done():
-			err = ErrDKGAborted
-		case <-time.After(500 * time.Millisecond):
-		}
-		cc.dkgLock.Lock()
-	}
-	return err
-}
-
-func (cc *configurationChain) runDKGPhaseTwoAndThree(
-	round uint64, reset uint64) error {
-	// Check if this node successfully joined the protocol.
-	cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round)
-	mpks := cc.gov.DKGMasterPublicKeys(round)
-	inProtocol := false
-	for _, mpk := range mpks {
-		if mpk.ProposerID == cc.ID {
-			inProtocol = true
-			break
-		}
-	}
-	if !inProtocol {
-		cc.logger.Warn("Failed to join DKG protocol",
-			"round", round,
-			"reset", reset)
-		return ErrSkipButNoError
-	}
-	// Phase 2(T = 0): Exchange DKG secret key share.
-	if err := cc.dkg.processMasterPublicKeys(mpks); err != nil {
-		cc.logger.Error("Failed to process master public key",
-			"round", round,
-			"reset", reset,
-			"error", err)
-	}
-	cc.mpkReady = true
-	// The time to process private shares might be long; check for abort
-	// before entering that loop.
-	select {
-	case <-cc.dkgCtx.Done():
-		return ErrDKGAborted
-	default:
-	}
-	for _, prvShare := range cc.pendingPrvShare {
-		if err := cc.dkg.processPrivateShare(prvShare); err != nil {
-			cc.logger.Error("Failed to process private share",
-				"round", round,
-				"reset", reset,
-				"error", err)
-		}
-	}
-
-	// Phase 3(T = 0~λ): Propose complaint.
-	// Propose complaint is done in `processMasterPublicKeys`.
-	return nil
-}
-
-func (cc *configurationChain) runDKGPhaseFour() {
-	// Phase 4(T = λ): Propose nack complaints.
-	cc.dkg.proposeNackComplaints()
-}
-
-func (cc *configurationChain) runDKGPhaseFiveAndSix(round uint64, reset uint64) {
-	// Phase 5(T = 2λ): Propose anti nack complaint.
-	cc.logger.Debug("Calling Governance.DKGComplaints", "round", round)
-	cc.complaints = cc.gov.DKGComplaints(round)
-	if err := cc.dkg.processNackComplaints(cc.complaints); err != nil {
-		cc.logger.Error("Failed to process NackComplaint",
-			"round", round,
-			"reset", reset,
-			"error", err)
-	}
-
-	// Phase 6(T = 3λ): Rebroadcast anti nack complaint.
-	// Rebroadcast is done in `processPrivateShare`.
-}
-
-func (cc *configurationChain) runDKGPhaseSeven() {
-	// Phase 7(T = 4λ): Enforce complaints and nack complaints.
-	cc.dkg.enforceNackComplaints(cc.complaints)
-	// Enforce complaint is done in `processPrivateShare`.
-}
-
-func (cc *configurationChain) runDKGPhaseEight() {
-	// Phase 8(T = 5λ): DKG finalize.
-	cc.dkg.proposeFinalize()
-}
-
-func (cc *configurationChain) runDKGPhaseNine(round uint64, reset uint64) error {
-	// Phase 9(T = 6λ): DKG is ready.
-	// Normally, IsDKGFinal would return true here. Use this loop to guard
-	// against unexpected network fluctuations and to ensure the robustness of
-	// the DKG protocol.
-	cc.logger.Debug("Calling Governance.IsDKGFinal", "round", round)
-	var err error
-	for err == nil && !cc.gov.IsDKGFinal(round) {
-		cc.dkgLock.Unlock()
-		cc.logger.Debug("DKG is not ready yet. Try again later...",
-			"nodeID", cc.ID.String()[:6],
-			"round", round,
-			"reset", reset)
-		select {
-		case <-cc.dkgCtx.Done():
-			err = ErrDKGAborted
-		case <-time.After(500 * time.Millisecond):
-		}
-		cc.dkgLock.Lock()
-	}
-	if err != nil {
-		return err
-	}
-	cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round)
-	cc.logger.Debug("Calling Governance.DKGComplaints", "round", round)
-	npks, err := typesDKG.NewNodePublicKeys(round,
-		cc.gov.DKGMasterPublicKeys(round),
-		cc.gov.DKGComplaints(round),
-		cc.dkg.threshold)
-	if err != nil {
-		return err
-	}
-	qualifies := ""
-	for nID := range npks.QualifyNodeIDs {
-		qualifies += fmt.Sprintf("%s ", nID.String()[:6])
-	}
-	cc.logger.Info("Qualify Nodes",
-		"nodeID", cc.ID,
-		"round", round,
-		"reset", reset,
-		"count", len(npks.QualifyIDs),
-		"qualifies", qualifies)
-	if _, exist := npks.QualifyNodeIDs[cc.ID]; !exist {
-		cc.logger.Warn("Self is not in Qualify Nodes",
-			"round", round,
-			"reset", reset)
-		return nil
-	}
-	signer, err := cc.dkg.recoverShareSecret(npks.QualifyIDs)
-	if err != nil {
-		return err
-	}
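// [Editor's note] The DKG phases implemented above run on a fixed timetable
// measured in LambdaDKG (λ): T=0 phases 1-3 (wait for MPKs, exchange private
// shares, propose complaints), T=λ phase 4 (nack complaints), T=2λ..3λ
// phases 5-6 (anti-nack complaints and their rebroadcast), T=4λ phase 7
// (enforce complaints), T=5λ phase 8 (finalize), T=6λ phase 9 (wait for
// finality, recover the group share). runDKG below converts λ into block
// heights. The scheduling arithmetic, as a sketch:
//
//	// phaseDeadline is the height at which 0-based phase i should start for
//	// a DKG whose first phase starts at beginHeight; phaseHeight is LambdaDKG
//	// expressed in blocks (LambdaDKG / MinBlockInterval).
//	func phaseDeadline(beginHeight, phaseHeight uint64, i int) uint64 {
//		return beginHeight + phaseHeight*uint64(i)
//	}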
-	// Save private shares to DB.
-	if err =
-		cc.db.PutDKGPrivateKey(round, reset, *signer.privateKey); err != nil {
-		return err
-	}
-	cc.dkg.proposeSuccess()
-	cc.dkgResult.Lock()
-	defer cc.dkgResult.Unlock()
-	cc.dkgSigner[round] = signer
-	cc.npks[round] = npks
-	return nil
-}
-
-func (cc *configurationChain) initDKGPhasesFunc() {
-	cc.dkgRunPhases = []dkgStepFn{
-		func(round uint64, reset uint64) error {
-			return cc.runDKGPhaseOne(round, reset)
-		},
-		func(round uint64, reset uint64) error {
-			return cc.runDKGPhaseTwoAndThree(round, reset)
-		},
-		func(round uint64, reset uint64) error {
-			cc.runDKGPhaseFour()
-			return nil
-		},
-		func(round uint64, reset uint64) error {
-			cc.runDKGPhaseFiveAndSix(round, reset)
-			return nil
-		},
-		func(round uint64, reset uint64) error {
-			cc.runDKGPhaseSeven()
-			return nil
-		},
-		func(round uint64, reset uint64) error {
-			cc.runDKGPhaseEight()
-			return nil
-		},
-		func(round uint64, reset uint64) error {
-			return cc.runDKGPhaseNine(round, reset)
-		},
-	}
-}
-
-func (cc *configurationChain) runDKG(
-	round uint64, reset uint64, event *common.Event,
-	dkgBeginHeight, dkgHeight uint64) (err error) {
-	// Check if the corresponding DKG signer is ready.
-	if _, _, err = cc.getDKGInfo(round, false); err == nil {
-		return ErrSkipButNoError
-	}
-	cfg := utils.GetConfigWithPanic(cc.gov, round, cc.logger)
-	phaseHeight := uint64(
-		cfg.LambdaDKG.Nanoseconds() / cfg.MinBlockInterval.Nanoseconds())
-	skipPhase := int(dkgHeight / phaseHeight)
-	cc.logger.Info("Skipping DKG phase", "phase", skipPhase)
-	cc.dkgLock.Lock()
-	defer cc.dkgLock.Unlock()
-	if cc.dkg == nil {
-		return ErrDKGNotRegistered
-	}
-	// Make sure the existing dkgProtocol is the expected one.
-	if cc.dkg.round != round || cc.dkg.reset != reset {
-		return ErrMismatchDKG{
-			expectRound: round,
-			expectReset: reset,
-			actualRound: cc.dkg.round,
-			actualReset: cc.dkg.reset,
-		}
-	}
-	if cc.dkgRunning {
-		panic(fmt.Errorf("duplicated call to runDKG: %d %d", round, reset))
-	}
-	cc.dkgRunning = true
-	defer func() {
-		// We should still hold cc.dkgLock here; reset cc.dkg to nil when done.
-		if cc.dkg != nil {
-			cc.dkg = nil
-		}
-		cc.dkgRunning = false
-	}()
-	wg := sync.WaitGroup{}
-	var dkgError error
-	// Make a copy of cc.dkgCtx so each phase function can refer to the correct
-	// context.
-	ctx := cc.dkgCtx
-	cc.dkg.step = skipPhase
-	for i := skipPhase; i < len(cc.dkgRunPhases); i++ {
-		wg.Add(1)
-		event.RegisterHeight(dkgBeginHeight+phaseHeight*uint64(i), func(uint64) {
-			go func() {
-				defer wg.Done()
-				cc.dkgLock.Lock()
-				defer cc.dkgLock.Unlock()
-				if dkgError != nil {
-					return
-				}
-				select {
-				case <-ctx.Done():
-					dkgError = ErrDKGAborted
-					return
-				default:
-				}
-
-				err := cc.dkgRunPhases[cc.dkg.step](round, reset)
-				if err == nil || err == ErrSkipButNoError {
-					err = nil
-					cc.dkg.step++
-					err = cc.db.PutOrUpdateDKGProtocol(cc.dkg.toDKGProtocolInfo())
-					if err != nil {
-						cc.logger.Error("Failed to save DKG Protocol",
-							"step", cc.dkg.step,
-							"error", err)
-					}
-				}
-				if err != nil && dkgError == nil {
-					dkgError = err
-				}
-			}()
-		})
-	}
-	cc.dkgLock.Unlock()
-	wgChan := make(chan struct{}, 1)
-	go func() {
-		wg.Wait()
-		wgChan <- struct{}{}
-	}()
-	select {
-	case <-cc.dkgCtx.Done():
-	case <-wgChan:
-	}
-	cc.dkgLock.Lock()
-	select {
-	case <-cc.dkgCtx.Done():
-		return ErrDKGAborted
-	default:
-	}
-	return dkgError
-}
-
-func (cc *configurationChain) isDKGFinal(round uint64) bool {
-	if !cc.gov.IsDKGFinal(round) {
-		return false
-	}
-	_, _, err := cc.getDKGInfo(round, false)
-	return err == nil
-}
-
-func (cc *configurationChain) getDKGInfo(
-	round uint64, ignoreSigner bool) (
-	*typesDKG.NodePublicKeys, *dkgShareSecret, error) {
-	getFromCache := func() (*typesDKG.NodePublicKeys, *dkgShareSecret) {
-		cc.dkgResult.RLock()
-		defer cc.dkgResult.RUnlock()
-		npks := cc.npks[round]
-		signer := cc.dkgSigner[round]
-		return npks, signer
-	}
-	npks, signer := getFromCache()
-	if npks == nil || (!ignoreSigner && signer == nil) {
-		if err := cc.recoverDKGInfo(round, ignoreSigner); err != nil {
-			return nil, nil, err
-		}
-		npks, signer = getFromCache()
-	}
-	if npks == nil || (!ignoreSigner && signer == nil) {
-		return nil, nil, ErrDKGNotReady
-	}
-	return npks, signer, nil
-}
-
-func (cc *configurationChain) recoverDKGInfo(
-	round uint64, ignoreSigner bool) error {
-	var npksExists, signerExists bool
-	func() {
-		cc.dkgResult.Lock()
-		defer cc.dkgResult.Unlock()
-		_, signerExists = cc.dkgSigner[round]
-		_, npksExists = cc.npks[round]
-	}()
-	if signerExists && npksExists {
-		return nil
-	}
-	if !cc.gov.IsDKGFinal(round) {
-		return ErrDKGNotReady
-	}
-
-	threshold := utils.GetDKGThreshold(
-		utils.GetConfigWithPanic(cc.gov, round, cc.logger))
-	cc.logger.Debug("Calling Governance.DKGMasterPublicKeys for recoverDKGInfo",
-		"round", round)
-	mpk := cc.gov.DKGMasterPublicKeys(round)
-	cc.logger.Debug("Calling Governance.DKGComplaints for recoverDKGInfo",
-		"round", round)
-	comps := cc.gov.DKGComplaints(round)
-	qualifies, _, err := typesDKG.CalcQualifyNodes(mpk, comps, threshold)
-	if err != nil {
-		return err
-	}
-	if len(qualifies) <
-		utils.GetDKGValidThreshold(utils.GetConfigWithPanic(
-			cc.gov, round, cc.logger)) {
-		return typesDKG.ErrNotReachThreshold
-	}
-
-	if !npksExists {
-		npks, err := typesDKG.NewNodePublicKeys(round,
-			cc.gov.DKGMasterPublicKeys(round),
-			cc.gov.DKGComplaints(round),
-			threshold)
-		if err != nil {
-			cc.logger.Warn("Failed to create DKGNodePublicKeys",
-				"round", round, "error", err)
-			return err
-		}
-		func() {
-			cc.dkgResult.Lock()
-			defer cc.dkgResult.Unlock()
-			cc.npks[round] = npks
-		}()
-	}
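// [Editor's note] getDKGInfo above is a read-through cache: it serves npks
// and the DKG signer from in-memory maps guarded by dkgResult, and on a miss
// falls back to recoverDKGInfo (this function) to rebuild them from
// governance data and the local DB. The shape of that pattern, as a sketch
// (illustrative types, not from this package):
//
//	func getOrRebuild(cache map[uint64]string, round uint64,
//		rebuild func(uint64) (string, error)) (string, error) {
//		if v, ok := cache[round]; ok {
//			return v, nil // fast path: already recovered
//		}
//		v, err := rebuild(round) // slow path: governance + DB
//		if err != nil {
//			return "", err
//		}
//		cache[round] = v
//		return v, nil
//	}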
-	if !signerExists && !ignoreSigner {
-		reset := cc.gov.DKGResetCount(round)
-		// Check if we have private shares in DB.
-		prvKey, err := cc.db.GetDKGPrivateKey(round, reset)
-		if err != nil {
-			cc.logger.Warn("Failed to create DKGPrivateKey",
-				"round", round, "error", err)
-			dkgProtocolInfo, err := cc.db.GetDKGProtocol()
-			if err != nil {
-				cc.logger.Warn("Unable to recover DKGProtocolInfo",
-					"round", round, "error", err)
-				return err
-			}
-			if dkgProtocolInfo.Round != round {
-				cc.logger.Warn("DKGProtocolInfo round mismatch",
-					"round", round, "infoRound", dkgProtocolInfo.Round)
-				return err
-			}
-			prvKeyRecover, err :=
-				dkgProtocolInfo.PrvShares.RecoverPrivateKey(qualifies)
-			if err != nil {
-				cc.logger.Warn("Failed to recover DKGPrivateKey",
-					"round", round, "error", err)
-				return err
-			}
-			if err = cc.db.PutDKGPrivateKey(
-				round, reset, *prvKeyRecover); err != nil {
-				cc.logger.Warn("Failed to save DKGPrivateKey",
-					"round", round, "error", err)
-			}
-			prvKey = *prvKeyRecover
-		}
-		func() {
-			cc.dkgResult.Lock()
-			defer cc.dkgResult.Unlock()
-			cc.dkgSigner[round] = &dkgShareSecret{
-				privateKey: &prvKey,
-			}
-		}()
-	}
-	return nil
-}
-
-func (cc *configurationChain) preparePartialSignature(
-	round uint64, hash common.Hash) (*typesDKG.PartialSignature, error) {
-	_, signer, _ := cc.getDKGInfo(round, false)
-	if signer == nil {
-		return nil, ErrDKGNotReady
-	}
-	return &typesDKG.PartialSignature{
-		ProposerID: cc.ID,
-		Round: round,
-		Hash: hash,
-		PartialSignature: signer.sign(hash),
-	}, nil
-}
-
-func (cc *configurationChain) touchTSigHash(hash common.Hash) (first bool) {
-	cc.tsigReady.L.Lock()
-	defer cc.tsigReady.L.Unlock()
-	_, exist := cc.tsigTouched[hash]
-	cc.tsigTouched[hash] = struct{}{}
-	return !exist
-}
-
-func (cc *configurationChain) untouchTSigHash(hash common.Hash) {
-	cc.tsigReady.L.Lock()
-	defer cc.tsigReady.L.Unlock()
-	delete(cc.tsigTouched, hash)
-}
-
-func (cc *configurationChain) runTSig(
-	round uint64, hash common.Hash, wait time.Duration) (
-	crypto.Signature, error) {
-	npks, _, _ := cc.getDKGInfo(round, false)
-	if npks == nil {
-		return crypto.Signature{}, ErrDKGNotReady
-	}
-	cc.tsigReady.L.Lock()
-	defer cc.tsigReady.L.Unlock()
-	if _, exist := cc.tsig[hash]; exist {
-		return crypto.Signature{}, ErrTSigAlreadyRunning
-	}
-	cc.tsig[hash] = newTSigProtocol(npks, hash)
-	pendingPsig := cc.pendingPsig[hash]
-	delete(cc.pendingPsig, hash)
-	go func() {
-		for _, psig := range pendingPsig {
-			if err := cc.processPartialSignature(psig); err != nil {
-				cc.logger.Error("Failed to process partial signature",
-					"nodeID", cc.ID,
-					"error", err)
-			}
-		}
-	}()
-	timeout := make(chan struct{}, 1)
-	go func() {
-		time.Sleep(wait)
-		timeout <- struct{}{}
-		cc.tsigReady.Broadcast()
-	}()
-	var signature crypto.Signature
-	var err error
-	for func() bool {
-		signature, err = cc.tsig[hash].signature()
-		select {
-		case <-timeout:
-			return false
-		default:
-		}
-		return err == ErrNotEnoughtPartialSignatures
-	}() {
-		cc.tsigReady.Wait()
-	}
-	delete(cc.tsig, hash)
-	if err != nil {
-		return crypto.Signature{}, err
-	}
-	return signature, nil
-}
-
-func (cc *configurationChain) runCRSTSig(
-	round uint64, crs common.Hash) ([]byte, error) {
-	sig, err := cc.runTSig(round, crs, cc.gov.Configuration(round).LambdaDKG*5)
-	cc.logger.Info("CRS",
-		"nodeID", cc.ID,
-		"round", round+1,
-		"signature", sig)
-	return sig.Signature[:], err
-}
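// [Editor's note] runTSig above coordinates partial signatures with a
// sync.Cond: processPartialSignature Broadcasts whenever a new share arrives,
// and a helper goroutine Broadcasts once more after the timeout so the Wait
// loop always wakes up. One way to structure the same wait, as a sketch
// (assumes import "sync" and "time"; done must read state guarded by cond.L):
//
//	func waitWithTimeout(cond *sync.Cond, done func() bool,
//		timeout time.Duration) bool {
//		expired := false
//		go func() {
//			time.Sleep(timeout)
//			cond.L.Lock()
//			expired = true
//			cond.L.Unlock()
//			cond.Broadcast() // wake the waiter even if done never becomes true
//		}()
//		cond.L.Lock()
//		defer cond.L.Unlock()
//		for !done() && !expired {
//			cond.Wait()
//		}
//		return done()
//	}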
-func (cc *configurationChain) processPrivateShare(
-	prvShare *typesDKG.PrivateShare) error {
-	cc.dkgLock.Lock()
-	defer cc.dkgLock.Unlock()
-	if cc.dkg == nil {
-		return nil
-	}
-	if _, exist := cc.notarySet[prvShare.ProposerID]; !exist {
-		return ErrNotDKGParticipant
-	}
-	if !cc.mpkReady {
-		// TODO(jimmy-dexon): remove duplicated signature check in dkg module.
-		ok, err := utils.VerifyDKGPrivateShareSignature(prvShare)
-		if err != nil {
-			return err
-		}
-		if !ok {
-			return ErrIncorrectPrivateShareSignature
-		}
-		cc.pendingPrvShare[prvShare.ProposerID] = prvShare
-		return nil
-	}
-	return cc.dkg.processPrivateShare(prvShare)
-}
-
-func (cc *configurationChain) processPartialSignature(
-	psig *typesDKG.PartialSignature) error {
-	cc.tsigReady.L.Lock()
-	defer cc.tsigReady.L.Unlock()
-	if _, exist := cc.tsig[psig.Hash]; !exist {
-		ok, err := utils.VerifyDKGPartialSignatureSignature(psig)
-		if err != nil {
-			return err
-		}
-		if !ok {
-			return ErrIncorrectPartialSignatureSignature
-		}
-		cc.pendingPsig[psig.Hash] = append(cc.pendingPsig[psig.Hash], psig)
-		return nil
-	}
-	if err := cc.tsig[psig.Hash].processPartialSignature(psig); err != nil {
-		return err
-	}
-	cc.tsigReady.Broadcast()
-	return nil
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
deleted file mode 100644
index fd8456487..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
+++ /dev/null
@@ -1,1567 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
-	"context"
-	"encoding/hex"
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/dexon-foundation/dexon-consensus/common"
-	"github.com/dexon-foundation/dexon-consensus/core/crypto"
-	cryptoDKG "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
-	"github.com/dexon-foundation/dexon-consensus/core/db"
-	"github.com/dexon-foundation/dexon-consensus/core/types"
-	typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
-	"github.com/dexon-foundation/dexon-consensus/core/utils"
-)
-
-// Errors for consensus core.
-var (
-	ErrProposerNotInNodeSet = fmt.Errorf(
-		"proposer is not in node set")
-	ErrIncorrectHash = fmt.Errorf(
-		"hash of block is incorrect")
-	ErrIncorrectSignature = fmt.Errorf(
-		"signature of block is incorrect")
-	ErrUnknownBlockProposed = fmt.Errorf(
-		"unknown block is proposed")
-	ErrIncorrectAgreementResultPosition = fmt.Errorf(
-		"incorrect agreement result position")
-	ErrNotEnoughVotes = fmt.Errorf(
-		"not enough votes")
-	ErrCRSNotReady = fmt.Errorf(
-		"CRS not ready")
-	ErrConfigurationNotReady = fmt.Errorf(
-		"Configuration not ready")
-	ErrIncorrectBlockRandomness = fmt.Errorf(
-		"randomness of block is incorrect")
-	ErrCannotVerifyBlockRandomness = fmt.Errorf(
-		"cannot verify block randomness")
-)
-
-type selfAgreementResult types.AgreementResult
-
-// consensusBAReceiver implements agreementReceiver.
-type consensusBAReceiver struct {
-	consensus *Consensus
-	agreementModule *agreement
-	emptyBlockHashMap *sync.Map
-	isNotary bool
-	restartNotary chan types.Position
-	npks *typesDKG.NodePublicKeys
-	psigSigner *dkgShareSecret
-}
-
-func (recv *consensusBAReceiver) emptyBlockHash(pos types.Position) (
-	common.Hash, error) {
-	hashVal, ok := recv.emptyBlockHashMap.Load(pos)
-	if ok {
-		return hashVal.(common.Hash), nil
-	}
-	emptyBlock, err := recv.consensus.bcModule.prepareBlock(
-		pos, time.Time{}, true)
-	if err != nil {
-		return common.Hash{}, err
-	}
-	hash, err := utils.HashBlock(emptyBlock)
-	if err != nil {
-		return common.Hash{}, err
-	}
-	recv.emptyBlockHashMap.Store(pos, hash)
-	return hash, nil
-}
-
-func (recv *consensusBAReceiver) VerifyPartialSignature(vote *types.Vote) (
-	bool, bool) {
-	if vote.Position.Round >= DKGDelayRound && vote.BlockHash != types.SkipBlockHash {
-		if vote.Type == types.VoteCom || vote.Type == types.VoteFastCom {
-			if recv.npks == nil {
-				recv.consensus.logger.Debug(
-					"Unable to verify psig, npks is nil",
-					"vote", vote)
-				return false, false
-			}
-			if vote.Position.Round != recv.npks.Round {
-				recv.consensus.logger.Debug(
-					"Unable to verify psig, round of npks mismatch",
-					"vote", vote,
-					"npksRound", recv.npks.Round)
-				return false, false
-			}
-			pubKey, exist := recv.npks.PublicKeys[vote.ProposerID]
-			if !exist {
-				recv.consensus.logger.Debug(
-					"Unable to verify psig, proposer is not qualified",
-					"vote", vote)
-				return false, true
-			}
-			blockHash := vote.BlockHash
-			if blockHash == types.NullBlockHash {
-				var err error
-				blockHash, err = recv.emptyBlockHash(vote.Position)
-				if err != nil {
-					recv.consensus.logger.Error(
-						"Failed to verify vote for empty block",
-						"position", vote.Position,
-						"error", err)
-					return false, true
-				}
-			}
-			return pubKey.VerifySignature(
-				blockHash, crypto.Signature(vote.PartialSignature)), true
-		}
-	}
-	return len(vote.PartialSignature.Signature) == 0, true
-}
-
-func (recv *consensusBAReceiver) ProposeVote(vote *types.Vote) {
-	if !recv.isNotary {
-		return
-	}
-	if recv.psigSigner != nil &&
-		vote.BlockHash != types.SkipBlockHash {
-		if vote.Type == types.VoteCom || vote.Type == types.VoteFastCom {
-			if vote.BlockHash == types.NullBlockHash {
-				hash, err := recv.emptyBlockHash(vote.Position)
-				if err != nil {
-					recv.consensus.logger.Error(
-						"Failed to propose vote for empty block",
-						"position", vote.Position,
-						"error", err)
-					return
-				}
-				vote.PartialSignature = recv.psigSigner.sign(hash)
-			} else {
-				vote.PartialSignature = recv.psigSigner.sign(vote.BlockHash)
-			}
-		}
-	}
-	if err := recv.agreementModule.prepareVote(vote); err != nil {
-		recv.consensus.logger.Error("Failed to prepare vote", "error", err)
-		return
-	}
-	go func() {
-		if err := recv.agreementModule.processVote(vote); err != nil {
-			recv.consensus.logger.Error("Failed to process self vote",
-				"error", err,
-				"vote", vote)
-			return
-		}
-		recv.consensus.logger.Debug("Calling Network.BroadcastVote",
-			"vote", vote)
-		recv.consensus.network.BroadcastVote(vote)
-	}()
-}
-
-func (recv *consensusBAReceiver) ProposeBlock() common.Hash {
-	if !recv.isNotary {
-		return common.Hash{}
-	}
-	block, err := recv.consensus.proposeBlock(recv.agreementModule.agreementID())
-	if err != nil || block == nil {
-		recv.consensus.logger.Error("Unable to propose block", "error", err)
-		return types.NullBlockHash
-	}
-	go func() {
-		if err := recv.consensus.preProcessBlock(block); err != nil {
-			recv.consensus.logger.Error("Failed to pre-process block", "error", err)
-			return
-		}
-		recv.consensus.logger.Debug("Calling Network.BroadcastBlock",
-			"block", block)
-		recv.consensus.network.BroadcastBlock(block)
-	}()
-	return block.Hash
-}
-
-func (recv *consensusBAReceiver) ConfirmBlock(
-	hash common.Hash, votes map[types.NodeID]*types.Vote) {
-	var (
-		block *types.Block
-		aID = recv.agreementModule.agreementID()
-	)
-
-	isEmptyBlockConfirmed := hash == common.Hash{}
-	if isEmptyBlockConfirmed {
-		recv.consensus.logger.Info("Empty block is confirmed", "position", aID)
-		var err error
-		block, err = recv.consensus.bcModule.addEmptyBlock(aID)
-		if err != nil {
-			recv.consensus.logger.Error("Add position for empty failed",
-				"error", err)
-			return
-		}
-		if block == nil {
-			// The empty block's parent is not found locally, thus we can't
-			// propose it at this moment.
-			//
-			// We can only rely on block pulling upon receiving
-			// types.AgreementResult from the next position.
-			recv.consensus.logger.Warn(
-				"An empty block is confirmed without its parent",
-				"position", aID)
-			return
-		}
-	} else {
-		var exist bool
-		block, exist = recv.agreementModule.findBlockNoLock(hash)
-		if !exist {
-			recv.consensus.logger.Debug("Unknown block confirmed",
-				"hash", hash.String()[:6])
-			ch := make(chan *types.Block)
-			func() {
-				recv.consensus.lock.Lock()
-				defer recv.consensus.lock.Unlock()
-				recv.consensus.baConfirmedBlock[hash] = ch
-			}()
-			go func() {
-				hashes := common.Hashes{hash}
-			PullBlockLoop:
-				for {
-					recv.consensus.logger.Debug("Calling Network.PullBlock for BA block",
-						"hash", hash)
-					recv.consensus.network.PullBlocks(hashes)
-					select {
-					case block = <-ch:
-						break PullBlockLoop
-					case <-time.After(1 * time.Second):
-					}
-				}
-				recv.consensus.logger.Debug("Receive unknown block",
-					"hash", hash.String()[:6],
-					"position", block.Position)
-				recv.agreementModule.addCandidateBlock(block)
-				recv.agreementModule.lock.Lock()
-				defer recv.agreementModule.lock.Unlock()
-				recv.ConfirmBlock(block.Hash, votes)
-			}()
-			return
-		}
-	}
-
-	if len(votes) == 0 && len(block.Randomness) == 0 {
-		recv.consensus.logger.Error("No votes to recover randomness",
-			"block", block)
-	} else if votes != nil {
-		voteList := make([]types.Vote, 0, len(votes))
-		IDs := make(cryptoDKG.IDs, 0, len(votes))
-		psigs := make([]cryptoDKG.PartialSignature, 0, len(votes))
-		for _, vote := range votes {
-			if vote.BlockHash != hash {
-				continue
-			}
-			if block.Position.Round >= DKGDelayRound {
-				ID, exist := recv.npks.IDMap[vote.ProposerID]
-				if !exist {
-					continue
-				}
-				IDs = append(IDs, ID)
-				psigs = append(psigs, vote.PartialSignature)
-			} else {
-				voteList = append(voteList, *vote)
-			}
-		}
-		if block.Position.Round >= DKGDelayRound {
-			rand, err := cryptoDKG.RecoverSignature(psigs, IDs)
-			if err != nil {
-				recv.consensus.logger.Warn("Unable to recover randomness",
-					"block", block,
-					"error", err)
-			} else {
-				block.Randomness = rand.Signature[:]
-			}
-		} else {
-			block.Randomness = NoRand
-		}
-
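// [Editor's note] The branch above turns commit votes into the block's
// randomness: from DKGDelayRound on, each vote carries a BLS partial
// signature over the confirmed block hash, and any threshold-sized subset of
// shares recovers the same group signature, which becomes block.Randomness;
// earlier rounds fall back to the NoRand sentinel. Reduced to its data flow,
// reusing this package's own cryptoDKG helper (inputs assumed already
// filtered to the confirmed hash):
//
//	func recoverRandomness(psigs []cryptoDKG.PartialSignature,
//		ids cryptoDKG.IDs) ([]byte, error) {
//		sig, err := cryptoDKG.RecoverSignature(psigs, ids)
//		if err != nil {
//			return nil, err // fewer than threshold usable shares
//		}
//		return sig.Signature[:], nil
//	}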
-		if recv.isNotary {
-			result := &types.AgreementResult{
-				BlockHash: block.Hash,
-				Position: block.Position,
-				Votes: voteList,
-				IsEmptyBlock: isEmptyBlockConfirmed,
-				Randomness: block.Randomness,
-			}
-			// touchAgreementResult does not support concurrent access.
-			go func() {
-				recv.consensus.priorityMsgChan <- (*selfAgreementResult)(result)
-			}()
-			recv.consensus.logger.Debug("Broadcast AgreementResult",
-				"result", result)
-			recv.consensus.network.BroadcastAgreementResult(result)
-			if block.IsEmpty() {
-				recv.consensus.bcModule.addBlockRandomness(
-					block.Position, block.Randomness)
-			}
-			if block.Position.Round >= DKGDelayRound {
-				recv.consensus.logger.Debug(
-					"Broadcast finalized block",
-					"block", block)
-				recv.consensus.network.BroadcastBlock(block)
-			}
-		}
-	}
-
-	if !block.IsGenesis() &&
-		!recv.consensus.bcModule.confirmed(block.Position.Height-1) {
-		go func(hash common.Hash) {
-			parentHash := hash
-			for {
-				recv.consensus.logger.Warn("Parent block not confirmed",
-					"parent-hash", parentHash.String()[:6],
-					"cur-position", block.Position)
-				ch := make(chan *types.Block)
-				if !func() bool {
-					recv.consensus.lock.Lock()
-					defer recv.consensus.lock.Unlock()
-					if _, exist := recv.consensus.baConfirmedBlock[parentHash]; exist {
-						return false
-					}
-					recv.consensus.baConfirmedBlock[parentHash] = ch
-					return true
-				}() {
-					return
-				}
-				var block *types.Block
-			PullBlockLoop:
-				for {
-					recv.consensus.logger.Debug("Calling Network.PullBlock for parent",
-						"hash", parentHash)
-					recv.consensus.network.PullBlocks(common.Hashes{parentHash})
-					select {
-					case block = <-ch:
-						break PullBlockLoop
-					case <-time.After(1 * time.Second):
-					}
-				}
-				recv.consensus.logger.Info("Receive parent block",
-					"parent-hash", block.ParentHash.String()[:6],
-					"cur-position", block.Position)
-				if !block.IsFinalized() {
-					// TODO(jimmy): use a separate message to pull finalized
-					// blocks. Here, we pull it again as a workaround.
-					continue
-				}
-				recv.consensus.processBlockChan <- block
-				parentHash = block.ParentHash
-				if block.IsGenesis() || recv.consensus.bcModule.confirmed(
-					block.Position.Height-1) {
-					return
-				}
-			}
-		}(block.ParentHash)
-	}
-	if !block.IsEmpty() {
-		recv.consensus.processBlockChan <- block
-	}
-	// Clean the restartNotary channel so BA will not get stuck in a deadlock.
-CleanChannelLoop:
-	for {
-		select {
-		case <-recv.restartNotary:
-		default:
-			break CleanChannelLoop
-		}
-	}
-	recv.restartNotary <- block.Position
-}
-
-func (recv *consensusBAReceiver) PullBlocks(hashes common.Hashes) {
-	if !recv.isNotary {
-		return
-	}
-	recv.consensus.logger.Debug("Calling Network.PullBlocks", "hashes", hashes)
-	recv.consensus.network.PullBlocks(hashes)
-}
-
-func (recv *consensusBAReceiver) ReportForkVote(v1, v2 *types.Vote) {
-	recv.consensus.gov.ReportForkVote(v1, v2)
-}
-
-func (recv *consensusBAReceiver) ReportForkBlock(b1, b2 *types.Block) {
-	b1Clone := b1.Clone()
-	b2Clone := b2.Clone()
-	b1Clone.Payload = []byte{}
-	b2Clone.Payload = []byte{}
-	recv.consensus.gov.ReportForkBlock(b1Clone, b2Clone)
-}
-
-// consensusDKGReceiver implements dkgReceiver.
-type consensusDKGReceiver struct {
-	ID types.NodeID
-	gov Governance
-	signer *utils.Signer
-	nodeSetCache *utils.NodeSetCache
-	cfgModule *configurationChain
-	network Network
-	logger common.Logger
-}
-
-// ProposeDKGComplaint proposes a DKGComplaint.
-func (recv *consensusDKGReceiver) ProposeDKGComplaint(
-	complaint *typesDKG.Complaint) {
-	if err := recv.signer.SignDKGComplaint(complaint); err != nil {
-		recv.logger.Error("Failed to sign DKG complaint", "error", err)
-		return
-	}
-	recv.logger.Debug("Calling Governance.AddDKGComplaint",
-		"complaint", complaint)
-	recv.gov.AddDKGComplaint(complaint)
-}
-
-// ProposeDKGMasterPublicKey proposes a DKGMasterPublicKey.
-// ProposeDKGMasterPublicKey proposes a DKGMasterPublicKey.
-func (recv *consensusDKGReceiver) ProposeDKGMasterPublicKey(
-  mpk *typesDKG.MasterPublicKey) {
-  if err := recv.signer.SignDKGMasterPublicKey(mpk); err != nil {
-    recv.logger.Error("Failed to sign DKG master public key", "error", err)
-    return
-  }
-  recv.logger.Debug("Calling Governance.AddDKGMasterPublicKey", "key", mpk)
-  recv.gov.AddDKGMasterPublicKey(mpk)
-}
-
-// ProposeDKGPrivateShare proposes a DKGPrivateShare.
-func (recv *consensusDKGReceiver) ProposeDKGPrivateShare(
-  prv *typesDKG.PrivateShare) {
-  if err := recv.signer.SignDKGPrivateShare(prv); err != nil {
-    recv.logger.Error("Failed to sign DKG private share", "error", err)
-    return
-  }
-  receiverPubKey, exists := recv.nodeSetCache.GetPublicKey(prv.ReceiverID)
-  if !exists {
-    recv.logger.Error("Public key for receiver not found",
-      "receiver", prv.ReceiverID.String()[:6])
-    return
-  }
-  if prv.ReceiverID == recv.ID {
-    go func() {
-      if err := recv.cfgModule.processPrivateShare(prv); err != nil {
-        recv.logger.Error("Failed to process self private share", "prvShare", prv)
-      }
-    }()
-  } else {
-    recv.logger.Debug("Calling Network.SendDKGPrivateShare",
-      "receiver", hex.EncodeToString(receiverPubKey.Bytes()))
-    recv.network.SendDKGPrivateShare(receiverPubKey, prv)
-  }
-}
-
-// ProposeDKGAntiNackComplaint proposes a DKGPrivateShare as an anti-complaint.
-func (recv *consensusDKGReceiver) ProposeDKGAntiNackComplaint(
-  prv *typesDKG.PrivateShare) {
-  if prv.ProposerID == recv.ID {
-    if err := recv.signer.SignDKGPrivateShare(prv); err != nil {
-      recv.logger.Error("Failed to sign DKG private share", "error", err)
-      return
-    }
-  }
-  recv.logger.Debug("Calling Network.BroadcastDKGPrivateShare", "share", prv)
-  recv.network.BroadcastDKGPrivateShare(prv)
-}
-
-// ProposeDKGMPKReady proposes a DKGMPKReady message.
-func (recv *consensusDKGReceiver) ProposeDKGMPKReady(ready *typesDKG.MPKReady) {
-  if err := recv.signer.SignDKGMPKReady(ready); err != nil {
-    recv.logger.Error("Failed to sign DKG ready", "error", err)
-    return
-  }
-  recv.logger.Debug("Calling Governance.AddDKGMPKReady", "ready", ready)
-  recv.gov.AddDKGMPKReady(ready)
-}
-
-// ProposeDKGFinalize proposes a DKGFinalize message.
-func (recv *consensusDKGReceiver) ProposeDKGFinalize(final *typesDKG.Finalize) {
-  if err := recv.signer.SignDKGFinalize(final); err != nil {
-    recv.logger.Error("Failed to sign DKG finalize", "error", err)
-    return
-  }
-  recv.logger.Debug("Calling Governance.AddDKGFinalize", "final", final)
-  recv.gov.AddDKGFinalize(final)
-}
-
-// ProposeDKGSuccess proposes a DKGSuccess message.
-func (recv *consensusDKGReceiver) ProposeDKGSuccess(success *typesDKG.Success) {
-  if err := recv.signer.SignDKGSuccess(success); err != nil {
-    recv.logger.Error("Failed to sign DKG success", "error", err)
-    return
-  }
-  recv.logger.Debug("Calling Governance.AddDKGSuccess", "success", success)
-  recv.gov.AddDKGSuccess(success)
-}
-
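Every Propose* method above follows the same shape: sign the payload, abort on error, then hand the signed payload to governance or the network. If one wanted to factor that shape out, a sketch might look like the following (signAndSubmit and the closures are hypothetical helpers, not part of the codebase):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // signAndSubmit captures the pattern shared by the Propose* methods:
    // sign first, bail out on error, then submit. The function values stand
    // in for signer.SignDKGXxx and gov.AddDKGXxx respectively.
    func signAndSubmit(name string, sign func() error, submit func()) {
    	if err := sign(); err != nil {
    		fmt.Printf("Failed to sign %s: %v\n", name, err)
    		return
    	}
    	fmt.Printf("Calling Governance.Add%s\n", name)
    	submit()
    }

    func main() {
    	signAndSubmit("DKGMPKReady",
    		func() error { return nil }, // pretend signing succeeded
    		func() { fmt.Println("submitted") })
    	signAndSubmit("DKGFinalize",
    		func() error { return errors.New("no key") }, // failure path
    		func() { fmt.Println("never reached") })
    }
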
-// Consensus implements the DEXON Consensus algorithm.
-type Consensus struct {
-  // Node Info.
-  ID     types.NodeID
-  signer *utils.Signer
-
-  // BA.
-  baMgr            *agreementMgr
-  baConfirmedBlock map[common.Hash]chan<- *types.Block
-
-  // DKG.
-  dkgRunning int32
-  dkgReady   *sync.Cond
-  cfgModule  *configurationChain
-
-  // Interfaces.
-  db       db.Database
-  app      Application
-  debugApp Debug
-  gov      Governance
-  network  Network
-
-  // Misc.
-  bcModule                 *blockChain
-  dMoment                  time.Time
-  nodeSetCache             *utils.NodeSetCache
-  tsigVerifierCache        *TSigVerifierCache
-  lock                     sync.RWMutex
-  ctx                      context.Context
-  ctxCancel                context.CancelFunc
-  event                    *common.Event
-  roundEvent               *utils.RoundEvent
-  logger                   common.Logger
-  resetDeliveryGuardTicker chan struct{}
-  msgChan                  chan types.Msg
-  priorityMsgChan          chan interface{}
-  waitGroup                sync.WaitGroup
-  processBlockChan         chan *types.Block
-
-  // Context of Dummy receiver during switching from syncer.
-  dummyCancel    context.CancelFunc
-  dummyFinished  <-chan struct{}
-  dummyMsgBuffer []types.Msg
-}
-
-// NewConsensus constructs a Consensus instance.
-func NewConsensus(
-  dMoment time.Time,
-  app Application,
-  gov Governance,
-  db db.Database,
-  network Network,
-  prv crypto.PrivateKey,
-  logger common.Logger) *Consensus {
-  return newConsensusForRound(
-    nil, dMoment, app, gov, db, network, prv, logger, true)
-}
-
-// NewConsensusForSimulation creates an instance of Consensus for simulation;
-// the only difference from NewConsensus is that the app is non-blocking.
-func NewConsensusForSimulation(
-  dMoment time.Time,
-  app Application,
-  gov Governance,
-  db db.Database,
-  network Network,
-  prv crypto.PrivateKey,
-  logger common.Logger) *Consensus {
-  return newConsensusForRound(
-    nil, dMoment, app, gov, db, network, prv, logger, false)
-}
-
-// NewConsensusFromSyncer constructs a Consensus instance from information
-// provided by the syncer.
-//
-// You need to provide the initial block for this newly created Consensus
-// instance to bootstrap with. A proper choice is the last finalized block you
-// delivered to the syncer.
-//
-// NOTE: those confirmed blocks should be organized by chainID and sorted by
-// their positions, in ascending order.
-func NewConsensusFromSyncer(
-  initBlock *types.Block,
-  startWithEmpty bool,
-  dMoment time.Time,
-  app Application,
-  gov Governance,
-  db db.Database,
-  networkModule Network,
-  prv crypto.PrivateKey,
-  confirmedBlocks []*types.Block,
-  cachedMessages []types.Msg,
-  logger common.Logger) (*Consensus, error) {
-  // Setup Consensus instance.
-  con := newConsensusForRound(initBlock, dMoment, app, gov, db,
-    networkModule, prv, logger, true)
-  // Launch a dummy receiver before we start receiving from network module.
-  con.dummyMsgBuffer = cachedMessages
-  con.dummyCancel, con.dummyFinished = utils.LaunchDummyReceiver(
-    con.ctx, networkModule.ReceiveChan(), func(msg types.Msg) {
-      con.dummyMsgBuffer = append(con.dummyMsgBuffer, msg)
-    })
-  // Dump all BA-confirmed blocks to the consensus instance, making sure
-  // these added blocks form a DAG.
-  refBlock := initBlock
-  for _, b := range confirmedBlocks {
-    // A block can be added only when its parent block has been added to the
-    // lattice. Otherwise, the pulling mechanism would stop at the block we
-    // added and lose its parent block forever.
-    if b.Position.Height != refBlock.Position.Height+1 {
-      break
-    }
-    if err := con.processBlock(b); err != nil {
-      return nil, err
-    }
-    refBlock = b
-  }
-  if startWithEmpty {
-    emptyPos := types.Position{
-      Round:  con.bcModule.tipRound(),
-      Height: initBlock.Position.Height + 1,
-    }
-    _, err := con.bcModule.addEmptyBlock(emptyPos)
-    if err != nil {
-      panic(err)
-    }
-  }
-  return con, nil
-}
-
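NewConsensusFromSyncer relies on utils.LaunchDummyReceiver to keep draining the network channel while ownership is handed over from the syncer, so no message is lost in between. A simplified sketch of that drain-and-buffer idea (the signature is reduced for illustration; the real helper consumes a types.Msg channel):

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    // launchBuffer drains recv until cancelled, recording each message via
    // handle; the returned done channel closes once draining has fully
    // stopped, so the caller can safely replay whatever was buffered.
    func launchBuffer(ctx context.Context, recv <-chan string,
    	handle func(string)) (context.CancelFunc, <-chan struct{}) {
    	ctx, cancel := context.WithCancel(ctx)
    	done := make(chan struct{})
    	go func() {
    		defer close(done)
    		for {
    			select {
    			case msg := <-recv:
    				handle(msg)
    			case <-ctx.Done():
    				return
    			}
    		}
    	}()
    	return cancel, done
    }

    func main() {
    	recv := make(chan string, 4)
    	var buf []string
    	cancel, done := launchBuffer(context.Background(), recv,
    		func(m string) { buf = append(buf, m) })
    	recv <- "vote"
    	recv <- "block"
    	time.Sleep(50 * time.Millisecond) // let the receiver drain (demo only)
    	cancel()
    	<-done // receiver fully stopped; buf can now be replayed
    	fmt.Println(buf)
    }

-// newConsensusForRound creates a Consensus instance.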
-func newConsensusForRound(
-  initBlock *types.Block,
-  dMoment time.Time,
-  app Application,
-  gov Governance,
-  db db.Database,
-  network Network,
-  prv crypto.PrivateKey,
-  logger common.Logger,
-  usingNonBlocking bool) *Consensus {
-  // TODO(w): load latest blockHeight from DB, and use config at that height.
-  nodeSetCache := utils.NewNodeSetCache(gov)
-  // Setup signer module.
-  signer := utils.NewSigner(prv)
-  // Check if the application implements the Debug interface.
-  var debugApp Debug
-  if a, ok := app.(Debug); ok {
-    debugApp = a
-  }
-  // Get configuration for the bootstrap round.
-  initPos := types.Position{
-    Round:  0,
-    Height: types.GenesisHeight,
-  }
-  if initBlock != nil {
-    initPos = initBlock.Position
-  }
-  // Init configuration chain.
-  ID := types.NewNodeID(prv.PublicKey())
-  recv := &consensusDKGReceiver{
-    ID:           ID,
-    gov:          gov,
-    signer:       signer,
-    nodeSetCache: nodeSetCache,
-    network:      network,
-    logger:       logger,
-  }
-  cfgModule := newConfigurationChain(ID, recv, gov, nodeSetCache, db, logger)
-  recv.cfgModule = cfgModule
-  signer.SetBLSSigner(
-    func(round uint64, hash common.Hash) (crypto.Signature, error) {
-      _, signer, err := cfgModule.getDKGInfo(round, false)
-      if err != nil {
-        return crypto.Signature{}, err
-      }
-      return crypto.Signature(signer.sign(hash)), nil
-    })
-  appModule := app
-  if usingNonBlocking {
-    appModule = newNonBlocking(app, debugApp)
-  }
-  tsigVerifierCache := NewTSigVerifierCache(gov, 7)
-  bcModule := newBlockChain(ID, dMoment, initBlock, appModule,
-    tsigVerifierCache, signer, logger)
-  // Construct Consensus instance.
-  con := &Consensus{
-    ID:                       ID,
-    app:                      appModule,
-    debugApp:                 debugApp,
-    gov:                      gov,
-    db:                       db,
-    network:                  network,
-    baConfirmedBlock:         make(map[common.Hash]chan<- *types.Block),
-    dkgReady:                 sync.NewCond(&sync.Mutex{}),
-    cfgModule:                cfgModule,
-    bcModule:                 bcModule,
-    dMoment:                  dMoment,
-    nodeSetCache:             nodeSetCache,
-    tsigVerifierCache:        tsigVerifierCache,
-    signer:                   signer,
-    event:                    common.NewEvent(),
-    logger:                   logger,
-    resetDeliveryGuardTicker: make(chan struct{}),
-    msgChan:                  make(chan types.Msg, 1024),
-    priorityMsgChan:          make(chan interface{}, 1024),
-    processBlockChan:         make(chan *types.Block, 1024),
-  }
-  con.ctx, con.ctxCancel = context.WithCancel(context.Background())
-  var err error
-  con.roundEvent, err = utils.NewRoundEvent(con.ctx, gov, logger, initPos,
-    ConfigRoundShift)
-  if err != nil {
-    panic(err)
-  }
-  if con.baMgr, err = newAgreementMgr(con); err != nil {
-    panic(err)
-  }
-  if err = con.prepare(initBlock); err != nil {
-    panic(err)
-  }
-  return con
-}
-
-// prepare the Consensus instance to be ready for blocks after 'initBlock'.
-// 'initBlock' could be either:
-//  - nil
-//  - the last finalized block
-func (con *Consensus) prepare(initBlock *types.Block) (err error) {
-  // Trigger the round validation method for the next round of the first
-  // round.
-  // The block passed from the full node should already be delivered to, or
-  // known by, the full node, so we don't have to notify it.
-  initRound := uint64(0)
-  if initBlock != nil {
-    initRound = initBlock.Position.Round
-  }
-  if initRound == 0 {
-    if DKGDelayRound == 0 {
-      panic("not implemented yet")
-    }
-  }
-  // Measure time elapsed for each handler of round events.
-  elapse := func(what string, lastE utils.RoundEventParam) func() {
-    start := time.Now()
-    con.logger.Info("Handle round event",
-      "what", what,
-      "event", lastE)
-    return func() {
-      con.logger.Info("Finish round event",
-        "what", what,
-        "event", lastE,
-        "elapse", time.Since(start))
-    }
-  }
-  // Register a round event handler to purge the cached node set. To make
-  // sure each module sees the up-to-date node set, this action must be
-  // taken before all others.
-  con.roundEvent.Register(func(evts []utils.RoundEventParam) {
-    defer elapse("purge-cache", evts[len(evts)-1])()
-    for _, e := range evts {
-      if e.Reset == 0 {
-        continue
-      }
-      con.nodeSetCache.Purge(e.Round + 1)
-      con.tsigVerifierCache.Purge(e.Round + 1)
-    }
-  })
-  // Register a round event handler to abort any previously running DKG.
-  con.roundEvent.Register(func(evts []utils.RoundEventParam) {
-    e := evts[len(evts)-1]
-    go func() {
-      defer elapse("abort-DKG", e)()
-      if e.Reset > 0 {
-        aborted := con.cfgModule.abortDKG(con.ctx, e.Round+1, e.Reset-1)
-        con.logger.Info("DKG aborting result",
-          "round", e.Round+1,
-          "reset", e.Reset-1,
-          "aborted", aborted)
-      }
-    }()
-  })
-  // Register a round event handler to update the BA and BC modules.
-  con.roundEvent.Register(func(evts []utils.RoundEventParam) {
-    defer elapse("append-config", evts[len(evts)-1])()
-    // Always push newer configs to the later modules in the data flow
-    // first.
-    if err := con.bcModule.notifyRoundEvents(evts); err != nil {
-      panic(err)
-    }
-    if err := con.baMgr.notifyRoundEvents(evts); err != nil {
-      panic(err)
-    }
-  })
-  // Register a round event handler to reset DKG if the DKG set for the next
-  // round failed to set up.
-  con.roundEvent.Register(func(evts []utils.RoundEventParam) {
-    e := evts[len(evts)-1]
-    defer elapse("reset-DKG", e)()
-    nextRound := e.Round + 1
-    if nextRound < DKGDelayRound {
-      return
-    }
-    curNotarySet, err := con.nodeSetCache.GetNotarySet(e.Round)
-    if err != nil {
-      con.logger.Error("Error getting notary set when proposing CRS",
-        "round", e.Round,
-        "error", err)
-      return
-    }
-    if _, exist := curNotarySet[con.ID]; !exist {
-      return
-    }
-    con.event.RegisterHeight(e.NextDKGResetHeight(), func(uint64) {
-      if ok, _ := utils.IsDKGValid(
-        con.gov, con.logger, nextRound, e.Reset); ok {
-        return
-      }
-      // Abort any previously running DKG protocol instances.
-      go con.runCRS(e.Round, utils.Rehash(e.CRS, uint(e.Reset+1)), true)
-    })
-  })
-  // Register a round event handler to propose a new CRS.
-  con.roundEvent.Register(func(evts []utils.RoundEventParam) {
-    // We don't have to propose a new CRS during a DKG reset; the reset is
-    // done by the notary set of the previous round.
-    e := evts[len(evts)-1]
-    defer elapse("propose-CRS", e)()
-    if e.Reset != 0 || e.Round < DKGDelayRound {
-      return
-    }
-    if curNotarySet, err := con.nodeSetCache.GetNotarySet(e.Round); err != nil {
-      con.logger.Error("Error getting notary set when proposing CRS",
-        "round", e.Round,
-        "error", err)
-    } else {
-      if _, exist := curNotarySet[con.ID]; !exist {
-        return
-      }
-      con.event.RegisterHeight(e.NextCRSProposingHeight(), func(uint64) {
-        con.logger.Debug(
-          "Calling Governance.CRS to check if already proposed",
-          "round", e.Round+1)
-        if (con.gov.CRS(e.Round+1) != common.Hash{}) {
-          con.logger.Debug("CRS already proposed", "round", e.Round+1)
-          return
-        }
-        go con.runCRS(e.Round, e.CRS, false)
-      })
-    }
-  })
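The elapse helper above exploits a small Go trick: a function that logs the start time and returns the function that logs completion, so each handler needs only a single `defer elapse(...)()` — note the trailing `()`, which invokes the outer function immediately while deferring the returned closure. The pattern in isolation:

    package main

    import (
    	"fmt"
    	"time"
    )

    // elapse logs the beginning of a step immediately and returns a closure
    // that, when deferred, logs how long the step took.
    func elapse(what string) func() {
    	start := time.Now()
    	fmt.Println("begin", what)
    	return func() {
    		fmt.Println("end", what, "elapsed", time.Since(start))
    	}
    }

    func handler() {
    	defer elapse("purge-cache")() // outer call runs now, closure at return
    	time.Sleep(10 * time.Millisecond) // stand-in for the handler's work
    }

    func main() { handler() }
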
-  // Touch nodeSetCache for the next round.
-  con.roundEvent.Register(func(evts []utils.RoundEventParam) {
-    e := evts[len(evts)-1]
-    defer elapse("touch-NodeSetCache", e)()
-    con.event.RegisterHeight(e.NextTouchNodeSetCacheHeight(), func(uint64) {
-      if e.Reset == 0 {
-        return
-      }
-      go func() {
-        nextRound := e.Round + 1
-        if err := con.nodeSetCache.Touch(nextRound); err != nil {
-          con.logger.Warn("Failed to update nodeSetCache",
-            "round", nextRound,
-            "error", err)
-        }
-      }()
-    })
-  })
-  con.roundEvent.Register(func(evts []utils.RoundEventParam) {
-    e := evts[len(evts)-1]
-    if e.Reset != 0 {
-      return
-    }
-    defer elapse("touch-DKGCache", e)()
-    go func() {
-      if _, err :=
-        con.tsigVerifierCache.Update(e.Round); err != nil {
-        con.logger.Warn("Failed to update tsig cache",
-          "round", e.Round,
-          "error", err)
-      }
-    }()
-    go func() {
-      threshold := utils.GetDKGThreshold(
-        utils.GetConfigWithPanic(con.gov, e.Round, con.logger))
-      // Restore group public key.
-      con.logger.Debug(
-        "Calling Governance.DKGMasterPublicKeys for recoverDKGInfo",
-        "round", e.Round)
-      con.logger.Debug(
-        "Calling Governance.DKGComplaints for recoverDKGInfo",
-        "round", e.Round)
-      _, qualifies, err := typesDKG.CalcQualifyNodes(
-        con.gov.DKGMasterPublicKeys(e.Round),
-        con.gov.DKGComplaints(e.Round),
-        threshold)
-      if err != nil {
-        con.logger.Warn("Failed to calculate dkg set",
-          "round", e.Round,
-          "error", err)
-        return
-      }
-      if _, exist := qualifies[con.ID]; !exist {
-        return
-      }
-      if _, _, err :=
-        con.cfgModule.getDKGInfo(e.Round, true); err != nil {
-        con.logger.Warn("Failed to recover DKG info",
-          "round", e.Round,
-          "error", err)
-      }
-    }()
-  })
-  // checkCRS generates a checker that reports whether the CRS for a given
-  // round is ready.
-  checkCRS := func(round uint64) func() bool {
-    return func() bool {
-      nextCRS := con.gov.CRS(round)
-      if (nextCRS != common.Hash{}) {
-        return true
-      }
-      con.logger.Debug("CRS is not ready yet. Try again later...",
-        "nodeID", con.ID,
-        "round", round)
-      return false
-    }
-  }
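checkCRS builds a closure that reports whether the CRS for a round has been published; the caller then polls it until it succeeds or the context dies. A minimal polling helper in the spirit of the checkWithCancel call used just below (this sketch is illustrative; the real helper lives in this library and also honors the consensus context):

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    // pollUntil re-evaluates check every interval until it returns true or
    // ctx is cancelled; it reports whether check eventually succeeded.
    func pollUntil(ctx context.Context, interval time.Duration,
    	check func() bool) bool {
    	for {
    		if check() {
    			return true
    		}
    		select {
    		case <-ctx.Done():
    			return false
    		case <-time.After(interval):
    		}
    	}
    }

    func main() {
    	deadline := time.Now().Add(30 * time.Millisecond)
    	ok := pollUntil(context.Background(), 5*time.Millisecond,
    		func() bool { return time.Now().After(deadline) })
    	fmt.Println(ok) // true once the fake "CRS" becomes ready
    }
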
-  // Trigger the round validation method for the next period.
-  con.roundEvent.Register(func(evts []utils.RoundEventParam) {
-    e := evts[len(evts)-1]
-    defer elapse("next-round", e)()
-    // Register a routine to trigger round events.
-    con.event.RegisterHeight(e.NextRoundValidationHeight(),
-      utils.RoundEventRetryHandlerGenerator(con.roundEvent, con.event))
-    // Register a routine to register the next DKG.
-    con.event.RegisterHeight(e.NextDKGRegisterHeight(), func(uint64) {
-      nextRound := e.Round + 1
-      if nextRound < DKGDelayRound {
-        con.logger.Info("Skip runDKG for round",
-          "round", nextRound,
-          "reset", e.Reset)
-        return
-      }
-      go func() {
-        // Normally, gov.CRS would return non-nil. Poll it anyway in case
-        // of unexpected network fluctuation, to ensure robustness.
-        if !checkWithCancel(
-          con.ctx, 500*time.Millisecond, checkCRS(nextRound)) {
-          con.logger.Debug("unable to prepare CRS for notary set",
-            "round", nextRound,
-            "reset", e.Reset)
-          return
-        }
-        nextNotarySet, err := con.nodeSetCache.GetNotarySet(nextRound)
-        if err != nil {
-          con.logger.Error("Error getting notary set for next round",
-            "round", nextRound,
-            "reset", e.Reset,
-            "error", err)
-          return
-        }
-        if _, exist := nextNotarySet[con.ID]; !exist {
-          con.logger.Info("Not selected as notary set",
-            "round", nextRound,
-            "reset", e.Reset)
-          return
-        }
-        con.logger.Info("Selected as notary set",
-          "round", nextRound,
-          "reset", e.Reset)
-        nextConfig := utils.GetConfigWithPanic(con.gov, nextRound,
-          con.logger)
-        con.cfgModule.registerDKG(con.ctx, nextRound, e.Reset,
-          utils.GetDKGThreshold(nextConfig))
-        con.event.RegisterHeight(e.NextDKGPreparationHeight(),
-          func(h uint64) {
-            func() {
-              con.dkgReady.L.Lock()
-              defer con.dkgReady.L.Unlock()
-              con.dkgRunning = 0
-            }()
-            // We want to skip some of the DKG phases when started.
-            dkgCurrentHeight := h - e.NextDKGPreparationHeight()
-            con.runDKG(
-              nextRound, e.Reset,
-              e.NextDKGPreparationHeight(), dkgCurrentHeight)
-          })
-      }()
-    })
-  })
-  con.roundEvent.TriggerInitEvent()
-  if initBlock != nil {
-    con.event.NotifyHeight(initBlock.Position.Height)
-  }
-  con.baMgr.prepare()
-  return
-}
-
-// Run starts running DEXON Consensus.
-func (con *Consensus) Run() {
-  // There may be empty blocks in the blockchain added by force sync.
-  blocksWithoutRandomness := con.bcModule.pendingBlocksWithoutRandomness()
-  // Launch BA routines.
-  con.baMgr.run()
-  // Launch network handler.
-  con.logger.Debug("Calling Network.ReceiveChan")
-  con.waitGroup.Add(1)
-  go con.deliverNetworkMsg()
-  con.waitGroup.Add(1)
-  go con.processMsg()
-  go con.processBlockLoop()
-  // Stop dummy receiver if launched.
-  if con.dummyCancel != nil {
-    con.logger.Trace("Stop dummy receiver")
-    con.dummyCancel()
-    <-con.dummyFinished
-    // Replay those cached messages.
-    con.logger.Trace("Dummy receiver stopped, start dumping cached messages",
-      "count", len(con.dummyMsgBuffer))
-    for _, msg := range con.dummyMsgBuffer {
-    loop:
-      for {
-        select {
-        case con.msgChan <- msg:
-          break loop
-        case <-time.After(50 * time.Millisecond):
-          con.logger.Debug(
-            "internal message channel is full when syncing")
-        }
-      }
-    }
-    con.logger.Trace("Finish dumping cached messages")
-  }
-  con.generateBlockRandomness(blocksWithoutRandomness)
-  // Sleep until dMoment comes.
-  time.Sleep(con.dMoment.Sub(time.Now().UTC()))
-  // Take some time to bootstrap.
-  time.Sleep(3 * time.Second)
-  con.waitGroup.Add(1)
-  go con.deliveryGuard()
-  // Block until done.
-  select {
-  case <-con.ctx.Done():
-  }
-}
-
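The replay loop in Run keeps retrying a send into the bounded msgChan, logging every 50 ms while the channel is full, rather than dropping messages or blocking silently. The pattern in isolation (channel type and timeout are illustrative):

    package main

    import (
    	"fmt"
    	"time"
    )

    // sendWithBackpressureLog blocks until msg is accepted, but surfaces a
    // log line periodically so a full channel is visible in the logs.
    func sendWithBackpressureLog(ch chan<- string, msg string) {
    	for {
    		select {
    		case ch <- msg:
    			return
    		case <-time.After(50 * time.Millisecond):
    			fmt.Println("internal message channel is full")
    		}
    	}
    }

    func main() {
    	ch := make(chan string, 1)
    	go func() {
    		time.Sleep(120 * time.Millisecond) // a slow consumer
    		for i := 0; i < 2; i++ {
    			fmt.Println("consumed:", <-ch)
    		}
    	}()
    	sendWithBackpressureLog(ch, "a") // fits in the buffer
    	sendWithBackpressureLog(ch, "b") // logs until the consumer drains
    	time.Sleep(200 * time.Millisecond)
    }
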
-func (con *Consensus) generateBlockRandomness(blocks []*types.Block) {
-  con.logger.Debug("Start generating block randomness", "blocks", blocks)
-  isNotarySet := make(map[uint64]bool)
-  for _, block := range blocks {
-    if block.Position.Round < DKGDelayRound {
-      continue
-    }
-    doRun, exist := isNotarySet[block.Position.Round]
-    if !exist {
-      curNotarySet, err := con.nodeSetCache.GetNotarySet(block.Position.Round)
-      if err != nil {
-        con.logger.Error("Error getting notary set when generating block tsig",
-          "round", block.Position.Round,
-          "error", err)
-        continue
-      }
-      _, exist := curNotarySet[con.ID]
-      isNotarySet[block.Position.Round] = exist
-      doRun = exist
-    }
-    if !doRun {
-      continue
-    }
-    go func(block *types.Block) {
-      psig, err := con.cfgModule.preparePartialSignature(
-        block.Position.Round, block.Hash)
-      if err != nil {
-        con.logger.Error("Failed to prepare partial signature",
-          "block", block,
-          "error", err)
-      } else if err = con.signer.SignDKGPartialSignature(psig); err != nil {
-        con.logger.Error("Failed to sign DKG partial signature",
-          "block", block,
-          "error", err)
-      } else if err = con.cfgModule.processPartialSignature(psig); err != nil {
-        con.logger.Error("Failed to process partial signature",
-          "block", block,
-          "error", err)
-      } else {
-        con.logger.Debug("Calling Network.BroadcastDKGPartialSignature",
-          "proposer", psig.ProposerID,
-          "block", block)
-        con.network.BroadcastDKGPartialSignature(psig)
-        sig, err := con.cfgModule.runTSig(
-          block.Position.Round,
-          block.Hash,
-          60*time.Minute,
-        )
-        if err != nil {
-          con.logger.Error("Failed to run Block Tsig",
-            "block", block,
-            "error", err)
-          return
-        }
-        result := &types.AgreementResult{
-          BlockHash:  block.Hash,
-          Position:   block.Position,
-          Randomness: sig.Signature[:],
-        }
-        con.bcModule.addBlockRandomness(block.Position, sig.Signature[:])
-        con.logger.Debug("Broadcast BlockRandomness",
-          "block", block,
-          "result", result)
-        con.network.BroadcastAgreementResult(result)
-        if err := con.deliverFinalizedBlocks(); err != nil {
-          con.logger.Error("Failed to deliver finalized block",
-            "error", err)
-        }
-      }
-    }(block)
-  }
-}
-
-// runDKG starts running the DKG protocol.
-func (con *Consensus) runDKG(
-  round, reset, dkgBeginHeight, dkgHeight uint64) {
-  con.dkgReady.L.Lock()
-  defer con.dkgReady.L.Unlock()
-  if con.dkgRunning != 0 {
-    return
-  }
-  con.dkgRunning = 1
-  go func() {
-    defer func() {
-      con.dkgReady.L.Lock()
-      defer con.dkgReady.L.Unlock()
-      con.dkgReady.Broadcast()
-      con.dkgRunning = 2
-    }()
-    if err :=
-      con.cfgModule.runDKG(
-        round, reset,
-        con.event, dkgBeginHeight, dkgHeight); err != nil {
-      con.logger.Error("Failed to runDKG", "error", err)
-    }
-  }()
-}
-
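runDKG above uses a mutex-guarded state flag (0 = idle, 1 = running, 2 = finished) plus a sync.Cond broadcast so other goroutines can wait for completion. A compact sketch of that start-once / wait-for-finish shape (the states and names are illustrative):

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    type runner struct {
    	ready *sync.Cond
    	state int // 0 idle, 1 running, 2 finished
    }

    // start launches the work at most once; concurrent calls are no-ops.
    func (r *runner) start() {
    	r.ready.L.Lock()
    	defer r.ready.L.Unlock()
    	if r.state != 0 {
    		return
    	}
    	r.state = 1
    	go func() {
    		time.Sleep(10 * time.Millisecond) // stand-in for the DKG work
    		r.ready.L.Lock()
    		defer r.ready.L.Unlock()
    		r.state = 2
    		r.ready.Broadcast() // wake every waiter
    	}()
    }

    // wait blocks until the work has finished.
    func (r *runner) wait() {
    	r.ready.L.Lock()
    	defer r.ready.L.Unlock()
    	for r.state != 2 {
    		r.ready.Wait()
    	}
    }

    func main() {
    	r := &runner{ready: sync.NewCond(&sync.Mutex{})}
    	r.start()
    	r.start() // ignored: already running
    	r.wait()
    	fmt.Println("done")
    }
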
-func (con *Consensus) runCRS(round uint64, hash common.Hash, reset bool) {
-  // Start running next round CRS.
-  psig, err := con.cfgModule.preparePartialSignature(round, hash)
-  if err != nil {
-    con.logger.Error("Failed to prepare partial signature", "error", err)
-  } else if err = con.signer.SignDKGPartialSignature(psig); err != nil {
-    con.logger.Error("Failed to sign DKG partial signature", "error", err)
-  } else if err = con.cfgModule.processPartialSignature(psig); err != nil {
-    con.logger.Error("Failed to process partial signature", "error", err)
-  } else {
-    con.logger.Debug("Calling Network.BroadcastDKGPartialSignature",
-      "proposer", psig.ProposerID,
-      "round", psig.Round,
-      "hash", psig.Hash)
-    con.network.BroadcastDKGPartialSignature(psig)
-    con.logger.Debug("Calling Governance.CRS", "round", round)
-    crs, err := con.cfgModule.runCRSTSig(round, hash)
-    if err != nil {
-      con.logger.Error("Failed to run CRS Tsig", "error", err)
-    } else {
-      if reset {
-        con.logger.Debug("Calling Governance.ResetDKG",
-          "round", round+1,
-          "crs", hex.EncodeToString(crs))
-        con.gov.ResetDKG(crs)
-      } else {
-        con.logger.Debug("Calling Governance.ProposeCRS",
-          "round", round+1,
-          "crs", hex.EncodeToString(crs))
-        con.gov.ProposeCRS(round+1, crs)
-      }
-    }
-  }
-}
-
-// Stop the Consensus core.
-func (con *Consensus) Stop() {
-  con.ctxCancel()
-  con.baMgr.stop()
-  con.event.Reset()
-  con.waitGroup.Wait()
-  if nbApp, ok := con.app.(*nonBlocking); ok {
-    nbApp.wait()
-  }
-}
-
-func (con *Consensus) deliverNetworkMsg() {
-  defer con.waitGroup.Done()
-  recv := con.network.ReceiveChan()
-  for {
-    select {
-    case <-con.ctx.Done():
-      return
-    default:
-    }
-    select {
-    case msg := <-recv:
-    innerLoop:
-      for {
-        select {
-        case con.msgChan <- msg:
-          break innerLoop
-        case <-time.After(500 * time.Millisecond):
-          con.logger.Debug("internal message channel is full",
-            "pending", msg)
-        }
-      }
-    case <-con.ctx.Done():
-      return
-    }
-  }
-}
-
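processMsg below gives priorityMsgChan strict precedence over msgChan by first doing a non-blocking receive on the priority channel and only then selecting over both. The two-tier pattern on its own (channel contents are illustrative):

    package main

    import "fmt"

    // nextMsg prefers the priority channel; only when it is empty does it
    // fall back to selecting over both channels.
    func nextMsg(priority, normal <-chan string) string {
    	select {
    	case m := <-priority:
    		return m
    	default:
    	}
    	select {
    	case m := <-priority:
    		return m
    	case m := <-normal:
    		return m
    	}
    }

    func main() {
    	priority := make(chan string, 1)
    	normal := make(chan string, 1)
    	normal <- "vote"
    	priority <- "self agreement result"
    	fmt.Println(nextMsg(priority, normal)) // the priority message wins
    	fmt.Println(nextMsg(priority, normal)) // then the normal message
    }
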
"error", err) - con.network.ReportBadPeerChan() <- peer - continue MessageLoop - } - if !ok { - con.logger.Error("Incorrect confirmed block randomness", - "block", val) - con.network.ReportBadPeerChan() <- peer - continue MessageLoop - } - if err := utils.VerifyBlockSignature(val); err != nil { - con.logger.Error("VerifyBlockSignature failed", - "block", val, - "error", err) - con.network.ReportBadPeerChan() <- peer - continue MessageLoop - } - } - func() { - con.lock.Lock() - defer con.lock.Unlock() - // In case of multiple delivered block. - if _, exist := con.baConfirmedBlock[val.Hash]; !exist { - return - } - delete(con.baConfirmedBlock, val.Hash) - ch <- val - }() - } else if val.IsFinalized() { - if err := con.processFinalizedBlock(val); err != nil { - con.logger.Error("Failed to process finalized block", - "block", val, - "error", err) - con.network.ReportBadPeerChan() <- peer - } - } else { - if err := con.preProcessBlock(val); err != nil { - con.logger.Error("Failed to pre process block", - "block", val, - "error", err) - con.network.ReportBadPeerChan() <- peer - } - } - case *types.Vote: - if err := con.ProcessVote(val); err != nil { - con.logger.Error("Failed to process vote", - "vote", val, - "error", err) - con.network.ReportBadPeerChan() <- peer - } - case *types.AgreementResult: - if err := con.ProcessAgreementResult(val); err != nil { - con.logger.Error("Failed to process agreement result", - "result", val, - "error", err) - con.network.ReportBadPeerChan() <- peer - } - case *typesDKG.PrivateShare: - if err := con.cfgModule.processPrivateShare(val); err != nil { - con.logger.Error("Failed to process private share", - "error", err) - con.network.ReportBadPeerChan() <- peer - } - - case *typesDKG.PartialSignature: - if err := con.cfgModule.processPartialSignature(val); err != nil { - con.logger.Error("Failed to process partial signature", - "error", err) - con.network.ReportBadPeerChan() <- peer - } - } - } -} - -// ProcessVote is the entry point to submit ont vote to a Consensus instance. -func (con *Consensus) ProcessVote(vote *types.Vote) (err error) { - err = con.baMgr.processVote(vote) - return -} - -// ProcessAgreementResult processes the randomness request. -func (con *Consensus) ProcessAgreementResult( - rand *types.AgreementResult) error { - if !con.baMgr.touchAgreementResult(rand) { - return nil - } - // Sanity Check. - if err := VerifyAgreementResult(rand, con.nodeSetCache); err != nil { - con.baMgr.untouchAgreementResult(rand) - return err - } - if err := con.bcModule.processAgreementResult(rand); err != nil { - con.baMgr.untouchAgreementResult(rand) - if err == ErrSkipButNoError { - return nil - } - return err - } - // Syncing BA Module. - if err := con.baMgr.processAgreementResult(rand); err != nil { - con.baMgr.untouchAgreementResult(rand) - return err - } - - con.logger.Debug("Rebroadcast AgreementResult", - "result", rand) - con.network.BroadcastAgreementResult(rand) - - return con.deliverFinalizedBlocks() -} - -// preProcessBlock performs Byzantine Agreement on the block. 
-// preProcessBlock performs Byzantine Agreement on the block.
-func (con *Consensus) preProcessBlock(b *types.Block) (err error) {
-  err = con.baMgr.processBlock(b)
-  if err == nil && con.debugApp != nil {
-    con.debugApp.BlockReceived(b.Hash)
-  }
-  return
-}
-
-func (con *Consensus) processFinalizedBlock(b *types.Block) (err error) {
-  if b.Position.Round < DKGDelayRound {
-    return
-  }
-  if err = utils.VerifyBlockSignature(b); err != nil {
-    return
-  }
-  verifier, ok, err := con.tsigVerifierCache.UpdateAndGet(b.Position.Round)
-  if err != nil {
-    return
-  }
-  if !ok {
-    err = ErrCannotVerifyBlockRandomness
-    return
-  }
-  if !verifier.VerifySignature(b.Hash, crypto.Signature{
-    Type:      "bls",
-    Signature: b.Randomness,
-  }) {
-    err = ErrIncorrectBlockRandomness
-    return
-  }
-  err = con.baMgr.processFinalizedBlock(b)
-  if err == nil && con.debugApp != nil {
-    con.debugApp.BlockReceived(b.Hash)
-  }
-  return
-}
-
-func (con *Consensus) deliveryGuard() {
-  defer con.waitGroup.Done()
-  select {
-  case <-con.ctx.Done():
-  case <-time.After(con.dMoment.Sub(time.Now())):
-  }
-  // The node takes time to start.
-  select {
-  case <-con.ctx.Done():
-  case <-time.After(60 * time.Second):
-  }
-  for {
-    select {
-    case <-con.ctx.Done():
-      return
-    default:
-    }
-    select {
-    case <-con.ctx.Done():
-      return
-    case <-con.resetDeliveryGuardTicker:
-    case <-time.After(60 * time.Second):
-      con.logger.Error("No blocks delivered for too long", "ID", con.ID)
-      panic(fmt.Errorf("No blocks delivered for too long"))
-    }
-  }
-}
-
-// deliverBlock delivers a block to the application layer.
-func (con *Consensus) deliverBlock(b *types.Block) {
-  select {
-  case con.resetDeliveryGuardTicker <- struct{}{}:
-  default:
-  }
-  if err := con.db.PutBlock(*b); err != nil {
-    panic(err)
-  }
-  if err := con.db.PutCompactionChainTipInfo(b.Hash,
-    b.Position.Height); err != nil {
-    panic(err)
-  }
-  con.logger.Debug("Calling Application.BlockDelivered", "block", b)
-  con.app.BlockDelivered(b.Hash, b.Position, common.CopyBytes(b.Randomness))
-  if con.debugApp != nil {
-    con.debugApp.BlockReady(b.Hash)
-  }
-}
-
-// deliverFinalizedBlocks extracts and delivers finalized blocks to the
-// application layer.
-func (con *Consensus) deliverFinalizedBlocks() error {
-  con.lock.Lock()
-  defer con.lock.Unlock()
-  return con.deliverFinalizedBlocksWithoutLock()
-}
-
-func (con *Consensus) deliverFinalizedBlocksWithoutLock() (err error) {
-  deliveredBlocks := con.bcModule.extractBlocks()
-  con.logger.Debug("Last blocks in compaction chain",
-    "delivered", con.bcModule.lastDeliveredBlock(),
-    "pending", con.bcModule.lastPendingBlock())
-  for _, b := range deliveredBlocks {
-    con.deliverBlock(b)
-    con.event.NotifyHeight(b.Position.Height)
-  }
-  return
-}
-
-func (con *Consensus) processBlockLoop() {
-  for {
-    select {
-    case <-con.ctx.Done():
-      return
-    default:
-    }
-    select {
-    case <-con.ctx.Done():
-      return
-    case block := <-con.processBlockChan:
-      if err := con.processBlock(block); err != nil {
-        con.logger.Error("Error processing block",
-          "block", block,
-          "error", err)
-      }
-    }
-  }
-}
-
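deliveryGuard above is a watchdog: it panics if no block is delivered within 60 seconds, and deliverBlock disarms it with a non-blocking send into resetDeliveryGuardTicker. The pattern in isolation (interval shortened, and it returns instead of panicking, purely for illustration):

    package main

    import (
    	"fmt"
    	"time"
    )

    // watchdog trips unless reset receives a signal within every interval.
    func watchdog(reset <-chan struct{}, interval time.Duration) {
    	for {
    		select {
    		case <-reset: // progress was made; re-arm the timer
    		case <-time.After(interval):
    			fmt.Println("no progress for too long")
    			return
    		}
    	}
    }

    func main() {
    	reset := make(chan struct{}, 1)
    	go watchdog(reset, 30*time.Millisecond)
    	for i := 0; i < 3; i++ { // deliveries keep the watchdog quiet
    		time.Sleep(10 * time.Millisecond)
    		select {
    		case reset <- struct{}{}: // non-blocking, as in deliverBlock
    		default:
    		}
    	}
    	time.Sleep(100 * time.Millisecond) // stop delivering; watchdog trips
    }
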
-// processBlock is the entry point to submit one block to a Consensus instance.
-func (con *Consensus) processBlock(block *types.Block) (err error) {
-  // Blocks processed by blockChain can be out-of-order. But the output from
-  // blockChain (deliveredBlocks) cannot, thus we need to protect the part
-  // below with the writer lock.
-  con.lock.Lock()
-  defer con.lock.Unlock()
-  if err = con.bcModule.addBlock(block); err != nil {
-    return
-  }
-  if err = con.deliverFinalizedBlocksWithoutLock(); err != nil {
-    return
-  }
-  return
-}
-
-// proposeBlock sets up the header fields of a block based on its ProposerID.
-func (con *Consensus) proposeBlock(position types.Position) (
-  *types.Block, error) {
-  b, err := con.bcModule.proposeBlock(position, time.Now().UTC(), false)
-  if err != nil {
-    return nil, err
-  }
-  con.logger.Debug("Calling Governance.CRS", "round", b.Position.Round)
-  crs := con.gov.CRS(b.Position.Round)
-  if crs.Equal(common.Hash{}) {
-    con.logger.Error("CRS for round is not ready, unable to prepare block",
-      "position", &b.Position)
-    return nil, ErrCRSNotReady
-  }
-  if err = con.signer.SignCRS(b, crs); err != nil {
-    return nil, err
-  }
-  return b, nil
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/constant.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/constant.go
deleted file mode 100644
index 29dae8b73..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/constant.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import "github.com/dexon-foundation/dexon-consensus/core/utils"
-
-// ConfigRoundShift refers to the difference between a block's round and the
-// config round derived from its state.
-//
-// For example, when the round shift is 2, a block in round 0 should derive
-// the config for round 2.
-const ConfigRoundShift uint64 = 2
-
-// DKGDelayRound refers to the round in which the first DKG is run.
-//
-// For example, when the delay round is 1, the new DKG will run at round 1.
-// Round 0 will have neither DKG nor CRS.
-const DKGDelayRound uint64 = 1
-
-// NoRand is the magic placeholder for the randomness field in blocks
-// proposed before DKGDelayRound.
-var NoRand = []byte("norand")
-
-func init() {
-  utils.SetDKGDelayRound(DKGDelayRound)
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/constant.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/constant.go
deleted file mode 100644
index 37d873d6f..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/constant.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package dkg
-
-import (
-  "github.com/dexon-foundation/bls/ffi/go/bls"
-)
-
-const (
-  curve = bls.BLS12_381
-)
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/dkg.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/dkg.go
deleted file mode 100644
index ab43f5130..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/dkg.go
+++ /dev/null
@@ -1,637 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package dkg
-
-import (
-  "encoding/json"
-  "fmt"
-  "io"
-  "sync"
-  "sync/atomic"
-
-  "github.com/dexon-foundation/bls/ffi/go/bls"
-  "github.com/dexon-foundation/dexon/rlp"
-
-  "github.com/dexon-foundation/dexon-consensus/common"
-  "github.com/dexon-foundation/dexon-consensus/core/crypto"
-)
-
-var (
-  // ErrDuplicatedShare is reported when adding a private key share with the
-  // same id.
-  ErrDuplicatedShare = fmt.Errorf("invalid share")
-  // ErrNoIDToRecover is reported when no id is provided for recovering a
-  // private key.
-  ErrNoIDToRecover = fmt.Errorf("no id to recover private key")
-  // ErrShareNotFound is reported when the private key share of an id is not
-  // found when recovering a private key.
-  ErrShareNotFound = fmt.Errorf("share not found")
-)
-
-const cryptoType = "bls"
-
-var publicKeyLength int
-
-func init() {
-  if err := bls.Init(curve); err != nil {
-    panic(err)
-  }
-
-  pubKey := &bls.PublicKey{}
-  publicKeyLength = len(pubKey.Serialize())
-}
-
-// PrivateKey represents a private key structure that implements the
-// Crypto.PrivateKey interface.
-type PrivateKey struct {
-  privateKey bls.SecretKey
-  publicKey  PublicKey
-}
-
-// EncodeRLP implements rlp.Encoder
-func (prv *PrivateKey) EncodeRLP(w io.Writer) error {
-  return rlp.Encode(w, prv.Bytes())
-}
-
-// DecodeRLP implements rlp.Decoder
-func (prv *PrivateKey) DecodeRLP(s *rlp.Stream) error {
-  var b []byte
-  if err := s.Decode(&b); err != nil {
-    return err
-  }
-  return prv.SetBytes(b)
-}
-
-// MarshalJSON implements json.Marshaller.
-func (prv *PrivateKey) MarshalJSON() ([]byte, error) {
-  return json.Marshal(&prv.privateKey)
-}
-
-// UnmarshalJSON implements json.Unmarshaller.
-func (prv *PrivateKey) UnmarshalJSON(data []byte) error {
-  return json.Unmarshal(data, &prv.privateKey)
-}
-
-// ID is the id for the DKG protocol.
-type ID = bls.ID
-
-// IDs is an array of ID.
-type IDs []ID
-
-// PublicKey represents a public key structure that implements the
-// Crypto.PublicKey interface.
-type PublicKey struct {
-  publicKey bls.PublicKey
-}
-
-// PrivateKeyShares represents the private key shares for the DKG protocol.
-type PrivateKeyShares struct {
-  shares           []PrivateKey
-  shareIndex       map[ID]int
-  masterPrivateKey []bls.SecretKey
-}
-
-// Equal checks equality between two PrivateKeyShares instances.
-func (prvs *PrivateKeyShares) Equal(other *PrivateKeyShares) bool {
-  // Check shares.
-  if len(prvs.shareIndex) != len(other.shareIndex) {
-    return false
-  }
-  for dID, idx := range prvs.shareIndex {
-    otherIdx, exists := other.shareIndex[dID]
-    if !exists {
-      return false
-    }
-    if !prvs.shares[idx].privateKey.IsEqual(
-      &other.shares[otherIdx].privateKey) {
-      return false
-    }
-  }
-  // Check master private keys.
-  if len(prvs.masterPrivateKey) != len(other.masterPrivateKey) {
-    return false
-  }
-  for idx, m := range prvs.masterPrivateKey {
-    if m.GetHexString() != other.masterPrivateKey[idx].GetHexString() {
-      return false
-    }
-  }
-  return true
-}
-
-// EncodeRLP implements rlp.Encoder
-func (prvs *PrivateKeyShares) EncodeRLP(w io.Writer) error {
-  data := make([][][]byte, 3)
-  shares := make([][]byte, len(prvs.shares))
-  for i, s := range prvs.shares {
-    shares[i] = s.Bytes()
-  }
-  data[0] = shares
-
-  shareIndex := make([][]byte, 0)
-  for k, v := range prvs.shareIndex {
-    shareIndex = append(shareIndex, k.GetLittleEndian())
-
-    vBytes, err := rlp.EncodeToBytes(uint64(v))
-    if err != nil {
-      return err
-    }
-    shareIndex = append(shareIndex, vBytes)
-  }
-  data[1] = shareIndex
-
-  mpks := make([][]byte, len(prvs.masterPrivateKey))
-  for i, m := range prvs.masterPrivateKey {
-    mpks[i] = m.GetLittleEndian()
-  }
-  data[2] = mpks
-  return rlp.Encode(w, data)
-}
-
-// DecodeRLP implements rlp.Decoder
-func (prvs *PrivateKeyShares) DecodeRLP(s *rlp.Stream) error {
-  *prvs = PrivateKeyShares{}
-  var dec [][][]byte
-  if err := s.Decode(&dec); err != nil {
-    return err
-  }
-
-  var shares []PrivateKey
-  for _, bs := range dec[0] {
-    var key PrivateKey
-    err := key.SetBytes(bs)
-    if err != nil {
-      return err
-    }
-    shares = append(shares, key)
-  }
-  (*prvs).shares = shares
-
-  sharesIndex := map[ID]int{}
-  for i := 0; i < len(dec[1]); i += 2 {
-    var key ID
-    err := key.SetLittleEndian(dec[1][i])
-    if err != nil {
-      return err
-    }
-
-    var value uint64
-    err = rlp.DecodeBytes(dec[1][i+1], &value)
-    if err != nil {
-      return err
-    }
-
-    sharesIndex[key] = int(value)
-  }
-  (*prvs).shareIndex = sharesIndex
-
-  var mpks []bls.SecretKey
-  for _, bs := range dec[2] {
-    var key bls.SecretKey
-    if err := key.SetLittleEndian(bs); err != nil {
-      return err
-    }
-    mpks = append(mpks, key)
-  }
-  (*prvs).masterPrivateKey = mpks
-
-  return nil
-}
-
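The Encode/Decode pair above flattens the struct into nested byte slices before handing it to the vendored rlp package, since RLP natively understands only strings and lists. A round-trip sketch using the same package (the struct and its field layout are simplified stand-ins for the BLS types, assuming the vendored rlp supports struct encoding as in go-ethereum):

    package main

    import (
    	"fmt"

    	"github.com/dexon-foundation/dexon/rlp"
    )

    // shares mirrors the [][][]byte layout PrivateKeyShares.EncodeRLP builds:
    // one list of serialized shares, one flattened id/index list, and one
    // list of master keys. Plain byte slices stand in for the BLS types.
    type shares struct {
    	Shares     [][]byte
    	ShareIndex [][]byte
    	MasterKeys [][]byte
    }

    func main() {
    	in := shares{
    		Shares:     [][]byte{{0x01}, {0x02}},
    		ShareIndex: [][]byte{{0xaa}, {0x00}},
    		MasterKeys: [][]byte{{0x03}},
    	}
    	b, err := rlp.EncodeToBytes(&in)
    	if err != nil {
    		panic(err)
    	}
    	var out shares
    	if err := rlp.DecodeBytes(b, &out); err != nil {
    		panic(err)
    	}
    	fmt.Println("round-trip ok:", out.Shares[1][0] == 0x02)
    }
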
-type publicKeySharesCache struct {
-  share []PublicKey
-  index map[ID]int
-}
-
-// PublicKeyShares represents the public key shares for the DKG protocol.
-type PublicKeyShares struct {
-  cache           atomic.Value
-  lock            sync.Mutex
-  masterPublicKey []bls.PublicKey
-}
-
-// Equal checks equality of two PublicKeyShares instances.
-func (pubs *PublicKeyShares) Equal(other *PublicKeyShares) bool {
-  cache := pubs.cache.Load().(*publicKeySharesCache)
-  cacheOther := other.cache.Load().(*publicKeySharesCache)
-  // Check shares.
-  for dID, idx := range cache.index {
-    otherIdx, exists := cacheOther.index[dID]
-    if !exists {
-      continue
-    }
-    if !cache.share[idx].publicKey.IsEqual(
-      &cacheOther.share[otherIdx].publicKey) {
-      return false
-    }
-  }
-  // Check master public keys.
-  if len(pubs.masterPublicKey) != len(other.masterPublicKey) {
-    return false
-  }
-  for idx, m := range pubs.masterPublicKey {
-    if m.GetHexString() != other.masterPublicKey[idx].GetHexString() {
-      return false
-    }
-  }
-  return true
-}
-
-// EncodeRLP implements rlp.Encoder
-func (pubs *PublicKeyShares) EncodeRLP(w io.Writer) error {
-  mpks := make([][]byte, len(pubs.masterPublicKey))
-  for i, m := range pubs.masterPublicKey {
-    mpks[i] = m.Serialize()
-  }
-  return rlp.Encode(w, mpks)
-}
-
-// DecodeRLP implements rlp.Decoder
-func (pubs *PublicKeyShares) DecodeRLP(s *rlp.Stream) error {
-  var dec [][]byte
-  if err := s.Decode(&dec); err != nil {
-    return err
-  }
-
-  ps := NewEmptyPublicKeyShares()
-  for _, k := range dec {
-    var key bls.PublicKey
-    if err := key.Deserialize(k); err != nil {
-      return err
-    }
-    ps.masterPublicKey = append(ps.masterPublicKey, key)
-  }
-
-  *pubs = *ps.Move()
-  return nil
-}
-
-// MarshalJSON implements json.Marshaller.
-func (pubs *PublicKeyShares) MarshalJSON() ([]byte, error) {
-  type Alias PublicKeyShares
-  data := &struct {
-    MasterPublicKeys []*bls.PublicKey `json:"master_public_keys"`
-  }{
-    make([]*bls.PublicKey, len(pubs.masterPublicKey)),
-  }
-  for i := range pubs.masterPublicKey {
-    data.MasterPublicKeys[i] = &pubs.masterPublicKey[i]
-  }
-  return json.Marshal(data)
-}
-
-// UnmarshalJSON implements json.Unmarshaller.
-func (pubs *PublicKeyShares) UnmarshalJSON(data []byte) error {
-  type Alias PublicKeyShares
-  aux := &struct {
-    MasterPublicKeys []*bls.PublicKey `json:"master_public_keys"`
-  }{}
-  if err := json.Unmarshal(data, &aux); err != nil {
-    return err
-  }
-  mpk := make([]bls.PublicKey, len(aux.MasterPublicKeys))
-  for i, pk := range aux.MasterPublicKeys {
-    mpk[i] = *pk
-  }
-  pubs.masterPublicKey = mpk
-  return nil
-}
-
-// Clone clones every field of PublicKeyShares. This method is mainly for
-// testing purposes and thus panics on error.
-func (pubs *PublicKeyShares) Clone() *PublicKeyShares {
-  b, err := rlp.EncodeToBytes(pubs)
-  if err != nil {
-    panic(err)
-  }
-  pubsCopy := NewEmptyPublicKeyShares()
-  if err := rlp.DecodeBytes(b, pubsCopy); err != nil {
-    panic(err)
-  }
-  return pubsCopy
-}
-
-// NewID creates a new ID structure.
-func NewID(id []byte) ID {
-  var blsID bls.ID
-  // #nosec G104
-  blsID.SetLittleEndian(id)
-  return blsID
-}
-
-// BytesID creates a new ID structure.
-// It returns an error if the byte slice is not valid.
-func BytesID(id []byte) (ID, error) {
-  var blsID bls.ID
-  // #nosec G104
-  err := blsID.SetLittleEndian(id)
-  return blsID, err
-}
-
-// NewPrivateKey creates a new PrivateKey structure.
-func NewPrivateKey() *PrivateKey {
-  var key bls.SecretKey
-  key.SetByCSPRNG()
-  return &PrivateKey{
-    privateKey: key,
-    publicKey:  *newPublicKey(&key),
-  }
-}
-
-// NewPrivateKeyShares creates DKG private key shares of threshold t.
-func NewPrivateKeyShares(t int) (*PrivateKeyShares, *PublicKeyShares) {
-  var prv bls.SecretKey
-  prv.SetByCSPRNG()
-  msk := prv.GetMasterSecretKey(t)
-  mpk := bls.GetMasterPublicKey(msk)
-  pubShare := NewEmptyPublicKeyShares()
-  pubShare.masterPublicKey = mpk
-  return &PrivateKeyShares{
-    masterPrivateKey: msk,
-    shareIndex:       make(map[ID]int),
-  }, pubShare
-}
-
-// NewEmptyPrivateKeyShares creates an empty private key shares.
-func NewEmptyPrivateKeyShares() *PrivateKeyShares {
-  return &PrivateKeyShares{
-    shareIndex: make(map[ID]int),
-  }
-}
-
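NewPrivateKeyShares above is the dealer side of a threshold scheme: a random master secret is split against a degree-(t-1) polynomial, and the matching public polynomial commitment goes into PublicKeyShares. A usage sketch against this package's own API (the IDs are arbitrary test values; the bls curve is initialized by the package's init()):

    package main

    import (
    	"fmt"

    	"github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
    )

    func main() {
    	const threshold = 3
    	// Dealer side: split a fresh random secret into per-participant shares.
    	prvShares, pubShares := dkg.NewPrivateKeyShares(threshold)
    	ids := make(dkg.IDs, 5)
    	for i := range ids {
    		ids[i] = dkg.NewID([]byte{byte(i + 1)})
    	}
    	prvShares.SetParticipants(ids)
    	// Each participant can check its private share against the public
    	// polynomial commitment published by the dealer.
    	share, ok := prvShares.Share(ids[0])
    	if !ok {
    		panic("share missing")
    	}
    	valid, err := pubShares.VerifyPrvShare(ids[0], share)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("share 0 valid:", valid) // true
    }

-// SetParticipants sets the DKG participants.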
-func (prvs *PrivateKeyShares) SetParticipants(IDs IDs) {
-  prvs.shares = make([]PrivateKey, len(IDs))
-  prvs.shareIndex = make(map[ID]int, len(IDs))
-  for idx, ID := range IDs {
-    // #nosec G104
-    prvs.shares[idx].privateKey.Set(prvs.masterPrivateKey, &ID)
-    prvs.shareIndex[ID] = idx
-  }
-}
-
-// AddShare adds a share.
-func (prvs *PrivateKeyShares) AddShare(ID ID, share *PrivateKey) error {
-  if idx, exist := prvs.shareIndex[ID]; exist {
-    if !share.privateKey.IsEqual(&prvs.shares[idx].privateKey) {
-      return ErrDuplicatedShare
-    }
-    return nil
-  }
-  prvs.shareIndex[ID] = len(prvs.shares)
-  prvs.shares = append(prvs.shares, *share)
-  return nil
-}
-
-// RecoverPrivateKey recovers the private key from the shares.
-func (prvs *PrivateKeyShares) RecoverPrivateKey(qualifyIDs IDs) (
-  *PrivateKey, error) {
-  var prv PrivateKey
-  if len(qualifyIDs) == 0 {
-    return nil, ErrNoIDToRecover
-  }
-  for i, ID := range qualifyIDs {
-    idx, exist := prvs.shareIndex[ID]
-    if !exist {
-      return nil, ErrShareNotFound
-    }
-    if i == 0 {
-      prv.privateKey = prvs.shares[idx].privateKey
-      continue
-    }
-    prv.privateKey.Add(&prvs.shares[idx].privateKey)
-  }
-  return &prv, nil
-}
-
-// RecoverPublicKey recovers the public key from the shares.
-func (prvs *PrivateKeyShares) RecoverPublicKey(qualifyIDs IDs) (
-  *PublicKey, error) {
-  var pub PublicKey
-  if len(qualifyIDs) == 0 {
-    return nil, ErrNoIDToRecover
-  }
-  for i, ID := range qualifyIDs {
-    idx, exist := prvs.shareIndex[ID]
-    if !exist {
-      return nil, ErrShareNotFound
-    }
-    if i == 0 {
-      pub.publicKey = *prvs.shares[idx].privateKey.GetPublicKey()
-      continue
-    }
-    pub.publicKey.Add(prvs.shares[idx].privateKey.GetPublicKey())
-  }
-  return &pub, nil
-}
-
-// Share returns the share for the ID.
-func (prvs *PrivateKeyShares) Share(ID ID) (*PrivateKey, bool) {
-  idx, exist := prvs.shareIndex[ID]
-  if !exist {
-    return nil, false
-  }
-  return &prvs.shares[idx], true
-}
-
-// NewEmptyPublicKeyShares creates an empty public key shares.
-func NewEmptyPublicKeyShares() *PublicKeyShares {
-  pubShares := &PublicKeyShares{}
-  pubShares.cache.Store(&publicKeySharesCache{
-    index: make(map[ID]int),
-  })
-  return pubShares
-}
-
-// Move will invalidate itself. Do not access the original reference.
-func (pubs *PublicKeyShares) Move() *PublicKeyShares {
-  return pubs
-}
-
-// Share returns the share for the ID.
-func (pubs *PublicKeyShares) Share(ID ID) (*PublicKey, error) {
-  cache := pubs.cache.Load().(*publicKeySharesCache)
-  idx, exist := cache.index[ID]
-  if exist {
-    return &cache.share[idx], nil
-  }
-  var pk PublicKey
-  if err := pk.publicKey.Set(pubs.masterPublicKey, &ID); err != nil {
-    return nil, err
-  }
-  if err := pubs.AddShare(ID, &pk); err != nil {
-    return nil, err
-  }
-  return &pk, nil
-}
-
-// AddShare adds a share.
-func (pubs *PublicKeyShares) AddShare(shareID ID, share *PublicKey) error {
-  cache := pubs.cache.Load().(*publicKeySharesCache)
-  if idx, exist := cache.index[shareID]; exist {
-    if !share.publicKey.IsEqual(&cache.share[idx].publicKey) {
-      return ErrDuplicatedShare
-    }
-    return nil
-  }
-  pubs.lock.Lock()
-  defer pubs.lock.Unlock()
-  cache = pubs.cache.Load().(*publicKeySharesCache)
-  newCache := &publicKeySharesCache{
-    index: make(map[ID]int, len(cache.index)+1),
-    share: make([]PublicKey, len(cache.share), len(cache.share)+1),
-  }
-  for k, v := range cache.index {
-    newCache.index[k] = v
-  }
-  copy(newCache.share, cache.share)
-  newCache.index[shareID] = len(newCache.share)
-  newCache.share = append(newCache.share, *share)
-  pubs.cache.Store(newCache)
-  return nil
-}
-
-// VerifyPrvShare verifies that the private key share is valid.
-func (pubs *PublicKeyShares) VerifyPrvShare(ID ID, share *PrivateKey) (
-  bool, error) {
-  var pk bls.PublicKey
-  if err := pk.Set(pubs.masterPublicKey, &ID); err != nil {
-    return false, err
-  }
-  return pk.IsEqual(share.privateKey.GetPublicKey()), nil
-}
-
-// VerifyPubShare verifies that the public key share is valid.
-func (pubs *PublicKeyShares) VerifyPubShare(ID ID, share *PublicKey) (
-  bool, error) {
-  var pk bls.PublicKey
-  if err := pk.Set(pubs.masterPublicKey, &ID); err != nil {
-    return false, err
-  }
-  return pk.IsEqual(&share.publicKey), nil
-}
-
-// RecoverPublicKey recovers the group public key from the shares.
-func (pubs *PublicKeyShares) RecoverPublicKey(qualifyIDs IDs) (
-  *PublicKey, error) {
-  var pub PublicKey
-  if len(qualifyIDs) == 0 {
-    return nil, ErrNoIDToRecover
-  }
-  for i, ID := range qualifyIDs {
-    pk, err := pubs.Share(ID)
-    if err != nil {
-      return nil, err
-    }
-    if i == 0 {
-      pub.publicKey = pk.publicKey
-      continue
-    }
-    pub.publicKey.Add(&pk.publicKey)
-  }
-  return &pub, nil
-}
-
-// MasterKeyBytes returns the []byte representation of the master public key.
-func (pubs *PublicKeyShares) MasterKeyBytes() []byte {
-  bytes := make([]byte, 0, len(pubs.masterPublicKey)*publicKeyLength)
-  for _, pk := range pubs.masterPublicKey {
-    bytes = append(bytes, pk.Serialize()...)
-  }
-  return bytes
-}
-
-// newPublicKey creates a new PublicKey structure.
-func newPublicKey(prvKey *bls.SecretKey) *PublicKey {
-  return &PublicKey{
-    publicKey: *prvKey.GetPublicKey(),
-  }
-}
-
-// newPublicKeyFromBytes creates a new PublicKey structure
-// from a bytes representation of bls.PublicKey
-func newPublicKeyFromBytes(b []byte) (*PublicKey, error) {
-  var pub PublicKey
-  err := pub.publicKey.Deserialize(b)
-  return &pub, err
-}
-
-// PublicKey returns the public key associated with this private key.
-func (prv *PrivateKey) PublicKey() crypto.PublicKey {
-  return prv.publicKey
-}
-
-// Sign calculates a signature.
-func (prv *PrivateKey) Sign(hash common.Hash) (crypto.Signature, error) {
-  msg := string(hash[:])
-  sign := prv.privateKey.Sign(msg)
-  return crypto.Signature{
-    Type:      cryptoType,
-    Signature: sign.Serialize(),
-  }, nil
-}
-
-// Bytes returns the []byte representation of the private key.
-func (prv *PrivateKey) Bytes() []byte {
-  return prv.privateKey.GetLittleEndian()
-}
-
-// SetBytes sets the private key data to []byte.
-func (prv *PrivateKey) SetBytes(bytes []byte) error {
-  var key bls.SecretKey
-  if err := key.SetLittleEndian(bytes); err != nil {
-    return err
-  }
-  prv.privateKey = key
-  prv.publicKey = *newPublicKey(&prv.privateKey)
-  return nil
-}
-
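AddShare above is a textbook copy-on-write cache: readers load an immutable snapshot through atomic.Value with no lock at all, while writers serialize on a mutex, re-load the snapshot under the lock, copy it, and store the extended copy. The pattern reduced to its core (types are illustrative):

    package main

    import (
    	"fmt"
    	"sync"
    	"sync/atomic"
    )

    type snapshot struct {
    	index map[string]int
    }

    type cowCache struct {
    	val  atomic.Value // holds *snapshot; readers never lock
    	lock sync.Mutex   // serializes writers only
    }

    func newCowCache() *cowCache {
    	c := &cowCache{}
    	c.val.Store(&snapshot{index: map[string]int{}})
    	return c
    }

    func (c *cowCache) get(k string) (int, bool) {
    	v, ok := c.val.Load().(*snapshot).index[k]
    	return v, ok
    }

    func (c *cowCache) put(k string, v int) {
    	c.lock.Lock()
    	defer c.lock.Unlock()
    	old := c.val.Load().(*snapshot) // re-load under the writer lock
    	next := &snapshot{index: make(map[string]int, len(old.index)+1)}
    	for kk, vv := range old.index {
    		next.index[kk] = vv
    	}
    	next.index[k] = v
    	c.val.Store(next) // publish the new immutable snapshot
    }

    func main() {
    	c := newCowCache()
    	c.put("a", 1)
    	v, ok := c.get("a")
    	fmt.Println(v, ok) // 1 true
    }

-// String returns the string representation of the private key.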
-func (prv *PrivateKey) String() string {
-  return prv.privateKey.GetHexString()
-}
-
-// VerifySignature checks that the given public key created signature over hash.
-func (pub PublicKey) VerifySignature(
-  hash common.Hash, signature crypto.Signature) bool {
-  if len(signature.Signature) == 0 {
-    return false
-  }
-  var sig bls.Sign
-  if err := sig.Deserialize(signature.Signature[:]); err != nil {
-    fmt.Println(err)
-    return false
-  }
-  msg := string(hash[:])
-  return sig.Verify(&pub.publicKey, msg)
-}
-
-// Bytes returns the []byte representation of the public key.
-func (pub PublicKey) Bytes() []byte {
-  return pub.publicKey.Serialize()
-}
-
-// Serialize returns the bytes representation of the public key.
-func (pub *PublicKey) Serialize() []byte {
-  return pub.publicKey.Serialize()
-}
-
-// Deserialize parses the bytes representation of a public key.
-func (pub *PublicKey) Deserialize(b []byte) error {
-  return pub.publicKey.Deserialize(b)
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/utils.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/utils.go
deleted file mode 100644
index 9e470f0cf..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/utils.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package dkg
-
-import (
-  "encoding/binary"
-  "fmt"
-  "math/rand"
-
-  "github.com/dexon-foundation/bls/ffi/go/bls"
-
-  "github.com/dexon-foundation/dexon-consensus/core/crypto"
-)
-
-// PartialSignature is a partial signature in the DKG+TSIG protocol.
-type PartialSignature crypto.Signature
-
-var (
-  // ErrEmptySignature is reported if the signature is empty.
-  ErrEmptySignature = fmt.Errorf("invalid empty signature")
-)
-
-// RecoverSignature recovers a TSIG signature.
-func RecoverSignature(sigs []PartialSignature, signerIDs IDs) (
-  crypto.Signature, error) {
-  blsSigs := make([]bls.Sign, len(sigs))
-  for i, sig := range sigs {
-    if len(sig.Signature) == 0 {
-      return crypto.Signature{}, ErrEmptySignature
-    }
-    if err := blsSigs[i].Deserialize([]byte(sig.Signature)); err != nil {
-      return crypto.Signature{}, err
-    }
-  }
-  var recoverSig bls.Sign
-  if err := recoverSig.Recover(blsSigs, []bls.ID(signerIDs)); err != nil {
-    return crypto.Signature{}, err
-  }
-  return crypto.Signature{
-    Type:      cryptoType,
-    Signature: recoverSig.Serialize()}, nil
-}
-
-// RecoverGroupPublicKey recovers the group public key.
-func RecoverGroupPublicKey(pubShares []*PublicKeyShares) *PublicKey {
-  var pub *PublicKey
-  for _, pubShare := range pubShares {
-    pk0 := pubShare.masterPublicKey[0]
-    if pub == nil {
-      pub = &PublicKey{
-        publicKey: pk0,
-      }
-    } else {
-      pub.publicKey.Add(&pk0)
-    }
-  }
-  return pub
-}
-
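RecoverSignature above is the combining step of TSIG: given enough partial signatures and their signer IDs, bls.Sign.Recover interpolates the group signature at the polynomial's origin. A usage sketch against this package, under the assumption that common.NewRandomHash is available as a test helper (it exists elsewhere in this vendored tree):

    package main

    import (
    	"fmt"

    	"github.com/dexon-foundation/dexon-consensus/common"
    	"github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
    )

    func main() {
    	const threshold = 3
    	prvShares, pubShares := dkg.NewPrivateKeyShares(threshold)
    	ids := make(dkg.IDs, 5)
    	for i := range ids {
    		ids[i] = dkg.NewID([]byte{byte(i + 1)})
    	}
    	prvShares.SetParticipants(ids)
    	hash := common.NewRandomHash() // assumed helper; any 32-byte hash works
    	// Any threshold participants sign the same hash with their shares.
    	psigs := make([]dkg.PartialSignature, threshold)
    	for i := 0; i < threshold; i++ {
    		share, _ := prvShares.Share(ids[i])
    		sig, err := share.Sign(hash)
    		if err != nil {
    			panic(err)
    		}
    		psigs[i] = dkg.PartialSignature(sig)
    	}
    	// Combine the partials and verify against the group public key.
    	groupSig, err := dkg.RecoverSignature(psigs, ids[:threshold])
    	if err != nil {
    		panic(err)
    	}
    	groupPK := dkg.RecoverGroupPublicKey([]*dkg.PublicKeyShares{pubShares})
    	fmt.Println(groupPK.VerifySignature(hash, groupSig)) // true
    }

-// NewRandomPrivateKeyShares constructs private key shares randomly.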
-func NewRandomPrivateKeyShares() *PrivateKeyShares {
-  // Generate IDs.
-  rndIDs := make(IDs, 10)
-  for i := range rndIDs {
-    id := make([]byte, 8)
-    binary.LittleEndian.PutUint64(id, rand.Uint64())
-    rndIDs[i] = NewID(id)
-  }
-  prvShares := NewEmptyPrivateKeyShares()
-  prvShares.SetParticipants(rndIDs)
-  for _, id := range rndIDs {
-    if err := prvShares.AddShare(id, NewPrivateKey()); err != nil {
-      panic(err)
-    }
-  }
-  return prvShares
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa/ecdsa.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa/ecdsa.go
deleted file mode 100644
index 9da5f4fb5..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa/ecdsa.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package ecdsa
-
-import (
-  "crypto/ecdsa"
-
-  dexCrypto "github.com/dexon-foundation/dexon/crypto"
-
-  "github.com/dexon-foundation/dexon-consensus/common"
-  "github.com/dexon-foundation/dexon-consensus/core/crypto"
-)
-
-const cryptoType = "ecdsa"
-
-func init() {
-  if err := crypto.RegisterSigToPub(cryptoType, SigToPub); err != nil {
-    panic(err)
-  }
-}
-
-// PrivateKey represents a private key structure used in geth that implements
-// the Crypto.PrivateKey interface.
-type PrivateKey struct {
-  privateKey *ecdsa.PrivateKey
-}
-
-// PublicKey represents a public key structure used in geth that implements
-// the Crypto.PublicKey interface.
-type PublicKey struct {
-  publicKey *ecdsa.PublicKey
-}
-
-// NewPrivateKey creates a new PrivateKey structure.
-func NewPrivateKey() (*PrivateKey, error) {
-  key, err := dexCrypto.GenerateKey()
-  if err != nil {
-    return nil, err
-  }
-  return &PrivateKey{privateKey: key}, nil
-}
-
-// NewPrivateKeyFromECDSA creates a new PrivateKey structure from
-// ecdsa.PrivateKey.
-func NewPrivateKeyFromECDSA(key *ecdsa.PrivateKey) *PrivateKey {
-  return &PrivateKey{privateKey: key}
-}
-
-// NewPublicKeyFromECDSA creates a new PublicKey structure from
-// ecdsa.PublicKey.
-func NewPublicKeyFromECDSA(key *ecdsa.PublicKey) *PublicKey {
-  return &PublicKey{publicKey: key}
-}
-
-// NewPublicKeyFromByteSlice constructs an eth.publicKey instance from
-// a byte slice.
-func NewPublicKeyFromByteSlice(b []byte) (crypto.PublicKey, error) {
-  pub, err := dexCrypto.UnmarshalPubkey(b)
-  if err != nil {
-    return &PublicKey{}, err
-  }
-  return &PublicKey{publicKey: pub}, nil
-}
-
-// PublicKey returns the public key associated with this private key.
-func (prv *PrivateKey) PublicKey() crypto.PublicKey {
-  return NewPublicKeyFromECDSA(&(prv.privateKey.PublicKey))
-}
-
-// -// This function is susceptible to chosen plaintext attacks that can leak -// information about the private key that is used for signing. Callers must -// be aware that the given hash cannot be chosen by an adversary. A common -// solution is to hash the input before calculating the signature. -// -// The produced signature is in the [R || S || V] format where V is 0 or 1. -func (prv *PrivateKey) Sign(hash common.Hash) ( - sig crypto.Signature, err error) { - s, err := dexCrypto.Sign(hash[:], prv.privateKey) - sig = crypto.Signature{ - Type: cryptoType, - Signature: s, - } - return -} - -// VerifySignature checks that the given public key created signature over hash. -// The public key should be in compressed (33 bytes) or uncompressed (65 bytes) -// format. -// The signature should have the 64 byte [R || S] format. -func (pub *PublicKey) VerifySignature( - hash common.Hash, signature crypto.Signature) bool { - sig := signature.Signature - if len(sig) == 65 { - // The last byte is for ecrecover. - sig = sig[:64] - } - return dexCrypto.VerifySignature(pub.Bytes(), hash[:], sig) -} - -// Compress encodes a public key to the 33-byte compressed format. -func (pub *PublicKey) Compress() []byte { - return dexCrypto.CompressPubkey(pub.publicKey) -} - -// Bytes returns the []byte representation of the uncompressed public key. (65 bytes) -func (pub *PublicKey) Bytes() []byte { - return dexCrypto.FromECDSAPub(pub.publicKey) -} - -// SigToPub returns the PublicKey that created the given signature. -func SigToPub( - hash common.Hash, signature crypto.Signature) (crypto.PublicKey, error) { - key, err := dexCrypto.SigToPub(hash[:], signature.Signature[:]) - if err != nil { - return &PublicKey{}, err - } - return &PublicKey{publicKey: key}, nil -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/interfaces.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/interfaces.go deleted file mode 100644 index f3e01e42c..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/interfaces.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// <http://www.gnu.org/licenses/>. - -package crypto - -import ( - "github.com/dexon-foundation/dexon-consensus/common" -) - -// Signature is the basic signature type in DEXON. -type Signature struct { - Type string - Signature []byte -} - -// PrivateKey describes the asymmetric cryptography interface that interacts -// with the private key. -type PrivateKey interface { - // PublicKey returns the public key associated with this private key. - PublicKey() PublicKey - - // Sign calculates a signature. - Sign(hash common.Hash) (Signature, error) -} - -// PublicKey describes the asymmetric cryptography interface that interacts -// with the public key.
-type PublicKey interface { - // VerifySignature checks that the given public key created signature over hash. - VerifySignature(hash common.Hash, signature Signature) bool - - // Bytes returns the []byte representation of public key. - Bytes() []byte -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/utils.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/utils.go deleted file mode 100644 index 59e91f5a5..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/utils.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// <http://www.gnu.org/licenses/>. - -package crypto - -import ( - "encoding/hex" - "fmt" - - "github.com/dexon-foundation/dexon/crypto" - - "github.com/dexon-foundation/dexon-consensus/common" -) - -var ( - // ErrSigToPubTypeNotFound is reported if the type is not registered. - ErrSigToPubTypeNotFound = fmt.Errorf("type of sigToPub is not found") - - // ErrSigToPubTypeAlreadyExist is reported if the type is already registered. - ErrSigToPubTypeAlreadyExist = fmt.Errorf("type of sigToPub already exists") -) - -// SigToPubFn is a function to recover public key from signature. -type SigToPubFn func(hash common.Hash, signature Signature) (PublicKey, error) - -var sigToPubCB map[string]SigToPubFn - -func init() { - sigToPubCB = make(map[string]SigToPubFn) -} - -// Keccak256Hash calculates and returns the Keccak256 hash of the input data, -// converting it to an internal Hash data structure. -func Keccak256Hash(data ...[]byte) (h common.Hash) { - return common.Hash(crypto.Keccak256Hash(data...)) -} - -// Clone returns a deep copy of a signature. -func (sig Signature) Clone() Signature { - return Signature{ - Type: sig.Type, - Signature: sig.Signature[:], - } -} - -func (sig Signature) String() string { - return hex.EncodeToString([]byte(sig.Signature[:])) -} - -// RegisterSigToPub registers a sigToPub function for a signature type. -func RegisterSigToPub(sigType string, sigToPub SigToPubFn) error { - if _, exist := sigToPubCB[sigType]; exist { - return ErrSigToPubTypeAlreadyExist - } - sigToPubCB[sigType] = sigToPub - return nil -} - -// SigToPub recovers public key from signature.
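The registry here lets callers recover a public key without knowing the concrete scheme: each implementation registers its SigToPub in init, and the package-level SigToPub dispatches on Signature.Type. A hedged usage sketch (not from the original file); the payload is illustrative, and importing the ecdsa sub-package to trigger its init is assumed:

    import (
        "github.com/dexon-foundation/dexon-consensus/core/crypto"
        "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa"
    )

    prv, _ := ecdsa.NewPrivateKey()
    hash := crypto.Keccak256Hash([]byte("payload"))
    sig, _ := prv.Sign(hash)
    // Dispatches on sig.Type == "ecdsa", registered by the ecdsa package.
    pub, err := crypto.SigToPub(hash, sig)
    if err == nil {
        _ = pub.VerifySignature(hash, sig) // expected to hold
    }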
-func SigToPub(hash common.Hash, signature Signature) (PublicKey, error) { - sigToPub, exist := sigToPubCB[signature.Type] - if !exist { - return nil, ErrSigToPubTypeNotFound - } - return sigToPub(hash, signature) -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/db/interfaces.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/db/interfaces.go deleted file mode 100644 index a571a8021..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/db/interfaces.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// <http://www.gnu.org/licenses/>. - -package db - -import ( - "errors" - "fmt" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" - "github.com/dexon-foundation/dexon-consensus/core/types" -) - -var ( - // ErrBlockExists is the error when the block exists. - ErrBlockExists = errors.New("block exists") - // ErrBlockDoesNotExist is the error when the block does not exist. - ErrBlockDoesNotExist = errors.New("block does not exist") - // ErrIterationFinished is the error to check if the iteration is finished. - ErrIterationFinished = errors.New("iteration finished") - // ErrEmptyPath is the error when the required path is empty. - ErrEmptyPath = fmt.Errorf("empty path") - // ErrClosed is the error when using DB after it's closed. - ErrClosed = fmt.Errorf("db closed") - // ErrNotImplemented is the error that some interface is not implemented. - ErrNotImplemented = fmt.Errorf("not implemented") - // ErrInvalidCompactionChainTipHeight means the newly updated height of - // the tip of compaction chain is invalid, usually means it's smaller than - // current cached one. - ErrInvalidCompactionChainTipHeight = fmt.Errorf( - "invalid compaction chain tip height") - // ErrDKGPrivateKeyExists is raised when attempting to save a DKG private - // key that is already saved. - ErrDKGPrivateKeyExists = errors.New("dkg private key exists") - // ErrDKGPrivateKeyDoesNotExist is raised when the DKG private key of the - // requested round does not exist. - ErrDKGPrivateKeyDoesNotExist = errors.New("dkg private key does not exist") - // ErrDKGProtocolExists is raised when attempting to save a DKG protocol - // that is already saved. - ErrDKGProtocolExists = errors.New("dkg protocol exists") - // ErrDKGProtocolDoesNotExist is raised when the DKG protocol of the - // requested round does not exist. - ErrDKGProtocolDoesNotExist = errors.New("dkg protocol does not exist") -) - -// Database is the interface for a Database. -type Database interface { - Reader - Writer - - // Close allows the database implementation to - // release resources when finished. - Close() error -} - -// Reader defines the interface for reading blocks from a DB.
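Callers are expected to branch on these sentinel errors rather than on error strings. A short hedged sketch (dbInst is any Database implementation; the hash is illustrative):

    block, err := dbInst.GetBlock(hash)
    switch err {
    case nil:
        // Use block.
    case db.ErrBlockDoesNotExist:
        // Expected case; the block has not been stored yet.
    default:
        // A real I/O or decoding failure.
    }
    _ = block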
-type Reader interface { - HasBlock(hash common.Hash) bool - GetBlock(hash common.Hash) (types.Block, error) - GetAllBlocks() (BlockIterator, error) - - // GetCompactionChainTipInfo returns the block hash and finalization height - // of the tip block of compaction chain. Empty hash and zero height mean - // the compaction chain is empty. - GetCompactionChainTipInfo() (common.Hash, uint64) - - // DKG Private Key related methods. - GetDKGPrivateKey(round, reset uint64) (dkg.PrivateKey, error) - GetDKGProtocol() (dkgProtocol DKGProtocolInfo, err error) -} - -// Writer defines the interface for writing blocks into DB. -type Writer interface { - UpdateBlock(block types.Block) error - PutBlock(block types.Block) error - PutCompactionChainTipInfo(common.Hash, uint64) error - PutDKGPrivateKey(round, reset uint64, pk dkg.PrivateKey) error - PutOrUpdateDKGProtocol(dkgProtocol DKGProtocolInfo) error -} - -// BlockIterator defines an iterator on blocks held -// in a DB. -type BlockIterator interface { - NextBlock() (types.Block, error) -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/db/level-db.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/db/level-db.go deleted file mode 100644 index da8bc0bc1..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/db/level-db.go +++ /dev/null @@ -1,573 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// <http://www.gnu.org/licenses/>. - -package db - -import ( - "encoding/binary" - "io" - - "github.com/syndtr/goleveldb/leveldb" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" - "github.com/dexon-foundation/dexon-consensus/core/types" - "github.com/dexon-foundation/dexon/rlp" -) - -var ( - blockKeyPrefix = []byte("b-") - compactionChainTipInfoKey = []byte("cc-tip") - dkgPrivateKeyKeyPrefix = []byte("dkg-prvs") - dkgProtocolInfoKeyPrefix = []byte("dkg-protocol-info") -) - -type compactionChainTipInfo struct { - Height uint64 `json:"height"` - Hash common.Hash `json:"hash"` -} - -// DKGProtocolInfo describes the DKG protocol state. -type DKGProtocolInfo struct { - ID types.NodeID - Round uint64 - Threshold uint64 - IDMap NodeIDToDKGID - MpkMap NodeIDToPubShares - MasterPrivateShare dkg.PrivateKeyShares - IsMasterPrivateShareEmpty bool - PrvShares dkg.PrivateKeyShares - IsPrvSharesEmpty bool - PrvSharesReceived NodeID - NodeComplained NodeID - AntiComplaintReceived NodeIDToNodeIDs - Step uint64 - Reset uint64 -} - -type dkgPrivateKey struct { - PK dkg.PrivateKey - Reset uint64 -} - -// Equal compares the info with a target DKGProtocolInfo.
-func (info *DKGProtocolInfo) Equal(target *DKGProtocolInfo) bool { - if !info.ID.Equal(target.ID) || - info.Round != target.Round || - info.Threshold != target.Threshold || - info.IsMasterPrivateShareEmpty != target.IsMasterPrivateShareEmpty || - info.IsPrvSharesEmpty != target.IsPrvSharesEmpty || - info.Step != target.Step || - info.Reset != target.Reset || - !info.MasterPrivateShare.Equal(&target.MasterPrivateShare) || - !info.PrvShares.Equal(&target.PrvShares) { - return false - } - - if len(info.IDMap) != len(target.IDMap) { - return false - } - for k, v := range info.IDMap { - tV, exist := target.IDMap[k] - if !exist { - return false - } - - if !v.IsEqual(&tV) { - return false - } - } - - if len(info.MpkMap) != len(target.MpkMap) { - return false - } - for k, v := range info.MpkMap { - tV, exist := target.MpkMap[k] - if !exist { - return false - } - - if !v.Equal(tV) { - return false - } - } - - if len(info.PrvSharesReceived) != len(target.PrvSharesReceived) { - return false - } - for k := range info.PrvSharesReceived { - _, exist := target.PrvSharesReceived[k] - if !exist { - return false - } - } - - if len(info.NodeComplained) != len(target.NodeComplained) { - return false - } - for k := range info.NodeComplained { - _, exist := target.NodeComplained[k] - if !exist { - return false - } - } - - if len(info.AntiComplaintReceived) != len(target.AntiComplaintReceived) { - return false - } - for k, v := range info.AntiComplaintReceived { - tV, exist := target.AntiComplaintReceived[k] - if !exist { - return false - } - - if len(v) != len(tV) { - return false - } - for kk := range v { - _, exist := tV[kk] - if !exist { - return false - } - } - } - - return true -} - -// NodeIDToNodeIDs is a map from a NodeID to a set of NodeIDs. -type NodeIDToNodeIDs map[types.NodeID]map[types.NodeID]struct{} - -// EncodeRLP implements rlp.Encoder -func (m NodeIDToNodeIDs) EncodeRLP(w io.Writer) error { - var allBytes [][][]byte - for k, v := range m { - kBytes, err := k.MarshalText() - if err != nil { - return err - } - allBytes = append(allBytes, [][]byte{kBytes}) - - var vBytes [][]byte - for subK := range v { - bytes, err := subK.MarshalText() - if err != nil { - return err - } - vBytes = append(vBytes, bytes) - } - allBytes = append(allBytes, vBytes) - } - - return rlp.Encode(w, allBytes) -} - -// DecodeRLP implements rlp.Decoder -func (m *NodeIDToNodeIDs) DecodeRLP(s *rlp.Stream) error { - *m = make(NodeIDToNodeIDs) - var dec [][][]byte - if err := s.Decode(&dec); err != nil { - return err - } - - for i := 0; i < len(dec); i += 2 { - key := types.NodeID{} - err := key.UnmarshalText(dec[i][0]) - if err != nil { - return err - } - - valueMap := map[types.NodeID]struct{}{} - for _, v := range dec[i+1] { - value := types.NodeID{} - err := value.UnmarshalText(v) - if err != nil { - return err - } - - valueMap[value] = struct{}{} - } - - (*m)[key] = valueMap - } - - return nil -} - -// NodeID is a set of NodeIDs.
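RLP has no native map type, so these helpers flatten each map into alternating key/value entries on encode and rebuild the map on decode. A hedged round-trip sketch (not from the original file); the zero-value NodeIDs are placeholders, assuming they marshal cleanly via MarshalText:

    a, b := types.NodeID{}, types.NodeID{}
    m := db.NodeIDToNodeIDs{a: map[types.NodeID]struct{}{b: {}}}
    raw, err := rlp.EncodeToBytes(m) // uses EncodeRLP above
    if err != nil {
        panic(err)
    }
    var decoded db.NodeIDToNodeIDs
    if err := rlp.DecodeBytes(raw, &decoded); err != nil { // uses DecodeRLP
        panic(err)
    }
    // decoded now holds the same key/value sets as m.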
-type NodeID map[types.NodeID]struct{} - -// EncodeRLP implements rlp.Encoder -func (m NodeID) EncodeRLP(w io.Writer) error { - var allBytes [][]byte - for k := range m { - kBytes, err := k.MarshalText() - if err != nil { - return err - } - allBytes = append(allBytes, kBytes) - } - - return rlp.Encode(w, allBytes) -} - -// DecodeRLP implements rlp.Decoder -func (m *NodeID) DecodeRLP(s *rlp.Stream) error { - *m = make(NodeID) - var dec [][]byte - if err := s.Decode(&dec); err != nil { - return err - } - - for i := 0; i < len(dec); i++ { - key := types.NodeID{} - err := key.UnmarshalText(dec[i]) - if err != nil { - return err - } - - (*m)[key] = struct{}{} - } - - return nil -} - -// NodeIDToPubShares is a map from NodeID to PublicKeyShares. -type NodeIDToPubShares map[types.NodeID]*dkg.PublicKeyShares - -// EncodeRLP implements rlp.Encoder -func (m NodeIDToPubShares) EncodeRLP(w io.Writer) error { - var allBytes [][]byte - for k, v := range m { - kBytes, err := k.MarshalText() - if err != nil { - return err - } - allBytes = append(allBytes, kBytes) - - bytes, err := rlp.EncodeToBytes(v) - if err != nil { - return err - } - allBytes = append(allBytes, bytes) - } - - return rlp.Encode(w, allBytes) -} - -// DecodeRLP implements rlp.Decoder -func (m *NodeIDToPubShares) DecodeRLP(s *rlp.Stream) error { - *m = make(NodeIDToPubShares) - var dec [][]byte - if err := s.Decode(&dec); err != nil { - return err - } - - for i := 0; i < len(dec); i += 2 { - key := types.NodeID{} - err := key.UnmarshalText(dec[i]) - if err != nil { - return err - } - - value := dkg.PublicKeyShares{} - err = rlp.DecodeBytes(dec[i+1], &value) - if err != nil { - return err - } - - (*m)[key] = &value - } - - return nil -} - -// NodeIDToDKGID is a map from NodeID to DKG ID. -type NodeIDToDKGID map[types.NodeID]dkg.ID - -// EncodeRLP implements rlp.Encoder -func (m NodeIDToDKGID) EncodeRLP(w io.Writer) error { - var allBytes [][]byte - for k, v := range m { - kBytes, err := k.MarshalText() - if err != nil { - return err - } - allBytes = append(allBytes, kBytes) - allBytes = append(allBytes, v.GetLittleEndian()) - } - - return rlp.Encode(w, allBytes) -} - -// DecodeRLP implements rlp.Decoder -func (m *NodeIDToDKGID) DecodeRLP(s *rlp.Stream) error { - *m = make(NodeIDToDKGID) - var dec [][]byte - if err := s.Decode(&dec); err != nil { - return err - } - - for i := 0; i < len(dec); i += 2 { - key := types.NodeID{} - err := key.UnmarshalText(dec[i]) - if err != nil { - return err - } - - value := dkg.ID{} - err = value.SetLittleEndian(dec[i+1]) - if err != nil { - return err - } - - (*m)[key] = value - } - - return nil -} - -// LevelDBBackedDB is a leveldb backed DB implementation. -type LevelDBBackedDB struct { - db *leveldb.DB -} - -// NewLevelDBBackedDB initializes a leveldb-backed database. -func NewLevelDBBackedDB( - path string) (lvl *LevelDBBackedDB, err error) { - - dbInst, err := leveldb.OpenFile(path, nil) - if err != nil { - return - } - lvl = &LevelDBBackedDB{db: dbInst} - return -} - -// Close implements the Closer interface, releasing allocated resources. -func (lvl *LevelDBBackedDB) Close() error { - return lvl.db.Close() -} - -// HasBlock implements the Reader.Has method. -func (lvl *LevelDBBackedDB) HasBlock(hash common.Hash) bool { - exists, err := lvl.internalHasBlock(lvl.getBlockKey(hash)) - if err != nil { - panic(err) - } - return exists -} - -func (lvl *LevelDBBackedDB) internalHasBlock(key []byte) (bool, error) { - return lvl.db.Has(key, nil) -} - -// GetBlock implements the Reader.GetBlock method.
-func (lvl *LevelDBBackedDB) GetBlock( - hash common.Hash) (block types.Block, err error) { - queried, err := lvl.db.Get(lvl.getBlockKey(hash), nil) - if err != nil { - if err == leveldb.ErrNotFound { - err = ErrBlockDoesNotExist - } - return - } - err = rlp.DecodeBytes(queried, &block) - return -} - -// UpdateBlock implements the Writer.UpdateBlock method. -func (lvl *LevelDBBackedDB) UpdateBlock(block types.Block) (err error) { - // NOTE: changes to the block hash are not handled here (and they - // should not happen). - marshaled, err := rlp.EncodeToBytes(&block) - if err != nil { - return - } - blockKey := lvl.getBlockKey(block.Hash) - exists, err := lvl.internalHasBlock(blockKey) - if err != nil { - return - } - if !exists { - err = ErrBlockDoesNotExist - return - } - err = lvl.db.Put(blockKey, marshaled, nil) - return -} - -// PutBlock implements the Writer.PutBlock method. -func (lvl *LevelDBBackedDB) PutBlock(block types.Block) (err error) { - marshaled, err := rlp.EncodeToBytes(&block) - if err != nil { - return - } - blockKey := lvl.getBlockKey(block.Hash) - exists, err := lvl.internalHasBlock(blockKey) - if err != nil { - return - } - if exists { - err = ErrBlockExists - return - } - err = lvl.db.Put(blockKey, marshaled, nil) - return -} - -// GetAllBlocks implements Reader.GetAllBlocks method, which allows callers -// to retrieve all blocks in DB. -func (lvl *LevelDBBackedDB) GetAllBlocks() (BlockIterator, error) { - return nil, ErrNotImplemented -} - -// PutCompactionChainTipInfo saves tip of compaction chain into the database. -func (lvl *LevelDBBackedDB) PutCompactionChainTipInfo( - blockHash common.Hash, height uint64) error { - marshaled, err := rlp.EncodeToBytes(&compactionChainTipInfo{ - Hash: blockHash, - Height: height, - }) - if err != nil { - return err - } - // Check current cached tip info to make sure the one to be updated is - // valid. - info, err := lvl.internalGetCompactionChainTipInfo() - if err != nil { - return err - } - if info.Height+1 != height { - return ErrInvalidCompactionChainTipHeight - } - return lvl.db.Put(compactionChainTipInfoKey, marshaled, nil) -} - -func (lvl *LevelDBBackedDB) internalGetCompactionChainTipInfo() ( - info compactionChainTipInfo, err error) { - queried, err := lvl.db.Get(compactionChainTipInfoKey, nil) - if err != nil { - if err == leveldb.ErrNotFound { - err = nil - } - return - } - err = rlp.DecodeBytes(queried, &info) - return -} - -// GetCompactionChainTipInfo gets the tip info of the compaction chain from the -// database. -func (lvl *LevelDBBackedDB) GetCompactionChainTipInfo() ( - hash common.Hash, height uint64) { - info, err := lvl.internalGetCompactionChainTipInfo() - if err != nil { - panic(err) - } - hash, height = info.Hash, info.Height - return -} - -// GetDKGPrivateKey gets the DKG private key of a round. -func (lvl *LevelDBBackedDB) GetDKGPrivateKey(round, reset uint64) ( - prv dkg.PrivateKey, err error) { - queried, err := lvl.db.Get(lvl.getDKGPrivateKeyKey(round), nil) - if err != nil { - if err == leveldb.ErrNotFound { - err = ErrDKGPrivateKeyDoesNotExist - } - return - } - pk := dkgPrivateKey{} - if err = rlp.DecodeBytes(queried, &pk); err != nil { - return - } - if pk.Reset != reset { - err = ErrDKGPrivateKeyDoesNotExist - return - } - prv = pk.PK - return -} - -// PutDKGPrivateKey saves the DKG private key of a round. -func (lvl *LevelDBBackedDB) PutDKGPrivateKey( - round, reset uint64, prv dkg.PrivateKey) error { - // Check existence.
- _, err := lvl.GetDKGPrivateKey(round, reset) - if err == nil { - return ErrDKGPrivateKeyExists - } - if err != ErrDKGPrivateKeyDoesNotExist { - return err - } - pk := &dkgPrivateKey{ - PK: prv, - Reset: reset, - } - marshaled, err := rlp.EncodeToBytes(&pk) - if err != nil { - return err - } - return lvl.db.Put( - lvl.getDKGPrivateKeyKey(round), marshaled, nil) -} - -// GetDKGProtocol gets the DKG protocol. -func (lvl *LevelDBBackedDB) GetDKGProtocol() ( - info DKGProtocolInfo, err error) { - queried, err := lvl.db.Get(lvl.getDKGProtocolInfoKey(), nil) - if err != nil { - if err == leveldb.ErrNotFound { - err = ErrDKGProtocolDoesNotExist - } - return - } - - err = rlp.DecodeBytes(queried, &info) - return -} - -// PutOrUpdateDKGProtocol saves the DKG protocol. -func (lvl *LevelDBBackedDB) PutOrUpdateDKGProtocol(info DKGProtocolInfo) error { - marshaled, err := rlp.EncodeToBytes(&info) - if err != nil { - return err - } - return lvl.db.Put(lvl.getDKGProtocolInfoKey(), marshaled, nil) -} - -func (lvl *LevelDBBackedDB) getBlockKey(hash common.Hash) (ret []byte) { - ret = make([]byte, len(blockKeyPrefix)+len(hash[:])) - copy(ret, blockKeyPrefix) - copy(ret[len(blockKeyPrefix):], hash[:]) - return -} - -func (lvl *LevelDBBackedDB) getDKGPrivateKeyKey( - round uint64) (ret []byte) { - ret = make([]byte, len(dkgPrivateKeyKeyPrefix)+8) - copy(ret, dkgPrivateKeyKeyPrefix) - binary.LittleEndian.PutUint64( - ret[len(dkgPrivateKeyKeyPrefix):], round) - return -} - -func (lvl *LevelDBBackedDB) getDKGProtocolInfoKey() (ret []byte) { - ret = make([]byte, len(dkgProtocolInfoKeyPrefix)+8) - copy(ret, dkgProtocolInfoKeyPrefix) - return -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/db/memory.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/db/memory.go deleted file mode 100644 index 6555de855..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/db/memory.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// <http://www.gnu.org/licenses/>. - -package db - -import ( - "encoding/json" - "io/ioutil" - "os" - "sync" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" - "github.com/dexon-foundation/dexon-consensus/core/types" -) - -type blockSeqIterator struct { - idx int - db *MemBackedDB -} - -// NextBlock implements the BlockIterator.NextBlock method. -func (seq *blockSeqIterator) NextBlock() (types.Block, error) { - curIdx := seq.idx - seq.idx++ - return seq.db.getBlockByIndex(curIdx) -} - -// MemBackedDB is a memory-backed DB implementation.
-type MemBackedDB struct { - blocksLock sync.RWMutex - blockHashSequence common.Hashes - blocksByHash map[common.Hash]*types.Block - compactionChainTipLock sync.RWMutex - compactionChainTipHash common.Hash - compactionChainTipHeight uint64 - dkgPrivateKeysLock sync.RWMutex - dkgPrivateKeys map[uint64]*dkgPrivateKey - dkgProtocolLock sync.RWMutex - dkgProtocolInfo *DKGProtocolInfo - persistantFilePath string -} - -// NewMemBackedDB initializes a memory-backed database. -func NewMemBackedDB(persistantFilePath ...string) ( - dbInst *MemBackedDB, err error) { - dbInst = &MemBackedDB{ - blockHashSequence: common.Hashes{}, - blocksByHash: make(map[common.Hash]*types.Block), - dkgPrivateKeys: make(map[uint64]*dkgPrivateKey), - } - if len(persistantFilePath) == 0 || len(persistantFilePath[0]) == 0 { - return - } - dbInst.persistantFilePath = persistantFilePath[0] - buf, err := ioutil.ReadFile(dbInst.persistantFilePath) - if err != nil { - if !os.IsNotExist(err) { - // Something unexpected happened. - return - } - // It's expected behavior that the file doesn't exist; we should not - // report an error for it. - err = nil - return - } - - // Initialize this instance from the file content; it's a temporary way - // to export those private fields for JSON encoding. - toLoad := struct { - Sequence common.Hashes - ByHash map[common.Hash]*types.Block - }{} - err = json.Unmarshal(buf, &toLoad) - if err != nil { - return - } - dbInst.blockHashSequence = toLoad.Sequence - dbInst.blocksByHash = toLoad.ByHash - return -} - -// HasBlock returns whether or not the DB has a block identified with the hash. -func (m *MemBackedDB) HasBlock(hash common.Hash) bool { - m.blocksLock.RLock() - defer m.blocksLock.RUnlock() - - _, ok := m.blocksByHash[hash] - return ok -} - -// GetBlock returns a block given a hash. -func (m *MemBackedDB) GetBlock(hash common.Hash) (types.Block, error) { - m.blocksLock.RLock() - defer m.blocksLock.RUnlock() - - return m.internalGetBlock(hash) -} - -func (m *MemBackedDB) internalGetBlock(hash common.Hash) (types.Block, error) { - b, ok := m.blocksByHash[hash] - if !ok { - return types.Block{}, ErrBlockDoesNotExist - } - return *b, nil -} - -// PutBlock inserts a new block into the database. -func (m *MemBackedDB) PutBlock(block types.Block) error { - if m.HasBlock(block.Hash) { - return ErrBlockExists - } - - m.blocksLock.Lock() - defer m.blocksLock.Unlock() - - m.blockHashSequence = append(m.blockHashSequence, block.Hash) - m.blocksByHash[block.Hash] = &block - return nil -} - -// UpdateBlock updates a block in the database. -func (m *MemBackedDB) UpdateBlock(block types.Block) error { - if !m.HasBlock(block.Hash) { - return ErrBlockDoesNotExist - } - - m.blocksLock.Lock() - defer m.blocksLock.Unlock() - - m.blocksByHash[block.Hash] = &block - return nil -} - -// PutCompactionChainTipInfo saves tip of compaction chain into the database. -func (m *MemBackedDB) PutCompactionChainTipInfo( - blockHash common.Hash, height uint64) error { - m.compactionChainTipLock.Lock() - defer m.compactionChainTipLock.Unlock() - if m.compactionChainTipHeight+1 != height { - return ErrInvalidCompactionChainTipHeight - } - m.compactionChainTipHeight = height - m.compactionChainTipHash = blockHash - return nil -} - -// GetCompactionChainTipInfo gets the tip info of the compaction chain from the -// database.
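A hedged usage sketch of the memory-backed implementation (not from the original file); the file path is illustrative, and the zero-value block is a placeholder a real caller would fill in:

    dbInst, err := db.NewMemBackedDB("/tmp/consensus-state.json")
    if err != nil {
        panic(err)
    }
    var b types.Block // placeholder; a real block carries hash, payload, etc.
    if err := dbInst.PutBlock(b); err != nil {
        panic(err)
    }
    got, _ := dbInst.GetBlock(b.Hash)
    _ = got
    // Close persists the block sequence to the JSON file given above.
    _ = dbInst.Close()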
-func (m *MemBackedDB) GetCompactionChainTipInfo() ( - hash common.Hash, height uint64) { - m.compactionChainTipLock.RLock() - defer m.compactionChainTipLock.RUnlock() - return m.compactionChainTipHash, m.compactionChainTipHeight -} - -// GetDKGPrivateKey gets the DKG private key of a round. -func (m *MemBackedDB) GetDKGPrivateKey(round, reset uint64) ( - dkg.PrivateKey, error) { - m.dkgPrivateKeysLock.RLock() - defer m.dkgPrivateKeysLock.RUnlock() - if prv, exists := m.dkgPrivateKeys[round]; exists && prv.Reset == reset { - return prv.PK, nil - } - return dkg.PrivateKey{}, ErrDKGPrivateKeyDoesNotExist -} - -// PutDKGPrivateKey saves the DKG private key of a round. -func (m *MemBackedDB) PutDKGPrivateKey( - round, reset uint64, prv dkg.PrivateKey) error { - m.dkgPrivateKeysLock.Lock() - defer m.dkgPrivateKeysLock.Unlock() - if prv, exists := m.dkgPrivateKeys[round]; exists && prv.Reset == reset { - return ErrDKGPrivateKeyExists - } - m.dkgPrivateKeys[round] = &dkgPrivateKey{ - PK: prv, - Reset: reset, - } - return nil -} - -// GetDKGProtocol gets the DKG protocol. -func (m *MemBackedDB) GetDKGProtocol() ( - DKGProtocolInfo, error) { - m.dkgProtocolLock.RLock() - defer m.dkgProtocolLock.RUnlock() - if m.dkgProtocolInfo == nil { - return DKGProtocolInfo{}, ErrDKGProtocolDoesNotExist - } - - return *m.dkgProtocolInfo, nil -} - -// PutOrUpdateDKGProtocol saves the DKG protocol. -func (m *MemBackedDB) PutOrUpdateDKGProtocol(dkgProtocol DKGProtocolInfo) error { - m.dkgProtocolLock.Lock() - defer m.dkgProtocolLock.Unlock() - m.dkgProtocolInfo = &dkgProtocol - return nil -} - -// Close implements the Closer interface, releasing allocated resources. -func (m *MemBackedDB) Close() (err error) { - // Save internal state to a JSON file. It's a temporary way - // to dump private fields via JSON encoding. - if len(m.persistantFilePath) == 0 { - return - } - - m.blocksLock.RLock() - defer m.blocksLock.RUnlock() - - toDump := struct { - Sequence common.Hashes - ByHash map[common.Hash]*types.Block - }{ - Sequence: m.blockHashSequence, - ByHash: m.blocksByHash, - } - - // Dump to JSON. - buf, err := json.Marshal(&toDump) - if err != nil { - return - } - - err = ioutil.WriteFile(m.persistantFilePath, buf, 0644) - return -} - -func (m *MemBackedDB) getBlockByIndex(idx int) (types.Block, error) { - m.blocksLock.RLock() - defer m.blocksLock.RUnlock() - - if idx >= len(m.blockHashSequence) { - return types.Block{}, ErrIterationFinished - } - - hash := m.blockHashSequence[idx] - return m.internalGetBlock(hash) -} - -// GetAllBlocks implements the Reader.GetAllBlocks method, which allows callers -// to retrieve all blocks in the DB. -func (m *MemBackedDB) GetAllBlocks() (BlockIterator, error) { - return &blockSeqIterator{db: m}, nil -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/dkg-tsig-protocol.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/dkg-tsig-protocol.go deleted file mode 100644 index ce5c89c47..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/dkg-tsig-protocol.go +++ /dev/null @@ -1,709 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version.
-// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// <http://www.gnu.org/licenses/>. - -package core - -import ( - "fmt" - "sync" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/crypto" - "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" - "github.com/dexon-foundation/dexon-consensus/core/db" - "github.com/dexon-foundation/dexon-consensus/core/types" - typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" - "github.com/dexon-foundation/dexon-consensus/core/utils" -) - -// Errors for dkg module. -var ( - ErrNotDKGParticipant = fmt.Errorf( - "not a DKG participant") - ErrNotQualifyDKGParticipant = fmt.Errorf( - "not a qualified DKG participant") - ErrIDShareNotFound = fmt.Errorf( - "private share not found for specific ID") - ErrIncorrectPrivateShareSignature = fmt.Errorf( - "incorrect private share signature") - ErrMismatchPartialSignatureHash = fmt.Errorf( - "mismatch partialSignature hash") - ErrIncorrectPartialSignatureSignature = fmt.Errorf( - "incorrect partialSignature signature") - ErrIncorrectPartialSignature = fmt.Errorf( - "incorrect partialSignature") - ErrNotEnoughtPartialSignatures = fmt.Errorf( - "not enough partial signatures") - ErrRoundAlreadyPurged = fmt.Errorf( - "cache of round already been purged") - ErrTSigNotReady = fmt.Errorf( - "tsig not ready") - ErrSelfMPKNotRegister = fmt.Errorf( - "self mpk not registered") - ErrUnableGetSelfPrvShare = fmt.Errorf( - "unable to get self DKG PrivateShare") - ErrSelfPrvShareMismatch = fmt.Errorf( - "self privateShare does not match mpk registered") -) - -// ErrUnexpectedDKGResetCount represents receiving a DKG message with unexpected -// DKG reset count. -type ErrUnexpectedDKGResetCount struct { - expect, actual uint64 - proposerID types.NodeID -} - -func (e ErrUnexpectedDKGResetCount) Error() string { - return fmt.Sprintf( - "unexpected DKG reset count, from:%s expect:%d actual:%d", - e.proposerID.String()[:6], e.expect, e.actual) -} - -// ErrUnexpectedRound represents receiving a DKG message with unexpected round. -type ErrUnexpectedRound struct { - expect, actual uint64 - proposerID types.NodeID -} - -func (e ErrUnexpectedRound) Error() string { - return fmt.Sprintf("unexpected round, from:%s expect:%d actual:%d", - e.proposerID.String()[:6], e.expect, e.actual) -} - -type dkgReceiver interface { - // ProposeDKGComplaint proposes a DKGComplaint. - ProposeDKGComplaint(complaint *typesDKG.Complaint) - - // ProposeDKGMasterPublicKey proposes a DKGMasterPublicKey. - ProposeDKGMasterPublicKey(mpk *typesDKG.MasterPublicKey) - - // ProposeDKGPrivateShare proposes a DKGPrivateShare. - ProposeDKGPrivateShare(prv *typesDKG.PrivateShare) - - // ProposeDKGAntiNackComplaint proposes a DKGPrivateShare as an anti-complaint. - ProposeDKGAntiNackComplaint(prv *typesDKG.PrivateShare) - - // ProposeDKGMPKReady proposes a DKGMPKReady message. - ProposeDKGMPKReady(ready *typesDKG.MPKReady) - - // ProposeDKGFinalize proposes a DKGFinalize message. - ProposeDKGFinalize(final *typesDKG.Finalize) - - // ProposeDKGSuccess proposes a DKGSuccess message.
- ProposeDKGSuccess(final *typesDKG.Success) -} - -type dkgProtocol struct { - ID types.NodeID - recv dkgReceiver - round uint64 - reset uint64 - threshold int - idMap map[types.NodeID]dkg.ID - mpkMap map[types.NodeID]*dkg.PublicKeyShares - masterPrivateShare *dkg.PrivateKeyShares - prvShares *dkg.PrivateKeyShares - prvSharesReceived map[types.NodeID]struct{} - nodeComplained map[types.NodeID]struct{} - // Complaint[from][to]'s anti is saved to antiComplaint[from][to]. - antiComplaintReceived map[types.NodeID]map[types.NodeID]struct{} - // The completed step in `runDKG`. - step int -} - -func (d *dkgProtocol) convertFromInfo(info db.DKGProtocolInfo) { - d.ID = info.ID - d.round = info.Round - d.threshold = int(info.Threshold) - d.idMap = info.IDMap - d.mpkMap = info.MpkMap - d.prvSharesReceived = info.PrvSharesReceived - d.nodeComplained = info.NodeComplained - d.antiComplaintReceived = info.AntiComplaintReceived - d.step = int(info.Step) - d.reset = info.Reset - if info.IsMasterPrivateShareEmpty { - d.masterPrivateShare = nil - } else { - d.masterPrivateShare = &info.MasterPrivateShare - } - - if info.IsPrvSharesEmpty { - d.prvShares = nil - } else { - d.prvShares = &info.PrvShares - } -} - -func (d *dkgProtocol) toDKGProtocolInfo() db.DKGProtocolInfo { - info := db.DKGProtocolInfo{ - ID: d.ID, - Round: d.round, - Threshold: uint64(d.threshold), - IDMap: d.idMap, - MpkMap: d.mpkMap, - PrvSharesReceived: d.prvSharesReceived, - NodeComplained: d.nodeComplained, - AntiComplaintReceived: d.antiComplaintReceived, - Step: uint64(d.step), - Reset: d.reset, - } - - if d.masterPrivateShare != nil { - info.MasterPrivateShare = *d.masterPrivateShare - } else { - info.IsMasterPrivateShareEmpty = true - } - - if d.prvShares != nil { - info.PrvShares = *d.prvShares - } else { - info.IsPrvSharesEmpty = true - } - - return info -} - -type dkgShareSecret struct { - privateKey *dkg.PrivateKey -} - -// TSigVerifier is the interface verifying threshold signature. -type TSigVerifier interface { - VerifySignature(hash common.Hash, sig crypto.Signature) bool -} - -// TSigVerifierCacheInterface specifies interface used by TSigVerifierCache. -type TSigVerifierCacheInterface interface { - // Configuration returns the configuration at a given round. - // Return the genesis configuration if round == 0. - Configuration(round uint64) *types.Config - - // DKGComplaints gets all the DKGComplaints of round. - DKGComplaints(round uint64) []*typesDKG.Complaint - - // DKGMasterPublicKeys gets all the DKGMasterPublicKey of round. - DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey - - // IsDKGFinal checks if DKG is final. - IsDKGFinal(round uint64) bool -} - -// TSigVerifierCache is the cache for TSigVerifier.
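The two converters above exist so a node can checkpoint an in-flight DKG and resume after a crash. A hedged sketch of the intended flow within this package (not from the original file; coreDB, nodeID, recv, round and reset are assumptions standing in for the caller's state):

    // After finishing a step, snapshot the protocol state to the DB.
    info := d.toDKGProtocolInfo()
    if err := coreDB.PutOrUpdateDKGProtocol(info); err != nil {
        panic(err)
    }
    // On restart, recoverDKGProtocol reloads the snapshot; it returns
    // (nil, nil) when nothing was saved or when the saved ID/round/reset
    // no longer match, in which case a fresh protocol should be started.
    d2, err := recoverDKGProtocol(nodeID, recv, round, reset, coreDB)
    if err == nil && d2 == nil {
        d2 = newDKGProtocol(nodeID, recv, round, reset, threshold)
    }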
-type TSigVerifierCache struct { - intf TSigVerifierCacheInterface - verifier map[uint64]TSigVerifier - minRound uint64 - cacheSize int - lock sync.RWMutex -} - -type tsigProtocol struct { - nodePublicKeys *typesDKG.NodePublicKeys - hash common.Hash - sigs map[dkg.ID]dkg.PartialSignature - threshold int -} - -func newDKGProtocol( - ID types.NodeID, - recv dkgReceiver, - round uint64, - reset uint64, - threshold int) *dkgProtocol { - - prvShare, pubShare := dkg.NewPrivateKeyShares(threshold) - - recv.ProposeDKGMasterPublicKey(&typesDKG.MasterPublicKey{ - Round: round, - Reset: reset, - DKGID: typesDKG.NewID(ID), - PublicKeyShares: *pubShare.Move(), - }) - - return &dkgProtocol{ - ID: ID, - recv: recv, - round: round, - reset: reset, - threshold: threshold, - idMap: make(map[types.NodeID]dkg.ID), - mpkMap: make(map[types.NodeID]*dkg.PublicKeyShares), - masterPrivateShare: prvShare, - prvShares: dkg.NewEmptyPrivateKeyShares(), - prvSharesReceived: make(map[types.NodeID]struct{}), - nodeComplained: make(map[types.NodeID]struct{}), - antiComplaintReceived: make(map[types.NodeID]map[types.NodeID]struct{}), - } -} - -func recoverDKGProtocol( - ID types.NodeID, - recv dkgReceiver, - round uint64, - reset uint64, - coreDB db.Database) (*dkgProtocol, error) { - dkgProtocolInfo, err := coreDB.GetDKGProtocol() - if err != nil { - if err == db.ErrDKGProtocolDoesNotExist { - return nil, nil - } - return nil, err - } - - dkgProtocol := dkgProtocol{ - recv: recv, - } - dkgProtocol.convertFromInfo(dkgProtocolInfo) - - if dkgProtocol.ID != ID || dkgProtocol.round != round || dkgProtocol.reset != reset { - return nil, nil - } - - return &dkgProtocol, nil -} - -func (d *dkgProtocol) processMasterPublicKeys( - mpks []*typesDKG.MasterPublicKey) (err error) { - d.idMap = make(map[types.NodeID]dkg.ID, len(mpks)) - d.mpkMap = make(map[types.NodeID]*dkg.PublicKeyShares, len(mpks)) - d.prvSharesReceived = make(map[types.NodeID]struct{}, len(mpks)) - ids := make(dkg.IDs, len(mpks)) - for i := range mpks { - if mpks[i].Reset != d.reset { - return ErrUnexpectedDKGResetCount{ - expect: d.reset, - actual: mpks[i].Reset, - proposerID: mpks[i].ProposerID, - } - } - nID := mpks[i].ProposerID - d.idMap[nID] = mpks[i].DKGID - d.mpkMap[nID] = &mpks[i].PublicKeyShares - ids[i] = mpks[i].DKGID - } - d.masterPrivateShare.SetParticipants(ids) - if err = d.verifySelfPrvShare(); err != nil { - return - } - for _, mpk := range mpks { - share, ok := d.masterPrivateShare.Share(mpk.DKGID) - if !ok { - err = ErrIDShareNotFound - continue - } - d.recv.ProposeDKGPrivateShare(&typesDKG.PrivateShare{ - ReceiverID: mpk.ProposerID, - Round: d.round, - Reset: d.reset, - PrivateShare: *share, - }) - } - return -} - -func (d *dkgProtocol) verifySelfPrvShare() error { - selfMPK, exist := d.mpkMap[d.ID] - if !exist { - return ErrSelfMPKNotRegister - } - share, ok := d.masterPrivateShare.Share(d.idMap[d.ID]) - if !ok { - return ErrUnableGetSelfPrvShare - } - ok, err := selfMPK.VerifyPrvShare( - d.idMap[d.ID], share) - if err != nil { - return err - } - if !ok { - return ErrSelfPrvShareMismatch - } - return nil -} - -func (d *dkgProtocol) proposeNackComplaints() { - for nID := range d.mpkMap { - if _, exist := d.prvSharesReceived[nID]; exist { - continue - } - d.recv.ProposeDKGComplaint(&typesDKG.Complaint{ - Round: d.round, - Reset: d.reset, - PrivateShare: typesDKG.PrivateShare{ - ProposerID: nID, - Round: d.round, - Reset: d.reset, - }, - }) - } -} - -func (d *dkgProtocol) processNackComplaints(complaints []*typesDKG.Complaint) ( - err error) { - 
if err = d.verifySelfPrvShare(); err != nil { - return - } - for _, complaint := range complaints { - if !complaint.IsNack() { - continue - } - if complaint.Reset != d.reset { - continue - } - if complaint.PrivateShare.ProposerID != d.ID { - continue - } - id, exist := d.idMap[complaint.ProposerID] - if !exist { - err = ErrNotDKGParticipant - continue - } - share, ok := d.masterPrivateShare.Share(id) - if !ok { - err = ErrIDShareNotFound - continue - } - d.recv.ProposeDKGAntiNackComplaint(&typesDKG.PrivateShare{ - ProposerID: d.ID, - ReceiverID: complaint.ProposerID, - Round: d.round, - Reset: d.reset, - PrivateShare: *share, - }) - } - return -} - -func (d *dkgProtocol) enforceNackComplaints(complaints []*typesDKG.Complaint) { - complained := make(map[types.NodeID]struct{}) - // Do not propose nack complaint to itself. - complained[d.ID] = struct{}{} - for _, complaint := range complaints { - if d.round != complaint.Round || d.reset != complaint.Reset { - continue - } - if !complaint.IsNack() { - continue - } - to := complaint.PrivateShare.ProposerID - if _, exist := complained[to]; exist { - continue - } - from := complaint.ProposerID - // Nack complaint is already proposed. - if from == d.ID { - continue - } - if _, exist := - d.antiComplaintReceived[from][to]; !exist { - complained[to] = struct{}{} - d.recv.ProposeDKGComplaint(&typesDKG.Complaint{ - Round: d.round, - Reset: d.reset, - PrivateShare: typesDKG.PrivateShare{ - ProposerID: to, - Round: d.round, - Reset: d.reset, - }, - }) - } - } -} - -func (d *dkgProtocol) sanityCheck(prvShare *typesDKG.PrivateShare) error { - if d.round != prvShare.Round { - return ErrUnexpectedRound{ - expect: d.round, - actual: prvShare.Round, - proposerID: prvShare.ProposerID, - } - } - if d.reset != prvShare.Reset { - return ErrUnexpectedDKGResetCount{ - expect: d.reset, - actual: prvShare.Reset, - proposerID: prvShare.ProposerID, - } - } - if _, exist := d.idMap[prvShare.ProposerID]; !exist { - return ErrNotDKGParticipant - } - ok, err := utils.VerifyDKGPrivateShareSignature(prvShare) - if err != nil { - return err - } - if !ok { - return ErrIncorrectPrivateShareSignature - } - return nil -} - -func (d *dkgProtocol) processPrivateShare( - prvShare *typesDKG.PrivateShare) error { - receiverID, exist := d.idMap[prvShare.ReceiverID] - // If this node is not a DKG participant, ignore the private share.
- if !exist { - return nil - } - if prvShare.ReceiverID == d.ID { - if _, exist := d.prvSharesReceived[prvShare.ProposerID]; exist { - return nil - } - } else { - if _, exist := d.antiComplaintReceived[prvShare.ReceiverID]; exist { - if _, exist := - d.antiComplaintReceived[prvShare.ReceiverID][prvShare.ProposerID]; exist { - return nil - } - } - } - if err := d.sanityCheck(prvShare); err != nil { - return err - } - mpk := d.mpkMap[prvShare.ProposerID] - ok, err := mpk.VerifyPrvShare(receiverID, &prvShare.PrivateShare) - if err != nil { - return err - } - if prvShare.ReceiverID == d.ID { - d.prvSharesReceived[prvShare.ProposerID] = struct{}{} - } - if !ok { - if _, exist := d.nodeComplained[prvShare.ProposerID]; exist { - return nil - } - complaint := &typesDKG.Complaint{ - Round: d.round, - Reset: d.reset, - PrivateShare: *prvShare, - } - d.nodeComplained[prvShare.ProposerID] = struct{}{} - d.recv.ProposeDKGComplaint(complaint) - } else if prvShare.ReceiverID == d.ID { - sender := d.idMap[prvShare.ProposerID] - if err := d.prvShares.AddShare(sender, &prvShare.PrivateShare); err != nil { - return err - } - } else { - // The prvShare is an anti complaint. - if _, exist := d.antiComplaintReceived[prvShare.ReceiverID]; !exist { - d.antiComplaintReceived[prvShare.ReceiverID] = - make(map[types.NodeID]struct{}) - } - if _, exist := - d.antiComplaintReceived[prvShare.ReceiverID][prvShare.ProposerID]; !exist { - d.recv.ProposeDKGAntiNackComplaint(prvShare) - d.antiComplaintReceived[prvShare.ReceiverID][prvShare.ProposerID] = - struct{}{} - } - } - return nil -} - -func (d *dkgProtocol) proposeMPKReady() { - d.recv.ProposeDKGMPKReady(&typesDKG.MPKReady{ - ProposerID: d.ID, - Round: d.round, - Reset: d.reset, - }) -} - -func (d *dkgProtocol) proposeFinalize() { - d.recv.ProposeDKGFinalize(&typesDKG.Finalize{ - ProposerID: d.ID, - Round: d.round, - Reset: d.reset, - }) -} - -func (d *dkgProtocol) proposeSuccess() { - d.recv.ProposeDKGSuccess(&typesDKG.Success{ - ProposerID: d.ID, - Round: d.round, - Reset: d.reset, - }) -} - -func (d *dkgProtocol) recoverShareSecret(qualifyIDs dkg.IDs) ( - *dkgShareSecret, error) { - if len(qualifyIDs) < d.threshold { - return nil, typesDKG.ErrNotReachThreshold - } - prvKey, err := d.prvShares.RecoverPrivateKey(qualifyIDs) - if err != nil { - return nil, err - } - return &dkgShareSecret{ - privateKey: prvKey, - }, nil -} - -func (ss *dkgShareSecret) sign(hash common.Hash) dkg.PartialSignature { - // DKG sign always succeeds. - sig, _ := ss.privateKey.Sign(hash) - return dkg.PartialSignature(sig) -} - -// NewTSigVerifierCache creates a TSigVerifierCache instance. -func NewTSigVerifierCache( - intf TSigVerifierCacheInterface, cacheSize int) *TSigVerifierCache { - return &TSigVerifierCache{ - intf: intf, - verifier: make(map[uint64]TSigVerifier), - cacheSize: cacheSize, - } -} - -// UpdateAndGet calls Update and then Get. -func (tc *TSigVerifierCache) UpdateAndGet(round uint64) ( - TSigVerifier, bool, error) { - ok, err := tc.Update(round) - if err != nil { - return nil, false, err - } - if !ok { - return nil, false, nil - } - v, ok := tc.Get(round) - return v, ok, nil -} - -// Purge the cache. -func (tc *TSigVerifierCache) Purge(round uint64) { - tc.lock.Lock() - defer tc.lock.Unlock() - delete(tc.verifier, round) -} - -// Update the cache and report whether it succeeded.
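A hedged sketch of the intended call pattern for the cache (not from the original file); gov stands in for any TSigVerifierCacheInterface implementation, and hash/sig are an agreement result a caller wants to check:

    cache := NewTSigVerifierCache(gov, 5) // keep verifiers for 5 rounds
    v, ok, err := cache.UpdateAndGet(round)
    if err != nil {
        panic(err)
    }
    if !ok {
        // DKG for this round is not final yet; retry later.
        return
    }
    _ = v.VerifySignature(hash, sig) // true iff sig is the group TSIG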
-func (tc *TSigVerifierCache) Update(round uint64) (bool, error) { - tc.lock.Lock() - defer tc.lock.Unlock() - if round < tc.minRound { - return false, ErrRoundAlreadyPurged - } - if _, exist := tc.verifier[round]; exist { - return true, nil - } - if !tc.intf.IsDKGFinal(round) { - return false, nil - } - gpk, err := typesDKG.NewGroupPublicKey(round, - tc.intf.DKGMasterPublicKeys(round), - tc.intf.DKGComplaints(round), - utils.GetDKGThreshold(utils.GetConfigWithPanic(tc.intf, round, nil))) - if err != nil { - return false, err - } - if len(tc.verifier) == 0 { - tc.minRound = round - } - tc.verifier[round] = gpk - if len(tc.verifier) > tc.cacheSize { - delete(tc.verifier, tc.minRound) - } - for { - if _, exist := tc.verifier[tc.minRound]; !exist { - tc.minRound++ - } else { - break - } - } - return true, nil -} - -// Delete the cache of given round. -func (tc *TSigVerifierCache) Delete(round uint64) { - tc.lock.Lock() - defer tc.lock.Unlock() - delete(tc.verifier, round) -} - -// Get the TSigVerifier of round and returns if it exists. -func (tc *TSigVerifierCache) Get(round uint64) (TSigVerifier, bool) { - tc.lock.RLock() - defer tc.lock.RUnlock() - verifier, exist := tc.verifier[round] - return verifier, exist -} - -func newTSigProtocol( - npks *typesDKG.NodePublicKeys, - hash common.Hash) *tsigProtocol { - return &tsigProtocol{ - nodePublicKeys: npks, - hash: hash, - sigs: make(map[dkg.ID]dkg.PartialSignature, npks.Threshold+1), - } -} - -func (tsig *tsigProtocol) sanityCheck(psig *typesDKG.PartialSignature) error { - _, exist := tsig.nodePublicKeys.PublicKeys[psig.ProposerID] - if !exist { - return ErrNotQualifyDKGParticipant - } - ok, err := utils.VerifyDKGPartialSignatureSignature(psig) - if err != nil { - return err - } - if !ok { - return ErrIncorrectPartialSignatureSignature - } - if psig.Hash != tsig.hash { - return ErrMismatchPartialSignatureHash - } - return nil -} - -func (tsig *tsigProtocol) processPartialSignature( - psig *typesDKG.PartialSignature) error { - if psig.Round != tsig.nodePublicKeys.Round { - return nil - } - id, exist := tsig.nodePublicKeys.IDMap[psig.ProposerID] - if !exist { - return ErrNotQualifyDKGParticipant - } - if err := tsig.sanityCheck(psig); err != nil { - return err - } - pubKey := tsig.nodePublicKeys.PublicKeys[psig.ProposerID] - if !pubKey.VerifySignature( - tsig.hash, crypto.Signature(psig.PartialSignature)) { - return ErrIncorrectPartialSignature - } - tsig.sigs[id] = psig.PartialSignature - return nil -} - -func (tsig *tsigProtocol) signature() (crypto.Signature, error) { - if len(tsig.sigs) < tsig.nodePublicKeys.Threshold { - return crypto.Signature{}, ErrNotEnoughtPartialSignatures - } - ids := make(dkg.IDs, 0, len(tsig.sigs)) - psigs := make([]dkg.PartialSignature, 0, len(tsig.sigs)) - for id, psig := range tsig.sigs { - ids = append(ids, id) - psigs = append(psigs, psig) - } - return dkg.RecoverSignature(psigs, ids) -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go deleted file mode 100644 index c88b3dcb4..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. 
-// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// <http://www.gnu.org/licenses/>. - -package core - -import ( - "time" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/crypto" - "github.com/dexon-foundation/dexon-consensus/core/types" - typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" -) - -// Application describes the application interface that interacts with DEXON -// consensus core. -type Application interface { - // PreparePayload is called when consensus core is preparing a block. - PreparePayload(position types.Position) ([]byte, error) - - // PrepareWitness will return the witness data no lower than consensusHeight. - PrepareWitness(consensusHeight uint64) (types.Witness, error) - - // VerifyBlock verifies if the block is valid. - VerifyBlock(block *types.Block) types.BlockVerifyStatus - - // BlockConfirmed is called when a block is confirmed and added to the lattice. - BlockConfirmed(block types.Block) - - // BlockDelivered is called when a block is added to the compaction chain. - BlockDelivered(hash common.Hash, position types.Position, rand []byte) -} - -// Debug describes the application interface that requires -// more detailed consensus execution. -type Debug interface { - // BlockReceived is called when the block received in agreement. - BlockReceived(common.Hash) - // BlockReady is called when the block's randomness is ready. - BlockReady(common.Hash) -} - -// Network describes the network interface that interacts with DEXON consensus -// core. -type Network interface { - // PullBlocks tries to pull blocks from the DEXON network. - PullBlocks(hashes common.Hashes) - - // PullVotes tries to pull votes from the DEXON network. - PullVotes(position types.Position) - - // BroadcastVote broadcasts vote to all nodes in DEXON network. - BroadcastVote(vote *types.Vote) - - // BroadcastBlock broadcasts block to all nodes in DEXON network. - BroadcastBlock(block *types.Block) - - // BroadcastAgreementResult broadcasts agreement result to DKG set. - BroadcastAgreementResult(randRequest *types.AgreementResult) - - // SendDKGPrivateShare sends PrivateShare to a DKG participant. - SendDKGPrivateShare(pub crypto.PublicKey, prvShare *typesDKG.PrivateShare) - - // BroadcastDKGPrivateShare broadcasts PrivateShare to all DKG participants. - BroadcastDKGPrivateShare(prvShare *typesDKG.PrivateShare) - - // BroadcastDKGPartialSignature broadcasts partialSignature to all - // DKG participants. - BroadcastDKGPartialSignature(psig *typesDKG.PartialSignature) - - // ReceiveChan returns a channel to receive messages from DEXON network. - ReceiveChan() <-chan types.Msg - - // ReportBadPeerChan returns a channel to report bad peer. - ReportBadPeerChan() chan<- interface{} -} - -// Governance interface specifies interface to control the governance contract.
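Since every integrator must supply an Application, a minimal hedged stub helps illustrate the contract (not from the original file); all bodies are placeholders, and the VerifyOK status value is assumed to be the usual "block is valid" constant in the types package:

    type nopApp struct{}

    func (nopApp) PreparePayload(types.Position) ([]byte, error) { return nil, nil }
    func (nopApp) PrepareWitness(uint64) (types.Witness, error) {
        return types.Witness{}, nil
    }
    func (nopApp) VerifyBlock(*types.Block) types.BlockVerifyStatus {
        return types.VerifyOK // assumption: accept every block
    }
    func (nopApp) BlockConfirmed(types.Block)                          {}
    func (nopApp) BlockDelivered(common.Hash, types.Position, []byte)  {}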
-// Note that there are a lot more methods in the governance contract, that this -// interface only define those that are required to run the consensus algorithm. -type Governance interface { - // Configuration returns the configuration at a given round. - // Return the genesis configuration if round == 0. - Configuration(round uint64) *types.Config - - // CRS returns the CRS for a given round. Return the genesis CRS if - // round == 0. - // - // The CRS returned is the proposed or most recently reset one; it may be - // changed later if the corresponding DKG set fails to generate a group - // public key. - CRS(round uint64) common.Hash - - // Propose a CRS of round. - ProposeCRS(round uint64, signedCRS []byte) - - // NodeSet returns the node set at a given round. - // Return the genesis node set if round == 0. - NodeSet(round uint64) []crypto.PublicKey - - // GetRoundHeight returns the beginning block height of a round. - GetRoundHeight(round uint64) uint64 - - //// DKG-related methods. - - // AddDKGComplaint adds a DKGComplaint. - AddDKGComplaint(complaint *typesDKG.Complaint) - - // DKGComplaints gets all the DKGComplaints of round. - DKGComplaints(round uint64) []*typesDKG.Complaint - - // AddDKGMasterPublicKey adds a DKGMasterPublicKey. - AddDKGMasterPublicKey(masterPublicKey *typesDKG.MasterPublicKey) - - // DKGMasterPublicKeys gets all the DKGMasterPublicKey of round. - DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey - - // AddDKGMPKReady adds a DKG ready message. - AddDKGMPKReady(ready *typesDKG.MPKReady) - - // IsDKGMPKReady checks if DKG's master public key preparation is ready. - IsDKGMPKReady(round uint64) bool - - // AddDKGFinalize adds a DKG finalize message. - AddDKGFinalize(final *typesDKG.Finalize) - - // IsDKGFinal checks if DKG is final. - IsDKGFinal(round uint64) bool - - // AddDKGSuccess adds a DKG success message. - AddDKGSuccess(success *typesDKG.Success) - - // IsDKGSuccess checks if DKG succeeded. - IsDKGSuccess(round uint64) bool - - // ReportForkVote reports a node for forking votes. - ReportForkVote(vote1, vote2 *types.Vote) - - // ReportForkBlock reports a node for forking blocks. - ReportForkBlock(block1, block2 *types.Block) - - // ResetDKG resets latest DKG data and proposes a new CRS. - ResetDKG(newSignedCRS []byte) - - // DKGResetCount returns the reset count for DKG of given round. - DKGResetCount(round uint64) uint64 -} - -// Ticker defines the capability to tick by interval. -type Ticker interface { - // Tick returns a channel that is triggered on the next tick. - Tick() <-chan time.Time - - // Stop the ticker. - Stop() - - // Restart the ticker and clear all internal data. - Restart() -} - -// Recovery interface for interacting with recovery information. -type Recovery interface { - // ProposeSkipBlock proposes a skip block. - ProposeSkipBlock(height uint64) error - - // Votes gets the number of votes of given height. - Votes(height uint64) (uint64, error) -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go deleted file mode 100644 index 91b2e9979..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library.
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go
deleted file mode 100644
index 91b2e9979..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
-	"math/big"
-	"sync"
-
-	"github.com/dexon-foundation/dexon-consensus/common"
-	"github.com/dexon-foundation/dexon-consensus/core/crypto"
-	"github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-type validLeaderFn func(block *types.Block, crs common.Hash) (bool, error)
-
-// Some constant values.
-var (
-	maxHash *big.Int
-	one     *big.Rat
-)
-
-func init() {
-	hash := make([]byte, common.HashLength)
-	for i := range hash {
-		hash[i] = 0xff
-	}
-	maxHash = big.NewInt(0).SetBytes(hash)
-	one = big.NewRat(1, 1)
-}
-
-type leaderSelector struct {
-	hashCRS       common.Hash
-	numCRS        *big.Int
-	minCRSBlock   *big.Int
-	minBlockHash  common.Hash
-	pendingBlocks map[common.Hash]*types.Block
-	validLeader   validLeaderFn
-	lock          sync.Mutex
-	logger        common.Logger
-}
-
-func newLeaderSelector(
-	validLeader validLeaderFn, logger common.Logger) *leaderSelector {
-	return &leaderSelector{
-		minCRSBlock: maxHash,
-		validLeader: validLeader,
-		logger:      logger,
-	}
-}
-
-func (l *leaderSelector) distance(sig crypto.Signature) *big.Int {
-	hash := crypto.Keccak256Hash(sig.Signature[:])
-	num := big.NewInt(0)
-	num.SetBytes(hash[:])
-	num.Abs(num.Sub(l.numCRS, num))
-	return num
-}
-
-func (l *leaderSelector) probability(sig crypto.Signature) float64 {
-	dis := l.distance(sig)
-	prob := big.NewRat(1, 1).SetFrac(dis, maxHash)
-	p, _ := prob.Sub(one, prob).Float64()
-	return p
-}
-
-func (l *leaderSelector) restart(crs common.Hash) {
-	numCRS := big.NewInt(0)
-	numCRS.SetBytes(crs[:])
-	l.lock.Lock()
-	defer l.lock.Unlock()
-	l.numCRS = numCRS
-	l.hashCRS = crs
-	l.minCRSBlock = maxHash
-	l.minBlockHash = types.NullBlockHash
-	l.pendingBlocks = make(map[common.Hash]*types.Block)
-}
-
-func (l *leaderSelector) leaderBlockHash() common.Hash {
-	l.lock.Lock()
-	defer l.lock.Unlock()
-	for _, b := range l.pendingBlocks {
-		ok, dist := l.potentialLeader(b)
-		if !ok {
-			continue
-		}
-		ok, err := l.validLeader(b, l.hashCRS)
-		if err != nil {
-			l.logger.Error("Error checking validLeader", "error", err, "block", b)
-			delete(l.pendingBlocks, b.Hash)
-			continue
-		}
-		if ok {
-			l.updateLeader(b, dist)
-			delete(l.pendingBlocks, b.Hash)
-		}
-	}
-	return l.minBlockHash
-}
-
-func (l *leaderSelector) processBlock(block *types.Block) error {
-	l.lock.Lock()
-	defer l.lock.Unlock()
-	ok, dist := l.potentialLeader(block)
-	if !ok {
-		return nil
-	}
-	ok, err := l.validLeader(block, l.hashCRS)
-	if err != nil {
-		return err
-	}
-	if !ok {
-		l.pendingBlocks[block.Hash] = block
-		return nil
-	}
-	l.updateLeader(block, dist)
-	return nil
-}
-
-func (l *leaderSelector) potentialLeader(block *types.Block) (bool, *big.Int) {
-	dist := l.distance(block.CRSSignature)
-	cmp := l.minCRSBlock.Cmp(dist)
-	return (cmp > 0 || (cmp == 0 && block.Hash.Less(l.minBlockHash))), dist
-}
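The distance/probability pair above implements the leader lottery: each proposer signs the round CRS, the signature is hashed, and the block whose hash lands numerically closest to the CRS wins, with probability approaching 1 as the distance shrinks. A self-contained sketch of the same arithmetic on plain byte slices, using golang.org/x/crypto/sha3 as a stand-in for the library's Keccak256 helper; all names here are illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"math/big"

	"golang.org/x/crypto/sha3"
)

// maxHash is 2^256 - 1, matching the 32 bytes of 0xff built in init() above.
var maxHash = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))

// distance mirrors leaderSelector.distance: |crs - Keccak256(sig)|.
func distance(sig, crs []byte) *big.Int {
	k := sha3.NewLegacyKeccak256() // stand-in for crypto.Keccak256Hash
	k.Write(sig)
	num := new(big.Int).SetBytes(k.Sum(nil))
	return num.Abs(num.Sub(new(big.Int).SetBytes(crs), num))
}

// probability mirrors leaderSelector.probability: 1 - distance/maxHash,
// so a signature hashing close to the CRS scores near 1 and wins.
func probability(sig, crs []byte) float64 {
	frac := new(big.Rat).SetFrac(distance(sig, crs), maxHash)
	p, _ := new(big.Rat).Sub(big.NewRat(1, 1), frac).Float64()
	return p
}

func main() {
	crs := bytes.Repeat([]byte{0x42}, 32) // a made-up round CRS
	fmt.Printf("%.6f\n", probability([]byte("proposer-signature"), crs))
}
```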
-func (l *leaderSelector) updateLeader(block *types.Block, dist *big.Int) {
-	l.minCRSBlock = dist
-	l.minBlockHash = block.Hash
-}
-
-func (l *leaderSelector) findPendingBlock(
-	hash common.Hash) (*types.Block, bool) {
-	b, e := l.pendingBlocks[hash]
-	return b, e
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/nonblocking.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/nonblocking.go
deleted file mode 100644
index 10b47b822..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/nonblocking.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
-	"fmt"
-	"sync"
-
-	"github.com/dexon-foundation/dexon-consensus/common"
-	"github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-type blockConfirmedEvent struct {
-	block *types.Block
-}
-
-type blockDeliveredEvent struct {
-	blockHash     common.Hash
-	blockPosition types.Position
-	rand          []byte
-}
-
-// nonBlocking is a decorator that implements these interfaces and makes
-// their methods non-blocking:
-// - Application
-// - Debug
-// - It also provides non-blocking db updates.
-type nonBlocking struct {
-	app          Application
-	debug        Debug
-	eventChan    chan interface{}
-	events       []interface{}
-	eventsChange *sync.Cond
-	running      sync.WaitGroup
-}
-
-func newNonBlocking(app Application, debug Debug) *nonBlocking {
-	nonBlockingModule := &nonBlocking{
-		app:          app,
-		debug:        debug,
-		eventChan:    make(chan interface{}, 6),
-		events:       make([]interface{}, 0, 100),
-		eventsChange: sync.NewCond(&sync.Mutex{}),
-	}
-	go nonBlockingModule.run()
-	return nonBlockingModule
-}
-
-func (nb *nonBlocking) addEvent(event interface{}) {
-	nb.eventsChange.L.Lock()
-	defer nb.eventsChange.L.Unlock()
-	nb.events = append(nb.events, event)
-	nb.eventsChange.Broadcast()
-}
-
-func (nb *nonBlocking) run() {
-	// This goroutine consumes the first event from events and calls the
-	// corresponding methods of Application/Debug/db.
-	for {
-		var event interface{}
-		func() {
-			nb.eventsChange.L.Lock()
-			defer nb.eventsChange.L.Unlock()
-			for len(nb.events) == 0 {
-				nb.eventsChange.Wait()
-			}
-			event = nb.events[0]
-			nb.events = nb.events[1:]
-			nb.running.Add(1)
-		}()
-		switch e := event.(type) {
-		case blockConfirmedEvent:
-			nb.app.BlockConfirmed(*e.block)
-		case blockDeliveredEvent:
-			nb.app.BlockDelivered(e.blockHash, e.blockPosition, e.rand)
-		default:
-			fmt.Printf("Unknown event %v.", e)
-		}
-		nb.running.Done()
-		nb.eventsChange.Broadcast()
-	}
-}
-
-// wait blocks until all events in events have finished.
-func (nb *nonBlocking) wait() {
-	nb.eventsChange.L.Lock()
-	defer nb.eventsChange.L.Unlock()
-	for len(nb.events) > 0 {
-		nb.eventsChange.Wait()
-	}
-	nb.running.Wait()
-}
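The decorator's core trick is visible in addEvent/run/wait: producers never block because delivery happens on one dedicated goroutine. The same pattern in a self-contained, runnable form; all names are invented, and the original additionally dispatches typed events to Application/Debug and tracks in-flight callbacks with a WaitGroup, which the inFlight counter stands in for here:

```go
package main

import (
	"fmt"
	"sync"
)

// asyncNotifier is a stripped-down re-creation of the nonBlocking
// pattern above: producers append events under a condition variable
// and one goroutine replays them in order.
type asyncNotifier struct {
	events   []string
	inFlight int
	cond     *sync.Cond
}

func newAsyncNotifier() *asyncNotifier {
	n := &asyncNotifier{cond: sync.NewCond(&sync.Mutex{})}
	go n.run()
	return n
}

// notify enqueues an event and returns immediately, like addEvent.
func (n *asyncNotifier) notify(ev string) {
	n.cond.L.Lock()
	defer n.cond.L.Unlock()
	n.events = append(n.events, ev)
	n.cond.Broadcast()
}

// run pops one event at a time, mirroring nonBlocking.run.
func (n *asyncNotifier) run() {
	for {
		n.cond.L.Lock()
		for len(n.events) == 0 {
			n.cond.Wait()
		}
		ev := n.events[0]
		n.events = n.events[1:]
		n.inFlight++
		n.cond.L.Unlock()

		fmt.Println("delivered:", ev) // the slow app callback runs here

		n.cond.L.Lock()
		n.inFlight--
		n.cond.Broadcast()
		n.cond.L.Unlock()
	}
}

// wait blocks until the queue is drained and no callback is running,
// mirroring nonBlocking.wait.
func (n *asyncNotifier) wait() {
	n.cond.L.Lock()
	defer n.cond.L.Unlock()
	for len(n.events) > 0 || n.inFlight > 0 {
		n.cond.Wait()
	}
}

func main() {
	n := newAsyncNotifier()
	n.notify("BlockConfirmed")
	n.notify("BlockDelivered")
	n.wait() // both callbacks have finished once this returns
}
```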
-// PreparePayload cannot be non-blocking.
-func (nb *nonBlocking) PreparePayload(position types.Position) ([]byte, error) {
-	return nb.app.PreparePayload(position)
-}
-
-// PrepareWitness cannot be non-blocking.
-func (nb *nonBlocking) PrepareWitness(height uint64) (types.Witness, error) {
-	return nb.app.PrepareWitness(height)
-}
-
-// VerifyBlock cannot be non-blocking.
-func (nb *nonBlocking) VerifyBlock(block *types.Block) types.BlockVerifyStatus {
-	return nb.app.VerifyBlock(block)
-}
-
-// BlockConfirmed is called when a block is confirmed and added to the lattice.
-func (nb *nonBlocking) BlockConfirmed(block types.Block) {
-	nb.addEvent(blockConfirmedEvent{&block})
-}
-
-// BlockDelivered is called when a block is added to the compaction chain.
-func (nb *nonBlocking) BlockDelivered(blockHash common.Hash,
-	blockPosition types.Position, rand []byte) {
-	nb.addEvent(blockDeliveredEvent{
-		blockHash:     blockHash,
-		blockPosition: blockPosition,
-		rand:          rand,
-	})
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/agreement.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/agreement.go
deleted file mode 100644
index d39c24627..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/agreement.go
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus-core library.
-//
-// The dexon-consensus-core library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus-core library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus-core library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package syncer
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"time"
-
-	"github.com/dexon-foundation/dexon-consensus/common"
-	"github.com/dexon-foundation/dexon-consensus/core"
-	"github.com/dexon-foundation/dexon-consensus/core/crypto"
-	"github.com/dexon-foundation/dexon-consensus/core/types"
-	"github.com/dexon-foundation/dexon-consensus/core/utils"
-)
-
-// The agreement struct implements the state of the BA (Byzantine Agreement)
-// protocol needed in the syncer, which only receives agreement results.
-type agreement struct {
-	chainTip          uint64
-	cache             *utils.NodeSetCache
-	tsigVerifierCache *core.TSigVerifierCache
-	inputChan         chan interface{}
-	outputChan        chan<- *types.Block
-	pullChan          chan<- common.Hash
-	blocks            map[types.Position]map[common.Hash]*types.Block
-	agreementResults  map[common.Hash][]byte
-	latestCRSRound    uint64
-	pendingAgrs       map[uint64]map[common.Hash]*types.AgreementResult
-	pendingBlocks     map[uint64]map[common.Hash]*types.Block
-	logger            common.Logger
-	confirmedBlocks   map[common.Hash]struct{}
-	ctx               context.Context
-	ctxCancel         context.CancelFunc
-}
-
-// newAgreement creates a new agreement instance.
-func newAgreement(chainTip uint64, - ch chan<- *types.Block, pullChan chan<- common.Hash, - cache *utils.NodeSetCache, verifier *core.TSigVerifierCache, - logger common.Logger) *agreement { - a := &agreement{ - chainTip: chainTip, - cache: cache, - tsigVerifierCache: verifier, - inputChan: make(chan interface{}, 1000), - outputChan: ch, - pullChan: pullChan, - blocks: make(map[types.Position]map[common.Hash]*types.Block), - agreementResults: make(map[common.Hash][]byte), - logger: logger, - pendingAgrs: make( - map[uint64]map[common.Hash]*types.AgreementResult), - pendingBlocks: make( - map[uint64]map[common.Hash]*types.Block), - confirmedBlocks: make(map[common.Hash]struct{}), - } - a.ctx, a.ctxCancel = context.WithCancel(context.Background()) - return a -} - -// run starts the agreement, this does not start a new routine, go a new -// routine explicitly in the caller. -func (a *agreement) run() { - defer a.ctxCancel() - for { - select { - case val, ok := <-a.inputChan: - if !ok { - // InputChan is closed by network when network ends. - return - } - switch v := val.(type) { - case *types.Block: - if v.Position.Round >= core.DKGDelayRound && v.IsFinalized() { - a.processFinalizedBlock(v) - } else { - a.processBlock(v) - } - case *types.AgreementResult: - a.processAgreementResult(v) - case uint64: - a.processNewCRS(v) - } - } - } -} - -func (a *agreement) processBlock(b *types.Block) { - if _, exist := a.confirmedBlocks[b.Hash]; exist { - return - } - if rand, exist := a.agreementResults[b.Hash]; exist { - if len(b.Randomness) == 0 { - b.Randomness = rand - } - a.confirm(b) - } else { - if _, exist := a.blocks[b.Position]; !exist { - a.blocks[b.Position] = make(map[common.Hash]*types.Block) - } - a.blocks[b.Position][b.Hash] = b - } -} - -func (a *agreement) processFinalizedBlock(block *types.Block) { - // Cache those results that CRS is not ready yet. - if _, exists := a.confirmedBlocks[block.Hash]; exists { - a.logger.Trace("finalized block already confirmed", "block", block) - return - } - if block.Position.Round > a.latestCRSRound { - pendingsForRound, exists := a.pendingBlocks[block.Position.Round] - if !exists { - pendingsForRound = make(map[common.Hash]*types.Block) - a.pendingBlocks[block.Position.Round] = pendingsForRound - } - pendingsForRound[block.Hash] = block - a.logger.Trace("finalized block cached", "block", block) - return - } - if err := utils.VerifyBlockSignature(block); err != nil { - return - } - verifier, ok, err := a.tsigVerifierCache.UpdateAndGet( - block.Position.Round) - if err != nil { - a.logger.Error("error verifying block randomness", - "block", block, - "error", err) - return - } - if !ok { - a.logger.Error("cannot verify block randomness", "block", block) - return - } - if !verifier.VerifySignature(block.Hash, crypto.Signature{ - Type: "bls", - Signature: block.Randomness, - }) { - a.logger.Error("incorrect block randomness", "block", block) - return - } - a.confirm(block) -} - -func (a *agreement) processAgreementResult(r *types.AgreementResult) { - // Cache those results that CRS is not ready yet. 
- if _, exists := a.confirmedBlocks[r.BlockHash]; exists { - a.logger.Trace("Agreement result already confirmed", "result", r) - return - } - if r.Position.Round > a.latestCRSRound { - pendingsForRound, exists := a.pendingAgrs[r.Position.Round] - if !exists { - pendingsForRound = make(map[common.Hash]*types.AgreementResult) - a.pendingAgrs[r.Position.Round] = pendingsForRound - } - pendingsForRound[r.BlockHash] = r - a.logger.Trace("Agreement result cached", "result", r) - return - } - if err := core.VerifyAgreementResult(r, a.cache); err != nil { - a.logger.Error("Agreement result verification failed", - "result", r, - "error", err) - return - } - if r.Position.Round >= core.DKGDelayRound { - verifier, ok, err := a.tsigVerifierCache.UpdateAndGet(r.Position.Round) - if err != nil { - a.logger.Error("error verifying agreement result randomness", - "result", r, - "error", err) - return - } - if !ok { - a.logger.Error("cannot verify agreement result randomness", "result", r) - return - } - if !verifier.VerifySignature(r.BlockHash, crypto.Signature{ - Type: "bls", - Signature: r.Randomness, - }) { - a.logger.Error("incorrect agreement result randomness", "result", r) - return - } - } else { - // Special case for rounds before DKGDelayRound. - if bytes.Compare(r.Randomness, core.NoRand) != 0 { - a.logger.Error("incorrect agreement result randomness", "result", r) - return - } - } - if r.IsEmptyBlock { - b := &types.Block{ - Position: r.Position, - Randomness: r.Randomness, - } - // Empty blocks should be confirmed directly, they won't be sent over - // the wire. - a.confirm(b) - return - } - if bs, exist := a.blocks[r.Position]; exist { - if b, exist := bs[r.BlockHash]; exist { - b.Randomness = r.Randomness - a.confirm(b) - return - } - } - a.agreementResults[r.BlockHash] = r.Randomness -loop: - for { - select { - case a.pullChan <- r.BlockHash: - break loop - case <-a.ctx.Done(): - a.logger.Error("Pull request is not sent", - "position", &r.Position, - "hash", r.BlockHash.String()[:6]) - return - case <-time.After(500 * time.Millisecond): - a.logger.Debug("Pull request is unable to send", - "position", &r.Position, - "hash", r.BlockHash.String()[:6]) - } - } -} - -func (a *agreement) processNewCRS(round uint64) { - if round <= a.latestCRSRound { - return - } - prevRound := a.latestCRSRound + 1 - a.latestCRSRound = round - // Verify all pending results. - for r := prevRound; r <= a.latestCRSRound; r++ { - pendingsForRound := a.pendingAgrs[r] - if pendingsForRound == nil { - continue - } - delete(a.pendingAgrs, r) - for _, res := range pendingsForRound { - if err := core.VerifyAgreementResult(res, a.cache); err != nil { - a.logger.Error("Invalid agreement result", - "result", res, - "error", err) - continue - } - a.logger.Error("Flush agreement result", "result", res) - a.processAgreementResult(res) - break - } - } -} - -// confirm notifies consensus the confirmation of a block in BA. 
-func (a *agreement) confirm(b *types.Block) { - if !b.IsFinalized() { - panic(fmt.Errorf("confirm a block %s without randomness", b)) - } - if _, exist := a.confirmedBlocks[b.Hash]; !exist { - delete(a.blocks, b.Position) - delete(a.agreementResults, b.Hash) - loop: - for { - select { - case a.outputChan <- b: - break loop - case <-a.ctx.Done(): - a.logger.Error("Confirmed block is not sent", "block", b) - return - case <-time.After(500 * time.Millisecond): - a.logger.Debug("Agreement output channel is full", "block", b) - } - } - a.confirmedBlocks[b.Hash] = struct{}{} - } - if b.Position.Height > a.chainTip+1 { - if _, exist := a.confirmedBlocks[b.ParentHash]; !exist { - a.pullChan <- b.ParentHash - } - } -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go deleted file mode 100644 index 496c0f9a8..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go +++ /dev/null @@ -1,543 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package syncer - -import ( - "context" - "fmt" - "sort" - "sync" - "time" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core" - "github.com/dexon-foundation/dexon-consensus/core/crypto" - "github.com/dexon-foundation/dexon-consensus/core/db" - "github.com/dexon-foundation/dexon-consensus/core/types" - "github.com/dexon-foundation/dexon-consensus/core/utils" -) - -var ( - // ErrAlreadySynced is reported when syncer is synced. - ErrAlreadySynced = fmt.Errorf("already synced") - // ErrNotSynced is reported when syncer is not synced yet. - ErrNotSynced = fmt.Errorf("not synced yet") - // ErrGenesisBlockReached is reported when genesis block reached. - ErrGenesisBlockReached = fmt.Errorf("genesis block reached") - // ErrInvalidBlockOrder is reported when SyncBlocks receives unordered - // blocks. - ErrInvalidBlockOrder = fmt.Errorf("invalid block order") - // ErrInvalidSyncingHeight raised when the blocks to sync is not following - // the compaction chain tip in database. - ErrInvalidSyncingHeight = fmt.Errorf("invalid syncing height") -) - -// Consensus is for syncing consensus module. -type Consensus struct { - db db.Database - gov core.Governance - dMoment time.Time - logger common.Logger - app core.Application - prv crypto.PrivateKey - network core.Network - nodeSetCache *utils.NodeSetCache - tsigVerifier *core.TSigVerifierCache - - blocks types.BlocksByPosition - agreementModule *agreement - agreementRoundCut uint64 - heightEvt *common.Event - roundEvt *utils.RoundEvent - - // lock for accessing all fields. 
- lock sync.RWMutex - duringBuffering bool - latestCRSRound uint64 - waitGroup sync.WaitGroup - agreementWaitGroup sync.WaitGroup - pullChan chan common.Hash - receiveChan chan *types.Block - ctx context.Context - ctxCancel context.CancelFunc - syncedLastBlock *types.Block - syncedConsensus *core.Consensus - syncedSkipNext bool - dummyCancel context.CancelFunc - dummyFinished <-chan struct{} - dummyMsgBuffer []types.Msg - initChainTipHeight uint64 -} - -// NewConsensus creates an instance for Consensus (syncer consensus). -func NewConsensus( - initHeight uint64, - dMoment time.Time, - app core.Application, - gov core.Governance, - db db.Database, - network core.Network, - prv crypto.PrivateKey, - logger common.Logger) *Consensus { - - con := &Consensus{ - dMoment: dMoment, - app: app, - gov: gov, - db: db, - network: network, - nodeSetCache: utils.NewNodeSetCache(gov), - tsigVerifier: core.NewTSigVerifierCache(gov, 7), - prv: prv, - logger: logger, - receiveChan: make(chan *types.Block, 1000), - pullChan: make(chan common.Hash, 1000), - heightEvt: common.NewEvent(), - } - con.ctx, con.ctxCancel = context.WithCancel(context.Background()) - _, con.initChainTipHeight = db.GetCompactionChainTipInfo() - con.agreementModule = newAgreement( - con.initChainTipHeight, - con.receiveChan, - con.pullChan, - con.nodeSetCache, - con.tsigVerifier, - con.logger) - con.agreementWaitGroup.Add(1) - go func() { - defer con.agreementWaitGroup.Done() - con.agreementModule.run() - }() - if err := con.deliverPendingBlocks(initHeight); err != nil { - panic(err) - } - return con -} - -func (con *Consensus) deliverPendingBlocks(height uint64) error { - if height >= con.initChainTipHeight { - return nil - } - blocks := make([]*types.Block, 0, con.initChainTipHeight-height) - hash, _ := con.db.GetCompactionChainTipInfo() - for { - block, err := con.db.GetBlock(hash) - if err != nil { - return err - } - if block.Position.Height == height { - break - } - blocks = append(blocks, &block) - hash = block.ParentHash - } - sort.Sort(types.BlocksByPosition(blocks)) - for _, b := range blocks { - con.logger.Debug("Syncer BlockConfirmed", "block", b) - con.app.BlockConfirmed(*b) - con.logger.Debug("Syncer BlockDelivered", "block", b) - con.app.BlockDelivered(b.Hash, b.Position, b.Randomness) - } - return nil -} - -func (con *Consensus) assureBuffering() { - if func() bool { - con.lock.RLock() - defer con.lock.RUnlock() - return con.duringBuffering - }() { - return - } - con.lock.Lock() - defer con.lock.Unlock() - if con.duringBuffering { - return - } - con.duringBuffering = true - // Get latest block to prepare utils.RoundEvent. - var ( - err error - blockHash, height = con.db.GetCompactionChainTipInfo() - ) - if height == 0 { - con.roundEvt, err = utils.NewRoundEvent(con.ctx, con.gov, con.logger, - types.Position{}, core.ConfigRoundShift) - } else { - var b types.Block - if b, err = con.db.GetBlock(blockHash); err == nil { - con.roundEvt, err = utils.NewRoundEvent(con.ctx, con.gov, - con.logger, b.Position, core.ConfigRoundShift) - } - } - if err != nil { - panic(err) - } - // Make sure con.roundEvt stopped before stopping con.agreementModule. - con.waitGroup.Add(1) - // Register a round event handler to reset node set cache, this handler - // should be the highest priority. 
- con.roundEvt.Register(func(evts []utils.RoundEventParam) { - for _, e := range evts { - if e.Reset == 0 { - continue - } - con.nodeSetCache.Purge(e.Round + 1) - con.tsigVerifier.Purge(e.Round + 1) - } - }) - // Register a round event handler to notify CRS to agreementModule. - con.roundEvt.Register(func(evts []utils.RoundEventParam) { - con.waitGroup.Add(1) - go func() { - defer con.waitGroup.Done() - for _, e := range evts { - select { - case <-con.ctx.Done(): - return - default: - } - for func() bool { - select { - case <-con.ctx.Done(): - return false - case con.agreementModule.inputChan <- e.Round: - return false - case <-time.After(500 * time.Millisecond): - con.logger.Warn( - "Agreement input channel is full when notifying new round", - "round", e.Round, - ) - return true - } - }() { - } - } - }() - }) - // Register a round event handler to validate next round. - con.roundEvt.Register(func(evts []utils.RoundEventParam) { - con.heightEvt.RegisterHeight( - evts[len(evts)-1].NextRoundValidationHeight(), - utils.RoundEventRetryHandlerGenerator(con.roundEvt, con.heightEvt), - ) - }) - con.roundEvt.TriggerInitEvent() - con.startAgreement() - con.startNetwork() -} - -func (con *Consensus) checkIfSynced(blocks []*types.Block) (synced bool) { - con.lock.RLock() - defer con.lock.RUnlock() - defer func() { - con.logger.Debug("Syncer synced status", - "last-block", blocks[len(blocks)-1], - "synced", synced, - ) - }() - if len(con.blocks) == 0 || len(blocks) == 0 { - return - } - synced = !blocks[len(blocks)-1].Position.Older(con.blocks[0].Position) - return -} - -func (con *Consensus) buildAllEmptyBlocks() { - con.lock.Lock() - defer con.lock.Unlock() - // Clean empty blocks on tips of chains. - for len(con.blocks) > 0 && con.isEmptyBlock(con.blocks[0]) { - con.blocks = con.blocks[1:] - } - // Build empty blocks. - for i, b := range con.blocks { - if con.isEmptyBlock(b) { - if con.blocks[i-1].Position.Height+1 == b.Position.Height { - con.buildEmptyBlock(b, con.blocks[i-1]) - } - } - } -} - -// ForceSync forces syncer to become synced. -func (con *Consensus) ForceSync(lastPos types.Position, skip bool) { - if con.syncedLastBlock != nil { - return - } - hash, height := con.db.GetCompactionChainTipInfo() - if height < lastPos.Height { - panic(fmt.Errorf("compaction chain not synced height %d, tip %d", - lastPos.Height, height)) - } else if height > lastPos.Height { - skip = false - } - block, err := con.db.GetBlock(hash) - if err != nil { - panic(err) - } - con.syncedLastBlock = &block - con.stopBuffering() - // We might call stopBuffering without calling assureBuffering. - if con.dummyCancel == nil { - con.dummyCancel, con.dummyFinished = utils.LaunchDummyReceiver( - context.Background(), con.network.ReceiveChan(), - func(msg types.Msg) { - con.dummyMsgBuffer = append(con.dummyMsgBuffer, msg) - }) - } - con.syncedSkipNext = skip - con.logger.Info("Force Sync", "block", &block, "skip", skip) -} - -// SyncBlocks syncs blocks from compaction chain, latest is true if the caller -// regards the blocks are the latest ones. Notice that latest can be true for -// many times. -// NOTICE: parameter "blocks" should be consecutive in compaction height. -// NOTICE: this method is not expected to be called concurrently. 
-func (con *Consensus) SyncBlocks(
-	blocks []*types.Block, latest bool) (synced bool, err error) {
-	defer func() {
-		con.logger.Debug("SyncBlocks returned",
-			"synced", synced,
-			"error", err,
-			"last-block", con.syncedLastBlock,
-		)
-	}()
-	if con.syncedLastBlock != nil {
-		synced, err = true, ErrAlreadySynced
-		return
-	}
-	if len(blocks) == 0 {
-		return
-	}
-	// Check if blocks are consecutive.
-	for i := 1; i < len(blocks); i++ {
-		if blocks[i].Position.Height != blocks[i-1].Position.Height+1 {
-			err = ErrInvalidBlockOrder
-			return
-		}
-	}
-	// Make sure the first block is the next block of the current compaction
-	// chain tip in DB.
-	_, tipHeight := con.db.GetCompactionChainTipInfo()
-	if blocks[0].Position.Height != tipHeight+1 {
-		con.logger.Error("Mismatched block height",
-			"now", blocks[0].Position.Height,
-			"expected", tipHeight+1,
-		)
-		err = ErrInvalidSyncingHeight
-		return
-	}
-	con.logger.Trace("SyncBlocks",
-		"position", &blocks[0].Position,
-		"len", len(blocks),
-		"latest", latest,
-	)
-	for _, b := range blocks {
-		if err = con.db.PutBlock(*b); err != nil {
-			// A block might be put into db when confirmed by BA, but not
-			// finalized yet.
-			if err == db.ErrBlockExists {
-				err = con.db.UpdateBlock(*b)
-			}
-			if err != nil {
-				return
-			}
-		}
-		if err = con.db.PutCompactionChainTipInfo(
-			b.Hash, b.Position.Height); err != nil {
-			return
-		}
-		con.heightEvt.NotifyHeight(b.Position.Height)
-	}
-	if latest {
-		con.assureBuffering()
-		con.buildAllEmptyBlocks()
-		// Check if compaction and agreements' blocks are overlapped. The
-		// overlapping of the compaction chain and BA's oldest blocks means the
-		// syncing is done.
-		if con.checkIfSynced(blocks) {
-			con.stopBuffering()
-			con.syncedLastBlock = blocks[len(blocks)-1]
-			synced = true
-		}
-	}
-	return
-}
-
-// GetSyncedConsensus returns the core.Consensus instance once synced.
-func (con *Consensus) GetSyncedConsensus() (*core.Consensus, error) {
-	con.lock.Lock()
-	defer con.lock.Unlock()
-	if con.syncedConsensus != nil {
-		return con.syncedConsensus, nil
-	}
-	if con.syncedLastBlock == nil {
-		return nil, ErrNotSynced
-	}
-	// Flush all blocks in con.blocks into core.Consensus, and build
-	// core.Consensus from the syncer.
-	con.dummyCancel()
-	<-con.dummyFinished
-	var err error
-	con.syncedConsensus, err = core.NewConsensusFromSyncer(
-		con.syncedLastBlock,
-		con.syncedSkipNext,
-		con.dMoment,
-		con.app,
-		con.gov,
-		con.db,
-		con.network,
-		con.prv,
-		con.blocks,
-		con.dummyMsgBuffer,
-		con.logger)
-	return con.syncedConsensus, err
-}
-
-// stopBuffering stops the syncer buffering routines.
-//
-// This method mainly lets the caller stop the syncer before it is synced;
-// the syncer calls this method automatically after being synced.
-func (con *Consensus) stopBuffering() {
-	if func() (notBuffering bool) {
-		con.lock.RLock()
-		defer con.lock.RUnlock()
-		notBuffering = !con.duringBuffering
-		return
-	}() {
-		return
-	}
-	if func() (alreadyCanceled bool) {
-		con.lock.Lock()
-		defer con.lock.Unlock()
-		if !con.duringBuffering {
-			alreadyCanceled = true
-			return
-		}
-		con.duringBuffering = false
-		con.logger.Trace("Syncer is about to stop")
-		// Stop network and CRS routines, wait until they are all stopped.
-		con.ctxCancel()
-		return
-	}() {
-		return
-	}
-	con.logger.Trace("Stop syncer modules")
-	con.roundEvt.Stop()
-	con.waitGroup.Done()
-	// Wait for all routines that depend on con.agreementModule to stop.
- con.waitGroup.Wait() - // Since there is no one waiting for the receive channel of fullnode, we - // need to launch a dummy receiver right away. - con.dummyCancel, con.dummyFinished = utils.LaunchDummyReceiver( - context.Background(), con.network.ReceiveChan(), - func(msg types.Msg) { - con.dummyMsgBuffer = append(con.dummyMsgBuffer, msg) - }) - // Stop agreements. - con.logger.Trace("Stop syncer agreement modules") - con.stopAgreement() - con.logger.Trace("Syncer stopped") - return -} - -// isEmptyBlock checks if a block is an empty block by both its hash and parent -// hash are empty. -func (con *Consensus) isEmptyBlock(b *types.Block) bool { - return b.Hash == common.Hash{} && b.ParentHash == common.Hash{} -} - -// buildEmptyBlock builds an empty block in agreement. -func (con *Consensus) buildEmptyBlock(b *types.Block, parent *types.Block) { - cfg := utils.GetConfigWithPanic(con.gov, b.Position.Round, con.logger) - b.Timestamp = parent.Timestamp.Add(cfg.MinBlockInterval) - b.Witness.Height = parent.Witness.Height - b.Witness.Data = make([]byte, len(parent.Witness.Data)) - copy(b.Witness.Data, parent.Witness.Data) -} - -// startAgreement starts agreements for receiving votes and agreements. -func (con *Consensus) startAgreement() { - // Start a routine for listening receive channel and pull block channel. - go func() { - for { - select { - case b, ok := <-con.receiveChan: - if !ok { - return - } - func() { - con.lock.Lock() - defer con.lock.Unlock() - if len(con.blocks) > 0 && - !b.Position.Newer(con.blocks[0].Position) { - return - } - con.blocks = append(con.blocks, b) - sort.Sort(con.blocks) - }() - case h, ok := <-con.pullChan: - if !ok { - return - } - con.network.PullBlocks(common.Hashes{h}) - } - } - }() -} - -// startNetwork starts network for receiving blocks and agreement results. -func (con *Consensus) startNetwork() { - con.waitGroup.Add(1) - go func() { - defer con.waitGroup.Done() - loop: - for { - select { - case val := <-con.network.ReceiveChan(): - switch v := val.Payload.(type) { - case *types.Block: - case *types.AgreementResult: - // Avoid byzantine nodes attack by broadcasting older - // agreement results. Normal nodes might report 'synced' - // while still fall behind other nodes. - if v.Position.Height <= con.initChainTipHeight { - continue loop - } - default: - continue loop - } - con.agreementModule.inputChan <- val.Payload - case <-con.ctx.Done(): - break loop - } - } - }() -} - -func (con *Consensus) stopAgreement() { - if con.agreementModule.inputChan != nil { - close(con.agreementModule.inputChan) - } - con.agreementWaitGroup.Wait() - con.agreementModule.inputChan = nil - close(con.receiveChan) - close(con.pullChan) -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/watch-cat.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/watch-cat.go deleted file mode 100644 index f2e197ebe..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/watch-cat.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2019 The dexon-consensus Authors -// This file is part of the dexon-consensus-core library. -// -// The dexon-consensus-core library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. 
-//
-// The dexon-consensus-core library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus-core library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package syncer
-
-import (
-	"context"
-	"time"
-
-	"github.com/dexon-foundation/dexon-consensus/common"
-	"github.com/dexon-foundation/dexon-consensus/core"
-	"github.com/dexon-foundation/dexon-consensus/core/types"
-	"github.com/dexon-foundation/dexon-consensus/core/utils"
-)
-
-type configReader interface {
-	Configuration(round uint64) *types.Config
-}
-
-// WatchCat is responsible for signaling whether the syncer object should be
-// terminated.
-type WatchCat struct {
-	recovery     core.Recovery
-	timeout      time.Duration
-	configReader configReader
-	feed         chan types.Position
-	lastPosition types.Position
-	polling      time.Duration
-	ctx          context.Context
-	cancel       context.CancelFunc
-	logger       common.Logger
-}
-
-// NewWatchCat creates a new WatchCat 🐱 object.
-func NewWatchCat(
-	recovery core.Recovery,
-	configReader configReader,
-	polling time.Duration,
-	timeout time.Duration,
-	logger common.Logger) *WatchCat {
-	wc := &WatchCat{
-		recovery:     recovery,
-		timeout:      timeout,
-		configReader: configReader,
-		feed:         make(chan types.Position),
-		polling:      polling,
-		logger:       logger,
-	}
-	return wc
-}
-
-// Feed the WatchCat so it won't produce the termination signal.
-func (wc *WatchCat) Feed(position types.Position) {
-	wc.feed <- position
-}
-
-// Start the WatchCat.
-func (wc *WatchCat) Start() {
-	wc.Stop()
-	wc.lastPosition = types.Position{}
-	wc.ctx, wc.cancel = context.WithCancel(context.Background())
-	go func() {
-		var lastPos types.Position
-	MonitorLoop:
-		for {
-			select {
-			case <-wc.ctx.Done():
-				return
-			default:
-			}
-			select {
-			case <-wc.ctx.Done():
-				return
-			case pos := <-wc.feed:
-				if !pos.Newer(lastPos) {
-					wc.logger.Warn("Feed with older height",
-						"pos", pos, "lastPos", lastPos)
-					continue
-				}
-				lastPos = pos
-			case <-time.After(wc.timeout):
-				break MonitorLoop
-			}
-		}
-		go func() {
-			for {
-				select {
-				case <-wc.ctx.Done():
-					return
-				case <-wc.feed:
-				}
-			}
-		}()
-		defer wc.cancel()
-		proposed := false
-		threshold := uint64(
-			utils.GetConfigWithPanic(wc.configReader, lastPos.Round, wc.logger).
-				NotarySetSize / 2)
-		wc.logger.Info("Threshold for recovery", "votes", threshold)
-	ResetLoop:
-		for {
-			if !proposed {
-				wc.logger.Info("Calling Recovery.ProposeSkipBlock",
-					"height", lastPos.Height)
-				if err := wc.recovery.ProposeSkipBlock(lastPos.Height); err != nil {
-					wc.logger.Warn("Failed to proposeSkipBlock", "height", lastPos.Height, "error", err)
-				} else {
-					proposed = true
-				}
-			}
-			votes, err := wc.recovery.Votes(lastPos.Height)
-			if err != nil {
-				wc.logger.Error("Failed to get recovery votes", "height", lastPos.Height, "error", err)
-			} else if votes > threshold {
-				wc.logger.Info("Threshold for recovery reached!")
-				wc.lastPosition = lastPos
-				break ResetLoop
-			}
-			select {
-			case <-wc.ctx.Done():
-				return
-			case <-time.After(wc.polling):
-			}
-		}
-	}()
}
-
-// Stop the WatchCat.
-func (wc *WatchCat) Stop() {
-	if wc.cancel != nil {
-		wc.cancel()
-	}
-}
-
-// Meow returns a closed channel if the syncer should be terminated.
-func (wc *WatchCat) Meow() <-chan struct{} {
-	return wc.ctx.Done()
-}
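Usage-wise the watchdog is fire-and-forget: start it, feed it every position the syncer advances to, and treat a closed Meow channel as the order to tear down and recover. A hypothetical wiring sketch under those assumptions; the package name, the positions channel, and the polling/timeout durations are all invented for illustration:

```go
package nodeutil

import (
	"time"

	"github.com/dexon-foundation/dexon-consensus/common"
	"github.com/dexon-foundation/dexon-consensus/core"
	"github.com/dexon-foundation/dexon-consensus/core/syncer"
	"github.com/dexon-foundation/dexon-consensus/core/types"
)

// superviseSync is an invented helper showing the intended WatchCat
// life cycle: feed it progress, stop syncing once it meows, and hand
// the recovery position back to the caller.
func superviseSync(
	recovery core.Recovery,
	gov core.Governance, // anything with Configuration(round) works
	logger common.Logger,
	positions <-chan types.Position, // invented progress feed
) types.Position {
	wc := syncer.NewWatchCat(recovery, gov, 3*time.Second, 30*time.Second, logger)
	wc.Start()
	defer wc.Stop()
	for {
		select {
		case pos := <-positions:
			wc.Feed(pos) // keep the cat fed while sync makes progress
		case <-wc.Meow():
			// Stalled past the timeout and recovery votes reached the
			// threshold: the caller should restart from this position.
			return wc.LastPosition()
		}
	}
}
```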
-// LastPosition returns the last position for recovery.
-func (wc *WatchCat) LastPosition() types.Position {
-	return wc.lastPosition
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/ticker.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/ticker.go
deleted file mode 100644
index 636fb8c49..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/ticker.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
-	"context"
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/dexon-foundation/dexon-consensus/core/utils"
-)
-
-// TickerType is the type of ticker.
-type TickerType int
-
-// TickerType enum.
-const (
-	TickerBA TickerType = iota
-	TickerDKG
-	TickerCRS
-)
-
-// defaultTicker is a wrapper to implement ticker interface based on
-// time.Ticker.
-type defaultTicker struct {
-	ticker     *time.Ticker
-	tickerChan chan time.Time
-	duration   time.Duration
-	ctx        context.Context
-	ctxCancel  context.CancelFunc
-	waitGroup  sync.WaitGroup
-}
-
-// newDefaultTicker constructs a defaultTicker instance for a given interval.
-func newDefaultTicker(lambda time.Duration) *defaultTicker {
-	ticker := &defaultTicker{duration: lambda}
-	ticker.init()
-	return ticker
-}
-
-// Tick implements the Tick method of the ticker interface.
-func (t *defaultTicker) Tick() <-chan time.Time {
-	return t.tickerChan
-}
-
-// Stop implements the Stop method of the ticker interface.
-func (t *defaultTicker) Stop() {
-	t.ticker.Stop()
-	t.ctxCancel()
-	t.waitGroup.Wait()
-	t.ctx = nil
-	t.ctxCancel = nil
-	close(t.tickerChan)
-	t.tickerChan = nil
-}
-
-// Restart implements the Restart method of the ticker interface.
-func (t *defaultTicker) Restart() {
-	t.Stop()
-	t.init()
-}
-
-func (t *defaultTicker) init() {
-	t.ticker = time.NewTicker(t.duration)
-	t.tickerChan = make(chan time.Time)
-	t.ctx, t.ctxCancel = context.WithCancel(context.Background())
-	t.waitGroup.Add(1)
-	go t.monitor()
-}
-
-func (t *defaultTicker) monitor() {
-	defer t.waitGroup.Done()
-loop:
-	for {
-		select {
-		case <-t.ctx.Done():
-			break loop
-		case v := <-t.ticker.C:
-			select {
-			case t.tickerChan <- v:
-			default:
-			}
-		}
-	}
-}
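The interesting design choice in monitor above is the inner select with a default arm: a tick that arrives while the consumer is still busy is dropped instead of queued, so the consumer always observes fresh ticks rather than a backlog. The same idea in a runnable, stand-alone form; all names are invented:

```go
package main

import (
	"fmt"
	"time"
)

// forwardTicks mirrors defaultTicker.monitor: ticks that arrive while
// the consumer is busy are dropped rather than queued.
func forwardTicks(in <-chan time.Time, out chan<- time.Time, done <-chan struct{}) {
	for {
		select {
		case <-done:
			return
		case v := <-in:
			select {
			case out <- v:
			default: // consumer busy: drop this tick
			}
		}
	}
}

func main() {
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()
	out := make(chan time.Time)
	done := make(chan struct{})
	go forwardTicks(ticker.C, out, done)

	for i := 0; i < 3; i++ {
		t := <-out
		fmt.Println("tick", i, t.Format(time.StampMilli))
		// Simulate a slow consumer; intermediate ticks are dropped.
		time.Sleep(120 * time.Millisecond)
	}
	close(done)
}
```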
-// newTicker is a helper to set up a ticker for a given Governance. If the
-// governance object implements a ticker generator, a ticker from that
-// generator is returned; otherwise a default one is constructed.
-func newTicker(gov Governance, round uint64, tickerType TickerType) (t Ticker) {
-	type tickerGenerator interface {
-		NewTicker(TickerType) Ticker
-	}
-
-	if gen, ok := gov.(tickerGenerator); ok {
-		t = gen.NewTicker(tickerType)
-	}
-	if t == nil {
-		var duration time.Duration
-		switch tickerType {
-		case TickerBA:
-			duration = utils.GetConfigWithPanic(gov, round, nil).LambdaBA
-		case TickerDKG:
-			duration = utils.GetConfigWithPanic(gov, round, nil).LambdaDKG
-		default:
-			panic(fmt.Errorf("unknown ticker type: %d", tickerType))
-		}
-		t = newDefaultTicker(duration)
-	}
-	return
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/block-randomness.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/block-randomness.go
deleted file mode 100644
index 1c7454398..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/block-randomness.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package types
-
-import (
-	"encoding/hex"
-	"fmt"
-
-	"github.com/dexon-foundation/dexon-consensus/common"
-)
-
-// AgreementResult describes an agreement result.
-type AgreementResult struct {
-	BlockHash    common.Hash `json:"block_hash"`
-	Position     Position    `json:"position"`
-	Votes        []Vote      `json:"votes"`
-	IsEmptyBlock bool        `json:"is_empty_block"`
-	Randomness   []byte      `json:"randomness"`
-}
-
-func (r *AgreementResult) String() string {
-	if len(r.Randomness) == 0 {
-		return fmt.Sprintf("agreementResult{Block:%s Pos:%s}",
-			r.BlockHash.String()[:6], r.Position)
-	}
-	return fmt.Sprintf("agreementResult{Block:%s Pos:%s Rand:%s}",
-		r.BlockHash.String()[:6], r.Position,
-		hex.EncodeToString(r.Randomness)[:6])
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/block.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/block.go
deleted file mode 100644
index 1dcd41b9e..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/block.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -// TODO(jimmy-dexon): remove comments of WitnessAck before open source. - -package types - -import ( - "bytes" - "fmt" - "io" - "time" - - "github.com/dexon-foundation/dexon/rlp" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/crypto" -) - -// GenesisHeight refers to the initial height the genesis block should be. -const GenesisHeight uint64 = 1 - -// BlockVerifyStatus is the return code for core.Application.VerifyBlock -type BlockVerifyStatus int - -// Enums for return value of core.Application.VerifyBlock. -const ( - // VerifyOK: Block is verified. - VerifyOK BlockVerifyStatus = iota - // VerifyRetryLater: Block is unable to be verified at this moment. - // Try again later. - VerifyRetryLater - // VerifyInvalidBlock: Block is an invalid one. - VerifyInvalidBlock -) - -type rlpTimestamp struct { - time.Time -} - -func (t *rlpTimestamp) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, uint64(t.UTC().UnixNano())) -} - -func (t *rlpTimestamp) DecodeRLP(s *rlp.Stream) error { - var nano uint64 - err := s.Decode(&nano) - if err == nil { - sec := int64(nano) / 1000000000 - nsec := int64(nano) % 1000000000 - t.Time = time.Unix(sec, nsec).UTC() - } - return err -} - -// Witness represents the consensus information on the compaction chain. -type Witness struct { - Height uint64 `json:"height"` - Data []byte `json:"data"` -} - -// Block represents a single event broadcasted on the network. -type Block struct { - ProposerID NodeID `json:"proposer_id"` - ParentHash common.Hash `json:"parent_hash"` - Hash common.Hash `json:"hash"` - Position Position `json:"position"` - Timestamp time.Time `json:"timestamp"` - Payload []byte `json:"payload"` - PayloadHash common.Hash `json:"payload_hash"` - Witness Witness `json:"witness"` - Randomness []byte `json:"randomness"` - Signature crypto.Signature `json:"signature"` - - CRSSignature crypto.Signature `json:"crs_signature"` -} - -type rlpBlock struct { - ProposerID NodeID - ParentHash common.Hash - Hash common.Hash - Position Position - Timestamp *rlpTimestamp - Payload []byte - PayloadHash common.Hash - Witness *Witness - Randomness []byte - Signature crypto.Signature - - CRSSignature crypto.Signature -} - -// EncodeRLP implements rlp.Encoder -func (b *Block) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, rlpBlock{ - ProposerID: b.ProposerID, - ParentHash: b.ParentHash, - Hash: b.Hash, - Position: b.Position, - Timestamp: &rlpTimestamp{b.Timestamp}, - Payload: b.Payload, - PayloadHash: b.PayloadHash, - Witness: &b.Witness, - Randomness: b.Randomness, - Signature: b.Signature, - CRSSignature: b.CRSSignature, - }) -} - -// DecodeRLP implements rlp.Decoder -func (b *Block) DecodeRLP(s *rlp.Stream) error { - var dec rlpBlock - err := s.Decode(&dec) - if err == nil { - *b = Block{ - ProposerID: dec.ProposerID, - ParentHash: dec.ParentHash, - Hash: dec.Hash, - Position: dec.Position, - Timestamp: dec.Timestamp.Time, - Payload: dec.Payload, - PayloadHash: dec.PayloadHash, - Witness: *dec.Witness, - Randomness: dec.Randomness, - Signature: dec.Signature, - CRSSignature: dec.CRSSignature, - } - } - return err -} - -func (b *Block) String() string { - return fmt.Sprintf("Block{Hash:%v %s}", b.Hash.String()[:6], b.Position) -} - -// Clone returns a deep copy of a block. 
-func (b *Block) Clone() (bcopy *Block) { - bcopy = &Block{} - bcopy.ProposerID = b.ProposerID - bcopy.ParentHash = b.ParentHash - bcopy.Hash = b.Hash - bcopy.Position.Round = b.Position.Round - bcopy.Position.Height = b.Position.Height - bcopy.Signature = b.Signature.Clone() - bcopy.CRSSignature = b.CRSSignature.Clone() - bcopy.Witness.Height = b.Witness.Height - bcopy.Witness.Data = common.CopyBytes(b.Witness.Data) - bcopy.Timestamp = b.Timestamp - bcopy.Payload = common.CopyBytes(b.Payload) - bcopy.PayloadHash = b.PayloadHash - bcopy.Randomness = common.CopyBytes(b.Randomness) - return -} - -// IsGenesis checks if the block is a genesisBlock -func (b *Block) IsGenesis() bool { - return b.Position.Height == GenesisHeight && b.ParentHash == common.Hash{} -} - -// IsFinalized checks if the block is finalized. -func (b *Block) IsFinalized() bool { - return len(b.Randomness) > 0 -} - -// IsEmpty checks if the block is an 'empty block'. -func (b *Block) IsEmpty() bool { - return b.ProposerID.Hash == common.Hash{} -} - -// ByHash is the helper type for sorting slice of blocks by hash. -type ByHash []*Block - -func (b ByHash) Len() int { - return len(b) -} - -func (b ByHash) Less(i int, j int) bool { - return bytes.Compare([]byte(b[i].Hash[:]), []byte(b[j].Hash[:])) == -1 -} - -func (b ByHash) Swap(i int, j int) { - b[i], b[j] = b[j], b[i] -} - -// BlocksByPosition is the helper type for sorting slice of blocks by position. -type BlocksByPosition []*Block - -// Len implements Len method in sort.Sort interface. -func (bs BlocksByPosition) Len() int { - return len(bs) -} - -// Less implements Less method in sort.Sort interface. -func (bs BlocksByPosition) Less(i int, j int) bool { - return bs[j].Position.Newer(bs[i].Position) -} - -// Swap implements Swap method in sort.Sort interface. -func (bs BlocksByPosition) Swap(i int, j int) { - bs[i], bs[j] = bs[j], bs[i] -} - -// Push implements Push method in heap interface. -func (bs *BlocksByPosition) Push(x interface{}) { - *bs = append(*bs, x.(*Block)) -} - -// Pop implements Pop method in heap interface. -func (bs *BlocksByPosition) Pop() (ret interface{}) { - n := len(*bs) - *bs, ret = (*bs)[0:n-1], (*bs)[n-1] - return -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/config.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/config.go deleted file mode 100644 index dce38369e..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/config.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package types - -import ( - "encoding/binary" - "time" -) - -// Config stands for Current Configuration Parameters. -type Config struct { - // Lambda related. 
-	LambdaBA  time.Duration
-	LambdaDKG time.Duration
-
-	// Set related.
-	NotarySetSize uint32
-
-	// Time related.
-	RoundLength      uint64
-	MinBlockInterval time.Duration
-}
-
-// Clone returns a copied configuration.
-func (c *Config) Clone() *Config {
-	return &Config{
-		LambdaBA:         c.LambdaBA,
-		LambdaDKG:        c.LambdaDKG,
-		NotarySetSize:    c.NotarySetSize,
-		RoundLength:      c.RoundLength,
-		MinBlockInterval: c.MinBlockInterval,
-	}
-}
-
-// Bytes returns []byte representation of Config.
-func (c *Config) Bytes() []byte {
-	binaryLambdaBA := make([]byte, 8)
-	binary.LittleEndian.PutUint64(
-		binaryLambdaBA, uint64(c.LambdaBA.Nanoseconds()))
-	binaryLambdaDKG := make([]byte, 8)
-	binary.LittleEndian.PutUint64(
-		binaryLambdaDKG, uint64(c.LambdaDKG.Nanoseconds()))
-
-	binaryNotarySetSize := make([]byte, 4)
-	binary.LittleEndian.PutUint32(binaryNotarySetSize, c.NotarySetSize)
-
-	binaryRoundLength := make([]byte, 8)
-	binary.LittleEndian.PutUint64(binaryRoundLength, c.RoundLength)
-	binaryMinBlockInterval := make([]byte, 8)
-	binary.LittleEndian.PutUint64(binaryMinBlockInterval,
-		uint64(c.MinBlockInterval.Nanoseconds()))
-
-	enc := make([]byte, 0, 40)
-	enc = append(enc, binaryLambdaBA...)
-	enc = append(enc, binaryLambdaDKG...)
-	enc = append(enc, binaryNotarySetSize...)
-	enc = append(enc, binaryRoundLength...)
-	enc = append(enc, binaryMinBlockInterval...)
-	return enc
-}
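Bytes lays the five parameters out as a fixed little-endian record of 36 bytes (the slice is merely allocated with capacity 40). A hedged sketch of the inverse decoder, written against exactly that layout; decodeConfig is invented for illustration and is not part of the library:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"time"

	"github.com/dexon-foundation/dexon-consensus/core/types"
)

// decodeConfig reverses Config.Bytes above. Layout, little-endian:
// LambdaBA(8) | LambdaDKG(8) | NotarySetSize(4) | RoundLength(8) |
// MinBlockInterval(8) = 36 bytes, all durations in nanoseconds.
func decodeConfig(enc []byte) (*types.Config, error) {
	if len(enc) != 36 {
		return nil, fmt.Errorf("bad encoded config length %d", len(enc))
	}
	return &types.Config{
		LambdaBA:         time.Duration(binary.LittleEndian.Uint64(enc[0:8])),
		LambdaDKG:        time.Duration(binary.LittleEndian.Uint64(enc[8:16])),
		NotarySetSize:    binary.LittleEndian.Uint32(enc[16:20]),
		RoundLength:      binary.LittleEndian.Uint64(enc[20:28]),
		MinBlockInterval: time.Duration(binary.LittleEndian.Uint64(enc[28:36])),
	}, nil
}

func main() {
	cfg := &types.Config{LambdaBA: 250 * time.Millisecond, RoundLength: 1200}
	decoded, err := decodeConfig(cfg.Bytes())
	fmt.Println(decoded.LambdaBA, decoded.RoundLength, err) // 250ms 1200 <nil>
}
```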
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/dkg/dkg.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/dkg/dkg.go
deleted file mode 100644
index cb921e586..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/dkg/dkg.go
+++ /dev/null
@@ -1,485 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package dkg
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-
-	"github.com/dexon-foundation/dexon/rlp"
-
-	"github.com/dexon-foundation/dexon-consensus/common"
-	"github.com/dexon-foundation/dexon-consensus/core/crypto"
-	cryptoDKG "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
-	"github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-// Errors for typesDKG package.
-var (
-	ErrNotReachThreshold = fmt.Errorf("threshold not reach")
-	ErrInvalidThreshold  = fmt.Errorf("invalid threshold")
-)
-
-// NewID creates a DKGID from NodeID.
-func NewID(ID types.NodeID) cryptoDKG.ID {
-	return cryptoDKG.NewID(ID.Hash[:])
-}
-
-// PrivateShare describes a secret share in the DKG protocol.
-type PrivateShare struct {
-	ProposerID   types.NodeID         `json:"proposer_id"`
-	ReceiverID   types.NodeID         `json:"receiver_id"`
-	Round        uint64               `json:"round"`
-	Reset        uint64               `json:"reset"`
-	PrivateShare cryptoDKG.PrivateKey `json:"private_share"`
-	Signature    crypto.Signature     `json:"signature"`
-}
-
-// Equal checks equality between two PrivateShare instances.
-func (p *PrivateShare) Equal(other *PrivateShare) bool {
-	return p.ProposerID.Equal(other.ProposerID) &&
-		p.ReceiverID.Equal(other.ReceiverID) &&
-		p.Round == other.Round &&
-		p.Reset == other.Reset &&
-		p.Signature.Type == other.Signature.Type &&
-		bytes.Compare(p.Signature.Signature, other.Signature.Signature) == 0 &&
-		bytes.Compare(
-			p.PrivateShare.Bytes(), other.PrivateShare.Bytes()) == 0
-}
-
-// MasterPublicKey describes a master public key in the DKG protocol.
-type MasterPublicKey struct {
-	ProposerID      types.NodeID              `json:"proposer_id"`
-	Round           uint64                    `json:"round"`
-	Reset           uint64                    `json:"reset"`
-	DKGID           cryptoDKG.ID              `json:"dkg_id"`
-	PublicKeyShares cryptoDKG.PublicKeyShares `json:"public_key_shares"`
-	Signature       crypto.Signature          `json:"signature"`
-}
-
-func (d *MasterPublicKey) String() string {
-	return fmt.Sprintf("MasterPublicKey{KP:%s Round:%d Reset:%d}",
-		d.ProposerID.String()[:6],
-		d.Round,
-		d.Reset)
-}
-
-// Equal checks equality of two DKG master public keys.
-func (d *MasterPublicKey) Equal(other *MasterPublicKey) bool {
-	return d.ProposerID.Equal(other.ProposerID) &&
-		d.Round == other.Round &&
-		d.Reset == other.Reset &&
-		d.DKGID.GetHexString() == other.DKGID.GetHexString() &&
-		d.PublicKeyShares.Equal(&other.PublicKeyShares) &&
-		d.Signature.Type == other.Signature.Type &&
-		bytes.Compare(d.Signature.Signature, other.Signature.Signature) == 0
-}
-
-type rlpMasterPublicKey struct {
-	ProposerID      types.NodeID
-	Round           uint64
-	Reset           uint64
-	DKGID           []byte
-	PublicKeyShares *cryptoDKG.PublicKeyShares
-	Signature       crypto.Signature
-}
-
-// EncodeRLP implements rlp.Encoder
-func (d *MasterPublicKey) EncodeRLP(w io.Writer) error {
-	return rlp.Encode(w, rlpMasterPublicKey{
-		ProposerID:      d.ProposerID,
-		Round:           d.Round,
-		Reset:           d.Reset,
-		DKGID:           d.DKGID.GetLittleEndian(),
-		PublicKeyShares: &d.PublicKeyShares,
-		Signature:       d.Signature,
-	})
-}
-
-// DecodeRLP implements rlp.Decoder
-func (d *MasterPublicKey) DecodeRLP(s *rlp.Stream) error {
-	var dec rlpMasterPublicKey
-	if err := s.Decode(&dec); err != nil {
-		return err
-	}
-
-	id, err := cryptoDKG.BytesID(dec.DKGID)
-	if err != nil {
-		return err
-	}
-
-	*d = MasterPublicKey{
-		ProposerID:      dec.ProposerID,
-		Round:           dec.Round,
-		Reset:           dec.Reset,
-		DKGID:           id,
-		PublicKeyShares: *dec.PublicKeyShares.Move(),
-		Signature:       dec.Signature,
-	}
-	return err
-}
-
-// NewMasterPublicKey returns a new MasterPublicKey instance.
-func NewMasterPublicKey() *MasterPublicKey {
-	return &MasterPublicKey{
-		PublicKeyShares: *cryptoDKG.NewEmptyPublicKeyShares(),
-	}
-}
-
-// UnmarshalJSON implements json.Unmarshaller.
-func (d *MasterPublicKey) UnmarshalJSON(data []byte) error {
-	type innertMasterPublicKey MasterPublicKey
-	d.PublicKeyShares = *cryptoDKG.NewEmptyPublicKeyShares()
-	return json.Unmarshal(data, (*innertMasterPublicKey)(d))
-}
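Note how Equal treats Round and Reset as part of a key's identity: after a CRS reset, the same proposer must register a fresh master public key, and the old one no longer compares equal. A tiny runnable illustration of that semantic:

```go
package main

import (
	"fmt"

	typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
)

func main() {
	// Two fresh master public keys for the same round but different
	// reset counters: Equal treats them as distinct registrations.
	a := typesDKG.NewMasterPublicKey()
	b := typesDKG.NewMasterPublicKey()
	a.Round, b.Round = 5, 5
	a.Reset, b.Reset = 0, 1
	fmt.Println(a.Equal(b)) // false: Reset is part of a key's identity
}
```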
-// Complaint describes a complaint in the DKG protocol.
-type Complaint struct {
-	ProposerID   types.NodeID     `json:"proposer_id"`
-	Round        uint64           `json:"round"`
-	Reset        uint64           `json:"reset"`
-	PrivateShare PrivateShare     `json:"private_share"`
-	Signature    crypto.Signature `json:"signature"`
-}
-
-func (c *Complaint) String() string {
-	if c.IsNack() {
-		return fmt.Sprintf("DKGNackComplaint{CP:%s Round:%d Reset %d PSP:%s}",
-			c.ProposerID.String()[:6], c.Round, c.Reset,
-			c.PrivateShare.ProposerID.String()[:6])
-	}
-	return fmt.Sprintf("DKGComplaint{CP:%s Round:%d Reset %d PrivateShare:%v}",
-		c.ProposerID.String()[:6], c.Round, c.Reset, c.PrivateShare)
-}
-
-// Equal checks equality between two Complaint instances.
-func (c *Complaint) Equal(other *Complaint) bool {
-	return c.ProposerID.Equal(other.ProposerID) &&
-		c.Round == other.Round &&
-		c.Reset == other.Reset &&
-		c.PrivateShare.Equal(&other.PrivateShare) &&
-		c.Signature.Type == other.Signature.Type &&
-		bytes.Compare(c.Signature.Signature, other.Signature.Signature) == 0
-}
-
-type rlpComplaint struct {
-	ProposerID   types.NodeID
-	Round        uint64
-	Reset        uint64
-	IsNack       bool
-	PrivateShare []byte
-	Signature    crypto.Signature
-}
-
-// EncodeRLP implements rlp.Encoder
-func (c *Complaint) EncodeRLP(w io.Writer) error {
-	if c.IsNack() {
-		return rlp.Encode(w, rlpComplaint{
-			ProposerID:   c.ProposerID,
-			Round:        c.Round,
-			Reset:        c.Reset,
-			IsNack:       true,
-			PrivateShare: c.PrivateShare.ProposerID.Hash[:],
-			Signature:    c.Signature,
-		})
-	}
-	prvShare, err := rlp.EncodeToBytes(&c.PrivateShare)
-	if err != nil {
-		return err
-	}
-	return rlp.Encode(w, rlpComplaint{
-		ProposerID:   c.ProposerID,
-		Round:        c.Round,
-		Reset:        c.Reset,
-		IsNack:       false,
-		PrivateShare: prvShare,
-		Signature:    c.Signature,
-	})
}
-
-// DecodeRLP implements rlp.Decoder
-func (c *Complaint) DecodeRLP(s *rlp.Stream) error {
-	var dec rlpComplaint
-	if err := s.Decode(&dec); err != nil {
-		return err
-	}
-
-	var prvShare PrivateShare
-	if dec.IsNack {
-		copy(prvShare.ProposerID.Hash[:], dec.PrivateShare)
-		prvShare.Round = dec.Round
-		prvShare.Reset = dec.Reset
-	} else {
-		if err := rlp.DecodeBytes(dec.PrivateShare, &prvShare); err != nil {
-			return err
-		}
-	}
-
-	*c = Complaint{
-		ProposerID:   dec.ProposerID,
-		Round:        dec.Round,
-		Reset:        dec.Reset,
-		PrivateShare: prvShare,
-		Signature:    dec.Signature,
-	}
-	return nil
-}
-
-// IsNack returns true if it's a nack complaint in DKG protocol.
-func (c *Complaint) IsNack() bool {
-	return len(c.PrivateShare.Signature.Signature) == 0
-}
-
-// PartialSignature describes a partial signature in the DKG protocol.
-type PartialSignature struct {
-	ProposerID       types.NodeID               `json:"proposer_id"`
-	Round            uint64                     `json:"round"`
-	Hash             common.Hash                `json:"hash"`
-	PartialSignature cryptoDKG.PartialSignature `json:"partial_signature"`
-	Signature        crypto.Signature           `json:"signature"`
-}
-
-// MPKReady describes a DKG ready message in the DKG protocol.
-type MPKReady struct {
-	ProposerID types.NodeID     `json:"proposer_id"`
-	Round      uint64           `json:"round"`
-	Reset      uint64           `json:"reset"`
-	Signature  crypto.Signature `json:"signature"`
-}
-
-func (ready *MPKReady) String() string {
-	return fmt.Sprintf("DKGMPKReady{RP:%s Round:%d Reset:%d}",
-		ready.ProposerID.String()[:6],
-		ready.Round,
-		ready.Reset)
-}
-
-// Equal checks equality of two MPKReady instances.
- -// Equal checks equality of two MPKReady instances. -func (ready *MPKReady) Equal(other *MPKReady) bool { - return ready.ProposerID.Equal(other.ProposerID) && - ready.Round == other.Round && - ready.Reset == other.Reset && - ready.Signature.Type == other.Signature.Type && - bytes.Compare(ready.Signature.Signature, other.Signature.Signature) == 0 -} - -// Finalize describes a DKG finalize message in DKG protocol. -type Finalize struct { - ProposerID types.NodeID `json:"proposer_id"` - Round uint64 `json:"round"` - Reset uint64 `json:"reset"` - Signature crypto.Signature `json:"signature"` -} - -func (final *Finalize) String() string { - return fmt.Sprintf("DKGFinal{FP:%s Round:%d Reset:%d}", - final.ProposerID.String()[:6], - final.Round, - final.Reset) -} - -// Equal checks equality of two Finalize instances. -func (final *Finalize) Equal(other *Finalize) bool { - return final.ProposerID.Equal(other.ProposerID) && - final.Round == other.Round && - final.Reset == other.Reset && - final.Signature.Type == other.Signature.Type && - bytes.Compare(final.Signature.Signature, other.Signature.Signature) == 0 -} - -// Success describes a DKG success message in DKG protocol. -type Success struct { - ProposerID types.NodeID `json:"proposer_id"` - Round uint64 `json:"round"` - Reset uint64 `json:"reset"` - Signature crypto.Signature `json:"signature"` -} - -func (s *Success) String() string { - return fmt.Sprintf("DKGSuccess{SP:%s Round:%d Reset:%d}", - s.ProposerID.String()[:6], - s.Round, - s.Reset) -} - -// Equal checks equality of two Success instances. -func (s *Success) Equal(other *Success) bool { - return s.ProposerID.Equal(other.ProposerID) && - s.Round == other.Round && - s.Reset == other.Reset && - s.Signature.Type == other.Signature.Type && - bytes.Compare(s.Signature.Signature, other.Signature.Signature) == 0 -}
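The Equal methods above compare every field, signatures included, spelled as bytes.Compare(a, b) == 0; bytes.Equal is the more idiomatic form of the same test. A hedged helper sketch, assuming the bytes and crypto imports already used in this file:

// sameSignature reports whether two signatures match in both type tag
// and raw bytes; it mirrors the comparison used by the Equal methods.
func sameSignature(a, b crypto.Signature) bool {
    return a.Type == b.Type && bytes.Equal(a.Signature, b.Signature)
}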
- -// GroupPublicKey is the result of DKG protocol. -type GroupPublicKey struct { - Round uint64 - QualifyIDs cryptoDKG.IDs - QualifyNodeIDs map[types.NodeID]struct{} - IDMap map[types.NodeID]cryptoDKG.ID - GroupPublicKey *cryptoDKG.PublicKey - Threshold int -} - -// VerifySignature verifies if the signature is correct. -func (gpk *GroupPublicKey) VerifySignature( - hash common.Hash, sig crypto.Signature) bool { - return gpk.GroupPublicKey.VerifySignature(hash, sig) -} - -// CalcQualifyNodes returns the qualified nodes. -func CalcQualifyNodes( - mpks []*MasterPublicKey, complaints []*Complaint, threshold int) ( - qualifyIDs cryptoDKG.IDs, qualifyNodeIDs map[types.NodeID]struct{}, err error) { - if len(mpks) < threshold { - err = ErrInvalidThreshold - return - } - - // Calculate qualified members. - disqualifyIDs := map[types.NodeID]struct{}{} - complaintsByID := map[types.NodeID]map[types.NodeID]struct{}{} - for _, complaint := range complaints { - if complaint.IsNack() { - if _, exist := complaintsByID[complaint.PrivateShare.ProposerID]; !exist { - complaintsByID[complaint.PrivateShare.ProposerID] = - make(map[types.NodeID]struct{}) - } - complaintsByID[complaint.PrivateShare.ProposerID][complaint.ProposerID] = - struct{}{} - } else { - disqualifyIDs[complaint.PrivateShare.ProposerID] = struct{}{} - } - } - for nID, complaints := range complaintsByID { - if len(complaints) >= threshold { - disqualifyIDs[nID] = struct{}{} - } - } - qualifyIDs = make(cryptoDKG.IDs, 0, len(mpks)-len(disqualifyIDs)) - if cap(qualifyIDs) < threshold { - err = ErrNotReachThreshold - return - } - qualifyNodeIDs = make(map[types.NodeID]struct{}) - for _, mpk := range mpks { - if _, exist := disqualifyIDs[mpk.ProposerID]; exist { - continue - } - qualifyIDs = append(qualifyIDs, mpk.DKGID) - qualifyNodeIDs[mpk.ProposerID] = struct{}{} - } - return -} - -// NewGroupPublicKey creates a GroupPublicKey instance. -func NewGroupPublicKey( - round uint64, - mpks []*MasterPublicKey, complaints []*Complaint, - threshold int) ( - *GroupPublicKey, error) { - qualifyIDs, qualifyNodeIDs, err := - CalcQualifyNodes(mpks, complaints, threshold) - if err != nil { - return nil, err - } - mpkMap := make(map[cryptoDKG.ID]*MasterPublicKey, cap(qualifyIDs)) - idMap := make(map[types.NodeID]cryptoDKG.ID) - for _, mpk := range mpks { - if _, exist := qualifyNodeIDs[mpk.ProposerID]; !exist { - continue - } - mpkMap[mpk.DKGID] = mpk - idMap[mpk.ProposerID] = mpk.DKGID - } - // Recover Group Public Key. - pubShares := make([]*cryptoDKG.PublicKeyShares, 0, len(qualifyIDs)) - for _, id := range qualifyIDs { - pubShares = append(pubShares, &mpkMap[id].PublicKeyShares) - } - groupPK := cryptoDKG.RecoverGroupPublicKey(pubShares) - return &GroupPublicKey{ - Round: round, - QualifyIDs: qualifyIDs, - QualifyNodeIDs: qualifyNodeIDs, - IDMap: idMap, - Threshold: threshold, - GroupPublicKey: groupPK, - }, nil -}
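Taken together, CalcQualifyNodes drops proposers that were directly accused or nacked by at least threshold peers, and NewGroupPublicKey recovers the group key from the survivors' shares. A hedged usage sketch; round, mpks, complaints, threshold, hash and tsig are assumed to come from the surrounding consensus code:

gpk, err := typesDKG.NewGroupPublicKey(round, mpks, complaints, threshold)
if err != nil {
    // ErrInvalidThreshold or ErrNotReachThreshold, per the checks above.
    return err
}
if gpk.VerifySignature(hash, tsig) {
    // tsig is a valid threshold signature under this round's group key.
}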
- -// NodePublicKeys is the result of DKG protocol. -type NodePublicKeys struct { - Round uint64 - QualifyIDs cryptoDKG.IDs - QualifyNodeIDs map[types.NodeID]struct{} - IDMap map[types.NodeID]cryptoDKG.ID - PublicKeys map[types.NodeID]*cryptoDKG.PublicKey - Threshold int -} - -// NewNodePublicKeys creates a NodePublicKeys instance. -func NewNodePublicKeys( - round uint64, - mpks []*MasterPublicKey, complaints []*Complaint, - threshold int) ( - *NodePublicKeys, error) { - qualifyIDs, qualifyNodeIDs, err := - CalcQualifyNodes(mpks, complaints, threshold) - if err != nil { - return nil, err - } - mpkMap := make(map[cryptoDKG.ID]*MasterPublicKey, cap(qualifyIDs)) - idMap := make(map[types.NodeID]cryptoDKG.ID) - for _, mpk := range mpks { - if _, exist := qualifyNodeIDs[mpk.ProposerID]; !exist { - continue - } - mpkMap[mpk.DKGID] = mpk - idMap[mpk.ProposerID] = mpk.DKGID - } - // Recover qualified members' public keys. - pubKeys := make(map[types.NodeID]*cryptoDKG.PublicKey, len(qualifyIDs)) - for _, recvID := range qualifyIDs { - pubShares := cryptoDKG.NewEmptyPublicKeyShares() - for _, id := range qualifyIDs { - pubShare, err := mpkMap[id].PublicKeyShares.Share(recvID) - if err != nil { - return nil, err - } - if err := pubShares.AddShare(id, pubShare); err != nil { - return nil, err - } - } - pubKey, err := pubShares.RecoverPublicKey(qualifyIDs) - if err != nil { - return nil, err - } - pubKeys[mpkMap[recvID].ProposerID] = pubKey - } - return &NodePublicKeys{ - Round: round, - QualifyIDs: qualifyIDs, - QualifyNodeIDs: qualifyNodeIDs, - IDMap: idMap, - PublicKeys: pubKeys, - Threshold: threshold, - }, nil -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/message.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/message.go deleted file mode 100644 index 0335cfaae..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/message.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2019 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package types - -// Msg for the network ReceiveChan. -type Msg struct { - PeerID interface{} - Payload interface{} -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/node.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/node.go deleted file mode 100644 index 18b6831e0..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/node.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package types - -import ( - "bytes" - "encoding/hex" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/crypto" -) - -// NodeID is the ID type for nodes. -type NodeID struct { - common.Hash -} - -// NewNodeID returns a NodeID with Hash set to the hash value of -// the public key.
-func NewNodeID(pubKey crypto.PublicKey) NodeID { - return NodeID{Hash: crypto.Keccak256Hash(pubKey.Bytes()[1:])} -} - -// Equal checks if the hash representation is the same NodeID. -func (v NodeID) Equal(v2 NodeID) bool { - return v.Hash == v2.Hash -} - -func (v NodeID) String() string { - return hex.EncodeToString(v.Hash[:])[:6] -} - -// NodeIDs implements sort.Interface for NodeID. -type NodeIDs []NodeID - -func (v NodeIDs) Len() int { - return len(v) -} - -func (v NodeIDs) Less(i int, j int) bool { - return bytes.Compare([]byte(v[i].Hash[:]), []byte(v[j].Hash[:])) == -1 -} - -func (v NodeIDs) Swap(i int, j int) { - v[i], v[j] = v[j], v[i] -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/nodeset.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/nodeset.go deleted file mode 100644 index 806000763..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/nodeset.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package types - -import ( - "container/heap" - "encoding/binary" - "math/big" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/crypto" -) - -// NodeSet is the node set structure as defined in DEXON consensus core. -type NodeSet struct { - IDs map[NodeID]struct{} -} - -// SubSetTarget is the sub set target for GetSubSet(). -type SubSetTarget struct { - data [][]byte -} - -type subSetTargetType byte - -const ( - targetNotarySet subSetTargetType = iota - targetNodeLeader -) - -type nodeRank struct { - ID NodeID - rank *big.Int -} - -// rankHeap is a MaxHeap structure. -type rankHeap []*nodeRank - -func (h rankHeap) Len() int { return len(h) } -func (h rankHeap) Less(i, j int) bool { return h[i].rank.Cmp(h[j].rank) > 0 } -func (h rankHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -func (h *rankHeap) Push(x interface{}) { - *h = append(*h, x.(*nodeRank)) -} -func (h *rankHeap) Pop() interface{} { - old := *h - n := len(old) - x := old[n-1] - *h = old[0 : n-1] - return x -} - -// NewNodeSet creates a new NodeSet instance. -func NewNodeSet() *NodeSet { - return &NodeSet{ - IDs: make(map[NodeID]struct{}), - } -} - -// NewNodeSetFromMap creates a new NodeSet from NodeID map. -func NewNodeSetFromMap(nodes map[NodeID]struct{}) *NodeSet { - nIDs := make(map[NodeID]struct{}, len(nodes)) - for nID := range nodes { - nIDs[nID] = struct{}{} - } - return &NodeSet{ - IDs: nIDs, - } -} - -// NewNotarySetTarget is the target for getting Notary Set. -func NewNotarySetTarget(crs common.Hash) *SubSetTarget { - return newTarget(targetNotarySet, crs[:]) -} - -// NewNodeLeaderTarget is the target for getting leader of fast BA. 
-func NewNodeLeaderTarget(crs common.Hash, height uint64) *SubSetTarget { - binaryHeight := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryHeight, height) - return newTarget(targetNodeLeader, crs[:], binaryHeight) -} - -// Add a NodeID to the set. -func (ns *NodeSet) Add(ID NodeID) { - ns.IDs[ID] = struct{}{} -} - -// Clone the NodeSet. -func (ns *NodeSet) Clone() *NodeSet { - nsCopy := NewNodeSet() - for ID := range ns.IDs { - nsCopy.Add(ID) - } - return nsCopy -} - -// GetSubSet returns the subset of the given target. -func (ns *NodeSet) GetSubSet( - size int, target *SubSetTarget) map[NodeID]struct{} { - if size == 0 { - return make(map[NodeID]struct{}) - } - h := rankHeap{} - idx := 0 - for nID := range ns.IDs { - if idx < size { - h = append(h, newNodeRank(nID, target)) - } else if idx == size { - heap.Init(&h) - } - if idx >= size { - rank := newNodeRank(nID, target) - if rank.rank.Cmp(h[0].rank) < 0 { - h[0] = rank - heap.Fix(&h, 0) - } - } - idx++ - } - - nIDs := make(map[NodeID]struct{}, size) - for _, rank := range h { - nIDs[rank.ID] = struct{}{} - } - - return nIDs -} - -func newTarget(targetType subSetTargetType, data ...[]byte) *SubSetTarget { - data = append(data, []byte{byte(targetType)}) - return &SubSetTarget{ - data: data, - } -} - -func newNodeRank(ID NodeID, target *SubSetTarget) *nodeRank { - data := make([][]byte, 1, len(target.data)+1) - data[0] = make([]byte, len(ID.Hash)) - copy(data[0], ID.Hash[:]) - data = append(data, target.data...) - h := crypto.Keccak256Hash(data...) - num := new(big.Int).SetBytes(h[:]) - return &nodeRank{ - ID: ID, - rank: num, - } -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/position.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/position.go deleted file mode 100644 index 81d23c266..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/position.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package types - -import ( - "fmt" -) - -// Position describes the position in the block lattice of an entity. -type Position struct { - Round uint64 `json:"round"` - Height uint64 `json:"height"` -} - -func (pos Position) String() string { - return fmt.Sprintf("Position{Round:%d Height:%d}", pos.Round, pos.Height) -} - -// Equal checks if two positions are equal. -func (pos Position) Equal(other Position) bool { - return pos.Round == other.Round && pos.Height == other.Height -}
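Equal above, together with the Newer and Older helpers just below, gives Position a round-major, height-minor ordering. A hedged numeric sketch:

a := types.Position{Round: 1, Height: 400}
b := types.Position{Round: 2, Height: 10}
fmt.Println(b.Newer(a)) // true: a higher round wins regardless of height
fmt.Println(a.Older(b)) // true
fmt.Println(a.Equal(a)) // true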
- -// Newer checks if one block is newer than another one on the same chain. -// If two blocks on different chains are compared by this function, it would -// panic. -func (pos Position) Newer(other Position) bool { - return pos.Round > other.Round || - (pos.Round == other.Round && pos.Height > other.Height) -} - -// Older checks if one block is older than another one on the same chain. -// If two blocks on different chains are compared by this function, it would -// panic. -func (pos Position) Older(other Position) bool { - return pos.Round < other.Round || - (pos.Round == other.Round && pos.Height < other.Height) -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/vote.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/vote.go deleted file mode 100644 index 8bc0c78c2..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/vote.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package types - -import ( - "fmt" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/crypto" - cryptoDKG "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" -) - -// VoteType is the type of vote. -type VoteType byte - -// VoteType enum. -const ( - VoteInit VoteType = iota - VotePreCom - VoteCom - VoteFast - VoteFastCom - // Do not add any type below MaxVoteType. - MaxVoteType -) - -// NullBlockHash is the blockHash for ⊥ value. -var NullBlockHash common.Hash - -// SkipBlockHash is the blockHash for SKIP value. -var SkipBlockHash common.Hash - -func init() { - for idx := range SkipBlockHash { - SkipBlockHash[idx] = 0xff - } -} - -// VoteHeader is the header for vote, which can be used as map keys. -type VoteHeader struct { - ProposerID NodeID `json:"proposer_id"` - Type VoteType `json:"type"` - BlockHash common.Hash `json:"block_hash"` - Period uint64 `json:"period"` - Position Position `json:"position"` -} - -// Vote is the vote structure defined in the Crypto Shuffle Algorithm. -type Vote struct { - VoteHeader `json:"header"` - PartialSignature cryptoDKG.PartialSignature `json:"partial_signature"` - Signature crypto.Signature `json:"signature"` -} - -func (v *Vote) String() string { - return fmt.Sprintf("Vote{VP:%s %s Period:%d Type:%d Hash:%s}", - v.ProposerID.String()[:6], - v.Position, v.Period, v.Type, v.BlockHash.String()[:6]) -} - -// NewVote constructs a Vote instance with header fields. -func NewVote(t VoteType, hash common.Hash, period uint64) *Vote { - return &Vote{ - VoteHeader: VoteHeader{ - Type: t, - BlockHash: hash, - Period: period, - }} -}
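NewVote fills only the type, block hash and period; the proposer and position are stamped later, when the vote is signed and routed. A hedged sketch (someBlockHash is assumed to come from the caller):

v := types.NewVote(types.VotePreCom, someBlockHash, 3)
v.Position = types.Position{Round: 5, Height: 1024}
// ProposerID and Signature are filled when the vote is signed,
// before it is broadcast.
fmt.Println(v)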
- -// Clone returns a deep copy of a vote. -func (v *Vote) Clone() *Vote { - return &Vote{ - VoteHeader: VoteHeader{ - ProposerID: v.ProposerID, - Type: v.Type, - BlockHash: v.BlockHash, - Period: v.Period, - Position: v.Position, - }, - PartialSignature: cryptoDKG.PartialSignature( - crypto.Signature(v.PartialSignature).Clone()), - Signature: v.Signature.Clone(), - } -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils.go deleted file mode 100644 index c4d7b0fc3..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils.go +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package core - -import ( - "context" - "errors" - "fmt" - "os" - "sort" - "time" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/crypto" - "github.com/dexon-foundation/dexon-consensus/core/types" - "github.com/dexon-foundation/dexon-consensus/core/utils" -) - -// Errors for utils. -var ( - ErrIncorrectVoteBlockHash = fmt.Errorf( - "incorrect vote block hash") - ErrIncorrectVoteType = fmt.Errorf( - "incorrect vote type") - ErrIncorrectVotePosition = fmt.Errorf( - "incorrect vote position") - ErrIncorrectVoteProposer = fmt.Errorf( - "incorrect vote proposer") - ErrIncorrectVotePeriod = fmt.Errorf( - "incorrect vote period") -) - -// NodeSetCache is a type alias to avoid a fullnode compile error when moving -// it to the core/utils package. -type NodeSetCache = utils.NodeSetCache - -// NewNodeSetCache is a function alias to avoid a fullnode compile error when -// moving it to the core/utils package. -var NewNodeSetCache = utils.NewNodeSetCache - -var ( - debug = false - // ErrEmptyTimestamps would be reported if Block.timestamps is empty. - ErrEmptyTimestamps = errors.New("timestamp vector should not be empty") -) - -func init() { - if os.Getenv("DEBUG") != "" { - debug = true - } -} - -// Debugf is like fmt.Printf, but only outputs when we are in debug mode. -func Debugf(format string, args ...interface{}) { - if debug { - fmt.Printf(format, args...) - } -} - -// Debugln is like fmt.Println, but only outputs when we are in debug mode. -func Debugln(args ...interface{}) { - if debug { - fmt.Println(args...)
- } -} - -func interpoTime(t1 time.Time, t2 time.Time, sep int) []time.Time { - if sep == 0 { - return []time.Time{} - } - if t1.After(t2) { - return interpoTime(t2, t1, sep) - } - timestamps := make([]time.Time, sep) - duration := t2.Sub(t1) - period := time.Duration( - (duration.Nanoseconds() / int64(sep+1))) * time.Nanosecond - prevTime := t1 - for idx := range timestamps { - prevTime = prevTime.Add(period) - timestamps[idx] = prevTime - } - return timestamps -} - -func getMedianTime(timestamps []time.Time) (t time.Time, err error) { - if len(timestamps) == 0 { - err = ErrEmptyTimestamps - return - } - tscopy := make([]time.Time, 0, len(timestamps)) - for _, ts := range timestamps { - tscopy = append(tscopy, ts) - } - sort.Sort(common.ByTime(tscopy)) - if len(tscopy)%2 == 0 { - t1 := tscopy[len(tscopy)/2-1] - t2 := tscopy[len(tscopy)/2] - t = interpoTime(t1, t2, 1)[0] - } else { - t = tscopy[len(tscopy)/2] - } - return -} - -func removeFromSortedUint32Slice(xs []uint32, x uint32) []uint32 { - indexToRemove := sort.Search(len(xs), func(idx int) bool { - return xs[idx] >= x - }) - if indexToRemove == len(xs) || xs[indexToRemove] != x { - // This value is not found. - return xs - } - return append(xs[:indexToRemove], xs[indexToRemove+1:]...) -} - -// HashConfigurationBlock returns the hash value of a configuration block. -func HashConfigurationBlock( - notarySet map[types.NodeID]struct{}, - config *types.Config, - snapshotHash common.Hash, - prevHash common.Hash, -) common.Hash { - notaryIDs := make(types.NodeIDs, 0, len(notarySet)) - for nID := range notarySet { - notaryIDs = append(notaryIDs, nID) - } - sort.Sort(notaryIDs) - notarySetBytes := make([]byte, 0, len(notarySet)*len(common.Hash{})) - for _, nID := range notaryIDs { - notarySetBytes = append(notarySetBytes, nID.Hash[:]...) - } - configBytes := config.Bytes() - - return crypto.Keccak256Hash( - notarySetBytes[:], - configBytes[:], - snapshotHash[:], - prevHash[:], - ) -} - -// VerifyAgreementResult performs a sanity check against a -// types.AgreementResult instance. -func VerifyAgreementResult( - res *types.AgreementResult, cache *NodeSetCache) error { - if res.Position.Round >= DKGDelayRound { - if len(res.Randomness) == 0 { - return ErrMissingRandomness - } - return nil - } - notarySet, err := cache.GetNotarySet(res.Position.Round) - if err != nil { - return err - } - if len(res.Votes) < len(notarySet)*2/3+1 { - return ErrNotEnoughVotes - } - voted := make(map[types.NodeID]struct{}, len(notarySet)) - voteType := res.Votes[0].Type - votePeriod := res.Votes[0].Period - if voteType != types.VoteFastCom && voteType != types.VoteCom { - return ErrIncorrectVoteType - } - for _, vote := range res.Votes { - if vote.Period != votePeriod { - return ErrIncorrectVotePeriod - } - if res.IsEmptyBlock { - if (vote.BlockHash != common.Hash{}) { - return ErrIncorrectVoteBlockHash - } - } else { - if vote.BlockHash != res.BlockHash { - return ErrIncorrectVoteBlockHash - } - } - if vote.Type != voteType { - return ErrIncorrectVoteType - } - if vote.Position != res.Position { - return ErrIncorrectVotePosition - } - if _, exist := notarySet[vote.ProposerID]; !exist { - return ErrIncorrectVoteProposer - } - ok, err := utils.VerifyVoteSignature(&vote) - if err != nil { - return err - } - if !ok { - return ErrIncorrectVoteSignature - } - voted[vote.ProposerID] = struct{}{} - } - if len(voted) < len(notarySet)*2/3+1 { - return ErrNotEnoughVotes - } - return nil -} - -// DiffUint64 calculates the difference between two uint64.
-func DiffUint64(a, b uint64) uint64 { - if a > b { - return a - b - } - return b - a -} - -func isCI() bool { - return os.Getenv("CI") != "" -} - -func isCircleCI() bool { - return isCI() && os.Getenv("CIRCLECI") == "true" -} - -func isTravisCI() bool { - return isCI() && os.Getenv("TRAVIS") == "true" -} - -// checkWithCancel is a helper to perform periodic checking with cancel. -func checkWithCancel(parentCtx context.Context, interval time.Duration, - checker func() bool) (ret bool) { - ctx, cancel := context.WithCancel(parentCtx) - defer cancel() -Loop: - for { - if ret = checker(); ret { - return - } - select { - case <-ctx.Done(): - break Loop - case <-time.After(interval): - } - } - return -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/crypto.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/crypto.go deleted file mode 100644 index 42ee6122e..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/crypto.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package utils - -import ( - "bytes" - "encoding/binary" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/crypto" - "github.com/dexon-foundation/dexon-consensus/core/types" - typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" -) - -func hashWitness(witness *types.Witness) (common.Hash, error) { - binaryHeight := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryHeight, witness.Height) - return crypto.Keccak256Hash( - binaryHeight, - witness.Data), nil -} - -// HashBlock generates hash of a types.Block. -func HashBlock(block *types.Block) (common.Hash, error) { - hashPosition := HashPosition(block.Position) - binaryTimestamp, err := block.Timestamp.UTC().MarshalBinary() - if err != nil { - return common.Hash{}, err - } - binaryWitness, err := hashWitness(&block.Witness) - if err != nil { - return common.Hash{}, err - } - - hash := crypto.Keccak256Hash( - block.ProposerID.Hash[:], - block.ParentHash[:], - hashPosition[:], - binaryTimestamp[:], - block.PayloadHash[:], - binaryWitness[:]) - return hash, nil -} - -// VerifyBlockSignature verifies the signature of types.Block. -func VerifyBlockSignature(b *types.Block) (err error) { - payloadHash := crypto.Keccak256Hash(b.Payload) - if payloadHash != b.PayloadHash { - err = ErrIncorrectHash - return - } - return VerifyBlockSignatureWithoutPayload(b) -} - -// VerifyBlockSignatureWithoutPayload verifies the signature of types.Block but -// does not check if PayloadHash is correct. 
-func VerifyBlockSignatureWithoutPayload(b *types.Block) (err error) { - hash, err := HashBlock(b) - if err != nil { - return - } - if hash != b.Hash { - err = ErrIncorrectHash - return - } - pubKey, err := crypto.SigToPub(b.Hash, b.Signature) - if err != nil { - return - } - if !b.ProposerID.Equal(types.NewNodeID(pubKey)) { - err = ErrIncorrectSignature - return - } - return - -} - -// HashVote generates hash of a types.Vote. -func HashVote(vote *types.Vote) common.Hash { - binaryPeriod := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryPeriod, vote.Period) - - hashPosition := HashPosition(vote.Position) - - hash := crypto.Keccak256Hash( - vote.ProposerID.Hash[:], - vote.BlockHash[:], - binaryPeriod, - hashPosition[:], - vote.PartialSignature.Signature[:], - []byte{byte(vote.Type)}, - ) - return hash -} - -// VerifyVoteSignature verifies the signature of types.Vote. -func VerifyVoteSignature(vote *types.Vote) (bool, error) { - hash := HashVote(vote) - pubKey, err := crypto.SigToPub(hash, vote.Signature) - if err != nil { - return false, err - } - if vote.ProposerID != types.NewNodeID(pubKey) { - return false, nil - } - return true, nil -} - -func hashCRS(block *types.Block, crs common.Hash) common.Hash { - hashPos := HashPosition(block.Position) - if block.Position.Round < dkgDelayRound { - return crypto.Keccak256Hash(crs[:], hashPos[:], block.ProposerID.Hash[:]) - } - return crypto.Keccak256Hash(crs[:], hashPos[:]) -} - -// VerifyCRSSignature verifies the CRS signature of types.Block. -func VerifyCRSSignature( - block *types.Block, crs common.Hash, npks *typesDKG.NodePublicKeys) bool { - hash := hashCRS(block, crs) - if block.Position.Round < dkgDelayRound { - return bytes.Compare(block.CRSSignature.Signature[:], hash[:]) == 0 - } - if npks == nil { - return false - } - pubKey, exist := npks.PublicKeys[block.ProposerID] - if !exist { - return false - } - return pubKey.VerifySignature(hash, block.CRSSignature) -} - -// HashPosition generates hash of a types.Position. -func HashPosition(position types.Position) common.Hash { - binaryRound := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryRound, position.Round) - - binaryHeight := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryHeight, position.Height) - - return crypto.Keccak256Hash( - binaryRound, - binaryHeight, - ) -} - -func hashDKGPrivateShare(prvShare *typesDKG.PrivateShare) common.Hash { - binaryRound := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryRound, prvShare.Round) - binaryReset := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryReset, prvShare.Reset) - - return crypto.Keccak256Hash( - prvShare.ProposerID.Hash[:], - prvShare.ReceiverID.Hash[:], - binaryRound, - binaryReset, - prvShare.PrivateShare.Bytes(), - ) -} - -// VerifyDKGPrivateShareSignature verifies the signature of -// typesDKG.PrivateShare. 
-func VerifyDKGPrivateShareSignature( - prvShare *typesDKG.PrivateShare) (bool, error) { - hash := hashDKGPrivateShare(prvShare) - pubKey, err := crypto.SigToPub(hash, prvShare.Signature) - if err != nil { - return false, err - } - if prvShare.ProposerID != types.NewNodeID(pubKey) { - return false, nil - } - return true, nil -} - -func hashDKGMasterPublicKey(mpk *typesDKG.MasterPublicKey) common.Hash { - binaryRound := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryRound, mpk.Round) - binaryReset := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryReset, mpk.Reset) - - return crypto.Keccak256Hash( - mpk.ProposerID.Hash[:], - mpk.DKGID.GetLittleEndian(), - mpk.PublicKeyShares.MasterKeyBytes(), - binaryRound, - binaryReset, - ) -} - -// VerifyDKGMasterPublicKeySignature verifies DKGMasterPublicKey signature. -func VerifyDKGMasterPublicKeySignature( - mpk *typesDKG.MasterPublicKey) (bool, error) { - hash := hashDKGMasterPublicKey(mpk) - pubKey, err := crypto.SigToPub(hash, mpk.Signature) - if err != nil { - return false, err - } - if mpk.ProposerID != types.NewNodeID(pubKey) { - return false, nil - } - return true, nil -} - -func hashDKGComplaint(complaint *typesDKG.Complaint) common.Hash { - binaryRound := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryRound, complaint.Round) - binaryReset := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryReset, complaint.Reset) - - hashPrvShare := hashDKGPrivateShare(&complaint.PrivateShare) - - return crypto.Keccak256Hash( - complaint.ProposerID.Hash[:], - binaryRound, - binaryReset, - hashPrvShare[:], - ) -} - -// VerifyDKGComplaintSignature verifies DKGComplaint signature. -func VerifyDKGComplaintSignature( - complaint *typesDKG.Complaint) (bool, error) { - if complaint.Round != complaint.PrivateShare.Round { - return false, nil - } - if complaint.Reset != complaint.PrivateShare.Reset { - return false, nil - } - hash := hashDKGComplaint(complaint) - pubKey, err := crypto.SigToPub(hash, complaint.Signature) - if err != nil { - return false, err - } - if complaint.ProposerID != types.NewNodeID(pubKey) { - return false, nil - } - if !complaint.IsNack() { - return VerifyDKGPrivateShareSignature(&complaint.PrivateShare) - } - return true, nil -} - -func hashDKGPartialSignature(psig *typesDKG.PartialSignature) common.Hash { - binaryRound := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryRound, psig.Round) - - return crypto.Keccak256Hash( - psig.ProposerID.Hash[:], - binaryRound, - psig.Hash[:], - psig.PartialSignature.Signature[:], - ) -} - -// VerifyDKGPartialSignatureSignature verifies the signature of -// typesDKG.PartialSignature. -func VerifyDKGPartialSignatureSignature( - psig *typesDKG.PartialSignature) (bool, error) { - hash := hashDKGPartialSignature(psig) - pubKey, err := crypto.SigToPub(hash, psig.Signature) - if err != nil { - return false, err - } - if psig.ProposerID != types.NewNodeID(pubKey) { - return false, nil - } - return true, nil -} - -func hashDKGMPKReady(ready *typesDKG.MPKReady) common.Hash { - binaryRound := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryRound, ready.Round) - binaryReset := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryReset, ready.Reset) - - return crypto.Keccak256Hash( - ready.ProposerID.Hash[:], - binaryRound, - binaryReset, - ) -}
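Every Verify*Signature helper in this file follows the same shape: hash the message's canonical fields, recover the signer with crypto.SigToPub, and compare the recovered key's NodeID with the claimed proposer. A hedged condensed form of that pattern, assuming the imports already used in this file:

// verifiesAs reports whether sig over hash was produced by proposer.
func verifiesAs(
    proposer types.NodeID, hash common.Hash, sig crypto.Signature) (bool, error) {
    pubKey, err := crypto.SigToPub(hash, sig)
    if err != nil {
        return false, err
    }
    return proposer == types.NewNodeID(pubKey), nil
}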
- -// VerifyDKGMPKReadySignature verifies DKGMPKReady signature. -func VerifyDKGMPKReadySignature( - ready *typesDKG.MPKReady) (bool, error) { - hash := hashDKGMPKReady(ready) - pubKey, err := crypto.SigToPub(hash, ready.Signature) - if err != nil { - return false, err - } - if ready.ProposerID != types.NewNodeID(pubKey) { - return false, nil - } - return true, nil -} - -func hashDKGFinalize(final *typesDKG.Finalize) common.Hash { - binaryRound := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryRound, final.Round) - binaryReset := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryReset, final.Reset) - - return crypto.Keccak256Hash( - final.ProposerID.Hash[:], - binaryRound, - binaryReset, - ) -} - -func hashDKGSuccess(success *typesDKG.Success) common.Hash { - binaryRound := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryRound, success.Round) - binaryReset := make([]byte, 8) - binary.LittleEndian.PutUint64(binaryReset, success.Reset) - - return crypto.Keccak256Hash( - success.ProposerID.Hash[:], - binaryRound, - binaryReset, - ) -} - -// VerifyDKGFinalizeSignature verifies DKGFinalize signature. -func VerifyDKGFinalizeSignature( - final *typesDKG.Finalize) (bool, error) { - hash := hashDKGFinalize(final) - pubKey, err := crypto.SigToPub(hash, final.Signature) - if err != nil { - return false, err - } - if final.ProposerID != types.NewNodeID(pubKey) { - return false, nil - } - return true, nil -} - -// VerifyDKGSuccessSignature verifies DKGSuccess signature. -func VerifyDKGSuccessSignature( - success *typesDKG.Success) (bool, error) { - hash := hashDKGSuccess(success) - pubKey, err := crypto.SigToPub(hash, success.Signature) - if err != nil { - return false, err - } - if success.ProposerID != types.NewNodeID(pubKey) { - return false, nil - } - return true, nil -} - -// Rehash hashes the hash again and again and again... -func Rehash(hash common.Hash, count uint) common.Hash { - result := hash - for i := uint(0); i < count; i++ { - result = crypto.Keccak256Hash(result[:]) - } - return result -}
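Rehash is plain iterated Keccak256: count == 0 returns the input unchanged, and each increment adds one more hash layer. A hedged sketch from within this package:

h0 := crypto.Keccak256Hash([]byte("seed")) // illustrative input
h1 := crypto.Keccak256Hash(h0[:])
manual := crypto.Keccak256Hash(h1[:])
fmt.Println(Rehash(h0, 2) == manual) // true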
 diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/nodeset-cache.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/nodeset-cache.go deleted file mode 100644 index 89dcfc86b..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/nodeset-cache.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package utils - -import ( - "errors" - "sync" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/crypto" - "github.com/dexon-foundation/dexon-consensus/core/types" -) - -var ( - // ErrNodeSetNotReady means we got a nil or empty node set. - ErrNodeSetNotReady = errors.New("node set is not ready") - // ErrCRSNotReady means we got an empty CRS. - ErrCRSNotReady = errors.New("crs is not ready") - // ErrConfigurationNotReady means we got a nil configuration. - ErrConfigurationNotReady = errors.New("configuration is not ready") -) - -type sets struct { - crs common.Hash - nodeSet *types.NodeSet - notarySet map[types.NodeID]struct{} -} - -// NodeSetCacheInterface specifies the interface used by NodeSetCache. -type NodeSetCacheInterface interface { - // Configuration returns the configuration at a given round. - // Return the genesis configuration if round == 0. - Configuration(round uint64) *types.Config - - // CRS returns the CRS for a given round. - // Return the genesis CRS if round == 0. - CRS(round uint64) common.Hash - - // NodeSet returns the node set at a given round. - // Return the genesis node set if round == 0. - NodeSet(round uint64) []crypto.PublicKey -} - -// NodeSetCache caches node set information. -// -// NOTE: this module doesn't handle DKG resetting and can only be used along -// with utils.RoundEvent. -type NodeSetCache struct { - lock sync.RWMutex - nsIntf NodeSetCacheInterface - rounds map[uint64]*sets - keyPool map[types.NodeID]*struct { - pubKey crypto.PublicKey - refCnt int - } -} - -// NewNodeSetCache constructs a NodeSetCache instance. -func NewNodeSetCache(nsIntf NodeSetCacheInterface) *NodeSetCache { - return &NodeSetCache{ - nsIntf: nsIntf, - rounds: make(map[uint64]*sets), - keyPool: make(map[types.NodeID]*struct { - pubKey crypto.PublicKey - refCnt int - }), - } -} - -// Exists checks if a node is in the node set of that round. -func (cache *NodeSetCache) Exists( - round uint64, nodeID types.NodeID) (exists bool, err error) { - - nIDs, exists := cache.get(round) - if !exists { - if nIDs, err = cache.update(round); err != nil { - return - } - } - _, exists = nIDs.nodeSet.IDs[nodeID] - return -} - -// GetPublicKey returns the public key for that node. -func (cache *NodeSetCache) GetPublicKey( - nodeID types.NodeID) (key crypto.PublicKey, exists bool) { - - cache.lock.RLock() - defer cache.lock.RUnlock() - - rec, exists := cache.keyPool[nodeID] - if exists { - key = rec.pubKey - } - return -} - -// GetNodeSet returns the IDs of the node set of this round as a map. -func (cache *NodeSetCache) GetNodeSet(round uint64) (*types.NodeSet, error) { - IDs, exists := cache.get(round) - if !exists { - var err error - if IDs, err = cache.update(round); err != nil { - return nil, err - } - } - return IDs.nodeSet.Clone(), nil -} - -// GetNotarySet returns the notary set of this round. -func (cache *NodeSetCache) GetNotarySet( - round uint64) (map[types.NodeID]struct{}, error) { - IDs, err := cache.getOrUpdate(round) - if err != nil { - return nil, err - } - return cache.cloneMap(IDs.notarySet), nil -} - -// Purge a specific round. -func (cache *NodeSetCache) Purge(rID uint64) { - cache.lock.Lock() - defer cache.lock.Unlock() - nIDs, exist := cache.rounds[rID] - if !exist { - return - } - for nID := range nIDs.nodeSet.IDs { - rec := cache.keyPool[nID] - if rec.refCnt--; rec.refCnt == 0 { - delete(cache.keyPool, nID) - } - } - delete(cache.rounds, rID) -}
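A hedged usage sketch: gov is assumed to be any value satisfying NodeSetCacheInterface (typically a governance object in the full node), and myID the local node's ID:

cache := NewNodeSetCache(gov)
notaries, err := cache.GetNotarySet(round)
if err != nil {
    return err // e.g. ErrNodeSetNotReady before the round's data is on chain
}
if _, ok := notaries[myID]; ok {
    // The local node is a notary for this round.
}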
- -// Touch updates the internal cache of the round. -func (cache *NodeSetCache) Touch(round uint64) (err error) { - _, err = cache.update(round) - return -} - -func (cache *NodeSetCache) cloneMap( - nIDs map[types.NodeID]struct{}) map[types.NodeID]struct{} { - nIDsCopy := make(map[types.NodeID]struct{}, len(nIDs)) - for k := range nIDs { - nIDsCopy[k] = struct{}{} - } - return nIDsCopy -} - -func (cache *NodeSetCache) getOrUpdate(round uint64) (nIDs *sets, err error) { - s, exists := cache.get(round) - if !exists { - if s, err = cache.update(round); err != nil { - return - } - } - nIDs = s - return -} - -// update the node set for that round. -// -// This cache maintains rounds no older than 5 rounds before the updated -// round and purges rounds outside that range. -func (cache *NodeSetCache) update(round uint64) (nIDs *sets, err error) { - cache.lock.Lock() - defer cache.lock.Unlock() - // Get information for the requested round. - keySet := cache.nsIntf.NodeSet(round) - if keySet == nil { - err = ErrNodeSetNotReady - return - } - crs := cache.nsIntf.CRS(round) - if (crs == common.Hash{}) { - err = ErrCRSNotReady - return - } - // Cache new round. - nodeSet := types.NewNodeSet() - for _, key := range keySet { - nID := types.NewNodeID(key) - nodeSet.Add(nID) - if rec, exists := cache.keyPool[nID]; exists { - rec.refCnt++ - } else { - cache.keyPool[nID] = &struct { - pubKey crypto.PublicKey - refCnt int - }{key, 1} - } - } - cfg := cache.nsIntf.Configuration(round) - if cfg == nil { - err = ErrConfigurationNotReady - return - } - nIDs = &sets{ - crs: crs, - nodeSet: nodeSet, - notarySet: make(map[types.NodeID]struct{}), - } - nIDs.notarySet = nodeSet.GetSubSet( - int(cfg.NotarySetSize), types.NewNotarySetTarget(crs)) - cache.rounds[round] = nIDs - // Purge older rounds. - for rID, nIDs := range cache.rounds { - nodeSet := nIDs.nodeSet - if round-rID <= 5 { - continue - } - for nID := range nodeSet.IDs { - rec := cache.keyPool[nID] - if rec.refCnt--; rec.refCnt == 0 { - delete(cache.keyPool, nID) - } - } - delete(cache.rounds, rID) - } - return -} - -func (cache *NodeSetCache) get(round uint64) (nIDs *sets, exists bool) { - cache.lock.RLock() - defer cache.lock.RUnlock() - nIDs, exists = cache.rounds[round] - return -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/penalty-helper.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/penalty-helper.go deleted file mode 100644 index 0b38474a6..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/penalty-helper.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2019 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// .
- -package utils - -import ( - "errors" - - "github.com/dexon-foundation/dexon-consensus/core/types" - typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" -) - -var ( - // ErrInvalidDKGMasterPublicKey means the DKG MasterPublicKey is invalid. - ErrInvalidDKGMasterPublicKey = errors.New("invalid DKG master public key") - // ErrPayloadNotEmpty means the payload of block is not empty. - ErrPayloadNotEmpty = errors.New("payload not empty") -) - -// NeedPenaltyDKGPrivateShare checks if the proposer of a DKG private share -// should be penalized. -func NeedPenaltyDKGPrivateShare( - complaint *typesDKG.Complaint, mpk *typesDKG.MasterPublicKey) (bool, error) { - if complaint.IsNack() { - return false, nil - } - if mpk.ProposerID != complaint.PrivateShare.ProposerID { - return false, nil - } - ok, err := VerifyDKGMasterPublicKeySignature(mpk) - if err != nil { - return false, err - } - if !ok { - return false, ErrInvalidDKGMasterPublicKey - } - ok, err = VerifyDKGComplaintSignature(complaint) - if err != nil { - return false, err - } - if !ok { - return false, nil - } - ok, err = mpk.PublicKeyShares.VerifyPrvShare( - typesDKG.NewID(complaint.PrivateShare.ReceiverID), - &complaint.PrivateShare.PrivateShare) - if err != nil { - return false, err - } - return !ok, nil -} - -// NeedPenaltyForkVote checks whether two votes constitute a vote fork. -func NeedPenaltyForkVote(vote1, vote2 *types.Vote) (bool, error) { - if vote1.ProposerID != vote2.ProposerID || - vote1.Type != vote2.Type || - vote1.Period != vote2.Period || - vote1.Position != vote2.Position || - vote1.BlockHash == vote2.BlockHash { - return false, nil - } - ok, err := VerifyVoteSignature(vote1) - if err != nil { - return false, err - } - if !ok { - return false, nil - } - ok, err = VerifyVoteSignature(vote2) - if err != nil { - return false, err - } - if !ok { - return false, nil - } - return true, nil -} - -// NeedPenaltyForkBlock checks whether two blocks constitute a block fork. -func NeedPenaltyForkBlock(block1, block2 *types.Block) (bool, error) { - if block1.ProposerID != block2.ProposerID || - block1.Position != block2.Position || - block1.Hash == block2.Hash { - return false, nil - } - if len(block1.Payload) != 0 || len(block2.Payload) != 0 { - return false, ErrPayloadNotEmpty - } - verifyBlock := func(block *types.Block) (bool, error) { - err := VerifyBlockSignatureWithoutPayload(block) - switch err { - case nil: - return true, nil - case ErrIncorrectSignature: - return false, nil - case ErrIncorrectHash: - return false, nil - default: - return false, err - } - } - ok, err := verifyBlock(block1) - if err != nil { - return false, err - } - if !ok { - return false, nil - } - ok, err = verifyBlock(block2) - if err != nil { - return false, err - } - if !ok { - return false, nil - } - return true, nil -}
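A hedged sketch of the fork-vote check: the two votes must agree on proposer, type, period and position, disagree on the block hash, and each must carry a valid signature over its own contents, otherwise the pair is not punishable. vote1 and vote2 are assumed to be independently signed votes gathered from the network:

punish, err := NeedPenaltyForkVote(vote1, vote2)
if err != nil {
    return err
}
if punish {
    // The proposer equivocated: same position and period, different hashes.
}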
 diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-based-config.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-based-config.go deleted file mode 100644 index 4c83d046b..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-based-config.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package utils - -import ( - "fmt" - - "github.com/dexon-foundation/dexon-consensus/core/types" -) - -// RoundBasedConfig is the base config for rounds and provides boundary -// checking for rounds. -type RoundBasedConfig struct { - roundID uint64 - roundBeginHeight uint64 - roundEndHeight uint64 - roundLength uint64 -} - -// SetupRoundBasedFields sets up round-based fields, including the round ID -// and the length of rounds. -func (c *RoundBasedConfig) SetupRoundBasedFields( - roundID uint64, cfg *types.Config) { - if c.roundLength > 0 { - panic(fmt.Errorf("duplicated set round based fields: %d", - c.roundLength)) - } - c.roundID = roundID - c.roundLength = cfg.RoundLength -} - -// SetRoundBeginHeight sets the beginning height for the initial round -// provided when constructed. -func (c *RoundBasedConfig) SetRoundBeginHeight(begin uint64) { - if c.roundBeginHeight != 0 { - panic(fmt.Errorf("duplicated set round begin height: %d", - c.roundBeginHeight)) - } - c.roundBeginHeight = begin - c.roundEndHeight = begin + c.roundLength -} - -// IsLastBlock checks if a block is the last block of this round. -func (c *RoundBasedConfig) IsLastBlock(b *types.Block) bool { - if b.Position.Round != c.roundID { - panic(fmt.Errorf("attempt to compare by different round: %s, %d", - b, c.roundID)) - } - return b.Position.Height+1 == c.roundEndHeight -} - -// ExtendLength extends the round ending height by the length of the current -// round. -func (c *RoundBasedConfig) ExtendLength() { - c.roundEndHeight += c.roundLength -} - -// Contains checks if a block height is in this round. -func (c *RoundBasedConfig) Contains(h uint64) bool { - return c.roundBeginHeight <= h && c.roundEndHeight > h -} - -// RoundID returns the round ID of this config. -func (c *RoundBasedConfig) RoundID() uint64 { - if c.roundLength == 0 { - panic(fmt.Errorf("config is not initialized: %d", c.roundID)) - } - return c.roundID -} - -// RoundEndHeight returns the next checkpoint to verify whether this round has -// ended. -func (c *RoundBasedConfig) RoundEndHeight() uint64 { - if c.roundLength == 0 { - panic(fmt.Errorf("config is not initialized: %d", c.roundID)) - } - return c.roundEndHeight -} - -// AppendTo chains this config after the config of the previous round. -func (c *RoundBasedConfig) AppendTo(other RoundBasedConfig) { - if c.roundID != other.roundID+1 { - panic(fmt.Errorf("round IDs of configs not continuous: %d %d", - c.roundID, other.roundID)) - } - c.SetRoundBeginHeight(other.roundEndHeight) -}
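A hedged lifecycle sketch, assuming cfg is a *types.Config with RoundLength = 100 (LastPeriodBeginHeight is defined just below):

var c RoundBasedConfig
c.SetupRoundBasedFields(1, cfg) // round 1, length 100
c.SetRoundBeginHeight(1000)     // round 1 spans heights [1000, 1100)
c.ExtendLength()                // a DKG reset stretches it to [1000, 1200)
fmt.Println(c.Contains(1150))          // true
fmt.Println(c.RoundEndHeight())        // 1200
fmt.Println(c.LastPeriodBeginHeight()) // 1100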
- -// LastPeriodBeginHeight returns the begin height of the last period. For example, -// if a round is extended twice, then the return from this method is: -// -// begin + 3 * roundLength - roundLength = begin + 2 * roundLength -// -func (c *RoundBasedConfig) LastPeriodBeginHeight() uint64 { - if c.roundLength == 0 { - panic(fmt.Errorf("config is not initialized: %d", c.roundID)) - } - return c.roundEndHeight - c.roundLength -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-event.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-event.go deleted file mode 100644 index bda4383fa..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-event.go +++ /dev/null @@ -1,358 +0,0 @@ -// Copyright 2019 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package utils - -import ( - "context" - "fmt" - "sync" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/types" - typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" -) - -// ErrUnmatchedBlockHeightWithConfig is for invalid parameters for NewRoundEvent. -type ErrUnmatchedBlockHeightWithConfig struct { - round uint64 - reset uint64 - blockHeight uint64 -} - -func (e ErrUnmatchedBlockHeightWithConfig) Error() string { - return fmt.Sprintf("unsynced block height and cfg: round:%d reset:%d h:%d", - e.round, e.reset, e.blockHeight) -} - -// RoundEventParam defines the parameters passed to event handlers of -// RoundEvent. -type RoundEventParam struct { - // 'Round' of the next checkpoint; it might be identical to the previous - // checkpoint. - Round uint64 - // the count of DKG resets for 'Round+1'. - Reset uint64 - // the begin block height of this event; the end block height of this event - // would be BeginHeight + config.RoundLength. - BeginHeight uint64 - // The configuration for 'Round'. - Config *types.Config - // The CRS for 'Round'. - CRS common.Hash -} - -// NextRoundValidationHeight returns the height to check if the next round is -// ready. -func (e RoundEventParam) NextRoundValidationHeight() uint64 { - return e.BeginHeight + e.Config.RoundLength*9/10 -} - -// NextCRSProposingHeight returns the height to propose CRS for next round. -func (e RoundEventParam) NextCRSProposingHeight() uint64 { - return e.BeginHeight + e.Config.RoundLength/2 -} - -// NextDKGPreparationHeight returns the height to prepare DKG set for next -// round. -func (e RoundEventParam) NextDKGPreparationHeight() uint64 { - return e.BeginHeight + e.Config.RoundLength*2/3 -} - -// NextRoundHeight returns the height of the beginning of next round. -func (e RoundEventParam) NextRoundHeight() uint64 { - return e.BeginHeight + e.Config.RoundLength -}
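A hedged numeric sketch of the checkpoint schedule these helpers derive, with BeginHeight = 1000 and RoundLength = 100 (integer division applies):

e := RoundEventParam{
    BeginHeight: 1000,
    Config:      &types.Config{RoundLength: 100},
}
fmt.Println(e.NextCRSProposingHeight())    // 1050
fmt.Println(e.NextDKGPreparationHeight())  // 1066 (1000 + 100*2/3)
fmt.Println(e.NextRoundValidationHeight()) // 1090
fmt.Println(e.NextRoundHeight())           // 1100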
- -// NextTouchNodeSetCacheHeight returns the height to touch the node set cache. -func (e RoundEventParam) NextTouchNodeSetCacheHeight() uint64 { - return e.BeginHeight + e.Config.RoundLength/2 -} - -// NextDKGResetHeight returns the height to reset DKG for the next period. -func (e RoundEventParam) NextDKGResetHeight() uint64 { - return e.BeginHeight + e.Config.RoundLength*85/100 -} - -// NextDKGRegisterHeight returns the height to register DKG. -func (e RoundEventParam) NextDKGRegisterHeight() uint64 { - return e.BeginHeight + e.Config.RoundLength/2 -} - -// RoundEndHeight returns the round ending height of this round event. -func (e RoundEventParam) RoundEndHeight() uint64 { - return e.BeginHeight + e.Config.RoundLength -} - -func (e RoundEventParam) String() string { - return fmt.Sprintf("roundEvtParam{Round:%d Reset:%d Height:%d}", - e.Round, - e.Reset, - e.BeginHeight) -} - -// roundEventFn defines the signature of round event handlers. -type roundEventFn func([]RoundEventParam) - -// governanceAccessor is a subset of core.Governance to break the dependency -// between the core and utils packages. -type governanceAccessor interface { - // Configuration returns the configuration at a given round. - // Return the genesis configuration if round == 0. - Configuration(round uint64) *types.Config - - // CRS returns the CRS for a given round. - // Return the genesis CRS if round == 0. - CRS(round uint64) common.Hash - - // DKGComplaints gets all the DKGComplaints of round. - DKGComplaints(round uint64) []*typesDKG.Complaint - - // DKGMasterPublicKeys gets all the DKGMasterPublicKey of round. - DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey - - // IsDKGFinal checks if DKG is final. - IsDKGFinal(round uint64) bool - - // IsDKGSuccess checks if DKG is successful. - IsDKGSuccess(round uint64) bool - - // DKGResetCount returns the reset count for DKG of given round. - DKGResetCount(round uint64) uint64 - - // Get the begin height of a round. - GetRoundHeight(round uint64) uint64 -} - -// RoundEventRetryHandlerGenerator generates a handler for common.Event that -// re-registers itself to retry next-round validation if no round event is -// triggered. -func RoundEventRetryHandlerGenerator( - rEvt *RoundEvent, hEvt *common.Event) func(uint64) { - var hEvtHandler func(uint64) - hEvtHandler = func(h uint64) { - if rEvt.ValidateNextRound(h) == 0 { - // Retry until at least one round event is triggered. - hEvt.RegisterHeight(h+1, hEvtHandler) - } - } - return hEvtHandler -} - -// RoundEvent would be triggered when either: -// - the next DKG set setup is ready. -// - the next DKG set setup has failed, and the previous DKG set already reset -// the CRS. -type RoundEvent struct { - gov governanceAccessor - logger common.Logger - lock sync.Mutex - handlers []roundEventFn - config RoundBasedConfig - lastTriggeredRound uint64 - lastTriggeredResetCount uint64 - roundShift uint64 - gpkInvalid bool - ctx context.Context - ctxCancel context.CancelFunc -}
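A hedged wiring sketch for the retry handler above: rEvt is a *RoundEvent and hEvt a *common.Event driven by confirmed block heights elsewhere in the node; initHeight is whatever height validation should start from:

handler := RoundEventRetryHandlerGenerator(rEvt, hEvt)
hEvt.RegisterHeight(initHeight, handler)
// Each delivered height h runs rEvt.ValidateNextRound(h); whenever that
// triggers no events, the handler re-registers itself at h+1 and tries
// again on the next height.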
- logger.Info("new RoundEvent", "position", initPos, "shift", roundShift) - initConfig := GetConfigWithPanic(gov, initPos.Round, logger) - e := &RoundEvent{ - gov: gov, - logger: logger, - lastTriggeredRound: initPos.Round, - roundShift: roundShift, - } - e.ctx, e.ctxCancel = context.WithCancel(parentCtx) - e.config = RoundBasedConfig{} - e.config.SetupRoundBasedFields(initPos.Round, initConfig) - e.config.SetRoundBeginHeight(GetRoundHeight(gov, initPos.Round)) - // Make sure the DKG reset count in current governance can cover the initial - // block height. - if initPos.Height >= types.GenesisHeight { - resetCount := gov.DKGResetCount(initPos.Round + 1) - remains := resetCount - for ; remains > 0 && !e.config.Contains(initPos.Height); remains-- { - e.config.ExtendLength() - } - if !e.config.Contains(initPos.Height) { - return nil, ErrUnmatchedBlockHeightWithConfig{ - round: initPos.Round, - reset: resetCount, - blockHeight: initPos.Height, - } - } - e.lastTriggeredResetCount = resetCount - remains - } - return e, nil -} - -// Register a handler to be called when new round is confirmed or new DKG reset -// is detected. -// -// The earlier registered handler has higher priority. -func (e *RoundEvent) Register(h roundEventFn) { - e.lock.Lock() - defer e.lock.Unlock() - e.handlers = append(e.handlers, h) -} - -// TriggerInitEvent triggers event from the initial setting. -func (e *RoundEvent) TriggerInitEvent() { - e.lock.Lock() - defer e.lock.Unlock() - events := []RoundEventParam{RoundEventParam{ - Round: e.lastTriggeredRound, - Reset: e.lastTriggeredResetCount, - BeginHeight: e.config.LastPeriodBeginHeight(), - CRS: GetCRSWithPanic(e.gov, e.lastTriggeredRound, e.logger), - Config: GetConfigWithPanic(e.gov, e.lastTriggeredRound, e.logger), - }} - for _, h := range e.handlers { - h(events) - } -} - -// ValidateNextRound validate if the DKG set for next round is ready to go or -// failed to setup, all registered handlers would be called once some decision -// is made on chain. -// -// The count of triggered events would be returned. -func (e *RoundEvent) ValidateNextRound(blockHeight uint64) (count uint) { - // To make triggers continuous and sequential, the next validation should - // wait for previous one finishing. That's why I use mutex here directly. - var events []RoundEventParam - e.lock.Lock() - defer e.lock.Unlock() - e.logger.Trace("ValidateNextRound", - "height", blockHeight, - "round", e.lastTriggeredRound, - "count", e.lastTriggeredResetCount) - defer func() { - count = uint(len(events)) - if count == 0 { - return - } - for _, h := range e.handlers { - // To make sure all handlers receive triggers sequentially, we can't - // raise go routines here. - h(events) - } - }() - var ( - triggered bool - param RoundEventParam - beginHeight = blockHeight - startRound = e.lastTriggeredRound - ) - for { - param, triggered = e.check(beginHeight, startRound) - if !triggered { - break - } - events = append(events, param) - beginHeight = param.BeginHeight - } - return -} - -func (e *RoundEvent) check(blockHeight, startRound uint64) ( - param RoundEventParam, triggered bool) { - defer func() { - if !triggered { - return - } - // A simple assertion to make sure we didn't pick the wrong round. 
- if e.config.RoundID() != e.lastTriggeredRound { - panic(fmt.Errorf("Triggered round not matched: %d, %d", - e.config.RoundID(), e.lastTriggeredRound)) - } - param.Round = e.lastTriggeredRound - param.Reset = e.lastTriggeredResetCount - param.BeginHeight = e.config.LastPeriodBeginHeight() - param.CRS = GetCRSWithPanic(e.gov, e.lastTriggeredRound, e.logger) - param.Config = GetConfigWithPanic(e.gov, e.lastTriggeredRound, e.logger) - e.logger.Info("New RoundEvent triggered", - "round", e.lastTriggeredRound, - "reset", e.lastTriggeredResetCount, - "begin-height", e.config.LastPeriodBeginHeight(), - "crs", param.CRS.String()[:6], - ) - }() - nextRound := e.lastTriggeredRound + 1 - if nextRound >= startRound+e.roundShift { - // Avoid accessing a configuration newer than the last confirmed one by - // more than 'roundShift' rounds; the full node might crash if we access - // it before it knows about it. - return - } - nextCfg := GetConfigWithPanic(e.gov, nextRound, e.logger) - resetCount := e.gov.DKGResetCount(nextRound) - if resetCount > e.lastTriggeredResetCount { - e.lastTriggeredResetCount++ - e.config.ExtendLength() - e.gpkInvalid = false - triggered = true - return - } - if e.gpkInvalid { - // We know that DKG has already failed; wait for the DKG set from the - // previous round to reset DKG, so we don't have to reconstruct the - // group public key again. - return - } - if nextRound >= dkgDelayRound { - var ok bool - ok, e.gpkInvalid = IsDKGValid( - e.gov, e.logger, nextRound, e.lastTriggeredResetCount) - if !ok { - return - } - } - // The DKG set for the next round is well prepared. - e.lastTriggeredRound = nextRound - e.lastTriggeredResetCount = 0 - e.gpkInvalid = false - rCfg := RoundBasedConfig{} - rCfg.SetupRoundBasedFields(nextRound, nextCfg) - rCfg.AppendTo(e.config) - e.config = rCfg - triggered = true - return -} - -// Stop the event source and block until the last trigger returns. -func (e *RoundEvent) Stop() { - e.ctxCancel() -} - -// LastPeriod returns block-height-related info of the last period, including -// begin height and round length. -func (e *RoundEvent) LastPeriod() (begin uint64, length uint64) { - e.lock.Lock() - defer e.lock.Unlock() - begin = e.config.LastPeriodBeginHeight() - length = e.config.RoundEndHeight() - e.config.LastPeriodBeginHeight() - return -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/signer.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/signer.go deleted file mode 100644 index ff767437f..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/signer.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// .
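Before the signer utilities in signer.go below, here is a hedged sketch of how the round-event machinery above fits together. The gov, logger, roundShift, and initHeight values are assumed to be provided by the caller, and common.NewEvent is assumed from the vendored common/event.go; none of these assumptions are shown in this patch.

```go
// wireRoundEvent is a hypothetical helper showing the intended wiring.
func wireRoundEvent(ctx context.Context, gov governanceAccessor,
	logger common.Logger, roundShift, initHeight uint64) (*RoundEvent, error) {
	rEvt, err := NewRoundEvent(ctx, gov, logger,
		types.Position{Round: 0, Height: types.GenesisHeight}, roundShift)
	if err != nil {
		return nil, err
	}
	// Handlers registered earlier have higher priority.
	rEvt.Register(func(evts []RoundEventParam) {
		for _, e := range evts {
			logger.Info("round event", "round", e.Round, "reset", e.Reset)
		}
	})
	rEvt.TriggerInitEvent() // replay the initial state to the handler
	// Keep retrying validation on new heights until an event triggers;
	// common.NewEvent is assumed from the vendored common/event.go.
	hEvt := common.NewEvent()
	hEvt.RegisterHeight(initHeight+1, RoundEventRetryHandlerGenerator(rEvt, hEvt))
	return rEvt, nil
}
```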
- -package utils - -import ( - "errors" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/crypto" - "github.com/dexon-foundation/dexon-consensus/core/types" - typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" -) - -// Errors for signer. -var ( - ErrInvalidProposerID = errors.New("invalid proposer id") - ErrIncorrectHash = errors.New("hash of block is incorrect") - ErrIncorrectSignature = errors.New("signature of block is incorrect") - ErrNoBLSSigner = errors.New("bls signer not set") -) - -type blsSigner func(round uint64, hash common.Hash) (crypto.Signature, error) - -// Signer signs a segment of data. -type Signer struct { - prvKey crypto.PrivateKey - pubKey crypto.PublicKey - proposerID types.NodeID - blsSign blsSigner -} - -// NewSigner constructs a Signer instance. -func NewSigner(prvKey crypto.PrivateKey) (s *Signer) { - s = &Signer{ - prvKey: prvKey, - pubKey: prvKey.PublicKey(), - } - s.proposerID = types.NewNodeID(s.pubKey) - return -} - -// SetBLSSigner sets the BLS signer used for signing CRSSignature. -func (s *Signer) SetBLSSigner(signer blsSigner) { - s.blsSign = signer -} - -// SignBlock signs a types.Block. -func (s *Signer) SignBlock(b *types.Block) (err error) { - b.ProposerID = s.proposerID - b.PayloadHash = crypto.Keccak256Hash(b.Payload) - if b.Hash, err = HashBlock(b); err != nil { - return - } - if b.Signature, err = s.prvKey.Sign(b.Hash); err != nil { - return - } - return -} - -// SignVote signs a types.Vote. -func (s *Signer) SignVote(v *types.Vote) (err error) { - v.ProposerID = s.proposerID - v.Signature, err = s.prvKey.Sign(HashVote(v)) - return -} - -// SignCRS signs the CRS signature of a types.Block. -func (s *Signer) SignCRS(b *types.Block, crs common.Hash) (err error) { - if b.ProposerID != s.proposerID { - err = ErrInvalidProposerID - return - } - if b.Position.Round < dkgDelayRound { - hash := hashCRS(b, crs) - b.CRSSignature = crypto.Signature{ - Type: "bls", - Signature: hash[:], - } - return - } - if s.blsSign == nil { - err = ErrNoBLSSigner - return - } - b.CRSSignature, err = s.blsSign(b.Position.Round, hashCRS(b, crs)) - return -} - -// SignDKGComplaint signs a DKG complaint. -func (s *Signer) SignDKGComplaint(complaint *typesDKG.Complaint) (err error) { - complaint.ProposerID = s.proposerID - complaint.Signature, err = s.prvKey.Sign(hashDKGComplaint(complaint)) - return -} - -// SignDKGMasterPublicKey signs a DKG master public key. -func (s *Signer) SignDKGMasterPublicKey( - mpk *typesDKG.MasterPublicKey) (err error) { - mpk.ProposerID = s.proposerID - mpk.Signature, err = s.prvKey.Sign(hashDKGMasterPublicKey(mpk)) - return -} - -// SignDKGPrivateShare signs a DKG private share. -func (s *Signer) SignDKGPrivateShare( - prvShare *typesDKG.PrivateShare) (err error) { - prvShare.ProposerID = s.proposerID - prvShare.Signature, err = s.prvKey.Sign(hashDKGPrivateShare(prvShare)) - return -} - -// SignDKGPartialSignature signs a DKG partial signature. -func (s *Signer) SignDKGPartialSignature( - pSig *typesDKG.PartialSignature) (err error) { - pSig.ProposerID = s.proposerID - pSig.Signature, err = s.prvKey.Sign(hashDKGPartialSignature(pSig)) - return -} - -// SignDKGMPKReady signs a DKG ready message. -func (s *Signer) SignDKGMPKReady(ready *typesDKG.MPKReady) (err error) { - ready.ProposerID = s.proposerID - ready.Signature, err = s.prvKey.Sign(hashDKGMPKReady(ready)) - return -} - -// SignDKGFinalize signs a DKG finalize message.
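The DKG finalize and success signers that follow complete the same fill-ProposerID-then-sign pattern. A hedged sketch of constructing and using the signer; ecdsa.NewPrivateKey is an assumption based on the vendored core/crypto/ecdsa package, not something this patch shows, and the vote fields are elided:

```go
// Sketch only: ecdsa.NewPrivateKey is assumed from core/crypto/ecdsa.
func newSignedVote() (*types.Vote, error) {
	prv, err := ecdsa.NewPrivateKey()
	if err != nil {
		return nil, err
	}
	signer := NewSigner(prv)
	v := &types.Vote{} // vote fields elided in this sketch
	// SignVote fills in ProposerID and Signature from the node key.
	if err := signer.SignVote(v); err != nil {
		return nil, err
	}
	return v, nil
}
```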
-func (s *Signer) SignDKGFinalize(final *typesDKG.Finalize) (err error) { - final.ProposerID = s.proposerID - final.Signature, err = s.prvKey.Sign(hashDKGFinalize(final)) - return -} - -// SignDKGSuccess signs a DKG success message. -func (s *Signer) SignDKGSuccess(success *typesDKG.Success) (err error) { - success.ProposerID = s.proposerID - success.Signature, err = s.prvKey.Sign(hashDKGSuccess(success)) - return -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/utils.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/utils.go deleted file mode 100644 index f259f34bb..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/utils.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2018 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package utils - -import ( - "context" - "fmt" - - "github.com/dexon-foundation/dexon-consensus/common" - "github.com/dexon-foundation/dexon-consensus/core/types" - typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" -) - -var dkgDelayRound uint64 - -// SetDKGDelayRound sets the package-level DKG delay round. -func SetDKGDelayRound(delay uint64) { - dkgDelayRound = delay -} - -type configAccessor interface { - Configuration(round uint64) *types.Config -} - -// GetConfigWithPanic is a helper to access configs; it panics when the config for -// that round is not ready yet. -func GetConfigWithPanic(accessor configAccessor, round uint64, - logger common.Logger) *types.Config { - if logger != nil { - logger.Debug("Calling Governance.Configuration", "round", round) - } - c := accessor.Configuration(round) - if c == nil { - panic(fmt.Errorf("configuration is not ready %v", round)) - } - return c -} - -type crsAccessor interface { - CRS(round uint64) common.Hash -} - -// GetCRSWithPanic is a helper to access the CRS; it panics when the CRS for that -// round is not ready yet. -func GetCRSWithPanic(accessor crsAccessor, round uint64, - logger common.Logger) common.Hash { - if logger != nil { - logger.Debug("Calling Governance.CRS", "round", round) - } - crs := accessor.CRS(round) - if (crs == common.Hash{}) { - panic(fmt.Errorf("CRS is not ready %v", round)) - } - return crs -} - -// VerifyDKGComplaint verifies whether the given DKGComplaint is valid.
-func VerifyDKGComplaint( - complaint *typesDKG.Complaint, mpk *typesDKG.MasterPublicKey) (bool, error) { - ok, err := VerifyDKGComplaintSignature(complaint) - if err != nil { - return false, err - } - if !ok { - return false, nil - } - if complaint.IsNack() { - return true, nil - } - if complaint.Round != mpk.Round { - return false, nil - } - ok, err = VerifyDKGMasterPublicKeySignature(mpk) - if err != nil { - return false, err - } - if !ok { - return false, nil - } - ok, err = mpk.PublicKeyShares.VerifyPrvShare( - typesDKG.NewID(complaint.PrivateShare.ReceiverID), - &complaint.PrivateShare.PrivateShare) - if err != nil { - return false, err - } - return !ok, nil -} - -// LaunchDummyReceiver launches a goroutine to receive from the receive -// channel of a network module. A context is required to stop the goroutine -// automatically. An optional message handler may be provided. -func LaunchDummyReceiver( - ctx context.Context, recv <-chan types.Msg, handler func(types.Msg)) ( - context.CancelFunc, <-chan struct{}) { - var ( - dummyCtx, dummyCancel = context.WithCancel(ctx) - finishedChan = make(chan struct{}, 1) - ) - go func() { - defer func() { - finishedChan <- struct{}{} - }() - loop: - for { - select { - case <-dummyCtx.Done(): - break loop - case v, ok := <-recv: - if !ok { - panic(fmt.Errorf( - "receive channel is closed before dummy receiver")) - } - if handler != nil { - handler(v) - } - } - } - }() - return dummyCancel, finishedChan -} - -// GetDKGThreshold returns the expected threshold for a given DKG set size. -func GetDKGThreshold(config *types.Config) int { - return int(config.NotarySetSize*2/3) + 1 -} - -// GetDKGValidThreshold returns the threshold for a DKG set to be considered valid. -func GetDKGValidThreshold(config *types.Config) int { - return int(config.NotarySetSize * 5 / 6) -} - -// GetBAThreshold returns the threshold for BA votes. -func GetBAThreshold(config *types.Config) int { - return int(config.NotarySetSize*2/3 + 1) -} - -// GetNextRoundValidationHeight returns the block height to check if the next -// round is ready. -func GetNextRoundValidationHeight(begin, length uint64) uint64 { - return begin + length*9/10 -} - -// GetRoundHeight wraps the workaround for the round height logic in the full node. -func GetRoundHeight(accessor interface{}, round uint64) uint64 { - type roundHeightAccessor interface { - GetRoundHeight(round uint64) uint64 - } - accessorInst := accessor.(roundHeightAccessor) - height := accessorInst.GetRoundHeight(round) - if round == 0 && height < types.GenesisHeight { - return types.GenesisHeight - } - return height -} - -// IsDKGValid checks whether DKG is correctly prepared.
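Before the IsDKGValid body that follows, note that the three threshold helpers above are simple functions of the notary set size. With a hypothetical NotarySetSize of 30 (an illustrative value, not from this code) they work out as follows:

```go
cfg := &types.Config{NotarySetSize: 30} // hypothetical size
fmt.Println(GetDKGThreshold(cfg))       // 30*2/3 + 1 = 21 shares to recover
fmt.Println(GetDKGValidThreshold(cfg))  // 30*5/6     = 25 qualified nodes
fmt.Println(GetBAThreshold(cfg))        // 30*2/3 + 1 = 21 votes
```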
-func IsDKGValid( - gov governanceAccessor, logger common.Logger, round, reset uint64) ( - valid bool, gpkInvalid bool) { - if !gov.IsDKGFinal(round) { - logger.Debug("DKG is not final", "round", round, "reset", reset) - return - } - if !gov.IsDKGSuccess(round) { - logger.Debug("DKG is not successful", "round", round, "reset", reset) - return - } - cfg := GetConfigWithPanic(gov, round, logger) - gpk, err := typesDKG.NewGroupPublicKey( - round, - gov.DKGMasterPublicKeys(round), - gov.DKGComplaints(round), - GetDKGThreshold(cfg)) - if err != nil { - logger.Debug("Group public key setup failed", - "round", round, - "reset", reset, - "error", err) - gpkInvalid = true - return - } - if len(gpk.QualifyNodeIDs) < GetDKGValidThreshold(cfg) { - logger.Debug("Group public key threshold not reached", - "round", round, - "reset", reset, - "qualified", len(gpk.QualifyNodeIDs)) - gpkInvalid = true - return - } - valid = true - return -} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/vote-filter.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/vote-filter.go deleted file mode 100644 index 446d88a64..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/vote-filter.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2019 The dexon-consensus Authors -// This file is part of the dexon-consensus library. -// -// The dexon-consensus library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus library. If not, see -// . - -package utils - -import ( - "github.com/dexon-foundation/dexon-consensus/core/types" -) - -// VoteFilter filters votes that are useless for now. -// To maximize performance, this structure is not thread-safe and will never be. -type VoteFilter struct { - Voted map[types.VoteHeader]struct{} - Position types.Position - LockIter uint64 - Period uint64 - Confirm bool -} - -// NewVoteFilter creates a new vote filter instance. -func NewVoteFilter() *VoteFilter { - return &VoteFilter{ - Voted: make(map[types.VoteHeader]struct{}), - } -} - -// Filter checks if the vote should be filtered out. -func (vf *VoteFilter) Filter(vote *types.Vote) bool { - if vote.Type == types.VoteInit { - return true - } - if vote.Position.Older(vf.Position) { - return true - } else if vote.Position.Newer(vf.Position) { - // It's impossible to check votes at other heights. - return false - } - if vf.Confirm { - return true - } - if vote.Type == types.VotePreCom && vote.Period < vf.LockIter { - return true - } - if vote.Type == types.VoteCom && - vote.Period < vf.Period && - vote.BlockHash == types.SkipBlockHash { - return true - } - if _, exist := vf.Voted[vote.VoteHeader]; exist { - return true - } - return false -} - -// AddVote adds the vote to the filter so the same vote will be filtered.
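Together with AddVote, defined just below, Filter gives the intended call pattern: check first, handle the vote, then record it. A minimal sketch, where the vote channel and handler are hypothetical:

```go
vf := NewVoteFilter()
for vote := range incomingVotes { // hypothetical <-chan *types.Vote
	if vf.Filter(vote) {
		continue // init vote, stale position, outdated period, or duplicate
	}
	handleVote(vote) // hypothetical application handler
	vf.AddVote(vote) // record it so the same vote is filtered next time
}
```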
-func (vf *VoteFilter) AddVote(vote *types.Vote) { - vf.Voted[vote.VoteHeader] = struct{}{} -} diff --git a/vendor/github.com/dexon-foundation/mcl/.gitignore b/vendor/github.com/dexon-foundation/mcl/.gitignore deleted file mode 100644 index f5edb3706..000000000 --- a/vendor/github.com/dexon-foundation/mcl/.gitignore +++ /dev/null @@ -1,13 +0,0 @@ -CVS -java/*_wrap.cxx -lib/*.so -lib/*.a -*.class -GPATH -GRTAGS -GTAGS -*.o -*.d -*.exe -*.swp -.cvsignore diff --git a/vendor/github.com/dexon-foundation/mcl/.travis.yml b/vendor/github.com/dexon-foundation/mcl/.travis.yml deleted file mode 100644 index 73a97e6aa..000000000 --- a/vendor/github.com/dexon-foundation/mcl/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -sudo: true -dist: trusty -language: cpp -compiler: - - gcc - - clang -addons: - apt: - packages: - - libgmp-dev -script: - - make test_ci DEBUG=1 -j3 - - make clean - - make test_ci CFLAGS_USER=-DMCL_DONT_USE_XBYAK -j3 - - make clean - - make test_go - diff --git a/vendor/github.com/dexon-foundation/mcl/CMakeLists.txt b/vendor/github.com/dexon-foundation/mcl/CMakeLists.txt deleted file mode 100644 index aaa0a8cf2..000000000 --- a/vendor/github.com/dexon-foundation/mcl/CMakeLists.txt +++ /dev/null @@ -1,119 +0,0 @@ -cmake_minimum_required (VERSION 2.6) -project(mcl CXX ASM) -set(SRCS src/fp.cpp) - -option( - MCL_MAX_BIT_SIZE - "max bit size for Fp" - 0 -) -option( - DOWNLOAD_SOURCE - "download cybozulib_ext" - OFF -) - -set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) -set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) -set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) - -if(MSVC) - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS} /MT /W4 /Oy /Ox /EHsc /GS- /Zi /DNDEBUG /DNOMINMAX") - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS} /MTd /W4 /DNOMINMAX") - link_directories(${CMAKE_SOURCE_DIR}/../cybozulib_ext/lib) - link_directories(${CMAKE_SOURCE_DIR}/lib) -else() - if("${CFLAGS_OPT_USER}" STREQUAL "") - set(CFLAGS_OPT_USER "-O3 -DNDEBUG -march=native") - endif() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wformat=2 -Wcast-qual -Wcast-align -Wwrite-strings -Wfloat-equal -Wpointer-arith ${CFLAGS_OPT_USER}") - - if(${MCL_MAX_BIT_SIZE} GREATER 0) - add_definitions(-DMCL_MAX_BIT_SIZE=${MCL_MAX_BIT_SIZE}) - endif() - - if(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64") - add_definitions(-DMCL_USE_LLVM=1) - set(SRCS ${SRCS} src/asm/aarch64.s) - set(CPU arch64) - elseif(${CMAKE_SYSTEM_PROCESSOR} MATCHES "^arm") - add_definitions(-DMCL_USE_LLVM=1) - set(SRCS ${SRCS} src/asm/arm.s) - set(CPU arm) - elseif(APPLE) - add_definitions(-DMCL_USE_LLVM=1) - set(SRCS ${SRCS} src/asm/x86-64mac.s src/asm/x86-64mac.bmi2.s) - set(CPU x86-64) - elseif(UNIX) - add_definitions(-DMCL_USE_LLVM=1) - set(SRCS ${SRCS} src/asm/x86-64.s src/asm/x86-64.bmi2.s) - set(CPU x86-64) - endif() - set(LIBS mcl gmp gmpxx crypto) -endif() - -if(DOWNLOAD_SOURCE) - if(MSVC) - set(CYBOZULIB_EXT_TAG release20170521) - set(FILES config.h gmp-impl.h gmp-mparam.h gmp.h gmpxx.h longlong.h mpir.h mpirxx.h) - foreach(file IN ITEMS ${FILES}) - file(DOWNLOAD https://raw.githubusercontent.com/herumi/cybozulib_ext/${CYBOZULIB_EXT_TAG}/include/${file} ${mcl_SOURCE_DIR}/include/cybozulib_ext/${file}) - message("download cybozulib_ext/" ${file}) - endforeach() - set(FILES aes.h applink.c asn1.h asn1_mac.h asn1t.h bio.h blowfish.h bn.h buffer.h camellia.h cast.h cmac.h cms.h comp.h conf.h conf_api.h crypto.h des.h des_old.h dh.h dsa.h dso.h dtls1.h e_os2.h ebcdic.h ec.h ecdh.h ecdsa.h engine.h err.h 
evp.h hmac.h idea.h krb5_asn.h kssl.h lhash.h md4.h md5.h mdc2.h modes.h obj_mac.h objects.h ocsp.h opensslconf.h opensslv.h ossl_typ.h pem.h pem2.h pkcs12.h pkcs7.h pqueue.h rand.h rc2.h rc4.h ripemd.h rsa.h safestack.h seed.h sha.h srp.h srtp.h ssl.h ssl2.h ssl23.h ssl3.h stack.h symhacks.h tls1.h ts.h txt_db.h ui.h ui_compat.h whrlpool.h x509.h x509_vfy.h x509v3.h) - foreach(file IN ITEMS ${FILES}) - file(DOWNLOAD https://raw.githubusercontent.com/herumi/cybozulib_ext/${CYBOZULIB_EXT_TAG}/include/openssl/${file} ${mcl_SOURCE_DIR}/include/cybozulib_ext/openssl/${file}) - message("download cybozulib_ext/openssl/" ${file}) - endforeach() - set(FILES mpir.lib mpirxx.lib mpirxx.pdb ssleay32.lib libeay32.lib mpir.pdb) - foreach(file IN ITEMS ${FILES}) - file(DOWNLOAD https://raw.githubusercontent.com/herumi/cybozulib_ext/${CYBOZULIB_EXT_TAG}/lib/mt/14/${file} ${mcl_SOURCE_DIR}/lib/mt/14/${file}) - message("download lib/mt/14/" ${file}) - endforeach() - if(MSVC) - include_directories( - ${mcl_SOURCE_DIR}/include/cybozulib_ext - ) - endif() - endif() -else() - if(MSVC) - include_directories( - ${mcl_SOURCE_DIR}/../cybozulib_ext/include - ) - endif() -endif() - -include_directories( - ${mcl_SOURCE_DIR}/include -) - -add_library(mcl STATIC ${SRCS}) -add_library(mcl_dy SHARED ${SRCS}) -target_link_libraries(mcl_dy ${LIBS}) -set_target_properties(mcl_dy PROPERTIES OUTPUT_NAME mcl) -#set_target_properties(mcl_dy PROPERTIES OUTPUT_NAME mcl VERSION 1.0.0 SOVERSION 1) -# For semantics of ABI compatibility including when you must bump SOVERSION, see: -# https://community.kde.org/Policies/Binary_Compatibility_Issues_With_C%2B%2B#The_Do.27s_and_Don.27ts - -file(GLOB MCL_HEADERS include/mcl/*.hpp include/mcl/bn.h include/mcl/curve_type.h) -file(GLOB CYBOZULIB_HEADERS include/cybozu/*.hpp) - -install(TARGETS mcl DESTINATION lib) -install(TARGETS mcl_dy DESTINATION lib) -install(FILES ${MCL_HEADERS} DESTINATION include/mcl) -install(FILES include/mcl/impl/bn_c_impl.hpp DESTINATION include/mcl/impl) -install(FILES ${CYBOZULIB_HEADERS} DESTINATION include/cybozu) - -set(TEST_BASE fp_test ec_test fp_util_test window_method_test elgamal_test fp_tower_test gmp_test bn_test glv_test) -#set(TEST_BASE bn_test) -foreach(base IN ITEMS ${TEST_BASE}) - add_executable( - ${base} - test/${base}.cpp - ) - target_link_libraries( - ${base} - ${LIBS} - ) -endforeach() diff --git a/vendor/github.com/dexon-foundation/mcl/COPYRIGHT b/vendor/github.com/dexon-foundation/mcl/COPYRIGHT deleted file mode 100644 index 90e49b4bc..000000000 --- a/vendor/github.com/dexon-foundation/mcl/COPYRIGHT +++ /dev/null @@ -1,47 +0,0 @@ - -Copyright (c) 2015 MITSUNARI Shigeo -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -Redistributions of source code must retain the above copyright notice, this -list of conditions and the following disclaimer. -Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. -Neither the name of the copyright owner nor the names of its contributors may -be used to endorse or promote products derived from this software without -specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -THE POSSIBILITY OF SUCH DAMAGE. ----------------------------------------------------------------------------- -ソースコード形式かバイナリ形式か、変更するかしないかを問わず、以下の条件を満た -す場合に限り、再頒布および使用が許可されます。 - -ソースコードを再頒布する場合、上記の著作権表示、本条件一覧、および下記免責条項 -を含めること。 -バイナリ形式で再頒布する場合、頒布物に付属のドキュメント等の資料に、上記の著作 -権表示、本条件一覧、および下記免責条項を含めること。 -書面による特別の許可なしに、本ソフトウェアから派生した製品の宣伝または販売促進 -に、著作権者の名前またはコントリビューターの名前を使用してはならない。 -本ソフトウェアは、著作権者およびコントリビューターによって「現状のまま」提供さ -れており、明示黙示を問わず、商業的な使用可能性、および特定の目的に対する適合性 -に関する暗黙の保証も含め、またそれに限定されない、いかなる保証もありません。 -著作権者もコントリビューターも、事由のいかんを問わず、 損害発生の原因いかんを -問わず、かつ責任の根拠が契約であるか厳格責任であるか(過失その他の)不法行為で -あるかを問わず、仮にそのような損害が発生する可能性を知らされていたとしても、 -本ソフトウェアの使用によって発生した(代替品または代用サービスの調達、使用の -喪失、データの喪失、利益の喪失、業務の中断も含め、またそれに限定されない)直接 -損害、間接損害、偶発的な損害、特別損害、懲罰的損害、または結果損害について、 -一切責任を負わないものとします。 diff --git a/vendor/github.com/dexon-foundation/mcl/Makefile b/vendor/github.com/dexon-foundation/mcl/Makefile deleted file mode 100644 index 7df1dd300..000000000 --- a/vendor/github.com/dexon-foundation/mcl/Makefile +++ /dev/null @@ -1,373 +0,0 @@ -include common.mk -LIB_DIR=lib -OBJ_DIR=obj -EXE_DIR=bin -SRC_SRC=fp.cpp bn_c256.cpp bn_c384.cpp bn_c512.cpp she_c256.cpp -TEST_SRC=fp_test.cpp ec_test.cpp fp_util_test.cpp window_method_test.cpp elgamal_test.cpp fp_tower_test.cpp gmp_test.cpp bn_test.cpp bn384_test.cpp glv_test.cpp paillier_test.cpp she_test.cpp vint_test.cpp bn512_test.cpp ecdsa_test.cpp conversion_test.cpp -TEST_SRC+=bn_c256_test.cpp bn_c384_test.cpp bn_c384_256_test.cpp bn_c512_test.cpp she_c256_test.cpp she_c384_test.cpp -TEST_SRC+=aggregate_sig_test.cpp array_test.cpp -TEST_SRC+=bls12_test.cpp -TEST_SRC+=ecdsa_c_test.cpp -TEST_SRC+=modp_test.cpp -ifeq ($(CPU),x86-64) - MCL_USE_XBYAK?=1 - TEST_SRC+=mont_fp_test.cpp sq_test.cpp - ifeq ($(USE_LOW_ASM),1) - TEST_SRC+=low_test.cpp - endif - ifeq ($(MCL_USE_XBYAK),1) - TEST_SRC+=fp_generator_test.cpp - endif -endif -SAMPLE_SRC=bench.cpp ecdh.cpp random.cpp rawbench.cpp vote.cpp pairing.cpp large.cpp tri-dh.cpp bls_sig.cpp pairing_c.c she_smpl.cpp - -ifneq ($(MCL_MAX_BIT_SIZE),) - CFLAGS+=-DMCL_MAX_BIT_SIZE=$(MCL_MAX_BIT_SIZE) -endif -ifeq ($(MCL_USE_XBYAK),0) - 
CFLAGS+=-DMCL_DONT_USE_XBYAK -endif -################################################################## -MCL_LIB=$(LIB_DIR)/libmcl.a -MCL_SNAME=mcl -BN256_SNAME=mclbn256 -BN384_SNAME=mclbn384 -BN384_256_SNAME=mclbn384_256 -BN512_SNAME=mclbn512 -SHE256_SNAME=mclshe256 -MCL_SLIB=$(LIB_DIR)/lib$(MCL_SNAME).$(LIB_SUF) -BN256_LIB=$(LIB_DIR)/libmclbn256.a -BN256_SLIB=$(LIB_DIR)/lib$(BN256_SNAME).$(LIB_SUF) -BN384_LIB=$(LIB_DIR)/libmclbn384.a -BN384_SLIB=$(LIB_DIR)/lib$(BN384_SNAME).$(LIB_SUF) -BN384_256_LIB=$(LIB_DIR)/libmclbn384_256.a -BN384_256_SLIB=$(LIB_DIR)/lib$(BN384_256_SNAME).$(LIB_SUF) -BN512_LIB=$(LIB_DIR)/libmclbn512.a -BN512_SLIB=$(LIB_DIR)/lib$(BN512_SNAME).$(LIB_SUF) -SHE256_LIB=$(LIB_DIR)/libmclshe256.a -SHE256_SLIB=$(LIB_DIR)/lib$(SHE256_SNAME).$(LIB_SUF) -SHE384_LIB=$(LIB_DIR)/libmclshe384.a -ECDSA_LIB=$(LIB_DIR)/libmclecdsa.a -all: $(MCL_LIB) $(MCL_SLIB) $(BN256_LIB) $(BN256_SLIB) $(BN384_LIB) $(BN384_SLIB) $(BN384_256_LIB) $(BN384_256_SLIB) $(BN512_LIB) $(BN512_SLIB) $(SHE256_LIB) $(SHE256_SLIB) $(SHE384_lib) $(ECDSA_LIB) - -#LLVM_VER=-3.8 -LLVM_LLC=llc$(LLVM_VER) -LLVM_OPT=opt$(LLVM_VER) -LLVM_OPT_VERSION=$(shell $(LLVM_OPT) --version 2>/dev/null | awk '/version/ {print $$3}') -GEN_EXE=src/gen -# incompatibility between llvm 3.4 and the later version -ifneq ($(LLVM_OPT_VERSION),) -ifeq ($(shell expr $(LLVM_OPT_VERSION) \< 3.5.0),1) - GEN_EXE_OPT=-old -endif -endif -ifeq ($(OS),mac) - ASM_SRC_PATH_NAME=src/asm/$(CPU)mac -else - ASM_SRC_PATH_NAME=src/asm/$(CPU) -endif -ifneq ($(CPU),) - ASM_SRC=$(ASM_SRC_PATH_NAME).s -endif -ASM_OBJ=$(OBJ_DIR)/$(CPU).o -LIB_OBJ=$(OBJ_DIR)/fp.o -BN256_OBJ=$(OBJ_DIR)/bn_c256.o -BN384_OBJ=$(OBJ_DIR)/bn_c384.o -BN384_256_OBJ=$(OBJ_DIR)/bn_c384_256.o -BN512_OBJ=$(OBJ_DIR)/bn_c512.o -SHE256_OBJ=$(OBJ_DIR)/she_c256.o -SHE384_OBJ=$(OBJ_DIR)/she_c384.o -ECDSA_OBJ=$(OBJ_DIR)/ecdsa_c.o -FUNC_LIST=src/func.list -ifeq ($(findstring $(OS),mingw64/cygwin),) - MCL_USE_LLVM?=1 -else - MCL_USE_LLVM=0 -endif -ifeq ($(MCL_USE_LLVM),1) - CFLAGS+=-DMCL_USE_LLVM=1 - LIB_OBJ+=$(ASM_OBJ) - # special case for intel with bmi2 - ifeq ($(INTEL),1) - LIB_OBJ+=$(OBJ_DIR)/$(CPU).bmi2.o - endif -endif -LLVM_SRC=src/base$(BIT).ll - -# CPU is used for llvm -# see $(LLVM_LLC) --version -LLVM_FLAGS=-march=$(CPU) -relocation-model=pic #-misched=ilpmax -LLVM_FLAGS+=-pre-RA-sched=list-ilp -max-sched-reorder=128 -mattr=-sse - -#HAS_BMI2=$(shell cat "/proc/cpuinfo" | grep bmi2 >/dev/null && echo "1") -#ifeq ($(HAS_BMI2),1) -# LLVM_FLAGS+=-mattr=bmi2 -#endif - -ifeq ($(USE_LOW_ASM),1) - LOW_ASM_OBJ=$(LOW_ASM_SRC:.asm=.o) - LIB_OBJ+=$(LOW_ASM_OBJ) -endif - -ifeq ($(UPDATE_ASM),1) - ASM_SRC_DEP=$(LLVM_SRC) - ASM_BMI2_SRC_DEP=src/base$(BIT).bmi2.ll -else - ASM_SRC_DEP= - ASM_BMI2_SRC_DEP= -endif - -ifneq ($(findstring $(OS),mac/mingw64),) - BN256_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib - BN384_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib - BN384_256_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib - BN512_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib - SHE256_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib -endif -ifeq ($(OS),mingw64) - MCL_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(MCL_SNAME).a - BN256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN256_SNAME).a - BN384_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN384_SNAME).a - BN384_256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN384_256_SNAME).a - BN512_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN512_SNAME).a - SHE256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(SHE256_SNAME).a -endif - -$(MCL_LIB): $(LIB_OBJ) - $(AR) $@ $(LIB_OBJ) - -$(MCL_SLIB): 
$(LIB_OBJ) - $(PRE)$(CXX) -o $@ $(LIB_OBJ) -shared $(LDFLAGS) $(MCL_SLIB_LDFLAGS) - -$(BN256_LIB): $(BN256_OBJ) - $(AR) $@ $(BN256_OBJ) - -$(SHE256_LIB): $(SHE256_OBJ) - $(AR) $@ $(SHE256_OBJ) - -$(SHE256_SLIB): $(SHE256_OBJ) $(MCL_LIB) - $(PRE)$(CXX) -o $@ $(SHE256_OBJ) $(MCL_LIB) -shared $(LDFLAGS) $(SHE256_SLIB_LDFLAGS) - -$(SHE384_LIB): $(SHE384_OBJ) - $(AR) $@ $(SHE384_OBJ) - -$(ECDSA_LIB): $(ECDSA_OBJ) - $(AR) $@ $(ECDSA_OBJ) - -$(BN256_SLIB): $(BN256_OBJ) $(MCL_SLIB) - $(PRE)$(CXX) -o $@ $(BN256_OBJ) -shared $(LDFLAGS) $(BN256_SLIB_LDFLAGS) - -$(BN384_LIB): $(BN384_OBJ) - $(AR) $@ $(BN384_OBJ) - -$(BN384_256_LIB): $(BN384_256_OBJ) - $(AR) $@ $(BN384_256_OBJ) - -$(BN512_LIB): $(BN512_OBJ) - $(AR) $@ $(BN512_OBJ) - -$(BN384_SLIB): $(BN384_OBJ) $(MCL_SLIB) - $(PRE)$(CXX) -o $@ $(BN384_OBJ) -shared $(LDFLAGS) $(BN384_SLIB_LDFLAGS) - -$(BN384_256_SLIB): $(BN384_256_OBJ) $(MCL_SLIB) - $(PRE)$(CXX) -o $@ $(BN384_256_OBJ) -shared $(LDFLAGS) $(BN384_256_SLIB_LDFLAGS) - -$(BN512_SLIB): $(BN512_OBJ) $(MCL_SLIB) - $(PRE)$(CXX) -o $@ $(BN512_OBJ) -shared $(LDFLAGS) $(BN512_SLIB_LDFLAGS) - -$(ASM_OBJ): $(ASM_SRC) - $(PRE)$(CXX) -c $< -o $@ $(CFLAGS) - -$(ASM_SRC): $(ASM_SRC_DEP) - $(LLVM_OPT) -O3 -o - $< -march=$(CPU) | $(LLVM_LLC) -O3 -o $@ $(LLVM_FLAGS) - -$(LLVM_SRC): $(GEN_EXE) $(FUNC_LIST) - $(GEN_EXE) $(GEN_EXE_OPT) -f $(FUNC_LIST) > $@ - -$(ASM_SRC_PATH_NAME).bmi2.s: $(ASM_BMI2_SRC_DEP) - $(LLVM_OPT) -O3 -o - $< -march=$(CPU) | $(LLVM_LLC) -O3 -o $@ $(LLVM_FLAGS) -mattr=bmi2 - -$(OBJ_DIR)/$(CPU).bmi2.o: $(ASM_SRC_PATH_NAME).bmi2.s - $(PRE)$(CXX) -c $< -o $@ $(CFLAGS) - -src/base$(BIT).bmi2.ll: $(GEN_EXE) - $(GEN_EXE) $(GEN_EXE_OPT) -f $(FUNC_LIST) -s bmi2 > $@ - -src/base64m.ll: $(GEN_EXE) - $(GEN_EXE) $(GEN_EXE_OPT) -wasm > $@ - -$(FUNC_LIST): $(LOW_ASM_SRC) -ifeq ($(USE_LOW_ASM),1) - $(shell awk '/global/ { print $$2}' $(LOW_ASM_SRC) > $(FUNC_LIST)) - $(shell awk '/proc/ { print $$2}' $(LOW_ASM_SRC) >> $(FUNC_LIST)) -else - $(shell touch $(FUNC_LIST)) -endif - -$(GEN_EXE): src/gen.cpp src/llvm_gen.hpp - $(CXX) -o $@ $< $(CFLAGS) - -asm: $(LLVM_SRC) - $(LLVM_OPT) -O3 -o - $(LLVM_SRC) | $(LLVM_LLC) -O3 $(LLVM_FLAGS) -x86-asm-syntax=intel - -$(LOW_ASM_OBJ): $(LOW_ASM_SRC) - $(ASM) $< - -# set PATH for mingw, set LD_LIBRARY_PATH is for other env -COMMON_LIB_PATH="../../../lib" -PATH_VAL=$$PATH:$(COMMON_LIB_PATH) LD_LIBRARY_PATH=$(COMMON_LIB_PATH) DYLD_LIBRARY_PATH=$(COMMON_LIB_PATH) CGO_CFLAGS="-I$(shell pwd)/include" CGO_LDFLAGS="-L../../../lib" -test_go256: $(MCL_SLIB) $(BN256_SLIB) - cd ffi/go/mcl && env PATH=$(PATH_VAL) go test -tags bn256 . - -test_go384: $(MCL_SLIB) $(BN384_SLIB) - cd ffi/go/mcl && env PATH=$(PATH_VAL) go test -tags bn384 . - -test_go384_256: $(MCL_SLIB) $(BN384_256_SLIB) - cd ffi/go/mcl && env PATH=$(PATH_VAL) go test -tags bn384_256 . 
- -test_go: - $(MAKE) test_go256 - $(MAKE) test_go384 - $(MAKE) test_go384_256 - -test_python_she: $(SHE256_SLIB) - cd ffi/python && env LD_LIBRARY_PATH="../../lib" DYLD_LIBRARY_PATH="../../lib" PATH=$$PATH:"../../lib" python3 she.py -test_python: - $(MAKE) test_python_she - -test_java: - $(MAKE) -C ffi/java test - -################################################################## - -VPATH=test sample src - -.SUFFIXES: .cpp .d .exe .c .o - -$(OBJ_DIR)/%.o: %.cpp - $(PRE)$(CXX) $(CFLAGS) -c $< -o $@ -MMD -MP -MF $(@:.o=.d) - -$(OBJ_DIR)/%.o: %.c - $(PRE)$(CC) $(CFLAGS) -c $< -o $@ -MMD -MP -MF $(@:.o=.d) - -$(EXE_DIR)/%.exe: $(OBJ_DIR)/%.o $(MCL_LIB) - $(PRE)$(CXX) $< -o $@ $(MCL_LIB) $(LDFLAGS) - -$(EXE_DIR)/bn_c256_test.exe: $(OBJ_DIR)/bn_c256_test.o $(BN256_LIB) $(MCL_LIB) - $(PRE)$(CXX) $< -o $@ $(BN256_LIB) $(MCL_LIB) $(LDFLAGS) - -$(EXE_DIR)/bn_c384_test.exe: $(OBJ_DIR)/bn_c384_test.o $(BN384_LIB) $(MCL_LIB) - $(PRE)$(CXX) $< -o $@ $(BN384_LIB) $(MCL_LIB) $(LDFLAGS) - -$(EXE_DIR)/bn_c384_256_test.exe: $(OBJ_DIR)/bn_c384_256_test.o $(BN384_256_LIB) $(MCL_LIB) - $(PRE)$(CXX) $< -o $@ $(BN384_256_LIB) $(MCL_LIB) $(LDFLAGS) - -$(EXE_DIR)/bn_c512_test.exe: $(OBJ_DIR)/bn_c512_test.o $(BN512_LIB) $(MCL_LIB) - $(PRE)$(CXX) $< -o $@ $(BN512_LIB) $(MCL_LIB) $(LDFLAGS) - -$(EXE_DIR)/pairing_c.exe: $(OBJ_DIR)/pairing_c.o $(BN256_LIB) $(MCL_LIB) - $(PRE)$(CC) $< -o $@ $(BN256_LIB) $(MCL_LIB) $(LDFLAGS) -lstdc++ - -$(EXE_DIR)/she_c256_test.exe: $(OBJ_DIR)/she_c256_test.o $(SHE256_LIB) $(MCL_LIB) - $(PRE)$(CXX) $< -o $@ $(SHE256_LIB) $(MCL_LIB) $(LDFLAGS) - -$(EXE_DIR)/she_c384_test.exe: $(OBJ_DIR)/she_c384_test.o $(SHE384_LIB) $(MCL_LIB) - $(PRE)$(CXX) $< -o $@ $(SHE384_LIB) $(MCL_LIB) $(LDFLAGS) - -$(EXE_DIR)/ecdsa_c_test.exe: $(OBJ_DIR)/ecdsa_c_test.o $(ECDSA_LIB) $(MCL_LIB) src/ecdsa_c.cpp include/mcl/ecdsa.hpp include/mcl/ecdsa.h - $(PRE)$(CXX) $< -o $@ $(ECDSA_LIB) $(MCL_LIB) $(LDFLAGS) - -$(OBJ_DIR)/modp_test.o: test/modp_test.cpp - $(PRE)$(CXX) -c $< -o $@ -MMD -MP -MF $(@:.o=.d) -DMCL_USE_VINT -DMCL_MAX_BIT_SIZE=384 -DMCL_VINT_64BIT_PORTABLE -DMCL_SIZEOF_UNIT=8 -DMCL_VINT_FIXED_BUFFER -I./include -O2 $(CFLAGS_WARN) - -$(EXE_DIR)/modp_test.exe: $(OBJ_DIR)/modp_test.o - $(PRE)$(CXX) $< -o $@ - -SAMPLE_EXE=$(addprefix $(EXE_DIR)/,$(addsuffix .exe,$(basename $(SAMPLE_SRC)))) -sample: $(SAMPLE_EXE) $(MCL_LIB) - -TEST_EXE=$(addprefix $(EXE_DIR)/,$(TEST_SRC:.cpp=.exe)) -test_ci: $(TEST_EXE) - @sh -ec 'for i in $(TEST_EXE); do echo $$i; env LSAN_OPTIONS=verbosity=1:log_threads=1 $$i; done' -test: $(TEST_EXE) - @echo test $(TEST_EXE) - @sh -ec 'for i in $(TEST_EXE); do $$i|grep "ctest:name"; done' > result.txt - @grep -v "ng=0, exception=0" result.txt; if [ $$? 
-eq 1 ]; then echo "all unit tests succeed"; else exit 1; fi - -EMCC_OPT=-I./include -I./src -Wall -Wextra -EMCC_OPT+=-O3 -DNDEBUG -DMCLSHE_WIN_SIZE=8 -EMCC_OPT+=-s WASM=1 -s NO_EXIT_RUNTIME=1 -s MODULARIZE=1 #-s ASSERTIONS=1 -EMCC_OPT+=-DCYBOZU_MINIMUM_EXCEPTION -EMCC_OPT+=-s ABORTING_MALLOC=0 -SHE_C_DEP=src/fp.cpp src/she_c_impl.hpp include/mcl/she.hpp include/mcl/fp.hpp include/mcl/op.hpp include/mcl/she.h Makefile -MCL_C_DEP=src/fp.cpp include/mcl/impl/bn_c_impl.hpp include/mcl/bn.hpp include/mcl/fp.hpp include/mcl/op.hpp include/mcl/bn.h Makefile -ifeq ($(MCL_USE_LLVM),2) - EMCC_OPT+=src/base64m.ll -DMCL_USE_LLVM - SHE_C_DEP+=src/base64m.ll -endif -../she-wasm/she_c.js: src/she_c256.cpp $(SHE_C_DEP) - emcc -o $@ src/fp.cpp src/she_c256.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=256 -s TOTAL_MEMORY=67108864 -s DISABLE_EXCEPTION_CATCHING=0 - -../she-wasm/she_c384.js: src/she_c384.cpp $(SHE_C_DEP) - emcc -o $@ src/fp.cpp src/she_c384.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=384 -s TOTAL_MEMORY=67108864 -s DISABLE_EXCEPTION_CATCHING=0 - -../mcl-wasm/mcl_c.js: src/bn_c256.cpp $(MCL_C_DEP) - emcc -o $@ src/fp.cpp src/bn_c256.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=256 -DMCL_USE_WEB_CRYPTO_API -s DISABLE_EXCEPTION_CATCHING=1 -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -fno-exceptions -MD -MP -MF obj/mcl_c.d - -../mcl-wasm/mcl_c512.js: src/bn_c512.cpp $(MCL_C_DEP) - emcc -o $@ src/fp.cpp src/bn_c512.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=512 -DMCL_USE_WEB_CRYPTO_API -s DISABLE_EXCEPTION_CATCHING=1 -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -fno-exceptions - -../ecdsa-wasm/ecdsa_c.js: src/ecdsa_c.cpp src/fp.cpp include/mcl/ecdsa.hpp include/mcl/ecdsa.h Makefile - emcc -o $@ src/fp.cpp src/ecdsa_c.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=256 -DMCL_USE_WEB_CRYPTO_API -s DISABLE_EXCEPTION_CATCHING=1 -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -fno-exceptions - -mcl-wasm: - $(MAKE) ../mcl-wasm/mcl_c.js - $(MAKE) ../mcl-wasm/mcl_c512.js - -she-wasm: - $(MAKE) ../she-wasm/she_c.js - $(MAKE) ../she-wasm/she_c384.js - -ecdsa-wasm: - $(MAKE) ../ecdsa-wasm/ecdsa_c.js - -# test -bin/emu: - $(CXX) -g -o $@ src/fp.cpp src/bn_c256.cpp test/bn_c256_test.cpp -DMCL_DONT_USE_XBYAK -DMCL_DONT_USE_OPENSSL -DMCL_USE_VINT -DMCL_SIZEOF_UNIT=8 -DMCL_VINT_64BIT_PORTABLE -DMCL_VINT_FIXED_BUFFER -DMCL_MAX_BIT_SIZE=256 -I./include -bin/pairing_c_min.exe: sample/pairing_c.c include/mcl/vint.hpp src/fp.cpp include/mcl/bn.hpp -# $(CXX) -o $@ sample/pairing_c.c src/fp.cpp src/bn_c256.cpp -O2 -g -I./include -fno-exceptions -fno-rtti -fno-threadsafe-statics -DMCL_DONT_USE_XBYAK -DMCL_DONT_USE_OPENSSL -DMCL_USE_VINT -DMCL_SIZEOF_UNIT=8 -DMCL_VINT_FIXED_BUFFER -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -DMCL_DONT_USE_CSPRNG -DMCL_MAX_BIT_SIZE=256 -DMCL_VINT_64BIT_PORTABLE -DNDEBUG -pg - $(CXX) -o $@ sample/pairing_c.c src/fp.cpp src/bn_c256.cpp -O2 -g -I./include -fno-threadsafe-statics -DMCL_DONT_USE_XBYAK -DMCL_DONT_USE_OPENSSL -DMCL_USE_VINT -DMCL_SIZEOF_UNIT=8 -DMCL_VINT_FIXED_BUFFER -DMCL_DONT_USE_CSPRNG -DMCL_MAX_BIT_SIZE=256 -DMCL_VINT_64BIT_PORTABLE -DNDEBUG - -make_tbl: - $(MAKE) ../bls/src/qcoeff-bn254.hpp - -../bls/src/qcoeff-bn254.hpp: $(MCL_LIB) misc/precompute.cpp - $(CXX) -o misc/precompute misc/precompute.cpp $(CFLAGS) $(MCL_LIB) $(LDFLAGS) - ./misc/precompute > ../bls/src/qcoeff-bn254.hpp - -update_xbyak: - cp -a ../xbyak/xbyak/xbyak.h ../xbyak/xbyak/xbyak_util.h ../xbyak/xbyak/xbyak_mnemonic.h src/xbyak/ - -update_cybozulib: - cp -a $(addprefix ../cybozulib/,$(wildcard include/cybozu/*.hpp)) 
include/cybozu/ - -clean: - $(RM) $(LIB_DIR)/*.a $(LIB_DIR)/*.$(LIB_SUF) $(OBJ_DIR)/*.o $(OBJ_DIR)/*.obj $(OBJ_DIR)/*.d $(EXE_DIR)/*.exe $(GEN_EXE) $(ASM_OBJ) $(LIB_OBJ) $(BN256_OBJ) $(BN384_OBJ) $(BN512_OBJ) $(LLVM_SRC) $(FUNC_LIST) src/*.ll lib/*.a - -ALL_SRC=$(SRC_SRC) $(TEST_SRC) $(SAMPLE_SRC) -DEPEND_FILE=$(addprefix $(OBJ_DIR)/, $(addsuffix .d,$(basename $(ALL_SRC)))) --include $(DEPEND_FILE) - -PREFIX?=/usr/local -install: lib/libmcl.a lib/libmcl.$(LIB_SUF) - $(MKDIR) $(PREFIX)/include/mcl - cp -a include/mcl/ $(PREFIX)/include/ - cp -a include/cybozu/ $(PREFIX)/include/ - $(MKDIR) $(PREFIX)/lib - cp -a lib/libmcl.a lib/libmcl.$(LIB_SUF) $(PREFIX)/lib/ - -.PHONY: test mcl-wasm she-wasm bin/emu - -# don't remove these files automatically -.SECONDARY: $(addprefix $(OBJ_DIR)/, $(ALL_SRC:.cpp=.o)) - diff --git a/vendor/github.com/dexon-foundation/mcl/bench.txt b/vendor/github.com/dexon-foundation/mcl/bench.txt deleted file mode 100644 index 35e47dca5..000000000 --- a/vendor/github.com/dexon-foundation/mcl/bench.txt +++ /dev/null @@ -1,114 +0,0 @@ ------------------------------------------------------------------------------ -Core i7-7700@3.6GHz Firefox 58.0.1(64-bit) - BN254 Fp381_1 Fp462 -op msec -Fr::setByCSPRNG 0.022 0.021 0.019 -pairing 2.446 7.353 14.596 -millerLoop 1.467 4.219 8.598 -finalExp 0.97 3.127 6.005 -precomputedMillerLoop 1.087 3.171 6.305 -G1::add 0.007 0.01 0.014 -G1::dbl 0.005 0.007 0.011 -G1::mul 0.479 1.529 3.346 -G2::add 0.013 0.022 0.033 -G2::dbl 0.01 0.016 0.025 -G2::mul 0.989 2.955 5.921 -hashAndMapToG1 0.135 0.309 0.76 -hashAndMapToG2 2.14 6.44 14.249 -Fr::add 0.004 0.003 0.003 -Fr::mul 0.004 0.004 0.005 -Fr::sqr 0.003 0.003 0.004 -Fr::inv 0.025 0.038 0.05 -GT::add 0.005 0.004 0.005 -GT::mul 0.016 0.027 0.041 -GT::sqr 0.012 0.018 0.028 -GT::inv 0.051 0.081 0.122 - ------------------------------------------------------------------------------ -iPhone7 iOS 11.2.1 Safari/604.1 - BN254 Fp381_1 Fp462 -op msec -Fr::setByCSPRNG 0.041 0.038 0.154 -pairing 3.9 11.752 22.578 -millerLoop 2.29 6.55 13.067 -finalExp 1.501 4.741 9.016 -precomputedMillerLoop 1.675 4.818 9.492 -G1::add 0.006 0.015 0.018 -G1::dbl 0.005 0.01 0.019 -G1::mul 0.843 2.615 5.339 -G2::add 0.015 0.03 0.048 -G2::dbl 0.011 0.022 0.034 -G2::mul 1.596 4.581 9.077 -hashAndMapToG1 0.212 0.507 1.201 -hashAndMapToG2 3.486 9.93 21.817 -Fr::add 0.002 0.002 0.002 -Fr::mul 0.002 0.003 0.003 -Fr::sqr 0.002 0.003 0.004 -Fr::inv 0.037 0.062 0.078 -GT::add 0.003 0.003 0.003 -GT::mul 0.021 0.037 0.058 -GT::sqr 0.014 0.026 0.04 -GT::inv 0.074 0.131 0.19 ------------------------------------------------------------------------------ -Core i7-7700@3.6GHz Linux gcc.5.4.0 - - BN254 Fp381_1 Fp462 -G1::mulCT 202.807Kclk 597.410Kclk 1.658Mclk -G1::mulCTsmall 200.968Kclk 596.074Kclk 1.650Mclk -G1::mul 185.935Kclk 555.147Kclk 1.495Mclk -G1::mulsmall 1.856Kclk 3.740Kclk 8.054Kclk -G1::add 866.89 clk 1.710Kclk 3.663Kclk -G1::dbl 798.60 clk 1.770Kclk 3.755Kclk -G2::mulCT 391.655Kclk 1.351Mclk 3.102Mclk -G2::mulCTsmall 369.134Kclk 1.358Mclk 3.105Mclk -G2::mul 400.098Kclk 1.277Mclk 3.009Mclk -G2::mulsmall 5.774Kclk 12.806Kclk 25.374Kclk -G2::add 2.696Kclk 7.547Kclk 14.683Kclk -G2::dbl 2.600Kclk 5.366Kclk 10.436Kclk -GT::pow 727.157Kclk 1.991Mclk 4.364Mclk -hashAndMapToG1 27.953Kclk 87.291Kclk 200.972Kclk -hashAndMapToG2 775.186Kclk 2.629Mclk 6.937Mclk -Fp::add 11.48 clk 69.54 clk 21.36 clk -Fp::mul 63.11 clk 134.90 clk 303.75 clk -Fp::sqr 64.39 clk 134.29 clk 305.38 clk -Fp::inv 2.302Kclk 4.185Kclk 5.485Kclk -GT::add 180.93 clk 247.70 
clk 256.55 clk -GT::mul 5.278Kclk 10.887Kclk 19.844Kclk -GT::sqr 3.666Kclk 7.444Kclk 13.694Kclk -GT::inv 11.322Kclk 22.480Kclk 41.796Kclk -pairing 1.044Mclk 3.445Mclk 7.789Mclk -millerLoop 634.214Kclk 1.913Mclk 4.466Mclk -finalExp 423.413Kclk 1.535Mclk 3.328Mclk -precomputedML 479.849Kclk 1.461Mclk 3.299Mclk ------------------------------------------------------------------------------ - -1.2GHz ARM Cortex-A53 [HiKey] Linux gcc 4.9.2 - - BN254 Fp381_1 Fp462 -G1::mulCT 858.149usec 2.780msec 8.507msec -G1::mulCTsmall 854.535usec 2.773msec 8.499msec -G1::mul 743.100usec 2.484msec 7.536msec -G1::mulsmall 7.680usec 16.528usec 41.818usec -G1::add 3.347usec 7.363usec 18.544usec -G1::dbl 3.294usec 7.351usec 18.472usec -G2::mulCT 1.627msec 5.083msec 12.142msec -G2::mulCTsmall 1.534msec 5.124msec 12.125msec -G2::mul 1.677msec 4.806msec 11.757msec -G2::mulsmall 23.581usec 48.504usec 96.780usec -G2::add 10.751usec 27.759usec 54.392usec -G2::dbl 10.076usec 20.625usec 42.032usec -GT::pow 2.662msec 7.091msec 14.042msec -hashAndMapToG1 111.256usec 372.665usec 1.031msec -hashAndMapToG2 3.199msec 10.168msec 27.391msec -Fp::add 27.19nsec 38.02nsec 45.68nsec -Fp::mul 279.17nsec 628.44nsec 1.662usec -Fp::sqr 276.56nsec 651.67nsec 1.675usec -Fp::inv 9.743usec 14.364usec 18.116usec -GT::add 373.18nsec 530.62nsec 625.26nsec -GT::mul 19.557usec 38.623usec 63.111usec -GT::sqr 13.345usec 26.218usec 43.008usec -GT::inv 44.119usec 84.581usec 153.046usec -pairing 3.913msec 12.606msec 26.818msec -millerLoop 2.402msec 7.202msec 15.711msec -finalExp 1.506msec 5.395msec 11.098msec -precomputedML 1.815msec 5.447msec 11.094msec diff --git a/vendor/github.com/dexon-foundation/mcl/common.mk b/vendor/github.com/dexon-foundation/mcl/common.mk deleted file mode 100644 index 5c749e1a6..000000000 --- a/vendor/github.com/dexon-foundation/mcl/common.mk +++ /dev/null @@ -1,117 +0,0 @@ -GCC_VER=$(shell $(PRE)$(CC) -dumpversion) -UNAME_S=$(shell uname -s) -ifeq ($(UNAME_S),Linux) - OS=Linux -endif -ifeq ($(findstring MINGW64,$(UNAME_S)),MINGW64) - OS=mingw64 - CFLAGS+=-D__USE_MINGW_ANSI_STDIO=1 -endif -ifeq ($(findstring CYGWIN,$(UNAME_S)),CYGWIN) - OS=cygwin -endif -ifeq ($(UNAME_S),Darwin) - OS=mac - ARCH=x86_64 - LIB_SUF=dylib - OPENSSL_DIR?=/usr/local/opt/openssl - CFLAGS+=-I$(OPENSSL_DIR)/include - LDFLAGS+=-L$(OPENSSL_DIR)/lib - GMP_DIR?=/usr/local/opt/gmp - CFLAGS+=-I$(GMP_DIR)/include - LDFLAGS+=-L$(GMP_DIR)/lib -else - LIB_SUF=so -endif -ARCH?=$(shell uname -m) -ifneq ($(findstring $(ARCH),x86_64/amd64),) - CPU=x86-64 - INTEL=1 - ifeq ($(findstring $(OS),mingw64/cygwin),) - GCC_EXT=1 - endif - BIT=64 - BIT_OPT=-m64 - #LOW_ASM_SRC=src/asm/low_x86-64.asm - #ASM=nasm -felf64 -endif -ifeq ($(ARCH),x86) - CPU=x86 - INTEL=1 - BIT=32 - BIT_OPT=-m32 - #LOW_ASM_SRC=src/asm/low_x86.asm -endif -ifeq ($(ARCH),armv7l) - CPU=arm - BIT=32 - #LOW_ASM_SRC=src/asm/low_arm.s -endif -ifeq ($(ARCH),aarch64) - CPU=aarch64 - BIT=64 -endif -ifeq ($(findstring $(OS),mac/mingw64),) - LDFLAGS+=-lrt -endif - -CP=cp -f -AR=ar r -MKDIR=mkdir -p -RM=rm -rf - -ifeq ($(DEBUG),1) - ifeq ($(GCC_EXT),1) - CFLAGS+=-fsanitize=address - LDFLAGS+=-fsanitize=address - endif -else - CFLAGS_OPT+=-fomit-frame-pointer -DNDEBUG - ifeq ($(CXX),clang++) - CFLAGS_OPT+=-O3 - else - ifeq ($(shell expr $(GCC_VER) \> 4.6.0),1) - CFLAGS_OPT+=-Ofast - else - CFLAGS_OPT+=-O3 - endif - endif - ifeq ($(MARCH),) - ifeq ($(INTEL),1) -# CFLAGS_OPT+=-march=native - endif - else - CFLAGS_OPT+=$(MARCH) - endif -endif -CFLAGS_WARN=-Wall -Wextra -Wformat=2 -Wcast-qual -Wcast-align 
-Wwrite-strings -Wfloat-equal -Wpointer-arith -CFLAGS+=-g3 -INC_OPT=-I include -I test -CFLAGS+=$(CFLAGS_WARN) $(BIT_OPT) $(INC_OPT) -DEBUG=0 -CFLAGS_OPT_USER?=$(CFLAGS_OPT) -ifeq ($(DEBUG),0) -CFLAGS+=$(CFLAGS_OPT_USER) -endif -CFLAGS+=$(CFLAGS_USER) -MCL_USE_GMP?=1 -MCL_USE_OPENSSL?=1 -ifeq ($(MCL_USE_GMP),0) - CFLAGS+=-DMCL_USE_VINT -endif -ifneq ($(MCL_SIZEOF_UNIT),) - CFLAGS+=-DMCL_SIZEOF_UNIT=$(MCL_SIZEOF_UNIT) -endif -ifeq ($(MCL_USE_OPENSSL),0) - CFLAGS+=-DMCL_DONT_USE_OPENSSL -endif -ifeq ($(MCL_USE_GMP),1) - GMP_LIB=-lgmp -lgmpxx -endif -ifeq ($(MCL_USE_OPENSSL),1) - OPENSSL_LIB=-lcrypto -endif -LDFLAGS+=$(GMP_LIB) $(OPENSSL_LIB) $(BIT_OPT) $(LDFLAGS_USER) - -CFLAGS+=-fPIC - diff --git a/vendor/github.com/dexon-foundation/mcl/common.props b/vendor/github.com/dexon-foundation/mcl/common.props deleted file mode 100644 index 912f39e30..000000000 --- a/vendor/github.com/dexon-foundation/mcl/common.props +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - $(SolutionDir)bin\ - - - - $(SolutionDir)../cybozulib/include;$(SolutionDir)../cybozulib_ext/include;$(SolutionDir)include;$(SolutionDir)../xbyak - - - - - Level4 - MultiThreaded - - - _MBCS;%(PreprocessorDefinitions);NOMINMAX - - - $(SolutionDir)../cybozulib_ext/lib;$(SolutionDir)lib - - - - diff --git a/vendor/github.com/dexon-foundation/mcl/debug.props b/vendor/github.com/dexon-foundation/mcl/debug.props deleted file mode 100644 index 1553ae0dc..000000000 --- a/vendor/github.com/dexon-foundation/mcl/debug.props +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - $(ProjectName)d - - - - MultiThreadedDebug - - - - \ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/cs/App.config b/vendor/github.com/dexon-foundation/mcl/ffi/cs/App.config deleted file mode 100644 index 88fa4027b..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/cs/App.config +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/cs/Properties/AssemblyInfo.cs b/vendor/github.com/dexon-foundation/mcl/ffi/cs/Properties/AssemblyInfo.cs deleted file mode 100644 index c87e1d44b..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/cs/Properties/AssemblyInfo.cs +++ /dev/null @@ -1,36 +0,0 @@ -using System.Reflection; -using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; - -// General information about an assembly is controlled through the following -// set of attributes. Change these attribute values to modify the information -// associated with an assembly. -[assembly: AssemblyTitle("bn256")] -[assembly: AssemblyDescription("")] -[assembly: AssemblyConfiguration("")] -[assembly: AssemblyCompany("")] -[assembly: AssemblyProduct("bn256")] -[assembly: AssemblyCopyright("Copyright © 2017")] -[assembly: AssemblyTrademark("")] -[assembly: AssemblyCulture("")] - -// Setting ComVisible to false makes the types in this assembly not visible -// to COM components. If you need to access a type in this assembly from -// COM, set the ComVisible attribute to true on that type. -[assembly: ComVisible(false)] - -// The following GUID is for the ID of the typelib if this project is exposed to COM -[assembly: Guid("e9d06b1b-ea22-4ef4-ba4b-422f7625966b")] - -// Version information for an assembly consists of the following four values: -// -// Major Version -// Minor Version -// Build Number -// Revision -// -// You can specify all the values, or you can default the build and revision numbers -// by using the '*' as shown below: -// [assembly: AssemblyVersion("1.0.*")]
-[assembly: AssemblyVersion("1.0.0.0")] -[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256.cs b/vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256.cs deleted file mode 100644 index 0e1ed032c..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256.cs +++ /dev/null @@ -1,475 +0,0 @@ -using System; -using System.Text; -using System.Runtime.InteropServices; - -namespace mcl { - public class BN256 { - [DllImport("mclBn256.dll")] - public static extern int mclBn_init(int curve, int maxUnitSize); - [DllImport("mclBn256.dll")] - public static extern void mclBnFr_clear(ref Fr x); - [DllImport("mclBn256.dll")] - public static extern void mclBnFr_setInt(ref Fr y, int x); - [DllImport("mclBn256.dll")] - public static extern int mclBnFr_setStr(ref Fr x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize, int ioMode); - [DllImport("mclBn256.dll")] - public static extern int mclBnFr_isValid(ref Fr x); - [DllImport("mclBn256.dll")] - public static extern int mclBnFr_isEqual(ref Fr x, ref Fr y); - [DllImport("mclBn256.dll")] - public static extern int mclBnFr_isZero(ref Fr x); - [DllImport("mclBn256.dll")] - public static extern int mclBnFr_isOne(ref Fr x); - [DllImport("mclBn256.dll")] - public static extern void mclBnFr_setByCSPRNG(ref Fr x); - - [DllImport("mclBn256.dll")] - public static extern int mclBnFr_setHashOf(ref Fr x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize); - [DllImport("mclBn256.dll")] - public static extern int mclBnFr_getStr([Out]StringBuilder buf, long maxBufSize, ref Fr x, int ioMode); - - [DllImport("mclBn256.dll")] - public static extern void mclBnFr_neg(ref Fr y, ref Fr x); - [DllImport("mclBn256.dll")] - public static extern void mclBnFr_inv(ref Fr y, ref Fr x); - [DllImport("mclBn256.dll")] - public static extern void mclBnFr_add(ref Fr z, ref Fr x, ref Fr y); - [DllImport("mclBn256.dll")] - public static extern void mclBnFr_sub(ref Fr z, ref Fr x, ref Fr y); - [DllImport("mclBn256.dll")] - public static extern void mclBnFr_mul(ref Fr z, ref Fr x, ref Fr y); - [DllImport("mclBn256.dll")] - public static extern void mclBnFr_div(ref Fr z, ref Fr x, ref Fr y); - - [DllImport("mclBn256.dll")] - public static extern void mclBnG1_clear(ref G1 x); - [DllImport("mclBn256.dll")] - public static extern int mclBnG1_setStr(ref G1 x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize, int ioMode); - [DllImport("mclBn256.dll")] - public static extern int mclBnG1_isValid(ref G1 x); - [DllImport("mclBn256.dll")] - public static extern int mclBnG1_isEqual(ref G1 x, ref G1 y); - [DllImport("mclBn256.dll")] - public static extern int mclBnG1_isZero(ref G1 x); - [DllImport("mclBn256.dll")] - public static extern int mclBnG1_hashAndMapTo(ref G1 x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize); - [DllImport("mclBn256.dll")] - public static extern long mclBnG1_getStr([Out]StringBuilder buf, long maxBufSize, ref G1 x, int ioMode); - [DllImport("mclBn256.dll")] - public static extern void mclBnG1_neg(ref G1 y, ref G1 x); - [DllImport("mclBn256.dll")] - public static extern void mclBnG1_dbl(ref G1 y, ref G1 x); - [DllImport("mclBn256.dll")] - public static extern void mclBnG1_add(ref G1 z, ref G1 x, ref G1 y); - [DllImport("mclBn256.dll")] - public static extern void mclBnG1_sub(ref G1 z, ref G1 x, ref G1 y); - [DllImport("mclBn256.dll")] - public static extern void mclBnG1_mul(ref G1 z, ref G1 x, ref Fr y); - - [DllImport("mclBn256.dll")] - public static extern void 
mclBnG2_clear(ref G2 x); - [DllImport("mclBn256.dll")] - public static extern int mclBnG2_setStr(ref G2 x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize, int ioMode); - [DllImport("mclBn256.dll")] - public static extern int mclBnG2_isValid(ref G2 x); - [DllImport("mclBn256.dll")] - public static extern int mclBnG2_isEqual(ref G2 x, ref G2 y); - [DllImport("mclBn256.dll")] - public static extern int mclBnG2_isZero(ref G2 x); - [DllImport("mclBn256.dll")] - public static extern int mclBnG2_hashAndMapTo(ref G2 x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize); - [DllImport("mclBn256.dll")] - public static extern long mclBnG2_getStr([Out]StringBuilder buf, long maxBufSize, ref G2 x, int ioMode); - [DllImport("mclBn256.dll")] - public static extern void mclBnG2_neg(ref G2 y, ref G2 x); - [DllImport("mclBn256.dll")] - public static extern void mclBnG2_dbl(ref G2 y, ref G2 x); - [DllImport("mclBn256.dll")] - public static extern void mclBnG2_add(ref G2 z, ref G2 x, ref G2 y); - [DllImport("mclBn256.dll")] - public static extern void mclBnG2_sub(ref G2 z, ref G2 x, ref G2 y); - [DllImport("mclBn256.dll")] - public static extern void mclBnG2_mul(ref G2 z, ref G2 x, ref Fr y); - - [DllImport("mclBn256.dll")] - public static extern void mclBnGT_clear(ref GT x); - [DllImport("mclBn256.dll")] - public static extern int mclBnGT_setStr(ref GT x, [In][MarshalAs(UnmanagedType.LPStr)] string buf, long bufSize, int ioMode); - [DllImport("mclBn256.dll")] - public static extern int mclBnGT_isEqual(ref GT x, ref GT y); - [DllImport("mclBn256.dll")] - public static extern int mclBnGT_isZero(ref GT x); - [DllImport("mclBn256.dll")] - public static extern int mclBnGT_isOne(ref GT x); - [DllImport("mclBn256.dll")] - public static extern long mclBnGT_getStr([Out]StringBuilder buf, long maxBufSize, ref GT x, int ioMode); - [DllImport("mclBn256.dll")] - public static extern void mclBnGT_neg(ref GT y, ref GT x); - [DllImport("mclBn256.dll")] - public static extern void mclBnGT_inv(ref GT y, ref GT x); - [DllImport("mclBn256.dll")] - public static extern void mclBnGT_add(ref GT z, ref GT x, ref GT y); - [DllImport("mclBn256.dll")] - public static extern void mclBnGT_sub(ref GT z, ref GT x, ref GT y); - [DllImport("mclBn256.dll")] - public static extern void mclBnGT_mul(ref GT z, ref GT x, ref GT y); - [DllImport("mclBn256.dll")] - public static extern void mclBnGT_div(ref GT z, ref GT x, ref GT y); - - [DllImport("mclBn256.dll")] - public static extern void mclBnGT_pow(ref GT z, ref GT x, ref Fr y); - [DllImport("mclBn256.dll")] - public static extern void mclBn_pairing(ref GT z, ref G1 x, ref G2 y); - [DllImport("mclBn256.dll")] - public static extern void mclBn_finalExp(ref GT y, ref GT x); - [DllImport("mclBn256.dll")] - public static extern void mclBn_millerLoop(ref GT z, ref G1 x, ref G2 y); - - public static void init() - { - const int curveFp254BNb = 0; - const int maxUnitSize = 4; - if (mclBn_init(curveFp254BNb, maxUnitSize) != 0) { - throw new InvalidOperationException("mclBn_init"); - } - } - [StructLayout(LayoutKind.Sequential)] - public struct Fr { - private ulong v0, v1, v2, v3; - public void Clear() - { - mclBnFr_clear(ref this); - } - public void SetInt(int x) - { - mclBnFr_setInt(ref this, x); - } - public void SetStr(string s, int ioMode) - { - if (mclBnFr_setStr(ref this, s, s.Length, ioMode) != 0) { - throw new ArgumentException("mclBnFr_setStr" + s); - } - } - public bool IsValid() - { - return mclBnFr_isValid(ref this) == 1; - } - public bool Equals(Fr rhs) - { - 
return mclBnFr_isEqual(ref this, ref rhs) == 1; - } - public bool IsZero() - { - return mclBnFr_isZero(ref this) == 1; - } - public bool IsOne() - { - return mclBnFr_isOne(ref this) == 1; - } - public void SetByCSPRNG() - { - mclBnFr_setByCSPRNG(ref this); - } - public void SetHashOf(String s) - { - if (mclBnFr_setHashOf(ref this, s, s.Length) != 0) { - throw new InvalidOperationException("mclBnFr_setHashOf:" + s); - } - } - public string GetStr(int ioMode) - { - StringBuilder sb = new StringBuilder(1024); - long size = mclBnFr_getStr(sb, sb.Capacity, ref this, ioMode); - if (size == 0) { - throw new InvalidOperationException("mclBnFr_getStr:"); - } - return sb.ToString(); - } - public void Neg(Fr x) - { - mclBnFr_neg(ref this, ref x); - } - public void Inv(Fr x) - { - mclBnFr_inv(ref this, ref x); - } - public void Add(Fr x, Fr y) - { - mclBnFr_add(ref this, ref x, ref y); - } - public void Sub(Fr x, Fr y) - { - mclBnFr_sub(ref this, ref x, ref y); - } - public void Mul(Fr x, Fr y) - { - mclBnFr_mul(ref this, ref x, ref y); - } - public void Div(Fr x, Fr y) - { - mclBnFr_div(ref this, ref x, ref y); - } - public static Fr operator -(Fr x) - { - Fr y = new Fr(); - y.Neg(x); - return y; - } - public static Fr operator +(Fr x, Fr y) - { - Fr z = new Fr(); - z.Add(x, y); - return z; - } - public static Fr operator -(Fr x, Fr y) - { - Fr z = new Fr(); - z.Sub(x, y); - return z; - } - public static Fr operator *(Fr x, Fr y) - { - Fr z = new Fr(); - z.Mul(x, y); - return z; - } - public static Fr operator /(Fr x, Fr y) - { - Fr z = new Fr(); - z.Div(x, y); - return z; - } - } - [StructLayout(LayoutKind.Sequential)] - public struct G1 { - private ulong v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11; - public void Clear() - { - mclBnG1_clear(ref this); - } - public void setStr(String s, int ioMode) - { - if (mclBnG1_setStr(ref this, s, s.Length, ioMode) != 0) { - throw new ArgumentException("mclBnG1_setStr:" + s); - } - } - public bool IsValid() - { - return mclBnG1_isValid(ref this) == 1; - } - public bool Equals(G1 rhs) - { - return mclBnG1_isEqual(ref this, ref rhs) == 1; - } - public bool IsZero() - { - return mclBnG1_isZero(ref this) == 1; - } - public void HashAndMapTo(String s) - { - if (mclBnG1_hashAndMapTo(ref this, s, s.Length) != 0) { - throw new ArgumentException("mclBnG1_hashAndMapTo:" + s); - } - } - public string GetStr(int ioMode) - { - StringBuilder sb = new StringBuilder(1024); - long size = mclBnG1_getStr(sb, sb.Capacity, ref this, ioMode); - if (size == 0) { - throw new InvalidOperationException("mclBnG1_getStr:"); - } - return sb.ToString(); - } - public void Neg(G1 x) - { - mclBnG1_neg(ref this, ref x); - } - public void Dbl(G1 x) - { - mclBnG1_dbl(ref this, ref x); - } - public void Add(G1 x, G1 y) - { - mclBnG1_add(ref this, ref x, ref y); - } - public void Sub(G1 x, G1 y) - { - mclBnG1_sub(ref this, ref x, ref y); - } - public void Mul(G1 x, Fr y) - { - mclBnG1_mul(ref this, ref x, ref y); - } - } - [StructLayout(LayoutKind.Sequential)] - public struct G2 { - private ulong v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11; - private ulong v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23; - public void Clear() - { - mclBnG2_clear(ref this); - } - public void setStr(String s, int ioMode) - { - if (mclBnG2_setStr(ref this, s, s.Length, ioMode) != 0) { - throw new ArgumentException("mclBnG2_setStr:" + s); - } - } - public bool IsValid() - { - return mclBnG2_isValid(ref this) == 1; - } - public bool Equals(G2 rhs) - { - return mclBnG2_isEqual(ref 
this, ref rhs) == 1; - } - public bool IsZero() - { - return mclBnG2_isZero(ref this) == 1; - } - public void HashAndMapTo(String s) - { - if (mclBnG2_hashAndMapTo(ref this, s, s.Length) != 0) { - throw new ArgumentException("mclBnG2_hashAndMapTo:" + s); - } - } - public string GetStr(int ioMode) - { - StringBuilder sb = new StringBuilder(1024); - long size = mclBnG2_getStr(sb, sb.Capacity, ref this, ioMode); - if (size == 0) { - throw new InvalidOperationException("mclBnG2_getStr:"); - } - return sb.ToString(); - } - public void Neg(G2 x) - { - mclBnG2_neg(ref this, ref x); - } - public void Dbl(G2 x) - { - mclBnG2_dbl(ref this, ref x); - } - public void Add(G2 x, G2 y) - { - mclBnG2_add(ref this, ref x, ref y); - } - public void Sub(G2 x, G2 y) - { - mclBnG2_sub(ref this, ref x, ref y); - } - public void Mul(G2 x, Fr y) - { - mclBnG2_mul(ref this, ref x, ref y); - } - } - [StructLayout(LayoutKind.Sequential)] - public struct GT { - private ulong v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11; - private ulong v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23; - private ulong v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35; - private ulong v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47; - public void Clear() - { - mclBnGT_clear(ref this); - } - public void setStr(String s, int ioMode) - { - if (mclBnGT_setStr(ref this, s, s.Length, ioMode) != 0) { - throw new ArgumentException("mclBnGT_setStr:" + s); - } - } - public bool Equals(GT rhs) - { - return mclBnGT_isEqual(ref this, ref rhs) == 1; - } - public bool IsZero() - { - return mclBnGT_isZero(ref this) == 1; - } - public bool IsOne() - { - return mclBnGT_isOne(ref this) == 1; - } - public string GetStr(int ioMode) - { - StringBuilder sb = new StringBuilder(1024); - long size = mclBnGT_getStr(sb, sb.Capacity, ref this, ioMode); - if (size == 0) { - throw new InvalidOperationException("mclBnGT_getStr:"); - } - return sb.ToString(); - } - public void Neg(GT x) - { - mclBnGT_neg(ref this, ref x); - } - public void Inv(GT x) - { - mclBnGT_inv(ref this, ref x); - } - public void Add(GT x, GT y) - { - mclBnGT_add(ref this, ref x, ref y); - } - public void Sub(GT x, GT y) - { - mclBnGT_sub(ref this, ref x, ref y); - } - public void Mul(GT x, GT y) - { - mclBnGT_mul(ref this, ref x, ref y); - } - public void Div(GT x, GT y) - { - mclBnGT_div(ref this, ref x, ref y); - } - public static GT operator -(GT x) - { - GT y = new GT(); - y.Neg(x); - return y; - } - public static GT operator +(GT x, GT y) - { - GT z = new GT(); - z.Add(x, y); - return z; - } - public static GT operator -(GT x, GT y) - { - GT z = new GT(); - z.Sub(x, y); - return z; - } - public static GT operator *(GT x, GT y) - { - GT z = new GT(); - z.Mul(x, y); - return z; - } - public static GT operator /(GT x, GT y) - { - GT z = new GT(); - z.Div(x, y); - return z; - } - public void Pow(GT x, Fr y) - { - mclBnGT_pow(ref this, ref x, ref y); - } - public void Pairing(G1 x, G2 y) - { - mclBn_pairing(ref this, ref x, ref y); - } - public void FinalExp(GT x) - { - mclBn_finalExp(ref this, ref x); - } - public void MillerLoop(G1 x, G2 y) - { - mclBn_millerLoop(ref this, ref x, ref y); - } - } - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256.csproj b/vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256.csproj deleted file mode 100644 index 21a049f01..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256.csproj +++ /dev/null @@ -1,62 +0,0 @@ - - - - - Debug - AnyCPU - {E9D06B1B-EA22-4EF4-BA4B-422F7625966B} - Exe 
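
[Editor's note on the struct layouts above: the sizes follow directly from the curve parameters. With curveFp254BNb and maxUnitSize = 4, one field element occupies four 64-bit words, so Fr is 4 ulongs, G1 is three projective Fp coordinates (12 ulongs), G2 is three Fp2 coordinates (24), and GT is an Fp12 (48). A minimal sketch of the matching init-and-check in the Go binding removed later in this patch; this and the Go snippets that follow are written in-package, mcl_test.go style, and assume fmt is imported where used.]

    // Sketch (package mcl): initialize the 254-bit BN curve, the same one the
    // C# init() selects, and confirm the unit size that fixes the 4/12/24/48
    // ulong layouts of Fr/G1/G2/GT above.
    func exampleInit() error {
        if err := Init(CurveFp254BNb); err != nil {
            return err
        }
        if GetOpUnitSize() != 4 { // four 64-bit units per field element here
            return fmt.Errorf("unexpected unit size %d", GetOpUnitSize())
        }
        return nil
    }
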
- Properties - bn256 - bn256 - v4.5.2 - 512 - true - - - true - ..\..\bin\ - DEBUG;TRACE - false - full - x64 - prompt - MinimumRecommendedRules.ruleset - - - ..\..\bin\ - TRACE - true - pdbonly - x64 - prompt - MinimumRecommendedRules.ruleset - true - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256.sln b/vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256.sln deleted file mode 100644 index 6e6aa67ee..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256.sln +++ /dev/null @@ -1,22 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 14 -VisualStudioVersion = 14.0.25420.1 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "bn256", "bn256.csproj", "{E9D06B1B-EA22-4EF4-BA4B-422F7625966B}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|x64 = Debug|x64 - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {E9D06B1B-EA22-4EF4-BA4B-422F7625966B}.Debug|x64.ActiveCfg = Debug|x64 - {E9D06B1B-EA22-4EF4-BA4B-422F7625966B}.Debug|x64.Build.0 = Debug|x64 - {E9D06B1B-EA22-4EF4-BA4B-422F7625966B}.Release|x64.ActiveCfg = Release|x64 - {E9D06B1B-EA22-4EF4-BA4B-422F7625966B}.Release|x64.Build.0 = Release|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256_test.cs b/vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256_test.cs deleted file mode 100644 index cad8c03d3..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/cs/bn256_test.cs +++ /dev/null @@ -1,149 +0,0 @@ -using System; - -namespace mcl { - using static BN256; - class BN256Test { - static int err = 0; - static void assert(string msg, bool b) - { - if (b) return; - Console.WriteLine("ERR {0}", msg); - err++; - } - static void Main(string[] args) - { - try { - assert("64bit system", System.Environment.Is64BitProcess); - init(); - TestFr(); - TestG1(); - TestG2(); - TestPairing(); - if (err == 0) { - Console.WriteLine("all tests succeed"); - } else { - Console.WriteLine("err={0}", err); - } - } catch (Exception e) { - Console.WriteLine("ERR={0}", e); - } - } - static void TestFr() - { - Console.WriteLine("TestFr"); - Fr x = new Fr(); - x.Clear(); - assert("0", x.GetStr(10) == "0"); - assert("0.IzZero", x.IsZero()); - assert("!0.IzOne", !x.IsOne()); - x.SetInt(1); - assert("1", x.GetStr(10) == "1"); - assert("!1.IzZero", !x.IsZero()); - assert("1.IzOne", x.IsOne()); - x.SetInt(3); - assert("3", x.GetStr(10) == "3"); - assert("!3.IzZero", !x.IsZero()); - assert("!3.IzOne", !x.IsOne()); - x.SetInt(-5); - x = -x; - assert("5", x.GetStr(10) == "5"); - x.SetInt(4); - x = x * x; - assert("16", x.GetStr(10) == "16"); - assert("10", x.GetStr(16) == "10"); - Fr y; - y = x; - assert("x == y", x.Equals(y)); - x.SetInt(123); - assert("123", x.GetStr(10) == "123"); - assert("7b", x.GetStr(16) == "7b"); - assert("y != x", !x.Equals(y)); - try { - x.SetStr("1234567891234x", 10); - Console.WriteLine("x = {0}", x); - } catch (Exception e) { - Console.WriteLine("exception test OK\n'{0}'", e); - } - x.SetStr("1234567891234", 10); - assert("1234567891234", x.GetStr(10) == "1234567891234"); - } - static void TestG1() - { - Console.WriteLine("TestG1"); - G1 P = new G1(); - P.Clear(); - assert("P.IsValid", P.IsValid()); - assert("P.IsZero", P.IsZero()); 
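
[Editor's note: TestPairing later in this file (which, as a side note, prints the label "TestG2" instead of "TestPairing") checks bilinearity: e(aP, bQ) equals e(P, Q) raised to a*b. The same check against the deleted Go binding, as a sketch with random scalars rather than the fixed ones used below.]

    // Sketch: the bilinearity property the C# test verifies, e(aP,bQ) = e(P,Q)^(ab).
    func checkBilinearity() bool {
        var a, b, ab Fr
        a.SetByCSPRNG()
        b.SetByCSPRNG()
        FrMul(&ab, &a, &b) // ab = a*b

        var P, aP G1
        var Q, bQ G2
        _ = P.HashAndMapTo([]byte("P")) // errors ignored for brevity
        _ = Q.HashAndMapTo([]byte("Q"))
        G1Mul(&aP, &P, &a) // aP = a*P
        G2Mul(&bQ, &Q, &b) // bQ = b*Q

        var lhs, rhs GT
        Pairing(&lhs, &aP, &bQ) // e(aP, bQ)
        Pairing(&rhs, &P, &Q)
        GTPow(&rhs, &rhs, &ab) // e(P, Q)^(ab)
        return lhs.IsEqual(&rhs)
    }
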
- P.HashAndMapTo("abc"); - assert("P.IsValid", P.IsValid()); - assert("!P.IsZero", !P.IsZero()); - G1 Q = new G1(); - Q = P; - assert("P == Q", Q.Equals(P)); - Q.Neg(P); - Q.Add(Q, P); - assert("P = Q", Q.IsZero()); - Q.Dbl(P); - G1 R = new G1(); - R.Add(P, P); - assert("Q == R", Q.Equals(R)); - Fr x = new Fr(); - x.SetInt(3); - R.Add(R, P); - Q.Mul(P, x); - assert("Q == R", Q.Equals(R)); - } - static void TestG2() - { - Console.WriteLine("TestG2"); - G2 P = new G2(); - P.Clear(); - assert("P is valid", P.IsValid()); - assert("P is zero", P.IsZero()); - P.HashAndMapTo("abc"); - assert("P is valid", P.IsValid()); - assert("P is not zero", !P.IsZero()); - G2 Q = new G2(); - Q = P; - assert("P == Q", Q.Equals(P)); - Q.Neg(P); - Q.Add(Q, P); - assert("Q is zero", Q.IsZero()); - Q.Dbl(P); - G2 R = new G2(); - R.Add(P, P); - assert("Q == R", Q.Equals(R)); - Fr x = new Fr(); - x.SetInt(3); - R.Add(R, P); - Q.Mul(P, x); - assert("Q == R", Q.Equals(R)); - } - static void TestPairing() - { - Console.WriteLine("TestG2"); - G1 P = new G1(); - P.HashAndMapTo("123"); - G2 Q = new G2(); - Q.HashAndMapTo("1"); - Fr a = new Fr(); - Fr b = new Fr(); - a.SetStr("12345678912345673453", 10); - b.SetStr("230498230982394243424", 10); - G1 aP = new G1(); - G2 bQ = new G2(); - aP.Mul(P, a); - bQ.Mul(Q, b); - GT e1 = new GT(); - GT e2 = new GT(); - GT e3 = new GT(); - e1.Pairing(P, Q); - e2.Pairing(aP, Q); - e3.Pow(e1, a); - assert("e2.Equals(e3)", e2.Equals(e3)); - e2.Pairing(P, bQ); - e3.Pow(e1, b); - assert("e2.Equals(e3)", e2.Equals(e3)); - } - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/go/mcl/mcl.go b/vendor/github.com/dexon-foundation/mcl/ffi/go/mcl/mcl.go deleted file mode 100644 index a0c8bb4d3..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/go/mcl/mcl.go +++ /dev/null @@ -1,659 +0,0 @@ -package mcl - -/* -#cgo bn256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=4 -#cgo bn384 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6 -#cgo bn384_256 CFLAGS:-DMCLBN_FP_UNIT_SIZE=6 -DMCLBN_FR_UNIT_SIZE=4 -#cgo bn256 LDFLAGS:-lmclbn256 -lmcl -#cgo bn384 LDFLAGS:-lmclbn384 -lmcl -#cgo bn384_256 LDFLAGS:-lmclbn384_256 -lmcl -#include -*/ -import "C" -import "fmt" -import "unsafe" - -// CurveFp254BNb -- 254 bit curve -const CurveFp254BNb = C.mclBn_CurveFp254BNb - -// CurveFp382_1 -- 382 bit curve 1 -const CurveFp382_1 = C.mclBn_CurveFp382_1 - -// CurveFp382_2 -- 382 bit curve 2 -const CurveFp382_2 = C.mclBn_CurveFp382_2 - -// BLS12_381 -const BLS12_381 = C.MCL_BLS12_381 - -// IoSerializeHexStr -const IoSerializeHexStr = C.MCLBN_IO_SERIALIZE_HEX_STR - -// Init -- -// call this function before calling all the other operations -// this function is not thread safe -func Init(curve int) error { - err := C.mclBn_init(C.int(curve), C.MCLBN_COMPILED_TIME_VAR) - if err != 0 { - return fmt.Errorf("ERR mclBn_init curve=%d", curve) - } - return nil -} - -// GetFrUnitSize() -- -func GetFrUnitSize() int { - return int(C.MCLBN_FR_UNIT_SIZE) -} - -// GetFpUnitSize() -- -// same as GetMaxOpUnitSize() -func GetFpUnitSize() int { - return int(C.MCLBN_FP_UNIT_SIZE) -} - -// GetMaxOpUnitSize -- -func GetMaxOpUnitSize() int { - return int(C.MCLBN_FP_UNIT_SIZE) -} - -// GetOpUnitSize -- -// the length of Fr is GetOpUnitSize() * 8 bytes -func GetOpUnitSize() int { - return int(C.mclBn_getOpUnitSize()) -} - -// GetCurveOrder -- -// return the order of G1 -func GetCurveOrder() string { - buf := make([]byte, 1024) - // #nosec - n := C.mclBn_getCurveOrder((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf))) - if n == 0 { - panic("implementation err. 
size of buf is small") - } - return string(buf[:n]) -} - -// GetFieldOrder -- -// return the characteristic of the field where a curve is defined -func GetFieldOrder() string { - buf := make([]byte, 1024) - // #nosec - n := C.mclBn_getFieldOrder((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf))) - if n == 0 { - panic("implementation err. size of buf is small") - } - return string(buf[:n]) -} - -// Fr -- -type Fr struct { - v C.mclBnFr -} - -// getPointer -- -func (x *Fr) getPointer() (p *C.mclBnFr) { - // #nosec - return (*C.mclBnFr)(unsafe.Pointer(x)) -} - -// Clear -- -func (x *Fr) Clear() { - // #nosec - C.mclBnFr_clear(x.getPointer()) -} - -// SetInt64 -- -func (x *Fr) SetInt64(v int64) { - // #nosec - C.mclBnFr_setInt(x.getPointer(), C.int64_t(v)) -} - -// SetString -- -func (x *Fr) SetString(s string, base int) error { - buf := []byte(s) - // #nosec - err := C.mclBnFr_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) - if err != 0 { - return fmt.Errorf("err mclBnFr_setStr %x", err) - } - return nil -} - -// Deserialize -- -func (x *Fr) Deserialize(buf []byte) error { - // #nosec - err := C.mclBnFr_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) - if err == 0 { - return fmt.Errorf("err mclBnFr_deserialize %x", buf) - } - return nil -} - -// SetLittleEndian -- -func (x *Fr) SetLittleEndian(buf []byte) error { - // #nosec - err := C.mclBnFr_setLittleEndian(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) - if err != 0 { - return fmt.Errorf("err mclBnFr_setLittleEndian %x", err) - } - return nil -} - -// IsEqual -- -func (x *Fr) IsEqual(rhs *Fr) bool { - return C.mclBnFr_isEqual(x.getPointer(), rhs.getPointer()) == 1 -} - -// IsZero -- -func (x *Fr) IsZero() bool { - return C.mclBnFr_isZero(x.getPointer()) == 1 -} - -// IsOne -- -func (x *Fr) IsOne() bool { - return C.mclBnFr_isOne(x.getPointer()) == 1 -} - -// SetByCSPRNG -- -func (x *Fr) SetByCSPRNG() { - err := C.mclBnFr_setByCSPRNG(x.getPointer()) - if err != 0 { - panic("err mclBnFr_setByCSPRNG") - } -} - -// SetHashOf -- -func (x *Fr) SetHashOf(buf []byte) bool { - // #nosec - return C.mclBnFr_setHashOf(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) == 0 -} - -// GetString -- -func (x *Fr) GetString(base int) string { - buf := make([]byte, 2048) - // #nosec - n := C.mclBnFr_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) - if n == 0 { - panic("err mclBnFr_getStr") - } - return string(buf[:n]) -} - -// Serialize -- -func (x *Fr) Serialize() []byte { - buf := make([]byte, 2048) - // #nosec - n := C.mclBnFr_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) - if n == 0 { - panic("err mclBnFr_serialize") - } - return buf[:n] -} - -// FrNeg -- -func FrNeg(out *Fr, x *Fr) { - C.mclBnFr_neg(out.getPointer(), x.getPointer()) -} - -// FrInv -- -func FrInv(out *Fr, x *Fr) { - C.mclBnFr_inv(out.getPointer(), x.getPointer()) -} - -// FrAdd -- -func FrAdd(out *Fr, x *Fr, y *Fr) { - C.mclBnFr_add(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// FrSub -- -func FrSub(out *Fr, x *Fr, y *Fr) { - C.mclBnFr_sub(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// FrMul -- -func FrMul(out *Fr, x *Fr, y *Fr) { - C.mclBnFr_mul(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// FrDiv -- -func FrDiv(out *Fr, x *Fr, y *Fr) { - C.mclBnFr_div(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// G1 -- -type G1 struct { - v C.mclBnG1 -} - -// getPointer -- -func (x *G1) 
getPointer() (p *C.mclBnG1) { - // #nosec - return (*C.mclBnG1)(unsafe.Pointer(x)) -} - -// Clear -- -func (x *G1) Clear() { - // #nosec - C.mclBnG1_clear(x.getPointer()) -} - -// SetString -- -func (x *G1) SetString(s string, base int) error { - buf := []byte(s) - // #nosec - err := C.mclBnG1_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) - if err != 0 { - return fmt.Errorf("err mclBnG1_setStr %x", err) - } - return nil -} - -// Deserialize -- -func (x *G1) Deserialize(buf []byte) error { - // #nosec - err := C.mclBnG1_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) - if err == 0 { - return fmt.Errorf("err mclBnG1_deserialize %x", buf) - } - return nil -} - -// IsEqual -- -func (x *G1) IsEqual(rhs *G1) bool { - return C.mclBnG1_isEqual(x.getPointer(), rhs.getPointer()) == 1 -} - -// IsZero -- -func (x *G1) IsZero() bool { - return C.mclBnG1_isZero(x.getPointer()) == 1 -} - -// HashAndMapTo -- -func (x *G1) HashAndMapTo(buf []byte) error { - // #nosec - err := C.mclBnG1_hashAndMapTo(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) - if err != 0 { - return fmt.Errorf("err mclBnG1_hashAndMapTo %x", err) - } - return nil -} - -// GetString -- -func (x *G1) GetString(base int) string { - buf := make([]byte, 2048) - // #nosec - n := C.mclBnG1_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) - if n == 0 { - panic("err mclBnG1_getStr") - } - return string(buf[:n]) -} - -// Serialize -- -func (x *G1) Serialize() []byte { - buf := make([]byte, 2048) - // #nosec - n := C.mclBnG1_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) - if n == 0 { - panic("err mclBnG1_serialize") - } - return buf[:n] -} - -// G1Neg -- -func G1Neg(out *G1, x *G1) { - C.mclBnG1_neg(out.getPointer(), x.getPointer()) -} - -// G1Dbl -- -func G1Dbl(out *G1, x *G1) { - C.mclBnG1_dbl(out.getPointer(), x.getPointer()) -} - -// G1Add -- -func G1Add(out *G1, x *G1, y *G1) { - C.mclBnG1_add(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// G1Sub -- -func G1Sub(out *G1, x *G1, y *G1) { - C.mclBnG1_sub(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// G1Mul -- -func G1Mul(out *G1, x *G1, y *Fr) { - C.mclBnG1_mul(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// G1MulCT -- constant time (depending on bit lengh of y) -func G1MulCT(out *G1, x *G1, y *Fr) { - C.mclBnG1_mulCT(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// G2 -- -type G2 struct { - v C.mclBnG2 -} - -// getPointer -- -func (x *G2) getPointer() (p *C.mclBnG2) { - // #nosec - return (*C.mclBnG2)(unsafe.Pointer(x)) -} - -// Clear -- -func (x *G2) Clear() { - // #nosec - C.mclBnG2_clear(x.getPointer()) -} - -// SetString -- -func (x *G2) SetString(s string, base int) error { - buf := []byte(s) - // #nosec - err := C.mclBnG2_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) - if err != 0 { - return fmt.Errorf("err mclBnG2_setStr %x", err) - } - return nil -} - -// Deserialize -- -func (x *G2) Deserialize(buf []byte) error { - // #nosec - err := C.mclBnG2_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) - if err == 0 { - return fmt.Errorf("err mclBnG2_deserialize %x", buf) - } - return nil -} - -// IsEqual -- -func (x *G2) IsEqual(rhs *G2) bool { - return C.mclBnG2_isEqual(x.getPointer(), rhs.getPointer()) == 1 -} - -// IsZero -- -func (x *G2) IsZero() bool { - return C.mclBnG2_isZero(x.getPointer()) == 1 -} - -// HashAndMapTo -- -func 
(x *G2) HashAndMapTo(buf []byte) error { - // #nosec - err := C.mclBnG2_hashAndMapTo(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) - if err != 0 { - return fmt.Errorf("err mclBnG2_hashAndMapTo %x", err) - } - return nil -} - -// GetString -- -func (x *G2) GetString(base int) string { - buf := make([]byte, 2048) - // #nosec - n := C.mclBnG2_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) - if n == 0 { - panic("err mclBnG2_getStr") - } - return string(buf[:n]) -} - -// Serialize -- -func (x *G2) Serialize() []byte { - buf := make([]byte, 2048) - // #nosec - n := C.mclBnG2_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) - if n == 0 { - panic("err mclBnG2_serialize") - } - return buf[:n] -} - -// G2Neg -- -func G2Neg(out *G2, x *G2) { - C.mclBnG2_neg(out.getPointer(), x.getPointer()) -} - -// G2Dbl -- -func G2Dbl(out *G2, x *G2) { - C.mclBnG2_dbl(out.getPointer(), x.getPointer()) -} - -// G2Add -- -func G2Add(out *G2, x *G2, y *G2) { - C.mclBnG2_add(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// G2Sub -- -func G2Sub(out *G2, x *G2, y *G2) { - C.mclBnG2_sub(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// G2Mul -- -func G2Mul(out *G2, x *G2, y *Fr) { - C.mclBnG2_mul(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// GT -- -type GT struct { - v C.mclBnGT -} - -// getPointer -- -func (x *GT) getPointer() (p *C.mclBnGT) { - // #nosec - return (*C.mclBnGT)(unsafe.Pointer(x)) -} - -// Clear -- -func (x *GT) Clear() { - // #nosec - C.mclBnGT_clear(x.getPointer()) -} - -// SetInt64 -- -func (x *GT) SetInt64(v int64) { - // #nosec - C.mclBnGT_setInt(x.getPointer(), C.int64_t(v)) -} - -// SetString -- -func (x *GT) SetString(s string, base int) error { - buf := []byte(s) - // #nosec - err := C.mclBnGT_setStr(x.getPointer(), (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), C.int(base)) - if err != 0 { - return fmt.Errorf("err mclBnGT_setStr %x", err) - } - return nil -} - -// Deserialize -- -func (x *GT) Deserialize(buf []byte) error { - // #nosec - err := C.mclBnGT_deserialize(x.getPointer(), unsafe.Pointer(&buf[0]), C.size_t(len(buf))) - if err == 0 { - return fmt.Errorf("err mclBnGT_deserialize %x", buf) - } - return nil -} - -// IsEqual -- -func (x *GT) IsEqual(rhs *GT) bool { - return C.mclBnGT_isEqual(x.getPointer(), rhs.getPointer()) == 1 -} - -// IsZero -- -func (x *GT) IsZero() bool { - return C.mclBnGT_isZero(x.getPointer()) == 1 -} - -// IsOne -- -func (x *GT) IsOne() bool { - return C.mclBnGT_isOne(x.getPointer()) == 1 -} - -// GetString -- -func (x *GT) GetString(base int) string { - buf := make([]byte, 2048) - // #nosec - n := C.mclBnGT_getStr((*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)), x.getPointer(), C.int(base)) - if n == 0 { - panic("err mclBnGT_getStr") - } - return string(buf[:n]) -} - -// Serialize -- -func (x *GT) Serialize() []byte { - buf := make([]byte, 2048) - // #nosec - n := C.mclBnGT_serialize(unsafe.Pointer(&buf[0]), C.size_t(len(buf)), x.getPointer()) - if n == 0 { - panic("err mclBnGT_serialize") - } - return buf[:n] -} - -// GTNeg -- -func GTNeg(out *GT, x *GT) { - C.mclBnGT_neg(out.getPointer(), x.getPointer()) -} - -// GTInv -- -func GTInv(out *GT, x *GT) { - C.mclBnGT_inv(out.getPointer(), x.getPointer()) -} - -// GTAdd -- -func GTAdd(out *GT, x *GT, y *GT) { - C.mclBnGT_add(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// GTSub -- -func GTSub(out *GT, x *GT, y *GT) { - C.mclBnGT_sub(out.getPointer(), x.getPointer(), 
y.getPointer()) -} - -// GTMul -- -func GTMul(out *GT, x *GT, y *GT) { - C.mclBnGT_mul(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// GTDiv -- -func GTDiv(out *GT, x *GT, y *GT) { - C.mclBnGT_div(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// GTPow -- -func GTPow(out *GT, x *GT, y *Fr) { - C.mclBnGT_pow(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// Pairing -- -func Pairing(out *GT, x *G1, y *G2) { - C.mclBn_pairing(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// FinalExp -- -func FinalExp(out *GT, x *GT) { - C.mclBn_finalExp(out.getPointer(), x.getPointer()) -} - -// MillerLoop -- -func MillerLoop(out *GT, x *G1, y *G2) { - C.mclBn_millerLoop(out.getPointer(), x.getPointer(), y.getPointer()) -} - -// GetUint64NumToPrecompute -- -func GetUint64NumToPrecompute() int { - return int(C.mclBn_getUint64NumToPrecompute()) -} - -// PrecomputeG2 -- -func PrecomputeG2(Qbuf []uint64, Q *G2) { - // #nosec - C.mclBn_precomputeG2((*C.uint64_t)(unsafe.Pointer(&Qbuf[0])), Q.getPointer()) -} - -// PrecomputedMillerLoop -- -func PrecomputedMillerLoop(out *GT, P *G1, Qbuf []uint64) { - // #nosec - C.mclBn_precomputedMillerLoop(out.getPointer(), P.getPointer(), (*C.uint64_t)(unsafe.Pointer(&Qbuf[0]))) -} - -// PrecomputedMillerLoop2 -- -func PrecomputedMillerLoop2(out *GT, P1 *G1, Q1buf []uint64, P2 *G1, Q2buf []uint64) { - // #nosec - C.mclBn_precomputedMillerLoop2(out.getPointer(), P1.getPointer(), (*C.uint64_t)(unsafe.Pointer(&Q1buf[0])), P1.getPointer(), (*C.uint64_t)(unsafe.Pointer(&Q1buf[0]))) -} - -// FrEvaluatePolynomial -- y = c[0] + c[1] * x + c[2] * x^2 + ... -func FrEvaluatePolynomial(y *Fr, c []Fr, x *Fr) error { - // #nosec - err := C.mclBn_FrEvaluatePolynomial(y.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&c[0])), (C.size_t)(len(c)), x.getPointer()) - if err != 0 { - return fmt.Errorf("err mclBn_FrEvaluatePolynomial") - } - return nil -} - -// G1EvaluatePolynomial -- y = c[0] + c[1] * x + c[2] * x^2 + ... -func G1EvaluatePolynomial(y *G1, c []G1, x *Fr) error { - // #nosec - err := C.mclBn_G1EvaluatePolynomial(y.getPointer(), (*C.mclBnG1)(unsafe.Pointer(&c[0])), (C.size_t)(len(c)), x.getPointer()) - if err != 0 { - return fmt.Errorf("err mclBn_G1EvaluatePolynomial") - } - return nil -} - -// G2EvaluatePolynomial -- y = c[0] + c[1] * x + c[2] * x^2 + ... 
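
[Editor's note: when many pairings share one G2 argument, the precompute API above amortizes the expensive part: PrecomputeG2 caches the line coefficients for Q, PrecomputedMillerLoop runs only the Miller loop, and FinalExp finishes, since mcl structures a pairing as finalExp(millerLoop(P, Q)). Worth flagging while this code is in view: the deleted PrecomputedMillerLoop2 passes P1 and Q1buf for both pairs and never touches its P2/Q2buf arguments, an upstream copy-paste bug anyone reviving this code should know about. A sketch of the single-pair path:]

    // Sketch: the precomputed Miller loop plus final exponentiation should
    // reproduce the one-shot Pairing result.
    func checkPrecomputedPairing(P *G1, Q *G2) bool {
        qbuf := make([]uint64, GetUint64NumToPrecompute())
        PrecomputeG2(qbuf, Q) // cache line coefficients for Q

        var direct, viaML GT
        Pairing(&direct, P, Q)
        PrecomputedMillerLoop(&viaML, P, qbuf)
        FinalExp(&viaML, &viaML) // pairing = finalExp(millerLoop)
        return direct.IsEqual(&viaML)
    }
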
-func G2EvaluatePolynomial(y *G2, c []G2, x *Fr) error { - // #nosec - err := C.mclBn_G2EvaluatePolynomial(y.getPointer(), (*C.mclBnG2)(unsafe.Pointer(&c[0])), (C.size_t)(len(c)), x.getPointer()) - if err != 0 { - return fmt.Errorf("err mclBn_G2EvaluatePolynomial") - } - return nil -} - -// FrLagrangeInterpolation -- -func FrLagrangeInterpolation(out *Fr, xVec []Fr, yVec []Fr) error { - if len(xVec) != len(yVec) { - return fmt.Errorf("err FrLagrangeInterpolation:bad size") - } - // #nosec - err := C.mclBn_FrLagrangeInterpolation(out.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&xVec[0])), (*C.mclBnFr)(unsafe.Pointer(&yVec[0])), (C.size_t)(len(xVec))) - if err != 0 { - return fmt.Errorf("err FrLagrangeInterpolation") - } - return nil -} - -// G1LagrangeInterpolation -- -func G1LagrangeInterpolation(out *G1, xVec []Fr, yVec []G1) error { - if len(xVec) != len(yVec) { - return fmt.Errorf("err G1LagrangeInterpolation:bad size") - } - // #nosec - err := C.mclBn_G1LagrangeInterpolation(out.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&xVec[0])), (*C.mclBnG1)(unsafe.Pointer(&yVec[0])), (C.size_t)(len(xVec))) - if err != 0 { - return fmt.Errorf("err G1LagrangeInterpolation") - } - return nil -} - -// G2LagrangeInterpolation -- -func G2LagrangeInterpolation(out *G2, xVec []Fr, yVec []G2) error { - if len(xVec) != len(yVec) { - return fmt.Errorf("err G2LagrangeInterpolation:bad size") - } - // #nosec - err := C.mclBn_G2LagrangeInterpolation(out.getPointer(), (*C.mclBnFr)(unsafe.Pointer(&xVec[0])), (*C.mclBnG2)(unsafe.Pointer(&yVec[0])), (C.size_t)(len(xVec))) - if err != 0 { - return fmt.Errorf("err G2LagrangeInterpolation") - } - return nil -} diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/go/mcl/mcl_test.go b/vendor/github.com/dexon-foundation/mcl/ffi/go/mcl/mcl_test.go deleted file mode 100644 index 16bb6910f..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/go/mcl/mcl_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package mcl - -import "testing" -import "fmt" - -func testBadPointOfG2(t *testing.T) { - var Q G2 - // this value is not in G2 so should return an error - err := Q.SetString("1 18d3d8c085a5a5e7553c3a4eb628e88b8465bf4de2612e35a0a4eb018fb0c82e9698896031e62fd7633ffd824a859474 1dc6edfcf33e29575d4791faed8e7203832217423bf7f7fbf1f6b36625b12e7132c15fbc15562ce93362a322fb83dd0d 65836963b1f7b6959030ddfa15ab38ce056097e91dedffd996c1808624fa7e2644a77be606290aa555cda8481cfb3cb 1b77b708d3d4f65aeedf54b58393463a42f0dc5856baadb5ce608036baeca398c5d9e6b169473a8838098fd72fd28b50", 16) - if err == nil { - t.Error(err) - } -} - -func testGT(t *testing.T) { - var x GT - x.Clear() - if !x.IsZero() { - t.Errorf("not zero") - } - x.SetInt64(1) - if !x.IsOne() { - t.Errorf("not one") - } -} - -func testHash(t *testing.T) { - var x Fr - if !x.SetHashOf([]byte("abc")) { - t.Error("SetHashOf") - } - fmt.Printf("x=%s\n", x.GetString(16)) -} - -func testNegAdd(t *testing.T) { - var x Fr - var P1, P2, P3 G1 - var Q1, Q2, Q3 G2 - err := P1.HashAndMapTo([]byte("this")) - if err != nil { - t.Error(err) - } - err = Q1.HashAndMapTo([]byte("this")) - if err != nil { - t.Error(err) - } - fmt.Printf("P1=%s\n", P1.GetString(16)) - fmt.Printf("Q1=%s\n", Q1.GetString(16)) - G1Neg(&P2, &P1) - G2Neg(&Q2, &Q1) - fmt.Printf("P2=%s\n", P2.GetString(16)) - fmt.Printf("Q2=%s\n", Q2.GetString(16)) - - x.SetInt64(-1) - G1Mul(&P3, &P1, &x) - G2Mul(&Q3, &Q1, &x) - if !P2.IsEqual(&P3) { - t.Errorf("P2 != P3 %s\n", P3.GetString(16)) - } - if !Q2.IsEqual(&Q3) { - t.Errorf("Q2 != Q3 %s\n", Q3.GetString(16)) - } - - G1Add(&P2, &P2, &P1) - 
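
[Editor's note: the polynomial-evaluation and Lagrange-interpolation helpers that close out mcl.go are the threshold primitives this vendor tree exists for: deal shares by evaluating a secret polynomial at nonzero member IDs, then recover the secret by interpolation. A sketch of a 2-of-3 recovery, on the assumption (standard for threshold recovery, and how these helpers are used downstream) that the interpolation functions evaluate the reconstructed polynomial at x = 0.]

    // Sketch: share c[0] among 3 holders via a degree-1 polynomial, then
    // recover it from any 2 shares.
    func shareAndRecover() (bool, error) {
        c := make([]Fr, 2) // f(x) = c0 + c1*x; c0 is the secret
        c[0].SetByCSPRNG()
        c[1].SetByCSPRNG()

        xs := make([]Fr, 3)
        ys := make([]Fr, 3)
        for i := 0; i < 3; i++ {
            xs[i].SetInt64(int64(i + 1)) // member IDs must be nonzero
            if err := FrEvaluatePolynomial(&ys[i], c, &xs[i]); err != nil {
                return false, err
            }
        }
        var recovered Fr
        // any 2 of the 3 shares suffice for a degree-1 polynomial
        if err := FrLagrangeInterpolation(&recovered, xs[:2], ys[:2]); err != nil {
            return false, err
        }
        return recovered.IsEqual(&c[0]), nil
    }
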
G2Add(&Q2, &Q2, &Q1) - if !P2.IsZero() { - t.Errorf("P2 is not zero %s\n", P2.GetString(16)) - } - if !Q2.IsZero() { - t.Errorf("Q2 is not zero %s\n", Q2.GetString(16)) - } -} - -func testPairing(t *testing.T) { - var a, b, ab Fr - err := a.SetString("123", 10) - if err != nil { - t.Error(err) - return - } - err = b.SetString("456", 10) - if err != nil { - t.Error(err) - return - } - FrMul(&ab, &a, &b) - var P, aP G1 - var Q, bQ G2 - err = P.HashAndMapTo([]byte("this")) - if err != nil { - t.Error(err) - return - } - fmt.Printf("P=%s\n", P.GetString(16)) - G1Mul(&aP, &P, &a) - fmt.Printf("aP=%s\n", aP.GetString(16)) - err = Q.HashAndMapTo([]byte("that")) - if err != nil { - t.Error(err) - return - } - fmt.Printf("Q=%s\n", Q.GetString(16)) - G2Mul(&bQ, &Q, &b) - fmt.Printf("bQ=%s\n", bQ.GetString(16)) - var e1, e2 GT - Pairing(&e1, &P, &Q) - fmt.Printf("e1=%s\n", e1.GetString(16)) - Pairing(&e2, &aP, &bQ) - fmt.Printf("e2=%s\n", e1.GetString(16)) - GTPow(&e1, &e1, &ab) - fmt.Printf("e1=%s\n", e1.GetString(16)) - if !e1.IsEqual(&e2) { - t.Errorf("not equal pairing\n%s\n%s", e1.GetString(16), e2.GetString(16)) - } - { - s := P.GetString(IoSerializeHexStr) - var P1 G1 - P1.SetString(s, IoSerializeHexStr) - if !P1.IsEqual(&P) { - t.Error("not equal to P") - return - } - s = Q.GetString(IoSerializeHexStr) - var Q1 G2 - Q1.SetString(s, IoSerializeHexStr) - if !Q1.IsEqual(&Q) { - t.Error("not equal to Q") - return - } - } -} - -func testMcl(t *testing.T, c int) { - err := Init(c) - if err != nil { - t.Fatal(err) - } - testHash(t) - testNegAdd(t) - testPairing(t) - testGT(t) - testBadPointOfG2(t) -} - -func TestMclMain(t *testing.T) { - t.Logf("GetMaxOpUnitSize() = %d\n", GetMaxOpUnitSize()) - t.Log("CurveFp254BNb") - testMcl(t, CurveFp254BNb) - if GetMaxOpUnitSize() == 6 { - if GetFrUnitSize() == 6 { - t.Log("CurveFp382_1") - testMcl(t, CurveFp382_1) - } - t.Log("BLS12_381") - testMcl(t, BLS12_381) - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/java/Bn256Test.java b/vendor/github.com/dexon-foundation/mcl/ffi/java/Bn256Test.java deleted file mode 100644 index b1f9f6f34..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/java/Bn256Test.java +++ /dev/null @@ -1,104 +0,0 @@ -import java.io.*; -import com.herumi.mcl.*; - -/* - Bn256Test -*/ -public class Bn256Test { - static { - String lib = "mcl_bn256"; - String libName = System.mapLibraryName(lib); - System.out.println("libName : " + libName); - System.loadLibrary(lib); - } - public static void assertEquals(String msg, String x, String y) { - if (x.equals(y)) { - System.out.println("OK : " + msg); - } else { - System.out.println("NG : " + msg + ", x = " + x + ", y = " + y); - } - } - public static void assertBool(String msg, boolean b) { - if (b) { - System.out.println("OK : " + msg); - } else { - System.out.println("NG : " + msg); - } - } - public static void main(String argv[]) { - try { - Bn256.SystemInit(); - Fr x = new Fr(5); - Fr y = new Fr(-2); - Fr z = new Fr(5); - assertBool("x != y", !x.equals(y)); - assertBool("x == z", x.equals(z)); - assertEquals("x == 5", x.toString(), "5"); - Bn256.add(x, x, y); - assertEquals("x == 3", x.toString(), "3"); - Bn256.mul(x, x, x); - assertEquals("x == 9", x.toString(), "9"); - G1 P = new G1(); - System.out.println("P=" + P); - P.set("-1", "1"); - System.out.println("P=" + P); - Bn256.neg(P, P); - System.out.println("P=" + P); - - String xa = "12723517038133731887338407189719511622662176727675373276651903807414909099441"; - String xb = 
"4168783608814932154536427934509895782246573715297911553964171371032945126671"; - String ya = "13891744915211034074451795021214165905772212241412891944830863846330766296736"; - String yb = "7937318970632701341203597196594272556916396164729705624521405069090520231616"; - - G2 Q = new G2(xa, xb, ya, yb); - - P.hashAndMapToG1("This is a pen"); - { - String s = P.toString(); - G1 P1 = new G1(); - P1.setStr(s); - assertBool("P == P1", P1.equals(P)); - } - - GT e = new GT(); - Bn256.pairing(e, P, Q); - GT e1 = new GT(); - GT e2 = new GT(); - Fr c = new Fr("1234567890123234928348230428394234"); - G2 cQ = new G2(Q); - Bn256.mul(cQ, Q, c); // cQ = Q * c - Bn256.pairing(e1, P, cQ); - Bn256.pow(e2, e, c); // e2 = e^c - assertBool("e1 == e2", e1.equals(e2)); - - G1 cP = new G1(P); - Bn256.mul(cP, P, c); // cP = P * c - Bn256.pairing(e1, cP, Q); - assertBool("e1 == e2", e1.equals(e2)); - - BLSsignature(Q); - } catch (RuntimeException e) { - System.out.println("unknown exception :" + e); - } - } - public static void BLSsignature(G2 Q) - { - Fr s = new Fr(); - s.setRand(); // secret key - System.out.println("secret key " + s); - G2 pub = new G2(); - Bn256.mul(pub, Q, s); // public key = sQ - - String m = "signature test"; - G1 H = new G1(); - H.hashAndMapToG1(m); // H = Hash(m) - G1 sign = new G1(); - Bn256.mul(sign, H, s); // signature of m = s H - - GT e1 = new GT(); - GT e2 = new GT(); - Bn256.pairing(e1, H, pub); // e1 = e(H, s Q) - Bn256.pairing(e2, sign, Q); // e2 = e(s H, Q); - assertBool("verify signature", e1.equals(e2)); - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/java/ElgamalTest.java b/vendor/github.com/dexon-foundation/mcl/ffi/java/ElgamalTest.java deleted file mode 100644 index 0cf49e144..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/java/ElgamalTest.java +++ /dev/null @@ -1,144 +0,0 @@ -import java.io.*; -import com.herumi.mcl.*; - -/* - ElgamalTest [ecParam] - ecParam = secp192k1, NIST_P224, ... - hashParam = hash224, hash384, ... 
-*/ -public class ElgamalTest { - static { - String lib = "mcl_elgamal"; - String libName = System.mapLibraryName(lib); - System.out.println("libName : " + libName); - System.loadLibrary(lib); - } - public static void assertEquals(String msg, int x, int y) { - if (x == y) { - System.out.println("OK : " + msg); - } else { - System.out.println("NG : " + msg + ", x = " + x + ", y = " + y); - } - } - public static void assertBool(String msg, boolean b) { - if (b) { - System.out.println("OK : " + msg); - } else { - System.out.println("NG : " + msg); - } - } - public static void main(String argv[]) { - try { - String ecStr = "secp192k1"; - String hashStr = "sha224"; - for (int i = 0; i < argv.length; i++) { - if (argv[i].equals("-e") && i < argv.length - 1) { - ecStr = argv[i + 1]; - i++; - } else - if (argv[i].equals("-h") && i < argv.length - 1) { - hashStr = argv[i + 1]; - i++; - } - } - String param = ecStr + " " + hashStr; - System.out.println("param=" + param); - Elgamal.SystemInit(param); - - String prvStr = ""; - String pubStr = ""; - { - PrivateKey prv = new PrivateKey(); - prv.init(); - prvStr = prv.toStr(); - PublicKey pub = prv.getPublicKey(); - pubStr = pub.toStr(); - } - int m = 1234; - CipherText c = new CipherText(); - PublicKey pub = new PublicKey(); - - pub.fromStr(pubStr); - - pub.enc(c, m); - - PrivateKey prv = new PrivateKey(); - prv.fromStr(prvStr); - prv.setCache(0, 60000); - - int dec = prv.dec(c); - // verify dec(enc(m)) == m - assertEquals("dec(enc(m)) == m", m, dec); - - // verify toStr, fromStr - { - String cStr = c.toStr(); - CipherText c2 = new CipherText(); - c2.fromStr(cStr); - int dec2 = prv.dec(c2); - assertEquals("fromStr(toStr(CipherText) == CipherText", dec, dec2); - } - - // verify dec(enc(str)) == str - pub.enc(c, "1234"); - dec = prv.dec(c); - assertEquals("dec(enc(str)) == str", m, dec); - - // verify dec(mul(enc(m), 3)) == m * 3 - c.mul(3); - m *= 3; - dec = prv.dec(c); - assertEquals("mul(int)", m, dec); - - // verify dec(mul(enc(m), "10")) == m * 10 - c.mul("10"); - m *= 10; - dec = prv.dec(c); - assertEquals("mul(str)", m, dec); - - // convert str - { - String s = c.toStr(); - CipherText c2 = new CipherText(); - c2.fromStr(s); - dec = prv.dec(c); - assertEquals("fromStr", m, dec); - } - // rerandomize - pub.rerandomize(c); - dec = prv.dec(c); - assertEquals("rerandomize", m, dec); - int m2 = 12345; - // verify dec(add(enc(m), m2)) == m + m2 - pub.add(c, m2); - m += m2; - dec = prv.dec(c); - assertEquals("pub.add(int)", m, dec); - - pub.add(c, "993"); - m += 993; - dec = prv.dec(c); - assertEquals("pub.add(str)", m, dec); - - // string test - String m3 = "-2000000"; - String m4 = "2001234"; - CipherText c2 = new CipherText(); - SWIGTYPE_p_bool b = Elgamal.new_p_bool(); - pub.enc(c, m3); - dec = prv.dec(c, b); - assertBool("expect dec fail", !Elgamal.p_bool_value(b)); - pub.enc(c2, m4); - dec = prv.dec(c2, b); - assertBool("expect dec fail", !Elgamal.p_bool_value(b)); - c.add(c2); // m3 + m4 - - dec = prv.dec(c, b); - assertEquals("int add", 1234, dec); - assertBool("expect dec success", Elgamal.p_bool_value(b)); - Elgamal.delete_p_bool(b); - } catch (RuntimeException e) { - System.out.println("unknown exception :" + e); - } - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/java/Makefile b/vendor/github.com/dexon-foundation/mcl/ffi/java/Makefile deleted file mode 100644 index d69c043fb..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/java/Makefile +++ /dev/null @@ -1,64 +0,0 @@ -TOP_DIR=../.. 
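
[Editor's note: ElgamalTest exercises an additively homomorphic ElGamal: mul scales the hidden plaintext, add shifts it, and rerandomize refreshes the ciphertext without changing the value. The deleted tree ships no Go binding for that C++ code, so the sketch below is illustrative only: a "lifted" ElGamal over G1 built from the deleted Go mcl API, with every name mine. Decryption recovers m*G, and mapping that back to a small integer needs a discrete-log lookup table, which is presumably what prv.setCache(0, 60000) above precomputes for the range 0..60000.]

    // Illustrative sketch only (not the deleted C++ ElGamal implementation):
    // additively homomorphic "lifted" ElGamal over G1.
    type cipherText struct{ c1, c2 G1 }

    func encrypt(gen, pub *G1, m int64) cipherText {
        var r, mFr Fr
        r.SetByCSPRNG()
        mFr.SetInt64(m)
        var ct cipherText
        G1Mul(&ct.c1, gen, &r) // c1 = r*G
        var mG, rPub G1
        G1Mul(&mG, gen, &mFr)     // m*G
        G1Mul(&rPub, pub, &r)     // r*pub
        G1Add(&ct.c2, &mG, &rPub) // c2 = m*G + r*pub
        return ct
    }

    // decryptPoint returns m*G = c2 - sk*c1; adding ciphertexts componentwise
    // adds plaintexts, which is the homomorphism the Java test verifies.
    func decryptPoint(sk *Fr, ct *cipherText) G1 {
        var skC1, mG G1
        G1Mul(&skC1, &ct.c1, sk)
        G1Sub(&mG, &ct.c2, &skC1)
        return mG
    }
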
-include $(TOP_DIR)/common.mk -ifeq ($(UNAME_S),Darwin) - JAVA_INC=-I/System/Library/Frameworks/JavaVM.framework/Versions/Current/Headers/ -else - JAVA_INC=-I/usr/lib/jvm/default-java/include -#JAVA_INC=-I/usr/lib/jvm/java-7-openjdk-amd64/include - CFLAGS+=-z noexecstack - LDFLAGS+=-lrt -endif -CFLAGS+=$(JAVA_INC) $(JAVA_INC)/linux -I $(TOP_DIR)/include -I $(TOP_DIR)/../xbyak -I $(TOP_DIR)/../cybozulib/include -Wno-strict-aliasing -MCL_LIB=$(TOP_DIR)/lib/libmcl.a - -PACKAGE_NAME=com.herumi.mcl -PACKAGE_DIR=$(subst .,/,$(PACKAGE_NAME)) - -ELGAMAL_LIB=$(TOP_DIR)/bin/libmcl_elgamal.$(LIB_SUF) -BN256_LIB=$(TOP_DIR)/bin/libmcl_bn256.$(LIB_SUF) -JAVA_EXE=cd $(TOP_DIR)/bin && LD_LIBRARY_PATH=./:$(LD_LIBRARY_PATH) java -classpath ../ffi/java -all: $(ELGAMAL_LIB) - -elgamal_wrap.cxx: elgamal.i elgamal_impl.hpp - $(MKDIR) $(PACKAGE_DIR) - swig -java -package $(PACKAGE_NAME) -outdir $(PACKAGE_DIR) -c++ -Wall elgamal.i - -bn256_wrap.cxx: bn256.i bn256_impl.hpp - $(MKDIR) $(PACKAGE_DIR) - swig -java -package $(PACKAGE_NAME) -outdir $(PACKAGE_DIR) -c++ -Wall bn256.i - -$(MCL_LIB): - make -C $(TOP_DIR) - -$(ELGAMAL_LIB): elgamal_wrap.cxx $(MCL_LIB) - $(PRE)$(CXX) $< -o $@ $(CFLAGS) $(LDFLAGS) $(MCL_LIB) -shared - -$(BN256_LIB): bn256_wrap.cxx $(MCL_LIB) - $(PRE)$(CXX) $< -o $@ $(CFLAGS) $(LDFLAGS) $(MCL_LIB) -shared - -%.class: %.java - javac $< - -ElgamalTest.class: ElgamalTest.java $(ELGAMAL_LIB) -Bn256Test.class: Bn256Test.java $(BN256_LIB) - -jar: - jar cvf mcl.jar com - -test_elgamal: ElgamalTest.class $(ELGAMAL_LIB) - $(JAVA_EXE) ElgamalTest - $(JAVA_EXE) ElgamalTest -e NIST_P192 - $(JAVA_EXE) ElgamalTest -e NIST_P256 -h sha256 - $(JAVA_EXE) ElgamalTest -e NIST_P384 -h sha384 - $(JAVA_EXE) ElgamalTest -e NIST_P521 -h sha512 - -test_bn256: Bn256Test.class $(BN256_LIB) - $(JAVA_EXE) Bn256Test - -test: - $(MAKE) test_elgamal - $(MAKE) test_bn256 - -clean: - rm -rf *.class $(ELGAMAL_LIB) $(PACKAGE_DIR)/*.class *_wrap.cxx - diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/java/bn256.i b/vendor/github.com/dexon-foundation/mcl/ffi/java/bn256.i deleted file mode 100644 index 94a8edb7a..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/java/bn256.i +++ /dev/null @@ -1,31 +0,0 @@ -%module Bn256 - -%include "std_string.i" -%include "std_except.i" - - -%{ -#include -#include -#include -struct Param { - cybozu::RandomGenerator rg; - static inline Param& getParam() - { - static Param p; - return p; - } -}; - -static void HashAndMapToG1(mcl::bn256::G1& P, const std::string& m) -{ - std::string digest = cybozu::crypto::Hash::digest(cybozu::crypto::Hash::N_SHA256, m); - mcl::bn256::Fp t; - t.setArrayMask(digest.c_str(), digest.size()); - mcl::bn256::BN::param.mapTo.calcG1(P, t); -} - -#include "bn256_impl.hpp" -%} - -%include "bn256_impl.hpp" diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/java/bn256_impl.hpp b/vendor/github.com/dexon-foundation/mcl/ffi/java/bn256_impl.hpp deleted file mode 100644 index c4caaf3ca..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/java/bn256_impl.hpp +++ /dev/null @@ -1,249 +0,0 @@ -#include -#include -#include - -void SystemInit() throw(std::exception) -{ - mcl::bn256::initPairing(); -} - -class G1; -class G2; -class GT; -/* - Fr = Z / rZ -*/ -class Fr { - mcl::bn256::Fr self_; - friend class G1; - friend class G2; - friend class GT; - friend void neg(Fr& y, const Fr& x); - friend void add(Fr& z, const Fr& x, const Fr& y); - friend void sub(Fr& z, const Fr& x, const Fr& y); - friend void mul(Fr& z, const Fr& x, const Fr& y); - friend void mul(G1& z, 
const G1& x, const Fr& y); - friend void mul(G2& z, const G2& x, const Fr& y); - friend void div(Fr& z, const Fr& x, const Fr& y); - friend void pow(GT& z, const GT& x, const Fr& y); -public: - Fr() {} - Fr(const Fr& rhs) : self_(rhs.self_) {} - Fr(int x) : self_(x) {} - Fr(const std::string& str) throw(std::exception) - : self_(str) {} - bool equals(const Fr& rhs) const { return self_ == rhs.self_; } - void setStr(const std::string& str) throw(std::exception) - { - self_.setStr(str); - } - void setInt(int x) - { - self_ = x; - } - void clear() - { - self_.clear(); - } - void setRand() - { - self_.setRand(Param::getParam().rg); - } - std::string toString() const throw(std::exception) - { - return self_.getStr(); - } -}; - -void neg(Fr& y, const Fr& x) -{ - mcl::bn256::Fr::neg(y.self_, x.self_); -} - -void add(Fr& z, const Fr& x, const Fr& y) -{ - mcl::bn256::Fr::add(z.self_, x.self_, y.self_); -} - -void sub(Fr& z, const Fr& x, const Fr& y) -{ - mcl::bn256::Fr::sub(z.self_, x.self_, y.self_); -} - -void mul(Fr& z, const Fr& x, const Fr& y) -{ - mcl::bn256::Fr::mul(z.self_, x.self_, y.self_); -} - -void div(Fr& z, const Fr& x, const Fr& y) -{ - mcl::bn256::Fr::div(z.self_, x.self_, y.self_); -} - -/* - #G1 = r -*/ -class G1 { - mcl::bn256::G1 self_; - friend void neg(G1& y, const G1& x); - friend void dbl(G1& y, const G1& x); - friend void add(G1& z, const G1& x, const G1& y); - friend void sub(G1& z, const G1& x, const G1& y); - friend void mul(G1& z, const G1& x, const Fr& y); - friend void pairing(GT& e, const G1& P, const G2& Q); -public: - G1() {} - G1(const G1& rhs) : self_(rhs.self_) {} - G1(const std::string& x, const std::string& y) throw(std::exception) - : self_(mcl::bn256::Fp(x), mcl::bn256::Fp(y)) - { - } - bool equals(const G1& rhs) const { return self_ == rhs.self_; } - void set(const std::string& x, const std::string& y) - { - self_.set(mcl::bn256::Fp(x), mcl::bn256::Fp(y)); - } - void hashAndMapToG1(const std::string& m) throw(std::exception) - { - HashAndMapToG1(self_, m); - } - void clear() - { - self_.clear(); - } - /* - compressed format - */ - void setStr(const std::string& str) throw(std::exception) - { - self_.setStr(str); - } - std::string toString() const throw(std::exception) - { - return self_.getStr(); - } -}; - -void neg(G1& y, const G1& x) -{ - mcl::bn256::G1::neg(y.self_, x.self_); -} -void dbl(G1& y, const G1& x) -{ - mcl::bn256::G1::dbl(y.self_, x.self_); -} -void add(G1& z, const G1& x, const G1& y) -{ - mcl::bn256::G1::add(z.self_, x.self_, y.self_); -} -void sub(G1& z, const G1& x, const G1& y) -{ - mcl::bn256::G1::sub(z.self_, x.self_, y.self_); -} -void mul(G1& z, const G1& x, const Fr& y) -{ - mcl::bn256::G1::mul(z.self_, x.self_, y.self_); -} - -/* - #G2 = r -*/ -class G2 { - mcl::bn256::G2 self_; - friend void neg(G2& y, const G2& x); - friend void dbl(G2& y, const G2& x); - friend void add(G2& z, const G2& x, const G2& y); - friend void sub(G2& z, const G2& x, const G2& y); - friend void mul(G2& z, const G2& x, const Fr& y); - friend void pairing(GT& e, const G1& P, const G2& Q); -public: - G2() {} - G2(const G2& rhs) : self_(rhs.self_) {} - G2(const std::string& xa, const std::string& xb, const std::string& ya, const std::string& yb) throw(std::exception) - : self_(mcl::bn256::Fp2(xa, xb), mcl::bn256::Fp2(ya, yb)) - { - } - bool equals(const G2& rhs) const { return self_ == rhs.self_; } - void set(const std::string& xa, const std::string& xb, const std::string& ya, const std::string& yb) - { - self_.set(mcl::bn256::Fp2(xa, xb), mcl::bn256::Fp2(ya, 
yb)); - } - void clear() - { - self_.clear(); - } - /* - compressed format - */ - void setStr(const std::string& str) throw(std::exception) - { - self_.setStr(str); - } - std::string toString() const throw(std::exception) - { - return self_.getStr(); - } -}; - -void neg(G2& y, const G2& x) -{ - mcl::bn256::G2::neg(y.self_, x.self_); -} -void dbl(G2& y, const G2& x) -{ - mcl::bn256::G2::dbl(y.self_, x.self_); -} -void add(G2& z, const G2& x, const G2& y) -{ - mcl::bn256::G2::add(z.self_, x.self_, y.self_); -} -void sub(G2& z, const G2& x, const G2& y) -{ - mcl::bn256::G2::sub(z.self_, x.self_, y.self_); -} -void mul(G2& z, const G2& x, const Fr& y) -{ - mcl::bn256::G2::mul(z.self_, x.self_, y.self_); -} - -/* - #GT = r -*/ -class GT { - mcl::bn256::Fp12 self_; - friend void mul(GT& z, const GT& x, const GT& y); - friend void pow(GT& z, const GT& x, const Fr& y); - friend void pairing(GT& e, const G1& P, const G2& Q); -public: - GT() {} - GT(const GT& rhs) : self_(rhs.self_) {} - bool equals(const GT& rhs) const { return self_ == rhs.self_; } - void clear() - { - self_.clear(); - } - void setStr(const std::string& str) throw(std::exception) - { - std::istringstream iss(str); - iss >> self_; - } - std::string toString() const throw(std::exception) - { - std::ostringstream oss; - oss << self_; - return oss.str(); - } -}; - -void mul(GT& z, const GT& x, const GT& y) -{ - mcl::bn256::Fp12::mul(z.self_, x.self_, y.self_); -} -void pow(GT& z, const GT& x, const Fr& y) -{ - mcl::bn256::Fp12::pow(z.self_, x.self_, y.self_); -} -void pairing(GT& e, const G1& P, const G2& Q) -{ - mcl::bn256::pairing(e.self_, P.self_, Q.self_); -} diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/java/bn256_wrap.cxx b/vendor/github.com/dexon-foundation/mcl/ffi/java/bn256_wrap.cxx deleted file mode 100644 index 0c8257af5..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/java/bn256_wrap.cxx +++ /dev/null @@ -1,1542 +0,0 @@ -/* ---------------------------------------------------------------------------- - * This file was automatically generated by SWIG (http://www.swig.org). - * Version 3.0.12 - * - * This file is not intended to be easily readable and contains a number of - * coding conventions designed to improve portability and efficiency. Do not make - * changes to this file unless you know what you are doing--modify the SWIG - * interface file instead. - * ----------------------------------------------------------------------------- */ - - -#ifndef SWIGJAVA -#define SWIGJAVA -#endif - - - -#ifdef __cplusplus -/* SwigValueWrapper is described in swig.swg */ -template class SwigValueWrapper { - struct SwigMovePointer { - T *ptr; - SwigMovePointer(T *p) : ptr(p) { } - ~SwigMovePointer() { delete ptr; } - SwigMovePointer& operator=(SwigMovePointer& rhs) { T* oldptr = ptr; ptr = 0; delete oldptr; ptr = rhs.ptr; rhs.ptr = 0; return *this; } - } pointer; - SwigValueWrapper& operator=(const SwigValueWrapper& rhs); - SwigValueWrapper(const SwigValueWrapper& rhs); -public: - SwigValueWrapper() : pointer(0) { } - SwigValueWrapper& operator=(const T& t) { SwigMovePointer tmp(new T(t)); pointer = tmp; return *this; } - operator T&() const { return *pointer.ptr; } - T *operator&() { return pointer.ptr; } -}; - -template T SwigValueInit() { - return T(); -} -#endif - -/* ----------------------------------------------------------------------------- - * This section contains generic SWIG labels for method/variable - * declarations/attributes, and other compiler dependent labels. 
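
[Editor's note on the API shape bn256_impl.hpp chooses: mutating free functions with an out parameter (add(z, x, y)) and friend access, rather than operator overloads, which keeps the SWIG-generated JNI glue trivial. The deleted Go binding follows the same convention (FrAdd, G1Mul, and so on), and the out parameter may alias an input; the deleted testPairing already relies on that with GTPow(&e1, &e1, &ab). A small sketch of composing operations in that style:]

    // Sketch: out-parameter style composition; in-place accumulation avoids
    // temporaries (aliasing out with an input is supported by these ops).
    func axPlusB(out, a, x, b *Fr) {
        FrMul(out, a, x)   // out = a*x
        FrAdd(out, out, b) // out = a*x + b
    }
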
- * ----------------------------------------------------------------------------- */ - -/* template workaround for compilers that cannot correctly implement the C++ standard */ -#ifndef SWIGTEMPLATEDISAMBIGUATOR -# if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) -# define SWIGTEMPLATEDISAMBIGUATOR template -# elif defined(__HP_aCC) -/* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ -/* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ -# define SWIGTEMPLATEDISAMBIGUATOR template -# else -# define SWIGTEMPLATEDISAMBIGUATOR -# endif -#endif - -/* inline attribute */ -#ifndef SWIGINLINE -# if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) -# define SWIGINLINE inline -# else -# define SWIGINLINE -# endif -#endif - -/* attribute recognised by some compilers to avoid 'unused' warnings */ -#ifndef SWIGUNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define SWIGUNUSED __attribute__ ((__unused__)) -# else -# define SWIGUNUSED -# endif -# elif defined(__ICC) -# define SWIGUNUSED __attribute__ ((__unused__)) -# else -# define SWIGUNUSED -# endif -#endif - -#ifndef SWIG_MSC_UNSUPPRESS_4505 -# if defined(_MSC_VER) -# pragma warning(disable : 4505) /* unreferenced local function has been removed */ -# endif -#endif - -#ifndef SWIGUNUSEDPARM -# ifdef __cplusplus -# define SWIGUNUSEDPARM(p) -# else -# define SWIGUNUSEDPARM(p) p SWIGUNUSED -# endif -#endif - -/* internal SWIG method */ -#ifndef SWIGINTERN -# define SWIGINTERN static SWIGUNUSED -#endif - -/* internal inline SWIG method */ -#ifndef SWIGINTERNINLINE -# define SWIGINTERNINLINE SWIGINTERN SWIGINLINE -#endif - -/* exporting methods */ -#if defined(__GNUC__) -# if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) -# ifndef GCC_HASCLASSVISIBILITY -# define GCC_HASCLASSVISIBILITY -# endif -# endif -#endif - -#ifndef SWIGEXPORT -# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) -# if defined(STATIC_LINKED) -# define SWIGEXPORT -# else -# define SWIGEXPORT __declspec(dllexport) -# endif -# else -# if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) -# define SWIGEXPORT __attribute__ ((visibility("default"))) -# else -# define SWIGEXPORT -# endif -# endif -#endif - -/* calling conventions for Windows */ -#ifndef SWIGSTDCALL -# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) -# define SWIGSTDCALL __stdcall -# else -# define SWIGSTDCALL -# endif -#endif - -/* Deal with Microsoft's attempt at deprecating C standard runtime functions */ -#if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) -# define _CRT_SECURE_NO_DEPRECATE -#endif - -/* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ -#if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) -# define _SCL_SECURE_NO_DEPRECATE -#endif - -/* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ -#if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) -# define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 -#endif - -/* Intel's compiler complains if a variable which was never initialised is - * cast to void, which is a common idiom which we use to indicate that we - * are aware a variable isn't used. So we just silence that warning. 
- * See: https://github.com/swig/swig/issues/192 for more discussion. - */ -#ifdef __INTEL_COMPILER -# pragma warning disable 592 -#endif - - -/* Fix for jlong on some versions of gcc on Windows */ -#if defined(__GNUC__) && !defined(__INTEL_COMPILER) - typedef long long __int64; -#endif - -/* Fix for jlong on 64-bit x86 Solaris */ -#if defined(__x86_64) -# ifdef _LP64 -# undef _LP64 -# endif -#endif - -#include -#include -#include - - -/* Support for throwing Java exceptions */ -typedef enum { - SWIG_JavaOutOfMemoryError = 1, - SWIG_JavaIOException, - SWIG_JavaRuntimeException, - SWIG_JavaIndexOutOfBoundsException, - SWIG_JavaArithmeticException, - SWIG_JavaIllegalArgumentException, - SWIG_JavaNullPointerException, - SWIG_JavaDirectorPureVirtual, - SWIG_JavaUnknownError -} SWIG_JavaExceptionCodes; - -typedef struct { - SWIG_JavaExceptionCodes code; - const char *java_exception; -} SWIG_JavaExceptions_t; - - -static void SWIGUNUSED SWIG_JavaThrowException(JNIEnv *jenv, SWIG_JavaExceptionCodes code, const char *msg) { - jclass excep; - static const SWIG_JavaExceptions_t java_exceptions[] = { - { SWIG_JavaOutOfMemoryError, "java/lang/OutOfMemoryError" }, - { SWIG_JavaIOException, "java/io/IOException" }, - { SWIG_JavaRuntimeException, "java/lang/RuntimeException" }, - { SWIG_JavaIndexOutOfBoundsException, "java/lang/IndexOutOfBoundsException" }, - { SWIG_JavaArithmeticException, "java/lang/ArithmeticException" }, - { SWIG_JavaIllegalArgumentException, "java/lang/IllegalArgumentException" }, - { SWIG_JavaNullPointerException, "java/lang/NullPointerException" }, - { SWIG_JavaDirectorPureVirtual, "java/lang/RuntimeException" }, - { SWIG_JavaUnknownError, "java/lang/UnknownError" }, - { (SWIG_JavaExceptionCodes)0, "java/lang/UnknownError" } - }; - const SWIG_JavaExceptions_t *except_ptr = java_exceptions; - - while (except_ptr->code != code && except_ptr->code) - except_ptr++; - - jenv->ExceptionClear(); - excep = jenv->FindClass(except_ptr->java_exception); - if (excep) - jenv->ThrowNew(excep, msg); -} - - -/* Contract support */ - -#define SWIG_contract_assert(nullreturn, expr, msg) if (!(expr)) {SWIG_JavaThrowException(jenv, SWIG_JavaIllegalArgumentException, msg); return nullreturn; } else - - -#include - - -#include -#include - - -#include -#include -#include -struct Param { - cybozu::RandomGenerator rg; - static inline Param& getParam() - { - static Param p; - return p; - } -}; - -static void HashAndMapToG1(mcl::bn256::G1& P, const std::string& m) -{ - std::string digest = cybozu::crypto::Hash::digest(cybozu::crypto::Hash::N_SHA256, m); - mcl::bn256::Fp t; - t.setArrayMask(digest.c_str(), digest.size()); - mcl::bn256::BN::param.mapTo.calcG1(P, t); -} - -#include "bn256_impl.hpp" - - -#ifdef __cplusplus -extern "C" { -#endif - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_SystemInit(JNIEnv *jenv, jclass jcls) { - (void)jenv; - (void)jcls; - try { - SystemInit(); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_neg_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { - Fr *arg1 = 0 ; - Fr *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(Fr **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr & reference is null"); - return ; - } - arg2 = *(Fr **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, 
"Fr const & reference is null"); - return ; - } - neg(*arg1,(Fr const &)*arg2); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_add_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { - Fr *arg1 = 0 ; - Fr *arg2 = 0 ; - Fr *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - (void)jarg3_; - arg1 = *(Fr **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr & reference is null"); - return ; - } - arg2 = *(Fr **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); - return ; - } - arg3 = *(Fr **)&jarg3; - if (!arg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); - return ; - } - add(*arg1,(Fr const &)*arg2,(Fr const &)*arg3); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_sub_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { - Fr *arg1 = 0 ; - Fr *arg2 = 0 ; - Fr *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - (void)jarg3_; - arg1 = *(Fr **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr & reference is null"); - return ; - } - arg2 = *(Fr **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); - return ; - } - arg3 = *(Fr **)&jarg3; - if (!arg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); - return ; - } - sub(*arg1,(Fr const &)*arg2,(Fr const &)*arg3); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_mul_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { - Fr *arg1 = 0 ; - Fr *arg2 = 0 ; - Fr *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - (void)jarg3_; - arg1 = *(Fr **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr & reference is null"); - return ; - } - arg2 = *(Fr **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); - return ; - } - arg3 = *(Fr **)&jarg3; - if (!arg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); - return ; - } - mul(*arg1,(Fr const &)*arg2,(Fr const &)*arg3); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_mul_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { - G1 *arg1 = 0 ; - G1 *arg2 = 0 ; - Fr *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - (void)jarg3_; - arg1 = *(G1 **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 & reference is null"); - return ; - } - arg2 = *(G1 **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); - return ; - } - arg3 = *(Fr **)&jarg3; - if (!arg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); - return ; - } - mul(*arg1,(G1 const &)*arg2,(Fr const &)*arg3); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_mul_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { - G2 *arg1 
= 0 ; - G2 *arg2 = 0 ; - Fr *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - (void)jarg3_; - arg1 = *(G2 **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 & reference is null"); - return ; - } - arg2 = *(G2 **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); - return ; - } - arg3 = *(Fr **)&jarg3; - if (!arg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); - return ; - } - mul(*arg1,(G2 const &)*arg2,(Fr const &)*arg3); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_div(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { - Fr *arg1 = 0 ; - Fr *arg2 = 0 ; - Fr *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - (void)jarg3_; - arg1 = *(Fr **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr & reference is null"); - return ; - } - arg2 = *(Fr **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); - return ; - } - arg3 = *(Fr **)&jarg3; - if (!arg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); - return ; - } - div(*arg1,(Fr const &)*arg2,(Fr const &)*arg3); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_pow(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { - GT *arg1 = 0 ; - GT *arg2 = 0 ; - Fr *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - (void)jarg3_; - arg1 = *(GT **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT & reference is null"); - return ; - } - arg2 = *(GT **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT const & reference is null"); - return ; - } - arg3 = *(Fr **)&jarg3; - if (!arg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); - return ; - } - pow(*arg1,(GT const &)*arg2,(Fr const &)*arg3); -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1Fr_1_1SWIG_10(JNIEnv *jenv, jclass jcls) { - jlong jresult = 0 ; - Fr *result = 0 ; - - (void)jenv; - (void)jcls; - result = (Fr *)new Fr(); - *(Fr **)&jresult = result; - return jresult; -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1Fr_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - jlong jresult = 0 ; - Fr *arg1 = 0 ; - Fr *result = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(Fr **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); - return 0; - } - result = (Fr *)new Fr((Fr const &)*arg1); - *(Fr **)&jresult = result; - return jresult; -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1Fr_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jint jarg1) { - jlong jresult = 0 ; - int arg1 ; - Fr *result = 0 ; - - (void)jenv; - (void)jcls; - arg1 = (int)jarg1; - result = (Fr *)new Fr(arg1); - *(Fr **)&jresult = result; - return jresult; -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1Fr_1_1SWIG_13(JNIEnv *jenv, jclass jcls, jstring jarg1) { - jlong jresult = 0 ; - std::string *arg1 = 0 ; - Fr *result = 0 ; - - (void)jenv; - (void)jcls; - if(!jarg1) { - SWIG_JavaThrowException(jenv, 
SWIG_JavaNullPointerException, "null string"); - return 0; - } - const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); - if (!arg1_pstr) return 0; - std::string arg1_str(arg1_pstr); - arg1 = &arg1_str; - jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); - try { - result = (Fr *)new Fr((std::string const &)*arg1); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - *(Fr **)&jresult = result; - return jresult; -} - - -SWIGEXPORT jboolean JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1equals(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { - jboolean jresult = 0 ; - Fr *arg1 = (Fr *) 0 ; - Fr *arg2 = 0 ; - bool result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(Fr **)&jarg1; - arg2 = *(Fr **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Fr const & reference is null"); - return 0; - } - result = (bool)((Fr const *)arg1)->equals((Fr const &)*arg2); - jresult = (jboolean)result; - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1setStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { - Fr *arg1 = (Fr *) 0 ; - std::string *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(Fr **)&jarg1; - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return ; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - try { - (arg1)->setStr((std::string const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1setInt(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2) { - Fr *arg1 = (Fr *) 0 ; - int arg2 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(Fr **)&jarg1; - arg2 = (int)jarg2; - (arg1)->setInt(arg2); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1clear(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - Fr *arg1 = (Fr *) 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(Fr **)&jarg1; - (arg1)->clear(); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1setRand(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - Fr *arg1 = (Fr *) 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(Fr **)&jarg1; - (arg1)->setRand(); -} - - -SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_Bn256JNI_Fr_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - jstring jresult = 0 ; - Fr *arg1 = (Fr *) 0 ; - std::string result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(Fr **)&jarg1; - try { - result = ((Fr const *)arg1)->toString(); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - jresult = jenv->NewStringUTF((&result)->c_str()); - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_delete_1Fr(JNIEnv *jenv, jclass jcls, jlong jarg1) { - Fr *arg1 = (Fr *) 0 ; - - (void)jenv; - (void)jcls; - arg1 = *(Fr **)&jarg1; - delete arg1; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_neg_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { - G1 *arg1 = 
0 ; - G1 *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(G1 **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 & reference is null"); - return ; - } - arg2 = *(G1 **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); - return ; - } - neg(*arg1,(G1 const &)*arg2); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_dbl_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { - G1 *arg1 = 0 ; - G1 *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(G1 **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 & reference is null"); - return ; - } - arg2 = *(G1 **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); - return ; - } - dbl(*arg1,(G1 const &)*arg2); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_add_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { - G1 *arg1 = 0 ; - G1 *arg2 = 0 ; - G1 *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - (void)jarg3_; - arg1 = *(G1 **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 & reference is null"); - return ; - } - arg2 = *(G1 **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); - return ; - } - arg3 = *(G1 **)&jarg3; - if (!arg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); - return ; - } - add(*arg1,(G1 const &)*arg2,(G1 const &)*arg3); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_sub_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { - G1 *arg1 = 0 ; - G1 *arg2 = 0 ; - G1 *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - (void)jarg3_; - arg1 = *(G1 **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 & reference is null"); - return ; - } - arg2 = *(G1 **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); - return ; - } - arg3 = *(G1 **)&jarg3; - if (!arg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); - return ; - } - sub(*arg1,(G1 const &)*arg2,(G1 const &)*arg3); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_pairing(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { - GT *arg1 = 0 ; - G1 *arg2 = 0 ; - G2 *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - (void)jarg3_; - arg1 = *(GT **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT & reference is null"); - return ; - } - arg2 = *(G1 **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); - return ; - } - arg3 = *(G2 **)&jarg3; - if (!arg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); - return ; - } - pairing(*arg1,(G1 const &)*arg2,(G2 const &)*arg3); -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G1_1_1SWIG_10(JNIEnv *jenv, jclass 
jcls) { - jlong jresult = 0 ; - G1 *result = 0 ; - - (void)jenv; - (void)jcls; - result = (G1 *)new G1(); - *(G1 **)&jresult = result; - return jresult; -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G1_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - jlong jresult = 0 ; - G1 *arg1 = 0 ; - G1 *result = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(G1 **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); - return 0; - } - result = (G1 *)new G1((G1 const &)*arg1); - *(G1 **)&jresult = result; - return jresult; -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G1_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jstring jarg1, jstring jarg2) { - jlong jresult = 0 ; - std::string *arg1 = 0 ; - std::string *arg2 = 0 ; - G1 *result = 0 ; - - (void)jenv; - (void)jcls; - if(!jarg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return 0; - } - const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); - if (!arg1_pstr) return 0; - std::string arg1_str(arg1_pstr); - arg1 = &arg1_str; - jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return 0; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return 0; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - try { - result = (G1 *)new G1((std::string const &)*arg1,(std::string const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - *(G1 **)&jresult = result; - return jresult; -} - - -SWIGEXPORT jboolean JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1equals(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { - jboolean jresult = 0 ; - G1 *arg1 = (G1 *) 0 ; - G1 *arg2 = 0 ; - bool result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(G1 **)&jarg1; - arg2 = *(G1 **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G1 const & reference is null"); - return 0; - } - result = (bool)((G1 const *)arg1)->equals((G1 const &)*arg2); - jresult = (jboolean)result; - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1set(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2, jstring jarg3) { - G1 *arg1 = (G1 *) 0 ; - std::string *arg2 = 0 ; - std::string *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(G1 **)&jarg1; - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return ; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - if(!jarg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0); - if (!arg3_pstr) return ; - std::string arg3_str(arg3_pstr); - arg3 = &arg3_str; - jenv->ReleaseStringUTFChars(jarg3, arg3_pstr); - (arg1)->set((std::string const &)*arg2,(std::string const &)*arg3); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1hashAndMapToG1(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { - G1 
*arg1 = (G1 *) 0 ; - std::string *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(G1 **)&jarg1; - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return ; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - try { - (arg1)->hashAndMapToG1((std::string const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1clear(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - G1 *arg1 = (G1 *) 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(G1 **)&jarg1; - (arg1)->clear(); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1setStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { - G1 *arg1 = (G1 *) 0 ; - std::string *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(G1 **)&jarg1; - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return ; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - try { - (arg1)->setStr((std::string const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_Bn256JNI_G1_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - jstring jresult = 0 ; - G1 *arg1 = (G1 *) 0 ; - std::string result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(G1 **)&jarg1; - try { - result = ((G1 const *)arg1)->toString(); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - jresult = jenv->NewStringUTF((&result)->c_str()); - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_delete_1G1(JNIEnv *jenv, jclass jcls, jlong jarg1) { - G1 *arg1 = (G1 *) 0 ; - - (void)jenv; - (void)jcls; - arg1 = *(G1 **)&jarg1; - delete arg1; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_neg_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { - G2 *arg1 = 0 ; - G2 *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(G2 **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 & reference is null"); - return ; - } - arg2 = *(G2 **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); - return ; - } - neg(*arg1,(G2 const &)*arg2); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_dbl_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { - G2 *arg1 = 0 ; - G2 *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(G2 **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 & reference is null"); - return ; - } - arg2 = *(G2 **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); - return ; - } - dbl(*arg1,(G2 const &)*arg2); -} - - -SWIGEXPORT void 
JNICALL Java_com_herumi_mcl_Bn256JNI_add_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { - G2 *arg1 = 0 ; - G2 *arg2 = 0 ; - G2 *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - (void)jarg3_; - arg1 = *(G2 **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 & reference is null"); - return ; - } - arg2 = *(G2 **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); - return ; - } - arg3 = *(G2 **)&jarg3; - if (!arg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); - return ; - } - add(*arg1,(G2 const &)*arg2,(G2 const &)*arg3); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_sub_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { - G2 *arg1 = 0 ; - G2 *arg2 = 0 ; - G2 *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - (void)jarg3_; - arg1 = *(G2 **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 & reference is null"); - return ; - } - arg2 = *(G2 **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); - return ; - } - arg3 = *(G2 **)&jarg3; - if (!arg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); - return ; - } - sub(*arg1,(G2 const &)*arg2,(G2 const &)*arg3); -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G2_1_1SWIG_10(JNIEnv *jenv, jclass jcls) { - jlong jresult = 0 ; - G2 *result = 0 ; - - (void)jenv; - (void)jcls; - result = (G2 *)new G2(); - *(G2 **)&jresult = result; - return jresult; -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G2_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - jlong jresult = 0 ; - G2 *arg1 = 0 ; - G2 *result = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(G2 **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); - return 0; - } - result = (G2 *)new G2((G2 const &)*arg1); - *(G2 **)&jresult = result; - return jresult; -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1G2_1_1SWIG_12(JNIEnv *jenv, jclass jcls, jstring jarg1, jstring jarg2, jstring jarg3, jstring jarg4) { - jlong jresult = 0 ; - std::string *arg1 = 0 ; - std::string *arg2 = 0 ; - std::string *arg3 = 0 ; - std::string *arg4 = 0 ; - G2 *result = 0 ; - - (void)jenv; - (void)jcls; - if(!jarg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return 0; - } - const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); - if (!arg1_pstr) return 0; - std::string arg1_str(arg1_pstr); - arg1 = &arg1_str; - jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return 0; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return 0; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - if(!jarg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return 0; - } - const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0); - if (!arg3_pstr) return 0; - 
std::string arg3_str(arg3_pstr); - arg3 = &arg3_str; - jenv->ReleaseStringUTFChars(jarg3, arg3_pstr); - if(!jarg4) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return 0; - } - const char *arg4_pstr = (const char *)jenv->GetStringUTFChars(jarg4, 0); - if (!arg4_pstr) return 0; - std::string arg4_str(arg4_pstr); - arg4 = &arg4_str; - jenv->ReleaseStringUTFChars(jarg4, arg4_pstr); - try { - result = (G2 *)new G2((std::string const &)*arg1,(std::string const &)*arg2,(std::string const &)*arg3,(std::string const &)*arg4); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - *(G2 **)&jresult = result; - return jresult; -} - - -SWIGEXPORT jboolean JNICALL Java_com_herumi_mcl_Bn256JNI_G2_1equals(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { - jboolean jresult = 0 ; - G2 *arg1 = (G2 *) 0 ; - G2 *arg2 = 0 ; - bool result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(G2 **)&jarg1; - arg2 = *(G2 **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "G2 const & reference is null"); - return 0; - } - result = (bool)((G2 const *)arg1)->equals((G2 const &)*arg2); - jresult = (jboolean)result; - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G2_1set(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2, jstring jarg3, jstring jarg4, jstring jarg5) { - G2 *arg1 = (G2 *) 0 ; - std::string *arg2 = 0 ; - std::string *arg3 = 0 ; - std::string *arg4 = 0 ; - std::string *arg5 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(G2 **)&jarg1; - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return ; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - if(!jarg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0); - if (!arg3_pstr) return ; - std::string arg3_str(arg3_pstr); - arg3 = &arg3_str; - jenv->ReleaseStringUTFChars(jarg3, arg3_pstr); - if(!jarg4) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg4_pstr = (const char *)jenv->GetStringUTFChars(jarg4, 0); - if (!arg4_pstr) return ; - std::string arg4_str(arg4_pstr); - arg4 = &arg4_str; - jenv->ReleaseStringUTFChars(jarg4, arg4_pstr); - if(!jarg5) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg5_pstr = (const char *)jenv->GetStringUTFChars(jarg5, 0); - if (!arg5_pstr) return ; - std::string arg5_str(arg5_pstr); - arg5 = &arg5_str; - jenv->ReleaseStringUTFChars(jarg5, arg5_pstr); - (arg1)->set((std::string const &)*arg2,(std::string const &)*arg3,(std::string const &)*arg4,(std::string const &)*arg5); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G2_1clear(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - G2 *arg1 = (G2 *) 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(G2 **)&jarg1; - (arg1)->clear(); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_G2_1setStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { - G2 *arg1 = (G2 *) 0 ; - std::string *arg2 = 0 ; - - (void)jenv; 
- (void)jcls; - (void)jarg1_; - arg1 = *(G2 **)&jarg1; - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return ; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - try { - (arg1)->setStr((std::string const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_Bn256JNI_G2_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - jstring jresult = 0 ; - G2 *arg1 = (G2 *) 0 ; - std::string result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(G2 **)&jarg1; - try { - result = ((G2 const *)arg1)->toString(); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - jresult = jenv->NewStringUTF((&result)->c_str()); - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_delete_1G2(JNIEnv *jenv, jclass jcls, jlong jarg1) { - G2 *arg1 = (G2 *) 0 ; - - (void)jenv; - (void)jcls; - arg1 = *(G2 **)&jarg1; - delete arg1; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_mul_1_1SWIG_13(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_) { - GT *arg1 = 0 ; - GT *arg2 = 0 ; - GT *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - (void)jarg3_; - arg1 = *(GT **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT & reference is null"); - return ; - } - arg2 = *(GT **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT const & reference is null"); - return ; - } - arg3 = *(GT **)&jarg3; - if (!arg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT const & reference is null"); - return ; - } - mul(*arg1,(GT const &)*arg2,(GT const &)*arg3); -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1GT_1_1SWIG_10(JNIEnv *jenv, jclass jcls) { - jlong jresult = 0 ; - GT *result = 0 ; - - (void)jenv; - (void)jcls; - result = (GT *)new GT(); - *(GT **)&jresult = result; - return jresult; -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_Bn256JNI_new_1GT_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - jlong jresult = 0 ; - GT *arg1 = 0 ; - GT *result = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(GT **)&jarg1; - if (!arg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT const & reference is null"); - return 0; - } - result = (GT *)new GT((GT const &)*arg1); - *(GT **)&jresult = result; - return jresult; -} - - -SWIGEXPORT jboolean JNICALL Java_com_herumi_mcl_Bn256JNI_GT_1equals(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { - jboolean jresult = 0 ; - GT *arg1 = (GT *) 0 ; - GT *arg2 = 0 ; - bool result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(GT **)&jarg1; - arg2 = *(GT **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "GT const & reference is null"); - return 0; - } - result = (bool)((GT const *)arg1)->equals((GT const &)*arg2); - jresult = (jboolean)result; - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_GT_1clear(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject 
jarg1_) { - GT *arg1 = (GT *) 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(GT **)&jarg1; - (arg1)->clear(); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_GT_1setStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { - GT *arg1 = (GT *) 0 ; - std::string *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(GT **)&jarg1; - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return ; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - try { - (arg1)->setStr((std::string const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_Bn256JNI_GT_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - jstring jresult = 0 ; - GT *arg1 = (GT *) 0 ; - std::string result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(GT **)&jarg1; - try { - result = ((GT const *)arg1)->toString(); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - jresult = jenv->NewStringUTF((&result)->c_str()); - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_Bn256JNI_delete_1GT(JNIEnv *jenv, jclass jcls, jlong jarg1) { - GT *arg1 = (GT *) 0 ; - - (void)jenv; - (void)jcls; - arg1 = *(GT **)&jarg1; - delete arg1; -} - - -#ifdef __cplusplus -} -#endif - diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/java/elgamal.i b/vendor/github.com/dexon-foundation/mcl/ffi/java/elgamal.i deleted file mode 100644 index 410723174..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/java/elgamal.i +++ /dev/null @@ -1,28 +0,0 @@ -%module Elgamal - -%include "std_string.i" -%include "std_except.i" - - -%{ -#include -#include -#include -#include -struct Param { -const mcl::EcParam *ecParam; -cybozu::RandomGenerator rg; -cybozu::crypto::Hash::Name hashName; -static inline Param& getParam() -{ - static Param p; - return p; -} -}; - -#include "elgamal_impl.hpp" -%} -%include cpointer.i -%pointer_functions(bool, p_bool); - -%include "elgamal_impl.hpp" diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/java/elgamal_impl.hpp b/vendor/github.com/dexon-foundation/mcl/ffi/java/elgamal_impl.hpp deleted file mode 100644 index dbf2ba64e..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/java/elgamal_impl.hpp +++ /dev/null @@ -1,147 +0,0 @@ -#pragma once -//#define MCL_MAX_BIT_SIZE 521 -#include -#include -#include -#include -#include -#include -#include - -typedef mcl::FpT<> Fp; -typedef mcl::FpT<mcl::ZnTag> Zn; -typedef mcl::EcT<Fp> Ec; -typedef mcl::ElgamalT<Ec, Zn> Elgamal; - -/* - init system - @param param [in] string such as "ecParamName hashName" - @note NOT thread safe because setting global parameters of elliptic curve - ex1) "secp192k1 sha256" // 192bit security + sha256 - ex2) "secp160k1 sha1" // 160bit security + sha1 - hashName : sha1 sha224 sha256 sha384 sha512 -*/ -void SystemInit(const std::string& param) throw(std::exception) -{ - std::istringstream iss(param); - std::string ecParamStr; - std::string hashNameStr; - if (iss >> ecParamStr >> hashNameStr) { - Param& p = Param::getParam(); - p.ecParam = mcl::getEcParam(ecParamStr); - Zn::init(p.ecParam->n); - Fp::init(p.ecParam->p); - Ec::init(p.ecParam->a, p.ecParam->b); - 
p.hashName = cybozu::crypto::Hash::getName(hashNameStr); - return; - } - throw cybozu::Exception("SystemInit:bad param") << param; -} - -class CipherText { - Elgamal::CipherText self_; - friend class PublicKey; - friend class PrivateKey; -public: - std::string toStr() const throw(std::exception) { return self_.toStr(); } - std::string toString() const throw(std::exception) { return toStr(); } - void fromStr(const std::string& str) throw(std::exception) { self_.fromStr(str); } - - void add(const CipherText& c) throw(std::exception) { self_.add(c.self_); } - void mul(int m) throw(std::exception) - { - self_.mul(m); - } - void mul(const std::string& str) throw(std::exception) - { - Zn zn(str); - self_.mul(zn); - } -}; - -class PublicKey { - Elgamal::PublicKey self_; - friend class PrivateKey; -public: - std::string toStr() const throw(std::exception) { return self_.toStr(); } - std::string toString() const throw(std::exception) { return toStr(); } - void fromStr(const std::string& str) throw(std::exception) { self_.fromStr(str); } - - void save(const std::string& fileName) const throw(std::exception) - { - std::ofstream ofs(fileName.c_str(), std::ios::binary); - if (!(ofs << self_)) throw cybozu::Exception("PublicKey:save") << fileName; - } - void load(const std::string& fileName) throw(std::exception) - { - std::ifstream ifs(fileName.c_str(), std::ios::binary); - if (!(ifs >> self_)) throw cybozu::Exception("PublicKey:load") << fileName; - } - void enc(CipherText& c, int m) const throw(std::exception) - { - self_.enc(c.self_, m, Param::getParam().rg); - } - void enc(CipherText& c, const std::string& str) const throw(std::exception) - { - Zn zn(str); - self_.enc(c.self_, zn, Param::getParam().rg); - } - void rerandomize(CipherText& c) const throw(std::exception) - { - self_.rerandomize(c.self_, Param::getParam().rg); - } - void add(CipherText& c, int m) const throw(std::exception) - { - self_.add(c.self_, m); - } - void add(CipherText& c, const std::string& str) const throw(std::exception) - { - Zn zn(str); - self_.add(c.self_, zn); - } -}; - -class PrivateKey { - Elgamal::PrivateKey self_; -public: - std::string toStr() const throw(std::exception) { return self_.toStr(); } - std::string toString() const throw(std::exception) { return toStr(); } - void fromStr(const std::string& str) throw(std::exception) { self_.fromStr(str); } - - void save(const std::string& fileName) const throw(std::exception) - { - std::ofstream ofs(fileName.c_str(), std::ios::binary); - if (!(ofs << self_)) throw cybozu::Exception("PrivateKey:save") << fileName; - } - void load(const std::string& fileName) throw(std::exception) - { - std::ifstream ifs(fileName.c_str(), std::ios::binary); - if (!(ifs >> self_)) throw cybozu::Exception("PrivateKey:load") << fileName; - } - void init() throw(std::exception) - { - Param& p = Param::getParam(); - const Fp x0(p.ecParam->gx); - const Fp y0(p.ecParam->gy); - Ec P(x0, y0); - self_.init(P, Zn::getBitSize(), p.rg); - } - PublicKey getPublicKey() const throw(std::exception) - { - PublicKey ret; - ret.self_ = self_.getPublicKey(); - return ret; - } - int dec(const CipherText& c, bool *b = 0) const throw(std::exception) - { - return self_.dec(c.self_, b); - } - void setCache(int rangeMin, int rangeMax) throw(std::exception) - { - self_.setCache(rangeMin, rangeMax); - } - void clearCache() throw(std::exception) - { - self_.clearCache(); - } -}; diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/java/elgamal_wrap.cxx 
b/vendor/github.com/dexon-foundation/mcl/ffi/java/elgamal_wrap.cxx deleted file mode 100644 index 38d05f489..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/java/elgamal_wrap.cxx +++ /dev/null @@ -1,1129 +0,0 @@ -/* ---------------------------------------------------------------------------- - * This file was automatically generated by SWIG (http://www.swig.org). - * Version 3.0.12 - * - * This file is not intended to be easily readable and contains a number of - * coding conventions designed to improve portability and efficiency. Do not make - * changes to this file unless you know what you are doing--modify the SWIG - * interface file instead. - * ----------------------------------------------------------------------------- */ - - -#ifndef SWIGJAVA -#define SWIGJAVA -#endif - - - -#ifdef __cplusplus -/* SwigValueWrapper is described in swig.swg */ -template<typename T> class SwigValueWrapper { - struct SwigMovePointer { - T *ptr; - SwigMovePointer(T *p) : ptr(p) { } - ~SwigMovePointer() { delete ptr; } - SwigMovePointer& operator=(SwigMovePointer& rhs) { T* oldptr = ptr; ptr = 0; delete oldptr; ptr = rhs.ptr; rhs.ptr = 0; return *this; } - } pointer; - SwigValueWrapper& operator=(const SwigValueWrapper<T>& rhs); - SwigValueWrapper(const SwigValueWrapper<T>& rhs); -public: - SwigValueWrapper() : pointer(0) { } - SwigValueWrapper& operator=(const T& t) { SwigMovePointer tmp(new T(t)); pointer = tmp; return *this; } - operator T&() const { return *pointer.ptr; } - T *operator&() { return pointer.ptr; } -}; - -template <typename T> T SwigValueInit() { - return T(); -} -#endif - -/* ----------------------------------------------------------------------------- - * This section contains generic SWIG labels for method/variable - * declarations/attributes, and other compiler dependent labels.
- * ----------------------------------------------------------------------------- */ - -/* template workaround for compilers that cannot correctly implement the C++ standard */ -#ifndef SWIGTEMPLATEDISAMBIGUATOR -# if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) -# define SWIGTEMPLATEDISAMBIGUATOR template -# elif defined(__HP_aCC) -/* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ -/* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ -# define SWIGTEMPLATEDISAMBIGUATOR template -# else -# define SWIGTEMPLATEDISAMBIGUATOR -# endif -#endif - -/* inline attribute */ -#ifndef SWIGINLINE -# if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) -# define SWIGINLINE inline -# else -# define SWIGINLINE -# endif -#endif - -/* attribute recognised by some compilers to avoid 'unused' warnings */ -#ifndef SWIGUNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define SWIGUNUSED __attribute__ ((__unused__)) -# else -# define SWIGUNUSED -# endif -# elif defined(__ICC) -# define SWIGUNUSED __attribute__ ((__unused__)) -# else -# define SWIGUNUSED -# endif -#endif - -#ifndef SWIG_MSC_UNSUPPRESS_4505 -# if defined(_MSC_VER) -# pragma warning(disable : 4505) /* unreferenced local function has been removed */ -# endif -#endif - -#ifndef SWIGUNUSEDPARM -# ifdef __cplusplus -# define SWIGUNUSEDPARM(p) -# else -# define SWIGUNUSEDPARM(p) p SWIGUNUSED -# endif -#endif - -/* internal SWIG method */ -#ifndef SWIGINTERN -# define SWIGINTERN static SWIGUNUSED -#endif - -/* internal inline SWIG method */ -#ifndef SWIGINTERNINLINE -# define SWIGINTERNINLINE SWIGINTERN SWIGINLINE -#endif - -/* exporting methods */ -#if defined(__GNUC__) -# if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) -# ifndef GCC_HASCLASSVISIBILITY -# define GCC_HASCLASSVISIBILITY -# endif -# endif -#endif - -#ifndef SWIGEXPORT -# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) -# if defined(STATIC_LINKED) -# define SWIGEXPORT -# else -# define SWIGEXPORT __declspec(dllexport) -# endif -# else -# if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) -# define SWIGEXPORT __attribute__ ((visibility("default"))) -# else -# define SWIGEXPORT -# endif -# endif -#endif - -/* calling conventions for Windows */ -#ifndef SWIGSTDCALL -# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) -# define SWIGSTDCALL __stdcall -# else -# define SWIGSTDCALL -# endif -#endif - -/* Deal with Microsoft's attempt at deprecating C standard runtime functions */ -#if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) -# define _CRT_SECURE_NO_DEPRECATE -#endif - -/* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ -#if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) -# define _SCL_SECURE_NO_DEPRECATE -#endif - -/* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ -#if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) -# define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 -#endif - -/* Intel's compiler complains if a variable which was never initialised is - * cast to void, which is a common idiom which we use to indicate that we - * are aware a variable isn't used. So we just silence that warning. 
- * See: https://github.com/swig/swig/issues/192 for more discussion. - */ -#ifdef __INTEL_COMPILER -# pragma warning disable 592 -#endif - - -/* Fix for jlong on some versions of gcc on Windows */ -#if defined(__GNUC__) && !defined(__INTEL_COMPILER) - typedef long long __int64; -#endif - -/* Fix for jlong on 64-bit x86 Solaris */ -#if defined(__x86_64) -# ifdef _LP64 -# undef _LP64 -# endif -#endif - -#include <jni.h> -#include <stdlib.h> -#include <string.h> - - -/* Support for throwing Java exceptions */ -typedef enum { - SWIG_JavaOutOfMemoryError = 1, - SWIG_JavaIOException, - SWIG_JavaRuntimeException, - SWIG_JavaIndexOutOfBoundsException, - SWIG_JavaArithmeticException, - SWIG_JavaIllegalArgumentException, - SWIG_JavaNullPointerException, - SWIG_JavaDirectorPureVirtual, - SWIG_JavaUnknownError -} SWIG_JavaExceptionCodes; - -typedef struct { - SWIG_JavaExceptionCodes code; - const char *java_exception; -} SWIG_JavaExceptions_t; - - -static void SWIGUNUSED SWIG_JavaThrowException(JNIEnv *jenv, SWIG_JavaExceptionCodes code, const char *msg) { - jclass excep; - static const SWIG_JavaExceptions_t java_exceptions[] = { - { SWIG_JavaOutOfMemoryError, "java/lang/OutOfMemoryError" }, - { SWIG_JavaIOException, "java/io/IOException" }, - { SWIG_JavaRuntimeException, "java/lang/RuntimeException" }, - { SWIG_JavaIndexOutOfBoundsException, "java/lang/IndexOutOfBoundsException" }, - { SWIG_JavaArithmeticException, "java/lang/ArithmeticException" }, - { SWIG_JavaIllegalArgumentException, "java/lang/IllegalArgumentException" }, - { SWIG_JavaNullPointerException, "java/lang/NullPointerException" }, - { SWIG_JavaDirectorPureVirtual, "java/lang/RuntimeException" }, - { SWIG_JavaUnknownError, "java/lang/UnknownError" }, - { (SWIG_JavaExceptionCodes)0, "java/lang/UnknownError" } - }; - const SWIG_JavaExceptions_t *except_ptr = java_exceptions; - - while (except_ptr->code != code && except_ptr->code) - except_ptr++; - - jenv->ExceptionClear(); - excep = jenv->FindClass(except_ptr->java_exception); - if (excep) - jenv->ThrowNew(excep, msg); -} - - -/* Contract support */ - -#define SWIG_contract_assert(nullreturn, expr, msg) if (!(expr)) {SWIG_JavaThrowException(jenv, SWIG_JavaIllegalArgumentException, msg); return nullreturn; } else - - -#include <string> - - -#include <typeinfo> -#include <stdexcept> - - -#include -#include -#include -#include -struct Param { -const mcl::EcParam *ecParam; -cybozu::RandomGenerator rg; -cybozu::crypto::Hash::Name hashName; -static inline Param& getParam() -{ - static Param p; - return p; -} -}; - -#include "elgamal_impl.hpp" - - -static bool *new_p_bool() { - return new bool(); -} - -static bool *copy_p_bool(bool value) { - return new bool(value); -} - -static void delete_p_bool(bool *obj) { - if (obj) delete obj; -} - -static void p_bool_assign(bool *obj, bool value) { - *obj = value; -} - -static bool p_bool_value(bool *obj) { - return *obj; -} - - -#ifdef __cplusplus -extern "C" { -#endif - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_new_1p_1bool(JNIEnv *jenv, jclass jcls) { - jlong jresult = 0 ; - bool *result = 0 ; - - (void)jenv; - (void)jcls; - result = (bool *)new_p_bool(); - *(bool **)&jresult = result; - return jresult; -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_copy_1p_1bool(JNIEnv *jenv, jclass jcls, jboolean jarg1) { - jlong jresult = 0 ; - bool arg1 ; - bool *result = 0 ; - - (void)jenv; - (void)jcls; - arg1 = jarg1 ?
true : false; - result = (bool *)copy_p_bool(arg1); - *(bool **)&jresult = result; - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_delete_1p_1bool(JNIEnv *jenv, jclass jcls, jlong jarg1) { - bool *arg1 = (bool *) 0 ; - - (void)jenv; - (void)jcls; - arg1 = *(bool **)&jarg1; - delete_p_bool(arg1); -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_p_1bool_1assign(JNIEnv *jenv, jclass jcls, jlong jarg1, jboolean jarg2) { - bool *arg1 = (bool *) 0 ; - bool arg2 ; - - (void)jenv; - (void)jcls; - arg1 = *(bool **)&jarg1; - arg2 = jarg2 ? true : false; - p_bool_assign(arg1,arg2); -} - - -SWIGEXPORT jboolean JNICALL Java_com_herumi_mcl_ElgamalJNI_p_1bool_1value(JNIEnv *jenv, jclass jcls, jlong jarg1) { - jboolean jresult = 0 ; - bool *arg1 = (bool *) 0 ; - bool result; - - (void)jenv; - (void)jcls; - arg1 = *(bool **)&jarg1; - result = (bool)p_bool_value(arg1); - jresult = (jboolean)result; - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_SystemInit(JNIEnv *jenv, jclass jcls, jstring jarg1) { - std::string *arg1 = 0 ; - - (void)jenv; - (void)jcls; - if(!jarg1) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); - if (!arg1_pstr) return ; - std::string arg1_str(arg1_pstr); - arg1 = &arg1_str; - jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); - try { - SystemInit((std::string const &)*arg1); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1toStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - jstring jresult = 0 ; - CipherText *arg1 = (CipherText *) 0 ; - std::string result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(CipherText **)&jarg1; - try { - result = ((CipherText const *)arg1)->toStr(); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - jresult = jenv->NewStringUTF((&result)->c_str()); - return jresult; -} - - -SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - jstring jresult = 0 ; - CipherText *arg1 = (CipherText *) 0 ; - std::string result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(CipherText **)&jarg1; - try { - result = ((CipherText const *)arg1)->toString(); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - jresult = jenv->NewStringUTF((&result)->c_str()); - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1fromStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { - CipherText *arg1 = (CipherText *) 0 ; - std::string *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(CipherText **)&jarg1; - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return ; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - try { - (arg1)->fromStr((std::string const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - 
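The ElgamalJNI entry points in this deleted wrapper are thin forwarders into the C++ classes declared in elgamal_impl.hpp above. For orientation, a minimal sketch of the same encrypt/add/decrypt flow driven directly against that C++ layer; this is illustrative only (the main() and <cassert> are not part of the deleted file) and assumes elgamal_impl.hpp and its mcl/cybozu dependencies are on the include path. The curve/hash string follows the SystemInit doc comment.

    #include <cassert>
    #include "elgamal_impl.hpp"

    int main()
    {
        // One-time curve + hash setup; not thread safe, per the SystemInit note.
        SystemInit("secp192k1 sha256");

        PrivateKey prv;
        prv.init();                           // fresh random secret key on the curve
        PublicKey pub = prv.getPublicKey();

        CipherText c1, c2;
        pub.enc(c1, 12);                      // c1 = Enc(12)
        pub.enc(c2, 30);                      // c2 = Enc(30)
        c1.add(c2);                           // additive homomorphism: c1 = Enc(42)
        pub.rerandomize(c1);                  // same plaintext, fresh randomness

        bool ok = false;
        int m = prv.dec(c1, &ok);             // ok reports whether decryption succeeded
        assert(ok && m == 42);
        return 0;
    }

The generated Java API exposes the same surface method for method (SystemInit, PrivateKey.init/getPublicKey/dec, PublicKey.enc/add/rerandomize, CipherText.add/mul), as the JNI entry points in this hunk show.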
-SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1add(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { - CipherText *arg1 = (CipherText *) 0 ; - CipherText *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(CipherText **)&jarg1; - arg2 = *(CipherText **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText const & reference is null"); - return ; - } - try { - (arg1)->add((CipherText const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1mul_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2) { - CipherText *arg1 = (CipherText *) 0 ; - int arg2 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(CipherText **)&jarg1; - arg2 = (int)jarg2; - try { - (arg1)->mul(arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_CipherText_1mul_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { - CipherText *arg1 = (CipherText *) 0 ; - std::string *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(CipherText **)&jarg1; - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return ; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - try { - (arg1)->mul((std::string const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_new_1CipherText(JNIEnv *jenv, jclass jcls) { - jlong jresult = 0 ; - CipherText *result = 0 ; - - (void)jenv; - (void)jcls; - result = (CipherText *)new CipherText(); - *(CipherText **)&jresult = result; - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_delete_1CipherText(JNIEnv *jenv, jclass jcls, jlong jarg1) { - CipherText *arg1 = (CipherText *) 0 ; - - (void)jenv; - (void)jcls; - arg1 = *(CipherText **)&jarg1; - delete arg1; -} - - -SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1toStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - jstring jresult = 0 ; - PublicKey *arg1 = (PublicKey *) 0 ; - std::string result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(PublicKey **)&jarg1; - try { - result = ((PublicKey const *)arg1)->toStr(); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - jresult = jenv->NewStringUTF((&result)->c_str()); - return jresult; -} - - -SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - jstring jresult = 0 ; - PublicKey *arg1 = (PublicKey *) 0 ; - std::string result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(PublicKey **)&jarg1; - try { - result = ((PublicKey const *)arg1)->toString(); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - jresult = 
jenv->NewStringUTF((&result)->c_str()); - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1fromStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { - PublicKey *arg1 = (PublicKey *) 0 ; - std::string *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(PublicKey **)&jarg1; - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return ; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - try { - (arg1)->fromStr((std::string const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1save(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { - PublicKey *arg1 = (PublicKey *) 0 ; - std::string *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(PublicKey **)&jarg1; - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return ; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - try { - ((PublicKey const *)arg1)->save((std::string const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1load(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { - PublicKey *arg1 = (PublicKey *) 0 ; - std::string *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(PublicKey **)&jarg1; - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return ; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - try { - (arg1)->load((std::string const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1enc_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jint jarg3) { - PublicKey *arg1 = (PublicKey *) 0 ; - CipherText *arg2 = 0 ; - int arg3 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(PublicKey **)&jarg1; - arg2 = *(CipherText **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText & reference is null"); - return ; - } - arg3 = (int)jarg3; - try { - ((PublicKey const *)arg1)->enc(*arg2,arg3); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1enc_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jstring jarg3) { - PublicKey *arg1 = (PublicKey *) 0 ; - CipherText *arg2 = 0 ; - std::string *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(PublicKey **)&jarg1; - arg2 = 
*(CipherText **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText & reference is null"); - return ; - } - if(!jarg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0); - if (!arg3_pstr) return ; - std::string arg3_str(arg3_pstr); - arg3 = &arg3_str; - jenv->ReleaseStringUTFChars(jarg3, arg3_pstr); - try { - ((PublicKey const *)arg1)->enc(*arg2,(std::string const &)*arg3); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1rerandomize(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { - PublicKey *arg1 = (PublicKey *) 0 ; - CipherText *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(PublicKey **)&jarg1; - arg2 = *(CipherText **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText & reference is null"); - return ; - } - try { - ((PublicKey const *)arg1)->rerandomize(*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1add_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jint jarg3) { - PublicKey *arg1 = (PublicKey *) 0 ; - CipherText *arg2 = 0 ; - int arg3 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(PublicKey **)&jarg1; - arg2 = *(CipherText **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText & reference is null"); - return ; - } - arg3 = (int)jarg3; - try { - ((PublicKey const *)arg1)->add(*arg2,arg3); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PublicKey_1add_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jstring jarg3) { - PublicKey *arg1 = (PublicKey *) 0 ; - CipherText *arg2 = 0 ; - std::string *arg3 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(PublicKey **)&jarg1; - arg2 = *(CipherText **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText & reference is null"); - return ; - } - if(!jarg3) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0); - if (!arg3_pstr) return ; - std::string arg3_str(arg3_pstr); - arg3 = &arg3_str; - jenv->ReleaseStringUTFChars(jarg3, arg3_pstr); - try { - ((PublicKey const *)arg1)->add(*arg2,(std::string const &)*arg3); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_new_1PublicKey(JNIEnv *jenv, jclass jcls) { - jlong jresult = 0 ; - PublicKey *result = 0 ; - - (void)jenv; - (void)jcls; - result = (PublicKey *)new PublicKey(); - *(PublicKey **)&jresult = result; - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_delete_1PublicKey(JNIEnv *jenv, jclass jcls, jlong jarg1) { - PublicKey *arg1 = (PublicKey *) 0 
; - - (void)jenv; - (void)jcls; - arg1 = *(PublicKey **)&jarg1; - delete arg1; -} - - -SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1toStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - jstring jresult = 0 ; - PrivateKey *arg1 = (PrivateKey *) 0 ; - std::string result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(PrivateKey **)&jarg1; - try { - result = ((PrivateKey const *)arg1)->toStr(); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - jresult = jenv->NewStringUTF((&result)->c_str()); - return jresult; -} - - -SWIGEXPORT jstring JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1toString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - jstring jresult = 0 ; - PrivateKey *arg1 = (PrivateKey *) 0 ; - std::string result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(PrivateKey **)&jarg1; - try { - result = ((PrivateKey const *)arg1)->toString(); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - jresult = jenv->NewStringUTF((&result)->c_str()); - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1fromStr(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { - PrivateKey *arg1 = (PrivateKey *) 0 ; - std::string *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(PrivateKey **)&jarg1; - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return ; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - try { - (arg1)->fromStr((std::string const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1save(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { - PrivateKey *arg1 = (PrivateKey *) 0 ; - std::string *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(PrivateKey **)&jarg1; - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return ; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - try { - ((PrivateKey const *)arg1)->save((std::string const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1load(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { - PrivateKey *arg1 = (PrivateKey *) 0 ; - std::string *arg2 = 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(PrivateKey **)&jarg1; - if(!jarg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string"); - return ; - } - const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); - if (!arg2_pstr) return ; - std::string arg2_str(arg2_pstr); - arg2 = &arg2_str; - jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); - try { - (arg1)->load((std::string const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, 
SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1init(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - PrivateKey *arg1 = (PrivateKey *) 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(PrivateKey **)&jarg1; - try { - (arg1)->init(); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1getPublicKey(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - jlong jresult = 0 ; - PrivateKey *arg1 = (PrivateKey *) 0 ; - PublicKey result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(PrivateKey **)&jarg1; - try { - result = ((PrivateKey const *)arg1)->getPublicKey(); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - *(PublicKey **)&jresult = new PublicKey((const PublicKey &)result); - return jresult; -} - - -SWIGEXPORT jint JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1dec_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3) { - jint jresult = 0 ; - PrivateKey *arg1 = (PrivateKey *) 0 ; - CipherText *arg2 = 0 ; - bool *arg3 = (bool *) 0 ; - int result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(PrivateKey **)&jarg1; - arg2 = *(CipherText **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText const & reference is null"); - return 0; - } - arg3 = *(bool **)&jarg3; - try { - result = (int)((PrivateKey const *)arg1)->dec((CipherText const &)*arg2,arg3); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - jresult = (jint)result; - return jresult; -} - - -SWIGEXPORT jint JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1dec_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { - jint jresult = 0 ; - PrivateKey *arg1 = (PrivateKey *) 0 ; - CipherText *arg2 = 0 ; - int result; - - (void)jenv; - (void)jcls; - (void)jarg1_; - (void)jarg2_; - arg1 = *(PrivateKey **)&jarg1; - arg2 = *(CipherText **)&jarg2; - if (!arg2) { - SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "CipherText const & reference is null"); - return 0; - } - try { - result = (int)((PrivateKey const *)arg1)->dec((CipherText const &)*arg2); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return 0; - } - - jresult = (jint)result; - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1setCache(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2, jint jarg3) { - PrivateKey *arg1 = (PrivateKey *) 0 ; - int arg2 ; - int arg3 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(PrivateKey **)&jarg1; - arg2 = (int)jarg2; - arg3 = (int)jarg3; - try { - (arg1)->setCache(arg2,arg3); - } - catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_PrivateKey_1clearCache(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { - PrivateKey *arg1 = (PrivateKey *) 0 ; - - (void)jenv; - (void)jcls; - (void)jarg1_; - arg1 = *(PrivateKey **)&jarg1; - try { - (arg1)->clearCache(); - } - 
catch(std::exception &_e) { - SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, (&_e)->what()); - return ; - } - -} - - -SWIGEXPORT jlong JNICALL Java_com_herumi_mcl_ElgamalJNI_new_1PrivateKey(JNIEnv *jenv, jclass jcls) { - jlong jresult = 0 ; - PrivateKey *result = 0 ; - - (void)jenv; - (void)jcls; - result = (PrivateKey *)new PrivateKey(); - *(PrivateKey **)&jresult = result; - return jresult; -} - - -SWIGEXPORT void JNICALL Java_com_herumi_mcl_ElgamalJNI_delete_1PrivateKey(JNIEnv *jenv, jclass jcls, jlong jarg1) { - PrivateKey *arg1 = (PrivateKey *) 0 ; - - (void)jenv; - (void)jcls; - arg1 = *(PrivateKey **)&jarg1; - delete arg1; -} - - -#ifdef __cplusplus -} -#endif - diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/java/java.md b/vendor/github.com/dexon-foundation/mcl/ffi/java/java.md deleted file mode 100644 index 3fe861351..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/java/java.md +++ /dev/null @@ -1,95 +0,0 @@ -# JNI for mcl (experimental) -This library provides functionality to compute the optimal ate pairing -over Barreto-Naehrig (BN) curves. - -# Initialization -Load the library `mcl_bn256`. -``` -import com.herumi.mcl.*; - -System.loadLibrary("mcl_bn256"); -``` - -# Classes -* `G1` ; The cyclic group instantiated as E(Fp)[r] where where r = p + 1 - t. -* `G2` ; The cyclic group instantiated as the inverse image of E'(Fp^2)[r]. -* `GT` ; The cyclic group in the image of the optimal ate pairing. - * `e : G1 x G2 -> GT` -* `Fr` ; The finite field with characteristic r. - -# Methods and Functions -## Fr -* `Fr::setInt(int x)` ; set by x -* `Fr::setStr(String str)` ; set by str such as "123", "0xfff", etc. -* `Fr::setRand()` ; randomly set -* `Bn256.neg(Fr y, Fr x)` ; `y = -x` -* `Bn256.add(Fr z, Fr x, Fr y)` ; `z = x + y` -* `Bn256.sub(Fr z, Fr x, Fr y)` ; `z = x - y` -* `Bn256.mul(Fr z, Fr x, Fr y)` ; `z = x * y` -* `Bn256.div(Fr z, Fr x, Fr y)` ; `z = x / y` - -## G1 - -* `G1::set(String x, String y)` ; set by (x, y) -* `G1::hashAndMapToG1(String m)` ; take SHA-256 of m and map it to an element of G1 -* `G1::setStr(String str)` ; set by the result of `toString()` method -* `Bn256.neg(G1 y, G1 x)` ; `y = -x` -* `Bn256.dbl(G1 y, G1 x)` ; `y = 2x` -* `Bn256.add(G1 z, G1 x, G1 y)` ; `z = x + y` -* `Bn256.sub(G1 z, G1 x, G1 y)` ; `z = x - y` -* `Bn256.mul(G1 z, G1 x, Fr y)` ; `z = x * y` - -## G2 - -* `G2::set(String xa, String xb, String ya, String yb)` ; set by ((xa, xb), (ya, yb)) -* `G2::setStr(String str)` ; set by the result of `toString()` method -* `Bn256.neg(G2 y, G2 x)` ; `y = -x` -* `Bn256.dbl(G2 y, G2 x)` ; `y = 2x` -* `Bn256.add(G2 z, G2 x, G2 y)` ; `z = x + y` -* `Bn256.sub(G2 z, G2 x, G2 y)` ; `z = x - y` -* `Bn256.mul(G2 z, G2 x, Fr y)` ; `z = x * y` - -## GT - -* `GT::setStr(String str)` ; set by the result of `toString()` method -* `Bn256.mul(GT z, GT x, GT y)` ; `z = x * y` -* `Bn256.pow(GT z, GT x, Fr y)` ; `z = x ^ y` - -## pairing -* `Bn256.pairing(GT e, G1 P, G2 Q)` ; e = e(P, Q) - -# BLS signature sample -``` -String xa = "12723517038133731887338407189719511622662176727675373276651903807414909099441"; -String xb = "4168783608814932154536427934509895782246573715297911553964171371032945126671"; -String ya = "13891744915211034074451795021214165905772212241412891944830863846330766296736"; -String yb = "7937318970632701341203597196594272556916396164729705624521405069090520231616"; - -G2 Q = new G2(xa, xb, ya, yb); // fixed point of G2 - -Fr s = new Fr(); -s.setRand(); // secret key -G2 pub = new G2(); -Bn256.mul(pub, Q, s); // public key 
= sQ - -String m = "signature test"; -G1 H = new G1(); -H.hashAndMapToG1(m); // H = Hash(m) -G1 sign = new G1(); -Bn256.mul(sign, H, s); // signature of m = s H - -GT e1 = new GT(); -GT e2 = new GT(); -Bn256.pairing(e1, H, pub); // e1 = e(H, s Q) -Bn256.pairing(e2, sign, Q); // e2 = e(s H, Q); -assertBool("verify signature", e1.equals(e2)); -``` - -# Make test -``` -cd java -make test_bn256 -``` - -# Sample code -[Bn256Test.java](https://github.com/herumi/mcl/blob/master/java/Bn256Test.java) diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/java/make_wrap.bat b/vendor/github.com/dexon-foundation/mcl/ffi/java/make_wrap.bat deleted file mode 100644 index b7008bc02..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/java/make_wrap.bat +++ /dev/null @@ -1,23 +0,0 @@ -@echo off -call set-java-path.bat -set JAVA_INCLUDE=%JAVA_DIR%\include -set SWIG=..\..\..\..\p\swig\swig.exe -set PACKAGE_NAME=com.herumi.mcl -set PACKAGE_DIR=%PACKAGE_NAME:.=\% -if /i "%1"=="" ( - set NAME=elgamal -) else ( - set NAME=%1 -) - -echo [[run swig]] -mkdir %PACKAGE_DIR% -set TOP_DIR=../.. -%SWIG% -java -package %PACKAGE_NAME% -outdir %PACKAGE_DIR% -c++ -Wall %NAME%.i -echo [[make dll]] -cl /MT /DNOMINMAX /LD /Ox /DNDEBUG /EHsc %NAME%_wrap.cxx %TOP_DIR%/src/fp.cpp -DMCL_NO_AUTOLINK -I%JAVA_INCLUDE% -I%JAVA_INCLUDE%\win32 -I%TOP_DIR%/include -I%TOP_DIR%/../cybozulib/include -I%TOP_DIR%/../cybozulib_ext/include -I%TOP_DIR%/../xbyak /link /LIBPATH:%TOP_DIR%/../cybozulib_ext/lib /OUT:%TOP_DIR%/bin/mcl_%NAME%.dll - -call run-%NAME%.bat - -echo [[make jar]] -%JAVA_DIR%\bin\jar cvf mcl.jar com diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/java/run-bn256.bat b/vendor/github.com/dexon-foundation/mcl/ffi/java/run-bn256.bat deleted file mode 100644 index 903876ec6..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/java/run-bn256.bat +++ /dev/null @@ -1,9 +0,0 @@ -@echo off -echo [[compile Bn256Test.java]] -%JAVA_DIR%\bin\javac Bn256Test.java - -echo [[run Bn256Test]] -set TOP_DIR=..\.. -pushd %TOP_DIR%\bin -%JAVA_DIR%\bin\java -classpath ../ffi/java Bn256Test %1 %2 %3 %4 %5 %6 -popd diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/java/run-elgamal.bat b/vendor/github.com/dexon-foundation/mcl/ffi/java/run-elgamal.bat deleted file mode 100644 index 8b889a64c..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/java/run-elgamal.bat +++ /dev/null @@ -1,9 +0,0 @@ -@echo off -echo [[compile ElgamalTest.java]] -%JAVA_DIR%\bin\javac ElgamalTest.java - -echo [[run ElgamalTest]] -set TOP_DIR=..\.. 
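
The BLS flow in the java.md sample above maps directly onto mcl's native C++ API. A minimal sketch, assuming the `<mcl/bn256.hpp>` header and the `initPairing`, `hashAndMapToG1`, `hashAndMapToG2`, and `Fr::setByCSPRNG` entry points as they appear in the library's C++ headers; the java.md sample fixes Q from hard-coded coordinates, while this sketch derives it by hashing for brevity:

```
// BLS sign/verify sketch over BN254, mirroring the java.md sample above.
// A sketch under the stated assumptions, not a sample shipped with mcl.
#include <mcl/bn256.hpp>
#include <cassert>
#include <string>

int main()
{
    using namespace mcl::bn256;
    initPairing(); // set up BN254 curve parameters

    G2 Q;
    hashAndMapToG2(Q, "fixed generator seed", 20); // fixed point of G2

    Fr s;
    s.setByCSPRNG();    // secret key
    G2 pub;
    G2::mul(pub, Q, s); // public key = sQ

    const std::string m = "signature test";
    G1 H;
    hashAndMapToG1(H, m.c_str(), m.size()); // H = Hash(m)
    G1 sign;
    G1::mul(sign, H, s); // signature = sH

    Fp12 e1, e2;
    pairing(e1, H, pub);  // e(H, sQ)
    pairing(e2, sign, Q); // e(sH, Q)
    assert(e1 == e2);     // bilinearity: both equal e(H, Q)^s
    return 0;
}
```
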
-pushd %TOP_DIR%\bin -%JAVA_DIR%\bin\java -classpath ../ffi/java ElgamalTest %1 %2 %3 %4 %5 %6 -popd diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/java/set-java-path.bat b/vendor/github.com/dexon-foundation/mcl/ffi/java/set-java-path.bat deleted file mode 100644 index c66f81830..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/java/set-java-path.bat +++ /dev/null @@ -1,8 +0,0 @@ -@echo off -if "%JAVA_HOME%"=="" ( - set JAVA_DIR=c:/p/Java/jdk -) else ( - set JAVA_DIR=%JAVA_HOME% -) -echo JAVA_DIR=%JAVA_DIR% -rem set PATH=%PATH%;%JAVA_DIR%\bin diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/js/export-functions.py b/vendor/github.com/dexon-foundation/mcl/ffi/js/export-functions.py deleted file mode 100644 index 2a929564b..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/js/export-functions.py +++ /dev/null @@ -1,73 +0,0 @@ -import sys, re, argparse - -#RE_PROTOTYPE = re.compile(r'MCLBN_DLL_API\s\w\s\w\([^)]*\);') -RE_PROTOTYPE = re.compile(r'\w*\s(\w*)\s(\w*)\(([^)]*)\);') -def export_functions(args, fileNames, reToAddUnderscore): - modName = args.js - json = args.json - if not reToAddUnderscore: - reToAddUnderscore = r'(mclBn_init|setStr|getStr|[sS]erialize|setLittleEndian|setHashOf|hashAndMapTo|DecStr|HexStr|HashTo|blsSign|blsVerify|GetCurveOrder|GetFieldOrder|KeyShare|KeyRecover|blsSignatureRecover|blsInit)' - reSpecialFunctionName = re.compile(reToAddUnderscore) - if json: - print '[' - elif modName: - print 'function define_exported_' + modName + '(mod) {' - comma = '' - for fileName in fileNames: - with open(fileName, 'rb') as f: - for line in f.readlines(): - p = RE_PROTOTYPE.search(line) - if p: - ret = p.group(1) - name = p.group(2) - arg = p.group(3) - if json or modName: - retType = 'null' if ret == 'void' else 'number' - if arg == '' or arg == 'void': - paramNum = 0 - else: - paramNum = len(arg.split(',')) - if reSpecialFunctionName.search(name): - exportName = '_' + name # to wrap function - else: - exportName = name - if json: - print comma + '{' - if comma == '': - comma = ',' - print ' "name":"{0}",'.format(name) - print ' "exportName":"{0}",'.format(exportName) - print ' "ret":"{0}",'.format(retType) - print ' "args":[', - if paramNum > 0: - print '"number"' + (', "number"' * (paramNum - 1)), - print ']' - print '}' - else: - paramType = '[' + ("'number', " * paramNum) + ']' - print "{0} = mod.cwrap('{1}', '{2}', {3})".format(exportName, name, retType, paramType) - else: - print comma + "'_" + name + "'", - if comma == '': - comma = ',' - if json: - print ']' - elif modName: - print '}' - -def main(): - p = argparse.ArgumentParser('export_functions') - p.add_argument('header', type=str, nargs='+', help='headers') - p.add_argument('-js', type=str, nargs='?', help='module name') - p.add_argument('-re', type=str, nargs='?', help='regular expression file to add underscore to function name') - p.add_argument('-json', action='store_true', help='output json') - args = p.parse_args() - - reToAddUnderscore = '' - if args.re: - reToAddUnderscore = open(args.re).read().strip() - export_functions(args, args.header, reToAddUnderscore) - -if __name__ == '__main__': - main() - diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/js/pre-mcl.js b/vendor/github.com/dexon-foundation/mcl/ffi/js/pre-mcl.js deleted file mode 100644 index ebc93e581..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/js/pre-mcl.js +++ /dev/null @@ -1,5 +0,0 @@ -if (typeof __dirname === 'string') { - var Module = {} - Module.wasmBinaryFile = __dirname + '/mcl_c.wasm' 
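
export-functions.py above works by regex-matching `RET NAME(ARGS);` prototypes out of the C headers and counting parameters in order to emit `cwrap` bindings for the wasm build. The extraction step, sketched standalone in C++ with `std::regex` (an illustration only; the real script also emits JSON and applies the underscore-prefix name mangling):

```
// Prototype scanning in the style of RE_PROTOTYPE: match
// "QUALIFIER RET NAME(ARGS);" and report name, return kind, and arg count.
#include <algorithm>
#include <iostream>
#include <regex>
#include <string>

int main()
{
    const std::string header =
        "MCLBN_DLL_API int mclBn_init(int curve, int compiledTimeVar);\n"
        "MCLBN_DLL_API void mclBnFr_clear(mclBnFr *x);\n";

    // Same shape as RE_PROTOTYPE: qualifier, return type, name, arg list.
    const std::regex proto(R"(\w+\s(\w+)\s(\w+)\(([^)]*)\);)");
    for (auto it = std::sregex_iterator(header.begin(), header.end(), proto);
         it != std::sregex_iterator(); ++it) {
        const std::string ret = (*it)[1], name = (*it)[2], args = (*it)[3];
        std::size_t paramNum = 0;
        if (!args.empty() && args != "void") {
            paramNum = 1 + std::count(args.begin(), args.end(), ',');
        }
        std::cout << name << ": ret=" << (ret == "void" ? "null" : "number")
                  << " params=" << paramNum << "\n";
    }
    return 0;
}
```
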
-} - diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/python/pairing.py b/vendor/github.com/dexon-foundation/mcl/ffi/python/pairing.py deleted file mode 100644 index 88b729176..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/python/pairing.py +++ /dev/null @@ -1,80 +0,0 @@ -from ctypes import * -from ctypes.wintypes import LPWSTR, LPCSTR, LPVOID - -g_lib = None - -def BN256_init(): - global g_lib - g_lib = cdll.LoadLibrary("../../bin/bn256.dll") - ret = g_lib.BN256_init() - if ret: - print "ERR BN256_init" - -class Fr(Structure): - _fields_ = [("v", c_ulonglong * 4)] - def setInt(self, v): - g_lib.BN256_Fr_setInt(self.v, v) - def setStr(self, s): - ret = g_lib.BN256_Fr_setStr(self.v, c_char_p(s)) - if ret: - print("ERR Fr:setStr") - def __str__(self): - svLen = 1024 - sv = create_string_buffer('\0' * svLen) - ret = g_lib.BN256_Fr_getStr(sv, svLen, self.v) - if ret: - print("ERR Fr:getStr") - return sv.value - def isZero(self, rhs): - return g_lib.BN256_Fr_isZero(self.v) != 0 - def isOne(self, rhs): - return g_lib.BN256_Fr_isOne(self.v) != 0 - def __eq__(self, rhs): - return g_lib.BN256_Fr_isEqual(self.v, rhs.v) != 0 - def __ne__(self, rhs): - return not(P == Q) - def __add__(self, rhs): - ret = Fr() - g_lib.BN256_Fr_add(ret.v, self.v, rhs.v) - return ret - def __sub__(self, rhs): - ret = Fr() - g_lib.BN256_Fr_sub(ret.v, self.v, rhs.v) - return ret - def __mul__(self, rhs): - ret = Fr() - g_lib.BN256_Fr_mul(ret.v, self.v, rhs.v) - return ret - def __div__(self, rhs): - ret = Fr() - g_lib.BN256_Fr_div(ret.v, self.v, rhs.v) - return ret - def __neg__(self): - ret = Fr() - g_lib.BN256_Fr_neg(ret.v, self.v) - return ret - -def Fr_add(z, x, y): - g_lib.BN256_Fr_add(z.v, x.v, y.v) - -def Fr_sub(z, x, y): - g_lib.BN256_Fr_sub(z.v, x.v, y.v) - -def Fr_mul(z, x, y): - g_lib.BN256_Fr_mul(z.v, x.v, y.v) - -def Fr_div(z, x, y): - g_lib.BN256_Fr_div(z.v, x.v, y.v) - -BN256_init() - -P = Fr() -Q = Fr() -print P == Q -print P != Q -P.setInt(5) -Q.setStr("34982034824") -print Q -R = Fr() -Fr_add(R, P, Q) -print R diff --git a/vendor/github.com/dexon-foundation/mcl/ffi/python/she.py b/vendor/github.com/dexon-foundation/mcl/ffi/python/she.py deleted file mode 100644 index ab8975274..000000000 --- a/vendor/github.com/dexon-foundation/mcl/ffi/python/she.py +++ /dev/null @@ -1,298 +0,0 @@ -import os -import platform -from ctypes import * - -MCL_BN254 = 0 -MCLBN_FR_UNIT_SIZE = 4 -MCLBN_FP_UNIT_SIZE = 4 - -FR_SIZE = MCLBN_FR_UNIT_SIZE -G1_SIZE = MCLBN_FP_UNIT_SIZE * 3 -G2_SIZE = MCLBN_FP_UNIT_SIZE * 6 -GT_SIZE = MCLBN_FP_UNIT_SIZE * 12 - -SEC_SIZE = FR_SIZE * 2 -PUB_SIZE = G1_SIZE + G2_SIZE -G1_CIPHER_SIZE = G1_SIZE * 2 -G2_CIPHER_SIZE = G2_SIZE * 2 -GT_CIPHER_SIZE = GT_SIZE * 4 - -MCLBN_COMPILED_TIME_VAR = (MCLBN_FR_UNIT_SIZE * 10) + MCLBN_FP_UNIT_SIZE - -Buffer = c_ubyte * 1536 -lib = None - -def init(curveType=MCL_BN254): - global lib - name = platform.system() - if name == 'Linux': - libName = 'libmclshe256.so' - elif name == 'Darwin': - libName = 'libmclshe256.dylib' - elif name == 'Windows': - libName = 'mclshe256.dll' - else: - raise RuntimeError("not support yet", name) - lib = cdll.LoadLibrary(libName) - ret = lib.sheInit(MCL_BN254, MCLBN_COMPILED_TIME_VAR) - if ret != 0: - raise RuntimeError("sheInit", ret) - # custom setup for a function which returns pointer - lib.shePrecomputedPublicKeyCreate.restype = c_void_p - -def setRangeForDLP(hashSize): - ret = lib.sheSetRangeForDLP(hashSize) - if ret != 0: - raise RuntimeError("setRangeForDLP", ret) - -def setTryNum(tryNum): - ret = 
lib.sheSetTryNum(tryNum) - if ret != 0: - raise RuntimeError("setTryNum", ret) - -def hexStr(v): - s = "" - for x in v: - s += format(x, '02x') - return s - -class CipherTextG1(Structure): - _fields_ = [("v", c_ulonglong * G1_CIPHER_SIZE)] - def serialize(self): - buf = Buffer() - ret = lib.sheCipherTextG1Serialize(byref(buf), len(buf), byref(self.v)) - if ret == 0: - raise RuntimeError("serialize") - return buf[0:ret] - def serializeToHexStr(self): - return hexStr(self.serialize()) - -class CipherTextG2(Structure): - _fields_ = [("v", c_ulonglong * G2_CIPHER_SIZE)] - def serialize(self): - buf = Buffer() - ret = lib.sheCipherTextG2Serialize(byref(buf), len(buf), byref(self.v)) - if ret == 0: - raise RuntimeError("serialize") - return buf[0:ret] - def serializeToHexStr(self): - return hexStr(self.serialize()) - -class CipherTextGT(Structure): - _fields_ = [("v", c_ulonglong * GT_CIPHER_SIZE)] - def serialize(self): - buf = Buffer() - ret = lib.sheCipherTextGTSerialize(byref(buf), len(buf), byref(self.v)) - if ret == 0: - raise RuntimeError("serialize") - return buf[0:ret] - def serializeToHexStr(self): - return hexStr(self.serialize()) - -class PrecomputedPublicKey(Structure): - def __init__(self): - self.p = 0 - def create(self): - if not self.p: - self.p = c_void_p(lib.shePrecomputedPublicKeyCreate()) - if self.p == 0: - raise RuntimeError("PrecomputedPublicKey::create") - def destroy(self): - lib.shePrecomputedPublicKeyDestroy(self.p) - def encG1(self, m): - c = CipherTextG1() - ret = lib.shePrecomputedPublicKeyEncG1(byref(c.v), self.p, m) - if ret != 0: - raise RuntimeError("encG1", m) - return c - def encG2(self, m): - c = CipherTextG2() - ret = lib.shePrecomputedPublicKeyEncG2(byref(c.v), self.p, m) - if ret != 0: - raise RuntimeError("encG2", m) - return c - def encGT(self, m): - c = CipherTextGT() - ret = lib.shePrecomputedPublicKeyEncGT(byref(c.v), self.p, m) - if ret != 0: - raise RuntimeError("encGT", m) - return c - -class PublicKey(Structure): - _fields_ = [("v", c_ulonglong * PUB_SIZE)] - def serialize(self): - buf = Buffer() - ret = lib.shePublicKeySerialize(byref(buf), len(buf), byref(self.v)) - if ret == 0: - raise RuntimeError("serialize") - return buf[0:ret] - def serializeToHexStr(self): - return hexStr(self.serialize()) - def encG1(self, m): - c = CipherTextG1() - ret = lib.sheEncG1(byref(c.v), byref(self.v), m) - if ret != 0: - raise RuntimeError("encG1", m) - return c - def encG2(self, m): - c = CipherTextG2() - ret = lib.sheEncG2(byref(c.v), byref(self.v), m) - if ret != 0: - raise RuntimeError("encG2", m) - return c - def encGT(self, m): - c = CipherTextGT() - ret = lib.sheEncGT(byref(c.v), byref(self.v), m) - if ret != 0: - raise RuntimeError("encGT", m) - return c - def createPrecomputedPublicKey(self): - ppub = PrecomputedPublicKey() - ppub.create() - ret = lib.shePrecomputedPublicKeyInit(ppub.p, byref(self.v)) - if ret != 0: - raise RuntimeError("createPrecomputedPublicKey") - return ppub - -class SecretKey(Structure): - _fields_ = [("v", c_ulonglong * SEC_SIZE)] - def setByCSPRNG(self): - ret = lib.sheSecretKeySetByCSPRNG(byref(self.v)) - if ret != 0: - raise RuntimeError("setByCSPRNG", ret) - def serialize(self): - buf = Buffer() - ret = lib.sheSecretKeySerialize(byref(buf), len(buf), byref(self.v)) - if ret == 0: - raise RuntimeError("serialize") - return buf[0:ret] - def serializeToHexStr(self): - return hexStr(self.serialize()) - def getPulicKey(self): - pub = PublicKey() - lib.sheGetPublicKey(byref(pub.v), byref(self.v)) - return pub - def dec(self, c): - 
m = c_longlong() - if isinstance(c, CipherTextG1): - ret = lib.sheDecG1(byref(m), byref(self.v), byref(c.v)) - elif isinstance(c, CipherTextG2): - ret = lib.sheDecG2(byref(m), byref(self.v), byref(c.v)) - elif isinstance(c, CipherTextGT): - ret = lib.sheDecGT(byref(m), byref(self.v), byref(c.v)) - if ret != 0: - raise RuntimeError("dec") - return m.value - -def neg(c): - ret = -1 - if isinstance(c, CipherTextG1): - out = CipherTextG1() - ret = lib.sheNegG1(byref(out.v), byref(c.v)) - elif isinstance(c, CipherTextG2): - out = CipherTextG2() - ret = lib.sheNegG2(byref(out.v), byref(c.v)) - elif isinstance(c, CipherTextGT): - out = CipherTextGT() - ret = lib.sheNegGT(byref(out.v), byref(c.v)) - if ret != 0: - raise RuntimeError("neg") - return out - -def add(cx, cy): - ret = -1 - if isinstance(cx, CipherTextG1) and isinstance(cy, CipherTextG1): - out = CipherTextG1() - ret = lib.sheAddG1(byref(out.v), byref(cx.v), byref(cy.v)) - elif isinstance(cx, CipherTextG2) and isinstance(cy, CipherTextG2): - out = CipherTextG2() - ret = lib.sheAddG2(byref(out.v), byref(cx.v), byref(cy.v)) - elif isinstance(cx, CipherTextGT) and isinstance(cy, CipherTextGT): - out = CipherTextGT() - ret = lib.sheAddGT(byref(out.v), byref(cx.v), byref(cy.v)) - if ret != 0: - raise RuntimeError("add") - return out - -def sub(cx, cy): - ret = -1 - if isinstance(cx, CipherTextG1) and isinstance(cy, CipherTextG1): - out = CipherTextG1() - ret = lib.sheSubG1(byref(out.v), byref(cx.v), byref(cy.v)) - elif isinstance(cx, CipherTextG2) and isinstance(cy, CipherTextG2): - out = CipherTextG2() - ret = lib.sheSubG2(byref(out.v), byref(cx.v), byref(cy.v)) - elif isinstance(cx, CipherTextGT) and isinstance(cy, CipherTextGT): - out = CipherTextGT() - ret = lib.sheSubGT(byref(out.v), byref(cx.v), byref(cy.v)) - if ret != 0: - raise RuntimeError("sub") - return out - -def mul(cx, cy): - ret = -1 - if isinstance(cx, CipherTextG1) and isinstance(cy, CipherTextG2): - out = CipherTextGT() - ret = lib.sheMul(byref(out.v), byref(cx.v), byref(cy.v)) - elif isinstance(cx, CipherTextG1) and isinstance(cy, int): - out = CipherTextG1() - ret = lib.sheMulG1(byref(out.v), byref(cx.v), cy) - elif isinstance(cx, CipherTextG2) and isinstance(cy, int): - out = CipherTextG2() - ret = lib.sheMulG2(byref(out.v), byref(cx.v), cy) - elif isinstance(cx, CipherTextGT) and isinstance(cy, int): - out = CipherTextGT() - ret = lib.sheMulGT(byref(out.v), byref(cx.v), cy) - if ret != 0: - raise RuntimeError("mul") - return out - -if __name__ == '__main__': - init() - sec = SecretKey() - sec.setByCSPRNG() - print("sec=", sec.serializeToHexStr()) - pub = sec.getPulicKey() - print("pub=", pub.serializeToHexStr()) - - m11 = 1 - m12 = 5 - m21 = 3 - m22 = -4 - c11 = pub.encG1(m11) - c12 = pub.encG1(m12) - # dec(enc) for G1 - if sec.dec(c11) != m11: print("err1") - - # add/sub for G1 - if sec.dec(add(c11, c12)) != m11 + m12: print("err2") - if sec.dec(sub(c11, c12)) != m11 - m12: print("err3") - - # add/sub for G2 - c21 = pub.encG2(m21) - c22 = pub.encG2(m22) - if sec.dec(c21) != m21: print("err4") - if sec.dec(add(c21, c22)) != m21 + m22: print("err5") - if sec.dec(sub(c21, c22)) != m21 - m22: print("err6") - - mt = -56 - ct = pub.encGT(mt) - if sec.dec(ct) != mt: print("err7") - - # mul G1 and G2 - if sec.dec(mul(c11, c21)) != m11 * m21: print("err8") - - # use precomputedPublicKey for performance - ppub = pub.createPrecomputedPublicKey() - c1 = ppub.encG1(m11) - if sec.dec(c1) != m11: print("err9") - - import sys - if sys.version_info.major >= 3: - import timeit - N = 
100000 - print(str(timeit.timeit("pub.encG1(12)", number=N, globals=globals()) / float(N) * 1e3) + "msec") - print(str(timeit.timeit("ppub.encG1(12)", number=N, globals=globals()) / float(N) * 1e3) + "msec") - - ppub.destroy() # necessary to avoid memory leak - diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/array.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/array.hpp deleted file mode 100644 index 30df3667d..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/array.hpp +++ /dev/null @@ -1,197 +0,0 @@ -#pragma once - -/** - @file - @brief scoped array and aligned array - - @author MITSUNARI Shigeo(@herumi) -*/ -#include -#include -#ifdef _WIN32 - #include -#else - #include -#endif -#include - -namespace cybozu { - -inline void *AlignedMalloc(size_t size, size_t alignment) -{ -#ifdef _WIN32 - return _aligned_malloc(size, alignment); -#else - void *p; - int ret = posix_memalign(&p, alignment, size); - return (ret == 0) ? p : 0; -#endif -} - -inline void AlignedFree(void *p) -{ -#ifdef _WIN32 - if (p == 0) return; - _aligned_free(p); -#else - free(p); -#endif -} - -template -class ScopedArray { - T *p_; - size_t size_; - ScopedArray(const ScopedArray&); - void operator=(const ScopedArray&); -public: - explicit ScopedArray(size_t size) - : p_(new T[size]) - , size_(size) - { - } - ~ScopedArray() - { - delete[] p_; - } - T& operator[](size_t idx) CYBOZU_NOEXCEPT { return p_[idx]; } - const T& operator[](size_t idx) const CYBOZU_NOEXCEPT { return p_[idx]; } - size_t size() const CYBOZU_NOEXCEPT { return size_; } - bool empty() const CYBOZU_NOEXCEPT { return size_ == 0; } - T* begin() CYBOZU_NOEXCEPT { return p_; } - T* end() CYBOZU_NOEXCEPT { return p_ + size_; } - const T* begin() const CYBOZU_NOEXCEPT { return p_; } - const T* end() const CYBOZU_NOEXCEPT { return p_ + size_; } - T* data() CYBOZU_NOEXCEPT { return p_; } - const T* data() const CYBOZU_NOEXCEPT { return p_; } -}; - -/** - T must be POD type - 16byte aligment array -*/ -template -class AlignedArray { - T *p_; - size_t size_; - size_t allocSize_; - T *alloc(size_t size) const - { - T *p = static_cast(AlignedMalloc(size * sizeof(T), N)); - if (p == 0) throw std::bad_alloc(); - return p; - } - void copy(T *dst, const T *src, size_t n) const - { - for (size_t i = 0; i < n; i++) dst[i] = src[i]; - } - void setZero(T *p, size_t n) const - { - for (size_t i = 0; i < n; i++) p[i] = 0; - } - /* - alloc allocN and copy [p, p + copyN) to new p_ - don't modify size_ - */ - void allocCopy(size_t allocN, const T *p, size_t copyN) - { - T *q = alloc(allocN); - copy(q, p, copyN); - AlignedFree(p_); - p_ = q; - allocSize_ = allocN; - } -public: - /* - don't clear buffer with zero if doClear is false - */ - explicit AlignedArray(size_t size = 0, bool doClear = defaultDoClear) - : p_(0) - , size_(0) - , allocSize_(0) - { - resize(size, doClear); - } - AlignedArray(const AlignedArray& rhs) - : p_(0) - , size_(0) - , allocSize_(0) - { - *this = rhs; - } - AlignedArray& operator=(const AlignedArray& rhs) - { - if (allocSize_ < rhs.size_) { - allocCopy(rhs.size_, rhs.p_, rhs.size_); - } else { - copy(p_, rhs.p_, rhs.size_); - } - size_ = rhs.size_; - return *this; - } -#if (CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11) - AlignedArray(AlignedArray&& rhs) CYBOZU_NOEXCEPT - : p_(rhs.p_) - , size_(rhs.size_) - , allocSize_(rhs.allocSize_) - { - rhs.p_ = 0; - rhs.size_ = 0; - rhs.allocSize_ = 0; - } - AlignedArray& operator=(AlignedArray&& rhs) CYBOZU_NOEXCEPT - { - swap(rhs); - rhs.clear(); - return 
*this; - } -#endif - /* - don't clear buffer with zero if doClear is false - @note don't free if shrinked - */ - void resize(size_t size, bool doClear = defaultDoClear) - { - // shrink - if (size <= size_) { - size_ = size; - return; - } - // realloc if necessary - if (size > allocSize_) { - allocCopy(size, p_, size_); - } - if (doClear) setZero(p_ + size_, size - size_); - size_ = size; - } - void clear() // not free - { - size_ = 0; - } - ~AlignedArray() - { - AlignedFree(p_); - } - void swap(AlignedArray& rhs) CYBOZU_NOEXCEPT - { - std::swap(p_, rhs.p_); - std::swap(size_, rhs.size_); - std::swap(allocSize_, rhs.allocSize_); - } - T& operator[](size_t idx) CYBOZU_NOEXCEPT { return p_[idx]; } - const T& operator[](size_t idx) const CYBOZU_NOEXCEPT { return p_[idx]; } - size_t size() const CYBOZU_NOEXCEPT { return size_; } - bool empty() const CYBOZU_NOEXCEPT { return size_ == 0; } - T* begin() CYBOZU_NOEXCEPT { return p_; } - T* end() CYBOZU_NOEXCEPT { return p_ + size_; } - const T* begin() const CYBOZU_NOEXCEPT { return p_; } - const T* end() const CYBOZU_NOEXCEPT { return p_ + size_; } - T* data() CYBOZU_NOEXCEPT { return p_; } - const T* data() const CYBOZU_NOEXCEPT { return p_; } -#if (CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11) - const T* cbegin() const CYBOZU_NOEXCEPT { return p_; } - const T* cend() const CYBOZU_NOEXCEPT { return p_ + size_; } -#endif -}; - -} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/atoi.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/atoi.hpp deleted file mode 100644 index a22853a17..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/atoi.hpp +++ /dev/null @@ -1,239 +0,0 @@ -#pragma once -/** - @file - @brief converter between integer and string - - @author MITSUNARI Shigeo(@herumi) -*/ - -#include -#include -#include -#include - -namespace cybozu { - -namespace atoi_local { - -template -T convertToInt(bool *b, const char *p, size_t size, const char (&max)[n], T min, T overflow1, char overflow2) -{ - if (size > 0 && *p) { - bool isMinus = false; - size_t i = 0; - if (*p == '-') { - isMinus = true; - i++; - } - if (i < size && p[i]) { - // skip leading zero - while (i < size && p[i] == '0') i++; - // check minimum - if (isMinus && size - i >= n - 1 && memcmp(max, &p[i], n - 1) == 0) { - if (b) *b = true; - return min; - } - T x = 0; - for (;;) { - unsigned char c; - if (i == size || (c = static_cast(p[i])) == '\0') { - if (b) *b = true; - return isMinus ? 
-x : x; - } - unsigned int y = c - '0'; - if (y > 9 || x > overflow1 || (x == overflow1 && c >= overflow2)) { - break; - } - x = x * 10 + T(y); - i++; - } - } - } - if (b) { - *b = false; - return 0; - } else { - throw cybozu::Exception("atoi::convertToInt") << cybozu::exception::makeString(p, size); - } -} - -template -T convertToUint(bool *b, const char *p, size_t size, T overflow1, char overflow2) -{ - if (size > 0 && *p) { - size_t i = 0; - // skip leading zero - while (i < size && p[i] == '0') i++; - T x = 0; - for (;;) { - unsigned char c; - if (i == size || (c = static_cast(p[i])) == '\0') { - if (b) *b = true; - return x; - } - unsigned int y = c - '0'; - if (y > 9 || x > overflow1 || (x == overflow1 && c >= overflow2)) { - break; - } - x = x * 10 + T(y); - i++; - } - } - if (b) { - *b = false; - return 0; - } else { - throw cybozu::Exception("atoi::convertToUint") << cybozu::exception::makeString(p, size); - } -} - -template -T convertHexToInt(bool *b, const char *p, size_t size) -{ - if (size > 0 && *p) { - size_t i = 0; - T x = 0; - for (;;) { - unsigned int c; - if (i == size || (c = static_cast(p[i])) == '\0') { - if (b) *b = true; - return x; - } - if (c - 'A' <= 'F' - 'A') { - c = (c - 'A') + 10; - } else if (c - 'a' <= 'f' - 'a') { - c = (c - 'a') + 10; - } else if (c - '0' <= '9' - '0') { - c = c - '0'; - } else { - break; - } - // avoid overflow - if (x > (std::numeric_limits::max)() / 16) break; - x = x * 16 + T(c); - i++; - } - } - if (b) { - *b = false; - return 0; - } else { - throw cybozu::Exception("atoi::convertHexToInt") << cybozu::exception::makeString(p, size); - } -} - -} // atoi_local - -/** - auto detect return value class - @note if you set bool pointer p then throw nothing and set *p = false if bad string -*/ -class atoi { - const char *p_; - size_t size_; - bool *b_; - void set(bool *b, const char *p, size_t size) - { - b_ = b; - p_ = p; - size_ = size; - } -public: - atoi(const char *p, size_t size = -1) - { - set(0, p, size); - } - atoi(bool *b, const char *p, size_t size = -1) - { - set(b, p, size); - } - atoi(const std::string& str) - { - set(0, str.c_str(), str.size()); - } - atoi(bool *b, const std::string& str) - { - set(b, str.c_str(), str.size()); - } - inline operator signed char() const - { - return atoi_local::convertToInt(b_, p_, size_, "128", -128, 12, '8'); - } - inline operator unsigned char() const - { - return atoi_local::convertToUint(b_, p_, size_, 25, '6'); - } - inline operator short() const - { - return atoi_local::convertToInt(b_, p_, size_, "32768", -32768, 3276, '8'); - } - inline operator unsigned short() const - { - return atoi_local::convertToUint(b_, p_, size_, 6553, '6'); - } - inline operator int() const - { - return atoi_local::convertToInt(b_, p_, size_, "2147483648", INT_MIN, 214748364, '8'); - } - inline operator unsigned int() const - { - return atoi_local::convertToUint(b_, p_, size_, 429496729, '6'); - } - inline operator long long() const - { - return atoi_local::convertToInt(b_, p_, size_, "9223372036854775808", LLONG_MIN, 922337203685477580LL, '8'); - } - inline operator unsigned long long() const - { - return atoi_local::convertToUint(b_, p_, size_, 1844674407370955161ULL, '6'); - } -#if defined(__SIZEOF_LONG__) && (__SIZEOF_LONG__ == 8) - inline operator long() const { return static_cast(static_cast(*this)); } - inline operator unsigned long() const { return static_cast(static_cast(*this)); } -#else - inline operator long() const { return static_cast(static_cast(*this)); } - inline operator unsigned long() const 
{ return static_cast(static_cast(*this)); } -#endif -}; - -class hextoi { - const char *p_; - size_t size_; - bool *b_; - void set(bool *b, const char *p, size_t size) - { - b_ = b; - p_ = p; - size_ = size; - } -public: - hextoi(const char *p, size_t size = -1) - { - set(0, p, size); - } - hextoi(bool *b, const char *p, size_t size = -1) - { - set(b, p, size); - } - hextoi(const std::string& str) - { - set(0, str.c_str(), str.size()); - } - hextoi(bool *b, const std::string& str) - { - set(b, str.c_str(), str.size()); - } - operator unsigned char() const { return atoi_local::convertHexToInt(b_, p_, size_); } - operator unsigned short() const { return atoi_local::convertHexToInt(b_, p_, size_); } - operator unsigned int() const { return atoi_local::convertHexToInt(b_, p_, size_); } - operator unsigned long() const { return atoi_local::convertHexToInt(b_, p_, size_); } - operator unsigned long long() const { return atoi_local::convertHexToInt(b_, p_, size_); } - operator char() const { return atoi_local::convertHexToInt(b_, p_, size_); } - operator signed char() const { return atoi_local::convertHexToInt(b_, p_, size_); } - operator short() const { return atoi_local::convertHexToInt(b_, p_, size_); } - operator int() const { return atoi_local::convertHexToInt(b_, p_, size_); } - operator long() const { return atoi_local::convertHexToInt(b_, p_, size_); } - operator long long() const { return atoi_local::convertHexToInt(b_, p_, size_); } -}; - -} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/benchmark.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/benchmark.hpp deleted file mode 100644 index 4c02f1869..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/benchmark.hpp +++ /dev/null @@ -1,212 +0,0 @@ -#pragma once -/** - @file - @brief measure exec time of function - @author MITSUNARI Shigeo -*/ -#if defined(_MSC_VER) && (MSC_VER <= 1500) - #include -#else - #include -#endif -#include - -#ifdef __EMSCRIPTEN__ - #define CYBOZU_BENCH_USE_GETTIMEOFDAY -#endif - -#ifdef CYBOZU_BENCH_USE_GETTIMEOFDAY - #include -#elif !defined(CYBOZU_BENCH_DONT_USE_RDTSC) - #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__x86_64__) - #define CYBOZU_BENCH_USE_RDTSC - #define CYBOZU_BENCH_USE_CPU_TIMER - #endif - #if defined(__GNUC__) && defined(__ARM_ARCH_7A__) -// #define CYBOZU_BENCH_USE_MRC -// #define CYBOZU_BENCH_USE_CPU_TIMER - #endif -#endif - - -#include -#include -#ifdef _MSC_VER - #include - #include -#else -#endif - -#ifndef CYBOZU_UNUSED - #ifdef __GNUC__ - #define CYBOZU_UNUSED __attribute__((unused)) - #else - #define CYBOZU_UNUSED - #endif -#endif - -namespace cybozu { - -namespace bench { - -static void (*g_putCallback)(double); - -static inline void setPutCallback(void (*f)(double)) -{ - g_putCallback = f; -} - -} // cybozu::bench - -class CpuClock { -public: - static inline uint64_t getCpuClk() - { -#ifdef CYBOZU_BENCH_USE_RDTSC -#ifdef _MSC_VER - return __rdtsc(); -#else - unsigned int eax, edx; - __asm__ volatile("rdtsc" : "=a"(eax), "=d"(edx)); - return ((uint64_t)edx << 32) | eax; -#endif -#elif defined(CYBOZU_BENCH_USE_MRC) - uint32_t clk; - __asm__ volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(clk)); - return clk; -#else -#ifdef _MSC_VER - struct _timeb timeb; - _ftime_s(&timeb); - return uint64_t(timeb.time) * 1000000000 + timeb.millitm * 1000000; -#elif defined(CYBOZU_BENCH_USE_GETTIMEOFDAY) - struct timeval tv; - int ret CYBOZU_UNUSED = gettimeofday(&tv, 0); - assert(ret == 0); - return 
uint64_t(tv.tv_sec) * 1000000000 + tv.tv_usec * 1000; -#else - struct timespec tp; - int ret CYBOZU_UNUSED = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tp); - assert(ret == 0); - return uint64_t(tp.tv_sec) * 1000000000 + tp.tv_nsec; -#endif -#endif - } - CpuClock() - : clock_(0) - , count_(0) - { - } - void begin() - { - clock_ -= getCpuClk(); - } - void end() - { - clock_ += getCpuClk(); - count_++; - } - int getCount() const { return count_; } - uint64_t getClock() const { return clock_; } - void clear() { count_ = 0; clock_ = 0; } - void put(const char *msg = 0, int N = 1) const - { - double t = getClock() / double(getCount()) / N; - if (msg && *msg) printf("%s ", msg); - if (bench::g_putCallback) { - bench::g_putCallback(t); - return; - } -#ifdef CYBOZU_BENCH_USE_CPU_TIMER - if (t > 1e6) { - printf("%7.3fMclk", t * 1e-6); - } else if (t > 1e3) { - printf("%7.3fKclk", t * 1e-3); - } else { - printf("%6.2f clk", t); - } -#else - if (t > 1e6) { - printf("%7.3fmsec", t * 1e-6); - } else if (t > 1e3) { - printf("%7.3fusec", t * 1e-3); - } else { - printf("%6.2fnsec", t); - } -#endif - if (msg && *msg) printf("\n"); - } - // adhoc constatns for CYBOZU_BENCH -#ifdef CYBOZU_BENCH_USE_CPU_TIMER - static const int loopN1 = 1000; - static const int loopN2 = 100; - static const uint64_t maxClk = (uint64_t)1e8; -#else - static const int loopN1 = 100; - static const int loopN2 = 100; - static const uint64_t maxClk = (uint64_t)1e8; -#endif -private: - uint64_t clock_; - int count_; -}; - -namespace bench { - -static CpuClock g_clk; -static int CYBOZU_UNUSED g_loopNum; - -} // cybozu::bench -/* - loop counter is automatically determined - CYBOZU_BENCH(, , , , ...); - if msg == "" then only set g_clk, g_loopNum -*/ -#define CYBOZU_BENCH(msg, func, ...) \ -{ \ - const uint64_t _cybozu_maxClk = cybozu::CpuClock::maxClk; \ - cybozu::CpuClock _cybozu_clk; \ - for (int _cybozu_i = 0; _cybozu_i < cybozu::CpuClock::loopN2; _cybozu_i++) { \ - _cybozu_clk.begin(); \ - for (int _cybozu_j = 0; _cybozu_j < cybozu::CpuClock::loopN1; _cybozu_j++) { func(__VA_ARGS__); } \ - _cybozu_clk.end(); \ - if (_cybozu_clk.getClock() > _cybozu_maxClk) break; \ - } \ - if (msg && *msg) _cybozu_clk.put(msg, cybozu::CpuClock::loopN1); \ - cybozu::bench::g_clk = _cybozu_clk; cybozu::bench::g_loopNum = cybozu::CpuClock::loopN1; \ -} - -/* - double clk; - CYBOZU_BENCH_T(clk, , , , ...); - clk is set by CYBOZU_BENCH_T -*/ -#define CYBOZU_BENCH_T(clk, func, ...) \ -{ \ - const uint64_t _cybozu_maxClk = cybozu::CpuClock::maxClk; \ - cybozu::CpuClock _cybozu_clk; \ - for (int _cybozu_i = 0; _cybozu_i < cybozu::CpuClock::loopN2; _cybozu_i++) { \ - _cybozu_clk.begin(); \ - for (int _cybozu_j = 0; _cybozu_j < cybozu::CpuClock::loopN1; _cybozu_j++) { func(__VA_ARGS__); } \ - _cybozu_clk.end(); \ - if (_cybozu_clk.getClock() > _cybozu_maxClk) break; \ - } \ - clk = _cybozu_clk.getClock() / (double)_cybozu_clk.getCount() / cybozu::CpuClock::loopN1; \ -} - -/* - loop counter N is given - CYBOZU_BENCH_C(, , , , , ...); - if msg == "" then only set g_clk, g_loopNum -*/ -#define CYBOZU_BENCH_C(msg, _N, func, ...) 
\ -{ \ - cybozu::CpuClock _cybozu_clk; \ - _cybozu_clk.begin(); \ - for (int _cybozu_j = 0; _cybozu_j < _N; _cybozu_j++) { func(__VA_ARGS__); } \ - _cybozu_clk.end(); \ - if (msg && *msg) _cybozu_clk.put(msg, _N); \ - cybozu::bench::g_clk = _cybozu_clk; cybozu::bench::g_loopNum = _N; \ -} - -} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/bit_operation.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/bit_operation.hpp deleted file mode 100644 index 865c1e47d..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/bit_operation.hpp +++ /dev/null @@ -1,139 +0,0 @@ -#pragma once -/** - @file - @brief bit operation -*/ -#include -#include - -#if (CYBOZU_HOST == CYBOZU_HOST_INTEL) - #if defined(_WIN32) - #include - #elif defined(__linux__) || defined(__CYGWIN__) || defined(__clang__) - #include - #elif defined(__GNUC__) - #include - #endif -#endif - -namespace cybozu { - -namespace bit_op_local { - -template -struct Tag {}; - -// sizeof(T) < 8 -template<> -struct Tag { - template - static inline int bsf(T x) - { -#if defined(_MSC_VER) - unsigned long out; - _BitScanForward(&out, x); -#pragma warning(suppress: 6102) - return out; -#else - return __builtin_ctz(x); -#endif - } - template - static inline int bsr(T x) - { -#if defined(_MSC_VER) - unsigned long out; - _BitScanReverse(&out, x); -#pragma warning(suppress: 6102) - return out; -#else - return __builtin_clz(x) ^ 0x1f; -#endif - } -}; - -// sizeof(T) == 8 -template<> -struct Tag { - template - static inline int bsf(T x) - { -#if defined(_MSC_VER) && defined(_WIN64) - unsigned long out; - _BitScanForward64(&out, x); -#pragma warning(suppress: 6102) - return out; -#elif defined(__x86_64__) - return __builtin_ctzll(x); -#else - const uint32_t L = uint32_t(x); - if (L) return Tag::bsf(L); - const uint32_t H = uint32_t(x >> 32); - return Tag::bsf(H) + 32; -#endif - } - template - static inline int bsr(T x) - { -#if defined(_MSC_VER) && defined(_WIN64) - unsigned long out; - _BitScanReverse64(&out, x); -#pragma warning(suppress: 6102) - return out; -#elif defined(__x86_64__) - return __builtin_clzll(x) ^ 0x3f; -#else - const uint32_t H = uint32_t(x >> 32); - if (H) return Tag::bsr(H) + 32; - const uint32_t L = uint32_t(x); - return Tag::bsr(L); -#endif - } -}; - -} // bit_op_local - -template -int bsf(T x) -{ - return bit_op_local::Tag::bsf(x); -} -template -int bsr(T x) -{ - return bit_op_local::Tag::bsr(x); -} - -template -uint64_t makeBitMask64(T x) -{ - assert(x < 64); - return (uint64_t(1) << x) - 1; -} - -template -uint32_t popcnt(T x); - -template<> -inline uint32_t popcnt(uint32_t x) -{ -#if defined(_MSC_VER) - return static_cast(_mm_popcnt_u32(x)); -#else - return static_cast(__builtin_popcount(x)); -#endif -} - -template<> -inline uint32_t popcnt(uint64_t x) -{ -#if defined(__x86_64__) - return static_cast(__builtin_popcountll(x)); -#elif defined(_WIN64) - return static_cast(_mm_popcnt_u64(x)); -#else - return popcnt(static_cast(x)) + popcnt(static_cast(x >> 32)); -#endif -} - -} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/critical_section.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/critical_section.hpp deleted file mode 100644 index 13d7f3a0e..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/critical_section.hpp +++ /dev/null @@ -1,60 +0,0 @@ -#pragma once -/** - @file - @brief critical section - - @author MITSUNARI Shigeo(@herumi) - @author MITSUNARI Shigeo -*/ -#include - -namespace cybozu { - -class 
ConditionVariableCs; - -namespace thread { - -#ifdef _WIN32 -typedef CRITICAL_SECTION CsHandle; -inline void CsInit(CsHandle& cs) { InitializeCriticalSection(&cs); } -inline void CsLock(CsHandle& cs) { EnterCriticalSection(&cs); } -inline void CsUnlock(CsHandle& cs) { LeaveCriticalSection(&cs); } -inline void CsTerm(CsHandle& cs) { DeleteCriticalSection(&cs); } -#else -typedef pthread_mutex_t CsHandle; -inline void CsInit(CsHandle& cs) { pthread_mutex_init(&cs, NULL); } -inline void CsLock(CsHandle& cs) { pthread_mutex_lock(&cs); } -inline void CsUnlock(CsHandle& cs) { pthread_mutex_unlock(&cs); } -inline void CsTerm(CsHandle& cs) { pthread_mutex_destroy(&cs); } -#endif - -} // cybozu::thread - -class CriticalSection { - friend class cybozu::ConditionVariableCs; -public: - CriticalSection() - { - thread::CsInit(hdl_); - } - ~CriticalSection() - { - thread::CsTerm(hdl_); - } - inline void lock() - { - thread::CsLock(hdl_); - } - inline void unlock() - { - thread::CsUnlock(hdl_); - } -private: - CriticalSection(const CriticalSection&); - CriticalSection& operator=(const CriticalSection&); - thread::CsHandle hdl_; -}; - -typedef cybozu::thread::AutoLockT AutoLockCs; //!< auto lock critical section - -} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/crypto.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/crypto.hpp deleted file mode 100644 index d427179d9..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/crypto.hpp +++ /dev/null @@ -1,321 +0,0 @@ -#pragma once -/** - @file - @brief wrap openssl - @author MITSUNARI Shigeo(@herumi) -*/ - -#include -#ifdef __APPLE__ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#endif -#if 0 //#ifdef __APPLE__ - #define COMMON_DIGEST_FOR_OPENSSL - #include - #include - #define SHA1 CC_SHA1 - #define SHA224 CC_SHA224 - #define SHA256 CC_SHA256 - #define SHA384 CC_SHA384 - #define SHA512 CC_SHA512 -#else -#include -#include -#include -#endif -#ifdef _MSC_VER - #include -#endif - -namespace cybozu { - -namespace crypto { - -class Hash { -public: - enum Name { - N_SHA1, - N_SHA224, - N_SHA256, - N_SHA384, - N_SHA512 - }; -private: - Name name_; - size_t hashSize_; - union { - SHA_CTX sha1; - SHA256_CTX sha256; - SHA512_CTX sha512; - } ctx_; -public: - static inline size_t getSize(Name name) - { - switch (name) { - case N_SHA1: return SHA_DIGEST_LENGTH; - case N_SHA224: return SHA224_DIGEST_LENGTH; - case N_SHA256: return SHA256_DIGEST_LENGTH; - case N_SHA384: return SHA384_DIGEST_LENGTH; - case N_SHA512: return SHA512_DIGEST_LENGTH; - default: - throw cybozu::Exception("crypto:Hash:getSize") << name; - } - } - static inline const char *getName(Name name) - { - switch (name) { - case N_SHA1: return "sha1"; - case N_SHA224: return "sha224"; - case N_SHA256: return "sha256"; - case N_SHA384: return "sha384"; - case N_SHA512: return "sha512"; - default: - throw cybozu::Exception("crypto:Hash:getName") << name; - } - } - static inline Name getName(const std::string& nameStr) - { - static const struct { - const char *nameStr; - Name name; - } tbl[] = { - { "sha1", N_SHA1 }, - { "sha224", N_SHA224 }, - { "sha256", N_SHA256 }, - { "sha384", N_SHA384 }, - { "sha512", N_SHA512 }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - if (nameStr == tbl[i].nameStr) return tbl[i].name; - } - throw cybozu::Exception("crypto:Hash:getName") << nameStr; - } - explicit Hash(Name name = N_SHA1) - : name_(name) - , hashSize_(getSize(name)) - { - reset(); - } - void 
update(const void *buf, size_t bufSize) - { - switch (name_) { - case N_SHA1: SHA1_Update(&ctx_.sha1, buf, bufSize); break; - case N_SHA224: SHA224_Update(&ctx_.sha256, buf, bufSize); break; - case N_SHA256: SHA256_Update(&ctx_.sha256, buf, bufSize); break; - case N_SHA384: SHA384_Update(&ctx_.sha512, buf, bufSize); break; - case N_SHA512: SHA512_Update(&ctx_.sha512, buf, bufSize); break; - } - } - void update(const std::string& buf) - { - update(buf.c_str(), buf.size()); - } - void reset() - { - switch (name_) { - case N_SHA1: SHA1_Init(&ctx_.sha1); break; - case N_SHA224: SHA224_Init(&ctx_.sha256); break; - case N_SHA256: SHA256_Init(&ctx_.sha256); break; - case N_SHA384: SHA384_Init(&ctx_.sha512); break; - case N_SHA512: SHA512_Init(&ctx_.sha512); break; - default: - throw cybozu::Exception("crypto:Hash:rset") << name_; - } - } - /* - md must have hashSize byte - @note clear inner buffer after calling digest - */ - void digest(void *out, const void *buf, size_t bufSize) - { - update(buf, bufSize); - unsigned char *md = reinterpret_cast(out); - switch (name_) { - case N_SHA1: SHA1_Final(md, &ctx_.sha1); break; - case N_SHA224: SHA224_Final(md, &ctx_.sha256); break; - case N_SHA256: SHA256_Final(md, &ctx_.sha256); break; - case N_SHA384: SHA384_Final(md, &ctx_.sha512); break; - case N_SHA512: SHA512_Final(md, &ctx_.sha512); break; - default: - throw cybozu::Exception("crypto:Hash:digest") << name_; - } - reset(); - } - std::string digest(const void *buf, size_t bufSize) - { - std::string ret; - ret.resize(hashSize_); - digest(&ret[0], buf, bufSize); - return ret; - } - std::string digest(const std::string& buf = "") - { - return digest(buf.c_str(), buf.size()); - } - /* - out must have necessary size - @note return written size - */ - static inline size_t digest(void *out, Name name, const void *buf, size_t bufSize) - { - unsigned char *md = (unsigned char*)out; - const unsigned char *src = cybozu::cast(buf); - switch (name) { - case N_SHA1: SHA1(src, bufSize, md); return 160 / 8; - case N_SHA224: SHA224(src, bufSize, md); return 224 / 8; - case N_SHA256: SHA256(src, bufSize, md); return 256 / 8; - case N_SHA384: SHA384(src, bufSize, md); return 384 / 8; - case N_SHA512: SHA512(src, bufSize, md); return 512 / 8; - default: - return 0; - } - } - static inline std::string digest(Name name, const void *buf, size_t bufSize) - { - char md[128]; - size_t size = digest(md, name, buf, bufSize); - if (size == 0) throw cybozu::Exception("crypt:Hash:digest") << name; - return std::string(md, size); - } - static inline std::string digest(Name name, const std::string& buf) - { - return digest(name, buf.c_str(), buf.size()); - } -}; - -class Hmac { - const EVP_MD *evp_; -public: - explicit Hmac(Hash::Name name = Hash::N_SHA1) - { - switch (name) { - case Hash::N_SHA1: evp_ = EVP_sha1(); break; - case Hash::N_SHA224: evp_ = EVP_sha224(); break; - case Hash::N_SHA256: evp_ = EVP_sha256(); break; - case Hash::N_SHA384: evp_ = EVP_sha384(); break; - case Hash::N_SHA512: evp_ = EVP_sha512(); break; - default: - throw cybozu::Exception("crypto:Hmac:") << name; - } - } - std::string eval(const std::string& key, const std::string& data) - { - std::string out(EVP_MD_size(evp_) + 1, 0); - unsigned int outLen = 0; - if (HMAC(evp_, key.c_str(), static_cast(key.size()), - cybozu::cast(data.c_str()), data.size(), cybozu::cast(&out[0]), &outLen)) { - out.resize(outLen); - return out; - } - throw cybozu::Exception("crypto::Hamc::eval"); - } -}; - -class Cipher { - const EVP_CIPHER *cipher_; - EVP_CIPHER_CTX *ctx_; 
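
The `Hash` and `Hmac` wrappers above drive OpenSSL's per-algorithm `SHA*_Init`/`Update`/`Final` calls; modern OpenSSL exposes the same functionality through the one-shot EVP interface. A minimal sketch, assuming OpenSSL's `EVP_Digest` and linking with `-lcrypto`:

```
// One-shot SHA-256 via OpenSSL's EVP interface, the same library the
// cybozu::crypto::Hash wrapper above uses. Error handling reduced to a bool.
#include <openssl/evp.h>
#include <cstdio>
#include <string>

static bool sha256(const std::string& msg,
                   unsigned char out[EVP_MAX_MD_SIZE], unsigned int* outLen)
{
    // EVP_Digest performs init/update/final in a single call.
    return EVP_Digest(msg.data(), msg.size(), out, outLen,
                      EVP_sha256(), NULL) == 1;
}

int main()
{
    unsigned char md[EVP_MAX_MD_SIZE];
    unsigned int len = 0;
    if (!sha256("abc", md, &len)) return 1;
    for (unsigned int i = 0; i < len; i++) printf("%02x", md[i]);
    // expected: ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad
    printf("\n");
    return 0;
}
```
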
-public: - enum Name { - N_AES128_CBC, - N_AES192_CBC, - N_AES256_CBC, - N_AES128_ECB, // be carefull to use - N_AES192_ECB, // be carefull to use - N_AES256_ECB, // be carefull to use - }; - static inline size_t getSize(Name name) - { - switch (name) { - case N_AES128_CBC: return 128; - case N_AES192_CBC: return 192; - case N_AES256_CBC: return 256; - case N_AES128_ECB: return 128; - case N_AES192_ECB: return 192; - case N_AES256_ECB: return 256; - default: - throw cybozu::Exception("crypto:Cipher:getSize") << name; - } - } - enum Mode { - Decoding, - Encoding - }; - explicit Cipher(Name name = N_AES128_CBC) - : cipher_(0) - , ctx_(0) - { - ctx_ = EVP_CIPHER_CTX_new(); - if (ctx_ == 0) throw cybozu::Exception("crypto:Cipher:EVP_CIPHER_CTX_new"); - switch (name) { - case N_AES128_CBC: cipher_ = EVP_aes_128_cbc(); break; - case N_AES192_CBC: cipher_ = EVP_aes_192_cbc(); break; - case N_AES256_CBC: cipher_ = EVP_aes_256_cbc(); break; - case N_AES128_ECB: cipher_ = EVP_aes_128_ecb(); break; - case N_AES192_ECB: cipher_ = EVP_aes_192_ecb(); break; - case N_AES256_ECB: cipher_ = EVP_aes_256_ecb(); break; - default: - throw cybozu::Exception("crypto:Cipher:Cipher:name") << (int)name; - } - } - ~Cipher() - { - if (ctx_) EVP_CIPHER_CTX_free(ctx_); - } - /* - @note don't use padding = true - */ - void setup(Mode mode, const std::string& key, const std::string& iv, bool padding = false) - { - const int keyLen = static_cast(key.size()); - const int expectedKeyLen = EVP_CIPHER_key_length(cipher_); - if (keyLen != expectedKeyLen) { - throw cybozu::Exception("crypto:Cipher:setup:keyLen") << keyLen << expectedKeyLen; - } - - int ret = EVP_CipherInit_ex(ctx_, cipher_, NULL, cybozu::cast(key.c_str()), cybozu::cast(iv.c_str()), mode == Encoding ? 1 : 0); - if (ret != 1) { - throw cybozu::Exception("crypto:Cipher:setup:EVP_CipherInit_ex") << ret; - } - ret = EVP_CIPHER_CTX_set_padding(ctx_, padding ? 
1 : 0); - if (ret != 1) { - throw cybozu::Exception("crypto:Cipher:setup:EVP_CIPHER_CTX_set_padding") << ret; - } -/* - const int ivLen = static_cast(iv.size()); - const int expectedIvLen = EVP_CIPHER_CTX_iv_length(&ctx_); - if (ivLen != expectedIvLen) { - throw cybozu::Exception("crypto:Cipher:setup:ivLen") << ivLen << expectedIvLen; - } -*/ - } - /* - the size of outBuf must be larger than inBufSize + blockSize - @retval positive or 0 : writeSize(+blockSize) - @retval -1 : error - */ - int update(char *outBuf, const char *inBuf, int inBufSize) - { - int outLen = 0; - int ret = EVP_CipherUpdate(ctx_, cybozu::cast(outBuf), &outLen, cybozu::cast(inBuf), inBufSize); - if (ret != 1) return -1; - return outLen; - } - /* - return -1 if padding - @note don't use - */ - int finalize(char *outBuf) - { - int outLen = 0; - int ret = EVP_CipherFinal_ex(ctx_, cybozu::cast(outBuf), &outLen); - if (ret != 1) return -1; - return outLen; - } -}; - -} } // cybozu::crypto - -#ifdef __APPLE__ - #pragma GCC diagnostic pop -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/endian.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/endian.hpp deleted file mode 100644 index 3f1575c46..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/endian.hpp +++ /dev/null @@ -1,224 +0,0 @@ -#pragma once - -/** - @file - @brief deal with big and little endian - - @author MITSUNARI Shigeo(@herumi) -*/ -#include -#include -#include -#include - -namespace cybozu { - -#ifdef _MSC_VER -inline uint16_t byteSwap(uint16_t x) { return _byteswap_ushort(x); } -inline uint32_t byteSwap(uint32_t x) { return _byteswap_ulong(x); } -inline uint64_t byteSwap(uint64_t x) { return _byteswap_uint64(x); } -#else -#if (((__GNUC__) << 16) + (__GNUC_MINOR__)) >= ((4 << 16) + 8) -inline uint16_t byteSwap(uint16_t x) { return __builtin_bswap16(x); } -#else -inline uint16_t byteSwap(uint16_t x) { return (x >> 8) | (x << 8); } -#endif -inline uint32_t byteSwap(uint32_t x) { return __builtin_bswap32(x); } -inline uint64_t byteSwap(uint64_t x) { return __builtin_bswap64(x); } -#endif - -/** - get 16bit integer as little endian - @param src [in] pointer -*/ -inline uint16_t Get16bitAsLE(const void *src) -{ -#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE - uint16_t x; - memcpy(&x, src, sizeof(x)); - return x; -#else - const uint8_t *p = static_cast(src); - return p[0] | (p[1] << 8); -#endif -} - -/** - get 32bit integer as little endian - @param src [in] pointer -*/ -inline uint32_t Get32bitAsLE(const void *src) -{ -#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE - uint32_t x; - memcpy(&x, src, sizeof(x)); - return x; -#else - const uint8_t *p = static_cast(src); - return Get16bitAsLE(p) | (static_cast(Get16bitAsLE(p + 2)) << 16); -#endif -} - -/** - get 64bit integer as little endian - @param src [in] pointer -*/ -inline uint64_t Get64bitAsLE(const void *src) -{ -#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE - uint64_t x; - memcpy(&x, src, sizeof(x)); - return x; -#else - const uint8_t *p = static_cast(src); - return Get32bitAsLE(p) | (static_cast(Get32bitAsLE(p + 4)) << 32); -#endif -} - -/** - get 16bit integer as bit endian - @param src [in] pointer -*/ -inline uint16_t Get16bitAsBE(const void *src) -{ -#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE - uint16_t x; - memcpy(&x, src, sizeof(x)); - return byteSwap(x); -#else - const uint8_t *p = static_cast(src); - return p[1] | (p[0] << 8); -#endif -} - -/** - get 32bit integer as bit endian - @param src [in] pointer -*/ -inline uint32_t Get32bitAsBE(const void *src) -{ -#if 
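The Cipher wrapper completes above; as its comments warn, padding is best left disabled, so inputs must be block-aligned. A hedged sketch of AES-128-CBC encryption with illustrative (not real) key and IV values:

#include <cybozu/crypto.hpp>
#include <string>
#include <vector>

int main()
{
    using cybozu::crypto::Cipher;
    Cipher enc(Cipher::N_AES128_CBC);
    const std::string key(16, 'k'); // 128-bit key (illustrative only)
    const std::string iv(16, 'i');  // 16-byte IV
    enc.setup(Cipher::Encoding, key, iv); // padding defaults to false, as the header advises
    const std::string pt(32, 'x');        // a multiple of the 16-byte block size
    std::vector<char> ct(pt.size() + 16); // update() may write up to inBufSize + blockSize
    int n = enc.update(&ct[0], pt.data(), (int)pt.size());
    return n < 0 ? 1 : 0; // n < 0 signals an EVP failure
}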
CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE - uint32_t x; - memcpy(&x, src, sizeof(x)); - return byteSwap(x); -#else - const uint8_t *p = static_cast(src); - return Get16bitAsBE(p + 2) | (static_cast(Get16bitAsBE(p)) << 16); -#endif -} - -/** - get 64bit integer as big endian - @param src [in] pointer -*/ -inline uint64_t Get64bitAsBE(const void *src) -{ -#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE - uint64_t x; - memcpy(&x, src, sizeof(x)); - return byteSwap(x); -#else - const uint8_t *p = static_cast(src); - return Get32bitAsBE(p + 4) | (static_cast(Get32bitAsBE(p)) << 32); -#endif -} - -/** - set 16bit integer as little endian - @param src [out] pointer - @param x [in] integer -*/ -inline void Set16bitAsLE(void *src, uint16_t x) -{ -#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE - memcpy(src, &x, sizeof(x)); -#else - uint8_t *p = static_cast(src); - p[0] = static_cast(x); - p[1] = static_cast(x >> 8); -#endif -} -/** - set 32bit integer as little endian - @param src [out] pointer - @param x [in] integer -*/ -inline void Set32bitAsLE(void *src, uint32_t x) -{ -#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE - memcpy(src, &x, sizeof(x)); -#else - uint8_t *p = static_cast(src); - p[0] = static_cast(x); - p[1] = static_cast(x >> 8); - p[2] = static_cast(x >> 16); - p[3] = static_cast(x >> 24); -#endif -} -/** - set 64bit integer as little endian - @param src [out] pointer - @param x [in] integer -*/ -inline void Set64bitAsLE(void *src, uint64_t x) -{ -#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE - memcpy(src, &x, sizeof(x)); -#else - uint8_t *p = static_cast(src); - Set32bitAsLE(p, static_cast(x)); - Set32bitAsLE(p + 4, static_cast(x >> 32)); -#endif -} -/** - set 16bit integer as big endian - @param src [out] pointer - @param x [in] integer -*/ -inline void Set16bitAsBE(void *src, uint16_t x) -{ -#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE - x = byteSwap(x); - memcpy(src, &x, sizeof(x)); -#else - uint8_t *p = static_cast(src); - p[0] = static_cast(x >> 8); - p[1] = static_cast(x); -#endif -} -/** - set 32bit integer as big endian - @param src [out] pointer - @param x [in] integer -*/ -inline void Set32bitAsBE(void *src, uint32_t x) -{ -#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE - x = byteSwap(x); - memcpy(src, &x, sizeof(x)); -#else - uint8_t *p = static_cast(src); - p[0] = static_cast(x >> 24); - p[1] = static_cast(x >> 16); - p[2] = static_cast(x >> 8); - p[3] = static_cast(x); -#endif -} -/** - set 64bit integer as big endian - @param src [out] pointer - @param x [in] integer -*/ -inline void Set64bitAsBE(void *src, uint64_t x) -{ -#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE - x = byteSwap(x); - memcpy(src, &x, sizeof(x)); -#else - uint8_t *p = static_cast(src); - Set32bitAsBE(p, static_cast(x >> 32)); - Set32bitAsBE(p + 4, static_cast(x)); -#endif -} - -} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/exception.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/exception.hpp deleted file mode 100644 index 247ba4de0..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/exception.hpp +++ /dev/null @@ -1,252 +0,0 @@ -#pragma once -/** - @file - @brief definition of abstruct exception class - @author MITSUNARI Shigeo(@herumi) -*/ -#ifdef CYBOZU_MINIMUM_EXCEPTION - -#include - -namespace cybozu { - -namespace exception { -inline const char *makeString(const char *, size_t) -{ - return ""; -} - -} // cybozu::exception - -class Exception { -public: - explicit Exception(const char* = 0, bool = true) - { - } - ~Exception() CYBOZU_NOEXCEPT {} - const char *what() 
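The endian.hpp Get/Set helpers completed above read and write fixed-width integers in an explicit byte order, using memcpy plus byteSwap on little-endian hosts and manual byte assembly elsewhere. A round-trip sketch:

#include <cybozu/endian.hpp>
#include <cassert>

int main()
{
    char buf[8];
    cybozu::Set32bitAsBE(buf, 0x01020304);
    assert((unsigned char)buf[0] == 0x01 && (unsigned char)buf[3] == 0x04); // big-endian layout
    assert(cybozu::Get32bitAsBE(buf) == 0x01020304u);

    cybozu::Set16bitAsLE(buf, 0xBEEF); // LE variants store the low byte first
    assert(cybozu::Get16bitAsLE(buf) == 0xBEEF);
    return 0;
}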
const CYBOZU_NOEXCEPT { return "cybozu:Exception"; } - template - Exception& operator<<(const T&) - { - return *this; - } -}; - -} // cybozu - -#else - -#include -#include -#include -#include -#include -#ifdef _WIN32 - #include - #include -#else - #include // for strerror_r -#endif -#include -#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE - #include -#endif - -namespace cybozu { - -const bool DontThrow = true; - -namespace exception { - -/* get max 16 characters to avoid buffer overrun */ -inline std::string makeString(const char *str, size_t size) -{ - return std::string(str, std::min(size, 16)); -} - -#ifdef _WIN32 -inline std::string wstr2str(const std::wstring& wstr) -{ - std::string str; - for (size_t i = 0; i < wstr.size(); i++) { - uint16_t c = wstr[i]; - if (c < 0x80) { - str += char(c); - } else { - char buf[16]; - CYBOZU_SNPRINTF(buf, sizeof(buf), "\\u%04x", c); - str += buf; - } - } - return str; -} -#endif - -} // cybozu::exception - -/** - convert errno to string - @param err [in] errno - @note for both windows and linux -*/ -inline std::string ConvertErrorNoToString(int err) -{ - char errBuf[256]; - std::string ret; -#ifdef _WIN32 - if (strerror_s(errBuf, sizeof(errBuf), err) == 0) { - ret = errBuf; - } else { - ret = "err"; - } -#elif defined(_GNU_SOURCE) - ret = ::strerror_r(err, errBuf, sizeof(errBuf)); -#else - if (strerror_r(err, errBuf, sizeof(errBuf)) == 0) { - ret = errBuf; - } else { - ret = "err"; - } -#endif - char buf2[64]; - CYBOZU_SNPRINTF(buf2, sizeof(buf2), "(%d)", err); - ret += buf2; - return ret; -} - -class Exception : public std::exception { - mutable std::string str_; -#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE - mutable std::string stackTrace_; -#endif -public: - explicit Exception(const std::string& name = "", bool enableStackTrace = true) - : str_(name) - { -#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE - if (enableStackTrace) stackTrace_ = cybozu::StackTrace().toString(); -#else - cybozu::disable_warning_unused_variable(enableStackTrace); -#endif - } - ~Exception() CYBOZU_NOEXCEPT {} - const char *what() const CYBOZU_NOEXCEPT { return toString().c_str(); } - const std::string& toString() const CYBOZU_NOEXCEPT - { -#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE - try { - if (!stackTrace_.empty()) { -#ifdef CYBOZU_STACKTRACE_ONELINE - str_ += "\n<<>> "; - str_ += stackTrace_; -#else - str_ += "\n<< - Exception& operator<<(const T& x) - { - std::ostringstream os; - os << x; - return operator<<(os.str()); - } -}; - -class ErrorNo { -public: -#ifdef _WIN32 - typedef unsigned int NativeErrorNo; -#else - typedef int NativeErrorNo; -#endif - explicit ErrorNo(NativeErrorNo err) - : err_(err) - { - } - ErrorNo() - : err_(getLatestNativeErrorNo()) - { - } - NativeErrorNo getLatestNativeErrorNo() const - { -#ifdef _WIN32 - return ::GetLastError(); -#else - return errno; -#endif - } - /** - convert NativeErrNo to string(maybe UTF8) - @param err [in] errno - @note Linux : same as ConvertErrorNoToString - Windows : for Win32 API(use en-us) - */ - std::string toString() const - { -#ifdef _WIN32 - const int msgSize = 256; - wchar_t msg[msgSize]; - int size = FormatMessageW( - FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, - 0, - err_, - MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US), - msg, - msgSize, - NULL - ); - if (size <= 0) return ""; - // remove last "\r\n" - if (size > 2 && msg[size - 2] == '\r') { - msg[size - 2] = 0; - size -= 2; - } - std::string ret; - ret.resize(size); - // assume ascii only - for (int i = 0; i < size; i++) { - ret[i] = (char)msg[i]; - } - char 
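The full Exception class above accumulates context through operator<<: each streamed value is stringified and appended to the message (colon-separated in upstream cybozu). A sketch assuming that separator behavior:

#include <cybozu/exception.hpp>
#include <iostream>

int f(int x)
{
    if (x < 0) {
        // stream extra context into the exception message
        throw cybozu::Exception("sample:f:negative") << x;
    }
    return x * 2;
}

int main()
{
    try {
        f(-5);
    } catch (const cybozu::Exception& e) {
        std::cout << e.what() << std::endl; // "sample:f:negative:-5" in upstream cybozu
    }
    return 0;
}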
buf2[64]; - CYBOZU_SNPRINTF(buf2, sizeof(buf2), "(%u)", err_); - ret += buf2; - return ret; -#else - return ConvertErrorNoToString(err_); -#endif - } -private: - NativeErrorNo err_; -}; - -inline std::ostream& operator<<(std::ostream& os, const cybozu::ErrorNo& self) -{ - return os << self.toString(); -} - -} // cybozu -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/hash.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/hash.hpp deleted file mode 100644 index 3fd246fa1..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/hash.hpp +++ /dev/null @@ -1,67 +0,0 @@ -#pragma once -#include - -namespace cybozu { - -template -uint32_t hash32(Iter begin, Iter end, uint32_t v = 0) -{ - if (v == 0) v = 2166136261U; - while (begin != end) { - v ^= *begin++; - v *= 16777619; - } - return v; -} -template -uint64_t hash64(Iter begin, Iter end, uint64_t v = 0) -{ - if (v == 0) v = 14695981039346656037ULL; - while (begin != end) { - v ^= *begin++; - v *= 1099511628211ULL; - } - v ^= v >> 32; - return v; -} -template -uint32_t hash32(const T *x, size_t n, uint32_t v = 0) -{ - return hash32(x, x + n, v); -} -template -uint64_t hash64(const T *x, size_t n, uint64_t v = 0) -{ - return hash64(x, x + n, v); -} - -} // cybozu - -namespace boost { - -template -struct hash; - -} // boost - -#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 -#include -#else - -namespace std { CYBOZU_NAMESPACE_TR1_BEGIN - -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable : 4099) // missmatch class and struct -#endif -#ifndef __APPLE__ -template -struct hash; -#endif -#ifdef _MSC_VER - #pragma warning(pop) -#endif - -CYBOZU_NAMESPACE_TR1_END } // std - -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/inttype.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/inttype.hpp deleted file mode 100644 index 62856bdb3..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/inttype.hpp +++ /dev/null @@ -1,163 +0,0 @@ -#pragma once -/** - @file - @brief int type definition and macros - @author MITSUNARI Shigeo(@herumi) -*/ - -#if defined(_MSC_VER) && (MSC_VER <= 1500) && !defined(CYBOZU_DEFINED_INTXX) - #define CYBOZU_DEFINED_INTXX - typedef __int64 int64_t; - typedef unsigned __int64 uint64_t; - typedef unsigned int uint32_t; - typedef int int32_t; - typedef unsigned short uint16_t; - typedef short int16_t; - typedef unsigned char uint8_t; - typedef signed char int8_t; -#else - #include -#endif - -#ifdef _MSC_VER - #ifndef CYBOZU_DEFINED_SSIZE_T - #define CYBOZU_DEFINED_SSIZE_T - #ifdef _WIN64 - typedef int64_t ssize_t; - #else - typedef int32_t ssize_t; - #endif - #endif -#else - #include // for ssize_t -#endif - -#ifndef CYBOZU_ALIGN - #ifdef _MSC_VER - #define CYBOZU_ALIGN(x) __declspec(align(x)) - #else - #define CYBOZU_ALIGN(x) __attribute__((aligned(x))) - #endif -#endif -#ifndef CYBOZU_FORCE_INLINE - #ifdef _MSC_VER - #define CYBOZU_FORCE_INLINE __forceinline - #else - #define CYBOZU_FORCE_INLINE __attribute__((always_inline)) - #endif -#endif -#ifndef CYBOZU_UNUSED - #ifdef __GNUC__ - #define CYBOZU_UNUSED __attribute__((unused)) - #else - #define CYBOZU_UNUSED - #endif -#endif -#ifndef CYBOZU_ALLOCA - #ifdef _MSC_VER - #include - #define CYBOZU_ALLOCA(x) _malloca(x) - #else - #define CYBOZU_ALLOCA(x) __builtin_alloca(x) - #endif -#endif -#ifndef CYBOZU_NUM_OF_ARRAY - #define CYBOZU_NUM_OF_ARRAY(x) (sizeof(x) / sizeof(*x)) -#endif -#ifndef CYBOZU_SNPRINTF - #if defined(_MSC_VER) && (_MSC_VER < 1900) - #define 
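hash.hpp, deleted above, supplies FNV-style folding hashes over iterator ranges or pointer/length pairs, with both 32- and 64-bit offset bases. A small sketch:

#include <cybozu/hash.hpp>
#include <cstdio>
#include <string>

int main()
{
    const std::string s = "hello";
    uint32_t h32 = cybozu::hash32(s.begin(), s.end()); // iterator-range overload
    uint64_t h64 = cybozu::hash64(s.data(), s.size()); // pointer + length overload
    std::printf("%08x %016llx\n", h32, (unsigned long long)h64);
    return 0;
}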
CYBOZU_SNPRINTF(x, len, ...) (void)_snprintf_s(x, len, len - 1, __VA_ARGS__) - #else - #define CYBOZU_SNPRINTF(x, len, ...) (void)snprintf(x, len, __VA_ARGS__) - #endif -#endif - -#define CYBOZU_CPP_VERSION_CPP03 0 -#define CYBOZU_CPP_VERSION_TR1 1 -#define CYBOZU_CPP_VERSION_CPP11 2 -#define CYBOZU_CPP_VERSION_CPP14 3 -#define CYBOZU_CPP_VERSION_CPP17 4 - -#ifdef __GNUC__ - #define CYBOZU_GNUC_PREREQ(major, minor) ((__GNUC__) * 100 + (__GNUC_MINOR__) >= (major) * 100 + (minor)) -#else - #define CYBOZU_GNUC_PREREQ(major, minor) 0 -#endif - -#if (__cplusplus >= 201703) - #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP17 -#elif (__cplusplus >= 201402) - #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP14 -#elif (__cplusplus >= 201103) || (_MSC_VER >= 1500) || defined(__GXX_EXPERIMENTAL_CXX0X__) - #if defined(_MSC_VER) && (_MSC_VER <= 1600) - #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_TR1 - #else - #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP11 - #endif -#elif CYBOZU_GNUC_PREREQ(4, 5) || (CYBOZU_GNUC_PREREQ(4, 2) && __GLIBCXX__ >= 20070719) || defined(__INTEL_COMPILER) || (__clang_major__ >= 3) - #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_TR1 -#else - #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP03 -#endif - -#ifdef CYBOZU_USE_BOOST - #define CYBOZU_NAMESPACE_STD boost - #define CYBOZU_NAMESPACE_TR1_BEGIN - #define CYBOZU_NAMESPACE_TR1_END -#elif (CYBOZU_CPP_VERSION == CYBOZU_CPP_VERSION_TR1) && !defined(__APPLE__) - #define CYBOZU_NAMESPACE_STD std::tr1 - #define CYBOZU_NAMESPACE_TR1_BEGIN namespace tr1 { - #define CYBOZU_NAMESPACE_TR1_END } -#else - #define CYBOZU_NAMESPACE_STD std - #define CYBOZU_NAMESPACE_TR1_BEGIN - #define CYBOZU_NAMESPACE_TR1_END -#endif - -#ifndef CYBOZU_OS_BIT - #if defined(_WIN64) || defined(__x86_64__) || defined(__AARCH64EL__) || defined(__EMSCRIPTEN__) - #define CYBOZU_OS_BIT 64 - #else - #define CYBOZU_OS_BIT 32 - #endif -#endif - -#ifndef CYBOZU_HOST - #define CYBOZU_HOST_UNKNOWN 0 - #define CYBOZU_HOST_INTEL 1 - #define CYBOZU_HOST_ARM 2 - #if defined(_M_IX86) || defined(_M_AMD64) || defined(__x86_64__) || defined(__i386__) - #define CYBOZU_HOST CYBOZU_HOST_INTEL - #elif defined(__arm__) || defined(__AARCH64EL__) - #define CYBOZU_HOST CYBOZU_HOST_ARM - #else - #define CYBOZU_HOST CYBOZU_HOST_UNKNOWN - #endif -#endif - -#ifndef CYBOZU_ENDIAN - #define CYBOZU_ENDIAN_UNKNOWN 0 - #define CYBOZU_ENDIAN_LITTLE 1 - #define CYBOZU_ENDIAN_BIG 2 - #if (CYBOZU_HOST == CYBOZU_HOST_INTEL) - #define CYBOZU_ENDIAN CYBOZU_ENDIAN_LITTLE - #elif (CYBOZU_HOST == CYBOZU_HOST_ARM) && (defined(__ARM_EABI__) || defined(__AARCH64EL__)) - #define CYBOZU_ENDIAN CYBOZU_ENDIAN_LITTLE - #else - #define CYBOZU_ENDIAN CYBOZU_ENDIAN_UNKNOWN - #endif -#endif - -#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 - #define CYBOZU_NOEXCEPT noexcept -#else - #define CYBOZU_NOEXCEPT throw() -#endif -namespace cybozu { -template -void disable_warning_unused_variable(const T&) { } -template -T cast(const S* ptr) { return static_cast(static_cast(ptr)); } -template -T cast(S* ptr) { return static_cast(static_cast(ptr)); } -} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/itoa.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/itoa.hpp deleted file mode 100644 index 072e5b8b4..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/itoa.hpp +++ /dev/null @@ -1,337 +0,0 @@ -#pragma once -/** - @file - @brief convert integer to string(ascii) - - @author MITSUNARI Shigeo(@herumi) -*/ -#include -#ifndef CYBOZU_DONT_USE_STRING -#include 
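inttype.hpp, completed above, centralizes fixed-width typedefs, compiler shims such as CYBOZU_SNPRINTF, and the cybozu::cast helper that reinterprets pointers through void*. A sketch of the macros in use; cast taking the destination pointer type as its explicit template argument is assumed from upstream cybozu:

#include <cybozu/inttype.hpp>
#include <cstdio>

int main()
{
    int tbl[] = { 1, 2, 3, 4 };
    char buf[32];
    // maps to snprintf, or _snprintf_s on pre-2015 MSVC
    CYBOZU_SNPRINTF(buf, sizeof(buf), "%d entries", (int)CYBOZU_NUM_OF_ARRAY(tbl));
    std::puts(buf); // 4 entries

    const char *p = buf;
    // cast<T>(ptr) goes through void*, avoiding reinterpret_cast at call sites
    const unsigned char *q = cybozu::cast<const unsigned char*>(p);
    return q[0] == '4' ? 0 : 1;
}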
-#endif -#include -#include -#include - -namespace cybozu { - -template -size_t getHexLength(T x) -{ - return x == 0 ? 1 : cybozu::bsr(x) / 4 + 1; -} - -template -size_t getBinLength(T x) -{ - return x == 0 ? 1 : cybozu::bsr(x) + 1; -} -/* - convert x to hex string with len - @note out should have getHexLength(x) size - out is not NUL terminated -*/ -template -void itohex(char *out, size_t len, T x, bool upCase = true) -{ - static const char *hexTbl[] = { - "0123456789abcdef", - "0123456789ABCDEF" - }; - const char *tbl = hexTbl[upCase]; - for (size_t i = 0; i < len; i++) { - out[len - i - 1] = tbl[x % 16]; - x /= 16; - } -} -/* - convert x to bin string with len - @note out should have getBinLength(x) size - out is not NUL terminated -*/ -template -void itobin(char *out, size_t len, T x) -{ - for (size_t i = 0; i < len; i++) { - out[len - i - 1] = '0' + (x & 1); - x >>= 1; - } -} - -namespace itoa_local { - -/* - convert x to dec - use buf[0, bufSize) - return 0 if false - return writtenSize which is not terminated - @REMARK the top of string is buf + bufSize - writtenSize -*/ -template -size_t uintToDec(char *buf, size_t bufSize, UT x) -{ - for (size_t i = 0; i < bufSize; i++) { - buf[bufSize - 1 - i] = '0' + static_cast(x % 10); - x /= 10; - if (x == 0) return i + 1; - } - return 0; -} - -/* - convert x to hex - use buf[0, bufSize) - return 0 if false - return writtenSize which is not terminated - @REMARK the top of string is buf + bufSize - writtenSize -*/ -template -size_t uintToHex(char *buf, size_t bufSize, UT x, bool upCase = true) -{ - static const char *hexTbl[] = { - "0123456789abcdef", - "0123456789ABCDEF" - }; - const char *tbl = hexTbl[upCase]; - for (size_t i = 0; i < bufSize; i++) { - buf[bufSize - 1 - i] = tbl[x % 16]; - x /= 16; - if (x == 0) return i + 1; - } - return 0; -} - -/* - convert x to bin - use buf[0, bufSize) - return 0 if false - return writtenSize which is not terminated - @REMARK the top of string is buf + bufSize - writtenSize -*/ -template -size_t uintToBin(char *buf, size_t bufSize, UT x) -{ - for (size_t i = 0; i < bufSize; i++) { - buf[bufSize - 1 - i] = '0' + (x & 1); - x >>= 1; - if (x == 0) return i + 1; - } - return 0; -} - -template -size_t intToDec(char *buf, size_t bufSize, T x) -{ - if (x == LLONG_MIN) { - const char minStr[] = "-9223372036854775808"; - const size_t minStrLen = sizeof(minStr) - 1; - if (bufSize < minStrLen) { - return 0; - } else { - memcpy(buf + bufSize - minStrLen, minStr, minStrLen); - return minStrLen; - } - } - bool negative = x < 0; - uint64_t absX = negative ? -x : x; - size_t n = uintToDec(buf, bufSize, absX); - if (n == 0) return 0; - if (negative) { - if (bufSize == n) return 0; - n++; - buf[bufSize - n] = '-'; - } - return n; -} - -#ifndef CYBOZU_DONT_USE_STRING -template -void convertFromUint(std::string& out, T x) -{ - char buf[40]; - size_t n = uintToDec(buf, sizeof(buf), x); - assert(n > 0); - out.assign(buf + sizeof(buf) - n, n); -} - -inline void convertFromInt(std::string& out, long long x) -{ - char buf[40]; - size_t n = intToDec(buf, sizeof(buf), x); - assert(n > 0); - out.assign(buf + sizeof(buf) - n, n); -} - -template -void itohexLocal(std::string& out, T x, bool upCase, bool withZero) -{ - const size_t size = withZero ? sizeof(T) * 2 : getHexLength(x); - out.resize(size); - itohex(&out[0], size, x, upCase); -} - -template -void itobinLocal(std::string& out, T x, bool withZero) -{ - const size_t size = withZero ? 
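The raw-buffer converters above (itohex/itobin plus getHexLength/getBinLength) write exactly the requested number of digits and do not NUL-terminate. A sketch, assuming cybozu::bsr from the bit_operation header that itoa.hpp includes upstream:

#include <cybozu/itoa.hpp>
#include <cstdio>

int main()
{
    unsigned int x = 0x1a2b;
    char buf[16];
    size_t n = cybozu::getHexLength(x);          // bsr(x)/4 + 1 == 4 digits
    cybozu::itohex(buf, n, x, /*upCase=*/false); // fills buf[0..n), no NUL terminator
    std::printf("%.*s\n", (int)n, buf);          // 1a2b
    return 0;
}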
sizeof(T) * 8 : getBinLength(x); - out.resize(size); - itobin(&out[0], size, x); -} -#endif - -} // itoa_local - -#ifndef CYBOZU_DONT_USE_STRING -/** - convert int to string - @param out [out] string - @param x [in] int -*/ -inline void itoa(std::string& out, int x) -{ - itoa_local::convertFromInt(out, x); -} - -/** - convert long long to string - @param out [out] string - @param x [in] long long -*/ -inline void itoa(std::string& out, long long x) -{ - itoa_local::convertFromInt(out, x); -} - -/** - convert unsigned int to string - @param out [out] string - @param x [in] unsigned int -*/ -inline void itoa(std::string& out, unsigned int x) -{ - itoa_local::convertFromUint(out, x); -} - -/** - convert unsigned long long to string - @param out [out] string - @param x [in] unsigned long long -*/ -inline void itoa(std::string& out, unsigned long long x) -{ - itoa_local::convertFromUint(out, x); -} - -#if defined(__SIZEOF_LONG__) && (__SIZEOF_LONG__ == 8) -inline void itoa(std::string& out, long x) { itoa(out, static_cast(x)); } -inline void itoa(std::string& out, unsigned long x) { itoa(out, static_cast(x)); } -#else -inline void itoa(std::string& out, long x) { itoa(out, static_cast(x)); } -inline void itoa(std::string& out, unsigned long x) { itoa(out, static_cast(x)); } -#endif -/** - convert integer to string - @param x [in] int -*/ -template -inline std::string itoa(T x) -{ - std::string ret; - itoa(ret, x); - return ret; -} - -inline void itohex(std::string& out, unsigned char x, bool upCase = true, bool withZero = true) -{ - itoa_local::itohexLocal(out, x, upCase, withZero); -} - -inline void itohex(std::string& out, unsigned short x, bool upCase = true, bool withZero = true) -{ - itoa_local::itohexLocal(out, x, upCase, withZero); -} - -inline void itohex(std::string& out, unsigned int x, bool upCase = true, bool withZero = true) -{ - itoa_local::itohexLocal(out, x, upCase, withZero); -} - -inline void itohex(std::string& out, unsigned long x, bool upCase = true, bool withZero = true) -{ - itoa_local::itohexLocal(out, x, upCase, withZero); -} - -inline void itohex(std::string& out, unsigned long long x, bool upCase = true, bool withZero = true) -{ - itoa_local::itohexLocal(out, x, upCase, withZero); -} - -template -inline std::string itobin(T x, bool withZero = true) -{ - std::string out; - itoa_local::itobinLocal(out, x, withZero); - return out; -} - -inline void itobin(std::string& out, unsigned char x, bool withZero = true) -{ - itoa_local::itobinLocal(out, x, withZero); -} - -inline void itobin(std::string& out, unsigned short x, bool withZero = true) -{ - itoa_local::itobinLocal(out, x, withZero); -} - -inline void itobin(std::string& out, unsigned int x, bool withZero = true) -{ - itoa_local::itobinLocal(out, x, withZero); -} - -inline void itobin(std::string& out, unsigned long x, bool withZero = true) -{ - itoa_local::itobinLocal(out, x, withZero); -} - -inline void itobin(std::string& out, unsigned long long x, bool withZero = true) -{ - itoa_local::itobinLocal(out, x, withZero); -} - -template -inline std::string itohex(T x, bool upCase = true, bool withZero = true) -{ - std::string out; - itohex(out, x, upCase, withZero); - return out; -} -/** - convert integer to string with zero padding - @param x [in] int - @param len [in] minimum lengh of string - @param c [in] padding character - @note - itoa(12, 4) == "0012" - itoa(1234, 4) == "1234" - itoa(12345, 4) == "12345" - itoa(-12, 4) == "-012" -*/ -template -inline std::string itoaWithZero(T x, size_t len, char c = '0') -{ - 
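The std::string front ends above wrap those helpers: itoa for decimal, itohex/itobin with an optional zero-pad to the type's full width. A sketch:

#include <cybozu/itoa.hpp>
#include <iostream>

int main()
{
    std::string d = cybozu::itoa(-12345);                     // "-12345"
    std::string h = cybozu::itohex(0xbeefu);                  // padded to sizeof(T)*2 digits: "0000BEEF"
    std::string h2 = cybozu::itohex(0xbeefu, false, false);   // lower case, no padding: "beef"
    std::string b = cybozu::itobin((unsigned char)5, false);  // "101"
    std::cout << d << ' ' << h << ' ' << h2 << ' ' << b << std::endl;
    return 0;
}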
std::string ret; - itoa(ret, x); - if (ret.size() < len) { - std::string zero(len - ret.size(), c); - if (x >= 0) { - ret = zero + ret; - } else { - ret = "-" + zero + ret.substr(1); - } - } - return ret; -} -#endif - -} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_libeay32.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_libeay32.hpp deleted file mode 100644 index d83f1b6ea..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_libeay32.hpp +++ /dev/null @@ -1,21 +0,0 @@ -#pragma once -/** - @file - @brief link libeay32.lib of openssl - @author MITSUNARI Shigeo(@herumi) -*/ -#if defined(_WIN32) && defined(_MT) - #if _MSC_VER >= 1900 // VC2015 - #ifdef _WIN64 - #pragma comment(lib, "mt/14/libeay32.lib") - #else - #pragma comment(lib, "mt/14/32/libeay32.lib") - #endif -// #elif _MSC_VER == 1800 // VC2013 - #else - #pragma comment(lib, "mt/12/libeay32.lib") - #endif - #pragma comment(lib, "advapi32.lib") - #pragma comment(lib, "gdi32.lib") - #pragma comment(lib, "user32.lib") -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_mpir.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_mpir.hpp deleted file mode 100644 index d20d7b1a9..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_mpir.hpp +++ /dev/null @@ -1,18 +0,0 @@ -#pragma once -/** - @file - @brief link mpir/mpirxx of mpir - @author MITSUNARI Shigeo(@herumi) -*/ -#if defined(_WIN32) && defined(_MT) - #if _MSC_VER >= 1900 // VC2015, VC2017(1910) - #pragma comment(lib, "mt/14/mpir.lib") - #pragma comment(lib, "mt/14/mpirxx.lib") - #elif _MSC_VER == 1800 // VC2013 - #pragma comment(lib, "mt/12/mpir.lib") - #pragma comment(lib, "mt/12/mpirxx.lib") - #elif _MSC_VER == 1700 // VC2012 - #pragma comment(lib, "mt/11/mpir.lib") - #pragma comment(lib, "mt/11/mpirxx.lib") - #endif -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_ssleay32.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_ssleay32.hpp deleted file mode 100644 index 60c2361ae..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_ssleay32.hpp +++ /dev/null @@ -1,19 +0,0 @@ -#pragma once -/** - @file - @brief link ssleay32.lib of openssl - @author MITSUNARI Shigeo(@herumi) -*/ -#if defined(_WIN32) && defined(_MT) - #if _MSC_VER >= 1900 // VC2015 - #ifdef _WIN64 - #pragma comment(lib, "mt/14/ssleay32.lib") - #else - #pragma comment(lib, "mt/14/32/ssleay32.lib") - #endif -// #elif _MSC_VER == 1800 // VC2013 - #else - #pragma comment(lib, "mt/12/ssleay32.lib") - #endif - #pragma comment(lib, "user32.lib") -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/mutex.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/mutex.hpp deleted file mode 100644 index acde6bcbf..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/mutex.hpp +++ /dev/null @@ -1,141 +0,0 @@ -#pragma once -/** - @file - @brief mutex - - @author MITSUNARI Shigeo(@herumi) - @author MITSUNARI Shigeo -*/ - -#ifdef _WIN32 - #include -#else - #include - #include -#endif -#include -#include - -namespace cybozu { - -class ConditionVariable; - -namespace thread { - -#ifdef _WIN32 - typedef HANDLE MutexHandle; - inline void MutexInit(MutexHandle& mutex) - { -// mutex = CreateSemaphore(NULL /* no security */, 1 /* init */, 0x7FFFFFFF /* max */, NULL /* no name */); - mutex = CreateMutex(NULL /* no security */, FALSE /* no owner */, NULL /* no name */); - } - inline void 
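The three link_*.hpp shims removed here contain no code, only MSVC #pragma comment(lib, ...) directives that select the library build matching the compiler version, so merely including one links the dependency. A hypothetical consumer:

#include <cybozu/link_mpir.hpp> // on MSVC this auto-links the matching mt/<ver>/mpir.lib; a no-op elsewhere

int main() { return 0; }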
MutexLock(MutexHandle& mutex) { WaitForSingleObject(mutex, INFINITE); } - /* - return false if timeout - @param msec [in] msec - */ - inline bool MutexLockTimeout(MutexHandle& mutex, int msec) - { - DWORD ret = WaitForSingleObject(mutex, msec); - if (ret == WAIT_OBJECT_0) { - return true; - } - if (ret == WAIT_TIMEOUT) { - return false; - } - /* ret == WAIT_ABANDONED */ - assert(0); - return false; - } - inline void MutexUnlock(MutexHandle& mutex) - { -// ReleaseSemaphore(mutex, 1, NULL); - ReleaseMutex(mutex); - } - inline void MutexTerm(MutexHandle& mutex) { CloseHandle(mutex); } -#else - typedef pthread_mutex_t MutexHandle; - inline void MutexInit(MutexHandle& mutex) - { -#if 1 - pthread_mutex_init(&mutex, NULL); -#else - pthread_mutexattr_t attr; - pthread_mutexattr_init(&attr); - if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_TIMED_NP)) { - perror("pthread_mutexattr_settype"); - exit(1); - } - pthread_mutex_init(&mutex, &attr); - pthread_mutexattr_destroy(&attr); -#endif - } - inline void MutexLock(MutexHandle& mutex) { pthread_mutex_lock(&mutex); } -#if 0 - inline bool MutexLockTimeout(MutexHandle& mutex, int msec) - { - timespec absTime; - clock_gettime(CLOCK_REALTIME, &absTime); - absTime.tv_sec += msec / 1000; - absTime.tv_nsec += msec % 1000; - bool ret = pthread_mutex_timedlock(&mutex, &absTime) == 0; - return ret; - } -#endif - inline void MutexUnlock(MutexHandle& mutex) { pthread_mutex_unlock(&mutex); } - inline void MutexTerm(MutexHandle& mutex) { pthread_mutex_destroy(&mutex); } -#endif - -template -class AutoLockT { -public: - explicit AutoLockT(T &t) - : t_(t) - { - t_.lock(); - } - ~AutoLockT() - { - t_.unlock(); - } -private: - T& t_; - AutoLockT& operator=(const AutoLockT&); -}; - -} // cybozu::thread - -class Mutex { - friend class cybozu::ConditionVariable; -public: - Mutex() - { - thread::MutexInit(hdl_); - } - ~Mutex() - { - thread::MutexTerm(hdl_); - } - void lock() - { - thread::MutexLock(hdl_); - } -#if 0 - bool lockTimeout(int msec) - { - return thread::MutexLockTimeout(hdl_, msec); - } -#endif - void unlock() - { - thread::MutexUnlock(hdl_); - } -private: - Mutex(const Mutex&); - Mutex& operator=(const Mutex&); - thread::MutexHandle hdl_; -}; - -typedef cybozu::thread::AutoLockT AutoLock; - -} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/option.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/option.hpp deleted file mode 100644 index a5dfd137d..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/option.hpp +++ /dev/null @@ -1,723 +0,0 @@ -#pragma once -/** - @file - @brief command line parser - - @author MITSUNARI Shigeo(@herumi) -*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - Option parser - - progName (opt1-name|opt2-name|...) param1 param2 ... - param1:param1-help - param2:param2-help - -op1-name:opt1-help - ... - - How to setup - int num; - -n num ; (optional) option => appendOpt(&x, , "num", "num-help"); - -n num ; must option => appendMust(&x, "num", "num-help"); - - std::vector v; - -v s1 s2 s3 ... 
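mutex.hpp wraps a Win32 mutex or pthread_mutex_t behind a single Mutex class, and AutoLock (AutoLockT<Mutex> in upstream cybozu) provides scope-based locking. A sketch:

#include <cybozu/mutex.hpp>

static cybozu::Mutex g_mutex;
static int g_counter = 0;

void increment()
{
    cybozu::AutoLock lk(g_mutex); // lock() in the ctor, unlock() in the dtor
    g_counter++;
}

int main()
{
    increment();
    return g_counter == 1 ? 0 : 1;
}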
=> appendVec(&v, "v"); - - Remark1: terminate parsing of v if argv begins with '-[^0-9]' - Remark2: the begining character of opt-name is not a number ('0'...'9') - because avoid conflict with minus number - - std::string file1; - file1 is param => appendParam(&file1, "input-file"); - file2 is optional param => appendParamOpt(&file2, "output-file"); - - How to use - opt.parse(argc, argv); - - see sample/option_smpl.cpp -*/ - -namespace cybozu { - -struct OptionError : public cybozu::Exception { - enum Type { - NoError = 0, - BAD_OPT = 1, - BAD_VALUE, - NO_VALUE, - OPT_IS_NECESSARY, - PARAM_IS_NECESSARY, - REDUNDANT_VAL, - BAD_ARGC - }; - Type type; - int argPos; - OptionError() - : cybozu::Exception("OptionError", false) - , type(NoError) - , argPos(0) - { - } - cybozu::Exception& set(Type _type, int _argPos = 0) - { - this->type = _type; - this->argPos = _argPos; - switch (_type) { - case BAD_OPT: - (*this) << "bad opt"; - break; - case BAD_VALUE: - (*this) << "bad value"; - break; - case NO_VALUE: - (*this) << "no value"; - break; - case OPT_IS_NECESSARY: - (*this) << "opt is necessary"; - break; - case PARAM_IS_NECESSARY: - (*this) << "param is necessary"; - break; - case REDUNDANT_VAL: - (*this) << "redundant argVal"; - break; - case BAD_ARGC: - (*this) << "bad argc"; - default: - break; - } - return *this; - } -}; - -namespace option_local { - -template -bool convert(T* x, const char *str) -{ - std::istringstream is(str); - is >> *x; - return !!is; -} - -template<> -inline bool convert(std::string* x, const char *str) -{ - *x = str; - return true; -} - -template -bool convertInt(T* x, const char *str) -{ - if (str[0] == '0' && str[1] == 'x') { - bool b; - *x = cybozu::hextoi(&b, str + 2); - return b; - } - size_t len = strlen(str); - int factor = 1; - if (len > 1) { - switch (str[len - 1]) { - case 'k': factor = 1000; len--; break; - case 'm': factor = 1000 * 1000; len--; break; - case 'g': factor = 1000 * 1000 * 1000; len--; break; - case 'K': factor = 1024; len--; break; - case 'M': factor = 1024 * 1024; len--; break; - case 'G': factor = 1024 * 1024 * 1024; len--; break; - default: break; - } - } - bool b; - T y = cybozu::atoi(&b, str, len); - if (!b) return false; - if (factor > 1) { - if ((std::numeric_limits::min)() / factor <= y - && y <= (std::numeric_limits::max)() / factor) { - *x = y * factor; - } else { - return false; - } - } else { - *x = y; - } - return true; -} - -#define CYBOZU_OPTION_DEFINE_CONVERT_INT(type) \ -template<>inline bool convert(type* x, const char *str) { return convertInt(x, str); } - -CYBOZU_OPTION_DEFINE_CONVERT_INT(int) -CYBOZU_OPTION_DEFINE_CONVERT_INT(long) -CYBOZU_OPTION_DEFINE_CONVERT_INT(long long) - -CYBOZU_OPTION_DEFINE_CONVERT_INT(unsigned int) -CYBOZU_OPTION_DEFINE_CONVERT_INT(unsigned long) -CYBOZU_OPTION_DEFINE_CONVERT_INT(unsigned long long) - -#undef CYBOZU_OPTION_DEFINE_CONVERT_INT - -struct HolderBase { - virtual ~HolderBase(){} - virtual bool set(const char*) = 0; - virtual HolderBase *clone() const = 0; - virtual std::string toStr() const = 0; - virtual const void *get() const = 0; -}; - -template -struct Holder : public HolderBase { - T *p_; - Holder(T *p) : p_(p) {} - HolderBase *clone() const { return new Holder(p_); } - bool set(const char *str) { return option_local::convert(p_, str); } - std::string toStr() const - { - std::ostringstream os; - os << *p_; - return os.str(); - } - const void *get() const { return (void*)p_; } -}; - -/* - for gcc 7 with -fnew-ttp-matching - this specialization is not necessary under 
-fno-new-ttp-matching -*/ -template struct Holder; - -templateclass Container> -struct Holder > : public HolderBase { - typedef Container Vec; - Vec *p_; - Holder(Vec *p) : p_(p) {} - HolderBase *clone() const { return new Holder(p_); } - bool set(const char *str) - { - T t; - bool b = option_local::convert(&t, str); - if (b) p_->push_back(t); - return b; - } - std::string toStr() const - { - std::ostringstream os; - bool isFirst = true; - for (typename Vec::const_iterator i = p_->begin(), ie = p_->end(); i != ie; ++i) { - if (isFirst) { - isFirst = false; - } else { - os << ' '; - } - os << *i; - } - return os.str(); - } - const void *get() const { return (void*)p_; } -}; - -class Var { - HolderBase *p_; - bool isSet_; -public: - Var() : p_(0), isSet_(false) { } - Var(const Var& rhs) : p_(rhs.p_->clone()), isSet_(false) { } - template - explicit Var(T *x) : p_(new Holder(x)), isSet_(false) { } - - ~Var() { delete p_; } - - void swap(Var& rhs) CYBOZU_NOEXCEPT - { - std::swap(p_, rhs.p_); - std::swap(isSet_, rhs.isSet_); - } - void operator=(const Var& rhs) - { - Var v(rhs); - swap(v); - } - bool set(const char *str) - { - isSet_ = true; - return p_->set(str); - } - std::string toStr() const { return p_ ? p_->toStr() : ""; } - bool isSet() const { return isSet_; } - const void *get() const { return p_ ? p_->get() : 0; } -}; - -} // option_local - -class Option { - enum Mode { // for opt - N_is0 = 0, // for bool by appendBoolOpt() - N_is1 = 1, - N_any = 2 - }; - enum ParamMode { - P_exact = 0, // one - P_optional = 1, // zero or one - P_variable = 2 // zero or greater - }; - struct Info { - option_local::Var var; - Mode mode; // 0 or 1 or any ; for opt, not used for Param - bool isMust; // this option is must - std::string opt; // option param name without '-' - std::string help; // description of option - - Info() : mode(N_is0), isMust(false) {} - template - Info(T* pvar, Mode mode, bool isMust, const char *opt, const std::string& help) - : var(pvar) - , mode(mode) - , isMust(isMust) - , opt(opt) - , help(help) - { - } - friend inline std::ostream& operator<<(std::ostream& os, const Info& self) - { - os << self.opt << '=' << self.var.toStr(); - if (self.var.isSet()) { - os << " (set)"; - } else { - os << " (default)"; - } - return os; - } - void put() const - { - std::cout << *this; - } - void usage() const - { - printf(" -%s %s%s\n", opt.c_str(), help.c_str(), isMust ? " (must)" : ""); - } - void shortUsage() const - { - printf(" -%s %s", opt.c_str(), mode == N_is0 ? "" : mode == N_is1 ? 
"para" : "para..."); - } - bool isSet() const { return var.isSet(); } - const void *get() const { return var.get(); } - }; - typedef std::vector InfoVec; - typedef std::vector StrVec; - typedef std::map OptMap; - InfoVec infoVec_; - InfoVec paramVec_; - Info remains_; - OptMap optMap_; - bool showOptUsage_; - ParamMode paramMode_; - std::string progName_; - std::string desc_; - std::string helpOpt_; - std::string help_; - std::string usage_; - StrVec delimiters_; - StrVec *remainsAfterDelimiter_; - int nextDelimiter_; - template - void appendSub(T *pvar, Mode mode, bool isMust, const char *opt, const std::string& help) - { - const char c = opt[0]; - if ('0' <= c && c <= '9') throw cybozu::Exception("Option::appendSub:opt must begin with not number") << opt; - if (optMap_.find(opt) != optMap_.end()) { - throw cybozu::Exception("Option::append:duplicate option") << opt; - } - optMap_[opt] = infoVec_.size(); - infoVec_.push_back(Info(pvar, mode, isMust, opt, help)); - } - - template - void append(T *pvar, const U& defaultVal, bool isMust, const char *opt, const std::string& help = "") - { - *pvar = defaultVal; - appendSub(pvar, N_is1, isMust, opt, help); - } - /* - don't deal with negative number as option - */ - bool isOpt(const char *str) const - { - if (str[0] != '-') return false; - const char c = str[1]; - if ('0' <= c && c <= '9') return false; - return true; - } - void verifyParamMode() - { - if (paramMode_ != P_exact) throw cybozu::Exception("Option:appendParamVec:appendParam is forbidden after appendParamOpt/appendParamVec"); - } - std::string getBaseName(const std::string& name) const - { - size_t pos = name.find_last_of("/\\"); - if (pos == std::string::npos) return name; - return name.substr(pos + 1); - } - bool inDelimiters(const std::string& str) const - { - return std::find(delimiters_.begin(), delimiters_.end(), str) != delimiters_.end(); - } -public: - Option() - : showOptUsage_(true) - , paramMode_(P_exact) - , remainsAfterDelimiter_(0) - , nextDelimiter_(-1) - { - } - virtual ~Option() {} - /* - append optional option with default value - @param pvar [in] pointer to option variable - @param defaultVal [in] default value - @param opt [in] option name - @param help [in] option help - @note you can use 123k, 56M if T is int/long/long long - k : *1000 - m : *1000000 - g : *1000000000 - K : *1024 - M : *1024*1024 - G : *1024*1024*1024 - */ - template - void appendOpt(T *pvar, const U& defaultVal, const char *opt, const std::string& help = "") - { - append(pvar, defaultVal, false, opt, help); - } - /* - default value of *pvar is false - */ - void appendBoolOpt(bool *pvar, const char *opt, const std::string& help = "") - { - *pvar = false; - appendSub(pvar, N_is0, false, opt, help); - } - /* - append necessary option - @param pvar [in] pointer to option variable - @param opt [in] option name - @param help [in] option help - */ - template - void appendMust(T *pvar, const char *opt, const std::string& help = "") - { - append(pvar, T(), true, opt, help); - } - /* - append vector option - @param pvar [in] pointer to option variable - @param opt [in] option name - @param help [in] option help - */ - templateclass Container> - void appendVec(Container *pvar, const char *opt, const std::string& help = "") - { - appendSub(pvar, N_any, false, opt, help); - } - /* - append parameter - @param pvar [in] pointer to parameter - @param opt [in] option name - @param help [in] option help - */ - template - void appendParam(T *pvar, const char *opt, const std::string& help = "") - { - 
verifyParamMode(); - paramVec_.push_back(Info(pvar, N_is1, true, opt, help)); - } - /* - append optional parameter - @param pvar [in] pointer to parameter - @param defaultVal [in] default value - @param opt [in] option name - @param help [in] option help - @note you can call appendParamOpt once after appendParam - */ - template - void appendParamOpt(T *pvar, const U& defaultVal, const char *opt, const std::string& help = "") - { - verifyParamMode(); - *pvar = defaultVal; - paramMode_ = P_optional; - paramVec_.push_back(Info(pvar, N_is1, false, opt, help)); - } - /* - append remain parameter - @param pvar [in] pointer to vector of parameter - @param opt [in] option name - @param help [in] option help - @note you can call appendParamVec once after appendParam - */ - templateclass Container> - void appendParamVec(Container *pvar, const char *name, const std::string& help = "") - { - verifyParamMode(); - paramMode_ = P_variable; - remains_.var = option_local::Var(pvar); - remains_.mode = N_any; - remains_.isMust = false; - remains_.opt = name; - remains_.help = help; - } - void appendHelp(const char *opt, const std::string& help = ": show this message") - { - helpOpt_ = opt; - help_ = help; - } - /* - stop parsing after delimiter is found - @param delimiter [in] string to stop - @param remain [out] set remaining strings if remain - */ - void setDelimiter(const std::string& delimiter, std::vector *remain = 0) - { - delimiters_.push_back(delimiter); - remainsAfterDelimiter_ = remain; - } - /* - stop parsing after delimiter is found - @param delimiter [in] string to stop to append list of delimiters - */ - void appendDelimiter(const std::string& delimiter) - { - delimiters_.push_back(delimiter); - } - /* - clear list of delimiters - */ - void clearDelimiterList() { delimiters_.clear(); } - /* - return the next position of delimiter between [0, argc] - @note return argc if delimiter is not set nor found - */ - int getNextPositionOfDelimiter() const { return nextDelimiter_; } - /* - parse (argc, argv) - @param argc [in] argc of main - @param argv [in] argv of main - @param startPos [in] start position of argc - @param progName [in] used instead of argv[0] - */ - bool parse(int argc, const char *const argv[], int startPos = 1, const char *progName = 0) - { - if (argc < 1 || startPos > argc) return false; - progName_ = getBaseName(progName ? 
progName : argv[startPos - 1]); - nextDelimiter_ = argc; - OptionError err; - for (int pos = startPos; pos < argc; pos++) { - if (inDelimiters(argv[pos])) { - nextDelimiter_ = pos + 1; - if (remainsAfterDelimiter_) { - for (int i = nextDelimiter_; i < argc; i++) { - remainsAfterDelimiter_->push_back(argv[i]); - } - } - break; - } - if (isOpt(argv[pos])) { - const std::string str = argv[pos] + 1; - if (helpOpt_ == str) { - usage(); - exit(0); - } - OptMap::const_iterator i = optMap_.find(str); - if (i == optMap_.end()) { - err.set(OptionError::BAD_OPT, pos); - goto ERR; - } - - Info& info = infoVec_[i->second]; - switch (info.mode) { - case N_is0: - if (!info.var.set("1")) { - err.set(OptionError::BAD_VALUE, pos); - goto ERR; - } - break; - case N_is1: - pos++; - if (pos == argc) { - err.set(OptionError::BAD_VALUE, pos) << (std::string("no value for -") + info.opt); - goto ERR; - } - if (!info.var.set(argv[pos])) { - err.set(OptionError::BAD_VALUE, pos) << (std::string(argv[pos]) + " for -" + info.opt); - goto ERR; - } - break; - case N_any: - default: - { - pos++; - int j = 0; - while (pos < argc && !isOpt(argv[pos])) { - if (!info.var.set(argv[pos])) { - err.set(OptionError::BAD_VALUE, pos) << (std::string(argv[pos]) + " for -" + info.opt) << j; - goto ERR; - } - pos++; - j++; - } - if (j > 0) { - pos--; - } else { - err.set(OptionError::NO_VALUE, pos) << (std::string("for -") + info.opt); - goto ERR; - } - } - break; - } - } else { - bool used = false; - for (size_t i = 0; i < paramVec_.size(); i++) { - Info& param = paramVec_[i]; - if (!param.var.isSet()) { - if (!param.var.set(argv[pos])) { - err.set(OptionError::BAD_VALUE, pos) << (std::string(argv[pos]) + " for " + param.opt); - goto ERR; - } - used = true; - break; - } - } - if (!used) { - if (paramMode_ == P_variable) { - remains_.var.set(argv[pos]); - } else { - err.set(OptionError::REDUNDANT_VAL, pos) << argv[pos]; - goto ERR; - } - } - } - } - // check whether must-opt is set - for (size_t i = 0; i < infoVec_.size(); i++) { - const Info& info = infoVec_[i]; - if (info.isMust && !info.var.isSet()) { - err.set(OptionError::OPT_IS_NECESSARY) << info.opt; - goto ERR; - } - } - // check whether param is set - for (size_t i = 0; i < paramVec_.size(); i++) { - const Info& param = paramVec_[i]; - if (param.isMust && !param.var.isSet()) { - err.set(OptionError::PARAM_IS_NECESSARY) << param.opt; - goto ERR; - } - } - // check whether remains is set - if (paramMode_ == P_variable && remains_.isMust && !remains_.var.isSet()) { - err.set(OptionError::PARAM_IS_NECESSARY) << remains_.opt; - goto ERR; - } - return true; - ERR: - assert(err.type); - printf("%s\n", err.what()); - return false; - } - /* - show desc at first in usage() - */ - void setDescription(const std::string& desc) - { - desc_ = desc; - } - /* - show command line after desc - don't put option message if not showOptUsage - */ - void setUsage(const std::string& usage, bool showOptUsage = false) - { - usage_ = usage; - showOptUsage_ = showOptUsage; - } - void usage() const - { - if (!desc_.empty()) printf("%s\n", desc_.c_str()); - if (usage_.empty()) { - printf("usage:%s", progName_.c_str()); - if (!infoVec_.empty()) printf(" [opt]"); - for (size_t i = 0; i < infoVec_.size(); i++) { - if (infoVec_[i].isMust) infoVec_[i].shortUsage(); - } - for (size_t i = 0; i < paramVec_.size(); i++) { - printf(" %s", paramVec_[i].opt.c_str()); - } - if (paramMode_ == P_variable) { - printf(" %s", remains_.opt.c_str()); - } - printf("\n"); - } else { - printf("%s\n", usage_.c_str()); - if 
(!showOptUsage_) return; - } - for (size_t i = 0; i < paramVec_.size(); i++) { - const Info& param = paramVec_[i]; - if (!param.help.empty()) printf(" %s %s\n", paramVec_[i].opt.c_str(), paramVec_[i].help.c_str()); - } - if (!remains_.help.empty()) printf(" %s %s\n", remains_.opt.c_str(), remains_.help.c_str()); - if (!helpOpt_.empty()) { - printf(" -%s %s\n", helpOpt_.c_str(), help_.c_str()); - } - for (size_t i = 0; i < infoVec_.size(); i++) { - infoVec_[i].usage(); - } - } - friend inline std::ostream& operator<<(std::ostream& os, const Option& self) - { - for (size_t i = 0; i < self.paramVec_.size(); i++) { - const Info& param = self.paramVec_[i]; - os << param.opt << '=' << param.var.toStr() << std::endl; - } - if (self.paramMode_ == P_variable) { - os << "remains=" << self.remains_.var.toStr() << std::endl; - } - for (size_t i = 0; i < self.infoVec_.size(); i++) { - os << self.infoVec_[i] << std::endl; - } - return os; - } - void put() const - { - std::cout << *this; - } - /* - whether pvar is set or not - */ - template - bool isSet(const T* pvar) const - { - const void *p = static_cast(pvar); - for (size_t i = 0; i < paramVec_.size(); i++) { - const Info& v = paramVec_[i]; - if (v.get() == p) return v.isSet(); - } - if (remains_.get() == p) return remains_.isSet(); - for (size_t i = 0; i < infoVec_.size(); i++) { - const Info& v = infoVec_[i]; - if (v.get() == p) return v.isSet(); - } - throw cybozu::Exception("Option:isSet:no assigned var") << pvar; - } -}; - -} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/random_generator.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/random_generator.hpp deleted file mode 100644 index ff4a78da5..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/random_generator.hpp +++ /dev/null @@ -1,153 +0,0 @@ -#pragma once -/** - @file - @brief pseudrandom generator - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ - -#include -#ifdef _WIN32 -#include -#include -#include -#ifdef _MSC_VER -#pragma comment (lib, "advapi32.lib") -#endif -#include -#else -#include -#include -#endif - -namespace cybozu { - -class RandomGenerator { - RandomGenerator(const RandomGenerator&); - void operator=(const RandomGenerator&); -public: - uint32_t operator()() - { - return get32(); - } - uint32_t get32() - { - uint32_t ret; - read(&ret, 1); - return ret; - } - uint64_t get64() - { - uint64_t ret; - read(&ret, 1); - return ret; - } -#ifdef _WIN32 - RandomGenerator() - : prov_(0) - , pos_(bufSize) - { - DWORD flagTbl[] = { 0, CRYPT_NEWKEYSET }; - for (int i = 0; i < 2; i++) { - if (CryptAcquireContext(&prov_, NULL, NULL, PROV_RSA_FULL, flagTbl[i]) != 0) return; - } - throw cybozu::Exception("randomgenerator"); - } - bool read_inner(void *buf, size_t byteSize) - { - return CryptGenRandom(prov_, static_cast(byteSize), static_cast(buf)) != 0; - } - ~RandomGenerator() - { - if (prov_) { - CryptReleaseContext(prov_, 0); - } - } - /* - fill buf[0..bufNum-1] with random data - @note bufNum is not byte size - */ - template - void read(bool *pb, T *buf, size_t bufNum) - { - cybozu::AutoLockCs al(cs_); - const size_t byteSize = sizeof(T) * bufNum; - if (byteSize > bufSize) { - if (!read_inner(buf, byteSize)) { - *pb = false; - return; - } - } else { - if (pos_ + byteSize > bufSize) { - read_inner(buf_, bufSize); - pos_ = 0; - } - memcpy(buf, buf_ + pos_, byteSize); - pos_ += byteSize; - } - *pb = true; - } - template - void read(T *buf, size_t bufNum) - { 
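Putting the Option parser above together: options are registered against variables, parse() fills them, and usage() prints the generated help. A sketch mirroring the header's own documentation:

#include <cybozu/option.hpp>
#include <string>

int main(int argc, char *argv[])
{
    cybozu::Option opt;
    int num = 0;
    bool verbose = false;
    std::string inFile;
    opt.appendOpt(&num, 10, "n", ": number of items (suffixes such as 4K are accepted)");
    opt.appendBoolOpt(&verbose, "v", ": verbose output");
    opt.appendParam(&inFile, "input-file", ": file to read");
    opt.appendHelp("h");
    if (!opt.parse(argc, argv)) {
        opt.usage();
        return 1;
    }
    // opt.isSet(&num) reports whether -n was given explicitly
    return 0;
}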
- bool b; - read(&b, buf, bufNum); - if (!b) throw cybozu::Exception("RandomGenerator:read") << bufNum; - } -private: - HCRYPTPROV prov_; - static const size_t bufSize = 1024; - char buf_[bufSize]; - size_t pos_; - cybozu::CriticalSection cs_; -#else - RandomGenerator() - : fp_(::fopen("/dev/urandom", "rb")) - { - if (!fp_) throw cybozu::Exception("randomgenerator"); - } - ~RandomGenerator() - { - if (fp_) ::fclose(fp_); - } - /* - fill buf[0..bufNum-1] with random data - @note bufNum is not byte size - */ - template - void read(bool *pb, T *buf, size_t bufNum) - { - const size_t byteSize = sizeof(T) * bufNum; - *pb = ::fread(buf, 1, (int)byteSize, fp_) == byteSize; - } - template - void read(T *buf, size_t bufNum) - { - bool b; - read(&b, buf, bufNum); - if (!b) throw cybozu::Exception("RandomGenerator:read") << bufNum; - } -#endif -private: - FILE *fp_; -}; - -template -void shuffle(T* v, size_t n, RG& rg) -{ - if (n <= 1) return; - for (size_t i = 0; i < n - 1; i++) { - size_t r = i + size_t(rg.get64() % (n - i)); - using namespace std; - swap(v[i], v[r]); - } -} - -template -void shuffle(V& v, RG& rg) -{ - shuffle(v.data(), v.size(), rg); -} - -} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/serializer.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/serializer.hpp deleted file mode 100644 index 1e23c8f42..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/serializer.hpp +++ /dev/null @@ -1,363 +0,0 @@ -#pragma once -/** - @file - @brief serializer for vector, list, map and so on - - @author MITSUNARI Shigeo(@herumi) -*/ -#include -#include - -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable : 4127) -#endif - -//#define CYBOZU_SERIALIZER_FIXED_SIZE_INTEGER - -namespace cybozu { - -namespace serializer_local { - -template -union ci { - T i; - uint8_t c[sizeof(T)]; -}; - -template -struct HasMemFunc { }; - -template -void dispatch_reserve(T& t, size_t size, int, HasMemFunc* = 0) -{ - t.reserve(size); -} - -template -void dispatch_reserve(T&, size_t, int*) -{ -} - -template -void reserve_if_exists(T& t, size_t size) -{ - dispatch_reserve(t, size, 0); -} - -} // serializer_local - -template -void loadRange(T *p, size_t num, InputStream& is) -{ - cybozu::read(p, num * sizeof(T), is); -} - -template -void saveRange(OutputStream& os, const T *p, size_t num) -{ - cybozu::write(os, p, num * sizeof(T)); -} - -template -void loadPod(T& x, InputStream& is) -{ - serializer_local::ci ci; - loadRange(ci.c, sizeof(ci.c), is); - x = ci.i; -} - -template -void savePod(OutputStream& os, const T& x) -{ - serializer_local::ci ci; - ci.i = x; - saveRange(os, ci.c, sizeof(ci.c)); -} - -template -void load(T& x, InputStream& is) -{ - x.load(is); -} - -template -void save(OutputStream& os, const T& x) -{ - x.save(os); -} - -#define CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(type) \ -templatevoid load(type& x, InputStream& is) { loadPod(x, is); } \ -templatevoid save(OutputStream& os, type x) { savePod(os, x); } - -CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(bool) -CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(char) -CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(short) -CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(unsigned char) -CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(unsigned short) -CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(wchar_t) - -CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(float) -CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(double) - -#ifdef CYBOZU_SERIALIZER_FIXED_SIZE_INTEGER - -#define CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(type) CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(type) - -#else - -namespace 
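RandomGenerator above draws from CryptGenRandom on Windows and /dev/urandom elsewhere, and the shuffle helpers implement an in-place Fisher-Yates pass driven by get64(). A sketch:

#include <cybozu/random_generator.hpp>
#include <cstdio>

int main()
{
    cybozu::RandomGenerator rg;
    uint32_t a = rg.get32();
    uint64_t b = rg.get64();
    std::printf("%08x %016llx\n", a, (unsigned long long)b);

    int v[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
    cybozu::shuffle(v, 8, rg); // in-place Fisher-Yates shuffle
    return 0;
}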
serializer_local { - -template -bool isRecoverable(T x) -{ - return T(S(x)) == x; -} -/* - data structure H:D of integer x - H:header(1byte) - 0x80 ; D = 1 byte zero ext - 0x81 ; D = 2 byte zero ext - 0x82 ; D = 4 byte zero ext - 0x83 ; D = 8 byte zero ext - 0x84 ; D = 1 byte signed ext - 0x85 ; D = 2 byte signed ext - 0x86 ; D = 4 byte signed ext - 0x87 ; D = 8 byte signed ext - other; x = signed H, D = none -*/ -template -void saveVariableInt(OutputStream& os, const T& x) -{ - if (isRecoverable(x)) { - uint8_t u8 = uint8_t(x); - if (unsigned(u8 - 0x80) <= 7) { - savePod(os, uint8_t(0x84)); - } - savePod(os, u8); - } else if (isRecoverable(x)) { - savePod(os, uint8_t(0x80)); - savePod(os, uint8_t(x)); - } else if (isRecoverable(x) || isRecoverable(x)) { - savePod(os, uint8_t(isRecoverable(x) ? 0x81 : 0x85)); - savePod(os, uint16_t(x)); - } else if (isRecoverable(x) || isRecoverable(x)) { - savePod(os, uint8_t(isRecoverable(x) ? 0x82 : 0x86)); - savePod(os, uint32_t(x)); - } else { - assert(sizeof(T) == 8); - savePod(os, uint8_t(0x83)); - savePod(os, uint64_t(x)); - } -} - -template -void loadVariableInt(T& x, InputStream& is) -{ - uint8_t h; - loadPod(h, is); - if (h == 0x80) { - uint8_t v; - loadPod(v, is); - x = v; - } else if (h == 0x81) { - uint16_t v; - loadPod(v, is); - x = v; - } else if (h == 0x82) { - uint32_t v; - loadPod(v, is); - x = v; - } else if (h == 0x83) { - if (sizeof(T) == 4) throw cybozu::Exception("loadVariableInt:bad header") << h; - uint64_t v; - loadPod(v, is); - x = static_cast(v); - } else if (h == 0x84) { - int8_t v; - loadPod(v, is); - x = v; - } else if (h == 0x85) { - int16_t v; - loadPod(v, is); - x = v; - } else if (h == 0x86) { - int32_t v; - loadPod(v, is); - x = v; - } else if (h == 0x87) { - if (sizeof(T) == 4) throw cybozu::Exception("loadVariableInt:bad header") << h; - int64_t v; - loadPod(v, is); - x = static_cast(v); - } else { - x = static_cast(h); - } -} - -} // serializer_local - -#define CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(type) \ -templatevoid load(type& x, InputStream& is) { serializer_local::loadVariableInt(x, is); } \ -templatevoid save(OutputStream& os, type x) { serializer_local::saveVariableInt(os, x); } - -#endif - -CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(int) -CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(long) -CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(long long) -CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(unsigned int) -CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(unsigned long) -CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(unsigned long long) - -#undef CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER -#undef CYBOZU_SERIALIZER_MAKE_UNT_SERIALIZER -#undef CYBOZU_SERIALIZER_MAKE_SERIALIZER_F -#undef CYBOZU_SERIALIZER_MAKE_SERIALIZER_V - -// only for std::vector -template -void loadPodVec(V& v, InputStream& is) -{ - size_t size; - load(size, is); - v.resize(size); - if (size > 0) loadRange(&v[0], size, is); -} - -// only for std::vector -template -void savePodVec(OutputStream& os, const V& v) -{ - save(os, v.size()); - if (!v.empty()) saveRange(os, &v[0], v.size()); -} - -template -void load(std::string& str, InputStream& is) -{ - loadPodVec(str, is); -} - -template -void save(OutputStream& os, const std::string& str) -{ - savePodVec(os, str); -} - -template -void save(OutputStream& os, const char *x) -{ - const size_t len = strlen(x); - save(os, len); - if (len > 0) saveRange(os, x, len); -} - - -// for vector, list -templateclass Container> -void load(Container& x, InputStream& is) -{ - size_t size; - load(size, is); - serializer_local::reserve_if_exists(x, size); - for 
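The variable-length integer codec above stores values that fit a signed byte directly (escaping the 0x80-0x87 range with an 0x84 header) and otherwise emits a one-byte header naming the width and extension, followed by the payload. A round-trip sketch, assuming the std::iostream adapters from upstream cybozu/stream.hpp:

#include <cybozu/serializer.hpp>
#include <cybozu/stream.hpp> // assumed: provides read/write over std::iostream
#include <sstream>
#include <cassert>

int main()
{
    std::stringstream ss;
    cybozu::save(ss, 100);   // representable as int8_t: a single byte, no header
    cybozu::save(ss, 70000); // needs 32 bits: 0x82 header + 4-byte payload
    int a = 0, b = 0;
    cybozu::load(a, ss);
    cybozu::load(b, ss);
    assert(a == 100 && b == 70000);
    return 0;
}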
(size_t i = 0; i < size; i++) { - x.push_back(T()); - T& t = x.back(); - load(t, is); - } -} - -templateclass Container> -void save(OutputStream& os, const Container& x) -{ - typedef Container V; - save(os, x.size()); - for (typename V::const_iterator i = x.begin(), end = x.end(); i != end; ++i) { - save(os, *i); - } -} - -// for set -templateclass Container> -void load(Container& x, InputStream& is) -{ - size_t size; - load(size, is); - for (size_t i = 0; i < size; i++) { - K t; - load(t, is); - x.insert(t); - } -} - -templateclass Container> -void save(OutputStream& os, const Container& x) -{ - typedef Container Set; - save(os, x.size()); - for (typename Set::const_iterator i = x.begin(), end = x.end(); i != end; ++i) { - save(os, *i); - } -} - -// for map -templateclass Container> -void load(Container& x, InputStream& is) -{ - typedef Container Map; - size_t size; - load(size, is); - for (size_t i = 0; i < size; i++) { - std::pair vt; - load(vt.first, is); - load(vt.second, is); - x.insert(vt); - } -} - -templateclass Container> -void save(OutputStream& os, const Container& x) -{ - typedef Container Map; - save(os, x.size()); - for (typename Map::const_iterator i = x.begin(), end = x.end(); i != end; ++i) { - save(os, i->first); - save(os, i->second); - } -} - -// unordered_map -templateclass Container> -void load(Container& x, InputStream& is) -{ - typedef Container Map; - size_t size; - load(size, is); -// x.reserve(size); // tr1::unordered_map may not have reserve - cybozu::serializer_local::reserve_if_exists(x, size); - for (size_t i = 0; i < size; i++) { - std::pair vt; - load(vt.first, is); - load(vt.second, is); - x.insert(vt); - } -} - -templateclass Container> -void save(OutputStream& os, const Container& x) -{ - typedef Container Map; - save(os, x.size()); - for (typename Map::const_iterator i = x.begin(), end = x.end(); i != end; ++i) { - save(os, i->first); - save(os, i->second); - } -} - -} // cybozu - -#ifdef _MSC_VER - #pragma warning(pop) -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/sha2.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/sha2.hpp deleted file mode 100644 index 1830936f0..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/sha2.hpp +++ /dev/null @@ -1,467 +0,0 @@ -#pragma once -/** - @file - @brief SHA-256, SHA-512 class - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#if !defined(CYBOZU_DONT_USE_OPENSSL) && !defined(MCL_DONT_USE_OPENSSL) - #define CYBOZU_USE_OPENSSL_SHA -#endif - -#ifndef CYBOZU_DONT_USE_STRING -#include -#endif - -#ifdef CYBOZU_USE_OPENSSL_SHA -#ifdef __APPLE__ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#endif -#include -#ifdef _MSC_VER - #include -#endif - -#ifdef __APPLE__ - #pragma GCC diagnostic pop -#endif - -namespace cybozu { - -class Sha256 { - SHA256_CTX ctx_; -public: - Sha256() - { - clear(); - } - void clear() - { - SHA256_Init(&ctx_); - } - void update(const void *buf, size_t bufSize) - { - SHA256_Update(&ctx_, buf, bufSize); - } - size_t digest(void *md, size_t mdSize, const void *buf, size_t bufSize) - { - if (mdSize < SHA256_DIGEST_LENGTH) return 0; - update(buf, bufSize); - SHA256_Final(reinterpret_cast(md), &ctx_); - return SHA256_DIGEST_LENGTH; - } -#ifndef CYBOZU_DONT_USE_STRING - void update(const std::string& buf) - { - update(buf.c_str(), buf.size()); - } - std::string digest(const std::string& buf) - { - return digest(buf.c_str(), 
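A minimal round-trip through the container helpers above (a sketch assuming cybozu/serializer.hpp and cybozu/stream.hpp from this tree are on the include path):

#include <cybozu/serializer.hpp>
#include <cybozu/stream.hpp>
#include <map>
#include <string>
#include <cstdio>

int main()
{
	std::map<std::string, int> m, m2;
	m["alice"] = 1;
	m["bob"] = 2;

	std::string bin;
	cybozu::StringOutputStream os(bin);
	cybozu::save(os, m);                  // size, then (key, value) pairs

	cybozu::StringInputStream is(bin);
	cybozu::load(m2, is);
	printf("bob=%d\n", m2["bob"]);        // bob=2
	return 0;
}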
buf.size()); - } - std::string digest(const void *buf, size_t bufSize) - { - std::string md(SHA256_DIGEST_LENGTH, 0); - digest(&md[0], md.size(), buf, bufSize); - return md; - } -#endif -}; - -class Sha512 { - SHA512_CTX ctx_; -public: - Sha512() - { - clear(); - } - void clear() - { - SHA512_Init(&ctx_); - } - void update(const void *buf, size_t bufSize) - { - SHA512_Update(&ctx_, buf, bufSize); - } - size_t digest(void *md, size_t mdSize, const void *buf, size_t bufSize) - { - if (mdSize < SHA512_DIGEST_LENGTH) return 0; - update(buf, bufSize); - SHA512_Final(reinterpret_cast(md), &ctx_); - return SHA512_DIGEST_LENGTH; - } -#ifndef CYBOZU_DONT_USE_STRING - void update(const std::string& buf) - { - update(buf.c_str(), buf.size()); - } - std::string digest(const std::string& buf) - { - return digest(buf.c_str(), buf.size()); - } - std::string digest(const void *buf, size_t bufSize) - { - std::string md(SHA512_DIGEST_LENGTH, 0); - digest(&md[0], md.size(), buf, bufSize); - return md; - } -#endif -}; - -} // cybozu - -#else - -#include -#include -#include - -namespace cybozu { - -namespace sha2_local { - -template -T min_(T x, T y) { return x < y ? x : y;; } - -inline uint32_t rot32(uint32_t x, int s) -{ -#ifdef _MSC_VER - return _rotr(x, s); -#else - return (x >> s) | (x << (32 - s)); -#endif -} - -inline uint64_t rot64(uint64_t x, int s) -{ -#ifdef _MSC_VER - return _rotr64(x, s); -#else - return (x >> s) | (x << (64 - s)); -#endif -} - -template -struct Common { - void term(const char *buf, size_t bufSize) - { - assert(bufSize < T::blockSize_); - T& self = static_cast(*this); - const uint64_t totalSize = self.totalSize_ + bufSize; - - uint8_t last[T::blockSize_]; - memcpy(last, buf, bufSize); - last[bufSize] = uint8_t(0x80); /* top bit = 1 */ - memset(&last[bufSize + 1], 0, T::blockSize_ - bufSize - 1); - if (bufSize >= T::blockSize_ - T::msgLenByte_) { - self.round(reinterpret_cast(last)); - memset(last, 0, sizeof(last)); // clear stack - } - cybozu::Set64bitAsBE(&last[T::blockSize_ - 8], totalSize * 8); - self.round(reinterpret_cast(last)); - } - void inner_update(const char *buf, size_t bufSize) - { - T& self = static_cast(*this); - if (bufSize == 0) return; - if (self.roundBufSize_ > 0) { - size_t size = sha2_local::min_(T::blockSize_ - self.roundBufSize_, bufSize); - memcpy(self.roundBuf_ + self.roundBufSize_, buf, size); - self.roundBufSize_ += size; - buf += size; - bufSize -= size; - } - if (self.roundBufSize_ == T::blockSize_) { - self.round(self.roundBuf_); - self.roundBufSize_ = 0; - } - while (bufSize >= T::blockSize_) { - assert(self.roundBufSize_ == 0); - self.round(buf); - buf += T::blockSize_; - bufSize -= T::blockSize_; - } - if (bufSize > 0) { - assert(bufSize < T::blockSize_); - assert(self.roundBufSize_ == 0); - memcpy(self.roundBuf_, buf, bufSize); - self.roundBufSize_ = bufSize; - } - assert(self.roundBufSize_ < T::blockSize_); - } -}; - -} // cybozu::sha2_local - -class Sha256 : public sha2_local::Common { - friend struct sha2_local::Common; -private: - static const size_t blockSize_ = 64; - static const size_t hSize_ = 8; - static const size_t msgLenByte_ = 8; - uint64_t totalSize_; - size_t roundBufSize_; - char roundBuf_[blockSize_]; - uint32_t h_[hSize_]; - static const size_t outByteSize_ = hSize_ * sizeof(uint32_t); - const uint32_t *k_; - - /** - @param buf [in] buffer(64byte) - */ - void round(const char *buf) - { - using namespace sha2_local; - uint32_t w[64]; - for (int i = 0; i < 16; i++) { - w[i] = cybozu::Get32bitAsBE(&buf[i * 4]); - } - for (int i = 16 
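Both the OpenSSL-backed classes here and the portable fallback below expose the same small interface; a one-shot usage sketch (assumes cybozu/sha2.hpp from this tree):

#include <cybozu/sha2.hpp>
#include <cstdio>
#include <string>

int main()
{
	// one-shot: returns the 32-byte binary digest as a std::string
	std::string md = cybozu::Sha256().digest("abc");
	for (size_t i = 0; i < md.size(); i++) {
		printf("%02x", (unsigned char)md[i]);
	}
	printf("\n"); // ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad
	return 0;
}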
; i < 64; i++) { - uint32_t t = w[i - 15]; - uint32_t s0 = rot32(t, 7) ^ rot32(t, 18) ^ (t >> 3); - t = w[i - 2]; - uint32_t s1 = rot32(t, 17) ^ rot32(t, 19) ^ (t >> 10); - w[i] = w[i - 16] + s0 + w[i - 7] + s1; - } - uint32_t a = h_[0]; - uint32_t b = h_[1]; - uint32_t c = h_[2]; - uint32_t d = h_[3]; - uint32_t e = h_[4]; - uint32_t f = h_[5]; - uint32_t g = h_[6]; - uint32_t h = h_[7]; - for (int i = 0; i < 64; i++) { - uint32_t s1 = rot32(e, 6) ^ rot32(e, 11) ^ rot32(e, 25); - uint32_t ch = g ^ (e & (f ^ g)); - uint32_t t1 = h + s1 + ch + k_[i] + w[i]; - uint32_t s0 = rot32(a, 2) ^ rot32(a, 13) ^ rot32(a, 22); - uint32_t maj = ((a | b) & c) | (a & b); - uint32_t t2 = s0 + maj; - h = g; - g = f; - f = e; - e = d + t1; - d = c; - c = b; - b = a; - a = t1 + t2; - } - h_[0] += a; - h_[1] += b; - h_[2] += c; - h_[3] += d; - h_[4] += e; - h_[5] += f; - h_[6] += g; - h_[7] += h; - totalSize_ += blockSize_; - } -public: - Sha256() - { - clear(); - } - void clear() - { - static const uint32_t kTbl[] = { - 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, - 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, - 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, - 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, - 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, - 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, - 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, - 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 - }; - k_ = kTbl; - totalSize_ = 0; - roundBufSize_ = 0; - h_[0] = 0x6a09e667; - h_[1] = 0xbb67ae85; - h_[2] = 0x3c6ef372; - h_[3] = 0xa54ff53a; - h_[4] = 0x510e527f; - h_[5] = 0x9b05688c; - h_[6] = 0x1f83d9ab; - h_[7] = 0x5be0cd19; - } - void update(const void *buf, size_t bufSize) - { - inner_update(reinterpret_cast(buf), bufSize); - } - size_t digest(void *md, size_t mdSize, const void *buf, size_t bufSize) - { - if (mdSize < outByteSize_) return 0; - update(buf, bufSize); - term(roundBuf_, roundBufSize_); - char *p = reinterpret_cast(md); - for (size_t i = 0; i < hSize_; i++) { - cybozu::Set32bitAsBE(&p[i * sizeof(h_[0])], h_[i]); - } - return outByteSize_; - } -#ifndef CYBOZU_DONT_USE_STRING - void update(const std::string& buf) - { - update(buf.c_str(), buf.size()); - } - std::string digest(const std::string& buf) - { - return digest(buf.c_str(), buf.size()); - } - std::string digest(const void *buf, size_t bufSize) - { - std::string md(outByteSize_, 0); - digest(&md[0], md.size(), buf, bufSize); - return md; - } -#endif -}; - -class Sha512 : public sha2_local::Common { - friend struct sha2_local::Common; -private: - static const size_t blockSize_ = 128; - static const size_t hSize_ = 8; - static const size_t msgLenByte_ = 16; - uint64_t totalSize_; - size_t roundBufSize_; - char roundBuf_[blockSize_]; - uint64_t h_[hSize_]; - static const size_t outByteSize_ = hSize_ * sizeof(uint64_t); - const uint64_t *k_; - - template - void round1(uint64_t *S, const uint64_t *w, size_t i) - { - using namespace sha2_local; - uint64_t& a = S[i0]; - uint64_t& b = S[i1]; - uint64_t& c = S[i2]; - uint64_t& d = S[i3]; - uint64_t& e = S[i4]; - uint64_t& f = S[i5]; - uint64_t& g = S[i6]; - uint64_t& h = S[i7]; - - uint64_t s1 = rot64(e, 14) 
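The round above uses algebraically rewritten forms of the SHA-2 boolean functions: ((a|b)&c)|(a&b) equals the textbook Maj(a,b,c) = (a&b)^(a&c)^(b&c), and g^(e&(f^g)) equals Ch(e,f,g) = (e&f)^(~e&g). Both identities hold bitwise, so an exhaustive single-bit check confirms them (standalone sketch, with (a,b,c) standing in for (e,f,g)):

#include <cstdint>
#include <cstdio>

int main()
{
	bool ok = true;
	for (uint32_t a = 0; a < 2; a++)
	for (uint32_t b = 0; b < 2; b++)
	for (uint32_t c = 0; c < 2; c++) {
		ok &= (((a | b) & c) | (a & b)) == ((a & b) ^ (a & c) ^ (b & c)); // Maj
		ok &= (c ^ (a & (b ^ c))) == ((a & b) ^ (~a & c));                // Ch
	}
	printf("identities hold: %d\n", ok); // 1
	return 0;
}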
^ rot64(e, 18) ^ rot64(e, 41); - uint64_t ch = g ^ (e & (f ^ g)); - uint64_t t0 = h + s1 + ch + k_[i] + w[i]; - uint64_t s0 = rot64(a, 28) ^ rot64(a, 34) ^ rot64(a, 39); - uint64_t maj = ((a | b) & c) | (a & b); - uint64_t t1 = s0 + maj; - d += t0; - h = t0 + t1; - } - /** - @param buf [in] buffer(64byte) - */ - void round(const char *buf) - { - using namespace sha2_local; - uint64_t w[80]; - for (int i = 0; i < 16; i++) { - w[i] = cybozu::Get64bitAsBE(&buf[i * 8]); - } - for (int i = 16 ; i < 80; i++) { - uint64_t t = w[i - 15]; - uint64_t s0 = rot64(t, 1) ^ rot64(t, 8) ^ (t >> 7); - t = w[i - 2]; - uint64_t s1 = rot64(t, 19) ^ rot64(t, 61) ^ (t >> 6); - w[i] = w[i - 16] + s0 + w[i - 7] + s1; - } - uint64_t s[8]; - for (int i = 0; i < 8; i++) { - s[i] = h_[i]; - } - for (int i = 0; i < 80; i += 8) { - round1<0, 1, 2, 3, 4, 5, 6, 7>(s, w, i + 0); - round1<7, 0, 1, 2, 3, 4, 5, 6>(s, w, i + 1); - round1<6, 7, 0, 1, 2, 3, 4, 5>(s, w, i + 2); - round1<5, 6, 7, 0, 1, 2, 3, 4>(s, w, i + 3); - round1<4, 5, 6, 7, 0, 1, 2, 3>(s, w, i + 4); - round1<3, 4, 5, 6, 7, 0, 1, 2>(s, w, i + 5); - round1<2, 3, 4, 5, 6, 7, 0, 1>(s, w, i + 6); - round1<1, 2, 3, 4, 5, 6, 7, 0>(s, w, i + 7); - } - for (int i = 0; i < 8; i++) { - h_[i] += s[i]; - } - totalSize_ += blockSize_; - } -public: - Sha512() - { - clear(); - } - void clear() - { - static const uint64_t kTbl[] = { - 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, - 0x59f111f1b605d019ULL, 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, - 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL, - 0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, - 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, 0x983e5152ee66dfabULL, - 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL, 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, - 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, - 0x53380d139d95b3dfULL, 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL, - 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL, - 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, - 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, 0x5b9cca4f7763e373ULL, - 0x682e6ff3d6b2b8a3ULL, 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL, - 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL, 0xca273eceea26619cULL, - 0xd186b8c721c0c207ULL, 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, - 0x113f9804bef90daeULL, 0x1b710b35131c471bULL, 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, - 0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL - }; - k_ = kTbl; - totalSize_ = 0; - roundBufSize_ = 0; - h_[0] = 0x6a09e667f3bcc908ull; - h_[1] = 0xbb67ae8584caa73bull; - h_[2] = 0x3c6ef372fe94f82bull; - h_[3] = 0xa54ff53a5f1d36f1ull; - h_[4] = 0x510e527fade682d1ull; - h_[5] = 0x9b05688c2b3e6c1full; - h_[6] = 0x1f83d9abfb41bd6bull; - h_[7] = 
0x5be0cd19137e2179ull; - } - void update(const void *buf, size_t bufSize) - { - inner_update(reinterpret_cast(buf), bufSize); - } - size_t digest(void *md, size_t mdSize, const void *buf, size_t bufSize) - { - if (mdSize < outByteSize_) return 0; - update(buf, bufSize); - term(roundBuf_, roundBufSize_); - char *p = reinterpret_cast(md); - for (size_t i = 0; i < hSize_; i++) { - cybozu::Set64bitAsBE(&p[i * sizeof(h_[0])], h_[i]); - } - return outByteSize_; - } -#ifndef CYBOZU_DONT_USE_STRING - void update(const std::string& buf) - { - update(buf.c_str(), buf.size()); - } - std::string digest(const std::string& buf) - { - return digest(buf.c_str(), buf.size()); - } - std::string digest(const void *buf, size_t bufSize) - { - std::string md(outByteSize_, 0); - digest(&md[0], md.size(), buf, bufSize); - return md; - } -#endif -}; - -} // cybozu - -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/stream.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/stream.hpp deleted file mode 100644 index bc110bdb0..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/stream.hpp +++ /dev/null @@ -1,267 +0,0 @@ -#pragma once -/** - @file - @brief stream and line stream class - - @author MITSUNARI Shigeo(@herumi) -*/ -#ifndef CYBOZU_DONT_USE_STRING -#include -#include -#endif -#include -#include - -namespace cybozu { - -namespace stream_local { - -template -struct is_convertible { - typedef char yes; - typedef int no; - - static no test(...); - static yes test(const To*); - static const bool value = sizeof(test(static_cast(0))) == sizeof(yes); -}; - -template -struct enable_if { typedef T type; }; - -template -struct enable_if {}; - -#ifndef CYBOZU_DONT_USE_STRING -/* specialization for istream */ -template -size_t readSome_inner(void *buf, size_t size, InputStream& is, typename enable_if::value>::type* = 0) -{ - if (size > 0x7fffffff) size = 0x7fffffff; - is.read(static_cast(buf), size); - const int64_t readSize = is.gcount(); - if (readSize < 0) return 0; - if (size == 1 && readSize == 0) is.clear(); - return static_cast(readSize); -} - -/* generic version for size_t readSome(void *, size_t) */ -template -size_t readSome_inner(void *buf, size_t size, InputStream& is, typename enable_if::value>::type* = 0) -{ - return is.readSome(buf, size); -} -#else -template -size_t readSome_inner(void *buf, size_t size, InputStream& is) -{ - return is.readSome(buf, size); -} -#endif - -#ifndef CYBOZU_DONT_USE_EXCEPTION -/* specialization for ostream */ -template -void writeSub(OutputStream& os, const void *buf, size_t size, typename enable_if::value>::type* = 0) -{ - if (!os.write(static_cast(buf), size)) throw cybozu::Exception("stream:writeSub") << size; -} -#endif - -#ifndef CYBOZU_DONT_USE_STRING -/* generic version for void write(const void*, size_t), which writes all data */ -template -void writeSub(OutputStream& os, const void *buf, size_t size, typename enable_if::value>::type* = 0) -{ - os.write(buf, size); -} - -template -void writeSub(bool *pb, OutputStream& os, const void *buf, size_t size, typename enable_if::value>::type* = 0) -{ - *pb = !!os.write(static_cast(buf), size); -} - -/* generic version for void write(const void*, size_t), which writes all data */ -template -void writeSub(bool *pb, OutputStream& os, const void *buf, size_t size, typename enable_if::value>::type* = 0) -{ - os.write(pb, buf, size); -} -#else -template -void writeSub(bool *pb, OutputStream& os, const void *buf, size_t size) -{ - os.write(pb, buf, size); -} -#endif - -} // 
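The dispatch machinery above is duck-typed: anything providing size_t readSome(void*, size_t) works as an InputStream (std::istream gets its own overload). A minimal custom stream, purely illustrative:

#include <cstddef>
#include <cstring>
#include <cstdio>

// a trivial source that yields an endless run of one character
struct RepeatStream {
	char ch;
	size_t readSome(void *buf, size_t size)
	{
		memset(buf, ch, size);
		return size; // always "reads" everything asked for
	}
};

int main()
{
	RepeatStream rs = { 'x' };
	char buf[4];
	size_t n = rs.readSome(buf, sizeof(buf));
	printf("%zu %.4s\n", n, buf); // 4 xxxx
	return 0;
}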
stream_local

-/*
-	make a specialization of class to use new InputStream, OutputStream
-*/
-template<class InputStream>
-struct InputStreamTag {
-	static size_t readSome(void *buf, size_t size, InputStream& is)
-	{
-		return stream_local::readSome_inner(buf, size, is);
-	}
-	static bool readChar(char *c, InputStream& is)
-	{
-		return readSome(c, 1, is) == 1;
-	}
-};
-
-template<class OutputStream>
-struct OutputStreamTag {
-	static void write(OutputStream& os, const void *buf, size_t size)
-	{
-		stream_local::writeSub(os, buf, size);
-	}
-};
-
-class MemoryInputStream {
-	const char *p_;
-	size_t size_;
-	size_t pos;
-public:
-	MemoryInputStream(const void *p, size_t size) : p_(static_cast<const char*>(p)), size_(size), pos(0) {}
-	size_t readSome(void *buf, size_t size)
-	{
-		if (size > size_ - pos) size = size_ - pos;
-		memcpy(buf, p_ + pos, size);
-		pos += size;
-		return size;
-	}
-	size_t getPos() const { return pos; }
-};
-
-class MemoryOutputStream {
-	char *p_;
-	size_t size_;
-	size_t pos;
-public:
-	MemoryOutputStream(void *p, size_t size) : p_(static_cast<char*>(p)), size_(size), pos(0) {}
-	void write(bool *pb, const void *buf, size_t size)
-	{
-		if (size > size_ - pos) {
-			*pb = false;
-			return;
-		}
-		memcpy(p_ + pos, buf, size);
-		pos += size;
-		*pb = true;
-	}
-#ifndef CYBOZU_DONT_USE_EXCEPTION
-	void write(const void *buf, size_t size)
-	{
-		bool b;
-		write(&b, buf, size);
-		if (!b) throw cybozu::Exception("MemoryOutputStream:write") << size << size_ << pos;
-	}
-#endif
-	size_t getPos() const { return pos; }
-};
-
-#ifndef CYBOZU_DONT_USE_STRING
-class StringInputStream {
-	const std::string& str_;
-	size_t pos;
-	StringInputStream(const StringInputStream&);
-	void operator=(const StringInputStream&);
-public:
-	explicit StringInputStream(const std::string& str) : str_(str), pos(0) {}
-	size_t readSome(void *buf, size_t size)
-	{
-		const size_t remainSize = str_.size() - pos;
-		if (size > remainSize) size = remainSize;
-		memcpy(buf, &str_[pos], size);
-		pos += size;
-		return size;
-	}
-	size_t getPos() const { return pos; }
-};
-
-class StringOutputStream {
-	std::string& str_;
-	StringOutputStream(const StringOutputStream&);
-	void operator=(const StringOutputStream&);
-public:
-	explicit StringOutputStream(std::string& str) : str_(str) {}
-	void write(bool *pb, const void *buf, size_t size)
-	{
-		str_.append(static_cast<const char*>(buf), size);
-		*pb = true;
-	}
-	void write(const void *buf, size_t size)
-	{
-		str_.append(static_cast<const char*>(buf), size);
-	}
-	size_t getPos() const { return str_.size(); }
-};
-#endif
-
-template<class InputStream>
-size_t readSome(void *buf, size_t size, InputStream& is)
-{
-	return stream_local::readSome_inner(buf, size, is);
-}
-
-template<class OutputStream>
-void write(OutputStream& os, const void *buf, size_t size)
-{
-	stream_local::writeSub(os, buf, size);
-}
-
-template<class OutputStream>
-void write(bool *pb, OutputStream& os, const void *buf, size_t size)
-{
-	stream_local::writeSub(pb, os, buf, size);
-}
-
-template<class InputStream>
-void read(bool *pb, void *buf, size_t size, InputStream& is)
-{
-	char *p = static_cast<char*>(buf);
-	while (size > 0) {
-		size_t readSize = cybozu::readSome(p, size, is);
-		if (readSize == 0) {
-			*pb = false;
-			return;
-		}
-		p += readSize;
-		size -= readSize;
-	}
-	*pb = true;
-}
-
-#ifndef CYBOZU_DONT_USE_EXCEPTION
-template<class InputStream>
-void read(void *buf, size_t size, InputStream& is)
-{
-	bool b;
-	read(&b, buf, size, is);
-	if (!b) throw cybozu::Exception("stream:read");
-}
-#endif
-
-template<class InputStream>
-bool readChar(char *c, InputStream& is)
-{
-	return readSome(c, 1, is) == 1;
-}
-
-template<class OutputStream>
-void writeChar(OutputStream& os, char c)
-{
-	cybozu::write(os, &c, 1);
-}
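Round-tripping a few bytes through the fixed-buffer streams above (a sketch assuming cybozu/stream.hpp; both helper calls throw cybozu::Exception on truncation):

#include <cybozu/stream.hpp>
#include <cstdio>

int main()
{
	char buf[16];
	cybozu::MemoryOutputStream os(buf, sizeof(buf));
	cybozu::write(os, "hello", 5);       // throws if the buffer is too small

	cybozu::MemoryInputStream is(buf, os.getPos());
	char out[8] = {};
	cybozu::read(out, 5, is);            // reads exactly 5 bytes or throws
	printf("%s\n", out);                 // hello
	return 0;
}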
- -template -void writeChar(bool *pb, OutputStream& os, char c) -{ - cybozu::write(pb, os, &c, 1); -} - -} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/test.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/test.hpp deleted file mode 100644 index 7dfffab96..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/test.hpp +++ /dev/null @@ -1,373 +0,0 @@ -#pragma once -/** - @file - @brief unit test class - - @author MITSUNARI Shigeo(@herumi) -*/ - -#include -#include -#include -#include -#include -#include -#if defined(_MSC_VER) && (MSC_VER <= 1500) - #include -#else - #include -#endif - -namespace cybozu { namespace test { - -class AutoRun { - typedef void (*Func)(); - typedef std::list > UnitTestList; -public: - AutoRun() - : init_(0) - , term_(0) - , okCount_(0) - , ngCount_(0) - , exceptionCount_(0) - { - } - void setup(Func init, Func term) - { - init_ = init; - term_ = term; - } - void append(const char *name, Func func) - { - list_.push_back(std::make_pair(name, func)); - } - void set(bool isOK) - { - if (isOK) { - okCount_++; - } else { - ngCount_++; - } - } - std::string getBaseName(const std::string& name) const - { -#ifdef _WIN32 - const char sep = '\\'; -#else - const char sep = '/'; -#endif - size_t pos = name.find_last_of(sep); - std::string ret = name.substr(pos + 1); - pos = ret.find('.'); - return ret.substr(0, pos); - } - int run(int, char *argv[]) - { - std::string msg; - try { - if (init_) init_(); - for (UnitTestList::const_iterator i = list_.begin(), ie = list_.end(); i != ie; ++i) { - std::cout << "ctest:module=" << i->first << std::endl; - try { - (i->second)(); - } catch (std::exception& e) { - exceptionCount_++; - std::cout << "ctest: " << i->first << " is stopped by exception " << e.what() << std::endl; - } catch (...) { - exceptionCount_++; - std::cout << "ctest: " << i->first << " is stopped by unknown exception" << std::endl; - } - } - if (term_) term_(); - } catch (std::exception& e) { - msg = std::string("ctest:err:") + e.what(); - } catch (...) { - msg = "ctest:err: catch unknown exception"; - } - fflush(stdout); - if (msg.empty()) { - int err = ngCount_ + exceptionCount_; - int total = okCount_ + err; - std::cout << "ctest:name=" << getBaseName(*argv) - << ", module=" << list_.size() - << ", total=" << total - << ", ok=" << okCount_ - << ", ng=" << ngCount_ - << ", exception=" << exceptionCount_ << std::endl; - return err > 0 ? 
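How a test file drives the harness above: including the header supplies main(), and each CYBOZU_TEST_AUTO body registers itself with autoRun before main() starts (sketch, assumes cybozu/test.hpp):

#include <cybozu/test.hpp>

CYBOZU_TEST_AUTO(arith)
{
	CYBOZU_TEST_EQUAL(1 + 1, 2);
	CYBOZU_TEST_ASSERT(3 > 2);
}
// running the binary prints a summary such as
//   ctest:name=arith_test, module=1, total=2, ok=2, ng=0, exception=0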
1 : 0; - } else { - std::cout << msg << std::endl; - return 1; - } - } - static inline AutoRun& getInstance() - { - static AutoRun instance; - return instance; - } -private: - Func init_; - Func term_; - int okCount_; - int ngCount_; - int exceptionCount_; - UnitTestList list_; -}; - -static AutoRun& autoRun = AutoRun::getInstance(); - -inline void test(bool ret, const std::string& msg, const std::string& param, const char *file, int line) -{ - autoRun.set(ret); - if (!ret) { - printf("%s(%d):ctest:%s(%s);\n", file, line, msg.c_str(), param.c_str()); - } -} - -template -bool isEqual(const T& lhs, const U& rhs) -{ - return lhs == rhs; -} - -// avoid warning of comparision of integers of different signs -inline bool isEqual(size_t lhs, int rhs) -{ - return lhs == size_t(rhs); -} -inline bool isEqual(int lhs, size_t rhs) -{ - return size_t(lhs) == rhs; -} -inline bool isEqual(const char *lhs, const char *rhs) -{ - return strcmp(lhs, rhs) == 0; -} -inline bool isEqual(char *lhs, const char *rhs) -{ - return strcmp(lhs, rhs) == 0; -} -inline bool isEqual(const char *lhs, char *rhs) -{ - return strcmp(lhs, rhs) == 0; -} -inline bool isEqual(char *lhs, char *rhs) -{ - return strcmp(lhs, rhs) == 0; -} -// avoid to compare float directly -inline bool isEqual(float lhs, float rhs) -{ - union fi { - float f; - uint32_t i; - } lfi, rfi; - lfi.f = lhs; - rfi.f = rhs; - return lfi.i == rfi.i; -} -// avoid to compare double directly -inline bool isEqual(double lhs, double rhs) -{ - union di { - double d; - uint64_t i; - } ldi, rdi; - ldi.d = lhs; - rdi.d = rhs; - return ldi.i == rdi.i; -} - -} } // cybozu::test - -#ifndef CYBOZU_TEST_DISABLE_AUTO_RUN -int main(int argc, char *argv[]) -{ - return cybozu::test::autoRun.run(argc, argv); -} -#endif - -/** - alert if !x - @param x [in] -*/ -#define CYBOZU_TEST_ASSERT(x) cybozu::test::test(!!(x), "CYBOZU_TEST_ASSERT", #x, __FILE__, __LINE__) - -/** - alert if x != y - @param x [in] - @param y [in] -*/ -#define CYBOZU_TEST_EQUAL(x, y) { \ - bool _cybozu_eq = cybozu::test::isEqual(x, y); \ - cybozu::test::test(_cybozu_eq, "CYBOZU_TEST_EQUAL", #x ", " #y, __FILE__, __LINE__); \ - if (!_cybozu_eq) { \ - std::cout << "ctest: lhs=" << (x) << std::endl; \ - std::cout << "ctest: rhs=" << (y) << std::endl; \ - } \ -} -/** - alert if fabs(x, y) >= eps - @param x [in] - @param y [in] -*/ -#define CYBOZU_TEST_NEAR(x, y, eps) { \ - bool _cybozu_isNear = fabs((x) - (y)) < eps; \ - cybozu::test::test(_cybozu_isNear, "CYBOZU_TEST_NEAR", #x ", " #y, __FILE__, __LINE__); \ - if (!_cybozu_isNear) { \ - std::cout << "ctest: lhs=" << (x) << std::endl; \ - std::cout << "ctest: rhs=" << (y) << std::endl; \ - } \ -} - -#define CYBOZU_TEST_EQUAL_POINTER(x, y) { \ - bool _cybozu_eq = x == y; \ - cybozu::test::test(_cybozu_eq, "CYBOZU_TEST_EQUAL_POINTER", #x ", " #y, __FILE__, __LINE__); \ - if (!_cybozu_eq) { \ - std::cout << "ctest: lhs=" << static_cast(x) << std::endl; \ - std::cout << "ctest: rhs=" << static_cast(y) << std::endl; \ - } \ -} -/** - alert if x[] != y[] - @param x [in] - @param y [in] - @param n [in] -*/ -#define CYBOZU_TEST_EQUAL_ARRAY(x, y, n) { \ - for (size_t _cybozu_test_i = 0, _cybozu_ie = (size_t)(n); _cybozu_test_i < _cybozu_ie; _cybozu_test_i++) { \ - bool _cybozu_eq = cybozu::test::isEqual((x)[_cybozu_test_i], (y)[_cybozu_test_i]); \ - cybozu::test::test(_cybozu_eq, "CYBOZU_TEST_EQUAL_ARRAY", #x ", " #y ", " #n, __FILE__, __LINE__); \ - if (!_cybozu_eq) { \ - std::cout << "ctest: i=" << _cybozu_test_i << std::endl; \ - std::cout << "ctest: lhs=" << 
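Note that the float/double isEqual overloads above compare raw bit patterns, not values: +0.0 and -0.0 differ, while two identically-encoded NaNs match. A standalone illustration of the same comparison:

#include <cstdint>
#include <cstring>
#include <cstdio>

static bool bitEqual(double x, double y)
{
	uint64_t a, b;                  // same test as the di union above
	memcpy(&a, &x, sizeof(a));
	memcpy(&b, &y, sizeof(b));
	return a == b;
}

int main()
{
	printf("%d\n", bitEqual(0.0, -0.0)); // 0: only the sign bit differs
	printf("%d\n", bitEqual(1.5, 1.5));  // 1
	return 0;
}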
(x)[_cybozu_test_i] << std::endl; \ - std::cout << "ctest: rhs=" << (y)[_cybozu_test_i] << std::endl; \ - } \ - } \ -} - -/** - always alert - @param msg [in] -*/ -#define CYBOZU_TEST_FAIL(msg) cybozu::test::test(false, "CYBOZU_TEST_FAIL", msg, __FILE__, __LINE__) - -/** - verify message in exception -*/ -#define CYBOZU_TEST_EXCEPTION_MESSAGE(statement, Exception, msg) \ -{ \ - int _cybozu_ret = 0; \ - std::string _cybozu_errMsg; \ - try { \ - statement; \ - _cybozu_ret = 1; \ - } catch (const Exception& _cybozu_e) { \ - _cybozu_errMsg = _cybozu_e.what(); \ - if (_cybozu_errMsg.find(msg) == std::string::npos) { \ - _cybozu_ret = 2; \ - } \ - } catch (...) { \ - _cybozu_ret = 3; \ - } \ - if (_cybozu_ret) { \ - cybozu::test::test(false, "CYBOZU_TEST_EXCEPTION_MESSAGE", #statement ", " #Exception ", " #msg, __FILE__, __LINE__); \ - if (_cybozu_ret == 1) { \ - std::cout << "ctest: no exception" << std::endl; \ - } else if (_cybozu_ret == 2) { \ - std::cout << "ctest: bad exception msg:" << _cybozu_errMsg << std::endl; \ - } else { \ - std::cout << "ctest: unexpected exception" << std::endl; \ - } \ - } else { \ - cybozu::test::autoRun.set(true); \ - } \ -} - -#define CYBOZU_TEST_EXCEPTION(statement, Exception) \ -{ \ - int _cybozu_ret = 0; \ - try { \ - statement; \ - _cybozu_ret = 1; \ - } catch (const Exception&) { \ - } catch (...) { \ - _cybozu_ret = 2; \ - } \ - if (_cybozu_ret) { \ - cybozu::test::test(false, "CYBOZU_TEST_EXCEPTION", #statement ", " #Exception, __FILE__, __LINE__); \ - if (_cybozu_ret == 1) { \ - std::cout << "ctest: no exception" << std::endl; \ - } else { \ - std::cout << "ctest: unexpected exception" << std::endl; \ - } \ - } else { \ - cybozu::test::autoRun.set(true); \ - } \ -} - -/** - verify statement does not throw -*/ -#define CYBOZU_TEST_NO_EXCEPTION(statement) \ -try { \ - statement; \ - cybozu::test::autoRun.set(true); \ -} catch (...) 
{ \ - cybozu::test::test(false, "CYBOZU_TEST_NO_EXCEPTION", #statement, __FILE__, __LINE__); \ -} - -/** - append auto unit test - @param name [in] module name -*/ -#define CYBOZU_TEST_AUTO(name) \ -void cybozu_test_ ## name(); \ -struct cybozu_test_local_ ## name { \ - cybozu_test_local_ ## name() \ - { \ - cybozu::test::autoRun.append(#name, cybozu_test_ ## name); \ - } \ -} cybozu_test_local_instance_ ## name; \ -void cybozu_test_ ## name() - -/** - append auto unit test with fixture - @param name [in] module name -*/ -#define CYBOZU_TEST_AUTO_WITH_FIXTURE(name, Fixture) \ -void cybozu_test_ ## name(); \ -void cybozu_test_real_ ## name() \ -{ \ - Fixture f; \ - cybozu_test_ ## name(); \ -} \ -struct cybozu_test_local_ ## name { \ - cybozu_test_local_ ## name() \ - { \ - cybozu::test::autoRun.append(#name, cybozu_test_real_ ## name); \ - } \ -} cybozu_test_local_instance_ ## name; \ -void cybozu_test_ ## name() - -/** - setup fixture - @param Fixture [in] class name of fixture - @note cstr of Fixture is called before test and dstr of Fixture is called after test -*/ -#define CYBOZU_TEST_SETUP_FIXTURE(Fixture) \ -Fixture *cybozu_test_local_fixture; \ -void cybozu_test_local_init() \ -{ \ - cybozu_test_local_fixture = new Fixture(); \ -} \ -void cybozu_test_local_term() \ -{ \ - delete cybozu_test_local_fixture; \ -} \ -struct cybozu_test_local_fixture_setup_ { \ - cybozu_test_local_fixture_setup_() \ - { \ - cybozu::test::autoRun.setup(cybozu_test_local_init, cybozu_test_local_term); \ - } \ -} cybozu_test_local_fixture_setup_instance_; diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/unordered_map.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/unordered_map.hpp deleted file mode 100644 index 89f8f8774..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/unordered_map.hpp +++ /dev/null @@ -1,13 +0,0 @@ -#pragma once - -#include - -#ifdef CYBOZU_USE_BOOST - #include -#elif (CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11) || (defined __APPLE__) - #include -#elif (CYBOZU_CPP_VERSION == CYBOZU_CPP_VERSION_TR1) - #include - #include -#endif - diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/xorshift.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/xorshift.hpp deleted file mode 100644 index 08c6a04f9..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/cybozu/xorshift.hpp +++ /dev/null @@ -1,189 +0,0 @@ -#pragma once -/** - @file - @brief XorShift - - @author MITSUNARI Shigeo(@herumi) - @author MITSUNARI Shigeo -*/ -#include -#include - -namespace cybozu { - -namespace xorshift_local { - -/* - U is uint32_t or uint64_t -*/ -template -void read_local(void *p, size_t n, Gen& gen, U (Gen::*f)()) -{ - uint8_t *dst = static_cast(p); - const size_t uSize = sizeof(U); - assert(uSize == 4 || uSize == 8); - union ua { - U u; - uint8_t a[uSize]; - }; - - while (n >= uSize) { - ua ua; - ua.u = (gen.*f)(); - for (size_t i = 0; i < uSize; i++) { - dst[i] = ua.a[i]; - } - dst += uSize; - n -= uSize; - } - assert(n < uSize); - if (n > 0) { - ua ua; - ua.u = (gen.*f)(); - for (size_t i = 0; i < n; i++) { - dst[i] = ua.a[i]; - } - } -} - -} // xorshift_local - -class XorShift { - uint32_t x_, y_, z_, w_; -public: - explicit XorShift(uint32_t x = 0, uint32_t y = 0, uint32_t z = 0, uint32_t w = 0) - { - init(x, y, z, w); - } - void init(uint32_t x = 0, uint32_t y = 0, uint32_t z = 0, uint32_t w = 0) - { - x_ = x ? x : 123456789; - y_ = y ? y : 362436069; - z_ = z ? z : 521288629; - w_ = w ? 
w : 88675123; - } - uint32_t get32() - { - unsigned int t = x_ ^ (x_ << 11); - x_ = y_; y_ = z_; z_ = w_; - return w_ = (w_ ^ (w_ >> 19)) ^ (t ^ (t >> 8)); - } - uint32_t operator()() { return get32(); } - uint64_t get64() - { - uint32_t a = get32(); - uint32_t b = get32(); - return (uint64_t(a) << 32) | b; - } - template - void read(bool *pb, T *p, size_t n) - { - xorshift_local::read_local(p, n * sizeof(T), *this, &XorShift::get32); - *pb = true; - } - template - size_t read(T *p, size_t n) - { - bool b; - read(&b, p, n); - (void)b; - return n; - } -}; - -// see http://xorshift.di.unimi.it/xorshift128plus.c -class XorShift128Plus { - uint64_t s_[2]; - static const uint64_t seed0 = 123456789; - static const uint64_t seed1 = 987654321; -public: - explicit XorShift128Plus(uint64_t s0 = seed0, uint64_t s1 = seed1) - { - init(s0, s1); - } - void init(uint64_t s0 = seed0, uint64_t s1 = seed1) - { - s_[0] = s0; - s_[1] = s1; - } - uint32_t get32() - { - return static_cast(get64()); - } - uint64_t operator()() { return get64(); } - uint64_t get64() - { - uint64_t s1 = s_[0]; - const uint64_t s0 = s_[1]; - s_[0] = s0; - s1 ^= s1 << 23; - s_[1] = s1 ^ s0 ^ (s1 >> 18) ^ (s0 >> 5); - return s_[1] + s0; - } - template - void read(bool *pb, T *p, size_t n) - { - xorshift_local::read_local(p, n * sizeof(T), *this, &XorShift128Plus::get64); - *pb = true; - } - template - size_t read(T *p, size_t n) - { - bool b; - read(&b, p, n); - (void)b; - return n; - } -}; - -// see http://xoroshiro.di.unimi.it/xoroshiro128plus.c -class Xoroshiro128Plus { - uint64_t s_[2]; - static const uint64_t seed0 = 123456789; - static const uint64_t seed1 = 987654321; - uint64_t rotl(uint64_t x, unsigned int k) const - { - return (x << k) | (x >> (64 - k)); - } -public: - explicit Xoroshiro128Plus(uint64_t s0 = seed0, uint64_t s1 = seed1) - { - init(s0, s1); - } - void init(uint64_t s0 = seed0, uint64_t s1 = seed1) - { - s_[0] = s0; - s_[1] = s1; - } - uint32_t get32() - { - return static_cast(get64()); - } - uint64_t operator()() { return get64(); } - uint64_t get64() - { - uint64_t s0 = s_[0]; - uint64_t s1 = s_[1]; - uint64_t result = s0 + s1; - s1 ^= s0; - s_[0] = rotl(s0, 55) ^ s1 ^ (s1 << 14); - s_[1] = rotl(s1, 36); - return result; - } - template - void read(bool *pb, T *p, size_t n) - { - xorshift_local::read_local(p, n * sizeof(T), *this, &Xoroshiro128Plus::get64); - *pb = true; - } - template - size_t read(T *p, size_t n) - { - bool b; - read(&b, p, n); - (void)b; - return n; - } -}; - -} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/aggregate_sig.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/aggregate_sig.hpp deleted file mode 100644 index f31405705..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/aggregate_sig.hpp +++ /dev/null @@ -1,265 +0,0 @@ -#pragma once -/** - @file - @brief aggregate signature - @author MITSUNARI Shigeo(@herumi) - see http://crypto.stanford.edu/~dabo/papers/aggreg.pdf - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include -#include -#include -#include -#ifndef MCLBN_FP_UNIT_SIZE - #define MCLBN_FP_UNIT_SIZE 4 -#endif -#if MCLBN_FP_UNIT_SIZE == 4 -#include -namespace mcl { -using namespace mcl::bn256; -} -#elif MCLBN_FP_UNIT_SIZE == 6 -#include -namespace mcl { -using namespace mcl::bn384; -} -#elif MCLBN_FP_UNIT_SIZE == 8 -#include -namespace mcl { -using namespace mcl::bn512; -} -#else - #error "MCLBN_FP_UNIT_SIZE must be 4, 6, or 8" -#endif - -namespace mcl { namespace aggs { - -/* - 
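The three generators above share the same get32/get64/read interface and fall back to fixed default seeds, so sequences are reproducible across runs; they are statistical PRNGs, not cryptographic ones. Usage sketch (assumes cybozu/xorshift.hpp):

#include <cybozu/xorshift.hpp>
#include <cstdint>
#include <cstdio>

int main()
{
	cybozu::XorShift rg;          // deterministic: seeds default to fixed constants
	printf("%08x\n", rg.get32());

	uint64_t buf[4];
	rg.read(buf, 4);              // fill an array from the stream
	printf("%016llx\n", (unsigned long long)buf[0]);
	return 0;
}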
AGGregate Signature Template class -*/ -template -struct AGGST { - typedef typename G1::BaseFp Fp; - - class SecretKey; - class PublicKey; - class Signature; - - static G1 P_; - static G2 Q_; - static std::vector Qcoeff_; -public: - static void init(const mcl::CurveParam& cp = mcl::BN254) - { - initPairing(cp); - hashAndMapToG1(P_, "0"); - hashAndMapToG2(Q_, "0"); - precomputeG2(Qcoeff_, Q_); - } - class Signature : public fp::Serializable { - G1 S_; - friend class SecretKey; - friend class PublicKey; - public: - template - void load(InputStream& is, int ioMode = IoSerialize) - { - S_.load(is, ioMode); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - S_.save(os, ioMode); - } - friend std::istream& operator>>(std::istream& is, Signature& self) - { - self.load(is, fp::detectIoMode(G1::getIoMode(), is)); - return is; - } - friend std::ostream& operator<<(std::ostream& os, const Signature& self) - { - self.save(os, fp::detectIoMode(G1::getIoMode(), os)); - return os; - } - bool operator==(const Signature& rhs) const - { - return S_ == rhs.S_; - } - bool operator!=(const Signature& rhs) const { return !operator==(rhs); } - /* - aggregate sig[0..n) and set *this - */ - void aggregate(const Signature *sig, size_t n) - { - G1 S; - S.clear(); - for (size_t i = 0; i < n; i++) { - S += sig[i].S_; - } - S_ = S; - } - void aggregate(const std::vector& sig) - { - aggregate(sig.data(), sig.size()); - } - /* - aggregate verification - */ - bool verify(const void *const *msgVec, const size_t *sizeVec, const PublicKey *pubVec, size_t n) const - { - if (n == 0) return false; - typedef std::set FpSet; - FpSet msgSet; - typedef std::vector G1Vec; - G1Vec hv(n); - for (size_t i = 0; i < n; i++) { - Fp h; - h.setHashOf(msgVec[i], sizeVec[i]); - std::pair ret = msgSet.insert(h); - if (!ret.second) throw cybozu::Exception("aggs::verify:same msg"); - mapToG1(hv[i], h); - } - /* - e(aggSig, xQ) = prod_i e(hv[i], pub[i].Q) - <=> finalExp(e(-aggSig, xQ) * prod_i millerLoop(hv[i], pub[i].xQ)) == 1 - */ - GT e1, e2; - precomputedMillerLoop(e1, -S_, Qcoeff_); - millerLoop(e2, hv[0], pubVec[0].xQ_); - for (size_t i = 1; i < n; i++) { - GT e; - millerLoop(e, hv[i], pubVec[i].xQ_); - e2 *= e; - } - e1 *= e2; - finalExp(e1, e1); - return e1.isOne(); - } - bool verify(const std::vector& msgVec, const std::vector& pubVec) const - { - const size_t n = msgVec.size(); - if (n != pubVec.size()) throw cybozu::Exception("aggs:Signature:verify:bad size") << msgVec.size() << pubVec.size(); - if (n == 0) return false; - std::vector mv(n); - std::vector sv(n); - for (size_t i = 0; i < n; i++) { - mv[i] = msgVec[i].c_str(); - sv[i] = msgVec[i].size(); - } - return verify(&mv[0], &sv[0], &pubVec[0], n); - } - }; - class PublicKey : public fp::Serializable { - G2 xQ_; - friend class SecretKey; - friend class Signature; - public: - template - void load(InputStream& is, int ioMode = IoSerialize) - { - xQ_.load(is, ioMode); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - xQ_.save(os, ioMode); - } - friend std::istream& operator>>(std::istream& is, PublicKey& self) - { - self.load(is, fp::detectIoMode(G2::getIoMode(), is)); - return is; - } - friend std::ostream& operator<<(std::ostream& os, const PublicKey& self) - { - self.save(os, fp::detectIoMode(G2::getIoMode(), os)); - return os; - } - bool operator==(const PublicKey& rhs) const - { - return xQ_ == rhs.xQ_; - } - bool operator!=(const PublicKey& rhs) const { return !operator==(rhs); } - bool verify(const Signature& sig, 
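An end-to-end sketch of the aggregate scheme in this header: n signers, one aggregate signature, one verification (assumes mcl/aggregate_sig.hpp and a built mcl; messages must be pairwise distinct or verify() throws, as enforced by the msgSet check above):

#include <mcl/aggregate_sig.hpp>
#include <cstdio>
#include <string>
#include <vector>

int main()
{
	using namespace mcl::aggs;
	AGGS::init(); // BN254 by default: sets up the pairing and generators

	const size_t n = 3;
	std::vector<SecretKey> sec(n);
	std::vector<PublicKey> pub(n);
	std::vector<Signature> sig(n);
	std::vector<std::string> msg(n);
	for (size_t i = 0; i < n; i++) {
		sec[i].init();                 // secret key from the CSPRNG
		sec[i].getPublicKey(pub[i]);
		msg[i] = std::string("msg") + char('0' + int(i)); // distinct messages
		sec[i].sign(sig[i], msg[i]);
	}
	Signature agg;
	agg.aggregate(sig);                // S = S_0 + S_1 + S_2 in G1
	printf("verify=%d\n", agg.verify(msg, pub) ? 1 : 0); // verify=1
	return 0;
}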
const void *m, size_t mSize) const - { - /* - H = hash(m) - e(S, Q) = e(H, xQ) where S = xH - <=> e(S, Q)e(-H, xQ) = 1 - <=> finalExp(millerLoop(S, Q)e(-H, x)) = 1 - */ - G1 H; - hashAndMapToG1(H, m, mSize); - G1::neg(H, H); - GT e1, e2; - precomputedMillerLoop(e1, sig.S_, Qcoeff_); - millerLoop(e2, H, xQ_); - e1 *= e2; - finalExp(e1, e1); - return e1.isOne(); - } - bool verify(const Signature& sig, const std::string& m) const - { - return verify(sig, m.c_str(), m.size()); - } - }; - class SecretKey : public fp::Serializable { - Fr x_; - friend class PublicKey; - friend class Signature; - public: - template - void load(InputStream& is, int ioMode = IoSerialize) - { - x_.load(is, ioMode); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - x_.save(os, ioMode); - } - friend std::istream& operator>>(std::istream& is, SecretKey& self) - { - self.load(is, fp::detectIoMode(Fr::getIoMode(), is)); - return is; - } - friend std::ostream& operator<<(std::ostream& os, const SecretKey& self) - { - self.save(os, fp::detectIoMode(Fr::getIoMode(), os)); - return os; - } - bool operator==(const SecretKey& rhs) const - { - return x_ == rhs.x_; - } - bool operator!=(const SecretKey& rhs) const { return !operator==(rhs); } - void init() - { - x_.setByCSPRNG(); - } - void getPublicKey(PublicKey& pub) const - { - G2::mul(pub.xQ_, Q_, x_); - } - void sign(Signature& sig, const void *m, size_t mSize) const - { - hashAndMapToG1(sig.S_, m, mSize); - G1::mul(sig.S_, sig.S_, x_); - } - void sign(Signature& sig, const std::string& m) const - { - sign(sig, m.c_str(), m.size()); - } - }; -}; - -template G1 AGGST::P_; -template G2 AGGST::Q_; -template std::vector AGGST::Qcoeff_; - -typedef AGGST<> AGGS; -typedef AGGS::SecretKey SecretKey; -typedef AGGS::PublicKey PublicKey; -typedef AGGS::Signature Signature; - -} } // mcl::aggs diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/ahe.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/ahe.hpp deleted file mode 100644 index 239319d0d..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/ahe.hpp +++ /dev/null @@ -1,76 +0,0 @@ -#pragma once -/** - @file - @brief 192/256-bit additive homomorphic encryption by lifted-ElGamal - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include -#include - -namespace mcl { - -#ifdef MCL_USE_AHE192 -namespace ahe192 { - -const mcl::EcParam& para = mcl::ecparam::NIST_P192; - -typedef mcl::FpT Fp; -typedef mcl::FpT Zn; -typedef mcl::EcT Ec; -typedef mcl::ElgamalT ElgamalEc; -typedef ElgamalEc::PrivateKey SecretKey; -typedef ElgamalEc::PublicKey PublicKey; -typedef ElgamalEc::CipherText CipherText; - -static inline void initAhe() -{ - Fp::init(para.p); - Zn::init(para.n); - Ec::init(para.a, para.b); - Ec::setIoMode(16); - Zn::setIoMode(16); -} - -static inline void initSecretKey(SecretKey& sec) -{ - const Ec P(Fp(para.gx), Fp(para.gy)); - sec.init(P, Zn::getBitSize()); -} - -} //mcl::ahe192 -#endif - -#ifdef MCL_USE_AHE256 -namespace ahe256 { - -const mcl::EcParam& para = mcl::ecparam::NIST_P256; - -typedef mcl::FpT Fp; -typedef mcl::FpT Zn; -typedef mcl::EcT Ec; -typedef mcl::ElgamalT ElgamalEc; -typedef ElgamalEc::PrivateKey SecretKey; -typedef ElgamalEc::PublicKey PublicKey; -typedef ElgamalEc::CipherText CipherText; - -static inline void initAhe() -{ - Fp::init(para.p); - Zn::init(para.n); - Ec::init(para.a, para.b); - Ec::setIoMode(16); - Zn::setIoMode(16); -} - -static inline void 
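Why lifted ElGamal is additively homomorphic, in one line of algebra (this is the "additive homomorphic" property the file brief refers to; a worked note, not code from the tree):

// Enc(m; r) = (g^r, g^m * h^r)  with public key h = g^x
// Enc(m1; r1) * Enc(m2; r2)                      (component-wise product)
//   = (g^(r1+r2), g^(m1+m2) * h^(r1+r2))
//   = Enc(m1 + m2; r1 + r2)
// Multiplying ciphertexts adds plaintexts; decryption recovers g^m and then
// m by a small-range discrete-log search, so the scheme only suits small
// message spaces (counters, tallies, and the like).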
initSecretKey(SecretKey& sec) -{ - const Ec P(Fp(para.gx), Fp(para.gy)); - sec.init(P, Zn::getBitSize()); -} - -} //mcl::ahe256 -#endif - -} // mcl diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/array.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/array.hpp deleted file mode 100644 index a6d2a8fa3..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/array.hpp +++ /dev/null @@ -1,167 +0,0 @@ -#pragma once -/** - @file - @brief tiny vector class - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include -#include -#ifndef CYBOZU_DONT_USE_EXCEPTION -#include -#endif - -namespace mcl { - -template -class Array { - T *p_; - size_t n_; - template - void swap_(U& x, U& y) const - { - U t; - t = x; - x = y; - y = t; - } -public: - Array() : p_(0), n_(0) {} - ~Array() - { - free(p_); - } -#ifndef CYBOZU_DONT_USE_EXCEPTION - Array(const Array& rhs) - : p_(0) - , n_(0) - { - if (rhs.n_ == 0) return; - p_ = (T*)malloc(sizeof(T) * rhs.n_); - if (p_ == 0) throw std::bad_alloc(); - n_ = rhs.n_; - for (size_t i = 0; i < n_; i++) { - p_[i] = rhs.p_[i]; - } - } - Array& operator=(const Array& rhs) - { - Array tmp(rhs); - tmp.swap(*this); - return *this; - } -#endif - bool resize(size_t n) - { - if (n <= n_) { - n_ = n; - if (n == 0) { - free(p_); - p_ = 0; - } - return true; - } - T *q = (T*)malloc(sizeof(T) * n); - if (q == 0) return false; - for (size_t i = 0; i < n_; i++) { - q[i] = p_[i]; - } - free(p_); - p_ = q; - n_ = n; - return true; - } - bool copy(const Array& rhs) - { - if (this == &rhs) return true; - if (n_ < rhs.n_) { - clear(); - if (!resize(rhs.n_)) return false; - } - for (size_t i = 0; i < rhs.n_; i++) { - p_[i] = rhs.p_[i]; - } - n_ = rhs.n_; - return true; - } - void clear() - { - free(p_); - p_ = 0; - n_ = 0; - } - size_t size() const { return n_; } - void swap(Array& rhs) - { - swap_(p_, rhs.p_); - swap_(n_, rhs.n_); - } - T& operator[](size_t n) { return p_[n]; } - const T& operator[](size_t n) const { return p_[n]; } - T* data() { return p_; } - const T* data() const { return p_; } -}; - -template -class FixedArray { - T p_[maxSize]; - size_t n_; - FixedArray(const FixedArray&); - void operator=(const FixedArray&); - template - void swap_(U& x, U& y) const - { - U t; - t = x; - x = y; - y = t; - } -public: - FixedArray() : n_(0) {} - bool resize(size_t n) - { - if (n > maxSize) return false; - n_ = n; - return true; - } - bool copy(const FixedArray& rhs) - { - if (this == &rhs) return true; - for (size_t i = 0; i < rhs.n_; i++) { - p_[i] = rhs.p_[i]; - } - n_ = rhs.n_; - return true; - } - void clear() - { - n_ = 0; - } - size_t size() const { return n_; } - void swap(FixedArray& rhs) - { - T *minP = p_; - size_t minN = n_; - T *maxP = rhs.p_; - size_t maxN = rhs.n_; - if (minP > maxP) { - swap_(minP, maxP); - swap_(minN, maxN); - } - for (size_t i = 0; i < minN; i++) { - swap_(minP[i], maxP[i]); - } - for (size_t i = minN; i < maxN; i++) { - minP[i] = maxP[i]; - } - swap_(n_, rhs.n_); - } - T& operator[](size_t n) { return p_[n]; } - const T& operator[](size_t n) const { return p_[n]; } - T* data() { return p_; } - const T* data() const { return p_; } -}; - -} // mcl - diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/bls12_381.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/bls12_381.hpp deleted file mode 100644 index 316e142af..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/bls12_381.hpp +++ /dev/null @@ -1,15 +0,0 @@ 
-#pragma once -/** - @file - @brief preset class for BLS12-381 pairing - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#define MCL_MAX_FP_BIT_SIZE 384 -#define MCL_MAX_FR_BIT_SIZE 256 -#include - -namespace mcl { namespace bls12 { -using namespace mcl::bn; -} } diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/bn.h b/vendor/github.com/dexon-foundation/mcl/include/mcl/bn.h deleted file mode 100644 index 0a31d5501..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/bn.h +++ /dev/null @@ -1,428 +0,0 @@ -#pragma once -/** - @file - @brief C interface of 256/384-bit optimal ate pairing over BN curves - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -/* - the order of an elliptic curve over Fp is Fr -*/ -#ifndef MCLBN_FP_UNIT_SIZE - #error "define MCLBN_FP_UNIT_SIZE 4(, 6 or 8)" -#endif -#ifndef MCLBN_FR_UNIT_SIZE - #define MCLBN_FR_UNIT_SIZE MCLBN_FP_UNIT_SIZE -#endif -#define MCLBN_COMPILED_TIME_VAR ((MCLBN_FR_UNIT_SIZE) * 10 + (MCLBN_FP_UNIT_SIZE)) - -#include // for uint64_t, uint8_t -#include // for size_t - - -#if defined(_MSC_VER) - #ifdef MCLBN_DONT_EXPORT - #define MCLBN_DLL_API - #else - #ifdef MCLBN_DLL_EXPORT - #define MCLBN_DLL_API __declspec(dllexport) - #else - #define MCLBN_DLL_API __declspec(dllimport) - #endif - #endif - #ifndef MCLBN_NO_AUTOLINK - #if MCLBN_FP_UNIT_SIZE == 4 - #pragma comment(lib, "mclbn256.lib") - #elif MCLBN_FP_UNIT_SIZE == 6 - #pragma comment(lib, "mclbn384.lib") - #else - #pragma comment(lib, "mclbn512.lib") - #endif - #endif -#elif defined(__EMSCRIPTEN__) && !defined(MCLBN_DONT_EXPORT) - #define MCLBN_DLL_API __attribute__((used)) -#elif defined(__wasm__) && !defined(MCLBN_DONT_EXPORT) - #define MCLBN_DLL_API __attribute__((visibility("default"))) -#else - #define MCLBN_DLL_API -#endif - -#ifdef __EMSCRIPTEN__ - // avoid 64-bit integer - #define mclSize unsigned int - #define mclInt int -#else - // use #define for cgo - #define mclSize size_t - #define mclInt int64_t -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef MCLBN_NOT_DEFINE_STRUCT - -typedef struct mclBnFr mclBnFr; -typedef struct mclBnG1 mclBnG1; -typedef struct mclBnG2 mclBnG2; -typedef struct mclBnGT mclBnGT; -typedef struct mclBnFp mclBnFp; -typedef struct mclBnFp2 mclBnFp2; - -#else - -typedef struct { - uint64_t d[MCLBN_FR_UNIT_SIZE]; -} mclBnFr; - -typedef struct { - uint64_t d[MCLBN_FP_UNIT_SIZE * 3]; -} mclBnG1; - -typedef struct { - uint64_t d[MCLBN_FP_UNIT_SIZE * 2 * 3]; -} mclBnG2; - -typedef struct { - uint64_t d[MCLBN_FP_UNIT_SIZE * 12]; -} mclBnGT; - -typedef struct { - uint64_t d[MCLBN_FP_UNIT_SIZE]; -} mclBnFp; - -typedef struct { - mclBnFp d[2]; -} mclBnFp2; - -#endif - -#include - -#define MCLBN_IO_SERIALIZE_HEX_STR 2048 -// for backword compatibility -enum { - mclBn_CurveFp254BNb = 0, - mclBn_CurveFp382_1 = 1, - mclBn_CurveFp382_2 = 2, - mclBn_CurveFp462 = 3, - mclBn_CurveSNARK1 = 4, - mclBls12_CurveFp381 = 5 -}; - -// return 0xABC which means A.BC -MCLBN_DLL_API int mclBn_getVersion(); -/* - init library - @param curve [in] type of bn curve - @param compiledTimeVar [in] specify MCLBN_COMPILED_TIME_VAR, - which macro is used to make sure that the values - are the same when the library is built and used - @return 0 if success - curve = BN254/BN_SNARK1 is allowed if maxUnitSize = 4 - curve = BN381_1/BN381_2/BLS12_381 are allowed if maxUnitSize = 6 - This parameter is used to detect a library 
compiled with different MCLBN_FP_UNIT_SIZE for safety.
-	@note not threadsafe
-	@note BN_init is used in libeay32
-*/
-MCLBN_DLL_API int mclBn_init(int curve, int compiledTimeVar);
-
-
-/*
-	pairing : G1 x G2 -> GT
-	#G1 = #G2 = r
-	G1 is a curve defined on Fp
-
-	serialized size of elements
-	           |Fr| |Fp|
-	BN254       32   32
-	BN381       48   48
-	BLS12_381   32   48
-	BN462       58   58
-	|G1| = |Fp|
-	|G2| = |G1| * 2
-	|GT| = |G1| * 12
-*/
-/*
-	return the num of Unit(=uint64_t) to store Fr
-*/
-MCLBN_DLL_API int mclBn_getOpUnitSize(void);
-
-/*
-	return bytes for serialized G1(=Fp)
-*/
-MCLBN_DLL_API int mclBn_getG1ByteSize(void);
-/*
-	return bytes for serialized Fr
-*/
-MCLBN_DLL_API int mclBn_getFrByteSize(void);
-/*
-	return bytes for serialized Fp
-*/
-MCLBN_DLL_API int mclBn_getFpByteSize(void);
-
-/*
-	return decimal string of the order of the curve(=the characteristic of Fr)
-	return str(buf) if success
-*/
-MCLBN_DLL_API mclSize mclBn_getCurveOrder(char *buf, mclSize maxBufSize);
-
-/*
-	return decimal string of the characteristic of Fp
-	return str(buf) if success
-*/
-MCLBN_DLL_API mclSize mclBn_getFieldOrder(char *buf, mclSize maxBufSize);
-
-////////////////////////////////////////////////
-/*
-	deserialize
-	return read size if success else 0
-*/
-MCLBN_DLL_API mclSize mclBnFr_deserialize(mclBnFr *x, const void *buf, mclSize bufSize);
-MCLBN_DLL_API mclSize mclBnG1_deserialize(mclBnG1 *x, const void *buf, mclSize bufSize);
-MCLBN_DLL_API mclSize mclBnG2_deserialize(mclBnG2 *x, const void *buf, mclSize bufSize);
-MCLBN_DLL_API mclSize mclBnGT_deserialize(mclBnGT *x, const void *buf, mclSize bufSize);
-MCLBN_DLL_API mclSize mclBnFp_deserialize(mclBnFp *x, const void *buf, mclSize bufSize);
-MCLBN_DLL_API mclSize mclBnFp2_deserialize(mclBnFp2 *x, const void *buf, mclSize bufSize);
-
-/*
-	serialize
-	return written byte if success else 0
-*/
-MCLBN_DLL_API mclSize mclBnFr_serialize(void *buf, mclSize maxBufSize, const mclBnFr *x);
-MCLBN_DLL_API mclSize mclBnG1_serialize(void *buf, mclSize maxBufSize, const mclBnG1 *x);
-MCLBN_DLL_API mclSize mclBnG2_serialize(void *buf, mclSize maxBufSize, const mclBnG2 *x);
-MCLBN_DLL_API mclSize mclBnGT_serialize(void *buf, mclSize maxBufSize, const mclBnGT *x);
-MCLBN_DLL_API mclSize mclBnFp_serialize(void *buf, mclSize maxBufSize, const mclBnFp *x);
-MCLBN_DLL_API mclSize mclBnFp2_serialize(void *buf, mclSize maxBufSize, const mclBnFp2 *x);
-
-/*
-	set string
-	ioMode
-	10 : decimal number
-	16 : hexadecimal number
-	MCLBN_IO_SERIALIZE_HEX_STR : hex string of serialized data
-	return 0 if success else -1
-*/
-MCLBN_DLL_API int mclBnFr_setStr(mclBnFr *x, const char *buf, mclSize bufSize, int ioMode);
-MCLBN_DLL_API int mclBnG1_setStr(mclBnG1 *x, const char *buf, mclSize bufSize, int ioMode);
-MCLBN_DLL_API int mclBnG2_setStr(mclBnG2 *x, const char *buf, mclSize bufSize, int ioMode);
-MCLBN_DLL_API int mclBnGT_setStr(mclBnGT *x, const char *buf, mclSize bufSize, int ioMode);
-MCLBN_DLL_API int mclBnFp_setStr(mclBnFp *x, const char *buf, mclSize bufSize, int ioMode);
-
-/*
-	buf is terminated by '\0'
-	return strlen(buf) if success else 0
-*/
-MCLBN_DLL_API mclSize mclBnFr_getStr(char *buf, mclSize maxBufSize, const mclBnFr *x, int ioMode);
-MCLBN_DLL_API mclSize mclBnG1_getStr(char *buf, mclSize maxBufSize, const mclBnG1 *x, int ioMode);
-MCLBN_DLL_API mclSize mclBnG2_getStr(char *buf, mclSize maxBufSize, const mclBnG2 *x, int ioMode);
-MCLBN_DLL_API mclSize mclBnGT_getStr(char *buf, mclSize maxBufSize, const mclBnGT *x, int ioMode);
-MCLBN_DLL_API mclSize
mclBnFp_getStr(char *buf, mclSize maxBufSize, const mclBnFp *x, int ioMode); - -// set zero -MCLBN_DLL_API void mclBnFr_clear(mclBnFr *x); -MCLBN_DLL_API void mclBnFp_clear(mclBnFp *x); -MCLBN_DLL_API void mclBnFp2_clear(mclBnFp2 *x); - -// set x to y -MCLBN_DLL_API void mclBnFr_setInt(mclBnFr *y, mclInt x); -MCLBN_DLL_API void mclBnFr_setInt32(mclBnFr *y, int x); - -// x = buf & (1 << bitLen(r)) - 1 -// if (x >= r) x &= (1 << (bitLen(r) - 1)) - 1 -// always return 0 -MCLBN_DLL_API int mclBnFr_setLittleEndian(mclBnFr *x, const void *buf, mclSize bufSize); -MCLBN_DLL_API int mclBnFp_setLittleEndian(mclBnFp *x, const void *buf, mclSize bufSize); - -// set (buf mod r) to x -// return 0 if bufSize <= (byte size of Fr * 2) else -1 -MCLBN_DLL_API int mclBnFr_setLittleEndianMod(mclBnFr *x, const void *buf, mclSize bufSize); -// set (buf mod p) to x -// return 0 if bufSize <= (byte size of Fp * 2) else -1 -MCLBN_DLL_API int mclBnFp_setLittleEndianMod(mclBnFp *x, const void *buf, mclSize bufSize); - -// return 1 if true and 0 otherwise -MCLBN_DLL_API int mclBnFr_isValid(const mclBnFr *x); -MCLBN_DLL_API int mclBnFr_isEqual(const mclBnFr *x, const mclBnFr *y); -MCLBN_DLL_API int mclBnFr_isZero(const mclBnFr *x); -MCLBN_DLL_API int mclBnFr_isOne(const mclBnFr *x); - -MCLBN_DLL_API int mclBnFp_isEqual(const mclBnFp *x, const mclBnFp *y); -MCLBN_DLL_API int mclBnFp2_isEqual(const mclBnFp2 *x, const mclBnFp2 *y); - -#ifndef MCL_DONT_USE_CSRPNG -// return 0 if success -MCLBN_DLL_API int mclBnFr_setByCSPRNG(mclBnFr *x); - -/* - set user-defined random function for setByCSPRNG - @param self [in] user-defined pointer - @param readFunc [in] user-defined function, - which writes random bufSize bytes to buf and returns bufSize if success else returns 0 - @note if self == 0 and readFunc == 0 then set default random function - @note not threadsafe -*/ -MCLBN_DLL_API void mclBn_setRandFunc(void *self, unsigned int (*readFunc)(void *self, void *buf, unsigned int bufSize)); -#endif - -// hash(s) and set x -// return 0 if success -MCLBN_DLL_API int mclBnFr_setHashOf(mclBnFr *x, const void *buf, mclSize bufSize); -MCLBN_DLL_API int mclBnFp_setHashOf(mclBnFp *x, const void *buf, mclSize bufSize); - -// map x to y -// return 0 if success else -1 -MCLBN_DLL_API int mclBnFp_mapToG1(mclBnG1 *y, const mclBnFp *x); -MCLBN_DLL_API int mclBnFp2_mapToG2(mclBnG2 *y, const mclBnFp2 *x); - -MCLBN_DLL_API void mclBnFr_neg(mclBnFr *y, const mclBnFr *x); -MCLBN_DLL_API void mclBnFr_inv(mclBnFr *y, const mclBnFr *x); -MCLBN_DLL_API void mclBnFr_sqr(mclBnFr *y, const mclBnFr *x); -MCLBN_DLL_API void mclBnFr_add(mclBnFr *z, const mclBnFr *x, const mclBnFr *y); -MCLBN_DLL_API void mclBnFr_sub(mclBnFr *z, const mclBnFr *x, const mclBnFr *y); -MCLBN_DLL_API void mclBnFr_mul(mclBnFr *z, const mclBnFr *x, const mclBnFr *y); -MCLBN_DLL_API void mclBnFr_div(mclBnFr *z, const mclBnFr *x, const mclBnFr *y); - -//////////////////////////////////////////////// -// set zero -MCLBN_DLL_API void mclBnG1_clear(mclBnG1 *x); - - -// return 1 if true and 0 otherwise -MCLBN_DLL_API int mclBnG1_isValid(const mclBnG1 *x); -MCLBN_DLL_API int mclBnG1_isEqual(const mclBnG1 *x, const mclBnG1 *y); -MCLBN_DLL_API int mclBnG1_isZero(const mclBnG1 *x); -/* - return 1 if x has a correct order - x is valid point of G1 if and only if - mclBnG1_isValid() is true, which contains mclBnG1_isValidOrder() if mclBn_verifyOrderG1(true) - mclBnG1_isValid() && mclBnG1_isValidOrder() is true if mclBn_verifyOrderG1(false) -*/ -MCLBN_DLL_API int mclBnG1_isValidOrder(const mclBnG1 
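A compact sanity check of the pairing API declared in this header: bilinearity means e(aP, bQ) = e(P, Q)^(ab). Sketch, assuming mcl/bn.h with MCLBN_FP_UNIT_SIZE 4 and linking the matching mclbn256 build:

#define MCLBN_FP_UNIT_SIZE 4
#include <mcl/bn.h>
#include <stdio.h>

int main()
{
	if (mclBn_init(mclBn_CurveFp254BNb, MCLBN_COMPILED_TIME_VAR) != 0) return 1;

	mclBnG1 P, aP;
	mclBnG2 Q, bQ;
	mclBnFr a, b, ab;
	mclBnGT e1, e2;

	mclBnG1_hashAndMapTo(&P, "P", 1);
	mclBnG2_hashAndMapTo(&Q, "Q", 1);
	mclBnFr_setInt(&a, 7);
	mclBnFr_setInt(&b, 11);
	mclBnFr_mul(&ab, &a, &b);
	mclBnG1_mul(&aP, &P, &a);
	mclBnG2_mul(&bQ, &Q, &b);

	mclBn_pairing(&e1, &aP, &bQ);  // e(aP, bQ)
	mclBn_pairing(&e2, &P, &Q);
	mclBnGT_pow(&e2, &e2, &ab);    // e(P, Q)^(ab)
	printf("bilinear=%d\n", mclBnGT_isEqual(&e1, &e2)); // 1
	return 0;
}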
*x); - -MCLBN_DLL_API int mclBnG1_hashAndMapTo(mclBnG1 *x, const void *buf, mclSize bufSize); - - -MCLBN_DLL_API void mclBnG1_neg(mclBnG1 *y, const mclBnG1 *x); -MCLBN_DLL_API void mclBnG1_dbl(mclBnG1 *y, const mclBnG1 *x); -MCLBN_DLL_API void mclBnG1_normalize(mclBnG1 *y, const mclBnG1 *x); -MCLBN_DLL_API void mclBnG1_add(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y); -MCLBN_DLL_API void mclBnG1_sub(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y); -MCLBN_DLL_API void mclBnG1_mul(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y); - -/* - constant time mul -*/ -MCLBN_DLL_API void mclBnG1_mulCT(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y); - -//////////////////////////////////////////////// -// set zero -MCLBN_DLL_API void mclBnG2_clear(mclBnG2 *x); - -// return 1 if true and 0 otherwise -MCLBN_DLL_API int mclBnG2_isValid(const mclBnG2 *x); -MCLBN_DLL_API int mclBnG2_isEqual(const mclBnG2 *x, const mclBnG2 *y); -MCLBN_DLL_API int mclBnG2_isZero(const mclBnG2 *x); -// return 1 if x has a correct order -MCLBN_DLL_API int mclBnG2_isValidOrder(const mclBnG2 *x); - -MCLBN_DLL_API int mclBnG2_hashAndMapTo(mclBnG2 *x, const void *buf, mclSize bufSize); - -// return written size if success else 0 - -MCLBN_DLL_API void mclBnG2_neg(mclBnG2 *y, const mclBnG2 *x); -MCLBN_DLL_API void mclBnG2_dbl(mclBnG2 *y, const mclBnG2 *x); -MCLBN_DLL_API void mclBnG2_normalize(mclBnG2 *y, const mclBnG2 *x); -MCLBN_DLL_API void mclBnG2_add(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y); -MCLBN_DLL_API void mclBnG2_sub(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y); -MCLBN_DLL_API void mclBnG2_mul(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y); -/* - constant time mul -*/ -MCLBN_DLL_API void mclBnG2_mulCT(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y); - -//////////////////////////////////////////////// -// set zero -MCLBN_DLL_API void mclBnGT_clear(mclBnGT *x); -// set x to y -MCLBN_DLL_API void mclBnGT_setInt(mclBnGT *y, mclInt x); -MCLBN_DLL_API void mclBnGT_setInt32(mclBnGT *y, int x); - -// return 1 if true and 0 otherwise -MCLBN_DLL_API int mclBnGT_isEqual(const mclBnGT *x, const mclBnGT *y); -MCLBN_DLL_API int mclBnGT_isZero(const mclBnGT *x); -MCLBN_DLL_API int mclBnGT_isOne(const mclBnGT *x); - -MCLBN_DLL_API void mclBnGT_neg(mclBnGT *y, const mclBnGT *x); -MCLBN_DLL_API void mclBnGT_inv(mclBnGT *y, const mclBnGT *x); -MCLBN_DLL_API void mclBnGT_sqr(mclBnGT *y, const mclBnGT *x); -MCLBN_DLL_API void mclBnGT_add(mclBnGT *z, const mclBnGT *x, const mclBnGT *y); -MCLBN_DLL_API void mclBnGT_sub(mclBnGT *z, const mclBnGT *x, const mclBnGT *y); -MCLBN_DLL_API void mclBnGT_mul(mclBnGT *z, const mclBnGT *x, const mclBnGT *y); -MCLBN_DLL_API void mclBnGT_div(mclBnGT *z, const mclBnGT *x, const mclBnGT *y); - -/* - pow for all elements of Fp12 -*/ -MCLBN_DLL_API void mclBnGT_powGeneric(mclBnGT *z, const mclBnGT *x, const mclBnFr *y); -/* - pow for only {x|x^r = 1} in Fp12 by GLV method - the value generated by pairing satisfies the condition -*/ -MCLBN_DLL_API void mclBnGT_pow(mclBnGT *z, const mclBnGT *x, const mclBnFr *y); - -MCLBN_DLL_API void mclBn_pairing(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y); -MCLBN_DLL_API void mclBn_finalExp(mclBnGT *y, const mclBnGT *x); -MCLBN_DLL_API void mclBn_millerLoop(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y); - -// return precomputedQcoeffSize * sizeof(Fp6) / sizeof(uint64_t) -MCLBN_DLL_API int mclBn_getUint64NumToPrecompute(void); - -// allocate Qbuf[mclBn_getUint64NumToPrecompute()] before calling this -MCLBN_DLL_API void mclBn_precomputeG2(uint64_t *Qbuf, const mclBnG2 *Q);
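A hedged sketch of driving the API above end to end: Fr arithmetic, hashing to the groups, and a bilinearity check e(aP, bQ) = e(P, Q)^{ab}. mclBn_init, MCLBN_COMPILED_TIME_VAR and the unit-size defines are assumptions taken from elsewhere in this header family; adjust them to the build actually linked.

#define MCLBN_FP_UNIT_SIZE 6
#define MCLBN_FR_UNIT_SIZE 4
#include <mcl/bn.h>
#include <cstdio>

int main()
{
	// curve choice is an example; MCLBN_COMPILED_TIME_VAR guards header/library mismatch
	if (mclBn_init(MCL_BLS12_381, MCLBN_COMPILED_TIME_VAR) != 0) return 1;
	mclBnFr a, b, ab;
	mclBnFr_setByCSPRNG(&a);
	mclBnFr_setByCSPRNG(&b);
	mclBnFr_mul(&ab, &a, &b);            // ab = a*b in Fr
	mclBnG1 P, aP;
	mclBnG2 Q, bQ;
	mclBnG1_hashAndMapTo(&P, "P", 1);    // hash a message onto G1
	mclBnG2_hashAndMapTo(&Q, "Q", 1);
	mclBnG1_mul(&aP, &P, &a);            // aP = a*P
	mclBnG2_mul(&bQ, &Q, &b);            // bQ = b*Q
	mclBnGT e1, e2;
	mclBn_pairing(&e1, &aP, &bQ);        // e(aP, bQ)
	mclBn_pairing(&e2, &P, &Q);
	mclBnGT_pow(&e2, &e2, &ab);          // e(P, Q)^(ab)
	std::printf("bilinear: %d\n", mclBnGT_isEqual(&e1, &e2)); // prints 1
	return 0;
}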
 - -MCLBN_DLL_API void mclBn_precomputedMillerLoop(mclBnGT *f, const mclBnG1 *P, const uint64_t *Qbuf); -MCLBN_DLL_API void mclBn_precomputedMillerLoop2(mclBnGT *f, const mclBnG1 *P1, const uint64_t *Q1buf, const mclBnG1 *P2, const uint64_t *Q2buf); -MCLBN_DLL_API void mclBn_precomputedMillerLoop2mixed(mclBnGT *f, const mclBnG1 *P1, const mclBnG2 *Q1, const mclBnG1 *P2, const uint64_t *Q2buf); - -/* - Lagrange interpolation - recover out = y(0) by { (xVec[i], yVec[i]) } - return 0 if success else -1 - @note *out = yVec[0] if k = 1 - @note k >= 2, xVec[i] != 0, xVec[i] != xVec[j] for i != j -*/ -MCLBN_DLL_API int mclBn_FrLagrangeInterpolation(mclBnFr *out, const mclBnFr *xVec, const mclBnFr *yVec, mclSize k); -MCLBN_DLL_API int mclBn_G1LagrangeInterpolation(mclBnG1 *out, const mclBnFr *xVec, const mclBnG1 *yVec, mclSize k); -MCLBN_DLL_API int mclBn_G2LagrangeInterpolation(mclBnG2 *out, const mclBnFr *xVec, const mclBnG2 *yVec, mclSize k); - -/* - evaluate polynomial - out = f(x) = c[0] + c[1] * x + c[2] * x^2 + ... + c[cSize - 1] * x^(cSize - 1) - @note cSize >= 2 -*/ -MCLBN_DLL_API int mclBn_FrEvaluatePolynomial(mclBnFr *out, const mclBnFr *cVec, mclSize cSize, const mclBnFr *x); -MCLBN_DLL_API int mclBn_G1EvaluatePolynomial(mclBnG1 *out, const mclBnG1 *cVec, mclSize cSize, const mclBnFr *x); -MCLBN_DLL_API int mclBn_G2EvaluatePolynomial(mclBnG2 *out, const mclBnG2 *cVec, mclSize cSize, const mclBnFr *x); - -/* - verify whether a point of an elliptic curve has order r - This API affects setStr(), deserialize() for G2 on BN or G1/G2 on BLS12 - @param doVerify [in] does not verify if zero (default 1) -*/ -MCLBN_DLL_API void mclBn_verifyOrderG1(int doVerify); -MCLBN_DLL_API void mclBn_verifyOrderG2(int doVerify); - -/* - EXPERIMENTAL - only for curve = MCL_SECP* or MCL_NIST* - return standard base point of the current elliptic curve -*/ -MCLBN_DLL_API int mclBnG1_getBasePoint(mclBnG1 *x); - -#ifdef __cplusplus -} -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/bn.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/bn.hpp deleted file mode 100644 index 5ebe5d956..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/bn.hpp +++ /dev/null @@ -1,2261 +0,0 @@ -#pragma once -/** - @file - @brief optimal ate pairing over BN-curve / BLS12-curve - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include -#include -#include -#include -#ifndef CYBOZU_DONT_USE_EXCEPTION -#include -#endif - -/* - set bit size of Fp and Fr -*/ -#ifndef MCL_MAX_FP_BIT_SIZE - #define MCL_MAX_FP_BIT_SIZE 256 -#endif - -#ifndef MCL_MAX_FR_BIT_SIZE - #define MCL_MAX_FR_BIT_SIZE MCL_MAX_FP_BIT_SIZE -#endif -namespace mcl { - -struct CurveParam { - /* - y^2 = x^3 + b - i^2 = -1 - xi = xi_a + i - v^3 = xi - w^2 = v - */ - const char *z; - int b; // y^2 = x^3 + b - int xi_a; // xi = xi_a + i - /* - BN254, BN381 : Dtype - BLS12-381 : Mtype - */ - bool isMtype; - int curveType; // same in curve_type.h - bool operator==(const CurveParam& rhs) const - { - return strcmp(z, rhs.z) == 0 && b == rhs.b && xi_a == rhs.xi_a && isMtype == rhs.isMtype; - } - bool operator!=(const CurveParam& rhs) const { return !operator==(rhs); } -}; - -const CurveParam BN254 = { "-0x4080000000000001", 2, 1, false, MCL_BN254 }; // -(2^62 + 2^55 + 1) -// provisional (experimental) param with maxBitSize = 384 -const CurveParam BN381_1 = { "-0x400011000000000000000001", 2, 1, false, MCL_BN381_1 }; // -(2^94 + 2^76 + 2^72 + 1) // A Family of
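The interpolation and evaluation entry points above combine into k-of-n secret sharing: evaluate a random degree-(k-1) polynomial to make shares, interpolate at 0 to recover. A sketch under the same init assumptions as before:

#define MCLBN_FP_UNIT_SIZE 6
#define MCLBN_FR_UNIT_SIZE 4
#include <mcl/bn.h>
#include <cstdio>

int main()
{
	if (mclBn_init(MCL_BLS12_381, MCLBN_COMPILED_TIME_VAR) != 0) return 1;
	const mclSize k = 3;                 // threshold
	mclBnFr coef[k];                     // f(x) = coef[0] + coef[1] x + coef[2] x^2
	for (mclSize i = 0; i < k; i++) mclBnFr_setByCSPRNG(&coef[i]);
	mclBnFr xs[k], ys[k];
	for (mclSize i = 0; i < k; i++) {
		mclBnFr_setInt(&xs[i], (mclInt)(i + 1));              // x must be nonzero
		mclBn_FrEvaluatePolynomial(&ys[i], coef, k, &xs[i]);  // share y_i = f(x_i)
	}
	mclBnFr secret;
	mclBn_FrLagrangeInterpolation(&secret, xs, ys, k);        // recover f(0)
	std::printf("recovered: %d\n", mclBnFr_isEqual(&secret, &coef[0])); // prints 1
	return 0;
}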
Implementation-Friendly BN Elliptic Curves -const CurveParam BN381_2 = { "-0x400040090001000000000001", 2, 1, false, MCL_BN381_2 }; // -(2^94 + 2^78 + 2^67 + 2^64 + 2^48 + 1) // used in relic-toolkit -const CurveParam BN462 = { "0x4001fffffffffffffffffffffbfff", 5, 2, false, MCL_BN462 }; // 2^114 + 2^101 - 2^14 - 1 // https://eprint.iacr.org/2017/334 -const CurveParam BN_SNARK1 = { "4965661367192848881", 3, 9, false, MCL_BN_SNARK1 }; -const CurveParam BLS12_381 = { "-0xd201000000010000", 4, 1, true, MCL_BLS12_381 }; -const CurveParam BN160 = { "0x4000000031", 3, 4, false, MCL_BN160 }; - -inline const CurveParam& getCurveParam(int type) -{ - switch (type) { - case MCL_BN254: return mcl::BN254; - case MCL_BN381_1: return mcl::BN381_1; - case MCL_BN381_2: return mcl::BN381_2; - case MCL_BN462: return mcl::BN462; - case MCL_BN_SNARK1: return mcl::BN_SNARK1; - case MCL_BLS12_381: return mcl::BLS12_381; - case MCL_BN160: return mcl::BN160; - default: - assert(0); - return mcl::BN254; - } -} - -namespace bn { - -namespace local { -struct FpTag; -struct FrTag; -} - -typedef mcl::FpT Fp; -typedef mcl::FpT Fr; -typedef mcl::Fp2T Fp2; -typedef mcl::Fp6T Fp6; -typedef mcl::Fp12T Fp12; -typedef mcl::EcT G1; -typedef mcl::EcT G2; -typedef Fp12 GT; - -typedef mcl::FpDblT FpDbl; -typedef mcl::Fp2DblT Fp2Dbl; - -inline void Frobenius(Fp2& y, const Fp2& x) -{ - Fp2::Frobenius(y, x); -} -inline void Frobenius(Fp12& y, const Fp12& x) -{ - Fp12::Frobenius(y, x); -} -/* - twisted Frobenius for G2 -*/ -void Frobenius(G2& D, const G2& S); -void Frobenius2(G2& D, const G2& S); -void Frobenius3(G2& D, const G2& S); - -namespace local { - -typedef mcl::FixedArray SignVec; - -inline size_t getPrecomputeQcoeffSize(const SignVec& sv) -{ - size_t idx = 2 + 2; - for (size_t i = 2; i < sv.size(); i++) { - idx++; - if (sv[i]) idx++; - } - return idx; -} - -template -X evalPoly(const X& x, const C (&c)[N]) -{ - X ret = c[N - 1]; - for (size_t i = 1; i < N; i++) { - ret *= x; - ret += c[N - 1 - i]; - } - return ret; -} - -enum TwistBtype { - tb_generic, - tb_1m1i, // 1 - 1i - tb_1m2i // 1 - 2i -}; - -/* - l = (a, b, c) => (a, b * P.y, c * P.x) -*/ -inline void updateLine(Fp6& l, const G1& P) -{ - l.b.a *= P.y; - l.b.b *= P.y; - l.c.a *= P.x; - l.c.b *= P.x; -} - -struct Compress { - Fp12& z_; - Fp2& g1_; - Fp2& g2_; - Fp2& g3_; - Fp2& g4_; - Fp2& g5_; - // z is output area - Compress(Fp12& z, const Fp12& x) - : z_(z) - , g1_(z.getFp2()[4]) - , g2_(z.getFp2()[3]) - , g3_(z.getFp2()[2]) - , g4_(z.getFp2()[1]) - , g5_(z.getFp2()[5]) - { - g2_ = x.getFp2()[3]; - g3_ = x.getFp2()[2]; - g4_ = x.getFp2()[1]; - g5_ = x.getFp2()[5]; - } - Compress(Fp12& z, const Compress& c) - : z_(z) - , g1_(z.getFp2()[4]) - , g2_(z.getFp2()[3]) - , g3_(z.getFp2()[2]) - , g4_(z.getFp2()[1]) - , g5_(z.getFp2()[5]) - { - g2_ = c.g2_; - g3_ = c.g3_; - g4_ = c.g4_; - g5_ = c.g5_; - } - void decompressBeforeInv(Fp2& nume, Fp2& denomi) const - { - assert(&nume != &denomi); - - if (g2_.isZero()) { - Fp2::add(nume, g4_, g4_); - nume *= g5_; - denomi = g3_; - } else { - Fp2 t; - Fp2::sqr(nume, g5_); - Fp2::mul_xi(denomi, nume); - Fp2::sqr(nume, g4_); - Fp2::sub(t, nume, g3_); - t += t; - t += nume; - Fp2::add(nume, denomi, t); - Fp2::divBy4(nume, nume); - denomi = g2_; - } - } - - // output to z - void decompressAfterInv() - { - Fp2& g0 = z_.getFp2()[0]; - Fp2 t0, t1; - // Compute g0. 
- Fp2::sqr(t0, g1_); - Fp2::mul(t1, g3_, g4_); - t0 -= t1; - t0 += t0; - t0 -= t1; - Fp2::mul(t1, g2_, g5_); - t0 += t1; - Fp2::mul_xi(g0, t0); - g0.a += Fp::one(); - } - -public: - void decompress() // for test - { - Fp2 nume, denomi; - decompressBeforeInv(nume, denomi); - Fp2::inv(denomi, denomi); - g1_ = nume * denomi; // g1 is recoverd. - decompressAfterInv(); - } - /* - 2275clk * 186 = 423Kclk QQQ - */ - static void squareC(Compress& z) - { - Fp2 t0, t1, t2; - Fp2Dbl T0, T1, T2, T3; - Fp2Dbl::sqrPre(T0, z.g4_); - Fp2Dbl::sqrPre(T1, z.g5_); - Fp2Dbl::mul_xi(T2, T1); - T2 += T0; - Fp2Dbl::mod(t2, T2); - Fp2::add(t0, z.g4_, z.g5_); - Fp2Dbl::sqrPre(T2, t0); - T0 += T1; - T2 -= T0; - Fp2Dbl::mod(t0, T2); - Fp2::add(t1, z.g2_, z.g3_); - Fp2Dbl::sqrPre(T3, t1); - Fp2Dbl::sqrPre(T2, z.g2_); - Fp2::mul_xi(t1, t0); - z.g2_ += t1; - z.g2_ += z.g2_; - z.g2_ += t1; - Fp2::sub(t1, t2, z.g3_); - t1 += t1; - Fp2Dbl::sqrPre(T1, z.g3_); - Fp2::add(z.g3_, t1, t2); - Fp2Dbl::mul_xi(T0, T1); - T0 += T2; - Fp2Dbl::mod(t0, T0); - Fp2::sub(z.g4_, t0, z.g4_); - z.g4_ += z.g4_; - z.g4_ += t0; - Fp2Dbl::addPre(T2, T2, T1); - T3 -= T2; - Fp2Dbl::mod(t0, T3); - z.g5_ += t0; - z.g5_ += z.g5_; - z.g5_ += t0; - } - static void square_n(Compress& z, int n) - { - for (int i = 0; i < n; i++) { - squareC(z); - } - } - /* - Exponentiation over compression for: - z = x^Param::z.abs() - */ - static void fixed_power(Fp12& z, const Fp12& x) - { - if (x.isOne()) { - z = 1; - return; - } - Fp12 x_org = x; - Fp12 d62; - Fp2 c55nume, c55denomi, c62nume, c62denomi; - Compress c55(z, x); - square_n(c55, 55); - c55.decompressBeforeInv(c55nume, c55denomi); - Compress c62(d62, c55); - square_n(c62, 62 - 55); - c62.decompressBeforeInv(c62nume, c62denomi); - Fp2 acc; - Fp2::mul(acc, c55denomi, c62denomi); - Fp2::inv(acc, acc); - Fp2 t; - Fp2::mul(t, acc, c62denomi); - Fp2::mul(c55.g1_, c55nume, t); - c55.decompressAfterInv(); - Fp2::mul(t, acc, c55denomi); - Fp2::mul(c62.g1_, c62nume, t); - c62.decompressAfterInv(); - z *= x_org; - z *= d62; - } -}; - -struct MapTo { - enum { - BNtype, - BLS12type, - STD_ECtype - }; - Fp c1_; // sqrt(-3) - Fp c2_; // (-1 + sqrt(-3)) / 2 - mpz_class z_; - mpz_class cofactor_; - int type_; - bool useNaiveMapTo_; - - int legendre(bool *pb, const Fp& x) const - { - mpz_class xx; - x.getMpz(pb, xx); - if (!*pb) return 0; - return gmp::legendre(xx, Fp::getOp().mp); - } - int legendre(bool *pb, const Fp2& x) const - { - Fp y; - Fp2::norm(y, x); - return legendre(pb, y); - } - void mulFp(Fp& x, const Fp& y) const - { - x *= y; - } - void mulFp(Fp2& x, const Fp& y) const - { - x.a *= y; - x.b *= y; - } - /* - P.-A. Fouque and M. Tibouchi, - "Indifferentiable hashing to Barreto Naehrig curves," - in Proc. Int. Conf. Cryptol. Inform. Security Latin Amer., 2012, vol. 7533, pp.1-17. 
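fixed_power above exploits the sparse BN254 exponent |z| = 2^62 + 2^55 + 1: 55 squarings give x^(2^55), seven more give x^(2^62), and both shared intermediates are multiplied with x. The same addition chain over plain Z_p*, with modular squaring standing in for compressed Fp12 squaring (p and g are arbitrary toy choices; uses the GCC/Clang __uint128_t extension):

#include <cstdint>
#include <cstdio>

static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t p) {
	return (uint64_t)((__uint128_t)a * b % p); // 128-bit intermediate avoids overflow
}
static uint64_t sqr_n(uint64_t x, int n, uint64_t p) {
	while (n--) x = mulmod(x, x, p); // n successive squarings
	return x;
}

int main() {
	const uint64_t p = 0xffffffffffffffc5ULL; // 2^64 - 59, a prime; any odd prime works here
	const uint64_t g = 3;
	uint64_t c55 = sqr_n(g, 55, p);          // g^(2^55), as Compress c55
	uint64_t c62 = sqr_n(c55, 62 - 55, p);   // g^(2^62), reusing c55 as fixed_power does
	uint64_t y = mulmod(mulmod(c55, c62, p), g, p); // g^(2^62 + 2^55 + 1) = g^|z|
	// cross-check against generic square-and-multiply
	uint64_t e = (1ULL << 62) + (1ULL << 55) + 1, base = g, acc = 1;
	while (e) { if (e & 1) acc = mulmod(acc, base, p); base = mulmod(base, base, p); e >>= 1; }
	std::printf("%d\n", y == acc); // prints 1
	return 0;
}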
- - w = sqrt(-3) t / (1 + b + t^2) - Remark: throw exception if t = 0, c1, -c1 and b = 2 - */ - template - bool calcBN(G& P, const F& t) const - { - F x, y, w; - bool b; - bool negative = legendre(&b, t) < 0; - if (!b) return false; - if (t.isZero()) return false; - F::sqr(w, t); - w += G::b_; - *w.getFp0() += Fp::one(); - if (w.isZero()) return false; - F::inv(w, w); - mulFp(w, c1_); - w *= t; - for (int i = 0; i < 3; i++) { - switch (i) { - case 0: F::mul(x, t, w); F::neg(x, x); *x.getFp0() += c2_; break; - case 1: F::neg(x, x); *x.getFp0() -= Fp::one(); break; - case 2: F::sqr(x, w); F::inv(x, x); *x.getFp0() += Fp::one(); break; - } - G::getWeierstrass(y, x); - if (F::squareRoot(y, y)) { - if (negative) F::neg(y, y); - P.set(&b, x, y, false); - assert(b); - return true; - } - } - return false; - } - /* - Faster Hashing to G2 - Laura Fuentes-Castaneda, Edward Knapp, Francisco Rodriguez-Henriquez - section 6.1 - for BN - Q = zP + Frob(3zP) + Frob^2(zP) + Frob^3(P) - = -(18x^3 + 12x^2 + 3x + 1)cofactor_ P - */ - void mulByCofactorBN(G2& Q, const G2& P) const - { -#if 0 - G2::mulGeneric(Q, P, cofactor_); -#else -#if 0 - mpz_class t = -(1 + z_ * (3 + z_ * (12 + z_ * 18))); - G2::mulGeneric(Q, P, t * cofactor_); -#else - G2 T0, T1, T2; - /* - G2::mul (GLV method) can't be used because P is not on G2 - */ - G2::mulGeneric(T0, P, z_); - G2::dbl(T1, T0); - T1 += T0; // 3zP - Frobenius(T1, T1); - Frobenius2(T2, T0); - T0 += T1; - T0 += T2; - Frobenius3(T2, P); - G2::add(Q, T0, T2); -#endif -#endif - } - /* - 1.2~1.4 times faster than calBN - */ - template - void naiveMapTo(G& P, const F& t) const - { - F x = t; - for (;;) { - F y; - G::getWeierstrass(y, x); - if (F::squareRoot(y, y)) { - bool b; - P.set(&b, x, y, false); - assert(b); - return; - } - *x.getFp0() += Fp::one(); - } - } - /* - #(Fp) / r = (z + 1 - t) / r = (z - 1)^2 / 3 - */ - void mulByCofactorBLS12(G1& Q, const G1& P) const - { - G1::mulGeneric(Q, P, cofactor_); - } - /* - Efficient hash maps to G2 on BLS curves - Alessandro Budroni, Federico Pintore - Q = (z(z-1)-1)P + Frob((z-1)P) + Frob^2(2P) - */ - void mulByCofactorBLS12(G2& Q, const G2& P) const - { - G2 T0, T1; - G2::mulGeneric(T0, P, z_ - 1); - G2::mulGeneric(T1, T0, z_); - T1 -= P; - Frobenius(T0, T0); - T0 += T1; - G2::dbl(T1, P); - Frobenius2(T1, T1); - G2::add(Q, T0, T1); - } - /* - cofactor_ is for G2(not used now) - */ - void initBN(const mpz_class& cofactor, const mpz_class &z, int curveType) - { - z_ = z; - cofactor_ = cofactor; - if (curveType == MCL_BN254) { - const char *c1 = "252364824000000126cd890000000003cf0f0000000000060c00000000000004"; - const char *c2 = "25236482400000017080eb4000000006181800000000000cd98000000000000b"; - bool b; - c1_.setStr(&b, c1, 16); - c2_.setStr(&b, c2, 16); - (void)b; - return; - } - bool b = Fp::squareRoot(c1_, -3); - assert(b); - (void)b; - c2_ = (c1_ - 1) / 2; - } - void initBLS12(const mpz_class& z) - { - z_ = z; - // cofactor for G1 - cofactor_ = (z - 1) * (z - 1) / 3; - bool b = Fp::squareRoot(c1_, -3); - assert(b); - (void)b; - c2_ = (c1_ - 1) / 2; - } - /* - if type == STD_ECtype, then cofactor, z are not used. - */ - void init(const mpz_class& cofactor, const mpz_class &z, int curveType) - { - if (0 <= curveType && curveType < MCL_EC_BEGIN) { - type_ = curveType == MCL_BLS12_381 ? 
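naiveMapTo above is plain try-and-increment: step x upward from t until x^3 + b is a square. A toy rendition over F_43 with b = 4 (an arbitrary illustrative curve, not one of the library's; p ≡ 3 mod 4 is chosen so the square root is simply s^((p+1)/4)):

#include <cstdio>

static int powmod(int a, int e, int p) {
	long long r = 1, b = a % p;
	for (; e; e >>= 1, b = b * b % p) if (e & 1) r = r * b % p;
	return (int)r;
}

int main() {
	const int p = 43, b = 4;           // y^2 = x^3 + 4 over F_43, p % 4 == 3
	for (int t = 0; t < 5; t++) {      // map a few field elements
		int x = t;
		for (;;) {
			int s = (powmod(x, 3, p) + b) % p;
			// Euler criterion: s is a square iff s^((p-1)/2) == 1 (or s == 0)
			if (s == 0 || powmod(s, (p - 1) / 2, p) == 1) {
				int y = powmod(s, (p + 1) / 4, p); // valid since p % 4 == 3
				std::printf("t=%d -> (%d, %d)\n", t, x, y);
				break;
			}
			x = (x + 1) % p;           // try the next x, as naiveMapTo does
		}
	}
	return 0;
}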
BLS12type : BNtype; - } else { - type_ = STD_ECtype; - } - if (type_ == STD_ECtype) { - useNaiveMapTo_ = true; - } else { - useNaiveMapTo_ = false; - } -#ifdef MCL_USE_OLD_MAPTO_FOR_BLS12 - if (type == BLS12type) useNaiveMapTo_ = true; -#endif - if (type_ == BNtype) { - initBN(cofactor, z, curveType); - } else if (type_ == BLS12type) { - initBLS12(z); - } - } - bool calcG1(G1& P, const Fp& t) const - { - if (useNaiveMapTo_) { - naiveMapTo(P, t); - } else { - if (!calcBN(P, t)) return false; - } - switch (type_) { - case BNtype: - // no subgroup - break; - case BLS12type: - mulByCofactorBLS12(P, P); - break; - } - assert(P.isValid()); - return true; - } - /* - get the element in G2 by multiplying the cofactor - */ - bool calcG2(G2& P, const Fp2& t) const - { - if (useNaiveMapTo_) { - naiveMapTo(P, t); - } else { - if (!calcBN(P, t)) return false; - } - switch(type_) { - case BNtype: - mulByCofactorBN(P, P); - break; - case BLS12type: - mulByCofactorBLS12(P, P); - break; - } - assert(P.isValid()); - return true; - } -}; - -/* - Software implementation of Attribute-Based Encryption: Appendixes - GLV for G1 on BN/BLS12 -*/ -struct GLV1 { - Fp rw; // rw = 1 / w = (-1 - sqrt(-3)) / 2 - size_t rBitSize; - mpz_class v0, v1; - mpz_class B[2][2]; - mpz_class r; -private: - bool usePrecomputedTable(int curveType) - { - if (curveType < 0) return false; - const struct Tbl { - int curveType; - const char *rw; - size_t rBitSize; - const char *v0, *v1; - const char *B[2][2]; - const char *r; - } tbl[] = { - { - MCL_BN254, - "49b36240000000024909000000000006cd80000000000007", - 256, - "2a01fab7e04a017b9c0eb31ff36bf3357", - "37937ca688a6b4904", - { - { - "61818000000000028500000000000004", - "8100000000000001", - }, - { - "8100000000000001", - "-61818000000000020400000000000003", - }, - }, - "2523648240000001ba344d8000000007ff9f800000000010a10000000000000d", - }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - if (tbl[i].curveType != curveType) continue; - bool b; - rw.setStr(&b, tbl[i].rw, 16); if (!b) continue; - rBitSize = tbl[i].rBitSize; - mcl::gmp::setStr(&b, v0, tbl[i].v0, 16); if (!b) continue; - mcl::gmp::setStr(&b, v1, tbl[i].v1, 16); if (!b) continue; - mcl::gmp::setStr(&b, B[0][0], tbl[i].B[0][0], 16); if (!b) continue; - mcl::gmp::setStr(&b, B[0][1], tbl[i].B[0][1], 16); if (!b) continue; - mcl::gmp::setStr(&b, B[1][0], tbl[i].B[1][0], 16); if (!b) continue; - mcl::gmp::setStr(&b, B[1][1], tbl[i].B[1][1], 16); if (!b) continue; - mcl::gmp::setStr(&b, r, tbl[i].r, 16); if (!b) continue; - return true; - } - return false; - } -public: - bool operator==(const GLV1& rhs) const - { - return rw == rhs.rw && rBitSize == rhs.rBitSize && v0 == rhs.v0 && v1 == rhs.v1 - && B[0][0] == rhs.B[0][0] && B[0][1] == rhs.B[0][1] && B[1][0] == rhs.B[1][0] - && B[1][1] == rhs.B[1][1] && r == rhs.r; - } - bool operator!=(const GLV1& rhs) const { return !operator==(rhs); } -#ifndef CYBOZU_DONT_USE_STRING - void dump(const mpz_class& x) const - { - printf("\"%s\",\n", mcl::gmp::getStr(x, 16).c_str()); - } - void dump() const - { - printf("\"%s\",\n", rw.getStr(16).c_str()); - printf("%d,\n", (int)rBitSize); - dump(v0); - dump(v1); - dump(B[0][0]); dump(B[0][1]); dump(B[1][0]); dump(B[1][1]); - dump(r); - } -#endif - void init(const mpz_class& r, const mpz_class& z, bool isBLS12 = false, int curveType = -1) - { - if (usePrecomputedTable(curveType)) return; - bool b = Fp::squareRoot(rw, -3); - assert(b); - (void)b; - rw = -(rw + 1) / 2; - this->r = r; - rBitSize = gmp::getBitSize(r); - rBitSize = 
(rBitSize + fp::UnitBitSize - 1) & ~(fp::UnitBitSize - 1);// a little better size - if (isBLS12) { - /* - BLS12 - L = z^4 - (-z^2+1) + L = 0 - 1 + z^2 L = 0 - */ - B[0][0] = -z * z + 1; - B[0][1] = 1; - B[1][0] = 1; - B[1][1] = z * z; - } else { - /* - BN - L = 36z^4 - 1 - (6z^2+2z) - (2z+1) L = 0 - (-2z-1) - (6z^2+4z+1)L = 0 - */ - B[0][0] = 6 * z * z + 2 * z; - B[0][1] = -2 * z - 1; - B[1][0] = -2 * z - 1; - B[1][1] = -6 * z * z - 4 * z - 1; - } - // [v0 v1] = [r 0] * B^(-1) - v0 = ((-B[1][1]) << rBitSize) / r; - v1 = ((B[1][0]) << rBitSize) / r; - } - /* - L = lambda = p^4 - L (x, y) = (rw x, y) - */ - void mulLambda(G1& Q, const G1& P) const - { - Fp::mul(Q.x, P.x, rw); - Q.y = P.y; - Q.z = P.z; - } - /* - x = a + b * lambda mod r - */ - void split(mpz_class& a, mpz_class& b, const mpz_class& x) const - { - mpz_class t; - t = (x * v0) >> rBitSize; - b = (x * v1) >> rBitSize; - a = x - (t * B[0][0] + b * B[1][0]); - b = - (t * B[0][1] + b * B[1][1]); - } - void mul(G1& Q, const G1& P, mpz_class x, bool constTime = false) const - { - typedef mcl::fp::Unit Unit; - const size_t maxUnit = 512 / 2 / mcl::fp::UnitBitSize; - const int splitN = 2; - mpz_class u[splitN]; - G1 in[splitN]; - G1 tbl[4]; - int bitTbl[splitN]; // bit size of u[i] - Unit w[splitN][maxUnit]; // unit array of u[i] - int maxBit = 0; // max bit of u[i] - int maxN = 0; - int remainBit = 0; - - x %= r; - if (x == 0) { - Q.clear(); - if (constTime) goto DummyLoop; - return; - } - if (x < 0) { - x += r; - } - split(u[0], u[1], x); - in[0] = P; - mulLambda(in[1], in[0]); - for (int i = 0; i < splitN; i++) { - if (u[i] < 0) { - u[i] = -u[i]; - G1::neg(in[i], in[i]); - } - in[i].normalize(); - } -#if 0 - G1::mulGeneric(in[0], in[0], u[0]); - G1::mulGeneric(in[1], in[1], u[1]); - G1::add(Q, in[0], in[1]); - return; -#else - tbl[0] = in[0]; // dummy - tbl[1] = in[0]; - tbl[2] = in[1]; - G1::add(tbl[3], in[0], in[1]); - tbl[3].normalize(); - for (int i = 0; i < splitN; i++) { - bool b; - mcl::gmp::getArray(&b, w[i], maxUnit, u[i]); - assert(b); - bitTbl[i] = (int)mcl::gmp::getBitSize(u[i]); - maxBit = fp::max_(maxBit, bitTbl[i]); - } - assert(maxBit > 0); - maxBit--; - /* - maxBit = maxN * UnitBitSize + remainBit - 0 < remainBit <= UnitBitSize - */ - maxN = maxBit / mcl::fp::UnitBitSize; - remainBit = maxBit % mcl::fp::UnitBitSize; - remainBit++; - Q.clear(); - for (int i = maxN; i >= 0; i--) { - for (int j = remainBit - 1; j >= 0; j--) { - G1::dbl(Q, Q); - uint32_t b0 = (w[0][i] >> j) & 1; - uint32_t b1 = (w[1][i] >> j) & 1; - uint32_t c = b1 * 2 + b0; - if (c == 0) { - if (constTime) tbl[0] += tbl[1]; - } else { - Q += tbl[c]; - } - } - remainBit = (int)mcl::fp::UnitBitSize; - } -#endif - DummyLoop: - if (!constTime) return; - const int limitBit = (int)rBitSize / splitN; - G1 D = tbl[0]; - for (int i = maxBit + 1; i < limitBit; i++) { - G1::dbl(D, D); - D += tbl[0]; - } - } -}; - -/* - GLV method for G2 and GT on BN/BLS12 -*/ -struct GLV2 { - size_t rBitSize; - mpz_class B[4][4]; - mpz_class r; - mpz_class v[4]; - mpz_class z; - mpz_class abs_z; - bool isBLS12; - GLV2() : rBitSize(0), isBLS12(false) {} - void init(const mpz_class& r, const mpz_class& z, bool isBLS12 = false) - { - this->r = r; - this->z = z; - this->abs_z = z < 0 ? 
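After the split, GLV1::mul above evaluates u[0] P + u[1] lambda(P) in one shared double-and-add pass over a 4-entry table (the Straus-Shamir trick; the constant-time dummy loop is omitted here). The same interleaved loop on a toy additive group Z_m, checked against direct computation (m, P, Q, u0, u1 are arbitrary; __uint128_t is a GCC/Clang extension):

#include <cstdint>
#include <cstdio>

// u0*P + u1*Q over Z_m, scanning both scalars' bits together,
// mirroring the tbl[4] lookup in GLV1::mul after the split
static uint64_t jointMul(uint64_t u0, uint64_t u1, uint64_t P, uint64_t Q, uint64_t m) {
	uint64_t tbl[4] = { 0, P % m, Q % m, (P + Q) % m }; // index = b1*2 + b0
	uint64_t acc = 0;
	for (int i = 63; i >= 0; i--) {
		acc = (acc * 2) % m;                         // "double"
		uint32_t c = (uint32_t)(((u1 >> i) & 1) * 2 + ((u0 >> i) & 1));
		if (c) acc = (acc + tbl[c]) % m;             // "add" from table
	}
	return acc;
}

int main() {
	const uint64_t m = 1000003, P = 12345, Q = 67890;
	uint64_t u0 = 0x1d2c3b4a, u1 = 0x9f8e7d6c;
	uint64_t lhs = jointMul(u0, u1, P, Q, m);
	uint64_t rhs = (uint64_t)(((__uint128_t)u0 * P + (__uint128_t)u1 * Q) % m);
	std::printf("%d\n", lhs == rhs); // prints 1
	return 0;
}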
-z : z; - this->isBLS12 = isBLS12; - rBitSize = mcl::gmp::getBitSize(r); - rBitSize = (rBitSize + mcl::fp::UnitBitSize - 1) & ~(mcl::fp::UnitBitSize - 1);// a little better size - mpz_class z2p1 = z * 2 + 1; - B[0][0] = z + 1; - B[0][1] = z; - B[0][2] = z; - B[0][3] = -2 * z; - B[1][0] = z2p1; - B[1][1] = -z; - B[1][2] = -(z + 1); - B[1][3] = -z; - B[2][0] = 2 * z; - B[2][1] = z2p1; - B[2][2] = z2p1; - B[2][3] = z2p1; - B[3][0] = z - 1; - B[3][1] = 2 * z2p1; - B[3][2] = -2 * z + 1; - B[3][3] = z - 1; - /* - v[] = [r 0 0 0] * B^(-1) = [2z^2+3z+1, 12z^3+8z^2+z, 6z^3+4z^2+z, -(2z+1)] - */ - const char *zBN254 = "-4080000000000001"; - mpz_class t; - bool b; - mcl::gmp::setStr(&b, t, zBN254, 16); - assert(b); - (void)b; - if (z == t) { - static const char *vTblBN254[] = { - "e00a8e7f56e007e5b09fe7fdf43ba998", - "-152aff56a8054abf9da75db2da3d6885101e5fd3997d41cb1", - "-a957fab5402a55fced3aed96d1eb44295f40f136ee84e09b", - "-e00a8e7f56e007e929d7b2667ea6f29c", - }; - for (int i = 0; i < 4; i++) { - mcl::gmp::setStr(&b, v[i], vTblBN254[i], 16); - assert(b); - (void)b; - } - } else { - v[0] = ((1 + z * (3 + z * 2)) << rBitSize) / r; - v[1] = ((z * (1 + z * (8 + z * 12))) << rBitSize) / r; - v[2] = ((z * (1 + z * (4 + z * 6))) << rBitSize) / r; - v[3] = -((z * (1 + z * 2)) << rBitSize) / r; - } - } - /* - u[] = [x, 0, 0, 0] - v[] * x * B - */ - void split(mpz_class u[4], const mpz_class& x) const - { - if (isBLS12) { - /* - Frob(P) = zP - x = u[0] + u[1] z + u[2] z^2 + u[3] z^3 - */ - bool isNeg = false; - mpz_class t = x; - if (t < 0) { - t = -t; - isNeg = true; - } - for (int i = 0; i < 4; i++) { - // t = t / abs_z, u[i] = t % abs_z - mcl::gmp::divmod(t, u[i], t, abs_z); - if (((z < 0) && (i & 1)) ^ isNeg) { - u[i] = -u[i]; - } - } - return; - } - // BN - mpz_class t[4]; - for (int i = 0; i < 4; i++) { - t[i] = (x * v[i]) >> rBitSize; - } - for (int i = 0; i < 4; i++) { - u[i] = (i == 0) ? 
x : 0; - for (int j = 0; j < 4; j++) { - u[i] -= t[j] * B[j][i]; - } - } - } - template - void mul(T& Q, const T& P, mpz_class x, bool constTime = false) const - { -#if 0 // #ifndef NDEBUG - { - T R; - T::mulGeneric(R, P, r); - assert(R.isZero()); - } -#endif - typedef mcl::fp::Unit Unit; - const size_t maxUnit = 512 / 2 / mcl::fp::UnitBitSize; - const int splitN = 4; - mpz_class u[splitN]; - T in[splitN]; - T tbl[16]; - int bitTbl[splitN]; // bit size of u[i] - Unit w[splitN][maxUnit]; // unit array of u[i] - int maxBit = 0; // max bit of u[i] - int maxN = 0; - int remainBit = 0; - - x %= r; - if (x == 0) { - Q.clear(); - if (constTime) goto DummyLoop; - return; - } - if (x < 0) { - x += r; - } - split(u, x); - in[0] = P; - Frobenius(in[1], in[0]); - Frobenius(in[2], in[1]); - Frobenius(in[3], in[2]); - for (int i = 0; i < splitN; i++) { - if (u[i] < 0) { - u[i] = -u[i]; - T::neg(in[i], in[i]); - } -// in[i].normalize(); // slow - } -#if 0 - for (int i = 0; i < splitN; i++) { - T::mulGeneric(in[i], in[i], u[i]); - } - T::add(Q, in[0], in[1]); - Q += in[2]; - Q += in[3]; - return; -#else - tbl[0] = in[0]; - for (size_t i = 1; i < 16; i++) { - tbl[i].clear(); - if (i & 1) { - tbl[i] += in[0]; - } - if (i & 2) { - tbl[i] += in[1]; - } - if (i & 4) { - tbl[i] += in[2]; - } - if (i & 8) { - tbl[i] += in[3]; - } -// tbl[i].normalize(); - } - for (int i = 0; i < splitN; i++) { - bool b; - mcl::gmp::getArray(&b, w[i], maxUnit, u[i]); - assert(b); - bitTbl[i] = (int)mcl::gmp::getBitSize(u[i]); - maxBit = fp::max_(maxBit, bitTbl[i]); - } - maxBit--; - /* - maxBit = maxN * UnitBitSize + remainBit - 0 < remainBit <= UnitBitSize - */ - maxN = maxBit / mcl::fp::UnitBitSize; - remainBit = maxBit % mcl::fp::UnitBitSize; - remainBit++; - Q.clear(); - for (int i = maxN; i >= 0; i--) { - for (int j = remainBit - 1; j >= 0; j--) { - T::dbl(Q, Q); - uint32_t b0 = (w[0][i] >> j) & 1; - uint32_t b1 = (w[1][i] >> j) & 1; - uint32_t b2 = (w[2][i] >> j) & 1; - uint32_t b3 = (w[3][i] >> j) & 1; - uint32_t c = b3 * 8 + b2 * 4 + b1 * 2 + b0; - if (c == 0) { - if (constTime) tbl[0] += tbl[1]; - } else { - Q += tbl[c]; - } - } - remainBit = (int)mcl::fp::UnitBitSize; - } -#endif - DummyLoop: - if (!constTime) return; - const int limitBit = (int)rBitSize / splitN; - T D = tbl[0]; - for (int i = maxBit + 1; i < limitBit; i++) { - T::dbl(D, D); - D += tbl[0]; - } - } - void pow(Fp12& z, const Fp12& x, mpz_class y, bool constTime = false) const - { - typedef GroupMtoA AG; // as additive group - AG& _z = static_cast(z); - const AG& _x = static_cast(x); - mul(_z, _x, y, constTime); - } -}; - -struct Param { - CurveParam cp; - mpz_class z; - mpz_class abs_z; - bool isNegative; - bool isBLS12; - mpz_class p; - mpz_class r; - local::MapTo mapTo; - local::GLV1 glv1; - local::GLV2 glv2; - // for G2 Frobenius - Fp2 g2; - Fp2 g3; - /* - Dtype twist - (x', y') = phi(x, y) = (x/w^2, y/w^3) - y^2 = x^3 + b - => (y'w^3)^2 = (x'w^2)^3 + b - => y'^2 = x'^3 + b / w^6 ; w^6 = xi - => y'^2 = x'^3 + twist_b; - */ - Fp2 twist_b; - local::TwistBtype twist_b_type; -/* - mpz_class exp_c0; - mpz_class exp_c1; - mpz_class exp_c2; - mpz_class exp_c3; -*/ - - // Loop parameter for the Miller loop part of opt. ate pairing. 
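For BLS12, GLV2::split above is just a base-|z| digit expansion, since Frobenius acts on G2 as multiplication by z. A toy check of that divmod loop, with z = 97 standing in for the real 64-bit curve parameter:

#include <cstdint>
#include <cstdio>

int main() {
	const int64_t z = 97;            // toy stand-in for the curve parameter
	int64_t x = 12345678;            // scalar to split (requires x < z^4)
	int64_t u[4], t = x;
	for (int i = 0; i < 4; i++) {    // u[i] = t % z, t = t / z, as gmp::divmod above
		u[i] = t % z;
		t /= z;
	}
	// recompose: x == u[0] + u[1] z + u[2] z^2 + u[3] z^3
	int64_t acc = 0;
	for (int i = 3; i >= 0; i--) acc = acc * z + u[i];
	std::printf("%d\n", acc == x);   // prints 1
	return 0;
}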
- local::SignVec siTbl; - size_t precomputedQcoeffSize; - bool useNAF; - local::SignVec zReplTbl; - - // for initG1only - G1 basePoint; - - void init(bool *pb, const mcl::CurveParam& cp, fp::Mode mode) - { - this->cp = cp; - isBLS12 = cp.curveType == MCL_BLS12_381; - gmp::setStr(pb, z, cp.z); - if (!*pb) return; - isNegative = z < 0; - if (isNegative) { - abs_z = -z; - } else { - abs_z = z; - } - if (isBLS12) { - mpz_class z2 = z * z; - mpz_class z4 = z2 * z2; - r = z4 - z2 + 1; - p = z - 1; - p = p * p * r / 3 + z; - } else { - const int pCoff[] = { 1, 6, 24, 36, 36 }; - const int rCoff[] = { 1, 6, 18, 36, 36 }; - p = local::evalPoly(z, pCoff); - assert((p % 6) == 1); - r = local::evalPoly(z, rCoff); - } - Fr::init(pb, r, mode); - if (!*pb) return; - Fp::init(pb, cp.xi_a, p, mode); - if (!*pb) return; - Fp2::init(); - const Fp2 xi(cp.xi_a, 1); - g2 = Fp2::get_gTbl()[0]; - g3 = Fp2::get_gTbl()[3]; - if (cp.isMtype) { - Fp2::inv(g2, g2); - Fp2::inv(g3, g3); - } - if (cp.isMtype) { - twist_b = Fp2(cp.b) * xi; - } else { - if (cp.b == 2 && cp.xi_a == 1) { - twist_b = Fp2(1, -1); // shortcut - } else { - twist_b = Fp2(cp.b) / xi; - } - } - if (twist_b == Fp2(1, -1)) { - twist_b_type = tb_1m1i; - } else if (twist_b == Fp2(1, -2)) { - twist_b_type = tb_1m2i; - } else { - twist_b_type = tb_generic; - } - G1::init(0, cp.b, mcl::ec::Proj); - if (isBLS12) { - G1::setOrder(r); - } - G2::init(0, twist_b, mcl::ec::Proj); - G2::setOrder(r); - - const mpz_class largest_c = isBLS12 ? abs_z : gmp::abs(z * 6 + 2); - useNAF = gmp::getNAF(siTbl, largest_c); - precomputedQcoeffSize = local::getPrecomputeQcoeffSize(siTbl); - gmp::getNAF(zReplTbl, gmp::abs(z)); -/* - if (isBLS12) { - mpz_class z2 = z * z; - mpz_class z3 = z2 * z; - mpz_class z4 = z3 * z; - mpz_class z5 = z4 * z; - exp_c0 = z5 - 2 * z4 + 2 * z2 - z + 3; - exp_c1 = z4 - 2 * z3 + 2 * z - 1; - exp_c2 = z3 - 2 * z2 + z; - exp_c3 = z2 - 2 * z + 1; - } else { - exp_c0 = -2 + z * (-18 + z * (-30 - 36 * z)); - exp_c1 = 1 + z * (-12 + z * (-18 - 36 * z)); - exp_c2 = 6 * z * z + 1; - } -*/ - if (isBLS12) { - mapTo.init(0, z, cp.curveType); - } else { - mapTo.init(2 * p - r, z, cp.curveType); - } - glv1.init(r, z, isBLS12, cp.curveType); - glv2.init(r, z, isBLS12); - basePoint.clear(); - *pb = true; - } - void initG1only(bool *pb, const mcl::EcParam& para) - { - Fp::init(pb, para.p); - if (!*pb) return; - Fr::init(pb, para.n); - if (!*pb) return; - G1::init(pb, para.a, para.b); - if (!*pb) return; - G1::setOrder(Fr::getOp().mp); - mapTo.init(0, 0, para.curveType); - Fp x0, y0; - x0.setStr(pb, para.gx); - if (!*pb) return; - y0.setStr(pb, para.gy); - basePoint.set(pb, x0, y0); - } -#ifndef CYBOZU_DONT_USE_EXCEPTION - void init(const mcl::CurveParam& cp, fp::Mode mode) - { - bool b; - init(&b, cp, mode); - if (!b) throw cybozu::Exception("Param:init"); - } -#endif -}; - -template -struct StaticVar { - static local::Param param; -}; - -template -local::Param StaticVar::param; - -} // mcl::bn::local - -namespace BN { - -static const local::Param& param = local::StaticVar<>::param; - -} // mcl::bn::BN - -namespace local { - -inline void mulArrayGLV1(G1& z, const G1& x, const mcl::fp::Unit *y, size_t yn, bool isNegative, bool constTime) -{ - mpz_class s; - bool b; - mcl::gmp::setArray(&b, s, y, yn); - assert(b); - if (isNegative) s = -s; - BN::param.glv1.mul(z, x, s, constTime); -} -inline void mulArrayGLV2(G2& z, const G2& x, const mcl::fp::Unit *y, size_t yn, bool isNegative, bool constTime) -{ - mpz_class s; - bool b; - mcl::gmp::setArray(&b, s, y, yn); - 
assert(b); - if (isNegative) s = -s; - BN::param.glv2.mul(z, x, s, constTime); -} -inline void powArrayGLV2(Fp12& z, const Fp12& x, const mcl::fp::Unit *y, size_t yn, bool isNegative, bool constTime) -{ - mpz_class s; - bool b; - mcl::gmp::setArray(&b, s, y, yn); - assert(b); - if (isNegative) s = -s; - BN::param.glv2.pow(z, x, s, constTime); -} - -/* - Faster Squaring in the Cyclotomic Subgroup of Sixth Degree Extensions - Robert Granger, Michael Scott -*/ -inline void sqrFp4(Fp2& z0, Fp2& z1, const Fp2& x0, const Fp2& x1) -{ -#if 1 - Fp2Dbl T0, T1, T2; - Fp2Dbl::sqrPre(T0, x0); - Fp2Dbl::sqrPre(T1, x1); - Fp2Dbl::mul_xi(T2, T1); - Fp2Dbl::add(T2, T2, T0); - Fp2::add(z1, x0, x1); - Fp2Dbl::mod(z0, T2); - Fp2Dbl::sqrPre(T2, z1); - Fp2Dbl::sub(T2, T2, T0); - Fp2Dbl::sub(T2, T2, T1); - Fp2Dbl::mod(z1, T2); -#else - Fp2 t0, t1, t2; - Fp2::sqr(t0, x0); - Fp2::sqr(t1, x1); - Fp2::mul_xi(z0, t1); - z0 += t0; - Fp2::add(z1, x0, x1); - Fp2::sqr(z1, z1); - z1 -= t0; - z1 -= t1; -#endif -} - -inline void fasterSqr(Fp12& y, const Fp12& x) -{ -#if 0 - Fp12::sqr(y, x); -#else - const Fp2& x0(x.a.a); - const Fp2& x4(x.a.b); - const Fp2& x3(x.a.c); - const Fp2& x2(x.b.a); - const Fp2& x1(x.b.b); - const Fp2& x5(x.b.c); - Fp2& y0(y.a.a); - Fp2& y4(y.a.b); - Fp2& y3(y.a.c); - Fp2& y2(y.b.a); - Fp2& y1(y.b.b); - Fp2& y5(y.b.c); - Fp2 t0, t1; - sqrFp4(t0, t1, x0, x1); - Fp2::sub(y0, t0, x0); - y0 += y0; - y0 += t0; - Fp2::add(y1, t1, x1); - y1 += y1; - y1 += t1; - Fp2 t2, t3; - sqrFp4(t0, t1, x2, x3); - sqrFp4(t2, t3, x4, x5); - Fp2::sub(y4, t0, x4); - y4 += y4; - y4 += t0; - Fp2::add(y5, t1, x5); - y5 += y5; - y5 += t1; - Fp2::mul_xi(t0, t3); - Fp2::add(y2, t0, x2); - y2 += y2; - y2 += t0; - Fp2::sub(y3, t2, x3); - y3 += y3; - y3 += t2; -#endif -} - -/* - y = x^z if z > 0 - = unitaryInv(x^(-z)) if z < 0 -*/ -inline void pow_z(Fp12& y, const Fp12& x) -{ -#if 1 - if (BN::param.cp.curveType == MCL_BN254) { - Compress::fixed_power(y, x); - } else { - Fp12 orgX = x; - y = x; - Fp12 conj; - conj.a = x.a; - Fp6::neg(conj.b, x.b); - for (size_t i = 1; i < BN::param.zReplTbl.size(); i++) { - fasterSqr(y, y); - if (BN::param.zReplTbl[i] > 0) { - y *= orgX; - } else if (BN::param.zReplTbl[i] < 0) { - y *= conj; - } - } - } -#else - Fp12::pow(y, x, param.abs_z); -#endif - if (BN::param.isNegative) { - Fp12::unitaryInv(y, y); - } -} -inline void mul_twist_b(Fp2& y, const Fp2& x) -{ - switch (BN::param.twist_b_type) { - case local::tb_1m1i: - /* - b / xi = 1 - 1i - (a + bi)(1 - 1i) = (a + b) + (b - a)i - */ - { - Fp t; - Fp::add(t, x.a, x.b); - Fp::sub(y.b, x.b, x.a); - y.a = t; - } - return; - case local::tb_1m2i: - /* - b / xi = 1 - 2i - (a + bi)(1 - 2i) = (a + 2b) + (b - 2a)i - */ - { - Fp t; - Fp::sub(t, x.b, x.a); - t -= x.a; - Fp::add(y.a, x.a, x.b); - y.a += x.b; - y.b = t; - } - return; - case local::tb_generic: - Fp2::mul(y, x, BN::param.twist_b); - return; - } -} - -inline void dblLineWithoutP(Fp6& l, G2& Q) -{ - Fp2 t0, t1, t2, t3, t4, t5; - Fp2Dbl T0, T1; - Fp2::sqr(t0, Q.z); - Fp2::mul(t4, Q.x, Q.y); - Fp2::sqr(t1, Q.y); - Fp2::add(t3, t0, t0); - Fp2::divBy2(t4, t4); - Fp2::add(t5, t0, t1); - t0 += t3; - mul_twist_b(t2, t0); - Fp2::sqr(t0, Q.x); - Fp2::add(t3, t2, t2); - t3 += t2; - Fp2::sub(Q.x, t1, t3); - t3 += t1; - Q.x *= t4; - Fp2::divBy2(t3, t3); - Fp2Dbl::sqrPre(T0, t3); - Fp2Dbl::sqrPre(T1, t2); - Fp2Dbl::sub(T0, T0, T1); - Fp2Dbl::add(T1, T1, T1); - Fp2Dbl::sub(T0, T0, T1); - Fp2::add(t3, Q.y, Q.z); - Fp2Dbl::mod(Q.y, T0); - Fp2::sqr(t3, t3); - t3 -= t5; - Fp2::mul(Q.z, t1, t3); - Fp2::sub(l.a, t2, 
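pow_z above walks the signed NAF recoding stored in zReplTbl, multiplying by x on +1 digits and by the conjugate (the cheap unitary inverse in the cyclotomic subgroup) on -1 digits. The same left-to-right signed ladder over Z_p*, with a true modular inverse standing in for the conjugate (p, g, e are arbitrary toy values):

#include <cstdint>
#include <cstdio>
#include <vector>

static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t p) {
	return (uint64_t)((__uint128_t)a * b % p);
}
static uint64_t powmod(uint64_t a, uint64_t e, uint64_t p) {
	uint64_t r = 1;
	for (; e; e >>= 1, a = mulmod(a, a, p)) if (e & 1) r = mulmod(r, a, p);
	return r;
}

int main() {
	const uint64_t p = 1000000007, g = 5, e = 0xdeadbeef;
	// non-adjacent form: digits in {-1, 0, 1}, no two adjacent nonzero
	std::vector<int> naf;
	for (uint64_t n = e; n != 0; n >>= 1) {
		int d = 0;
		if (n & 1) {
			d = ((n & 3) == 1) ? 1 : -1; // pick the odd digit making n - d divisible by 4
			if (d == 1) n -= 1; else n += 1;
		}
		naf.push_back(d);
	}
	uint64_t inv = powmod(g, p - 2, p);  // "conjugate" stand-in: g^{-1} mod p
	uint64_t y = 1;
	for (int i = (int)naf.size() - 1; i >= 0; i--) { // left to right, as pow_z
		y = mulmod(y, y, p);
		if (naf[i] > 0) y = mulmod(y, g, p);
		else if (naf[i] < 0) y = mulmod(y, inv, p);
	}
	std::printf("%d\n", y == powmod(g, e, p)); // prints 1
	return 0;
}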
t1); - l.c = t0; - l.b = t3; -} -inline void addLineWithoutP(Fp6& l, G2& R, const G2& Q) -{ - Fp2 t1, t2, t3, t4; - Fp2Dbl T1, T2; - Fp2::mul(t1, R.z, Q.x); - Fp2::mul(t2, R.z, Q.y); - Fp2::sub(t1, R.x, t1); - Fp2::sub(t2, R.y, t2); - Fp2::sqr(t3, t1); - Fp2::mul(R.x, t3, R.x); - Fp2::sqr(t4, t2); - t3 *= t1; - t4 *= R.z; - t4 += t3; - t4 -= R.x; - t4 -= R.x; - R.x -= t4; - Fp2Dbl::mulPre(T1, t2, R.x); - Fp2Dbl::mulPre(T2, t3, R.y); - Fp2Dbl::sub(T2, T1, T2); - Fp2Dbl::mod(R.y, T2); - Fp2::mul(R.x, t1, t4); - Fp2::mul(R.z, t3, R.z); - Fp2::neg(l.c, t2); - Fp2Dbl::mulPre(T1, t2, Q.x); - Fp2Dbl::mulPre(T2, t1, Q.y); - Fp2Dbl::sub(T1, T1, T2); - l.b = t1; - Fp2Dbl::mod(l.a, T1); -} -inline void dblLine(Fp6& l, G2& Q, const G1& P) -{ - dblLineWithoutP(l, Q); - local::updateLine(l, P); -} -inline void addLine(Fp6& l, G2& R, const G2& Q, const G1& P) -{ - addLineWithoutP(l, R, Q); - local::updateLine(l, P); -} -inline void mulFp6cb_by_G1xy(Fp6& y, const Fp6& x, const G1& P) -{ - assert(P.isNormalized()); - if (&y != &x) y.a = x.a; - Fp2::mulFp(y.c, x.c, P.x); - Fp2::mulFp(y.b, x.b, P.y); -} - -/* - x = a + bv + cv^2 - y = (y0, y4, y2) -> (y0, 0, y2, 0, y4, 0) - z = xy = (a + bv + cv^2)(d + ev) - = (ad + ce xi) + ((a + b)(d + e) - ad - be)v + (be + cd)v^2 -*/ -inline void Fp6mul_01(Fp6& z, const Fp6& x, const Fp2& d, const Fp2& e) -{ - const Fp2& a = x.a; - const Fp2& b = x.b; - const Fp2& c = x.c; - Fp2 t0, t1; - Fp2Dbl AD, CE, BE, CD, T; - Fp2Dbl::mulPre(AD, a, d); - Fp2Dbl::mulPre(CE, c, e); - Fp2Dbl::mulPre(BE, b, e); - Fp2Dbl::mulPre(CD, c, d); - Fp2::add(t0, a, b); - Fp2::add(t1, d, e); - Fp2Dbl::mulPre(T, t0, t1); - T -= AD; - T -= BE; - Fp2Dbl::mod(z.b, T); - Fp2Dbl::mul_xi(CE, CE); - AD += CE; - Fp2Dbl::mod(z.a, AD); - BE += CD; - Fp2Dbl::mod(z.c, BE); -} -/* - input - z = (z0 + z1v + z2v^2) + (z3 + z4v + z5v^2)w = Z0 + Z1w - 0 3 4 - x = (a, b, c) -> (b, 0, 0, c, a, 0) = X0 + X1w - X0 = b = (b, 0, 0) - X1 = c + av = (c, a, 0) - w^2 = v, v^3 = xi - output - z <- zx = (Z0X0 + Z1X1v) + ((Z0 + Z1)(X0 + X1) - Z0X0 - Z1X1)w - Z0X0 = Z0 b - Z1X1 = Z1 (c, a, 0) - (Z0 + Z1)(X0 + X1) = (Z0 + Z1) (b + c, a, 0) -*/ -inline void mul_403(Fp12& z, const Fp6& x) -{ - const Fp2& a = x.a; - const Fp2& b = x.b; - const Fp2& c = x.c; -#if 1 - Fp6& z0 = z.a; - Fp6& z1 = z.b; - Fp6 z0x0, z1x1, t0; - Fp2 t1; - Fp2::add(t1, x.b, c); - Fp6::add(t0, z0, z1); - Fp2::mul(z0x0.a, z0.a, b); - Fp2::mul(z0x0.b, z0.b, b); - Fp2::mul(z0x0.c, z0.c, b); - Fp6mul_01(z1x1, z1, c, a); - Fp6mul_01(t0, t0, t1, a); - Fp6::sub(z.b, t0, z0x0); - z.b -= z1x1; - // a + bv + cv^2 = cxi + av + bv^2 - Fp2::mul_xi(z1x1.c, z1x1.c); - Fp2::add(z.a.a, z0x0.a, z1x1.c); - Fp2::add(z.a.b, z0x0.b, z1x1.a); - Fp2::add(z.a.c, z0x0.c, z1x1.b); -#else - Fp2& z0 = z.a.a; - Fp2& z1 = z.a.b; - Fp2& z2 = z.a.c; - Fp2& z3 = z.b.a; - Fp2& z4 = z.b.b; - Fp2& z5 = z.b.c; - Fp2Dbl Z0B, Z1B, Z2B, Z3C, Z4C, Z5C; - Fp2Dbl T0, T1, T2, T3, T4, T5; - Fp2 bc, t; - Fp2::addPre(bc, b, c); - Fp2::addPre(t, z5, z2); - Fp2Dbl::mulPre(T5, t, bc); - Fp2Dbl::mulPre(Z5C, z5, c); - Fp2Dbl::mulPre(Z2B, z2, b); - Fp2Dbl::sub(T5, T5, Z5C); - Fp2Dbl::sub(T5, T5, Z2B); - Fp2Dbl::mulPre(T0, z1, a); - T5 += T0; - - Fp2::addPre(t, z4, z1); - Fp2Dbl::mulPre(T4, t, bc); - Fp2Dbl::mulPre(Z4C, z4, c); - Fp2Dbl::mulPre(Z1B, z1, b); - Fp2Dbl::sub(T4, T4, Z4C); - Fp2Dbl::sub(T4, T4, Z1B); - Fp2Dbl::mulPre(T0, z0, a); - T4 += T0; - - Fp2::addPre(t, z3, z0); - Fp2Dbl::mulPre(T3, t, bc); - Fp2Dbl::mulPre(Z3C, z3, c); - Fp2Dbl::mulPre(Z0B, z0, b); - Fp2Dbl::sub(T3, T3, Z3C); - 
Fp2Dbl::sub(T3, T3, Z0B); - Fp2::mul_xi(t, z2); - Fp2Dbl::mulPre(T0, t, a); - T3 += T0; - - Fp2Dbl::mulPre(T2, z3, a); - T2 += Z2B; - T2 += Z4C; - - Fp2::mul_xi(t, z5); - Fp2Dbl::mulPre(T1, t, a); - T1 += Z1B; - T1 += Z3C; - - Fp2Dbl::mulPre(T0, z4, a); - T0 += Z5C; - Fp2Dbl::mul_xi(T0, T0); - T0 += Z0B; - - Fp2Dbl::mod(z0, T0); - Fp2Dbl::mod(z1, T1); - Fp2Dbl::mod(z2, T2); - Fp2Dbl::mod(z3, T3); - Fp2Dbl::mod(z4, T4); - Fp2Dbl::mod(z5, T5); -#endif -} -/* - input - z = (z0 + z1v + z2v^2) + (z3 + z4v + z5v^2)w = Z0 + Z1w - 0 1 4 - x = (a, b, c) -> (a, c, 0, 0, b, 0) = X0 + X1w - X0 = (a, c, 0) - X1 = (0, b, 0) - w^2 = v, v^3 = xi - output - z <- zx = (Z0X0 + Z1X1v) + ((Z0 + Z1)(X0 + X1) - Z0X0 - Z1X1)w - Z0X0 = Z0 (a, c, 0) - Z1X1 = Z1 (0, b, 0) = Z1 bv - (Z0 + Z1)(X0 + X1) = (Z0 + Z1) (a, b + c, 0) - - (a + bv + cv^2)v = c xi + av + bv^2 -*/ -inline void mul_041(Fp12& z, const Fp6& x) -{ - const Fp2& a = x.a; - const Fp2& b = x.b; - const Fp2& c = x.c; - Fp6& z0 = z.a; - Fp6& z1 = z.b; - Fp6 z0x0, z1x1, t0; - Fp2 t1; - Fp2::mul(z1x1.a, z1.c, b); - Fp2::mul_xi(z1x1.a, z1x1.a); - Fp2::mul(z1x1.b, z1.a, b); - Fp2::mul(z1x1.c, z1.b, b); - Fp2::add(t1, x.b, c); - Fp6::add(t0, z0, z1); - Fp6mul_01(z0x0, z0, a, c); - Fp6mul_01(t0, t0, a, t1); - Fp6::sub(z.b, t0, z0x0); - z.b -= z1x1; - // a + bv + cv^2 = cxi + av + bv^2 - Fp2::mul_xi(z1x1.c, z1x1.c); - Fp2::add(z.a.a, z0x0.a, z1x1.c); - Fp2::add(z.a.b, z0x0.b, z1x1.a); - Fp2::add(z.a.c, z0x0.c, z1x1.b); -} -inline void mulSparse(Fp12& z, const Fp6& x) -{ - if (BN::param.cp.isMtype) { - mul_041(z, x); - } else { - mul_403(z, x); - } -} -inline void convertFp6toFp12(Fp12& y, const Fp6& x) -{ - if (BN::param.cp.isMtype) { - // (a, b, c) -> (a, c, 0, 0, b, 0) - y.a.a = x.a; - y.b.b = x.b; - y.a.b = x.c; - y.a.c.clear(); - y.b.a.clear(); - y.b.c.clear(); - } else { - // (a, b, c) -> (b, 0, 0, c, a, 0) - y.b.b = x.a; - y.a.a = x.b; - y.b.a = x.c; - y.a.b.clear(); - y.a.c.clear(); - y.b.c.clear(); - } -} -inline void mulSparse2(Fp12& z, const Fp6& x, const Fp6& y) -{ - convertFp6toFp12(z, x); - mulSparse(z, y); -} -inline void mapToCyclotomic(Fp12& y, const Fp12& x) -{ - Fp12 z; - Fp12::Frobenius2(z, x); // z = x^(p^2) - z *= x; // x^(p^2 + 1) - Fp12::inv(y, z); - Fp6::neg(z.b, z.b); // z^(p^6) = conjugate of z - y *= z; -} -/* - Implementing Pairings at the 192-bit Security Level - D.F.Aranha, L.F.Castaneda, E.Knapp, A.Menezes, F.R.Henriquez - Section 4 -*/ -inline void expHardPartBLS12(Fp12& y, const Fp12& x) -{ -#if 0 - const mpz_class& p = param.p; - mpz_class p2 = p * p; - mpz_class p4 = p2 * p2; - Fp12::pow(y, x, (p4 - p2 + 1) / param.r * 3); - return; -#endif -#if 1 - Fp12 a0, a1, a2, a3, a4, a5, a6, a7; - Fp12::unitaryInv(a0, x); // a0 = x^-1 - fasterSqr(a1, a0); // x^-2 - pow_z(a2, x); // x^z - fasterSqr(a3, a2); // x^2z - a1 *= a2; // a1 = x^(z-2) - pow_z(a7, a1); // a7 = x^(z^2-2z) - pow_z(a4, a7); // a4 = x^(z^3-2z^2) - pow_z(a5, a4); // a5 = x^(z^4-2z^3) - a3 *= a5; // a3 = x^(z^4-2z^3+2z) - pow_z(a6, a3); // a6 = x^(z^5-2z^4+2z^2) - - Fp12::unitaryInv(a1, a1); // x^(2-z) - a1 *= a6; // x^(z^5-2z^4+2z^2-z+2) - a1 *= x; // x^(z^5-2z^4+2z^2-z+3) = x^c0 - a3 *= a0; // x^(z^4-2z^3-1) = x^c1 - Fp12::Frobenius(a3, a3); // x^(c1 p) - a1 *= a3; // x^(c0 + c1 p) - a4 *= a2; // x^(z^3-2z^2+z) = x^c2 - Fp12::Frobenius2(a4, a4); // x^(c2 p^2) - a1 *= a4; // x^(c0 + c1 p + c2 p^2) - a7 *= x; // x^(z^2-2z+1) = x^c3 - Fp12::Frobenius3(y, a7); - y *= a1; -#else - Fp12 t1, t2, t3; - Fp12::Frobenius(t1, x); - Fp12::Frobenius(t2, t1); - Fp12::Frobenius(t3, 
t2); - Fp12::pow(t1, t1, param.exp_c1); - Fp12::pow(t2, t2, param.exp_c2); - Fp12::pow(t3, t3, param.exp_c3); - Fp12::pow(y, x, param.exp_c0); - y *= t1; - y *= t2; - y *= t3; -#endif -} -/* - Faster Hashing to G2 - Laura Fuentes-Castaneda, Edward Knapp, Francisco Rodriguez-Henriquez - section 4.1 - y = x^(d 2z(6z^2 + 3z + 1)) where - p = p(z) = 36z^4 + 36z^3 + 24z^2 + 6z + 1 - r = r(z) = 36z^4 + 36z^3 + 18z^2 + 6z + 1 - d = (p^4 - p^2 + 1) / r - d1 = d 2z(6z^2 + 3z + 1) - = c0 + c1 p + c2 p^2 + c3 p^3 - - c0 = 1 + 6z + 12z^2 + 12z^3 - c1 = 4z + 6z^2 + 12z^3 - c2 = 6z + 6z^2 + 12z^3 - c3 = -1 + 4z + 6z^2 + 12z^3 - x -> x^z -> x^2z -> x^4z -> x^6z -> x^(6z^2) -> x^(12z^2) -> x^(12z^3) - a = x^(6z) x^(6z^2) x^(12z^3) - b = a / (x^2z) - x^d1 = (a x^(6z^2) x) b^p a^(p^2) (b / x)^(p^3) -*/ -inline void expHardPartBN(Fp12& y, const Fp12& x) -{ -#if 0 - const mpz_class& p = param.p; - mpz_class p2 = p * p; - mpz_class p4 = p2 * p2; - Fp12::pow(y, x, (p4 - p2 + 1) / param.r); - return; -#endif -#if 1 - Fp12 a, b; - Fp12 a2, a3; - pow_z(b, x); // x^z - fasterSqr(b, b); // x^2z - fasterSqr(a, b); // x^4z - a *= b; // x^6z - pow_z(a2, a); // x^(6z^2) - a *= a2; - fasterSqr(a3, a2); // x^(12z^2) - pow_z(a3, a3); // x^(12z^3) - a *= a3; - Fp12::unitaryInv(b, b); - b *= a; - a2 *= a; - Fp12::Frobenius2(a, a); - a *= a2; - a *= x; - Fp12::unitaryInv(y, x); - y *= b; - Fp12::Frobenius(b, b); - a *= b; - Fp12::Frobenius3(y, y); - y *= a; -#else - Fp12 t1, t2, t3; - Fp12::Frobenius(t1, x); - Fp12::Frobenius(t2, t1); - Fp12::Frobenius(t3, t2); - Fp12::pow(t1, t1, param.exp_c1); - Fp12::pow(t2, t2, param.exp_c2); - Fp12::pow(y, x, param.exp_c0); - y *= t1; - y *= t2; - y *= t3; -#endif -} -/* - remark : returned value is NOT on a curve -*/ -inline G1 makeAdjP(const G1& P) -{ - G1 adjP; - Fp::add(adjP.x, P.x, P.x); - adjP.x += P.x; - Fp::neg(adjP.y, P.y); - adjP.z = 1; - return adjP; -} - -} // mcl::bn::local - -/* - y = x^((p^12 - 1) / r) - (p^12 - 1) / r = (p^2 + 1) (p^6 - 1) (p^4 - p^2 + 1)/r - (a + bw)^(p^6) = a - bw in Fp12 - (p^4 - p^2 + 1)/r = c0 + c1 p + c2 p^2 + p^3 -*/ -inline void finalExp(Fp12& y, const Fp12& x) -{ -#if 1 - mapToCyclotomic(y, x); -#else - const mpz_class& p = param.p; - mpz_class p2 = p * p; - mpz_class p4 = p2 * p2; - Fp12::pow(y, x, p2 + 1); - Fp12::pow(y, y, p4 * p2 - 1); -#endif - if (BN::param.isBLS12) { - expHardPartBLS12(y, y); - } else { - expHardPartBN(y, y); - } -} -inline void millerLoop(Fp12& f, const G1& P_, const G2& Q_) -{ - G1 P(P_); - G2 Q(Q_); - P.normalize(); - Q.normalize(); - if (Q.isZero()) { - f = 1; - return; - } - assert(BN::param.siTbl[1] == 1); - G2 T = Q; - G2 negQ; - if (BN::param.useNAF) { - G2::neg(negQ, Q); - } - Fp6 d, e, l; - d = e = l = 1; - G1 adjP = makeAdjP(P); - dblLine(d, T, adjP); - addLine(l, T, Q, P); - mulSparse2(f, d, l); - for (size_t i = 2; i < BN::param.siTbl.size(); i++) { - dblLine(l, T, adjP); - Fp12::sqr(f, f); - mulSparse(f, l); - if (BN::param.siTbl[i]) { - if (BN::param.siTbl[i] > 0) { - addLine(l, T, Q, P); - } else { - addLine(l, T, negQ, P); - } - mulSparse(f, l); - } - } - if (BN::param.z < 0) { - G2::neg(T, T); - Fp6::neg(f.b, f.b); - } - if (BN::param.isBLS12) return; - G2 Q1, Q2; - Frobenius(Q1, Q); - Frobenius(Q2, Q1); - G2::neg(Q2, Q2); - addLine(d, T, Q1, P); - addLine(e, T, Q2, P); - Fp12 ft; - mulSparse2(ft, d, e); - f *= ft; -} -inline void pairing(Fp12& f, const G1& P, const G2& Q) -{ - millerLoop(f, P, Q); - finalExp(f, f); -} -/* - allocate param.precomputedQcoeffSize elements of Fp6 for Qcoeff -*/ -inline void 
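pairing above is literally millerLoop followed by finalExp, and the C++ API exposes the two halves separately. A sketch using the bn384.hpp preset that appears later in this patch (the header and namespace choice are assumptions; any build exposing mcl::bn with MCL_MAX_FP_BIT_SIZE >= 384 works for BLS12-381):

#include <mcl/bn384.hpp>
#include <cstdio>

int main() {
	using namespace mcl::bn384;
	initPairing(mcl::BLS12_381);
	G1 P;
	G2 Q;
	hashAndMapToG1(P, "abc", 3);
	hashAndMapToG2(Q, "abc", 3);
	Fp12 e1, e2;
	pairing(e1, P, Q);       // one shot
	millerLoop(e2, P, Q);    // Miller loop only ...
	finalExp(e2, e2);        // ... then the final exponentiation
	std::printf("%d\n", e1 == e2); // prints 1
	return 0;
}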
precomputeG2(Fp6 *Qcoeff, const G2& Q_) -{ - size_t idx = 0; - G2 Q(Q_); - Q.normalize(); - if (Q.isZero()) { - for (size_t i = 0; i < BN::param.precomputedQcoeffSize; i++) { - Qcoeff[i] = 1; - } - return; - } - G2 T = Q; - G2 negQ; - if (BN::param.useNAF) { - G2::neg(negQ, Q); - } - assert(BN::param.siTbl[1] == 1); - dblLineWithoutP(Qcoeff[idx++], T); - addLineWithoutP(Qcoeff[idx++], T, Q); - for (size_t i = 2; i < BN::param.siTbl.size(); i++) { - dblLineWithoutP(Qcoeff[idx++], T); - if (BN::param.siTbl[i]) { - if (BN::param.siTbl[i] > 0) { - addLineWithoutP(Qcoeff[idx++], T, Q); - } else { - addLineWithoutP(Qcoeff[idx++], T, negQ); - } - } - } - if (BN::param.z < 0) { - G2::neg(T, T); - } - if (BN::param.isBLS12) return; - G2 Q1, Q2; - Frobenius(Q1, Q); - Frobenius(Q2, Q1); - G2::neg(Q2, Q2); - addLineWithoutP(Qcoeff[idx++], T, Q1); - addLineWithoutP(Qcoeff[idx++], T, Q2); - assert(idx == BN::param.precomputedQcoeffSize); -} -/* - millerLoop(e, P, Q) is same as the following - std::vector Qcoeff; - precomputeG2(Qcoeff, Q); - precomputedMillerLoop(e, P, Qcoeff); -*/ -#ifndef CYBOZU_DONT_USE_EXCEPTION -inline void precomputeG2(std::vector& Qcoeff, const G2& Q) -{ - Qcoeff.resize(BN::param.precomputedQcoeffSize); - precomputeG2(Qcoeff.data(), Q); -} -#endif -template -void precomputeG2(bool *pb, Array& Qcoeff, const G2& Q) -{ - *pb = Qcoeff.resize(BN::param.precomputedQcoeffSize); - if (!*pb) return; - precomputeG2(Qcoeff.data(), Q); -} - -inline void precomputedMillerLoop(Fp12& f, const G1& P_, const Fp6* Qcoeff) -{ - G1 P(P_); - P.normalize(); - G1 adjP = makeAdjP(P); - size_t idx = 0; - Fp6 d, e, l; - mulFp6cb_by_G1xy(d, Qcoeff[idx], adjP); - idx++; - - mulFp6cb_by_G1xy(e, Qcoeff[idx], P); - idx++; - mulSparse2(f, d, e); - for (size_t i = 2; i < BN::param.siTbl.size(); i++) { - mulFp6cb_by_G1xy(l, Qcoeff[idx], adjP); - idx++; - Fp12::sqr(f, f); - mulSparse(f, l); - if (BN::param.siTbl[i]) { - mulFp6cb_by_G1xy(l, Qcoeff[idx], P); - idx++; - mulSparse(f, l); - } - } - if (BN::param.z < 0) { - Fp6::neg(f.b, f.b); - } - if (BN::param.isBLS12) return; - mulFp6cb_by_G1xy(d, Qcoeff[idx], P); - idx++; - mulFp6cb_by_G1xy(e, Qcoeff[idx], P); - idx++; - Fp12 ft; - mulSparse2(ft, d, e); - f *= ft; -} -#ifndef CYBOZU_DONT_USE_EXCEPTION -inline void precomputedMillerLoop(Fp12& f, const G1& P, const std::vector& Qcoeff) -{ - precomputedMillerLoop(f, P, Qcoeff.data()); -} -#endif -/* - f = MillerLoop(P1, Q1) x MillerLoop(P2, Q2) - Q2coeff : precomputed Q2 -*/ -inline void precomputedMillerLoop2mixed(Fp12& f, const G1& P1_, const G2& Q1_, const G1& P2_, const Fp6* Q2coeff) -{ - G1 P1(P1_), P2(P2_); - G2 Q1(Q1_); - P1.normalize(); - P2.normalize(); - Q1.normalize(); - if (Q1.isZero()) { - precomputedMillerLoop(f, P2_, Q2coeff); - return; - } - G2 T = Q1; - G2 negQ1; - if (BN::param.useNAF) { - G2::neg(negQ1, Q1); - } - G1 adjP1 = makeAdjP(P1); - G1 adjP2 = makeAdjP(P2); - size_t idx = 0; - Fp6 d1, d2, e1, e2, l1, l2; - dblLine(d1, T, adjP1); - mulFp6cb_by_G1xy(d2, Q2coeff[idx], adjP2); - idx++; - - Fp12 f1, f2; - e1 = 1; - addLine(e1, T, Q1, P1); - mulSparse2(f1, d1, e1); - - mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2); - mulSparse2(f2, d2, e2); - Fp12::mul(f, f1, f2); - idx++; - for (size_t i = 2; i < BN::param.siTbl.size(); i++) { - dblLine(l1, T, adjP1); - mulFp6cb_by_G1xy(l2, Q2coeff[idx], adjP2); - idx++; - Fp12::sqr(f, f); - mulSparse2(f1, l1, l2); - f *= f1; - if (BN::param.siTbl[i]) { - if (BN::param.siTbl[i] > 0) { - addLine(l1, T, Q1, P1); - } else { - addLine(l1, T, negQ1, P1); - } - 
mulFp6cb_by_G1xy(l2, Q2coeff[idx], P2); - idx++; - mulSparse2(f1, l1, l2); - f *= f1; - } - } - if (BN::param.z < 0) { - G2::neg(T, T); - Fp6::neg(f.b, f.b); - } - if (BN::param.isBLS12) return; - G2 Q11, Q12; - Frobenius(Q11, Q1); - Frobenius(Q12, Q11); - G2::neg(Q12, Q12); - addLine(d1, T, Q11, P1); - mulFp6cb_by_G1xy(d2, Q2coeff[idx], P2); - idx++; - addLine(e1, T, Q12, P1); - mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2); - idx++; - mulSparse2(f1, d1, e1); - mulSparse2(f2, d2, e2); - f *= f1; - f *= f2; -} -/* - f = MillerLoop(P1, Q1) x MillerLoop(P2, Q2) - Q1coeff, Q2coeff : precomputed Q1, Q2 -*/ -inline void precomputedMillerLoop2(Fp12& f, const G1& P1_, const Fp6* Q1coeff, const G1& P2_, const Fp6* Q2coeff) -{ - G1 P1(P1_), P2(P2_); - P1.normalize(); - P2.normalize(); - G1 adjP1 = makeAdjP(P1); - G1 adjP2 = makeAdjP(P2); - size_t idx = 0; - Fp6 d1, d2, e1, e2, l1, l2; - mulFp6cb_by_G1xy(d1, Q1coeff[idx], adjP1); - mulFp6cb_by_G1xy(d2, Q2coeff[idx], adjP2); - idx++; - - Fp12 f1, f2; - mulFp6cb_by_G1xy(e1, Q1coeff[idx], P1); - mulSparse2(f1, d1, e1); - - mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2); - mulSparse2(f2, d2, e2); - Fp12::mul(f, f1, f2); - idx++; - for (size_t i = 2; i < BN::param.siTbl.size(); i++) { - mulFp6cb_by_G1xy(l1, Q1coeff[idx], adjP1); - mulFp6cb_by_G1xy(l2, Q2coeff[idx], adjP2); - idx++; - Fp12::sqr(f, f); - mulSparse2(f1, l1, l2); - f *= f1; - if (BN::param.siTbl[i]) { - mulFp6cb_by_G1xy(l1, Q1coeff[idx], P1); - mulFp6cb_by_G1xy(l2, Q2coeff[idx], P2); - idx++; - mulSparse2(f1, l1, l2); - f *= f1; - } - } - if (BN::param.z < 0) { - Fp6::neg(f.b, f.b); - } - if (BN::param.isBLS12) return; - mulFp6cb_by_G1xy(d1, Q1coeff[idx], P1); - mulFp6cb_by_G1xy(d2, Q2coeff[idx], P2); - idx++; - mulFp6cb_by_G1xy(e1, Q1coeff[idx], P1); - mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2); - idx++; - mulSparse2(f1, d1, e1); - mulSparse2(f2, d2, e2); - f *= f1; - f *= f2; -} -#ifndef CYBOZU_DONT_USE_EXCEPTION -inline void precomputedMillerLoop2(Fp12& f, const G1& P1, const std::vector& Q1coeff, const G1& P2, const std::vector& Q2coeff) -{ - precomputedMillerLoop2(f, P1, Q1coeff.data(), P2, Q2coeff.data()); -} -inline void precomputedMillerLoop2mixed(Fp12& f, const G1& P1, const G2& Q1, const G1& P2, const std::vector& Q2coeff) -{ - precomputedMillerLoop2mixed(f, P1, Q1, P2, Q2coeff.data()); -} -#endif -inline void mapToG1(bool *pb, G1& P, const Fp& x) { *pb = BN::param.mapTo.calcG1(P, x); } -inline void mapToG2(bool *pb, G2& P, const Fp2& x) { *pb = BN::param.mapTo.calcG2(P, x); } -#ifndef CYBOZU_DONT_USE_EXCEPTION -inline void mapToG1(G1& P, const Fp& x) -{ - bool b; - mapToG1(&b, P, x); - if (!b) throw cybozu::Exception("mapToG1:bad value") << x; -} -inline void mapToG2(G2& P, const Fp2& x) -{ - bool b; - mapToG2(&b, P, x); - if (!b) throw cybozu::Exception("mapToG2:bad value") << x; -} -#endif -inline void hashAndMapToG1(G1& P, const void *buf, size_t bufSize) -{ - Fp t; - t.setHashOf(buf, bufSize); - bool b; - mapToG1(&b, P, t); - // It will not happen that the hashed value is equal to special value - assert(b); - (void)b; -} -inline void hashAndMapToG2(G2& P, const void *buf, size_t bufSize) -{ - Fp2 t; - t.a.setHashOf(buf, bufSize); - t.b.clear(); - bool b; - mapToG2(&b, P, t); - // It will not happen that the hashed value is equal to special value - assert(b); - (void)b; -} -#ifndef CYBOZU_DONT_USE_STRING -inline void hashAndMapToG1(G1& P, const std::string& str) -{ - hashAndMapToG1(P, str.c_str(), str.size()); -} -inline void hashAndMapToG2(G2& P, const std::string& str) -{ - hashAndMapToG2(P, 
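When one G2 point is paired against many G1 points, the precomputeG2/precomputedMillerLoop pair above hoists the Q-only line coefficients once. A sketch (same preset assumption as before) checking it against the direct loop:

#include <mcl/bn384.hpp>
#include <cstdio>
#include <vector>

int main() {
	using namespace mcl::bn384;
	initPairing(mcl::BLS12_381);
	G1 P;
	G2 Q;
	hashAndMapToG1(P, "P", 1);
	hashAndMapToG2(Q, "Q", 1);
	std::vector<Fp6> Qcoeff;
	precomputeG2(Qcoeff, Q);          // hoist the Q-only line coefficients
	Fp12 f1, f2;
	precomputedMillerLoop(f1, P, Qcoeff);
	millerLoop(f2, P, Q);             // must agree with the direct loop
	std::printf("%d\n", f1 == f2);    // prints 1
	return 0;
}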
str.c_str(), str.size()); -} -#endif -inline void verifyOrderG1(bool doVerify) -{ - if (BN::param.isBLS12) { - G1::setOrder(doVerify ? BN::param.r : 0); - } -} -inline void verifyOrderG2(bool doVerify) -{ - G2::setOrder(doVerify ? BN::param.r : 0); -} - -// backward compatibility -using mcl::CurveParam; -static const CurveParam& CurveFp254BNb = BN254; -static const CurveParam& CurveFp382_1 = BN381_1; -static const CurveParam& CurveFp382_2 = BN381_2; -static const CurveParam& CurveFp462 = BN462; -static const CurveParam& CurveSNARK1 = BN_SNARK1; - -/* - FrobeniusOnTwist for Dtype - p mod 6 = 1, w^6 = xi - Frob(x', y') = phi Frob phi^-1(x', y') - = phi Frob (x' w^2, y' w^3) - = phi (x'^p w^2p, y'^p w^3p) - = (F(x') w^2(p - 1), F(y') w^3(p - 1)) - = (F(x') g^2, F(y') g^3) - - FrobeniusOnTwist for Dtype - use (1/g) instead of g -*/ -inline void Frobenius(G2& D, const G2& S) -{ - Fp2::Frobenius(D.x, S.x); - Fp2::Frobenius(D.y, S.y); - Fp2::Frobenius(D.z, S.z); - D.x *= BN::param.g2; - D.y *= BN::param.g3; -} -inline void Frobenius2(G2& D, const G2& S) -{ - Frobenius(D, S); - Frobenius(D, D); -} -inline void Frobenius3(G2& D, const G2& S) -{ - Frobenius(D, S); - Frobenius(D, D); - Frobenius(D, D); -} - -namespace BN { - -using namespace mcl::bn; // backward compatibility - -inline void init(bool *pb, const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO) -{ - local::StaticVar<>::param.init(pb, cp, mode); - if (!*pb) return; - G1::setMulArrayGLV(local::mulArrayGLV1); - G2::setMulArrayGLV(local::mulArrayGLV2); - Fp12::setPowArrayGLV(local::powArrayGLV2); - G1::setCompressedExpression(); - G2::setCompressedExpression(); - *pb = true; -} - -#ifndef CYBOZU_DONT_USE_EXCEPTION -inline void init(const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO) -{ - bool b; - init(&b, cp, mode); - if (!b) throw cybozu::Exception("BN:init"); -} -#endif - -} // mcl::bn::BN - -inline void initPairing(bool *pb, const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO) -{ - BN::init(pb, cp, mode); -} - -#ifndef CYBOZU_DONT_USE_EXCEPTION -inline void initPairing(const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO) -{ - bool b; - BN::init(&b, cp, mode); - if (!b) throw cybozu::Exception("bn:initPairing"); -} -#endif - -inline void initG1only(bool *pb, const mcl::EcParam& para) -{ - local::StaticVar<>::param.initG1only(pb, para); - if (!*pb) return; - G1::setMulArrayGLV(0); - G2::setMulArrayGLV(0); - Fp12::setPowArrayGLV(0); - G1::setCompressedExpression(); - G2::setCompressedExpression(); -} - -inline const G1& getG1basePoint() -{ - return local::StaticVar<>::param.basePoint; -} - -} } // mcl::bn - diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/bn256.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/bn256.hpp deleted file mode 100644 index 7a5da7a05..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/bn256.hpp +++ /dev/null @@ -1,15 +0,0 @@ -#pragma once -/** - @file - @brief preset class for 256-bit optimal ate pairing over BN curves - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#define MCL_MAX_FP_BIT_SIZE 256 -#include - -namespace mcl { namespace bn256 { -using namespace mcl::bn; -} } - diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/bn384.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/bn384.hpp deleted file mode 100644 index 8aa14fe5c..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/bn384.hpp +++ 
/dev/null @@ -1,15 +0,0 @@ -#pragma once -/** - @file - @brief preset class for 384-bit optimal ate pairing over BN curves - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#define MCL_MAX_FP_BIT_SIZE 384 -#include -// #define MCL_MAX_FR_BIT_SIZE 256 // can set if BLS12_381 - -namespace mcl { namespace bn384 { -using namespace mcl::bn; -} } diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/bn512.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/bn512.hpp deleted file mode 100644 index c87ad9035..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/bn512.hpp +++ /dev/null @@ -1,14 +0,0 @@ -#pragma once -/** - @file - @brief preset class for 512-bit optimal ate pairing over BN curves - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#define MCL_MAX_FP_BIT_SIZE 512 -#include - -namespace mcl { namespace bn512 { -using namespace mcl::bn; -} } diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/conversion.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/conversion.hpp deleted file mode 100644 index 7a04b7fa2..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/conversion.hpp +++ /dev/null @@ -1,495 +0,0 @@ -#pragma once -#include -#include -/** - @file - @brief convertion bin/dec/hex <=> array - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable : 4127) -#endif - -namespace mcl { namespace fp { - -namespace local { - -inline bool isSpace(char c) -{ - return c == ' ' || c == '\t' || c == '\r' || c == '\n'; -} -template -bool skipSpace(char *c, InputStream& is) -{ - for (;;) { - if (!cybozu::readChar(c, is)) return false; - if (!isSpace(*c)) return true; - } -} - -#ifndef CYBOZU_DONT_USE_STRING -template -void loadWord(std::string& s, InputStream& is) -{ - s.clear(); - char c; - if (!skipSpace(&c, is)) return; - s = c; - for (;;) { - if (!cybozu::readChar(&c, is)) return; - if (isSpace(c)) break; - s += c; - } -} -#endif - -template -size_t loadWord(char *buf, size_t bufSize, InputStream& is) -{ - if (bufSize == 0) return 0; - char c; - if (!skipSpace(&c, is)) return 0; - size_t pos = 0; - buf[pos++] = c; - for (;;) { - if (!cybozu::readChar(&c, is)) break; - if (isSpace(c)) break; - if (pos == bufSize) return 0; - buf[pos++] = c; - } - return pos; -} - - -/* - q = x[] / x - @retval r = x[] % x - @note accept q == x -*/ -inline uint32_t divU32(uint32_t *q, const uint32_t *x, size_t xn, uint32_t y) -{ - if (xn == 0) return 0; - uint32_t r = 0; - for (int i = (int)xn - 1; i >= 0; i--) { - uint64_t t = (uint64_t(r) << 32) | x[i]; - q[i] = uint32_t(t / y); - r = uint32_t(t % y); - } - return r; -} - -/* - z[0, xn) = x[0, xn) * y - return z[xn] - @note accept z == x -*/ -inline uint32_t mulU32(uint32_t *z, const uint32_t *x, size_t xn, uint32_t y) -{ - uint32_t H = 0; - for (size_t i = 0; i < xn; i++) { - uint32_t t = H; - uint64_t v = uint64_t(x[i]) * y; - uint32_t L = uint32_t(v); - H = uint32_t(v >> 32); - z[i] = t + L; - if (z[i] < t) { - H++; - } - } - return H; -} - -/* - x[0, xn) += y - return 1 if overflow else 0 -*/ -inline uint32_t addU32(uint32_t *x, size_t xn, uint32_t y) -{ - uint32_t t = x[0] + y; - x[0] = t; - if (t >= y) return 0; - for (size_t i = 1; i < xn; i++) { - t = x[i] + 1; - x[i] = t; - if (t != 0) return 0; - } - return 1; -} - 
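/*
  (editorial aside, not part of the patch)
  A self-contained sketch of how the three helpers above compose into
  base-10 parsing and printing of a little-endian uint32_t bignum — the
  same 10^9-chunk scheme used by decToArray()/arrayToDec() later in this
  file. The helper bodies are compact re-implementations for illustration
  (hence the *demo suffix), not the vendored code.
*/
#include <cstdint>
#include <cstdio>

// r = x[] % y, q[] = x[] / y (q may alias x)
static uint32_t divU32demo(uint32_t *q, const uint32_t *x, size_t xn, uint32_t y)
{
	uint32_t r = 0;
	for (size_t i = xn; i-- > 0; ) {
		uint64_t t = (uint64_t(r) << 32) | x[i];
		q[i] = uint32_t(t / y);
		r = uint32_t(t % y);
	}
	return r;
}
// z[] = x[] * y, returns the overflow limb (z may alias x)
static uint32_t mulU32demo(uint32_t *z, const uint32_t *x, size_t xn, uint32_t y)
{
	uint32_t H = 0;
	for (size_t i = 0; i < xn; i++) {
		uint64_t v = uint64_t(x[i]) * y + H; // fits: (2^32-1)^2 + (2^32-1) < 2^64
		z[i] = uint32_t(v);
		H = uint32_t(v >> 32);
	}
	return H;
}
// x[] += y, returns the final carry
static uint32_t addU32demo(uint32_t *x, size_t xn, uint32_t y)
{
	uint32_t carry = y;
	for (size_t i = 0; carry && i < xn; i++) {
		uint64_t t = uint64_t(x[i]) + carry;
		x[i] = uint32_t(t);
		carry = uint32_t(t >> 32);
	}
	return carry;
}

int main()
{
	// parse 18446744073709551617 (= 2^64 + 1) from three 9-digit chunks:
	// x = (18 * 10^9 + 446744073) * 10^9 + 709551617
	const uint32_t chunk[3] = { 18, 446744073u, 709551617u };
	uint32_t x[4] = { 0 };
	size_t xn = 1;
	for (int i = 0; i < 3; i++) {
		uint32_t H = mulU32demo(x, x, xn, 1000000000u);
		if (H) x[xn++] = H;
		H = addU32demo(x, xn, chunk[i]);
		if (H) x[xn++] = H;
	}
	// print in decimal by peeling 9 digits at a time, least significant first
	uint32_t out[5];
	int n = 0;
	do {
		out[n++] = divU32demo(x, x, xn, 1000000000u);
		while (xn > 0 && x[xn - 1] == 0) xn--;
	} while (xn > 0);
	printf("%u", (unsigned)out[n - 1]);
	for (int i = n - 2; i >= 0; i--) printf("%09u", (unsigned)out[i]);
	printf("\n"); // 18446744073709551617
	return 0;
}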
-inline uint32_t decToU32(const char *p, size_t size, bool *pb) -{ - assert(0 < size && size <= 9); - uint32_t x = 0; - for (size_t i = 0; i < size; i++) { - char c = p[i]; - if (c < '0' || c > '9') { - *pb = false; - return 0; - } - x = x * 10 + uint32_t(c - '0'); - } - *pb = true; - return x; -} - -inline bool hexCharToUint8(uint8_t *v, char _c) -{ - uint32_t c = uint8_t(_c); // cast is necessary - if (c - '0' <= '9' - '0') { - c = c - '0'; - } else if (c - 'a' <= 'f' - 'a') { - c = (c - 'a') + 10; - } else if (c - 'A' <= 'F' - 'A') { - c = (c - 'A') + 10; - } else { - return false; - } - *v = uint8_t(c); - return true; -} - -template -bool hexToUint(UT *px, const char *p, size_t size) -{ - assert(0 < size && size <= sizeof(UT) * 2); - UT x = 0; - for (size_t i = 0; i < size; i++) { - uint8_t v; - if (!hexCharToUint8(&v, p[i])) return false; - x = x * 16 + v; - } - *px = x; - return true; -} - -template -bool binToUint(UT *px, const char *p, size_t size) -{ - assert(0 < size && size <= sizeof(UT) * 8); - UT x = 0; - for (size_t i = 0; i < size; i++) { - UT c = static_cast(p[i]); - if (c == '0') { - x = x * 2; - } else if (c == '1') { - x = x * 2 + 1; - } else { - return false; - } - } - *px = x; - return true; -} - -inline bool parsePrefix(size_t *readSize, bool *isMinus, int *base, const char *buf, size_t bufSize) -{ - if (bufSize == 0) return false; - size_t pos = 0; - if (*buf == '-') { - if (bufSize == 1) return false; - *isMinus = true; - buf++; - pos++; - } else { - *isMinus = false; - } - if (buf[0] == '0') { - if (bufSize > 1 && buf[1] == 'x') { - if (*base == 0 || *base == 16) { - *base = 16; - pos += 2; - } else { - return false; - } - } else if (bufSize > 1 && buf[1] == 'b') { - if (*base == 0 || *base == 2) { - *base = 2; - pos += 2; - } else { - return false; - } - } - } - if (*base == 0) *base = 10; - if (pos == bufSize) return false; - *readSize = pos; - return true; -} - -} // mcl::fp::local - -/* - convert little endian x[0, xn) to buf - return written size if success else 0 - data is buf[bufSize - retval, bufSize) - start "0x" if withPrefix -*/ -template -size_t arrayToHex(char *buf, size_t bufSize, const T *x, size_t n, bool withPrefix = false) -{ - size_t fullN = 0; - if (n > 1) { - size_t pos = n - 1; - while (pos > 0) { - if (x[pos]) break; - pos--; - } - if (pos > 0) fullN = pos; - } - const T v = n == 0 ? 0 : x[fullN]; - const size_t topLen = cybozu::getHexLength(v); - const size_t startPos = withPrefix ? 2 : 0; - const size_t lenT = sizeof(T) * 2; - const size_t totalSize = startPos + fullN * lenT + topLen; - if (totalSize > bufSize) return 0; - char *const top = buf + bufSize - totalSize; - if (withPrefix) { - top[0] = '0'; - top[1] = 'x'; - } - cybozu::itohex(&top[startPos], topLen, v, false); - for (size_t i = 0; i < fullN; i++) { - cybozu::itohex(&top[startPos + topLen + i * lenT], lenT, x[fullN - 1 - i], false); - } - return totalSize; -} - -/* - convert little endian x[0, xn) to buf - return written size if success else 0 - data is buf[bufSize - retval, bufSize) - start "0b" if withPrefix -*/ -template -size_t arrayToBin(char *buf, size_t bufSize, const T *x, size_t n, bool withPrefix) -{ - size_t fullN = 0; - if (n > 1) { - size_t pos = n - 1; - while (pos > 0) { - if (x[pos]) break; - pos--; - } - if (pos > 0) fullN = pos; - } - const T v = n == 0 ? 0 : x[fullN]; - const size_t topLen = cybozu::getBinLength(v); - const size_t startPos = withPrefix ? 
2 : 0; - const size_t lenT = sizeof(T) * 8; - const size_t totalSize = startPos + fullN * lenT + topLen; - if (totalSize > bufSize) return 0; - char *const top = buf + bufSize - totalSize; - if (withPrefix) { - top[0] = '0'; - top[1] = 'b'; - } - cybozu::itobin(&top[startPos], topLen, v); - for (size_t i = 0; i < fullN; i++) { - cybozu::itobin(&top[startPos + topLen + i * lenT], lenT, x[fullN - 1 - i]); - } - return totalSize; -} - -/* - convert hex string to x[0..xn) - hex string = [0-9a-fA-F]+ -*/ -template -inline size_t hexToArray(UT *x, size_t maxN, const char *buf, size_t bufSize) -{ - if (bufSize == 0) return 0; - const size_t unitLen = sizeof(UT) * 2; - const size_t q = bufSize / unitLen; - const size_t r = bufSize % unitLen; - const size_t requireSize = q + (r ? 1 : 0); - if (maxN < requireSize) return 0; - for (size_t i = 0; i < q; i++) { - if (!local::hexToUint(&x[i], &buf[r + (q - 1 - i) * unitLen], unitLen)) return 0; - } - if (r) { - if (!local::hexToUint(&x[q], buf, r)) return 0; - } - return requireSize; -} -/* - convert bin string to x[0..xn) - bin string = [01]+ -*/ -template -inline size_t binToArray(UT *x, size_t maxN, const char *buf, size_t bufSize) -{ - if (bufSize == 0) return 0; - const size_t unitLen = sizeof(UT) * 8; - const size_t q = bufSize / unitLen; - const size_t r = bufSize % unitLen; - const size_t requireSize = q + (r ? 1 : 0); - if (maxN < requireSize) return 0; - for (size_t i = 0; i < q; i++) { - if (!local::binToUint(&x[i], &buf[r + (q - 1 - i) * unitLen], unitLen)) return 0; - } - if (r) { - if (!local::binToUint(&x[q], buf, r)) return 0; - } - return requireSize; -} - -/* - little endian x[0, xn) to buf - return written size if success else 0 - data is buf[bufSize - retval, bufSize) -*/ -template -inline size_t arrayToDec(char *buf, size_t bufSize, const UT *x, size_t xn) -{ - const size_t maxN = 64; - uint32_t t[maxN]; - if (sizeof(UT) == 8) { - xn *= 2; - } - if (xn > maxN) return 0; - memcpy(t, x, xn * sizeof(t[0])); - - const size_t width = 9; - const uint32_t i1e9 = 1000000000U; - size_t pos = 0; - for (;;) { - uint32_t r = local::divU32(t, t, xn, i1e9); - while (xn > 0 && t[xn - 1] == 0) xn--; - size_t len = cybozu::itoa_local::uintToDec(buf, bufSize - pos, r); - if (len == 0) return 0; - assert(0 < len && len <= width); - if (xn == 0) return pos + len; - // fill (width - len) '0' - for (size_t j = 0; j < width - len; j++) { - buf[bufSize - pos - width + j] = '0'; - } - pos += width; - } -} - -/* - convert buf[0, bufSize) to x[0, num) - return written num if success else 0 -*/ -template -inline size_t decToArray(UT *_x, size_t maxN, const char *buf, size_t bufSize) -{ - assert(sizeof(UT) == 4 || sizeof(UT) == 8); - const size_t width = 9; - const uint32_t i1e9 = 1000000000U; - if (maxN == 0) return 0; - if (sizeof(UT) == 8) { - maxN *= 2; - } - uint32_t *x = reinterpret_cast(_x); - size_t xn = 1; - x[0] = 0; - while (bufSize > 0) { - size_t n = bufSize % width; - if (n == 0) n = width; - bool b; - uint32_t v = local::decToU32(buf, n, &b); - if (!b) return 0; - uint32_t H = local::mulU32(x, x, xn, i1e9); - if (H > 0) { - if (xn == maxN) return 0; - x[xn++] = H; - } - H = local::addU32(x, xn, v); - if (H > 0) { - if (xn == maxN) return 0; - x[xn++] = H; - } - buf += n; - bufSize -= n; - } - if (sizeof(UT) == 8 && (xn & 1)) { - x[xn++] = 0; - } - return xn / (sizeof(UT) / 4); -} - -/* - return retavl is written size if success else 0 - REMARK : the top of string is buf + bufSize - retval -*/ -template -size_t arrayToStr(char *buf, size_t 
bufSize, const UT *x, size_t n, int base, bool withPrefix) -{ - switch (base) { - case 0: - case 10: - return arrayToDec(buf, bufSize, x, n); - case 16: - return arrayToHex(buf, bufSize, x, n, withPrefix); - case 2: - return arrayToBin(buf, bufSize, x, n, withPrefix); - default: - return 0; - } -} - -template -size_t strToArray(bool *pIsMinus, UT *x, size_t xN, const char *buf, size_t bufSize, int ioMode) -{ - ioMode &= 31; - size_t readSize; - if (!local::parsePrefix(&readSize, pIsMinus, &ioMode, buf, bufSize)) return 0; - switch (ioMode) { - case 10: - return decToArray(x, xN, buf + readSize, bufSize - readSize); - case 16: - return hexToArray(x, xN, buf + readSize, bufSize - readSize); - case 2: - return binToArray(x, xN, buf + readSize, bufSize - readSize); - default: - return 0; - } -} - -/* - convert src[0, n) to (n * 2) byte hex string and write it to os - return true if success else flase -*/ -template -void writeHexStr(bool *pb, OutputStream& os, const void *src, size_t n) -{ - const uint8_t *p = (const uint8_t *)src; - for (size_t i = 0; i < n; i++) { - char hex[2]; - cybozu::itohex(hex, sizeof(hex), p[i], false); - cybozu::write(pb, os, hex, sizeof(hex)); - if (!*pb) return; - } - *pb = true; -} -/* - read hex string from is and convert it to byte array - return written buffer size -*/ -template -inline size_t readHexStr(void *buf, size_t n, InputStream& is) -{ - bool b; - uint8_t *dst = (uint8_t *)buf; - for (size_t i = 0; i < n; i++) { - uint8_t L, H; - char c[2]; - if (cybozu::readSome(c, sizeof(c), is) != sizeof(c)) return i; - b = local::hexCharToUint8(&H, c[0]); - if (!b) return i; - b = local::hexCharToUint8(&L, c[1]); - if (!b) return i; - dst[i] = (H << 4) | L; - } - return n; -} - -} } // mcl::fp - -#ifdef _MSC_VER - #pragma warning(pop) -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/curve_type.h b/vendor/github.com/dexon-foundation/mcl/include/mcl/curve_type.h deleted file mode 100644 index 9e4a941a0..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/curve_type.h +++ /dev/null @@ -1,35 +0,0 @@ -#pragma once -/** - @file - @brief curve type - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ - -enum { - MCL_BN254 = 0, - MCL_BN381_1 = 1, - MCL_BN381_2 = 2, - MCL_BN462 = 3, - MCL_BN_SNARK1 = 4, - MCL_BLS12_381 = 5, - MCL_BN160 = 6, - - /* - for only G1 - the size of curve must be less or equal to MCLBN_FP_UNIT_SIZE - */ - MCL_EC_BEGIN = 100, - MCL_SECP192K1 = MCL_EC_BEGIN, - MCL_SECP224K1 = 101, - MCL_SECP256K1 = 102, - MCL_SECP384R1 = 103, - MCL_SECP521R1 = 104, - MCL_NIST_P192 = 105, - MCL_NIST_P224 = 106, - MCL_NIST_P256 = 107, - MCL_EC_END = MCL_NIST_P256 + 1, - MCL_NIST_P384 = MCL_SECP384R1, - MCL_NIST_P521 = MCL_SECP521R1 -}; diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/ec.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/ec.hpp deleted file mode 100644 index b8eb10be3..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/ec.hpp +++ /dev/null @@ -1,1045 +0,0 @@ -#pragma once -/** - @file - @brief elliptic curve - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include -#include -#include -#include - -//#define MCL_EC_USE_AFFINE - -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable : 4458) -#endif - -namespace mcl { - -namespace ec { - -enum Mode { - Jacobi = 0, - Proj = 1 -}; - -} // mcl::ec - -/* - elliptic curve - y^2 = x^3 + 
ax + b (affine) - y^2 = x^3 + az^4 + bz^6 (Jacobi) x = X/Z^2, y = Y/Z^3 -*/ -template -class EcT : public fp::Serializable > { - enum { - zero, - minus3, - generic - }; -public: - typedef _Fp Fp; - typedef _Fp BaseFp; -#ifdef MCL_EC_USE_AFFINE - Fp x, y; - bool inf_; -#else - Fp x, y, z; - static int mode_; -#endif - static Fp a_; - static Fp b_; - static int specialA_; - static int ioMode_; - /* - order_ is the order of G2 which is the subgroup of EcT. - check the order of the elements if verifyOrder_ is true - */ - static bool verifyOrder_; - static mpz_class order_; - static void (*mulArrayGLV)(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime); - /* default constructor is undefined value */ - EcT() {} - EcT(const Fp& _x, const Fp& _y) - { - set(_x, _y); - } - bool isNormalized() const - { -#ifdef MCL_EC_USE_AFFINE - return true; -#else - return isZero() || z.isOne(); -#endif - } -#ifndef MCL_EC_USE_AFFINE -private: - void normalizeJacobi() - { - assert(!z.isZero()); - Fp rz2; - Fp::inv(z, z); - Fp::sqr(rz2, z); - x *= rz2; - y *= rz2; - y *= z; - z = 1; - } - void normalizeProj() - { - assert(!z.isZero()); - Fp::inv(z, z); - x *= z; - y *= z; - z = 1; - } - // Y^2 == X(X^2 + aZ^4) + bZ^6 - bool isValidJacobi() const - { - Fp y2, x2, z2, z4, t; - Fp::sqr(x2, x); - Fp::sqr(y2, y); - Fp::sqr(z2, z); - Fp::sqr(z4, z2); - Fp::mul(t, z4, a_); - t += x2; - t *= x; - z4 *= z2; - z4 *= b_; - t += z4; - return y2 == t; - } - // (Y^2 - bZ^2)Z = X(X^2 + aZ^2) - bool isValidProj() const - { - Fp y2, x2, z2, t; - Fp::sqr(x2, x); - Fp::sqr(y2, y); - Fp::sqr(z2, z); - Fp::mul(t, a_, z2); - t += x2; - t *= x; - z2 *= b_; - y2 -= z2; - y2 *= z; - return y2 == t; - } -#endif - // y^2 == (x^2 + a)x + b - static inline bool isValid(const Fp& _x, const Fp& _y) - { - Fp y2, t; - Fp::sqr(y2, _y); - Fp::sqr(t, _x); - t += a_; - t *= _x; - t += b_; - return y2 == t; - } -public: - void normalize() - { -#ifndef MCL_EC_USE_AFFINE - if (isNormalized()) return; - switch (mode_) { - case ec::Jacobi: - normalizeJacobi(); - break; - case ec::Proj: - normalizeProj(); - break; - } -#endif - } - static void normalize(EcT& y, const EcT& x) - { - y = x; - y.normalize(); - } - static inline void init(const Fp& a, const Fp& b, int mode = ec::Jacobi) - { - a_ = a; - b_ = b; - if (a_.isZero()) { - specialA_ = zero; - } else if (a_ == -3) { - specialA_ = minus3; - } else { - specialA_ = generic; - } - ioMode_ = 0; - verifyOrder_ = false; - order_ = 0; - mulArrayGLV = 0; -#ifdef MCL_EC_USE_AFFINE - cybozu::disable_warning_unused_variable(mode); -#else - assert(mode == ec::Jacobi || mode == ec::Proj); - mode_ = mode; -#endif - } - /* - verify the order of *this is equal to order if order != 0 - in constructor, set, setStr, operator<<(). 
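		(editorial note: calling setOrder(0) below turns this check back off;
		order_ is deliberately left untouched so isValidOrder() still works.)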
- */ - static void setOrder(const mpz_class& order) - { - if (order != 0) { - verifyOrder_ = true; - order_ = order; - } else { - verifyOrder_ = false; - // don't clear order_ because it is used for isValidOrder() - } - } - static void setMulArrayGLV(void f(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime)) - { - mulArrayGLV = f; - } - static inline void init(bool *pb, const char *astr, const char *bstr, int mode = ec::Jacobi) - { - Fp a, b; - a.setStr(pb, astr); - if (!*pb) return; - b.setStr(pb, bstr); - if (!*pb) return; - init(a, b, mode); - } - // verify the order - bool isValidOrder() const - { - EcT Q; - EcT::mulGeneric(Q, *this, order_); - return Q.isZero(); - } - bool isValid() const - { - if (isZero()) return true; - bool isOK = false; -#ifndef MCL_EC_USE_AFFINE - if (!z.isOne()) { - switch (mode_) { - case ec::Jacobi: - isOK = isValidJacobi(); - break; - case ec::Proj: - isOK = isValidProj(); - break; - } - } else -#endif - { - isOK = isValid(x, y); - } - if (!isOK) return false; - if (verifyOrder_) return isValidOrder(); - return true; - } - void set(bool *pb, const Fp& _x, const Fp& _y, bool verify = true) - { - if (verify && !isValid(_x, _y)) { - *pb = false; - return; - } - x = _x; y = _y; -#ifdef MCL_EC_USE_AFFINE - inf_ = false; -#else - z = 1; -#endif - if (verify && verifyOrder_ && !isValidOrder()) { - *pb = false; - } else { - *pb = true; - } - } - void clear() - { -#ifdef MCL_EC_USE_AFFINE - inf_ = true; -#else - z.clear(); -#endif - x.clear(); - y.clear(); - } -#ifndef MCL_EC_USE_AFFINE - static inline void dblNoVerifyInfJacobi(EcT& R, const EcT& P) - { - Fp S, M, t, y2; - Fp::sqr(y2, P.y); - Fp::mul(S, P.x, y2); - const bool isPzOne = P.z.isOne(); - S += S; - S += S; - Fp::sqr(M, P.x); - switch (specialA_) { - case zero: - Fp::add(t, M, M); - M += t; - break; - case minus3: - if (isPzOne) { - M -= P.z; - } else { - Fp::sqr(t, P.z); - Fp::sqr(t, t); - M -= t; - } - Fp::add(t, M, M); - M += t; - break; - case generic: - default: - if (isPzOne) { - t = a_; - } else { - Fp::sqr(t, P.z); - Fp::sqr(t, t); - t *= a_; - } - t += M; - M += M; - M += t; - break; - } - Fp::sqr(R.x, M); - R.x -= S; - R.x -= S; - if (isPzOne) { - R.z = P.y; - } else { - Fp::mul(R.z, P.y, P.z); - } - R.z += R.z; - Fp::sqr(y2, y2); - y2 += y2; - y2 += y2; - y2 += y2; - Fp::sub(R.y, S, R.x); - R.y *= M; - R.y -= y2; - } - static inline void dblNoVerifyInfProj(EcT& R, const EcT& P) - { - const bool isPzOne = P.z.isOne(); - Fp w, t, h; - switch (specialA_) { - case zero: - Fp::sqr(w, P.x); - Fp::add(t, w, w); - w += t; - break; - case minus3: - Fp::sqr(w, P.x); - if (isPzOne) { - w -= P.z; - } else { - Fp::sqr(t, P.z); - w -= t; - } - Fp::add(t, w, w); - w += t; - break; - case generic: - default: - if (isPzOne) { - w = a_; - } else { - Fp::sqr(w, P.z); - w *= a_; - } - Fp::sqr(t, P.x); - w += t; - w += t; - w += t; // w = a z^2 + 3x^2 - break; - } - if (isPzOne) { - R.z = P.y; - } else { - Fp::mul(R.z, P.y, P.z); // s = yz - } - Fp::mul(t, R.z, P.x); - t *= P.y; // xys - t += t; - t += t; // 4(xys) ; 4B - Fp::sqr(h, w); - h -= t; - h -= t; // w^2 - 8B - Fp::mul(R.x, h, R.z); - t -= h; // h is free - t *= w; - Fp::sqr(w, P.y); - R.x += R.x; - R.z += R.z; - Fp::sqr(h, R.z); - w *= h; - R.z *= h; - Fp::sub(R.y, t, w); - R.y -= w; - } -#endif - static inline void dblNoVerifyInf(EcT& R, const EcT& P) - { -#ifdef MCL_EC_USE_AFFINE - Fp t, s; - Fp::sqr(t, P.x); - Fp::add(s, t, t); - t += s; - t += a_; - Fp::add(s, P.y, P.y); - t /= s; - Fp::sqr(s, t); - s -= P.x; - Fp x3; 
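		// (editorial note) at this point t holds the tangent slope
		// lambda = (3x^2 + a) / (2y); the lines below finish the affine
		// chord-tangent doubling: x3 = lambda^2 - 2x, y3 = lambda*(x - x3) - y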
- Fp::sub(x3, s, P.x); - Fp::sub(s, P.x, x3); - s *= t; - Fp::sub(R.y, s, P.y); - R.x = x3; - R.inf_ = false; -#else - switch (mode_) { - case ec::Jacobi: - dblNoVerifyInfJacobi(R, P); - break; - case ec::Proj: - dblNoVerifyInfProj(R, P); - break; - } -#endif - } - static inline void dbl(EcT& R, const EcT& P) - { - if (P.isZero()) { - R.clear(); - return; - } - dblNoVerifyInf(R, P); - } -#ifndef MCL_EC_USE_AFFINE - static inline void addJacobi(EcT& R, const EcT& P, const EcT& Q, bool isPzOne, bool isQzOne) - { - Fp r, U1, S1, H, H3; - if (isPzOne) { - // r = 1; - } else { - Fp::sqr(r, P.z); - } - if (isQzOne) { - U1 = P.x; - if (isPzOne) { - H = Q.x; - } else { - Fp::mul(H, Q.x, r); - } - H -= U1; - S1 = P.y; - } else { - Fp::sqr(S1, Q.z); - Fp::mul(U1, P.x, S1); - if (isPzOne) { - H = Q.x; - } else { - Fp::mul(H, Q.x, r); - } - H -= U1; - S1 *= Q.z; - S1 *= P.y; - } - if (isPzOne) { - r = Q.y; - } else { - r *= P.z; - r *= Q.y; - } - r -= S1; - if (H.isZero()) { - if (r.isZero()) { - dblNoVerifyInf(R, P); - } else { - R.clear(); - } - return; - } - if (isPzOne) { - R.z = H; - } else { - Fp::mul(R.z, P.z, H); - } - if (!isQzOne) { - R.z *= Q.z; - } - Fp::sqr(H3, H); // H^2 - Fp::sqr(R.y, r); // r^2 - U1 *= H3; // U1 H^2 - H3 *= H; // H^3 - R.y -= U1; - R.y -= U1; - Fp::sub(R.x, R.y, H3); - U1 -= R.x; - U1 *= r; - H3 *= S1; - Fp::sub(R.y, U1, H3); - } - static inline void addProj(EcT& R, const EcT& P, const EcT& Q, bool isPzOne, bool isQzOne) - { - Fp r, PyQz, v, A, vv; - if (isQzOne) { - r = P.x; - PyQz = P.y; - } else { - Fp::mul(r, P.x, Q.z); - Fp::mul(PyQz, P.y, Q.z); - } - if (isPzOne) { - A = Q.y; - v = Q.x; - } else { - Fp::mul(A, Q.y, P.z); - Fp::mul(v, Q.x, P.z); - } - v -= r; - if (v.isZero()) { - if (A == PyQz) { - dblNoVerifyInf(R, P); - } else { - R.clear(); - } - return; - } - Fp::sub(R.y, A, PyQz); - Fp::sqr(A, R.y); - Fp::sqr(vv, v); - r *= vv; - vv *= v; - if (isQzOne) { - R.z = P.z; - } else { - if (isPzOne) { - R.z = Q.z; - } else { - Fp::mul(R.z, P.z, Q.z); - } - } - // R.z = 1 if isPzOne && isQzOne - if (isPzOne && isQzOne) { - R.z = vv; - } else { - A *= R.z; - R.z *= vv; - } - A -= vv; - vv *= PyQz; - A -= r; - A -= r; - Fp::mul(R.x, v, A); - r -= A; - R.y *= r; - R.y -= vv; - } -#endif - static inline void add(EcT& R, const EcT& P, const EcT& Q) { - if (P.isZero()) { R = Q; return; } - if (Q.isZero()) { R = P; return; } - if (&P == &Q) { - dblNoVerifyInf(R, P); - return; - } -#ifdef MCL_EC_USE_AFFINE - Fp t; - Fp::neg(t, Q.y); - if (P.y == t) { R.clear(); return; } - Fp::sub(t, Q.x, P.x); - if (t.isZero()) { - dblNoVerifyInf(R, P); - return; - } - Fp s; - Fp::sub(s, Q.y, P.y); - Fp::div(t, s, t); - R.inf_ = false; - Fp x3; - Fp::sqr(x3, t); - x3 -= P.x; - x3 -= Q.x; - Fp::sub(s, P.x, x3); - s *= t; - Fp::sub(R.y, s, P.y); - R.x = x3; -#else - bool isPzOne = P.z.isOne(); - bool isQzOne = Q.z.isOne(); - switch (mode_) { - case ec::Jacobi: - addJacobi(R, P, Q, isPzOne, isQzOne); - break; - case ec::Proj: - addProj(R, P, Q, isPzOne, isQzOne); - break; - } -#endif - } - static inline void sub(EcT& R, const EcT& P, const EcT& Q) - { - EcT nQ; - neg(nQ, Q); - add(R, P, nQ); - } - static inline void neg(EcT& R, const EcT& P) - { - if (P.isZero()) { - R.clear(); - return; - } - R.x = P.x; - Fp::neg(R.y, P.y); -#ifdef MCL_EC_USE_AFFINE - R.inf_ = false; -#else - R.z = P.z; -#endif - } - templateclass FpT> - static inline void mul(EcT& z, const EcT& x, const FpT& y) - { - fp::Block b; - y.getBlock(b); - mulArray(z, x, b.p, b.n, false); - } - static inline void mul(EcT& z, const 
EcT& x, int64_t y) - { - const uint64_t u = fp::abs_(y); -#if MCL_SIZEOF_UNIT == 8 - mulArray(z, x, &u, 1, y < 0); -#else - uint32_t ua[2] = { uint32_t(u), uint32_t(u >> 32) }; - size_t un = ua[1] ? 2 : 1; - mulArray(z, x, ua, un, y < 0); -#endif - } - static inline void mul(EcT& z, const EcT& x, const mpz_class& y) - { - mulArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0); - } - templateclass FpT> - static inline void mulCT(EcT& z, const EcT& x, const FpT& y) - { - fp::Block b; - y.getBlock(b); - mulArray(z, x, b.p, b.n, false, true); - } - static inline void mulCT(EcT& z, const EcT& x, const mpz_class& y) - { - mulArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, true); - } - /* - 0 <= P for any P - (Px, Py) <= (P'x, P'y) iff Px < P'x or Px == P'x and Py <= P'y - @note compare function calls normalize() - */ - template - static inline int compareFunc(const EcT& P_, const EcT& Q_, F comp) - { - const bool QisZero = Q_.isZero(); - if (P_.isZero()) { - if (QisZero) return 0; - return -1; - } - if (QisZero) return 1; - EcT P(P_), Q(Q_); - P.normalize(); - Q.normalize(); - int c = comp(P.x, Q.x); - if (c > 0) return 1; - if (c < 0) return -1; - return comp(P.y, Q.y); - } - static inline int compare(const EcT& P, const EcT& Q) - { - return compareFunc(P, Q, Fp::compare); - } - static inline int compareRaw(const EcT& P, const EcT& Q) - { - return compareFunc(P, Q, Fp::compareRaw); - } - bool isZero() const - { -#ifdef MCL_EC_USE_AFFINE - return inf_; -#else - return z.isZero(); -#endif - } - static inline bool isMSBserialize() - { - return !b_.isZero() && (Fp::BaseFp::getBitSize() & 7) != 0; - } - template - void save(bool *pb, OutputStream& os, int ioMode) const - { - const char sep = *fp::getIoSeparator(ioMode); - if (ioMode & IoEcProj) { - cybozu::writeChar(pb, os, '4'); if (!*pb) return; - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - x.save(pb, os, ioMode); if (!*pb) return; - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - y.save(pb, os, ioMode); if (!*pb) return; - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } -#ifndef MCL_EC_USE_AFFINE - z.save(pb, os, ioMode); -#endif - return; - } - EcT P(*this); - P.normalize(); - if (ioMode & (IoSerialize | IoSerializeHexStr)) { - /* - if (isMSBserialize()) { - // n bytes - x | (y.isOdd ? 0x80 : 0) - } else { - // n + 1 bytes - (y.isOdd ? 3 : 2), x - } - */ - const size_t n = Fp::getByteSize(); - const size_t adj = isMSBserialize() ? 0 : 1; - char buf[sizeof(Fp) + 1]; - if (isZero()) { - memset(buf, 0, n + adj); - } else { - cybozu::MemoryOutputStream mos(buf + adj, n); - P.x.save(pb, mos, IoSerialize); if (!*pb) return; - if (adj) { - buf[0] = P.y.isOdd() ? 3 : 2; - } else { - if (P.y.isOdd()) { - buf[n - 1] |= 0x80; - } - } - } - if (ioMode & IoSerializeHexStr) { - mcl::fp::writeHexStr(pb, os, buf, n + adj); - } else { - cybozu::write(pb, os, buf, n + adj); - } - return; - } - if (isZero()) { - cybozu::writeChar(pb, os, '0'); - return; - } - if (ioMode & IoEcCompY) { - cybozu::writeChar(pb, os, P.y.isOdd() ? 
'3' : '2'); - if (!*pb) return; - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - P.x.save(pb, os, ioMode); - } else { - cybozu::writeChar(pb, os, '1'); if (!*pb) return; - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - P.x.save(pb, os, ioMode); if (!*pb) return; - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - P.y.save(pb, os, ioMode); - } - } - template - void load(bool *pb, InputStream& is, int ioMode) - { -#ifdef MCL_EC_USE_AFFINE - inf_ = false; -#else - z = 1; -#endif - if (ioMode & (IoSerialize | IoSerializeHexStr)) { - const size_t n = Fp::getByteSize(); - const size_t adj = isMSBserialize() ? 0 : 1; - const size_t n1 = n + adj; - char buf[sizeof(Fp) + 1]; - size_t readSize; - if (ioMode & IoSerializeHexStr) { - readSize = mcl::fp::readHexStr(buf, n1, is); - } else { - readSize = cybozu::readSome(buf, n1, is); - } - if (readSize != n1) { - *pb = false; - return; - } - if (fp::isZeroArray(buf, n1)) { - clear(); - *pb = true; - return; - } - bool isYodd; - if (adj) { - char c = buf[0]; - if (c != 2 && c != 3) { - *pb = false; - return; - } - isYodd = c == 3; - } else { - isYodd = (buf[n - 1] >> 7) != 0; - buf[n - 1] &= 0x7f; - } - x.setArray(pb, buf + adj, n); - if (!*pb) return; - *pb = getYfromX(y, x, isYodd); - if (!*pb) return; - } else { - char c = 0; - if (!fp::local::skipSpace(&c, is)) { - *pb = false; - return; - } - if (c == '0') { - clear(); - *pb = true; - return; - } - x.load(pb, is, ioMode); if (!*pb) return; - if (c == '1') { - y.load(pb, is, ioMode); if (!*pb) return; - if (!isValid(x, y)) { - *pb = false; - return; - } - } else if (c == '2' || c == '3') { - bool isYodd = c == '3'; - *pb = getYfromX(y, x, isYodd); - if (!*pb) return; - } else if (c == '4') { - y.load(pb, is, ioMode); if (!*pb) return; -#ifndef MCL_EC_USE_AFFINE - z.load(pb, is, ioMode); if (!*pb) return; -#endif - } else { - *pb = false; - return; - } - } - if (verifyOrder_ && !isValidOrder()) { - *pb = false; - } else { - *pb = true; - } - } - // deplicated - static void setCompressedExpression(bool compressedExpression = true) - { - if (compressedExpression) { - ioMode_ |= IoEcCompY; - } else { - ioMode_ &= ~IoEcCompY; - } - } - /* - set IoMode for operator<<(), or operator>>() - */ - static void setIoMode(int ioMode) - { - assert(!(ioMode & 0xff)); - ioMode_ = ioMode; - } - static inline int getIoMode() { return Fp::BaseFp::getIoMode() | ioMode_; } - static inline void getWeierstrass(Fp& yy, const Fp& x) - { - Fp t; - Fp::sqr(t, x); - t += a_; - t *= x; - Fp::add(yy, t, b_); - } - static inline bool getYfromX(Fp& y, const Fp& x, bool isYodd) - { - getWeierstrass(y, x); - if (!Fp::squareRoot(y, y)) { - return false; - } - if (y.isOdd() ^ isYodd) { - Fp::neg(y, y); - } - return true; - } - inline friend EcT operator+(const EcT& x, const EcT& y) { EcT z; add(z, x, y); return z; } - inline friend EcT operator-(const EcT& x, const EcT& y) { EcT z; sub(z, x, y); return z; } - template - inline friend EcT operator*(const EcT& x, const INT& y) { EcT z; mul(z, x, y); return z; } - EcT& operator+=(const EcT& x) { add(*this, *this, x); return *this; } - EcT& operator-=(const EcT& x) { sub(*this, *this, x); return *this; } - template - EcT& operator*=(const INT& x) { mul(*this, *this, x); return *this; } - EcT operator-() const { EcT x; neg(x, *this); return x; } - bool operator==(const EcT& rhs) const - { - EcT R; - sub(R, *this, rhs); // QQQ : optimized later - return R.isZero(); - } - bool operator!=(const EcT& rhs) const { return 
!operator==(rhs); } - bool operator<(const EcT& rhs) const - { - return compare(*this, rhs) < 0; - } - bool operator>=(const EcT& rhs) const { return !operator<(rhs); } - bool operator>(const EcT& rhs) const { return rhs < *this; } - bool operator<=(const EcT& rhs) const { return !operator>(rhs); } - static inline void mulArray(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime = false) - { - if (!constTime && x.isZero()) { - z.clear(); - return; - } - if (mulArrayGLV && (constTime || yn > 1)) { - mulArrayGLV(z, x, y, yn, isNegative, constTime); - return; - } - mulArrayBase(z, x, y, yn, isNegative, constTime); - } - static inline void mulArrayBase(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime) - { - EcT tmp; - const EcT *px = &x; - if (&z == &x) { - tmp = x; - px = &tmp; - } - z.clear(); - fp::powGeneric(z, *px, y, yn, EcT::add, EcT::dbl, EcT::normalize, constTime ? Fp::BaseFp::getBitSize() : 0); - if (isNegative) { - neg(z, z); - } - } - /* - generic mul - */ - static inline void mulGeneric(EcT& z, const EcT& x, const mpz_class& y, bool constTime = false) - { - mulArrayBase(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, constTime); - } -#ifndef CYBOZU_DONT_USE_EXCEPTION - static inline void init(const std::string& astr, const std::string& bstr, int mode = ec::Jacobi) - { - bool b; - init(&b, astr.c_str(), bstr.c_str(), mode); - if (!b) throw cybozu::Exception("mcl:EcT:init"); - } - void set(const Fp& _x, const Fp& _y, bool verify = true) - { - bool b; - set(&b, _x, _y, verify); - if (!b) throw cybozu::Exception("ec:EcT:set") << _x << _y; - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - bool b; - save(&b, os, ioMode); - if (!b) throw cybozu::Exception("EcT:save"); - } - template - void load(InputStream& is, int ioMode = IoSerialize) - { - bool b; - load(&b, is, ioMode); - if (!b) throw cybozu::Exception("EcT:load"); - } -#endif -#ifndef CYBOZU_DONT_USE_STRING - // backward compatilibity - static inline void setParam(const std::string& astr, const std::string& bstr, int mode = ec::Jacobi) - { - init(astr, bstr, mode); - } - friend inline std::istream& operator>>(std::istream& is, EcT& self) - { - self.load(is, fp::detectIoMode(getIoMode(), is)); - return is; - } - friend inline std::ostream& operator<<(std::ostream& os, const EcT& self) - { - self.save(os, fp::detectIoMode(getIoMode(), os)); - return os; - } -#endif -}; - -template Fp EcT::a_; -template Fp EcT::b_; -template int EcT::specialA_; -template int EcT::ioMode_; -template bool EcT::verifyOrder_; -template mpz_class EcT::order_; -template void (*EcT::mulArrayGLV)(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime); -#ifndef MCL_EC_USE_AFFINE -template int EcT::mode_; -#endif - -struct EcParam { - const char *name; - const char *p; - const char *a; - const char *b; - const char *gx; - const char *gy; - const char *n; - size_t bitSize; // bit length of p - int curveType; -}; - -} // mcl - -#ifdef CYBOZU_USE_BOOST -namespace mcl { -template -size_t hash_value(const mcl::EcT& P_) -{ - if (P_.isZero()) return 0; - mcl::EcT P(P_); P.normalize(); - return mcl::hash_value(P.y, mcl::hash_value(P.x)); -} - -} -#else -namespace std { CYBOZU_NAMESPACE_TR1_BEGIN - -template -struct hash > { - size_t operator()(const mcl::EcT& P_) const - { - if (P_.isZero()) return 0; - mcl::EcT P(P_); P.normalize(); - return hash()(P.y, hash()(P.x)); - } -}; - -CYBOZU_NAMESPACE_TR1_END } // std -#endif - -#ifdef _MSC_VER 
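// (editorial note) matching pop for the #pragma warning(push) /
// warning(disable : 4458) pair at the top of this header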
- #pragma warning(pop) -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/ecdsa.h b/vendor/github.com/dexon-foundation/mcl/include/mcl/ecdsa.h deleted file mode 100644 index daeb6be53..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/ecdsa.h +++ /dev/null @@ -1,105 +0,0 @@ -#pragma once -/** - @file - @brief C interface of ECDSA - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include // for uint64_t, uint8_t -#include // for size_t - -#if defined(_MSC_VER) - #ifdef ECDSA_DLL_EXPORT - #define ECDSA_DLL_API __declspec(dllexport) - #else - #define ECDSA_DLL_API __declspec(dllimport) - #ifndef ECDSA_NO_AUTOLINK - #pragma comment(lib, "mclecdsa.lib") - #endif - #endif -#elif defined(__EMSCRIPTEN__) - #define ECDSA_DLL_API __attribute__((used)) -#else - #define ECDSA_DLL_API -#endif - -#ifndef mclSize - #ifdef __EMSCRIPTEN__ - // avoid 64-bit integer - #define mclSize unsigned int - #define mclInt int - #else - // use #define for cgo - #define mclSize size_t - #define mclInt int64_t - #endif -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef ECDSA_NOT_DEFINE_STRUCT - -typedef struct ecdsaSecretKey ecdsaSecretKey; -typedef struct ecdsaPublicKey ecdsaPublicKey; -typedef struct ecdsaSignature ecdsaSignature; - -#else - -typedef struct { - uint64_t d[4]; -} ecdsaSecretKey; - -typedef struct { - uint64_t d[4 * 3]; -} ecdsaPublicKey; - -typedef struct { - uint64_t d[4 * 2]; -} ecdsaSignature; - -#endif - -struct ecdsaPrecomputedPublicKey; - -/* - init library - return 0 if success - @note not threadsafe -*/ -ECDSA_DLL_API int ecdsaInit(void); - -// return written byte size if success else 0 -ECDSA_DLL_API mclSize ecdsaSecretKeySerialize(void *buf, mclSize maxBufSize, const ecdsaSecretKey *sec); -ECDSA_DLL_API mclSize ecdsaPublicKeySerialize(void *buf, mclSize maxBufSize, const ecdsaPublicKey *pub); -ECDSA_DLL_API mclSize ecdsaSignatureSerialize(void *buf, mclSize maxBufSize, const ecdsaSignature *sig); - -// return read byte size if sucess else 0 -ECDSA_DLL_API mclSize ecdsaSecretKeyDeserialize(ecdsaSecretKey* sec, const void *buf, mclSize bufSize); -ECDSA_DLL_API mclSize ecdsaPublicKeyDeserialize(ecdsaPublicKey* pub, const void *buf, mclSize bufSize); -ECDSA_DLL_API mclSize ecdsaSignatureDeserialize(ecdsaSignature* sig, const void *buf, mclSize bufSize); - -// return 0 if success -ECDSA_DLL_API int ecdsaSecretKeySetByCSPRNG(ecdsaSecretKey *sec); - -ECDSA_DLL_API void ecdsaGetPublicKey(ecdsaPublicKey *pub, const ecdsaSecretKey *sec); - -ECDSA_DLL_API void ecdsaSign(ecdsaSignature *sig, const ecdsaSecretKey *sec, const void *m, mclSize size); - -// return 1 if valid -ECDSA_DLL_API int ecdsaVerify(const ecdsaSignature *sig, const ecdsaPublicKey *pub, const void *m, mclSize size); -ECDSA_DLL_API int ecdsaVerifyPrecomputed(const ecdsaSignature *sig, const ecdsaPrecomputedPublicKey *pub, const void *m, mclSize size); - -// return nonzero if success -ECDSA_DLL_API ecdsaPrecomputedPublicKey *ecdsaPrecomputedPublicKeyCreate(); -// call this function to avoid memory leak -ECDSA_DLL_API void ecdsaPrecomputedPublicKeyDestroy(ecdsaPrecomputedPublicKey *ppub); -// return 0 if success -ECDSA_DLL_API int ecdsaPrecomputedPublicKeyInit(ecdsaPrecomputedPublicKey *ppub, const ecdsaPublicKey *pub); - -#ifdef __cplusplus -} -#endif - diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/ecdsa.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/ecdsa.hpp deleted file mode 100644 index 
cf3ed3f65..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/ecdsa.hpp +++ /dev/null @@ -1,257 +0,0 @@ -#pragma once -/** - @file - @brief ECDSA - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include -#include -#include -#include - -namespace mcl { namespace ecdsa { - -namespace local { - -#ifndef MCLSHE_WIN_SIZE - #define MCLSHE_WIN_SIZE 10 -#endif -static const size_t winSize = MCLSHE_WIN_SIZE; - -struct FpTag; -struct ZnTag; - -} // mcl::ecdsa::local - -typedef mcl::FpT Fp; -typedef mcl::FpT Zn; -typedef mcl::EcT Ec; - -namespace local { - -struct Param { - mcl::EcParam ecParam; - Ec P; - mcl::fp::WindowMethod Pbase; -}; - -inline Param& getParam() -{ - static Param p; - return p; -} - -inline void be32toZn(Zn& x, const mcl::fp::Unit *buf) -{ - const size_t n = 32; - const unsigned char *p = (const unsigned char*)buf; - unsigned char be[n]; - for (size_t i = 0; i < n; i++) { - be[i] = p[n - 1 - i]; - } - x.setArrayMaskMod(be, n); -} - -/* - y = x mod n -*/ -inline void FpToZn(Zn& y, const Fp& x) -{ - fp::Block b; - x.getBlock(b); - y.setArrayMaskMod(b.p, b.n); -} - -inline void setHashOf(Zn& x, const void *msg, size_t msgSize) -{ - mcl::fp::Unit xBuf[256 / 8 / sizeof(mcl::fp::Unit)]; - uint32_t hashSize = mcl::fp::sha256(xBuf, sizeof(xBuf), msg, (uint32_t)msgSize); - assert(hashSize == sizeof(xBuf)); - (void)hashSize; - be32toZn(x, xBuf); -} - -} // mcl::ecdsa::local - -const local::Param& param = local::getParam(); - -inline void init(bool *pb) -{ - const mcl::EcParam& ecParam = mcl::ecparam::secp256k1; - Zn::init(pb, ecParam.n); - if (!*pb) return; - Fp::init(pb, ecParam.p); - if (!*pb) return; - Ec::init(pb, ecParam.a, ecParam.b); - if (!*pb) return; - Zn::setIoMode(16); - Fp::setIoMode(16); - Ec::setIoMode(mcl::IoEcAffine); - local::Param& p = local::getParam(); - p.ecParam = ecParam; - Fp x, y; - x.setStr(pb, ecParam.gx); - if (!*pb) return; - y.setStr(pb, ecParam.gy); - if (!*pb) return; - p.P.set(pb, x, y); - if (!*pb) return; - p.Pbase.init(pb, p.P, ecParam.bitSize, local::winSize); -} - -#ifndef CYBOZU_DONT_USE_EXCEPTION -inline void init() -{ - bool b; - init(&b); - if (!b) throw cybozu::Exception("ecdsa:init"); -} -#endif - -typedef Zn SecretKey; -typedef Ec PublicKey; - -struct PrecomputedPublicKey { - mcl::fp::WindowMethod pubBase_; - void init(bool *pb, const PublicKey& pub) - { - pubBase_.init(pb, pub, param.ecParam.bitSize, local::winSize); - } -#ifndef CYBOZU_DONT_USE_EXCEPTION - void init(const PublicKey& pub) - { - bool b; - init(&b, pub); - if (!b) throw cybozu::Exception("ecdsa:PrecomputedPublicKey:init"); - } -#endif -}; - -inline void getPublicKey(PublicKey& pub, const SecretKey& sec) -{ - Ec::mul(pub, param.P, sec); - pub.normalize(); -} - -struct Signature : public mcl::fp::Serializable { - Zn r, s; - template - void load(bool *pb, InputStream& is, int ioMode = IoSerialize) - { - r.load(pb, is, ioMode); if (!*pb) return; - s.load(pb, is, ioMode); - } - template - void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const - { - const char sep = *fp::getIoSeparator(ioMode); - r.save(pb, os, ioMode); if (!*pb) return; - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - s.save(pb, os, ioMode); - } -#ifndef CYBOZU_DONT_USE_EXCEPTION - template - void load(InputStream& is, int ioMode = IoSerialize) - { - bool b; - load(&b, is, ioMode); - if (!b) throw cybozu::Exception("ecdsa:Signature:load"); - } - template - void save(OutputStream& os, int 
ioMode = IoSerialize) const - { - bool b; - save(&b, os, ioMode); - if (!b) throw cybozu::Exception("ecdsa:Signature:save"); - } -#endif -#ifndef CYBOZU_DONT_USE_STRING - friend std::istream& operator>>(std::istream& is, Signature& self) - { - self.load(is, fp::detectIoMode(Ec::getIoMode(), is)); - return is; - } - friend std::ostream& operator<<(std::ostream& os, const Signature& self) - { - self.save(os, fp::detectIoMode(Ec::getIoMode(), os)); - return os; - } -#endif -}; - -inline void sign(Signature& sig, const SecretKey& sec, const void *msg, size_t msgSize) -{ - Zn& r = sig.r; - Zn& s = sig.s; - Zn z, k; - local::setHashOf(z, msg, msgSize); - Ec Q; - for (;;) { - k.setByCSPRNG(); - param.Pbase.mul(Q, k); - if (Q.isZero()) continue; - Q.normalize(); - local::FpToZn(r, Q.x); - if (r.isZero()) continue; - Zn::mul(s, r, sec); - s += z; - if (s.isZero()) continue; - s /= k; - return; - } -} - -namespace local { - -inline void mulDispatch(Ec& Q, const PublicKey& pub, const Zn& y) -{ - Ec::mul(Q, pub, y); -} - -inline void mulDispatch(Ec& Q, const PrecomputedPublicKey& ppub, const Zn& y) -{ - ppub.pubBase_.mul(Q, y); -} - -template -inline bool verify(const Signature& sig, const Pub& pub, const void *msg, size_t msgSize) -{ - const Zn& r = sig.r; - const Zn& s = sig.s; - if (r.isZero() || s.isZero()) return false; - Zn z, w, u1, u2; - local::setHashOf(z, msg, msgSize); - Zn::inv(w, s); - Zn::mul(u1, z, w); - Zn::mul(u2, r, w); - Ec Q1, Q2; - param.Pbase.mul(Q1, u1); -// Ec::mul(Q2, pub, u2); - local::mulDispatch(Q2, pub, u2); - Q1 += Q2; - if (Q1.isZero()) return false; - Q1.normalize(); - Zn x; - local::FpToZn(x, Q1.x); - return r == x; -} - -} // mcl::ecdsa::local - -inline bool verify(const Signature& sig, const PublicKey& pub, const void *msg, size_t msgSize) -{ - return local::verify(sig, pub, msg, msgSize); -} - -inline bool verify(const Signature& sig, const PrecomputedPublicKey& ppub, const void *msg, size_t msgSize) -{ - return local::verify(sig, ppub, msg, msgSize); -} - -} } // mcl::ecdsa - diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/ecparam.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/ecparam.hpp deleted file mode 100644 index 087bf8b6c..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/ecparam.hpp +++ /dev/null @@ -1,191 +0,0 @@ -#pragma once -/** - @file - @brief Elliptic curve parameter - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include -#include - -namespace mcl { namespace ecparam { - -const struct mcl::EcParam secp160k1 = { - "secp160k1", - "0xfffffffffffffffffffffffffffffffeffffac73", - "0", - "7", - "0x3b4c382ce37aa192a4019e763036f4f5dd4d7ebb", - "0x938cf935318fdced6bc28286531733c3f03c4fee", - "0x100000000000000000001b8fa16dfab9aca16b6b3", - 160, - -1 -}; -// p=2^160 + 7 -const struct mcl::EcParam p160_1 = { - "p160_1", - "0x10000000000000000000000000000000000000007", - "10", - "1343632762150092499701637438970764818528075565078", - "1", - "1236612389951462151661156731535316138439983579284", - "1461501637330902918203683518218126812711137002561", - 161, - -1 -}; -const struct mcl::EcParam secp192k1 = { - "secp192k1", - "0xfffffffffffffffffffffffffffffffffffffffeffffee37", - "0", - "3", - "0xdb4ff10ec057e9ae26b07d0280b7f4341da5d1b1eae06c7d", - "0x9b2f2f6d9c5628a7844163d015be86344082aa88d95e2f9d", - "0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d", - 192, - MCL_SECP192K1 -}; -const struct mcl::EcParam secp224k1 = { - "secp224k1", - 
"secp224k1", -
"0xfffffffffffffffffffffffffffffffffffffffffffffffeffffe56d", - "0", - "5", - "0xa1455b334df099df30fc28a169a467e9e47075a90f7e650eb6b7a45c", - "0x7e089fed7fba344282cafbd6f7e319f7c0b0bd59e2ca4bdb556d61a5", - "0x10000000000000000000000000001dce8d2ec6184caf0a971769fb1f7", - 224, - MCL_SECP224K1 -}; -const struct mcl::EcParam secp256k1 = { - "secp256k1", - "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "0", - "7", - "0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", - "0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8", - "0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", - 256, - MCL_SECP256K1 -}; -const struct mcl::EcParam secp384r1 = { - "secp384r1", - "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", - "-3", - "0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef", - "0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7", - "0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f", - "0xffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973", - 384, - MCL_SECP384R1 -}; -const struct mcl::EcParam secp521r1 = { - "secp521r1", - "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "-3", - "0x51953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00", - "0xc6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66", - "0x11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650", - "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e91386409", - 521, - MCL_SECP521R1 -}; -const struct mcl::EcParam NIST_P192 = { - "NIST_P192", - "0xfffffffffffffffffffffffffffffffeffffffffffffffff", - "-3", - "0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1", - "0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012", - "0x07192b95ffc8da78631011ed6b24cdd573f977a11e794811", - "0xffffffffffffffffffffffff99def836146bc9b1b4d22831", - 192, - MCL_NIST_P192 -}; -const struct mcl::EcParam NIST_P224 = { - "NIST_P224", - "0xffffffffffffffffffffffffffffffff000000000000000000000001", - "-3", - "0xb4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4", - "0xb70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21", - "0xbd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34", - "0xffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d", - 224, - MCL_NIST_P224 -}; -const struct mcl::EcParam NIST_P256 = { - "NIST_P256", - "0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff", - "-3", - "0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b", - "0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296", - "0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5", - "0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551", - 256, - MCL_NIST_P256 -}; -// same secp384r1 -const struct mcl::EcParam NIST_P384 = { - "NIST_P384", - "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", - "-3", - 
"0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef", - "0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7", - "0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f", - "0xffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973", - 384, - MCL_NIST_P384 -}; -// same secp521r1 -const struct mcl::EcParam NIST_P521 = { - "NIST_P521", - "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "-3", - "0x051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00", - "0xc6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66", - "0x11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650", - "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e91386409", - 521, - MCL_NIST_P521 -}; - -} // mcl::ecparam - -#ifndef CYBOZU_DONT_USE_STRING -static inline const mcl::EcParam* getEcParam(const std::string& name) -{ - static const mcl::EcParam *tbl[] = { - &ecparam::p160_1, - &ecparam::secp160k1, - &ecparam::secp192k1, - &ecparam::secp224k1, - &ecparam::secp256k1, - &ecparam::secp384r1, - &ecparam::secp521r1, - - &ecparam::NIST_P192, - &ecparam::NIST_P224, - &ecparam::NIST_P256, - &ecparam::NIST_P384, - &ecparam::NIST_P521, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - if (name == tbl[i]->name) return tbl[i]; - } - throw cybozu::Exception("mcl::getEcParam:not support name") << name; -} -#endif - -inline const mcl::EcParam* getEcParam(int curve) -{ - switch (curve) { - case MCL_SECP192K1: return &ecparam::secp192k1; - case MCL_SECP224K1: return &ecparam::secp224k1; - case MCL_SECP256K1: return &ecparam::secp256k1; - case MCL_SECP384R1: return &ecparam::secp384r1; - case MCL_NIST_P192: return &ecparam::NIST_P192; - case MCL_NIST_P224: return &ecparam::NIST_P224; - case MCL_NIST_P256: return &ecparam::NIST_P256; - default: return 0; - } -} - -} // mcl diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/elgamal.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/elgamal.hpp deleted file mode 100644 index 431148508..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/elgamal.hpp +++ /dev/null @@ -1,612 +0,0 @@ -#pragma once -/** - @file - @brief lifted-ElGamal encryption - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause - - original: - Copyright (c) 2014, National Institute of Advanced Industrial - Science and Technology All rights reserved. - This source file is subject to BSD 3-Clause license. 
-*/ -#include -#include -#include -#ifndef CYBOZU_UNORDERED_MAP_STD -#include -#endif -#include -#include -#include -#include - -namespace mcl { - -template -struct ElgamalT { - typedef _Ec Ec; - struct CipherText { - Ec c1; - Ec c2; - CipherText() - { - clear(); - } - /* - (c1, c2) = (0, 0) is trivial valid ciphertext for m = 0 - */ - void clear() - { - c1.clear(); - c2.clear(); - } - /* - add encoded message with encoded message - input : this = Enc(m1), c = Enc(m2) - output : this = Enc(m1 + m2) - */ - void add(const CipherText& c) - { - Ec::add(c1, c1, c.c1); - Ec::add(c2, c2, c.c2); - } - /* - mul by x - input : this = Enc(m), x - output : this = Enc(m x) - */ - template - void mul(const N& x) - { - Ec::mul(c1, c1, x); - Ec::mul(c2, c2, x); - } - /* - negative encoded message - input : this = Enc(m) - output : this = Enc(-m) - */ - void neg() - { - Ec::neg(c1, c1); - Ec::neg(c2, c2); - } - template - void load(InputStream& is, int ioMode = IoSerialize) - { - c1.load(is, ioMode); - c2.load(is, ioMode); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - const char sep = *fp::getIoSeparator(ioMode); - c1.save(os, ioMode); - if (sep) cybozu::writeChar(os, sep); - c2.save(os, ioMode); - } - void getStr(std::string& str, int ioMode = 0) const - { - str.clear(); - cybozu::StringOutputStream os(str); - save(os, ioMode); - } - std::string getStr(int ioMode = 0) const - { - std::string str; - getStr(str, ioMode); - return str; - } - void setStr(const std::string& str, int ioMode = 0) - { - cybozu::StringInputStream is(str); - load(is, ioMode); - } - friend inline std::ostream& operator<<(std::ostream& os, const CipherText& self) - { - self.save(os, fp::detectIoMode(Ec::getIoMode(), os)); - return os; - } - friend inline std::istream& operator>>(std::istream& is, CipherText& self) - { - self.load(is, fp::detectIoMode(Ec::getIoMode(), is)); - return is; - } - // obsolete - std::string toStr() const { return getStr(); } - void fromStr(const std::string& str) { setStr(str); } - }; - /* - Zero Knowledge Proof - cipher text with ZKP to ensure m = 0 or 1 - http://dx.doi.org/10.1587/transfun.E96.A.1156 - */ - struct Zkp { - Zn c0, c1, s0, s1; - template - void load(InputStream& is, int ioMode = IoSerialize) - { - c0.load(is, ioMode); - c1.load(is, ioMode); - s0.load(is, ioMode); - s1.load(is, ioMode); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - const char sep = *fp::getIoSeparator(ioMode); - c0.save(os, ioMode); - if (sep) cybozu::writeChar(os, sep); - c1.save(os, ioMode); - if (sep) cybozu::writeChar(os, sep); - s0.save(os, ioMode); - if (sep) cybozu::writeChar(os, sep); - s1.save(os, ioMode); - } - void getStr(std::string& str, int ioMode = 0) const - { - str.clear(); - cybozu::StringOutputStream os(str); - save(os, ioMode); - } - std::string getStr(int ioMode = 0) const - { - std::string str; - getStr(str, ioMode); - return str; - } - void setStr(const std::string& str, int ioMode = 0) - { - cybozu::StringInputStream is(str); - load(is, ioMode); - } - friend inline std::ostream& operator<<(std::ostream& os, const Zkp& self) - { - self.save(os, fp::detectIoMode(Ec::getIoMode(), os)); - return os; - } - friend inline std::istream& operator>>(std::istream& is, Zkp& self) - { - self.load(is, fp::detectIoMode(Ec::getIoMode(), is)); - return is; - } - // obsolete - std::string toStr() const { return getStr(); } - void fromStr(const std::string& str) { setStr(str); } - }; - - class PublicKey { - size_t bitSize; - Ec f; - Ec g; - Ec h; - 
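		// (editorial note) wm_f/wm_g/wm_h are fixed-base window tables for
		// f, g and h; enableWindowMethod() fills them so that mulF/mulG/mulH
		// can replace a generic Ec::mul with table lookups on every encryption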
bool enableWindowMethod_; - fp::WindowMethod wm_f; - fp::WindowMethod wm_g; - fp::WindowMethod wm_h; - template - void mulDispatch(Ec& z, const Ec& x, const N& n, const fp::WindowMethod& pw) const - { - if (enableWindowMethod_) { - pw.mul(z, n); - } else { - Ec::mul(z, x, n); - } - } - template - void mulF(Ec& z, const N& n) const { mulDispatch(z, f, n, wm_f); } - template - void mulG(Ec& z, const N& n) const { mulDispatch(z, g, n, wm_g); } - template - void mulH(Ec& z, const N& n) const { mulDispatch(z, h, n, wm_h); } - public: - PublicKey() - : bitSize(0) - , enableWindowMethod_(false) - { - } - void enableWindowMethod(size_t winSize = 10) - { - wm_f.init(f, bitSize, winSize); - wm_g.init(g, bitSize, winSize); - wm_h.init(h, bitSize, winSize); - enableWindowMethod_ = true; - } - const Ec& getF() const { return f; } - void init(size_t bitSize, const Ec& f, const Ec& g, const Ec& h) - { - this->bitSize = bitSize; - this->f = f; - this->g = g; - this->h = h; - enableWindowMethod_ = false; - enableWindowMethod(); - } - /* - encode message - input : m - output : c = (c1, c2) = (g^u, h^u f^m) - */ - void enc(CipherText& c, const Zn& m, fp::RandGen rg = fp::RandGen()) const - { - Zn u; - u.setRand(rg); - mulG(c.c1, u); - mulH(c.c2, u); - Ec t; - mulF(t, m); - Ec::add(c.c2, c.c2, t); - } - /* - encode message - input : m = 0 or 1 - output : c (c1, c2), zkp - */ - void encWithZkp(CipherText& c, Zkp& zkp, int m, fp::RandGen rg = fp::RandGen()) const - { - if (m != 0 && m != 1) { - throw cybozu::Exception("elgamal:PublicKey:encWithZkp") << m; - } - Zn u; - u.setRand(rg); - mulG(c.c1, u); - mulH(c.c2, u); - if (m) { - Ec::add(c.c2, c.c2, f); - Zn r1; - r1.setRand(rg); - zkp.c0.setRand(rg); - zkp.s0.setRand(rg); - Ec R01, R02, R11, R12; - Ec t1, t2; - mulG(t1, zkp.s0); - Ec::mul(t2, c.c1, zkp.c0); - Ec::sub(R01, t1, t2); - mulH(t1, zkp.s0); - Ec::mul(t2, c.c2, zkp.c0); - Ec::sub(R02, t1, t2); - mulG(R11, r1); - mulH(R12, r1); - std::ostringstream os; - os << R01 << R02 << R11 << R12 << c.c1 << c.c2 << f << g << h; - Zn cc; - cc.setHashOf(os.str()); - zkp.c1 = cc - zkp.c0; - zkp.s1 = r1 + zkp.c1 * u; - } else { - Zn r0; - r0.setRand(rg); - zkp.c1.setRand(rg); - zkp.s1.setRand(rg); - Ec R01, R02, R11, R12; - mulG(R01, r0); - mulH(R02, r0); - Ec t1, t2; - mulG(t1, zkp.s1); - Ec::mul(t2, c.c1, zkp.c1); - Ec::sub(R11, t1, t2); - mulH(t1, zkp.s1); - Ec::sub(t2, c.c2, f); - Ec::mul(t2, t2, zkp.c1); - Ec::sub(R12, t1, t2); - std::ostringstream os; - os << R01 << R02 << R11 << R12 << c.c1 << c.c2 << f << g << h; - Zn cc; - cc.setHashOf(os.str()); - zkp.c0 = cc - zkp.c1; - zkp.s0 = r0 + zkp.c0 * u; - } - } - /* - verify cipher text with ZKP - */ - bool verify(const CipherText& c, const Zkp& zkp) const - { - Ec R01, R02, R11, R12; - Ec t1, t2; - mulG(t1, zkp.s0); - Ec::mul(t2, c.c1, zkp.c0); - Ec::sub(R01, t1, t2); - mulH(t1, zkp.s0); - Ec::mul(t2, c.c2, zkp.c0); - Ec::sub(R02, t1, t2); - mulG(t1, zkp.s1); - Ec::mul(t2, c.c1, zkp.c1); - Ec::sub(R11, t1, t2); - mulH(t1, zkp.s1); - Ec::sub(t2, c.c2, f); - Ec::mul(t2, t2, zkp.c1); - Ec::sub(R12, t1, t2); - std::ostringstream os; - os << R01 << R02 << R11 << R12 << c.c1 << c.c2 << f << g << h; - Zn cc; - cc.setHashOf(os.str()); - return cc == zkp.c0 + zkp.c1; - } - /* - rerandomize encoded message - input : c = (c1, c2) - output : c = (c1 g^v, c2 h^v) - */ - void rerandomize(CipherText& c, fp::RandGen rg = fp::RandGen()) const - { - Zn v; - v.setRand(rg); - Ec t; - mulG(t, v); - Ec::add(c.c1, c.c1, t); - mulH(t, v); - Ec::add(c.c2, c.c2, t); - } - /* - add encoded 
message with plain message - input : c = Enc(m1) = (c1, c2), m2 - ouput : c = Enc(m1 + m2) = (c1, c2 f^m2) - */ - template - void add(CipherText& c, const N& m) const - { - Ec fm; - mulF(fm, m); - Ec::add(c.c2, c.c2, fm); - } - template - void load(InputStream& is, int ioMode = IoSerialize) - { - std::string s; - mcl::fp::local::loadWord(s, is); - bitSize = cybozu::atoi(s); - f.load(is, ioMode); - g.load(is, ioMode); - h.load(is, ioMode); - init(bitSize, f, g, h); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - std::string s = cybozu::itoa(bitSize); - cybozu::write(os, s.c_str(), s.size()); - cybozu::writeChar(os, ' '); - - const char sep = *fp::getIoSeparator(ioMode); - f.save(os, ioMode); - if (sep) cybozu::writeChar(os, sep); - g.save(os, ioMode); - if (sep) cybozu::writeChar(os, sep); - h.save(os, ioMode); - if (sep) cybozu::writeChar(os, sep); - } - void getStr(std::string& str, int ioMode = 0) const - { - str.clear(); - cybozu::StringOutputStream os(str); - save(os, ioMode); - } - std::string getStr(int ioMode = 0) const - { - std::string str; - getStr(str, ioMode); - return str; - } - void setStr(const std::string& str, int ioMode = 0) - { - cybozu::StringInputStream is(str); - load(is, ioMode); - } - friend inline std::ostream& operator<<(std::ostream& os, const PublicKey& self) - { - self.save(os, fp::detectIoMode(Ec::getIoMode(), os)); - return os; - } - friend inline std::istream& operator>>(std::istream& is, PublicKey& self) - { - self.load(is, fp::detectIoMode(Ec::getIoMode(), is)); - return is; - } - // obsolete - std::string toStr() const { return getStr(); } - void fromStr(const std::string& str) { setStr(str); } - }; - /* - create table f^i for i in [rangeMin, rangeMax] - */ - struct PowerCache { -#if (CYBOZU_CPP_VERSION > CYBOZU_CPP_VERSION_CP03) - typedef CYBOZU_NAMESPACE_STD::unordered_map Cache; -#else - typedef std::map Cache; -#endif - Cache cache; - void init(const Ec& f, int rangeMin, int rangeMax) - { - if (rangeMin > rangeMax) throw cybozu::Exception("mcl:ElgamalT:PowerCache:bad range") << rangeMin << rangeMax; - Ec x; - x.clear(); - cache[x] = 0; - for (int i = 1; i <= rangeMax; i++) { - Ec::add(x, x, f); - cache[x] = i; - } - Ec nf; - Ec::neg(nf, f); - x.clear(); - for (int i = -1; i >= rangeMin; i--) { - Ec::add(x, x, nf); - cache[x] = i; - } - } - /* - return m such that f^m = g - */ - int getExponent(const Ec& g, bool *b = 0) const - { - typename Cache::const_iterator i = cache.find(g); - if (i == cache.end()) { - if (b) { - *b = false; - return 0; - } - throw cybozu::Exception("Elgamal:PowerCache:getExponent:not found") << g; - } - if (b) *b = true; - return i->second; - } - void clear() - { - cache.clear(); - } - bool isEmpty() const - { - return cache.empty(); - } - }; - class PrivateKey { - PublicKey pub; - Zn z; - PowerCache cache; - public: - /* - init - input : f - output : (g, h, z) - Ec = - g in Ec - h = g^z - */ - void init(const Ec& f, size_t bitSize, fp::RandGen rg = fp::RandGen()) - { - Ec g, h; - z.setRand(rg); - Ec::mul(g, f, z); - z.setRand(rg); - Ec::mul(h, g, z); - pub.init(bitSize, f, g, h); - } - const PublicKey& getPublicKey() const { return pub; } - /* - decode message by brute-force attack - input : c = (c1, c2) - output : m - M = c2 / c1^z - find m such that M = f^m and |m| < limit - @memo 7sec@core i3 for m = 1e6 - */ - void dec(Zn& m, const CipherText& c, int limit = 100000) const - { - const Ec& f = pub.getF(); - Ec c1z; - Ec::mul(c1z, c.c1, z); - if (c1z == c.c2) { - m = 0; - return; - } - Ec 
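// ---------------------------------------------------------------------
// PowerCache above trades memory for decryption speed: it tabulates f^i
// for i in [rangeMin, rangeMax] (negative exponents via -f) so the cached
// dec() can invert f^m with one lookup instead of a linear scan. The same
// idea over a toy group Z_p* (hypothetical p = 467, f = 2, range [0, 20]):
#include <cstdint>
#include <cstdio>
#include <map>

int main() {
    const uint64_t p = 467, f = 2;
    std::map<uint64_t, int> cache;            // maps f^i mod p -> i
    uint64_t x = 1;
    for (int i = 0; i <= 20; i++) {
        cache[x] = i;
        x = (x * f) % p;
    }
    uint64_t target = 1;
    for (int i = 0; i < 13; i++) target = (target * f) % p; // target = f^13
    std::map<uint64_t, int>::const_iterator it = cache.find(target);
    if (it != cache.end()) printf("exponent = %d\n", it->second); // prints 13
    return 0;
}
// ---------------------------------------------------------------------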
t1(c1z); - Ec t2(c.c2); - for (int i = 1; i < limit; i++) { - Ec::add(t1, t1, f); - if (t1 == c.c2) { - m = i; - return; - } - Ec::add(t2, t2, f); - if (t2 == c1z) { - m = -i; - return; - } - } - throw cybozu::Exception("elgamal:PrivateKey:dec:overflow"); - } - /* - powfm = c2 / c1^z = f^m - */ - void getPowerf(Ec& powfm, const CipherText& c) const - { - Ec c1z; - Ec::mul(c1z, c.c1, z); - Ec::sub(powfm, c.c2, c1z); - } - /* - set range of message to decode quickly - */ - void setCache(int rangeMin, int rangeMax) - { - cache.init(pub.getF(), rangeMin, rangeMax); - } - /* - clear cache - */ - void clearCache() - { - cache.clear(); - } - /* - decode message by lookup table if !cache.isEmpty() - brute-force attack otherwise - input : c = (c1, c2) - b : set false if not found - return m - */ - int dec(const CipherText& c, bool *b = 0) const - { - Ec powfm; - getPowerf(powfm, c); - return cache.getExponent(powfm, b); - } - /* - check whether c is encrypted zero message - */ - bool isZeroMessage(const CipherText& c) const - { - Ec c1z; - Ec::mul(c1z, c.c1, z); - return c.c2 == c1z; - } - template - void load(InputStream& is, int ioMode = IoSerialize) - { - pub.load(is, ioMode); - z.load(is, ioMode); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - const char sep = *fp::getIoSeparator(ioMode); - pub.save(os, ioMode); - if (sep) cybozu::writeChar(os, sep); - z.save(os, ioMode); - } - void getStr(std::string& str, int ioMode = 0) const - { - str.clear(); - cybozu::StringOutputStream os(str); - save(os, ioMode); - } - std::string getStr(int ioMode = 0) const - { - std::string str; - getStr(str, ioMode); - return str; - } - void setStr(const std::string& str, int ioMode = 0) - { - cybozu::StringInputStream is(str); - load(is, ioMode); - } - friend inline std::ostream& operator<<(std::ostream& os, const PrivateKey& self) - { - self.save(os, fp::detectIoMode(Ec::getIoMode(), os)); - return os; - } - friend inline std::istream& operator>>(std::istream& is, PrivateKey& self) - { - self.load(is, fp::detectIoMode(Ec::getIoMode(), is)); - return is; - } - std::string toStr() const { return getStr(); } - void fromStr(const std::string& str) { setStr(str); } - }; -}; - -} // mcl diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/fp.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/fp.hpp deleted file mode 100644 index 2e69729dd..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/fp.hpp +++ /dev/null @@ -1,661 +0,0 @@ -#pragma once -/** - @file - @brief finite field class - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#ifndef CYBOZU_DONT_USE_STRING -#include -#endif -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable : 4127) - #pragma warning(disable : 4458) - #ifndef NOMINMAX - #define NOMINMAX - #endif - #ifndef MCL_NO_AUTOLINK - #ifdef NDEBUG - #pragma comment(lib, "mcl.lib") - #else - #pragma comment(lib, "mcl.lib") - #endif - #endif -#endif -#include -#include -#include -#include -#include -#include - -namespace mcl { - -struct FpTag; -struct ZnTag; - -namespace fp { - -// copy src to dst as little endian -void copyUnitToByteAsLE(uint8_t *dst, const Unit *src, size_t byteSize); -// copy src to dst as little endian -void copyByteToUnitAsLE(Unit *dst, const uint8_t *src, size_t byteSize); - -bool copyAndMask(Unit *y, const void *x, size_t xByteSize, const Op& op, MaskMode maskMode); - -uint64_t getUint64(bool *pb, const fp::Block& b); -int64_t 
getInt64(bool *pb, fp::Block& b, const fp::Op& op); - -const char *ModeToStr(Mode mode); - -Mode StrToMode(const char *s); - -#ifndef CYBOZU_DONT_USE_STRING -inline Mode StrToMode(const std::string& s) -{ - return StrToMode(s.c_str()); -} -#endif - -inline void dumpUnit(Unit x) -{ -#if MCL_SIZEOF_UNIT == 4 - printf("%08x", (uint32_t)x); -#else - printf("%016llx", (unsigned long long)x); -#endif -} - -bool isEnableJIT(); // 1st call is not threadsafe - -uint32_t sha256(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize); -uint32_t sha512(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize); - -} // mcl::fp - -template -class FpT : public fp::Serializable, - fp::Operator > > { - typedef fp::Unit Unit; - typedef fp::Operator > Operator; - typedef fp::Serializable, Operator> Serializer; -public: - static const size_t maxSize = (maxBitSize + fp::UnitBitSize - 1) / fp::UnitBitSize; -private: - template friend class FpT; - Unit v_[maxSize]; - static fp::Op op_; - static FpT inv2_; - static int ioMode_; - template friend class FpDblT; - template friend class Fp2T; - template friend struct Fp6T; -public: - typedef FpT BaseFp; - // return pointer to array v_[] - const Unit *getUnit() const { return v_; } - FpT* getFp0() { return this; } - const FpT* getFp0() const { return this; } - static inline size_t getUnitSize() { return op_.N; } - static inline size_t getBitSize() { return op_.bitSize; } - static inline size_t getByteSize() { return (op_.bitSize + 7) / 8; } - static inline const fp::Op& getOp() { return op_; } - void dump() const - { - const size_t N = op_.N; - for (size_t i = 0; i < N; i++) { - fp::dumpUnit(v_[N - 1 - i]); - } - printf("\n"); - } - /* - xi_a is used for Fp2::mul_xi(), where xi = xi_a + i and i^2 = -1 - if xi_a = 0 then asm functions for Fp2 are not generated. 
- */ - static inline void init(bool *pb, int xi_a, const mpz_class& p, fp::Mode mode = fp::FP_AUTO) - { - assert(maxBitSize <= MCL_MAX_BIT_SIZE); - *pb = op_.init(p, maxBitSize, xi_a, mode); - if (!*pb) return; - { // set oneRep - FpT& one = *reinterpret_cast(op_.oneRep); - one.clear(); - one.v_[0] = 1; - one.toMont(); - } - { // set half - mpz_class half = (op_.mp + 1) / 2; - gmp::getArray(pb, op_.half, op_.N, half); - if (!*pb) return; - } - inv(inv2_, 2); -#ifdef MCL_XBYAK_DIRECT_CALL - add = fp::func_ptr_cast(op_.fp_addA_); - if (add == 0) add = addC; - sub = fp::func_ptr_cast(op_.fp_subA_); - if (sub == 0) sub = subC; - neg = fp::func_ptr_cast(op_.fp_negA_); - if (neg == 0) neg = negC; - mul = fp::func_ptr_cast(op_.fp_mulA_); - if (mul == 0) mul = mulC; - sqr = fp::func_ptr_cast(op_.fp_sqrA_); - if (sqr == 0) sqr = sqrC; -#endif - *pb = true; - } - static inline void init(bool *pb, const mpz_class& p, fp::Mode mode = fp::FP_AUTO) - { - init(pb, 0, p, mode); - } - static inline void init(bool *pb, const char *mstr, fp::Mode mode = fp::FP_AUTO) - { - mpz_class p; - gmp::setStr(pb, p, mstr); - if (!*pb) return; - init(pb, p, mode); - } - static inline size_t getModulo(char *buf, size_t bufSize) - { - return gmp::getStr(buf, bufSize, op_.mp); - } - static inline bool isFullBit() { return op_.isFullBit; } - /* - binary patter of p - @note the value of p is zero - */ - static inline const FpT& getP() - { - return *reinterpret_cast(op_.p); - } - bool isOdd() const - { - fp::Block b; - getBlock(b); - return (b.p[0] & 1) == 1; - } - static inline bool squareRoot(FpT& y, const FpT& x) - { - if (isMont()) return op_.sq.get(y, x); - mpz_class mx, my; - bool b = false; - x.getMpz(&b, mx); - if (!b) return false; - b = op_.sq.get(my, mx); - if (!b) return false; - y.setMpz(&b, my); - return b; - } - FpT() {} - FpT(const FpT& x) - { - op_.fp_copy(v_, x.v_); - } - FpT& operator=(const FpT& x) - { - op_.fp_copy(v_, x.v_); - return *this; - } - void clear() - { - op_.fp_clear(v_); - } - FpT(int64_t x) { operator=(x); } - FpT& operator=(int64_t x) - { - if (x == 1) { - op_.fp_copy(v_, op_.oneRep); - } else { - clear(); - if (x) { - int64_t y = x < 0 ? 
-x : x; - if (sizeof(Unit) == 8) { - v_[0] = y; - } else { - v_[0] = (uint32_t)y; - v_[1] = (uint32_t)(y >> 32); - } - if (x < 0) neg(*this, *this); - toMont(); - } - } - return *this; - } - static inline bool isMont() { return op_.isMont; } - /* - convert normal value to Montgomery value - do nothing is !isMont() - */ - void toMont() - { - if (isMont()) op_.toMont(v_, v_); - } - /* - convert Montgomery value to normal value - do nothing is !isMont() - */ - void fromMont() - { - if (isMont()) op_.fromMont(v_, v_); - } - template - void load(bool *pb, InputStream& is, int ioMode) - { - bool isMinus = false; - *pb = false; - if (ioMode & (IoArray | IoArrayRaw | IoSerialize | IoSerializeHexStr)) { - const size_t n = getByteSize(); - v_[op_.N - 1] = 0; - size_t readSize; - if (ioMode & IoSerializeHexStr) { - readSize = mcl::fp::readHexStr(v_, n, is); - } else { - readSize = cybozu::readSome(v_, n, is); - } - if (readSize != n) return; - } else { - char buf[1024]; - size_t n = fp::local::loadWord(buf, sizeof(buf), is); - if (n == 0) return; - n = fp::strToArray(&isMinus, v_, op_.N, buf, n, ioMode); - if (n == 0) return; - for (size_t i = n; i < op_.N; i++) v_[i] = 0; - } - if (fp::isGreaterOrEqualArray(v_, op_.p, op_.N)) { - return; - } - if (isMinus) { - neg(*this, *this); - } - if (!(ioMode & IoArrayRaw)) { - toMont(); - } - *pb = true; - } - template - void save(bool *pb, OutputStream& os, int ioMode) const - { - const size_t n = getByteSize(); - if (ioMode & (IoArray | IoArrayRaw | IoSerialize | IoSerializeHexStr)) { - if (ioMode & IoArrayRaw) { - cybozu::write(pb, os, v_, n); - } else { - fp::Block b; - getBlock(b); - if (ioMode & IoSerializeHexStr) { - mcl::fp::writeHexStr(pb, os, b.p, n); - } else { - cybozu::write(pb, os, b.p, n); - } - } - return; - } - fp::Block b; - getBlock(b); - // use low 8-bit ioMode for (base, withPrefix) - char buf[2048]; - size_t len = mcl::fp::arrayToStr(buf, sizeof(buf), b.p, b.n, ioMode & 31, (ioMode & IoPrefix) != 0); - if (len == 0) { - *pb = false; - return; - } - cybozu::write(pb, os, buf + sizeof(buf) - len, len); - } - /* - mode = Mod : set x mod p if sizeof(S) * n <= 64 else error - */ - template - void setArray(bool *pb, const S *x, size_t n, mcl::fp::MaskMode mode = fp::NoMask) - { - *pb = fp::copyAndMask(v_, x, sizeof(S) * n, op_, mode); - toMont(); - } - /* - mask x with (1 << bitLen) and subtract p if x >= p - */ - template - void setArrayMaskMod(const S *x, size_t n) - { - fp::copyAndMask(v_, x, sizeof(S) * n, op_, fp::MaskAndMod); - toMont(); - } - - /* - mask x with (1 << (bitLen - 1)) - 1 if x >= p - */ - template - void setArrayMask(const S *x, size_t n) - { - fp::copyAndMask(v_, x, sizeof(S) * n, op_, fp::SmallMask); - toMont(); - } - void getBlock(fp::Block& b) const - { - b.n = op_.N; - if (isMont()) { - op_.fromMont(b.v_, v_); - b.p = &b.v_[0]; - } else { - b.p = &v_[0]; - } - } - void setByCSPRNG(bool *pb, fp::RandGen rg = fp::RandGen()) - { - if (rg.isZero()) rg = fp::RandGen::get(); - rg.read(pb, v_, op_.N * sizeof(Unit)); // byte size - if (!pb) return; - setArrayMask(v_, op_.N); - } -#ifndef CYBOZU_DONT_USE_EXCEPTION - void setByCSPRNG(fp::RandGen rg = fp::RandGen()) - { - bool b; - setByCSPRNG(&b, rg); - if (!b) throw cybozu::Exception("setByCSPRNG"); - } -#endif - void setRand(fp::RandGen rg = fp::RandGen()) // old api - { - setByCSPRNG(rg); - } - /* - hash msg and mask with (1 << (bitLen - 1)) - 1 - */ - void setHashOf(const void *msg, size_t msgSize) - { - char buf[MCL_MAX_HASH_BIT_SIZE / 8]; - uint32_t size = op_.hash(buf, 
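// ---------------------------------------------------------------------
// toMont()/fromMont() above keep field elements in Montgomery form so that
// fp_mul can use Montgomery reduction instead of division. A self-contained
// single-limb sketch (R = 2^32, hypothetical modulus n = 2^31 - 1; mcl does
// the same with multi-limb arrays and, optionally, JIT-generated code):
#include <cstdint>
#include <cstdio>

// REDC(T) = T / R mod n for T < n * R, given ninv = -n^{-1} mod 2^32
static uint32_t redc(uint64_t T, uint32_t n, uint32_t ninv) {
    uint32_t m = (uint32_t)T * ninv;             // m = T * (-n^{-1}) mod R
    uint64_t t = (T + (uint64_t)m * n) >> 32;    // exact division by R
    return t >= n ? (uint32_t)(t - n) : (uint32_t)t;
}

int main() {
    const uint32_t n = 2147483647u;              // odd toy modulus
    uint32_t inv = 1;                            // Newton: inv = n^{-1} mod 2^32
    for (int i = 0; i < 5; i++) inv *= 2u - n * inv;
    const uint32_t ninv = 0u - inv;
    const uint64_t r = (1ULL << 32) % n;         // R mod n
    const uint32_t r2 = (uint32_t)((r * r) % n); // R^2 mod n
    uint32_t a = 123456789u, b = 987654321u;
    uint32_t am = redc((uint64_t)a * r2, n, ninv);   // toMont: a -> aR mod n
    uint32_t bm = redc((uint64_t)b * r2, n, ninv);
    uint32_t cm = redc((uint64_t)am * bm, n, ninv);  // abR mod n
    uint32_t c  = redc(cm, n, ninv);                 // fromMont
    printf("%u == %u\n", c, (uint32_t)(((uint64_t)a * b) % n));
    return 0;
}
// ---------------------------------------------------------------------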
static_cast(sizeof(buf)), msg, static_cast(msgSize)); - setArrayMask(buf, size); - } - void getMpz(bool *pb, mpz_class& x) const - { - fp::Block b; - getBlock(b); - gmp::setArray(pb, x, b.p, b.n); - } - void setMpz(bool *pb, const mpz_class& x) - { - if (x < 0) { - *pb = false; - return; - } - setArray(pb, gmp::getUnit(x), gmp::getUnitSize(x)); - } -#ifdef MCL_XBYAK_DIRECT_CALL - static void (*add)(FpT& z, const FpT& x, const FpT& y); - static inline void addC(FpT& z, const FpT& x, const FpT& y) { op_.fp_add(z.v_, x.v_, y.v_, op_.p); } - static void (*sub)(FpT& z, const FpT& x, const FpT& y); - static inline void subC(FpT& z, const FpT& x, const FpT& y) { op_.fp_sub(z.v_, x.v_, y.v_, op_.p); } - static void (*neg)(FpT& y, const FpT& x); - static inline void negC(FpT& y, const FpT& x) { op_.fp_neg(y.v_, x.v_, op_.p); } - static void (*mul)(FpT& z, const FpT& x, const FpT& y); - static inline void mulC(FpT& z, const FpT& x, const FpT& y) { op_.fp_mul(z.v_, x.v_, y.v_, op_.p); } - static void (*sqr)(FpT& y, const FpT& x); - static inline void sqrC(FpT& y, const FpT& x) { op_.fp_sqr(y.v_, x.v_, op_.p); } -#else - static inline void add(FpT& z, const FpT& x, const FpT& y) { op_.fp_add(z.v_, x.v_, y.v_, op_.p); } - static inline void sub(FpT& z, const FpT& x, const FpT& y) { op_.fp_sub(z.v_, x.v_, y.v_, op_.p); } - static inline void neg(FpT& y, const FpT& x) { op_.fp_neg(y.v_, x.v_, op_.p); } - static inline void mul(FpT& z, const FpT& x, const FpT& y) { op_.fp_mul(z.v_, x.v_, y.v_, op_.p); } - static inline void sqr(FpT& y, const FpT& x) { op_.fp_sqr(y.v_, x.v_, op_.p); } -#endif - static inline void addPre(FpT& z, const FpT& x, const FpT& y) { op_.fp_addPre(z.v_, x.v_, y.v_); } - static inline void subPre(FpT& z, const FpT& x, const FpT& y) { op_.fp_subPre(z.v_, x.v_, y.v_); } - static inline void mulUnit(FpT& z, const FpT& x, const Unit y) - { - if (mulSmallUnit(z, x, y)) return; - op_.fp_mulUnit(z.v_, x.v_, y, op_.p); - } - static inline void inv(FpT& y, const FpT& x) { op_.fp_invOp(y.v_, x.v_, op_); } - static inline void divBy2(FpT& y, const FpT& x) - { -#if 0 - mul(y, x, inv2_); -#else - bool odd = (x.v_[0] & 1) != 0; - op_.fp_shr1(y.v_, x.v_); - if (odd) { - op_.fp_addPre(y.v_, y.v_, op_.half); - } -#endif - } - static inline void divBy4(FpT& y, const FpT& x) - { - divBy2(y, x); // QQQ : optimize later - divBy2(y, y); - } - bool isZero() const { return op_.fp_isZero(v_); } - bool isOne() const { return fp::isEqualArray(v_, op_.oneRep, op_.N); } - static const inline FpT& one() { return *reinterpret_cast(op_.oneRep); } - /* - half = (p + 1) / 2 - return true if half <= x < p - return false if 0 <= x < half - */ - bool isNegative() const - { - fp::Block b; - getBlock(b); - return fp::isGreaterOrEqualArray(b.p, op_.half, op_.N); - } - bool isValid() const - { - return fp::isLessArray(v_, op_.p, op_.N); - } - uint64_t getUint64(bool *pb) const - { - fp::Block b; - getBlock(b); - return fp::getUint64(pb, b); - } - int64_t getInt64(bool *pb) const - { - fp::Block b; - getBlock(b); - return fp::getInt64(pb, b, op_); - } - bool operator==(const FpT& rhs) const { return fp::isEqualArray(v_, rhs.v_, op_.N); } - bool operator!=(const FpT& rhs) const { return !operator==(rhs); } - /* - @note - this compare functions is slow because of calling mul if isMont is true. 
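// ---------------------------------------------------------------------
// divBy2 above halves without an inversion: for odd p, (p + 1)/2 = 2^{-1}
// (this is what op_.half stores), so x/2 = (x >> 1) + (x odd ? half : 0),
// and the result stays below p, which is why the non-reducing fp_addPre
// suffices. A toy check over a hypothetical p = 101:
#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t p = 101, half = (p + 1) / 2;   // half = 2^{-1} mod p = 51
    for (uint64_t x = 0; x < p; x++) {
        uint64_t y = (x >> 1) + ((x & 1) ? half : 0);
        if (y * 2 % p != x) { printf("mismatch at %u\n", (unsigned)x); return 1; }
    }
    printf("(x >> 1) + odd * half == x / 2 for all x mod %u\n", (unsigned)p);
    return 0;
}
// ---------------------------------------------------------------------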
- */ - static inline int compare(const FpT& x, const FpT& y) - { - fp::Block xb, yb; - x.getBlock(xb); - y.getBlock(yb); - return fp::compareArray(xb.p, yb.p, op_.N); - } - bool isLess(const FpT& rhs) const - { - fp::Block xb, yb; - getBlock(xb); - rhs.getBlock(yb); - return fp::isLessArray(xb.p, yb.p, op_.N); - } - bool operator<(const FpT& rhs) const { return isLess(rhs); } - bool operator>=(const FpT& rhs) const { return !operator<(rhs); } - bool operator>(const FpT& rhs) const { return rhs < *this; } - bool operator<=(const FpT& rhs) const { return !operator>(rhs); } - /* - @note - return unexpected order if isMont is set. - */ - static inline int compareRaw(const FpT& x, const FpT& y) - { - return fp::compareArray(x.v_, y.v_, op_.N); - } - bool isLessRaw(const FpT& rhs) const - { - return fp::isLessArray(v_, rhs.v_, op_.N); - } - /* - set IoMode for operator<<(), or operator>>() - */ - static inline void setIoMode(int ioMode) - { - ioMode_ = ioMode; - } - static inline int getIoMode() { return ioMode_; } - static inline size_t getModBitLen() { return getBitSize(); } - static inline void setHashFunc(uint32_t hash(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize)) - { - op_.hash = hash; - } -#ifndef CYBOZU_DONT_USE_STRING - explicit FpT(const std::string& str, int base = 0) - { - Serializer::setStr(str, base); - } - static inline void getModulo(std::string& pstr) - { - gmp::getStr(pstr, op_.mp); - } - static std::string getModulo() - { - std::string s; - getModulo(s); - return s; - } - void setHashOf(const std::string& msg) - { - setHashOf(msg.data(), msg.size()); - } - // backward compatibility - static inline void setModulo(const std::string& mstr, fp::Mode mode = fp::FP_AUTO) - { - init(mstr, mode); - } - friend inline std::ostream& operator<<(std::ostream& os, const FpT& self) - { - self.save(os, fp::detectIoMode(getIoMode(), os)); - return os; - } - friend inline std::istream& operator>>(std::istream& is, FpT& self) - { - self.load(is, fp::detectIoMode(getIoMode(), is)); - return is; - } -#endif -#ifndef CYBOZU_DONT_USE_EXCEPTION - static inline void init(int xi_a, const mpz_class& p, fp::Mode mode = fp::FP_AUTO) - { - bool b; - init(&b, xi_a, p, mode); - if (!b) throw cybozu::Exception("Fp:init"); - } - static inline void init(int xi_a, const std::string& mstr, fp::Mode mode = fp::FP_AUTO) - { - mpz_class p; - gmp::setStr(p, mstr); - init(xi_a, p, mode); - } - static inline void init(const mpz_class& p, fp::Mode mode = fp::FP_AUTO) - { - init(0, p, mode); - } - static inline void init(const std::string& mstr, fp::Mode mode = fp::FP_AUTO) - { - init(0, mstr, mode); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - bool b; - save(&b, os, ioMode); - if (!b) throw cybozu::Exception("fp:save") << ioMode; - } - template - void load(InputStream& is, int ioMode = IoSerialize) - { - bool b; - load(&b, is, ioMode); - if (!b) throw cybozu::Exception("fp:load") << ioMode; - } - /* - throw exception if x >= p - */ - template - void setArray(const S *x, size_t n) - { - bool b; - setArray(&b, x, n); - if (!b) throw cybozu::Exception("Fp:setArray"); - } - void setMpz(const mpz_class& x) - { - bool b; - setMpz(&b, x); - if (!b) throw cybozu::Exception("Fp:setMpz"); - } - uint64_t getUint64() const - { - bool b; - uint64_t v = getUint64(&b); - if (!b) throw cybozu::Exception("Fp:getUint64:large value"); - return v; - } - int64_t getInt64() const - { - bool b; - int64_t v = getInt64(&b); - if (!b) throw cybozu::Exception("Fp:getInt64:large value"); - 
return v; - } - void getMpz(mpz_class& x) const - { - bool b; - getMpz(&b, x); - if (!b) throw cybozu::Exception("Fp:getMpz"); - } - mpz_class getMpz() const - { - mpz_class x; - getMpz(x); - return x; - } -#endif -}; - -template fp::Op FpT::op_; -template FpT FpT::inv2_; -template int FpT::ioMode_ = IoAuto; -#ifdef MCL_XBYAK_DIRECT_CALL -template void (*FpT::add)(FpT& z, const FpT& x, const FpT& y); -template void (*FpT::sub)(FpT& z, const FpT& x, const FpT& y); -template void (*FpT::neg)(FpT& y, const FpT& x); -template void (*FpT::mul)(FpT& z, const FpT& x, const FpT& y); -template void (*FpT::sqr)(FpT& y, const FpT& x); -#endif - -} // mcl - -#ifdef CYBOZU_USE_BOOST -namespace mcl { - -template -size_t hash_value(const mcl::FpT& x, size_t v = 0) -{ - return static_cast(cybozu::hash64(x.getUnit(), x.getUnitSize(), v)); -} - -} -#else -namespace std { CYBOZU_NAMESPACE_TR1_BEGIN - -template -struct hash > { - size_t operator()(const mcl::FpT& x, uint64_t v = 0) const - { - return static_cast(cybozu::hash64(x.getUnit(), x.getUnitSize(), v)); - } -}; - -CYBOZU_NAMESPACE_TR1_END } // std::tr1 -#endif - -#ifdef _MSC_VER - #pragma warning(pop) -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/fp_tower.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/fp_tower.hpp deleted file mode 100644 index 95722e2d5..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/fp_tower.hpp +++ /dev/null @@ -1,1364 +0,0 @@ -#pragma once -/** - @file - @brief finite field extension class - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include - -namespace mcl { - -template -class FpDblT : public fp::Serializable > { - typedef fp::Unit Unit; - Unit v_[Fp::maxSize * 2]; -public: - static size_t getUnitSize() { return Fp::op_.N * 2; } - FpDblT() : v_() - { - } - FpDblT(const FpDblT& rhs) - { - const size_t n = getUnitSize(); - for (size_t i = 0; i < n; i++) { - v_[i] = rhs.v_[i]; - } - } - void dump() const - { - const size_t n = getUnitSize(); - for (size_t i = 0; i < n; i++) { - mcl::fp::dumpUnit(v_[n - 1 - i]); - } - printf("\n"); - } - template - void save(bool *pb, OutputStream& os, int) const - { - char buf[1024]; - size_t n = mcl::fp::arrayToHex(buf, sizeof(buf), v_, getUnitSize()); - if (n == 0) { - *pb = false; - return; - } - cybozu::write(pb, os, buf + sizeof(buf) - n, sizeof(buf)); - } - template - void load(bool *pb, InputStream& is, int) - { - char buf[1024]; - *pb = false; - size_t n = fp::local::loadWord(buf, sizeof(buf), is); - if (n == 0) return; - n = fp::hexToArray(v_, getUnitSize(), buf, n); - if (n == 0) return; - for (size_t i = n; i < getUnitSize(); i++) v_[i] = 0; - *pb = true; - } -#ifndef CYBOZU_DONT_USE_EXCEPTION - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - bool b; - save(&b, os, ioMode); - if (!b) throw cybozu::Exception("FpDblT:save") << ioMode; - } - template - void load(InputStream& is, int ioMode = IoSerialize) - { - bool b; - load(&b, is, ioMode); - if (!b) throw cybozu::Exception("FpDblT:load") << ioMode; - } - void getMpz(mpz_class& x) const - { - bool b; - getMpz(&b, x); - if (!b) throw cybozu::Exception("FpDblT:getMpz"); - } - mpz_class getMpz() const - { - mpz_class x; - getMpz(x); - return x; - } -#endif - void clear() - { - const size_t n = getUnitSize(); - for (size_t i = 0; i < n; i++) { - v_[i] = 0; - } - } - FpDblT& operator=(const FpDblT& rhs) - { - const size_t n = getUnitSize(); - for (size_t i = 0; i < n; i++) { - v_[i] 
= rhs.v_[i]; - } - return *this; - } - // QQQ : does not check range of x strictly(use for debug) - void setMpz(const mpz_class& x) - { - assert(x >= 0); - const size_t xn = gmp::getUnitSize(x); - const size_t N2 = getUnitSize(); - if (xn > N2) { - assert(0); - return; - } - memcpy(v_, gmp::getUnit(x), xn * sizeof(Unit)); - memset(v_ + xn, 0, (N2 - xn) * sizeof(Unit)); - } - void getMpz(bool *pb, mpz_class& x) const - { - gmp::setArray(pb, x, v_, Fp::op_.N * 2); - } -#ifdef MCL_XBYAK_DIRECT_CALL - static void (*add)(FpDblT& z, const FpDblT& x, const FpDblT& y); - static void (*sub)(FpDblT& z, const FpDblT& x, const FpDblT& y); - static void (*mod)(Fp& z, const FpDblT& xy); - static void (*addPre)(FpDblT& z, const FpDblT& x, const FpDblT& y); - static void (*subPre)(FpDblT& z, const FpDblT& x, const FpDblT& y); - static void addC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_add(z.v_, x.v_, y.v_, Fp::op_.p); } - static void subC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_sub(z.v_, x.v_, y.v_, Fp::op_.p); } - static void modC(Fp& z, const FpDblT& xy) { Fp::op_.fpDbl_mod(z.v_, xy.v_, Fp::op_.p); } - static void addPreC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_addPre(z.v_, x.v_, y.v_); } - static void subPreC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_subPre(z.v_, x.v_, y.v_); } -#else - static void add(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_add(z.v_, x.v_, y.v_, Fp::op_.p); } - static void sub(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_sub(z.v_, x.v_, y.v_, Fp::op_.p); } - static void mod(Fp& z, const FpDblT& xy) { Fp::op_.fpDbl_mod(z.v_, xy.v_, Fp::op_.p); } - static void addPre(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_addPre(z.v_, x.v_, y.v_); } - static void subPre(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_subPre(z.v_, x.v_, y.v_); } -#endif - static void mulPreC(FpDblT& xy, const Fp& x, const Fp& y) { Fp::op_.fpDbl_mulPre(xy.v_, x.v_, y.v_); } - static void sqrPreC(FpDblT& xx, const Fp& x) { Fp::op_.fpDbl_sqrPre(xx.v_, x.v_); } - /* - mul(z, x, y) = mulPre(xy, x, y) + mod(z, xy) - */ - static void (*mulPre)(FpDblT& xy, const Fp& x, const Fp& y); - static void (*sqrPre)(FpDblT& xx, const Fp& x); - static void mulUnit(FpDblT& z, const FpDblT& x, Unit y) - { - if (mulSmallUnit(z, x, y)) return; - assert(0); // not supported y - } - static void init() - { - const mcl::fp::Op& op = Fp::getOp(); -#ifdef MCL_XBYAK_DIRECT_CALL - add = fp::func_ptr_cast(op.fpDbl_addA_); - if (add == 0) add = addC; - sub = fp::func_ptr_cast(op.fpDbl_subA_); - if (sub == 0) sub = subC; - mod = fp::func_ptr_cast(op.fpDbl_modA_); - if (mod == 0) mod = modC; - addPre = fp::func_ptr_cast(op.fpDbl_addPre); - if (addPre == 0) addPre = addPreC; - subPre = fp::func_ptr_cast(op.fpDbl_subPre); - if (subPre == 0) subPre = subPreC; -#endif - if (op.fpDbl_mulPreA_) { - mulPre = fp::func_ptr_cast(op.fpDbl_mulPreA_); - } else { - mulPre = mulPreC; - } - if (op.fpDbl_sqrPreA_) { - sqrPre = fp::func_ptr_cast(op.fpDbl_sqrPreA_); - } else { - sqrPre = sqrPreC; - } - } - void operator+=(const FpDblT& x) { add(*this, *this, x); } - void operator-=(const FpDblT& x) { sub(*this, *this, x); } -}; - -#ifdef MCL_XBYAK_DIRECT_CALL -template void (*FpDblT::add)(FpDblT&, const FpDblT&, const FpDblT&); -template void (*FpDblT::sub)(FpDblT&, const FpDblT&, const FpDblT&); -template void (*FpDblT::mod)(Fp&, const FpDblT&); -template void (*FpDblT::addPre)(FpDblT&, const FpDblT&, const FpDblT&); 
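// ---------------------------------------------------------------------
// FpDblT holds unreduced double-width values: mulPre/addPre/subPre skip the
// modular reduction so several partial products can share one final mod()
// (Fp2T::mulC below reduces a whole Fp2 product with just two mods). The
// principle with a 32-bit "Fp" and a 64-bit "FpDbl" (hypothetical
// p = 1000000007; mcl uses multi-limb arrays instead):
#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t p = 1000000007ULL;
    const uint64_t a = 999999999, b = 123456789, c = 555555555, d = 777777777;
    uint64_t ab = a * b;        // "mulPre": full 60-bit product, unreduced
    uint64_t cd = c * d;
    uint64_t sum = ab + cd;     // "addPre": still unreduced, no overflow here
    uint64_t lazy = sum % p;    // one "mod" at the very end
    uint64_t eager = ((a * b) % p + (c * d) % p) % p; // reduce at every step
    printf("%llu == %llu\n", (unsigned long long)lazy, (unsigned long long)eager);
    return 0;
}
// ---------------------------------------------------------------------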
-template void (*FpDblT::subPre)(FpDblT&, const FpDblT&, const FpDblT&); -#endif -template void (*FpDblT::mulPre)(FpDblT&, const Fp&, const Fp&); -template void (*FpDblT::sqrPre)(FpDblT&, const Fp&); - -template struct Fp12T; -template class BNT; -template struct Fp2DblT; -/* - beta = -1 - Fp2 = F[i] / (i^2 + 1) - x = a + bi -*/ -template -class Fp2T : public fp::Serializable, - fp::Operator > > { - typedef _Fp Fp; - typedef fp::Unit Unit; - typedef FpDblT FpDbl; - typedef Fp2DblT Fp2Dbl; - static const size_t gN = 5; - /* - g = xi^((p - 1) / 6) - g[] = { g^2, g^4, g^1, g^3, g^5 } - */ - static Fp2T g[gN]; - static Fp2T g2[gN]; - static Fp2T g3[gN]; -public: - static const Fp2T *get_gTbl() { return &g[0]; } - static const Fp2T *get_g2Tbl() { return &g2[0]; } - static const Fp2T *get_g3Tbl() { return &g3[0]; } - typedef typename Fp::BaseFp BaseFp; - static const size_t maxSize = Fp::maxSize * 2; - static inline size_t getByteSize() { return Fp::getByteSize() * 2; } - void dump() const - { - a.dump(); - b.dump(); - } - Fp a, b; - Fp2T() { } - Fp2T(int64_t a) : a(a), b(0) { } - Fp2T(const Fp& a, const Fp& b) : a(a), b(b) { } - Fp2T(int64_t a, int64_t b) : a(a), b(b) { } - Fp* getFp0() { return &a; } - const Fp* getFp0() const { return &a; } - const Unit* getUnit() const { return a.getUnit(); } - void clear() - { - a.clear(); - b.clear(); - } - void set(const Fp &a_, const Fp &b_) - { - a = a_; - b = b_; - } -#ifdef MCL_XBYAK_DIRECT_CALL - static void (*add)(Fp2T& z, const Fp2T& x, const Fp2T& y); - static void (*sub)(Fp2T& z, const Fp2T& x, const Fp2T& y); - static void (*neg)(Fp2T& y, const Fp2T& x); - static void (*mul)(Fp2T& z, const Fp2T& x, const Fp2T& y); - static void (*sqr)(Fp2T& y, const Fp2T& x); -#else - static void add(Fp2T& z, const Fp2T& x, const Fp2T& y) { addC(z, x, y); } - static void sub(Fp2T& z, const Fp2T& x, const Fp2T& y) { subC(z, x, y); } - static void neg(Fp2T& y, const Fp2T& x) { negC(y, x); } - static void mul(Fp2T& z, const Fp2T& x, const Fp2T& y) { mulC(z, x, y); } - static void sqr(Fp2T& y, const Fp2T& x) { sqrC(y, x); } -#endif - static void (*mul_xi)(Fp2T& y, const Fp2T& x); - static void addPre(Fp2T& z, const Fp2T& x, const Fp2T& y) { Fp::addPre(z.a, x.a, y.a); Fp::addPre(z.b, x.b, y.b); } - static void inv(Fp2T& y, const Fp2T& x) { Fp::op_.fp2_inv(y.a.v_, x.a.v_); } - static void divBy2(Fp2T& y, const Fp2T& x) - { - Fp::divBy2(y.a, x.a); - Fp::divBy2(y.b, x.b); - } - static void divBy4(Fp2T& y, const Fp2T& x) - { - Fp::divBy4(y.a, x.a); - Fp::divBy4(y.b, x.b); - } - static void mulFp(Fp2T& z, const Fp2T& x, const Fp& y) - { - Fp::mul(z.a, x.a, y); - Fp::mul(z.b, x.b, y); - } - template - void setArray(bool *pb, const S *buf, size_t n) - { - assert((n & 1) == 0); - n /= 2; - a.setArray(pb, buf, n); - if (!*pb) return; - b.setArray(pb, buf + n, n); - } - template - void load(bool *pb, InputStream& is, int ioMode) - { - a.load(pb, is, ioMode); - if (!*pb) return; - b.load(pb, is, ioMode); - } - /* - Fp2T = + ' ' + - */ - template - void save(bool *pb, OutputStream& os, int ioMode) const - { - const char sep = *fp::getIoSeparator(ioMode); - a.save(pb, os, ioMode); - if (!*pb) return; - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - b.save(pb, os, ioMode); - } - bool isZero() const { return a.isZero() && b.isZero(); } - bool isOne() const { return a.isOne() && b.isZero(); } - bool operator==(const Fp2T& rhs) const { return a == rhs.a && b == rhs.b; } - bool operator!=(const Fp2T& rhs) const { return !operator==(rhs); } - /* - return true is 
a is odd (do not consider b) - this function is for only compressed reprezentation of EC - isOdd() is not good naming. QQQ - */ - bool isOdd() const { return a.isOdd(); } - /* - (a + bi)^2 = (a^2 - b^2) + 2ab i = c + di - A = a^2 - B = b^2 - A = (c +/- sqrt(c^2 + d^2))/2 - b = d / 2a - */ - static inline bool squareRoot(Fp2T& y, const Fp2T& x) - { - Fp t1, t2; - if (x.b.isZero()) { - if (Fp::squareRoot(t1, x.a)) { - y.a = t1; - y.b.clear(); - } else { - bool b = Fp::squareRoot(t1, -x.a); - assert(b); (void)b; - y.a.clear(); - y.b = t1; - } - return true; - } - Fp::sqr(t1, x.a); - Fp::sqr(t2, x.b); - t1 += t2; // c^2 + d^2 - if (!Fp::squareRoot(t1, t1)) return false; - Fp::add(t2, x.a, t1); - Fp::divBy2(t2, t2); - if (!Fp::squareRoot(t2, t2)) { - Fp::sub(t2, x.a, t1); - Fp::divBy2(t2, t2); - bool b = Fp::squareRoot(t2, t2); - assert(b); (void)b; - } - y.a = t2; - t2 += t2; - Fp::inv(t2, t2); - Fp::mul(y.b, x.b, t2); - return true; - } - static void inline norm(Fp& y, const Fp2T& x) - { - Fp aa, bb; - Fp::sqr(aa, x.a); - Fp::sqr(bb, x.b); - Fp::add(y, aa, bb); - } - /* - Frobenius - i^2 = -1 - (a + bi)^p = a + bi^p in Fp - = a + bi if p = 1 mod 4 - = a - bi if p = 3 mod 4 - */ - static void Frobenius(Fp2T& y, const Fp2T& x) - { - if (Fp::getOp().pmod4 == 1) { - if (&y != &x) { - y = x; - } - } else { - if (&y != &x) { - y.a = x.a; - } - Fp::neg(y.b, x.b); - } - } - - static uint32_t get_xi_a() { return Fp::getOp().xi_a; } - static void init() - { -// assert(Fp::maxSize <= 256); - mcl::fp::Op& op = Fp::op_; - assert(op.xi_a); - mul_xi = 0; -#ifdef MCL_XBYAK_DIRECT_CALL - add = fp::func_ptr_cast(op.fp2_addA_); - if (add == 0) add = addC; - sub = fp::func_ptr_cast(op.fp2_subA_); - if (sub == 0) sub = subC; - neg = fp::func_ptr_cast(op.fp2_negA_); - if (neg == 0) neg = negC; - mul = fp::func_ptr_cast(op.fp2_mulA_); - if (mul == 0) mul = mulC; - sqr = fp::func_ptr_cast(op.fp2_sqrA_); - if (sqr == 0) sqr = sqrC; - mul_xi = fp::func_ptr_cast(op.fp2_mul_xiA_); -#endif - op.fp2_inv = fp2_invW; - if (mul_xi == 0) { - if (op.xi_a == 1) { - mul_xi = fp2_mul_xi_1_1iC; - } else { - mul_xi = fp2_mul_xiC; - } - } - FpDblT::init(); - Fp2DblT::init(); - // call init before Fp2::pow because FpDbl is used in Fp2T - const Fp2T xi(op.xi_a, 1); - const mpz_class& p = Fp::getOp().mp; - Fp2T::pow(g[0], xi, (p - 1) / 6); // g = xi^((p-1)/6) - for (size_t i = 1; i < gN; i++) { - g[i] = g[i - 1] * g[0]; - } - /* - permutate [0, 1, 2, 3, 4] => [1, 3, 0, 2, 4] - g[0] = g^2 - g[1] = g^4 - g[2] = g^1 - g[3] = g^3 - g[4] = g^5 - */ - { - Fp2T t = g[0]; - g[0] = g[1]; - g[1] = g[3]; - g[3] = g[2]; - g[2] = t; - } - for (size_t i = 0; i < gN; i++) { - Fp2T t(g[i].a, g[i].b); - if (Fp::getOp().pmod4 == 3) Fp::neg(t.b, t.b); - Fp2T::mul(g2[i], t, g[i]); - g3[i] = g[i] * g2[i]; - } - } -#ifndef CYBOZU_DONT_USE_EXCEPTION - template - void load(InputStream& is, int ioMode = IoSerialize) - { - bool b; - load(&b, is, ioMode); - if (!b) throw cybozu::Exception("Fp2T:load"); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - bool b; - save(&b, os, ioMode); - if (!b) throw cybozu::Exception("Fp2T:save"); - } - template - void setArray(const S *buf, size_t n) - { - bool b; - setArray(&b, buf, n); - if (!b) throw cybozu::Exception("Fp2T:setArray"); - } -#endif -#ifndef CYBOZU_DONT_USE_STRING - Fp2T(const std::string& a, const std::string& b, int base = 0) : a(a, base), b(b, base) {} - friend std::istream& operator>>(std::istream& is, Fp2T& self) - { - self.load(is, fp::detectIoMode(Fp::BaseFp::getIoMode(), 
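// ---------------------------------------------------------------------
// Fp2T::Frobenius above relies on (a + bi)^p = a + b i^p over Fp, which is
// the conjugate a - bi exactly when p % 4 == 3 (then i^p = -i). A toy check
// in F_7[i] (hypothetical p = 7), raising x to the p-th power directly:
#include <cstdio>

struct F2 { int a, b; };                 // a + b*i, i^2 = -1, coords mod P

static const int P = 7;                  // toy prime with P % 4 == 3

static F2 mul(F2 x, F2 y) {
    F2 z;
    z.a = ((x.a * y.a - x.b * y.b) % P + P) % P;
    z.b = (x.a * y.b + x.b * y.a) % P;
    return z;
}

int main() {
    F2 x = {3, 5}, y = {1, 0};
    for (int i = 0; i < P; i++) y = mul(y, x);   // y = x^P
    printf("x^P = %d + %di, conj(x) = %d + %di\n", y.a, y.b, x.a, (P - x.b) % P);
    return 0;                                    // both print 3 + 2i
}
// ---------------------------------------------------------------------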
is)); - return is; - } - friend std::ostream& operator<<(std::ostream& os, const Fp2T& self) - { - self.save(os, fp::detectIoMode(Fp::BaseFp::getIoMode(), os)); - return os; - } -#endif -private: - /* - default Fp2T operator - Fp2T = Fp[i]/(i^2 + 1) - */ - static void addC(Fp2T& z, const Fp2T& x, const Fp2T& y) - { - Fp::add(z.a, x.a, y.a); - Fp::add(z.b, x.b, y.b); - } - static void subC(Fp2T& z, const Fp2T& x, const Fp2T& y) - { - Fp::sub(z.a, x.a, y.a); - Fp::sub(z.b, x.b, y.b); - } - static void negC(Fp2T& y, const Fp2T& x) - { - Fp::neg(y.a, x.a); - Fp::neg(y.b, x.b); - } -#if 0 - /* - x = a + bi, y = c + di, i^2 = -1 - z = xy = (a + bi)(c + di) = (ac - bd) + (ad + bc)i - ad+bc = (a + b)(c + d) - ac - bd - # of mod = 3 - */ - static void fp2_mulW(Unit *z, const Unit *x, const Unit *y) - { - const Fp *px = reinterpret_cast(x); - const Fp *py = reinterpret_cast(y); - const Fp& a = px[0]; - const Fp& b = px[1]; - const Fp& c = py[0]; - const Fp& d = py[1]; - Fp *pz = reinterpret_cast(z); - Fp t1, t2, ac, bd; - Fp::add(t1, a, b); - Fp::add(t2, c, d); - t1 *= t2; // (a + b)(c + d) - Fp::mul(ac, a, c); - Fp::mul(bd, b, d); - Fp::sub(pz[0], ac, bd); // ac - bd - Fp::sub(pz[1], t1, ac); - pz[1] -= bd; - } - static void fp2_mulNFW(Fp2T& z, const Fp2T& x, const Fp2T& y) - { - const fp::Op& op = Fp::op_; - op.fp2_mulNF((Unit*)&z, (const Unit*)&x, (const Unit*)&y, op.p); - } -#endif - static void mulC(Fp2T& z, const Fp2T& x, const Fp2T& y) - { - Fp2Dbl d; - Fp2Dbl::mulPre(d, x, y); - FpDbl::mod(z.a, d.a); - FpDbl::mod(z.b, d.b); - } - /* - x = a + bi, i^2 = -1 - y = x^2 = (a + bi)^2 = (a + b)(a - b) + 2abi - */ - static void sqrC(Fp2T& y, const Fp2T& x) - { - const Fp& a = x.a; - const Fp& b = x.b; -#if 1 // faster than using FpDbl - Fp t1, t2, t3; - Fp::add(t1, b, b); // 2b - t1 *= a; // 2ab - Fp::add(t2, a, b); // a + b - Fp::sub(t3, a, b); // a - b - Fp::mul(y.a, t2, t3); // (a + b)(a - b) - y.b = t1; -#else - Fp t1, t2; - FpDbl d1, d2; - Fp::addPre(t1, b, b); // 2b - FpDbl::mulPre(d2, t1, a); // 2ab - Fp::addPre(t1, a, b); // a + b - Fp::sub(t2, a, b); // a - b - FpDbl::mulPre(d1, t1, t2); // (a + b)(a - b) - FpDbl::mod(py[0], d1); - FpDbl::mod(py[1], d2); -#endif - } - /* - xi = xi_a + i - x = a + bi - y = (a + bi)xi = (a + bi)(xi_a + i) - =(a * x_ia - b) + (a + b xi_a)i - */ - static void fp2_mul_xiC(Fp2T& y, const Fp2T& x) - { - const Fp& a = x.a; - const Fp& b = x.b; - Fp t; - Fp::mulUnit(t, a, Fp::getOp().xi_a); - t -= b; - Fp::mulUnit(y.b, b, Fp::getOp().xi_a); - y.b += a; - y.a = t; - } - /* - xi = 1 + i ; xi_a = 1 - y = (a + bi)xi = (a - b) + (a + b)i - */ - static void fp2_mul_xi_1_1iC(Fp2T& y, const Fp2T& x) - { - const Fp& a = x.a; - const Fp& b = x.b; - Fp t; - Fp::add(t, a, b); - Fp::sub(y.a, a, b); - y.b = t; - } - /* - x = a + bi - 1 / x = (a - bi) / (a^2 + b^2) - */ - static void fp2_invW(Unit *y, const Unit *x) - { - const Fp *px = reinterpret_cast(x); - Fp *py = reinterpret_cast(y); - const Fp& a = px[0]; - const Fp& b = px[1]; - Fp aa, bb; - Fp::sqr(aa, a); - Fp::sqr(bb, b); - aa += bb; - Fp::inv(aa, aa); // aa = 1 / (a^2 + b^2) - Fp::mul(py[0], a, aa); - Fp::mul(py[1], b, aa); - Fp::neg(py[1], py[1]); - } -}; - -#ifdef MCL_XBYAK_DIRECT_CALL -template void (*Fp2T::add)(Fp2T& z, const Fp2T& x, const Fp2T& y); -template void (*Fp2T::sub)(Fp2T& z, const Fp2T& x, const Fp2T& y); -template void (*Fp2T::neg)(Fp2T& y, const Fp2T& x); -template void (*Fp2T::mul)(Fp2T& z, const Fp2T& x, const Fp2T& y); -template void (*Fp2T::sqr)(Fp2T& y, const Fp2T& x); -#endif -template void 
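// ---------------------------------------------------------------------
// The Fp2 product above (see fp2_mulW and fp2Dbl_mulPreW) uses the classic
// 3-multiplication trick: ad + bc = (a + b)(c + d) - ac - bd, trading one
// base-field multiplication for a few additions. A toy check modulo a
// hypothetical prime p = 101:
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t p = 101;
    const int64_t a = 37, b = 54, c = 88, d = 19;
    int64_t ac = a * c % p, bd = b * d % p;
    int64_t t = (a + b) * (c + d) % p;            // 3rd and last multiplication
    int64_t re = ((ac - bd) % p + p) % p;         // real part: ac - bd
    int64_t im = ((t - ac - bd) % p + p) % p;     // imag part: ad + bc
    int64_t re2 = ((a * c - b * d) % p + p) % p;  // 4-mult schoolbook check
    int64_t im2 = (a * d + b * c) % p;
    printf("(%d, %d) == (%d, %d)\n", (int)re, (int)im, (int)re2, (int)im2);
    return 0;
}
// ---------------------------------------------------------------------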
(*Fp2T::mul_xi)(Fp2T& y, const Fp2T& x); - -template -struct Fp2DblT { - typedef FpDblT FpDbl; - typedef Fp2T Fp2; - typedef fp::Unit Unit; - FpDbl a, b; - static void add(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y) - { - FpDbl::add(z.a, x.a, y.a); - FpDbl::add(z.b, x.b, y.b); - } - static void addPre(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y) - { - FpDbl::addPre(z.a, x.a, y.a); - FpDbl::addPre(z.b, x.b, y.b); - } - static void sub(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y) - { - FpDbl::sub(z.a, x.a, y.a); - FpDbl::sub(z.b, x.b, y.b); - } - static void subPre(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y) - { - FpDbl::subPre(z.a, x.a, y.a); - FpDbl::subPre(z.b, x.b, y.b); - } - static void neg(Fp2DblT& y, const Fp2DblT& x) - { - FpDbl::neg(y.a, x.a); - FpDbl::neg(y.b, x.b); - } - static void mul_xi(Fp2DblT& y, const Fp2DblT& x) - { - const uint32_t xi_a = Fp2::get_xi_a(); - if (xi_a == 1) { - FpDbl t; - FpDbl::add(t, x.a, x.b); - FpDbl::sub(y.a, x.a, x.b); - y.b = t; - } else { - FpDbl t; - FpDbl::mulUnit(t, x.a, xi_a); - FpDbl::sub(t, t, x.b); - FpDbl::mulUnit(y.b, x.b, xi_a); - FpDbl::add(y.b, y.b, x.a); - y.a = t; - } - } - static void (*mulPre)(Fp2DblT&, const Fp2&, const Fp2&); - static void (*sqrPre)(Fp2DblT&, const Fp2&); - static void mod(Fp2& y, const Fp2DblT& x) - { - FpDbl::mod(y.a, x.a); - FpDbl::mod(y.b, x.b); - } -#ifndef CYBOZU_DONT_USE_STRING - friend std::ostream& operator<<(std::ostream& os, const Fp2DblT& x) - { - return os << x.a << ' ' << x.b; - } -#endif - void operator+=(const Fp2DblT& x) { add(*this, *this, x); } - void operator-=(const Fp2DblT& x) { sub(*this, *this, x); } - static void init() - { - const mcl::fp::Op& op = Fp::getOp(); - if (op.fp2Dbl_mulPreA_) { - mulPre = fp::func_ptr_cast(op.fp2Dbl_mulPreA_); - } else { - if (op.isFullBit) { - mulPre = fp2Dbl_mulPreW; - } else { - mulPre = fp2Dbl_mulPreW; - } - } - if (op.fp2Dbl_sqrPreA_) { - sqrPre = fp::func_ptr_cast(op.fp2Dbl_sqrPreA_); - } else { - if (op.isFullBit) { - sqrPre = fp2Dbl_sqrPreW; - } else { - sqrPre = fp2Dbl_sqrPreW; - } - } - } - /* - Fp2Dbl::mulPre by FpDblT - @note mod of NIST_P192 is fast - */ - template - static void fp2Dbl_mulPreW(Fp2DblT& z, const Fp2& x, const Fp2& y) - { - const Fp& a = x.a; - const Fp& b = x.b; - const Fp& c = y.a; - const Fp& d = y.b; - FpDbl& d0 = z.a; - FpDbl& d1 = z.b; - FpDbl d2; - Fp s, t; - if (isFullBit) { - Fp::add(s, a, b); - Fp::add(t, c, d); - } else { - Fp::addPre(s, a, b); - Fp::addPre(t, c, d); - } - FpDbl::mulPre(d1, s, t); // (a + b)(c + d) - FpDbl::mulPre(d0, a, c); - FpDbl::mulPre(d2, b, d); - if (isFullBit) { - FpDbl::sub(d1, d1, d0); // (a + b)(c + d) - ac - FpDbl::sub(d1, d1, d2); // (a + b)(c + d) - ac - bd - } else { - FpDbl::subPre(d1, d1, d0); - FpDbl::subPre(d1, d1, d2); - } - FpDbl::sub(d0, d0, d2); // ac - bd - } - template - static void fp2Dbl_sqrPreW(Fp2DblT& y, const Fp2& x) - { - Fp t1, t2; - if (isFullBit) { - Fp::add(t1, x.b, x.b); // 2b - Fp::add(t2, x.a, x.b); // a + b - } else { - Fp::addPre(t1, x.b, x.b); // 2b - Fp::addPre(t2, x.a, x.b); // a + b - } - FpDbl::mulPre(y.b, t1, x.a); // 2ab - Fp::sub(t1, x.a, x.b); // a - b - FpDbl::mulPre(y.a, t1, t2); // (a + b)(a - b) - } -}; - -template void (*Fp2DblT::mulPre)(Fp2DblT&, const Fp2T&, const Fp2T&); -template void (*Fp2DblT::sqrPre)(Fp2DblT&, const Fp2T&); - -template Fp2T Fp2T::g[Fp2T::gN]; -template Fp2T Fp2T::g2[Fp2T::gN]; -template Fp2T Fp2T::g3[Fp2T::gN]; - -template -struct Fp6DblT; -/* - Fp6T = Fp2[v] / (v^3 - xi) - x = a + b v + c v^2 -*/ -template -struct 
Fp6T : public fp::Serializable, - fp::Operator > > { - typedef _Fp Fp; - typedef Fp2T Fp2; - typedef Fp2DblT Fp2Dbl; - typedef Fp6DblT Fp6Dbl; - typedef Fp BaseFp; - Fp2 a, b, c; - Fp6T() { } - Fp6T(int64_t a) : a(a) , b(0) , c(0) { } - Fp6T(const Fp2& a, const Fp2& b, const Fp2& c) : a(a) , b(b) , c(c) { } - void clear() - { - a.clear(); - b.clear(); - c.clear(); - } - Fp* getFp0() { return a.getFp0(); } - const Fp* getFp0() const { return a.getFp0(); } - Fp2* getFp2() { return &a; } - const Fp2* getFp2() const { return &a; } - void set(const Fp2 &a_, const Fp2 &b_, const Fp2 &c_) - { - a = a_; - b = b_; - c = c_; - } - bool isZero() const - { - return a.isZero() && b.isZero() && c.isZero(); - } - bool isOne() const - { - return a.isOne() && b.isZero() && c.isZero(); - } - bool operator==(const Fp6T& rhs) const - { - return a == rhs.a && b == rhs.b && c == rhs.c; - } - bool operator!=(const Fp6T& rhs) const { return !operator==(rhs); } - template - void load(bool *pb, InputStream& is, int ioMode) - { - a.load(pb, is, ioMode); if (!*pb) return; - b.load(pb, is, ioMode); if (!*pb) return; - c.load(pb, is, ioMode); if (!*pb) return; - } - template - void save(bool *pb, OutputStream& os, int ioMode) const - { - const char sep = *fp::getIoSeparator(ioMode); - a.save(pb, os, ioMode); if (!*pb) return; - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - b.save(pb, os, ioMode); if (!*pb) return; - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - c.save(pb, os, ioMode); - } -#ifndef CYBOZU_DONT_USE_EXCEPTION - template - void load(InputStream& is, int ioMode = IoSerialize) - { - bool b; - load(&b, is, ioMode); - if (!b) throw cybozu::Exception("Fp6T:load"); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - bool b; - save(&b, os, ioMode); - if (!b) throw cybozu::Exception("Fp6T:save"); - } -#endif -#ifndef CYBOZU_DONT_USE_STRING - friend std::istream& operator>>(std::istream& is, Fp6T& self) - { - self.load(is, fp::detectIoMode(Fp::BaseFp::getIoMode(), is)); - return is; - } - friend std::ostream& operator<<(std::ostream& os, const Fp6T& self) - { - self.save(os, fp::detectIoMode(Fp::BaseFp::getIoMode(), os)); - return os; - } -#endif - static void add(Fp6T& z, const Fp6T& x, const Fp6T& y) - { - Fp2::add(z.a, x.a, y.a); - Fp2::add(z.b, x.b, y.b); - Fp2::add(z.c, x.c, y.c); - } - static void sub(Fp6T& z, const Fp6T& x, const Fp6T& y) - { - Fp2::sub(z.a, x.a, y.a); - Fp2::sub(z.b, x.b, y.b); - Fp2::sub(z.c, x.c, y.c); - } - static void neg(Fp6T& y, const Fp6T& x) - { - Fp2::neg(y.a, x.a); - Fp2::neg(y.b, x.b); - Fp2::neg(y.c, x.c); - } - /* - x = a + bv + cv^2, v^3 = xi - x^2 = (a^2 + 2bc xi) + (c^2 xi + 2ab)v + (b^2 + 2ac)v^2 - - b^2 + 2ac = (a + b + c)^2 - a^2 - 2bc - c^2 - 2ab - */ - static void sqr(Fp6T& y, const Fp6T& x) - { - Fp2 t1, t2, t3; - Fp2::mul(t1, x.a, x.b); - t1 += t1; // 2ab - Fp2::mul(t2, x.b, x.c); - t2 += t2; // 2bc - Fp2::sqr(t3, x.c); // c^2 - Fp2::add(y.c, x.a, x.c); // a + c, destroy y.c - y.c += x.b; // a + b + c - Fp2::sqr(y.b, y.c); // (a + b + c)^2, destroy y.b - y.b -= t2; // (a + b + c)^2 - 2bc - Fp2::mul_xi(t2, t2); // 2bc xi - Fp2::sqr(y.a, x.a); // a^2, destroy y.a - y.b -= y.a; // (a + b + c)^2 - 2bc - a^2 - y.a += t2; // a^2 + 2bc xi - Fp2::sub(y.c, y.b, t3); // (a + b + c)^2 - 2bc - a^2 - c^2 - Fp2::mul_xi(y.b, t3); // c^2 xi - y.b += t1; // c^2 xi + 2ab - y.c -= t1; // b^2 + 2ac - } - static inline void mul(Fp6T& z, const Fp6T& x, const Fp6T& y); - /* - x = a + bv + cv^2, v^3 = xi - y = 
1/x = p/q where - p = (a^2 - bc xi) + (c^2 xi - ab)v + (b^2 - ac)v^2 - q = c^3 xi^2 + b(b^2 - 3ac)xi + a^3 - = (a^2 - bc xi)a + ((c^2 xi - ab)c + (b^2 - ac)b) xi - */ - static void inv(Fp6T& y, const Fp6T& x) - { - const Fp2& a = x.a; - const Fp2& b = x.b; - const Fp2& c = x.c; - Fp2 aa, bb, cc, ab, bc, ac; - Fp2::sqr(aa, a); - Fp2::sqr(bb, b); - Fp2::sqr(cc, c); - Fp2::mul(ab, a, b); - Fp2::mul(bc, b, c); - Fp2::mul(ac, c, a); - - Fp6T p; - Fp2::mul_xi(p.a, bc); - Fp2::sub(p.a, aa, p.a); // a^2 - bc xi - Fp2::mul_xi(p.b, cc); - p.b -= ab; // c^2 xi - ab - Fp2::sub(p.c, bb, ac); // b^2 - ac - Fp2 q, t; - Fp2::mul(q, p.b, c); - Fp2::mul(t, p.c, b); - q += t; - Fp2::mul_xi(q, q); - Fp2::mul(t, p.a, a); - q += t; - Fp2::inv(q, q); - - Fp2::mul(y.a, p.a, q); - Fp2::mul(y.b, p.b, q); - Fp2::mul(y.c, p.c, q); - } -}; - -template -struct Fp6DblT { - typedef Fp2T Fp2; - typedef Fp6T Fp6; - typedef Fp2DblT Fp2Dbl; - typedef Fp6DblT Fp6Dbl; - typedef fp::Unit Unit; - Fp2Dbl a, b, c; - static void add(Fp6Dbl& z, const Fp6Dbl& x, const Fp6Dbl& y) - { - Fp2Dbl::add(z.a, x.a, y.a); - Fp2Dbl::add(z.b, x.b, y.b); - Fp2Dbl::add(z.c, x.c, y.c); - } - static void sub(Fp6Dbl& z, const Fp6Dbl& x, const Fp6Dbl& y) - { - Fp2Dbl::sub(z.a, x.a, y.a); - Fp2Dbl::sub(z.b, x.b, y.b); - Fp2Dbl::sub(z.c, x.c, y.c); - } - /* - x = a + bv + cv^2, y = d + ev + fv^2, v^3 = xi - xy = (ad + (bf + ce)xi) + ((ae + bd) + cf xi)v + ((af + cd) + be)v^2 - bf + ce = (b + c)(e + f) - be - cf - ae + bd = (a + b)(e + d) - ad - be - af + cd = (a + c)(d + f) - ad - cf - */ - static void mulPre(Fp6DblT& z, const Fp6& x, const Fp6& y) - { -//clk.begin(); - const Fp2& a = x.a; - const Fp2& b = x.b; - const Fp2& c = x.c; - const Fp2& d = y.a; - const Fp2& e = y.b; - const Fp2& f = y.c; - Fp2Dbl& za = z.a; - Fp2Dbl& zb = z.b; - Fp2Dbl& zc = z.c; - Fp2Dbl BE; - Fp2Dbl::mulPre(za, a, d); - Fp2Dbl::mulPre(BE, b, e); - Fp2Dbl::mulPre(zb, c, f); - - Fp2 t1, t2, t3, t4; - Fp2::add(t1, b, c); - Fp2::add(t2, e, f); - Fp2Dbl T1; - Fp2Dbl::mulPre(T1, t1, t2); - Fp2Dbl::sub(T1, T1, BE); - Fp2Dbl::sub(T1, T1, zb); - Fp2Dbl::mul_xi(T1, T1); - - Fp2::add(t2, a, b); - Fp2::add(t3, e, d); - Fp2Dbl T2; - Fp2Dbl::mulPre(T2, t2, t3); - Fp2Dbl::sub(T2, T2, za); - Fp2Dbl::sub(T2, T2, BE); - - Fp2::add(t3, a, c); - Fp2::add(t4, d, f); - Fp2Dbl::mulPre(zc, t3, t4); - Fp2Dbl::sub(zc, zc, za); - Fp2Dbl::sub(zc, zc, zb); - - Fp2Dbl::add(za, za, T1); - Fp2Dbl::mul_xi(zb, zb); - Fp2Dbl::add(zb, zb, T2); - Fp2Dbl::add(zc, zc, BE); -//clk.end(); - } - static void mod(Fp6& y, const Fp6Dbl& x) - { - Fp2Dbl::mod(y.a, x.a); - Fp2Dbl::mod(y.b, x.b); - Fp2Dbl::mod(y.c, x.c); - } -}; - -template -inline void Fp6T::mul(Fp6T& z, const Fp6T& x, const Fp6T& y) -{ - Fp6DblT Z; - Fp6DblT::mulPre(Z, x, y); - Fp6DblT::mod(z, Z); -} - -/* - Fp12T = Fp6[w] / (w^2 - v) - x = a + b w -*/ -template -struct Fp12T : public fp::Serializable, - fp::Operator > > { - typedef Fp2T Fp2; - typedef Fp6T Fp6; - typedef Fp2DblT Fp2Dbl; - typedef Fp6DblT Fp6Dbl; - typedef Fp BaseFp; - Fp6 a, b; - Fp12T() {} - Fp12T(int64_t a) : a(a), b(0) {} - Fp12T(const Fp6& a, const Fp6& b) : a(a), b(b) {} - void clear() - { - a.clear(); - b.clear(); - } - void setOne() - { - clear(); - a.a.a = 1; - } - - Fp* getFp0() { return a.getFp0(); } - const Fp* getFp0() const { return a.getFp0(); } - Fp2* getFp2() { return a.getFp2(); } - const Fp2* getFp2() const { return a.getFp2(); } - void set(const Fp2& v0, const Fp2& v1, const Fp2& v2, const Fp2& v3, const Fp2& v4, const Fp2& v5) - { - a.set(v0, v1, v2); - b.set(v3, v4, 
v5); - } - - bool isZero() const - { - return a.isZero() && b.isZero(); - } - bool isOne() const - { - return a.isOne() && b.isZero(); - } - bool operator==(const Fp12T& rhs) const - { - return a == rhs.a && b == rhs.b; - } - bool operator!=(const Fp12T& rhs) const { return !operator==(rhs); } - static void add(Fp12T& z, const Fp12T& x, const Fp12T& y) - { - Fp6::add(z.a, x.a, y.a); - Fp6::add(z.b, x.b, y.b); - } - static void sub(Fp12T& z, const Fp12T& x, const Fp12T& y) - { - Fp6::sub(z.a, x.a, y.a); - Fp6::sub(z.b, x.b, y.b); - } - static void neg(Fp12T& z, const Fp12T& x) - { - Fp6::neg(z.a, x.a); - Fp6::neg(z.b, x.b); - } - /* - z = x v + y - in Fp6 : (a + bv + cv^2)v = cv^3 + av + bv^2 = cxi + av + bv^2 - */ - static void mulVadd(Fp6& z, const Fp6& x, const Fp6& y) - { - Fp2 t; - Fp2::mul_xi(t, x.c); - Fp2::add(z.c, x.b, y.c); - Fp2::add(z.b, x.a, y.b); - Fp2::add(z.a, t, y.a); - } - static void mulVadd(Fp6Dbl& z, const Fp6Dbl& x, const Fp6Dbl& y) - { - Fp2Dbl t; - Fp2Dbl::mul_xi(t, x.c); - Fp2Dbl::add(z.c, x.b, y.c); - Fp2Dbl::add(z.b, x.a, y.b); - Fp2Dbl::add(z.a, t, y.a); - } - /* - x = a + bw, y = c + dw, w^2 = v - z = xy = (a + bw)(c + dw) = (ac + bdv) + (ad + bc)w - ad+bc = (a + b)(c + d) - ac - bd - - in Fp6 : (a + bv + cv^2)v = cv^3 + av + bv^2 = cxi + av + bv^2 - */ - static void mul(Fp12T& z, const Fp12T& x, const Fp12T& y) - { - // 4.7Kclk -> 4.55Kclk - const Fp6& a = x.a; - const Fp6& b = x.b; - const Fp6& c = y.a; - const Fp6& d = y.b; - Fp6 t1, t2; - Fp6::add(t1, a, b); - Fp6::add(t2, c, d); -#if 1 - Fp6Dbl T, AC, BD; - Fp6Dbl::mulPre(AC, a, c); - Fp6Dbl::mulPre(BD, b, d); - mulVadd(T, BD, AC); - Fp6Dbl::mod(z.a, T); - Fp6Dbl::mulPre(T, t1, t2); // (a + b)(c + d) - Fp6Dbl::sub(T, T, AC); - Fp6Dbl::sub(T, T, BD); - Fp6Dbl::mod(z.b, T); -#else - Fp6 ac, bd; - t1 *= t2; // (a + b)(c + d) - Fp6::mul(ac, a, c); - Fp6::mul(bd, b, d); - mulVadd(z.a, bd, ac); - t1 -= ac; - Fp6::sub(z.b, t1, bd); -#endif - } - /* - x = a + bw, w^2 = v - y = x^2 = (a + bw)^2 = (a^2 + b^2v) + 2abw - a^2 + b^2v = (a + b)(bv + a) - (abv + ab) - */ - static void sqr(Fp12T& y, const Fp12T& x) - { - const Fp6& a = x.a; - const Fp6& b = x.b; - Fp6 t0, t1; - Fp6::add(t0, a, b); // a + b - mulVadd(t1, b, a); // bv + a - t0 *= t1; // (a + b)(bv + a) - Fp6::mul(t1, a, b); // ab - Fp6::add(y.b, t1, t1); // 2ab - mulVadd(y.a, t1, t1); // abv + ab - Fp6::sub(y.a, t0, y.a); - } - /* - x = a + bw, w^2 = v - y = 1/x = (a - bw) / (a^2 - b^2v) - */ - static void inv(Fp12T& y, const Fp12T& x) - { - const Fp6& a = x.a; - const Fp6& b = x.b; - Fp6 t0, t1; - Fp6::sqr(t0, a); - Fp6::sqr(t1, b); - Fp2::mul_xi(t1.c, t1.c); - t0.a -= t1.c; - t0.b -= t1.a; - t0.c -= t1.b; // t0 = a^2 - b^2v - Fp6::inv(t0, t0); - Fp6::mul(y.a, x.a, t0); - Fp6::mul(y.b, x.b, t0); - Fp6::neg(y.b, y.b); - } - /* - y = 1 / x = conjugate of x if |x| = 1 - */ - static void unitaryInv(Fp12T& y, const Fp12T& x) - { - if (&y != &x) y.a = x.a; - Fp6::neg(y.b, x.b); - } - /* - Frobenius - i^2 = -1 - (a + bi)^p = a + bi^p in Fp - = a + bi if p = 1 mod 4 - = a - bi if p = 3 mod 4 - - g = xi^(p - 1) / 6 - v^3 = xi in Fp2 - v^p = ((v^6) ^ (p-1)/6) v = g^2 v - v^2p = g^4 v^2 - (a + bv + cv^2)^p in Fp6 - = F(a) + F(b)g^2 v + F(c) g^4 v^2 - - w^p = ((w^6) ^ (p-1)/6) w = g w - ((a + bv + cv^2)w)^p in Fp12T - = (F(a) g + F(b) g^3 v + F(c) g^5 v^2)w - */ - static void Frobenius(Fp12T& y, const Fp12T& x) - { - for (int i = 0; i < 6; i++) { - Fp2::Frobenius(y.getFp2()[i], x.getFp2()[i]); - } - for (int i = 1; i < 6; i++) { - y.getFp2()[i] *= Fp2::get_gTbl()[i - 
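// ---------------------------------------------------------------------
// unitaryInv above returns the conjugate, which equals the inverse whenever
// the element has norm 1 (as pairing values in the cyclotomic subgroup do).
// The same fact one tower level down, in a toy F_13[i] where x = 2 + 6i
// has norm 2^2 + 6^2 = 40 ≡ 1 (mod 13):
#include <cstdio>

int main() {
    const int p = 13, a = 2, b = 6;
    int re = (a * a + b * b) % p;            // Re(x * conj(x)) = a^2 + b^2
    int im = (a * (p - b) + b * a) % p;      // Im(x * conj(x)) = -ab + ba = 0
    printf("x * conj(x) = %d + %di (mod %d)\n", re, im, p); // 1 + 0i
    return 0;
}
// ---------------------------------------------------------------------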
1]; - } - } - static void Frobenius2(Fp12T& y, const Fp12T& x) - { -#if 0 - Frobenius(y, x); - Frobenius(y, y); -#else - y.getFp2()[0] = x.getFp2()[0]; - if (Fp::getOp().pmod4 == 1) { - for (int i = 1; i < 6; i++) { - Fp2::mul(y.getFp2()[i], x.getFp2()[i], Fp2::get_g2Tbl()[i]); - } - } else { - for (int i = 1; i < 6; i++) { - Fp2::mulFp(y.getFp2()[i], x.getFp2()[i], Fp2::get_g2Tbl()[i - 1].a); - } - } -#endif - } - static void Frobenius3(Fp12T& y, const Fp12T& x) - { -#if 0 - Frobenius(y, x); - Frobenius(y, y); - Frobenius(y, y); -#else - Fp2::Frobenius(y.getFp2()[0], x.getFp2()[0]); - for (int i = 1; i < 6; i++) { - Fp2::Frobenius(y.getFp2()[i], x.getFp2()[i]); - y.getFp2()[i] *= Fp2::get_g3Tbl()[i - 1]; - } -#endif - } - template - void load(bool *pb, InputStream& is, int ioMode) - { - a.load(pb, is, ioMode); if (!*pb) return; - b.load(pb, is, ioMode); - } - template - void save(bool *pb, OutputStream& os, int ioMode) const - { - const char sep = *fp::getIoSeparator(ioMode); - a.save(pb, os, ioMode); if (!*pb) return; - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - b.save(pb, os, ioMode); - } -#ifndef CYBOZU_DONT_USE_EXCEPTION - template - void load(InputStream& is, int ioMode = IoSerialize) - { - bool b; - load(&b, is, ioMode); - if (!b) throw cybozu::Exception("Fp12T:load"); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - bool b; - save(&b, os, ioMode); - if (!b) throw cybozu::Exception("Fp12T:save"); - } -#endif -#ifndef CYBOZU_DONT_USE_STRING - friend std::istream& operator>>(std::istream& is, Fp12T& self) - { - self.load(is, fp::detectIoMode(Fp::BaseFp::getIoMode(), is)); - return is; - } - friend std::ostream& operator<<(std::ostream& os, const Fp12T& self) - { - self.save(os, fp::detectIoMode(Fp::BaseFp::getIoMode(), os)); - return os; - } -#endif -}; - -/* - convert multiplicative group to additive group -*/ -template -struct GroupMtoA : public T { - static T& castT(GroupMtoA& x) { return static_cast(x); } - static const T& castT(const GroupMtoA& x) { return static_cast(x); } - void clear() - { - castT(*this) = 1; - } - bool isZero() const { return castT(*this).isOne(); } - static void add(GroupMtoA& z, const GroupMtoA& x, const GroupMtoA& y) - { - T::mul(castT(z), castT(x), castT(y)); - } - static void dbl(GroupMtoA& y, const GroupMtoA& x) - { - T::sqr(castT(y), castT(x)); - } - static void neg(GroupMtoA& y, const GroupMtoA& x) - { - // assume Fp12 - T::unitaryInv(castT(y), castT(x)); - } - static void Frobenus(GroupMtoA& y, const GroupMtoA& x) - { - T::Frobenius(castT(y), castT(x)); - } - template - static void mul(GroupMtoA& z, const GroupMtoA& x, const INT& y) - { - T::pow(castT(z), castT(x), y); - } - template - static void mulGeneric(GroupMtoA& z, const GroupMtoA& x, const INT& y) - { - T::powGeneric(castT(z), castT(x), y); - } - void operator+=(const GroupMtoA& rhs) - { - add(*this, *this, rhs); - } - void normalize() {} -private: - bool isOne() const; -}; - -} // mcl - diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/gmp_util.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/gmp_util.hpp deleted file mode 100644 index bcbd91a1e..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/gmp_util.hpp +++ /dev/null @@ -1,954 +0,0 @@ -#pragma once -/** - @file - @brief util function for gmp - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include -#include -#include -#include -#include -#include -#ifdef _MSC_VER 
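// ---------------------------------------------------------------------
// GroupMtoA above lets code written for an additive group drive a
// multiplicative one by renaming mul -> add, sqr -> dbl and pow -> mul via
// casts to the base type. A stripped-down sketch of the same trick around a
// toy multiplicative group Z_101^* (all names and parameters hypothetical):
#include <cstdint>
#include <cstdio>

static const uint64_t P = 101;

struct MulGrp {                            // toy multiplicative group
    uint64_t v;
    static void mul(MulGrp& z, const MulGrp& x, const MulGrp& y) { z.v = x.v * y.v % P; }
    static void pow(MulGrp& z, const MulGrp& x, uint64_t n) {
        MulGrp r = {1}, b = x;
        for (; n; n >>= 1) { if (n & 1) mul(r, r, b); mul(b, b, b); }
        z = r;
    }
};

template <class T>
struct MtoA : T {                          // view T additively
    static T& castT(MtoA& x) { return static_cast<T&>(x); }
    static const T& castT(const MtoA& x) { return static_cast<const T&>(x); }
    static void add(MtoA& z, const MtoA& x, const MtoA& y) { T::mul(castT(z), castT(x), castT(y)); }
    static void mul(MtoA& z, const MtoA& x, uint64_t n) { T::pow(castT(z), castT(x), n); }
};

int main() {
    MtoA<MulGrp> g, s;
    g.v = 3;
    MtoA<MulGrp>::mul(s, g, 5);            // "5 * g" really performs g^5
    printf("%u\n", (unsigned)s.v);         // 3^5 mod 101 = 41
    return 0;
}
// ---------------------------------------------------------------------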
- #pragma warning(push) - #pragma warning(disable : 4616) - #pragma warning(disable : 4800) - #pragma warning(disable : 4244) - #pragma warning(disable : 4127) - #pragma warning(disable : 4512) - #pragma warning(disable : 4146) -#endif -#if defined(__EMSCRIPTEN__) || defined(__wasm__) - #define MCL_USE_VINT -#endif -#ifdef MCL_USE_VINT -#include -typedef mcl::Vint mpz_class; -#else -#include -#ifdef _MSC_VER - #pragma warning(pop) - #include -#endif -#endif - -#ifndef MCL_SIZEOF_UNIT - #if defined(CYBOZU_OS_BIT) && (CYBOZU_OS_BIT == 32) - #define MCL_SIZEOF_UNIT 4 - #else - #define MCL_SIZEOF_UNIT 8 - #endif -#endif - -namespace mcl { - -namespace fp { - -#if MCL_SIZEOF_UNIT == 8 -typedef uint64_t Unit; -#else -typedef uint32_t Unit; -#endif -#define MCL_UNIT_BIT_SIZE (MCL_SIZEOF_UNIT * 8) - -} // mcl::fp - -namespace gmp { - -typedef mpz_class ImplType; - -// z = [buf[n-1]:..:buf[1]:buf[0]] -// eg. buf[] = {0x12345678, 0xaabbccdd}; => z = 0xaabbccdd12345678; -template -void setArray(bool *pb, mpz_class& z, const T *buf, size_t n) -{ -#ifdef MCL_USE_VINT - z.setArray(pb, buf, n); -#else - mpz_import(z.get_mpz_t(), n, -1, sizeof(*buf), 0, 0, buf); - *pb = true; -#endif -} -/* - buf[0, size) = x - buf[size, maxSize) with zero -*/ -template -bool getArray_(T *buf, size_t maxSize, const U *x, int xn)//const mpz_srcptr x) -{ - const size_t bufByteSize = sizeof(T) * maxSize; - if (xn < 0) return false; - size_t xByteSize = sizeof(*x) * xn; - if (xByteSize > bufByteSize) return false; - memcpy(buf, x, xByteSize); - memset((char*)buf + xByteSize, 0, bufByteSize - xByteSize); - return true; -} -template -void getArray(bool *pb, T *buf, size_t maxSize, const mpz_class& x) -{ -#ifdef MCL_USE_VINT - *pb = getArray_(buf, maxSize, x.getUnit(), x.getUnitSize()); -#else - *pb = getArray_(buf, maxSize, x.get_mpz_t()->_mp_d, x.get_mpz_t()->_mp_size); -#endif -} -inline void set(mpz_class& z, uint64_t x) -{ - bool b; - setArray(&b, z, &x, 1); - assert(b); - (void)b; -} -inline void setStr(bool *pb, mpz_class& z, const char *str, int base = 0) -{ -#ifdef MCL_USE_VINT - z.setStr(pb, str, base); -#else - *pb = z.set_str(str, base) == 0; -#endif -} - -/* - set buf with string terminated by '\0' - return strlen(buf) if success else 0 -*/ -inline size_t getStr(char *buf, size_t bufSize, const mpz_class& z, int base = 10) -{ -#ifdef MCL_USE_VINT - return z.getStr(buf, bufSize, base); -#else - __gmp_alloc_cstring tmp(mpz_get_str(0, base, z.get_mpz_t())); - size_t n = strlen(tmp.str); - if (n + 1 > bufSize) return 0; - memcpy(buf, tmp.str, n + 1); - return n; -#endif -} - -#ifndef CYBOZU_DONT_USE_STRING -inline void getStr(std::string& str, const mpz_class& z, int base = 10) -{ -#ifdef MCL_USE_VINT - z.getStr(str, base); -#else - str = z.get_str(base); -#endif -} -inline std::string getStr(const mpz_class& z, int base = 10) -{ - std::string s; - gmp::getStr(s, z, base); - return s; -} -#endif - -inline void add(mpz_class& z, const mpz_class& x, const mpz_class& y) -{ -#ifdef MCL_USE_VINT - Vint::add(z, x, y); -#else - mpz_add(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); -#endif -} -#ifndef MCL_USE_VINT -inline void add(mpz_class& z, const mpz_class& x, unsigned int y) -{ - mpz_add_ui(z.get_mpz_t(), x.get_mpz_t(), y); -} -inline void sub(mpz_class& z, const mpz_class& x, unsigned int y) -{ - mpz_sub_ui(z.get_mpz_t(), x.get_mpz_t(), y); -} -inline void mul(mpz_class& z, const mpz_class& x, unsigned int y) -{ - mpz_mul_ui(z.get_mpz_t(), x.get_mpz_t(), y); -} -inline void div(mpz_class& q, const mpz_class& x, unsigned 
int y) -{ - mpz_div_ui(q.get_mpz_t(), x.get_mpz_t(), y); -} -inline void mod(mpz_class& r, const mpz_class& x, unsigned int m) -{ - mpz_mod_ui(r.get_mpz_t(), x.get_mpz_t(), m); -} -inline int compare(const mpz_class& x, int y) -{ - return mpz_cmp_si(x.get_mpz_t(), y); -} -#endif -inline void sub(mpz_class& z, const mpz_class& x, const mpz_class& y) -{ -#ifdef MCL_USE_VINT - Vint::sub(z, x, y); -#else - mpz_sub(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); -#endif -} -inline void mul(mpz_class& z, const mpz_class& x, const mpz_class& y) -{ -#ifdef MCL_USE_VINT - Vint::mul(z, x, y); -#else - mpz_mul(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); -#endif -} -inline void sqr(mpz_class& z, const mpz_class& x) -{ -#ifdef MCL_USE_VINT - Vint::mul(z, x, x); -#else - mpz_mul(z.get_mpz_t(), x.get_mpz_t(), x.get_mpz_t()); -#endif -} -inline void divmod(mpz_class& q, mpz_class& r, const mpz_class& x, const mpz_class& y) -{ -#ifdef MCL_USE_VINT - Vint::divMod(&q, r, x, y); -#else - mpz_divmod(q.get_mpz_t(), r.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); -#endif -} -inline void div(mpz_class& q, const mpz_class& x, const mpz_class& y) -{ -#ifdef MCL_USE_VINT - Vint::div(q, x, y); -#else - mpz_div(q.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); -#endif -} -inline void mod(mpz_class& r, const mpz_class& x, const mpz_class& m) -{ -#ifdef MCL_USE_VINT - Vint::mod(r, x, m); -#else - mpz_mod(r.get_mpz_t(), x.get_mpz_t(), m.get_mpz_t()); -#endif -} -inline void clear(mpz_class& z) -{ -#ifdef MCL_USE_VINT - z.clear(); -#else - mpz_set_ui(z.get_mpz_t(), 0); -#endif -} -inline bool isZero(const mpz_class& z) -{ -#ifdef MCL_USE_VINT - return z.isZero(); -#else - return mpz_sgn(z.get_mpz_t()) == 0; -#endif -} -inline bool isNegative(const mpz_class& z) -{ -#ifdef MCL_USE_VINT - return z.isNegative(); -#else - return mpz_sgn(z.get_mpz_t()) < 0; -#endif -} -inline void neg(mpz_class& z, const mpz_class& x) -{ -#ifdef MCL_USE_VINT - Vint::neg(z, x); -#else - mpz_neg(z.get_mpz_t(), x.get_mpz_t()); -#endif -} -inline int compare(const mpz_class& x, const mpz_class & y) -{ -#ifdef MCL_USE_VINT - return Vint::compare(x, y); -#else - return mpz_cmp(x.get_mpz_t(), y.get_mpz_t()); -#endif -} -template -void addMod(mpz_class& z, const mpz_class& x, const T& y, const mpz_class& m) -{ - add(z, x, y); - if (compare(z, m) >= 0) { - sub(z, z, m); - } -} -template -void subMod(mpz_class& z, const mpz_class& x, const T& y, const mpz_class& m) -{ - sub(z, x, y); - if (!isNegative(z)) return; - add(z, z, m); -} -template -void mulMod(mpz_class& z, const mpz_class& x, const T& y, const mpz_class& m) -{ - mul(z, x, y); - mod(z, z, m); -} -inline void sqrMod(mpz_class& z, const mpz_class& x, const mpz_class& m) -{ - sqr(z, x); - mod(z, z, m); -} -// z = x^y (y >= 0) -inline void pow(mpz_class& z, const mpz_class& x, unsigned int y) -{ -#ifdef MCL_USE_VINT - Vint::pow(z, x, y); -#else - mpz_pow_ui(z.get_mpz_t(), x.get_mpz_t(), y); -#endif -} -// z = x^y mod m (y >=0) -inline void powMod(mpz_class& z, const mpz_class& x, const mpz_class& y, const mpz_class& m) -{ -#ifdef MCL_USE_VINT - Vint::powMod(z, x, y, m); -#else - mpz_powm(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t(), m.get_mpz_t()); -#endif -} -// z = 1/x mod m -inline void invMod(mpz_class& z, const mpz_class& x, const mpz_class& m) -{ -#ifdef MCL_USE_VINT - Vint::invMod(z, x, m); -#else - mpz_invert(z.get_mpz_t(), x.get_mpz_t(), m.get_mpz_t()); -#endif -} -// z = lcm(x, y) -inline void lcm(mpz_class& z, const mpz_class& x, const mpz_class& y) -{ -#ifdef MCL_USE_VINT - Vint::lcm(z, x, 
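The addMod/subMod wrappers removed in this hunk rely on both inputs already being reduced, so a single conditional correction replaces a full modular reduction. A word-sized sketch of the same pattern, with uint64_t standing in for mpz_class:

```cpp
#include <cassert>
#include <cstdint>

// Mirrors the deleted addMod/subMod: after one add or sub the result is
// within one modulus of range, so at most one fix-up is needed.
static uint64_t addMod(uint64_t x, uint64_t y, uint64_t m) {
    uint64_t z = x + y;                  // assumes x, y < m < 2^63 (no overflow)
    if (z >= m) z -= m;                  // at most one subtraction
    return z;
}
static uint64_t subMod(uint64_t x, uint64_t y, uint64_t m) {
    return x >= y ? x - y : x + m - y;   // add m back when x - y went "negative"
}

int main() {
    const uint64_t m = 97;
    assert(addMod(60, 50, m) == 13);     // 110 - 97
    assert(subMod(10, 20, m) == 87);     // 10 - 20 + 97
}
```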
y); -#else - mpz_lcm(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); -#endif -} -inline mpz_class lcm(const mpz_class& x, const mpz_class& y) -{ - mpz_class z; - lcm(z, x, y); - return z; -} -// z = gcd(x, y) -inline void gcd(mpz_class& z, const mpz_class& x, const mpz_class& y) -{ -#ifdef MCL_USE_VINT - Vint::gcd(z, x, y); -#else - mpz_gcd(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); -#endif -} -inline mpz_class gcd(const mpz_class& x, const mpz_class& y) -{ - mpz_class z; - gcd(z, x, y); - return z; -} -/* - assume p : odd prime - return 1 if x^2 = a mod p for some x - return -1 if x^2 != a mod p for any x -*/ -inline int legendre(const mpz_class& a, const mpz_class& p) -{ -#ifdef MCL_USE_VINT - return Vint::jacobi(a, p); -#else - return mpz_legendre(a.get_mpz_t(), p.get_mpz_t()); -#endif -} -inline bool isPrime(bool *pb, const mpz_class& x) -{ -#ifdef MCL_USE_VINT - return x.isPrime(pb, 32); -#else - *pb = true; - return mpz_probab_prime_p(x.get_mpz_t(), 32) != 0; -#endif -} -inline size_t getBitSize(const mpz_class& x) -{ -#ifdef MCL_USE_VINT - return x.getBitSize(); -#else - return mpz_sizeinbase(x.get_mpz_t(), 2); -#endif -} -inline bool testBit(const mpz_class& x, size_t pos) -{ -#ifdef MCL_USE_VINT - return x.testBit(pos); -#else - return mpz_tstbit(x.get_mpz_t(), pos) != 0; -#endif -} -inline void resetBit(mpz_class& x, size_t pos) -{ -#ifdef MCL_USE_VINT - x.setBit(pos, false); -#else - mpz_clrbit(x.get_mpz_t(), pos); -#endif -} -inline void setBit(mpz_class& x, size_t pos, bool v = true) -{ -#ifdef MCL_USE_VINT - x.setBit(pos, v); -#else - if (v) { - mpz_setbit(x.get_mpz_t(), pos); - } else { - resetBit(x, pos); - } -#endif -} -inline const fp::Unit *getUnit(const mpz_class& x) -{ -#ifdef MCL_USE_VINT - return x.getUnit(); -#else - return reinterpret_cast(x.get_mpz_t()->_mp_d); -#endif -} -inline fp::Unit getUnit(const mpz_class& x, size_t i) -{ - return getUnit(x)[i]; -} -inline size_t getUnitSize(const mpz_class& x) -{ -#ifdef MCL_USE_VINT - return x.getUnitSize(); -#else - return std::abs(x.get_mpz_t()->_mp_size); -#endif -} -inline mpz_class abs(const mpz_class& x) -{ -#ifdef MCL_USE_VINT - return Vint::abs(x); -#else - return ::abs(x); -#endif -} - -inline void getRand(bool *pb, mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen()) -{ - if (rg.isZero()) rg = fp::RandGen::get(); - assert(bitSize > 1); - const size_t rem = bitSize & 31; - const size_t n = (bitSize + 31) / 32; - uint32_t buf[128]; - assert(n <= CYBOZU_NUM_OF_ARRAY(buf)); - if (n > CYBOZU_NUM_OF_ARRAY(buf)) { - *pb = false; - return; - } - rg.read(pb, buf, n * sizeof(buf[0])); - if (!*pb) return; - uint32_t v = buf[n - 1]; - if (rem == 0) { - v |= 1U << 31; - } else { - v &= (1U << rem) - 1; - v |= 1U << (rem - 1); - } - buf[n - 1] = v; - setArray(pb, z, buf, n); -} - -inline void getRandPrime(bool *pb, mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen(), bool setSecondBit = false, bool mustBe3mod4 = false) -{ - if (rg.isZero()) rg = fp::RandGen::get(); - assert(bitSize > 2); - for (;;) { - getRand(pb, z, bitSize, rg); - if (!*pb) return; - if (setSecondBit) { - z |= mpz_class(1) << (bitSize - 2); - } - if (mustBe3mod4) { - z |= 3; - } - bool ret = isPrime(pb, z); - if (!*pb) return; - if (ret) return; - } -} -inline mpz_class getQuadraticNonResidue(const mpz_class& p) -{ - mpz_class g = 2; - while (legendre(g, p) > 0) { - ++g; - } - return g; -} - -namespace impl { - -template -void convertToBinary(Vec& v, const mpz_class& x) -{ - const size_t len = gmp::getBitSize(x); - v.resize(len); - for 
(size_t i = 0; i < len; i++) { - v[i] = gmp::testBit(x, len - 1 - i) ? 1 : 0; - } -} - -template -size_t getContinuousVal(const Vec& v, size_t pos, int val) -{ - while (pos >= 2) { - if (v[pos] != val) break; - pos--; - } - return pos; -} - -template -void convertToNAF(Vec& v, const Vec& in) -{ - v.copy(in); - size_t pos = v.size() - 1; - for (;;) { - size_t p = getContinuousVal(v, pos, 0); - if (p == 1) return; - assert(v[p] == 1); - size_t q = getContinuousVal(v, p, 1); - if (q == 1) return; - assert(v[q] == 0); - if (p - q <= 1) { - pos = p - 1; - continue; - } - v[q] = 1; - for (size_t i = q + 1; i < p; i++) { - v[i] = 0; - } - v[p] = -1; - pos = q; - } -} - -template -size_t getNumOfNonZeroElement(const Vec& v) -{ - size_t w = 0; - for (size_t i = 0; i < v.size(); i++) { - if (v[i]) w++; - } - return w; -} - -} // impl - -/* - compute a repl of x which has smaller Hamming weights. - return true if naf is selected -*/ -template -bool getNAF(Vec& v, const mpz_class& x) -{ - Vec bin; - impl::convertToBinary(bin, x); - Vec naf; - impl::convertToNAF(naf, bin); - const size_t binW = impl::getNumOfNonZeroElement(bin); - const size_t nafW = impl::getNumOfNonZeroElement(naf); - if (nafW < binW) { - v.swap(naf); - return true; - } else { - v.swap(bin); - return false; - } -} - -#ifndef CYBOZU_DONT_USE_EXCEPTION -inline void setStr(mpz_class& z, const std::string& str, int base = 0) -{ - bool b; - setStr(&b, z, str.c_str(), base); - if (!b) throw cybozu::Exception("gmp:setStr"); -} -template -void setArray(mpz_class& z, const T *buf, size_t n) -{ - bool b; - setArray(&b, z, buf, n); - if (!b) throw cybozu::Exception("gmp:setArray"); -} -template -void getArray(T *buf, size_t maxSize, const mpz_class& x) -{ - bool b; - getArray(&b, buf, maxSize, x); - if (!b) throw cybozu::Exception("gmp:getArray"); -} -inline bool isPrime(const mpz_class& x) -{ - bool b; - bool ret = isPrime(&b, x); - if (!b) throw cybozu::Exception("gmp:isPrime"); - return ret; -} -inline void getRand(mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen()) -{ - bool b; - getRand(&b, z, bitSize, rg); - if (!b) throw cybozu::Exception("gmp:getRand"); -} -inline void getRandPrime(mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen(), bool setSecondBit = false, bool mustBe3mod4 = false) -{ - bool b; - getRandPrime(&b, z, bitSize, rg, setSecondBit, mustBe3mod4); - if (!b) throw cybozu::Exception("gmp:getRandPrime"); -} -#endif - - -} // mcl::gmp - -/* - Tonelli-Shanks -*/ -class SquareRoot { - bool isPrecomputed_; - bool isPrime; - mpz_class p; - mpz_class g; - int r; - mpz_class q; // p - 1 = 2^r q - mpz_class s; // s = g^q - mpz_class q_add_1_div_2; - struct Tbl { - const char *p; - const char *g; - int r; - const char *q; - const char *s; - const char *q_add_1_div_2; - }; - bool setIfPrecomputed(const mpz_class& p_) - { - static const Tbl tbl[] = { - { // BN254.p - "2523648240000001ba344d80000000086121000000000013a700000000000013", - "2", - 1, - "1291b24120000000dd1a26c0000000043090800000000009d380000000000009", - "2523648240000001ba344d80000000086121000000000013a700000000000012", - "948d920900000006e8d1360000000021848400000000004e9c0000000000005", - }, - { // BN254.r - "2523648240000001ba344d8000000007ff9f800000000010a10000000000000d", - "2", - 2, - "948d920900000006e8d136000000001ffe7e000000000042840000000000003", - "9366c4800000000555150000000000122400000000000015", - "4a46c9048000000374689b000000000fff3f000000000021420000000000002", - }, - { // BLS12_381,p - 
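convertToNAF above rewrites a binary expansion into non-adjacent form, and getNAF keeps whichever representation has the smaller Hamming weight, since each nonzero digit costs one group addition during scalar multiplication. A compact sketch of standard NAF recoding; toNAF is an illustrative helper, not the deleted API:

```cpp
#include <cassert>
#include <vector>

// Non-adjacent form: digits in {-1, 0, 1} with no two adjacent nonzero.
// Fewer nonzero digits means fewer additions in double-and-add.
static std::vector<int> toNAF(unsigned x) {
    std::vector<int> d;                  // least significant digit first
    while (x != 0) {
        if (x & 1) {
            int z = 2 - int(x & 3);      // +1 if x % 4 == 1, -1 if x % 4 == 3
            d.push_back(z);
            x -= unsigned(z);            // unsigned wrap makes x -= -1 into x += 1
        } else {
            d.push_back(0);
        }
        x >>= 1;
    }
    return d;
}

int main() {
    // 7 = 111b has weight 3; NAF(7) = (-1, 0, 0, 1), i.e. 8 - 1, weight 2.
    std::vector<int> d = toNAF(7);
    assert(d.size() == 4 && d[0] == -1 && d[3] == 1);
}
```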
"1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab", - "2", - 1, - "d0088f51cbff34d258dd3db21a5d66bb23ba5c279c2895fb39869507b587b120f55ffff58a9ffffdcff7fffffffd555", - "1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaaa", - "680447a8e5ff9a692c6e9ed90d2eb35d91dd2e13ce144afd9cc34a83dac3d8907aaffffac54ffffee7fbfffffffeaab", - }, - { // BLS12_381.r - "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001", - "5", - 32, - "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff", - "212d79e5b416b6f0fd56dc8d168d6c0c4024ff270b3e0941b788f500b912f1f", - "39f6d3a994cebea4199cec0404d0ec02a9ded2017fff2dff80000000", - }, - }; - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { - mpz_class targetPrime; - bool b; - mcl::gmp::setStr(&b, targetPrime, tbl[i].p, 16); - if (!b) continue; - if (targetPrime != p_) continue; - isPrime = true; - p = p_; - mcl::gmp::setStr(&b, g, tbl[i].g, 16); - if (!b) continue; - r = tbl[i].r; - mcl::gmp::setStr(&b, q, tbl[i].q, 16); - if (!b) continue; - mcl::gmp::setStr(&b, s, tbl[i].s, 16); - if (!b) continue; - mcl::gmp::setStr(&b, q_add_1_div_2, tbl[i].q_add_1_div_2, 16); - if (!b) continue; - isPrecomputed_ = true; - return true; - } - return false; - } -public: - SquareRoot() { clear(); } - bool isPrecomputed() const { return isPrecomputed_; } - void clear() - { - isPrecomputed_ = false; - isPrime = false; - p = 0; - g = 0; - r = 0; - q = 0; - s = 0; - q_add_1_div_2 = 0; - } -#if !defined(CYBOZU_DONT_USE_USE_STRING) && !defined(CYBOZU_DONT_USE_EXCEPTION) - void dump() const - { - printf("\"%s\",\n", mcl::gmp::getStr(p, 16).c_str()); - printf("\"%s\",\n", mcl::gmp::getStr(g, 16).c_str()); - printf("%d,\n", r); - printf("\"%s\",\n", mcl::gmp::getStr(q, 16).c_str()); - printf("\"%s\",\n", mcl::gmp::getStr(s, 16).c_str()); - printf("\"%s\",\n", mcl::gmp::getStr(q_add_1_div_2, 16).c_str()); - } -#endif - void set(bool *pb, const mpz_class& _p, bool usePrecomputedTable = true) - { - if (usePrecomputedTable && setIfPrecomputed(_p)) { - *pb = true; - return; - } - p = _p; - if (p <= 2) { - *pb = false; - return; - } - isPrime = gmp::isPrime(pb, p); - if (!*pb) return; - if (!isPrime) { - *pb = false; - return; - } - g = gmp::getQuadraticNonResidue(p); - // p - 1 = 2^r q, q is odd - r = 0; - q = p - 1; - while ((q & 1) == 0) { - r++; - q /= 2; - } - gmp::powMod(s, g, q, p); - q_add_1_div_2 = (q + 1) / 2; - *pb = true; - } - /* - solve x^2 = a mod p - */ - bool get(mpz_class& x, const mpz_class& a) const - { - if (!isPrime) { - return false; - } - if (a == 0) { - x = 0; - return true; - } - if (gmp::legendre(a, p) < 0) return false; - if (r == 1) { - // (p + 1) / 4 = (q + 1) / 2 - gmp::powMod(x, a, q_add_1_div_2, p); - return true; - } - mpz_class c = s, d; - int e = r; - gmp::powMod(d, a, q, p); - gmp::powMod(x, a, q_add_1_div_2, p); // destroy a if &x == &a - mpz_class dd; - mpz_class b; - while (d != 1) { - int i = 1; - dd = d * d; dd %= p; - while (dd != 1) { - dd *= dd; dd %= p; - i++; - } - b = 1; - b <<= e - i - 1; - gmp::powMod(b, c, b, p); - x *= b; x %= p; - c = b * b; c %= p; - d *= c; d %= p; - e = i; - } - return true; - } - /* - solve x^2 = a in Fp - */ - template - bool get(Fp& x, const Fp& a) const - { - assert(Fp::getOp().mp == p); - if (a == 0) { - x = 0; - return true; - } - { - bool b; - mpz_class aa; - a.getMpz(&b, aa); - assert(b); - if (gmp::legendre(aa, p) < 0) return false; - } - if (r == 1) { - // (p + 1) / 4 = (q + 1) / 2 - Fp::pow(x, a, 
q_add_1_div_2); - return true; - } - Fp c, d; - { - bool b; - c.setMpz(&b, s); - assert(b); - } - int e = r; - Fp::pow(d, a, q); - Fp::pow(x, a, q_add_1_div_2); // destroy a if &x == &a - Fp dd; - Fp b; - while (!d.isOne()) { - int i = 1; - Fp::sqr(dd, d); - while (!dd.isOne()) { - dd *= dd; - i++; - } - b = 1; -// b <<= e - i - 1; - for (int j = 0; j < e - i - 1; j++) { - b += b; - } - Fp::pow(b, c, b); - x *= b; - Fp::sqr(c, b); - d *= c; - e = i; - } - return true; - } - bool operator==(const SquareRoot& rhs) const - { - return isPrime == rhs.isPrime && p == rhs.p && g == rhs.g && r == rhs.r - && q == rhs.q && s == rhs.s && q_add_1_div_2 == rhs.q_add_1_div_2; - } - bool operator!=(const SquareRoot& rhs) const { return !operator==(rhs); } -#ifndef CYBOZU_DONT_USE_EXCEPTION - void set(const mpz_class& _p) - { - bool b; - set(&b, _p); - if (!b) throw cybozu::Exception("gmp:SquareRoot:set"); - } -#endif -}; - -/* - Barrett Reduction - for non GMP version - mod of GMP is faster than Modp -*/ -struct Modp { - static const size_t unitBitSize = sizeof(mcl::fp::Unit) * 8; - mpz_class p_; - mpz_class u_; - mpz_class a_; - size_t pBitSize_; - size_t N_; - bool initU_; // Is u_ initialized? - Modp() - : pBitSize_(0) - , N_(0) - , initU_(false) - { - } - // x &= 1 << (unitBitSize * unitSize) - void shrinkSize(mpz_class &x, size_t unitSize) const - { - size_t u = gmp::getUnitSize(x); - if (u < unitSize) return; - bool b; - gmp::setArray(&b, x, gmp::getUnit(x), unitSize); - (void)b; - assert(b); - } - // p_ is set by p and compute (u_, a_) if possible - void init(const mpz_class& p) - { - p_ = p; - pBitSize_ = gmp::getBitSize(p); - N_ = (pBitSize_ + unitBitSize - 1) / unitBitSize; - initU_ = false; -#if 0 - u_ = (mpz_class(1) << (unitBitSize * 2 * N_)) / p_; -#else - /* - 1 << (unitBitSize * 2 * N_) may be overflow, - so use (1 << (unitBitSize * 2 * N_)) - 1 because u_ is same. 
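The Modp class whose init() is being deleted here is Barrett reduction: the reciprocal u = floor(2^(2k) / p) is computed once, after which each "t mod p" costs two multiplications, shifts, and at most two corrections instead of a division. A toy version with a 16-bit modulus so every intermediate fits in uint64_t; this illustrates the technique, not mcl's multi-limb code:

```cpp
#include <cassert>
#include <cstdint>

struct Barrett {
    uint64_t p, k, u;
    explicit Barrett(uint64_t p_) : p(p_) {
        k = 0;
        while ((uint64_t(1) << k) <= p) k++;          // k = bit length of p
        u = (uint64_t(1) << (2 * k)) / p;             // the Barrett reciprocal
    }
    uint64_t reduce(uint64_t t) const {               // requires t < 2^(2k)
        uint64_t q = ((t >> (k - 1)) * u) >> (k + 1); // q <= floor(t/p) <= q + 2
        uint64_t r = t - q * p;
        while (r >= p) r -= p;                        // at most two corrections
        return r;
    }
};

int main() {
    Barrett b(65521);                                 // largest 16-bit prime
    for (uint64_t t : {0ull, 1ull, 65520ull, 123456789ull, 65521ull * 65520ull}) {
        assert(b.reduce(t) == t % b.p);
    }
}
```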
- */ - uint8_t buf[48 * 2]; - const size_t byteSize = unitBitSize / 8 * 2 * N_; - if (byteSize > sizeof(buf)) return; - memset(buf, 0xff, byteSize); - bool b; - gmp::setArray(&b, u_, buf, byteSize); - if (!b) return; -#endif - u_ /= p_; - a_ = mpz_class(1) << (unitBitSize * (N_ + 1)); - initU_ = true; - } - void modp(mpz_class& r, const mpz_class& t) const - { - assert(p_ > 0); - const size_t tBitSize = gmp::getBitSize(t); - // use gmp::mod if init() fails or t is too large - if (tBitSize > unitBitSize * 2 * N_ || !initU_) { - gmp::mod(r, t, p_); - return; - } - if (tBitSize < pBitSize_) { - r = t; - return; - } - // mod is faster than modp if t is small - if (tBitSize <= unitBitSize * N_) { - gmp::mod(r, t, p_); - return; - } - mpz_class q; - q = t; - q >>= unitBitSize * (N_ - 1); - q *= u_; - q >>= unitBitSize * (N_ + 1); - q *= p_; - shrinkSize(q, N_ + 1); - r = t; - shrinkSize(r, N_ + 1); - r -= q; - if (r < 0) { - r += a_; - } - if (r >= p_) { - r -= p_; - } - } -}; - -} // mcl diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/impl/bn_c_impl.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/impl/bn_c_impl.hpp deleted file mode 100644 index bec2466dd..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/impl/bn_c_impl.hpp +++ /dev/null @@ -1,643 +0,0 @@ -/* - This is an internal header - Do not include this -*/ -#define MCLBN_DLL_EXPORT -#include - -#if MCLBN_FP_UNIT_SIZE == 4 && MCLBN_FR_UNIT_SIZE == 4 -#include -#elif MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 6 -#include -#elif MCLBN_FP_UNIT_SIZE == 6 && MCLBN_FR_UNIT_SIZE == 4 -#include -#elif MCLBN_FP_UNIT_SIZE == 8 && MCLBN_FR_UNIT_SIZE == 8 -#include -#else - #error "not supported size" -#endif -#include -#include -using namespace mcl::bn; - -static Fr *cast(mclBnFr *p) { return reinterpret_cast(p); } -static const Fr *cast(const mclBnFr *p) { return reinterpret_cast(p); } - -static G1 *cast(mclBnG1 *p) { return reinterpret_cast(p); } -static const G1 *cast(const mclBnG1 *p) { return reinterpret_cast(p); } - -static G2 *cast(mclBnG2 *p) { return reinterpret_cast(p); } -static const G2 *cast(const mclBnG2 *p) { return reinterpret_cast(p); } - -static Fp12 *cast(mclBnGT *p) { return reinterpret_cast(p); } -static const Fp12 *cast(const mclBnGT *p) { return reinterpret_cast(p); } - -static Fp6 *cast(uint64_t *p) { return reinterpret_cast(p); } -static const Fp6 *cast(const uint64_t *p) { return reinterpret_cast(p); } - -static Fp2 *cast(mclBnFp2 *p) { return reinterpret_cast(p); } -static const Fp2 *cast(const mclBnFp2 *p) { return reinterpret_cast(p); } - -static Fp *cast(mclBnFp *p) { return reinterpret_cast(p); } -static const Fp *cast(const mclBnFp *p) { return reinterpret_cast(p); } - -template -int setStr(T *x, const char *buf, mclSize bufSize, int ioMode) -{ - size_t n = cast(x)->deserialize(buf, bufSize, ioMode); - return n > 0 ? 0 : -1; -} - -#ifdef __EMSCRIPTEN__ -// use these functions forcibly -extern "C" MCLBN_DLL_API void *mclBnMalloc(size_t n) -{ - return malloc(n); -} -extern "C" MCLBN_DLL_API void mclBnFree(void *p) -{ - free(p); -} -#endif - -int mclBn_getVersion() -{ - return mcl::version; -} - -int mclBn_init(int curve, int compiledTimeVar) -{ - if (compiledTimeVar != MCLBN_COMPILED_TIME_VAR) { - return -(compiledTimeVar | (MCLBN_COMPILED_TIME_VAR * 100)); - } - if (MCL_EC_BEGIN <= curve && curve < MCL_EC_END) { - const mcl::EcParam *para = mcl::getEcParam(curve); - if (para == 0) return -2; - bool b; - initG1only(&b, *para); - return b ? 
0 : -1; - } - const mcl::CurveParam& cp = mcl::getCurveParam(curve); - bool b; - initPairing(&b, cp); - return b ? 0 : -1; -} - -int mclBn_getOpUnitSize() -{ - return (int)Fp::getUnitSize() * sizeof(mcl::fp::Unit) / sizeof(uint64_t); -} - -int mclBn_getG1ByteSize() -{ - return mclBn_getFpByteSize(); -} - -int mclBn_getFrByteSize() -{ - return (int)Fr::getByteSize(); -} - -int mclBn_getFpByteSize() -{ - return (int)Fp::getByteSize(); -} - -mclSize mclBn_getCurveOrder(char *buf, mclSize maxBufSize) -{ - return Fr::getModulo(buf, maxBufSize); -} - -mclSize mclBn_getFieldOrder(char *buf, mclSize maxBufSize) -{ - return Fp::getModulo(buf, maxBufSize); -} - -//////////////////////////////////////////////// -// set zero -void mclBnFr_clear(mclBnFr *x) -{ - cast(x)->clear(); -} - -// set x to y -void mclBnFr_setInt(mclBnFr *y, mclInt x) -{ - *cast(y) = x; -} -void mclBnFr_setInt32(mclBnFr *y, int x) -{ - *cast(y) = x; -} - -int mclBnFr_setStr(mclBnFr *x, const char *buf, mclSize bufSize, int ioMode) -{ - return setStr(x, buf, bufSize, ioMode); -} -int mclBnFr_setLittleEndian(mclBnFr *x, const void *buf, mclSize bufSize) -{ - cast(x)->setArrayMask((const char *)buf, bufSize); - return 0; -} -int mclBnFr_setLittleEndianMod(mclBnFr *x, const void *buf, mclSize bufSize) -{ - bool b; - cast(x)->setArray(&b, (const char *)buf, bufSize, mcl::fp::Mod); - return b ? 0 : -1; -} -mclSize mclBnFr_deserialize(mclBnFr *x, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(x)->deserialize(buf, bufSize); -} -// return 1 if true -int mclBnFr_isValid(const mclBnFr *x) -{ - return cast(x)->isValid(); -} -int mclBnFr_isEqual(const mclBnFr *x, const mclBnFr *y) -{ - return *cast(x) == *cast(y); -} -int mclBnFr_isZero(const mclBnFr *x) -{ - return cast(x)->isZero(); -} -int mclBnFr_isOne(const mclBnFr *x) -{ - return cast(x)->isOne(); -} - -#ifndef MCL_DONT_USE_CSRPNG -int mclBnFr_setByCSPRNG(mclBnFr *x) -{ - bool b; - cast(x)->setByCSPRNG(&b); - return b ? 
0 : -1; -} -void mclBn_setRandFunc(void *self, unsigned int (*readFunc)(void *self, void *buf, unsigned int bufSize)) -{ - mcl::fp::RandGen::setRandFunc(self, readFunc); -} -#endif - -// hash(buf) and set x -int mclBnFr_setHashOf(mclBnFr *x, const void *buf, mclSize bufSize) -{ - cast(x)->setHashOf(buf, bufSize); - return 0; -} - -mclSize mclBnFr_getStr(char *buf, mclSize maxBufSize, const mclBnFr *x, int ioMode) -{ - return cast(x)->getStr(buf, maxBufSize, ioMode); -} -mclSize mclBnFr_serialize(void *buf, mclSize maxBufSize, const mclBnFr *x) -{ - return (mclSize)cast(x)->serialize(buf, maxBufSize); -} - -void mclBnFr_neg(mclBnFr *y, const mclBnFr *x) -{ - Fr::neg(*cast(y), *cast(x)); -} -void mclBnFr_inv(mclBnFr *y, const mclBnFr *x) -{ - Fr::inv(*cast(y), *cast(x)); -} -void mclBnFr_sqr(mclBnFr *y, const mclBnFr *x) -{ - Fr::sqr(*cast(y), *cast(x)); -} -void mclBnFr_add(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) -{ - Fr::add(*cast(z),*cast(x), *cast(y)); -} -void mclBnFr_sub(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) -{ - Fr::sub(*cast(z),*cast(x), *cast(y)); -} -void mclBnFr_mul(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) -{ - Fr::mul(*cast(z),*cast(x), *cast(y)); -} -void mclBnFr_div(mclBnFr *z, const mclBnFr *x, const mclBnFr *y) -{ - Fr::div(*cast(z),*cast(x), *cast(y)); -} - -//////////////////////////////////////////////// -// set zero -void mclBnG1_clear(mclBnG1 *x) -{ - cast(x)->clear(); -} - -int mclBnG1_setStr(mclBnG1 *x, const char *buf, mclSize bufSize, int ioMode) -{ - return setStr(x, buf, bufSize, ioMode); -} -mclSize mclBnG1_deserialize(mclBnG1 *x, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(x)->deserialize(buf, bufSize); -} - -// return 1 if true -int mclBnG1_isValid(const mclBnG1 *x) -{ - return cast(x)->isValid(); -} -int mclBnG1_isEqual(const mclBnG1 *x, const mclBnG1 *y) -{ - return *cast(x) == *cast(y); -} -int mclBnG1_isZero(const mclBnG1 *x) -{ - return cast(x)->isZero(); -} -int mclBnG1_isValidOrder(const mclBnG1 *x) -{ - return cast(x)->isValidOrder(); -} - -int mclBnG1_hashAndMapTo(mclBnG1 *x, const void *buf, mclSize bufSize) -{ - hashAndMapToG1(*cast(x), buf, bufSize); - return 0; -} - -mclSize mclBnG1_getStr(char *buf, mclSize maxBufSize, const mclBnG1 *x, int ioMode) -{ - return cast(x)->getStr(buf, maxBufSize, ioMode); -} - -mclSize mclBnG1_serialize(void *buf, mclSize maxBufSize, const mclBnG1 *x) -{ - return (mclSize)cast(x)->serialize(buf, maxBufSize); -} - -void mclBnG1_neg(mclBnG1 *y, const mclBnG1 *x) -{ - G1::neg(*cast(y), *cast(x)); -} -void mclBnG1_dbl(mclBnG1 *y, const mclBnG1 *x) -{ - G1::dbl(*cast(y), *cast(x)); -} -void mclBnG1_normalize(mclBnG1 *y, const mclBnG1 *x) -{ - G1::normalize(*cast(y), *cast(x)); -} -void mclBnG1_add(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y) -{ - G1::add(*cast(z),*cast(x), *cast(y)); -} -void mclBnG1_sub(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y) -{ - G1::sub(*cast(z),*cast(x), *cast(y)); -} -void mclBnG1_mul(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y) -{ - G1::mul(*cast(z),*cast(x), *cast(y)); -} -void mclBnG1_mulCT(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y) -{ - G1::mulCT(*cast(z),*cast(x), *cast(y)); -} - -//////////////////////////////////////////////// -// set zero -void mclBnG2_clear(mclBnG2 *x) -{ - cast(x)->clear(); -} - -int mclBnG2_setStr(mclBnG2 *x, const char *buf, mclSize bufSize, int ioMode) -{ - return setStr(x, buf, bufSize, ioMode); -} -mclSize mclBnG2_deserialize(mclBnG2 *x, const void *buf, mclSize bufSize) -{ - return 
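The mclBnFr_* entry points above are thin casts onto the C++ Fr type. A minimal smoke test of the exported field arithmetic, assuming a build of the mclbn library; the curve and header choice here are illustrative:

```cpp
// Every nonzero field element has an inverse, so x * x^-1 must be one.
#include <mcl/bn_c384_256.h>
#include <cassert>

int main() {
    assert(mclBn_init(MCL_BLS12_381, MCLBN_COMPILED_TIME_VAR) == 0);
    mclBnFr x, xinv, prod;
    mclBnFr_setInt(&x, 7);
    mclBnFr_inv(&xinv, &x);          // xinv = 1/7 in Fr
    mclBnFr_mul(&prod, &x, &xinv);
    assert(mclBnFr_isOne(&prod));
}
```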
(mclSize)cast(x)->deserialize(buf, bufSize); -} - -// return 1 if true -int mclBnG2_isValid(const mclBnG2 *x) -{ - return cast(x)->isValid(); -} -int mclBnG2_isEqual(const mclBnG2 *x, const mclBnG2 *y) -{ - return *cast(x) == *cast(y); -} -int mclBnG2_isZero(const mclBnG2 *x) -{ - return cast(x)->isZero(); -} -int mclBnG2_isValidOrder(const mclBnG2 *x) -{ - return cast(x)->isValidOrder(); -} - -int mclBnG2_hashAndMapTo(mclBnG2 *x, const void *buf, mclSize bufSize) -{ - hashAndMapToG2(*cast(x), buf, bufSize); - return 0; -} - -mclSize mclBnG2_getStr(char *buf, mclSize maxBufSize, const mclBnG2 *x, int ioMode) -{ - return cast(x)->getStr(buf, maxBufSize, ioMode); -} - -mclSize mclBnG2_serialize(void *buf, mclSize maxBufSize, const mclBnG2 *x) -{ - return (mclSize)cast(x)->serialize(buf, maxBufSize); -} - -void mclBnG2_neg(mclBnG2 *y, const mclBnG2 *x) -{ - G2::neg(*cast(y), *cast(x)); -} -void mclBnG2_dbl(mclBnG2 *y, const mclBnG2 *x) -{ - G2::dbl(*cast(y), *cast(x)); -} -void mclBnG2_normalize(mclBnG2 *y, const mclBnG2 *x) -{ - G2::normalize(*cast(y), *cast(x)); -} -void mclBnG2_add(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y) -{ - G2::add(*cast(z),*cast(x), *cast(y)); -} -void mclBnG2_sub(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y) -{ - G2::sub(*cast(z),*cast(x), *cast(y)); -} -void mclBnG2_mul(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y) -{ - G2::mul(*cast(z),*cast(x), *cast(y)); -} -void mclBnG2_mulCT(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y) -{ - G2::mulCT(*cast(z),*cast(x), *cast(y)); -} - -//////////////////////////////////////////////// -// set zero -void mclBnGT_clear(mclBnGT *x) -{ - cast(x)->clear(); -} -void mclBnGT_setInt(mclBnGT *y, mclInt x) -{ - cast(y)->clear(); - *(cast(y)->getFp0()) = x; -} -void mclBnGT_setInt32(mclBnGT *y, int x) -{ - cast(y)->clear(); - *(cast(y)->getFp0()) = x; -} - -int mclBnGT_setStr(mclBnGT *x, const char *buf, mclSize bufSize, int ioMode) -{ - return setStr(x, buf, bufSize, ioMode); -} -mclSize mclBnGT_deserialize(mclBnGT *x, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(x)->deserialize(buf, bufSize); -} - -// return 1 if true -int mclBnGT_isEqual(const mclBnGT *x, const mclBnGT *y) -{ - return *cast(x) == *cast(y); -} -int mclBnGT_isZero(const mclBnGT *x) -{ - return cast(x)->isZero(); -} -int mclBnGT_isOne(const mclBnGT *x) -{ - return cast(x)->isOne(); -} - -mclSize mclBnGT_getStr(char *buf, mclSize maxBufSize, const mclBnGT *x, int ioMode) -{ - return cast(x)->getStr(buf, maxBufSize, ioMode); -} - -mclSize mclBnGT_serialize(void *buf, mclSize maxBufSize, const mclBnGT *x) -{ - return (mclSize)cast(x)->serialize(buf, maxBufSize); -} - -void mclBnGT_neg(mclBnGT *y, const mclBnGT *x) -{ - Fp12::neg(*cast(y), *cast(x)); -} -void mclBnGT_inv(mclBnGT *y, const mclBnGT *x) -{ - Fp12::inv(*cast(y), *cast(x)); -} -void mclBnGT_sqr(mclBnGT *y, const mclBnGT *x) -{ - Fp12::sqr(*cast(y), *cast(x)); -} -void mclBnGT_add(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) -{ - Fp12::add(*cast(z),*cast(x), *cast(y)); -} -void mclBnGT_sub(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) -{ - Fp12::sub(*cast(z),*cast(x), *cast(y)); -} -void mclBnGT_mul(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) -{ - Fp12::mul(*cast(z),*cast(x), *cast(y)); -} -void mclBnGT_div(mclBnGT *z, const mclBnGT *x, const mclBnGT *y) -{ - Fp12::div(*cast(z),*cast(x), *cast(y)); -} - -void mclBnGT_pow(mclBnGT *z, const mclBnGT *x, const mclBnFr *y) -{ - Fp12::pow(*cast(z), *cast(x), *cast(y)); -} -void mclBnGT_powGeneric(mclBnGT *z, const mclBnGT *x, const 
mclBnFr *y) -{ - Fp12::powGeneric(*cast(z), *cast(x), *cast(y)); -} - -void mclBn_pairing(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y) -{ - pairing(*cast(z), *cast(x), *cast(y)); -} -void mclBn_finalExp(mclBnGT *y, const mclBnGT *x) -{ - finalExp(*cast(y), *cast(x)); -} -void mclBn_millerLoop(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y) -{ - millerLoop(*cast(z), *cast(x), *cast(y)); -} -int mclBn_getUint64NumToPrecompute(void) -{ - return int(BN::param.precomputedQcoeffSize * sizeof(Fp6) / sizeof(uint64_t)); -} - -void mclBn_precomputeG2(uint64_t *Qbuf, const mclBnG2 *Q) -{ - precomputeG2(cast(Qbuf), *cast(Q)); -} - -void mclBn_precomputedMillerLoop(mclBnGT *f, const mclBnG1 *P, const uint64_t *Qbuf) -{ - precomputedMillerLoop(*cast(f), *cast(P), cast(Qbuf)); -} - -void mclBn_precomputedMillerLoop2(mclBnGT *f, const mclBnG1 *P1, const uint64_t *Q1buf, const mclBnG1 *P2, const uint64_t *Q2buf) -{ - precomputedMillerLoop2(*cast(f), *cast(P1), cast(Q1buf), *cast(P2), cast(Q2buf)); -} - -void mclBn_precomputedMillerLoop2mixed(mclBnGT *f, const mclBnG1 *P1, const mclBnG2 *Q1, const mclBnG1 *P2, const uint64_t *Q2buf) -{ - precomputedMillerLoop2mixed(*cast(f), *cast(P1), *cast(Q1), *cast(P2), cast(Q2buf)); -} - -int mclBn_FrLagrangeInterpolation(mclBnFr *out, const mclBnFr *xVec, const mclBnFr *yVec, mclSize k) -{ - bool b; - mcl::LagrangeInterpolation(&b, *cast(out), cast(xVec), cast(yVec), k); - return b ? 0 : -1; -} -int mclBn_G1LagrangeInterpolation(mclBnG1 *out, const mclBnFr *xVec, const mclBnG1 *yVec, mclSize k) -{ - bool b; - mcl::LagrangeInterpolation(&b, *cast(out), cast(xVec), cast(yVec), k); - return b ? 0 : -1; -} -int mclBn_G2LagrangeInterpolation(mclBnG2 *out, const mclBnFr *xVec, const mclBnG2 *yVec, mclSize k) -{ - bool b; - mcl::LagrangeInterpolation(&b, *cast(out), cast(xVec), cast(yVec), k); - return b ? 0 : -1; -} -int mclBn_FrEvaluatePolynomial(mclBnFr *out, const mclBnFr *cVec, mclSize cSize, const mclBnFr *x) -{ - bool b; - mcl::evaluatePolynomial(&b, *cast(out), cast(cVec), cSize, *cast(x)); - return b ? 0 : -1; -} -int mclBn_G1EvaluatePolynomial(mclBnG1 *out, const mclBnG1 *cVec, mclSize cSize, const mclBnFr *x) -{ - bool b; - mcl::evaluatePolynomial(&b, *cast(out), cast(cVec), cSize, *cast(x)); - return b ? 0 : -1; -} -int mclBn_G2EvaluatePolynomial(mclBnG2 *out, const mclBnG2 *cVec, mclSize cSize, const mclBnFr *x) -{ - bool b; - mcl::evaluatePolynomial(&b, *cast(out), cast(cVec), cSize, *cast(x)); - return b ? 
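The pairing entry points in this hunk make the bilinearity identity e(aP, bQ) = e(P, Q)^(ab) directly checkable from C. A hedged usage sketch under the same assumptions as the Fr example above (mclbn library linked, illustrative curve choice):

```cpp
#include <mcl/bn_c384_256.h>
#include <cassert>

int main() {
    assert(mclBn_init(MCL_BLS12_381, MCLBN_COMPILED_TIME_VAR) == 0);

    mclBnG1 P; mclBnG2 Q;
    mclBnG1_hashAndMapTo(&P, "P", 1);      // deterministic test points
    mclBnG2_hashAndMapTo(&Q, "Q", 1);

    mclBnFr a, b, ab;
    mclBnFr_setInt(&a, 123);
    mclBnFr_setInt(&b, 456);
    mclBnFr_mul(&ab, &a, &b);

    mclBnG1 aP; mclBnG2 bQ;
    mclBnG1_mul(&aP, &P, &a);
    mclBnG2_mul(&bQ, &Q, &b);

    mclBnGT e1, e2;
    mclBn_pairing(&e1, &aP, &bQ);          // e(aP, bQ)
    mclBn_pairing(&e2, &P, &Q);
    mclBnGT_pow(&e2, &e2, &ab);            // e(P, Q)^(ab)
    assert(mclBnGT_isEqual(&e1, &e2));
}
```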
0 : -1; -} - -void mclBn_verifyOrderG1(int doVerify) -{ - verifyOrderG1(doVerify != 0); -} - -void mclBn_verifyOrderG2(int doVerify) -{ - verifyOrderG2(doVerify != 0); -} - -mclSize mclBnFp_getStr(char *buf, mclSize maxBufSize, const mclBnFp *x, int ioMode) -{ - return cast(x)->getStr(buf, maxBufSize, ioMode); -} -int mclBnFp_setStr(mclBnFp *x, const char *buf, mclSize bufSize, int ioMode) -{ - return setStr(x, buf, bufSize, ioMode); -} -mclSize mclBnFp_deserialize(mclBnFp *x, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(x)->deserialize(buf, bufSize); -} - -mclSize mclBnFp_serialize(void *buf, mclSize maxBufSize, const mclBnFp *x) -{ - return (mclSize)cast(x)->serialize(buf, maxBufSize); -} - -void mclBnFp_clear(mclBnFp *x) -{ - cast(x)->clear(); -} - -int mclBnFp_setLittleEndian(mclBnFp *x, const void *buf, mclSize bufSize) -{ - cast(x)->setArrayMask((const char *)buf, bufSize); - return 0; -} - -int mclBnFp_setLittleEndianMod(mclBnFp *x, const void *buf, mclSize bufSize) -{ - bool b; - cast(x)->setArray(&b, (const char *)buf, bufSize, mcl::fp::Mod); - return b ? 0 : -1; -} -int mclBnFp_isEqual(const mclBnFp *x, const mclBnFp *y) -{ - return *cast(x) == *cast(y); -} - -int mclBnFp_setHashOf(mclBnFp *x, const void *buf, mclSize bufSize) -{ - cast(x)->setHashOf(buf, bufSize); - return 0; -} - -int mclBnFp_mapToG1(mclBnG1 *y, const mclBnFp *x) -{ - bool b; - mapToG1(&b, *cast(y), *cast(x)); - return b ? 0 : -1; -} - -mclSize mclBnFp2_deserialize(mclBnFp2 *x, const void *buf, mclSize bufSize) -{ - return (mclSize)cast(x)->deserialize(buf, bufSize); -} - -mclSize mclBnFp2_serialize(void *buf, mclSize maxBufSize, const mclBnFp2 *x) -{ - return (mclSize)cast(x)->serialize(buf, maxBufSize); -} - -void mclBnFp2_clear(mclBnFp2 *x) -{ - cast(x)->clear(); -} - -int mclBnFp2_isEqual(const mclBnFp2 *x, const mclBnFp2 *y) -{ - return *cast(x) == *cast(y); -} - -int mclBnFp2_mapToG2(mclBnG2 *y, const mclBnFp2 *x) -{ - bool b; - mapToG2(&b, *cast(y), *cast(x)); - return b ? 
0 : -1; -} - -int mclBnG1_getBasePoint(mclBnG1 *x) -{ - *cast(x) = mcl::bn::getG1basePoint(); - return 0; -} - diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/lagrange.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/lagrange.hpp deleted file mode 100644 index 18e0597ec..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/lagrange.hpp +++ /dev/null @@ -1,107 +0,0 @@ -#pragma once -/** - @file - @brief Lagrange Interpolation - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -namespace mcl { - -/* - recover out = f(0) by { (x, y) | x = S[i], y = f(x) = vec[i] } - @retval 0 if succeed else -1 -*/ -template -void LagrangeInterpolation(bool *pb, G& out, const F *S, const G *vec, size_t k) -{ - if (k == 0) { - *pb = false; - return; - } - if (k == 1) { - out = vec[0]; - *pb = true; - return; - } - /* - delta_{i,S}(0) = prod_{j != i} S[j] / (S[j] - S[i]) = a / b - where a = prod S[j], b = S[i] * prod_{j != i} (S[j] - S[i]) - */ - F a = S[0]; - for (size_t i = 1; i < k; i++) { - a *= S[i]; - } - if (a.isZero()) { - *pb = false; - return; - } - /* - f(0) = sum_i f(S[i]) delta_{i,S}(0) - */ - G r; - r.clear(); - for (size_t i = 0; i < k; i++) { - F b = S[i]; - for (size_t j = 0; j < k; j++) { - if (j != i) { - F v = S[j] - S[i]; - if (v.isZero()) { - *pb = false; - return; - } - b *= v; - } - } - G t; - G::mul(t, vec[i], a / b); - r += t; - } - out = r; - *pb = true; -} - -/* - out = f(x) = c[0] + c[1] * x + c[2] * x^2 + ... + c[cSize - 1] * x^(cSize - 1) - @retval 0 if succeed else -1 (if cSize == 0) -*/ -template -void evaluatePolynomial(bool *pb, G& out, const G *c, size_t cSize, const T& x) -{ - if (cSize == 0) { - *pb = false; - return; - } - if (cSize == 1) { - out = c[0]; - *pb = true; - return; - } - G y = c[cSize - 1]; - for (int i = (int)cSize - 2; i >= 0; i--) { - G::mul(y, y, x); - G::add(y, y, c[i]); - } - out = y; - *pb = true; -} - -#ifndef CYBOZU_DONT_USE_EXCEPTION -template -void LagrangeInterpolation(G& out, const F *S, const G *vec, size_t k) -{ - bool b; - LagrangeInterpolation(&b, out, S, vec, k); - if (!b) throw cybozu::Exception("LagrangeInterpolation"); -} - -template -void evaluatePolynomial(G& out, const G *c, size_t cSize, const T& x) -{ - bool b; - evaluatePolynomial(&b, out, c, cSize, x); - if (!b) throw cybozu::Exception("evaluatePolynomial"); -} -#endif - -} // mcl diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/op.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/op.hpp deleted file mode 100644 index 36d37035e..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/op.hpp +++ /dev/null @@ -1,389 +0,0 @@ -#pragma once -/** - @file - @brief definition of Op - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include -#include -#include - -#ifndef MCL_MAX_BIT_SIZE - #define MCL_MAX_BIT_SIZE 521 -#endif -#if defined(__EMSCRIPTEN__) || defined(__wasm__) - #define MCL_DONT_USE_XBYAK - #define MCL_DONT_USE_OPENSSL -#endif -#if !defined(MCL_DONT_USE_XBYAK) && (defined(_WIN64) || defined(__x86_64__)) && (MCL_SIZEOF_UNIT == 8) - #define MCL_USE_XBYAK - #define MCL_XBYAK_DIRECT_CALL -#endif - -#define MCL_MAX_HASH_BIT_SIZE 512 - -namespace mcl { - -static const int version = 0x092; /* 0xABC = A.BC */ - -/* - specifies available string format mode for X::setIoMode() - // for Fp, Fp2, Fp6, Fp12 - default(0) : IoDec - printable string(zero terminated, variable size) - 
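The deleted lagrange.hpp recovers f(0) from k evaluation points, which is exactly Shamir secret-sharing reconstruction: f(0) = sum_i f(S_i) * prod_{j != i} S_j / (S_j - S_i). A tiny prime-field demonstration of the same formula; p = 97 and the polynomial are illustrative:

```cpp
#include <cassert>
#include <cstdint>

static const int64_t p = 97;
static int64_t mod(int64_t a) { return (a % p + p) % p; }
static int64_t powmod(int64_t b, int64_t e) {
    int64_t r = 1; b = mod(b);
    for (; e > 0; e >>= 1, b = b * b % p) if (e & 1) r = r * b % p;
    return r;
}
static int64_t inv(int64_t a) { return powmod(a, p - 2); }   // Fermat inverse

int main() {
    // Secret polynomial f(x) = 42 + 7x + 3x^2; the secret is f(0) = 42.
    auto f = [](int64_t x) { return mod(42 + 7 * x + 3 * x * x); };
    const int64_t S[3] = {1, 2, 3};                  // share indices
    const int64_t y[3] = {f(1), f(2), f(3)};         // shares
    int64_t secret = 0;
    for (int i = 0; i < 3; i++) {
        int64_t num = 1, den = 1;                    // delta_i(0) = num / den
        for (int j = 0; j < 3; j++) {
            if (j == i) continue;
            num = num * mod(S[j]) % p;
            den = den * mod(S[j] - S[i]) % p;
        }
        secret = mod(secret + y[i] * num % p * inv(den) % p);
    }
    assert(secret == 42);                            // f(0) recovered
}
```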
IoBin(2) | IoDec(10) | IoHex(16) | IoBinPrefix | IoHexPrefix - - byte string(not zero terminated, fixed size) - IoArray | IoArrayRaw - IoArray = IoSerialize - - // for Ec - affine(0) | IoEcCompY | IoComp - default : affine - - affine and IoEcCompY are available with ioMode for Fp - IoSerialize ignores ioMode for Fp - - IoAuto - dec or hex according to ios_base::fmtflags - IoBin - binary number([01]+) - IoDec - decimal number - IoHex - hexadecimal number([0-9a-fA-F]+) - IoBinPrefix - 0b + - IoHexPrefix - 0x + - IoArray - array of Unit(fixed size = Fp::getByteSize()) - IoArrayRaw - array of Unit(fixed size = Fp::getByteSize()) without Montgomery conversion - - // for Ec::setIoMode() - IoEcAffine(default) - "0" ; infinity - "1 " ; affine coordinate - - IoEcProj - "4" ; projective or jacobi coordinate - - IoEcCompY - 1-bit y prepresentation of elliptic curve - "2 " ; compressed for even y - "3 " ; compressed for odd y - - IoSerialize - if isMSBserialize(): // p is not full bit - size = Fp::getByteSize() - use MSB of array of x for 1-bit y for prime p where (p % 8 != 0) - [0] ; infinity - ; for even y - |1 ; for odd y ; |1 means set MSB of x - else: - size = Fp::getByteSize() + 1 - [0] ; infinity - 2 ; for even y - 3 ; for odd y -*/ -enum IoMode { - IoAuto = 0, // dec or hex according to ios_base::fmtflags - IoBin = 2, // binary number without prefix - IoDec = 10, // decimal number without prefix - IoHex = 16, // hexadecimal number without prefix - IoArray = 32, // array of Unit(fixed size) - IoArrayRaw = 64, // raw array of Unit without Montgomery conversion - IoPrefix = 128, // append '0b'(bin) or '0x'(hex) - IoBinPrefix = IoBin | IoPrefix, - IoHexPrefix = IoHex | IoPrefix, - IoEcAffine = 0, // affine coordinate - IoEcCompY = 256, // 1-bit y representation of elliptic curve - IoSerialize = 512, // use MBS for 1-bit y - IoFixedSizeByteSeq = IoSerialize, // obsolete - IoEcProj = 1024, // projective or jacobi coordinate - IoSerializeHexStr = 2048 // printable hex string -}; - -namespace fp { - -const size_t UnitBitSize = sizeof(Unit) * 8; - -const size_t maxUnitSize = (MCL_MAX_BIT_SIZE + UnitBitSize - 1) / UnitBitSize; -#define MCL_MAX_UNIT_SIZE ((MCL_MAX_BIT_SIZE + MCL_UNIT_BIT_SIZE - 1) / MCL_UNIT_BIT_SIZE) - -struct FpGenerator; -struct Op; - -typedef void (*void1u)(Unit*); -typedef void (*void2u)(Unit*, const Unit*); -typedef void (*void2uI)(Unit*, const Unit*, Unit); -typedef void (*void2uIu)(Unit*, const Unit*, Unit, const Unit*); -typedef void (*void2uOp)(Unit*, const Unit*, const Op&); -typedef void (*void3u)(Unit*, const Unit*, const Unit*); -typedef void (*void4u)(Unit*, const Unit*, const Unit*, const Unit*); -typedef int (*int2u)(Unit*, const Unit*); - -typedef Unit (*u1uII)(Unit*, Unit, Unit); -typedef Unit (*u3u)(Unit*, const Unit*, const Unit*); - -/* - disable -Wcast-function-type - the number of arguments of some JIT functions is smaller than that of T -*/ -template -T func_ptr_cast(S func) -{ - return reinterpret_cast(reinterpret_cast(func)); -} -struct Block { - const Unit *p; // pointer to original FpT.v_ - size_t n; - Unit v_[maxUnitSize]; -}; - -enum Mode { - FP_AUTO, - FP_GMP, - FP_GMP_MONT, - FP_LLVM, - FP_LLVM_MONT, - FP_XBYAK -}; - -enum PrimeMode { - PM_GENERIC = 0, - PM_NIST_P192, - PM_SECP256K1, - PM_NIST_P521 -}; - -enum MaskMode { - NoMask = 0, // throw if greater or equal - SmallMask = 1, // 1-bit smaller mask if greater or equal - MaskAndMod = 2, // mask and substract if greater or equal - Mod = 3 // mod p -}; - -struct Op { - /* - don't change the layout of rp 
and p - asm code assumes &rp + 1 == p - */ - Unit rp; - Unit p[maxUnitSize]; - mpz_class mp; - uint32_t pmod4; - mcl::SquareRoot sq; - mcl::Modp modp; - Unit half[maxUnitSize]; // (p + 1) / 2 - Unit oneRep[maxUnitSize]; // 1(=inv R if Montgomery) - /* - for Montgomery - one = 1 - R = (1 << (N * sizeof(Unit) * 8)) % p - R2 = (R * R) % p - R3 = RR^3 - */ - Unit one[maxUnitSize]; - Unit R2[maxUnitSize]; - Unit R3[maxUnitSize]; -#ifdef MCL_USE_XBYAK - FpGenerator *fg; - mcl::Array invTbl; -#endif - void3u fp_addA_; - void3u fp_subA_; - void2u fp_negA_; - void3u fp_mulA_; - void2u fp_sqrA_; - void3u fp2_addA_; - void3u fp2_subA_; - void2u fp2_negA_; - void3u fp2_mulA_; - void2u fp2_sqrA_; - void3u fpDbl_addA_; - void3u fpDbl_subA_; - void3u fpDbl_mulPreA_; - void2u fpDbl_sqrPreA_; - void2u fpDbl_modA_; - void3u fp2Dbl_mulPreA_; - void2u fp2Dbl_sqrPreA_; - size_t maxN; - size_t N; - size_t bitSize; - bool (*fp_isZero)(const Unit*); - void1u fp_clear; - void2u fp_copy; - void2u fp_shr1; - void3u fp_neg; - void4u fp_add; - void4u fp_sub; - void4u fp_mul; - void3u fp_sqr; - void2uOp fp_invOp; - void2uIu fp_mulUnit; // fpN1_mod + fp_mulUnitPre - - void3u fpDbl_mulPre; - void2u fpDbl_sqrPre; - int2u fp_preInv; - void2uI fp_mulUnitPre; // z[N + 1] = x[N] * y - void3u fpN1_mod; // y[N] = x[N + 1] % p[N] - - void4u fpDbl_add; - void4u fpDbl_sub; - void3u fpDbl_mod; - - u3u fp_addPre; // without modulo p - u3u fp_subPre; // without modulo p - u3u fpDbl_addPre; - u3u fpDbl_subPre; - /* - for Fp2 = F[u] / (u^2 + 1) - x = a + bu - */ - int xi_a; // xi = xi_a + u - void4u fp2_mulNF; - void2u fp2_inv; - void2u fp2_mul_xiA_; - uint32_t (*hash)(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize); - - PrimeMode primeMode; - bool isFullBit; // true if bitSize % uniSize == 0 - bool isMont; // true if use Montgomery - bool isFastMod; // true if modulo is fast - - Op() - { - clear(); - } - ~Op() - { -#ifdef MCL_USE_XBYAK - destroyFpGenerator(fg); -#endif - } - void clear() - { - rp = 0; - memset(p, 0, sizeof(p)); - mp = 0; - pmod4 = 0; - sq.clear(); - // fg is not set - memset(half, 0, sizeof(half)); - memset(oneRep, 0, sizeof(oneRep)); - memset(one, 0, sizeof(one)); - memset(R2, 0, sizeof(R2)); - memset(R3, 0, sizeof(R3)); -#ifdef MCL_USE_XBYAK - invTbl.clear(); -#endif - fp_addA_ = 0; - fp_subA_ = 0; - fp_negA_ = 0; - fp_mulA_ = 0; - fp_sqrA_ = 0; - fp2_addA_ = 0; - fp2_subA_ = 0; - fp2_negA_ = 0; - fp2_mulA_ = 0; - fp2_sqrA_ = 0; - fpDbl_addA_ = 0; - fpDbl_subA_ = 0; - fpDbl_mulPreA_ = 0; - fpDbl_sqrPreA_ = 0; - fpDbl_modA_ = 0; - fp2Dbl_mulPreA_ = 0; - fp2Dbl_sqrPreA_ = 0; - maxN = 0; - N = 0; - bitSize = 0; - fp_isZero = 0; - fp_clear = 0; - fp_copy = 0; - fp_shr1 = 0; - fp_neg = 0; - fp_add = 0; - fp_sub = 0; - fp_mul = 0; - fp_sqr = 0; - fp_invOp = 0; - fp_mulUnit = 0; - - fpDbl_mulPre = 0; - fpDbl_sqrPre = 0; - fp_preInv = 0; - fp_mulUnitPre = 0; - fpN1_mod = 0; - - fpDbl_add = 0; - fpDbl_sub = 0; - fpDbl_mod = 0; - - fp_addPre = 0; - fp_subPre = 0; - fpDbl_addPre = 0; - fpDbl_subPre = 0; - - xi_a = 0; - fp2_mulNF = 0; - fp2_inv = 0; - fp2_mul_xiA_ = 0; - - primeMode = PM_GENERIC; - isFullBit = false; - isMont = false; - isFastMod = false; - hash = 0; - } - void fromMont(Unit* y, const Unit *x) const - { - /* - M(x, y) = xyR^-1 - y = M(x, 1) = xR^-1 - */ - fp_mul(y, x, one, p); - } - void toMont(Unit* y, const Unit *x) const - { - /* - y = M(x, R2) = xR^2 R^-1 = xR - */ - fp_mul(y, x, R2, p); - } - bool init(const mpz_class& p, size_t maxBitSize, int xi_a, Mode mode, size_t mclMaxBitSize = 
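Op::fromMont and Op::toMont above are each one Montgomery multiplication: REDC(x * R^2) maps into Montgomery form and REDC(xR * 1) maps back, where REDC(T) = T * R^-1 mod N. A single-word sketch with R = 2^32, using the GCC/Clang __int128 extension; mcl's version operates on multi-limb Unit arrays:

```cpp
#include <cassert>
#include <cstdint>

static const uint64_t N = 0xfffffffbull;   // an odd 32-bit modulus (illustrative)

static uint32_t negInvN() {                // N' = -N^-1 mod 2^32
    uint32_t inv = 1;                      // Newton on 2-adics doubles precision
    for (int i = 0; i < 5; i++) inv *= 2 - uint32_t(N) * inv;
    return ~inv + 1;
}

static uint64_t redc(uint64_t T) {         // requires T < N * 2^32
    static const uint32_t Np = negInvN();
    uint32_t m = uint32_t(T) * Np;         // makes T + m*N divisible by 2^32
    uint64_t t = uint64_t(((unsigned __int128)T + (unsigned __int128)m * N) >> 32);
    return t >= N ? t - N : t;             // result is T * R^-1 mod N
}

int main() {
    const uint64_t R2 = uint64_t(((unsigned __int128)1 << 64) % N);  // R^2 mod N
    uint64_t x = 123456789, y = 987654321;
    uint64_t xm = redc(x * R2);            // toMont: x -> xR
    uint64_t ym = redc(y * R2);
    uint64_t zm = redc(xm * ym);           // Montgomery multiply: (xy)R mod N
    assert(redc(zm) == x * y % N);         // fromMont recovers the plain product
}
```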
MCL_MAX_BIT_SIZE); -#ifdef MCL_USE_XBYAK - static FpGenerator* createFpGenerator(); - static void destroyFpGenerator(FpGenerator *fg); -#endif -private: - Op(const Op&); - void operator=(const Op&); -}; - -inline const char* getIoSeparator(int ioMode) -{ - return (ioMode & (IoArray | IoArrayRaw | IoSerialize | IoSerializeHexStr)) ? "" : " "; -} - -inline void dump(const char *s, size_t n) -{ - for (size_t i = 0; i < n; i++) { - printf("%02x ", (uint8_t)s[i]); - } - printf("\n"); -} - -#ifndef CYBOZU_DONT_USE_STRING -int detectIoMode(int ioMode, const std::ios_base& ios); - -inline void dump(const std::string& s) -{ - dump(s.c_str(), s.size()); -} -#endif - -} } // mcl::fp diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/operator.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/operator.hpp deleted file mode 100644 index e9bc506df..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/operator.hpp +++ /dev/null @@ -1,177 +0,0 @@ -#pragma once -/** - @file - @brief operator class - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include -#include -#ifdef _MSC_VER - #ifndef MCL_FORCE_INLINE - #define MCL_FORCE_INLINE __forceinline - #endif - #pragma warning(push) - #pragma warning(disable : 4714) -#else - #ifndef MCL_FORCE_INLINE - #define MCL_FORCE_INLINE __attribute__((always_inline)) - #endif -#endif - -namespace mcl { namespace fp { - -template -struct Empty {}; - -/* - T must have add, sub, mul, inv, neg -*/ -template > -struct Operator : public E { - template MCL_FORCE_INLINE T& operator+=(const S& rhs) { T::add(static_cast(*this), static_cast(*this), rhs); return static_cast(*this); } - template MCL_FORCE_INLINE T& operator-=(const S& rhs) { T::sub(static_cast(*this), static_cast(*this), rhs); return static_cast(*this); } - template friend MCL_FORCE_INLINE T operator+(const T& a, const S& b) { T c; T::add(c, a, b); return c; } - template friend MCL_FORCE_INLINE T operator-(const T& a, const S& b) { T c; T::sub(c, a, b); return c; } - template MCL_FORCE_INLINE T& operator*=(const S& rhs) { T::mul(static_cast(*this), static_cast(*this), rhs); return static_cast(*this); } - template friend MCL_FORCE_INLINE T operator*(const T& a, const S& b) { T c; T::mul(c, a, b); return c; } - MCL_FORCE_INLINE T& operator/=(const T& rhs) { T c; T::inv(c, rhs); T::mul(static_cast(*this), static_cast(*this), c); return static_cast(*this); } - static MCL_FORCE_INLINE void div(T& c, const T& a, const T& b) { T t; T::inv(t, b); T::mul(c, a, t); } - friend MCL_FORCE_INLINE T operator/(const T& a, const T& b) { T c; T::inv(c, b); c *= a; return c; } - MCL_FORCE_INLINE T operator-() const { T c; T::neg(c, static_cast(*this)); return c; } - template class FpT> - static void pow(T& z, const T& x, const FpT& y) - { - fp::Block b; - y.getBlock(b); - powArray(z, x, b.p, b.n, false, false); - } - template class FpT> - static void powGeneric(T& z, const T& x, const FpT& y) - { - fp::Block b; - y.getBlock(b); - powArrayBase(z, x, b.p, b.n, false, false); - } - template class FpT> - static void powCT(T& z, const T& x, const FpT& y) - { - fp::Block b; - y.getBlock(b); - powArray(z, x, b.p, b.n, false, true); - } - static void pow(T& z, const T& x, int64_t y) - { - const uint64_t u = fp::abs_(y); -#if MCL_SIZEOF_UNIT == 8 - powArray(z, x, &u, 1, y < 0, false); -#else - uint32_t ua[2] = { uint32_t(u), uint32_t(u >> 32) }; - size_t un = ua[1] ? 
2 : 1; - powArray(z, x, ua, un, y < 0, false); -#endif - } - static void pow(T& z, const T& x, const mpz_class& y) - { - powArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, false); - } - static void powGeneric(T& z, const T& x, const mpz_class& y) - { - powArrayBase(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, false); - } - static void powCT(T& z, const T& x, const mpz_class& y) - { - powArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, true); - } - static void setPowArrayGLV(void f(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime)) - { - powArrayGLV = f; - } -private: - static void (*powArrayGLV)(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime); - static void powArray(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime) - { - if (powArrayGLV && (constTime || yn > 1)) { - powArrayGLV(z, x, y, yn, isNegative, constTime); - return; - } - powArrayBase(z, x, y, yn, isNegative, constTime); - } - static void powArrayBase(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime) - { - T tmp; - const T *px = &x; - if (&z == &x) { - tmp = x; - px = &tmp; - } - z = 1; - fp::powGeneric(z, *px, y, yn, T::mul, T::sqr, (void (*)(T&, const T&))0, constTime ? T::BaseFp::getBitSize() : 0); - if (isNegative) { - T::inv(z, z); - } - } -}; - -template -void (*Operator::powArrayGLV)(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime); - -/* - T must have save and load -*/ -template > -struct Serializable : public E { - void setStr(bool *pb, const char *str, int ioMode = 0) - { - size_t len = strlen(str); - size_t n = deserialize(str, len, ioMode); - *pb = n > 0 && n == len; - } - // return strlen(buf) if success else 0 - size_t getStr(char *buf, size_t maxBufSize, int ioMode = 0) const - { - size_t n = serialize(buf, maxBufSize, ioMode); - if (n == 0 || n == maxBufSize - 1) return 0; - buf[n] = '\0'; - return n; - } -#ifndef CYBOZU_DONT_USE_STRING - void setStr(const std::string& str, int ioMode = 0) - { - cybozu::StringInputStream is(str); - static_cast(*this).load(is, ioMode); - } - void getStr(std::string& str, int ioMode = 0) const - { - str.clear(); - cybozu::StringOutputStream os(str); - static_cast(*this).save(os, ioMode); - } - std::string getStr(int ioMode = 0) const - { - std::string str; - getStr(str, ioMode); - return str; - } -#endif - // return written bytes - size_t serialize(void *buf, size_t maxBufSize, int ioMode = IoSerialize) const - { - cybozu::MemoryOutputStream os(buf, maxBufSize); - bool b; - static_cast(*this).save(&b, os, ioMode); - return b ? os.getPos() : 0; - } - // return read bytes - size_t deserialize(const void *buf, size_t bufSize, int ioMode = IoSerialize) - { - cybozu::MemoryInputStream is(buf, bufSize); - bool b; - static_cast(*this).load(&b, is, ioMode); - return b ? 
is.getPos() : 0; - } -}; - -} } // mcl::fp - diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/paillier.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/paillier.hpp deleted file mode 100644 index 03e44cb16..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/paillier.hpp +++ /dev/null @@ -1,84 +0,0 @@ -#pragma once -/** - @file - @brief paillier encryption - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include - -namespace mcl { namespace paillier { - -class PublicKey { - size_t primeBitSize; - mpz_class g; - mpz_class n; - mpz_class n2; -public: - PublicKey() : primeBitSize(0) {} - void init(size_t _primeBitSize, const mpz_class& _n) - { - primeBitSize = _primeBitSize; - n = _n; - g = 1 + _n; - n2 = _n * _n; - } - void enc(mpz_class& c, const mpz_class& m, mcl::fp::RandGen rg = mcl::fp::RandGen()) const - { - if (rg.isZero()) rg = mcl::fp::RandGen::get(); - if (primeBitSize == 0) throw cybozu::Exception("paillier:PublicKey:not init"); - mpz_class r; - mcl::gmp::getRand(r, primeBitSize, rg); - mpz_class a, b; - mcl::gmp::powMod(a, g, m, n2); - mcl::gmp::powMod(b, r, n, n2); - c = (a * b) % n2; - } - /* - additive homomorphic encryption - cz = cx + cy - */ - void add(mpz_class& cz, mpz_class& cx, mpz_class& cy) const - { - cz = (cx * cy) % n2; - } -}; - -class SecretKey { - size_t primeBitSize; - mpz_class n; - mpz_class n2; - mpz_class lambda; - mpz_class invLambda; -public: - SecretKey() : primeBitSize(0) {} - /* - the size of prime is half of bitSize - */ - void init(size_t bitSize, mcl::fp::RandGen rg = mcl::fp::RandGen()) - { - if (rg.isZero()) rg = mcl::fp::RandGen::get(); - primeBitSize = bitSize / 2; - mpz_class p, q; - mcl::gmp::getRandPrime(p, primeBitSize, rg); - mcl::gmp::getRandPrime(q, primeBitSize, rg); - lambda = (p - 1) * (q - 1); - n = p * q; - n2 = n * n; - mcl::gmp::invMod(invLambda, lambda, n); - } - void getPublicKey(PublicKey& pub) const - { - pub.init(primeBitSize, n); - } - void dec(mpz_class& m, const mpz_class& c) const - { - mpz_class L; - mcl::gmp::powMod(L, c, lambda, n2); - L = ((L - 1) / n) % n; - m = (L * invLambda) % n; - } -}; - -} } // mcl::paillier diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/randgen.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/randgen.hpp deleted file mode 100644 index 30502fc10..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/randgen.hpp +++ /dev/null @@ -1,156 +0,0 @@ -#pragma once -/** - @file - @brief definition of Op - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#ifdef MCL_DONT_USE_CSPRNG - -// nothing - -#elif defined(MCL_USE_WEB_CRYPTO_API) -#include - -namespace mcl { -struct RandomGeneratorJS { - void read(bool *pb, void *buf, uint32_t byteSize) - { - // cf. 
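The deleted paillier.hpp is textbook Paillier: the product of two ciphertexts mod n^2 decrypts to the sum of the plaintexts. A short usage sketch of exactly this API, assuming the mcl headers and a GMP-backed mpz_class are available; the key size is kept small only so the sketch runs quickly:

```cpp
#include <mcl/paillier.hpp>
#include <cassert>

int main() {
    mcl::paillier::SecretKey sec;
    sec.init(1024);                       // 512-bit primes p and q
    mcl::paillier::PublicKey pub;
    sec.getPublicKey(pub);

    mpz_class c1, c2, c3, m;
    pub.enc(c1, 12345);
    pub.enc(c2, 67890);
    pub.add(c3, c1, c2);                  // homomorphic add: c3 = c1 * c2 mod n^2
    sec.dec(m, c3);
    assert(m == 12345 + 67890);           // decrypts to the plaintext sum
}
```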
https://developer.mozilla.org/en-US/docs/Web/API/Crypto/getRandomValues - if (byteSize > 65536) { - *pb = false; - return; - } - // use crypto.getRandomValues - EM_ASM({Module.cryptoGetRandomValues($0, $1)}, buf, byteSize); - *pb = true; - } -}; -} // mcl - -#else -#include -#if 0 // #if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 -#include -#endif -#endif -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable : 4521) -#endif -namespace mcl { namespace fp { - -namespace local { - -template -uint32_t readWrapper(void *self, void *buf, uint32_t byteSize) -{ - bool b; - reinterpret_cast(self)->read(&b, (uint8_t*)buf, byteSize); - if (b) return byteSize; - return 0; -} - -#if 0 // #if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 -template<> -inline uint32_t readWrapper(void *self, void *buf, uint32_t byteSize) -{ - const uint32_t keep = byteSize; - std::random_device& rg = *reinterpret_cast(self); - uint8_t *p = reinterpret_cast(buf); - uint32_t v; - while (byteSize >= 4) { - v = rg(); - memcpy(p, &v, 4); - p += 4; - byteSize -= 4; - } - if (byteSize > 0) { - v = rg(); - memcpy(p, &v, byteSize); - } - return keep; -} -#endif -} // local -/* - wrapper of cryptographically secure pseudo random number generator -*/ -class RandGen { - typedef uint32_t (*readFuncType)(void *self, void *buf, uint32_t byteSize); - void *self_; - readFuncType readFunc_; -public: - RandGen() : self_(0), readFunc_(0) {} - RandGen(void *self, readFuncType readFunc) : self_(self) , readFunc_(readFunc) {} - RandGen(const RandGen& rhs) : self_(rhs.self_), readFunc_(rhs.readFunc_) {} - RandGen(RandGen& rhs) : self_(rhs.self_), readFunc_(rhs.readFunc_) {} - RandGen& operator=(const RandGen& rhs) - { - self_ = rhs.self_; - readFunc_ = rhs.readFunc_; - return *this; - } - template - RandGen(RG& rg) - : self_(reinterpret_cast(&rg)) - , readFunc_(local::readWrapper) - { - } - void read(bool *pb, void *out, size_t byteSize) - { - uint32_t size = readFunc_(self_, out, static_cast(byteSize)); - *pb = size == byteSize; - } -#ifdef MCL_DONT_USE_CSPRNG - bool isZero() const { return false; } /* return false to avoid copying default rg */ -#else - bool isZero() const { return self_ == 0 && readFunc_ == 0; } -#endif - static RandGen& getDefaultRandGen() - { -#ifdef MCL_DONT_USE_CSPRNG - static RandGen wrg; -#elif defined(MCL_USE_WEB_CRYPTO_API) - static mcl::RandomGeneratorJS rg; - static RandGen wrg(rg); -#else - static cybozu::RandomGenerator rg; - static RandGen wrg(rg); -#endif - return wrg; - } - static RandGen& get() - { - static RandGen wrg(getDefaultRandGen()); - return wrg; - } - /* - rg must be thread safe - rg.read(void *buf, size_t byteSize); - */ - static void setRandGen(const RandGen& rg) - { - get() = rg; - } - /* - set rand function - if self and readFunc are NULL then set default rand function - */ - static void setRandFunc(void *self, readFuncType readFunc) - { - if (self == 0 && readFunc == 0) { - setRandGen(getDefaultRandGen()); - } else { - RandGen rg(self, readFunc); - setRandGen(rg); - } - } -}; - -} } // mcl::fp - -#ifdef _MSC_VER - #pragma warning(pop) -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/she.h b/vendor/github.com/dexon-foundation/mcl/include/mcl/she.h deleted file mode 100644 index 60b399c65..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/she.h +++ /dev/null @@ -1,270 +0,0 @@ -#pragma once -/** - @file - @brief C api of somewhat homomorphic encryption with one-time multiplication, based on prime-order pairings - @author MITSUNARI 
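RandGen above type-erases an arbitrary generator into a (self pointer, read function pointer) pair so the field code never depends on a concrete RNG type. The same pattern in isolation, with a deterministic stub standing in for a real CSPRNG; all names here are illustrative:

```cpp
#include <cassert>
#include <cstdint>

struct StubRng {                           // deterministic stand-in for a CSPRNG
    uint8_t next = 0;
    void read(bool* pb, void* buf, uint32_t n) {
        uint8_t* p = static_cast<uint8_t*>(buf);
        for (uint32_t i = 0; i < n; i++) p[i] = next++;
        *pb = true;
    }
};

template <class RG>
uint32_t readWrapper(void* self, void* buf, uint32_t n) {
    bool b;
    static_cast<RG*>(self)->read(&b, buf, n);   // recover the concrete type
    return b ? n : 0;
}

struct ErasedRng {                         // what RandGen stores: self + function
    void* self;
    uint32_t (*readFunc)(void*, void*, uint32_t);
    template <class RG>
    explicit ErasedRng(RG& rg) : self(&rg), readFunc(readWrapper<RG>) {}
};

int main() {
    StubRng rng;
    ErasedRng e(rng);
    uint8_t buf[4];
    assert(e.readFunc(e.self, buf, 4) == 4);
    assert(buf[0] == 0 && buf[3] == 3);    // bytes came from StubRng
}
```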
Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include - -#ifdef _MSC_VER -#ifdef MCLSHE_DLL_EXPORT -#define MCLSHE_DLL_API __declspec(dllexport) -#else -#define MCLSHE_DLL_API __declspec(dllimport) -#ifndef MCLSHE_NO_AUTOLINK - #if MCLBN_FP_UNIT_SIZE == 4 - #pragma comment(lib, "mclshe256.lib") - #elif MCLBN_FP_UNIT_SIZE == 6 - #pragma comment(lib, "mclshe384.lib") - #else - #pragma comment(lib, "mclshe512.lib") - #endif -#endif -#endif -#else -#ifdef __EMSCRIPTEN__ - #define MCLSHE_DLL_API __attribute__((used)) -#elif defined(__wasm__) - #define MCLSHE_DLL_API __attribute__((visibility("default"))) -#else - #define MCLSHE_DLL_API -#endif -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct { - mclBnFr x; - mclBnFr y; -} sheSecretKey; - -typedef struct { - mclBnG1 xP; - mclBnG2 yQ; -} shePublicKey; - -struct shePrecomputedPublicKey; - -typedef struct { - mclBnG1 S; - mclBnG1 T; -} sheCipherTextG1; - -typedef struct { - mclBnG2 S; - mclBnG2 T; -} sheCipherTextG2; - -typedef struct { - mclBnGT g[4]; -} sheCipherTextGT; - -typedef struct { - mclBnFr d[4]; -} sheZkpBin; - -typedef struct { - mclBnFr d[4]; -} sheZkpEq; - -typedef struct { - mclBnFr d[7]; -} sheZkpBinEq; -/* - initialize this library - call this once before using the other functions - @param curve [in] enum value defined in mcl/bn.h - @param compiledTimeVar [in] specify MCLBN_COMPILED_TIME_VAR, - which macro is used to make sure that the values - are the same when the library is built and used - @return 0 if success - @note sheInit() is thread safe and serialized if it is called simultaneously - but don't call it while using other functions. -*/ -MCLSHE_DLL_API int sheInit(int curve, int compiledTimeVar); - -// return written byte size if success else 0 -MCLSHE_DLL_API mclSize sheSecretKeySerialize(void *buf, mclSize maxBufSize, const sheSecretKey *sec); -MCLSHE_DLL_API mclSize shePublicKeySerialize(void *buf, mclSize maxBufSize, const shePublicKey *pub); -MCLSHE_DLL_API mclSize sheCipherTextG1Serialize(void *buf, mclSize maxBufSize, const sheCipherTextG1 *c); -MCLSHE_DLL_API mclSize sheCipherTextG2Serialize(void *buf, mclSize maxBufSize, const sheCipherTextG2 *c); -MCLSHE_DLL_API mclSize sheCipherTextGTSerialize(void *buf, mclSize maxBufSize, const sheCipherTextGT *c); -MCLSHE_DLL_API mclSize sheZkpBinSerialize(void *buf, mclSize maxBufSize, const sheZkpBin *zkp); -MCLSHE_DLL_API mclSize sheZkpEqSerialize(void *buf, mclSize maxBufSize, const sheZkpEq *zkp); -MCLSHE_DLL_API mclSize sheZkpBinEqSerialize(void *buf, mclSize maxBufSize, const sheZkpBinEq *zkp); - -// return read byte size if sucess else 0 -MCLSHE_DLL_API mclSize sheSecretKeyDeserialize(sheSecretKey* sec, const void *buf, mclSize bufSize); -MCLSHE_DLL_API mclSize shePublicKeyDeserialize(shePublicKey* pub, const void *buf, mclSize bufSize); -MCLSHE_DLL_API mclSize sheCipherTextG1Deserialize(sheCipherTextG1* c, const void *buf, mclSize bufSize); -MCLSHE_DLL_API mclSize sheCipherTextG2Deserialize(sheCipherTextG2* c, const void *buf, mclSize bufSize); -MCLSHE_DLL_API mclSize sheCipherTextGTDeserialize(sheCipherTextGT* c, const void *buf, mclSize bufSize); -MCLSHE_DLL_API mclSize sheZkpBinDeserialize(sheZkpBin* zkp, const void *buf, mclSize bufSize); -MCLSHE_DLL_API mclSize sheZkpEqDeserialize(sheZkpEq* zkp, const void *buf, mclSize bufSize); -MCLSHE_DLL_API mclSize sheZkpBinEqDeserialize(sheZkpBinEq* zkp, const void *buf, mclSize bufSize); - -/* - set secretKey if system has /dev/urandom or 
CryptGenRandom - return 0 if success -*/ -MCLSHE_DLL_API int sheSecretKeySetByCSPRNG(sheSecretKey *sec); - -MCLSHE_DLL_API void sheGetPublicKey(shePublicKey *pub, const sheSecretKey *sec); - -/* - make table to decode DLP - return 0 if success -*/ -MCLSHE_DLL_API int sheSetRangeForDLP(mclSize hashSize); -MCLSHE_DLL_API int sheSetRangeForG1DLP(mclSize hashSize); -MCLSHE_DLL_API int sheSetRangeForG2DLP(mclSize hashSize); -MCLSHE_DLL_API int sheSetRangeForGTDLP(mclSize hashSize); - -/* - set tryNum to decode DLP -*/ -MCLSHE_DLL_API void sheSetTryNum(mclSize tryNum); - -/* - decode G1 via GT if use != 0 - @note faster if tryNum >= 300 -*/ -MCLSHE_DLL_API void sheUseDecG1ViaGT(int use); -/* - decode G2 via GT if use != 0 - @note faster if tryNum >= 100 -*/ -MCLSHE_DLL_API void sheUseDecG2ViaGT(int use); -/* - load table for DLP - return read size if success else 0 -*/ -MCLSHE_DLL_API mclSize sheLoadTableForG1DLP(const void *buf, mclSize bufSize); -MCLSHE_DLL_API mclSize sheLoadTableForG2DLP(const void *buf, mclSize bufSize); -MCLSHE_DLL_API mclSize sheLoadTableForGTDLP(const void *buf, mclSize bufSize); - -/* - save table for DLP - return written size if success else 0 -*/ -MCLSHE_DLL_API mclSize sheSaveTableForG1DLP(void *buf, mclSize maxBufSize); -MCLSHE_DLL_API mclSize sheSaveTableForG2DLP(void *buf, mclSize maxBufSize); -MCLSHE_DLL_API mclSize sheSaveTableForGTDLP(void *buf, mclSize maxBufSize); - -// return 0 if success -MCLSHE_DLL_API int sheEncG1(sheCipherTextG1 *c, const shePublicKey *pub, mclInt m); -MCLSHE_DLL_API int sheEncG2(sheCipherTextG2 *c, const shePublicKey *pub, mclInt m); -MCLSHE_DLL_API int sheEncGT(sheCipherTextGT *c, const shePublicKey *pub, mclInt m); -MCLSHE_DLL_API int shePrecomputedPublicKeyEncG1(sheCipherTextG1 *c, const shePrecomputedPublicKey *ppub, mclInt m); -MCLSHE_DLL_API int shePrecomputedPublicKeyEncG2(sheCipherTextG2 *c, const shePrecomputedPublicKey *ppub, mclInt m); -MCLSHE_DLL_API int shePrecomputedPublicKeyEncGT(sheCipherTextGT *c, const shePrecomputedPublicKey *ppub, mclInt m); - -/* - m must be 0 or 1 -*/ -MCLSHE_DLL_API int sheEncWithZkpBinG1(sheCipherTextG1 *c, sheZkpBin *zkp, const shePublicKey *pub, int m); -MCLSHE_DLL_API int sheEncWithZkpBinG2(sheCipherTextG2 *c, sheZkpBin *zkp, const shePublicKey *pub, int m); -MCLSHE_DLL_API int sheEncWithZkpBinEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpBinEq *zkp, const shePublicKey *pub, int m); -MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpBinG1(sheCipherTextG1 *c, sheZkpBin *zkp, const shePrecomputedPublicKey *ppub, int m); -MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpBinG2(sheCipherTextG2 *c, sheZkpBin *zkp, const shePrecomputedPublicKey *ppub, int m); -MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpBinEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpBinEq *zkp, const shePrecomputedPublicKey *ppub, int m); - -/* - arbitary m -*/ -MCLSHE_DLL_API int sheEncWithZkpEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpEq *zkp, const shePublicKey *pub, mclInt m); -MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpEq *zkp, const shePrecomputedPublicKey *ppub, mclInt m); - -/* - decode c and set m - return 0 if success -*/ -MCLSHE_DLL_API int sheDecG1(mclInt *m, const sheSecretKey *sec, const sheCipherTextG1 *c); -MCLSHE_DLL_API int sheDecG2(mclInt *m, const sheSecretKey *sec, const sheCipherTextG2 *c); -MCLSHE_DLL_API int sheDecGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextGT *c); -/* - verify zkp - return 1 if valid -*/ 
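For orientation, the encrypt/decrypt entry points above compose as follows. A minimal sketch of a key-generation, encrypt, add, decrypt round trip against this C API (assuming the MCL_BN254 curve enum from mcl/bn.h, which this header does not itself define, and reducing error handling to asserts):

#include <mcl/she.h>
#include <assert.h>

int main(void) {
    // global one-time init; MCLBN_COMPILED_TIME_VAR detects a header/library mismatch
    assert(sheInit(MCL_BN254, MCLBN_COMPILED_TIME_VAR) == 0);
    assert(sheSetRangeForDLP(1024) == 0);   // build the table used to decode the DLP
    sheSecretKey sec;
    shePublicKey pub;
    assert(sheSecretKeySetByCSPRNG(&sec) == 0);
    sheGetPublicKey(&pub, &sec);
    sheCipherTextG1 ca, cb;
    assert(sheEncG1(&ca, &pub, 3) == 0);    // Enc(3)
    assert(sheEncG1(&cb, &pub, 4) == 0);    // Enc(4)
    assert(sheAddG1(&ca, &ca, &cb) == 0);   // additively homomorphic: Enc(3 + 4)
    mclInt m;
    assert(sheDecG1(&m, &sec, &ca) == 0);   // table-assisted DLP decode
    assert(m == 7);
    return 0;
}

The same pattern applies to the G2 and GT variants, and sheMul multiplies a G1 ciphertext by a G2 ciphertext once to produce a GT ciphertext; the zero-knowledge-proof verifiers declared next check the Zkp objects produced by the encWithZkp functions above.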
-MCLSHE_DLL_API int sheVerifyZkpBinG1(const shePublicKey *pub, const sheCipherTextG1 *c, const sheZkpBin *zkp); -MCLSHE_DLL_API int sheVerifyZkpBinG2(const shePublicKey *pub, const sheCipherTextG2 *c, const sheZkpBin *zkp); -MCLSHE_DLL_API int sheVerifyZkpEq(const shePublicKey *pub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpEq *zkp); -MCLSHE_DLL_API int sheVerifyZkpBinEq(const shePublicKey *pub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpBinEq *zkp); -MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpBinG1(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c, const sheZkpBin *zkp); -MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpBinG2(const shePrecomputedPublicKey *ppub, const sheCipherTextG2 *c, const sheZkpBin *zkp); -MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpEq(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpEq *zkp); -MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpBinEq(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpBinEq *zkp); -/* - decode c via GT and set m - return 0 if success -*/ -MCLSHE_DLL_API int sheDecG1ViaGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextG1 *c); -MCLSHE_DLL_API int sheDecG2ViaGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextG2 *c); - -/* - return 1 if dec(c) == 0 -*/ -MCLSHE_DLL_API int sheIsZeroG1(const sheSecretKey *sec, const sheCipherTextG1 *c); -MCLSHE_DLL_API int sheIsZeroG2(const sheSecretKey *sec, const sheCipherTextG2 *c); -MCLSHE_DLL_API int sheIsZeroGT(const sheSecretKey *sec, const sheCipherTextGT *c); - -// return 0 if success -// y = -x -MCLSHE_DLL_API int sheNegG1(sheCipherTextG1 *y, const sheCipherTextG1 *x); -MCLSHE_DLL_API int sheNegG2(sheCipherTextG2 *y, const sheCipherTextG2 *x); -MCLSHE_DLL_API int sheNegGT(sheCipherTextGT *y, const sheCipherTextGT *x); - -// return 0 if success -// z = x + y -MCLSHE_DLL_API int sheAddG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, const sheCipherTextG1 *y); -MCLSHE_DLL_API int sheAddG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, const sheCipherTextG2 *y); -MCLSHE_DLL_API int sheAddGT(sheCipherTextGT *z, const sheCipherTextGT *x, const sheCipherTextGT *y); - -// return 0 if success -// z = x - y -MCLSHE_DLL_API int sheSubG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, const sheCipherTextG1 *y); -MCLSHE_DLL_API int sheSubG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, const sheCipherTextG2 *y); -MCLSHE_DLL_API int sheSubGT(sheCipherTextGT *z, const sheCipherTextGT *x, const sheCipherTextGT *y); - -// return 0 if success -// z = x * y -MCLSHE_DLL_API int sheMulG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, mclInt y); -MCLSHE_DLL_API int sheMulG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, mclInt y); -MCLSHE_DLL_API int sheMulGT(sheCipherTextGT *z, const sheCipherTextGT *x, mclInt y); - -// return 0 if success -// z = x * y -MCLSHE_DLL_API int sheMul(sheCipherTextGT *z, const sheCipherTextG1 *x, const sheCipherTextG2 *y); -/* - sheMul(z, x, y) = sheMulML(z, x, y) + sheFinalExpGT(z) - @note - Mul(x1, y1) + ... + Mul(xn, yn) = finalExp(MulML(x1, y1) + ... 
+ MulML(xn, yn)) -*/ -MCLSHE_DLL_API int sheMulML(sheCipherTextGT *z, const sheCipherTextG1 *x, const sheCipherTextG2 *y); -MCLSHE_DLL_API int sheFinalExpGT(sheCipherTextGT *y, const sheCipherTextGT *x); - -// return 0 if success -// rerandomize(c) -MCLSHE_DLL_API int sheReRandG1(sheCipherTextG1 *c, const shePublicKey *pub); -MCLSHE_DLL_API int sheReRandG2(sheCipherTextG2 *c, const shePublicKey *pub); -MCLSHE_DLL_API int sheReRandGT(sheCipherTextGT *c, const shePublicKey *pub); - -// return 0 if success -// y = convert(x) -MCLSHE_DLL_API int sheConvertG1(sheCipherTextGT *y, const shePublicKey *pub, const sheCipherTextG1 *x); -MCLSHE_DLL_API int sheConvertG2(sheCipherTextGT *y, const shePublicKey *pub, const sheCipherTextG2 *x); - -// return nonzero if success -MCLSHE_DLL_API shePrecomputedPublicKey *shePrecomputedPublicKeyCreate(); -// call this function to avoid memory leak -MCLSHE_DLL_API void shePrecomputedPublicKeyDestroy(shePrecomputedPublicKey *ppub); -// return 0 if success -MCLSHE_DLL_API int shePrecomputedPublicKeyInit(shePrecomputedPublicKey *ppub, const shePublicKey *pub); - -#ifdef __cplusplus -} -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/she.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/she.hpp deleted file mode 100644 index 3ce361454..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/she.hpp +++ /dev/null @@ -1,1939 +0,0 @@ -#pragma once -/** - @file - @brief somewhat homomorphic encryption with one-time multiplication, based on prime-order pairings - @author MITSUNARI Shigeo(@herumi) - see https://github.com/herumi/mcl/blob/master/misc/she/she.pdf - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include -#include -#include -#ifndef MCLBN_FP_UNIT_SIZE - #define MCLBN_FP_UNIT_SIZE 4 -#endif -#if MCLBN_FP_UNIT_SIZE == 4 -#include -#elif MCLBN_FP_UNIT_SIZE == 6 -#include -#elif MCLBN_FP_UNIT_SIZE == 8 -#include -#else - #error "MCLBN_FP_UNIT_SIZE must be 4, 6, or 8" -#endif - -#include -#include -#include - -namespace mcl { namespace she { - -using namespace mcl::bn; - -namespace local { - -#ifndef MCLSHE_WIN_SIZE - #define MCLSHE_WIN_SIZE 10 -#endif -static const size_t winSize = MCLSHE_WIN_SIZE; -static const size_t defaultTryNum = 2048; - -struct KeyCount { - uint32_t key; - int32_t count; // power - bool operator<(const KeyCount& rhs) const - { - return key < rhs.key; - } - bool isSame(const KeyCount& rhs) const - { - return key == rhs.key && count == rhs.count; - } -}; - -template -struct InterfaceForHashTable : G { - static G& castG(InterfaceForHashTable& x) { return static_cast(x); } - static const G& castG(const InterfaceForHashTable& x) { return static_cast(x); } - void clear() { clear(castG(*this)); } - void normalize() { normalize(castG(*this)); } - static bool isOdd(const G& P) { return P.y.isOdd(); } - static bool isZero(const G& P) { return P.isZero(); } - static bool isSameX(const G& P, const G& Q) { return P.x == Q.x; } - static uint32_t getHash(const G& P) { return uint32_t(*P.x.getUnit()); } - static void clear(G& P) { P.clear(); } - static void normalize(G& P) { P.normalize(); } - static void dbl(G& Q, const G& P) { G::dbl(Q, P); } - static void neg(G& Q, const G& P) { G::neg(Q, P); } - static void add(G& R, const G& P, const G& Q) { G::add(R, P, Q); } - template - static void mul(G& Q, const G& P, const INT& x) { G::mul(Q, P, x); } -}; - -/* - treat Fp12 as EC - unitary inverse of (a, b) = (a, -b) - then b.a.a or -b.a.a is odd -*/ -template -struct 
InterfaceForHashTable : G { - static G& castG(InterfaceForHashTable& x) { return static_cast(x); } - static const G& castG(const InterfaceForHashTable& x) { return static_cast(x); } - void clear() { clear(castG(*this)); } - void normalize() { normalize(castG(*this)); } - static bool isOdd(const G& x) { return x.b.a.a.isOdd(); } - static bool isZero(const G& x) { return x.isOne(); } - static bool isSameX(const G& x, const G& Q) { return x.a == Q.a; } - static uint32_t getHash(const G& x) { return uint32_t(*x.getFp0()->getUnit()); } - static void clear(G& x) { x = 1; } - static void normalize(G&) { } - static void dbl(G& y, const G& x) { G::sqr(y, x); } - static void neg(G& Q, const G& P) { G::unitaryInv(Q, P); } - static void add(G& z, const G& x, const G& y) { G::mul(z, x, y); } - template - static void mul(G& z, const G& x, const INT& y) { G::pow(z, x, y); } -}; - -template -char GtoChar(); -template<>char GtoChar() { return '1'; } -template<>char GtoChar() { return '2'; } -template<>char GtoChar() { return 'T'; } - -/* - HashTable or HashTable -*/ -template -class HashTable { - typedef InterfaceForHashTable I; - typedef std::vector KeyCountVec; - KeyCountVec kcv_; - G P_; - mcl::fp::WindowMethod wm_; - G nextP_; - G nextNegP_; - size_t tryNum_; - void setWindowMethod() - { - const size_t bitSize = G::BaseFp::BaseFp::getBitSize(); - wm_.init(static_cast(P_), bitSize, local::winSize); - } -public: - HashTable() : tryNum_(local::defaultTryNum) {} - bool operator==(const HashTable& rhs) const - { - if (kcv_.size() != rhs.kcv_.size()) return false; - for (size_t i = 0; i < kcv_.size(); i++) { - if (!kcv_[i].isSame(rhs.kcv_[i])) return false; - } - return P_ == rhs.P_ && nextP_ == rhs.nextP_; - } - bool operator!=(const HashTable& rhs) const { return !operator==(rhs); } - /* - compute log_P(xP) for |x| <= hashSize * tryNum - */ - void init(const G& P, size_t hashSize, size_t tryNum = local::defaultTryNum) - { - if (hashSize == 0) { - kcv_.clear(); - return; - } - if (hashSize >= 0x80000000u) throw cybozu::Exception("HashTable:init:hashSize is too large"); - P_ = P; - tryNum_ = tryNum; - kcv_.resize(hashSize); - G xP; - I::clear(xP); - for (int i = 1; i <= (int)kcv_.size(); i++) { - I::add(xP, xP, P_); - I::normalize(xP); - kcv_[i - 1].key = I::getHash(xP); - kcv_[i - 1].count = I::isOdd(xP) ? 
i : -i; - } - nextP_ = xP; - I::dbl(nextP_, nextP_); - I::add(nextP_, nextP_, P_); // nextP = (hasSize * 2 + 1)P - I::neg(nextNegP_, nextP_); // nextNegP = -nextP - /* - ascending order of abs(count) for same key - */ - std::stable_sort(kcv_.begin(), kcv_.end()); - setWindowMethod(); - } - void setTryNum(size_t tryNum) - { - this->tryNum_ = tryNum; - } - /* - log_P(xP) - find range which has same hash of xP in kcv_, - and detect it - */ - int basicLog(G xP, bool *ok = 0) const - { - if (ok) *ok = true; - if (I::isZero(xP)) return 0; - typedef KeyCountVec::const_iterator Iter; - KeyCount kc; - I::normalize(xP); - kc.key = I::getHash(xP); - kc.count = 0; - std::pair p = std::equal_range(kcv_.begin(), kcv_.end(), kc); - G Q; - I::clear(Q); - int prev = 0; - /* - check range which has same hash - */ - while (p.first != p.second) { - int count = p.first->count; - int abs_c = std::abs(count); - assert(abs_c >= prev); // assume ascending order - bool neg = count < 0; - G T; -// I::mul(T, P, abs_c - prev); - mulByWindowMethod(T, abs_c - prev); - I::add(Q, Q, T); - I::normalize(Q); - if (I::isSameX(Q, xP)) { - bool QisOdd = I::isOdd(Q); - bool xPisOdd = I::isOdd(xP); - if (QisOdd ^ xPisOdd ^ neg) return -count; - return count; - } - prev = abs_c; - ++p.first; - } - if (ok) { - *ok = false; - return 0; - } - throw cybozu::Exception("HashTable:basicLog:not found"); - } - /* - compute log_P(xP) - call basicLog at most 2 * tryNum - */ - int64_t log(const G& xP) const - { - bool ok; - int c = basicLog(xP, &ok); - if (ok) { - return c; - } - G posP = xP, negP = xP; - int64_t posCenter = 0; - int64_t negCenter = 0; - int64_t next = (int64_t)kcv_.size() * 2 + 1; - for (size_t i = 1; i < tryNum_; i++) { - I::add(posP, posP, nextNegP_); - posCenter += next; - c = basicLog(posP, &ok); - if (ok) { - return posCenter + c; - } - I::add(negP, negP, nextP_); - negCenter -= next; - c = basicLog(negP, &ok); - if (ok) { - return negCenter + c; - } - } - throw cybozu::Exception("HashTable:log:not found"); - } - /* - remark - tryNum is not saved. 
- */ - template - void save(OutputStream& os) const - { - cybozu::save(os, BN::param.cp.curveType); - cybozu::writeChar(os, GtoChar()); - cybozu::save(os, kcv_.size()); - cybozu::write(os, &kcv_[0], sizeof(kcv_[0]) * kcv_.size()); - P_.save(os); - } - size_t save(void *buf, size_t maxBufSize) const - { - cybozu::MemoryOutputStream os(buf, maxBufSize); - save(os); - return os.getPos(); - } - /* - remark - tryNum is not set - */ - template - void load(InputStream& is) - { - int curveType; - cybozu::load(curveType, is); - if (curveType != BN::param.cp.curveType) throw cybozu::Exception("HashTable:bad curveType") << curveType; - char c = 0; - if (!cybozu::readChar(&c, is) || c != GtoChar()) throw cybozu::Exception("HashTable:bad c") << (int)c; - size_t kcvSize; - cybozu::load(kcvSize, is); - kcv_.resize(kcvSize); - cybozu::read(&kcv_[0], sizeof(kcv_[0]) * kcvSize, is); - P_.load(is); - I::mul(nextP_, P_, (kcvSize * 2) + 1); - I::neg(nextNegP_, nextP_); - setWindowMethod(); - } - size_t load(const void *buf, size_t bufSize) - { - cybozu::MemoryInputStream is(buf, bufSize); - load(is); - return is.getPos(); - } - const mcl::fp::WindowMethod& getWM() const { return wm_; } - /* - mul(x, P, y); - */ - template - void mulByWindowMethod(G& x, const T& y) const - { - wm_.mul(static_cast(x), y); - } -}; - -template -int log(const G& P, const G& xP) -{ - if (xP.isZero()) return 0; - if (xP == P) return 1; - G negT; - G::neg(negT, P); - if (xP == negT) return -1; - G T = P; - for (int i = 2; i < 100; i++) { - T += P; - if (xP == T) return i; - G::neg(negT, T); - if (xP == negT) return -i; - } - throw cybozu::Exception("she:log:not found"); -} - -} // mcl::she::local - -template -struct SHET { - class SecretKey; - class PublicKey; - class PrecomputedPublicKey; - // additive HE - class CipherTextA; // = CipherTextG1 + CipherTextG2 - class CipherTextGT; // multiplicative HE - class CipherText; // CipherTextA + CipherTextGT - - static G1 P_; - static G2 Q_; - static GT ePQ_; // e(P, Q) - static std::vector Qcoeff_; - static local::HashTable PhashTbl_; - static local::HashTable QhashTbl_; - static mcl::fp::WindowMethod Qwm_; - typedef local::InterfaceForHashTable GTasEC; - static local::HashTable ePQhashTbl_; - static bool useDecG1ViaGT_; - static bool useDecG2ViaGT_; - static bool isG1only_; -private: - template - class CipherTextAT : public fp::Serializable > { - G S_, T_; - friend class SecretKey; - friend class PublicKey; - friend class PrecomputedPublicKey; - friend class CipherTextA; - friend class CipherTextGT; - bool isZero(const Fr& x) const - { - G xT; - G::mul(xT, T_, x); - return S_ == xT; - } - public: - const G& getS() const { return S_; } - const G& getT() const { return T_; } - void clear() - { - S_.clear(); - T_.clear(); - } - static void add(CipherTextAT& z, const CipherTextAT& x, const CipherTextAT& y) - { - /* - (S, T) + (S', T') = (S + S', T + T') - */ - G::add(z.S_, x.S_, y.S_); - G::add(z.T_, x.T_, y.T_); - } - static void sub(CipherTextAT& z, const CipherTextAT& x, const CipherTextAT& y) - { - /* - (S, T) - (S', T') = (S - S', T - T') - */ - G::sub(z.S_, x.S_, y.S_); - G::sub(z.T_, x.T_, y.T_); - } - // INT = int64_t or Fr - template - static void mul(CipherTextAT& z, const CipherTextAT& x, const INT& y) - { - G::mul(z.S_, x.S_, y); - G::mul(z.T_, x.T_, y); - } - static void neg(CipherTextAT& y, const CipherTextAT& x) - { - G::neg(y.S_, x.S_); - G::neg(y.T_, x.T_); - } - void add(const CipherTextAT& c) { add(*this, *this, c); } - void sub(const CipherTextAT& c) { sub(*this, *this, c); 
} - template - void load(bool *pb, InputStream& is, int ioMode = IoSerialize) - { - S_.load(pb, is, ioMode); if (!*pb) return; - T_.load(pb, is, ioMode); - } - template - void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const - { - const char sep = *fp::getIoSeparator(ioMode); - S_.save(pb, os, ioMode); if (!*pb) return; - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - T_.save(pb, os, ioMode); - } - template - void load(InputStream& is, int ioMode = IoSerialize) - { - bool b; - load(&b, is, ioMode); - if (!b) throw cybozu::Exception("she:CipherTextA:load"); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - bool b; - save(&b, os, ioMode); - if (!b) throw cybozu::Exception("she:CipherTextA:save"); - } - friend std::istream& operator>>(std::istream& is, CipherTextAT& self) - { - self.load(is, fp::detectIoMode(G::getIoMode(), is)); - return is; - } - friend std::ostream& operator<<(std::ostream& os, const CipherTextAT& self) - { - self.save(os, fp::detectIoMode(G::getIoMode(), os)); - return os; - } - bool operator==(const CipherTextAT& rhs) const - { - return S_ == rhs.S_ && T_ == rhs.T_; - } - bool operator!=(const CipherTextAT& rhs) const { return !operator==(rhs); } - }; - /* - g1 = millerLoop(P1, Q) - g2 = millerLoop(P2, Q) - */ - static void doubleMillerLoop(GT& g1, GT& g2, const G1& P1, const G1& P2, const G2& Q) - { -#if 1 - std::vector Qcoeff; - precomputeG2(Qcoeff, Q); - precomputedMillerLoop(g1, P1, Qcoeff); - precomputedMillerLoop(g2, P2, Qcoeff); -#else - millerLoop(g1, P1, Q); - millerLoop(g2, P2, Q); -#endif - } - static void finalExp4(GT out[4], const GT in[4]) - { - for (int i = 0; i < 4; i++) { - finalExp(out[i], in[i]); - } - } - static void tensorProductML(GT g[4], const G1& S1, const G1& T1, const G2& S2, const G2& T2) - { - /* - (S1, T1) x (S2, T2) = (ML(S1, S2), ML(S1, T2), ML(T1, S2), ML(T1, T2)) - */ - doubleMillerLoop(g[0], g[2], S1, T1, S2); - doubleMillerLoop(g[1], g[3], S1, T1, T2); - } - static void tensorProduct(GT g[4], const G1& S1, const G1& T1, const G2& S2, const G2& T2) - { - /* - (S1, T1) x (S2, T2) = (e(S1, S2), e(S1, T2), e(T1, S2), e(T1, T2)) - */ - tensorProductML(g,S1, T1, S2,T2); - finalExp4(g, g); - } - template - struct ZkpT : public fp::Serializable > { - Fr d_[n]; - template - void load(bool *pb, InputStream& is, int ioMode = IoSerialize) - { - for (size_t i = 0; i < n; i++) { - d_[i].load(pb, is, ioMode); if (!*pb) return; - } - } - template - void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const - { - const char sep = *fp::getIoSeparator(ioMode); - d_[0].save(pb, os, ioMode); if (!*pb) return; - for (size_t i = 1; i < n; i++) { - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - d_[i].save(pb, os, ioMode); - } - } - template - void load(InputStream& is, int ioMode = IoSerialize) - { - bool b; - load(&b, is, ioMode); - if (!b) throw cybozu::Exception("she:ZkpT:load"); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - bool b; - save(&b, os, ioMode); - if (!b) throw cybozu::Exception("she:ZkpT:save"); - } - friend std::istream& operator>>(std::istream& is, ZkpT& self) - { - self.load(is, fp::detectIoMode(Fr::getIoMode(), is)); - return is; - } - friend std::ostream& operator<<(std::ostream& os, const ZkpT& self) - { - self.save(os, fp::detectIoMode(Fr::getIoMode(), os)); - return os; - } - }; - struct ZkpBinTag; - struct ZkpEqTag; // d_[] = { c, sp, ss, sm } - struct ZkpBinEqTag; // d_[] = { d0, d1, sp0, 
sp1, ss, sp, sm } -public: - /* - Zkp for m = 0 or 1 - */ - typedef ZkpT ZkpBin; - /* - Zkp for decG1(c1) == decG2(c2) - */ - typedef ZkpT ZkpEq; - /* - Zkp for (m = 0 or 1) and decG1(c1) == decG2(c2) - */ - typedef ZkpT ZkpBinEq; - - typedef CipherTextAT CipherTextG1; - typedef CipherTextAT CipherTextG2; - - static void init(const mcl::CurveParam& cp = mcl::BN254, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum) - { - initPairing(cp); - hashAndMapToG1(P_, "0"); - hashAndMapToG2(Q_, "0"); - pairing(ePQ_, P_, Q_); - precomputeG2(Qcoeff_, Q_); - setRangeForDLP(hashSize); - useDecG1ViaGT_ = false; - useDecG2ViaGT_ = false; - isG1only_ = false; - setTryNum(tryNum); - } - static void init(size_t hashSize, size_t tryNum = local::defaultTryNum) - { - init(mcl::BN254, hashSize, tryNum); - } - /* - standard lifted ElGamal encryption - */ - static void initG1only(const mcl::EcParam& para, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum) - { - Fp::init(para.p); - Fr::init(para.n); - G1::init(para.a, para.b); - const Fp x0(para.gx); - const Fp y0(para.gy); - P_.set(x0, y0); - - setRangeForG1DLP(hashSize); - useDecG1ViaGT_ = false; - useDecG2ViaGT_ = false; - isG1only_ = true; - setTryNum(tryNum); - } - /* - set range for G1-DLP - */ - static void setRangeForG1DLP(size_t hashSize) - { - PhashTbl_.init(P_, hashSize); - } - /* - set range for G2-DLP - */ - static void setRangeForG2DLP(size_t hashSize) - { - QhashTbl_.init(Q_, hashSize); - } - /* - set range for GT-DLP - */ - static void setRangeForGTDLP(size_t hashSize) - { - ePQhashTbl_.init(ePQ_, hashSize); - } - /* - set range for G1/G2/GT DLP - decode message m for |m| <= hasSize * tryNum - decode time = O(log(hasSize) * tryNum) - */ - static void setRangeForDLP(size_t hashSize) - { - setRangeForG1DLP(hashSize); - setRangeForG2DLP(hashSize); - setRangeForGTDLP(hashSize); - } - static void setTryNum(size_t tryNum) - { - PhashTbl_.setTryNum(tryNum); - QhashTbl_.setTryNum(tryNum); - ePQhashTbl_.setTryNum(tryNum); - } - static void useDecG1ViaGT(bool use = true) - { - useDecG1ViaGT_ = use; - } - static void useDecG2ViaGT(bool use = true) - { - useDecG2ViaGT_ = use; - } - /* - only one element is necessary for each G1 and G2. 
- this is better than David Mandell Freeman's algorithm - */ - class SecretKey : public fp::Serializable { - Fr x_, y_; - void getPowOfePQ(GT& v, const CipherTextGT& c) const - { - /* - (s, t, u, v) := (e(S, S'), e(S, T'), e(T, S'), e(T, T')) - s v^(xy) / (t^y u^x) = s (v^x / t) ^ y / u^x - = e(P, Q)^(mm') - */ - GT t, u; - GT::unitaryInv(t, c.g_[1]); - GT::unitaryInv(u, c.g_[2]); - GT::pow(v, c.g_[3], x_); - v *= t; - GT::pow(v, v, y_); - GT::pow(u, u, x_); - v *= u; - v *= c.g_[0]; - } - public: - void setByCSPRNG() - { - x_.setRand(); - if (!isG1only_) y_.setRand(); - } - /* - set xP and yQ - */ - void getPublicKey(PublicKey& pub) const - { - pub.set(x_, y_); - } -#if 0 - // log_x(y) - int log(const GT& x, const GT& y) const - { - if (y == 1) return 0; - if (y == x) return 1; - GT inv; - GT::unitaryInv(inv, x); - if (y == inv) return -1; - GT t = x; - for (int i = 2; i < 100; i++) { - t *= x; - if (y == t) return i; - GT::unitaryInv(inv, t); - if (y == inv) return -i; - } - throw cybozu::Exception("she:dec:log:not found"); - } -#endif - int64_t dec(const CipherTextG1& c) const - { - if (useDecG1ViaGT_) return decViaGT(c); - /* - S = mP + rxP - T = rP - R = S - xT = mP - */ - G1 R; - G1::mul(R, c.T_, x_); - G1::sub(R, c.S_, R); - return PhashTbl_.log(R); - } - int64_t dec(const CipherTextG2& c) const - { - if (useDecG2ViaGT_) return decViaGT(c); - G2 R; - G2::mul(R, c.T_, y_); - G2::sub(R, c.S_, R); - return QhashTbl_.log(R); - } - int64_t dec(const CipherTextA& c) const - { - return dec(c.c1_); - } - int64_t dec(const CipherTextGT& c) const - { - GT v; - getPowOfePQ(v, c); - return ePQhashTbl_.log(v); -// return log(g, v); - } - int64_t decViaGT(const CipherTextG1& c) const - { - G1 R; - G1::mul(R, c.T_, x_); - G1::sub(R, c.S_, R); - GT v; - pairing(v, R, Q_); - return ePQhashTbl_.log(v); - } - int64_t decViaGT(const CipherTextG2& c) const - { - G2 R; - G2::mul(R, c.T_, y_); - G2::sub(R, c.S_, R); - GT v; - pairing(v, P_, R); - return ePQhashTbl_.log(v); - } - int64_t dec(const CipherText& c) const - { - if (c.isMultiplied()) { - return dec(c.m_); - } else { - return dec(c.a_); - } - } - bool isZero(const CipherTextG1& c) const - { - return c.isZero(x_); - } - bool isZero(const CipherTextG2& c) const - { - return c.isZero(y_); - } - bool isZero(const CipherTextA& c) const - { - return c.c1_.isZero(x_); - } - bool isZero(const CipherTextGT& c) const - { - GT v; - getPowOfePQ(v, c); - return v.isOne(); - } - bool isZero(const CipherText& c) const - { - if (c.isMultiplied()) { - return isZero(c.m_); - } else { - return isZero(c.a_); - } - } - template - void load(bool *pb, InputStream& is, int ioMode = IoSerialize) - { - x_.load(pb, is, ioMode); if (!*pb) return; - if (!isG1only_) y_.load(pb, is, ioMode); - } - template - void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const - { - const char sep = *fp::getIoSeparator(ioMode); - x_.save(pb, os, ioMode); if (!*pb) return; - if (isG1only_) return; - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - y_.save(os, ioMode); - } - template - void load(InputStream& is, int ioMode = IoSerialize) - { - bool b; - load(&b, is, ioMode); - if (!b) throw cybozu::Exception("she:SecretKey:load"); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - bool b; - save(&b, os, ioMode); - if (!b) throw cybozu::Exception("she:SecretKey:save"); - } - friend std::istream& operator>>(std::istream& is, SecretKey& self) - { - self.load(is, fp::detectIoMode(Fr::getIoMode(), is)); - return is; - } - friend 
std::ostream& operator<<(std::ostream& os, const SecretKey& self) - { - self.save(os, fp::detectIoMode(Fr::getIoMode(), os)); - return os; - } - bool operator==(const SecretKey& rhs) const - { - return x_ == rhs.x_ && (isG1only_ || y_ == rhs.y_); - } - bool operator!=(const SecretKey& rhs) const { return !operator==(rhs); } - }; -private: - /* - simple ElGamal encryptionfor G1 and G2 - (S, T) = (m P + r xP, rP) - Pmul.mul(X, a) // X = a P - xPmul.mul(X, a) // X = a xP - use *encRand if encRand is not null - */ - template - static void ElGamalEnc(G& S, G& T, const INT& m, const mcl::fp::WindowMethod& Pmul, const MulG& xPmul, const Fr *encRand = 0) - { - Fr r; - if (encRand) { - r = *encRand; - } else { - r.setRand(); - } - Pmul.mul(static_cast(T), r); - xPmul.mul(S, r); // S = r xP - if (m == 0) return; - G C; - Pmul.mul(static_cast(C), m); - S += C; - } - /* - https://github.com/herumi/mcl/blob/master/misc/she/nizkp.pdf - - encRand is a random value used for ElGamalEnc() - d[1-m] ; rand - s[1-m] ; rand - R[0][1-m] = s[1-m] P - d[1-m] T - R[1][1-m] = s[1-m] xP - d[1-m] (S - (1-m) P) - r ; rand - R[0][m] = r P - R[1][m] = r xP - c = H(S, T, R[0][0], R[0][1], R[1][0], R[1][1]) - d[m] = c - d[1-m] - s[m] = r + d[m] encRand - */ - template - static void makeZkpBin(ZkpBin& zkp, const G& S, const G& T, const Fr& encRand, const G& P, int m, const mcl::fp::WindowMethod& Pmul, const MulG& xPmul) - { - if (m != 0 && m != 1) throw cybozu::Exception("makeZkpBin:bad m") << m; - Fr *s = &zkp.d_[0]; - Fr *d = &zkp.d_[2]; - G R[2][2]; - d[1-m].setRand(); - s[1-m].setRand(); - G T1, T2; - Pmul.mul(static_cast(T1), s[1-m]); // T1 = s[1-m] P - G::mul(T2, T, d[1-m]); - G::sub(R[0][1-m], T1, T2); // s[1-m] P - d[1-m]T - xPmul.mul(T1, s[1-m]); // T1 = s[1-m] xP - if (m == 0) { - G::sub(T2, S, P); - G::mul(T2, T2, d[1-m]); - } else { - G::mul(T2, S, d[1-m]); - } - G::sub(R[1][1-m], T1, T2); // s[1-m] xP - d[1-m](S - (1-m) P) - Fr r; - r.setRand(); - Pmul.mul(static_cast(R[0][m]), r); // R[0][m] = r P - xPmul.mul(R[1][m], r); // R[1][m] = r xP - char buf[sizeof(G) * 2]; - cybozu::MemoryOutputStream os(buf, sizeof(buf)); - S.save(os); - T.save(os); - R[0][0].save(os); - R[0][1].save(os); - R[1][0].save(os); - R[1][1].save(os); - Fr c; - c.setHashOf(buf, os.getPos()); - d[m] = c - d[1-m]; - s[m] = r + d[m] * encRand; - } - /* - R[0][i] = s[i] P - d[i] T ; i = 0,1 - R[1][0] = s[0] xP - d[0] S - R[1][1] = s[1] xP - d[1](S - P) - c = H(S, T, R[0][0], R[0][1], R[1][0], R[1][1]) - c == d[0] + d[1] - */ - template - static bool verifyZkpBin(const G& S, const G& T, const G& P, const ZkpBin& zkp, const mcl::fp::WindowMethod& Pmul, const MulG& xPmul) - { - const Fr *s = &zkp.d_[0]; - const Fr *d = &zkp.d_[2]; - G R[2][2]; - G T1, T2; - for (int i = 0; i < 2; i++) { - Pmul.mul(static_cast(T1), s[i]); // T1 = s[i] P - G::mul(T2, T, d[i]); - G::sub(R[0][i], T1, T2); - } - xPmul.mul(T1, s[0]); // T1 = s[0] xP - G::mul(T2, S, d[0]); - G::sub(R[1][0], T1, T2); - xPmul.mul(T1, s[1]); // T1 = x[1] xP - G::sub(T2, S, P); - G::mul(T2, T2, d[1]); - G::sub(R[1][1], T1, T2); - char buf[sizeof(G) * 2]; - cybozu::MemoryOutputStream os(buf, sizeof(buf)); - S.save(os); - T.save(os); - R[0][0].save(os); - R[0][1].save(os); - R[1][0].save(os); - R[1][1].save(os); - Fr c; - c.setHashOf(buf, os.getPos()); - return c == d[0] + d[1]; - } - /* - encRand1, encRand2 are random values use for ElGamalEnc() - */ - template - static void makeZkpEq(ZkpEq& zkp, G1& S1, G1& T1, G2& S2, G2& T2, const INT& m, const mcl::fp::WindowMethod& Pmul, const MulG1& 
xPmul, const mcl::fp::WindowMethod& Qmul, const MulG2& yQmul) - { - Fr p, s; - p.setRand(); - s.setRand(); - ElGamalEnc(S1, T1, m, Pmul, xPmul, &p); - ElGamalEnc(S2, T2, m, Qmul, yQmul, &s); - Fr rp, rs, rm; - rp.setRand(); - rs.setRand(); - rm.setRand(); - G1 R1, R2; - G2 R3, R4; - ElGamalEnc(R1, R2, rm, Pmul, xPmul, &rp); - ElGamalEnc(R3, R4, rm, Qmul, yQmul, &rs); - char buf[sizeof(G1) * 4 + sizeof(G2) * 4]; - cybozu::MemoryOutputStream os(buf, sizeof(buf)); - S1.save(os); - T1.save(os); - S2.save(os); - T2.save(os); - R1.save(os); - R2.save(os); - R3.save(os); - R4.save(os); - Fr& c = zkp.d_[0]; - Fr& sp = zkp.d_[1]; - Fr& ss = zkp.d_[2]; - Fr& sm = zkp.d_[3]; - c.setHashOf(buf, os.getPos()); - Fr::mul(sp, c, p); - sp += rp; - Fr::mul(ss, c, s); - ss += rs; - Fr::mul(sm, c, m); - sm += rm; - } - template - static bool verifyZkpEq(const ZkpEq& zkp, const G1& S1, const G1& T1, const G2& S2, const G2& T2, const mcl::fp::WindowMethod& Pmul, const MulG1& xPmul, const mcl::fp::WindowMethod& Qmul, const MulG2& yQmul) - { - const Fr& c = zkp.d_[0]; - const Fr& sp = zkp.d_[1]; - const Fr& ss = zkp.d_[2]; - const Fr& sm = zkp.d_[3]; - G1 R1, R2, X1; - G2 R3, R4, X2; - ElGamalEnc(R1, R2, sm, Pmul, xPmul, &sp); - G1::mul(X1, S1, c); - R1 -= X1; - G1::mul(X1, T1, c); - R2 -= X1; - ElGamalEnc(R3, R4, sm, Qmul, yQmul, &ss); - G2::mul(X2, S2, c); - R3 -= X2; - G2::mul(X2, T2, c); - R4 -= X2; - char buf[sizeof(G1) * 4 + sizeof(G2) * 4]; - cybozu::MemoryOutputStream os(buf, sizeof(buf)); - S1.save(os); - T1.save(os); - S2.save(os); - T2.save(os); - R1.save(os); - R2.save(os); - R3.save(os); - R4.save(os); - Fr c2; - c2.setHashOf(buf, os.getPos()); - return c == c2; - } - /* - encRand1, encRand2 are random values use for ElGamalEnc() - */ - template - static void makeZkpBinEq(ZkpBinEq& zkp, G1& S1, G1& T1, G2& S2, G2& T2, int m, const mcl::fp::WindowMethod& Pmul, const MulG1& xPmul, const mcl::fp::WindowMethod& Qmul, const MulG2& yQmul) - { - if (m != 0 && m != 1) throw cybozu::Exception("makeZkpBinEq:bad m") << m; - Fr *d = &zkp.d_[0]; - Fr *spm = &zkp.d_[2]; - Fr& ss = zkp.d_[4]; - Fr& sp = zkp.d_[5]; - Fr& sm = zkp.d_[6]; - Fr p, s; - p.setRand(); - s.setRand(); - ElGamalEnc(S1, T1, m, Pmul, xPmul, &p); - ElGamalEnc(S2, T2, m, Qmul, yQmul, &s); - d[1-m].setRand(); - spm[1-m].setRand(); - G1 R1[2], R2[2], X1; - Pmul.mul(static_cast(R1[1-m]), spm[1-m]); - G1::mul(X1, T1, d[1-m]); - R1[1-m] -= X1; - if (m == 0) { - G1::sub(X1, S1, P_); - G1::mul(X1, X1, d[1-m]); - } else { - G1::mul(X1, S1, d[1-m]); - } - xPmul.mul(R2[1-m], spm[1-m]); - R2[1-m] -= X1; - Fr rpm, rp, rs, rm; - rpm.setRand(); - rp.setRand(); - rs.setRand(); - rm.setRand(); - ElGamalEnc(R2[m], R1[m], 0, Pmul, xPmul, &rpm); - G1 R3, R4; - G2 R5, R6; - ElGamalEnc(R4, R3, rm, Pmul, xPmul, &rp); - ElGamalEnc(R6, R5, rm, Qmul, yQmul, &rs); - char buf[sizeof(Fr) * 12]; - cybozu::MemoryOutputStream os(buf, sizeof(buf)); - S1.save(os); - T1.save(os); - R1[0].save(os); - R1[1].save(os); - R2[0].save(os); - R2[1].save(os); - R3.save(os); - R4.save(os); - R5.save(os); - R6.save(os); - Fr c; - c.setHashOf(buf, os.getPos()); - Fr::sub(d[m], c, d[1-m]); - Fr::mul(spm[m], d[m], p); - spm[m] += rpm; - Fr::mul(sp, c, p); - sp += rp; - Fr::mul(ss, c, s); - ss += rs; - Fr::mul(sm, c, m); - sm += rm; - } - template - static bool verifyZkpBinEq(const ZkpBinEq& zkp, const G1& S1, const G1& T1, const G2& S2, const G2& T2, const mcl::fp::WindowMethod& Pmul, const MulG1& xPmul, const mcl::fp::WindowMethod& Qmul, const MulG2& yQmul) - { - const Fr *d = &zkp.d_[0]; - 
const Fr *spm = &zkp.d_[2]; - const Fr& ss = zkp.d_[4]; - const Fr& sp = zkp.d_[5]; - const Fr& sm = zkp.d_[6]; - G1 R1[2], R2[2], X1; - for (int i = 0; i < 2; i++) { - Pmul.mul(static_cast(R1[i]), spm[i]); - G1::mul(X1, T1, d[i]); - R1[i] -= X1; - } - xPmul.mul(R2[0], spm[0]); - G1::mul(X1, S1, d[0]); - R2[0] -= X1; - xPmul.mul(R2[1], spm[1]); - G1::sub(X1, S1, P_); - G1::mul(X1, X1, d[1]); - R2[1] -= X1; - Fr c; - Fr::add(c, d[0], d[1]); - G1 R3, R4; - G2 R5, R6; - ElGamalEnc(R4, R3, sm, Pmul, xPmul, &sp); - G1::mul(X1, T1, c); - R3 -= X1; - G1::mul(X1, S1, c); - R4 -= X1; - ElGamalEnc(R6, R5, sm, Qmul, yQmul, &ss); - G2 X2; - G2::mul(X2, T2, c); - R5 -= X2; - G2::mul(X2, S2, c); - R6 -= X2; - char buf[sizeof(Fr) * 12]; - cybozu::MemoryOutputStream os(buf, sizeof(buf)); - S1.save(os); - T1.save(os); - R1[0].save(os); - R1[1].save(os); - R2[0].save(os); - R2[1].save(os); - R3.save(os); - R4.save(os); - R5.save(os); - R6.save(os); - Fr c2; - c2.setHashOf(buf, os.getPos()); - return c == c2; - } - /* - common method for PublicKey and PrecomputedPublicKey - */ - template - struct PublicKeyMethod { - /* - you can use INT as int64_t and Fr, - but the return type of dec() is int64_t. - */ - template - void enc(CipherTextG1& c, const INT& m) const - { - static_cast(*this).encG1(c, m); - } - template - void enc(CipherTextG2& c, const INT& m) const - { - static_cast(*this).encG2(c, m); - } - template - void enc(CipherTextA& c, const INT& m) const - { - enc(c.c1_, m); - enc(c.c2_, m); - } - template - void enc(CipherTextGT& c, const INT& m) const - { - static_cast(*this).encGT(c, m); - } - template - void enc(CipherText& c, const INT& m, bool multiplied = false) const - { - c.isMultiplied_ = multiplied; - if (multiplied) { - enc(c.m_, m); - } else { - enc(c.a_, m); - } - } - /* - reRand method is for circuit privacy - */ - template - void reRandT(CT& c) const - { - CT c0; - static_cast(*this).enc(c0, 0); - CT::add(c, c, c0); - } - void reRand(CipherTextG1& c) const { reRandT(c); } - void reRand(CipherTextG2& c) const { reRandT(c); } - void reRand(CipherTextGT& c) const { reRandT(c); } - void reRand(CipherText& c) const - { - if (c.isMultiplied()) { - reRandT(c.m_); - } else { - reRandT(c.a_); - } - } - /* - convert from CipherTextG1 to CipherTextGT - */ - void convert(CipherTextGT& cm, const CipherTextG1& c1) const - { - /* - Enc(1) = (S, T) = (Q + r yQ, rQ) = (Q, 0) if r = 0 - cm = c1 * (Q, 0) = (S, T) * (Q, 0) = (e(S, Q), 1, e(T, Q), 1) - */ - precomputedMillerLoop(cm.g_[0], c1.getS(), Qcoeff_); - finalExp(cm.g_[0], cm.g_[0]); - precomputedMillerLoop(cm.g_[2], c1.getT(), Qcoeff_); - finalExp(cm.g_[2], cm.g_[2]); - - cm.g_[1] = cm.g_[3] = 1; - } - /* - convert from CipherTextG2 to CipherTextGT - */ - void convert(CipherTextGT& cm, const CipherTextG2& c2) const - { - /* - Enc(1) = (S, T) = (P + r xP, rP) = (P, 0) if r = 0 - cm = (P, 0) * c2 = (e(P, S), e(P, T), 1, 1) - */ - pairing(cm.g_[0], P_, c2.getS()); - pairing(cm.g_[1], P_, c2.getT()); - cm.g_[2] = cm.g_[3] = 1; - } - void convert(CipherTextGT& cm, const CipherTextA& ca) const - { - convert(cm, ca.c1_); - } - void convert(CipherText& cm, const CipherText& ca) const - { - if (ca.isMultiplied()) throw cybozu::Exception("she:PublicKey:convertCipherText:already isMultiplied"); - cm.isMultiplied_ = true; - convert(cm.m_, ca.a_); - } - }; -public: - class PublicKey : public fp::Serializable > { - G1 xP_; - G2 yQ_; - friend class SecretKey; - friend class PrecomputedPublicKey; - template - friend struct PublicKeyMethod; - template - struct MulG { - 
const G& base; - MulG(const G& base) : base(base) {} - template - void mul(G& out, const INT& m) const - { - G::mul(out, base, m); - } - }; - void set(const Fr& x, const Fr& y) - { - G1::mul(xP_, P_, x); - if (!isG1only_) G2::mul(yQ_, Q_, y); - } - template - void encG1(CipherTextG1& c, const INT& m) const - { - const MulG xPmul(xP_); - ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPmul); - } - template - void encG2(CipherTextG2& c, const INT& m) const - { - const MulG yQmul(yQ_); - ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQmul); - } -public: - void encWithZkpBin(CipherTextG1& c, ZkpBin& zkp, int m) const - { - Fr encRand; - encRand.setRand(); - const MulG xPmul(xP_); - ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPmul, &encRand); - makeZkpBin(zkp, c.S_, c.T_, encRand, P_, m, PhashTbl_.getWM(), xPmul); - } - void encWithZkpBin(CipherTextG2& c, ZkpBin& zkp, int m) const - { - Fr encRand; - encRand.setRand(); - const MulG yQmul(yQ_); - ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQmul, &encRand); - makeZkpBin(zkp, c.S_, c.T_, encRand, Q_, m, QhashTbl_.getWM(), yQmul); - } - bool verify(const CipherTextG1& c, const ZkpBin& zkp) const - { - const MulG xPmul(xP_); - return verifyZkpBin(c.S_, c.T_, P_, zkp, PhashTbl_.getWM(), xPmul); - } - bool verify(const CipherTextG2& c, const ZkpBin& zkp) const - { - const MulG yQmul(yQ_); - return verifyZkpBin(c.S_, c.T_, Q_, zkp, QhashTbl_.getWM(), yQmul); - } - template - void encWithZkpEq(CipherTextG1& c1, CipherTextG2& c2, ZkpEq& zkp, const INT& m) const - { - const MulG xPmul(xP_); - const MulG yQmul(yQ_); - makeZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul); - } - bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpEq& zkp) const - { - const MulG xPmul(xP_); - const MulG yQmul(yQ_); - return verifyZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul); - } - void encWithZkpBinEq(CipherTextG1& c1, CipherTextG2& c2, ZkpBinEq& zkp, int m) const - { - const MulG xPmul(xP_); - const MulG yQmul(yQ_); - makeZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul); - } - bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpBinEq& zkp) const - { - const MulG xPmul(xP_); - const MulG yQmul(yQ_); - return verifyZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul); - } - template - void encGT(CipherTextGT& c, const INT& m) const - { - /* - (s, t, u, v) = ((e^x)^a (e^y)^b (e^-xy)^c e^m, e^b, e^a, e^c) - s = e(a xP + m P, Q)e(b P - c xP, yQ) - */ - Fr ra, rb, rc; - ra.setRand(); - rb.setRand(); - rc.setRand(); - GT e; - - G1 P1, P2; - G1::mul(P1, xP_, ra); - if (m) { -// G1::mul(P2, P, m); - PhashTbl_.mulByWindowMethod(P2, m); - P1 += P2; - } -// millerLoop(c.g[0], P1, Q); - precomputedMillerLoop(c.g_[0], P1, Qcoeff_); -// G1::mul(P1, P, rb); - PhashTbl_.mulByWindowMethod(P1, rb); - G1::mul(P2, xP_, rc); - P1 -= P2; - millerLoop(e, P1, yQ_); - c.g_[0] *= e; - finalExp(c.g_[0], c.g_[0]); -#if 1 - ePQhashTbl_.mulByWindowMethod(c.g_[1], rb); - ePQhashTbl_.mulByWindowMethod(c.g_[2], ra); - ePQhashTbl_.mulByWindowMethod(c.g_[3], rc); -#else - GT::pow(c.g_[1], ePQ_, rb); - GT::pow(c.g_[2], ePQ_, ra); - GT::pow(c.g_[3], ePQ_, rc); -#endif - } - public: - template - void load(bool *pb, InputStream& is, int ioMode = IoSerialize) - { - xP_.load(pb, is, ioMode); if (!*pb) return; - if (!isG1only_) yQ_.load(pb, is, ioMode); - } - template - void save(bool *pb, OutputStream& 
os, int ioMode = IoSerialize) const - { - const char sep = *fp::getIoSeparator(ioMode); - xP_.save(pb, os, ioMode); if (!*pb) return; - if (isG1only_) return; - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - yQ_.save(pb, os, ioMode); - } - template - void load(InputStream& is, int ioMode = IoSerialize) - { - bool b; - load(&b, is, ioMode); - if (!b) throw cybozu::Exception("she:PublicKey:load"); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - bool b; - save(&b, os, ioMode); - if (!b) throw cybozu::Exception("she:PublicKey:save"); - } - friend std::istream& operator>>(std::istream& is, PublicKey& self) - { - self.load(is, fp::detectIoMode(G1::getIoMode(), is)); - return is; - } - friend std::ostream& operator<<(std::ostream& os, const PublicKey& self) - { - self.save(os, fp::detectIoMode(G1::getIoMode(), os)); - return os; - } - bool operator==(const PublicKey& rhs) const - { - return xP_ == rhs.xP_ && (isG1only_ || yQ_ == rhs.yQ_); - } - bool operator!=(const PublicKey& rhs) const { return !operator==(rhs); } - }; - - class PrecomputedPublicKey : public fp::Serializable > { - typedef local::InterfaceForHashTable GTasEC; - typedef mcl::fp::WindowMethod GTwin; - template - friend struct PublicKeyMethod; - GT exPQ_; - GT eyPQ_; - GT exyPQ_; - GTwin exPQwm_; - GTwin eyPQwm_; - GTwin exyPQwm_; - mcl::fp::WindowMethod xPwm_; - mcl::fp::WindowMethod yQwm_; - template - void mulByWindowMethod(GT& x, const GTwin& wm, const T& y) const - { - wm.mul(static_cast(x), y); - } - template - void encG1(CipherTextG1& c, const INT& m) const - { - ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPwm_); - } - template - void encG2(CipherTextG2& c, const INT& m) const - { - ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQwm_); - } - template - void encGT(CipherTextGT& c, const INT& m) const - { - /* - (s, t, u, v) = (e^m e^(xya), (e^x)^b, (e^y)^c, e^(b + c - a)) - */ - Fr ra, rb, rc; - ra.setRand(); - rb.setRand(); - rc.setRand(); - GT t; - ePQhashTbl_.mulByWindowMethod(c.g_[0], m); // e^m - mulByWindowMethod(t, exyPQwm_, ra); // (e^xy)^a - c.g_[0] *= t; - mulByWindowMethod(c.g_[1], exPQwm_, rb); // (e^x)^b - mulByWindowMethod(c.g_[2], eyPQwm_, rc); // (e^y)^c - rb += rc; - rb -= ra; - ePQhashTbl_.mulByWindowMethod(c.g_[3], rb); - } - public: - void init(const PublicKey& pub) - { - const size_t bitSize = Fr::getBitSize(); - xPwm_.init(pub.xP_, bitSize, local::winSize); - if (isG1only_) return; - yQwm_.init(pub.yQ_, bitSize, local::winSize); - pairing(exPQ_, pub.xP_, Q_); - pairing(eyPQ_, P_, pub.yQ_); - pairing(exyPQ_, pub.xP_, pub.yQ_); - exPQwm_.init(static_cast(exPQ_), bitSize, local::winSize); - eyPQwm_.init(static_cast(eyPQ_), bitSize, local::winSize); - exyPQwm_.init(static_cast(exyPQ_), bitSize, local::winSize); - } - void encWithZkpBin(CipherTextG1& c, ZkpBin& zkp, int m) const - { - Fr encRand; - encRand.setRand(); - ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPwm_, &encRand); - makeZkpBin(zkp, c.S_, c.T_, encRand, P_, m, PhashTbl_.getWM(), xPwm_); - } - void encWithZkpBin(CipherTextG2& c, ZkpBin& zkp, int m) const - { - Fr encRand; - encRand.setRand(); - ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQwm_, &encRand); - makeZkpBin(zkp, c.S_, c.T_, encRand, Q_, m, QhashTbl_.getWM(), yQwm_); - } - bool verify(const CipherTextG1& c, const ZkpBin& zkp) const - { - return verifyZkpBin(c.S_, c.T_, P_, zkp, PhashTbl_.getWM(), xPwm_); - } - bool verify(const CipherTextG2& c, const ZkpBin& zkp) const - { - return verifyZkpBin(c.S_, c.T_, Q_, zkp, 
QhashTbl_.getWM(), yQwm_); - } - template - void encWithZkpEq(CipherTextG1& c1, CipherTextG2& c2, ZkpEq& zkp, const INT& m) const - { - makeZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_); - } - bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpEq& zkp) const - { - return verifyZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_); - } - void encWithZkpBinEq(CipherTextG1& c1, CipherTextG2& c2, ZkpBinEq& zkp, int m) const - { - makeZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_); - } - bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpBinEq& zkp) const - { - return verifyZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_); - } - }; - class CipherTextA { - CipherTextG1 c1_; - CipherTextG2 c2_; - friend class SecretKey; - friend class PublicKey; - friend class CipherTextGT; - template - friend struct PublicKeyMethod; - public: - void clear() - { - c1_.clear(); - c2_.clear(); - } - static void add(CipherTextA& z, const CipherTextA& x, const CipherTextA& y) - { - CipherTextG1::add(z.c1_, x.c1_, y.c1_); - CipherTextG2::add(z.c2_, x.c2_, y.c2_); - } - static void sub(CipherTextA& z, const CipherTextA& x, const CipherTextA& y) - { - CipherTextG1::sub(z.c1_, x.c1_, y.c1_); - CipherTextG2::sub(z.c2_, x.c2_, y.c2_); - } - static void mul(CipherTextA& z, const CipherTextA& x, int64_t y) - { - CipherTextG1::mul(z.c1_, x.c1_, y); - CipherTextG2::mul(z.c2_, x.c2_, y); - } - static void neg(CipherTextA& y, const CipherTextA& x) - { - CipherTextG1::neg(y.c1_, x.c1_); - CipherTextG2::neg(y.c2_, x.c2_); - } - void add(const CipherTextA& c) { add(*this, *this, c); } - void sub(const CipherTextA& c) { sub(*this, *this, c); } - template - void load(bool *pb, InputStream& is, int ioMode = IoSerialize) - { - c1_.load(pb, is, ioMode); if (!*pb) return; - c2_.load(pb, is, ioMode); - } - template - void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const - { - const char sep = *fp::getIoSeparator(ioMode); - c1_.save(pb, os, ioMode); if (!*pb) return; - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - c2_.save(pb, os, ioMode); - } - template - void load(InputStream& is, int ioMode = IoSerialize) - { - bool b; - load(&b, is, ioMode); - if (!b) throw cybozu::Exception("she:CipherTextA:load"); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - bool b; - save(&b, os, ioMode); - if (!b) throw cybozu::Exception("she:CipherTextA:save"); - } - friend std::istream& operator>>(std::istream& is, CipherTextA& self) - { - self.load(is, fp::detectIoMode(G1::getIoMode(), is)); - return is; - } - friend std::ostream& operator<<(std::ostream& os, const CipherTextA& self) - { - self.save(os, fp::detectIoMode(G1::getIoMode(), os)); - return os; - } - bool operator==(const CipherTextA& rhs) const - { - return c1_ == rhs.c1_ && c2_ == rhs.c2_; - } - bool operator!=(const CipherTextA& rhs) const { return !operator==(rhs); } - }; - - class CipherTextGT : public fp::Serializable { - GT g_[4]; - friend class SecretKey; - friend class PublicKey; - friend class PrecomputedPublicKey; - friend class CipherTextA; - template - friend struct PublicKeyMethod; - public: - void clear() - { - for (int i = 0; i < 4; i++) { - g_[i].setOne(); - } - } - static void neg(CipherTextGT& y, const CipherTextGT& x) - { - for (int i = 0; i < 4; i++) { - GT::unitaryInv(y.g_[i], x.g_[i]); - } 
- } - static void add(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y) - { - /* - (g[i]) + (g'[i]) = (g[i] * g'[i]) - */ - for (int i = 0; i < 4; i++) { - GT::mul(z.g_[i], x.g_[i], y.g_[i]); - } - } - static void sub(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y) - { - /* - (g[i]) - (g'[i]) = (g[i] / g'[i]) - */ - GT t; - for (size_t i = 0; i < 4; i++) { - GT::unitaryInv(t, y.g_[i]); - GT::mul(z.g_[i], x.g_[i], t); - } - } - static void mulML(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y) - { - /* - (S1, T1) * (S2, T2) = (ML(S1, S2), ML(S1, T2), ML(T1, S2), ML(T1, T2)) - */ - tensorProductML(z.g_, x.S_, x.T_, y.S_, y.T_); - } - static void finalExp(CipherTextGT& y, const CipherTextGT& x) - { - finalExp4(y.g_, x.g_); - } - /* - mul(x, y) = mulML(x, y) + finalExp - mul(c11, c12) + mul(c21, c22) - = finalExp(mulML(c11, c12) + mulML(c21, c22)), - then one finalExp can be reduced - */ - static void mul(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y) - { - /* - (S1, T1) * (S2, T2) = (e(S1, S2), e(S1, T2), e(T1, S2), e(T1, T2)) - */ - mulML(z, x, y); - finalExp(z, z); - } - static void mul(CipherTextGT& z, const CipherTextA& x, const CipherTextA& y) - { - mul(z, x.c1_, y.c2_); - } - static void mul(CipherTextGT& z, const CipherTextGT& x, int64_t y) - { - for (int i = 0; i < 4; i++) { - GT::pow(z.g_[i], x.g_[i], y); - } - } - void add(const CipherTextGT& c) { add(*this, *this, c); } - void sub(const CipherTextGT& c) { sub(*this, *this, c); } - template - void load(bool *pb, InputStream& is, int ioMode = IoSerialize) - { - for (int i = 0; i < 4; i++) { - g_[i].load(pb, is, ioMode); if (!*pb) return; - } - } - template - void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const - { - const char sep = *fp::getIoSeparator(ioMode); - g_[0].save(pb, os, ioMode); if (!*pb) return; - for (int i = 1; i < 4; i++) { - if (sep) { - cybozu::writeChar(pb, os, sep); - if (!*pb) return; - } - g_[i].save(pb, os, ioMode); if (!*pb) return; - } - } - template - void load(InputStream& is, int ioMode = IoSerialize) - { - bool b; - load(&b, is, ioMode); - if (!b) throw cybozu::Exception("she:CipherTextGT:load"); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - bool b; - save(&b, os, ioMode); - if (!b) throw cybozu::Exception("she:CipherTextGT:save"); - } - friend std::istream& operator>>(std::istream& is, CipherTextGT& self) - { - self.load(is, fp::detectIoMode(G1::getIoMode(), is)); - return is; - } - friend std::ostream& operator<<(std::ostream& os, const CipherTextGT& self) - { - self.save(os, fp::detectIoMode(G1::getIoMode(), os)); - return os; - } - bool operator==(const CipherTextGT& rhs) const - { - for (int i = 0; i < 4; i++) { - if (g_[i] != rhs.g_[i]) return false; - } - return true; - } - bool operator!=(const CipherTextGT& rhs) const { return !operator==(rhs); } - }; - - class CipherText : public fp::Serializable { - bool isMultiplied_; - CipherTextA a_; - CipherTextGT m_; - friend class SecretKey; - friend class PublicKey; - template - friend struct PublicKeyMethod; - public: - CipherText() : isMultiplied_(false) {} - void clearAsAdded() - { - isMultiplied_ = false; - a_.clear(); - } - void clearAsMultiplied() - { - isMultiplied_ = true; - m_.clear(); - } - bool isMultiplied() const { return isMultiplied_; } - static void add(CipherText& z, const CipherText& x, const CipherText& y) - { - if (x.isMultiplied() && y.isMultiplied()) { - z.isMultiplied_ = true; - CipherTextGT::add(z.m_, x.m_, y.m_); - 
return; - } - if (!x.isMultiplied() && !y.isMultiplied()) { - z.isMultiplied_ = false; - CipherTextA::add(z.a_, x.a_, y.a_); - return; - } - throw cybozu::Exception("she:CipherText:add:mixed CipherText"); - } - static void sub(CipherText& z, const CipherText& x, const CipherText& y) - { - if (x.isMultiplied() && y.isMultiplied()) { - z.isMultiplied_ = true; - CipherTextGT::sub(z.m_, x.m_, y.m_); - return; - } - if (!x.isMultiplied() && !y.isMultiplied()) { - z.isMultiplied_ = false; - CipherTextA::sub(z.a_, x.a_, y.a_); - return; - } - throw cybozu::Exception("she:CipherText:sub:mixed CipherText"); - } - static void neg(CipherText& y, const CipherText& x) - { - if (x.isMultiplied()) { - y.isMultiplied_ = true; - CipherTextGT::neg(y.m_, x.m_); - return; - } else { - y.isMultiplied_ = false; - CipherTextA::neg(y.a_, x.a_); - return; - } - } - static void mul(CipherText& z, const CipherText& x, const CipherText& y) - { - if (x.isMultiplied() || y.isMultiplied()) { - throw cybozu::Exception("she:CipherText:mul:mixed CipherText"); - } - z.isMultiplied_ = true; - CipherTextGT::mul(z.m_, x.a_, y.a_); - } - static void mul(CipherText& z, const CipherText& x, int64_t y) - { - if (x.isMultiplied()) { - CipherTextGT::mul(z.m_, x.m_, y); - } else { - CipherTextA::mul(z.a_, x.a_, y); - } - } - void add(const CipherText& c) { add(*this, *this, c); } - void sub(const CipherText& c) { sub(*this, *this, c); } - void mul(const CipherText& c) { mul(*this, *this, c); } - template - void load(bool *pb, InputStream& is, int ioMode = IoSerialize) - { - cybozu::writeChar(pb, isMultiplied_ ? '0' : '1', is); if (!*pb) return; - if (isMultiplied()) { - m_.load(pb, is, ioMode); - } else { - a_.load(pb, is, ioMode); - } - } - template - void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const - { - char c; - if (!cybozu::readChar(&c, os)) return; - if (c == '0' || c == '1') { - isMultiplied_ = c == '0'; - } else { - *pb = false; - return; - } - if (isMultiplied()) { - m_.save(pb, os, ioMode); - } else { - a_.save(pb, os, ioMode); - } - } - template - void load(InputStream& is, int ioMode = IoSerialize) - { - bool b; - load(&b, is, ioMode); - if (!b) throw cybozu::Exception("she:CipherText:load"); - } - template - void save(OutputStream& os, int ioMode = IoSerialize) const - { - bool b; - save(&b, os, ioMode); - if (!b) throw cybozu::Exception("she:CipherText:save"); - } - friend std::istream& operator>>(std::istream& is, CipherText& self) - { - self.load(is, fp::detectIoMode(G1::getIoMode(), is)); - return is; - } - friend std::ostream& operator<<(std::ostream& os, const CipherText& self) - { - self.save(os, fp::detectIoMode(G1::getIoMode(), os)); - return os; - } - bool operator==(const CipherTextGT& rhs) const - { - if (isMultiplied() != rhs.isMultiplied()) return false; - if (isMultiplied()) { - return m_ == rhs.m_; - } - return a_ == rhs.a_; - } - bool operator!=(const CipherTextGT& rhs) const { return !operator==(rhs); } - }; -}; -typedef local::HashTable HashTableG1; -typedef local::HashTable HashTableG2; -typedef local::HashTable HashTableGT; - -template G1 SHET::P_; -template G2 SHET::Q_; -template Fp12 SHET::ePQ_; -template std::vector SHET::Qcoeff_; -template HashTableG1 SHET::PhashTbl_; -template HashTableG2 SHET::QhashTbl_; -template HashTableGT SHET::ePQhashTbl_; -template bool SHET::useDecG1ViaGT_; -template bool SHET::useDecG2ViaGT_; -template bool SHET::isG1only_; -typedef mcl::she::SHET<> SHE; -typedef SHE::SecretKey SecretKey; -typedef SHE::PublicKey PublicKey; -typedef 
SHE::PrecomputedPublicKey PrecomputedPublicKey; -typedef SHE::CipherTextG1 CipherTextG1; -typedef SHE::CipherTextG2 CipherTextG2; -typedef SHE::CipherTextGT CipherTextGT; -typedef SHE::CipherTextA CipherTextA; -typedef CipherTextGT CipherTextGM; // old class -typedef SHE::CipherText CipherText; -typedef SHE::ZkpBin ZkpBin; -typedef SHE::ZkpEq ZkpEq; -typedef SHE::ZkpBinEq ZkpBinEq; - -inline void init(const mcl::CurveParam& cp = mcl::BN254, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum) -{ - SHE::init(cp, hashSize, tryNum); -} -inline void initG1only(const mcl::EcParam& para, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum) -{ - SHE::initG1only(para, hashSize, tryNum); -} -inline void init(size_t hashSize, size_t tryNum = local::defaultTryNum) { SHE::init(hashSize, tryNum); } -inline void setRangeForG1DLP(size_t hashSize) { SHE::setRangeForG1DLP(hashSize); } -inline void setRangeForG2DLP(size_t hashSize) { SHE::setRangeForG2DLP(hashSize); } -inline void setRangeForGTDLP(size_t hashSize) { SHE::setRangeForGTDLP(hashSize); } -inline void setRangeForDLP(size_t hashSize) { SHE::setRangeForDLP(hashSize); } -inline void setTryNum(size_t tryNum) { SHE::setTryNum(tryNum); } -inline void useDecG1ViaGT(bool use = true) { SHE::useDecG1ViaGT(use); } -inline void useDecG2ViaGT(bool use = true) { SHE::useDecG2ViaGT(use); } -inline HashTableG1& getHashTableG1() { return SHE::PhashTbl_; } -inline HashTableG2& getHashTableG2() { return SHE::QhashTbl_; } -inline HashTableGT& getHashTableGT() { return SHE::ePQhashTbl_; } - -inline void add(CipherTextG1& z, const CipherTextG1& x, const CipherTextG1& y) { CipherTextG1::add(z, x, y); } -inline void add(CipherTextG2& z, const CipherTextG2& x, const CipherTextG2& y) { CipherTextG2::add(z, x, y); } -inline void add(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y) { CipherTextGT::add(z, x, y); } -inline void add(CipherText& z, const CipherText& x, const CipherText& y) { CipherText::add(z, x, y); } - -inline void sub(CipherTextG1& z, const CipherTextG1& x, const CipherTextG1& y) { CipherTextG1::sub(z, x, y); } -inline void sub(CipherTextG2& z, const CipherTextG2& x, const CipherTextG2& y) { CipherTextG2::sub(z, x, y); } -inline void sub(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y) { CipherTextGT::sub(z, x, y); } -inline void sub(CipherText& z, const CipherText& x, const CipherText& y) { CipherText::sub(z, x, y); } - -inline void neg(CipherTextG1& y, const CipherTextG1& x) { CipherTextG1::neg(y, x); } -inline void neg(CipherTextG2& y, const CipherTextG2& x) { CipherTextG2::neg(y, x); } -inline void neg(CipherTextGT& y, const CipherTextGT& x) { CipherTextGT::neg(y, x); } -inline void neg(CipherText& y, const CipherText& x) { CipherText::neg(y, x); } - -template -inline void mul(CipherTextG1& z, const CipherTextG1& x, const INT& y) { CipherTextG1::mul(z, x, y); } -template -inline void mul(CipherTextG2& z, const CipherTextG2& x, const INT& y) { CipherTextG2::mul(z, x, y); } -template -inline void mul(CipherTextGT& z, const CipherTextGT& x, const INT& y) { CipherTextGT::mul(z, x, y); } -template -inline void mul(CipherText& z, const CipherText& x, const INT& y) { CipherText::mul(z, x, y); } - -inline void mul(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y) { CipherTextGT::mul(z, x, y); } -inline void mul(CipherText& z, const CipherText& x, const CipherText& y) { CipherText::mul(z, x, y); } - -} } // mcl::she - diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/util.hpp 
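The wrappers above expose the `mulML`/`finalExp` split so callers can batch several ciphertext products under one final exponentiation, per the identity noted in `CipherTextGT::mul`. Below is a minimal sketch of a two-term homomorphic inner product; it assumes the `SecretKey`/`PublicKey` API (`setByCSPRNG`, `getPublicKey`, `enc`, `dec`) defined earlier in this header, outside the hunk shown here.

```cpp
// Sketch only: key-handling calls are assumed from the earlier part of she.hpp.
#include <mcl/she.hpp>
#include <cstdio>

int main()
{
    using namespace mcl::she;
    init(); // BN254 with default DLP table sizes

    SecretKey sec;
    sec.setByCSPRNG();
    PublicKey pub;
    sec.getPublicKey(pub);

    CipherTextG1 a1, a2;
    CipherTextG2 b1, b2;
    pub.enc(a1, 2); pub.enc(a2, 3);
    pub.enc(b1, 5); pub.enc(b2, 7);

    // Two Miller loops, one shared final exponentiation.
    CipherTextGT t1, t2;
    CipherTextGT::mulML(t1, a1, b1);  // ML form of Enc(2)*Enc(5)
    CipherTextGT::mulML(t2, a2, b2);  // ML form of Enc(3)*Enc(7)
    CipherTextGT::add(t1, t1, t2);    // still in ML form
    CipherTextGT::finalExp(t1, t1);   // Enc(2*5 + 3*7)
    printf("%d\n", (int)sec.dec(t1)); // 31
    return 0;
}
```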
b/vendor/github.com/dexon-foundation/mcl/include/mcl/util.hpp deleted file mode 100644 index edef971cb..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/util.hpp +++ /dev/null @@ -1,285 +0,0 @@ -#pragma once -/** - @file - @brief functions for T[] - @author MITSUNARI Shigeo(@herumi) - @license modified new BSD license - http://opensource.org/licenses/BSD-3-Clause -*/ -#include - -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable : 4456) - #pragma warning(disable : 4459) -#endif - -namespace mcl { namespace fp { - -template -T abs_(T x) { return x < 0 ? -x : x; } - -template -T min_(T x, T y) { return x < y ? x : y; } - -template -T max_(T x, T y) { return x < y ? y : x; } - -template -void swap_(T& x, T& y) -{ - T t; - t = x; - x = y; - y = t; -} - - -/* - get pp such that p * pp = -1 mod M, - where p is prime and M = 1 << 64(or 32). - @param pLow [in] p mod M -*/ -template -T getMontgomeryCoeff(T pLow) -{ - T ret = 0; - T t = 0; - T x = 1; - for (size_t i = 0; i < sizeof(T) * 8; i++) { - if ((t & 1) == 0) { - t += pLow; - ret += x; - } - t >>= 1; - x <<= 1; - } - return ret; -} - -template -int compareArray(const T* x, const T* y, size_t n) -{ - for (size_t i = n - 1; i != size_t(-1); i--) { - T a = x[i]; - T b = y[i]; - if (a != b) return a < b ? -1 : 1; - } - return 0; -} - -template -bool isLessArray(const T *x, const T* y, size_t n) -{ - for (size_t i = n - 1; i != size_t(-1); i--) { - T a = x[i]; - T b = y[i]; - if (a != b) return a < b; - } - return false; -} - -template -bool isGreaterOrEqualArray(const T *x, const T* y, size_t n) -{ - return !isLessArray(x, y, n); -} - -template -bool isLessOrEqualArray(const T *x, const T* y, size_t n) -{ - for (size_t i = n - 1; i != size_t(-1); i--) { - T a = x[i]; - T b = y[i]; - if (a != b) return a < b; - } - return true; -} - -template -bool isGreaterArray(const T *x, const T* y, size_t n) -{ - return !isLessOrEqualArray(x, y, n); -} - -template -bool isEqualArray(const T* x, const T* y, size_t n) -{ - for (size_t i = 0; i < n; i++) { - if (x[i] != y[i]) return false; - } - return true; -} - -template -bool isZeroArray(const T *x, size_t n) -{ - for (size_t i = 0; i < n; i++) { - if (x[i]) return false; - } - return true; -} - -template -void clearArray(T *x, size_t begin, size_t end) -{ - for (size_t i = begin; i < end; i++) x[i] = 0; -} - -template -void copyArray(T *y, const T *x, size_t n) -{ - for (size_t i = 0; i < n; i++) y[i] = x[i]; -} - -/* - x &= (1 << bitSize) - 1 -*/ -template -void maskArray(T *x, size_t n, size_t bitSize) -{ - const size_t TbitSize = sizeof(T) * 8; - assert(bitSize <= TbitSize * n); - const size_t q = bitSize / TbitSize; - const size_t r = bitSize % TbitSize; - if (r) { - x[q] &= (T(1) << r) - 1; - clearArray(x, q + 1, n); - } else { - clearArray(x, q, n); - } -} - -/* - return non zero size of x[] - return 1 if x[] == 0 -*/ -template -size_t getNonZeroArraySize(const T *x, size_t n) -{ - assert(n > 0); - while (n > 0) { - if (x[n - 1]) return n; - n--; - } - return 1; -} - -/* - @param out [inout] : set element of G ; out = x^y[] - @param x [in] - @param y [in] - @param n [in] size of y[] - @param limitBit [in] const time version if the value is positive - @note &out != x and out = the unit element of G -*/ -template -void powGeneric(G& out, const G& x, const T *y, size_t n, const Mul& mul, const Sqr& sqr, void normalize(G&, const G&), size_t limitBit = 0) -{ - assert(&out != &x); - G tbl[4]; // tbl = { discard, x, x^2, x^3 } - T v; - bool constTime = limitBit > 0; - int 
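`getMontgomeryCoeff` constructs `pp` with `p * pp = -1 mod M` one output bit per iteration: whenever the running partial product is even, the next bit of `pp` is set and `p` is added in. A standalone restatement for `uint32_t` (the name `montCoeff` is local to this sketch) that checks the defining property:

```cpp
#include <cstdint>
#include <cstddef>
#include <cassert>
#include <cstdio>

// p * montCoeff(p) == -1 (mod 2^32) for odd p, built bit by bit.
static uint32_t montCoeff(uint32_t pLow)
{
    uint32_t ret = 0, t = 0, x = 1;
    for (size_t i = 0; i < sizeof(uint32_t) * 8; i++) {
        if ((t & 1) == 0) {
            t += pLow;  // p is odd, so this forces the current low bit to 1
            ret += x;   // record bit i of pp
        }
        t >>= 1;
        x <<= 1;
    }
    return ret;
}

int main()
{
    const uint32_t p = 0xffffffefu; // any odd value works
    const uint32_t pp = montCoeff(p);
    assert(uint32_t(p * pp) == 0xffffffffu); // i.e. p * pp == -1 mod 2^32
    printf("pp = 0x%08x\n", pp);
    return 0;
}
```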
maxBit = 0; - int m = 0; - while (n > 0) { - if (y[n - 1]) break; - n--; - } - if (n == 0) { - if (constTime) goto DummyLoop; - return; - } - if (!constTime && n == 1) { - switch (y[0]) { - case 1: - out = x; - return; - case 2: - sqr(out, x); - return; - case 3: - sqr(out, x); - mul(out, out, x); - return; - case 4: - sqr(out, x); - sqr(out, out); - return; - } - } - if (normalize != 0) { - normalize(tbl[0], x); - } else { - tbl[0] = x; - } - tbl[1] = tbl[0]; - sqr(tbl[2], tbl[1]); - if (normalize != 0) { normalize(tbl[2], tbl[2]); } - mul(tbl[3], tbl[2], x); - if (normalize != 0) { normalize(tbl[3], tbl[3]); } - v = y[n - 1]; - assert(v); - m = cybozu::bsr(v); - maxBit = int(m + (n - 1) * sizeof(T) * 8); - if (m & 1) { - m--; - T idx = (v >> m) & 3; - assert(idx > 0); - out = tbl[idx]; - } else { - out = x; - } - for (int i = (int)n - 1; i >= 0; i--) { - T v = y[i]; - for (int j = m - 2; j >= 0; j -= 2) { - sqr(out, out); - sqr(out, out); - T idx = (v >> j) & 3; - if (idx == 0) { - if (constTime) mul(tbl[0], tbl[0], tbl[1]); - } else { - mul(out, out, tbl[idx]); - } - } - m = (int)sizeof(T) * 8; - } -DummyLoop: - if (!constTime) return; - G D = out; - for (size_t i = maxBit + 1; i < limitBit; i += 2) { - sqr(D, D); - sqr(D, D); - mul(D, D, tbl[1]); - } -} - -/* - shortcut of multiplication by Unit -*/ -template -bool mulSmallUnit(T& z, const T& x, U y) -{ - switch (y) { - case 0: z.clear(); break; - case 1: z = x; break; - case 2: T::add(z, x, x); break; - case 3: { T t; T::add(t, x, x); T::add(z, t, x); break; } - case 4: T::add(z, x, x); T::add(z, z, z); break; - case 5: { T t; T::add(t, x, x); T::add(t, t, t); T::add(z, t, x); break; } - case 6: { T t; T::add(t, x, x); T::add(t, t, x); T::add(z, t, t); break; } - case 7: { T t; T::add(t, x, x); T::add(t, t, t); T::add(t, t, t); T::sub(z, t, x); break; } - case 8: T::add(z, x, x); T::add(z, z, z); T::add(z, z, z); break; - case 9: { T t; T::add(t, x, x); T::add(t, t, t); T::add(t, t, t); T::add(z, t, x); break; } - case 10: { T t; T::add(t, x, x); T::add(t, t, t); T::add(t, t, x); T::add(z, t, t); break; } - default: - return false; - } - return true; -} - -} } // mcl::fp - -#ifdef _MSC_VER - #pragma warning(pop) -#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/vint.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/vint.hpp deleted file mode 100644 index b087688c3..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/vint.hpp +++ /dev/null @@ -1,1987 +0,0 @@ -#pragma once -/** - emulate mpz_class -*/ -#include -#include -#include -#include -#ifndef CYBOZU_DONT_USE_STRING -#include -#endif -#include -#include -#include -#include - -#if defined(__EMSCRIPTEN__) || defined(__wasm__) - #define MCL_VINT_64BIT_PORTABLE - #define MCL_VINT_FIXED_BUFFER -#endif -#ifndef MCL_MAX_BIT_SIZE - #define MCL_MAX_BIT_SIZE 384 -#endif - -#ifndef MCL_SIZEOF_UNIT - #if defined(CYBOZU_OS_BIT) && (CYBOZU_OS_BIT == 32) - #define MCL_SIZEOF_UNIT 4 - #else - #define MCL_SIZEOF_UNIT 8 - #endif -#endif - -namespace mcl { - -namespace vint { - -#if MCL_SIZEOF_UNIT == 8 -typedef uint64_t Unit; -#else -typedef uint32_t Unit; -#endif - -template -void dump(const T *x, size_t n, const char *msg = "") -{ - const size_t is4byteUnit = sizeof(*x) == 4; - if (msg) printf("%s ", msg); - for (size_t i = 0; i < n; i++) { - if (is4byteUnit) { - printf("%08x", (uint32_t)x[n - 1 - i]); - } else { - printf("%016llx", (unsigned long long)x[n - 1 - i]); - } - } - printf("\n"); -} - -inline uint64_t make64(uint32_t H, uint32_t L) -{ - 
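`powGeneric` consumes the exponent two bits at a time against the table `{1, x, x^2, x^3}`, and its constant-time path issues a dummy multiply when a digit is zero so the operation trace stays uniform. Here is the same base-4 scheme over plain `uint64_t` modular arithmetic (an analogue, not the group version above; `mulmod` and `powWin2` are illustrative names):

```cpp
#include <cstdint>
#include <cassert>

static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t m)
{
    return (uint64_t)((unsigned __int128)a * b % m); // gcc/clang extension
}

// Base-4 square-and-multiply: two squarings plus one table multiply per digit.
static uint64_t powWin2(uint64_t x, uint64_t y, uint64_t m)
{
    uint64_t tbl[4];
    tbl[0] = 1 % m; // multiplying by tbl[0] is the "dummy" operation
    tbl[1] = x % m;
    tbl[2] = mulmod(tbl[1], tbl[1], m);
    tbl[3] = mulmod(tbl[2], tbl[1], m);
    uint64_t z = 1 % m;
    for (int j = 62; j >= 0; j -= 2) {
        z = mulmod(z, z, m);
        z = mulmod(z, z, m);
        z = mulmod(z, tbl[(y >> j) & 3], m); // unconditional, constant-time style
    }
    return z;
}

int main()
{
    assert(powWin2(2, 5, 1000) == 32);
    assert(powWin2(7, 560, 561) == 1); // 561 is a Carmichael number
    return 0;
}
```

The deleted routine additionally skips leading zero digits and special-cases tiny exponents; the fixed 32-digit loop here trades that speed for brevity.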
return ((uint64_t)H << 32) | L; -} - -inline void split64(uint32_t *H, uint32_t *L, uint64_t x) -{ - *H = uint32_t(x >> 32); - *L = uint32_t(x); -} - -/* - [H:L] <= x * y - @return L -*/ -inline uint32_t mulUnit(uint32_t *pH, uint32_t x, uint32_t y) -{ - uint64_t t = uint64_t(x) * y; - uint32_t L; - split64(pH, &L, t); - return L; -} -#if MCL_SIZEOF_UNIT == 8 -inline uint64_t mulUnit(uint64_t *pH, uint64_t x, uint64_t y) -{ -#ifdef MCL_VINT_64BIT_PORTABLE - uint32_t a = uint32_t(x >> 32); - uint32_t b = uint32_t(x); - uint32_t c = uint32_t(y >> 32); - uint32_t d = uint32_t(y); - - uint64_t ad = uint64_t(d) * a; - uint64_t bd = uint64_t(d) * b; - uint64_t L = uint32_t(bd); - ad += bd >> 32; // [ad:L] - - uint64_t ac = uint64_t(c) * a; - uint64_t bc = uint64_t(c) * b; - uint64_t H = uint32_t(bc); - ac += bc >> 32; // [ac:H] - /* - adL - acH - */ - uint64_t t = (ac << 32) | H; - ac >>= 32; - H = t + ad; - if (H < t) { - ac++; - } - /* - ac:H:L - */ - L |= H << 32; - H = (ac << 32) | uint32_t(H >> 32); - *pH = H; - return L; -#elif defined(_WIN64) && !defined(__INTEL_COMPILER) - return _umul128(x, y, pH); -#else - typedef __attribute__((mode(TI))) unsigned int uint128; - uint128 t = uint128(x) * y; - *pH = uint64_t(t >> 64); - return uint64_t(t); -#endif -} -#endif - -template -void divNM(T *q, size_t qn, T *r, const T *x, size_t xn, const T *y, size_t yn); - -/* - q = [H:L] / y - r = [H:L] % y - return q -*/ -inline uint32_t divUnit(uint32_t *pr, uint32_t H, uint32_t L, uint32_t y) -{ - uint64_t t = make64(H, L); - uint32_t q = uint32_t(t / y); - *pr = uint32_t(t % y); - return q; -} -#if MCL_SIZEOF_UNIT == 8 -inline uint64_t divUnit(uint64_t *pr, uint64_t H, uint64_t L, uint64_t y) -{ -#if defined(MCL_VINT_64BIT_PORTABLE) - uint32_t px[4] = { uint32_t(L), uint32_t(L >> 32), uint32_t(H), uint32_t(H >> 32) }; - uint32_t py[2] = { uint32_t(y), uint32_t(y >> 32) }; - size_t xn = 4; - size_t yn = 2; - uint32_t q[4]; - uint32_t r[2]; - size_t qn = xn - yn + 1; - divNM(q, qn, r, px, xn, py, yn); - *pr = make64(r[1], r[0]); - return make64(q[1], q[0]); -#elif defined(_MSC_VER) - #error "divUnit for uint64_t is not supported" -#else - typedef __attribute__((mode(TI))) unsigned int uint128; - uint128 t = (uint128(H) << 64) | L; - uint64_t q = uint64_t(t / y); - *pr = uint64_t(t % y); - return q; -#endif -} -#endif - -/* - compare x[] and y[] - @retval positive if x > y - @retval 0 if x == y - @retval negative if x < y -*/ -template -int compareNM(const T *x, size_t xn, const T *y, size_t yn) -{ - assert(xn > 0 && yn > 0); - if (xn != yn) return xn > yn ? 1 : -1; - for (int i = (int)xn - 1; i >= 0; i--) { - if (x[i] != y[i]) return x[i] > y[i] ? 1 : -1; - } - return 0; -} - -template -void clearN(T *x, size_t n) -{ - for (size_t i = 0; i < n; i++) x[i] = 0; -} - -template -void copyN(T *y, const T *x, size_t n) -{ - for (size_t i = 0; i < n; i++) y[i] = x[i]; -} - -/* - z[] = x[n] + y[n] - @note return 1 if having carry - z may be equal to x or y -*/ -template -T addN(T *z, const T *x, const T *y, size_t n) -{ - T c = 0; - for (size_t i = 0; i < n; i++) { - T xc = x[i] + c; - if (xc < c) { - // x[i] = Unit(-1) and c = 1 - z[i] = y[i]; - } else { - xc += y[i]; - c = y[i] > xc ? 
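On compilers without a 128-bit type, the `MCL_VINT_64BIT_PORTABLE` branch of `mulUnit` assembles the full product from four 32x32-bit partial products. The same arithmetic as a standalone function, cross-checked against `unsigned __int128` (a gcc/clang extension, so the check itself only compiles where the intrinsic path would also exist):

```cpp
#include <cstdint>
#include <cassert>

// [*pH : return value] = x * y, built from 32-bit halves.
static uint64_t mulUnitPortable(uint64_t *pH, uint64_t x, uint64_t y)
{
    const uint32_t a = uint32_t(x >> 32), b = uint32_t(x);
    const uint32_t c = uint32_t(y >> 32), d = uint32_t(y);
    uint64_t ad = uint64_t(d) * a;
    const uint64_t bd = uint64_t(d) * b;
    uint64_t L = uint32_t(bd);
    ad += bd >> 32;                  // [ad:L] = x * d
    uint64_t ac = uint64_t(c) * a;
    const uint64_t bc = uint64_t(c) * b;
    uint64_t H = uint32_t(bc);
    ac += bc >> 32;                  // [ac:H] = x * c
    const uint64_t t = (ac << 32) | H;
    ac >>= 32;
    H = t + ad;
    if (H < t) ac++;                 // carry into the top 32 bits
    L |= H << 32;
    *pH = (ac << 32) | uint32_t(H >> 32);
    return L;
}

int main()
{
    const uint64_t x = 0xdeadbeefcafebabeULL, y = 0x123456789abcdef0ULL;
    uint64_t H;
    const uint64_t L = mulUnitPortable(&H, x, y);
    const unsigned __int128 t = (unsigned __int128)x * y;
    assert(L == (uint64_t)t && H == (uint64_t)(t >> 64));
    return 0;
}
```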
1 : 0; - z[i] = xc; - } - } - return c; -} - -/* - z[] = x[] + y -*/ -template -T addu1(T *z, const T *x, size_t n, T y) -{ - assert(n > 0); - T t = x[0] + y; - z[0] = t; - size_t i = 0; - if (t >= y) goto EXIT_0; - i = 1; - for (; i < n; i++) { - t = x[i] + 1; - z[i] = t; - if (t != 0) goto EXIT_0; - } - return 1; -EXIT_0: - i++; - for (; i < n; i++) { - z[i] = x[i]; - } - return 0; -} - -/* - x[] += y -*/ -template -T addu1(T *x, size_t n, T y) -{ - assert(n > 0); - T t = x[0] + y; - x[0] = t; - size_t i = 0; - if (t >= y) return 0; - i = 1; - for (; i < n; i++) { - t = x[i] + 1; - x[i] = t; - if (t != 0) return 0; - } - return 1; -} -/* - z[zn] = x[xn] + y[yn] - @note zn = max(xn, yn) -*/ -template -T addNM(T *z, const T *x, size_t xn, const T *y, size_t yn) -{ - if (yn > xn) { - fp::swap_(xn, yn); - fp::swap_(x, y); - } - assert(xn >= yn); - size_t max = xn; - size_t min = yn; - T c = vint::addN(z, x, y, min); - if (max > min) { - c = vint::addu1(z + min, x + min, max - min, c); - } - return c; -} - -/* - z[] = x[n] - y[n] - z may be equal to x or y -*/ -template -T subN(T *z, const T *x, const T *y, size_t n) -{ - assert(n > 0); - T c = 0; - for (size_t i = 0; i < n; i++) { - T yc = y[i] + c; - if (yc < c) { - // y[i] = T(-1) and c = 1 - z[i] = x[i]; - } else { - c = x[i] < yc ? 1 : 0; - z[i] = x[i] - yc; - } - } - return c; -} - -/* - out[] = x[n] - y -*/ -template -T subu1(T *z, const T *x, size_t n, T y) -{ - assert(n > 0); -#if 0 - T t = x[0]; - z[0] = t - y; - size_t i = 0; - if (t >= y) goto EXIT_0; - i = 1; - for (; i < n; i++ ){ - t = x[i]; - z[i] = t - 1; - if (t != 0) goto EXIT_0; - } - return 1; -EXIT_0: - i++; - for (; i < n; i++) { - z[i] = x[i]; - } - return 0; -#else - T c = x[0] < y ? 1 : 0; - z[0] = x[0] - y; - for (size_t i = 1; i < n; i++) { - if (x[i] < c) { - z[i] = T(-1); - } else { - z[i] = x[i] - c; - c = 0; - } - } - return c; -#endif -} - -/* - z[xn] = x[xn] - y[yn] - @note xn >= yn -*/ -template -T subNM(T *z, const T *x, size_t xn, const T *y, size_t yn) -{ - assert(xn >= yn); - T c = vint::subN(z, x, y, yn); - if (xn > yn) { - c = vint::subu1(z + yn, x + yn, xn - yn, c); - } - return c; -} - -/* - z[0..n) = x[0..n) * y - return z[n] - @note accept z == x -*/ -template -T mulu1(T *z, const T *x, size_t n, T y) -{ - assert(n > 0); - T H = 0; - for (size_t i = 0; i < n; i++) { - T t = H; - T L = mulUnit(&H, x[i], y); - z[i] = t + L; - if (z[i] < t) { - H++; - } - } - return H; // z[n] -} - -/* - z[xn * yn] = x[xn] * y[ym] -*/ -template -static inline void mulNM(T *z, const T *x, size_t xn, const T *y, size_t yn) -{ - assert(xn > 0 && yn > 0); - if (yn > xn) { - fp::swap_(yn, xn); - fp::swap_(x, y); - } - assert(xn >= yn); - if (z == x) { - T *p = (T*)CYBOZU_ALLOCA(sizeof(T) * xn); - copyN(p, x, xn); - x = p; - } - if (z == y) { - T *p = (T*)CYBOZU_ALLOCA(sizeof(T) * yn); - copyN(p, y, yn); - y = p; - } - z[xn] = vint::mulu1(&z[0], x, xn, y[0]); - clearN(z + xn + 1, yn - 1); - - T *t2 = (T*)CYBOZU_ALLOCA(sizeof(T) * (xn + 1)); - for (size_t i = 1; i < yn; i++) { - t2[xn] = vint::mulu1(&t2[0], x, xn, y[i]); - vint::addN(&z[i], &z[i], &t2[0], xn + 1); - } -} -/* - out[xn * 2] = x[xn] * x[xn] - QQQ : optimize this -*/ -template -static inline void sqrN(T *y, const T *x, size_t xn) -{ - mulNM(y, x, xn, x, xn); -} - -/* - q[] = x[] / y - @retval r = x[] % y - accept q == x -*/ -template -T divu1(T *q, const T *x, size_t n, T y) -{ - T r = 0; - for (int i = (int)n - 1; i >= 0; i--) { - q[i] = divUnit(&r, r, x[i], y); - } - return r; -} -/* - q[] = x[] / y - @retval 
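`addN`'s branch on `xc < c` covers the one case a naive carry chain misses: `x[i]` is all-ones and the incoming carry wraps it to zero, so the limb result is just `y[i]` and the carry survives. A standalone restatement adding two 128-bit values held as two little-endian limbs:

```cpp
#include <cstdint>
#include <cstddef>
#include <cassert>

// z[] = x[] + y[]; returns the carry out of the top limb. z may alias x or y.
template<class T>
static T addN(T *z, const T *x, const T *y, size_t n)
{
    T c = 0;
    for (size_t i = 0; i < n; i++) {
        T xc = x[i] + c;
        if (xc < c) {
            // x[i] == T(-1) and c == 1: the sum limb is y[i], carry stays 1
            z[i] = y[i];
        } else {
            xc += y[i];
            c = y[i] > xc ? 1 : 0;
            z[i] = xc;
        }
    }
    return c;
}

int main()
{
    // (2^128 - 1) + 1 = 2^128: both result limbs are 0, carry out is 1
    const uint64_t x[2] = { ~0ULL, ~0ULL };
    const uint64_t y[2] = { 1, 0 };
    uint64_t z[2];
    assert(addN(z, x, y, 2) == 1);
    assert(z[0] == 0 && z[1] == 0);
    return 0;
}
```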
r = x[] % y -*/ -template -T modu1(const T *x, size_t n, T y) -{ - T r = 0; - for (int i = (int)n - 1; i >= 0; i--) { - divUnit(&r, r, x[i], y); - } - return r; -} - -/* - y[] = x[] << bit - 0 < bit < sizeof(T) * 8 - accept y == x -*/ -template -T shlBit(T *y, const T *x, size_t xn, size_t bit) -{ - assert(0 < bit && bit < sizeof(T) * 8); - assert(xn > 0); - size_t rBit = sizeof(T) * 8 - bit; - T keep = x[xn - 1]; - T prev = keep; - for (size_t i = xn - 1; i > 0; i--) { - T t = x[i - 1]; - y[i] = (prev << bit) | (t >> rBit); - prev = t; - } - y[0] = prev << bit; - return keep >> rBit; -} - -/* - y[yn] = x[xn] << bit - yn = xn + (bit + unitBitBit - 1) / unitBitSize - accept y == x -*/ -template -void shlN(T *y, const T *x, size_t xn, size_t bit) -{ - assert(xn > 0); - const size_t unitBitSize = sizeof(T) * 8; - size_t q = bit / unitBitSize; - size_t r = bit % unitBitSize; - if (r == 0) { - // don't use copyN(y + q, x, xn); if overlaped - for (size_t i = 0; i < xn; i++) { - y[q + xn - 1 - i] = x[xn - 1 - i]; - } - } else { - y[q + xn] = shlBit(y + q, x, xn, r); - } - clearN(y, q); -} - -/* - y[] = x[] >> bit - 0 < bit < sizeof(T) * 8 -*/ -template -void shrBit(T *y, const T *x, size_t xn, size_t bit) -{ - assert(0 < bit && bit < sizeof(T) * 8); - assert(xn > 0); - size_t rBit = sizeof(T) * 8 - bit; - T prev = x[0]; - for (size_t i = 1; i < xn; i++) { - T t = x[i]; - y[i - 1] = (prev >> bit) | (t << rBit); - prev = t; - } - y[xn - 1] = prev >> bit; -} -/* - y[yn] = x[xn] >> bit - yn = xn - bit / unitBit -*/ -template -void shrN(T *y, const T *x, size_t xn, size_t bit) -{ - assert(xn > 0); - const size_t unitBitSize = sizeof(T) * 8; - size_t q = bit / unitBitSize; - size_t r = bit % unitBitSize; - assert(xn >= q); - if (r == 0) { - copyN(y, x + q, xn - q); - } else { - shrBit(y, x + q, xn - q, r); - } -} - -template -size_t getRealSize(const T *x, size_t xn) -{ - int i = (int)xn - 1; - for (; i > 0; i--) { - if (x[i]) { - return i + 1; - } - } - return 1; -} - -template -size_t getBitSize(const T *x, size_t n) -{ - if (n == 1 && x[0] == 0) return 1; - T v = x[n - 1]; - assert(v); - return (n - 1) * sizeof(T) * 8 + 1 + cybozu::bsr(v); -} - -/* - q[qn] = x[xn] / y[yn] ; qn == xn - yn + 1 if xn >= yn if q - r[rn] = x[xn] % y[yn] ; rn = yn before getRealSize - allow q == 0 -*/ -template -void divNM(T *q, size_t qn, T *r, const T *x, size_t xn, const T *y, size_t yn) -{ - assert(xn > 0 && yn > 0); - assert(xn < yn || (q == 0 || qn == xn - yn + 1)); - assert(q != r); - const size_t rn = yn; - xn = getRealSize(x, xn); - yn = getRealSize(y, yn); - if (x == y) { - assert(xn == yn); - x_is_y: - clearN(r, rn); - if (q) { - q[0] = 1; - clearN(q + 1, qn - 1); - } - return; - } - if (yn > xn) { - /* - if y > x then q = 0 and r = x - */ - q_is_zero: - copyN(r, x, xn); - clearN(r + xn, rn - xn); - if (q) clearN(q, qn); - return; - } - if (yn == 1) { - T t; - if (q) { - if (qn > xn) { - clearN(q + xn, qn - xn); - } - t = divu1(q, x, xn, y[0]); - } else { - t = modu1(x, xn, y[0]); - } - r[0] = t; - clearN(r + 1, rn - 1); - return; - } - const size_t yTopBit = cybozu::bsr(y[yn - 1]); - assert(yn >= 2); - if (xn == yn) { - const size_t xTopBit = cybozu::bsr(x[xn - 1]); - if (xTopBit < yTopBit) goto q_is_zero; - if (yTopBit == xTopBit) { - int ret = compareNM(x, xn, y, yn); - if (ret == 0) goto x_is_y; - if (ret < 0) goto q_is_zero; - if (r) { - subN(r, x, y, yn); - } - if (q) { - q[0] = 1; - clearN(q + 1, qn - 1); - } - return; - } - assert(xTopBit > yTopBit); - // fast reduction for larger than fullbit-3 size p 
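`shlBit` walks the limbs from the most significant end down, so each source limb is read before its destination slot is overwritten and the shift is safe in place (`y == x`), returning whatever spills out of the top limb. A two-limb restatement:

```cpp
#include <cstdint>
#include <cstddef>
#include <cassert>

// y[] = x[] << bit for 0 < bit < 64; returns the bits pushed out on top.
static uint64_t shlBit(uint64_t *y, const uint64_t *x, size_t xn, size_t bit)
{
    const size_t rBit = 64 - bit;
    const uint64_t keep = x[xn - 1];
    uint64_t prev = keep;
    for (size_t i = xn - 1; i > 0; i--) {
        const uint64_t t = x[i - 1];     // read before y[i] is written
        y[i] = (prev << bit) | (t >> rBit);
        prev = t;
    }
    y[0] = prev << bit;
    return keep >> rBit;
}

int main()
{
    uint64_t x[2] = { 0xf0f0f0f0f0f0f0f0ULL, 0x123456789abcdef0ULL };
    const uint64_t out = shlBit(x, x, 2, 8); // in-place shift by one byte
    assert(out == 0x12);
    assert(x[1] == 0x3456789abcdef0f0ULL);
    assert(x[0] == 0xf0f0f0f0f0f0f000ULL);
    return 0;
}
```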
- if (yTopBit >= sizeof(T) * 8 - 4) { - T *xx = (T*)CYBOZU_ALLOCA(sizeof(T) * xn); - T qv = 0; - if (yTopBit == sizeof(T) * 8 - 2) { - copyN(xx, x, xn); - } else { - qv = x[xn - 1] >> (yTopBit + 1); - mulu1(xx, y, yn, qv); - subN(xx, x, xx, xn); - xn = getRealSize(xx, xn); - } - for (;;) { - T ret = subN(xx, xx, y, yn); - if (ret) { - addN(xx, xx, y, yn); - break; - } - qv++; - xn = getRealSize(xx, xn); - } - if (r) { - copyN(r, xx, xn); - clearN(r + xn, rn - xn); - } - if (q) { - q[0] = qv; - clearN(q + 1, qn - 1); - } - return; - } - } - /* - bitwise left shift x and y to adjust MSB of y[yn - 1] = 1 - */ - const size_t shift = sizeof(T) * 8 - 1 - yTopBit; - T *xx = (T*)CYBOZU_ALLOCA(sizeof(T) * (xn + 1)); - const T *yy; - if (shift) { - T v = shlBit(xx, x, xn, shift); - if (v) { - xx[xn] = v; - xn++; - } - T *yBuf = (T*)CYBOZU_ALLOCA(sizeof(T) * yn); - shlBit(yBuf, y, yn ,shift); - yy = yBuf; - } else { - copyN(xx, x, xn); - yy = y; - } - if (q) { - clearN(q, qn); - } - assert((yy[yn - 1] >> (sizeof(T) * 8 - 1)) != 0); - T *tt = (T*)CYBOZU_ALLOCA(sizeof(T) * (yn + 1)); - while (xn > yn) { - size_t d = xn - yn; - T xTop = xx[xn - 1]; - T yTop = yy[yn - 1]; - if (xTop > yTop || (compareNM(xx + d, xn - d, yy, yn) >= 0)) { - vint::subN(xx + d, xx + d, yy, yn); - xn = getRealSize(xx, xn); - if (q) vint::addu1(q + d, qn - d, 1); - continue; - } - if (xTop == 1) { - vint::subNM(xx + d - 1, xx + d - 1, xn - d + 1, yy, yn); - xn = getRealSize(xx, xn); - if (q) vint::addu1(q + d - 1, qn - d + 1, 1); - continue; - } - tt[yn] = vint::mulu1(tt, yy, yn, xTop); - vint::subN(xx + d - 1, xx + d - 1, tt, yn + 1); - xn = getRealSize(xx, xn); - if (q) vint::addu1(q + d - 1, qn - d + 1, xTop); - } - if (xn == yn && compareNM(xx, xn, yy, yn) >= 0) { - subN(xx, xx, yy, yn); - xn = getRealSize(xx, xn); - if (q) vint::addu1(q, qn, 1); - } - if (shift) { - shrBit(r, xx, xn, shift); - } else { - copyN(r, xx, xn); - } - clearN(r + xn, rn - xn); -} - -#ifndef MCL_VINT_FIXED_BUFFER -template -class Buffer { - size_t allocSize_; - T *ptr_; -public: - typedef T Unit; - Buffer() : allocSize_(0), ptr_(0) {} - ~Buffer() - { - clear(); - } - Buffer(const Buffer& rhs) - : allocSize_(rhs.allocSize_) - , ptr_(0) - { - ptr_ = (T*)malloc(allocSize_ * sizeof(T)); - if (ptr_ == 0) throw cybozu::Exception("Buffer:malloc") << rhs.allocSize_; - memcpy(ptr_, rhs.ptr_, allocSize_ * sizeof(T)); - } - Buffer& operator=(const Buffer& rhs) - { - Buffer t(rhs); - swap(t); - return *this; - } - void swap(Buffer& rhs) -#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 - noexcept -#endif - { - fp::swap_(allocSize_, rhs.allocSize_); - fp::swap_(ptr_, rhs.ptr_); - } - void clear() - { - allocSize_ = 0; - free(ptr_); - ptr_ = 0; - } - - /* - @note extended buffer may be not cleared - */ - void alloc(bool *pb, size_t n) - { - if (n > allocSize_) { - T *p = (T*)malloc(n * sizeof(T)); - if (p == 0) { - *pb = false; - return; - } - copyN(p, ptr_, allocSize_); - free(ptr_); - ptr_ = p; - allocSize_ = n; - } - *pb = true; - } -#ifndef CYBOZU_DONT_USE_EXCEPTION - void alloc(size_t n) - { - bool b; - alloc(&b, n); - if (!b) throw cybozu::Exception("Buffer:alloc"); - } -#endif - /* - *this = rhs - rhs may be destroyed - */ - const T& operator[](size_t n) const { return ptr_[n]; } - T& operator[](size_t n) { return ptr_[n]; } -}; -#endif - -template -class FixedBuffer { - enum { - N = (BitLen + sizeof(T) * 8 - 1) / (sizeof(T) * 8) - }; - size_t size_; - T v_[N]; -public: - typedef T Unit; - FixedBuffer() - : size_(0) - { - } - FixedBuffer(const 
FixedBuffer& rhs) - { - operator=(rhs); - } - FixedBuffer& operator=(const FixedBuffer& rhs) - { - size_ = rhs.size_; - for (size_t i = 0; i < size_; i++) { - v_[i] = rhs.v_[i]; - } - return *this; - } - void clear() { size_ = 0; } - void alloc(bool *pb, size_t n) - { - if (n > N) { - *pb = false; - return; - } - size_ = n; - *pb = true; - } -#ifndef CYBOZU_DONT_USE_EXCEPTION - void alloc(size_t n) - { - bool b; - alloc(&b, n); - if (!b) throw cybozu::Exception("FixedBuffer:alloc"); - } -#endif - void swap(FixedBuffer& rhs) - { - FixedBuffer *p1 = this; - FixedBuffer *p2 = &rhs; - if (p1->size_ < p2->size_) { - fp::swap_(p1, p2); - } - assert(p1->size_ >= p2->size_); - for (size_t i = 0; i < p2->size_; i++) { - fp::swap_(p1->v_[i], p2->v_[i]); - } - for (size_t i = p2->size_; i < p1->size_; i++) { - p2->v_[i] = p1->v_[i]; - } - fp::swap_(p1->size_, p2->size_); - } - // to avoid warning of gcc - void verify(size_t n) const - { - assert(n <= N); - (void)n; - } - const T& operator[](size_t n) const { verify(n); return v_[n]; } - T& operator[](size_t n) { verify(n); return v_[n]; } -}; - -#if MCL_SIZEOF_UNIT == 8 -/* - M = 1 << 256 - a = M mod p = (1 << 32) + 0x3d1 - [H:L] mod p = H * a + L - - if H = L = M - 1, t = H * a + L = aM + (M - a - 1) - H' = a, L' = M - a - 1 - t' = H' * a + L' = M + (a^2 - a - 1) - H'' = 1, L'' = a^2 - a - 1 - t'' = H'' * a + L'' = a^2 - 1 -*/ -inline void mcl_fpDbl_mod_SECP256K1(Unit *z, const Unit *x, const Unit *p) -{ - const Unit a = (uint64_t(1) << 32) + 0x3d1; - Unit buf[5]; - buf[4] = mulu1(buf, x + 4, 4, a); // H * a - buf[4] += addN(buf, buf, x, 4); // t = H * a + L - Unit x2[2]; - x2[0] = mulUnit(&x2[1], buf[4], a); - Unit x3 = addN(buf, buf, x2, 2); - if (x3) { - x3 = addu1(buf + 2, buf + 2, 2, Unit(1)); // t' = H' * a + L' - if (x3) { - x3 = addu1(buf, buf, 4, a); - assert(x3 == 0); - } - } - if (fp::isGreaterOrEqualArray(buf, p, 4)) { - subN(z, buf, p, 4); - } else { - fp::copyArray(z, buf, 4); - } -} - -inline void mcl_fp_mul_SECP256K1(Unit *z, const Unit *x, const Unit *y, const Unit *p) -{ - Unit xy[8]; - mulNM(xy, x, 4, y, 4); - mcl_fpDbl_mod_SECP256K1(z, xy, p); -} -inline void mcl_fp_sqr_SECP256K1(Unit *y, const Unit *x, const Unit *p) -{ - Unit xx[8]; - sqrN(xx, x, 4); - mcl_fpDbl_mod_SECP256K1(y, xx, p); -} -#endif - -} // vint - -/** - signed integer with variable length -*/ -template -class VintT { -public: - typedef _Buffer Buffer; - typedef typename Buffer::Unit Unit; - static const size_t unitBitSize = sizeof(Unit) * 8; - static const int invalidVar = -2147483647 - 1; // abs(invalidVar) is not defined -private: - Buffer buf_; - size_t size_; - bool isNeg_; - void trim(size_t n) - { - assert(n > 0); - int i = (int)n - 1; - for (; i > 0; i--) { - if (buf_[i]) { - size_ = i + 1; - return; - } - } - size_ = 1; - // zero - if (buf_[0] == 0) { - isNeg_ = false; - } - } - static int ucompare(const Buffer& x, size_t xn, const Buffer& y, size_t yn) - { - return vint::compareNM(&x[0], xn, &y[0], yn); - } - static void uadd(VintT& z, const Buffer& x, size_t xn, const Buffer& y, size_t yn) - { - size_t zn = fp::max_(xn, yn) + 1; - bool b; - z.buf_.alloc(&b, zn); - assert(b); (void)b; - z.buf_[zn - 1] = vint::addNM(&z.buf_[0], &x[0], xn, &y[0], yn); - z.trim(zn); - } - static void uadd1(VintT& z, const Buffer& x, size_t xn, Unit y) - { - size_t zn = xn + 1; - bool b; - z.buf_.alloc(&b, zn); - assert(b); (void)b; - z.buf_[zn - 1] = vint::addu1(&z.buf_[0], &x[0], xn, y); - z.trim(zn); - } - static void usub1(VintT& z, const Buffer& x, size_t xn, Unit y) - 
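`mcl_fpDbl_mod_SECP256K1` exploits `2^256 = a mod p` with `a = 2^32 + 0x3d1` to fold the high 256 bits into the low half using one `mulu1` plus carries instead of a full division. Below is a toy single-word analogue with `p = 2^32 - 5`, so `2^32 = 5 mod p` (the name `modP` and the parameter choice are illustrative only):

```cpp
#include <cstdint>
#include <cassert>

// Reduce a 64-bit value mod p = 2^32 - 5 by folding: [H:L] == H*5 + L (mod p).
static uint32_t modP(uint64_t x)
{
    const uint64_t a = 5;           // 2^32 mod p
    const uint32_t p = 0xfffffffbu; // 2^32 - 5
    while (x >> 32) {
        x = (x >> 32) * a + uint32_t(x); // strictly decreases while x >= 2^32
    }
    uint32_t r = uint32_t(x);
    if (r >= p) r -= p;             // at most one final subtraction
    return r;
}

int main()
{
    const uint64_t x = 0x123456789abcdef0ULL;
    assert(modP(x) == uint32_t(x % 0xfffffffbULL));
    assert(modP(~0ULL) == uint32_t(~0ULL % 0xfffffffbULL));
    return 0;
}
```

The deleted routine unrolls the same idea into two folds plus carry fix-ups and one conditional subtraction, since its `a` is small enough that the second fold already fits.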
{ - size_t zn = xn; - bool b; - z.buf_.alloc(&b, zn); - assert(b); (void)b; - Unit c = vint::subu1(&z.buf_[0], &x[0], xn, y); - (void)c; - assert(!c); - z.trim(zn); - } - static void usub(VintT& z, const Buffer& x, size_t xn, const Buffer& y, size_t yn) - { - assert(xn >= yn); - bool b; - z.buf_.alloc(&b, xn); - assert(b); (void)b; - Unit c = vint::subN(&z.buf_[0], &x[0], &y[0], yn); - if (xn > yn) { - c = vint::subu1(&z.buf_[yn], &x[yn], xn - yn, c); - } - assert(!c); - z.trim(xn); - } - static void _add(VintT& z, const VintT& x, bool xNeg, const VintT& y, bool yNeg) - { - if ((xNeg ^ yNeg) == 0) { - // same sign - uadd(z, x.buf_, x.size(), y.buf_, y.size()); - z.isNeg_ = xNeg; - return; - } - int r = ucompare(x.buf_, x.size(), y.buf_, y.size()); - if (r >= 0) { - usub(z, x.buf_, x.size(), y.buf_, y.size()); - z.isNeg_ = xNeg; - } else { - usub(z, y.buf_, y.size(), x.buf_, x.size()); - z.isNeg_ = yNeg; - } - } - static void _adds1(VintT& z, const VintT& x, int y, bool yNeg) - { - assert(y >= 0); - if ((x.isNeg_ ^ yNeg) == 0) { - // same sign - uadd1(z, x.buf_, x.size(), y); - z.isNeg_ = yNeg; - return; - } - if (x.size() > 1 || x.buf_[0] >= (Unit)y) { - usub1(z, x.buf_, x.size(), y); - z.isNeg_ = x.isNeg_; - } else { - z = y - x.buf_[0]; - z.isNeg_ = yNeg; - } - } - static void _addu1(VintT& z, const VintT& x, Unit y, bool yNeg) - { - if ((x.isNeg_ ^ yNeg) == 0) { - // same sign - uadd1(z, x.buf_, x.size(), y); - z.isNeg_ = yNeg; - return; - } - if (x.size() > 1 || x.buf_[0] >= y) { - usub1(z, x.buf_, x.size(), y); - z.isNeg_ = x.isNeg_; - } else { - z = y - x.buf_[0]; - z.isNeg_ = yNeg; - } - } - /** - @param q [out] x / y if q != 0 - @param r [out] x % y - */ - static void udiv(VintT* q, VintT& r, const Buffer& x, size_t xn, const Buffer& y, size_t yn) - { - assert(q != &r); - if (xn < yn) { - r.buf_ = x; - r.trim(xn); - if (q) q->clear(); - return; - } - size_t qn = xn - yn + 1; - bool b; - if (q) { - q->buf_.alloc(&b, qn); - assert(b); (void)b; - } - r.buf_.alloc(&b, yn); - assert(b); (void)b; - vint::divNM(q ? 
&q->buf_[0] : 0, qn, &r.buf_[0], &x[0], xn, &y[0], yn); - if (q) { - q->trim(qn); - } - r.trim(yn); - } - /* - @param x [inout] x <- d - @retval s for x = 2^s d where d is odd - */ - static uint32_t countTrailingZero(VintT& x) - { - uint32_t s = 0; - while (x.isEven()) { - x >>= 1; - s++; - } - return s; - } - struct MulMod { - const VintT *pm; - void operator()(VintT& z, const VintT& x, const VintT& y) const - { - VintT::mul(z, x, y); - z %= *pm; - } - }; - struct SqrMod { - const VintT *pm; - void operator()(VintT& y, const VintT& x) const - { - VintT::sqr(y, x); - y %= *pm; - } - }; -public: - VintT(int x = 0) - : size_(0) - { - *this = x; - } - VintT(Unit x) - : size_(0) - { - *this = x; - } - VintT(const VintT& rhs) - : buf_(rhs.buf_) - , size_(rhs.size_) - , isNeg_(rhs.isNeg_) - { - } - VintT& operator=(int x) - { - assert(x != invalidVar); - isNeg_ = x < 0; - bool b; - buf_.alloc(&b, 1); - assert(b); (void)b; - buf_[0] = fp::abs_(x); - size_ = 1; - return *this; - } - VintT& operator=(Unit x) - { - isNeg_ = false; - bool b; - buf_.alloc(&b, 1); - assert(b); (void)b; - buf_[0] = x; - size_ = 1; - return *this; - } - VintT& operator=(const VintT& rhs) - { - buf_ = rhs.buf_; - size_ = rhs.size_; - isNeg_ = rhs.isNeg_; - return *this; - } -#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 - VintT(VintT&& rhs) - : buf_(rhs.buf_) - , size_(rhs.size_) - , isNeg_(rhs.isNeg_) - { - } - VintT& operator=(VintT&& rhs) - { - buf_ = std::move(rhs.buf_); - size_ = rhs.size_; - isNeg_ = rhs.isNeg_; - return *this; - } -#endif - void swap(VintT& rhs) -#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 - noexcept -#endif - { - fp::swap_(buf_, rhs.buf_); - fp::swap_(size_, rhs.size_); - fp::swap_(isNeg_, rhs.isNeg_); - } - void dump(const char *msg = "") const - { - vint::dump(&buf_[0], size_, msg); - } - /* - set positive value - @note assume little endian system - */ - template - void setArray(bool *pb, const S *x, size_t size) - { - isNeg_ = false; - if (size == 0) { - clear(); - *pb = true; - return; - } - size_t unitSize = (sizeof(S) * size + sizeof(Unit) - 1) / sizeof(Unit); - buf_.alloc(pb, unitSize); - if (!*pb) return; - char *dst = (char *)&buf_[0]; - const char *src = (const char *)x; - size_t i = 0; - for (; i < sizeof(S) * size; i++) { - dst[i] = src[i]; - } - for (; i < sizeof(Unit) * unitSize; i++) { - dst[i] = 0; - } - trim(unitSize); - } - /* - set [0, max) randomly - */ - void setRand(bool *pb, const VintT& max, fp::RandGen rg = fp::RandGen()) - { - assert(max > 0); - if (rg.isZero()) rg = fp::RandGen::get(); - size_t n = max.size(); - buf_.alloc(pb, n); - if (!*pb) return; - rg.read(pb, &buf_[0], n * sizeof(buf_[0])); - if (!*pb) return; - trim(n); - *this %= max; - } - /* - get abs value - buf_[0, size) = x - buf_[size, maxSize) with zero - @note assume little endian system - */ - void getArray(bool *pb, Unit *x, size_t maxSize) const - { - size_t n = size(); - if (n > maxSize) { - *pb = false; - return; - } - vint::copyN(x, &buf_[0], n); - vint::clearN(x + n, maxSize - n); - *pb = true; - } - void clear() { *this = 0; } - template - void save(bool *pb, OutputStream& os, int base = 10) const - { - if (isNeg_) cybozu::writeChar(pb, os, '-'); - char buf[1024]; - size_t n = mcl::fp::arrayToStr(buf, sizeof(buf), &buf_[0], size_, base, false); - if (n == 0) { - *pb = false; - return; - } - cybozu::write(pb, os, buf + sizeof(buf) - n, n); - } - /* - set buf with string terminated by '\0' - return strlen(buf) if success else 0 - */ - size_t getStr(char *buf, size_t bufSize, int base = 
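`setArray` copies raw limbs in little-endian order, zero-pads the final unit, and trims; together with `getStr` it gives a quick import/export path. A usage sketch assuming `mcl/vint.hpp` from this vendored tree is on the include path:

```cpp
#include <mcl/vint.hpp>
#include <cstdint>
#include <cassert>

int main()
{
    // a[0] is the least significant 32 bits
    const uint32_t a[3] = { 0xddddddddu, 0xccccccccu, 0xbbbbbbbbu };
    mcl::Vint x;
    x.setArray(a, 3);
    assert(x.getStr(16) == "bbbbbbbbccccccccdddddddd");
    return 0;
}
```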
10) const - { - cybozu::MemoryOutputStream os(buf, bufSize); - bool b; - save(&b, os, base); - const size_t n = os.getPos(); - if (!b || n == bufSize) return 0; - buf[n] = '\0'; - return n; - } - /* - return bitSize(abs(*this)) - @note return 1 if zero - */ - size_t getBitSize() const - { - if (isZero()) return 1; - size_t n = size(); - Unit v = buf_[n - 1]; - assert(v); - return (n - 1) * sizeof(Unit) * 8 + 1 + cybozu::bsr(v); - } - // ignore sign - bool testBit(size_t i) const - { - size_t q = i / unitBitSize; - size_t r = i % unitBitSize; - assert(q <= size()); - Unit mask = Unit(1) << r; - return (buf_[q] & mask) != 0; - } - void setBit(size_t i, bool v = true) - { - size_t q = i / unitBitSize; - size_t r = i % unitBitSize; - assert(q <= size()); - bool b; - buf_.alloc(&b, q + 1); - assert(b); (void)b; - Unit mask = Unit(1) << r; - if (v) { - buf_[q] |= mask; - } else { - buf_[q] &= ~mask; - trim(q + 1); - } - } - /* - @param str [in] number string - @note "0x..." => base = 16 - "0b..." => base = 2 - otherwise => base = 10 - */ - void setStr(bool *pb, const char *str, int base = 0) - { - // allow twice size of MCL_MAX_BIT_SIZE because of multiplication - const size_t maxN = (MCL_MAX_BIT_SIZE * 2 + unitBitSize - 1) / unitBitSize; - buf_.alloc(pb, maxN); - if (!*pb) return; - *pb = false; - isNeg_ = false; - size_t len = strlen(str); - size_t n = fp::strToArray(&isNeg_, &buf_[0], maxN, str, len, base); - if (n == 0) return; - trim(n); - *pb = true; - } - static int compare(const VintT& x, const VintT& y) - { - if (x.isNeg_ ^ y.isNeg_) { - if (x.isZero() && y.isZero()) return 0; - return x.isNeg_ ? -1 : 1; - } else { - // same sign - int c = ucompare(x.buf_, x.size(), y.buf_, y.size()); - if (x.isNeg_) { - return -c; - } - return c; - } - } - static int compares1(const VintT& x, int y) - { - assert(y != invalidVar); - if (x.isNeg_ ^ (y < 0)) { - if (x.isZero() && y == 0) return 0; - return x.isNeg_ ? -1 : 1; - } else { - // same sign - Unit y0 = fp::abs_(y); - int c = vint::compareNM(&x.buf_[0], x.size(), &y0, 1); - if (x.isNeg_) { - return -c; - } - return c; - } - } - static int compareu1(const VintT& x, uint32_t y) - { - if (x.isNeg_) return -1; - if (x.size() > 1) return 1; - Unit x0 = x.buf_[0]; - return x0 > y ? 1 : x0 == y ? 
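`setStr` auto-detects the base from a `"0x"` or `"0b"` prefix (defaulting to 10), and `testBit`/`setBit` address the magnitude's bits directly, ignoring sign. A short check, again assuming the vendored `mcl/vint.hpp`:

```cpp
#include <mcl/vint.hpp>
#include <cassert>

int main()
{
    mcl::Vint x;
    x.setStr("0b1010");                        // "0b" prefix selects base 2
    assert(x == 10);
    assert(x.testBit(3) && x.testBit(1) && !x.testBit(0));
    x.setBit(0);                               // turn bit 0 on: 10 -> 11
    assert(x == 11);
    x.setStr("0xff");                          // "0x" prefix selects base 16
    assert(x == 255);
    return 0;
}
```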
0 : -1; - } - size_t size() const { return size_; } - bool isZero() const { return size() == 1 && buf_[0] == 0; } - bool isNegative() const { return !isZero() && isNeg_; } - uint32_t getLow32bit() const { return (uint32_t)buf_[0]; } - bool isOdd() const { return (buf_[0] & 1) == 1; } - bool isEven() const { return !isOdd(); } - const Unit *getUnit() const { return &buf_[0]; } - size_t getUnitSize() const { return size_; } - static void add(VintT& z, const VintT& x, const VintT& y) - { - _add(z, x, x.isNeg_, y, y.isNeg_); - } - static void sub(VintT& z, const VintT& x, const VintT& y) - { - _add(z, x, x.isNeg_, y, !y.isNeg_); - } - static void mul(VintT& z, const VintT& x, const VintT& y) - { - const size_t xn = x.size(); - const size_t yn = y.size(); - size_t zn = xn + yn; - bool b; - z.buf_.alloc(&b, zn); - assert(b); (void)b; - vint::mulNM(&z.buf_[0], &x.buf_[0], xn, &y.buf_[0], yn); - z.isNeg_ = x.isNeg_ ^ y.isNeg_; - z.trim(zn); - } - static void sqr(VintT& y, const VintT& x) - { - mul(y, x, x); - } - static void addu1(VintT& z, const VintT& x, Unit y) - { - _addu1(z, x, y, false); - } - static void subu1(VintT& z, const VintT& x, Unit y) - { - _addu1(z, x, y, true); - } - static void mulu1(VintT& z, const VintT& x, Unit y) - { - size_t xn = x.size(); - size_t zn = xn + 1; - bool b; - z.buf_.alloc(&b, zn); - assert(b); (void)b; - z.buf_[zn - 1] = vint::mulu1(&z.buf_[0], &x.buf_[0], xn, y); - z.isNeg_ = x.isNeg_; - z.trim(zn); - } - static void divu1(VintT& q, const VintT& x, Unit y) - { - udivModu1(&q, x, y); - } - static void modu1(VintT& r, const VintT& x, Unit y) - { - bool xNeg = x.isNeg_; - r = divModu1(0, x, y); - r.isNeg_ = xNeg; - } - static void adds1(VintT& z, const VintT& x, int y) - { - assert(y != invalidVar); - _adds1(z, x, fp::abs_(y), y < 0); - } - static void subs1(VintT& z, const VintT& x, int y) - { - assert(y != invalidVar); - _adds1(z, x, fp::abs_(y), !(y < 0)); - } - static void muls1(VintT& z, const VintT& x, int y) - { - assert(y != invalidVar); - mulu1(z, x, fp::abs_(y)); - z.isNeg_ ^= (y < 0); - } - /* - @param q [out] q = x / y if q is not zero - @param x [in] - @param y [in] must be not zero - return x % y - */ - static int divMods1(VintT *q, const VintT& x, int y) - { - assert(y != invalidVar); - bool xNeg = x.isNeg_; - bool yNeg = y < 0; - Unit absY = fp::abs_(y); - size_t xn = x.size(); - int r; - if (q) { - q->isNeg_ = xNeg ^ yNeg; - bool b; - q->buf_.alloc(&b, xn); - assert(b); (void)b; - r = (int)vint::divu1(&q->buf_[0], &x.buf_[0], xn, absY); - q->trim(xn); - } else { - r = (int)vint::modu1(&x.buf_[0], xn, absY); - } - return xNeg ? -r : r; - } - /* - like C - 13 / 5 = 2 ... 3 - 13 / -5 = -2 ... 3 - -13 / 5 = -2 ... -3 - -13 / -5 = 2 ... 
-3 - */ - static void divMod(VintT *q, VintT& r, const VintT& x, const VintT& y) - { - bool qsign = x.isNeg_ ^ y.isNeg_; - udiv(q, r, x.buf_, x.size(), y.buf_, y.size()); - r.isNeg_ = x.isNeg_; - if (q) q->isNeg_ = qsign; - } - static void div(VintT& q, const VintT& x, const VintT& y) - { - VintT r; - divMod(&q, r, x, y); - } - static void mod(VintT& r, const VintT& x, const VintT& y) - { - divMod(0, r, x, y); - } - static void divs1(VintT& q, const VintT& x, int y) - { - divMods1(&q, x, y); - } - static void mods1(VintT& r, const VintT& x, int y) - { - bool xNeg = x.isNeg_; - r = divMods1(0, x, y); - r.isNeg_ = xNeg; - } - static Unit udivModu1(VintT *q, const VintT& x, Unit y) - { - assert(!x.isNeg_); - size_t xn = x.size(); - if (q) { - bool b; - q->buf_.alloc(&b, xn); - assert(b); (void)b; - } - Unit r = vint::divu1(q ? &q->buf_[0] : 0, &x.buf_[0], xn, y); - if (q) { - q->trim(xn); - q->isNeg_ = false; - } - return r; - } - /* - like Python - 13 / 5 = 2 ... 3 - 13 / -5 = -3 ... -2 - -13 / 5 = -3 ... 2 - -13 / -5 = 2 ... -3 - */ - static void quotRem(VintT *q, VintT& r, const VintT& x, const VintT& y) - { - VintT yy = y; - bool qsign = x.isNeg_ ^ y.isNeg_; - udiv(q, r, x.buf_, x.size(), y.buf_, y.size()); - r.isNeg_ = y.isNeg_; - if (q) q->isNeg_ = qsign; - if (!r.isZero() && qsign) { - if (q) { - uadd1(*q, q->buf_, q->size(), 1); - } - usub(r, yy.buf_, yy.size(), r.buf_, r.size()); - } - } - template - void load(bool *pb, InputStream& is, int ioMode) - { - *pb = false; - char buf[1024]; - size_t n = fp::local::loadWord(buf, sizeof(buf), is); - if (n == 0) return; - const size_t maxN = 384 / (sizeof(MCL_SIZEOF_UNIT) * 8); - buf_.alloc(pb, maxN); - if (!*pb) return; - isNeg_ = false; - n = fp::strToArray(&isNeg_, &buf_[0], maxN, buf, n, ioMode); - if (n == 0) return; - trim(n); - *pb = true; - } - // logical left shift (copy sign) - static void shl(VintT& y, const VintT& x, size_t shiftBit) - { - size_t xn = x.size(); - size_t yn = xn + (shiftBit + unitBitSize - 1) / unitBitSize; - bool b; - y.buf_.alloc(&b, yn); - assert(b); (void)b; - vint::shlN(&y.buf_[0], &x.buf_[0], xn, shiftBit); - y.isNeg_ = x.isNeg_; - y.trim(yn); - } - // logical right shift (copy sign) - static void shr(VintT& y, const VintT& x, size_t shiftBit) - { - size_t xn = x.size(); - if (xn * unitBitSize <= shiftBit) { - y.clear(); - return; - } - size_t yn = xn - shiftBit / unitBitSize; - bool b; - y.buf_.alloc(&b, yn); - assert(b); (void)b; - vint::shrN(&y.buf_[0], &x.buf_[0], xn, shiftBit); - y.isNeg_ = x.isNeg_; - y.trim(yn); - } - static void neg(VintT& y, const VintT& x) - { - if (&y != &x) { y = x; } - y.isNeg_ = !x.isNeg_; - } - static void abs(VintT& y, const VintT& x) - { - if (&y != &x) { y = x; } - y.isNeg_ = false; - } - static VintT abs(const VintT& x) - { - VintT y = x; - abs(y, x); - return y; - } - // accept only non-negative value - static void orBit(VintT& z, const VintT& x, const VintT& y) - { - assert(!x.isNeg_ && !y.isNeg_); - const VintT *px = &x, *py = &y; - if (x.size() < y.size()) { - fp::swap_(px, py); - } - size_t xn = px->size(); - size_t yn = py->size(); - assert(xn >= yn); - bool b; - z.buf_.alloc(&b, xn); - assert(b); (void)b; - for (size_t i = 0; i < yn; i++) { - z.buf_[i] = x.buf_[i] | y.buf_[i]; - } - vint::copyN(&z.buf_[0] + yn, &px->buf_[0] + yn, xn - yn); - z.trim(xn); - } - static void andBit(VintT& z, const VintT& x, const VintT& y) - { - assert(!x.isNeg_ && !y.isNeg_); - const VintT *px = &x, *py = &y; - if (x.size() < y.size()) { - fp::swap_(px, py); - } - size_t yn = 
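The two division entry points differ exactly as their comments say: `divMod` truncates toward zero (C semantics, remainder keeps the dividend's sign) while `quotRem` floors (Python semantics, remainder keeps the divisor's sign). Side by side:

```cpp
#include <mcl/vint.hpp>
#include <cstdio>

int main()
{
    mcl::Vint x, y, q, r;
    x = -13; y = 5;
    mcl::Vint::divMod(&q, r, x, y);  // -13 = (-2)*5 + (-3)
    printf("%s %s\n", q.getStr().c_str(), r.getStr().c_str()); // -2 -3
    mcl::Vint::quotRem(&q, r, x, y); // -13 = (-3)*5 + 2
    printf("%s %s\n", q.getStr().c_str(), r.getStr().c_str()); // -3 2
    return 0;
}
```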
py->size(); - assert(px->size() >= yn); - bool b; - z.buf_.alloc(&b, yn); - assert(b); (void)b; - for (size_t i = 0; i < yn; i++) { - z.buf_[i] = x.buf_[i] & y.buf_[i]; - } - z.trim(yn); - } - static void orBitu1(VintT& z, const VintT& x, Unit y) - { - assert(!x.isNeg_); - z = x; - z.buf_[0] |= y; - } - static void andBitu1(VintT& z, const VintT& x, Unit y) - { - assert(!x.isNeg_); - bool b; - z.buf_.alloc(&b, 1); - assert(b); (void)b; - z.buf_[0] = x.buf_[0] & y; - z.size_ = 1; - z.isNeg_ = false; - } - /* - REMARK y >= 0; - */ - static void pow(VintT& z, const VintT& x, const VintT& y) - { - assert(!y.isNeg_); - const VintT xx = x; - z = 1; - mcl::fp::powGeneric(z, xx, &y.buf_[0], y.size(), mul, sqr, (void (*)(VintT&, const VintT&))0); - } - /* - REMARK y >= 0; - */ - static void pow(VintT& z, const VintT& x, int64_t y) - { - assert(y >= 0); - const VintT xx = x; - z = 1; -#if MCL_SIZEOF_UNIT == 8 - Unit ua = fp::abs_(y); - mcl::fp::powGeneric(z, xx, &ua, 1, mul, sqr, (void (*)(VintT&, const VintT&))0); -#else - uint64_t ua = fp::abs_(y); - Unit u[2] = { uint32_t(ua), uint32_t(ua >> 32) }; - size_t un = u[1] ? 2 : 1; - mcl::fp::powGeneric(z, xx, u, un, mul, sqr, (void (*)(VintT&, const VintT&))0); -#endif - } - /* - z = x ^ y mod m - REMARK y >= 0; - */ - static void powMod(VintT& z, const VintT& x, const VintT& y, const VintT& m) - { - assert(!y.isNeg_); - VintT zz; - MulMod mulMod; - SqrMod sqrMod; - mulMod.pm = &m; - sqrMod.pm = &m; - zz = 1; - mcl::fp::powGeneric(zz, x, &y.buf_[0], y.size(), mulMod, sqrMod, (void (*)(VintT&, const VintT&))0); - z.swap(zz); - } - /* - inverse mod - y = 1/x mod m - REMARK x != 0 and m != 0; - */ - static void invMod(VintT& y, const VintT& x, const VintT& m) - { - assert(!x.isZero() && !m.isZero()); - if (x == 1) { - y = 1; - return; - } - VintT a = 1; - VintT t; - VintT q; - divMod(&q, t, m, x); - VintT s = x; - VintT b = -q; - - for (;;) { - divMod(&q, s, s, t); - if (s.isZero()) { - if (b.isNeg_) { - b += m; - } - y = b; - return; - } - a -= b * q; - - divMod(&q, t, t, s); - if (t.isZero()) { - if (a.isNeg_) { - a += m; - } - y = a; - return; - } - b -= a * q; - } - } - /* - Miller-Rabin - */ - static bool isPrime(bool *pb, const VintT& n, int tryNum = 32) - { - *pb = true; - if (n <= 1) return false; - if (n == 2 || n == 3) return true; - if (n.isEven()) return false; - cybozu::XorShift rg; - const VintT nm1 = n - 1; - VintT d = nm1; - uint32_t r = countTrailingZero(d); - // n - 1 = 2^r d - VintT a, x; - for (int i = 0; i < tryNum; i++) { - a.setRand(pb, n - 3, rg); - if (!*pb) return false; - a += 2; // a in [2, n - 2] - powMod(x, a, d, n); - if (x == 1 || x == nm1) { - continue; - } - for (uint32_t j = 1; j < r; j++) { - sqr(x, x); - x %= n; - if (x == 1) return false; - if (x == nm1) goto NEXT_LOOP; - } - return false; - NEXT_LOOP:; - } - return true; - } - bool isPrime(bool *pb, int tryNum = 32) const - { - return isPrime(pb, *this, tryNum); - } - static void gcd(VintT& z, VintT x, VintT y) - { - VintT t; - for (;;) { - if (y.isZero()) { - z = x; - return; - } - t = x; - x = y; - mod(y, t, y); - } - } - static VintT gcd(const VintT& x, const VintT& y) - { - VintT z; - gcd(z, x, y); - return z; - } - static void lcm(VintT& z, const VintT& x, const VintT& y) - { - VintT c; - gcd(c, x, y); - div(c, x, c); - mul(z, c, y); - } - static VintT lcm(const VintT& x, const VintT& y) - { - VintT z; - lcm(z, x, y); - return z; - } - /* - 1 if m is quadratic residue modulo n (i.e., there exists an x s.t. 
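`isPrime` is the standard Miller-Rabin test (write `n - 1 = 2^r d` with `d` odd via `countTrailingZero`, then probe random bases), and `powMod` drives its modular exponentiation through the `MulMod`/`SqrMod` functors. A quick check on a known Mersenne prime, assuming the vendored `mcl/vint.hpp`:

```cpp
#include <mcl/vint.hpp>
#include <cassert>

int main()
{
    mcl::Vint m = 1;
    m <<= 127;
    m -= 1;                          // 2^127 - 1, a Mersenne prime
    assert(m.isPrime());             // 32 Miller-Rabin rounds by default

    // Fermat check via powMod: 3^(m-1) == 1 (mod m) for prime m
    mcl::Vint r;
    mcl::Vint::powMod(r, 3, m - 1, m);
    assert(r == 1);
    return 0;
}
```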
x^2 = m mod n) - 0 if m = 0 mod n - -1 otherwise - @note return legendre_symbol(m, p) for m and odd prime p - */ - static int jacobi(VintT m, VintT n) - { - assert(n.isOdd()); - if (n == 1) return 1; - if (m < 0 || m > n) { - quotRem(0, m, m, n); // m = m mod n - } - if (m.isZero()) return 0; - if (m == 1) return 1; - if (gcd(m, n) != 1) return 0; - - int j = 1; - VintT t; - goto START; - while (m != 1) { - if ((m.getLow32bit() % 4) == 3 && (n.getLow32bit() % 4) == 3) { - j = -j; - } - mod(t, n, m); - n = m; - m = t; - START: - int s = countTrailingZero(m); - uint32_t nmod8 = n.getLow32bit() % 8; - if ((s % 2) && (nmod8 == 3 || nmod8 == 5)) { - j = -j; - } - } - return j; - } -#ifndef CYBOZU_DONT_USE_STRING - explicit VintT(const std::string& str) - : size_(0) - { - setStr(str); - } - void getStr(std::string& s, int base = 10) const - { - s.clear(); - cybozu::StringOutputStream os(s); - save(os, base); - } - std::string getStr(int base = 10) const - { - std::string s; - getStr(s, base); - return s; - } - inline friend std::ostream& operator<<(std::ostream& os, const VintT& x) - { - return os << x.getStr(os.flags() & std::ios_base::hex ? 16 : 10); - } - inline friend std::istream& operator>>(std::istream& is, VintT& x) - { - x.load(is); - return is; - } -#endif -#ifndef CYBOZU_DONT_USE_EXCEPTION - void setStr(const std::string& str, int base = 0) - { - bool b; - setStr(&b, str.c_str(), base); - if (!b) throw cybozu::Exception("Vint:setStr") << str; - } - void setRand(const VintT& max, fp::RandGen rg = fp::RandGen()) - { - bool b; - setRand(&b, max, rg); - if (!b) throw cybozu::Exception("Vint:setRand"); - } - void getArray(Unit *x, size_t maxSize) const - { - bool b; - getArray(&b, x, maxSize); - if (!b) throw cybozu::Exception("Vint:getArray"); - } - template - void load(InputStream& is, int ioMode = 0) - { - bool b; - load(&b, is, ioMode); - if (!b) throw cybozu::Exception("Vint:load"); - } - template - void save(OutputStream& os, int base = 10) const - { - bool b; - save(&b, os, base); - if (!b) throw cybozu::Exception("Vint:save"); - } - static bool isPrime(const VintT& n, int tryNum = 32) - { - bool b; - bool ret = isPrime(&b, n, tryNum); - if (!b) throw cybozu::Exception("Vint:isPrime"); - return ret; - } - bool isPrime(int tryNum = 32) const - { - bool b; - bool ret = isPrime(&b, *this, tryNum); - if (!b) throw cybozu::Exception("Vint:isPrime"); - return ret; - } - template - void setArray(const S *x, size_t size) - { - bool b; - setArray(&b, x, size); - if (!b) throw cybozu::Exception("Vint:setArray"); - } -#endif - VintT& operator++() { adds1(*this, *this, 1); return *this; } - VintT& operator--() { subs1(*this, *this, 1); return *this; } - VintT operator++(int) { VintT c = *this; adds1(*this, *this, 1); return c; } - VintT operator--(int) { VintT c = *this; subs1(*this, *this, 1); return c; } - friend bool operator<(const VintT& x, const VintT& y) { return compare(x, y) < 0; } - friend bool operator>=(const VintT& x, const VintT& y) { return !operator<(x, y); } - friend bool operator>(const VintT& x, const VintT& y) { return compare(x, y) > 0; } - friend bool operator<=(const VintT& x, const VintT& y) { return !operator>(x, y); } - friend bool operator==(const VintT& x, const VintT& y) { return compare(x, y) == 0; } - friend bool operator!=(const VintT& x, const VintT& y) { return !operator==(x, y); } - - friend bool operator<(const VintT& x, int y) { return compares1(x, y) < 0; } - friend bool operator>=(const VintT& x, int y) { return !operator<(x, y); } - friend bool 
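`jacobi` returns the Jacobi symbol, which for an odd prime modulus coincides with the Legendre symbol, so it answers quadratic-residue questions without factoring. For example (the `int` arguments convert through the non-explicit `Vint` constructor above):

```cpp
#include <mcl/vint.hpp>
#include <cassert>

int main()
{
    // squares mod 7 are {1, 2, 4}: 2 is a residue, 3 is not
    assert(mcl::Vint::jacobi(2, 7) == 1);
    assert(mcl::Vint::jacobi(3, 7) == -1);
    assert(mcl::Vint::jacobi(14, 7) == 0); // shared factor with the modulus
    return 0;
}
```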
operator>(const VintT& x, int y) { return compares1(x, y) > 0; } - friend bool operator<=(const VintT& x, int y) { return !operator>(x, y); } - friend bool operator==(const VintT& x, int y) { return compares1(x, y) == 0; } - friend bool operator!=(const VintT& x, int y) { return !operator==(x, y); } - - friend bool operator<(const VintT& x, uint32_t y) { return compareu1(x, y) < 0; } - friend bool operator>=(const VintT& x, uint32_t y) { return !operator<(x, y); } - friend bool operator>(const VintT& x, uint32_t y) { return compareu1(x, y) > 0; } - friend bool operator<=(const VintT& x, uint32_t y) { return !operator>(x, y); } - friend bool operator==(const VintT& x, uint32_t y) { return compareu1(x, y) == 0; } - friend bool operator!=(const VintT& x, uint32_t y) { return !operator==(x, y); } - - VintT& operator+=(const VintT& rhs) { add(*this, *this, rhs); return *this; } - VintT& operator-=(const VintT& rhs) { sub(*this, *this, rhs); return *this; } - VintT& operator*=(const VintT& rhs) { mul(*this, *this, rhs); return *this; } - VintT& operator/=(const VintT& rhs) { div(*this, *this, rhs); return *this; } - VintT& operator%=(const VintT& rhs) { mod(*this, *this, rhs); return *this; } - VintT& operator&=(const VintT& rhs) { andBit(*this, *this, rhs); return *this; } - VintT& operator|=(const VintT& rhs) { orBit(*this, *this, rhs); return *this; } - - VintT& operator+=(int rhs) { adds1(*this, *this, rhs); return *this; } - VintT& operator-=(int rhs) { subs1(*this, *this, rhs); return *this; } - VintT& operator*=(int rhs) { muls1(*this, *this, rhs); return *this; } - VintT& operator/=(int rhs) { divs1(*this, *this, rhs); return *this; } - VintT& operator%=(int rhs) { mods1(*this, *this, rhs); return *this; } - VintT& operator+=(Unit rhs) { addu1(*this, *this, rhs); return *this; } - VintT& operator-=(Unit rhs) { subu1(*this, *this, rhs); return *this; } - VintT& operator*=(Unit rhs) { mulu1(*this, *this, rhs); return *this; } - VintT& operator/=(Unit rhs) { divu1(*this, *this, rhs); return *this; } - VintT& operator%=(Unit rhs) { modu1(*this, *this, rhs); return *this; } - - VintT& operator&=(Unit rhs) { andBitu1(*this, *this, rhs); return *this; } - VintT& operator|=(Unit rhs) { orBitu1(*this, *this, rhs); return *this; } - - friend VintT operator+(const VintT& a, const VintT& b) { VintT c; add(c, a, b); return c; } - friend VintT operator-(const VintT& a, const VintT& b) { VintT c; sub(c, a, b); return c; } - friend VintT operator*(const VintT& a, const VintT& b) { VintT c; mul(c, a, b); return c; } - friend VintT operator/(const VintT& a, const VintT& b) { VintT c; div(c, a, b); return c; } - friend VintT operator%(const VintT& a, const VintT& b) { VintT c; mod(c, a, b); return c; } - friend VintT operator&(const VintT& a, const VintT& b) { VintT c; andBit(c, a, b); return c; } - friend VintT operator|(const VintT& a, const VintT& b) { VintT c; orBit(c, a, b); return c; } - - friend VintT operator+(const VintT& a, int b) { VintT c; adds1(c, a, b); return c; } - friend VintT operator-(const VintT& a, int b) { VintT c; subs1(c, a, b); return c; } - friend VintT operator*(const VintT& a, int b) { VintT c; muls1(c, a, b); return c; } - friend VintT operator/(const VintT& a, int b) { VintT c; divs1(c, a, b); return c; } - friend VintT operator%(const VintT& a, int b) { VintT c; mods1(c, a, b); return c; } - friend VintT operator+(const VintT& a, Unit b) { VintT c; addu1(c, a, b); return c; } - friend VintT operator-(const VintT& a, Unit b) { VintT c; subu1(c, a, b); return c; } - friend 
VintT operator*(const VintT& a, Unit b) { VintT c; mulu1(c, a, b); return c; } - friend VintT operator/(const VintT& a, Unit b) { VintT c; divu1(c, a, b); return c; } - friend VintT operator%(const VintT& a, Unit b) { VintT c; modu1(c, a, b); return c; } - - friend VintT operator&(const VintT& a, Unit b) { VintT c; andBitu1(c, a, b); return c; } - friend VintT operator|(const VintT& a, Unit b) { VintT c; orBitu1(c, a, b); return c; } - - VintT operator-() const { VintT c; neg(c, *this); return c; } - VintT& operator<<=(size_t n) { shl(*this, *this, n); return *this; } - VintT& operator>>=(size_t n) { shr(*this, *this, n); return *this; } - VintT operator<<(size_t n) const { VintT c = *this; c <<= n; return c; } - VintT operator>>(size_t n) const { VintT c = *this; c >>= n; return c; } -}; - -#ifdef MCL_VINT_FIXED_BUFFER -typedef VintT > Vint; -#else -typedef VintT > Vint; -#endif - -} // mcl - -//typedef mcl::Vint mpz_class; diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/window_method.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/window_method.hpp deleted file mode 100644 index cb4fad37e..000000000 --- a/vendor/github.com/dexon-foundation/mcl/include/mcl/window_method.hpp +++ /dev/null @@ -1,175 +0,0 @@ -#pragma once -/** - @file - @brief window method - @author MITSUNARI Shigeo(@herumi) -*/ -#include -#include - -namespace mcl { namespace fp { - -/* - get w-bit size from x[0, bitSize) - @param x [in] data - @param bitSize [in] data size - @param w [in] split size < UnitBitSize -*/ -template -struct ArrayIterator { - static const size_t TbitSize = sizeof(T) * 8; - ArrayIterator(const T *x, size_t bitSize, size_t w) - : x(x) - , bitSize(bitSize) - , w(w) - , pos(0) - , mask((w == TbitSize ? 0 : (T(1) << w)) - 1) - { - assert(w <= TbitSize); - } - bool hasNext() const { return bitSize > 0; } - T getNext() - { - if (w == TbitSize) { - bitSize -= w; - return *x++; - } - if (pos + w < TbitSize) { - T v = (*x >> pos) & mask; - pos += w; - if (bitSize < w) { - bitSize = 0; - } else { - bitSize -= w; - } - return v; - } - if (pos + bitSize <= TbitSize) { - assert(bitSize <= w); - T v = *x >> pos; - assert((v >> bitSize) == 0); - bitSize = 0; - return v & mask; - } - assert(pos > 0); - T v = (x[0] >> pos) | (x[1] << (TbitSize - pos)); - v &= mask; - pos = (pos + w) - TbitSize; - bitSize -= w; - x++; - return v; - } - const T *x; - size_t bitSize; - size_t w; - size_t pos; - T mask; -}; - -template -class WindowMethod { -public: - size_t bitSize_; - size_t winSize_; - mcl::Array tbl_; - WindowMethod(const Ec& x, size_t bitSize, size_t winSize) - { - init(x, bitSize, winSize); - } - WindowMethod() - : bitSize_(0) - , winSize_(0) - { - } - /* - @param x [in] base index - @param bitSize [in] exponent bit length - @param winSize [in] window size - */ - void init(bool *pb, const Ec& x, size_t bitSize, size_t winSize) - { - bitSize_ = bitSize; - winSize_ = winSize; - const size_t tblNum = (bitSize + winSize - 1) / winSize; - const size_t r = size_t(1) << winSize; - *pb = tbl_.resize(tblNum * r); - if (!*pb) return; - Ec t(x); - for (size_t i = 0; i < tblNum; i++) { - Ec* w = &tbl_[i * r]; - w[0].clear(); - for (size_t d = 1; d < r; d *= 2) { - for (size_t j = 0; j < d; j++) { - Ec::add(w[j + d], w[j], t); - } - Ec::dbl(t, t); - } - for (size_t j = 0; j < r; j++) { - w[j].normalize(); - } - } - } -#ifndef CYBOZU_DONT_USE_EXCEPTION - void init(const Ec& x, size_t bitSize, size_t winSize) - { - bool b; - init(&b, x, bitSize, winSize); - if (!b) throw 
cybozu::Exception("mcl:WindowMethod:init") << bitSize << winSize; - } -#endif - /* - @param z [out] x multiplied by y - @param y [in] exponent - */ - template - void mul(Ec& z, const FpT& y) const - { - fp::Block b; - y.getBlock(b); - powArray(z, b.p, b.n, false); - } - void mul(Ec& z, int64_t y) const - { -#if MCL_SIZEOF_UNIT == 8 - Unit u = fp::abs_(y); - powArray(z, &u, 1, y < 0); -#else - uint64_t ua = fp::abs_(y); - Unit u[2] = { uint32_t(ua), uint32_t(ua >> 32) }; - size_t un = u[1] ? 2 : 1; - powArray(z, u, un, y < 0); -#endif - } - void mul(Ec& z, const mpz_class& y) const - { - powArray(z, gmp::getUnit(y), gmp::getUnitSize(y), y < 0); - } - void powArray(Ec& z, const Unit* y, size_t n, bool isNegative) const - { - z.clear(); - while (n > 0) { - if (y[n - 1]) break; - n--; - } - if (n == 0) return; - assert((n << winSize_) <= tbl_.size()); - if ((n << winSize_) > tbl_.size()) return; - assert(y[n - 1]); - const size_t bitSize = (n - 1) * UnitBitSize + cybozu::bsr(y[n - 1]) + 1; - size_t i = 0; - ArrayIterator ai(y, bitSize, winSize_); - do { - Unit v = ai.getNext(); - if (v) { - Ec::add(z, z, tbl_[(i << winSize_) + v]); - } - i++; - } while (ai.hasNext()); - if (isNegative) { - Ec::neg(z, z); - } - } -}; - -} } // mcl::fp - diff --git a/vendor/github.com/dexon-foundation/mcl/lib/.emptydir b/vendor/github.com/dexon-foundation/mcl/lib/.emptydir deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/dexon-foundation/mcl/mcl.sln b/vendor/github.com/dexon-foundation/mcl/mcl.sln deleted file mode 100644 index 7c4fe8f0c..000000000 --- a/vendor/github.com/dexon-foundation/mcl/mcl.sln +++ /dev/null @@ -1,57 +0,0 @@ -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 2013 -VisualStudioVersion = 12.0.40629.0 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fp_test", "test\proj\fp_test\fp_test.vcxproj", "{51266DE6-B57B-4AE3-B85C-282F170E1728}" - ProjectSection(ProjectDependencies) = postProject - {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "ec_test", "test\proj\ec_test\ec_test.vcxproj", "{46B6E88E-739A-406B-9F68-BC46C5950FA3}" - ProjectSection(ProjectDependencies) = postProject - {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mcl", "src\proj\mcl.vcxproj", "{1DBB979A-C212-45CD-9563-446A96F87F71}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fp_tower_test", "test\proj\fp_tower_test\fp_tower_test.vcxproj", "{733B6250-D249-4A99-B2A6-C8FAF6A90E97}" - ProjectSection(ProjectDependencies) = postProject - {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bn_test", "test\proj\bn_test\bn_test.vcxproj", "{9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}" - ProjectSection(ProjectDependencies) = postProject - {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71} - EndProjectSection -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|x64 = Debug|x64 - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {51266DE6-B57B-4AE3-B85C-282F170E1728}.Debug|x64.ActiveCfg = Debug|x64 - {51266DE6-B57B-4AE3-B85C-282F170E1728}.Debug|x64.Build.0 
= Debug|x64 - {51266DE6-B57B-4AE3-B85C-282F170E1728}.Release|x64.ActiveCfg = Release|x64 - {51266DE6-B57B-4AE3-B85C-282F170E1728}.Release|x64.Build.0 = Release|x64 - {46B6E88E-739A-406B-9F68-BC46C5950FA3}.Debug|x64.ActiveCfg = Debug|x64 - {46B6E88E-739A-406B-9F68-BC46C5950FA3}.Debug|x64.Build.0 = Debug|x64 - {46B6E88E-739A-406B-9F68-BC46C5950FA3}.Release|x64.ActiveCfg = Release|x64 - {46B6E88E-739A-406B-9F68-BC46C5950FA3}.Release|x64.Build.0 = Release|x64 - {1DBB979A-C212-45CD-9563-446A96F87F71}.Debug|x64.ActiveCfg = Debug|x64 - {1DBB979A-C212-45CD-9563-446A96F87F71}.Debug|x64.Build.0 = Debug|x64 - {1DBB979A-C212-45CD-9563-446A96F87F71}.Release|x64.ActiveCfg = Release|x64 - {1DBB979A-C212-45CD-9563-446A96F87F71}.Release|x64.Build.0 = Release|x64 - {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Debug|x64.ActiveCfg = Debug|x64 - {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Debug|x64.Build.0 = Debug|x64 - {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Release|x64.ActiveCfg = Release|x64 - {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Release|x64.Build.0 = Release|x64 - {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Debug|x64.ActiveCfg = Debug|x64 - {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Debug|x64.Build.0 = Debug|x64 - {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Release|x64.ActiveCfg = Release|x64 - {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Release|x64.Build.0 = Release|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal diff --git a/vendor/github.com/dexon-foundation/mcl/misc/bench.txt b/vendor/github.com/dexon-foundation/mcl/misc/bench.txt deleted file mode 100644 index 3e18e6b44..000000000 --- a/vendor/github.com/dexon-foundation/mcl/misc/bench.txt +++ /dev/null @@ -1,21 +0,0 @@ -Core i7-7700 @ 3.6GHz - BN254 BLS12_381 -G1::mul 185.863Kclk 360.723Kclk -G1::add 812.01 clk 1.540Kclk -G1::dbl 837.24 clk 1.977Kclk -G2::mul 340.125Kclk 642.457Kclk -G2::add 2.233Kclk 4.368Kclk -G2::dbl 2.134Kclk 4.088Kclk -GT::pow 615.052Kclk 1.055Mclk -G1::setStr chk 1.546Kclk 534.376Kclk -G1::setStr 1.592Kclk 4.000Kclk -G2::setStr chk 609.195Kclk 1.402Mclk -G2::setStr 5.444Kclk 8.282Kclk -hashAndMapToG1 26.997Kclk 336.207Kclk -hashAndMapToG2 212.800Kclk 775.072Kclk -pairing 909.076Kclk 2.367Mclk -millerLoop 549.957Kclk 983.935Kclk -finalExp 375.203Kclk 1.404Mclk -precomputeG2 126.000Kclk 236.912Kclk -precomputedML 427.272Kclk 729.234Kclk - diff --git a/vendor/github.com/dexon-foundation/mcl/misc/karatsuba.cpp b/vendor/github.com/dexon-foundation/mcl/misc/karatsuba.cpp deleted file mode 100644 index 7c150c6e3..000000000 --- a/vendor/github.com/dexon-foundation/mcl/misc/karatsuba.cpp +++ /dev/null @@ -1,75 +0,0 @@ -/* - sudo cpufreq-set -c 0 -g performance - mycl karatsuba.cpp -DMCL_USE_LLVM=1 ../lib/libmcl.a && ./a.out -*/ -#include -#include -#include -#include "../src/proto.hpp" -#include "../src/low_func.hpp" -#ifdef MCL_USE_LLVM -#include "../src/low_func_llvm.hpp" -#endif -#include -#include - -typedef mcl::FpT<> Fp; - -using namespace mcl::fp; - -void dump(const Unit *x, size_t N) -{ - for (size_t i = 0; i < N; i++) { - printf("%016llx ", (long long)x[N - 1 - i]); - } - printf("\n"); -} - -void gggKara(uint64_t *z, const uint64_t *x, const uint64_t *) -{ - SqrPre<8, Gtag>::f(z, x); -} -void gggLLVM(uint64_t *z, const uint64_t *x, const uint64_t *y) -{ - MulPre<8, Ltag>::f(z, x, y); -} - -template -void benchKaratsuba() -{ - cybozu::XorShift rg; - printf("N=%d\n", (int)N); - Unit z[N * 2]; - rg.read(z, N); - CYBOZU_BENCH("g:mulPre ", (MulPreCore::f), z, z, z); -// 
CYBOZU_BENCH("g:mulKara", (MulPre::karatsuba), z, z, z); - CYBOZU_BENCH("g:sqrPre ", (SqrPreCore::f), z, z); -// CYBOZU_BENCH("g:sqrKara", (SqrPre::karatsuba), z, z); - -#ifdef MCL_USE_LLVM - CYBOZU_BENCH("l:mulPre ", (MulPreCore::f), z, z, z); - CYBOZU_BENCH("l:sqrPre ", (SqrPreCore::f), z, z); - CYBOZU_BENCH("l:mulKara", (MulPre::karatsuba), z, z, z); - CYBOZU_BENCH("l:sqrKara", (SqrPre::karatsuba), z, z); -#endif -} - -CYBOZU_TEST_AUTO(karatsuba) -{ - benchKaratsuba<4>(); - benchKaratsuba<6>(); - benchKaratsuba<8>(); -#if MCL_MAX_BIT_SIZE >= 640 - benchKaratsuba<10>(); -#endif -#if MCL_MAX_BIT_SIZE >= 768 - benchKaratsuba<12>(); -#endif -#if MCL_MAX_BIT_SIZE >= 896 - benchKaratsuba<14>(); -#endif -#if MCL_MAX_BIT_SIZE >= 1024 - benchKaratsuba<16>(); -#endif -} - diff --git a/vendor/github.com/dexon-foundation/mcl/misc/mul.cpp b/vendor/github.com/dexon-foundation/mcl/misc/mul.cpp deleted file mode 100644 index 146ac33a9..000000000 --- a/vendor/github.com/dexon-foundation/mcl/misc/mul.cpp +++ /dev/null @@ -1,58 +0,0 @@ -/* - sudo cpufreq-set -c 0 -g performance - mycl mul.cpp -DMCL_USE_LLVM=1 ../lib/libmcl.a && ./a.out -*/ -#include -#include -#include -#include -#include - -typedef mcl::FpT<> Fp; - -using namespace mcl::fp; - -void dump(const Unit *x, size_t N) -{ - for (size_t i = 0; i < N; i++) { - printf("%016llx ", (long long)x[N - 1 - i]); - } - printf("\n"); -} - -CYBOZU_TEST_AUTO(mulPre) -{ - cybozu::XorShift rg; - const char *pTbl[] = { - "0x2523648240000001ba344d80000000086121000000000013a700000000000013", - "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", - "6701817056313037086248947066310538444882082605308124576230408038843357549886356779857393369967010764802541005796711440355753503701056323603", // 462 bit - "4562440617622195218641171605700291324893228507248559930579192517899275167208677386505912811317371399778642309573594407310688704721375437998252661319722214188251994674360264950082874192246603471", // 640 bit - "1552518092300708935148979488462502555256886017116696611139052038026050952686376886330878408828646477950487730697131073206171580044114814391444287275041181139204454976020849905550265285631598444825262999193716468750892846853816057031", // 768 bit - }; - const size_t N = 16; - const Mode modeTbl[] = { - FP_GMP_MONT, -#ifdef MCL_USE_LLVM - FP_LLVM_MONT, -#endif - }; - for (size_t j = 0; j < CYBOZU_NUM_OF_ARRAY(modeTbl); j++) { - Mode mode = modeTbl[j]; - printf("%s\n", ModeToStr(mode)); - for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(pTbl); i++) { - const char *p = pTbl[i]; - Fp::init(p, mode); - printf("bitSize=%d\n", (int)Fp::getBitSize()); - const Op& op = Fp::getOp(); - Unit x[N], y[N * 2]; - rg.read(x, N); - rg.read(y, N * 2); - CYBOZU_BENCH("mul ", op.fp_mul, y, y, x, op.p); - CYBOZU_BENCH("sqr ", op.fp_sqr, y, y, op.p); - CYBOZU_BENCH("mulPre", op.fpDbl_mulPre, y, y, y); - CYBOZU_BENCH("sqrPre", op.fpDbl_sqrPre, y, y); - CYBOZU_BENCH("mod ", op.fpDbl_mod, y, y, op.p); - } - } -} diff --git a/vendor/github.com/dexon-foundation/mcl/misc/precompute.cpp b/vendor/github.com/dexon-foundation/mcl/misc/precompute.cpp deleted file mode 100644 index 63cdd663b..000000000 --- a/vendor/github.com/dexon-foundation/mcl/misc/precompute.cpp +++ /dev/null @@ -1,30 +0,0 @@ -#include -#include - -using namespace mcl::bn; - -int main() -{ - initPairing(mcl::BN254); - G2 Q; - mapToG2(Q, 1); - std::vector Qcoeff; - precomputeG2(Qcoeff, Q); - puts("#if MCL_SIZEOF_UNIT == 8"); - puts("static const uint64_t QcoeffTblBN254[][6][4] = {"); 
- for (size_t i = 0; i < Qcoeff.size(); i++) { - const Fp6& x6 = Qcoeff[i]; - puts("\t{"); - for (size_t j = 0; j < 6; j++) { - printf("\t\t{"); - const Fp& x = x6.getFp0()[j]; - for (size_t k = 0; k < 4; k++) { - printf("0x%016llxull,", (unsigned long long)x.getUnit()[k]); - } - puts("},"); - } - puts("\t},"); - } - puts("};"); - puts("#endif"); -} diff --git a/vendor/github.com/dexon-foundation/mcl/misc/she/bench.sh b/vendor/github.com/dexon-foundation/mcl/misc/she/bench.sh deleted file mode 100644 index ced87b4db..000000000 --- a/vendor/github.com/dexon-foundation/mcl/misc/she/bench.sh +++ /dev/null @@ -1,6 +0,0 @@ -for i in 4 6 8 -do echo $i -touch test/she_test.cpp -make bin/she_test.exe CFLAGS_USER=-DMCLBN_FP_UNIT_SIZE=$i -bin/she_test.exe > misc/she/bench$i.txt -done diff --git a/vendor/github.com/dexon-foundation/mcl/misc/she/bench4.txt b/vendor/github.com/dexon-foundation/mcl/misc/she/bench4.txt deleted file mode 100644 index 99b2593c4..000000000 --- a/vendor/github.com/dexon-foundation/mcl/misc/she/bench4.txt +++ /dev/null @@ -1,99 +0,0 @@ -ctest:module=log -CurveFp254BNb -ctest:module=HashTable -ctest:module=GTHashTable -ctest:module=enc_dec -ctest:module=add_sub_mul -ctest:module=add_mul_add_sub -ctest:module=innerProduct -ctest:module=io -ctest:module=bench -enc 673.772Kclk -add 8.021Kclk -mul 4.042Mclk -dec 2.194Mclk -add after mul 20.693Kclk -ctest:module=saveHash -ctest:module=hashBench -Kclk -m=000fffff decG1 1.83e+02 -m=001fffff decG1 1.83e+02 -m=003fffff decG1 1.83e+02 -m=007fffff decG1 1.90e+02 -m=00ffffff decG1 2.04e+02 -m=01ffffff decG1 2.66e+02 -m=03ffffff decG1 4.17e+02 -m=07ffffff decG1 7.15e+02 -m=0fffffff decG1 1.29e+03 -m=1fffffff decG1 2.43e+03 -m=3fffffff decG1 4.70e+03 -m=7fffffff decG1 9.28e+03 - -m=000fffff decG2 4.09e+02 -m=001fffff decG2 4.11e+02 -m=003fffff decG2 4.09e+02 -m=007fffff decG2 4.23e+02 -m=00ffffff decG2 4.48e+02 -m=01ffffff decG2 5.21e+02 -m=03ffffff decG2 7.25e+02 -m=07ffffff decG2 1.11e+03 -m=0fffffff decG2 1.87e+03 -m=1fffffff decG2 3.36e+03 -m=3fffffff decG2 6.38e+03 -m=7fffffff decG2 1.24e+04 - -m=000fffff decGT 2.20e+03 -m=001fffff decGT 2.21e+03 -m=003fffff decGT 2.20e+03 -m=007fffff decGT 2.21e+03 -m=00ffffff decGT 2.23e+03 -m=01ffffff decGT 2.28e+03 -m=03ffffff decGT 2.37e+03 -m=07ffffff decGT 2.56e+03 -m=0fffffff decGT 2.94e+03 -m=1fffffff decGT 3.78e+03 -m=3fffffff decGT 5.41e+03 -m=7fffffff decGT 8.69e+03 -large m -G1::add 7.36e-01 -G1::mul 1.92e+02 -G2::add 3.51e+00 -G2::mul 4.03e+02 -GT::mul 5.47e+00 -GT::pow 7.27e+02 -G1window 1.92e+01 -G2window 6.15e+01 -GTwindow 1.35e+02 -miller 6.69e+02 -finalExp 4.23e+02 -precomML 5.16e+02 -small m = 2097151 -G1::mul 4.52e+01 -G2::mul 1.01e+02 -GT::pow 1.33e+02 -G1window 1.55e+00 -G2window 5.02e+00 -GTwindow 1.55e+01 -encG1 2.10e+02 -encG2 4.82e+02 -encGT 2.47e+03 -encG1pre 5.31e+01 -encG2pre 1.47e+02 -encGTpre 6.01e+02 -decG1 1.84e+02 -decG2 3.96e+02 -degGT 2.20e+03 -mul 4.07e+03 -addG1 1.56e+00 -addG2 4.72e+00 -addGT 2.12e+01 -reRandG1 2.10e+02 -reRandG2 4.71e+02 -reRandGT 2.49e+03 -reRandG1pre 5.16e+01 -reRandG2pre 1.44e+02 -reRandGTpre 6.10e+02 -mulG1 9.03e+01 -mulG2 2.03e+02 -mulGT 5.34e+02 -ctest:name=she_test, module=11, total=2879, ok=2879, ng=0, exception=0 diff --git a/vendor/github.com/dexon-foundation/mcl/misc/she/bench6.txt b/vendor/github.com/dexon-foundation/mcl/misc/she/bench6.txt deleted file mode 100644 index 863f7129a..000000000 --- a/vendor/github.com/dexon-foundation/mcl/misc/she/bench6.txt +++ /dev/null @@ -1,99 +0,0 @@ -ctest:module=log -CurveFp382_1 
-ctest:module=HashTable -ctest:module=GTHashTable -ctest:module=enc_dec -ctest:module=add_sub_mul -ctest:module=add_mul_add_sub -ctest:module=innerProduct -ctest:module=io -ctest:module=bench -enc 2.077Mclk -add 17.694Kclk -mul 13.408Mclk -dec 5.854Mclk -add after mul 41.570Kclk -ctest:module=saveHash -ctest:module=hashBench -Kclk -m=000fffff decG1 5.34e+02 -m=001fffff decG1 5.36e+02 -m=003fffff decG1 5.34e+02 -m=007fffff decG1 5.48e+02 -m=00ffffff decG1 5.87e+02 -m=01ffffff decG1 7.11e+02 -m=03ffffff decG1 9.53e+02 -m=07ffffff decG1 1.41e+03 -m=0fffffff decG1 2.30e+03 -m=1fffffff decG1 4.11e+03 -m=3fffffff decG1 7.71e+03 -m=7fffffff decG1 1.50e+04 - -m=000fffff decG2 1.27e+03 -m=001fffff decG2 1.27e+03 -m=003fffff decG2 1.27e+03 -m=007fffff decG2 1.30e+03 -m=00ffffff decG2 1.35e+03 -m=01ffffff decG2 1.53e+03 -m=03ffffff decG2 1.88e+03 -m=07ffffff decG2 2.55e+03 -m=0fffffff decG2 3.87e+03 -m=1fffffff decG2 6.53e+03 -m=3fffffff decG2 1.18e+04 -m=7fffffff decG2 2.25e+04 - -m=000fffff decGT 6.01e+03 -m=001fffff decGT 6.03e+03 -m=003fffff decGT 6.01e+03 -m=007fffff decGT 6.04e+03 -m=00ffffff decGT 6.08e+03 -m=01ffffff decGT 6.17e+03 -m=03ffffff decGT 6.39e+03 -m=07ffffff decGT 6.71e+03 -m=0fffffff decGT 7.44e+03 -m=1fffffff decGT 8.95e+03 -m=3fffffff decGT 1.20e+04 -m=7fffffff decGT 1.80e+04 -large m -G1::add 1.48e+00 -G1::mul 5.44e+02 -G2::add 6.91e+00 -G2::mul 1.28e+03 -GT::mul 1.04e+01 -GT::pow 2.04e+03 -G1window 5.57e+01 -G2window 2.04e+02 -GTwindow 4.03e+02 -miller 2.09e+03 -finalExp 1.50e+03 -precomML 1.63e+03 -small m = 2097151 -G1::mul 8.29e+01 -G2::mul 2.05e+02 -GT::pow 2.66e+02 -G1window 3.18e+00 -G2window 1.14e+01 -GTwindow 3.19e+01 -encG1 6.01e+02 -encG2 1.49e+03 -encGT 7.66e+03 -encG1pre 1.41e+02 -encG2pre 4.71e+02 -encGTpre 1.76e+03 -decG1 5.37e+02 -decG2 1.27e+03 -degGT 6.02e+03 -mul 1.34e+04 -addG1 3.07e+00 -addG2 1.02e+01 -addGT 4.18e+01 -reRandG1 5.99e+02 -reRandG2 1.49e+03 -reRandGT 7.69e+03 -reRandG1pre 1.40e+02 -reRandG2pre 4.68e+02 -reRandGTpre 1.75e+03 -mulG1 1.65e+02 -mulG2 4.14e+02 -mulGT 1.06e+03 -ctest:name=she_test, module=11, total=2879, ok=2879, ng=0, exception=0 diff --git a/vendor/github.com/dexon-foundation/mcl/misc/she/bench8.txt b/vendor/github.com/dexon-foundation/mcl/misc/she/bench8.txt deleted file mode 100644 index f8fe8fd75..000000000 --- a/vendor/github.com/dexon-foundation/mcl/misc/she/bench8.txt +++ /dev/null @@ -1,99 +0,0 @@ -ctest:module=log -CurveFp462 -ctest:module=HashTable -ctest:module=GTHashTable -ctest:module=enc_dec -ctest:module=add_sub_mul -ctest:module=add_mul_add_sub -ctest:module=innerProduct -ctest:module=io -ctest:module=bench -enc 5.095Mclk -add 36.280Kclk -mul 30.163Mclk -dec 12.974Mclk -add after mul 76.646Kclk -ctest:module=saveHash -ctest:module=hashBench -Kclk -m=000fffff decG1 1.44e+03 -m=001fffff decG1 1.45e+03 -m=003fffff decG1 1.45e+03 -m=007fffff decG1 1.47e+03 -m=00ffffff decG1 1.54e+03 -m=01ffffff decG1 1.70e+03 -m=03ffffff decG1 2.03e+03 -m=07ffffff decG1 2.64e+03 -m=0fffffff decG1 3.88e+03 -m=1fffffff decG1 6.32e+03 -m=3fffffff decG1 1.12e+04 -m=7fffffff decG1 2.11e+04 - -m=000fffff decG2 2.99e+03 -m=001fffff decG2 3.01e+03 -m=003fffff decG2 2.99e+03 -m=007fffff decG2 3.05e+03 -m=00ffffff decG2 3.15e+03 -m=01ffffff decG2 3.41e+03 -m=03ffffff decG2 3.93e+03 -m=07ffffff decG2 4.95e+03 -m=0fffffff decG2 6.97e+03 -m=1fffffff decG2 1.10e+04 -m=3fffffff decG2 1.91e+04 -m=7fffffff decG2 3.54e+04 - -m=000fffff decGT 1.31e+04 -m=001fffff decGT 1.31e+04 -m=003fffff decGT 1.31e+04 -m=007fffff decGT 1.31e+04 -m=00ffffff decGT 
1.32e+04
-m=01ffffff decGT 1.33e+04
-m=03ffffff decGT 1.36e+04
-m=07ffffff decGT 1.43e+04
-m=0fffffff decGT 1.56e+04
-m=1fffffff decGT 1.82e+04
-m=3fffffff decGT 2.34e+04
-m=7fffffff decGT 3.39e+04
-large m
-G1::add 3.40e+00
-G1::mul 1.41e+03
-G2::add 1.38e+01
-G2::mul 2.93e+03
-GT::mul 1.94e+01
-GT::pow 4.30e+03
-G1window 1.59e+02
-G2window 4.89e+02
-GTwindow 8.96e+02
-miller 4.99e+03
-finalExp 3.26e+03
-precomML 3.71e+03
-small m = 2097151
-G1::mul 1.53e+02
-G2::mul 3.85e+02
-GT::pow 4.88e+02
-G1window 6.96e+00
-G2window 2.17e+01
-GTwindow 5.83e+01
-encG1 1.62e+03
-encG2 3.48e+03
-encGT 1.79e+04
-encG1pre 3.67e+02
-encG2pre 1.09e+03
-encGTpre 3.88e+03
-decG1 1.45e+03
-decG2 3.02e+03
-degGT 1.31e+04
-mul 3.02e+04
-addG1 7.08e+00
-addG2 2.03e+01
-addGT 7.68e+01
-reRandG1 1.63e+03
-reRandG2 3.48e+03
-reRandGT 1.79e+04
-reRandG1pre 3.65e+02
-reRandG2pre 1.08e+03
-reRandGTpre 3.79e+03
-mulG1 3.08e+02
-mulG2 7.65e+02
-mulGT 1.95e+03
-ctest:name=she_test, module=11, total=2879, ok=2879, ng=0, exception=0
diff --git a/vendor/github.com/dexon-foundation/mcl/misc/she/nizkp.pdf b/vendor/github.com/dexon-foundation/mcl/misc/she/nizkp.pdf
deleted file mode 100644
index 7e61b5a64..000000000
Binary files a/vendor/github.com/dexon-foundation/mcl/misc/she/nizkp.pdf and /dev/null differ
diff --git a/vendor/github.com/dexon-foundation/mcl/misc/she/she-api-ja.md b/vendor/github.com/dexon-foundation/mcl/misc/she/she-api-ja.md
deleted file mode 100644
index 850f11ff3..000000000
--- a/vendor/github.com/dexon-foundation/mcl/misc/she/she-api-ja.md
+++ /dev/null
@@ -1,314 +0,0 @@
-# she : a two-level homomorphic encryption library
-
-# Abstract
-she (somewhat homomorphic encryption) is a public-key encryption library implementing pairing-based two-level homomorphic encryption.
-Two-level homomorphic encryption supports any number of additions of ciphertexts and exactly one multiplication.
-
-In particular, for two integer vectors x = (x_i) and y = (y_i) whose elements are encrypted one by one, the inner product of the two vectors can be computed while everything stays encrypted:
-
-ΣEnc(x_i) Enc(y_i) = Enc(Σx_i y_i).
-
-# Features
-* implements the latest pairing-based algorithm
- * [Efficient Two-level Homomorphic Encryption in Prime-order Bilinear Groups and A Fast Implementation in WebAssembly : ASIA CCS2018](http://asiaccs2018.org/?page_id=632)
-* the C++ version supports Windows(x64), Linux(x64, ARM64), OSX(x64)
-* the JavaScript version (WebAssembly; "JS" below) supports Chrome, Firefox, Edge, Safari (including Android and iPhone), Node.js
-
-# Classes and main functionality
-
-## Main classes
-* secret key class ; SecretKey
-* public key class ; PublicKey
-* ciphertext classes ; CipherTextG1, CipherTextG2, CipherTextGT
-* zero-knowledge proof classes ; ZkpBin, ZkpEq, ZkpBinEq
-
-## Encryption and decryption
-* create a public key from a secret key
-* encrypt an integer with a public key
-* decrypt a ciphertext with a secret key
-
-## Operations on ciphertexts
-* ciphertexts of the same ciphertext class can be added and subtracted
-* multiplying a CipherTextG1 by a CipherTextG2 yields a CipherTextGT
-
-## Important notes on decryption
-* this she library has to solve a small discrete logarithm problem (DLP) when decrypting
-* if the DLP table size is s and the ciphertext is Enc(m), the decryption time is proportional to m/s
-* set the table size with `setRangeForDLP(s)`
- * set the maximum value of `m/s` with `setTryNum(tryNum)`
-
-## Zero-knowledge proof classes
-* a zero-knowledge proof is generated at the same time a plaintext m is encrypted
-* a constraint on m can be verified from the ciphertext, the generated zero-knowledge proof and the public key
-
-# JS version
-
-## Loading with Node.js
-
-```
->npm install she-wasm
->node
->const she = require('she-wasm')
-```
-
-## Loading in a browser
-Put the she.js, she\_c.js and she\_c.wasm files of [she-wasm](https://github.com/herumi/she-wasm/) in the same directory and load she.js
-```
-// HTML
-<script src="she.js"></script>
-```
-
-## JS sample
-
-```
-// initialize the system
-she.init().then(() => {
-  const sec = new she.SecretKey()
-  // initialize the secret key
-  sec.setByCSPRNG()
-
-  // create the public key pub from the secret key sec
-  const pub = sec.getPublicKey()
-
-  const m1 = 1
-  const m2 = 2
-  const m3 = 3
-  const m4 = -1
-
-  // encrypt the plaintexts m1 and m2 as CipherTextG1
-  const c11 = pub.encG1(m1)
-  const c12 = pub.encG1(m2)
-
-  // encrypt the plaintexts m3 and m4 as CipherTextG2
-  const c21 = pub.encG2(m3)
-  const c22 = pub.encG2(m4)
-
-  // add c11 and c12, and c21 and c22, respectively
-  const c1 = she.add(c11, c12)
-  const c2 = she.add(c21, c22)
-
-  // multiplying c1 by c2 gives a CipherTextGT
-  const ct = she.mul(c1, c2)
-
-  // decrypt the ciphertext ct
-  console.log(`(${m1} + ${m2}) * (${m3} + ${m4}) = ${sec.dec(ct)}`)
-})
-```
-
-# C++ sample
-See [mcl](https://github.com/herumi/mcl/#installation-requirements) for how to build the library.
-```
-#include <mcl/she.hpp>
-int main()
-	try
-{
-	using namespace mcl::she;
-	// initialize the system
-	init();
-
-	SecretKey sec;
-
-	// initialize the secret key
-	sec.setByCSPRNG();
-
-	// create the public key pub from the secret key sec
-	PublicKey pub;
-	sec.getPublicKey(pub);
-
-	int m1 = 1;
-	int m2 = 2;
-	int m3 = 3;
-	int m4 = -1;
-
-	// encrypt the plaintexts m1 and m2 as CipherTextG1
-	CipherTextG1 c11, c12;
-	pub.enc(c11, m1);
-	pub.enc(c12, m2);
-
-	// encrypt the plaintexts m3 and m4 as CipherTextG2
-	CipherTextG2 c21, c22;
-	pub.enc(c21, m3);
-	pub.enc(c22, m4);
-
-	// add c11 and c12, and c21 and c22, respectively
-	CipherTextG1 c1;
-	CipherTextG2 c2;
-	CipherTextG1::add(c1, c11, c12);
-	CipherTextG2::add(c2, c21, c22);
-
-	// multiplying c1 by c2 gives a CipherTextGT
-	CipherTextGT ct;
-	CipherTextGT::mul(ct, c1, c2);
-
-	// decrypt the ciphertext ct
-	printf("(%d + %d) * (%d + %d) = %d\n", m1, m2, m3, m4, (int)sec.dec(ct));
-} catch (std::exception& e) {
-	printf("ERR %s\n", e.what());
-	return 1;
-}
-
-```
-
-# Methods common to all classes
-
-## Serialization (C++)
-
-* `setStr(const std::string& str, int ioMode = 0)`
- * set the value from `str` according to `ioMode`
-* `getStr(std::string& str, int ioMode = 0) const`
-* `std::string getStr(int ioMode = 0) const`
- * get the string `str` according to `ioMode`
-* `size_t serialize(void *buf, size_t maxBufSize) const`
- * serialize into `buf`, which has maxBufSize bytes
- * return the number of bytes written to `buf`
- * return 0 on error
-* `size_t deserialize(const void *buf, size_t bufSize)`
- * deserialize by reading at most bufSize bytes from `buf`
- * return the number of bytes read
- * return 0 on error
-
-## Serialization (JS)
-
-* `deserialize(s)`
- * deserialize from a Uint8Array `s`
-* `serialize()`
- * serialize and return a Uint8Array
-* `deserializeHexStr(s)`
- * deserialize from a hexadecimal string `s`
-* `serializeToHexStr()`
- * serialize to a hexadecimal string
-
-## ioMode
-
-* 2 ; binary
-* 10 ; decimal
-* 16 ; hexadecimal
-* IoPrefix ; or-ing this with 2 or 16 prepends 0b or 0x respectively
-* IoEcAffine ; affine coordinates (G1 and G2 only)
-* IoEcProj ; projective coordinates (G1 and G2 only)
-* IoSerialize ; same as serialize()/deserialize()
-
-## Notes
-* the C++ namespace is `mcl::she`
-* below, CT stands for one of CipherTextG1, CipherTextG2, CipherTextGT
-* in the JS version, plaintexts are restricted to the range of 32-bit integers
-
-## SecretKey class
-
-* `void setByCSPRNG()`(C++)
-* `void setByCSPRNG()`(JS)
- * initialize the secret key with a cryptographically secure pseudo random number generator
-
-* `int64_t dec(const CT& c) const`(C++)
-* `int dec(CT c)`(JS)
- * decrypt the ciphertext `c`
-* `int64_t decViaGT(const CipherTextG1& c) const`(C++)
-* `int64_t decViaGT(const CipherTextG2& c) const`(C++)
-* `int decViaGT(CT c)`(JS)
- * decrypt the ciphertext via GT
-* `bool isZero(const CT& c) const`(C++)
-* `bool isZero(CT c)`(JS)
- * true if the decryption of `c` is 0
- * faster than calling `dec` and comparing the result with 0
-
-## PublicKey, PrecomputedPublicKey classes
-PrecomputedPublicKey is a faster version of PublicKey
-
-* `void PrecomputedPublicKey::init(const PublicKey& pub)`(C++)
-* `void PrecomputedPublicKey::init(pub)`(JS)
- * initialize the PrecomputedPublicKey with the public key `pub`
-
-* `PrecomputedPublicKey::destroy()`(JS)
- * in JavaScript, call this method once a PrecomputedPublicKey is no longer needed
 - * otherwise memory will leak
-
-Below, PK = PublicKey or PrecomputedPublicKey
-
-* `void PK::enc(CT& c, int64_t m) const`(C++)
-* `CipherTextG1 PK::encG1(m)`(JS)
-* `CipherTextG2 PK::encG2(m)`(JS)
-* `CipherTextGT PK::encGT(m)`(JS)
- * encrypt `m` and set `c` (or return the value)
-
-* `void PK::reRand(CT& c) const`(C++)
-* `CT PK::reRand(CT c)`(JS)
- * re-randomize `c`
 - * a re-randomized ciphertext and the original ciphertext cannot be linked as encryptions of the same plaintext
-
-* `void convert(CipherTextGT& cm, const CT& ca) const`
-* `CipherTextGT convert(CT ca)`
- * convert the ciphertext `ca` (CipherTextG1 or CipherTextG2) to CipherTextGT
-
-## CipherText class
-
-* `void CT::add(CT& z, const CT& x, const CT& y)`(C++)
-* `CT she.add(CT x, CT y)`(JS)
- * add the ciphertexts `x` and `y` and set `z` (or return the value)
-* `void CT::sub(CT& z, const CT& x, const CT& y)`(C++)
-* `CT she.sub(CT x, CT y)`(JS)
 - * subtract the ciphertext `y` from `x` and set `z` (or return the value)
-* `void CT::neg(CT& y, const CT& x)`(C++)
-* `void she.neg(CT x)`(JS)
 - * set `y` to the negation of the ciphertext `x` (or return the value)
-* `void CT::mul(CT& z, const CT& x, int y)`(C++)
-* `CT she.mulInt(CT x, int y)`(JS)
 - * multiply the ciphertext `x` by the integer `y` and set `z` (or return the value)
-
-* `void CipherTextGT::mul(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y)`(C++)
-* `CipherTextGT she.mul(CipherTextG1 x, CipherTextG2 y)`(JS)
 - * multiply the ciphertexts `x` and `y` and set `z` (or return the value)
-
-* `void CipherTextGT::mulML(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y)`(C++)
 - * multiply the ciphertexts `x` and `y` (performing only the Miller loop) and set `z`
-* `void CipherTextGT::finalExp(CipherTextGT& y, const CipherTextGT& x)`(C++)
 - * mul(a, b) = finalExp(mulML(a, b))
 - * add(mul(a, b), mul(c, d)) = finalExp(add(mulML(a, b), mulML(c, d)))
 - * that is, for a sum of products it is best to add the mulML results and apply finalExp once at the end
-
-## Zero-knowledge proof classes
-
-### Overview
-* ZkpBin : for a ciphertext encGi(m) (i = 1, 2, T), verify that m = 0 or 1 without decrypting it
-* ZkpEq : for ciphertexts encG1(m1) and encG2(m2), verify that m1 = m2
-* ZkpBinEq : for ciphertexts encG1(m1) and encG2(m2), verify that m1 = m2 = 0 or 1
-
-### API
-PK = PublicKey or PrecomputedPublicKey
-
-* `void PK::encWithZkpBin(CipherTextG1& c, Zkp& zkp, int m) const`(C++)
-* `void PK::encWithZkpBin(CipherTextG2& c, Zkp& zkp, int m) const`(C++)
-* `[CipherTextG1, ZkpBin] PK::encWithZkpBinG1(m)`(JS)
-* `[CipherTextG2, ZkpBin] PK::encWithZkpBinG2(m)`(JS)
 - * encrypt `m` (= 0 or 1) and set the ciphertext `c` and the zero-knowledge proof `zkp` (or return [c, zkp])
 - * throws an exception if m is neither 0 nor 1
-* `void PK::encWithZkpEq(CipherTextG1& c1, CipherTextG2& c2, ZkpEq& zkp, const INT& m) const`(C++)
-* `[CipherTextG1, CipherTextG2, ZkpEq] PK::encWithZkpEq(m)`(JS)
 - * encrypt `m` and set the ciphertexts `c1`, `c2` and the zero-knowledge proof `zkp` (or return [c1, c2, zkp])
-* `void PK::encWithZkpBinEq(CipherTextG1& c1, CipherTextG2& c2, ZkpBinEq& zkp, int m) const`(C++)
-* `[CipherTextG1, CipherTextG2, ZkpEqBin] PK::encWithZkpBinEq(m)`(JS)
 - * encrypt `m` (= 0 or 1) and set the ciphertexts `c1`, `c2` and the zero-knowledge proof `zkp` (or return [c1, c2, zkp])
 - * throws an exception if m is neither 0 nor 1
-
-## Global functions
-
-* `void init(const CurveParam& cp, size_t hashSize = 1024, size_t tryNum = 2048)`(C++)
-* `void init(curveType = she.BN254, hashSize = 1024, tryNum = 2048)`(JS)
 - * initialize with a decryption table of size `hashSize` and the trial count `tryNum`
 - * the decryptable range of the plaintext m is |m| <= hashSize * tryNum
-* `she.loadTableForGTDLP(Uint8Array a)`(JS)
 - * load a decryption table
 - * currently only `https://herumi.github.io/she-dlp-table/she-dlp-0-20-gt.bin` is provided
-* `void useDecG1ViaGT(bool use)`(C++/JS)
-* `void useDecG2ViaGT(bool use)`(C++/JS)
 - * decrypt CipherTextG1 and CipherTextG2 via CipherTextGT
 - * when decrypting large values, it is more efficient to concentrate the huge DLP table in GT than to keep a separate one for each group
-
-# License
-
-This library is provided under the [modified BSD License](https://github.com/herumi/mcl/blob/master/COPYRIGHT)
-
-# Author
-
-光成滋生 MITSUNARI Shigeo(herumi@nifty.com)
diff --git a/vendor/github.com/dexon-foundation/mcl/misc/she/she-api.md b/vendor/github.com/dexon-foundation/mcl/misc/she/she-api.md
deleted file mode 100644
index af54311e9..000000000
--- a/vendor/github.com/dexon-foundation/mcl/misc/she/she-api.md
+++ /dev/null
@@ -1,322 +0,0 @@
-# she ; Two-level homomorphic encryption library for browser/Node.js by WebAssembly
-
-# Abstract
-she is a somewhat (two-level) homomorphic encryption library,
-which is based on pairings.
-This library supports polynomially many homomorphic additions and
-one multiplication over encrypted data.
-
-In particular, the inner product of two encrypted integer vectors such as Enc(x) = (Enc(x_i)), Enc(y) = (Enc(y_i))
-can be computed:
-
-Sum_i Enc(x_i) Enc(y_i) = Enc(Sum_i x_i y_i).
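A minimal C++ sketch of this inner-product pattern, using only calls documented below (`enc`, `mulML`, `add`, `finalExp`, `dec`; see the CipherText section for the `mulML`/`finalExp` identity) and assuming the default `init()` parameters, so the result stays inside the default DLP range:

```
#include <mcl/she.hpp>
#include <stdio.h>

int main()
	try
{
	using namespace mcl::she;
	init(); // default curve and DLP table
	SecretKey sec;
	sec.setByCSPRNG();
	PublicKey pub;
	sec.getPublicKey(pub);

	const int x[3] = { 1, 2, 3 };
	const int y[3] = { 4, -1, 2 };
	CipherTextGT sum;
	for (int i = 0; i < 3; i++) {
		CipherTextG1 cx;
		CipherTextG2 cy;
		pub.enc(cx, x[i]); // Enc(x_i) as CipherTextG1
		pub.enc(cy, y[i]); // Enc(y_i) as CipherTextG2
		CipherTextGT t;
		CipherTextGT::mulML(t, cx, cy); // Miller loop only
		if (i == 0) {
			sum = t;
		} else {
			CipherTextGT::add(sum, sum, t); // accumulate before finalExp
		}
	}
	CipherTextGT::finalExp(sum, sum); // one final exponentiation for the whole sum
	printf("inner product = %d\n", (int)sec.dec(sum)); // 1*4 + 2*(-1) + 3*2 = 8
	return 0;
} catch (std::exception& e) {
	printf("ERR %s\n", e.what());
	return 1;
}
```

Decrypting only the final sum also means the small DLP is solved once rather than once per element.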
-# Features
-* supports the latest pairing-based algorithm
- * [Efficient Two-level Homomorphic Encryption in Prime-order Bilinear Groups and A Fast Implementation in WebAssembly : ASIA CCS2018](http://asiaccs2018.org/?page_id=632)
-* supports Windows(x64), Linux(x64, ARM64), OSX(x64)
-* supports JavaScript(WebAssembly), Chrome, Firefox, Safari (including Android and iPhone), Node.js
-
-# Classes
-
-## Main classes
-* secret key class ; SecretKey
-* public key class ; PublicKey
-* ciphertext classes ; CipherTextG1, CipherTextG2, CipherTextGT
-* zero-knowledge proof classes ; ZkpBin, ZkpEq, ZkpBinEq
-
-## Encryption and decryption
-* create the corresponding public key from a secret key
-* encrypt an integer (plaintext) with a public key
-* decrypt a ciphertext with a secret key
-
-## Homomorphic operations
-* homomorphic addition/subtraction over ciphertexts of the same ciphertext class
-* homomorphic multiplication of a CipherTextG1 by a CipherTextG2
- * The class of the result is CipherTextGT.
-
-## Important notes on decryption
-* This library requires solving a small DLP to decrypt a ciphertext.
-* The decryption time is O(m/s), where s is the size of the table used to solve the DLP and m is the size of the plaintext.
-* call `setRangeForDLP(s)` to set the table size.
- * The maximum `m/s` is set by `setTryNum(tryNum)`.
-
-## Zero-knowledge proof classes
-* A zero-knowledge proof is created at the same time a plaintext `m` is encrypted.
-* The restriction on `m` can be verified with the created zero-knowledge proof and a public key.
-
-# Setup for JavaScript(JS)
-
-## for Node.js
-
-```
->npm install she-wasm
->node
->const she = require('she-wasm')
-```
-
-## for a browser
-
-Copy `she.js`, `she\_c.js`, `she\_c.wasm` to your directory from [she-wasm](https://github.com/herumi/she-wasm/),
-and load `she.js`.
-```
-// HTML
-<script src="she.js"></script>
-```
-
-## A sample for JS
-
-```
-// initialize the library
-she.init().then(() => {
-  const sec = new she.SecretKey()
-  // initialize a secret key by CSPRNG(cryptographically secure pseudo random number generator)
-  sec.setByCSPRNG()
-
-  // create a public key from a secret key
-  const pub = sec.getPublicKey()
-
-  const m1 = 1
-  const m2 = 2
-  const m3 = 3
-  const m4 = -1
-
-  // encrypt m1 and m2 as CipherTextG1 class
-  const c11 = pub.encG1(m1)
-  const c12 = pub.encG1(m2)
-
-  // encrypt m3 and m4 as CipherTextG2 class
-  const c21 = pub.encG2(m3)
-  const c22 = pub.encG2(m4)
-
-  // add c11 and c12, and c21 and c22, respectively
-  const c1 = she.add(c11, c12)
-  const c2 = she.add(c21, c22)
-
-  // get ct as a CipherTextGT class by multiplying c1 with c2
-  const ct = she.mul(c1, c2)
-
-  // decrypt ct
-  console.log(`(${m1} + ${m2}) * (${m3} + ${m4}) = ${sec.dec(ct)}`)
-})
-```
-
-# A sample for C++
-For how to build the library, see [mcl](https://github.com/herumi/mcl/#installation-requirements).
-```
-#include <mcl/she.hpp>
-int main()
-	try
-{
-	using namespace mcl::she;
-	// initialize the library
-	init();
-
-	SecretKey sec;
-
-	// initialize a secret key by CSPRNG
-	sec.setByCSPRNG();
-
-	// create a public key from a secret key
-	PublicKey pub;
-	sec.getPublicKey(pub);
-
-	int m1 = 1;
-	int m2 = 2;
-	int m3 = 3;
-	int m4 = -1;
-
-	// encrypt m1 and m2 as CipherTextG1 class
-	CipherTextG1 c11, c12;
-	pub.enc(c11, m1);
-	pub.enc(c12, m2);
-
-	// encrypt m3 and m4 as CipherTextG2 class
-	CipherTextG2 c21, c22;
-	pub.enc(c21, m3);
-	pub.enc(c22, m4);
-
-	// add c11 and c12, and c21 and c22, respectively
-	CipherTextG1 c1;
-	CipherTextG2 c2;
-	CipherTextG1::add(c1, c11, c12);
-	CipherTextG2::add(c2, c21, c22);
-
-	// get ct as a CipherTextGT class by multiplying c1 with c2
-	CipherTextGT ct;
-	CipherTextGT::mul(ct, c1, c2);
-
-	// decrypt ct
-	printf("(%d + %d) * (%d + %d) = %d\n", m1, m2, m3, m4, (int)sec.dec(ct));
-} catch (std::exception& e) {
-	printf("ERR %s\n", e.what());
-	return 1;
-}
-
-```
-# Class methods
-
-## Serialization (C++)
-
-* `setStr(const std::string& str, int ioMode = 0)`
- * set a value from `str` according to `ioMode`
-
-* `getStr(std::string& str, int ioMode = 0) const`
-* `std::string getStr(int ioMode = 0) const`
- * get a string `str` according to `ioMode`
-* `size_t serialize(void *buf, size_t maxBufSize) const`
 - * serialize a value into `buf`, which has maxBufSize bytes
 - * return the number of bytes written to `buf`
 - * return zero on error
-* `size_t deserialize(const void *buf, size_t bufSize)`
 - * deserialize a value from `buf`, which has bufSize bytes
 - * return the number of bytes read from `buf`
 - * return zero on error
-
-## Serialization (JS)
-
-* `deserialize(s)`
 - * deserialize from `s` of Uint8Array type
-* `serialize()`
 - * serialize a value and return a Uint8Array value
-* `deserializeHexStr(s)`
 - * deserialize from a hexadecimal string
-* `serializeToHexStr()`
 - * serialize to a hexadecimal string
-
-## ioMode
-
-* 2 ; binary number
-* 10 ; decimal number
-* 16 ; hexadecimal number
-* IoPrefix ; append a prefix 0b (resp. 2) or 0x (resp. 16)
-* IoEcAffine ; affine coordinates (for G1, G2 only)
-* IoEcProj ; projective coordinates (for G1, G2 only)
-* IoSerialize ; same as serialize()/deserialize()
-
-## Notation
-* the C++ namespace is `mcl::she`
-* CT means one of CipherTextG1, CipherTextG2, CipherTextGT
-* The range of a plaintext is restricted to a 32-bit integer for JS
-
-## SecretKey class
-
-* `void setByCSPRNG()`(C++)
-* `void setByCSPRNG()`(JS)
 - * set a secret key by CSPRNG(cryptographically secure pseudo random number generator)
-
-* `int64_t dec(const CT& c) const`(C++)
-* `int dec(CT c)`(JS)
 - * decrypt `c`
-* `int64_t decViaGT(const CipherTextG1& c) const`(C++)
-* `int64_t decViaGT(const CipherTextG2& c) const`(C++)
-* `int decViaGT(CT c)`(JS)
 - * decrypt `c` through CipherTextGT
-* `bool isZero(const CT& c) const`(C++)
-* `bool isZero(CT c)`(JS)
 - * return true if the decryption of `c` is zero
 - * it is faster than decrypting `c` and comparing the result with zero
-
-## PublicKey, PrecomputedPublicKey class
-`PrecomputedPublicKey` is a faster version of `PublicKey`
-
-* `void PrecomputedPublicKey::init(const PublicKey& pub)`(C++)
-* `void PrecomputedPublicKey::init(pub)`(JS)
 - * initialize `PrecomputedPublicKey` with a public key `pub`
-
-* `PrecomputedPublicKey::destroy()`(JS)
 - * call this method when the instance is no longer needed
 - * otherwise a memory leak will be caused
-
-PK means PublicKey or PrecomputedPublicKey
-
-* `void PK::enc(CT& c, int64_t m) const`(C++)
-* `CipherTextG1 PK::encG1(m)`(JS)
-* `CipherTextG2 PK::encG2(m)`(JS)
-* `CipherTextGT PK::encGT(m)`(JS)
 - * encrypt `m` and set `c` (or return the value)
-
-* `void PK::reRand(CT& c) const`(C++)
-* `CT PK::reRand(CT c)`(JS)
 - * rerandomize `c`
 - * For `c = Enc(m)`, it is hard to tell whether a rerandomized ciphertext was produced by rerandomization or by freshly encrypting `m` again.
-
-* `void convert(CipherTextGT& cm, const CT& ca) const`
-* `CipherTextGT convert(CT ca)`
 - * convert `ca` (CipherTextG1 or CipherTextG2) to the `CipherTextGT` class
-
-## CipherText class
-
-* `void CT::add(CT& z, const CT& x, const CT& y)`(C++)
-* `CT she.add(CT x, CT y)`(JS)
 - * add `x` and `y` and set the value to `z` (or return the value)
-* `void CT::sub(CT& z, const CT& x, const CT& y)`(C++)
-* `CT she.sub(CT x, CT y)`(JS)
 - * subtract `y` from `x` and set the value to `z` (or return the value)
-* `void CT::neg(CT& y, const CT& x)`(C++)
-* `void she.neg(CT x)`(JS)
 - * negate `x` and set the value to `y` (or return the value)
-* `void CT::mul(CT& z, const CT& x, int y)`(C++)
-* `CT she.mulInt(CT x, int y)`(JS)
 - * multiply `x` by the integer `y` and set the value to `z` (or return the value)
-
-* `void CipherTextGT::mul(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y)`(C++)
-* `CipherTextGT she.mul(CipherTextG1 x, CipherTextG2 y)`(JS)
 - * multiply `x` and `y` and set the value to `z` (or return the value)
-
-* `void CipherTextGT::mulML(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y)`(C++)
 - * multiply `x` and `y` (performing only the Miller loop) and set the value to `z`
-
-* `void CipherTextGT::finalExp(CipherTextGT& y, const CipherTextGT& x)`(C++)
 - * mul(a, b) = finalExp(mulML(a, b))
 - * add(mul(a, b), mul(c, d)) = finalExp(add(mulML(a, b), mulML(c, d)))
 - * i.e., an inner product can be computed by calling `mulML` for each pair of elements of the two vectors, adding everything, and then calling `finalExp` once
-
-## Zero-knowledge proof classes
-
-### Abstract
-* ZkpBin ; verify whether `m = 0` or `1` for ciphertexts `encGi(m) (i = 1, 2, T)`
-* ZkpEq ; verify whether `m1 = m2` for ciphertexts `encG1(m1)` and `encG2(m2)`
-* ZkpBinEq ; verify whether `m1 = m2 = 0` or `1` for ciphertexts `encG1(m1)` and `encG2(m2)`
-
-### API
-PK = PublicKey or PrecomputedPublicKey
-
-* `void PK::encWithZkpBin(CipherTextG1& c, Zkp& zkp, int m) const`(C++)
-* `void PK::encWithZkpBin(CipherTextG2& c, Zkp& zkp, int m) const`(C++)
-* `[CipherTextG1, ZkpBin] PK::encWithZkpBinG1(m)`(JS)
-* `[CipherTextG2, ZkpBin] PK::encWithZkpBinG2(m)`(JS)
 - * encrypt `m` (= 0 or 1) and set the ciphertext `c` and the zero-knowledge proof `zkp` (or return [c, zkp])
 - * throw an exception if m != 0 and m != 1
-* `void PK::encWithZkpEq(CipherTextG1& c1, CipherTextG2& c2, ZkpEq& zkp, const INT& m) const`(C++)
-* `[CipherTextG1, CipherTextG2, ZkpEq] PK::encWithZkpEq(m)`(JS)
 - * encrypt `m` and set the ciphertexts `c1`, `c2` and the zero-knowledge proof `zkp` (or return [c1, c2, zkp])
-* `void PK::encWithZkpBinEq(CipherTextG1& c1, CipherTextG2& c2, ZkpBinEq& zkp, int m) const`(C++)
-* `[CipherTextG1, CipherTextG2, ZkpEqBin] PK::encWithZkpBinEq(m)`(JS)
 - * encrypt `m` (= 0 or 1) and set the ciphertexts `c1`, `c2` and the zero-knowledge proof `zkp` (or return [c1, c2, zkp])
 - * throw an exception if m != 0 and m != 1
-
-## Global functions
-
-* `void init(const CurveParam& cp, size_t hashSize = 1024, size_t tryNum = 2048)`(C++)
-* `void init(curveType = she.BN254, hashSize = 1024, tryNum = 2048)`(JS)
 - * initialize a table of size `hashSize` to solve the DLP and set the maximum number of trials `tryNum`.
- * the solvable range of `m` is |m| <= hashSize * tryNum
-* `getHashTableGT().load(InputStream& is)`(C++)
-* `she.loadTableForGTDLP(Uint8Array a)`(JS)
 - * load a DLP table for CipherTextGT
 - * reset the value of `hashSize` used in `init()`
 - * `https://herumi.github.io/she-dlp-table/she-dlp-0-20-gt.bin` is a precomputed table
-* `void useDecG1ViaGT(bool use)`(C++/JS)
-* `void useDecG2ViaGT(bool use)`(C++/JS)
 - * decrypt ciphertexts of CipherTextG1 and CipherTextG2 through CipherTextGT
 - * this is better when decrypting a big value
-
-# License
-
-[modified new BSD License](https://github.com/herumi/mcl/blob/master/COPYRIGHT)
-
-# Author
-
-光成滋生 MITSUNARI Shigeo(herumi@nifty.com)
diff --git a/vendor/github.com/dexon-foundation/mcl/misc/she/she.pdf b/vendor/github.com/dexon-foundation/mcl/misc/she/she.pdf
deleted file mode 100644
index 355a308b3..000000000
Binary files a/vendor/github.com/dexon-foundation/mcl/misc/she/she.pdf and /dev/null differ
diff --git a/vendor/github.com/dexon-foundation/mcl/mk.bat b/vendor/github.com/dexon-foundation/mcl/mk.bat
deleted file mode 100644
index 19eb84197..000000000
--- a/vendor/github.com/dexon-foundation/mcl/mk.bat
+++ /dev/null
@@ -1,20 +0,0 @@
-@echo off
-call setvar.bat
-if "%1"=="-s" (
-  echo use static lib
-  set CFLAGS=%CFLAGS% /DMCLBN_DONT_EXPORT
-) else if "%1"=="-d" (
-  echo use dynamic lib
-) else (
-  echo "mk (-s|-d) <source file>"
-  goto exit
-)
-set SRC=%2
-set EXE=%SRC:.cpp=.exe%
-set EXE=%EXE:.c=.exe%
-set EXE=%EXE:test\=bin\%
-set EXE=%EXE:sample\=bin\%
-echo cl %CFLAGS% %2 /Fe:%EXE% /link %LDFLAGS%
-cl %CFLAGS% %2 /Fe:%EXE% /link %LDFLAGS%
-
-:exit
diff --git a/vendor/github.com/dexon-foundation/mcl/mklib.bat b/vendor/github.com/dexon-foundation/mcl/mklib.bat
deleted file mode 100644
index 389b69009..000000000
--- a/vendor/github.com/dexon-foundation/mcl/mklib.bat
+++ /dev/null
@@ -1,39 +0,0 @@
-@echo off
-call setvar.bat
-if "%1"=="dll" (
-  echo make dynamic library DLL
-) else (
-  echo make static library LIB
-)
-rem nasm -f win64 -D_WIN64 src\asm\low_x86-64.asm
-rem lib /OUT:lib\mcl.lib /nodefaultlib fp.obj src\asm\low_x86-64.obj
-
-echo cl /c %CFLAGS% src\fp.cpp /Foobj\fp.obj
-  cl /c %CFLAGS% src\fp.cpp /Foobj\fp.obj
-echo lib /nologo /OUT:lib\mcl.lib /nodefaultlib obj\fp.obj
-  lib /nologo /OUT:lib\mcl.lib /nodefaultlib obj\fp.obj
-
-if "%1"=="dll" (
-  echo cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj
-  cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj /DMCLBN_NO_AUTOLINK
-  echo link /nologo /DLL /OUT:bin\mclbn256.dll obj\bn_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn256.lib
-  link /nologo /DLL /OUT:bin\mclbn256.dll obj\bn_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn256.lib
-
-  echo cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj
-  cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj /DMCLBN_NO_AUTOLINK
-  echo link /nologo /DLL /OUT:bin\mclbn384.dll obj\bn_c384.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn384.lib
-  link /nologo /DLL /OUT:bin\mclbn384.dll obj\bn_c384.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn384.lib
-
-  echo cl /c %CFLAGS% src\she_c256.cpp /Foobj\she_c256.obj /DMCLBN_NO_AUTOLINK
-  cl /c %CFLAGS% src\she_c256.cpp /Foobj\she_c256.obj /DMCLBN_NO_AUTOLINK
-  echo link /nologo /DLL /OUT:bin\mclshe256.dll obj\she_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\mclshe_c256.lib
-  link /nologo /DLL /OUT:bin\mclshe256.dll obj\she_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\mclshe_c256.lib
-) else (
-  echo cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj
-  cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj
-  lib /nologo /OUT:lib\mclbn256.lib /nodefaultlib obj\bn_c256.obj lib\mcl.lib
-
-  echo cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj
-  cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj
-  lib /nologo /OUT:lib\mclbn384.lib /nodefaultlib obj\bn_c384.obj lib\mcl.lib
-)
diff --git a/vendor/github.com/dexon-foundation/mcl/obj/.emptydir b/vendor/github.com/dexon-foundation/mcl/obj/.emptydir
deleted file mode 100644
index e69de29bb..000000000
diff --git a/vendor/github.com/dexon-foundation/mcl/readme.md b/vendor/github.com/dexon-foundation/mcl/readme.md
deleted file mode 100644
index 39b3d4d42..000000000
--- a/vendor/github.com/dexon-foundation/mcl/readme.md
+++ /dev/null
@@ -1,457 +0,0 @@
-[![Build Status](https://travis-ci.org/herumi/mcl.png)](https://travis-ci.org/herumi/mcl)
-
-# mcl
-
-A portable and fast pairing-based cryptography library.
-
-# Abstract
-
-mcl is a library for pairing-based cryptography.
-The current version supports the optimal Ate pairing over BN curves and BLS12-381 curves.
-
-# News
-* (Breaks backward compatibility) libmcl_dy.a is renamed to libmcl.a
- * The option SHARE_BASENAME_SUF is removed
-* The 2nd argument of `mclBn_init` is changed from `maxUnitSize` to `compiledTimeVar`, which must be `MCLBN_COMPILED_TIME_VAR`.
-* Breaks backward compatibility of mapToGi for BLS12: the map-to function for BN is now used.
-If `MCL_USE_OLD_MAPTO_FOR_BLS12` is defined, the old function is used, but it will be removed in the future.
-
-# Supported architectures
-
-* x86-64 Windows + Visual Studio
-* x86, x86-64 Linux + gcc/clang
-* ARM Linux
-* ARM64 Linux
-* (maybe any platform supported by LLVM)
-* WebAssembly
-
-# Supported curves
-
-p(z) = 36z^4 + 36z^3 + 24z^2 + 6z + 1.
-
-* BN254 ; a BN curve over the 254-bit prime p(z) where z = -(2^62 + 2^55 + 1).
-* BN\_SNARK1 ; a BN curve over a 254-bit prime p such that n := p + 1 - t has high 2-adicity.
-* BN381\_1 ; a BN curve over the 381-bit prime p(z) where z = -(2^94 + 2^76 + 2^72 + 1).
-* BN462 ; a BN curve over the 462-bit prime p(z) where z = 2^114 + 2^101 - 2^14 - 1.
-* BLS12\_381 ; [a BLS12-381 curve](https://blog.z.cash/new-snark-curve/)
-
-# Benchmark
-
-## The latest benchmark(2018/11/7)
-
-### Intel Core i7-6700 3.4GHz(Skylake), Ubuntu 18.04.1 LTS
-
-curveType | binary|clang-6.0.0|gcc-7.3.0|
-----------|--------------------|-----------|---------|
-BN254 | bin/bn\_test.exe| 882Kclk| 933Kclk|
-BLS12-381 | bin/bls12\_test.exe| 2290Kclk| 2630Kclk|
-
-### Intel Core i7-7700 3.6GHz(Kaby Lake), Ubuntu 18.04.1 LTS on Windows 10 VMware
-
-curveType | binary|clang-6.0.0|gcc-7.3.0|
-----------|--------------------|-----------|---------|
-BN254 | bin/bn\_test.exe| 900Kclk| 954Kclk|
-BLS12-381 | bin/bls12\_test.exe| 2340Kclk| 2680Kclk|
-
-* We are still investigating why gcc is slower than clang.
-
-## Higher-bit BN curve benchmark
-
-For JavaScript(WebAssembly), see [ID based encryption demo](https://herumi.github.io/mcl-wasm/ibe-demo.html).
-
-parameter | x64| Firefox on x64|Safari on iPhone7|
------------|-----|---------------|-----------------|
-BN254 | 0.25| 2.48| 4.78|
-BN381\_1 | 0.95| 7.91| 11.74|
-BN462 | 2.16| 14.73| 22.77|
-
-* x64 : 'Kaby Lake Core i7-7700(3.6GHz)'.
-* Firefox : 64-bit version 58.
-* iPhone7 : iOS 11.2.1.
-* BN254 is measured by `test/bn_test.cpp`.
-* BN381\_1 and BN462 are measured by `test/bn512_test.cpp`.
-* All timings are given in ms (milliseconds).
-
-The other benchmark results are in [bench.txt](bench.txt).
-
-## An old benchmark of a BN curve BN254(2016/12/25).
-* x64, x86 ; Intel Core i7-6700 3.4GHz(Skylake), up to 4GHz, on Ubuntu 16.04.
- * `sudo cpufreq-set -g performance`
-* arm ; 900MHz quad-core ARM Cortex-A7 on Raspberry Pi2, Linux 4.4.11-v7+
-* arm64 ; 1.2GHz ARM Cortex-A53 [HiKey](http://www.96boards.org/product/hikey/)
-
-software | x64| x86| arm|arm64(msec)
----------------------------------------------------------|------|-----|----|-----
-[ate-pairing](https://github.com/herumi/ate-pairing) | 0.21 | - | - | -
-mcl | 0.31 | 1.6 |22.6| 3.9
-[TEPLA](http://www.cipher.risk.tsukuba.ac.jp/tepla/) | 1.76 | 3.7 | 37 | 17.9
-[RELIC](https://github.com/relic-toolkit/relic) PRIME=254| 0.30 | 3.5 | 36 | -
-[MIRACL](https://github.com/miracl/MIRACL) ake12bnx | 4.2 | - | 78 | -
-[NEONabe](http://sandia.cs.cinvestav.mx/Site/NEONabe) | - | - | 16 | -
-
-* compile option for RELIC
-```
-cmake -DARITH=x64-asm-254 -DFP_PRIME=254 -DFPX_METHD="INTEG;INTEG;LAZYR" -DPP_METHD="LAZYR;OATEP"
-```
-
-# Installation Requirements
-
-* [GMP](https://gmplib.org/) and OpenSSL
-```
-apt install libgmp-dev libssl-dev
-```
-
-Create a working directory (e.g., work) and clone the following repositories.
-```
-mkdir work
-cd work
-git clone git://github.com/herumi/mcl
-git clone git://github.com/herumi/cybozulib_ext ; for only Windows
-```
-* Cybozulib\_ext is a prerequisite for running OpenSSL and GMP on VC (Visual C++).
-
-# (Option) Without GMP
-```
-make MCL_USE_GMP=0
-```
-Define `MCL_USE_VINT` before including `bn.hpp`
-
-# (Option) Without OpenSSL
-```
-make MCL_USE_OPENSSL=0
-```
-Define `MCL_DONT_USE_OPENSSL` before including `bn.hpp`
-
-# Build and test on x86-64 Linux, macOS, ARM and ARM64 Linux
-To make lib/libmcl.a and test it:
-```
-cd work/mcl
-make test
-```
-To benchmark a pairing:
-```
-bin/bn_test.exe
-```
-To make sample programs:
-```
-make sample
-```
-
-If you want to change compiler options for optimization, set `CFLAGS_OPT_USER`.
-```
-make CFLAGS_OPT_USER="-O2"
-```
-
-## Build for 32-bit Linux
-Build openssl and gmp for 32-bit mode, install them to `<dir>`, and
-```
-make ARCH=x86 CFLAGS_USER="-I <dir>/include" LDFLAGS_USER="-L <dir>/lib -Wl,-rpath,<dir>/lib"
-```
-
-## Build for 64-bit Windows
-1) make a static library and use it
-
-```
-mklib
-mk -s test\bn_c256_test.cpp
-bin\bn_c256_test.exe
-```
-2) make a dynamic library and use it
-
-```
-mklib dll
-mk -d test\bn_c256_test.cpp
-bin\bn_c256_test.exe
-```
-
-Open mcl.sln and build, or if you have msbuild.exe
-```
-msbuild /p:Configuration=Release
-```
-
-## Build with cmake
-For Linux,
-```
-mkdir build
-cd build
-cmake ..
-make
-```
-For Visual Studio,
-```
-mkdir build
-cd build
-cmake .. -A x64
-msbuild mcl.sln /p:Configuration=Release /m
-```
-## Build for wasm(WebAssembly)
-mcl supports emcc (Emscripten) and `test/bn_test.cpp` runs on browsers such as Firefox, Chrome and Edge.
-
-* [IBE on browser](https://herumi.github.io/mcl-wasm/ibe-demo.html)
-* [SHE on browser](https://herumi.github.io/she-wasm/she-demo.html)
-* [BLS signature on browser](https://herumi.github.io/bls-wasm/bls-demo.html)
-
-The timing of a pairing on `BN254` is 2.8msec on 64-bit Firefox with Skylake 3.4GHz.
-
-### Node.js
-
-* [mcl-wasm](https://www.npmjs.com/package/mcl-wasm) pairing library
-* [bls-wasm](https://www.npmjs.com/package/bls-wasm) BLS signature library
-* [she-wasm](https://www.npmjs.com/package/she-wasm) 2 Level Homomorphic Encryption library
-
-### SELinux
-mcl uses the Xbyak JIT engine if it is available on the x64 architecture;
-otherwise mcl uses slightly slower functions generated by LLVM.
-The default mode enables the SELinux security policy on CentOS, and then JIT is disabled.
-```
-% sudo setenforce 1
-% getenforce
-Enforcing
-% bin/bn_test.exe
-JIT 0
-pairing 1.496Mclk
-finalExp 581.081Kclk
-
-% sudo setenforce 0
-% getenforce
-Permissive
-% bin/bn_test.exe
-JIT 1
-pairing 1.394Mclk
-finalExp 546.259Kclk
-```
-
-# Libraries
-
-* G1 and G2 are defined over Fp
-* The order of G1 and G2 is r.
-* Use `bn256.hpp` if only BN254 is used.
-
-## C++ library
-
-* libmcl.a ; static C++ library of mcl
-* libmcl.so ; shared C++ library of mcl
-* the default parameter of curveType is BN254
-
-header |support curveType |sizeof Fr|sizeof Fp|
---------------|-------------------------|---------|---------|
-bn256.hpp |BN254 | 32 | 32 |
-bls12_381.hpp |BLS12_381, BN254 | 32 | 48 |
-bn384.hpp |BN381_1, BLS12_381, BN254| 48 | 48 |
-
-## C library
-
-* Define `MCLBN_FR_UNIT_SIZE` and `MCLBN_FP_UNIT_SIZE` and include bn.h
-* set `MCLBN_FR_UNIT_SIZE = MCLBN_FP_UNIT_SIZE` unless `MCLBN_FR_UNIT_SIZE` is defined
-
-library |MCLBN_FR_UNIT_SIZE|MCLBN_FP_UNIT_SIZE|
-------------------|------------------|------------------|
-sizeof | Fr | Fp |
-libmclbn256.a | 4 | 4 |
-libmclbn384_256.a | 4 | 6 |
-libmclbn384.a | 6 | 6 |
-
-* libmclbn*.a ; static C library
-* libmclbn*.so ; shared C library
-
-### 2nd argument of `mclBn_init`
-Pass `MCLBN_COMPILED_TIME_VAR` as the 2nd argument of `mclBn_init`; it
-is defined as `MCLBN_FR_UNIT_SIZE * 10 + MCLBN_FP_UNIT_SIZE`.
-This parameter is used to make sure that the same values are used when the library is built and when it is used.
-
-# How to initialize the pairing library
-Call `mcl::bn256::initPairing` before calling any other operations.
-```
-#include <mcl/bn256.hpp>
-mcl::bn::CurveParam cp = mcl::BN254; // or mcl::BN_SNARK1
-mcl::bn256::initPairing(cp);
-mcl::bn256::G1 P(...);
-mcl::bn256::G2 Q(...);
-mcl::bn256::Fp12 e;
-mcl::bn256::pairing(e, P, Q);
-```
-1. (BN254) a BN curve over the 254-bit prime p = p(z) where z = -(2^62 + 2^55 + 1).
-2. (BN_SNARK1) a BN curve over a 254-bit prime p such that n := p + 1 - t has high 2-adicity.
-3. BN381_1 with `mcl/bn384.hpp`.
-4. BN462 with `mcl/bn512.hpp`.
-
-See [test/bn_test.cpp](https://github.com/herumi/mcl/blob/master/test/bn_test.cpp).
-
-## Default constructor of Fp, Ec, etc.
-A default constructor does not initialize the instance.
-Set a valid value before referring to it.
-
-## Definition of groups
-
-The curve equation for a BN curve is:
-
-  E/Fp: y^2 = x^3 + b .
-
-* the cyclic group G1 is instantiated as E(Fp)[n] where n := p + 1 - t;
-* the cyclic group G2 is instantiated as the inverse image of E'(Fp^2)[n] under a twisting isomorphism phi from E' to E; and
-* the pairing e: G1 x G2 -> Fp12 is the optimal ate pairing.
-
-The field Fp12 is constructed via the following tower:
-
-* Fp2 = Fp[u] / (u^2 + 1)
-* Fp6 = Fp2[v] / (v^3 - Xi) where Xi = u + 1
-* Fp12 = Fp6[w] / (w^2 - v)
-* GT = { x in Fp12 | x^r = 1 }
-
-## Arithmetic operations
-
-G1 and G2 are additive groups and have the following operations:
-
-* T::add(T& z, const T& x, const T& y); // z = x + y
-* T::sub(T& z, const T& x, const T& y); // z = x - y
-* T::neg(T& y, const T& x); // y = -x
-* T::mul(T& z, const T& x, const INT& y); // z = scalar multiplication of x by y
-
-Remark: &z == &x or &y are allowed. INT means an integer type such as Fr, int and mpz_class.
-
-`T::mul` uses the GLV method, so `G2::mul` returns a wrong value if x is not in G2.
-Use `T::mulGeneric(T& z, const T& x, const INT& y)` for x in phi^-1(E'(Fp^2)) - G2.
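A minimal sketch of these group operations, checking the bilinearity e(aP, bQ) = e(P, Q)^{ab} on BN254 (it assumes `bn256.hpp`; `hashAndMapToG1`/`mapToG2` are described in the "Map to points" section below, and `Fp12::pow` in the list that follows):

```
#include <mcl/bn256.hpp>
#include <cassert>

int main()
{
	using namespace mcl::bn256;
	initPairing(); // BN254 by default
	G1 P;
	G2 Q;
	hashAndMapToG1(P, "abc", 3); // a point of G1 from a hash value
	mapToG2(Q, 1);
	Fr a = 123, b = 456;
	G1 aP;
	G2 bQ;
	G1::mul(aP, P, a); // scalar multiplication in G1
	G2::mul(bQ, Q, b); // scalar multiplication in G2
	Fp12 e1, e2;
	pairing(e1, P, Q);   // e1 = e(P, Q)
	pairing(e2, aP, bQ); // e2 = e(aP, bQ)
	Fp12::pow(e1, e1, a * b); // e1 = e(P, Q)^{ab}
	assert(e1 == e2); // bilinearity
	return 0;
}
```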
-Fp, Fp2, Fp6 and Fp12 have the following operations:
-
-* T::add(T& z, const T& x, const T& y); // z = x + y
-* T::sub(T& z, const T& x, const T& y); // z = x - y
-* T::mul(T& z, const T& x, const T& y); // z = x * y
-* T::div(T& z, const T& x, const T& y); // z = x / y
-* T::neg(T& y, const T& x); // y = -x
-* T::inv(T& y, const T& x); // y = 1/x
-* T::pow(T& z, const T& x, const INT& y); // z = x^y
-* Fp12::unitaryInv(T& y, const T& x); // y = conjugate of x
-
-Remark: `Fp12::mul` uses the GLV method, so it returns a wrong value if x is not in GT.
-Use `Fp12::mulGeneric` for x in Fp12 - GT.
-
-## Map to points
-
-Use these functions to make a point of G1 and G2.
-
-* mapToG1(G1& P, const Fp& x); // assume x != 0
-* mapToG2(G2& P, const Fp2& x);
-* hashAndMapToG1(G1& P, const void *buf, size_t bufSize); // set P by the hash value of [buf, bufSize)
-* hashAndMapToG2(G2& P, const void *buf, size_t bufSize);
-
-These functions map x into Gi according to [\[_Faster hashing to G2_\]].
-
-## String format of G1 and G2
-G1 and G2 have three Fp elements (x, y, z) in Jacobian coordinates.
-The normalize() method converts them to the affine coordinate (x, y, 1) or (0, 0, 0).
-
-The getStr() method returns:
-
-* `0` ; infinity
-* `1 <x> <y>` ; uncompressed format
-* `2 <x>` ; compressed format for even y
-* `3 <x>` ; compressed format for odd y
-
-## Generator of G1 and G2
-
-If you want to use the same generators of BLS12-381 as [zkcrypto](https://github.com/zkcrypto/pairing/tree/master/src/bls12_381#g2), then
-
-```
-// G1 P
-P.setStr('1 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569')
-
-// G2 Q
-Q.setStr('1 352701069587466618187139116011060144890029952792775240219908644239793785735715026873347600343865175952761926303160 3059144344244213709971259814753781636986470325476647558659373206291635324768958432433509563104347017837885763365758 1985150602287291935568054521177171638300868978215655730859378665066344726373823718423869104263333984641494340347905 927553665492332455747201965776037880757740193453592970025027978793976877002675564980949289727957565575433344219582')
-```
-
-## Serialization format of G1 and G2
-
-pseudo-code to serialize p
-```
-if bit-length(p) % 8 != 0:
-  size = Fp::getByteSize()
-  if p is zero:
-    return [0] * size
-  else:
-    s = x.serialize()
-    # x in Fp2 is odd <=> x.a is odd
-    if y is odd:
-      s[byte-length(s) - 1] |= 0x80
-    return s
-else:
-  size = Fp::getByteSize() + 1
-  if p is zero:
-    return [0] * size
-  else:
-    s = x.serialize()
-    if y is odd:
-      return 2:s
-    else:
-      return 3:s
-```
-
-## Verify an element in G2
-`G2::isValid()` checks that the element is on the curve of G2 and that its order is r, to guard against subgroup attacks.
-`G2::set()`, `G2::setStr` and `operator<<` also check the order.
-If you check it outside the library, you can stop the verification by calling `G2::verifyOrderG2(false)`.
-
-# How to make asm files (optional)
-The asm files generated this way are already in `src/asm`, so this step is not necessary.
-
-Install [LLVM](http://llvm.org/).
-```
-make MCL_USE_LLVM=1 LLVM_VER=<ver> UPDATE_ASM=1
-```
-For example, specify `-3.8` for `<ver>` if `opt-3.8` and `llc-3.8` are installed.
-If you want to use Fp with a 1024-bit prime on x86-64, then
-```
-make MCL_USE_LLVM=1 LLVM_VER=<ver> UPDATE_ASM=1 MCL_MAX_BIT_SIZE=1024
-```
-
-# API for two-level homomorphic encryption
-* [_Efficient Two-level Homomorphic Encryption in Prime-order Bilinear Groups and A Fast Implementation in WebAssembly_](https://dl.acm.org/citation.cfm?doid=3196494.3196552), N. Attrapadung, G. Hanaoka, S. Mitsunari, Y. Sakai,
-K. Shimizu, and T. Teruya. ASIACCS 2018
-* [she-api](https://github.com/herumi/mcl/blob/master/misc/she/she-api.md)
-* [she-api(Japanese)](https://github.com/herumi/mcl/blob/master/misc/she/she-api-ja.md)
-
-# Java API
-See [java.md](https://github.com/herumi/mcl/blob/master/java/java.md)
-
-# License
-
-modified new BSD License
-http://opensource.org/licenses/BSD-3-Clause
-
-This library contains parts of the following software, licensed under BSD-3-Clause.
-* [xbyak](https://github.com/herumi/xbyak)
-* [cybozulib](https://github.com/herumi/cybozulib)
-* [Lifted-ElGamal](https://github.com/aistcrypt/Lifted-ElGamal)
-
-# References
-* [ate-pairing](https://github.com/herumi/ate-pairing/)
-* [_Faster Explicit Formulas for Computing Pairings over Ordinary Curves_](http://dx.doi.org/10.1007/978-3-642-20465-4_5),
-  D.F. Aranha, K. Karabina, P. Longa, C.H. Gebotys, J. Lopez,
-  EUROCRYPT 2011, ([preprint](http://eprint.iacr.org/2010/526))
-* [_High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves_](http://dx.doi.org/10.1007/978-3-642-17455-1_2),
-  Jean-Luc Beuchat, Jorge Enrique González Díaz, Shigeo Mitsunari, Eiji Okamoto, Francisco Rodríguez-Henríquez, Tadanori Teruya,
-  Pairing 2010, ([preprint](http://eprint.iacr.org/2010/354))
-* [_Faster hashing to G2_](http://dx.doi.org/10.1007/978-3-642-28496-0_25), Laura Fuentes-Castañeda, Edward Knapp, Francisco Rodríguez-Henríquez,
-  SAC 2011, ([preprint](https://eprint.iacr.org/2008/530))
-* [_Skew Frobenius Map and Efficient Scalar Multiplication for Pairing-Based Cryptography_](https://www.researchgate.net/publication/221282560_Skew_Frobenius_Map_and_Efficient_Scalar_Multiplication_for_Pairing-Based_Cryptography),
-  Y. Sakemi, Y. Nogami, K. Okeya, Y. Morikawa, CANS 2008.
-
-# History
-
-* 2019/Mar/22 v0.92 shortcut for Ec::mul(Px, P, x) if P = 0
-* 2019/Mar/21 python binding of she256 for Linux/Mac/Windows
-* 2019/Mar/14 v0.91 modp supports mcl-wasm
-* 2019/Mar/12 v0.90 fix Vint::setArray(x) for x == this
-* 2019/Mar/07 add mclBnFr_setLittleEndianMod, mclBnFp_setLittleEndianMod
-* 2019/Feb/20 LagrangeInterpolation sets out = yVec[0] if k = 1
-* 2019/Jan/31 add mclBnFp_mapToG1, mclBnFp2_mapToG2
-* 2019/Jan/31 fix crash on x64-CPU without AVX (thanks to mortdeus)
-
-# Author
-
-光成滋生 MITSUNARI Shigeo(herumi@nifty.com)
diff --git a/vendor/github.com/dexon-foundation/mcl/release.props b/vendor/github.com/dexon-foundation/mcl/release.props
deleted file mode 100644
index 886ce6890..000000000
--- a/vendor/github.com/dexon-foundation/mcl/release.props
+++ /dev/null
@@ -1,12 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ImportGroup Label="PropertySheets" />
-  <PropertyGroup Label="UserMacros" />
-  <PropertyGroup />
-  <ItemDefinitionGroup>
-    <ClCompile>
-      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
-    </ClCompile>
-  </ItemDefinitionGroup>
-  <ItemGroup />
-</Project>
\ No newline at end of file
diff --git a/vendor/github.com/dexon-foundation/mcl/sample/bench.cpp b/vendor/github.com/dexon-foundation/mcl/sample/bench.cpp
deleted file mode 100644
index 0f865b189..000000000
--- a/vendor/github.com/dexon-foundation/mcl/sample/bench.cpp
+++ /dev/null
@@ -1,233 +0,0 @@
-#include <mcl/fp.hpp>
-#include <mcl/ec.hpp>
-#include <mcl/ecparam.hpp>
-#include <cybozu/benchmark.hpp>
-#include <cybozu/option.hpp>
-#include <cybozu/xorshift.hpp>
-
-typedef mcl::FpT<> Fp;
-typedef mcl::FpT<mcl::ZnTag> Zn;
-typedef mcl::EcT<Fp> Ec;
-
-void benchFpSub(const char *pStr, const char *xStr, const char *yStr, mcl::fp::Mode mode)
-{
-	const char *s = mcl::fp::ModeToStr(mode);
-	Fp::init(pStr, mode);
-	Fp x(xStr);
-	Fp y(yStr);
-
-	double addT, subT, mulT, sqrT, invT;
-	CYBOZU_BENCH_T(addT, Fp::add, x, x, x);
-	CYBOZU_BENCH_T(subT, Fp::sub, x, x, y);
-	CYBOZU_BENCH_T(mulT, Fp::mul, x, x, x);
-	CYBOZU_BENCH_T(sqrT, Fp::sqr, x, x);
-	CYBOZU_BENCH_T(invT, x += y;Fp::inv, x, x); // avoid same jmp
-	printf("%10s bit % 3d add %8.2f sub %8.2f mul %8.2f sqr %8.2f inv %8.2f\n", s, (int)Fp::getBitSize(), addT, subT, mulT, sqrT, invT);
-}
-
-void benchFp(size_t bitSize, int mode)
-{
-	const struct {
-		size_t bitSize;
-		const char *p;
-		const char *x;
-		const char *y;
-	} tbl[] = {
-		{
-			192,
-			"0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d",
-			"0x148094810948190412345678901234567900342423332197",
-			"0x7fffffffffffffffffffffe26f2fc170f69466a74defd8d",
-		},
-		{
-			256,
-			"0x2523648240000001ba344d80000000086121000000000013a700000000000013",
-			"0x1480948109481904123456789234234242423424201234567900342423332197",
-			"0x151342342342341517fffffffffffffffffffffe26f2fc170f69466a74defd8d",
-		},
-		{
-			384,
-			"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff",
-			"0x19481084109481094820948209482094820984290482212345678901234567900342308472047204720422423332197",
-			"0x209348209481094820984209842094820948204204243123456789012345679003423084720472047204224233321972",
-
-		},
-		{
-			521,
-			"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
-			"0x2908209582095820941098410948109482094820984209840294829049240294242498540975555312345678901234567900342308472047204720422423332197",
-			"0x3948384209834029834092384204920349820948205872380573205782385729385729385723985837ffffffffffffffffffffffe26f2fc170f69466a74defd8d",
-
-		},
-	};
-	for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) {
-		if (bitSize != 0 && tbl[i].bitSize != bitSize) continue;
-		if (mode & 1) benchFpSub(tbl[i].p, tbl[i].x, tbl[i].y, mcl::fp::FP_GMP);
-		if (mode & 2) benchFpSub(tbl[i].p, tbl[i].x, tbl[i].y, mcl::fp::FP_GMP_MONT);
-#ifdef MCL_USE_LLVM
-		if (mode & 4) benchFpSub(tbl[i].p,
-        if (mode & 8) benchFpSub(tbl[i].p, tbl[i].x, tbl[i].y, mcl::fp::FP_LLVM_MONT);
-#endif
-#ifdef MCL_USE_XBYAK
-        if (mode & 16) benchFpSub(tbl[i].p, tbl[i].x, tbl[i].y, mcl::fp::FP_XBYAK);
-#endif
-    }
-}
-
-void benchEcSub(const mcl::EcParam& para, mcl::fp::Mode mode, mcl::ec::Mode ecMode)
-{
-    Fp::init(para.p, mode);
-    Zn::init(para.n);
-    Ec::init(para.a, para.b, ecMode);
-    Fp x(para.gx);
-    Fp y(para.gy);
-    Ec P(x, y);
-    Ec P2; Ec::add(P2, P, P);
-    Ec Q = P + P + P;
-    double addT, add2T, subT, dblT, mulT, mulCTT, mulRandT, mulCTRandT, normT;
-    CYBOZU_BENCH_T(addT, P = P2; Ec::add, Q, P, Q);
-    P.normalize();
-    CYBOZU_BENCH_T(add2T, Ec::add, Q, P, Q);
-    CYBOZU_BENCH_T(subT, Ec::sub, Q, P, Q);
-    CYBOZU_BENCH_T(dblT, Ec::dbl, P, P);
-    Zn z("3");
-    CYBOZU_BENCH_T(mulT, Ec::mul, Q, P, z);
-    CYBOZU_BENCH_T(mulCTT, Ec::mulCT, Q, P, z);
-    cybozu::XorShift rg;
-    z.setRand(rg);
-    CYBOZU_BENCH_T(mulRandT, Ec::mul, Q, P, z);
-    CYBOZU_BENCH_T(mulCTRandT, Ec::mulCT, Q, P, z);
-    CYBOZU_BENCH_T(normT, Q = P; Q.normalize);
-    printf("%10s %10s add %8.2f add2 %8.2f sub %8.2f dbl %8.2f mul(3) %8.2f mulCT(3) %8.2f mul(rand) %8.2f mulCT(rand) %8.2f norm %8.2f\n", para.name, mcl::fp::ModeToStr(mode), addT, add2T, subT, dblT, mulT, mulCTT, mulRandT, mulCTRandT, normT);
-
-}
-void benchEc(size_t bitSize, int mode, mcl::ec::Mode ecMode)
-{
-    const struct mcl::EcParam tbl[] = {
-        mcl::ecparam::p160_1,
-        mcl::ecparam::secp160k1,
-        mcl::ecparam::secp192k1,
-        mcl::ecparam::NIST_P192,
-        mcl::ecparam::secp224k1,
-        mcl::ecparam::secp256k1,
-        mcl::ecparam::NIST_P224,
-        mcl::ecparam::NIST_P256,
-//        mcl::ecparam::secp384r1,
-        mcl::ecparam::NIST_P384,
-//        mcl::ecparam::secp521r1,
-        mcl::ecparam::NIST_P521,
-    };
-    for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) {
-        if (bitSize != 0 && tbl[i].bitSize != bitSize) continue;
-        benchEcSub(tbl[i], mcl::fp::FP_AUTO, ecMode);
-        if (mode & 1) benchEcSub(tbl[i], mcl::fp::FP_GMP, ecMode);
-        if (mode & 2) benchEcSub(tbl[i], mcl::fp::FP_GMP_MONT, ecMode);
-#ifdef MCL_USE_LLVM
-        if (mode & 4) benchEcSub(tbl[i], mcl::fp::FP_LLVM, ecMode);
-        if (mode & 8) benchEcSub(tbl[i], mcl::fp::FP_LLVM_MONT, ecMode);
-#endif
-#ifdef MCL_USE_XBYAK
-        if (mode & 16) benchEcSub(tbl[i], mcl::fp::FP_XBYAK, ecMode);
-#endif
-    }
-}
-
-void benchToStr16()
-{
-    puts("benchToStr16");
-    const char *tbl[] = {
-        "0x0",
-        "0x5",
-        "0x123",
-        "0x123456789012345679adbc",
-        "0xffffffff26f2fc170f69466a74defd8d",
-        "0x100000000000000000000000000000033",
-        "0x11ee12312312940000000000000000000000000002342343"
-    };
-    Fp::init("0xffffffffffffffffffffffffffffffffffffffffffffff13");
-    for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) {
-        char buf[128];
-        std::string str;
-        Fp x(tbl[i]);
-        CYBOZU_BENCH("fp::arrayToHex", mcl::fp::arrayToHex, buf, sizeof(buf), x.getUnit(), x.getUnitSize(), true);
-        mpz_class y(tbl[i]);
-        CYBOZU_BENCH("gmp:getStr    ", mcl::gmp::getStr, str, y, 16);
-    }
-}
-
-void benchFromStr16()
-{
-    puts("benchFromStr16");
-    const char *tbl[] = {
-        "0",
-        "5",
-        "123",
-        "123456789012345679adbc",
-        "ffffffff26f2fc170f69466a74defd8d",
-        "100000000000000000000000000000033",
-        "11ee12312312940000000000000000000000000002342343"
-    };
-    Fp::init("0xffffffffffffffffffffffffffffffffffffffffffffff13");
-    for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) {
-        std::string str = tbl[i];
-        Fp x;
-        const size_t N = 64;
-        mcl::fp::Unit buf[N];
-        CYBOZU_BENCH("fp:hexToArray", mcl::fp::hexToArray, buf, N, str.c_str(), str.size());
-
-        mpz_class y;
-        CYBOZU_BENCH("gmp:setStr   ", mcl::gmp::setStr, y, str, 16);
-    }
-}
-
-int main(int argc, char *argv[])
-    try
-{
-    size_t bitSize;
-    int mode;
-    bool ecOnly;
-    bool fpOnly;
-    bool misc;
-    mcl::ec::Mode ecMode;
-    std::string ecModeStr;
-    cybozu::Option opt;
-    opt.appendOpt(&bitSize, 0, "s", ": bitSize");
-    opt.appendOpt(&mode, 0, "m", ": mode(0:all, sum of 1:gmp, 2:gmp+mont, 4:llvm, 8:llvm+mont, 16:xbyak)");
-    opt.appendBoolOpt(&ecOnly, "ec", ": ec only");
-    opt.appendBoolOpt(&fpOnly, "fp", ": fp only");
-    opt.appendBoolOpt(&misc, "misc", ": other benchmark");
-    opt.appendOpt(&ecModeStr, "jacobi", "ecmode", ": jacobi or proj");
-    opt.appendHelp("h", ": show this message");
-    if (!opt.parse(argc, argv)) {
-        opt.usage();
-        return 1;
-    }
-    if (ecModeStr == "jacobi") {
-        ecMode = mcl::ec::Jacobi;
-    } else if (ecModeStr == "proj") {
-        ecMode = mcl::ec::Proj;
-    } else {
-        printf("bad ecstr %s\n", ecModeStr.c_str());
-        opt.usage();
-        return 1;
-    }
-    if (mode < 0 || mode > 31) {
-        printf("bad mode %d\n", mode);
-        opt.usage();
-        return 1;
-    }
-    if (mode == 0) mode = 31;
-    if (misc) {
-        benchToStr16();
-        benchFromStr16();
-    } else {
-        if (!ecOnly) benchFp(bitSize, mode);
-        if (!fpOnly) {
-            printf("ecMode=%s\n", ecModeStr.c_str());
-            benchEc(bitSize, mode, ecMode);
-        }
-    }
-} catch (std::exception& e) {
-    printf("ERR %s\n", e.what());
-}
-
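In the benchmark just removed, `CYBOZU_BENCH_T(t, f, args...)` runs `f(args...)` in a timing loop and stores the measured average cost in `t`, and the `-m` bit-mask (1: gmp, 2: gmp+mont, 4: llvm, 8: llvm+mont, 16: xbyak) selects which arithmetic back ends to compare. A stripped-down sketch of the benchFpSub pattern, reusing the 192-bit prime from the table above:

```
#include <stdio.h>
#include <cybozu/benchmark.hpp>
#include <mcl/fp.hpp>

typedef mcl::FpT<> Fp;

int main()
{
    // 192-bit prime taken from the benchFp table above
    Fp::init("0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d");
    Fp x("12345"), y("67890");
    double addT;
    // times repeated calls of Fp::add(x, x, y); the mean cost lands in addT
    CYBOZU_BENCH_T(addT, Fp::add, x, x, y);
    printf("add %8.2f\n", addT);
}
```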
diff --git a/vendor/github.com/dexon-foundation/mcl/sample/bls_sig.cpp b/vendor/github.com/dexon-foundation/mcl/sample/bls_sig.cpp
deleted file mode 100644
index d75f7d427..000000000
--- a/vendor/github.com/dexon-foundation/mcl/sample/bls_sig.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
-    @file
-    @brief a sample of BLS signature
-    see https://github.com/herumi/bls
-    @author MITSUNARI Shigeo(@herumi)
-    @license modified new BSD license
-    http://opensource.org/licenses/BSD-3-Clause
-
-*/
-#include <mcl/bn256.hpp> // both headers inferred from usage; the bracketed names were stripped in transit
-#include <iostream>
-
-using namespace mcl::bn256;
-
-void Hash(G1& P, const std::string& m)
-{
-    Fp t;
-    t.setHashOf(m);
-    mapToG1(P, t);
-}
-
-void KeyGen(Fr& s, G2& pub, const G2& Q)
-{
-    s.setRand();
-    G2::mul(pub, Q, s); // pub = sQ
-}
-
-void Sign(G1& sign, const Fr& s, const std::string& m)
-{
-    G1 Hm;
-    Hash(Hm, m);
-    G1::mul(sign, Hm, s); // sign = s H(m)
-}
-
-bool Verify(const G1& sign, const G2& Q, const G2& pub, const std::string& m)
-{
-    Fp12 e1, e2;
-    G1 Hm;
-    Hash(Hm, m);
-    pairing(e1, sign, Q); // e1 = e(sign, Q)
-    pairing(e2, Hm, pub); // e2 = e(Hm, sQ)
-    return e1 == e2;
-}
-
-int main(int argc, char *argv[])
-{
-    std::string m = argc == 1 ? "hello mcl" : argv[1];
-
-    // setup parameter
-    initPairing();
-    G2 Q;
-    mapToG2(Q, 1);
-
-    // generate secret key and public key
-    Fr s;
-    G2 pub;
-    KeyGen(s, pub, Q);
-    std::cout << "secret key " << s << std::endl;
-    std::cout << "public key " << pub << std::endl;
-
-    // sign
-    G1 sign;
-    Sign(sign, s, m);
-    std::cout << "msg " << m << std::endl;
-    std::cout << "sign " << sign << std::endl;
-
-    // verify
-    bool ok = Verify(sign, Q, pub, m);
-    std::cout << "verify " << (ok ? "ok" : "ng") << std::endl;
-}
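Why `Verify` accepts exactly the signatures that `Sign` produces: by bilinearity of the pairing, e(s*H(m), Q) = e(H(m), Q)^s = e(H(m), s*Q), so `e1 == e2` holds precisely when `sign` was made with the secret `s` behind `pub`. A self-contained check of that identity, using only calls that appear in the deleted sample:

```
#include <mcl/bn256.hpp>
#include <string>

using namespace mcl::bn256;

// returns true for any s, Q, m: e(s*H(m), Q) == e(H(m), s*Q) by bilinearity
bool checkIdentity(const Fr& s, const G2& Q, const std::string& m)
{
    Fp t;
    t.setHashOf(m);
    G1 Hm;
    mapToG1(Hm, t);       // Hm = H(m)
    G1 sig;
    G1::mul(sig, Hm, s);  // sig = s * H(m)
    G2 pub;
    G2::mul(pub, Q, s);   // pub = s * Q
    Fp12 e1, e2;
    pairing(e1, sig, Q);  // e(s*H(m), Q)
    pairing(e2, Hm, pub); // e(H(m), s*Q)
    return e1 == e2;
}

int main()
{
    initPairing();
    G2 Q;
    mapToG2(Q, 1);
    Fr s;
    s.setRand();
    return checkIdentity(s, Q, "hello mcl") ? 0 : 1;
}
```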
"ok" : "ng") << std::endl; -} diff --git a/vendor/github.com/dexon-foundation/mcl/sample/ecdh.cpp b/vendor/github.com/dexon-foundation/mcl/sample/ecdh.cpp deleted file mode 100644 index d5c4a31b2..000000000 --- a/vendor/github.com/dexon-foundation/mcl/sample/ecdh.cpp +++ /dev/null @@ -1,64 +0,0 @@ -/* - sample of Elliptic Curve Diffie-Hellman key sharing -*/ -#include -#include -#include -#include -#include - -typedef mcl::FpT<> Fp; -typedef mcl::FpT Zn; -typedef mcl::EcT Ec; - -int main() -{ - cybozu::RandomGenerator rg; - /* - system setup with a parameter secp192k1 recommended by SECG - Ec is an elliptic curve over Fp - the cyclic group of